// SPDX-License-Identifier: GPL-2.0
/* Marvell OcteonTx2 RVU Ethernet driver
 *
 * Copyright (C) 2020 Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
11 #include <linux/pci.h>
12 #include <linux/ethtool.h>
13 #include <linux/stddef.h>
14 #include <linux/etherdevice.h>
15 #include <linux/log2.h>
16 #include <linux/net_tstamp.h>
17 #include <linux/linkmode.h>
19 #include "otx2_common.h"
22 #define DRV_NAME "octeontx2-nicpf"
23 #define DRV_VF_NAME "octeontx2-nicvf"
26 char name[ETH_GSTRING_LEN];
31 #define OTX2_DEV_STAT(stat) { \
33 .index = offsetof(struct otx2_dev_stats, stat) / sizeof(u64), \
36 /* Physical link config */
37 #define OTX2_ETHTOOL_SUPPORTED_MODES 0x638CCBF //110001110001100110010111111
44 static const struct otx2_stat otx2_dev_stats[] = {
45 OTX2_DEV_STAT(rx_ucast_frames),
46 OTX2_DEV_STAT(rx_bcast_frames),
47 OTX2_DEV_STAT(rx_mcast_frames),
49 OTX2_DEV_STAT(tx_ucast_frames),
50 OTX2_DEV_STAT(tx_bcast_frames),
51 OTX2_DEV_STAT(tx_mcast_frames),
/* Driver level stats */
#define OTX2_DRV_STAT(stat) { \
	.name = #stat, \
	.index = offsetof(struct otx2_drv_stats, stat) / sizeof(atomic_t), \
}
60 static const struct otx2_stat otx2_drv_stats[] = {
61 OTX2_DRV_STAT(rx_fcs_errs),
62 OTX2_DRV_STAT(rx_oversize_errs),
63 OTX2_DRV_STAT(rx_undersize_errs),
64 OTX2_DRV_STAT(rx_csum_errs),
65 OTX2_DRV_STAT(rx_len_errs),
66 OTX2_DRV_STAT(rx_other_errs),
69 static const struct otx2_stat otx2_queue_stats[] = {
74 static const unsigned int otx2_n_dev_stats = ARRAY_SIZE(otx2_dev_stats);
75 static const unsigned int otx2_n_drv_stats = ARRAY_SIZE(otx2_drv_stats);
76 static const unsigned int otx2_n_queue_stats = ARRAY_SIZE(otx2_queue_stats);
78 static struct cgx_fw_data *otx2_get_fwdata(struct otx2_nic *pfvf);
80 static void otx2_get_drvinfo(struct net_device *netdev,
81 struct ethtool_drvinfo *info)
83 struct otx2_nic *pfvf = netdev_priv(netdev);
85 strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
86 strlcpy(info->bus_info, pci_name(pfvf->pdev), sizeof(info->bus_info));
89 static void otx2_get_qset_strings(struct otx2_nic *pfvf, u8 **data, int qset)
91 int start_qidx = qset * pfvf->hw.rx_queues;
94 for (qidx = 0; qidx < pfvf->hw.rx_queues; qidx++) {
95 for (stats = 0; stats < otx2_n_queue_stats; stats++) {
96 sprintf(*data, "rxq%d: %s", qidx + start_qidx,
97 otx2_queue_stats[stats].name);
98 *data += ETH_GSTRING_LEN;
101 for (qidx = 0; qidx < pfvf->hw.tx_queues; qidx++) {
102 for (stats = 0; stats < otx2_n_queue_stats; stats++) {
103 sprintf(*data, "txq%d: %s", qidx + start_qidx,
104 otx2_queue_stats[stats].name);
105 *data += ETH_GSTRING_LEN;
110 static void otx2_get_strings(struct net_device *netdev, u32 sset, u8 *data)
112 struct otx2_nic *pfvf = netdev_priv(netdev);
115 if (sset != ETH_SS_STATS)
118 for (stats = 0; stats < otx2_n_dev_stats; stats++) {
119 memcpy(data, otx2_dev_stats[stats].name, ETH_GSTRING_LEN);
120 data += ETH_GSTRING_LEN;
123 for (stats = 0; stats < otx2_n_drv_stats; stats++) {
124 memcpy(data, otx2_drv_stats[stats].name, ETH_GSTRING_LEN);
125 data += ETH_GSTRING_LEN;
128 otx2_get_qset_strings(pfvf, &data, 0);
130 for (stats = 0; stats < CGX_RX_STATS_COUNT; stats++) {
131 sprintf(data, "cgx_rxstat%d: ", stats);
132 data += ETH_GSTRING_LEN;
135 for (stats = 0; stats < CGX_TX_STATS_COUNT; stats++) {
136 sprintf(data, "cgx_txstat%d: ", stats);
137 data += ETH_GSTRING_LEN;
140 strcpy(data, "reset_count");
141 data += ETH_GSTRING_LEN;
142 sprintf(data, "Fec Corrected Errors: ");
143 data += ETH_GSTRING_LEN;
144 sprintf(data, "Fec Uncorrected Errors: ");
145 data += ETH_GSTRING_LEN;
148 static void otx2_get_qset_stats(struct otx2_nic *pfvf,
149 struct ethtool_stats *stats, u64 **data)
155 for (qidx = 0; qidx < pfvf->hw.rx_queues; qidx++) {
156 if (!otx2_update_rq_stats(pfvf, qidx)) {
157 for (stat = 0; stat < otx2_n_queue_stats; stat++)
161 for (stat = 0; stat < otx2_n_queue_stats; stat++)
162 *((*data)++) = ((u64 *)&pfvf->qset.rq[qidx].stats)
163 [otx2_queue_stats[stat].index];
166 for (qidx = 0; qidx < pfvf->hw.tx_queues; qidx++) {
167 if (!otx2_update_sq_stats(pfvf, qidx)) {
168 for (stat = 0; stat < otx2_n_queue_stats; stat++)
172 for (stat = 0; stat < otx2_n_queue_stats; stat++)
173 *((*data)++) = ((u64 *)&pfvf->qset.sq[qidx].stats)
174 [otx2_queue_stats[stat].index];
178 static int otx2_get_phy_fec_stats(struct otx2_nic *pfvf)
183 mutex_lock(&pfvf->mbox.lock);
184 req = otx2_mbox_alloc_msg_cgx_get_phy_fec_stats(&pfvf->mbox);
188 if (!otx2_sync_mbox_msg(&pfvf->mbox))
191 mutex_unlock(&pfvf->mbox.lock);
195 /* Get device and per queue statistics */
196 static void otx2_get_ethtool_stats(struct net_device *netdev,
197 struct ethtool_stats *stats, u64 *data)
199 struct otx2_nic *pfvf = netdev_priv(netdev);
200 u64 fec_corr_blks, fec_uncorr_blks;
201 struct cgx_fw_data *rsp;
204 otx2_get_dev_stats(pfvf);
205 for (stat = 0; stat < otx2_n_dev_stats; stat++)
206 *(data++) = ((u64 *)&pfvf->hw.dev_stats)
207 [otx2_dev_stats[stat].index];
209 for (stat = 0; stat < otx2_n_drv_stats; stat++)
210 *(data++) = atomic_read(&((atomic_t *)&pfvf->hw.drv_stats)
211 [otx2_drv_stats[stat].index]);
213 otx2_get_qset_stats(pfvf, stats, &data);
214 otx2_update_lmac_stats(pfvf);
215 for (stat = 0; stat < CGX_RX_STATS_COUNT; stat++)
216 *(data++) = pfvf->hw.cgx_rx_stats[stat];
217 for (stat = 0; stat < CGX_TX_STATS_COUNT; stat++)
218 *(data++) = pfvf->hw.cgx_tx_stats[stat];
219 *(data++) = pfvf->reset_count;
221 fec_corr_blks = pfvf->hw.cgx_fec_corr_blks;
222 fec_uncorr_blks = pfvf->hw.cgx_fec_uncorr_blks;
224 rsp = otx2_get_fwdata(pfvf);
225 if (!IS_ERR(rsp) && rsp->fwdata.phy.misc.has_fec_stats &&
226 !otx2_get_phy_fec_stats(pfvf)) {
227 /* Fetch fwdata again because it's been recently populated with
228 * latest PHY FEC stats.
230 rsp = otx2_get_fwdata(pfvf);
232 struct fec_stats_s *p = &rsp->fwdata.phy.fec_stats;
234 if (pfvf->linfo.fec == OTX2_FEC_BASER) {
235 fec_corr_blks = p->brfec_corr_blks;
236 fec_uncorr_blks = p->brfec_uncorr_blks;
238 fec_corr_blks = p->rsfec_corr_cws;
239 fec_uncorr_blks = p->rsfec_uncorr_cws;
244 *(data++) = fec_corr_blks;
245 *(data++) = fec_uncorr_blks;
248 static int otx2_get_sset_count(struct net_device *netdev, int sset)
250 struct otx2_nic *pfvf = netdev_priv(netdev);
253 if (sset != ETH_SS_STATS)
256 qstats_count = otx2_n_queue_stats *
257 (pfvf->hw.rx_queues + pfvf->hw.tx_queues);
258 otx2_update_lmac_fec_stats(pfvf);
260 return otx2_n_dev_stats + otx2_n_drv_stats + qstats_count +
261 CGX_RX_STATS_COUNT + CGX_TX_STATS_COUNT + OTX2_FEC_STATS_CNT
265 /* Get no of queues device supports and current queue count */
266 static void otx2_get_channels(struct net_device *dev,
267 struct ethtool_channels *channel)
269 struct otx2_nic *pfvf = netdev_priv(dev);
271 channel->max_rx = pfvf->hw.max_queues;
272 channel->max_tx = pfvf->hw.max_queues;
274 channel->rx_count = pfvf->hw.rx_queues;
275 channel->tx_count = pfvf->hw.tx_queues;
278 /* Set no of Tx, Rx queues to be used */
279 static int otx2_set_channels(struct net_device *dev,
280 struct ethtool_channels *channel)
282 struct otx2_nic *pfvf = netdev_priv(dev);
283 bool if_up = netif_running(dev);
286 if (!channel->rx_count || !channel->tx_count)
290 dev->netdev_ops->ndo_stop(dev);
292 err = otx2_set_real_num_queues(dev, channel->tx_count,
297 pfvf->hw.rx_queues = channel->rx_count;
298 pfvf->hw.tx_queues = channel->tx_count;
299 pfvf->qset.cq_cnt = pfvf->hw.tx_queues + pfvf->hw.rx_queues;
303 dev->netdev_ops->ndo_open(dev);
305 netdev_info(dev, "Setting num Tx rings to %d, Rx rings to %d success\n",
306 pfvf->hw.tx_queues, pfvf->hw.rx_queues);
311 static void otx2_get_pauseparam(struct net_device *netdev,
312 struct ethtool_pauseparam *pause)
314 struct otx2_nic *pfvf = netdev_priv(netdev);
315 struct cgx_pause_frm_cfg *req, *rsp;
317 if (is_otx2_lbkvf(pfvf->pdev))
320 req = otx2_mbox_alloc_msg_cgx_cfg_pause_frm(&pfvf->mbox);
324 if (!otx2_sync_mbox_msg(&pfvf->mbox)) {
325 rsp = (struct cgx_pause_frm_cfg *)
326 otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr);
327 pause->rx_pause = rsp->rx_pause;
328 pause->tx_pause = rsp->tx_pause;
332 static int otx2_set_pauseparam(struct net_device *netdev,
333 struct ethtool_pauseparam *pause)
335 struct otx2_nic *pfvf = netdev_priv(netdev);
340 if (is_otx2_lbkvf(pfvf->pdev))
344 pfvf->flags |= OTX2_FLAG_RX_PAUSE_ENABLED;
346 pfvf->flags &= ~OTX2_FLAG_RX_PAUSE_ENABLED;
349 pfvf->flags |= OTX2_FLAG_TX_PAUSE_ENABLED;
351 pfvf->flags &= ~OTX2_FLAG_TX_PAUSE_ENABLED;
353 return otx2_config_pause_frm(pfvf);
356 static void otx2_get_ringparam(struct net_device *netdev,
357 struct ethtool_ringparam *ring)
359 struct otx2_nic *pfvf = netdev_priv(netdev);
360 struct otx2_qset *qs = &pfvf->qset;
362 ring->rx_max_pending = Q_COUNT(Q_SIZE_MAX);
363 ring->rx_pending = qs->rqe_cnt ? qs->rqe_cnt : Q_COUNT(Q_SIZE_256);
364 ring->tx_max_pending = Q_COUNT(Q_SIZE_MAX);
365 ring->tx_pending = qs->sqe_cnt ? qs->sqe_cnt : Q_COUNT(Q_SIZE_4K);
368 static int otx2_set_ringparam(struct net_device *netdev,
369 struct ethtool_ringparam *ring)
371 struct otx2_nic *pfvf = netdev_priv(netdev);
372 bool if_up = netif_running(netdev);
373 struct otx2_qset *qs = &pfvf->qset;
374 u32 rx_count, tx_count;
376 if (ring->rx_mini_pending || ring->rx_jumbo_pending)
379 /* Permitted lengths are 16 64 256 1K 4K 16K 64K 256K 1M */
380 rx_count = ring->rx_pending;
381 /* On some silicon variants a skid or reserved CQEs are
382 * needed to avoid CQ overflow.
384 if (rx_count < pfvf->hw.rq_skid)
385 rx_count = pfvf->hw.rq_skid;
386 rx_count = Q_COUNT(Q_SIZE(rx_count, 3));
388 /* Due pipelining impact minimum 2000 unused SQ CQE's
389 * need to be maintained to avoid CQ overflow, hence the
392 tx_count = clamp_t(u32, ring->tx_pending,
393 Q_COUNT(Q_SIZE_4K), Q_COUNT(Q_SIZE_MAX));
394 tx_count = Q_COUNT(Q_SIZE(tx_count, 3));
396 if (tx_count == qs->sqe_cnt && rx_count == qs->rqe_cnt)
400 netdev->netdev_ops->ndo_stop(netdev);
402 /* Assigned to the nearest possible exponent. */
403 qs->sqe_cnt = tx_count;
404 qs->rqe_cnt = rx_count;
407 netdev->netdev_ops->ndo_open(netdev);
412 static int otx2_get_coalesce(struct net_device *netdev,
413 struct ethtool_coalesce *cmd)
415 struct otx2_nic *pfvf = netdev_priv(netdev);
416 struct otx2_hw *hw = &pfvf->hw;
418 cmd->rx_coalesce_usecs = hw->cq_time_wait;
419 cmd->rx_max_coalesced_frames = hw->cq_ecount_wait;
420 cmd->tx_coalesce_usecs = hw->cq_time_wait;
421 cmd->tx_max_coalesced_frames = hw->cq_ecount_wait;
426 static int otx2_set_coalesce(struct net_device *netdev,
427 struct ethtool_coalesce *ec)
429 struct otx2_nic *pfvf = netdev_priv(netdev);
430 struct otx2_hw *hw = &pfvf->hw;
433 if (!ec->rx_max_coalesced_frames || !ec->tx_max_coalesced_frames)
436 /* 'cq_time_wait' is 8bit and is in multiple of 100ns,
437 * so clamp the user given value to the range of 1 to 25usec.
439 ec->rx_coalesce_usecs = clamp_t(u32, ec->rx_coalesce_usecs,
440 1, CQ_TIMER_THRESH_MAX);
441 ec->tx_coalesce_usecs = clamp_t(u32, ec->tx_coalesce_usecs,
442 1, CQ_TIMER_THRESH_MAX);
444 /* Rx and Tx are mapped to same CQ, check which one
445 * is changed, if both then choose the min.
447 if (hw->cq_time_wait == ec->rx_coalesce_usecs)
448 hw->cq_time_wait = ec->tx_coalesce_usecs;
449 else if (hw->cq_time_wait == ec->tx_coalesce_usecs)
450 hw->cq_time_wait = ec->rx_coalesce_usecs;
452 hw->cq_time_wait = min_t(u8, ec->rx_coalesce_usecs,
453 ec->tx_coalesce_usecs);
455 /* Max ecount_wait supported is 16bit,
456 * so clamp the user given value to the range of 1 to 64k.
458 ec->rx_max_coalesced_frames = clamp_t(u32, ec->rx_max_coalesced_frames,
460 ec->tx_max_coalesced_frames = clamp_t(u32, ec->tx_max_coalesced_frames,
463 /* Rx and Tx are mapped to same CQ, check which one
464 * is changed, if both then choose the min.
466 if (hw->cq_ecount_wait == ec->rx_max_coalesced_frames)
467 hw->cq_ecount_wait = ec->tx_max_coalesced_frames;
468 else if (hw->cq_ecount_wait == ec->tx_max_coalesced_frames)
469 hw->cq_ecount_wait = ec->rx_max_coalesced_frames;
471 hw->cq_ecount_wait = min_t(u16, ec->rx_max_coalesced_frames,
472 ec->tx_max_coalesced_frames);
474 if (netif_running(netdev)) {
475 for (qidx = 0; qidx < pfvf->hw.cint_cnt; qidx++)
476 otx2_config_irq_coalescing(pfvf, qidx);
482 static int otx2_get_rss_hash_opts(struct otx2_nic *pfvf,
483 struct ethtool_rxnfc *nfc)
485 struct otx2_rss_info *rss = &pfvf->hw.rss_info;
487 if (!(rss->flowkey_cfg &
488 (NIX_FLOW_KEY_TYPE_IPV4 | NIX_FLOW_KEY_TYPE_IPV6)))
491 /* Mimimum is IPv4 and IPv6, SIP/DIP */
492 nfc->data = RXH_IP_SRC | RXH_IP_DST;
493 if (rss->flowkey_cfg & NIX_FLOW_KEY_TYPE_VLAN)
494 nfc->data |= RXH_VLAN;
496 switch (nfc->flow_type) {
499 if (rss->flowkey_cfg & NIX_FLOW_KEY_TYPE_TCP)
500 nfc->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
504 if (rss->flowkey_cfg & NIX_FLOW_KEY_TYPE_UDP)
505 nfc->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
509 if (rss->flowkey_cfg & NIX_FLOW_KEY_TYPE_SCTP)
510 nfc->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
514 if (rss->flowkey_cfg & NIX_FLOW_KEY_TYPE_ESP)
515 nfc->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
532 static int otx2_set_rss_hash_opts(struct otx2_nic *pfvf,
533 struct ethtool_rxnfc *nfc)
535 struct otx2_rss_info *rss = &pfvf->hw.rss_info;
536 u32 rxh_l4 = RXH_L4_B_0_1 | RXH_L4_B_2_3;
537 u32 rss_cfg = rss->flowkey_cfg;
540 netdev_err(pfvf->netdev,
541 "RSS is disabled, cannot change settings\n");
545 /* Mimimum is IPv4 and IPv6, SIP/DIP */
546 if (!(nfc->data & RXH_IP_SRC) || !(nfc->data & RXH_IP_DST))
549 if (nfc->data & RXH_VLAN)
550 rss_cfg |= NIX_FLOW_KEY_TYPE_VLAN;
552 rss_cfg &= ~NIX_FLOW_KEY_TYPE_VLAN;
554 switch (nfc->flow_type) {
557 /* Different config for v4 and v6 is not supported.
558 * Both of them have to be either 4-tuple or 2-tuple.
560 switch (nfc->data & rxh_l4) {
562 rss_cfg &= ~NIX_FLOW_KEY_TYPE_TCP;
564 case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
565 rss_cfg |= NIX_FLOW_KEY_TYPE_TCP;
573 switch (nfc->data & rxh_l4) {
575 rss_cfg &= ~NIX_FLOW_KEY_TYPE_UDP;
577 case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
578 rss_cfg |= NIX_FLOW_KEY_TYPE_UDP;
586 switch (nfc->data & rxh_l4) {
588 rss_cfg &= ~NIX_FLOW_KEY_TYPE_SCTP;
590 case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
591 rss_cfg |= NIX_FLOW_KEY_TYPE_SCTP;
599 switch (nfc->data & rxh_l4) {
601 rss_cfg &= ~(NIX_FLOW_KEY_TYPE_ESP |
602 NIX_FLOW_KEY_TYPE_AH);
603 rss_cfg |= NIX_FLOW_KEY_TYPE_VLAN |
604 NIX_FLOW_KEY_TYPE_IPV4_PROTO;
606 case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
607 /* If VLAN hashing is also requested for ESP then do not
608 * allow because of hardware 40 bytes flow key limit.
610 if (rss_cfg & NIX_FLOW_KEY_TYPE_VLAN) {
611 netdev_err(pfvf->netdev,
612 "RSS hash of ESP or AH with VLAN is not supported\n");
616 rss_cfg |= NIX_FLOW_KEY_TYPE_ESP | NIX_FLOW_KEY_TYPE_AH;
617 /* Disable IPv4 proto hashing since IPv6 SA+DA(32 bytes)
618 * and ESP SPI+sequence(8 bytes) uses hardware maximum
619 * limit of 40 byte flow key.
621 rss_cfg &= ~NIX_FLOW_KEY_TYPE_IPV4_PROTO;
629 rss_cfg = NIX_FLOW_KEY_TYPE_IPV4 | NIX_FLOW_KEY_TYPE_IPV6;
635 rss->flowkey_cfg = rss_cfg;
636 otx2_set_flowkey_cfg(pfvf);
640 static int otx2_get_rxnfc(struct net_device *dev,
641 struct ethtool_rxnfc *nfc, u32 *rules)
643 struct otx2_nic *pfvf = netdev_priv(dev);
644 int ret = -EOPNOTSUPP;
647 case ETHTOOL_GRXRINGS:
648 nfc->data = pfvf->hw.rx_queues;
651 case ETHTOOL_GRXCLSRLCNT:
652 nfc->rule_cnt = pfvf->flow_cfg->nr_flows;
655 case ETHTOOL_GRXCLSRULE:
656 ret = otx2_get_flow(pfvf, nfc, nfc->fs.location);
658 case ETHTOOL_GRXCLSRLALL:
659 ret = otx2_get_all_flows(pfvf, nfc, rules);
662 return otx2_get_rss_hash_opts(pfvf, nfc);
669 static int otx2_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *nfc)
671 bool ntuple = !!(dev->features & NETIF_F_NTUPLE);
672 struct otx2_nic *pfvf = netdev_priv(dev);
673 int ret = -EOPNOTSUPP;
677 ret = otx2_set_rss_hash_opts(pfvf, nfc);
679 case ETHTOOL_SRXCLSRLINS:
680 if (netif_running(dev) && ntuple)
681 ret = otx2_add_flow(pfvf, nfc);
683 case ETHTOOL_SRXCLSRLDEL:
684 if (netif_running(dev) && ntuple)
685 ret = otx2_remove_flow(pfvf, nfc->fs.location);
694 static int otx2vf_get_rxnfc(struct net_device *dev,
695 struct ethtool_rxnfc *nfc, u32 *rules)
697 struct otx2_nic *pfvf = netdev_priv(dev);
698 int ret = -EOPNOTSUPP;
701 case ETHTOOL_GRXRINGS:
702 nfc->data = pfvf->hw.rx_queues;
706 return otx2_get_rss_hash_opts(pfvf, nfc);
713 static int otx2vf_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *nfc)
715 struct otx2_nic *pfvf = netdev_priv(dev);
716 int ret = -EOPNOTSUPP;
720 ret = otx2_set_rss_hash_opts(pfvf, nfc);
729 static u32 otx2_get_rxfh_key_size(struct net_device *netdev)
731 struct otx2_nic *pfvf = netdev_priv(netdev);
732 struct otx2_rss_info *rss;
734 rss = &pfvf->hw.rss_info;
736 return sizeof(rss->key);
739 static u32 otx2_get_rxfh_indir_size(struct net_device *dev)
741 return MAX_RSS_INDIR_TBL_SIZE;
744 static int otx2_rss_ctx_delete(struct otx2_nic *pfvf, int ctx_id)
746 struct otx2_rss_info *rss = &pfvf->hw.rss_info;
748 otx2_rss_ctx_flow_del(pfvf, ctx_id);
749 kfree(rss->rss_ctx[ctx_id]);
750 rss->rss_ctx[ctx_id] = NULL;
755 static int otx2_rss_ctx_create(struct otx2_nic *pfvf,
758 struct otx2_rss_info *rss = &pfvf->hw.rss_info;
761 for (ctx = 0; ctx < MAX_RSS_GROUPS; ctx++) {
762 if (!rss->rss_ctx[ctx])
765 if (ctx == MAX_RSS_GROUPS)
768 rss->rss_ctx[ctx] = kzalloc(sizeof(*rss->rss_ctx[ctx]), GFP_KERNEL);
769 if (!rss->rss_ctx[ctx])
776 /* RSS context configuration */
777 static int otx2_set_rxfh_context(struct net_device *dev, const u32 *indir,
778 const u8 *hkey, const u8 hfunc,
779 u32 *rss_context, bool delete)
781 struct otx2_nic *pfvf = netdev_priv(dev);
782 struct otx2_rss_ctx *rss_ctx;
783 struct otx2_rss_info *rss;
786 if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
789 if (*rss_context != ETH_RXFH_CONTEXT_ALLOC &&
790 *rss_context >= MAX_RSS_GROUPS)
793 rss = &pfvf->hw.rss_info;
796 netdev_err(dev, "RSS is disabled, cannot change settings\n");
801 memcpy(rss->key, hkey, sizeof(rss->key));
802 otx2_set_rss_key(pfvf);
805 return otx2_rss_ctx_delete(pfvf, *rss_context);
807 if (*rss_context == ETH_RXFH_CONTEXT_ALLOC) {
808 ret = otx2_rss_ctx_create(pfvf, rss_context);
813 rss_ctx = rss->rss_ctx[*rss_context];
814 for (idx = 0; idx < rss->rss_size; idx++)
815 rss_ctx->ind_tbl[idx] = indir[idx];
817 otx2_set_rss_table(pfvf, *rss_context);
822 static int otx2_get_rxfh_context(struct net_device *dev, u32 *indir,
823 u8 *hkey, u8 *hfunc, u32 rss_context)
825 struct otx2_nic *pfvf = netdev_priv(dev);
826 struct otx2_rss_ctx *rss_ctx;
827 struct otx2_rss_info *rss;
830 rss = &pfvf->hw.rss_info;
833 *hfunc = ETH_RSS_HASH_TOP;
838 if (!rss->enable && rss_context == DEFAULT_RSS_CONTEXT_GROUP) {
839 rx_queues = pfvf->hw.rx_queues;
840 for (idx = 0; idx < MAX_RSS_INDIR_TBL_SIZE; idx++)
841 indir[idx] = ethtool_rxfh_indir_default(idx, rx_queues);
844 if (rss_context >= MAX_RSS_GROUPS)
847 rss_ctx = rss->rss_ctx[rss_context];
852 for (idx = 0; idx < rss->rss_size; idx++)
853 indir[idx] = rss_ctx->ind_tbl[idx];
856 memcpy(hkey, rss->key, sizeof(rss->key));
861 /* Get RSS configuration */
862 static int otx2_get_rxfh(struct net_device *dev, u32 *indir,
865 return otx2_get_rxfh_context(dev, indir, hkey, hfunc,
866 DEFAULT_RSS_CONTEXT_GROUP);
869 /* Configure RSS table and hash key */
870 static int otx2_set_rxfh(struct net_device *dev, const u32 *indir,
871 const u8 *hkey, const u8 hfunc)
874 u32 rss_context = DEFAULT_RSS_CONTEXT_GROUP;
876 return otx2_set_rxfh_context(dev, indir, hkey, hfunc, &rss_context, 0);
879 static u32 otx2_get_msglevel(struct net_device *netdev)
881 struct otx2_nic *pfvf = netdev_priv(netdev);
883 return pfvf->msg_enable;
886 static void otx2_set_msglevel(struct net_device *netdev, u32 val)
888 struct otx2_nic *pfvf = netdev_priv(netdev);
890 pfvf->msg_enable = val;
893 static u32 otx2_get_link(struct net_device *netdev)
895 struct otx2_nic *pfvf = netdev_priv(netdev);
897 /* LBK link is internal and always UP */
898 if (is_otx2_lbkvf(pfvf->pdev))
900 return pfvf->linfo.link_up;
903 static int otx2_get_ts_info(struct net_device *netdev,
904 struct ethtool_ts_info *info)
906 struct otx2_nic *pfvf = netdev_priv(netdev);
909 return ethtool_op_get_ts_info(netdev, info);
911 info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
912 SOF_TIMESTAMPING_RX_SOFTWARE |
913 SOF_TIMESTAMPING_SOFTWARE |
914 SOF_TIMESTAMPING_TX_HARDWARE |
915 SOF_TIMESTAMPING_RX_HARDWARE |
916 SOF_TIMESTAMPING_RAW_HARDWARE;
918 info->phc_index = otx2_ptp_clock_index(pfvf);
920 info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
922 info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
923 (1 << HWTSTAMP_FILTER_ALL);
928 static struct cgx_fw_data *otx2_get_fwdata(struct otx2_nic *pfvf)
930 struct cgx_fw_data *rsp = NULL;
934 mutex_lock(&pfvf->mbox.lock);
935 req = otx2_mbox_alloc_msg_cgx_get_aux_link_info(&pfvf->mbox);
937 mutex_unlock(&pfvf->mbox.lock);
938 return ERR_PTR(-ENOMEM);
941 err = otx2_sync_mbox_msg(&pfvf->mbox);
943 rsp = (struct cgx_fw_data *)
944 otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr);
949 mutex_unlock(&pfvf->mbox.lock);
953 static int otx2_get_fecparam(struct net_device *netdev,
954 struct ethtool_fecparam *fecparam)
956 struct otx2_nic *pfvf = netdev_priv(netdev);
957 struct cgx_fw_data *rsp;
962 ETHTOOL_FEC_BASER | ETHTOOL_FEC_RS};
963 #define FEC_MAX_INDEX 4
964 if (pfvf->linfo.fec < FEC_MAX_INDEX)
965 fecparam->active_fec = fec[pfvf->linfo.fec];
967 rsp = otx2_get_fwdata(pfvf);
971 if (rsp->fwdata.supported_fec < FEC_MAX_INDEX) {
972 if (!rsp->fwdata.supported_fec)
973 fecparam->fec = ETHTOOL_FEC_NONE;
975 fecparam->fec = fec[rsp->fwdata.supported_fec];
980 static int otx2_set_fecparam(struct net_device *netdev,
981 struct ethtool_fecparam *fecparam)
983 struct otx2_nic *pfvf = netdev_priv(netdev);
984 struct mbox *mbox = &pfvf->mbox;
985 struct fec_mode *req, *rsp;
986 int err = 0, fec = 0;
988 switch (fecparam->fec) {
989 /* Firmware does not support AUTO mode consider it as FEC_OFF */
990 case ETHTOOL_FEC_OFF:
991 case ETHTOOL_FEC_AUTO:
997 case ETHTOOL_FEC_BASER:
998 fec = OTX2_FEC_BASER;
1001 netdev_warn(pfvf->netdev, "Unsupported FEC mode: %d",
1006 if (fec == pfvf->linfo.fec)
1009 mutex_lock(&mbox->lock);
1010 req = otx2_mbox_alloc_msg_cgx_set_fec_param(&pfvf->mbox);
1016 err = otx2_sync_mbox_msg(&pfvf->mbox);
1020 rsp = (struct fec_mode *)otx2_mbox_get_rsp(&pfvf->mbox.mbox,
1023 pfvf->linfo.fec = rsp->fec;
1027 mutex_unlock(&mbox->lock);
1031 static void otx2_get_fec_info(u64 index, int req_mode,
1032 struct ethtool_link_ksettings *link_ksettings)
1034 __ETHTOOL_DECLARE_LINK_MODE_MASK(otx2_fec_modes) = { 0, };
1038 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT,
1041 case OTX2_FEC_BASER:
1042 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
1046 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
1049 case OTX2_FEC_BASER | OTX2_FEC_RS:
1050 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
1052 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
1057 /* Add fec modes to existing modes */
1058 if (req_mode == OTX2_MODE_ADVERTISED)
1059 linkmode_or(link_ksettings->link_modes.advertising,
1060 link_ksettings->link_modes.advertising,
1063 linkmode_or(link_ksettings->link_modes.supported,
1064 link_ksettings->link_modes.supported,
1068 static void otx2_get_link_mode_info(u64 link_mode_bmap,
1070 struct ethtool_link_ksettings
1073 __ETHTOOL_DECLARE_LINK_MODE_MASK(otx2_link_modes) = { 0, };
1074 const int otx2_sgmii_features[6] = {
1075 ETHTOOL_LINK_MODE_10baseT_Half_BIT,
1076 ETHTOOL_LINK_MODE_10baseT_Full_BIT,
1077 ETHTOOL_LINK_MODE_100baseT_Half_BIT,
1078 ETHTOOL_LINK_MODE_100baseT_Full_BIT,
1079 ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
1080 ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
1082 /* CGX link modes to Ethtool link mode mapping */
1083 const int cgx_link_mode[27] = {
1085 ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
1086 ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
1087 ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
1088 ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
1089 ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
1091 ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
1094 ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
1095 ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
1096 ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
1097 ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
1098 ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
1099 ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
1101 ETHTOOL_LINK_MODE_50000baseSR_Full_BIT,
1103 ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
1104 ETHTOOL_LINK_MODE_50000baseCR_Full_BIT,
1105 ETHTOOL_LINK_MODE_50000baseKR_Full_BIT,
1107 ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
1108 ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
1109 ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
1110 ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT
1114 link_mode_bmap = link_mode_bmap & OTX2_ETHTOOL_SUPPORTED_MODES;
1116 for_each_set_bit(bit, (unsigned long *)&link_mode_bmap, 27) {
1117 /* SGMII mode is set */
1119 linkmode_set_bit_array(otx2_sgmii_features,
1120 ARRAY_SIZE(otx2_sgmii_features),
1123 linkmode_set_bit(cgx_link_mode[bit], otx2_link_modes);
1126 if (req_mode == OTX2_MODE_ADVERTISED)
1127 linkmode_copy(link_ksettings->link_modes.advertising,
1130 linkmode_copy(link_ksettings->link_modes.supported,
1134 static int otx2_get_link_ksettings(struct net_device *netdev,
1135 struct ethtool_link_ksettings *cmd)
1137 struct otx2_nic *pfvf = netdev_priv(netdev);
1138 struct cgx_fw_data *rsp = NULL;
1140 cmd->base.duplex = pfvf->linfo.full_duplex;
1141 cmd->base.speed = pfvf->linfo.speed;
1142 cmd->base.autoneg = pfvf->linfo.an;
1144 rsp = otx2_get_fwdata(pfvf);
1146 return PTR_ERR(rsp);
1148 if (rsp->fwdata.supported_an)
1149 ethtool_link_ksettings_add_link_mode(cmd,
1153 otx2_get_link_mode_info(rsp->fwdata.advertised_link_modes,
1154 OTX2_MODE_ADVERTISED, cmd);
1155 otx2_get_fec_info(rsp->fwdata.advertised_fec,
1156 OTX2_MODE_ADVERTISED, cmd);
1157 otx2_get_link_mode_info(rsp->fwdata.supported_link_modes,
1158 OTX2_MODE_SUPPORTED, cmd);
1159 otx2_get_fec_info(rsp->fwdata.supported_fec,
1160 OTX2_MODE_SUPPORTED, cmd);
1164 static void otx2_get_advertised_mode(const struct ethtool_link_ksettings *cmd,
1169 /* Firmware does not support requesting multiple advertised modes
1170 * return first set bit
1172 bit_pos = find_first_bit(cmd->link_modes.advertising,
1173 __ETHTOOL_LINK_MODE_MASK_NBITS);
1174 if (bit_pos != __ETHTOOL_LINK_MODE_MASK_NBITS)
1178 static int otx2_set_link_ksettings(struct net_device *netdev,
1179 const struct ethtool_link_ksettings *cmd)
1181 struct otx2_nic *pf = netdev_priv(netdev);
1182 struct ethtool_link_ksettings cur_ks;
1183 struct cgx_set_link_mode_req *req;
1184 struct mbox *mbox = &pf->mbox;
1187 memset(&cur_ks, 0, sizeof(struct ethtool_link_ksettings));
1189 if (!ethtool_validate_speed(cmd->base.speed) ||
1190 !ethtool_validate_duplex(cmd->base.duplex))
1193 if (cmd->base.autoneg != AUTONEG_ENABLE &&
1194 cmd->base.autoneg != AUTONEG_DISABLE)
1197 otx2_get_link_ksettings(netdev, &cur_ks);
1199 /* Check requested modes against supported modes by hardware */
1200 if (!bitmap_subset(cmd->link_modes.advertising,
1201 cur_ks.link_modes.supported,
1202 __ETHTOOL_LINK_MODE_MASK_NBITS))
1205 mutex_lock(&mbox->lock);
1206 req = otx2_mbox_alloc_msg_cgx_set_link_mode(&pf->mbox);
1212 req->args.speed = cmd->base.speed;
1213 /* firmware expects 1 for half duplex and 0 for full duplex
1216 req->args.duplex = cmd->base.duplex ^ 0x1;
1217 req->args.an = cmd->base.autoneg;
1218 otx2_get_advertised_mode(cmd, &req->args.mode);
1220 err = otx2_sync_mbox_msg(&pf->mbox);
1222 mutex_unlock(&mbox->lock);
1226 static const struct ethtool_ops otx2_ethtool_ops = {
1227 .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
1228 ETHTOOL_COALESCE_MAX_FRAMES,
1229 .get_link = otx2_get_link,
1230 .get_drvinfo = otx2_get_drvinfo,
1231 .get_strings = otx2_get_strings,
1232 .get_ethtool_stats = otx2_get_ethtool_stats,
1233 .get_sset_count = otx2_get_sset_count,
1234 .set_channels = otx2_set_channels,
1235 .get_channels = otx2_get_channels,
1236 .get_ringparam = otx2_get_ringparam,
1237 .set_ringparam = otx2_set_ringparam,
1238 .get_coalesce = otx2_get_coalesce,
1239 .set_coalesce = otx2_set_coalesce,
1240 .get_rxnfc = otx2_get_rxnfc,
1241 .set_rxnfc = otx2_set_rxnfc,
1242 .get_rxfh_key_size = otx2_get_rxfh_key_size,
1243 .get_rxfh_indir_size = otx2_get_rxfh_indir_size,
1244 .get_rxfh = otx2_get_rxfh,
1245 .set_rxfh = otx2_set_rxfh,
1246 .get_rxfh_context = otx2_get_rxfh_context,
1247 .set_rxfh_context = otx2_set_rxfh_context,
1248 .get_msglevel = otx2_get_msglevel,
1249 .set_msglevel = otx2_set_msglevel,
1250 .get_pauseparam = otx2_get_pauseparam,
1251 .set_pauseparam = otx2_set_pauseparam,
1252 .get_ts_info = otx2_get_ts_info,
1253 .get_fecparam = otx2_get_fecparam,
1254 .set_fecparam = otx2_set_fecparam,
1255 .get_link_ksettings = otx2_get_link_ksettings,
1256 .set_link_ksettings = otx2_set_link_ksettings,
1259 void otx2_set_ethtool_ops(struct net_device *netdev)
1261 netdev->ethtool_ops = &otx2_ethtool_ops;
1264 /* VF's ethtool APIs */
1265 static void otx2vf_get_drvinfo(struct net_device *netdev,
1266 struct ethtool_drvinfo *info)
1268 struct otx2_nic *vf = netdev_priv(netdev);
1270 strlcpy(info->driver, DRV_VF_NAME, sizeof(info->driver));
1271 strlcpy(info->bus_info, pci_name(vf->pdev), sizeof(info->bus_info));
1274 static void otx2vf_get_strings(struct net_device *netdev, u32 sset, u8 *data)
1276 struct otx2_nic *vf = netdev_priv(netdev);
1279 if (sset != ETH_SS_STATS)
1282 for (stats = 0; stats < otx2_n_dev_stats; stats++) {
1283 memcpy(data, otx2_dev_stats[stats].name, ETH_GSTRING_LEN);
1284 data += ETH_GSTRING_LEN;
1287 for (stats = 0; stats < otx2_n_drv_stats; stats++) {
1288 memcpy(data, otx2_drv_stats[stats].name, ETH_GSTRING_LEN);
1289 data += ETH_GSTRING_LEN;
1292 otx2_get_qset_strings(vf, &data, 0);
1294 strcpy(data, "reset_count");
1295 data += ETH_GSTRING_LEN;
1298 static void otx2vf_get_ethtool_stats(struct net_device *netdev,
1299 struct ethtool_stats *stats, u64 *data)
1301 struct otx2_nic *vf = netdev_priv(netdev);
1304 otx2_get_dev_stats(vf);
1305 for (stat = 0; stat < otx2_n_dev_stats; stat++)
1306 *(data++) = ((u64 *)&vf->hw.dev_stats)
1307 [otx2_dev_stats[stat].index];
1309 for (stat = 0; stat < otx2_n_drv_stats; stat++)
1310 *(data++) = atomic_read(&((atomic_t *)&vf->hw.drv_stats)
1311 [otx2_drv_stats[stat].index]);
1313 otx2_get_qset_stats(vf, stats, &data);
1314 *(data++) = vf->reset_count;
1317 static int otx2vf_get_sset_count(struct net_device *netdev, int sset)
1319 struct otx2_nic *vf = netdev_priv(netdev);
1322 if (sset != ETH_SS_STATS)
1325 qstats_count = otx2_n_queue_stats *
1326 (vf->hw.rx_queues + vf->hw.tx_queues);
1328 return otx2_n_dev_stats + otx2_n_drv_stats + qstats_count + 1;
1331 static int otx2vf_get_link_ksettings(struct net_device *netdev,
1332 struct ethtool_link_ksettings *cmd)
1334 struct otx2_nic *pfvf = netdev_priv(netdev);
1336 if (is_otx2_lbkvf(pfvf->pdev)) {
1337 cmd->base.duplex = DUPLEX_FULL;
1338 cmd->base.speed = SPEED_100000;
1340 return otx2_get_link_ksettings(netdev, cmd);
1345 static const struct ethtool_ops otx2vf_ethtool_ops = {
1346 .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
1347 ETHTOOL_COALESCE_MAX_FRAMES,
1348 .get_link = otx2_get_link,
1349 .get_drvinfo = otx2vf_get_drvinfo,
1350 .get_strings = otx2vf_get_strings,
1351 .get_ethtool_stats = otx2vf_get_ethtool_stats,
1352 .get_sset_count = otx2vf_get_sset_count,
1353 .set_channels = otx2_set_channels,
1354 .get_channels = otx2_get_channels,
1355 .get_rxnfc = otx2vf_get_rxnfc,
1356 .set_rxnfc = otx2vf_set_rxnfc,
1357 .get_rxfh_key_size = otx2_get_rxfh_key_size,
1358 .get_rxfh_indir_size = otx2_get_rxfh_indir_size,
1359 .get_rxfh = otx2_get_rxfh,
1360 .set_rxfh = otx2_set_rxfh,
1361 .get_rxfh_context = otx2_get_rxfh_context,
1362 .set_rxfh_context = otx2_set_rxfh_context,
1363 .get_ringparam = otx2_get_ringparam,
1364 .set_ringparam = otx2_set_ringparam,
1365 .get_coalesce = otx2_get_coalesce,
1366 .set_coalesce = otx2_set_coalesce,
1367 .get_msglevel = otx2_get_msglevel,
1368 .set_msglevel = otx2_set_msglevel,
1369 .get_pauseparam = otx2_get_pauseparam,
1370 .set_pauseparam = otx2_set_pauseparam,
1371 .get_link_ksettings = otx2vf_get_link_ksettings,
1374 void otx2vf_set_ethtool_ops(struct net_device *netdev)
1376 netdev->ethtool_ops = &otx2vf_ethtool_ops;
1378 EXPORT_SYMBOL(otx2vf_set_ethtool_ops);