// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/* Copyright 2014-2016 Freescale Semiconductor Inc.
 * Copyright 2016 NXP
 */

#include <linux/net_tstamp.h>
#include <linux/nospec.h>

#include "dpni.h"	/* DPNI_LINK_OPT_* */
#include "dpaa2-eth.h"

/* To be kept in sync with DPNI statistics */
static char dpaa2_ethtool_stats[][ETH_GSTRING_LEN] = {
	"[hw] rx frames",
	"[hw] rx bytes",
	"[hw] rx mcast frames",
	"[hw] rx mcast bytes",
	"[hw] rx bcast frames",
	"[hw] rx bcast bytes",
	"[hw] tx frames",
	"[hw] tx bytes",
	"[hw] tx mcast frames",
	"[hw] tx mcast bytes",
	"[hw] tx bcast frames",
	"[hw] tx bcast bytes",
	"[hw] rx filtered frames",
	"[hw] rx discarded frames",
	"[hw] rx nobuffer discards",
	"[hw] tx discarded frames",
	"[hw] tx confirmed frames",
	"[hw] tx dequeued bytes",
	"[hw] tx dequeued frames",
	"[hw] tx rejected bytes",
	"[hw] tx rejected frames",
	"[hw] tx pending frames",
};

#define DPAA2_ETH_NUM_STATS	ARRAY_SIZE(dpaa2_ethtool_stats)
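
/* Software counters maintained by the driver itself, not read from the MC
 * firmware. The order must match struct dpaa2_eth_drv_stats, struct
 * dpaa2_eth_ch_stats and the QBMan queries in dpaa2_eth_get_ethtool_stats().
 */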
static char dpaa2_ethtool_extras[][ETH_GSTRING_LEN] = {
	/* per-cpu stats */
	"[drv] tx conf frames",
	"[drv] tx conf bytes",
	"[drv] tx sg frames",
	"[drv] tx sg bytes",
	"[drv] tx realloc frames",
	"[drv] rx sg frames",
	"[drv] rx sg bytes",
	"[drv] enqueue portal busy",
	/* Channel stats */
	"[drv] dequeue portal busy",
	"[drv] channel pull errors",
	"[drv] cdan",
	"[drv] xdp drop",
	"[drv] xdp tx",
	"[drv] xdp tx errors",
	"[drv] xdp redirect",
	/* FQ stats */
	"[qbman] rx pending frames",
	"[qbman] rx pending bytes",
	"[qbman] tx conf pending frames",
	"[qbman] tx conf pending bytes",
	"[qbman] buffer count",
};

#define DPAA2_ETH_NUM_EXTRA_STATS	ARRAY_SIZE(dpaa2_ethtool_extras)

static void dpaa2_eth_get_drvinfo(struct net_device *net_dev,
				  struct ethtool_drvinfo *drvinfo)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);

	strlcpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver));

	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
		 "%u.%u", priv->dpni_ver_major, priv->dpni_ver_minor);

	strlcpy(drvinfo->bus_info, dev_name(net_dev->dev.parent->parent),
		sizeof(drvinfo->bus_info));
}

static int dpaa2_eth_nway_reset(struct net_device *net_dev)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);

	if (priv->mac)
		return phylink_ethtool_nway_reset(priv->mac->phylink);

	return -EOPNOTSUPP;
}
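
/* With a MAC attached, link settings come from phylink; otherwise report
 * the last link state retrieved from the MC firmware.
 */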
static int
dpaa2_eth_get_link_ksettings(struct net_device *net_dev,
			     struct ethtool_link_ksettings *link_settings)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);

	if (priv->mac)
		return phylink_ethtool_ksettings_get(priv->mac->phylink,
						     link_settings);

	link_settings->base.autoneg = AUTONEG_DISABLE;
	if (!(priv->link_state.options & DPNI_LINK_OPT_HALF_DUPLEX))
		link_settings->base.duplex = DUPLEX_FULL;
	link_settings->base.speed = priv->link_state.rate;

	return 0;
}

static int
dpaa2_eth_set_link_ksettings(struct net_device *net_dev,
			     const struct ethtool_link_ksettings *link_settings)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);

	if (!priv->mac)
		return -ENOTSUPP;

	return phylink_ethtool_ksettings_set(priv->mac->phylink, link_settings);
}
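
/* Pause frame settings are encoded in the DPNI link options:
 * DPNI_LINK_OPT_PAUSE selects Rx pause, and DPNI_LINK_OPT_ASYM_PAUSE flips
 * the Tx direction relative to Rx.
 */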
static void dpaa2_eth_get_pauseparam(struct net_device *net_dev,
				     struct ethtool_pauseparam *pause)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	u64 link_options = priv->link_state.options;

	if (priv->mac) {
		phylink_ethtool_get_pauseparam(priv->mac->phylink, pause);
		return;
	}

	pause->rx_pause = !!(link_options & DPNI_LINK_OPT_PAUSE);
	pause->tx_pause = pause->rx_pause ^
			  !!(link_options & DPNI_LINK_OPT_ASYM_PAUSE);
	pause->autoneg = AUTONEG_DISABLE;
}

static int dpaa2_eth_set_pauseparam(struct net_device *net_dev,
				    struct ethtool_pauseparam *pause)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	struct dpni_link_cfg cfg = {0};
	int err;

	if (!dpaa2_eth_has_pause_support(priv)) {
		netdev_info(net_dev, "No pause frame support for DPNI version < %d.%d\n",
			    DPNI_PAUSE_VER_MAJOR, DPNI_PAUSE_VER_MINOR);
		return -EOPNOTSUPP;
	}

	if (priv->mac)
		return phylink_ethtool_set_pauseparam(priv->mac->phylink,
						      pause);
	if (pause->autoneg)
		return -EOPNOTSUPP;
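
	/* Translate the requested Rx/Tx pause state into DPNI link options:
	 * PAUSE follows the Rx setting, and ASYM_PAUSE is set whenever the
	 * two directions differ (see dpaa2_eth_get_pauseparam() above).
	 */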
	cfg.rate = priv->link_state.rate;
	cfg.options = priv->link_state.options;
	if (pause->rx_pause)
		cfg.options |= DPNI_LINK_OPT_PAUSE;
	else
		cfg.options &= ~DPNI_LINK_OPT_PAUSE;
	if (!!pause->rx_pause ^ !!pause->tx_pause)
		cfg.options |= DPNI_LINK_OPT_ASYM_PAUSE;
	else
		cfg.options &= ~DPNI_LINK_OPT_ASYM_PAUSE;

	if (cfg.options == priv->link_state.options)
		return 0;

	err = dpni_set_link_cfg(priv->mc_io, 0, priv->mc_token, &cfg);
	if (err) {
		netdev_err(net_dev, "dpni_set_link_state failed\n");
		return err;
	}

	priv->link_state.options = cfg.options;

	return 0;
}

static void dpaa2_eth_get_strings(struct net_device *netdev, u32 stringset,
				  u8 *data)
{
	struct dpaa2_eth_priv *priv = netdev_priv(netdev);
	u8 *p = data;
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < DPAA2_ETH_NUM_STATS; i++) {
			strlcpy(p, dpaa2_ethtool_stats[i], ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		for (i = 0; i < DPAA2_ETH_NUM_EXTRA_STATS; i++) {
			strlcpy(p, dpaa2_ethtool_extras[i], ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (priv->mac)
			dpaa2_mac_get_strings(p);
		break;
	}
}

static int dpaa2_eth_get_sset_count(struct net_device *net_dev, int sset)
{
	int num_ss_stats = DPAA2_ETH_NUM_STATS + DPAA2_ETH_NUM_EXTRA_STATS;
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);

	switch (sset) {
	case ETH_SS_STATS: /* ethtool_get_stats(), ethtool_get_drvinfo() */
		if (priv->mac)
			num_ss_stats += dpaa2_mac_get_sset_count();
		return num_ss_stats;
	default:
		return -EOPNOTSUPP;
	}
}

/** Fill in hardware counters, as returned by MC.
 */
static void dpaa2_eth_get_ethtool_stats(struct net_device *net_dev,
					struct ethtool_stats *stats,
					u64 *data)
{
	int i = 0;
	int j, k, err;
	int num_cnt;
	union dpni_statistics dpni_stats;
	u32 fcnt, bcnt;
	u32 fcnt_rx_total = 0, fcnt_tx_total = 0;
	u32 bcnt_rx_total = 0, bcnt_tx_total = 0;
	u32 buf_cnt;
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	struct dpaa2_eth_drv_stats *extras;
	struct dpaa2_eth_ch_stats *ch_stats;
	int dpni_stats_page_size[DPNI_STATISTICS_CNT] = {
		sizeof(dpni_stats.page_0),
		sizeof(dpni_stats.page_1),
		sizeof(dpni_stats.page_2),
		sizeof(dpni_stats.page_3),
		sizeof(dpni_stats.page_4),
		sizeof(dpni_stats.page_5),
		sizeof(dpni_stats.page_6),
	};

	memset(data, 0,
	       sizeof(u64) * (DPAA2_ETH_NUM_STATS + DPAA2_ETH_NUM_EXTRA_STATS));

	/* Print standard counters, from DPNI statistics */
	for (j = 0; j <= 6; j++) {
		/* We're not interested in pages 4 & 5 for now */
		if (j == 4 || j == 5)
			continue;
		err = dpni_get_statistics(priv->mc_io, 0, priv->mc_token,
					  j, &dpni_stats);
		if (err == -EINVAL)
			/* Older firmware versions don't support all pages */
			memset(&dpni_stats, 0, sizeof(dpni_stats));
		else if (err)
			netdev_warn(net_dev, "dpni_get_stats(%d) failed\n", j);

		num_cnt = dpni_stats_page_size[j] / sizeof(u64);
		for (k = 0; k < num_cnt; k++)
			*(data + i++) = dpni_stats.raw.counter[k];
	}

	/* Print per-cpu extra stats */
	for_each_online_cpu(k) {
		extras = per_cpu_ptr(priv->percpu_extras, k);
		for (j = 0; j < sizeof(*extras) / sizeof(__u64); j++)
			*((__u64 *)data + i + j) += *((__u64 *)extras + j);
	}
	i += j;

	/* Per-channel stats */
	for (k = 0; k < priv->num_channels; k++) {
		ch_stats = &priv->channel[k]->stats;
		for (j = 0; j < sizeof(*ch_stats) / sizeof(__u64); j++)
			*((__u64 *)data + i + j) += *((__u64 *)ch_stats + j);
	}
	i += j;

	for (j = 0; j < priv->num_fqs; j++) {
		/* Print FQ instantaneous counts */
		err = dpaa2_io_query_fq_count(NULL, priv->fq[j].fqid,
					      &fcnt, &bcnt);
		if (err) {
			netdev_warn(net_dev, "FQ query error %d", err);
			return;
		}

		if (priv->fq[j].type == DPAA2_TX_CONF_FQ) {
			fcnt_tx_total += fcnt;
			bcnt_tx_total += bcnt;
		} else {
			fcnt_rx_total += fcnt;
			bcnt_rx_total += bcnt;
		}
	}

	*(data + i++) = fcnt_rx_total;
	*(data + i++) = bcnt_rx_total;
	*(data + i++) = fcnt_tx_total;
	*(data + i++) = bcnt_tx_total;

	err = dpaa2_io_query_bp_count(NULL, priv->bpid, &buf_cnt);
	if (err) {
		netdev_warn(net_dev, "Buffer count query error %d\n", err);
		return;
	}
	*(data + i++) = buf_cnt;

	if (priv->mac)
		dpaa2_mac_get_ethtool_stats(priv->mac, data + i);
}
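
/* The prep_*_rule() helpers below translate an ethtool flow spec into a
 * DPAA2 classification key/mask pair: each matched field is written at its
 * fixed offset (dpaa2_eth_cls_fld_off()) in both the key and mask buffers,
 * and the corresponding DPAA2_ETH_DIST_* bit is recorded in *fields.
 */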
static int prep_eth_rule(struct ethhdr *eth_value, struct ethhdr *eth_mask,
			 void *key, void *mask, u64 *fields)
{
	int off;

	if (eth_mask->h_proto) {
		off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_TYPE);
		*(__be16 *)(key + off) = eth_value->h_proto;
		*(__be16 *)(mask + off) = eth_mask->h_proto;
		*fields |= DPAA2_ETH_DIST_ETHTYPE;
	}

	if (!is_zero_ether_addr(eth_mask->h_source)) {
		off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_SA);
		ether_addr_copy(key + off, eth_value->h_source);
		ether_addr_copy(mask + off, eth_mask->h_source);
		*fields |= DPAA2_ETH_DIST_ETHSRC;
	}

	if (!is_zero_ether_addr(eth_mask->h_dest)) {
		off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_DA);
		ether_addr_copy(key + off, eth_value->h_dest);
		ether_addr_copy(mask + off, eth_mask->h_dest);
		*fields |= DPAA2_ETH_DIST_ETHDST;
	}

	return 0;
}

static int prep_uip_rule(struct ethtool_usrip4_spec *uip_value,
			 struct ethtool_usrip4_spec *uip_mask,
			 void *key, void *mask, u64 *fields)
{
	int off;
	u32 tmp_value, tmp_mask;

	if (uip_mask->tos || uip_mask->ip_ver)
		return -EOPNOTSUPP;

	if (uip_mask->ip4src) {
		off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_SRC);
		*(__be32 *)(key + off) = uip_value->ip4src;
		*(__be32 *)(mask + off) = uip_mask->ip4src;
		*fields |= DPAA2_ETH_DIST_IPSRC;
	}

	if (uip_mask->ip4dst) {
		off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_DST);
		*(__be32 *)(key + off) = uip_value->ip4dst;
		*(__be32 *)(mask + off) = uip_mask->ip4dst;
		*fields |= DPAA2_ETH_DIST_IPDST;
	}

	if (uip_mask->proto) {
		off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_PROTO);
		*(u8 *)(key + off) = uip_value->proto;
		*(u8 *)(mask + off) = uip_mask->proto;
		*fields |= DPAA2_ETH_DIST_IPPROTO;
	}
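
	/* For IP_USER_FLOW, l4_4_bytes carries the first four bytes of the
	 * L4 header; interpret them as big-endian source and destination
	 * ports.
	 */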
	if (uip_mask->l4_4_bytes) {
		tmp_value = be32_to_cpu(uip_value->l4_4_bytes);
		tmp_mask = be32_to_cpu(uip_mask->l4_4_bytes);

		off = dpaa2_eth_cls_fld_off(NET_PROT_UDP, NH_FLD_UDP_PORT_SRC);
		*(__be16 *)(key + off) = htons(tmp_value >> 16);
		*(__be16 *)(mask + off) = htons(tmp_mask >> 16);
		*fields |= DPAA2_ETH_DIST_L4SRC;

		off = dpaa2_eth_cls_fld_off(NET_PROT_UDP, NH_FLD_UDP_PORT_DST);
		*(__be16 *)(key + off) = htons(tmp_value & 0xFFFF);
		*(__be16 *)(mask + off) = htons(tmp_mask & 0xFFFF);
		*fields |= DPAA2_ETH_DIST_L4DST;
	}

	/* Only apply the rule for IPv4 frames */
	off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_TYPE);
	*(__be16 *)(key + off) = htons(ETH_P_IP);
	*(__be16 *)(mask + off) = htons(0xFFFF);
	*fields |= DPAA2_ETH_DIST_ETHTYPE;

	return 0;
}
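
/* Common helper for TCP/UDP/SCTP v4 flows; the caller supplies the IP
 * protocol number the rule gets pinned to.
 */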
static int prep_l4_rule(struct ethtool_tcpip4_spec *l4_value,
			struct ethtool_tcpip4_spec *l4_mask,
			void *key, void *mask, u8 l4_proto, u64 *fields)
{
	int off;

	if (l4_mask->tos)
		return -EOPNOTSUPP;

	if (l4_mask->ip4src) {
		off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_SRC);
		*(__be32 *)(key + off) = l4_value->ip4src;
		*(__be32 *)(mask + off) = l4_mask->ip4src;
		*fields |= DPAA2_ETH_DIST_IPSRC;
	}

	if (l4_mask->ip4dst) {
		off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_DST);
		*(__be32 *)(key + off) = l4_value->ip4dst;
		*(__be32 *)(mask + off) = l4_mask->ip4dst;
		*fields |= DPAA2_ETH_DIST_IPDST;
	}

	if (l4_mask->psrc) {
		off = dpaa2_eth_cls_fld_off(NET_PROT_UDP, NH_FLD_UDP_PORT_SRC);
		*(__be16 *)(key + off) = l4_value->psrc;
		*(__be16 *)(mask + off) = l4_mask->psrc;
		*fields |= DPAA2_ETH_DIST_L4SRC;
	}

	if (l4_mask->pdst) {
		off = dpaa2_eth_cls_fld_off(NET_PROT_UDP, NH_FLD_UDP_PORT_DST);
		*(__be16 *)(key + off) = l4_value->pdst;
		*(__be16 *)(mask + off) = l4_mask->pdst;
		*fields |= DPAA2_ETH_DIST_L4DST;
	}

	/* Only apply the rule for IPv4 frames with the specified L4 proto */
	off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_TYPE);
	*(__be16 *)(key + off) = htons(ETH_P_IP);
	*(__be16 *)(mask + off) = htons(0xFFFF);
	*fields |= DPAA2_ETH_DIST_ETHTYPE;

	off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_PROTO);
	*(u8 *)(key + off) = l4_proto;
	*(u8 *)(mask + off) = 0xFF;
	*fields |= DPAA2_ETH_DIST_IPPROTO;

	return 0;
}
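
/* FLOW_EXT matching is limited to the VLAN TCI; matching on vlan_etype is
 * not supported.
 */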
static int prep_ext_rule(struct ethtool_flow_ext *ext_value,
			 struct ethtool_flow_ext *ext_mask,
			 void *key, void *mask, u64 *fields)
{
	int off;

	if (ext_mask->vlan_etype)
		return -EOPNOTSUPP;

	if (ext_mask->vlan_tci) {
		off = dpaa2_eth_cls_fld_off(NET_PROT_VLAN, NH_FLD_VLAN_TCI);
		*(__be16 *)(key + off) = ext_value->vlan_tci;
		*(__be16 *)(mask + off) = ext_mask->vlan_tci;
		*fields |= DPAA2_ETH_DIST_VLAN;
	}

	return 0;
}

static int prep_mac_ext_rule(struct ethtool_flow_ext *ext_value,
			     struct ethtool_flow_ext *ext_mask,
			     void *key, void *mask, u64 *fields)
{
	int off;

	if (!is_zero_ether_addr(ext_mask->h_dest)) {
		off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_DA);
		ether_addr_copy(key + off, ext_value->h_dest);
		ether_addr_copy(mask + off, ext_mask->h_dest);
		*fields |= DPAA2_ETH_DIST_ETHDST;
	}

	return 0;
}
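
/* Dispatch on the basic flow type, then fold in the optional FLOW_EXT and
 * FLOW_MAC_EXT extensions.
 */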
static int prep_cls_rule(struct ethtool_rx_flow_spec *fs, void *key, void *mask,
			 u64 *fields)
{
	int err;

	switch (fs->flow_type & 0xFF) {
	case ETHER_FLOW:
		err = prep_eth_rule(&fs->h_u.ether_spec, &fs->m_u.ether_spec,
				    key, mask, fields);
		break;
	case IP_USER_FLOW:
		err = prep_uip_rule(&fs->h_u.usr_ip4_spec,
				    &fs->m_u.usr_ip4_spec, key, mask, fields);
		break;
	case TCP_V4_FLOW:
		err = prep_l4_rule(&fs->h_u.tcp_ip4_spec, &fs->m_u.tcp_ip4_spec,
				   key, mask, IPPROTO_TCP, fields);
		break;
	case UDP_V4_FLOW:
		err = prep_l4_rule(&fs->h_u.udp_ip4_spec, &fs->m_u.udp_ip4_spec,
				   key, mask, IPPROTO_UDP, fields);
		break;
	case SCTP_V4_FLOW:
		err = prep_l4_rule(&fs->h_u.sctp_ip4_spec,
				   &fs->m_u.sctp_ip4_spec, key, mask,
				   IPPROTO_SCTP, fields);
		break;
	default:
		return -EOPNOTSUPP;
	}

	if (err)
		return err;

	if (fs->flow_type & FLOW_EXT) {
		err = prep_ext_rule(&fs->h_ext, &fs->m_ext, key, mask, fields);
		if (err)
			return err;
	}

	if (fs->flow_type & FLOW_MAC_EXT) {
		err = prep_mac_ext_rule(&fs->h_ext, &fs->m_ext, key, mask,
					fields);
		if (err)
			return err;
	}

	return 0;
}
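
/* Add or remove a single flow steering entry. The key and its mask share
 * one allocation: the key occupies the first key_size bytes and the mask
 * the second, so a single DMA mapping covers both.
 */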
static int do_cls_rule(struct net_device *net_dev,
		       struct ethtool_rx_flow_spec *fs,
		       bool add)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	struct device *dev = net_dev->dev.parent;
	struct dpni_rule_cfg rule_cfg = { 0 };
	struct dpni_fs_action_cfg fs_act = { 0 };
	dma_addr_t key_iova;
	u64 fields = 0;
	void *key_buf;
	int err;

	if (fs->ring_cookie != RX_CLS_FLOW_DISC &&
	    fs->ring_cookie >= dpaa2_eth_queue_count(priv))
		return -EINVAL;

	rule_cfg.key_size = dpaa2_eth_cls_key_size(DPAA2_ETH_DIST_ALL);

	/* allocate twice the key size, for the actual key and for mask */
	key_buf = kzalloc(rule_cfg.key_size * 2, GFP_KERNEL);
	if (!key_buf)
		return -ENOMEM;

	/* Fill the key and mask memory areas */
	err = prep_cls_rule(fs, key_buf, key_buf + rule_cfg.key_size, &fields);
	if (err)
		goto free_mem;

	if (!dpaa2_eth_fs_mask_enabled(priv)) {
		/* Masking allows us to configure a maximal key during init and
		 * use it for all flow steering rules. Without it, we include
		 * in the key only the fields actually used, so we need to
		 * extract the others from the final key buffer.
		 *
		 * Program the FS key if needed, or return error if previously
		 * set key can't be used for the current rule. User needs to
		 * delete existing rules in this case to allow for the new one.
		 */
		if (!priv->rx_cls_fields) {
			err = dpaa2_eth_set_cls(net_dev, fields);
			if (err)
				goto free_mem;

			priv->rx_cls_fields = fields;
		} else if (priv->rx_cls_fields != fields) {
			netdev_err(net_dev, "No support for multiple FS keys, need to delete existing rules\n");
			err = -EOPNOTSUPP;
			goto free_mem;
		}

		dpaa2_eth_cls_trim_rule(key_buf, fields);
		rule_cfg.key_size = dpaa2_eth_cls_key_size(fields);
	}
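
	/* A single mapping covers both halves of the buffer; the mask area
	 * is only referenced when the firmware supports masked keys.
	 */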
	key_iova = dma_map_single(dev, key_buf, rule_cfg.key_size * 2,
				  DMA_TO_DEVICE);
	if (dma_mapping_error(dev, key_iova)) {
		err = -ENOMEM;
		goto free_mem;
	}

	rule_cfg.key_iova = key_iova;
	if (dpaa2_eth_fs_mask_enabled(priv))
		rule_cfg.mask_iova = key_iova + rule_cfg.key_size;

	if (add) {
		if (fs->ring_cookie == RX_CLS_FLOW_DISC)
			fs_act.options |= DPNI_FS_OPT_DISCARD;
		else
			fs_act.flow_id = fs->ring_cookie;
		err = dpni_add_fs_entry(priv->mc_io, 0, priv->mc_token, 0,
					fs->location, &rule_cfg, &fs_act);
	} else {
		err = dpni_remove_fs_entry(priv->mc_io, 0, priv->mc_token, 0,
					   &rule_cfg);
	}

	dma_unmap_single(dev, key_iova, rule_cfg.key_size * 2, DMA_TO_DEVICE);

free_mem:
	kfree(key_buf);

	return err;
}

static int num_rules(struct dpaa2_eth_priv *priv)
{
	int i, rules = 0;

	for (i = 0; i < dpaa2_eth_fs_count(priv); i++)
		if (priv->cls_rules[i].in_use)
			rules++;

	return rules;
}
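
/* Replace semantics: any rule already present at @location is deleted
 * first; passing a NULL @new_fs turns this into a pure delete.
 */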
static int update_cls_rule(struct net_device *net_dev,
			   struct ethtool_rx_flow_spec *new_fs,
			   unsigned int location)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	struct dpaa2_eth_cls_rule *rule;
	int err = -EINVAL;

	if (!priv->rx_cls_enabled)
		return -EOPNOTSUPP;

	if (location >= dpaa2_eth_fs_count(priv))
		return -EINVAL;

	rule = &priv->cls_rules[location];

	/* If a rule is present at the specified location, delete it. */
	if (rule->in_use) {
		err = do_cls_rule(net_dev, &rule->fs, false);
		if (err)
			return err;

		rule->in_use = 0;

		if (!dpaa2_eth_fs_mask_enabled(priv) && !num_rules(priv))
			priv->rx_cls_fields = 0;
	}

	/* If no new entry to add, return here */
	if (!new_fs)
		return err;

	err = do_cls_rule(net_dev, new_fs, true);
	if (err)
		return err;

	rule->in_use = 1;
	rule->fs = *new_fs;

	return 0;
}

static int dpaa2_eth_get_rxnfc(struct net_device *net_dev,
			       struct ethtool_rxnfc *rxnfc, u32 *rule_locs)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	int max_rules = dpaa2_eth_fs_count(priv);
	int i, j = 0;

	switch (rxnfc->cmd) {
	case ETHTOOL_GRXFH:
		/* we purposely ignore cmd->flow_type for now, because the
		 * classifier only supports a single set of fields for all
		 * protocols
		 */
		rxnfc->data = priv->rx_hash_fields;
		break;
	case ETHTOOL_GRXRINGS:
		rxnfc->data = dpaa2_eth_queue_count(priv);
		break;
	case ETHTOOL_GRXCLSRLCNT:
		rxnfc->rule_cnt = num_rules(priv);
		rxnfc->data = max_rules;
		break;
	case ETHTOOL_GRXCLSRULE:
		if (rxnfc->fs.location >= max_rules)
			return -EINVAL;
		rxnfc->fs.location = array_index_nospec(rxnfc->fs.location,
							max_rules);
		if (!priv->cls_rules[rxnfc->fs.location].in_use)
			return -EINVAL;
		rxnfc->fs = priv->cls_rules[rxnfc->fs.location].fs;
		break;
	case ETHTOOL_GRXCLSRLALL:
		for (i = 0; i < max_rules; i++) {
			if (!priv->cls_rules[i].in_use)
				continue;
			if (j == rxnfc->rule_cnt)
				return -EMSGSIZE;
			rule_locs[j++] = i;
		}
		rxnfc->rule_cnt = j;
		rxnfc->data = max_rules;
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

static int dpaa2_eth_set_rxnfc(struct net_device *net_dev,
			       struct ethtool_rxnfc *rxnfc)
{
	int err = 0;

	switch (rxnfc->cmd) {
	case ETHTOOL_SRXFH:
		if ((rxnfc->data & DPAA2_RXH_SUPPORTED) != rxnfc->data)
			return -EOPNOTSUPP;
		err = dpaa2_eth_set_hash(net_dev, rxnfc->data);
		break;
	case ETHTOOL_SRXCLSRLINS:
		err = update_cls_rule(net_dev, &rxnfc->fs, rxnfc->fs.location);
		break;
	case ETHTOOL_SRXCLSRLDEL:
		err = update_cls_rule(net_dev, NULL, rxnfc->fs.location);
		break;
	default:
		err = -EOPNOTSUPP;
	}

	return err;
}
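
/* Index of the DPAA2 PTP hardware clock, set by the DPAA2 PTP driver once
 * the clock is probed; -1 means no PHC is available.
 */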
int dpaa2_phc_index = -1;
EXPORT_SYMBOL(dpaa2_phc_index);

static int dpaa2_eth_get_ts_info(struct net_device *dev,
				 struct ethtool_ts_info *info)
{
	info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
				SOF_TIMESTAMPING_RX_HARDWARE |
				SOF_TIMESTAMPING_RAW_HARDWARE;

	info->phc_index = dpaa2_phc_index;

	info->tx_types = (1 << HWTSTAMP_TX_OFF) |
			 (1 << HWTSTAMP_TX_ON);

	info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
			   (1 << HWTSTAMP_FILTER_ALL);

	return 0;
}

const struct ethtool_ops dpaa2_ethtool_ops = {
	.get_drvinfo = dpaa2_eth_get_drvinfo,
	.nway_reset = dpaa2_eth_nway_reset,
	.get_link = ethtool_op_get_link,
	.get_link_ksettings = dpaa2_eth_get_link_ksettings,
	.set_link_ksettings = dpaa2_eth_set_link_ksettings,
	.get_pauseparam = dpaa2_eth_get_pauseparam,
	.set_pauseparam = dpaa2_eth_set_pauseparam,
	.get_sset_count = dpaa2_eth_get_sset_count,
	.get_ethtool_stats = dpaa2_eth_get_ethtool_stats,
	.get_strings = dpaa2_eth_get_strings,
	.get_rxnfc = dpaa2_eth_get_rxnfc,
	.set_rxnfc = dpaa2_eth_set_rxnfc,
	.get_ts_info = dpaa2_eth_get_ts_info,
};