// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/* Copyright 2014-2016 Freescale Semiconductor Inc.
 * Copyright 2016-2020 NXP
 */

#include <linux/net_tstamp.h>
#include <linux/nospec.h>

#include "dpni.h"	/* DPNI_LINK_OPT_* */
#include "dpaa2-eth.h"

/* To be kept in sync with DPNI statistics */
static char dpaa2_ethtool_stats[][ETH_GSTRING_LEN] = {
	"[hw] rx frames",
	"[hw] rx bytes",
	"[hw] rx mcast frames",
	"[hw] rx mcast bytes",
	"[hw] rx bcast frames",
	"[hw] rx bcast bytes",
	"[hw] tx frames",
	"[hw] tx bytes",
	"[hw] tx mcast frames",
	"[hw] tx mcast bytes",
	"[hw] tx bcast frames",
	"[hw] tx bcast bytes",
	"[hw] rx filtered frames",
	"[hw] rx discarded frames",
	"[hw] rx nobuffer discards",
	"[hw] tx discarded frames",
	"[hw] tx confirmed frames",
	"[hw] tx dequeued bytes",
	"[hw] tx dequeued frames",
	"[hw] tx rejected bytes",
	"[hw] tx rejected frames",
	"[hw] tx pending frames",
};

#define DPAA2_ETH_NUM_STATS	ARRAY_SIZE(dpaa2_ethtool_stats)

static char dpaa2_ethtool_extras[][ETH_GSTRING_LEN] = {
	/* per-cpu stats */
	"[drv] tx conf frames",
	"[drv] tx conf bytes",
	"[drv] tx sg frames",
	"[drv] tx sg bytes",
	"[drv] rx sg frames",
	"[drv] rx sg bytes",
	"[drv] enqueue portal busy",
	/* Channel stats */
	"[drv] dequeue portal busy",
	"[drv] channel pull errors",
	"[drv] cdan",
	"[drv] xdp drop",
	"[drv] xdp tx",
	"[drv] xdp tx errors",
	"[drv] xdp redirect",
	/* FQ stats */
	"[qbman] rx pending frames",
	"[qbman] rx pending bytes",
	"[qbman] tx conf pending frames",
	"[qbman] tx conf pending bytes",
	"[qbman] buffer count",
};

#define DPAA2_ETH_NUM_EXTRA_STATS	ARRAY_SIZE(dpaa2_ethtool_extras)
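
/* Note: the string tables above are consumed positionally.
 * dpaa2_eth_get_strings() copies them out verbatim and
 * dpaa2_eth_get_ethtool_stats() fills in the values by walking the DPNI
 * statistics pages and the driver's per-cpu and per-channel stats structures
 * word by word, so the ordering here must stay in sync with
 * union dpni_statistics and with struct dpaa2_eth_drv_stats /
 * struct dpaa2_eth_ch_stats from dpaa2-eth.h.
 */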

static void dpaa2_eth_get_drvinfo(struct net_device *net_dev,
				  struct ethtool_drvinfo *drvinfo)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);

	strlcpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver));

	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
		 "%u.%u", priv->dpni_ver_major, priv->dpni_ver_minor);

	strlcpy(drvinfo->bus_info, dev_name(net_dev->dev.parent->parent),
		sizeof(drvinfo->bus_info));
}

static int dpaa2_eth_nway_reset(struct net_device *net_dev)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);

	if (priv->mac)
		return phylink_ethtool_nway_reset(priv->mac->phylink);

	return -EOPNOTSUPP;
}

static int
dpaa2_eth_get_link_ksettings(struct net_device *net_dev,
			     struct ethtool_link_ksettings *link_settings)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);

	if (priv->mac)
		return phylink_ethtool_ksettings_get(priv->mac->phylink,
						     link_settings);

	link_settings->base.autoneg = AUTONEG_DISABLE;
	if (!(priv->link_state.options & DPNI_LINK_OPT_HALF_DUPLEX))
		link_settings->base.duplex = DUPLEX_FULL;
	link_settings->base.speed = priv->link_state.rate;

	return 0;
}

static int
dpaa2_eth_set_link_ksettings(struct net_device *net_dev,
			     const struct ethtool_link_ksettings *link_settings)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);

	if (!priv->mac)
		return -EOPNOTSUPP;

	return phylink_ethtool_ksettings_set(priv->mac->phylink, link_settings);
}

static void dpaa2_eth_get_pauseparam(struct net_device *net_dev,
				     struct ethtool_pauseparam *pause)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	u64 link_options = priv->link_state.options;

	if (priv->mac) {
		phylink_ethtool_get_pauseparam(priv->mac->phylink, pause);
		return;
	}

	pause->rx_pause = dpaa2_eth_rx_pause_enabled(link_options);
	pause->tx_pause = dpaa2_eth_tx_pause_enabled(link_options);
	pause->autoneg = AUTONEG_DISABLE;
}
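
/* The two DPNI link option bits encode the four rx/tx pause combinations:
 *
 *	PAUSE | ASYM_PAUSE | rx pause | tx pause
 *	----------------------------------------
 *	  0   |     0      |  false   |  false
 *	  0   |     1      |  false   |  true
 *	  1   |     0      |  true    |  true
 *	  1   |     1      |  true    |  false
 *
 * This is the mapping implemented by the dpaa2_eth_{rx,tx}_pause_enabled()
 * helpers in dpaa2-eth.h and mirrored by dpaa2_eth_set_pauseparam() below.
 */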

static int dpaa2_eth_set_pauseparam(struct net_device *net_dev,
				    struct ethtool_pauseparam *pause)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	struct dpni_link_cfg cfg = {0};
	int err;

	if (!dpaa2_eth_has_pause_support(priv)) {
		netdev_info(net_dev, "No pause frame support for DPNI version < %d.%d\n",
			    DPNI_PAUSE_VER_MAJOR, DPNI_PAUSE_VER_MINOR);
		return -EOPNOTSUPP;
	}

	if (priv->mac)
		return phylink_ethtool_set_pauseparam(priv->mac->phylink,
						      pause);
	if (pause->autoneg)
		return -EOPNOTSUPP;

	cfg.rate = priv->link_state.rate;
	cfg.options = priv->link_state.options;
	if (pause->rx_pause)
		cfg.options |= DPNI_LINK_OPT_PAUSE;
	else
		cfg.options &= ~DPNI_LINK_OPT_PAUSE;
	if (!!pause->rx_pause ^ !!pause->tx_pause)
		cfg.options |= DPNI_LINK_OPT_ASYM_PAUSE;
	else
		cfg.options &= ~DPNI_LINK_OPT_ASYM_PAUSE;

	if (cfg.options == priv->link_state.options)
		return 0;

	err = dpni_set_link_cfg(priv->mc_io, 0, priv->mc_token, &cfg);
	if (err) {
		netdev_err(net_dev, "dpni_set_link_cfg failed\n");
		return err;
	}

	priv->link_state.options = cfg.options;

	return 0;
}

static void dpaa2_eth_get_strings(struct net_device *netdev, u32 stringset,
				  u8 *data)
{
	struct dpaa2_eth_priv *priv = netdev_priv(netdev);
	u8 *p = data;
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < DPAA2_ETH_NUM_STATS; i++) {
			strlcpy(p, dpaa2_ethtool_stats[i], ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		for (i = 0; i < DPAA2_ETH_NUM_EXTRA_STATS; i++) {
			strlcpy(p, dpaa2_ethtool_extras[i], ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (priv->mac)
			dpaa2_mac_get_strings(p);
		break;
	}
}

static int dpaa2_eth_get_sset_count(struct net_device *net_dev, int sset)
{
	int num_ss_stats = DPAA2_ETH_NUM_STATS + DPAA2_ETH_NUM_EXTRA_STATS;
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);

	switch (sset) {
	case ETH_SS_STATS: /* ethtool_get_stats(), ethtool_get_drvinfo() */
		if (priv->mac)
			num_ss_stats += dpaa2_mac_get_sset_count();
		return num_ss_stats;
	default:
		return -EOPNOTSUPP;
	}
}
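
/* The ethtool core sizes the buffers passed to the get_strings() and
 * get_ethtool_stats() callbacks from this count, so all three callbacks must
 * agree on the number and layout of the exported counters.
 */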

/* Fill in hardware counters, as returned by MC */
static void dpaa2_eth_get_ethtool_stats(struct net_device *net_dev,
					struct ethtool_stats *stats,
					u64 *data)
{
	int i = 0;
	int j, k, err;
	int num_cnt;
	union dpni_statistics dpni_stats;
	u32 fcnt, bcnt;
	u32 fcnt_rx_total = 0, fcnt_tx_total = 0;
	u32 bcnt_rx_total = 0, bcnt_tx_total = 0;
	u32 buf_cnt;
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	struct dpaa2_eth_drv_stats *extras;
	struct dpaa2_eth_ch_stats *ch_stats;
	int dpni_stats_page_size[DPNI_STATISTICS_CNT] = {
		sizeof(dpni_stats.page_0),
		sizeof(dpni_stats.page_1),
		sizeof(dpni_stats.page_2),
		sizeof(dpni_stats.page_3),
		sizeof(dpni_stats.page_4),
		sizeof(dpni_stats.page_5),
		sizeof(dpni_stats.page_6),
	};

	memset(data, 0,
	       sizeof(u64) * (DPAA2_ETH_NUM_STATS + DPAA2_ETH_NUM_EXTRA_STATS));

	/* Print standard counters, from DPNI statistics */
	for (j = 0; j <= 6; j++) {
		/* We're not interested in pages 4 & 5 for now */
		if (j == 4 || j == 5)
			continue;
		err = dpni_get_statistics(priv->mc_io, 0, priv->mc_token,
					  j, &dpni_stats);
		if (err == -EINVAL)
			/* Older firmware versions don't support all pages */
			memset(&dpni_stats, 0, sizeof(dpni_stats));
		else if (err)
			netdev_warn(net_dev, "dpni_get_stats(%d) failed\n", j);

		num_cnt = dpni_stats_page_size[j] / sizeof(u64);
		for (k = 0; k < num_cnt; k++)
			*(data + i++) = dpni_stats.raw.counter[k];
	}

	/* Print per-cpu extra stats */
	for_each_online_cpu(k) {
		extras = per_cpu_ptr(priv->percpu_extras, k);
		for (j = 0; j < sizeof(*extras) / sizeof(__u64); j++)
			*((__u64 *)data + i + j) += *((__u64 *)extras + j);
	}
	i += j;

	/* Per-channel stats */
	for (k = 0; k < priv->num_channels; k++) {
		ch_stats = &priv->channel[k]->stats;
		for (j = 0; j < sizeof(*ch_stats) / sizeof(__u64) - 1; j++)
			*((__u64 *)data + i + j) += *((__u64 *)ch_stats + j);
	}
	i += j;

	for (j = 0; j < priv->num_fqs; j++) {
		/* Print FQ instantaneous counts */
		err = dpaa2_io_query_fq_count(NULL, priv->fq[j].fqid,
					      &fcnt, &bcnt);
		if (err) {
			netdev_warn(net_dev, "FQ query error %d\n", err);
			return;
		}

		if (priv->fq[j].type == DPAA2_TX_CONF_FQ) {
			fcnt_tx_total += fcnt;
			bcnt_tx_total += bcnt;
		} else {
			fcnt_rx_total += fcnt;
			bcnt_rx_total += bcnt;
		}
	}

	*(data + i++) = fcnt_rx_total;
	*(data + i++) = bcnt_rx_total;
	*(data + i++) = fcnt_tx_total;
	*(data + i++) = bcnt_tx_total;

	err = dpaa2_io_query_bp_count(NULL, priv->bpid, &buf_cnt);
	if (err) {
		netdev_warn(net_dev, "Buffer count query error %d\n", err);
		return;
	}
	*(data + i++) = buf_cnt;

	if (priv->mac)
		dpaa2_mac_get_ethtool_stats(priv->mac, data + i);
}
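
/* Everything gathered above is what `ethtool -S <iface>` prints, in the order
 * set by the string tables at the top of this file: DPNI hardware counters
 * first, then the driver and channel extras, then the QBMan frame queue and
 * buffer pool snapshots.
 */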

static int prep_eth_rule(struct ethhdr *eth_value, struct ethhdr *eth_mask,
			 void *key, void *mask, u64 *fields)
{
	int off;

	if (eth_mask->h_proto) {
		off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_TYPE);
		*(__be16 *)(key + off) = eth_value->h_proto;
		*(__be16 *)(mask + off) = eth_mask->h_proto;
		*fields |= DPAA2_ETH_DIST_ETHTYPE;
	}

	if (!is_zero_ether_addr(eth_mask->h_source)) {
		off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_SA);
		ether_addr_copy(key + off, eth_value->h_source);
		ether_addr_copy(mask + off, eth_mask->h_source);
		*fields |= DPAA2_ETH_DIST_ETHSRC;
	}

	if (!is_zero_ether_addr(eth_mask->h_dest)) {
		off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_DA);
		ether_addr_copy(key + off, eth_value->h_dest);
		ether_addr_copy(mask + off, eth_mask->h_dest);
		*fields |= DPAA2_ETH_DIST_ETHDST;
	}

	return 0;
}

static int prep_uip_rule(struct ethtool_usrip4_spec *uip_value,
			 struct ethtool_usrip4_spec *uip_mask,
			 void *key, void *mask, u64 *fields)
{
	int off;
	u32 tmp_value, tmp_mask;

	if (uip_mask->tos || uip_mask->ip_ver)
		return -EOPNOTSUPP;

	if (uip_mask->ip4src) {
		off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_SRC);
		*(__be32 *)(key + off) = uip_value->ip4src;
		*(__be32 *)(mask + off) = uip_mask->ip4src;
		*fields |= DPAA2_ETH_DIST_IPSRC;
	}

	if (uip_mask->ip4dst) {
		off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_DST);
		*(__be32 *)(key + off) = uip_value->ip4dst;
		*(__be32 *)(mask + off) = uip_mask->ip4dst;
		*fields |= DPAA2_ETH_DIST_IPDST;
	}

	if (uip_mask->proto) {
		off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_PROTO);
		*(u8 *)(key + off) = uip_value->proto;
		*(u8 *)(mask + off) = uip_mask->proto;
		*fields |= DPAA2_ETH_DIST_IPPROTO;
	}

	/* l4_4_bytes carries the L4 ports: source port in the upper 16 bits,
	 * destination port in the lower 16
	 */
	if (uip_mask->l4_4_bytes) {
		tmp_value = be32_to_cpu(uip_value->l4_4_bytes);
		tmp_mask = be32_to_cpu(uip_mask->l4_4_bytes);

		off = dpaa2_eth_cls_fld_off(NET_PROT_UDP, NH_FLD_UDP_PORT_SRC);
		*(__be16 *)(key + off) = htons(tmp_value >> 16);
		*(__be16 *)(mask + off) = htons(tmp_mask >> 16);
		*fields |= DPAA2_ETH_DIST_L4SRC;

		off = dpaa2_eth_cls_fld_off(NET_PROT_UDP, NH_FLD_UDP_PORT_DST);
		*(__be16 *)(key + off) = htons(tmp_value & 0xFFFF);
		*(__be16 *)(mask + off) = htons(tmp_mask & 0xFFFF);
		*fields |= DPAA2_ETH_DIST_L4DST;
	}

	/* Only apply the rule for IPv4 frames */
	off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_TYPE);
	*(__be16 *)(key + off) = htons(ETH_P_IP);
	*(__be16 *)(mask + off) = htons(0xFFFF);
	*fields |= DPAA2_ETH_DIST_ETHTYPE;

	return 0;
}

static int prep_l4_rule(struct ethtool_tcpip4_spec *l4_value,
			struct ethtool_tcpip4_spec *l4_mask,
			void *key, void *mask, u8 l4_proto, u64 *fields)
{
	int off;

	if (l4_mask->tos)
		return -EOPNOTSUPP;

	if (l4_mask->ip4src) {
		off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_SRC);
		*(__be32 *)(key + off) = l4_value->ip4src;
		*(__be32 *)(mask + off) = l4_mask->ip4src;
		*fields |= DPAA2_ETH_DIST_IPSRC;
	}

	if (l4_mask->ip4dst) {
		off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_DST);
		*(__be32 *)(key + off) = l4_value->ip4dst;
		*(__be32 *)(mask + off) = l4_mask->ip4dst;
		*fields |= DPAA2_ETH_DIST_IPDST;
	}

	if (l4_mask->psrc) {
		off = dpaa2_eth_cls_fld_off(NET_PROT_UDP, NH_FLD_UDP_PORT_SRC);
		*(__be16 *)(key + off) = l4_value->psrc;
		*(__be16 *)(mask + off) = l4_mask->psrc;
		*fields |= DPAA2_ETH_DIST_L4SRC;
	}

	if (l4_mask->pdst) {
		off = dpaa2_eth_cls_fld_off(NET_PROT_UDP, NH_FLD_UDP_PORT_DST);
		*(__be16 *)(key + off) = l4_value->pdst;
		*(__be16 *)(mask + off) = l4_mask->pdst;
		*fields |= DPAA2_ETH_DIST_L4DST;
	}

	/* Only apply the rule for IPv4 frames with the specified L4 proto */
	off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_TYPE);
	*(__be16 *)(key + off) = htons(ETH_P_IP);
	*(__be16 *)(mask + off) = htons(0xFFFF);
	*fields |= DPAA2_ETH_DIST_ETHTYPE;

	off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_PROTO);
	*(u8 *)(key + off) = l4_proto;
	*(u8 *)(mask + off) = 0xFF;
	*fields |= DPAA2_ETH_DIST_IPPROTO;

	return 0;
}
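
/* Example: steering TCP traffic with destination port 80 to queue 2 ends up
 * in prep_l4_rule() with l4_proto == IPPROTO_TCP and only pdst set in the
 * mask (interface name and queue index here are illustrative):
 *
 *	ethtool -N eth0 flow-type tcp4 dst-port 80 action 2
 */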

static int prep_ext_rule(struct ethtool_flow_ext *ext_value,
			 struct ethtool_flow_ext *ext_mask,
			 void *key, void *mask, u64 *fields)
{
	int off;

	if (ext_mask->vlan_etype)
		return -EOPNOTSUPP;

	if (ext_mask->vlan_tci) {
		off = dpaa2_eth_cls_fld_off(NET_PROT_VLAN, NH_FLD_VLAN_TCI);
		*(__be16 *)(key + off) = ext_value->vlan_tci;
		*(__be16 *)(mask + off) = ext_mask->vlan_tci;
		*fields |= DPAA2_ETH_DIST_VLAN;
	}

	return 0;
}

static int prep_mac_ext_rule(struct ethtool_flow_ext *ext_value,
			     struct ethtool_flow_ext *ext_mask,
			     void *key, void *mask, u64 *fields)
{
	int off;

	if (!is_zero_ether_addr(ext_mask->h_dest)) {
		off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_DA);
		ether_addr_copy(key + off, ext_value->h_dest);
		ether_addr_copy(mask + off, ext_mask->h_dest);
		*fields |= DPAA2_ETH_DIST_ETHDST;
	}

	return 0;
}

static int prep_cls_rule(struct ethtool_rx_flow_spec *fs, void *key, void *mask,
			 u64 *fields)
{
	int err;

	switch (fs->flow_type & 0xFF) {
	case ETHER_FLOW:
		err = prep_eth_rule(&fs->h_u.ether_spec, &fs->m_u.ether_spec,
				    key, mask, fields);
		break;
	case IP_USER_FLOW:
		err = prep_uip_rule(&fs->h_u.usr_ip4_spec,
				    &fs->m_u.usr_ip4_spec, key, mask, fields);
		break;
	case TCP_V4_FLOW:
		err = prep_l4_rule(&fs->h_u.tcp_ip4_spec, &fs->m_u.tcp_ip4_spec,
				   key, mask, IPPROTO_TCP, fields);
		break;
	case UDP_V4_FLOW:
		err = prep_l4_rule(&fs->h_u.udp_ip4_spec, &fs->m_u.udp_ip4_spec,
				   key, mask, IPPROTO_UDP, fields);
		break;
	case SCTP_V4_FLOW:
		err = prep_l4_rule(&fs->h_u.sctp_ip4_spec,
				   &fs->m_u.sctp_ip4_spec, key, mask,
				   IPPROTO_SCTP, fields);
		break;
	default:
		return -EOPNOTSUPP;
	}

	if (err)
		return err;

	if (fs->flow_type & FLOW_EXT) {
		err = prep_ext_rule(&fs->h_ext, &fs->m_ext, key, mask, fields);
		if (err)
			return err;
	}

	if (fs->flow_type & FLOW_MAC_EXT) {
		err = prep_mac_ext_rule(&fs->h_ext, &fs->m_ext, key, mask,
					fields);
		if (err)
			return err;
	}

	return 0;
}

static int do_cls_rule(struct net_device *net_dev,
		       struct ethtool_rx_flow_spec *fs,
		       bool add)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	struct device *dev = net_dev->dev.parent;
	struct dpni_rule_cfg rule_cfg = { 0 };
	struct dpni_fs_action_cfg fs_act = { 0 };
	dma_addr_t key_iova;
	u64 fields = 0;
	void *key_buf;
	int i, err;

	if (fs->ring_cookie != RX_CLS_FLOW_DISC &&
	    fs->ring_cookie >= dpaa2_eth_queue_count(priv))
		return -EINVAL;

	rule_cfg.key_size = dpaa2_eth_cls_key_size(DPAA2_ETH_DIST_ALL);

	/* allocate twice the key size, for the actual key and for mask */
	key_buf = kzalloc(rule_cfg.key_size * 2, GFP_KERNEL);
	if (!key_buf)
		return -ENOMEM;

	/* Fill the key and mask memory areas */
	err = prep_cls_rule(fs, key_buf, key_buf + rule_cfg.key_size, &fields);
	if (err)
		goto free_mem;

	if (!dpaa2_eth_fs_mask_enabled(priv)) {
		/* Masking allows us to configure a maximal key during init and
		 * use it for all flow steering rules. Without it, we include
		 * in the key only the fields actually used, so we need to
		 * extract the others from the final key buffer.
		 *
		 * Program the FS key if needed, or return error if previously
		 * set key can't be used for the current rule. User needs to
		 * delete existing rules in this case to allow for the new one.
		 */
		if (!priv->rx_cls_fields) {
			err = dpaa2_eth_set_cls(net_dev, fields);
			if (err)
				goto free_mem;

			priv->rx_cls_fields = fields;
		} else if (priv->rx_cls_fields != fields) {
			netdev_err(net_dev, "No support for multiple FS keys, need to delete existing rules\n");
			err = -EOPNOTSUPP;
			goto free_mem;
		}

		dpaa2_eth_cls_trim_rule(key_buf, fields);
		rule_cfg.key_size = dpaa2_eth_cls_key_size(fields);
	}

	key_iova = dma_map_single(dev, key_buf, rule_cfg.key_size * 2,
				  DMA_TO_DEVICE);
	if (dma_mapping_error(dev, key_iova)) {
		err = -ENOMEM;
		goto free_mem;
	}

	rule_cfg.key_iova = key_iova;
	if (dpaa2_eth_fs_mask_enabled(priv))
		rule_cfg.mask_iova = key_iova + rule_cfg.key_size;

	if (add) {
		if (fs->ring_cookie == RX_CLS_FLOW_DISC)
			fs_act.options |= DPNI_FS_OPT_DISCARD;
		else
			fs_act.flow_id = fs->ring_cookie;
	}
	for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
		if (add)
			err = dpni_add_fs_entry(priv->mc_io, 0, priv->mc_token,
						i, fs->location, &rule_cfg,
						&fs_act);
		else
			err = dpni_remove_fs_entry(priv->mc_io, 0,
						   priv->mc_token, i,
						   &rule_cfg);
		if (err)
			break;
	}

	dma_unmap_single(dev, key_iova, rule_cfg.key_size * 2, DMA_TO_DEVICE);

free_mem:
	kfree(key_buf);

	return err;
}

static int num_rules(struct dpaa2_eth_priv *priv)
{
	int i, rules = 0;

	for (i = 0; i < dpaa2_eth_fs_count(priv); i++)
		if (priv->cls_rules[i].in_use)
			rules++;

	return rules;
}

static int update_cls_rule(struct net_device *net_dev,
			   struct ethtool_rx_flow_spec *new_fs,
			   unsigned int location)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	struct dpaa2_eth_cls_rule *rule;
	int err = -EINVAL;

	if (!priv->rx_cls_enabled)
		return -EOPNOTSUPP;

	if (location >= dpaa2_eth_fs_count(priv))
		return -EINVAL;

	rule = &priv->cls_rules[location];

	/* If a rule is present at the specified location, delete it. */
	if (rule->in_use) {
		err = do_cls_rule(net_dev, &rule->fs, false);
		if (err)
			return err;

		rule->in_use = 0;

		if (!dpaa2_eth_fs_mask_enabled(priv) && !num_rules(priv))
			priv->rx_cls_fields = 0;
	}

	/* If no new entry to add, return here */
	if (!new_fs)
		return err;

	err = do_cls_rule(net_dev, new_fs, true);
	if (err)
		return err;

	rule->in_use = 1;
	rule->fs = *new_fs;

	return 0;
}
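
/* Inserting at an occupied location is therefore a delete-then-add: the old
 * rule is removed first, so a failed insert leaves the slot empty rather than
 * keeping the previous rule.
 */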

static int dpaa2_eth_get_rxnfc(struct net_device *net_dev,
			       struct ethtool_rxnfc *rxnfc, u32 *rule_locs)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	int max_rules = dpaa2_eth_fs_count(priv);
	int i, j = 0;

	switch (rxnfc->cmd) {
	case ETHTOOL_GRXFH:
		/* we purposely ignore cmd->flow_type for now, because the
		 * classifier only supports a single set of fields for all
		 * protocols
		 */
		rxnfc->data = priv->rx_hash_fields;
		break;
	case ETHTOOL_GRXRINGS:
		rxnfc->data = dpaa2_eth_queue_count(priv);
		break;
	case ETHTOOL_GRXCLSRLCNT:
		rxnfc->rule_cnt = num_rules(priv);
		rxnfc->data = max_rules;
		break;
	case ETHTOOL_GRXCLSRULE:
		if (rxnfc->fs.location >= max_rules)
			return -EINVAL;
		rxnfc->fs.location = array_index_nospec(rxnfc->fs.location,
							max_rules);
		if (!priv->cls_rules[rxnfc->fs.location].in_use)
			return -EINVAL;
		rxnfc->fs = priv->cls_rules[rxnfc->fs.location].fs;
		break;
	case ETHTOOL_GRXCLSRLALL:
		for (i = 0; i < max_rules; i++) {
			if (!priv->cls_rules[i].in_use)
				continue;
			if (j == rxnfc->rule_cnt)
				return -EMSGSIZE;
			rule_locs[j++] = i;
		}
		rxnfc->rule_cnt = j;
		rxnfc->data = max_rules;
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

static int dpaa2_eth_set_rxnfc(struct net_device *net_dev,
			       struct ethtool_rxnfc *rxnfc)
{
	int err = 0;

	switch (rxnfc->cmd) {
	case ETHTOOL_SRXFH:
		if ((rxnfc->data & DPAA2_RXH_SUPPORTED) != rxnfc->data)
			return -EOPNOTSUPP;
		err = dpaa2_eth_set_hash(net_dev, rxnfc->data);
		break;
	case ETHTOOL_SRXCLSRLINS:
		err = update_cls_rule(net_dev, &rxnfc->fs, rxnfc->fs.location);
		break;
	case ETHTOOL_SRXCLSRLDEL:
		err = update_cls_rule(net_dev, NULL, rxnfc->fs.location);
		break;
	default:
		err = -EOPNOTSUPP;
	}

	return err;
}

int dpaa2_phc_index = -1;
EXPORT_SYMBOL(dpaa2_phc_index);
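
/* dpaa2_phc_index is filled in by the dpaa2-ptp driver once the PTP clock is
 * registered; until then the -1 value reports to user space that no PHC is
 * available.
 */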

static int dpaa2_eth_get_ts_info(struct net_device *dev,
				 struct ethtool_ts_info *info)
{
	info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
				SOF_TIMESTAMPING_RX_HARDWARE |
				SOF_TIMESTAMPING_RAW_HARDWARE;

	info->phc_index = dpaa2_phc_index;

	info->tx_types = (1 << HWTSTAMP_TX_OFF) |
			 (1 << HWTSTAMP_TX_ON);

	info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
			   (1 << HWTSTAMP_FILTER_ALL);
	return 0;
}

const struct ethtool_ops dpaa2_ethtool_ops = {
	.get_drvinfo = dpaa2_eth_get_drvinfo,
	.nway_reset = dpaa2_eth_nway_reset,
	.get_link = ethtool_op_get_link,
	.get_link_ksettings = dpaa2_eth_get_link_ksettings,
	.set_link_ksettings = dpaa2_eth_set_link_ksettings,
	.get_pauseparam = dpaa2_eth_get_pauseparam,
	.set_pauseparam = dpaa2_eth_set_pauseparam,
	.get_sset_count = dpaa2_eth_get_sset_count,
	.get_ethtool_stats = dpaa2_eth_get_ethtool_stats,
	.get_strings = dpaa2_eth_get_strings,
	.get_rxnfc = dpaa2_eth_get_rxnfc,
	.set_rxnfc = dpaa2_eth_set_rxnfc,
	.get_ts_info = dpaa2_eth_get_ts_info,
};