1 // SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2019 Synopsys, Inc. and/or its affiliates.
 * stmmac Selftests Support
 *
 * Author: Jose Abreu <joabreu@synopsys.com>
 */
9 #include <linux/completion.h>
10 #include <linux/ethtool.h>
12 #include <linux/phy.h>
13 #include <linux/udp.h>
14 #include <net/pkt_cls.h>
17 #include <net/tc_act/tc_gact.h>
/* NOTE(review): this listing has gaps in its embedded line numbering —
 * braces, declarations and whole statements are missing from view.
 * Comments throughout describe only what is visible; TODOs flag what
 * must be confirmed against the complete source. */

/* Minimum test frame: Ethernet + IPv4 headers plus the private stmmachdr. */
26 #define STMMAC_TEST_PKT_SIZE (sizeof(struct ethhdr) + sizeof(struct iphdr) + \
27 sizeof(struct stmmachdr))
/* Magic value stamped into every generated test packet so the RX-side
 * validators can tell our packets from unrelated traffic. */
28 #define STMMAC_TEST_PKT_MAGIC 0xdeadcafecafedeadULL
/* How long each loopback test waits for its packet to come back. */
29 #define STMMAC_LB_TIMEOUT msecs_to_jiffies(200)
/* Describes one generated test packet (addresses, ports, VLAN tags, size...).
 * Field list truncated in this view — see full source. */
31 struct stmmac_packet_attrs {
/* Monotonic per-packet id; written into stmmachdr and echoed back for
 * matching TX packet to RX packet. Reset at the start of a test run. */
53 static u8 stmmac_test_next_id;
/* Build a self-describing UDP (or TCP, per attr) test skb according to
 * @attr: optional VLAN/QinQ tags, optional removed source address,
 * optional padding up to attr->max_size, and a trailing stmmachdr
 * carrying STMMAC_TEST_PKT_MAGIC plus a sequence id.
 * Returns the skb ready for dev_queue_xmit(); presumably NULL on
 * allocation failure — TODO confirm, error paths not visible here. */
55 static struct sk_buff *stmmac_test_get_udp_skb(struct stmmac_priv *priv,
56 struct stmmac_packet_attrs *attr)
58 struct sk_buff *skb = NULL;
59 struct udphdr *uhdr = NULL;
60 struct tcphdr *thdr = NULL;
61 struct stmmachdr *shdr;
/* Base size: payload + eth/ip/stmmac headers; L4 header added below. */
66 size = attr->size + STMMAC_TEST_PKT_SIZE;
74 size += sizeof(struct tcphdr);
76 size += sizeof(struct udphdr);
/* max_size (when set and larger) overrides the computed size. */
78 if (attr->max_size && (attr->max_size > size))
79 size = attr->max_size;
81 skb = netdev_alloc_skb_ip_align(priv->dev, size);
/* Push room for the MAC header: +8 for double VLAN, +4 for single VLAN,
 * -6 when the source address is omitted (remove_sa). Conditions guarding
 * these branches are not visible in this listing. */
88 ehdr = skb_push(skb, ETH_HLEN + 8);
90 ehdr = skb_push(skb, ETH_HLEN + 4);
91 else if (attr->remove_sa)
92 ehdr = skb_push(skb, ETH_HLEN - 6);
94 ehdr = skb_push(skb, ETH_HLEN);
95 skb_reset_mac_header(skb);
97 skb_set_network_header(skb, skb->len);
98 ihdr = skb_put(skb, sizeof(*ihdr));
100 skb_set_transport_header(skb, skb->len);
102 thdr = skb_put(skb, sizeof(*thdr));
104 uhdr = skb_put(skb, sizeof(*uhdr));
/* Fill MAC addresses; source is only present when !remove_sa. */
106 if (!attr->remove_sa)
107 eth_zero_addr(ehdr->h_source);
108 eth_zero_addr(ehdr->h_dest);
109 if (attr->src && !attr->remove_sa)
110 ether_addr_copy(ehdr->h_source, attr->src);
112 ether_addr_copy(ehdr->h_dest, attr->dst);
/* Ethertype: normal offset with SA present, shifted by 6 bytes (one
 * MAC address) when SA was removed — ptr[3] lands on the proto field. */
114 if (!attr->remove_sa) {
115 ehdr->h_proto = htons(ETH_P_IP);
117 __be16 *ptr = (__be16 *)ehdr;
120 ptr[3] = htons(ETH_P_IP);
/* VLAN tag insertion: compute tag/proto offsets (again adjusted when
 * the SA is absent), then write 802.1Q, or 802.1AD + inner 802.1Q
 * for attr->vlan > 1 (QinQ). */
126 if (!attr->remove_sa) {
127 tag = (void *)ehdr + ETH_HLEN;
128 proto = (void *)ehdr + (2 * ETH_ALEN);
130 tag = (void *)ehdr + ETH_HLEN - 6;
131 proto = (void *)ehdr + ETH_ALEN;
134 proto[0] = htons(ETH_P_8021Q);
135 tag[0] = htons(attr->vlan_id_out);
136 tag[1] = htons(ETH_P_IP);
137 if (attr->vlan > 1) {
138 proto[0] = htons(ETH_P_8021AD);
139 tag[1] = htons(ETH_P_8021Q);
140 tag[2] = htons(attr->vlan_id_in);
141 tag[3] = htons(ETH_P_IP);
/* L4 header fields (TCP branch then UDP branch; selector not visible). */
146 thdr->source = htons(attr->sport);
147 thdr->dest = htons(attr->dport);
148 thdr->doff = sizeof(struct tcphdr) / 4;
151 uhdr->source = htons(attr->sport);
152 uhdr->dest = htons(attr->dport);
153 uhdr->len = htons(sizeof(*shdr) + sizeof(*uhdr) + attr->size);
/* When padded to max_size, recompute UDP length from the frame size. */
155 uhdr->len = htons(attr->max_size -
156 (sizeof(*ihdr) + sizeof(*ehdr)));
/* IPv4 header: protocol, total length (payload- or max_size-based),
 * addresses from attr. Other ihdr fields set on lines not shown. */
164 ihdr->protocol = IPPROTO_TCP;
166 ihdr->protocol = IPPROTO_UDP;
167 iplen = sizeof(*ihdr) + sizeof(*shdr) + attr->size;
169 iplen += sizeof(*thdr);
171 iplen += sizeof(*uhdr);
174 iplen = attr->max_size - sizeof(*ehdr);
176 ihdr->tot_len = htons(iplen);
178 ihdr->saddr = htonl(attr->ip_src);
179 ihdr->daddr = htonl(attr->ip_dst);
/* Private trailer: magic + sequence id; the id is also stored back in
 * attr so the RX validator can match this exact packet. */
184 shdr = skb_put(skb, sizeof(*shdr));
186 shdr->magic = cpu_to_be64(STMMAC_TEST_PKT_MAGIC);
187 attr->id = stmmac_test_next_id;
188 shdr->id = stmmac_test_next_id++;
/* Reserve payload, then pad out to max_size if requested. */
191 skb_put(skb, attr->size);
192 if (attr->max_size && (attr->max_size > skb->len))
193 skb_put(skb, attr->max_size - skb->len);
/* Ask HW to finish the L4 checksum: seed TCP pseudo-header checksum and
 * csum_start/offset, or use udp4_hwcsum() for UDP. */
196 skb->ip_summed = CHECKSUM_PARTIAL;
198 thdr->check = ~tcp_v4_check(skb->len, ihdr->saddr, ihdr->daddr, 0);
199 skb->csum_start = skb_transport_header(skb) - skb->head;
200 skb->csum_offset = offsetof(struct tcphdr, check);
202 udp4_hwcsum(skb, ihdr->saddr, ihdr->daddr);
205 skb->protocol = htons(ETH_P_IP);
206 skb->pkt_type = PACKET_HOST;
207 skb->dev = priv->dev;
/* Build an ARP request skb (who-has attr->ip_dst tell attr->ip_src)
 * with the given source/dest MACs, bound to this device. Returns the
 * skb; presumably NULL when arp_create() fails — TODO confirm, the
 * error check is not visible in this listing. */
212 static struct sk_buff *stmmac_test_get_arp_skb(struct stmmac_priv *priv,
213 struct stmmac_packet_attrs *attr)
215 __be32 ip_src = htonl(attr->ip_src);
216 __be32 ip_dst = htonl(attr->ip_dst);
217 struct sk_buff *skb = NULL;
219 skb = arp_create(ARPOP_REQUEST, ETH_P_ARP, ip_dst, priv->dev, ip_src,
220 NULL, attr->src, attr->dst);
224 skb->pkt_type = PACKET_HOST;
225 skb->dev = priv->dev;
/* Per-test context shared with the packet_type RX handler: the expected
 * packet attributes, the registered packet_type, and a completion the
 * handler signals on a successful match. More members (ok, vlan_id,
 * double_vlan — referenced later in this file) are truncated here. */
230 struct stmmac_test_priv {
231 struct stmmac_packet_attrs *packet;
232 struct packet_type pt;
233 struct completion comp;
/* packet_type handler for loopback tests: verify a received skb matches
 * the packet described in tpriv->packet (MAC addrs, L4 dport, magic,
 * sequence id) and complete tpriv->comp on success. Return value paths
 * and the "ok" flag assignment are on lines not shown. */
239 static int stmmac_test_loopback_validate(struct sk_buff *skb,
240 struct net_device *ndev,
241 struct packet_type *pt,
242 struct net_device *orig_ndev)
244 struct stmmac_test_priv *tpriv = pt->af_packet_priv;
245 struct stmmachdr *shdr;
/* Get a private, linear copy before parsing headers by offset. */
251 skb = skb_unshare(skb, GFP_ATOMIC);
255 if (skb_linearize(skb))
257 if (skb_headlen(skb) < (STMMAC_TEST_PKT_SIZE - ETH_HLEN))
/* MAC-level checks: expected dest, and either SA==DA (sarc tests) or
 * the expected source address. */
260 ehdr = (struct ethhdr *)skb_mac_header(skb);
261 if (tpriv->packet->dst) {
262 if (!ether_addr_equal(ehdr->h_dest, tpriv->packet->dst))
265 if (tpriv->packet->sarc) {
266 if (!ether_addr_equal(ehdr->h_source, ehdr->h_dest))
268 } else if (tpriv->packet->src) {
269 if (!ether_addr_equal(ehdr->h_source, tpriv->packet->src))
/* Double-VLAN frames carry an extra 4-byte tag before the IP header. */
274 if (tpriv->double_vlan)
275 ihdr = (struct iphdr *)(skb_network_header(skb) + 4);
/* L4 checks: protocol and destination port, then locate the private
 * stmmachdr directly after the TCP/UDP header. */
277 if (tpriv->packet->tcp) {
278 if (ihdr->protocol != IPPROTO_TCP)
281 thdr = (struct tcphdr *)((u8 *)ihdr + 4 * ihdr->ihl);
282 if (thdr->dest != htons(tpriv->packet->dport))
285 shdr = (struct stmmachdr *)((u8 *)thdr + sizeof(*thdr));
287 if (ihdr->protocol != IPPROTO_UDP)
290 uhdr = (struct udphdr *)((u8 *)ihdr + 4 * ihdr->ihl);
291 if (uhdr->dest != htons(tpriv->packet->dport))
294 shdr = (struct stmmachdr *)((u8 *)uhdr + sizeof(*uhdr));
/* Final identity checks: magic, optional RSS hash, and sequence id. */
297 if (shdr->magic != cpu_to_be64(STMMAC_TEST_PKT_MAGIC))
299 if (tpriv->packet->exp_hash && !skb->hash)
301 if (tpriv->packet->id != shdr->id)
305 complete(&tpriv->comp);
/* Core loopback test: register an ETH_P_IP packet_type hook, transmit
 * one generated UDP/TCP test packet on attr->queue_mapping, and wait
 * (up to attr->timeout, default STMMAC_LB_TIMEOUT) for the validator to
 * signal success. Returns 0 on success, -ETIMEDOUT when the packet did
 * not come back; other error paths are on lines not shown. */
311 static int __stmmac_test_loopback(struct stmmac_priv *priv,
312 struct stmmac_packet_attrs *attr)
314 struct stmmac_test_priv *tpriv;
315 struct sk_buff *skb = NULL;
318 tpriv = kzalloc(sizeof(*tpriv), GFP_KERNEL);
323 init_completion(&tpriv->comp);
325 tpriv->pt.type = htons(ETH_P_IP);
326 tpriv->pt.func = stmmac_test_loopback_validate;
327 tpriv->pt.dev = priv->dev;
328 tpriv->pt.af_packet_priv = tpriv;
329 tpriv->packet = attr;
/* Only hook RX when the caller actually waits for the echo.
 * NOTE(review): guard reads "if (!attr->dont_wait)" here and again for
 * dev_remove_pack() below — consistent with add/remove pairing. */
331 if (!attr->dont_wait)
332 dev_add_pack(&tpriv->pt);
334 skb = stmmac_test_get_udp_skb(priv, attr);
340 skb_set_queue_mapping(skb, attr->queue_mapping);
341 ret = dev_queue_xmit(skb);
/* Default timeout assignment; the guard (presumably "if (!attr->timeout)")
 * is not visible — TODO confirm against full source. */
349 attr->timeout = STMMAC_LB_TIMEOUT;
351 wait_for_completion_timeout(&tpriv->comp, attr->timeout);
352 ret = tpriv->ok ? 0 : -ETIMEDOUT;
355 if (!attr->dont_wait)
356 dev_remove_pack(&tpriv->pt);
/* MAC loopback smoke test: send a packet addressed to ourselves and
 * expect it echoed back by the MAC-level loopback path. */
361 static int stmmac_test_mac_loopback(struct stmmac_priv *priv)
363 struct stmmac_packet_attrs attr = { };
365 attr.dst = priv->dev->dev_addr;
366 return __stmmac_test_loopback(priv, &attr);
/* PHY loopback test: enable loopback at the PHY, run the same self-
 * addressed loopback exchange, then disable PHY loopback again. */
369 static int stmmac_test_phy_loopback(struct stmmac_priv *priv)
371 struct stmmac_packet_attrs attr = { };
374 if (!priv->dev->phydev)
377 ret = phy_loopback(priv->dev->phydev, true);
381 attr.dst = priv->dev->dev_addr;
382 ret = __stmmac_test_loopback(priv, &attr);
384 phy_loopback(priv->dev->phydev, false);
/* MMC counters test: snapshot HW counters, run a MAC loopback, snapshot
 * again, and require the good-TX frame counter to have advanced.
 * NOTE(review): "initial" is only memset to zero here; the visible read
 * at line 400 goes into priv->mmc, not &initial — whether initial is
 * populated on a missing line needs confirming against full source. */
388 static int stmmac_test_mmc(struct stmmac_priv *priv)
390 struct stmmac_counters initial, final;
393 memset(&initial, 0, sizeof(initial));
394 memset(&final, 0, sizeof(final));
/* Requires the RMON/MMC counter block in HW. */
396 if (!priv->dma_cap.rmon)
399 /* Save previous results into internal struct */
400 stmmac_mmc_read(priv, priv->mmcaddr, &priv->mmc);
402 ret = stmmac_test_mac_loopback(priv);
406 /* These will be loopback results so no need to save them */
407 stmmac_mmc_read(priv, priv->mmcaddr, &final);
410 * The number of MMC counters available depends on HW configuration
411 * so we just use this one to validate the feature. I hope there is
412 * not a version without this counter.
414 if (final.mmc_tx_framecount_g <= initial.mmc_tx_framecount_g)
/* EEE (Energy Efficient Ethernet) test: capture xstats before and after
 * a MAC loopback, then require both the enter-LPI and exit-LPI IRQ
 * counters to have advanced (the line goes idle, so LPI entry/exit must
 * occur). Cleanup labels / retry loop are on lines not shown. */
420 static int stmmac_test_eee(struct stmmac_priv *priv)
422 struct stmmac_extra_stats *initial, *final;
426 if (!priv->dma_cap.eee || !priv->eee_active)
429 initial = kzalloc(sizeof(*initial), GFP_KERNEL)
433 final = kzalloc(sizeof(*final), GFP_KERNEL);
436 goto out_free_initial;
439 memcpy(initial, &priv->xstats, sizeof(*initial));
441 ret = stmmac_test_mac_loopback(priv);
445 /* We have no traffic in the line so, sooner or later it will go LPI */
447 memcpy(final, &priv->xstats, sizeof(*final));
/* First check appears in a wait/retry context (counter advanced →
 * presumably break/continue); the exact control flow is not visible. */
449 if (final->irq_tx_path_in_lpi_mode_n >
450 initial->irq_tx_path_in_lpi_mode_n)
460 if (final->irq_tx_path_in_lpi_mode_n <=
461 initial->irq_tx_path_in_lpi_mode_n) {
466 if (final->irq_tx_path_exit_lpi_mode_n <=
467 initial->irq_tx_path_exit_lpi_mode_n) {
/* Guard for filtering tests: they are meaningless in promiscuous mode
 * (everything is accepted), so warn and bail out in that case. */
479 static int stmmac_filter_check(struct stmmac_priv *priv)
481 if (!(priv->dev->flags & IFF_PROMISC))
484 netdev_warn(priv->dev, "Test can't be run in promiscuous mode!\n");
/* Hash filter test: register a multicast address (gd_addr), expect a
 * packet to it to be received, then expect a packet to a different,
 * unregistered address (bd_addr) to be dropped. attr.dst assignments
 * between the two loopback calls are on lines not shown. */
488 static int stmmac_test_hfilt(struct stmmac_priv *priv)
490 unsigned char gd_addr[ETH_ALEN] = {0x01, 0x00, 0xcc, 0xcc, 0xdd, 0xdd};
491 unsigned char bd_addr[ETH_ALEN] = {0x09, 0x00, 0xaa, 0xaa, 0xbb, 0xbb};
492 struct stmmac_packet_attrs attr = { };
495 ret = stmmac_filter_check(priv);
499 ret = dev_mc_add(priv->dev, gd_addr);
505 /* Shall receive packet */
506 ret = __stmmac_test_loopback(priv, &attr);
512 /* Shall NOT receive packet */
513 ret = __stmmac_test_loopback(priv, &attr);
/* A timeout (non-zero ret) here is the PASS condition. */
514 ret = ret ? 0 : -EINVAL;
517 dev_mc_del(priv->dev, gd_addr);
/* Perfect (exact-match) unicast filter test: same good/bad address
 * scheme as above but via dev_uc_add/dev_uc_del. */
521 static int stmmac_test_pfilt(struct stmmac_priv *priv)
523 unsigned char gd_addr[ETH_ALEN] = {0x00, 0x01, 0x44, 0x55, 0x66, 0x77};
524 unsigned char bd_addr[ETH_ALEN] = {0x08, 0x00, 0x22, 0x33, 0x44, 0x55};
525 struct stmmac_packet_attrs attr = { };
528 if (stmmac_filter_check(priv))
531 ret = dev_uc_add(priv->dev, gd_addr);
537 /* Shall receive packet */
538 ret = __stmmac_test_loopback(priv, &attr);
544 /* Shall NOT receive packet */
545 ret = __stmmac_test_loopback(priv, &attr);
546 ret = ret ? 0 : -EINVAL;
549 dev_uc_del(priv->dev, gd_addr);
/* No-op sync callback for __dev_mc_sync()/__dev_uc_sync() below; body
 * (presumably "return 0;") is on a line not shown. */
553 static int stmmac_dummy_sync(struct net_device *netdev, const u8 *addr)
/* Push the current UC/MC address lists down to the hardware filter. */
558 static void stmmac_test_set_rx_mode(struct net_device *netdev)
560 /* As we are in test mode of ethtool we already own the rtnl lock
561 * so no address will change from user. We can just call the
562 * ndo_set_rx_mode() callback directly */
563 if (netdev->netdev_ops->ndo_set_rx_mode)
564 netdev->netdev_ops->ndo_set_rx_mode(netdev);
/* MC filter isolation test: clear all multicast addresses, register only
 * a unicast address, then check the UC packet is received while an MC
 * packet (mc_addr) is dropped. Restores the MC list on exit. */
567 static int stmmac_test_mcfilt(struct stmmac_priv *priv)
569 unsigned char uc_addr[ETH_ALEN] = {0x00, 0x01, 0x44, 0x55, 0x66, 0x77};
570 unsigned char mc_addr[ETH_ALEN] = {0x01, 0x01, 0x44, 0x55, 0x66, 0x77};
571 struct stmmac_packet_attrs attr = { };
574 if (stmmac_filter_check(priv))
577 /* Remove all MC addresses */
578 __dev_mc_unsync(priv->dev, NULL);
579 stmmac_test_set_rx_mode(priv->dev);
581 ret = dev_uc_add(priv->dev, uc_addr);
587 /* Shall receive packet */
588 ret = __stmmac_test_loopback(priv, &attr);
594 /* Shall NOT receive packet */
595 ret = __stmmac_test_loopback(priv, &attr);
596 ret = ret ? 0 : -EINVAL;
599 dev_uc_del(priv->dev, uc_addr);
600 __dev_mc_sync(priv->dev, stmmac_dummy_sync, NULL);
601 stmmac_test_set_rx_mode(priv->dev);
/* UC filter isolation test: mirror image of the above — clear all
 * unicast addresses, register only mc_addr, expect MC received and
 * UC dropped. Restores the UC list on exit. */
605 static int stmmac_test_ucfilt(struct stmmac_priv *priv)
607 unsigned char uc_addr[ETH_ALEN] = {0x00, 0x01, 0x44, 0x55, 0x66, 0x77};
608 unsigned char mc_addr[ETH_ALEN] = {0x01, 0x01, 0x44, 0x55, 0x66, 0x77};
609 struct stmmac_packet_attrs attr = { };
612 if (stmmac_filter_check(priv))
615 /* Remove all UC addresses */
616 __dev_uc_unsync(priv->dev, NULL);
617 stmmac_test_set_rx_mode(priv->dev);
619 ret = dev_mc_add(priv->dev, mc_addr);
625 /* Shall receive packet */
626 ret = __stmmac_test_loopback(priv, &attr);
632 /* Shall NOT receive packet */
633 ret = __stmmac_test_loopback(priv, &attr);
634 ret = ret ? 0 : -EINVAL;
637 dev_mc_del(priv->dev, mc_addr);
638 __dev_uc_sync(priv->dev, stmmac_dummy_sync, NULL);
639 stmmac_test_set_rx_mode(priv->dev);
/* packet_type handler for the flow-control test: accept only PAUSE
 * frames (ETH_P_PAUSE) sourced from our own MAC, then signal the
 * waiting test via the completion. */
643 static int stmmac_test_flowctrl_validate(struct sk_buff *skb,
644 struct net_device *ndev,
645 struct packet_type *pt,
646 struct net_device *orig_ndev)
648 struct stmmac_test_priv *tpriv = pt->af_packet_priv;
651 ehdr = (struct ethhdr *)skb_mac_header(skb);
652 if (!ether_addr_equal(ehdr->h_source, orig_ndev->dev_addr))
654 if (ehdr->h_proto != htons(ETH_P_PAUSE))
658 complete(&tpriv->comp);
/* Flow control test: stop all RX DMA queues, flood enough packets to
 * fill the RX FIFO so the MAC emits a PAUSE frame, then restart RX and
 * wait for the validator above to see that PAUSE frame. */
664 static int stmmac_test_flowctrl(struct stmmac_priv *priv)
/* paddr = IEEE 802.3 PAUSE multicast address 01:80:C2:00:00:01. */
666 unsigned char paddr[ETH_ALEN] = {0x01, 0x80, 0xC2, 0x00, 0x00, 0x01};
667 struct phy_device *phydev = priv->dev->phydev;
668 u32 rx_cnt = priv->plat->rx_queues_to_use;
669 struct stmmac_test_priv *tpriv;
670 unsigned int pkt_count;
/* Requires a PHY that negotiated (asymmetric) pause. */
673 if (!phydev || (!phydev->pause && !phydev->asym_pause))
676 tpriv = kzalloc(sizeof(*tpriv), GFP_KERNEL);
681 init_completion(&tpriv->comp);
682 tpriv->pt.type = htons(ETH_P_PAUSE);
683 tpriv->pt.func = stmmac_test_flowctrl_validate;
684 tpriv->pt.dev = priv->dev;
685 tpriv->pt.af_packet_priv = tpriv;
686 dev_add_pack(&tpriv->pt);
688 /* Compute minimum number of packets to make FIFO full */
689 pkt_count = priv->plat->rx_fifo_size;
/* Fallback to the DMA-capability FIFO size when platform value unset;
 * the conversion from bytes to a packet count is on lines not shown. */
691 pkt_count = priv->dma_cap.rx_fifo_size;
/* Stop RX so transmitted packets pile up in the FIFO. */
695 for (i = 0; i < rx_cnt; i++)
696 stmmac_stop_rx(priv, priv->ioaddr, i);
698 ret = dev_set_promiscuity(priv->dev, 1);
702 ret = dev_mc_add(priv->dev, paddr);
/* Fire-and-forget packets (dont_wait): we only care about the PAUSE. */
706 for (i = 0; i < pkt_count; i++) {
707 struct stmmac_packet_attrs attr = { };
709 attr.dst = priv->dev->dev_addr;
710 attr.dont_wait = true;
713 ret = __stmmac_test_loopback(priv, &attr);
720 /* Wait for some time in case RX Watchdog is enabled */
/* Restart RX: reprogram the tail pointer past the whole ring, start the
 * DMA, and kick NAPI so the queued packets get processed. */
723 for (i = 0; i < rx_cnt; i++) {
724 struct stmmac_channel *ch = &priv->channel[i];
727 tail = priv->rx_queue[i].dma_rx_phy +
728 (DMA_RX_SIZE * sizeof(struct dma_desc));
730 stmmac_set_rx_tail_ptr(priv, priv->ioaddr, tail, i);
731 stmmac_start_rx(priv, priv->ioaddr, i);
734 napi_reschedule(&ch->rx_napi);
738 wait_for_completion_timeout(&tpriv->comp, STMMAC_LB_TIMEOUT);
739 ret = tpriv->ok ? 0 : -ETIMEDOUT;
742 dev_mc_del(priv->dev, paddr);
743 dev_set_promiscuity(priv->dev, -1);
744 dev_remove_pack(&tpriv->pt);
/* RSS test: run a normal loopback but require the received skb to carry
 * a non-zero hash (exp_hash checked in stmmac_test_loopback_validate). */
749 static int stmmac_test_rss(struct stmmac_priv *priv)
751 struct stmmac_packet_attrs attr = { };
753 if (!priv->dma_cap.rssen || !priv->rss.enable)
756 attr.dst = priv->dev->dev_addr;
757 attr.exp_hash = true;
761 return __stmmac_test_loopback(priv, &attr);
/* packet_type handler for VLAN tests: check the HW-accelerated VLAN tag
 * (proto 802.1AD for double VLAN, else 802.1Q; tci == expected id),
 * then the usual dest MAC / UDP dport / magic checks, and complete. */
764 static int stmmac_test_vlan_validate(struct sk_buff *skb,
765 struct net_device *ndev,
766 struct packet_type *pt,
767 struct net_device *orig_ndev)
769 struct stmmac_test_priv *tpriv = pt->af_packet_priv;
770 struct stmmachdr *shdr;
776 proto = tpriv->double_vlan ? ETH_P_8021AD : ETH_P_8021Q;
778 skb = skb_unshare(skb, GFP_ATOMIC);
782 if (skb_linearize(skb))
784 if (skb_headlen(skb) < (STMMAC_TEST_PKT_SIZE - ETH_HLEN))
/* Only check the tag when the test actually expects a VLAN id. */
786 if (tpriv->vlan_id) {
787 if (skb->vlan_proto != htons(proto))
789 if (skb->vlan_tci != tpriv->vlan_id)
793 ehdr = (struct ethhdr *)skb_mac_header(skb);
794 if (!ether_addr_equal(ehdr->h_dest, tpriv->packet->dst))
/* Inner tag still in the payload for double VLAN: skip 4 bytes. */
798 if (tpriv->double_vlan)
799 ihdr = (struct iphdr *)(skb_network_header(skb) + 4);
800 if (ihdr->protocol != IPPROTO_UDP)
803 uhdr = (struct udphdr *)((u8 *)ihdr + 4 * ihdr->ihl);
804 if (uhdr->dest != htons(tpriv->packet->dport))
807 shdr = (struct stmmachdr *)((u8 *)uhdr + sizeof(*uhdr));
808 if (shdr->magic != cpu_to_be64(STMMAC_TEST_PKT_MAGIC))
812 complete(&tpriv->comp);
/* VLAN hash filter test: register VID 0x123, then send packets with
 * VIDs 0x123..0x126. Only i == 0 (the registered VID) must be received;
 * the others must time out. */
819 static int stmmac_test_vlanfilt(struct stmmac_priv *priv)
821 struct stmmac_packet_attrs attr = { };
822 struct stmmac_test_priv *tpriv;
823 struct sk_buff *skb = NULL;
/* Requires the VLAN hash-filtering capability. */
826 if (!priv->dma_cap.vlhash)
829 tpriv = kzalloc(sizeof(*tpriv), GFP_KERNEL);
834 init_completion(&tpriv->comp);
836 tpriv->pt.type = htons(ETH_P_IP);
837 tpriv->pt.func = stmmac_test_vlan_validate;
838 tpriv->pt.dev = priv->dev;
839 tpriv->pt.af_packet_priv = tpriv;
840 tpriv->packet = &attr;
843 * As we use HASH filtering, false positives may appear. This is a
844 * specially chosen ID so that adjacent IDs (+4) have different
847 tpriv->vlan_id = 0x123;
848 dev_add_pack(&tpriv->pt);
850 ret = vlan_vid_add(priv->dev, htons(ETH_P_8021Q), tpriv->vlan_id);
854 for (i = 0; i < 4; i++) {
856 attr.vlan_id_out = tpriv->vlan_id + i;
857 attr.dst = priv->dev->dev_addr;
861 skb = stmmac_test_get_udp_skb(priv, &attr);
867 skb_set_queue_mapping(skb, 0);
868 ret = dev_queue_xmit(skb);
872 wait_for_completion_timeout(&tpriv->comp, STMMAC_LB_TIMEOUT);
873 ret = tpriv->ok ? 0 : -ETIMEDOUT;
/* i > 0 received a packet it should not have: that is a failure; the
 * branch bodies are on lines not shown. */
876 } else if (!ret && i) {
887 vlan_vid_del(priv->dev, htons(ETH_P_8021Q), tpriv->vlan_id);
889 dev_remove_pack(&tpriv->pt);
/* Double-VLAN (802.1AD/QinQ) filter test: same scheme as
 * stmmac_test_vlanfilt but with the S-VLAN ethertype and
 * tpriv->double_vlan set so the validator expects the extra tag. */
894 static int stmmac_test_dvlanfilt(struct stmmac_priv *priv)
896 struct stmmac_packet_attrs attr = { };
897 struct stmmac_test_priv *tpriv;
898 struct sk_buff *skb = NULL;
901 if (!priv->dma_cap.vlhash)
904 tpriv = kzalloc(sizeof(*tpriv), GFP_KERNEL);
909 tpriv->double_vlan = true;
910 init_completion(&tpriv->comp);
912 tpriv->pt.type = htons(ETH_P_8021Q);
913 tpriv->pt.func = stmmac_test_vlan_validate;
914 tpriv->pt.dev = priv->dev;
915 tpriv->pt.af_packet_priv = tpriv;
916 tpriv->packet = &attr;
919 * As we use HASH filtering, false positives may appear. This is a
920 * specially chosen ID so that adjacent IDs (+4) have different
923 tpriv->vlan_id = 0x123;
924 dev_add_pack(&tpriv->pt);
926 ret = vlan_vid_add(priv->dev, htons(ETH_P_8021AD), tpriv->vlan_id);
/* Only i == 0 (registered VID) may be received; others must time out. */
930 for (i = 0; i < 4; i++) {
932 attr.vlan_id_out = tpriv->vlan_id + i;
933 attr.dst = priv->dev->dev_addr;
937 skb = stmmac_test_get_udp_skb(priv, &attr);
943 skb_set_queue_mapping(skb, 0);
944 ret = dev_queue_xmit(skb);
948 wait_for_completion_timeout(&tpriv->comp, STMMAC_LB_TIMEOUT);
949 ret = tpriv->ok ? 0 : -ETIMEDOUT;
952 } else if (!ret && i) {
963 vlan_vid_del(priv->dev, htons(ETH_P_8021AD), tpriv->vlan_id);
965 dev_remove_pack(&tpriv->pt);
970 #ifdef CONFIG_NET_CLS_ACT
/* Flexible RX Parser test: install a cls_u32 drop rule matching bytes
 * 0xdeadbeef at offset 6 (the crafted source MAC "de:ad:be:ef:00:00"),
 * send a packet with that source, and require it NOT to be received.
 * The rule is removed afterwards. nk (number of keys/actions) is
 * declared on a line not shown. */
971 static int stmmac_test_rxp(struct stmmac_priv *priv)
973 unsigned char addr[ETH_ALEN] = {0xde, 0xad, 0xbe, 0xef, 0x00, 0x00};
974 struct tc_cls_u32_offload cls_u32 = { };
975 struct stmmac_packet_attrs attr = { };
976 struct tc_action **actions, *act;
977 struct tc_u32_sel *sel;
978 struct tcf_exts *exts;
981 if (!tc_can_offload(priv->dev))
983 if (!priv->dma_cap.frpsel)
/* Build the u32 selector + exts + action arrays by hand; goto-cleanup
 * labels for the failure paths are on lines not shown. */
986 sel = kzalloc(sizeof(*sel) + nk * sizeof(struct tc_u32_key), GFP_KERNEL);
990 exts = kzalloc(sizeof(*exts), GFP_KERNEL);
996 actions = kzalloc(nk * sizeof(*actions), GFP_KERNEL);
1002 act = kzalloc(nk * sizeof(*act), GFP_KERNEL);
1005 goto cleanup_actions;
1008 cls_u32.command = TC_CLSU32_NEW_KNODE;
1009 cls_u32.common.chain_index = 0;
1010 cls_u32.common.protocol = htons(ETH_P_ALL);
1011 cls_u32.knode.exts = exts;
1012 cls_u32.knode.sel = sel;
1013 cls_u32.knode.handle = 0x123;
1015 exts->nr_actions = nk;
1016 exts->actions = actions;
/* Every action is a gact DROP. */
1017 for (i = 0; i < nk; i++) {
1018 struct tcf_gact *gact = to_gact(&act[i]);
1020 actions[i] = &act[i];
1021 gact->tcf_action = TC_ACT_SHOT;
/* Match 4 bytes at frame offset 6 == last 4 bytes of the source MAC. */
1026 sel->keys[0].off = 6;
1027 sel->keys[0].val = htonl(0xdeadbeef);
1028 sel->keys[0].mask = ~0x0;
1030 ret = stmmac_tc_setup_cls_u32(priv, priv, &cls_u32);
1034 attr.dst = priv->dev->dev_addr;
1037 ret = __stmmac_test_loopback(priv, &attr);
1038 ret = ret ? 0 : -EINVAL; /* Shall NOT receive packet */
1040 cls_u32.command = TC_CLSU32_DELETE_KNODE;
1041 stmmac_tc_setup_cls_u32(priv, priv, &cls_u32);
/* Stub when CONFIG_NET_CLS_ACT is disabled; the #else and the stub's
 * body (presumably -EOPNOTSUPP) are on lines not shown. */
1054 static int stmmac_test_rxp(struct stmmac_priv *priv)
/* Per-descriptor Source Address Insertion test: transmit with the SA
 * removed from the frame and sarc_type 0x1 (insert), expecting the MAC
 * to add our address so the loopback validator still matches. */
1060 static int stmmac_test_desc_sai(struct stmmac_priv *priv)
1062 unsigned char src[ETH_ALEN] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
1063 struct stmmac_packet_attrs attr = { };
/* NOTE(review): gate is dma_cap.vlins here and in the three sibling
 * SA tests — looks surprising for an SA feature; confirm vs full source. */
1066 if (!priv->dma_cap.vlins)
1069 attr.remove_sa = true;
1072 attr.dst = priv->dev->dev_addr;
1074 priv->sarc_type = 0x1;
1076 ret = __stmmac_test_loopback(priv, &attr);
1078 priv->sarc_type = 0x0;
/* Per-descriptor Source Address Replacement test: keep the (zero) SA in
 * the frame and use sarc_type 0x2 (replace). */
1082 static int stmmac_test_desc_sar(struct stmmac_priv *priv)
1084 unsigned char src[ETH_ALEN] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
1085 struct stmmac_packet_attrs attr = { };
1088 if (!priv->dma_cap.vlins)
1093 attr.dst = priv->dev->dev_addr;
1095 priv->sarc_type = 0x2;
1097 ret = __stmmac_test_loopback(priv, &attr);
1099 priv->sarc_type = 0x0;
/* Register-based SA Insertion test: program mode 0x2 via
 * stmmac_sarc_configure() instead of per-descriptor flags, transmit
 * with the SA removed, then restore mode 0x0. */
1103 static int stmmac_test_reg_sai(struct stmmac_priv *priv)
1105 unsigned char src[ETH_ALEN] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
1106 struct stmmac_packet_attrs attr = { };
1109 if (!priv->dma_cap.vlins)
1112 attr.remove_sa = true;
1115 attr.dst = priv->dev->dev_addr;
1117 if (stmmac_sarc_configure(priv, priv->ioaddr, 0x2))
1120 ret = __stmmac_test_loopback(priv, &attr);
1122 stmmac_sarc_configure(priv, priv->ioaddr, 0x0);
/* Register-based SA Replacement test: same as above with mode 0x3 and
 * the (zero) SA left in the frame. */
1126 static int stmmac_test_reg_sar(struct stmmac_priv *priv)
1128 unsigned char src[ETH_ALEN] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
1129 struct stmmac_packet_attrs attr = { };
1132 if (!priv->dma_cap.vlins)
1137 attr.dst = priv->dev->dev_addr;
1139 if (stmmac_sarc_configure(priv, priv->ioaddr, 0x3))
1142 ret = __stmmac_test_loopback(priv, &attr);
1144 stmmac_sarc_configure(priv, priv->ioaddr, 0x0);
/* VLAN TX offload test (C-VLAN or S-VLAN per @svlan): hand the stack an
 * untagged packet with a hwaccel tag (VID 0x123) and verify the frame
 * comes back with the tag the hardware inserted. */
1148 static int stmmac_test_vlanoff_common(struct stmmac_priv *priv, bool svlan)
1150 struct stmmac_packet_attrs attr = { };
1151 struct stmmac_test_priv *tpriv;
1152 struct sk_buff *skb = NULL;
1156 if (!priv->dma_cap.vlins)
1159 tpriv = kzalloc(sizeof(*tpriv), GFP_KERNEL);
1163 proto = svlan ? ETH_P_8021AD : ETH_P_8021Q;
1166 tpriv->double_vlan = svlan;
1167 init_completion(&tpriv->comp);
/* For S-VLAN the returned frame is seen as 802.1Q after the outer tag
 * is stripped; for C-VLAN it is plain IP. */
1169 tpriv->pt.type = svlan ? htons(ETH_P_8021Q) : htons(ETH_P_IP);
1170 tpriv->pt.func = stmmac_test_vlan_validate;
1171 tpriv->pt.dev = priv->dev;
1172 tpriv->pt.af_packet_priv = tpriv;
1173 tpriv->packet = &attr;
1174 tpriv->vlan_id = 0x123;
1175 dev_add_pack(&tpriv->pt);
1177 ret = vlan_vid_add(priv->dev, htons(proto), tpriv->vlan_id);
1181 attr.dst = priv->dev->dev_addr;
1183 skb = stmmac_test_get_udp_skb(priv, &attr);
/* Tag goes in skb metadata only — HW must insert it on TX. */
1189 __vlan_hwaccel_put_tag(skb, htons(proto), tpriv->vlan_id);
1190 skb->protocol = htons(proto);
1192 skb_set_queue_mapping(skb, 0);
1193 ret = dev_queue_xmit(skb);
1197 wait_for_completion_timeout(&tpriv->comp, STMMAC_LB_TIMEOUT);
1198 ret = tpriv->ok ? 0 : -ETIMEDOUT;
1201 vlan_vid_del(priv->dev, htons(proto), tpriv->vlan_id);
1203 dev_remove_pack(&tpriv->pt);
/* C-VLAN TX insertion wrapper. */
1208 static int stmmac_test_vlanoff(struct stmmac_priv *priv)
1210 return stmmac_test_vlanoff_common(priv, false);
/* S-VLAN TX insertion wrapper; needs double-VLAN capability. */
1213 static int stmmac_test_svlanoff(struct stmmac_priv *priv)
1215 if (!priv->dma_cap.dvlan)
1217 return stmmac_test_vlanoff_common(priv, true);
1220 #ifdef CONFIG_NET_CLS_ACT
/* L3 (IPv4 address) filter test: build a flow_cls_offload DROP rule
 * matching @dst/@src with the given masks, verify the packet is
 * received before the rule is installed and NOT received after, then
 * destroy the rule. RSS is temporarily disabled so queue steering does
 * not interfere, and restored at the end. */
1221 static int __stmmac_test_l3filt(struct stmmac_priv *priv, u32 dst, u32 src,
1222 u32 dst_mask, u32 src_mask)
1224 struct flow_dissector_key_ipv4_addrs key, mask;
1225 unsigned long dummy_cookie = 0xdeadbeef;
1226 struct stmmac_packet_attrs attr = { };
1227 struct flow_dissector *dissector;
1228 struct flow_cls_offload *cls;
1229 struct flow_rule *rule;
1232 if (!tc_can_offload(priv->dev))
1234 if (!priv->dma_cap.l3l4fnum)
1236 if (priv->rss.enable)
1237 stmmac_rss_configure(priv, priv->hw, NULL,
1238 priv->plat->rx_queues_to_use);
/* Hand-built dissector advertising only the IPV4_ADDRS key. */
1240 dissector = kzalloc(sizeof(*dissector), GFP_KERNEL);
1246 dissector->used_keys |= (1 << FLOW_DISSECTOR_KEY_IPV4_ADDRS);
1247 dissector->offset[FLOW_DISSECTOR_KEY_IPV4_ADDRS] = 0;
1249 cls = kzalloc(sizeof(*cls), GFP_KERNEL);
1252 goto cleanup_dissector;
1255 cls->common.chain_index = 0;
1256 cls->command = FLOW_CLS_REPLACE;
1257 cls->cookie = dummy_cookie;
1259 rule = kzalloc(struct_size(rule, action.entries, 1), GFP_KERNEL);
/* NOTE(review): key/mask live on this stack frame while referenced by
 * the rule — fine here since the rule does not outlive the function. */
1265 rule->match.dissector = dissector;
1266 rule->match.key = (void *)&key;
1267 rule->match.mask = (void *)&mask;
1269 key.src = htonl(src);
1270 key.dst = htonl(dst);
1271 mask.src = src_mask;
1272 mask.dst = dst_mask;
1276 rule->action.entries[0].id = FLOW_ACTION_DROP;
1277 rule->action.num_entries = 1;
/* attr.ip_dst/ip_src assignments are on lines not shown. */
1279 attr.dst = priv->dev->dev_addr;
1283 /* Shall receive packet */
1284 ret = __stmmac_test_loopback(priv, &attr);
1288 ret = stmmac_tc_setup_cls(priv, priv, cls);
1292 /* Shall NOT receive packet */
1293 ret = __stmmac_test_loopback(priv, &attr);
1294 ret = ret ? 0 : -EINVAL;
1296 cls->command = FLOW_CLS_DESTROY;
1297 stmmac_tc_setup_cls(priv, priv, cls);
/* Restore RSS if we disabled it above. */
1305 if (priv->rss.enable) {
1306 stmmac_rss_configure(priv, priv->hw, &priv->rss,
1307 priv->plat->rx_queues_to_use);
/* Stub when CONFIG_NET_CLS_ACT is disabled; #else and the body
 * (presumably -EOPNOTSUPP) are on lines not shown. */
1313 static int __stmmac_test_l3filt(struct stmmac_priv *priv, u32 dst, u32 src,
1314 u32 dst_mask, u32 src_mask)
/* Drop-by-destination-IP wrapper. */
1320 static int stmmac_test_l3filt_da(struct stmmac_priv *priv)
1322 u32 addr = 0x10203040;
1324 return __stmmac_test_l3filt(priv, addr, 0, ~0, 0);
/* Drop-by-source-IP wrapper. */
1327 static int stmmac_test_l3filt_sa(struct stmmac_priv *priv)
1329 u32 addr = 0x10203040;
1331 return __stmmac_test_l3filt(priv, 0, addr, 0, ~0);
1334 #ifdef CONFIG_NET_CLS_ACT
/* L4 (TCP/UDP port) filter test: same receive-then-drop scheme as
 * __stmmac_test_l3filt, but matching on BASIC (ip_proto) + PORTS keys.
 * The key/mask pairs are packed into anonymous structs aligned like the
 * flow dissector expects. */
1335 static int __stmmac_test_l4filt(struct stmmac_priv *priv, u32 dst, u32 src,
1336 u32 dst_mask, u32 src_mask, bool udp)
1339 struct flow_dissector_key_basic bkey;
1340 struct flow_dissector_key_ports key;
1341 } __aligned(BITS_PER_LONG / 8) keys;
1343 struct flow_dissector_key_basic bmask;
1344 struct flow_dissector_key_ports mask;
1345 } __aligned(BITS_PER_LONG / 8) masks;
1346 unsigned long dummy_cookie = 0xdeadbeef;
1347 struct stmmac_packet_attrs attr = { };
1348 struct flow_dissector *dissector;
1349 struct flow_cls_offload *cls;
1350 struct flow_rule *rule;
1353 if (!tc_can_offload(priv->dev))
1355 if (!priv->dma_cap.l3l4fnum)
/* Disable RSS during the test so steering can't mask the drop. */
1357 if (priv->rss.enable)
1358 stmmac_rss_configure(priv, priv->hw, NULL,
1359 priv->plat->rx_queues_to_use);
1361 dissector = kzalloc(sizeof(*dissector), GFP_KERNEL);
1367 dissector->used_keys |= (1 << FLOW_DISSECTOR_KEY_BASIC);
1368 dissector->used_keys |= (1 << FLOW_DISSECTOR_KEY_PORTS);
1369 dissector->offset[FLOW_DISSECTOR_KEY_BASIC] = 0;
1370 dissector->offset[FLOW_DISSECTOR_KEY_PORTS] = offsetof(typeof(keys), key);
1372 cls = kzalloc(sizeof(*cls), GFP_KERNEL);
1375 goto cleanup_dissector;
1378 cls->common.chain_index = 0;
1379 cls->command = FLOW_CLS_REPLACE;
1380 cls->cookie = dummy_cookie;
1382 rule = kzalloc(struct_size(rule, action.entries, 1), GFP_KERNEL);
1388 rule->match.dissector = dissector;
1389 rule->match.key = (void *)&keys;
1390 rule->match.mask = (void *)&masks;
1392 keys.bkey.ip_proto = udp ? IPPROTO_UDP : IPPROTO_TCP;
1393 keys.key.src = htons(src);
1394 keys.key.dst = htons(dst);
1395 masks.mask.src = src_mask;
1396 masks.mask.dst = dst_mask;
1400 rule->action.entries[0].id = FLOW_ACTION_DROP;
1401 rule->action.num_entries = 1;
/* attr.sport/dport/tcp assignments are on lines not shown. */
1403 attr.dst = priv->dev->dev_addr;
1409 /* Shall receive packet */
1410 ret = __stmmac_test_loopback(priv, &attr);
1414 ret = stmmac_tc_setup_cls(priv, priv, cls);
1418 /* Shall NOT receive packet */
1419 ret = __stmmac_test_loopback(priv, &attr);
1420 ret = ret ? 0 : -EINVAL;
1422 cls->command = FLOW_CLS_DESTROY;
1423 stmmac_tc_setup_cls(priv, priv, cls);
/* Restore RSS if we disabled it above. */
1431 if (priv->rss.enable) {
1432 stmmac_rss_configure(priv, priv->hw, &priv->rss,
1433 priv->plat->rx_queues_to_use);
/* Stub when CONFIG_NET_CLS_ACT is disabled. */
1439 static int __stmmac_test_l4filt(struct stmmac_priv *priv, u32 dst, u32 src,
1440 u32 dst_mask, u32 src_mask, bool udp)
/* Four wrappers: drop by TCP/UDP destination or source port 0x123. */
1446 static int stmmac_test_l4filt_da_tcp(struct stmmac_priv *priv)
1448 u16 dummy_port = 0x123;
1450 return __stmmac_test_l4filt(priv, dummy_port, 0, ~0, 0, false);
1453 static int stmmac_test_l4filt_sa_tcp(struct stmmac_priv *priv)
1455 u16 dummy_port = 0x123;
1457 return __stmmac_test_l4filt(priv, 0, dummy_port, 0, ~0, false);
1460 static int stmmac_test_l4filt_da_udp(struct stmmac_priv *priv)
1462 u16 dummy_port = 0x123;
1464 return __stmmac_test_l4filt(priv, dummy_port, 0, ~0, 0, true);
1467 static int stmmac_test_l4filt_sa_udp(struct stmmac_priv *priv)
1469 u16 dummy_port = 0x123;
1471 return __stmmac_test_l4filt(priv, 0, dummy_port, 0, ~0, true);
/* packet_type handler for the ARP offload test: accept an ARP REPLY
 * addressed to the MAC we used as the request's source, then signal
 * the completion. */
1474 static int stmmac_test_arp_validate(struct sk_buff *skb,
1475 struct net_device *ndev,
1476 struct packet_type *pt,
1477 struct net_device *orig_ndev)
1479 struct stmmac_test_priv *tpriv = pt->af_packet_priv;
1480 struct ethhdr *ehdr;
1481 struct arphdr *ahdr;
1483 ehdr = (struct ethhdr *)skb_mac_header(skb);
1484 if (!ether_addr_equal(ehdr->h_dest, tpriv->packet->src))
1487 ahdr = arp_hdr(skb);
1488 if (ahdr->ar_op != htons(ARPOP_REPLY))
1492 complete(&tpriv->comp);
/* ARP offload test: program ip_addr into the HW ARP responder, send a
 * broadcast ARP request for it, and expect the hardware to generate the
 * ARP reply (caught by stmmac_test_arp_validate). Promiscuous mode is
 * enabled so we can see our own request/reply traffic. */
1498 static int stmmac_test_arpoffload(struct stmmac_priv *priv)
1500 unsigned char src[ETH_ALEN] = {0x01, 0x02, 0x03, 0x04, 0x05, 0x06};
1501 unsigned char dst[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
1502 struct stmmac_packet_attrs attr = { };
1503 struct stmmac_test_priv *tpriv;
1504 struct sk_buff *skb = NULL;
1505 u32 ip_addr = 0xdeadcafe;
1506 u32 ip_src = 0xdeadbeef;
1509 if (!priv->dma_cap.arpoffsel)
1512 tpriv = kzalloc(sizeof(*tpriv), GFP_KERNEL);
1517 init_completion(&tpriv->comp);
1519 tpriv->pt.type = htons(ETH_P_ARP);
1520 tpriv->pt.func = stmmac_test_arp_validate;
1521 tpriv->pt.dev = priv->dev;
1522 tpriv->pt.af_packet_priv = tpriv;
1523 tpriv->packet = &attr;
1524 dev_add_pack(&tpriv->pt);
/* attr.src/dst assignments sit on lines not shown. */
1527 attr.ip_src = ip_src;
1529 attr.ip_dst = ip_addr;
1531 skb = stmmac_test_get_arp_skb(priv, &attr);
1537 ret = stmmac_set_arp_offload(priv, priv->hw, true, ip_addr);
1541 ret = dev_set_promiscuity(priv->dev, 1);
1545 skb_set_queue_mapping(skb, 0);
1546 ret = dev_queue_xmit(skb);
1548 goto cleanup_promisc;
1550 wait_for_completion_timeout(&tpriv->comp, STMMAC_LB_TIMEOUT);
1551 ret = tpriv->ok ? 0 : -ETIMEDOUT;
/* Cleanup: undo promiscuity, disable ARP offload, unhook RX. */
1554 dev_set_promiscuity(priv->dev, -1);
1556 stmmac_set_arp_offload(priv, priv->hw, false, 0x0);
1557 dev_remove_pack(&tpriv->pt);
/* Jumbo frame test on one TX queue: loop back a frame padded to the RX
 * buffer size (or max MTU on XGMAC, which can split a packet across
 * multiple RX descriptors), minus room for the FCS. */
1562 static int __stmmac_test_jumbo(struct stmmac_priv *priv, u16 queue)
1564 struct stmmac_packet_attrs attr = { };
1565 int size = priv->dma_buf_sz;
1567 /* Only XGMAC has SW support for multiple RX descs in same packet */
1568 if (priv->plat->has_xgmac)
1569 size = priv->dev->max_mtu;
1571 attr.dst = priv->dev->dev_addr;
1572 attr.max_size = size - ETH_FCS_LEN;
1573 attr.queue_mapping = queue;
1575 return __stmmac_test_loopback(priv, &attr);
/* Single-queue jumbo wrapper (queue 0). */
1578 static int stmmac_test_jumbo(struct stmmac_priv *priv)
1580 return __stmmac_test_jumbo(priv, 0);
/* Multi-queue jumbo: repeat the jumbo test on every TX queue. The
 * single-queue early-exit and error handling are on lines not shown. */
1583 static int stmmac_test_mjumbo(struct stmmac_priv *priv)
1585 u32 chan, tx_cnt = priv->plat->tx_queues_to_use;
1591 for (chan = 0; chan < tx_cnt; chan++) {
1592 ret = __stmmac_test_jumbo(priv, chan);
/* Split Header test: run a UDP then a TCP loopback and require the
 * rx_split_hdr_pkt_n counter to advance after each, proving the HW
 * actually split the headers. Capability gate (presumably priv->sph)
 * is on a line not shown. */
1600 static int stmmac_test_sph(struct stmmac_priv *priv)
1602 unsigned long cnt_end, cnt_start = priv->xstats.rx_split_hdr_pkt_n;
1603 struct stmmac_packet_attrs attr = { };
1609 /* Check for UDP first */
1610 attr.dst = priv->dev->dev_addr;
1613 ret = __stmmac_test_loopback(priv, &attr);
1617 cnt_end = priv->xstats.rx_split_hdr_pkt_n;
1618 if (cnt_end <= cnt_start)
1621 /* Check for TCP now */
1622 cnt_start = cnt_end;
1624 attr.dst = priv->dev->dev_addr;
1627 ret = __stmmac_test_loopback(priv, &attr);
1631 cnt_end = priv->xstats.rx_split_hdr_pkt_n;
1632 if (cnt_end <= cnt_start)
/* Loopback mode each test needs before it runs: none, MAC-level, or
 * PHY-level (set up by stmmac_selftest_run below). */
1638 #define STMMAC_LOOPBACK_NONE 0
1639 #define STMMAC_LOOPBACK_MAC 1
1640 #define STMMAC_LOOPBACK_PHY 2
/* Registry of all selftests: ethtool-visible name (padded to align in
 * `ethtool -t` output), required loopback mode, and handler. Order
 * matters: the first entries are the loopback tests themselves, used to
 * abort early when loopback is unsupported (see stmmac_selftest_run). */
1642 static const struct stmmac_test {
1643 char name[ETH_GSTRING_LEN];
1645 int (*fn)(struct stmmac_priv *priv);
1646 } stmmac_selftests[] = {
1648 .name = "MAC Loopback ",
1649 .lb = STMMAC_LOOPBACK_MAC,
1650 .fn = stmmac_test_mac_loopback,
1652 .name = "PHY Loopback ",
1653 .lb = STMMAC_LOOPBACK_NONE, /* Test will handle it */
1654 .fn = stmmac_test_phy_loopback,
1656 .name = "MMC Counters ",
1657 .lb = STMMAC_LOOPBACK_PHY,
1658 .fn = stmmac_test_mmc,
1661 .lb = STMMAC_LOOPBACK_PHY,
1662 .fn = stmmac_test_eee,
1664 .name = "Hash Filter MC ",
1665 .lb = STMMAC_LOOPBACK_PHY,
1666 .fn = stmmac_test_hfilt,
1668 .name = "Perfect Filter UC ",
1669 .lb = STMMAC_LOOPBACK_PHY,
1670 .fn = stmmac_test_pfilt,
1672 .name = "MC Filter ",
1673 .lb = STMMAC_LOOPBACK_PHY,
1674 .fn = stmmac_test_mcfilt,
1676 .name = "UC Filter ",
1677 .lb = STMMAC_LOOPBACK_PHY,
1678 .fn = stmmac_test_ucfilt,
1680 .name = "Flow Control ",
1681 .lb = STMMAC_LOOPBACK_PHY,
1682 .fn = stmmac_test_flowctrl,
1685 .lb = STMMAC_LOOPBACK_PHY,
1686 .fn = stmmac_test_rss,
1688 .name = "VLAN Filtering ",
1689 .lb = STMMAC_LOOPBACK_PHY,
1690 .fn = stmmac_test_vlanfilt,
1692 .name = "Double VLAN Filtering",
1693 .lb = STMMAC_LOOPBACK_PHY,
1694 .fn = stmmac_test_dvlanfilt,
1696 .name = "Flexible RX Parser ",
1697 .lb = STMMAC_LOOPBACK_PHY,
1698 .fn = stmmac_test_rxp,
1700 .name = "SA Insertion (desc) ",
1701 .lb = STMMAC_LOOPBACK_PHY,
1702 .fn = stmmac_test_desc_sai,
1704 .name = "SA Replacement (desc)",
1705 .lb = STMMAC_LOOPBACK_PHY,
1706 .fn = stmmac_test_desc_sar,
1708 .name = "SA Insertion (reg) ",
1709 .lb = STMMAC_LOOPBACK_PHY,
1710 .fn = stmmac_test_reg_sai,
/* NOTE(review): this name is one space shorter than its siblings
 * ("SA Replacement (reg)") — cosmetic misalignment in ethtool output. */
1712 .name = "SA Replacement (reg)",
1713 .lb = STMMAC_LOOPBACK_PHY,
1714 .fn = stmmac_test_reg_sar,
1716 .name = "VLAN TX Insertion ",
1717 .lb = STMMAC_LOOPBACK_PHY,
1718 .fn = stmmac_test_vlanoff,
1720 .name = "SVLAN TX Insertion ",
1721 .lb = STMMAC_LOOPBACK_PHY,
1722 .fn = stmmac_test_svlanoff,
1724 .name = "L3 DA Filtering ",
1725 .lb = STMMAC_LOOPBACK_PHY,
1726 .fn = stmmac_test_l3filt_da,
1728 .name = "L3 SA Filtering ",
1729 .lb = STMMAC_LOOPBACK_PHY,
1730 .fn = stmmac_test_l3filt_sa,
1732 .name = "L4 DA TCP Filtering ",
1733 .lb = STMMAC_LOOPBACK_PHY,
1734 .fn = stmmac_test_l4filt_da_tcp,
1736 .name = "L4 SA TCP Filtering ",
1737 .lb = STMMAC_LOOPBACK_PHY,
1738 .fn = stmmac_test_l4filt_sa_tcp,
1740 .name = "L4 DA UDP Filtering ",
1741 .lb = STMMAC_LOOPBACK_PHY,
1742 .fn = stmmac_test_l4filt_da_udp,
1744 .name = "L4 SA UDP Filtering ",
1745 .lb = STMMAC_LOOPBACK_PHY,
1746 .fn = stmmac_test_l4filt_sa_udp,
1748 .name = "ARP Offload ",
1749 .lb = STMMAC_LOOPBACK_PHY,
1750 .fn = stmmac_test_arpoffload,
1752 .name = "Jumbo Frame ",
1753 .lb = STMMAC_LOOPBACK_PHY,
1754 .fn = stmmac_test_jumbo,
1756 .name = "Multichannel Jumbo ",
1757 .lb = STMMAC_LOOPBACK_PHY,
1758 .fn = stmmac_test_mjumbo,
1760 .name = "Split Header ",
1761 .lb = STMMAC_LOOPBACK_PHY,
1762 .fn = stmmac_test_sph,
/* ethtool self-test entry point: only offline tests with link up are
 * allowed. Carrier is forced off during the run to keep the line quiet;
 * for each test the required loopback mode is set up, the test runs,
 * a failure sets ETH_TEST_FL_FAILED in etest->flags and the per-test
 * result in buf[], and loopback is torn down again. Carrier is restored
 * at the end. Per-iteration details (ret handling, buf[] writes) are on
 * lines not shown. */
1766 void stmmac_selftest_run(struct net_device *dev,
1767 struct ethtool_test *etest, u64 *buf)
1769 struct stmmac_priv *priv = netdev_priv(dev);
1770 int count = stmmac_selftest_get_count(priv);
1771 int carrier = netif_carrier_ok(dev);
1774 memset(buf, 0, sizeof(*buf) * count);
/* Restart the per-packet id sequence for this run. */
1775 stmmac_test_next_id = 0;
1777 if (etest->flags != ETH_TEST_FL_OFFLINE) {
1778 netdev_err(priv->dev, "Only offline tests are supported\n");
1779 etest->flags |= ETH_TEST_FL_FAILED;
1781 } else if (!carrier) {
1782 netdev_err(priv->dev, "You need valid Link to execute tests\n");
1783 etest->flags |= ETH_TEST_FL_FAILED;
1787 /* We don't want extra traffic */
1788 netif_carrier_off(dev);
1790 /* Wait for queues drain */
1793 for (i = 0; i < count; i++) {
/* Set up the loopback mode this test declared. */
1796 switch (stmmac_selftests[i].lb) {
1797 case STMMAC_LOOPBACK_PHY:
1800 ret = phy_loopback(dev->phydev, true);
1804 case STMMAC_LOOPBACK_MAC:
1805 ret = stmmac_set_mac_loopback(priv, priv->ioaddr, true);
1807 case STMMAC_LOOPBACK_NONE:
1815 * First tests will always be MAC / PHY loobpack. If any of
1816 * them is not supported we abort earlier.
1819 netdev_err(priv->dev, "Loopback is not supported\n");
1820 etest->flags |= ETH_TEST_FL_FAILED;
/* Run the test; -EOPNOTSUPP (feature absent) is not a failure. */
1824 ret = stmmac_selftests[i].fn(priv);
1825 if (ret && (ret != -EOPNOTSUPP))
1826 etest->flags |= ETH_TEST_FL_FAILED;
/* Tear the loopback mode back down. */
1829 switch (stmmac_selftests[i].lb) {
1830 case STMMAC_LOOPBACK_PHY:
1833 ret = phy_loopback(dev->phydev, false);
1837 case STMMAC_LOOPBACK_MAC:
1838 stmmac_set_mac_loopback(priv, priv->ioaddr, false);
1845 /* Restart everything */
1847 netif_carrier_on(dev);
/* Fill the ethtool test-name strings: one ETH_GSTRING_LEN slot per
 * test, formatted as "NN. name". Cursor `p` initialization (to data)
 * is on a line not shown. */
1850 void stmmac_selftest_get_strings(struct stmmac_priv *priv, u8 *data)
1855 for (i = 0; i < stmmac_selftest_get_count(priv); i++) {
1856 snprintf(p, ETH_GSTRING_LEN, "%2d. %s", i + 1,
1857 stmmac_selftests[i].name);
1858 p += ETH_GSTRING_LEN;
/* Number of registered selftests, reported to ethtool. */
1862 int stmmac_selftest_get_count(struct stmmac_priv *priv)
1864 return ARRAY_SIZE(stmmac_selftests);