1 // SPDX-License-Identifier: GPL-2.0
3 * Copyright (c) 2019 Synopsys, Inc. and/or its affiliates.
4 * stmmac Selftests Support
6 * Author: Jose Abreu <joabreu@synopsys.com>
9 #include <linux/completion.h>
10 #include <linux/ethtool.h>
12 #include <linux/phy.h>
13 #include <linux/udp.h>
14 #include <net/pkt_cls.h>
17 #include <net/tc_act/tc_gact.h>
26 #define STMMAC_TEST_PKT_SIZE (sizeof(struct ethhdr) + sizeof(struct iphdr) + \
27 sizeof(struct stmmachdr))
28 #define STMMAC_TEST_PKT_MAGIC 0xdeadcafecafedeadULL
29 #define STMMAC_LB_TIMEOUT msecs_to_jiffies(200)
/* Describes one synthetic loopback test packet (addresses, ports, VLAN
 * tags, sizing, wait behavior). NOTE(review): this excerpt is missing
 * lines, so the struct members are not visible here.
 */
31 struct stmmac_packet_attrs {
/* Monotonic id stamped into each generated packet so the RX validator
 * can match a received frame to the one just transmitted.
 */
53 static u8 stmmac_test_next_id;
/* Build a self-describing UDP (or TCP) test skb according to @attr:
 * Ethernet header (optionally without source address, optionally with
 * one or two VLAN tags), IPv4 header, UDP/TCP header, and a trailing
 * stmmachdr carrying a magic value and a sequence id. Returns the skb
 * ready for dev_queue_xmit(), or NULL-ish on allocation failure.
 * NOTE(review): excerpt is missing lines (error paths, some branches).
 */
55 static struct sk_buff *stmmac_test_get_udp_skb(struct stmmac_priv *priv,
56 struct stmmac_packet_attrs *attr)
58 struct sk_buff *skb = NULL;
59 struct udphdr *uhdr = NULL;
60 struct tcphdr *thdr = NULL;
61 struct stmmachdr *shdr;
/* Total frame size: payload + fixed eth/ip/stmmac headers, plus the
 * L4 header, clamped up to max_size when a jumbo-style frame is asked.
 */
66 size = attr->size + STMMAC_TEST_PKT_SIZE;
74 size += sizeof(struct tcphdr);
76 size += sizeof(struct udphdr);
78 if (attr->max_size && (attr->max_size > size))
79 size = attr->max_size;
81 skb = netdev_alloc_skb_ip_align(priv->dev, size);
/* Push room for the Ethernet header; +8 / +4 account for double /
 * single VLAN tags, -6 drops the source MAC when remove_sa is set.
 */
88 ehdr = skb_push(skb, ETH_HLEN + 8);
90 ehdr = skb_push(skb, ETH_HLEN + 4);
91 else if (attr->remove_sa)
92 ehdr = skb_push(skb, ETH_HLEN - 6);
94 ehdr = skb_push(skb, ETH_HLEN);
95 skb_reset_mac_header(skb);
97 skb_set_network_header(skb, skb->len);
98 ihdr = skb_put(skb, sizeof(*ihdr));
100 skb_set_transport_header(skb, skb->len);
102 thdr = skb_put(skb, sizeof(*thdr));
104 uhdr = skb_put(skb, sizeof(*uhdr));
/* Fill MAC addresses; zeroed first, then overridden from @attr. */
106 if (!attr->remove_sa)
107 eth_zero_addr(ehdr->h_source);
108 eth_zero_addr(ehdr->h_dest);
109 if (attr->src && !attr->remove_sa)
110 ether_addr_copy(ehdr->h_source, attr->src);
112 ether_addr_copy(ehdr->h_dest, attr->dst);
/* EtherType: written via ehdr normally, or via raw __be16 slots when
 * the source address was removed and field offsets shift by 6 bytes.
 */
114 if (!attr->remove_sa) {
115 ehdr->h_proto = htons(ETH_P_IP);
117 __be16 *ptr = (__be16 *)ehdr;
120 ptr[3] = htons(ETH_P_IP);
/* VLAN tag insertion: outer 802.1Q tag, upgraded to 802.1AD outer +
 * 802.1Q inner when attr->vlan > 1 (double VLAN).
 */
126 if (!attr->remove_sa) {
127 tag = (void *)ehdr + ETH_HLEN;
128 proto = (void *)ehdr + (2 * ETH_ALEN);
130 tag = (void *)ehdr + ETH_HLEN - 6;
131 proto = (void *)ehdr + ETH_ALEN;
134 proto[0] = htons(ETH_P_8021Q);
135 tag[0] = htons(attr->vlan_id_out);
136 tag[1] = htons(ETH_P_IP);
137 if (attr->vlan > 1) {
138 proto[0] = htons(ETH_P_8021AD);
139 tag[1] = htons(ETH_P_8021Q);
140 tag[2] = htons(attr->vlan_id_in);
141 tag[3] = htons(ETH_P_IP);
/* L4 header fields for TCP ... */
146 thdr->source = htons(attr->sport);
147 thdr->dest = htons(attr->dport);
148 thdr->doff = sizeof(struct tcphdr) / 4;
/* ... or UDP; length recomputed from max_size for padded frames. */
151 uhdr->source = htons(attr->sport);
152 uhdr->dest = htons(attr->dport);
153 uhdr->len = htons(sizeof(*shdr) + sizeof(*uhdr) + attr->size);
155 uhdr->len = htons(attr->max_size -
156 (sizeof(*ihdr) + sizeof(*ehdr)));
/* IPv4 header: protocol, total length, addresses. */
164 ihdr->protocol = IPPROTO_TCP;
166 ihdr->protocol = IPPROTO_UDP;
167 iplen = sizeof(*ihdr) + sizeof(*shdr) + attr->size;
169 iplen += sizeof(*thdr);
171 iplen += sizeof(*uhdr);
174 iplen = attr->max_size - sizeof(*ehdr);
176 ihdr->tot_len = htons(iplen);
178 ihdr->saddr = htonl(attr->ip_src);
179 ihdr->daddr = htonl(attr->ip_dst);
/* Trailing test header: magic for packet recognition plus a per-call
 * sequence id recorded back into @attr for the RX-side match.
 */
184 shdr = skb_put(skb, sizeof(*shdr));
186 shdr->magic = cpu_to_be64(STMMAC_TEST_PKT_MAGIC);
187 attr->id = stmmac_test_next_id;
188 shdr->id = stmmac_test_next_id++;
191 skb_put(skb, attr->size);
192 if (attr->max_size && (attr->max_size > skb->len))
193 skb_put(skb, attr->max_size - skb->len);
/* Hand checksum computation to the NIC (CHECKSUM_PARTIAL); seed the
 * TCP pseudo-header checksum or use the UDP hw-csum helper.
 */
196 skb->ip_summed = CHECKSUM_PARTIAL;
198 thdr->check = ~tcp_v4_check(skb->len, ihdr->saddr, ihdr->daddr, 0);
199 skb->csum_start = skb_transport_header(skb) - skb->head;
200 skb->csum_offset = offsetof(struct tcphdr, check);
202 udp4_hwcsum(skb, ihdr->saddr, ihdr->daddr);
205 skb->protocol = htons(ETH_P_IP);
206 skb->pkt_type = PACKET_HOST;
207 skb->dev = priv->dev;
/* Build an ARP request skb (src -> dst MAC, ip_src -> ip_dst) for the
 * ARP offload selftest. Returns the skb or NULL if arp_create() fails.
 * NOTE(review): excerpt is missing lines (e.g. the NULL check body).
 */
212 static struct sk_buff *stmmac_test_get_arp_skb(struct stmmac_priv *priv,
213 struct stmmac_packet_attrs *attr)
215 __be32 ip_src = htonl(attr->ip_src);
216 __be32 ip_dst = htonl(attr->ip_dst);
217 struct sk_buff *skb = NULL;
219 skb = arp_create(ARPOP_REQUEST, ETH_P_ARP, ip_dst, priv->dev, ip_src,
220 NULL, attr->src, attr->dst);
224 skb->pkt_type = PACKET_HOST;
225 skb->dev = priv->dev;
/* Per-test context shared with the packet_type RX handler: the packet
 * attributes being validated, the registered handler itself, and a
 * completion the handler fires on a successful match.
 * NOTE(review): excerpt is missing lines; further members (ok,
 * double_vlan, vlan_id per later usage) are not visible here.
 */
230 struct stmmac_test_priv {
231 struct stmmac_packet_attrs *packet;
232 struct packet_type pt;
233 struct completion comp;
/* packet_type receive hook for the generic loopback tests: verifies the
 * looped-back frame's MAC addresses, L3/L4 headers, magic and sequence
 * id against the attributes saved in tpriv->packet, and completes
 * tpriv->comp on a full match. Non-matching frames are silently
 * dropped. NOTE(review): excerpt is missing lines (drop/out paths).
 */
239 static int stmmac_test_loopback_validate(struct sk_buff *skb,
240 struct net_device *ndev,
241 struct packet_type *pt,
242 struct net_device *orig_ndev)
244 struct stmmac_test_priv *tpriv = pt->af_packet_priv;
245 struct stmmachdr *shdr;
/* Private copy + linear data so headers can be read directly. */
251 skb = skb_unshare(skb, GFP_ATOMIC);
255 if (skb_linearize(skb))
257 if (skb_headlen(skb) < (STMMAC_TEST_PKT_SIZE - ETH_HLEN))
260 ehdr = (struct ethhdr *)skb_mac_header(skb);
261 if (tpriv->packet->dst) {
262 if (!ether_addr_equal(ehdr->h_dest, tpriv->packet->dst))
/* sarc tests expect source == dest (HW source-address replacement). */
265 if (tpriv->packet->sarc) {
266 if (!ether_addr_equal(ehdr->h_source, ehdr->h_dest))
268 } else if (tpriv->packet->src) {
269 if (!ether_addr_equal(ehdr->h_source, tpriv->packet->src))
/* Skip the extra 4-byte outer tag for double-VLAN frames. */
274 if (tpriv->double_vlan)
275 ihdr = (struct iphdr *)(skb_network_header(skb) + 4);
277 if (tpriv->packet->tcp) {
278 if (ihdr->protocol != IPPROTO_TCP)
281 thdr = (struct tcphdr *)((u8 *)ihdr + 4 * ihdr->ihl);
282 if (thdr->dest != htons(tpriv->packet->dport))
285 shdr = (struct stmmachdr *)((u8 *)thdr + sizeof(*thdr));
287 if (ihdr->protocol != IPPROTO_UDP)
290 uhdr = (struct udphdr *)((u8 *)ihdr + 4 * ihdr->ihl);
291 if (uhdr->dest != htons(tpriv->packet->dport))
294 shdr = (struct stmmachdr *)((u8 *)uhdr + sizeof(*uhdr));
/* Final identity checks: magic, optional RSS hash, sequence id. */
297 if (shdr->magic != cpu_to_be64(STMMAC_TEST_PKT_MAGIC))
299 if (tpriv->packet->exp_hash && !skb->hash)
301 if (tpriv->packet->id != shdr->id)
305 complete(&tpriv->comp);
/* Core loopback driver used by most selftests: registers an ETH_P_IP
 * packet_type validator, transmits one UDP test skb built from @attr,
 * and waits (up to attr->timeout, default STMMAC_LB_TIMEOUT) for the
 * validator to complete. Returns 0 on success, -ETIMEDOUT when the
 * packet never came back, or a setup error. dont_wait skips both the
 * handler registration and the wait (fire-and-forget, e.g. flow ctrl).
 * NOTE(review): excerpt is missing lines (error paths, kfree).
 */
311 static int __stmmac_test_loopback(struct stmmac_priv *priv,
312 struct stmmac_packet_attrs *attr)
314 struct stmmac_test_priv *tpriv;
315 struct sk_buff *skb = NULL;
318 tpriv = kzalloc(sizeof(*tpriv), GFP_KERNEL);
323 init_completion(&tpriv->comp);
325 tpriv->pt.type = htons(ETH_P_IP);
326 tpriv->pt.func = stmmac_test_loopback_validate;
327 tpriv->pt.dev = priv->dev;
328 tpriv->pt.af_packet_priv = tpriv;
329 tpriv->packet = attr;
331 if (!attr->dont_wait)
332 dev_add_pack(&tpriv->pt);
334 skb = stmmac_test_get_udp_skb(priv, attr);
340 skb_set_queue_mapping(skb, attr->queue_mapping);
341 ret = dev_queue_xmit(skb);
349 attr->timeout = STMMAC_LB_TIMEOUT;
351 wait_for_completion_timeout(&tpriv->comp, attr->timeout);
352 ret = tpriv->ok ? 0 : -ETIMEDOUT;
355 if (!attr->dont_wait)
356 dev_remove_pack(&tpriv->pt);
/* MAC-level loopback: send a packet to our own address and expect it
 * back (loopback mode is enabled by the selftest runner).
 */
361 static int stmmac_test_mac_loopback(struct stmmac_priv *priv)
363 struct stmmac_packet_attrs attr = { };
365 attr.dst = priv->dev->dev_addr;
366 return __stmmac_test_loopback(priv, &attr);
/* PHY-level loopback: enables loopback on the PHY itself, runs the
 * same send-to-self test, then disables it again.
 * NOTE(review): excerpt is missing lines (error checks between calls).
 */
369 static int stmmac_test_phy_loopback(struct stmmac_priv *priv)
371 struct stmmac_packet_attrs attr = { };
374 if (!priv->dev->phydev)
377 ret = phy_loopback(priv->dev->phydev, true);
381 attr.dst = priv->dev->dev_addr;
382 ret = __stmmac_test_loopback(priv, &attr);
384 phy_loopback(priv->dev->phydev, false);
/* MMC hardware-counter sanity check: snapshot the counters, run a MAC
 * loopback, and verify the good-TX-frame counter advanced. Requires
 * the RMON feature. NOTE(review): 'initial' appears to stay zeroed in
 * the visible lines; the read into it may be in the elided lines.
 */
388 static int stmmac_test_mmc(struct stmmac_priv *priv)
390 struct stmmac_counters initial, final;
393 memset(&initial, 0, sizeof(initial));
394 memset(&final, 0, sizeof(final));
396 if (!priv->dma_cap.rmon)
399 /* Save previous results into internal struct */
400 stmmac_mmc_read(priv, priv->mmcaddr, &priv->mmc);
402 ret = stmmac_test_mac_loopback(priv);
406 /* These will be loopback results so no need to save them */
407 stmmac_mmc_read(priv, priv->mmcaddr, &final);
410 * The number of MMC counters available depends on HW configuration
411 * so we just use this one to validate the feature. I hope there is
412 * not a version without this counter.
414 if (final.mmc_tx_framecount_g <= initial.mmc_tx_framecount_g)
/* EEE (Energy Efficient Ethernet) test: snapshot extra stats, run a
 * loopback, then confirm the TX path both entered and exited LPI mode
 * by comparing the LPI interrupt counters. Requires EEE capability and
 * an active EEE session. NOTE(review): excerpt is missing lines
 * (retry loop around the LPI-entry check, cleanup labels).
 */
420 static int stmmac_test_eee(struct stmmac_priv *priv)
422 struct stmmac_extra_stats *initial, *final;
426 if (!priv->dma_cap.eee || !priv->eee_active)
429 initial = kzalloc(sizeof(*initial), GFP_KERNEL);
433 final = kzalloc(sizeof(*final), GFP_KERNEL);
436 goto out_free_initial;
439 memcpy(initial, &priv->xstats, sizeof(*initial));
441 ret = stmmac_test_mac_loopback(priv);
445 /* We have no traffic in the line so, sooner or later it will go LPI */
447 memcpy(final, &priv->xstats, sizeof(*final));
449 if (final->irq_tx_path_in_lpi_mode_n >
450 initial->irq_tx_path_in_lpi_mode_n)
/* Fail if the LPI-entry counter never advanced ... */
460 if (final->irq_tx_path_in_lpi_mode_n <=
461 initial->irq_tx_path_in_lpi_mode_n) {
/* ... or if the path never exited LPI again. */
466 if (final->irq_tx_path_exit_lpi_mode_n <=
467 initial->irq_tx_path_exit_lpi_mode_n) {
/* Filtering tests are meaningless in promiscuous mode (everything is
 * accepted); warn and signal the caller to skip in that case.
 */
479 static int stmmac_filter_check(struct stmmac_priv *priv)
481 if (!(priv->dev->flags & IFF_PROMISC))
484 netdev_warn(priv->dev, "Test can't be run in promiscuous mode!\n");
/* Hash filter test: subscribe one multicast address, expect a frame to
 * it to be received, and expect a frame to a different multicast
 * address to be filtered out. NOTE(review): excerpt is missing lines
 * (attr.dst assignments and error/cleanup flow).
 */
488 static int stmmac_test_hfilt(struct stmmac_priv *priv)
490 unsigned char gd_addr[ETH_ALEN] = {0x01, 0xee, 0xdd, 0xcc, 0xbb, 0xaa};
491 unsigned char bd_addr[ETH_ALEN] = {0x01, 0x01, 0x02, 0x03, 0x04, 0x05};
492 struct stmmac_packet_attrs attr = { };
495 ret = stmmac_filter_check(priv);
/* Skip when the MC hash table is already full. */
499 if (netdev_mc_count(priv->dev) >= priv->hw->multicast_filter_bins)
502 ret = dev_mc_add(priv->dev, gd_addr);
508 /* Shall receive packet */
509 ret = __stmmac_test_loopback(priv, &attr);
515 /* Shall NOT receive packet */
516 ret = __stmmac_test_loopback(priv, &attr);
517 ret = ret ? 0 : -EINVAL;
520 dev_mc_del(priv->dev, gd_addr);
/* Perfect (unicast) filter test: same receive/filter pattern using a
 * subscribed unicast address vs. a non-subscribed one.
 */
524 static int stmmac_test_pfilt(struct stmmac_priv *priv)
526 unsigned char gd_addr[ETH_ALEN] = {0x00, 0x01, 0x44, 0x55, 0x66, 0x77};
527 unsigned char bd_addr[ETH_ALEN] = {0x08, 0x00, 0x22, 0x33, 0x44, 0x55};
528 struct stmmac_packet_attrs attr = { };
531 if (stmmac_filter_check(priv))
534 ret = dev_uc_add(priv->dev, gd_addr);
540 /* Shall receive packet */
541 ret = __stmmac_test_loopback(priv, &attr);
547 /* Shall NOT receive packet */
548 ret = __stmmac_test_loopback(priv, &attr);
549 ret = ret ? 0 : -EINVAL;
552 dev_uc_del(priv->dev, gd_addr);
/* No-op sync callback for __dev_mc_sync()/__dev_uc_sync(). */
556 static int stmmac_dummy_sync(struct net_device *netdev, const u8 *addr)
/* Push the current UC/MC address lists down to the hardware. */
561 static void stmmac_test_set_rx_mode(struct net_device *netdev)
563 /* As we are in test mode of ethtool we already own the rtnl lock
564 * so no address will change from user. We can just call the
565 * ndo_set_rx_mode() callback directly */
566 if (netdev->netdev_ops->ndo_set_rx_mode)
567 netdev->netdev_ops->ndo_set_rx_mode(netdev)
/* MC filter isolation test: with all multicast subscriptions removed
 * but a unicast address subscribed, a unicast frame must pass and a
 * multicast frame must be dropped. NOTE(review): excerpt is missing
 * lines (attr.dst assignments and error/cleanup flow).
 */
570 static int stmmac_test_mcfilt(struct stmmac_priv *priv)
572 unsigned char uc_addr[ETH_ALEN] = {0x00, 0x01, 0x44, 0x55, 0x66, 0x77};
573 unsigned char mc_addr[ETH_ALEN] = {0x01, 0x01, 0x44, 0x55, 0x66, 0x77};
574 struct stmmac_packet_attrs attr = { };
577 if (stmmac_filter_check(priv))
579 if (!priv->hw->multicast_filter_bins)
582 /* Remove all MC addresses */
583 __dev_mc_unsync(priv->dev, NULL);
584 stmmac_test_set_rx_mode(priv->dev);
586 ret = dev_uc_add(priv->dev, uc_addr);
592 /* Shall receive packet */
593 ret = __stmmac_test_loopback(priv, &attr);
599 /* Shall NOT receive packet */
600 ret = __stmmac_test_loopback(priv, &attr);
601 ret = ret ? 0 : -EINVAL;
/* Restore the original MC list before leaving. */
604 dev_uc_del(priv->dev, uc_addr);
605 __dev_mc_sync(priv->dev, stmmac_dummy_sync, NULL);
606 stmmac_test_set_rx_mode(priv->dev);
/* UC filter isolation test: the mirror image — all unicast
 * subscriptions removed, one multicast subscribed; multicast passes,
 * unicast is dropped.
 */
610 static int stmmac_test_ucfilt(struct stmmac_priv *priv)
612 unsigned char uc_addr[ETH_ALEN] = {0x00, 0x01, 0x44, 0x55, 0x66, 0x77};
613 unsigned char mc_addr[ETH_ALEN] = {0x01, 0x01, 0x44, 0x55, 0x66, 0x77};
614 struct stmmac_packet_attrs attr = { };
617 if (stmmac_filter_check(priv))
619 if (!priv->hw->multicast_filter_bins)
622 /* Remove all UC addresses */
623 __dev_uc_unsync(priv->dev, NULL);
624 stmmac_test_set_rx_mode(priv->dev);
626 ret = dev_mc_add(priv->dev, mc_addr);
632 /* Shall receive packet */
633 ret = __stmmac_test_loopback(priv, &attr);
639 /* Shall NOT receive packet */
640 ret = __stmmac_test_loopback(priv, &attr);
641 ret = ret ? 0 : -EINVAL;
/* Restore the original UC list before leaving. */
644 dev_mc_del(priv->dev, mc_addr);
645 __dev_uc_sync(priv->dev, stmmac_dummy_sync, NULL);
646 stmmac_test_set_rx_mode(priv->dev);
/* RX hook for the flow control test: accept only a PAUSE frame whose
 * source MAC is our own device address, then fire the completion.
 */
650 static int stmmac_test_flowctrl_validate(struct sk_buff *skb,
651 struct net_device *ndev,
652 struct packet_type *pt,
653 struct net_device *orig_ndev)
655 struct stmmac_test_priv *tpriv = pt->af_packet_priv;
658 ehdr = (struct ethhdr *)skb_mac_header(skb);
659 if (!ether_addr_equal(ehdr->h_source, orig_ndev->dev_addr))
661 if (ehdr->h_proto != htons(ETH_P_PAUSE))
665 complete(&tpriv->comp);
/* Flow control test: stop the RX DMA channels, flood enough frames to
 * fill the RX FIFO so the MAC emits a PAUSE frame, restart RX and wait
 * for the validator above to see that PAUSE frame. Requires a PHY with
 * (asym) pause support. NOTE(review): excerpt is missing lines
 * (pkt_count scaling, error/cleanup flow, sleeps).
 */
671 static int stmmac_test_flowctrl(struct stmmac_priv *priv)
673 unsigned char paddr[ETH_ALEN] = {0x01, 0x80, 0xC2, 0x00, 0x00, 0x01};
674 struct phy_device *phydev = priv->dev->phydev;
675 u32 rx_cnt = priv->plat->rx_queues_to_use;
676 struct stmmac_test_priv *tpriv;
677 unsigned int pkt_count;
680 if (!phydev || (!phydev->pause && !phydev->asym_pause))
683 tpriv = kzalloc(sizeof(*tpriv), GFP_KERNEL);
688 init_completion(&tpriv->comp);
689 tpriv->pt.type = htons(ETH_P_PAUSE);
690 tpriv->pt.func = stmmac_test_flowctrl_validate;
691 tpriv->pt.dev = priv->dev;
692 tpriv->pt.af_packet_priv = tpriv;
693 dev_add_pack(&tpriv->pt);
695 /* Compute minimum number of packets to make FIFO full */
696 pkt_count = priv->plat->rx_fifo_size;
698 pkt_count = priv->dma_cap.rx_fifo_size;
/* Park all RX DMA channels so the FIFO can actually fill up. */
702 for (i = 0; i < rx_cnt; i++)
703 stmmac_stop_rx(priv, priv->ioaddr, i);
/* Promiscuous + PAUSE multicast subscription so we see the frame. */
705 ret = dev_set_promiscuity(priv->dev, 1);
709 ret = dev_mc_add(priv->dev, paddr);
/* Fire-and-forget transmissions (dont_wait) to fill the FIFO. */
713 for (i = 0; i < pkt_count; i++) {
714 struct stmmac_packet_attrs attr = { };
716 attr.dst = priv->dev->dev_addr;
717 attr.dont_wait = true;
720 ret = __stmmac_test_loopback(priv, &attr);
727 /* Wait for some time in case RX Watchdog is enabled */
/* Restart RX DMA and kick NAPI so queued frames get processed. */
730 for (i = 0; i < rx_cnt; i++) {
731 struct stmmac_channel *ch = &priv->channel[i];
734 tail = priv->rx_queue[i].dma_rx_phy +
735 (DMA_RX_SIZE * sizeof(struct dma_desc));
737 stmmac_set_rx_tail_ptr(priv, priv->ioaddr, tail, i);
738 stmmac_start_rx(priv, priv->ioaddr, i);
741 napi_reschedule(&ch->rx_napi);
745 wait_for_completion_timeout(&tpriv->comp, STMMAC_LB_TIMEOUT);
746 ret = tpriv->ok ? 0 : -ETIMEDOUT;
749 dev_mc_del(priv->dev, paddr);
750 dev_set_promiscuity(priv->dev, -1);
751 dev_remove_pack(&tpriv->pt);
/* RSS test: run a loopback expecting the received skb to carry a
 * non-zero RX hash (exp_hash). Requires RSS capability and RSS enabled.
 */
756 static int stmmac_test_rss(struct stmmac_priv *priv)
758 struct stmmac_packet_attrs attr = { };
760 if (!priv->dma_cap.rssen || !priv->rss.enable)
763 attr.dst = priv->dev->dev_addr;
764 attr.exp_hash = true;
768 return __stmmac_test_loopback(priv, &attr);
/* RX hook shared by the VLAN filtering and VLAN TX-offload tests:
 * checks the hw-accelerated VLAN tag (proto + tci) when tpriv->vlan_id
 * is set, then the destination MAC, UDP destination port, and the test
 * magic, and completes on a match. double_vlan selects 802.1AD and the
 * 4-byte inner-tag offset. NOTE(review): excerpt is missing lines
 * (drop/out paths).
 */
771 static int stmmac_test_vlan_validate(struct sk_buff *skb,
772 struct net_device *ndev,
773 struct packet_type *pt,
774 struct net_device *orig_ndev)
776 struct stmmac_test_priv *tpriv = pt->af_packet_priv;
777 struct stmmachdr *shdr;
783 proto = tpriv->double_vlan ? ETH_P_8021AD : ETH_P_8021Q;
785 skb = skb_unshare(skb, GFP_ATOMIC);
789 if (skb_linearize(skb))
791 if (skb_headlen(skb) < (STMMAC_TEST_PKT_SIZE - ETH_HLEN))
793 if (tpriv->vlan_id) {
794 if (skb->vlan_proto != htons(proto))
796 if (skb->vlan_tci != tpriv->vlan_id)
800 ehdr = (struct ethhdr *)skb_mac_header(skb);
801 if (!ether_addr_equal(ehdr->h_dest, tpriv->packet->dst))
805 if (tpriv->double_vlan)
806 ihdr = (struct iphdr *)(skb_network_header(skb) + 4);
807 if (ihdr->protocol != IPPROTO_UDP)
810 uhdr = (struct udphdr *)((u8 *)ihdr + 4 * ihdr->ihl);
811 if (uhdr->dest != htons(tpriv->packet->dport))
814 shdr = (struct stmmachdr *)((u8 *)uhdr + sizeof(*uhdr));
815 if (shdr->magic != cpu_to_be64(STMMAC_TEST_PKT_MAGIC))
819 complete(&tpriv->comp);
/* VLAN hash filter test: register VID 0x123, then send frames with
 * VIDs 0x123..0x126; only the registered VID must be received (i == 0
 * succeeds, i > 0 must time out). Requires the vlhash capability.
 * NOTE(review): excerpt is missing lines (loop body details, error
 * and cleanup flow).
 */
826 static int stmmac_test_vlanfilt(struct stmmac_priv *priv)
828 struct stmmac_packet_attrs attr = { };
829 struct stmmac_test_priv *tpriv;
830 struct sk_buff *skb = NULL;
833 if (!priv->dma_cap.vlhash)
836 tpriv = kzalloc(sizeof(*tpriv), GFP_KERNEL);
841 init_completion(&tpriv->comp);
843 tpriv->pt.type = htons(ETH_P_IP);
844 tpriv->pt.func = stmmac_test_vlan_validate;
845 tpriv->pt.dev = priv->dev;
846 tpriv->pt.af_packet_priv = tpriv;
847 tpriv->packet = &attr;
850 * As we use HASH filtering, false positives may appear. This is a
851 * specially chosen ID so that adjacent IDs (+4) have different
854 tpriv->vlan_id = 0x123;
855 dev_add_pack(&tpriv->pt);
857 ret = vlan_vid_add(priv->dev, htons(ETH_P_8021Q), tpriv->vlan_id);
861 for (i = 0; i < 4; i++) {
863 attr.vlan_id_out = tpriv->vlan_id + i;
864 attr.dst = priv->dev->dev_addr;
868 skb = stmmac_test_get_udp_skb(priv, &attr);
874 skb_set_queue_mapping(skb, 0);
875 ret = dev_queue_xmit(skb);
879 wait_for_completion_timeout(&tpriv->comp, STMMAC_LB_TIMEOUT);
880 ret = tpriv->ok ? 0 : -ETIMEDOUT;
/* i > 0 (wrong VID) reaching the validator is a failure. */
883 } else if (!ret && i) {
894 vlan_vid_del(priv->dev, htons(ETH_P_8021Q), tpriv->vlan_id);
896 dev_remove_pack(&tpriv->pt);
/* Double-VLAN (802.1AD outer tag) variant of the same filter test. */
901 static int stmmac_test_dvlanfilt(struct stmmac_priv *priv)
903 struct stmmac_packet_attrs attr = { };
904 struct stmmac_test_priv *tpriv;
905 struct sk_buff *skb = NULL;
908 if (!priv->dma_cap.vlhash)
911 tpriv = kzalloc(sizeof(*tpriv), GFP_KERNEL);
916 tpriv->double_vlan = true;
917 init_completion(&tpriv->comp);
919 tpriv->pt.type = htons(ETH_P_8021Q);
920 tpriv->pt.func = stmmac_test_vlan_validate;
921 tpriv->pt.dev = priv->dev;
922 tpriv->pt.af_packet_priv = tpriv;
923 tpriv->packet = &attr;
926 * As we use HASH filtering, false positives may appear. This is a
927 * specially chosen ID so that adjacent IDs (+4) have different
930 tpriv->vlan_id = 0x123;
931 dev_add_pack(&tpriv->pt);
933 ret = vlan_vid_add(priv->dev, htons(ETH_P_8021AD), tpriv->vlan_id);
937 for (i = 0; i < 4; i++) {
939 attr.vlan_id_out = tpriv->vlan_id + i;
940 attr.dst = priv->dev->dev_addr;
944 skb = stmmac_test_get_udp_skb(priv, &attr);
950 skb_set_queue_mapping(skb, 0);
951 ret = dev_queue_xmit(skb);
955 wait_for_completion_timeout(&tpriv->comp, STMMAC_LB_TIMEOUT);
956 ret = tpriv->ok ? 0 : -ETIMEDOUT;
/* i > 0 (wrong VID) reaching the validator is a failure. */
959 } else if (!ret && i) {
970 vlan_vid_del(priv->dev, htons(ETH_P_8021AD), tpriv->vlan_id);
972 dev_remove_pack(&tpriv->pt);
977 #ifdef CONFIG_NET_CLS_ACT
/* Flexible RX Parser test: program a cls_u32 drop rule matching
 * 0xdeadbeef at offset 6 (the source MAC area), send a frame from that
 * address, and require it NOT to be received. The rule is then deleted.
 * Requires TC offload and the frpsel capability. NOTE(review): excerpt
 * is missing lines ('nk' definition, kfree cleanup chain).
 */
978 static int stmmac_test_rxp(struct stmmac_priv *priv)
980 unsigned char addr[ETH_ALEN] = {0xde, 0xad, 0xbe, 0xef, 0x00, 0x00};
981 struct tc_cls_u32_offload cls_u32 = { };
982 struct stmmac_packet_attrs attr = { };
983 struct tc_action **actions, *act;
984 struct tc_u32_sel *sel;
985 struct tcf_exts *exts;
988 if (!tc_can_offload(priv->dev))
990 if (!priv->dma_cap.frpsel)
993 sel = kzalloc(sizeof(*sel) + nk * sizeof(struct tc_u32_key), GFP_KERNEL);
997 exts = kzalloc(sizeof(*exts), GFP_KERNEL);
1003 actions = kzalloc(nk * sizeof(*actions), GFP_KERNEL);
1009 act = kzalloc(nk * sizeof(*act), GFP_KERNEL);
1012 goto cleanup_actions;
1015 cls_u32.command = TC_CLSU32_NEW_KNODE;
1016 cls_u32.common.chain_index = 0;
1017 cls_u32.common.protocol = htons(ETH_P_ALL);
1018 cls_u32.knode.exts = exts;
1019 cls_u32.knode.sel = sel;
1020 cls_u32.knode.handle = 0x123;
1022 exts->nr_actions = nk;
1023 exts->actions = actions;
1024 for (i = 0; i < nk; i++) {
1025 struct tcf_gact *gact = to_gact(&act[i]);
1027 actions[i] = &act[i];
1028 gact->tcf_action = TC_ACT_SHOT;
/* Match the 4 bytes at offset 6 == start of the source MAC. */
1033 sel->keys[0].off = 6;
1034 sel->keys[0].val = htonl(0xdeadbeef);
1035 sel->keys[0].mask = ~0x0;
1037 ret = stmmac_tc_setup_cls_u32(priv, priv, &cls_u32);
1041 attr.dst = priv->dev->dev_addr;
1044 ret = __stmmac_test_loopback(priv, &attr);
1045 ret = ret ? 0 : -EINVAL; /* Shall NOT receive packet */
1047 cls_u32.command = TC_CLSU32_DELETE_KNODE;
1048 stmmac_tc_setup_cls_u32(priv, priv, &cls_u32);
/* Stub when CONFIG_NET_CLS_ACT is off: test is skipped. */
1061 static int stmmac_test_rxp(struct stmmac_priv *priv)
/* Source Address Insertion via TX descriptor: send a frame without a
 * source MAC (remove_sa) and expect HW (sarc_type 0x1) to insert ours.
 * The loopback validator checks the received source address.
 */
1067 static int stmmac_test_desc_sai(struct stmmac_priv *priv)
1069 unsigned char src[ETH_ALEN] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
1070 struct stmmac_packet_attrs attr = { };
1073 if (!priv->dma_cap.vlins)
1076 attr.remove_sa = true;
1079 attr.dst = priv->dev->dev_addr;
1081 priv->sarc_type = 0x1;
1083 ret = __stmmac_test_loopback(priv, &attr);
1085 priv->sarc_type = 0x0;
/* Source Address Replacement via TX descriptor (sarc_type 0x2): the
 * zero source MAC in the frame must be replaced by HW.
 */
1089 static int stmmac_test_desc_sar(struct stmmac_priv *priv)
1091 unsigned char src[ETH_ALEN] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
1092 struct stmmac_packet_attrs attr = { };
1095 if (!priv->dma_cap.vlins)
1100 attr.dst = priv->dev->dev_addr;
1102 priv->sarc_type = 0x2;
1104 ret = __stmmac_test_loopback(priv, &attr);
1106 priv->sarc_type = 0x0;
/* Same SA-insertion test, but configured through the MAC register
 * interface (stmmac_sarc_configure) instead of per-descriptor state.
 */
1110 static int stmmac_test_reg_sai(struct stmmac_priv *priv)
1112 unsigned char src[ETH_ALEN] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
1113 struct stmmac_packet_attrs attr = { };
1116 if (!priv->dma_cap.vlins)
1119 attr.remove_sa = true;
1122 attr.dst = priv->dev->dev_addr;
1124 if (stmmac_sarc_configure(priv, priv->ioaddr, 0x2))
1127 ret = __stmmac_test_loopback(priv, &attr);
1129 stmmac_sarc_configure(priv, priv->ioaddr, 0x0);
/* Register-based SA replacement (mode 0x3). */
1133 static int stmmac_test_reg_sar(struct stmmac_priv *priv)
1135 unsigned char src[ETH_ALEN] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
1136 struct stmmac_packet_attrs attr = { };
1139 if (!priv->dma_cap.vlins)
1144 attr.dst = priv->dev->dev_addr;
1146 if (stmmac_sarc_configure(priv, priv->ioaddr, 0x3))
1149 ret = __stmmac_test_loopback(priv, &attr);
1151 stmmac_sarc_configure(priv, priv->ioaddr, 0x0);
/* VLAN TX insertion offload test: tag the skb via the hw-accel API
 * (__vlan_hwaccel_put_tag), transmit it, and have the vlan validator
 * confirm the tag actually appeared on the wire. @svlan selects
 * 802.1AD (service VLAN). Requires the vlins capability.
 * NOTE(review): excerpt is missing lines (error/cleanup flow).
 */
1155 static int stmmac_test_vlanoff_common(struct stmmac_priv *priv, bool svlan)
1157 struct stmmac_packet_attrs attr = { };
1158 struct stmmac_test_priv *tpriv;
1159 struct sk_buff *skb = NULL;
1163 if (!priv->dma_cap.vlins)
1166 tpriv = kzalloc(sizeof(*tpriv), GFP_KERNEL);
1170 proto = svlan ? ETH_P_8021AD : ETH_P_8021Q;
1173 tpriv->double_vlan = svlan;
1174 init_completion(&tpriv->comp);
1176 tpriv->pt.type = svlan ? htons(ETH_P_8021Q) : htons(ETH_P_IP);
1177 tpriv->pt.func = stmmac_test_vlan_validate;
1178 tpriv->pt.dev = priv->dev;
1179 tpriv->pt.af_packet_priv = tpriv;
1180 tpriv->packet = &attr;
1181 tpriv->vlan_id = 0x123;
1182 dev_add_pack(&tpriv->pt);
1184 ret = vlan_vid_add(priv->dev, htons(proto), tpriv->vlan_id);
1188 attr.dst = priv->dev->dev_addr;
1190 skb = stmmac_test_get_udp_skb(priv, &attr);
1196 __vlan_hwaccel_put_tag(skb, htons(proto), tpriv->vlan_id);
1197 skb->protocol = htons(proto);
1199 skb_set_queue_mapping(skb, 0);
1200 ret = dev_queue_xmit(skb);
1204 wait_for_completion_timeout(&tpriv->comp, STMMAC_LB_TIMEOUT);
1205 ret = tpriv->ok ? 0 : -ETIMEDOUT;
1208 vlan_vid_del(priv->dev, htons(proto), tpriv->vlan_id);
1210 dev_remove_pack(&tpriv->pt);
/* C-VLAN (802.1Q) insertion wrapper. */
1215 static int stmmac_test_vlanoff(struct stmmac_priv *priv)
1217 return stmmac_test_vlanoff_common(priv, false);
/* S-VLAN (802.1AD) insertion wrapper; needs double-VLAN capability. */
1220 static int stmmac_test_svlanoff(struct stmmac_priv *priv)
1222 if (!priv->dma_cap.dvlan)
1224 return stmmac_test_vlanoff_common(priv, true);
1227 #ifdef CONFIG_NET_CLS_ACT
/* L3 (IPv4 address) filter test: first verify the packet is received
 * with no rule, then install a flow_cls DROP rule matching @dst/@src
 * with the given masks and verify the same packet is now filtered.
 * RSS is temporarily disabled so queue steering doesn't interfere.
 * NOTE(review): excerpt is missing lines (attr.ip_* setup, cleanup
 * labels, kfree chain).
 */
1228 static int __stmmac_test_l3filt(struct stmmac_priv *priv, u32 dst, u32 src,
1229 u32 dst_mask, u32 src_mask)
1231 struct flow_dissector_key_ipv4_addrs key, mask;
1232 unsigned long dummy_cookie = 0xdeadbeef;
1233 struct stmmac_packet_attrs attr = { };
1234 struct flow_dissector *dissector;
1235 struct flow_cls_offload *cls;
1236 struct flow_rule *rule;
1239 if (!tc_can_offload(priv->dev))
1241 if (!priv->dma_cap.l3l4fnum)
1243 if (priv->rss.enable)
1244 stmmac_rss_configure(priv, priv->hw, NULL,
1245 priv->plat->rx_queues_to_use);
1247 dissector = kzalloc(sizeof(*dissector), GFP_KERNEL);
1253 dissector->used_keys |= (1 << FLOW_DISSECTOR_KEY_IPV4_ADDRS);
1254 dissector->offset[FLOW_DISSECTOR_KEY_IPV4_ADDRS] = 0;
1256 cls = kzalloc(sizeof(*cls), GFP_KERNEL);
1259 goto cleanup_dissector;
1262 cls->common.chain_index = 0;
1263 cls->command = FLOW_CLS_REPLACE;
1264 cls->cookie = dummy_cookie;
1266 rule = kzalloc(struct_size(rule, action.entries, 1), GFP_KERNEL);
1272 rule->match.dissector = dissector;
1273 rule->match.key = (void *)&key;
1274 rule->match.mask = (void *)&mask;
1276 key.src = htonl(src);
1277 key.dst = htonl(dst);
1278 mask.src = src_mask;
1279 mask.dst = dst_mask;
1283 rule->action.entries[0].id = FLOW_ACTION_DROP;
1284 rule->action.num_entries = 1;
1286 attr.dst = priv->dev->dev_addr;
1290 /* Shall receive packet */
1291 ret = __stmmac_test_loopback(priv, &attr);
1295 ret = stmmac_tc_setup_cls(priv, priv, cls);
1299 /* Shall NOT receive packet */
1300 ret = __stmmac_test_loopback(priv, &attr);
1301 ret = ret ? 0 : -EINVAL;
1303 cls->command = FLOW_CLS_DESTROY;
1304 stmmac_tc_setup_cls(priv, priv, cls);
/* Re-enable RSS on the way out if it was active. */
1312 if (priv->rss.enable) {
1313 stmmac_rss_configure(priv, priv->hw, &priv->rss,
1314 priv->plat->rx_queues_to_use);
/* Stub when CONFIG_NET_CLS_ACT is off: test is skipped. */
1320 static int __stmmac_test_l3filt(struct stmmac_priv *priv, u32 dst, u32 src,
1321 u32 dst_mask, u32 src_mask)
/* Filter on a fixed destination IP only (mask ~0 on dst). */
1327 static int stmmac_test_l3filt_da(struct stmmac_priv *priv)
1329 u32 addr = 0x10203040;
1331 return __stmmac_test_l3filt(priv, addr, 0, ~0, 0);
/* Filter on a fixed source IP only (mask ~0 on src). */
1334 static int stmmac_test_l3filt_sa(struct stmmac_priv *priv)
1336 u32 addr = 0x10203040;
1338 return __stmmac_test_l3filt(priv, 0, addr, 0, ~0);
1341 #ifdef CONFIG_NET_CLS_ACT
/* L4 (TCP/UDP port) filter test: same receive-then-drop pattern as the
 * L3 variant, but matching on FLOW_DISSECTOR basic (ip_proto) + ports
 * keys. @udp selects UDP vs TCP. NOTE(review): excerpt is missing
 * lines (attr port setup, cleanup labels, kfree chain).
 */
1342 static int __stmmac_test_l4filt(struct stmmac_priv *priv, u32 dst, u32 src,
1343 u32 dst_mask, u32 src_mask, bool udp)
/* key/mask pairs must be laid out contiguously per dissector offsets. */
1346 struct flow_dissector_key_basic bkey;
1347 struct flow_dissector_key_ports key;
1348 } __aligned(BITS_PER_LONG / 8) keys;
1350 struct flow_dissector_key_basic bmask;
1351 struct flow_dissector_key_ports mask;
1352 } __aligned(BITS_PER_LONG / 8) masks;
1353 unsigned long dummy_cookie = 0xdeadbeef;
1354 struct stmmac_packet_attrs attr = { };
1355 struct flow_dissector *dissector;
1356 struct flow_cls_offload *cls;
1357 struct flow_rule *rule;
1360 if (!tc_can_offload(priv->dev))
1362 if (!priv->dma_cap.l3l4fnum)
1364 if (priv->rss.enable)
1365 stmmac_rss_configure(priv, priv->hw, NULL,
1366 priv->plat->rx_queues_to_use);
1368 dissector = kzalloc(sizeof(*dissector), GFP_KERNEL);
1374 dissector->used_keys |= (1 << FLOW_DISSECTOR_KEY_BASIC);
1375 dissector->used_keys |= (1 << FLOW_DISSECTOR_KEY_PORTS);
1376 dissector->offset[FLOW_DISSECTOR_KEY_BASIC] = 0;
1377 dissector->offset[FLOW_DISSECTOR_KEY_PORTS] = offsetof(typeof(keys), key);
1379 cls = kzalloc(sizeof(*cls), GFP_KERNEL);
1382 goto cleanup_dissector;
1385 cls->common.chain_index = 0;
1386 cls->command = FLOW_CLS_REPLACE;
1387 cls->cookie = dummy_cookie;
1389 rule = kzalloc(struct_size(rule, action.entries, 1), GFP_KERNEL);
1395 rule->match.dissector = dissector;
1396 rule->match.key = (void *)&keys;
1397 rule->match.mask = (void *)&masks;
1399 keys.bkey.ip_proto = udp ? IPPROTO_UDP : IPPROTO_TCP;
1400 keys.key.src = htons(src);
1401 keys.key.dst = htons(dst);
1402 masks.mask.src = src_mask;
1403 masks.mask.dst = dst_mask;
1407 rule->action.entries[0].id = FLOW_ACTION_DROP;
1408 rule->action.num_entries = 1;
1410 attr.dst = priv->dev->dev_addr;
1416 /* Shall receive packet */
1417 ret = __stmmac_test_loopback(priv, &attr);
1421 ret = stmmac_tc_setup_cls(priv, priv, cls);
1425 /* Shall NOT receive packet */
1426 ret = __stmmac_test_loopback(priv, &attr);
1427 ret = ret ? 0 : -EINVAL;
1429 cls->command = FLOW_CLS_DESTROY;
1430 stmmac_tc_setup_cls(priv, priv, cls);
/* Re-enable RSS on the way out if it was active. */
1438 if (priv->rss.enable) {
1439 stmmac_rss_configure(priv, priv->hw, &priv->rss,
1440 priv->plat->rx_queues_to_use);
/* Stub when CONFIG_NET_CLS_ACT is off: test is skipped. */
1446 static int __stmmac_test_l4filt(struct stmmac_priv *priv, u32 dst, u32 src,
1447 u32 dst_mask, u32 src_mask, bool udp)
/* Wrappers: filter on a fixed TCP/UDP destination or source port. */
1453 static int stmmac_test_l4filt_da_tcp(struct stmmac_priv *priv)
1455 u16 dummy_port = 0x123;
1457 return __stmmac_test_l4filt(priv, dummy_port, 0, ~0, 0, false);
1460 static int stmmac_test_l4filt_sa_tcp(struct stmmac_priv *priv)
1462 u16 dummy_port = 0x123;
1464 return __stmmac_test_l4filt(priv, 0, dummy_port, 0, ~0, false);
1467 static int stmmac_test_l4filt_da_udp(struct stmmac_priv *priv)
1469 u16 dummy_port = 0x123;
1471 return __stmmac_test_l4filt(priv, dummy_port, 0, ~0, 0, true);
1474 static int stmmac_test_l4filt_sa_udp(struct stmmac_priv *priv)
1476 u16 dummy_port = 0x123;
1478 return __stmmac_test_l4filt(priv, 0, dummy_port, 0, ~0, true);
/* RX hook for the ARP offload test: accept only an ARP REPLY addressed
 * to the MAC we used as the request's sender, then complete.
 */
1481 static int stmmac_test_arp_validate(struct sk_buff *skb,
1482 struct net_device *ndev,
1483 struct packet_type *pt,
1484 struct net_device *orig_ndev)
1486 struct stmmac_test_priv *tpriv = pt->af_packet_priv;
1487 struct ethhdr *ehdr;
1488 struct arphdr *ahdr;
1490 ehdr = (struct ethhdr *)skb_mac_header(skb);
1491 if (!ether_addr_equal(ehdr->h_dest, tpriv->packet->src))
1494 ahdr = arp_hdr(skb);
1495 if (ahdr->ar_op != htons(ARPOP_REPLY))
1499 complete(&tpriv->comp);
/* ARP offload test: program the MAC to auto-answer ARP requests for
 * ip_addr, loop an ARP request back at it, and wait for the hardware-
 * generated ARP reply. Requires the arpoffsel capability.
 * NOTE(review): excerpt is missing lines (error/cleanup flow).
 */
1505 static int stmmac_test_arpoffload(struct stmmac_priv *priv)
1507 unsigned char src[ETH_ALEN] = {0x01, 0x02, 0x03, 0x04, 0x05, 0x06};
1508 unsigned char dst[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
1509 struct stmmac_packet_attrs attr = { };
1510 struct stmmac_test_priv *tpriv;
1511 struct sk_buff *skb = NULL;
1512 u32 ip_addr = 0xdeadcafe;
1513 u32 ip_src = 0xdeadbeef;
1516 if (!priv->dma_cap.arpoffsel)
1519 tpriv = kzalloc(sizeof(*tpriv), GFP_KERNEL);
1524 init_completion(&tpriv->comp);
1526 tpriv->pt.type = htons(ETH_P_ARP);
1527 tpriv->pt.func = stmmac_test_arp_validate;
1528 tpriv->pt.dev = priv->dev;
1529 tpriv->pt.af_packet_priv = tpriv;
1530 tpriv->packet = &attr;
1531 dev_add_pack(&tpriv->pt);
1534 attr.ip_src = ip_src;
1536 attr.ip_dst = ip_addr;
1538 skb = stmmac_test_get_arp_skb(priv, &attr);
1544 ret = stmmac_set_arp_offload(priv, priv->hw, true, ip_addr);
/* Promiscuous so the looped-back reply reaches the stack. */
1548 ret = dev_set_promiscuity(priv->dev, 1);
1552 skb_set_queue_mapping(skb, 0);
1553 ret = dev_queue_xmit(skb);
1555 goto cleanup_promisc;
1557 wait_for_completion_timeout(&tpriv->comp, STMMAC_LB_TIMEOUT);
1558 ret = tpriv->ok ? 0 : -ETIMEDOUT;
1561 dev_set_promiscuity(priv->dev, -1);
1563 stmmac_set_arp_offload(priv, priv->hw, false, 0x0);
1564 dev_remove_pack(&tpriv->pt);
/* Loop back one maximally-sized frame (RX buffer size minus FCS) on
 * the given TX queue.
 */
1569 static int __stmmac_test_jumbo(struct stmmac_priv *priv, u16 queue)
1571 struct stmmac_packet_attrs attr = { };
1572 int size = priv->dma_buf_sz;
1574 attr.dst = priv->dev->dev_addr;
1575 attr.max_size = size - ETH_FCS_LEN;
1576 attr.queue_mapping = queue;
1578 return __stmmac_test_loopback(priv, &attr);
/* Jumbo frame test on queue 0. */
1581 static int stmmac_test_jumbo(struct stmmac_priv *priv)
1583 return __stmmac_test_jumbo(priv, 0);
/* Jumbo frame test across every TX queue in use. */
1586 static int stmmac_test_mjumbo(struct stmmac_priv *priv)
1588 u32 chan, tx_cnt = priv->plat->tx_queues_to_use;
1594 for (chan = 0; chan < tx_cnt; chan++) {
1595 ret = __stmmac_test_jumbo(priv, chan);
/* Split Header test: run UDP then TCP loopbacks and require the
 * rx_split_hdr_pkt_n counter to advance after each, proving the RX
 * path actually split the headers. NOTE(review): excerpt is missing
 * lines (the SPH capability guard, attr.tcp selection).
 */
1603 static int stmmac_test_sph(struct stmmac_priv *priv)
1605 unsigned long cnt_end, cnt_start = priv->xstats.rx_split_hdr_pkt_n;
1606 struct stmmac_packet_attrs attr = { };
1612 /* Check for UDP first */
1613 attr.dst = priv->dev->dev_addr;
1616 ret = __stmmac_test_loopback(priv, &attr);
1620 cnt_end = priv->xstats.rx_split_hdr_pkt_n;
1621 if (cnt_end <= cnt_start)
1624 /* Check for TCP now */
1625 cnt_start = cnt_end;
1627 attr.dst = priv->dev->dev_addr;
1630 ret = __stmmac_test_loopback(priv, &attr);
1634 cnt_end = priv->xstats.rx_split_hdr_pkt_n;
1635 if (cnt_end <= cnt_start)
/* Loopback mode each test needs the runner to set up beforehand. */
1641 #define STMMAC_LOOPBACK_NONE 0
1642 #define STMMAC_LOOPBACK_MAC 1
1643 #define STMMAC_LOOPBACK_PHY 2
/* Registry of all selftests: display name (fixed-width, shown via
 * ethtool -t), required loopback mode, and the test function.
 */
1645 static const struct stmmac_test {
1646 char name[ETH_GSTRING_LEN];
1648 int (*fn)(struct stmmac_priv *priv);
1649 } stmmac_selftests[] = {
1651 .name = "MAC Loopback ",
1652 .lb = STMMAC_LOOPBACK_MAC,
1653 .fn = stmmac_test_mac_loopback,
1655 .name = "PHY Loopback ",
1656 .lb = STMMAC_LOOPBACK_NONE, /* Test will handle it */
1657 .fn = stmmac_test_phy_loopback,
1659 .name = "MMC Counters ",
1660 .lb = STMMAC_LOOPBACK_PHY,
1661 .fn = stmmac_test_mmc,
1664 .lb = STMMAC_LOOPBACK_PHY,
1665 .fn = stmmac_test_eee,
1667 .name = "Hash Filter MC ",
1668 .lb = STMMAC_LOOPBACK_PHY,
1669 .fn = stmmac_test_hfilt,
1671 .name = "Perfect Filter UC ",
1672 .lb = STMMAC_LOOPBACK_PHY,
1673 .fn = stmmac_test_pfilt,
1675 .name = "MC Filter ",
1676 .lb = STMMAC_LOOPBACK_PHY,
1677 .fn = stmmac_test_mcfilt,
1679 .name = "UC Filter ",
1680 .lb = STMMAC_LOOPBACK_PHY,
1681 .fn = stmmac_test_ucfilt,
1683 .name = "Flow Control ",
1684 .lb = STMMAC_LOOPBACK_PHY,
1685 .fn = stmmac_test_flowctrl,
1688 .lb = STMMAC_LOOPBACK_PHY,
1689 .fn = stmmac_test_rss,
1691 .name = "VLAN Filtering ",
1692 .lb = STMMAC_LOOPBACK_PHY,
1693 .fn = stmmac_test_vlanfilt,
1695 .name = "Double VLAN Filtering",
1696 .lb = STMMAC_LOOPBACK_PHY,
1697 .fn = stmmac_test_dvlanfilt,
1699 .name = "Flexible RX Parser ",
1700 .lb = STMMAC_LOOPBACK_PHY,
1701 .fn = stmmac_test_rxp,
1703 .name = "SA Insertion (desc) ",
1704 .lb = STMMAC_LOOPBACK_PHY,
1705 .fn = stmmac_test_desc_sai,
1707 .name = "SA Replacement (desc)",
1708 .lb = STMMAC_LOOPBACK_PHY,
1709 .fn = stmmac_test_desc_sar,
1711 .name = "SA Insertion (reg) ",
1712 .lb = STMMAC_LOOPBACK_PHY,
1713 .fn = stmmac_test_reg_sai,
1715 .name = "SA Replacement (reg)",
1716 .lb = STMMAC_LOOPBACK_PHY,
1717 .fn = stmmac_test_reg_sar,
1719 .name = "VLAN TX Insertion ",
1720 .lb = STMMAC_LOOPBACK_PHY,
1721 .fn = stmmac_test_vlanoff,
1723 .name = "SVLAN TX Insertion ",
1724 .lb = STMMAC_LOOPBACK_PHY,
1725 .fn = stmmac_test_svlanoff,
1727 .name = "L3 DA Filtering ",
1728 .lb = STMMAC_LOOPBACK_PHY,
1729 .fn = stmmac_test_l3filt_da,
1731 .name = "L3 SA Filtering ",
1732 .lb = STMMAC_LOOPBACK_PHY,
1733 .fn = stmmac_test_l3filt_sa,
1735 .name = "L4 DA TCP Filtering ",
1736 .lb = STMMAC_LOOPBACK_PHY,
1737 .fn = stmmac_test_l4filt_da_tcp,
1739 .name = "L4 SA TCP Filtering ",
1740 .lb = STMMAC_LOOPBACK_PHY,
1741 .fn = stmmac_test_l4filt_sa_tcp,
1743 .name = "L4 DA UDP Filtering ",
1744 .lb = STMMAC_LOOPBACK_PHY,
1745 .fn = stmmac_test_l4filt_da_udp,
1747 .name = "L4 SA UDP Filtering ",
1748 .lb = STMMAC_LOOPBACK_PHY,
1749 .fn = stmmac_test_l4filt_sa_udp,
1751 .name = "ARP Offload ",
1752 .lb = STMMAC_LOOPBACK_PHY,
1753 .fn = stmmac_test_arpoffload,
1755 .name = "Jumbo Frame ",
1756 .lb = STMMAC_LOOPBACK_PHY,
1757 .fn = stmmac_test_jumbo,
1759 .name = "Multichannel Jumbo ",
1760 .lb = STMMAC_LOOPBACK_PHY,
1761 .fn = stmmac_test_mjumbo,
1763 .name = "Split Header ",
1764 .lb = STMMAC_LOOPBACK_PHY,
1765 .fn = stmmac_test_sph,
/* ethtool self-test entry point: requires offline mode and link up,
 * takes the carrier down to quiesce traffic, then for each registered
 * test sets up the required loopback (PHY or MAC), runs it, records
 * failures into etest->flags, and tears the loopback down again.
 * NOTE(review): excerpt is missing lines (per-test error recording
 * into buf[], msleep for queue drain, final loop epilogue).
 */
1769 void stmmac_selftest_run(struct net_device *dev,
1770 struct ethtool_test *etest, u64 *buf)
1772 struct stmmac_priv *priv = netdev_priv(dev);
1773 int count = stmmac_selftest_get_count(priv);
1774 int carrier = netif_carrier_ok(dev);
1777 memset(buf, 0, sizeof(*buf) * count);
1778 stmmac_test_next_id = 0;
1780 if (etest->flags != ETH_TEST_FL_OFFLINE) {
1781 netdev_err(priv->dev, "Only offline tests are supported\n");
1782 etest->flags |= ETH_TEST_FL_FAILED;
1784 } else if (!carrier) {
1785 netdev_err(priv->dev, "You need valid Link to execute tests\n");
1786 etest->flags |= ETH_TEST_FL_FAILED;
1790 /* We don't want extra traffic */
1791 netif_carrier_off(dev);
1793 /* Wait for queues drain */
1796 for (i = 0; i < count; i++) {
/* Enable the loopback mode this test requires. */
1799 switch (stmmac_selftests[i].lb) {
1800 case STMMAC_LOOPBACK_PHY:
1803 ret = phy_loopback(dev->phydev, true);
1807 case STMMAC_LOOPBACK_MAC:
1808 ret = stmmac_set_mac_loopback(priv, priv->ioaddr, true);
1810 case STMMAC_LOOPBACK_NONE:
1818 * First tests will always be MAC / PHY loobpack. If any of
1819 * them is not supported we abort earlier.
1822 netdev_err(priv->dev, "Loopback is not supported\n");
1823 etest->flags |= ETH_TEST_FL_FAILED;
/* Run the test; -EOPNOTSUPP (unsupported feature) is not a failure. */
1827 ret = stmmac_selftests[i].fn(priv);
1828 if (ret && (ret != -EOPNOTSUPP))
1829 etest->flags |= ETH_TEST_FL_FAILED;
/* Undo the loopback mode for the next test. */
1832 switch (stmmac_selftests[i].lb) {
1833 case STMMAC_LOOPBACK_PHY:
1836 ret = phy_loopback(dev->phydev, false);
1840 case STMMAC_LOOPBACK_MAC:
1841 stmmac_set_mac_loopback(priv, priv->ioaddr, false);
1848 /* Restart everything */
1850 netif_carrier_on(dev);
/* Fill the ethtool gstrings buffer with numbered test names. */
1853 void stmmac_selftest_get_strings(struct stmmac_priv *priv, u8 *data)
1858 for (i = 0; i < stmmac_selftest_get_count(priv); i++) {
1859 snprintf(p, ETH_GSTRING_LEN, "%2d. %s", i + 1,
1860 stmmac_selftests[i].name);
1861 p += ETH_GSTRING_LEN;
/* Number of registered selftests (for ethtool ETH_SS_TEST). */
1865 int stmmac_selftest_get_count(struct stmmac_priv *priv)
1867 return ARRAY_SIZE(stmmac_selftests);