// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2021 Gerhard Engleder <gerhard@engleder-embedded.com> */

/* TSN endpoint Ethernet MAC driver
 *
 * The TSN endpoint Ethernet MAC is an FPGA-based network device for real-time
 * communication. It is designed for endpoints within TSN (Time Sensitive
 * Networking) networks; e.g., for PLCs in the industrial automation case.
 *
 * It supports multiple TX/RX queue pairs. The first TX/RX queue pair is used
 * for all other traffic which is not real-time relevant.
 *
 * More information can be found here:
 * - www.embedded-experts.at/tsn
 * - www.engleder-embedded.com
 */

#include "tsnep.h"
#include "tsnep_hw.h"

#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_net.h>
#include <linux/of_mdio.h>
#include <linux/interrupt.h>
#include <linux/etherdevice.h>
#include <linux/phy.h>
#include <linux/iopoll.h>

#define TSNEP_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN)
#define TSNEP_HEADROOM ALIGN(TSNEP_SKB_PAD, 4)
#define TSNEP_MAX_RX_BUF_SIZE (PAGE_SIZE - TSNEP_HEADROOM - \
			       SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))

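/* Illustrative layout of one RX page, as implied by the macros above:
 *
 *   +----------------------------+ <- page_address(page)
 *   | TSNEP_HEADROOM             |    NET_SKB_PAD + NET_IP_ALIGN, 4-aligned
 *   +----------------------------+
 *   | frame data                 |    up to TSNEP_MAX_RX_BUF_SIZE bytes
 *   +----------------------------+
 *   | struct skb_shared_info     |    tailroom for napi_build_skb()
 *   +----------------------------+ <- page_address(page) + PAGE_SIZE
 */
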
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
#define DMA_ADDR_HIGH(dma_addr) ((u32)(((dma_addr) >> 32) & 0xFFFFFFFF))
#else
#define DMA_ADDR_HIGH(dma_addr) ((u32)(0))
#endif
#define DMA_ADDR_LOW(dma_addr) ((u32)((dma_addr) & 0xFFFFFFFF))

#define TSNEP_COALESCE_USECS_DEFAULT 64
#define TSNEP_COALESCE_USECS_MAX ((ECM_INT_DELAY_MASK >> ECM_INT_DELAY_SHIFT) * \
				  ECM_INT_DELAY_BASE_US + ECM_INT_DELAY_BASE_US - 1)

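/* The maximum coalescing delay is the largest register value times the base
 * granularity, plus (base - 1) extra microseconds that still round down to
 * the maximum register value in tsnep_set_irq_coalesce() below.
 */
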
static void tsnep_enable_irq(struct tsnep_adapter *adapter, u32 mask)
{
	iowrite32(mask, adapter->addr + ECM_INT_ENABLE);
}

static void tsnep_disable_irq(struct tsnep_adapter *adapter, u32 mask)
{
	mask |= ECM_INT_DISABLE;
	iowrite32(mask, adapter->addr + ECM_INT_ENABLE);
}

static irqreturn_t tsnep_irq(int irq, void *arg)
{
	struct tsnep_adapter *adapter = arg;
	u32 active = ioread32(adapter->addr + ECM_INT_ACTIVE);

	/* acknowledge interrupt */
	if (active != 0)
		iowrite32(active, adapter->addr + ECM_INT_ACKNOWLEDGE);

	/* handle link interrupt */
	if ((active & ECM_INT_LINK) != 0)
		phy_mac_interrupt(adapter->netdev->phydev);

	/* handle TX/RX queue 0 interrupt */
	if ((active & adapter->queue[0].irq_mask) != 0) {
		tsnep_disable_irq(adapter, adapter->queue[0].irq_mask);
		napi_schedule(&adapter->queue[0].napi);
	}

	return IRQ_HANDLED;
}

static irqreturn_t tsnep_irq_txrx(int irq, void *arg)
{
	struct tsnep_queue *queue = arg;

	/* handle TX/RX queue interrupt */
	tsnep_disable_irq(queue->adapter, queue->irq_mask);
	napi_schedule(&queue->napi);

	return IRQ_HANDLED;
}

int tsnep_set_irq_coalesce(struct tsnep_queue *queue, u32 usecs)
{
	if (usecs > TSNEP_COALESCE_USECS_MAX)
		return -ERANGE;

	usecs /= ECM_INT_DELAY_BASE_US;
	usecs <<= ECM_INT_DELAY_SHIFT;
	usecs &= ECM_INT_DELAY_MASK;

	queue->irq_delay &= ~ECM_INT_DELAY_MASK;
	queue->irq_delay |= usecs;
	iowrite8(queue->irq_delay, queue->irq_delay_addr);

	return 0;
}

u32 tsnep_get_irq_coalesce(struct tsnep_queue *queue)
{
	u32 usecs;

	usecs = (queue->irq_delay & ECM_INT_DELAY_MASK);
	usecs >>= ECM_INT_DELAY_SHIFT;
	usecs *= ECM_INT_DELAY_BASE_US;

	return usecs;
}

static int tsnep_mdiobus_read(struct mii_bus *bus, int addr, int regnum)
{
	struct tsnep_adapter *adapter = bus->priv;
	u32 md;
	int retval;

	if (regnum & MII_ADDR_C45)
		return -EOPNOTSUPP;

	md = ECM_MD_READ;
	if (!adapter->suppress_preamble)
		md |= ECM_MD_PREAMBLE;
	md |= (regnum << ECM_MD_ADDR_SHIFT) & ECM_MD_ADDR_MASK;
	md |= (addr << ECM_MD_PHY_ADDR_SHIFT) & ECM_MD_PHY_ADDR_MASK;
	iowrite32(md, adapter->addr + ECM_MD_CONTROL);
	retval = readl_poll_timeout_atomic(adapter->addr + ECM_MD_STATUS, md,
					   !(md & ECM_MD_BUSY), 16, 1000);
	if (retval != 0)
		return retval;

	return (md & ECM_MD_DATA_MASK) >> ECM_MD_DATA_SHIFT;
}

static int tsnep_mdiobus_write(struct mii_bus *bus, int addr, int regnum,
			       u16 val)
{
	struct tsnep_adapter *adapter = bus->priv;
	u32 md;
	int retval;

	if (regnum & MII_ADDR_C45)
		return -EOPNOTSUPP;

	md = ECM_MD_WRITE;
	if (!adapter->suppress_preamble)
		md |= ECM_MD_PREAMBLE;
	md |= (regnum << ECM_MD_ADDR_SHIFT) & ECM_MD_ADDR_MASK;
	md |= (addr << ECM_MD_PHY_ADDR_SHIFT) & ECM_MD_PHY_ADDR_MASK;
	md |= ((u32)val << ECM_MD_DATA_SHIFT) & ECM_MD_DATA_MASK;
	iowrite32(md, adapter->addr + ECM_MD_CONTROL);
	retval = readl_poll_timeout_atomic(adapter->addr + ECM_MD_STATUS, md,
					   !(md & ECM_MD_BUSY), 16, 1000);
	if (retval != 0)
		return retval;

	return 0;
}

static void tsnep_set_link_mode(struct tsnep_adapter *adapter)
{
	u32 mode;

	switch (adapter->phydev->speed) {
	case SPEED_100:
		mode = ECM_LINK_MODE_100;
		break;
	case SPEED_1000:
		mode = ECM_LINK_MODE_1000;
		break;
	default:
		mode = ECM_LINK_MODE_OFF;
		break;
	}
	iowrite32(mode, adapter->addr + ECM_STATUS);
}

static void tsnep_phy_link_status_change(struct net_device *netdev)
{
	struct tsnep_adapter *adapter = netdev_priv(netdev);
	struct phy_device *phydev = netdev->phydev;

	if (phydev->link)
		tsnep_set_link_mode(adapter);

	phy_print_status(netdev->phydev);
}

static int tsnep_phy_loopback(struct tsnep_adapter *adapter, bool enable)
{
	int retval;

	retval = phy_loopback(adapter->phydev, enable);

	/* PHY link state change is not signaled if loopback is enabled, it
	 * would delay a working loopback anyway, let's ensure that loopback
	 * is working immediately by setting link mode directly
	 */
	if (!retval && enable)
		tsnep_set_link_mode(adapter);

	return retval;
}

static int tsnep_phy_open(struct tsnep_adapter *adapter)
{
	struct phy_device *phydev;
	struct ethtool_eee ethtool_eee;
	int retval;

	retval = phy_connect_direct(adapter->netdev, adapter->phydev,
				    tsnep_phy_link_status_change,
				    adapter->phy_mode);
	if (retval)
		return retval;
	phydev = adapter->netdev->phydev;

	/* MAC supports only 100Mbps|1000Mbps full duplex
	 * SPE (Single Pair Ethernet) is also an option but not implemented yet
	 */
	phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Half_BIT);
	phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Full_BIT);
	phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT);
	phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);

	/* disable EEE autoneg, EEE not supported by TSNEP */
	memset(&ethtool_eee, 0, sizeof(ethtool_eee));
	phy_ethtool_set_eee(adapter->phydev, &ethtool_eee);

	adapter->phydev->irq = PHY_MAC_INTERRUPT;
	phy_start(adapter->phydev);

	return 0;
}

static void tsnep_phy_close(struct tsnep_adapter *adapter)
{
	phy_stop(adapter->netdev->phydev);
	phy_disconnect(adapter->netdev->phydev);
	adapter->netdev->phydev = NULL;
}

static void tsnep_tx_ring_cleanup(struct tsnep_tx *tx)
{
	struct device *dmadev = tx->adapter->dmadev;
	int i;

	memset(tx->entry, 0, sizeof(tx->entry));

	for (i = 0; i < TSNEP_RING_PAGE_COUNT; i++) {
		if (tx->page[i]) {
			dma_free_coherent(dmadev, PAGE_SIZE, tx->page[i],
					  tx->page_dma[i]);
			tx->page[i] = NULL;
			tx->page_dma[i] = 0;
		}
	}
}

static int tsnep_tx_ring_init(struct tsnep_tx *tx)
{
	struct device *dmadev = tx->adapter->dmadev;
	struct tsnep_tx_entry *entry;
	struct tsnep_tx_entry *next_entry;
	int i, j;
	int retval;

	for (i = 0; i < TSNEP_RING_PAGE_COUNT; i++) {
		tx->page[i] =
			dma_alloc_coherent(dmadev, PAGE_SIZE, &tx->page_dma[i],
					   GFP_KERNEL);
		if (!tx->page[i]) {
			retval = -ENOMEM;
			goto alloc_failed;
		}
		for (j = 0; j < TSNEP_RING_ENTRIES_PER_PAGE; j++) {
			entry = &tx->entry[TSNEP_RING_ENTRIES_PER_PAGE * i + j];
			entry->desc_wb = (struct tsnep_tx_desc_wb *)
				(((u8 *)tx->page[i]) + TSNEP_DESC_SIZE * j);
			entry->desc = (struct tsnep_tx_desc *)
				(((u8 *)entry->desc_wb) + TSNEP_DESC_OFFSET);
			entry->desc_dma = tx->page_dma[i] + TSNEP_DESC_SIZE * j;
		}
	}
	for (i = 0; i < TSNEP_RING_SIZE; i++) {
		entry = &tx->entry[i];
		next_entry = &tx->entry[(i + 1) % TSNEP_RING_SIZE];
		entry->desc->next = __cpu_to_le64(next_entry->desc_dma);
	}

	return 0;

alloc_failed:
	tsnep_tx_ring_cleanup(tx);
	return retval;
}

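/* Illustrative example of the false acknowledge that the user flag handling
 * in tsnep_tx_activate() below prevents: owner counters cycle through 1..3.
 * A descriptor used as a non-first fragment is never acknowledged, so its
 * writeback still holds the owner counter of an earlier use. After enough
 * ring wraps that stale counter can equal the newly assigned counter and
 * would be mistaken for a completed transmission; toggling the user flag on
 * every acknowledged (first) fragment guarantees that the stale writeback
 * differs in at least the flag bit.
 */
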
static void tsnep_tx_activate(struct tsnep_tx *tx, int index, int length,
			      bool last)
{
	struct tsnep_tx_entry *entry = &tx->entry[index];

	entry->properties = 0;
	if (entry->skb) {
		entry->properties = length & TSNEP_DESC_LENGTH_MASK;
		entry->properties |= TSNEP_DESC_INTERRUPT_FLAG;
		if (skb_shinfo(entry->skb)->tx_flags & SKBTX_IN_PROGRESS)
			entry->properties |= TSNEP_DESC_EXTENDED_WRITEBACK_FLAG;

		/* toggle user flag to prevent false acknowledge
		 *
		 * Only the first fragment is acknowledged. For all other
		 * fragments no acknowledge is done and the last written owner
		 * counter stays in the writeback descriptor. Therefore, it is
		 * possible that the last written owner counter is identical to
		 * the new incremented owner counter and a false acknowledge is
		 * detected before the real acknowledge has been done by
		 * hardware.
		 *
		 * The user flag is used to prevent this situation. The user
		 * flag is copied to the writeback descriptor by the hardware
		 * and is used as additional acknowledge data. By toggling the
		 * user flag only for the first fragment (which is
		 * acknowledged), it is guaranteed that the last acknowledge
		 * done for this descriptor has used a different user flag and
		 * cannot be detected as false acknowledge.
		 */
		entry->owner_user_flag = !entry->owner_user_flag;
	}
	if (last)
		entry->properties |= TSNEP_TX_DESC_LAST_FRAGMENT_FLAG;
	if (index == tx->increment_owner_counter) {
		tx->owner_counter++;
		if (tx->owner_counter == 4)
			tx->owner_counter = 1;
		tx->increment_owner_counter--;
		if (tx->increment_owner_counter < 0)
			tx->increment_owner_counter = TSNEP_RING_SIZE - 1;
	}
	entry->properties |=
		(tx->owner_counter << TSNEP_DESC_OWNER_COUNTER_SHIFT) &
		TSNEP_DESC_OWNER_COUNTER_MASK;
	if (entry->owner_user_flag)
		entry->properties |= TSNEP_TX_DESC_OWNER_USER_FLAG;
	entry->desc->more_properties =
		__cpu_to_le32(entry->len & TSNEP_DESC_LENGTH_MASK);

	/* descriptor properties shall be written last, because valid data is
	 * signaled there
	 */
	dma_wmb();

	entry->desc->properties = __cpu_to_le32(entry->properties);
}

static int tsnep_tx_desc_available(struct tsnep_tx *tx)
{
	if (tx->read <= tx->write)
		return TSNEP_RING_SIZE - tx->write + tx->read - 1;
	else
		return tx->read - tx->write - 1;
}

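/* Note: one descriptor always stays unused, so read == write unambiguously
 * means an empty ring; with read == write, TSNEP_RING_SIZE - 1 descriptors
 * are reported as available, never TSNEP_RING_SIZE.
 */
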
static int tsnep_tx_map(struct sk_buff *skb, struct tsnep_tx *tx, int count)
{
	struct device *dmadev = tx->adapter->dmadev;
	struct tsnep_tx_entry *entry;
	unsigned int len;
	dma_addr_t dma;
	int map_len = 0;
	int i;

	for (i = 0; i < count; i++) {
		entry = &tx->entry[(tx->write + i) % TSNEP_RING_SIZE];

		if (i == 0) {
			len = skb_headlen(skb);
			dma = dma_map_single(dmadev, skb->data, len,
					     DMA_TO_DEVICE);
		} else {
			len = skb_frag_size(&skb_shinfo(skb)->frags[i - 1]);
			dma = skb_frag_dma_map(dmadev,
					       &skb_shinfo(skb)->frags[i - 1],
					       0, len, DMA_TO_DEVICE);
		}
		if (dma_mapping_error(dmadev, dma))
			return -ENOMEM;

		entry->len = len;
		dma_unmap_addr_set(entry, dma, dma);

		entry->desc->tx = __cpu_to_le64(dma);

		map_len += len;
	}

	return map_len;
}

static int tsnep_tx_unmap(struct tsnep_tx *tx, int index, int count)
{
	struct device *dmadev = tx->adapter->dmadev;
	struct tsnep_tx_entry *entry;
	int map_len = 0;
	int i;

	for (i = 0; i < count; i++) {
		entry = &tx->entry[(index + i) % TSNEP_RING_SIZE];

		if (entry->len) {
			if (i == 0)
				dma_unmap_single(dmadev,
						 dma_unmap_addr(entry, dma),
						 dma_unmap_len(entry, len),
						 DMA_TO_DEVICE);
			else
				dma_unmap_page(dmadev,
					       dma_unmap_addr(entry, dma),
					       dma_unmap_len(entry, len),
					       DMA_TO_DEVICE);
			map_len += entry->len;
			entry->len = 0;
		}
	}

	return map_len;
}

static netdev_tx_t tsnep_xmit_frame_ring(struct sk_buff *skb,
					 struct tsnep_tx *tx)
{
	unsigned long flags;
	int count = 1;
	struct tsnep_tx_entry *entry;
	int length;
	int i;
	int retval;

	if (skb_shinfo(skb)->nr_frags > 0)
		count += skb_shinfo(skb)->nr_frags;

	spin_lock_irqsave(&tx->lock, flags);

	if (tsnep_tx_desc_available(tx) < count) {
		/* ring full, shall not happen because queue is stopped if full
		 * below
		 */
		netif_stop_queue(tx->adapter->netdev);

		spin_unlock_irqrestore(&tx->lock, flags);

		return NETDEV_TX_BUSY;
	}

	entry = &tx->entry[tx->write];
	entry->skb = skb;

	retval = tsnep_tx_map(skb, tx, count);
	if (retval < 0) {
		tsnep_tx_unmap(tx, tx->write, count);
		dev_kfree_skb_any(entry->skb);
		entry->skb = NULL;

		tx->dropped++;

		spin_unlock_irqrestore(&tx->lock, flags);

		netdev_err(tx->adapter->netdev, "TX DMA map failed\n");

		return NETDEV_TX_OK;
	}
	length = retval;

	if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;

	for (i = 0; i < count; i++)
		tsnep_tx_activate(tx, (tx->write + i) % TSNEP_RING_SIZE, length,
				  i == count - 1);
	tx->write = (tx->write + count) % TSNEP_RING_SIZE;

	skb_tx_timestamp(skb);

	/* descriptor properties shall be valid before hardware is notified */
	dma_wmb();

	iowrite32(TSNEP_CONTROL_TX_ENABLE, tx->addr + TSNEP_CONTROL);

	if (tsnep_tx_desc_available(tx) < (MAX_SKB_FRAGS + 1)) {
		/* ring can get full with next frame */
		netif_stop_queue(tx->adapter->netdev);
	}

	spin_unlock_irqrestore(&tx->lock, flags);

	return NETDEV_TX_OK;
}

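/* tsnep_tx_poll() cleans up completed TX descriptors with a local budget of
 * 128 descriptors per call; it returns false when that budget is exhausted,
 * which tsnep_poll() below treats as "work remains" and keeps NAPI scheduled.
 */
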
static bool tsnep_tx_poll(struct tsnep_tx *tx, int napi_budget)
{
	unsigned long flags;
	int budget = 128;
	struct tsnep_tx_entry *entry;
	int count;
	int length;

	spin_lock_irqsave(&tx->lock, flags);

	do {
		if (tx->read == tx->write)
			break;

		entry = &tx->entry[tx->read];
		if ((__le32_to_cpu(entry->desc_wb->properties) &
		     TSNEP_TX_DESC_OWNER_MASK) !=
		    (entry->properties & TSNEP_TX_DESC_OWNER_MASK))
			break;

		/* descriptor properties shall be read first, because valid data
		 * is signaled there
		 */
		dma_rmb();

		count = 1;
		if (skb_shinfo(entry->skb)->nr_frags > 0)
			count += skb_shinfo(entry->skb)->nr_frags;

		length = tsnep_tx_unmap(tx, tx->read, count);

		if ((skb_shinfo(entry->skb)->tx_flags & SKBTX_IN_PROGRESS) &&
		    (__le32_to_cpu(entry->desc_wb->properties) &
		     TSNEP_DESC_EXTENDED_WRITEBACK_FLAG)) {
			struct skb_shared_hwtstamps hwtstamps;
			u64 timestamp;

			if (skb_shinfo(entry->skb)->tx_flags &
			    SKBTX_HW_TSTAMP_USE_CYCLES)
				timestamp =
					__le64_to_cpu(entry->desc_wb->counter);
			else
				timestamp =
					__le64_to_cpu(entry->desc_wb->timestamp);

			memset(&hwtstamps, 0, sizeof(hwtstamps));
			hwtstamps.hwtstamp = ns_to_ktime(timestamp);

			skb_tstamp_tx(entry->skb, &hwtstamps);
		}

		napi_consume_skb(entry->skb, napi_budget);
		entry->skb = NULL;

		tx->read = (tx->read + count) % TSNEP_RING_SIZE;

		tx->packets++;
		tx->bytes += length + ETH_FCS_LEN;

		budget--;
	} while (likely(budget));

	if ((tsnep_tx_desc_available(tx) >= ((MAX_SKB_FRAGS + 1) * 2)) &&
	    netif_queue_stopped(tx->adapter->netdev)) {
		netif_wake_queue(tx->adapter->netdev);
	}

	spin_unlock_irqrestore(&tx->lock, flags);

	return (budget != 0);
}

static bool tsnep_tx_pending(struct tsnep_tx *tx)
{
	unsigned long flags;
	struct tsnep_tx_entry *entry;
	bool pending = false;

	spin_lock_irqsave(&tx->lock, flags);

	if (tx->read != tx->write) {
		entry = &tx->entry[tx->read];
		if ((__le32_to_cpu(entry->desc_wb->properties) &
		     TSNEP_TX_DESC_OWNER_MASK) ==
		    (entry->properties & TSNEP_TX_DESC_OWNER_MASK))
			pending = true;
	}

	spin_unlock_irqrestore(&tx->lock, flags);

	return pending;
}

static int tsnep_tx_open(struct tsnep_adapter *adapter, void __iomem *addr,
			 int queue_index, struct tsnep_tx *tx)
{
	dma_addr_t dma;
	int retval;

	memset(tx, 0, sizeof(*tx));
	tx->adapter = adapter;
	tx->addr = addr;
	tx->queue_index = queue_index;

	retval = tsnep_tx_ring_init(tx);
	if (retval)
		return retval;

	dma = tx->entry[0].desc_dma | TSNEP_RESET_OWNER_COUNTER;
	iowrite32(DMA_ADDR_LOW(dma), tx->addr + TSNEP_TX_DESC_ADDR_LOW);
	iowrite32(DMA_ADDR_HIGH(dma), tx->addr + TSNEP_TX_DESC_ADDR_HIGH);
	tx->owner_counter = 1;
	tx->increment_owner_counter = TSNEP_RING_SIZE - 1;

	spin_lock_init(&tx->lock);

	return 0;
}

static void tsnep_tx_close(struct tsnep_tx *tx)
{
	u32 val;

	readx_poll_timeout(ioread32, tx->addr + TSNEP_CONTROL, val,
			   ((val & TSNEP_CONTROL_TX_ENABLE) == 0), 10000,
			   1000000);

	tsnep_tx_ring_cleanup(tx);
}

static void tsnep_rx_ring_cleanup(struct tsnep_rx *rx)
{
	struct device *dmadev = rx->adapter->dmadev;
	struct tsnep_rx_entry *entry;
	int i;

	for (i = 0; i < TSNEP_RING_SIZE; i++) {
		entry = &rx->entry[i];
		if (entry->page)
			page_pool_put_full_page(rx->page_pool, entry->page,
						false);
		entry->page = NULL;
	}

	if (rx->page_pool)
		page_pool_destroy(rx->page_pool);

	memset(rx->entry, 0, sizeof(rx->entry));

	for (i = 0; i < TSNEP_RING_PAGE_COUNT; i++) {
		if (rx->page[i]) {
			dma_free_coherent(dmadev, PAGE_SIZE, rx->page[i],
					  rx->page_dma[i]);
			rx->page[i] = NULL;
			rx->page_dma[i] = 0;
		}
	}
}

static int tsnep_rx_ring_init(struct tsnep_rx *rx)
{
	struct device *dmadev = rx->adapter->dmadev;
	struct tsnep_rx_entry *entry;
	struct page_pool_params pp_params = { 0 };
	struct tsnep_rx_entry *next_entry;
	int i, j;
	int retval;

	for (i = 0; i < TSNEP_RING_PAGE_COUNT; i++) {
		rx->page[i] =
			dma_alloc_coherent(dmadev, PAGE_SIZE, &rx->page_dma[i],
					   GFP_KERNEL);
		if (!rx->page[i]) {
			retval = -ENOMEM;
			goto failed;
		}
		for (j = 0; j < TSNEP_RING_ENTRIES_PER_PAGE; j++) {
			entry = &rx->entry[TSNEP_RING_ENTRIES_PER_PAGE * i + j];
			entry->desc_wb = (struct tsnep_rx_desc_wb *)
				(((u8 *)rx->page[i]) + TSNEP_DESC_SIZE * j);
			entry->desc = (struct tsnep_rx_desc *)
				(((u8 *)entry->desc_wb) + TSNEP_DESC_OFFSET);
			entry->desc_dma = rx->page_dma[i] + TSNEP_DESC_SIZE * j;
		}
	}

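	/* All RX buffers of this ring come from one page pool. PP_FLAG_DMA_MAP
	 * lets the pool map each page once at allocation time, and
	 * PP_FLAG_DMA_SYNC_DEV makes the pool sync max_len bytes at offset for
	 * the device when a page is recycled, so the RX hot path only needs
	 * dma_sync_single_range_for_cpu().
	 */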
	pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
	pp_params.order = 0;
	pp_params.pool_size = TSNEP_RING_SIZE;
	pp_params.nid = dev_to_node(dmadev);
	pp_params.dev = dmadev;
	pp_params.dma_dir = DMA_FROM_DEVICE;
	pp_params.max_len = TSNEP_MAX_RX_BUF_SIZE;
	pp_params.offset = TSNEP_SKB_PAD;
	rx->page_pool = page_pool_create(&pp_params);
	if (IS_ERR(rx->page_pool)) {
		retval = PTR_ERR(rx->page_pool);
		rx->page_pool = NULL;
		goto failed;
	}

	for (i = 0; i < TSNEP_RING_SIZE; i++) {
		entry = &rx->entry[i];
		next_entry = &rx->entry[(i + 1) % TSNEP_RING_SIZE];
		entry->desc->next = __cpu_to_le64(next_entry->desc_dma);
	}

	return 0;

failed:
	tsnep_rx_ring_cleanup(rx);
	return retval;
}

static int tsnep_rx_desc_available(struct tsnep_rx *rx)
{
	if (rx->read <= rx->write)
		return TSNEP_RING_SIZE - rx->write + rx->read - 1;
	else
		return rx->read - rx->write - 1;
}

static void tsnep_rx_set_page(struct tsnep_rx *rx, struct tsnep_rx_entry *entry,
			      struct page *page)
{
	entry->page = page;
	entry->len = TSNEP_MAX_RX_BUF_SIZE;
	entry->dma = page_pool_get_dma_addr(entry->page);
	entry->desc->rx = __cpu_to_le64(entry->dma + TSNEP_SKB_PAD);
}

static int tsnep_rx_alloc_buffer(struct tsnep_rx *rx, int index)
{
	struct tsnep_rx_entry *entry = &rx->entry[index];
	struct page *page;

	page = page_pool_dev_alloc_pages(rx->page_pool);
	if (unlikely(!page))
		return -ENOMEM;
	tsnep_rx_set_page(rx, entry, page);

	return 0;
}

static void tsnep_rx_reuse_buffer(struct tsnep_rx *rx, int index)
{
	struct tsnep_rx_entry *entry = &rx->entry[index];
	struct tsnep_rx_entry *read = &rx->entry[rx->read];

	tsnep_rx_set_page(rx, entry, read->page);
	read->page = NULL;
}

static void tsnep_rx_activate(struct tsnep_rx *rx, int index)
{
	struct tsnep_rx_entry *entry = &rx->entry[index];

	/* TSNEP_MAX_RX_BUF_SIZE is a multiple of 4 */
	entry->properties = entry->len & TSNEP_DESC_LENGTH_MASK;
	entry->properties |= TSNEP_DESC_INTERRUPT_FLAG;
	if (index == rx->increment_owner_counter) {
		rx->owner_counter++;
		if (rx->owner_counter == 4)
			rx->owner_counter = 1;
		rx->increment_owner_counter--;
		if (rx->increment_owner_counter < 0)
			rx->increment_owner_counter = TSNEP_RING_SIZE - 1;
	}
	entry->properties |=
		(rx->owner_counter << TSNEP_DESC_OWNER_COUNTER_SHIFT) &
		TSNEP_DESC_OWNER_COUNTER_MASK;

	/* descriptor properties shall be written last, because valid data is
	 * signaled there
	 */
	dma_wmb();

	entry->desc->properties = __cpu_to_le32(entry->properties);
}

static int tsnep_rx_refill(struct tsnep_rx *rx, int count, bool reuse)
{
	int i;
	bool alloc_failed = false;
	bool enable = false;
	int index;
	int retval;

	for (i = 0; i < count && !alloc_failed; i++) {
		index = (rx->write + i) % TSNEP_RING_SIZE;

		retval = tsnep_rx_alloc_buffer(rx, index);
		if (unlikely(retval)) {
			rx->alloc_failed++;
			alloc_failed = true;

			/* reuse only if no other allocation was successful */
			if (i == 0 && reuse)
				tsnep_rx_reuse_buffer(rx, index);
			else
				break;
		}

		tsnep_rx_activate(rx, index);

		enable = true;
	}

	if (enable) {
		rx->write = (rx->write + i) % TSNEP_RING_SIZE;

		/* descriptor properties shall be valid before hardware is
		 * notified
		 */
		dma_wmb();

		iowrite32(TSNEP_CONTROL_RX_ENABLE, rx->addr + TSNEP_CONTROL);
	}

	return i;
}

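/* Received frames are preceded by TSNEP_RX_INLINE_METADATA_SIZE bytes of
 * inline metadata (hardware timestamp and free-running counter, see
 * tsnep_netdev_get_tstamp()) and followed by the FCS; tsnep_build_skb()
 * strips both so that the skb starts at the Ethernet header.
 */
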
static struct sk_buff *tsnep_build_skb(struct tsnep_rx *rx, struct page *page,
				       int length)
{
	struct sk_buff *skb;

	skb = napi_build_skb(page_address(page), PAGE_SIZE);
	if (unlikely(!skb))
		return NULL;

	/* update pointers within the skb to store the data */
	skb_reserve(skb, TSNEP_SKB_PAD + TSNEP_RX_INLINE_METADATA_SIZE);
	__skb_put(skb, length - TSNEP_RX_INLINE_METADATA_SIZE - ETH_FCS_LEN);

	if (rx->adapter->hwtstamp_config.rx_filter == HWTSTAMP_FILTER_ALL) {
		struct skb_shared_hwtstamps *hwtstamps = skb_hwtstamps(skb);
		struct tsnep_rx_inline *rx_inline =
			(struct tsnep_rx_inline *)(page_address(page) +
						   TSNEP_SKB_PAD);

		skb_shinfo(skb)->tx_flags |= SKBTX_HW_TSTAMP_NETDEV;
		memset(hwtstamps, 0, sizeof(*hwtstamps));
		hwtstamps->netdev_data = rx_inline;
	}

	skb_record_rx_queue(skb, rx->queue_index);
	skb->protocol = eth_type_trans(skb, rx->adapter->netdev);

	return skb;
}

static int tsnep_rx_poll(struct tsnep_rx *rx, struct napi_struct *napi,
			 int budget)
{
	struct device *dmadev = rx->adapter->dmadev;
	int desc_available;
	int done = 0;
	enum dma_data_direction dma_dir;
	struct tsnep_rx_entry *entry;
	struct sk_buff *skb;
	int length;

	desc_available = tsnep_rx_desc_available(rx);
	dma_dir = page_pool_get_dma_dir(rx->page_pool);

	while (likely(done < budget) && (rx->read != rx->write)) {
		entry = &rx->entry[rx->read];
		if ((__le32_to_cpu(entry->desc_wb->properties) &
		     TSNEP_DESC_OWNER_COUNTER_MASK) !=
		    (entry->properties & TSNEP_DESC_OWNER_COUNTER_MASK))
			break;
		done++;

		if (desc_available >= TSNEP_RING_RX_REFILL) {
			bool reuse = desc_available >= TSNEP_RING_RX_REUSE;

			desc_available -= tsnep_rx_refill(rx, desc_available,
							  reuse);
			if (!entry->page) {
				/* buffer has been reused for refill to prevent
				 * empty RX ring, thus buffer cannot be used for
				 * RX processing
				 */
				rx->read = (rx->read + 1) % TSNEP_RING_SIZE;
				desc_available++;

				rx->dropped++;

				continue;
			}
		}

		/* descriptor properties shall be read first, because valid data
		 * is signaled there
		 */
		dma_rmb();

		prefetch(page_address(entry->page) + TSNEP_SKB_PAD);
		length = __le32_to_cpu(entry->desc_wb->properties) &
			 TSNEP_DESC_LENGTH_MASK;
		dma_sync_single_range_for_cpu(dmadev, entry->dma, TSNEP_SKB_PAD,
					      length, dma_dir);

		rx->read = (rx->read + 1) % TSNEP_RING_SIZE;
		desc_available++;

		skb = tsnep_build_skb(rx, entry->page, length);
		if (skb) {
			page_pool_release_page(rx->page_pool, entry->page);

			rx->packets++;
			rx->bytes += length - TSNEP_RX_INLINE_METADATA_SIZE;
			if (skb->pkt_type == PACKET_MULTICAST)
				rx->multicast++;

			napi_gro_receive(napi, skb);
		} else {
			page_pool_recycle_direct(rx->page_pool, entry->page);

			rx->dropped++;
		}
		entry->page = NULL;
	}

	if (desc_available)
		tsnep_rx_refill(rx, desc_available, false);

	return done;
}

static bool tsnep_rx_pending(struct tsnep_rx *rx)
{
	struct tsnep_rx_entry *entry;

	if (rx->read != rx->write) {
		entry = &rx->entry[rx->read];
		if ((__le32_to_cpu(entry->desc_wb->properties) &
		     TSNEP_DESC_OWNER_COUNTER_MASK) ==
		    (entry->properties & TSNEP_DESC_OWNER_COUNTER_MASK))
			return true;
	}

	return false;
}

static int tsnep_rx_open(struct tsnep_adapter *adapter, void __iomem *addr,
			 int queue_index, struct tsnep_rx *rx)
{
	dma_addr_t dma;
	int retval;

	memset(rx, 0, sizeof(*rx));
	rx->adapter = adapter;
	rx->addr = addr;
	rx->queue_index = queue_index;

	retval = tsnep_rx_ring_init(rx);
	if (retval)
		return retval;

	dma = rx->entry[0].desc_dma | TSNEP_RESET_OWNER_COUNTER;
	iowrite32(DMA_ADDR_LOW(dma), rx->addr + TSNEP_RX_DESC_ADDR_LOW);
	iowrite32(DMA_ADDR_HIGH(dma), rx->addr + TSNEP_RX_DESC_ADDR_HIGH);
	rx->owner_counter = 1;
	rx->increment_owner_counter = TSNEP_RING_SIZE - 1;

	tsnep_rx_refill(rx, tsnep_rx_desc_available(rx), false);

	return 0;
}

static void tsnep_rx_close(struct tsnep_rx *rx)
{
	u32 val;

	iowrite32(TSNEP_CONTROL_RX_DISABLE, rx->addr + TSNEP_CONTROL);
	readx_poll_timeout(ioread32, rx->addr + TSNEP_CONTROL, val,
			   ((val & TSNEP_CONTROL_RX_ENABLE) == 0), 10000,
			   1000000);

	tsnep_rx_ring_cleanup(rx);
}

static bool tsnep_pending(struct tsnep_queue *queue)
{
	if (queue->tx && tsnep_tx_pending(queue->tx))
		return true;

	if (queue->rx && tsnep_rx_pending(queue->rx))
		return true;

	return false;
}

static int tsnep_poll(struct napi_struct *napi, int budget)
{
	struct tsnep_queue *queue = container_of(napi, struct tsnep_queue,
						 napi);
	bool complete = true;
	int done = 0;

	if (queue->tx)
		complete = tsnep_tx_poll(queue->tx, budget);

	if (queue->rx) {
		done = tsnep_rx_poll(queue->rx, napi, budget);
		if (done >= budget)
			complete = false;
	}

	/* if all work not completed, return budget and keep polling */
	if (!complete)
		return budget;

	if (likely(napi_complete_done(napi, done))) {
		tsnep_enable_irq(queue->adapter, queue->irq_mask);

		/* reschedule if work is already pending, prevent rotten packets
		 * which are transmitted or received after polling but before
		 * interrupt enable
		 */
		if (tsnep_pending(queue)) {
			tsnep_disable_irq(queue->adapter, queue->irq_mask);
			napi_schedule(napi);
		}
	}

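	/* NAPI was completed above, so never report full budget consumption;
	 * returning a value equal to budget would tell the NAPI core to keep
	 * polling.
	 */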
	return min(done, budget - 1);
}

static int tsnep_request_irq(struct tsnep_queue *queue, bool first)
{
	const char *name = netdev_name(queue->adapter->netdev);
	irq_handler_t handler;
	void *dev;
	int retval;

	if (first) {
		sprintf(queue->name, "%s-mac", name);
		handler = tsnep_irq;
		dev = queue->adapter;
	} else {
		if (queue->tx && queue->rx)
			sprintf(queue->name, "%s-txrx-%d", name,
				queue->rx->queue_index);
		else if (queue->tx)
			sprintf(queue->name, "%s-tx-%d", name,
				queue->tx->queue_index);
		else
			sprintf(queue->name, "%s-rx-%d", name,
				queue->rx->queue_index);
		handler = tsnep_irq_txrx;
		dev = queue;
	}

	retval = request_irq(queue->irq, handler, 0, queue->name, dev);
	if (retval) {
		/* if name is empty, then interrupt won't be freed */
		memset(queue->name, 0, sizeof(queue->name));
	}

	return retval;
}

static void tsnep_free_irq(struct tsnep_queue *queue, bool first)
{
	void *dev;

	if (!strlen(queue->name))
		return;

	if (first)
		dev = queue->adapter;
	else
		dev = queue;

	free_irq(queue->irq, dev);
	memset(queue->name, 0, sizeof(queue->name));
}

static int tsnep_netdev_open(struct net_device *netdev)
{
	struct tsnep_adapter *adapter = netdev_priv(netdev);
	int i;
	void __iomem *addr;
	int tx_queue_index = 0;
	int rx_queue_index = 0;
	int retval;

	for (i = 0; i < adapter->num_queues; i++) {
		adapter->queue[i].adapter = adapter;
		if (adapter->queue[i].tx) {
			addr = adapter->addr + TSNEP_QUEUE(tx_queue_index);
			retval = tsnep_tx_open(adapter, addr, tx_queue_index,
					       adapter->queue[i].tx);
			if (retval)
				goto failed;
			tx_queue_index++;
		}
		if (adapter->queue[i].rx) {
			addr = adapter->addr + TSNEP_QUEUE(rx_queue_index);
			retval = tsnep_rx_open(adapter, addr, rx_queue_index,
					       adapter->queue[i].rx);
			if (retval)
				goto failed;
			rx_queue_index++;
		}

		retval = tsnep_request_irq(&adapter->queue[i], i == 0);
		if (retval) {
			netif_err(adapter, drv, adapter->netdev,
				  "can't get assigned irq %d.\n",
				  adapter->queue[i].irq);
			goto failed;
		}
	}

	retval = netif_set_real_num_tx_queues(adapter->netdev,
					      adapter->num_tx_queues);
	if (retval)
		goto failed;
	retval = netif_set_real_num_rx_queues(adapter->netdev,
					      adapter->num_rx_queues);
	if (retval)
		goto failed;

	tsnep_enable_irq(adapter, ECM_INT_LINK);
	retval = tsnep_phy_open(adapter);
	if (retval)
		goto phy_failed;

	for (i = 0; i < adapter->num_queues; i++) {
		netif_napi_add(adapter->netdev, &adapter->queue[i].napi,
			       tsnep_poll);
		napi_enable(&adapter->queue[i].napi);

		tsnep_enable_irq(adapter, adapter->queue[i].irq_mask);
	}

	return 0;

phy_failed:
	tsnep_disable_irq(adapter, ECM_INT_LINK);
	tsnep_phy_close(adapter);
failed:
	for (i = 0; i < adapter->num_queues; i++) {
		tsnep_free_irq(&adapter->queue[i], i == 0);

		if (adapter->queue[i].rx)
			tsnep_rx_close(adapter->queue[i].rx);
		if (adapter->queue[i].tx)
			tsnep_tx_close(adapter->queue[i].tx);
	}
	return retval;
}

static int tsnep_netdev_close(struct net_device *netdev)
{
	struct tsnep_adapter *adapter = netdev_priv(netdev);
	int i;

	tsnep_disable_irq(adapter, ECM_INT_LINK);
	tsnep_phy_close(adapter);

	for (i = 0; i < adapter->num_queues; i++) {
		tsnep_disable_irq(adapter, adapter->queue[i].irq_mask);

		napi_disable(&adapter->queue[i].napi);
		netif_napi_del(&adapter->queue[i].napi);

		tsnep_free_irq(&adapter->queue[i], i == 0);

		if (adapter->queue[i].rx)
			tsnep_rx_close(adapter->queue[i].rx);
		if (adapter->queue[i].tx)
			tsnep_tx_close(adapter->queue[i].tx);
	}

	return 0;
}

static netdev_tx_t tsnep_netdev_xmit_frame(struct sk_buff *skb,
					   struct net_device *netdev)
{
	struct tsnep_adapter *adapter = netdev_priv(netdev);
	u16 queue_mapping = skb_get_queue_mapping(skb);

	if (queue_mapping >= adapter->num_tx_queues)
		queue_mapping = 0;

	return tsnep_xmit_frame_ring(skb, &adapter->tx[queue_mapping]);
}

static int tsnep_netdev_ioctl(struct net_device *netdev, struct ifreq *ifr,
			      int cmd)
{
	if (!netif_running(netdev))
		return -EINVAL;
	if (cmd == SIOCSHWTSTAMP || cmd == SIOCGHWTSTAMP)
		return tsnep_ptp_ioctl(netdev, ifr, cmd);
	return phy_mii_ioctl(netdev->phydev, ifr, cmd);
}

static void tsnep_netdev_set_multicast(struct net_device *netdev)
{
	struct tsnep_adapter *adapter = netdev_priv(netdev);

	u16 rx_filter = 0;

	/* configured MAC address and broadcasts are never filtered */
	if (netdev->flags & IFF_PROMISC) {
		rx_filter |= TSNEP_RX_FILTER_ACCEPT_ALL_MULTICASTS;
		rx_filter |= TSNEP_RX_FILTER_ACCEPT_ALL_UNICASTS;
	} else if (!netdev_mc_empty(netdev) || (netdev->flags & IFF_ALLMULTI)) {
		rx_filter |= TSNEP_RX_FILTER_ACCEPT_ALL_MULTICASTS;
	}
	iowrite16(rx_filter, adapter->addr + TSNEP_RX_FILTER);
}

static void tsnep_netdev_get_stats64(struct net_device *netdev,
				     struct rtnl_link_stats64 *stats)
{
	struct tsnep_adapter *adapter = netdev_priv(netdev);
	u32 reg;
	u32 val;
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		stats->tx_packets += adapter->tx[i].packets;
		stats->tx_bytes += adapter->tx[i].bytes;
		stats->tx_dropped += adapter->tx[i].dropped;
	}
	for (i = 0; i < adapter->num_rx_queues; i++) {
		stats->rx_packets += adapter->rx[i].packets;
		stats->rx_bytes += adapter->rx[i].bytes;
		stats->rx_dropped += adapter->rx[i].dropped;
		stats->multicast += adapter->rx[i].multicast;

		reg = ioread32(adapter->addr + TSNEP_QUEUE(i) +
			       TSNEP_RX_STATISTIC);
		val = (reg & TSNEP_RX_STATISTIC_NO_DESC_MASK) >>
		      TSNEP_RX_STATISTIC_NO_DESC_SHIFT;
		stats->rx_dropped += val;
		val = (reg & TSNEP_RX_STATISTIC_BUFFER_TOO_SMALL_MASK) >>
		      TSNEP_RX_STATISTIC_BUFFER_TOO_SMALL_SHIFT;
		stats->rx_dropped += val;
		val = (reg & TSNEP_RX_STATISTIC_FIFO_OVERFLOW_MASK) >>
		      TSNEP_RX_STATISTIC_FIFO_OVERFLOW_SHIFT;
		stats->rx_errors += val;
		stats->rx_fifo_errors += val;
		val = (reg & TSNEP_RX_STATISTIC_INVALID_FRAME_MASK) >>
		      TSNEP_RX_STATISTIC_INVALID_FRAME_SHIFT;
		stats->rx_errors += val;
		stats->rx_frame_errors += val;
	}

	reg = ioread32(adapter->addr + ECM_STAT);
	val = (reg & ECM_STAT_RX_ERR_MASK) >> ECM_STAT_RX_ERR_SHIFT;
	stats->rx_errors += val;
	val = (reg & ECM_STAT_INV_FRM_MASK) >> ECM_STAT_INV_FRM_SHIFT;
	stats->rx_errors += val;
	stats->rx_crc_errors += val;
	val = (reg & ECM_STAT_FWD_RX_ERR_MASK) >> ECM_STAT_FWD_RX_ERR_SHIFT;
	stats->rx_errors += val;
}

static void tsnep_mac_set_address(struct tsnep_adapter *adapter, u8 *addr)
{
	iowrite32(*(u32 *)addr, adapter->addr + TSNEP_MAC_ADDRESS_LOW);
	iowrite16(*(u16 *)(addr + sizeof(u32)),
		  adapter->addr + TSNEP_MAC_ADDRESS_HIGH);

	ether_addr_copy(adapter->mac_address, addr);
	netif_info(adapter, drv, adapter->netdev, "MAC address set to %pM\n",
		   addr);
}

static int tsnep_netdev_set_mac_address(struct net_device *netdev, void *addr)
{
	struct tsnep_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *sock_addr = addr;
	int retval;

	retval = eth_prepare_mac_addr_change(netdev, sock_addr);
	if (retval)
		return retval;
	eth_hw_addr_set(netdev, sock_addr->sa_data);
	tsnep_mac_set_address(adapter, sock_addr->sa_data);

	return 0;
}

static int tsnep_netdev_set_features(struct net_device *netdev,
				     netdev_features_t features)
{
	struct tsnep_adapter *adapter = netdev_priv(netdev);
	netdev_features_t changed = netdev->features ^ features;
	bool enable;
	int retval = 0;

	if (changed & NETIF_F_LOOPBACK) {
		enable = !!(features & NETIF_F_LOOPBACK);
		retval = tsnep_phy_loopback(adapter, enable);
	}

	return retval;
}

static ktime_t tsnep_netdev_get_tstamp(struct net_device *netdev,
				       const struct skb_shared_hwtstamps *hwtstamps,
				       bool cycles)
{
	struct tsnep_rx_inline *rx_inline = hwtstamps->netdev_data;
	u64 timestamp;

	if (cycles)
		timestamp = __le64_to_cpu(rx_inline->counter);
	else
		timestamp = __le64_to_cpu(rx_inline->timestamp);

	return ns_to_ktime(timestamp);
}

static const struct net_device_ops tsnep_netdev_ops = {
	.ndo_open = tsnep_netdev_open,
	.ndo_stop = tsnep_netdev_close,
	.ndo_start_xmit = tsnep_netdev_xmit_frame,
	.ndo_eth_ioctl = tsnep_netdev_ioctl,
	.ndo_set_rx_mode = tsnep_netdev_set_multicast,
	.ndo_get_stats64 = tsnep_netdev_get_stats64,
	.ndo_set_mac_address = tsnep_netdev_set_mac_address,
	.ndo_set_features = tsnep_netdev_set_features,
	.ndo_get_tstamp = tsnep_netdev_get_tstamp,
	.ndo_setup_tc = tsnep_tc_setup,
};

static int tsnep_mac_init(struct tsnep_adapter *adapter)
{
	int retval;

	/* initialize RX filtering, at least configured MAC address and
	 * broadcast are not filtered
	 */
	iowrite16(0, adapter->addr + TSNEP_RX_FILTER);

	/* try to get MAC address in the following order:
	 * - device tree
	 * - valid MAC address already set
	 * - MAC address register if valid
	 * - random MAC address
	 */
	retval = of_get_mac_address(adapter->pdev->dev.of_node,
				    adapter->mac_address);
	if (retval == -EPROBE_DEFER)
		return retval;
	if (retval && !is_valid_ether_addr(adapter->mac_address)) {
		*(u32 *)adapter->mac_address =
			ioread32(adapter->addr + TSNEP_MAC_ADDRESS_LOW);
		*(u16 *)(adapter->mac_address + sizeof(u32)) =
			ioread16(adapter->addr + TSNEP_MAC_ADDRESS_HIGH);
		if (!is_valid_ether_addr(adapter->mac_address))
			eth_random_addr(adapter->mac_address);
	}

	tsnep_mac_set_address(adapter, adapter->mac_address);
	eth_hw_addr_set(adapter->netdev, adapter->mac_address);

	return 0;
}

static int tsnep_mdio_init(struct tsnep_adapter *adapter)
{
	struct device_node *np = adapter->pdev->dev.of_node;
	int retval;

	if (np) {
		np = of_get_child_by_name(np, "mdio");
		if (!np)
			return 0;

		adapter->suppress_preamble =
			of_property_read_bool(np, "suppress-preamble");
	}

	adapter->mdiobus = devm_mdiobus_alloc(&adapter->pdev->dev);
	if (!adapter->mdiobus) {
		retval = -ENOMEM;

		goto out;
	}

	adapter->mdiobus->priv = (void *)adapter;
	adapter->mdiobus->parent = &adapter->pdev->dev;
	adapter->mdiobus->read = tsnep_mdiobus_read;
	adapter->mdiobus->write = tsnep_mdiobus_write;
	adapter->mdiobus->name = TSNEP "-mdiobus";
	snprintf(adapter->mdiobus->id, MII_BUS_ID_SIZE, "%s",
		 adapter->pdev->name);

	/* do not scan broadcast address */
	adapter->mdiobus->phy_mask = 0x0000001;

	retval = of_mdiobus_register(adapter->mdiobus, np);

out:
	of_node_put(np);

	return retval;
}

static int tsnep_phy_init(struct tsnep_adapter *adapter)
{
	struct device_node *phy_node;
	int retval;

	retval = of_get_phy_mode(adapter->pdev->dev.of_node,
				 &adapter->phy_mode);
	if (retval)
		adapter->phy_mode = PHY_INTERFACE_MODE_GMII;

	phy_node = of_parse_phandle(adapter->pdev->dev.of_node, "phy-handle",
				    0);
	adapter->phydev = of_phy_find_device(phy_node);
	of_node_put(phy_node);
	if (!adapter->phydev && adapter->mdiobus)
		adapter->phydev = phy_find_first(adapter->mdiobus);
	if (!adapter->phydev)
		return -EIO;

	return 0;
}

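/* tsnep_queue_init() below maps interrupts to queues: queue 0 always uses the
 * combined "mac" interrupt, while additional TX/RX queue pairs are only used
 * if a dedicated "txrx-<n>" interrupt is described for them.
 */
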
static int tsnep_queue_init(struct tsnep_adapter *adapter, int queue_count)
{
	u32 irq_mask = ECM_INT_TX_0 | ECM_INT_RX_0;
	char name[8];
	int i;
	int retval;

	/* one TX/RX queue pair for netdev is mandatory */
	if (platform_irq_count(adapter->pdev) == 1)
		retval = platform_get_irq(adapter->pdev, 0);
	else
		retval = platform_get_irq_byname(adapter->pdev, "mac");
	if (retval < 0)
		return retval;
	adapter->num_tx_queues = 1;
	adapter->num_rx_queues = 1;
	adapter->num_queues = 1;
	adapter->queue[0].irq = retval;
	adapter->queue[0].tx = &adapter->tx[0];
	adapter->queue[0].rx = &adapter->rx[0];
	adapter->queue[0].irq_mask = irq_mask;
	adapter->queue[0].irq_delay_addr = adapter->addr + ECM_INT_DELAY;
	retval = tsnep_set_irq_coalesce(&adapter->queue[0],
					TSNEP_COALESCE_USECS_DEFAULT);
	if (retval < 0)
		return retval;

	adapter->netdev->irq = adapter->queue[0].irq;

	/* add additional TX/RX queue pairs only if dedicated interrupt is
	 * available
	 */
	for (i = 1; i < queue_count; i++) {
		sprintf(name, "txrx-%d", i);
		retval = platform_get_irq_byname_optional(adapter->pdev, name);
		if (retval < 0)
			break;

		adapter->num_tx_queues++;
		adapter->num_rx_queues++;
		adapter->num_queues++;
		adapter->queue[i].irq = retval;
		adapter->queue[i].tx = &adapter->tx[i];
		adapter->queue[i].rx = &adapter->rx[i];
		adapter->queue[i].irq_mask =
			irq_mask << (ECM_INT_TXRX_SHIFT * i);
		adapter->queue[i].irq_delay_addr =
			adapter->addr + ECM_INT_DELAY + ECM_INT_DELAY_OFFSET * i;
		retval = tsnep_set_irq_coalesce(&adapter->queue[i],
						TSNEP_COALESCE_USECS_DEFAULT);
		if (retval < 0)
			return retval;
	}

	return 0;
}

static int tsnep_probe(struct platform_device *pdev)
{
	struct tsnep_adapter *adapter;
	struct net_device *netdev;
	struct resource *io;
	u32 type;
	int revision;
	int version;
	int queue_count;
	int retval;

	netdev = devm_alloc_etherdev_mqs(&pdev->dev,
					 sizeof(struct tsnep_adapter),
					 TSNEP_MAX_QUEUES, TSNEP_MAX_QUEUES);
	if (!netdev)
		return -ENODEV;
	SET_NETDEV_DEV(netdev, &pdev->dev);
	adapter = netdev_priv(netdev);
	platform_set_drvdata(pdev, adapter);
	adapter->pdev = pdev;
	adapter->dmadev = &pdev->dev;
	adapter->netdev = netdev;
	adapter->msg_enable = NETIF_MSG_DRV | NETIF_MSG_PROBE |
			      NETIF_MSG_LINK | NETIF_MSG_IFUP |
			      NETIF_MSG_IFDOWN | NETIF_MSG_TX_QUEUED;

	netdev->min_mtu = ETH_MIN_MTU;
	netdev->max_mtu = TSNEP_MAX_FRAME_SIZE;

	mutex_init(&adapter->gate_control_lock);
	mutex_init(&adapter->rxnfc_lock);
	INIT_LIST_HEAD(&adapter->rxnfc_rules);

	io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	adapter->addr = devm_ioremap_resource(&pdev->dev, io);
	if (IS_ERR(adapter->addr))
		return PTR_ERR(adapter->addr);
	netdev->mem_start = io->start;
	netdev->mem_end = io->end;

	type = ioread32(adapter->addr + ECM_TYPE);
	revision = (type & ECM_REVISION_MASK) >> ECM_REVISION_SHIFT;
	version = (type & ECM_VERSION_MASK) >> ECM_VERSION_SHIFT;
	queue_count = (type & ECM_QUEUE_COUNT_MASK) >> ECM_QUEUE_COUNT_SHIFT;
	adapter->gate_control = type & ECM_GATE_CONTROL;
	adapter->rxnfc_max = TSNEP_RX_ASSIGN_ETHER_TYPE_COUNT;

	tsnep_disable_irq(adapter, ECM_INT_ALL);

	retval = tsnep_queue_init(adapter, queue_count);
	if (retval)
		return retval;

	retval = dma_set_mask_and_coherent(&adapter->pdev->dev,
					   DMA_BIT_MASK(64));
	if (retval) {
		dev_err(&adapter->pdev->dev, "no usable DMA configuration.\n");
		return retval;
	}

	retval = tsnep_mac_init(adapter);
	if (retval)
		return retval;

	retval = tsnep_mdio_init(adapter);
	if (retval)
		goto mdio_init_failed;

	retval = tsnep_phy_init(adapter);
	if (retval)
		goto phy_init_failed;

	retval = tsnep_ptp_init(adapter);
	if (retval)
		goto ptp_init_failed;

	retval = tsnep_tc_init(adapter);
	if (retval)
		goto tc_init_failed;

	retval = tsnep_rxnfc_init(adapter);
	if (retval)
		goto rxnfc_init_failed;

	netdev->netdev_ops = &tsnep_netdev_ops;
	netdev->ethtool_ops = &tsnep_ethtool_ops;
	netdev->features = NETIF_F_SG;
	netdev->hw_features = netdev->features | NETIF_F_LOOPBACK;

	/* carrier off reporting is important to ethtool even BEFORE open */
	netif_carrier_off(netdev);

	retval = register_netdev(netdev);
	if (retval)
		goto register_failed;

	dev_info(&adapter->pdev->dev, "device version %d.%02d\n", version,
		 revision);
	if (adapter->gate_control)
		dev_info(&adapter->pdev->dev, "gate control detected\n");

	return 0;

register_failed:
	tsnep_rxnfc_cleanup(adapter);
rxnfc_init_failed:
	tsnep_tc_cleanup(adapter);
tc_init_failed:
	tsnep_ptp_cleanup(adapter);
ptp_init_failed:
phy_init_failed:
	if (adapter->mdiobus)
		mdiobus_unregister(adapter->mdiobus);
mdio_init_failed:
	return retval;
}

static int tsnep_remove(struct platform_device *pdev)
{
	struct tsnep_adapter *adapter = platform_get_drvdata(pdev);

	unregister_netdev(adapter->netdev);

	tsnep_rxnfc_cleanup(adapter);

	tsnep_tc_cleanup(adapter);

	tsnep_ptp_cleanup(adapter);

	if (adapter->mdiobus)
		mdiobus_unregister(adapter->mdiobus);

	tsnep_disable_irq(adapter, ECM_INT_ALL);

	return 0;
}

static const struct of_device_id tsnep_of_match[] = {
	{ .compatible = "engleder,tsnep", },
	{ },
};
MODULE_DEVICE_TABLE(of, tsnep_of_match);

static struct platform_driver tsnep_driver = {
	.driver = {
		.name = TSNEP,
		.of_match_table = tsnep_of_match,
	},
	.probe = tsnep_probe,
	.remove = tsnep_remove,
};
module_platform_driver(tsnep_driver);

MODULE_AUTHOR("Gerhard Engleder <gerhard@engleder-embedded.com>");
MODULE_DESCRIPTION("TSN endpoint Ethernet MAC driver");
MODULE_LICENSE("GPL");