/* SuperH Ethernet device driver
 *
 * Copyright (C) 2014 Renesas Electronics Corporation
 * Copyright (C) 2006-2012 Nobuhiro Iwamatsu
 * Copyright (C) 2008-2014 Renesas Solutions Corp.
 * Copyright (C) 2013-2014 Cogent Embedded, Inc.
 * Copyright (C) 2014 Codethink Limited
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/mdio-bitbang.h>
#include <linux/netdevice.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/of_net.h>
#include <linux/phy.h>
#include <linux/cache.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/clk.h>
#include <linux/sh_eth.h>
#include <linux/of_mdio.h>
#include "sh_eth.h"

#define SH_ETH_DEF_MSG_ENABLE \
		(NETIF_MSG_LINK | \
		 NETIF_MSG_TIMER | \
		 NETIF_MSG_RX_ERR | \
		 NETIF_MSG_TX_ERR)

#define SH_ETH_OFFSET_INVALID	((u16)~0)

#define SH_ETH_OFFSET_DEFAULTS			\
	[0 ... SH_ETH_MAX_REGISTER_OFFSET - 1] = SH_ETH_OFFSET_INVALID
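
/* SH_ETH_OFFSET_DEFAULTS relies on GCC's designated range initializer
 * extension, "[first ... last] = value", which pre-fills an entire array
 * with one value.  Each per-SoC offset table below therefore starts out
 * with every register marked SH_ETH_OFFSET_INVALID and overrides only the
 * offsets that exist on that chip; callers can then test for
 * SH_ETH_OFFSET_INVALID before touching a register, as the Rx restart
 * path and the ethtool register dump code do.
 */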
static const u16 sh_eth_offset_gigabit[SH_ETH_MAX_REGISTER_OFFSET] = {
	SH_ETH_OFFSET_DEFAULTS,

	[TSU_CTRST]	= 0x0004,
	[TSU_FWEN0]	= 0x0010,
	[TSU_FWEN1]	= 0x0014,
	[TSU_BSYSL0]	= 0x0020,
	[TSU_BSYSL1]	= 0x0024,
	[TSU_PRISL0]	= 0x0028,
	[TSU_PRISL1]	= 0x002c,
	[TSU_FWSL0]	= 0x0030,
	[TSU_FWSL1]	= 0x0034,
	[TSU_FWSLC]	= 0x0038,
	[TSU_QTAG0]	= 0x0040,
	[TSU_QTAG1]	= 0x0044,
	[TSU_FWINMK]	= 0x0054,
	[TSU_ADQT0]	= 0x0048,
	[TSU_ADQT1]	= 0x004c,
	[TSU_VTAG0]	= 0x0058,
	[TSU_VTAG1]	= 0x005c,
	[TSU_ADSBSY]	= 0x0060,
	[TSU_POST1]	= 0x0070,
	[TSU_POST2]	= 0x0074,
	[TSU_POST3]	= 0x0078,
	[TSU_POST4]	= 0x007c,
	[TSU_ADRH0]	= 0x0100,
};
static const u16 sh_eth_offset_fast_rz[SH_ETH_MAX_REGISTER_OFFSET] = {
	SH_ETH_OFFSET_DEFAULTS,

	[TSU_CTRST]	= 0x0004,
	[TSU_VTAG0]	= 0x0058,
	[TSU_ADSBSY]	= 0x0060,
	[TSU_ADRH0]	= 0x0100,
};

static const u16 sh_eth_offset_fast_rcar[SH_ETH_MAX_REGISTER_OFFSET] = {
	SH_ETH_OFFSET_DEFAULTS,
};

static const u16 sh_eth_offset_fast_sh4[SH_ETH_MAX_REGISTER_OFFSET] = {
	SH_ETH_OFFSET_DEFAULTS,
};

static const u16 sh_eth_offset_fast_sh3_sh2[SH_ETH_MAX_REGISTER_OFFSET] = {
	SH_ETH_OFFSET_DEFAULTS,

	[TSU_CTRST]	= 0x0004,
	[TSU_FWEN0]	= 0x0010,
	[TSU_FWEN1]	= 0x0014,
	[TSU_BSYSL0]	= 0x0020,
	[TSU_BSYSL1]	= 0x0024,
	[TSU_PRISL0]	= 0x0028,
	[TSU_PRISL1]	= 0x002c,
	[TSU_FWSL0]	= 0x0030,
	[TSU_FWSL1]	= 0x0034,
	[TSU_FWSLC]	= 0x0038,
	[TSU_QTAGM0]	= 0x0040,
	[TSU_QTAGM1]	= 0x0044,
	[TSU_ADQT0]	= 0x0048,
	[TSU_ADQT1]	= 0x004c,
	[TSU_FWINMK]	= 0x0054,
	[TSU_ADSBSY]	= 0x0060,
	[TSU_POST1]	= 0x0070,
	[TSU_POST2]	= 0x0074,
	[TSU_POST3]	= 0x0078,
	[TSU_POST4]	= 0x007c,
	[TSU_ADRH0]	= 0x0100,
};
static void sh_eth_rcv_snd_disable(struct net_device *ndev);
static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev);

static bool sh_eth_is_gether(struct sh_eth_private *mdp)
{
	return mdp->reg_offset == sh_eth_offset_gigabit;
}

static bool sh_eth_is_rz_fast_ether(struct sh_eth_private *mdp)
{
	return mdp->reg_offset == sh_eth_offset_fast_rz;
}

static void sh_eth_select_mii(struct net_device *ndev)
{
	u32 value;
	struct sh_eth_private *mdp = netdev_priv(ndev);

	switch (mdp->phy_interface) {
	case PHY_INTERFACE_MODE_GMII:
		value = 0x2;
		break;
	case PHY_INTERFACE_MODE_MII:
		value = 0x1;
		break;
	case PHY_INTERFACE_MODE_RMII:
		value = 0x0;
		break;
	default:
		netdev_warn(ndev,
			    "PHY interface mode was not set up. Set to MII.\n");
		value = 0x1;
		break;
	}

	sh_eth_write(ndev, value, RMII_MII);
}

static void sh_eth_set_duplex(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	if (mdp->duplex) /* Full */
		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR);
	else /* Half */
		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR);
}
/* There is CPU dependent code */
static void sh_eth_set_rate_r8a777x(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	switch (mdp->speed) {
	case 10: /* 10BASE */
		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_ELB, ECMR);
		break;
	case 100: /* 100BASE */
		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_ELB, ECMR);
		break;
	}
}

static struct sh_eth_cpu_data r8a777x_data = {
	.set_duplex = sh_eth_set_duplex,
	.set_rate = sh_eth_set_rate_r8a777x,

	.register_type = SH_ETH_REG_FAST_RCAR,

	.ecsr_value = ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD,
	.ecsipr_value = ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP,
	.eesipr_value = 0x01ff009f,

	.tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
	.eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
			  EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
			  EESR_ECI,
	.fdr_value = 0x00000f0f,
};

static struct sh_eth_cpu_data r8a779x_data = {
	.set_duplex = sh_eth_set_duplex,
	.set_rate = sh_eth_set_rate_r8a777x,

	.register_type = SH_ETH_REG_FAST_RCAR,

	.ecsr_value = ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD,
	.ecsipr_value = ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP,
	.eesipr_value = 0x01ff009f,

	.tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
	.eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
			  EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
			  EESR_ECI,
	.fdr_value = 0x00000f0f,

	.trscer_err_mask = DESC_I_RINT8,
};

static void sh_eth_set_rate_sh7724(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	switch (mdp->speed) {
	case 10: /* 10BASE */
		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_RTM, ECMR);
		break;
	case 100: /* 100BASE */
		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_RTM, ECMR);
		break;
	}
}

static struct sh_eth_cpu_data sh7724_data = {
	.set_duplex = sh_eth_set_duplex,
	.set_rate = sh_eth_set_rate_sh7724,

	.register_type = SH_ETH_REG_FAST_SH4,

	.ecsr_value = ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD,
	.ecsipr_value = ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP,
	.eesipr_value = 0x01ff009f,

	.tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
	.eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
			  EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
			  EESR_ECI,

	.rpadir_value = 0x00020000, /* NET_IP_ALIGN assumed to be 2 */
};

static void sh_eth_set_rate_sh7757(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	switch (mdp->speed) {
	case 10: /* 10BASE */
		sh_eth_write(ndev, 0, RTRATE);
		break;
	case 100: /* 100BASE */
		sh_eth_write(ndev, 1, RTRATE);
		break;
	}
}

static struct sh_eth_cpu_data sh7757_data = {
	.set_duplex = sh_eth_set_duplex,
	.set_rate = sh_eth_set_rate_sh7757,

	.register_type = SH_ETH_REG_FAST_SH4,

	.eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,

	.tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
	.eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
			  EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
			  EESR_ECI,

	.irq_flags = IRQF_SHARED,

	.rpadir_value = 2 << 16,
};

#define SH_GIGA_ETH_BASE	0xfee00000UL
#define GIGA_MALR(port)		(SH_GIGA_ETH_BASE + 0x800 * (port) + 0x05c8)
#define GIGA_MAHR(port)		(SH_GIGA_ETH_BASE + 0x800 * (port) + 0x05c0)
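
/* Address map example: the on-chip GETHER block for port 1 sits 0x800
 * above port 0, so GIGA_MAHR(1) expands to 0xfee00000 + 0x800 + 0x5c0 =
 * 0xfee00dc0, and the ARSTR software-reset register written by the chip
 * reset function below lives at SH_GIGA_ETH_BASE + 0x1800.
 */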
static void sh_eth_chip_reset_giga(struct net_device *ndev)
{
	int i;
	u32 mahr[2], malr[2];

	/* save MAHR and MALR */
	for (i = 0; i < 2; i++) {
		malr[i] = ioread32((void *)GIGA_MALR(i));
		mahr[i] = ioread32((void *)GIGA_MAHR(i));
	}

	/* reset device */
	iowrite32(ARSTR_ARSTR, (void *)(SH_GIGA_ETH_BASE + 0x1800));

	/* restore MAHR and MALR */
	for (i = 0; i < 2; i++) {
		iowrite32(malr[i], (void *)GIGA_MALR(i));
		iowrite32(mahr[i], (void *)GIGA_MAHR(i));
	}
}

static void sh_eth_set_rate_giga(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	switch (mdp->speed) {
	case 10: /* 10BASE */
		sh_eth_write(ndev, 0x00000000, GECMR);
		break;
	case 100: /* 100BASE */
		sh_eth_write(ndev, 0x00000010, GECMR);
		break;
	case 1000: /* 1000BASE */
		sh_eth_write(ndev, 0x00000020, GECMR);
		break;
	}
}

/* SH7757 (GETHERC) */
static struct sh_eth_cpu_data sh7757_data_giga = {
	.chip_reset = sh_eth_chip_reset_giga,
	.set_duplex = sh_eth_set_duplex,
	.set_rate = sh_eth_set_rate_giga,

	.register_type = SH_ETH_REG_GIGABIT,

	.ecsr_value = ECSR_ICD | ECSR_MPD,
	.ecsipr_value = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
	.eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,

	.tx_check = EESR_TC1 | EESR_FTC,
	.eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
			  EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
			  EESR_TDE | EESR_ECI,
	.fdr_value = 0x0000072f,

	.irq_flags = IRQF_SHARED,

	.rpadir_value = 2 << 16,
};
static void sh_eth_chip_reset(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	sh_eth_tsu_write(mdp, ARSTR_ARSTR, ARSTR);
}

static void sh_eth_set_rate_gether(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	switch (mdp->speed) {
	case 10: /* 10BASE */
		sh_eth_write(ndev, GECMR_10, GECMR);
		break;
	case 100: /* 100BASE */
		sh_eth_write(ndev, GECMR_100, GECMR);
		break;
	case 1000: /* 1000BASE */
		sh_eth_write(ndev, GECMR_1000, GECMR);
		break;
	}
}

static struct sh_eth_cpu_data sh7734_data = {
	.chip_reset = sh_eth_chip_reset,
	.set_duplex = sh_eth_set_duplex,
	.set_rate = sh_eth_set_rate_gether,

	.register_type = SH_ETH_REG_GIGABIT,

	.ecsr_value = ECSR_ICD | ECSR_MPD,
	.ecsipr_value = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
	.eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,

	.tx_check = EESR_TC1 | EESR_FTC,
	.eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
			  EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
			  EESR_TDE | EESR_ECI,
};

static struct sh_eth_cpu_data sh7763_data = {
	.chip_reset = sh_eth_chip_reset,
	.set_duplex = sh_eth_set_duplex,
	.set_rate = sh_eth_set_rate_gether,

	.register_type = SH_ETH_REG_GIGABIT,

	.ecsr_value = ECSR_ICD | ECSR_MPD,
	.ecsipr_value = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
	.eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,

	.tx_check = EESR_TC1 | EESR_FTC,
	.eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
			  EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
			  EESR_ECI,

	.irq_flags = IRQF_SHARED,
};

static void sh_eth_chip_reset_r8a7740(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	sh_eth_tsu_write(mdp, ARSTR_ARSTR, ARSTR);

	sh_eth_select_mii(ndev);
}

static struct sh_eth_cpu_data r8a7740_data = {
	.chip_reset = sh_eth_chip_reset_r8a7740,
	.set_duplex = sh_eth_set_duplex,
	.set_rate = sh_eth_set_rate_gether,

	.register_type = SH_ETH_REG_GIGABIT,

	.ecsr_value = ECSR_ICD | ECSR_MPD,
	.ecsipr_value = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
	.eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,

	.tx_check = EESR_TC1 | EESR_FTC,
	.eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
			  EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
			  EESR_TDE | EESR_ECI,
	.fdr_value = 0x0000070f,

	.rpadir_value = 2 << 16,
};
static struct sh_eth_cpu_data r7s72100_data = {
	.chip_reset = sh_eth_chip_reset,
	.set_duplex = sh_eth_set_duplex,

	.register_type = SH_ETH_REG_FAST_RZ,

	.ecsr_value = ECSR_ICD,
	.ecsipr_value = ECSIPR_ICDIP,
	.eesipr_value = 0xff7f009f,

	.tx_check = EESR_TC1 | EESR_FTC,
	.eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
			  EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
			  EESR_TDE | EESR_ECI,
	.fdr_value = 0x0000070f,

	.rpadir_value = 2 << 16,
};

static struct sh_eth_cpu_data sh7619_data = {
	.register_type = SH_ETH_REG_FAST_SH3_SH2,

	.eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
};

static struct sh_eth_cpu_data sh771x_data = {
	.register_type = SH_ETH_REG_FAST_SH3_SH2,

	.eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
};
static void sh_eth_set_default_cpu_data(struct sh_eth_cpu_data *cd)
{
	if (!cd->ecsr_value)
		cd->ecsr_value = DEFAULT_ECSR_INIT;

	if (!cd->ecsipr_value)
		cd->ecsipr_value = DEFAULT_ECSIPR_INIT;

	if (!cd->fcftr_value)
		cd->fcftr_value = DEFAULT_FIFO_F_D_RFF |
				  DEFAULT_FIFO_F_D_RFD;

	if (!cd->fdr_value)
		cd->fdr_value = DEFAULT_FDR_INIT;

	if (!cd->tx_check)
		cd->tx_check = DEFAULT_TX_CHECK;

	if (!cd->eesr_err_check)
		cd->eesr_err_check = DEFAULT_EESR_ERR_CHECK;

	if (!cd->trscer_err_mask)
		cd->trscer_err_mask = DEFAULT_TRSCER_ERR_MASK;
}
static int sh_eth_check_reset(struct net_device *ndev)
{
	int cnt = 100;

	/* wait for the self-clearing software-reset bits in EDMR */
	while (cnt-- > 0) {
		if (!(sh_eth_read(ndev, EDMR) & 0x3))
			return 0;
		mdelay(1);
	}

	netdev_err(ndev, "Device reset failed\n");
	return -ETIMEDOUT;
}

static int sh_eth_reset(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int ret = 0;

	if (sh_eth_is_gether(mdp) || sh_eth_is_rz_fast_ether(mdp)) {
		sh_eth_write(ndev, EDSR_ENALL, EDSR);
		sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_GETHER,
			     EDMR);

		ret = sh_eth_check_reset(ndev);

		/* Table Init */
		sh_eth_write(ndev, 0x0, TDLAR);
		sh_eth_write(ndev, 0x0, TDFAR);
		sh_eth_write(ndev, 0x0, TDFXR);
		sh_eth_write(ndev, 0x0, TDFFR);
		sh_eth_write(ndev, 0x0, RDLAR);
		sh_eth_write(ndev, 0x0, RDFAR);
		sh_eth_write(ndev, 0x0, RDFXR);
		sh_eth_write(ndev, 0x0, RDFFR);

		/* Reset HW CRC register */
		if (mdp->cd->hw_crc)
			sh_eth_write(ndev, 0x0, CSMR);

		/* Select MII mode */
		if (mdp->cd->select_mii)
			sh_eth_select_mii(ndev);
	} else {
		sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_ETHER,
			     EDMR);
		mdelay(3);
		sh_eth_write(ndev, sh_eth_read(ndev, EDMR) & ~EDMR_SRST_ETHER,
			     EDMR);
	}

	return ret;
}

static void sh_eth_set_receive_align(struct sk_buff *skb)
{
	uintptr_t reserve = (uintptr_t)skb->data & (SH_ETH_RX_ALIGN - 1);

	if (reserve)
		skb_reserve(skb, SH_ETH_RX_ALIGN - reserve);
}
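
/* Example of the alignment math above: with SH_ETH_RX_ALIGN == 32 (the
 * value assumed here; it is defined in sh_eth.h), a buffer starting at
 * ...0x1234 has (0x1234 & 31) == 0x14, so skb_reserve(skb, 32 - 0x14)
 * advances skb->data to the next 32-byte boundary at ...0x1240.
 */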
/* CPU <-> EDMAC endian convert */
static inline __u32 cpu_to_edmac(struct sh_eth_private *mdp, u32 x)
{
	switch (mdp->edmac_endian) {
	case EDMAC_LITTLE_ENDIAN:
		return cpu_to_le32(x);
	case EDMAC_BIG_ENDIAN:
		return cpu_to_be32(x);
	}
	return x;
}

static inline __u32 edmac_to_cpu(struct sh_eth_private *mdp, u32 x)
{
	switch (mdp->edmac_endian) {
	case EDMAC_LITTLE_ENDIAN:
		return le32_to_cpu(x);
	case EDMAC_BIG_ENDIAN:
		return be32_to_cpu(x);
	}
	return x;
}
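
/* The EDMAC reads and writes DMA descriptors in its own configured byte
 * order, which is independent of the CPU's endianness (mdp->edmac_endian
 * comes from platform data).  Descriptor words therefore always pass
 * through cpu_to_edmac()/edmac_to_cpu() rather than raw assignment, e.g.
 * "rxdesc->status = cpu_to_edmac(mdp, RD_RACT | RD_RFP);" in the ring
 * setup code below.
 */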
/* Program the hardware MAC address from dev->dev_addr. */
static void update_mac_address(struct net_device *ndev)
{
	sh_eth_write(ndev,
		     (ndev->dev_addr[0] << 24) | (ndev->dev_addr[1] << 16) |
		     (ndev->dev_addr[2] << 8) | (ndev->dev_addr[3]), MAHR);
	sh_eth_write(ndev,
		     (ndev->dev_addr[4] << 8) | (ndev->dev_addr[5]), MALR);
}
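
/* Packing example: for the address 02:01:02:03:04:05 the two writes
 * above produce MAHR = 0x02010203 (bytes 0-3) and MALR = 0x0405
 * (bytes 4-5 in the low 16 bits); read_mac_address() below performs the
 * inverse unpacking.
 */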
/* Get MAC address from the SuperH MAC address registers
 *
 * The SuperH Ethernet controller has no ROM for the MAC address.
 * This driver picks up the MAC address that was set by the bootloader
 * (U-Boot or sh-ipl+g).  To use this device, the bootloader must set a
 * MAC address.
 */
static void read_mac_address(struct net_device *ndev, unsigned char *mac)
{
	if (mac[0] || mac[1] || mac[2] || mac[3] || mac[4] || mac[5]) {
		memcpy(ndev->dev_addr, mac, ETH_ALEN);
	} else {
		ndev->dev_addr[0] = (sh_eth_read(ndev, MAHR) >> 24);
		ndev->dev_addr[1] = (sh_eth_read(ndev, MAHR) >> 16) & 0xFF;
		ndev->dev_addr[2] = (sh_eth_read(ndev, MAHR) >> 8) & 0xFF;
		ndev->dev_addr[3] = (sh_eth_read(ndev, MAHR) & 0xFF);
		ndev->dev_addr[4] = (sh_eth_read(ndev, MALR) >> 8) & 0xFF;
		ndev->dev_addr[5] = (sh_eth_read(ndev, MALR) & 0xFF);
	}
}

static u32 sh_eth_get_edtrr_trns(struct sh_eth_private *mdp)
{
	if (sh_eth_is_gether(mdp) || sh_eth_is_rz_fast_ether(mdp))
		return EDTRR_TRNS_GETHER;
	else
		return EDTRR_TRNS_ETHER;
}

struct bb_info {
	void (*set_gate)(void *addr);
	struct mdiobb_ctrl ctrl;
	void *addr;
	u32 mmd_msk;/* MMD */
	u32 mdo_msk;
	u32 mdi_msk;
	u32 mdc_msk;
};

static void bb_set(void *addr, u32 msk)
{
	iowrite32(ioread32(addr) | msk, addr);
}

static void bb_clr(void *addr, u32 msk)
{
	iowrite32((ioread32(addr) & ~msk), addr);
}

static int bb_read(void *addr, u32 msk)
{
	return (ioread32(addr) & msk) != 0;
}
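
/* These three helpers, together with the sh_mmd_ctrl()/sh_set_mdio()/
 * sh_get_mdio()/sh_mdc_ctrl() callbacks below, bit-bang the MDIO
 * protocol through the controller's PIR (PHY interface) register: the
 * mdio-bitbang framework toggles the direction (MMD), data out (MDO) and
 * clock (MDC) bits and samples data in (MDI) to form MDIO frames.  The
 * mask for each bit (PIR_MMD, PIR_MDO, PIR_MDI, PIR_MDC) is filled in by
 * sh_mdio_init().
 */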
/* Data I/O pin control */
static void sh_mmd_ctrl(struct mdiobb_ctrl *ctrl, int bit)
{
	struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);

	if (bitbang->set_gate)
		bitbang->set_gate(bitbang->addr);

	if (bit)
		bb_set(bitbang->addr, bitbang->mmd_msk);
	else
		bb_clr(bitbang->addr, bitbang->mmd_msk);
}

static void sh_set_mdio(struct mdiobb_ctrl *ctrl, int bit)
{
	struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);

	if (bitbang->set_gate)
		bitbang->set_gate(bitbang->addr);

	if (bit)
		bb_set(bitbang->addr, bitbang->mdo_msk);
	else
		bb_clr(bitbang->addr, bitbang->mdo_msk);
}

static int sh_get_mdio(struct mdiobb_ctrl *ctrl)
{
	struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);

	if (bitbang->set_gate)
		bitbang->set_gate(bitbang->addr);

	return bb_read(bitbang->addr, bitbang->mdi_msk);
}

/* MDC pin control */
static void sh_mdc_ctrl(struct mdiobb_ctrl *ctrl, int bit)
{
	struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);

	if (bitbang->set_gate)
		bitbang->set_gate(bitbang->addr);

	if (bit)
		bb_set(bitbang->addr, bitbang->mdc_msk);
	else
		bb_clr(bitbang->addr, bitbang->mdc_msk);
}

/* mdio bus control struct */
static struct mdiobb_ops bb_ops = {
	.owner = THIS_MODULE,
	.set_mdc = sh_mdc_ctrl,
	.set_mdio_dir = sh_mmd_ctrl,
	.set_mdio_data = sh_set_mdio,
	.get_mdio_data = sh_get_mdio,
};
/* free skb and descriptor buffer */
static void sh_eth_ring_free(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int i;

	/* Free Rx skb ringbuffer */
	if (mdp->rx_skbuff) {
		for (i = 0; i < mdp->num_rx_ring; i++)
			dev_kfree_skb(mdp->rx_skbuff[i]);
	}
	kfree(mdp->rx_skbuff);
	mdp->rx_skbuff = NULL;

	/* Free Tx skb ringbuffer */
	if (mdp->tx_skbuff) {
		for (i = 0; i < mdp->num_tx_ring; i++)
			dev_kfree_skb(mdp->tx_skbuff[i]);
	}
	kfree(mdp->tx_skbuff);
	mdp->tx_skbuff = NULL;
}

/* format skb and descriptor buffer */
static void sh_eth_ring_format(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int i;
	struct sk_buff *skb;
	struct sh_eth_rxdesc *rxdesc = NULL;
	struct sh_eth_txdesc *txdesc = NULL;
	int rx_ringsize = sizeof(*rxdesc) * mdp->num_rx_ring;
	int tx_ringsize = sizeof(*txdesc) * mdp->num_tx_ring;
	int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN - 1;
	dma_addr_t dma_addr;

	memset(mdp->rx_ring, 0, rx_ringsize);

	/* build Rx ring buffer */
	for (i = 0; i < mdp->num_rx_ring; i++) {
		mdp->rx_skbuff[i] = NULL;
		skb = netdev_alloc_skb(ndev, skbuff_size);
		if (skb == NULL)
			break;
		sh_eth_set_receive_align(skb);

		rxdesc = &mdp->rx_ring[i];
		/* The size of the buffer is a multiple of 16 bytes. */
		rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16);
		dma_addr = dma_map_single(&ndev->dev, skb->data,
					  rxdesc->buffer_length,
					  DMA_FROM_DEVICE);
		if (dma_mapping_error(&ndev->dev, dma_addr)) {
			kfree_skb(skb);
			break;
		}
		mdp->rx_skbuff[i] = skb;
		rxdesc->addr = dma_addr;
		rxdesc->status = cpu_to_edmac(mdp, RD_RACT | RD_RFP);

		/* Rx descriptor address set */
		if (i == 0) {
			sh_eth_write(ndev, mdp->rx_desc_dma, RDLAR);
			if (sh_eth_is_gether(mdp) ||
			    sh_eth_is_rz_fast_ether(mdp))
				sh_eth_write(ndev, mdp->rx_desc_dma, RDFAR);
		}
	}

	mdp->dirty_rx = (u32) (i - mdp->num_rx_ring);

	/* Mark the last entry as wrapping the ring. */
	rxdesc->status |= cpu_to_edmac(mdp, RD_RDEL);

	memset(mdp->tx_ring, 0, tx_ringsize);

	/* build Tx ring buffer */
	for (i = 0; i < mdp->num_tx_ring; i++) {
		mdp->tx_skbuff[i] = NULL;
		txdesc = &mdp->tx_ring[i];
		txdesc->status = cpu_to_edmac(mdp, TD_TFP);
		txdesc->buffer_length = 0;
		if (i == 0) {
			/* Tx descriptor address set */
			sh_eth_write(ndev, mdp->tx_desc_dma, TDLAR);
			if (sh_eth_is_gether(mdp) ||
			    sh_eth_is_rz_fast_ether(mdp))
				sh_eth_write(ndev, mdp->tx_desc_dma, TDFAR);
		}
	}

	txdesc->status |= cpu_to_edmac(mdp, TD_TDLE);
}

/* Get skb and descriptor buffer */
static int sh_eth_ring_init(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int rx_ringsize, tx_ringsize, ret = 0;

	/* +26 gets the maximum ethernet encapsulation, +7 & ~7 because the
	 * card needs room to do 8 byte alignment, +2 so we can reserve
	 * the first 2 bytes, and +16 gets room for the status word from the
	 * card.
	 */
	mdp->rx_buf_sz = (ndev->mtu <= 1492 ? PKT_BUF_SZ :
			  (((ndev->mtu + 26 + 7) & ~7) + 2 + 16));
	if (mdp->cd->rpadir)
		mdp->rx_buf_sz += NET_IP_ALIGN;
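
	/* Worked example of the sizing above, assuming an MTU of 1500
	 * (anything above 1492 uses the computed size instead of
	 * PKT_BUF_SZ): (1500 + 26 + 7) & ~7 = 1528, + 2 + 16 = 1546 bytes,
	 * plus NET_IP_ALIGN when the chip supports Rx padding (rpadir).
	 * sh_eth_ring_format() then rounds the DMA buffer length itself up
	 * to a 16-byte multiple.
	 */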
	/* Allocate RX and TX skb rings */
	mdp->rx_skbuff = kmalloc_array(mdp->num_rx_ring,
				       sizeof(*mdp->rx_skbuff), GFP_KERNEL);
	if (!mdp->rx_skbuff) {
		ret = -ENOMEM;
		return ret;
	}

	mdp->tx_skbuff = kmalloc_array(mdp->num_tx_ring,
				       sizeof(*mdp->tx_skbuff), GFP_KERNEL);
	if (!mdp->tx_skbuff) {
		ret = -ENOMEM;
		goto skb_ring_free;
	}

	/* Allocate all Rx descriptors. */
	rx_ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring;
	mdp->rx_ring = dma_alloc_coherent(NULL, rx_ringsize, &mdp->rx_desc_dma,
					  GFP_KERNEL);
	if (!mdp->rx_ring) {
		ret = -ENOMEM;
		goto desc_ring_free;
	}

	mdp->dirty_rx = 0;

	/* Allocate all Tx descriptors. */
	tx_ringsize = sizeof(struct sh_eth_txdesc) * mdp->num_tx_ring;
	mdp->tx_ring = dma_alloc_coherent(NULL, tx_ringsize, &mdp->tx_desc_dma,
					  GFP_KERNEL);
	if (!mdp->tx_ring) {
		ret = -ENOMEM;
		goto desc_ring_free;
	}
	return ret;

desc_ring_free:
	/* free DMA buffer */
	dma_free_coherent(NULL, rx_ringsize, mdp->rx_ring, mdp->rx_desc_dma);

skb_ring_free:
	/* Free Rx and Tx skb ring buffer */
	sh_eth_ring_free(ndev);
	mdp->tx_ring = NULL;
	mdp->rx_ring = NULL;

	return ret;
}

static void sh_eth_free_dma_buffer(struct sh_eth_private *mdp)
{
	int ringsize;

	if (mdp->rx_ring) {
		ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring;
		dma_free_coherent(NULL, ringsize, mdp->rx_ring,
				  mdp->rx_desc_dma);
		mdp->rx_ring = NULL;
	}

	if (mdp->tx_ring) {
		ringsize = sizeof(struct sh_eth_txdesc) * mdp->num_tx_ring;
		dma_free_coherent(NULL, ringsize, mdp->tx_ring,
				  mdp->tx_desc_dma);
		mdp->tx_ring = NULL;
	}
}
static int sh_eth_dev_init(struct net_device *ndev, bool start)
{
	int ret = 0;
	struct sh_eth_private *mdp = netdev_priv(ndev);
	u32 val;

	/* Soft Reset */
	ret = sh_eth_reset(ndev);
	if (ret)
		return ret;

	if (mdp->cd->rmiimode)
		sh_eth_write(ndev, 0x1, RMIIMODE);

	/* Descriptor format */
	sh_eth_ring_format(ndev);
	if (mdp->cd->rpadir)
		sh_eth_write(ndev, mdp->cd->rpadir_value, RPADIR);

	/* all sh_eth int mask */
	sh_eth_write(ndev, 0, EESIPR);

#if defined(__LITTLE_ENDIAN)
	if (mdp->cd->hw_swap)
		sh_eth_write(ndev, EDMR_EL, EDMR);
	else
#endif
		sh_eth_write(ndev, 0, EDMR);

	/* FIFO size set */
	sh_eth_write(ndev, mdp->cd->fdr_value, FDR);
	sh_eth_write(ndev, 0, TFTR);

	/* Frame recv control (enable multiple-packets per rx irq) */
	sh_eth_write(ndev, RMCR_RNC, RMCR);

	sh_eth_write(ndev, mdp->cd->trscer_err_mask, TRSCER);

	if (mdp->cd->bculr)
		sh_eth_write(ndev, 0x800, BCULR);	/* Burst cycle set */

	sh_eth_write(ndev, mdp->cd->fcftr_value, FCFTR);

	if (!mdp->cd->no_trimd)
		sh_eth_write(ndev, 0, TRIMD);

	/* Recv frame limit set register */
	sh_eth_write(ndev, ndev->mtu + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN,
		     RFLR);

	sh_eth_write(ndev, sh_eth_read(ndev, EESR), EESR);
	if (start) {
		mdp->irq_enabled = true;
		sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
	}

	/* PAUSE Prohibition */
	val = (sh_eth_read(ndev, ECMR) & ECMR_DM) |
		ECMR_ZPF | (mdp->duplex ? ECMR_DM : 0) | ECMR_TE | ECMR_RE;

	sh_eth_write(ndev, val, ECMR);

	if (mdp->cd->set_rate)
		mdp->cd->set_rate(ndev);

	/* E-MAC Status Register clear */
	sh_eth_write(ndev, mdp->cd->ecsr_value, ECSR);

	/* E-MAC Interrupt Enable register */
	if (start)
		sh_eth_write(ndev, mdp->cd->ecsipr_value, ECSIPR);

	/* Set MAC address */
	update_mac_address(ndev);

	if (mdp->cd->apr)
		sh_eth_write(ndev, APR_AP, APR);
	if (mdp->cd->mpr)
		sh_eth_write(ndev, MPR_MP, MPR);
	if (mdp->cd->tpauser)
		sh_eth_write(ndev, TPAUSER_UNLIMITED, TPAUSER);

	if (start) {
		/* Setting the Rx mode will start the Rx process. */
		sh_eth_write(ndev, EDRRR_R, EDRRR);

		netif_start_queue(ndev);
	}

	return ret;
}

static void sh_eth_dev_exit(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int i;

	/* Deactivate all TX descriptors, so DMA should stop at next
	 * packet boundary if it's currently running
	 */
	for (i = 0; i < mdp->num_tx_ring; i++)
		mdp->tx_ring[i].status &= ~cpu_to_edmac(mdp, TD_TACT);

	/* Disable TX FIFO egress to MAC */
	sh_eth_rcv_snd_disable(ndev);

	/* Stop RX DMA at next packet boundary */
	sh_eth_write(ndev, 0, EDRRR);

	/* Aside from TX DMA, we can't tell when the hardware is
	 * really stopped, so we need to reset to make sure.
	 * Before doing that, wait for long enough to *probably*
	 * finish transmitting the last packet and poll stats.
	 */
	msleep(2); /* max frame time at 10 Mbps < 1250 us */
	sh_eth_get_stats(ndev);
	sh_eth_reset(ndev);

	/* Set MAC address again */
	update_mac_address(ndev);
}
/* free Tx skb function */
static int sh_eth_txfree(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	struct sh_eth_txdesc *txdesc;
	int free_num = 0;
	int entry;

	for (; mdp->cur_tx - mdp->dirty_tx > 0; mdp->dirty_tx++) {
		entry = mdp->dirty_tx % mdp->num_tx_ring;
		txdesc = &mdp->tx_ring[entry];
		if (txdesc->status & cpu_to_edmac(mdp, TD_TACT))
			break;
		/* TACT bit must be checked before all the following reads */
		rmb();
		netif_info(mdp, tx_done, ndev,
			   "tx entry %d status 0x%08x\n",
			   entry, edmac_to_cpu(mdp, txdesc->status));
		/* Free the original skb. */
		if (mdp->tx_skbuff[entry]) {
			dma_unmap_single(&ndev->dev, txdesc->addr,
					 txdesc->buffer_length, DMA_TO_DEVICE);
			dev_kfree_skb_irq(mdp->tx_skbuff[entry]);
			mdp->tx_skbuff[entry] = NULL;
			free_num++;
		}
		txdesc->status = cpu_to_edmac(mdp, TD_TFP);
		if (entry >= mdp->num_tx_ring - 1)
			txdesc->status |= cpu_to_edmac(mdp, TD_TDLE);

		ndev->stats.tx_packets++;
		ndev->stats.tx_bytes += txdesc->buffer_length;
	}
	return free_num;
}
/* Packet receive function */
static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	struct sh_eth_rxdesc *rxdesc;

	int entry = mdp->cur_rx % mdp->num_rx_ring;
	int boguscnt = (mdp->dirty_rx + mdp->num_rx_ring) - mdp->cur_rx;
	int limit;
	struct sk_buff *skb;
	u16 pkt_len = 0;
	u32 desc_status;
	int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN - 1;
	dma_addr_t dma_addr;

	boguscnt = min(boguscnt, *quota);
	limit = boguscnt;
	rxdesc = &mdp->rx_ring[entry];
	while (!(rxdesc->status & cpu_to_edmac(mdp, RD_RACT))) {
		/* RACT bit must be checked before all the following reads */
		rmb();
		desc_status = edmac_to_cpu(mdp, rxdesc->status);
		pkt_len = rxdesc->frame_length;

		if (--boguscnt < 0)
			break;

		netif_info(mdp, rx_status, ndev,
			   "rx entry %d status 0x%08x len %d\n",
			   entry, desc_status, pkt_len);

		if (!(desc_status & RDFEND))
			ndev->stats.rx_length_errors++;

		/* In case of almost all GETHER/ETHERs, the Receive Frame State
		 * (RFS) bits in the Receive Descriptor 0 are from bit 9 to
		 * bit 0. However, in case of the R8A7740 and R7S72100
		 * the RFS bits are from bit 25 to bit 16. So, the
		 * driver needs right shifting by 16.
		 */
		if (mdp->cd->shift_rd0)
			desc_status >>= 16;

		if (desc_status & (RD_RFS1 | RD_RFS2 | RD_RFS3 | RD_RFS4 |
				   RD_RFS5 | RD_RFS6 | RD_RFS10)) {
			ndev->stats.rx_errors++;
			if (desc_status & RD_RFS1)
				ndev->stats.rx_crc_errors++;
			if (desc_status & RD_RFS2)
				ndev->stats.rx_frame_errors++;
			if (desc_status & RD_RFS3)
				ndev->stats.rx_length_errors++;
			if (desc_status & RD_RFS4)
				ndev->stats.rx_length_errors++;
			if (desc_status & RD_RFS6)
				ndev->stats.rx_missed_errors++;
			if (desc_status & RD_RFS10)
				ndev->stats.rx_over_errors++;
		} else {
			if (!mdp->cd->hw_swap)
				sh_eth_soft_swap(
					phys_to_virt(ALIGN(rxdesc->addr, 4)),
					pkt_len + 2);
			skb = mdp->rx_skbuff[entry];
			mdp->rx_skbuff[entry] = NULL;
			if (mdp->cd->rpadir)
				skb_reserve(skb, NET_IP_ALIGN);
			dma_unmap_single(&ndev->dev, rxdesc->addr,
					 ALIGN(mdp->rx_buf_sz, 16),
					 DMA_FROM_DEVICE);
			skb_put(skb, pkt_len);
			skb->protocol = eth_type_trans(skb, ndev);
			netif_receive_skb(skb);
			ndev->stats.rx_packets++;
			ndev->stats.rx_bytes += pkt_len;
			if (desc_status & RD_RFS8)
				ndev->stats.multicast++;
		}
		entry = (++mdp->cur_rx) % mdp->num_rx_ring;
		rxdesc = &mdp->rx_ring[entry];
	}

	/* Refill the Rx ring buffers. */
	for (; mdp->cur_rx - mdp->dirty_rx > 0; mdp->dirty_rx++) {
		entry = mdp->dirty_rx % mdp->num_rx_ring;
		rxdesc = &mdp->rx_ring[entry];
		/* The size of the buffer is aligned to a 16-byte boundary. */
		rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16);

		if (mdp->rx_skbuff[entry] == NULL) {
			skb = netdev_alloc_skb(ndev, skbuff_size);
			if (skb == NULL)
				break;	/* Better luck next round. */
			sh_eth_set_receive_align(skb);
			dma_addr = dma_map_single(&ndev->dev, skb->data,
						  rxdesc->buffer_length,
						  DMA_FROM_DEVICE);
			if (dma_mapping_error(&ndev->dev, dma_addr)) {
				kfree_skb(skb);
				break;
			}
			mdp->rx_skbuff[entry] = skb;

			skb_checksum_none_assert(skb);
			rxdesc->addr = dma_addr;
		}
		wmb(); /* RACT bit must be set after all the above writes */
		if (entry >= mdp->num_rx_ring - 1)
			rxdesc->status |=
				cpu_to_edmac(mdp, RD_RACT | RD_RFP | RD_RDEL);
		else
			rxdesc->status |=
				cpu_to_edmac(mdp, RD_RACT | RD_RFP);
	}

	/* Restart Rx engine if stopped. */
	/* If we don't need to check status, don't. -KDU */
	if (!(sh_eth_read(ndev, EDRRR) & EDRRR_R)) {
		/* fix the values for the next receiving if RDE is set */
		if (intr_status & EESR_RDE &&
		    mdp->reg_offset[RDFAR] != SH_ETH_OFFSET_INVALID) {
			u32 count = (sh_eth_read(ndev, RDFAR) -
				     sh_eth_read(ndev, RDLAR)) >> 4;

			mdp->cur_rx = count;
			mdp->dirty_rx = count;
		}
		sh_eth_write(ndev, EDRRR_R, EDRRR);
	}

	*quota -= limit - boguscnt - 1;

	return *quota <= 0;
}
static void sh_eth_rcv_snd_disable(struct net_device *ndev)
{
	/* disable tx and rx */
	sh_eth_write(ndev, sh_eth_read(ndev, ECMR) &
		~(ECMR_RE | ECMR_TE), ECMR);
}

static void sh_eth_rcv_snd_enable(struct net_device *ndev)
{
	/* enable tx and rx */
	sh_eth_write(ndev, sh_eth_read(ndev, ECMR) |
		(ECMR_RE | ECMR_TE), ECMR);
}

/* error control function */
static void sh_eth_error(struct net_device *ndev, u32 intr_status)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	u32 felic_stat;
	u32 link_stat;
	u32 mask;

	if (intr_status & EESR_ECI) {
		felic_stat = sh_eth_read(ndev, ECSR);
		sh_eth_write(ndev, felic_stat, ECSR);	/* clear int */
		if (felic_stat & ECSR_ICD)
			ndev->stats.tx_carrier_errors++;
		if (felic_stat & ECSR_LCHNG) {
			/* Link Changed */
			if (mdp->cd->no_psr || mdp->no_ether_link) {
				goto ignore_link;
			} else {
				link_stat = (sh_eth_read(ndev, PSR));
				if (mdp->ether_link_active_low)
					link_stat = ~link_stat;
			}
			if (!(link_stat & PHY_ST_LINK)) {
				sh_eth_rcv_snd_disable(ndev);
			} else {
				/* Link Up */
				sh_eth_write(ndev, sh_eth_read(ndev, EESIPR) &
						   ~DMAC_M_ECI, EESIPR);
				/* clear int */
				sh_eth_write(ndev, sh_eth_read(ndev, ECSR),
					     ECSR);
				sh_eth_write(ndev, sh_eth_read(ndev, EESIPR) |
						   DMAC_M_ECI, EESIPR);
				/* enable tx and rx */
				sh_eth_rcv_snd_enable(ndev);
			}
		}
	}

ignore_link:
	if (intr_status & EESR_TWB) {
		/* Unused write back interrupt */
		if (intr_status & EESR_TABT) {	/* Transmit Abort int */
			ndev->stats.tx_aborted_errors++;
			netif_err(mdp, tx_err, ndev, "Transmit Abort\n");
		}
	}

	if (intr_status & EESR_RABT) {
		/* Receive Abort int */
		if (intr_status & EESR_RFRMER) {
			/* Receive Frame Overflow int */
			ndev->stats.rx_frame_errors++;
		}
	}

	if (intr_status & EESR_TDE) {
		/* Transmit Descriptor Empty int */
		ndev->stats.tx_fifo_errors++;
		netif_err(mdp, tx_err, ndev, "Transmit Descriptor Empty\n");
	}

	if (intr_status & EESR_TFE) {
		/* FIFO underflow */
		ndev->stats.tx_fifo_errors++;
		netif_err(mdp, tx_err, ndev, "Transmit FIFO Under flow\n");
	}

	if (intr_status & EESR_RDE) {
		/* Receive Descriptor Empty int */
		ndev->stats.rx_over_errors++;
	}

	if (intr_status & EESR_RFE) {
		/* Receive FIFO Overflow int */
		ndev->stats.rx_fifo_errors++;
	}

	if (!mdp->cd->no_ade && (intr_status & EESR_ADE)) {
		/* Address Error */
		ndev->stats.tx_fifo_errors++;
		netif_err(mdp, tx_err, ndev, "Address Error\n");
	}

	mask = EESR_TWB | EESR_TABT | EESR_ADE | EESR_TDE | EESR_TFE;
	if (mdp->cd->no_ade)
		mask &= ~EESR_ADE;
	if (intr_status & mask) {
		/* Tx error */
		u32 edtrr = sh_eth_read(ndev, EDTRR);

		/* dmesg */
		netdev_err(ndev, "TX error. status=%8.8x cur_tx=%8.8x dirty_tx=%8.8x state=%8.8x EDTRR=%8.8x.\n",
			   intr_status, mdp->cur_tx, mdp->dirty_tx,
			   (u32)ndev->state, edtrr);
		/* dirty buffer free */
		sh_eth_txfree(ndev);

		if (edtrr ^ sh_eth_get_edtrr_trns(mdp)) {
			/* tx dma start */
			sh_eth_write(ndev, sh_eth_get_edtrr_trns(mdp), EDTRR);
		}
		/* wakeup */
		netif_wake_queue(ndev);
	}
}
static irqreturn_t sh_eth_interrupt(int irq, void *netdev)
{
	struct net_device *ndev = netdev;
	struct sh_eth_private *mdp = netdev_priv(ndev);
	struct sh_eth_cpu_data *cd = mdp->cd;
	irqreturn_t ret = IRQ_NONE;
	u32 intr_status, intr_enable;

	spin_lock(&mdp->lock);

	/* Get interrupt status */
	intr_status = sh_eth_read(ndev, EESR);
	/* Mask it with the interrupt mask, forcing ECI interrupt to be always
	 * enabled since it's the one that comes through regardless of the
	 * mask, and we need to fully handle it in sh_eth_error() in order to
	 * quench it as it doesn't get cleared by just writing 1 to the ECI
	 * bit...
	 */
	intr_enable = sh_eth_read(ndev, EESIPR);
	intr_status &= intr_enable | DMAC_M_ECI;
	if (intr_status & (EESR_RX_CHECK | cd->tx_check | cd->eesr_err_check))
		ret = IRQ_HANDLED;
	else
		goto out;

	if (unlikely(!mdp->irq_enabled)) {
		sh_eth_write(ndev, 0, EESIPR);
		goto out;
	}

	if (intr_status & EESR_RX_CHECK) {
		if (napi_schedule_prep(&mdp->napi)) {
			/* Mask Rx interrupts */
			sh_eth_write(ndev, intr_enable & ~EESR_RX_CHECK,
				     EESIPR);
			__napi_schedule(&mdp->napi);
		} else {
			netdev_warn(ndev,
				    "ignoring interrupt, status 0x%08x, mask 0x%08x.\n",
				    intr_status, intr_enable);
		}
	}

	/* Tx interrupt */
	if (intr_status & cd->tx_check) {
		/* Clear Tx interrupts */
		sh_eth_write(ndev, intr_status & cd->tx_check, EESR);

		sh_eth_txfree(ndev);
		netif_wake_queue(ndev);
	}

	if (intr_status & cd->eesr_err_check) {
		/* Clear error interrupts */
		sh_eth_write(ndev, intr_status & cd->eesr_err_check, EESR);

		sh_eth_error(ndev, intr_status);
	}

out:
	spin_unlock(&mdp->lock);

	return ret;
}

static int sh_eth_poll(struct napi_struct *napi, int budget)
{
	struct sh_eth_private *mdp = container_of(napi, struct sh_eth_private,
						  napi);
	struct net_device *ndev = napi->dev;
	int quota = budget;
	u32 intr_status;

	for (;;) {
		intr_status = sh_eth_read(ndev, EESR);
		if (!(intr_status & EESR_RX_CHECK))
			break;
		/* Clear Rx interrupts */
		sh_eth_write(ndev, intr_status & EESR_RX_CHECK, EESR);

		if (sh_eth_rx(ndev, intr_status, &quota))
			goto out;
	}

	napi_complete(napi);

	/* Reenable Rx interrupts */
	if (mdp->irq_enabled)
		sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
out:
	return budget - quota;
}
/* PHY state control function */
static void sh_eth_adjust_link(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	struct phy_device *phydev = mdp->phydev;
	int new_state = 0;

	if (phydev->link) {
		if (phydev->duplex != mdp->duplex) {
			new_state = 1;
			mdp->duplex = phydev->duplex;
			if (mdp->cd->set_duplex)
				mdp->cd->set_duplex(ndev);
		}

		if (phydev->speed != mdp->speed) {
			new_state = 1;
			mdp->speed = phydev->speed;
			if (mdp->cd->set_rate)
				mdp->cd->set_rate(ndev);
		}
		if (!mdp->link) {
			sh_eth_write(ndev,
				     sh_eth_read(ndev, ECMR) & ~ECMR_TXF,
				     ECMR);
			new_state = 1;
			mdp->link = phydev->link;
			if (mdp->cd->no_psr || mdp->no_ether_link)
				sh_eth_rcv_snd_enable(ndev);
		}
	} else if (mdp->link) {
		new_state = 1;
		mdp->link = 0;
		mdp->speed = 0;
		mdp->duplex = -1;
		if (mdp->cd->no_psr || mdp->no_ether_link)
			sh_eth_rcv_snd_disable(ndev);
	}

	if (new_state && netif_msg_link(mdp))
		phy_print_status(phydev);
}

/* PHY init function */
static int sh_eth_phy_init(struct net_device *ndev)
{
	struct device_node *np = ndev->dev.parent->of_node;
	struct sh_eth_private *mdp = netdev_priv(ndev);
	struct phy_device *phydev = NULL;

	mdp->link = 0;
	mdp->speed = 0;
	mdp->duplex = -1;

	/* Try connect to PHY */
	if (np) {
		struct device_node *pn;

		pn = of_parse_phandle(np, "phy-handle", 0);
		phydev = of_phy_connect(ndev, pn,
					sh_eth_adjust_link, 0,
					mdp->phy_interface);
		if (!phydev)
			phydev = ERR_PTR(-ENOENT);
	} else {
		char phy_id[MII_BUS_ID_SIZE + 3];

		snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT,
			 mdp->mii_bus->id, mdp->phy_id);

		phydev = phy_connect(ndev, phy_id, sh_eth_adjust_link,
				     mdp->phy_interface);
	}

	if (IS_ERR(phydev)) {
		netdev_err(ndev, "failed to connect PHY\n");
		return PTR_ERR(phydev);
	}

	netdev_info(ndev, "attached PHY %d (IRQ %d) to driver %s\n",
		    phydev->addr, phydev->irq, phydev->drv->name);

	mdp->phydev = phydev;

	return 0;
}
/* PHY control start function */
static int sh_eth_phy_start(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int ret;

	ret = sh_eth_phy_init(ndev);
	if (ret)
		return ret;

	phy_start(mdp->phydev);

	return 0;
}

static int sh_eth_get_settings(struct net_device *ndev,
			       struct ethtool_cmd *ecmd)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	unsigned long flags;
	int ret;

	if (!mdp->phydev)
		return -ENODEV;

	spin_lock_irqsave(&mdp->lock, flags);
	ret = phy_ethtool_gset(mdp->phydev, ecmd);
	spin_unlock_irqrestore(&mdp->lock, flags);

	return ret;
}

static int sh_eth_set_settings(struct net_device *ndev,
			       struct ethtool_cmd *ecmd)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&mdp->lock, flags);

	/* disable tx and rx */
	sh_eth_rcv_snd_disable(ndev);

	ret = phy_ethtool_sset(mdp->phydev, ecmd);
	if (ret)
		goto error_exit;

	if (ecmd->duplex == DUPLEX_FULL)
		mdp->duplex = 1;
	else
		mdp->duplex = 0;

	if (mdp->cd->set_duplex)
		mdp->cd->set_duplex(ndev);

error_exit:
	mdelay(1);

	/* enable tx and rx */
	sh_eth_rcv_snd_enable(ndev);

	spin_unlock_irqrestore(&mdp->lock, flags);

	return ret;
}
/* If it is ever necessary to increase SH_ETH_REG_DUMP_MAX_REGS, the
 * version must be bumped as well.  Just adding registers up to that
 * limit is fine, as long as the existing register indices don't
 * change.
 */
#define SH_ETH_REG_DUMP_VERSION		1
#define SH_ETH_REG_DUMP_MAX_REGS	256

static size_t __sh_eth_get_regs(struct net_device *ndev, u32 *buf)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	struct sh_eth_cpu_data *cd = mdp->cd;
	u32 *valid_map;
	size_t len;
	int i;

	BUILD_BUG_ON(SH_ETH_MAX_REGISTER_OFFSET > SH_ETH_REG_DUMP_MAX_REGS);

	/* Dump starts with a bitmap that tells ethtool which
	 * registers are defined for this chip.
	 */
	len = DIV_ROUND_UP(SH_ETH_REG_DUMP_MAX_REGS, 32);
	if (buf) {
		valid_map = buf;
		buf += len;
	} else {
		valid_map = NULL;
	}

	/* Add a register to the dump, if it has a defined offset.
	 * This automatically skips most undefined registers, but for
	 * some it is also necessary to check a capability flag in
	 * struct sh_eth_cpu_data.
	 */
#define mark_reg_valid(reg) valid_map[reg / 32] |= 1U << (reg % 32)
#define add_reg_from(reg, read_expr) do {				\
		if (mdp->reg_offset[reg] != SH_ETH_OFFSET_INVALID) {	\
			if (buf) {					\
				mark_reg_valid(reg);			\
				*buf++ = read_expr;			\
			}						\
			len++;						\
		}							\
	} while (0)
#define add_reg(reg) add_reg_from(reg, sh_eth_read(ndev, reg))
#define add_tsu_reg(reg) add_reg_from(reg, sh_eth_tsu_read(mdp, reg))
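
	/* Example of the bitmap layout produced above: register index 40
	 * sets bit 8 of valid_map[1] (40 / 32 == 1, 40 % 32 == 8).  With
	 * buf == NULL nothing is written and only len is accumulated, which
	 * is how sh_eth_get_regs_len() below sizes the dump without touching
	 * the hardware.
	 */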
	add_tsu_reg(TSU_CTRST);
	add_tsu_reg(TSU_FWEN0);
	add_tsu_reg(TSU_FWEN1);
	add_tsu_reg(TSU_FCM);
	add_tsu_reg(TSU_BSYSL0);
	add_tsu_reg(TSU_BSYSL1);
	add_tsu_reg(TSU_PRISL0);
	add_tsu_reg(TSU_PRISL1);
	add_tsu_reg(TSU_FWSL0);
	add_tsu_reg(TSU_FWSL1);
	add_tsu_reg(TSU_FWSLC);
	add_tsu_reg(TSU_QTAG0);
	add_tsu_reg(TSU_QTAG1);
	add_tsu_reg(TSU_QTAGM0);
	add_tsu_reg(TSU_QTAGM1);
	add_tsu_reg(TSU_FWSR);
	add_tsu_reg(TSU_FWINMK);
	add_tsu_reg(TSU_ADQT0);
	add_tsu_reg(TSU_ADQT1);
	add_tsu_reg(TSU_VTAG0);
	add_tsu_reg(TSU_VTAG1);
	add_tsu_reg(TSU_ADSBSY);
	add_tsu_reg(TSU_TEN);
	add_tsu_reg(TSU_POST1);
	add_tsu_reg(TSU_POST2);
	add_tsu_reg(TSU_POST3);
	add_tsu_reg(TSU_POST4);
	if (mdp->reg_offset[TSU_ADRH0] != SH_ETH_OFFSET_INVALID) {
		/* This is the start of a table, not just a single
		 * register.
		 */
		if (buf) {
			mark_reg_valid(TSU_ADRH0);
			for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES * 2; i++)
				*buf++ = ioread32(mdp->tsu_addr +
						  mdp->reg_offset[TSU_ADRH0] +
						  i * 4);
		}
		len += SH_ETH_TSU_CAM_ENTRIES * 2;
	}

#undef mark_reg_valid
#undef add_reg_from
#undef add_reg
#undef add_tsu_reg

	return len * 4;
}
static int sh_eth_get_regs_len(struct net_device *ndev)
{
	return __sh_eth_get_regs(ndev, NULL);
}

static void sh_eth_get_regs(struct net_device *ndev, struct ethtool_regs *regs,
			    void *buf)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	regs->version = SH_ETH_REG_DUMP_VERSION;

	pm_runtime_get_sync(&mdp->pdev->dev);
	__sh_eth_get_regs(ndev, buf);
	pm_runtime_put_sync(&mdp->pdev->dev);
}

static int sh_eth_nway_reset(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&mdp->lock, flags);
	ret = phy_start_aneg(mdp->phydev);
	spin_unlock_irqrestore(&mdp->lock, flags);

	return ret;
}

static u32 sh_eth_get_msglevel(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	return mdp->msg_enable;
}

static void sh_eth_set_msglevel(struct net_device *ndev, u32 value)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	mdp->msg_enable = value;
}

static const char sh_eth_gstrings_stats[][ETH_GSTRING_LEN] = {
	"rx_current", "tx_current",
	"rx_dirty", "tx_dirty",
};
#define SH_ETH_STATS_LEN  ARRAY_SIZE(sh_eth_gstrings_stats)

static int sh_eth_get_sset_count(struct net_device *netdev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return SH_ETH_STATS_LEN;
	default:
		return -EOPNOTSUPP;
	}
}

static void sh_eth_get_ethtool_stats(struct net_device *ndev,
				     struct ethtool_stats *stats, u64 *data)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int i = 0;

	/* device-specific stats */
	data[i++] = mdp->cur_rx;
	data[i++] = mdp->cur_tx;
	data[i++] = mdp->dirty_rx;
	data[i++] = mdp->dirty_tx;
}

static void sh_eth_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
{
	switch (stringset) {
	case ETH_SS_STATS:
		memcpy(data, *sh_eth_gstrings_stats,
		       sizeof(sh_eth_gstrings_stats));
		break;
	}
}

static void sh_eth_get_ringparam(struct net_device *ndev,
				 struct ethtool_ringparam *ring)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	ring->rx_max_pending = RX_RING_MAX;
	ring->tx_max_pending = TX_RING_MAX;
	ring->rx_pending = mdp->num_rx_ring;
	ring->tx_pending = mdp->num_tx_ring;
}

static int sh_eth_set_ringparam(struct net_device *ndev,
				struct ethtool_ringparam *ring)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int ret;

	if (ring->tx_pending > TX_RING_MAX ||
	    ring->rx_pending > RX_RING_MAX ||
	    ring->tx_pending < TX_RING_MIN ||
	    ring->rx_pending < RX_RING_MIN)
		return -EINVAL;
	if (ring->rx_mini_pending || ring->rx_jumbo_pending)
		return -EINVAL;

	if (netif_running(ndev)) {
		netif_device_detach(ndev);
		netif_tx_disable(ndev);

		/* Serialise with the interrupt handler and NAPI, then
		 * disable interrupts.  We have to clear the
		 * irq_enabled flag first to ensure that interrupts
		 * won't be re-enabled.
		 */
		mdp->irq_enabled = false;
		synchronize_irq(ndev->irq);
		napi_synchronize(&mdp->napi);
		sh_eth_write(ndev, 0x0000, EESIPR);

		sh_eth_dev_exit(ndev);

		/* Free all the skbuffs in the Rx queue. */
		sh_eth_ring_free(ndev);
		/* Free DMA buffer */
		sh_eth_free_dma_buffer(mdp);
	}

	/* Set new parameters */
	mdp->num_rx_ring = ring->rx_pending;
	mdp->num_tx_ring = ring->tx_pending;

	if (netif_running(ndev)) {
		ret = sh_eth_ring_init(ndev);
		if (ret < 0) {
			netdev_err(ndev, "%s: sh_eth_ring_init failed.\n",
				   __func__);
			return ret;
		}
		ret = sh_eth_dev_init(ndev, false);
		if (ret < 0) {
			netdev_err(ndev, "%s: sh_eth_dev_init failed.\n",
				   __func__);
			return ret;
		}

		mdp->irq_enabled = true;
		sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
		/* Setting the Rx mode will start the Rx process. */
		sh_eth_write(ndev, EDRRR_R, EDRRR);
		netif_device_attach(ndev);
	}

	return 0;
}
2277 .get_settings = sh_eth_get_settings,
2278 .set_settings = sh_eth_set_settings,
2279 .get_regs_len = sh_eth_get_regs_len,
2280 .get_regs = sh_eth_get_regs,
2281 .nway_reset = sh_eth_nway_reset,
2282 .get_msglevel = sh_eth_get_msglevel,
2283 .set_msglevel = sh_eth_set_msglevel,
2284 .get_link = ethtool_op_get_link,
2285 .get_strings = sh_eth_get_strings,
2286 .get_ethtool_stats = sh_eth_get_ethtool_stats,
2287 .get_sset_count = sh_eth_get_sset_count,
2288 .get_ringparam = sh_eth_get_ringparam,
2289 .set_ringparam = sh_eth_set_ringparam,
2292 /* network device open function */
2293 static int sh_eth_open(struct net_device *ndev)
2296 struct sh_eth_private *mdp = netdev_priv(ndev);
2298 pm_runtime_get_sync(&mdp->pdev->dev);
2300 napi_enable(&mdp->napi);
2302 ret = request_irq(ndev->irq, sh_eth_interrupt,
2303 mdp->cd->irq_flags, ndev->name, ndev);
2305 netdev_err(ndev, "Can not assign IRQ number\n");
2309 /* Descriptor set */
2310 ret = sh_eth_ring_init(ndev);
2315 ret = sh_eth_dev_init(ndev, true);
2319 /* PHY control start*/
2320 ret = sh_eth_phy_start(ndev);
2329 free_irq(ndev->irq, ndev);
2331 napi_disable(&mdp->napi);
2332 pm_runtime_put_sync(&mdp->pdev->dev);
/* Timeout function */
static void sh_eth_tx_timeout(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	struct sh_eth_rxdesc *rxdesc;
	int i;

	netif_stop_queue(ndev);

	netif_err(mdp, timer, ndev,
		  "transmit timed out, status %8.8x, resetting...\n",
		  sh_eth_read(ndev, EESR));

	/* tx_errors count up */
	ndev->stats.tx_errors++;

	/* Free all the skbuffs in the Rx queue. */
	for (i = 0; i < mdp->num_rx_ring; i++) {
		rxdesc = &mdp->rx_ring[i];
		rxdesc->status = 0;
		rxdesc->addr = 0xBADF00D0;
		dev_kfree_skb(mdp->rx_skbuff[i]);
		mdp->rx_skbuff[i] = NULL;
	}
	for (i = 0; i < mdp->num_tx_ring; i++) {
		dev_kfree_skb(mdp->tx_skbuff[i]);
		mdp->tx_skbuff[i] = NULL;
	}

	/* device init */
	sh_eth_dev_init(ndev, true);
}

/* Packet transmit function */
static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	struct sh_eth_txdesc *txdesc;
	u32 entry;
	unsigned long flags;

	spin_lock_irqsave(&mdp->lock, flags);
	if ((mdp->cur_tx - mdp->dirty_tx) >= (mdp->num_tx_ring - 4)) {
		if (!sh_eth_txfree(ndev)) {
			netif_warn(mdp, tx_queued, ndev, "TxFD exhausted.\n");
			netif_stop_queue(ndev);
			spin_unlock_irqrestore(&mdp->lock, flags);
			return NETDEV_TX_BUSY;
		}
	}
	spin_unlock_irqrestore(&mdp->lock, flags);

	if (skb_put_padto(skb, ETH_ZLEN))
		return NETDEV_TX_OK;

	entry = mdp->cur_tx % mdp->num_tx_ring;
	mdp->tx_skbuff[entry] = skb;
	txdesc = &mdp->tx_ring[entry];
	/* soft swap. */
	if (!mdp->cd->hw_swap)
		sh_eth_soft_swap(phys_to_virt(ALIGN(txdesc->addr, 4)),
				 skb->len + 2);
	txdesc->addr = dma_map_single(&ndev->dev, skb->data, skb->len,
				      DMA_TO_DEVICE);
	if (dma_mapping_error(&ndev->dev, txdesc->addr)) {
		kfree_skb(skb);
		return NETDEV_TX_OK;
	}
	txdesc->buffer_length = skb->len;

	wmb(); /* TACT bit must be set after all the above writes */
	if (entry >= mdp->num_tx_ring - 1)
		txdesc->status |= cpu_to_edmac(mdp, TD_TACT | TD_TDLE);
	else
		txdesc->status |= cpu_to_edmac(mdp, TD_TACT);

	mdp->cur_tx++;

	if (!(sh_eth_read(ndev, EDTRR) & sh_eth_get_edtrr_trns(mdp)))
		sh_eth_write(ndev, sh_eth_get_edtrr_trns(mdp), EDTRR);

	return NETDEV_TX_OK;
}

/* The statistics registers have write-clear behaviour, which means we
 * will lose any increment between the read and write.  We mitigate
 * this by only clearing when we read a non-zero value, so we will
 * never falsely report a total of zero.
 */
static void
sh_eth_update_stat(struct net_device *ndev, unsigned long *stat, int reg)
{
	u32 delta = sh_eth_read(ndev, reg);

	if (delta) {
		*stat += delta;
		sh_eth_write(ndev, 0, reg);
	}
}

static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	if (sh_eth_is_rz_fast_ether(mdp))
		return &ndev->stats;

	if (!mdp->is_opened)
		return &ndev->stats;

	sh_eth_update_stat(ndev, &ndev->stats.tx_dropped, TROCR);
	sh_eth_update_stat(ndev, &ndev->stats.collisions, CDCR);
	sh_eth_update_stat(ndev, &ndev->stats.tx_carrier_errors, LCCR);

	if (sh_eth_is_gether(mdp)) {
		sh_eth_update_stat(ndev, &ndev->stats.tx_carrier_errors,
				   CERCR);
		sh_eth_update_stat(ndev, &ndev->stats.tx_carrier_errors,
				   CEECR);
	} else {
		sh_eth_update_stat(ndev, &ndev->stats.tx_carrier_errors,
				   CNDCR);
	}

	return &ndev->stats;
}

/* device close function */
static int sh_eth_close(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	netif_stop_queue(ndev);

	/* Serialise with the interrupt handler and NAPI, then disable
	 * interrupts.  We have to clear the irq_enabled flag first to
	 * ensure that interrupts won't be re-enabled.
	 */
	mdp->irq_enabled = false;
	synchronize_irq(ndev->irq);
	napi_disable(&mdp->napi);
	sh_eth_write(ndev, 0x0000, EESIPR);

	sh_eth_dev_exit(ndev);

	/* PHY Disconnect */
	if (mdp->phydev) {
		phy_stop(mdp->phydev);
		phy_disconnect(mdp->phydev);
		mdp->phydev = NULL;
	}

	free_irq(ndev->irq, ndev);

	/* Free all the skbuffs in the Rx queue. */
	sh_eth_ring_free(ndev);

	/* free DMA buffer */
	sh_eth_free_dma_buffer(mdp);

	pm_runtime_put_sync(&mdp->pdev->dev);

	return 0;
}

/* ioctl to device function */
static int sh_eth_do_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	struct phy_device *phydev = mdp->phydev;

	if (!netif_running(ndev))
		return -EINVAL;

	if (!phydev)
		return -ENODEV;

	return phy_mii_ioctl(phydev, rq, cmd);
}
/* For TSU_POSTn.  Please refer to the manual about these (strange) bitfields */
static void *sh_eth_tsu_get_post_reg_offset(struct sh_eth_private *mdp,
					    int entry)
{
	return sh_eth_tsu_get_offset(mdp, TSU_POST1) + (entry / 8 * 4);
}

static u32 sh_eth_tsu_get_post_mask(int entry)
{
	return 0x0f << (28 - ((entry % 8) * 4));
}

static u32 sh_eth_tsu_get_post_bit(struct sh_eth_private *mdp, int entry)
{
	return (0x08 >> (mdp->port << 1)) << (28 - ((entry % 8) * 4));
}
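
/* Worked example: CAM entry 13 lives in TSU_POST2 (13 / 8 == 1, so the
 * register offset advances by 4) and occupies the nibble at bits 8-11
 * (28 - (13 % 8) * 4 == 8).  Within that nibble, port 0 owns the 0x08
 * bit and port 1 the 0x02 bit (0x08 >> (port << 1)), which is what the
 * enable/disable helpers below set and clear.
 */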
static void sh_eth_tsu_enable_cam_entry_post(struct net_device *ndev,
					     int entry)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	u32 tmp;
	void *reg_offset;

	reg_offset = sh_eth_tsu_get_post_reg_offset(mdp, entry);
	tmp = ioread32(reg_offset);
	iowrite32(tmp | sh_eth_tsu_get_post_bit(mdp, entry), reg_offset);
}

static bool sh_eth_tsu_disable_cam_entry_post(struct net_device *ndev,
					      int entry)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	u32 post_mask, ref_mask, tmp;
	void *reg_offset;

	reg_offset = sh_eth_tsu_get_post_reg_offset(mdp, entry);
	post_mask = sh_eth_tsu_get_post_mask(entry);
	ref_mask = sh_eth_tsu_get_post_bit(mdp, entry) & ~post_mask;

	tmp = ioread32(reg_offset);
	iowrite32(tmp & ~post_mask, reg_offset);

	/* If the other port is enabled, the function returns "true" */
	return tmp & ref_mask;
}
static int sh_eth_tsu_busy(struct net_device *ndev)
{
	int timeout = SH_ETH_TSU_TIMEOUT_MS * 100;
	struct sh_eth_private *mdp = netdev_priv(ndev);

	while ((sh_eth_tsu_read(mdp, TSU_ADSBSY) & TSU_ADSBSY_0)) {
		udelay(10);
		timeout--;
		if (timeout <= 0) {
			netdev_err(ndev, "%s: timeout\n", __func__);
			return -ETIMEDOUT;
		}
	}

	return 0;
}

static int sh_eth_tsu_write_entry(struct net_device *ndev, void *reg,
				  const u8 *addr)
{
	u32 val;

	val = addr[0] << 24 | addr[1] << 16 | addr[2] << 8 | addr[3];
	iowrite32(val, reg);
	if (sh_eth_tsu_busy(ndev) < 0)
		return -EBUSY;

	val = addr[4] << 8 | addr[5];
	iowrite32(val, reg + 4);
	if (sh_eth_tsu_busy(ndev) < 0)
		return -EBUSY;

	return 0;
}

static void sh_eth_tsu_read_entry(void *reg, u8 *addr)
{
	u32 val;

	val = ioread32(reg);
	addr[0] = (val >> 24) & 0xff;
	addr[1] = (val >> 16) & 0xff;
	addr[2] = (val >> 8) & 0xff;
	addr[3] = val & 0xff;
	val = ioread32(reg + 4);
	addr[4] = (val >> 8) & 0xff;
	addr[5] = val & 0xff;
}
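
/* CAM entry layout implied by the two helpers above: each of the
 * SH_ETH_TSU_CAM_ENTRIES address slots is 8 bytes wide, starting at
 * TSU_ADRH0 - the first word holds MAC bytes 0-3 and the word at +4
 * holds bytes 4-5 in its low 16 bits - which is why the lookup loops
 * below step reg_offset by 8 per entry.
 */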
static int sh_eth_tsu_find_entry(struct net_device *ndev, const u8 *addr)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
	int i;
	u8 c_addr[ETH_ALEN];

	for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++, reg_offset += 8) {
		sh_eth_tsu_read_entry(reg_offset, c_addr);
		if (ether_addr_equal(addr, c_addr))
			return i;
	}

	return -ENOENT;
}

static int sh_eth_tsu_find_empty(struct net_device *ndev)
{
	u8 blank[ETH_ALEN];
	int entry;

	memset(blank, 0, sizeof(blank));
	entry = sh_eth_tsu_find_entry(ndev, blank);
	return (entry < 0) ? -ENOMEM : entry;
}

static int sh_eth_tsu_disable_cam_entry_table(struct net_device *ndev,
					      int entry)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
	int ret;
	u8 blank[ETH_ALEN];

	sh_eth_tsu_write(mdp, sh_eth_tsu_read(mdp, TSU_TEN) &
			 ~(1 << (31 - entry)), TSU_TEN);

	memset(blank, 0, sizeof(blank));
	ret = sh_eth_tsu_write_entry(ndev, reg_offset + entry * 8, blank);
	if (ret < 0)
		return ret;

	return 0;
}

static int sh_eth_tsu_add_entry(struct net_device *ndev, const u8 *addr)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
	int i, ret;

	if (!mdp->cd->tsu)
		return 0;

	i = sh_eth_tsu_find_entry(ndev, addr);
	if (i < 0) {
		/* No entry found, create one */
		i = sh_eth_tsu_find_empty(ndev);
		if (i < 0)
			return -ENOMEM;
		ret = sh_eth_tsu_write_entry(ndev, reg_offset + i * 8, addr);
		if (ret < 0)
			return ret;

		/* Enable the entry */
		sh_eth_tsu_write(mdp, sh_eth_tsu_read(mdp, TSU_TEN) |
				 (1 << (31 - i)), TSU_TEN);
	}

	/* Entry found or created, enable POST */
	sh_eth_tsu_enable_cam_entry_post(ndev, i);

	return 0;
}

static int sh_eth_tsu_del_entry(struct net_device *ndev, const u8 *addr)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int i, ret;

	if (!mdp->cd->tsu)
		return 0;

	i = sh_eth_tsu_find_entry(ndev, addr);
	if (i < 0)
		return 0;
	if (sh_eth_tsu_disable_cam_entry_post(ndev, i))
		return 0;

	/* Disable the entry if both ports were disabled */
	ret = sh_eth_tsu_disable_cam_entry_table(ndev, i);
	if (ret < 0)
		return ret;

	return 0;
}

static int sh_eth_tsu_purge_all(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int i, ret;

	if (!mdp->cd->tsu)
		return 0;

	for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++) {
		if (sh_eth_tsu_disable_cam_entry_post(ndev, i))
			continue;

		/* Disable the entry if both ports were disabled */
		ret = sh_eth_tsu_disable_cam_entry_table(ndev, i);
		if (ret < 0)
			return ret;
	}

	return 0;
}

static void sh_eth_tsu_purge_mcast(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	u8 addr[ETH_ALEN];
	void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
	int i;

	if (!mdp->cd->tsu)
		return;

	for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++, reg_offset += 8) {
		sh_eth_tsu_read_entry(reg_offset, addr);
		if (is_multicast_ether_addr(addr))
			sh_eth_tsu_del_entry(ndev, addr);
	}
}
/* Update promiscuous flag and multicast filter */
static void sh_eth_set_rx_mode(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	u32 ecmr_bits;
	int mcast_all = 0;
	unsigned long flags;

	spin_lock_irqsave(&mdp->lock, flags);
	/* Initial condition is MCT = 1, PRM = 0.
	 * Depending on ndev->flags, set PRM or clear MCT
	 */
	ecmr_bits = sh_eth_read(ndev, ECMR) & ~ECMR_PRM;
	if (mdp->cd->tsu)
		ecmr_bits |= ECMR_MCT;

	if (!(ndev->flags & IFF_MULTICAST)) {
		sh_eth_tsu_purge_mcast(ndev);
		ecmr_bits &= ~ECMR_MCT;
	}
	if (ndev->flags & IFF_ALLMULTI) {
		sh_eth_tsu_purge_mcast(ndev);
		ecmr_bits &= ~ECMR_MCT;
		mcast_all = 1;
	}

	if (ndev->flags & IFF_PROMISC) {
		sh_eth_tsu_purge_all(ndev);
		ecmr_bits = (ecmr_bits & ~ECMR_MCT) | ECMR_PRM;
	} else if (mdp->cd->tsu) {
		struct netdev_hw_addr *ha;

		netdev_for_each_mc_addr(ha, ndev) {
			if (mcast_all && is_multicast_ether_addr(ha->addr))
				continue;

			if (sh_eth_tsu_add_entry(ndev, ha->addr) < 0) {
				if (!mcast_all) {
					sh_eth_tsu_purge_mcast(ndev);
					ecmr_bits &= ~ECMR_MCT;
					mcast_all = 1;
				}
			}
		}
	}

	/* update the ethernet mode */
	sh_eth_write(ndev, ecmr_bits, ECMR);

	spin_unlock_irqrestore(&mdp->lock, flags);
}
static int sh_eth_get_vtag_index(struct sh_eth_private *mdp)
{
	if (!mdp->port)
		return TSU_VTAG0;
	else
		return TSU_VTAG1;
}

static int sh_eth_vlan_rx_add_vid(struct net_device *ndev,
				  __be16 proto, u16 vid)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int vtag_reg_index = sh_eth_get_vtag_index(mdp);

	if (unlikely(!mdp->cd->tsu))
		return -EPERM;

	/* No filtering if vid = 0 */
	if (!vid)
		return 0;

	mdp->vlan_num_ids++;

	/* The controller has one VLAN tag HW filter.  So, if the filter is
	 * already enabled, the driver disables it and accepts all VLAN IDs.
	 */
	if (mdp->vlan_num_ids > 1) {
		/* disable VLAN filter */
		sh_eth_tsu_write(mdp, 0, vtag_reg_index);
		return 0;
	}

	sh_eth_tsu_write(mdp, TSU_VTAG_ENABLE | (vid & TSU_VTAG_VID_MASK),
			 vtag_reg_index);

	return 0;
}

static int sh_eth_vlan_rx_kill_vid(struct net_device *ndev,
				   __be16 proto, u16 vid)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int vtag_reg_index = sh_eth_get_vtag_index(mdp);

	if (unlikely(!mdp->cd->tsu))
		return -EPERM;

	/* No filtering if vid = 0 */
	if (!vid)
		return 0;

	mdp->vlan_num_ids--;
	sh_eth_tsu_write(mdp, 0, vtag_reg_index);

	return 0;
}
2858 /* SuperH's TSU register init function */
2859 static void sh_eth_tsu_init(struct sh_eth_private *mdp)
2861 if (sh_eth_is_rz_fast_ether(mdp)) {
2862 sh_eth_tsu_write(mdp, 0, TSU_TEN); /* Disable all CAM entry */
2866 sh_eth_tsu_write(mdp, 0, TSU_FWEN0); /* Disable forward(0->1) */
2867 sh_eth_tsu_write(mdp, 0, TSU_FWEN1); /* Disable forward(1->0) */
2868 sh_eth_tsu_write(mdp, 0, TSU_FCM); /* forward fifo 3k-3k */
2869 sh_eth_tsu_write(mdp, 0xc, TSU_BSYSL0);
2870 sh_eth_tsu_write(mdp, 0xc, TSU_BSYSL1);
2871 sh_eth_tsu_write(mdp, 0, TSU_PRISL0);
2872 sh_eth_tsu_write(mdp, 0, TSU_PRISL1);
2873 sh_eth_tsu_write(mdp, 0, TSU_FWSL0);
2874 sh_eth_tsu_write(mdp, 0, TSU_FWSL1);
2875 sh_eth_tsu_write(mdp, TSU_FWSLC_POSTENU | TSU_FWSLC_POSTENL, TSU_FWSLC);
2876 if (sh_eth_is_gether(mdp)) {
2877 sh_eth_tsu_write(mdp, 0, TSU_QTAG0); /* Disable QTAG(0->1) */
2878 sh_eth_tsu_write(mdp, 0, TSU_QTAG1); /* Disable QTAG(1->0) */
2880 sh_eth_tsu_write(mdp, 0, TSU_QTAGM0); /* Disable QTAG(0->1) */
2881 sh_eth_tsu_write(mdp, 0, TSU_QTAGM1); /* Disable QTAG(1->0) */
2883 sh_eth_tsu_write(mdp, 0, TSU_FWSR); /* clear all interrupt status */
2884 sh_eth_tsu_write(mdp, 0, TSU_FWINMK); /* Disable all interrupts */
2885 sh_eth_tsu_write(mdp, 0, TSU_TEN); /* Disable all CAM entries */
2886 sh_eth_tsu_write(mdp, 0, TSU_POST1); /* Disable CAM entry [ 0- 7] */
2887 sh_eth_tsu_write(mdp, 0, TSU_POST2); /* Disable CAM entry [ 8-15] */
2888 sh_eth_tsu_write(mdp, 0, TSU_POST3); /* Disable CAM entry [16-23] */
2889 sh_eth_tsu_write(mdp, 0, TSU_POST4); /* Disable CAM entry [24-31] */
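/* TSU_POST1..TSU_POST4 gate the 32 CAM entries in groups of eight
 * ([0-7], [8-15], [16-23], [24-31]); each entry carries a per-port
 * enable field, which is what sh_eth_tsu_disable_cam_entry_post()
 * (and its enable counterpart) manipulate at runtime.
 */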
2892 /* MDIO bus release function */
2893 static int sh_mdio_release(struct sh_eth_private *mdp)
2895 /* unregister MDIO bus */
2896 mdiobus_unregister(mdp->mii_bus);
2898 /* free bitbang info */
2899 free_mdio_bitbang(mdp->mii_bus);
2904 /* MDIO bus init function */
2905 static int sh_mdio_init(struct sh_eth_private *mdp,
2906 struct sh_eth_plat_data *pd)
2909 struct bb_info *bitbang;
2910 struct platform_device *pdev = mdp->pdev;
2911 struct device *dev = &mdp->pdev->dev;
2913 /* create bit control struct for PHY */
2914 bitbang = devm_kzalloc(dev, sizeof(struct bb_info), GFP_KERNEL);
2919 bitbang->addr = mdp->addr + mdp->reg_offset[PIR];
2920 bitbang->set_gate = pd->set_mdio_gate;
2921 bitbang->mdi_msk = PIR_MDI;
2922 bitbang->mdo_msk = PIR_MDO;
2923 bitbang->mmd_msk = PIR_MMD;
2924 bitbang->mdc_msk = PIR_MDC;
2925 bitbang->ctrl.ops = &bb_ops;
2927 /* MII controller setting */
2928 mdp->mii_bus = alloc_mdio_bitbang(&bitbang->ctrl);
2932 /* Hook up MII support for ethtool */
2933 mdp->mii_bus->name = "sh_mii";
2934 mdp->mii_bus->parent = dev;
2935 snprintf(mdp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
2936 pdev->name, pdev->id);
2939 mdp->mii_bus->irq = devm_kmalloc_array(dev, PHY_MAX_ADDR, sizeof(int),
2941 if (!mdp->mii_bus->irq) {
2946 /* register MDIO bus */
2948 ret = of_mdiobus_register(mdp->mii_bus, dev->of_node);
2950 for (i = 0; i < PHY_MAX_ADDR; i++)
2951 mdp->mii_bus->irq[i] = PHY_POLL;
2952 if (pd->phy_irq > 0)
2953 mdp->mii_bus->irq[pd->phy] = pd->phy_irq;
2955 ret = mdiobus_register(mdp->mii_bus);
2964 free_mdio_bitbang(mdp->mii_bus);
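/* There is no dedicated MDIO controller on this hardware:
 * alloc_mdio_bitbang() wraps the bb_ops callbacks so the generic
 * mdio-bitbang layer can clock MDIO frames out in software by toggling
 * the PIR register bits configured above (MDC for the clock, MDO/MDI
 * for data out/in, MMD for pin direction).
 */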
2968 static const u16 *sh_eth_get_register_offset(int register_type)
2970 const u16 *reg_offset = NULL;
2972 switch (register_type) {
2973 case SH_ETH_REG_GIGABIT:
2974 reg_offset = sh_eth_offset_gigabit;
2976 case SH_ETH_REG_FAST_RZ:
2977 reg_offset = sh_eth_offset_fast_rz;
2979 case SH_ETH_REG_FAST_RCAR:
2980 reg_offset = sh_eth_offset_fast_rcar;
2982 case SH_ETH_REG_FAST_SH4:
2983 reg_offset = sh_eth_offset_fast_sh4;
2985 case SH_ETH_REG_FAST_SH3_SH2:
2986 reg_offset = sh_eth_offset_fast_sh3_sh2;
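/* All register access in this driver goes through the per-SoC offset
 * table selected here: a logical register index is translated to a
 * byte offset, with SH_ETH_OFFSET_INVALID marking registers a given
 * SoC does not implement. A minimal sketch of the resulting access
 * pattern (hypothetical helper, for illustration only):
 *
 *   static u32 example_read(struct sh_eth_private *mdp, int reg)
 *   {
 *           return ioread32(mdp->addr + mdp->reg_offset[reg]);
 *   }
 */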
2995 static const struct net_device_ops sh_eth_netdev_ops = {
2996 .ndo_open = sh_eth_open,
2997 .ndo_stop = sh_eth_close,
2998 .ndo_start_xmit = sh_eth_start_xmit,
2999 .ndo_get_stats = sh_eth_get_stats,
3000 .ndo_set_rx_mode = sh_eth_set_rx_mode,
3001 .ndo_tx_timeout = sh_eth_tx_timeout,
3002 .ndo_do_ioctl = sh_eth_do_ioctl,
3003 .ndo_validate_addr = eth_validate_addr,
3004 .ndo_set_mac_address = eth_mac_addr,
3005 .ndo_change_mtu = eth_change_mtu,
3008 static const struct net_device_ops sh_eth_netdev_ops_tsu = {
3009 .ndo_open = sh_eth_open,
3010 .ndo_stop = sh_eth_close,
3011 .ndo_start_xmit = sh_eth_start_xmit,
3012 .ndo_get_stats = sh_eth_get_stats,
3013 .ndo_set_rx_mode = sh_eth_set_rx_mode,
3014 .ndo_vlan_rx_add_vid = sh_eth_vlan_rx_add_vid,
3015 .ndo_vlan_rx_kill_vid = sh_eth_vlan_rx_kill_vid,
3016 .ndo_tx_timeout = sh_eth_tx_timeout,
3017 .ndo_do_ioctl = sh_eth_do_ioctl,
3018 .ndo_validate_addr = eth_validate_addr,
3019 .ndo_set_mac_address = eth_mac_addr,
3020 .ndo_change_mtu = eth_change_mtu,
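/* Two ops tables are kept because only TSU-equipped controllers can
 * offload VLAN filtering: sh_eth_netdev_ops_tsu adds the
 * .ndo_vlan_rx_add_vid/.ndo_vlan_rx_kill_vid hooks, and the probe
 * routine picks a table based on mdp->cd->tsu.
 */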
3024 static struct sh_eth_plat_data *sh_eth_parse_dt(struct device *dev)
3026 struct device_node *np = dev->of_node;
3027 struct sh_eth_plat_data *pdata;
3028 const char *mac_addr;
3030 pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
3034 pdata->phy_interface = of_get_phy_mode(np);
3036 mac_addr = of_get_mac_address(np);
3038 memcpy(pdata->mac_addr, mac_addr, ETH_ALEN);
3040 pdata->no_ether_link =
3041 of_property_read_bool(np, "renesas,no-ether-link");
3042 pdata->ether_link_active_low =
3043 of_property_read_bool(np, "renesas,ether-link-active-low");
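/* An illustrative device tree node consuming the properties parsed
 * above (the unit address, reg value, and MAC address below are made
 * up for the example):
 *
 *   ethernet@ee700000 {
 *           compatible = "renesas,ether-r8a7791";
 *           reg = <0 0xee700000 0 0x400>;
 *           phy-mode = "rmii";
 *           local-mac-address = [02 00 00 00 00 01];
 *           renesas,ether-link-active-low;
 *   };
 */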
3048 static const struct of_device_id sh_eth_match_table[] = {
3049 { .compatible = "renesas,gether-r8a7740", .data = &r8a7740_data },
3050 { .compatible = "renesas,ether-r8a7778", .data = &r8a777x_data },
3051 { .compatible = "renesas,ether-r8a7779", .data = &r8a777x_data },
3052 { .compatible = "renesas,ether-r8a7790", .data = &r8a779x_data },
3053 { .compatible = "renesas,ether-r8a7791", .data = &r8a779x_data },
3054 { .compatible = "renesas,ether-r8a7793", .data = &r8a779x_data },
3055 { .compatible = "renesas,ether-r8a7794", .data = &r8a779x_data },
3056 { .compatible = "renesas,ether-r7s72100", .data = &r7s72100_data },
3059 MODULE_DEVICE_TABLE(of, sh_eth_match_table);
3061 static inline struct sh_eth_plat_data *sh_eth_parse_dt(struct device *dev)
3067 static int sh_eth_drv_probe(struct platform_device *pdev)
3070 struct resource *res;
3071 struct net_device *ndev = NULL;
3072 struct sh_eth_private *mdp = NULL;
3073 struct sh_eth_plat_data *pd = dev_get_platdata(&pdev->dev);
3074 const struct platform_device_id *id = platform_get_device_id(pdev);
3077 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
3079 ndev = alloc_etherdev(sizeof(struct sh_eth_private));
3083 pm_runtime_enable(&pdev->dev);
3084 pm_runtime_get_sync(&pdev->dev);
3091 ret = platform_get_irq(pdev, 0);
3096 SET_NETDEV_DEV(ndev, &pdev->dev);
3098 mdp = netdev_priv(ndev);
3099 mdp->num_tx_ring = TX_RING_SIZE;
3100 mdp->num_rx_ring = RX_RING_SIZE;
3101 mdp->addr = devm_ioremap_resource(&pdev->dev, res);
3102 if (IS_ERR(mdp->addr)) {
3103 ret = PTR_ERR(mdp->addr);
3107 ndev->base_addr = res->start;
3109 spin_lock_init(&mdp->lock);
3112 if (pdev->dev.of_node)
3113 pd = sh_eth_parse_dt(&pdev->dev);
3115 dev_err(&pdev->dev, "no platform data\n");
3121 mdp->phy_id = pd->phy;
3122 mdp->phy_interface = pd->phy_interface;
3124 mdp->edmac_endian = pd->edmac_endian;
3125 mdp->no_ether_link = pd->no_ether_link;
3126 mdp->ether_link_active_low = pd->ether_link_active_low;
3130 mdp->cd = (struct sh_eth_cpu_data *)id->driver_data;
3132 const struct of_device_id *match;
3134 match = of_match_device(of_match_ptr(sh_eth_match_table),
3136 mdp->cd = (struct sh_eth_cpu_data *)match->data;
3138 mdp->reg_offset = sh_eth_get_register_offset(mdp->cd->register_type);
3139 if (!mdp->reg_offset) {
3140 dev_err(&pdev->dev, "Unknown register type (%d)\n",
3141 mdp->cd->register_type);
3145 sh_eth_set_default_cpu_data(mdp->cd);
3149 ndev->netdev_ops = &sh_eth_netdev_ops_tsu;
3151 ndev->netdev_ops = &sh_eth_netdev_ops;
3152 ndev->ethtool_ops = &sh_eth_ethtool_ops;
3153 ndev->watchdog_timeo = TX_TIMEOUT;
3155 /* debug message level */
3156 mdp->msg_enable = SH_ETH_DEF_MSG_ENABLE;
3158 /* read and set MAC address */
3159 read_mac_address(ndev, pd->mac_addr);
3160 if (!is_valid_ether_addr(ndev->dev_addr)) {
3161 dev_warn(&pdev->dev,
3162 "no valid MAC address supplied, using a random one.\n");
3163 eth_hw_addr_random(ndev);
3166 /* ioremap the TSU registers */
3168 struct resource *rtsu;
3169 rtsu = platform_get_resource(pdev, IORESOURCE_MEM, 1);
3170 mdp->tsu_addr = devm_ioremap_resource(&pdev->dev, rtsu);
3171 if (IS_ERR(mdp->tsu_addr)) {
3172 ret = PTR_ERR(mdp->tsu_addr);
3175 mdp->port = devno % 2;
3176 ndev->features = NETIF_F_HW_VLAN_CTAG_FILTER;
3179 /* initialize the first device, or any that requires init */
3180 if (!devno || pd->needs_init) {
3181 if (mdp->cd->chip_reset)
3182 mdp->cd->chip_reset(ndev);
3185 /* TSU init (init only) */
3186 sh_eth_tsu_init(mdp);
3190 if (mdp->cd->rmiimode)
3191 sh_eth_write(ndev, 0x1, RMIIMODE);
3194 ret = sh_mdio_init(mdp, pd);
3196 dev_err(&ndev->dev, "failed to initialise MDIO\n");
3200 netif_napi_add(ndev, &mdp->napi, sh_eth_poll, 64);
3202 /* register the network device */
3203 ret = register_netdev(ndev);
3207 /* print device information */
3208 netdev_info(ndev, "Base address at 0x%x, %pM, IRQ %d.\n",
3209 (u32)ndev->base_addr, ndev->dev_addr, ndev->irq);
3211 pm_runtime_put(&pdev->dev);
3212 platform_set_drvdata(pdev, ndev);
3217 netif_napi_del(&mdp->napi);
3218 sh_mdio_release(mdp);
3225 pm_runtime_put(&pdev->dev);
3226 pm_runtime_disable(&pdev->dev);
3230 static int sh_eth_drv_remove(struct platform_device *pdev)
3232 struct net_device *ndev = platform_get_drvdata(pdev);
3233 struct sh_eth_private *mdp = netdev_priv(ndev);
3235 unregister_netdev(ndev);
3236 netif_napi_del(&mdp->napi);
3237 sh_mdio_release(mdp);
3238 pm_runtime_disable(&pdev->dev);
3245 #ifdef CONFIG_PM_SLEEP
3246 static int sh_eth_suspend(struct device *dev)
3248 struct net_device *ndev = dev_get_drvdata(dev);
3251 if (netif_running(ndev)) {
3252 netif_device_detach(ndev);
3253 ret = sh_eth_close(ndev);
3259 static int sh_eth_resume(struct device *dev)
3261 struct net_device *ndev = dev_get_drvdata(dev);
3264 if (netif_running(ndev)) {
3265 ret = sh_eth_open(ndev);
3268 netif_device_attach(ndev);
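/* System sleep is handled by simply closing and reopening the
 * interface: sh_eth_close() quiesces the hardware before suspend, and
 * sh_eth_open() reprograms it from scratch on resume, so no register
 * state needs to be saved across the transition.
 */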
3275 static int sh_eth_runtime_nop(struct device *dev)
3277 /* Runtime PM callback shared between ->runtime_suspend()
3278 * and ->runtime_resume(). Simply returns success.
3280 * This driver re-initializes all registers after
3281 * pm_runtime_get_sync() anyway, so there is no need
3282 * to save and restore registers here.
3287 static const struct dev_pm_ops sh_eth_dev_pm_ops = {
3288 SET_SYSTEM_SLEEP_PM_OPS(sh_eth_suspend, sh_eth_resume)
3289 SET_RUNTIME_PM_OPS(sh_eth_runtime_nop, sh_eth_runtime_nop, NULL)
3291 #define SH_ETH_PM_OPS (&sh_eth_dev_pm_ops)
3293 #define SH_ETH_PM_OPS NULL
3296 static struct platform_device_id sh_eth_id_table[] = {
3297 { "sh7619-ether", (kernel_ulong_t)&sh7619_data },
3298 { "sh771x-ether", (kernel_ulong_t)&sh771x_data },
3299 { "sh7724-ether", (kernel_ulong_t)&sh7724_data },
3300 { "sh7734-gether", (kernel_ulong_t)&sh7734_data },
3301 { "sh7757-ether", (kernel_ulong_t)&sh7757_data },
3302 { "sh7757-gether", (kernel_ulong_t)&sh7757_data_giga },
3303 { "sh7763-gether", (kernel_ulong_t)&sh7763_data },
3304 { "r7s72100-ether", (kernel_ulong_t)&r7s72100_data },
3305 { "r8a7740-gether", (kernel_ulong_t)&r8a7740_data },
3306 { "r8a777x-ether", (kernel_ulong_t)&r8a777x_data },
3307 { "r8a7790-ether", (kernel_ulong_t)&r8a779x_data },
3308 { "r8a7791-ether", (kernel_ulong_t)&r8a779x_data },
3309 { "r8a7793-ether", (kernel_ulong_t)&r8a779x_data },
3310 { "r8a7794-ether", (kernel_ulong_t)&r8a779x_data },
3313 MODULE_DEVICE_TABLE(platform, sh_eth_id_table);
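/* Each legacy (non-DT) platform device name maps to the matching
 * per-SoC sh_eth_cpu_data; the probe routine retrieves it through
 * platform_get_device_id() when no OF match is available.
 */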
3315 static struct platform_driver sh_eth_driver = {
3316 .probe = sh_eth_drv_probe,
3317 .remove = sh_eth_drv_remove,
3318 .id_table = sh_eth_id_table,
3321 .pm = SH_ETH_PM_OPS,
3322 .of_match_table = of_match_ptr(sh_eth_match_table),
3326 module_platform_driver(sh_eth_driver);
3328 MODULE_AUTHOR("Nobuhiro Iwamatsu, Yoshihiro Shimoda");
3329 MODULE_DESCRIPTION("Renesas SuperH Ethernet driver");
3330 MODULE_LICENSE("GPL v2");