1 // SPDX-License-Identifier: GPL-2.0+
3 #include <linux/types.h>
5 #include <linux/platform_device.h>
6 #include <linux/pm_runtime.h>
7 #include <linux/acpi.h>
8 #include <linux/of_mdio.h>
9 #include <linux/etherdevice.h>
10 #include <linux/interrupt.h>
14 #include <net/ip6_checksum.h>
16 #define NETSEC_REG_SOFT_RST 0x104
17 #define NETSEC_REG_COM_INIT 0x120
19 #define NETSEC_REG_TOP_STATUS 0x200
20 #define NETSEC_IRQ_RX BIT(1)
21 #define NETSEC_IRQ_TX BIT(0)
23 #define NETSEC_REG_TOP_INTEN 0x204
24 #define NETSEC_REG_INTEN_SET 0x234
25 #define NETSEC_REG_INTEN_CLR 0x238
27 #define NETSEC_REG_NRM_TX_STATUS 0x400
28 #define NETSEC_REG_NRM_TX_INTEN 0x404
29 #define NETSEC_REG_NRM_TX_INTEN_SET 0x428
30 #define NETSEC_REG_NRM_TX_INTEN_CLR 0x42c
31 #define NRM_TX_ST_NTOWNR BIT(17)
32 #define NRM_TX_ST_TR_ERR BIT(16)
33 #define NRM_TX_ST_TXDONE BIT(15)
34 #define NRM_TX_ST_TMREXP BIT(14)
36 #define NETSEC_REG_NRM_RX_STATUS 0x440
37 #define NETSEC_REG_NRM_RX_INTEN 0x444
38 #define NETSEC_REG_NRM_RX_INTEN_SET 0x468
39 #define NETSEC_REG_NRM_RX_INTEN_CLR 0x46c
40 #define NRM_RX_ST_RC_ERR BIT(16)
41 #define NRM_RX_ST_PKTCNT BIT(15)
42 #define NRM_RX_ST_TMREXP BIT(14)
44 #define NETSEC_REG_PKT_CMD_BUF 0xd0
46 #define NETSEC_REG_CLK_EN 0x100
48 #define NETSEC_REG_PKT_CTRL 0x140
50 #define NETSEC_REG_DMA_TMR_CTRL 0x20c
51 #define NETSEC_REG_F_TAIKI_MC_VER 0x22c
52 #define NETSEC_REG_F_TAIKI_VER 0x230
53 #define NETSEC_REG_DMA_HM_CTRL 0x214
54 #define NETSEC_REG_DMA_MH_CTRL 0x220
55 #define NETSEC_REG_ADDR_DIS_CORE 0x218
56 #define NETSEC_REG_DMAC_HM_CMD_BUF 0x210
57 #define NETSEC_REG_DMAC_MH_CMD_BUF 0x21c
59 #define NETSEC_REG_NRM_TX_PKTCNT 0x410
61 #define NETSEC_REG_NRM_TX_DONE_PKTCNT 0x414
62 #define NETSEC_REG_NRM_TX_DONE_TXINT_PKTCNT 0x418
64 #define NETSEC_REG_NRM_TX_TMR 0x41c
66 #define NETSEC_REG_NRM_RX_PKTCNT 0x454
67 #define NETSEC_REG_NRM_RX_RXINT_PKTCNT 0x458
68 #define NETSEC_REG_NRM_TX_TXINT_TMR 0x420
69 #define NETSEC_REG_NRM_RX_RXINT_TMR 0x460
71 #define NETSEC_REG_NRM_RX_TMR 0x45c
73 #define NETSEC_REG_NRM_TX_DESC_START_UP 0x434
74 #define NETSEC_REG_NRM_TX_DESC_START_LW 0x408
75 #define NETSEC_REG_NRM_RX_DESC_START_UP 0x474
76 #define NETSEC_REG_NRM_RX_DESC_START_LW 0x448
78 #define NETSEC_REG_NRM_TX_CONFIG 0x430
79 #define NETSEC_REG_NRM_RX_CONFIG 0x470
81 #define MAC_REG_STATUS 0x1024
82 #define MAC_REG_DATA 0x11c0
83 #define MAC_REG_CMD 0x11c4
84 #define MAC_REG_FLOW_TH 0x11cc
85 #define MAC_REG_INTF_SEL 0x11d4
86 #define MAC_REG_DESC_INIT 0x11fc
87 #define MAC_REG_DESC_SOFT_RST 0x1204
88 #define NETSEC_REG_MODE_TRANS_COMP_STATUS 0x500
90 #define GMAC_REG_MCR 0x0000
91 #define GMAC_REG_MFFR 0x0004
92 #define GMAC_REG_GAR 0x0010
93 #define GMAC_REG_GDR 0x0014
94 #define GMAC_REG_FCR 0x0018
95 #define GMAC_REG_BMR 0x1000
96 #define GMAC_REG_RDLAR 0x100c
97 #define GMAC_REG_TDLAR 0x1010
98 #define GMAC_REG_OMR 0x1018
100 #define MHZ(n) ((n) * 1000 * 1000)
102 #define NETSEC_TX_SHIFT_OWN_FIELD 31
103 #define NETSEC_TX_SHIFT_LD_FIELD 30
104 #define NETSEC_TX_SHIFT_DRID_FIELD 24
105 #define NETSEC_TX_SHIFT_PT_FIELD 21
106 #define NETSEC_TX_SHIFT_TDRID_FIELD 16
107 #define NETSEC_TX_SHIFT_CC_FIELD 15
108 #define NETSEC_TX_SHIFT_FS_FIELD 9
109 #define NETSEC_TX_LAST 8
110 #define NETSEC_TX_SHIFT_CO 7
111 #define NETSEC_TX_SHIFT_SO 6
112 #define NETSEC_TX_SHIFT_TRS_FIELD 4
114 #define NETSEC_RX_PKT_OWN_FIELD 31
115 #define NETSEC_RX_PKT_LD_FIELD 30
116 #define NETSEC_RX_PKT_SDRID_FIELD 24
117 #define NETSEC_RX_PKT_FR_FIELD 23
118 #define NETSEC_RX_PKT_ER_FIELD 21
119 #define NETSEC_RX_PKT_ERR_FIELD 16
120 #define NETSEC_RX_PKT_TDRID_FIELD 12
121 #define NETSEC_RX_PKT_FS_FIELD 9
122 #define NETSEC_RX_PKT_LS_FIELD 8
123 #define NETSEC_RX_PKT_CO_FIELD 6
125 #define NETSEC_RX_PKT_ERR_MASK 3
127 #define NETSEC_MAX_TX_PKT_LEN 1518
128 #define NETSEC_MAX_TX_JUMBO_PKT_LEN 9018
130 #define NETSEC_RING_GMAC 15
131 #define NETSEC_RING_MAX 2
133 #define NETSEC_TCP_SEG_LEN_MAX 1460
134 #define NETSEC_TCP_JUMBO_SEG_LEN_MAX 8960
136 #define NETSEC_RX_CKSUM_NOTAVAIL 0
137 #define NETSEC_RX_CKSUM_OK 1
138 #define NETSEC_RX_CKSUM_NG 2
140 #define NETSEC_TOP_IRQ_REG_CODE_LOAD_END BIT(20)
141 #define NETSEC_IRQ_TRANSITION_COMPLETE BIT(4)
143 #define NETSEC_MODE_TRANS_COMP_IRQ_N2T BIT(20)
144 #define NETSEC_MODE_TRANS_COMP_IRQ_T2N BIT(19)
146 #define NETSEC_INT_PKTCNT_MAX 2047
148 #define NETSEC_FLOW_START_TH_MAX 95
149 #define NETSEC_FLOW_STOP_TH_MAX 95
150 #define NETSEC_FLOW_PAUSE_TIME_MIN 5
152 #define NETSEC_CLK_EN_REG_DOM_ALL 0x3f
154 #define NETSEC_PKT_CTRL_REG_MODE_NRM BIT(28)
155 #define NETSEC_PKT_CTRL_REG_EN_JUMBO BIT(27)
156 #define NETSEC_PKT_CTRL_REG_LOG_CHKSUM_ER BIT(3)
157 #define NETSEC_PKT_CTRL_REG_LOG_HD_INCOMPLETE BIT(2)
158 #define NETSEC_PKT_CTRL_REG_LOG_HD_ER BIT(1)
159 #define NETSEC_PKT_CTRL_REG_DRP_NO_MATCH BIT(0)
161 #define NETSEC_CLK_EN_REG_DOM_G BIT(5)
162 #define NETSEC_CLK_EN_REG_DOM_C BIT(1)
163 #define NETSEC_CLK_EN_REG_DOM_D BIT(0)
165 #define NETSEC_COM_INIT_REG_DB BIT(2)
166 #define NETSEC_COM_INIT_REG_CLS BIT(1)
167 #define NETSEC_COM_INIT_REG_ALL (NETSEC_COM_INIT_REG_CLS | \
168 NETSEC_COM_INIT_REG_DB)
170 #define NETSEC_SOFT_RST_REG_RESET 0
171 #define NETSEC_SOFT_RST_REG_RUN BIT(31)
173 #define NETSEC_DMA_CTRL_REG_STOP 1
174 #define MH_CTRL__MODE_TRANS BIT(20)
176 #define NETSEC_GMAC_CMD_ST_READ 0
177 #define NETSEC_GMAC_CMD_ST_WRITE BIT(28)
178 #define NETSEC_GMAC_CMD_ST_BUSY BIT(31)
180 #define NETSEC_GMAC_BMR_REG_COMMON 0x00412080
181 #define NETSEC_GMAC_BMR_REG_RESET 0x00020181
182 #define NETSEC_GMAC_BMR_REG_SWR 0x00000001
184 #define NETSEC_GMAC_OMR_REG_ST BIT(13)
185 #define NETSEC_GMAC_OMR_REG_SR BIT(1)
187 #define NETSEC_GMAC_MCR_REG_IBN BIT(30)
188 #define NETSEC_GMAC_MCR_REG_CST BIT(25)
189 #define NETSEC_GMAC_MCR_REG_JE BIT(20)
190 #define NETSEC_MCR_PS BIT(15)
191 #define NETSEC_GMAC_MCR_REG_FES BIT(14)
192 #define NETSEC_GMAC_MCR_REG_FULL_DUPLEX_COMMON 0x0000280c
193 #define NETSEC_GMAC_MCR_REG_HALF_DUPLEX_COMMON 0x0001a00c
195 #define NETSEC_FCR_RFE BIT(2)
196 #define NETSEC_FCR_TFE BIT(1)
198 #define NETSEC_GMAC_GAR_REG_GW BIT(1)
199 #define NETSEC_GMAC_GAR_REG_GB BIT(0)
201 #define NETSEC_GMAC_GAR_REG_SHIFT_PA 11
202 #define NETSEC_GMAC_GAR_REG_SHIFT_GR 6
203 #define GMAC_REG_SHIFT_CR_GAR 2
205 #define NETSEC_GMAC_GAR_REG_CR_25_35_MHZ 2
206 #define NETSEC_GMAC_GAR_REG_CR_35_60_MHZ 3
207 #define NETSEC_GMAC_GAR_REG_CR_60_100_MHZ 0
208 #define NETSEC_GMAC_GAR_REG_CR_100_150_MHZ 1
209 #define NETSEC_GMAC_GAR_REG_CR_150_250_MHZ 4
210 #define NETSEC_GMAC_GAR_REG_CR_250_300_MHZ 5
212 #define NETSEC_GMAC_RDLAR_REG_COMMON 0x18000
213 #define NETSEC_GMAC_TDLAR_REG_COMMON 0x1c000
215 #define NETSEC_REG_NETSEC_VER_F_TAIKI 0x50000
217 #define NETSEC_REG_DESC_RING_CONFIG_CFG_UP BIT(31)
218 #define NETSEC_REG_DESC_RING_CONFIG_CH_RST BIT(30)
219 #define NETSEC_REG_DESC_TMR_MODE 4
220 #define NETSEC_REG_DESC_ENDIAN 0
222 #define NETSEC_MAC_DESC_SOFT_RST_SOFT_RST 1
223 #define NETSEC_MAC_DESC_INIT_REG_INIT 1
225 #define NETSEC_EEPROM_MAC_ADDRESS 0x00
226 #define NETSEC_EEPROM_HM_ME_ADDRESS_H 0x08
227 #define NETSEC_EEPROM_HM_ME_ADDRESS_L 0x0C
228 #define NETSEC_EEPROM_HM_ME_SIZE 0x10
229 #define NETSEC_EEPROM_MH_ME_ADDRESS_H 0x14
230 #define NETSEC_EEPROM_MH_ME_ADDRESS_L 0x18
231 #define NETSEC_EEPROM_MH_ME_SIZE 0x1C
232 #define NETSEC_EEPROM_PKT_ME_ADDRESS 0x20
233 #define NETSEC_EEPROM_PKT_ME_SIZE 0x24
237 #define NETSEC_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN)
238 #define NETSEC_RX_BUF_SZ 1536
240 #define DESC_SZ sizeof(struct netsec_de)
242 #define NETSEC_F_NETSEC_VER_MAJOR_NUM(x) ((x) & 0xffff0000)
256 struct netsec_desc_ring {
258 struct netsec_desc *desc;
264 struct netsec_desc_ring desc_ring[NETSEC_RING_MAX];
265 struct ethtool_coalesce et_coalesce;
266 spinlock_t reglock; /* protect reg access */
267 struct napi_struct napi;
268 phy_interface_t phy_interface;
269 struct net_device *ndev;
270 struct device_node *phy_np;
271 struct phy_device *phydev;
272 struct mii_bus *mii_bus;
273 void __iomem *ioaddr;
274 void __iomem *eeprom_base;
280 bool rx_cksum_offload_flag;
283 struct netsec_de { /* Netsec Descriptor layout */
285 u32 data_buf_addr_up;
286 u32 data_buf_addr_lw;
290 struct netsec_tx_pkt_ctrl {
292 bool tcp_seg_offload_flag;
293 bool cksum_offload_flag;
296 struct netsec_rx_pkt_info {
302 static void netsec_write(struct netsec_priv *priv, u32 reg_addr, u32 val)
304 writel(val, priv->ioaddr + reg_addr);
307 static u32 netsec_read(struct netsec_priv *priv, u32 reg_addr)
309 return readl(priv->ioaddr + reg_addr);
312 /************* MDIO BUS OPS FOLLOW *************/
314 #define TIMEOUT_SPINS_MAC 1000
315 #define TIMEOUT_SECONDARY_MS_MAC 100
317 static u32 netsec_clk_type(u32 freq)
320 return NETSEC_GMAC_GAR_REG_CR_25_35_MHZ;
322 return NETSEC_GMAC_GAR_REG_CR_35_60_MHZ;
324 return NETSEC_GMAC_GAR_REG_CR_60_100_MHZ;
326 return NETSEC_GMAC_GAR_REG_CR_100_150_MHZ;
328 return NETSEC_GMAC_GAR_REG_CR_150_250_MHZ;
330 return NETSEC_GMAC_GAR_REG_CR_250_300_MHZ;
333 static int netsec_wait_while_busy(struct netsec_priv *priv, u32 addr, u32 mask)
335 u32 timeout = TIMEOUT_SPINS_MAC;
337 while (--timeout && netsec_read(priv, addr) & mask)
342 timeout = TIMEOUT_SECONDARY_MS_MAC;
343 while (--timeout && netsec_read(priv, addr) & mask)
344 usleep_range(1000, 2000);
349 netdev_WARN(priv->ndev, "%s: timeout\n", __func__);
354 static int netsec_mac_write(struct netsec_priv *priv, u32 addr, u32 value)
356 netsec_write(priv, MAC_REG_DATA, value);
357 netsec_write(priv, MAC_REG_CMD, addr | NETSEC_GMAC_CMD_ST_WRITE);
358 return netsec_wait_while_busy(priv,
359 MAC_REG_CMD, NETSEC_GMAC_CMD_ST_BUSY);
362 static int netsec_mac_read(struct netsec_priv *priv, u32 addr, u32 *read)
366 netsec_write(priv, MAC_REG_CMD, addr | NETSEC_GMAC_CMD_ST_READ);
367 ret = netsec_wait_while_busy(priv,
368 MAC_REG_CMD, NETSEC_GMAC_CMD_ST_BUSY);
372 *read = netsec_read(priv, MAC_REG_DATA);
377 static int netsec_mac_wait_while_busy(struct netsec_priv *priv,
380 u32 timeout = TIMEOUT_SPINS_MAC;
384 ret = netsec_mac_read(priv, addr, &data);
388 } while (--timeout && (data & mask));
393 timeout = TIMEOUT_SECONDARY_MS_MAC;
395 usleep_range(1000, 2000);
397 ret = netsec_mac_read(priv, addr, &data);
401 } while (--timeout && (data & mask));
406 netdev_WARN(priv->ndev, "%s: timeout\n", __func__);
411 static int netsec_mac_update_to_phy_state(struct netsec_priv *priv)
413 struct phy_device *phydev = priv->ndev->phydev;
416 value = phydev->duplex ? NETSEC_GMAC_MCR_REG_FULL_DUPLEX_COMMON :
417 NETSEC_GMAC_MCR_REG_HALF_DUPLEX_COMMON;
419 if (phydev->speed != SPEED_1000)
420 value |= NETSEC_MCR_PS;
422 if (priv->phy_interface != PHY_INTERFACE_MODE_GMII &&
423 phydev->speed == SPEED_100)
424 value |= NETSEC_GMAC_MCR_REG_FES;
426 value |= NETSEC_GMAC_MCR_REG_CST | NETSEC_GMAC_MCR_REG_JE;
428 if (phy_interface_mode_is_rgmii(priv->phy_interface))
429 value |= NETSEC_GMAC_MCR_REG_IBN;
431 if (netsec_mac_write(priv, GMAC_REG_MCR, value))
437 static int netsec_phy_read(struct mii_bus *bus, int phy_addr, int reg_addr);
439 static int netsec_phy_write(struct mii_bus *bus,
440 int phy_addr, int reg, u16 val)
443 struct netsec_priv *priv = bus->priv;
445 if (netsec_mac_write(priv, GMAC_REG_GDR, val))
447 if (netsec_mac_write(priv, GMAC_REG_GAR,
448 phy_addr << NETSEC_GMAC_GAR_REG_SHIFT_PA |
449 reg << NETSEC_GMAC_GAR_REG_SHIFT_GR |
450 NETSEC_GMAC_GAR_REG_GW | NETSEC_GMAC_GAR_REG_GB |
451 (netsec_clk_type(priv->freq) <<
452 GMAC_REG_SHIFT_CR_GAR)))
455 status = netsec_mac_wait_while_busy(priv, GMAC_REG_GAR,
456 NETSEC_GMAC_GAR_REG_GB);
458 /* Developerbox implements RTL8211E PHY and there is
459 * a compatibility problem with F_GMAC4.
460 * RTL8211E expects MDC clock must be kept toggling for several
461 * clock cycle with MDIO high before entering the IDLE state.
462 * To meet this requirement, netsec driver needs to issue dummy
463 * read(e.g. read PHYID1(offset 0x2) register) right after write.
465 netsec_phy_read(bus, phy_addr, MII_PHYSID1);
470 static int netsec_phy_read(struct mii_bus *bus, int phy_addr, int reg_addr)
472 struct netsec_priv *priv = bus->priv;
476 if (netsec_mac_write(priv, GMAC_REG_GAR, NETSEC_GMAC_GAR_REG_GB |
477 phy_addr << NETSEC_GMAC_GAR_REG_SHIFT_PA |
478 reg_addr << NETSEC_GMAC_GAR_REG_SHIFT_GR |
479 (netsec_clk_type(priv->freq) <<
480 GMAC_REG_SHIFT_CR_GAR)))
483 ret = netsec_mac_wait_while_busy(priv, GMAC_REG_GAR,
484 NETSEC_GMAC_GAR_REG_GB);
488 ret = netsec_mac_read(priv, GMAC_REG_GDR, &data);
495 /************* ETHTOOL_OPS FOLLOW *************/
497 static void netsec_et_get_drvinfo(struct net_device *net_device,
498 struct ethtool_drvinfo *info)
500 strlcpy(info->driver, "netsec", sizeof(info->driver));
501 strlcpy(info->bus_info, dev_name(net_device->dev.parent),
502 sizeof(info->bus_info));
505 static int netsec_et_get_coalesce(struct net_device *net_device,
506 struct ethtool_coalesce *et_coalesce)
508 struct netsec_priv *priv = netdev_priv(net_device);
510 *et_coalesce = priv->et_coalesce;
515 static int netsec_et_set_coalesce(struct net_device *net_device,
516 struct ethtool_coalesce *et_coalesce)
518 struct netsec_priv *priv = netdev_priv(net_device);
520 priv->et_coalesce = *et_coalesce;
522 if (priv->et_coalesce.tx_coalesce_usecs < 50)
523 priv->et_coalesce.tx_coalesce_usecs = 50;
524 if (priv->et_coalesce.tx_max_coalesced_frames < 1)
525 priv->et_coalesce.tx_max_coalesced_frames = 1;
527 netsec_write(priv, NETSEC_REG_NRM_TX_DONE_TXINT_PKTCNT,
528 priv->et_coalesce.tx_max_coalesced_frames);
529 netsec_write(priv, NETSEC_REG_NRM_TX_TXINT_TMR,
530 priv->et_coalesce.tx_coalesce_usecs);
531 netsec_write(priv, NETSEC_REG_NRM_TX_INTEN_SET, NRM_TX_ST_TXDONE);
532 netsec_write(priv, NETSEC_REG_NRM_TX_INTEN_SET, NRM_TX_ST_TMREXP);
534 if (priv->et_coalesce.rx_coalesce_usecs < 50)
535 priv->et_coalesce.rx_coalesce_usecs = 50;
536 if (priv->et_coalesce.rx_max_coalesced_frames < 1)
537 priv->et_coalesce.rx_max_coalesced_frames = 1;
539 netsec_write(priv, NETSEC_REG_NRM_RX_RXINT_PKTCNT,
540 priv->et_coalesce.rx_max_coalesced_frames);
541 netsec_write(priv, NETSEC_REG_NRM_RX_RXINT_TMR,
542 priv->et_coalesce.rx_coalesce_usecs);
543 netsec_write(priv, NETSEC_REG_NRM_RX_INTEN_SET, NRM_RX_ST_PKTCNT);
544 netsec_write(priv, NETSEC_REG_NRM_RX_INTEN_SET, NRM_RX_ST_TMREXP);
549 static u32 netsec_et_get_msglevel(struct net_device *dev)
551 struct netsec_priv *priv = netdev_priv(dev);
553 return priv->msg_enable;
556 static void netsec_et_set_msglevel(struct net_device *dev, u32 datum)
558 struct netsec_priv *priv = netdev_priv(dev);
560 priv->msg_enable = datum;
563 static const struct ethtool_ops netsec_ethtool_ops = {
564 .get_drvinfo = netsec_et_get_drvinfo,
565 .get_link_ksettings = phy_ethtool_get_link_ksettings,
566 .set_link_ksettings = phy_ethtool_set_link_ksettings,
567 .get_link = ethtool_op_get_link,
568 .get_coalesce = netsec_et_get_coalesce,
569 .set_coalesce = netsec_et_set_coalesce,
570 .get_msglevel = netsec_et_get_msglevel,
571 .set_msglevel = netsec_et_set_msglevel,
574 /************* NETDEV_OPS FOLLOW *************/
577 static void netsec_set_rx_de(struct netsec_priv *priv,
578 struct netsec_desc_ring *dring, u16 idx,
579 const struct netsec_desc *desc)
581 struct netsec_de *de = dring->vaddr + DESC_SZ * idx;
582 u32 attr = (1 << NETSEC_RX_PKT_OWN_FIELD) |
583 (1 << NETSEC_RX_PKT_FS_FIELD) |
584 (1 << NETSEC_RX_PKT_LS_FIELD);
586 if (idx == DESC_NUM - 1)
587 attr |= (1 << NETSEC_RX_PKT_LD_FIELD);
589 de->data_buf_addr_up = upper_32_bits(desc->dma_addr);
590 de->data_buf_addr_lw = lower_32_bits(desc->dma_addr);
591 de->buf_len_info = desc->len;
595 dring->desc[idx].dma_addr = desc->dma_addr;
596 dring->desc[idx].addr = desc->addr;
597 dring->desc[idx].len = desc->len;
600 static bool netsec_clean_tx_dring(struct netsec_priv *priv)
602 struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_TX];
603 unsigned int pkts, bytes;
604 struct netsec_de *entry;
605 int tail = dring->tail;
610 entry = dring->vaddr + DESC_SZ * tail;
612 while (!(entry->attr & (1U << NETSEC_TX_SHIFT_OWN_FIELD)) &&
614 struct netsec_desc *desc;
617 desc = &dring->desc[tail];
618 eop = (entry->attr >> NETSEC_TX_LAST) & 1;
621 dma_unmap_single(priv->dev, desc->dma_addr, desc->len,
625 bytes += desc->skb->len;
626 dev_kfree_skb(desc->skb);
628 /* clean up so netsec_uninit_pkt_dring() won't free the skb
631 *desc = (struct netsec_desc){};
633 /* entry->attr is not going to be accessed by the NIC until
634 * netsec_set_tx_de() is called. No need for a dma_wmb() here
636 entry->attr = 1U << NETSEC_TX_SHIFT_OWN_FIELD;
637 /* move tail ahead */
638 dring->tail = (tail + 1) % DESC_NUM;
641 entry = dring->vaddr + DESC_SZ * tail;
648 /* reading the register clears the irq */
649 netsec_read(priv, NETSEC_REG_NRM_TX_DONE_PKTCNT);
651 priv->ndev->stats.tx_packets += cnt;
652 priv->ndev->stats.tx_bytes += bytes;
654 netdev_completed_queue(priv->ndev, cnt, bytes);
659 static void netsec_process_tx(struct netsec_priv *priv)
661 struct net_device *ndev = priv->ndev;
664 cleaned = netsec_clean_tx_dring(priv);
666 if (cleaned && netif_queue_stopped(ndev)) {
667 /* Make sure we update the value, anyone stopping the queue
668 * after this will read the proper consumer idx
671 netif_wake_queue(ndev);
675 static void *netsec_alloc_rx_data(struct netsec_priv *priv,
676 dma_addr_t *dma_handle, u16 *desc_len)
678 size_t total_len = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
679 size_t payload_len = NETSEC_RX_BUF_SZ;
683 total_len += SKB_DATA_ALIGN(payload_len + NETSEC_SKB_PAD);
685 buf = napi_alloc_frag(total_len);
689 mapping = dma_map_single(priv->dev, buf + NETSEC_SKB_PAD, payload_len,
691 if (unlikely(dma_mapping_error(priv->dev, mapping)))
694 *dma_handle = mapping;
695 *desc_len = payload_len;
704 static void netsec_rx_fill(struct netsec_priv *priv, u16 from, u16 num)
706 struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_RX];
710 netsec_set_rx_de(priv, dring, idx, &dring->desc[idx]);
718 static int netsec_process_rx(struct netsec_priv *priv, int budget)
720 struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_RX];
721 struct net_device *ndev = priv->ndev;
722 struct netsec_rx_pkt_info rx_info;
726 while (done < budget) {
727 u16 idx = dring->tail;
728 struct netsec_de *de = dring->vaddr + (DESC_SZ * idx);
729 struct netsec_desc *desc = &dring->desc[idx];
730 u16 pkt_len, desc_len;
731 dma_addr_t dma_handle;
735 if (de->attr & (1U << NETSEC_RX_PKT_OWN_FIELD)) {
736 /* reading the register clears the irq */
737 netsec_read(priv, NETSEC_REG_NRM_RX_PKTCNT);
741 /* This barrier is needed to keep us from reading
742 * any other fields out of the netsec_de until we have
743 * verified the descriptor has been written back
748 pkt_len = de->buf_len_info >> 16;
749 rx_info.err_code = (de->attr >> NETSEC_RX_PKT_ERR_FIELD) &
750 NETSEC_RX_PKT_ERR_MASK;
751 rx_info.err_flag = (de->attr >> NETSEC_RX_PKT_ER_FIELD) & 1;
752 if (rx_info.err_flag) {
753 netif_err(priv, drv, priv->ndev,
754 "%s: rx fail err(%d)\n", __func__,
756 ndev->stats.rx_dropped++;
757 dring->tail = (dring->tail + 1) % DESC_NUM;
758 /* reuse buffer page frag */
759 netsec_rx_fill(priv, idx, 1);
762 rx_info.rx_cksum_result =
763 (de->attr >> NETSEC_RX_PKT_CO_FIELD) & 3;
765 /* allocate a fresh buffer and map it to the hardware.
766 * This will eventually replace the old buffer in the hardware
768 buf_addr = netsec_alloc_rx_data(priv, &dma_handle, &desc_len);
769 if (unlikely(!buf_addr))
772 dma_sync_single_for_cpu(priv->dev, desc->dma_addr, pkt_len,
774 prefetch(desc->addr);
776 truesize = SKB_DATA_ALIGN(desc->len + NETSEC_SKB_PAD) +
777 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
778 skb = build_skb(desc->addr, truesize);
779 if (unlikely(!skb)) {
780 /* free the newly allocated buffer, we are not going to
783 dma_unmap_single(priv->dev, dma_handle, desc_len,
785 skb_free_frag(buf_addr);
786 netif_err(priv, drv, priv->ndev,
787 "rx failed to build skb\n");
790 dma_unmap_single_attrs(priv->dev, desc->dma_addr, desc->len,
791 DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
793 /* Update the descriptor with the new buffer we allocated */
794 desc->len = desc_len;
795 desc->dma_addr = dma_handle;
796 desc->addr = buf_addr;
798 skb_reserve(skb, NETSEC_SKB_PAD);
799 skb_put(skb, pkt_len);
800 skb->protocol = eth_type_trans(skb, priv->ndev);
802 if (priv->rx_cksum_offload_flag &&
803 rx_info.rx_cksum_result == NETSEC_RX_CKSUM_OK)
804 skb->ip_summed = CHECKSUM_UNNECESSARY;
806 if (napi_gro_receive(&priv->napi, skb) != GRO_DROP) {
807 ndev->stats.rx_packets++;
808 ndev->stats.rx_bytes += pkt_len;
811 netsec_rx_fill(priv, idx, 1);
812 dring->tail = (dring->tail + 1) % DESC_NUM;
818 static int netsec_napi_poll(struct napi_struct *napi, int budget)
820 struct netsec_priv *priv;
823 priv = container_of(napi, struct netsec_priv, napi);
825 netsec_process_tx(priv);
829 rx = netsec_process_rx(priv, todo);
833 done = budget - todo;
835 if (done < budget && napi_complete_done(napi, done)) {
838 spin_lock_irqsave(&priv->reglock, flags);
839 netsec_write(priv, NETSEC_REG_INTEN_SET,
840 NETSEC_IRQ_RX | NETSEC_IRQ_TX);
841 spin_unlock_irqrestore(&priv->reglock, flags);
847 static void netsec_set_tx_de(struct netsec_priv *priv,
848 struct netsec_desc_ring *dring,
849 const struct netsec_tx_pkt_ctrl *tx_ctrl,
850 const struct netsec_desc *desc,
853 int idx = dring->head;
854 struct netsec_de *de;
857 de = dring->vaddr + (DESC_SZ * idx);
859 attr = (1 << NETSEC_TX_SHIFT_OWN_FIELD) |
860 (1 << NETSEC_TX_SHIFT_PT_FIELD) |
861 (NETSEC_RING_GMAC << NETSEC_TX_SHIFT_TDRID_FIELD) |
862 (1 << NETSEC_TX_SHIFT_FS_FIELD) |
863 (1 << NETSEC_TX_LAST) |
864 (tx_ctrl->cksum_offload_flag << NETSEC_TX_SHIFT_CO) |
865 (tx_ctrl->tcp_seg_offload_flag << NETSEC_TX_SHIFT_SO) |
866 (1 << NETSEC_TX_SHIFT_TRS_FIELD);
867 if (idx == DESC_NUM - 1)
868 attr |= (1 << NETSEC_TX_SHIFT_LD_FIELD);
870 de->data_buf_addr_up = upper_32_bits(desc->dma_addr);
871 de->data_buf_addr_lw = lower_32_bits(desc->dma_addr);
872 de->buf_len_info = (tx_ctrl->tcp_seg_len << 16) | desc->len;
876 dring->desc[idx] = *desc;
877 dring->desc[idx].skb = skb;
879 /* move head ahead */
880 dring->head = (dring->head + 1) % DESC_NUM;
883 static int netsec_desc_used(struct netsec_desc_ring *dring)
887 if (dring->head >= dring->tail)
888 used = dring->head - dring->tail;
890 used = dring->head + DESC_NUM - dring->tail;
895 static int netsec_check_stop_tx(struct netsec_priv *priv, int used)
897 struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_TX];
899 /* keep tail from touching the queue */
900 if (DESC_NUM - used < 2) {
901 netif_stop_queue(priv->ndev);
903 /* Make sure we read the updated value in case
904 * descriptors got freed
908 used = netsec_desc_used(dring);
909 if (DESC_NUM - used < 2)
910 return NETDEV_TX_BUSY;
912 netif_wake_queue(priv->ndev);
918 static netdev_tx_t netsec_netdev_start_xmit(struct sk_buff *skb,
919 struct net_device *ndev)
921 struct netsec_priv *priv = netdev_priv(ndev);
922 struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_TX];
923 struct netsec_tx_pkt_ctrl tx_ctrl = {};
924 struct netsec_desc tx_desc;
928 filled = netsec_desc_used(dring);
929 if (netsec_check_stop_tx(priv, filled)) {
930 net_warn_ratelimited("%s %s Tx queue full\n",
931 dev_name(priv->dev), ndev->name);
932 return NETDEV_TX_BUSY;
935 if (skb->ip_summed == CHECKSUM_PARTIAL)
936 tx_ctrl.cksum_offload_flag = true;
939 tso_seg_len = skb_shinfo(skb)->gso_size;
941 if (tso_seg_len > 0) {
942 if (skb->protocol == htons(ETH_P_IP)) {
943 ip_hdr(skb)->tot_len = 0;
944 tcp_hdr(skb)->check =
945 ~tcp_v4_check(0, ip_hdr(skb)->saddr,
946 ip_hdr(skb)->daddr, 0);
948 ipv6_hdr(skb)->payload_len = 0;
949 tcp_hdr(skb)->check =
950 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
951 &ipv6_hdr(skb)->daddr,
955 tx_ctrl.tcp_seg_offload_flag = true;
956 tx_ctrl.tcp_seg_len = tso_seg_len;
959 tx_desc.dma_addr = dma_map_single(priv->dev, skb->data,
960 skb_headlen(skb), DMA_TO_DEVICE);
961 if (dma_mapping_error(priv->dev, tx_desc.dma_addr)) {
962 netif_err(priv, drv, priv->ndev,
963 "%s: DMA mapping failed\n", __func__);
964 ndev->stats.tx_dropped++;
965 dev_kfree_skb_any(skb);
968 tx_desc.addr = skb->data;
969 tx_desc.len = skb_headlen(skb);
971 skb_tx_timestamp(skb);
972 netdev_sent_queue(priv->ndev, skb->len);
974 netsec_set_tx_de(priv, dring, &tx_ctrl, &tx_desc, skb);
975 netsec_write(priv, NETSEC_REG_NRM_TX_PKTCNT, 1); /* submit another tx */
980 static void netsec_uninit_pkt_dring(struct netsec_priv *priv, int id)
982 struct netsec_desc_ring *dring = &priv->desc_ring[id];
983 struct netsec_desc *desc;
986 if (!dring->vaddr || !dring->desc)
989 for (idx = 0; idx < DESC_NUM; idx++) {
990 desc = &dring->desc[idx];
994 dma_unmap_single(priv->dev, desc->dma_addr, desc->len,
995 id == NETSEC_RING_RX ? DMA_FROM_DEVICE :
997 if (id == NETSEC_RING_RX)
998 skb_free_frag(desc->addr);
999 else if (id == NETSEC_RING_TX)
1000 dev_kfree_skb(desc->skb);
1003 memset(dring->desc, 0, sizeof(struct netsec_desc) * DESC_NUM);
1004 memset(dring->vaddr, 0, DESC_SZ * DESC_NUM);
1009 if (id == NETSEC_RING_TX)
1010 netdev_reset_queue(priv->ndev);
1013 static void netsec_free_dring(struct netsec_priv *priv, int id)
1015 struct netsec_desc_ring *dring = &priv->desc_ring[id];
1018 dma_free_coherent(priv->dev, DESC_SZ * DESC_NUM,
1019 dring->vaddr, dring->desc_dma);
1020 dring->vaddr = NULL;
1027 static int netsec_alloc_dring(struct netsec_priv *priv, enum ring_id id)
1029 struct netsec_desc_ring *dring = &priv->desc_ring[id];
1032 dring->vaddr = dma_alloc_coherent(priv->dev, DESC_SZ * DESC_NUM,
1033 &dring->desc_dma, GFP_KERNEL);
1037 dring->desc = kcalloc(DESC_NUM, sizeof(*dring->desc), GFP_KERNEL);
1041 if (id == NETSEC_RING_TX) {
1042 for (i = 0; i < DESC_NUM; i++) {
1043 struct netsec_de *de;
1045 de = dring->vaddr + (DESC_SZ * i);
1046 /* de->attr is not going to be accessed by the NIC
1047 * until netsec_set_tx_de() is called.
1048 * No need for a dma_wmb() here
1050 de->attr = 1U << NETSEC_TX_SHIFT_OWN_FIELD;
1056 netsec_free_dring(priv, id);
1061 static int netsec_setup_rx_dring(struct netsec_priv *priv)
1063 struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_RX];
1066 for (i = 0; i < DESC_NUM; i++) {
1067 struct netsec_desc *desc = &dring->desc[i];
1068 dma_addr_t dma_handle;
1072 buf = netsec_alloc_rx_data(priv, &dma_handle, &len);
1074 netsec_uninit_pkt_dring(priv, NETSEC_RING_RX);
1077 desc->dma_addr = dma_handle;
1082 netsec_rx_fill(priv, 0, DESC_NUM);
1090 static int netsec_netdev_load_ucode_region(struct netsec_priv *priv, u32 reg,
1091 u32 addr_h, u32 addr_l, u32 size)
1093 u64 base = (u64)addr_h << 32 | addr_l;
1094 void __iomem *ucode;
1097 ucode = ioremap(base, size * sizeof(u32));
1101 for (i = 0; i < size; i++)
1102 netsec_write(priv, reg, readl(ucode + i * 4));
1108 static int netsec_netdev_load_microcode(struct netsec_priv *priv)
1110 u32 addr_h, addr_l, size;
1113 addr_h = readl(priv->eeprom_base + NETSEC_EEPROM_HM_ME_ADDRESS_H);
1114 addr_l = readl(priv->eeprom_base + NETSEC_EEPROM_HM_ME_ADDRESS_L);
1115 size = readl(priv->eeprom_base + NETSEC_EEPROM_HM_ME_SIZE);
1116 err = netsec_netdev_load_ucode_region(priv, NETSEC_REG_DMAC_HM_CMD_BUF,
1117 addr_h, addr_l, size);
1121 addr_h = readl(priv->eeprom_base + NETSEC_EEPROM_MH_ME_ADDRESS_H);
1122 addr_l = readl(priv->eeprom_base + NETSEC_EEPROM_MH_ME_ADDRESS_L);
1123 size = readl(priv->eeprom_base + NETSEC_EEPROM_MH_ME_SIZE);
1124 err = netsec_netdev_load_ucode_region(priv, NETSEC_REG_DMAC_MH_CMD_BUF,
1125 addr_h, addr_l, size);
1130 addr_l = readl(priv->eeprom_base + NETSEC_EEPROM_PKT_ME_ADDRESS);
1131 size = readl(priv->eeprom_base + NETSEC_EEPROM_PKT_ME_SIZE);
1132 err = netsec_netdev_load_ucode_region(priv, NETSEC_REG_PKT_CMD_BUF,
1133 addr_h, addr_l, size);
1140 static int netsec_reset_hardware(struct netsec_priv *priv,
1146 /* stop DMA engines */
1147 if (!netsec_read(priv, NETSEC_REG_ADDR_DIS_CORE)) {
1148 netsec_write(priv, NETSEC_REG_DMA_HM_CTRL,
1149 NETSEC_DMA_CTRL_REG_STOP);
1150 netsec_write(priv, NETSEC_REG_DMA_MH_CTRL,
1151 NETSEC_DMA_CTRL_REG_STOP);
1153 while (netsec_read(priv, NETSEC_REG_DMA_HM_CTRL) &
1154 NETSEC_DMA_CTRL_REG_STOP)
1157 while (netsec_read(priv, NETSEC_REG_DMA_MH_CTRL) &
1158 NETSEC_DMA_CTRL_REG_STOP)
1162 netsec_write(priv, NETSEC_REG_SOFT_RST, NETSEC_SOFT_RST_REG_RESET);
1163 netsec_write(priv, NETSEC_REG_SOFT_RST, NETSEC_SOFT_RST_REG_RUN);
1164 netsec_write(priv, NETSEC_REG_COM_INIT, NETSEC_COM_INIT_REG_ALL);
1166 while (netsec_read(priv, NETSEC_REG_COM_INIT) != 0)
1169 /* set desc_start addr */
1170 netsec_write(priv, NETSEC_REG_NRM_RX_DESC_START_UP,
1171 upper_32_bits(priv->desc_ring[NETSEC_RING_RX].desc_dma));
1172 netsec_write(priv, NETSEC_REG_NRM_RX_DESC_START_LW,
1173 lower_32_bits(priv->desc_ring[NETSEC_RING_RX].desc_dma));
1175 netsec_write(priv, NETSEC_REG_NRM_TX_DESC_START_UP,
1176 upper_32_bits(priv->desc_ring[NETSEC_RING_TX].desc_dma));
1177 netsec_write(priv, NETSEC_REG_NRM_TX_DESC_START_LW,
1178 lower_32_bits(priv->desc_ring[NETSEC_RING_TX].desc_dma));
1180 /* set normal tx dring ring config */
1181 netsec_write(priv, NETSEC_REG_NRM_TX_CONFIG,
1182 1 << NETSEC_REG_DESC_ENDIAN);
1183 netsec_write(priv, NETSEC_REG_NRM_RX_CONFIG,
1184 1 << NETSEC_REG_DESC_ENDIAN);
1187 err = netsec_netdev_load_microcode(priv);
1189 netif_err(priv, probe, priv->ndev,
1190 "%s: failed to load microcode (%d)\n",
1196 /* start DMA engines */
1197 netsec_write(priv, NETSEC_REG_DMA_TMR_CTRL, priv->freq / 1000000 - 1);
1198 netsec_write(priv, NETSEC_REG_ADDR_DIS_CORE, 0);
1200 usleep_range(1000, 2000);
1202 if (!(netsec_read(priv, NETSEC_REG_TOP_STATUS) &
1203 NETSEC_TOP_IRQ_REG_CODE_LOAD_END)) {
1204 netif_err(priv, probe, priv->ndev,
1205 "microengine start failed\n");
1208 netsec_write(priv, NETSEC_REG_TOP_STATUS,
1209 NETSEC_TOP_IRQ_REG_CODE_LOAD_END);
1211 value = NETSEC_PKT_CTRL_REG_MODE_NRM;
1212 if (priv->ndev->mtu > ETH_DATA_LEN)
1213 value |= NETSEC_PKT_CTRL_REG_EN_JUMBO;
1215 /* change to normal mode */
1216 netsec_write(priv, NETSEC_REG_DMA_MH_CTRL, MH_CTRL__MODE_TRANS);
1217 netsec_write(priv, NETSEC_REG_PKT_CTRL, value);
1219 while ((netsec_read(priv, NETSEC_REG_MODE_TRANS_COMP_STATUS) &
1220 NETSEC_MODE_TRANS_COMP_IRQ_T2N) == 0)
1223 /* clear any pending EMPTY/ERR irq status */
1224 netsec_write(priv, NETSEC_REG_NRM_TX_STATUS, ~0);
1226 /* Disable TX & RX intr */
1227 netsec_write(priv, NETSEC_REG_INTEN_CLR, ~0);
1232 static int netsec_start_gmac(struct netsec_priv *priv)
1234 struct phy_device *phydev = priv->ndev->phydev;
1238 if (phydev->speed != SPEED_1000)
1239 value = (NETSEC_GMAC_MCR_REG_CST |
1240 NETSEC_GMAC_MCR_REG_HALF_DUPLEX_COMMON);
1242 if (netsec_mac_write(priv, GMAC_REG_MCR, value))
1244 if (netsec_mac_write(priv, GMAC_REG_BMR,
1245 NETSEC_GMAC_BMR_REG_RESET))
1248 /* Wait soft reset */
1249 usleep_range(1000, 5000);
1251 ret = netsec_mac_read(priv, GMAC_REG_BMR, &value);
1254 if (value & NETSEC_GMAC_BMR_REG_SWR)
1257 netsec_write(priv, MAC_REG_DESC_SOFT_RST, 1);
1258 if (netsec_wait_while_busy(priv, MAC_REG_DESC_SOFT_RST, 1))
1261 netsec_write(priv, MAC_REG_DESC_INIT, 1);
1262 if (netsec_wait_while_busy(priv, MAC_REG_DESC_INIT, 1))
1265 if (netsec_mac_write(priv, GMAC_REG_BMR,
1266 NETSEC_GMAC_BMR_REG_COMMON))
1268 if (netsec_mac_write(priv, GMAC_REG_RDLAR,
1269 NETSEC_GMAC_RDLAR_REG_COMMON))
1271 if (netsec_mac_write(priv, GMAC_REG_TDLAR,
1272 NETSEC_GMAC_TDLAR_REG_COMMON))
1274 if (netsec_mac_write(priv, GMAC_REG_MFFR, 0x80000001))
1277 ret = netsec_mac_update_to_phy_state(priv);
1281 ret = netsec_mac_read(priv, GMAC_REG_OMR, &value);
1285 value |= NETSEC_GMAC_OMR_REG_SR;
1286 value |= NETSEC_GMAC_OMR_REG_ST;
1288 netsec_write(priv, NETSEC_REG_NRM_RX_INTEN_CLR, ~0);
1289 netsec_write(priv, NETSEC_REG_NRM_TX_INTEN_CLR, ~0);
1291 netsec_et_set_coalesce(priv->ndev, &priv->et_coalesce);
1293 if (netsec_mac_write(priv, GMAC_REG_OMR, value))
/* netsec_stop_gmac() - quiesce the GMAC.
 *
 * Clears the OMR SR/ST bits to stop RX/TX DMA and masks all normal-mode
 * RX/TX interrupt sources.  Returns the status of the final OMR write
 * (0 on success, negative errno otherwise).
 */
1299 static int netsec_stop_gmac(struct netsec_priv *priv)
1304 ret = netsec_mac_read(priv, GMAC_REG_OMR, &value);
/* SR = receive DMA, ST = transmit DMA - clear both to halt traffic. */
1307 value &= ~NETSEC_GMAC_OMR_REG_SR;
1308 value &= ~NETSEC_GMAC_OMR_REG_ST;
1310 /* disable all interrupts */
1311 netsec_write(priv, NETSEC_REG_NRM_RX_INTEN_CLR, ~0);
1312 netsec_write(priv, NETSEC_REG_NRM_TX_INTEN_CLR, ~0);
1314 return netsec_mac_write(priv, GMAC_REG_OMR, value);
/* netsec_phy_adjust_link() - phylib link-change callback.
 *
 * (Re)starts the GMAC when the PHY reports link-up, stops it on link-down,
 * then logs the new link state via phy_print_status().
 */
1317 static void netsec_phy_adjust_link(struct net_device *ndev)
1319 struct netsec_priv *priv = netdev_priv(ndev);
1321 if (ndev->phydev->link)
1322 netsec_start_gmac(priv);
1324 netsec_stop_gmac(priv);
1326 phy_print_status(ndev->phydev);
/* netsec_irq_handler() - top-half interrupt handler (IRQF_SHARED).
 *
 * Reads the top-level status, acknowledges any pending normal-mode TX/RX
 * status bits (write-1-to-clear: the value read back is written back),
 * masks both IRQ sources under priv->reglock, and defers all real work
 * to NAPI via napi_schedule().
 */
1329 static irqreturn_t netsec_irq_handler(int irq, void *dev_id)
1331 struct netsec_priv *priv = dev_id;
1332 u32 val, status = netsec_read(priv, NETSEC_REG_TOP_STATUS);
1333 unsigned long flags;
1335 /* Disable interrupts */
1336 if (status & NETSEC_IRQ_TX) {
1337 val = netsec_read(priv, NETSEC_REG_NRM_TX_STATUS);
/* Ack TX status bits by writing back what was read (W1C semantics). */
1338 netsec_write(priv, NETSEC_REG_NRM_TX_STATUS, val);
1340 if (status & NETSEC_IRQ_RX) {
1341 val = netsec_read(priv, NETSEC_REG_NRM_RX_STATUS);
/* Likewise ack RX status bits. */
1342 netsec_write(priv, NETSEC_REG_NRM_RX_STATUS, val);
/* reglock serializes INTEN_SET/CLR against the NAPI poll re-enable. */
1345 spin_lock_irqsave(&priv->reglock, flags);
1346 netsec_write(priv, NETSEC_REG_INTEN_CLR, NETSEC_IRQ_RX | NETSEC_IRQ_TX);
1347 spin_unlock_irqrestore(&priv->reglock, flags);
1349 napi_schedule(&priv->napi);
/* netsec_netdev_open() - ndo_open: bring the interface up.
 *
 * Takes a runtime-PM reference, sets up the RX descriptor ring, requests
 * the (shared) IRQ, connects the PHY (via DT phy-handle or the directly
 * registered ACPI phydev), starts the PHY/GMAC/NAPI/TX queue, and enables
 * the top-level TX+RX interrupts.  The trailing statements visible here
 * are the error-unwind path (free IRQ, tear down RX ring, drop PM ref).
 *
 * NOTE(review): labels, returns and some error branches are elided in
 * this extract.
 */
1354 static int netsec_netdev_open(struct net_device *ndev)
1356 struct netsec_priv *priv = netdev_priv(ndev);
1359 pm_runtime_get_sync(priv->dev);
1361 ret = netsec_setup_rx_dring(priv);
1363 netif_err(priv, probe, priv->ndev,
1364 "%s: fail setup ring\n", __func__);
1368 ret = request_irq(priv->ndev->irq, netsec_irq_handler,
1369 IRQF_SHARED, "netsec", priv);
1371 netif_err(priv, drv, priv->ndev, "request_irq failed\n");
/* DT systems resolve the PHY via the phy-handle node; ACPI systems use
 * the phy_device registered manually in netsec_register_mdio().
 */
1375 if (dev_of_node(priv->dev)) {
1376 if (!of_phy_connect(priv->ndev, priv->phy_np,
1377 netsec_phy_adjust_link, 0,
1378 priv->phy_interface)) {
1379 netif_err(priv, link, priv->ndev, "missing PHY\n");
1384 ret = phy_connect_direct(priv->ndev, priv->phydev,
1385 netsec_phy_adjust_link,
1386 priv->phy_interface);
1388 netif_err(priv, link, priv->ndev,
1389 "phy_connect_direct() failed (%d)\n", ret);
1394 phy_start(ndev->phydev);
1396 netsec_start_gmac(priv);
1397 napi_enable(&priv->napi);
1398 netif_start_queue(ndev);
1400 /* Enable TX+RX intr. */
1401 netsec_write(priv, NETSEC_REG_INTEN_SET, NETSEC_IRQ_RX | NETSEC_IRQ_TX);
/* Error unwind: release IRQ, RX ring and the runtime-PM reference. */
1405 free_irq(priv->ndev->irq, priv);
1407 netsec_uninit_pkt_dring(priv, NETSEC_RING_RX);
1409 pm_runtime_put_sync(priv->dev);
/* netsec_netdev_stop() - ndo_stop: take the interface down.
 *
 * Reverse of netsec_netdev_open(): stop the TX queue and NAPI, mask all
 * top-level interrupts, stop the GMAC, release the IRQ, drain both
 * descriptor rings, disconnect the PHY, reset the hardware and drop the
 * runtime-PM reference taken at open.
 */
1413 static int netsec_netdev_stop(struct net_device *ndev)
1416 struct netsec_priv *priv = netdev_priv(ndev);
1418 netif_stop_queue(priv->ndev);
1421 napi_disable(&priv->napi);
1423 netsec_write(priv, NETSEC_REG_INTEN_CLR, ~0);
1424 netsec_stop_gmac(priv);
1426 free_irq(priv->ndev->irq, priv);
1428 netsec_uninit_pkt_dring(priv, NETSEC_RING_TX);
1429 netsec_uninit_pkt_dring(priv, NETSEC_RING_RX);
1431 phy_stop(ndev->phydev);
1432 phy_disconnect(ndev->phydev);
/* second arg false - presumably skips microcode reload; TODO confirm
 * against netsec_reset_hardware() definition (not in this extract).
 */
1434 ret = netsec_reset_hardware(priv, false);
1436 pm_runtime_put_sync(priv->dev);
/* netsec_netdev_init() - ndo_init: one-time setup at register_netdev().
 *
 * Allocates the TX and RX descriptor rings, powers down the PHY (BMCR
 * power-down bit) until the device is opened, and performs a full
 * hardware reset.  The trailing netsec_free_dring() calls are the
 * error-unwind path.
 */
1441 static int netsec_netdev_init(struct net_device *ndev)
1443 struct netsec_priv *priv = netdev_priv(ndev);
/* Ring indexing relies on DESC_NUM being a power of two. */
1447 BUILD_BUG_ON_NOT_POWER_OF_2(DESC_NUM);
1449 ret = netsec_alloc_dring(priv, NETSEC_RING_TX);
1453 ret = netsec_alloc_dring(priv, NETSEC_RING_RX);
1457 /* set phy power down */
1458 data = netsec_phy_read(priv->mii_bus, priv->phy_addr, MII_BMCR) |
1460 netsec_phy_write(priv->mii_bus, priv->phy_addr, MII_BMCR, data);
/* second arg true - presumably requests microcode load; TODO confirm
 * against netsec_reset_hardware() definition (not in this extract).
 */
1462 ret = netsec_reset_hardware(priv, true);
/* Error unwind: free rings in reverse order of allocation. */
1468 netsec_free_dring(priv, NETSEC_RING_RX);
1470 netsec_free_dring(priv, NETSEC_RING_TX);
/* netsec_netdev_uninit() - ndo_uninit: free both descriptor rings
 * allocated by netsec_netdev_init().
 */
1474 static void netsec_netdev_uninit(struct net_device *ndev)
1476 struct netsec_priv *priv = netdev_priv(ndev);
1478 netsec_free_dring(priv, NETSEC_RING_RX);
1479 netsec_free_dring(priv, NETSEC_RING_TX);
/* netsec_netdev_set_features() - ndo_set_features: the only toggleable
 * offload is RX checksum; cache its state for the RX path.
 */
1482 static int netsec_netdev_set_features(struct net_device *ndev,
1483 netdev_features_t features)
1485 struct netsec_priv *priv = netdev_priv(ndev);
1487 priv->rx_cksum_offload_flag = !!(features & NETIF_F_RXCSUM);
/* netsec_netdev_ioctl() - ndo_do_ioctl: forward MII ioctls to phylib. */
1492 static int netsec_netdev_ioctl(struct net_device *ndev, struct ifreq *ifr,
1495 return phy_mii_ioctl(ndev->phydev, ifr, cmd);
/* net_device_ops for the NETSEC interface; wired up in netsec_probe(). */
1498 static const struct net_device_ops netsec_netdev_ops = {
1499 .ndo_init = netsec_netdev_init,
1500 .ndo_uninit = netsec_netdev_uninit,
1501 .ndo_open = netsec_netdev_open,
1502 .ndo_stop = netsec_netdev_stop,
1503 .ndo_start_xmit = netsec_netdev_start_xmit,
1504 .ndo_set_features = netsec_netdev_set_features,
1505 .ndo_set_mac_address = eth_mac_addr,
1506 .ndo_validate_addr = eth_validate_addr,
1507 .ndo_do_ioctl = netsec_netdev_ioctl,
/* netsec_of_probe() - DT-specific probe: resolve the mandatory
 * 'phy-handle' phandle, derive the PHY address from it, and fetch the
 * PHY reference clock (rate cached in priv->freq).
 *
 * Returns 0 on success, negative errno on failure.
 */
1510 static int netsec_of_probe(struct platform_device *pdev,
1511 struct netsec_priv *priv, u32 *phy_addr)
1513 priv->phy_np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
1514 if (!priv->phy_np) {
1515 dev_err(&pdev->dev, "missing required property 'phy-handle'\n");
1519 *phy_addr = of_mdio_parse_addr(&pdev->dev, priv->phy_np);
1521 priv->clk = devm_clk_get(&pdev->dev, NULL); /* get by 'phy_ref_clk' */
1522 if (IS_ERR(priv->clk)) {
1523 dev_err(&pdev->dev, "phy_ref_clk not found\n");
1524 return PTR_ERR(priv->clk);
1526 priv->freq = clk_get_rate(priv->clk);
/* netsec_acpi_probe() - ACPI-specific probe: read the PHY address from
 * 'phy-channel' and the PHY reference-clock rate from
 * 'socionext,phy-clock-frequency' (no clock framework on ACPI systems).
 * Bails out early when CONFIG_ACPI is disabled.
 */
1531 static int netsec_acpi_probe(struct platform_device *pdev,
1532 struct netsec_priv *priv, u32 *phy_addr)
1536 if (!IS_ENABLED(CONFIG_ACPI))
1539 ret = device_property_read_u32(&pdev->dev, "phy-channel", phy_addr);
1542 "missing required property 'phy-channel'\n");
1546 ret = device_property_read_u32(&pdev->dev,
1547 "socionext,phy-clock-frequency",
1551 "missing required property 'socionext,phy-clock-frequency'\n");
/* netsec_unregister_mdio() - tear down the MDIO bus.
 *
 * On non-DT (ACPI) systems the phy_device was created and registered
 * manually in netsec_register_mdio(), so it must be removed and freed
 * explicitly before the bus itself is unregistered.  On DT systems
 * of_mdiobus_register() owns the PHY lifecycle.
 */
1555 static void netsec_unregister_mdio(struct netsec_priv *priv)
1557 struct phy_device *phydev = priv->phydev;
1559 if (!dev_of_node(priv->dev) && phydev) {
1560 phy_device_remove(phydev);
1561 phy_device_free(phydev);
1564 mdiobus_unregister(priv->mii_bus);
/* netsec_register_mdio() - allocate and register the MDIO bus.
 *
 * DT path: register via of_mdiobus_register() against the 'mdio' subnode
 * (falling back to the parent node for older firmware that lacks it).
 * ACPI path: register a bare bus with PHY auto-probing masked, then
 * create and register the single phy_device at @phy_addr by hand.
 *
 * Returns 0 on success, negative errno on failure.
 */
1567 static int netsec_register_mdio(struct netsec_priv *priv, u32 phy_addr)
1569 struct mii_bus *bus;
1572 bus = devm_mdiobus_alloc(priv->dev);
1576 snprintf(bus->id, MII_BUS_ID_SIZE, "%s", dev_name(priv->dev));
1578 bus->name = "SNI NETSEC MDIO";
1579 bus->read = netsec_phy_read;
1580 bus->write = netsec_phy_write;
1581 bus->parent = priv->dev;
1582 priv->mii_bus = bus;
1584 if (dev_of_node(priv->dev)) {
1585 struct device_node *mdio_node, *parent = dev_of_node(priv->dev);
1587 mdio_node = of_get_child_by_name(parent, "mdio");
1591 /* older f/w doesn't populate the mdio subnode,
1592 * allow relaxed upgrade of f/w in due time.
1594 dev_info(priv->dev, "Upgrade f/w for mdio subnode!\n");
1597 ret = of_mdiobus_register(bus, parent);
1598 of_node_put(mdio_node);
1601 dev_err(priv->dev, "mdiobus register err(%d)\n", ret);
1605 /* Mask out all PHYs from auto probing. */
1607 ret = mdiobus_register(bus);
1609 dev_err(priv->dev, "mdiobus register err(%d)\n", ret);
/* ACPI: no DT PHY node, so probe and register the PHY manually. */
1613 priv->phydev = get_phy_device(bus, phy_addr, false);
1614 if (IS_ERR(priv->phydev)) {
1615 ret = PTR_ERR(priv->phydev);
1616 dev_err(priv->dev, "get_phy_device err(%d)\n", ret);
1617 priv->phydev = NULL;
1621 ret = phy_device_register(priv->phydev);
/* Registration failed: the bus must be unwound here. */
1623 mdiobus_unregister(bus);
1625 "phy_device_register err(%d)\n", ret);
/* netsec_probe() - platform driver probe.
 *
 * Maps the MMIO (and optional EEPROM) regions, allocates the net_device,
 * determines the MAC address (device property, then EEPROM, then random),
 * runs the DT- or ACPI-specific sub-probe, checks the hardware version,
 * registers MDIO/PHY, sets the DMA mask and finally registers the netdev.
 * Trailing statements are the error-unwind chain.
 *
 * NOTE(review): some error branches, labels and returns are elided in
 * this extract.
 */
1632 static int netsec_probe(struct platform_device *pdev)
1634 struct resource *mmio_res, *eeprom_res, *irq_res;
1635 u8 *mac, macbuf[ETH_ALEN];
1636 struct netsec_priv *priv;
1637 u32 hw_ver, phy_addr = 0;
1638 struct net_device *ndev;
1641 mmio_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1643 dev_err(&pdev->dev, "No MMIO resource found.\n");
/* EEPROM region is optional - only used as a MAC-address fallback. */
1647 eeprom_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
1649 dev_info(&pdev->dev, "No EEPROM resource found.\n");
1653 irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
1655 dev_err(&pdev->dev, "No IRQ resource found.\n");
1659 ndev = alloc_etherdev(sizeof(*priv));
1663 priv = netdev_priv(ndev);
1665 spin_lock_init(&priv->reglock);
1666 SET_NETDEV_DEV(ndev, &pdev->dev);
1667 platform_set_drvdata(pdev, priv);
1668 ndev->irq = irq_res->start;
1669 priv->dev = &pdev->dev;
1672 priv->msg_enable = NETIF_MSG_TX_ERR | NETIF_MSG_HW | NETIF_MSG_DRV |
1673 NETIF_MSG_LINK | NETIF_MSG_PROBE;
1675 priv->phy_interface = device_get_phy_mode(&pdev->dev);
1676 if (priv->phy_interface < 0) {
1677 dev_err(&pdev->dev, "missing required property 'phy-mode'\n");
1682 priv->ioaddr = devm_ioremap(&pdev->dev, mmio_res->start,
1683 resource_size(mmio_res));
1684 if (!priv->ioaddr) {
1685 dev_err(&pdev->dev, "devm_ioremap() failed\n");
1690 priv->eeprom_base = devm_ioremap(&pdev->dev, eeprom_res->start,
1691 resource_size(eeprom_res));
1692 if (!priv->eeprom_base) {
1693 dev_err(&pdev->dev, "devm_ioremap() failed for EEPROM\n");
/* Preferred MAC source: the 'mac-address'/'local-mac-address' property. */
1698 mac = device_get_mac_address(&pdev->dev, macbuf, sizeof(macbuf));
1700 ether_addr_copy(ndev->dev_addr, mac);
/* Fallback: read the MAC from EEPROM.  Bytes are stored byte-swapped
 * within each 32-bit word, hence the reversed offsets below.
 */
1702 if (priv->eeprom_base &&
1703 (!mac || !is_valid_ether_addr(ndev->dev_addr))) {
1704 void __iomem *macp = priv->eeprom_base +
1705 NETSEC_EEPROM_MAC_ADDRESS;
1707 ndev->dev_addr[0] = readb(macp + 3);
1708 ndev->dev_addr[1] = readb(macp + 2);
1709 ndev->dev_addr[2] = readb(macp + 1);
1710 ndev->dev_addr[3] = readb(macp + 0);
1711 ndev->dev_addr[4] = readb(macp + 7);
1712 ndev->dev_addr[5] = readb(macp + 6);
1715 if (!is_valid_ether_addr(ndev->dev_addr)) {
1716 dev_warn(&pdev->dev, "No MAC address found, using random\n");
1717 eth_hw_addr_random(ndev);
/* Firmware-specific setup: DT vs ACPI. */
1720 if (dev_of_node(&pdev->dev))
1721 ret = netsec_of_probe(pdev, priv, &phy_addr);
1723 ret = netsec_acpi_probe(pdev, priv, &phy_addr);
1727 priv->phy_addr = phy_addr;
1730 dev_err(&pdev->dev, "missing PHY reference clock frequency\n");
1735 /* default for throughput */
1736 priv->et_coalesce.rx_coalesce_usecs = 500;
1737 priv->et_coalesce.rx_max_coalesced_frames = 8;
1738 priv->et_coalesce.tx_coalesce_usecs = 500;
1739 priv->et_coalesce.tx_max_coalesced_frames = 8;
1741 ret = device_property_read_u32(&pdev->dev, "max-frame-size",
1744 ndev->max_mtu = ETH_DATA_LEN;
1746 /* runtime_pm coverage just for probe, open/close also cover it */
1747 pm_runtime_enable(&pdev->dev);
1748 pm_runtime_get_sync(&pdev->dev);
1750 hw_ver = netsec_read(priv, NETSEC_REG_F_TAIKI_VER);
1751 /* this driver only supports F_TAIKI style NETSEC */
1752 if (NETSEC_F_NETSEC_VER_MAJOR_NUM(hw_ver) !=
1753 NETSEC_F_NETSEC_VER_MAJOR_NUM(NETSEC_REG_NETSEC_VER_F_TAIKI)) {
1758 dev_info(&pdev->dev, "hardware revision %d.%d\n",
1759 hw_ver >> 16, hw_ver & 0xffff);
1761 netif_napi_add(ndev, &priv->napi, netsec_napi_poll, NAPI_POLL_WEIGHT);
1763 ndev->netdev_ops = &netsec_netdev_ops;
1764 ndev->ethtool_ops = &netsec_ethtool_ops;
1766 ndev->features |= NETIF_F_HIGHDMA | NETIF_F_RXCSUM | NETIF_F_GSO |
1767 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
1768 ndev->hw_features = ndev->features;
1770 priv->rx_cksum_offload_flag = true;
1772 ret = netsec_register_mdio(priv, phy_addr);
/* 40-bit DMA mask: failure is non-fatal, only warned about. */
1776 if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40)))
1777 dev_warn(&pdev->dev, "Failed to set DMA mask\n");
1779 ret = register_netdev(ndev);
1781 netif_err(priv, probe, ndev, "register_netdev() failed\n");
/* Success: drop the probe-time runtime-PM reference and return. */
1785 pm_runtime_put_sync(&pdev->dev);
/* Error unwind: MDIO, NAPI, runtime PM, then report failure. */
1789 netsec_unregister_mdio(priv);
1791 netif_napi_del(&priv->napi);
1793 pm_runtime_put_sync(&pdev->dev);
1794 pm_runtime_disable(&pdev->dev);
1797 dev_err(&pdev->dev, "init failed\n");
/* netsec_remove() - platform driver remove: undo netsec_probe() in
 * reverse order (netdev, MDIO, NAPI, runtime PM, then the net_device
 * itself).
 */
1802 static int netsec_remove(struct platform_device *pdev)
1804 struct netsec_priv *priv = platform_get_drvdata(pdev);
1806 unregister_netdev(priv->ndev);
1808 netsec_unregister_mdio(priv);
1810 netif_napi_del(&priv->napi);
1812 pm_runtime_disable(&pdev->dev);
1813 free_netdev(priv->ndev);
/* netsec_runtime_suspend() - runtime PM: gate all internal clock domains
 * via CLK_EN, then disable the external PHY reference clock.
 */
1819 static int netsec_runtime_suspend(struct device *dev)
1821 struct netsec_priv *priv = dev_get_drvdata(dev);
1823 netsec_write(priv, NETSEC_REG_CLK_EN, 0);
1825 clk_disable_unprepare(priv->clk);
/* netsec_runtime_resume() - runtime PM: re-enable the PHY reference
 * clock, then ungate the D/C/G internal clock domains via CLK_EN.
 */
1830 static int netsec_runtime_resume(struct device *dev)
1832 struct netsec_priv *priv = dev_get_drvdata(dev);
1834 clk_prepare_enable(priv->clk);
1836 netsec_write(priv, NETSEC_REG_CLK_EN, NETSEC_CLK_EN_REG_DOM_D |
1837 NETSEC_CLK_EN_REG_DOM_C |
1838 NETSEC_CLK_EN_REG_DOM_G);
/* Runtime-PM-only dev_pm_ops; no system suspend/resume callbacks. */
1843 static const struct dev_pm_ops netsec_pm_ops = {
1844 SET_RUNTIME_PM_OPS(netsec_runtime_suspend, netsec_runtime_resume, NULL)
/* Device-tree and ACPI match tables for the SynQuacer NETSEC block. */
1847 static const struct of_device_id netsec_dt_ids[] = {
1848 { .compatible = "socionext,synquacer-netsec" },
1851 MODULE_DEVICE_TABLE(of, netsec_dt_ids);
1854 static const struct acpi_device_id netsec_acpi_ids[] = {
1858 MODULE_DEVICE_TABLE(acpi, netsec_acpi_ids);
/* Platform driver glue and module metadata. */
1861 static struct platform_driver netsec_driver = {
1862 .probe = netsec_probe,
1863 .remove = netsec_remove,
1866 .pm = &netsec_pm_ops,
1867 .of_match_table = netsec_dt_ids,
1868 .acpi_match_table = ACPI_PTR(netsec_acpi_ids),
1871 module_platform_driver(netsec_driver);
1873 MODULE_AUTHOR("Jassi Brar <jaswinder.singh@linaro.org>");
1874 MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
1875 MODULE_DESCRIPTION("NETSEC Ethernet driver");
1876 MODULE_LICENSE("GPL");