// SPDX-License-Identifier: GPL-2.0+

#include <linux/types.h>
#include <linux/clk.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/acpi.h>
#include <linux/of_mdio.h>
#include <linux/etherdevice.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/netlink.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>

#include <net/tcp.h>
#include <net/page_pool.h>
#include <net/ip6_checksum.h>
#define NETSEC_REG_SOFT_RST			0x104
#define NETSEC_REG_COM_INIT			0x120

#define NETSEC_REG_TOP_STATUS			0x200
#define NETSEC_IRQ_RX				BIT(1)
#define NETSEC_IRQ_TX				BIT(0)

#define NETSEC_REG_TOP_INTEN			0x204
#define NETSEC_REG_INTEN_SET			0x234
#define NETSEC_REG_INTEN_CLR			0x238

#define NETSEC_REG_NRM_TX_STATUS		0x400
#define NETSEC_REG_NRM_TX_INTEN			0x404
#define NETSEC_REG_NRM_TX_INTEN_SET		0x428
#define NETSEC_REG_NRM_TX_INTEN_CLR		0x42c
#define NRM_TX_ST_NTOWNR	BIT(17)
#define NRM_TX_ST_TR_ERR	BIT(16)
#define NRM_TX_ST_TXDONE	BIT(15)
#define NRM_TX_ST_TMREXP	BIT(14)

#define NETSEC_REG_NRM_RX_STATUS		0x440
#define NETSEC_REG_NRM_RX_INTEN			0x444
#define NETSEC_REG_NRM_RX_INTEN_SET		0x468
#define NETSEC_REG_NRM_RX_INTEN_CLR		0x46c
#define NRM_RX_ST_RC_ERR	BIT(16)
#define NRM_RX_ST_PKTCNT	BIT(15)
#define NRM_RX_ST_TMREXP	BIT(14)
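
/* The NRM_TX/RX status registers above are write-1-to-clear: the IRQ
 * handler reads them and writes the value straight back to ack the
 * events. Interrupt sources are enabled and disabled through the paired
 * INTEN_SET/INTEN_CLR registers rather than by read-modify-write of the
 * INTEN register itself.
 */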
#define NETSEC_REG_PKT_CMD_BUF			0xd0

#define NETSEC_REG_CLK_EN			0x100

#define NETSEC_REG_PKT_CTRL			0x140

#define NETSEC_REG_DMA_TMR_CTRL			0x20c
#define NETSEC_REG_F_TAIKI_MC_VER		0x22c
#define NETSEC_REG_F_TAIKI_VER			0x230
#define NETSEC_REG_DMA_HM_CTRL			0x214
#define NETSEC_REG_DMA_MH_CTRL			0x220
#define NETSEC_REG_ADDR_DIS_CORE		0x218
#define NETSEC_REG_DMAC_HM_CMD_BUF		0x210
#define NETSEC_REG_DMAC_MH_CMD_BUF		0x21c

#define NETSEC_REG_NRM_TX_PKTCNT		0x410

#define NETSEC_REG_NRM_TX_DONE_PKTCNT		0x414
#define NETSEC_REG_NRM_TX_DONE_TXINT_PKTCNT	0x418

#define NETSEC_REG_NRM_TX_TMR			0x41c

#define NETSEC_REG_NRM_RX_PKTCNT		0x454
#define NETSEC_REG_NRM_RX_RXINT_PKTCNT		0x458
#define NETSEC_REG_NRM_TX_TXINT_TMR		0x420
#define NETSEC_REG_NRM_RX_RXINT_TMR		0x460

#define NETSEC_REG_NRM_RX_TMR			0x45c

#define NETSEC_REG_NRM_TX_DESC_START_UP		0x434
#define NETSEC_REG_NRM_TX_DESC_START_LW		0x408
#define NETSEC_REG_NRM_RX_DESC_START_UP		0x474
#define NETSEC_REG_NRM_RX_DESC_START_LW		0x448

#define NETSEC_REG_NRM_TX_CONFIG		0x430
#define NETSEC_REG_NRM_RX_CONFIG		0x470
#define MAC_REG_STATUS				0x1024
#define MAC_REG_DATA				0x11c0
#define MAC_REG_CMD				0x11c4
#define MAC_REG_FLOW_TH				0x11cc
#define MAC_REG_INTF_SEL			0x11d4
#define MAC_REG_DESC_INIT			0x11fc
#define MAC_REG_DESC_SOFT_RST			0x1204
#define NETSEC_REG_MODE_TRANS_COMP_STATUS	0x500

#define GMAC_REG_MCR				0x0000
#define GMAC_REG_MFFR				0x0004
#define GMAC_REG_GAR				0x0010
#define GMAC_REG_GDR				0x0014
#define GMAC_REG_FCR				0x0018
#define GMAC_REG_BMR				0x1000
#define GMAC_REG_RDLAR				0x100c
#define GMAC_REG_TDLAR				0x1010
#define GMAC_REG_OMR				0x1018
#define MHZ(n)		((n) * 1000 * 1000)

#define NETSEC_TX_SHIFT_OWN_FIELD		31
#define NETSEC_TX_SHIFT_LD_FIELD		30
#define NETSEC_TX_SHIFT_DRID_FIELD		24
#define NETSEC_TX_SHIFT_PT_FIELD		21
#define NETSEC_TX_SHIFT_TDRID_FIELD		16
#define NETSEC_TX_SHIFT_CC_FIELD		15
#define NETSEC_TX_SHIFT_FS_FIELD		9
#define NETSEC_TX_LAST				8
#define NETSEC_TX_SHIFT_CO			7
#define NETSEC_TX_SHIFT_SO			6
#define NETSEC_TX_SHIFT_TRS_FIELD		4

#define NETSEC_RX_PKT_OWN_FIELD			31
#define NETSEC_RX_PKT_LD_FIELD			30
#define NETSEC_RX_PKT_SDRID_FIELD		24
#define NETSEC_RX_PKT_FR_FIELD			23
#define NETSEC_RX_PKT_ER_FIELD			21
#define NETSEC_RX_PKT_ERR_FIELD			16
#define NETSEC_RX_PKT_TDRID_FIELD		12
#define NETSEC_RX_PKT_FS_FIELD			9
#define NETSEC_RX_PKT_LS_FIELD			8
#define NETSEC_RX_PKT_CO_FIELD			6
#define NETSEC_RX_PKT_ERR_MASK			3

#define NETSEC_MAX_TX_PKT_LEN			1518
#define NETSEC_MAX_TX_JUMBO_PKT_LEN		9018

#define NETSEC_RING_GMAC			15
#define NETSEC_RING_MAX				2

#define NETSEC_TCP_SEG_LEN_MAX			1460
#define NETSEC_TCP_JUMBO_SEG_LEN_MAX		8960

#define NETSEC_RX_CKSUM_NOTAVAIL		0
#define NETSEC_RX_CKSUM_OK			1
#define NETSEC_RX_CKSUM_NG			2

#define NETSEC_TOP_IRQ_REG_CODE_LOAD_END	BIT(20)
#define NETSEC_IRQ_TRANSITION_COMPLETE		BIT(4)

#define NETSEC_MODE_TRANS_COMP_IRQ_N2T		BIT(20)
#define NETSEC_MODE_TRANS_COMP_IRQ_T2N		BIT(19)

#define NETSEC_INT_PKTCNT_MAX			2047

#define NETSEC_FLOW_START_TH_MAX		95
#define NETSEC_FLOW_STOP_TH_MAX			95
#define NETSEC_FLOW_PAUSE_TIME_MIN		5
#define NETSEC_CLK_EN_REG_DOM_ALL		0x3f

#define NETSEC_PKT_CTRL_REG_MODE_NRM		BIT(28)
#define NETSEC_PKT_CTRL_REG_EN_JUMBO		BIT(27)
#define NETSEC_PKT_CTRL_REG_LOG_CHKSUM_ER	BIT(3)
#define NETSEC_PKT_CTRL_REG_LOG_HD_INCOMPLETE	BIT(2)
#define NETSEC_PKT_CTRL_REG_LOG_HD_ER		BIT(1)
#define NETSEC_PKT_CTRL_REG_DRP_NO_MATCH	BIT(0)

#define NETSEC_CLK_EN_REG_DOM_G			BIT(5)
#define NETSEC_CLK_EN_REG_DOM_C			BIT(1)
#define NETSEC_CLK_EN_REG_DOM_D			BIT(0)

#define NETSEC_COM_INIT_REG_DB			BIT(2)
#define NETSEC_COM_INIT_REG_CLS			BIT(1)
#define NETSEC_COM_INIT_REG_ALL			(NETSEC_COM_INIT_REG_CLS | \
						 NETSEC_COM_INIT_REG_DB)
#define NETSEC_SOFT_RST_REG_RESET		0
#define NETSEC_SOFT_RST_REG_RUN			BIT(31)

#define NETSEC_DMA_CTRL_REG_STOP		1
#define MH_CTRL__MODE_TRANS			BIT(20)

#define NETSEC_GMAC_CMD_ST_READ			0
#define NETSEC_GMAC_CMD_ST_WRITE		BIT(28)
#define NETSEC_GMAC_CMD_ST_BUSY			BIT(31)

#define NETSEC_GMAC_BMR_REG_COMMON		0x00412080
#define NETSEC_GMAC_BMR_REG_RESET		0x00020181
#define NETSEC_GMAC_BMR_REG_SWR			0x00000001

#define NETSEC_GMAC_OMR_REG_ST			BIT(13)
#define NETSEC_GMAC_OMR_REG_SR			BIT(1)

#define NETSEC_GMAC_MCR_REG_IBN			BIT(30)
#define NETSEC_GMAC_MCR_REG_CST			BIT(25)
#define NETSEC_GMAC_MCR_REG_JE			BIT(20)
#define NETSEC_MCR_PS				BIT(15)
#define NETSEC_GMAC_MCR_REG_FES			BIT(14)
#define NETSEC_GMAC_MCR_REG_FULL_DUPLEX_COMMON	0x0000280c
#define NETSEC_GMAC_MCR_REG_HALF_DUPLEX_COMMON	0x0001a00c

#define NETSEC_FCR_RFE				BIT(2)
#define NETSEC_FCR_TFE				BIT(1)

#define NETSEC_GMAC_GAR_REG_GW			BIT(1)
#define NETSEC_GMAC_GAR_REG_GB			BIT(0)

#define NETSEC_GMAC_GAR_REG_SHIFT_PA		11
#define NETSEC_GMAC_GAR_REG_SHIFT_GR		6
#define GMAC_REG_SHIFT_CR_GAR			2

#define NETSEC_GMAC_GAR_REG_CR_25_35_MHZ	2
#define NETSEC_GMAC_GAR_REG_CR_35_60_MHZ	3
#define NETSEC_GMAC_GAR_REG_CR_60_100_MHZ	0
#define NETSEC_GMAC_GAR_REG_CR_100_150_MHZ	1
#define NETSEC_GMAC_GAR_REG_CR_150_250_MHZ	4
#define NETSEC_GMAC_GAR_REG_CR_250_300_MHZ	5
#define NETSEC_GMAC_RDLAR_REG_COMMON		0x18000
#define NETSEC_GMAC_TDLAR_REG_COMMON		0x1c000

#define NETSEC_REG_NETSEC_VER_F_TAIKI		0x50000

#define NETSEC_REG_DESC_RING_CONFIG_CFG_UP	BIT(31)
#define NETSEC_REG_DESC_RING_CONFIG_CH_RST	BIT(30)
#define NETSEC_REG_DESC_TMR_MODE		4
#define NETSEC_REG_DESC_ENDIAN			0

#define NETSEC_MAC_DESC_SOFT_RST_SOFT_RST	1
#define NETSEC_MAC_DESC_INIT_REG_INIT		1

#define NETSEC_EEPROM_MAC_ADDRESS		0x00
#define NETSEC_EEPROM_HM_ME_ADDRESS_H		0x08
#define NETSEC_EEPROM_HM_ME_ADDRESS_L		0x0C
#define NETSEC_EEPROM_HM_ME_SIZE		0x10
#define NETSEC_EEPROM_MH_ME_ADDRESS_H		0x14
#define NETSEC_EEPROM_MH_ME_ADDRESS_L		0x18
#define NETSEC_EEPROM_MH_ME_SIZE		0x1C
#define NETSEC_EEPROM_PKT_ME_ADDRESS		0x20
#define NETSEC_EEPROM_PKT_ME_SIZE		0x24
#define DESC_NUM	256

#define NETSEC_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN)
#define NETSEC_RXBUF_HEADROOM (max(XDP_PACKET_HEADROOM, NET_SKB_PAD) + \
			       NET_IP_ALIGN)
#define NETSEC_RX_BUF_NON_DATA (NETSEC_RXBUF_HEADROOM + \
				SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))

#define DESC_SZ	sizeof(struct netsec_de)

#define NETSEC_F_NETSEC_VER_MAJOR_NUM(x)	((x) & 0xffff0000)
#define NETSEC_XDP_PASS		0
#define NETSEC_XDP_CONSUMED	BIT(0)
#define NETSEC_XDP_TX		BIT(1)
#define NETSEC_XDP_REDIR	BIT(2)
#define NETSEC_XDP_RX_OK	(NETSEC_XDP_PASS | NETSEC_XDP_TX | NETSEC_XDP_REDIR)
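
/* Per-packet XDP verdicts are folded into this bitmask so the RX loop can
 * OR the results together and flush once per NAPI poll: a batch that saw
 * both XDP_TX and XDP_REDIRECT verdicts yields
 * (NETSEC_XDP_TX | NETSEC_XDP_REDIR), and netsec_finalize_xdp_rx() then
 * rings the TX doorbell and flushes the redirect maps exactly once.
 */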
enum ring_id {
	NETSEC_RING_TX = 0,
	NETSEC_RING_RX
};

enum buf_type {
	TYPE_NETSEC_SKB = 0,
	TYPE_NETSEC_XDP_TX,
	TYPE_NETSEC_XDP_NDO,
};

struct netsec_desc {
	union {
		struct sk_buff *skb;
		struct xdp_frame *xdpf;
	};
	dma_addr_t dma_addr;
	void *addr;
	u16 len;
	u8 buf_type;
};

struct netsec_desc_ring {
	dma_addr_t desc_dma;
	struct netsec_desc *desc;
	void *vaddr;
	u16 head, tail;
	u16 xdp_xmit; /* netsec_xdp_xmit packets */
	bool is_xdp;
	struct page_pool *page_pool;
	struct xdp_rxq_info xdp_rxq;
	spinlock_t lock; /* XDP tx queue locking */
};

struct netsec_priv {
	struct netsec_desc_ring desc_ring[NETSEC_RING_MAX];
	struct ethtool_coalesce et_coalesce;
	struct bpf_prog *xdp_prog;
	spinlock_t reglock; /* protect reg access */
	struct napi_struct napi;
	phy_interface_t phy_interface;
	struct net_device *ndev;
	struct device_node *phy_np;
	struct phy_device *phydev;
	struct mii_bus *mii_bus;
	void __iomem *ioaddr;
	void __iomem *eeprom_base;
	struct device *dev;
	struct clk *clk;
	u32 msg_enable;
	u32 freq;
	u32 phy_addr;
	bool rx_cksum_offload_flag;
};

struct netsec_de { /* Netsec Descriptor layout */
	u32 attr;
	u32 data_buf_addr_up;
	u32 data_buf_addr_lw;
	u32 buf_len_info;
};

struct netsec_tx_pkt_ctrl {
	u16 tcp_seg_len;
	bool tcp_seg_offload_flag;
	bool cksum_offload_flag;
};

struct netsec_rx_pkt_info {
	int rx_cksum_result;
	int err_code;
	bool err_flag;
};
static void netsec_write(struct netsec_priv *priv, u32 reg_addr, u32 val)
{
	writel(val, priv->ioaddr + reg_addr);
}

static u32 netsec_read(struct netsec_priv *priv, u32 reg_addr)
{
	return readl(priv->ioaddr + reg_addr);
}
/************* MDIO BUS OPS FOLLOW *************/

#define TIMEOUT_SPINS_MAC		1000
#define TIMEOUT_SECONDARY_MS_MAC	100
static u32 netsec_clk_type(u32 freq)
{
	if (freq < MHZ(35))
		return NETSEC_GMAC_GAR_REG_CR_25_35_MHZ;
	if (freq < MHZ(60))
		return NETSEC_GMAC_GAR_REG_CR_35_60_MHZ;
	if (freq < MHZ(100))
		return NETSEC_GMAC_GAR_REG_CR_60_100_MHZ;
	if (freq < MHZ(150))
		return NETSEC_GMAC_GAR_REG_CR_100_150_MHZ;
	if (freq < MHZ(250))
		return NETSEC_GMAC_GAR_REG_CR_150_250_MHZ;

	return NETSEC_GMAC_GAR_REG_CR_250_300_MHZ;
}
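
/* Poll @addr until the bits in @mask clear. This is a two-phase poll:
 * busy-spin for up to TIMEOUT_SPINS_MAC iterations to catch the common
 * fast case cheaply, then fall back to sleeping in 1-2 ms steps for up
 * to TIMEOUT_SECONDARY_MS_MAC milliseconds before giving up.
 */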
static int netsec_wait_while_busy(struct netsec_priv *priv, u32 addr, u32 mask)
{
	u32 timeout = TIMEOUT_SPINS_MAC;

	while (--timeout && netsec_read(priv, addr) & mask)
		cpu_relax();
	if (timeout)
		return 0;

	timeout = TIMEOUT_SECONDARY_MS_MAC;
	while (--timeout && netsec_read(priv, addr) & mask)
		usleep_range(1000, 2000);

	if (timeout)
		return 0;

	netdev_WARN(priv->ndev, "%s: timeout\n", __func__);

	return -ETIMEDOUT;
}
static int netsec_mac_write(struct netsec_priv *priv, u32 addr, u32 value)
{
	netsec_write(priv, MAC_REG_DATA, value);
	netsec_write(priv, MAC_REG_CMD, addr | NETSEC_GMAC_CMD_ST_WRITE);
	return netsec_wait_while_busy(priv,
				      MAC_REG_CMD, NETSEC_GMAC_CMD_ST_BUSY);
}
static int netsec_mac_read(struct netsec_priv *priv, u32 addr, u32 *read)
{
	int ret;

	netsec_write(priv, MAC_REG_CMD, addr | NETSEC_GMAC_CMD_ST_READ);
	ret = netsec_wait_while_busy(priv,
				     MAC_REG_CMD, NETSEC_GMAC_CMD_ST_BUSY);
	if (ret)
		return ret;

	*read = netsec_read(priv, MAC_REG_DATA);

	return 0;
}
static int netsec_mac_wait_while_busy(struct netsec_priv *priv,
				      u32 addr, u32 mask)
{
	u32 timeout = TIMEOUT_SPINS_MAC;
	u32 data;
	int ret;

	do {
		ret = netsec_mac_read(priv, addr, &data);
		if (ret)
			break;
		cpu_relax();
	} while (--timeout && (data & mask));

	if (timeout)
		return 0;

	timeout = TIMEOUT_SECONDARY_MS_MAC;
	do {
		usleep_range(1000, 2000);

		ret = netsec_mac_read(priv, addr, &data);
		if (ret)
			break;
		cpu_relax();
	} while (--timeout && (data & mask));

	if (timeout && !ret)
		return 0;

	netdev_WARN(priv->ndev, "%s: timeout\n", __func__);

	return -ETIMEDOUT;
}
static int netsec_mac_update_to_phy_state(struct netsec_priv *priv)
{
	struct phy_device *phydev = priv->ndev->phydev;
	u32 value = 0;

	value = phydev->duplex ? NETSEC_GMAC_MCR_REG_FULL_DUPLEX_COMMON :
				 NETSEC_GMAC_MCR_REG_HALF_DUPLEX_COMMON;

	if (phydev->speed != SPEED_1000)
		value |= NETSEC_MCR_PS;

	if (priv->phy_interface != PHY_INTERFACE_MODE_GMII &&
	    phydev->speed == SPEED_100)
		value |= NETSEC_GMAC_MCR_REG_FES;

	value |= NETSEC_GMAC_MCR_REG_CST | NETSEC_GMAC_MCR_REG_JE;

	if (phy_interface_mode_is_rgmii(priv->phy_interface))
		value |= NETSEC_GMAC_MCR_REG_IBN;

	if (netsec_mac_write(priv, GMAC_REG_MCR, value))
		return -ETIMEDOUT;

	return 0;
}
static int netsec_phy_read(struct mii_bus *bus, int phy_addr, int reg_addr);
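
/* PHY registers are reached indirectly through the GMAC: the data word
 * goes through GMAC_REG_GDR and the command word through GMAC_REG_GAR,
 * which packs the PHY address, register number, MDC clock divider and a
 * read/write flag, e.g. for a write:
 *
 *	GAR = (phy_addr << SHIFT_PA) | (reg << SHIFT_GR) | GW | GB |
 *	      (netsec_clk_type(freq) << GMAC_REG_SHIFT_CR_GAR);
 *
 * The GB (busy) bit is then polled until the transfer completes.
 */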
static int netsec_phy_write(struct mii_bus *bus,
			    int phy_addr, int reg, u16 val)
{
	int status;
	struct netsec_priv *priv = bus->priv;

	if (netsec_mac_write(priv, GMAC_REG_GDR, val))
		return -ETIMEDOUT;
	if (netsec_mac_write(priv, GMAC_REG_GAR,
			     phy_addr << NETSEC_GMAC_GAR_REG_SHIFT_PA |
			     reg << NETSEC_GMAC_GAR_REG_SHIFT_GR |
			     NETSEC_GMAC_GAR_REG_GW | NETSEC_GMAC_GAR_REG_GB |
			     (netsec_clk_type(priv->freq) <<
			      GMAC_REG_SHIFT_CR_GAR)))
		return -ETIMEDOUT;

	status = netsec_mac_wait_while_busy(priv, GMAC_REG_GAR,
					    NETSEC_GMAC_GAR_REG_GB);

	/* The Developerbox implements an RTL8211E PHY, which has a
	 * compatibility problem with the F_GMAC4: the RTL8211E expects the
	 * MDC clock to keep toggling for several clock cycles with MDIO high
	 * before it enters the IDLE state. To meet this requirement, the
	 * netsec driver issues a dummy read (e.g. of the PHYID1 register at
	 * offset 0x2) right after the write.
	 */
	netsec_phy_read(bus, phy_addr, MII_PHYSID1);

	return status;
}
static int netsec_phy_read(struct mii_bus *bus, int phy_addr, int reg_addr)
{
	struct netsec_priv *priv = bus->priv;
	u32 data;
	int ret;

	if (netsec_mac_write(priv, GMAC_REG_GAR, NETSEC_GMAC_GAR_REG_GB |
			     phy_addr << NETSEC_GMAC_GAR_REG_SHIFT_PA |
			     reg_addr << NETSEC_GMAC_GAR_REG_SHIFT_GR |
			     (netsec_clk_type(priv->freq) <<
			      GMAC_REG_SHIFT_CR_GAR)))
		return -ETIMEDOUT;

	ret = netsec_mac_wait_while_busy(priv, GMAC_REG_GAR,
					 NETSEC_GMAC_GAR_REG_GB);
	if (ret)
		return ret;

	ret = netsec_mac_read(priv, GMAC_REG_GDR, &data);
	if (ret)
		return ret;

	return data;
}
/************* ETHTOOL_OPS FOLLOW *************/

static void netsec_et_get_drvinfo(struct net_device *net_device,
				  struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, "netsec", sizeof(info->driver));
	strlcpy(info->bus_info, dev_name(net_device->dev.parent),
		sizeof(info->bus_info));
}
static int netsec_et_get_coalesce(struct net_device *net_device,
				  struct ethtool_coalesce *et_coalesce)
{
	struct netsec_priv *priv = netdev_priv(net_device);

	*et_coalesce = priv->et_coalesce;

	return 0;
}
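
/* Coalescing programs two thresholds per direction: an interrupt fires
 * once either the completed-frame counter or the usec timer expires,
 * whichever comes first (e.g. with the probe-time defaults, 8 frames or
 * 500 usecs). Values below 1 frame or 50 usecs are clamped here.
 */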
static int netsec_et_set_coalesce(struct net_device *net_device,
				  struct ethtool_coalesce *et_coalesce)
{
	struct netsec_priv *priv = netdev_priv(net_device);

	priv->et_coalesce = *et_coalesce;

	if (priv->et_coalesce.tx_coalesce_usecs < 50)
		priv->et_coalesce.tx_coalesce_usecs = 50;
	if (priv->et_coalesce.tx_max_coalesced_frames < 1)
		priv->et_coalesce.tx_max_coalesced_frames = 1;

	netsec_write(priv, NETSEC_REG_NRM_TX_DONE_TXINT_PKTCNT,
		     priv->et_coalesce.tx_max_coalesced_frames);
	netsec_write(priv, NETSEC_REG_NRM_TX_TXINT_TMR,
		     priv->et_coalesce.tx_coalesce_usecs);
	netsec_write(priv, NETSEC_REG_NRM_TX_INTEN_SET, NRM_TX_ST_TXDONE);
	netsec_write(priv, NETSEC_REG_NRM_TX_INTEN_SET, NRM_TX_ST_TMREXP);

	if (priv->et_coalesce.rx_coalesce_usecs < 50)
		priv->et_coalesce.rx_coalesce_usecs = 50;
	if (priv->et_coalesce.rx_max_coalesced_frames < 1)
		priv->et_coalesce.rx_max_coalesced_frames = 1;

	netsec_write(priv, NETSEC_REG_NRM_RX_RXINT_PKTCNT,
		     priv->et_coalesce.rx_max_coalesced_frames);
	netsec_write(priv, NETSEC_REG_NRM_RX_RXINT_TMR,
		     priv->et_coalesce.rx_coalesce_usecs);
	netsec_write(priv, NETSEC_REG_NRM_RX_INTEN_SET, NRM_RX_ST_PKTCNT);
	netsec_write(priv, NETSEC_REG_NRM_RX_INTEN_SET, NRM_RX_ST_TMREXP);

	return 0;
}
static u32 netsec_et_get_msglevel(struct net_device *dev)
{
	struct netsec_priv *priv = netdev_priv(dev);

	return priv->msg_enable;
}

static void netsec_et_set_msglevel(struct net_device *dev, u32 datum)
{
	struct netsec_priv *priv = netdev_priv(dev);

	priv->msg_enable = datum;
}
static const struct ethtool_ops netsec_ethtool_ops = {
	.get_drvinfo		= netsec_et_get_drvinfo,
	.get_link_ksettings	= phy_ethtool_get_link_ksettings,
	.set_link_ksettings	= phy_ethtool_set_link_ksettings,
	.get_link		= ethtool_op_get_link,
	.get_coalesce		= netsec_et_get_coalesce,
	.set_coalesce		= netsec_et_set_coalesce,
	.get_msglevel		= netsec_et_get_msglevel,
	.set_msglevel		= netsec_et_set_msglevel,
};
/************* NETDEV_OPS FOLLOW *************/
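
/* RX descriptors are handed to the hardware by setting the OWN bit in
 * the attribute word; the NIC clears it again on write-back. Every RX
 * buffer holds a full frame (FS and LS both set), and the descriptor at
 * index DESC_NUM - 1 carries the LD (last descriptor) bit so the
 * hardware wraps back to index 0.
 */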
static void netsec_set_rx_de(struct netsec_priv *priv,
			     struct netsec_desc_ring *dring, u16 idx,
			     const struct netsec_desc *desc)
{
	struct netsec_de *de = dring->vaddr + DESC_SZ * idx;
	u32 attr = (1 << NETSEC_RX_PKT_OWN_FIELD) |
		   (1 << NETSEC_RX_PKT_FS_FIELD) |
		   (1 << NETSEC_RX_PKT_LS_FIELD);

	if (idx == DESC_NUM - 1)
		attr |= (1 << NETSEC_RX_PKT_LD_FIELD);

	de->data_buf_addr_up = upper_32_bits(desc->dma_addr);
	de->data_buf_addr_lw = lower_32_bits(desc->dma_addr);
	de->buf_len_info = desc->len;
	/* buffer address and length must be visible before ownership flips */
	dma_wmb();
	de->attr = attr;

	dring->desc[idx].dma_addr = desc->dma_addr;
	dring->desc[idx].addr = desc->addr;
	dring->desc[idx].len = desc->len;
}
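
/* TX completion walks the ring from the tail while the OWN bit is clear,
 * unmaps and frees the buffers, and hands the entries back to the driver.
 * The cnt/bytes totals feed netdev_completed_queue(), pairing with the
 * netdev_sent_queue() call on the xmit path for BQL accounting.
 */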
static bool netsec_clean_tx_dring(struct netsec_priv *priv)
{
	struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_TX];
	struct netsec_de *entry;
	int tail = dring->tail;
	unsigned int bytes;
	int cnt = 0;

	if (dring->is_xdp)
		spin_lock(&dring->lock);

	bytes = 0;
	entry = dring->vaddr + DESC_SZ * tail;

	while (!(entry->attr & (1U << NETSEC_TX_SHIFT_OWN_FIELD)) &&
	       cnt < DESC_NUM) {
		struct netsec_desc *desc;
		int eop;

		desc = &dring->desc[tail];
		eop = (entry->attr >> NETSEC_TX_LAST) & 1;
		dma_rmb();

		/* if buf_type is either TYPE_NETSEC_SKB or
		 * TYPE_NETSEC_XDP_NDO we mapped it
		 */
		if (desc->buf_type != TYPE_NETSEC_XDP_TX)
			dma_unmap_single(priv->dev, desc->dma_addr, desc->len,
					 DMA_TO_DEVICE);

		if (!eop)
			goto next;

		if (desc->buf_type == TYPE_NETSEC_SKB) {
			bytes += desc->skb->len;
			dev_kfree_skb(desc->skb);
		} else {
			xdp_return_frame(desc->xdpf);
		}
next:
		/* clean up so netsec_uninit_pkt_dring() won't free the skb
		 * again
		 */
		*desc = (struct netsec_desc){};

		/* entry->attr is not going to be accessed by the NIC until
		 * netsec_set_tx_de() is called. No need for a dma_wmb() here
		 */
		entry->attr = 1U << NETSEC_TX_SHIFT_OWN_FIELD;
		/* move tail ahead */
		dring->tail = (tail + 1) % DESC_NUM;

		tail = dring->tail;
		entry = dring->vaddr + DESC_SZ * tail;
		cnt++;
	}

	if (dring->is_xdp)
		spin_unlock(&dring->lock);

	if (!cnt)
		return false;

	/* reading the register clears the irq */
	netsec_read(priv, NETSEC_REG_NRM_TX_DONE_PKTCNT);

	priv->ndev->stats.tx_packets += cnt;
	priv->ndev->stats.tx_bytes += bytes;

	netdev_completed_queue(priv->ndev, cnt, bytes);

	return true;
}
static void netsec_process_tx(struct netsec_priv *priv)
{
	struct net_device *ndev = priv->ndev;
	bool cleaned;

	cleaned = netsec_clean_tx_dring(priv);

	if (cleaned && netif_queue_stopped(ndev)) {
		/* Make sure we update the value, anyone stopping the queue
		 * after this will read the proper consumer idx
		 */
		smp_wmb();
		netif_wake_queue(ndev);
	}
}
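
/* Every RX buffer is one page from the page_pool, laid out as:
 *
 *	[ NETSEC_RXBUF_HEADROOM | packet data | skb_shared_info ]
 *
 * so with 4 KiB pages the usable payload length is
 * PAGE_SIZE - NETSEC_RX_BUF_NON_DATA bytes. The same layout serves both
 * the XDP and the non-XDP path; only the DMA direction differs.
 */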
static void *netsec_alloc_rx_data(struct netsec_priv *priv,
				  dma_addr_t *dma_handle, u16 *desc_len)
{
	struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_RX];
	enum dma_data_direction dma_dir;
	struct page *page;

	page = page_pool_dev_alloc_pages(dring->page_pool);
	if (!page)
		return NULL;

	/* We allocate the same buffer length for XDP and non-XDP cases.
	 * page_pool API will map the whole page, skip what's needed for
	 * network payloads and/or XDP
	 */
	*dma_handle = page_pool_get_dma_addr(page) + NETSEC_RXBUF_HEADROOM;
	/* Make sure the incoming payload fits in the page for XDP and non-XDP
	 * cases and reserve enough space for headroom + skb_shared_info
	 */
	*desc_len = PAGE_SIZE - NETSEC_RX_BUF_NON_DATA;
	dma_dir = page_pool_get_dma_dir(dring->page_pool);
	dma_sync_single_for_device(priv->dev, *dma_handle, *desc_len, dma_dir);

	return page_address(page);
}
static void netsec_rx_fill(struct netsec_priv *priv, u16 from, u16 num)
{
	struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_RX];
	u16 idx = from;

	while (num) {
		netsec_set_rx_de(priv, dring, idx, &dring->desc[idx]);
		idx++;
		if (idx >= DESC_NUM)
			idx = 0;
		num--;
	}
}
static void netsec_xdp_ring_tx_db(struct netsec_priv *priv, u16 pkts)
{
	if (likely(pkts))
		netsec_write(priv, NETSEC_REG_NRM_TX_PKTCNT, pkts);
}
static void netsec_finalize_xdp_rx(struct netsec_priv *priv, u32 xdp_res,
				   u16 pkts)
{
	if (xdp_res & NETSEC_XDP_REDIR)
		xdp_do_flush_map();

	if (xdp_res & NETSEC_XDP_TX)
		netsec_xdp_ring_tx_db(priv, pkts);
}
static void netsec_set_tx_de(struct netsec_priv *priv,
			     struct netsec_desc_ring *dring,
			     const struct netsec_tx_pkt_ctrl *tx_ctrl,
			     const struct netsec_desc *desc, void *buf)
{
	int idx = dring->head;
	struct netsec_de *de;
	u32 attr;

	de = dring->vaddr + (DESC_SZ * idx);

	attr = (1 << NETSEC_TX_SHIFT_OWN_FIELD) |
	       (1 << NETSEC_TX_SHIFT_PT_FIELD) |
	       (NETSEC_RING_GMAC << NETSEC_TX_SHIFT_TDRID_FIELD) |
	       (1 << NETSEC_TX_SHIFT_FS_FIELD) |
	       (1 << NETSEC_TX_LAST) |
	       (tx_ctrl->cksum_offload_flag << NETSEC_TX_SHIFT_CO) |
	       (tx_ctrl->tcp_seg_offload_flag << NETSEC_TX_SHIFT_SO) |
	       (1 << NETSEC_TX_SHIFT_TRS_FIELD);
	if (idx == DESC_NUM - 1)
		attr |= (1 << NETSEC_TX_SHIFT_LD_FIELD);

	de->data_buf_addr_up = upper_32_bits(desc->dma_addr);
	de->data_buf_addr_lw = lower_32_bits(desc->dma_addr);
	de->buf_len_info = (tx_ctrl->tcp_seg_len << 16) | desc->len;

	/* under spin_lock if using XDP */
	dma_wmb();
	de->attr = attr;

	dring->desc[idx] = *desc;
	if (desc->buf_type == TYPE_NETSEC_SKB)
		dring->desc[idx].skb = buf;
	else if (desc->buf_type == TYPE_NETSEC_XDP_TX ||
		 desc->buf_type == TYPE_NETSEC_XDP_NDO)
		dring->desc[idx].xdpf = buf;

	/* move head ahead */
	dring->head = (dring->head + 1) % DESC_NUM;
}
/* The current driver only supports 1 Txq, this should run under spin_lock() */
static u32 netsec_xdp_queue_one(struct netsec_priv *priv,
				struct xdp_frame *xdpf, bool is_ndo)
{
	struct netsec_desc_ring *tx_ring = &priv->desc_ring[NETSEC_RING_TX];
	struct page *page = virt_to_page(xdpf->data);
	struct netsec_tx_pkt_ctrl tx_ctrl = {};
	struct netsec_desc tx_desc;
	dma_addr_t dma_handle;
	u16 filled;

	if (tx_ring->head >= tx_ring->tail)
		filled = tx_ring->head - tx_ring->tail;
	else
		filled = tx_ring->head + DESC_NUM - tx_ring->tail;

	if (DESC_NUM - filled <= 1)
		return NETSEC_XDP_CONSUMED;

	if (is_ndo) {
		/* this is for ndo_xdp_xmit, the buffer needs mapping before
		 * sending
		 */
		dma_handle = dma_map_single(priv->dev, xdpf->data, xdpf->len,
					    DMA_TO_DEVICE);
		if (dma_mapping_error(priv->dev, dma_handle))
			return NETSEC_XDP_CONSUMED;
		tx_desc.buf_type = TYPE_NETSEC_XDP_NDO;
	} else {
		/* This is the device Rx buffer from page_pool. No need to
		 * remap, just sync and send it
		 */
		struct netsec_desc_ring *rx_ring =
			&priv->desc_ring[NETSEC_RING_RX];
		enum dma_data_direction dma_dir =
			page_pool_get_dma_dir(rx_ring->page_pool);

		dma_handle = page_pool_get_dma_addr(page) +
			NETSEC_RXBUF_HEADROOM;
		dma_sync_single_for_device(priv->dev, dma_handle, xdpf->len,
					   dma_dir);
		tx_desc.buf_type = TYPE_NETSEC_XDP_TX;
	}

	tx_desc.dma_addr = dma_handle;
	tx_desc.addr = xdpf->data;
	tx_desc.len = xdpf->len;

	netsec_set_tx_de(priv, tx_ring, &tx_ctrl, &tx_desc, xdpf);

	return NETSEC_XDP_TX;
}
static u32 netsec_xdp_xmit_back(struct netsec_priv *priv, struct xdp_buff *xdp)
{
	struct netsec_desc_ring *tx_ring = &priv->desc_ring[NETSEC_RING_TX];
	struct xdp_frame *xdpf = convert_to_xdp_frame(xdp);
	u32 ret;

	if (unlikely(!xdpf))
		return NETSEC_XDP_CONSUMED;

	spin_lock(&tx_ring->lock);
	ret = netsec_xdp_queue_one(priv, xdpf, false);
	spin_unlock(&tx_ring->lock);

	return ret;
}
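
/* Map the bpf program verdict onto the NETSEC_XDP_* bitmask: XDP_PASS
 * falls through to the skb path, XDP_TX queues the frame back on our own
 * TX ring, XDP_REDIRECT hands it to xdp_do_redirect(), and anything else
 * (drop, abort, unknown action) consumes the buffer back into the
 * page_pool.
 */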
static u32 netsec_run_xdp(struct netsec_priv *priv, struct bpf_prog *prog,
			  struct xdp_buff *xdp)
{
	u32 ret = NETSEC_XDP_PASS;
	int err;
	u32 act;

	act = bpf_prog_run_xdp(prog, xdp);

	switch (act) {
	case XDP_PASS:
		ret = NETSEC_XDP_PASS;
		break;
	case XDP_TX:
		ret = netsec_xdp_xmit_back(priv, xdp);
		if (ret != NETSEC_XDP_TX)
			xdp_return_buff(xdp);
		break;
	case XDP_REDIRECT:
		err = xdp_do_redirect(priv->ndev, xdp, prog);
		if (!err) {
			ret = NETSEC_XDP_REDIR;
		} else {
			ret = NETSEC_XDP_CONSUMED;
			xdp_return_buff(xdp);
		}
		break;
	default:
		bpf_warn_invalid_xdp_action(act);
		/* fall through */
	case XDP_ABORTED:
		trace_xdp_exception(priv->ndev, prog, act);
		/* fall through -- handle aborts by dropping packet */
	case XDP_DROP:
		ret = NETSEC_XDP_CONSUMED;
		xdp_return_buff(xdp);
		break;
	}

	return ret;
}
static int netsec_process_rx(struct netsec_priv *priv, int budget)
{
	struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_RX];
	struct net_device *ndev = priv->ndev;
	struct netsec_rx_pkt_info rx_info;
	enum dma_data_direction dma_dir;
	struct bpf_prog *xdp_prog;
	struct sk_buff *skb = NULL;
	u16 xdp_xmit = 0;
	u32 xdp_act = 0;
	int done = 0;

	rcu_read_lock();
	xdp_prog = READ_ONCE(priv->xdp_prog);
	dma_dir = page_pool_get_dma_dir(dring->page_pool);

	while (done < budget) {
		u16 idx = dring->tail;
		struct netsec_de *de = dring->vaddr + (DESC_SZ * idx);
		struct netsec_desc *desc = &dring->desc[idx];
		struct page *page = virt_to_page(desc->addr);
		u32 xdp_result = NETSEC_XDP_PASS;
		u16 pkt_len, desc_len;
		dma_addr_t dma_handle;
		struct xdp_buff xdp;
		void *buf_addr;

		skb = NULL;
		if (de->attr & (1U << NETSEC_RX_PKT_OWN_FIELD)) {
			/* reading the register clears the irq */
			netsec_read(priv, NETSEC_REG_NRM_RX_PKTCNT);
			break;
		}

		/* This barrier is needed to keep us from reading
		 * any other fields out of the netsec_de until we have
		 * verified the descriptor has been written back
		 */
		dma_rmb();
		done++;

		pkt_len = de->buf_len_info >> 16;
		rx_info.err_code = (de->attr >> NETSEC_RX_PKT_ERR_FIELD) &
			NETSEC_RX_PKT_ERR_MASK;
		rx_info.err_flag = (de->attr >> NETSEC_RX_PKT_ER_FIELD) & 1;
		if (rx_info.err_flag) {
			netif_err(priv, drv, priv->ndev,
				  "%s: rx fail err(%d)\n", __func__,
				  rx_info.err_code);
			ndev->stats.rx_dropped++;
			dring->tail = (dring->tail + 1) % DESC_NUM;
			/* reuse buffer page frag */
			netsec_rx_fill(priv, idx, 1);
			continue;
		}
		rx_info.rx_cksum_result =
			(de->attr >> NETSEC_RX_PKT_CO_FIELD) & 3;

		/* allocate a fresh buffer and map it to the hardware.
		 * This will eventually replace the old buffer in the hardware
		 */
		buf_addr = netsec_alloc_rx_data(priv, &dma_handle, &desc_len);

		if (unlikely(!buf_addr))
			break;

		dma_sync_single_for_cpu(priv->dev, desc->dma_addr, pkt_len,
					dma_dir);
		prefetch(desc->addr);

		xdp.data_hard_start = desc->addr;
		xdp.data = desc->addr + NETSEC_RXBUF_HEADROOM;
		xdp_set_data_meta_invalid(&xdp);
		xdp.data_end = xdp.data + pkt_len;
		xdp.rxq = &dring->xdp_rxq;

		if (xdp_prog) {
			xdp_result = netsec_run_xdp(priv, xdp_prog, &xdp);
			if (xdp_result != NETSEC_XDP_PASS) {
				xdp_act |= xdp_result;
				if (xdp_result == NETSEC_XDP_TX)
					xdp_xmit++;
				goto next;
			}
		}
		skb = build_skb(desc->addr, desc->len + NETSEC_RX_BUF_NON_DATA);

		if (unlikely(!skb)) {
			/* If skb fails recycle_direct will either unmap and
			 * free the page or refill the cache depending on the
			 * cache state. Since we paid the allocation cost if
			 * building an skb fails try to put the page into cache
			 */
			page_pool_recycle_direct(dring->page_pool, page);
			netif_err(priv, drv, priv->ndev,
				  "rx failed to build skb\n");
			break;
		}
		page_pool_release_page(dring->page_pool, page);

		skb_reserve(skb, xdp.data - xdp.data_hard_start);
		skb_put(skb, xdp.data_end - xdp.data);
		skb->protocol = eth_type_trans(skb, priv->ndev);

		if (priv->rx_cksum_offload_flag &&
		    rx_info.rx_cksum_result == NETSEC_RX_CKSUM_OK)
			skb->ip_summed = CHECKSUM_UNNECESSARY;

next:
		if ((skb && napi_gro_receive(&priv->napi, skb) != GRO_DROP) ||
		    xdp_result & NETSEC_XDP_RX_OK) {
			ndev->stats.rx_packets++;
			ndev->stats.rx_bytes += xdp.data_end - xdp.data;
		}

		/* Update the descriptor with fresh buffers */
		desc->len = desc_len;
		desc->dma_addr = dma_handle;
		desc->addr = buf_addr;

		netsec_rx_fill(priv, idx, 1);
		dring->tail = (dring->tail + 1) % DESC_NUM;
	}
	netsec_finalize_xdp_rx(priv, xdp_act, xdp_xmit);

	rcu_read_unlock();

	return done;
}
static int netsec_napi_poll(struct napi_struct *napi, int budget)
{
	struct netsec_priv *priv;
	int done;

	priv = container_of(napi, struct netsec_priv, napi);

	netsec_process_tx(priv);
	done = netsec_process_rx(priv, budget);

	if (done < budget && napi_complete_done(napi, done)) {
		unsigned long flags;

		spin_lock_irqsave(&priv->reglock, flags);
		netsec_write(priv, NETSEC_REG_INTEN_SET,
			     NETSEC_IRQ_RX | NETSEC_IRQ_TX);
		spin_unlock_irqrestore(&priv->reglock, flags);
	}

	return done;
}
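
/* Ring occupancy from the head/tail indices; e.g. with DESC_NUM = 256,
 * head = 10 and tail = 250 means the ring has wrapped and
 * 10 + 256 - 250 = 16 descriptors are in flight.
 */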
static int netsec_desc_used(struct netsec_desc_ring *dring)
{
	int used;

	if (dring->head >= dring->tail)
		used = dring->head - dring->tail;
	else
		used = dring->head + DESC_NUM - dring->tail;

	return used;
}
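
/* Stop the queue while fewer than two free descriptors remain, then
 * re-check: the smp_rmb() pairs with the smp_wmb() in netsec_process_tx()
 * so a concurrent completion that freed entries right after
 * netif_stop_queue() is not missed.
 */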
static int netsec_check_stop_tx(struct netsec_priv *priv, int used)
{
	struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_TX];

	/* keep tail from touching the queue */
	if (DESC_NUM - used < 2) {
		netif_stop_queue(priv->ndev);

		/* Make sure we read the updated value in case
		 * descriptors got freed
		 */
		smp_rmb();

		used = netsec_desc_used(dring);
		if (DESC_NUM - used < 2)
			return NETDEV_TX_BUSY;

		netif_wake_queue(priv->ndev);
	}

	return 0;
}
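
/* For TSO the pseudo-header checksum is primed by hand before handing the
 * frame to the engine: tot_len (or payload_len for IPv6) is zeroed and
 * tcp->check is seeded with the complement of the pseudo-header sum
 * excluding the length, which the hardware then fills in per generated
 * segment.
 */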
static netdev_tx_t netsec_netdev_start_xmit(struct sk_buff *skb,
					    struct net_device *ndev)
{
	struct netsec_priv *priv = netdev_priv(ndev);
	struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_TX];
	struct netsec_tx_pkt_ctrl tx_ctrl = {};
	struct netsec_desc tx_desc;
	u16 tso_seg_len = 0;
	int filled;

	if (dring->is_xdp)
		spin_lock_bh(&dring->lock);
	filled = netsec_desc_used(dring);
	if (netsec_check_stop_tx(priv, filled)) {
		if (dring->is_xdp)
			spin_unlock_bh(&dring->lock);
		net_warn_ratelimited("%s %s Tx queue full\n",
				     dev_name(priv->dev), ndev->name);
		return NETDEV_TX_BUSY;
	}

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		tx_ctrl.cksum_offload_flag = true;

	if (skb_is_gso(skb))
		tso_seg_len = skb_shinfo(skb)->gso_size;

	if (tso_seg_len > 0) {
		if (skb->protocol == htons(ETH_P_IP)) {
			ip_hdr(skb)->tot_len = 0;
			tcp_hdr(skb)->check =
				~tcp_v4_check(0, ip_hdr(skb)->saddr,
					      ip_hdr(skb)->daddr, 0);
		} else {
			ipv6_hdr(skb)->payload_len = 0;
			tcp_hdr(skb)->check =
				~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
						 &ipv6_hdr(skb)->daddr,
						 0, IPPROTO_TCP, 0);
		}

		tx_ctrl.tcp_seg_offload_flag = true;
		tx_ctrl.tcp_seg_len = tso_seg_len;
	}

	tx_desc.dma_addr = dma_map_single(priv->dev, skb->data,
					  skb_headlen(skb), DMA_TO_DEVICE);
	if (dma_mapping_error(priv->dev, tx_desc.dma_addr)) {
		if (dring->is_xdp)
			spin_unlock_bh(&dring->lock);
		netif_err(priv, drv, priv->ndev,
			  "%s: DMA mapping failed\n", __func__);
		ndev->stats.tx_dropped++;
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}
	tx_desc.addr = skb->data;
	tx_desc.len = skb_headlen(skb);
	tx_desc.buf_type = TYPE_NETSEC_SKB;

	skb_tx_timestamp(skb);
	netdev_sent_queue(priv->ndev, skb->len);

	netsec_set_tx_de(priv, dring, &tx_ctrl, &tx_desc, skb);
	if (dring->is_xdp)
		spin_unlock_bh(&dring->lock);
	netsec_write(priv, NETSEC_REG_NRM_TX_PKTCNT, 1); /* submit another tx */

	return NETDEV_TX_OK;
}
static void netsec_uninit_pkt_dring(struct netsec_priv *priv, int id)
{
	struct netsec_desc_ring *dring = &priv->desc_ring[id];
	struct netsec_desc *desc;
	u16 idx;

	if (!dring->vaddr || !dring->desc)
		return;
	for (idx = 0; idx < DESC_NUM; idx++) {
		desc = &dring->desc[idx];
		if (!desc->addr)
			continue;

		if (id == NETSEC_RING_RX) {
			struct page *page = virt_to_page(desc->addr);

			page_pool_put_page(dring->page_pool, page, false);
		} else if (id == NETSEC_RING_TX) {
			dma_unmap_single(priv->dev, desc->dma_addr, desc->len,
					 DMA_TO_DEVICE);
			dev_kfree_skb(desc->skb);
		}
	}

	/* Rx is currently using page_pool */
	if (id == NETSEC_RING_RX) {
		if (xdp_rxq_info_is_reg(&dring->xdp_rxq))
			xdp_rxq_info_unreg(&dring->xdp_rxq);
		page_pool_destroy(dring->page_pool);
	}

	memset(dring->desc, 0, sizeof(struct netsec_desc) * DESC_NUM);
	memset(dring->vaddr, 0, DESC_SZ * DESC_NUM);

	dring->head = 0;
	dring->tail = 0;

	if (id == NETSEC_RING_TX)
		netdev_reset_queue(priv->ndev);
}
static void netsec_free_dring(struct netsec_priv *priv, int id)
{
	struct netsec_desc_ring *dring = &priv->desc_ring[id];

	if (dring->vaddr) {
		dma_free_coherent(priv->dev, DESC_SZ * DESC_NUM,
				  dring->vaddr, dring->desc_dma);
		dring->vaddr = NULL;
	}

	kfree(dring->desc);
	dring->desc = NULL;
}
static int netsec_alloc_dring(struct netsec_priv *priv, enum ring_id id)
{
	struct netsec_desc_ring *dring = &priv->desc_ring[id];

	dring->vaddr = dma_alloc_coherent(priv->dev, DESC_SZ * DESC_NUM,
					  &dring->desc_dma, GFP_KERNEL);
	if (!dring->vaddr)
		goto err;

	dring->desc = kcalloc(DESC_NUM, sizeof(*dring->desc), GFP_KERNEL);
	if (!dring->desc)
		goto err;

	return 0;
err:
	netsec_free_dring(priv, id);

	return -ENOMEM;
}
static void netsec_setup_tx_dring(struct netsec_priv *priv)
{
	struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_TX];
	struct bpf_prog *xdp_prog = READ_ONCE(priv->xdp_prog);
	int i;

	for (i = 0; i < DESC_NUM; i++) {
		struct netsec_de *de;

		de = dring->vaddr + (DESC_SZ * i);
		/* de->attr is not going to be accessed by the NIC
		 * until netsec_set_tx_de() is called.
		 * No need for a dma_wmb() here
		 */
		de->attr = 1U << NETSEC_TX_SHIFT_OWN_FIELD;
	}

	if (xdp_prog)
		dring->is_xdp = true;
	else
		dring->is_xdp = false;
}
static int netsec_setup_rx_dring(struct netsec_priv *priv)
{
	struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_RX];
	struct bpf_prog *xdp_prog = READ_ONCE(priv->xdp_prog);
	struct page_pool_params pp_params = { 0 };
	int i, err;

	pp_params.order = 0;
	/* internal DMA mapping in page_pool */
	pp_params.flags = PP_FLAG_DMA_MAP;
	pp_params.pool_size = DESC_NUM;
	pp_params.nid = cpu_to_node(0);
	pp_params.dev = priv->dev;
	pp_params.dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;

	dring->page_pool = page_pool_create(&pp_params);
	if (IS_ERR(dring->page_pool)) {
		err = PTR_ERR(dring->page_pool);
		dring->page_pool = NULL;
		goto err_out;
	}

	err = xdp_rxq_info_reg(&dring->xdp_rxq, priv->ndev, 0);
	if (err)
		goto err_out;

	err = xdp_rxq_info_reg_mem_model(&dring->xdp_rxq, MEM_TYPE_PAGE_POOL,
					 dring->page_pool);
	if (err)
		goto err_out;

	for (i = 0; i < DESC_NUM; i++) {
		struct netsec_desc *desc = &dring->desc[i];
		dma_addr_t dma_handle;
		void *buf;
		u16 len;

		buf = netsec_alloc_rx_data(priv, &dma_handle, &len);

		if (!buf) {
			err = -ENOMEM;
			goto err_out;
		}
		desc->dma_addr = dma_handle;
		desc->addr = buf;
		desc->len = len;
	}

	netsec_rx_fill(priv, 0, DESC_NUM);

	return 0;

err_out:
	netsec_uninit_pkt_dring(priv, NETSEC_RING_RX);
	return err;
}
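
/* The engine microcode is not shipped with the driver: the attached
 * EEPROM/flash region provides the load address and 32-bit word count of
 * each image, and the words are streamed one by one into the
 * corresponding command buffer register (HM, MH and packet engine).
 */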
static int netsec_netdev_load_ucode_region(struct netsec_priv *priv, u32 reg,
					   u32 addr_h, u32 addr_l, u32 size)
{
	u64 base = (u64)addr_h << 32 | addr_l;
	void __iomem *ucode;
	u32 i;

	ucode = ioremap(base, size * sizeof(u32));
	if (!ucode)
		return -ENOMEM;

	for (i = 0; i < size; i++)
		netsec_write(priv, reg, readl(ucode + i * 4));

	iounmap(ucode);

	return 0;
}
static int netsec_netdev_load_microcode(struct netsec_priv *priv)
{
	u32 addr_h, addr_l, size;
	int err;

	addr_h = readl(priv->eeprom_base + NETSEC_EEPROM_HM_ME_ADDRESS_H);
	addr_l = readl(priv->eeprom_base + NETSEC_EEPROM_HM_ME_ADDRESS_L);
	size = readl(priv->eeprom_base + NETSEC_EEPROM_HM_ME_SIZE);
	err = netsec_netdev_load_ucode_region(priv, NETSEC_REG_DMAC_HM_CMD_BUF,
					      addr_h, addr_l, size);
	if (err)
		return err;

	addr_h = readl(priv->eeprom_base + NETSEC_EEPROM_MH_ME_ADDRESS_H);
	addr_l = readl(priv->eeprom_base + NETSEC_EEPROM_MH_ME_ADDRESS_L);
	size = readl(priv->eeprom_base + NETSEC_EEPROM_MH_ME_SIZE);
	err = netsec_netdev_load_ucode_region(priv, NETSEC_REG_DMAC_MH_CMD_BUF,
					      addr_h, addr_l, size);
	if (err)
		return err;

	addr_h = 0;
	addr_l = readl(priv->eeprom_base + NETSEC_EEPROM_PKT_ME_ADDRESS);
	size = readl(priv->eeprom_base + NETSEC_EEPROM_PKT_ME_SIZE);
	err = netsec_netdev_load_ucode_region(priv, NETSEC_REG_PKT_CMD_BUF,
					      addr_h, addr_l, size);
	if (err)
		return err;

	return 0;
}
static int netsec_reset_hardware(struct netsec_priv *priv,
				 bool load_ucode)
{
	u32 value;
	int err;

	/* stop DMA engines */
	if (!netsec_read(priv, NETSEC_REG_ADDR_DIS_CORE)) {
		netsec_write(priv, NETSEC_REG_DMA_HM_CTRL,
			     NETSEC_DMA_CTRL_REG_STOP);
		netsec_write(priv, NETSEC_REG_DMA_MH_CTRL,
			     NETSEC_DMA_CTRL_REG_STOP);

		while (netsec_read(priv, NETSEC_REG_DMA_HM_CTRL) &
		       NETSEC_DMA_CTRL_REG_STOP)
			cpu_relax();

		while (netsec_read(priv, NETSEC_REG_DMA_MH_CTRL) &
		       NETSEC_DMA_CTRL_REG_STOP)
			cpu_relax();
	}

	netsec_write(priv, NETSEC_REG_SOFT_RST, NETSEC_SOFT_RST_REG_RESET);
	netsec_write(priv, NETSEC_REG_SOFT_RST, NETSEC_SOFT_RST_REG_RUN);
	netsec_write(priv, NETSEC_REG_COM_INIT, NETSEC_COM_INIT_REG_ALL);

	while (netsec_read(priv, NETSEC_REG_COM_INIT) != 0)
		cpu_relax();

	/* set desc_start addr */
	netsec_write(priv, NETSEC_REG_NRM_RX_DESC_START_UP,
		     upper_32_bits(priv->desc_ring[NETSEC_RING_RX].desc_dma));
	netsec_write(priv, NETSEC_REG_NRM_RX_DESC_START_LW,
		     lower_32_bits(priv->desc_ring[NETSEC_RING_RX].desc_dma));

	netsec_write(priv, NETSEC_REG_NRM_TX_DESC_START_UP,
		     upper_32_bits(priv->desc_ring[NETSEC_RING_TX].desc_dma));
	netsec_write(priv, NETSEC_REG_NRM_TX_DESC_START_LW,
		     lower_32_bits(priv->desc_ring[NETSEC_RING_TX].desc_dma));

	/* set normal tx dring ring config */
	netsec_write(priv, NETSEC_REG_NRM_TX_CONFIG,
		     1 << NETSEC_REG_DESC_ENDIAN);
	netsec_write(priv, NETSEC_REG_NRM_RX_CONFIG,
		     1 << NETSEC_REG_DESC_ENDIAN);

	if (load_ucode) {
		err = netsec_netdev_load_microcode(priv);
		if (err) {
			netif_err(priv, probe, priv->ndev,
				  "%s: failed to load microcode (%d)\n",
				  __func__, err);
			return err;
		}
	}

	/* start DMA engines */
	netsec_write(priv, NETSEC_REG_DMA_TMR_CTRL, priv->freq / 1000000 - 1);
	netsec_write(priv, NETSEC_REG_ADDR_DIS_CORE, 0);

	usleep_range(1000, 2000);

	if (!(netsec_read(priv, NETSEC_REG_TOP_STATUS) &
	      NETSEC_TOP_IRQ_REG_CODE_LOAD_END)) {
		netif_err(priv, probe, priv->ndev,
			  "microengine start failed\n");
		return -ENXIO;
	}
	netsec_write(priv, NETSEC_REG_TOP_STATUS,
		     NETSEC_TOP_IRQ_REG_CODE_LOAD_END);

	value = NETSEC_PKT_CTRL_REG_MODE_NRM;
	if (priv->ndev->mtu > ETH_DATA_LEN)
		value |= NETSEC_PKT_CTRL_REG_EN_JUMBO;

	/* change to normal mode */
	netsec_write(priv, NETSEC_REG_DMA_MH_CTRL, MH_CTRL__MODE_TRANS);
	netsec_write(priv, NETSEC_REG_PKT_CTRL, value);

	while ((netsec_read(priv, NETSEC_REG_MODE_TRANS_COMP_STATUS) &
		NETSEC_MODE_TRANS_COMP_IRQ_T2N) == 0)
		cpu_relax();

	/* clear any pending EMPTY/ERR irq status */
	netsec_write(priv, NETSEC_REG_NRM_TX_STATUS, ~0);

	/* Disable TX & RX intr */
	netsec_write(priv, NETSEC_REG_INTEN_CLR, ~0);

	return 0;
}
static int netsec_start_gmac(struct netsec_priv *priv)
{
	struct phy_device *phydev = priv->ndev->phydev;
	u32 value = 0;
	int ret;

	if (phydev->speed != SPEED_1000)
		value = (NETSEC_GMAC_MCR_REG_CST |
			 NETSEC_GMAC_MCR_REG_HALF_DUPLEX_COMMON);

	if (netsec_mac_write(priv, GMAC_REG_MCR, value))
		return -ETIMEDOUT;
	if (netsec_mac_write(priv, GMAC_REG_BMR,
			     NETSEC_GMAC_BMR_REG_RESET))
		return -ETIMEDOUT;

	/* Wait soft reset */
	usleep_range(1000, 5000);

	ret = netsec_mac_read(priv, GMAC_REG_BMR, &value);
	if (ret)
		return ret;
	if (value & NETSEC_GMAC_BMR_REG_SWR)
		return -EAGAIN;

	netsec_write(priv, MAC_REG_DESC_SOFT_RST, 1);
	if (netsec_wait_while_busy(priv, MAC_REG_DESC_SOFT_RST, 1))
		return -ETIMEDOUT;

	netsec_write(priv, MAC_REG_DESC_INIT, 1);
	if (netsec_wait_while_busy(priv, MAC_REG_DESC_INIT, 1))
		return -ETIMEDOUT;

	if (netsec_mac_write(priv, GMAC_REG_BMR,
			     NETSEC_GMAC_BMR_REG_COMMON))
		return -ETIMEDOUT;
	if (netsec_mac_write(priv, GMAC_REG_RDLAR,
			     NETSEC_GMAC_RDLAR_REG_COMMON))
		return -ETIMEDOUT;
	if (netsec_mac_write(priv, GMAC_REG_TDLAR,
			     NETSEC_GMAC_TDLAR_REG_COMMON))
		return -ETIMEDOUT;
	if (netsec_mac_write(priv, GMAC_REG_MFFR, 0x80000001))
		return -ETIMEDOUT;

	ret = netsec_mac_update_to_phy_state(priv);
	if (ret)
		return ret;

	ret = netsec_mac_read(priv, GMAC_REG_OMR, &value);
	if (ret)
		return ret;

	value |= NETSEC_GMAC_OMR_REG_SR;
	value |= NETSEC_GMAC_OMR_REG_ST;

	netsec_write(priv, NETSEC_REG_NRM_RX_INTEN_CLR, ~0);
	netsec_write(priv, NETSEC_REG_NRM_TX_INTEN_CLR, ~0);

	netsec_et_set_coalesce(priv->ndev, &priv->et_coalesce);

	if (netsec_mac_write(priv, GMAC_REG_OMR, value))
		return -ETIMEDOUT;

	return 0;
}
static int netsec_stop_gmac(struct netsec_priv *priv)
{
	u32 value;
	int ret;

	ret = netsec_mac_read(priv, GMAC_REG_OMR, &value);
	if (ret)
		return ret;
	value &= ~NETSEC_GMAC_OMR_REG_SR;
	value &= ~NETSEC_GMAC_OMR_REG_ST;

	/* disable all interrupts */
	netsec_write(priv, NETSEC_REG_NRM_RX_INTEN_CLR, ~0);
	netsec_write(priv, NETSEC_REG_NRM_TX_INTEN_CLR, ~0);

	return netsec_mac_write(priv, GMAC_REG_OMR, value);
}
static void netsec_phy_adjust_link(struct net_device *ndev)
{
	struct netsec_priv *priv = netdev_priv(ndev);

	if (ndev->phydev->link)
		netsec_start_gmac(priv);
	else
		netsec_stop_gmac(priv);

	phy_print_status(ndev->phydev);
}
static irqreturn_t netsec_irq_handler(int irq, void *dev_id)
{
	struct netsec_priv *priv = dev_id;
	u32 val, status = netsec_read(priv, NETSEC_REG_TOP_STATUS);
	unsigned long flags;

	/* Ack the per-direction status (write-1-to-clear) */
	if (status & NETSEC_IRQ_TX) {
		val = netsec_read(priv, NETSEC_REG_NRM_TX_STATUS);
		netsec_write(priv, NETSEC_REG_NRM_TX_STATUS, val);
	}
	if (status & NETSEC_IRQ_RX) {
		val = netsec_read(priv, NETSEC_REG_NRM_RX_STATUS);
		netsec_write(priv, NETSEC_REG_NRM_RX_STATUS, val);
	}

	/* Disable interrupts until the NAPI poll re-enables them */
	spin_lock_irqsave(&priv->reglock, flags);
	netsec_write(priv, NETSEC_REG_INTEN_CLR, NETSEC_IRQ_RX | NETSEC_IRQ_TX);
	spin_unlock_irqrestore(&priv->reglock, flags);

	napi_schedule(&priv->napi);

	return IRQ_HANDLED;
}
static int netsec_netdev_open(struct net_device *ndev)
{
	struct netsec_priv *priv = netdev_priv(ndev);
	int ret;

	pm_runtime_get_sync(priv->dev);

	netsec_setup_tx_dring(priv);
	ret = netsec_setup_rx_dring(priv);
	if (ret) {
		netif_err(priv, probe, priv->ndev,
			  "%s: fail setup ring\n", __func__);
		goto err1;
	}

	ret = request_irq(priv->ndev->irq, netsec_irq_handler,
			  IRQF_SHARED, "netsec", priv);
	if (ret) {
		netif_err(priv, drv, priv->ndev, "request_irq failed\n");
		goto err2;
	}

	if (dev_of_node(priv->dev)) {
		if (!of_phy_connect(priv->ndev, priv->phy_np,
				    netsec_phy_adjust_link, 0,
				    priv->phy_interface)) {
			netif_err(priv, link, priv->ndev, "missing PHY\n");
			ret = -ENODEV;
			goto err3;
		}
	} else {
		ret = phy_connect_direct(priv->ndev, priv->phydev,
					 netsec_phy_adjust_link,
					 priv->phy_interface);
		if (ret) {
			netif_err(priv, link, priv->ndev,
				  "phy_connect_direct() failed (%d)\n", ret);
			goto err3;
		}
	}

	phy_start(ndev->phydev);

	netsec_start_gmac(priv);
	napi_enable(&priv->napi);
	netif_start_queue(ndev);

	/* Enable TX+RX intr. */
	netsec_write(priv, NETSEC_REG_INTEN_SET, NETSEC_IRQ_RX | NETSEC_IRQ_TX);

	return 0;
err3:
	free_irq(priv->ndev->irq, priv);
err2:
	netsec_uninit_pkt_dring(priv, NETSEC_RING_RX);
err1:
	pm_runtime_put_sync(priv->dev);

	return ret;
}
static int netsec_netdev_stop(struct net_device *ndev)
{
	int ret;
	struct netsec_priv *priv = netdev_priv(ndev);

	netif_stop_queue(priv->ndev);
	dma_wmb();

	napi_disable(&priv->napi);

	netsec_write(priv, NETSEC_REG_INTEN_CLR, ~0);
	netsec_stop_gmac(priv);

	free_irq(priv->ndev->irq, priv);

	netsec_uninit_pkt_dring(priv, NETSEC_RING_TX);
	netsec_uninit_pkt_dring(priv, NETSEC_RING_RX);

	phy_stop(ndev->phydev);
	phy_disconnect(ndev->phydev);

	ret = netsec_reset_hardware(priv, false);

	pm_runtime_put_sync(priv->dev);

	return ret;
}
static int netsec_netdev_init(struct net_device *ndev)
{
	struct netsec_priv *priv = netdev_priv(ndev);
	int ret;
	u16 data;

	BUILD_BUG_ON_NOT_POWER_OF_2(DESC_NUM);

	ret = netsec_alloc_dring(priv, NETSEC_RING_TX);
	if (ret)
		return ret;

	ret = netsec_alloc_dring(priv, NETSEC_RING_RX);
	if (ret)
		goto err1;

	/* set phy power down */
	data = netsec_phy_read(priv->mii_bus, priv->phy_addr, MII_BMCR) |
		BMCR_PDOWN;
	netsec_phy_write(priv->mii_bus, priv->phy_addr, MII_BMCR, data);

	ret = netsec_reset_hardware(priv, true);
	if (ret)
		goto err2;

	spin_lock_init(&priv->desc_ring[NETSEC_RING_TX].lock);
	spin_lock_init(&priv->desc_ring[NETSEC_RING_RX].lock);

	return 0;
err2:
	netsec_free_dring(priv, NETSEC_RING_RX);
err1:
	netsec_free_dring(priv, NETSEC_RING_TX);
	return ret;
}
static void netsec_netdev_uninit(struct net_device *ndev)
{
	struct netsec_priv *priv = netdev_priv(ndev);

	netsec_free_dring(priv, NETSEC_RING_RX);
	netsec_free_dring(priv, NETSEC_RING_TX);
}
static int netsec_netdev_set_features(struct net_device *ndev,
				      netdev_features_t features)
{
	struct netsec_priv *priv = netdev_priv(ndev);

	priv->rx_cksum_offload_flag = !!(features & NETIF_F_RXCSUM);

	return 0;
}

static int netsec_netdev_ioctl(struct net_device *ndev, struct ifreq *ifr,
			       int cmd)
{
	return phy_mii_ioctl(ndev->phydev, ifr, cmd);
}
static int netsec_xdp_xmit(struct net_device *ndev, int n,
			   struct xdp_frame **frames, u32 flags)
{
	struct netsec_priv *priv = netdev_priv(ndev);
	struct netsec_desc_ring *tx_ring = &priv->desc_ring[NETSEC_RING_TX];
	int drops = 0;
	int i;

	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
		return -EINVAL;

	spin_lock(&tx_ring->lock);
	for (i = 0; i < n; i++) {
		struct xdp_frame *xdpf = frames[i];
		int err;

		err = netsec_xdp_queue_one(priv, xdpf, true);
		if (err != NETSEC_XDP_TX) {
			xdp_return_frame_rx_napi(xdpf);
			drops++;
		} else {
			tx_ring->xdp_xmit++;
		}
	}
	spin_unlock(&tx_ring->lock);

	if (unlikely(flags & XDP_XMIT_FLUSH)) {
		netsec_xdp_ring_tx_db(priv, tx_ring->xdp_xmit);
		tx_ring->xdp_xmit = 0;
	}

	return n - drops;
}
static int netsec_xdp_setup(struct netsec_priv *priv, struct bpf_prog *prog,
			    struct netlink_ext_ack *extack)
{
	struct net_device *dev = priv->ndev;
	struct bpf_prog *old_prog;

	/* For now just support only the usual MTU sized frames */
	if (prog && dev->mtu > 1500) {
		NL_SET_ERR_MSG_MOD(extack, "Jumbo frames not supported on XDP");
		return -EOPNOTSUPP;
	}

	if (netif_running(dev))
		netsec_netdev_stop(dev);

	/* Detach old prog, if any */
	old_prog = xchg(&priv->xdp_prog, prog);
	if (old_prog)
		bpf_prog_put(old_prog);

	if (netif_running(dev))
		netsec_netdev_open(dev);

	return 0;
}
static int netsec_xdp(struct net_device *ndev, struct netdev_bpf *xdp)
{
	struct netsec_priv *priv = netdev_priv(ndev);

	switch (xdp->command) {
	case XDP_SETUP_PROG:
		return netsec_xdp_setup(priv, xdp->prog, xdp->extack);
	case XDP_QUERY_PROG:
		xdp->prog_id = priv->xdp_prog ? priv->xdp_prog->aux->id : 0;
		return 0;
	default:
		return -EINVAL;
	}
}
static const struct net_device_ops netsec_netdev_ops = {
	.ndo_init		= netsec_netdev_init,
	.ndo_uninit		= netsec_netdev_uninit,
	.ndo_open		= netsec_netdev_open,
	.ndo_stop		= netsec_netdev_stop,
	.ndo_start_xmit		= netsec_netdev_start_xmit,
	.ndo_set_features	= netsec_netdev_set_features,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= netsec_netdev_ioctl,
	.ndo_xdp_xmit		= netsec_xdp_xmit,
	.ndo_bpf		= netsec_xdp,
};
static int netsec_of_probe(struct platform_device *pdev,
			   struct netsec_priv *priv, u32 *phy_addr)
{
	priv->phy_np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
	if (!priv->phy_np) {
		dev_err(&pdev->dev, "missing required property 'phy-handle'\n");
		return -EINVAL;
	}

	*phy_addr = of_mdio_parse_addr(&pdev->dev, priv->phy_np);

	priv->clk = devm_clk_get(&pdev->dev, NULL); /* get by 'phy_ref_clk' */
	if (IS_ERR(priv->clk)) {
		dev_err(&pdev->dev, "phy_ref_clk not found\n");
		return PTR_ERR(priv->clk);
	}
	priv->freq = clk_get_rate(priv->clk);

	return 0;
}
static int netsec_acpi_probe(struct platform_device *pdev,
			     struct netsec_priv *priv, u32 *phy_addr)
{
	int ret;

	if (!IS_ENABLED(CONFIG_ACPI))
		return -ENODEV;

	ret = device_property_read_u32(&pdev->dev, "phy-channel", phy_addr);
	if (ret) {
		dev_err(&pdev->dev,
			"missing required property 'phy-channel'\n");
		return ret;
	}

	ret = device_property_read_u32(&pdev->dev,
				       "socionext,phy-clock-frequency",
				       &priv->freq);
	if (ret)
		dev_err(&pdev->dev,
			"missing required property 'socionext,phy-clock-frequency'\n");
	return ret;
}
static void netsec_unregister_mdio(struct netsec_priv *priv)
{
	struct phy_device *phydev = priv->phydev;

	if (!dev_of_node(priv->dev) && phydev) {
		phy_device_remove(phydev);
		phy_device_free(phydev);
	}

	mdiobus_unregister(priv->mii_bus);
}
static int netsec_register_mdio(struct netsec_priv *priv, u32 phy_addr)
{
	struct mii_bus *bus;
	int ret;

	bus = devm_mdiobus_alloc(priv->dev);
	if (!bus)
		return -ENOMEM;

	snprintf(bus->id, MII_BUS_ID_SIZE, "%s", dev_name(priv->dev));
	bus->priv = priv;
	bus->name = "SNI NETSEC MDIO";
	bus->read = netsec_phy_read;
	bus->write = netsec_phy_write;
	bus->parent = priv->dev;
	priv->mii_bus = bus;

	if (dev_of_node(priv->dev)) {
		struct device_node *mdio_node, *parent = dev_of_node(priv->dev);

		mdio_node = of_get_child_by_name(parent, "mdio");
		if (mdio_node) {
			parent = mdio_node;
		} else {
			/* older f/w doesn't populate the mdio subnode,
			 * allow relaxed upgrade of f/w in due time.
			 */
			dev_info(priv->dev, "Upgrade f/w for mdio subnode!\n");
		}

		ret = of_mdiobus_register(bus, parent);
		of_node_put(mdio_node);

		if (ret) {
			dev_err(priv->dev, "mdiobus register err(%d)\n", ret);
			return ret;
		}
	} else {
		/* Mask out all PHYs from auto probing. */
		bus->phy_mask = ~0;
		ret = mdiobus_register(bus);
		if (ret) {
			dev_err(priv->dev, "mdiobus register err(%d)\n", ret);
			return ret;
		}

		priv->phydev = get_phy_device(bus, phy_addr, false);
		if (IS_ERR(priv->phydev)) {
			ret = PTR_ERR(priv->phydev);
			dev_err(priv->dev, "get_phy_device err(%d)\n", ret);
			priv->phydev = NULL;
			return -ENODEV;
		}

		ret = phy_device_register(priv->phydev);
		if (ret) {
			mdiobus_unregister(bus);
			dev_err(priv->dev,
				"phy_device_register err(%d)\n", ret);
		}
	}

	return ret;
}
static int netsec_probe(struct platform_device *pdev)
{
	struct resource *mmio_res, *eeprom_res, *irq_res;
	u8 *mac, macbuf[ETH_ALEN];
	struct netsec_priv *priv;
	u32 hw_ver, phy_addr = 0;
	struct net_device *ndev;
	int ret;

	mmio_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!mmio_res) {
		dev_err(&pdev->dev, "No MMIO resource found.\n");
		return -ENODEV;
	}

	eeprom_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (!eeprom_res) {
		dev_info(&pdev->dev, "No EEPROM resource found.\n");
		return -ENODEV;
	}

	irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!irq_res) {
		dev_err(&pdev->dev, "No IRQ resource found.\n");
		return -ENODEV;
	}

	ndev = alloc_etherdev(sizeof(*priv));
	if (!ndev)
		return -ENOMEM;

	priv = netdev_priv(ndev);

	spin_lock_init(&priv->reglock);
	SET_NETDEV_DEV(ndev, &pdev->dev);
	platform_set_drvdata(pdev, priv);
	ndev->irq = irq_res->start;
	priv->dev = &pdev->dev;
	priv->ndev = ndev;

	priv->msg_enable = NETIF_MSG_TX_ERR | NETIF_MSG_HW | NETIF_MSG_DRV |
			   NETIF_MSG_LINK | NETIF_MSG_PROBE;

	priv->phy_interface = device_get_phy_mode(&pdev->dev);
	if (priv->phy_interface < 0) {
		dev_err(&pdev->dev, "missing required property 'phy-mode'\n");
		ret = -ENODEV;
		goto free_ndev;
	}

	priv->ioaddr = devm_ioremap(&pdev->dev, mmio_res->start,
				    resource_size(mmio_res));
	if (!priv->ioaddr) {
		dev_err(&pdev->dev, "devm_ioremap() failed\n");
		ret = -ENXIO;
		goto free_ndev;
	}

	priv->eeprom_base = devm_ioremap(&pdev->dev, eeprom_res->start,
					 resource_size(eeprom_res));
	if (!priv->eeprom_base) {
		dev_err(&pdev->dev, "devm_ioremap() failed for EEPROM\n");
		ret = -ENXIO;
		goto free_ndev;
	}

	mac = device_get_mac_address(&pdev->dev, macbuf, sizeof(macbuf));
	if (mac)
		ether_addr_copy(ndev->dev_addr, mac);

	if (priv->eeprom_base &&
	    (!mac || !is_valid_ether_addr(ndev->dev_addr))) {
		void __iomem *macp = priv->eeprom_base +
					NETSEC_EEPROM_MAC_ADDRESS;
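
		/* The EEPROM stores the MAC address as two 32-bit words with
		 * the octets reversed within each word, hence the byte
		 * swizzle below: octets 0-3 sit at offsets 3..0 and octets
		 * 4-5 at offsets 7..6.
		 */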
		ndev->dev_addr[0] = readb(macp + 3);
		ndev->dev_addr[1] = readb(macp + 2);
		ndev->dev_addr[2] = readb(macp + 1);
		ndev->dev_addr[3] = readb(macp + 0);
		ndev->dev_addr[4] = readb(macp + 7);
		ndev->dev_addr[5] = readb(macp + 6);
	}

	if (!is_valid_ether_addr(ndev->dev_addr)) {
		dev_warn(&pdev->dev, "No MAC address found, using random\n");
		eth_hw_addr_random(ndev);
	}

	if (dev_of_node(&pdev->dev))
		ret = netsec_of_probe(pdev, priv, &phy_addr);
	else
		ret = netsec_acpi_probe(pdev, priv, &phy_addr);
	if (ret)
		goto free_ndev;

	priv->phy_addr = phy_addr;

	if (!priv->freq) {
		dev_err(&pdev->dev, "missing PHY reference clock frequency\n");
		ret = -ENODEV;
		goto free_ndev;
	}

	/* default for throughput */
	priv->et_coalesce.rx_coalesce_usecs = 500;
	priv->et_coalesce.rx_max_coalesced_frames = 8;
	priv->et_coalesce.tx_coalesce_usecs = 500;
	priv->et_coalesce.tx_max_coalesced_frames = 8;

	ret = device_property_read_u32(&pdev->dev, "max-frame-size",
				       &ndev->max_mtu);
	if (ret < 0)
		ndev->max_mtu = ETH_DATA_LEN;

	/* runtime_pm coverage just for probe, open/close also cover it */
	pm_runtime_enable(&pdev->dev);
	pm_runtime_get_sync(&pdev->dev);

	hw_ver = netsec_read(priv, NETSEC_REG_F_TAIKI_VER);
	/* this driver only supports F_TAIKI style NETSEC */
	if (NETSEC_F_NETSEC_VER_MAJOR_NUM(hw_ver) !=
	    NETSEC_F_NETSEC_VER_MAJOR_NUM(NETSEC_REG_NETSEC_VER_F_TAIKI)) {
		ret = -ENODEV;
		goto pm_disable;
	}

	dev_info(&pdev->dev, "hardware revision %d.%d\n",
		 hw_ver >> 16, hw_ver & 0xffff);

	netif_napi_add(ndev, &priv->napi, netsec_napi_poll, NAPI_POLL_WEIGHT);

	ndev->netdev_ops = &netsec_netdev_ops;
	ndev->ethtool_ops = &netsec_ethtool_ops;

	ndev->features |= NETIF_F_HIGHDMA | NETIF_F_RXCSUM | NETIF_F_GSO |
			  NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
	ndev->hw_features = ndev->features;

	priv->rx_cksum_offload_flag = true;

	ret = netsec_register_mdio(priv, phy_addr);
	if (ret)
		goto unreg_napi;

	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40)))
		dev_warn(&pdev->dev, "Failed to set DMA mask\n");
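
	/* The descriptor format splits a 64-bit buffer address across two
	 * 32-bit fields, but the platform bus is narrower; if the 40-bit
	 * mask is rejected the device keeps the default 32-bit DMA mask and
	 * probing continues with just a warning.
	 */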
	ret = register_netdev(ndev);
	if (ret) {
		netif_err(priv, probe, ndev, "register_netdev() failed\n");
		goto unreg_mii;
	}

	pm_runtime_put_sync(&pdev->dev);

	return 0;

unreg_mii:
	netsec_unregister_mdio(priv);
unreg_napi:
	netif_napi_del(&priv->napi);
pm_disable:
	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
free_ndev:
	free_netdev(ndev);
	dev_err(&pdev->dev, "init failed\n");

	return ret;
}
static int netsec_remove(struct platform_device *pdev)
{
	struct netsec_priv *priv = platform_get_drvdata(pdev);

	unregister_netdev(priv->ndev);

	netsec_unregister_mdio(priv);

	netif_napi_del(&priv->napi);

	pm_runtime_disable(&pdev->dev);
	free_netdev(priv->ndev);

	return 0;
}
static int netsec_runtime_suspend(struct device *dev)
{
	struct netsec_priv *priv = dev_get_drvdata(dev);

	netsec_write(priv, NETSEC_REG_CLK_EN, 0);

	clk_disable_unprepare(priv->clk);

	return 0;
}

static int netsec_runtime_resume(struct device *dev)
{
	struct netsec_priv *priv = dev_get_drvdata(dev);

	clk_prepare_enable(priv->clk);

	netsec_write(priv, NETSEC_REG_CLK_EN, NETSEC_CLK_EN_REG_DOM_D |
					       NETSEC_CLK_EN_REG_DOM_C |
					       NETSEC_CLK_EN_REG_DOM_G);
	return 0;
}
static const struct dev_pm_ops netsec_pm_ops = {
	SET_RUNTIME_PM_OPS(netsec_runtime_suspend, netsec_runtime_resume, NULL)
};

static const struct of_device_id netsec_dt_ids[] = {
	{ .compatible = "socionext,synquacer-netsec" },
	{ }
};
MODULE_DEVICE_TABLE(of, netsec_dt_ids);

#ifdef CONFIG_ACPI
static const struct acpi_device_id netsec_acpi_ids[] = {
	{ "SCX0001" },
	{ }
};
MODULE_DEVICE_TABLE(acpi, netsec_acpi_ids);
#endif

static struct platform_driver netsec_driver = {
	.probe	= netsec_probe,
	.remove	= netsec_remove,
	.driver = {
		.name = "netsec",
		.pm = &netsec_pm_ops,
		.of_match_table = netsec_dt_ids,
		.acpi_match_table = ACPI_PTR(netsec_acpi_ids),
	},
};
module_platform_driver(netsec_driver);

MODULE_AUTHOR("Jassi Brar <jaswinder.singh@linaro.org>");
MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
MODULE_DESCRIPTION("NETSEC Ethernet driver");
MODULE_LICENSE("GPL");