// SPDX-License-Identifier: GPL-2.0+

#include <linux/types.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/acpi.h>
#include <linux/of_mdio.h>
#include <linux/etherdevice.h>
#include <linux/interrupt.h>
#include <linux/netlink.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>

#include <net/page_pool.h>
#include <net/ip6_checksum.h>
#define NETSEC_REG_SOFT_RST 0x104
#define NETSEC_REG_COM_INIT 0x120

#define NETSEC_REG_TOP_STATUS 0x200
#define NETSEC_IRQ_RX BIT(1)
#define NETSEC_IRQ_TX BIT(0)

#define NETSEC_REG_TOP_INTEN 0x204
#define NETSEC_REG_INTEN_SET 0x234
#define NETSEC_REG_INTEN_CLR 0x238
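/* Judging by how the driver uses them, INTEN_SET and INTEN_CLR appear to be
 * write-1-to-set / write-1-to-clear aliases of the TOP_INTEN enable mask:
 * interrupts are enabled by writing NETSEC_IRQ_RX | NETSEC_IRQ_TX to
 * INTEN_SET and masked by writing the same bits (or ~0) to INTEN_CLR, with
 * no read-modify-write of TOP_INTEN itself. The per-ring NRM_TX/RX_INTEN
 * registers below follow the same SET/CLR pattern.
 */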
#define NETSEC_REG_NRM_TX_STATUS 0x400
#define NETSEC_REG_NRM_TX_INTEN 0x404
#define NETSEC_REG_NRM_TX_INTEN_SET 0x428
#define NETSEC_REG_NRM_TX_INTEN_CLR 0x42c
#define NRM_TX_ST_NTOWNR BIT(17)
#define NRM_TX_ST_TR_ERR BIT(16)
#define NRM_TX_ST_TXDONE BIT(15)
#define NRM_TX_ST_TMREXP BIT(14)

#define NETSEC_REG_NRM_RX_STATUS 0x440
#define NETSEC_REG_NRM_RX_INTEN 0x444
#define NETSEC_REG_NRM_RX_INTEN_SET 0x468
#define NETSEC_REG_NRM_RX_INTEN_CLR 0x46c
#define NRM_RX_ST_RC_ERR BIT(16)
#define NRM_RX_ST_PKTCNT BIT(15)
#define NRM_RX_ST_TMREXP BIT(14)

#define NETSEC_REG_PKT_CMD_BUF 0xd0

#define NETSEC_REG_CLK_EN 0x100

#define NETSEC_REG_PKT_CTRL 0x140

#define NETSEC_REG_DMA_TMR_CTRL 0x20c
#define NETSEC_REG_F_TAIKI_MC_VER 0x22c
#define NETSEC_REG_F_TAIKI_VER 0x230
#define NETSEC_REG_DMA_HM_CTRL 0x214
#define NETSEC_REG_DMA_MH_CTRL 0x220
#define NETSEC_REG_ADDR_DIS_CORE 0x218
#define NETSEC_REG_DMAC_HM_CMD_BUF 0x210
#define NETSEC_REG_DMAC_MH_CMD_BUF 0x21c

#define NETSEC_REG_NRM_TX_PKTCNT 0x410

#define NETSEC_REG_NRM_TX_DONE_PKTCNT 0x414
#define NETSEC_REG_NRM_TX_DONE_TXINT_PKTCNT 0x418

#define NETSEC_REG_NRM_TX_TMR 0x41c

#define NETSEC_REG_NRM_RX_PKTCNT 0x454
#define NETSEC_REG_NRM_RX_RXINT_PKTCNT 0x458
#define NETSEC_REG_NRM_TX_TXINT_TMR 0x420
#define NETSEC_REG_NRM_RX_RXINT_TMR 0x460

#define NETSEC_REG_NRM_RX_TMR 0x45c

#define NETSEC_REG_NRM_TX_DESC_START_UP 0x434
#define NETSEC_REG_NRM_TX_DESC_START_LW 0x408
#define NETSEC_REG_NRM_RX_DESC_START_UP 0x474
#define NETSEC_REG_NRM_RX_DESC_START_LW 0x448

#define NETSEC_REG_NRM_TX_CONFIG 0x430
#define NETSEC_REG_NRM_RX_CONFIG 0x470

#define MAC_REG_STATUS 0x1024
#define MAC_REG_DATA 0x11c0
#define MAC_REG_CMD 0x11c4
#define MAC_REG_FLOW_TH 0x11cc
#define MAC_REG_INTF_SEL 0x11d4
#define MAC_REG_DESC_INIT 0x11fc
#define MAC_REG_DESC_SOFT_RST 0x1204
#define NETSEC_REG_MODE_TRANS_COMP_STATUS 0x500

#define GMAC_REG_MCR 0x0000
#define GMAC_REG_MFFR 0x0004
#define GMAC_REG_GAR 0x0010
#define GMAC_REG_GDR 0x0014
#define GMAC_REG_FCR 0x0018
#define GMAC_REG_BMR 0x1000
#define GMAC_REG_RDLAR 0x100c
#define GMAC_REG_TDLAR 0x1010
#define GMAC_REG_OMR 0x1018

#define MHZ(n) ((n) * 1000 * 1000)

#define NETSEC_TX_SHIFT_OWN_FIELD 31
#define NETSEC_TX_SHIFT_LD_FIELD 30
#define NETSEC_TX_SHIFT_DRID_FIELD 24
#define NETSEC_TX_SHIFT_PT_FIELD 21
#define NETSEC_TX_SHIFT_TDRID_FIELD 16
#define NETSEC_TX_SHIFT_CC_FIELD 15
#define NETSEC_TX_SHIFT_FS_FIELD 9
#define NETSEC_TX_LAST 8
#define NETSEC_TX_SHIFT_CO 7
#define NETSEC_TX_SHIFT_SO 6
#define NETSEC_TX_SHIFT_TRS_FIELD 4

#define NETSEC_RX_PKT_OWN_FIELD 31
#define NETSEC_RX_PKT_LD_FIELD 30
#define NETSEC_RX_PKT_SDRID_FIELD 24
#define NETSEC_RX_PKT_FR_FIELD 23
#define NETSEC_RX_PKT_ER_FIELD 21
#define NETSEC_RX_PKT_ERR_FIELD 16
#define NETSEC_RX_PKT_TDRID_FIELD 12
#define NETSEC_RX_PKT_FS_FIELD 9
#define NETSEC_RX_PKT_LS_FIELD 8
#define NETSEC_RX_PKT_CO_FIELD 6

#define NETSEC_RX_PKT_ERR_MASK 3
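/* The NETSEC_TX_* and NETSEC_RX_PKT_* values above are bit positions within
 * the 32-bit attr word of a descriptor (struct netsec_de). As a worked
 * example taken from netsec_set_rx_de() below, a fresh Rx descriptor is
 * assembled as
 *
 *	attr = (1 << NETSEC_RX_PKT_OWN_FIELD) |	// device owns the descriptor
 *	       (1 << NETSEC_RX_PKT_FS_FIELD) |	// first segment of a frame
 *	       (1 << NETSEC_RX_PKT_LS_FIELD);	// last segment of a frame
 *
 * with NETSEC_RX_PKT_LD_FIELD additionally set on the final ring entry to
 * mark the end of the descriptor list.
 */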
#define NETSEC_MAX_TX_PKT_LEN 1518
#define NETSEC_MAX_TX_JUMBO_PKT_LEN 9018

#define NETSEC_RING_GMAC 15
#define NETSEC_RING_MAX 2

#define NETSEC_TCP_SEG_LEN_MAX 1460
#define NETSEC_TCP_JUMBO_SEG_LEN_MAX 8960

#define NETSEC_RX_CKSUM_NOTAVAIL 0
#define NETSEC_RX_CKSUM_OK 1
#define NETSEC_RX_CKSUM_NG 2
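/* Hardware Rx checksum verdict, reported in the CO field of the descriptor
 * attr word. netsec_process_rx() extracts it and, when Rx checksum offload
 * is enabled, marks the skb CHECKSUM_UNNECESSARY only for
 * NETSEC_RX_CKSUM_OK; NOTAVAIL and NG fall through to software checksumming.
 */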
#define NETSEC_TOP_IRQ_REG_CODE_LOAD_END BIT(20)
#define NETSEC_IRQ_TRANSITION_COMPLETE BIT(4)

#define NETSEC_MODE_TRANS_COMP_IRQ_N2T BIT(20)
#define NETSEC_MODE_TRANS_COMP_IRQ_T2N BIT(19)

#define NETSEC_INT_PKTCNT_MAX 2047

#define NETSEC_FLOW_START_TH_MAX 95
#define NETSEC_FLOW_STOP_TH_MAX 95
#define NETSEC_FLOW_PAUSE_TIME_MIN 5

#define NETSEC_CLK_EN_REG_DOM_ALL 0x3f

#define NETSEC_PKT_CTRL_REG_MODE_NRM BIT(28)
#define NETSEC_PKT_CTRL_REG_EN_JUMBO BIT(27)
#define NETSEC_PKT_CTRL_REG_LOG_CHKSUM_ER BIT(3)
#define NETSEC_PKT_CTRL_REG_LOG_HD_INCOMPLETE BIT(2)
#define NETSEC_PKT_CTRL_REG_LOG_HD_ER BIT(1)
#define NETSEC_PKT_CTRL_REG_DRP_NO_MATCH BIT(0)

#define NETSEC_CLK_EN_REG_DOM_G BIT(5)
#define NETSEC_CLK_EN_REG_DOM_C BIT(1)
#define NETSEC_CLK_EN_REG_DOM_D BIT(0)

#define NETSEC_COM_INIT_REG_DB BIT(2)
#define NETSEC_COM_INIT_REG_CLS BIT(1)
#define NETSEC_COM_INIT_REG_ALL (NETSEC_COM_INIT_REG_CLS | \
				 NETSEC_COM_INIT_REG_DB)

#define NETSEC_SOFT_RST_REG_RESET 0
#define NETSEC_SOFT_RST_REG_RUN BIT(31)

#define NETSEC_DMA_CTRL_REG_STOP 1
#define MH_CTRL__MODE_TRANS BIT(20)

#define NETSEC_GMAC_CMD_ST_READ 0
#define NETSEC_GMAC_CMD_ST_WRITE BIT(28)
#define NETSEC_GMAC_CMD_ST_BUSY BIT(31)

#define NETSEC_GMAC_BMR_REG_COMMON 0x00412080
#define NETSEC_GMAC_BMR_REG_RESET 0x00020181
#define NETSEC_GMAC_BMR_REG_SWR 0x00000001

#define NETSEC_GMAC_OMR_REG_ST BIT(13)
#define NETSEC_GMAC_OMR_REG_SR BIT(1)

#define NETSEC_GMAC_MCR_REG_IBN BIT(30)
#define NETSEC_GMAC_MCR_REG_CST BIT(25)
#define NETSEC_GMAC_MCR_REG_JE BIT(20)
#define NETSEC_MCR_PS BIT(15)
#define NETSEC_GMAC_MCR_REG_FES BIT(14)
#define NETSEC_GMAC_MCR_REG_FULL_DUPLEX_COMMON 0x0000280c
#define NETSEC_GMAC_MCR_REG_HALF_DUPLEX_COMMON 0x0001a00c

#define NETSEC_FCR_RFE BIT(2)
#define NETSEC_FCR_TFE BIT(1)

#define NETSEC_GMAC_GAR_REG_GW BIT(1)
#define NETSEC_GMAC_GAR_REG_GB BIT(0)

#define NETSEC_GMAC_GAR_REG_SHIFT_PA 11
#define NETSEC_GMAC_GAR_REG_SHIFT_GR 6
#define GMAC_REG_SHIFT_CR_GAR 2

#define NETSEC_GMAC_GAR_REG_CR_25_35_MHZ 2
#define NETSEC_GMAC_GAR_REG_CR_35_60_MHZ 3
#define NETSEC_GMAC_GAR_REG_CR_60_100_MHZ 0
#define NETSEC_GMAC_GAR_REG_CR_100_150_MHZ 1
#define NETSEC_GMAC_GAR_REG_CR_150_250_MHZ 4
#define NETSEC_GMAC_GAR_REG_CR_250_300_MHZ 5
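/* The GAR_REG_CR_* codes select the MDC clock divider for the given bus
 * clock range (named in MHz). netsec_clk_type() below maps priv->freq to
 * one of these codes, and the MDIO accessors shift the result into the GAR
 * register by GMAC_REG_SHIFT_CR_GAR when issuing a PHY read or write.
 */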
#define NETSEC_GMAC_RDLAR_REG_COMMON 0x18000
#define NETSEC_GMAC_TDLAR_REG_COMMON 0x1c000

#define NETSEC_REG_NETSEC_VER_F_TAIKI 0x50000

#define NETSEC_REG_DESC_RING_CONFIG_CFG_UP BIT(31)
#define NETSEC_REG_DESC_RING_CONFIG_CH_RST BIT(30)
#define NETSEC_REG_DESC_TMR_MODE 4
#define NETSEC_REG_DESC_ENDIAN 0

#define NETSEC_MAC_DESC_SOFT_RST_SOFT_RST 1
#define NETSEC_MAC_DESC_INIT_REG_INIT 1

#define NETSEC_EEPROM_MAC_ADDRESS 0x00
#define NETSEC_EEPROM_HM_ME_ADDRESS_H 0x08
#define NETSEC_EEPROM_HM_ME_ADDRESS_L 0x0C
#define NETSEC_EEPROM_HM_ME_SIZE 0x10
#define NETSEC_EEPROM_MH_ME_ADDRESS_H 0x14
#define NETSEC_EEPROM_MH_ME_ADDRESS_L 0x18
#define NETSEC_EEPROM_MH_ME_SIZE 0x1C
#define NETSEC_EEPROM_PKT_ME_ADDRESS 0x20
#define NETSEC_EEPROM_PKT_ME_SIZE 0x24
#define NETSEC_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN)
#define NETSEC_RXBUF_HEADROOM (max(XDP_PACKET_HEADROOM, NET_SKB_PAD) + \
			       NET_IP_ALIGN)
#define NETSEC_RX_BUF_NON_DATA (NETSEC_RXBUF_HEADROOM + \
				SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
#define DESC_SZ sizeof(struct netsec_de)

#define NETSEC_F_NETSEC_VER_MAJOR_NUM(x) ((x) & 0xffff0000)

#define NETSEC_XDP_PASS 0
#define NETSEC_XDP_CONSUMED BIT(0)
#define NETSEC_XDP_TX BIT(1)
#define NETSEC_XDP_REDIR BIT(2)
#define NETSEC_XDP_RX_OK (NETSEC_XDP_PASS | NETSEC_XDP_TX | NETSEC_XDP_REDIR)
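/* Driver-internal verdicts returned by netsec_run_xdp(). Note that
 * NETSEC_XDP_PASS is zero, so NETSEC_XDP_RX_OK effectively tests the TX and
 * REDIR bits; frames passed up the stack are instead accounted for through
 * the skb path in netsec_process_rx().
 */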
	struct xdp_frame *xdpf;

struct netsec_desc_ring {

	struct netsec_desc *desc;

	u16 xdp_xmit; /* netsec_xdp_xmit packets */
	struct page_pool *page_pool;
	struct xdp_rxq_info xdp_rxq;
	spinlock_t lock; /* XDP tx queue locking */
};

struct netsec_priv {
	struct netsec_desc_ring desc_ring[NETSEC_RING_MAX];
	struct ethtool_coalesce et_coalesce;
	struct bpf_prog *xdp_prog;
	spinlock_t reglock; /* protect reg access */
	struct napi_struct napi;
	phy_interface_t phy_interface;
	struct net_device *ndev;
	struct device_node *phy_np;
	struct phy_device *phydev;
	struct mii_bus *mii_bus;
	void __iomem *ioaddr;
	void __iomem *eeprom_base;

	bool rx_cksum_offload_flag;
};

struct netsec_de { /* Netsec Descriptor layout */
	u32 attr;
	u32 data_buf_addr_up;
	u32 data_buf_addr_lw;
	u32 buf_len_info;
};

struct netsec_tx_pkt_ctrl {
	u16 tcp_seg_len;
	bool tcp_seg_offload_flag;
	bool cksum_offload_flag;
};

struct netsec_rx_pkt_info {
static void netsec_write(struct netsec_priv *priv, u32 reg_addr, u32 val)
{
	writel(val, priv->ioaddr + reg_addr);
}

static u32 netsec_read(struct netsec_priv *priv, u32 reg_addr)
{
	return readl(priv->ioaddr + reg_addr);
}
/************* MDIO BUS OPS FOLLOW *************/

#define TIMEOUT_SPINS_MAC 1000
#define TIMEOUT_SECONDARY_MS_MAC 100

static u32 netsec_clk_type(u32 freq)
		return NETSEC_GMAC_GAR_REG_CR_25_35_MHZ;
		return NETSEC_GMAC_GAR_REG_CR_35_60_MHZ;
		return NETSEC_GMAC_GAR_REG_CR_60_100_MHZ;
		return NETSEC_GMAC_GAR_REG_CR_100_150_MHZ;
		return NETSEC_GMAC_GAR_REG_CR_150_250_MHZ;
		return NETSEC_GMAC_GAR_REG_CR_250_300_MHZ;
static int netsec_wait_while_busy(struct netsec_priv *priv, u32 addr, u32 mask)
	u32 timeout = TIMEOUT_SPINS_MAC;

	while (--timeout && netsec_read(priv, addr) & mask)

	timeout = TIMEOUT_SECONDARY_MS_MAC;
	while (--timeout && netsec_read(priv, addr) & mask)
		usleep_range(1000, 2000);

	netdev_WARN(priv->ndev, "%s: timeout\n", __func__);

static int netsec_mac_write(struct netsec_priv *priv, u32 addr, u32 value)
	netsec_write(priv, MAC_REG_DATA, value);
	netsec_write(priv, MAC_REG_CMD, addr | NETSEC_GMAC_CMD_ST_WRITE);
	return netsec_wait_while_busy(priv,
				      MAC_REG_CMD, NETSEC_GMAC_CMD_ST_BUSY);

static int netsec_mac_read(struct netsec_priv *priv, u32 addr, u32 *read)
	netsec_write(priv, MAC_REG_CMD, addr | NETSEC_GMAC_CMD_ST_READ);
	ret = netsec_wait_while_busy(priv,
				     MAC_REG_CMD, NETSEC_GMAC_CMD_ST_BUSY);

	*read = netsec_read(priv, MAC_REG_DATA);
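/* netsec_mac_write()/netsec_mac_read() implement the indirect window into
 * the GMAC register file: the payload goes through MAC_REG_DATA, the target
 * offset plus a read/write opcode through MAC_REG_CMD, and completion is
 * detected by polling the NETSEC_GMAC_CMD_ST_BUSY bit.
 */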
static int netsec_mac_wait_while_busy(struct netsec_priv *priv,
				      u32 addr, u32 mask)
	u32 timeout = TIMEOUT_SPINS_MAC;

		ret = netsec_mac_read(priv, addr, &data);
	} while (--timeout && (data & mask));

	timeout = TIMEOUT_SECONDARY_MS_MAC;
		usleep_range(1000, 2000);

		ret = netsec_mac_read(priv, addr, &data);
	} while (--timeout && (data & mask));

	netdev_WARN(priv->ndev, "%s: timeout\n", __func__);

static int netsec_mac_update_to_phy_state(struct netsec_priv *priv)
	struct phy_device *phydev = priv->ndev->phydev;

	value = phydev->duplex ? NETSEC_GMAC_MCR_REG_FULL_DUPLEX_COMMON :
				 NETSEC_GMAC_MCR_REG_HALF_DUPLEX_COMMON;

	if (phydev->speed != SPEED_1000)
		value |= NETSEC_MCR_PS;

	if (priv->phy_interface != PHY_INTERFACE_MODE_GMII &&
	    phydev->speed == SPEED_100)
		value |= NETSEC_GMAC_MCR_REG_FES;

	value |= NETSEC_GMAC_MCR_REG_CST | NETSEC_GMAC_MCR_REG_JE;

	if (phy_interface_mode_is_rgmii(priv->phy_interface))
		value |= NETSEC_GMAC_MCR_REG_IBN;

	if (netsec_mac_write(priv, GMAC_REG_MCR, value))

static int netsec_phy_read(struct mii_bus *bus, int phy_addr, int reg_addr);

static int netsec_phy_write(struct mii_bus *bus,
			    int phy_addr, int reg, u16 val)
	struct netsec_priv *priv = bus->priv;

	if (netsec_mac_write(priv, GMAC_REG_GDR, val))
	if (netsec_mac_write(priv, GMAC_REG_GAR,
			     phy_addr << NETSEC_GMAC_GAR_REG_SHIFT_PA |
			     reg << NETSEC_GMAC_GAR_REG_SHIFT_GR |
			     NETSEC_GMAC_GAR_REG_GW | NETSEC_GMAC_GAR_REG_GB |
			     (netsec_clk_type(priv->freq) <<
			      GMAC_REG_SHIFT_CR_GAR)))

	status = netsec_mac_wait_while_busy(priv, GMAC_REG_GAR,
					    NETSEC_GMAC_GAR_REG_GB);
	/* The Developerbox uses an RTL8211E PHY, which has a compatibility
	 * problem with F_GMAC4: the RTL8211E expects the MDC clock to keep
	 * toggling for several cycles, with MDIO held high, before entering
	 * the IDLE state. To meet this requirement, the netsec driver issues
	 * a dummy read (e.g. of the PHYID1 register at offset 0x2) right
	 * after each write.
	 */
	netsec_phy_read(bus, phy_addr, MII_PHYSID1);

static int netsec_phy_read(struct mii_bus *bus, int phy_addr, int reg_addr)
	struct netsec_priv *priv = bus->priv;

	if (netsec_mac_write(priv, GMAC_REG_GAR, NETSEC_GMAC_GAR_REG_GB |
			     phy_addr << NETSEC_GMAC_GAR_REG_SHIFT_PA |
			     reg_addr << NETSEC_GMAC_GAR_REG_SHIFT_GR |
			     (netsec_clk_type(priv->freq) <<
			      GMAC_REG_SHIFT_CR_GAR)))

	ret = netsec_mac_wait_while_busy(priv, GMAC_REG_GAR,
					 NETSEC_GMAC_GAR_REG_GB);

	ret = netsec_mac_read(priv, GMAC_REG_GDR, &data);

/************* ETHTOOL_OPS FOLLOW *************/

static void netsec_et_get_drvinfo(struct net_device *net_device,
				  struct ethtool_drvinfo *info)
	strlcpy(info->driver, "netsec", sizeof(info->driver));
	strlcpy(info->bus_info, dev_name(net_device->dev.parent),
		sizeof(info->bus_info));

static int netsec_et_get_coalesce(struct net_device *net_device,
				  struct ethtool_coalesce *et_coalesce)
	struct netsec_priv *priv = netdev_priv(net_device);

	*et_coalesce = priv->et_coalesce;

static int netsec_et_set_coalesce(struct net_device *net_device,
				  struct ethtool_coalesce *et_coalesce)
	struct netsec_priv *priv = netdev_priv(net_device);

	priv->et_coalesce = *et_coalesce;

	if (priv->et_coalesce.tx_coalesce_usecs < 50)
		priv->et_coalesce.tx_coalesce_usecs = 50;
	if (priv->et_coalesce.tx_max_coalesced_frames < 1)
		priv->et_coalesce.tx_max_coalesced_frames = 1;

	netsec_write(priv, NETSEC_REG_NRM_TX_DONE_TXINT_PKTCNT,
		     priv->et_coalesce.tx_max_coalesced_frames);
	netsec_write(priv, NETSEC_REG_NRM_TX_TXINT_TMR,
		     priv->et_coalesce.tx_coalesce_usecs);
	netsec_write(priv, NETSEC_REG_NRM_TX_INTEN_SET, NRM_TX_ST_TXDONE);
	netsec_write(priv, NETSEC_REG_NRM_TX_INTEN_SET, NRM_TX_ST_TMREXP);

	if (priv->et_coalesce.rx_coalesce_usecs < 50)
		priv->et_coalesce.rx_coalesce_usecs = 50;
	if (priv->et_coalesce.rx_max_coalesced_frames < 1)
		priv->et_coalesce.rx_max_coalesced_frames = 1;

	netsec_write(priv, NETSEC_REG_NRM_RX_RXINT_PKTCNT,
		     priv->et_coalesce.rx_max_coalesced_frames);
	netsec_write(priv, NETSEC_REG_NRM_RX_RXINT_TMR,
		     priv->et_coalesce.rx_coalesce_usecs);
	netsec_write(priv, NETSEC_REG_NRM_RX_INTEN_SET, NRM_RX_ST_PKTCNT);
	netsec_write(priv, NETSEC_REG_NRM_RX_INTEN_SET, NRM_RX_ST_TMREXP);

static u32 netsec_et_get_msglevel(struct net_device *dev)
	struct netsec_priv *priv = netdev_priv(dev);

	return priv->msg_enable;

static void netsec_et_set_msglevel(struct net_device *dev, u32 datum)
	struct netsec_priv *priv = netdev_priv(dev);

	priv->msg_enable = datum;

static const struct ethtool_ops netsec_ethtool_ops = {
	.get_drvinfo		= netsec_et_get_drvinfo,
	.get_link_ksettings	= phy_ethtool_get_link_ksettings,
	.set_link_ksettings	= phy_ethtool_set_link_ksettings,
	.get_link		= ethtool_op_get_link,
	.get_coalesce		= netsec_et_get_coalesce,
	.set_coalesce		= netsec_et_set_coalesce,
	.get_msglevel		= netsec_et_get_msglevel,
	.set_msglevel		= netsec_et_set_msglevel,

/************* NETDEV_OPS FOLLOW *************/
static void netsec_set_rx_de(struct netsec_priv *priv,
			     struct netsec_desc_ring *dring, u16 idx,
			     const struct netsec_desc *desc)
	struct netsec_de *de = dring->vaddr + DESC_SZ * idx;
	u32 attr = (1 << NETSEC_RX_PKT_OWN_FIELD) |
		   (1 << NETSEC_RX_PKT_FS_FIELD) |
		   (1 << NETSEC_RX_PKT_LS_FIELD);

	if (idx == DESC_NUM - 1)
		attr |= (1 << NETSEC_RX_PKT_LD_FIELD);

	de->data_buf_addr_up = upper_32_bits(desc->dma_addr);
	de->data_buf_addr_lw = lower_32_bits(desc->dma_addr);
	de->buf_len_info = desc->len;

	dring->desc[idx].dma_addr = desc->dma_addr;
	dring->desc[idx].addr = desc->addr;
	dring->desc[idx].len = desc->len;

static bool netsec_clean_tx_dring(struct netsec_priv *priv)
	struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_TX];
	struct netsec_de *entry;
	int tail = dring->tail;

	spin_lock(&dring->lock);

	entry = dring->vaddr + DESC_SZ * tail;

	while (!(entry->attr & (1U << NETSEC_TX_SHIFT_OWN_FIELD)) &&
		struct netsec_desc *desc;

		desc = &dring->desc[tail];
		eop = (entry->attr >> NETSEC_TX_LAST) & 1;
		/* If buf_type is TYPE_NETSEC_SKB or TYPE_NETSEC_XDP_NDO, we
		 * mapped the buffer ourselves, so unmap it here.
		 */
		if (desc->buf_type != TYPE_NETSEC_XDP_TX)
			dma_unmap_single(priv->dev, desc->dma_addr, desc->len,
					 DMA_TO_DEVICE);

		if (desc->buf_type == TYPE_NETSEC_SKB) {
			bytes += desc->skb->len;
			dev_kfree_skb(desc->skb);

			xdp_return_frame(desc->xdpf);

		/* clean up so netsec_uninit_pkt_dring() won't free the skb */
		*desc = (struct netsec_desc){};

		/* entry->attr is not going to be accessed by the NIC until
		 * netsec_set_tx_de() is called. No need for a dma_wmb() here.
		 */
		entry->attr = 1U << NETSEC_TX_SHIFT_OWN_FIELD;
		/* move tail ahead */
		dring->tail = (tail + 1) % DESC_NUM;

		entry = dring->vaddr + DESC_SZ * tail;

	spin_unlock(&dring->lock);

	/* reading the register clears the irq */
	netsec_read(priv, NETSEC_REG_NRM_TX_DONE_PKTCNT);

	priv->ndev->stats.tx_packets += cnt;
	priv->ndev->stats.tx_bytes += bytes;

	netdev_completed_queue(priv->ndev, cnt, bytes);
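/* netdev_completed_queue() above pairs with the netdev_sent_queue() call in
 * netsec_netdev_start_xmit(); together they feed byte queue limits (BQL) so
 * the stack can bound the amount of data queued on the Tx ring.
 */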
static void netsec_process_tx(struct netsec_priv *priv)
	struct net_device *ndev = priv->ndev;

	cleaned = netsec_clean_tx_dring(priv);

	if (cleaned && netif_queue_stopped(ndev)) {
		/* Make sure we update the value; anyone stopping the queue
		 * after this will read the proper consumer idx.
		 */
		netif_wake_queue(ndev);

static void *netsec_alloc_rx_data(struct netsec_priv *priv,
				  dma_addr_t *dma_handle, u16 *desc_len)

	struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_RX];
	enum dma_data_direction dma_dir;

	page = page_pool_dev_alloc_pages(dring->page_pool);
	/* We allocate the same buffer length for XDP and non-XDP cases;
	 * the page_pool API maps the whole page, and we skip over the
	 * headroom needed for network payloads and/or XDP.
	 */
	*dma_handle = page_pool_get_dma_addr(page) + NETSEC_RXBUF_HEADROOM;
	/* Make sure the incoming payload fits in the page for XDP and non-XDP
	 * cases and reserve enough space for headroom + skb_shared_info.
	 */
	*desc_len = PAGE_SIZE - NETSEC_RX_BUF_NON_DATA;
	dma_dir = page_pool_get_dma_dir(dring->page_pool);
	dma_sync_single_for_device(priv->dev, *dma_handle, *desc_len, dma_dir);

	return page_address(page);
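/* Resulting buffer layout within each page_pool page:
 *
 *	[ NETSEC_RXBUF_HEADROOM | packet data | skb_shared_info ]
 *
 * The returned virtual address and *dma_handle point past the headroom, and
 * *desc_len (PAGE_SIZE - NETSEC_RX_BUF_NON_DATA) leaves room at the tail for
 * the skb_shared_info that build_skb() will place there.
 */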
static void netsec_rx_fill(struct netsec_priv *priv, u16 from, u16 num)
	struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_RX];

		netsec_set_rx_de(priv, dring, idx, &dring->desc[idx]);

static void netsec_xdp_ring_tx_db(struct netsec_priv *priv, u16 pkts)
		netsec_write(priv, NETSEC_REG_NRM_TX_PKTCNT, pkts);

static void netsec_finalize_xdp_rx(struct netsec_priv *priv, u32 xdp_res,
				   u16 pkts)
	if (xdp_res & NETSEC_XDP_REDIR)

	if (xdp_res & NETSEC_XDP_TX)
		netsec_xdp_ring_tx_db(priv, pkts);

static void netsec_set_tx_de(struct netsec_priv *priv,
			     struct netsec_desc_ring *dring,
			     const struct netsec_tx_pkt_ctrl *tx_ctrl,
			     const struct netsec_desc *desc, void *buf)
	int idx = dring->head;
	struct netsec_de *de;

	de = dring->vaddr + (DESC_SZ * idx);

	attr = (1 << NETSEC_TX_SHIFT_OWN_FIELD) |
	       (1 << NETSEC_TX_SHIFT_PT_FIELD) |
	       (NETSEC_RING_GMAC << NETSEC_TX_SHIFT_TDRID_FIELD) |
	       (1 << NETSEC_TX_SHIFT_FS_FIELD) |
	       (1 << NETSEC_TX_LAST) |
	       (tx_ctrl->cksum_offload_flag << NETSEC_TX_SHIFT_CO) |
	       (tx_ctrl->tcp_seg_offload_flag << NETSEC_TX_SHIFT_SO) |
	       (1 << NETSEC_TX_SHIFT_TRS_FIELD);
	if (idx == DESC_NUM - 1)
		attr |= (1 << NETSEC_TX_SHIFT_LD_FIELD);

	de->data_buf_addr_up = upper_32_bits(desc->dma_addr);
	de->data_buf_addr_lw = lower_32_bits(desc->dma_addr);
	de->buf_len_info = (tx_ctrl->tcp_seg_len << 16) | desc->len;

	dring->desc[idx] = *desc;
	if (desc->buf_type == TYPE_NETSEC_SKB)
		dring->desc[idx].skb = buf;
	else if (desc->buf_type == TYPE_NETSEC_XDP_TX ||
		 desc->buf_type == TYPE_NETSEC_XDP_NDO)
		dring->desc[idx].xdpf = buf;

	/* move head ahead */
	dring->head = (dring->head + 1) % DESC_NUM;
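/* Note that every Tx descriptor is built with both FS (first segment) and
 * LAST set, i.e. each packet occupies exactly one descriptor;
 * netsec_netdev_start_xmit() below correspondingly maps only the linear
 * skb_headlen() area.
 */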
/* The current driver supports only one Tx queue; this must run with the
 * ring's spin_lock held.
 */
static u32 netsec_xdp_queue_one(struct netsec_priv *priv,
				struct xdp_frame *xdpf, bool is_ndo)

	struct netsec_desc_ring *tx_ring = &priv->desc_ring[NETSEC_RING_TX];
	struct page *page = virt_to_page(xdpf->data);
	struct netsec_tx_pkt_ctrl tx_ctrl = {};
	struct netsec_desc tx_desc;
	dma_addr_t dma_handle;

	if (tx_ring->head >= tx_ring->tail)
		filled = tx_ring->head - tx_ring->tail;
	else
		filled = tx_ring->head + DESC_NUM - tx_ring->tail;

	if (DESC_NUM - filled <= 1)
		return NETSEC_XDP_CONSUMED;
		/* This is for ndo_xdp_xmit; the buffer needs a DMA mapping
		 * before sending.
		 */
		dma_handle = dma_map_single(priv->dev, xdpf->data, xdpf->len,
					    DMA_TO_DEVICE);
		if (dma_mapping_error(priv->dev, dma_handle))
			return NETSEC_XDP_CONSUMED;
		tx_desc.buf_type = TYPE_NETSEC_XDP_NDO;
		/* This is the device Rx buffer from page_pool; no need to
		 * remap, just sync and send it.
		 */
		struct netsec_desc_ring *rx_ring =
			&priv->desc_ring[NETSEC_RING_RX];
		enum dma_data_direction dma_dir =
			page_pool_get_dma_dir(rx_ring->page_pool);

		dma_handle = page_pool_get_dma_addr(page) +
			     NETSEC_RXBUF_HEADROOM;
		dma_sync_single_for_device(priv->dev, dma_handle, xdpf->len,
					   dma_dir);
		tx_desc.buf_type = TYPE_NETSEC_XDP_TX;

	tx_desc.dma_addr = dma_handle;
	tx_desc.addr = xdpf->data;
	tx_desc.len = xdpf->len;

	netsec_set_tx_de(priv, tx_ring, &tx_ctrl, &tx_desc, xdpf);

	return NETSEC_XDP_TX;

static u32 netsec_xdp_xmit_back(struct netsec_priv *priv, struct xdp_buff *xdp)
	struct netsec_desc_ring *tx_ring = &priv->desc_ring[NETSEC_RING_TX];
	struct xdp_frame *xdpf = convert_to_xdp_frame(xdp);

		return NETSEC_XDP_CONSUMED;

	spin_lock(&tx_ring->lock);
	ret = netsec_xdp_queue_one(priv, xdpf, false);
	spin_unlock(&tx_ring->lock);

static u32 netsec_run_xdp(struct netsec_priv *priv, struct bpf_prog *prog,
			  struct xdp_buff *xdp)
	u32 ret = NETSEC_XDP_PASS;

	act = bpf_prog_run_xdp(prog, xdp);

		ret = NETSEC_XDP_PASS;

		ret = netsec_xdp_xmit_back(priv, xdp);
		if (ret != NETSEC_XDP_TX)
			xdp_return_buff(xdp);

		err = xdp_do_redirect(priv->ndev, xdp, prog);
			ret = NETSEC_XDP_REDIR;
			ret = NETSEC_XDP_CONSUMED;
			xdp_return_buff(xdp);

		bpf_warn_invalid_xdp_action(act);

		trace_xdp_exception(priv->ndev, prog, act);
		/* fall through -- handle aborts by dropping packet */

		ret = NETSEC_XDP_CONSUMED;
		xdp_return_buff(xdp);
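/* netsec_run_xdp() maps the standard XDP program verdicts onto the driver's
 * internal flags: XDP_PASS stays NETSEC_XDP_PASS, XDP_TX is queued back onto
 * the Tx ring via netsec_xdp_xmit_back(), XDP_REDIRECT goes through
 * xdp_do_redirect(), and anything else (including XDP_ABORTED and XDP_DROP)
 * ends up as NETSEC_XDP_CONSUMED with the buffer returned to the pool.
 */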
static int netsec_process_rx(struct netsec_priv *priv, int budget)
	struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_RX];
	struct net_device *ndev = priv->ndev;
	struct netsec_rx_pkt_info rx_info;
	enum dma_data_direction dma_dir;
	struct bpf_prog *xdp_prog;
	struct sk_buff *skb = NULL;

	xdp_prog = READ_ONCE(priv->xdp_prog);
	dma_dir = page_pool_get_dma_dir(dring->page_pool);

	while (done < budget) {
		u16 idx = dring->tail;
		struct netsec_de *de = dring->vaddr + (DESC_SZ * idx);
		struct netsec_desc *desc = &dring->desc[idx];
		struct page *page = virt_to_page(desc->addr);
		u32 xdp_result = NETSEC_XDP_PASS;
		u16 pkt_len, desc_len;
		dma_addr_t dma_handle;

		if (de->attr & (1U << NETSEC_RX_PKT_OWN_FIELD)) {
			/* reading the register clears the irq */
			netsec_read(priv, NETSEC_REG_NRM_RX_PKTCNT);

		/* This barrier is needed to keep us from reading
		 * any other fields out of the netsec_de until we have
		 * verified the descriptor has been written back.
		 */

		pkt_len = de->buf_len_info >> 16;
		rx_info.err_code = (de->attr >> NETSEC_RX_PKT_ERR_FIELD) &
			NETSEC_RX_PKT_ERR_MASK;
		rx_info.err_flag = (de->attr >> NETSEC_RX_PKT_ER_FIELD) & 1;
		if (rx_info.err_flag) {
			netif_err(priv, drv, priv->ndev,
				  "%s: rx fail err(%d)\n", __func__,
				  rx_info.err_code);
			ndev->stats.rx_dropped++;
			dring->tail = (dring->tail + 1) % DESC_NUM;
			/* reuse buffer page frag */
			netsec_rx_fill(priv, idx, 1);

		rx_info.rx_cksum_result =
			(de->attr >> NETSEC_RX_PKT_CO_FIELD) & 3;
		/* Allocate a fresh buffer and map it for the hardware;
		 * it will eventually replace the buffer the hardware
		 * currently owns.
		 */
		buf_addr = netsec_alloc_rx_data(priv, &dma_handle, &desc_len);

		if (unlikely(!buf_addr))

		dma_sync_single_for_cpu(priv->dev, desc->dma_addr, pkt_len,
					dma_dir);
		prefetch(desc->addr);

		xdp.data_hard_start = desc->addr;
		xdp.data = desc->addr + NETSEC_RXBUF_HEADROOM;
		xdp_set_data_meta_invalid(&xdp);
		xdp.data_end = xdp.data + pkt_len;
		xdp.rxq = &dring->xdp_rxq;

			xdp_result = netsec_run_xdp(priv, xdp_prog, &xdp);
			if (xdp_result != NETSEC_XDP_PASS) {
				xdp_act |= xdp_result;
				if (xdp_result == NETSEC_XDP_TX)

		skb = build_skb(desc->addr, desc->len + NETSEC_RX_BUF_NON_DATA);

		if (unlikely(!skb)) {
			/* If building an skb fails, recycle_direct will
			 * either unmap and free the page or refill the
			 * cache, depending on the cache state. Since we
			 * already paid the allocation cost, try to put the
			 * page back into the cache rather than lose it.
			 */
			page_pool_recycle_direct(dring->page_pool, page);
			netif_err(priv, drv, priv->ndev,
				  "rx failed to build skb\n");

		page_pool_release_page(dring->page_pool, page);

		skb_reserve(skb, xdp.data - xdp.data_hard_start);
		skb_put(skb, xdp.data_end - xdp.data);
		skb->protocol = eth_type_trans(skb, priv->ndev);

		if (priv->rx_cksum_offload_flag &&
		    rx_info.rx_cksum_result == NETSEC_RX_CKSUM_OK)
			skb->ip_summed = CHECKSUM_UNNECESSARY;

		if ((skb && napi_gro_receive(&priv->napi, skb) != GRO_DROP) ||
		    xdp_result & NETSEC_XDP_RX_OK) {
			ndev->stats.rx_packets++;
			ndev->stats.rx_bytes += xdp.data_end - xdp.data;

		/* Update the descriptor with fresh buffers */
		desc->len = desc_len;
		desc->dma_addr = dma_handle;
		desc->addr = buf_addr;

		netsec_rx_fill(priv, idx, 1);
		dring->tail = (dring->tail + 1) % DESC_NUM;

	netsec_finalize_xdp_rx(priv, xdp_act, xdp_xmit);

static int netsec_napi_poll(struct napi_struct *napi, int budget)
	struct netsec_priv *priv;

	priv = container_of(napi, struct netsec_priv, napi);

	netsec_process_tx(priv);
	done = netsec_process_rx(priv, budget);

	if (done < budget && napi_complete_done(napi, done)) {
		unsigned long flags;

		spin_lock_irqsave(&priv->reglock, flags);
		netsec_write(priv, NETSEC_REG_INTEN_SET,
			     NETSEC_IRQ_RX | NETSEC_IRQ_TX);
		spin_unlock_irqrestore(&priv->reglock, flags);
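	/* Standard NAPI flow: netsec_irq_handler() below masks
	 * NETSEC_IRQ_RX/TX and schedules the poller; once a poll round
	 * finishes under budget, napi_complete_done() succeeds and the
	 * interrupts are re-enabled here under reglock, so the enable cannot
	 * race with the handler's mask.
	 */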
static int netsec_desc_used(struct netsec_desc_ring *dring)

	if (dring->head >= dring->tail)
		used = dring->head - dring->tail;
	else
		used = dring->head + DESC_NUM - dring->tail;

static int netsec_check_stop_tx(struct netsec_priv *priv, int used)
	struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_TX];

	/* keep tail from touching the queue */
	if (DESC_NUM - used < 2) {
		netif_stop_queue(priv->ndev);

		/* Make sure we read the updated value in case
		 * descriptors got freed.
		 */

		used = netsec_desc_used(dring);
		if (DESC_NUM - used < 2)
			return NETDEV_TX_BUSY;

		netif_wake_queue(priv->ndev);

static netdev_tx_t netsec_netdev_start_xmit(struct sk_buff *skb,
					    struct net_device *ndev)
	struct netsec_priv *priv = netdev_priv(ndev);
	struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_TX];
	struct netsec_tx_pkt_ctrl tx_ctrl = {};
	struct netsec_desc tx_desc;
	u16 tso_seg_len = 0;

	spin_lock_bh(&dring->lock);
	filled = netsec_desc_used(dring);
	if (netsec_check_stop_tx(priv, filled)) {
		spin_unlock_bh(&dring->lock);
		net_warn_ratelimited("%s %s Tx queue full\n",
				     dev_name(priv->dev), ndev->name);
		return NETDEV_TX_BUSY;

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		tx_ctrl.cksum_offload_flag = true;

	if (skb_is_gso(skb))
		tso_seg_len = skb_shinfo(skb)->gso_size;

	if (tso_seg_len > 0) {
		if (skb->protocol == htons(ETH_P_IP)) {
			ip_hdr(skb)->tot_len = 0;
			tcp_hdr(skb)->check =
				~tcp_v4_check(0, ip_hdr(skb)->saddr,
					      ip_hdr(skb)->daddr, 0);
		} else {
			ipv6_hdr(skb)->payload_len = 0;
			tcp_hdr(skb)->check =
				~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
						 &ipv6_hdr(skb)->daddr,
						 0, IPPROTO_TCP, 0);

		tx_ctrl.tcp_seg_offload_flag = true;
		tx_ctrl.tcp_seg_len = tso_seg_len;
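	/* Standard TSO preparation: the IP total/payload length is zeroed
	 * and the TCP checksum field is seeded with the inverted
	 * pseudo-header checksum, computed over a zero length, so the
	 * hardware can fill in the final length and checksum for each
	 * segment it emits.
	 */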
	tx_desc.dma_addr = dma_map_single(priv->dev, skb->data,
					  skb_headlen(skb), DMA_TO_DEVICE);
	if (dma_mapping_error(priv->dev, tx_desc.dma_addr)) {
		spin_unlock_bh(&dring->lock);
		netif_err(priv, drv, priv->ndev,
			  "%s: DMA mapping failed\n", __func__);
		ndev->stats.tx_dropped++;
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;

	tx_desc.addr = skb->data;
	tx_desc.len = skb_headlen(skb);
	tx_desc.buf_type = TYPE_NETSEC_SKB;

	skb_tx_timestamp(skb);
	netdev_sent_queue(priv->ndev, skb->len);

	netsec_set_tx_de(priv, dring, &tx_ctrl, &tx_desc, skb);
	spin_unlock_bh(&dring->lock);
	netsec_write(priv, NETSEC_REG_NRM_TX_PKTCNT, 1); /* submit another tx */

	return NETDEV_TX_OK;

static void netsec_uninit_pkt_dring(struct netsec_priv *priv, int id)
	struct netsec_desc_ring *dring = &priv->desc_ring[id];
	struct netsec_desc *desc;

	if (!dring->vaddr || !dring->desc)

	for (idx = 0; idx < DESC_NUM; idx++) {
		desc = &dring->desc[idx];

		if (id == NETSEC_RING_RX) {
			struct page *page = virt_to_page(desc->addr);

			page_pool_put_page(dring->page_pool, page, false);
		} else if (id == NETSEC_RING_TX) {
			dma_unmap_single(priv->dev, desc->dma_addr, desc->len,
					 DMA_TO_DEVICE);
			dev_kfree_skb(desc->skb);

	/* Rx is currently using page_pool */
	if (id == NETSEC_RING_RX) {
		if (xdp_rxq_info_is_reg(&dring->xdp_rxq))
			xdp_rxq_info_unreg(&dring->xdp_rxq);
		page_pool_destroy(dring->page_pool);

	memset(dring->desc, 0, sizeof(struct netsec_desc) * DESC_NUM);
	memset(dring->vaddr, 0, DESC_SZ * DESC_NUM);

	if (id == NETSEC_RING_TX)
		netdev_reset_queue(priv->ndev);
static void netsec_free_dring(struct netsec_priv *priv, int id)
	struct netsec_desc_ring *dring = &priv->desc_ring[id];

	dma_free_coherent(priv->dev, DESC_SZ * DESC_NUM,
			  dring->vaddr, dring->desc_dma);
	dring->vaddr = NULL;

static int netsec_alloc_dring(struct netsec_priv *priv, enum ring_id id)
	struct netsec_desc_ring *dring = &priv->desc_ring[id];

	dring->vaddr = dma_alloc_coherent(priv->dev, DESC_SZ * DESC_NUM,
					  &dring->desc_dma, GFP_KERNEL);

	dring->desc = kcalloc(DESC_NUM, sizeof(*dring->desc), GFP_KERNEL);

	netsec_free_dring(priv, id);

static void netsec_setup_tx_dring(struct netsec_priv *priv)
	struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_TX];

	for (i = 0; i < DESC_NUM; i++) {
		struct netsec_de *de;

		de = dring->vaddr + (DESC_SZ * i);
		/* de->attr is not going to be accessed by the NIC
		 * until netsec_set_tx_de() is called.
		 * No need for a dma_wmb() here.
		 */
		de->attr = 1U << NETSEC_TX_SHIFT_OWN_FIELD;

static int netsec_setup_rx_dring(struct netsec_priv *priv)
	struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_RX];
	struct bpf_prog *xdp_prog = READ_ONCE(priv->xdp_prog);
	struct page_pool_params pp_params = { 0 };

	pp_params.order = 0;
	/* internal DMA mapping in page_pool */
	pp_params.flags = PP_FLAG_DMA_MAP;
	pp_params.pool_size = DESC_NUM;
	pp_params.nid = cpu_to_node(0);
	pp_params.dev = priv->dev;
	pp_params.dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;

	dring->page_pool = page_pool_create(&pp_params);
	if (IS_ERR(dring->page_pool)) {
		err = PTR_ERR(dring->page_pool);
		dring->page_pool = NULL;

	err = xdp_rxq_info_reg(&dring->xdp_rxq, priv->ndev, 0);

	err = xdp_rxq_info_reg_mem_model(&dring->xdp_rxq, MEM_TYPE_PAGE_POOL,
					 dring->page_pool);

	for (i = 0; i < DESC_NUM; i++) {
		struct netsec_desc *desc = &dring->desc[i];
		dma_addr_t dma_handle;

		buf = netsec_alloc_rx_data(priv, &dma_handle, &len);

		desc->dma_addr = dma_handle;

	netsec_rx_fill(priv, 0, DESC_NUM);

	netsec_uninit_pkt_dring(priv, NETSEC_RING_RX);

static int netsec_netdev_load_ucode_region(struct netsec_priv *priv, u32 reg,
					   u32 addr_h, u32 addr_l, u32 size)
	u64 base = (u64)addr_h << 32 | addr_l;
	void __iomem *ucode;

	ucode = ioremap(base, size * sizeof(u32));

	for (i = 0; i < size; i++)
		netsec_write(priv, reg, readl(ucode + i * 4));
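	/* The engine microcode is not a firmware blob loaded from disk: its
	 * physical address and word count are read from the attached EEPROM
	 * region, the image is ioremap()ed, and then streamed one 32-bit
	 * word at a time into the relevant command-buffer register.
	 */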
static int netsec_netdev_load_microcode(struct netsec_priv *priv)
	u32 addr_h, addr_l, size;

	addr_h = readl(priv->eeprom_base + NETSEC_EEPROM_HM_ME_ADDRESS_H);
	addr_l = readl(priv->eeprom_base + NETSEC_EEPROM_HM_ME_ADDRESS_L);
	size = readl(priv->eeprom_base + NETSEC_EEPROM_HM_ME_SIZE);
	err = netsec_netdev_load_ucode_region(priv, NETSEC_REG_DMAC_HM_CMD_BUF,
					      addr_h, addr_l, size);

	addr_h = readl(priv->eeprom_base + NETSEC_EEPROM_MH_ME_ADDRESS_H);
	addr_l = readl(priv->eeprom_base + NETSEC_EEPROM_MH_ME_ADDRESS_L);
	size = readl(priv->eeprom_base + NETSEC_EEPROM_MH_ME_SIZE);
	err = netsec_netdev_load_ucode_region(priv, NETSEC_REG_DMAC_MH_CMD_BUF,
					      addr_h, addr_l, size);

	addr_l = readl(priv->eeprom_base + NETSEC_EEPROM_PKT_ME_ADDRESS);
	size = readl(priv->eeprom_base + NETSEC_EEPROM_PKT_ME_SIZE);
	err = netsec_netdev_load_ucode_region(priv, NETSEC_REG_PKT_CMD_BUF,
					      addr_h, addr_l, size);

static int netsec_reset_hardware(struct netsec_priv *priv,
				 bool load_ucode)

	/* stop DMA engines */
	if (!netsec_read(priv, NETSEC_REG_ADDR_DIS_CORE)) {
		netsec_write(priv, NETSEC_REG_DMA_HM_CTRL,
			     NETSEC_DMA_CTRL_REG_STOP);
		netsec_write(priv, NETSEC_REG_DMA_MH_CTRL,
			     NETSEC_DMA_CTRL_REG_STOP);

		while (netsec_read(priv, NETSEC_REG_DMA_HM_CTRL) &
		       NETSEC_DMA_CTRL_REG_STOP)

		while (netsec_read(priv, NETSEC_REG_DMA_MH_CTRL) &
		       NETSEC_DMA_CTRL_REG_STOP)

	netsec_write(priv, NETSEC_REG_SOFT_RST, NETSEC_SOFT_RST_REG_RESET);
	netsec_write(priv, NETSEC_REG_SOFT_RST, NETSEC_SOFT_RST_REG_RUN);
	netsec_write(priv, NETSEC_REG_COM_INIT, NETSEC_COM_INIT_REG_ALL);

	while (netsec_read(priv, NETSEC_REG_COM_INIT) != 0)

	/* set desc_start addr */
	netsec_write(priv, NETSEC_REG_NRM_RX_DESC_START_UP,
		     upper_32_bits(priv->desc_ring[NETSEC_RING_RX].desc_dma));
	netsec_write(priv, NETSEC_REG_NRM_RX_DESC_START_LW,
		     lower_32_bits(priv->desc_ring[NETSEC_RING_RX].desc_dma));

	netsec_write(priv, NETSEC_REG_NRM_TX_DESC_START_UP,
		     upper_32_bits(priv->desc_ring[NETSEC_RING_TX].desc_dma));
	netsec_write(priv, NETSEC_REG_NRM_TX_DESC_START_LW,
		     lower_32_bits(priv->desc_ring[NETSEC_RING_TX].desc_dma));

	/* set normal tx dring ring config */
	netsec_write(priv, NETSEC_REG_NRM_TX_CONFIG,
		     1 << NETSEC_REG_DESC_ENDIAN);
	netsec_write(priv, NETSEC_REG_NRM_RX_CONFIG,
		     1 << NETSEC_REG_DESC_ENDIAN);

	err = netsec_netdev_load_microcode(priv);
		netif_err(priv, probe, priv->ndev,
			  "%s: failed to load microcode (%d)\n",
			  __func__, err);

	/* start DMA engines */
	netsec_write(priv, NETSEC_REG_DMA_TMR_CTRL, priv->freq / 1000000 - 1);
	netsec_write(priv, NETSEC_REG_ADDR_DIS_CORE, 0);

	usleep_range(1000, 2000);

	if (!(netsec_read(priv, NETSEC_REG_TOP_STATUS) &
	      NETSEC_TOP_IRQ_REG_CODE_LOAD_END)) {
		netif_err(priv, probe, priv->ndev,
			  "microengine start failed\n");

	netsec_write(priv, NETSEC_REG_TOP_STATUS,
		     NETSEC_TOP_IRQ_REG_CODE_LOAD_END);

	value = NETSEC_PKT_CTRL_REG_MODE_NRM;
	if (priv->ndev->mtu > ETH_DATA_LEN)
		value |= NETSEC_PKT_CTRL_REG_EN_JUMBO;

	/* change to normal mode */
	netsec_write(priv, NETSEC_REG_DMA_MH_CTRL, MH_CTRL__MODE_TRANS);
	netsec_write(priv, NETSEC_REG_PKT_CTRL, value);

	while ((netsec_read(priv, NETSEC_REG_MODE_TRANS_COMP_STATUS) &
		NETSEC_MODE_TRANS_COMP_IRQ_T2N) == 0)

	/* clear any pending EMPTY/ERR irq status */
	netsec_write(priv, NETSEC_REG_NRM_TX_STATUS, ~0);

	/* Disable TX & RX intr */
	netsec_write(priv, NETSEC_REG_INTEN_CLR, ~0);

static int netsec_start_gmac(struct netsec_priv *priv)
	struct phy_device *phydev = priv->ndev->phydev;

	if (phydev->speed != SPEED_1000)
		value = (NETSEC_GMAC_MCR_REG_CST |
			 NETSEC_GMAC_MCR_REG_HALF_DUPLEX_COMMON);

	if (netsec_mac_write(priv, GMAC_REG_MCR, value))
	if (netsec_mac_write(priv, GMAC_REG_BMR,
			     NETSEC_GMAC_BMR_REG_RESET))

	/* Wait soft reset */
	usleep_range(1000, 5000);

	ret = netsec_mac_read(priv, GMAC_REG_BMR, &value);
	if (value & NETSEC_GMAC_BMR_REG_SWR)

	netsec_write(priv, MAC_REG_DESC_SOFT_RST, 1);
	if (netsec_wait_while_busy(priv, MAC_REG_DESC_SOFT_RST, 1))

	netsec_write(priv, MAC_REG_DESC_INIT, 1);
	if (netsec_wait_while_busy(priv, MAC_REG_DESC_INIT, 1))

	if (netsec_mac_write(priv, GMAC_REG_BMR,
			     NETSEC_GMAC_BMR_REG_COMMON))
	if (netsec_mac_write(priv, GMAC_REG_RDLAR,
			     NETSEC_GMAC_RDLAR_REG_COMMON))
	if (netsec_mac_write(priv, GMAC_REG_TDLAR,
			     NETSEC_GMAC_TDLAR_REG_COMMON))
	if (netsec_mac_write(priv, GMAC_REG_MFFR, 0x80000001))

	ret = netsec_mac_update_to_phy_state(priv);

	ret = netsec_mac_read(priv, GMAC_REG_OMR, &value);

	value |= NETSEC_GMAC_OMR_REG_SR;
	value |= NETSEC_GMAC_OMR_REG_ST;

	netsec_write(priv, NETSEC_REG_NRM_RX_INTEN_CLR, ~0);
	netsec_write(priv, NETSEC_REG_NRM_TX_INTEN_CLR, ~0);

	netsec_et_set_coalesce(priv->ndev, &priv->et_coalesce);

	if (netsec_mac_write(priv, GMAC_REG_OMR, value))

static int netsec_stop_gmac(struct netsec_priv *priv)

	ret = netsec_mac_read(priv, GMAC_REG_OMR, &value);

	value &= ~NETSEC_GMAC_OMR_REG_SR;
	value &= ~NETSEC_GMAC_OMR_REG_ST;

	/* disable all interrupts */
	netsec_write(priv, NETSEC_REG_NRM_RX_INTEN_CLR, ~0);
	netsec_write(priv, NETSEC_REG_NRM_TX_INTEN_CLR, ~0);

	return netsec_mac_write(priv, GMAC_REG_OMR, value);

static void netsec_phy_adjust_link(struct net_device *ndev)
	struct netsec_priv *priv = netdev_priv(ndev);

	if (ndev->phydev->link)
		netsec_start_gmac(priv);

		netsec_stop_gmac(priv);

	phy_print_status(ndev->phydev);

static irqreturn_t netsec_irq_handler(int irq, void *dev_id)
	struct netsec_priv *priv = dev_id;
	u32 val, status = netsec_read(priv, NETSEC_REG_TOP_STATUS);
	unsigned long flags;

	/* Disable interrupts */
	if (status & NETSEC_IRQ_TX) {
		val = netsec_read(priv, NETSEC_REG_NRM_TX_STATUS);
		netsec_write(priv, NETSEC_REG_NRM_TX_STATUS, val);

	if (status & NETSEC_IRQ_RX) {
		val = netsec_read(priv, NETSEC_REG_NRM_RX_STATUS);
		netsec_write(priv, NETSEC_REG_NRM_RX_STATUS, val);

	spin_lock_irqsave(&priv->reglock, flags);
	netsec_write(priv, NETSEC_REG_INTEN_CLR, NETSEC_IRQ_RX | NETSEC_IRQ_TX);
	spin_unlock_irqrestore(&priv->reglock, flags);

	napi_schedule(&priv->napi);

static int netsec_netdev_open(struct net_device *ndev)
	struct netsec_priv *priv = netdev_priv(ndev);

	pm_runtime_get_sync(priv->dev);

	netsec_setup_tx_dring(priv);
	ret = netsec_setup_rx_dring(priv);
		netif_err(priv, probe, priv->ndev,
			  "%s: fail setup ring\n", __func__);

	ret = request_irq(priv->ndev->irq, netsec_irq_handler,
			  IRQF_SHARED, "netsec", priv);
		netif_err(priv, drv, priv->ndev, "request_irq failed\n");

	if (dev_of_node(priv->dev)) {
		if (!of_phy_connect(priv->ndev, priv->phy_np,
				    netsec_phy_adjust_link, 0,
				    priv->phy_interface)) {
			netif_err(priv, link, priv->ndev, "missing PHY\n");

		ret = phy_connect_direct(priv->ndev, priv->phydev,
					 netsec_phy_adjust_link,
					 priv->phy_interface);
			netif_err(priv, link, priv->ndev,
				  "phy_connect_direct() failed (%d)\n", ret);

	phy_start(ndev->phydev);

	netsec_start_gmac(priv);
	napi_enable(&priv->napi);
	netif_start_queue(ndev);

	/* Enable TX+RX intr. */
	netsec_write(priv, NETSEC_REG_INTEN_SET, NETSEC_IRQ_RX | NETSEC_IRQ_TX);

	free_irq(priv->ndev->irq, priv);

	netsec_uninit_pkt_dring(priv, NETSEC_RING_RX);

	pm_runtime_put_sync(priv->dev);

static int netsec_netdev_stop(struct net_device *ndev)

	struct netsec_priv *priv = netdev_priv(ndev);

	netif_stop_queue(priv->ndev);

	napi_disable(&priv->napi);

	netsec_write(priv, NETSEC_REG_INTEN_CLR, ~0);
	netsec_stop_gmac(priv);

	free_irq(priv->ndev->irq, priv);

	netsec_uninit_pkt_dring(priv, NETSEC_RING_TX);
	netsec_uninit_pkt_dring(priv, NETSEC_RING_RX);

	phy_stop(ndev->phydev);
	phy_disconnect(ndev->phydev);

	ret = netsec_reset_hardware(priv, false);

	pm_runtime_put_sync(priv->dev);

static int netsec_netdev_init(struct net_device *ndev)
	struct netsec_priv *priv = netdev_priv(ndev);

	BUILD_BUG_ON_NOT_POWER_OF_2(DESC_NUM);

	ret = netsec_alloc_dring(priv, NETSEC_RING_TX);

	ret = netsec_alloc_dring(priv, NETSEC_RING_RX);

	/* set phy power down */
	data = netsec_phy_read(priv->mii_bus, priv->phy_addr, MII_BMCR) |
	       BMCR_PDOWN;
	netsec_phy_write(priv->mii_bus, priv->phy_addr, MII_BMCR, data);

	ret = netsec_reset_hardware(priv, true);

	spin_lock_init(&priv->desc_ring[NETSEC_RING_TX].lock);
	spin_lock_init(&priv->desc_ring[NETSEC_RING_RX].lock);

	netsec_free_dring(priv, NETSEC_RING_RX);

	netsec_free_dring(priv, NETSEC_RING_TX);

static void netsec_netdev_uninit(struct net_device *ndev)
	struct netsec_priv *priv = netdev_priv(ndev);

	netsec_free_dring(priv, NETSEC_RING_RX);
	netsec_free_dring(priv, NETSEC_RING_TX);

static int netsec_netdev_set_features(struct net_device *ndev,
				      netdev_features_t features)
	struct netsec_priv *priv = netdev_priv(ndev);

	priv->rx_cksum_offload_flag = !!(features & NETIF_F_RXCSUM);

static int netsec_netdev_ioctl(struct net_device *ndev, struct ifreq *ifr,
			       int cmd)
	return phy_mii_ioctl(ndev->phydev, ifr, cmd);

static int netsec_xdp_xmit(struct net_device *ndev, int n,
			   struct xdp_frame **frames, u32 flags)
	struct netsec_priv *priv = netdev_priv(ndev);
	struct netsec_desc_ring *tx_ring = &priv->desc_ring[NETSEC_RING_TX];

	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))

	spin_lock(&tx_ring->lock);
	for (i = 0; i < n; i++) {
		struct xdp_frame *xdpf = frames[i];

		err = netsec_xdp_queue_one(priv, xdpf, true);
		if (err != NETSEC_XDP_TX) {
			xdp_return_frame_rx_napi(xdpf);

			tx_ring->xdp_xmit++;

	spin_unlock(&tx_ring->lock);

	if (unlikely(flags & XDP_XMIT_FLUSH)) {
		netsec_xdp_ring_tx_db(priv, tx_ring->xdp_xmit);
		tx_ring->xdp_xmit = 0;

static int netsec_xdp_setup(struct netsec_priv *priv, struct bpf_prog *prog,
			    struct netlink_ext_ack *extack)
	struct net_device *dev = priv->ndev;
	struct bpf_prog *old_prog;
	/* For now, support only standard MTU-sized frames */
	if (prog && dev->mtu > 1500) {
		NL_SET_ERR_MSG_MOD(extack, "Jumbo frames not supported on XDP");

	if (netif_running(dev))
		netsec_netdev_stop(dev);

	/* Detach old prog, if any */
	old_prog = xchg(&priv->xdp_prog, prog);
		bpf_prog_put(old_prog);

	if (netif_running(dev))
		netsec_netdev_open(dev);

static int netsec_xdp(struct net_device *ndev, struct netdev_bpf *xdp)
	struct netsec_priv *priv = netdev_priv(ndev);

	switch (xdp->command) {
	case XDP_SETUP_PROG:
		return netsec_xdp_setup(priv, xdp->prog, xdp->extack);
	case XDP_QUERY_PROG:
		xdp->prog_id = priv->xdp_prog ? priv->xdp_prog->aux->id : 0;

static const struct net_device_ops netsec_netdev_ops = {
	.ndo_init		= netsec_netdev_init,
	.ndo_uninit		= netsec_netdev_uninit,
	.ndo_open		= netsec_netdev_open,
	.ndo_stop		= netsec_netdev_stop,
	.ndo_start_xmit		= netsec_netdev_start_xmit,
	.ndo_set_features	= netsec_netdev_set_features,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= netsec_netdev_ioctl,
	.ndo_xdp_xmit		= netsec_xdp_xmit,
	.ndo_bpf		= netsec_xdp,

static int netsec_of_probe(struct platform_device *pdev,
			   struct netsec_priv *priv, u32 *phy_addr)
	priv->phy_np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
	if (!priv->phy_np) {
		dev_err(&pdev->dev, "missing required property 'phy-handle'\n");

	*phy_addr = of_mdio_parse_addr(&pdev->dev, priv->phy_np);

	priv->clk = devm_clk_get(&pdev->dev, NULL); /* get by 'phy_ref_clk' */
	if (IS_ERR(priv->clk)) {
		dev_err(&pdev->dev, "phy_ref_clk not found\n");
		return PTR_ERR(priv->clk);

	priv->freq = clk_get_rate(priv->clk);

static int netsec_acpi_probe(struct platform_device *pdev,
			     struct netsec_priv *priv, u32 *phy_addr)

	if (!IS_ENABLED(CONFIG_ACPI))

	ret = device_property_read_u32(&pdev->dev, "phy-channel", phy_addr);
		dev_err(&pdev->dev,
			"missing required property 'phy-channel'\n");

	ret = device_property_read_u32(&pdev->dev,
				       "socionext,phy-clock-frequency",
				       &priv->freq);
		dev_err(&pdev->dev,
			"missing required property 'socionext,phy-clock-frequency'\n");

static void netsec_unregister_mdio(struct netsec_priv *priv)
	struct phy_device *phydev = priv->phydev;

	if (!dev_of_node(priv->dev) && phydev) {
		phy_device_remove(phydev);
		phy_device_free(phydev);

	mdiobus_unregister(priv->mii_bus);

static int netsec_register_mdio(struct netsec_priv *priv, u32 phy_addr)
	struct mii_bus *bus;

	bus = devm_mdiobus_alloc(priv->dev);

	snprintf(bus->id, MII_BUS_ID_SIZE, "%s", dev_name(priv->dev));
	bus->name = "SNI NETSEC MDIO";
	bus->read = netsec_phy_read;
	bus->write = netsec_phy_write;
	bus->parent = priv->dev;
	priv->mii_bus = bus;

	if (dev_of_node(priv->dev)) {
		struct device_node *mdio_node, *parent = dev_of_node(priv->dev);

		mdio_node = of_get_child_by_name(parent, "mdio");
			/* Older firmware doesn't populate the mdio subnode;
			 * tolerate that to allow a relaxed firmware upgrade
			 * in due time.
			 */
			dev_info(priv->dev, "Upgrade f/w for mdio subnode!\n");

		ret = of_mdiobus_register(bus, parent);
		of_node_put(mdio_node);
			dev_err(priv->dev, "mdiobus register err(%d)\n", ret);

	/* Mask out all PHYs from auto probing. */
	ret = mdiobus_register(bus);
		dev_err(priv->dev, "mdiobus register err(%d)\n", ret);

	priv->phydev = get_phy_device(bus, phy_addr, false);
	if (IS_ERR(priv->phydev)) {
		ret = PTR_ERR(priv->phydev);
		dev_err(priv->dev, "get_phy_device err(%d)\n", ret);
		priv->phydev = NULL;

	ret = phy_device_register(priv->phydev);
		mdiobus_unregister(bus);
		dev_err(priv->dev,
			"phy_device_register err(%d)\n", ret);

static int netsec_probe(struct platform_device *pdev)
	struct resource *mmio_res, *eeprom_res, *irq_res;
	u8 *mac, macbuf[ETH_ALEN];
	struct netsec_priv *priv;
	u32 hw_ver, phy_addr = 0;
	struct net_device *ndev;

	mmio_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
		dev_err(&pdev->dev, "No MMIO resource found.\n");

	eeprom_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
		dev_info(&pdev->dev, "No EEPROM resource found.\n");

	irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
		dev_err(&pdev->dev, "No IRQ resource found.\n");

	ndev = alloc_etherdev(sizeof(*priv));

	priv = netdev_priv(ndev);

	spin_lock_init(&priv->reglock);
	SET_NETDEV_DEV(ndev, &pdev->dev);
	platform_set_drvdata(pdev, priv);
	ndev->irq = irq_res->start;
	priv->dev = &pdev->dev;

	priv->msg_enable = NETIF_MSG_TX_ERR | NETIF_MSG_HW | NETIF_MSG_DRV |
			   NETIF_MSG_LINK | NETIF_MSG_PROBE;

	priv->phy_interface = device_get_phy_mode(&pdev->dev);
	if ((int)priv->phy_interface < 0) {
		dev_err(&pdev->dev, "missing required property 'phy-mode'\n");

	priv->ioaddr = devm_ioremap(&pdev->dev, mmio_res->start,
				    resource_size(mmio_res));
	if (!priv->ioaddr) {
		dev_err(&pdev->dev, "devm_ioremap() failed\n");

	priv->eeprom_base = devm_ioremap(&pdev->dev, eeprom_res->start,
					 resource_size(eeprom_res));
	if (!priv->eeprom_base) {
		dev_err(&pdev->dev, "devm_ioremap() failed for EEPROM\n");

	mac = device_get_mac_address(&pdev->dev, macbuf, sizeof(macbuf));
		ether_addr_copy(ndev->dev_addr, mac);

	if (priv->eeprom_base &&
	    (!mac || !is_valid_ether_addr(ndev->dev_addr))) {
		void __iomem *macp = priv->eeprom_base +
					NETSEC_EEPROM_MAC_ADDRESS;

		ndev->dev_addr[0] = readb(macp + 3);
		ndev->dev_addr[1] = readb(macp + 2);
		ndev->dev_addr[2] = readb(macp + 1);
		ndev->dev_addr[3] = readb(macp + 0);
		ndev->dev_addr[4] = readb(macp + 7);
		ndev->dev_addr[5] = readb(macp + 6);
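		/* The byte swizzle above suggests the EEPROM stores the MAC
		 * address as two 32-bit little-endian words at
		 * NETSEC_EEPROM_MAC_ADDRESS, hence the bytes are read back
		 * in reverse order within each word (offsets 3..0, then
		 * 7..6).
		 */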
	if (!is_valid_ether_addr(ndev->dev_addr)) {
		dev_warn(&pdev->dev, "No MAC address found, using random\n");
		eth_hw_addr_random(ndev);

	if (dev_of_node(&pdev->dev))
		ret = netsec_of_probe(pdev, priv, &phy_addr);
	else
		ret = netsec_acpi_probe(pdev, priv, &phy_addr);

	priv->phy_addr = phy_addr;

		dev_err(&pdev->dev, "missing PHY reference clock frequency\n");

	/* default for throughput */
	priv->et_coalesce.rx_coalesce_usecs = 500;
	priv->et_coalesce.rx_max_coalesced_frames = 8;
	priv->et_coalesce.tx_coalesce_usecs = 500;
	priv->et_coalesce.tx_max_coalesced_frames = 8;

	ret = device_property_read_u32(&pdev->dev, "max-frame-size",
				       &ndev->max_mtu);
		ndev->max_mtu = ETH_DATA_LEN;
	/* runtime_pm coverage just for probe; open/close also cover it */
	pm_runtime_enable(&pdev->dev);
	pm_runtime_get_sync(&pdev->dev);

	hw_ver = netsec_read(priv, NETSEC_REG_F_TAIKI_VER);
	/* this driver supports only the F_TAIKI style of NETSEC */
	if (NETSEC_F_NETSEC_VER_MAJOR_NUM(hw_ver) !=
	    NETSEC_F_NETSEC_VER_MAJOR_NUM(NETSEC_REG_NETSEC_VER_F_TAIKI)) {

	dev_info(&pdev->dev, "hardware revision %d.%d\n",
		 hw_ver >> 16, hw_ver & 0xffff);

	netif_napi_add(ndev, &priv->napi, netsec_napi_poll, NAPI_POLL_WEIGHT);

	ndev->netdev_ops = &netsec_netdev_ops;
	ndev->ethtool_ops = &netsec_ethtool_ops;

	ndev->features |= NETIF_F_HIGHDMA | NETIF_F_RXCSUM | NETIF_F_GSO |
			  NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
	ndev->hw_features = ndev->features;

	priv->rx_cksum_offload_flag = true;

	ret = netsec_register_mdio(priv, phy_addr);

	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40)))
		dev_warn(&pdev->dev, "Failed to set DMA mask\n");

	ret = register_netdev(ndev);
		netif_err(priv, probe, ndev, "register_netdev() failed\n");

	pm_runtime_put_sync(&pdev->dev);

	netsec_unregister_mdio(priv);

	netif_napi_del(&priv->napi);

	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);

	dev_err(&pdev->dev, "init failed\n");

static int netsec_remove(struct platform_device *pdev)
	struct netsec_priv *priv = platform_get_drvdata(pdev);

	unregister_netdev(priv->ndev);

	netsec_unregister_mdio(priv);

	netif_napi_del(&priv->napi);

	pm_runtime_disable(&pdev->dev);
	free_netdev(priv->ndev);

static int netsec_runtime_suspend(struct device *dev)
	struct netsec_priv *priv = dev_get_drvdata(dev);

	netsec_write(priv, NETSEC_REG_CLK_EN, 0);

	clk_disable_unprepare(priv->clk);

static int netsec_runtime_resume(struct device *dev)
	struct netsec_priv *priv = dev_get_drvdata(dev);

	clk_prepare_enable(priv->clk);

	netsec_write(priv, NETSEC_REG_CLK_EN, NETSEC_CLK_EN_REG_DOM_D |
					      NETSEC_CLK_EN_REG_DOM_C |
					      NETSEC_CLK_EN_REG_DOM_G);

static const struct dev_pm_ops netsec_pm_ops = {
	SET_RUNTIME_PM_OPS(netsec_runtime_suspend, netsec_runtime_resume, NULL)

static const struct of_device_id netsec_dt_ids[] = {
	{ .compatible = "socionext,synquacer-netsec" },
MODULE_DEVICE_TABLE(of, netsec_dt_ids);

static const struct acpi_device_id netsec_acpi_ids[] = {
MODULE_DEVICE_TABLE(acpi, netsec_acpi_ids);

static struct platform_driver netsec_driver = {
	.probe	= netsec_probe,
	.remove	= netsec_remove,
		.pm		= &netsec_pm_ops,
		.of_match_table	= netsec_dt_ids,
		.acpi_match_table = ACPI_PTR(netsec_acpi_ids),

module_platform_driver(netsec_driver);

MODULE_AUTHOR("Jassi Brar <jaswinder.singh@linaro.org>");
MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
MODULE_DESCRIPTION("NETSEC Ethernet driver");
MODULE_LICENSE("GPL");