1 // SPDX-License-Identifier: GPL-2.0
2 /* Renesas Ethernet AVB device driver
4 * Copyright (C) 2014-2019 Renesas Electronics Corporation
5 * Copyright (C) 2015 Renesas Solutions Corp.
6 * Copyright (C) 2015-2016 Cogent Embedded, Inc. <source@cogentembedded.com>
8 * Based on the SuperH Ethernet driver
11 #include <linux/cache.h>
12 #include <linux/clk.h>
13 #include <linux/delay.h>
14 #include <linux/dma-mapping.h>
15 #include <linux/err.h>
16 #include <linux/etherdevice.h>
17 #include <linux/ethtool.h>
18 #include <linux/if_vlan.h>
19 #include <linux/kernel.h>
20 #include <linux/list.h>
21 #include <linux/module.h>
22 #include <linux/net_tstamp.h>
24 #include <linux/of_device.h>
25 #include <linux/of_irq.h>
26 #include <linux/of_mdio.h>
27 #include <linux/of_net.h>
28 #include <linux/pm_runtime.h>
29 #include <linux/slab.h>
30 #include <linux/spinlock.h>
31 #include <linux/sys_soc.h>
33 #include <asm/div64.h>
37 #define RAVB_DEF_MSG_ENABLE \
43 static const char *ravb_rx_irqs[NUM_RX_QUEUE] = {
48 static const char *ravb_tx_irqs[NUM_TX_QUEUE] = {
53 void ravb_modify(struct net_device *ndev, enum ravb_reg reg, u32 clear,
56 ravb_write(ndev, (ravb_read(ndev, reg) & ~clear) | set, reg);
59 int ravb_wait(struct net_device *ndev, enum ravb_reg reg, u32 mask, u32 value)
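/* Poll until the masked register bits reach the expected value; give up after a bounded number of iterations so a hung device cannot stall us forever. */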
63 for (i = 0; i < 10000; i++) {
64 if ((ravb_read(ndev, reg) & mask) == value)
71 static int ravb_config(struct net_device *ndev)
76 ravb_modify(ndev, CCC, CCC_OPC, CCC_OPC_CONFIG);
77 /* Check if the operating mode is changed to the config mode */
78 error = ravb_wait(ndev, CSR, CSR_OPS, CSR_OPS_CONFIG);
80 netdev_err(ndev, "failed to switch device to config mode\n");
85 static void ravb_set_rate(struct net_device *ndev)
87 struct ravb_private *priv = netdev_priv(ndev);
89 switch (priv->speed) {
90 case 100: /* 100BASE */
91 ravb_write(ndev, GECMR_SPEED_100, GECMR);
93 case 1000: /* 1000BASE */
94 ravb_write(ndev, GECMR_SPEED_1000, GECMR);
99 static void ravb_set_buffer_align(struct sk_buff *skb)
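/* Reserve head room so skb->data lands on a RAVB_ALIGN boundary, as the AVB-DMAC expects for RX buffers. */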
101 u32 reserve = (unsigned long)skb->data & (RAVB_ALIGN - 1);
104 skb_reserve(skb, RAVB_ALIGN - reserve);
107 /* Get the MAC address from the MAC address registers
109 * The Ethernet AVB device has no ROM for the MAC address.
110 * This function reads the MAC address that the bootloader programmed.
112 static void ravb_read_mac_address(struct device_node *np,
113 struct net_device *ndev)
117 ret = of_get_mac_address(np, ndev->dev_addr);
119 u32 mahr = ravb_read(ndev, MAHR);
120 u32 malr = ravb_read(ndev, MALR);
122 ndev->dev_addr[0] = (mahr >> 24) & 0xFF;
123 ndev->dev_addr[1] = (mahr >> 16) & 0xFF;
124 ndev->dev_addr[2] = (mahr >> 8) & 0xFF;
125 ndev->dev_addr[3] = (mahr >> 0) & 0xFF;
126 ndev->dev_addr[4] = (malr >> 8) & 0xFF;
127 ndev->dev_addr[5] = (malr >> 0) & 0xFF;
131 static void ravb_mdio_ctrl(struct mdiobb_ctrl *ctrl, u32 mask, int set)
133 struct ravb_private *priv = container_of(ctrl, struct ravb_private,
136 ravb_modify(priv->ndev, PIR, mask, set ? mask : 0);
139 /* MDC pin control */
140 static void ravb_set_mdc(struct mdiobb_ctrl *ctrl, int level)
142 ravb_mdio_ctrl(ctrl, PIR_MDC, level);
145 /* Data I/O pin control */
146 static void ravb_set_mdio_dir(struct mdiobb_ctrl *ctrl, int output)
148 ravb_mdio_ctrl(ctrl, PIR_MMD, output);
152 static void ravb_set_mdio_data(struct mdiobb_ctrl *ctrl, int value)
154 ravb_mdio_ctrl(ctrl, PIR_MDO, value);
158 static int ravb_get_mdio_data(struct mdiobb_ctrl *ctrl)
160 struct ravb_private *priv = container_of(ctrl, struct ravb_private,
163 return (ravb_read(priv->ndev, PIR) & PIR_MDI) != 0;
166 /* MDIO bus control struct */
167 static const struct mdiobb_ops bb_ops = {
168 .owner = THIS_MODULE,
169 .set_mdc = ravb_set_mdc,
170 .set_mdio_dir = ravb_set_mdio_dir,
171 .set_mdio_data = ravb_set_mdio_data,
172 .get_mdio_data = ravb_get_mdio_data,
175 /* Free TX skb function for AVB-IP */
176 static int ravb_tx_free(struct net_device *ndev, int q, bool free_txed_only)
178 struct ravb_private *priv = netdev_priv(ndev);
179 struct net_device_stats *stats = &priv->stats[q];
180 int num_tx_desc = priv->num_tx_desc;
181 struct ravb_tx_desc *desc;
186 for (; priv->cur_tx[q] - priv->dirty_tx[q] > 0; priv->dirty_tx[q]++) {
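/* cur_tx and dirty_tx count descriptors, not packets; one packet may span num_tx_desc descriptors. */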
189 entry = priv->dirty_tx[q] % (priv->num_tx_ring[q] *
191 desc = &priv->tx_ring[q][entry];
192 txed = desc->die_dt == DT_FEMPTY;
193 if (free_txed_only && !txed)
195 /* Descriptor type must be checked before all other reads */
197 size = le16_to_cpu(desc->ds_tagl) & TX_DS;
198 /* Free the original skb. */
199 if (priv->tx_skb[q][entry / num_tx_desc]) {
200 dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr),
201 size, DMA_TO_DEVICE);
202 /* Last packet descriptor? */
203 if (entry % num_tx_desc == num_tx_desc - 1) {
204 entry /= num_tx_desc;
205 dev_kfree_skb_any(priv->tx_skb[q][entry]);
206 priv->tx_skb[q][entry] = NULL;
213 stats->tx_bytes += size;
214 desc->die_dt = DT_EEMPTY;
219 /* Free skb's and DMA buffers for Ethernet AVB */
220 static void ravb_ring_free(struct net_device *ndev, int q)
222 struct ravb_private *priv = netdev_priv(ndev);
223 int num_tx_desc = priv->num_tx_desc;
227 if (priv->rx_ring[q]) {
228 for (i = 0; i < priv->num_rx_ring[q]; i++) {
229 struct ravb_ex_rx_desc *desc = &priv->rx_ring[q][i];
231 if (!dma_mapping_error(ndev->dev.parent,
232 le32_to_cpu(desc->dptr)))
233 dma_unmap_single(ndev->dev.parent,
234 le32_to_cpu(desc->dptr),
238 ring_size = sizeof(struct ravb_ex_rx_desc) *
239 (priv->num_rx_ring[q] + 1);
240 dma_free_coherent(ndev->dev.parent, ring_size, priv->rx_ring[q],
241 priv->rx_desc_dma[q]);
242 priv->rx_ring[q] = NULL;
245 if (priv->tx_ring[q]) {
246 ravb_tx_free(ndev, q, false);
248 ring_size = sizeof(struct ravb_tx_desc) *
249 (priv->num_tx_ring[q] * num_tx_desc + 1);
250 dma_free_coherent(ndev->dev.parent, ring_size, priv->tx_ring[q],
251 priv->tx_desc_dma[q]);
252 priv->tx_ring[q] = NULL;
255 /* Free RX skb ringbuffer */
256 if (priv->rx_skb[q]) {
257 for (i = 0; i < priv->num_rx_ring[q]; i++)
258 dev_kfree_skb(priv->rx_skb[q][i]);
260 kfree(priv->rx_skb[q]);
261 priv->rx_skb[q] = NULL;
263 /* Free aligned TX buffers */
264 kfree(priv->tx_align[q]);
265 priv->tx_align[q] = NULL;
267 /* Free TX skb ringbuffer.
268 * SKBs are freed by ravb_tx_free() call above.
270 kfree(priv->tx_skb[q]);
271 priv->tx_skb[q] = NULL;
274 /* Format skb and descriptor buffer for Ethernet AVB */
275 static void ravb_ring_format(struct net_device *ndev, int q)
277 struct ravb_private *priv = netdev_priv(ndev);
278 int num_tx_desc = priv->num_tx_desc;
279 struct ravb_ex_rx_desc *rx_desc;
280 struct ravb_tx_desc *tx_desc;
281 struct ravb_desc *desc;
282 int rx_ring_size = sizeof(*rx_desc) * priv->num_rx_ring[q];
283 int tx_ring_size = sizeof(*tx_desc) * priv->num_tx_ring[q] *
290 priv->dirty_rx[q] = 0;
291 priv->dirty_tx[q] = 0;
293 memset(priv->rx_ring[q], 0, rx_ring_size);
294 /* Build RX ring buffer */
295 for (i = 0; i < priv->num_rx_ring[q]; i++) {
297 rx_desc = &priv->rx_ring[q][i];
298 rx_desc->ds_cc = cpu_to_le16(RX_BUF_SZ);
299 dma_addr = dma_map_single(ndev->dev.parent, priv->rx_skb[q][i]->data,
302 /* We just set the data size to 0 for a failed mapping which
303 * should prevent DMA from happening...
305 if (dma_mapping_error(ndev->dev.parent, dma_addr))
306 rx_desc->ds_cc = cpu_to_le16(0);
307 rx_desc->dptr = cpu_to_le32(dma_addr);
308 rx_desc->die_dt = DT_FEMPTY;
310 rx_desc = &priv->rx_ring[q][i];
311 rx_desc->dptr = cpu_to_le32((u32)priv->rx_desc_dma[q]);
312 rx_desc->die_dt = DT_LINKFIX; /* type */
314 memset(priv->tx_ring[q], 0, tx_ring_size);
315 /* Build TX ring buffer */
316 for (i = 0, tx_desc = priv->tx_ring[q]; i < priv->num_tx_ring[q];
318 tx_desc->die_dt = DT_EEMPTY;
319 if (num_tx_desc > 1) {
321 tx_desc->die_dt = DT_EEMPTY;
324 tx_desc->dptr = cpu_to_le32((u32)priv->tx_desc_dma[q]);
325 tx_desc->die_dt = DT_LINKFIX; /* type */
327 /* RX descriptor base address for best effort */
328 desc = &priv->desc_bat[RX_QUEUE_OFFSET + q];
329 desc->die_dt = DT_LINKFIX; /* type */
330 desc->dptr = cpu_to_le32((u32)priv->rx_desc_dma[q]);
332 /* TX descriptor base address for best effort */
333 desc = &priv->desc_bat[q];
334 desc->die_dt = DT_LINKFIX; /* type */
335 desc->dptr = cpu_to_le32((u32)priv->tx_desc_dma[q]);
338 /* Init skb and descriptor buffer for Ethernet AVB */
339 static int ravb_ring_init(struct net_device *ndev, int q)
341 struct ravb_private *priv = netdev_priv(ndev);
342 int num_tx_desc = priv->num_tx_desc;
347 /* Allocate RX and TX skb rings */
348 priv->rx_skb[q] = kcalloc(priv->num_rx_ring[q],
349 sizeof(*priv->rx_skb[q]), GFP_KERNEL);
350 priv->tx_skb[q] = kcalloc(priv->num_tx_ring[q],
351 sizeof(*priv->tx_skb[q]), GFP_KERNEL);
352 if (!priv->rx_skb[q] || !priv->tx_skb[q])
355 for (i = 0; i < priv->num_rx_ring[q]; i++) {
356 skb = netdev_alloc_skb(ndev, RX_BUF_SZ + RAVB_ALIGN - 1);
359 ravb_set_buffer_align(skb);
360 priv->rx_skb[q][i] = skb;
363 if (num_tx_desc > 1) {
364 /* Allocate rings for the aligned buffers */
365 priv->tx_align[q] = kmalloc(DPTR_ALIGN * priv->num_tx_ring[q] +
366 DPTR_ALIGN - 1, GFP_KERNEL);
367 if (!priv->tx_align[q])
371 /* Allocate all RX descriptors. */
372 ring_size = sizeof(struct ravb_ex_rx_desc) * (priv->num_rx_ring[q] + 1);
373 priv->rx_ring[q] = dma_alloc_coherent(ndev->dev.parent, ring_size,
374 &priv->rx_desc_dma[q],
376 if (!priv->rx_ring[q])
379 priv->dirty_rx[q] = 0;
381 /* Allocate all TX descriptors. */
382 ring_size = sizeof(struct ravb_tx_desc) *
383 (priv->num_tx_ring[q] * num_tx_desc + 1);
384 priv->tx_ring[q] = dma_alloc_coherent(ndev->dev.parent, ring_size,
385 &priv->tx_desc_dma[q],
387 if (!priv->tx_ring[q])
393 ravb_ring_free(ndev, q);
398 /* E-MAC init function */
399 static void ravb_emac_init(struct net_device *ndev)
401 /* Receive frame limit set register */
402 ravb_write(ndev, ndev->mtu + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN, RFLR);
404 /* EMAC Mode: PAUSE prohibition; Duplex; RX Checksum; TX; RX */
405 ravb_write(ndev, ECMR_ZPF | ECMR_DM |
406 (ndev->features & NETIF_F_RXCSUM ? ECMR_RCSC : 0) |
407 ECMR_TE | ECMR_RE, ECMR);
411 /* Set MAC address */
413 (ndev->dev_addr[0] << 24) | (ndev->dev_addr[1] << 16) |
414 (ndev->dev_addr[2] << 8) | (ndev->dev_addr[3]), MAHR);
416 (ndev->dev_addr[4] << 8) | (ndev->dev_addr[5]), MALR);
418 /* E-MAC status register clear */
419 ravb_write(ndev, ECSR_ICD | ECSR_MPD, ECSR);
421 /* E-MAC interrupt enable register */
422 ravb_write(ndev, ECSIPR_ICDIP | ECSIPR_MPDIP | ECSIPR_LCHNGIP, ECSIPR);
425 /* Device init function for Ethernet AVB */
426 static int ravb_dmac_init(struct net_device *ndev)
428 struct ravb_private *priv = netdev_priv(ndev);
431 /* Set CONFIG mode */
432 error = ravb_config(ndev);
436 error = ravb_ring_init(ndev, RAVB_BE);
439 error = ravb_ring_init(ndev, RAVB_NC);
441 ravb_ring_free(ndev, RAVB_BE);
445 /* Descriptor format */
446 ravb_ring_format(ndev, RAVB_BE);
447 ravb_ring_format(ndev, RAVB_NC);
451 RCR_EFFS | RCR_ENCF | RCR_ETS0 | RCR_ESF | 0x18000000, RCR);
454 ravb_write(ndev, TGC_TQP_AVBMODE1 | 0x00112200, TGC);
456 /* Timestamp enable */
457 ravb_write(ndev, TCCR_TFEN, TCCR);
459 /* Interrupt init: */
460 if (priv->chip_id == RCAR_GEN3) {
462 ravb_write(ndev, 0, DIL);
463 /* Set queue specific interrupt */
464 ravb_write(ndev, CIE_CRIE | CIE_CTIE | CIE_CL0M, CIE);
467 ravb_write(ndev, RIC0_FRE0 | RIC0_FRE1, RIC0);
468 /* Disable FIFO full warning */
469 ravb_write(ndev, 0, RIC1);
470 /* Receive FIFO full error, descriptor empty */
471 ravb_write(ndev, RIC2_QFE0 | RIC2_QFE1 | RIC2_RFFE, RIC2);
472 /* Frame transmitted, timestamp FIFO updated */
473 ravb_write(ndev, TIC_FTE0 | TIC_FTE1 | TIC_TFUE, TIC);
475 /* Setting the control will start the AVB-DMAC process. */
476 ravb_modify(ndev, CCC, CCC_OPC, CCC_OPC_OPERATION);
481 static void ravb_get_tx_tstamp(struct net_device *ndev)
483 struct ravb_private *priv = netdev_priv(ndev);
484 struct ravb_tstamp_skb *ts_skb, *ts_skb2;
485 struct skb_shared_hwtstamps shhwtstamps;
487 struct timespec64 ts;
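/* TSR.TFFL reports the number of entries pending in the TX timestamp FIFO. */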
492 count = (ravb_read(ndev, TSR) & TSR_TFFL) >> 8;
494 tfa2 = ravb_read(ndev, TFA2);
495 tfa_tag = (tfa2 & TFA2_TST) >> 16;
496 ts.tv_nsec = (u64)ravb_read(ndev, TFA0);
497 ts.tv_sec = ((u64)(tfa2 & TFA2_TSV) << 32) |
498 ravb_read(ndev, TFA1);
499 memset(&shhwtstamps, 0, sizeof(shhwtstamps));
500 shhwtstamps.hwtstamp = timespec64_to_ktime(ts);
501 list_for_each_entry_safe(ts_skb, ts_skb2, &priv->ts_skb_list,
505 list_del(&ts_skb->list);
507 if (tag == tfa_tag) {
508 skb_tstamp_tx(skb, &shhwtstamps);
509 dev_consume_skb_any(skb);
512 dev_kfree_skb_any(skb);
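/* Writing TCCR.TFR releases the entry just read from the timestamp FIFO. */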
515 ravb_modify(ndev, TCCR, TCCR_TFR, TCCR_TFR);
519 static void ravb_rx_csum(struct sk_buff *skb)
523 /* The hardware checksum is contained in sizeof(__sum16) (2) bytes
524 * appended to packet data
526 if (unlikely(skb->len < sizeof(__sum16)))
528 hw_csum = skb_tail_pointer(skb) - sizeof(__sum16);
529 skb->csum = csum_unfold((__force __sum16)get_unaligned_le16(hw_csum));
530 skb->ip_summed = CHECKSUM_COMPLETE;
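/* Strip the trailing checksum bytes now that the value is recorded in skb->csum. */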
531 skb_trim(skb, skb->len - sizeof(__sum16));
534 /* Packet receive function for Ethernet AVB */
535 static bool ravb_rx(struct net_device *ndev, int *quota, int q)
537 struct ravb_private *priv = netdev_priv(ndev);
538 int entry = priv->cur_rx[q] % priv->num_rx_ring[q];
539 int boguscnt = (priv->dirty_rx[q] + priv->num_rx_ring[q]) -
541 struct net_device_stats *stats = &priv->stats[q];
542 struct ravb_ex_rx_desc *desc;
545 struct timespec64 ts;
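/* Bound the work by both the ring occupancy and the remaining NAPI quota. */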
550 boguscnt = min(boguscnt, *quota);
552 desc = &priv->rx_ring[q][entry];
553 while (desc->die_dt != DT_FEMPTY) {
554 /* Descriptor type must be checked before all other reads */
556 desc_status = desc->msc;
557 pkt_len = le16_to_cpu(desc->ds_cc) & RX_DS;
562 /* We use 0-byte descriptors to mark the DMA mapping errors */
566 if (desc_status & MSC_MC)
569 if (desc_status & (MSC_CRC | MSC_RFE | MSC_RTSF | MSC_RTLF |
572 if (desc_status & MSC_CRC)
573 stats->rx_crc_errors++;
574 if (desc_status & MSC_RFE)
575 stats->rx_frame_errors++;
576 if (desc_status & (MSC_RTLF | MSC_RTSF))
577 stats->rx_length_errors++;
578 if (desc_status & MSC_CEEF)
579 stats->rx_missed_errors++;
581 u32 get_ts = priv->tstamp_rx_ctrl & RAVB_RXTSTAMP_TYPE;
583 skb = priv->rx_skb[q][entry];
584 priv->rx_skb[q][entry] = NULL;
585 dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr),
588 get_ts &= (q == RAVB_NC) ?
589 RAVB_RXTSTAMP_TYPE_V2_L2_EVENT :
590 ~RAVB_RXTSTAMP_TYPE_V2_L2_EVENT;
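/* PTP v2 L2 event frames arrive on the NC queue, so BE frames are timestamped only when "all" RX timestamping was requested. */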
592 struct skb_shared_hwtstamps *shhwtstamps;
594 shhwtstamps = skb_hwtstamps(skb);
595 memset(shhwtstamps, 0, sizeof(*shhwtstamps));
596 ts.tv_sec = ((u64) le16_to_cpu(desc->ts_sh) <<
597 32) | le32_to_cpu(desc->ts_sl);
598 ts.tv_nsec = le32_to_cpu(desc->ts_n);
599 shhwtstamps->hwtstamp = timespec64_to_ktime(ts);
602 skb_put(skb, pkt_len);
603 skb->protocol = eth_type_trans(skb, ndev);
604 if (ndev->features & NETIF_F_RXCSUM)
606 napi_gro_receive(&priv->napi[q], skb);
608 stats->rx_bytes += pkt_len;
611 entry = (++priv->cur_rx[q]) % priv->num_rx_ring[q];
612 desc = &priv->rx_ring[q][entry];
615 /* Refill the RX ring buffers. */
616 for (; priv->cur_rx[q] - priv->dirty_rx[q] > 0; priv->dirty_rx[q]++) {
617 entry = priv->dirty_rx[q] % priv->num_rx_ring[q];
618 desc = &priv->rx_ring[q][entry];
619 desc->ds_cc = cpu_to_le16(RX_BUF_SZ);
621 if (!priv->rx_skb[q][entry]) {
622 skb = netdev_alloc_skb(ndev,
626 break; /* Better luck next round. */
627 ravb_set_buffer_align(skb);
628 dma_addr = dma_map_single(ndev->dev.parent, skb->data,
629 le16_to_cpu(desc->ds_cc),
631 skb_checksum_none_assert(skb);
632 /* We just set the data size to 0 for a failed mapping
633 * which should prevent DMA from happening...
635 if (dma_mapping_error(ndev->dev.parent, dma_addr))
636 desc->ds_cc = cpu_to_le16(0);
637 desc->dptr = cpu_to_le32(dma_addr);
638 priv->rx_skb[q][entry] = skb;
640 /* Descriptor type must be set after all the above writes */
642 desc->die_dt = DT_FEMPTY;
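/* Charge the frames handled in this pass against the NAPI quota. */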
645 *quota -= limit - (++boguscnt);
647 return boguscnt <= 0;
650 static void ravb_rcv_snd_disable(struct net_device *ndev)
652 /* Disable TX and RX */
653 ravb_modify(ndev, ECMR, ECMR_RE | ECMR_TE, 0);
656 static void ravb_rcv_snd_enable(struct net_device *ndev)
658 /* Enable TX and RX */
659 ravb_modify(ndev, ECMR, ECMR_RE | ECMR_TE, ECMR_RE | ECMR_TE);
662 /* Wait until the DMA process has finished */
663 static int ravb_stop_dma(struct net_device *ndev)
667 /* Wait for the hardware TX process to stop */
668 error = ravb_wait(ndev, TCCR,
669 TCCR_TSRQ0 | TCCR_TSRQ1 | TCCR_TSRQ2 | TCCR_TSRQ3, 0);
673 error = ravb_wait(ndev, CSR, CSR_TPO0 | CSR_TPO1 | CSR_TPO2 | CSR_TPO3,
678 /* Stop the E-MAC's RX/TX processes. */
679 ravb_rcv_snd_disable(ndev);
681 /* Wait for the RX DMA process to stop */
682 error = ravb_wait(ndev, CSR, CSR_RPO, 0);
686 /* Stop AVB-DMAC process */
687 return ravb_config(ndev);
690 /* E-MAC interrupt handler */
691 static void ravb_emac_interrupt_unlocked(struct net_device *ndev)
693 struct ravb_private *priv = netdev_priv(ndev);
696 ecsr = ravb_read(ndev, ECSR);
697 ravb_write(ndev, ecsr, ECSR); /* clear interrupt */
700 pm_wakeup_event(&priv->pdev->dev, 0);
702 ndev->stats.tx_carrier_errors++;
703 if (ecsr & ECSR_LCHNG) {
705 if (priv->no_avb_link)
707 psr = ravb_read(ndev, PSR);
708 if (priv->avb_link_active_low)
710 if (!(psr & PSR_LMON)) {
711 /* Disable RX and TX */
712 ravb_rcv_snd_disable(ndev);
714 /* Enable RX and TX */
715 ravb_rcv_snd_enable(ndev);
720 static irqreturn_t ravb_emac_interrupt(int irq, void *dev_id)
722 struct net_device *ndev = dev_id;
723 struct ravb_private *priv = netdev_priv(ndev);
725 spin_lock(&priv->lock);
726 ravb_emac_interrupt_unlocked(ndev);
727 spin_unlock(&priv->lock);
731 /* Error interrupt handler */
732 static void ravb_error_interrupt(struct net_device *ndev)
734 struct ravb_private *priv = netdev_priv(ndev);
737 eis = ravb_read(ndev, EIS);
738 ravb_write(ndev, ~(EIS_QFS | EIS_RESERVED), EIS);
740 ris2 = ravb_read(ndev, RIS2);
741 ravb_write(ndev, ~(RIS2_QFF0 | RIS2_RFFF | RIS2_RESERVED),
744 /* Receive Descriptor Empty int (BE queue) */
745 if (ris2 & RIS2_QFF0)
746 priv->stats[RAVB_BE].rx_over_errors++;
748 /* Receive Descriptor Empty int (NC queue) */
749 if (ris2 & RIS2_QFF1)
750 priv->stats[RAVB_NC].rx_over_errors++;
752 /* Receive FIFO Overflow int */
753 if (ris2 & RIS2_RFFF)
754 priv->rx_fifo_errors++;
758 static bool ravb_queue_interrupt(struct net_device *ndev, int q)
760 struct ravb_private *priv = netdev_priv(ndev);
761 u32 ris0 = ravb_read(ndev, RIS0);
762 u32 ric0 = ravb_read(ndev, RIC0);
763 u32 tis = ravb_read(ndev, TIS);
764 u32 tic = ravb_read(ndev, TIC);
766 if (((ris0 & ric0) & BIT(q)) || ((tis & tic) & BIT(q))) {
767 if (napi_schedule_prep(&priv->napi[q])) {
768 /* Mask RX and TX interrupts */
769 if (priv->chip_id == RCAR_GEN2) {
770 ravb_write(ndev, ric0 & ~BIT(q), RIC0);
771 ravb_write(ndev, tic & ~BIT(q), TIC);
773 ravb_write(ndev, BIT(q), RID0);
774 ravb_write(ndev, BIT(q), TID);
776 __napi_schedule(&priv->napi[q]);
779 "ignoring interrupt, rx status 0x%08x, rx mask 0x%08x,\n",
782 " tx status 0x%08x, tx mask 0x%08x.\n",
790 static bool ravb_timestamp_interrupt(struct net_device *ndev)
792 u32 tis = ravb_read(ndev, TIS);
794 if (tis & TIS_TFUF) {
795 ravb_write(ndev, ~(TIS_TFUF | TIS_RESERVED), TIS);
796 ravb_get_tx_tstamp(ndev);
802 static irqreturn_t ravb_interrupt(int irq, void *dev_id)
804 struct net_device *ndev = dev_id;
805 struct ravb_private *priv = netdev_priv(ndev);
806 irqreturn_t result = IRQ_NONE;
809 spin_lock(&priv->lock);
810 /* Get interrupt status */
811 iss = ravb_read(ndev, ISS);
813 /* Received and transmitted interrupts */
814 if (iss & (ISS_FRS | ISS_FTS | ISS_TFUS)) {
817 /* Timestamp updated */
818 if (ravb_timestamp_interrupt(ndev))
819 result = IRQ_HANDLED;
821 /* Network control and best effort queue RX/TX */
822 for (q = RAVB_NC; q >= RAVB_BE; q--) {
823 if (ravb_queue_interrupt(ndev, q))
824 result = IRQ_HANDLED;
828 /* E-MAC status summary */
830 ravb_emac_interrupt_unlocked(ndev);
831 result = IRQ_HANDLED;
834 /* Error status summary */
836 ravb_error_interrupt(ndev);
837 result = IRQ_HANDLED;
840 /* gPTP interrupt status summary */
841 if (iss & ISS_CGIS) {
842 ravb_ptp_interrupt(ndev);
843 result = IRQ_HANDLED;
846 spin_unlock(&priv->lock);
850 /* Timestamp/Error/gPTP interrupt handler */
851 static irqreturn_t ravb_multi_interrupt(int irq, void *dev_id)
853 struct net_device *ndev = dev_id;
854 struct ravb_private *priv = netdev_priv(ndev);
855 irqreturn_t result = IRQ_NONE;
858 spin_lock(&priv->lock);
859 /* Get interrupt status */
860 iss = ravb_read(ndev, ISS);
862 /* Timestamp updated */
863 if ((iss & ISS_TFUS) && ravb_timestamp_interrupt(ndev))
864 result = IRQ_HANDLED;
866 /* Error status summary */
868 ravb_error_interrupt(ndev);
869 result = IRQ_HANDLED;
872 /* gPTP interrupt status summary */
873 if (iss & ISS_CGIS) {
874 ravb_ptp_interrupt(ndev);
875 result = IRQ_HANDLED;
878 spin_unlock(&priv->lock);
882 static irqreturn_t ravb_dma_interrupt(int irq, void *dev_id, int q)
884 struct net_device *ndev = dev_id;
885 struct ravb_private *priv = netdev_priv(ndev);
886 irqreturn_t result = IRQ_NONE;
888 spin_lock(&priv->lock);
890 /* Network control/Best effort queue RX/TX */
891 if (ravb_queue_interrupt(ndev, q))
892 result = IRQ_HANDLED;
894 spin_unlock(&priv->lock);
898 static irqreturn_t ravb_be_interrupt(int irq, void *dev_id)
900 return ravb_dma_interrupt(irq, dev_id, RAVB_BE);
903 static irqreturn_t ravb_nc_interrupt(int irq, void *dev_id)
905 return ravb_dma_interrupt(irq, dev_id, RAVB_NC);
908 static int ravb_poll(struct napi_struct *napi, int budget)
910 struct net_device *ndev = napi->dev;
911 struct ravb_private *priv = netdev_priv(ndev);
913 int q = napi - priv->napi;
917 /* Processing RX Descriptor Ring */
918 /* Clear RX interrupt */
919 ravb_write(ndev, ~(mask | RIS0_RESERVED), RIS0);
920 if (ravb_rx(ndev, &quota, q))
923 /* Processing TX Descriptor Ring */
924 spin_lock_irqsave(&priv->lock, flags);
925 /* Clear TX interrupt */
926 ravb_write(ndev, ~(mask | TIS_RESERVED), TIS);
927 ravb_tx_free(ndev, q, true);
928 netif_wake_subqueue(ndev, q);
929 spin_unlock_irqrestore(&priv->lock, flags);
933 /* Re-enable RX/TX interrupts */
934 spin_lock_irqsave(&priv->lock, flags);
935 if (priv->chip_id == RCAR_GEN2) {
936 ravb_modify(ndev, RIC0, mask, mask);
937 ravb_modify(ndev, TIC, mask, mask);
939 ravb_write(ndev, mask, RIE0);
940 ravb_write(ndev, mask, TIE);
942 spin_unlock_irqrestore(&priv->lock, flags);
944 /* Receive error message handling */
945 priv->rx_over_errors = priv->stats[RAVB_BE].rx_over_errors;
946 priv->rx_over_errors += priv->stats[RAVB_NC].rx_over_errors;
947 if (priv->rx_over_errors != ndev->stats.rx_over_errors)
948 ndev->stats.rx_over_errors = priv->rx_over_errors;
949 if (priv->rx_fifo_errors != ndev->stats.rx_fifo_errors)
950 ndev->stats.rx_fifo_errors = priv->rx_fifo_errors;
952 return budget - quota;
955 /* PHY state control function */
956 static void ravb_adjust_link(struct net_device *ndev)
958 struct ravb_private *priv = netdev_priv(ndev);
959 struct phy_device *phydev = ndev->phydev;
960 bool new_state = false;
963 spin_lock_irqsave(&priv->lock, flags);
965 /* Disable TX and RX right away if the E-MAC change is ignored */
966 if (priv->no_avb_link)
967 ravb_rcv_snd_disable(ndev);
970 if (phydev->speed != priv->speed) {
972 priv->speed = phydev->speed;
976 ravb_modify(ndev, ECMR, ECMR_TXF, 0);
978 priv->link = phydev->link;
980 } else if (priv->link) {
986 /* Enable TX and RX right away if the E-MAC change is ignored */
987 if (priv->no_avb_link && phydev->link)
988 ravb_rcv_snd_enable(ndev);
990 spin_unlock_irqrestore(&priv->lock, flags);
992 if (new_state && netif_msg_link(priv))
993 phy_print_status(phydev);
996 static const struct soc_device_attribute r8a7795es10[] = {
997 { .soc_id = "r8a7795", .revision = "ES1.0", },
1001 /* PHY init function */
1002 static int ravb_phy_init(struct net_device *ndev)
1004 struct device_node *np = ndev->dev.parent->of_node;
1005 struct ravb_private *priv = netdev_priv(ndev);
1006 struct phy_device *phydev;
1007 struct device_node *pn;
1008 phy_interface_t iface;
1014 /* Try connecting to PHY */
1015 pn = of_parse_phandle(np, "phy-handle", 0);
1017 /* In the case of a fixed PHY, the DT node associated
1018 * with the PHY is the Ethernet MAC DT node.
1020 if (of_phy_is_fixed_link(np)) {
1021 err = of_phy_register_fixed_link(np);
1025 pn = of_node_get(np);
1028 iface = priv->rgmii_override ? PHY_INTERFACE_MODE_RGMII
1029 : priv->phy_interface;
1030 phydev = of_phy_connect(ndev, pn, ravb_adjust_link, 0, iface);
1033 netdev_err(ndev, "failed to connect PHY\n");
1035 goto err_deregister_fixed_link;
1038 /* This driver only supports 10/100Mbit speeds on R-Car H3 ES1.0
1041 if (soc_device_match(r8a7795es10)) {
1042 err = phy_set_max_speed(phydev, SPEED_100);
1044 netdev_err(ndev, "failed to limit PHY to 100Mbit/s\n");
1045 goto err_phy_disconnect;
1048 netdev_info(ndev, "limited PHY to 100Mbit/s\n");
1051 /* 10BASE, Pause and Asym Pause are not supported */
1052 phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Half_BIT);
1053 phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Full_BIT);
1054 phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_Pause_BIT);
1055 phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_Asym_Pause_BIT);
1057 /* Half Duplex is not supported */
1058 phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
1059 phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT);
1061 phy_attached_info(phydev);
1066 phy_disconnect(phydev);
1067 err_deregister_fixed_link:
1068 if (of_phy_is_fixed_link(np))
1069 of_phy_deregister_fixed_link(np);
1074 /* PHY control start function */
1075 static int ravb_phy_start(struct net_device *ndev)
1079 error = ravb_phy_init(ndev);
1083 phy_start(ndev->phydev);
1088 static u32 ravb_get_msglevel(struct net_device *ndev)
1090 struct ravb_private *priv = netdev_priv(ndev);
1092 return priv->msg_enable;
1095 static void ravb_set_msglevel(struct net_device *ndev, u32 value)
1097 struct ravb_private *priv = netdev_priv(ndev);
1099 priv->msg_enable = value;
1102 static const char ravb_gstrings_stats[][ETH_GSTRING_LEN] = {
1103 "rx_queue_0_current",
1104 "tx_queue_0_current",
1107 "rx_queue_0_packets",
1108 "tx_queue_0_packets",
1111 "rx_queue_0_mcast_packets",
1112 "rx_queue_0_errors",
1113 "rx_queue_0_crc_errors",
1114 "rx_queue_0_frame_errors",
1115 "rx_queue_0_length_errors",
1116 "rx_queue_0_missed_errors",
1117 "rx_queue_0_over_errors",
1119 "rx_queue_1_current",
1120 "tx_queue_1_current",
1123 "rx_queue_1_packets",
1124 "tx_queue_1_packets",
1127 "rx_queue_1_mcast_packets",
1128 "rx_queue_1_errors",
1129 "rx_queue_1_crc_errors",
1130 "rx_queue_1_frame_errors",
1131 "rx_queue_1_length_errors",
1132 "rx_queue_1_missed_errors",
1133 "rx_queue_1_over_errors",
1136 #define RAVB_STATS_LEN ARRAY_SIZE(ravb_gstrings_stats)
1138 static int ravb_get_sset_count(struct net_device *netdev, int sset)
1142 return RAVB_STATS_LEN;
1148 static void ravb_get_ethtool_stats(struct net_device *ndev,
1149 struct ethtool_stats *estats, u64 *data)
1151 struct ravb_private *priv = netdev_priv(ndev);
1155 /* Device-specific stats */
1156 for (q = RAVB_BE; q < NUM_RX_QUEUE; q++) {
1157 struct net_device_stats *stats = &priv->stats[q];
1159 data[i++] = priv->cur_rx[q];
1160 data[i++] = priv->cur_tx[q];
1161 data[i++] = priv->dirty_rx[q];
1162 data[i++] = priv->dirty_tx[q];
1163 data[i++] = stats->rx_packets;
1164 data[i++] = stats->tx_packets;
1165 data[i++] = stats->rx_bytes;
1166 data[i++] = stats->tx_bytes;
1167 data[i++] = stats->multicast;
1168 data[i++] = stats->rx_errors;
1169 data[i++] = stats->rx_crc_errors;
1170 data[i++] = stats->rx_frame_errors;
1171 data[i++] = stats->rx_length_errors;
1172 data[i++] = stats->rx_missed_errors;
1173 data[i++] = stats->rx_over_errors;
1177 static void ravb_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
1179 switch (stringset) {
1181 memcpy(data, ravb_gstrings_stats, sizeof(ravb_gstrings_stats));
1186 static void ravb_get_ringparam(struct net_device *ndev,
1187 struct ethtool_ringparam *ring)
1189 struct ravb_private *priv = netdev_priv(ndev);
1191 ring->rx_max_pending = BE_RX_RING_MAX;
1192 ring->tx_max_pending = BE_TX_RING_MAX;
1193 ring->rx_pending = priv->num_rx_ring[RAVB_BE];
1194 ring->tx_pending = priv->num_tx_ring[RAVB_BE];
1197 static int ravb_set_ringparam(struct net_device *ndev,
1198 struct ethtool_ringparam *ring)
1200 struct ravb_private *priv = netdev_priv(ndev);
1203 if (ring->tx_pending > BE_TX_RING_MAX ||
1204 ring->rx_pending > BE_RX_RING_MAX ||
1205 ring->tx_pending < BE_TX_RING_MIN ||
1206 ring->rx_pending < BE_RX_RING_MIN)
1208 if (ring->rx_mini_pending || ring->rx_jumbo_pending)
1211 if (netif_running(ndev)) {
1212 netif_device_detach(ndev);
1213 /* Stop PTP Clock driver */
1214 if (priv->chip_id == RCAR_GEN2)
1215 ravb_ptp_stop(ndev);
1216 /* Wait for DMA to stop */
1217 error = ravb_stop_dma(ndev);
1220 "cannot set ringparam! Any AVB processes are still running?\n");
1223 synchronize_irq(ndev->irq);
1225 /* Free all the skb's in the RX queue and the DMA buffers. */
1226 ravb_ring_free(ndev, RAVB_BE);
1227 ravb_ring_free(ndev, RAVB_NC);
1230 /* Set new parameters */
1231 priv->num_rx_ring[RAVB_BE] = ring->rx_pending;
1232 priv->num_tx_ring[RAVB_BE] = ring->tx_pending;
1234 if (netif_running(ndev)) {
1235 error = ravb_dmac_init(ndev);
1238 "%s: ravb_dmac_init() failed, error %d\n",
1243 ravb_emac_init(ndev);
1245 /* Initialise PTP Clock driver */
1246 if (priv->chip_id == RCAR_GEN2)
1247 ravb_ptp_init(ndev, priv->pdev);
1249 netif_device_attach(ndev);
1255 static int ravb_get_ts_info(struct net_device *ndev,
1256 struct ethtool_ts_info *info)
1258 struct ravb_private *priv = netdev_priv(ndev);
1260 info->so_timestamping =
1261 SOF_TIMESTAMPING_TX_SOFTWARE |
1262 SOF_TIMESTAMPING_RX_SOFTWARE |
1263 SOF_TIMESTAMPING_SOFTWARE |
1264 SOF_TIMESTAMPING_TX_HARDWARE |
1265 SOF_TIMESTAMPING_RX_HARDWARE |
1266 SOF_TIMESTAMPING_RAW_HARDWARE;
1267 info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
1269 (1 << HWTSTAMP_FILTER_NONE) |
1270 (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
1271 (1 << HWTSTAMP_FILTER_ALL);
1272 info->phc_index = ptp_clock_index(priv->ptp.clock);
1277 static void ravb_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
1279 struct ravb_private *priv = netdev_priv(ndev);
1281 wol->supported = WAKE_MAGIC;
1282 wol->wolopts = priv->wol_enabled ? WAKE_MAGIC : 0;
1285 static int ravb_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
1287 struct ravb_private *priv = netdev_priv(ndev);
1289 if (wol->wolopts & ~WAKE_MAGIC)
1292 priv->wol_enabled = !!(wol->wolopts & WAKE_MAGIC);
1294 device_set_wakeup_enable(&priv->pdev->dev, priv->wol_enabled);
1299 static const struct ethtool_ops ravb_ethtool_ops = {
1300 .nway_reset = phy_ethtool_nway_reset,
1301 .get_msglevel = ravb_get_msglevel,
1302 .set_msglevel = ravb_set_msglevel,
1303 .get_link = ethtool_op_get_link,
1304 .get_strings = ravb_get_strings,
1305 .get_ethtool_stats = ravb_get_ethtool_stats,
1306 .get_sset_count = ravb_get_sset_count,
1307 .get_ringparam = ravb_get_ringparam,
1308 .set_ringparam = ravb_set_ringparam,
1309 .get_ts_info = ravb_get_ts_info,
1310 .get_link_ksettings = phy_ethtool_get_link_ksettings,
1311 .set_link_ksettings = phy_ethtool_set_link_ksettings,
1312 .get_wol = ravb_get_wol,
1313 .set_wol = ravb_set_wol,
1316 static inline int ravb_hook_irq(unsigned int irq, irq_handler_t handler,
1317 struct net_device *ndev, struct device *dev,
1323 name = devm_kasprintf(dev, GFP_KERNEL, "%s:%s", ndev->name, ch);
1326 error = request_irq(irq, handler, 0, name, ndev);
1328 netdev_err(ndev, "cannot request IRQ %s\n", name);
1333 /* Network device open function for Ethernet AVB */
1334 static int ravb_open(struct net_device *ndev)
1336 struct ravb_private *priv = netdev_priv(ndev);
1337 struct platform_device *pdev = priv->pdev;
1338 struct device *dev = &pdev->dev;
1341 napi_enable(&priv->napi[RAVB_BE]);
1342 napi_enable(&priv->napi[RAVB_NC]);
1344 if (priv->chip_id == RCAR_GEN2) {
1345 error = request_irq(ndev->irq, ravb_interrupt, IRQF_SHARED,
1348 netdev_err(ndev, "cannot request IRQ\n");
1352 error = ravb_hook_irq(ndev->irq, ravb_multi_interrupt, ndev,
1356 error = ravb_hook_irq(priv->emac_irq, ravb_emac_interrupt, ndev,
1360 error = ravb_hook_irq(priv->rx_irqs[RAVB_BE], ravb_be_interrupt,
1361 ndev, dev, "ch0:rx_be");
1363 goto out_free_irq_emac;
1364 error = ravb_hook_irq(priv->tx_irqs[RAVB_BE], ravb_be_interrupt,
1365 ndev, dev, "ch18:tx_be");
1367 goto out_free_irq_be_rx;
1368 error = ravb_hook_irq(priv->rx_irqs[RAVB_NC], ravb_nc_interrupt,
1369 ndev, dev, "ch1:rx_nc");
1371 goto out_free_irq_be_tx;
1372 error = ravb_hook_irq(priv->tx_irqs[RAVB_NC], ravb_nc_interrupt,
1373 ndev, dev, "ch19:tx_nc");
1375 goto out_free_irq_nc_rx;
1379 error = ravb_dmac_init(ndev);
1381 goto out_free_irq_nc_tx;
1382 ravb_emac_init(ndev);
1384 /* Initialise PTP Clock driver */
1385 if (priv->chip_id == RCAR_GEN2)
1386 ravb_ptp_init(ndev, priv->pdev);
1388 netif_tx_start_all_queues(ndev);
1390 /* PHY control start */
1391 error = ravb_phy_start(ndev);
1398 /* Stop PTP Clock driver */
1399 if (priv->chip_id == RCAR_GEN2)
1400 ravb_ptp_stop(ndev);
1402 if (priv->chip_id == RCAR_GEN2)
1404 free_irq(priv->tx_irqs[RAVB_NC], ndev);
1406 free_irq(priv->rx_irqs[RAVB_NC], ndev);
1408 free_irq(priv->tx_irqs[RAVB_BE], ndev);
1410 free_irq(priv->rx_irqs[RAVB_BE], ndev);
1412 free_irq(priv->emac_irq, ndev);
1414 free_irq(ndev->irq, ndev);
1416 napi_disable(&priv->napi[RAVB_NC]);
1417 napi_disable(&priv->napi[RAVB_BE]);
1421 /* Timeout function for Ethernet AVB */
1422 static void ravb_tx_timeout(struct net_device *ndev, unsigned int txqueue)
1424 struct ravb_private *priv = netdev_priv(ndev);
1426 netif_err(priv, tx_err, ndev,
1427 "transmit timed out, status %08x, resetting...\n",
1428 ravb_read(ndev, ISS));
1430 /* Bump the tx_errors counter */
1431 ndev->stats.tx_errors++;
1433 schedule_work(&priv->work);
1436 static void ravb_tx_timeout_work(struct work_struct *work)
1438 struct ravb_private *priv = container_of(work, struct ravb_private,
1440 struct net_device *ndev = priv->ndev;
1443 netif_tx_stop_all_queues(ndev);
1445 /* Stop PTP Clock driver */
1446 if (priv->chip_id == RCAR_GEN2)
1447 ravb_ptp_stop(ndev);
1449 /* Wait for DMA to stop */
1450 if (ravb_stop_dma(ndev)) {
1451 /* If ravb_stop_dma() fails, the hardware is still operating
1452 * for TX and/or RX, so the functions below must not be
1453 * called, because ravb_dmac_init() could fail too. Nor
1454 * should ravb_stop_dma() be retried again and again here,
1455 * since that could wait forever. So this just re-enables
1456 * TX and RX and skips the following
1457 * re-initialization procedure.
1459 ravb_rcv_snd_enable(ndev);
1463 ravb_ring_free(ndev, RAVB_BE);
1464 ravb_ring_free(ndev, RAVB_NC);
1467 error = ravb_dmac_init(ndev);
1469 /* If ravb_dmac_init() fails, descriptors are freed, so this
1470 * should return here to avoid re-enabling TX and RX in
1473 netdev_err(ndev, "%s: ravb_dmac_init() failed, error %d\n",
1477 ravb_emac_init(ndev);
1480 /* Initialise PTP Clock driver */
1481 if (priv->chip_id == RCAR_GEN2)
1482 ravb_ptp_init(ndev, priv->pdev);
1484 netif_tx_start_all_queues(ndev);
1487 /* Packet transmit function for Ethernet AVB */
1488 static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev)
1490 struct ravb_private *priv = netdev_priv(ndev);
1491 int num_tx_desc = priv->num_tx_desc;
1492 u16 q = skb_get_queue_mapping(skb);
1493 struct ravb_tstamp_skb *ts_skb;
1494 struct ravb_tx_desc *desc;
1495 unsigned long flags;
1501 spin_lock_irqsave(&priv->lock, flags);
1502 if (priv->cur_tx[q] - priv->dirty_tx[q] > (priv->num_tx_ring[q] - 1) *
1504 netif_err(priv, tx_queued, ndev,
1505 "still transmitting with the full ring!\n");
1506 netif_stop_subqueue(ndev, q);
1507 spin_unlock_irqrestore(&priv->lock, flags);
1508 return NETDEV_TX_BUSY;
1511 if (skb_put_padto(skb, ETH_ZLEN))
1514 entry = priv->cur_tx[q] % (priv->num_tx_ring[q] * num_tx_desc);
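/* entry counts descriptors; entry / num_tx_desc indexes the per-packet skb table. */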
1515 priv->tx_skb[q][entry / num_tx_desc] = skb;
1517 if (num_tx_desc > 1) {
1518 buffer = PTR_ALIGN(priv->tx_align[q], DPTR_ALIGN) +
1519 entry / num_tx_desc * DPTR_ALIGN;
1520 len = PTR_ALIGN(skb->data, DPTR_ALIGN) - skb->data;
1522 /* Zero length DMA descriptors are problematic as they seem
1523 * to terminate DMA transfers. Avoid them by simply using a
1524 * length of DPTR_ALIGN (4) when skb data is aligned to
1527 * As skb is guaranteed to have at least ETH_ZLEN (60)
1528 * bytes of data by the call to skb_put_padto() above this
1529 * is safe with respect to both the length of the first DMA
1530 * descriptor (len) overflowing the available data and the
1531 * length of the second DMA descriptor (skb->len - len)
1537 memcpy(buffer, skb->data, len);
1538 dma_addr = dma_map_single(ndev->dev.parent, buffer, len,
1540 if (dma_mapping_error(ndev->dev.parent, dma_addr))
1543 desc = &priv->tx_ring[q][entry];
1544 desc->ds_tagl = cpu_to_le16(len);
1545 desc->dptr = cpu_to_le32(dma_addr);
1547 buffer = skb->data + len;
1548 len = skb->len - len;
1549 dma_addr = dma_map_single(ndev->dev.parent, buffer, len,
1551 if (dma_mapping_error(ndev->dev.parent, dma_addr))
1556 desc = &priv->tx_ring[q][entry];
1558 dma_addr = dma_map_single(ndev->dev.parent, skb->data, skb->len,
1560 if (dma_mapping_error(ndev->dev.parent, dma_addr))
1563 desc->ds_tagl = cpu_to_le16(len);
1564 desc->dptr = cpu_to_le32(dma_addr);
1566 /* TX timestamp required */
1568 ts_skb = kmalloc(sizeof(*ts_skb), GFP_ATOMIC);
1570 if (num_tx_desc > 1) {
1572 dma_unmap_single(ndev->dev.parent, dma_addr,
1573 len, DMA_TO_DEVICE);
1577 ts_skb->skb = skb_get(skb);
1578 ts_skb->tag = priv->ts_skb_tag++;
1579 priv->ts_skb_tag &= 0x3ff;
1580 list_add_tail(&ts_skb->list, &priv->ts_skb_list);
1582 /* TAG and timestamp required flag */
1583 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
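/* The 10-bit timestamp tag is split: the upper 6 bits go into tagh_tsr, the lower 4 bits into the top of ds_tagl. */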
1584 desc->tagh_tsr = (ts_skb->tag >> 4) | TX_TSR;
1585 desc->ds_tagl |= cpu_to_le16(ts_skb->tag << 12);
1588 skb_tx_timestamp(skb);
1589 /* Descriptor type must be set after all the above writes */
1591 if (num_tx_desc > 1) {
1592 desc->die_dt = DT_FEND;
1594 desc->die_dt = DT_FSTART;
1596 desc->die_dt = DT_FSINGLE;
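/* Set the transmit start request for this queue. */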
1598 ravb_modify(ndev, TCCR, TCCR_TSRQ0 << q, TCCR_TSRQ0 << q);
1600 priv->cur_tx[q] += num_tx_desc;
1601 if (priv->cur_tx[q] - priv->dirty_tx[q] >
1602 (priv->num_tx_ring[q] - 1) * num_tx_desc &&
1603 !ravb_tx_free(ndev, q, true))
1604 netif_stop_subqueue(ndev, q);
1607 spin_unlock_irqrestore(&priv->lock, flags);
1608 return NETDEV_TX_OK;
1611 dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr),
1612 le16_to_cpu(desc->ds_tagl), DMA_TO_DEVICE);
1614 dev_kfree_skb_any(skb);
1615 priv->tx_skb[q][entry / num_tx_desc] = NULL;
1619 static u16 ravb_select_queue(struct net_device *ndev, struct sk_buff *skb,
1620 struct net_device *sb_dev)
1622 /* If the skb needs a TX timestamp, it is handled on the network control queue */
1623 return (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) ? RAVB_NC :
1628 static struct net_device_stats *ravb_get_stats(struct net_device *ndev)
1630 struct ravb_private *priv = netdev_priv(ndev);
1631 struct net_device_stats *nstats, *stats0, *stats1;
1633 nstats = &ndev->stats;
1634 stats0 = &priv->stats[RAVB_BE];
1635 stats1 = &priv->stats[RAVB_NC];
1637 if (priv->chip_id == RCAR_GEN3) {
1638 nstats->tx_dropped += ravb_read(ndev, TROCR);
1639 ravb_write(ndev, 0, TROCR); /* (write clear) */
1642 nstats->rx_packets = stats0->rx_packets + stats1->rx_packets;
1643 nstats->tx_packets = stats0->tx_packets + stats1->tx_packets;
1644 nstats->rx_bytes = stats0->rx_bytes + stats1->rx_bytes;
1645 nstats->tx_bytes = stats0->tx_bytes + stats1->tx_bytes;
1646 nstats->multicast = stats0->multicast + stats1->multicast;
1647 nstats->rx_errors = stats0->rx_errors + stats1->rx_errors;
1648 nstats->rx_crc_errors = stats0->rx_crc_errors + stats1->rx_crc_errors;
1649 nstats->rx_frame_errors =
1650 stats0->rx_frame_errors + stats1->rx_frame_errors;
1651 nstats->rx_length_errors =
1652 stats0->rx_length_errors + stats1->rx_length_errors;
1653 nstats->rx_missed_errors =
1654 stats0->rx_missed_errors + stats1->rx_missed_errors;
1655 nstats->rx_over_errors =
1656 stats0->rx_over_errors + stats1->rx_over_errors;
1661 /* Update promiscuous bit */
1662 static void ravb_set_rx_mode(struct net_device *ndev)
1664 struct ravb_private *priv = netdev_priv(ndev);
1665 unsigned long flags;
1667 spin_lock_irqsave(&priv->lock, flags);
1668 ravb_modify(ndev, ECMR, ECMR_PRM,
1669 ndev->flags & IFF_PROMISC ? ECMR_PRM : 0);
1670 spin_unlock_irqrestore(&priv->lock, flags);
1673 /* Device close function for Ethernet AVB */
1674 static int ravb_close(struct net_device *ndev)
1676 struct device_node *np = ndev->dev.parent->of_node;
1677 struct ravb_private *priv = netdev_priv(ndev);
1678 struct ravb_tstamp_skb *ts_skb, *ts_skb2;
1680 netif_tx_stop_all_queues(ndev);
1682 /* Disable interrupts by clearing the interrupt masks. */
1683 ravb_write(ndev, 0, RIC0);
1684 ravb_write(ndev, 0, RIC2);
1685 ravb_write(ndev, 0, TIC);
1687 /* Stop PTP Clock driver */
1688 if (priv->chip_id == RCAR_GEN2)
1689 ravb_ptp_stop(ndev);
1691 /* Set the config mode to stop the AVB-DMAC's processes */
1692 if (ravb_stop_dma(ndev) < 0)
1694 "device will be stopped after h/w processes are done.\n");
1696 /* Clear the timestamp list */
1697 list_for_each_entry_safe(ts_skb, ts_skb2, &priv->ts_skb_list, list) {
1698 list_del(&ts_skb->list);
1699 kfree_skb(ts_skb->skb);
1703 /* PHY disconnect */
1705 phy_stop(ndev->phydev);
1706 phy_disconnect(ndev->phydev);
1707 if (of_phy_is_fixed_link(np))
1708 of_phy_deregister_fixed_link(np);
1711 if (priv->chip_id != RCAR_GEN2) {
1712 free_irq(priv->tx_irqs[RAVB_NC], ndev);
1713 free_irq(priv->rx_irqs[RAVB_NC], ndev);
1714 free_irq(priv->tx_irqs[RAVB_BE], ndev);
1715 free_irq(priv->rx_irqs[RAVB_BE], ndev);
1716 free_irq(priv->emac_irq, ndev);
1718 free_irq(ndev->irq, ndev);
1720 napi_disable(&priv->napi[RAVB_NC]);
1721 napi_disable(&priv->napi[RAVB_BE]);
1723 /* Free all the skb's in the RX queue and the DMA buffers. */
1724 ravb_ring_free(ndev, RAVB_BE);
1725 ravb_ring_free(ndev, RAVB_NC);
1730 static int ravb_hwtstamp_get(struct net_device *ndev, struct ifreq *req)
1732 struct ravb_private *priv = netdev_priv(ndev);
1733 struct hwtstamp_config config;
1736 config.tx_type = priv->tstamp_tx_ctrl ? HWTSTAMP_TX_ON :
1738 switch (priv->tstamp_rx_ctrl & RAVB_RXTSTAMP_TYPE) {
1739 case RAVB_RXTSTAMP_TYPE_V2_L2_EVENT:
1740 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
1742 case RAVB_RXTSTAMP_TYPE_ALL:
1743 config.rx_filter = HWTSTAMP_FILTER_ALL;
1746 config.rx_filter = HWTSTAMP_FILTER_NONE;
1749 return copy_to_user(req->ifr_data, &config, sizeof(config)) ?
1753 /* Control hardware time stamping */
1754 static int ravb_hwtstamp_set(struct net_device *ndev, struct ifreq *req)
1756 struct ravb_private *priv = netdev_priv(ndev);
1757 struct hwtstamp_config config;
1758 u32 tstamp_rx_ctrl = RAVB_RXTSTAMP_ENABLED;
1761 if (copy_from_user(&config, req->ifr_data, sizeof(config)))
1764 /* Reserved for future extensions */
1768 switch (config.tx_type) {
1769 case HWTSTAMP_TX_OFF:
1772 case HWTSTAMP_TX_ON:
1773 tstamp_tx_ctrl = RAVB_TXTSTAMP_ENABLED;
1779 switch (config.rx_filter) {
1780 case HWTSTAMP_FILTER_NONE:
1783 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
1784 tstamp_rx_ctrl |= RAVB_RXTSTAMP_TYPE_V2_L2_EVENT;
1787 config.rx_filter = HWTSTAMP_FILTER_ALL;
1788 tstamp_rx_ctrl |= RAVB_RXTSTAMP_TYPE_ALL;
1791 priv->tstamp_tx_ctrl = tstamp_tx_ctrl;
1792 priv->tstamp_rx_ctrl = tstamp_rx_ctrl;
1794 return copy_to_user(req->ifr_data, &config, sizeof(config)) ?
1798 /* ioctl handler for the device */
1799 static int ravb_do_ioctl(struct net_device *ndev, struct ifreq *req, int cmd)
1801 struct phy_device *phydev = ndev->phydev;
1803 if (!netif_running(ndev))
1811 return ravb_hwtstamp_get(ndev, req);
1813 return ravb_hwtstamp_set(ndev, req);
1816 return phy_mii_ioctl(phydev, req, cmd);
1819 static int ravb_change_mtu(struct net_device *ndev, int new_mtu)
1821 struct ravb_private *priv = netdev_priv(ndev);
1823 ndev->mtu = new_mtu;
1825 if (netif_running(ndev)) {
1826 synchronize_irq(priv->emac_irq);
1827 ravb_emac_init(ndev);
1830 netdev_update_features(ndev);
1835 static void ravb_set_rx_csum(struct net_device *ndev, bool enable)
1837 struct ravb_private *priv = netdev_priv(ndev);
1838 unsigned long flags;
1840 spin_lock_irqsave(&priv->lock, flags);
1842 /* Disable TX and RX */
1843 ravb_rcv_snd_disable(ndev);
1845 /* Modify RX Checksum setting */
1846 ravb_modify(ndev, ECMR, ECMR_RCSC, enable ? ECMR_RCSC : 0);
1848 /* Enable TX and RX */
1849 ravb_rcv_snd_enable(ndev);
1851 spin_unlock_irqrestore(&priv->lock, flags);
1854 static int ravb_set_features(struct net_device *ndev,
1855 netdev_features_t features)
1857 netdev_features_t changed = ndev->features ^ features;
1859 if (changed & NETIF_F_RXCSUM)
1860 ravb_set_rx_csum(ndev, features & NETIF_F_RXCSUM);
1862 ndev->features = features;
1867 static const struct net_device_ops ravb_netdev_ops = {
1868 .ndo_open = ravb_open,
1869 .ndo_stop = ravb_close,
1870 .ndo_start_xmit = ravb_start_xmit,
1871 .ndo_select_queue = ravb_select_queue,
1872 .ndo_get_stats = ravb_get_stats,
1873 .ndo_set_rx_mode = ravb_set_rx_mode,
1874 .ndo_tx_timeout = ravb_tx_timeout,
1875 .ndo_do_ioctl = ravb_do_ioctl,
1876 .ndo_change_mtu = ravb_change_mtu,
1877 .ndo_validate_addr = eth_validate_addr,
1878 .ndo_set_mac_address = eth_mac_addr,
1879 .ndo_set_features = ravb_set_features,
1882 /* MDIO bus init function */
1883 static int ravb_mdio_init(struct ravb_private *priv)
1885 struct platform_device *pdev = priv->pdev;
1886 struct device *dev = &pdev->dev;
1890 priv->mdiobb.ops = &bb_ops;
1892 /* MII controller setting */
1893 priv->mii_bus = alloc_mdio_bitbang(&priv->mdiobb);
1897 /* Hook up MII support for ethtool */
1898 priv->mii_bus->name = "ravb_mii";
1899 priv->mii_bus->parent = dev;
1900 snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
1901 pdev->name, pdev->id);
1903 /* Register MDIO bus */
1904 error = of_mdiobus_register(priv->mii_bus, dev->of_node);
1911 free_mdio_bitbang(priv->mii_bus);
1915 /* MDIO bus release function */
1916 static int ravb_mdio_release(struct ravb_private *priv)
1918 /* Unregister mdio bus */
1919 mdiobus_unregister(priv->mii_bus);
1921 /* Free bitbang info */
1922 free_mdio_bitbang(priv->mii_bus);
1927 static const struct of_device_id ravb_match_table[] = {
1928 { .compatible = "renesas,etheravb-r8a7790", .data = (void *)RCAR_GEN2 },
1929 { .compatible = "renesas,etheravb-r8a7794", .data = (void *)RCAR_GEN2 },
1930 { .compatible = "renesas,etheravb-rcar-gen2", .data = (void *)RCAR_GEN2 },
1931 { .compatible = "renesas,etheravb-r8a7795", .data = (void *)RCAR_GEN3 },
1932 { .compatible = "renesas,etheravb-rcar-gen3", .data = (void *)RCAR_GEN3 },
1935 MODULE_DEVICE_TABLE(of, ravb_match_table);
1937 static int ravb_set_gti(struct net_device *ndev)
1939 struct ravb_private *priv = netdev_priv(ndev);
1940 struct device *dev = ndev->dev.parent;
1944 rate = clk_get_rate(priv->clk);
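/* GTI.TIV is the gPTP timer increment in nanoseconds, expressed as a fixed-point value with 20 fractional bits. */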
1948 inc = 1000000000ULL << 20;
1951 if (inc < GTI_TIV_MIN || inc > GTI_TIV_MAX) {
1952 dev_err(dev, "gti.tiv increment 0x%llx is outside the range 0x%x - 0x%x\n",
1953 inc, GTI_TIV_MIN, GTI_TIV_MAX);
1957 ravb_write(ndev, inc, GTI);
1962 static void ravb_set_config_mode(struct net_device *ndev)
1964 struct ravb_private *priv = netdev_priv(ndev);
1966 if (priv->chip_id == RCAR_GEN2) {
1967 ravb_modify(ndev, CCC, CCC_OPC, CCC_OPC_CONFIG);
1968 /* Set CSEL value */
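/* CSEL selects the clock fed to the gPTP timer; the HPB (peripheral bus) clock is chosen here. */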
1969 ravb_modify(ndev, CCC, CCC_CSEL, CCC_CSEL_HPB);
1971 ravb_modify(ndev, CCC, CCC_OPC, CCC_OPC_CONFIG |
1972 CCC_GAC | CCC_CSEL_HPB);
1976 static const struct soc_device_attribute ravb_delay_mode_quirk_match[] = {
1977 { .soc_id = "r8a774c0" },
1978 { .soc_id = "r8a77990" },
1979 { .soc_id = "r8a77995" },
1983 /* Set TX and RX clock internal delay modes */
1984 static void ravb_parse_delay_mode(struct device_node *np, struct net_device *ndev)
1986 struct ravb_private *priv = netdev_priv(ndev);
1987 bool explicit_delay = false;
1990 if (!of_property_read_u32(np, "rx-internal-delay-ps", &delay)) {
1991 /* Valid values are 0 and 1800, according to DT bindings */
1992 priv->rxcidm = !!delay;
1993 explicit_delay = true;
1995 if (!of_property_read_u32(np, "tx-internal-delay-ps", &delay)) {
1996 /* Valid values are 0 and 2000, according to DT bindings */
1997 priv->txcidm = !!delay;
1998 explicit_delay = true;
2004 /* Fall back to legacy rgmii-*id behavior */
2005 if (priv->phy_interface == PHY_INTERFACE_MODE_RGMII_ID ||
2006 priv->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID) {
2008 priv->rgmii_override = 1;
2011 if (priv->phy_interface == PHY_INTERFACE_MODE_RGMII_ID ||
2012 priv->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID) {
2013 if (!WARN(soc_device_match(ravb_delay_mode_quirk_match),
2014 "phy-mode %s requires TX clock internal delay mode which is not supported by this hardware revision. Please update device tree",
2015 phy_modes(priv->phy_interface))) {
2017 priv->rgmii_override = 1;
2022 static void ravb_set_delay_mode(struct net_device *ndev)
2024 struct ravb_private *priv = netdev_priv(ndev);
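/* APSR.RDM/TDM enable the RX/TX clock internal delays selected by ravb_parse_delay_mode(). */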
2031 ravb_modify(ndev, APSR, APSR_RDM | APSR_TDM, set);
2034 static int ravb_probe(struct platform_device *pdev)
2036 struct device_node *np = pdev->dev.of_node;
2037 struct ravb_private *priv;
2038 enum ravb_chip_id chip_id;
2039 struct net_device *ndev;
2041 struct resource *res;
2046 "this driver is required to be instantiated from device tree\n");
2050 ndev = alloc_etherdev_mqs(sizeof(struct ravb_private),
2051 NUM_TX_QUEUE, NUM_RX_QUEUE);
2055 ndev->features = NETIF_F_RXCSUM;
2056 ndev->hw_features = NETIF_F_RXCSUM;
2058 pm_runtime_enable(&pdev->dev);
2059 pm_runtime_get_sync(&pdev->dev);
2061 chip_id = (enum ravb_chip_id)of_device_get_match_data(&pdev->dev);
2063 if (chip_id == RCAR_GEN3)
2064 irq = platform_get_irq_byname(pdev, "ch22");
2066 irq = platform_get_irq(pdev, 0);
2073 SET_NETDEV_DEV(ndev, &pdev->dev);
2075 priv = netdev_priv(ndev);
2078 priv->num_tx_ring[RAVB_BE] = BE_TX_RING_SIZE;
2079 priv->num_rx_ring[RAVB_BE] = BE_RX_RING_SIZE;
2080 priv->num_tx_ring[RAVB_NC] = NC_TX_RING_SIZE;
2081 priv->num_rx_ring[RAVB_NC] = NC_RX_RING_SIZE;
2082 priv->addr = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
2083 if (IS_ERR(priv->addr)) {
2084 error = PTR_ERR(priv->addr);
2088 /* The Ether-specific entries in the device structure. */
2089 ndev->base_addr = res->start;
2091 spin_lock_init(&priv->lock);
2092 INIT_WORK(&priv->work, ravb_tx_timeout_work);
2094 error = of_get_phy_mode(np, &priv->phy_interface);
2095 if (error && error != -ENODEV)
2098 priv->no_avb_link = of_property_read_bool(np, "renesas,no-ether-link");
2099 priv->avb_link_active_low =
2100 of_property_read_bool(np, "renesas,ether-link-active-low");
2102 if (chip_id == RCAR_GEN3) {
2103 irq = platform_get_irq_byname(pdev, "ch24");
2108 priv->emac_irq = irq;
2109 for (i = 0; i < NUM_RX_QUEUE; i++) {
2110 irq = platform_get_irq_byname(pdev, ravb_rx_irqs[i]);
2115 priv->rx_irqs[i] = irq;
2117 for (i = 0; i < NUM_TX_QUEUE; i++) {
2118 irq = platform_get_irq_byname(pdev, ravb_tx_irqs[i]);
2123 priv->tx_irqs[i] = irq;
2127 priv->chip_id = chip_id;
2129 priv->clk = devm_clk_get(&pdev->dev, NULL);
2130 if (IS_ERR(priv->clk)) {
2131 error = PTR_ERR(priv->clk);
2135 priv->refclk = devm_clk_get_optional(&pdev->dev, "refclk");
2136 if (IS_ERR(priv->refclk)) {
2137 error = PTR_ERR(priv->refclk);
2140 clk_prepare_enable(priv->refclk);
2142 ndev->max_mtu = 2048 - (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN);
2143 ndev->min_mtu = ETH_MIN_MTU;
2145 priv->num_tx_desc = chip_id == RCAR_GEN2 ?
2146 NUM_TX_DESC_GEN2 : NUM_TX_DESC_GEN3;
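/* R-Car Gen2 sends each packet as two descriptors (an aligned copy of the head plus the remainder); Gen3 can use a single descriptor. */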
2149 ndev->netdev_ops = &ravb_netdev_ops;
2150 ndev->ethtool_ops = &ravb_ethtool_ops;
2152 /* Set AVB config mode */
2153 ravb_set_config_mode(ndev);
2156 error = ravb_set_gti(ndev);
2158 goto out_disable_refclk;
2160 /* Request GTI loading */
2161 ravb_modify(ndev, GCCR, GCCR_LTI, GCCR_LTI);
2163 if (priv->chip_id != RCAR_GEN2) {
2164 ravb_parse_delay_mode(np, ndev);
2165 ravb_set_delay_mode(ndev);
2168 /* Allocate descriptor base address table */
2169 priv->desc_bat_size = sizeof(struct ravb_desc) * DBAT_ENTRY_NUM;
2170 priv->desc_bat = dma_alloc_coherent(ndev->dev.parent, priv->desc_bat_size,
2171 &priv->desc_bat_dma, GFP_KERNEL);
2172 if (!priv->desc_bat) {
2174 "Cannot allocate desc base address table (size %d bytes)\n",
2175 priv->desc_bat_size);
2177 goto out_disable_refclk;
2179 for (q = RAVB_BE; q < DBAT_ENTRY_NUM; q++)
2180 priv->desc_bat[q].die_dt = DT_EOS;
2181 ravb_write(ndev, priv->desc_bat_dma, DBAT);
2183 /* Initialise HW timestamp list */
2184 INIT_LIST_HEAD(&priv->ts_skb_list);
2186 /* Initialise PTP Clock driver */
2187 if (chip_id != RCAR_GEN2)
2188 ravb_ptp_init(ndev, pdev);
2190 /* Debug message level */
2191 priv->msg_enable = RAVB_DEF_MSG_ENABLE;
2193 /* Read and set MAC address */
2194 ravb_read_mac_address(np, ndev);
2195 if (!is_valid_ether_addr(ndev->dev_addr)) {
2196 dev_warn(&pdev->dev,
2197 "no valid MAC address supplied, using a random one\n");
2198 eth_hw_addr_random(ndev);
2202 error = ravb_mdio_init(priv);
2204 dev_err(&pdev->dev, "failed to initialize MDIO\n");
2208 netif_napi_add(ndev, &priv->napi[RAVB_BE], ravb_poll, 64);
2209 netif_napi_add(ndev, &priv->napi[RAVB_NC], ravb_poll, 64);
2211 /* Network device register */
2212 error = register_netdev(ndev);
2216 device_set_wakeup_capable(&pdev->dev, 1);
2218 /* Print device information */
2219 netdev_info(ndev, "Base address at %#x, %pM, IRQ %d.\n",
2220 (u32)ndev->base_addr, ndev->dev_addr, ndev->irq);
2222 platform_set_drvdata(pdev, ndev);
2227 netif_napi_del(&priv->napi[RAVB_NC]);
2228 netif_napi_del(&priv->napi[RAVB_BE]);
2229 ravb_mdio_release(priv);
2231 dma_free_coherent(ndev->dev.parent, priv->desc_bat_size, priv->desc_bat,
2232 priv->desc_bat_dma);
2234 /* Stop PTP Clock driver */
2235 if (chip_id != RCAR_GEN2)
2236 ravb_ptp_stop(ndev);
2238 clk_disable_unprepare(priv->refclk);
2242 pm_runtime_put(&pdev->dev);
2243 pm_runtime_disable(&pdev->dev);
2247 static int ravb_remove(struct platform_device *pdev)
2249 struct net_device *ndev = platform_get_drvdata(pdev);
2250 struct ravb_private *priv = netdev_priv(ndev);
2252 /* Stop PTP Clock driver */
2253 if (priv->chip_id != RCAR_GEN2)
2254 ravb_ptp_stop(ndev);
2256 clk_disable_unprepare(priv->refclk);
2258 dma_free_coherent(ndev->dev.parent, priv->desc_bat_size, priv->desc_bat,
2259 priv->desc_bat_dma);
2260 /* Set reset mode */
2261 ravb_write(ndev, CCC_OPC_RESET, CCC);
2262 pm_runtime_put_sync(&pdev->dev);
2263 unregister_netdev(ndev);
2264 netif_napi_del(&priv->napi[RAVB_NC]);
2265 netif_napi_del(&priv->napi[RAVB_BE]);
2266 ravb_mdio_release(priv);
2267 pm_runtime_disable(&pdev->dev);
2269 platform_set_drvdata(pdev, NULL);
2274 static int ravb_wol_setup(struct net_device *ndev)
2276 struct ravb_private *priv = netdev_priv(ndev);
2278 /* Disable interrupts by clearing the interrupt masks. */
2279 ravb_write(ndev, 0, RIC0);
2280 ravb_write(ndev, 0, RIC2);
2281 ravb_write(ndev, 0, TIC);
2283 /* Only allow ECI interrupts */
2284 synchronize_irq(priv->emac_irq);
2285 napi_disable(&priv->napi[RAVB_NC]);
2286 napi_disable(&priv->napi[RAVB_BE]);
2287 ravb_write(ndev, ECSIPR_MPDIP, ECSIPR);
2289 /* Enable MagicPacket */
2290 ravb_modify(ndev, ECMR, ECMR_MPDE, ECMR_MPDE);
2292 return enable_irq_wake(priv->emac_irq);
2295 static int ravb_wol_restore(struct net_device *ndev)
2297 struct ravb_private *priv = netdev_priv(ndev);
2300 napi_enable(&priv->napi[RAVB_NC]);
2301 napi_enable(&priv->napi[RAVB_BE]);
2303 /* Disable MagicPacket */
2304 ravb_modify(ndev, ECMR, ECMR_MPDE, 0);
2306 ret = ravb_close(ndev);
2310 return disable_irq_wake(priv->emac_irq);
2313 static int __maybe_unused ravb_suspend(struct device *dev)
2315 struct net_device *ndev = dev_get_drvdata(dev);
2316 struct ravb_private *priv = netdev_priv(ndev);
2319 if (!netif_running(ndev))
2322 netif_device_detach(ndev);
2324 if (priv->wol_enabled)
2325 ret = ravb_wol_setup(ndev);
2327 ret = ravb_close(ndev);
2332 static int __maybe_unused ravb_resume(struct device *dev)
2334 struct net_device *ndev = dev_get_drvdata(dev);
2335 struct ravb_private *priv = netdev_priv(ndev);
2338 /* If WoL is enabled, set reset mode to rearm the WoL logic */
2339 if (priv->wol_enabled)
2340 ravb_write(ndev, CCC_OPC_RESET, CCC);
2342 /* All registers have been reset to their default values.
2343 * Restore all registers that were set up at probe time and
2344 * reopen the device if it was running before the system suspended.
2347 /* Set AVB config mode */
2348 ravb_set_config_mode(ndev);
2351 ret = ravb_set_gti(ndev);
2355 /* Request GTI loading */
2356 ravb_modify(ndev, GCCR, GCCR_LTI, GCCR_LTI);
2358 if (priv->chip_id != RCAR_GEN2)
2359 ravb_set_delay_mode(ndev);
2361 /* Restore descriptor base address table */
2362 ravb_write(ndev, priv->desc_bat_dma, DBAT);
2364 if (netif_running(ndev)) {
2365 if (priv->wol_enabled) {
2366 ret = ravb_wol_restore(ndev);
2370 ret = ravb_open(ndev);
2373 netif_device_attach(ndev);
2379 static int __maybe_unused ravb_runtime_nop(struct device *dev)
2381 /* Runtime PM callback shared between ->runtime_suspend()
2382 * and ->runtime_resume(). Simply returns success.
2384 * This driver re-initializes all registers after
2385 * pm_runtime_get_sync() anyway so there is no need
2386 * to save and restore registers here.
2391 static const struct dev_pm_ops ravb_dev_pm_ops = {
2392 SET_SYSTEM_SLEEP_PM_OPS(ravb_suspend, ravb_resume)
2393 SET_RUNTIME_PM_OPS(ravb_runtime_nop, ravb_runtime_nop, NULL)
2396 static struct platform_driver ravb_driver = {
2397 .probe = ravb_probe,
2398 .remove = ravb_remove,
2401 .pm = &ravb_dev_pm_ops,
2402 .of_match_table = ravb_match_table,
2406 module_platform_driver(ravb_driver);
2408 MODULE_AUTHOR("Mitsuhiro Kimura, Masaru Nagai");
2409 MODULE_DESCRIPTION("Renesas Ethernet AVB driver");
2410 MODULE_LICENSE("GPL v2");