// SPDX-License-Identifier: GPL-2.0-or-later
/* Freescale QUICC Engine HDLC Device Driver
 *
 * Copyright 2016 Freescale Semiconductor Inc.
 */

#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/hdlc.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/sched.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/stddef.h>
#include <soc/fsl/qe/qe_tdm.h>
#include <uapi/linux/if_arp.h>

#include "fsl_ucc_hdlc.h"
#define DRV_DESC "Freescale QE UCC HDLC Driver"
#define DRV_NAME "ucc_hdlc"

#define TDM_PPPOHT_SLIC_MAXIN
#define RX_BD_ERRORS (R_CD_S | R_OV_S | R_CR_S | R_AB_S | R_NO_S | R_LG_S)
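
/*
 * Template UCC/TDM configuration for one HDLC controller.
 * ucc_hdlc_probe() copies this into the per-UCC slot of utdm_info[]
 * below and then patches in the UCC number and the Rx/Tx clock sources
 * taken from the device tree.
 */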
static struct ucc_tdm_info utdm_primary_info = {
	.uf_info = {
		.tsa = 0,
		.cdp = 0,
		.cds = 1,
		.ctsp = 1,
		.ctss = 1,
		.revd = 0,
		.urfs = 256,
		.urfet = 128,
		.urfset = 192,
		.utfs = 112,
		.utfet = 98,
		.utftt = 0x40,
		.ufpt = 256,
		.mode = UCC_FAST_PROTOCOL_MODE_HDLC,
		.ttx_trx = UCC_FAST_GUMR_TRANSPARENT_TTX_TRX_NORMAL,
		.tenc = UCC_FAST_TX_ENCODING_NRZ,
		.renc = UCC_FAST_RX_ENCODING_NRZ,
		.tcrc = UCC_FAST_16_BIT_CRC,
		.synl = UCC_FAST_SYNC_LEN_NOT_USED,
	},

	.si_info = {
#ifdef TDM_PPPOHT_SLIC_MAXIN
		.simr_rfsd = 1,
		.simr_tfsd = 2,
#else
		.simr_rfsd = 0,
		.simr_tfsd = 0,
#endif
		.simr_crt = 0,
		.simr_sl = 0,
		.simr_ce = 1,
		.simr_fe = 1,
		.simr_gm = 0,
	},
};

static struct ucc_tdm_info utdm_info[UCC_MAX_NUM];
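
/*
 * One-time hardware setup for a UCC in HDLC mode: bring up the UCC fast
 * block, allocate the Rx/Tx buffer-descriptor (BD) rings and their DMA
 * buffers, and program the HDLC parameter RAM in MURAM (ring bases, CRC
 * preset/mask, frame length and address-recognition registers).
 */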
static int uhdlc_init(struct ucc_hdlc_private *priv)
{
	struct ucc_tdm_info *ut_info;
	struct ucc_fast_info *uf_info;
	u32 cecr_subblock;
	u16 bd_status;
	void *bd_buffer;
	dma_addr_t bd_dma_addr;
	s32 riptr;
	s32 tiptr;
	u32 gumr;
	int ret, i;

	ut_info = priv->ut_info;
	uf_info = &ut_info->uf_info;

	if (priv->tsa) {
		uf_info->tsa = 1;
		uf_info->ctsp = 1;
		uf_info->cds = 1;
		uf_info->ctss = 1;
	} else {
		uf_info->cds = 0;
		uf_info->ctsp = 0;
		uf_info->ctss = 0;
	}

	/* This sets the HPM field in the CMXUCR register, which configures
	 * an open-drain-connected HDLC bus
	 */
	if (priv->hdlc_bus)
		uf_info->brkpt_support = 1;

	uf_info->uccm_mask = ((UCC_HDLC_UCCE_RXB | UCC_HDLC_UCCE_RXF |
				UCC_HDLC_UCCE_TXB) << 16);

	ret = ucc_fast_init(uf_info, &priv->uccf);
	if (ret) {
		dev_err(priv->dev, "Failed to init uccf\n");
		return ret;
	}

	priv->uf_regs = priv->uccf->uf_regs;
	ucc_fast_disable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX);

	/* Loopback mode */
	if (priv->loopback) {
		dev_info(priv->dev, "Loopback Mode\n");
		/* use the same clock when working in loopback */
		qe_setbrg(ut_info->uf_info.rx_clock, 20000000, 1);

		gumr = ioread32be(&priv->uf_regs->gumr);
		gumr |= (UCC_FAST_GUMR_LOOPBACK | UCC_FAST_GUMR_CDS |
			 UCC_FAST_GUMR_TCI);
		gumr &= ~(UCC_FAST_GUMR_CTSP | UCC_FAST_GUMR_RSYN);
		iowrite32be(gumr, &priv->uf_regs->gumr);
	}

	/* Initialize SI */
	if (priv->tsa)
		ucc_tdm_init(priv->utdm, priv->ut_info);

	/* Write to QE CECR, UCCx channel to Stop Transmission */
	cecr_subblock = ucc_fast_get_qe_cr_subblock(uf_info->ucc_num);
	ret = qe_issue_cmd(QE_STOP_TX, cecr_subblock,
			   QE_CR_PROTOCOL_UNSPECIFIED, 0);

	/* Set UPSMR to normal mode (needs to be fixed) */
	iowrite32be(0, &priv->uf_regs->upsmr);

	/* hdlc_bus mode */
	if (priv->hdlc_bus) {
		u32 upsmr;

		dev_info(priv->dev, "HDLC bus Mode\n");
		upsmr = ioread32be(&priv->uf_regs->upsmr);

		/* bus mode and retransmit enable, with collision window
		 * set to 8 bytes
		 */
		upsmr |= UCC_HDLC_UPSMR_RTE | UCC_HDLC_UPSMR_BUS |
			 UCC_HDLC_UPSMR_CW8;
		iowrite32be(upsmr, &priv->uf_regs->upsmr);

		/* explicitly disable CDS & CTSP */
		gumr = ioread32be(&priv->uf_regs->gumr);
		gumr &= ~(UCC_FAST_GUMR_CDS | UCC_FAST_GUMR_CTSP);
		/* set automatic sync to explicitly ignore CD signal */
		gumr |= UCC_FAST_GUMR_SYNL_AUTO;
		iowrite32be(gumr, &priv->uf_regs->gumr);
	}

	priv->rx_ring_size = RX_BD_RING_LEN;
	priv->tx_ring_size = TX_BD_RING_LEN;
	/* Alloc Rx BD */
	priv->rx_bd_base = dma_alloc_coherent(priv->dev,
					      RX_BD_RING_LEN * sizeof(struct qe_bd),
					      &priv->dma_rx_bd, GFP_KERNEL);
	if (!priv->rx_bd_base) {
		dev_err(priv->dev, "Cannot allocate MURAM memory for RxBDs\n");
		ret = -ENOMEM;
		goto free_uccf;
	}

	/* Alloc Tx BD */
	priv->tx_bd_base = dma_alloc_coherent(priv->dev,
					      TX_BD_RING_LEN * sizeof(struct qe_bd),
					      &priv->dma_tx_bd, GFP_KERNEL);
	if (!priv->tx_bd_base) {
		dev_err(priv->dev, "Cannot allocate MURAM memory for TxBDs\n");
		ret = -ENOMEM;
		goto free_rx_bd;
	}

	/* Alloc parameter ram for ucc hdlc */
	priv->ucc_pram_offset = qe_muram_alloc(sizeof(struct ucc_hdlc_param),
					       ALIGNMENT_OF_UCC_HDLC_PRAM);
	if (priv->ucc_pram_offset < 0) {
		dev_err(priv->dev, "Cannot allocate MURAM for hdlc parameter\n");
		ret = -ENOMEM;
		goto free_tx_bd;
	}

	priv->rx_skbuff = kcalloc(priv->rx_ring_size,
				  sizeof(*priv->rx_skbuff),
				  GFP_KERNEL);
	if (!priv->rx_skbuff) {
		ret = -ENOMEM;
		goto free_ucc_pram;
	}

	priv->tx_skbuff = kcalloc(priv->tx_ring_size,
				  sizeof(*priv->tx_skbuff),
				  GFP_KERNEL);
	if (!priv->tx_skbuff) {
		ret = -ENOMEM;
		goto free_rx_skbuff;
	}

	priv->skb_curtx = 0;
	priv->skb_dirtytx = 0;
	priv->curtx_bd = priv->tx_bd_base;
	priv->dirty_tx = priv->tx_bd_base;
	priv->currx_bd = priv->rx_bd_base;
	priv->currx_bdnum = 0;

	/* init parameter base */
	cecr_subblock = ucc_fast_get_qe_cr_subblock(uf_info->ucc_num);
	ret = qe_issue_cmd(QE_ASSIGN_PAGE_TO_DEVICE, cecr_subblock,
			   QE_CR_PROTOCOL_UNSPECIFIED, priv->ucc_pram_offset);

	priv->ucc_pram = (struct ucc_hdlc_param __iomem *)
					qe_muram_addr(priv->ucc_pram_offset);

	/* Zero out parameter ram */
	memset_io(priv->ucc_pram, 0, sizeof(struct ucc_hdlc_param));

	/* Alloc riptr, tiptr */
	riptr = qe_muram_alloc(32, 32);
	if (riptr < 0) {
		dev_err(priv->dev, "Cannot allocate MURAM mem for Receive internal temp data pointer\n");
		ret = -ENOMEM;
		goto free_tx_skbuff;
	}

	tiptr = qe_muram_alloc(32, 32);
	if (tiptr < 0) {
		dev_err(priv->dev, "Cannot allocate MURAM mem for Transmit internal temp data pointer\n");
		ret = -ENOMEM;
		goto free_riptr;
	}

	if (riptr != (u16)riptr || tiptr != (u16)tiptr) {
		dev_err(priv->dev, "MURAM allocation out of addressable range\n");
		ret = -ENOMEM;
		goto free_tiptr;
	}

	/* Set RIPTR, TIPTR */
	iowrite16be(riptr, &priv->ucc_pram->riptr);
	iowrite16be(tiptr, &priv->ucc_pram->tiptr);

	/* Set MRBLR */
	iowrite16be(MAX_RX_BUF_LENGTH, &priv->ucc_pram->mrblr);

	/* Set RBASE, TBASE */
	iowrite32be(priv->dma_rx_bd, &priv->ucc_pram->rbase);
	iowrite32be(priv->dma_tx_bd, &priv->ucc_pram->tbase);

	/* Set RSTATE, TSTATE */
	iowrite32be(BMR_GBL | BMR_BIG_ENDIAN, &priv->ucc_pram->rstate);
	iowrite32be(BMR_GBL | BMR_BIG_ENDIAN, &priv->ucc_pram->tstate);

	/* Set C_MASK, C_PRES for 16bit CRC */
	iowrite32be(CRC_16BIT_MASK, &priv->ucc_pram->c_mask);
	iowrite32be(CRC_16BIT_PRES, &priv->ucc_pram->c_pres);

	iowrite16be(MAX_FRAME_LENGTH, &priv->ucc_pram->mflr);
	iowrite16be(DEFAULT_RFTHR, &priv->ucc_pram->rfthr);
	iowrite16be(DEFAULT_RFTHR, &priv->ucc_pram->rfcnt);
	iowrite16be(priv->hmask, &priv->ucc_pram->hmask);
	iowrite16be(DEFAULT_HDLC_ADDR, &priv->ucc_pram->haddr1);
	iowrite16be(DEFAULT_HDLC_ADDR, &priv->ucc_pram->haddr2);
	iowrite16be(DEFAULT_HDLC_ADDR, &priv->ucc_pram->haddr3);
	iowrite16be(DEFAULT_HDLC_ADDR, &priv->ucc_pram->haddr4);

	/* Get BD buffer */
	bd_buffer = dma_alloc_coherent(priv->dev,
				       (RX_BD_RING_LEN + TX_BD_RING_LEN) * MAX_RX_BUF_LENGTH,
				       &bd_dma_addr, GFP_KERNEL);
	if (!bd_buffer) {
		dev_err(priv->dev, "Could not allocate buffer descriptors\n");
		ret = -ENOMEM;
		goto free_tiptr;
	}

	priv->rx_buffer = bd_buffer;
	priv->tx_buffer = bd_buffer + RX_BD_RING_LEN * MAX_RX_BUF_LENGTH;

	priv->dma_rx_addr = bd_dma_addr;
	priv->dma_tx_addr = bd_dma_addr + RX_BD_RING_LEN * MAX_RX_BUF_LENGTH;

	for (i = 0; i < RX_BD_RING_LEN; i++) {
		if (i < (RX_BD_RING_LEN - 1))
			bd_status = R_E_S | R_I_S;
		else
			bd_status = R_E_S | R_I_S | R_W_S;

		iowrite16be(bd_status, &priv->rx_bd_base[i].status);
		iowrite32be(priv->dma_rx_addr + i * MAX_RX_BUF_LENGTH,
			    &priv->rx_bd_base[i].buf);
	}

	for (i = 0; i < TX_BD_RING_LEN; i++) {
		if (i < (TX_BD_RING_LEN - 1))
			bd_status = T_I_S | T_TC_S;
		else
			bd_status = T_I_S | T_TC_S | T_W_S;

		iowrite16be(bd_status, &priv->tx_bd_base[i].status);
		iowrite32be(priv->dma_tx_addr + i * MAX_RX_BUF_LENGTH,
			    &priv->tx_bd_base[i].buf);
	}

	return 0;

free_tiptr:
	qe_muram_free(tiptr);
free_riptr:
	qe_muram_free(riptr);
free_tx_skbuff:
	kfree(priv->tx_skbuff);
free_rx_skbuff:
	kfree(priv->rx_skbuff);
free_ucc_pram:
	qe_muram_free(priv->ucc_pram_offset);
free_tx_bd:
	dma_free_coherent(priv->dev,
			  TX_BD_RING_LEN * sizeof(struct qe_bd),
			  priv->tx_bd_base, priv->dma_tx_bd);
free_rx_bd:
	dma_free_coherent(priv->dev,
			  RX_BD_RING_LEN * sizeof(struct qe_bd),
			  priv->rx_bd_base, priv->dma_rx_bd);
free_uccf:
	ucc_fast_free(priv->uccf);

	return ret;
}
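
/*
 * ndo_start_xmit handler.  Validates/prepends the protocol header,
 * copies the skb payload into the DMA buffer attached to the next free
 * Tx BD, and hands the BD to the QE by setting the Ready bit.  The skb
 * itself is freed later, in hdlc_tx_done(), once the BD comes back.
 */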
static netdev_tx_t ucc_hdlc_tx(struct sk_buff *skb, struct net_device *dev)
{
	hdlc_device *hdlc = dev_to_hdlc(dev);
	struct ucc_hdlc_private *priv = (struct ucc_hdlc_private *)hdlc->priv;
	struct qe_bd __iomem *bd;
	u16 bd_status;
	unsigned long flags;
	u16 *proto_head;

	switch (dev->type) {
	case ARPHRD_RAWHDLC:
		if (skb_headroom(skb) < HDLC_HEAD_LEN) {
			dev->stats.tx_dropped++;
			dev_kfree_skb(skb);
			netdev_err(dev, "Not enough headroom for HDLC header\n");
			return NETDEV_TX_OK;
		}

		skb_push(skb, HDLC_HEAD_LEN);

		proto_head = (u16 *)skb->data;
		*proto_head = htons(DEFAULT_HDLC_HEAD);

		dev->stats.tx_bytes += skb->len;
		break;

	case ARPHRD_PPP:
		proto_head = (u16 *)skb->data;
		if (*proto_head != htons(DEFAULT_PPP_HEAD)) {
			dev->stats.tx_dropped++;
			dev_kfree_skb(skb);
			netdev_err(dev, "Wrong PPP header\n");
			return NETDEV_TX_OK;
		}

		dev->stats.tx_bytes += skb->len;
		break;

	case ARPHRD_ETHER:
		dev->stats.tx_bytes += skb->len;
		break;

	default:
		dev->stats.tx_dropped++;
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}
	netdev_sent_queue(dev, skb->len);
	spin_lock_irqsave(&priv->lock, flags);

	/* Start from the next BD that should be filled */
	bd = priv->curtx_bd;
	bd_status = ioread16be(&bd->status);
	/* Save the skb pointer so we can free it later */
	priv->tx_skbuff[priv->skb_curtx] = skb;

	/* Update the current skb pointer (wrapping if this was the last) */
	priv->skb_curtx =
	    (priv->skb_curtx + 1) & TX_RING_MOD_MASK(TX_BD_RING_LEN);

	/* copy skb data to tx buffer for sdma processing */
	memcpy(priv->tx_buffer + (be32_to_cpu(bd->buf) - priv->dma_tx_addr),
	       skb->data, skb->len);

	/* set bd status and length */
	bd_status = (bd_status & T_W_S) | T_R_S | T_I_S | T_L_S | T_TC_S;

	iowrite16be(skb->len, &bd->length);
	iowrite16be(bd_status, &bd->status);

	/* Move to next BD in the ring */
	if (!(bd_status & T_W_S))
		bd += 1;
	else
		bd = priv->tx_bd_base;

	if (bd == priv->dirty_tx) {
		if (!netif_queue_stopped(dev))
			netif_stop_queue(dev);
	}

	priv->curtx_bd = bd;

	spin_unlock_irqrestore(&priv->lock, flags);

	return NETDEV_TX_OK;
}
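
/* Tell the QE to resume transmission on this UCC after a Tx error */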
static int hdlc_tx_restart(struct ucc_hdlc_private *priv)
{
	u32 cecr_subblock;

	cecr_subblock =
		ucc_fast_get_qe_cr_subblock(priv->ut_info->uf_info.ucc_num);

	qe_issue_cmd(QE_RESTART_TX, cecr_subblock,
		     QE_CR_PROTOCOL_UNSPECIFIED, 0);
	return 0;
}
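
/*
 * Reclaim Tx BDs that the QE has finished with (Ready bit cleared),
 * starting at dirty_tx: count Tx errors, free the matching skbs, wake
 * the queue, and restart Tx if an underrun or lost carrier was seen.
 * Called from NAPI context under priv->lock.
 */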
static int hdlc_tx_done(struct ucc_hdlc_private *priv)
{
	/* Start from the next BD that should be filled */
	struct net_device *dev = priv->ndev;
	unsigned int bytes_sent = 0;
	int howmany = 0;
	struct qe_bd *bd;		/* BD pointer */
	u16 bd_status;
	int tx_restart = 0;

	bd = priv->dirty_tx;
	bd_status = ioread16be(&bd->status);

	/* Normal processing. */
	while ((bd_status & T_R_S) == 0) {
		struct sk_buff *skb;

		if (bd_status & T_UN_S) { /* Underrun */
			dev->stats.tx_fifo_errors++;
			tx_restart = 1;
		}
		if (bd_status & T_CT_S) { /* Carrier lost */
			dev->stats.tx_carrier_errors++;
			tx_restart = 1;
		}

		/* BD contains already transmitted buffer.   */
		/* Handle the transmitted buffer and release */
		/* the BD to be used with the current frame  */

		skb = priv->tx_skbuff[priv->skb_dirtytx];
		if (!skb)
			break;
		howmany++;
		bytes_sent += skb->len;
		dev->stats.tx_packets++;
		memset(priv->tx_buffer +
		       (be32_to_cpu(bd->buf) - priv->dma_tx_addr),
		       0, skb->len);
		dev_consume_skb_irq(skb);

		priv->tx_skbuff[priv->skb_dirtytx] = NULL;
		priv->skb_dirtytx = (priv->skb_dirtytx +
				     1) & TX_RING_MOD_MASK(TX_BD_RING_LEN);

		/* We freed a buffer, so now we can restart transmission */
		if (netif_queue_stopped(dev))
			netif_wake_queue(dev);

		/* Advance the confirmation BD pointer */
		if (!(bd_status & T_W_S))
			bd += 1;
		else
			bd = priv->tx_bd_base;
		bd_status = ioread16be(&bd->status);
	}
	priv->dirty_tx = bd;

	if (tx_restart)
		hdlc_tx_restart(priv);

	netdev_completed_queue(dev, howmany, bytes_sent);
	return 0;
}
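
/*
 * NAPI receive processing: walk the Rx BD ring from currx_bd, copy each
 * completed frame out of its DMA buffer into a fresh skb (stripping the
 * HDLC header/CRC as the device type requires), and recycle the BD by
 * re-arming its Empty bit.  Returns the number of frames handed to the
 * stack, at most rx_work_limit.
 */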
static int hdlc_rx_done(struct ucc_hdlc_private *priv, int rx_work_limit)
{
	struct net_device *dev = priv->ndev;
	struct sk_buff *skb = NULL;
	hdlc_device *hdlc = dev_to_hdlc(dev);
	struct qe_bd *bd;
	u16 bd_status;
	u16 length, howmany = 0;
	u8 *bdbuffer;

	bd = priv->currx_bd;
	bd_status = ioread16be(&bd->status);

	/* while there are received buffers and BD is full (~R_E) */
	while (!((bd_status & (R_E_S)) || (--rx_work_limit < 0))) {
		if (bd_status & (RX_BD_ERRORS)) {
			dev->stats.rx_errors++;

			if (bd_status & R_CD_S)
				dev->stats.collisions++;
			if (bd_status & R_OV_S)
				dev->stats.rx_fifo_errors++;
			if (bd_status & R_CR_S)
				dev->stats.rx_crc_errors++;
			if (bd_status & R_AB_S)
				dev->stats.rx_over_errors++;
			if (bd_status & R_NO_S)
				dev->stats.rx_frame_errors++;
			if (bd_status & R_LG_S)
				dev->stats.rx_length_errors++;

			goto recycle;
		}
		bdbuffer = priv->rx_buffer +
			(priv->currx_bdnum * MAX_RX_BUF_LENGTH);
		length = ioread16be(&bd->length);

		switch (dev->type) {
		case ARPHRD_RAWHDLC:
			bdbuffer += HDLC_HEAD_LEN;
			length -= (HDLC_HEAD_LEN + HDLC_CRC_SIZE);

			skb = dev_alloc_skb(length);
			if (!skb) {
				dev->stats.rx_dropped++;
				return -ENOMEM;
			}

			skb_put(skb, length);
			skb->dev = dev;
			memcpy(skb->data, bdbuffer, length);
			break;

		case ARPHRD_PPP:
		case ARPHRD_ETHER:
			length -= HDLC_CRC_SIZE;

			skb = dev_alloc_skb(length);
			if (!skb) {
				dev->stats.rx_dropped++;
				return -ENOMEM;
			}

			skb_put(skb, length);
			skb->dev = dev;
			memcpy(skb->data, bdbuffer, length);
			break;
		}

		dev->stats.rx_packets++;
		dev->stats.rx_bytes += skb->len;
		howmany++;
		if (hdlc->proto)
			skb->protocol = hdlc_type_trans(skb, dev);
		netif_receive_skb(skb);

recycle:
		iowrite16be((bd_status & R_W_S) | R_E_S | R_I_S, &bd->status);

		/* update to point at the next bd */
		if (bd_status & R_W_S) {
			priv->currx_bdnum = 0;
			bd = priv->rx_bd_base;
		} else {
			if (priv->currx_bdnum < (RX_BD_RING_LEN - 1))
				priv->currx_bdnum += 1;
			else
				priv->currx_bdnum = RX_BD_RING_LEN - 1;

			bd += 1;
		}

		bd_status = ioread16be(&bd->status);
	}

	priv->currx_bd = bd;
	return howmany;
}
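
/*
 * NAPI poll: complete Tx first, then receive up to @budget frames;
 * the Rx/Tx event interrupts are re-enabled only once the ring has
 * been drained below the budget.
 */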
static int ucc_hdlc_poll(struct napi_struct *napi, int budget)
{
	struct ucc_hdlc_private *priv = container_of(napi,
						     struct ucc_hdlc_private,
						     napi);
	int howmany;

	/* Tx event processing */
	spin_lock(&priv->lock);
	hdlc_tx_done(priv);
	spin_unlock(&priv->lock);

	howmany = 0;
	howmany += hdlc_rx_done(priv, budget - howmany);

	if (howmany < budget) {
		napi_complete_done(napi, howmany);
		qe_setbits_be32(priv->uccf->p_uccm,
				(UCCE_HDLC_RX_EVENTS | UCCE_HDLC_TX_EVENTS) << 16);
	}

	return howmany;
}
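
/*
 * Top-half interrupt handler: acknowledge the pending events in UCCE,
 * mask further Rx/Tx event interrupts, and defer the real work to NAPI.
 * Busy (BSY) and Tx error events are only counted here.
 */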
static irqreturn_t ucc_hdlc_irq_handler(int irq, void *dev_id)
{
	struct ucc_hdlc_private *priv = (struct ucc_hdlc_private *)dev_id;
	struct net_device *dev = priv->ndev;
	struct ucc_fast_private *uccf;
	u32 ucce;
	u32 uccm;

	uccf = priv->uccf;

	ucce = ioread32be(uccf->p_ucce);
	uccm = ioread32be(uccf->p_uccm);
	ucce &= uccm;
	iowrite32be(ucce, uccf->p_ucce);
	if (!ucce)
		return IRQ_NONE;

	if ((ucce >> 16) & (UCCE_HDLC_RX_EVENTS | UCCE_HDLC_TX_EVENTS)) {
		if (napi_schedule_prep(&priv->napi)) {
			uccm &= ~((UCCE_HDLC_RX_EVENTS | UCCE_HDLC_TX_EVENTS)
				  << 16);
			iowrite32be(uccm, uccf->p_uccm);
			__napi_schedule(&priv->napi);
		}
	}

	/* Errors and other events */
	if (ucce >> 16 & UCC_HDLC_UCCE_BSY)
		dev->stats.rx_missed_errors++;
	if (ucce >> 16 & UCC_HDLC_UCCE_TXE)
		dev->stats.tx_errors++;

	return IRQ_HANDLED;
}
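
/*
 * SIOCWANDEV handler: IF_GET_IFACE reports an E1 interface with our
 * clocking mode (as queried by sethdlc-style tools); everything else
 * is delegated to the generic HDLC ioctl.
 */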
static int uhdlc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	const size_t size = sizeof(te1_settings);
	te1_settings line;
	struct ucc_hdlc_private *priv = dev_to_hdlc(dev)->priv;

	if (cmd != SIOCWANDEV)
		return hdlc_ioctl(dev, ifr, cmd);

	switch (ifr->ifr_settings.type) {
	case IF_GET_IFACE:
		ifr->ifr_settings.type = IF_IFACE_E1;
		if (ifr->ifr_settings.size < size) {
			ifr->ifr_settings.size = size; /* data size wanted */
			return -ENOBUFS;
		}
		memset(&line, 0, sizeof(line));
		line.clock_type = priv->clocking;

		if (copy_to_user(ifr->ifr_settings.ifs_ifsu.sync, &line, size))
			return -EFAULT;
		return 0;

	default:
		return hdlc_ioctl(dev, ifr, cmd);
	}
}
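
/*
 * ndo_open: request the UCC interrupt, issue QE_INIT_TX_RX, enable the
 * UCC and (for a TSA setup) the TDM port, then start NAPI and the Tx
 * queue.  hdlc_busy guards against doing this twice.
 */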
static int uhdlc_open(struct net_device *dev)
{
	u32 cecr_subblock;
	hdlc_device *hdlc = dev_to_hdlc(dev);
	struct ucc_hdlc_private *priv = hdlc->priv;
	struct ucc_tdm *utdm = priv->utdm;

	if (priv->hdlc_busy != 1) {
		if (request_irq(priv->ut_info->uf_info.irq,
				ucc_hdlc_irq_handler, 0, "hdlc", priv))
			return -ENODEV;

		cecr_subblock = ucc_fast_get_qe_cr_subblock(
					priv->ut_info->uf_info.ucc_num);

		qe_issue_cmd(QE_INIT_TX_RX, cecr_subblock,
			     QE_CR_PROTOCOL_UNSPECIFIED, 0);

		ucc_fast_enable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX);

		/* Enable the TDM port */
		if (priv->tsa)
			utdm->si_regs->siglmr1_h |= (0x1 << utdm->tdm_port);

		priv->hdlc_busy = 1;
		netif_device_attach(priv->ndev);
		napi_enable(&priv->napi);
		netdev_reset_queue(dev);
		netif_start_queue(dev);
		hdlc_open(dev);
	}

	return 0;
}
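
/* Undo everything uhdlc_init() allocated, in the reverse order */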
static void uhdlc_memclean(struct ucc_hdlc_private *priv)
{
	qe_muram_free(ioread16be(&priv->ucc_pram->riptr));
	qe_muram_free(ioread16be(&priv->ucc_pram->tiptr));

	if (priv->rx_bd_base) {
		dma_free_coherent(priv->dev,
				  RX_BD_RING_LEN * sizeof(struct qe_bd),
				  priv->rx_bd_base, priv->dma_rx_bd);

		priv->rx_bd_base = NULL;
		priv->dma_rx_bd = 0;
	}

	if (priv->tx_bd_base) {
		dma_free_coherent(priv->dev,
				  TX_BD_RING_LEN * sizeof(struct qe_bd),
				  priv->tx_bd_base, priv->dma_tx_bd);

		priv->tx_bd_base = NULL;
		priv->dma_tx_bd = 0;
	}

	if (priv->ucc_pram) {
		qe_muram_free(priv->ucc_pram_offset);
		priv->ucc_pram = NULL;
		priv->ucc_pram_offset = 0;
	}

	kfree(priv->rx_skbuff);
	priv->rx_skbuff = NULL;

	kfree(priv->tx_skbuff);
	priv->tx_skbuff = NULL;

	if (priv->uf_regs) {
		iounmap(priv->uf_regs);
		priv->uf_regs = NULL;
	}

	if (priv->uccf) {
		ucc_fast_free(priv->uccf);
		priv->uccf = NULL;
	}

	if (priv->rx_buffer) {
		dma_free_coherent(priv->dev,
				  RX_BD_RING_LEN * MAX_RX_BUF_LENGTH,
				  priv->rx_buffer, priv->dma_rx_addr);
		priv->rx_buffer = NULL;
		priv->dma_rx_addr = 0;
	}

	if (priv->tx_buffer) {
		dma_free_coherent(priv->dev,
				  TX_BD_RING_LEN * MAX_RX_BUF_LENGTH,
				  priv->tx_buffer, priv->dma_tx_addr);
		priv->tx_buffer = NULL;
		priv->dma_tx_addr = 0;
	}
}

static int uhdlc_close(struct net_device *dev)
{
	struct ucc_hdlc_private *priv = dev_to_hdlc(dev)->priv;
	struct ucc_tdm *utdm = priv->utdm;
	u32 cecr_subblock;

	napi_disable(&priv->napi);
	cecr_subblock = ucc_fast_get_qe_cr_subblock(
				priv->ut_info->uf_info.ucc_num);

	qe_issue_cmd(QE_GRACEFUL_STOP_TX, cecr_subblock,
		     (u8)QE_CR_PROTOCOL_UNSPECIFIED, 0);
	qe_issue_cmd(QE_CLOSE_RX_BD, cecr_subblock,
		     (u8)QE_CR_PROTOCOL_UNSPECIFIED, 0);

	if (priv->tsa)
		utdm->si_regs->siglmr1_h &= ~(0x1 << utdm->tdm_port);

	ucc_fast_disable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX);

	free_irq(priv->ut_info->uf_info.irq, priv);
	netif_stop_queue(dev);
	netdev_reset_queue(dev);
	priv->hdlc_busy = 0;

	return 0;
}

static int ucc_hdlc_attach(struct net_device *dev, unsigned short encoding,
			   unsigned short parity)
{
	struct ucc_hdlc_private *priv = dev_to_hdlc(dev)->priv;

	if (encoding != ENCODING_NRZ &&
	    encoding != ENCODING_NRZI)
		return -EINVAL;

	if (parity != PARITY_NONE &&
	    parity != PARITY_CRC32_PR1_CCITT &&
	    parity != PARITY_CRC16_PR0_CCITT &&
	    parity != PARITY_CRC16_PR1_CCITT)
		return -EINVAL;

	priv->encoding = encoding;
	priv->parity = parity;

	return 0;
}
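
/*
 * Power management: suspend saves GUMR/GUEMR, the HDLC parameter RAM
 * and the QE MUX clock routing to memory; resume writes them back,
 * rebuilds the SI RAM and BD rings, and re-enables the UCC if the
 * interface was up.
 */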
#ifdef CONFIG_PM
static void store_clk_config(struct ucc_hdlc_private *priv)
{
	struct qe_mux *qe_mux_reg = &qe_immr->qmx;

	/* store si clk */
	priv->cmxsi1cr_h = ioread32be(&qe_mux_reg->cmxsi1cr_h);
	priv->cmxsi1cr_l = ioread32be(&qe_mux_reg->cmxsi1cr_l);

	/* store si sync */
	priv->cmxsi1syr = ioread32be(&qe_mux_reg->cmxsi1syr);

	/* store ucc clk */
	memcpy_fromio(priv->cmxucr, qe_mux_reg->cmxucr, 4 * sizeof(u32));
}

static void resume_clk_config(struct ucc_hdlc_private *priv)
{
	struct qe_mux *qe_mux_reg = &qe_immr->qmx;

	memcpy_toio(qe_mux_reg->cmxucr, priv->cmxucr, 4 * sizeof(u32));

	iowrite32be(priv->cmxsi1cr_h, &qe_mux_reg->cmxsi1cr_h);
	iowrite32be(priv->cmxsi1cr_l, &qe_mux_reg->cmxsi1cr_l);

	iowrite32be(priv->cmxsi1syr, &qe_mux_reg->cmxsi1syr);
}

static int uhdlc_suspend(struct device *dev)
{
	struct ucc_hdlc_private *priv = dev_get_drvdata(dev);
	struct ucc_fast __iomem *uf_regs;

	if (!priv)
		return -EINVAL;

	if (!netif_running(priv->ndev))
		return 0;

	netif_device_detach(priv->ndev);
	napi_disable(&priv->napi);

	uf_regs = priv->uf_regs;

	/* backup gumr guemr */
	priv->gumr = ioread32be(&uf_regs->gumr);
	priv->guemr = ioread8(&uf_regs->guemr);

	priv->ucc_pram_bak = kmalloc(sizeof(*priv->ucc_pram_bak),
				     GFP_KERNEL);
	if (!priv->ucc_pram_bak)
		return -ENOMEM;

	/* backup HDLC parameter */
	memcpy_fromio(priv->ucc_pram_bak, priv->ucc_pram,
		      sizeof(struct ucc_hdlc_param));

	/* store the clk configuration */
	store_clk_config(priv);

	/* save power */
	ucc_fast_disable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX);

	return 0;
}

static int uhdlc_resume(struct device *dev)
{
	struct ucc_hdlc_private *priv = dev_get_drvdata(dev);
	struct ucc_tdm *utdm;
	struct ucc_tdm_info *ut_info;
	struct ucc_fast __iomem *uf_regs;
	struct ucc_fast_private *uccf;
	struct ucc_fast_info *uf_info;
	int i;
	u32 cecr_subblock;
	u16 bd_status;

	if (!priv)
		return -EINVAL;

	if (!netif_running(priv->ndev))
		return 0;

	utdm = priv->utdm;
	ut_info = priv->ut_info;
	uf_info = &ut_info->uf_info;
	uf_regs = priv->uf_regs;
	uccf = priv->uccf;

	/* restore gumr guemr */
	iowrite8(priv->guemr, &uf_regs->guemr);
	iowrite32be(priv->gumr, &uf_regs->gumr);

	/* Set Virtual Fifo registers */
	iowrite16be(uf_info->urfs, &uf_regs->urfs);
	iowrite16be(uf_info->urfet, &uf_regs->urfet);
	iowrite16be(uf_info->urfset, &uf_regs->urfset);
	iowrite16be(uf_info->utfs, &uf_regs->utfs);
	iowrite16be(uf_info->utfet, &uf_regs->utfet);
	iowrite16be(uf_info->utftt, &uf_regs->utftt);
	/* utfb, urfb are offsets from MURAM base */
	iowrite32be(uccf->ucc_fast_tx_virtual_fifo_base_offset, &uf_regs->utfb);
	iowrite32be(uccf->ucc_fast_rx_virtual_fifo_base_offset, &uf_regs->urfb);

	/* Rx Tx and sync clock routing */
	resume_clk_config(priv);

	iowrite32be(uf_info->uccm_mask, &uf_regs->uccm);
	iowrite32be(0xffffffff, &uf_regs->ucce);

	ucc_fast_disable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX);

	/* rebuild SIRAM */
	if (priv->tsa)
		ucc_tdm_init(priv->utdm, priv->ut_info);

	/* Write to QE CECR, UCCx channel to Stop Transmission */
	cecr_subblock = ucc_fast_get_qe_cr_subblock(uf_info->ucc_num);
	qe_issue_cmd(QE_STOP_TX, cecr_subblock,
		     (u8)QE_CR_PROTOCOL_UNSPECIFIED, 0);

	/* Set UPSMR normal mode */
	iowrite32be(0, &uf_regs->upsmr);

	/* init parameter base */
	cecr_subblock = ucc_fast_get_qe_cr_subblock(uf_info->ucc_num);
	qe_issue_cmd(QE_ASSIGN_PAGE_TO_DEVICE, cecr_subblock,
		     QE_CR_PROTOCOL_UNSPECIFIED, priv->ucc_pram_offset);

	priv->ucc_pram = (struct ucc_hdlc_param __iomem *)
				qe_muram_addr(priv->ucc_pram_offset);

	/* restore ucc parameter */
	memcpy_toio(priv->ucc_pram, priv->ucc_pram_bak,
		    sizeof(struct ucc_hdlc_param));
	kfree(priv->ucc_pram_bak);

	/* rebuild BD entry */
	for (i = 0; i < RX_BD_RING_LEN; i++) {
		if (i < (RX_BD_RING_LEN - 1))
			bd_status = R_E_S | R_I_S;
		else
			bd_status = R_E_S | R_I_S | R_W_S;

		iowrite16be(bd_status, &priv->rx_bd_base[i].status);
		iowrite32be(priv->dma_rx_addr + i * MAX_RX_BUF_LENGTH,
			    &priv->rx_bd_base[i].buf);
	}

	for (i = 0; i < TX_BD_RING_LEN; i++) {
		if (i < (TX_BD_RING_LEN - 1))
			bd_status = T_I_S | T_TC_S;
		else
			bd_status = T_I_S | T_TC_S | T_W_S;

		iowrite16be(bd_status, &priv->tx_bd_base[i].status);
		iowrite32be(priv->dma_tx_addr + i * MAX_RX_BUF_LENGTH,
			    &priv->tx_bd_base[i].buf);
	}

	/* if hdlc is busy enable TX and RX */
	if (priv->hdlc_busy == 1) {
		cecr_subblock = ucc_fast_get_qe_cr_subblock(
					priv->ut_info->uf_info.ucc_num);

		qe_issue_cmd(QE_INIT_TX_RX, cecr_subblock,
			     (u8)QE_CR_PROTOCOL_UNSPECIFIED, 0);

		ucc_fast_enable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX);

		/* Enable the TDM port */
		if (priv->tsa)
			utdm->si_regs->siglmr1_h |= (0x1 << utdm->tdm_port);
	}

	napi_enable(&priv->napi);
	netif_device_attach(priv->ndev);

	return 0;
}

static const struct dev_pm_ops uhdlc_pm_ops = {
	.suspend = uhdlc_suspend,
	.resume = uhdlc_resume,
	.freeze = uhdlc_suspend,
	.thaw = uhdlc_resume,
};

#define HDLC_PM_OPS (&uhdlc_pm_ops)

#else

#define HDLC_PM_OPS NULL

#endif

static void uhdlc_tx_timeout(struct net_device *ndev, unsigned int txqueue)
{
	netdev_err(ndev, "%s\n", __func__);
}

static const struct net_device_ops uhdlc_ops = {
	.ndo_open	= uhdlc_open,
	.ndo_stop	= uhdlc_close,
	.ndo_start_xmit	= hdlc_start_xmit,
	.ndo_do_ioctl	= uhdlc_ioctl,
	.ndo_tx_timeout	= uhdlc_tx_timeout,
};
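
/*
 * Find the platform device for @name (an SI or SI RAM node), map its
 * first MMIO resource into *@ptr, and optionally zero the SI RAM once
 * across all probed devices (@init_flag, tracked by the static
 * siram_init_flag).
 */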
static int hdlc_map_iomem(char *name, int init_flag, void __iomem **ptr)
{
	struct device_node *np;
	struct platform_device *pdev;
	struct resource *res;
	static int siram_init_flag;
	int ret = 0;

	np = of_find_compatible_node(NULL, NULL, name);
	if (!np)
		return -EINVAL;

	pdev = of_find_device_by_node(np);
	if (!pdev) {
		pr_err("%pOFn: failed to lookup pdev\n", np);
		of_node_put(np);
		return -EINVAL;
	}

	of_node_put(np);
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		ret = -EINVAL;
		goto error_put_device;
	}
	*ptr = ioremap(res->start, resource_size(res));
	if (!*ptr) {
		ret = -ENOMEM;
		goto error_put_device;
	}

	/* We've remapped the addresses, and we don't need the device any
	 * more, so we should release it.
	 */
	put_device(&pdev->dev);

	if (init_flag && siram_init_flag == 0) {
		memset_io(*ptr, 0, resource_size(res));
		siram_init_flag = 1;
	}

	return 0;

error_put_device:
	put_device(&pdev->dev);

	return ret;
}
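
/*
 * Probe one "fsl,ucc-hdlc" node.  A minimal, illustrative device-tree
 * node is sketched below; the property values are hypothetical and
 * board-specific, only the property names are what this function
 * actually parses:
 *
 *	ucc@2000 {
 *		compatible = "fsl,ucc-hdlc";
 *		cell-index = <1>;
 *		rx-clock-name = "brg1";
 *		tx-clock-name = "brg1";
 *		fsl,tdm-interface;
 *		fsl,hmask = /bits/ 16 <0x0000>;
 *	};
 */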
static int ucc_hdlc_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct ucc_hdlc_private *uhdlc_priv = NULL;
	struct ucc_tdm_info *ut_info;
	struct ucc_tdm *utdm = NULL;
	struct resource res;
	struct net_device *dev;
	hdlc_device *hdlc;
	int ucc_num;
	const char *sprop;
	int ret;
	u32 val;

	ret = of_property_read_u32_index(np, "cell-index", 0, &val);
	if (ret) {
		dev_err(&pdev->dev, "Invalid ucc property\n");
		return -ENODEV;
	}

	ucc_num = val - 1;
	if (ucc_num > (UCC_MAX_NUM - 1) || ucc_num < 0) {
		dev_err(&pdev->dev, "Invalid UCC num\n");
		return -EINVAL;
	}

	memcpy(&utdm_info[ucc_num], &utdm_primary_info,
	       sizeof(utdm_primary_info));

	ut_info = &utdm_info[ucc_num];
	ut_info->uf_info.ucc_num = ucc_num;

	sprop = of_get_property(np, "rx-clock-name", NULL);
	if (sprop) {
		ut_info->uf_info.rx_clock = qe_clock_source(sprop);
		if ((ut_info->uf_info.rx_clock < QE_CLK_NONE) ||
		    (ut_info->uf_info.rx_clock > QE_CLK24)) {
			dev_err(&pdev->dev, "Invalid rx-clock-name property\n");
			return -EINVAL;
		}
	} else {
		dev_err(&pdev->dev, "Missing rx-clock-name property\n");
		return -EINVAL;
	}

	sprop = of_get_property(np, "tx-clock-name", NULL);
	if (sprop) {
		ut_info->uf_info.tx_clock = qe_clock_source(sprop);
		if ((ut_info->uf_info.tx_clock < QE_CLK_NONE) ||
		    (ut_info->uf_info.tx_clock > QE_CLK24)) {
			dev_err(&pdev->dev, "Invalid tx-clock-name property\n");
			return -EINVAL;
		}
	} else {
		dev_err(&pdev->dev, "Missing tx-clock-name property\n");
		return -EINVAL;
	}

	ret = of_address_to_resource(np, 0, &res);
	if (ret)
		return -EINVAL;

	ut_info->uf_info.regs = res.start;
	ut_info->uf_info.irq = irq_of_parse_and_map(np, 0);

	uhdlc_priv = kzalloc(sizeof(*uhdlc_priv), GFP_KERNEL);
	if (!uhdlc_priv)
		return -ENOMEM;

	dev_set_drvdata(&pdev->dev, uhdlc_priv);
	uhdlc_priv->dev = &pdev->dev;
	uhdlc_priv->ut_info = ut_info;

	if (of_get_property(np, "fsl,tdm-interface", NULL))
		uhdlc_priv->tsa = 1;

	if (of_get_property(np, "fsl,ucc-internal-loopback", NULL))
		uhdlc_priv->loopback = 1;

	if (of_get_property(np, "fsl,hdlc-bus", NULL))
		uhdlc_priv->hdlc_bus = 1;

	if (uhdlc_priv->tsa == 1) {
		utdm = kzalloc(sizeof(*utdm), GFP_KERNEL);
		if (!utdm) {
			ret = -ENOMEM;
			dev_err(&pdev->dev, "No mem to alloc ucc tdm data\n");
			goto free_uhdlc_priv;
		}
		uhdlc_priv->utdm = utdm;
		ret = ucc_of_parse_tdm(np, utdm, ut_info);
		if (ret)
			goto free_utdm;

		ret = hdlc_map_iomem("fsl,t1040-qe-si", 0,
				     (void __iomem **)&utdm->si_regs);
		if (ret)
			goto free_utdm;

		ret = hdlc_map_iomem("fsl,t1040-qe-siram", 1,
				     (void __iomem **)&utdm->siram);
		if (ret)
			goto unmap_si_regs;
	}

	if (of_property_read_u16(np, "fsl,hmask", &uhdlc_priv->hmask))
		uhdlc_priv->hmask = DEFAULT_ADDR_MASK;

	ret = uhdlc_init(uhdlc_priv);
	if (ret) {
		dev_err(&pdev->dev, "Failed to init uhdlc\n");
		goto undo_uhdlc_init;
	}

	dev = alloc_hdlcdev(uhdlc_priv);
	if (!dev) {
		ret = -ENOMEM;
		pr_err("ucc_hdlc: unable to allocate memory\n");
		goto undo_uhdlc_init;
	}

	uhdlc_priv->ndev = dev;
	hdlc = dev_to_hdlc(dev);
	dev->tx_queue_len = 16;
	dev->netdev_ops = &uhdlc_ops;
	dev->watchdog_timeo = 2 * HZ;
	hdlc->attach = ucc_hdlc_attach;
	hdlc->xmit = ucc_hdlc_tx;
	netif_napi_add(dev, &uhdlc_priv->napi, ucc_hdlc_poll, 32);
	if (register_hdlc_device(dev)) {
		ret = -ENOBUFS;
		pr_err("ucc_hdlc: unable to register hdlc device\n");
		goto free_dev;
	}

	return 0;

free_dev:
	free_netdev(dev);
undo_uhdlc_init:
	if (utdm)
		iounmap(utdm->siram);
unmap_si_regs:
	if (utdm)
		iounmap(utdm->si_regs);
free_utdm:
	if (uhdlc_priv->tsa)
		kfree(utdm);
free_uhdlc_priv:
	kfree(uhdlc_priv);
	return ret;
}

static int ucc_hdlc_remove(struct platform_device *pdev)
{
	struct ucc_hdlc_private *priv = dev_get_drvdata(&pdev->dev);

	uhdlc_memclean(priv);

	if (priv->utdm && priv->utdm->si_regs) {
		iounmap(priv->utdm->si_regs);
		priv->utdm->si_regs = NULL;
	}

	if (priv->utdm && priv->utdm->siram) {
		iounmap(priv->utdm->siram);
		priv->utdm->siram = NULL;
	}

	dev_info(&pdev->dev, "UCC based hdlc module removed\n");

	return 0;
}

static const struct of_device_id fsl_ucc_hdlc_of_match[] = {
	{
	.compatible = "fsl,ucc-hdlc",
	},
	{},
};

MODULE_DEVICE_TABLE(of, fsl_ucc_hdlc_of_match);

static struct platform_driver ucc_hdlc_driver = {
	.probe	= ucc_hdlc_probe,
	.remove	= ucc_hdlc_remove,
	.driver	= {
		.name		= DRV_NAME,
		.pm		= HDLC_PM_OPS,
		.of_match_table	= fsl_ucc_hdlc_of_match,
	},
};

module_platform_driver(ucc_hdlc_driver);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION(DRV_DESC);