/* Freescale QUICC Engine HDLC Device Driver
 *
 * Copyright 2016 Freescale Semiconductor Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */

#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/hdlc.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/sched.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/stddef.h>
#include <soc/fsl/qe/qe_tdm.h>
#include <uapi/linux/if_arp.h>

#include "fsl_ucc_hdlc.h"

#define DRV_DESC "Freescale QE UCC HDLC Driver"
#define DRV_NAME "ucc_hdlc"

#define TDM_PPPOHT_SLIC_MAXIN
#define RX_BD_ERRORS (R_CD_S | R_OV_S | R_CR_S | R_AB_S | R_NO_S | R_LG_S)

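/* RX_BD_ERRORS aggregates the per-descriptor receive error status bits
 * (carrier detect lost, overrun, CRC error, abort, non-octet, frame too
 * long); hdlc_rx_done() tests the aggregate once per BD, then attributes
 * the individual error counters.
 */
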
static struct ucc_tdm_info utdm_primary_info = {
	.uf_info = {
		.tsa = 0,
		.cdp = 0,
		.cds = 1,
		.ctsp = 1,
		.ctss = 1,
		.revd = 0,
		.urfs = 256,
		.urfet = 128,
		.urfset = 192,
		.utfs = 112,
		.utfet = 128,
		.utftt = 0x11,
		.ufpt = 256,
		.mode = UCC_FAST_PROTOCOL_MODE_HDLC,
		.ttx_trx = UCC_FAST_GUMR_TRANSPARENT_TTX_TRX_NORMAL,
		.tenc = UCC_FAST_TX_ENCODING_NRZ,
		.renc = UCC_FAST_RX_ENCODING_NRZ,
		.tcrc = UCC_FAST_16_BIT_CRC,
		.synl = UCC_FAST_SYNC_LEN_NOT_USED,
	},
#ifdef TDM_PPPOHT_SLIC_MAXIN
	.si_info.simr_rfsd = 1,
	.si_info.simr_tfsd = 2,
#else
	.si_info.simr_rfsd = 0,
	.si_info.simr_tfsd = 0,
#endif
	.si_info.simr_crt = 0,
	.si_info.simr_sl = 0,
	.si_info.simr_ce = 1,
	.si_info.simr_fe = 1,
	.si_info.simr_gm = 0,
};

static struct ucc_tdm_info utdm_info[MAX_HDLC_NUM];

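/* utdm_primary_info is a compile-time template: ucc_hdlc_probe() copies it
 * into utdm_info[] for each probed UCC and then patches in the per-device
 * fields (ucc_num, clock sources, register base, IRQ) read from the device
 * tree.
 */
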
static int uhdlc_init(struct ucc_hdlc_private *priv)
{
	struct ucc_tdm_info *ut_info;
	struct ucc_fast_info *uf_info;
	u32 cecr_subblock;
	u16 bd_status;
	int ret, i;
	void *bd_buffer;
	dma_addr_t bd_dma_addr;
	u32 riptr;
	u32 tiptr;
	u32 gumr;

	ut_info = priv->ut_info;
	uf_info = &ut_info->uf_info;

	/* This sets the HPM register in the CMXUCR register, which configures
	 * an open-drain-connected HDLC bus.
	 */
	if (priv->hdlc_bus)
		uf_info->brkpt_support = 1;

	uf_info->uccm_mask = ((UCC_HDLC_UCCE_RXB | UCC_HDLC_UCCE_RXF |
				UCC_HDLC_UCCE_TXB) << 16);

	ret = ucc_fast_init(uf_info, &priv->uccf);
	if (ret) {
		dev_err(priv->dev, "Failed to init uccf.\n");
		return ret;
	}

	priv->uf_regs = priv->uccf->uf_regs;
	ucc_fast_disable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX);

	/* Loopback mode */
	if (priv->loopback) {
		dev_info(priv->dev, "Loopback Mode\n");
		/* use the same clock when working in loopback */
		qe_setbrg(ut_info->uf_info.rx_clock, 20000000, 1);

		gumr = ioread32be(&priv->uf_regs->gumr);
		gumr |= (UCC_FAST_GUMR_LOOPBACK | UCC_FAST_GUMR_CDS |
			 UCC_FAST_GUMR_TCI);
		gumr &= ~(UCC_FAST_GUMR_CTSP | UCC_FAST_GUMR_RSYN);
		iowrite32be(gumr, &priv->uf_regs->gumr);
	}

	/* Initialize SI */
	if (priv->tsa)
		ucc_tdm_init(priv->utdm, priv->ut_info);

	/* Write to QE CECR, UCCx channel to Stop Transmission */
	cecr_subblock = ucc_fast_get_qe_cr_subblock(uf_info->ucc_num);
	ret = qe_issue_cmd(QE_STOP_TX, cecr_subblock,
			   QE_CR_PROTOCOL_UNSPECIFIED, 0);

	/* Set UPSMR normal mode (needs fixing) */
	iowrite32be(0, &priv->uf_regs->upsmr);

	/* hdlc_bus mode */
	if (priv->hdlc_bus) {
		u32 upsmr;

		dev_info(priv->dev, "HDLC bus Mode\n");
		upsmr = ioread32be(&priv->uf_regs->upsmr);

		/* bus mode and retransmit enable, with collision window
		 * set to 8 bytes
		 */
		upsmr |= UCC_HDLC_UPSMR_RTE | UCC_HDLC_UPSMR_BUS |
			 UCC_HDLC_UPSMR_CW8;
		iowrite32be(upsmr, &priv->uf_regs->upsmr);

		/* explicitly disable CDS & CTSP */
		gumr = ioread32be(&priv->uf_regs->gumr);
		gumr &= ~(UCC_FAST_GUMR_CDS | UCC_FAST_GUMR_CTSP);
		/* set automatic sync to explicitly ignore CD signal */
		gumr |= UCC_FAST_GUMR_SYNL_AUTO;
		iowrite32be(gumr, &priv->uf_regs->gumr);
	}

	priv->rx_ring_size = RX_BD_RING_LEN;
	priv->tx_ring_size = TX_BD_RING_LEN;
	/* Alloc Rx BD */
	priv->rx_bd_base = dma_alloc_coherent(priv->dev,
					      RX_BD_RING_LEN * sizeof(struct qe_bd),
					      &priv->dma_rx_bd, GFP_KERNEL);
	if (!priv->rx_bd_base) {
		dev_err(priv->dev, "Cannot allocate MURAM memory for RxBDs\n");
		ret = -ENOMEM;
		goto free_uccf;
	}

	/* Alloc Tx BD */
	priv->tx_bd_base = dma_alloc_coherent(priv->dev,
					      TX_BD_RING_LEN * sizeof(struct qe_bd),
					      &priv->dma_tx_bd, GFP_KERNEL);
	if (!priv->tx_bd_base) {
		dev_err(priv->dev, "Cannot allocate MURAM memory for TxBDs\n");
		ret = -ENOMEM;
		goto free_rx_bd;
	}

	/* Alloc parameter ram for ucc hdlc */
	priv->ucc_pram_offset = qe_muram_alloc(sizeof(struct ucc_hdlc_param),
					       ALIGNMENT_OF_UCC_HDLC_PRAM);
	if (IS_ERR_VALUE(priv->ucc_pram_offset)) {
		dev_err(priv->dev, "Can not allocate MURAM for hdlc parameter.\n");
		ret = -ENOMEM;
		goto free_tx_bd;
	}

	priv->rx_skbuff = kcalloc(priv->rx_ring_size,
				  sizeof(*priv->rx_skbuff),
				  GFP_KERNEL);
	if (!priv->rx_skbuff) {
		ret = -ENOMEM;
		goto free_ucc_pram;
	}

	priv->tx_skbuff = kcalloc(priv->tx_ring_size,
				  sizeof(*priv->tx_skbuff),
				  GFP_KERNEL);
	if (!priv->tx_skbuff) {
		ret = -ENOMEM;
		goto free_rx_skbuff;
	}

	priv->skb_curtx = 0;
	priv->skb_dirtytx = 0;
	priv->curtx_bd = priv->tx_bd_base;
	priv->dirty_tx = priv->tx_bd_base;
	priv->currx_bd = priv->rx_bd_base;
	priv->currx_bdnum = 0;

	/* init parameter base */
	cecr_subblock = ucc_fast_get_qe_cr_subblock(uf_info->ucc_num);
	ret = qe_issue_cmd(QE_ASSIGN_PAGE_TO_DEVICE, cecr_subblock,
			   QE_CR_PROTOCOL_UNSPECIFIED, priv->ucc_pram_offset);

	priv->ucc_pram = (struct ucc_hdlc_param __iomem *)
					qe_muram_addr(priv->ucc_pram_offset);

	/* Zero out parameter ram */
	memset_io(priv->ucc_pram, 0, sizeof(struct ucc_hdlc_param));

	/* Alloc riptr, tiptr */
	riptr = qe_muram_alloc(32, 32);
	if (IS_ERR_VALUE(riptr)) {
		dev_err(priv->dev, "Cannot allocate MURAM mem for Receive internal temp data pointer\n");
		ret = -ENOMEM;
		goto free_tx_skbuff;
	}

	tiptr = qe_muram_alloc(32, 32);
	if (IS_ERR_VALUE(tiptr)) {
		dev_err(priv->dev, "Cannot allocate MURAM mem for Transmit internal temp data pointer\n");
		ret = -ENOMEM;
		goto free_riptr;
	}

	/* Set RIPTR, TIPTR */
	iowrite16be(riptr, &priv->ucc_pram->riptr);
	iowrite16be(tiptr, &priv->ucc_pram->tiptr);

	/* Set MRBLR */
	iowrite16be(MAX_RX_BUF_LENGTH, &priv->ucc_pram->mrblr);

	/* Set RBASE, TBASE */
	iowrite32be(priv->dma_rx_bd, &priv->ucc_pram->rbase);
	iowrite32be(priv->dma_tx_bd, &priv->ucc_pram->tbase);

	/* Set RSTATE, TSTATE */
	iowrite32be(BMR_GBL | BMR_BIG_ENDIAN, &priv->ucc_pram->rstate);
	iowrite32be(BMR_GBL | BMR_BIG_ENDIAN, &priv->ucc_pram->tstate);

	/* Set C_MASK, C_PRES for 16bit CRC */
	iowrite32be(CRC_16BIT_MASK, &priv->ucc_pram->c_mask);
	iowrite32be(CRC_16BIT_PRES, &priv->ucc_pram->c_pres);

	iowrite16be(MAX_FRAME_LENGTH, &priv->ucc_pram->mflr);
	iowrite16be(DEFAULT_RFTHR, &priv->ucc_pram->rfthr);
	iowrite16be(DEFAULT_RFTHR, &priv->ucc_pram->rfcnt);
	iowrite16be(priv->hmask, &priv->ucc_pram->hmask);
	iowrite16be(DEFAULT_HDLC_ADDR, &priv->ucc_pram->haddr1);
	iowrite16be(DEFAULT_HDLC_ADDR, &priv->ucc_pram->haddr2);
	iowrite16be(DEFAULT_HDLC_ADDR, &priv->ucc_pram->haddr3);
	iowrite16be(DEFAULT_HDLC_ADDR, &priv->ucc_pram->haddr4);

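	/* hmask and haddr1-4 program the QE's HDLC address recognition:
	 * a received frame's address field is compared against haddr1-4
	 * under hmask (the exact matching rules are defined by the QE
	 * reference manual). The defaults above leave filtering effectively
	 * open; the optional "fsl,hmask" property, read in ucc_hdlc_probe(),
	 * tightens the mask.
	 */
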
	/* Get BD buffer */
	bd_buffer = dma_alloc_coherent(priv->dev,
				       (RX_BD_RING_LEN + TX_BD_RING_LEN) * MAX_RX_BUF_LENGTH,
				       &bd_dma_addr, GFP_KERNEL);
	if (!bd_buffer) {
		dev_err(priv->dev, "Could not allocate buffer descriptors\n");
		ret = -ENOMEM;
		goto free_tiptr;
	}

	priv->rx_buffer = bd_buffer;
	priv->tx_buffer = bd_buffer + RX_BD_RING_LEN * MAX_RX_BUF_LENGTH;

	priv->dma_rx_addr = bd_dma_addr;
	priv->dma_tx_addr = bd_dma_addr + RX_BD_RING_LEN * MAX_RX_BUF_LENGTH;

	for (i = 0; i < RX_BD_RING_LEN; i++) {
		if (i < (RX_BD_RING_LEN - 1))
			bd_status = R_E_S | R_I_S;
		else
			bd_status = R_E_S | R_I_S | R_W_S;

		iowrite16be(bd_status, &priv->rx_bd_base[i].status);
		iowrite32be(priv->dma_rx_addr + i * MAX_RX_BUF_LENGTH,
			    &priv->rx_bd_base[i].buf);
	}

	for (i = 0; i < TX_BD_RING_LEN; i++) {
		if (i < (TX_BD_RING_LEN - 1))
			bd_status = T_I_S | T_TC_S;
		else
			bd_status = T_I_S | T_TC_S | T_W_S;

		iowrite16be(bd_status, &priv->tx_bd_base[i].status);
		iowrite32be(priv->dma_tx_addr + i * MAX_RX_BUF_LENGTH,
			    &priv->tx_bd_base[i].buf);
	}

	return 0;

free_tiptr:
	qe_muram_free(tiptr);
free_riptr:
	qe_muram_free(riptr);
free_tx_skbuff:
	kfree(priv->tx_skbuff);
free_rx_skbuff:
	kfree(priv->rx_skbuff);
free_ucc_pram:
	qe_muram_free(priv->ucc_pram_offset);
free_tx_bd:
	dma_free_coherent(priv->dev,
			  TX_BD_RING_LEN * sizeof(struct qe_bd),
			  priv->tx_bd_base, priv->dma_tx_bd);
free_rx_bd:
	dma_free_coherent(priv->dev,
			  RX_BD_RING_LEN * sizeof(struct qe_bd),
			  priv->rx_bd_base, priv->dma_rx_bd);
free_uccf:
	ucc_fast_free(priv->uccf);

	return ret;
}

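/* A single coherent allocation backs both rings' packet buffers:
 *
 *	[RX_BD_RING_LEN * MAX_RX_BUF_LENGTH][TX_BD_RING_LEN * MAX_RX_BUF_LENGTH]
 *	 ^ rx_buffer / dma_rx_addr           ^ tx_buffer / dma_tx_addr
 *
 * Every BD's buf field points at its own MAX_RX_BUF_LENGTH slot, so the
 * CPU-side address of a BD's data can be recovered from the DMA address,
 * e.g. priv->tx_buffer + (be32_to_cpu(bd->buf) - priv->dma_tx_addr) in
 * ucc_hdlc_tx() and hdlc_tx_done().
 */
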
static netdev_tx_t ucc_hdlc_tx(struct sk_buff *skb, struct net_device *dev)
{
	hdlc_device *hdlc = dev_to_hdlc(dev);
	struct ucc_hdlc_private *priv = (struct ucc_hdlc_private *)hdlc->priv;
	struct qe_bd __iomem *bd;
	u16 bd_status;
	unsigned long flags;
	u16 *proto_head;

	switch (dev->type) {
	case ARPHRD_RAWHDLC:
		if (skb_headroom(skb) < HDLC_HEAD_LEN) {
			dev->stats.tx_dropped++;
			dev_kfree_skb(skb);
			netdev_err(dev, "Not enough space for hdlc head\n");
			return NETDEV_TX_OK;
		}

		skb_push(skb, HDLC_HEAD_LEN);

		proto_head = (u16 *)skb->data;
		*proto_head = htons(DEFAULT_HDLC_HEAD);

		dev->stats.tx_bytes += skb->len;
		break;

	case ARPHRD_PPP:
		proto_head = (u16 *)skb->data;
		if (*proto_head != htons(DEFAULT_PPP_HEAD)) {
			dev->stats.tx_dropped++;
			dev_kfree_skb(skb);
			netdev_err(dev, "Wrong ppp header\n");
			return NETDEV_TX_OK;
		}

		dev->stats.tx_bytes += skb->len;
		break;

	case ARPHRD_ETHER:
		dev->stats.tx_bytes += skb->len;
		break;

	default:
		dev->stats.tx_dropped++;
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	netdev_sent_queue(dev, skb->len);
	spin_lock_irqsave(&priv->lock, flags);

	/* Start from the next BD that should be filled */
	bd = priv->curtx_bd;
	bd_status = ioread16be(&bd->status);
	/* Save the skb pointer so we can free it later */
	priv->tx_skbuff[priv->skb_curtx] = skb;

	/* Update the current skb pointer (wrapping if this was the last) */
	priv->skb_curtx =
	    (priv->skb_curtx + 1) & TX_RING_MOD_MASK(TX_BD_RING_LEN);

	/* copy skb data to tx buffer for sdma processing */
	memcpy(priv->tx_buffer + (be32_to_cpu(bd->buf) - priv->dma_tx_addr),
	       skb->data, skb->len);

	/* set bd status and length */
	bd_status = (bd_status & T_W_S) | T_R_S | T_I_S | T_L_S | T_TC_S;

	iowrite16be(skb->len, &bd->length);
	iowrite16be(bd_status, &bd->status);

	/* Move to next BD in the ring */
	if (!(bd_status & T_W_S))
		bd += 1;
	else
		bd = priv->tx_bd_base;

	if (bd == priv->dirty_tx) {
		if (!netif_queue_stopped(dev))
			netif_stop_queue(dev);
	}

	priv->curtx_bd = bd;

	spin_unlock_irqrestore(&priv->lock, flags);

	return NETDEV_TX_OK;
}

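/* The ring is declared full when the producer pointer catches up with
 * dirty_tx, the oldest not-yet-completed BD; hdlc_tx_done() wakes the queue
 * again once it reclaims a buffer. netdev_sent_queue() here pairs with
 * netdev_completed_queue() there for byte queue limits accounting.
 */
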
static int hdlc_tx_restart(struct ucc_hdlc_private *priv)
{
	u32 cecr_subblock;

	cecr_subblock =
		ucc_fast_get_qe_cr_subblock(priv->ut_info->uf_info.ucc_num);

	qe_issue_cmd(QE_RESTART_TX, cecr_subblock,
		     QE_CR_PROTOCOL_UNSPECIFIED, 0);
	return 0;
}

static int hdlc_tx_done(struct ucc_hdlc_private *priv)
{
	/* Start from the next BD that should be filled */
	struct net_device *dev = priv->ndev;
	unsigned int bytes_sent = 0;
	int howmany = 0;
	struct qe_bd *bd;		/* BD pointer */
	u16 bd_status;
	int tx_restart = 0;

	bd = priv->dirty_tx;
	bd_status = ioread16be(&bd->status);

	/* Normal processing. */
	while ((bd_status & T_R_S) == 0) {
		struct sk_buff *skb;

		if (bd_status & T_UN_S) { /* Underrun */
			dev->stats.tx_fifo_errors++;
			tx_restart = 1;
		}
		if (bd_status & T_CT_S) { /* Carrier lost */
			dev->stats.tx_carrier_errors++;
			tx_restart = 1;
		}

		/* BD contains already transmitted buffer.   */
		/* Handle the transmitted buffer and release */
		/* the BD to be used with the current frame  */

		skb = priv->tx_skbuff[priv->skb_dirtytx];
		if (!skb)
			break;
		howmany++;
		bytes_sent += skb->len;
		dev->stats.tx_packets++;
		memset(priv->tx_buffer +
		       (be32_to_cpu(bd->buf) - priv->dma_tx_addr),
		       0, skb->len);
		dev_kfree_skb_irq(skb);

		priv->tx_skbuff[priv->skb_dirtytx] = NULL;
		priv->skb_dirtytx =
		    (priv->skb_dirtytx + 1) & TX_RING_MOD_MASK(TX_BD_RING_LEN);

		/* We freed a buffer, so now we can restart transmission */
		if (netif_queue_stopped(dev))
			netif_wake_queue(dev);

		/* Advance the confirmation BD pointer */
		if (!(bd_status & T_W_S))
			bd += 1;
		else
			bd = priv->tx_bd_base;
		bd_status = ioread16be(&bd->status);
	}
	priv->dirty_tx = bd;

	if (tx_restart)
		hdlc_tx_restart(priv);

	netdev_completed_queue(dev, howmany, bytes_sent);
	return 0;
}

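/* An underrun or lost carrier leaves the transmitter halted at the failed
 * BD, so after the reclaim loop the driver issues a QE_RESTART_TX command
 * (via hdlc_tx_restart()) to let the controller resume fetching descriptors.
 */
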
static int hdlc_rx_done(struct ucc_hdlc_private *priv, int rx_work_limit)
{
	struct net_device *dev = priv->ndev;
	struct sk_buff *skb = NULL;
	hdlc_device *hdlc = dev_to_hdlc(dev);
	struct qe_bd *bd;
	u16 bd_status;
	u16 length, howmany = 0;
	u8 *bdbuffer;

	bd = priv->currx_bd;
	bd_status = ioread16be(&bd->status);

	/* while there are received buffers and BD is full (~R_E) */
	while (!((bd_status & (R_E_S)) || (--rx_work_limit < 0))) {
		if (bd_status & (RX_BD_ERRORS)) {
			dev->stats.rx_errors++;

			if (bd_status & R_CD_S)
				dev->stats.collisions++;
			if (bd_status & R_OV_S)
				dev->stats.rx_fifo_errors++;
			if (bd_status & R_CR_S)
				dev->stats.rx_crc_errors++;
			if (bd_status & R_AB_S)
				dev->stats.rx_over_errors++;
			if (bd_status & R_NO_S)
				dev->stats.rx_frame_errors++;
			if (bd_status & R_LG_S)
				dev->stats.rx_length_errors++;

			goto recycle;
		}
		bdbuffer = priv->rx_buffer +
			(priv->currx_bdnum * MAX_RX_BUF_LENGTH);
		length = ioread16be(&bd->length);

		switch (dev->type) {
		case ARPHRD_RAWHDLC:
			bdbuffer += HDLC_HEAD_LEN;
			length -= (HDLC_HEAD_LEN + HDLC_CRC_SIZE);

			skb = dev_alloc_skb(length);
			if (!skb) {
				dev->stats.rx_dropped++;
				return -ENOMEM;
			}

			skb_put(skb, length);
			skb->len = length;
			skb->dev = dev;
			memcpy(skb->data, bdbuffer, length);
			break;

		case ARPHRD_PPP:
		case ARPHRD_ETHER:
			length -= HDLC_CRC_SIZE;

			skb = dev_alloc_skb(length);
			if (!skb) {
				dev->stats.rx_dropped++;
				return -ENOMEM;
			}

			skb_put(skb, length);
			skb->len = length;
			skb->dev = dev;
			memcpy(skb->data, bdbuffer, length);
			break;
		}

		dev->stats.rx_packets++;
		dev->stats.rx_bytes += skb->len;
		howmany++;
		if (hdlc->proto)
			skb->protocol = hdlc_type_trans(skb, dev);
		netif_receive_skb(skb);

recycle:
		iowrite16be((bd_status & R_W_S) | R_E_S | R_I_S, &bd->status);

		/* update to point at the next bd */
		if (bd_status & R_W_S) {
			priv->currx_bdnum = 0;
			bd = priv->rx_bd_base;
		} else {
			if (priv->currx_bdnum < (RX_BD_RING_LEN - 1))
				priv->currx_bdnum += 1;
			else
				priv->currx_bdnum = RX_BD_RING_LEN - 1;

			bd += 1;
		}

		bd_status = ioread16be(&bd->status);
	}

	priv->currx_bd = bd;
	return howmany;
}

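/* Receive is copy-based: the BD's DMA buffer is never handed to the stack.
 * A fresh skb is allocated per frame and the payload copied out, after which
 * the BD is immediately recycled (R_E set back) for the controller. The
 * howmany count feeds the NAPI budget accounting in ucc_hdlc_poll().
 */
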
static int ucc_hdlc_poll(struct napi_struct *napi, int budget)
{
	struct ucc_hdlc_private *priv = container_of(napi,
						     struct ucc_hdlc_private,
						     napi);
	int howmany;

	/* Tx event processing */
	spin_lock(&priv->lock);
	hdlc_tx_done(priv);
	spin_unlock(&priv->lock);

	howmany = 0;
	howmany += hdlc_rx_done(priv, budget - howmany);

	if (howmany < budget) {
		napi_complete_done(napi, howmany);
		qe_setbits32(priv->uccf->p_uccm,
			     (UCCE_HDLC_RX_EVENTS | UCCE_HDLC_TX_EVENTS) << 16);
	}

	return howmany;
}

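/* Interrupt mitigation follows the usual NAPI pattern: the IRQ handler below
 * masks RX/TX events in UCCM before scheduling the poll, and the poll
 * re-enables them (qe_setbits32 on p_uccm) only once it completes under
 * budget, so no events are lost in between.
 */
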
static irqreturn_t ucc_hdlc_irq_handler(int irq, void *dev_id)
{
	struct ucc_hdlc_private *priv = (struct ucc_hdlc_private *)dev_id;
	struct net_device *dev = priv->ndev;
	struct ucc_fast_private *uccf;
	struct ucc_tdm_info *ut_info;
	u32 ucce;
	u32 uccm;

	ut_info = priv->ut_info;
	uccf = priv->uccf;

	ucce = ioread32be(uccf->p_ucce);
	uccm = ioread32be(uccf->p_uccm);
	ucce &= uccm;
	iowrite32be(ucce, uccf->p_ucce);
	if (!ucce)
		return IRQ_NONE;

	if ((ucce >> 16) & (UCCE_HDLC_RX_EVENTS | UCCE_HDLC_TX_EVENTS)) {
		if (napi_schedule_prep(&priv->napi)) {
			uccm &= ~((UCCE_HDLC_RX_EVENTS | UCCE_HDLC_TX_EVENTS)
				  << 16);
			iowrite32be(uccm, uccf->p_uccm);
			__napi_schedule(&priv->napi);
		}
	}

	/* Errors and other events */
	if (ucce >> 16 & UCC_HDLC_UCCE_BSY)
		dev->stats.rx_missed_errors++;
	if (ucce >> 16 & UCC_HDLC_UCCE_TXE)
		dev->stats.tx_errors++;

	return IRQ_HANDLED;
}

static int uhdlc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	const size_t size = sizeof(te1_settings);
	te1_settings line;
	struct ucc_hdlc_private *priv = netdev_priv(dev);

	if (cmd != SIOCWANDEV)
		return hdlc_ioctl(dev, ifr, cmd);

	switch (ifr->ifr_settings.type) {
	case IF_GET_IFACE:
		ifr->ifr_settings.type = IF_IFACE_E1;
		if (ifr->ifr_settings.size < size) {
			ifr->ifr_settings.size = size; /* data size wanted */
			return -ENOBUFS;
		}
		memset(&line, 0, sizeof(line));
		line.clock_type = priv->clocking;

		if (copy_to_user(ifr->ifr_settings.ifs_ifsu.sync, &line, size))
			return -EFAULT;
		return 0;

	default:
		return hdlc_ioctl(dev, ifr, cmd);
	}
}

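/* Only IF_GET_IFACE is handled locally (reporting an E1 interface with the
 * stored clocking mode); everything else, including protocol attachment,
 * is deferred to the generic hdlc_ioctl() layer.
 */
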
static int uhdlc_open(struct net_device *dev)
{
	u32 cecr_subblock;
	hdlc_device *hdlc = dev_to_hdlc(dev);
	struct ucc_hdlc_private *priv = hdlc->priv;
	struct ucc_tdm *utdm = priv->utdm;

	if (priv->hdlc_busy != 1) {
		if (request_irq(priv->ut_info->uf_info.irq,
				ucc_hdlc_irq_handler, 0, "hdlc", priv))
			return -ENODEV;

		cecr_subblock = ucc_fast_get_qe_cr_subblock(
					priv->ut_info->uf_info.ucc_num);

		qe_issue_cmd(QE_INIT_TX_RX, cecr_subblock,
			     QE_CR_PROTOCOL_UNSPECIFIED, 0);

		ucc_fast_enable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX);

		/* Enable the TDM port */
		if (priv->tsa)
			utdm->si_regs->siglmr1_h |= (0x1 << utdm->tdm_port);

		priv->hdlc_busy = 1;
		netif_device_attach(priv->ndev);
		napi_enable(&priv->napi);
		netdev_reset_queue(dev);
		netif_start_queue(dev);
		hdlc_open(dev);
	}

	return 0;
}

static void uhdlc_memclean(struct ucc_hdlc_private *priv)
{
	qe_muram_free(ioread16be(&priv->ucc_pram->riptr));
	qe_muram_free(ioread16be(&priv->ucc_pram->tiptr));

	if (priv->rx_bd_base) {
		dma_free_coherent(priv->dev,
				  RX_BD_RING_LEN * sizeof(struct qe_bd),
				  priv->rx_bd_base, priv->dma_rx_bd);

		priv->rx_bd_base = NULL;
		priv->dma_rx_bd = 0;
	}

	if (priv->tx_bd_base) {
		dma_free_coherent(priv->dev,
				  TX_BD_RING_LEN * sizeof(struct qe_bd),
				  priv->tx_bd_base, priv->dma_tx_bd);

		priv->tx_bd_base = NULL;
		priv->dma_tx_bd = 0;
	}

	if (priv->ucc_pram) {
		qe_muram_free(priv->ucc_pram_offset);
		priv->ucc_pram = NULL;
		priv->ucc_pram_offset = 0;
	}

	kfree(priv->rx_skbuff);
	priv->rx_skbuff = NULL;

	kfree(priv->tx_skbuff);
	priv->tx_skbuff = NULL;

	if (priv->uf_regs) {
		iounmap(priv->uf_regs);
		priv->uf_regs = NULL;
	}

	if (priv->uccf) {
		ucc_fast_free(priv->uccf);
		priv->uccf = NULL;
	}

	if (priv->rx_buffer) {
		dma_free_coherent(priv->dev,
				  RX_BD_RING_LEN * MAX_RX_BUF_LENGTH,
				  priv->rx_buffer, priv->dma_rx_addr);
		priv->rx_buffer = NULL;
		priv->dma_rx_addr = 0;
	}

	if (priv->tx_buffer) {
		dma_free_coherent(priv->dev,
				  TX_BD_RING_LEN * MAX_RX_BUF_LENGTH,
				  priv->tx_buffer, priv->dma_tx_addr);
		priv->tx_buffer = NULL;
		priv->dma_tx_addr = 0;
	}
}

static int uhdlc_close(struct net_device *dev)
{
	struct ucc_hdlc_private *priv = dev_to_hdlc(dev)->priv;
	struct ucc_tdm *utdm = priv->utdm;
	u32 cecr_subblock;

	napi_disable(&priv->napi);
	cecr_subblock = ucc_fast_get_qe_cr_subblock(
				priv->ut_info->uf_info.ucc_num);

	qe_issue_cmd(QE_GRACEFUL_STOP_TX, cecr_subblock,
		     (u8)QE_CR_PROTOCOL_UNSPECIFIED, 0);
	qe_issue_cmd(QE_CLOSE_RX_BD, cecr_subblock,
		     (u8)QE_CR_PROTOCOL_UNSPECIFIED, 0);

	/* Disable the TDM port */
	if (priv->tsa)
		utdm->si_regs->siglmr1_h &= ~(0x1 << utdm->tdm_port);

	ucc_fast_disable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX);

	free_irq(priv->ut_info->uf_info.irq, priv);
	netif_stop_queue(dev);
	netdev_reset_queue(dev);
	priv->hdlc_busy = 0;

	return 0;
}

static int ucc_hdlc_attach(struct net_device *dev, unsigned short encoding,
			   unsigned short parity)
{
	struct ucc_hdlc_private *priv = dev_to_hdlc(dev)->priv;

	if (encoding != ENCODING_NRZ &&
	    encoding != ENCODING_NRZI)
		return -EINVAL;

	if (parity != PARITY_NONE &&
	    parity != PARITY_CRC32_PR1_CCITT &&
	    parity != PARITY_CRC16_PR0_CCITT &&
	    parity != PARITY_CRC16_PR1_CCITT)
		return -EINVAL;

	priv->encoding = encoding;
	priv->parity = parity;

	return 0;
}

#ifdef CONFIG_PM
static void store_clk_config(struct ucc_hdlc_private *priv)
{
	struct qe_mux *qe_mux_reg = &qe_immr->qmx;

	/* store si clk */
	priv->cmxsi1cr_h = ioread32be(&qe_mux_reg->cmxsi1cr_h);
	priv->cmxsi1cr_l = ioread32be(&qe_mux_reg->cmxsi1cr_l);

	/* store si sync */
	priv->cmxsi1syr = ioread32be(&qe_mux_reg->cmxsi1syr);

	/* store ucc clk */
	memcpy_fromio(priv->cmxucr, qe_mux_reg->cmxucr, 4 * sizeof(u32));
}

static void resume_clk_config(struct ucc_hdlc_private *priv)
{
	struct qe_mux *qe_mux_reg = &qe_immr->qmx;

	memcpy_toio(qe_mux_reg->cmxucr, priv->cmxucr, 4 * sizeof(u32));

	iowrite32be(priv->cmxsi1cr_h, &qe_mux_reg->cmxsi1cr_h);
	iowrite32be(priv->cmxsi1cr_l, &qe_mux_reg->cmxsi1cr_l);

	iowrite32be(priv->cmxsi1syr, &qe_mux_reg->cmxsi1syr);
}

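/* Suspend may power down the QE block, so state the driver programmed
 * outside its own parameter RAM backup (the SI/UCC clock-route muxing saved
 * here, plus GUMR and the virtual FIFO registers handled in uhdlc_suspend()
 * and uhdlc_resume()) has to be captured and replayed by hand.
 */
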
static int uhdlc_suspend(struct device *dev)
{
	struct ucc_hdlc_private *priv = dev_get_drvdata(dev);
	struct ucc_tdm_info *ut_info;
	struct ucc_fast __iomem *uf_regs;

	if (!priv)
		return -EINVAL;

	if (!netif_running(priv->ndev))
		return 0;

	netif_device_detach(priv->ndev);
	napi_disable(&priv->napi);

	ut_info = priv->ut_info;
	uf_regs = priv->uf_regs;

	/* backup gumr guemr */
	priv->gumr = ioread32be(&uf_regs->gumr);
	priv->guemr = ioread8(&uf_regs->guemr);

	priv->ucc_pram_bak = kmalloc(sizeof(*priv->ucc_pram_bak),
				     GFP_KERNEL);
	if (!priv->ucc_pram_bak)
		return -ENOMEM;

	/* backup HDLC parameter */
	memcpy_fromio(priv->ucc_pram_bak, priv->ucc_pram,
		      sizeof(struct ucc_hdlc_param));

	/* store the clk configuration */
	store_clk_config(priv);

	/* save power */
	ucc_fast_disable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX);

	return 0;
}

static int uhdlc_resume(struct device *dev)
{
	struct ucc_hdlc_private *priv = dev_get_drvdata(dev);
	struct ucc_tdm *utdm;
	struct ucc_tdm_info *ut_info;
	struct ucc_fast __iomem *uf_regs;
	struct ucc_fast_private *uccf;
	struct ucc_fast_info *uf_info;
	int ret, i;
	u32 cecr_subblock;
	u16 bd_status;

	if (!priv)
		return -EINVAL;

	if (!netif_running(priv->ndev))
		return 0;

	utdm = priv->utdm;
	ut_info = priv->ut_info;
	uf_info = &ut_info->uf_info;
	uf_regs = priv->uf_regs;
	uccf = priv->uccf;

	/* restore gumr guemr */
	iowrite8(priv->guemr, &uf_regs->guemr);
	iowrite32be(priv->gumr, &uf_regs->gumr);

	/* Set Virtual Fifo registers */
	iowrite16be(uf_info->urfs, &uf_regs->urfs);
	iowrite16be(uf_info->urfet, &uf_regs->urfet);
	iowrite16be(uf_info->urfset, &uf_regs->urfset);
	iowrite16be(uf_info->utfs, &uf_regs->utfs);
	iowrite16be(uf_info->utfet, &uf_regs->utfet);
	iowrite16be(uf_info->utftt, &uf_regs->utftt);
	/* utfb, urfb are offsets from MURAM base */
	iowrite32be(uccf->ucc_fast_tx_virtual_fifo_base_offset, &uf_regs->utfb);
	iowrite32be(uccf->ucc_fast_rx_virtual_fifo_base_offset, &uf_regs->urfb);

	/* Rx Tx and sync clock routing */
	resume_clk_config(priv);

	iowrite32be(uf_info->uccm_mask, &uf_regs->uccm);
	iowrite32be(0xffffffff, &uf_regs->ucce);

	ucc_fast_disable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX);

	/* rebuild SIRAM */
	if (priv->tsa)
		ucc_tdm_init(priv->utdm, priv->ut_info);

	/* Write to QE CECR, UCCx channel to Stop Transmission */
	cecr_subblock = ucc_fast_get_qe_cr_subblock(uf_info->ucc_num);
	ret = qe_issue_cmd(QE_STOP_TX, cecr_subblock,
			   (u8)QE_CR_PROTOCOL_UNSPECIFIED, 0);

	/* Set UPSMR normal mode */
	iowrite32be(0, &uf_regs->upsmr);

	/* init parameter base */
	cecr_subblock = ucc_fast_get_qe_cr_subblock(uf_info->ucc_num);
	ret = qe_issue_cmd(QE_ASSIGN_PAGE_TO_DEVICE, cecr_subblock,
			   QE_CR_PROTOCOL_UNSPECIFIED, priv->ucc_pram_offset);

	priv->ucc_pram = (struct ucc_hdlc_param __iomem *)
				qe_muram_addr(priv->ucc_pram_offset);

	/* restore ucc parameter */
	memcpy_toio(priv->ucc_pram, priv->ucc_pram_bak,
		    sizeof(struct ucc_hdlc_param));
	kfree(priv->ucc_pram_bak);

	/* rebuild BD entry */
	for (i = 0; i < RX_BD_RING_LEN; i++) {
		if (i < (RX_BD_RING_LEN - 1))
			bd_status = R_E_S | R_I_S;
		else
			bd_status = R_E_S | R_I_S | R_W_S;

		iowrite16be(bd_status, &priv->rx_bd_base[i].status);
		iowrite32be(priv->dma_rx_addr + i * MAX_RX_BUF_LENGTH,
			    &priv->rx_bd_base[i].buf);
	}

	for (i = 0; i < TX_BD_RING_LEN; i++) {
		if (i < (TX_BD_RING_LEN - 1))
			bd_status = T_I_S | T_TC_S;
		else
			bd_status = T_I_S | T_TC_S | T_W_S;

		iowrite16be(bd_status, &priv->tx_bd_base[i].status);
		iowrite32be(priv->dma_tx_addr + i * MAX_RX_BUF_LENGTH,
			    &priv->tx_bd_base[i].buf);
	}

	/* if hdlc is busy enable TX and RX */
	if (priv->hdlc_busy == 1) {
		cecr_subblock = ucc_fast_get_qe_cr_subblock(
					priv->ut_info->uf_info.ucc_num);

		qe_issue_cmd(QE_INIT_TX_RX, cecr_subblock,
			     (u8)QE_CR_PROTOCOL_UNSPECIFIED, 0);

		ucc_fast_enable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX);

		/* Enable the TDM port */
		if (priv->tsa)
			utdm->si_regs->siglmr1_h |= (0x1 << utdm->tdm_port);
	}

	napi_enable(&priv->napi);
	netif_device_attach(priv->ndev);

	return 0;
}

static const struct dev_pm_ops uhdlc_pm_ops = {
	.suspend = uhdlc_suspend,
	.resume = uhdlc_resume,
	.freeze = uhdlc_suspend,
	.thaw = uhdlc_resume,
};

#define HDLC_PM_OPS (&uhdlc_pm_ops)

#else

#define HDLC_PM_OPS NULL

#endif

static void uhdlc_tx_timeout(struct net_device *ndev)
{
	netdev_err(ndev, "%s\n", __func__);
}

static const struct net_device_ops uhdlc_ops = {
	.ndo_open	= uhdlc_open,
	.ndo_stop	= uhdlc_close,
	.ndo_start_xmit	= hdlc_start_xmit,
	.ndo_do_ioctl	= uhdlc_ioctl,
	.ndo_tx_timeout	= uhdlc_tx_timeout,
};

static int hdlc_map_iomem(char *name, int init_flag, void __iomem **ptr)
{
	struct device_node *np;
	struct platform_device *pdev;
	struct resource *res;
	static int siram_init_flag;
	int ret = 0;

	np = of_find_compatible_node(NULL, NULL, name);
	if (!np)
		return -EINVAL;

	pdev = of_find_device_by_node(np);
	if (!pdev) {
		pr_err("%pOFn: failed to lookup pdev\n", np);
		of_node_put(np);
		return -EINVAL;
	}

	of_node_put(np);
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		ret = -EINVAL;
		goto error_put_device;
	}
	*ptr = ioremap(res->start, resource_size(res));
	if (!*ptr) {
		ret = -ENOMEM;
		goto error_put_device;
	}

	/* We've remapped the addresses, and we don't need the device any
	 * more, so we should release it.
	 */
	put_device(&pdev->dev);

	if (init_flag && siram_init_flag == 0) {
		memset_io(*ptr, 0, resource_size(res));
		siram_init_flag = 1;
	}

	return 0;

error_put_device:
	put_device(&pdev->dev);

	return ret;
}

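/* siram_init_flag is function-static on purpose: the SI RAM is shared across
 * all HDLC-capable UCCs, so it is zeroed only on the first mapping request
 * with init_flag set (the "fsl,t1040-qe-siram" caller in ucc_hdlc_probe()).
 */
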
static int ucc_hdlc_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct ucc_hdlc_private *uhdlc_priv = NULL;
	struct ucc_tdm_info *ut_info;
	struct ucc_tdm *utdm = NULL;
	struct resource res;
	struct net_device *dev;
	hdlc_device *hdlc;
	int ucc_num;
	const char *sprop;
	int ret;
	u32 val;

	ret = of_property_read_u32_index(np, "cell-index", 0, &val);
	if (ret) {
		dev_err(&pdev->dev, "Invalid ucc property\n");
		return -ENODEV;
	}

	ucc_num = val - 1;
	if (ucc_num > (UCC_MAX_NUM - 1) || ucc_num < 0) {
		dev_err(&pdev->dev, ": Invalid UCC num\n");
		return -EINVAL;
	}

	memcpy(&utdm_info[ucc_num], &utdm_primary_info,
	       sizeof(utdm_primary_info));

	ut_info = &utdm_info[ucc_num];
	ut_info->uf_info.ucc_num = ucc_num;

	sprop = of_get_property(np, "rx-clock-name", NULL);
	if (sprop) {
		ut_info->uf_info.rx_clock = qe_clock_source(sprop);
		if ((ut_info->uf_info.rx_clock < QE_CLK_NONE) ||
		    (ut_info->uf_info.rx_clock > QE_CLK24)) {
			dev_err(&pdev->dev, "Invalid rx-clock-name property\n");
			return -EINVAL;
		}
	} else {
		dev_err(&pdev->dev, "Invalid rx-clock-name property\n");
		return -EINVAL;
	}

	sprop = of_get_property(np, "tx-clock-name", NULL);
	if (sprop) {
		ut_info->uf_info.tx_clock = qe_clock_source(sprop);
		if ((ut_info->uf_info.tx_clock < QE_CLK_NONE) ||
		    (ut_info->uf_info.tx_clock > QE_CLK24)) {
			dev_err(&pdev->dev, "Invalid tx-clock-name property\n");
			return -EINVAL;
		}
	} else {
		dev_err(&pdev->dev, "Invalid tx-clock-name property\n");
		return -EINVAL;
	}

	ret = of_address_to_resource(np, 0, &res);
	if (ret)
		return -EINVAL;

	ut_info->uf_info.regs = res.start;
	ut_info->uf_info.irq = irq_of_parse_and_map(np, 0);

	uhdlc_priv = kzalloc(sizeof(*uhdlc_priv), GFP_KERNEL);
	if (!uhdlc_priv)
		return -ENOMEM;

	dev_set_drvdata(&pdev->dev, uhdlc_priv);
	uhdlc_priv->dev = &pdev->dev;
	uhdlc_priv->ut_info = ut_info;

	if (of_get_property(np, "fsl,tdm-interface", NULL))
		uhdlc_priv->tsa = 1;

	if (of_get_property(np, "fsl,ucc-internal-loopback", NULL))
		uhdlc_priv->loopback = 1;

	if (of_get_property(np, "fsl,hdlc-bus", NULL))
		uhdlc_priv->hdlc_bus = 1;

	if (uhdlc_priv->tsa == 1) {
		utdm = kzalloc(sizeof(*utdm), GFP_KERNEL);
		if (!utdm) {
			ret = -ENOMEM;
			dev_err(&pdev->dev, "No mem to alloc ucc tdm data\n");
			goto free_uhdlc_priv;
		}
		uhdlc_priv->utdm = utdm;
		ret = ucc_of_parse_tdm(np, utdm, ut_info);
		if (ret)
			goto free_utdm;

		ret = hdlc_map_iomem("fsl,t1040-qe-si", 0,
				     (void __iomem **)&utdm->si_regs);
		if (ret)
			goto free_utdm;

		ret = hdlc_map_iomem("fsl,t1040-qe-siram", 1,
				     (void __iomem **)&utdm->siram);
		if (ret)
			goto unmap_si_regs;
	}

	if (of_property_read_u16(np, "fsl,hmask", &uhdlc_priv->hmask))
		uhdlc_priv->hmask = DEFAULT_ADDR_MASK;

	ret = uhdlc_init(uhdlc_priv);
	if (ret) {
		dev_err(&pdev->dev, "Failed to init uhdlc\n");
		goto undo_uhdlc_init;
	}

	dev = alloc_hdlcdev(uhdlc_priv);
	if (!dev) {
		ret = -ENOMEM;
		pr_err("ucc_hdlc: unable to allocate memory\n");
		goto undo_uhdlc_init;
	}

	uhdlc_priv->ndev = dev;
	hdlc = dev_to_hdlc(dev);
	dev->tx_queue_len = 16;
	dev->netdev_ops = &uhdlc_ops;
	dev->watchdog_timeo = 2 * HZ;
	hdlc->attach = ucc_hdlc_attach;
	hdlc->xmit = ucc_hdlc_tx;
	netif_napi_add(dev, &uhdlc_priv->napi, ucc_hdlc_poll, 32);
	if (register_hdlc_device(dev)) {
		ret = -ENOBUFS;
		pr_err("ucc_hdlc: unable to register hdlc device\n");
		goto free_dev;
	}

	return 0;

free_dev:
	free_netdev(dev);
undo_uhdlc_init:
	if (uhdlc_priv->tsa)
		iounmap(utdm->siram);
unmap_si_regs:
	if (uhdlc_priv->tsa)
		iounmap(utdm->si_regs);
free_utdm:
	if (uhdlc_priv->tsa)
		kfree(utdm);
free_uhdlc_priv:
	kfree(uhdlc_priv);

	return ret;
}

static int ucc_hdlc_remove(struct platform_device *pdev)
{
	struct ucc_hdlc_private *priv = dev_get_drvdata(&pdev->dev);

	uhdlc_memclean(priv);

	if (priv->utdm->si_regs) {
		iounmap(priv->utdm->si_regs);
		priv->utdm->si_regs = NULL;
	}

	if (priv->utdm->siram) {
		iounmap(priv->utdm->siram);
		priv->utdm->siram = NULL;
	}

	dev_info(&pdev->dev, "UCC based hdlc module removed\n");

	return 0;
}

static const struct of_device_id fsl_ucc_hdlc_of_match[] = {
	{
	.compatible = "fsl,ucc-hdlc",
	},
	{},
};

MODULE_DEVICE_TABLE(of, fsl_ucc_hdlc_of_match);

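/* Illustrative device tree node for this match table. The property names
 * are the ones ucc_hdlc_probe() actually parses; the node name, unit
 * address and clock names below are made-up placeholders:
 *
 *	ucc@2000 {
 *		compatible = "fsl,ucc-hdlc";
 *		cell-index = <1>;
 *		rx-clock-name = "brg1";
 *		tx-clock-name = "brg1";
 *		fsl,tdm-interface;
 *	};
 */
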
static struct platform_driver ucc_hdlc_driver = {
	.probe	= ucc_hdlc_probe,
	.remove	= ucc_hdlc_remove,
	.driver	= {
		.name		= DRV_NAME,
		.pm		= HDLC_PM_OPS,
		.of_match_table	= fsl_ucc_hdlc_of_match,
	},
};

module_platform_driver(ucc_hdlc_driver);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION(DRV_DESC);