// SPDX-License-Identifier: GPL-2.0+
/*
 * Driver for AMBA serial ports
 *
 * Based on drivers/char/serial.c, by Linus Torvalds, Theodore Ts'o.
 *
 * Copyright 1999 ARM Limited
 * Copyright (C) 2000 Deep Blue Solutions Ltd.
 * Copyright (C) 2010 ST-Ericsson SA
 *
 * This is a generic driver for ARM AMBA-type serial ports. They
 * have a lot of 16550-like features, but are not register compatible.
 * Note that although they do have CTS, DCD and DSR inputs, they do
 * not have an RI input, nor do they have DTR or RTS outputs. If
 * required, these have to be supplied via some other means (eg, GPIO)
 * and hooked into this driver.
 */
#if defined(CONFIG_SERIAL_AMBA_PL011_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
#define SUPPORT_SYSRQ
#endif
24 #include <linux/module.h>
25 #include <linux/ioport.h>
26 #include <linux/init.h>
27 #include <linux/console.h>
28 #include <linux/sysrq.h>
29 #include <linux/device.h>
30 #include <linux/tty.h>
31 #include <linux/tty_flip.h>
32 #include <linux/serial_core.h>
33 #include <linux/serial.h>
34 #include <linux/amba/bus.h>
35 #include <linux/amba/serial.h>
36 #include <linux/clk.h>
37 #include <linux/slab.h>
38 #include <linux/dmaengine.h>
39 #include <linux/dma-mapping.h>
40 #include <linux/scatterlist.h>
41 #include <linux/delay.h>
42 #include <linux/types.h>
44 #include <linux/of_device.h>
45 #include <linux/pinctrl/consumer.h>
46 #include <linux/sizes.h>
48 #include <linux/acpi.h>
50 #include "amba-pl011.h"
54 #define SERIAL_AMBA_MAJOR 204
55 #define SERIAL_AMBA_MINOR 64
56 #define SERIAL_AMBA_NR UART_NR
58 #define AMBA_ISR_PASS_LIMIT 256
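/*
 * Upper bound on the number of passes the interrupt handler (pl011_int()
 * below) makes over the interrupt status before bailing out; it keeps a
 * permanently asserted interrupt source from livelocking the handler.
 */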
60 #define UART_DR_ERROR (UART011_DR_OE|UART011_DR_BE|UART011_DR_PE|UART011_DR_FE)
61 #define UART_DUMMY_DR_RX (1 << 16)
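/*
 * UART_DUMMY_DR_RX sits above both the data bits (0-7) and the error bits
 * (8-11) of the DR register. It is OR'd into every character taken from the
 * FIFO so that pl011_setup_status_masks() can put it into ignore_status_mask
 * when CREAD is clear, causing every received character to be discarded.
 */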
63 static u16 pl011_std_offsets[REG_ARRAY_SIZE] = {
64 [REG_DR] = UART01x_DR,
65 [REG_FR] = UART01x_FR,
66 [REG_LCRH_RX] = UART011_LCRH,
67 [REG_LCRH_TX] = UART011_LCRH,
68 [REG_IBRD] = UART011_IBRD,
69 [REG_FBRD] = UART011_FBRD,
70 [REG_CR] = UART011_CR,
71 [REG_IFLS] = UART011_IFLS,
72 [REG_IMSC] = UART011_IMSC,
73 [REG_RIS] = UART011_RIS,
74 [REG_MIS] = UART011_MIS,
75 [REG_ICR] = UART011_ICR,
	[REG_DMACR] = UART011_DMACR,
};
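/*
 * Register-offset tables such as pl011_std_offsets above map the driver's
 * logical register indices (REG_*) to the physical offsets a particular
 * vendor implementation uses; pl011_read()/pl011_write() index into the
 * active table so the rest of the driver stays vendor-agnostic.
 */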
/* There is by now at least one vendor with differing details, so handle it */
struct vendor_data {
	const u16		*reg_offset;
	unsigned int		ifls;
	unsigned int		fr_busy;
	unsigned int		fr_dsr;
	unsigned int		fr_cts;
	unsigned int		fr_ri;
	unsigned int		inv_fr;
	bool			oversampling;
	bool			dma_threshold;
	bool			cts_event_workaround;
	bool			always_enabled;
	bool			fixed_options;

	unsigned int (*get_fifosize)(struct amba_device *dev);
};
static unsigned int get_fifosize_arm(struct amba_device *dev)
{
	return amba_rev(dev) < 3 ? 16 : 32;
}
103 static struct vendor_data vendor_arm = {
104 .reg_offset = pl011_std_offsets,
105 .ifls = UART011_IFLS_RX4_8|UART011_IFLS_TX4_8,
106 .fr_busy = UART01x_FR_BUSY,
107 .fr_dsr = UART01x_FR_DSR,
108 .fr_cts = UART01x_FR_CTS,
109 .fr_ri = UART011_FR_RI,
110 .oversampling = false,
111 .dma_threshold = false,
112 .cts_event_workaround = false,
113 .always_enabled = false,
114 .fixed_options = false,
	.get_fifosize = get_fifosize_arm,
};
118 static const struct vendor_data vendor_sbsa = {
119 .reg_offset = pl011_std_offsets,
120 .fr_busy = UART01x_FR_BUSY,
121 .fr_dsr = UART01x_FR_DSR,
122 .fr_cts = UART01x_FR_CTS,
123 .fr_ri = UART011_FR_RI,
125 .oversampling = false,
126 .dma_threshold = false,
127 .cts_event_workaround = false,
128 .always_enabled = true,
	.fixed_options = true,
};
132 #ifdef CONFIG_ACPI_SPCR_TABLE
133 static const struct vendor_data vendor_qdt_qdf2400_e44 = {
134 .reg_offset = pl011_std_offsets,
135 .fr_busy = UART011_FR_TXFE,
136 .fr_dsr = UART01x_FR_DSR,
137 .fr_cts = UART01x_FR_CTS,
138 .fr_ri = UART011_FR_RI,
139 .inv_fr = UART011_FR_TXFE,
141 .oversampling = false,
142 .dma_threshold = false,
143 .cts_event_workaround = false,
144 .always_enabled = true,
	.fixed_options = true,
};
#endif /* CONFIG_ACPI_SPCR_TABLE */
149 static u16 pl011_st_offsets[REG_ARRAY_SIZE] = {
150 [REG_DR] = UART01x_DR,
151 [REG_ST_DMAWM] = ST_UART011_DMAWM,
152 [REG_ST_TIMEOUT] = ST_UART011_TIMEOUT,
153 [REG_FR] = UART01x_FR,
154 [REG_LCRH_RX] = ST_UART011_LCRH_RX,
155 [REG_LCRH_TX] = ST_UART011_LCRH_TX,
156 [REG_IBRD] = UART011_IBRD,
157 [REG_FBRD] = UART011_FBRD,
158 [REG_CR] = UART011_CR,
159 [REG_IFLS] = UART011_IFLS,
160 [REG_IMSC] = UART011_IMSC,
161 [REG_RIS] = UART011_RIS,
162 [REG_MIS] = UART011_MIS,
163 [REG_ICR] = UART011_ICR,
164 [REG_DMACR] = UART011_DMACR,
165 [REG_ST_XFCR] = ST_UART011_XFCR,
166 [REG_ST_XON1] = ST_UART011_XON1,
167 [REG_ST_XON2] = ST_UART011_XON2,
168 [REG_ST_XOFF1] = ST_UART011_XOFF1,
169 [REG_ST_XOFF2] = ST_UART011_XOFF2,
170 [REG_ST_ITCR] = ST_UART011_ITCR,
171 [REG_ST_ITIP] = ST_UART011_ITIP,
172 [REG_ST_ABCR] = ST_UART011_ABCR,
	[REG_ST_ABIMSC] = ST_UART011_ABIMSC,
};
static unsigned int get_fifosize_st(struct amba_device *dev)
{
	return 64;
}
181 static struct vendor_data vendor_st = {
182 .reg_offset = pl011_st_offsets,
183 .ifls = UART011_IFLS_RX_HALF|UART011_IFLS_TX_HALF,
184 .fr_busy = UART01x_FR_BUSY,
185 .fr_dsr = UART01x_FR_DSR,
186 .fr_cts = UART01x_FR_CTS,
187 .fr_ri = UART011_FR_RI,
188 .oversampling = true,
189 .dma_threshold = true,
190 .cts_event_workaround = true,
191 .always_enabled = false,
192 .fixed_options = false,
	.get_fifosize = get_fifosize_st,
};
196 static const u16 pl011_zte_offsets[REG_ARRAY_SIZE] = {
197 [REG_DR] = ZX_UART011_DR,
198 [REG_FR] = ZX_UART011_FR,
199 [REG_LCRH_RX] = ZX_UART011_LCRH,
200 [REG_LCRH_TX] = ZX_UART011_LCRH,
201 [REG_IBRD] = ZX_UART011_IBRD,
202 [REG_FBRD] = ZX_UART011_FBRD,
203 [REG_CR] = ZX_UART011_CR,
204 [REG_IFLS] = ZX_UART011_IFLS,
205 [REG_IMSC] = ZX_UART011_IMSC,
206 [REG_RIS] = ZX_UART011_RIS,
207 [REG_MIS] = ZX_UART011_MIS,
208 [REG_ICR] = ZX_UART011_ICR,
	[REG_DMACR] = ZX_UART011_DMACR,
};
static unsigned int get_fifosize_zte(struct amba_device *dev)
{
	return 16;
}
217 static struct vendor_data vendor_zte = {
218 .reg_offset = pl011_zte_offsets,
220 .ifls = UART011_IFLS_RX4_8|UART011_IFLS_TX4_8,
221 .fr_busy = ZX_UART01x_FR_BUSY,
222 .fr_dsr = ZX_UART01x_FR_DSR,
223 .fr_cts = ZX_UART01x_FR_CTS,
224 .fr_ri = ZX_UART011_FR_RI,
	.get_fifosize = get_fifosize_zte,
};
/* Deals with DMA transactions */
struct pl011_sgbuf {
	struct scatterlist sg;
	char *buf;
};
struct pl011_dmarx_data {
	struct dma_chan		*chan;
	struct completion	complete;
	bool			use_buf_b;
	struct pl011_sgbuf	sgbuf_a;
	struct pl011_sgbuf	sgbuf_b;
	dma_cookie_t		cookie;
	bool			running;
	struct timer_list	timer;
	unsigned int last_residue;
	unsigned long last_jiffies;
	bool auto_poll_rate;
	unsigned int poll_rate;
	unsigned int poll_timeout;
};
struct pl011_dmatx_data {
	struct dma_chan		*chan;
	struct scatterlist	sg;
	char			*buf;
	bool			queued;
};
/*
 * We wrap our port structure around the generic uart_port.
 */
struct uart_amba_port {
	struct uart_port	port;
	const u16		*reg_offset;
	struct clk		*clk;
	const struct vendor_data *vendor;
	unsigned int		dmacr;		/* dma control reg */
	unsigned int		im;		/* interrupt mask */
	unsigned int		old_status;
	unsigned int		fifosize;	/* vendor-specific */
	unsigned int		old_cr;		/* state during shutdown */
	unsigned int		fixed_baud;	/* vendor-set fixed baud rate */
	char			type[12];
#ifdef CONFIG_DMA_ENGINE
	/* DMA stuff */
	bool			using_tx_dma;
	bool			using_rx_dma;
	struct pl011_dmarx_data dmarx;
	struct pl011_dmatx_data	dmatx;
	bool			dma_probed;
#endif
};
static unsigned int pl011_reg_to_offset(const struct uart_amba_port *uap,
	unsigned int reg)
{
	return uap->reg_offset[reg];
}
static unsigned int pl011_read(const struct uart_amba_port *uap,
	unsigned int reg)
{
	void __iomem *addr = uap->port.membase + pl011_reg_to_offset(uap, reg);

	return (uap->port.iotype == UPIO_MEM32) ?
		readl_relaxed(addr) : readw_relaxed(addr);
}
static void pl011_write(unsigned int val, const struct uart_amba_port *uap,
	unsigned int reg)
{
	void __iomem *addr = uap->port.membase + pl011_reg_to_offset(uap, reg);

	if (uap->port.iotype == UPIO_MEM32)
		writel_relaxed(val, addr);
	else
		writew_relaxed(val, addr);
}
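/*
 * The _relaxed MMIO accessors are sufficient here: accesses to a given port
 * are already ordered against each other by the device memory type and any
 * cross-CPU ordering comes from the port spinlock in the callers, so the
 * full barriers of readl()/writel() would only add cost. UPIO_MEM32 covers
 * implementations whose registers must be accessed as 32-bit words (e.g. the
 * ZTE variant handled above).
 */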
/*
 * Reads up to 256 characters from the FIFO or until it's empty and
 * inserts them into the TTY layer. Returns the number of characters
 * read from the FIFO.
 */
static int pl011_fifo_to_tty(struct uart_amba_port *uap)
{
	unsigned int ch, flag, max_count = 256;
	unsigned int status;
	int fifotaken = 0;

	while (max_count--) {
		status = pl011_read(uap, REG_FR);
		if (status & UART01x_FR_RXFE)
			break;

		/* Take chars from the FIFO and update status */
		ch = pl011_read(uap, REG_DR) | UART_DUMMY_DR_RX;
		flag = TTY_NORMAL;
		uap->port.icount.rx++;
		fifotaken++;

		if (unlikely(ch & UART_DR_ERROR)) {
			if (ch & UART011_DR_BE) {
				ch &= ~(UART011_DR_FE | UART011_DR_PE);
				uap->port.icount.brk++;
				if (uart_handle_break(&uap->port))
					continue;
			} else if (ch & UART011_DR_PE)
				uap->port.icount.parity++;
			else if (ch & UART011_DR_FE)
				uap->port.icount.frame++;
			if (ch & UART011_DR_OE)
				uap->port.icount.overrun++;

			ch &= uap->port.read_status_mask;

			if (ch & UART011_DR_BE)
				flag = TTY_BREAK;
			else if (ch & UART011_DR_PE)
				flag = TTY_PARITY;
			else if (ch & UART011_DR_FE)
				flag = TTY_FRAME;
		}

		if (uart_handle_sysrq_char(&uap->port, ch & 255))
			continue;

		uart_insert_char(&uap->port, ch, UART011_DR_OE, ch, flag);
	}

	return fifotaken;
}
/*
 * All the DMA operation mode stuff goes inside this ifdef.
 * This assumes that you have a generic DMA device interface;
 * no custom DMA interfaces are supported.
 */
369 #ifdef CONFIG_DMA_ENGINE
371 #define PL011_DMA_BUFFER_SIZE PAGE_SIZE
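/*
 * One page per DMA buffer is an assumption-light default: it keeps
 * dma_alloc_coherent() at an order-0 allocation while still amortising
 * per-transfer overhead well for UART-speed data.
 */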
static int pl011_sgbuf_init(struct dma_chan *chan, struct pl011_sgbuf *sg,
	enum dma_data_direction dir)
{
	dma_addr_t dma_addr;

	sg->buf = dma_alloc_coherent(chan->device->dev,
		PL011_DMA_BUFFER_SIZE, &dma_addr, GFP_KERNEL);
	if (!sg->buf)
		return -ENOMEM;

	sg_init_table(&sg->sg, 1);
	sg_set_page(&sg->sg, phys_to_page(dma_addr),
		PL011_DMA_BUFFER_SIZE, offset_in_page(dma_addr));
	sg_dma_address(&sg->sg) = dma_addr;
	sg_dma_len(&sg->sg) = PL011_DMA_BUFFER_SIZE;

	return 0;
}
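/*
 * Note the pattern above: the buffer itself comes from dma_alloc_coherent(),
 * but the dmaengine slave API takes scatterlists, so the single contiguous
 * buffer is wrapped in a one-entry scatterlist whose DMA address and length
 * are filled in by hand.
 */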
static void pl011_sgbuf_free(struct dma_chan *chan, struct pl011_sgbuf *sg,
	enum dma_data_direction dir)
{
	if (sg->buf) {
		dma_free_coherent(chan->device->dev,
			PL011_DMA_BUFFER_SIZE, sg->buf,
			sg_dma_address(&sg->sg));
	}
}
402 static void pl011_dma_probe(struct uart_amba_port *uap)
404 /* DMA is the sole user of the platform data right now */
405 struct amba_pl011_data *plat = dev_get_platdata(uap->port.dev);
406 struct device *dev = uap->port.dev;
407 struct dma_slave_config tx_conf = {
408 .dst_addr = uap->port.mapbase +
409 pl011_reg_to_offset(uap, REG_DR),
410 .dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
411 .direction = DMA_MEM_TO_DEV,
412 .dst_maxburst = uap->fifosize >> 1,
415 struct dma_chan *chan;
418 uap->dma_probed = true;
419 chan = dma_request_slave_channel_reason(dev, "tx");
421 if (PTR_ERR(chan) == -EPROBE_DEFER) {
422 uap->dma_probed = false;
426 /* We need platform data */
427 if (!plat || !plat->dma_filter) {
428 dev_info(uap->port.dev, "no DMA platform data\n");
432 /* Try to acquire a generic DMA engine slave TX channel */
434 dma_cap_set(DMA_SLAVE, mask);
436 chan = dma_request_channel(mask, plat->dma_filter,
439 dev_err(uap->port.dev, "no TX DMA channel!\n");
444 dmaengine_slave_config(chan, &tx_conf);
445 uap->dmatx.chan = chan;
447 dev_info(uap->port.dev, "DMA channel TX %s\n",
448 dma_chan_name(uap->dmatx.chan));
450 /* Optionally make use of an RX channel as well */
451 chan = dma_request_slave_channel(dev, "rx");
453 if (!chan && plat && plat->dma_rx_param) {
454 chan = dma_request_channel(mask, plat->dma_filter, plat->dma_rx_param);
457 dev_err(uap->port.dev, "no RX DMA channel!\n");
463 struct dma_slave_config rx_conf = {
464 .src_addr = uap->port.mapbase +
465 pl011_reg_to_offset(uap, REG_DR),
466 .src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
467 .direction = DMA_DEV_TO_MEM,
468 .src_maxburst = uap->fifosize >> 2,
471 struct dma_slave_caps caps;
		/*
		 * Some DMA controllers provide information on their capabilities.
		 * If the controller does, check for suitable residue processing;
		 * otherwise assume all is well.
		 */
478 if (0 == dma_get_slave_caps(chan, &caps)) {
479 if (caps.residue_granularity ==
480 DMA_RESIDUE_GRANULARITY_DESCRIPTOR) {
481 dma_release_channel(chan);
482 dev_info(uap->port.dev,
483 "RX DMA disabled - no residue processing\n");
487 dmaengine_slave_config(chan, &rx_conf);
488 uap->dmarx.chan = chan;
490 uap->dmarx.auto_poll_rate = false;
491 if (plat && plat->dma_rx_poll_enable) {
492 /* Set poll rate if specified. */
493 if (plat->dma_rx_poll_rate) {
494 uap->dmarx.auto_poll_rate = false;
495 uap->dmarx.poll_rate = plat->dma_rx_poll_rate;
		} else {
			/*
			 * Poll rate defaults to 100 ms if not
			 * specified. This will be adjusted with
			 * the baud rate at set_termios.
			 */
			uap->dmarx.auto_poll_rate = true;
			uap->dmarx.poll_rate = 100;
		}
		/* poll_timeout defaults to 3 s if not specified */
		if (plat->dma_rx_poll_timeout)
			uap->dmarx.poll_timeout =
				plat->dma_rx_poll_timeout;
		else
			uap->dmarx.poll_timeout = 3000;
511 } else if (!plat && dev->of_node) {
512 uap->dmarx.auto_poll_rate = of_property_read_bool(
513 dev->of_node, "auto-poll");
514 if (uap->dmarx.auto_poll_rate) {
517 if (0 == of_property_read_u32(dev->of_node,
519 uap->dmarx.poll_rate = x;
521 uap->dmarx.poll_rate = 100;
522 if (0 == of_property_read_u32(dev->of_node,
523 "poll-timeout-ms", &x))
524 uap->dmarx.poll_timeout = x;
526 uap->dmarx.poll_timeout = 3000;
529 dev_info(uap->port.dev, "DMA channel RX %s\n",
530 dma_chan_name(uap->dmarx.chan));
static void pl011_dma_remove(struct uart_amba_port *uap)
{
	if (uap->dmatx.chan)
		dma_release_channel(uap->dmatx.chan);
	if (uap->dmarx.chan)
		dma_release_channel(uap->dmarx.chan);
}
542 /* Forward declare these for the refill routine */
543 static int pl011_dma_tx_refill(struct uart_amba_port *uap);
544 static void pl011_start_tx_pio(struct uart_amba_port *uap);
/*
 * The current DMA TX buffer has been sent.
 * Try to queue up another DMA buffer.
 */
550 static void pl011_dma_tx_callback(void *data)
552 struct uart_amba_port *uap = data;
553 struct pl011_dmatx_data *dmatx = &uap->dmatx;
557 spin_lock_irqsave(&uap->port.lock, flags);
558 if (uap->dmatx.queued)
559 dma_unmap_sg(dmatx->chan->device->dev, &dmatx->sg, 1,
563 uap->dmacr = dmacr & ~UART011_TXDMAE;
564 pl011_write(uap->dmacr, uap, REG_DMACR);
	/*
	 * If TX DMA was disabled, it means that we've stopped the DMA for
	 * some reason (e.g. XOFF received, or we want to send an X-char).
	 *
	 * Note: we need to be careful here of a potential race between DMA
	 * and the rest of the driver - if the driver disables TX DMA while
	 * a TX buffer is completing, we must update the TX queued status to
	 * get further refills (hence we check dmacr).
	 */
575 if (!(dmacr & UART011_TXDMAE) || uart_tx_stopped(&uap->port) ||
576 uart_circ_empty(&uap->port.state->xmit)) {
577 uap->dmatx.queued = false;
578 spin_unlock_irqrestore(&uap->port.lock, flags);
582 if (pl011_dma_tx_refill(uap) <= 0)
584 * We didn't queue a DMA buffer for some reason, but we
585 * have data pending to be sent. Re-enable the TX IRQ.
587 pl011_start_tx_pio(uap);
589 spin_unlock_irqrestore(&uap->port.lock, flags);
/*
 * Try to refill the TX DMA buffer.
 * Locking: called with port lock held and IRQs disabled.
 * Returns:
 *   1 if we queued up a TX DMA buffer.
 *   0 if we didn't want to handle this by DMA
 *  <0 on error
 */
static int pl011_dma_tx_refill(struct uart_amba_port *uap)
{
	struct pl011_dmatx_data *dmatx = &uap->dmatx;
603 struct dma_chan *chan = dmatx->chan;
604 struct dma_device *dma_dev = chan->device;
605 struct dma_async_tx_descriptor *desc;
	struct circ_buf *xmit = &uap->port.state->xmit;
	unsigned int count;
	/*
	 * Try to avoid the overhead involved in using DMA if the
	 * transaction fits in the first half of the FIFO, by using
	 * the standard interrupt handling. This ensures that we
	 * issue a uart_write_wakeup() at the appropriate time.
	 */
615 count = uart_circ_chars_pending(xmit);
616 if (count < (uap->fifosize >> 1)) {
		uap->dmatx.queued = false;
		return 0;
	}
	/*
	 * Bodge: don't send the last character by DMA, as this
	 * will prevent XON from notifying us to restart DMA.
	 */
	count -= 1;
627 /* Else proceed to copy the TX chars to the DMA buffer and fire DMA */
628 if (count > PL011_DMA_BUFFER_SIZE)
629 count = PL011_DMA_BUFFER_SIZE;
	if (xmit->tail < xmit->head)
		memcpy(&dmatx->buf[0], &xmit->buf[xmit->tail], count);
	else {
		size_t first = UART_XMIT_SIZE - xmit->tail;
		size_t second;

		if (first > count)
			first = count;
		second = count - first;

		memcpy(&dmatx->buf[0], &xmit->buf[xmit->tail], first);
		if (second)
			memcpy(&dmatx->buf[first], &xmit->buf[0], second);
	}
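	/*
	 * Worked example of the wrapped case: with UART_XMIT_SIZE = 4096,
	 * tail = 4000 and count = 200, the first memcpy above takes the 96
	 * bytes up to the end of the ring and the second takes the remaining
	 * 104 bytes from the start of the buffer.
	 */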
646 dmatx->sg.length = count;
648 if (dma_map_sg(dma_dev->dev, &dmatx->sg, 1, DMA_TO_DEVICE) != 1) {
649 uap->dmatx.queued = false;
650 dev_dbg(uap->port.dev, "unable to map TX DMA\n");
654 desc = dmaengine_prep_slave_sg(chan, &dmatx->sg, 1, DMA_MEM_TO_DEV,
655 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc) {
		dma_unmap_sg(dma_dev->dev, &dmatx->sg, 1, DMA_TO_DEVICE);
		uap->dmatx.queued = false;
		/*
		 * If DMA cannot be used right now, we complete this
		 * transaction via IRQ and let the TTY layer retry.
		 */
		dev_dbg(uap->port.dev, "TX DMA busy\n");
		return -EBUSY;
	}
667 /* Some data to go along to the callback */
668 desc->callback = pl011_dma_tx_callback;
669 desc->callback_param = uap;
671 /* All errors should happen at prepare time */
672 dmaengine_submit(desc);
674 /* Fire the DMA transaction */
675 dma_dev->device_issue_pending(chan);
677 uap->dmacr |= UART011_TXDMAE;
678 pl011_write(uap->dmacr, uap, REG_DMACR);
679 uap->dmatx.queued = true;
682 * Now we know that DMA will fire, so advance the ring buffer
683 * with the stuff we just dispatched.
685 xmit->tail = (xmit->tail + count) & (UART_XMIT_SIZE - 1);
686 uap->port.icount.tx += count;
688 if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
		uart_write_wakeup(&uap->port);

	return 1;
}
/*
 * We received a transmit interrupt without a pending X-char but with
 * pending characters.
 * Locking: called with port lock held and IRQs disabled.
 * Returns:
 *   false if we want to use PIO to transmit
 *   true if we queued a DMA buffer
 */
static bool pl011_dma_tx_irq(struct uart_amba_port *uap)
{
	if (!uap->using_tx_dma)
		return false;

	/*
	 * If we already have a TX buffer queued, but received a
	 * TX interrupt, it will be because we've just sent an X-char.
	 * Ensure the TX DMA is enabled and the TX IRQ is disabled.
	 */
	if (uap->dmatx.queued) {
		uap->dmacr |= UART011_TXDMAE;
		pl011_write(uap->dmacr, uap, REG_DMACR);
		uap->im &= ~UART011_TXIM;
		pl011_write(uap->im, uap, REG_IMSC);
		return true;
	}

	/*
	 * We don't have a TX buffer queued, so try to queue one.
	 * If we successfully queued a buffer, mask the TX IRQ.
	 */
	if (pl011_dma_tx_refill(uap) > 0) {
		uap->im &= ~UART011_TXIM;
		pl011_write(uap->im, uap, REG_IMSC);
		return true;
	}
	return false;
}
/*
 * Stop the DMA transmit (eg, due to received XOFF).
 * Locking: called with port lock held and IRQs disabled.
 */
static inline void pl011_dma_tx_stop(struct uart_amba_port *uap)
{
	if (uap->dmatx.queued) {
		uap->dmacr &= ~UART011_TXDMAE;
		pl011_write(uap->dmacr, uap, REG_DMACR);
	}
}
/*
 * Try to start a DMA transmit, or in the case of an XON/OFF
 * character queued for send, try to get that character out ASAP.
 * Locking: called with port lock held and IRQs disabled.
 * Returns:
 *   false if we want the TX IRQ to be enabled
 *   true if we have a buffer queued
 */
static inline bool pl011_dma_tx_start(struct uart_amba_port *uap)
{
	u16 dmacr;

	if (!uap->using_tx_dma)
		return false;

	if (!uap->port.x_char) {
		/* no X-char, try to push chars out in DMA mode */
		bool ret = true;

		if (!uap->dmatx.queued) {
			if (pl011_dma_tx_refill(uap) > 0) {
				uap->im &= ~UART011_TXIM;
				pl011_write(uap->im, uap, REG_IMSC);
			} else
				ret = false;
		} else if (!(uap->dmacr & UART011_TXDMAE)) {
			uap->dmacr |= UART011_TXDMAE;
			pl011_write(uap->dmacr, uap, REG_DMACR);
		}
		return ret;
	}

	/*
	 * We have an X-char to send. Disable DMA to prevent it from loading
	 * the TX FIFO, and then see if we can stuff it into the FIFO.
	 */
	dmacr = uap->dmacr;
	uap->dmacr &= ~UART011_TXDMAE;
	pl011_write(uap->dmacr, uap, REG_DMACR);

	if (pl011_read(uap, REG_FR) & UART01x_FR_TXFF) {
		/*
		 * No space in the FIFO, so enable the transmit interrupt
		 * so we know when there is space. Note that once we've
		 * loaded the character, we should just re-enable DMA.
		 */
		return false;
	}

	pl011_write(uap->port.x_char, uap, REG_DR);
	uap->port.icount.tx++;
	uap->port.x_char = 0;

	/* Success - restore the DMA state */
	uap->dmacr = dmacr;
	pl011_write(dmacr, uap, REG_DMACR);

	return true;
}
805 * Flush the transmit buffer.
806 * Locking: called with port lock held and IRQs disabled.
808 static void pl011_dma_flush_buffer(struct uart_port *port)
809 __releases(&uap->port.lock)
810 __acquires(&uap->port.lock)
812 struct uart_amba_port *uap =
813 container_of(port, struct uart_amba_port, port);
815 if (!uap->using_tx_dma)
818 /* Avoid deadlock with the DMA engine callback */
819 spin_unlock(&uap->port.lock);
820 dmaengine_terminate_all(uap->dmatx.chan);
821 spin_lock(&uap->port.lock);
822 if (uap->dmatx.queued) {
823 dma_unmap_sg(uap->dmatx.chan->device->dev, &uap->dmatx.sg, 1,
825 uap->dmatx.queued = false;
826 uap->dmacr &= ~UART011_TXDMAE;
827 pl011_write(uap->dmacr, uap, REG_DMACR);
831 static void pl011_dma_rx_callback(void *data);
static int pl011_dma_rx_trigger_dma(struct uart_amba_port *uap)
{
	struct dma_chan *rxchan = uap->dmarx.chan;
836 struct pl011_dmarx_data *dmarx = &uap->dmarx;
837 struct dma_async_tx_descriptor *desc;
838 struct pl011_sgbuf *sgbuf;
843 /* Start the RX DMA job */
844 sgbuf = uap->dmarx.use_buf_b ?
845 &uap->dmarx.sgbuf_b : &uap->dmarx.sgbuf_a;
	desc = dmaengine_prep_slave_sg(rxchan, &sgbuf->sg, 1,
					DMA_DEV_TO_MEM,
					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	/*
	 * If the DMA engine is busy and cannot prepare a
	 * channel, no big deal: the driver will fall back
	 * to interrupt mode as a result of this error code.
	 */
	if (!desc) {
		uap->dmarx.running = false;
		dmaengine_terminate_all(rxchan);
		return -EBUSY;
	}
860 /* Some data to go along to the callback */
861 desc->callback = pl011_dma_rx_callback;
862 desc->callback_param = uap;
863 dmarx->cookie = dmaengine_submit(desc);
864 dma_async_issue_pending(rxchan);
866 uap->dmacr |= UART011_RXDMAE;
867 pl011_write(uap->dmacr, uap, REG_DMACR);
868 uap->dmarx.running = true;
870 uap->im &= ~UART011_RXIM;
	pl011_write(uap->im, uap, REG_IMSC);

	return 0;
}
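/*
 * Note the handover protocol above: once the descriptor is in flight,
 * setting RXDMAE makes the UART assert its DMA request lines, and masking
 * RXIM stops the per-watermark CPU interrupts. The receive-timeout
 * interrupt (UART011_RTIM, left enabled by pl011_enable_interrupts()) is
 * what rescues data that trickles in without ever filling the DMA buffer.
 */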
877 * This is called when either the DMA job is complete, or
878 * the FIFO timeout interrupt occurred. This must be called
879 * with the port spinlock uap->port.lock held.
881 static void pl011_dma_rx_chars(struct uart_amba_port *uap,
882 u32 pending, bool use_buf_b,
885 struct tty_port *port = &uap->port.state->port;
886 struct pl011_sgbuf *sgbuf = use_buf_b ?
887 &uap->dmarx.sgbuf_b : &uap->dmarx.sgbuf_a;
889 u32 fifotaken = 0; /* only used for vdbg() */
891 struct pl011_dmarx_data *dmarx = &uap->dmarx;
894 if (uap->dmarx.poll_rate) {
895 /* The data can be taken by polling */
896 dmataken = sgbuf->sg.length - dmarx->last_residue;
897 /* Recalculate the pending size */
		if (pending >= dmataken)
			pending -= dmataken;
	}

	/* Pick the remaining data from the DMA */
	/*
	 * First take all chars in the DMA pipe, then look in the FIFO.
	 * Note that tty_insert_flip_string() tries to take as many chars
	 * as it can.
	 */
	dma_count = tty_insert_flip_string(port, sgbuf->buf + dmataken,
					   pending);
913 uap->port.icount.rx += dma_count;
914 if (dma_count < pending)
915 dev_warn(uap->port.dev,
916 "couldn't insert all characters (TTY is full?)\n");
919 /* Reset the last_residue for Rx DMA poll */
920 if (uap->dmarx.poll_rate)
921 dmarx->last_residue = sgbuf->sg.length;
	/*
	 * Only continue with trying to read the FIFO if all DMA chars have
	 * been taken so far.
	 */
	if (dma_count == pending && readfifo) {
928 /* Clear any error flags */
929 pl011_write(UART011_OEIS | UART011_BEIS | UART011_PEIS |
930 UART011_FEIS, uap, REG_ICR);
		/*
		 * If we read all the DMA'd characters, and we had an
		 * incomplete buffer, that could be due to an rx error, or
		 * maybe we just timed out. Read any pending chars and check
		 * the error status.
		 *
		 * Error conditions will only occur in the FIFO, these will
		 * trigger an immediate interrupt and stop the DMA job, so we
		 * will always find the error in the FIFO, never in the DMA
		 * buffer.
		 */
943 fifotaken = pl011_fifo_to_tty(uap);
946 spin_unlock(&uap->port.lock);
947 dev_vdbg(uap->port.dev,
948 "Took %d chars from DMA buffer and %d chars from the FIFO\n",
949 dma_count, fifotaken);
950 tty_flip_buffer_push(port);
951 spin_lock(&uap->port.lock);
static void pl011_dma_rx_irq(struct uart_amba_port *uap)
{
	struct pl011_dmarx_data *dmarx = &uap->dmarx;
957 struct dma_chan *rxchan = dmarx->chan;
958 struct pl011_sgbuf *sgbuf = dmarx->use_buf_b ?
959 &dmarx->sgbuf_b : &dmarx->sgbuf_a;
961 struct dma_tx_state state;
962 enum dma_status dmastat;
	/*
	 * Pause the transfer so we can trust the current counter;
	 * do this before we pause the PL011 block, else we may
	 * overflow the FIFO.
	 */
	if (dmaengine_pause(rxchan))
970 dev_err(uap->port.dev, "unable to pause DMA transfer\n");
971 dmastat = rxchan->device->device_tx_status(rxchan,
972 dmarx->cookie, &state);
973 if (dmastat != DMA_PAUSED)
974 dev_err(uap->port.dev, "unable to pause DMA transfer\n");
976 /* Disable RX DMA - incoming data will wait in the FIFO */
977 uap->dmacr &= ~UART011_RXDMAE;
978 pl011_write(uap->dmacr, uap, REG_DMACR);
979 uap->dmarx.running = false;
981 pending = sgbuf->sg.length - state.residue;
982 BUG_ON(pending > PL011_DMA_BUFFER_SIZE);
983 /* Then we terminate the transfer - we now know our residue */
984 dmaengine_terminate_all(rxchan);
987 * This will take the chars we have so far and insert
988 * into the framework.
990 pl011_dma_rx_chars(uap, pending, dmarx->use_buf_b, true);
992 /* Switch buffer & re-trigger DMA job */
993 dmarx->use_buf_b = !dmarx->use_buf_b;
994 if (pl011_dma_rx_trigger_dma(uap)) {
995 dev_dbg(uap->port.dev, "could not retrigger RX DMA job "
996 "fall back to interrupt mode\n");
997 uap->im |= UART011_RXIM;
		pl011_write(uap->im, uap, REG_IMSC);
	}
}
static void pl011_dma_rx_callback(void *data)
{
	struct uart_amba_port *uap = data;
1005 struct pl011_dmarx_data *dmarx = &uap->dmarx;
1006 struct dma_chan *rxchan = dmarx->chan;
1007 bool lastbuf = dmarx->use_buf_b;
1008 struct pl011_sgbuf *sgbuf = dmarx->use_buf_b ?
1009 &dmarx->sgbuf_b : &dmarx->sgbuf_a;
1011 struct dma_tx_state state;
1015 * This completion interrupt occurs typically when the
1016 * RX buffer is totally stuffed but no timeout has yet
1017 * occurred. When that happens, we just want the RX
1018 * routine to flush out the secondary DMA buffer while
1019 * we immediately trigger the next DMA job.
1021 spin_lock_irq(&uap->port.lock);
1023 * Rx data can be taken by the UART interrupts during
1024 * the DMA irq handler. So we check the residue here.
1026 rxchan->device->device_tx_status(rxchan, dmarx->cookie, &state);
1027 pending = sgbuf->sg.length - state.residue;
1028 BUG_ON(pending > PL011_DMA_BUFFER_SIZE);
1029 /* Then we terminate the transfer - we now know our residue */
1030 dmaengine_terminate_all(rxchan);
1032 uap->dmarx.running = false;
1033 dmarx->use_buf_b = !lastbuf;
1034 ret = pl011_dma_rx_trigger_dma(uap);
1036 pl011_dma_rx_chars(uap, pending, lastbuf, false);
1037 spin_unlock_irq(&uap->port.lock);
	/*
	 * Do this check after we picked the DMA chars so we don't
	 * get some IRQ immediately from RX.
	 */
	if (ret) {
		dev_dbg(uap->port.dev, "could not retrigger RX DMA job "
			"fall back to interrupt mode\n");
		uap->im |= UART011_RXIM;
		pl011_write(uap->im, uap, REG_IMSC);
	}
}
/*
 * Stop accepting received characters, when we're shutting down or
 * suspending this port.
 * Locking: called with port lock held and IRQs disabled.
 */
static inline void pl011_dma_rx_stop(struct uart_amba_port *uap)
{
	/* FIXME. Just disable the DMA enable */
	uap->dmacr &= ~UART011_RXDMAE;
	pl011_write(uap->dmacr, uap, REG_DMACR);
}
/*
 * Timer handler for Rx DMA polling.
 * Every polling cycle, it checks the residue in the DMA buffer and
 * transfers data to the TTY. Also, last_residue is updated for the
 * next polling cycle.
 */
static void pl011_dma_rx_poll(struct timer_list *t)
{
	struct uart_amba_port *uap = from_timer(uap, t, dmarx.timer);
1070 struct tty_port *port = &uap->port.state->port;
1071 struct pl011_dmarx_data *dmarx = &uap->dmarx;
1072 struct dma_chan *rxchan = uap->dmarx.chan;
1073 unsigned long flags = 0;
1074 unsigned int dmataken = 0;
1075 unsigned int size = 0;
1076 struct pl011_sgbuf *sgbuf;
1078 struct dma_tx_state state;
1080 sgbuf = dmarx->use_buf_b ? &uap->dmarx.sgbuf_b : &uap->dmarx.sgbuf_a;
1081 rxchan->device->device_tx_status(rxchan, dmarx->cookie, &state);
1082 if (likely(state.residue < dmarx->last_residue)) {
1083 dmataken = sgbuf->sg.length - dmarx->last_residue;
1084 size = dmarx->last_residue - state.residue;
1085 dma_count = tty_insert_flip_string(port, sgbuf->buf + dmataken,
1087 if (dma_count == size)
1088 dmarx->last_residue = state.residue;
1089 dmarx->last_jiffies = jiffies;
1091 tty_flip_buffer_push(port);
1094 * If no data is received in poll_timeout, the driver will fall back
1095 * to interrupt mode. We will retrigger DMA at the first interrupt.
1097 if (jiffies_to_msecs(jiffies - dmarx->last_jiffies)
1098 > uap->dmarx.poll_timeout) {
1100 spin_lock_irqsave(&uap->port.lock, flags);
1101 pl011_dma_rx_stop(uap);
1102 uap->im |= UART011_RXIM;
1103 pl011_write(uap->im, uap, REG_IMSC);
1104 spin_unlock_irqrestore(&uap->port.lock, flags);
1106 uap->dmarx.running = false;
1107 dmaengine_terminate_all(rxchan);
1108 del_timer(&uap->dmarx.timer);
	} else {
		mod_timer(&uap->dmarx.timer,
			  jiffies + msecs_to_jiffies(uap->dmarx.poll_rate));
	}
}
1115 static void pl011_dma_startup(struct uart_amba_port *uap)
1119 if (!uap->dma_probed)
1120 pl011_dma_probe(uap);
1122 if (!uap->dmatx.chan)
1125 uap->dmatx.buf = kmalloc(PL011_DMA_BUFFER_SIZE, GFP_KERNEL | __GFP_DMA);
1126 if (!uap->dmatx.buf) {
1127 dev_err(uap->port.dev, "no memory for DMA TX buffer\n");
1128 uap->port.fifosize = uap->fifosize;
1132 sg_init_one(&uap->dmatx.sg, uap->dmatx.buf, PL011_DMA_BUFFER_SIZE);
1134 /* The DMA buffer is now the FIFO the TTY subsystem can use */
1135 uap->port.fifosize = PL011_DMA_BUFFER_SIZE;
1136 uap->using_tx_dma = true;
1138 if (!uap->dmarx.chan)
1141 /* Allocate and map DMA RX buffers */
1142 ret = pl011_sgbuf_init(uap->dmarx.chan, &uap->dmarx.sgbuf_a,
1145 dev_err(uap->port.dev, "failed to init DMA %s: %d\n",
1146 "RX buffer A", ret);
1150 ret = pl011_sgbuf_init(uap->dmarx.chan, &uap->dmarx.sgbuf_b,
1153 dev_err(uap->port.dev, "failed to init DMA %s: %d\n",
1154 "RX buffer B", ret);
1155 pl011_sgbuf_free(uap->dmarx.chan, &uap->dmarx.sgbuf_a,
1160 uap->using_rx_dma = true;
1163 /* Turn on DMA error (RX/TX will be enabled on demand) */
1164 uap->dmacr |= UART011_DMAONERR;
1165 pl011_write(uap->dmacr, uap, REG_DMACR);
	/*
	 * ST Micro variants have some specific dma burst threshold
	 * compensation. Set this to 16 bytes, so bursts will only
	 * be issued above/below 16 bytes.
	 */
1172 if (uap->vendor->dma_threshold)
1173 pl011_write(ST_UART011_DMAWM_RX_16 | ST_UART011_DMAWM_TX_16,
1176 if (uap->using_rx_dma) {
1177 if (pl011_dma_rx_trigger_dma(uap))
1178 dev_dbg(uap->port.dev, "could not trigger initial "
1179 "RX DMA job, fall back to interrupt mode\n");
1180 if (uap->dmarx.poll_rate) {
1181 timer_setup(&uap->dmarx.timer, pl011_dma_rx_poll, 0);
1182 mod_timer(&uap->dmarx.timer,
1184 msecs_to_jiffies(uap->dmarx.poll_rate));
1185 uap->dmarx.last_residue = PL011_DMA_BUFFER_SIZE;
1186 uap->dmarx.last_jiffies = jiffies;
static void pl011_dma_shutdown(struct uart_amba_port *uap)
{
	if (!(uap->using_tx_dma || uap->using_rx_dma))
		return;
1196 /* Disable RX and TX DMA */
	while (pl011_read(uap, REG_FR) & uap->vendor->fr_busy)
		cpu_relax();
1200 spin_lock_irq(&uap->port.lock);
1201 uap->dmacr &= ~(UART011_DMAONERR | UART011_RXDMAE | UART011_TXDMAE);
1202 pl011_write(uap->dmacr, uap, REG_DMACR);
1203 spin_unlock_irq(&uap->port.lock);
1205 if (uap->using_tx_dma) {
1206 /* In theory, this should already be done by pl011_dma_flush_buffer */
1207 dmaengine_terminate_all(uap->dmatx.chan);
1208 if (uap->dmatx.queued) {
1209 dma_unmap_sg(uap->dmatx.chan->device->dev, &uap->dmatx.sg, 1,
1211 uap->dmatx.queued = false;
1214 kfree(uap->dmatx.buf);
1215 uap->using_tx_dma = false;
1218 if (uap->using_rx_dma) {
1219 dmaengine_terminate_all(uap->dmarx.chan);
1220 /* Clean up the RX DMA */
1221 pl011_sgbuf_free(uap->dmarx.chan, &uap->dmarx.sgbuf_a, DMA_FROM_DEVICE);
1222 pl011_sgbuf_free(uap->dmarx.chan, &uap->dmarx.sgbuf_b, DMA_FROM_DEVICE);
1223 if (uap->dmarx.poll_rate)
1224 del_timer_sync(&uap->dmarx.timer);
1225 uap->using_rx_dma = false;
static inline bool pl011_dma_rx_available(struct uart_amba_port *uap)
{
	return uap->using_rx_dma;
}

static inline bool pl011_dma_rx_running(struct uart_amba_port *uap)
{
	return uap->using_rx_dma && uap->dmarx.running;
}
#else
/* Blank functions if the DMA engine is not available */
1241 static inline void pl011_dma_probe(struct uart_amba_port *uap)
1245 static inline void pl011_dma_remove(struct uart_amba_port *uap)
1249 static inline void pl011_dma_startup(struct uart_amba_port *uap)
1253 static inline void pl011_dma_shutdown(struct uart_amba_port *uap)
1257 static inline bool pl011_dma_tx_irq(struct uart_amba_port *uap)
1262 static inline void pl011_dma_tx_stop(struct uart_amba_port *uap)
1266 static inline bool pl011_dma_tx_start(struct uart_amba_port *uap)
1271 static inline void pl011_dma_rx_irq(struct uart_amba_port *uap)
1275 static inline void pl011_dma_rx_stop(struct uart_amba_port *uap)
1279 static inline int pl011_dma_rx_trigger_dma(struct uart_amba_port *uap)
1284 static inline bool pl011_dma_rx_available(struct uart_amba_port *uap)
1289 static inline bool pl011_dma_rx_running(struct uart_amba_port *uap)
#define pl011_dma_flush_buffer	NULL
#endif
1297 static void pl011_stop_tx(struct uart_port *port)
1299 struct uart_amba_port *uap =
1300 container_of(port, struct uart_amba_port, port);
1302 uap->im &= ~UART011_TXIM;
1303 pl011_write(uap->im, uap, REG_IMSC);
1304 pl011_dma_tx_stop(uap);
1307 static bool pl011_tx_chars(struct uart_amba_port *uap, bool from_irq);
1309 /* Start TX with programmed I/O only (no DMA) */
1310 static void pl011_start_tx_pio(struct uart_amba_port *uap)
1312 if (pl011_tx_chars(uap, false)) {
1313 uap->im |= UART011_TXIM;
1314 pl011_write(uap->im, uap, REG_IMSC);
1318 static void pl011_start_tx(struct uart_port *port)
1320 struct uart_amba_port *uap =
1321 container_of(port, struct uart_amba_port, port);
1323 if (!pl011_dma_tx_start(uap))
1324 pl011_start_tx_pio(uap);
1327 static void pl011_stop_rx(struct uart_port *port)
1329 struct uart_amba_port *uap =
1330 container_of(port, struct uart_amba_port, port);
1332 uap->im &= ~(UART011_RXIM|UART011_RTIM|UART011_FEIM|
1333 UART011_PEIM|UART011_BEIM|UART011_OEIM);
1334 pl011_write(uap->im, uap, REG_IMSC);
1336 pl011_dma_rx_stop(uap);
1339 static void pl011_enable_ms(struct uart_port *port)
1341 struct uart_amba_port *uap =
1342 container_of(port, struct uart_amba_port, port);
1344 uap->im |= UART011_RIMIM|UART011_CTSMIM|UART011_DCDMIM|UART011_DSRMIM;
1345 pl011_write(uap->im, uap, REG_IMSC);
1348 static void pl011_rx_chars(struct uart_amba_port *uap)
1349 __releases(&uap->port.lock)
1350 __acquires(&uap->port.lock)
1352 pl011_fifo_to_tty(uap);
1354 spin_unlock(&uap->port.lock);
1355 tty_flip_buffer_push(&uap->port.state->port);
1357 * If we were temporarily out of DMA mode for a while,
1358 * attempt to switch back to DMA mode again.
1360 if (pl011_dma_rx_available(uap)) {
1361 if (pl011_dma_rx_trigger_dma(uap)) {
1362 dev_dbg(uap->port.dev, "could not trigger RX DMA job "
1363 "fall back to interrupt mode again\n");
1364 uap->im |= UART011_RXIM;
1365 pl011_write(uap->im, uap, REG_IMSC);
1367 #ifdef CONFIG_DMA_ENGINE
1368 /* Start Rx DMA poll */
1369 if (uap->dmarx.poll_rate) {
1370 uap->dmarx.last_jiffies = jiffies;
1371 uap->dmarx.last_residue = PL011_DMA_BUFFER_SIZE;
1372 mod_timer(&uap->dmarx.timer,
1374 msecs_to_jiffies(uap->dmarx.poll_rate));
1379 spin_lock(&uap->port.lock);
static bool pl011_tx_char(struct uart_amba_port *uap, unsigned char c,
			  bool from_irq)
{
	if (unlikely(!from_irq) &&
	    pl011_read(uap, REG_FR) & UART01x_FR_TXFF)
		return false; /* unable to transmit character */

	pl011_write(c, uap, REG_DR);
	uap->port.icount.tx++;

	return true;
}
1395 /* Returns true if tx interrupts have to be (kept) enabled */
static bool pl011_tx_chars(struct uart_amba_port *uap, bool from_irq)
{
	struct circ_buf *xmit = &uap->port.state->xmit;
	int count = uap->fifosize >> 1;

	if (uap->port.x_char) {
		if (!pl011_tx_char(uap, uap->port.x_char, from_irq))
			return true;
		uap->port.x_char = 0;
		--count;
	}
	if (uart_circ_empty(xmit) || uart_tx_stopped(&uap->port)) {
		pl011_stop_tx(&uap->port);
		return false;
	}

	/* If we are using DMA mode, try to send some characters. */
	if (pl011_dma_tx_irq(uap))
		return true;

	do {
		if (likely(from_irq) && count-- == 0)
			break;

		if (!pl011_tx_char(uap, xmit->buf[xmit->tail], from_irq))
			break;

		xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
	} while (!uart_circ_empty(xmit));

	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
		uart_write_wakeup(&uap->port);

	if (uart_circ_empty(xmit)) {
		pl011_stop_tx(&uap->port);
		return false;
	}
	return true;
}
static void pl011_modem_status(struct uart_amba_port *uap)
{
	unsigned int status, delta;

	status = pl011_read(uap, REG_FR) & UART01x_FR_MODEM_ANY;

	delta = status ^ uap->old_status;
	uap->old_status = status;

	if (!delta)
		return;

	if (delta & UART01x_FR_DCD)
		uart_handle_dcd_change(&uap->port, status & UART01x_FR_DCD);

	if (delta & uap->vendor->fr_dsr)
		uap->port.icount.dsr++;

	if (delta & uap->vendor->fr_cts)
		uart_handle_cts_change(&uap->port,
				       status & uap->vendor->fr_cts);

	wake_up_interruptible(&uap->port.state->port.delta_msr_wait);
}
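/*
 * The XOR above yields exactly the modem-status bits that changed since the
 * last read: if, say, only CTS toggled, delta has just the fr_cts bit set
 * and only uart_handle_cts_change() does any work.
 */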
static void check_apply_cts_event_workaround(struct uart_amba_port *uap)
{
	unsigned int dummy_read;

	if (!uap->vendor->cts_event_workaround)
		return;

	/* workaround to make sure that all bits are unlocked.. */
	pl011_write(0x00, uap, REG_ICR);

	/*
	 * WA: introduce 26 ns (1 UART clk) delay before W1C;
	 * a single APB access will incur 2 pclk (133.12 MHz) delay,
	 * so add 2 dummy reads.
	 */
	dummy_read = pl011_read(uap, REG_ICR);
	dummy_read = pl011_read(uap, REG_ICR);
}
static irqreturn_t pl011_int(int irq, void *dev_id)
{
	struct uart_amba_port *uap = dev_id;
	unsigned long flags;
	unsigned int status, pass_counter = AMBA_ISR_PASS_LIMIT;
	unsigned int imsc;
	int handled = 0;

	spin_lock_irqsave(&uap->port.lock, flags);
	imsc = pl011_read(uap, REG_IMSC);
	status = pl011_read(uap, REG_RIS) & imsc;
	if (status) {
		do {
			check_apply_cts_event_workaround(uap);

			pl011_write(status & ~(UART011_TXIS|UART011_RTIS|
					       UART011_RXIS),
				    uap, REG_ICR);

			if (status & (UART011_RTIS|UART011_RXIS)) {
				if (pl011_dma_rx_running(uap))
					pl011_dma_rx_irq(uap);
				else
					pl011_rx_chars(uap);
			}
			if (status & (UART011_DSRMIS|UART011_DCDMIS|
				      UART011_CTSMIS|UART011_RIMIS))
				pl011_modem_status(uap);
			if (status & UART011_TXIS)
				pl011_tx_chars(uap, true);

			if (pass_counter-- == 0)
				break;

			status = pl011_read(uap, REG_RIS) & imsc;
		} while (status != 0);
		handled = 1;
	}

	spin_unlock_irqrestore(&uap->port.lock, flags);

	return IRQ_RETVAL(handled);
}
static unsigned int pl011_tx_empty(struct uart_port *port)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);

	/* Allow feature register bits to be inverted to work around errata */
	unsigned int status = pl011_read(uap, REG_FR) ^ uap->vendor->inv_fr;

	return status & (uap->vendor->fr_busy | UART01x_FR_TXFF) ?
							0 : TIOCSER_TEMT;
}
static unsigned int pl011_get_mctrl(struct uart_port *port)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);
	unsigned int result = 0;
	unsigned int status = pl011_read(uap, REG_FR);

#define TIOCMBIT(uartbit, tiocmbit)	\
	if (status & uartbit)		\
		result |= tiocmbit

	TIOCMBIT(UART01x_FR_DCD, TIOCM_CAR);
	TIOCMBIT(uap->vendor->fr_dsr, TIOCM_DSR);
	TIOCMBIT(uap->vendor->fr_cts, TIOCM_CTS);
	TIOCMBIT(uap->vendor->fr_ri, TIOCM_RNG);
#undef TIOCMBIT
	return result;
}
static void pl011_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);
	unsigned int cr;

	cr = pl011_read(uap, REG_CR);

#define	TIOCMBIT(tiocmbit, uartbit)	\
	if (mctrl & tiocmbit)		\
		cr |= uartbit;		\
	else				\
		cr &= ~uartbit

	TIOCMBIT(TIOCM_RTS, UART011_CR_RTS);
	TIOCMBIT(TIOCM_DTR, UART011_CR_DTR);
	TIOCMBIT(TIOCM_OUT1, UART011_CR_OUT1);
	TIOCMBIT(TIOCM_OUT2, UART011_CR_OUT2);
	TIOCMBIT(TIOCM_LOOP, UART011_CR_LBE);

	if (port->status & UPSTAT_AUTORTS) {
		/* We need to disable auto-RTS if we want to turn RTS off */
		TIOCMBIT(TIOCM_RTS, UART011_CR_RTSEN);
	}
#undef TIOCMBIT

	pl011_write(cr, uap, REG_CR);
}
static void pl011_break_ctl(struct uart_port *port, int break_state)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);
	unsigned long flags;
	unsigned int lcr_h;

	spin_lock_irqsave(&uap->port.lock, flags);
	lcr_h = pl011_read(uap, REG_LCRH_TX);
	if (break_state == -1)
		lcr_h |= UART01x_LCRH_BRK;
	else
		lcr_h &= ~UART01x_LCRH_BRK;
	pl011_write(lcr_h, uap, REG_LCRH_TX);
	spin_unlock_irqrestore(&uap->port.lock, flags);
}
1601 #ifdef CONFIG_CONSOLE_POLL
static void pl011_quiesce_irqs(struct uart_port *port)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);

	pl011_write(pl011_read(uap, REG_MIS), uap, REG_ICR);
	/*
	 * There is no way to clear TXIM as this is "ready to transmit IRQ", so
	 * we simply mask it. start_tx() will unmask it.
	 *
	 * Note we can race with start_tx(), and if the race happens, the
	 * polling user might get another interrupt just after we clear it.
	 * But it should be OK and can happen even w/o the race, e.g.
	 * controller immediately got some new data and raised the IRQ.
	 *
	 * And whoever uses polling routines assumes that it manages the device
	 * (including tx queue), so we're also fine with start_tx()'s caller
	 * side.
	 */
	pl011_write(pl011_read(uap, REG_IMSC) & ~UART011_TXIM, uap,
		    REG_IMSC);
}
1626 static int pl011_get_poll_char(struct uart_port *port)
1628 struct uart_amba_port *uap =
1629 container_of(port, struct uart_amba_port, port);
1630 unsigned int status;
	/*
	 * The caller might need IRQs lowered, e.g. if used with KDB NMI
	 * debugger.
	 */
1636 pl011_quiesce_irqs(port);
1638 status = pl011_read(uap, REG_FR);
1639 if (status & UART01x_FR_RXFE)
1640 return NO_POLL_CHAR;
1642 return pl011_read(uap, REG_DR);
static void pl011_put_poll_char(struct uart_port *port,
			 unsigned char ch)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);

	while (pl011_read(uap, REG_FR) & UART01x_FR_TXFF)
		cpu_relax();

	pl011_write(ch, uap, REG_DR);
}
1657 #endif /* CONFIG_CONSOLE_POLL */
1659 static int pl011_hwinit(struct uart_port *port)
1661 struct uart_amba_port *uap =
1662 container_of(port, struct uart_amba_port, port);
	/* Optionally enable pins to be muxed in and configured */
1666 pinctrl_pm_select_default_state(port->dev);
1669 * Try to enable the clock producer.
1671 retval = clk_prepare_enable(uap->clk);
1675 uap->port.uartclk = clk_get_rate(uap->clk);
1677 /* Clear pending error and receive interrupts */
1678 pl011_write(UART011_OEIS | UART011_BEIS | UART011_PEIS |
1679 UART011_FEIS | UART011_RTIS | UART011_RXIS,
	/*
	 * Save the interrupt enable mask, and enable RX interrupts in case
	 * the interrupt is used for NMI entry.
	 */
1686 uap->im = pl011_read(uap, REG_IMSC);
1687 pl011_write(UART011_RTIM | UART011_RXIM, uap, REG_IMSC);
1689 if (dev_get_platdata(uap->port.dev)) {
1690 struct amba_pl011_data *plat;
1692 plat = dev_get_platdata(uap->port.dev);
static bool pl011_split_lcrh(const struct uart_amba_port *uap)
{
	return pl011_reg_to_offset(uap, REG_LCRH_RX) !=
	       pl011_reg_to_offset(uap, REG_LCRH_TX);
}
static void pl011_write_lcr_h(struct uart_amba_port *uap, unsigned int lcr_h)
{
	pl011_write(lcr_h, uap, REG_LCRH_RX);
	if (pl011_split_lcrh(uap)) {
		int i;
		/*
		 * Wait 10 PCLKs before writing the LCRH_TX register;
		 * to get this delay, write a read-only register 10 times.
		 */
		for (i = 0; i < 10; ++i)
			pl011_write(0xff, uap, REG_MIS);
		pl011_write(lcr_h, uap, REG_LCRH_TX);
	}
}
static int pl011_allocate_irq(struct uart_amba_port *uap)
{
	pl011_write(uap->im, uap, REG_IMSC);

	return request_irq(uap->port.irq, pl011_int, 0, "uart-pl011", uap);
}
/*
 * Enable interrupts, only timeouts when using DMA;
 * if the initial RX DMA job failed, start in interrupt mode as well.
 */
1732 static void pl011_enable_interrupts(struct uart_amba_port *uap)
1734 spin_lock_irq(&uap->port.lock);
1736 /* Clear out any spuriously appearing RX interrupts */
1737 pl011_write(UART011_RTIS | UART011_RXIS, uap, REG_ICR);
1738 uap->im = UART011_RTIM;
1739 if (!pl011_dma_rx_running(uap))
1740 uap->im |= UART011_RXIM;
1741 pl011_write(uap->im, uap, REG_IMSC);
1742 spin_unlock_irq(&uap->port.lock);
1745 static int pl011_startup(struct uart_port *port)
1747 struct uart_amba_port *uap =
1748 container_of(port, struct uart_amba_port, port);
1752 retval = pl011_hwinit(port);
1756 retval = pl011_allocate_irq(uap);
1760 pl011_write(uap->vendor->ifls, uap, REG_IFLS);
1762 spin_lock_irq(&uap->port.lock);
1764 /* restore RTS and DTR */
1765 cr = uap->old_cr & (UART011_CR_RTS | UART011_CR_DTR);
1766 cr |= UART01x_CR_UARTEN | UART011_CR_RXE | UART011_CR_TXE;
1767 pl011_write(cr, uap, REG_CR);
1769 spin_unlock_irq(&uap->port.lock);
1772 * initialise the old status of the modem signals
1774 uap->old_status = pl011_read(uap, REG_FR) & UART01x_FR_MODEM_ANY;
1777 pl011_dma_startup(uap);
1779 pl011_enable_interrupts(uap);
1784 clk_disable_unprepare(uap->clk);
1788 static int sbsa_uart_startup(struct uart_port *port)
1790 struct uart_amba_port *uap =
1791 container_of(port, struct uart_amba_port, port);
1794 retval = pl011_hwinit(port);
1798 retval = pl011_allocate_irq(uap);
1802 /* The SBSA UART does not support any modem status lines. */
1803 uap->old_status = 0;
1805 pl011_enable_interrupts(uap);
static void pl011_shutdown_channel(struct uart_amba_port *uap,
					unsigned int lcrh)
{
	unsigned long val;

	val = pl011_read(uap, lcrh);
	val &= ~(UART01x_LCRH_BRK | UART01x_LCRH_FEN);
	pl011_write(val, uap, lcrh);
}
/*
 * Disable the port. It should not disable RTS and DTR;
 * their state should be preserved so that it can be
 * restored during startup().
 */
1825 static void pl011_disable_uart(struct uart_amba_port *uap)
1829 uap->port.status &= ~(UPSTAT_AUTOCTS | UPSTAT_AUTORTS);
1830 spin_lock_irq(&uap->port.lock);
1831 cr = pl011_read(uap, REG_CR);
1833 cr &= UART011_CR_RTS | UART011_CR_DTR;
1834 cr |= UART01x_CR_UARTEN | UART011_CR_TXE;
1835 pl011_write(cr, uap, REG_CR);
1836 spin_unlock_irq(&uap->port.lock);
1839 * disable break condition and fifos
1841 pl011_shutdown_channel(uap, REG_LCRH_RX);
1842 if (pl011_split_lcrh(uap))
1843 pl011_shutdown_channel(uap, REG_LCRH_TX);
1846 static void pl011_disable_interrupts(struct uart_amba_port *uap)
1848 spin_lock_irq(&uap->port.lock);
1850 /* mask all interrupts and clear all pending ones */
1852 pl011_write(uap->im, uap, REG_IMSC);
1853 pl011_write(0xffff, uap, REG_ICR);
1855 spin_unlock_irq(&uap->port.lock);
1858 static void pl011_shutdown(struct uart_port *port)
1860 struct uart_amba_port *uap =
1861 container_of(port, struct uart_amba_port, port);
1863 pl011_disable_interrupts(uap);
1865 pl011_dma_shutdown(uap);
1867 free_irq(uap->port.irq, uap);
1869 pl011_disable_uart(uap);
1872 * Shut down the clock producer
1874 clk_disable_unprepare(uap->clk);
1875 /* Optionally let pins go into sleep states */
1876 pinctrl_pm_select_sleep_state(port->dev);
1878 if (dev_get_platdata(uap->port.dev)) {
1879 struct amba_pl011_data *plat;
1881 plat = dev_get_platdata(uap->port.dev);
1886 if (uap->port.ops->flush_buffer)
1887 uap->port.ops->flush_buffer(port);
1890 static void sbsa_uart_shutdown(struct uart_port *port)
1892 struct uart_amba_port *uap =
1893 container_of(port, struct uart_amba_port, port);
1895 pl011_disable_interrupts(uap);
1897 free_irq(uap->port.irq, uap);
1899 if (uap->port.ops->flush_buffer)
1900 uap->port.ops->flush_buffer(port);
static void
pl011_setup_status_masks(struct uart_port *port, struct ktermios *termios)
{
1906 port->read_status_mask = UART011_DR_OE | 255;
1907 if (termios->c_iflag & INPCK)
1908 port->read_status_mask |= UART011_DR_FE | UART011_DR_PE;
1909 if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
1910 port->read_status_mask |= UART011_DR_BE;
1913 * Characters to ignore
1915 port->ignore_status_mask = 0;
1916 if (termios->c_iflag & IGNPAR)
1917 port->ignore_status_mask |= UART011_DR_FE | UART011_DR_PE;
1918 if (termios->c_iflag & IGNBRK) {
1919 port->ignore_status_mask |= UART011_DR_BE;
1921 * If we're ignoring parity and break indicators,
1922 * ignore overruns too (for real raw support).
1924 if (termios->c_iflag & IGNPAR)
1925 port->ignore_status_mask |= UART011_DR_OE;
	/*
	 * Ignore all characters if CREAD is not set.
	 */
	if ((termios->c_cflag & CREAD) == 0)
		port->ignore_status_mask |= UART_DUMMY_DR_RX;
}
1936 pl011_set_termios(struct uart_port *port, struct ktermios *termios,
1937 struct ktermios *old)
1939 struct uart_amba_port *uap =
1940 container_of(port, struct uart_amba_port, port);
1941 unsigned int lcr_h, old_cr;
1942 unsigned long flags;
1943 unsigned int baud, quot, clkdiv;
	if (uap->vendor->oversampling)
		clkdiv = 8;
	else
		clkdiv = 16;
	/*
	 * Ask the core to calculate the divisor for us.
	 */
	baud = uart_get_baud_rate(port, termios, old, 0,
				  port->uartclk / clkdiv);
1955 #ifdef CONFIG_DMA_ENGINE
1957 * Adjust RX DMA polling rate with baud rate if not specified.
1959 if (uap->dmarx.auto_poll_rate)
1960 uap->dmarx.poll_rate = DIV_ROUND_UP(10000000, baud);
	if (baud > port->uartclk/16)
		quot = DIV_ROUND_CLOSEST(port->uartclk * 8, baud);
	else
		quot = DIV_ROUND_CLOSEST(port->uartclk * 4, baud);
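	/*
	 * quot is the baud divisor in 6-bit fixed point, i.e. IBRD * 64 +
	 * FBRD. Worked example: with uartclk = 24 MHz and baud = 115200,
	 * quot = DIV_ROUND_CLOSEST(96000000, 115200) = 833, i.e. IBRD = 13
	 * and FBRD = 1, giving 24000000 / (16 * (13 + 1/64)) ~= 115246 baud,
	 * about 0.04% fast.
	 */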
	switch (termios->c_cflag & CSIZE) {
	case CS5:
		lcr_h = UART01x_LCRH_WLEN_5;
		break;
	case CS6:
		lcr_h = UART01x_LCRH_WLEN_6;
		break;
	case CS7:
		lcr_h = UART01x_LCRH_WLEN_7;
		break;
	default: /* CS8 */
		lcr_h = UART01x_LCRH_WLEN_8;
		break;
	}
1982 if (termios->c_cflag & CSTOPB)
1983 lcr_h |= UART01x_LCRH_STP2;
1984 if (termios->c_cflag & PARENB) {
1985 lcr_h |= UART01x_LCRH_PEN;
1986 if (!(termios->c_cflag & PARODD))
1987 lcr_h |= UART01x_LCRH_EPS;
1988 if (termios->c_cflag & CMSPAR)
1989 lcr_h |= UART011_LCRH_SPS;
1991 if (uap->fifosize > 1)
1992 lcr_h |= UART01x_LCRH_FEN;
1994 spin_lock_irqsave(&port->lock, flags);
1997 * Update the per-port timeout.
1999 uart_update_timeout(port, termios->c_cflag, baud);
2001 pl011_setup_status_masks(port, termios);
2003 if (UART_ENABLE_MS(port, termios->c_cflag))
2004 pl011_enable_ms(port);
2006 /* first, disable everything */
2007 old_cr = pl011_read(uap, REG_CR);
2008 pl011_write(0, uap, REG_CR);
2010 if (termios->c_cflag & CRTSCTS) {
2011 if (old_cr & UART011_CR_RTS)
2012 old_cr |= UART011_CR_RTSEN;
2014 old_cr |= UART011_CR_CTSEN;
2015 port->status |= UPSTAT_AUTOCTS | UPSTAT_AUTORTS;
2017 old_cr &= ~(UART011_CR_CTSEN | UART011_CR_RTSEN);
2018 port->status &= ~(UPSTAT_AUTOCTS | UPSTAT_AUTORTS);
2021 if (uap->vendor->oversampling) {
2022 if (baud > port->uartclk / 16)
2023 old_cr |= ST_UART011_CR_OVSFACT;
2025 old_cr &= ~ST_UART011_CR_OVSFACT;
	/*
	 * Workaround for the ST Micro oversampling variants to
	 * increase the bitrate slightly, by lowering the divisor,
	 * to avoid delayed sampling of start bit at high speeds,
	 * else we see data corruption.
	 */
	if (uap->vendor->oversampling) {
		if ((baud >= 3000000) && (baud < 3250000) && (quot > 1))
			quot -= 1;
		else if ((baud > 3250000) && (quot > 2))
			quot -= 2;
	}
2041 pl011_write(quot & 0x3f, uap, REG_FBRD);
2042 pl011_write(quot >> 6, uap, REG_IBRD);
	/*
	 * ----------v----------v----------v----------v-----
	 * NOTE: REG_LCRH_TX and REG_LCRH_RX MUST BE WRITTEN AFTER
	 * REG_FBRD & REG_IBRD.
	 * (The integer and fractional baud divisors only take effect
	 * on a subsequent LCRH write, so this ordering is what actually
	 * latches the new rate.)
	 * ----------^----------^----------^----------^-----
	 */
2050 pl011_write_lcr_h(uap, lcr_h);
2051 pl011_write(old_cr, uap, REG_CR);
2053 spin_unlock_irqrestore(&port->lock, flags);
2057 sbsa_uart_set_termios(struct uart_port *port, struct ktermios *termios,
2058 struct ktermios *old)
2060 struct uart_amba_port *uap =
2061 container_of(port, struct uart_amba_port, port);
2062 unsigned long flags;
2064 tty_termios_encode_baud_rate(termios, uap->fixed_baud, uap->fixed_baud);
2066 /* The SBSA UART only supports 8n1 without hardware flow control. */
2067 termios->c_cflag &= ~(CSIZE | CSTOPB | PARENB | PARODD);
2068 termios->c_cflag &= ~(CMSPAR | CRTSCTS);
2069 termios->c_cflag |= CS8 | CLOCAL;
2071 spin_lock_irqsave(&port->lock, flags);
2072 uart_update_timeout(port, CS8, uap->fixed_baud);
2073 pl011_setup_status_masks(port, termios);
2074 spin_unlock_irqrestore(&port->lock, flags);
static const char *pl011_type(struct uart_port *port)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);
	return uap->port.type == PORT_AMBA ? uap->type : NULL;
}
/*
 * Release the memory region(s) being used by 'port'
 */
2087 static void pl011_release_port(struct uart_port *port)
2089 release_mem_region(port->mapbase, SZ_4K);
/*
 * Request the memory region(s) being used by 'port'
 */
static int pl011_request_port(struct uart_port *port)
{
	return request_mem_region(port->mapbase, SZ_4K, "uart-pl011")
			!= NULL ? 0 : -EBUSY;
}
/*
 * Configure/autoconfigure the port.
 */
static void pl011_config_port(struct uart_port *port, int flags)
{
	if (flags & UART_CONFIG_TYPE) {
		port->type = PORT_AMBA;
		pl011_request_port(port);
	}
}
/*
 * verify the new serial_struct (for TIOCSSERIAL).
 */
static int pl011_verify_port(struct uart_port *port, struct serial_struct *ser)
{
	int ret = 0;
	if (ser->type != PORT_UNKNOWN && ser->type != PORT_AMBA)
		ret = -EINVAL;
	if (ser->irq < 0 || ser->irq >= nr_irqs)
		ret = -EINVAL;
	if (ser->baud_base < 9600)
		ret = -EINVAL;
	return ret;
}
2127 static const struct uart_ops amba_pl011_pops = {
2128 .tx_empty = pl011_tx_empty,
2129 .set_mctrl = pl011_set_mctrl,
2130 .get_mctrl = pl011_get_mctrl,
2131 .stop_tx = pl011_stop_tx,
2132 .start_tx = pl011_start_tx,
2133 .stop_rx = pl011_stop_rx,
2134 .enable_ms = pl011_enable_ms,
2135 .break_ctl = pl011_break_ctl,
2136 .startup = pl011_startup,
2137 .shutdown = pl011_shutdown,
2138 .flush_buffer = pl011_dma_flush_buffer,
2139 .set_termios = pl011_set_termios,
2141 .release_port = pl011_release_port,
2142 .request_port = pl011_request_port,
2143 .config_port = pl011_config_port,
2144 .verify_port = pl011_verify_port,
2145 #ifdef CONFIG_CONSOLE_POLL
2146 .poll_init = pl011_hwinit,
2147 .poll_get_char = pl011_get_poll_char,
2148 .poll_put_char = pl011_put_poll_char,
static void sbsa_uart_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
}

static unsigned int sbsa_uart_get_mctrl(struct uart_port *port)
{
	return 0;
}
2161 static const struct uart_ops sbsa_uart_pops = {
2162 .tx_empty = pl011_tx_empty,
2163 .set_mctrl = sbsa_uart_set_mctrl,
2164 .get_mctrl = sbsa_uart_get_mctrl,
2165 .stop_tx = pl011_stop_tx,
2166 .start_tx = pl011_start_tx,
2167 .stop_rx = pl011_stop_rx,
2168 .startup = sbsa_uart_startup,
2169 .shutdown = sbsa_uart_shutdown,
2170 .set_termios = sbsa_uart_set_termios,
2172 .release_port = pl011_release_port,
2173 .request_port = pl011_request_port,
2174 .config_port = pl011_config_port,
2175 .verify_port = pl011_verify_port,
2176 #ifdef CONFIG_CONSOLE_POLL
2177 .poll_init = pl011_hwinit,
2178 .poll_get_char = pl011_get_poll_char,
2179 .poll_put_char = pl011_put_poll_char,
2183 static struct uart_amba_port *amba_ports[UART_NR];
2185 #ifdef CONFIG_SERIAL_AMBA_PL011_CONSOLE
static void pl011_console_putchar(struct uart_port *port, int ch)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);

	while (pl011_read(uap, REG_FR) & UART01x_FR_TXFF)
		cpu_relax();
	pl011_write(ch, uap, REG_DR);
}
static void
pl011_console_write(struct console *co, const char *s, unsigned int count)
{
	struct uart_amba_port *uap = amba_ports[co->index];
	unsigned int old_cr = 0, new_cr;
	unsigned long flags;
	int locked = 1;
2205 clk_enable(uap->clk);
2207 local_irq_save(flags);
	if (uap->port.sysrq)
		locked = 0;
	else if (oops_in_progress)
		locked = spin_trylock(&uap->port.lock);
	else
		spin_lock(&uap->port.lock);
2216 * First save the CR then disable the interrupts
2218 if (!uap->vendor->always_enabled) {
2219 old_cr = pl011_read(uap, REG_CR);
2220 new_cr = old_cr & ~UART011_CR_CTSEN;
2221 new_cr |= UART01x_CR_UARTEN | UART011_CR_TXE;
2222 pl011_write(new_cr, uap, REG_CR);
2225 uart_console_write(&uap->port, s, count, pl011_console_putchar);
	/*
	 * Finally, wait for the transmitter to become empty and restore the
	 * CR. Allow feature register bits to be inverted to work around
	 * errata.
	 */
	while ((pl011_read(uap, REG_FR) ^ uap->vendor->inv_fr)
						& uap->vendor->fr_busy)
		cpu_relax();
	if (!uap->vendor->always_enabled)
		pl011_write(old_cr, uap, REG_CR);

	if (locked)
		spin_unlock(&uap->port.lock);
	local_irq_restore(flags);

	clk_disable(uap->clk);
}
2246 pl011_console_get_options(struct uart_amba_port *uap, int *baud,
2247 int *parity, int *bits)
2249 if (pl011_read(uap, REG_CR) & UART01x_CR_UARTEN) {
2250 unsigned int lcr_h, ibrd, fbrd;
		lcr_h = pl011_read(uap, REG_LCRH_TX);

		*parity = 'n';
		if (lcr_h & UART01x_LCRH_PEN) {
			if (lcr_h & UART01x_LCRH_EPS)
				*parity = 'e';
			else
				*parity = 'o';
		}

		if ((lcr_h & 0x60) == UART01x_LCRH_WLEN_7)
			*bits = 7;
		else
			*bits = 8;
2267 ibrd = pl011_read(uap, REG_IBRD);
2268 fbrd = pl011_read(uap, REG_FBRD);
2270 *baud = uap->port.uartclk * 4 / (64 * ibrd + fbrd);
2272 if (uap->vendor->oversampling) {
2273 if (pl011_read(uap, REG_CR)
2274 & ST_UART011_CR_OVSFACT)
2280 static int __init pl011_console_setup(struct console *co, char *options)
{
	struct uart_amba_port *uap;
	int baud = 38400;
	int bits = 8;
	int parity = 'n';
	int flow = 'n';
	int ret;
	/*
	 * Check whether an invalid uart number has been specified, and
	 * if so, search for the first available port that does have
	 * console support.
	 */
	if (co->index >= UART_NR)
		co->index = 0;
	uap = amba_ports[co->index];
	if (!uap)
		return -ENODEV;
2300 /* Allow pins to be muxed in and configured */
2301 pinctrl_pm_select_default_state(uap->port.dev);
2303 ret = clk_prepare(uap->clk);
2307 if (dev_get_platdata(uap->port.dev)) {
2308 struct amba_pl011_data *plat;
2310 plat = dev_get_platdata(uap->port.dev);
2315 uap->port.uartclk = clk_get_rate(uap->clk);
2317 if (uap->vendor->fixed_options) {
2318 baud = uap->fixed_baud;
2321 uart_parse_options(options,
2322 &baud, &parity, &bits, &flow);
2324 pl011_console_get_options(uap, &baud, &parity, &bits);
2327 return uart_set_options(&uap->port, co, baud, parity, bits, flow);
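
/*
 * The options string parsed above follows the usual serial-core format,
 * e.g. "console=ttyAMA0,115200n8" on the kernel command line for
 * 115200 baud, no parity, 8 data bits.
 */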

/**
 *	pl011_console_match - non-standard console matching
 *	@co:	  registering console
 *	@name:	  name from console command line
 *	@idx:	  index from console command line
 *	@options: ptr to option string from console command line
 *
 *	Only attempts to match console command lines of the form:
 *	    console=pl011,mmio|mmio32,<addr>[,<options>]
 *	    console=pl011,0x<addr>[,<options>]
 *	This form is used to register an initial earlycon boot console and
 *	replace it with the amba_console at pl011 driver init.
 *
 *	Performs console setup for a match (as required by interface)
 *	If no <options> are specified, then assume the h/w is already setup.
 *
 *	Returns 0 if console matches; otherwise non-zero to use default matching
 */
static int __init pl011_console_match(struct console *co, char *name, int idx,
				      char *options)
{
	unsigned char iotype;
	resource_size_t addr;
	int i;

	/*
	 * Systems affected by the Qualcomm Technologies QDF2400 E44 erratum
	 * have a distinct console name, so make sure we check for that.
	 * The actual implementation of the erratum occurs in the probe
	 * function.
	 */
	if ((strcmp(name, "qdf2400_e44") != 0) && (strcmp(name, "pl011") != 0))
		return -ENODEV;

	if (uart_parse_earlycon(options, &iotype, &addr, &options))
		return -ENODEV;

	if (iotype != UPIO_MEM && iotype != UPIO_MEM32)
		return -ENODEV;

	/* try to match the port specified on the command line */
	for (i = 0; i < ARRAY_SIZE(amba_ports); i++) {
		struct uart_port *port;

		if (!amba_ports[i])
			continue;

		port = &amba_ports[i]->port;

		if (port->mapbase != addr)
			continue;

		co->index = i;
		port->cons = co;
		return pl011_console_setup(co, options);
	}

	return -ENODEV;
}

static struct uart_driver amba_reg;
static struct console amba_console = {
	.name		= "ttyAMA",
	.write		= pl011_console_write,
	.device		= uart_console_device,
	.setup		= pl011_console_setup,
	.match		= pl011_console_match,
	.flags		= CON_PRINTBUFFER | CON_ANYTIME,
	.index		= -1,
	.data		= &amba_reg,
};

#define AMBA_CONSOLE	(&amba_console)
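
/*
 * QDF2400 Erratum 44 workaround: rather than trusting the busy flag, wait
 * for the TX FIFO to report completely empty after every character.
 */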
static void qdf2400_e44_putc(struct uart_port *port, int c)
{
	while (readl(port->membase + UART01x_FR) & UART01x_FR_TXFF)
		cpu_relax();
	writel(c, port->membase + UART01x_DR);
	while (!(readl(port->membase + UART01x_FR) & UART011_FR_TXFE))
		cpu_relax();
}

static void qdf2400_e44_early_write(struct console *con, const char *s, unsigned n)
{
	struct earlycon_device *dev = con->data;

	uart_console_write(&dev->port, s, n, qdf2400_e44_putc);
}

static void pl011_putc(struct uart_port *port, int c)
{
	while (readl(port->membase + UART01x_FR) & UART01x_FR_TXFF)
		cpu_relax();
	if (port->iotype == UPIO_MEM32)
		writel(c, port->membase + UART01x_DR);
	else
		writeb(c, port->membase + UART01x_DR);
	while (readl(port->membase + UART01x_FR) & UART01x_FR_BUSY)
		cpu_relax();
}

static void pl011_early_write(struct console *con, const char *s, unsigned n)
{
	struct earlycon_device *dev = con->data;

	uart_console_write(&dev->port, s, n, pl011_putc);
}

/*
 * On non-ACPI systems, earlycon is enabled by specifying
 * "earlycon=pl011,<address>" on the kernel command line.
 *
 * On ACPI ARM64 systems, an "early" console is enabled via the SPCR table,
 * by specifying only "earlycon" on the command line.  Because it requires
 * SPCR, the console starts after ACPI is parsed, which is later than a
 * traditional early console.
 *
 * To get the traditional early console that starts before ACPI is parsed,
 * specify the full "earlycon=pl011,<address>" option.
 */
static int __init pl011_early_console_setup(struct earlycon_device *device,
					    const char *opt)
{
	if (!device->port.membase)
		return -ENODEV;

	device->con->write = pl011_early_write;

	return 0;
}
OF_EARLYCON_DECLARE(pl011, "arm,pl011", pl011_early_console_setup);
OF_EARLYCON_DECLARE(pl011, "arm,sbsa-uart", pl011_early_console_setup);
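
/*
 * For example (illustrative address, as used by QEMU's "virt" machine):
 *
 *	earlycon=pl011,0x09000000
 *
 * brings the console up well before the regular ttyAMA console registers.
 */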

/*
 * On Qualcomm Datacenter Technologies QDF2400 SOCs affected by
 * Erratum 44, traditional earlycon can be enabled by specifying
 * "earlycon=qdf2400_e44,<address>".  Any options are ignored.
 *
 * Alternatively, you can just specify "earlycon", and the early console
 * will be enabled with the information from the SPCR table.  In this
 * case, the SPCR code will detect the need for the E44 work-around,
 * and set the console name to "qdf2400_e44".
 */
static int __init
qdf2400_e44_early_console_setup(struct earlycon_device *device,
				const char *opt)
{
	if (!device->port.membase)
		return -ENODEV;

	device->con->write = qdf2400_e44_early_write;
	return 0;
}
EARLYCON_DECLARE(qdf2400_e44, qdf2400_e44_early_console_setup);

#else
#define AMBA_CONSOLE	NULL
#endif

static struct uart_driver amba_reg = {
	.owner			= THIS_MODULE,
	.driver_name		= "ttyAMA",
	.dev_name		= "ttyAMA",
	.major			= SERIAL_AMBA_MAJOR,
	.minor			= SERIAL_AMBA_MINOR,
	.nr			= UART_NR,
	.cons			= AMBA_CONSOLE,
};
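
/*
 * Ports register at major 204, minor 64 + line, and appear to userspace
 * as /dev/ttyAMA0, /dev/ttyAMA1, ...
 */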

static int pl011_probe_dt_alias(int index, struct device *dev)
{
	struct device_node *np;
	static bool seen_dev_with_alias = false;
	static bool seen_dev_without_alias = false;
	int ret = index;

	if (!IS_ENABLED(CONFIG_OF))
		return ret;

	np = dev->of_node;
	if (!np)
		return ret;

	ret = of_alias_get_id(np, "serial");
	if (ret < 0) {
		seen_dev_without_alias = true;
		ret = index;
	} else {
		seen_dev_with_alias = true;
		if (ret >= ARRAY_SIZE(amba_ports) || amba_ports[ret] != NULL) {
			dev_warn(dev, "requested serial port %d not available.\n", ret);
			ret = index;
		}
	}

	if (seen_dev_with_alias && seen_dev_without_alias)
		dev_warn(dev, "aliased and non-aliased serial devices found in device tree. Serial port enumeration may be unpredictable.\n");

	return ret;
}
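
/*
 * Illustrative device-tree fragment (assumed names, not from a specific
 * board) showing the alias consumed above:
 *
 *	aliases {
 *		serial0 = &uart0;
 *	};
 *
 * With such an alias the matching port keeps a stable line number (here
 * ttyAMA0) regardless of probe order.
 */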

/* also unregisters the driver if no ports are left */
static void pl011_unregister_port(struct uart_amba_port *uap)
{
	int i;
	bool busy = false;

	for (i = 0; i < ARRAY_SIZE(amba_ports); i++) {
		if (amba_ports[i] == uap)
			amba_ports[i] = NULL;
		else if (amba_ports[i])
			busy = true;
	}
	pl011_dma_remove(uap);
	if (!busy)
		uart_unregister_driver(&amba_reg);
}

static int pl011_find_free_port(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(amba_ports); i++)
		if (amba_ports[i] == NULL)
			return i;

	return -EBUSY;
}

static int pl011_setup_port(struct device *dev, struct uart_amba_port *uap,
			    struct resource *mmiobase, int index)
{
	void __iomem *base;

	base = devm_ioremap_resource(dev, mmiobase);
	if (IS_ERR(base))
		return PTR_ERR(base);

	index = pl011_probe_dt_alias(index, dev);

	uap->old_cr = 0;
	uap->port.dev = dev;
	uap->port.mapbase = mmiobase->start;
	uap->port.membase = base;
	uap->port.fifosize = uap->fifosize;
	uap->port.flags = UPF_BOOT_AUTOCONF;
	uap->port.line = index;

	amba_ports[index] = uap;

	return 0;
}

static int pl011_register_port(struct uart_amba_port *uap)
{
	int ret;

	/* Ensure interrupts from this UART are masked and cleared */
	pl011_write(0, uap, REG_IMSC);
	pl011_write(0xffff, uap, REG_ICR);

	if (!amba_reg.state) {
		ret = uart_register_driver(&amba_reg);
		if (ret < 0) {
			dev_err(uap->port.dev,
				"Failed to register AMBA-PL011 driver\n");
			return ret;
		}
	}

	ret = uart_add_one_port(&amba_reg, &uap->port);
	if (ret)
		pl011_unregister_port(uap);

	return ret;
}

static int pl011_probe(struct amba_device *dev, const struct amba_id *id)
{
	struct uart_amba_port *uap;
	struct vendor_data *vendor = id->data;
	int portnr, ret;

	portnr = pl011_find_free_port();
	if (portnr < 0)
		return portnr;

	uap = devm_kzalloc(&dev->dev, sizeof(struct uart_amba_port),
			   GFP_KERNEL);
	if (!uap)
		return -ENOMEM;

	uap->clk = devm_clk_get(&dev->dev, NULL);
	if (IS_ERR(uap->clk))
		return PTR_ERR(uap->clk);

	uap->reg_offset = vendor->reg_offset;
	uap->vendor = vendor;
	uap->fifosize = vendor->get_fifosize(dev);
	uap->port.iotype = vendor->access_32b ? UPIO_MEM32 : UPIO_MEM;
	uap->port.irq = dev->irq[0];
	uap->port.ops = &amba_pl011_pops;

	snprintf(uap->type, sizeof(uap->type), "PL011 rev%u", amba_rev(dev));

	ret = pl011_setup_port(&dev->dev, uap, &dev->res, portnr);
	if (ret)
		return ret;

	amba_set_drvdata(dev, uap);

	return pl011_register_port(uap);
}

static int pl011_remove(struct amba_device *dev)
{
	struct uart_amba_port *uap = amba_get_drvdata(dev);

	uart_remove_one_port(&amba_reg, &uap->port);
	pl011_unregister_port(uap);
	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int pl011_suspend(struct device *dev)
{
	struct uart_amba_port *uap = dev_get_drvdata(dev);

	if (!uap)
		return -EINVAL;

	return uart_suspend_port(&amba_reg, &uap->port);
}

static int pl011_resume(struct device *dev)
{
	struct uart_amba_port *uap = dev_get_drvdata(dev);

	if (!uap)
		return -EINVAL;

	return uart_resume_port(&amba_reg, &uap->port);
}
#endif

static SIMPLE_DEV_PM_OPS(pl011_dev_pm_ops, pl011_suspend, pl011_resume);
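
/*
 * Illustrative SBSA UART node (assumed values, not from a specific board)
 * providing the mandatory "current-speed" property checked in the probe
 * below:
 *
 *	serial@9000000 {
 *		compatible = "arm,sbsa-uart";
 *		reg = <0x0 0x9000000 0x0 0x1000>;
 *		current-speed = <115200>;
 *	};
 */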
static int sbsa_uart_probe(struct platform_device *pdev)
{
	struct uart_amba_port *uap;
	struct resource *r;
	int portnr, ret;
	int baudrate;

	/*
	 * Check the mandatory baud rate parameter in the DT node early
	 * so that we can easily exit with the error.
	 */
	if (pdev->dev.of_node) {
		struct device_node *np = pdev->dev.of_node;

		ret = of_property_read_u32(np, "current-speed", &baudrate);
		if (ret)
			return ret;
	} else {
		baudrate = 115200;
	}

	portnr = pl011_find_free_port();
	if (portnr < 0)
		return portnr;

	uap = devm_kzalloc(&pdev->dev, sizeof(struct uart_amba_port),
			   GFP_KERNEL);
	if (!uap)
		return -ENOMEM;

	ret = platform_get_irq(pdev, 0);
	if (ret < 0) {
		if (ret != -EPROBE_DEFER)
			dev_err(&pdev->dev, "cannot obtain irq\n");
		return ret;
	}
	uap->port.irq = ret;

#ifdef CONFIG_ACPI_SPCR_TABLE
	if (qdf2400_e44_present) {
		dev_info(&pdev->dev, "working around QDF2400 SoC erratum 44\n");
		uap->vendor = &vendor_qdt_qdf2400_e44;
	} else
#endif
		uap->vendor = &vendor_sbsa;

	uap->reg_offset = uap->vendor->reg_offset;
	uap->fifosize = 32;
	uap->port.iotype = uap->vendor->access_32b ? UPIO_MEM32 : UPIO_MEM;
	uap->port.ops = &sbsa_uart_pops;
	uap->fixed_baud = baudrate;

	snprintf(uap->type, sizeof(uap->type), "SBSA");

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);

	ret = pl011_setup_port(&pdev->dev, uap, r, portnr);
	if (ret)
		return ret;

	platform_set_drvdata(pdev, uap);

	return pl011_register_port(uap);
}

static int sbsa_uart_remove(struct platform_device *pdev)
{
	struct uart_amba_port *uap = platform_get_drvdata(pdev);

	uart_remove_one_port(&amba_reg, &uap->port);
	pl011_unregister_port(uap);
	return 0;
}

static const struct of_device_id sbsa_uart_of_match[] = {
	{ .compatible = "arm,sbsa-uart", },
	{},
};
MODULE_DEVICE_TABLE(of, sbsa_uart_of_match);

static const struct acpi_device_id sbsa_uart_acpi_match[] = {
	{ "ARMH0011", 0 },
	{},
};
MODULE_DEVICE_TABLE(acpi, sbsa_uart_acpi_match);

static struct platform_driver arm_sbsa_uart_platform_driver = {
	.probe		= sbsa_uart_probe,
	.remove		= sbsa_uart_remove,
	.driver	= {
		.name	= "sbsa-uart",
		.of_match_table = of_match_ptr(sbsa_uart_of_match),
		.acpi_match_table = ACPI_PTR(sbsa_uart_acpi_match),
	},
};

static const struct amba_id pl011_ids[] = {
	{
		.id	= 0x00041011,
		.mask	= 0x000fffff,
		.data	= &vendor_arm,
	},
	{
		.id	= 0x00380802,
		.mask	= 0x00ffffff,
		.data	= &vendor_st,
	},
	{
		.id	= AMBA_LINUX_ID(0x00, 0x1, 0xffe),
		.mask	= 0x00ffffff,
		.data	= &vendor_zte,
	},
	{ 0, 0 },
};

MODULE_DEVICE_TABLE(amba, pl011_ids);
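
/*
 * The first entry matches the ARM primecell ID for PL011 (part 0x011,
 * designer 0x41 under the usual primecell encoding); its mask leaves the
 * revision field out so every PL011 revision binds to this driver.
 */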

static struct amba_driver pl011_driver = {
	.drv = {
		.name	= "uart-pl011",
		.pm	= &pl011_dev_pm_ops,
	},
	.id_table	= pl011_ids,
	.probe		= pl011_probe,
	.remove		= pl011_remove,
};

static int __init pl011_init(void)
{
	pr_info("Serial: AMBA PL011 UART driver\n");

	if (platform_driver_register(&arm_sbsa_uart_platform_driver))
		pr_warn("could not register SBSA UART platform driver\n");
	return amba_driver_register(&pl011_driver);
}

static void __exit pl011_exit(void)
{
	platform_driver_unregister(&arm_sbsa_uart_platform_driver);
	amba_driver_unregister(&pl011_driver);
}

/*
 * While this can be a module, if built in it's most likely the console,
 * so leave module_exit() in place but register the init earlier via
 * arch_initcall().
 */
arch_initcall(pl011_init);
module_exit(pl011_exit);

MODULE_AUTHOR("ARM Ltd/Deep Blue Solutions Ltd");
MODULE_DESCRIPTION("ARM AMBA serial port driver");
MODULE_LICENSE("GPL");