/*
 * Driver for AMBA serial ports
 *
 * Based on drivers/char/serial.c, by Linus Torvalds, Theodore Ts'o.
 *
 * Copyright 1999 ARM Limited
 * Copyright (C) 2000 Deep Blue Solutions Ltd.
 * Copyright (C) 2010 ST-Ericsson SA
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 * This is a generic driver for ARM AMBA-type serial ports.  They
 * have a lot of 16550-like features, but are not register compatible.
 * Note that although they do have CTS, DCD and DSR inputs, they do
 * not have an RI input, nor do they have DTR or RTS outputs.  If
 * required, these have to be supplied via some other means (eg, GPIO)
 * and hooked into this driver.
 */
33 #if defined(CONFIG_SERIAL_AMBA_PL011_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
37 #include <linux/module.h>
38 #include <linux/ioport.h>
39 #include <linux/init.h>
40 #include <linux/console.h>
41 #include <linux/sysrq.h>
42 #include <linux/device.h>
43 #include <linux/tty.h>
44 #include <linux/tty_flip.h>
45 #include <linux/serial_core.h>
46 #include <linux/serial.h>
47 #include <linux/amba/bus.h>
48 #include <linux/amba/serial.h>
49 #include <linux/clk.h>
50 #include <linux/slab.h>
51 #include <linux/dmaengine.h>
52 #include <linux/dma-mapping.h>
53 #include <linux/scatterlist.h>
54 #include <linux/delay.h>
55 #include <linux/types.h>
57 #include <linux/of_device.h>
58 #include <linux/pinctrl/consumer.h>
59 #include <linux/sizes.h>
61 #include <linux/acpi.h>
63 #include "amba-pl011.h"
67 #define SERIAL_AMBA_MAJOR 204
68 #define SERIAL_AMBA_MINOR 64
69 #define SERIAL_AMBA_NR UART_NR
71 #define AMBA_ISR_PASS_LIMIT 256
73 #define UART_DR_ERROR (UART011_DR_OE|UART011_DR_BE|UART011_DR_PE|UART011_DR_FE)
74 #define UART_DUMMY_DR_RX (1 << 16)
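/*
 * Editorial note (not in the original source): a PL011 DR read returns
 * the character in bits 0-7 and the matching OE/BE/PE/FE error flags in
 * bits 8-11, so one read yields both data and status. Bit 16 is unused
 * by the hardware; pl011_fifo_to_tty() ORs UART_DUMMY_DR_RX into every
 * character so that, when CREAD is clear, pl011_setup_status_masks()
 * can add this bit to ignore_status_mask and have every received
 * character discarded as a whole:
 *
 *	ch = pl011_read(uap, REG_DR) | UART_DUMMY_DR_RX;
 */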
76 static u16 pl011_std_offsets[REG_ARRAY_SIZE] = {
77 [REG_DR] = UART01x_DR,
78 [REG_FR] = UART01x_FR,
79 [REG_LCRH_RX] = UART011_LCRH,
80 [REG_LCRH_TX] = UART011_LCRH,
81 [REG_IBRD] = UART011_IBRD,
82 [REG_FBRD] = UART011_FBRD,
83 [REG_CR] = UART011_CR,
84 [REG_IFLS] = UART011_IFLS,
85 [REG_IMSC] = UART011_IMSC,
86 [REG_RIS] = UART011_RIS,
87 [REG_MIS] = UART011_MIS,
88 [REG_ICR] = UART011_ICR,
89 [REG_DMACR] = UART011_DMACR,
92 /* There is by now at least one vendor with differing details, so handle it */
94 const u16 *reg_offset;
104 bool cts_event_workaround;
108 unsigned int (*get_fifosize)(struct amba_device *dev);
111 static unsigned int get_fifosize_arm(struct amba_device *dev)
113 return amba_rev(dev) < 3 ? 16 : 32;
116 static struct vendor_data vendor_arm = {
117 .reg_offset = pl011_std_offsets,
118 .ifls = UART011_IFLS_RX4_8|UART011_IFLS_TX4_8,
119 .fr_busy = UART01x_FR_BUSY,
120 .fr_dsr = UART01x_FR_DSR,
121 .fr_cts = UART01x_FR_CTS,
122 .fr_ri = UART011_FR_RI,
123 .oversampling = false,
124 .dma_threshold = false,
125 .cts_event_workaround = false,
126 .always_enabled = false,
127 .fixed_options = false,
128 .get_fifosize = get_fifosize_arm,
131 static const struct vendor_data vendor_sbsa = {
132 .reg_offset = pl011_std_offsets,
133 .fr_busy = UART01x_FR_BUSY,
134 .fr_dsr = UART01x_FR_DSR,
135 .fr_cts = UART01x_FR_CTS,
136 .fr_ri = UART011_FR_RI,
138 .oversampling = false,
139 .dma_threshold = false,
140 .cts_event_workaround = false,
141 .always_enabled = true,
142 .fixed_options = true,
145 #ifdef CONFIG_ACPI_SPCR_TABLE
146 static const struct vendor_data vendor_qdt_qdf2400_e44 = {
147 .reg_offset = pl011_std_offsets,
148 .fr_busy = UART011_FR_TXFE,
149 .fr_dsr = UART01x_FR_DSR,
150 .fr_cts = UART01x_FR_CTS,
151 .fr_ri = UART011_FR_RI,
152 .inv_fr = UART011_FR_TXFE,
154 .oversampling = false,
155 .dma_threshold = false,
156 .cts_event_workaround = false,
157 .always_enabled = true,
158 .fixed_options = true,
162 static u16 pl011_st_offsets[REG_ARRAY_SIZE] = {
163 [REG_DR] = UART01x_DR,
164 [REG_ST_DMAWM] = ST_UART011_DMAWM,
165 [REG_ST_TIMEOUT] = ST_UART011_TIMEOUT,
166 [REG_FR] = UART01x_FR,
167 [REG_LCRH_RX] = ST_UART011_LCRH_RX,
168 [REG_LCRH_TX] = ST_UART011_LCRH_TX,
169 [REG_IBRD] = UART011_IBRD,
170 [REG_FBRD] = UART011_FBRD,
171 [REG_CR] = UART011_CR,
172 [REG_IFLS] = UART011_IFLS,
173 [REG_IMSC] = UART011_IMSC,
174 [REG_RIS] = UART011_RIS,
175 [REG_MIS] = UART011_MIS,
176 [REG_ICR] = UART011_ICR,
177 [REG_DMACR] = UART011_DMACR,
178 [REG_ST_XFCR] = ST_UART011_XFCR,
179 [REG_ST_XON1] = ST_UART011_XON1,
180 [REG_ST_XON2] = ST_UART011_XON2,
181 [REG_ST_XOFF1] = ST_UART011_XOFF1,
182 [REG_ST_XOFF2] = ST_UART011_XOFF2,
183 [REG_ST_ITCR] = ST_UART011_ITCR,
184 [REG_ST_ITIP] = ST_UART011_ITIP,
185 [REG_ST_ABCR] = ST_UART011_ABCR,
186 [REG_ST_ABIMSC] = ST_UART011_ABIMSC,
189 static unsigned int get_fifosize_st(struct amba_device *dev)
194 static struct vendor_data vendor_st = {
195 .reg_offset = pl011_st_offsets,
196 .ifls = UART011_IFLS_RX_HALF|UART011_IFLS_TX_HALF,
197 .fr_busy = UART01x_FR_BUSY,
198 .fr_dsr = UART01x_FR_DSR,
199 .fr_cts = UART01x_FR_CTS,
200 .fr_ri = UART011_FR_RI,
201 .oversampling = true,
202 .dma_threshold = true,
203 .cts_event_workaround = true,
204 .always_enabled = false,
205 .fixed_options = false,
206 .get_fifosize = get_fifosize_st,
209 static const u16 pl011_zte_offsets[REG_ARRAY_SIZE] = {
210 [REG_DR] = ZX_UART011_DR,
211 [REG_FR] = ZX_UART011_FR,
212 [REG_LCRH_RX] = ZX_UART011_LCRH,
213 [REG_LCRH_TX] = ZX_UART011_LCRH,
214 [REG_IBRD] = ZX_UART011_IBRD,
215 [REG_FBRD] = ZX_UART011_FBRD,
216 [REG_CR] = ZX_UART011_CR,
217 [REG_IFLS] = ZX_UART011_IFLS,
218 [REG_IMSC] = ZX_UART011_IMSC,
219 [REG_RIS] = ZX_UART011_RIS,
220 [REG_MIS] = ZX_UART011_MIS,
221 [REG_ICR] = ZX_UART011_ICR,
222 [REG_DMACR] = ZX_UART011_DMACR,
225 static unsigned int get_fifosize_zte(struct amba_device *dev)
230 static struct vendor_data vendor_zte = {
231 .reg_offset = pl011_zte_offsets,
233 .ifls = UART011_IFLS_RX4_8|UART011_IFLS_TX4_8,
234 .fr_busy = ZX_UART01x_FR_BUSY,
235 .fr_dsr = ZX_UART01x_FR_DSR,
236 .fr_cts = ZX_UART01x_FR_CTS,
237 .fr_ri = ZX_UART011_FR_RI,
238 .get_fifosize = get_fifosize_zte,
241 /* Deals with DMA transactions */
244 struct scatterlist sg;
248 struct pl011_dmarx_data {
249 struct dma_chan *chan;
250 struct completion complete;
252 struct pl011_sgbuf sgbuf_a;
253 struct pl011_sgbuf sgbuf_b;
256 struct timer_list timer;
257 unsigned int last_residue;
258 unsigned long last_jiffies;
260 unsigned int poll_rate;
261 unsigned int poll_timeout;
264 struct pl011_dmatx_data {
265 struct dma_chan *chan;
266 struct scatterlist sg;
272 * We wrap our port structure around the generic uart_port.
274 struct uart_amba_port {
275 struct uart_port port;
276 const u16 *reg_offset;
278 const struct vendor_data *vendor;
279 unsigned int dmacr; /* dma control reg */
280 unsigned int im; /* interrupt mask */
281 unsigned int old_status;
282 unsigned int fifosize; /* vendor-specific */
283 unsigned int old_cr; /* state during shutdown */
285 unsigned int fixed_baud; /* vendor-set fixed baud rate */
287 #ifdef CONFIG_DMA_ENGINE
291 struct pl011_dmarx_data dmarx;
292 struct pl011_dmatx_data dmatx;
297 static unsigned int pl011_reg_to_offset(const struct uart_amba_port *uap,
300 return uap->reg_offset[reg];
303 static unsigned int pl011_read(const struct uart_amba_port *uap,
306 void __iomem *addr = uap->port.membase + pl011_reg_to_offset(uap, reg);
308 return (uap->port.iotype == UPIO_MEM32) ?
309 readl_relaxed(addr) : readw_relaxed(addr);
312 static void pl011_write(unsigned int val, const struct uart_amba_port *uap,
315 void __iomem *addr = uap->port.membase + pl011_reg_to_offset(uap, reg);
317 if (uap->port.iotype == UPIO_MEM32)
318 writel_relaxed(val, addr);
320 writew_relaxed(val, addr);
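/*
 * Usage sketch (added for illustration): all register traffic in this
 * driver funnels through pl011_read()/pl011_write(), so vendor quirks
 * reduce to a table lookup plus an access-width check. Masking the TX
 * interrupt, for instance, is written as
 *
 *	uap->im &= ~UART011_TXIM;
 *	pl011_write(uap->im, uap, REG_IMSC);
 *
 * where pl011_reg_to_offset() resolves the abstract REG_IMSC index to
 * UART011_IMSC via pl011_std_offsets (and the ST table), or to
 * ZX_UART011_IMSC via pl011_zte_offsets for the ZTE variant.
 */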
324 * Reads up to 256 characters from the FIFO or until it's empty and
325 * inserts them into the TTY layer. Returns the number of characters
326 * read from the FIFO.
328 static int pl011_fifo_to_tty(struct uart_amba_port *uap)
331 unsigned int ch, flag, max_count = 256;
334 while (max_count--) {
335 status = pl011_read(uap, REG_FR);
336 if (status & UART01x_FR_RXFE)
339 /* Take chars from the FIFO and update status */
340 ch = pl011_read(uap, REG_DR) | UART_DUMMY_DR_RX;
342 uap->port.icount.rx++;
345 if (unlikely(ch & UART_DR_ERROR)) {
346 if (ch & UART011_DR_BE) {
347 ch &= ~(UART011_DR_FE | UART011_DR_PE);
348 uap->port.icount.brk++;
349 if (uart_handle_break(&uap->port))
351 } else if (ch & UART011_DR_PE)
352 uap->port.icount.parity++;
353 else if (ch & UART011_DR_FE)
354 uap->port.icount.frame++;
355 if (ch & UART011_DR_OE)
356 uap->port.icount.overrun++;
358 ch &= uap->port.read_status_mask;
360 if (ch & UART011_DR_BE)
362 else if (ch & UART011_DR_PE)
364 else if (ch & UART011_DR_FE)
368 if (uart_handle_sysrq_char(&uap->port, ch & 255))
371 uart_insert_char(&uap->port, ch, UART011_DR_OE, ch, flag);
379 * All the DMA operation mode stuff goes inside this ifdef.
380 * This assumes that you have a generic DMA device interface,
381 * no custom DMA interfaces are supported.
383 #ifdef CONFIG_DMA_ENGINE
385 #define PL011_DMA_BUFFER_SIZE PAGE_SIZE
387 static int pl011_sgbuf_init(struct dma_chan *chan, struct pl011_sgbuf *sg,
388 enum dma_data_direction dir)
392 sg->buf = dma_alloc_coherent(chan->device->dev,
393 PL011_DMA_BUFFER_SIZE, &dma_addr, GFP_KERNEL);
397 sg_init_table(&sg->sg, 1);
398 sg_set_page(&sg->sg, phys_to_page(dma_addr),
399 PL011_DMA_BUFFER_SIZE, offset_in_page(dma_addr));
400 sg_dma_address(&sg->sg) = dma_addr;
401 sg_dma_len(&sg->sg) = PL011_DMA_BUFFER_SIZE;
406 static void pl011_sgbuf_free(struct dma_chan *chan, struct pl011_sgbuf *sg,
407 enum dma_data_direction dir)
410 dma_free_coherent(chan->device->dev,
411 PL011_DMA_BUFFER_SIZE, sg->buf,
412 sg_dma_address(&sg->sg));
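/*
 * Design note (added): the RX buffers are allocated coherently but are
 * still wrapped in a single-entry scatterlist, because
 * dmaengine_prep_slave_sg() is the portable way to hand a slave
 * transfer to an arbitrary DMA engine. Since dma_alloc_coherent()
 * already provides the bus address, sg_dma_address()/sg_dma_len() are
 * filled in by hand above and no dma_map_sg() step is needed for these
 * buffers.
 */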
416 static void pl011_dma_probe(struct uart_amba_port *uap)
418 /* DMA is the sole user of the platform data right now */
419 struct amba_pl011_data *plat = dev_get_platdata(uap->port.dev);
420 struct device *dev = uap->port.dev;
421 struct dma_slave_config tx_conf = {
422 .dst_addr = uap->port.mapbase +
423 pl011_reg_to_offset(uap, REG_DR),
424 .dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
425 .direction = DMA_MEM_TO_DEV,
426 .dst_maxburst = uap->fifosize >> 1,
429 struct dma_chan *chan;
432 uap->dma_probed = true;
433 chan = dma_request_slave_channel_reason(dev, "tx");
435 if (PTR_ERR(chan) == -EPROBE_DEFER) {
436 uap->dma_probed = false;
440 /* We need platform data */
441 if (!plat || !plat->dma_filter) {
442 dev_info(uap->port.dev, "no DMA platform data\n");
446 /* Try to acquire a generic DMA engine slave TX channel */
448 dma_cap_set(DMA_SLAVE, mask);
450 chan = dma_request_channel(mask, plat->dma_filter,
453 dev_err(uap->port.dev, "no TX DMA channel!\n");
458 dmaengine_slave_config(chan, &tx_conf);
459 uap->dmatx.chan = chan;
461 dev_info(uap->port.dev, "DMA channel TX %s\n",
462 dma_chan_name(uap->dmatx.chan));
464 /* Optionally make use of an RX channel as well */
465 chan = dma_request_slave_channel(dev, "rx");
467 if (!chan && plat && plat->dma_rx_param) {
468 chan = dma_request_channel(mask, plat->dma_filter, plat->dma_rx_param);
471 dev_err(uap->port.dev, "no RX DMA channel!\n");
477 struct dma_slave_config rx_conf = {
478 .src_addr = uap->port.mapbase +
479 pl011_reg_to_offset(uap, REG_DR),
480 .src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
481 .direction = DMA_DEV_TO_MEM,
482 .src_maxburst = uap->fifosize >> 2,
485 struct dma_slave_caps caps;
 * Some DMA controllers provide information on their capabilities.
 * If the controller does, check for suitable residue processing;
 * otherwise assume all is well.
492 if (0 == dma_get_slave_caps(chan, &caps)) {
493 if (caps.residue_granularity ==
494 DMA_RESIDUE_GRANULARITY_DESCRIPTOR) {
495 dma_release_channel(chan);
496 dev_info(uap->port.dev,
497 "RX DMA disabled - no residue processing\n");
501 dmaengine_slave_config(chan, &rx_conf);
502 uap->dmarx.chan = chan;
504 uap->dmarx.auto_poll_rate = false;
505 if (plat && plat->dma_rx_poll_enable) {
506 /* Set poll rate if specified. */
507 if (plat->dma_rx_poll_rate) {
508 uap->dmarx.auto_poll_rate = false;
509 uap->dmarx.poll_rate = plat->dma_rx_poll_rate;
 * Default the poll rate to 100 ms if not
 * specified. This will be adjusted with
 * the baud rate at set_termios.
516 uap->dmarx.auto_poll_rate = true;
517 uap->dmarx.poll_rate = 100;
/* Default poll_timeout to 3 seconds if not specified. */
520 if (plat->dma_rx_poll_timeout)
521 uap->dmarx.poll_timeout =
522 plat->dma_rx_poll_timeout;
524 uap->dmarx.poll_timeout = 3000;
525 } else if (!plat && dev->of_node) {
526 uap->dmarx.auto_poll_rate = of_property_read_bool(
527 dev->of_node, "auto-poll");
528 if (uap->dmarx.auto_poll_rate) {
if (0 == of_property_read_u32(dev->of_node,
		"poll-rate-ms", &x))
	uap->dmarx.poll_rate = x;
535 uap->dmarx.poll_rate = 100;
536 if (0 == of_property_read_u32(dev->of_node,
537 "poll-timeout-ms", &x))
538 uap->dmarx.poll_timeout = x;
540 uap->dmarx.poll_timeout = 3000;
543 dev_info(uap->port.dev, "DMA channel RX %s\n",
544 dma_chan_name(uap->dmarx.chan));
548 static void pl011_dma_remove(struct uart_amba_port *uap)
551 dma_release_channel(uap->dmatx.chan);
553 dma_release_channel(uap->dmarx.chan);
556 /* Forward declare these for the refill routine */
557 static int pl011_dma_tx_refill(struct uart_amba_port *uap);
558 static void pl011_start_tx_pio(struct uart_amba_port *uap);
561 * The current DMA TX buffer has been sent.
562 * Try to queue up another DMA buffer.
564 static void pl011_dma_tx_callback(void *data)
566 struct uart_amba_port *uap = data;
567 struct pl011_dmatx_data *dmatx = &uap->dmatx;
571 spin_lock_irqsave(&uap->port.lock, flags);
572 if (uap->dmatx.queued)
573 dma_unmap_sg(dmatx->chan->device->dev, &dmatx->sg, 1,
577 uap->dmacr = dmacr & ~UART011_TXDMAE;
578 pl011_write(uap->dmacr, uap, REG_DMACR);
 * If TX DMA was disabled, it means that we've stopped the DMA for
 * some reason (eg, XOFF received, or we want to send an X-char.)
 *
 * Note: we need to be careful here of a potential race between DMA
 * and the rest of the driver - if the driver disables TX DMA while
 * a TX buffer is completing, we must update the tx queued status to
 * get further refills (hence we check dmacr).
589 if (!(dmacr & UART011_TXDMAE) || uart_tx_stopped(&uap->port) ||
590 uart_circ_empty(&uap->port.state->xmit)) {
591 uap->dmatx.queued = false;
592 spin_unlock_irqrestore(&uap->port.lock, flags);
596 if (pl011_dma_tx_refill(uap) <= 0)
598 * We didn't queue a DMA buffer for some reason, but we
599 * have data pending to be sent. Re-enable the TX IRQ.
601 pl011_start_tx_pio(uap);
603 spin_unlock_irqrestore(&uap->port.lock, flags);
607 * Try to refill the TX DMA buffer.
608 * Locking: called with port lock held and IRQs disabled.
610 * 1 if we queued up a TX DMA buffer.
611 * 0 if we didn't want to handle this by DMA
614 static int pl011_dma_tx_refill(struct uart_amba_port *uap)
616 struct pl011_dmatx_data *dmatx = &uap->dmatx;
617 struct dma_chan *chan = dmatx->chan;
618 struct dma_device *dma_dev = chan->device;
619 struct dma_async_tx_descriptor *desc;
620 struct circ_buf *xmit = &uap->port.state->xmit;
624 * Try to avoid the overhead involved in using DMA if the
625 * transaction fits in the first half of the FIFO, by using
626 * the standard interrupt handling. This ensures that we
627 * issue a uart_write_wakeup() at the appropriate time.
629 count = uart_circ_chars_pending(xmit);
630 if (count < (uap->fifosize >> 1)) {
631 uap->dmatx.queued = false;
636 * Bodge: don't send the last character by DMA, as this
637 * will prevent XON from notifying us to restart DMA.
641 /* Else proceed to copy the TX chars to the DMA buffer and fire DMA */
642 if (count > PL011_DMA_BUFFER_SIZE)
643 count = PL011_DMA_BUFFER_SIZE;
645 if (xmit->tail < xmit->head)
646 memcpy(&dmatx->buf[0], &xmit->buf[xmit->tail], count);
648 size_t first = UART_XMIT_SIZE - xmit->tail;
653 second = count - first;
655 memcpy(&dmatx->buf[0], &xmit->buf[xmit->tail], first);
657 memcpy(&dmatx->buf[first], &xmit->buf[0], second);
660 dmatx->sg.length = count;
662 if (dma_map_sg(dma_dev->dev, &dmatx->sg, 1, DMA_TO_DEVICE) != 1) {
663 uap->dmatx.queued = false;
664 dev_dbg(uap->port.dev, "unable to map TX DMA\n");
668 desc = dmaengine_prep_slave_sg(chan, &dmatx->sg, 1, DMA_MEM_TO_DEV,
669 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
671 dma_unmap_sg(dma_dev->dev, &dmatx->sg, 1, DMA_TO_DEVICE);
672 uap->dmatx.queued = false;
674 * If DMA cannot be used right now, we complete this
675 * transaction via IRQ and let the TTY layer retry.
677 dev_dbg(uap->port.dev, "TX DMA busy\n");
681 /* Some data to go along to the callback */
682 desc->callback = pl011_dma_tx_callback;
683 desc->callback_param = uap;
685 /* All errors should happen at prepare time */
686 dmaengine_submit(desc);
688 /* Fire the DMA transaction */
689 dma_dev->device_issue_pending(chan);
691 uap->dmacr |= UART011_TXDMAE;
692 pl011_write(uap->dmacr, uap, REG_DMACR);
693 uap->dmatx.queued = true;
696 * Now we know that DMA will fire, so advance the ring buffer
697 * with the stuff we just dispatched.
699 xmit->tail = (xmit->tail + count) & (UART_XMIT_SIZE - 1);
700 uap->port.icount.tx += count;
702 if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
703 uart_write_wakeup(&uap->port);
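/*
 * Worked example (added; numbers hypothetical): with UART_XMIT_SIZE =
 * 4096, xmit->tail = 4000 and count = 200, the copy above wraps:
 * first = 4096 - 4000 = 96 bytes from &xmit->buf[4000], then
 * second = 200 - 96 = 104 bytes from &xmit->buf[0], producing one
 * linear 200-byte DMA buffer. The tail advance uses the power-of-two
 * mask: (4000 + 200) & (4096 - 1) = 104.
 */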
709 * We received a transmit interrupt without a pending X-char but with
710 * pending characters.
711 * Locking: called with port lock held and IRQs disabled.
713 * false if we want to use PIO to transmit
714 * true if we queued a DMA buffer
716 static bool pl011_dma_tx_irq(struct uart_amba_port *uap)
718 if (!uap->using_tx_dma)
722 * If we already have a TX buffer queued, but received a
723 * TX interrupt, it will be because we've just sent an X-char.
724 * Ensure the TX DMA is enabled and the TX IRQ is disabled.
726 if (uap->dmatx.queued) {
727 uap->dmacr |= UART011_TXDMAE;
728 pl011_write(uap->dmacr, uap, REG_DMACR);
729 uap->im &= ~UART011_TXIM;
730 pl011_write(uap->im, uap, REG_IMSC);
735 * We don't have a TX buffer queued, so try to queue one.
736 * If we successfully queued a buffer, mask the TX IRQ.
738 if (pl011_dma_tx_refill(uap) > 0) {
739 uap->im &= ~UART011_TXIM;
740 pl011_write(uap->im, uap, REG_IMSC);
747 * Stop the DMA transmit (eg, due to received XOFF).
748 * Locking: called with port lock held and IRQs disabled.
750 static inline void pl011_dma_tx_stop(struct uart_amba_port *uap)
752 if (uap->dmatx.queued) {
753 uap->dmacr &= ~UART011_TXDMAE;
754 pl011_write(uap->dmacr, uap, REG_DMACR);
759 * Try to start a DMA transmit, or in the case of an XON/OFF
760 * character queued for send, try to get that character out ASAP.
761 * Locking: called with port lock held and IRQs disabled.
763 * false if we want the TX IRQ to be enabled
764 * true if we have a buffer queued
766 static inline bool pl011_dma_tx_start(struct uart_amba_port *uap)
770 if (!uap->using_tx_dma)
773 if (!uap->port.x_char) {
774 /* no X-char, try to push chars out in DMA mode */
777 if (!uap->dmatx.queued) {
778 if (pl011_dma_tx_refill(uap) > 0) {
779 uap->im &= ~UART011_TXIM;
780 pl011_write(uap->im, uap, REG_IMSC);
783 } else if (!(uap->dmacr & UART011_TXDMAE)) {
784 uap->dmacr |= UART011_TXDMAE;
785 pl011_write(uap->dmacr, uap, REG_DMACR);
791 * We have an X-char to send. Disable DMA to prevent it loading
792 * the TX fifo, and then see if we can stuff it into the FIFO.
795 uap->dmacr &= ~UART011_TXDMAE;
796 pl011_write(uap->dmacr, uap, REG_DMACR);
798 if (pl011_read(uap, REG_FR) & UART01x_FR_TXFF) {
800 * No space in the FIFO, so enable the transmit interrupt
801 * so we know when there is space. Note that once we've
802 * loaded the character, we should just re-enable DMA.
807 pl011_write(uap->port.x_char, uap, REG_DR);
808 uap->port.icount.tx++;
809 uap->port.x_char = 0;
811 /* Success - restore the DMA state */
813 pl011_write(dmacr, uap, REG_DMACR);
819 * Flush the transmit buffer.
820 * Locking: called with port lock held and IRQs disabled.
822 static void pl011_dma_flush_buffer(struct uart_port *port)
823 __releases(&uap->port.lock)
824 __acquires(&uap->port.lock)
826 struct uart_amba_port *uap =
827 container_of(port, struct uart_amba_port, port);
829 if (!uap->using_tx_dma)
832 dmaengine_terminate_async(uap->dmatx.chan);
834 if (uap->dmatx.queued) {
835 dma_unmap_sg(uap->dmatx.chan->device->dev, &uap->dmatx.sg, 1,
837 uap->dmatx.queued = false;
838 uap->dmacr &= ~UART011_TXDMAE;
839 pl011_write(uap->dmacr, uap, REG_DMACR);
843 static void pl011_dma_rx_callback(void *data);
845 static int pl011_dma_rx_trigger_dma(struct uart_amba_port *uap)
847 struct dma_chan *rxchan = uap->dmarx.chan;
848 struct pl011_dmarx_data *dmarx = &uap->dmarx;
849 struct dma_async_tx_descriptor *desc;
850 struct pl011_sgbuf *sgbuf;
855 /* Start the RX DMA job */
856 sgbuf = uap->dmarx.use_buf_b ?
857 &uap->dmarx.sgbuf_b : &uap->dmarx.sgbuf_a;
858 desc = dmaengine_prep_slave_sg(rxchan, &sgbuf->sg, 1,
860 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
862 * If the DMA engine is busy and cannot prepare a
863 * channel, no big deal, the driver will fall back
864 * to interrupt mode as a result of this error code.
867 uap->dmarx.running = false;
868 dmaengine_terminate_all(rxchan);
872 /* Some data to go along to the callback */
873 desc->callback = pl011_dma_rx_callback;
874 desc->callback_param = uap;
875 dmarx->cookie = dmaengine_submit(desc);
876 dma_async_issue_pending(rxchan);
878 uap->dmacr |= UART011_RXDMAE;
879 pl011_write(uap->dmacr, uap, REG_DMACR);
880 uap->dmarx.running = true;
882 uap->im &= ~UART011_RXIM;
883 pl011_write(uap->im, uap, REG_IMSC);
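/*
 * Note (added): masking UART011_RXIM here does not make the port deaf
 * while DMA runs. uap->im keeps UART011_RTIM set (see
 * pl011_enable_interrupts()), so the receive-timeout interrupt still
 * fires when the line goes idle with data left over, and pl011_int()
 * then routes it to pl011_dma_rx_irq() to drain both the DMA buffer
 * and the FIFO.
 */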
889 * This is called when either the DMA job is complete, or
890 * the FIFO timeout interrupt occurred. This must be called
891 * with the port spinlock uap->port.lock held.
893 static void pl011_dma_rx_chars(struct uart_amba_port *uap,
894 u32 pending, bool use_buf_b,
897 struct tty_port *port = &uap->port.state->port;
898 struct pl011_sgbuf *sgbuf = use_buf_b ?
899 &uap->dmarx.sgbuf_b : &uap->dmarx.sgbuf_a;
901 u32 fifotaken = 0; /* only used for vdbg() */
903 struct pl011_dmarx_data *dmarx = &uap->dmarx;
906 if (uap->dmarx.poll_rate) {
907 /* The data can be taken by polling */
908 dmataken = sgbuf->sg.length - dmarx->last_residue;
909 /* Recalculate the pending size */
910 if (pending >= dmataken)
/* Pick up the remaining data from the DMA */
 * First take all chars in the DMA pipe, then look in the FIFO.
 * Note that tty_insert_flip_string() tries to take as many chars
 * as it can.
922 dma_count = tty_insert_flip_string(port, sgbuf->buf + dmataken,
925 uap->port.icount.rx += dma_count;
926 if (dma_count < pending)
927 dev_warn(uap->port.dev,
928 "couldn't insert all characters (TTY is full?)\n");
931 /* Reset the last_residue for Rx DMA poll */
932 if (uap->dmarx.poll_rate)
933 dmarx->last_residue = sgbuf->sg.length;
 * Only continue with trying to read the FIFO if all DMA chars have
 * been taken first.
939 if (dma_count == pending && readfifo) {
940 /* Clear any error flags */
941 pl011_write(UART011_OEIS | UART011_BEIS | UART011_PEIS |
942 UART011_FEIS, uap, REG_ICR);
 * If we read all the DMA'd characters, and we had an
 * incomplete buffer, that could be due to an rx error, or
 * maybe we just timed out. Read any pending chars and check
 * the error status.
 *
 * Error conditions will only occur in the FIFO, these will
 * trigger an immediate interrupt and stop the DMA job, so we
 * will always find the error in the FIFO, never in the DMA
 * buffer.
955 fifotaken = pl011_fifo_to_tty(uap);
958 spin_unlock(&uap->port.lock);
959 dev_vdbg(uap->port.dev,
960 "Took %d chars from DMA buffer and %d chars from the FIFO\n",
961 dma_count, fifotaken);
962 tty_flip_buffer_push(port);
963 spin_lock(&uap->port.lock);
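/*
 * Worked example (added; numbers hypothetical): with a 4096-byte sgbuf
 * in poll mode, suppose the poll timer has already consumed 3000 bytes
 * (last_residue = 1096) when the IRQ path reports pending = 3500. Then
 * dmataken = 4096 - 1096 = 3000, pending is reduced to 500, and
 * tty_insert_flip_string() starts at sgbuf->buf + dmataken, so the
 * 3000 bytes the timer already pushed are not delivered twice.
 */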
966 static void pl011_dma_rx_irq(struct uart_amba_port *uap)
968 struct pl011_dmarx_data *dmarx = &uap->dmarx;
969 struct dma_chan *rxchan = dmarx->chan;
970 struct pl011_sgbuf *sgbuf = dmarx->use_buf_b ?
971 &dmarx->sgbuf_b : &dmarx->sgbuf_a;
973 struct dma_tx_state state;
974 enum dma_status dmastat;
 * Pause the transfer so we can trust the current counter,
 * do this before we pause the PL011 block, else we may
 * overflow the FIFO.
981 if (dmaengine_pause(rxchan))
982 dev_err(uap->port.dev, "unable to pause DMA transfer\n");
983 dmastat = rxchan->device->device_tx_status(rxchan,
984 dmarx->cookie, &state);
985 if (dmastat != DMA_PAUSED)
986 dev_err(uap->port.dev, "unable to pause DMA transfer\n");
988 /* Disable RX DMA - incoming data will wait in the FIFO */
989 uap->dmacr &= ~UART011_RXDMAE;
990 pl011_write(uap->dmacr, uap, REG_DMACR);
991 uap->dmarx.running = false;
993 pending = sgbuf->sg.length - state.residue;
994 BUG_ON(pending > PL011_DMA_BUFFER_SIZE);
995 /* Then we terminate the transfer - we now know our residue */
996 dmaengine_terminate_all(rxchan);
999 * This will take the chars we have so far and insert
1000 * into the framework.
1002 pl011_dma_rx_chars(uap, pending, dmarx->use_buf_b, true);
1004 /* Switch buffer & re-trigger DMA job */
1005 dmarx->use_buf_b = !dmarx->use_buf_b;
1006 if (pl011_dma_rx_trigger_dma(uap)) {
dev_dbg(uap->port.dev,
	"could not retrigger RX DMA job, falling back to interrupt mode\n");
1009 uap->im |= UART011_RXIM;
1010 pl011_write(uap->im, uap, REG_IMSC);
1014 static void pl011_dma_rx_callback(void *data)
1016 struct uart_amba_port *uap = data;
1017 struct pl011_dmarx_data *dmarx = &uap->dmarx;
1018 struct dma_chan *rxchan = dmarx->chan;
1019 bool lastbuf = dmarx->use_buf_b;
1020 struct pl011_sgbuf *sgbuf = dmarx->use_buf_b ?
1021 &dmarx->sgbuf_b : &dmarx->sgbuf_a;
1023 struct dma_tx_state state;
1027 * This completion interrupt occurs typically when the
1028 * RX buffer is totally stuffed but no timeout has yet
1029 * occurred. When that happens, we just want the RX
1030 * routine to flush out the secondary DMA buffer while
1031 * we immediately trigger the next DMA job.
1033 spin_lock_irq(&uap->port.lock);
1035 * Rx data can be taken by the UART interrupts during
1036 * the DMA irq handler. So we check the residue here.
1038 rxchan->device->device_tx_status(rxchan, dmarx->cookie, &state);
1039 pending = sgbuf->sg.length - state.residue;
1040 BUG_ON(pending > PL011_DMA_BUFFER_SIZE);
1041 /* Then we terminate the transfer - we now know our residue */
1042 dmaengine_terminate_all(rxchan);
1044 uap->dmarx.running = false;
1045 dmarx->use_buf_b = !lastbuf;
1046 ret = pl011_dma_rx_trigger_dma(uap);
1048 pl011_dma_rx_chars(uap, pending, lastbuf, false);
1049 spin_unlock_irq(&uap->port.lock);
 * Do this check after we picked the DMA chars so we don't
 * get an IRQ immediately from RX.
dev_dbg(uap->port.dev,
	"could not retrigger RX DMA job, falling back to interrupt mode\n");
1057 uap->im |= UART011_RXIM;
1058 pl011_write(uap->im, uap, REG_IMSC);
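/*
 * Summary sketch (added): RX DMA ping-pongs between sgbuf_a and
 * sgbuf_b. The callback above latches the buffer that just completed
 * (lastbuf), flips use_buf_b and queues the next transfer into the
 * other buffer *before* draining the completed one, so reception can
 * continue while the TTY copy is in progress:
 *
 *	lastbuf = dmarx->use_buf_b;
 *	dmarx->use_buf_b = !lastbuf;
 *	ret = pl011_dma_rx_trigger_dma(uap);
 *	pl011_dma_rx_chars(uap, pending, lastbuf, false);
 */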
1063 * Stop accepting received characters, when we're shutting down or
1064 * suspending this port.
1065 * Locking: called with port lock held and IRQs disabled.
1067 static inline void pl011_dma_rx_stop(struct uart_amba_port *uap)
1069 if (!uap->using_rx_dma)
1072 /* FIXME. Just disable the DMA enable */
1073 uap->dmacr &= ~UART011_RXDMAE;
1074 pl011_write(uap->dmacr, uap, REG_DMACR);
 * Timer handler for Rx DMA polling.
 * On every poll it checks the residue in the DMA buffer and transfers
 * data to the TTY. It also updates last_residue for the next poll.
1082 static void pl011_dma_rx_poll(unsigned long args)
1084 struct uart_amba_port *uap = (struct uart_amba_port *)args;
1085 struct tty_port *port = &uap->port.state->port;
1086 struct pl011_dmarx_data *dmarx = &uap->dmarx;
1087 struct dma_chan *rxchan = uap->dmarx.chan;
1088 unsigned long flags = 0;
1089 unsigned int dmataken = 0;
1090 unsigned int size = 0;
1091 struct pl011_sgbuf *sgbuf;
1093 struct dma_tx_state state;
1095 sgbuf = dmarx->use_buf_b ? &uap->dmarx.sgbuf_b : &uap->dmarx.sgbuf_a;
1096 rxchan->device->device_tx_status(rxchan, dmarx->cookie, &state);
1097 if (likely(state.residue < dmarx->last_residue)) {
1098 dmataken = sgbuf->sg.length - dmarx->last_residue;
1099 size = dmarx->last_residue - state.residue;
1100 dma_count = tty_insert_flip_string(port, sgbuf->buf + dmataken,
1102 if (dma_count == size)
1103 dmarx->last_residue = state.residue;
1104 dmarx->last_jiffies = jiffies;
1106 tty_flip_buffer_push(port);
1109 * If no data is received in poll_timeout, the driver will fall back
1110 * to interrupt mode. We will retrigger DMA at the first interrupt.
1112 if (jiffies_to_msecs(jiffies - dmarx->last_jiffies)
1113 > uap->dmarx.poll_timeout) {
1115 spin_lock_irqsave(&uap->port.lock, flags);
1116 pl011_dma_rx_stop(uap);
1117 uap->im |= UART011_RXIM;
1118 pl011_write(uap->im, uap, REG_IMSC);
1119 spin_unlock_irqrestore(&uap->port.lock, flags);
1121 uap->dmarx.running = false;
1122 dmaengine_terminate_all(rxchan);
1123 del_timer(&uap->dmarx.timer);
1125 mod_timer(&uap->dmarx.timer,
1126 jiffies + msecs_to_jiffies(uap->dmarx.poll_rate));
1130 static void pl011_dma_startup(struct uart_amba_port *uap)
1134 if (!uap->dma_probed)
1135 pl011_dma_probe(uap);
1137 if (!uap->dmatx.chan)
1140 uap->dmatx.buf = kmalloc(PL011_DMA_BUFFER_SIZE, GFP_KERNEL | __GFP_DMA);
1141 if (!uap->dmatx.buf) {
1142 dev_err(uap->port.dev, "no memory for DMA TX buffer\n");
1143 uap->port.fifosize = uap->fifosize;
1147 sg_init_one(&uap->dmatx.sg, uap->dmatx.buf, PL011_DMA_BUFFER_SIZE);
1149 /* The DMA buffer is now the FIFO the TTY subsystem can use */
1150 uap->port.fifosize = PL011_DMA_BUFFER_SIZE;
1151 uap->using_tx_dma = true;
1153 if (!uap->dmarx.chan)
1156 /* Allocate and map DMA RX buffers */
1157 ret = pl011_sgbuf_init(uap->dmarx.chan, &uap->dmarx.sgbuf_a,
1160 dev_err(uap->port.dev, "failed to init DMA %s: %d\n",
1161 "RX buffer A", ret);
1165 ret = pl011_sgbuf_init(uap->dmarx.chan, &uap->dmarx.sgbuf_b,
1168 dev_err(uap->port.dev, "failed to init DMA %s: %d\n",
1169 "RX buffer B", ret);
1170 pl011_sgbuf_free(uap->dmarx.chan, &uap->dmarx.sgbuf_a,
1175 uap->using_rx_dma = true;
1178 /* Turn on DMA error (RX/TX will be enabled on demand) */
1179 uap->dmacr |= UART011_DMAONERR;
1180 pl011_write(uap->dmacr, uap, REG_DMACR);
 * The ST Micro variants have a specific DMA burst threshold
 * compensation. Set this to 16 bytes, so bursts will only
 * be issued above/below 16 bytes.
1187 if (uap->vendor->dma_threshold)
1188 pl011_write(ST_UART011_DMAWM_RX_16 | ST_UART011_DMAWM_TX_16,
1191 if (uap->using_rx_dma) {
1192 if (pl011_dma_rx_trigger_dma(uap))
dev_dbg(uap->port.dev,
	"could not trigger initial RX DMA job, falling back to interrupt mode\n");
1195 if (uap->dmarx.poll_rate) {
1196 init_timer(&(uap->dmarx.timer));
1197 uap->dmarx.timer.function = pl011_dma_rx_poll;
1198 uap->dmarx.timer.data = (unsigned long)uap;
1199 mod_timer(&uap->dmarx.timer,
1201 msecs_to_jiffies(uap->dmarx.poll_rate));
1202 uap->dmarx.last_residue = PL011_DMA_BUFFER_SIZE;
1203 uap->dmarx.last_jiffies = jiffies;
1208 static void pl011_dma_shutdown(struct uart_amba_port *uap)
1210 if (!(uap->using_tx_dma || uap->using_rx_dma))
1213 /* Disable RX and TX DMA */
1214 while (pl011_read(uap, REG_FR) & uap->vendor->fr_busy)
1217 spin_lock_irq(&uap->port.lock);
1218 uap->dmacr &= ~(UART011_DMAONERR | UART011_RXDMAE | UART011_TXDMAE);
1219 pl011_write(uap->dmacr, uap, REG_DMACR);
1220 spin_unlock_irq(&uap->port.lock);
1222 if (uap->using_tx_dma) {
1223 /* In theory, this should already be done by pl011_dma_flush_buffer */
1224 dmaengine_terminate_all(uap->dmatx.chan);
1225 if (uap->dmatx.queued) {
1226 dma_unmap_sg(uap->dmatx.chan->device->dev, &uap->dmatx.sg, 1,
1228 uap->dmatx.queued = false;
1231 kfree(uap->dmatx.buf);
1232 uap->using_tx_dma = false;
1235 if (uap->using_rx_dma) {
1236 dmaengine_terminate_all(uap->dmarx.chan);
1237 /* Clean up the RX DMA */
1238 pl011_sgbuf_free(uap->dmarx.chan, &uap->dmarx.sgbuf_a, DMA_FROM_DEVICE);
1239 pl011_sgbuf_free(uap->dmarx.chan, &uap->dmarx.sgbuf_b, DMA_FROM_DEVICE);
1240 if (uap->dmarx.poll_rate)
1241 del_timer_sync(&uap->dmarx.timer);
1242 uap->using_rx_dma = false;
1246 static inline bool pl011_dma_rx_available(struct uart_amba_port *uap)
1248 return uap->using_rx_dma;
1251 static inline bool pl011_dma_rx_running(struct uart_amba_port *uap)
1253 return uap->using_rx_dma && uap->dmarx.running;
1257 /* Blank functions if the DMA engine is not available */
1258 static inline void pl011_dma_probe(struct uart_amba_port *uap)
1262 static inline void pl011_dma_remove(struct uart_amba_port *uap)
1266 static inline void pl011_dma_startup(struct uart_amba_port *uap)
1270 static inline void pl011_dma_shutdown(struct uart_amba_port *uap)
1274 static inline bool pl011_dma_tx_irq(struct uart_amba_port *uap)
1279 static inline void pl011_dma_tx_stop(struct uart_amba_port *uap)
1283 static inline bool pl011_dma_tx_start(struct uart_amba_port *uap)
1288 static inline void pl011_dma_rx_irq(struct uart_amba_port *uap)
1292 static inline void pl011_dma_rx_stop(struct uart_amba_port *uap)
1296 static inline int pl011_dma_rx_trigger_dma(struct uart_amba_port *uap)
1301 static inline bool pl011_dma_rx_available(struct uart_amba_port *uap)
1306 static inline bool pl011_dma_rx_running(struct uart_amba_port *uap)
1311 #define pl011_dma_flush_buffer NULL
1314 static void pl011_stop_tx(struct uart_port *port)
1316 struct uart_amba_port *uap =
1317 container_of(port, struct uart_amba_port, port);
1319 uap->im &= ~UART011_TXIM;
1320 pl011_write(uap->im, uap, REG_IMSC);
1321 pl011_dma_tx_stop(uap);
1324 static bool pl011_tx_chars(struct uart_amba_port *uap, bool from_irq);
1326 /* Start TX with programmed I/O only (no DMA) */
1327 static void pl011_start_tx_pio(struct uart_amba_port *uap)
1329 if (pl011_tx_chars(uap, false)) {
1330 uap->im |= UART011_TXIM;
1331 pl011_write(uap->im, uap, REG_IMSC);
1335 static void pl011_start_tx(struct uart_port *port)
1337 struct uart_amba_port *uap =
1338 container_of(port, struct uart_amba_port, port);
1340 if (!pl011_dma_tx_start(uap))
1341 pl011_start_tx_pio(uap);
1344 static void pl011_stop_rx(struct uart_port *port)
1346 struct uart_amba_port *uap =
1347 container_of(port, struct uart_amba_port, port);
1349 uap->im &= ~(UART011_RXIM|UART011_RTIM|UART011_FEIM|
1350 UART011_PEIM|UART011_BEIM|UART011_OEIM);
1351 pl011_write(uap->im, uap, REG_IMSC);
1353 pl011_dma_rx_stop(uap);
1356 static void pl011_enable_ms(struct uart_port *port)
1358 struct uart_amba_port *uap =
1359 container_of(port, struct uart_amba_port, port);
1361 uap->im |= UART011_RIMIM|UART011_CTSMIM|UART011_DCDMIM|UART011_DSRMIM;
1362 pl011_write(uap->im, uap, REG_IMSC);
1365 static void pl011_rx_chars(struct uart_amba_port *uap)
1366 __releases(&uap->port.lock)
1367 __acquires(&uap->port.lock)
1369 pl011_fifo_to_tty(uap);
1371 spin_unlock(&uap->port.lock);
1372 tty_flip_buffer_push(&uap->port.state->port);
1374 * If we were temporarily out of DMA mode for a while,
1375 * attempt to switch back to DMA mode again.
1377 if (pl011_dma_rx_available(uap)) {
1378 if (pl011_dma_rx_trigger_dma(uap)) {
dev_dbg(uap->port.dev,
	"could not trigger RX DMA job, falling back to interrupt mode again\n");
1381 uap->im |= UART011_RXIM;
1382 pl011_write(uap->im, uap, REG_IMSC);
1384 #ifdef CONFIG_DMA_ENGINE
1385 /* Start Rx DMA poll */
1386 if (uap->dmarx.poll_rate) {
1387 uap->dmarx.last_jiffies = jiffies;
1388 uap->dmarx.last_residue = PL011_DMA_BUFFER_SIZE;
1389 mod_timer(&uap->dmarx.timer,
1391 msecs_to_jiffies(uap->dmarx.poll_rate));
1396 spin_lock(&uap->port.lock);
1399 static bool pl011_tx_char(struct uart_amba_port *uap, unsigned char c,
1402 if (unlikely(!from_irq) &&
1403 pl011_read(uap, REG_FR) & UART01x_FR_TXFF)
1404 return false; /* unable to transmit character */
1406 pl011_write(c, uap, REG_DR);
1407 uap->port.icount.tx++;
1412 /* Returns true if tx interrupts have to be (kept) enabled */
1413 static bool pl011_tx_chars(struct uart_amba_port *uap, bool from_irq)
1415 struct circ_buf *xmit = &uap->port.state->xmit;
1416 int count = uap->fifosize >> 1;
1418 if (uap->port.x_char) {
1419 if (!pl011_tx_char(uap, uap->port.x_char, from_irq))
1421 uap->port.x_char = 0;
1424 if (uart_circ_empty(xmit) || uart_tx_stopped(&uap->port)) {
1425 pl011_stop_tx(&uap->port);
1429 /* If we are using DMA mode, try to send some characters. */
1430 if (pl011_dma_tx_irq(uap))
1434 if (likely(from_irq) && count-- == 0)
1437 if (!pl011_tx_char(uap, xmit->buf[xmit->tail], from_irq))
1440 xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
1441 } while (!uart_circ_empty(xmit));
1443 if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
1444 uart_write_wakeup(&uap->port);
1446 if (uart_circ_empty(xmit)) {
1447 pl011_stop_tx(&uap->port);
1453 static void pl011_modem_status(struct uart_amba_port *uap)
1455 unsigned int status, delta;
1457 status = pl011_read(uap, REG_FR) & UART01x_FR_MODEM_ANY;
1459 delta = status ^ uap->old_status;
1460 uap->old_status = status;
1465 if (delta & UART01x_FR_DCD)
1466 uart_handle_dcd_change(&uap->port, status & UART01x_FR_DCD);
1468 if (delta & uap->vendor->fr_dsr)
1469 uap->port.icount.dsr++;
1471 if (delta & uap->vendor->fr_cts)
1472 uart_handle_cts_change(&uap->port,
1473 status & uap->vendor->fr_cts);
1475 wake_up_interruptible(&uap->port.state->port.delta_msr_wait);
1478 static void check_apply_cts_event_workaround(struct uart_amba_port *uap)
1480 unsigned int dummy_read;
1482 if (!uap->vendor->cts_event_workaround)
/* Workaround to make sure that all bits are unlocked. */
1486 pl011_write(0x00, uap, REG_ICR);
 * WA: introduce a 26 ns (1 UART clk) delay before W1C;
 * a single APB access incurs a 2 pclk (133.12 MHz) delay,
 * so add 2 dummy reads.
1493 dummy_read = pl011_read(uap, REG_ICR);
1494 dummy_read = pl011_read(uap, REG_ICR);
1497 static irqreturn_t pl011_int(int irq, void *dev_id)
1499 struct uart_amba_port *uap = dev_id;
1500 unsigned long flags;
1501 unsigned int status, pass_counter = AMBA_ISR_PASS_LIMIT;
1505 spin_lock_irqsave(&uap->port.lock, flags);
1506 imsc = pl011_read(uap, REG_IMSC);
1507 status = pl011_read(uap, REG_RIS) & imsc;
1510 check_apply_cts_event_workaround(uap);
1512 pl011_write(status & ~(UART011_TXIS|UART011_RTIS|
1516 if (status & (UART011_RTIS|UART011_RXIS)) {
1517 if (pl011_dma_rx_running(uap))
1518 pl011_dma_rx_irq(uap);
1520 pl011_rx_chars(uap);
1522 if (status & (UART011_DSRMIS|UART011_DCDMIS|
1523 UART011_CTSMIS|UART011_RIMIS))
1524 pl011_modem_status(uap);
1525 if (status & UART011_TXIS)
1526 pl011_tx_chars(uap, true);
1528 if (pass_counter-- == 0)
1531 status = pl011_read(uap, REG_RIS) & imsc;
1532 } while (status != 0);
1536 spin_unlock_irqrestore(&uap->port.lock, flags);
1538 return IRQ_RETVAL(handled);
1541 static unsigned int pl011_tx_empty(struct uart_port *port)
1543 struct uart_amba_port *uap =
1544 container_of(port, struct uart_amba_port, port);
1546 /* Allow feature register bits to be inverted to work around errata */
1547 unsigned int status = pl011_read(uap, REG_FR) ^ uap->vendor->inv_fr;
1549 return status & (uap->vendor->fr_busy | UART01x_FR_TXFF) ?
1553 static unsigned int pl011_get_mctrl(struct uart_port *port)
1555 struct uart_amba_port *uap =
1556 container_of(port, struct uart_amba_port, port);
1557 unsigned int result = 0;
1558 unsigned int status = pl011_read(uap, REG_FR);
#define TIOCMBIT(uartbit, tiocmbit)	\
	if (status & uartbit)		\
		result |= tiocmbit
1564 TIOCMBIT(UART01x_FR_DCD, TIOCM_CAR);
1565 TIOCMBIT(uap->vendor->fr_dsr, TIOCM_DSR);
1566 TIOCMBIT(uap->vendor->fr_cts, TIOCM_CTS);
1567 TIOCMBIT(uap->vendor->fr_ri, TIOCM_RNG);
1572 static void pl011_set_mctrl(struct uart_port *port, unsigned int mctrl)
1574 struct uart_amba_port *uap =
1575 container_of(port, struct uart_amba_port, port);
1578 cr = pl011_read(uap, REG_CR);
#define TIOCMBIT(tiocmbit, uartbit)	\
	if (mctrl & tiocmbit)		\
		cr |= uartbit;		\
	else				\
		cr &= ~uartbit
1586 TIOCMBIT(TIOCM_RTS, UART011_CR_RTS);
1587 TIOCMBIT(TIOCM_DTR, UART011_CR_DTR);
1588 TIOCMBIT(TIOCM_OUT1, UART011_CR_OUT1);
1589 TIOCMBIT(TIOCM_OUT2, UART011_CR_OUT2);
1590 TIOCMBIT(TIOCM_LOOP, UART011_CR_LBE);
1593 /* We need to disable auto-RTS if we want to turn RTS off */
1594 TIOCMBIT(TIOCM_RTS, UART011_CR_RTSEN);
1598 pl011_write(cr, uap, REG_CR);
1601 static void pl011_break_ctl(struct uart_port *port, int break_state)
1603 struct uart_amba_port *uap =
1604 container_of(port, struct uart_amba_port, port);
1605 unsigned long flags;
1608 spin_lock_irqsave(&uap->port.lock, flags);
1609 lcr_h = pl011_read(uap, REG_LCRH_TX);
1610 if (break_state == -1)
1611 lcr_h |= UART01x_LCRH_BRK;
1613 lcr_h &= ~UART01x_LCRH_BRK;
1614 pl011_write(lcr_h, uap, REG_LCRH_TX);
1615 spin_unlock_irqrestore(&uap->port.lock, flags);
1618 #ifdef CONFIG_CONSOLE_POLL
1620 static void pl011_quiesce_irqs(struct uart_port *port)
1622 struct uart_amba_port *uap =
1623 container_of(port, struct uart_amba_port, port);
1625 pl011_write(pl011_read(uap, REG_MIS), uap, REG_ICR);
1627 * There is no way to clear TXIM as this is "ready to transmit IRQ", so
1628 * we simply mask it. start_tx() will unmask it.
1630 * Note we can race with start_tx(), and if the race happens, the
1631 * polling user might get another interrupt just after we clear it.
1632 * But it should be OK and can happen even w/o the race, e.g.
1633 * controller immediately got some new data and raised the IRQ.
 * And whoever uses polling routines assumes that it manages the device
 * (including the tx queue), so we're also fine with start_tx()'s caller
 * side.
1639 pl011_write(pl011_read(uap, REG_IMSC) & ~UART011_TXIM, uap,
1643 static int pl011_get_poll_char(struct uart_port *port)
1645 struct uart_amba_port *uap =
1646 container_of(port, struct uart_amba_port, port);
1647 unsigned int status;
 * The caller might need IRQs lowered, e.g. if used with the KDB NMI
 * debugger.
1653 pl011_quiesce_irqs(port);
1655 status = pl011_read(uap, REG_FR);
1656 if (status & UART01x_FR_RXFE)
1657 return NO_POLL_CHAR;
1659 return pl011_read(uap, REG_DR);
1662 static void pl011_put_poll_char(struct uart_port *port,
1665 struct uart_amba_port *uap =
1666 container_of(port, struct uart_amba_port, port);
1668 while (pl011_read(uap, REG_FR) & UART01x_FR_TXFF)
1671 pl011_write(ch, uap, REG_DR);
1674 #endif /* CONFIG_CONSOLE_POLL */
1676 static int pl011_hwinit(struct uart_port *port)
1678 struct uart_amba_port *uap =
1679 container_of(port, struct uart_amba_port, port);
/* Optionally enable pins to be muxed in and configured */
1683 pinctrl_pm_select_default_state(port->dev);
1686 * Try to enable the clock producer.
1688 retval = clk_prepare_enable(uap->clk);
1692 uap->port.uartclk = clk_get_rate(uap->clk);
1694 /* Clear pending error and receive interrupts */
1695 pl011_write(UART011_OEIS | UART011_BEIS | UART011_PEIS |
1696 UART011_FEIS | UART011_RTIS | UART011_RXIS,
 * Save the interrupt enable mask, and enable RX interrupts in case
 * the interrupt is used for NMI entry.
1703 uap->im = pl011_read(uap, REG_IMSC);
1704 pl011_write(UART011_RTIM | UART011_RXIM, uap, REG_IMSC);
1706 if (dev_get_platdata(uap->port.dev)) {
1707 struct amba_pl011_data *plat;
1709 plat = dev_get_platdata(uap->port.dev);
1716 static bool pl011_split_lcrh(const struct uart_amba_port *uap)
1718 return pl011_reg_to_offset(uap, REG_LCRH_RX) !=
1719 pl011_reg_to_offset(uap, REG_LCRH_TX);
1722 static void pl011_write_lcr_h(struct uart_amba_port *uap, unsigned int lcr_h)
1724 pl011_write(lcr_h, uap, REG_LCRH_RX);
1725 if (pl011_split_lcrh(uap)) {
 * Wait 10 PCLKs before writing the LCRH_TX register;
 * to get this delay, write to a read-only register 10 times.
1731 for (i = 0; i < 10; ++i)
1732 pl011_write(0xff, uap, REG_MIS);
1733 pl011_write(lcr_h, uap, REG_LCRH_TX);
1737 static int pl011_allocate_irq(struct uart_amba_port *uap)
1739 pl011_write(uap->im, uap, REG_IMSC);
1741 return request_irq(uap->port.irq, pl011_int, 0, "uart-pl011", uap);
 * Enable interrupts, only timeouts when using DMA;
 * if the initial RX DMA job failed, start in interrupt mode
 * as well.
1749 static void pl011_enable_interrupts(struct uart_amba_port *uap)
1753 spin_lock_irq(&uap->port.lock);
1755 /* Clear out any spuriously appearing RX interrupts */
1756 pl011_write(UART011_RTIS | UART011_RXIS, uap, REG_ICR);
1759 * RXIS is asserted only when the RX FIFO transitions from below
1760 * to above the trigger threshold. If the RX FIFO is already
1761 * full to the threshold this can't happen and RXIS will now be
1762 * stuck off. Drain the RX FIFO explicitly to fix this:
1764 for (i = 0; i < uap->fifosize * 2; ++i) {
1765 if (pl011_read(uap, REG_FR) & UART01x_FR_RXFE)
1768 pl011_read(uap, REG_DR);
1771 uap->im = UART011_RTIM;
1772 if (!pl011_dma_rx_running(uap))
1773 uap->im |= UART011_RXIM;
1774 pl011_write(uap->im, uap, REG_IMSC);
1775 spin_unlock_irq(&uap->port.lock);
1778 static int pl011_startup(struct uart_port *port)
1780 struct uart_amba_port *uap =
1781 container_of(port, struct uart_amba_port, port);
1785 retval = pl011_hwinit(port);
1789 retval = pl011_allocate_irq(uap);
1793 pl011_write(uap->vendor->ifls, uap, REG_IFLS);
1795 spin_lock_irq(&uap->port.lock);
1797 /* restore RTS and DTR */
1798 cr = uap->old_cr & (UART011_CR_RTS | UART011_CR_DTR);
1799 cr |= UART01x_CR_UARTEN | UART011_CR_RXE | UART011_CR_TXE;
1800 pl011_write(cr, uap, REG_CR);
1802 spin_unlock_irq(&uap->port.lock);
1805 * initialise the old status of the modem signals
1807 uap->old_status = pl011_read(uap, REG_FR) & UART01x_FR_MODEM_ANY;
1810 pl011_dma_startup(uap);
1812 pl011_enable_interrupts(uap);
1817 clk_disable_unprepare(uap->clk);
1821 static int sbsa_uart_startup(struct uart_port *port)
1823 struct uart_amba_port *uap =
1824 container_of(port, struct uart_amba_port, port);
1827 retval = pl011_hwinit(port);
1831 retval = pl011_allocate_irq(uap);
1835 /* The SBSA UART does not support any modem status lines. */
1836 uap->old_status = 0;
1838 pl011_enable_interrupts(uap);
1843 static void pl011_shutdown_channel(struct uart_amba_port *uap,
1848 val = pl011_read(uap, lcrh);
1849 val &= ~(UART01x_LCRH_BRK | UART01x_LCRH_FEN);
1850 pl011_write(val, uap, lcrh);
 * Disable the port. It should not disable RTS and DTR.
 * The RTS and DTR state should also be preserved so that it
 * can be restored during startup().
1858 static void pl011_disable_uart(struct uart_amba_port *uap)
1862 uap->autorts = false;
1863 spin_lock_irq(&uap->port.lock);
1864 cr = pl011_read(uap, REG_CR);
1866 cr &= UART011_CR_RTS | UART011_CR_DTR;
1867 cr |= UART01x_CR_UARTEN | UART011_CR_TXE;
1868 pl011_write(cr, uap, REG_CR);
1869 spin_unlock_irq(&uap->port.lock);
1872 * disable break condition and fifos
1874 pl011_shutdown_channel(uap, REG_LCRH_RX);
1875 if (pl011_split_lcrh(uap))
1876 pl011_shutdown_channel(uap, REG_LCRH_TX);
1879 static void pl011_disable_interrupts(struct uart_amba_port *uap)
1881 spin_lock_irq(&uap->port.lock);
1883 /* mask all interrupts and clear all pending ones */
1885 pl011_write(uap->im, uap, REG_IMSC);
1886 pl011_write(0xffff, uap, REG_ICR);
1888 spin_unlock_irq(&uap->port.lock);
1891 static void pl011_shutdown(struct uart_port *port)
1893 struct uart_amba_port *uap =
1894 container_of(port, struct uart_amba_port, port);
1896 pl011_disable_interrupts(uap);
1898 pl011_dma_shutdown(uap);
1900 free_irq(uap->port.irq, uap);
1902 pl011_disable_uart(uap);
1905 * Shut down the clock producer
1907 clk_disable_unprepare(uap->clk);
1908 /* Optionally let pins go into sleep states */
1909 pinctrl_pm_select_sleep_state(port->dev);
1911 if (dev_get_platdata(uap->port.dev)) {
1912 struct amba_pl011_data *plat;
1914 plat = dev_get_platdata(uap->port.dev);
1919 if (uap->port.ops->flush_buffer)
1920 uap->port.ops->flush_buffer(port);
1923 static void sbsa_uart_shutdown(struct uart_port *port)
1925 struct uart_amba_port *uap =
1926 container_of(port, struct uart_amba_port, port);
1928 pl011_disable_interrupts(uap);
1930 free_irq(uap->port.irq, uap);
1932 if (uap->port.ops->flush_buffer)
1933 uap->port.ops->flush_buffer(port);
1937 pl011_setup_status_masks(struct uart_port *port, struct ktermios *termios)
1939 port->read_status_mask = UART011_DR_OE | 255;
1940 if (termios->c_iflag & INPCK)
1941 port->read_status_mask |= UART011_DR_FE | UART011_DR_PE;
1942 if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
1943 port->read_status_mask |= UART011_DR_BE;
1946 * Characters to ignore
1948 port->ignore_status_mask = 0;
1949 if (termios->c_iflag & IGNPAR)
1950 port->ignore_status_mask |= UART011_DR_FE | UART011_DR_PE;
1951 if (termios->c_iflag & IGNBRK) {
1952 port->ignore_status_mask |= UART011_DR_BE;
1954 * If we're ignoring parity and break indicators,
1955 * ignore overruns too (for real raw support).
1957 if (termios->c_iflag & IGNPAR)
1958 port->ignore_status_mask |= UART011_DR_OE;
1962 * Ignore all characters if CREAD is not set.
1964 if ((termios->c_cflag & CREAD) == 0)
1965 port->ignore_status_mask |= UART_DUMMY_DR_RX;
1969 pl011_set_termios(struct uart_port *port, struct ktermios *termios,
1970 struct ktermios *old)
1972 struct uart_amba_port *uap =
1973 container_of(port, struct uart_amba_port, port);
1974 unsigned int lcr_h, old_cr;
1975 unsigned long flags;
1976 unsigned int baud, quot, clkdiv;
1978 if (uap->vendor->oversampling)
1984 * Ask the core to calculate the divisor for us.
1986 baud = uart_get_baud_rate(port, termios, old, 0,
1987 port->uartclk / clkdiv);
1988 #ifdef CONFIG_DMA_ENGINE
1990 * Adjust RX DMA polling rate with baud rate if not specified.
1992 if (uap->dmarx.auto_poll_rate)
1993 uap->dmarx.poll_rate = DIV_ROUND_UP(10000000, baud);
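/*
 * Worked example (added): DIV_ROUND_UP(10000000, baud) gives the poll
 * interval in milliseconds, roughly the time needed to receive 10^4
 * bits (about a thousand 10-bit character frames). At 115200 baud this
 * is DIV_ROUND_UP(10000000, 115200) = 87 ms; at 9600 baud, 1042 ms.
 */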
1996 if (baud > port->uartclk/16)
1997 quot = DIV_ROUND_CLOSEST(port->uartclk * 8, baud);
1999 quot = DIV_ROUND_CLOSEST(port->uartclk * 4, baud);
2001 switch (termios->c_cflag & CSIZE) {
2003 lcr_h = UART01x_LCRH_WLEN_5;
2006 lcr_h = UART01x_LCRH_WLEN_6;
2009 lcr_h = UART01x_LCRH_WLEN_7;
2012 lcr_h = UART01x_LCRH_WLEN_8;
2015 if (termios->c_cflag & CSTOPB)
2016 lcr_h |= UART01x_LCRH_STP2;
2017 if (termios->c_cflag & PARENB) {
2018 lcr_h |= UART01x_LCRH_PEN;
2019 if (!(termios->c_cflag & PARODD))
2020 lcr_h |= UART01x_LCRH_EPS;
2021 if (termios->c_cflag & CMSPAR)
2022 lcr_h |= UART011_LCRH_SPS;
2024 if (uap->fifosize > 1)
2025 lcr_h |= UART01x_LCRH_FEN;
2027 spin_lock_irqsave(&port->lock, flags);
2030 * Update the per-port timeout.
2032 uart_update_timeout(port, termios->c_cflag, baud);
2034 pl011_setup_status_masks(port, termios);
2036 if (UART_ENABLE_MS(port, termios->c_cflag))
2037 pl011_enable_ms(port);
2039 /* first, disable everything */
2040 old_cr = pl011_read(uap, REG_CR);
2041 pl011_write(0, uap, REG_CR);
2043 if (termios->c_cflag & CRTSCTS) {
2044 if (old_cr & UART011_CR_RTS)
2045 old_cr |= UART011_CR_RTSEN;
2047 old_cr |= UART011_CR_CTSEN;
2048 uap->autorts = true;
2050 old_cr &= ~(UART011_CR_CTSEN | UART011_CR_RTSEN);
2051 uap->autorts = false;
2054 if (uap->vendor->oversampling) {
2055 if (baud > port->uartclk / 16)
2056 old_cr |= ST_UART011_CR_OVSFACT;
2058 old_cr &= ~ST_UART011_CR_OVSFACT;
 * Workaround for the ST Micro oversampling variants to
 * increase the bitrate slightly, by lowering the divisor,
 * to avoid delayed sampling of the start bit at high speeds,
 * else we see data corruption.
2067 if (uap->vendor->oversampling) {
2068 if ((baud >= 3000000) && (baud < 3250000) && (quot > 1))
2070 else if ((baud > 3250000) && (quot > 2))
2074 pl011_write(quot & 0x3f, uap, REG_FBRD);
2075 pl011_write(quot >> 6, uap, REG_IBRD);
2078 * ----------v----------v----------v----------v-----
2079 * NOTE: REG_LCRH_TX and REG_LCRH_RX MUST BE WRITTEN AFTER
2080 * REG_FBRD & REG_IBRD.
2081 * ----------^----------^----------^----------^-----
2083 pl011_write_lcr_h(uap, lcr_h);
2084 pl011_write(old_cr, uap, REG_CR);
2086 spin_unlock_irqrestore(&port->lock, flags);
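/*
 * Worked example (added; clock value hypothetical): quot is a
 * fixed-point divider with 6 fractional bits. With uartclk = 24 MHz at
 * 115200 baud, quot = DIV_ROUND_CLOSEST(24000000 * 4, 115200) = 833,
 * so REG_IBRD = 833 >> 6 = 13 and REG_FBRD = 833 & 0x3f = 1, matching
 * the PL011 formula divider = uartclk / (16 * baud) = 13.02. For baud
 * rates above uartclk/16 the "* 8" variant is used together with
 * ST_UART011_CR_OVSFACT, which drops the oversampling from 16x to 8x
 * on the ST parts.
 */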
2090 sbsa_uart_set_termios(struct uart_port *port, struct ktermios *termios,
2091 struct ktermios *old)
2093 struct uart_amba_port *uap =
2094 container_of(port, struct uart_amba_port, port);
2095 unsigned long flags;
2097 tty_termios_encode_baud_rate(termios, uap->fixed_baud, uap->fixed_baud);
2099 /* The SBSA UART only supports 8n1 without hardware flow control. */
2100 termios->c_cflag &= ~(CSIZE | CSTOPB | PARENB | PARODD);
2101 termios->c_cflag &= ~(CMSPAR | CRTSCTS);
2102 termios->c_cflag |= CS8 | CLOCAL;
2104 spin_lock_irqsave(&port->lock, flags);
2105 uart_update_timeout(port, CS8, uap->fixed_baud);
2106 pl011_setup_status_masks(port, termios);
2107 spin_unlock_irqrestore(&port->lock, flags);
2110 static const char *pl011_type(struct uart_port *port)
2112 struct uart_amba_port *uap =
2113 container_of(port, struct uart_amba_port, port);
2114 return uap->port.type == PORT_AMBA ? uap->type : NULL;
2118 * Configure/autoconfigure the port.
2120 static void pl011_config_port(struct uart_port *port, int flags)
2122 if (flags & UART_CONFIG_TYPE)
2123 port->type = PORT_AMBA;
2127 * verify the new serial_struct (for TIOCSSERIAL).
2129 static int pl011_verify_port(struct uart_port *port, struct serial_struct *ser)
2132 if (ser->type != PORT_UNKNOWN && ser->type != PORT_AMBA)
2134 if (ser->irq < 0 || ser->irq >= nr_irqs)
2136 if (ser->baud_base < 9600)
2138 if (port->mapbase != (unsigned long) ser->iomem_base)
2143 static const struct uart_ops amba_pl011_pops = {
2144 .tx_empty = pl011_tx_empty,
2145 .set_mctrl = pl011_set_mctrl,
2146 .get_mctrl = pl011_get_mctrl,
2147 .stop_tx = pl011_stop_tx,
2148 .start_tx = pl011_start_tx,
2149 .stop_rx = pl011_stop_rx,
2150 .enable_ms = pl011_enable_ms,
2151 .break_ctl = pl011_break_ctl,
2152 .startup = pl011_startup,
2153 .shutdown = pl011_shutdown,
2154 .flush_buffer = pl011_dma_flush_buffer,
2155 .set_termios = pl011_set_termios,
2157 .config_port = pl011_config_port,
2158 .verify_port = pl011_verify_port,
2159 #ifdef CONFIG_CONSOLE_POLL
2160 .poll_init = pl011_hwinit,
2161 .poll_get_char = pl011_get_poll_char,
2162 .poll_put_char = pl011_put_poll_char,
2166 static void sbsa_uart_set_mctrl(struct uart_port *port, unsigned int mctrl)
2170 static unsigned int sbsa_uart_get_mctrl(struct uart_port *port)
2175 static const struct uart_ops sbsa_uart_pops = {
2176 .tx_empty = pl011_tx_empty,
2177 .set_mctrl = sbsa_uart_set_mctrl,
2178 .get_mctrl = sbsa_uart_get_mctrl,
2179 .stop_tx = pl011_stop_tx,
2180 .start_tx = pl011_start_tx,
2181 .stop_rx = pl011_stop_rx,
2182 .startup = sbsa_uart_startup,
2183 .shutdown = sbsa_uart_shutdown,
2184 .set_termios = sbsa_uart_set_termios,
2186 .config_port = pl011_config_port,
2187 .verify_port = pl011_verify_port,
2188 #ifdef CONFIG_CONSOLE_POLL
2189 .poll_init = pl011_hwinit,
2190 .poll_get_char = pl011_get_poll_char,
2191 .poll_put_char = pl011_put_poll_char,
2195 static struct uart_amba_port *amba_ports[UART_NR];
2197 #ifdef CONFIG_SERIAL_AMBA_PL011_CONSOLE
2199 static void pl011_console_putchar(struct uart_port *port, int ch)
2201 struct uart_amba_port *uap =
2202 container_of(port, struct uart_amba_port, port);
2204 while (pl011_read(uap, REG_FR) & UART01x_FR_TXFF)
2206 pl011_write(ch, uap, REG_DR);
2210 pl011_console_write(struct console *co, const char *s, unsigned int count)
2212 struct uart_amba_port *uap = amba_ports[co->index];
2213 unsigned int old_cr = 0, new_cr;
2214 unsigned long flags;
2217 clk_enable(uap->clk);
2219 local_irq_save(flags);
2220 if (uap->port.sysrq)
2222 else if (oops_in_progress)
2223 locked = spin_trylock(&uap->port.lock);
2225 spin_lock(&uap->port.lock);
 * First save the CR, then disable the interrupts
2230 if (!uap->vendor->always_enabled) {
2231 old_cr = pl011_read(uap, REG_CR);
2232 new_cr = old_cr & ~UART011_CR_CTSEN;
2233 new_cr |= UART01x_CR_UARTEN | UART011_CR_TXE;
2234 pl011_write(new_cr, uap, REG_CR);
2237 uart_console_write(&uap->port, s, count, pl011_console_putchar);
 * Finally, wait for the transmitter to become empty and restore the
 * TCR. Allow feature register bits to be inverted to work around
 * errata.
2244 while ((pl011_read(uap, REG_FR) ^ uap->vendor->inv_fr)
2245 & uap->vendor->fr_busy)
2247 if (!uap->vendor->always_enabled)
2248 pl011_write(old_cr, uap, REG_CR);
2251 spin_unlock(&uap->port.lock);
2252 local_irq_restore(flags);
2254 clk_disable(uap->clk);
static void pl011_console_get_options(struct uart_amba_port *uap, int *baud,
				      int *parity, int *bits)
{
	if (pl011_read(uap, REG_CR) & UART01x_CR_UARTEN) {
		unsigned int lcr_h, ibrd, fbrd;

		lcr_h = pl011_read(uap, REG_LCRH_TX);

		*parity = 'n';
		if (lcr_h & UART01x_LCRH_PEN) {
			if (lcr_h & UART01x_LCRH_EPS)
				*parity = 'e';
			else
				*parity = 'o';
		}
		if ((lcr_h & 0x60) == UART01x_LCRH_WLEN_7)
			*bits = 7;
		else
			*bits = 8;

		ibrd = pl011_read(uap, REG_IBRD);
		fbrd = pl011_read(uap, REG_FBRD);

		*baud = uap->port.uartclk * 4 / (64 * ibrd + fbrd);

		if (uap->vendor->oversampling) {
			if (pl011_read(uap, REG_CR) & ST_UART011_CR_OVSFACT)
				*baud *= 2;
		}
	}
}
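
/*
 * Worked example of the baud reconstruction above (illustrative values,
 * not taken from any specific board): with uartclk = 24 MHz, IBRD = 13
 * and FBRD = 1,
 *
 *	baud = 24000000 * 4 / (64 * 13 + 1) = 96000000 / 833 ~= 115246
 *
 * which the upper layers round to the standard 115200. This is just the
 * PL011 divisor formula, baud = uartclk / (16 * (IBRD + FBRD/64)),
 * rearranged to avoid fractional arithmetic.
 */
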
static int pl011_console_setup(struct console *co, char *options)
{
	struct uart_amba_port *uap;
	int baud = 38400, bits = 8, parity = 'n', flow = 'n';
	int ret;

	/*
	 * Check whether an invalid uart number has been specified, and
	 * if so, search for the first available port that does have
	 * console support.
	 */
	if (co->index >= UART_NR)
		co->index = 0;
	uap = amba_ports[co->index];
	if (!uap)
		return -ENODEV;

	/* Allow pins to be muxed in and configured */
	pinctrl_pm_select_default_state(uap->port.dev);

	ret = clk_prepare(uap->clk);
	if (ret)
		return ret;

	if (dev_get_platdata(uap->port.dev)) {
		struct amba_pl011_data *plat;

		plat = dev_get_platdata(uap->port.dev);
		if (plat->init)
			plat->init();
	}

	uap->port.uartclk = clk_get_rate(uap->clk);

	if (uap->vendor->fixed_options) {
		baud = uap->fixed_baud;
	} else {
		if (options)
			uart_parse_options(options,
					   &baud, &parity, &bits, &flow);
		else
			pl011_console_get_options(uap, &baud, &parity, &bits);
	}

	return uart_set_options(&uap->port, co, baud, parity, bits, flow);
}

/**
 * pl011_console_match - non-standard console matching
 * @co:		registering console
 * @name:	name from console command line
 * @idx:	index from console command line
 * @options:	ptr to option string from console command line
 *
 * Only attempts to match console command lines of the form:
 *	console=pl011,mmio|mmio32,<addr>[,<options>]
 *	console=pl011,0x<addr>[,<options>]
 * This form is used to register an initial earlycon boot console and
 * replace it with the amba_console at pl011 driver init.
 *
 * Performs console setup for a match (as required by interface).
 * If no <options> are specified, then assume the h/w is already setup.
 *
 * Returns 0 if console matches; otherwise non-zero to use default matching.
 */
static int pl011_console_match(struct console *co, char *name, int idx,
			       char *options)
{
	unsigned char iotype;
	resource_size_t addr;
	int i;

	/*
	 * Systems affected by the Qualcomm Technologies QDF2400 E44 erratum
	 * have a distinct console name, so make sure we check for that.
	 * The actual implementation of the erratum occurs in the probe
	 * function.
	 */
	if ((strcmp(name, "qdf2400_e44") != 0) && (strcmp(name, "pl011") != 0))
		return -ENODEV;

	if (uart_parse_earlycon(options, &iotype, &addr, &options))
		return -ENODEV;

	if (iotype != UPIO_MEM && iotype != UPIO_MEM32)
		return -ENODEV;

	/* try to match the port specified on the command line */
	for (i = 0; i < ARRAY_SIZE(amba_ports); i++) {
		struct uart_port *port;

		if (!amba_ports[i])
			continue;

		port = &amba_ports[i]->port;

		if (port->mapbase != addr)
			continue;

		co->index = i;
		port->cons = co;
		return pl011_console_setup(co, options);
	}

	return -ENODEV;
}
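
/*
 * Illustrative earlycon-to-console handoff for the matching above (the
 * address is hypothetical and board-specific):
 *
 *	console=pl011,mmio32,0x9000000,115200n8
 *
 * The boot console registered at 0x9000000 is replaced by amba_console
 * once a probed port with the same mapbase is found.
 */
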
static struct uart_driver amba_reg;
static struct console amba_console = {
	.name		= "ttyAMA",
	.write		= pl011_console_write,
	.device		= uart_console_device,
	.setup		= pl011_console_setup,
	.match		= pl011_console_match,
	.flags		= CON_PRINTBUFFER | CON_ANYTIME,
	.index		= -1,
	.data		= &amba_reg,
};

#define AMBA_CONSOLE	(&amba_console)

static void qdf2400_e44_putc(struct uart_port *port, int c)
{
	while (readl(port->membase + UART01x_FR) & UART01x_FR_TXFF)
		cpu_relax();
	writel(c, port->membase + UART01x_DR);
	while (!(readl(port->membase + UART01x_FR) & UART011_FR_TXFE))
		cpu_relax();
}

static void qdf2400_e44_early_write(struct console *con, const char *s, unsigned n)
{
	struct earlycon_device *dev = con->data;

	uart_console_write(&dev->port, s, n, qdf2400_e44_putc);
}

static void pl011_putc(struct uart_port *port, int c)
{
	while (readl(port->membase + UART01x_FR) & UART01x_FR_TXFF)
		cpu_relax();
	if (port->iotype == UPIO_MEM32)
		writel(c, port->membase + UART01x_DR);
	else
		writeb(c, port->membase + UART01x_DR);
	while (readl(port->membase + UART01x_FR) & UART01x_FR_BUSY)
		cpu_relax();
}

static void pl011_early_write(struct console *con, const char *s, unsigned n)
{
	struct earlycon_device *dev = con->data;

	uart_console_write(&dev->port, s, n, pl011_putc);
}

/*
 * On non-ACPI systems, earlycon is enabled by specifying
 * "earlycon=pl011,<address>" on the kernel command line.
 *
 * On ACPI ARM64 systems, an "early" console is enabled via the SPCR table,
 * by specifying only "earlycon" on the command line. Because it requires
 * SPCR, the console starts after ACPI is parsed, which is later than a
 * traditional early console.
 *
 * To get the traditional early console that starts before ACPI is parsed,
 * specify the full "earlycon=pl011,<address>" option.
 */
static int __init pl011_early_console_setup(struct earlycon_device *device,
					    const char *opt)
{
	if (!device->port.membase)
		return -ENODEV;

	device->con->write = pl011_early_write;

	return 0;
}
OF_EARLYCON_DECLARE(pl011, "arm,pl011", pl011_early_console_setup);
OF_EARLYCON_DECLARE(pl011, "arm,sbsa-uart", pl011_early_console_setup);
/*
 * On Qualcomm Datacenter Technologies QDF2400 SOCs affected by
 * Erratum 44, traditional earlycon can be enabled by specifying
 * "earlycon=qdf2400_e44,<address>". Any options are ignored.
 *
 * Alternatively, you can just specify "earlycon", and the early console
 * will be enabled with the information from the SPCR table. In this
 * case, the SPCR code will detect the need for the E44 work-around,
 * and set the console name to "qdf2400_e44".
 */
static int __init
qdf2400_e44_early_console_setup(struct earlycon_device *device,
				const char *opt)
{
	if (!device->port.membase)
		return -ENODEV;

	device->con->write = qdf2400_e44_early_write;
	return 0;
}
EARLYCON_DECLARE(qdf2400_e44, qdf2400_e44_early_console_setup);

#else
#define AMBA_CONSOLE	NULL
#endif

static struct uart_driver amba_reg = {
	.owner			= THIS_MODULE,
	.driver_name		= "ttyAMA",
	.dev_name		= "ttyAMA",
	.major			= SERIAL_AMBA_MAJOR,
	.minor			= SERIAL_AMBA_MINOR,
	.nr			= UART_NR,
	.cons			= AMBA_CONSOLE,
};
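
/*
 * Once the driver is registered, the console is normally selected by the
 * device name rather than the pl011 match form, e.g. (illustrative):
 *
 *	console=ttyAMA0,115200n8
 */
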
static int pl011_probe_dt_alias(int index, struct device *dev)
{
	struct device_node *np;
	static bool seen_dev_with_alias = false;
	static bool seen_dev_without_alias = false;
	int ret = index;

	if (!IS_ENABLED(CONFIG_OF))
		return ret;

	np = dev->of_node;
	if (!np)
		return ret;

	ret = of_alias_get_id(np, "serial");
	if (ret < 0) {
		seen_dev_without_alias = true;
		ret = index;
	} else {
		seen_dev_with_alias = true;
		if (ret >= ARRAY_SIZE(amba_ports) || amba_ports[ret] != NULL) {
			dev_warn(dev, "requested serial port %d not available.\n", ret);
			ret = index;
		}
	}

	if (seen_dev_with_alias && seen_dev_without_alias)
		dev_warn(dev, "aliased and non-aliased serial devices found in device tree. Serial port enumeration may be unpredictable.\n");

	return ret;
}
/* unregisters the driver also if no more ports are left */
static void pl011_unregister_port(struct uart_amba_port *uap)
{
	int i;
	bool busy = false;

	for (i = 0; i < ARRAY_SIZE(amba_ports); i++) {
		if (amba_ports[i] == uap)
			amba_ports[i] = NULL;
		else if (amba_ports[i])
			busy = true;
	}
	pl011_dma_remove(uap);
	if (!busy)
		uart_unregister_driver(&amba_reg);
}

static int pl011_find_free_port(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(amba_ports); i++)
		if (amba_ports[i] == NULL)
			return i;

	return -EBUSY;
}

static int pl011_setup_port(struct device *dev, struct uart_amba_port *uap,
			    struct resource *mmiobase, int index)
{
	void __iomem *base;

	base = devm_ioremap_resource(dev, mmiobase);
	if (IS_ERR(base))
		return PTR_ERR(base);

	index = pl011_probe_dt_alias(index, dev);

	uap->old_cr = 0;
	uap->port.dev = dev;
	uap->port.mapbase = mmiobase->start;
	uap->port.membase = base;
	uap->port.fifosize = uap->fifosize;
	uap->port.flags = UPF_BOOT_AUTOCONF;
	uap->port.line = index;
	spin_lock_init(&uap->port.lock);

	amba_ports[index] = uap;

	return 0;
}

static int pl011_register_port(struct uart_amba_port *uap)
{
	int ret, i;

	/* Ensure interrupts from this UART are masked and cleared */
	pl011_write(0, uap, REG_IMSC);
	pl011_write(0xffff, uap, REG_ICR);

	if (!amba_reg.state) {
		ret = uart_register_driver(&amba_reg);
		if (ret < 0) {
			dev_err(uap->port.dev,
				"Failed to register AMBA-PL011 driver\n");
			for (i = 0; i < ARRAY_SIZE(amba_ports); i++)
				if (amba_ports[i] == uap)
					amba_ports[i] = NULL;
			return ret;
		}
	}

	ret = uart_add_one_port(&amba_reg, &uap->port);
	if (ret)
		pl011_unregister_port(uap);

	return ret;
}

static int pl011_probe(struct amba_device *dev, const struct amba_id *id)
{
	struct uart_amba_port *uap;
	struct vendor_data *vendor = id->data;
	int portnr, ret;

	portnr = pl011_find_free_port();
	if (portnr < 0)
		return portnr;

	uap = devm_kzalloc(&dev->dev, sizeof(struct uart_amba_port),
			   GFP_KERNEL);
	if (!uap)
		return -ENOMEM;

	uap->clk = devm_clk_get(&dev->dev, NULL);
	if (IS_ERR(uap->clk))
		return PTR_ERR(uap->clk);

	uap->reg_offset = vendor->reg_offset;
	uap->vendor = vendor;
	uap->fifosize = vendor->get_fifosize(dev);
	uap->port.iotype = vendor->access_32b ? UPIO_MEM32 : UPIO_MEM;
	uap->port.irq = dev->irq[0];
	uap->port.ops = &amba_pl011_pops;

	snprintf(uap->type, sizeof(uap->type), "PL011 rev%u", amba_rev(dev));

	ret = pl011_setup_port(&dev->dev, uap, &dev->res, portnr);
	if (ret)
		return ret;

	amba_set_drvdata(dev, uap);

	return pl011_register_port(uap);
}

static int pl011_remove(struct amba_device *dev)
{
	struct uart_amba_port *uap = amba_get_drvdata(dev);

	uart_remove_one_port(&amba_reg, &uap->port);
	pl011_unregister_port(uap);
	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int pl011_suspend(struct device *dev)
{
	struct uart_amba_port *uap = dev_get_drvdata(dev);

	if (!uap)
		return -EINVAL;

	return uart_suspend_port(&amba_reg, &uap->port);
}

static int pl011_resume(struct device *dev)
{
	struct uart_amba_port *uap = dev_get_drvdata(dev);

	if (!uap)
		return -EINVAL;

	return uart_resume_port(&amba_reg, &uap->port);
}
#endif

static SIMPLE_DEV_PM_OPS(pl011_dev_pm_ops, pl011_suspend, pl011_resume);

static int sbsa_uart_probe(struct platform_device *pdev)
{
	struct uart_amba_port *uap;
	struct resource *r;
	int portnr, ret;
	int baudrate;

	/*
	 * Check the mandatory baud rate parameter in the DT node early
	 * so that we can easily exit with the error.
	 */
	if (pdev->dev.of_node) {
		struct device_node *np = pdev->dev.of_node;

		ret = of_property_read_u32(np, "current-speed", &baudrate);
		if (ret)
			return ret;
	} else {
		baudrate = 115200;
	}

	portnr = pl011_find_free_port();
	if (portnr < 0)
		return portnr;

	uap = devm_kzalloc(&pdev->dev, sizeof(struct uart_amba_port),
			   GFP_KERNEL);
	if (!uap)
		return -ENOMEM;

	ret = platform_get_irq(pdev, 0);
	if (ret < 0) {
		if (ret != -EPROBE_DEFER)
			dev_err(&pdev->dev, "cannot obtain irq\n");
		return ret;
	}
	uap->port.irq = ret;

#ifdef CONFIG_ACPI_SPCR_TABLE
	if (qdf2400_e44_present) {
		dev_info(&pdev->dev, "working around QDF2400 SoC erratum 44\n");
		uap->vendor = &vendor_qdt_qdf2400_e44;
	} else
#endif
		uap->vendor = &vendor_sbsa;

	uap->reg_offset = uap->vendor->reg_offset;
	uap->fifosize = 32;
	uap->port.iotype = uap->vendor->access_32b ? UPIO_MEM32 : UPIO_MEM;
	uap->port.ops = &sbsa_uart_pops;
	uap->fixed_baud = baudrate;

	snprintf(uap->type, sizeof(uap->type), "SBSA");

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);

	ret = pl011_setup_port(&pdev->dev, uap, r, portnr);
	if (ret)
		return ret;

	platform_set_drvdata(pdev, uap);

	return pl011_register_port(uap);
}
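
/*
 * Illustrative DT node for the mandatory "current-speed" lookup above
 * (address, size cells and interrupt specifier are hypothetical):
 *
 *	uart@9000000 {
 *		compatible = "arm,sbsa-uart";
 *		reg = <0x0 0x9000000 0x0 0x1000>;
 *		interrupts = <GIC_SPI 1 IRQ_TYPE_LEVEL_HIGH>;
 *		current-speed = <115200>;
 *	};
 *
 * The SBSA UART is assumed to be fully initialized by firmware, so the
 * baud rate is reported to userspace, never programmed by this driver.
 */
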
static int sbsa_uart_remove(struct platform_device *pdev)
{
	struct uart_amba_port *uap = platform_get_drvdata(pdev);

	uart_remove_one_port(&amba_reg, &uap->port);
	pl011_unregister_port(uap);
	return 0;
}

static const struct of_device_id sbsa_uart_of_match[] = {
	{ .compatible = "arm,sbsa-uart", },
	{},
};
MODULE_DEVICE_TABLE(of, sbsa_uart_of_match);

static const struct acpi_device_id sbsa_uart_acpi_match[] = {
	{ "ARMH0011", 0 },
	{},
};
MODULE_DEVICE_TABLE(acpi, sbsa_uart_acpi_match);

static struct platform_driver arm_sbsa_uart_platform_driver = {
	.probe		= sbsa_uart_probe,
	.remove		= sbsa_uart_remove,
	.driver	= {
		.name	= "sbsa-uart",
		.of_match_table = of_match_ptr(sbsa_uart_of_match),
		.acpi_match_table = ACPI_PTR(sbsa_uart_acpi_match),
		.suppress_bind_attrs = IS_BUILTIN(CONFIG_SERIAL_AMBA_PL011),
	},
};

static const struct amba_id pl011_ids[] = {
	{
		.id	= 0x00041011,
		.mask	= 0x000fffff,
		.data	= &vendor_arm,
	},
	{
		.id	= 0x00380802,
		.mask	= 0x00ffffff,
		.data	= &vendor_st,
	},
	{
		.id	= AMBA_LINUX_ID(0x00, 0x1, 0xffe),
		.mask	= 0x00ffffff,
		.data	= &vendor_zte,
	},
	{ 0, 0 },
};

MODULE_DEVICE_TABLE(amba, pl011_ids);
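
/*
 * For reference, the first entry above matches the standard ARM PL011:
 * AMBA id 0x00041011 encodes designer 0x41 ('A', ARM) and part number
 * 0x011, and the mask 0x000fffff ignores the revision field so all
 * revisions match (the FIFO size is derived from amba_rev() instead).
 */
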
static struct amba_driver pl011_driver = {
	.drv = {
		.name	= "uart-pl011",
		.pm	= &pl011_dev_pm_ops,
		.suppress_bind_attrs = IS_BUILTIN(CONFIG_SERIAL_AMBA_PL011),
	},
	.id_table	= pl011_ids,
	.probe		= pl011_probe,
	.remove		= pl011_remove,
};

static int __init pl011_init(void)
{
	printk(KERN_INFO "Serial: AMBA PL011 UART driver\n");

	if (platform_driver_register(&arm_sbsa_uart_platform_driver))
		pr_warn("could not register SBSA UART platform driver\n");
	return amba_driver_register(&pl011_driver);
}

static void __exit pl011_exit(void)
{
	platform_driver_unregister(&arm_sbsa_uart_platform_driver);
	amba_driver_unregister(&pl011_driver);
}

/*
 * While this can be a module, if built-in it's most likely the console,
 * so let's leave module_exit but move module_init to an earlier place.
 */
arch_initcall(pl011_init);
module_exit(pl011_exit);

MODULE_AUTHOR("ARM Ltd/Deep Blue Solutions Ltd");
MODULE_DESCRIPTION("ARM AMBA serial port driver");
MODULE_LICENSE("GPL");