// SPDX-License-Identifier: GPL-2.0+
/*
 * Driver for AMBA serial ports
 *
 * Based on drivers/char/serial.c, by Linus Torvalds, Theodore Ts'o.
 *
 * Copyright 1999 ARM Limited
 * Copyright (C) 2000 Deep Blue Solutions Ltd.
 * Copyright (C) 2010 ST-Ericsson SA
 *
 * This is a generic driver for ARM AMBA-type serial ports.  They
 * have a lot of 16550-like features, but are not register compatible.
 * Note that although they do have CTS, DCD and DSR inputs, they do
 * not have an RI input, nor do they have DTR or RTS outputs.  If
 * required, these have to be supplied via some other means (eg, GPIO)
 * and hooked into this driver.
 */
#if defined(CONFIG_SERIAL_AMBA_PL011_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
#define SUPPORT_SYSRQ
#endif
#include <linux/module.h>
#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/console.h>
#include <linux/sysrq.h>
#include <linux/device.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/serial_core.h>
#include <linux/serial.h>
#include <linux/amba/bus.h>
#include <linux/amba/serial.h>
#include <linux/clk.h>
#include <linux/slab.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include <linux/delay.h>
#include <linux/types.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/pinctrl/consumer.h>
#include <linux/sizes.h>
#include <linux/io.h>
#include <linux/acpi.h>

#include "amba-pl011.h"
#define UART_NR			14

#define SERIAL_AMBA_MAJOR	204
#define SERIAL_AMBA_MINOR	64
#define SERIAL_AMBA_NR		UART_NR
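
/*
 * The pass limit below bounds how many times pl011_int() will re-read
 * and service the masked interrupt status in a single invocation, so a
 * continuously reasserting interrupt cannot keep the CPU spinning in
 * the handler forever.
 */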
#define AMBA_ISR_PASS_LIMIT	256
#define UART_DR_ERROR		(UART011_DR_OE|UART011_DR_BE|UART011_DR_PE|UART011_DR_FE)
#define UART_DUMMY_DR_RX	(1 << 16)
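
/*
 * Received characters are read from REG_DR as a word: the character in
 * the low byte with the per-character error flags (OE/BE/PE/FE)
 * alongside it. UART_DUMMY_DR_RX is a software-only bit well above the
 * hardware ones; it is OR'ed into every received word so that the
 * "ignore everything" mask built in pl011_setup_status_masks() can
 * also discard characters that carry no error flags at all.
 */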
static u16 pl011_std_offsets[REG_ARRAY_SIZE] = {
	[REG_DR] = UART01x_DR,
	[REG_FR] = UART01x_FR,
	[REG_LCRH_RX] = UART011_LCRH,
	[REG_LCRH_TX] = UART011_LCRH,
	[REG_IBRD] = UART011_IBRD,
	[REG_FBRD] = UART011_FBRD,
	[REG_CR] = UART011_CR,
	[REG_IFLS] = UART011_IFLS,
	[REG_IMSC] = UART011_IMSC,
	[REG_RIS] = UART011_RIS,
	[REG_MIS] = UART011_MIS,
	[REG_ICR] = UART011_ICR,
	[REG_DMACR] = UART011_DMACR,
};
/* There is by now at least one vendor with differing details, so handle it */
struct vendor_data {
	const u16		*reg_offset;
	unsigned int		ifls;
	unsigned int		fr_busy;
	unsigned int		fr_dsr;
	unsigned int		fr_cts;
	unsigned int		fr_ri;
	unsigned int		inv_fr;
	bool			oversampling;
	bool			dma_threshold;
	bool			cts_event_workaround;
	bool			always_enabled;
	bool			fixed_options;

	unsigned int (*get_fifosize)(struct amba_device *dev);
};
static unsigned int get_fifosize_arm(struct amba_device *dev)
{
	return amba_rev(dev) < 3 ? 16 : 32;
}
static struct vendor_data vendor_arm = {
	.reg_offset		= pl011_std_offsets,
	.ifls			= UART011_IFLS_RX4_8|UART011_IFLS_TX4_8,
	.fr_busy		= UART01x_FR_BUSY,
	.fr_dsr			= UART01x_FR_DSR,
	.fr_cts			= UART01x_FR_CTS,
	.fr_ri			= UART011_FR_RI,
	.oversampling		= false,
	.dma_threshold		= false,
	.cts_event_workaround	= false,
	.always_enabled		= false,
	.fixed_options		= false,
	.get_fifosize		= get_fifosize_arm,
};
static const struct vendor_data vendor_sbsa = {
	.reg_offset		= pl011_std_offsets,
	.fr_busy		= UART01x_FR_BUSY,
	.fr_dsr			= UART01x_FR_DSR,
	.fr_cts			= UART01x_FR_CTS,
	.fr_ri			= UART011_FR_RI,
	.oversampling		= false,
	.dma_threshold		= false,
	.cts_event_workaround	= false,
	.always_enabled		= true,
	.fixed_options		= true,
};
#ifdef CONFIG_ACPI_SPCR_TABLE
static const struct vendor_data vendor_qdt_qdf2400_e44 = {
	.reg_offset		= pl011_std_offsets,
	.fr_busy		= UART011_FR_TXFE,
	.fr_dsr			= UART01x_FR_DSR,
	.fr_cts			= UART01x_FR_CTS,
	.fr_ri			= UART011_FR_RI,
	.inv_fr			= UART011_FR_TXFE,
	.oversampling		= false,
	.dma_threshold		= false,
	.cts_event_workaround	= false,
	.always_enabled		= true,
	.fixed_options		= true,
};
#endif
static u16 pl011_st_offsets[REG_ARRAY_SIZE] = {
	[REG_DR] = UART01x_DR,
	[REG_ST_DMAWM] = ST_UART011_DMAWM,
	[REG_ST_TIMEOUT] = ST_UART011_TIMEOUT,
	[REG_FR] = UART01x_FR,
	[REG_LCRH_RX] = ST_UART011_LCRH_RX,
	[REG_LCRH_TX] = ST_UART011_LCRH_TX,
	[REG_IBRD] = UART011_IBRD,
	[REG_FBRD] = UART011_FBRD,
	[REG_CR] = UART011_CR,
	[REG_IFLS] = UART011_IFLS,
	[REG_IMSC] = UART011_IMSC,
	[REG_RIS] = UART011_RIS,
	[REG_MIS] = UART011_MIS,
	[REG_ICR] = UART011_ICR,
	[REG_DMACR] = UART011_DMACR,
	[REG_ST_XFCR] = ST_UART011_XFCR,
	[REG_ST_XON1] = ST_UART011_XON1,
	[REG_ST_XON2] = ST_UART011_XON2,
	[REG_ST_XOFF1] = ST_UART011_XOFF1,
	[REG_ST_XOFF2] = ST_UART011_XOFF2,
	[REG_ST_ITCR] = ST_UART011_ITCR,
	[REG_ST_ITIP] = ST_UART011_ITIP,
	[REG_ST_ABCR] = ST_UART011_ABCR,
	[REG_ST_ABIMSC] = ST_UART011_ABIMSC,
};
static unsigned int get_fifosize_st(struct amba_device *dev)
{
	return 64;
}
static struct vendor_data vendor_st = {
	.reg_offset		= pl011_st_offsets,
	.ifls			= UART011_IFLS_RX_HALF|UART011_IFLS_TX_HALF,
	.fr_busy		= UART01x_FR_BUSY,
	.fr_dsr			= UART01x_FR_DSR,
	.fr_cts			= UART01x_FR_CTS,
	.fr_ri			= UART011_FR_RI,
	.oversampling		= true,
	.dma_threshold		= true,
	.cts_event_workaround	= true,
	.always_enabled		= false,
	.fixed_options		= false,
	.get_fifosize		= get_fifosize_st,
};
static const u16 pl011_zte_offsets[REG_ARRAY_SIZE] = {
	[REG_DR] = ZX_UART011_DR,
	[REG_FR] = ZX_UART011_FR,
	[REG_LCRH_RX] = ZX_UART011_LCRH,
	[REG_LCRH_TX] = ZX_UART011_LCRH,
	[REG_IBRD] = ZX_UART011_IBRD,
	[REG_FBRD] = ZX_UART011_FBRD,
	[REG_CR] = ZX_UART011_CR,
	[REG_IFLS] = ZX_UART011_IFLS,
	[REG_IMSC] = ZX_UART011_IMSC,
	[REG_RIS] = ZX_UART011_RIS,
	[REG_MIS] = ZX_UART011_MIS,
	[REG_ICR] = ZX_UART011_ICR,
	[REG_DMACR] = ZX_UART011_DMACR,
};
static unsigned int get_fifosize_zte(struct amba_device *dev)
{
	return 16;
}
static struct vendor_data vendor_zte = {
	.reg_offset		= pl011_zte_offsets,
	.ifls			= UART011_IFLS_RX4_8|UART011_IFLS_TX4_8,
	.fr_busy		= ZX_UART01x_FR_BUSY,
	.fr_dsr			= ZX_UART01x_FR_DSR,
	.fr_cts			= ZX_UART01x_FR_CTS,
	.fr_ri			= ZX_UART011_FR_RI,
	.get_fifosize		= get_fifosize_zte,
};
/* Deals with DMA transactions */
struct pl011_sgbuf {
	struct scatterlist sg;
	char *buf;
};
struct pl011_dmarx_data {
	struct dma_chan		*chan;
	struct completion	complete;
	bool			use_buf_b;
	struct pl011_sgbuf	sgbuf_a;
	struct pl011_sgbuf	sgbuf_b;
	dma_cookie_t		cookie;
	bool			running;
	struct timer_list	timer;
	unsigned int		last_residue;
	unsigned long		last_jiffies;
	bool			auto_poll_rate;
	unsigned int		poll_rate;
	unsigned int		poll_timeout;
};
struct pl011_dmatx_data {
	struct dma_chan		*chan;
	struct scatterlist	sg;
	char			*buf;
	bool			queued;
};
/*
 * We wrap our port structure around the generic uart_port.
 */
struct uart_amba_port {
	struct uart_port	port;
	const u16		*reg_offset;
	struct clk		*clk;
	const struct vendor_data *vendor;
	unsigned int		dmacr;		/* dma control reg */
	unsigned int		im;		/* interrupt mask */
	unsigned int		old_status;
	unsigned int		fifosize;	/* vendor-specific */
	unsigned int		old_cr;		/* state during shutdown */
	unsigned int		fixed_baud;	/* vendor-set fixed baud rate */
	char			type[12];
#ifdef CONFIG_DMA_ENGINE
	/* DMA stuff */
	bool			using_tx_dma;
	bool			using_rx_dma;
	struct pl011_dmarx_data dmarx;
	struct pl011_dmatx_data	dmatx;
	bool			dma_probed;
#endif
};
static unsigned int pl011_reg_to_offset(const struct uart_amba_port *uap,
	unsigned int reg)
{
	return uap->reg_offset[reg];
}
static unsigned int pl011_read(const struct uart_amba_port *uap,
	unsigned int reg)
{
	void __iomem *addr = uap->port.membase + pl011_reg_to_offset(uap, reg);

	return (uap->port.iotype == UPIO_MEM32) ?
		readl_relaxed(addr) : readw_relaxed(addr);
}
static void pl011_write(unsigned int val, const struct uart_amba_port *uap,
	unsigned int reg)
{
	void __iomem *addr = uap->port.membase + pl011_reg_to_offset(uap, reg);

	if (uap->port.iotype == UPIO_MEM32)
		writel_relaxed(val, addr);
	else
		writew_relaxed(val, addr);
}
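
/*
 * Illustration of the indirection above (descriptive only, not extra
 * driver logic): on a standard PL011, pl011_read(uap, REG_FR) resolves
 * to readw_relaxed(membase + pl011_std_offsets[REG_FR]), i.e. an MMIO
 * read at offset UART01x_FR, while on a ZTE ZX part the same call goes
 * through pl011_zte_offsets[] and lands on ZX_UART011_FR instead. All
 * register traffic in this driver funnels through these two helpers,
 * so vendor layout differences stay confined to the offset tables.
 */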
/*
 * Reads up to 256 characters from the FIFO or until it's empty and
 * inserts them into the TTY layer. Returns the number of characters
 * read from the FIFO.
 */
static int pl011_fifo_to_tty(struct uart_amba_port *uap)
{
	unsigned int ch, flag, fifotaken;
	unsigned int status;
	int sysrq;

	for (fifotaken = 0; fifotaken != 256; fifotaken++) {
		status = pl011_read(uap, REG_FR);
		if (status & UART01x_FR_RXFE)
			break;

		/* Take chars from the FIFO and update status */
		ch = pl011_read(uap, REG_DR) | UART_DUMMY_DR_RX;
		flag = TTY_NORMAL;
		uap->port.icount.rx++;

		if (unlikely(ch & UART_DR_ERROR)) {
			if (ch & UART011_DR_BE) {
				ch &= ~(UART011_DR_FE | UART011_DR_PE);
				uap->port.icount.brk++;
				if (uart_handle_break(&uap->port))
					continue;
			} else if (ch & UART011_DR_PE)
				uap->port.icount.parity++;
			else if (ch & UART011_DR_FE)
				uap->port.icount.frame++;
			if (ch & UART011_DR_OE)
				uap->port.icount.overrun++;

			ch &= uap->port.read_status_mask;

			if (ch & UART011_DR_BE)
				flag = TTY_BREAK;
			else if (ch & UART011_DR_PE)
				flag = TTY_PARITY;
			else if (ch & UART011_DR_FE)
				flag = TTY_FRAME;
		}
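
		/*
		 * Drop the port lock around the sysrq check: sysrq
		 * handling can re-enter this driver (e.g. via a console
		 * printk that takes the same port lock), so poking it
		 * with the lock held would risk self-deadlock.
		 */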
		spin_unlock(&uap->port.lock);
		sysrq = uart_handle_sysrq_char(&uap->port, ch & 255);
		spin_lock(&uap->port.lock);

		if (!sysrq)
			uart_insert_char(&uap->port, ch, UART011_DR_OE, ch, flag);
	}

	return fifotaken;
}
/*
 * All the DMA operation mode stuff goes inside this ifdef.
 * This assumes that you have a generic DMA device interface,
 * no custom DMA interfaces are supported.
 */
#ifdef CONFIG_DMA_ENGINE

#define PL011_DMA_BUFFER_SIZE PAGE_SIZE
static int pl011_sgbuf_init(struct dma_chan *chan, struct pl011_sgbuf *sg,
	enum dma_data_direction dir)
{
	dma_addr_t dma_addr;

	sg->buf = dma_alloc_coherent(chan->device->dev,
		PL011_DMA_BUFFER_SIZE, &dma_addr, GFP_KERNEL);
	if (!sg->buf)
		return -ENOMEM;

	sg_init_table(&sg->sg, 1);
	sg_set_page(&sg->sg, phys_to_page(dma_addr),
		PL011_DMA_BUFFER_SIZE, offset_in_page(dma_addr));
	sg_dma_address(&sg->sg) = dma_addr;
	sg_dma_len(&sg->sg) = PL011_DMA_BUFFER_SIZE;

	return 0;
}
static void pl011_sgbuf_free(struct dma_chan *chan, struct pl011_sgbuf *sg,
	enum dma_data_direction dir)
{
	if (sg->buf) {
		dma_free_coherent(chan->device->dev,
			PL011_DMA_BUFFER_SIZE, sg->buf,
			sg_dma_address(&sg->sg));
	}
}
static void pl011_dma_probe(struct uart_amba_port *uap)
{
	/* DMA is the sole user of the platform data right now */
	struct amba_pl011_data *plat = dev_get_platdata(uap->port.dev);
	struct device *dev = uap->port.dev;
	struct dma_slave_config tx_conf = {
		.dst_addr = uap->port.mapbase +
				 pl011_reg_to_offset(uap, REG_DR),
		.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
		.direction = DMA_MEM_TO_DEV,
		.dst_maxburst = uap->fifosize >> 1,
		.device_fc = false,
	};
	struct dma_chan *chan;
	dma_cap_mask_t mask;

	uap->dma_probed = true;
	chan = dma_request_slave_channel_reason(dev, "tx");
	if (IS_ERR(chan)) {
		if (PTR_ERR(chan) == -EPROBE_DEFER) {
			uap->dma_probed = false;
			return;
		}

		/* We need platform data */
		if (!plat || !plat->dma_filter) {
			dev_info(uap->port.dev, "no DMA platform data\n");
			return;
		}

		/* Try to acquire a generic DMA engine slave TX channel */
		dma_cap_zero(mask);
		dma_cap_set(DMA_SLAVE, mask);

		chan = dma_request_channel(mask, plat->dma_filter,
					   plat->dma_tx_param);
		if (!chan) {
			dev_err(uap->port.dev, "no TX DMA channel!\n");
			return;
		}
	}

	dmaengine_slave_config(chan, &tx_conf);
	uap->dmatx.chan = chan;

	dev_info(uap->port.dev, "DMA channel TX %s\n",
		 dma_chan_name(uap->dmatx.chan));
	/* Optionally make use of an RX channel as well */
	chan = dma_request_slave_channel(dev, "rx");

	if (!chan && plat && plat->dma_rx_param) {
		chan = dma_request_channel(mask, plat->dma_filter, plat->dma_rx_param);

		if (!chan) {
			dev_err(uap->port.dev, "no RX DMA channel!\n");
			return;
		}
	}

	if (chan) {
		struct dma_slave_config rx_conf = {
			.src_addr = uap->port.mapbase +
				pl011_reg_to_offset(uap, REG_DR),
			.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
			.direction = DMA_DEV_TO_MEM,
			.src_maxburst = uap->fifosize >> 2,
			.device_fc = false,
		};
		struct dma_slave_caps caps;

		/*
		 * Some DMA controllers provide information on their capabilities.
		 * If the controller does, check for suitable residue processing,
		 * otherwise assume all is well.
		 */
		if (dma_get_slave_caps(chan, &caps) == 0) {
			if (caps.residue_granularity ==
					DMA_RESIDUE_GRANULARITY_DESCRIPTOR) {
				dma_release_channel(chan);
				dev_info(uap->port.dev,
					 "RX DMA disabled - no residue processing\n");
				return;
			}
		}
		dmaengine_slave_config(chan, &rx_conf);
		uap->dmarx.chan = chan;

		uap->dmarx.auto_poll_rate = false;
		if (plat && plat->dma_rx_poll_enable) {
			/* Set the poll rate if specified. */
			if (plat->dma_rx_poll_rate) {
				uap->dmarx.auto_poll_rate = false;
				uap->dmarx.poll_rate = plat->dma_rx_poll_rate;
			} else {
				/*
				 * Default to a poll rate of 100 ms if not
				 * specified. This will be adjusted with
				 * the baud rate at set_termios.
				 */
				uap->dmarx.auto_poll_rate = true;
				uap->dmarx.poll_rate = 100;
			}
			/* Default to a poll timeout of 3 secs if not specified. */
			if (plat->dma_rx_poll_timeout)
				uap->dmarx.poll_timeout =
					plat->dma_rx_poll_timeout;
			else
				uap->dmarx.poll_timeout = 3000;
		} else if (!plat && dev->of_node) {
			uap->dmarx.auto_poll_rate = of_property_read_bool(
						dev->of_node, "auto-poll");
			if (uap->dmarx.auto_poll_rate) {
				u32 x;

				if (of_property_read_u32(dev->of_node,
						"poll-rate-ms", &x) == 0)
					uap->dmarx.poll_rate = x;
				else
					uap->dmarx.poll_rate = 100;
				if (of_property_read_u32(dev->of_node,
						"poll-timeout-ms", &x) == 0)
					uap->dmarx.poll_timeout = x;
				else
					uap->dmarx.poll_timeout = 3000;
			}
		}
		dev_info(uap->port.dev, "DMA channel RX %s\n",
			 dma_chan_name(uap->dmarx.chan));
	}
}
static void pl011_dma_remove(struct uart_amba_port *uap)
{
	if (uap->dmatx.chan)
		dma_release_channel(uap->dmatx.chan);
	if (uap->dmarx.chan)
		dma_release_channel(uap->dmarx.chan);
}
/* Forward declare these for the refill routine */
static int pl011_dma_tx_refill(struct uart_amba_port *uap);
static void pl011_start_tx_pio(struct uart_amba_port *uap);
/*
 * The current DMA TX buffer has been sent.
 * Try to queue up another DMA buffer.
 */
static void pl011_dma_tx_callback(void *data)
{
	struct uart_amba_port *uap = data;
	struct pl011_dmatx_data *dmatx = &uap->dmatx;
	unsigned long flags;
	u16 dmacr;

	spin_lock_irqsave(&uap->port.lock, flags);
	if (uap->dmatx.queued)
		dma_unmap_sg(dmatx->chan->device->dev, &dmatx->sg, 1,
			     DMA_TO_DEVICE);

	dmacr = uap->dmacr;
	uap->dmacr = dmacr & ~UART011_TXDMAE;
	pl011_write(uap->dmacr, uap, REG_DMACR);

	/*
	 * If TX DMA was disabled, it means that we've stopped the DMA for
	 * some reason (eg, XOFF received, or we want to send an X-char.)
	 *
	 * Note: we need to be careful here of a potential race between DMA
	 * and the rest of the driver - if the driver disables TX DMA while
	 * a TX buffer is completing, we must update the tx queued status to
	 * get further refills (hence we check dmacr).
	 */
	if (!(dmacr & UART011_TXDMAE) || uart_tx_stopped(&uap->port) ||
	    uart_circ_empty(&uap->port.state->xmit)) {
		uap->dmatx.queued = false;
		spin_unlock_irqrestore(&uap->port.lock, flags);
		return;
	}

	if (pl011_dma_tx_refill(uap) <= 0)
		/*
		 * We didn't queue a DMA buffer for some reason, but we
		 * have data pending to be sent. Re-enable the TX IRQ.
		 */
		pl011_start_tx_pio(uap);

	spin_unlock_irqrestore(&uap->port.lock, flags);
}
/*
 * Try to refill the TX DMA buffer.
 * Locking: called with port lock held and IRQs disabled.
 * Returns:
 *   1 if we queued up a TX DMA buffer.
 *   0 if we didn't want to handle this by DMA
 *  <0 on error
 */
static int pl011_dma_tx_refill(struct uart_amba_port *uap)
{
	struct pl011_dmatx_data *dmatx = &uap->dmatx;
	struct dma_chan *chan = dmatx->chan;
	struct dma_device *dma_dev = chan->device;
	struct dma_async_tx_descriptor *desc;
	struct circ_buf *xmit = &uap->port.state->xmit;
	unsigned int count;

	/*
	 * Try to avoid the overhead involved in using DMA if the
	 * transaction fits in the first half of the FIFO, by using
	 * the standard interrupt handling.  This ensures that we
	 * issue a uart_write_wakeup() at the appropriate time.
	 */
	count = uart_circ_chars_pending(xmit);
	if (count < (uap->fifosize >> 1)) {
		uap->dmatx.queued = false;
		return 0;
	}

	/*
	 * Bodge: don't send the last character by DMA, as this
	 * will prevent XON from notifying us to restart DMA.
	 */
	count -= 1;

	/* Else proceed to copy the TX chars to the DMA buffer and fire DMA */
	if (count > PL011_DMA_BUFFER_SIZE)
		count = PL011_DMA_BUFFER_SIZE;

	if (xmit->tail < xmit->head)
		memcpy(&dmatx->buf[0], &xmit->buf[xmit->tail], count);
	else {
		size_t first = UART_XMIT_SIZE - xmit->tail;
		size_t second;

		if (first > count)
			first = count;
		second = count - first;

		memcpy(&dmatx->buf[0], &xmit->buf[xmit->tail], first);
		if (second)
			memcpy(&dmatx->buf[first], &xmit->buf[0], second);
	}
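
	/*
	 * Illustrative numbers for the wrap-around copy above (not part
	 * of the original code): with UART_XMIT_SIZE = 4096, tail = 4000
	 * and head = 100, the circular buffer holds its pending bytes in
	 * two runs. first = 4096 - 4000 = 96 bytes come from &buf[4000],
	 * then second = count - 96 bytes come from &buf[0], leaving
	 * dmatx->buf with one contiguous copy the DMA engine can stream
	 * straight into the TX FIFO.
	 */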
	dmatx->sg.length = count;

	if (dma_map_sg(dma_dev->dev, &dmatx->sg, 1, DMA_TO_DEVICE) != 1) {
		uap->dmatx.queued = false;
		dev_dbg(uap->port.dev, "unable to map TX DMA\n");
		return -EBUSY;
	}

	desc = dmaengine_prep_slave_sg(chan, &dmatx->sg, 1, DMA_MEM_TO_DEV,
					     DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc) {
		dma_unmap_sg(dma_dev->dev, &dmatx->sg, 1, DMA_TO_DEVICE);
		uap->dmatx.queued = false;
		/*
		 * If DMA cannot be used right now, we complete this
		 * transaction via IRQ and let the TTY layer retry.
		 */
		dev_dbg(uap->port.dev, "TX DMA busy\n");
		return -EBUSY;
	}

	/* Some data to go along to the callback */
	desc->callback = pl011_dma_tx_callback;
	desc->callback_param = uap;

	/* All errors should happen at prepare time */
	dmaengine_submit(desc);

	/* Fire the DMA transaction */
	dma_dev->device_issue_pending(chan);

	uap->dmacr |= UART011_TXDMAE;
	pl011_write(uap->dmacr, uap, REG_DMACR);
	uap->dmatx.queued = true;

	/*
	 * Now we know that DMA will fire, so advance the ring buffer
	 * with the stuff we just dispatched.
	 */
	xmit->tail = (xmit->tail + count) & (UART_XMIT_SIZE - 1);
	uap->port.icount.tx += count;

	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
		uart_write_wakeup(&uap->port);

	return 1;
}
/*
 * We received a transmit interrupt without a pending X-char but with
 * pending characters.
 * Locking: called with port lock held and IRQs disabled.
 * Returns:
 *   false if we want to use PIO to transmit
 *   true if we queued a DMA buffer
 */
static bool pl011_dma_tx_irq(struct uart_amba_port *uap)
{
	if (!uap->using_tx_dma)
		return false;

	/*
	 * If we already have a TX buffer queued, but received a
	 * TX interrupt, it will be because we've just sent an X-char.
	 * Ensure the TX DMA is enabled and the TX IRQ is disabled.
	 */
	if (uap->dmatx.queued) {
		uap->dmacr |= UART011_TXDMAE;
		pl011_write(uap->dmacr, uap, REG_DMACR);
		uap->im &= ~UART011_TXIM;
		pl011_write(uap->im, uap, REG_IMSC);
		return true;
	}

	/*
	 * We don't have a TX buffer queued, so try to queue one.
	 * If we successfully queued a buffer, mask the TX IRQ.
	 */
	if (pl011_dma_tx_refill(uap) > 0) {
		uap->im &= ~UART011_TXIM;
		pl011_write(uap->im, uap, REG_IMSC);
		return true;
	}
	return false;
}
/*
 * Stop the DMA transmit (eg, due to received XOFF).
 * Locking: called with port lock held and IRQs disabled.
 */
static inline void pl011_dma_tx_stop(struct uart_amba_port *uap)
{
	if (uap->dmatx.queued) {
		uap->dmacr &= ~UART011_TXDMAE;
		pl011_write(uap->dmacr, uap, REG_DMACR);
	}
}
/*
 * Try to start a DMA transmit, or in the case of an XON/OFF
 * character queued for send, try to get that character out ASAP.
 * Locking: called with port lock held and IRQs disabled.
 * Returns:
 *   false if we want the TX IRQ to be enabled
 *   true if we have a buffer queued
 */
static inline bool pl011_dma_tx_start(struct uart_amba_port *uap)
{
	u16 dmacr;

	if (!uap->using_tx_dma)
		return false;

	if (!uap->port.x_char) {
		/* no X-char, try to push chars out in DMA mode */
		bool ret = true;

		if (!uap->dmatx.queued) {
			if (pl011_dma_tx_refill(uap) > 0) {
				uap->im &= ~UART011_TXIM;
				pl011_write(uap->im, uap, REG_IMSC);
			} else
				ret = false;
		} else if (!(uap->dmacr & UART011_TXDMAE)) {
			uap->dmacr |= UART011_TXDMAE;
			pl011_write(uap->dmacr, uap, REG_DMACR);
		}
		return ret;
	}

	/*
	 * We have an X-char to send.  Disable DMA to prevent it loading
	 * the TX fifo, and then see if we can stuff it into the FIFO.
	 */
	dmacr = uap->dmacr;
	uap->dmacr &= ~UART011_TXDMAE;
	pl011_write(uap->dmacr, uap, REG_DMACR);

	if (pl011_read(uap, REG_FR) & UART01x_FR_TXFF) {
		/*
		 * No space in the FIFO, so enable the transmit interrupt
		 * so we know when there is space.  Note that once we've
		 * loaded the character, we should just re-enable DMA.
		 */
		return false;
	}

	pl011_write(uap->port.x_char, uap, REG_DR);
	uap->port.icount.tx++;
	uap->port.x_char = 0;

	/* Success - restore the DMA state */
	uap->dmacr = dmacr;
	pl011_write(dmacr, uap, REG_DMACR);

	return true;
}
/*
 * Flush the transmit buffer.
 * Locking: called with port lock held and IRQs disabled.
 */
static void pl011_dma_flush_buffer(struct uart_port *port)
__releases(&uap->port.lock)
__acquires(&uap->port.lock)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);

	if (!uap->using_tx_dma)
		return;

	dmaengine_terminate_async(uap->dmatx.chan);

	if (uap->dmatx.queued) {
		dma_unmap_sg(uap->dmatx.chan->device->dev, &uap->dmatx.sg, 1,
			     DMA_TO_DEVICE);
		uap->dmatx.queued = false;
		uap->dmacr &= ~UART011_TXDMAE;
		pl011_write(uap->dmacr, uap, REG_DMACR);
	}
}
static void pl011_dma_rx_callback(void *data);

static int pl011_dma_rx_trigger_dma(struct uart_amba_port *uap)
{
	struct dma_chan *rxchan = uap->dmarx.chan;
	struct pl011_dmarx_data *dmarx = &uap->dmarx;
	struct dma_async_tx_descriptor *desc;
	struct pl011_sgbuf *sgbuf;

	if (!rxchan)
		return -EIO;

	/* Start the RX DMA job */
	sgbuf = uap->dmarx.use_buf_b ?
		&uap->dmarx.sgbuf_b : &uap->dmarx.sgbuf_a;
	desc = dmaengine_prep_slave_sg(rxchan, &sgbuf->sg, 1,
					DMA_DEV_TO_MEM,
					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	/*
	 * If the DMA engine is busy and cannot prepare a
	 * channel, no big deal, the driver will fall back
	 * to interrupt mode as a result of this error code.
	 */
	if (!desc) {
		uap->dmarx.running = false;
		dmaengine_terminate_all(rxchan);
		return -EBUSY;
	}

	/* Some data to go along to the callback */
	desc->callback = pl011_dma_rx_callback;
	desc->callback_param = uap;
	dmarx->cookie = dmaengine_submit(desc);
	dma_async_issue_pending(rxchan);

	uap->dmacr |= UART011_RXDMAE;
	pl011_write(uap->dmacr, uap, REG_DMACR);
	uap->dmarx.running = true;

	uap->im &= ~UART011_RXIM;
	pl011_write(uap->im, uap, REG_IMSC);

	return 0;
}
/*
 * This is called when either the DMA job is complete, or
 * the FIFO timeout interrupt occurred. This must be called
 * with the port spinlock uap->port.lock held.
 */
static void pl011_dma_rx_chars(struct uart_amba_port *uap,
			       u32 pending, bool use_buf_b,
			       bool readfifo)
{
	struct tty_port *port = &uap->port.state->port;
	struct pl011_sgbuf *sgbuf = use_buf_b ?
		&uap->dmarx.sgbuf_b : &uap->dmarx.sgbuf_a;
	int dma_count = 0;
	u32 fifotaken = 0; /* only used for vdbg() */

	struct pl011_dmarx_data *dmarx = &uap->dmarx;
	int dmataken = 0;

	if (uap->dmarx.poll_rate) {
		/* The data can be taken by polling */
		dmataken = sgbuf->sg.length - dmarx->last_residue;
		/* Recalculate the pending size */
		if (pending >= dmataken)
			pending -= dmataken;
	}

	/* Pick up the remaining data from the DMA buffer */
	if (pending) {
		/*
		 * First take all chars in the DMA pipe, then look in the FIFO.
		 * Note that tty_insert_flip_string() tries to take as many
		 * chars as it can.
		 */
		dma_count = tty_insert_flip_string(port, sgbuf->buf + dmataken,
				pending);

		uap->port.icount.rx += dma_count;
		if (dma_count < pending)
			dev_warn(uap->port.dev,
				 "couldn't insert all characters (TTY is full?)\n");
	}

	/* Reset the last_residue for Rx DMA poll */
	if (uap->dmarx.poll_rate)
		dmarx->last_residue = sgbuf->sg.length;

	/*
	 * Only continue with trying to read the FIFO if all DMA chars have
	 * been taken first.
	 */
	if (dma_count == pending && readfifo) {
		/* Clear any error flags */
		pl011_write(UART011_OEIS | UART011_BEIS | UART011_PEIS |
			    UART011_FEIS, uap, REG_ICR);

		/*
		 * If we read all the DMA'd characters, and we had an
		 * incomplete buffer, that could be due to an rx error, or
		 * maybe we just timed out. Read any pending chars and check
		 * the error status.
		 *
		 * Error conditions will only occur in the FIFO, these will
		 * trigger an immediate interrupt and stop the DMA job, so we
		 * will always find the error in the FIFO, never in the DMA
		 * buffer.
		 */
		fifotaken = pl011_fifo_to_tty(uap);
	}

	spin_unlock(&uap->port.lock);
	dev_vdbg(uap->port.dev,
		 "Took %d chars from DMA buffer and %d chars from the FIFO\n",
		 dma_count, fifotaken);
	tty_flip_buffer_push(port);
	spin_lock(&uap->port.lock);
}
static void pl011_dma_rx_irq(struct uart_amba_port *uap)
{
	struct pl011_dmarx_data *dmarx = &uap->dmarx;
	struct dma_chan *rxchan = dmarx->chan;
	struct pl011_sgbuf *sgbuf = dmarx->use_buf_b ?
		&dmarx->sgbuf_b : &dmarx->sgbuf_a;
	size_t pending;
	struct dma_tx_state state;
	enum dma_status dmastat;

	/*
	 * Pause the transfer so we can trust the current counter,
	 * do this before we pause the PL011 block, else we may
	 * overflow the FIFO.
	 */
	if (dmaengine_pause(rxchan))
		dev_err(uap->port.dev, "unable to pause DMA transfer\n");
	dmastat = rxchan->device->device_tx_status(rxchan,
						   dmarx->cookie, &state);
	if (dmastat != DMA_PAUSED)
		dev_err(uap->port.dev, "unable to pause DMA transfer\n");

	/* Disable RX DMA - incoming data will wait in the FIFO */
	uap->dmacr &= ~UART011_RXDMAE;
	pl011_write(uap->dmacr, uap, REG_DMACR);
	uap->dmarx.running = false;

	pending = sgbuf->sg.length - state.residue;
	BUG_ON(pending > PL011_DMA_BUFFER_SIZE);
	/* Then we terminate the transfer - we now know our residue */
	dmaengine_terminate_all(rxchan);

	/*
	 * This will take the chars we have so far and insert
	 * them into the framework.
	 */
	pl011_dma_rx_chars(uap, pending, dmarx->use_buf_b, true);

	/* Switch buffer & re-trigger DMA job */
	dmarx->use_buf_b = !dmarx->use_buf_b;
	if (pl011_dma_rx_trigger_dma(uap)) {
		dev_dbg(uap->port.dev,
			"could not retrigger RX DMA job, fall back to interrupt mode\n");
		uap->im |= UART011_RXIM;
		pl011_write(uap->im, uap, REG_IMSC);
	}
}
static void pl011_dma_rx_callback(void *data)
{
	struct uart_amba_port *uap = data;
	struct pl011_dmarx_data *dmarx = &uap->dmarx;
	struct dma_chan *rxchan = dmarx->chan;
	bool lastbuf = dmarx->use_buf_b;
	struct pl011_sgbuf *sgbuf = dmarx->use_buf_b ?
		&dmarx->sgbuf_b : &dmarx->sgbuf_a;
	size_t pending;
	struct dma_tx_state state;
	int ret;

	/*
	 * This completion interrupt occurs typically when the
	 * RX buffer is totally stuffed but no timeout has yet
	 * occurred. When that happens, we just want the RX
	 * routine to flush out the secondary DMA buffer while
	 * we immediately trigger the next DMA job.
	 */
	spin_lock_irq(&uap->port.lock);
	/*
	 * Rx data can be taken by the UART interrupts during
	 * the DMA irq handler. So we check the residue here.
	 */
	rxchan->device->device_tx_status(rxchan, dmarx->cookie, &state);
	pending = sgbuf->sg.length - state.residue;
	BUG_ON(pending > PL011_DMA_BUFFER_SIZE);
	/* Then we terminate the transfer - we now know our residue */
	dmaengine_terminate_all(rxchan);

	uap->dmarx.running = false;
	dmarx->use_buf_b = !lastbuf;
	ret = pl011_dma_rx_trigger_dma(uap);

	pl011_dma_rx_chars(uap, pending, lastbuf, false);
	spin_unlock_irq(&uap->port.lock);
	/*
	 * Do this check after we picked the DMA chars so we don't
	 * get some IRQ immediately from RX.
	 */
	if (ret) {
		dev_dbg(uap->port.dev,
			"could not retrigger RX DMA job, fall back to interrupt mode\n");
		uap->im |= UART011_RXIM;
		pl011_write(uap->im, uap, REG_IMSC);
	}
}
/*
 * Stop accepting received characters, when we're shutting down or
 * suspending this port.
 * Locking: called with port lock held and IRQs disabled.
 */
static inline void pl011_dma_rx_stop(struct uart_amba_port *uap)
{
	/* FIXME.  Just disable the DMA enable */
	uap->dmacr &= ~UART011_RXDMAE;
	pl011_write(uap->dmacr, uap, REG_DMACR);
}
/*
 * Timer handler for Rx DMA polling.
 * Every poll it checks the residue in the DMA buffer and transfers
 * data to the tty. Also, last_residue is updated for the next polling.
 */
static void pl011_dma_rx_poll(struct timer_list *t)
{
	struct uart_amba_port *uap = from_timer(uap, t, dmarx.timer);
	struct tty_port *port = &uap->port.state->port;
	struct pl011_dmarx_data *dmarx = &uap->dmarx;
	struct dma_chan *rxchan = uap->dmarx.chan;
	unsigned long flags = 0;
	unsigned int dmataken = 0;
	unsigned int size = 0;
	struct pl011_sgbuf *sgbuf;
	int dma_count;
	struct dma_tx_state state;

	sgbuf = dmarx->use_buf_b ? &uap->dmarx.sgbuf_b : &uap->dmarx.sgbuf_a;
	rxchan->device->device_tx_status(rxchan, dmarx->cookie, &state);
	if (likely(state.residue < dmarx->last_residue)) {
		dmataken = sgbuf->sg.length - dmarx->last_residue;
		size = dmarx->last_residue - state.residue;
		dma_count = tty_insert_flip_string(port, sgbuf->buf + dmataken,
				size);
		if (dma_count == size)
			dmarx->last_residue = state.residue;
		dmarx->last_jiffies = jiffies;
	}
	tty_flip_buffer_push(port);

	/*
	 * If no data is received in poll_timeout, the driver will fall back
	 * to interrupt mode. We will retrigger DMA at the first interrupt.
	 */
	if (jiffies_to_msecs(jiffies - dmarx->last_jiffies)
			> uap->dmarx.poll_timeout) {
		spin_lock_irqsave(&uap->port.lock, flags);
		pl011_dma_rx_stop(uap);
		uap->im |= UART011_RXIM;
		pl011_write(uap->im, uap, REG_IMSC);
		spin_unlock_irqrestore(&uap->port.lock, flags);

		uap->dmarx.running = false;
		dmaengine_terminate_all(rxchan);
		del_timer(&uap->dmarx.timer);
	} else {
		mod_timer(&uap->dmarx.timer,
			  jiffies + msecs_to_jiffies(uap->dmarx.poll_rate));
	}
}
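
/*
 * Worked example of the polling scheme above (illustrative): with the
 * driver defaults of poll_rate = 100 ms and poll_timeout = 3000 ms,
 * this timer inspects the DMA residue ten times a second and pushes
 * any newly arrived bytes to the TTY. If thirty consecutive polls see
 * no new data, the handler stops RX DMA, re-enables the RX interrupt
 * and lets the next interrupt retrigger the DMA job.
 */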
static void pl011_dma_startup(struct uart_amba_port *uap)
{
	int ret;

	if (!uap->dma_probed)
		pl011_dma_probe(uap);

	if (!uap->dmatx.chan)
		return;

	uap->dmatx.buf = kmalloc(PL011_DMA_BUFFER_SIZE, GFP_KERNEL | __GFP_DMA);
	if (!uap->dmatx.buf) {
		dev_err(uap->port.dev, "no memory for DMA TX buffer\n");
		uap->port.fifosize = uap->fifosize;
		return;
	}

	sg_init_one(&uap->dmatx.sg, uap->dmatx.buf, PL011_DMA_BUFFER_SIZE);

	/* The DMA buffer is now the FIFO the TTY subsystem can use */
	uap->port.fifosize = PL011_DMA_BUFFER_SIZE;
	uap->using_tx_dma = true;

	if (!uap->dmarx.chan)
		goto skip_rx;

	/* Allocate and map DMA RX buffers */
	ret = pl011_sgbuf_init(uap->dmarx.chan, &uap->dmarx.sgbuf_a,
			       DMA_FROM_DEVICE);
	if (ret) {
		dev_err(uap->port.dev, "failed to init DMA %s: %d\n",
			"RX buffer A", ret);
		goto skip_rx;
	}

	ret = pl011_sgbuf_init(uap->dmarx.chan, &uap->dmarx.sgbuf_b,
			       DMA_FROM_DEVICE);
	if (ret) {
		dev_err(uap->port.dev, "failed to init DMA %s: %d\n",
			"RX buffer B", ret);
		pl011_sgbuf_free(uap->dmarx.chan, &uap->dmarx.sgbuf_a,
				 DMA_FROM_DEVICE);
		goto skip_rx;
	}

	uap->using_rx_dma = true;

skip_rx:
	/* Turn on DMA error (RX/TX will be enabled on demand) */
	uap->dmacr |= UART011_DMAONERR;
	pl011_write(uap->dmacr, uap, REG_DMACR);

	/*
	 * ST Micro variants have some specific dma burst threshold
	 * compensation. Set this to 16 bytes, so burst will only
	 * be issued above/below 16 bytes.
	 */
	if (uap->vendor->dma_threshold)
		pl011_write(ST_UART011_DMAWM_RX_16 | ST_UART011_DMAWM_TX_16,
			    uap, REG_ST_DMAWM);

	if (uap->using_rx_dma) {
		if (pl011_dma_rx_trigger_dma(uap))
			dev_dbg(uap->port.dev,
				"could not trigger initial RX DMA job, fall back to interrupt mode\n");
		if (uap->dmarx.poll_rate) {
			timer_setup(&uap->dmarx.timer, pl011_dma_rx_poll, 0);
			mod_timer(&uap->dmarx.timer,
				  jiffies + msecs_to_jiffies(uap->dmarx.poll_rate));
			uap->dmarx.last_residue = PL011_DMA_BUFFER_SIZE;
			uap->dmarx.last_jiffies = jiffies;
		}
	}
}
static void pl011_dma_shutdown(struct uart_amba_port *uap)
{
	if (!(uap->using_tx_dma || uap->using_rx_dma))
		return;

	/* Disable RX and TX DMA */
	while (pl011_read(uap, REG_FR) & uap->vendor->fr_busy)
		cpu_relax();

	spin_lock_irq(&uap->port.lock);
	uap->dmacr &= ~(UART011_DMAONERR | UART011_RXDMAE | UART011_TXDMAE);
	pl011_write(uap->dmacr, uap, REG_DMACR);
	spin_unlock_irq(&uap->port.lock);

	if (uap->using_tx_dma) {
		/* In theory, this should already be done by pl011_dma_flush_buffer */
		dmaengine_terminate_all(uap->dmatx.chan);
		if (uap->dmatx.queued) {
			dma_unmap_sg(uap->dmatx.chan->device->dev, &uap->dmatx.sg, 1,
				     DMA_TO_DEVICE);
			uap->dmatx.queued = false;
		}

		kfree(uap->dmatx.buf);
		uap->using_tx_dma = false;
	}

	if (uap->using_rx_dma) {
		dmaengine_terminate_all(uap->dmarx.chan);
		/* Clean up the RX DMA */
		pl011_sgbuf_free(uap->dmarx.chan, &uap->dmarx.sgbuf_a, DMA_FROM_DEVICE);
		pl011_sgbuf_free(uap->dmarx.chan, &uap->dmarx.sgbuf_b, DMA_FROM_DEVICE);
		if (uap->dmarx.poll_rate)
			del_timer_sync(&uap->dmarx.timer);
		uap->using_rx_dma = false;
	}
}
static inline bool pl011_dma_rx_available(struct uart_amba_port *uap)
{
	return uap->using_rx_dma;
}

static inline bool pl011_dma_rx_running(struct uart_amba_port *uap)
{
	return uap->using_rx_dma && uap->dmarx.running;
}
#else
/* Blank functions if the DMA engine is not available */
static inline void pl011_dma_probe(struct uart_amba_port *uap)
{
}

static inline void pl011_dma_remove(struct uart_amba_port *uap)
{
}

static inline void pl011_dma_startup(struct uart_amba_port *uap)
{
}

static inline void pl011_dma_shutdown(struct uart_amba_port *uap)
{
}

static inline bool pl011_dma_tx_irq(struct uart_amba_port *uap)
{
	return false;
}

static inline void pl011_dma_tx_stop(struct uart_amba_port *uap)
{
}

static inline bool pl011_dma_tx_start(struct uart_amba_port *uap)
{
	return false;
}

static inline void pl011_dma_rx_irq(struct uart_amba_port *uap)
{
}

static inline void pl011_dma_rx_stop(struct uart_amba_port *uap)
{
}

static inline int pl011_dma_rx_trigger_dma(struct uart_amba_port *uap)
{
	return -EIO;
}

static inline bool pl011_dma_rx_available(struct uart_amba_port *uap)
{
	return false;
}

static inline bool pl011_dma_rx_running(struct uart_amba_port *uap)
{
	return false;
}

#define pl011_dma_flush_buffer	NULL

#endif /* CONFIG_DMA_ENGINE */
static void pl011_stop_tx(struct uart_port *port)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);

	uap->im &= ~UART011_TXIM;
	pl011_write(uap->im, uap, REG_IMSC);
	pl011_dma_tx_stop(uap);
}
static bool pl011_tx_chars(struct uart_amba_port *uap, bool from_irq);

/* Start TX with programmed I/O only (no DMA) */
static void pl011_start_tx_pio(struct uart_amba_port *uap)
{
	if (pl011_tx_chars(uap, false)) {
		uap->im |= UART011_TXIM;
		pl011_write(uap->im, uap, REG_IMSC);
	}
}
static void pl011_start_tx(struct uart_port *port)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);

	if (!pl011_dma_tx_start(uap))
		pl011_start_tx_pio(uap);
}
static void pl011_stop_rx(struct uart_port *port)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);

	uap->im &= ~(UART011_RXIM|UART011_RTIM|UART011_FEIM|
		     UART011_PEIM|UART011_BEIM|UART011_OEIM);
	pl011_write(uap->im, uap, REG_IMSC);

	pl011_dma_rx_stop(uap);
}
static void pl011_enable_ms(struct uart_port *port)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);

	uap->im |= UART011_RIMIM|UART011_CTSMIM|UART011_DCDMIM|UART011_DSRMIM;
	pl011_write(uap->im, uap, REG_IMSC);
}
static void pl011_rx_chars(struct uart_amba_port *uap)
__releases(&uap->port.lock)
__acquires(&uap->port.lock)
{
	pl011_fifo_to_tty(uap);

	spin_unlock(&uap->port.lock);
	tty_flip_buffer_push(&uap->port.state->port);
	/*
	 * If we were temporarily out of DMA mode for a while,
	 * attempt to switch back to DMA mode again.
	 */
	if (pl011_dma_rx_available(uap)) {
		if (pl011_dma_rx_trigger_dma(uap)) {
			dev_dbg(uap->port.dev,
				"could not trigger RX DMA job, fall back to interrupt mode again\n");
			uap->im |= UART011_RXIM;
			pl011_write(uap->im, uap, REG_IMSC);
		} else {
#ifdef CONFIG_DMA_ENGINE
			/* Start Rx DMA poll */
			if (uap->dmarx.poll_rate) {
				uap->dmarx.last_jiffies = jiffies;
				uap->dmarx.last_residue = PL011_DMA_BUFFER_SIZE;
				mod_timer(&uap->dmarx.timer,
					  jiffies + msecs_to_jiffies(uap->dmarx.poll_rate));
			}
#endif
		}
	}
	spin_lock(&uap->port.lock);
}
static bool pl011_tx_char(struct uart_amba_port *uap, unsigned char c,
			  bool from_irq)
{
	if (unlikely(!from_irq) &&
	    pl011_read(uap, REG_FR) & UART01x_FR_TXFF)
		return false; /* unable to transmit character */

	pl011_write(c, uap, REG_DR);
	uap->port.icount.tx++;

	return true;
}
/* Returns true if tx interrupts have to be (kept) enabled */
static bool pl011_tx_chars(struct uart_amba_port *uap, bool from_irq)
{
	struct circ_buf *xmit = &uap->port.state->xmit;
	int count = uap->fifosize >> 1;
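
	/*
	 * Why half the FIFO: the TX interrupt fires at the IFLS trigger
	 * level, which this driver programs to the half-full mark (see
	 * vendor->ifls), so on entry from the IRQ there is room for at
	 * least fifosize/2 characters. Writing no more than that per
	 * interrupt lets pl011_tx_char() skip polling the TXFF flag for
	 * every single character.
	 */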
	if (uap->port.x_char) {
		if (!pl011_tx_char(uap, uap->port.x_char, from_irq))
			return true;
		uap->port.x_char = 0;
		--count;
	}
	if (uart_circ_empty(xmit) || uart_tx_stopped(&uap->port)) {
		pl011_stop_tx(&uap->port);
		return false;
	}

	/* If we are using DMA mode, try to send some characters. */
	if (pl011_dma_tx_irq(uap))
		return true;

	do {
		if (likely(from_irq) && count-- == 0)
			break;

		if (!pl011_tx_char(uap, xmit->buf[xmit->tail], from_irq))
			break;

		xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
	} while (!uart_circ_empty(xmit));

	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
		uart_write_wakeup(&uap->port);

	if (uart_circ_empty(xmit)) {
		pl011_stop_tx(&uap->port);
		return false;
	}
	return true;
}
static void pl011_modem_status(struct uart_amba_port *uap)
{
	unsigned int status, delta;

	status = pl011_read(uap, REG_FR) & UART01x_FR_MODEM_ANY;

	delta = status ^ uap->old_status;
	uap->old_status = status;

	if (!delta)
		return;

	if (delta & UART01x_FR_DCD)
		uart_handle_dcd_change(&uap->port, status & UART01x_FR_DCD);

	if (delta & uap->vendor->fr_dsr)
		uap->port.icount.dsr++;

	if (delta & uap->vendor->fr_cts)
		uart_handle_cts_change(&uap->port,
				       status & uap->vendor->fr_cts);

	wake_up_interruptible(&uap->port.state->port.delta_msr_wait);
}
static void check_apply_cts_event_workaround(struct uart_amba_port *uap)
{
	unsigned int dummy_read;

	if (!uap->vendor->cts_event_workaround)
		return;

	/* workaround to make sure that all bits are unlocked.. */
	pl011_write(0x00, uap, REG_ICR);

	/*
	 * WA: introduce 26ns (1 uart clk) delay before W1C;
	 * single apb access will incur 2 pclk (133.12 MHz) delay,
	 * so add 2 dummy reads
	 */
	dummy_read = pl011_read(uap, REG_ICR);
	dummy_read = pl011_read(uap, REG_ICR);
}
static irqreturn_t pl011_int(int irq, void *dev_id)
{
	struct uart_amba_port *uap = dev_id;
	unsigned long flags;
	unsigned int status, pass_counter = AMBA_ISR_PASS_LIMIT;
	int handled = 0;

	spin_lock_irqsave(&uap->port.lock, flags);
	status = pl011_read(uap, REG_RIS) & uap->im;
	if (status) {
		do {
			check_apply_cts_event_workaround(uap);

			pl011_write(status & ~(UART011_TXIS|UART011_RTIS|
					       UART011_RXIS),
				    uap, REG_ICR);

			if (status & (UART011_RTIS|UART011_RXIS)) {
				if (pl011_dma_rx_running(uap))
					pl011_dma_rx_irq(uap);
				else
					pl011_rx_chars(uap);
			}
			if (status & (UART011_DSRMIS|UART011_DCDMIS|
				      UART011_CTSMIS|UART011_RIMIS))
				pl011_modem_status(uap);
			if (status & UART011_TXIS)
				pl011_tx_chars(uap, true);

			if (pass_counter-- == 0)
				break;

			status = pl011_read(uap, REG_RIS) & uap->im;
		} while (status != 0);
		handled = 1;
	}

	spin_unlock_irqrestore(&uap->port.lock, flags);

	return IRQ_RETVAL(handled);
}
static unsigned int pl011_tx_empty(struct uart_port *port)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);

	/* Allow feature register bits to be inverted to work around errata */
	unsigned int status = pl011_read(uap, REG_FR) ^ uap->vendor->inv_fr;

	return status & (uap->vendor->fr_busy | UART01x_FR_TXFF) ?
							0 : TIOCSER_TEMT;
}
static unsigned int pl011_get_mctrl(struct uart_port *port)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);
	unsigned int result = 0;
	unsigned int status = pl011_read(uap, REG_FR);

#define TIOCMBIT(uartbit, tiocmbit)	\
	if (status & uartbit)		\
		result |= tiocmbit

	TIOCMBIT(UART01x_FR_DCD, TIOCM_CAR);
	TIOCMBIT(uap->vendor->fr_dsr, TIOCM_DSR);
	TIOCMBIT(uap->vendor->fr_cts, TIOCM_CTS);
	TIOCMBIT(uap->vendor->fr_ri, TIOCM_RNG);
#undef TIOCMBIT
	return result;
}
static void pl011_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);
	unsigned int cr;

	cr = pl011_read(uap, REG_CR);

#define	TIOCMBIT(tiocmbit, uartbit)	\
	if (mctrl & tiocmbit)		\
		cr |= uartbit;		\
	else				\
		cr &= ~uartbit

	TIOCMBIT(TIOCM_RTS, UART011_CR_RTS);
	TIOCMBIT(TIOCM_DTR, UART011_CR_DTR);
	TIOCMBIT(TIOCM_OUT1, UART011_CR_OUT1);
	TIOCMBIT(TIOCM_OUT2, UART011_CR_OUT2);
	TIOCMBIT(TIOCM_LOOP, UART011_CR_LBE);

	if (port->status & UPSTAT_AUTORTS) {
		/* We need to disable auto-RTS if we want to turn RTS off */
		TIOCMBIT(TIOCM_RTS, UART011_CR_RTSEN);
	}
#undef TIOCMBIT

	pl011_write(cr, uap, REG_CR);
}
static void pl011_break_ctl(struct uart_port *port, int break_state)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);
	unsigned long flags;
	unsigned int lcr_h;

	spin_lock_irqsave(&uap->port.lock, flags);
	lcr_h = pl011_read(uap, REG_LCRH_TX);
	if (break_state == -1)
		lcr_h |= UART01x_LCRH_BRK;
	else
		lcr_h &= ~UART01x_LCRH_BRK;
	pl011_write(lcr_h, uap, REG_LCRH_TX);
	spin_unlock_irqrestore(&uap->port.lock, flags);
}
#ifdef CONFIG_CONSOLE_POLL

static void pl011_quiesce_irqs(struct uart_port *port)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);

	pl011_write(pl011_read(uap, REG_MIS), uap, REG_ICR);
	/*
	 * There is no way to clear TXIM as this is "ready to transmit IRQ", so
	 * we simply mask it. start_tx() will unmask it.
	 *
	 * Note we can race with start_tx(), and if the race happens, the
	 * polling user might get another interrupt just after we clear it.
	 * But it should be OK and can happen even w/o the race, e.g.
	 * controller immediately got some new data and raised the IRQ.
	 *
	 * And whoever uses polling routines assumes that it manages the device
	 * (including tx queue), so we're also fine with start_tx()'s caller
	 * side.
	 */
	pl011_write(pl011_read(uap, REG_IMSC) & ~UART011_TXIM, uap,
		    REG_IMSC);
}
static int pl011_get_poll_char(struct uart_port *port)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);
	unsigned int status;

	/*
	 * The caller might need IRQs lowered, e.g. if used with KDB NMI
	 * debugger.
	 */
	pl011_quiesce_irqs(port);

	status = pl011_read(uap, REG_FR);
	if (status & UART01x_FR_RXFE)
		return NO_POLL_CHAR;

	return pl011_read(uap, REG_DR);
}
static void pl011_put_poll_char(struct uart_port *port,
				unsigned char ch)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);

	while (pl011_read(uap, REG_FR) & UART01x_FR_TXFF)
		cpu_relax();

	pl011_write(ch, uap, REG_DR);
}

#endif /* CONFIG_CONSOLE_POLL */
static int pl011_hwinit(struct uart_port *port)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);
	int retval;

	/* Optionally enable pins to be muxed in and configured */
	pinctrl_pm_select_default_state(port->dev);

	/*
	 * Try to enable the clock producer.
	 */
	retval = clk_prepare_enable(uap->clk);
	if (retval)
		return retval;

	uap->port.uartclk = clk_get_rate(uap->clk);

	/* Clear pending error and receive interrupts */
	pl011_write(UART011_OEIS | UART011_BEIS | UART011_PEIS |
		    UART011_FEIS | UART011_RTIS | UART011_RXIS,
		    uap, REG_ICR);

	/*
	 * Save interrupts enable mask, and enable RX interrupts in case if
	 * the interrupt is used for NMI entry.
	 */
	uap->im = pl011_read(uap, REG_IMSC);
	pl011_write(UART011_RTIM | UART011_RXIM, uap, REG_IMSC);

	if (dev_get_platdata(uap->port.dev)) {
		struct amba_pl011_data *plat;

		plat = dev_get_platdata(uap->port.dev);
		if (plat->init)
			plat->init();
	}
	return 0;
}
static bool pl011_split_lcrh(const struct uart_amba_port *uap)
{
	return pl011_reg_to_offset(uap, REG_LCRH_RX) !=
	       pl011_reg_to_offset(uap, REG_LCRH_TX);
}
static void pl011_write_lcr_h(struct uart_amba_port *uap, unsigned int lcr_h)
{
	pl011_write(lcr_h, uap, REG_LCRH_RX);
	if (pl011_split_lcrh(uap)) {
		int i;
		/*
		 * Wait 10 PCLKs before writing the LCRH_TX register;
		 * to get this delay, write the read-only MIS register
		 * 10 times.
		 */
		for (i = 0; i < 10; ++i)
			pl011_write(0xff, uap, REG_MIS);
		pl011_write(lcr_h, uap, REG_LCRH_TX);
	}
}
static int pl011_allocate_irq(struct uart_amba_port *uap)
{
	pl011_write(uap->im, uap, REG_IMSC);

	return request_irq(uap->port.irq, pl011_int, 0, "uart-pl011", uap);
}
/*
 * Enable interrupts, only timeouts when using DMA;
 * if the initial RX DMA job failed, start in interrupt mode
 * as well.
 */
static void pl011_enable_interrupts(struct uart_amba_port *uap)
{
	unsigned int i;

	spin_lock_irq(&uap->port.lock);

	/* Clear out any spuriously appearing RX interrupts */
	pl011_write(UART011_RTIS | UART011_RXIS, uap, REG_ICR);

	/*
	 * RXIS is asserted only when the RX FIFO transitions from below
	 * to above the trigger threshold.  If the RX FIFO is already
	 * full to the threshold this can't happen and RXIS will now be
	 * stuck off.  Drain the RX FIFO explicitly to fix this:
	 */
	for (i = 0; i < uap->fifosize * 2; ++i) {
		if (pl011_read(uap, REG_FR) & UART01x_FR_RXFE)
			break;

		pl011_read(uap, REG_DR);
	}

	uap->im = UART011_RTIM;
	if (!pl011_dma_rx_running(uap))
		uap->im |= UART011_RXIM;
	pl011_write(uap->im, uap, REG_IMSC);
	spin_unlock_irq(&uap->port.lock);
}
static int pl011_startup(struct uart_port *port)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);
	unsigned int cr;
	int retval;

	retval = pl011_hwinit(port);
	if (retval)
		goto clk_dis;

	retval = pl011_allocate_irq(uap);
	if (retval)
		goto clk_dis;

	pl011_write(uap->vendor->ifls, uap, REG_IFLS);

	spin_lock_irq(&uap->port.lock);

	/* restore RTS and DTR */
	cr = uap->old_cr & (UART011_CR_RTS | UART011_CR_DTR);
	cr |= UART01x_CR_UARTEN | UART011_CR_RXE | UART011_CR_TXE;
	pl011_write(cr, uap, REG_CR);

	spin_unlock_irq(&uap->port.lock);

	/*
	 * initialise the old status of the modem signals
	 */
	uap->old_status = pl011_read(uap, REG_FR) & UART01x_FR_MODEM_ANY;

	/* Startup DMA */
	pl011_dma_startup(uap);

	pl011_enable_interrupts(uap);

	return 0;

 clk_dis:
	clk_disable_unprepare(uap->clk);
	return retval;
}
static int sbsa_uart_startup(struct uart_port *port)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);
	int retval;

	retval = pl011_hwinit(port);
	if (retval)
		return retval;

	retval = pl011_allocate_irq(uap);
	if (retval)
		return retval;

	/* The SBSA UART does not support any modem status lines. */
	uap->old_status = 0;

	pl011_enable_interrupts(uap);

	return 0;
}
static void pl011_shutdown_channel(struct uart_amba_port *uap,
				   unsigned int lcrh)
{
	unsigned long val;

	val = pl011_read(uap, lcrh);
	val &= ~(UART01x_LCRH_BRK | UART01x_LCRH_FEN);
	pl011_write(val, uap, lcrh);
}
/*
 * disable the port. It should not disable RTS and DTR.
 * Also RTS and DTR state should be preserved to restore
 * it during startup().
 */
static void pl011_disable_uart(struct uart_amba_port *uap)
{
	unsigned int cr;

	uap->port.status &= ~(UPSTAT_AUTOCTS | UPSTAT_AUTORTS);
	spin_lock_irq(&uap->port.lock);
	cr = pl011_read(uap, REG_CR);
	uap->old_cr = cr;
	cr &= UART011_CR_RTS | UART011_CR_DTR;
	cr |= UART01x_CR_UARTEN | UART011_CR_TXE;
	pl011_write(cr, uap, REG_CR);
	spin_unlock_irq(&uap->port.lock);

	/*
	 * disable break condition and fifos
	 */
	pl011_shutdown_channel(uap, REG_LCRH_RX);
	if (pl011_split_lcrh(uap))
		pl011_shutdown_channel(uap, REG_LCRH_TX);
}
static void pl011_disable_interrupts(struct uart_amba_port *uap)
{
	spin_lock_irq(&uap->port.lock);

	/* mask all interrupts and clear all pending ones */
	uap->im = 0;
	pl011_write(uap->im, uap, REG_IMSC);
	pl011_write(0xffff, uap, REG_ICR);

	spin_unlock_irq(&uap->port.lock);
}
static void pl011_shutdown(struct uart_port *port)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);

	pl011_disable_interrupts(uap);

	pl011_dma_shutdown(uap);

	free_irq(uap->port.irq, uap);

	pl011_disable_uart(uap);

	/*
	 * Shut down the clock producer
	 */
	clk_disable_unprepare(uap->clk);
	/* Optionally let pins go into sleep states */
	pinctrl_pm_select_sleep_state(port->dev);

	if (dev_get_platdata(uap->port.dev)) {
		struct amba_pl011_data *plat;

		plat = dev_get_platdata(uap->port.dev);
		if (plat->exit)
			plat->exit();
	}

	if (uap->port.ops->flush_buffer)
		uap->port.ops->flush_buffer(port);
}
static void sbsa_uart_shutdown(struct uart_port *port)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);

	pl011_disable_interrupts(uap);

	free_irq(uap->port.irq, uap);

	if (uap->port.ops->flush_buffer)
		uap->port.ops->flush_buffer(port);
}
static void
pl011_setup_status_masks(struct uart_port *port, struct ktermios *termios)
{
	port->read_status_mask = UART011_DR_OE | 255;
	if (termios->c_iflag & INPCK)
		port->read_status_mask |= UART011_DR_FE | UART011_DR_PE;
	if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
		port->read_status_mask |= UART011_DR_BE;

	/*
	 * Characters to ignore
	 */
	port->ignore_status_mask = 0;
	if (termios->c_iflag & IGNPAR)
		port->ignore_status_mask |= UART011_DR_FE | UART011_DR_PE;
	if (termios->c_iflag & IGNBRK) {
		port->ignore_status_mask |= UART011_DR_BE;
		/*
		 * If we're ignoring parity and break indicators,
		 * ignore overruns too (for real raw support).
		 */
		if (termios->c_iflag & IGNPAR)
			port->ignore_status_mask |= UART011_DR_OE;
	}

	/*
	 * Ignore all characters if CREAD is not set.
	 */
	if ((termios->c_cflag & CREAD) == 0)
		port->ignore_status_mask |= UART_DUMMY_DR_RX;
}
static void
pl011_set_termios(struct uart_port *port, struct ktermios *termios,
		  struct ktermios *old)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);
	unsigned int lcr_h, old_cr;
	unsigned long flags;
	unsigned int baud, quot, clkdiv;

	if (uap->vendor->oversampling)
		clkdiv = 8;
	else
		clkdiv = 16;

	/*
	 * Ask the core to calculate the divisor for us.
	 */
	baud = uart_get_baud_rate(port, termios, old, 0,
				  port->uartclk / clkdiv);
#ifdef CONFIG_DMA_ENGINE
	/*
	 * Adjust RX DMA polling rate with baud rate if not specified.
	 */
	if (uap->dmarx.auto_poll_rate)
		uap->dmarx.poll_rate = DIV_ROUND_UP(10000000, baud);
#endif
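
	/*
	 * Worked example for the auto poll rate above (illustrative):
	 * at 115200 baud, DIV_ROUND_UP(10000000, 115200) = 87, so the
	 * RX DMA buffer is polled roughly every 87 ms; the interval
	 * scales inversely with the character rate, so slower links are
	 * polled less often and faster links more often.
	 */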

	if (baud > port->uartclk / 16)
		quot = DIV_ROUND_CLOSEST(port->uartclk * 8, baud);
	else
		quot = DIV_ROUND_CLOSEST(port->uartclk * 4, baud);

	switch (termios->c_cflag & CSIZE) {
	case CS5:
		lcr_h = UART01x_LCRH_WLEN_5;
		break;
	case CS6:
		lcr_h = UART01x_LCRH_WLEN_6;
		break;
	case CS7:
		lcr_h = UART01x_LCRH_WLEN_7;
		break;
	default: // CS8
		lcr_h = UART01x_LCRH_WLEN_8;
		break;
	}
	if (termios->c_cflag & CSTOPB)
		lcr_h |= UART01x_LCRH_STP2;
	if (termios->c_cflag & PARENB) {
		lcr_h |= UART01x_LCRH_PEN;
		if (!(termios->c_cflag & PARODD))
			lcr_h |= UART01x_LCRH_EPS;
		if (termios->c_cflag & CMSPAR)
			lcr_h |= UART011_LCRH_SPS;
	}
	if (uap->fifosize > 1)
		lcr_h |= UART01x_LCRH_FEN;
	spin_lock_irqsave(&port->lock, flags);

	/*
	 * Update the per-port timeout.
	 */
	uart_update_timeout(port, termios->c_cflag, baud);

	pl011_setup_status_masks(port, termios);

	if (UART_ENABLE_MS(port, termios->c_cflag))
		pl011_enable_ms(port);

	/* first, disable everything */
	old_cr = pl011_read(uap, REG_CR);
	pl011_write(0, uap, REG_CR);

	if (termios->c_cflag & CRTSCTS) {
		if (old_cr & UART011_CR_RTS)
			old_cr |= UART011_CR_RTSEN;

		old_cr |= UART011_CR_CTSEN;
		port->status |= UPSTAT_AUTOCTS | UPSTAT_AUTORTS;
	} else {
		old_cr &= ~(UART011_CR_CTSEN | UART011_CR_RTSEN);
		port->status &= ~(UPSTAT_AUTOCTS | UPSTAT_AUTORTS);
	}

	if (uap->vendor->oversampling) {
		if (baud > port->uartclk / 16)
			old_cr |= ST_UART011_CR_OVSFACT;
		else
			old_cr &= ~ST_UART011_CR_OVSFACT;
	}

	/*
	 * Workaround for the ST Micro oversampling variants to
	 * increase the bitrate slightly, by lowering the divisor,
	 * to avoid delayed sampling of start bit at high speeds,
	 * else we see data corruption.
	 */
	if (uap->vendor->oversampling) {
		if ((baud >= 3000000) && (baud < 3250000) && (quot > 1))
			quot -= 1;
		else if ((baud > 3250000) && (quot > 2))
			quot -= 2;
	}
	/* Set baud rate */
	pl011_write(quot & 0x3f, uap, REG_FBRD);
	pl011_write(quot >> 6, uap, REG_IBRD);
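
	/*
	 * Worked example (illustrative): with uartclk = 24 MHz and
	 * baud = 115200, quot = DIV_ROUND_CLOSEST(24000000 * 4, 115200)
	 * = 833 sixty-fourths of the classic 16x divisor. IBRD gets
	 * 833 >> 6 = 13 and FBRD gets 833 & 0x3f = 1, i.e. a divisor of
	 * 13 + 1/64, which yields 24000000 / (16 * 13.015625), roughly
	 * 115246 baud - within 0.04% of the requested rate.
	 */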

	/*
	 * ----------v----------v----------v----------v-----
	 * NOTE: REG_LCRH_TX and REG_LCRH_RX MUST BE WRITTEN AFTER
	 * REG_FBRD & REG_IBRD.
	 * ----------^----------^----------^----------^-----
	 */
	pl011_write_lcr_h(uap, lcr_h);
	pl011_write(old_cr, uap, REG_CR);

	spin_unlock_irqrestore(&port->lock, flags);
}
static void
sbsa_uart_set_termios(struct uart_port *port, struct ktermios *termios,
		      struct ktermios *old)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);
	unsigned long flags;

	tty_termios_encode_baud_rate(termios, uap->fixed_baud, uap->fixed_baud);

	/* The SBSA UART only supports 8n1 without hardware flow control. */
	termios->c_cflag &= ~(CSIZE | CSTOPB | PARENB | PARODD);
	termios->c_cflag &= ~(CMSPAR | CRTSCTS);
	termios->c_cflag |= CS8 | CLOCAL;

	spin_lock_irqsave(&port->lock, flags);
	uart_update_timeout(port, CS8, uap->fixed_baud);
	pl011_setup_status_masks(port, termios);
	spin_unlock_irqrestore(&port->lock, flags);
}
static const char *pl011_type(struct uart_port *port)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);
	return uap->port.type == PORT_AMBA ? uap->type : NULL;
}
/*
 * Configure/autoconfigure the port.
 */
static void pl011_config_port(struct uart_port *port, int flags)
{
	if (flags & UART_CONFIG_TYPE)
		port->type = PORT_AMBA;
}
/*
 * verify the new serial_struct (for TIOCSSERIAL).
 */
static int pl011_verify_port(struct uart_port *port, struct serial_struct *ser)
{
	int ret = 0;

	if (ser->type != PORT_UNKNOWN && ser->type != PORT_AMBA)
		ret = -EINVAL;
	if (ser->irq < 0 || ser->irq >= nr_irqs)
		ret = -EINVAL;
	if (ser->baud_base < 9600)
		ret = -EINVAL;
	if (port->mapbase != (unsigned long) ser->iomem_base)
		ret = -EINVAL;
	return ret;
}
static const struct uart_ops amba_pl011_pops = {
	.tx_empty	= pl011_tx_empty,
	.set_mctrl	= pl011_set_mctrl,
	.get_mctrl	= pl011_get_mctrl,
	.stop_tx	= pl011_stop_tx,
	.start_tx	= pl011_start_tx,
	.stop_rx	= pl011_stop_rx,
	.enable_ms	= pl011_enable_ms,
	.break_ctl	= pl011_break_ctl,
	.startup	= pl011_startup,
	.shutdown	= pl011_shutdown,
	.flush_buffer	= pl011_dma_flush_buffer,
	.set_termios	= pl011_set_termios,
	.type		= pl011_type,
	.config_port	= pl011_config_port,
	.verify_port	= pl011_verify_port,
#ifdef CONFIG_CONSOLE_POLL
	.poll_init	= pl011_hwinit,
	.poll_get_char	= pl011_get_poll_char,
	.poll_put_char	= pl011_put_poll_char,
#endif
};
static void sbsa_uart_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
}

static unsigned int sbsa_uart_get_mctrl(struct uart_port *port)
{
	return 0;
}
static const struct uart_ops sbsa_uart_pops = {
	.tx_empty	= pl011_tx_empty,
	.set_mctrl	= sbsa_uart_set_mctrl,
	.get_mctrl	= sbsa_uart_get_mctrl,
	.stop_tx	= pl011_stop_tx,
	.start_tx	= pl011_start_tx,
	.stop_rx	= pl011_stop_rx,
	.startup	= sbsa_uart_startup,
	.shutdown	= sbsa_uart_shutdown,
	.set_termios	= sbsa_uart_set_termios,
	.type		= pl011_type,
	.config_port	= pl011_config_port,
	.verify_port	= pl011_verify_port,
#ifdef CONFIG_CONSOLE_POLL
	.poll_init	= pl011_hwinit,
	.poll_get_char	= pl011_get_poll_char,
	.poll_put_char	= pl011_put_poll_char,
#endif
};
static struct uart_amba_port *amba_ports[UART_NR];

#ifdef CONFIG_SERIAL_AMBA_PL011_CONSOLE
static void pl011_console_putchar(struct uart_port *port, int ch)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);

	while (pl011_read(uap, REG_FR) & UART01x_FR_TXFF)
		cpu_relax();
	pl011_write(ch, uap, REG_DR);
}
static void
pl011_console_write(struct console *co, const char *s, unsigned int count)
{
	struct uart_amba_port *uap = amba_ports[co->index];
	unsigned int old_cr = 0, new_cr;
	unsigned long flags;
	int locked = 1;

	clk_enable(uap->clk);

	local_irq_save(flags);
	if (uap->port.sysrq)
		locked = 0;
	else if (oops_in_progress)
		locked = spin_trylock(&uap->port.lock);
	else
		spin_lock(&uap->port.lock);

	/*
	 * First save the CR then disable the interrupts
	 */
	if (!uap->vendor->always_enabled) {
		old_cr = pl011_read(uap, REG_CR);
		new_cr = old_cr & ~UART011_CR_CTSEN;
		new_cr |= UART01x_CR_UARTEN | UART011_CR_TXE;
		pl011_write(new_cr, uap, REG_CR);
	}

	uart_console_write(&uap->port, s, count, pl011_console_putchar);

	/*
	 * Finally, wait for the transmitter to become empty and restore the
	 * CR. Allow feature register bits to be inverted to work around
	 * errata.
	 */
	while ((pl011_read(uap, REG_FR) ^ uap->vendor->inv_fr)
						& uap->vendor->fr_busy)
		cpu_relax();
	if (!uap->vendor->always_enabled)
		pl011_write(old_cr, uap, REG_CR);

	if (locked)
		spin_unlock(&uap->port.lock);
	local_irq_restore(flags);

	clk_disable(uap->clk);
}
static void pl011_console_get_options(struct uart_amba_port *uap, int *baud,
				      int *parity, int *bits)
{
	if (pl011_read(uap, REG_CR) & UART01x_CR_UARTEN) {
		unsigned int lcr_h, ibrd, fbrd;

		lcr_h = pl011_read(uap, REG_LCRH_TX);

		*parity = 'n';
		if (lcr_h & UART01x_LCRH_PEN) {
			if (lcr_h & UART01x_LCRH_EPS)
				*parity = 'e';
			else
				*parity = 'o';
		}

		if ((lcr_h & 0x60) == UART01x_LCRH_WLEN_7)
			*bits = 7;
		else
			*bits = 8;

		ibrd = pl011_read(uap, REG_IBRD);
		fbrd = pl011_read(uap, REG_FBRD);

		*baud = uap->port.uartclk * 4 / (64 * ibrd + fbrd);

		if (uap->vendor->oversampling) {
			if (pl011_read(uap, REG_CR)
				  & ST_UART011_CR_OVSFACT)
				*baud *= 2;
		}
	}
}
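
/*
 * Sanity check (illustrative): the baud computation above is the exact
 * inverse of the quot split in pl011_set_termios(). Reading back
 * IBRD = 13 and FBRD = 1 on a 24 MHz uartclk gives
 * 24000000 * 4 / (64 * 13 + 1) = 96000000 / 833, roughly 115246,
 * recovering the 115200 configuration written earlier to within
 * rounding error.
 */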
static int pl011_console_setup(struct console *co, char *options)
{
	struct uart_amba_port *uap;
	int baud = 38400;
	int bits = 8;
	int parity = 'n';
	int flow = 'n';
	int ret;

	/*
	 * Check whether an invalid uart number has been specified, and
	 * if so, search for the first available port that does have
	 * console support.
	 */
	if (co->index >= UART_NR)
		co->index = 0;
	uap = amba_ports[co->index];
	if (!uap)
		return -ENODEV;

	/* Allow pins to be muxed in and configured */
	pinctrl_pm_select_default_state(uap->port.dev);

	ret = clk_prepare(uap->clk);
	if (ret)
		return ret;

	if (dev_get_platdata(uap->port.dev)) {
		struct amba_pl011_data *plat;

		plat = dev_get_platdata(uap->port.dev);
		if (plat->init)
			plat->init();
	}

	uap->port.uartclk = clk_get_rate(uap->clk);

	if (uap->vendor->fixed_options) {
		baud = uap->fixed_baud;
	} else {
		if (options)
			uart_parse_options(options,
					   &baud, &parity, &bits, &flow);
		else
			pl011_console_get_options(uap, &baud, &parity, &bits);
	}

	return uart_set_options(&uap->port, co, baud, parity, bits, flow);
}
/**
 * pl011_console_match - non-standard console matching
 * @co:	     registering console
 * @name:    name from console command line
 * @idx:     index from console command line
 * @options: ptr to option string from console command line
 *
 * Only attempts to match console command lines of the form:
 *     console=pl011,mmio|mmio32,<addr>[,<options>]
 *     console=pl011,0x<addr>[,<options>]
 * This form is used to register an initial earlycon boot console and
 * replace it with the amba_console at pl011 driver init.
 *
 * Performs console setup for a match (as required by interface).
 * If no <options> are specified, then assume the h/w is already setup.
 *
 * Returns 0 if console matches; otherwise non-zero to use default matching.
 */
static int pl011_console_match(struct console *co, char *name, int idx,
			       char *options)
{
	unsigned char iotype;
	resource_size_t addr;
	int i;

	/*
	 * Systems affected by the Qualcomm Technologies QDF2400 E44 erratum
	 * have a distinct console name, so make sure we check for that.
	 * The actual implementation of the erratum occurs in the probe
	 * function.
	 */
	if ((strcmp(name, "qdf2400_e44") != 0) && (strcmp(name, "pl011") != 0))
		return -ENODEV;

	if (uart_parse_earlycon(options, &iotype, &addr, &options))
		return -ENODEV;

	if (iotype != UPIO_MEM && iotype != UPIO_MEM32)
		return -ENODEV;

	/* try to match the port specified on the command line */
	for (i = 0; i < ARRAY_SIZE(amba_ports); i++) {
		struct uart_port *port;

		if (!amba_ports[i])
			continue;

		port = &amba_ports[i]->port;

		if (port->mapbase != addr)
			continue;

		co->index = i;
		port->cons = co;
		return pl011_console_setup(co, options);
	}

	return -ENODEV;
}
static struct uart_driver amba_reg;
static struct console amba_console = {
	.name		= "ttyAMA",
	.write		= pl011_console_write,
	.device		= uart_console_device,
	.setup		= pl011_console_setup,
	.match		= pl011_console_match,
	.flags		= CON_PRINTBUFFER | CON_ANYTIME,
	.index		= -1,
	.data		= &amba_reg,
};

#define AMBA_CONSOLE	(&amba_console)
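/*
 * On SoCs affected by the QDF2400 E44 erratum the BUSY flag cannot be
 * trusted, so after writing each character we instead wait for the TX
 * FIFO to report completely empty (TXFE) before carrying on.
 */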
static void qdf2400_e44_putc(struct uart_port *port, int c)
{
	while (readl(port->membase + UART01x_FR) & UART01x_FR_TXFF)
		cpu_relax();
	writel(c, port->membase + UART01x_DR);
	while (!(readl(port->membase + UART01x_FR) & UART011_FR_TXFE))
		cpu_relax();
}
static void qdf2400_e44_early_write(struct console *con, const char *s, unsigned n)
{
	struct earlycon_device *dev = con->data;

	uart_console_write(&dev->port, s, n, qdf2400_e44_putc);
}
static void pl011_putc(struct uart_port *port, int c)
{
	while (readl(port->membase + UART01x_FR) & UART01x_FR_TXFF)
		cpu_relax();
	if (port->iotype == UPIO_MEM32)
		writel(c, port->membase + UART01x_DR);
	else
		writeb(c, port->membase + UART01x_DR);
	while (readl(port->membase + UART01x_FR) & UART01x_FR_BUSY)
		cpu_relax();
}
static void pl011_early_write(struct console *con, const char *s, unsigned n)
{
	struct earlycon_device *dev = con->data;

	uart_console_write(&dev->port, s, n, pl011_putc);
}
/*
 * On non-ACPI systems, earlycon is enabled by specifying
 * "earlycon=pl011,<address>" on the kernel command line.
 *
 * On ACPI ARM64 systems, an "early" console is enabled via the SPCR table,
 * by specifying only "earlycon" on the command line.  Because it requires
 * SPCR, the console starts after ACPI is parsed, which is later than a
 * traditional early console.
 *
 * To get the traditional early console that starts before ACPI is parsed,
 * specify the full "earlycon=pl011,<address>" option.
 */
static int __init pl011_early_console_setup(struct earlycon_device *device,
					    const char *opt)
{
	if (!device->port.membase)
		return -ENODEV;

	device->con->write = pl011_early_write;

	return 0;
}
OF_EARLYCON_DECLARE(pl011, "arm,pl011", pl011_early_console_setup);
OF_EARLYCON_DECLARE(pl011, "arm,sbsa-uart", pl011_early_console_setup);
/*
 * On Qualcomm Datacenter Technologies QDF2400 SOCs affected by
 * Erratum 44, traditional earlycon can be enabled by specifying
 * "earlycon=qdf2400_e44,<address>".  Any options are ignored.
 *
 * Alternatively, you can just specify "earlycon", and the early console
 * will be enabled with the information from the SPCR table.  In this
 * case, the SPCR code will detect the need for the E44 work-around,
 * and set the console name to "qdf2400_e44".
 */
static int __init
qdf2400_e44_early_console_setup(struct earlycon_device *device,
				const char *opt)
{
	if (!device->port.membase)
		return -ENODEV;

	device->con->write = qdf2400_e44_early_write;
	return 0;
}
EARLYCON_DECLARE(qdf2400_e44, qdf2400_e44_early_console_setup);
#else
#define AMBA_CONSOLE	NULL
#endif
static struct uart_driver amba_reg = {
	.owner			= THIS_MODULE,
	.driver_name		= "ttyAMA",
	.dev_name		= "ttyAMA",
	.major			= SERIAL_AMBA_MAJOR,
	.minor			= SERIAL_AMBA_MINOR,
	.nr			= UART_NR,
	.cons			= AMBA_CONSOLE,
};
static int pl011_probe_dt_alias(int index, struct device *dev)
{
	struct device_node *np;
	static bool seen_dev_with_alias = false;
	static bool seen_dev_without_alias = false;
	int ret = index;

	if (!IS_ENABLED(CONFIG_OF))
		return ret;

	np = dev->of_node;
	if (!np)
		return ret;

	ret = of_alias_get_id(np, "serial");
	if (ret < 0) {
		seen_dev_without_alias = true;
		ret = index;
	} else {
		seen_dev_with_alias = true;
		if (ret >= ARRAY_SIZE(amba_ports) || amba_ports[ret] != NULL) {
			dev_warn(dev, "requested serial port %d not available.\n", ret);
			ret = index;
		}
	}

	if (seen_dev_with_alias && seen_dev_without_alias)
		dev_warn(dev, "aliased and non-aliased serial devices found in device tree. Serial port enumeration may be unpredictable.\n");

	return ret;
}
/* unregisters the driver also if no more ports are left */
static void pl011_unregister_port(struct uart_amba_port *uap)
{
	int i;
	bool busy = false;

	for (i = 0; i < ARRAY_SIZE(amba_ports); i++) {
		if (amba_ports[i] == uap)
			amba_ports[i] = NULL;
		else if (amba_ports[i])
			busy = true;
	}
	pl011_dma_remove(uap);
	if (!busy)
		uart_unregister_driver(&amba_reg);
}
static int pl011_find_free_port(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(amba_ports); i++)
		if (amba_ports[i] == NULL)
			return i;

	return -EBUSY;
}
static int pl011_setup_port(struct device *dev, struct uart_amba_port *uap,
			    struct resource *mmiobase, int index)
{
	void __iomem *base;

	base = devm_ioremap_resource(dev, mmiobase);
	if (IS_ERR(base))
		return PTR_ERR(base);

	/* Honour any "serial" alias in the device tree. */
	index = pl011_probe_dt_alias(index, dev);

	uap->old_cr = 0;
	uap->port.dev = dev;
	uap->port.mapbase = mmiobase->start;
	uap->port.membase = base;
	uap->port.fifosize = uap->fifosize;
	uap->port.flags = UPF_BOOT_AUTOCONF;
	uap->port.line = index;
	spin_lock_init(&uap->port.lock);

	amba_ports[index] = uap;

	return 0;
}
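/*
 * The tty driver itself is registered lazily when the first port is added
 * and torn down again by pl011_unregister_port() once the last port goes.
 */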
static int pl011_register_port(struct uart_amba_port *uap)
{
	int ret, i;

	/* Ensure interrupts from this UART are masked and cleared */
	pl011_write(0, uap, REG_IMSC);
	pl011_write(0xffff, uap, REG_ICR);

	if (!amba_reg.state) {
		ret = uart_register_driver(&amba_reg);
		if (ret < 0) {
			dev_err(uap->port.dev,
				"Failed to register AMBA-PL011 driver\n");
			for (i = 0; i < ARRAY_SIZE(amba_ports); i++)
				if (amba_ports[i] == uap)
					amba_ports[i] = NULL;
			return ret;
		}
	}

	ret = uart_add_one_port(&amba_reg, &uap->port);
	if (ret)
		pl011_unregister_port(uap);

	return ret;
}
static int pl011_probe(struct amba_device *dev, const struct amba_id *id)
{
	struct uart_amba_port *uap;
	struct vendor_data *vendor = id->data;
	int portnr, ret;

	portnr = pl011_find_free_port();
	if (portnr < 0)
		return portnr;

	uap = devm_kzalloc(&dev->dev, sizeof(struct uart_amba_port),
			   GFP_KERNEL);
	if (!uap)
		return -ENOMEM;

	uap->clk = devm_clk_get(&dev->dev, NULL);
	if (IS_ERR(uap->clk))
		return PTR_ERR(uap->clk);

	uap->reg_offset = vendor->reg_offset;
	uap->vendor = vendor;
	uap->fifosize = vendor->get_fifosize(dev);
	uap->port.iotype = vendor->access_32b ? UPIO_MEM32 : UPIO_MEM;
	uap->port.irq = dev->irq[0];
	uap->port.ops = &amba_pl011_pops;

	snprintf(uap->type, sizeof(uap->type), "PL011 rev%u", amba_rev(dev));

	ret = pl011_setup_port(&dev->dev, uap, &dev->res, portnr);
	if (ret)
		return ret;

	amba_set_drvdata(dev, uap);

	return pl011_register_port(uap);
}
static int pl011_remove(struct amba_device *dev)
{
	struct uart_amba_port *uap = amba_get_drvdata(dev);

	uart_remove_one_port(&amba_reg, &uap->port);
	pl011_unregister_port(uap);
	return 0;
}
#ifdef CONFIG_PM_SLEEP
static int pl011_suspend(struct device *dev)
{
	struct uart_amba_port *uap = dev_get_drvdata(dev);

	if (!uap)
		return -EINVAL;

	return uart_suspend_port(&amba_reg, &uap->port);
}

static int pl011_resume(struct device *dev)
{
	struct uart_amba_port *uap = dev_get_drvdata(dev);

	if (!uap)
		return -EINVAL;

	return uart_resume_port(&amba_reg, &uap->port);
}
#endif

static SIMPLE_DEV_PM_OPS(pl011_dev_pm_ops, pl011_suspend, pl011_resume);
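/*
 * The SBSA generic UART is a fixed-configuration subset of the PL011:
 * there is no clock to probe, so the baud rate must come from the
 * mandatory "current-speed" DT property (or from ACPI, via SPCR).
 */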
static int sbsa_uart_probe(struct platform_device *pdev)
{
	struct uart_amba_port *uap;
	struct resource *r;
	int portnr, ret;
	int baudrate;

	/*
	 * Check the mandatory baud rate parameter in the DT node early
	 * so that we can easily exit with the error.
	 */
	if (pdev->dev.of_node) {
		struct device_node *np = pdev->dev.of_node;

		ret = of_property_read_u32(np, "current-speed", &baudrate);
		if (ret)
			return ret;
	} else {
		baudrate = 115200;
	}

	portnr = pl011_find_free_port();
	if (portnr < 0)
		return portnr;

	uap = devm_kzalloc(&pdev->dev, sizeof(struct uart_amba_port),
			   GFP_KERNEL);
	if (!uap)
		return -ENOMEM;

	ret = platform_get_irq(pdev, 0);
	if (ret < 0) {
		if (ret != -EPROBE_DEFER)
			dev_err(&pdev->dev, "cannot obtain irq\n");
		return ret;
	}
	uap->port.irq	= ret;

#ifdef CONFIG_ACPI_SPCR_TABLE
	if (qdf2400_e44_present) {
		dev_info(&pdev->dev, "working around QDF2400 SoC erratum 44\n");
		uap->vendor = &vendor_qdt_qdf2400_e44;
	} else
#endif
		uap->vendor = &vendor_sbsa;

	uap->reg_offset	= uap->vendor->reg_offset;
	uap->fifosize	= 32;
	uap->port.iotype = uap->vendor->access_32b ? UPIO_MEM32 : UPIO_MEM;
	uap->port.ops	= &sbsa_uart_pops;
	uap->fixed_baud = baudrate;

	snprintf(uap->type, sizeof(uap->type), "SBSA");

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);

	ret = pl011_setup_port(&pdev->dev, uap, r, portnr);
	if (ret)
		return ret;

	platform_set_drvdata(pdev, uap);

	return pl011_register_port(uap);
}
static int sbsa_uart_remove(struct platform_device *pdev)
{
	struct uart_amba_port *uap = platform_get_drvdata(pdev);

	uart_remove_one_port(&amba_reg, &uap->port);
	pl011_unregister_port(uap);
	return 0;
}
static const struct of_device_id sbsa_uart_of_match[] = {
	{ .compatible = "arm,sbsa-uart", },
	{},
};
MODULE_DEVICE_TABLE(of, sbsa_uart_of_match);
static const struct acpi_device_id sbsa_uart_acpi_match[] = {
	{ "ARMH0011", 0 },
	{},
};
MODULE_DEVICE_TABLE(acpi, sbsa_uart_acpi_match);
static struct platform_driver arm_sbsa_uart_platform_driver = {
	.probe		= sbsa_uart_probe,
	.remove		= sbsa_uart_remove,
	.driver	= {
		.name	= "sbsa-uart",
		.of_match_table = of_match_ptr(sbsa_uart_of_match),
		.acpi_match_table = ACPI_PTR(sbsa_uart_acpi_match),
		.suppress_bind_attrs = IS_BUILTIN(CONFIG_SERIAL_AMBA_PL011),
	},
};
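/*
 * AMBA peripheral-ID match table.  0x00041011 decodes as part number
 * 0x011 from designer 0x41 ('A' for ARM); the AMBA_LINUX_ID() entry is
 * a Linux-assigned ID for a variant without a real AMBA ID of its own.
 */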
static const struct amba_id pl011_ids[] = {
	{
		.id	= 0x00041011,
		.mask	= 0x000fffff,
		.data	= &vendor_arm,
	},
	{
		.id	= 0x00380802,
		.mask	= 0x00ffffff,
		.data	= &vendor_st,
	},
	{
		.id	= AMBA_LINUX_ID(0x00, 0x1, 0xffe),
		.mask	= 0x00ffffff,
		.data	= &vendor_zte,
	},
	{ 0, 0 },
};

MODULE_DEVICE_TABLE(amba, pl011_ids);
static struct amba_driver pl011_driver = {
	.drv = {
		.name	= "uart-pl011",
		.pm	= &pl011_dev_pm_ops,
		.suppress_bind_attrs = IS_BUILTIN(CONFIG_SERIAL_AMBA_PL011),
	},
	.id_table	= pl011_ids,
	.probe		= pl011_probe,
	.remove		= pl011_remove,
};
static int __init pl011_init(void)
{
	printk(KERN_INFO "Serial: AMBA PL011 UART driver\n");

	if (platform_driver_register(&arm_sbsa_uart_platform_driver))
		pr_warn("could not register SBSA UART platform driver\n");
	return amba_driver_register(&pl011_driver);
}
static void __exit pl011_exit(void)
{
	platform_driver_unregister(&arm_sbsa_uart_platform_driver);
	amba_driver_unregister(&pl011_driver);
}
/*
 * While this can be a module, if built-in it's most likely the console,
 * so let's leave module_exit but move module_init to an earlier place.
 */
arch_initcall(pl011_init);
module_exit(pl011_exit);
MODULE_AUTHOR("ARM Ltd/Deep Blue Solutions Ltd");
MODULE_DESCRIPTION("ARM AMBA serial port driver");
MODULE_LICENSE("GPL");