// SPDX-License-Identifier: GPL-2.0+
/*
 * Driver for AMBA serial ports
 *
 * Based on drivers/char/serial.c, by Linus Torvalds, Theodore Ts'o.
 *
 * Copyright 1999 ARM Limited
 * Copyright (C) 2000 Deep Blue Solutions Ltd.
 * Copyright (C) 2010 ST-Ericsson SA
 *
 * This is a generic driver for ARM AMBA-type serial ports.  They
 * have a lot of 16550-like features, but are not register compatible.
 * Note that although they do have CTS, DCD and DSR inputs, they do
 * not have an RI input, nor do they have DTR or RTS outputs.  If
 * required, these have to be supplied via some other means (eg, GPIO)
 * and hooked into this driver.
 */
#include <linux/module.h>
#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/console.h>
#include <linux/sysrq.h>
#include <linux/device.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/serial_core.h>
#include <linux/serial.h>
#include <linux/amba/bus.h>
#include <linux/amba/serial.h>
#include <linux/clk.h>
#include <linux/slab.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include <linux/delay.h>
#include <linux/types.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/pinctrl/consumer.h>
#include <linux/sizes.h>
#include <linux/io.h>
#include <linux/acpi.h>
#define UART_NR			14

#define SERIAL_AMBA_MAJOR	204
#define SERIAL_AMBA_MINOR	64
#define SERIAL_AMBA_NR		UART_NR

#define AMBA_ISR_PASS_LIMIT	256

#define UART_DR_ERROR		(UART011_DR_OE|UART011_DR_BE|UART011_DR_PE|UART011_DR_FE)
#define UART_DUMMY_DR_RX	(1 << 16)
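/*
 * UART_DUMMY_DR_RX is OR'd into every character taken from the data
 * register; pl011_setup_status_masks() puts it in ignore_status_mask when
 * CREAD is clear, so that all received characters are dropped.
 */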
enum {
	REG_DR,
	REG_ST_DMAWM,
	REG_ST_TIMEOUT,
	REG_FR,
	REG_LCRH_RX,
	REG_LCRH_TX,
	REG_IBRD,
	REG_FBRD,
	REG_CR,
	REG_IFLS,
	REG_IMSC,
	REG_RIS,
	REG_MIS,
	REG_ICR,
	REG_DMACR,
	REG_ST_XFCR,
	REG_ST_XON1,
	REG_ST_XON2,
	REG_ST_XOFF1,
	REG_ST_XOFF2,
	REG_ST_ITCR,
	REG_ST_ITIP,
	REG_ST_ABCR,
	REG_ST_ABIMSC,

	/* The size of the array - must be last */
	REG_ARRAY_SIZE,
};
static u16 pl011_std_offsets[REG_ARRAY_SIZE] = {
	[REG_DR] = UART01x_DR,
	[REG_FR] = UART01x_FR,
	[REG_LCRH_RX] = UART011_LCRH,
	[REG_LCRH_TX] = UART011_LCRH,
	[REG_IBRD] = UART011_IBRD,
	[REG_FBRD] = UART011_FBRD,
	[REG_CR] = UART011_CR,
	[REG_IFLS] = UART011_IFLS,
	[REG_IMSC] = UART011_IMSC,
	[REG_RIS] = UART011_RIS,
	[REG_MIS] = UART011_MIS,
	[REG_ICR] = UART011_ICR,
	[REG_DMACR] = UART011_DMACR,
};
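/*
 * Note that REG_LCRH_RX and REG_LCRH_TX both map onto the single
 * UART011_LCRH register here; the ST variant below splits them into two
 * separate registers, which is what pl011_split_lcrh() checks for.
 */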
/* There is by now at least one vendor with differing details, so handle it */
struct vendor_data {
	const u16		*reg_offset;
	unsigned int		ifls;
	unsigned int		fr_busy;
	unsigned int		fr_dsr;
	unsigned int		fr_cts;
	unsigned int		fr_ri;
	unsigned int		inv_fr;
	bool			access_32b;
	bool			oversampling;
	bool			dma_threshold;
	bool			cts_event_workaround;
	bool			always_enabled;
	bool			fixed_options;

	unsigned int (*get_fifosize)(struct amba_device *dev);
};
static unsigned int get_fifosize_arm(struct amba_device *dev)
{
	return amba_rev(dev) < 3 ? 16 : 32;
}
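/*
 * amba_rev() extracts the revision field from the AMBA peripheral ID, so
 * the FIFO depth above follows the silicon revision: 16 bytes before r3,
 * 32 bytes from r3 onwards.
 */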
static struct vendor_data vendor_arm = {
	.reg_offset		= pl011_std_offsets,
	.ifls			= UART011_IFLS_RX4_8|UART011_IFLS_TX4_8,
	.fr_busy		= UART01x_FR_BUSY,
	.fr_dsr			= UART01x_FR_DSR,
	.fr_cts			= UART01x_FR_CTS,
	.fr_ri			= UART011_FR_RI,
	.oversampling		= false,
	.dma_threshold		= false,
	.cts_event_workaround	= false,
	.always_enabled		= false,
	.fixed_options		= false,
	.get_fifosize		= get_fifosize_arm,
};
static const struct vendor_data vendor_sbsa = {
	.reg_offset		= pl011_std_offsets,
	.fr_busy		= UART01x_FR_BUSY,
	.fr_dsr			= UART01x_FR_DSR,
	.fr_cts			= UART01x_FR_CTS,
	.fr_ri			= UART011_FR_RI,
	.access_32b		= true,
	.oversampling		= false,
	.dma_threshold		= false,
	.cts_event_workaround	= false,
	.always_enabled		= true,
	.fixed_options		= true,
};
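/*
 * Qualcomm QDF2400 parts affected by erratum 44 present a shuffled flag
 * register: fr_busy is mapped onto the TXFE bit and inv_fr inverts it, so
 * the generic busy checks (see pl011_tx_empty() and pl011_console_write())
 * effectively test "TX FIFO not empty".
 */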
#ifdef CONFIG_ACPI_SPCR_TABLE
static const struct vendor_data vendor_qdt_qdf2400_e44 = {
	.reg_offset		= pl011_std_offsets,
	.fr_busy		= UART011_FR_TXFE,
	.fr_dsr			= UART01x_FR_DSR,
	.fr_cts			= UART01x_FR_CTS,
	.fr_ri			= UART011_FR_RI,
	.inv_fr			= UART011_FR_TXFE,
	.access_32b		= true,
	.oversampling		= false,
	.dma_threshold		= false,
	.cts_event_workaround	= false,
	.always_enabled		= true,
	.fixed_options		= true,
};
#endif
static u16 pl011_st_offsets[REG_ARRAY_SIZE] = {
	[REG_DR] = UART01x_DR,
	[REG_ST_DMAWM] = ST_UART011_DMAWM,
	[REG_ST_TIMEOUT] = ST_UART011_TIMEOUT,
	[REG_FR] = UART01x_FR,
	[REG_LCRH_RX] = ST_UART011_LCRH_RX,
	[REG_LCRH_TX] = ST_UART011_LCRH_TX,
	[REG_IBRD] = UART011_IBRD,
	[REG_FBRD] = UART011_FBRD,
	[REG_CR] = UART011_CR,
	[REG_IFLS] = UART011_IFLS,
	[REG_IMSC] = UART011_IMSC,
	[REG_RIS] = UART011_RIS,
	[REG_MIS] = UART011_MIS,
	[REG_ICR] = UART011_ICR,
	[REG_DMACR] = UART011_DMACR,
	[REG_ST_XFCR] = ST_UART011_XFCR,
	[REG_ST_XON1] = ST_UART011_XON1,
	[REG_ST_XON2] = ST_UART011_XON2,
	[REG_ST_XOFF1] = ST_UART011_XOFF1,
	[REG_ST_XOFF2] = ST_UART011_XOFF2,
	[REG_ST_ITCR] = ST_UART011_ITCR,
	[REG_ST_ITIP] = ST_UART011_ITIP,
	[REG_ST_ABCR] = ST_UART011_ABCR,
	[REG_ST_ABIMSC] = ST_UART011_ABIMSC,
};
static unsigned int get_fifosize_st(struct amba_device *dev)
{
	return 64;
}
static struct vendor_data vendor_st = {
	.reg_offset		= pl011_st_offsets,
	.ifls			= UART011_IFLS_RX_HALF|UART011_IFLS_TX_HALF,
	.fr_busy		= UART01x_FR_BUSY,
	.fr_dsr			= UART01x_FR_DSR,
	.fr_cts			= UART01x_FR_CTS,
	.fr_ri			= UART011_FR_RI,
	.oversampling		= true,
	.dma_threshold		= true,
	.cts_event_workaround	= true,
	.always_enabled		= false,
	.fixed_options		= false,
	.get_fifosize		= get_fifosize_st,
};
/* Deals with DMA transactions */
struct pl011_sgbuf {
	struct scatterlist sg;
	char *buf;
};
struct pl011_dmarx_data {
	struct dma_chan		*chan;
	struct completion	complete;
	bool			use_buf_b;
	struct pl011_sgbuf	sgbuf_a;
	struct pl011_sgbuf	sgbuf_b;
	dma_cookie_t		cookie;
	bool			running;
	struct timer_list	timer;
	unsigned int last_residue;
	unsigned long last_jiffies;
	bool auto_poll_rate;
	unsigned int poll_rate;
	unsigned int poll_timeout;
};
struct pl011_dmatx_data {
	struct dma_chan		*chan;
	struct scatterlist	sg;
	char			*buf;
	bool			queued;
};
/*
 * We wrap our port structure around the generic uart_port.
 */
struct uart_amba_port {
	struct uart_port	port;
	const u16		*reg_offset;
	struct clk		*clk;
	const struct vendor_data *vendor;
	unsigned int		dmacr;		/* dma control reg */
	unsigned int		im;		/* interrupt mask */
	unsigned int		old_status;
	unsigned int		fifosize;	/* vendor-specific */
	unsigned int		fixed_baud;	/* vendor-set fixed baud rate */
	char			type[12];
	bool			rs485_tx_started;
	unsigned int		rs485_tx_drain_interval; /* usecs */
#ifdef CONFIG_DMA_ENGINE
	/* DMA stuff */
	bool			using_tx_dma;
	bool			using_rx_dma;
	struct pl011_dmarx_data dmarx;
	struct pl011_dmatx_data	dmatx;
	bool			dma_probed;
#endif
};
static unsigned int pl011_tx_empty(struct uart_port *port);
static unsigned int pl011_reg_to_offset(const struct uart_amba_port *uap,
					unsigned int reg)
{
	return uap->reg_offset[reg];
}
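/*
 * Register accessors: most PL011 implementations are accessed with 16-bit
 * reads/writes; iotype UPIO_MEM32 marks ports whose registers must be
 * accessed as 32-bit words instead.
 */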
static unsigned int pl011_read(const struct uart_amba_port *uap,
			       unsigned int reg)
{
	void __iomem *addr = uap->port.membase + pl011_reg_to_offset(uap, reg);

	return (uap->port.iotype == UPIO_MEM32) ?
		readl_relaxed(addr) : readw_relaxed(addr);
}
static void pl011_write(unsigned int val, const struct uart_amba_port *uap,
			unsigned int reg)
{
	void __iomem *addr = uap->port.membase + pl011_reg_to_offset(uap, reg);

	if (uap->port.iotype == UPIO_MEM32)
		writel_relaxed(val, addr);
	else
		writew_relaxed(val, addr);
}
/*
 * Reads up to 256 characters from the FIFO or until it's empty and
 * inserts them into the TTY layer. Returns the number of characters
 * read from the FIFO.
 */
static int pl011_fifo_to_tty(struct uart_amba_port *uap)
{
	unsigned int ch, flag, fifotaken;
	int sysrq;
	u16 status;

	for (fifotaken = 0; fifotaken != 256; fifotaken++) {
		status = pl011_read(uap, REG_FR);
		if (status & UART01x_FR_RXFE)
			break;

		/* Take chars from the FIFO and update status */
		ch = pl011_read(uap, REG_DR) | UART_DUMMY_DR_RX;
		flag = TTY_NORMAL;
		uap->port.icount.rx++;

		if (unlikely(ch & UART_DR_ERROR)) {
			if (ch & UART011_DR_BE) {
				ch &= ~(UART011_DR_FE | UART011_DR_PE);
				uap->port.icount.brk++;
				if (uart_handle_break(&uap->port))
					continue;
			} else if (ch & UART011_DR_PE)
				uap->port.icount.parity++;
			else if (ch & UART011_DR_FE)
				uap->port.icount.frame++;
			if (ch & UART011_DR_OE)
				uap->port.icount.overrun++;

			ch &= uap->port.read_status_mask;

			if (ch & UART011_DR_BE)
				flag = TTY_BREAK;
			else if (ch & UART011_DR_PE)
				flag = TTY_PARITY;
			else if (ch & UART011_DR_FE)
				flag = TTY_FRAME;
		}

		spin_unlock(&uap->port.lock);
		sysrq = uart_handle_sysrq_char(&uap->port, ch & 255);
		spin_lock(&uap->port.lock);

		if (!sysrq)
			uart_insert_char(&uap->port, ch, UART011_DR_OE, ch, flag);
	}

	return fifotaken;
}
/*
 * All the DMA operation mode stuff goes inside this ifdef.
 * This assumes that you have a generic DMA device interface,
 * no custom DMA interfaces are supported.
 */
#ifdef CONFIG_DMA_ENGINE

#define PL011_DMA_BUFFER_SIZE PAGE_SIZE
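/*
 * One page per transfer direction: TX copies out of the circular buffer
 * into a single bounce buffer, while RX ping-pongs between the two sgbufs
 * (sgbuf_a/sgbuf_b) set up in pl011_dma_startup().
 */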
static int pl011_sgbuf_init(struct dma_chan *chan, struct pl011_sgbuf *sg,
			    enum dma_data_direction dir)
{
	dma_addr_t dma_addr;

	sg->buf = dma_alloc_coherent(chan->device->dev,
		PL011_DMA_BUFFER_SIZE, &dma_addr, GFP_KERNEL);
	if (!sg->buf)
		return -ENOMEM;

	sg_init_table(&sg->sg, 1);
	sg_set_page(&sg->sg, phys_to_page(dma_addr),
		PL011_DMA_BUFFER_SIZE, offset_in_page(dma_addr));
	sg_dma_address(&sg->sg) = dma_addr;
	sg_dma_len(&sg->sg) = PL011_DMA_BUFFER_SIZE;

	return 0;
}
static void pl011_sgbuf_free(struct dma_chan *chan, struct pl011_sgbuf *sg,
			     enum dma_data_direction dir)
{
	if (sg->buf)
		dma_free_coherent(chan->device->dev,
			PL011_DMA_BUFFER_SIZE, sg->buf,
			sg_dma_address(&sg->sg));
}
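/*
 * Channel acquisition strategy: ask the dmaengine core for named "tx"/"rx"
 * channels first (covering DT and ACPI bindings), and only fall back to the
 * legacy platform-data filter function when that fails.
 */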
static void pl011_dma_probe(struct uart_amba_port *uap)
{
	/* DMA is the sole user of the platform data right now */
	struct amba_pl011_data *plat = dev_get_platdata(uap->port.dev);
	struct device *dev = uap->port.dev;
	struct dma_slave_config tx_conf = {
		.dst_addr = uap->port.mapbase +
				 pl011_reg_to_offset(uap, REG_DR),
		.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
		.direction = DMA_MEM_TO_DEV,
		.dst_maxburst = uap->fifosize >> 1,
		.device_fc = false,
	};
	struct dma_chan *chan;
	dma_cap_mask_t mask;

	uap->dma_probed = true;
	chan = dma_request_chan(dev, "tx");
	if (IS_ERR(chan)) {
		if (PTR_ERR(chan) == -EPROBE_DEFER) {
			uap->dma_probed = false;
			return;
		}

		/* We need platform data */
		if (!plat || !plat->dma_filter) {
			dev_info(uap->port.dev, "no DMA platform data\n");
			return;
		}

		/* Try to acquire a generic DMA engine slave TX channel */
		dma_cap_zero(mask);
		dma_cap_set(DMA_SLAVE, mask);

		chan = dma_request_channel(mask, plat->dma_filter,
					   plat->dma_tx_param);
		if (!chan) {
			dev_err(uap->port.dev, "no TX DMA channel!\n");
			return;
		}
	}

	dmaengine_slave_config(chan, &tx_conf);
	uap->dmatx.chan = chan;

	dev_info(uap->port.dev, "DMA channel TX %s\n",
		 dma_chan_name(uap->dmatx.chan));
	/* Optionally make use of an RX channel as well */
	chan = dma_request_slave_channel(dev, "rx");

	if (!chan && plat && plat->dma_rx_param) {
		chan = dma_request_channel(mask, plat->dma_filter, plat->dma_rx_param);

		if (!chan) {
			dev_err(uap->port.dev, "no RX DMA channel!\n");
			return;
		}
	}

	if (chan) {
		struct dma_slave_config rx_conf = {
			.src_addr = uap->port.mapbase +
				pl011_reg_to_offset(uap, REG_DR),
			.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
			.direction = DMA_DEV_TO_MEM,
			.src_maxburst = uap->fifosize >> 2,
			.device_fc = false,
		};
		struct dma_slave_caps caps;

		/*
		 * Some DMA controllers provide information on their capabilities.
		 * If the controller does, check for suitable residue processing,
		 * otherwise assume all is well.
		 */
		if (0 == dma_get_slave_caps(chan, &caps)) {
			if (caps.residue_granularity ==
					DMA_RESIDUE_GRANULARITY_DESCRIPTOR) {
				dma_release_channel(chan);
				dev_info(uap->port.dev,
					 "RX DMA disabled - no residue processing\n");
				return;
			}
		}
		dmaengine_slave_config(chan, &rx_conf);
		uap->dmarx.chan = chan;
		uap->dmarx.auto_poll_rate = false;
		if (plat && plat->dma_rx_poll_enable) {
			/* Set poll rate if specified. */
			if (plat->dma_rx_poll_rate) {
				uap->dmarx.auto_poll_rate = false;
				uap->dmarx.poll_rate = plat->dma_rx_poll_rate;
			} else {
				/*
				 * Default to a 100 ms poll rate if not
				 * specified. This will be adjusted with
				 * the baud rate at set_termios.
				 */
				uap->dmarx.auto_poll_rate = true;
				uap->dmarx.poll_rate = 100;
			}
			/* Default to a 3 s poll_timeout if not specified. */
			if (plat->dma_rx_poll_timeout)
				uap->dmarx.poll_timeout =
					plat->dma_rx_poll_timeout;
			else
				uap->dmarx.poll_timeout = 3000;
		} else if (!plat && dev->of_node) {
			uap->dmarx.auto_poll_rate = of_property_read_bool(
						dev->of_node, "auto-poll");
			if (uap->dmarx.auto_poll_rate) {
				u32 x;

				if (0 == of_property_read_u32(dev->of_node,
						"poll-rate-ms", &x))
					uap->dmarx.poll_rate = x;
				else
					uap->dmarx.poll_rate = 100;
				if (0 == of_property_read_u32(dev->of_node,
						"poll-timeout-ms", &x))
					uap->dmarx.poll_timeout = x;
				else
					uap->dmarx.poll_timeout = 3000;
			}
		}
		dev_info(uap->port.dev, "DMA channel RX %s\n",
			 dma_chan_name(uap->dmarx.chan));
	}
}
static void pl011_dma_remove(struct uart_amba_port *uap)
{
	if (uap->dmatx.chan)
		dma_release_channel(uap->dmatx.chan);
	if (uap->dmarx.chan)
		dma_release_channel(uap->dmarx.chan);
}
/* Forward declare these for the refill routine */
static int pl011_dma_tx_refill(struct uart_amba_port *uap);
static void pl011_start_tx_pio(struct uart_amba_port *uap);
/*
 * The current DMA TX buffer has been sent.
 * Try to queue up another DMA buffer.
 */
static void pl011_dma_tx_callback(void *data)
{
	struct uart_amba_port *uap = data;
	struct pl011_dmatx_data *dmatx = &uap->dmatx;
	unsigned long flags;
	u16 dmacr;

	spin_lock_irqsave(&uap->port.lock, flags);
	if (uap->dmatx.queued)
		dma_unmap_sg(dmatx->chan->device->dev, &dmatx->sg, 1,
			     DMA_TO_DEVICE);

	dmacr = uap->dmacr;
	uap->dmacr = dmacr & ~UART011_TXDMAE;
	pl011_write(uap->dmacr, uap, REG_DMACR);

	/*
	 * If TX DMA was disabled, it means that we've stopped the DMA for
	 * some reason (eg, XOFF received, or we want to send an X-char.)
	 *
	 * Note: we need to be careful here of a potential race between DMA
	 * and the rest of the driver - if the driver disables TX DMA while
	 * a TX buffer is completing, we must update the tx queued status to
	 * get further refills (hence we check dmacr).
	 */
	if (!(dmacr & UART011_TXDMAE) || uart_tx_stopped(&uap->port) ||
	    uart_circ_empty(&uap->port.state->xmit)) {
		uap->dmatx.queued = false;
		spin_unlock_irqrestore(&uap->port.lock, flags);
		return;
	}

	if (pl011_dma_tx_refill(uap) <= 0)
		/*
		 * We didn't queue a DMA buffer for some reason, but we
		 * have data pending to be sent. Re-enable the TX IRQ.
		 */
		pl011_start_tx_pio(uap);

	spin_unlock_irqrestore(&uap->port.lock, flags);
}
/*
 * Try to refill the TX DMA buffer.
 * Locking: called with port lock held and IRQs disabled.
 * Returns:
 *   1 if we queued up a TX DMA buffer.
 *   0 if we didn't want to handle this by DMA
 *  <0 on error
 */
static int pl011_dma_tx_refill(struct uart_amba_port *uap)
{
	struct pl011_dmatx_data *dmatx = &uap->dmatx;
	struct dma_chan *chan = dmatx->chan;
	struct dma_device *dma_dev = chan->device;
	struct dma_async_tx_descriptor *desc;
	struct circ_buf *xmit = &uap->port.state->xmit;
	unsigned int count;

	/*
	 * Try to avoid the overhead involved in using DMA if the
	 * transaction fits in the first half of the FIFO, by using
	 * the standard interrupt handling. This ensures that we
	 * issue a uart_write_wakeup() at the appropriate time.
	 */
	count = uart_circ_chars_pending(xmit);
	if (count < (uap->fifosize >> 1)) {
		uap->dmatx.queued = false;
		return 0;
	}

	/*
	 * Bodge: don't send the last character by DMA, as this
	 * will prevent XON from notifying us to restart DMA.
	 */
	count -= 1;

	/* Else proceed to copy the TX chars to the DMA buffer and fire DMA */
	if (count > PL011_DMA_BUFFER_SIZE)
		count = PL011_DMA_BUFFER_SIZE;

	if (xmit->tail < xmit->head)
		memcpy(&dmatx->buf[0], &xmit->buf[xmit->tail], count);
	else {
		size_t first = UART_XMIT_SIZE - xmit->tail;
		size_t second;

		if (first > count)
			first = count;
		second = count - first;

		memcpy(&dmatx->buf[0], &xmit->buf[xmit->tail], first);
		if (second)
			memcpy(&dmatx->buf[first], &xmit->buf[0], second);
	}

	dmatx->sg.length = count;

	if (dma_map_sg(dma_dev->dev, &dmatx->sg, 1, DMA_TO_DEVICE) != 1) {
		uap->dmatx.queued = false;
		dev_dbg(uap->port.dev, "unable to map TX DMA\n");
		return -EBUSY;
	}

	desc = dmaengine_prep_slave_sg(chan, &dmatx->sg, 1, DMA_MEM_TO_DEV,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc) {
		dma_unmap_sg(dma_dev->dev, &dmatx->sg, 1, DMA_TO_DEVICE);
		uap->dmatx.queued = false;
		/*
		 * If DMA cannot be used right now, we complete this
		 * transaction via IRQ and let the TTY layer retry.
		 */
		dev_dbg(uap->port.dev, "TX DMA busy\n");
		return -EBUSY;
	}

	/* Some data to go along to the callback */
	desc->callback = pl011_dma_tx_callback;
	desc->callback_param = uap;

	/* All errors should happen at prepare time */
	dmaengine_submit(desc);

	/* Fire the DMA transaction */
	dma_dev->device_issue_pending(chan);

	uap->dmacr |= UART011_TXDMAE;
	pl011_write(uap->dmacr, uap, REG_DMACR);
	uap->dmatx.queued = true;

	/*
	 * Now we know that DMA will fire, so advance the ring buffer
	 * with the stuff we just dispatched.
	 */
	xmit->tail = (xmit->tail + count) & (UART_XMIT_SIZE - 1);
	uap->port.icount.tx += count;

	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
		uart_write_wakeup(&uap->port);

	return 1;
}
/*
 * We received a transmit interrupt without a pending X-char but with
 * pending characters.
 * Locking: called with port lock held and IRQs disabled.
 * Returns:
 *   false if we want to use PIO to transmit
 *   true if we queued a DMA buffer
 */
static bool pl011_dma_tx_irq(struct uart_amba_port *uap)
{
	if (!uap->using_tx_dma)
		return false;

	/*
	 * If we already have a TX buffer queued, but received a
	 * TX interrupt, it will be because we've just sent an X-char.
	 * Ensure the TX DMA is enabled and the TX IRQ is disabled.
	 */
	if (uap->dmatx.queued) {
		uap->dmacr |= UART011_TXDMAE;
		pl011_write(uap->dmacr, uap, REG_DMACR);
		uap->im &= ~UART011_TXIM;
		pl011_write(uap->im, uap, REG_IMSC);
		return true;
	}

	/*
	 * We don't have a TX buffer queued, so try to queue one.
	 * If we successfully queued a buffer, mask the TX IRQ.
	 */
	if (pl011_dma_tx_refill(uap) > 0) {
		uap->im &= ~UART011_TXIM;
		pl011_write(uap->im, uap, REG_IMSC);
		return true;
	}
	return false;
}
/*
 * Stop the DMA transmit (eg, due to received XOFF).
 * Locking: called with port lock held and IRQs disabled.
 */
static inline void pl011_dma_tx_stop(struct uart_amba_port *uap)
{
	if (uap->dmatx.queued) {
		uap->dmacr &= ~UART011_TXDMAE;
		pl011_write(uap->dmacr, uap, REG_DMACR);
	}
}
/*
 * Try to start a DMA transmit, or in the case of an XON/OFF
 * character queued for send, try to get that character out ASAP.
 * Locking: called with port lock held and IRQs disabled.
 * Returns:
 *   false if we want the TX IRQ to be enabled
 *   true if we have a buffer queued
 */
static inline bool pl011_dma_tx_start(struct uart_amba_port *uap)
{
	u16 dmacr;

	if (!uap->using_tx_dma)
		return false;

	if (!uap->port.x_char) {
		/* no X-char, try to push chars out in DMA mode */
		bool ret = true;

		if (!uap->dmatx.queued) {
			if (pl011_dma_tx_refill(uap) > 0) {
				uap->im &= ~UART011_TXIM;
				pl011_write(uap->im, uap, REG_IMSC);
			} else
				ret = false;
		} else if (!(uap->dmacr & UART011_TXDMAE)) {
			uap->dmacr |= UART011_TXDMAE;
			pl011_write(uap->dmacr, uap, REG_DMACR);
		}
		return ret;
	}

	/*
	 * We have an X-char to send. Disable DMA to prevent it loading
	 * the TX fifo, and then see if we can stuff it into the FIFO.
	 */
	dmacr = uap->dmacr;
	uap->dmacr &= ~UART011_TXDMAE;
	pl011_write(uap->dmacr, uap, REG_DMACR);

	if (pl011_read(uap, REG_FR) & UART01x_FR_TXFF) {
		/*
		 * No space in the FIFO, so enable the transmit interrupt
		 * so we know when there is space. Note that once we've
		 * loaded the character, we should just re-enable DMA.
		 */
		return false;
	}

	pl011_write(uap->port.x_char, uap, REG_DR);
	uap->port.icount.tx++;
	uap->port.x_char = 0;

	/* Success - restore the DMA state */
	uap->dmacr = dmacr;
	pl011_write(dmacr, uap, REG_DMACR);

	return true;
}
/*
 * Flush the transmit buffer.
 * Locking: called with port lock held and IRQs disabled.
 */
static void pl011_dma_flush_buffer(struct uart_port *port)
__releases(&uap->port.lock)
__acquires(&uap->port.lock)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);

	if (!uap->using_tx_dma)
		return;

	dmaengine_terminate_async(uap->dmatx.chan);

	if (uap->dmatx.queued) {
		dma_unmap_sg(uap->dmatx.chan->device->dev, &uap->dmatx.sg, 1,
			     DMA_TO_DEVICE);
		uap->dmatx.queued = false;
		uap->dmacr &= ~UART011_TXDMAE;
		pl011_write(uap->dmacr, uap, REG_DMACR);
	}
}
static void pl011_dma_rx_callback(void *data);
static int pl011_dma_rx_trigger_dma(struct uart_amba_port *uap)
{
	struct dma_chan *rxchan = uap->dmarx.chan;
	struct pl011_dmarx_data *dmarx = &uap->dmarx;
	struct dma_async_tx_descriptor *desc;
	struct pl011_sgbuf *sgbuf;

	if (!rxchan)
		return -EIO;

	/* Start the RX DMA job */
	sgbuf = uap->dmarx.use_buf_b ?
		&uap->dmarx.sgbuf_b : &uap->dmarx.sgbuf_a;
	desc = dmaengine_prep_slave_sg(rxchan, &sgbuf->sg, 1,
				       DMA_DEV_TO_MEM,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	/*
	 * If the DMA engine is busy and cannot prepare a
	 * channel, no big deal, the driver will fall back
	 * to interrupt mode as a result of this error code.
	 */
	if (!desc) {
		uap->dmarx.running = false;
		dmaengine_terminate_all(rxchan);
		return -EBUSY;
	}

	/* Some data to go along to the callback */
	desc->callback = pl011_dma_rx_callback;
	desc->callback_param = uap;
	dmarx->cookie = dmaengine_submit(desc);
	dma_async_issue_pending(rxchan);

	uap->dmacr |= UART011_RXDMAE;
	pl011_write(uap->dmacr, uap, REG_DMACR);
	uap->dmarx.running = true;

	uap->im &= ~UART011_RXIM;
	pl011_write(uap->im, uap, REG_IMSC);

	return 0;
}
/*
 * This is called when either the DMA job is complete, or
 * the FIFO timeout interrupt occurred. This must be called
 * with the port spinlock uap->port.lock held.
 */
static void pl011_dma_rx_chars(struct uart_amba_port *uap,
			       u32 pending, bool use_buf_b,
			       bool readfifo)
{
	struct tty_port *port = &uap->port.state->port;
	struct pl011_sgbuf *sgbuf = use_buf_b ?
		&uap->dmarx.sgbuf_b : &uap->dmarx.sgbuf_a;
	int dma_count = 0;
	u32 fifotaken = 0; /* only used for vdbg() */

	struct pl011_dmarx_data *dmarx = &uap->dmarx;
	int dmataken = 0;

	if (uap->dmarx.poll_rate) {
		/* The data can be taken by polling */
		dmataken = sgbuf->sg.length - dmarx->last_residue;
		/* Recalculate the pending size */
		if (pending >= dmataken)
			pending -= dmataken;
	}

	/* Pick up the remaining data from the DMA */
	if (pending) {

		/*
		 * First take all chars in the DMA pipe, then look in the FIFO.
		 * Note that tty_insert_flip_string() tries to take as many
		 * chars as it can.
		 */
		dma_count = tty_insert_flip_string(port, sgbuf->buf + dmataken,
				pending);

		uap->port.icount.rx += dma_count;
		if (dma_count < pending)
			dev_warn(uap->port.dev,
				 "couldn't insert all characters (TTY is full?)\n");
	}

	/* Reset the last_residue for Rx DMA poll */
	if (uap->dmarx.poll_rate)
		dmarx->last_residue = sgbuf->sg.length;

	/*
	 * Only continue with trying to read the FIFO if all DMA chars have
	 * been taken first.
	 */
	if (dma_count == pending && readfifo) {
		/* Clear any error flags */
		pl011_write(UART011_OEIS | UART011_BEIS | UART011_PEIS |
			    UART011_FEIS, uap, REG_ICR);

		/*
		 * If we read all the DMA'd characters, and we had an
		 * incomplete buffer, that could be due to an rx error, or
		 * maybe we just timed out. Read any pending chars and check
		 * the error status.
		 *
		 * Error conditions will only occur in the FIFO, these will
		 * trigger an immediate interrupt and stop the DMA job, so we
		 * will always find the error in the FIFO, never in the DMA
		 * buffer.
		 */
		fifotaken = pl011_fifo_to_tty(uap);
	}

	dev_vdbg(uap->port.dev,
		 "Took %d chars from DMA buffer and %d chars from the FIFO\n",
		 dma_count, fifotaken);
	tty_flip_buffer_push(port);
}
static void pl011_dma_rx_irq(struct uart_amba_port *uap)
{
	struct pl011_dmarx_data *dmarx = &uap->dmarx;
	struct dma_chan *rxchan = dmarx->chan;
	struct pl011_sgbuf *sgbuf = dmarx->use_buf_b ?
		    &dmarx->sgbuf_b : &dmarx->sgbuf_a;
	size_t pending;
	struct dma_tx_state state;
	enum dma_status dmastat;

	/*
	 * Pause the transfer so we can trust the current counter,
	 * do this before we pause the PL011 block, else we may
	 * overflow the FIFO.
	 */
	if (dmaengine_pause(rxchan))
		dev_err(uap->port.dev, "unable to pause DMA transfer\n");
	dmastat = rxchan->device->device_tx_status(rxchan,
						   dmarx->cookie, &state);
	if (dmastat != DMA_PAUSED)
		dev_err(uap->port.dev, "unable to pause DMA transfer\n");

	/* Disable RX DMA - incoming data will wait in the FIFO */
	uap->dmacr &= ~UART011_RXDMAE;
	pl011_write(uap->dmacr, uap, REG_DMACR);
	uap->dmarx.running = false;

	pending = sgbuf->sg.length - state.residue;
	BUG_ON(pending > PL011_DMA_BUFFER_SIZE);
	/* Then we terminate the transfer - we now know our residue */
	dmaengine_terminate_all(rxchan);

	/*
	 * This will take the chars we have so far and insert
	 * into the framework.
	 */
	pl011_dma_rx_chars(uap, pending, dmarx->use_buf_b, true);

	/* Switch buffer & re-trigger DMA job */
	dmarx->use_buf_b = !dmarx->use_buf_b;
	if (pl011_dma_rx_trigger_dma(uap)) {
		dev_dbg(uap->port.dev, "could not retrigger RX DMA job "
			"fall back to interrupt mode\n");
		uap->im |= UART011_RXIM;
		pl011_write(uap->im, uap, REG_IMSC);
	}
}
static void pl011_dma_rx_callback(void *data)
{
	struct uart_amba_port *uap = data;
	struct pl011_dmarx_data *dmarx = &uap->dmarx;
	struct dma_chan *rxchan = dmarx->chan;
	bool lastbuf = dmarx->use_buf_b;
	struct pl011_sgbuf *sgbuf = dmarx->use_buf_b ?
		    &dmarx->sgbuf_b : &dmarx->sgbuf_a;
	size_t pending;
	struct dma_tx_state state;
	int ret;

	/*
	 * This completion interrupt occurs typically when the
	 * RX buffer is totally stuffed but no timeout has yet
	 * occurred. When that happens, we just want the RX
	 * routine to flush out the secondary DMA buffer while
	 * we immediately trigger the next DMA job.
	 */
	spin_lock_irq(&uap->port.lock);
	/*
	 * Rx data can be taken by the UART interrupts during
	 * the DMA irq handler. So we check the residue here.
	 */
	rxchan->device->device_tx_status(rxchan, dmarx->cookie, &state);
	pending = sgbuf->sg.length - state.residue;
	BUG_ON(pending > PL011_DMA_BUFFER_SIZE);
	/* Then we terminate the transfer - we now know our residue */
	dmaengine_terminate_all(rxchan);

	uap->dmarx.running = false;
	dmarx->use_buf_b = !lastbuf;
	ret = pl011_dma_rx_trigger_dma(uap);

	pl011_dma_rx_chars(uap, pending, lastbuf, false);
	spin_unlock_irq(&uap->port.lock);
	/*
	 * Do this check after we picked the DMA chars so we don't
	 * get some IRQ immediately from RX.
	 */
	if (ret) {
		dev_dbg(uap->port.dev, "could not retrigger RX DMA job "
			"fall back to interrupt mode\n");
		uap->im |= UART011_RXIM;
		pl011_write(uap->im, uap, REG_IMSC);
	}
}
/*
 * Stop accepting received characters, when we're shutting down or
 * suspending this port.
 * Locking: called with port lock held and IRQs disabled.
 */
static inline void pl011_dma_rx_stop(struct uart_amba_port *uap)
{
	/* FIXME.  Just disable the DMA enable */
	uap->dmacr &= ~UART011_RXDMAE;
	pl011_write(uap->dmacr, uap, REG_DMACR);
}
/*
 * Timer handler for Rx DMA polling.
 * On every poll, it checks the residue in the DMA buffer and transfers
 * data to the tty. Also, last_residue is updated for the next poll.
 */
static void pl011_dma_rx_poll(struct timer_list *t)
{
	struct uart_amba_port *uap = from_timer(uap, t, dmarx.timer);
	struct tty_port *port = &uap->port.state->port;
	struct pl011_dmarx_data *dmarx = &uap->dmarx;
	struct dma_chan *rxchan = uap->dmarx.chan;
	unsigned long flags;
	unsigned int dmataken = 0;
	unsigned int size = 0;
	struct pl011_sgbuf *sgbuf;
	int dma_count;
	struct dma_tx_state state;

	sgbuf = dmarx->use_buf_b ? &uap->dmarx.sgbuf_b : &uap->dmarx.sgbuf_a;
	rxchan->device->device_tx_status(rxchan, dmarx->cookie, &state);
	if (likely(state.residue < dmarx->last_residue)) {
		dmataken = sgbuf->sg.length - dmarx->last_residue;
		size = dmarx->last_residue - state.residue;
		dma_count = tty_insert_flip_string(port, sgbuf->buf + dmataken,
				size);
		if (dma_count == size)
			dmarx->last_residue = state.residue;
		dmarx->last_jiffies = jiffies;
	}
	tty_flip_buffer_push(port);

	/*
	 * If no data is received in poll_timeout, the driver will fall back
	 * to interrupt mode. We will retrigger DMA at the first interrupt.
	 */
	if (jiffies_to_msecs(jiffies - dmarx->last_jiffies)
			> uap->dmarx.poll_timeout) {
		spin_lock_irqsave(&uap->port.lock, flags);
		pl011_dma_rx_stop(uap);
		uap->im |= UART011_RXIM;
		pl011_write(uap->im, uap, REG_IMSC);
		spin_unlock_irqrestore(&uap->port.lock, flags);

		uap->dmarx.running = false;
		dmaengine_terminate_all(rxchan);
		del_timer(&uap->dmarx.timer);
	} else {
		mod_timer(&uap->dmarx.timer,
			  jiffies + msecs_to_jiffies(uap->dmarx.poll_rate));
	}
}
static void pl011_dma_startup(struct uart_amba_port *uap)
{
	int ret;

	if (!uap->dma_probed)
		pl011_dma_probe(uap);

	if (!uap->dmatx.chan)
		return;

	uap->dmatx.buf = kmalloc(PL011_DMA_BUFFER_SIZE, GFP_KERNEL | __GFP_DMA);
	if (!uap->dmatx.buf) {
		dev_err(uap->port.dev, "no memory for DMA TX buffer\n");
		uap->port.fifosize = uap->fifosize;
		return;
	}

	sg_init_one(&uap->dmatx.sg, uap->dmatx.buf, PL011_DMA_BUFFER_SIZE);

	/* The DMA buffer is now the FIFO the TTY subsystem can use */
	uap->port.fifosize = PL011_DMA_BUFFER_SIZE;
	uap->using_tx_dma = true;

	if (!uap->dmarx.chan)
		goto skip_rx;

	/* Allocate and map DMA RX buffers */
	ret = pl011_sgbuf_init(uap->dmarx.chan, &uap->dmarx.sgbuf_a,
			       DMA_FROM_DEVICE);
	if (ret) {
		dev_err(uap->port.dev, "failed to init DMA %s: %d\n",
			"RX buffer A", ret);
		goto skip_rx;
	}

	ret = pl011_sgbuf_init(uap->dmarx.chan, &uap->dmarx.sgbuf_b,
			       DMA_FROM_DEVICE);
	if (ret) {
		dev_err(uap->port.dev, "failed to init DMA %s: %d\n",
			"RX buffer B", ret);
		pl011_sgbuf_free(uap->dmarx.chan, &uap->dmarx.sgbuf_a,
				 DMA_FROM_DEVICE);
		goto skip_rx;
	}

	uap->using_rx_dma = true;

skip_rx:
	/* Turn on DMA error (RX/TX will be enabled on demand) */
	uap->dmacr |= UART011_DMAONERR;
	pl011_write(uap->dmacr, uap, REG_DMACR);

	/*
	 * ST Micro variants have a specific dma burst threshold
	 * compensation. Set this to 16 bytes, so burst will only
	 * be issued above/below 16 bytes.
	 */
	if (uap->vendor->dma_threshold)
		pl011_write(ST_UART011_DMAWM_RX_16 | ST_UART011_DMAWM_TX_16,
			    uap, REG_ST_DMAWM);

	if (uap->using_rx_dma) {
		if (pl011_dma_rx_trigger_dma(uap))
			dev_dbg(uap->port.dev, "could not trigger initial "
				"RX DMA job, fall back to interrupt mode\n");
		if (uap->dmarx.poll_rate) {
			timer_setup(&uap->dmarx.timer, pl011_dma_rx_poll, 0);
			mod_timer(&uap->dmarx.timer,
				  jiffies +
				  msecs_to_jiffies(uap->dmarx.poll_rate));
			uap->dmarx.last_residue = PL011_DMA_BUFFER_SIZE;
			uap->dmarx.last_jiffies = jiffies;
		}
	}
}
static void pl011_dma_shutdown(struct uart_amba_port *uap)
{
	if (!(uap->using_tx_dma || uap->using_rx_dma))
		return;

	/* Disable RX and TX DMA */
	while (pl011_read(uap, REG_FR) & uap->vendor->fr_busy)
		cpu_relax();

	spin_lock_irq(&uap->port.lock);
	uap->dmacr &= ~(UART011_DMAONERR | UART011_RXDMAE | UART011_TXDMAE);
	pl011_write(uap->dmacr, uap, REG_DMACR);
	spin_unlock_irq(&uap->port.lock);

	if (uap->using_tx_dma) {
		/* In theory, this should already be done by pl011_dma_flush_buffer */
		dmaengine_terminate_all(uap->dmatx.chan);
		if (uap->dmatx.queued) {
			dma_unmap_sg(uap->dmatx.chan->device->dev, &uap->dmatx.sg, 1,
				     DMA_TO_DEVICE);
			uap->dmatx.queued = false;
		}

		kfree(uap->dmatx.buf);
		uap->using_tx_dma = false;
	}

	if (uap->using_rx_dma) {
		dmaengine_terminate_all(uap->dmarx.chan);
		/* Clean up the RX DMA */
		pl011_sgbuf_free(uap->dmarx.chan, &uap->dmarx.sgbuf_a, DMA_FROM_DEVICE);
		pl011_sgbuf_free(uap->dmarx.chan, &uap->dmarx.sgbuf_b, DMA_FROM_DEVICE);
		if (uap->dmarx.poll_rate)
			del_timer_sync(&uap->dmarx.timer);
		uap->using_rx_dma = false;
	}
}
static inline bool pl011_dma_rx_available(struct uart_amba_port *uap)
{
	return uap->using_rx_dma;
}

static inline bool pl011_dma_rx_running(struct uart_amba_port *uap)
{
	return uap->using_rx_dma && uap->dmarx.running;
}
#else
/* Blank functions if the DMA engine is not available */
static inline void pl011_dma_remove(struct uart_amba_port *uap)
{
}

static inline void pl011_dma_startup(struct uart_amba_port *uap)
{
}

static inline void pl011_dma_shutdown(struct uart_amba_port *uap)
{
}

static inline bool pl011_dma_tx_irq(struct uart_amba_port *uap)
{
	return false;
}

static inline void pl011_dma_tx_stop(struct uart_amba_port *uap)
{
}

static inline bool pl011_dma_tx_start(struct uart_amba_port *uap)
{
	return false;
}

static inline void pl011_dma_rx_irq(struct uart_amba_port *uap)
{
}

static inline void pl011_dma_rx_stop(struct uart_amba_port *uap)
{
}

static inline int pl011_dma_rx_trigger_dma(struct uart_amba_port *uap)
{
	return -EIO;
}

static inline bool pl011_dma_rx_available(struct uart_amba_port *uap)
{
	return false;
}

static inline bool pl011_dma_rx_running(struct uart_amba_port *uap)
{
	return false;
}

#define pl011_dma_flush_buffer	NULL

#endif
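/*
 * With CONFIG_DMA_ENGINE unset, pl011_dma_flush_buffer is defined to NULL
 * above, so the .flush_buffer entry in the uart_ops below simply becomes
 * "no hook".
 */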
static void pl011_rs485_tx_stop(struct uart_amba_port *uap)
{
	/*
	 * To be on the safe side, only time out after twice as many
	 * iterations as the FIFO size.
	 */
	const int MAX_TX_DRAIN_ITERS = uap->port.fifosize * 2;
	struct uart_port *port = &uap->port;
	int i = 0;
	u32 cr;

	/* Wait until hardware tx queue is empty */
	while (!pl011_tx_empty(port)) {
		if (i > MAX_TX_DRAIN_ITERS) {
			dev_warn(port->dev,
				 "timeout while draining hardware tx queue\n");
			break;
		}

		udelay(uap->rs485_tx_drain_interval);
		i++;
	}

	if (port->rs485.delay_rts_after_send)
		mdelay(port->rs485.delay_rts_after_send);

	cr = pl011_read(uap, REG_CR);

	if (port->rs485.flags & SER_RS485_RTS_AFTER_SEND)
		cr &= ~UART011_CR_RTS;
	else
		cr |= UART011_CR_RTS;

	/* Disable the transmitter and reenable the transceiver */
	cr &= ~UART011_CR_TXE;
	cr |= UART011_CR_RXE;
	pl011_write(cr, uap, REG_CR);

	uap->rs485_tx_started = false;
}
static void pl011_stop_tx(struct uart_port *port)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);

	uap->im &= ~UART011_TXIM;
	pl011_write(uap->im, uap, REG_IMSC);
	pl011_dma_tx_stop(uap);

	if ((port->rs485.flags & SER_RS485_ENABLED) && uap->rs485_tx_started)
		pl011_rs485_tx_stop(uap);
}
static bool pl011_tx_chars(struct uart_amba_port *uap, bool from_irq);

/* Start TX with programmed I/O only (no DMA) */
static void pl011_start_tx_pio(struct uart_amba_port *uap)
{
	if (pl011_tx_chars(uap, false)) {
		uap->im |= UART011_TXIM;
		pl011_write(uap->im, uap, REG_IMSC);
	}
}
static void pl011_start_tx(struct uart_port *port)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);

	if (!pl011_dma_tx_start(uap))
		pl011_start_tx_pio(uap);
}
static void pl011_stop_rx(struct uart_port *port)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);

	uap->im &= ~(UART011_RXIM|UART011_RTIM|UART011_FEIM|
		     UART011_PEIM|UART011_BEIM|UART011_OEIM);
	pl011_write(uap->im, uap, REG_IMSC);

	pl011_dma_rx_stop(uap);
}
static void pl011_enable_ms(struct uart_port *port)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);

	uap->im |= UART011_RIMIM|UART011_CTSMIM|UART011_DCDMIM|UART011_DSRMIM;
	pl011_write(uap->im, uap, REG_IMSC);
}
static void pl011_rx_chars(struct uart_amba_port *uap)
__releases(&uap->port.lock)
__acquires(&uap->port.lock)
{
	pl011_fifo_to_tty(uap);

	spin_unlock(&uap->port.lock);
	tty_flip_buffer_push(&uap->port.state->port);
	/*
	 * If we were temporarily out of DMA mode for a while,
	 * attempt to switch back to DMA mode again.
	 */
	if (pl011_dma_rx_available(uap)) {
		if (pl011_dma_rx_trigger_dma(uap)) {
			dev_dbg(uap->port.dev, "could not trigger RX DMA job "
				"fall back to interrupt mode again\n");
			uap->im |= UART011_RXIM;
			pl011_write(uap->im, uap, REG_IMSC);
		} else {
#ifdef CONFIG_DMA_ENGINE
			/* Start Rx DMA poll */
			if (uap->dmarx.poll_rate) {
				uap->dmarx.last_jiffies = jiffies;
				uap->dmarx.last_residue	= PL011_DMA_BUFFER_SIZE;
				mod_timer(&uap->dmarx.timer,
					  jiffies +
					  msecs_to_jiffies(uap->dmarx.poll_rate));
			}
#endif
		}
	}
	spin_lock(&uap->port.lock);
}
static bool pl011_tx_char(struct uart_amba_port *uap, unsigned char c,
			  bool from_irq)
{
	if (unlikely(!from_irq) &&
	    pl011_read(uap, REG_FR) & UART01x_FR_TXFF)
		return false; /* unable to transmit character */

	pl011_write(c, uap, REG_DR);
	uap->port.icount.tx++;

	return true;
}
static void pl011_rs485_tx_start(struct uart_amba_port *uap)
{
	struct uart_port *port = &uap->port;
	u32 cr;

	/* Enable transmitter */
	cr = pl011_read(uap, REG_CR);
	cr |= UART011_CR_TXE;

	/* Disable receiver if half-duplex */
	if (!(port->rs485.flags & SER_RS485_RX_DURING_TX))
		cr &= ~UART011_CR_RXE;

	if (port->rs485.flags & SER_RS485_RTS_ON_SEND)
		cr &= ~UART011_CR_RTS;
	else
		cr |= UART011_CR_RTS;

	pl011_write(cr, uap, REG_CR);

	if (port->rs485.delay_rts_before_send)
		mdelay(port->rs485.delay_rts_before_send);

	uap->rs485_tx_started = true;
}
/* Returns true if tx interrupts have to be (kept) enabled */
static bool pl011_tx_chars(struct uart_amba_port *uap, bool from_irq)
{
	struct circ_buf *xmit = &uap->port.state->xmit;
	int count = uap->fifosize >> 1;

	if (uap->port.x_char) {
		if (!pl011_tx_char(uap, uap->port.x_char, from_irq))
			return true;
		uap->port.x_char = 0;
		--count;
	}
	if (uart_circ_empty(xmit) || uart_tx_stopped(&uap->port)) {
		pl011_stop_tx(&uap->port);
		return false;
	}

	if ((uap->port.rs485.flags & SER_RS485_ENABLED) &&
	    !uap->rs485_tx_started)
		pl011_rs485_tx_start(uap);

	/* If we are using DMA mode, try to send some characters. */
	if (pl011_dma_tx_irq(uap))
		return true;

	do {
		if (likely(from_irq) && count-- == 0)
			break;

		if (!pl011_tx_char(uap, xmit->buf[xmit->tail], from_irq))
			break;

		xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
	} while (!uart_circ_empty(xmit));

	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
		uart_write_wakeup(&uap->port);

	if (uart_circ_empty(xmit)) {
		pl011_stop_tx(&uap->port);
		return false;
	}
	return true;
}
static void pl011_modem_status(struct uart_amba_port *uap)
{
	unsigned int status, delta;

	status = pl011_read(uap, REG_FR) & UART01x_FR_MODEM_ANY;

	delta = status ^ uap->old_status;
	uap->old_status = status;

	if (!delta)
		return;

	if (delta & UART01x_FR_DCD)
		uart_handle_dcd_change(&uap->port, status & UART01x_FR_DCD);

	if (delta & uap->vendor->fr_dsr)
		uap->port.icount.dsr++;

	if (delta & uap->vendor->fr_cts)
		uart_handle_cts_change(&uap->port,
				       status & uap->vendor->fr_cts);

	wake_up_interruptible(&uap->port.state->port.delta_msr_wait);
}
static void check_apply_cts_event_workaround(struct uart_amba_port *uap)
{
	if (!uap->vendor->cts_event_workaround)
		return;

	/* workaround to make sure that all bits are unlocked.. */
	pl011_write(0x00, uap, REG_ICR);

	/*
	 * WA: introduce 26ns(1 uart clk) delay before W1C;
	 * single apb access will incur 2 pclk(133.12Mhz) delay,
	 * so add 2 dummy reads
	 */
	pl011_read(uap, REG_ICR);
	pl011_read(uap, REG_ICR);
}
static irqreturn_t pl011_int(int irq, void *dev_id)
{
	struct uart_amba_port *uap = dev_id;
	unsigned long flags;
	unsigned int status, pass_counter = AMBA_ISR_PASS_LIMIT;
	int handled = 0;

	spin_lock_irqsave(&uap->port.lock, flags);
	status = pl011_read(uap, REG_RIS) & uap->im;
	if (status) {
		do {
			check_apply_cts_event_workaround(uap);

			pl011_write(status & ~(UART011_TXIS|UART011_RTIS|
					       UART011_RXIS),
				    uap, REG_ICR);

			if (status & (UART011_RTIS|UART011_RXIS)) {
				if (pl011_dma_rx_running(uap))
					pl011_dma_rx_irq(uap);
				else
					pl011_rx_chars(uap);
			}
			if (status & (UART011_DSRMIS|UART011_DCDMIS|
				      UART011_CTSMIS|UART011_RIMIS))
				pl011_modem_status(uap);
			if (status & UART011_TXIS)
				pl011_tx_chars(uap, true);

			if (pass_counter-- == 0)
				break;

			status = pl011_read(uap, REG_RIS) & uap->im;
		} while (status != 0);
		handled = 1;
	}

	spin_unlock_irqrestore(&uap->port.lock, flags);

	return IRQ_RETVAL(handled);
}
static unsigned int pl011_tx_empty(struct uart_port *port)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);

	/* Allow feature register bits to be inverted to work around errata */
	unsigned int status = pl011_read(uap, REG_FR) ^ uap->vendor->inv_fr;

	return status & (uap->vendor->fr_busy | UART01x_FR_TXFF) ?
							0 : TIOCSER_TEMT;
}
static unsigned int pl011_get_mctrl(struct uart_port *port)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);
	unsigned int result = 0;
	unsigned int status = pl011_read(uap, REG_FR);

#define TIOCMBIT(uartbit, tiocmbit)	\
	if (status & uartbit)		\
		result |= tiocmbit

	TIOCMBIT(UART01x_FR_DCD, TIOCM_CAR);
	TIOCMBIT(uap->vendor->fr_dsr, TIOCM_DSR);
	TIOCMBIT(uap->vendor->fr_cts, TIOCM_CTS);
	TIOCMBIT(uap->vendor->fr_ri, TIOCM_RNG);
#undef TIOCMBIT
	return result;
}
static void pl011_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);
	unsigned int cr;

	cr = pl011_read(uap, REG_CR);

#define	TIOCMBIT(tiocmbit, uartbit)	\
	if (mctrl & tiocmbit)		\
		cr |= uartbit;		\
	else				\
		cr &= ~uartbit

	TIOCMBIT(TIOCM_RTS, UART011_CR_RTS);
	TIOCMBIT(TIOCM_DTR, UART011_CR_DTR);
	TIOCMBIT(TIOCM_OUT1, UART011_CR_OUT1);
	TIOCMBIT(TIOCM_OUT2, UART011_CR_OUT2);
	TIOCMBIT(TIOCM_LOOP, UART011_CR_LBE);

	if (port->status & UPSTAT_AUTORTS) {
		/* We need to disable auto-RTS if we want to turn RTS off */
		TIOCMBIT(TIOCM_RTS, UART011_CR_RTSEN);
	}
#undef TIOCMBIT

	pl011_write(cr, uap, REG_CR);
}
static void pl011_break_ctl(struct uart_port *port, int break_state)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);
	unsigned long flags;
	unsigned int lcr_h;

	spin_lock_irqsave(&uap->port.lock, flags);
	lcr_h = pl011_read(uap, REG_LCRH_TX);
	if (break_state == -1)
		lcr_h |= UART01x_LCRH_BRK;
	else
		lcr_h &= ~UART01x_LCRH_BRK;
	pl011_write(lcr_h, uap, REG_LCRH_TX);
	spin_unlock_irqrestore(&uap->port.lock, flags);
}
#ifdef CONFIG_CONSOLE_POLL

static void pl011_quiesce_irqs(struct uart_port *port)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);

	pl011_write(pl011_read(uap, REG_MIS), uap, REG_ICR);
	/*
	 * There is no way to clear TXIM as this is "ready to transmit IRQ", so
	 * we simply mask it. start_tx() will unmask it.
	 *
	 * Note we can race with start_tx(), and if the race happens, the
	 * polling user might get another interrupt just after we clear it.
	 * But it should be OK and can happen even w/o the race, e.g.
	 * controller immediately got some new data and raised the IRQ.
	 *
	 * And whoever uses polling routines assumes that it manages the device
	 * (including tx queue), so we're also fine with start_tx()'s caller
	 * side.
	 */
	pl011_write(pl011_read(uap, REG_IMSC) & ~UART011_TXIM, uap,
		    REG_IMSC);
}
static int pl011_get_poll_char(struct uart_port *port)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);
	unsigned int status;

	/*
	 * The caller might need IRQs lowered, e.g. if used with KDB NMI
	 * debugger.
	 */
	pl011_quiesce_irqs(port);

	status = pl011_read(uap, REG_FR);
	if (status & UART01x_FR_RXFE)
		return NO_POLL_CHAR;

	return pl011_read(uap, REG_DR);
}
static void pl011_put_poll_char(struct uart_port *port,
				unsigned char ch)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);

	while (pl011_read(uap, REG_FR) & UART01x_FR_TXFF)
		cpu_relax();

	pl011_write(ch, uap, REG_DR);
}

#endif /* CONFIG_CONSOLE_POLL */
static int pl011_hwinit(struct uart_port *port)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);
	int retval;

	/* Optionally enable pins to be muxed in and configured */
	pinctrl_pm_select_default_state(port->dev);

	/*
	 * Try to enable the clock producer.
	 */
	retval = clk_prepare_enable(uap->clk);
	if (retval)
		return retval;

	uap->port.uartclk = clk_get_rate(uap->clk);

	/* Clear pending error and receive interrupts */
	pl011_write(UART011_OEIS | UART011_BEIS | UART011_PEIS |
		    UART011_FEIS | UART011_RTIS | UART011_RXIS,
		    uap, REG_ICR);

	/*
	 * Save the interrupt enable mask, and enable RX interrupts in case
	 * the interrupt is used for NMI entry.
	 */
	uap->im = pl011_read(uap, REG_IMSC);
	pl011_write(UART011_RTIM | UART011_RXIM, uap, REG_IMSC);

	if (dev_get_platdata(uap->port.dev)) {
		struct amba_pl011_data *plat;

		plat = dev_get_platdata(uap->port.dev);
		if (plat->init)
			plat->init();
	}
	return 0;
}
static bool pl011_split_lcrh(const struct uart_amba_port *uap)
{
	return pl011_reg_to_offset(uap, REG_LCRH_RX) !=
	       pl011_reg_to_offset(uap, REG_LCRH_TX);
}
static void pl011_write_lcr_h(struct uart_amba_port *uap, unsigned int lcr_h)
{
	pl011_write(lcr_h, uap, REG_LCRH_RX);
	if (pl011_split_lcrh(uap)) {
		int i;
		/*
		 * Wait 10 PCLKs before writing the LCRH_TX register;
		 * to get this delay, write the read-only MIS register
		 * 10 times.
		 */
		for (i = 0; i < 10; ++i)
			pl011_write(0xff, uap, REG_MIS);
		pl011_write(lcr_h, uap, REG_LCRH_TX);
	}
}
static int pl011_allocate_irq(struct uart_amba_port *uap)
{
	pl011_write(uap->im, uap, REG_IMSC);

	return request_irq(uap->port.irq, pl011_int, IRQF_SHARED, "uart-pl011", uap);
}
/*
 * Enable interrupts, only timeouts when using DMA;
 * if the initial RX DMA job failed, start in interrupt mode
 * as well.
 */
static void pl011_enable_interrupts(struct uart_amba_port *uap)
{
	unsigned int i;

	spin_lock_irq(&uap->port.lock);

	/* Clear out any spuriously appearing RX interrupts */
	pl011_write(UART011_RTIS | UART011_RXIS, uap, REG_ICR);

	/*
	 * RXIS is asserted only when the RX FIFO transitions from below
	 * to above the trigger threshold. If the RX FIFO is already
	 * full to the threshold this can't happen and RXIS will now be
	 * stuck off. Drain the RX FIFO explicitly to fix this:
	 */
	for (i = 0; i < uap->fifosize * 2; ++i) {
		if (pl011_read(uap, REG_FR) & UART01x_FR_RXFE)
			break;

		pl011_read(uap, REG_DR);
	}

	uap->im = UART011_RTIM;
	if (!pl011_dma_rx_running(uap))
		uap->im |= UART011_RXIM;
	pl011_write(uap->im, uap, REG_IMSC);
	spin_unlock_irq(&uap->port.lock);
}
static int pl011_startup(struct uart_port *port)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);
	unsigned int cr;
	int retval;

	retval = pl011_hwinit(port);
	if (retval)
		goto clk_dis;

	retval = pl011_allocate_irq(uap);
	if (retval)
		goto clk_dis;

	pl011_write(uap->vendor->ifls, uap, REG_IFLS);

	spin_lock_irq(&uap->port.lock);

	cr = pl011_read(uap, REG_CR);
	cr &= UART011_CR_RTS | UART011_CR_DTR;
	cr |= UART01x_CR_UARTEN | UART011_CR_RXE;

	if (!(port->rs485.flags & SER_RS485_ENABLED))
		cr |= UART011_CR_TXE;

	pl011_write(cr, uap, REG_CR);

	spin_unlock_irq(&uap->port.lock);

	/*
	 * initialise the old status of the modem signals
	 */
	uap->old_status = pl011_read(uap, REG_FR) & UART01x_FR_MODEM_ANY;

	/* Startup DMA */
	pl011_dma_startup(uap);

	pl011_enable_interrupts(uap);

	return 0;

 clk_dis:
	clk_disable_unprepare(uap->clk);
	return retval;
}
static int sbsa_uart_startup(struct uart_port *port)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);
	int retval;

	retval = pl011_hwinit(port);
	if (retval)
		return retval;

	retval = pl011_allocate_irq(uap);
	if (retval)
		return retval;

	/* The SBSA UART does not support any modem status lines. */
	uap->old_status = 0;

	pl011_enable_interrupts(uap);

	return 0;
}
static void pl011_shutdown_channel(struct uart_amba_port *uap,
				   unsigned int lcrh)
{
	unsigned long val;

	val = pl011_read(uap, lcrh);
	val &= ~(UART01x_LCRH_BRK | UART01x_LCRH_FEN);
	pl011_write(val, uap, lcrh);
}
/*
 * disable the port. It should not disable RTS and DTR.
 * Also RTS and DTR state should be preserved to restore
 * it during startup().
 */
static void pl011_disable_uart(struct uart_amba_port *uap)
{
	unsigned int cr;

	uap->port.status &= ~(UPSTAT_AUTOCTS | UPSTAT_AUTORTS);
	spin_lock_irq(&uap->port.lock);
	cr = pl011_read(uap, REG_CR);
	cr &= UART011_CR_RTS | UART011_CR_DTR;
	cr |= UART01x_CR_UARTEN | UART011_CR_TXE;
	pl011_write(cr, uap, REG_CR);
	spin_unlock_irq(&uap->port.lock);

	/*
	 * disable break condition and fifos
	 */
	pl011_shutdown_channel(uap, REG_LCRH_RX);
	if (pl011_split_lcrh(uap))
		pl011_shutdown_channel(uap, REG_LCRH_TX);
}
static void pl011_disable_interrupts(struct uart_amba_port *uap)
{
	spin_lock_irq(&uap->port.lock);

	/* mask all interrupts and clear all pending ones */
	uap->im = 0;
	pl011_write(uap->im, uap, REG_IMSC);
	pl011_write(0xffff, uap, REG_ICR);

	spin_unlock_irq(&uap->port.lock);
}
static void pl011_shutdown(struct uart_port *port)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);

	pl011_disable_interrupts(uap);

	pl011_dma_shutdown(uap);

	if ((port->rs485.flags & SER_RS485_ENABLED) && uap->rs485_tx_started)
		pl011_rs485_tx_stop(uap);

	free_irq(uap->port.irq, uap);

	pl011_disable_uart(uap);

	/*
	 * Shut down the clock producer
	 */
	clk_disable_unprepare(uap->clk);
	/* Optionally let pins go into sleep states */
	pinctrl_pm_select_sleep_state(port->dev);

	if (dev_get_platdata(uap->port.dev)) {
		struct amba_pl011_data *plat;

		plat = dev_get_platdata(uap->port.dev);
		if (plat->exit)
			plat->exit();
	}

	if (uap->port.ops->flush_buffer)
		uap->port.ops->flush_buffer(port);
}
static void sbsa_uart_shutdown(struct uart_port *port)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);

	pl011_disable_interrupts(uap);

	free_irq(uap->port.irq, uap);

	if (uap->port.ops->flush_buffer)
		uap->port.ops->flush_buffer(port);
}
static void
pl011_setup_status_masks(struct uart_port *port, struct ktermios *termios)
{
	port->read_status_mask = UART011_DR_OE | 255;
	if (termios->c_iflag & INPCK)
		port->read_status_mask |= UART011_DR_FE | UART011_DR_PE;
	if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
		port->read_status_mask |= UART011_DR_BE;

	/*
	 * Characters to ignore
	 */
	port->ignore_status_mask = 0;
	if (termios->c_iflag & IGNPAR)
		port->ignore_status_mask |= UART011_DR_FE | UART011_DR_PE;
	if (termios->c_iflag & IGNBRK) {
		port->ignore_status_mask |= UART011_DR_BE;
		/*
		 * If we're ignoring parity and break indicators,
		 * ignore overruns too (for real raw support).
		 */
		if (termios->c_iflag & IGNPAR)
			port->ignore_status_mask |= UART011_DR_OE;
	}

	/*
	 * Ignore all characters if CREAD is not set.
	 */
	if ((termios->c_cflag & CREAD) == 0)
		port->ignore_status_mask |= UART_DUMMY_DR_RX;
}
static void
pl011_set_termios(struct uart_port *port, struct ktermios *termios,
		  struct ktermios *old)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);
	unsigned int lcr_h, old_cr;
	unsigned long flags;
	unsigned int baud, quot, clkdiv;
	unsigned int bits;

	if (uap->vendor->oversampling)
		clkdiv = 8;
	else
		clkdiv = 16;

	/*
	 * Ask the core to calculate the divisor for us.
	 */
	baud = uart_get_baud_rate(port, termios, old, 0,
				  port->uartclk / clkdiv);
#ifdef CONFIG_DMA_ENGINE
	/*
	 * Adjust RX DMA polling rate with baud rate if not specified.
	 */
	if (uap->dmarx.auto_poll_rate)
		uap->dmarx.poll_rate = DIV_ROUND_UP(10000000, baud);
#endif
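	/*
	 * The h/w divisor is held in 64ths of the 16x reference clock: IBRD
	 * gets quot >> 6 and FBRD the low six bits (see the REG_FBRD/REG_IBRD
	 * writes below), so quot = uartclk * 4 / baud for 16x sampling and
	 * uartclk * 8 / baud when the vendor oversamples by 8.
	 */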
	if (baud > port->uartclk/16)
		quot = DIV_ROUND_CLOSEST(port->uartclk * 8, baud);
	else
		quot = DIV_ROUND_CLOSEST(port->uartclk * 4, baud);

	switch (termios->c_cflag & CSIZE) {
	case CS5:
		lcr_h = UART01x_LCRH_WLEN_5;
		break;
	case CS6:
		lcr_h = UART01x_LCRH_WLEN_6;
		break;
	case CS7:
		lcr_h = UART01x_LCRH_WLEN_7;
		break;
	default: /* CS8 */
		lcr_h = UART01x_LCRH_WLEN_8;
		break;
	}
	if (termios->c_cflag & CSTOPB)
		lcr_h |= UART01x_LCRH_STP2;
	if (termios->c_cflag & PARENB) {
		lcr_h |= UART01x_LCRH_PEN;
		if (!(termios->c_cflag & PARODD))
			lcr_h |= UART01x_LCRH_EPS;
		if (termios->c_cflag & CMSPAR)
			lcr_h |= UART011_LCRH_SPS;
	}
	if (uap->fifosize > 1)
		lcr_h |= UART01x_LCRH_FEN;

	bits = tty_get_frame_size(termios->c_cflag);
	spin_lock_irqsave(&port->lock, flags);

	/*
	 * Update the per-port timeout.
	 */
	uart_update_timeout(port, termios->c_cflag, baud);

	/*
	 * Calculate the approximate time it takes to transmit one character
	 * with the given baud rate. We use this as the poll interval when we
	 * wait for the tx queue to empty.
	 */
	uap->rs485_tx_drain_interval = DIV_ROUND_UP(bits * 1000 * 1000, baud);

	pl011_setup_status_masks(port, termios);

	if (UART_ENABLE_MS(port, termios->c_cflag))
		pl011_enable_ms(port);

	if (port->rs485.flags & SER_RS485_ENABLED)
		termios->c_cflag &= ~CRTSCTS;
	old_cr = pl011_read(uap, REG_CR);

	if (termios->c_cflag & CRTSCTS) {
		if (old_cr & UART011_CR_RTS)
			old_cr |= UART011_CR_RTSEN;

		old_cr |= UART011_CR_CTSEN;
		port->status |= UPSTAT_AUTOCTS | UPSTAT_AUTORTS;
	} else {
		old_cr &= ~(UART011_CR_CTSEN | UART011_CR_RTSEN);
		port->status &= ~(UPSTAT_AUTOCTS | UPSTAT_AUTORTS);
	}

	if (uap->vendor->oversampling) {
		if (baud > port->uartclk / 16)
			old_cr |= ST_UART011_CR_OVSFACT;
		else
			old_cr &= ~ST_UART011_CR_OVSFACT;
	}
	/*
	 * Workaround for the ST Micro oversampling variants to
	 * increase the bitrate slightly, by lowering the divisor,
	 * to avoid delayed sampling of start bit at high speeds,
	 * else we see data corruption.
	 */
	if (uap->vendor->oversampling) {
		if ((baud >= 3000000) && (baud < 3250000) && (quot > 1))
			quot -= 1;
		else if ((baud > 3250000) && (quot > 2))
			quot -= 2;
	}

	pl011_write(quot & 0x3f, uap, REG_FBRD);
	pl011_write(quot >> 6, uap, REG_IBRD);
	/*
	 * ----------v----------v----------v----------v-----
	 * NOTE: REG_LCRH_TX and REG_LCRH_RX MUST BE WRITTEN AFTER
	 * REG_FBRD & REG_IBRD.
	 * ----------^----------^----------^----------^-----
	 */
	pl011_write_lcr_h(uap, lcr_h);
	pl011_write(old_cr, uap, REG_CR);

	spin_unlock_irqrestore(&port->lock, flags);
}
static void
sbsa_uart_set_termios(struct uart_port *port, struct ktermios *termios,
		      struct ktermios *old)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);
	unsigned long flags;

	tty_termios_encode_baud_rate(termios, uap->fixed_baud, uap->fixed_baud);

	/* The SBSA UART only supports 8n1 without hardware flow control. */
	termios->c_cflag &= ~(CSIZE | CSTOPB | PARENB | PARODD);
	termios->c_cflag &= ~(CMSPAR | CRTSCTS);
	termios->c_cflag |= CS8 | CLOCAL;

	spin_lock_irqsave(&port->lock, flags);
	uart_update_timeout(port, CS8, uap->fixed_baud);
	pl011_setup_status_masks(port, termios);
	spin_unlock_irqrestore(&port->lock, flags);
}
static const char *pl011_type(struct uart_port *port)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);
	return uap->port.type == PORT_AMBA ? uap->type : NULL;
}
/*
 * Configure/autoconfigure the port.
 */
static void pl011_config_port(struct uart_port *port, int flags)
{
	if (flags & UART_CONFIG_TYPE)
		port->type = PORT_AMBA;
}
/*
 * verify the new serial_struct (for TIOCSSERIAL).
 */
static int pl011_verify_port(struct uart_port *port, struct serial_struct *ser)
{
	int ret = 0;

	if (ser->type != PORT_UNKNOWN && ser->type != PORT_AMBA)
		ret = -EINVAL;
	if (ser->irq < 0 || ser->irq >= nr_irqs)
		ret = -EINVAL;
	if (ser->baud_base < 9600)
		ret = -EINVAL;
	if (port->mapbase != (unsigned long) ser->iomem_base)
		ret = -EINVAL;
	return ret;
}
static int pl011_rs485_config(struct uart_port *port,
			      struct serial_rs485 *rs485)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);

	if (port->rs485.flags & SER_RS485_ENABLED)
		pl011_rs485_tx_stop(uap);

	/* Make sure auto RTS is disabled */
	if (rs485->flags & SER_RS485_ENABLED) {
		u32 cr = pl011_read(uap, REG_CR);

		cr &= ~UART011_CR_RTSEN;
		pl011_write(cr, uap, REG_CR);
		port->status &= ~UPSTAT_AUTORTS;
	}

	return 0;
}
static const struct uart_ops amba_pl011_pops = {
	.tx_empty	= pl011_tx_empty,
	.set_mctrl	= pl011_set_mctrl,
	.get_mctrl	= pl011_get_mctrl,
	.stop_tx	= pl011_stop_tx,
	.start_tx	= pl011_start_tx,
	.stop_rx	= pl011_stop_rx,
	.enable_ms	= pl011_enable_ms,
	.break_ctl	= pl011_break_ctl,
	.startup	= pl011_startup,
	.shutdown	= pl011_shutdown,
	.flush_buffer	= pl011_dma_flush_buffer,
	.set_termios	= pl011_set_termios,
	.type		= pl011_type,
	.config_port	= pl011_config_port,
	.verify_port	= pl011_verify_port,
#ifdef CONFIG_CONSOLE_POLL
	.poll_init	= pl011_hwinit,
	.poll_get_char	= pl011_get_poll_char,
	.poll_put_char	= pl011_put_poll_char,
#endif
};
static void sbsa_uart_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
}

static unsigned int sbsa_uart_get_mctrl(struct uart_port *port)
{
	return 0;
}
static const struct uart_ops sbsa_uart_pops = {
	.tx_empty	= pl011_tx_empty,
	.set_mctrl	= sbsa_uart_set_mctrl,
	.get_mctrl	= sbsa_uart_get_mctrl,
	.stop_tx	= pl011_stop_tx,
	.start_tx	= pl011_start_tx,
	.stop_rx	= pl011_stop_rx,
	.startup	= sbsa_uart_startup,
	.shutdown	= sbsa_uart_shutdown,
	.set_termios	= sbsa_uart_set_termios,
	.type		= pl011_type,
	.config_port	= pl011_config_port,
	.verify_port	= pl011_verify_port,
#ifdef CONFIG_CONSOLE_POLL
	.poll_init	= pl011_hwinit,
	.poll_get_char	= pl011_get_poll_char,
	.poll_put_char	= pl011_put_poll_char,
#endif
};
static struct uart_amba_port *amba_ports[UART_NR];
#ifdef CONFIG_SERIAL_AMBA_PL011_CONSOLE

static void pl011_console_putchar(struct uart_port *port, unsigned char ch)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);

	while (pl011_read(uap, REG_FR) & UART01x_FR_TXFF)
		cpu_relax();
	pl011_write(ch, uap, REG_DR);
}
static void
pl011_console_write(struct console *co, const char *s, unsigned int count)
{
	struct uart_amba_port *uap = amba_ports[co->index];
	unsigned int old_cr = 0, new_cr;
	unsigned long flags;
	int locked = 1;

	clk_enable(uap->clk);

	local_irq_save(flags);
	if (uap->port.sysrq)
		locked = 0;
	else if (oops_in_progress)
		locked = spin_trylock(&uap->port.lock);
	else
		spin_lock(&uap->port.lock);

	/*
	 * First save the CR, then disable the interrupts.
	 */
	if (!uap->vendor->always_enabled) {
		old_cr = pl011_read(uap, REG_CR);
		new_cr = old_cr & ~UART011_CR_CTSEN;
		new_cr |= UART01x_CR_UARTEN | UART011_CR_TXE;
		pl011_write(new_cr, uap, REG_CR);
	}

	uart_console_write(&uap->port, s, count, pl011_console_putchar);

	/*
	 * Finally, wait for the transmitter to become empty and restore the
	 * CR. Allow feature register bits to be inverted to work around
	 * errata.
	 */
	while ((pl011_read(uap, REG_FR) ^ uap->vendor->inv_fr)
						& uap->vendor->fr_busy)
		cpu_relax();
	if (!uap->vendor->always_enabled)
		pl011_write(old_cr, uap, REG_CR);

	if (locked)
		spin_unlock(&uap->port.lock);
	local_irq_restore(flags);

	clk_disable(uap->clk);
}

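/*
 * Read back the baud rate, parity and word length from the hardware,
 * for the case where the console is started without explicit options.
 * The baud rate follows from the integer/fractional divisors:
 * baud = uartclk * 4 / (64 * ibrd + fbrd).
 */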
static void pl011_console_get_options(struct uart_amba_port *uap, int *baud,
				      int *parity, int *bits)
{
	if (pl011_read(uap, REG_CR) & UART01x_CR_UARTEN) {
		unsigned int lcr_h, ibrd, fbrd;

		lcr_h = pl011_read(uap, REG_LCRH_TX);

		*parity = 'n';
		if (lcr_h & UART01x_LCRH_PEN) {
			if (lcr_h & UART01x_LCRH_EPS)
				*parity = 'e';
			else
				*parity = 'o';
		}

		if ((lcr_h & 0x60) == UART01x_LCRH_WLEN_7)
			*bits = 7;
		else
			*bits = 8;

		ibrd = pl011_read(uap, REG_IBRD);
		fbrd = pl011_read(uap, REG_FBRD);

		*baud = uap->port.uartclk * 4 / (64 * ibrd + fbrd);

		if (uap->vendor->oversampling) {
			if (pl011_read(uap, REG_CR)
				  & ST_UART011_CR_OVSFACT)
				*baud *= 2;
		}
	}
}

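/*
 * Console setup. With fixed-option hardware (SBSA) the stored baud
 * rate wins; otherwise options come from the command line or, failing
 * that, are read back from the hardware.
 */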
static int pl011_console_setup(struct console *co, char *options)
{
	struct uart_amba_port *uap;
	int baud = 38400;
	int bits = 8;
	int parity = 'n';
	int flow = 'n';
	int ret;

	/*
	 * Check whether an invalid uart number has been specified, and
	 * if so, search for the first available port that does have
	 * console support.
	 */
	if (co->index >= UART_NR)
		co->index = 0;
	uap = amba_ports[co->index];
	if (!uap)
		return -ENODEV;

	/* Allow pins to be muxed in and configured */
	pinctrl_pm_select_default_state(uap->port.dev);

	ret = clk_prepare(uap->clk);
	if (ret)
		return ret;

	if (dev_get_platdata(uap->port.dev)) {
		struct amba_pl011_data *plat;

		plat = dev_get_platdata(uap->port.dev);
		if (plat->init)
			plat->init();
	}

	uap->port.uartclk = clk_get_rate(uap->clk);

	if (uap->vendor->fixed_options) {
		baud = uap->fixed_baud;
	} else {
		if (options)
			uart_parse_options(options,
					   &baud, &parity, &bits, &flow);
		else
			pl011_console_get_options(uap, &baud, &parity, &bits);
	}

	return uart_set_options(&uap->port, co, baud, parity, bits, flow);
}

/**
 * pl011_console_match - non-standard console matching
 * @co:      registering console
 * @name:    name from console command line
 * @idx:     index from console command line
 * @options: ptr to option string from console command line
 *
 * Only attempts to match console command lines of the form:
 *     console=pl011,mmio|mmio32,<addr>[,<options>]
 *     console=pl011,0x<addr>[,<options>]
 * This form is used to register an initial earlycon boot console and
 * replace it with the amba_console at pl011 driver init.
 *
 * Performs console setup for a match (as required by the interface).
 * If no <options> are specified, then assume the h/w is already set up.
 *
 * Returns 0 if the console matches; otherwise non-zero to use default matching.
 */
static int pl011_console_match(struct console *co, char *name, int idx,
			       char *options)
{
	unsigned char iotype;
	resource_size_t addr;
	int i;

	/*
	 * Systems affected by the Qualcomm Technologies QDF2400 E44 erratum
	 * have a distinct console name, so make sure we check for that.
	 * The actual implementation of the erratum occurs in the probe
	 * function.
	 */
	if ((strcmp(name, "qdf2400_e44") != 0) && (strcmp(name, "pl011") != 0))
		return -ENODEV;

	if (uart_parse_earlycon(options, &iotype, &addr, &options))
		return -ENODEV;

	if (iotype != UPIO_MEM && iotype != UPIO_MEM32)
		return -ENODEV;

	/* try to match the port specified on the command line */
	for (i = 0; i < ARRAY_SIZE(amba_ports); i++) {
		struct uart_port *port;

		if (!amba_ports[i])
			continue;

		port = &amba_ports[i]->port;

		if (port->mapbase != addr)
			continue;

		co->index = i;
		port->cons = co;
		return pl011_console_setup(co, options);
	}

	return -ENODEV;
}

static struct uart_driver amba_reg;
static struct console amba_console = {
	.name		= "ttyAMA",
	.write		= pl011_console_write,
	.device		= uart_console_device,
	.setup		= pl011_console_setup,
	.match		= pl011_console_match,
	.flags		= CON_PRINTBUFFER | CON_ANYTIME,
	.index		= -1,
	.data		= &amba_reg,
};

#define AMBA_CONSOLE	(&amba_console)

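/*
 * QDF2400 erratum 44: the BUSY flag cannot be trusted, so after each
 * character wait for the TX FIFO to report completely empty (TXFE)
 * instead.
 */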
static void qdf2400_e44_putc(struct uart_port *port, unsigned char c)
{
	while (readl(port->membase + UART01x_FR) & UART01x_FR_TXFF)
		cpu_relax();
	writel(c, port->membase + UART01x_DR);
	while (!(readl(port->membase + UART01x_FR) & UART011_FR_TXFE))
		cpu_relax();
}

static void qdf2400_e44_early_write(struct console *con, const char *s, unsigned n)
{
	struct earlycon_device *dev = con->data;

	uart_console_write(&dev->port, s, n, qdf2400_e44_putc);
}

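/*
 * Early console output: poll TXFF for FIFO space, write the character
 * with the configured access width, then wait for the UART to go idle.
 */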
static void pl011_putc(struct uart_port *port, unsigned char c)
{
	while (readl(port->membase + UART01x_FR) & UART01x_FR_TXFF)
		cpu_relax();
	if (port->iotype == UPIO_MEM32)
		writel(c, port->membase + UART01x_DR);
	else
		writeb(c, port->membase + UART01x_DR);
	while (readl(port->membase + UART01x_FR) & UART01x_FR_BUSY)
		cpu_relax();
}

static void pl011_early_write(struct console *con, const char *s, unsigned n)
{
	struct earlycon_device *dev = con->data;

	uart_console_write(&dev->port, s, n, pl011_putc);
}

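/* Polled early console input; only available with CONSOLE_POLL. */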
#ifdef CONFIG_CONSOLE_POLL
static int pl011_getc(struct uart_port *port)
{
	if (readl(port->membase + UART01x_FR) & UART01x_FR_RXFE)
		return NO_POLL_CHAR;

	if (port->iotype == UPIO_MEM32)
		return readl(port->membase + UART01x_DR);
	else
		return readb(port->membase + UART01x_DR);
}

static int pl011_early_read(struct console *con, char *s, unsigned int n)
{
	struct earlycon_device *dev = con->data;
	int ch, num_read = 0;

	while (num_read < n) {
		ch = pl011_getc(&dev->port);
		if (ch == NO_POLL_CHAR)
			break;

		s[num_read++] = ch;
	}

	return num_read;
}
#else
#define pl011_early_read NULL
#endif

/*
 * On non-ACPI systems, earlycon is enabled by specifying
 * "earlycon=pl011,<address>" on the kernel command line.
 *
 * On ACPI ARM64 systems, an "early" console is enabled via the SPCR table,
 * by specifying only "earlycon" on the command line. Because it requires
 * SPCR, the console starts after ACPI is parsed, which is later than a
 * traditional early console.
 *
 * To get the traditional early console that starts before ACPI is parsed,
 * specify the full "earlycon=pl011,<address>" option.
 */
static int __init pl011_early_console_setup(struct earlycon_device *device,
					    const char *opt)
{
	if (!device->port.membase)
		return -ENODEV;

	device->con->write = pl011_early_write;
	device->con->read = pl011_early_read;

	return 0;
}
OF_EARLYCON_DECLARE(pl011, "arm,pl011", pl011_early_console_setup);
OF_EARLYCON_DECLARE(pl011, "arm,sbsa-uart", pl011_early_console_setup);

#ifdef CONFIG_ACPI_SPCR_TABLE
/*
 * On Qualcomm Datacenter Technologies QDF2400 SOCs affected by
 * Erratum 44, traditional earlycon can be enabled by specifying
 * "earlycon=qdf2400_e44,<address>". Any options are ignored.
 *
 * Alternatively, you can just specify "earlycon", and the early console
 * will be enabled with the information from the SPCR table. In this
 * case, the SPCR code will detect the need for the E44 work-around,
 * and set the console name to "qdf2400_e44".
 */
static int __init
qdf2400_e44_early_console_setup(struct earlycon_device *device,
				const char *opt)
{
	if (!device->port.membase)
		return -ENODEV;

	device->con->write = qdf2400_e44_early_write;
	return 0;
}
EARLYCON_DECLARE(qdf2400_e44, qdf2400_e44_early_console_setup);
#endif

#else
#define AMBA_CONSOLE	NULL
#endif

static struct uart_driver amba_reg = {
	.owner			= THIS_MODULE,
	.driver_name		= "ttyAMA",
	.dev_name		= "ttyAMA",
	.major			= SERIAL_AMBA_MAJOR,
	.minor			= SERIAL_AMBA_MINOR,
	.nr			= UART_NR,
	.cons			= AMBA_CONSOLE,
};

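/*
 * Map a probed port to the line number requested by a "serial<n>"
 * alias in the device tree, falling back to the probe-order index when
 * no alias exists or the requested slot is already taken.
 */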
static int pl011_probe_dt_alias(int index, struct device *dev)
{
	struct device_node *np;
	static bool seen_dev_with_alias = false;
	static bool seen_dev_without_alias = false;
	int ret = index;

	if (!IS_ENABLED(CONFIG_OF))
		return ret;

	np = dev->of_node;
	if (!np)
		return ret;

	ret = of_alias_get_id(np, "serial");
	if (ret < 0) {
		seen_dev_without_alias = true;
		ret = index;
	} else {
		seen_dev_with_alias = true;
		if (ret >= ARRAY_SIZE(amba_ports) || amba_ports[ret] != NULL) {
			dev_warn(dev, "requested serial port %d not available.\n", ret);
			ret = index;
		}
	}

	if (seen_dev_with_alias && seen_dev_without_alias)
		dev_warn(dev, "aliased and non-aliased serial devices found in device tree. Serial port enumeration may be unpredictable.\n");

	return ret;
}

/* Also unregisters the driver if no more ports are left. */
static void pl011_unregister_port(struct uart_amba_port *uap)
{
	int i;
	bool busy = false;

	for (i = 0; i < ARRAY_SIZE(amba_ports); i++) {
		if (amba_ports[i] == uap)
			amba_ports[i] = NULL;
		else if (amba_ports[i])
			busy = true;
	}
	pl011_dma_remove(uap);
	if (!busy)
		uart_unregister_driver(&amba_reg);
}

static int pl011_find_free_port(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(amba_ports); i++)
		if (amba_ports[i] == NULL)
			return i;

	return -EBUSY;
}

static int pl011_get_rs485_mode(struct uart_amba_port *uap)
{
	struct uart_port *port = &uap->port;
	struct serial_rs485 *rs485 = &port->rs485;
	int ret;

	ret = uart_get_rs485_mode(port);
	if (ret)
		return ret;

	/* clamp the delays to [0, 100ms] */
	rs485->delay_rts_before_send = min(rs485->delay_rts_before_send, 100U);
	rs485->delay_rts_after_send = min(rs485->delay_rts_after_send, 100U);

	return 0;
}

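/* Common MMIO and uart_port setup shared by the AMBA and SBSA probes. */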
static int pl011_setup_port(struct device *dev, struct uart_amba_port *uap,
			    struct resource *mmiobase, int index)
{
	void __iomem *base;
	int ret;

	base = devm_ioremap_resource(dev, mmiobase);
	if (IS_ERR(base))
		return PTR_ERR(base);

	index = pl011_probe_dt_alias(index, dev);

	uap->port.dev = dev;
	uap->port.mapbase = mmiobase->start;
	uap->port.membase = base;
	uap->port.fifosize = uap->fifosize;
	uap->port.has_sysrq = IS_ENABLED(CONFIG_SERIAL_AMBA_PL011_CONSOLE);
	uap->port.flags = UPF_BOOT_AUTOCONF;
	uap->port.line = index;

	ret = pl011_get_rs485_mode(uap);
	if (ret)
		return ret;

	amba_ports[index] = uap;

	return 0;
}

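/*
 * Register a port with the serial core. The uart_driver itself is
 * registered lazily, when the first port appears.
 */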
static int pl011_register_port(struct uart_amba_port *uap)
{
	int ret, i;

	/* Ensure interrupts from this UART are masked and cleared */
	pl011_write(0, uap, REG_IMSC);
	pl011_write(0xffff, uap, REG_ICR);

	if (!amba_reg.state) {
		ret = uart_register_driver(&amba_reg);
		if (ret < 0) {
			dev_err(uap->port.dev,
				"Failed to register AMBA-PL011 driver\n");
			for (i = 0; i < ARRAY_SIZE(amba_ports); i++)
				if (amba_ports[i] == uap)
					amba_ports[i] = NULL;
			return ret;
		}
	}

	ret = uart_add_one_port(&amba_reg, &uap->port);
	if (ret)
		pl011_unregister_port(uap);

	return ret;
}

static int pl011_probe(struct amba_device *dev, const struct amba_id *id)
{
	struct uart_amba_port *uap;
	struct vendor_data *vendor = id->data;
	int portnr, ret;

	portnr = pl011_find_free_port();
	if (portnr < 0)
		return portnr;

	uap = devm_kzalloc(&dev->dev, sizeof(struct uart_amba_port),
			   GFP_KERNEL);
	if (!uap)
		return -ENOMEM;

	uap->clk = devm_clk_get(&dev->dev, NULL);
	if (IS_ERR(uap->clk))
		return PTR_ERR(uap->clk);

	uap->reg_offset = vendor->reg_offset;
	uap->vendor = vendor;
	uap->fifosize = vendor->get_fifosize(dev);
	uap->port.iotype = vendor->access_32b ? UPIO_MEM32 : UPIO_MEM;
	uap->port.irq = dev->irq[0];
	uap->port.ops = &amba_pl011_pops;
	uap->port.rs485_config = pl011_rs485_config;
	snprintf(uap->type, sizeof(uap->type), "PL011 rev%u", amba_rev(dev));

	ret = pl011_setup_port(&dev->dev, uap, &dev->res, portnr);
	if (ret)
		return ret;

	amba_set_drvdata(dev, uap);

	return pl011_register_port(uap);
}

static void pl011_remove(struct amba_device *dev)
{
	struct uart_amba_port *uap = amba_get_drvdata(dev);

	uart_remove_one_port(&amba_reg, &uap->port);
	pl011_unregister_port(uap);
}

#ifdef CONFIG_PM_SLEEP
static int pl011_suspend(struct device *dev)
{
	struct uart_amba_port *uap = dev_get_drvdata(dev);

	if (!uap)
		return -EINVAL;

	return uart_suspend_port(&amba_reg, &uap->port);
}

static int pl011_resume(struct device *dev)
{
	struct uart_amba_port *uap = dev_get_drvdata(dev);

	if (!uap)
		return -EINVAL;

	return uart_resume_port(&amba_reg, &uap->port);
}
#endif

static SIMPLE_DEV_PM_OPS(pl011_dev_pm_ops, pl011_suspend, pl011_resume);

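/*
 * SBSA UARTs run at a fixed baud rate: it comes from the mandatory
 * "current-speed" DT property, or defaults to 115200 when probed via
 * ACPI.
 */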
static int sbsa_uart_probe(struct platform_device *pdev)
{
	struct uart_amba_port *uap;
	struct resource *r;
	int portnr, ret;
	int baudrate;

	/*
	 * Check the mandatory baud rate parameter in the DT node early
	 * so that we can easily exit with the error.
	 */
	if (pdev->dev.of_node) {
		struct device_node *np = pdev->dev.of_node;

		ret = of_property_read_u32(np, "current-speed", &baudrate);
		if (ret)
			return ret;
	} else {
		baudrate = 115200;
	}

	portnr = pl011_find_free_port();
	if (portnr < 0)
		return portnr;

	uap = devm_kzalloc(&pdev->dev, sizeof(struct uart_amba_port),
			   GFP_KERNEL);
	if (!uap)
		return -ENOMEM;

	ret = platform_get_irq(pdev, 0);
	if (ret < 0)
		return ret;
	uap->port.irq = ret;

#ifdef CONFIG_ACPI_SPCR_TABLE
	if (qdf2400_e44_present) {
		dev_info(&pdev->dev, "working around QDF2400 SoC erratum 44\n");
		uap->vendor = &vendor_qdt_qdf2400_e44;
	} else
#endif
		uap->vendor = &vendor_sbsa;

	uap->reg_offset = uap->vendor->reg_offset;
	uap->fifosize = 32;
	uap->port.iotype = uap->vendor->access_32b ? UPIO_MEM32 : UPIO_MEM;
	uap->port.ops = &sbsa_uart_pops;
	uap->fixed_baud = baudrate;

	snprintf(uap->type, sizeof(uap->type), "SBSA");

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);

	ret = pl011_setup_port(&pdev->dev, uap, r, portnr);
	if (ret)
		return ret;

	platform_set_drvdata(pdev, uap);

	return pl011_register_port(uap);
}

static int sbsa_uart_remove(struct platform_device *pdev)
{
	struct uart_amba_port *uap = platform_get_drvdata(pdev);

	uart_remove_one_port(&amba_reg, &uap->port);
	pl011_unregister_port(uap);
	return 0;
}

static const struct of_device_id sbsa_uart_of_match[] = {
	{ .compatible = "arm,sbsa-uart", },
	{},
};
MODULE_DEVICE_TABLE(of, sbsa_uart_of_match);

static const struct acpi_device_id __maybe_unused sbsa_uart_acpi_match[] = {
	{ "ARMH0011", 0 },
	{ "ARMHB000", 0 },
	{},
};
MODULE_DEVICE_TABLE(acpi, sbsa_uart_acpi_match);

static struct platform_driver arm_sbsa_uart_platform_driver = {
	.probe		= sbsa_uart_probe,
	.remove		= sbsa_uart_remove,
	.driver	= {
		.name	= "sbsa-uart",
		.pm	= &pl011_dev_pm_ops,
		.of_match_table = of_match_ptr(sbsa_uart_of_match),
		.acpi_match_table = ACPI_PTR(sbsa_uart_acpi_match),
		.suppress_bind_attrs = IS_BUILTIN(CONFIG_SERIAL_AMBA_PL011),
	},
};

static const struct amba_id pl011_ids[] = {
	{
		.id	= 0x00041011,
		.mask	= 0x000fffff,
		.data	= &vendor_arm,
	},
	{
		.id	= 0x00380802,
		.mask	= 0x00ffffff,
		.data	= &vendor_st,
	},
	{ 0, 0 },
};

MODULE_DEVICE_TABLE(amba, pl011_ids);

static struct amba_driver pl011_driver = {
	.drv = {
		.name	= "uart-pl011",
		.pm	= &pl011_dev_pm_ops,
		.suppress_bind_attrs = IS_BUILTIN(CONFIG_SERIAL_AMBA_PL011),
	},
	.id_table	= pl011_ids,
	.probe		= pl011_probe,
	.remove		= pl011_remove,
};

static int __init pl011_init(void)
{
	printk(KERN_INFO "Serial: AMBA PL011 UART driver\n");

	if (platform_driver_register(&arm_sbsa_uart_platform_driver))
		pr_warn("could not register SBSA UART platform driver\n");
	return amba_driver_register(&pl011_driver);
}

static void __exit pl011_exit(void)
{
	platform_driver_unregister(&arm_sbsa_uart_platform_driver);
	amba_driver_unregister(&pl011_driver);
}

/*
 * While this can be a module, if built-in it's most likely the console,
 * so let's leave module_exit() in place but move the init to an earlier
 * point (arch_initcall) so a built-in console comes up sooner.
 */
arch_initcall(pl011_init);
module_exit(pl011_exit);

MODULE_AUTHOR("ARM Ltd/Deep Blue Solutions Ltd");
MODULE_DESCRIPTION("ARM AMBA serial port driver");
MODULE_LICENSE("GPL");