// SPDX-License-Identifier: GPL-2.0+
/*
 * 8250_dma.c - DMA Engine API support for 8250.c
 *
 * Copyright (C) 2013 Intel Corporation
 */
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/serial_reg.h>
#include <linux/dma-mapping.h>

#include "8250.h"
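
/*
 * TX completion callback. Runs in the dmaengine completion context, so it
 * must take the port lock itself before touching the circular buffer.
 */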
static void __dma_tx_complete(void *param)
{
	struct uart_8250_port	*p = param;
	struct uart_8250_dma	*dma = p->dma;
	struct circ_buf		*xmit = &p->port.state->xmit;
	unsigned long		flags;
	int			ret;

	dma_sync_single_for_cpu(dma->txchan->device->dev, dma->tx_addr,
				UART_XMIT_SIZE, DMA_TO_DEVICE);

	spin_lock_irqsave(&p->port.lock, flags);

	dma->tx_running = 0;

	xmit->tail += dma->tx_size;
	xmit->tail &= UART_XMIT_SIZE - 1;
	p->port.icount.tx += dma->tx_size;

	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
		uart_write_wakeup(&p->port);

	/* Restart the transfer; fall back to the THRI interrupt on failure */
	ret = serial8250_tx_dma(p);
	if (ret)
		serial8250_set_THRI(p);

	spin_unlock_irqrestore(&p->port.lock, flags);
}
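
/*
 * Push the data received by a completed (or paused) RX transfer to the tty
 * layer. Must be called with the port lock held; the DMA residue tells us
 * how many bytes actually arrived.
 */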
static void __dma_rx_complete(struct uart_8250_port *p)
{
	struct uart_8250_dma	*dma = p->dma;
	struct tty_port		*tty_port = &p->port.state->port;
	struct dma_tx_state	state;
	enum dma_status		dma_status;
	int			count;

	/*
	 * New DMA Rx can be started during the completion handler before it
	 * could acquire port's lock and it might still be ongoing. Don't do
	 * anything in that case.
	 */
	dma_status = dmaengine_tx_status(dma->rxchan, dma->rx_cookie, &state);
	if (dma_status == DMA_IN_PROGRESS)
		return;

	count = dma->rx_size - state.residue;

	tty_insert_flip_string(tty_port, dma->rx_buf, count);
	p->port.icount.rx += count;
	dma->rx_running = 0;

	tty_flip_buffer_push(tty_port);
}
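
/* dmaengine RX completion callback: grab the port lock and complete the Rx */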
static void dma_rx_complete(void *param)
{
	struct uart_8250_port *p = param;
	struct uart_8250_dma *dma = p->dma;
	unsigned long flags;

	spin_lock_irqsave(&p->port.lock, flags);
	if (dma->rx_running)
		__dma_rx_complete(p);
	spin_unlock_irqrestore(&p->port.lock, flags);
}
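
/*
 * Start a TX transfer covering the contiguous chunk of the circular xmit
 * buffer up to its wrap point. Called under the port lock. Returns 0 if a
 * transfer is already running or nothing needs to be sent; a nonzero return
 * tells the caller to fall back to THRI-driven PIO.
 */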
int serial8250_tx_dma(struct uart_8250_port *p)
{
	struct uart_8250_dma		*dma = p->dma;
	struct circ_buf			*xmit = &p->port.state->xmit;
	struct dma_async_tx_descriptor	*desc;
	struct uart_port		*up = &p->port;
	int ret;

	if (dma->tx_running) {
		if (up->x_char) {
			dmaengine_pause(dma->txchan);
			uart_xchar_out(up, UART_TX);
			dmaengine_resume(dma->txchan);
		}
		return 0;
	} else if (up->x_char) {
		uart_xchar_out(up, UART_TX);
	}

	if (uart_tx_stopped(&p->port) || uart_circ_empty(xmit)) {
		/* We have been called from __dma_tx_complete() */
		serial8250_rpm_put_tx(p);
		return 0;
	}

	dma->tx_size = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);

	desc = dmaengine_prep_slave_single(dma->txchan,
					   dma->tx_addr + xmit->tail,
					   dma->tx_size, DMA_MEM_TO_DEV,
					   DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc) {
		ret = -EBUSY;
		goto err;
	}

	dma->tx_running = 1;
	desc->callback = __dma_tx_complete;
	desc->callback_param = p;

	dma->tx_cookie = dmaengine_submit(desc);

	dma_sync_single_for_device(dma->txchan->device->dev, dma->tx_addr,
				   UART_XMIT_SIZE, DMA_TO_DEVICE);

	dma_async_issue_pending(dma->txchan);
	if (dma->tx_err) {
		dma->tx_err = 0;
		serial8250_clear_THRI(p);
	}
	return 0;
err:
	dma->tx_err = 1;
	return ret;
}
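
/*
 * Arm a single RX transfer into the coherent rx_buf. A no-op while a
 * transfer is already in flight.
 */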
int serial8250_rx_dma(struct uart_8250_port *p)
{
	struct uart_8250_dma		*dma = p->dma;
	struct dma_async_tx_descriptor	*desc;

	if (dma->rx_running)
		return 0;

	desc = dmaengine_prep_slave_single(dma->rxchan, dma->rx_addr,
					   dma->rx_size, DMA_DEV_TO_MEM,
					   DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc)
		return -EBUSY;

	dma->rx_running = 1;
	desc->callback = dma_rx_complete;
	desc->callback_param = p;

	dma->rx_cookie = dmaengine_submit(desc);

	dma_async_issue_pending(dma->rxchan);

	return 0;
}
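
/*
 * Complete an in-flight RX transfer early (e.g. on a receive timeout):
 * pause the channel so the residue is stable, push what has arrived so far,
 * then tear down the descriptor.
 */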
void serial8250_rx_dma_flush(struct uart_8250_port *p)
{
	struct uart_8250_dma *dma = p->dma;

	if (dma->rx_running) {
		dmaengine_pause(dma->rxchan);
		__dma_rx_complete(p);
		dmaengine_terminate_async(dma->rxchan);
	}
}
EXPORT_SYMBOL_GPL(serial8250_rx_dma_flush);
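
/*
 * Request and configure both channels and set up the DMA buffers. The RX
 * buffer is coherent because the device writes it asynchronously; TX reuses
 * the tty's circular buffer through a streaming mapping, which is why each
 * transfer is bracketed by dma_sync_single_for_{device,cpu}().
 */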
int serial8250_request_dma(struct uart_8250_port *p)
{
	struct uart_8250_dma	*dma = p->dma;
	phys_addr_t rx_dma_addr = dma->rx_dma_addr ?
				  dma->rx_dma_addr : p->port.mapbase;
	phys_addr_t tx_dma_addr = dma->tx_dma_addr ?
				  dma->tx_dma_addr : p->port.mapbase;
	dma_cap_mask_t		mask;
	struct dma_slave_caps	caps;
	int			ret;

	/* Default slave configuration parameters */
	dma->rxconf.direction		= DMA_DEV_TO_MEM;
	dma->rxconf.src_addr_width	= DMA_SLAVE_BUSWIDTH_1_BYTE;
	dma->rxconf.src_addr		= rx_dma_addr + UART_RX;

	dma->txconf.direction		= DMA_MEM_TO_DEV;
	dma->txconf.dst_addr_width	= DMA_SLAVE_BUSWIDTH_1_BYTE;
	dma->txconf.dst_addr		= tx_dma_addr + UART_TX;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* Get a channel for RX */
	dma->rxchan = dma_request_slave_channel_compat(mask,
						       dma->fn, dma->rx_param,
						       p->port.dev, "rx");
	if (!dma->rxchan)
		return -ENODEV;

	/* 8250 rx dma requires dmaengine driver to support pause/terminate */
	ret = dma_get_slave_caps(dma->rxchan, &caps);
	if (ret)
		goto release_rx;
	if (!caps.cmd_pause || !caps.cmd_terminate ||
	    caps.residue_granularity == DMA_RESIDUE_GRANULARITY_DESCRIPTOR) {
		ret = -EINVAL;
		goto release_rx;
	}

	dmaengine_slave_config(dma->rxchan, &dma->rxconf);

	/* Get a channel for TX */
	dma->txchan = dma_request_slave_channel_compat(mask,
						       dma->fn, dma->tx_param,
						       p->port.dev, "tx");
	if (!dma->txchan) {
		ret = -ENODEV;
		goto release_rx;
	}

	/* 8250 tx dma requires dmaengine driver to support terminate */
	ret = dma_get_slave_caps(dma->txchan, &caps);
	if (ret)
		goto err;
	if (!caps.cmd_terminate) {
		ret = -EINVAL;
		goto err;
	}

	dmaengine_slave_config(dma->txchan, &dma->txconf);

	/* RX buffer */
	if (!dma->rx_size)
		dma->rx_size = PAGE_SIZE;

	dma->rx_buf = dma_alloc_coherent(dma->rxchan->device->dev, dma->rx_size,
					 &dma->rx_addr, GFP_KERNEL);
	if (!dma->rx_buf) {
		ret = -ENOMEM;
		goto err;
	}

	/* TX buffer */
	dma->tx_addr = dma_map_single(dma->txchan->device->dev,
				      p->port.state->xmit.buf,
				      UART_XMIT_SIZE,
				      DMA_TO_DEVICE);
	if (dma_mapping_error(dma->txchan->device->dev, dma->tx_addr)) {
		dma_free_coherent(dma->rxchan->device->dev, dma->rx_size,
				  dma->rx_buf, dma->rx_addr);
		ret = -ENOMEM;
		goto err;
	}

	dev_dbg_ratelimited(p->port.dev, "got both dma channels\n");

	return 0;
err:
	dma_release_channel(dma->txchan);
release_rx:
	dma_release_channel(dma->rxchan);
	return ret;
}
EXPORT_SYMBOL_GPL(serial8250_request_dma);
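
/* Undo serial8250_request_dma(): stop both channels and free the buffers. */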
void serial8250_release_dma(struct uart_8250_port *p)
{
	struct uart_8250_dma *dma = p->dma;

	if (!dma)
		return;

	/* Release RX resources */
	dmaengine_terminate_sync(dma->rxchan);
	dma_free_coherent(dma->rxchan->device->dev, dma->rx_size, dma->rx_buf,
			  dma->rx_addr);
	dma_release_channel(dma->rxchan);
	dma->rxchan = NULL;

	/* Release TX resources */
	dmaengine_terminate_sync(dma->txchan);
	dma_unmap_single(dma->txchan->device->dev, dma->tx_addr,
			 UART_XMIT_SIZE, DMA_TO_DEVICE);
	dma_release_channel(dma->txchan);
	dma->txchan = NULL;
	dma->tx_running = 0;

	dev_dbg_ratelimited(p->port.dev, "dma channels released\n");
}
EXPORT_SYMBOL_GPL(serial8250_release_dma);