// SPDX-License-Identifier: GPL-2.0-only
/*
 * AMD Passthrough DMA device driver
 * -- Based on the CCP driver
 *
 * Copyright (C) 2016,2021 Advanced Micro Devices, Inc.
 *
 * Author: Sanjay R Mehta <sanju.mehta@amd.com>
 * Author: Gary R Hook <gary.hook@amd.com>
 */

#include "ptdma.h"
#include "../dmaengine.h"
#include "../virt-dma.h"

static inline struct pt_dma_chan *to_pt_chan(struct dma_chan *dma_chan)
{
	return container_of(dma_chan, struct pt_dma_chan, vc.chan);
}

static inline struct pt_dma_desc *to_pt_desc(struct virt_dma_desc *vd)
{
	return container_of(vd, struct pt_dma_desc, vd);
}

static void pt_free_chan_resources(struct dma_chan *dma_chan)
{
	struct pt_dma_chan *chan = to_pt_chan(dma_chan);

	vchan_free_chan_resources(&chan->vc);
}

static void pt_synchronize(struct dma_chan *dma_chan)
{
	struct pt_dma_chan *chan = to_pt_chan(dma_chan);

	vchan_synchronize(&chan->vc);
}

static void pt_do_cleanup(struct virt_dma_desc *vd)
{
	struct pt_dma_desc *desc = to_pt_desc(vd);
	struct pt_device *pt = desc->pt;

	kmem_cache_free(pt->dma_desc_cache, desc);
}
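
/*
 * Hand a prepared passthrough command to the device's command queue.
 * The descriptor is marked as issued before the command is submitted so
 * that the completion path can tell already-queued work from new work.
 */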
static int pt_dma_start_desc(struct pt_dma_desc *desc)
{
	struct pt_passthru_engine *pt_engine;
	struct pt_device *pt;
	struct pt_cmd *pt_cmd;
	struct pt_cmd_queue *cmd_q;

	desc->issued_to_hw = 1;

	pt_cmd = &desc->pt_cmd;
	pt = pt_cmd->pt;
	cmd_q = &pt->cmd_q;
	pt_engine = &pt_cmd->passthru;

	pt->tdata.cmd = pt_cmd;

	/* Execute the command */
	pt_cmd->ret = pt_core_perform_passthru(cmd_q, pt_engine);

	return 0;
}

static struct pt_dma_desc *pt_next_dma_desc(struct pt_dma_chan *chan)
{
	/* Get the next DMA descriptor on the active list */
	struct virt_dma_desc *vd = vchan_next_desc(&chan->vc);

	return vd ? to_pt_desc(vd) : NULL;
}
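
/*
 * Walk the channel's active list: complete every descriptor that has
 * already been issued to the hardware (invoking its callback and running
 * dependencies), and return the first descriptor that still needs to be
 * submitted, or NULL when the list is empty.
 */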
static struct pt_dma_desc *pt_handle_active_desc(struct pt_dma_chan *chan,
						 struct pt_dma_desc *desc)
{
	struct dma_async_tx_descriptor *tx_desc;
	struct virt_dma_desc *vd;
	unsigned long flags;

	/* Loop over descriptors until one is found with commands */
	do {
		if (desc) {
			if (!desc->issued_to_hw) {
				/* No errors, keep going */
				if (desc->status != DMA_ERROR)
					return desc;
			}
			tx_desc = &desc->vd.tx;
			vd = &desc->vd;
		} else {
			tx_desc = NULL;
		}

		spin_lock_irqsave(&chan->vc.lock, flags);
		if (desc) {
			if (desc->status != DMA_ERROR)
				desc->status = DMA_COMPLETE;
			dma_cookie_complete(tx_desc);
			dma_descriptor_unmap(tx_desc);
			list_del(&desc->vd.node);
		}
		desc = pt_next_dma_desc(chan);
		spin_unlock_irqrestore(&chan->vc.lock, flags);

		if (tx_desc) {
			dmaengine_desc_get_callback_invoke(tx_desc, NULL);
			dma_run_dependencies(tx_desc);
			vchan_vdesc_fini(vd);
		}
	} while (desc);

	return NULL;
}
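
/*
 * Completion callback registered with each passthrough command.  It runs
 * when the hardware reports a result: the finished descriptor is completed
 * via pt_handle_active_desc() and, if another descriptor is waiting, it is
 * started immediately so the queue keeps draining.
 */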
static void pt_cmd_callback(void *data, int err)
{
	struct pt_dma_desc *desc = data;
	struct dma_chan *dma_chan;
	struct pt_dma_chan *chan;
	int ret;

	if (err == -EINPROGRESS)
		return;

	dma_chan = desc->vd.tx.chan;
	chan = to_pt_chan(dma_chan);
	if (err)
		desc->status = DMA_ERROR;

	while (true) {
		/* Check for DMA descriptor completion */
		desc = pt_handle_active_desc(chan, desc);

		/* Don't submit cmd if no descriptor or DMA is paused */
		if (!desc)
			break;

		ret = pt_dma_start_desc(desc);
		if (!ret)
			break;

		desc->status = DMA_ERROR;
	}
}
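
/*
 * Allocate a descriptor from the per-device slab cache and hand it to the
 * virt-dma framework.  The allocation uses GFP_NOWAIT since the prep
 * routines that call this may not sleep.
 */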
static struct pt_dma_desc *pt_alloc_dma_desc(struct pt_dma_chan *chan,
					     unsigned long flags)
{
	struct pt_dma_desc *desc;

	desc = kmem_cache_zalloc(chan->pt->dma_desc_cache, GFP_NOWAIT);
	if (!desc)
		return NULL;

	vchan_tx_prep(&chan->vc, &desc->vd, flags);
	desc->pt = chan->pt;
	desc->issued_to_hw = 0;
	desc->status = DMA_IN_PROGRESS;

	return desc;
}

static struct pt_dma_desc *pt_create_desc(struct dma_chan *dma_chan,
					  dma_addr_t dst,
					  dma_addr_t src,
					  unsigned int len,
					  unsigned long flags)
{
	struct pt_dma_chan *chan = to_pt_chan(dma_chan);
	struct pt_passthru_engine *pt_engine;
	struct pt_dma_desc *desc;
	struct pt_cmd *pt_cmd;

	desc = pt_alloc_dma_desc(chan, flags);
	if (!desc)
		return NULL;

	pt_cmd = &desc->pt_cmd;
	pt_cmd->pt = chan->pt;
	pt_engine = &pt_cmd->passthru;
	pt_cmd->engine = PT_ENGINE_PASSTHRU;
	pt_engine->src_dma = src;
	pt_engine->dst_dma = dst;
	pt_engine->src_len = len;
	pt_cmd->pt_cmd_callback = pt_cmd_callback;
	pt_cmd->data = desc;
	desc->len = len;

	return desc;
}

static struct dma_async_tx_descriptor *
pt_prep_dma_memcpy(struct dma_chan *dma_chan, dma_addr_t dst,
		   dma_addr_t src, size_t len, unsigned long flags)
{
	struct pt_dma_desc *desc;

	desc = pt_create_desc(dma_chan, dst, src, len, flags);
	if (!desc)
		return NULL;
	return &desc->vd.tx;
}
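
/*
 * Illustrative client-side usage (a sketch, not part of this driver):
 * how a dmaengine consumer could exercise the MEMCPY capability exported
 * above.  DMA mapping of the buffers and error handling are elided;
 * src_dma, dst_dma and len stand for already-mapped DMA addresses and a
 * transfer length.
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *	struct dma_async_tx_descriptor *tx;
 *	dma_cookie_t cookie;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_MEMCPY, mask);
 *	chan = dma_request_channel(mask, NULL, NULL);
 *	tx = dmaengine_prep_dma_memcpy(chan, dst_dma, src_dma, len,
 *				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 *	cookie = dmaengine_submit(tx);
 *	dma_async_issue_pending(chan);
 *	dma_sync_wait(chan, cookie);
 *	dma_release_channel(chan);
 */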

static struct dma_async_tx_descriptor *
pt_prep_dma_interrupt(struct dma_chan *dma_chan, unsigned long flags)
{
	struct pt_dma_chan *chan = to_pt_chan(dma_chan);
	struct pt_dma_desc *desc;

	desc = pt_alloc_dma_desc(chan, flags);
	if (!desc)
		return NULL;
	return &desc->vd.tx;
}

static void pt_issue_pending(struct dma_chan *dma_chan)
{
	struct pt_dma_chan *chan = to_pt_chan(dma_chan);
	struct pt_dma_desc *desc;
	unsigned long flags;

	spin_lock_irqsave(&chan->vc.lock, flags);
	vchan_issue_pending(&chan->vc);
	desc = pt_next_dma_desc(chan);
	spin_unlock_irqrestore(&chan->vc.lock, flags);

	/* If there was nothing active, start processing */
	if (desc)
		pt_cmd_callback(desc, 0);
}
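
/*
 * Pause and resume simply gate the hardware command queue via
 * pt_stop_queue() and pt_start_queue(); pt_resume() also restarts any
 * descriptor that was left on the active list.
 */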
static int pt_pause(struct dma_chan *dma_chan)
{
	struct pt_dma_chan *chan = to_pt_chan(dma_chan);
	unsigned long flags;

	spin_lock_irqsave(&chan->vc.lock, flags);
	pt_stop_queue(&chan->pt->cmd_q);
	spin_unlock_irqrestore(&chan->vc.lock, flags);

	return 0;
}

static int pt_resume(struct dma_chan *dma_chan)
{
	struct pt_dma_chan *chan = to_pt_chan(dma_chan);
	struct pt_dma_desc *desc = NULL;
	unsigned long flags;

	spin_lock_irqsave(&chan->vc.lock, flags);
	pt_start_queue(&chan->pt->cmd_q);
	desc = pt_next_dma_desc(chan);
	spin_unlock_irqrestore(&chan->vc.lock, flags);

	/* If there was something active, re-start */
	if (desc)
		pt_cmd_callback(desc, 0);

	return 0;
}

static int pt_terminate_all(struct dma_chan *dma_chan)
{
	struct pt_dma_chan *chan = to_pt_chan(dma_chan);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&chan->vc.lock, flags);
	vchan_get_all_descriptors(&chan->vc, &head);
	spin_unlock_irqrestore(&chan->vc.lock, flags);

	vchan_dma_desc_free_list(&chan->vc, &head);
	vchan_free_chan_resources(&chan->vc);

	return 0;
}
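
/*
 * Register a single MEMCPY/INTERRUPT-capable channel with the dmaengine
 * core.  The channel is marked DMA_PRIVATE because it is intended for
 * exclusive use by consumers such as AMD NTB, and descriptors are backed
 * by a per-device slab cache.
 */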
int pt_dmaengine_register(struct pt_device *pt)
{
	struct pt_dma_chan *chan;
	struct dma_device *dma_dev = &pt->dma_dev;
	char *cmd_cache_name;
	char *desc_cache_name;
	int ret;

	pt->pt_dma_chan = devm_kzalloc(pt->dev, sizeof(*pt->pt_dma_chan),
				       GFP_KERNEL);
	if (!pt->pt_dma_chan)
		return -ENOMEM;

	cmd_cache_name = devm_kasprintf(pt->dev, GFP_KERNEL,
					"%s-dmaengine-cmd-cache",
					dev_name(pt->dev));
	if (!cmd_cache_name)
		return -ENOMEM;

	desc_cache_name = devm_kasprintf(pt->dev, GFP_KERNEL,
					 "%s-dmaengine-desc-cache",
					 dev_name(pt->dev));
	if (!desc_cache_name) {
		ret = -ENOMEM;
		goto err_cache;
	}

	pt->dma_desc_cache = kmem_cache_create(desc_cache_name,
					       sizeof(struct pt_dma_desc), 0,
					       SLAB_HWCACHE_ALIGN, NULL);
	if (!pt->dma_desc_cache) {
		ret = -ENOMEM;
		goto err_cache;
	}

	dma_dev->dev = pt->dev;
	dma_dev->src_addr_widths = DMA_SLAVE_BUSWIDTH_64_BYTES;
	dma_dev->dst_addr_widths = DMA_SLAVE_BUSWIDTH_64_BYTES;
	dma_dev->directions = DMA_MEM_TO_MEM;
	dma_dev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
	dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
	dma_cap_set(DMA_INTERRUPT, dma_dev->cap_mask);

	/*
	 * PTDMA is intended to be used with the AMD NTB devices, hence
	 * marking it as DMA_PRIVATE.
	 */
	dma_cap_set(DMA_PRIVATE, dma_dev->cap_mask);

	INIT_LIST_HEAD(&dma_dev->channels);

	chan = pt->pt_dma_chan;
	chan->pt = pt;

	/* Set base and prep routines */
	dma_dev->device_free_chan_resources = pt_free_chan_resources;
	dma_dev->device_prep_dma_memcpy = pt_prep_dma_memcpy;
	dma_dev->device_prep_dma_interrupt = pt_prep_dma_interrupt;
	dma_dev->device_issue_pending = pt_issue_pending;
	dma_dev->device_tx_status = dma_cookie_status;
	dma_dev->device_pause = pt_pause;
	dma_dev->device_resume = pt_resume;
	dma_dev->device_terminate_all = pt_terminate_all;
	dma_dev->device_synchronize = pt_synchronize;

	chan->vc.desc_free = pt_do_cleanup;
	vchan_init(&chan->vc, dma_dev);

	dma_set_mask_and_coherent(pt->dev, DMA_BIT_MASK(64));

	ret = dma_async_device_register(dma_dev);
	if (ret)
		goto err_reg;

	return 0;

err_reg:
	kmem_cache_destroy(pt->dma_desc_cache);

err_cache:
	kmem_cache_destroy(pt->dma_cmd_cache);

	return ret;
}

void pt_dmaengine_unregister(struct pt_device *pt)
{
	struct dma_device *dma_dev = &pt->dma_dev;

	dma_async_device_unregister(dma_dev);

	kmem_cache_destroy(pt->dma_desc_cache);
	kmem_cache_destroy(pt->dma_cmd_cache);
}