/*
 * AMD Cryptographic Coprocessor (CCP) driver
 *
 * Copyright (C) 2016 Advanced Micro Devices, Inc.
 *
 * Author: Gary R Hook <gary.hook@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/dmaengine.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/ccp.h>

#include "ccp-dev.h"
#include "../../dma/dmaengine.h"
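/*
 * Derive the DMA address width reported to the dmaengine core from the
 * device's DMA mask: an all-ones 64-bit mask wraps to zero when
 * incremented and is reported as 64 bits; otherwise the width is
 * fls64(mask + 1).
 */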
#define CCP_DMA_WIDTH(_mask)		\
({					\
	u64 mask = _mask + 1;		\
	(mask == 0) ? 64 : fls64(mask);	\
})
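/*
 * Descriptors move through four per-channel lists over their lifetime:
 * "created" (prepared but not yet submitted), "pending" (submitted via
 * tx_submit), "active" (commands being issued to the CCP) and
 * "complete" (finished, awaiting acknowledgment and cleanup).  The
 * helpers below tear down the command and descriptor objects on those
 * lists, returning them to their kmem caches.
 */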
static void ccp_free_cmd_resources(struct ccp_device *ccp,
				   struct list_head *list)
{
	struct ccp_dma_cmd *cmd, *ctmp;

	list_for_each_entry_safe(cmd, ctmp, list, entry) {
		list_del(&cmd->entry);
		kmem_cache_free(ccp->dma_cmd_cache, cmd);
	}
}
static void ccp_free_desc_resources(struct ccp_device *ccp,
				    struct list_head *list)
{
	struct ccp_dma_desc *desc, *dtmp;

	list_for_each_entry_safe(desc, dtmp, list, entry) {
		ccp_free_cmd_resources(ccp, &desc->active);
		ccp_free_cmd_resources(ccp, &desc->pending);

		list_del(&desc->entry);
		kmem_cache_free(ccp->dma_desc_cache, desc);
	}
}
static void ccp_free_chan_resources(struct dma_chan *dma_chan)
{
	struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
						 dma_chan);
	unsigned long flags;

	dev_dbg(chan->ccp->dev, "%s - chan=%p\n", __func__, chan);

	spin_lock_irqsave(&chan->lock, flags);

	ccp_free_desc_resources(chan->ccp, &chan->complete);
	ccp_free_desc_resources(chan->ccp, &chan->active);
	ccp_free_desc_resources(chan->ccp, &chan->pending);
	ccp_free_desc_resources(chan->ccp, &chan->created);

	spin_unlock_irqrestore(&chan->lock, flags);
}
static void ccp_cleanup_desc_resources(struct ccp_device *ccp,
				       struct list_head *list)
{
	struct ccp_dma_desc *desc, *dtmp;

	list_for_each_entry_safe_reverse(desc, dtmp, list, entry) {
		if (!async_tx_test_ack(&desc->tx_desc))
			continue;

		dev_dbg(ccp->dev, "%s - desc=%p\n", __func__, desc);

		ccp_free_cmd_resources(ccp, &desc->active);
		ccp_free_cmd_resources(ccp, &desc->pending);

		list_del(&desc->entry);
		kmem_cache_free(ccp->dma_desc_cache, desc);
	}
}
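/*
 * Tasklet entry point: reclaim completed descriptors.  Freeing is
 * deferred to tasklet context so it does not run from within the CCP
 * command completion callback itself.
 */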
static void ccp_do_cleanup(unsigned long data)
{
	struct ccp_dma_chan *chan = (struct ccp_dma_chan *)data;
	unsigned long flags;

	dev_dbg(chan->ccp->dev, "%s - chan=%s\n", __func__,
		dma_chan_name(&chan->dma_chan));

	spin_lock_irqsave(&chan->lock, flags);

	ccp_cleanup_desc_resources(chan->ccp, &chan->complete);

	spin_unlock_irqrestore(&chan->lock, flags);
}
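/*
 * Move the first pending command of a descriptor onto its active list
 * and hand it to the CCP.  -EINPROGRESS and -EBUSY are not failures
 * here: the command has been accepted (or backlogged) and will
 * complete through ccp_cmd_callback().
 */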
static int ccp_issue_next_cmd(struct ccp_dma_desc *desc)
{
	struct ccp_dma_cmd *cmd;
	int ret;

	cmd = list_first_entry(&desc->pending, struct ccp_dma_cmd, entry);
	list_move(&cmd->entry, &desc->active);

	dev_dbg(desc->ccp->dev, "%s - tx %d, cmd=%p\n", __func__,
		desc->tx_desc.cookie, cmd);

	ret = ccp_enqueue_cmd(&cmd->ccp_cmd);
	if (!ret || (ret == -EINPROGRESS) || (ret == -EBUSY))
		return 0;

	dev_dbg(desc->ccp->dev, "%s - error: ret=%d, tx %d, cmd=%p\n", __func__,
		ret, desc->tx_desc.cookie, cmd);

	return ret;
}
static void ccp_free_active_cmd(struct ccp_dma_desc *desc)
{
	struct ccp_dma_cmd *cmd;

	cmd = list_first_entry_or_null(&desc->active, struct ccp_dma_cmd,
				       entry);
	if (!cmd)
		return;

	dev_dbg(desc->ccp->dev, "%s - freeing tx %d cmd=%p\n",
		__func__, desc->tx_desc.cookie, cmd);

	list_del(&cmd->entry);
	kmem_cache_free(desc->ccp->dma_cmd_cache, cmd);
}
static struct ccp_dma_desc *__ccp_next_dma_desc(struct ccp_dma_chan *chan,
						struct ccp_dma_desc *desc)
{
	/* Move current DMA descriptor to the complete list */
	if (desc)
		list_move(&desc->entry, &chan->complete);

	/* Get the next DMA descriptor on the active list */
	desc = list_first_entry_or_null(&chan->active, struct ccp_dma_desc,
					entry);

	return desc;
}
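/*
 * Process completion of the current descriptor: free the command that
 * just finished, then either return the same descriptor (more commands
 * still pending) or mark it complete, run its dmaengine callback, and
 * walk forward to the next active descriptor.  Returns NULL once the
 * active list has been drained.
 */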
static struct ccp_dma_desc *ccp_handle_active_desc(struct ccp_dma_chan *chan,
						   struct ccp_dma_desc *desc)
{
	struct dma_async_tx_descriptor *tx_desc;
	unsigned long flags;

	/* Loop over descriptors until one is found with commands */
	do {
		if (desc) {
			/* Remove the DMA command from the list and free it */
			ccp_free_active_cmd(desc);

			if (!list_empty(&desc->pending)) {
				/* No errors, keep going */
				if (desc->status != DMA_ERROR)
					return desc;

				/* Error, free remaining commands and move on */
				ccp_free_cmd_resources(desc->ccp,
						       &desc->pending);
			}
			tx_desc = &desc->tx_desc;
		} else {
			tx_desc = NULL;
		}

		spin_lock_irqsave(&chan->lock, flags);
		if (desc) {
			if (desc->status != DMA_ERROR)
				desc->status = DMA_COMPLETE;

			dev_dbg(desc->ccp->dev,
				"%s - tx %d complete, status=%u\n", __func__,
				desc->tx_desc.cookie, desc->status);

			dma_cookie_complete(tx_desc);
		}
		desc = __ccp_next_dma_desc(chan, desc);
		spin_unlock_irqrestore(&chan->lock, flags);

		if (tx_desc) {
			if (tx_desc->callback &&
			    (tx_desc->flags & DMA_PREP_INTERRUPT))
				tx_desc->callback(tx_desc->callback_param);

			dma_run_dependencies(tx_desc);
		}
	} while (desc);

	return NULL;
}
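/*
 * Splice all pending descriptors onto the active list.  The first
 * pending descriptor is returned only when the active list was empty,
 * signalling the caller to kick off processing.  Must be called with
 * the channel lock held.
 */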
static struct ccp_dma_desc *__ccp_pending_to_active(struct ccp_dma_chan *chan)
{
	struct ccp_dma_desc *desc;

	if (list_empty(&chan->pending))
		return NULL;

	desc = list_empty(&chan->active)
		? list_first_entry(&chan->pending, struct ccp_dma_desc, entry)
		: NULL;

	list_splice_tail_init(&chan->pending, &chan->active);

	return desc;
}
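/*
 * Completion callback invoked by the CCP for every command.  It
 * advances the descriptor state machine and issues the next command,
 * looping so that a synchronous submission failure is handled by
 * marking the descriptor in error and moving on.
 */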
static void ccp_cmd_callback(void *data, int err)
{
	struct ccp_dma_desc *desc = data;
	struct ccp_dma_chan *chan;
	int ret;

	if (err == -EINPROGRESS)
		return;

	chan = container_of(desc->tx_desc.chan, struct ccp_dma_chan,
			    dma_chan);

	dev_dbg(chan->ccp->dev, "%s - tx %d callback, err=%d\n",
		__func__, desc->tx_desc.cookie, err);

	if (err)
		desc->status = DMA_ERROR;

	while (true) {
		/* Check for DMA descriptor completion */
		desc = ccp_handle_active_desc(chan, desc);

		/* Don't submit cmd if no descriptor or DMA is paused */
		if (!desc || (chan->status == DMA_PAUSED))
			break;

		ret = ccp_issue_next_cmd(desc);
		if (!ret)
			break;

		desc->status = DMA_ERROR;
	}

	tasklet_schedule(&chan->cleanup_tasklet);
}
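/*
 * dmaengine tx_submit hook: assign a cookie and move the descriptor
 * from the "created" list to the channel's "pending" list.
 */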
static dma_cookie_t ccp_tx_submit(struct dma_async_tx_descriptor *tx_desc)
{
	struct ccp_dma_desc *desc = container_of(tx_desc, struct ccp_dma_desc,
						 tx_desc);
	struct ccp_dma_chan *chan;
	dma_cookie_t cookie;
	unsigned long flags;

	chan = container_of(tx_desc->chan, struct ccp_dma_chan, dma_chan);

	spin_lock_irqsave(&chan->lock, flags);

	cookie = dma_cookie_assign(tx_desc);
	list_del(&desc->entry);
	list_add_tail(&desc->entry, &chan->pending);

	spin_unlock_irqrestore(&chan->lock, flags);

	dev_dbg(chan->ccp->dev, "%s - added tx descriptor %d to pending list\n",
		__func__, cookie);

	return cookie;
}
static struct ccp_dma_cmd *ccp_alloc_dma_cmd(struct ccp_dma_chan *chan)
{
	struct ccp_dma_cmd *cmd;

	cmd = kmem_cache_alloc(chan->ccp->dma_cmd_cache, GFP_NOWAIT);
	if (cmd)
		memset(cmd, 0, sizeof(*cmd));

	return cmd;
}
static struct ccp_dma_desc *ccp_alloc_dma_desc(struct ccp_dma_chan *chan,
					       unsigned long flags)
{
	struct ccp_dma_desc *desc;

	desc = kmem_cache_zalloc(chan->ccp->dma_desc_cache, GFP_NOWAIT);
	if (!desc)
		return NULL;

	dma_async_tx_descriptor_init(&desc->tx_desc, &chan->dma_chan);
	desc->tx_desc.flags = flags;
	desc->tx_desc.tx_submit = ccp_tx_submit;
	desc->ccp = chan->ccp;
	INIT_LIST_HEAD(&desc->entry);
	INIT_LIST_HEAD(&desc->pending);
	INIT_LIST_HEAD(&desc->active);
	desc->status = DMA_IN_PROGRESS;

	return desc;
}
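/*
 * Build a descriptor that copies src_sg to dst_sg using CCP passthrough
 * operations.  The scatterlists are walked in lockstep, and each
 * contiguous overlap of a source and destination segment becomes one
 * no-op passthrough command (a straight DMA copy, with no bitwise or
 * byte-swap transformation applied).
 */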
static struct ccp_dma_desc *ccp_create_desc(struct dma_chan *dma_chan,
					    struct scatterlist *dst_sg,
					    unsigned int dst_nents,
					    struct scatterlist *src_sg,
					    unsigned int src_nents,
					    unsigned long flags)
{
	struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
						 dma_chan);
	struct ccp_device *ccp = chan->ccp;
	struct ccp_dma_desc *desc;
	struct ccp_dma_cmd *cmd;
	struct ccp_cmd *ccp_cmd;
	struct ccp_passthru_nomap_engine *ccp_pt;
	unsigned int src_offset, src_len;
	unsigned int dst_offset, dst_len;
	unsigned int len;
	unsigned long sflags;
	size_t total_len;

	if (!dst_sg || !src_sg)
		return NULL;

	if (!dst_nents || !src_nents)
		return NULL;

	desc = ccp_alloc_dma_desc(chan, flags);
	if (!desc)
		return NULL;

	total_len = 0;

	src_len = sg_dma_len(src_sg);
	src_offset = 0;

	dst_len = sg_dma_len(dst_sg);
	dst_offset = 0;

	while (true) {
		if (!src_len) {
			src_nents--;
			if (!src_nents)
				break;

			src_sg = sg_next(src_sg);
			if (!src_sg)
				break;

			src_len = sg_dma_len(src_sg);
			src_offset = 0;
			continue;
		}

		if (!dst_len) {
			dst_nents--;
			if (!dst_nents)
				break;

			dst_sg = sg_next(dst_sg);
			if (!dst_sg)
				break;

			dst_len = sg_dma_len(dst_sg);
			dst_offset = 0;
			continue;
		}

		len = min(dst_len, src_len);

		cmd = ccp_alloc_dma_cmd(chan);
		if (!cmd)
			goto err;

		ccp_cmd = &cmd->ccp_cmd;
		ccp_cmd->ccp = chan->ccp;
		ccp_pt = &ccp_cmd->u.passthru_nomap;
		ccp_cmd->flags = CCP_CMD_MAY_BACKLOG;
		ccp_cmd->flags |= CCP_CMD_PASSTHRU_NO_DMA_MAP;
		ccp_cmd->engine = CCP_ENGINE_PASSTHRU;
		ccp_pt->bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
		ccp_pt->byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP;
		ccp_pt->src_dma = sg_dma_address(src_sg) + src_offset;
		ccp_pt->dst_dma = sg_dma_address(dst_sg) + dst_offset;
		ccp_pt->src_len = len;
		ccp_pt->final = 1;
		ccp_cmd->callback = ccp_cmd_callback;
		ccp_cmd->data = desc;

		list_add_tail(&cmd->entry, &desc->pending);

		dev_dbg(ccp->dev,
			"%s - cmd=%p, src=%pad, dst=%pad, len=%llu\n", __func__,
			cmd, &ccp_pt->src_dma,
			&ccp_pt->dst_dma, ccp_pt->src_len);

		total_len += len;

		src_len -= len;
		src_offset += len;

		dst_len -= len;
		dst_offset += len;
	}

	desc->len = total_len;

	if (list_empty(&desc->pending))
		goto err;

	dev_dbg(ccp->dev, "%s - desc=%p\n", __func__, desc);

	spin_lock_irqsave(&chan->lock, sflags);
	list_add_tail(&desc->entry, &chan->created);
	spin_unlock_irqrestore(&chan->lock, sflags);

	return desc;

err:
	ccp_free_cmd_resources(ccp, &desc->pending);
	kmem_cache_free(ccp->dma_desc_cache, desc);

	return NULL;
}
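/*
 * A single memcpy request is expressed as a pair of one-entry
 * scatterlists so that the memcpy and scatter-gather cases share the
 * same descriptor-building path.
 */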
static struct dma_async_tx_descriptor *ccp_prep_dma_memcpy(
	struct dma_chan *dma_chan, dma_addr_t dst, dma_addr_t src, size_t len,
	unsigned long flags)
{
	struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
						 dma_chan);
	struct ccp_dma_desc *desc;
	struct scatterlist dst_sg, src_sg;

	dev_dbg(chan->ccp->dev,
		"%s - src=%pad, dst=%pad, len=%zu, flags=%#lx\n",
		__func__, &src, &dst, len, flags);

	sg_init_table(&dst_sg, 1);
	sg_dma_address(&dst_sg) = dst;
	sg_dma_len(&dst_sg) = len;

	sg_init_table(&src_sg, 1);
	sg_dma_address(&src_sg) = src;
	sg_dma_len(&src_sg) = len;

	desc = ccp_create_desc(dma_chan, &dst_sg, 1, &src_sg, 1, flags);
	if (!desc)
		return NULL;

	return &desc->tx_desc;
}
static struct dma_async_tx_descriptor *ccp_prep_dma_sg(
	struct dma_chan *dma_chan, struct scatterlist *dst_sg,
	unsigned int dst_nents, struct scatterlist *src_sg,
	unsigned int src_nents, unsigned long flags)
{
	struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
						 dma_chan);
	struct ccp_dma_desc *desc;

	dev_dbg(chan->ccp->dev,
		"%s - src=%p, src_nents=%u dst=%p, dst_nents=%u, flags=%#lx\n",
		__func__, src_sg, src_nents, dst_sg, dst_nents, flags);

	desc = ccp_create_desc(dma_chan, dst_sg, dst_nents, src_sg, src_nents,
			       flags);
	if (!desc)
		return NULL;

	return &desc->tx_desc;
}
static struct dma_async_tx_descriptor *ccp_prep_dma_interrupt(
	struct dma_chan *dma_chan, unsigned long flags)
{
	struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
						 dma_chan);
	struct ccp_dma_desc *desc;

	desc = ccp_alloc_dma_desc(chan, flags);
	if (!desc)
		return NULL;

	return &desc->tx_desc;
}
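/*
 * Start processing: move everything submitted so far onto the active
 * list and, if the channel was idle, prime the engine by invoking the
 * command callback directly with a success status.
 */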
static void ccp_issue_pending(struct dma_chan *dma_chan)
{
	struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
						 dma_chan);
	struct ccp_dma_desc *desc;
	unsigned long flags;

	dev_dbg(chan->ccp->dev, "%s\n", __func__);

	spin_lock_irqsave(&chan->lock, flags);

	desc = __ccp_pending_to_active(chan);

	spin_unlock_irqrestore(&chan->lock, flags);

	/* If there was nothing active, start processing */
	if (desc)
		ccp_cmd_callback(desc, 0);
}
static enum dma_status ccp_tx_status(struct dma_chan *dma_chan,
				     dma_cookie_t cookie,
				     struct dma_tx_state *state)
{
	struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
						 dma_chan);
	struct ccp_dma_desc *desc;
	enum dma_status ret;
	unsigned long flags;

	if (chan->status == DMA_PAUSED) {
		ret = DMA_PAUSED;
		goto out;
	}

	ret = dma_cookie_status(dma_chan, cookie, state);
	if (ret == DMA_COMPLETE) {
		spin_lock_irqsave(&chan->lock, flags);

		/* Get status from complete chain, if still there */
		list_for_each_entry(desc, &chan->complete, entry) {
			if (desc->tx_desc.cookie != cookie)
				continue;

			ret = desc->status;
			break;
		}

		spin_unlock_irqrestore(&chan->lock, flags);
	}

out:
	dev_dbg(chan->ccp->dev, "%s - %u\n", __func__, ret);

	return ret;
}
static int ccp_pause(struct dma_chan *dma_chan)
{
	struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
						 dma_chan);

	chan->status = DMA_PAUSED;

	/*TODO: Wait for active DMA to complete before returning? */

	return 0;
}
static int ccp_resume(struct dma_chan *dma_chan)
{
	struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
						 dma_chan);
	struct ccp_dma_desc *desc;
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);

	desc = list_first_entry_or_null(&chan->active, struct ccp_dma_desc,
					entry);

	spin_unlock_irqrestore(&chan->lock, flags);

	/* Indicate the channel is running again */
	chan->status = DMA_IN_PROGRESS;

	/* If there was something active, re-start */
	if (desc)
		ccp_cmd_callback(desc, 0);

	return 0;
}
static int ccp_terminate_all(struct dma_chan *dma_chan)
{
	struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
						 dma_chan);
	unsigned long flags;

	dev_dbg(chan->ccp->dev, "%s\n", __func__);

	/*TODO: Wait for active DMA to complete before continuing */

	spin_lock_irqsave(&chan->lock, flags);

	/*TODO: Purge the complete list? */
	ccp_free_desc_resources(chan->ccp, &chan->active);
	ccp_free_desc_resources(chan->ccp, &chan->pending);
	ccp_free_desc_resources(chan->ccp, &chan->created);

	spin_unlock_irqrestore(&chan->lock, flags);

	return 0;
}
static void ccp_dma_release(struct ccp_device *ccp)
{
	struct ccp_dma_chan *chan;
	struct dma_chan *dma_chan;
	unsigned int i;

	for (i = 0; i < ccp->cmd_q_count; i++) {
		chan = ccp->ccp_dma_chan + i;
		dma_chan = &chan->dma_chan;
		tasklet_kill(&chan->cleanup_tasklet);
		list_del_rcu(&dma_chan->device_node);
	}
}
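/*
 * Register the CCP as a dmaengine provider: one DMA channel is exposed
 * per command queue, each with its own lock, descriptor lists and
 * cleanup tasklet.  Command and descriptor objects are allocated from
 * two device-named kmem caches created here.
 */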
int ccp_dmaengine_register(struct ccp_device *ccp)
{
	struct ccp_dma_chan *chan;
	struct dma_device *dma_dev = &ccp->dma_dev;
	struct dma_chan *dma_chan;
	char *dma_cmd_cache_name;
	char *dma_desc_cache_name;
	unsigned int i;
	int ret;

	ccp->ccp_dma_chan = devm_kcalloc(ccp->dev, ccp->cmd_q_count,
					 sizeof(*(ccp->ccp_dma_chan)),
					 GFP_KERNEL);
	if (!ccp->ccp_dma_chan)
		return -ENOMEM;

	dma_cmd_cache_name = devm_kasprintf(ccp->dev, GFP_KERNEL,
					    "%s-dmaengine-cmd-cache",
					    ccp->name);
	if (!dma_cmd_cache_name)
		return -ENOMEM;

	ccp->dma_cmd_cache = kmem_cache_create(dma_cmd_cache_name,
					       sizeof(struct ccp_dma_cmd),
					       sizeof(void *),
					       SLAB_HWCACHE_ALIGN, NULL);
	if (!ccp->dma_cmd_cache)
		return -ENOMEM;

	dma_desc_cache_name = devm_kasprintf(ccp->dev, GFP_KERNEL,
					     "%s-dmaengine-desc-cache",
					     ccp->name);
	if (!dma_desc_cache_name) {
		ret = -ENOMEM;
		goto err_cache;
	}

	ccp->dma_desc_cache = kmem_cache_create(dma_desc_cache_name,
						sizeof(struct ccp_dma_desc),
						sizeof(void *),
						SLAB_HWCACHE_ALIGN, NULL);
	if (!ccp->dma_desc_cache) {
		ret = -ENOMEM;
		goto err_cache;
	}

	dma_dev->dev = ccp->dev;
	dma_dev->src_addr_widths = CCP_DMA_WIDTH(dma_get_mask(ccp->dev));
	dma_dev->dst_addr_widths = CCP_DMA_WIDTH(dma_get_mask(ccp->dev));
	dma_dev->directions = DMA_MEM_TO_MEM;
	dma_dev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
	dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
	dma_cap_set(DMA_SG, dma_dev->cap_mask);
	dma_cap_set(DMA_INTERRUPT, dma_dev->cap_mask);

	INIT_LIST_HEAD(&dma_dev->channels);
	for (i = 0; i < ccp->cmd_q_count; i++) {
		chan = ccp->ccp_dma_chan + i;
		dma_chan = &chan->dma_chan;

		chan->ccp = ccp;

		spin_lock_init(&chan->lock);
		INIT_LIST_HEAD(&chan->created);
		INIT_LIST_HEAD(&chan->pending);
		INIT_LIST_HEAD(&chan->active);
		INIT_LIST_HEAD(&chan->complete);

		tasklet_init(&chan->cleanup_tasklet, ccp_do_cleanup,
			     (unsigned long)chan);

		dma_chan->device = dma_dev;
		dma_cookie_init(dma_chan);

		list_add_tail(&dma_chan->device_node, &dma_dev->channels);
	}

	dma_dev->device_free_chan_resources = ccp_free_chan_resources;
	dma_dev->device_prep_dma_memcpy = ccp_prep_dma_memcpy;
	dma_dev->device_prep_dma_sg = ccp_prep_dma_sg;
	dma_dev->device_prep_dma_interrupt = ccp_prep_dma_interrupt;
	dma_dev->device_issue_pending = ccp_issue_pending;
	dma_dev->device_tx_status = ccp_tx_status;
	dma_dev->device_pause = ccp_pause;
	dma_dev->device_resume = ccp_resume;
	dma_dev->device_terminate_all = ccp_terminate_all;

	ret = dma_async_device_register(dma_dev);
	if (ret)
		goto err_reg;

	return 0;

err_reg:
	ccp_dma_release(ccp);
	kmem_cache_destroy(ccp->dma_desc_cache);

err_cache:
	kmem_cache_destroy(ccp->dma_cmd_cache);

	return ret;
}
void ccp_dmaengine_unregister(struct ccp_device *ccp)
{
	struct dma_device *dma_dev = &ccp->dma_dev;

	dma_async_device_unregister(dma_dev);
	ccp_dma_release(ccp);

	kmem_cache_destroy(ccp->dma_desc_cache);
	kmem_cache_destroy(ccp->dma_cmd_cache);
}
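/*
 * For reference, a minimal sketch of how a kernel client would drive
 * these channels through the generic dmaengine API (illustrative only;
 * error handling elided, and my_done_callback is a hypothetical helper):
 *
 *	struct dma_chan *chan = dma_request_chan_by_mask(&mask);
 *	struct dma_async_tx_descriptor *tx;
 *
 *	tx = dmaengine_prep_dma_memcpy(chan, dst, src, len,
 *				       DMA_PREP_INTERRUPT);
 *	tx->callback = my_done_callback;
 *	dmaengine_submit(tx);
 *	dma_async_issue_pending(chan);
 */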