/*
 * Freescale i.MX23/i.MX28 Data Co-Processor driver
 *
 * Copyright (C) 2013 Marek Vasut <marex@denx.de>
 *
 * The code contained herein is licensed under the GNU General Public
 * License. You may obtain a copy of the GNU General Public License
 * Version 2 or later at the following locations:
 *
 * http://www.opensource.org/licenses/gpl-license.html
 * http://www.gnu.org/copyleft/gpl.html
 */

#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/stmp_device.h>

#include <crypto/aes.h>
#include <crypto/sha.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>

#define DCP_MAX_CHANS	4
#define DCP_BUF_SZ	PAGE_SIZE
#define DCP_SHA_PAY_SZ	64

#define DCP_ALIGNMENT	64

/*
 * Null hashes to align with hw behavior on imx6sl and ull; these are
 * flipped for consistency with hw output.
 */
static const uint8_t sha1_null_hash[] =
	"\x09\x07\xd8\xaf\x90\x18\x60\x95\xef\xbf"
	"\x55\x32\x0d\x4b\x6b\x5e\xee\xa3\x39\xda";

static const uint8_t sha256_null_hash[] =
	"\x55\xb8\x52\x78\x1b\x99\x95\xa4"
	"\x4c\x93\x9b\x64\xe4\x41\xae\x27"
	"\x24\xb9\x6f\x99\xc8\xf4\xfb\x9a"
	"\x14\x1c\xfc\x98\x42\xc4\xb0\xe3";

/* DCP DMA descriptor. */
struct dcp_dma_desc {
	uint32_t	next_cmd_addr;
	uint32_t	control0;
	uint32_t	control1;
	uint32_t	source;
	uint32_t	destination;
	uint32_t	size;
	uint32_t	payload;
	uint32_t	status;
};
62 /* Coherent aligned block for bounce buffering. */
63 struct dcp_coherent_block {
64 uint8_t aes_in_buf[DCP_BUF_SZ];
65 uint8_t aes_out_buf[DCP_BUF_SZ];
66 uint8_t sha_in_buf[DCP_BUF_SZ];
67 uint8_t sha_out_buf[DCP_SHA_PAY_SZ];
69 uint8_t aes_key[2 * AES_KEYSIZE_128];
71 struct dcp_dma_desc desc[DCP_MAX_CHANS];
80 struct dcp_coherent_block *coh;
82 struct completion completion[DCP_MAX_CHANS];
83 spinlock_t lock[DCP_MAX_CHANS];
84 struct task_struct *thread[DCP_MAX_CHANS];
85 struct crypto_queue queue[DCP_MAX_CHANS];

enum dcp_chan {
	DCP_CHAN_HASH_SHA	= 0,
	DCP_CHAN_CRYPTO		= 1,
};

struct dcp_async_ctx {
	/* Common context */
	struct mutex			mutex;
	uint32_t			chan;
	int				fill;

	/* SHA Hash-specific context */
	uint32_t			alg;
	int				hot;

	/* Crypto-specific context */
	struct crypto_skcipher		*fallback;
	unsigned int			key_len;
	uint8_t				key[AES_KEYSIZE_128];
};

struct dcp_aes_req_ctx {
	unsigned int			enc:1;
	unsigned int			ecb:1;
};

struct dcp_sha_req_ctx {
	unsigned int			init:1;
	unsigned int			fini:1;
};

/*
 * Due to the design of the Linux Crypto API, there can only be one
 * instance of the MXS DCP.
 */
static struct dcp *global_sdcp;

/* DCP register layout. */
#define MXS_DCP_CTRL				0x00
#define MXS_DCP_CTRL_GATHER_RESIDUAL_WRITES	(1 << 23)
#define MXS_DCP_CTRL_ENABLE_CONTEXT_CACHING	(1 << 22)

#define MXS_DCP_STAT				0x10
#define MXS_DCP_STAT_CLR			0x18
#define MXS_DCP_STAT_IRQ_MASK			0xf

#define MXS_DCP_CHANNELCTRL			0x20
#define MXS_DCP_CHANNELCTRL_ENABLE_CHANNEL_MASK	0xff

#define MXS_DCP_CAPABILITY1			0x40
#define MXS_DCP_CAPABILITY1_SHA256		(4 << 16)
#define MXS_DCP_CAPABILITY1_SHA1		(1 << 16)
#define MXS_DCP_CAPABILITY1_AES128		(1 << 0)

#define MXS_DCP_CONTEXT				0x50

#define MXS_DCP_CH_N_CMDPTR(n)			(0x100 + ((n) * 0x40))
#define MXS_DCP_CH_N_SEMA(n)			(0x110 + ((n) * 0x40))
#define MXS_DCP_CH_N_STAT(n)			(0x120 + ((n) * 0x40))
#define MXS_DCP_CH_N_STAT_CLR(n)		(0x128 + ((n) * 0x40))

/* DMA descriptor bits. */
#define MXS_DCP_CONTROL0_HASH_TERM		(1 << 13)
#define MXS_DCP_CONTROL0_HASH_INIT		(1 << 12)
#define MXS_DCP_CONTROL0_PAYLOAD_KEY		(1 << 11)
#define MXS_DCP_CONTROL0_CIPHER_ENCRYPT		(1 << 8)
#define MXS_DCP_CONTROL0_CIPHER_INIT		(1 << 9)
#define MXS_DCP_CONTROL0_ENABLE_HASH		(1 << 6)
#define MXS_DCP_CONTROL0_ENABLE_CIPHER		(1 << 5)
#define MXS_DCP_CONTROL0_DECR_SEMAPHORE		(1 << 1)
#define MXS_DCP_CONTROL0_INTERRUPT		(1 << 0)

#define MXS_DCP_CONTROL1_HASH_SELECT_SHA256	(2 << 16)
#define MXS_DCP_CONTROL1_HASH_SELECT_SHA1	(0 << 16)
#define MXS_DCP_CONTROL1_CIPHER_MODE_CBC	(1 << 4)
#define MXS_DCP_CONTROL1_CIPHER_MODE_ECB	(0 << 4)
#define MXS_DCP_CONTROL1_CIPHER_SELECT_AES128	(0 << 0)
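
/*
 * Program the channel's DMA descriptor, kick the channel semaphore and
 * wait for the interrupt handler to signal completion.
 */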
static int mxs_dcp_start_dma(struct dcp_async_ctx *actx)
{
	struct dcp *sdcp = global_sdcp;
	const int chan = actx->chan;
	uint32_t stat;
	unsigned long ret;
	int dma_err;
	struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan];
	dma_addr_t desc_phys = dma_map_single(sdcp->dev, desc, sizeof(*desc),
					      DMA_TO_DEVICE);

	dma_err = dma_mapping_error(sdcp->dev, desc_phys);
	if (dma_err)
		return dma_err;

	reinit_completion(&sdcp->completion[chan]);

	/* Clear status register. */
	writel(0xffffffff, sdcp->base + MXS_DCP_CH_N_STAT_CLR(chan));

	/* Load the DMA descriptor. */
	writel(desc_phys, sdcp->base + MXS_DCP_CH_N_CMDPTR(chan));

	/* Increment the semaphore to start the DMA transfer. */
	writel(1, sdcp->base + MXS_DCP_CH_N_SEMA(chan));

	ret = wait_for_completion_timeout(&sdcp->completion[chan],
					  msecs_to_jiffies(1000));
	if (!ret) {
		dev_err(sdcp->dev, "Channel %i timeout (DCP_STAT=0x%08x)\n",
			chan, readl(sdcp->base + MXS_DCP_STAT));
		return -ETIMEDOUT;
	}

	stat = readl(sdcp->base + MXS_DCP_CH_N_STAT(chan));
	if (stat & 0xff) {
		dev_err(sdcp->dev, "Channel %i error (CH_STAT=0x%08x)\n",
			chan, stat);
		return -EINVAL;
	}

	dma_unmap_single(sdcp->dev, desc_phys, sizeof(*desc), DMA_TO_DEVICE);

	return 0;
}

/*
 * Encryption (AES128)
 */
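
/*
 * Run one bounce buffer's worth of data through the AES engine: map the
 * key+IV payload and the input/output bounce buffers, fill in the
 * channel's DMA descriptor and start the transfer.
 */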
static int mxs_dcp_run_aes(struct dcp_async_ctx *actx,
			   struct ablkcipher_request *req, int init)
{
	dma_addr_t key_phys, src_phys, dst_phys;
	struct dcp *sdcp = global_sdcp;
	struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan];
	struct dcp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);
	int ret;

	key_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_key,
				  2 * AES_KEYSIZE_128, DMA_TO_DEVICE);
	ret = dma_mapping_error(sdcp->dev, key_phys);
	if (ret)
		return ret;

	src_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_in_buf,
				  DCP_BUF_SZ, DMA_TO_DEVICE);
	ret = dma_mapping_error(sdcp->dev, src_phys);
	if (ret)
		goto err_src;

	dst_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_out_buf,
				  DCP_BUF_SZ, DMA_FROM_DEVICE);
	ret = dma_mapping_error(sdcp->dev, dst_phys);
	if (ret)
		goto err_dst;

	if (actx->fill % AES_BLOCK_SIZE) {
		dev_err(sdcp->dev, "Invalid block size!\n");
		ret = -EINVAL;
		goto aes_done_run;
	}

	/* Fill in the DMA descriptor. */
	desc->control0 = MXS_DCP_CONTROL0_DECR_SEMAPHORE |
			 MXS_DCP_CONTROL0_INTERRUPT |
			 MXS_DCP_CONTROL0_ENABLE_CIPHER;

	/* Payload contains the key. */
	desc->control0 |= MXS_DCP_CONTROL0_PAYLOAD_KEY;

	if (rctx->enc)
		desc->control0 |= MXS_DCP_CONTROL0_CIPHER_ENCRYPT;
	if (init)
		desc->control0 |= MXS_DCP_CONTROL0_CIPHER_INIT;

	desc->control1 = MXS_DCP_CONTROL1_CIPHER_SELECT_AES128;

	if (rctx->ecb)
		desc->control1 |= MXS_DCP_CONTROL1_CIPHER_MODE_ECB;
	else
		desc->control1 |= MXS_DCP_CONTROL1_CIPHER_MODE_CBC;

	desc->next_cmd_addr = 0;
	desc->source = src_phys;
	desc->destination = dst_phys;
	desc->size = actx->fill;
	desc->payload = key_phys;
	desc->status = 0;

	ret = mxs_dcp_start_dma(actx);

aes_done_run:
	dma_unmap_single(sdcp->dev, dst_phys, DCP_BUF_SZ, DMA_FROM_DEVICE);
err_dst:
	dma_unmap_single(sdcp->dev, src_phys, DCP_BUF_SZ, DMA_TO_DEVICE);
err_src:
	dma_unmap_single(sdcp->dev, key_phys, 2 * AES_KEYSIZE_128,
			 DMA_TO_DEVICE);

	return ret;
}

static int mxs_dcp_aes_block_crypt(struct crypto_async_request *arq)
{
	struct dcp *sdcp = global_sdcp;

	struct ablkcipher_request *req = ablkcipher_request_cast(arq);
	struct dcp_async_ctx *actx = crypto_tfm_ctx(arq->tfm);
	struct dcp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);

	struct scatterlist *dst = req->dst;
	struct scatterlist *src = req->src;
	int dst_nents = sg_nents(dst);

	const int out_off = DCP_BUF_SZ;
	uint8_t *in_buf = sdcp->coh->aes_in_buf;
	uint8_t *out_buf = sdcp->coh->aes_out_buf;

	uint32_t dst_off = 0;
	uint8_t *src_buf = NULL;
	uint32_t last_out_len = 0;

	uint8_t *key = sdcp->coh->aes_key;

	int ret = 0;
	unsigned int i, len, clen, tlen = 0;
	int init = 0;
	bool limit_hit = false;

	actx->fill = 0;

	/* Copy the key from the temporary location. */
	memcpy(key, actx->key, actx->key_len);

	if (!rctx->ecb) {
		/* Copy the CBC IV just past the key. */
		memcpy(key + AES_KEYSIZE_128, req->info, AES_KEYSIZE_128);
		/* CBC needs the INIT set. */
		init = 1;
	} else {
		memset(key + AES_KEYSIZE_128, 0, AES_KEYSIZE_128);
	}
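
	/*
	 * Walk the source scatterlist and bounce the data through the
	 * coherent buffer in DCP_BUF_SZ chunks, running the AES engine on
	 * each full (or final) chunk and copying the result back out.
	 */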
	for_each_sg(req->src, src, sg_nents(req->src), i) {
		src_buf = sg_virt(src);
		len = sg_dma_len(src);
		tlen += len;
		limit_hit = tlen > req->nbytes;

		if (limit_hit)
			len = req->nbytes - (tlen - len);

		do {
			if (actx->fill + len > out_off)
				clen = out_off - actx->fill;
			else
				clen = len;

			memcpy(in_buf + actx->fill, src_buf, clen);
			len -= clen;
			src_buf += clen;
			actx->fill += clen;

			/*
			 * If we filled the buffer or this is the last SG,
			 * submit the buffer.
			 */
			if (actx->fill == out_off || sg_is_last(src) ||
			    limit_hit) {
				ret = mxs_dcp_run_aes(actx, req, init);
				if (ret)
					return ret;
				init = 0;

				sg_pcopy_from_buffer(dst, dst_nents, out_buf,
						     actx->fill, dst_off);
				dst_off += actx->fill;
				last_out_len = actx->fill;
				actx->fill = 0;
			}
		} while (len);

		if (limit_hit)
			break;
	}

	/* Copy the IV for CBC for chaining */
	if (!rctx->ecb) {
		if (rctx->enc)
			memcpy(req->info, out_buf+(last_out_len-AES_BLOCK_SIZE),
			       AES_BLOCK_SIZE);
		else
			memcpy(req->info, in_buf+(last_out_len-AES_BLOCK_SIZE),
			       AES_BLOCK_SIZE);
	}

	return ret;
}
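
/*
 * One kthread per DCP channel: sleep until requests are queued, then
 * dequeue and process them, completing each back to the crypto API.
 */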
static int dcp_chan_thread_aes(void *data)
{
	struct dcp *sdcp = global_sdcp;
	const int chan = DCP_CHAN_CRYPTO;

	struct crypto_async_request *backlog;
	struct crypto_async_request *arq;

	int ret;

	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);

		spin_lock(&sdcp->lock[chan]);
		backlog = crypto_get_backlog(&sdcp->queue[chan]);
		arq = crypto_dequeue_request(&sdcp->queue[chan]);
		spin_unlock(&sdcp->lock[chan]);

		if (!backlog && !arq) {
			schedule();
			continue;
		}

		set_current_state(TASK_RUNNING);

		if (backlog)
			backlog->complete(backlog, -EINPROGRESS);

		if (arq) {
			ret = mxs_dcp_aes_block_crypt(arq);
			arq->complete(arq, ret);
		}
	}

	return 0;
}
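
/*
 * The DCP does only AES-128 in hardware; other key sizes are handled
 * synchronously by the software skcipher fallback allocated at init.
 */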
static int mxs_dcp_block_fallback(struct ablkcipher_request *req, int enc)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct dcp_async_ctx *ctx = crypto_ablkcipher_ctx(tfm);
	SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
	int ret;

	skcipher_request_set_tfm(subreq, ctx->fallback);
	skcipher_request_set_callback(subreq, req->base.flags, NULL, NULL);
	skcipher_request_set_crypt(subreq, req->src, req->dst,
				   req->nbytes, req->info);

	if (enc)
		ret = crypto_skcipher_encrypt(subreq);
	else
		ret = crypto_skcipher_decrypt(subreq);

	skcipher_request_zero(subreq);

	return ret;
}
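
/*
 * Queue an AES request for the crypto channel thread; requests with
 * unsupported key sizes go straight to the fallback instead.
 */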
static int mxs_dcp_aes_enqueue(struct ablkcipher_request *req, int enc, int ecb)
{
	struct dcp *sdcp = global_sdcp;
	struct crypto_async_request *arq = &req->base;
	struct dcp_async_ctx *actx = crypto_tfm_ctx(arq->tfm);
	struct dcp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);
	int ret;

	if (unlikely(actx->key_len != AES_KEYSIZE_128))
		return mxs_dcp_block_fallback(req, enc);

	rctx->enc = enc;
	rctx->ecb = ecb;
	actx->chan = DCP_CHAN_CRYPTO;

	spin_lock(&sdcp->lock[actx->chan]);
	ret = crypto_enqueue_request(&sdcp->queue[actx->chan], &req->base);
	spin_unlock(&sdcp->lock[actx->chan]);

	wake_up_process(sdcp->thread[actx->chan]);

	return ret;
}

static int mxs_dcp_aes_ecb_decrypt(struct ablkcipher_request *req)
{
	return mxs_dcp_aes_enqueue(req, 0, 1);
}

static int mxs_dcp_aes_ecb_encrypt(struct ablkcipher_request *req)
{
	return mxs_dcp_aes_enqueue(req, 1, 1);
}

static int mxs_dcp_aes_cbc_decrypt(struct ablkcipher_request *req)
{
	return mxs_dcp_aes_enqueue(req, 0, 0);
}

static int mxs_dcp_aes_cbc_encrypt(struct ablkcipher_request *req)
{
	return mxs_dcp_aes_enqueue(req, 1, 0);
}

static int mxs_dcp_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
			      unsigned int len)
{
	struct dcp_async_ctx *actx = crypto_ablkcipher_ctx(tfm);
	int ret;

	/*
	 * AES 128 is supported by the hardware: store the key into the
	 * temporary buffer and exit. We must use the temporary buffer here,
	 * since there can still be an operation in progress.
	 */
	actx->key_len = len;
	if (len == AES_KEYSIZE_128) {
		memcpy(actx->key, key, len);
		return 0;
	}

	/*
	 * If the requested AES key size is not supported by the hardware,
	 * but is supported by the in-kernel software implementation, we use
	 * the software fallback.
	 */
	crypto_skcipher_clear_flags(actx->fallback, CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(actx->fallback,
				  tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);

	ret = crypto_skcipher_setkey(actx->fallback, key, len);
	if (!ret)
		return 0;

	tfm->base.crt_flags &= ~CRYPTO_TFM_RES_MASK;
	tfm->base.crt_flags |= crypto_skcipher_get_flags(actx->fallback) &
			       CRYPTO_TFM_RES_MASK;

	return ret;
}

static int mxs_dcp_aes_fallback_init(struct crypto_tfm *tfm)
{
	const char *name = crypto_tfm_alg_name(tfm);
	const uint32_t flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK;
	struct dcp_async_ctx *actx = crypto_tfm_ctx(tfm);
	struct crypto_skcipher *blk;

	blk = crypto_alloc_skcipher(name, 0, flags);
	if (IS_ERR(blk))
		return PTR_ERR(blk);

	actx->fallback = blk;
	tfm->crt_ablkcipher.reqsize = sizeof(struct dcp_aes_req_ctx);
	return 0;
}

static void mxs_dcp_aes_fallback_exit(struct crypto_tfm *tfm)
{
	struct dcp_async_ctx *actx = crypto_tfm_ctx(tfm);

	crypto_free_skcipher(actx->fallback);
}

/*
 * Hashing (SHA1/SHA256)
 */
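
/*
 * Push the coherent SHA input buffer through the hash engine. On the
 * final block the output buffer is mapped as well and HASH_TERM is set
 * so the engine writes out the digest payload.
 */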
static int mxs_dcp_run_sha(struct ahash_request *req)
{
	struct dcp *sdcp = global_sdcp;
	int ret;

	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);
	struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req);
	struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan];

	dma_addr_t digest_phys = 0;
	dma_addr_t buf_phys = dma_map_single(sdcp->dev, sdcp->coh->sha_in_buf,
					     DCP_BUF_SZ, DMA_TO_DEVICE);

	ret = dma_mapping_error(sdcp->dev, buf_phys);
	if (ret)
		return ret;

	/* Fill in the DMA descriptor. */
	desc->control0 = MXS_DCP_CONTROL0_DECR_SEMAPHORE |
			 MXS_DCP_CONTROL0_INTERRUPT |
			 MXS_DCP_CONTROL0_ENABLE_HASH;
	if (rctx->init)
		desc->control0 |= MXS_DCP_CONTROL0_HASH_INIT;

	desc->control1 = actx->alg;
	desc->next_cmd_addr = 0;
	desc->source = buf_phys;
	desc->destination = 0;
	desc->size = actx->fill;
	desc->payload = 0;
	desc->status = 0;

	/*
	 * Align driver with hw behavior when generating null hashes
	 */
	if (rctx->init && rctx->fini && desc->size == 0) {
		struct hash_alg_common *halg = crypto_hash_alg_common(tfm);
		const uint8_t *sha_buf =
			(actx->alg == MXS_DCP_CONTROL1_HASH_SELECT_SHA1) ?
			sha1_null_hash : sha256_null_hash;
		memcpy(sdcp->coh->sha_out_buf, sha_buf, halg->digestsize);
		ret = 0;
		goto done_run;
	}

	/* Set HASH_TERM bit for last transfer block. */
	if (rctx->fini) {
		digest_phys = dma_map_single(sdcp->dev, sdcp->coh->sha_out_buf,
					     DCP_SHA_PAY_SZ, DMA_FROM_DEVICE);
		ret = dma_mapping_error(sdcp->dev, digest_phys);
		if (ret)
			goto done_run;

		desc->control0 |= MXS_DCP_CONTROL0_HASH_TERM;
		desc->payload = digest_phys;
	}

	ret = mxs_dcp_start_dma(actx);

	if (rctx->fini)
		dma_unmap_single(sdcp->dev, digest_phys, DCP_SHA_PAY_SZ,
				 DMA_FROM_DEVICE);

done_run:
	dma_unmap_single(sdcp->dev, buf_phys, DCP_BUF_SZ, DMA_TO_DEVICE);

	return ret;
}
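
/*
 * Bounce request data into the coherent SHA input buffer, running the
 * hash engine whenever the buffer fills; on the final call, read back
 * the digest (byte-reversed, to match the expected byte order).
 */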
static int dcp_sha_req_to_buf(struct crypto_async_request *arq)
{
	struct dcp *sdcp = global_sdcp;

	struct ahash_request *req = ahash_request_cast(arq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);
	struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req);
	struct hash_alg_common *halg = crypto_hash_alg_common(tfm);

	uint8_t *in_buf = sdcp->coh->sha_in_buf;
	uint8_t *out_buf = sdcp->coh->sha_out_buf;

	struct scatterlist *src;

	unsigned int i, len, clen, oft = 0;
	int ret;

	int fin = rctx->fini;
	if (fin)
		rctx->fini = 0;

	src = req->src;
	len = req->nbytes;

	while (len) {
		if (actx->fill + len > DCP_BUF_SZ)
			clen = DCP_BUF_SZ - actx->fill;
		else
			clen = len;

		scatterwalk_map_and_copy(in_buf + actx->fill, src, oft, clen,
					 0);

		len -= clen;
		oft += clen;
		actx->fill += clen;

		/*
		 * If we filled the buffer and still have some
		 * more data, submit the buffer.
		 */
		if (len && actx->fill == DCP_BUF_SZ) {
			ret = mxs_dcp_run_sha(req);
			if (ret)
				return ret;
			actx->fill = 0;
			rctx->init = 0;
		}
	}

	if (fin) {
		rctx->fini = 1;

		/* Submit whatever is left. */
		if (!req->result)
			return -EINVAL;

		ret = mxs_dcp_run_sha(req);
		if (ret)
			return ret;

		actx->fill = 0;

		/* For some reason the result is flipped */
		for (i = 0; i < halg->digestsize; i++)
			req->result[i] = out_buf[halg->digestsize - i - 1];
	}

	return 0;
}

static int dcp_chan_thread_sha(void *data)
{
	struct dcp *sdcp = global_sdcp;
	const int chan = DCP_CHAN_HASH_SHA;

	struct crypto_async_request *backlog;
	struct crypto_async_request *arq;

	struct dcp_sha_req_ctx *rctx;

	struct ahash_request *req;
	int ret, fini;

	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);

		spin_lock(&sdcp->lock[chan]);
		backlog = crypto_get_backlog(&sdcp->queue[chan]);
		arq = crypto_dequeue_request(&sdcp->queue[chan]);
		spin_unlock(&sdcp->lock[chan]);

		if (!backlog && !arq) {
			schedule();
			continue;
		}

		set_current_state(TASK_RUNNING);

		if (backlog)
			backlog->complete(backlog, -EINPROGRESS);

		if (arq) {
			req = ahash_request_cast(arq);
			rctx = ahash_request_ctx(req);

			ret = dcp_sha_req_to_buf(arq);
			fini = rctx->fini;
			arq->complete(arq, ret);
		}
	}

	return 0;
}

static int dcp_sha_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);

	struct hash_alg_common *halg = crypto_hash_alg_common(tfm);

	/*
	 * Start hashing session. The code below only inits the
	 * hashing session context, nothing more.
	 */
	memset(actx, 0, sizeof(*actx));

	if (strcmp(halg->base.cra_name, "sha1") == 0)
		actx->alg = MXS_DCP_CONTROL1_HASH_SELECT_SHA1;
	else
		actx->alg = MXS_DCP_CONTROL1_HASH_SELECT_SHA256;

	actx->fill = 0;
	actx->hot = 0;
	actx->chan = DCP_CHAN_HASH_SHA;

	mutex_init(&actx->mutex);

	return 0;
}
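
/*
 * Common worker for update/final/finup: record the request's init/fini
 * state and queue it for the SHA channel thread.
 */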
static int dcp_sha_update_fx(struct ahash_request *req, int fini)
{
	struct dcp *sdcp = global_sdcp;

	struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);

	int ret;

	/*
	 * Ignore requests that have no data in them and are not
	 * the trailing requests in the stream of requests.
	 */
	if (!req->nbytes && !fini)
		return 0;

	mutex_lock(&actx->mutex);

	rctx->fini = fini;

	if (!actx->hot) {
		actx->hot = 1;
		rctx->init = 1;
	}

	spin_lock(&sdcp->lock[actx->chan]);
	ret = crypto_enqueue_request(&sdcp->queue[actx->chan], &req->base);
	spin_unlock(&sdcp->lock[actx->chan]);

	wake_up_process(sdcp->thread[actx->chan]);
	mutex_unlock(&actx->mutex);

	return ret;
}

static int dcp_sha_update(struct ahash_request *req)
{
	return dcp_sha_update_fx(req, 0);
}

static int dcp_sha_final(struct ahash_request *req)
{
	ahash_request_set_crypt(req, NULL, req->result, 0);
	return dcp_sha_update_fx(req, 1);
}

static int dcp_sha_finup(struct ahash_request *req)
{
	return dcp_sha_update_fx(req, 1);
}
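
/* digest() is simply init() followed by finup() on the same request. */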
static int dcp_sha_digest(struct ahash_request *req)
{
	int ret;

	ret = dcp_sha_init(req);
	if (ret)
		return ret;

	return dcp_sha_finup(req);
}

static int dcp_sha_noimport(struct ahash_request *req, const void *in)
{
	return -ENOSYS;
}

static int dcp_sha_noexport(struct ahash_request *req, void *out)
{
	return -ENOSYS;
}

static int dcp_sha_cra_init(struct crypto_tfm *tfm)
{
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct dcp_sha_req_ctx));
	return 0;
}

static void dcp_sha_cra_exit(struct crypto_tfm *tfm)
{
}

/* AES 128 ECB and AES 128 CBC */
static struct crypto_alg dcp_aes_algs[] = {
	{
		.cra_name		= "ecb(aes)",
		.cra_driver_name	= "ecb-aes-dcp",
		.cra_priority		= 400,
		.cra_alignmask		= 15,
		.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
					  CRYPTO_ALG_ASYNC |
					  CRYPTO_ALG_NEED_FALLBACK,
		.cra_init		= mxs_dcp_aes_fallback_init,
		.cra_exit		= mxs_dcp_aes_fallback_exit,
		.cra_blocksize		= AES_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct dcp_async_ctx),
		.cra_type		= &crypto_ablkcipher_type,
		.cra_module		= THIS_MODULE,
		.cra_u	= {
			.ablkcipher = {
				.min_keysize	= AES_MIN_KEY_SIZE,
				.max_keysize	= AES_MAX_KEY_SIZE,
				.setkey		= mxs_dcp_aes_setkey,
				.encrypt	= mxs_dcp_aes_ecb_encrypt,
				.decrypt	= mxs_dcp_aes_ecb_decrypt
			},
		},
	}, {
		.cra_name		= "cbc(aes)",
		.cra_driver_name	= "cbc-aes-dcp",
		.cra_priority		= 400,
		.cra_alignmask		= 15,
		.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
					  CRYPTO_ALG_ASYNC |
					  CRYPTO_ALG_NEED_FALLBACK,
		.cra_init		= mxs_dcp_aes_fallback_init,
		.cra_exit		= mxs_dcp_aes_fallback_exit,
		.cra_blocksize		= AES_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct dcp_async_ctx),
		.cra_type		= &crypto_ablkcipher_type,
		.cra_module		= THIS_MODULE,
		.cra_u	= {
			.ablkcipher = {
				.min_keysize	= AES_MIN_KEY_SIZE,
				.max_keysize	= AES_MAX_KEY_SIZE,
				.setkey		= mxs_dcp_aes_setkey,
				.encrypt	= mxs_dcp_aes_cbc_encrypt,
				.decrypt	= mxs_dcp_aes_cbc_decrypt,
				.ivsize		= AES_BLOCK_SIZE,
			},
		},
	},
};

/* SHA1 */
static struct ahash_alg dcp_sha1_alg = {
	.init	= dcp_sha_init,
	.update	= dcp_sha_update,
	.final	= dcp_sha_final,
	.finup	= dcp_sha_finup,
	.digest	= dcp_sha_digest,
	.import	= dcp_sha_noimport,
	.export	= dcp_sha_noexport,
	.halg	= {
		.digestsize	= SHA1_DIGEST_SIZE,
		.base		= {
			.cra_name		= "sha1",
			.cra_driver_name	= "sha1-dcp",
			.cra_priority		= 400,
			.cra_alignmask		= 63,
			.cra_flags		= CRYPTO_ALG_ASYNC,
			.cra_blocksize		= SHA1_BLOCK_SIZE,
			.cra_ctxsize		= sizeof(struct dcp_async_ctx),
			.cra_module		= THIS_MODULE,
			.cra_init		= dcp_sha_cra_init,
			.cra_exit		= dcp_sha_cra_exit,
		},
	},
};

/* SHA256 */
static struct ahash_alg dcp_sha256_alg = {
	.init	= dcp_sha_init,
	.update	= dcp_sha_update,
	.final	= dcp_sha_final,
	.finup	= dcp_sha_finup,
	.digest	= dcp_sha_digest,
	.import	= dcp_sha_noimport,
	.export	= dcp_sha_noexport,
	.halg	= {
		.digestsize	= SHA256_DIGEST_SIZE,
		.base		= {
			.cra_name		= "sha256",
			.cra_driver_name	= "sha256-dcp",
			.cra_priority		= 400,
			.cra_alignmask		= 63,
			.cra_flags		= CRYPTO_ALG_ASYNC,
			.cra_blocksize		= SHA256_BLOCK_SIZE,
			.cra_ctxsize		= sizeof(struct dcp_async_ctx),
			.cra_module		= THIS_MODULE,
			.cra_init		= dcp_sha_cra_init,
			.cra_exit		= dcp_sha_cra_exit,
		},
	},
};
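
/*
 * DCP interrupt handler: acknowledge the per-channel interrupt bits and
 * complete the corresponding waiters in mxs_dcp_start_dma().
 */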
static irqreturn_t mxs_dcp_irq(int irq, void *context)
{
	struct dcp *sdcp = context;
	uint32_t stat;
	int i;

	stat = readl(sdcp->base + MXS_DCP_STAT);
	stat &= MXS_DCP_STAT_IRQ_MASK;
	if (!stat)
		return IRQ_NONE;

	/* Clear the interrupts. */
	writel(stat, sdcp->base + MXS_DCP_STAT_CLR);

	/* Complete the DMA requests that finished. */
	for (i = 0; i < DCP_MAX_CHANS; i++)
		if (stat & (1 << i))
			complete(&sdcp->completion[i]);

	return IRQ_HANDLED;
}

static int mxs_dcp_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct dcp *sdcp = NULL;
	int i, ret;

	struct resource *iores;
	int dcp_vmi_irq, dcp_irq;

	if (global_sdcp) {
		dev_err(dev, "Only one DCP instance allowed!\n");
		return -ENODEV;
	}

	iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	dcp_vmi_irq = platform_get_irq(pdev, 0);
	if (dcp_vmi_irq < 0) {
		dev_err(dev, "Failed to get IRQ: (%d)!\n", dcp_vmi_irq);
		return dcp_vmi_irq;
	}

	dcp_irq = platform_get_irq(pdev, 1);
	if (dcp_irq < 0) {
		dev_err(dev, "Failed to get IRQ: (%d)!\n", dcp_irq);
		return dcp_irq;
	}

	sdcp = devm_kzalloc(dev, sizeof(*sdcp), GFP_KERNEL);
	if (!sdcp)
		return -ENOMEM;

	sdcp->dev = dev;
	sdcp->base = devm_ioremap_resource(dev, iores);
	if (IS_ERR(sdcp->base))
		return PTR_ERR(sdcp->base);
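
	/* Both DCP interrupt lines are serviced by the same handler. */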
	ret = devm_request_irq(dev, dcp_vmi_irq, mxs_dcp_irq, 0,
			       "dcp-vmi-irq", sdcp);
	if (ret) {
		dev_err(dev, "Failed to claim DCP VMI IRQ!\n");
		return ret;
	}

	ret = devm_request_irq(dev, dcp_irq, mxs_dcp_irq, 0,
			       "dcp-irq", sdcp);
	if (ret) {
		dev_err(dev, "Failed to claim DCP IRQ!\n");
		return ret;
	}

	/* Allocate coherent helper block. */
	sdcp->coh = devm_kzalloc(dev, sizeof(*sdcp->coh) + DCP_ALIGNMENT,
				 GFP_KERNEL);
	if (!sdcp->coh)
		return -ENOMEM;

	/* Re-align the structure so it fits the DCP constraints. */
	sdcp->coh = PTR_ALIGN(sdcp->coh, DCP_ALIGNMENT);

	/* Restart the DCP block. */
	ret = stmp_reset_block(sdcp->base);
	if (ret)
		return ret;

	/* Initialize control register. */
	writel(MXS_DCP_CTRL_GATHER_RESIDUAL_WRITES |
	       MXS_DCP_CTRL_ENABLE_CONTEXT_CACHING | 0xf,
	       sdcp->base + MXS_DCP_CTRL);

	/* Enable all DCP DMA channels. */
	writel(MXS_DCP_CHANNELCTRL_ENABLE_CHANNEL_MASK,
	       sdcp->base + MXS_DCP_CHANNELCTRL);

	/*
	 * We do not enable context switching. Give the context buffer a
	 * pointer to an illegal address so if context switching is
	 * inadvertently enabled, the DCP will return an error instead of
	 * trashing good memory. The DCP DMA cannot access ROM, so any ROM
	 * address will do.
	 */
	writel(0xffff0000, sdcp->base + MXS_DCP_CONTEXT);
	for (i = 0; i < DCP_MAX_CHANS; i++)
		writel(0xffffffff, sdcp->base + MXS_DCP_CH_N_STAT_CLR(i));
	writel(0xffffffff, sdcp->base + MXS_DCP_STAT_CLR);

	global_sdcp = sdcp;

	platform_set_drvdata(pdev, sdcp);

	for (i = 0; i < DCP_MAX_CHANS; i++) {
		spin_lock_init(&sdcp->lock[i]);
		init_completion(&sdcp->completion[i]);
		crypto_init_queue(&sdcp->queue[i], 50);
	}

	/* Create the SHA and AES handler threads. */
	sdcp->thread[DCP_CHAN_HASH_SHA] = kthread_run(dcp_chan_thread_sha,
						      NULL, "mxs_dcp_chan/sha");
	if (IS_ERR(sdcp->thread[DCP_CHAN_HASH_SHA])) {
		dev_err(dev, "Error starting SHA thread!\n");
		return PTR_ERR(sdcp->thread[DCP_CHAN_HASH_SHA]);
	}

	sdcp->thread[DCP_CHAN_CRYPTO] = kthread_run(dcp_chan_thread_aes,
						    NULL, "mxs_dcp_chan/aes");
	if (IS_ERR(sdcp->thread[DCP_CHAN_CRYPTO])) {
		dev_err(dev, "Error starting crypto thread!\n");
		ret = PTR_ERR(sdcp->thread[DCP_CHAN_CRYPTO]);
		goto err_destroy_sha_thread;
	}

	/* Register the various crypto algorithms. */
	sdcp->caps = readl(sdcp->base + MXS_DCP_CAPABILITY1);

	if (sdcp->caps & MXS_DCP_CAPABILITY1_AES128) {
		ret = crypto_register_algs(dcp_aes_algs,
					   ARRAY_SIZE(dcp_aes_algs));
		if (ret) {
			/* Failed to register algorithm. */
			dev_err(dev, "Failed to register AES crypto!\n");
			goto err_destroy_aes_thread;
		}
	}

	if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA1) {
		ret = crypto_register_ahash(&dcp_sha1_alg);
		if (ret) {
			dev_err(dev, "Failed to register %s hash!\n",
				dcp_sha1_alg.halg.base.cra_name);
			goto err_unregister_aes;
		}
	}

	if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA256) {
		ret = crypto_register_ahash(&dcp_sha256_alg);
		if (ret) {
			dev_err(dev, "Failed to register %s hash!\n",
				dcp_sha256_alg.halg.base.cra_name);
			goto err_unregister_sha1;
		}
	}

	return 0;

err_unregister_sha1:
	if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA1)
		crypto_unregister_ahash(&dcp_sha1_alg);

err_unregister_aes:
	if (sdcp->caps & MXS_DCP_CAPABILITY1_AES128)
		crypto_unregister_algs(dcp_aes_algs, ARRAY_SIZE(dcp_aes_algs));

err_destroy_aes_thread:
	kthread_stop(sdcp->thread[DCP_CHAN_CRYPTO]);

err_destroy_sha_thread:
	kthread_stop(sdcp->thread[DCP_CHAN_HASH_SHA]);

	return ret;
}

static int mxs_dcp_remove(struct platform_device *pdev)
{
	struct dcp *sdcp = platform_get_drvdata(pdev);

	if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA256)
		crypto_unregister_ahash(&dcp_sha256_alg);

	if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA1)
		crypto_unregister_ahash(&dcp_sha1_alg);

	if (sdcp->caps & MXS_DCP_CAPABILITY1_AES128)
		crypto_unregister_algs(dcp_aes_algs, ARRAY_SIZE(dcp_aes_algs));

	kthread_stop(sdcp->thread[DCP_CHAN_HASH_SHA]);
	kthread_stop(sdcp->thread[DCP_CHAN_CRYPTO]);

	platform_set_drvdata(pdev, NULL);

	global_sdcp = NULL;

	return 0;
}

static const struct of_device_id mxs_dcp_dt_ids[] = {
	{ .compatible = "fsl,imx23-dcp", .data = NULL, },
	{ .compatible = "fsl,imx28-dcp", .data = NULL, },
	{ /* sentinel */ }
};

MODULE_DEVICE_TABLE(of, mxs_dcp_dt_ids);

static struct platform_driver mxs_dcp_driver = {
	.probe	= mxs_dcp_probe,
	.remove	= mxs_dcp_remove,
	.driver	= {
		.name		= "mxs-dcp",
		.of_match_table	= mxs_dcp_dt_ids,
	},
};

module_platform_driver(mxs_dcp_driver);

MODULE_AUTHOR("Marek Vasut <marex@denx.de>");
MODULE_DESCRIPTION("Freescale MXS DCP Driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:mxs-dcp");