/*
 * Freescale i.MX23/i.MX28 Data Co-Processor driver
 *
 * Copyright (C) 2013 Marek Vasut <marex@denx.de>
 *
 * The code contained herein is licensed under the GNU General Public
 * License. You may obtain a copy of the GNU General Public License
 * Version 2 or later at the following locations:
 *
 * http://www.opensource.org/licenses/gpl-license.html
 * http://www.gnu.org/copyleft/gpl.html
 */
#include <linux/crypto.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/stmp_device.h>

#include <crypto/aes.h>
#include <crypto/sha.h>
#include <crypto/internal/hash.h>
#include <crypto/scatterwalk.h>
#define DCP_MAX_CHANS	4
#define DCP_BUF_SZ	PAGE_SIZE
#define DCP_SHA_PAY_SZ	64

#define DCP_ALIGNMENT	64
/*
 * Null hashes to align with hardware behaviour on i.MX6SL and i.MX6ULL.
 * These are the byte-reversed digests of the empty message, matching the
 * flipped order in which the hardware writes out its results:
 * SHA-1("")   = da39a3ee 5e6b4b0d 3255bfef 95601890 afd80709
 * SHA-256("") = e3b0c442 98fc1c14 9afbf4c8 996fb924
 *               27ae41e4 649b934c a495991b 7852b855
 */
static const uint8_t sha1_null_hash[] =
	"\x09\x07\xd8\xaf\x90\x18\x60\x95\xef\xbf"
	"\x55\x32\x0d\x4b\x6b\x5e\xee\xa3\x39\xda";

static const uint8_t sha256_null_hash[] =
	"\x55\xb8\x52\x78\x1b\x99\x95\xa4"
	"\x4c\x93\x9b\x64\xe4\x41\xae\x27"
	"\x24\xb9\x6f\x99\xc8\xf4\xfb\x9a"
	"\x14\x1c\xfc\x98\x42\xc4\xb0\xe3";
/* DCP DMA descriptor. */
struct dcp_dma_desc {
	uint32_t	next_cmd_addr;
	uint32_t	control0;
	uint32_t	control1;
	uint32_t	source;
	uint32_t	destination;
	uint32_t	size;
	uint32_t	payload;
	uint32_t	status;
};
/* Coherent aligned block for bounce buffering. */
struct dcp_coherent_block {
	uint8_t			aes_in_buf[DCP_BUF_SZ];
	uint8_t			aes_out_buf[DCP_BUF_SZ];
	uint8_t			sha_in_buf[DCP_BUF_SZ];
	uint8_t			sha_out_buf[DCP_SHA_PAY_SZ];

	uint8_t			aes_key[2 * AES_KEYSIZE_128];

	struct dcp_dma_desc	desc[DCP_MAX_CHANS];
};
struct dcp {
	struct device			*dev;
	void __iomem			*base;

	uint32_t			caps;

	struct dcp_coherent_block	*coh;

	struct completion		completion[DCP_MAX_CHANS];
	spinlock_t			lock[DCP_MAX_CHANS];
	struct task_struct		*thread[DCP_MAX_CHANS];
	struct crypto_queue		queue[DCP_MAX_CHANS];
};
enum dcp_chan {
	DCP_CHAN_HASH_SHA	= 0,
	DCP_CHAN_CRYPTO		= 1,
};
struct dcp_async_ctx {
	/* Common context */
	struct mutex			mutex;
	uint32_t			chan;
	uint32_t			fill;

	/* SHA Hash-specific context */
	uint32_t			alg;
	unsigned int			hot:1;

	/* Crypto-specific context */
	struct crypto_ablkcipher	*fallback;
	unsigned int			key_len;
	uint8_t				key[AES_KEYSIZE_128];
};

struct dcp_aes_req_ctx {
	unsigned int	enc:1;
	unsigned int	ecb:1;
};

struct dcp_sha_req_ctx {
	unsigned int	init:1;
	unsigned int	fini:1;
};
/*
 * Due to the design of the Linux Crypto API, there can only ever be
 * a single instance of the MXS DCP, hence the global state below.
 */
static struct dcp *global_sdcp;
/* DCP register layout. */
#define MXS_DCP_CTRL				0x00
#define MXS_DCP_CTRL_GATHER_RESIDUAL_WRITES	(1 << 23)
#define MXS_DCP_CTRL_ENABLE_CONTEXT_CACHING	(1 << 22)

#define MXS_DCP_STAT				0x10
#define MXS_DCP_STAT_CLR			0x18
#define MXS_DCP_STAT_IRQ_MASK			0xf

#define MXS_DCP_CHANNELCTRL			0x20
#define MXS_DCP_CHANNELCTRL_ENABLE_CHANNEL_MASK	0xff

#define MXS_DCP_CAPABILITY1			0x40
#define MXS_DCP_CAPABILITY1_SHA256		(4 << 16)
#define MXS_DCP_CAPABILITY1_SHA1		(1 << 16)
#define MXS_DCP_CAPABILITY1_AES128		(1 << 0)

#define MXS_DCP_CONTEXT				0x50

#define MXS_DCP_CH_N_CMDPTR(n)			(0x100 + ((n) * 0x40))

#define MXS_DCP_CH_N_SEMA(n)			(0x110 + ((n) * 0x40))

#define MXS_DCP_CH_N_STAT(n)			(0x120 + ((n) * 0x40))
#define MXS_DCP_CH_N_STAT_CLR(n)		(0x128 + ((n) * 0x40))

/* DMA descriptor bits. */
#define MXS_DCP_CONTROL0_HASH_TERM		(1 << 13)
#define MXS_DCP_CONTROL0_HASH_INIT		(1 << 12)
#define MXS_DCP_CONTROL0_PAYLOAD_KEY		(1 << 11)
#define MXS_DCP_CONTROL0_CIPHER_ENCRYPT		(1 << 8)
#define MXS_DCP_CONTROL0_CIPHER_INIT		(1 << 9)
#define MXS_DCP_CONTROL0_ENABLE_HASH		(1 << 6)
#define MXS_DCP_CONTROL0_ENABLE_CIPHER		(1 << 5)
#define MXS_DCP_CONTROL0_DECR_SEMAPHORE		(1 << 1)
#define MXS_DCP_CONTROL0_INTERRUPT		(1 << 0)

#define MXS_DCP_CONTROL1_HASH_SELECT_SHA256	(2 << 16)
#define MXS_DCP_CONTROL1_HASH_SELECT_SHA1	(0 << 16)
#define MXS_DCP_CONTROL1_CIPHER_MODE_CBC	(1 << 4)
#define MXS_DCP_CONTROL1_CIPHER_MODE_ECB	(0 << 4)
#define MXS_DCP_CONTROL1_CIPHER_SELECT_AES128	(0 << 0)
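
/*
 * For orientation (illustrative only, not used by the driver): a one-shot
 * hash pass over a single buffer, i.e. a request that is both the first
 * (INIT) and the last (TERM) in a stream, combines the control0 bits above
 * as follows. mxs_dcp_run_sha() below assembles the same word at runtime.
 */
#define MXS_DCP_EXAMPLE_SHA_ONESHOT_CONTROL0	\
	(MXS_DCP_CONTROL0_DECR_SEMAPHORE |	\
	 MXS_DCP_CONTROL0_INTERRUPT |		\
	 MXS_DCP_CONTROL0_ENABLE_HASH |		\
	 MXS_DCP_CONTROL0_HASH_INIT |		\
	 MXS_DCP_CONTROL0_HASH_TERM)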
static int mxs_dcp_start_dma(struct dcp_async_ctx *actx)
{
	struct dcp *sdcp = global_sdcp;
	const int chan = actx->chan;
	uint32_t stat;
	unsigned long ret;
	int err;
	struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan];

	dma_addr_t desc_phys = dma_map_single(sdcp->dev, desc, sizeof(*desc),
					      DMA_TO_DEVICE);

	reinit_completion(&sdcp->completion[chan]);

	/* Clear status register. */
	writel(0xffffffff, sdcp->base + MXS_DCP_CH_N_STAT_CLR(chan));

	/* Load the DMA descriptor. */
	writel(desc_phys, sdcp->base + MXS_DCP_CH_N_CMDPTR(chan));

	/* Increment the semaphore to start the DMA transfer. */
	writel(1, sdcp->base + MXS_DCP_CH_N_SEMA(chan));

	ret = wait_for_completion_timeout(&sdcp->completion[chan],
					  msecs_to_jiffies(1000));
	if (!ret) {
		dev_err(sdcp->dev, "Channel %i timeout (DCP_STAT=0x%08x)\n",
			chan, readl(sdcp->base + MXS_DCP_STAT));
		err = -ETIMEDOUT;
		goto out_unmap;
	}

	stat = readl(sdcp->base + MXS_DCP_CH_N_STAT(chan));
	if (stat & 0xff) {
		dev_err(sdcp->dev, "Channel %i error (CH_STAT=0x%08x)\n",
			chan, stat);
		err = -EINVAL;
		goto out_unmap;
	}

	err = 0;

out_unmap:
	/* Release the descriptor mapping on the error paths as well. */
	dma_unmap_single(sdcp->dev, desc_phys, sizeof(*desc), DMA_TO_DEVICE);

	return err;
}
/*
 * Encryption (AES128)
 */
static int mxs_dcp_run_aes(struct dcp_async_ctx *actx,
			   struct ablkcipher_request *req, int init)
{
	struct dcp *sdcp = global_sdcp;
	struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan];
	struct dcp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);
	int ret;

	dma_addr_t key_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_key,
					     2 * AES_KEYSIZE_128,
					     DMA_TO_DEVICE);
	dma_addr_t src_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_in_buf,
					     DCP_BUF_SZ, DMA_TO_DEVICE);
	dma_addr_t dst_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_out_buf,
					     DCP_BUF_SZ, DMA_FROM_DEVICE);

	if (actx->fill % AES_BLOCK_SIZE) {
		dev_err(sdcp->dev, "Invalid block size!\n");
		ret = -EINVAL;
		goto aes_done_run;
	}

	/* Fill in the DMA descriptor. */
	desc->control0 = MXS_DCP_CONTROL0_DECR_SEMAPHORE |
			 MXS_DCP_CONTROL0_INTERRUPT |
			 MXS_DCP_CONTROL0_ENABLE_CIPHER;

	/* Payload contains the key. */
	desc->control0 |= MXS_DCP_CONTROL0_PAYLOAD_KEY;

	if (rctx->enc)
		desc->control0 |= MXS_DCP_CONTROL0_CIPHER_ENCRYPT;
	if (init)
		desc->control0 |= MXS_DCP_CONTROL0_CIPHER_INIT;

	desc->control1 = MXS_DCP_CONTROL1_CIPHER_SELECT_AES128;

	if (rctx->ecb)
		desc->control1 |= MXS_DCP_CONTROL1_CIPHER_MODE_ECB;
	else
		desc->control1 |= MXS_DCP_CONTROL1_CIPHER_MODE_CBC;

	desc->next_cmd_addr = 0;
	desc->source = src_phys;
	desc->destination = dst_phys;
	desc->size = actx->fill;
	desc->payload = key_phys;
	desc->status = 0;

	ret = mxs_dcp_start_dma(actx);

aes_done_run:
	dma_unmap_single(sdcp->dev, key_phys, 2 * AES_KEYSIZE_128,
			 DMA_TO_DEVICE);
	dma_unmap_single(sdcp->dev, src_phys, DCP_BUF_SZ, DMA_TO_DEVICE);
	dma_unmap_single(sdcp->dev, dst_phys, DCP_BUF_SZ, DMA_FROM_DEVICE);

	return ret;
}
static int mxs_dcp_aes_block_crypt(struct crypto_async_request *arq)
{
	struct dcp *sdcp = global_sdcp;

	struct ablkcipher_request *req = ablkcipher_request_cast(arq);
	struct dcp_async_ctx *actx = crypto_tfm_ctx(arq->tfm);
	struct dcp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);

	struct scatterlist *dst = req->dst;
	struct scatterlist *src = req->src;
	const int nents = sg_nents(req->src);

	const int out_off = DCP_BUF_SZ;
	uint8_t *in_buf = sdcp->coh->aes_in_buf;
	uint8_t *out_buf = sdcp->coh->aes_out_buf;

	uint8_t *out_tmp, *src_buf, *dst_buf = NULL;
	uint32_t dst_off = 0;
	uint32_t last_out_len = 0;

	uint8_t *key = sdcp->coh->aes_key;

	int ret = 0;
	int split = 0;
	unsigned int i, len, clen, rem = 0, tlen = 0;
	int init = 0;
	bool limit_hit = false;

	actx->fill = 0;

	/* Copy the key from the temporary location. */
	memcpy(key, actx->key, actx->key_len);

	if (!rctx->ecb) {
		/* Copy the CBC IV just past the key. */
		memcpy(key + AES_KEYSIZE_128, req->info, AES_KEYSIZE_128);
		/* CBC needs the INIT set. */
		init = 1;
	} else {
		memset(key + AES_KEYSIZE_128, 0, AES_KEYSIZE_128);
	}

	for_each_sg(req->src, src, nents, i) {
		src_buf = sg_virt(src);
		len = sg_dma_len(src);
		tlen += len;
		limit_hit = tlen > req->nbytes;
		if (limit_hit)
			len = req->nbytes - (tlen - len);

		do {
			if (actx->fill + len > out_off)
				clen = out_off - actx->fill;
			else
				clen = len;

			memcpy(in_buf + actx->fill, src_buf, clen);
			len -= clen;
			src_buf += clen;
			actx->fill += clen;

			/*
			 * If we filled the buffer or this is the last SG,
			 * submit the buffer.
			 */
			if (actx->fill == out_off || sg_is_last(src) ||
			    limit_hit) {
				ret = mxs_dcp_run_aes(actx, req, init);
				if (ret)
					return ret;
				init = 0;

				out_tmp = out_buf;
				last_out_len = actx->fill;
				while (dst && actx->fill) {
					if (!split) {
						dst_buf = sg_virt(dst);
						dst_off = 0;
					}
					rem = min(sg_dma_len(dst) - dst_off,
						  actx->fill);

					memcpy(dst_buf + dst_off, out_tmp, rem);
					out_tmp += rem;
					dst_off += rem;
					actx->fill -= rem;

					if (dst_off == sg_dma_len(dst)) {
						dst = sg_next(dst);
						split = 0;
					} else {
						split = 1;
					}
				}
			}
		} while (len);

		if (limit_hit)
			break;
	}

	/* Copy the IV for CBC for chaining */
	if (!rctx->ecb) {
		if (rctx->enc)
			memcpy(req->info, out_buf+(last_out_len-AES_BLOCK_SIZE),
			       AES_BLOCK_SIZE);
		else
			memcpy(req->info, in_buf+(last_out_len-AES_BLOCK_SIZE),
			       AES_BLOCK_SIZE);
	}

	return ret;
}
static int dcp_chan_thread_aes(void *data)
{
	struct dcp *sdcp = global_sdcp;
	const int chan = DCP_CHAN_CRYPTO;

	struct crypto_async_request *backlog;
	struct crypto_async_request *arq;

	int ret;

	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);

		spin_lock(&sdcp->lock[chan]);
		backlog = crypto_get_backlog(&sdcp->queue[chan]);
		arq = crypto_dequeue_request(&sdcp->queue[chan]);
		spin_unlock(&sdcp->lock[chan]);

		if (!backlog && !arq) {
			schedule();
			continue;
		}

		set_current_state(TASK_RUNNING);

		if (backlog)
			backlog->complete(backlog, -EINPROGRESS);

		if (arq) {
			ret = mxs_dcp_aes_block_crypt(arq);
			arq->complete(arq, ret);
		}
	}

	return 0;
}
static int mxs_dcp_block_fallback(struct ablkcipher_request *req, int enc)
{
	struct crypto_tfm *tfm =
		crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
	struct dcp_async_ctx *ctx = crypto_ablkcipher_ctx(
		crypto_ablkcipher_reqtfm(req));
	int ret;

	ablkcipher_request_set_tfm(req, ctx->fallback);

	if (enc)
		ret = crypto_ablkcipher_encrypt(req);
	else
		ret = crypto_ablkcipher_decrypt(req);

	ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm));

	return ret;
}
static int mxs_dcp_aes_enqueue(struct ablkcipher_request *req, int enc, int ecb)
{
	struct dcp *sdcp = global_sdcp;
	struct crypto_async_request *arq = &req->base;
	struct dcp_async_ctx *actx = crypto_tfm_ctx(arq->tfm);
	struct dcp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);
	int ret;

	if (unlikely(actx->key_len != AES_KEYSIZE_128))
		return mxs_dcp_block_fallback(req, enc);

	rctx->enc = enc;
	rctx->ecb = ecb;
	actx->chan = DCP_CHAN_CRYPTO;

	spin_lock(&sdcp->lock[actx->chan]);
	ret = crypto_enqueue_request(&sdcp->queue[actx->chan], &req->base);
	spin_unlock(&sdcp->lock[actx->chan]);

	wake_up_process(sdcp->thread[actx->chan]);

	return ret;
}
static int mxs_dcp_aes_ecb_decrypt(struct ablkcipher_request *req)
{
	return mxs_dcp_aes_enqueue(req, 0, 1);
}

static int mxs_dcp_aes_ecb_encrypt(struct ablkcipher_request *req)
{
	return mxs_dcp_aes_enqueue(req, 1, 1);
}

static int mxs_dcp_aes_cbc_decrypt(struct ablkcipher_request *req)
{
	return mxs_dcp_aes_enqueue(req, 0, 0);
}

static int mxs_dcp_aes_cbc_encrypt(struct ablkcipher_request *req)
{
	return mxs_dcp_aes_enqueue(req, 1, 0);
}
static int mxs_dcp_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
			      unsigned int len)
{
	struct dcp_async_ctx *actx = crypto_ablkcipher_ctx(tfm);
	unsigned int ret;

	/*
	 * AES 128 is supported by the hardware, store the key into the
	 * temporary buffer and exit. We must use the temporary buffer here,
	 * since there can still be an operation in progress.
	 */
	actx->key_len = len;
	if (len == AES_KEYSIZE_128) {
		memcpy(actx->key, key, len);
		return 0;
	}

	/* Check if the key size is supported by kernel at all. */
	if (len != AES_KEYSIZE_192 && len != AES_KEYSIZE_256) {
		tfm->base.crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}

	/*
	 * If the requested AES key size is not supported by the hardware,
	 * but is supported by the in-kernel software implementation, we use
	 * the software fallback.
	 */
	actx->fallback->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
	actx->fallback->base.crt_flags |=
		tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK;

	ret = crypto_ablkcipher_setkey(actx->fallback, key, len);
	if (!ret)
		return 0;

	tfm->base.crt_flags &= ~CRYPTO_TFM_RES_MASK;
	tfm->base.crt_flags |=
		actx->fallback->base.crt_flags & CRYPTO_TFM_RES_MASK;

	return ret;
}
static int mxs_dcp_aes_fallback_init(struct crypto_tfm *tfm)
{
	const char *name = crypto_tfm_alg_name(tfm);
	const uint32_t flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK;
	struct dcp_async_ctx *actx = crypto_tfm_ctx(tfm);
	struct crypto_ablkcipher *blk;

	blk = crypto_alloc_ablkcipher(name, 0, flags);
	if (IS_ERR(blk))
		return PTR_ERR(blk);

	actx->fallback = blk;
	tfm->crt_ablkcipher.reqsize = sizeof(struct dcp_aes_req_ctx);
	return 0;
}
static void mxs_dcp_aes_fallback_exit(struct crypto_tfm *tfm)
{
	struct dcp_async_ctx *actx = crypto_tfm_ctx(tfm);

	crypto_free_ablkcipher(actx->fallback);
	actx->fallback = NULL;
}
/*
 * Hashing (SHA1/SHA256)
 */
static int mxs_dcp_run_sha(struct ahash_request *req)
{
	struct dcp *sdcp = global_sdcp;

	int ret;

	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);
	struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req);
	struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan];

	dma_addr_t digest_phys = 0;
	dma_addr_t buf_phys = dma_map_single(sdcp->dev, sdcp->coh->sha_in_buf,
					     DCP_BUF_SZ, DMA_TO_DEVICE);

	/* Fill in the DMA descriptor. */
	desc->control0 = MXS_DCP_CONTROL0_DECR_SEMAPHORE |
			 MXS_DCP_CONTROL0_INTERRUPT |
			 MXS_DCP_CONTROL0_ENABLE_HASH;
	if (rctx->init)
		desc->control0 |= MXS_DCP_CONTROL0_HASH_INIT;

	desc->control1 = actx->alg;
	desc->next_cmd_addr = 0;
	desc->source = buf_phys;
	desc->destination = 0;
	desc->size = actx->fill;
	desc->payload = 0;
	desc->status = 0;

	/*
	 * Align driver with hw behavior when generating null hashes
	 */
	if (rctx->init && rctx->fini && desc->size == 0) {
		struct hash_alg_common *halg = crypto_hash_alg_common(tfm);
		const uint8_t *sha_buf =
			(actx->alg == MXS_DCP_CONTROL1_HASH_SELECT_SHA1) ?
			sha1_null_hash : sha256_null_hash;
		memcpy(sdcp->coh->sha_out_buf, sha_buf, halg->digestsize);
		ret = 0;
		goto done_run;
	}

	/* Set HASH_TERM bit for last transfer block. */
	if (rctx->fini) {
		digest_phys = dma_map_single(sdcp->dev, sdcp->coh->sha_out_buf,
					     DCP_SHA_PAY_SZ, DMA_FROM_DEVICE);
		desc->control0 |= MXS_DCP_CONTROL0_HASH_TERM;
		desc->payload = digest_phys;
	}

	ret = mxs_dcp_start_dma(actx);

	if (rctx->fini)
		dma_unmap_single(sdcp->dev, digest_phys, DCP_SHA_PAY_SZ,
				 DMA_FROM_DEVICE);

done_run:
	dma_unmap_single(sdcp->dev, buf_phys, DCP_BUF_SZ, DMA_TO_DEVICE);

	return ret;
}
static int dcp_sha_req_to_buf(struct crypto_async_request *arq)
{
	struct dcp *sdcp = global_sdcp;

	struct ahash_request *req = ahash_request_cast(arq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);
	struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req);
	struct hash_alg_common *halg = crypto_hash_alg_common(tfm);

	uint8_t *in_buf = sdcp->coh->sha_in_buf;
	uint8_t *out_buf = sdcp->coh->sha_out_buf;

	struct scatterlist *src;

	unsigned int i, len, clen, oft = 0;
	int ret;

	int fin = rctx->fini;
	if (fin)
		rctx->fini = 0;

	src = req->src;
	len = req->nbytes;

	while (len) {
		if (actx->fill + len > DCP_BUF_SZ)
			clen = DCP_BUF_SZ - actx->fill;
		else
			clen = len;

		scatterwalk_map_and_copy(in_buf + actx->fill, src, oft, clen,
					 0);

		len -= clen;
		oft += clen;
		actx->fill += clen;

		/*
		 * If we filled the buffer and still have some
		 * more data, submit the buffer.
		 */
		if (len && actx->fill == DCP_BUF_SZ) {
			ret = mxs_dcp_run_sha(req);
			if (ret)
				return ret;
			actx->fill = 0;
		}
	}

	if (fin) {
		rctx->fini = 1;

		/* Submit whatever is left. */
		if (!req->result)
			return -EINVAL;

		ret = mxs_dcp_run_sha(req);
		if (ret)
			return ret;

		actx->fill = 0;

		/* For some reason the result is flipped */
		for (i = 0; i < halg->digestsize; i++)
			req->result[i] = out_buf[halg->digestsize - i - 1];
	}

	return 0;
}
static int dcp_chan_thread_sha(void *data)
{
	struct dcp *sdcp = global_sdcp;
	const int chan = DCP_CHAN_HASH_SHA;

	struct crypto_async_request *backlog;
	struct crypto_async_request *arq;

	struct dcp_sha_req_ctx *rctx;

	struct ahash_request *req;
	int ret, fini;

	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);

		spin_lock(&sdcp->lock[chan]);
		backlog = crypto_get_backlog(&sdcp->queue[chan]);
		arq = crypto_dequeue_request(&sdcp->queue[chan]);
		spin_unlock(&sdcp->lock[chan]);

		if (!backlog && !arq) {
			schedule();
			continue;
		}

		set_current_state(TASK_RUNNING);

		if (backlog)
			backlog->complete(backlog, -EINPROGRESS);

		if (arq) {
			req = ahash_request_cast(arq);
			rctx = ahash_request_ctx(req);

			ret = dcp_sha_req_to_buf(arq);
			fini = rctx->fini;
			arq->complete(arq, ret);
		}
	}

	return 0;
}
static int dcp_sha_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);

	struct hash_alg_common *halg = crypto_hash_alg_common(tfm);

	/*
	 * Start hashing session. The code below only inits the
	 * hashing session context, nothing more.
	 */
	memset(actx, 0, sizeof(*actx));

	if (strcmp(halg->base.cra_name, "sha1") == 0)
		actx->alg = MXS_DCP_CONTROL1_HASH_SELECT_SHA1;
	else
		actx->alg = MXS_DCP_CONTROL1_HASH_SELECT_SHA256;

	actx->fill = 0;
	actx->hot = 0;
	actx->chan = DCP_CHAN_HASH_SHA;

	mutex_init(&actx->mutex);

	return 0;
}
static int dcp_sha_update_fx(struct ahash_request *req, int fini)
{
	struct dcp *sdcp = global_sdcp;

	struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);

	int ret;

	/*
	 * Ignore requests that have no data in them and are not
	 * the trailing requests in the stream of requests.
	 */
	if (!req->nbytes && !fini)
		return 0;

	mutex_lock(&actx->mutex);

	rctx->fini = fini;

	if (!actx->hot) {
		actx->hot = 1;
		rctx->init = 1;
	}

	spin_lock(&sdcp->lock[actx->chan]);
	ret = crypto_enqueue_request(&sdcp->queue[actx->chan], &req->base);
	spin_unlock(&sdcp->lock[actx->chan]);

	wake_up_process(sdcp->thread[actx->chan]);
	mutex_unlock(&actx->mutex);

	return ret;
}
static int dcp_sha_update(struct ahash_request *req)
{
	return dcp_sha_update_fx(req, 0);
}

static int dcp_sha_final(struct ahash_request *req)
{
	ahash_request_set_crypt(req, NULL, req->result, 0);
	req->nbytes = 0;
	return dcp_sha_update_fx(req, 1);
}

static int dcp_sha_finup(struct ahash_request *req)
{
	return dcp_sha_update_fx(req, 1);
}

static int dcp_sha_digest(struct ahash_request *req)
{
	int ret;

	ret = dcp_sha_init(req);
	if (ret)
		return ret;

	return dcp_sha_finup(req);
}
static int dcp_sha_cra_init(struct crypto_tfm *tfm)
{
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct dcp_sha_req_ctx));
	return 0;
}

static void dcp_sha_cra_exit(struct crypto_tfm *tfm)
{
}
/* AES 128 ECB and AES 128 CBC */
static struct crypto_alg dcp_aes_algs[] = {
	{
		.cra_name		= "ecb(aes)",
		.cra_driver_name	= "ecb-aes-dcp",
		.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
					  CRYPTO_ALG_ASYNC |
					  CRYPTO_ALG_NEED_FALLBACK,
		.cra_init		= mxs_dcp_aes_fallback_init,
		.cra_exit		= mxs_dcp_aes_fallback_exit,
		.cra_blocksize		= AES_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct dcp_async_ctx),
		.cra_type		= &crypto_ablkcipher_type,
		.cra_module		= THIS_MODULE,
		.cra_u	= {
			.ablkcipher = {
				.min_keysize	= AES_MIN_KEY_SIZE,
				.max_keysize	= AES_MAX_KEY_SIZE,
				.setkey		= mxs_dcp_aes_setkey,
				.encrypt	= mxs_dcp_aes_ecb_encrypt,
				.decrypt	= mxs_dcp_aes_ecb_decrypt
			},
		},
	}, {
		.cra_name		= "cbc(aes)",
		.cra_driver_name	= "cbc-aes-dcp",
		.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
					  CRYPTO_ALG_ASYNC |
					  CRYPTO_ALG_NEED_FALLBACK,
		.cra_init		= mxs_dcp_aes_fallback_init,
		.cra_exit		= mxs_dcp_aes_fallback_exit,
		.cra_blocksize		= AES_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct dcp_async_ctx),
		.cra_type		= &crypto_ablkcipher_type,
		.cra_module		= THIS_MODULE,
		.cra_u	= {
			.ablkcipher = {
				.min_keysize	= AES_MIN_KEY_SIZE,
				.max_keysize	= AES_MAX_KEY_SIZE,
				.setkey		= mxs_dcp_aes_setkey,
				.encrypt	= mxs_dcp_aes_cbc_encrypt,
				.decrypt	= mxs_dcp_aes_cbc_decrypt,
				.ivsize		= AES_BLOCK_SIZE,
			},
		},
	},
};
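
/*
 * Illustrative sketch, not part of the driver: one way a kernel consumer
 * could exercise the "cbc(aes)" implementation above through the generic
 * ablkcipher API of this era. dcp_aes_example*() are made-up names for
 * the example; status propagation after the async wait is simplified.
 */
static void dcp_aes_example_done(struct crypto_async_request *req, int err)
{
	if (err != -EINPROGRESS)
		complete(req->data);
}

static int __maybe_unused dcp_aes_example(void)
{
	struct crypto_ablkcipher *tfm;
	struct ablkcipher_request *req;
	struct scatterlist sg;
	DECLARE_COMPLETION_ONSTACK(done);
	u8 key[AES_KEYSIZE_128] = { 0 };
	u8 iv[AES_BLOCK_SIZE] = { 0 };
	/* One AES block of payload, encrypted in place below. */
	u8 buf[AES_BLOCK_SIZE] = { 0 };
	int ret;

	tfm = crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_ablkcipher_setkey(tfm, key, sizeof(key));
	if (ret)
		goto out_free_tfm;

	req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto out_free_tfm;
	}

	sg_init_one(&sg, buf, sizeof(buf));
	ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
					dcp_aes_example_done, &done);
	ablkcipher_request_set_crypt(req, &sg, &sg, sizeof(buf), iv);

	ret = crypto_ablkcipher_encrypt(req);
	if (ret == -EINPROGRESS || ret == -EBUSY) {
		wait_for_completion(&done);
		ret = 0;	/* simplified: real code re-checks the status */
	}

	ablkcipher_request_free(req);
out_free_tfm:
	crypto_free_ablkcipher(tfm);
	return ret;
}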
/* SHA1 */
static struct ahash_alg dcp_sha1_alg = {
	.init	= dcp_sha_init,
	.update	= dcp_sha_update,
	.final	= dcp_sha_final,
	.finup	= dcp_sha_finup,
	.digest	= dcp_sha_digest,
	.halg	= {
		.digestsize	= SHA1_DIGEST_SIZE,
		.base		= {
			.cra_name		= "sha1",
			.cra_driver_name	= "sha1-dcp",
			.cra_flags		= CRYPTO_ALG_ASYNC,
			.cra_blocksize		= SHA1_BLOCK_SIZE,
			.cra_ctxsize		= sizeof(struct dcp_async_ctx),
			.cra_module		= THIS_MODULE,
			.cra_init		= dcp_sha_cra_init,
			.cra_exit		= dcp_sha_cra_exit,
		},
	},
};

/* SHA256 */
static struct ahash_alg dcp_sha256_alg = {
	.init	= dcp_sha_init,
	.update	= dcp_sha_update,
	.final	= dcp_sha_final,
	.finup	= dcp_sha_finup,
	.digest	= dcp_sha_digest,
	.halg	= {
		.digestsize	= SHA256_DIGEST_SIZE,
		.base		= {
			.cra_name		= "sha256",
			.cra_driver_name	= "sha256-dcp",
			.cra_flags		= CRYPTO_ALG_ASYNC,
			.cra_blocksize		= SHA256_BLOCK_SIZE,
			.cra_ctxsize		= sizeof(struct dcp_async_ctx),
			.cra_module		= THIS_MODULE,
			.cra_init		= dcp_sha_cra_init,
			.cra_exit		= dcp_sha_cra_exit,
		},
	},
};
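
/*
 * Illustrative sketch, not part of the driver: hashing a flat buffer with
 * the "sha256" ahash registered above. dcp_sha_example*() are made-up
 * names; the completion pattern mirrors the AES sketch earlier in the file.
 */
static void dcp_sha_example_done(struct crypto_async_request *req, int err)
{
	if (err != -EINPROGRESS)
		complete(req->data);
}

static int __maybe_unused dcp_sha_example(const void *data, unsigned int len,
					  u8 digest[SHA256_DIGEST_SIZE])
{
	struct crypto_ahash *tfm;
	struct ahash_request *req;
	struct scatterlist sg;
	DECLARE_COMPLETION_ONSTACK(done);
	int ret;

	tfm = crypto_alloc_ahash("sha256", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto out_free_tfm;
	}

	sg_init_one(&sg, data, len);
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   dcp_sha_example_done, &done);
	ahash_request_set_crypt(req, &sg, digest, len);

	ret = crypto_ahash_digest(req);
	if (ret == -EINPROGRESS || ret == -EBUSY) {
		wait_for_completion(&done);
		ret = 0;	/* simplified: real code re-checks the status */
	}

	ahash_request_free(req);
out_free_tfm:
	crypto_free_ahash(tfm);
	return ret;
}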
static irqreturn_t mxs_dcp_irq(int irq, void *context)
{
	struct dcp *sdcp = context;
	uint32_t stat;
	int i;

	stat = readl(sdcp->base + MXS_DCP_STAT);
	stat &= MXS_DCP_STAT_IRQ_MASK;
	if (!stat)
		return IRQ_NONE;

	/* Clear the interrupts. */
	writel(stat, sdcp->base + MXS_DCP_STAT_CLR);

	/* Complete the DMA requests that finished. */
	for (i = 0; i < DCP_MAX_CHANS; i++)
		if (stat & (1 << i))
			complete(&sdcp->completion[i]);

	return IRQ_HANDLED;
}
static int mxs_dcp_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct dcp *sdcp = NULL;
	int i, ret;

	struct resource *iores;
	int dcp_vmi_irq, dcp_irq;

	if (global_sdcp) {
		dev_err(dev, "Only one DCP instance allowed!\n");
		return -ENODEV;
	}

	iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	dcp_vmi_irq = platform_get_irq(pdev, 0);
	if (dcp_vmi_irq < 0)
		return dcp_vmi_irq;

	dcp_irq = platform_get_irq(pdev, 1);
	if (dcp_irq < 0)
		return dcp_irq;

	sdcp = devm_kzalloc(dev, sizeof(*sdcp), GFP_KERNEL);
	if (!sdcp)
		return -ENOMEM;

	sdcp->dev = dev;
	sdcp->base = devm_ioremap_resource(dev, iores);
	if (IS_ERR(sdcp->base))
		return PTR_ERR(sdcp->base);

	ret = devm_request_irq(dev, dcp_vmi_irq, mxs_dcp_irq, 0,
			       "dcp-vmi-irq", sdcp);
	if (ret) {
		dev_err(dev, "Failed to claim DCP VMI IRQ!\n");
		return ret;
	}

	ret = devm_request_irq(dev, dcp_irq, mxs_dcp_irq, 0,
			       "dcp-irq", sdcp);
	if (ret) {
		dev_err(dev, "Failed to claim DCP IRQ!\n");
		return ret;
	}

	/* Allocate coherent helper block. */
	sdcp->coh = devm_kzalloc(dev, sizeof(*sdcp->coh) + DCP_ALIGNMENT,
				 GFP_KERNEL);
	if (!sdcp->coh)
		return -ENOMEM;

	/* Re-align the structure so it fits the DCP constraints. */
	sdcp->coh = PTR_ALIGN(sdcp->coh, DCP_ALIGNMENT);

	/* Restart the DCP block. */
	ret = stmp_reset_block(sdcp->base);
	if (ret)
		return ret;

	/* Initialize control register. */
	writel(MXS_DCP_CTRL_GATHER_RESIDUAL_WRITES |
	       MXS_DCP_CTRL_ENABLE_CONTEXT_CACHING | 0xf,
	       sdcp->base + MXS_DCP_CTRL);

	/* Enable all DCP DMA channels. */
	writel(MXS_DCP_CHANNELCTRL_ENABLE_CHANNEL_MASK,
	       sdcp->base + MXS_DCP_CHANNELCTRL);

	/*
	 * We do not enable context switching. Give the context buffer a
	 * pointer to an illegal address so if context switching is
	 * inadvertently enabled, the DCP will return an error instead of
	 * trashing good memory. The DCP DMA cannot access ROM, so any ROM
	 * address will do.
	 */
	writel(0xffff0000, sdcp->base + MXS_DCP_CONTEXT);
	for (i = 0; i < DCP_MAX_CHANS; i++)
		writel(0xffffffff, sdcp->base + MXS_DCP_CH_N_STAT_CLR(i));
	writel(0xffffffff, sdcp->base + MXS_DCP_STAT_CLR);

	global_sdcp = sdcp;

	platform_set_drvdata(pdev, sdcp);

	for (i = 0; i < DCP_MAX_CHANS; i++) {
		spin_lock_init(&sdcp->lock[i]);
		init_completion(&sdcp->completion[i]);
		crypto_init_queue(&sdcp->queue[i], 50);
	}

	/* Create the SHA and AES handler threads. */
	sdcp->thread[DCP_CHAN_HASH_SHA] = kthread_run(dcp_chan_thread_sha,
						      NULL, "mxs_dcp_chan/sha");
	if (IS_ERR(sdcp->thread[DCP_CHAN_HASH_SHA])) {
		dev_err(dev, "Error starting SHA thread!\n");
		return PTR_ERR(sdcp->thread[DCP_CHAN_HASH_SHA]);
	}

	sdcp->thread[DCP_CHAN_CRYPTO] = kthread_run(dcp_chan_thread_aes,
						    NULL, "mxs_dcp_chan/aes");
	if (IS_ERR(sdcp->thread[DCP_CHAN_CRYPTO])) {
		dev_err(dev, "Error starting AES thread!\n");
		ret = PTR_ERR(sdcp->thread[DCP_CHAN_CRYPTO]);
		goto err_destroy_sha_thread;
	}

	/* Register the various crypto algorithms. */
	sdcp->caps = readl(sdcp->base + MXS_DCP_CAPABILITY1);

	if (sdcp->caps & MXS_DCP_CAPABILITY1_AES128) {
		ret = crypto_register_algs(dcp_aes_algs,
					   ARRAY_SIZE(dcp_aes_algs));
		if (ret) {
			/* Failed to register algorithm. */
			dev_err(dev, "Failed to register AES crypto!\n");
			goto err_destroy_aes_thread;
		}
	}

	if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA1) {
		ret = crypto_register_ahash(&dcp_sha1_alg);
		if (ret) {
			dev_err(dev, "Failed to register %s hash!\n",
				dcp_sha1_alg.halg.base.cra_name);
			goto err_unregister_aes;
		}
	}

	if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA256) {
		ret = crypto_register_ahash(&dcp_sha256_alg);
		if (ret) {
			dev_err(dev, "Failed to register %s hash!\n",
				dcp_sha256_alg.halg.base.cra_name);
			goto err_unregister_sha1;
		}
	}

	return 0;

err_unregister_sha1:
	if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA1)
		crypto_unregister_ahash(&dcp_sha1_alg);

err_unregister_aes:
	if (sdcp->caps & MXS_DCP_CAPABILITY1_AES128)
		crypto_unregister_algs(dcp_aes_algs, ARRAY_SIZE(dcp_aes_algs));

err_destroy_aes_thread:
	kthread_stop(sdcp->thread[DCP_CHAN_CRYPTO]);

err_destroy_sha_thread:
	kthread_stop(sdcp->thread[DCP_CHAN_HASH_SHA]);

	return ret;
}
static int mxs_dcp_remove(struct platform_device *pdev)
{
	struct dcp *sdcp = platform_get_drvdata(pdev);

	if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA256)
		crypto_unregister_ahash(&dcp_sha256_alg);

	if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA1)
		crypto_unregister_ahash(&dcp_sha1_alg);

	if (sdcp->caps & MXS_DCP_CAPABILITY1_AES128)
		crypto_unregister_algs(dcp_aes_algs, ARRAY_SIZE(dcp_aes_algs));

	kthread_stop(sdcp->thread[DCP_CHAN_HASH_SHA]);
	kthread_stop(sdcp->thread[DCP_CHAN_CRYPTO]);

	platform_set_drvdata(pdev, NULL);

	/* Allow a future probe to bind again. */
	global_sdcp = NULL;

	return 0;
}
static const struct of_device_id mxs_dcp_dt_ids[] = {
	{ .compatible = "fsl,imx23-dcp", .data = NULL, },
	{ .compatible = "fsl,imx28-dcp", .data = NULL, },
	{ /* sentinel */ }
};

MODULE_DEVICE_TABLE(of, mxs_dcp_dt_ids);
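
/*
 * Example device tree node that binds this driver (a sketch based on the
 * i.MX28 memory map; consult the SoC dtsi and the fsl-dcp binding document
 * for the authoritative values):
 *
 *	dcp: crypto@80028000 {
 *		compatible = "fsl,imx28-dcp";
 *		reg = <0x80028000 0x2000>;
 *		interrupts = <52 53 54>;
 *	};
 */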
static struct platform_driver mxs_dcp_driver = {
	.probe	= mxs_dcp_probe,
	.remove	= mxs_dcp_remove,
	.driver	= {
		.name		= "mxs-dcp",
		.of_match_table	= mxs_dcp_dt_ids,
	},
};

module_platform_driver(mxs_dcp_driver);

MODULE_AUTHOR("Marek Vasut <marex@denx.de>");
MODULE_DESCRIPTION("Freescale MXS DCP Driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:mxs-dcp");