// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Freescale i.MX23/i.MX28 Data Co-Processor driver
 *
 * Copyright (C) 2013 Marek Vasut <marex@denx.de>
 */

#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/stmp_device.h>
#include <linux/clk.h>

#include <crypto/aes.h>
#include <crypto/sha.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>

#define DCP_MAX_CHANS	4
#define DCP_BUF_SZ	PAGE_SIZE
#define DCP_SHA_PAY_SZ	64

#define DCP_ALIGNMENT	64

/*
 * Null hashes to align with hardware behavior on i.MX6SL and ULL.
 * These are flipped for consistency with hardware output.
 */
static const uint8_t sha1_null_hash[] =
	"\x09\x07\xd8\xaf\x90\x18\x60\x95\xef\xbf"
	"\x55\x32\x0d\x4b\x6b\x5e\xee\xa3\x39\xda";

static const uint8_t sha256_null_hash[] =
	"\x55\xb8\x52\x78\x1b\x99\x95\xa4"
	"\x4c\x93\x9b\x64\xe4\x41\xae\x27"
	"\x24\xb9\x6f\x99\xc8\xf4\xfb\x9a"
	"\x14\x1c\xfc\x98\x42\xc4\xb0\xe3";

/* DCP DMA descriptor. */
struct dcp_dma_desc {
	uint32_t	next_cmd_addr;
	uint32_t	control0;
	uint32_t	control1;
	uint32_t	source;
	uint32_t	destination;
	uint32_t	size;
	uint32_t	payload;
	uint32_t	status;
};

/* Coherent aligned block for bounce buffering. */
struct dcp_coherent_block {
	uint8_t			aes_in_buf[DCP_BUF_SZ];
	uint8_t			aes_out_buf[DCP_BUF_SZ];
	uint8_t			sha_in_buf[DCP_BUF_SZ];
	uint8_t			sha_out_buf[DCP_SHA_PAY_SZ];

	uint8_t			aes_key[2 * AES_KEYSIZE_128];

	struct dcp_dma_desc	desc[DCP_MAX_CHANS];
};

struct dcp {
	struct device			*dev;
	void __iomem			*base;

	uint32_t			caps;

	struct dcp_coherent_block	*coh;

	struct completion		completion[DCP_MAX_CHANS];
	spinlock_t			lock[DCP_MAX_CHANS];
	struct task_struct		*thread[DCP_MAX_CHANS];
	struct crypto_queue		queue[DCP_MAX_CHANS];
	struct clk			*dcp_clk;
};

enum dcp_chan {
	DCP_CHAN_HASH_SHA	= 0,
	DCP_CHAN_CRYPTO		= 1,
};

struct dcp_async_ctx {
	/* Common context */
	enum dcp_chan	chan;
	uint32_t	fill;

	/* SHA Hash-specific context */
	struct mutex			mutex;
	uint32_t			alg;
	int				hot:1;

	/* Crypto-specific context */
	struct crypto_sync_skcipher	*fallback;
	unsigned int			key_len;
	uint8_t				key[AES_KEYSIZE_128];
};

struct dcp_aes_req_ctx {
	int	enc:1;
	int	ecb:1;
};

struct dcp_sha_req_ctx {
	int	init:1;
	int	fini:1;
};

struct dcp_export_state {
	struct dcp_sha_req_ctx req_ctx;
	struct dcp_async_ctx async_ctx;
};

/*
 * There can only be one instance of the MXS DCP, due to the
 * design of the Linux Crypto API.
 */
static struct dcp *global_sdcp;

/* DCP register layout. */
#define MXS_DCP_CTRL				0x00
#define MXS_DCP_CTRL_GATHER_RESIDUAL_WRITES	(1 << 23)
#define MXS_DCP_CTRL_ENABLE_CONTEXT_CACHING	(1 << 22)

#define MXS_DCP_STAT				0x10
#define MXS_DCP_STAT_CLR			0x18
#define MXS_DCP_STAT_IRQ_MASK			0xf

#define MXS_DCP_CHANNELCTRL			0x20
#define MXS_DCP_CHANNELCTRL_ENABLE_CHANNEL_MASK	0xff

#define MXS_DCP_CAPABILITY1			0x40
#define MXS_DCP_CAPABILITY1_SHA256		(4 << 16)
#define MXS_DCP_CAPABILITY1_SHA1		(1 << 16)
#define MXS_DCP_CAPABILITY1_AES128		(1 << 0)

#define MXS_DCP_CONTEXT				0x50

#define MXS_DCP_CH_N_CMDPTR(n)			(0x100 + ((n) * 0x40))

#define MXS_DCP_CH_N_SEMA(n)			(0x110 + ((n) * 0x40))

#define MXS_DCP_CH_N_STAT(n)			(0x120 + ((n) * 0x40))
#define MXS_DCP_CH_N_STAT_CLR(n)		(0x128 + ((n) * 0x40))

/* DMA descriptor bits. */
#define MXS_DCP_CONTROL0_HASH_TERM		(1 << 13)
#define MXS_DCP_CONTROL0_HASH_INIT		(1 << 12)
#define MXS_DCP_CONTROL0_PAYLOAD_KEY		(1 << 11)
#define MXS_DCP_CONTROL0_CIPHER_ENCRYPT		(1 << 8)
#define MXS_DCP_CONTROL0_CIPHER_INIT		(1 << 9)
#define MXS_DCP_CONTROL0_ENABLE_HASH		(1 << 6)
#define MXS_DCP_CONTROL0_ENABLE_CIPHER		(1 << 5)
#define MXS_DCP_CONTROL0_DECR_SEMAPHORE		(1 << 1)
#define MXS_DCP_CONTROL0_INTERRUPT		(1 << 0)

#define MXS_DCP_CONTROL1_HASH_SELECT_SHA256	(2 << 16)
#define MXS_DCP_CONTROL1_HASH_SELECT_SHA1	(0 << 16)
#define MXS_DCP_CONTROL1_CIPHER_MODE_CBC	(1 << 4)
#define MXS_DCP_CONTROL1_CIPHER_MODE_ECB	(0 << 4)
#define MXS_DCP_CONTROL1_CIPHER_SELECT_AES128	(0 << 0)

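/*
 * For illustration, the first block of a CBC encryption submitted by
 * mxs_dcp_run_aes() below ends up with:
 *
 *	control0 = MXS_DCP_CONTROL0_DECR_SEMAPHORE | INTERRUPT |
 *		   ENABLE_CIPHER | PAYLOAD_KEY | CIPHER_ENCRYPT | CIPHER_INIT
 *	control1 = MXS_DCP_CONTROL1_CIPHER_SELECT_AES128 | CIPHER_MODE_CBC
 *
 * (MXS_DCP_CONTROL0_/MXS_DCP_CONTROL1_ prefixes abbreviated after the
 * first flag of each word.)
 */
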
static int mxs_dcp_start_dma(struct dcp_async_ctx *actx)
{
	int dma_err;
	struct dcp *sdcp = global_sdcp;
	const int chan = actx->chan;
	uint32_t stat;
	unsigned long ret;
	struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan];
	dma_addr_t desc_phys = dma_map_single(sdcp->dev, desc, sizeof(*desc),
					      DMA_TO_DEVICE);

	dma_err = dma_mapping_error(sdcp->dev, desc_phys);
	if (dma_err)
		return dma_err;

	reinit_completion(&sdcp->completion[chan]);

	/* Clear status register. */
	writel(0xffffffff, sdcp->base + MXS_DCP_CH_N_STAT_CLR(chan));

	/* Load the DMA descriptor. */
	writel(desc_phys, sdcp->base + MXS_DCP_CH_N_CMDPTR(chan));

	/* Increment the semaphore to start the DMA transfer. */
	writel(1, sdcp->base + MXS_DCP_CH_N_SEMA(chan));

	ret = wait_for_completion_timeout(&sdcp->completion[chan],
					  msecs_to_jiffies(1000));
	if (!ret) {
		dev_err(sdcp->dev, "Channel %i timeout (DCP_STAT=0x%08x)\n",
			chan, readl(sdcp->base + MXS_DCP_STAT));
		return -ETIMEDOUT;
	}

	stat = readl(sdcp->base + MXS_DCP_CH_N_STAT(chan));
	if (stat & 0xff) {
		dev_err(sdcp->dev, "Channel %i error (CH_STAT=0x%08x)\n",
			chan, stat);
		return -EINVAL;
	}

	dma_unmap_single(sdcp->dev, desc_phys, sizeof(*desc), DMA_TO_DEVICE);

	return 0;
}

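/*
 * Note on the DMA model: the driver never chains descriptors. Each channel
 * owns exactly one descriptor inside the coherent block, next_cmd_addr is
 * always left at 0, and writing 1 to the channel semaphore announces that a
 * single descriptor is ready to run.
 */
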
/*
 * Encryption (AES128)
 */
static int mxs_dcp_run_aes(struct dcp_async_ctx *actx,
			   struct ablkcipher_request *req, int init)
{
	dma_addr_t key_phys, src_phys, dst_phys;
	struct dcp *sdcp = global_sdcp;
	struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan];
	struct dcp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);
	int ret;

	key_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_key,
				  2 * AES_KEYSIZE_128, DMA_TO_DEVICE);
	ret = dma_mapping_error(sdcp->dev, key_phys);
	if (ret)
		return ret;

	src_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_in_buf,
				  DCP_BUF_SZ, DMA_TO_DEVICE);
	ret = dma_mapping_error(sdcp->dev, src_phys);
	if (ret)
		goto err_src;

	dst_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_out_buf,
				  DCP_BUF_SZ, DMA_FROM_DEVICE);
	ret = dma_mapping_error(sdcp->dev, dst_phys);
	if (ret)
		goto err_dst;

	if (actx->fill % AES_BLOCK_SIZE) {
		dev_err(sdcp->dev, "Invalid block size!\n");
		ret = -EINVAL;
		goto aes_done_run;
	}

	/* Fill in the DMA descriptor. */
	desc->control0 = MXS_DCP_CONTROL0_DECR_SEMAPHORE |
			 MXS_DCP_CONTROL0_INTERRUPT |
			 MXS_DCP_CONTROL0_ENABLE_CIPHER;

	/* Payload contains the key. */
	desc->control0 |= MXS_DCP_CONTROL0_PAYLOAD_KEY;

	if (rctx->enc)
		desc->control0 |= MXS_DCP_CONTROL0_CIPHER_ENCRYPT;
	if (init)
		desc->control0 |= MXS_DCP_CONTROL0_CIPHER_INIT;

	desc->control1 = MXS_DCP_CONTROL1_CIPHER_SELECT_AES128;

	if (rctx->ecb)
		desc->control1 |= MXS_DCP_CONTROL1_CIPHER_MODE_ECB;
	else
		desc->control1 |= MXS_DCP_CONTROL1_CIPHER_MODE_CBC;

	desc->next_cmd_addr = 0;
	desc->source = src_phys;
	desc->destination = dst_phys;
	desc->size = actx->fill;
	desc->payload = key_phys;
	desc->status = 0;

	ret = mxs_dcp_start_dma(actx);

aes_done_run:
	dma_unmap_single(sdcp->dev, dst_phys, DCP_BUF_SZ, DMA_FROM_DEVICE);
err_dst:
	dma_unmap_single(sdcp->dev, src_phys, DCP_BUF_SZ, DMA_TO_DEVICE);
err_src:
	dma_unmap_single(sdcp->dev, key_phys, 2 * AES_KEYSIZE_128,
			 DMA_TO_DEVICE);

	return ret;
}

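/*
 * Key/IV packing: desc->payload points at the 32-byte aes_key bounce buffer.
 * The AES-128 key occupies the first 16 bytes; when CIPHER_INIT is set for
 * CBC, the hardware reads the IV from the following 16 bytes (filled in by
 * mxs_dcp_aes_block_crypt() below).
 */
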
static int mxs_dcp_aes_block_crypt(struct crypto_async_request *arq)
{
	struct dcp *sdcp = global_sdcp;

	struct ablkcipher_request *req = ablkcipher_request_cast(arq);
	struct dcp_async_ctx *actx = crypto_tfm_ctx(arq->tfm);
	struct dcp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);

	struct scatterlist *dst = req->dst;
	struct scatterlist *src = req->src;
	int dst_nents = sg_nents(dst);

	const int out_off = DCP_BUF_SZ;
	uint8_t *in_buf = sdcp->coh->aes_in_buf;
	uint8_t *out_buf = sdcp->coh->aes_out_buf;

	uint32_t dst_off = 0;
	uint8_t *src_buf = NULL;
	uint32_t last_out_len = 0;

	uint8_t *key = sdcp->coh->aes_key;

	int ret = 0;
	unsigned int i, len, clen, tlen = 0;
	int init = 0;
	bool limit_hit = false;

	actx->fill = 0;

	/* Copy the key from the temporary location. */
	memcpy(key, actx->key, actx->key_len);

	if (!rctx->ecb) {
		/* Copy the CBC IV just past the key. */
		memcpy(key + AES_KEYSIZE_128, req->info, AES_KEYSIZE_128);
		/* CBC needs the INIT set. */
		init = 1;
	} else {
		memset(key + AES_KEYSIZE_128, 0, AES_KEYSIZE_128);
	}

	for_each_sg(req->src, src, sg_nents(req->src), i) {
		src_buf = sg_virt(src);
		len = sg_dma_len(src);
		tlen += len;
		limit_hit = tlen > req->nbytes;

		if (limit_hit)
			len = req->nbytes - (tlen - len);

		do {
			if (actx->fill + len > out_off)
				clen = out_off - actx->fill;
			else
				clen = len;

			memcpy(in_buf + actx->fill, src_buf, clen);
			len -= clen;
			src_buf += clen;
			actx->fill += clen;

			/*
			 * If we filled the buffer or this is the last SG,
			 * submit the buffer.
			 */
			if (actx->fill == out_off || sg_is_last(src) ||
			    limit_hit) {
				ret = mxs_dcp_run_aes(actx, req, init);
				if (ret)
					return ret;
				init = 0;

				sg_pcopy_from_buffer(dst, dst_nents, out_buf,
						     actx->fill, dst_off);
				dst_off += actx->fill;
				last_out_len = actx->fill;
				actx->fill = 0;
			}
		} while (len);

		if (limit_hit)
			break;
	}

	/* Copy the IV for CBC for chaining. */
	if (!rctx->ecb) {
		if (rctx->enc)
			memcpy(req->info, out_buf + (last_out_len - AES_BLOCK_SIZE),
			       AES_BLOCK_SIZE);
		else
			memcpy(req->info, in_buf + (last_out_len - AES_BLOCK_SIZE),
			       AES_BLOCK_SIZE);
	}

	return ret;
}

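/*
 * CBC chaining rationale for the tail copies above: when encrypting, the
 * next IV is the last ciphertext block just produced, i.e. the tail of
 * out_buf; when decrypting, it is the last ciphertext block consumed, which
 * still sits at the tail of in_buf.
 */
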
static int dcp_chan_thread_aes(void *data)
{
	struct dcp *sdcp = global_sdcp;
	const int chan = DCP_CHAN_CRYPTO;

	struct crypto_async_request *backlog;
	struct crypto_async_request *arq;

	int ret;

	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);

		spin_lock(&sdcp->lock[chan]);
		backlog = crypto_get_backlog(&sdcp->queue[chan]);
		arq = crypto_dequeue_request(&sdcp->queue[chan]);
		spin_unlock(&sdcp->lock[chan]);

		if (!backlog && !arq) {
			schedule();
			continue;
		}

		set_current_state(TASK_RUNNING);

		if (backlog)
			backlog->complete(backlog, -EINPROGRESS);

		if (arq) {
			ret = mxs_dcp_aes_block_crypt(arq);
			arq->complete(arq, ret);
		}
	}

	return 0;
}

static int mxs_dcp_block_fallback(struct ablkcipher_request *req, int enc)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct dcp_async_ctx *ctx = crypto_ablkcipher_ctx(tfm);
	SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
	int ret;

	skcipher_request_set_sync_tfm(subreq, ctx->fallback);
	skcipher_request_set_callback(subreq, req->base.flags, NULL, NULL);
	skcipher_request_set_crypt(subreq, req->src, req->dst,
				   req->nbytes, req->info);

	if (enc)
		ret = crypto_skcipher_encrypt(subreq);
	else
		ret = crypto_skcipher_decrypt(subreq);

	skcipher_request_zero(subreq);

	return ret;
}

static int mxs_dcp_aes_enqueue(struct ablkcipher_request *req, int enc, int ecb)
{
	struct dcp *sdcp = global_sdcp;
	struct crypto_async_request *arq = &req->base;
	struct dcp_async_ctx *actx = crypto_tfm_ctx(arq->tfm);
	struct dcp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);
	int ret;

	if (unlikely(actx->key_len != AES_KEYSIZE_128))
		return mxs_dcp_block_fallback(req, enc);

	rctx->enc = enc;
	rctx->ecb = ecb;
	actx->chan = DCP_CHAN_CRYPTO;

	spin_lock(&sdcp->lock[actx->chan]);
	ret = crypto_enqueue_request(&sdcp->queue[actx->chan], &req->base);
	spin_unlock(&sdcp->lock[actx->chan]);

	wake_up_process(sdcp->thread[actx->chan]);

	return ret;
}

static int mxs_dcp_aes_ecb_decrypt(struct ablkcipher_request *req)
{
	return mxs_dcp_aes_enqueue(req, 0, 1);
}

static int mxs_dcp_aes_ecb_encrypt(struct ablkcipher_request *req)
{
	return mxs_dcp_aes_enqueue(req, 1, 1);
}

static int mxs_dcp_aes_cbc_decrypt(struct ablkcipher_request *req)
{
	return mxs_dcp_aes_enqueue(req, 0, 0);
}

static int mxs_dcp_aes_cbc_encrypt(struct ablkcipher_request *req)
{
	return mxs_dcp_aes_enqueue(req, 1, 0);
}

static int mxs_dcp_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
			      unsigned int len)
{
	struct dcp_async_ctx *actx = crypto_ablkcipher_ctx(tfm);
	int ret;

	/*
	 * AES 128 is supported by the hardware: store the key into the
	 * temporary buffer and exit. We must use the temporary buffer here,
	 * since there can still be an operation in progress.
	 */
	actx->key_len = len;
	if (len == AES_KEYSIZE_128) {
		memcpy(actx->key, key, len);
		return 0;
	}

	/*
	 * If the requested AES key size is not supported by the hardware,
	 * but is supported by the in-kernel software implementation, we use
	 * the software fallback.
	 */
	crypto_sync_skcipher_clear_flags(actx->fallback, CRYPTO_TFM_REQ_MASK);
	crypto_sync_skcipher_set_flags(actx->fallback,
				       tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);

	ret = crypto_sync_skcipher_setkey(actx->fallback, key, len);
	if (!ret)
		return 0;

	tfm->base.crt_flags &= ~CRYPTO_TFM_RES_MASK;
	tfm->base.crt_flags |= crypto_sync_skcipher_get_flags(actx->fallback) &
			       CRYPTO_TFM_RES_MASK;

	return ret;
}

static int mxs_dcp_aes_fallback_init(struct crypto_tfm *tfm)
{
	const char *name = crypto_tfm_alg_name(tfm);
	struct dcp_async_ctx *actx = crypto_tfm_ctx(tfm);
	struct crypto_sync_skcipher *blk;

	blk = crypto_alloc_sync_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(blk))
		return PTR_ERR(blk);

	actx->fallback = blk;
	tfm->crt_ablkcipher.reqsize = sizeof(struct dcp_aes_req_ctx);
	return 0;
}

static void mxs_dcp_aes_fallback_exit(struct crypto_tfm *tfm)
{
	struct dcp_async_ctx *actx = crypto_tfm_ctx(tfm);

	crypto_free_sync_skcipher(actx->fallback);
}

/*
 * Hashing (SHA1/SHA256)
 */
static int mxs_dcp_run_sha(struct ahash_request *req)
{
	struct dcp *sdcp = global_sdcp;
	int ret;

	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);
	struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req);
	struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan];

	dma_addr_t digest_phys = 0;
	dma_addr_t buf_phys = dma_map_single(sdcp->dev, sdcp->coh->sha_in_buf,
					     DCP_BUF_SZ, DMA_TO_DEVICE);

	ret = dma_mapping_error(sdcp->dev, buf_phys);
	if (ret)
		return ret;

	/* Fill in the DMA descriptor. */
	desc->control0 = MXS_DCP_CONTROL0_DECR_SEMAPHORE |
			 MXS_DCP_CONTROL0_INTERRUPT |
			 MXS_DCP_CONTROL0_ENABLE_HASH;
	if (rctx->init)
		desc->control0 |= MXS_DCP_CONTROL0_HASH_INIT;

	desc->control1 = actx->alg;
	desc->next_cmd_addr = 0;
	desc->source = buf_phys;
	desc->destination = 0;
	desc->size = actx->fill;
	desc->payload = 0;
	desc->status = 0;

	/*
	 * Align driver with hardware behavior when generating null hashes.
	 */
	if (rctx->init && rctx->fini && desc->size == 0) {
		struct hash_alg_common *halg = crypto_hash_alg_common(tfm);
		const uint8_t *sha_buf =
			(actx->alg == MXS_DCP_CONTROL1_HASH_SELECT_SHA1) ?
			sha1_null_hash : sha256_null_hash;
		memcpy(sdcp->coh->sha_out_buf, sha_buf, halg->digestsize);
		ret = 0;
		goto done_run;
	}

	/* Set HASH_TERM bit for the last transfer block. */
	if (rctx->fini) {
		digest_phys = dma_map_single(sdcp->dev, sdcp->coh->sha_out_buf,
					     DCP_SHA_PAY_SZ, DMA_FROM_DEVICE);
		ret = dma_mapping_error(sdcp->dev, digest_phys);
		if (ret)
			goto done_run;

		desc->control0 |= MXS_DCP_CONTROL0_HASH_TERM;
		desc->payload = digest_phys;
	}

	ret = mxs_dcp_start_dma(actx);

	if (rctx->fini)
		dma_unmap_single(sdcp->dev, digest_phys, DCP_SHA_PAY_SZ,
				 DMA_FROM_DEVICE);

done_run:
	dma_unmap_single(sdcp->dev, buf_phys, DCP_BUF_SZ, DMA_TO_DEVICE);

	return ret;
}

static int dcp_sha_req_to_buf(struct crypto_async_request *arq)
{
	struct dcp *sdcp = global_sdcp;

	struct ahash_request *req = ahash_request_cast(arq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);
	struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req);
	struct hash_alg_common *halg = crypto_hash_alg_common(tfm);

	uint8_t *in_buf = sdcp->coh->sha_in_buf;
	uint8_t *out_buf = sdcp->coh->sha_out_buf;

	struct scatterlist *src;

	unsigned int i, len, clen, oft = 0;
	int ret;

	int fin = rctx->fini;
	if (fin)
		rctx->fini = 0;

	src = req->src;
	len = req->nbytes;

	while (len) {
		if (actx->fill + len > DCP_BUF_SZ)
			clen = DCP_BUF_SZ - actx->fill;
		else
			clen = len;

		scatterwalk_map_and_copy(in_buf + actx->fill, src, oft, clen,
					 0);

		len -= clen;
		oft += clen;
		actx->fill += clen;

		/*
		 * If we filled the buffer and still have some
		 * more data, submit the buffer.
		 */
		if (len && actx->fill == DCP_BUF_SZ) {
			ret = mxs_dcp_run_sha(req);
			if (ret)
				return ret;
			actx->fill = 0;
			rctx->init = 0;
		}
	}

	if (fin) {
		rctx->fini = 1;

		/* Submit whatever is left. */
		if (!req->result)
			return -EINVAL;

		ret = mxs_dcp_run_sha(req);
		if (ret)
			return ret;

		actx->fill = 0;

		/* The hardware returns the digest flipped, so reverse it. */
		for (i = 0; i < halg->digestsize; i++)
			req->result[i] = out_buf[halg->digestsize - i - 1];
	}

	return 0;
}

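/*
 * The byte reversal above undoes the hardware's flipped digest ordering; it
 * is also why the null-hash constants at the top of this file are stored
 * pre-flipped.
 */
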
static int dcp_chan_thread_sha(void *data)
{
	struct dcp *sdcp = global_sdcp;
	const int chan = DCP_CHAN_HASH_SHA;

	struct crypto_async_request *backlog;
	struct crypto_async_request *arq;

	int ret;

	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);

		spin_lock(&sdcp->lock[chan]);
		backlog = crypto_get_backlog(&sdcp->queue[chan]);
		arq = crypto_dequeue_request(&sdcp->queue[chan]);
		spin_unlock(&sdcp->lock[chan]);

		if (!backlog && !arq) {
			schedule();
			continue;
		}

		set_current_state(TASK_RUNNING);

		if (backlog)
			backlog->complete(backlog, -EINPROGRESS);

		if (arq) {
			ret = dcp_sha_req_to_buf(arq);
			arq->complete(arq, ret);
		}
	}

	return 0;
}

static int dcp_sha_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);

	struct hash_alg_common *halg = crypto_hash_alg_common(tfm);

	/*
	 * Start the hashing session. The code below only initializes the
	 * hashing session context, nothing more.
	 */
	memset(actx, 0, sizeof(*actx));

	if (strcmp(halg->base.cra_name, "sha1") == 0)
		actx->alg = MXS_DCP_CONTROL1_HASH_SELECT_SHA1;
	else
		actx->alg = MXS_DCP_CONTROL1_HASH_SELECT_SHA256;

	actx->fill = 0;
	actx->hot = 0;
	actx->chan = DCP_CHAN_HASH_SHA;

	mutex_init(&actx->mutex);

	return 0;
}

static int dcp_sha_update_fx(struct ahash_request *req, int fini)
{
	struct dcp *sdcp = global_sdcp;

	struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);

	int ret;

	/*
	 * Ignore requests that have no data in them and are not
	 * the trailing requests in the stream of requests.
	 */
	if (!req->nbytes && !fini)
		return 0;

	mutex_lock(&actx->mutex);

	rctx->fini = fini;

	if (!actx->hot) {
		actx->hot = 1;
		rctx->init = 1;
	}

	spin_lock(&sdcp->lock[actx->chan]);
	ret = crypto_enqueue_request(&sdcp->queue[actx->chan], &req->base);
	spin_unlock(&sdcp->lock[actx->chan]);

	wake_up_process(sdcp->thread[actx->chan]);
	mutex_unlock(&actx->mutex);

	return ret;
}

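/*
 * Flag lifecycle: dcp_sha_init() clears "hot"; the first update/final on a
 * stream then sets hot and requests HASH_INIT for its first block, while
 * rctx->fini marks the request that must also terminate the hash with
 * HASH_TERM in mxs_dcp_run_sha().
 */
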
static int dcp_sha_update(struct ahash_request *req)
{
	return dcp_sha_update_fx(req, 0);
}

static int dcp_sha_final(struct ahash_request *req)
{
	ahash_request_set_crypt(req, NULL, req->result, 0);
	req->nbytes = 0;
	return dcp_sha_update_fx(req, 1);
}

static int dcp_sha_finup(struct ahash_request *req)
{
	return dcp_sha_update_fx(req, 1);
}

static int dcp_sha_digest(struct ahash_request *req)
{
	int ret;

	ret = dcp_sha_init(req);
	if (ret)
		return ret;

	return dcp_sha_finup(req);
}

static int dcp_sha_import(struct ahash_request *req, const void *in)
{
	struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);
	const struct dcp_export_state *export = in;

	memset(rctx, 0, sizeof(struct dcp_sha_req_ctx));
	memset(actx, 0, sizeof(struct dcp_async_ctx));
	memcpy(rctx, &export->req_ctx, sizeof(struct dcp_sha_req_ctx));
	memcpy(actx, &export->async_ctx, sizeof(struct dcp_async_ctx));

	return 0;
}

static int dcp_sha_export(struct ahash_request *req, void *out)
{
	struct dcp_sha_req_ctx *rctx_state = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct dcp_async_ctx *actx_state = crypto_ahash_ctx(tfm);
	struct dcp_export_state *export = out;

	memcpy(&export->req_ctx, rctx_state, sizeof(struct dcp_sha_req_ctx));
	memcpy(&export->async_ctx, actx_state, sizeof(struct dcp_async_ctx));

	return 0;
}

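/*
 * export/import snapshot both the request context and the whole transform
 * context, which is why .statesize below is sizeof(struct dcp_export_state);
 * a partially hashed stream can thus be suspended and resumed later.
 */
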
static int dcp_sha_cra_init(struct crypto_tfm *tfm)
{
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct dcp_sha_req_ctx));
	return 0;
}

static void dcp_sha_cra_exit(struct crypto_tfm *tfm)
{
}

/* AES 128 ECB and AES 128 CBC */
static struct crypto_alg dcp_aes_algs[] = {
	{
		.cra_name		= "ecb(aes)",
		.cra_driver_name	= "ecb-aes-dcp",
		.cra_priority		= 400,
		.cra_alignmask		= 15,
		.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
					  CRYPTO_ALG_ASYNC |
					  CRYPTO_ALG_NEED_FALLBACK,
		.cra_init		= mxs_dcp_aes_fallback_init,
		.cra_exit		= mxs_dcp_aes_fallback_exit,
		.cra_blocksize		= AES_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct dcp_async_ctx),
		.cra_type		= &crypto_ablkcipher_type,
		.cra_module		= THIS_MODULE,
		.cra_u	= {
			.ablkcipher = {
				.min_keysize	= AES_MIN_KEY_SIZE,
				.max_keysize	= AES_MAX_KEY_SIZE,
				.setkey		= mxs_dcp_aes_setkey,
				.encrypt	= mxs_dcp_aes_ecb_encrypt,
				.decrypt	= mxs_dcp_aes_ecb_decrypt
			},
		},
	}, {
		.cra_name		= "cbc(aes)",
		.cra_driver_name	= "cbc-aes-dcp",
		.cra_priority		= 400,
		.cra_alignmask		= 15,
		.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
					  CRYPTO_ALG_ASYNC |
					  CRYPTO_ALG_NEED_FALLBACK,
		.cra_init		= mxs_dcp_aes_fallback_init,
		.cra_exit		= mxs_dcp_aes_fallback_exit,
		.cra_blocksize		= AES_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct dcp_async_ctx),
		.cra_type		= &crypto_ablkcipher_type,
		.cra_module		= THIS_MODULE,
		.cra_u	= {
			.ablkcipher = {
				.min_keysize	= AES_MIN_KEY_SIZE,
				.max_keysize	= AES_MAX_KEY_SIZE,
				.setkey		= mxs_dcp_aes_setkey,
				.encrypt	= mxs_dcp_aes_cbc_encrypt,
				.decrypt	= mxs_dcp_aes_cbc_decrypt,
				.ivsize		= AES_BLOCK_SIZE,
			},
		},
	},
};

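/*
 * Consumer sketch (illustrative, not part of this driver): these algorithms
 * are reached through the generic crypto API, e.g.
 *
 *	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
 *
 * With .cra_priority = 400 this driver outranks the generic C implementation,
 * so "cbc(aes)" should resolve to "cbc-aes-dcp" on DCP-equipped parts.
 */
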
/* SHA1 */
static struct ahash_alg dcp_sha1_alg = {
	.init	= dcp_sha_init,
	.update	= dcp_sha_update,
	.final	= dcp_sha_final,
	.finup	= dcp_sha_finup,
	.digest	= dcp_sha_digest,
	.import	= dcp_sha_import,
	.export	= dcp_sha_export,
	.halg	= {
		.digestsize	= SHA1_DIGEST_SIZE,
		.statesize	= sizeof(struct dcp_export_state),
		.base		= {
			.cra_name		= "sha1",
			.cra_driver_name	= "sha1-dcp",
			.cra_priority		= 400,
			.cra_alignmask		= 63,
			.cra_flags		= CRYPTO_ALG_ASYNC,
			.cra_blocksize		= SHA1_BLOCK_SIZE,
			.cra_ctxsize		= sizeof(struct dcp_async_ctx),
			.cra_module		= THIS_MODULE,
			.cra_init		= dcp_sha_cra_init,
			.cra_exit		= dcp_sha_cra_exit,
		},
	},
};

/* SHA256 */
static struct ahash_alg dcp_sha256_alg = {
	.init	= dcp_sha_init,
	.update	= dcp_sha_update,
	.final	= dcp_sha_final,
	.finup	= dcp_sha_finup,
	.digest	= dcp_sha_digest,
	.import	= dcp_sha_import,
	.export	= dcp_sha_export,
	.halg	= {
		.digestsize	= SHA256_DIGEST_SIZE,
		.statesize	= sizeof(struct dcp_export_state),
		.base		= {
			.cra_name		= "sha256",
			.cra_driver_name	= "sha256-dcp",
			.cra_priority		= 400,
			.cra_alignmask		= 63,
			.cra_flags		= CRYPTO_ALG_ASYNC,
			.cra_blocksize		= SHA256_BLOCK_SIZE,
			.cra_ctxsize		= sizeof(struct dcp_async_ctx),
			.cra_module		= THIS_MODULE,
			.cra_init		= dcp_sha_cra_init,
			.cra_exit		= dcp_sha_cra_exit,
		},
	},
};

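/*
 * Consumer sketch (illustrative): hashing goes through the ahash API, e.g.
 * crypto_alloc_ahash("sha256", 0, 0), which should resolve to "sha256-dcp"
 * here when the SHA256 capability bit is set; completions are asynchronous
 * since CRYPTO_ALG_ASYNC is advertised.
 */
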
static irqreturn_t mxs_dcp_irq(int irq, void *context)
{
	struct dcp *sdcp = context;
	uint32_t stat;
	int i;

	stat = readl(sdcp->base + MXS_DCP_STAT);
	stat &= MXS_DCP_STAT_IRQ_MASK;
	if (!stat)
		return IRQ_NONE;

	/* Clear the interrupts. */
	writel(stat, sdcp->base + MXS_DCP_STAT_CLR);

	/* Complete the DMA requests that finished. */
	for (i = 0; i < DCP_MAX_CHANS; i++)
		if (stat & (1 << i))
			complete(&sdcp->completion[i]);

	return IRQ_HANDLED;
}

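/*
 * Both interrupt lines requested in mxs_dcp_probe() funnel into this one
 * handler. The low bits of DCP_STAT are per-channel completion flags, so
 * bit i wakes whoever is blocked on completion[i] in mxs_dcp_start_dma().
 */
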
static int mxs_dcp_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct dcp *sdcp = NULL;
	int i, ret;
	int dcp_vmi_irq, dcp_irq;

	if (global_sdcp) {
		dev_err(dev, "Only one DCP instance allowed!\n");
		return -ENODEV;
	}

	dcp_vmi_irq = platform_get_irq(pdev, 0);
	if (dcp_vmi_irq < 0)
		return dcp_vmi_irq;

	dcp_irq = platform_get_irq(pdev, 1);
	if (dcp_irq < 0)
		return dcp_irq;

	sdcp = devm_kzalloc(dev, sizeof(*sdcp), GFP_KERNEL);
	if (!sdcp)
		return -ENOMEM;

	sdcp->dev = dev;
	sdcp->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(sdcp->base))
		return PTR_ERR(sdcp->base);

	ret = devm_request_irq(dev, dcp_vmi_irq, mxs_dcp_irq, 0,
			       "dcp-vmi-irq", sdcp);
	if (ret) {
		dev_err(dev, "Failed to claim DCP VMI IRQ!\n");
		return ret;
	}

	ret = devm_request_irq(dev, dcp_irq, mxs_dcp_irq, 0,
			       "dcp-irq", sdcp);
	if (ret) {
		dev_err(dev, "Failed to claim DCP IRQ!\n");
		return ret;
	}

	/* Allocate coherent helper block. */
	sdcp->coh = devm_kzalloc(dev, sizeof(*sdcp->coh) + DCP_ALIGNMENT,
				 GFP_KERNEL);
	if (!sdcp->coh)
		return -ENOMEM;

	/* Re-align the structure so it fits the DCP constraints. */
	sdcp->coh = PTR_ALIGN(sdcp->coh, DCP_ALIGNMENT);

	/* The DCP clock is optional, only used on some SoCs. */
	sdcp->dcp_clk = devm_clk_get(dev, "dcp");
	if (IS_ERR(sdcp->dcp_clk)) {
		if (sdcp->dcp_clk != ERR_PTR(-ENOENT))
			return PTR_ERR(sdcp->dcp_clk);
		sdcp->dcp_clk = NULL;
	}
	ret = clk_prepare_enable(sdcp->dcp_clk);
	if (ret)
		return ret;

	/* Restart the DCP block. */
	ret = stmp_reset_block(sdcp->base);
	if (ret) {
		dev_err(dev, "Failed to reset the DCP block\n");
		goto err_disable_unprepare_clk;
	}

	/* Initialize control register. */
	writel(MXS_DCP_CTRL_GATHER_RESIDUAL_WRITES |
	       MXS_DCP_CTRL_ENABLE_CONTEXT_CACHING | 0xf,
	       sdcp->base + MXS_DCP_CTRL);

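	/*
	 * The 0xf ORed into MXS_DCP_CTRL above is assumed to be the
	 * per-channel interrupt enable field for the four channels (an
	 * inference from the value and DCP_MAX_CHANS; the reference manual
	 * is authoritative here).
	 */
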
	/* Enable all DCP DMA channels. */
	writel(MXS_DCP_CHANNELCTRL_ENABLE_CHANNEL_MASK,
	       sdcp->base + MXS_DCP_CHANNELCTRL);

	/*
	 * We do not enable context switching. Give the context buffer a
	 * pointer to an illegal address so if context switching is
	 * inadvertently enabled, the DCP will return an error instead of
	 * trashing good memory. The DCP DMA cannot access ROM, so any ROM
	 * address will do.
	 */
	writel(0xffff0000, sdcp->base + MXS_DCP_CONTEXT);
	for (i = 0; i < DCP_MAX_CHANS; i++)
		writel(0xffffffff, sdcp->base + MXS_DCP_CH_N_STAT_CLR(i));
	writel(0xffffffff, sdcp->base + MXS_DCP_STAT_CLR);

	global_sdcp = sdcp;

	platform_set_drvdata(pdev, sdcp);

	for (i = 0; i < DCP_MAX_CHANS; i++) {
		spin_lock_init(&sdcp->lock[i]);
		init_completion(&sdcp->completion[i]);
		crypto_init_queue(&sdcp->queue[i], 50);
	}

	/* Create the SHA and AES handler threads. */
	sdcp->thread[DCP_CHAN_HASH_SHA] = kthread_run(dcp_chan_thread_sha,
						      NULL, "mxs_dcp_chan/sha");
	if (IS_ERR(sdcp->thread[DCP_CHAN_HASH_SHA])) {
		dev_err(dev, "Error starting SHA thread!\n");
		ret = PTR_ERR(sdcp->thread[DCP_CHAN_HASH_SHA]);
		goto err_disable_unprepare_clk;
	}

	sdcp->thread[DCP_CHAN_CRYPTO] = kthread_run(dcp_chan_thread_aes,
						    NULL, "mxs_dcp_chan/aes");
	if (IS_ERR(sdcp->thread[DCP_CHAN_CRYPTO])) {
		dev_err(dev, "Error starting Crypto thread!\n");
		ret = PTR_ERR(sdcp->thread[DCP_CHAN_CRYPTO]);
		goto err_destroy_sha_thread;
	}

	/* Register the various crypto algorithms. */
	sdcp->caps = readl(sdcp->base + MXS_DCP_CAPABILITY1);

	if (sdcp->caps & MXS_DCP_CAPABILITY1_AES128) {
		ret = crypto_register_algs(dcp_aes_algs,
					   ARRAY_SIZE(dcp_aes_algs));
		if (ret) {
			/* Failed to register algorithm. */
			dev_err(dev, "Failed to register AES crypto!\n");
			goto err_destroy_aes_thread;
		}
	}

	if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA1) {
		ret = crypto_register_ahash(&dcp_sha1_alg);
		if (ret) {
			dev_err(dev, "Failed to register %s hash!\n",
				dcp_sha1_alg.halg.base.cra_name);
			goto err_unregister_aes;
		}
	}

	if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA256) {
		ret = crypto_register_ahash(&dcp_sha256_alg);
		if (ret) {
			dev_err(dev, "Failed to register %s hash!\n",
				dcp_sha256_alg.halg.base.cra_name);
			goto err_unregister_sha1;
		}
	}

	return 0;

err_unregister_sha1:
	if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA1)
		crypto_unregister_ahash(&dcp_sha1_alg);

err_unregister_aes:
	if (sdcp->caps & MXS_DCP_CAPABILITY1_AES128)
		crypto_unregister_algs(dcp_aes_algs, ARRAY_SIZE(dcp_aes_algs));

err_destroy_aes_thread:
	kthread_stop(sdcp->thread[DCP_CHAN_CRYPTO]);

err_destroy_sha_thread:
	kthread_stop(sdcp->thread[DCP_CHAN_HASH_SHA]);

err_disable_unprepare_clk:
	clk_disable_unprepare(sdcp->dcp_clk);

	return ret;
}

static int mxs_dcp_remove(struct platform_device *pdev)
{
	struct dcp *sdcp = platform_get_drvdata(pdev);

	if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA256)
		crypto_unregister_ahash(&dcp_sha256_alg);

	if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA1)
		crypto_unregister_ahash(&dcp_sha1_alg);

	if (sdcp->caps & MXS_DCP_CAPABILITY1_AES128)
		crypto_unregister_algs(dcp_aes_algs, ARRAY_SIZE(dcp_aes_algs));

	kthread_stop(sdcp->thread[DCP_CHAN_HASH_SHA]);
	kthread_stop(sdcp->thread[DCP_CHAN_CRYPTO]);

	clk_disable_unprepare(sdcp->dcp_clk);

	platform_set_drvdata(pdev, NULL);

	global_sdcp = NULL;

	return 0;
}

static const struct of_device_id mxs_dcp_dt_ids[] = {
	{ .compatible = "fsl,imx23-dcp", .data = NULL, },
	{ .compatible = "fsl,imx28-dcp", .data = NULL, },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, mxs_dcp_dt_ids);

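/*
 * Illustrative device tree node (a sketch, not authoritative; see the
 * fsl,imx28-dcp binding for the exact register/interrupt layout):
 *
 *	dcp: crypto@80028000 {
 *		compatible = "fsl,imx28-dcp";
 *		reg = <0x80028000 0x2000>;
 *		interrupts = <52 53 54>;
 *	};
 */
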
static struct platform_driver mxs_dcp_driver = {
	.probe	= mxs_dcp_probe,
	.remove	= mxs_dcp_remove,
	.driver	= {
		.name		= "mxs-dcp",
		.of_match_table	= mxs_dcp_dt_ids,
	},
};

module_platform_driver(mxs_dcp_driver);

MODULE_AUTHOR("Marek Vasut <marex@denx.de>");
MODULE_DESCRIPTION("Freescale MXS DCP Driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:mxs-dcp");