/*
 * Driver for EIP97 AES acceleration.
 *
 * Copyright (c) 2016 Ryder Lee <ryder.lee@mediatek.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Some ideas are from the atmel-aes.c driver.
 */

#include <crypto/aes.h>
#include "mtk-platform.h"

#define AES_QUEUE_SIZE		512
#define AES_BUF_ORDER		2
#define AES_BUF_SIZE		((PAGE_SIZE << AES_BUF_ORDER) \
				& ~(AES_BLOCK_SIZE - 1))
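/*
 * Example: with 4 KiB pages, AES_BUF_SIZE = (4096 << 2) & ~15 = 16384
 * bytes, i.e. room for 1024 AES blocks; the mask keeps the bounce buffer
 * a whole number of blocks for any page size.
 */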
#define AES_MAX_STATE_BUF_SIZE	SIZE_IN_WORDS(AES_KEYSIZE_256 + \
				AES_BLOCK_SIZE * 2)
#define AES_MAX_CT_SIZE		6

#define AES_CT_CTRL_HDR		cpu_to_le32(0x00220000)

/* AES-CBC/ECB/CTR command token */
#define AES_CMD0		cpu_to_le32(0x05000000)
#define AES_CMD1		cpu_to_le32(0x2d060000)
#define AES_CMD2		cpu_to_le32(0xe4a63806)
/* AES-GCM command token */
#define AES_GCM_CMD0		cpu_to_le32(0x0b000000)
#define AES_GCM_CMD1		cpu_to_le32(0xa0800000)
#define AES_GCM_CMD2		cpu_to_le32(0x25000010)
#define AES_GCM_CMD3		cpu_to_le32(0x0f020000)
#define AES_GCM_CMD4		cpu_to_le32(0x21e60000)
#define AES_GCM_CMD5		cpu_to_le32(0x40e60000)
#define AES_GCM_CMD6		cpu_to_le32(0xd0070000)

/* AES transform information word 0 fields */
#define AES_TFM_BASIC_OUT	cpu_to_le32(0x4 << 0)
#define AES_TFM_BASIC_IN	cpu_to_le32(0x5 << 0)
#define AES_TFM_GCM_OUT		cpu_to_le32(0x6 << 0)
#define AES_TFM_GCM_IN		cpu_to_le32(0xf << 0)
#define AES_TFM_SIZE(x)		cpu_to_le32((x) << 8)
#define AES_TFM_128BITS		cpu_to_le32(0xb << 16)
#define AES_TFM_192BITS		cpu_to_le32(0xd << 16)
#define AES_TFM_256BITS		cpu_to_le32(0xf << 16)
#define AES_TFM_GHASH_DIGEST	cpu_to_le32(0x2 << 21)
#define AES_TFM_GHASH		cpu_to_le32(0x4 << 23)
/* AES transform information word 1 fields */
#define AES_TFM_ECB		cpu_to_le32(0x0 << 0)
#define AES_TFM_CBC		cpu_to_le32(0x1 << 0)
#define AES_TFM_CTR_INIT	cpu_to_le32(0x2 << 0)	/* init counter to 1 */
#define AES_TFM_CTR_LOAD	cpu_to_le32(0x6 << 0)	/* load/reuse counter */
#define AES_TFM_3IV		cpu_to_le32(0x7 << 5)	/* using IV 0-2 */
#define AES_TFM_FULL_IV		cpu_to_le32(0xf << 5)	/* using IV 0-3 */
#define AES_TFM_IV_CTR_MODE	cpu_to_le32(0x1 << 10)
#define AES_TFM_ENC_HASH	cpu_to_le32(0x1 << 17)
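/*
 * As mtk_aes_info_init() and mtk_aes_gcm_info_init() below show, word 0
 * combines an operation code (e.g. AES_TFM_BASIC_OUT for encryption),
 * the transform state size via AES_TFM_SIZE() and the key-length bits,
 * while word 1 selects the block mode and how the IV words are used.
 */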
#define AES_FLAGS_CIPHER_MSK	GENMASK(2, 0)
#define AES_FLAGS_ECB		BIT(0)
#define AES_FLAGS_CBC		BIT(1)
#define AES_FLAGS_CTR		BIT(2)
#define AES_FLAGS_GCM		BIT(3)
#define AES_FLAGS_ENCRYPT	BIT(4)
#define AES_FLAGS_BUSY		BIT(5)

#define AES_AUTH_TAG_ERR	cpu_to_le32(BIT(26))

/**
 * mtk_aes_info - hardware information of AES
 * @cmd:	command token, hardware instruction
 * @tfm:	transform state of cipher algorithm.
 * @state:	contains keys and initial vectors.
 *
 * Memory layout of GCM buffer:
 * /-----------\
 * |  AES KEY  | 128/192/256 bits
 * |-----------|
 * |  HASH KEY | a string of 128 zero bits encrypted using the block cipher
 * |-----------|
 * |    IVs    | 4 * 4 bytes
 * \-----------/
 *
 * The engine requires all of this information to:
 * - decode commands and control the engine's data path,
 * - coordinate hardware data fetch and store operations,
 * - construct and output the result token.
 */
struct mtk_aes_info {
	__le32 cmd[AES_MAX_CT_SIZE];
	__le32 tfm[2];
	__le32 state[AES_MAX_STATE_BUF_SIZE];
};

struct mtk_aes_reqctx {
	u64 mode;
};

struct mtk_aes_base_ctx {
	struct mtk_cryp *cryp;
	u32 keylen;
	__le32 keymode;

	mtk_aes_fn start;

	struct mtk_aes_info info;
	dma_addr_t ct_dma;
	dma_addr_t tfm_dma;

	__le32 ct_hdr;
	u32 ct_size;
};

struct mtk_aes_ctx {
	struct mtk_aes_base_ctx	base;
};

struct mtk_aes_ctr_ctx {
	struct mtk_aes_base_ctx base;

	u32	iv[AES_BLOCK_SIZE / sizeof(u32)];
	size_t offset;
	struct scatterlist src[2];
	struct scatterlist dst[2];
};

struct mtk_aes_gcm_ctx {
	struct mtk_aes_base_ctx base;

	u32 authsize;
	size_t textlen;

	struct crypto_skcipher *ctr;
};

struct mtk_aes_gcm_setkey_result {
	int err;
	struct completion completion;
};

struct mtk_aes_drv {
	struct list_head dev_list;
	/* Device list lock */
	spinlock_t lock;
};

static struct mtk_aes_drv mtk_aes = {
	.dev_list = LIST_HEAD_INIT(mtk_aes.dev_list),
	.lock = __SPIN_LOCK_UNLOCKED(mtk_aes.lock),
};

static inline u32 mtk_aes_read(struct mtk_cryp *cryp, u32 offset)
{
	return readl_relaxed(cryp->base + offset);
}

static inline void mtk_aes_write(struct mtk_cryp *cryp,
				 u32 offset, u32 value)
{
	writel_relaxed(value, cryp->base + offset);
}

static struct mtk_cryp *mtk_aes_find_dev(struct mtk_aes_base_ctx *ctx)
{
	struct mtk_cryp *cryp = NULL;
	struct mtk_cryp *tmp;

	spin_lock_bh(&mtk_aes.lock);
	if (!ctx->cryp) {
		list_for_each_entry(tmp, &mtk_aes.dev_list, aes_list) {
			cryp = tmp;
			break;
		}
		ctx->cryp = cryp;
	} else {
		cryp = ctx->cryp;
	}
	spin_unlock_bh(&mtk_aes.lock);

	return cryp;
}

static inline size_t mtk_aes_padlen(size_t len)
{
	len &= AES_BLOCK_SIZE - 1;
	return len ? AES_BLOCK_SIZE - len : 0;
}

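/*
 * Example: a 20-byte request gets 12 bytes of padding (up to 32, the
 * next multiple of AES_BLOCK_SIZE); block-aligned lengths need none.
 */
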
static bool mtk_aes_check_aligned(struct scatterlist *sg, size_t len,
				  struct mtk_aes_dma *dma)
{
	int nents;

	if (!IS_ALIGNED(len, AES_BLOCK_SIZE))
		return false;

	for (nents = 0; sg; sg = sg_next(sg), ++nents) {
		if (!IS_ALIGNED(sg->offset, sizeof(u32)))
			return false;

		if (len <= sg->length) {
			if (!IS_ALIGNED(len, AES_BLOCK_SIZE))
				return false;

			dma->nents = nents + 1;
			dma->remainder = sg->length - len;
			sg->length = len;
			return true;
		}

		if (!IS_ALIGNED(sg->length, AES_BLOCK_SIZE))
			return false;

		len -= sg->length;
	}

	return false;
}

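/*
 * Note: when the requested length ends inside an entry, the entry is
 * trimmed to 'len' and the cut-off byte count is kept in dma->remainder
 * so that mtk_aes_restore_sg() can undo the trim after the DMA unmap.
 */
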
static inline void mtk_aes_set_mode(struct mtk_aes_rec *aes,
				    const struct mtk_aes_reqctx *rctx)
{
	/* Clear all but persistent flags and set request flags. */
	aes->flags = (aes->flags & AES_FLAGS_BUSY) | rctx->mode;
}

static inline void mtk_aes_restore_sg(const struct mtk_aes_dma *dma)
{
	struct scatterlist *sg = dma->sg;
	int nents = dma->nents;

	if (!dma->remainder)
		return;

	while (--nents > 0 && sg)
		sg = sg_next(sg);

	if (!sg)
		return;

	sg->length += dma->remainder;
}

static inline void mtk_aes_write_state_le(__le32 *dst, const u32 *src, u32 size)
{
	int i;

	for (i = 0; i < SIZE_IN_WORDS(size); i++)
		dst[i] = cpu_to_le32(src[i]);
}

static inline void mtk_aes_write_state_be(__be32 *dst, const u32 *src, u32 size)
{
	int i;

	for (i = 0; i < SIZE_IN_WORDS(size); i++)
		dst[i] = cpu_to_be32(src[i]);
}

static inline int mtk_aes_complete(struct mtk_cryp *cryp,
				   struct mtk_aes_rec *aes,
				   int err)
{
	aes->flags &= ~AES_FLAGS_BUSY;
	aes->areq->complete(aes->areq, err);
	/* Handle new request */
	tasklet_schedule(&aes->queue_task);

	return err;
}

/*
 * Write descriptors for processing. This will configure the engine, load
 * the transform information and then start the packet processing.
 */
static int mtk_aes_xmit(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
{
	struct mtk_ring *ring = cryp->ring[aes->id];
	struct mtk_desc *cmd = NULL, *res = NULL;
	struct scatterlist *ssg = aes->src.sg, *dsg = aes->dst.sg;
	u32 slen = aes->src.sg_len, dlen = aes->dst.sg_len;
	int nents;

	/* Write command descriptors */
	for (nents = 0; nents < slen; ++nents, ssg = sg_next(ssg)) {
		cmd = ring->cmd_next;
		cmd->hdr = MTK_DESC_BUF_LEN(ssg->length);
		cmd->buf = cpu_to_le32(sg_dma_address(ssg));

		if (nents == 0) {
			cmd->hdr |= MTK_DESC_FIRST |
				    MTK_DESC_CT_LEN(aes->ctx->ct_size);
			cmd->ct = cpu_to_le32(aes->ctx->ct_dma);
			cmd->ct_hdr = aes->ctx->ct_hdr;
			cmd->tfm = cpu_to_le32(aes->ctx->tfm_dma);
		}

		/* Shift ring buffer and check boundary */
		if (++ring->cmd_next == ring->cmd_base + MTK_DESC_NUM)
			ring->cmd_next = ring->cmd_base;
	}
	cmd->hdr |= MTK_DESC_LAST;

	/* Prepare result descriptors */
	for (nents = 0; nents < dlen; ++nents, dsg = sg_next(dsg)) {
		res = ring->res_next;
		res->hdr = MTK_DESC_BUF_LEN(dsg->length);
		res->buf = cpu_to_le32(sg_dma_address(dsg));

		if (nents == 0)
			res->hdr |= MTK_DESC_FIRST;

		/* Shift ring buffer and check boundary */
		if (++ring->res_next == ring->res_base + MTK_DESC_NUM)
			ring->res_next = ring->res_base;
	}
	res->hdr |= MTK_DESC_LAST;

	/* Pointer to current result descriptor */
	ring->res_prev = res;

	/* Prepare enough space for authenticated tag */
	if (aes->flags & AES_FLAGS_GCM)
		res->hdr += AES_BLOCK_SIZE;

	/*
	 * Make sure that all changes to the DMA ring are done before we
	 * start engine.
	 */
	wmb();
	/* Start DMA transfer */
	mtk_aes_write(cryp, RDR_PREP_COUNT(aes->id), MTK_DESC_CNT(dlen));
	mtk_aes_write(cryp, CDR_PREP_COUNT(aes->id), MTK_DESC_CNT(slen));

	return -EINPROGRESS;
}

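/*
 * Note: only the first command descriptor of a packet carries the
 * control header and the DMA addresses of the command token and the
 * transform record; writing the prepared descriptor counts to the
 * CDR/RDR_PREP_COUNT registers is what hands them to the engine.
 */
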
static void mtk_aes_unmap(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
{
	struct mtk_aes_base_ctx *ctx = aes->ctx;

	dma_unmap_single(cryp->dev, ctx->ct_dma, sizeof(ctx->info),
			 DMA_TO_DEVICE);

	if (aes->src.sg == aes->dst.sg) {
		dma_unmap_sg(cryp->dev, aes->src.sg, aes->src.nents,
			     DMA_BIDIRECTIONAL);

		if (aes->src.sg != &aes->aligned_sg)
			mtk_aes_restore_sg(&aes->src);
	} else {
		dma_unmap_sg(cryp->dev, aes->dst.sg, aes->dst.nents,
			     DMA_FROM_DEVICE);

		if (aes->dst.sg != &aes->aligned_sg)
			mtk_aes_restore_sg(&aes->dst);

		dma_unmap_sg(cryp->dev, aes->src.sg, aes->src.nents,
			     DMA_TO_DEVICE);

		if (aes->src.sg != &aes->aligned_sg)
			mtk_aes_restore_sg(&aes->src);
	}

	if (aes->dst.sg == &aes->aligned_sg)
		sg_copy_from_buffer(aes->real_dst, sg_nents(aes->real_dst),
				    aes->buf, aes->total);
}

static int mtk_aes_map(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
{
	struct mtk_aes_base_ctx *ctx = aes->ctx;
	struct mtk_aes_info *info = &ctx->info;

	ctx->ct_dma = dma_map_single(cryp->dev, info, sizeof(*info),
				     DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(cryp->dev, ctx->ct_dma)))
		goto exit;

	ctx->tfm_dma = ctx->ct_dma + sizeof(info->cmd);

	if (aes->src.sg == aes->dst.sg) {
		aes->src.sg_len = dma_map_sg(cryp->dev, aes->src.sg,
					     aes->src.nents,
					     DMA_BIDIRECTIONAL);
		aes->dst.sg_len = aes->src.sg_len;
		if (unlikely(!aes->src.sg_len))
			goto sg_map_err;
	} else {
		aes->src.sg_len = dma_map_sg(cryp->dev, aes->src.sg,
					     aes->src.nents, DMA_TO_DEVICE);
		if (unlikely(!aes->src.sg_len))
			goto sg_map_err;

		aes->dst.sg_len = dma_map_sg(cryp->dev, aes->dst.sg,
					     aes->dst.nents, DMA_FROM_DEVICE);
		if (unlikely(!aes->dst.sg_len)) {
			dma_unmap_sg(cryp->dev, aes->src.sg, aes->src.nents,
				     DMA_TO_DEVICE);
			goto sg_map_err;
		}
	}

	return mtk_aes_xmit(cryp, aes);

sg_map_err:
	dma_unmap_single(cryp->dev, ctx->ct_dma, sizeof(*info), DMA_TO_DEVICE);
exit:
	return mtk_aes_complete(cryp, aes, -EINVAL);
}

/* Initialize transform information of CBC/ECB/CTR mode */
static void mtk_aes_info_init(struct mtk_cryp *cryp, struct mtk_aes_rec *aes,
			      size_t len)
{
	struct ablkcipher_request *req = ablkcipher_request_cast(aes->areq);
	struct mtk_aes_base_ctx *ctx = aes->ctx;
	struct mtk_aes_info *info = &ctx->info;
	u32 cnt = 0;

	ctx->ct_hdr = AES_CT_CTRL_HDR | cpu_to_le32(len);
	info->cmd[cnt++] = AES_CMD0 | cpu_to_le32(len);
	info->cmd[cnt++] = AES_CMD1;

	info->tfm[0] = AES_TFM_SIZE(ctx->keylen) | ctx->keymode;
	if (aes->flags & AES_FLAGS_ENCRYPT)
		info->tfm[0] |= AES_TFM_BASIC_OUT;
	else
		info->tfm[0] |= AES_TFM_BASIC_IN;

	switch (aes->flags & AES_FLAGS_CIPHER_MSK) {
	case AES_FLAGS_CBC:
		info->tfm[1] = AES_TFM_CBC;
		break;
	case AES_FLAGS_ECB:
		info->tfm[1] = AES_TFM_ECB;
		goto ecb;
	case AES_FLAGS_CTR:
		info->tfm[1] = AES_TFM_CTR_LOAD;
		goto ctr;

	default:
		/* Should not happen... */
		return;
	}

	mtk_aes_write_state_le(info->state + ctx->keylen, req->info,
			       AES_BLOCK_SIZE);
ctr:
	info->tfm[0] += AES_TFM_SIZE(SIZE_IN_WORDS(AES_BLOCK_SIZE));
	info->tfm[1] |= AES_TFM_FULL_IV;
	info->cmd[cnt++] = AES_CMD2;
ecb:
	ctx->ct_size = cnt;
}

static int mtk_aes_dma(struct mtk_cryp *cryp, struct mtk_aes_rec *aes,
		       struct scatterlist *src, struct scatterlist *dst,
		       size_t len)
{
	size_t padlen = 0;
	bool src_aligned, dst_aligned;

	aes->total = len;
	aes->src.sg = src;
	aes->dst.sg = dst;
	aes->real_dst = dst;

	src_aligned = mtk_aes_check_aligned(src, len, &aes->src);
	if (src == dst)
		dst_aligned = src_aligned;
	else
		dst_aligned = mtk_aes_check_aligned(dst, len, &aes->dst);

	if (!src_aligned || !dst_aligned) {
		padlen = mtk_aes_padlen(len);

		if (len + padlen > AES_BUF_SIZE)
			return mtk_aes_complete(cryp, aes, -ENOMEM);

		if (!src_aligned) {
			sg_copy_to_buffer(src, sg_nents(src), aes->buf, len);
			aes->src.sg = &aes->aligned_sg;
			aes->src.nents = 1;
			aes->src.remainder = 0;
		}

		if (!dst_aligned) {
			aes->dst.sg = &aes->aligned_sg;
			aes->dst.nents = 1;
			aes->dst.remainder = 0;
		}

		sg_init_table(&aes->aligned_sg, 1);
		sg_set_buf(&aes->aligned_sg, aes->buf, len + padlen);
	}

	mtk_aes_info_init(cryp, aes, len + padlen);

	return mtk_aes_map(cryp, aes);
}

static int mtk_aes_handle_queue(struct mtk_cryp *cryp, u8 id,
				struct crypto_async_request *new_areq)
{
	struct mtk_aes_rec *aes = cryp->aes[id];
	struct crypto_async_request *areq, *backlog;
	struct mtk_aes_base_ctx *ctx;
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&aes->lock, flags);
	if (new_areq)
		ret = crypto_enqueue_request(&aes->queue, new_areq);
	if (aes->flags & AES_FLAGS_BUSY) {
		spin_unlock_irqrestore(&aes->lock, flags);
		return ret;
	}
	backlog = crypto_get_backlog(&aes->queue);
	areq = crypto_dequeue_request(&aes->queue);
	if (areq)
		aes->flags |= AES_FLAGS_BUSY;
	spin_unlock_irqrestore(&aes->lock, flags);

	if (!areq)
		return ret;

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);

	ctx = crypto_tfm_ctx(areq->tfm);

	aes->areq = areq;
	aes->ctx = ctx;

	return ctx->start(cryp, aes);
}

static int mtk_aes_transfer_complete(struct mtk_cryp *cryp,
				     struct mtk_aes_rec *aes)
{
	return mtk_aes_complete(cryp, aes, 0);
}

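/*
 * aes->resume is invoked from the done tasklet once the DMA buffers are
 * unmapped: plain CBC/ECB requests and GCM encryption finish here, CTR
 * chains the next chunk via mtk_aes_ctr_transfer(), and GCM decryption
 * checks the tag via mtk_aes_gcm_tag_verify().
 */
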
static int mtk_aes_start(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
{
	struct ablkcipher_request *req = ablkcipher_request_cast(aes->areq);
	struct mtk_aes_reqctx *rctx = ablkcipher_request_ctx(req);

	mtk_aes_set_mode(aes, rctx);
	aes->resume = mtk_aes_transfer_complete;

	return mtk_aes_dma(cryp, aes, req->src, req->dst, req->nbytes);
}

static inline struct mtk_aes_ctr_ctx *
mtk_aes_ctr_ctx_cast(struct mtk_aes_base_ctx *ctx)
{
	return container_of(ctx, struct mtk_aes_ctr_ctx, base);
}

static int mtk_aes_ctr_transfer(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
{
	struct mtk_aes_base_ctx *ctx = aes->ctx;
	struct mtk_aes_ctr_ctx *cctx = mtk_aes_ctr_ctx_cast(ctx);
	struct ablkcipher_request *req = ablkcipher_request_cast(aes->areq);
	struct scatterlist *src, *dst;
	u32 start, end, ctr, blocks;
	size_t datalen;
	bool fragmented = false;

	/* Check for transfer completion. */
	cctx->offset += aes->total;
	if (cctx->offset >= req->nbytes)
		return mtk_aes_transfer_complete(cryp, aes);

	/* Compute data length. */
	datalen = req->nbytes - cctx->offset;
	blocks = DIV_ROUND_UP(datalen, AES_BLOCK_SIZE);
	ctr = be32_to_cpu(cctx->iv[3]);

	/* Check 32bit counter overflow. */
	start = ctr;
	end = start + blocks - 1;
	if (end < start) {
		ctr |= 0xffffffff;
		datalen = AES_BLOCK_SIZE * -start;
		fragmented = true;
	}

	/* Jump to offset. */
	src = scatterwalk_ffwd(cctx->src, req->src, cctx->offset);
	dst = ((req->src == req->dst) ? src :
	       scatterwalk_ffwd(cctx->dst, req->dst, cctx->offset));

	/* Write IVs into transform state buffer. */
	mtk_aes_write_state_le(ctx->info.state + ctx->keylen, cctx->iv,
			       AES_BLOCK_SIZE);

	if (unlikely(fragmented)) {
		/*
		 * Increment the counter manually to cope with the hardware
		 * counter overflow.
		 */
		cctx->iv[3] = cpu_to_be32(ctr);
		crypto_inc((u8 *)cctx->iv, AES_BLOCK_SIZE);
	}

	return mtk_aes_dma(cryp, aes, src, dst, datalen);
}

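/*
 * Example: with 4 blocks left and the counter word at 0xfffffffe,
 * end = 0xfffffffe + 3 wraps below start, so only -start = 2 blocks
 * (32 bytes) are submitted with counters 0xfffffffe and 0xffffffff;
 * crypto_inc() then carries into the upper IV words before the next
 * chunk is queued.
 */
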
static int mtk_aes_ctr_start(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
{
	struct mtk_aes_ctr_ctx *cctx = mtk_aes_ctr_ctx_cast(aes->ctx);
	struct ablkcipher_request *req = ablkcipher_request_cast(aes->areq);
	struct mtk_aes_reqctx *rctx = ablkcipher_request_ctx(req);

	mtk_aes_set_mode(aes, rctx);

	memcpy(cctx->iv, req->info, AES_BLOCK_SIZE);
	cctx->offset = 0;
	aes->total = 0;
	aes->resume = mtk_aes_ctr_transfer;

	return mtk_aes_ctr_transfer(cryp, aes);
}

/* Check the AES key and copy it into the transform state buffer */
static int mtk_aes_setkey(struct crypto_ablkcipher *tfm,
			  const u8 *key, u32 keylen)
{
	struct mtk_aes_base_ctx *ctx = crypto_ablkcipher_ctx(tfm);

	switch (keylen) {
	case AES_KEYSIZE_128:
		ctx->keymode = AES_TFM_128BITS;
		break;
	case AES_KEYSIZE_192:
		ctx->keymode = AES_TFM_192BITS;
		break;
	case AES_KEYSIZE_256:
		ctx->keymode = AES_TFM_256BITS;
		break;

	default:
		crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	ctx->keylen = SIZE_IN_WORDS(keylen);
	mtk_aes_write_state_le(ctx->info.state, (const u32 *)key, keylen);

	return 0;
}

static int mtk_aes_crypt(struct ablkcipher_request *req, u64 mode)
{
	struct mtk_aes_base_ctx *ctx;
	struct mtk_aes_reqctx *rctx;

	ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req));
	rctx = ablkcipher_request_ctx(req);
	rctx->mode = mode;

	return mtk_aes_handle_queue(ctx->cryp, !(mode & AES_FLAGS_ENCRYPT),
				    &req->base);
}

static int mtk_aes_ecb_encrypt(struct ablkcipher_request *req)
{
	return mtk_aes_crypt(req, AES_FLAGS_ENCRYPT | AES_FLAGS_ECB);
}

static int mtk_aes_ecb_decrypt(struct ablkcipher_request *req)
{
	return mtk_aes_crypt(req, AES_FLAGS_ECB);
}

static int mtk_aes_cbc_encrypt(struct ablkcipher_request *req)
{
	return mtk_aes_crypt(req, AES_FLAGS_ENCRYPT | AES_FLAGS_CBC);
}

static int mtk_aes_cbc_decrypt(struct ablkcipher_request *req)
{
	return mtk_aes_crypt(req, AES_FLAGS_CBC);
}

static int mtk_aes_ctr_encrypt(struct ablkcipher_request *req)
{
	return mtk_aes_crypt(req, AES_FLAGS_ENCRYPT | AES_FLAGS_CTR);
}

static int mtk_aes_ctr_decrypt(struct ablkcipher_request *req)
{
	return mtk_aes_crypt(req, AES_FLAGS_CTR);
}

static int mtk_aes_cra_init(struct crypto_tfm *tfm)
{
	struct mtk_aes_ctx *ctx = crypto_tfm_ctx(tfm);
	struct mtk_cryp *cryp = NULL;

	cryp = mtk_aes_find_dev(&ctx->base);
	if (!cryp) {
		pr_err("can't find crypto device\n");
		return -ENODEV;
	}

	tfm->crt_ablkcipher.reqsize = sizeof(struct mtk_aes_reqctx);
	ctx->base.start = mtk_aes_start;
	return 0;
}

static int mtk_aes_ctr_cra_init(struct crypto_tfm *tfm)
{
	struct mtk_aes_ctx *ctx = crypto_tfm_ctx(tfm);
	struct mtk_cryp *cryp = NULL;

	cryp = mtk_aes_find_dev(&ctx->base);
	if (!cryp) {
		pr_err("can't find crypto device\n");
		return -ENODEV;
	}

	tfm->crt_ablkcipher.reqsize = sizeof(struct mtk_aes_reqctx);
	ctx->base.start = mtk_aes_ctr_start;
	return 0;
}

static struct crypto_alg aes_algs[] = {
{
	.cra_name		= "cbc(aes)",
	.cra_driver_name	= "cbc-aes-mtk",
	.cra_priority		= 400,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
				  CRYPTO_ALG_ASYNC,
	.cra_init		= mtk_aes_cra_init,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct mtk_aes_ctx),
	.cra_alignmask		= 0xf,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_u.ablkcipher	= {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.setkey		= mtk_aes_setkey,
		.encrypt	= mtk_aes_cbc_encrypt,
		.decrypt	= mtk_aes_cbc_decrypt,
		.ivsize		= AES_BLOCK_SIZE,
	}
},
{
	.cra_name		= "ecb(aes)",
	.cra_driver_name	= "ecb-aes-mtk",
	.cra_priority		= 400,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
				  CRYPTO_ALG_ASYNC,
	.cra_init		= mtk_aes_cra_init,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct mtk_aes_ctx),
	.cra_alignmask		= 0xf,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_u.ablkcipher	= {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.setkey		= mtk_aes_setkey,
		.encrypt	= mtk_aes_ecb_encrypt,
		.decrypt	= mtk_aes_ecb_decrypt,
	}
},
{
	.cra_name		= "ctr(aes)",
	.cra_driver_name	= "ctr-aes-mtk",
	.cra_priority		= 400,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
				  CRYPTO_ALG_ASYNC,
	.cra_init		= mtk_aes_ctr_cra_init,
	.cra_blocksize		= 1,
	.cra_ctxsize		= sizeof(struct mtk_aes_ctr_ctx),
	.cra_alignmask		= 0xf,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_u.ablkcipher	= {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.setkey		= mtk_aes_setkey,
		.encrypt	= mtk_aes_ctr_encrypt,
		.decrypt	= mtk_aes_ctr_decrypt,
	}
},
};

static inline struct mtk_aes_gcm_ctx *
mtk_aes_gcm_ctx_cast(struct mtk_aes_base_ctx *ctx)
{
	return container_of(ctx, struct mtk_aes_gcm_ctx, base);
}

/*
 * The engine verifies and compares the tag automatically, so we just need
 * to check the returned status stored in the result descriptor.
 */
static int mtk_aes_gcm_tag_verify(struct mtk_cryp *cryp,
				  struct mtk_aes_rec *aes)
{
	u32 status = cryp->ring[aes->id]->res_prev->ct;

	return mtk_aes_complete(cryp, aes, (status & AES_AUTH_TAG_ERR) ?
				-EBADMSG : 0);
}

/* Initialize transform information of GCM mode */
static void mtk_aes_gcm_info_init(struct mtk_cryp *cryp,
				  struct mtk_aes_rec *aes,
				  size_t len)
{
	struct aead_request *req = aead_request_cast(aes->areq);
	struct mtk_aes_base_ctx *ctx = aes->ctx;
	struct mtk_aes_gcm_ctx *gctx = mtk_aes_gcm_ctx_cast(ctx);
	struct mtk_aes_info *info = &ctx->info;
	u32 ivsize = crypto_aead_ivsize(crypto_aead_reqtfm(req));
	u32 cnt = 0;

	ctx->ct_hdr = AES_CT_CTRL_HDR | cpu_to_le32(len);

	info->cmd[cnt++] = AES_GCM_CMD0 | cpu_to_le32(req->assoclen);
	info->cmd[cnt++] = AES_GCM_CMD1 | cpu_to_le32(req->assoclen);
	info->cmd[cnt++] = AES_GCM_CMD2;
	info->cmd[cnt++] = AES_GCM_CMD3 | cpu_to_le32(gctx->textlen);

	if (aes->flags & AES_FLAGS_ENCRYPT) {
		info->cmd[cnt++] = AES_GCM_CMD4 | cpu_to_le32(gctx->authsize);
		info->tfm[0] = AES_TFM_GCM_OUT;
	} else {
		info->cmd[cnt++] = AES_GCM_CMD5 | cpu_to_le32(gctx->authsize);
		info->cmd[cnt++] = AES_GCM_CMD6 | cpu_to_le32(gctx->authsize);
		info->tfm[0] = AES_TFM_GCM_IN;
	}
	ctx->ct_size = cnt;

	info->tfm[0] |= AES_TFM_GHASH_DIGEST | AES_TFM_GHASH | AES_TFM_SIZE(
			ctx->keylen + SIZE_IN_WORDS(AES_BLOCK_SIZE + ivsize)) |
			ctx->keymode;
	info->tfm[1] = AES_TFM_CTR_INIT | AES_TFM_IV_CTR_MODE | AES_TFM_3IV |
		       AES_TFM_ENC_HASH;

	mtk_aes_write_state_le(info->state + ctx->keylen + SIZE_IN_WORDS(
			       AES_BLOCK_SIZE), (const u32 *)req->iv, ivsize);
}

static int mtk_aes_gcm_dma(struct mtk_cryp *cryp, struct mtk_aes_rec *aes,
			   struct scatterlist *src, struct scatterlist *dst,
			   size_t len)
{
	bool src_aligned, dst_aligned;

	aes->src.sg = src;
	aes->dst.sg = dst;
	aes->real_dst = dst;

	src_aligned = mtk_aes_check_aligned(src, len, &aes->src);
	if (src == dst)
		dst_aligned = src_aligned;
	else
		dst_aligned = mtk_aes_check_aligned(dst, len, &aes->dst);

	if (!src_aligned || !dst_aligned) {
		if (aes->total > AES_BUF_SIZE)
			return mtk_aes_complete(cryp, aes, -ENOMEM);

		if (!src_aligned) {
			sg_copy_to_buffer(src, sg_nents(src), aes->buf, len);
			aes->src.sg = &aes->aligned_sg;
			aes->src.nents = 1;
			aes->src.remainder = 0;
		}

		if (!dst_aligned) {
			aes->dst.sg = &aes->aligned_sg;
			aes->dst.nents = 1;
			aes->dst.remainder = 0;
		}

		sg_init_table(&aes->aligned_sg, 1);
		sg_set_buf(&aes->aligned_sg, aes->buf, aes->total);
	}

	mtk_aes_gcm_info_init(cryp, aes, len);

	return mtk_aes_map(cryp, aes);
}

static int mtk_aes_gcm_start(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
{
	struct mtk_aes_gcm_ctx *gctx = mtk_aes_gcm_ctx_cast(aes->ctx);
	struct aead_request *req = aead_request_cast(aes->areq);
	struct mtk_aes_reqctx *rctx = aead_request_ctx(req);
	u32 len = req->assoclen + req->cryptlen;

	mtk_aes_set_mode(aes, rctx);

	if (aes->flags & AES_FLAGS_ENCRYPT) {
		u32 tag[4];

		aes->resume = mtk_aes_transfer_complete;
		/* Compute total process length. */
		aes->total = len + gctx->authsize;
		/* Compute text length. */
		gctx->textlen = req->cryptlen;
		/* Hardware will append authenticated tag to output buffer */
		scatterwalk_map_and_copy(tag, req->dst, len, gctx->authsize, 1);
	} else {
		aes->resume = mtk_aes_gcm_tag_verify;
		aes->total = len;
		gctx->textlen = req->cryptlen - gctx->authsize;
	}

	return mtk_aes_gcm_dma(cryp, aes, req->src, req->dst, len);
}

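/*
 * Example: encrypting 64 bytes with 16 bytes of AAD and a 16-byte tag
 * gives len = 80, aes->total = 96 and gctx->textlen = 64; on decryption
 * the same packet arrives with cryptlen = 80 (data plus tag), so
 * textlen = 80 - 16 = 64.
 */
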
static int mtk_aes_gcm_crypt(struct aead_request *req, u64 mode)
{
	struct mtk_aes_base_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
	struct mtk_aes_reqctx *rctx = aead_request_ctx(req);

	rctx->mode = AES_FLAGS_GCM | mode;

	return mtk_aes_handle_queue(ctx->cryp, !!(mode & AES_FLAGS_ENCRYPT),
				    &req->base);
}

static void mtk_gcm_setkey_done(struct crypto_async_request *req, int err)
{
	struct mtk_aes_gcm_setkey_result *result = req->data;

	if (err == -EINPROGRESS)
		return;

	result->err = err;
	complete(&result->completion);
}

/*
 * Because of the hardware limitation, we need to pre-calculate key(H)
 * for the GHASH operation. The result of the encryption operation
 * needs to be stored in the transform state buffer.
 */
static int mtk_aes_gcm_setkey(struct crypto_aead *aead, const u8 *key,
			      u32 keylen)
{
	struct mtk_aes_base_ctx *ctx = crypto_aead_ctx(aead);
	struct mtk_aes_gcm_ctx *gctx = mtk_aes_gcm_ctx_cast(ctx);
	struct crypto_skcipher *ctr = gctx->ctr;
	struct {
		u32 hash[4];
		u8 iv[16];

		struct mtk_aes_gcm_setkey_result result;

		struct scatterlist sg[1];
		struct skcipher_request req;
	} *data;
	int err;

	switch (keylen) {
	case AES_KEYSIZE_128:
		ctx->keymode = AES_TFM_128BITS;
		break;
	case AES_KEYSIZE_192:
		ctx->keymode = AES_TFM_192BITS;
		break;
	case AES_KEYSIZE_256:
		ctx->keymode = AES_TFM_256BITS;
		break;

	default:
		crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	ctx->keylen = SIZE_IN_WORDS(keylen);

	/* Same as crypto_gcm_setkey() from crypto/gcm.c */
	crypto_skcipher_clear_flags(ctr, CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(ctr, crypto_aead_get_flags(aead) &
				  CRYPTO_TFM_REQ_MASK);
	err = crypto_skcipher_setkey(ctr, key, keylen);
	crypto_aead_set_flags(aead, crypto_skcipher_get_flags(ctr) &
			      CRYPTO_TFM_RES_MASK);
	if (err)
		return err;

	data = kzalloc(sizeof(*data) + crypto_skcipher_reqsize(ctr),
		       GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	init_completion(&data->result.completion);
	sg_init_one(data->sg, &data->hash, AES_BLOCK_SIZE);
	skcipher_request_set_tfm(&data->req, ctr);
	skcipher_request_set_callback(&data->req, CRYPTO_TFM_REQ_MAY_SLEEP |
				      CRYPTO_TFM_REQ_MAY_BACKLOG,
				      mtk_gcm_setkey_done, &data->result);
	skcipher_request_set_crypt(&data->req, data->sg, data->sg,
				   AES_BLOCK_SIZE, data->iv);

	err = crypto_skcipher_encrypt(&data->req);
	if (err == -EINPROGRESS || err == -EBUSY) {
		err = wait_for_completion_interruptible(
			&data->result.completion);
		if (!err)
			err = data->result.err;
	}
	if (err)
		goto out;

	/* Write key into state buffer */
	mtk_aes_write_state_le(ctx->info.state, (const u32 *)key, keylen);
	/* Write key(H) into state buffer */
	mtk_aes_write_state_be(ctx->info.state + ctx->keylen, data->hash,
			       AES_BLOCK_SIZE);
out:
	kzfree(data);
	return err;
}

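/*
 * Since 'data' is zero-initialized, the single CTR block encrypted above
 * is E_K(0^128), i.e. the GHASH key H, which is stored big-endian right
 * after the AES key in the transform state.
 */
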
static int mtk_aes_gcm_setauthsize(struct crypto_aead *aead,
				   u32 authsize)
{
	struct mtk_aes_base_ctx *ctx = crypto_aead_ctx(aead);
	struct mtk_aes_gcm_ctx *gctx = mtk_aes_gcm_ctx_cast(ctx);

	/* Same as crypto_gcm_authsize() from crypto/gcm.c */
	switch (authsize) {
	case 8:
	case 12:
	case 16:
		break;
	default:
		return -EINVAL;
	}

	gctx->authsize = authsize;
	return 0;
}

static int mtk_aes_gcm_encrypt(struct aead_request *req)
{
	return mtk_aes_gcm_crypt(req, AES_FLAGS_ENCRYPT);
}

static int mtk_aes_gcm_decrypt(struct aead_request *req)
{
	return mtk_aes_gcm_crypt(req, 0);
}

static int mtk_aes_gcm_init(struct crypto_aead *aead)
{
	struct mtk_aes_gcm_ctx *ctx = crypto_aead_ctx(aead);
	struct mtk_cryp *cryp = NULL;

	cryp = mtk_aes_find_dev(&ctx->base);
	if (!cryp) {
		pr_err("can't find crypto device\n");
		return -ENODEV;
	}

	ctx->ctr = crypto_alloc_skcipher("ctr(aes)", 0,
					 CRYPTO_ALG_ASYNC);
	if (IS_ERR(ctx->ctr)) {
		pr_err("Error allocating ctr(aes)\n");
		return PTR_ERR(ctx->ctr);
	}

	crypto_aead_set_reqsize(aead, sizeof(struct mtk_aes_reqctx));
	ctx->base.start = mtk_aes_gcm_start;
	return 0;
}

static void mtk_aes_gcm_exit(struct crypto_aead *aead)
{
	struct mtk_aes_gcm_ctx *ctx = crypto_aead_ctx(aead);

	crypto_free_skcipher(ctx->ctr);
}

static struct aead_alg aes_gcm_alg = {
	.setkey		= mtk_aes_gcm_setkey,
	.setauthsize	= mtk_aes_gcm_setauthsize,
	.encrypt	= mtk_aes_gcm_encrypt,
	.decrypt	= mtk_aes_gcm_decrypt,
	.init		= mtk_aes_gcm_init,
	.exit		= mtk_aes_gcm_exit,
	.ivsize		= 12,
	.maxauthsize	= AES_BLOCK_SIZE,

	.base = {
		.cra_name		= "gcm(aes)",
		.cra_driver_name	= "gcm-aes-mtk",
		.cra_priority		= 400,
		.cra_flags		= CRYPTO_ALG_ASYNC,
		.cra_blocksize		= 1,
		.cra_ctxsize		= sizeof(struct mtk_aes_gcm_ctx),
		.cra_alignmask		= 0xf,
		.cra_module		= THIS_MODULE,
	},
};

static void mtk_aes_queue_task(unsigned long data)
{
	struct mtk_aes_rec *aes = (struct mtk_aes_rec *)data;

	mtk_aes_handle_queue(aes->cryp, aes->id, NULL);
}

static void mtk_aes_done_task(unsigned long data)
{
	struct mtk_aes_rec *aes = (struct mtk_aes_rec *)data;
	struct mtk_cryp *cryp = aes->cryp;

	mtk_aes_unmap(cryp, aes);
	aes->resume(cryp, aes);
}

static irqreturn_t mtk_aes_irq(int irq, void *dev_id)
{
	struct mtk_aes_rec *aes = (struct mtk_aes_rec *)dev_id;
	struct mtk_cryp *cryp = aes->cryp;
	u32 val = mtk_aes_read(cryp, RDR_STAT(aes->id));

	mtk_aes_write(cryp, RDR_STAT(aes->id), val);

	if (likely(AES_FLAGS_BUSY & aes->flags)) {
		mtk_aes_write(cryp, RDR_PROC_COUNT(aes->id), MTK_CNT_RST);
		mtk_aes_write(cryp, RDR_THRESH(aes->id),
			      MTK_RDR_PROC_THRESH | MTK_RDR_PROC_MODE);

		tasklet_schedule(&aes->done_task);
	} else {
		dev_warn(cryp->dev, "AES interrupt when no active requests.\n");
	}
	return IRQ_HANDLED;
}

/*
 * The purpose of creating encryption and decryption records is
 * to process outbound/inbound data in parallel; this can improve
 * performance in most use cases, such as IPSec VPN, especially
 * under heavy network traffic.
 */
static int mtk_aes_record_init(struct mtk_cryp *cryp)
{
	struct mtk_aes_rec **aes = cryp->aes;
	int i, err = -ENOMEM;

	for (i = 0; i < MTK_REC_NUM; i++) {
		aes[i] = kzalloc(sizeof(**aes), GFP_KERNEL);
		if (!aes[i])
			goto err_cleanup;

		aes[i]->buf = (void *)__get_free_pages(GFP_KERNEL,
						       AES_BUF_ORDER);
		if (!aes[i]->buf)
			goto err_cleanup;

		aes[i]->cryp = cryp;

		spin_lock_init(&aes[i]->lock);
		crypto_init_queue(&aes[i]->queue, AES_QUEUE_SIZE);

		tasklet_init(&aes[i]->queue_task, mtk_aes_queue_task,
			     (unsigned long)aes[i]);
		tasklet_init(&aes[i]->done_task, mtk_aes_done_task,
			     (unsigned long)aes[i]);
	}

	/* Link to ring0 and ring1 respectively */
	aes[0]->id = MTK_RING0;
	aes[1]->id = MTK_RING1;

	return 0;

err_cleanup:
	for (; i--; ) {
		/* Buffers are order-2 allocations, so free all pages */
		free_pages((unsigned long)aes[i]->buf, AES_BUF_ORDER);
		kfree(aes[i]);
	}

	return err;
}

static void mtk_aes_record_free(struct mtk_cryp *cryp)
{
	int i;

	for (i = 0; i < MTK_REC_NUM; i++) {
		tasklet_kill(&cryp->aes[i]->done_task);
		tasklet_kill(&cryp->aes[i]->queue_task);

		free_pages((unsigned long)cryp->aes[i]->buf, AES_BUF_ORDER);
		kfree(cryp->aes[i]);
	}
}

static void mtk_aes_unregister_algs(void)
{
	int i;

	crypto_unregister_aead(&aes_gcm_alg);

	for (i = 0; i < ARRAY_SIZE(aes_algs); i++)
		crypto_unregister_alg(&aes_algs[i]);
}

static int mtk_aes_register_algs(void)
{
	int err, i;

	for (i = 0; i < ARRAY_SIZE(aes_algs); i++) {
		err = crypto_register_alg(&aes_algs[i]);
		if (err)
			goto err_aes_algs;
	}

	err = crypto_register_aead(&aes_gcm_alg);
	if (err)
		goto err_aes_algs;

	return 0;

err_aes_algs:
	for (; i--; )
		crypto_unregister_alg(&aes_algs[i]);

	return err;
}

int mtk_cipher_alg_register(struct mtk_cryp *cryp)
{
	int ret;

	INIT_LIST_HEAD(&cryp->aes_list);

	/* Initialize two cipher records */
	ret = mtk_aes_record_init(cryp);
	if (ret)
		goto err_record;

	ret = devm_request_irq(cryp->dev, cryp->irq[MTK_RING0], mtk_aes_irq,
			       0, "mtk-aes", cryp->aes[0]);
	if (ret) {
		dev_err(cryp->dev, "unable to request AES irq.\n");
		goto err_res;
	}

	ret = devm_request_irq(cryp->dev, cryp->irq[MTK_RING1], mtk_aes_irq,
			       0, "mtk-aes", cryp->aes[1]);
	if (ret) {
		dev_err(cryp->dev, "unable to request AES irq.\n");
		goto err_res;
	}

	/* Enable ring0 and ring1 interrupt */
	mtk_aes_write(cryp, AIC_ENABLE_SET(MTK_RING0), MTK_IRQ_RDR0);
	mtk_aes_write(cryp, AIC_ENABLE_SET(MTK_RING1), MTK_IRQ_RDR1);

	spin_lock(&mtk_aes.lock);
	list_add_tail(&cryp->aes_list, &mtk_aes.dev_list);
	spin_unlock(&mtk_aes.lock);

	ret = mtk_aes_register_algs();
	if (ret)
		goto err_algs;

	return 0;

err_algs:
	spin_lock(&mtk_aes.lock);
	list_del(&cryp->aes_list);
	spin_unlock(&mtk_aes.lock);
err_res:
	mtk_aes_record_free(cryp);
err_record:
	dev_err(cryp->dev, "mtk-aes initialization failed.\n");
	return ret;
}

void mtk_cipher_alg_release(struct mtk_cryp *cryp)
{
	spin_lock(&mtk_aes.lock);
	list_del(&cryp->aes_list);
	spin_unlock(&mtk_aes.lock);

	mtk_aes_unregister_algs();
	mtk_aes_record_free(cryp);
}