/*
 * Cryptographic API.
 *
 * Support for ATMEL AES HW acceleration.
 *
 * Copyright (c) 2012 Eukréa Electromatique - ATMEL
 * Author: Nicolas Royer <nicolas@eukrea.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * Some ideas are from omap-aes.c driver.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/hw_random.h>
#include <linux/platform_device.h>

#include <linux/device.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/of_device.h>
#include <linux/delay.h>
#include <linux/crypto.h>
#include <crypto/scatterwalk.h>
#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/internal/aead.h>
#include <linux/platform_data/crypto-atmel.h>
#include <dt-bindings/dma/at91.h>
#include "atmel-aes-regs.h"
#define ATMEL_AES_PRIORITY	300

#define ATMEL_AES_BUFFER_ORDER	2
#define ATMEL_AES_BUFFER_SIZE	(PAGE_SIZE << ATMEL_AES_BUFFER_ORDER)

#define CFB8_BLOCK_SIZE		1
#define CFB16_BLOCK_SIZE	2
#define CFB32_BLOCK_SIZE	4
#define CFB64_BLOCK_SIZE	8

#define SIZE_IN_WORDS(x)	((x) >> 2)
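
/*
 * For scale: SIZE_IN_WORDS(AES_BLOCK_SIZE) == 4, i.e. the register helpers
 * below move one 16-byte AES block as four 32-bit words.
 */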
/* Reserve bits [18:16] [14:12] [1:0] for mode (same as for AES_MR) */
#define AES_FLAGS_ENCRYPT	AES_MR_CYPHER_ENC
#define AES_FLAGS_GTAGEN	AES_MR_GTAGEN
#define AES_FLAGS_OPMODE_MASK	(AES_MR_OPMOD_MASK | AES_MR_CFBS_MASK)
#define AES_FLAGS_ECB		AES_MR_OPMOD_ECB
#define AES_FLAGS_CBC		AES_MR_OPMOD_CBC
#define AES_FLAGS_OFB		AES_MR_OPMOD_OFB
#define AES_FLAGS_CFB128	(AES_MR_OPMOD_CFB | AES_MR_CFBS_128b)
#define AES_FLAGS_CFB64		(AES_MR_OPMOD_CFB | AES_MR_CFBS_64b)
#define AES_FLAGS_CFB32		(AES_MR_OPMOD_CFB | AES_MR_CFBS_32b)
#define AES_FLAGS_CFB16		(AES_MR_OPMOD_CFB | AES_MR_CFBS_16b)
#define AES_FLAGS_CFB8		(AES_MR_OPMOD_CFB | AES_MR_CFBS_8b)
#define AES_FLAGS_CTR		AES_MR_OPMOD_CTR
#define AES_FLAGS_GCM		AES_MR_OPMOD_GCM

#define AES_FLAGS_MODE_MASK	(AES_FLAGS_OPMODE_MASK |	\
				 AES_FLAGS_ENCRYPT |		\
				 AES_FLAGS_GTAGEN)

#define AES_FLAGS_INIT		BIT(2)
#define AES_FLAGS_BUSY		BIT(3)
#define AES_FLAGS_DUMP_REG	BIT(4)

#define AES_FLAGS_PERSISTENT	(AES_FLAGS_INIT | AES_FLAGS_BUSY)
#define ATMEL_AES_QUEUE_LENGTH	50

#define ATMEL_AES_DMA_THRESHOLD	256
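
/*
 * Rule of thumb used throughout the driver: requests shorter than
 * ATMEL_AES_DMA_THRESHOLD bytes are pushed into IDATAR by the CPU
 * (atmel_aes_cpu_start()), larger ones go through the two DMA channels;
 * atmel_aes_start() also forces DMA whenever the block size is not the
 * plain 16-byte AES block.
 */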
struct atmel_aes_caps {
	bool	has_dualbuff;
	bool	has_cfb64;
	bool	has_gcm;
	u32	max_burst_size;
};

struct atmel_aes_dev;

typedef int (*atmel_aes_fn_t)(struct atmel_aes_dev *);
struct atmel_aes_base_ctx {
	struct atmel_aes_dev	*dd;
	atmel_aes_fn_t		start;
	int			keylen;
	u32			key[AES_KEYSIZE_256 / sizeof(u32)];
	u16			block_size;
};
struct atmel_aes_ctx {
	struct atmel_aes_base_ctx	base;
};
struct atmel_aes_ctr_ctx {
	struct atmel_aes_base_ctx	base;

	u32			iv[AES_BLOCK_SIZE / sizeof(u32)];
	size_t			offset;
	struct scatterlist	src[2];
	struct scatterlist	dst[2];
};
struct atmel_aes_gcm_ctx {
	struct atmel_aes_base_ctx	base;

	struct scatterlist	src[2];
	struct scatterlist	dst[2];

	u32			j0[AES_BLOCK_SIZE / sizeof(u32)];
	u32			tag[AES_BLOCK_SIZE / sizeof(u32)];
	u32			ghash[AES_BLOCK_SIZE / sizeof(u32)];

	size_t			textlen;

	const u32		*ghash_in;
	u32			*ghash_out;
	atmel_aes_fn_t		ghash_resume;
};
struct atmel_aes_reqctx {
	unsigned long		mode;
};
struct atmel_aes_dma {
	struct dma_chan		*chan;
	struct scatterlist	*sg;
	int			nents;
	unsigned int		remainder;
	unsigned int		sg_len;
};
struct atmel_aes_dev {
	struct list_head	list;
	unsigned long		phys_base;
	void __iomem		*io_base;

	struct crypto_async_request	*areq;
	struct atmel_aes_base_ctx	*ctx;

	bool			is_async;
	atmel_aes_fn_t		resume;
	atmel_aes_fn_t		cpu_transfer_complete;

	struct device		*dev;
	struct clk		*iclk;
	int			irq;

	unsigned long		flags;

	spinlock_t		lock;
	struct crypto_queue	queue;

	struct tasklet_struct	done_task;
	struct tasklet_struct	queue_task;

	size_t			total;
	size_t			datalen;
	u32			*data;

	struct atmel_aes_dma	src;
	struct atmel_aes_dma	dst;

	size_t			buflen;
	void			*buf;
	struct scatterlist	aligned_sg;
	struct scatterlist	*real_dst;

	struct atmel_aes_caps	caps;

	u32			hw_version;
};
struct atmel_aes_drv {
	struct list_head	dev_list;
	spinlock_t		lock;
};

static struct atmel_aes_drv atmel_aes = {
	.dev_list = LIST_HEAD_INIT(atmel_aes.dev_list),
	.lock = __SPIN_LOCK_UNLOCKED(atmel_aes.lock),
};
#ifdef VERBOSE_DEBUG
static const char *atmel_aes_reg_name(u32 offset, char *tmp, size_t sz)
{
	switch (offset) {
	case AES_CR:		return "CR";
	case AES_MR:		return "MR";
	case AES_IER:		return "IER";
	case AES_IDR:		return "IDR";
	case AES_IMR:		return "IMR";
	case AES_ISR:		return "ISR";

	case AES_KEYWR(0) ... AES_KEYWR(7):
		snprintf(tmp, sz, "KEYWR[%u]", (offset - AES_KEYWR(0)) >> 2);
		break;

	case AES_IDATAR(0) ... AES_IDATAR(3):
		snprintf(tmp, sz, "IDATAR[%u]", (offset - AES_IDATAR(0)) >> 2);
		break;

	case AES_ODATAR(0) ... AES_ODATAR(3):
		snprintf(tmp, sz, "ODATAR[%u]", (offset - AES_ODATAR(0)) >> 2);
		break;

	case AES_IVR(0) ... AES_IVR(3):
		snprintf(tmp, sz, "IVR[%u]", (offset - AES_IVR(0)) >> 2);
		break;

	case AES_AADLENR:	return "AADLENR";
	case AES_CLENR:		return "CLENR";

	case AES_GHASHR(0) ... AES_GHASHR(3):
		snprintf(tmp, sz, "GHASHR[%u]", (offset - AES_GHASHR(0)) >> 2);
		break;

	case AES_TAGR(0) ... AES_TAGR(3):
		snprintf(tmp, sz, "TAGR[%u]", (offset - AES_TAGR(0)) >> 2);
		break;

	case AES_CTRR:		return "CTRR";

	case AES_GCMHR(0) ... AES_GCMHR(3):
		snprintf(tmp, sz, "GCMHR[%u]", (offset - AES_GCMHR(0)) >> 2);
		break;

	default:
		snprintf(tmp, sz, "0x%02x", offset);
		break;
	}

	return tmp;
}
#endif /* VERBOSE_DEBUG */
/* Shared functions */

static inline u32 atmel_aes_read(struct atmel_aes_dev *dd, u32 offset)
{
	u32 value = readl_relaxed(dd->io_base + offset);

#ifdef VERBOSE_DEBUG
	if (dd->flags & AES_FLAGS_DUMP_REG) {
		char tmp[16];

		dev_vdbg(dd->dev, "read 0x%08x from %s\n", value,
			 atmel_aes_reg_name(offset, tmp, sizeof(tmp)));
	}
#endif /* VERBOSE_DEBUG */

	return value;
}
static inline void atmel_aes_write(struct atmel_aes_dev *dd,
				   u32 offset, u32 value)
{
#ifdef VERBOSE_DEBUG
	if (dd->flags & AES_FLAGS_DUMP_REG) {
		char tmp[16];

		dev_vdbg(dd->dev, "write 0x%08x into %s\n", value,
			 atmel_aes_reg_name(offset, tmp, sizeof(tmp)));
	}
#endif /* VERBOSE_DEBUG */

	writel_relaxed(value, dd->io_base + offset);
}
static void atmel_aes_read_n(struct atmel_aes_dev *dd, u32 offset,
			     u32 *value, int count)
{
	for (; count--; value++, offset += 4)
		*value = atmel_aes_read(dd, offset);
}

static void atmel_aes_write_n(struct atmel_aes_dev *dd, u32 offset,
			      const u32 *value, int count)
{
	for (; count--; value++, offset += 4)
		atmel_aes_write(dd, offset, *value);
}
static inline void atmel_aes_read_block(struct atmel_aes_dev *dd, u32 offset,
					u32 *value)
{
	atmel_aes_read_n(dd, offset, value, SIZE_IN_WORDS(AES_BLOCK_SIZE));
}

static inline void atmel_aes_write_block(struct atmel_aes_dev *dd, u32 offset,
					 const u32 *value)
{
	atmel_aes_write_n(dd, offset, value, SIZE_IN_WORDS(AES_BLOCK_SIZE));
}
static inline int atmel_aes_wait_for_data_ready(struct atmel_aes_dev *dd,
						atmel_aes_fn_t resume)
{
	u32 isr = atmel_aes_read(dd, AES_ISR);

	if (unlikely(isr & AES_INT_DATARDY))
		return resume(dd);

	dd->resume = resume;
	atmel_aes_write(dd, AES_IER, AES_INT_DATARDY);
	return -EINPROGRESS;
}
static inline size_t atmel_aes_padlen(size_t len, size_t block_size)
{
	len &= block_size - 1;
	return len ? block_size - len : 0;
}
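
/*
 * For illustration: atmel_aes_padlen(20, AES_BLOCK_SIZE) == 12 and
 * atmel_aes_padlen(32, AES_BLOCK_SIZE) == 0, i.e. the number of bytes
 * needed to round len up to a whole number of blocks; block_size must
 * be a power of two for the mask trick above to work.
 */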
static struct atmel_aes_dev *atmel_aes_find_dev(struct atmel_aes_base_ctx *ctx)
{
	struct atmel_aes_dev *aes_dd = NULL;
	struct atmel_aes_dev *tmp;

	spin_lock_bh(&atmel_aes.lock);
	if (!ctx->dd) {
		list_for_each_entry(tmp, &atmel_aes.dev_list, list) {
			aes_dd = tmp;
			break;
		}
		ctx->dd = aes_dd;
	} else {
		aes_dd = ctx->dd;
	}

	spin_unlock_bh(&atmel_aes.lock);

	return aes_dd;
}
static int atmel_aes_hw_init(struct atmel_aes_dev *dd)
{
	int err;

	err = clk_enable(dd->iclk);
	if (err)
		return err;

	if (!(dd->flags & AES_FLAGS_INIT)) {
		atmel_aes_write(dd, AES_CR, AES_CR_SWRST);
		atmel_aes_write(dd, AES_MR, 0xE << AES_MR_CKEY_OFFSET);
		dd->flags |= AES_FLAGS_INIT;
	}

	return 0;
}
static inline unsigned int atmel_aes_get_version(struct atmel_aes_dev *dd)
{
	return atmel_aes_read(dd, AES_HW_VERSION) & 0x00000fff;
}
static int atmel_aes_hw_version_init(struct atmel_aes_dev *dd)
{
	int err;

	err = atmel_aes_hw_init(dd);
	if (err)
		return err;

	dd->hw_version = atmel_aes_get_version(dd);

	dev_info(dd->dev, "version: 0x%x\n", dd->hw_version);

	clk_disable(dd->iclk);
	return 0;
}
static inline void atmel_aes_set_mode(struct atmel_aes_dev *dd,
				      const struct atmel_aes_reqctx *rctx)
{
	/* Clear all but persistent flags and set request flags. */
	dd->flags = (dd->flags & AES_FLAGS_PERSISTENT) | rctx->mode;
}

static inline bool atmel_aes_is_encrypt(const struct atmel_aes_dev *dd)
{
	return (dd->flags & AES_FLAGS_ENCRYPT);
}
static inline int atmel_aes_complete(struct atmel_aes_dev *dd, int err)
{
	clk_disable(dd->iclk);
	dd->flags &= ~AES_FLAGS_BUSY;

	if (dd->is_async)
		dd->areq->complete(dd->areq, err);

	tasklet_schedule(&dd->queue_task);

	return err;
}
static void atmel_aes_write_ctrl(struct atmel_aes_dev *dd, bool use_dma,
				 const u32 *iv)
{
	u32 valmr = 0;

	/* MR register must be set before IV registers */
	if (dd->ctx->keylen == AES_KEYSIZE_128)
		valmr |= AES_MR_KEYSIZE_128;
	else if (dd->ctx->keylen == AES_KEYSIZE_192)
		valmr |= AES_MR_KEYSIZE_192;
	else
		valmr |= AES_MR_KEYSIZE_256;

	valmr |= dd->flags & AES_FLAGS_MODE_MASK;

	if (use_dma) {
		valmr |= AES_MR_SMOD_IDATAR0;
		if (dd->caps.has_dualbuff)
			valmr |= AES_MR_DUALBUFF;
	} else {
		valmr |= AES_MR_SMOD_AUTO;
	}

	atmel_aes_write(dd, AES_MR, valmr);

	atmel_aes_write_n(dd, AES_KEYWR(0), dd->ctx->key,
			  SIZE_IN_WORDS(dd->ctx->keylen));

	if (iv && (valmr & AES_MR_OPMOD_MASK) != AES_MR_OPMOD_ECB)
		atmel_aes_write_block(dd, AES_IVR(0), iv);
}
/* CPU transfer */

static int atmel_aes_cpu_transfer(struct atmel_aes_dev *dd)
{
	int err = 0;
	u32 isr;

	for (;;) {
		atmel_aes_read_block(dd, AES_ODATAR(0), dd->data);
		dd->data += 4;
		dd->datalen -= AES_BLOCK_SIZE;

		if (dd->datalen < AES_BLOCK_SIZE)
			break;

		atmel_aes_write_block(dd, AES_IDATAR(0), dd->data);

		isr = atmel_aes_read(dd, AES_ISR);
		if (!(isr & AES_INT_DATARDY)) {
			dd->resume = atmel_aes_cpu_transfer;
			atmel_aes_write(dd, AES_IER, AES_INT_DATARDY);
			return -EINPROGRESS;
		}
	}

	if (!sg_copy_from_buffer(dd->real_dst, sg_nents(dd->real_dst),
				 dd->buf, dd->total))
		err = -EINVAL;

	if (err)
		return atmel_aes_complete(dd, err);

	return dd->cpu_transfer_complete(dd);
}
static int atmel_aes_cpu_start(struct atmel_aes_dev *dd,
			       struct scatterlist *src,
			       struct scatterlist *dst,
			       size_t len,
			       atmel_aes_fn_t resume)
{
	size_t padlen = atmel_aes_padlen(len, AES_BLOCK_SIZE);

	if (unlikely(len == 0))
		return -EINVAL;

	sg_copy_to_buffer(src, sg_nents(src), dd->buf, len);

	dd->total = len;
	dd->real_dst = dst;
	dd->cpu_transfer_complete = resume;
	dd->datalen = len + padlen;
	dd->data = (u32 *)dd->buf;
	atmel_aes_write_block(dd, AES_IDATAR(0), dd->data);
	return atmel_aes_wait_for_data_ready(dd, atmel_aes_cpu_transfer);
}
/* DMA transfer */

static void atmel_aes_dma_callback(void *data);
static bool atmel_aes_check_aligned(struct atmel_aes_dev *dd,
				    struct scatterlist *sg,
				    size_t len,
				    struct atmel_aes_dma *dma)
{
	int nents;

	if (!IS_ALIGNED(len, dd->ctx->block_size))
		return false;

	for (nents = 0; sg; sg = sg_next(sg), ++nents) {
		if (!IS_ALIGNED(sg->offset, sizeof(u32)))
			return false;

		if (len <= sg->length) {
			if (!IS_ALIGNED(len, dd->ctx->block_size))
				return false;

			dma->nents = nents+1;
			dma->remainder = sg->length - len;
			sg->length = len;
			return true;
		}

		if (!IS_ALIGNED(sg->length, dd->ctx->block_size))
			return false;

		len -= sg->length;
	}

	return false;
}
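
/*
 * Note the side effect above: when the area of interest ends in the middle
 * of an sg entry, sg->length is trimmed to 'len' and the cut-off byte count
 * is remembered in dma->remainder, so atmel_aes_restore_sg() below can undo
 * the trimming once the transfer has been unmapped.
 */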
static inline void atmel_aes_restore_sg(const struct atmel_aes_dma *dma)
{
	struct scatterlist *sg = dma->sg;
	int nents = dma->nents;

	if (!dma->remainder)
		return;

	while (--nents > 0 && sg)
		sg = sg_next(sg);

	if (!sg)
		return;

	sg->length += dma->remainder;
}
static int atmel_aes_map(struct atmel_aes_dev *dd,
			 struct scatterlist *src,
			 struct scatterlist *dst,
			 size_t len)
{
	bool src_aligned, dst_aligned;
	size_t padlen;

	dd->total = len;
	dd->src.sg = src;
	dd->dst.sg = dst;
	dd->real_dst = dst;

	src_aligned = atmel_aes_check_aligned(dd, src, len, &dd->src);
	if (src == dst)
		dst_aligned = src_aligned;
	else
		dst_aligned = atmel_aes_check_aligned(dd, dst, len, &dd->dst);
	if (!src_aligned || !dst_aligned) {
		padlen = atmel_aes_padlen(len, dd->ctx->block_size);

		if (dd->buflen < len + padlen)
			return -ENOMEM;

		if (!src_aligned) {
			sg_copy_to_buffer(src, sg_nents(src), dd->buf, len);
			dd->src.sg = &dd->aligned_sg;
			dd->src.nents = 1;
			dd->src.remainder = 0;
		}

		if (!dst_aligned) {
			dd->dst.sg = &dd->aligned_sg;
			dd->dst.nents = 1;
			dd->dst.remainder = 0;
		}

		sg_init_table(&dd->aligned_sg, 1);
		sg_set_buf(&dd->aligned_sg, dd->buf, len + padlen);
	}

	if (dd->src.sg == dd->dst.sg) {
		dd->src.sg_len = dma_map_sg(dd->dev, dd->src.sg, dd->src.nents,
					    DMA_BIDIRECTIONAL);
		dd->dst.sg_len = dd->src.sg_len;
		if (!dd->src.sg_len)
			return -EFAULT;
	} else {
		dd->src.sg_len = dma_map_sg(dd->dev, dd->src.sg, dd->src.nents,
					    DMA_TO_DEVICE);
		if (!dd->src.sg_len)
			return -EFAULT;

		dd->dst.sg_len = dma_map_sg(dd->dev, dd->dst.sg, dd->dst.nents,
					    DMA_FROM_DEVICE);
		if (!dd->dst.sg_len) {
			dma_unmap_sg(dd->dev, dd->src.sg, dd->src.nents,
				     DMA_TO_DEVICE);
			return -EFAULT;
		}
	}

	return 0;
}
static void atmel_aes_unmap(struct atmel_aes_dev *dd)
{
	if (dd->src.sg == dd->dst.sg) {
		dma_unmap_sg(dd->dev, dd->src.sg, dd->src.nents,
			     DMA_BIDIRECTIONAL);

		if (dd->src.sg != &dd->aligned_sg)
			atmel_aes_restore_sg(&dd->src);
	} else {
		dma_unmap_sg(dd->dev, dd->dst.sg, dd->dst.nents,
			     DMA_FROM_DEVICE);

		if (dd->dst.sg != &dd->aligned_sg)
			atmel_aes_restore_sg(&dd->dst);

		dma_unmap_sg(dd->dev, dd->src.sg, dd->src.nents,
			     DMA_TO_DEVICE);

		if (dd->src.sg != &dd->aligned_sg)
			atmel_aes_restore_sg(&dd->src);
	}

	if (dd->dst.sg == &dd->aligned_sg)
		sg_copy_from_buffer(dd->real_dst, sg_nents(dd->real_dst),
				    dd->buf, dd->total);
}
static int atmel_aes_dma_transfer_start(struct atmel_aes_dev *dd,
					enum dma_slave_buswidth addr_width,
					enum dma_transfer_direction dir,
					u32 maxburst)
{
	struct dma_async_tx_descriptor *desc;
	struct dma_slave_config config;
	dma_async_tx_callback callback;
	struct atmel_aes_dma *dma;
	int err;

	memset(&config, 0, sizeof(config));
	config.direction = dir;
	config.src_addr_width = addr_width;
	config.dst_addr_width = addr_width;
	config.src_maxburst = maxburst;
	config.dst_maxburst = maxburst;

	switch (dir) {
	case DMA_MEM_TO_DEV:
		dma = &dd->src;
		callback = NULL;
		config.dst_addr = dd->phys_base + AES_IDATAR(0);
		break;

	case DMA_DEV_TO_MEM:
		dma = &dd->dst;
		callback = atmel_aes_dma_callback;
		config.src_addr = dd->phys_base + AES_ODATAR(0);
		break;

	default:
		return -EINVAL;
	}

	err = dmaengine_slave_config(dma->chan, &config);
	if (err)
		return err;

	desc = dmaengine_prep_slave_sg(dma->chan, dma->sg, dma->sg_len, dir,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc)
		return -ENOMEM;

	desc->callback = callback;
	desc->callback_param = dd;
	dmaengine_submit(desc);
	dma_async_issue_pending(dma->chan);

	return 0;
}
static void atmel_aes_dma_transfer_stop(struct atmel_aes_dev *dd,
					enum dma_transfer_direction dir)
{
	struct atmel_aes_dma *dma;

	switch (dir) {
	case DMA_MEM_TO_DEV:
		dma = &dd->src;
		break;

	case DMA_DEV_TO_MEM:
		dma = &dd->dst;
		break;

	default:
		return;
	}

	dmaengine_terminate_all(dma->chan);
}
static int atmel_aes_dma_start(struct atmel_aes_dev *dd,
			       struct scatterlist *src,
			       struct scatterlist *dst,
			       size_t len,
			       atmel_aes_fn_t resume)
{
	enum dma_slave_buswidth addr_width;
	u32 maxburst;
	int err;

	switch (dd->ctx->block_size) {
	case CFB8_BLOCK_SIZE:
		addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
		maxburst = 1;
		break;

	case CFB16_BLOCK_SIZE:
		addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
		maxburst = 1;
		break;

	case CFB32_BLOCK_SIZE:
	case CFB64_BLOCK_SIZE:
		addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
		maxburst = 1;
		break;

	case AES_BLOCK_SIZE:
		addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
		maxburst = dd->caps.max_burst_size;
		break;

	default:
		err = -EINVAL;
		goto exit;
	}

	err = atmel_aes_map(dd, src, dst, len);
	if (err)
		goto exit;

	dd->resume = resume;

	/* Set output DMA transfer first */
	err = atmel_aes_dma_transfer_start(dd, addr_width, DMA_DEV_TO_MEM,
					   maxburst);
	if (err)
		goto unmap;

	/* Then set input DMA transfer */
	err = atmel_aes_dma_transfer_start(dd, addr_width, DMA_MEM_TO_DEV,
					   maxburst);
	if (err)
		goto output_transfer_stop;

	return -EINPROGRESS;

output_transfer_stop:
	atmel_aes_dma_transfer_stop(dd, DMA_DEV_TO_MEM);
unmap:
	atmel_aes_unmap(dd);
exit:
	return atmel_aes_complete(dd, err);
}
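
/*
 * The DMA_DEV_TO_MEM descriptor is issued before the DMA_MEM_TO_DEV one on
 * purpose: only the output channel carries the completion callback, and the
 * read side should already be draining ODATAR by the time the input channel
 * starts feeding IDATAR.
 */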
static void atmel_aes_dma_stop(struct atmel_aes_dev *dd)
{
	atmel_aes_dma_transfer_stop(dd, DMA_MEM_TO_DEV);
	atmel_aes_dma_transfer_stop(dd, DMA_DEV_TO_MEM);
	atmel_aes_unmap(dd);
}
static void atmel_aes_dma_callback(void *data)
{
	struct atmel_aes_dev *dd = data;

	atmel_aes_dma_stop(dd);
	dd->is_async = true;
	(void)dd->resume(dd);
}
static int atmel_aes_handle_queue(struct atmel_aes_dev *dd,
				  struct crypto_async_request *new_areq)
{
	struct crypto_async_request *areq, *backlog;
	struct atmel_aes_base_ctx *ctx;
	unsigned long flags;
	int err, ret = 0;

	spin_lock_irqsave(&dd->lock, flags);
	if (new_areq)
		ret = crypto_enqueue_request(&dd->queue, new_areq);
	if (dd->flags & AES_FLAGS_BUSY) {
		spin_unlock_irqrestore(&dd->lock, flags);
		return ret;
	}
	backlog = crypto_get_backlog(&dd->queue);
	areq = crypto_dequeue_request(&dd->queue);
	if (areq)
		dd->flags |= AES_FLAGS_BUSY;
	spin_unlock_irqrestore(&dd->lock, flags);

	if (!areq)
		return ret;

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);

	ctx = crypto_tfm_ctx(areq->tfm);

	dd->areq = areq;
	dd->ctx = ctx;
	dd->is_async = (areq != new_areq);

	err = ctx->start(dd);
	return (dd->is_async) ? ret : err;
}
/* AES async block ciphers */

static int atmel_aes_transfer_complete(struct atmel_aes_dev *dd)
{
	return atmel_aes_complete(dd, 0);
}
static int atmel_aes_start(struct atmel_aes_dev *dd)
{
	struct ablkcipher_request *req = ablkcipher_request_cast(dd->areq);
	struct atmel_aes_reqctx *rctx = ablkcipher_request_ctx(req);
	bool use_dma = (req->nbytes >= ATMEL_AES_DMA_THRESHOLD ||
			dd->ctx->block_size != AES_BLOCK_SIZE);
	int err;

	atmel_aes_set_mode(dd, rctx);

	err = atmel_aes_hw_init(dd);
	if (err)
		return atmel_aes_complete(dd, err);

	atmel_aes_write_ctrl(dd, use_dma, req->info);
	if (use_dma)
		return atmel_aes_dma_start(dd, req->src, req->dst, req->nbytes,
					   atmel_aes_transfer_complete);

	return atmel_aes_cpu_start(dd, req->src, req->dst, req->nbytes,
				   atmel_aes_transfer_complete);
}
static inline struct atmel_aes_ctr_ctx *
atmel_aes_ctr_ctx_cast(struct atmel_aes_base_ctx *ctx)
{
	return container_of(ctx, struct atmel_aes_ctr_ctx, base);
}
static int atmel_aes_ctr_transfer(struct atmel_aes_dev *dd)
{
	struct atmel_aes_ctr_ctx *ctx = atmel_aes_ctr_ctx_cast(dd->ctx);
	struct ablkcipher_request *req = ablkcipher_request_cast(dd->areq);
	struct scatterlist *src, *dst;
	size_t datalen;
	u32 ctr, blocks;
	u16 start, end;
	bool use_dma, fragmented = false;

	/* Check for transfer completion. */
	ctx->offset += dd->total;
	if (ctx->offset >= req->nbytes)
		return atmel_aes_transfer_complete(dd);

	/* Compute data length. */
	datalen = req->nbytes - ctx->offset;
	blocks = DIV_ROUND_UP(datalen, AES_BLOCK_SIZE);
	ctr = be32_to_cpu(ctx->iv[3]);

	/* Check 16bit counter overflow. */
	start = ctr & 0xffff;
	end = start + blocks - 1;

	if (blocks >> 16 || end < start) {
		ctr |= 0xffff;
		datalen = AES_BLOCK_SIZE * (0x10000 - start);
		fragmented = true;
	}

	use_dma = (datalen >= ATMEL_AES_DMA_THRESHOLD);

	/* Jump to offset. */
	src = scatterwalk_ffwd(ctx->src, req->src, ctx->offset);
	dst = ((req->src == req->dst) ? src :
	       scatterwalk_ffwd(ctx->dst, req->dst, ctx->offset));

	/* Configure hardware. */
	atmel_aes_write_ctrl(dd, use_dma, ctx->iv);
	if (unlikely(fragmented)) {
		/*
		 * Increment the counter manually to cope with the hardware
		 * counter overflow.
		 */
		ctx->iv[3] = cpu_to_be32(ctr);
		crypto_inc((u8 *)ctx->iv, AES_BLOCK_SIZE);
	}

	if (use_dma)
		return atmel_aes_dma_start(dd, src, dst, datalen,
					   atmel_aes_ctr_transfer);

	return atmel_aes_cpu_start(dd, src, dst, datalen,
				   atmel_aes_ctr_transfer);
}
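
/*
 * Worked example of the 16-bit counter check above: if the counter word
 * ends in start = 0xfff0 and blocks = 0x20 blocks remain, then
 * end = (u16)(0xfff0 + 0x20 - 1) = 0x000f < start, so the request is
 * fragmented and this pass only covers
 * datalen = AES_BLOCK_SIZE * (0x10000 - 0xfff0) = 256 bytes; crypto_inc()
 * then carries the wrap into the upper IV words before the next pass.
 */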
static int atmel_aes_ctr_start(struct atmel_aes_dev *dd)
{
	struct atmel_aes_ctr_ctx *ctx = atmel_aes_ctr_ctx_cast(dd->ctx);
	struct ablkcipher_request *req = ablkcipher_request_cast(dd->areq);
	struct atmel_aes_reqctx *rctx = ablkcipher_request_ctx(req);
	int err;

	atmel_aes_set_mode(dd, rctx);

	err = atmel_aes_hw_init(dd);
	if (err)
		return atmel_aes_complete(dd, err);

	memcpy(ctx->iv, req->info, AES_BLOCK_SIZE);
	ctx->offset = 0;
	dd->total = 0;
	return atmel_aes_ctr_transfer(dd);
}
static int atmel_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
{
	struct atmel_aes_base_ctx *ctx;
	struct atmel_aes_reqctx *rctx;
	struct atmel_aes_dev *dd;

	ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req));
	switch (mode & AES_FLAGS_OPMODE_MASK) {
	case AES_FLAGS_CFB8:
		ctx->block_size = CFB8_BLOCK_SIZE;
		break;

	case AES_FLAGS_CFB16:
		ctx->block_size = CFB16_BLOCK_SIZE;
		break;

	case AES_FLAGS_CFB32:
		ctx->block_size = CFB32_BLOCK_SIZE;
		break;

	case AES_FLAGS_CFB64:
		ctx->block_size = CFB64_BLOCK_SIZE;
		break;

	default:
		ctx->block_size = AES_BLOCK_SIZE;
		break;
	}

	dd = atmel_aes_find_dev(ctx);
	if (!dd)
		return -ENODEV;

	rctx = ablkcipher_request_ctx(req);
	rctx->mode = mode;

	return atmel_aes_handle_queue(dd, &req->base);
}
static int atmel_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
			    unsigned int keylen)
{
	struct atmel_aes_base_ctx *ctx = crypto_ablkcipher_ctx(tfm);

	if (keylen != AES_KEYSIZE_128 &&
	    keylen != AES_KEYSIZE_192 &&
	    keylen != AES_KEYSIZE_256) {
		crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	memcpy(ctx->key, key, keylen);
	ctx->keylen = keylen;

	return 0;
}
static int atmel_aes_ecb_encrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req, AES_FLAGS_ECB | AES_FLAGS_ENCRYPT);
}

static int atmel_aes_ecb_decrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req, AES_FLAGS_ECB);
}

static int atmel_aes_cbc_encrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req, AES_FLAGS_CBC | AES_FLAGS_ENCRYPT);
}

static int atmel_aes_cbc_decrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req, AES_FLAGS_CBC);
}

static int atmel_aes_ofb_encrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req, AES_FLAGS_OFB | AES_FLAGS_ENCRYPT);
}

static int atmel_aes_ofb_decrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req, AES_FLAGS_OFB);
}

static int atmel_aes_cfb_encrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req, AES_FLAGS_CFB128 | AES_FLAGS_ENCRYPT);
}

static int atmel_aes_cfb_decrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req, AES_FLAGS_CFB128);
}

static int atmel_aes_cfb64_encrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req, AES_FLAGS_CFB64 | AES_FLAGS_ENCRYPT);
}

static int atmel_aes_cfb64_decrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req, AES_FLAGS_CFB64);
}

static int atmel_aes_cfb32_encrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req, AES_FLAGS_CFB32 | AES_FLAGS_ENCRYPT);
}

static int atmel_aes_cfb32_decrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req, AES_FLAGS_CFB32);
}

static int atmel_aes_cfb16_encrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req, AES_FLAGS_CFB16 | AES_FLAGS_ENCRYPT);
}

static int atmel_aes_cfb16_decrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req, AES_FLAGS_CFB16);
}

static int atmel_aes_cfb8_encrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req, AES_FLAGS_CFB8 | AES_FLAGS_ENCRYPT);
}

static int atmel_aes_cfb8_decrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req, AES_FLAGS_CFB8);
}

static int atmel_aes_ctr_encrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req, AES_FLAGS_CTR | AES_FLAGS_ENCRYPT);
}

static int atmel_aes_ctr_decrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req, AES_FLAGS_CTR);
}
static int atmel_aes_cra_init(struct crypto_tfm *tfm)
{
	struct atmel_aes_ctx *ctx = crypto_tfm_ctx(tfm);

	tfm->crt_ablkcipher.reqsize = sizeof(struct atmel_aes_reqctx);
	ctx->base.start = atmel_aes_start;

	return 0;
}

static int atmel_aes_ctr_cra_init(struct crypto_tfm *tfm)
{
	struct atmel_aes_ctx *ctx = crypto_tfm_ctx(tfm);

	tfm->crt_ablkcipher.reqsize = sizeof(struct atmel_aes_reqctx);
	ctx->base.start = atmel_aes_ctr_start;

	return 0;
}

static void atmel_aes_cra_exit(struct crypto_tfm *tfm)
{
}
static struct crypto_alg aes_algs[] = {
{
	.cra_name		= "ecb(aes)",
	.cra_driver_name	= "atmel-ecb-aes",
	.cra_priority		= ATMEL_AES_PRIORITY,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct atmel_aes_ctx),
	.cra_alignmask		= 0xf,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= atmel_aes_cra_init,
	.cra_exit		= atmel_aes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.setkey		= atmel_aes_setkey,
		.encrypt	= atmel_aes_ecb_encrypt,
		.decrypt	= atmel_aes_ecb_decrypt,
	}
},
{
	.cra_name		= "cbc(aes)",
	.cra_driver_name	= "atmel-cbc-aes",
	.cra_priority		= ATMEL_AES_PRIORITY,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct atmel_aes_ctx),
	.cra_alignmask		= 0xf,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= atmel_aes_cra_init,
	.cra_exit		= atmel_aes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.setkey		= atmel_aes_setkey,
		.encrypt	= atmel_aes_cbc_encrypt,
		.decrypt	= atmel_aes_cbc_decrypt,
	}
},
{
	.cra_name		= "ofb(aes)",
	.cra_driver_name	= "atmel-ofb-aes",
	.cra_priority		= ATMEL_AES_PRIORITY,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct atmel_aes_ctx),
	.cra_alignmask		= 0xf,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= atmel_aes_cra_init,
	.cra_exit		= atmel_aes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.setkey		= atmel_aes_setkey,
		.encrypt	= atmel_aes_ofb_encrypt,
		.decrypt	= atmel_aes_ofb_decrypt,
	}
},
{
	.cra_name		= "cfb(aes)",
	.cra_driver_name	= "atmel-cfb-aes",
	.cra_priority		= ATMEL_AES_PRIORITY,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct atmel_aes_ctx),
	.cra_alignmask		= 0xf,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= atmel_aes_cra_init,
	.cra_exit		= atmel_aes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.setkey		= atmel_aes_setkey,
		.encrypt	= atmel_aes_cfb_encrypt,
		.decrypt	= atmel_aes_cfb_decrypt,
	}
},
{
	.cra_name		= "cfb32(aes)",
	.cra_driver_name	= "atmel-cfb32-aes",
	.cra_priority		= ATMEL_AES_PRIORITY,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= CFB32_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct atmel_aes_ctx),
	.cra_alignmask		= 0x3,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= atmel_aes_cra_init,
	.cra_exit		= atmel_aes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.setkey		= atmel_aes_setkey,
		.encrypt	= atmel_aes_cfb32_encrypt,
		.decrypt	= atmel_aes_cfb32_decrypt,
	}
},
{
	.cra_name		= "cfb16(aes)",
	.cra_driver_name	= "atmel-cfb16-aes",
	.cra_priority		= ATMEL_AES_PRIORITY,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= CFB16_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct atmel_aes_ctx),
	.cra_alignmask		= 0x1,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= atmel_aes_cra_init,
	.cra_exit		= atmel_aes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.setkey		= atmel_aes_setkey,
		.encrypt	= atmel_aes_cfb16_encrypt,
		.decrypt	= atmel_aes_cfb16_decrypt,
	}
},
{
	.cra_name		= "cfb8(aes)",
	.cra_driver_name	= "atmel-cfb8-aes",
	.cra_priority		= ATMEL_AES_PRIORITY,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= CFB8_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct atmel_aes_ctx),
	.cra_alignmask		= 0x0,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= atmel_aes_cra_init,
	.cra_exit		= atmel_aes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.setkey		= atmel_aes_setkey,
		.encrypt	= atmel_aes_cfb8_encrypt,
		.decrypt	= atmel_aes_cfb8_decrypt,
	}
},
{
	.cra_name		= "ctr(aes)",
	.cra_driver_name	= "atmel-ctr-aes",
	.cra_priority		= ATMEL_AES_PRIORITY,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= 1,
	.cra_ctxsize		= sizeof(struct atmel_aes_ctr_ctx),
	.cra_alignmask		= 0xf,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= atmel_aes_ctr_cra_init,
	.cra_exit		= atmel_aes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.setkey		= atmel_aes_setkey,
		.encrypt	= atmel_aes_ctr_encrypt,
		.decrypt	= atmel_aes_ctr_decrypt,
	}
},
};
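
/*
 * Usage sketch, not part of this driver: a kernel client reaches these
 * implementations through the generic crypto API, e.g.
 *
 *	struct crypto_ablkcipher *tfm =
 *		crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
 *
 * and ATMEL_AES_PRIORITY (300) makes the crypto core prefer this hardware
 * driver over the generic software cipher when both are available.
 */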
static struct crypto_alg aes_cfb64_alg = {
	.cra_name		= "cfb64(aes)",
	.cra_driver_name	= "atmel-cfb64-aes",
	.cra_priority		= ATMEL_AES_PRIORITY,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= CFB64_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct atmel_aes_ctx),
	.cra_alignmask		= 0x7,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= atmel_aes_cra_init,
	.cra_exit		= atmel_aes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.setkey		= atmel_aes_setkey,
		.encrypt	= atmel_aes_cfb64_encrypt,
		.decrypt	= atmel_aes_cfb64_decrypt,
	}
};
/* gcm aead functions */

static int atmel_aes_gcm_ghash(struct atmel_aes_dev *dd,
			       const u32 *data, size_t datalen,
			       const u32 *ghash_in, u32 *ghash_out,
			       atmel_aes_fn_t resume);
static int atmel_aes_gcm_ghash_init(struct atmel_aes_dev *dd);
static int atmel_aes_gcm_ghash_finalize(struct atmel_aes_dev *dd);

static int atmel_aes_gcm_start(struct atmel_aes_dev *dd);
static int atmel_aes_gcm_process(struct atmel_aes_dev *dd);
static int atmel_aes_gcm_length(struct atmel_aes_dev *dd);
static int atmel_aes_gcm_data(struct atmel_aes_dev *dd);
static int atmel_aes_gcm_tag_init(struct atmel_aes_dev *dd);
static int atmel_aes_gcm_tag(struct atmel_aes_dev *dd);
static int atmel_aes_gcm_finalize(struct atmel_aes_dev *dd);
static inline struct atmel_aes_gcm_ctx *
atmel_aes_gcm_ctx_cast(struct atmel_aes_base_ctx *ctx)
{
	return container_of(ctx, struct atmel_aes_gcm_ctx, base);
}
static int atmel_aes_gcm_ghash(struct atmel_aes_dev *dd,
			       const u32 *data, size_t datalen,
			       const u32 *ghash_in, u32 *ghash_out,
			       atmel_aes_fn_t resume)
{
	struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx);

	dd->data = (u32 *)data;
	dd->datalen = datalen;
	ctx->ghash_in = ghash_in;
	ctx->ghash_out = ghash_out;
	ctx->ghash_resume = resume;

	atmel_aes_write_ctrl(dd, false, NULL);
	return atmel_aes_wait_for_data_ready(dd, atmel_aes_gcm_ghash_init);
}
static int atmel_aes_gcm_ghash_init(struct atmel_aes_dev *dd)
{
	struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx);

	/* Set the data length. */
	atmel_aes_write(dd, AES_AADLENR, dd->total);
	atmel_aes_write(dd, AES_CLENR, 0);

	/* If needed, overwrite the GCM Intermediate Hash Word Registers */
	if (ctx->ghash_in)
		atmel_aes_write_block(dd, AES_GHASHR(0), ctx->ghash_in);

	return atmel_aes_gcm_ghash_finalize(dd);
}
static int atmel_aes_gcm_ghash_finalize(struct atmel_aes_dev *dd)
{
	struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx);
	u32 isr;

	/* Write data into the Input Data Registers. */
	while (dd->datalen > 0) {
		atmel_aes_write_block(dd, AES_IDATAR(0), dd->data);
		dd->data += 4;
		dd->datalen -= AES_BLOCK_SIZE;

		isr = atmel_aes_read(dd, AES_ISR);
		if (!(isr & AES_INT_DATARDY)) {
			dd->resume = atmel_aes_gcm_ghash_finalize;
			atmel_aes_write(dd, AES_IER, AES_INT_DATARDY);
			return -EINPROGRESS;
		}
	}

	/* Read the computed hash from GHASHRx. */
	atmel_aes_read_block(dd, AES_GHASHR(0), ctx->ghash_out);

	return ctx->ghash_resume(dd);
}
static int atmel_aes_gcm_start(struct atmel_aes_dev *dd)
{
	struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx);
	struct aead_request *req = aead_request_cast(dd->areq);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct atmel_aes_reqctx *rctx = aead_request_ctx(req);
	size_t ivsize = crypto_aead_ivsize(tfm);
	size_t datalen, padlen;
	const void *iv = req->iv;
	u8 *data = dd->buf;
	int err;

	atmel_aes_set_mode(dd, rctx);

	err = atmel_aes_hw_init(dd);
	if (err)
		return atmel_aes_complete(dd, err);

	if (likely(ivsize == 12)) {
		memcpy(ctx->j0, iv, ivsize);
		ctx->j0[3] = cpu_to_be32(1);
		return atmel_aes_gcm_process(dd);
	}

	padlen = atmel_aes_padlen(ivsize, AES_BLOCK_SIZE);
	datalen = ivsize + padlen + AES_BLOCK_SIZE;
	if (datalen > dd->buflen)
		return atmel_aes_complete(dd, -EINVAL);

	memcpy(data, iv, ivsize);
	memset(data + ivsize, 0, padlen + sizeof(u64));
	((u64 *)(data + datalen))[-1] = cpu_to_be64(ivsize * 8);

	return atmel_aes_gcm_ghash(dd, (const u32 *)data, datalen,
				   NULL, ctx->j0, atmel_aes_gcm_process);
}
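
/*
 * The above mirrors the J0 definition of NIST SP 800-38D: for the common
 * 96-bit IV, J0 = IV || 0^31 || 1 (hence ctx->j0[3] = cpu_to_be32(1));
 * for any other IV length, J0 = GHASH(IV padded to a block boundary,
 * followed by a block carrying 0^64 || [len(IV) in bits]_64), which is the
 * buffer handed to atmel_aes_gcm_ghash() here.
 */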
static int atmel_aes_gcm_process(struct atmel_aes_dev *dd)
{
	struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx);
	struct aead_request *req = aead_request_cast(dd->areq);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	bool enc = atmel_aes_is_encrypt(dd);
	u32 authsize;

	/* Compute text length. */
	authsize = crypto_aead_authsize(tfm);
	ctx->textlen = req->cryptlen - (enc ? 0 : authsize);

	/*
	 * According to tcrypt test suite, the GCM Automatic Tag Generation
	 * fails when both the message and its associated data are empty.
	 */
	if (likely(req->assoclen != 0 || ctx->textlen != 0))
		dd->flags |= AES_FLAGS_GTAGEN;

	atmel_aes_write_ctrl(dd, false, NULL);
	return atmel_aes_wait_for_data_ready(dd, atmel_aes_gcm_length);
}
static int atmel_aes_gcm_length(struct atmel_aes_dev *dd)
{
	struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx);
	struct aead_request *req = aead_request_cast(dd->areq);
	u32 j0_lsw, *j0 = ctx->j0;
	size_t padlen;

	/* Write incr32(J0) into IV. */
	j0_lsw = j0[3];
	j0[3] = cpu_to_be32(be32_to_cpu(j0[3]) + 1);
	atmel_aes_write_block(dd, AES_IVR(0), j0);
	j0[3] = j0_lsw;

	/* Set aad and text lengths. */
	atmel_aes_write(dd, AES_AADLENR, req->assoclen);
	atmel_aes_write(dd, AES_CLENR, ctx->textlen);

	/* Check whether AAD are present. */
	if (unlikely(req->assoclen == 0)) {
		dd->datalen = 0;
		return atmel_aes_gcm_data(dd);
	}

	/* Copy assoc data and add padding. */
	padlen = atmel_aes_padlen(req->assoclen, AES_BLOCK_SIZE);
	if (unlikely(req->assoclen + padlen > dd->buflen))
		return atmel_aes_complete(dd, -EINVAL);
	sg_copy_to_buffer(req->src, sg_nents(req->src), dd->buf, req->assoclen);

	/* Write assoc data into the Input Data register. */
	dd->data = (u32 *)dd->buf;
	dd->datalen = req->assoclen + padlen;
	return atmel_aes_gcm_data(dd);
}
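
/*
 * Per the GCM specification, encryption starts counting from incr32(J0)
 * while J0 itself is reserved for the final tag computation; that is why
 * the incremented value goes into the IV registers above and the original
 * J0 is restored for atmel_aes_gcm_tag().
 */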
static int atmel_aes_gcm_data(struct atmel_aes_dev *dd)
{
	struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx);
	struct aead_request *req = aead_request_cast(dd->areq);
	bool use_dma = (ctx->textlen >= ATMEL_AES_DMA_THRESHOLD);
	struct scatterlist *src, *dst;
	u32 isr, mr;

	/* Write AAD first. */
	while (dd->datalen > 0) {
		atmel_aes_write_block(dd, AES_IDATAR(0), dd->data);
		dd->data += 4;
		dd->datalen -= AES_BLOCK_SIZE;

		isr = atmel_aes_read(dd, AES_ISR);
		if (!(isr & AES_INT_DATARDY)) {
			dd->resume = atmel_aes_gcm_data;
			atmel_aes_write(dd, AES_IER, AES_INT_DATARDY);
			return -EINPROGRESS;
		}
	}

	/* GMAC only. */
	if (unlikely(ctx->textlen == 0))
		return atmel_aes_gcm_tag_init(dd);

	/* Prepare src and dst scatter lists to transfer cipher/plain texts */
	src = scatterwalk_ffwd(ctx->src, req->src, req->assoclen);
	dst = ((req->src == req->dst) ? src :
	       scatterwalk_ffwd(ctx->dst, req->dst, req->assoclen));

	if (use_dma) {
		/* Update the Mode Register for DMA transfers. */
		mr = atmel_aes_read(dd, AES_MR);
		mr &= ~(AES_MR_SMOD_MASK | AES_MR_DUALBUFF);
		mr |= AES_MR_SMOD_IDATAR0;
		if (dd->caps.has_dualbuff)
			mr |= AES_MR_DUALBUFF;
		atmel_aes_write(dd, AES_MR, mr);

		return atmel_aes_dma_start(dd, src, dst, ctx->textlen,
					   atmel_aes_gcm_tag_init);
	}

	return atmel_aes_cpu_start(dd, src, dst, ctx->textlen,
				   atmel_aes_gcm_tag_init);
}
static int atmel_aes_gcm_tag_init(struct atmel_aes_dev *dd)
{
	struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx);
	struct aead_request *req = aead_request_cast(dd->areq);
	u64 *data = dd->buf;

	if (likely(dd->flags & AES_FLAGS_GTAGEN)) {
		if (!(atmel_aes_read(dd, AES_ISR) & AES_INT_TAGRDY)) {
			dd->resume = atmel_aes_gcm_tag_init;
			atmel_aes_write(dd, AES_IER, AES_INT_TAGRDY);
			return -EINPROGRESS;
		}

		return atmel_aes_gcm_finalize(dd);
	}

	/* Read the GCM Intermediate Hash Word Registers. */
	atmel_aes_read_block(dd, AES_GHASHR(0), ctx->ghash);

	data[0] = cpu_to_be64(req->assoclen * 8);
	data[1] = cpu_to_be64(ctx->textlen * 8);

	return atmel_aes_gcm_ghash(dd, (const u32 *)data, AES_BLOCK_SIZE,
				   ctx->ghash, ctx->ghash, atmel_aes_gcm_tag);
}
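
/*
 * Manual tag path, used when automatic tag generation was kept off (empty
 * message and AAD): the driver GHASHes the lengths block itself and then,
 * in atmel_aes_gcm_tag(), runs one CTR pass keyed on J0 over that hash,
 * i.e. T = GCTR(J0, GHASH(...)) from the specification.
 */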
static int atmel_aes_gcm_tag(struct atmel_aes_dev *dd)
{
	struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx);
	unsigned long flags;

	/*
	 * Change mode to CTR to complete the tag generation.
	 * Use J0 as Initialization Vector.
	 */
	flags = dd->flags;
	dd->flags &= ~(AES_FLAGS_OPMODE_MASK | AES_FLAGS_GTAGEN);
	dd->flags |= AES_FLAGS_CTR;
	atmel_aes_write_ctrl(dd, false, ctx->j0);
	dd->flags = flags;

	atmel_aes_write_block(dd, AES_IDATAR(0), ctx->ghash);
	return atmel_aes_wait_for_data_ready(dd, atmel_aes_gcm_finalize);
}
static int atmel_aes_gcm_finalize(struct atmel_aes_dev *dd)
{
	struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx);
	struct aead_request *req = aead_request_cast(dd->areq);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	bool enc = atmel_aes_is_encrypt(dd);
	u32 offset, authsize, itag[4], *otag = ctx->tag;
	int err;

	/* Read the computed tag. */
	if (likely(dd->flags & AES_FLAGS_GTAGEN))
		atmel_aes_read_block(dd, AES_TAGR(0), ctx->tag);
	else
		atmel_aes_read_block(dd, AES_ODATAR(0), ctx->tag);

	offset = req->assoclen + ctx->textlen;
	authsize = crypto_aead_authsize(tfm);
	if (enc) {
		scatterwalk_map_and_copy(otag, req->dst, offset, authsize, 1);
		err = 0;
	} else {
		scatterwalk_map_and_copy(itag, req->src, offset, authsize, 0);
		err = crypto_memneq(itag, otag, authsize) ? -EBADMSG : 0;
	}

	return atmel_aes_complete(dd, err);
}
static int atmel_aes_gcm_crypt(struct aead_request *req,
			       unsigned long mode)
{
	struct atmel_aes_base_ctx *ctx;
	struct atmel_aes_reqctx *rctx;
	struct atmel_aes_dev *dd;

	ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
	ctx->block_size = AES_BLOCK_SIZE;

	dd = atmel_aes_find_dev(ctx);
	if (!dd)
		return -ENODEV;

	rctx = aead_request_ctx(req);
	rctx->mode = AES_FLAGS_GCM | mode;

	return atmel_aes_handle_queue(dd, &req->base);
}
static int atmel_aes_gcm_setkey(struct crypto_aead *tfm, const u8 *key,
				unsigned int keylen)
{
	struct atmel_aes_base_ctx *ctx = crypto_aead_ctx(tfm);

	if (keylen != AES_KEYSIZE_256 &&
	    keylen != AES_KEYSIZE_192 &&
	    keylen != AES_KEYSIZE_128) {
		crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	memcpy(ctx->key, key, keylen);
	ctx->keylen = keylen;

	return 0;
}
static int atmel_aes_gcm_setauthsize(struct crypto_aead *tfm,
				     unsigned int authsize)
{
	/* Same as crypto_gcm_authsize() from crypto/gcm.c */
	switch (authsize) {
	case 4:
	case 8:
	case 12:
	case 13:
	case 14:
	case 15:
	case 16:
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
static int atmel_aes_gcm_encrypt(struct aead_request *req)
{
	return atmel_aes_gcm_crypt(req, AES_FLAGS_ENCRYPT);
}

static int atmel_aes_gcm_decrypt(struct aead_request *req)
{
	return atmel_aes_gcm_crypt(req, 0);
}
static int atmel_aes_gcm_init(struct crypto_aead *tfm)
{
	struct atmel_aes_gcm_ctx *ctx = crypto_aead_ctx(tfm);

	crypto_aead_set_reqsize(tfm, sizeof(struct atmel_aes_reqctx));
	ctx->base.start = atmel_aes_gcm_start;

	return 0;
}

static void atmel_aes_gcm_exit(struct crypto_aead *tfm)
{
}
static struct aead_alg aes_gcm_alg = {
	.setkey		= atmel_aes_gcm_setkey,
	.setauthsize	= atmel_aes_gcm_setauthsize,
	.encrypt	= atmel_aes_gcm_encrypt,
	.decrypt	= atmel_aes_gcm_decrypt,
	.init		= atmel_aes_gcm_init,
	.exit		= atmel_aes_gcm_exit,
	.ivsize		= 12,
	.maxauthsize	= AES_BLOCK_SIZE,

	.base = {
		.cra_name		= "gcm(aes)",
		.cra_driver_name	= "atmel-gcm-aes",
		.cra_priority		= ATMEL_AES_PRIORITY,
		.cra_flags		= CRYPTO_ALG_ASYNC,
		.cra_blocksize		= 1,
		.cra_ctxsize		= sizeof(struct atmel_aes_gcm_ctx),
		.cra_alignmask		= 0xf,
		.cra_module		= THIS_MODULE,
	},
};
/* Probe functions */

static int atmel_aes_buff_init(struct atmel_aes_dev *dd)
{
	dd->buf = (void *)__get_free_pages(GFP_KERNEL, ATMEL_AES_BUFFER_ORDER);
	dd->buflen = ATMEL_AES_BUFFER_SIZE;
	dd->buflen &= ~(AES_BLOCK_SIZE - 1);

	if (!dd->buf) {
		dev_err(dd->dev, "unable to alloc pages.\n");
		return -ENOMEM;
	}

	return 0;
}

static void atmel_aes_buff_cleanup(struct atmel_aes_dev *dd)
{
	free_pages((unsigned long)dd->buf, ATMEL_AES_BUFFER_ORDER);
}
static bool atmel_aes_filter(struct dma_chan *chan, void *slave)
{
	struct at_dma_slave *sl = slave;

	if (sl && sl->dma_dev == chan->device->dev) {
		chan->private = sl;
		return true;
	} else {
		return false;
	}
}
static int atmel_aes_dma_init(struct atmel_aes_dev *dd,
			      struct crypto_platform_data *pdata)
{
	struct at_dma_slave *slave;
	int err = -ENOMEM;
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* Try to grab 2 DMA channels */
	slave = &pdata->dma_slave->rxdata;
	dd->src.chan = dma_request_slave_channel_compat(mask, atmel_aes_filter,
							slave, dd->dev, "tx");
	if (!dd->src.chan)
		goto err_dma_in;

	slave = &pdata->dma_slave->txdata;
	dd->dst.chan = dma_request_slave_channel_compat(mask, atmel_aes_filter,
							slave, dd->dev, "rx");
	if (!dd->dst.chan)
		goto err_dma_out;

	return 0;

err_dma_out:
	dma_release_channel(dd->src.chan);
err_dma_in:
	dev_warn(dd->dev, "no DMA channel available\n");
	return err;
}
static void atmel_aes_dma_cleanup(struct atmel_aes_dev *dd)
{
	dma_release_channel(dd->dst.chan);
	dma_release_channel(dd->src.chan);
}
static void atmel_aes_queue_task(unsigned long data)
{
	struct atmel_aes_dev *dd = (struct atmel_aes_dev *)data;

	atmel_aes_handle_queue(dd, NULL);
}

static void atmel_aes_done_task(unsigned long data)
{
	struct atmel_aes_dev *dd = (struct atmel_aes_dev *)data;

	dd->is_async = true;
	(void)dd->resume(dd);
}
static irqreturn_t atmel_aes_irq(int irq, void *dev_id)
{
	struct atmel_aes_dev *aes_dd = dev_id;
	u32 reg;

	reg = atmel_aes_read(aes_dd, AES_ISR);
	if (reg & atmel_aes_read(aes_dd, AES_IMR)) {
		atmel_aes_write(aes_dd, AES_IDR, reg);
		if (AES_FLAGS_BUSY & aes_dd->flags)
			tasklet_schedule(&aes_dd->done_task);
		else
			dev_warn(aes_dd->dev, "AES interrupt when no active requests.\n");
		return IRQ_HANDLED;
	}

	return IRQ_NONE;
}
static void atmel_aes_unregister_algs(struct atmel_aes_dev *dd)
{
	int i;

	if (dd->caps.has_gcm)
		crypto_unregister_aead(&aes_gcm_alg);

	if (dd->caps.has_cfb64)
		crypto_unregister_alg(&aes_cfb64_alg);

	for (i = 0; i < ARRAY_SIZE(aes_algs); i++)
		crypto_unregister_alg(&aes_algs[i]);
}
static int atmel_aes_register_algs(struct atmel_aes_dev *dd)
{
	int err, i, j;

	for (i = 0; i < ARRAY_SIZE(aes_algs); i++) {
		err = crypto_register_alg(&aes_algs[i]);
		if (err)
			goto err_aes_algs;
	}

	if (dd->caps.has_cfb64) {
		err = crypto_register_alg(&aes_cfb64_alg);
		if (err)
			goto err_aes_cfb64_alg;
	}

	if (dd->caps.has_gcm) {
		err = crypto_register_aead(&aes_gcm_alg);
		if (err)
			goto err_aes_gcm_alg;
	}

	return 0;

err_aes_gcm_alg:
	crypto_unregister_alg(&aes_cfb64_alg);
err_aes_cfb64_alg:
	i = ARRAY_SIZE(aes_algs);
err_aes_algs:
	for (j = 0; j < i; j++)
		crypto_unregister_alg(&aes_algs[j]);

	return err;
}
static void atmel_aes_get_cap(struct atmel_aes_dev *dd)
{
	dd->caps.has_dualbuff = 0;
	dd->caps.has_cfb64 = 0;
	dd->caps.has_gcm = 0;
	dd->caps.max_burst_size = 1;

	/* keep only major version number */
	switch (dd->hw_version & 0xff0) {
	case 0x500:
		dd->caps.has_dualbuff = 1;
		dd->caps.has_cfb64 = 1;
		dd->caps.has_gcm = 1;
		dd->caps.max_burst_size = 4;
		break;

	case 0x200:
		dd->caps.has_dualbuff = 1;
		dd->caps.has_cfb64 = 1;
		dd->caps.has_gcm = 1;
		dd->caps.max_burst_size = 4;
		break;

	case 0x130:
		dd->caps.has_dualbuff = 1;
		dd->caps.has_cfb64 = 1;
		dd->caps.max_burst_size = 4;
		break;

	case 0x120:
		break;

	default:
		dev_warn(dd->dev,
			 "Unmanaged aes version, set minimum capabilities\n");
		break;
	}
}
#if defined(CONFIG_OF)
static const struct of_device_id atmel_aes_dt_ids[] = {
	{ .compatible = "atmel,at91sam9g46-aes" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, atmel_aes_dt_ids);
static struct crypto_platform_data *atmel_aes_of_init(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct crypto_platform_data *pdata;

	if (!np) {
		dev_err(&pdev->dev, "device node not found\n");
		return ERR_PTR(-EINVAL);
	}

	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata) {
		dev_err(&pdev->dev, "could not allocate memory for pdata\n");
		return ERR_PTR(-ENOMEM);
	}

	pdata->dma_slave = devm_kzalloc(&pdev->dev,
					sizeof(*(pdata->dma_slave)),
					GFP_KERNEL);
	if (!pdata->dma_slave) {
		dev_err(&pdev->dev, "could not allocate memory for dma_slave\n");
		devm_kfree(&pdev->dev, pdata);
		return ERR_PTR(-ENOMEM);
	}

	return pdata;
}
#else
static inline struct crypto_platform_data *atmel_aes_of_init(struct platform_device *pdev)
{
	return ERR_PTR(-EINVAL);
}
#endif
static int atmel_aes_probe(struct platform_device *pdev)
{
	struct atmel_aes_dev *aes_dd;
	struct crypto_platform_data *pdata;
	struct device *dev = &pdev->dev;
	struct resource *aes_res;
	int err;

	pdata = pdev->dev.platform_data;
	if (!pdata) {
		pdata = atmel_aes_of_init(pdev);
		if (IS_ERR(pdata)) {
			err = PTR_ERR(pdata);
			goto aes_dd_err;
		}
	}

	if (!pdata->dma_slave) {
		err = -ENXIO;
		goto aes_dd_err;
	}

	aes_dd = devm_kzalloc(&pdev->dev, sizeof(*aes_dd), GFP_KERNEL);
	if (aes_dd == NULL) {
		dev_err(dev, "unable to alloc data struct.\n");
		err = -ENOMEM;
		goto aes_dd_err;
	}

	aes_dd->dev = dev;

	platform_set_drvdata(pdev, aes_dd);

	INIT_LIST_HEAD(&aes_dd->list);
	spin_lock_init(&aes_dd->lock);

	tasklet_init(&aes_dd->done_task, atmel_aes_done_task,
		     (unsigned long)aes_dd);
	tasklet_init(&aes_dd->queue_task, atmel_aes_queue_task,
		     (unsigned long)aes_dd);

	crypto_init_queue(&aes_dd->queue, ATMEL_AES_QUEUE_LENGTH);

	/* Get the base address */
	aes_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!aes_res) {
		dev_err(dev, "no MEM resource info\n");
		err = -ENODEV;
		goto res_err;
	}
	aes_dd->phys_base = aes_res->start;

	/* Get the IRQ */
	aes_dd->irq = platform_get_irq(pdev, 0);
	if (aes_dd->irq < 0) {
		dev_err(dev, "no IRQ resource info\n");
		err = aes_dd->irq;
		goto res_err;
	}

	err = devm_request_irq(&pdev->dev, aes_dd->irq, atmel_aes_irq,
			       IRQF_SHARED, "atmel-aes", aes_dd);
	if (err) {
		dev_err(dev, "unable to request aes irq.\n");
		goto res_err;
	}

	/* Initializing the clock */
	aes_dd->iclk = devm_clk_get(&pdev->dev, "aes_clk");
	if (IS_ERR(aes_dd->iclk)) {
		dev_err(dev, "clock initialization failed.\n");
		err = PTR_ERR(aes_dd->iclk);
		goto res_err;
	}

	aes_dd->io_base = devm_ioremap_resource(&pdev->dev, aes_res);
	if (IS_ERR(aes_dd->io_base)) {
		dev_err(dev, "can't ioremap\n");
		err = PTR_ERR(aes_dd->io_base);
		goto res_err;
	}

	err = clk_prepare(aes_dd->iclk);
	if (err)
		goto res_err;

	err = atmel_aes_hw_version_init(aes_dd);
	if (err)
		goto iclk_unprepare;

	atmel_aes_get_cap(aes_dd);

	err = atmel_aes_buff_init(aes_dd);
	if (err)
		goto err_aes_buff;

	err = atmel_aes_dma_init(aes_dd, pdata);
	if (err)
		goto err_aes_dma;

	spin_lock(&atmel_aes.lock);
	list_add_tail(&aes_dd->list, &atmel_aes.dev_list);
	spin_unlock(&atmel_aes.lock);

	err = atmel_aes_register_algs(aes_dd);
	if (err)
		goto err_algs;

	dev_info(dev, "Atmel AES - Using %s, %s for DMA transfers\n",
		 dma_chan_name(aes_dd->src.chan),
		 dma_chan_name(aes_dd->dst.chan));

	return 0;

err_algs:
	spin_lock(&atmel_aes.lock);
	list_del(&aes_dd->list);
	spin_unlock(&atmel_aes.lock);
	atmel_aes_dma_cleanup(aes_dd);
err_aes_dma:
	atmel_aes_buff_cleanup(aes_dd);
err_aes_buff:
iclk_unprepare:
	clk_unprepare(aes_dd->iclk);
res_err:
	tasklet_kill(&aes_dd->done_task);
	tasklet_kill(&aes_dd->queue_task);
aes_dd_err:
	dev_err(dev, "initialization failed.\n");

	return err;
}
static int atmel_aes_remove(struct platform_device *pdev)
{
	struct atmel_aes_dev *aes_dd;

	aes_dd = platform_get_drvdata(pdev);
	if (!aes_dd)
		return -ENODEV;

	spin_lock(&atmel_aes.lock);
	list_del(&aes_dd->list);
	spin_unlock(&atmel_aes.lock);

	atmel_aes_unregister_algs(aes_dd);

	tasklet_kill(&aes_dd->done_task);
	tasklet_kill(&aes_dd->queue_task);

	atmel_aes_dma_cleanup(aes_dd);
	atmel_aes_buff_cleanup(aes_dd);

	clk_unprepare(aes_dd->iclk);

	return 0;
}
static struct platform_driver atmel_aes_driver = {
	.probe		= atmel_aes_probe,
	.remove		= atmel_aes_remove,
	.driver		= {
		.name	= "atmel_aes",
		.of_match_table = of_match_ptr(atmel_aes_dt_ids),
	},
};

module_platform_driver(atmel_aes_driver);
MODULE_DESCRIPTION("Atmel AES hw acceleration support.");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Nicolas Royer - Eukréa Electromatique");