/*
 * Cryptographic API.
 *
 * Support for OMAP AES HW acceleration.
 *
 * Copyright (c) 2010 Nokia Corporation
 * Author: Dmitry Kasatkin <dmitry.kasatkin@nokia.com>
 * Copyright (c) 2011 Texas Instruments Incorporated
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 */
#define pr_fmt(fmt) "%20s: " fmt, __func__
#define prn(num) pr_debug(#num "=%d\n", num)
#define prx(num) pr_debug(#num "=%x\n", num)
#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/omap-dma.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_address.h>
#include <linux/io.h>
#include <linux/crypto.h>
#include <linux/interrupt.h>
#include <crypto/scatterwalk.h>
#include <crypto/aes.h>
#define DST_MAXBURST			4
#define DMA_MIN				(DST_MAXBURST * sizeof(u32))
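/*
 * Bytes already consumed within the current scatterlist entry: the walk
 * offset is absolute, so the entry's own starting offset is subtracted.
 * "inout" is token-pasted; callers write _calc_walked(in) or
 * _calc_walked(out).
 */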
#define _calc_walked(inout) (dd->inout##_walk.offset - dd->inout##_sg->offset)
/* OMAP TRM gives bitfields as start:end, where start is the higher bit
   number. For example 7:0 */
#define FLD_MASK(start, end)	(((1 << ((start) - (end) + 1)) - 1) << (end))
#define FLD_VAL(val, start, end) (((val) << (end)) & FLD_MASK(start, end))
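/* For example, FLD_MASK(4, 3) == 0x18 and FLD_VAL(2, 4, 3) == 0x10. */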
#define AES_REG_KEY(dd, x)		((dd)->pdata->key_ofs - \
						((x ^ 0x01) * 0x04))
#define AES_REG_IV(dd, x)		((dd)->pdata->iv_ofs + ((x) * 0x04))
#define AES_REG_CTRL(dd)		((dd)->pdata->ctrl_ofs)
#define AES_REG_CTRL_CTR_WIDTH_MASK	GENMASK(8, 7)
#define AES_REG_CTRL_CTR_WIDTH_32	0
#define AES_REG_CTRL_CTR_WIDTH_64	BIT(7)
#define AES_REG_CTRL_CTR_WIDTH_96	BIT(8)
#define AES_REG_CTRL_CTR_WIDTH_128	GENMASK(8, 7)
#define AES_REG_CTRL_CTR		BIT(6)
#define AES_REG_CTRL_CBC		BIT(5)
#define AES_REG_CTRL_KEY_SIZE		GENMASK(4, 3)
#define AES_REG_CTRL_DIRECTION		BIT(2)
#define AES_REG_CTRL_INPUT_READY	BIT(1)
#define AES_REG_CTRL_OUTPUT_READY	BIT(0)
#define AES_REG_CTRL_MASK		GENMASK(24, 2)
#define AES_REG_DATA_N(dd, x)		((dd)->pdata->data_ofs + ((x) * 0x04))

#define AES_REG_REV(dd)			((dd)->pdata->rev_ofs)

#define AES_REG_MASK(dd)		((dd)->pdata->mask_ofs)
#define AES_REG_MASK_SIDLE		BIT(6)
#define AES_REG_MASK_START		BIT(5)
#define AES_REG_MASK_DMA_OUT_EN		BIT(3)
#define AES_REG_MASK_DMA_IN_EN		BIT(2)
#define AES_REG_MASK_SOFTRESET		BIT(1)
#define AES_REG_AUTOIDLE		BIT(0)

#define AES_REG_LENGTH_N(x)		(0x54 + ((x) * 0x04))

#define AES_REG_IRQ_STATUS(dd)		((dd)->pdata->irq_status_ofs)
#define AES_REG_IRQ_ENABLE(dd)		((dd)->pdata->irq_enable_ofs)
#define AES_REG_IRQ_DATA_IN		BIT(1)
#define AES_REG_IRQ_DATA_OUT		BIT(2)
#define DEFAULT_TIMEOUT			(5 * HZ)
#define FLAGS_MODE_MASK			0x000f
#define FLAGS_ENCRYPT			BIT(0)
#define FLAGS_CBC			BIT(1)
#define FLAGS_GIV			BIT(2)
#define FLAGS_CTR			BIT(3)

#define FLAGS_INIT			BIT(4)
#define FLAGS_FAST			BIT(5)
#define FLAGS_BUSY			BIT(6)

#define AES_BLOCK_WORDS			(AES_BLOCK_SIZE >> 2)
struct omap_aes_ctx {
	struct omap_aes_dev *dd;

	int		keylen;
	u32		key[AES_KEYSIZE_256 / sizeof(u32)];
	unsigned long	flags;
};
struct omap_aes_reqctx {
	unsigned long mode;
};
#define OMAP_AES_QUEUE_LENGTH	1
#define OMAP_AES_CACHE_SIZE	0
struct omap_aes_algs_info {
	struct crypto_alg	*algs_list;
	unsigned int		size;
	unsigned int		registered;
};
struct omap_aes_pdata {
	struct omap_aes_algs_info	*algs_info;
	unsigned int	algs_info_size;

	void		(*trigger)(struct omap_aes_dev *dd, int length);

	/* per-SoC register map: offsets, DMA bits and revision fields */
	u32	key_ofs, iv_ofs, ctrl_ofs, data_ofs, rev_ofs, mask_ofs;
	u32	irq_enable_ofs, irq_status_ofs;
	u32	dma_enable_in, dma_enable_out, dma_start;
	u32	major_mask, major_shift, minor_mask, minor_shift;
};
struct omap_aes_dev {
	struct list_head	list;
	unsigned long		phys_base;
	void __iomem		*io_base;
	struct omap_aes_ctx	*ctx;
	struct device		*dev;
	unsigned long		flags;
	int			err;

	spinlock_t		lock;
	struct crypto_queue	queue;

	struct tasklet_struct	done_task;
	struct tasklet_struct	queue_task;

	struct ablkcipher_request	*req;

	/*
	 * total is used by PIO mode for bookkeeping, so introduce
	 * total_save, which keeps the original length for the
	 * page-order calculation on completion.
	 */
	size_t			total;
	size_t			total_save;

	struct scatterlist	*in_sg;
	struct scatterlist	*out_sg;

	/* Buffers for copying for unaligned cases */
	struct scatterlist	in_sgl;
	struct scatterlist	out_sgl;
	struct scatterlist	*orig_out;
	int			sgs_copied;

	struct scatter_walk	in_walk;
	struct scatter_walk	out_walk;
	int			dma_in;
	struct dma_chan		*dma_lch_in;
	int			dma_out;
	struct dma_chan		*dma_lch_out;
	int			in_sg_len;
	int			out_sg_len;
	int			pio_only;
	const struct omap_aes_pdata	*pdata;
};
/* keep registered devices data here */
static LIST_HEAD(dev_list);
static DEFINE_SPINLOCK(list_lock);
#ifdef DEBUG
#define omap_aes_read(dd, offset)				\
({								\
	int _read_ret;						\
	_read_ret = __raw_readl(dd->io_base + offset);		\
	pr_debug("omap_aes_read(" #offset "=%#x)= %#x\n",	\
		 offset, _read_ret);				\
	_read_ret;						\
})
#else
static inline u32 omap_aes_read(struct omap_aes_dev *dd, u32 offset)
{
	return __raw_readl(dd->io_base + offset);
}
#endif
#ifdef DEBUG
#define omap_aes_write(dd, offset, value)				\
	do {								\
		pr_debug("omap_aes_write(" #offset "=%#x) value=%#x\n",	\
			 offset, value);				\
		__raw_writel(value, dd->io_base + offset);		\
	} while (0)
#else
static inline void omap_aes_write(struct omap_aes_dev *dd, u32 offset,
				  u32 value)
{
	__raw_writel(value, dd->io_base + offset);
}
#endif
static inline void omap_aes_write_mask(struct omap_aes_dev *dd, u32 offset,
				       u32 value, u32 mask)
{
	u32 val;

	val = omap_aes_read(dd, offset);
	val &= ~mask;
	val |= value;
	omap_aes_write(dd, offset, val);
}
static void omap_aes_write_n(struct omap_aes_dev *dd, u32 offset,
			     u32 *value, int count)
{
	for (; count--; value++, offset += 4)
		omap_aes_write(dd, offset, *value);
}
static int omap_aes_hw_init(struct omap_aes_dev *dd)
{
	if (!(dd->flags & FLAGS_INIT)) {
		dd->flags |= FLAGS_INIT;
		dd->err = 0;
	}

	return 0;
}
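/*
 * Program the key, the IV (CBC/CTR only) and the control word for the
 * current request. (dd->ctx->keylen >> 3) - 1 encodes 128/192/256-bit
 * keys as 1/2/3 in the key-size field, bits 4:3.
 */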
static int omap_aes_write_ctrl(struct omap_aes_dev *dd)
{
	unsigned int key32;
	int i, err;
	u32 val;

	err = omap_aes_hw_init(dd);
	if (err)
		return err;

	key32 = dd->ctx->keylen / sizeof(u32);

	/* it seems a key should always be set even if it has not changed */
	for (i = 0; i < key32; i++) {
		omap_aes_write(dd, AES_REG_KEY(dd, i),
			       __le32_to_cpu(dd->ctx->key[i]));
	}

	if ((dd->flags & (FLAGS_CBC | FLAGS_CTR)) && dd->req->info)
		omap_aes_write_n(dd, AES_REG_IV(dd, 0), dd->req->info, 4);

	val = FLD_VAL(((dd->ctx->keylen >> 3) - 1), 4, 3);
	if (dd->flags & FLAGS_CBC)
		val |= AES_REG_CTRL_CBC;
	if (dd->flags & FLAGS_CTR)
		val |= AES_REG_CTRL_CTR | AES_REG_CTRL_CTR_WIDTH_128;

	if (dd->flags & FLAGS_ENCRYPT)
		val |= AES_REG_CTRL_DIRECTION;

	omap_aes_write_mask(dd, AES_REG_CTRL(dd), val, AES_REG_CTRL_MASK);

	return 0;
}
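/*
 * DMA triggers: the OMAP2-class IP starts on the START/DMA-enable bits
 * in the MASK register alone, while the OMAP4-class IP additionally
 * needs the byte count programmed into its LENGTH registers first.
 */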
static void omap_aes_dma_trigger_omap2(struct omap_aes_dev *dd, int length)
{
	u32 mask, val;

	val = dd->pdata->dma_start;

	if (dd->dma_lch_out != NULL)
		val |= dd->pdata->dma_enable_out;
	if (dd->dma_lch_in != NULL)
		val |= dd->pdata->dma_enable_in;

	mask = dd->pdata->dma_enable_out | dd->pdata->dma_enable_in |
	       dd->pdata->dma_start;

	omap_aes_write_mask(dd, AES_REG_MASK(dd), val, mask);
}
static void omap_aes_dma_trigger_omap4(struct omap_aes_dev *dd, int length)
{
	omap_aes_write(dd, AES_REG_LENGTH_N(0), length);
	omap_aes_write(dd, AES_REG_LENGTH_N(1), 0);

	omap_aes_dma_trigger_omap2(dd, length);
}
static void omap_aes_dma_stop(struct omap_aes_dev *dd)
{
	u32 mask;

	mask = dd->pdata->dma_enable_out | dd->pdata->dma_enable_in |
	       dd->pdata->dma_start;

	omap_aes_write_mask(dd, AES_REG_MASK(dd), 0, mask);
}
static struct omap_aes_dev *omap_aes_find_dev(struct omap_aes_ctx *ctx)
{
	struct omap_aes_dev *dd = NULL, *tmp;

	spin_lock_bh(&list_lock);
	if (!ctx->dd) {
		list_for_each_entry(tmp, &dev_list, list) {
			/* FIXME: take first available aes core */
			dd = tmp;
			break;
		}
		ctx->dd = dd;
	} else {
		/* already found before */
		dd = ctx->dd;
	}
	spin_unlock_bh(&list_lock);

	return dd;
}
static void omap_aes_dma_out_callback(void *data)
{
	struct omap_aes_dev *dd = data;

	/* dma_lch_out - completed */
	tasklet_schedule(&dd->done_task);
}
static int omap_aes_dma_init(struct omap_aes_dev *dd)
{
	int err = -ENOMEM;
	dma_cap_mask_t mask;

	dd->dma_lch_out = NULL;
	dd->dma_lch_in = NULL;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	dd->dma_lch_in = dma_request_slave_channel_compat(mask,
							  omap_dma_filter_fn,
							  &dd->dma_in,
							  dd->dev, "rx");
	if (!dd->dma_lch_in) {
		dev_err(dd->dev, "Unable to request in DMA channel\n");
		goto err_dma_in;
	}

	dd->dma_lch_out = dma_request_slave_channel_compat(mask,
							   omap_dma_filter_fn,
							   &dd->dma_out,
							   dd->dev, "tx");
	if (!dd->dma_lch_out) {
		dev_err(dd->dev, "Unable to request out DMA channel\n");
		goto err_dma_out;
	}

	return 0;

err_dma_out:
	dma_release_channel(dd->dma_lch_in);
err_dma_in:
	if (err)
		pr_err("error: %d\n", err);
	return err;
}
static void omap_aes_dma_cleanup(struct omap_aes_dev *dd)
{
	dma_release_channel(dd->dma_lch_out);
	dma_release_channel(dd->dma_lch_in);
}
static void sg_copy_buf(void *buf, struct scatterlist *sg,
			unsigned int start, unsigned int nbytes, int out)
{
	struct scatter_walk walk;

	if (!nbytes)
		return;

	scatterwalk_start(&walk, sg);
	scatterwalk_advance(&walk, start);
	scatterwalk_copychunks(buf, &walk, nbytes, out);
	scatterwalk_done(&walk, out, 0);
}
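/*
 * Direction above follows scatterwalk_copychunks(): out == 0 reads the
 * scatterlist into buf, out == 1 writes buf back into the scatterlist.
 */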
static int omap_aes_crypt_dma(struct crypto_tfm *tfm,
		struct scatterlist *in_sg, struct scatterlist *out_sg,
		int in_sg_len, int out_sg_len)
{
	struct omap_aes_ctx *ctx = crypto_tfm_ctx(tfm);
	struct omap_aes_dev *dd = ctx->dd;
	struct dma_async_tx_descriptor *tx_in, *tx_out;
	struct dma_slave_config cfg;
	int ret;

	if (dd->pio_only) {
		scatterwalk_start(&dd->in_walk, dd->in_sg);
		scatterwalk_start(&dd->out_walk, dd->out_sg);

		/* Enable DATAIN interrupt and let it take
		   care of the rest */
		omap_aes_write(dd, AES_REG_IRQ_ENABLE(dd), 0x2);
		return 0;
	}

	dma_sync_sg_for_device(dd->dev, dd->in_sg, in_sg_len, DMA_TO_DEVICE);

	memset(&cfg, 0, sizeof(cfg));

	cfg.src_addr = dd->phys_base + AES_REG_DATA_N(dd, 0);
	cfg.dst_addr = dd->phys_base + AES_REG_DATA_N(dd, 0);
	cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	cfg.src_maxburst = DST_MAXBURST;
	cfg.dst_maxburst = DST_MAXBURST;

	/* IN */
	ret = dmaengine_slave_config(dd->dma_lch_in, &cfg);
	if (ret) {
		dev_err(dd->dev, "can't configure IN dmaengine slave: %d\n",
			ret);
		return ret;
	}

	tx_in = dmaengine_prep_slave_sg(dd->dma_lch_in, in_sg, in_sg_len,
					DMA_MEM_TO_DEV,
					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!tx_in) {
		dev_err(dd->dev, "IN prep_slave_sg() failed\n");
		return -EINVAL;
	}

	/* No callback necessary */
	tx_in->callback_param = dd;

	/* OUT */
	ret = dmaengine_slave_config(dd->dma_lch_out, &cfg);
	if (ret) {
		dev_err(dd->dev, "can't configure OUT dmaengine slave: %d\n",
			ret);
		return ret;
	}

	tx_out = dmaengine_prep_slave_sg(dd->dma_lch_out, out_sg, out_sg_len,
					 DMA_DEV_TO_MEM,
					 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!tx_out) {
		dev_err(dd->dev, "OUT prep_slave_sg() failed\n");
		return -EINVAL;
	}

	tx_out->callback = omap_aes_dma_out_callback;
	tx_out->callback_param = dd;

	dmaengine_submit(tx_in);
	dmaengine_submit(tx_out);

	dma_async_issue_pending(dd->dma_lch_in);
	dma_async_issue_pending(dd->dma_lch_out);

	/* start DMA */
	dd->pdata->trigger(dd, dd->total);

	return 0;
}
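/*
 * Note on the slave config above: src_addr and dst_addr both point at
 * AES_REG_DATA_N(dd, 0), the engine's data port, and a burst of
 * DST_MAXBURST 32-bit words moves exactly one 16-byte AES block.
 */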
static int omap_aes_crypt_dma_start(struct omap_aes_dev *dd)
{
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(
					crypto_ablkcipher_reqtfm(dd->req));
	int err;

	pr_debug("total: %zu\n", dd->total);

	if (!dd->pio_only) {
		err = dma_map_sg(dd->dev, dd->in_sg, dd->in_sg_len,
				 DMA_TO_DEVICE);
		if (!err) {
			dev_err(dd->dev, "dma_map_sg() error\n");
			return -EINVAL;
		}

		err = dma_map_sg(dd->dev, dd->out_sg, dd->out_sg_len,
				 DMA_FROM_DEVICE);
		if (!err) {
			dev_err(dd->dev, "dma_map_sg() error\n");
			return -EINVAL;
		}
	}

	err = omap_aes_crypt_dma(tfm, dd->in_sg, dd->out_sg, dd->in_sg_len,
				 dd->out_sg_len);
	if (err && !dd->pio_only) {
		dma_unmap_sg(dd->dev, dd->in_sg, dd->in_sg_len, DMA_TO_DEVICE);
		dma_unmap_sg(dd->dev, dd->out_sg, dd->out_sg_len,
			     DMA_FROM_DEVICE);
	}

	return err;
}
static void omap_aes_finish_req(struct omap_aes_dev *dd, int err)
{
	struct ablkcipher_request *req = dd->req;

	pr_debug("err: %d\n", err);

	dd->flags &= ~FLAGS_BUSY;

	req->base.complete(&req->base, err);
}
static int omap_aes_crypt_dma_stop(struct omap_aes_dev *dd)
{
	int err = 0;

	pr_debug("total: %zu\n", dd->total);

	omap_aes_dma_stop(dd);

	dmaengine_terminate_all(dd->dma_lch_in);
	dmaengine_terminate_all(dd->dma_lch_out);

	return err;
}
static int omap_aes_check_aligned(struct scatterlist *sg, int total)
{
	if (!IS_ALIGNED(total, AES_BLOCK_SIZE))
		return -EINVAL;

	while (sg) {
		if (!IS_ALIGNED(sg->offset, 4))
			return -EINVAL;
		if (!IS_ALIGNED(sg->length, AES_BLOCK_SIZE))
			return -EINVAL;
		sg = sg_next(sg);
	}

	return 0;
}
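/*
 * Scatterlists that fail the alignment checks above cannot be handed
 * to the DMA engine directly, so they are staged through contiguous
 * bounce buffers; the result is copied back in omap_aes_done_task().
 */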
static int omap_aes_copy_sgs(struct omap_aes_dev *dd)
{
	void *buf_in, *buf_out;
	int pages, total;

	total = ALIGN(dd->total, AES_BLOCK_SIZE);
	pages = get_order(total);

	buf_in = (void *)__get_free_pages(GFP_ATOMIC, pages);
	buf_out = (void *)__get_free_pages(GFP_ATOMIC, pages);

	if (!buf_in || !buf_out) {
		pr_err("Couldn't allocate pages for unaligned cases.\n");
		return -1;
	}

	dd->orig_out = dd->out_sg;

	sg_copy_buf(buf_in, dd->in_sg, 0, dd->total, 0);

	sg_init_table(&dd->in_sgl, 1);
	sg_set_buf(&dd->in_sgl, buf_in, total);
	dd->in_sg = &dd->in_sgl;

	sg_init_table(&dd->out_sgl, 1);
	sg_set_buf(&dd->out_sgl, buf_out, total);
	dd->out_sg = &dd->out_sgl;

	return 0;
}
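/*
 * Take the next request off the crypto queue and start it. Called with
 * req == NULL from the tasklets to keep draining the queue; FLAGS_BUSY
 * serializes the engine so only one request is in flight at a time.
 */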
static int omap_aes_handle_queue(struct omap_aes_dev *dd,
				 struct ablkcipher_request *req)
{
	struct crypto_async_request *async_req, *backlog;
	struct omap_aes_ctx *ctx;
	struct omap_aes_reqctx *rctx;
	unsigned long flags;
	int err, ret = 0, len;

	spin_lock_irqsave(&dd->lock, flags);
	if (req)
		ret = ablkcipher_enqueue_request(&dd->queue, req);
	if (dd->flags & FLAGS_BUSY) {
		spin_unlock_irqrestore(&dd->lock, flags);
		return ret;
	}
	backlog = crypto_get_backlog(&dd->queue);
	async_req = crypto_dequeue_request(&dd->queue);
	if (async_req)
		dd->flags |= FLAGS_BUSY;
	spin_unlock_irqrestore(&dd->lock, flags);

	if (!async_req)
		return ret;

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);

	req = ablkcipher_request_cast(async_req);

	/* assign new request to device */
	dd->req = req;
	dd->total = req->nbytes;
	dd->total_save = req->nbytes;
	dd->in_sg = req->src;
	dd->out_sg = req->dst;

	if (omap_aes_check_aligned(dd->in_sg, dd->total) ||
	    omap_aes_check_aligned(dd->out_sg, dd->total)) {
		if (omap_aes_copy_sgs(dd))
			pr_err("Failed to copy SGs for unaligned cases\n");
		dd->sgs_copied = 1;
	} else {
		dd->sgs_copied = 0;
	}

	len = ALIGN(dd->total, AES_BLOCK_SIZE);
	dd->in_sg_len = scatterwalk_bytes_sglen(dd->in_sg, len);
	dd->out_sg_len = scatterwalk_bytes_sglen(dd->out_sg, len);
	BUG_ON(dd->in_sg_len < 0 || dd->out_sg_len < 0);

	rctx = ablkcipher_request_ctx(req);
	ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req));
	rctx->mode &= FLAGS_MODE_MASK;
	dd->flags = (dd->flags & ~FLAGS_MODE_MASK) | rctx->mode;

	dd->ctx = ctx;
	ctx->dd = dd;

	err = omap_aes_write_ctrl(dd);
	if (!err)
		err = omap_aes_crypt_dma_start(dd);
	if (err) {
		/* aes_task will not finish it, so do it here */
		omap_aes_finish_req(dd, err);
		tasklet_schedule(&dd->queue_task);
	}

	return ret; /* return ret, which is enqueue return value */
}
static void omap_aes_done_task(unsigned long data)
{
	struct omap_aes_dev *dd = (struct omap_aes_dev *)data;
	void *buf_in, *buf_out;
	int pages, len;

	pr_debug("enter done_task\n");

	if (!dd->pio_only) {
		dma_sync_sg_for_device(dd->dev, dd->out_sg, dd->out_sg_len,
				       DMA_FROM_DEVICE);
		dma_unmap_sg(dd->dev, dd->in_sg, dd->in_sg_len, DMA_TO_DEVICE);
		dma_unmap_sg(dd->dev, dd->out_sg, dd->out_sg_len,
			     DMA_FROM_DEVICE);
		omap_aes_crypt_dma_stop(dd);
	}

	if (dd->sgs_copied) {
		buf_in = sg_virt(&dd->in_sgl);
		buf_out = sg_virt(&dd->out_sgl);

		sg_copy_buf(buf_out, dd->orig_out, 0, dd->total_save, 1);

		len = ALIGN(dd->total_save, AES_BLOCK_SIZE);
		pages = get_order(len);
		free_pages((unsigned long)buf_in, pages);
		free_pages((unsigned long)buf_out, pages);
	}

	omap_aes_finish_req(dd, 0);
	omap_aes_handle_queue(dd, NULL);

	pr_debug("exit\n");
}
static void omap_aes_queue_task(unsigned long data)
{
	struct omap_aes_dev *dd = (struct omap_aes_dev *)data;

	omap_aes_handle_queue(dd, NULL);
}
static int omap_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
{
	struct omap_aes_ctx *ctx = crypto_ablkcipher_ctx(
			crypto_ablkcipher_reqtfm(req));
	struct omap_aes_reqctx *rctx = ablkcipher_request_ctx(req);
	struct omap_aes_dev *dd;

	pr_debug("nbytes: %d, enc: %d, cbc: %d\n", req->nbytes,
		 !!(mode & FLAGS_ENCRYPT),
		 !!(mode & FLAGS_CBC));

	dd = omap_aes_find_dev(ctx);
	if (!dd)
		return -ENODEV;

	rctx->mode = mode;

	return omap_aes_handle_queue(dd, req);
}
/* ********************** ALG API ************************************ */
static int omap_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
			   unsigned int keylen)
{
	struct omap_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);

	if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 &&
	    keylen != AES_KEYSIZE_256)
		return -EINVAL;

	pr_debug("enter, keylen: %d\n", keylen);

	memcpy(ctx->key, key, keylen);
	ctx->keylen = keylen;

	return 0;
}
static int omap_aes_ecb_encrypt(struct ablkcipher_request *req)
{
	return omap_aes_crypt(req, FLAGS_ENCRYPT);
}

static int omap_aes_ecb_decrypt(struct ablkcipher_request *req)
{
	return omap_aes_crypt(req, 0);
}

static int omap_aes_cbc_encrypt(struct ablkcipher_request *req)
{
	return omap_aes_crypt(req, FLAGS_ENCRYPT | FLAGS_CBC);
}

static int omap_aes_cbc_decrypt(struct ablkcipher_request *req)
{
	return omap_aes_crypt(req, FLAGS_CBC);
}

static int omap_aes_ctr_encrypt(struct ablkcipher_request *req)
{
	return omap_aes_crypt(req, FLAGS_ENCRYPT | FLAGS_CTR);
}

static int omap_aes_ctr_decrypt(struct ablkcipher_request *req)
{
	return omap_aes_crypt(req, FLAGS_CTR);
}
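/*
 * Each tfm holds a runtime PM reference for its whole lifetime, keeping
 * the accelerator powered between cra_init and cra_exit.
 */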
static int omap_aes_cra_init(struct crypto_tfm *tfm)
{
	struct omap_aes_dev *dd = NULL;
	int err;

	/* Find AES device, currently picks the first device */
	spin_lock_bh(&list_lock);
	list_for_each_entry(dd, &dev_list, list) {
		break;
	}
	spin_unlock_bh(&list_lock);

	err = pm_runtime_get_sync(dd->dev);
	if (err < 0) {
		dev_err(dd->dev, "%s: failed to get_sync(%d)\n",
			__func__, err);
		return err;
	}

	tfm->crt_ablkcipher.reqsize = sizeof(struct omap_aes_reqctx);

	return 0;
}
static void omap_aes_cra_exit(struct crypto_tfm *tfm)
{
	struct omap_aes_dev *dd = NULL;

	/* Find AES device, currently picks the first device */
	spin_lock_bh(&list_lock);
	list_for_each_entry(dd, &dev_list, list) {
		break;
	}
	spin_unlock_bh(&list_lock);

	pm_runtime_put_sync(dd->dev);
}
/* ********************** ALGS ************************************ */

static struct crypto_alg algs_ecb_cbc[] = {
{
	.cra_name		= "ecb(aes)",
	.cra_driver_name	= "ecb-aes-omap",
	.cra_priority		= 100,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
				  CRYPTO_ALG_KERN_DRIVER_ONLY |
				  CRYPTO_ALG_ASYNC,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct omap_aes_ctx),
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= omap_aes_cra_init,
	.cra_exit		= omap_aes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.setkey		= omap_aes_setkey,
		.encrypt	= omap_aes_ecb_encrypt,
		.decrypt	= omap_aes_ecb_decrypt,
	}
},
{
	.cra_name		= "cbc(aes)",
	.cra_driver_name	= "cbc-aes-omap",
	.cra_priority		= 100,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
				  CRYPTO_ALG_KERN_DRIVER_ONLY |
				  CRYPTO_ALG_ASYNC,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct omap_aes_ctx),
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= omap_aes_cra_init,
	.cra_exit		= omap_aes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.setkey		= omap_aes_setkey,
		.encrypt	= omap_aes_cbc_encrypt,
		.decrypt	= omap_aes_cbc_decrypt,
	}
},
};
static struct crypto_alg algs_ctr[] = {
{
	.cra_name		= "ctr(aes)",
	.cra_driver_name	= "ctr-aes-omap",
	.cra_priority		= 100,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
				  CRYPTO_ALG_KERN_DRIVER_ONLY |
				  CRYPTO_ALG_ASYNC,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct omap_aes_ctx),
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= omap_aes_cra_init,
	.cra_exit		= omap_aes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.geniv		= "eseqiv",
		.ivsize		= AES_BLOCK_SIZE,
		.setkey		= omap_aes_setkey,
		.encrypt	= omap_aes_ctr_encrypt,
		.decrypt	= omap_aes_ctr_decrypt,
	}
},
};
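/*
 * Illustrative only (not part of the driver): a kernel consumer of this
 * era would reach these algorithms through the generic ablkcipher API,
 * e.g. for "cbc(aes)":
 *
 *	struct crypto_ablkcipher *tfm =
 *		crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
 *	struct ablkcipher_request *req;
 *
 *	crypto_ablkcipher_setkey(tfm, key, AES_KEYSIZE_128);
 *	req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
 *	ablkcipher_request_set_crypt(req, src_sg, dst_sg, nbytes, iv);
 *	crypto_ablkcipher_encrypt(req);
 *
 * The crypto core picks this implementation when its priority wins or
 * when "cbc-aes-omap" is requested by driver name.
 */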
static struct omap_aes_algs_info omap_aes_algs_info_ecb_cbc[] = {
	{
		.algs_list	= algs_ecb_cbc,
		.size		= ARRAY_SIZE(algs_ecb_cbc),
	},
};
static const struct omap_aes_pdata omap_aes_pdata_omap2 = {
	.algs_info	= omap_aes_algs_info_ecb_cbc,
	.algs_info_size	= ARRAY_SIZE(omap_aes_algs_info_ecb_cbc),
	.trigger	= omap_aes_dma_trigger_omap2,
	.key_ofs	= 0x1c,
	.iv_ofs		= 0x20,
	.ctrl_ofs	= 0x30,
	.data_ofs	= 0x34,
	.rev_ofs	= 0x44,
	.mask_ofs	= 0x48,
	.dma_enable_in	= BIT(2),
	.dma_enable_out	= BIT(3),
	.dma_start	= BIT(5),
	.major_mask	= 0xf0,
	.major_shift	= 4,
	.minor_mask	= 0x0f,
	.minor_shift	= 0,
};
static struct omap_aes_algs_info omap_aes_algs_info_ecb_cbc_ctr[] = {
	{
		.algs_list	= algs_ecb_cbc,
		.size		= ARRAY_SIZE(algs_ecb_cbc),
	},
	{
		.algs_list	= algs_ctr,
		.size		= ARRAY_SIZE(algs_ctr),
	},
};
static const struct omap_aes_pdata omap_aes_pdata_omap3 = {
	.algs_info	= omap_aes_algs_info_ecb_cbc_ctr,
	.algs_info_size	= ARRAY_SIZE(omap_aes_algs_info_ecb_cbc_ctr),
	.trigger	= omap_aes_dma_trigger_omap2,
	.key_ofs	= 0x1c,
	.iv_ofs		= 0x20,
	.ctrl_ofs	= 0x30,
	.data_ofs	= 0x34,
	.rev_ofs	= 0x44,
	.mask_ofs	= 0x48,
	.dma_enable_in	= BIT(2),
	.dma_enable_out	= BIT(3),
	.dma_start	= BIT(5),
	.major_mask	= 0xf0,
	.major_shift	= 4,
	.minor_mask	= 0x0f,
	.minor_shift	= 0,
};
static const struct omap_aes_pdata omap_aes_pdata_omap4 = {
	.algs_info	= omap_aes_algs_info_ecb_cbc_ctr,
	.algs_info_size	= ARRAY_SIZE(omap_aes_algs_info_ecb_cbc_ctr),
	.trigger	= omap_aes_dma_trigger_omap4,
	.key_ofs	= 0x3c,
	.iv_ofs		= 0x40,
	.ctrl_ofs	= 0x50,
	.data_ofs	= 0x60,
	.rev_ofs	= 0x80,
	.mask_ofs	= 0x84,
	.irq_status_ofs = 0x8c,
	.irq_enable_ofs = 0x90,
	.dma_enable_in	= BIT(5),
	.dma_enable_out	= BIT(6),
	.major_mask	= 0x0700,
	.major_shift	= 8,
	.minor_mask	= 0x003f,
	.minor_shift	= 0,
};
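/*
 * PIO fallback: only the OMAP4-class register map has IRQ status/enable
 * registers, so this handler is wired up only when no DMA channels are
 * available (see probe). DATA_IN and DATA_OUT interrupts alternate, one
 * AES block at a time.
 */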
static irqreturn_t omap_aes_irq(int irq, void *dev_id)
{
	struct omap_aes_dev *dd = dev_id;
	u32 status, i;
	u32 *src, *dst;

	status = omap_aes_read(dd, AES_REG_IRQ_STATUS(dd));
	if (status & AES_REG_IRQ_DATA_IN) {
		omap_aes_write(dd, AES_REG_IRQ_ENABLE(dd), 0x0);

		BUG_ON(!dd->in_sg);

		BUG_ON(_calc_walked(in) > dd->in_sg->length);

		src = sg_virt(dd->in_sg) + _calc_walked(in);

		for (i = 0; i < AES_BLOCK_WORDS; i++) {
			omap_aes_write(dd, AES_REG_DATA_N(dd, i), *src);

			scatterwalk_advance(&dd->in_walk, 4);
			if (dd->in_sg->length == _calc_walked(in)) {
				dd->in_sg = sg_next(dd->in_sg);
				if (dd->in_sg) {
					scatterwalk_start(&dd->in_walk,
							  dd->in_sg);
					src = sg_virt(dd->in_sg) +
					      _calc_walked(in);
				}
			} else {
				src++;
			}
		}

		/* Clear IRQ status */
		status &= ~AES_REG_IRQ_DATA_IN;
		omap_aes_write(dd, AES_REG_IRQ_STATUS(dd), status);

		/* Enable DATA_OUT interrupt */
		omap_aes_write(dd, AES_REG_IRQ_ENABLE(dd), 0x4);

	} else if (status & AES_REG_IRQ_DATA_OUT) {
		omap_aes_write(dd, AES_REG_IRQ_ENABLE(dd), 0x0);

		BUG_ON(!dd->out_sg);

		BUG_ON(_calc_walked(out) > dd->out_sg->length);

		dst = sg_virt(dd->out_sg) + _calc_walked(out);

		for (i = 0; i < AES_BLOCK_WORDS; i++) {
			*dst = omap_aes_read(dd, AES_REG_DATA_N(dd, i));
			scatterwalk_advance(&dd->out_walk, 4);
			if (dd->out_sg->length == _calc_walked(out)) {
				dd->out_sg = sg_next(dd->out_sg);
				if (dd->out_sg) {
					scatterwalk_start(&dd->out_walk,
							  dd->out_sg);
					dst = sg_virt(dd->out_sg) +
					      _calc_walked(out);
				}
			} else {
				dst++;
			}
		}

		dd->total -= min_t(size_t, AES_BLOCK_SIZE, dd->total);

		/* Clear IRQ status */
		status &= ~AES_REG_IRQ_DATA_OUT;
		omap_aes_write(dd, AES_REG_IRQ_STATUS(dd), status);

		if (!dd->total)
			/* All bytes read! */
			tasklet_schedule(&dd->done_task);
		else
			/* Enable DATA_IN interrupt for next block */
			omap_aes_write(dd, AES_REG_IRQ_ENABLE(dd), 0x2);
	}

	return IRQ_HANDLED;
}
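/*
 * Device-tree matching selects the per-SoC pdata above; legacy (non-DT)
 * boards fall back to omap_aes_get_res_pdev(), which is OMAP2/3-only.
 */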
#if defined(CONFIG_OF)
static const struct of_device_id omap_aes_of_match[] = {
	{
		.compatible	= "ti,omap2-aes",
		.data		= &omap_aes_pdata_omap2,
	},
	{
		.compatible	= "ti,omap3-aes",
		.data		= &omap_aes_pdata_omap3,
	},
	{
		.compatible	= "ti,omap4-aes",
		.data		= &omap_aes_pdata_omap4,
	},
	{},
};
MODULE_DEVICE_TABLE(of, omap_aes_of_match);
static int omap_aes_get_res_of(struct omap_aes_dev *dd,
		struct device *dev, struct resource *res)
{
	struct device_node *node = dev->of_node;
	const struct of_device_id *match;
	int err;

	match = of_match_device(of_match_ptr(omap_aes_of_match), dev);
	if (!match) {
		dev_err(dev, "no compatible OF match\n");
		return -EINVAL;
	}

	err = of_address_to_resource(node, 0, res);
	if (err < 0) {
		dev_err(dev, "can't translate OF node address\n");
		return -EINVAL;
	}

	dd->dma_out = -1; /* Dummy value that's unused */
	dd->dma_in = -1;  /* Dummy value that's unused */

	dd->pdata = match->data;

	return 0;
}
#else
static const struct of_device_id omap_aes_of_match[] = {
	{},
};

static int omap_aes_get_res_of(struct omap_aes_dev *dd,
		struct device *dev, struct resource *res)
{
	return -EINVAL;
}
#endif
static int omap_aes_get_res_pdev(struct omap_aes_dev *dd,
		struct platform_device *pdev, struct resource *res)
{
	struct device *dev = &pdev->dev;
	struct resource *r;

	/* Get the base address */
	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!r) {
		dev_err(dev, "no MEM resource info\n");
		return -ENODEV;
	}
	memcpy(res, r, sizeof(*res));

	/* Get the DMA out channel */
	r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
	if (!r) {
		dev_err(dev, "no DMA out resource info\n");
		return -ENODEV;
	}
	dd->dma_out = r->start;

	/* Get the DMA in channel */
	r = platform_get_resource(pdev, IORESOURCE_DMA, 1);
	if (!r) {
		dev_err(dev, "no DMA in resource info\n");
		return -ENODEV;
	}
	dd->dma_in = r->start;

	/* Only OMAP2/3 can be non-DT */
	dd->pdata = &omap_aes_pdata_omap2;

	return 0;
}
static int omap_aes_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct omap_aes_dev *dd;
	struct crypto_alg *algp;
	struct resource res;
	int err = -ENOMEM, i, j, irq = -1;
	u32 reg;

	dd = devm_kzalloc(dev, sizeof(struct omap_aes_dev), GFP_KERNEL);
	if (dd == NULL) {
		dev_err(dev, "unable to alloc data struct.\n");
		goto err_data;
	}
	dd->dev = dev;
	platform_set_drvdata(pdev, dd);

	spin_lock_init(&dd->lock);
	crypto_init_queue(&dd->queue, OMAP_AES_QUEUE_LENGTH);

	err = (dev->of_node) ? omap_aes_get_res_of(dd, dev, &res) :
			       omap_aes_get_res_pdev(dd, pdev, &res);
	if (err)
		goto err_res;

	dd->io_base = devm_ioremap_resource(dev, &res);
	if (IS_ERR(dd->io_base)) {
		err = PTR_ERR(dd->io_base);
		goto err_res;
	}
	dd->phys_base = res.start;

	pm_runtime_enable(dev);
	err = pm_runtime_get_sync(dev);
	if (err < 0) {
		dev_err(dev, "%s: failed to get_sync(%d)\n",
			__func__, err);
		goto err_res;
	}

	omap_aes_dma_stop(dd);

	reg = omap_aes_read(dd, AES_REG_REV(dd));

	pm_runtime_put_sync(dev);

	dev_info(dev, "OMAP AES hw accel rev: %u.%u\n",
		 (reg & dd->pdata->major_mask) >> dd->pdata->major_shift,
		 (reg & dd->pdata->minor_mask) >> dd->pdata->minor_shift);

	tasklet_init(&dd->done_task, omap_aes_done_task, (unsigned long)dd);
	tasklet_init(&dd->queue_task, omap_aes_queue_task, (unsigned long)dd);

	err = omap_aes_dma_init(dd);
	if (err && AES_REG_IRQ_STATUS(dd) && AES_REG_IRQ_ENABLE(dd)) {
		dd->pio_only = 1;

		irq = platform_get_irq(pdev, 0);
		if (irq < 0) {
			dev_err(dev, "can't get IRQ resource\n");
			goto err_irq;
		}

		err = devm_request_irq(dev, irq, omap_aes_irq, 0,
				dev_name(dev), dd);
		if (err) {
			dev_err(dev, "Unable to grab omap-aes IRQ\n");
			goto err_irq;
		}
	}

	INIT_LIST_HEAD(&dd->list);
	spin_lock(&list_lock);
	list_add_tail(&dd->list, &dev_list);
	spin_unlock(&list_lock);

	for (i = 0; i < dd->pdata->algs_info_size; i++) {
		for (j = 0; j < dd->pdata->algs_info[i].size; j++) {
			algp = &dd->pdata->algs_info[i].algs_list[j];

			pr_debug("reg alg: %s\n", algp->cra_name);
			INIT_LIST_HEAD(&algp->cra_list);

			err = crypto_register_alg(algp);
			if (err)
				goto err_algs;

			dd->pdata->algs_info[i].registered++;
		}
	}

	return 0;
err_algs:
	for (i = dd->pdata->algs_info_size - 1; i >= 0; i--)
		for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--)
			crypto_unregister_alg(
					&dd->pdata->algs_info[i].algs_list[j]);
	if (!dd->pio_only)
		omap_aes_dma_cleanup(dd);
err_irq:
	tasklet_kill(&dd->done_task);
	tasklet_kill(&dd->queue_task);
	pm_runtime_disable(dev);
err_res:
	dd = NULL;
err_data:
	dev_err(dev, "initialization failed.\n");
	return err;
}
static int omap_aes_remove(struct platform_device *pdev)
{
	struct omap_aes_dev *dd = platform_get_drvdata(pdev);
	int i, j;

	if (!dd)
		return -ENODEV;

	spin_lock(&list_lock);
	list_del(&dd->list);
	spin_unlock(&list_lock);

	for (i = dd->pdata->algs_info_size - 1; i >= 0; i--)
		for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--)
			crypto_unregister_alg(
					&dd->pdata->algs_info[i].algs_list[j]);

	tasklet_kill(&dd->done_task);
	tasklet_kill(&dd->queue_task);
	omap_aes_dma_cleanup(dd);
	pm_runtime_disable(dd->dev);
	dd = NULL;

	return 0;
}
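/*
 * Across system sleep the runtime PM reference is dropped so the
 * accelerator can actually power off, and taken again on resume; open
 * tfms remain valid across the cycle.
 */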
#ifdef CONFIG_PM_SLEEP
static int omap_aes_suspend(struct device *dev)
{
	pm_runtime_put_sync(dev);

	return 0;
}

static int omap_aes_resume(struct device *dev)
{
	pm_runtime_get_sync(dev);

	return 0;
}
#endif

static SIMPLE_DEV_PM_OPS(omap_aes_pm_ops, omap_aes_suspend, omap_aes_resume);
static struct platform_driver omap_aes_driver = {
	.probe	= omap_aes_probe,
	.remove	= omap_aes_remove,
	.driver	= {
		.name	= "omap-aes",
		.owner	= THIS_MODULE,
		.pm	= &omap_aes_pm_ops,
		.of_match_table	= omap_aes_of_match,
	},
};

module_platform_driver(omap_aes_driver);
MODULE_DESCRIPTION("OMAP AES hw acceleration support.");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Dmitry Kasatkin");