// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2014 Imagination Technologies
 * Authors: Will Thomas, James Hartley
 *
 * Interface structure taken from omap-sham driver
 */

#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>

#include <crypto/internal/hash.h>
#include <crypto/md5.h>
#include <crypto/sha.h>

#define CR_RESET			0
#define CR_RESET_SET			1
#define CR_RESET_UNSET			0

#define CR_MESSAGE_LENGTH_H		0x4
#define CR_MESSAGE_LENGTH_L		0x8

#define CR_CONTROL			0xc
#define CR_CONTROL_BYTE_ORDER_3210	0
#define CR_CONTROL_BYTE_ORDER_0123	1
#define CR_CONTROL_BYTE_ORDER_2310	2
#define CR_CONTROL_BYTE_ORDER_1032	3
#define CR_CONTROL_BYTE_ORDER_SHIFT	8
#define CR_CONTROL_ALGO_MD5		0
#define CR_CONTROL_ALGO_SHA1		1
#define CR_CONTROL_ALGO_SHA224		2
#define CR_CONTROL_ALGO_SHA256		3

#define CR_INTSTAT			0x10
#define CR_INTENAB			0x14
#define CR_INTCLEAR			0x18
#define CR_INT_RESULTS_AVAILABLE	BIT(0)
#define CR_INT_NEW_RESULTS_SET		BIT(1)
#define CR_INT_RESULT_READ_ERR		BIT(2)
#define CR_INT_MESSAGE_WRITE_ERROR	BIT(3)
#define CR_INT_STATUS			BIT(8)

#define CR_RESULT_QUEUE			0x1c
#define CR_CORE_REV			0x50
#define CR_CORE_DES1			0x60
#define CR_CORE_DES2			0x70

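/* Driver state flags, kept in hdev->flags (device) and ctx->flags (request) */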
#define DRIVER_FLAGS_BUSY		BIT(0)
#define DRIVER_FLAGS_FINAL		BIT(1)
#define DRIVER_FLAGS_DMA_ACTIVE		BIT(2)
#define DRIVER_FLAGS_OUTPUT_READY	BIT(3)
#define DRIVER_FLAGS_INIT		BIT(4)
#define DRIVER_FLAGS_CPU		BIT(5)
#define DRIVER_FLAGS_DMA_READY		BIT(6)
#define DRIVER_FLAGS_ERROR		BIT(7)
#define DRIVER_FLAGS_SG			BIT(8)
#define DRIVER_FLAGS_SHA1		BIT(18)
#define DRIVER_FLAGS_SHA224		BIT(19)
#define DRIVER_FLAGS_SHA256		BIT(20)
#define DRIVER_FLAGS_MD5		BIT(21)

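/*
 * Requests below IMG_HASH_DMA_THRESHOLD bytes are written through the
 * CPU port rather than DMA; the per-request buffer is sized to match so
 * such a request can be staged in a single copy.
 */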
#define IMG_HASH_QUEUE_LENGTH		20
#define IMG_HASH_DMA_BURST		4
#define IMG_HASH_DMA_THRESHOLD		64

#ifdef __LITTLE_ENDIAN
#define IMG_HASH_BYTE_ORDER		CR_CONTROL_BYTE_ORDER_3210
#else
#define IMG_HASH_BYTE_ORDER		CR_CONTROL_BYTE_ORDER_0123
#endif

struct img_hash_request_ctx {
	struct img_hash_dev	*hdev;
	u8 digest[SHA256_DIGEST_SIZE] __aligned(sizeof(u32));
	unsigned long		flags;
	size_t			digsize;

	dma_addr_t		dma_addr;
	size_t			dma_ct;

	/* sg root */
	struct scatterlist	*sgfirst;
	/* walk state */
	struct scatterlist	*sg;
	size_t			nents;
	size_t			offset;
	unsigned int		total;
	size_t			sent;

	unsigned long		op;

	size_t			bufcnt;
	struct ahash_request	fallback_req;

	/* Zero length buffer must remain last member of struct */
	u8 buffer[] __aligned(sizeof(u32));
};

struct img_hash_ctx {
	struct img_hash_dev	*hdev;
	unsigned long		flags;
	struct crypto_ahash	*fallback;
};

struct img_hash_dev {
	struct list_head	list;
	struct device		*dev;
	struct clk		*hash_clk;
	struct clk		*sys_clk;
	void __iomem		*io_base;

	phys_addr_t		bus_addr;
	void __iomem		*cpu_addr;

	spinlock_t		lock;
	int			err;
	struct tasklet_struct	done_task;
	struct tasklet_struct	dma_task;

	unsigned long		flags;
	struct crypto_queue	queue;
	struct ahash_request	*req;

	struct dma_chan		*dma_lch;
};

struct img_hash_drv {
	struct list_head dev_list;
	spinlock_t lock;
};

static struct img_hash_drv img_hash = {
	.dev_list = LIST_HEAD_INIT(img_hash.dev_list),
	.lock = __SPIN_LOCK_UNLOCKED(img_hash.lock),
};

static inline u32 img_hash_read(struct img_hash_dev *hdev, u32 offset)
{
	return readl_relaxed(hdev->io_base + offset);
}

static inline void img_hash_write(struct img_hash_dev *hdev,
				  u32 offset, u32 value)
{
	writel_relaxed(value, hdev->io_base + offset);
}

static inline u32 img_hash_read_result_queue(struct img_hash_dev *hdev)
{
	return be32_to_cpu(img_hash_read(hdev, CR_RESULT_QUEUE));
}

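/*
 * Program the control register with the byte order and the algorithm
 * selected for the current request before any data is written.
 */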
static void img_hash_start(struct img_hash_dev *hdev, bool dma)
{
	struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);
	u32 cr = IMG_HASH_BYTE_ORDER << CR_CONTROL_BYTE_ORDER_SHIFT;

	if (ctx->flags & DRIVER_FLAGS_MD5)
		cr |= CR_CONTROL_ALGO_MD5;
	else if (ctx->flags & DRIVER_FLAGS_SHA1)
		cr |= CR_CONTROL_ALGO_SHA1;
	else if (ctx->flags & DRIVER_FLAGS_SHA224)
		cr |= CR_CONTROL_ALGO_SHA224;
	else if (ctx->flags & DRIVER_FLAGS_SHA256)
		cr |= CR_CONTROL_ALGO_SHA256;
	dev_dbg(hdev->dev, "Starting hash process\n");
	img_hash_write(hdev, CR_CONTROL, cr);

	/*
	 * The hardware block requires two cycles between writing the control
	 * register and writing the first word of data in non DMA mode, to
	 * ensure the first data write is not grouped in burst with the control
	 * register write a read is issued to 'flush' the bus.
	 */
	if (!dma)
		img_hash_read(hdev, CR_CONTROL);
}

static int img_hash_xmit_cpu(struct img_hash_dev *hdev, const u8 *buf,
			     size_t length, int final)
{
	u32 count, len32;
	const u32 *buffer = (const u32 *)buf;

	dev_dbg(hdev->dev, "xmit_cpu: length: %zu bytes\n", length);

	if (final)
		hdev->flags |= DRIVER_FLAGS_FINAL;

	len32 = DIV_ROUND_UP(length, sizeof(u32));

	for (count = 0; count < len32; count++)
		writel_relaxed(buffer[count], hdev->cpu_addr);

	return -EINPROGRESS;
}

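/*
 * DMA completion callback: flush any buffered sub-word remainder
 * through the CPU port, then schedule the next scatterlist chunk if
 * one remains.
 */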
static void img_hash_dma_callback(void *data)
{
	struct img_hash_dev *hdev = (struct img_hash_dev *)data;
	struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);

	if (ctx->bufcnt) {
		img_hash_xmit_cpu(hdev, ctx->buffer, ctx->bufcnt, 0);
		ctx->bufcnt = 0;
	}
	if (ctx->sg)
		tasklet_schedule(&hdev->dma_task);
}

static int img_hash_xmit_dma(struct img_hash_dev *hdev, struct scatterlist *sg)
{
	struct dma_async_tx_descriptor *desc;
	struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);

	ctx->dma_ct = dma_map_sg(hdev->dev, sg, 1, DMA_TO_DEVICE);
	if (ctx->dma_ct == 0) {
		dev_err(hdev->dev, "Invalid DMA sg\n");
		hdev->err = -EINVAL;
		return -EINVAL;
	}

	desc = dmaengine_prep_slave_sg(hdev->dma_lch,
				       sg,
				       ctx->dma_ct,
				       DMA_MEM_TO_DEV,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc) {
		dev_err(hdev->dev, "Null DMA descriptor\n");
		hdev->err = -EINVAL;
		dma_unmap_sg(hdev->dev, sg, 1, DMA_TO_DEVICE);
		return -EINVAL;
	}
	desc->callback = img_hash_dma_callback;
	desc->callback_param = hdev;
	dmaengine_submit(desc);
	dma_async_issue_pending(hdev->dma_lch);

	return 0;
}

static int img_hash_write_via_cpu(struct img_hash_dev *hdev)
{
	struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);

	ctx->bufcnt = sg_copy_to_buffer(hdev->req->src, sg_nents(ctx->sg),
					ctx->buffer, hdev->req->nbytes);

	ctx->total = hdev->req->nbytes;
	ctx->bufcnt = 0;

	hdev->flags |= (DRIVER_FLAGS_CPU | DRIVER_FLAGS_FINAL);

	img_hash_start(hdev, false);

	return img_hash_xmit_cpu(hdev, ctx->buffer, ctx->total, 1);
}

static int img_hash_finish(struct ahash_request *req)
{
	struct img_hash_request_ctx *ctx = ahash_request_ctx(req);

	if (!req->result)
		return -EINVAL;

	memcpy(req->result, ctx->digest, ctx->digsize);

	return 0;
}

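/*
 * The result queue delivers the digest words in reverse order, so the
 * output buffer is filled from the last word back to the first.
 */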
static void img_hash_copy_hash(struct ahash_request *req)
{
	struct img_hash_request_ctx *ctx = ahash_request_ctx(req);
	u32 *hash = (u32 *)ctx->digest;
	int i;

	for (i = (ctx->digsize / sizeof(u32)) - 1; i >= 0; i--)
		hash[i] = img_hash_read_result_queue(ctx->hdev);
}

static void img_hash_finish_req(struct ahash_request *req, int err)
{
	struct img_hash_request_ctx *ctx = ahash_request_ctx(req);
	struct img_hash_dev *hdev = ctx->hdev;

	if (!err) {
		img_hash_copy_hash(req);
		if (DRIVER_FLAGS_FINAL & hdev->flags)
			err = img_hash_finish(req);
	} else {
		dev_warn(hdev->dev, "Hash failed with error %d\n", err);
		ctx->flags |= DRIVER_FLAGS_ERROR;
	}

	hdev->flags &= ~(DRIVER_FLAGS_DMA_READY | DRIVER_FLAGS_OUTPUT_READY |
		DRIVER_FLAGS_CPU | DRIVER_FLAGS_BUSY | DRIVER_FLAGS_FINAL);

	if (req->base.complete)
		req->base.complete(&req->base, err);
}

static int img_hash_write_via_dma(struct img_hash_dev *hdev)
{
	struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);

	img_hash_start(hdev, true);

	dev_dbg(hdev->dev, "xmit dma size: %d\n", ctx->total);

	if (!ctx->total)
		hdev->flags |= DRIVER_FLAGS_FINAL;
	else
		hdev->flags |= DRIVER_FLAGS_DMA_ACTIVE | DRIVER_FLAGS_FINAL;

	tasklet_schedule(&hdev->dma_task);

	return -EINPROGRESS;
}

static int img_hash_dma_init(struct img_hash_dev *hdev)
{
	struct dma_slave_config dma_conf;
	int err;

	hdev->dma_lch = dma_request_chan(hdev->dev, "tx");
	if (IS_ERR(hdev->dma_lch)) {
		dev_err(hdev->dev, "Couldn't acquire a slave DMA channel.\n");
		return PTR_ERR(hdev->dma_lch);
	}
	dma_conf.direction = DMA_MEM_TO_DEV;
	dma_conf.dst_addr = hdev->bus_addr;
	dma_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	dma_conf.dst_maxburst = IMG_HASH_DMA_BURST;
	dma_conf.device_fc = false;

	err = dmaengine_slave_config(hdev->dma_lch, &dma_conf);
	if (err) {
		dev_err(hdev->dev, "Couldn't configure DMA slave.\n");
		dma_release_channel(hdev->dma_lch);
		return err;
	}

	return 0;
}

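/*
 * Tasklet that walks the request scatterlist, feeding whole words to
 * the DMA engine and buffering any trailing sub-word bytes so they can
 * be written through the CPU port instead.
 */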
static void img_hash_dma_task(unsigned long d)
{
	struct img_hash_dev *hdev = (struct img_hash_dev *)d;
	struct img_hash_request_ctx *ctx;
	u8 *addr;
	size_t nbytes, bleft, wsend, len, tbc;
	struct scatterlist tsg;

	if (!hdev->req)
		return;

	ctx = ahash_request_ctx(hdev->req);
	if (!ctx->sg)
		return;

	addr = sg_virt(ctx->sg);
	nbytes = ctx->sg->length - ctx->offset;

	/*
	 * The hash accelerator does not support a data valid mask. This means
	 * that if each dma (i.e. per page) is not a multiple of 4 bytes, the
	 * padding bytes in the last word written by that dma would erroneously
	 * be included in the hash. To avoid this we round down the transfer,
	 * and add the excess to the start of the next dma. It does not matter
	 * that the final dma may not be a multiple of 4 bytes as the hashing
	 * block is programmed to accept the correct number of bytes.
	 */

	bleft = nbytes % 4;
	wsend = (nbytes / 4);

	if (wsend) {
		sg_init_one(&tsg, addr + ctx->offset, wsend * 4);
		if (img_hash_xmit_dma(hdev, &tsg)) {
			dev_err(hdev->dev, "DMA failed, falling back to CPU");
			ctx->flags |= DRIVER_FLAGS_CPU;
			hdev->err = 0;
			img_hash_xmit_cpu(hdev, addr + ctx->offset,
					  wsend * 4, 0);
			ctx->sent += wsend * 4;
			wsend = 0;
		} else {
			ctx->sent += wsend * 4;
		}
	}

	if (bleft) {
		ctx->bufcnt = sg_pcopy_to_buffer(ctx->sgfirst, ctx->nents,
						 ctx->buffer, bleft, ctx->sent);
		tbc = 0;
		ctx->sg = sg_next(ctx->sg);
		while (ctx->sg && (ctx->bufcnt < 4)) {
			len = ctx->sg->length;
			if (likely(len > (4 - ctx->bufcnt)))
				len = 4 - ctx->bufcnt;
			tbc = sg_pcopy_to_buffer(ctx->sgfirst, ctx->nents,
						 ctx->buffer + ctx->bufcnt, len,
						 ctx->sent + ctx->bufcnt);
			ctx->bufcnt += tbc;
			if (tbc >= ctx->sg->length) {
				ctx->sg = sg_next(ctx->sg);
				tbc = 0;
			}
		}

		ctx->sent += ctx->bufcnt;
		ctx->offset = tbc;

		if (!wsend)
			img_hash_dma_callback(hdev);
	} else {
		ctx->offset = 0;
		ctx->sg = sg_next(ctx->sg);
	}
}

static int img_hash_write_via_dma_stop(struct img_hash_dev *hdev)
{
	struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);

	if (ctx->flags & DRIVER_FLAGS_SG)
		dma_unmap_sg(hdev->dev, ctx->sg, ctx->dma_ct, DMA_TO_DEVICE);

	return 0;
}

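/* Route the request through DMA or the CPU port based on its size. */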
static int img_hash_process_data(struct img_hash_dev *hdev)
{
	struct ahash_request *req = hdev->req;
	struct img_hash_request_ctx *ctx = ahash_request_ctx(req);
	int err = 0;

	ctx->bufcnt = 0;

	if (req->nbytes >= IMG_HASH_DMA_THRESHOLD) {
		dev_dbg(hdev->dev, "process data request(%d bytes) using DMA\n",
			req->nbytes);
		err = img_hash_write_via_dma(hdev);
	} else {
		dev_dbg(hdev->dev, "process data request(%d bytes) using CPU\n",
			req->nbytes);
		err = img_hash_write_via_cpu(hdev);
	}

	return err;
}

static int img_hash_hw_init(struct img_hash_dev *hdev)
{
	unsigned long long nbits;
	u32 u, l;

	img_hash_write(hdev, CR_RESET, CR_RESET_SET);
	img_hash_write(hdev, CR_RESET, CR_RESET_UNSET);
	img_hash_write(hdev, CR_INTENAB, CR_INT_NEW_RESULTS_SET);

	nbits = (u64)hdev->req->nbytes << 3;
	u = nbits >> 32;
	l = nbits;
	img_hash_write(hdev, CR_MESSAGE_LENGTH_H, u);
	img_hash_write(hdev, CR_MESSAGE_LENGTH_L, l);

	if (!(DRIVER_FLAGS_INIT & hdev->flags)) {
		hdev->flags |= DRIVER_FLAGS_INIT;
		hdev->err = 0;
	}
	dev_dbg(hdev->dev, "hw initialized, nbits: %llx\n", nbits);
	return 0;
}

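/*
 * The init/update/final/finup/import/export entry points below all
 * delegate to the software fallback; only digest() drives the
 * accelerator, which appears to support one-shot operation only.
 */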
static int img_hash_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct img_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
	rctx->fallback_req.base.flags = req->base.flags
		& CRYPTO_TFM_REQ_MAY_SLEEP;

	return crypto_ahash_init(&rctx->fallback_req);
}

static int img_hash_handle_queue(struct img_hash_dev *hdev,
				 struct ahash_request *req)
{
	struct crypto_async_request *async_req, *backlog;
	struct img_hash_request_ctx *ctx;
	unsigned long flags;
	int err = 0, res = 0;

	spin_lock_irqsave(&hdev->lock, flags);

	if (req)
		res = ahash_enqueue_request(&hdev->queue, req);

	if (DRIVER_FLAGS_BUSY & hdev->flags) {
		spin_unlock_irqrestore(&hdev->lock, flags);
		return res;
	}

	backlog = crypto_get_backlog(&hdev->queue);
	async_req = crypto_dequeue_request(&hdev->queue);
	if (async_req)
		hdev->flags |= DRIVER_FLAGS_BUSY;

	spin_unlock_irqrestore(&hdev->lock, flags);

	if (!async_req)
		return res;

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);

	req = ahash_request_cast(async_req);
	hdev->req = req;

	ctx = ahash_request_ctx(req);

	dev_info(hdev->dev, "processing req, op: %lu, bytes: %d\n",
		 ctx->op, req->nbytes);

	err = img_hash_hw_init(hdev);

	if (!err)
		err = img_hash_process_data(hdev);

	if (err != -EINPROGRESS) {
		/* done_task will not finish so do it here */
		img_hash_finish_req(req, err);
	}
	return res;
}

static int img_hash_update(struct ahash_request *req)
{
	struct img_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
	rctx->fallback_req.base.flags = req->base.flags
		& CRYPTO_TFM_REQ_MAY_SLEEP;
	rctx->fallback_req.nbytes = req->nbytes;
	rctx->fallback_req.src = req->src;

	return crypto_ahash_update(&rctx->fallback_req);
}

static int img_hash_final(struct ahash_request *req)
{
	struct img_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
	rctx->fallback_req.base.flags = req->base.flags
		& CRYPTO_TFM_REQ_MAY_SLEEP;
	rctx->fallback_req.result = req->result;

	return crypto_ahash_final(&rctx->fallback_req);
}

static int img_hash_finup(struct ahash_request *req)
{
	struct img_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
	rctx->fallback_req.base.flags = req->base.flags
		& CRYPTO_TFM_REQ_MAY_SLEEP;
	rctx->fallback_req.nbytes = req->nbytes;
	rctx->fallback_req.src = req->src;
	rctx->fallback_req.result = req->result;

	return crypto_ahash_finup(&rctx->fallback_req);
}

static int img_hash_import(struct ahash_request *req, const void *in)
{
	struct img_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
	rctx->fallback_req.base.flags = req->base.flags
		& CRYPTO_TFM_REQ_MAY_SLEEP;

	return crypto_ahash_import(&rctx->fallback_req, in);
}

static int img_hash_export(struct ahash_request *req, void *out)
{
	struct img_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
	rctx->fallback_req.base.flags = req->base.flags
		& CRYPTO_TFM_REQ_MAY_SLEEP;

	return crypto_ahash_export(&rctx->fallback_req, out);
}

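/*
 * One-shot digest: pick a hardware instance, derive the algorithm from
 * the digest size and queue the request on the accelerator.
 */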
static int img_hash_digest(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct img_hash_ctx *tctx = crypto_ahash_ctx(tfm);
	struct img_hash_request_ctx *ctx = ahash_request_ctx(req);
	struct img_hash_dev *hdev = NULL;
	struct img_hash_dev *tmp;
	int err;

	spin_lock(&img_hash.lock);
	if (!tctx->hdev) {
		list_for_each_entry(tmp, &img_hash.dev_list, list) {
			hdev = tmp;
			break;
		}
		tctx->hdev = hdev;
	} else {
		hdev = tctx->hdev;
	}

	spin_unlock(&img_hash.lock);
	ctx->hdev = hdev;
	ctx->flags = 0;
	ctx->digsize = crypto_ahash_digestsize(tfm);

	switch (ctx->digsize) {
	case SHA1_DIGEST_SIZE:
		ctx->flags |= DRIVER_FLAGS_SHA1;
		break;
	case SHA256_DIGEST_SIZE:
		ctx->flags |= DRIVER_FLAGS_SHA256;
		break;
	case SHA224_DIGEST_SIZE:
		ctx->flags |= DRIVER_FLAGS_SHA224;
		break;
	case MD5_DIGEST_SIZE:
		ctx->flags |= DRIVER_FLAGS_MD5;
		break;
	default:
		return -EINVAL;
	}

	ctx->bufcnt = 0;
	ctx->offset = 0;
	ctx->sent = 0;
	ctx->total = req->nbytes;
	ctx->sg = req->src;
	ctx->sgfirst = req->src;
	ctx->nents = sg_nents(ctx->sg);

	err = img_hash_handle_queue(tctx->hdev, req);

	return err;
}

static int img_hash_cra_init(struct crypto_tfm *tfm, const char *alg_name)
{
	struct img_hash_ctx *ctx = crypto_tfm_ctx(tfm);
	int err = -ENOMEM;

	ctx->fallback = crypto_alloc_ahash(alg_name, 0,
					   CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(ctx->fallback)) {
		pr_err("img_hash: Could not load fallback driver.\n");
		err = PTR_ERR(ctx->fallback);
		goto err;
	}
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct img_hash_request_ctx) +
				 crypto_ahash_reqsize(ctx->fallback) +
				 IMG_HASH_DMA_THRESHOLD);

	return 0;

err:
	return err;
}

static int img_hash_cra_md5_init(struct crypto_tfm *tfm)
{
	return img_hash_cra_init(tfm, "md5-generic");
}

static int img_hash_cra_sha1_init(struct crypto_tfm *tfm)
{
	return img_hash_cra_init(tfm, "sha1-generic");
}

static int img_hash_cra_sha224_init(struct crypto_tfm *tfm)
{
	return img_hash_cra_init(tfm, "sha224-generic");
}

static int img_hash_cra_sha256_init(struct crypto_tfm *tfm)
{
	return img_hash_cra_init(tfm, "sha256-generic");
}

static void img_hash_cra_exit(struct crypto_tfm *tfm)
{
	struct img_hash_ctx *tctx = crypto_tfm_ctx(tfm);

	crypto_free_ahash(tctx->fallback);
}

static irqreturn_t img_irq_handler(int irq, void *dev_id)
{
	struct img_hash_dev *hdev = dev_id;
	u32 reg;

	reg = img_hash_read(hdev, CR_INTSTAT);
	img_hash_write(hdev, CR_INTCLEAR, reg);

	if (reg & CR_INT_NEW_RESULTS_SET) {
		dev_dbg(hdev->dev, "IRQ CR_INT_NEW_RESULTS_SET\n");
		if (DRIVER_FLAGS_BUSY & hdev->flags) {
			hdev->flags |= DRIVER_FLAGS_OUTPUT_READY;
			if (!(DRIVER_FLAGS_CPU & hdev->flags))
				hdev->flags |= DRIVER_FLAGS_DMA_READY;
			tasklet_schedule(&hdev->done_task);
		} else {
			dev_warn(hdev->dev,
				 "HASH interrupt when no active requests.\n");
		}
	} else if (reg & CR_INT_RESULTS_AVAILABLE) {
		dev_warn(hdev->dev,
			 "IRQ triggered before the hash had completed\n");
	} else if (reg & CR_INT_RESULT_READ_ERR) {
		dev_warn(hdev->dev,
			 "Attempt to read from an empty result queue\n");
	} else if (reg & CR_INT_MESSAGE_WRITE_ERROR) {
		dev_warn(hdev->dev,
			 "Data written before the hardware was configured\n");
	}
	return IRQ_HANDLED;
}

static struct ahash_alg img_algs[] = {
	{
		.init = img_hash_init,
		.update = img_hash_update,
		.final = img_hash_final,
		.finup = img_hash_finup,
		.export = img_hash_export,
		.import = img_hash_import,
		.digest = img_hash_digest,
		.halg = {
			.digestsize = MD5_DIGEST_SIZE,
			.statesize = sizeof(struct md5_state),
			.base = {
				.cra_name = "md5",
				.cra_driver_name = "img-md5",
				.cra_priority = 300,
				.cra_flags =
				CRYPTO_ALG_ASYNC |
				CRYPTO_ALG_NEED_FALLBACK,
				.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct img_hash_ctx),
				.cra_init = img_hash_cra_md5_init,
				.cra_exit = img_hash_cra_exit,
				.cra_module = THIS_MODULE,
			}
		}
	},
	{
		.init = img_hash_init,
		.update = img_hash_update,
		.final = img_hash_final,
		.finup = img_hash_finup,
		.export = img_hash_export,
		.import = img_hash_import,
		.digest = img_hash_digest,
		.halg = {
			.digestsize = SHA1_DIGEST_SIZE,
			.statesize = sizeof(struct sha1_state),
			.base = {
				.cra_name = "sha1",
				.cra_driver_name = "img-sha1",
				.cra_priority = 300,
				.cra_flags =
				CRYPTO_ALG_ASYNC |
				CRYPTO_ALG_NEED_FALLBACK,
				.cra_blocksize = SHA1_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct img_hash_ctx),
				.cra_init = img_hash_cra_sha1_init,
				.cra_exit = img_hash_cra_exit,
				.cra_module = THIS_MODULE,
			}
		}
	},
	{
		.init = img_hash_init,
		.update = img_hash_update,
		.final = img_hash_final,
		.finup = img_hash_finup,
		.export = img_hash_export,
		.import = img_hash_import,
		.digest = img_hash_digest,
		.halg = {
			.digestsize = SHA224_DIGEST_SIZE,
			.statesize = sizeof(struct sha256_state),
			.base = {
				.cra_name = "sha224",
				.cra_driver_name = "img-sha224",
				.cra_priority = 300,
				.cra_flags =
				CRYPTO_ALG_ASYNC |
				CRYPTO_ALG_NEED_FALLBACK,
				.cra_blocksize = SHA224_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct img_hash_ctx),
				.cra_init = img_hash_cra_sha224_init,
				.cra_exit = img_hash_cra_exit,
				.cra_module = THIS_MODULE,
			}
		}
	},
	{
		.init = img_hash_init,
		.update = img_hash_update,
		.final = img_hash_final,
		.finup = img_hash_finup,
		.export = img_hash_export,
		.import = img_hash_import,
		.digest = img_hash_digest,
		.halg = {
			.digestsize = SHA256_DIGEST_SIZE,
			.statesize = sizeof(struct sha256_state),
			.base = {
				.cra_name = "sha256",
				.cra_driver_name = "img-sha256",
				.cra_priority = 300,
				.cra_flags =
				CRYPTO_ALG_ASYNC |
				CRYPTO_ALG_NEED_FALLBACK,
				.cra_blocksize = SHA256_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct img_hash_ctx),
				.cra_init = img_hash_cra_sha256_init,
				.cra_exit = img_hash_cra_exit,
				.cra_module = THIS_MODULE,
			}
		}
	}
};

static int img_register_algs(struct img_hash_dev *hdev)
{
	int i, err;

	for (i = 0; i < ARRAY_SIZE(img_algs); i++) {
		err = crypto_register_ahash(&img_algs[i]);
		if (err)
			goto err_reg;
	}
	return 0;

err_reg:
	for (; i--; )
		crypto_unregister_ahash(&img_algs[i]);

	return err;
}

static int img_unregister_algs(struct img_hash_dev *hdev)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(img_algs); i++)
		crypto_unregister_ahash(&img_algs[i]);
	return 0;
}

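/*
 * Bottom half scheduled from the interrupt handler: completes the
 * current request or pulls the next one off the queue when idle.
 */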
static void img_hash_done_task(unsigned long data)
{
	struct img_hash_dev *hdev = (struct img_hash_dev *)data;
	int err = 0;

	if (hdev->err == -EINVAL) {
		err = hdev->err;
		goto finish;
	}

	if (!(DRIVER_FLAGS_BUSY & hdev->flags)) {
		img_hash_handle_queue(hdev, NULL);
		return;
	}

	if (DRIVER_FLAGS_CPU & hdev->flags) {
		if (DRIVER_FLAGS_OUTPUT_READY & hdev->flags) {
			hdev->flags &= ~DRIVER_FLAGS_OUTPUT_READY;
			goto finish;
		}
	} else if (DRIVER_FLAGS_DMA_READY & hdev->flags) {
		if (DRIVER_FLAGS_DMA_ACTIVE & hdev->flags) {
			hdev->flags &= ~DRIVER_FLAGS_DMA_ACTIVE;
			img_hash_write_via_dma_stop(hdev);
			if (hdev->err) {
				err = hdev->err;
				goto finish;
			}
		}
		if (DRIVER_FLAGS_OUTPUT_READY & hdev->flags) {
			hdev->flags &= ~(DRIVER_FLAGS_DMA_READY |
					DRIVER_FLAGS_OUTPUT_READY);
			goto finish;
		}
	}
	return;

finish:
	img_hash_finish_req(hdev->req, err);
}

static const struct of_device_id img_hash_match[] = {
	{ .compatible = "img,hash-accelerator" },
	{}
};
MODULE_DEVICE_TABLE(of, img_hash_match);

static int img_hash_probe(struct platform_device *pdev)
{
	struct img_hash_dev *hdev;
	struct device *dev = &pdev->dev;
	struct resource *hash_res;
	int irq;
	int err;

	hdev = devm_kzalloc(dev, sizeof(*hdev), GFP_KERNEL);
	if (hdev == NULL)
		return -ENOMEM;

	spin_lock_init(&hdev->lock);

	hdev->dev = dev;

	platform_set_drvdata(pdev, hdev);

	INIT_LIST_HEAD(&hdev->list);

	tasklet_init(&hdev->done_task, img_hash_done_task, (unsigned long)hdev);
	tasklet_init(&hdev->dma_task, img_hash_dma_task, (unsigned long)hdev);

	crypto_init_queue(&hdev->queue, IMG_HASH_QUEUE_LENGTH);

	/* Register bank */
	hdev->io_base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(hdev->io_base)) {
		err = PTR_ERR(hdev->io_base);
		dev_err(dev, "can't ioremap, returned %d\n", err);
		goto res_err;
	}

	/* Write port (DMA or CPU) */
	hash_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	hdev->cpu_addr = devm_ioremap_resource(dev, hash_res);
	if (IS_ERR(hdev->cpu_addr)) {
		dev_err(dev, "can't ioremap write port\n");
		err = PTR_ERR(hdev->cpu_addr);
		goto res_err;
	}
	hdev->bus_addr = hash_res->start;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		err = irq;
		goto res_err;
	}

	err = devm_request_irq(dev, irq, img_irq_handler, 0,
			       dev_name(dev), hdev);
	if (err) {
		dev_err(dev, "unable to request irq\n");
		goto res_err;
	}
	dev_dbg(dev, "using IRQ channel %d\n", irq);

	hdev->hash_clk = devm_clk_get(&pdev->dev, "hash");
	if (IS_ERR(hdev->hash_clk)) {
		dev_err(dev, "clock initialization failed.\n");
		err = PTR_ERR(hdev->hash_clk);
		goto res_err;
	}

	hdev->sys_clk = devm_clk_get(&pdev->dev, "sys");
	if (IS_ERR(hdev->sys_clk)) {
		dev_err(dev, "clock initialization failed.\n");
		err = PTR_ERR(hdev->sys_clk);
		goto res_err;
	}

	err = clk_prepare_enable(hdev->hash_clk);
	if (err)
		goto res_err;

	err = clk_prepare_enable(hdev->sys_clk);
	if (err)
		goto clk_err;

	err = img_hash_dma_init(hdev);
	if (err)
		goto dma_err;

	dev_dbg(dev, "using %s for DMA transfers\n",
		dma_chan_name(hdev->dma_lch));

	spin_lock(&img_hash.lock);
	list_add_tail(&hdev->list, &img_hash.dev_list);
	spin_unlock(&img_hash.lock);

	err = img_register_algs(hdev);
	if (err)
		goto err_algs;
	dev_info(dev, "Img MD5/SHA1/SHA224/SHA256 Hardware accelerator initialized\n");

	return 0;

err_algs:
	spin_lock(&img_hash.lock);
	list_del(&hdev->list);
	spin_unlock(&img_hash.lock);
	dma_release_channel(hdev->dma_lch);
dma_err:
	clk_disable_unprepare(hdev->sys_clk);
clk_err:
	clk_disable_unprepare(hdev->hash_clk);
res_err:
	tasklet_kill(&hdev->done_task);
	tasklet_kill(&hdev->dma_task);

	return err;
}

static int img_hash_remove(struct platform_device *pdev)
{
	struct img_hash_dev *hdev;

	hdev = platform_get_drvdata(pdev);
	spin_lock(&img_hash.lock);
	list_del(&hdev->list);
	spin_unlock(&img_hash.lock);

	img_unregister_algs(hdev);

	tasklet_kill(&hdev->done_task);
	tasklet_kill(&hdev->dma_task);

	dma_release_channel(hdev->dma_lch);

	clk_disable_unprepare(hdev->hash_clk);
	clk_disable_unprepare(hdev->sys_clk);

	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int img_hash_suspend(struct device *dev)
{
	struct img_hash_dev *hdev = dev_get_drvdata(dev);

	clk_disable_unprepare(hdev->hash_clk);
	clk_disable_unprepare(hdev->sys_clk);

	return 0;
}

static int img_hash_resume(struct device *dev)
{
	struct img_hash_dev *hdev = dev_get_drvdata(dev);
	int ret;

	ret = clk_prepare_enable(hdev->hash_clk);
	if (ret)
		return ret;

	ret = clk_prepare_enable(hdev->sys_clk);
	if (ret) {
		clk_disable_unprepare(hdev->hash_clk);
		return ret;
	}

	return 0;
}
#endif /* CONFIG_PM_SLEEP */

static const struct dev_pm_ops img_hash_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(img_hash_suspend, img_hash_resume)
};

static struct platform_driver img_hash_driver = {
	.probe		= img_hash_probe,
	.remove		= img_hash_remove,
	.driver		= {
		.name	= "img-hash-accelerator",
		.pm	= &img_hash_pm_ops,
		.of_match_table = of_match_ptr(img_hash_match),
	}
};
module_platform_driver(img_hash_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Imgtec SHA1/224/256 & MD5 hw accelerator driver");
MODULE_AUTHOR("Will Thomas.");
MODULE_AUTHOR("James Hartley <james.hartley@imgtec.com>");