// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2014 Imagination Technologies
 * Authors: Will Thomas, James Hartley
 *
 *	Interface structure taken from omap-sham driver
 */

#include <linux/clk.h>
#include <linux/dmaengine.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>

#include <crypto/internal/hash.h>
#include <crypto/md5.h>
#include <crypto/sha.h>

#define CR_RESET			0
#define CR_RESET_SET			1
#define CR_RESET_UNSET			0

#define CR_MESSAGE_LENGTH_H		0x4
#define CR_MESSAGE_LENGTH_L		0x8

#define CR_CONTROL			0xc
#define CR_CONTROL_BYTE_ORDER_3210	0
#define CR_CONTROL_BYTE_ORDER_0123	1
#define CR_CONTROL_BYTE_ORDER_2310	2
#define CR_CONTROL_BYTE_ORDER_1032	3
#define CR_CONTROL_BYTE_ORDER_SHIFT	8
#define CR_CONTROL_ALGO_MD5		0
#define CR_CONTROL_ALGO_SHA1		1
#define CR_CONTROL_ALGO_SHA224		2
#define CR_CONTROL_ALGO_SHA256		3

#define CR_INTSTAT			0x10
#define CR_INTENAB			0x14
#define CR_INTCLEAR			0x18
#define CR_INT_RESULTS_AVAILABLE	BIT(0)
#define CR_INT_NEW_RESULTS_SET		BIT(1)
#define CR_INT_RESULT_READ_ERR		BIT(2)
#define CR_INT_MESSAGE_WRITE_ERROR	BIT(3)
#define CR_INT_STATUS			BIT(8)

#define CR_RESULT_QUEUE			0x1c
#define CR_CORE_REV			0x50
#define CR_CORE_DES1			0x60
#define CR_CORE_DES2			0x70

#define DRIVER_FLAGS_BUSY		BIT(0)
#define DRIVER_FLAGS_FINAL		BIT(1)
#define DRIVER_FLAGS_DMA_ACTIVE		BIT(2)
#define DRIVER_FLAGS_OUTPUT_READY	BIT(3)
#define DRIVER_FLAGS_INIT		BIT(4)
#define DRIVER_FLAGS_CPU		BIT(5)
#define DRIVER_FLAGS_DMA_READY		BIT(6)
#define DRIVER_FLAGS_ERROR		BIT(7)
#define DRIVER_FLAGS_SG			BIT(8)
#define DRIVER_FLAGS_SHA1		BIT(18)
#define DRIVER_FLAGS_SHA224		BIT(19)
#define DRIVER_FLAGS_SHA256		BIT(20)
#define DRIVER_FLAGS_MD5		BIT(21)

#define IMG_HASH_QUEUE_LENGTH		20
#define IMG_HASH_DMA_BURST		4
#define IMG_HASH_DMA_THRESHOLD		64

#ifdef __LITTLE_ENDIAN
#define IMG_HASH_BYTE_ORDER		CR_CONTROL_BYTE_ORDER_3210
#else
#define IMG_HASH_BYTE_ORDER		CR_CONTROL_BYTE_ORDER_0123
#endif

struct img_hash_dev;

struct img_hash_request_ctx {
	struct img_hash_dev	*hdev;
	u8 digest[SHA256_DIGEST_SIZE] __aligned(sizeof(u32));
	unsigned long		flags;
	size_t			digsize;

	unsigned long		op;

	size_t			bufcnt;
	struct scatterlist	*sgfirst;
	/* walk state */
	struct scatterlist	*sg;
	size_t			nents;
	size_t			offset;
	unsigned int		total;
	size_t			sent;

	unsigned long		dma_ct;

	struct ahash_request	fallback_req;

	/* Zero length buffer must remain last member of struct */
	u8 buffer[0] __aligned(sizeof(u32));
};

struct img_hash_ctx {
	struct img_hash_dev	*hdev;
	unsigned long		flags;
	struct crypto_ahash	*fallback;
};

struct img_hash_dev {
	struct list_head	list;
	struct device		*dev;
	struct clk		*hash_clk;
	struct clk		*sys_clk;
	void __iomem		*io_base;

	phys_addr_t		bus_addr;
	void __iomem		*cpu_addr;

	spinlock_t		lock;
	int			err;
	struct tasklet_struct	done_task;
	struct tasklet_struct	dma_task;

	unsigned long		flags;
	struct crypto_queue	queue;
	struct ahash_request	*req;

	struct dma_chan		*dma_lch;
};

struct img_hash_drv {
	struct list_head dev_list;
	spinlock_t lock;
};

static struct img_hash_drv img_hash = {
	.dev_list = LIST_HEAD_INIT(img_hash.dev_list),
	.lock = __SPIN_LOCK_UNLOCKED(img_hash.lock),
};

static inline u32 img_hash_read(struct img_hash_dev *hdev, u32 offset)
{
	return readl_relaxed(hdev->io_base + offset);
}

static inline void img_hash_write(struct img_hash_dev *hdev,
				  u32 offset, u32 value)
{
	writel_relaxed(value, hdev->io_base + offset);
}

static inline u32 img_hash_read_result_queue(struct img_hash_dev *hdev)
{
	return be32_to_cpu(img_hash_read(hdev, CR_RESULT_QUEUE));
}

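/* Select byte order and algorithm in CR_CONTROL and start the hash */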
static void img_hash_start(struct img_hash_dev *hdev, bool dma)
{
	struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);
	u32 cr = IMG_HASH_BYTE_ORDER << CR_CONTROL_BYTE_ORDER_SHIFT;

	if (ctx->flags & DRIVER_FLAGS_MD5)
		cr |= CR_CONTROL_ALGO_MD5;
	else if (ctx->flags & DRIVER_FLAGS_SHA1)
		cr |= CR_CONTROL_ALGO_SHA1;
	else if (ctx->flags & DRIVER_FLAGS_SHA224)
		cr |= CR_CONTROL_ALGO_SHA224;
	else if (ctx->flags & DRIVER_FLAGS_SHA256)
		cr |= CR_CONTROL_ALGO_SHA256;
	dev_dbg(hdev->dev, "Starting hash process\n");
	img_hash_write(hdev, CR_CONTROL, cr);

	/*
	 * The hardware block requires two cycles between writing the control
	 * register and writing the first word of data in non DMA mode, to
	 * ensure the first data write is not grouped in burst with the control
	 * register write a read is issued to 'flush' the bus.
	 */
	if (!dma)
		img_hash_read(hdev, CR_CONTROL);
}

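/* Push data into the hardware write port one 32-bit word at a time */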
static int img_hash_xmit_cpu(struct img_hash_dev *hdev, const u8 *buf,
			     size_t length, int final)
{
	u32 count, len32;
	const u32 *buffer = (const u32 *)buf;

	dev_dbg(hdev->dev, "xmit_cpu: length: %zu bytes\n", length);

	if (final)
		hdev->flags |= DRIVER_FLAGS_FINAL;

	len32 = DIV_ROUND_UP(length, sizeof(u32));

	for (count = 0; count < len32; count++)
		writel_relaxed(buffer[count], hdev->cpu_addr);

	return -EINPROGRESS;
}

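/*
 * DMA completion callback: flush any buffered sub-word remainder via the
 * CPU port, then reschedule the DMA tasklet if scatterlist data remains.
 */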
static void img_hash_dma_callback(void *data)
{
	struct img_hash_dev *hdev = (struct img_hash_dev *)data;
	struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);

	if (ctx->bufcnt) {
		img_hash_xmit_cpu(hdev, ctx->buffer, ctx->bufcnt, 0);
		ctx->bufcnt = 0;
	}
	if (ctx->sg)
		tasklet_schedule(&hdev->dma_task);
}

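/* Map a scatterlist entry and submit it to the DMA engine */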
static int img_hash_xmit_dma(struct img_hash_dev *hdev, struct scatterlist *sg)
{
	struct dma_async_tx_descriptor *desc;
	struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);

	ctx->dma_ct = dma_map_sg(hdev->dev, sg, 1, DMA_TO_DEVICE);
	if (ctx->dma_ct == 0) {
		dev_err(hdev->dev, "Invalid DMA sg\n");
		hdev->err = -EINVAL;
		return -EINVAL;
	}

	desc = dmaengine_prep_slave_sg(hdev->dma_lch,
				       sg,
				       ctx->dma_ct,
				       DMA_MEM_TO_DEV,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc) {
		dev_err(hdev->dev, "Null DMA descriptor\n");
		hdev->err = -EINVAL;
		dma_unmap_sg(hdev->dev, sg, 1, DMA_TO_DEVICE);
		return -EINVAL;
	}
	desc->callback = img_hash_dma_callback;
	desc->callback_param = hdev;
	dmaengine_submit(desc);
	dma_async_issue_pending(hdev->dma_lch);

	return 0;
}

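/* CPU path: bounce the whole request into the context buffer and write it out */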
static int img_hash_write_via_cpu(struct img_hash_dev *hdev)
{
	struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);

	ctx->bufcnt = sg_copy_to_buffer(hdev->req->src, sg_nents(ctx->sg),
					ctx->buffer, hdev->req->nbytes);

	ctx->total = hdev->req->nbytes;
	ctx->bufcnt = 0;

	hdev->flags |= (DRIVER_FLAGS_CPU | DRIVER_FLAGS_FINAL);

	img_hash_start(hdev, false);

	return img_hash_xmit_cpu(hdev, ctx->buffer, ctx->total, 1);
}

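/* Copy the completed digest into the request's result buffer */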
static int img_hash_finish(struct ahash_request *req)
{
	struct img_hash_request_ctx *ctx = ahash_request_ctx(req);

	if (!req->result)
		return -EINVAL;

	memcpy(req->result, ctx->digest, ctx->digsize);

	return 0;
}

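/* Drain the result queue into ctx->digest, most significant word first */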
static void img_hash_copy_hash(struct ahash_request *req)
{
	struct img_hash_request_ctx *ctx = ahash_request_ctx(req);
	u32 *hash = (u32 *)ctx->digest;
	int i;

	for (i = (ctx->digsize / sizeof(u32)) - 1; i >= 0; i--)
		hash[i] = img_hash_read_result_queue(ctx->hdev);
}

static void img_hash_finish_req(struct ahash_request *req, int err)
{
	struct img_hash_request_ctx *ctx = ahash_request_ctx(req);
	struct img_hash_dev *hdev = ctx->hdev;

	if (!err) {
		img_hash_copy_hash(req);
		if (DRIVER_FLAGS_FINAL & hdev->flags)
			err = img_hash_finish(req);
	} else {
		dev_warn(hdev->dev, "Hash failed with error %d\n", err);
		ctx->flags |= DRIVER_FLAGS_ERROR;
	}

	hdev->flags &= ~(DRIVER_FLAGS_DMA_READY | DRIVER_FLAGS_OUTPUT_READY |
		DRIVER_FLAGS_CPU | DRIVER_FLAGS_BUSY | DRIVER_FLAGS_FINAL);

	if (req->base.complete)
		req->base.complete(&req->base, err);
}

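/* DMA path: start the hardware and let the DMA tasklet stream the data */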
static int img_hash_write_via_dma(struct img_hash_dev *hdev)
{
	struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);

	img_hash_start(hdev, true);

	dev_dbg(hdev->dev, "xmit dma size: %u\n", ctx->total);

	if (!ctx->total)
		hdev->flags |= DRIVER_FLAGS_FINAL;
	else
		hdev->flags |= DRIVER_FLAGS_DMA_ACTIVE | DRIVER_FLAGS_FINAL;

	tasklet_schedule(&hdev->dma_task);

	return -EINPROGRESS;
}

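/* Acquire and configure the "tx" slave channel that feeds the write port */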
static int img_hash_dma_init(struct img_hash_dev *hdev)
{
	struct dma_slave_config dma_conf;
	int err;

	hdev->dma_lch = dma_request_slave_channel(hdev->dev, "tx");
	if (!hdev->dma_lch) {
		dev_err(hdev->dev, "Couldn't acquire a slave DMA channel.\n");
		return -EBUSY;
	}
	dma_conf.direction = DMA_MEM_TO_DEV;
	dma_conf.dst_addr = hdev->bus_addr;
	dma_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	dma_conf.dst_maxburst = IMG_HASH_DMA_BURST;
	dma_conf.device_fc = false;

	err = dmaengine_slave_config(hdev->dma_lch, &dma_conf);
	if (err) {
		dev_err(hdev->dev, "Couldn't configure DMA slave.\n");
		dma_release_channel(hdev->dma_lch);
		return err;
	}

	return 0;
}

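/*
 * Tasklet that walks the scatterlist, streaming word-aligned chunks via
 * DMA and carrying any 1-3 byte remainder over to the next transfer.
 */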
static void img_hash_dma_task(unsigned long d)
{
	struct img_hash_dev *hdev = (struct img_hash_dev *)d;
	struct img_hash_request_ctx *ctx;
	u8 *addr;
	size_t nbytes, bleft, wsend, len, tbc;
	struct scatterlist tsg;

	if (!hdev->req)
		return;

	ctx = ahash_request_ctx(hdev->req);
	if (!ctx->sg)
		return;

	addr = sg_virt(ctx->sg);
	nbytes = ctx->sg->length - ctx->offset;

	/*
	 * The hash accelerator does not support a data valid mask. This means
	 * that if each dma (i.e. per page) is not a multiple of 4 bytes, the
	 * padding bytes in the last word written by that dma would erroneously
	 * be included in the hash. To avoid this we round down the transfer,
	 * and add the excess to the start of the next dma. It does not matter
	 * that the final dma may not be a multiple of 4 bytes as the hashing
	 * block is programmed to accept the correct number of bytes.
	 */

	bleft = nbytes % 4;
	wsend = (nbytes / 4);

	if (wsend) {
		sg_init_one(&tsg, addr + ctx->offset, wsend * 4);
		if (img_hash_xmit_dma(hdev, &tsg)) {
			dev_err(hdev->dev, "DMA failed, falling back to CPU");
			ctx->flags |= DRIVER_FLAGS_CPU;
			hdev->err = 0;
			img_hash_xmit_cpu(hdev, addr + ctx->offset,
					  wsend * 4, 0);
			ctx->sent += wsend * 4;
			wsend = 0;
		} else {
			ctx->sent += wsend * 4;
		}
	}

	if (bleft) {
		ctx->bufcnt = sg_pcopy_to_buffer(ctx->sgfirst, ctx->nents,
						 ctx->buffer, bleft, ctx->sent);
		tbc = 0;
		ctx->sg = sg_next(ctx->sg);
		while (ctx->sg && (ctx->bufcnt < 4)) {
			len = ctx->sg->length;
			if (likely(len > (4 - ctx->bufcnt)))
				len = 4 - ctx->bufcnt;
			tbc = sg_pcopy_to_buffer(ctx->sgfirst, ctx->nents,
						 ctx->buffer + ctx->bufcnt, len,
						 ctx->sent + ctx->bufcnt);
			ctx->bufcnt += tbc;
			if (tbc >= ctx->sg->length) {
				ctx->sg = sg_next(ctx->sg);
				tbc = 0;
			}
		}

		ctx->sent += ctx->bufcnt;
		ctx->offset = tbc;

		if (!wsend)
			img_hash_dma_callback(hdev);
	} else {
		ctx->offset = 0;
		ctx->sg = sg_next(ctx->sg);
	}
}

static int img_hash_write_via_dma_stop(struct img_hash_dev *hdev)
{
	struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);

	if (ctx->flags & DRIVER_FLAGS_SG)
		dma_unmap_sg(hdev->dev, ctx->sg, ctx->dma_ct, DMA_TO_DEVICE);

	return 0;
}

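/* Pick the DMA or CPU submission path based on IMG_HASH_DMA_THRESHOLD */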
static int img_hash_process_data(struct img_hash_dev *hdev)
{
	struct ahash_request *req = hdev->req;
	struct img_hash_request_ctx *ctx = ahash_request_ctx(req);
	int err = 0;

	ctx->bufcnt = 0;

	if (req->nbytes >= IMG_HASH_DMA_THRESHOLD) {
		dev_dbg(hdev->dev, "process data request(%u bytes) using DMA\n",
			req->nbytes);
		err = img_hash_write_via_dma(hdev);
	} else {
		dev_dbg(hdev->dev, "process data request(%u bytes) using CPU\n",
			req->nbytes);
		err = img_hash_write_via_cpu(hdev);
	}

	return err;
}

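/*
 * Reset the block, enable the 'new results' interrupt and program the
 * total message length in bits.
 */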
static int img_hash_hw_init(struct img_hash_dev *hdev)
{
	unsigned long long nbits;
	u32 u, l;

	img_hash_write(hdev, CR_RESET, CR_RESET_SET);
	img_hash_write(hdev, CR_RESET, CR_RESET_UNSET);
	img_hash_write(hdev, CR_INTENAB, CR_INT_NEW_RESULTS_SET);

	nbits = (u64)hdev->req->nbytes << 3;
	u = nbits >> 32;
	l = nbits;
	img_hash_write(hdev, CR_MESSAGE_LENGTH_H, u);
	img_hash_write(hdev, CR_MESSAGE_LENGTH_L, l);

	if (!(DRIVER_FLAGS_INIT & hdev->flags)) {
		hdev->flags |= DRIVER_FLAGS_INIT;
		hdev->err = 0;
	}
	dev_dbg(hdev->dev, "hw initialized, nbits: %llx\n", nbits);

	return 0;
}

static int img_hash_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct img_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
	rctx->fallback_req.base.flags = req->base.flags
		& CRYPTO_TFM_REQ_MAY_SLEEP;

	return crypto_ahash_init(&rctx->fallback_req);
}

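/*
 * Enqueue a request and, if the hardware is idle, dequeue the next one
 * and start processing it; called both for new requests and, with a
 * NULL req, from the done tasklet to restart the queue.
 */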
static int img_hash_handle_queue(struct img_hash_dev *hdev,
				 struct ahash_request *req)
{
	struct crypto_async_request *async_req, *backlog;
	struct img_hash_request_ctx *ctx;
	unsigned long flags;
	int err = 0, res = 0;

	spin_lock_irqsave(&hdev->lock, flags);

	if (req)
		res = ahash_enqueue_request(&hdev->queue, req);

	if (DRIVER_FLAGS_BUSY & hdev->flags) {
		spin_unlock_irqrestore(&hdev->lock, flags);
		return res;
	}

	backlog = crypto_get_backlog(&hdev->queue);
	async_req = crypto_dequeue_request(&hdev->queue);
	if (async_req)
		hdev->flags |= DRIVER_FLAGS_BUSY;

	spin_unlock_irqrestore(&hdev->lock, flags);

	if (!async_req)
		return res;

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);

	req = ahash_request_cast(async_req);
	hdev->req = req;

	ctx = ahash_request_ctx(req);

	dev_info(hdev->dev, "processing req, op: %lu, bytes: %u\n",
		 ctx->op, req->nbytes);

	err = img_hash_hw_init(hdev);

	if (!err)
		err = img_hash_process_data(hdev);

	if (err != -EINPROGRESS) {
		/* done_task will not finish so do it here */
		img_hash_finish_req(req, err);
	}

	return res;
}

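/*
 * Only whole-message digests run on the accelerator; the incremental
 * update/final/finup/import/export operations below are forwarded to
 * the software fallback transform.
 */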
static int img_hash_update(struct ahash_request *req)
{
	struct img_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
	rctx->fallback_req.base.flags = req->base.flags
		& CRYPTO_TFM_REQ_MAY_SLEEP;
	rctx->fallback_req.nbytes = req->nbytes;
	rctx->fallback_req.src = req->src;

	return crypto_ahash_update(&rctx->fallback_req);
}

static int img_hash_final(struct ahash_request *req)
{
	struct img_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
	rctx->fallback_req.base.flags = req->base.flags
		& CRYPTO_TFM_REQ_MAY_SLEEP;
	rctx->fallback_req.result = req->result;

	return crypto_ahash_final(&rctx->fallback_req);
}

static int img_hash_finup(struct ahash_request *req)
{
	struct img_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
	rctx->fallback_req.base.flags = req->base.flags
		& CRYPTO_TFM_REQ_MAY_SLEEP;
	rctx->fallback_req.nbytes = req->nbytes;
	rctx->fallback_req.src = req->src;
	rctx->fallback_req.result = req->result;

	return crypto_ahash_finup(&rctx->fallback_req);
}

static int img_hash_import(struct ahash_request *req, const void *in)
{
	struct img_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
	rctx->fallback_req.base.flags = req->base.flags
		& CRYPTO_TFM_REQ_MAY_SLEEP;

	return crypto_ahash_import(&rctx->fallback_req, in);
}

static int img_hash_export(struct ahash_request *req, void *out)
{
	struct img_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
	rctx->fallback_req.base.flags = req->base.flags
		& CRYPTO_TFM_REQ_MAY_SLEEP;

	return crypto_ahash_export(&rctx->fallback_req, out);
}

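/* Hash a complete request in hardware, binding the tfm to a device first */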
static int img_hash_digest(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct img_hash_ctx *tctx = crypto_ahash_ctx(tfm);
	struct img_hash_request_ctx *ctx = ahash_request_ctx(req);
	struct img_hash_dev *hdev = NULL;
	struct img_hash_dev *tmp;
	int err;

	spin_lock(&img_hash.lock);
	if (!tctx->hdev) {
		list_for_each_entry(tmp, &img_hash.dev_list, list) {
			hdev = tmp;
			break;
		}
		tctx->hdev = hdev;
	} else {
		hdev = tctx->hdev;
	}
	spin_unlock(&img_hash.lock);

	ctx->hdev = hdev;
	ctx->flags = 0;
	ctx->digsize = crypto_ahash_digestsize(tfm);

	switch (ctx->digsize) {
	case SHA1_DIGEST_SIZE:
		ctx->flags |= DRIVER_FLAGS_SHA1;
		break;
	case SHA256_DIGEST_SIZE:
		ctx->flags |= DRIVER_FLAGS_SHA256;
		break;
	case SHA224_DIGEST_SIZE:
		ctx->flags |= DRIVER_FLAGS_SHA224;
		break;
	case MD5_DIGEST_SIZE:
		ctx->flags |= DRIVER_FLAGS_MD5;
		break;
	default:
		return -EINVAL;
	}

	ctx->bufcnt = 0;
	ctx->offset = 0;
	ctx->sent = 0;
	ctx->total = req->nbytes;
	ctx->sg = req->src;
	ctx->sgfirst = req->src;
	ctx->nents = sg_nents(ctx->sg);

	err = img_hash_handle_queue(tctx->hdev, req);

	return err;
}

static int img_hash_cra_init(struct crypto_tfm *tfm, const char *alg_name)
{
	struct img_hash_ctx *ctx = crypto_tfm_ctx(tfm);
	int err;

	ctx->fallback = crypto_alloc_ahash(alg_name, 0,
					   CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(ctx->fallback)) {
		pr_err("img_hash: Could not load fallback driver.\n");
		err = PTR_ERR(ctx->fallback);
		return err;
	}
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct img_hash_request_ctx) +
				 crypto_ahash_reqsize(ctx->fallback) +
				 IMG_HASH_DMA_THRESHOLD);

	return 0;
}

static int img_hash_cra_md5_init(struct crypto_tfm *tfm)
{
	return img_hash_cra_init(tfm, "md5-generic");
}

static int img_hash_cra_sha1_init(struct crypto_tfm *tfm)
{
	return img_hash_cra_init(tfm, "sha1-generic");
}

static int img_hash_cra_sha224_init(struct crypto_tfm *tfm)
{
	return img_hash_cra_init(tfm, "sha224-generic");
}

static int img_hash_cra_sha256_init(struct crypto_tfm *tfm)
{
	return img_hash_cra_init(tfm, "sha256-generic");
}

static void img_hash_cra_exit(struct crypto_tfm *tfm)
{
	struct img_hash_ctx *tctx = crypto_tfm_ctx(tfm);

	crypto_free_ahash(tctx->fallback);
}

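/* Acknowledge and decode the CR_INTSTAT interrupt status bits */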
static irqreturn_t img_irq_handler(int irq, void *dev_id)
{
	struct img_hash_dev *hdev = dev_id;
	u32 reg;

	reg = img_hash_read(hdev, CR_INTSTAT);
	img_hash_write(hdev, CR_INTCLEAR, reg);

	if (reg & CR_INT_NEW_RESULTS_SET) {
		dev_dbg(hdev->dev, "IRQ CR_INT_NEW_RESULTS_SET\n");
		if (DRIVER_FLAGS_BUSY & hdev->flags) {
			hdev->flags |= DRIVER_FLAGS_OUTPUT_READY;
			if (!(DRIVER_FLAGS_CPU & hdev->flags))
				hdev->flags |= DRIVER_FLAGS_DMA_READY;
			tasklet_schedule(&hdev->done_task);
		} else {
			dev_warn(hdev->dev,
				 "HASH interrupt when no active requests.\n");
		}
	} else if (reg & CR_INT_RESULTS_AVAILABLE) {
		dev_warn(hdev->dev,
			 "IRQ triggered before the hash had completed\n");
	} else if (reg & CR_INT_RESULT_READ_ERR) {
		dev_warn(hdev->dev,
			 "Attempt to read from an empty result queue\n");
	} else if (reg & CR_INT_MESSAGE_WRITE_ERROR) {
		dev_warn(hdev->dev,
			 "Data written before the hardware was configured\n");
	}

	return IRQ_HANDLED;
}

static struct ahash_alg img_algs[] = {
	{
		.init = img_hash_init,
		.update = img_hash_update,
		.final = img_hash_final,
		.finup = img_hash_finup,
		.export = img_hash_export,
		.import = img_hash_import,
		.digest = img_hash_digest,
		.halg = {
			.digestsize = MD5_DIGEST_SIZE,
			.statesize = sizeof(struct md5_state),
			.base = {
				.cra_name = "md5",
				.cra_driver_name = "img-md5",
				.cra_priority = 300,
				.cra_flags =
				CRYPTO_ALG_ASYNC |
				CRYPTO_ALG_NEED_FALLBACK,
				.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct img_hash_ctx),
				.cra_init = img_hash_cra_md5_init,
				.cra_exit = img_hash_cra_exit,
				.cra_module = THIS_MODULE,
			}
		}
	},
	{
		.init = img_hash_init,
		.update = img_hash_update,
		.final = img_hash_final,
		.finup = img_hash_finup,
		.export = img_hash_export,
		.import = img_hash_import,
		.digest = img_hash_digest,
		.halg = {
			.digestsize = SHA1_DIGEST_SIZE,
			.statesize = sizeof(struct sha1_state),
			.base = {
				.cra_name = "sha1",
				.cra_driver_name = "img-sha1",
				.cra_priority = 300,
				.cra_flags =
				CRYPTO_ALG_ASYNC |
				CRYPTO_ALG_NEED_FALLBACK,
				.cra_blocksize = SHA1_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct img_hash_ctx),
				.cra_init = img_hash_cra_sha1_init,
				.cra_exit = img_hash_cra_exit,
				.cra_module = THIS_MODULE,
			}
		}
	},
	{
		.init = img_hash_init,
		.update = img_hash_update,
		.final = img_hash_final,
		.finup = img_hash_finup,
		.export = img_hash_export,
		.import = img_hash_import,
		.digest = img_hash_digest,
		.halg = {
			.digestsize = SHA224_DIGEST_SIZE,
			.statesize = sizeof(struct sha256_state),
			.base = {
				.cra_name = "sha224",
				.cra_driver_name = "img-sha224",
				.cra_priority = 300,
				.cra_flags =
				CRYPTO_ALG_ASYNC |
				CRYPTO_ALG_NEED_FALLBACK,
				.cra_blocksize = SHA224_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct img_hash_ctx),
				.cra_init = img_hash_cra_sha224_init,
				.cra_exit = img_hash_cra_exit,
				.cra_module = THIS_MODULE,
			}
		}
	},
	{
		.init = img_hash_init,
		.update = img_hash_update,
		.final = img_hash_final,
		.finup = img_hash_finup,
		.export = img_hash_export,
		.import = img_hash_import,
		.digest = img_hash_digest,
		.halg = {
			.digestsize = SHA256_DIGEST_SIZE,
			.statesize = sizeof(struct sha256_state),
			.base = {
				.cra_name = "sha256",
				.cra_driver_name = "img-sha256",
				.cra_priority = 300,
				.cra_flags =
				CRYPTO_ALG_ASYNC |
				CRYPTO_ALG_NEED_FALLBACK,
				.cra_blocksize = SHA256_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct img_hash_ctx),
				.cra_init = img_hash_cra_sha256_init,
				.cra_exit = img_hash_cra_exit,
				.cra_module = THIS_MODULE,
			}
		}
	}
};

static int img_register_algs(struct img_hash_dev *hdev)
{
	int i, err;

	for (i = 0; i < ARRAY_SIZE(img_algs); i++) {
		err = crypto_register_ahash(&img_algs[i]);
		if (err)
			goto err_reg;
	}
	return 0;

err_reg:
	while (i--)
		crypto_unregister_ahash(&img_algs[i]);

	return err;
}

static int img_unregister_algs(struct img_hash_dev *hdev)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(img_algs); i++)
		crypto_unregister_ahash(&img_algs[i]);

	return 0;
}

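/*
 * Completion tasklet: tear down DMA state, collect any error and finish
 * the current request, or restart the queue if the hardware is idle.
 */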
static void img_hash_done_task(unsigned long data)
{
	struct img_hash_dev *hdev = (struct img_hash_dev *)data;
	int err = 0;

	if (hdev->err == -EINVAL) {
		err = hdev->err;
		goto finish;
	}

	if (!(DRIVER_FLAGS_BUSY & hdev->flags)) {
		img_hash_handle_queue(hdev, NULL);
		return;
	}

	if (DRIVER_FLAGS_CPU & hdev->flags) {
		if (DRIVER_FLAGS_OUTPUT_READY & hdev->flags) {
			hdev->flags &= ~DRIVER_FLAGS_OUTPUT_READY;
			goto finish;
		}
	} else if (DRIVER_FLAGS_DMA_READY & hdev->flags) {
		if (DRIVER_FLAGS_DMA_ACTIVE & hdev->flags) {
			hdev->flags &= ~DRIVER_FLAGS_DMA_ACTIVE;
			img_hash_write_via_dma_stop(hdev);
			if (hdev->err) {
				err = hdev->err;
				goto finish;
			}
		}
		if (DRIVER_FLAGS_OUTPUT_READY & hdev->flags) {
			hdev->flags &= ~(DRIVER_FLAGS_DMA_READY |
					DRIVER_FLAGS_OUTPUT_READY);
			goto finish;
		}
	}
	return;

finish:
	img_hash_finish_req(hdev->req, err);
}

static const struct of_device_id img_hash_match[] = {
	{ .compatible = "img,hash-accelerator" },
	{}
};
MODULE_DEVICE_TABLE(of, img_hash_match);

static int img_hash_probe(struct platform_device *pdev)
{
	struct img_hash_dev *hdev;
	struct device *dev = &pdev->dev;
	struct resource *hash_res;
	int irq;
	int err;

	hdev = devm_kzalloc(dev, sizeof(*hdev), GFP_KERNEL);
	if (hdev == NULL)
		return -ENOMEM;

	spin_lock_init(&hdev->lock);

	hdev->dev = dev;

	platform_set_drvdata(pdev, hdev);

	INIT_LIST_HEAD(&hdev->list);

	tasklet_init(&hdev->done_task, img_hash_done_task, (unsigned long)hdev);
	tasklet_init(&hdev->dma_task, img_hash_dma_task, (unsigned long)hdev);

	crypto_init_queue(&hdev->queue, IMG_HASH_QUEUE_LENGTH);

	/* Register bank */
	hdev->io_base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(hdev->io_base)) {
		err = PTR_ERR(hdev->io_base);
		dev_err(dev, "can't ioremap, returned %d\n", err);
		goto res_err;
	}

	/* Write port (DMA or CPU) */
	hash_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	hdev->cpu_addr = devm_ioremap_resource(dev, hash_res);
	if (IS_ERR(hdev->cpu_addr)) {
		dev_err(dev, "can't ioremap write port\n");
		err = PTR_ERR(hdev->cpu_addr);
		goto res_err;
	}
	hdev->bus_addr = hash_res->start;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		err = irq;
		goto res_err;
	}

	err = devm_request_irq(dev, irq, img_irq_handler, 0,
			       dev_name(dev), hdev);
	if (err) {
		dev_err(dev, "unable to request irq\n");
		goto res_err;
	}
	dev_dbg(dev, "using IRQ channel %d\n", irq);

	hdev->hash_clk = devm_clk_get(&pdev->dev, "hash");
	if (IS_ERR(hdev->hash_clk)) {
		dev_err(dev, "clock initialization failed.\n");
		err = PTR_ERR(hdev->hash_clk);
		goto res_err;
	}

	hdev->sys_clk = devm_clk_get(&pdev->dev, "sys");
	if (IS_ERR(hdev->sys_clk)) {
		dev_err(dev, "clock initialization failed.\n");
		err = PTR_ERR(hdev->sys_clk);
		goto res_err;
	}

	err = clk_prepare_enable(hdev->hash_clk);
	if (err)
		goto res_err;

	err = clk_prepare_enable(hdev->sys_clk);
	if (err)
		goto clk_err;

	err = img_hash_dma_init(hdev);
	if (err)
		goto dma_err;

	dev_dbg(dev, "using %s for DMA transfers\n",
		dma_chan_name(hdev->dma_lch));

	spin_lock(&img_hash.lock);
	list_add_tail(&hdev->list, &img_hash.dev_list);
	spin_unlock(&img_hash.lock);

	err = img_register_algs(hdev);
	if (err)
		goto err_algs;
	dev_info(dev, "Img MD5/SHA1/SHA224/SHA256 Hardware accelerator initialized\n");

	return 0;

err_algs:
	spin_lock(&img_hash.lock);
	list_del(&hdev->list);
	spin_unlock(&img_hash.lock);
	dma_release_channel(hdev->dma_lch);
dma_err:
	clk_disable_unprepare(hdev->sys_clk);
clk_err:
	clk_disable_unprepare(hdev->hash_clk);
res_err:
	tasklet_kill(&hdev->done_task);
	tasklet_kill(&hdev->dma_task);

	return err;
}

static int img_hash_remove(struct platform_device *pdev)
{
	struct img_hash_dev *hdev;

	hdev = platform_get_drvdata(pdev);
	spin_lock(&img_hash.lock);
	list_del(&hdev->list);
	spin_unlock(&img_hash.lock);

	img_unregister_algs(hdev);

	tasklet_kill(&hdev->done_task);
	tasklet_kill(&hdev->dma_task);

	dma_release_channel(hdev->dma_lch);

	clk_disable_unprepare(hdev->hash_clk);
	clk_disable_unprepare(hdev->sys_clk);

	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int img_hash_suspend(struct device *dev)
{
	struct img_hash_dev *hdev = dev_get_drvdata(dev);

	clk_disable_unprepare(hdev->hash_clk);
	clk_disable_unprepare(hdev->sys_clk);

	return 0;
}

static int img_hash_resume(struct device *dev)
{
	struct img_hash_dev *hdev = dev_get_drvdata(dev);
	int ret;

	ret = clk_prepare_enable(hdev->hash_clk);
	if (ret)
		return ret;

	ret = clk_prepare_enable(hdev->sys_clk);
	if (ret) {
		clk_disable_unprepare(hdev->hash_clk);
		return ret;
	}

	return 0;
}
#endif /* CONFIG_PM_SLEEP */

static const struct dev_pm_ops img_hash_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(img_hash_suspend, img_hash_resume)
};

static struct platform_driver img_hash_driver = {
	.probe		= img_hash_probe,
	.remove		= img_hash_remove,
	.driver		= {
		.name	= "img-hash-accelerator",
		.pm	= &img_hash_pm_ops,
		.of_match_table	= of_match_ptr(img_hash_match),
	}
};
module_platform_driver(img_hash_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Imgtec SHA1/224/256 & MD5 hw accelerator driver");
MODULE_AUTHOR("Will Thomas.");
MODULE_AUTHOR("James Hartley <james.hartley@imgtec.com>");