/*
 * Support for SAHARA cryptographic accelerator.
 *
 * Copyright (c) 2014 Steffen Trumtrar <s.trumtrar@pengutronix.de>
 * Copyright (c) 2013 Vista Silicon S.L.
 * Author: Javier Martin <javier.martin@vista-silicon.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * Based on omap-aes.c and tegra-aes.c
 */

#include <crypto/aes.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <crypto/sha.h>

#include <linux/clk.h>
#include <linux/crypto.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#define SHA_BUFFER_LEN		PAGE_SIZE
#define SAHARA_MAX_SHA_BLOCK_SIZE	SHA256_BLOCK_SIZE

#define SAHARA_NAME "sahara"
#define SAHARA_VERSION_3	3
#define SAHARA_VERSION_4	4
#define SAHARA_TIMEOUT_MS	1000
#define SAHARA_MAX_HW_DESC	2
#define SAHARA_MAX_HW_LINK	20
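/*
 * Every operation is described to the engine with at most two descriptors
 * (e.g. one for the AES key/IV, one for the payload) and a pool of 20 link
 * entries that is shared between the input and output scatterlists;
 * sahara_hw_descriptor_create() below checks nb_in_sg + nb_out_sg against
 * SAHARA_MAX_HW_LINK for exactly this reason.
 */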
#define FLAGS_MODE_MASK		0x000f
#define FLAGS_ENCRYPT		BIT(0)
#define FLAGS_CBC		BIT(1)
#define SAHARA_HDR_BASE			0x00800000
#define SAHARA_HDR_SKHA_ALG_AES		0
#define SAHARA_HDR_SKHA_OP_ENC		(1 << 2)
#define SAHARA_HDR_SKHA_MODE_ECB	(0 << 3)
#define SAHARA_HDR_SKHA_MODE_CBC	(1 << 3)
#define SAHARA_HDR_FORM_DATA		(5 << 16)
#define SAHARA_HDR_FORM_KEY		(8 << 16)
#define SAHARA_HDR_LLO			(1 << 24)
#define SAHARA_HDR_CHA_SKHA		(1 << 28)
#define SAHARA_HDR_CHA_MDHA		(2 << 28)
#define SAHARA_HDR_PARITY_BIT		(1 << 31)
#define SAHARA_HDR_MDHA_SET_MODE_MD_KEY	0x20880000
#define SAHARA_HDR_MDHA_SET_MODE_HASH	0x208D0000
#define SAHARA_HDR_MDHA_HASH		0xA0850000
#define SAHARA_HDR_MDHA_STORE_DIGEST	0x20820000
#define SAHARA_HDR_MDHA_ALG_SHA1	0
#define SAHARA_HDR_MDHA_ALG_MD5		1
#define SAHARA_HDR_MDHA_ALG_SHA256	2
#define SAHARA_HDR_MDHA_ALG_SHA224	3
#define SAHARA_HDR_MDHA_PDATA		(1 << 2)
#define SAHARA_HDR_MDHA_HMAC		(1 << 3)
#define SAHARA_HDR_MDHA_INIT		(1 << 5)
#define SAHARA_HDR_MDHA_IPAD		(1 << 6)
#define SAHARA_HDR_MDHA_OPAD		(1 << 7)
#define SAHARA_HDR_MDHA_SWAP		(1 << 8)
#define SAHARA_HDR_MDHA_MAC_FULL	(1 << 9)
#define SAHARA_HDR_MDHA_SSL		(1 << 10)
/* SAHARA can only process one request at a time */
#define SAHARA_QUEUE_LENGTH	1
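/*
 * Illustrative sketch only (not part of this driver): consumers reach this
 * code through the generic crypto API, which resolves "cbc(aes)" to the
 * "sahara-cbc-aes" implementation registered below. Error handling elided,
 * key/src/dst/iv assumed set up by the caller:
 *
 *	struct crypto_skcipher *tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
 *	struct skcipher_request *req = skcipher_request_alloc(tfm, GFP_KERNEL);
 *
 *	crypto_skcipher_setkey(tfm, key, AES_KEYSIZE_128);
 *	skcipher_request_set_crypt(req, src, dst, nbytes, iv);
 *	crypto_skcipher_encrypt(req);
 *
 * The request ends up in sahara_aes_crypt(), is queued, and the kthread
 * below feeds it to the hardware, one request at a time.
 */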
#define SAHARA_REG_VERSION	0x00
#define SAHARA_REG_DAR		0x04
#define SAHARA_REG_CONTROL	0x08
#define	SAHARA_CONTROL_SET_THROTTLE(x)	(((x) & 0xff) << 24)
#define	SAHARA_CONTROL_SET_MAXBURST(x)	(((x) & 0xff) << 16)
#define	SAHARA_CONTROL_RNG_AUTORSD	(1 << 7)
#define	SAHARA_CONTROL_ENABLE_INT	(1 << 4)
#define SAHARA_REG_CMD		0x0C
#define	SAHARA_CMD_RESET	(1 << 0)
#define	SAHARA_CMD_CLEAR_INT	(1 << 8)
#define	SAHARA_CMD_CLEAR_ERR	(1 << 9)
#define	SAHARA_CMD_SINGLE_STEP	(1 << 10)
#define	SAHARA_CMD_MODE_BATCH	(1 << 16)
#define	SAHARA_CMD_MODE_DEBUG	(1 << 18)
#define SAHARA_REG_STATUS	0x10
#define	SAHARA_STATUS_GET_STATE(x)	((x) & 0x7)
#define		SAHARA_STATE_IDLE	0
#define		SAHARA_STATE_BUSY	1
#define		SAHARA_STATE_ERR	2
#define		SAHARA_STATE_FAULT	3
#define		SAHARA_STATE_COMPLETE	4
#define		SAHARA_STATE_COMP_FLAG	(1 << 2)
#define	SAHARA_STATUS_DAR_FULL		(1 << 3)
#define	SAHARA_STATUS_ERROR		(1 << 4)
#define	SAHARA_STATUS_SECURE		(1 << 5)
#define	SAHARA_STATUS_FAIL		(1 << 6)
#define	SAHARA_STATUS_INIT		(1 << 7)
#define	SAHARA_STATUS_RNG_RESEED	(1 << 8)
#define	SAHARA_STATUS_ACTIVE_RNG	(1 << 9)
#define	SAHARA_STATUS_ACTIVE_MDHA	(1 << 10)
#define	SAHARA_STATUS_ACTIVE_SKHA	(1 << 11)
#define	SAHARA_STATUS_MODE_BATCH	(1 << 16)
#define	SAHARA_STATUS_MODE_DEDICATED	(1 << 17)
#define	SAHARA_STATUS_MODE_DEBUG	(1 << 18)
#define	SAHARA_STATUS_GET_ISTATE(x)	(((x) >> 24) & 0xff)
#define SAHARA_REG_ERRSTATUS	0x14
#define	SAHARA_ERRSTATUS_GET_SOURCE(x)	((x) & 0xf)
#define		SAHARA_ERRSOURCE_CHA	14
#define		SAHARA_ERRSOURCE_DMA	15
#define	SAHARA_ERRSTATUS_DMA_DIR	(1 << 8)
#define	SAHARA_ERRSTATUS_GET_DMASZ(x)	(((x) >> 9) & 0x3)
#define	SAHARA_ERRSTATUS_GET_DMASRC(x)	(((x) >> 13) & 0x7)
#define	SAHARA_ERRSTATUS_GET_CHASRC(x)	(((x) >> 16) & 0xfff)
#define	SAHARA_ERRSTATUS_GET_CHAERR(x)	(((x) >> 28) & 0x3)
#define SAHARA_REG_FADDR	0x18
#define SAHARA_REG_CDAR		0x1C
#define SAHARA_REG_IDAR		0x20
struct sahara_hw_desc {
	u32 hdr;
	u32 len1;
	u32 p1;
	u32 len2;
	u32 p2;
	u32 next;
};

struct sahara_hw_link {
	u32 len;
	u32 p;
	u32 next;
};

struct sahara_ctx {
	unsigned int keylen;
	/* AES-specific context */
	u8 key[AES_KEYSIZE_128];
	struct crypto_skcipher *fallback;
};

struct sahara_aes_reqctx {
	unsigned long mode;
};
/**
 * struct sahara_sha_reqctx - private data per request
 * @buf: holds data for requests smaller than block_size
 * @rembuf: used to prepare one block_size-aligned request
 * @context: hw-specific context for request. Digest is extracted from this
 * @mode: specifies what type of hw-descriptor needs to be built
 * @digest_size: length of digest for this request
 * @context_size: length of hw-context for this request.
 *		  Always digest_size + 4
 * @buf_cnt: number of bytes saved in buf
 * @sg_in_idx: number of hw links
 * @in_sg: scatterlist for input data
 * @in_sg_chain: scatterlists for chained input data
 * @total: total number of bytes for transfer
 * @last: is this the last block
 * @first: is this the first block
 * @active: inside a transfer
 */
struct sahara_sha_reqctx {
	u8 buf[SAHARA_MAX_SHA_BLOCK_SIZE];
	u8 rembuf[SAHARA_MAX_SHA_BLOCK_SIZE];
	u8 context[SHA256_DIGEST_SIZE + 4];
	unsigned int mode;
	unsigned int digest_size;
	unsigned int context_size;
	unsigned int buf_cnt;
	unsigned int sg_in_idx;
	struct scatterlist *in_sg;
	struct scatterlist in_sg_chain[2];
	size_t total;
	unsigned int last;
	unsigned int first;
	unsigned int active;
};
struct sahara_dev {
	struct device *device;
	unsigned int version;
	void __iomem *regs_base;
	struct clk *clk_ipg;
	struct clk *clk_ahb;
	struct mutex queue_mutex;
	struct task_struct *kthread;
	struct completion dma_completion;
	struct sahara_ctx *ctx;
	struct crypto_queue queue;
	unsigned long flags;
	struct sahara_hw_desc *hw_desc[SAHARA_MAX_HW_DESC];
	dma_addr_t hw_phys_desc[SAHARA_MAX_HW_DESC];
	u8 *key_base;
	dma_addr_t key_phys_base;
	u8 *iv_base;
	dma_addr_t iv_phys_base;
	u8 *context_base;
	dma_addr_t context_phys_base;
	struct sahara_hw_link *hw_link[SAHARA_MAX_HW_LINK];
	dma_addr_t hw_phys_link[SAHARA_MAX_HW_LINK];
	size_t total;
	struct scatterlist *in_sg;
	int nb_in_sg;
	struct scatterlist *out_sg;
	int nb_out_sg;
	u32 error;
};
static struct sahara_dev *dev_ptr;

static inline void sahara_write(struct sahara_dev *dev, u32 data, u32 reg)
{
	writel(data, dev->regs_base + reg);
}

static inline unsigned int sahara_read(struct sahara_dev *dev, u32 reg)
{
	return readl(dev->regs_base + reg);
}
static u32 sahara_aes_key_hdr(struct sahara_dev *dev)
{
	u32 hdr = SAHARA_HDR_BASE | SAHARA_HDR_SKHA_ALG_AES |
			SAHARA_HDR_FORM_KEY | SAHARA_HDR_LLO |
			SAHARA_HDR_CHA_SKHA | SAHARA_HDR_PARITY_BIT;

	if (dev->flags & FLAGS_CBC) {
		hdr |= SAHARA_HDR_SKHA_MODE_CBC;
		hdr ^= SAHARA_HDR_PARITY_BIT;
	}

	if (dev->flags & FLAGS_ENCRYPT) {
		hdr |= SAHARA_HDR_SKHA_OP_ENC;
		hdr ^= SAHARA_HDR_PARITY_BIT;
	}

	return hdr;
}

static u32 sahara_aes_data_link_hdr(struct sahara_dev *dev)
{
	return SAHARA_HDR_BASE | SAHARA_HDR_FORM_DATA |
			SAHARA_HDR_CHA_SKHA | SAHARA_HDR_PARITY_BIT;
}
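/*
 * Note on the parity toggles above: the DMA engine requires every header
 * word to have odd parity. The base headers already contain an odd number
 * of set bits (SAHARA_HDR_PARITY_BIT included), so each single-bit flag
 * ORed in afterwards flips the parity bit to keep the population count
 * odd. Worked example: the AES key header starts with 5 bits set; adding
 * SAHARA_HDR_SKHA_MODE_CBC would make it 6, so the parity bit is cleared,
 * bringing the count back to 5.
 */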
static const char *sahara_err_src[16] = {
	"No error",
	"Header error",
	"Descriptor length error",
	"Descriptor length or pointer error",
	"Link length error",
	"Link pointer error",
	"Input buffer error",
	"Output buffer error",
	"Output buffer starvation",
	"Internal state fault",
	"General descriptor problem",
	"Reserved",
	"Descriptor address error",
	"Link address error",
	"CHA error",
	"DMA error"
};

static const char *sahara_err_dmasize[4] = {
	"Byte transfer",
	"Half-word transfer",
	"Word transfer",
	"Reserved"
};

static const char *sahara_err_dmasrc[8] = {
	"No error",
	"AHB bus error",
	"Internal IP bus error",
	"Parity error",
	"DMA crosses 256 byte boundary",
	"DMA is busy",
	"Reserved",
	"DMA HW error"
};

static const char *sahara_cha_errsrc[12] = {
	"Input buffer non-empty",
	"Illegal address",
	"Illegal mode",
	"Illegal data size",
	"Illegal key size",
	"Write during processing",
	"CTX read during processing",
	"HW error",
	"Input buffer disabled/underflow",
	"Output buffer disabled/overflow",
	"DES key parity error",
	"Reserved"
};

static const char *sahara_cha_err[4] = { "No error", "SKHA", "MDHA", "RNG" };
static void sahara_decode_error(struct sahara_dev *dev, unsigned int error)
{
	u8 source = SAHARA_ERRSTATUS_GET_SOURCE(error);
	u16 chasrc = ffs(SAHARA_ERRSTATUS_GET_CHASRC(error));

	dev_err(dev->device, "%s: Error Register = 0x%08x\n", __func__, error);

	dev_err(dev->device, " - %s.\n", sahara_err_src[source]);

	if (source == SAHARA_ERRSOURCE_DMA) {
		if (error & SAHARA_ERRSTATUS_DMA_DIR)
			dev_err(dev->device, " * DMA read.\n");
		else
			dev_err(dev->device, " * DMA write.\n");

		dev_err(dev->device, " * %s.\n",
			sahara_err_dmasize[SAHARA_ERRSTATUS_GET_DMASZ(error)]);
		dev_err(dev->device, " * %s.\n",
			sahara_err_dmasrc[SAHARA_ERRSTATUS_GET_DMASRC(error)]);
	} else if (source == SAHARA_ERRSOURCE_CHA) {
		dev_err(dev->device, " * %s.\n",
			sahara_cha_errsrc[chasrc]);
		dev_err(dev->device, " * %s.\n",
			sahara_cha_err[SAHARA_ERRSTATUS_GET_CHAERR(error)]);
	}
	dev_err(dev->device, "\n");
}
static const char *sahara_state[4] = { "Idle", "Busy", "Error", "HW Fault" };

static void sahara_decode_status(struct sahara_dev *dev, unsigned int status)
{
	u8 state;

	if (!IS_ENABLED(DEBUG))
		return;

	state = SAHARA_STATUS_GET_STATE(status);

	dev_dbg(dev->device, "%s: Status Register = 0x%08x\n",
		__func__, status);

	dev_dbg(dev->device, " - State = %d:\n", state);
	if (state & SAHARA_STATE_COMP_FLAG)
		dev_dbg(dev->device, " * Descriptor completed. IRQ pending.\n");

	dev_dbg(dev->device, " * %s.\n",
		sahara_state[state & ~SAHARA_STATE_COMP_FLAG]);

	if (status & SAHARA_STATUS_DAR_FULL)
		dev_dbg(dev->device, " - DAR Full.\n");
	if (status & SAHARA_STATUS_ERROR)
		dev_dbg(dev->device, " - Error.\n");
	if (status & SAHARA_STATUS_SECURE)
		dev_dbg(dev->device, " - Secure.\n");
	if (status & SAHARA_STATUS_FAIL)
		dev_dbg(dev->device, " - Fail.\n");
	if (status & SAHARA_STATUS_RNG_RESEED)
		dev_dbg(dev->device, " - RNG Reseed Request.\n");
	if (status & SAHARA_STATUS_ACTIVE_RNG)
		dev_dbg(dev->device, " - RNG Active.\n");
	if (status & SAHARA_STATUS_ACTIVE_MDHA)
		dev_dbg(dev->device, " - MDHA Active.\n");
	if (status & SAHARA_STATUS_ACTIVE_SKHA)
		dev_dbg(dev->device, " - SKHA Active.\n");

	if (status & SAHARA_STATUS_MODE_BATCH)
		dev_dbg(dev->device, " - Batch Mode.\n");
	else if (status & SAHARA_STATUS_MODE_DEDICATED)
		dev_dbg(dev->device, " - Dedicated Mode.\n");
	else if (status & SAHARA_STATUS_MODE_DEBUG)
		dev_dbg(dev->device, " - Debug Mode.\n");

	dev_dbg(dev->device, " - Internal state = 0x%02x\n",
		SAHARA_STATUS_GET_ISTATE(status));

	dev_dbg(dev->device, "Current DAR: 0x%08x\n",
		sahara_read(dev, SAHARA_REG_CDAR));
	dev_dbg(dev->device, "Initial DAR: 0x%08x\n\n",
		sahara_read(dev, SAHARA_REG_IDAR));
}
static void sahara_dump_descriptors(struct sahara_dev *dev)
{
	int i;

	if (!IS_ENABLED(DEBUG))
		return;

	for (i = 0; i < SAHARA_MAX_HW_DESC; i++) {
		dev_dbg(dev->device, "Descriptor (%d) (%pad):\n",
			i, &dev->hw_phys_desc[i]);
		dev_dbg(dev->device, "\thdr = 0x%08x\n", dev->hw_desc[i]->hdr);
		dev_dbg(dev->device, "\tlen1 = %u\n", dev->hw_desc[i]->len1);
		dev_dbg(dev->device, "\tp1 = 0x%08x\n", dev->hw_desc[i]->p1);
		dev_dbg(dev->device, "\tlen2 = %u\n", dev->hw_desc[i]->len2);
		dev_dbg(dev->device, "\tp2 = 0x%08x\n", dev->hw_desc[i]->p2);
		dev_dbg(dev->device, "\tnext = 0x%08x\n",
			dev->hw_desc[i]->next);
	}
	dev_dbg(dev->device, "\n");
}
static void sahara_dump_links(struct sahara_dev *dev)
{
	int i;

	if (!IS_ENABLED(DEBUG))
		return;

	for (i = 0; i < SAHARA_MAX_HW_LINK; i++) {
		dev_dbg(dev->device, "Link (%d) (%pad):\n",
			i, &dev->hw_phys_link[i]);
		dev_dbg(dev->device, "\tlen = %u\n", dev->hw_link[i]->len);
		dev_dbg(dev->device, "\tp = 0x%08x\n", dev->hw_link[i]->p);
		dev_dbg(dev->device, "\tnext = 0x%08x\n",
			dev->hw_link[i]->next);
	}
	dev_dbg(dev->device, "\n");
}
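/*
 * Builds the two-descriptor chain consumed by the engine for one AES
 * request: hw_desc[0] loads the key (and, for CBC, the IV), hw_desc[1]
 * points at the input and output link lists and carries the payload
 * length. Writing the physical address of hw_desc[0] to the DAR register
 * at the end kicks off processing.
 */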
static int sahara_hw_descriptor_create(struct sahara_dev *dev)
{
	struct sahara_ctx *ctx = dev->ctx;
	struct scatterlist *sg;
	int ret;
	int i, j;
	int idx = 0;
	u32 len;

	memcpy(dev->key_base, ctx->key, ctx->keylen);

	if (dev->flags & FLAGS_CBC) {
		dev->hw_desc[idx]->len1 = AES_BLOCK_SIZE;
		dev->hw_desc[idx]->p1 = dev->iv_phys_base;
	} else {
		dev->hw_desc[idx]->len1 = 0;
		dev->hw_desc[idx]->p1 = 0;
	}
	dev->hw_desc[idx]->len2 = ctx->keylen;
	dev->hw_desc[idx]->p2 = dev->key_phys_base;
	dev->hw_desc[idx]->next = dev->hw_phys_desc[1];
	dev->hw_desc[idx]->hdr = sahara_aes_key_hdr(dev);

	idx++;

	dev->nb_in_sg = sg_nents_for_len(dev->in_sg, dev->total);
	if (dev->nb_in_sg < 0) {
		dev_err(dev->device, "Invalid number of src SG entries.\n");
		return dev->nb_in_sg;
	}
	dev->nb_out_sg = sg_nents_for_len(dev->out_sg, dev->total);
	if (dev->nb_out_sg < 0) {
		dev_err(dev->device, "Invalid number of dst SG entries.\n");
		return dev->nb_out_sg;
	}
	if ((dev->nb_in_sg + dev->nb_out_sg) > SAHARA_MAX_HW_LINK) {
		dev_err(dev->device, "not enough hw links (%d)\n",
			dev->nb_in_sg + dev->nb_out_sg);
		return -EINVAL;
	}

	ret = dma_map_sg(dev->device, dev->in_sg, dev->nb_in_sg,
			 DMA_TO_DEVICE);
	if (ret != dev->nb_in_sg) {
		dev_err(dev->device, "couldn't map in sg\n");
		return -EINVAL;
	}
	ret = dma_map_sg(dev->device, dev->out_sg, dev->nb_out_sg,
			 DMA_FROM_DEVICE);
	if (ret != dev->nb_out_sg) {
		dev_err(dev->device, "couldn't map out sg\n");
		goto unmap_in;
	}

	/* Create input links */
	dev->hw_desc[idx]->p1 = dev->hw_phys_link[0];
	sg = dev->in_sg;
	len = dev->total;
	for (i = 0; i < dev->nb_in_sg; i++) {
		dev->hw_link[i]->len = min(len, sg->length);
		dev->hw_link[i]->p = sg->dma_address;
		if (i == (dev->nb_in_sg - 1)) {
			dev->hw_link[i]->next = 0;
		} else {
			len -= min(len, sg->length);
			dev->hw_link[i]->next = dev->hw_phys_link[i + 1];
			sg = sg_next(sg);
		}
	}

	/* Create output links */
	dev->hw_desc[idx]->p2 = dev->hw_phys_link[i];
	sg = dev->out_sg;
	len = dev->total;
	for (j = i; j < dev->nb_out_sg + i; j++) {
		dev->hw_link[j]->len = min(len, sg->length);
		dev->hw_link[j]->p = sg->dma_address;
		if (j == (dev->nb_out_sg + i - 1)) {
			dev->hw_link[j]->next = 0;
		} else {
			len -= min(len, sg->length);
			dev->hw_link[j]->next = dev->hw_phys_link[j + 1];
			sg = sg_next(sg);
		}
	}

	/* Fill remaining fields of hw_desc[1] */
	dev->hw_desc[idx]->hdr = sahara_aes_data_link_hdr(dev);
	dev->hw_desc[idx]->len1 = dev->total;
	dev->hw_desc[idx]->len2 = dev->total;
	dev->hw_desc[idx]->next = 0;

	sahara_dump_descriptors(dev);
	sahara_dump_links(dev);

	sahara_write(dev, dev->hw_phys_desc[0], SAHARA_REG_DAR);

	return 0;

unmap_in:
	dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
		     DMA_TO_DEVICE);

	return -EINVAL;
}
static int sahara_aes_process(struct ablkcipher_request *req)
{
	struct sahara_dev *dev = dev_ptr;
	struct sahara_ctx *ctx;
	struct sahara_aes_reqctx *rctx;
	int ret;
	unsigned long timeout;

	/* Request is ready to be dispatched by the device */
	dev_dbg(dev->device,
		"dispatch request (nbytes=%d, src=%p, dst=%p)\n",
		req->nbytes, req->src, req->dst);

	/* assign new request to device */
	dev->total = req->nbytes;
	dev->in_sg = req->src;
	dev->out_sg = req->dst;

	rctx = ablkcipher_request_ctx(req);
	ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req));
	rctx->mode &= FLAGS_MODE_MASK;
	dev->flags = (dev->flags & ~FLAGS_MODE_MASK) | rctx->mode;

	if ((dev->flags & FLAGS_CBC) && req->info)
		memcpy(dev->iv_base, req->info, AES_KEYSIZE_128);

	/* assign new context to device */
	dev->ctx = ctx;

	reinit_completion(&dev->dma_completion);

	ret = sahara_hw_descriptor_create(dev);
	if (ret)
		return -EINVAL;

	timeout = wait_for_completion_timeout(&dev->dma_completion,
				msecs_to_jiffies(SAHARA_TIMEOUT_MS));

	dma_unmap_sg(dev->device, dev->out_sg, dev->nb_out_sg,
		     DMA_FROM_DEVICE);
	dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
		     DMA_TO_DEVICE);

	if (!timeout) {
		dev_err(dev->device, "AES timeout\n");
		return -ETIMEDOUT;
	}

	return 0;
}
static int sahara_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
			     unsigned int keylen)
{
	struct sahara_ctx *ctx = crypto_ablkcipher_ctx(tfm);
	int ret;

	ctx->keylen = keylen;

	/* SAHARA only supports 128-bit keys */
	if (keylen == AES_KEYSIZE_128) {
		memcpy(ctx->key, key, keylen);
		return 0;
	}

	if (keylen != AES_KEYSIZE_192 && keylen != AES_KEYSIZE_256)
		return -EINVAL;

	/*
	 * The requested key size is not supported by HW, do a fallback.
	 */
	crypto_skcipher_clear_flags(ctx->fallback, CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(ctx->fallback, tfm->base.crt_flags &
						 CRYPTO_TFM_REQ_MASK);

	ret = crypto_skcipher_setkey(ctx->fallback, key, keylen);

	tfm->base.crt_flags &= ~CRYPTO_TFM_RES_MASK;
	tfm->base.crt_flags |= crypto_skcipher_get_flags(ctx->fallback) &
			       CRYPTO_TFM_RES_MASK;
	return ret;
}
static int sahara_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
{
	struct sahara_aes_reqctx *rctx = ablkcipher_request_ctx(req);
	struct sahara_dev *dev = dev_ptr;
	int err = 0;

	dev_dbg(dev->device, "nbytes: %d, enc: %d, cbc: %d\n",
		req->nbytes, !!(mode & FLAGS_ENCRYPT), !!(mode & FLAGS_CBC));

	if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE)) {
		dev_err(dev->device,
			"request size is not an exact number of AES blocks\n");
		return -EINVAL;
	}

	rctx->mode = mode;

	mutex_lock(&dev->queue_mutex);
	err = ablkcipher_enqueue_request(&dev->queue, req);
	mutex_unlock(&dev->queue_mutex);

	wake_up_process(dev->kthread);

	return err;
}
static int sahara_aes_ecb_encrypt(struct ablkcipher_request *req)
{
	struct sahara_ctx *ctx = crypto_ablkcipher_ctx(
		crypto_ablkcipher_reqtfm(req));
	int err;

	if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
		SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);

		skcipher_request_set_tfm(subreq, ctx->fallback);
		skcipher_request_set_callback(subreq, req->base.flags,
					      NULL, NULL);
		skcipher_request_set_crypt(subreq, req->src, req->dst,
					   req->nbytes, req->info);
		err = crypto_skcipher_encrypt(subreq);
		skcipher_request_zero(subreq);
		return err;
	}

	return sahara_aes_crypt(req, FLAGS_ENCRYPT);
}
static int sahara_aes_ecb_decrypt(struct ablkcipher_request *req)
{
	struct sahara_ctx *ctx = crypto_ablkcipher_ctx(
		crypto_ablkcipher_reqtfm(req));
	int err;

	if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
		SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);

		skcipher_request_set_tfm(subreq, ctx->fallback);
		skcipher_request_set_callback(subreq, req->base.flags,
					      NULL, NULL);
		skcipher_request_set_crypt(subreq, req->src, req->dst,
					   req->nbytes, req->info);
		err = crypto_skcipher_decrypt(subreq);
		skcipher_request_zero(subreq);
		return err;
	}

	return sahara_aes_crypt(req, 0);
}
static int sahara_aes_cbc_encrypt(struct ablkcipher_request *req)
{
	struct sahara_ctx *ctx = crypto_ablkcipher_ctx(
		crypto_ablkcipher_reqtfm(req));
	int err;

	if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
		SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);

		skcipher_request_set_tfm(subreq, ctx->fallback);
		skcipher_request_set_callback(subreq, req->base.flags,
					      NULL, NULL);
		skcipher_request_set_crypt(subreq, req->src, req->dst,
					   req->nbytes, req->info);
		err = crypto_skcipher_encrypt(subreq);
		skcipher_request_zero(subreq);
		return err;
	}

	return sahara_aes_crypt(req, FLAGS_ENCRYPT | FLAGS_CBC);
}
static int sahara_aes_cbc_decrypt(struct ablkcipher_request *req)
{
	struct sahara_ctx *ctx = crypto_ablkcipher_ctx(
		crypto_ablkcipher_reqtfm(req));
	int err;

	if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
		SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);

		skcipher_request_set_tfm(subreq, ctx->fallback);
		skcipher_request_set_callback(subreq, req->base.flags,
					      NULL, NULL);
		skcipher_request_set_crypt(subreq, req->src, req->dst,
					   req->nbytes, req->info);
		err = crypto_skcipher_decrypt(subreq);
		skcipher_request_zero(subreq);
		return err;
	}

	return sahara_aes_crypt(req, FLAGS_CBC);
}
static int sahara_aes_cra_init(struct crypto_tfm *tfm)
{
	const char *name = crypto_tfm_alg_name(tfm);
	struct sahara_ctx *ctx = crypto_tfm_ctx(tfm);

	ctx->fallback = crypto_alloc_skcipher(name, 0,
					      CRYPTO_ALG_ASYNC |
					      CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(ctx->fallback)) {
		pr_err("Error allocating fallback algo %s\n", name);
		return PTR_ERR(ctx->fallback);
	}

	tfm->crt_ablkcipher.reqsize = sizeof(struct sahara_aes_reqctx);

	return 0;
}
static void sahara_aes_cra_exit(struct crypto_tfm *tfm)
{
	struct sahara_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_free_skcipher(ctx->fallback);
}
static u32 sahara_sha_init_hdr(struct sahara_dev *dev,
			       struct sahara_sha_reqctx *rctx)
{
	u32 hdr = rctx->mode;

	if (rctx->first) {
		hdr |= SAHARA_HDR_MDHA_SET_MODE_HASH;
		hdr |= SAHARA_HDR_MDHA_INIT;
	} else {
		hdr |= SAHARA_HDR_MDHA_SET_MODE_MD_KEY;
	}

	if (rctx->last)
		hdr |= SAHARA_HDR_MDHA_PDATA;

	if (hweight_long(hdr) % 2 == 0)
		hdr |= SAHARA_HDR_PARITY_BIT;

	return hdr;
}
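/*
 * Unlike the SKHA helpers above, the MDHA header is assembled from a
 * variable set of flags, so odd parity is enforced after the fact: if the
 * final population count is even, setting SAHARA_HDR_PARITY_BIT makes it
 * odd.
 */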
static int sahara_sha_hw_links_create(struct sahara_dev *dev,
				      struct sahara_sha_reqctx *rctx,
				      int start)
{
	struct scatterlist *sg;
	unsigned int len;
	int ret;
	int i;

	dev->in_sg = rctx->in_sg;

	dev->nb_in_sg = sg_nents_for_len(dev->in_sg, rctx->total);
	if (dev->nb_in_sg < 0) {
		dev_err(dev->device, "Invalid number of src SG entries.\n");
		return dev->nb_in_sg;
	}
	if ((dev->nb_in_sg) > SAHARA_MAX_HW_LINK) {
		dev_err(dev->device, "not enough hw links (%d)\n",
			dev->nb_in_sg);
		return -EINVAL;
	}

	sg = dev->in_sg;
	ret = dma_map_sg(dev->device, dev->in_sg, dev->nb_in_sg, DMA_TO_DEVICE);
	if (!ret)
		return -EFAULT;

	len = rctx->total;
	for (i = start; i < dev->nb_in_sg + start; i++) {
		dev->hw_link[i]->len = min(len, sg->length);
		dev->hw_link[i]->p = sg->dma_address;
		if (i == (dev->nb_in_sg + start - 1)) {
			dev->hw_link[i]->next = 0;
		} else {
			len -= min(len, sg->length);
			dev->hw_link[i]->next = dev->hw_phys_link[i + 1];
			sg = sg_next(sg);
		}
	}

	return i;
}
static int sahara_sha_hw_data_descriptor_create(struct sahara_dev *dev,
						struct sahara_sha_reqctx *rctx,
						struct ahash_request *req,
						int index)
{
	unsigned int result_len;
	int i = index;

	if (rctx->first)
		/* Create initial descriptor: #8 */
		dev->hw_desc[index]->hdr = sahara_sha_init_hdr(dev, rctx);
	else
		/* Create hash descriptor: #10. Must follow #6. */
		dev->hw_desc[index]->hdr = SAHARA_HDR_MDHA_HASH;

	dev->hw_desc[index]->len1 = rctx->total;
	if (dev->hw_desc[index]->len1 == 0) {
		/* if len1 is 0, p1 must be 0, too */
		dev->hw_desc[index]->p1 = 0;
		rctx->sg_in_idx = 0;
	} else {
		/* Create input links */
		dev->hw_desc[index]->p1 = dev->hw_phys_link[index];
		i = sahara_sha_hw_links_create(dev, rctx, index);

		rctx->sg_in_idx = index;
		if (i < 0)
			return i;
	}

	dev->hw_desc[index]->p2 = dev->hw_phys_link[i];

	/* Save the context for the next operation */
	result_len = rctx->context_size;
	dev->hw_link[i]->p = dev->context_phys_base;

	dev->hw_link[i]->len = result_len;
	dev->hw_desc[index]->len2 = result_len;

	dev->hw_link[i]->next = 0;

	return 0;
}
/*
 * Load descriptor aka #6
 *
 * To load a previously saved context back to the MDHA unit
 *
 * p1: Saved Context
 * p2: NULL
 *
 */
static int sahara_sha_hw_context_descriptor_create(struct sahara_dev *dev,
						   struct sahara_sha_reqctx *rctx,
						   struct ahash_request *req,
						   int index)
{
	dev->hw_desc[index]->hdr = sahara_sha_init_hdr(dev, rctx);

	dev->hw_desc[index]->len1 = rctx->context_size;
	dev->hw_desc[index]->p1 = dev->hw_phys_link[index];
	dev->hw_desc[index]->len2 = 0;
	dev->hw_desc[index]->p2 = 0;

	dev->hw_link[index]->len = rctx->context_size;
	dev->hw_link[index]->p = dev->context_phys_base;
	dev->hw_link[index]->next = 0;

	return 0;
}
static int sahara_sha_prepare_request(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
	unsigned int hash_later;
	unsigned int block_size;
	unsigned int len;

	block_size = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));

	/* append bytes from previous operation */
	len = rctx->buf_cnt + req->nbytes;

	/* only the last transfer can be padded in hardware */
	if (!rctx->last && (len < block_size)) {
		/* too little data, save for next operation */
		scatterwalk_map_and_copy(rctx->buf + rctx->buf_cnt, req->src,
					 0, req->nbytes, 0);
		rctx->buf_cnt += req->nbytes;

		return 0;
	}

	/* add data from previous operation first */
	if (rctx->buf_cnt)
		memcpy(rctx->rembuf, rctx->buf, rctx->buf_cnt);

	/* data must always be a multiple of block_size */
	hash_later = rctx->last ? 0 : len & (block_size - 1);
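	/*
	 * Worked example: with block_size = 64, buf_cnt = 20 bytes carried
	 * over and req->nbytes = 80, len = 100 and hash_later =
	 * 100 & 63 = 36, so 64 bytes are hashed now and 36 are stashed in
	 * buf for the next update (rctx->last forces hash_later to 0, since
	 * only the final transfer may be padded by the hardware).
	 */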
	if (hash_later) {
		unsigned int offset = req->nbytes - hash_later;
		/* Save remaining bytes for later use */
		scatterwalk_map_and_copy(rctx->buf, req->src, offset,
					 hash_later, 0);
	}

	rctx->total = len - hash_later;
	/* have data from previous operation and current */
	if (rctx->buf_cnt && req->nbytes) {
		sg_init_table(rctx->in_sg_chain, 2);
		sg_set_buf(rctx->in_sg_chain, rctx->rembuf, rctx->buf_cnt);
		sg_chain(rctx->in_sg_chain, 2, req->src);
		rctx->in_sg = rctx->in_sg_chain;
	/* only data from previous operation */
	} else if (rctx->buf_cnt) {
		rctx->in_sg = rctx->in_sg_chain;
		sg_init_one(rctx->in_sg, rctx->rembuf, rctx->buf_cnt);
	/* no data from previous operation */
	} else {
		rctx->in_sg = req->src;
	}

	/* on next call, we only have the remaining data in the buffer */
	rctx->buf_cnt = hash_later;

	/* any nonzero value tells the caller there is data to process */
	return -EINPROGRESS;
}
static int sahara_sha_process(struct ahash_request *req)
{
	struct sahara_dev *dev = dev_ptr;
	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
	int ret;
	unsigned long timeout;

	ret = sahara_sha_prepare_request(req);
	if (!ret)
		return ret;

	if (rctx->first) {
		ret = sahara_sha_hw_data_descriptor_create(dev, rctx, req, 0);
		if (ret)
			return ret;
		dev->hw_desc[0]->next = 0;
		rctx->first = 0;
	} else {
		memcpy(dev->context_base, rctx->context, rctx->context_size);

		sahara_sha_hw_context_descriptor_create(dev, rctx, req, 0);
		dev->hw_desc[0]->next = dev->hw_phys_desc[1];
		ret = sahara_sha_hw_data_descriptor_create(dev, rctx, req, 1);
		if (ret)
			return ret;
		dev->hw_desc[1]->next = 0;
	}

	sahara_dump_descriptors(dev);
	sahara_dump_links(dev);

	reinit_completion(&dev->dma_completion);

	sahara_write(dev, dev->hw_phys_desc[0], SAHARA_REG_DAR);

	timeout = wait_for_completion_timeout(&dev->dma_completion,
				msecs_to_jiffies(SAHARA_TIMEOUT_MS));

	if (rctx->sg_in_idx)
		dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
			     DMA_TO_DEVICE);

	if (!timeout) {
		dev_err(dev->device, "SHA timeout\n");
		return -ETIMEDOUT;
	}

	memcpy(rctx->context, dev->context_base, rctx->context_size);

	if (req->result && rctx->last)
		memcpy(req->result, rctx->context, rctx->digest_size);

	return 0;
}
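/*
 * Single worker thread: requests enqueued by sahara_aes_crypt() and
 * sahara_sha_enqueue() are pulled off the queue here one at a time and
 * dispatched synchronously to the hardware; the thread sleeps whenever
 * the queue is empty.
 */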
static int sahara_queue_manage(void *data)
{
	struct sahara_dev *dev = (struct sahara_dev *)data;
	struct crypto_async_request *async_req;
	struct crypto_async_request *backlog;
	int ret = 0;

	do {
		__set_current_state(TASK_INTERRUPTIBLE);

		mutex_lock(&dev->queue_mutex);
		backlog = crypto_get_backlog(&dev->queue);
		async_req = crypto_dequeue_request(&dev->queue);
		mutex_unlock(&dev->queue_mutex);

		if (backlog)
			backlog->complete(backlog, -EINPROGRESS);

		if (async_req) {
			if (crypto_tfm_alg_type(async_req->tfm) ==
			    CRYPTO_ALG_TYPE_AHASH) {
				struct ahash_request *req =
					ahash_request_cast(async_req);

				ret = sahara_sha_process(req);
			} else {
				struct ablkcipher_request *req =
					ablkcipher_request_cast(async_req);

				ret = sahara_aes_process(req);
			}

			async_req->complete(async_req, ret);

			continue;
		}

		schedule();
	} while (!kthread_should_stop());

	return 0;
}
static int sahara_sha_enqueue(struct ahash_request *req, int last)
{
	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
	struct sahara_dev *dev = dev_ptr;
	int ret;

	if (!req->nbytes && !last)
		return 0;

	rctx->last = last;

	if (!rctx->active) {
		rctx->active = 1;
		rctx->first = 1;
	}

	mutex_lock(&dev->queue_mutex);
	ret = crypto_enqueue_request(&dev->queue, &req->base);
	mutex_unlock(&dev->queue_mutex);

	wake_up_process(dev->kthread);

	return ret;
}
static int sahara_sha_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);

	memset(rctx, 0, sizeof(*rctx));

	switch (crypto_ahash_digestsize(tfm)) {
	case SHA1_DIGEST_SIZE:
		rctx->mode |= SAHARA_HDR_MDHA_ALG_SHA1;
		rctx->digest_size = SHA1_DIGEST_SIZE;
		break;
	case SHA256_DIGEST_SIZE:
		rctx->mode |= SAHARA_HDR_MDHA_ALG_SHA256;
		rctx->digest_size = SHA256_DIGEST_SIZE;
		break;
	default:
		return -EINVAL;
	}

	rctx->context_size = rctx->digest_size + 4;
	rctx->active = 0;

	return 0;
}
static int sahara_sha_update(struct ahash_request *req)
{
	return sahara_sha_enqueue(req, 0);
}

static int sahara_sha_final(struct ahash_request *req)
{
	req->nbytes = 0;
	return sahara_sha_enqueue(req, 1);
}

static int sahara_sha_finup(struct ahash_request *req)
{
	return sahara_sha_enqueue(req, 1);
}

static int sahara_sha_digest(struct ahash_request *req)
{
	sahara_sha_init(req);

	return sahara_sha_finup(req);
}
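/*
 * struct sahara_sha_reqctx carries the complete hash state (hardware
 * context plus buffered bytes), so export and import reduce to a plain
 * memcpy of the request context, which is enough to suspend and resume a
 * partial hash.
 */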
static int sahara_sha_export(struct ahash_request *req, void *out)
{
	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);

	memcpy(out, rctx, sizeof(struct sahara_sha_reqctx));

	return 0;
}

static int sahara_sha_import(struct ahash_request *req, const void *in)
{
	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);

	memcpy(rctx, in, sizeof(struct sahara_sha_reqctx));

	return 0;
}

static int sahara_sha_cra_init(struct crypto_tfm *tfm)
{
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct sahara_sha_reqctx));

	return 0;
}
static struct crypto_alg aes_algs[] = {
{
	.cra_name		= "ecb(aes)",
	.cra_driver_name	= "sahara-ecb-aes",
	.cra_priority		= 300,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
				  CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct sahara_ctx),
	.cra_alignmask		= 0x0,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= sahara_aes_cra_init,
	.cra_exit		= sahara_aes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.setkey		= sahara_aes_setkey,
		.encrypt	= sahara_aes_ecb_encrypt,
		.decrypt	= sahara_aes_ecb_decrypt,
	}
}, {
	.cra_name		= "cbc(aes)",
	.cra_driver_name	= "sahara-cbc-aes",
	.cra_priority		= 300,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
				  CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct sahara_ctx),
	.cra_alignmask		= 0x0,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= sahara_aes_cra_init,
	.cra_exit		= sahara_aes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.setkey		= sahara_aes_setkey,
		.encrypt	= sahara_aes_cbc_encrypt,
		.decrypt	= sahara_aes_cbc_decrypt,
	}
}
};
static struct ahash_alg sha_v3_algs[] = {
{
	.init		= sahara_sha_init,
	.update		= sahara_sha_update,
	.final		= sahara_sha_final,
	.finup		= sahara_sha_finup,
	.digest		= sahara_sha_digest,
	.export		= sahara_sha_export,
	.import		= sahara_sha_import,
	.halg.digestsize	= SHA1_DIGEST_SIZE,
	.halg.statesize		= sizeof(struct sahara_sha_reqctx),
	.halg.base	= {
		.cra_name		= "sha1",
		.cra_driver_name	= "sahara-sha1",
		.cra_priority		= 300,
		.cra_flags		= CRYPTO_ALG_ASYNC |
					  CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		= SHA1_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct sahara_ctx),
		.cra_alignmask		= 0,
		.cra_module		= THIS_MODULE,
		.cra_init		= sahara_sha_cra_init,
	}
},
};
static struct ahash_alg sha_v4_algs[] = {
{
	.init		= sahara_sha_init,
	.update		= sahara_sha_update,
	.final		= sahara_sha_final,
	.finup		= sahara_sha_finup,
	.digest		= sahara_sha_digest,
	.export		= sahara_sha_export,
	.import		= sahara_sha_import,
	.halg.digestsize	= SHA256_DIGEST_SIZE,
	.halg.statesize		= sizeof(struct sahara_sha_reqctx),
	.halg.base	= {
		.cra_name		= "sha256",
		.cra_driver_name	= "sahara-sha256",
		.cra_priority		= 300,
		.cra_flags		= CRYPTO_ALG_ASYNC |
					  CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		= SHA256_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct sahara_ctx),
		.cra_alignmask		= 0,
		.cra_module		= THIS_MODULE,
		.cra_init		= sahara_sha_cra_init,
	}
},
};
static irqreturn_t sahara_irq_handler(int irq, void *data)
{
	struct sahara_dev *dev = (struct sahara_dev *)data;
	unsigned int stat = sahara_read(dev, SAHARA_REG_STATUS);
	unsigned int err = sahara_read(dev, SAHARA_REG_ERRSTATUS);

	sahara_write(dev, SAHARA_CMD_CLEAR_INT | SAHARA_CMD_CLEAR_ERR,
		     SAHARA_REG_CMD);

	sahara_decode_status(dev, stat);

	if (SAHARA_STATUS_GET_STATE(stat) == SAHARA_STATE_BUSY) {
		return IRQ_NONE;
	} else if (SAHARA_STATUS_GET_STATE(stat) == SAHARA_STATE_COMPLETE) {
		dev->error = 0;
	} else {
		sahara_decode_error(dev, err);
		dev->error = -EINVAL;
	}

	complete(&dev->dma_completion);

	return IRQ_HANDLED;
}
static int sahara_register_algs(struct sahara_dev *dev)
{
	int err;
	unsigned int i, j, k, l;

	for (i = 0; i < ARRAY_SIZE(aes_algs); i++) {
		INIT_LIST_HEAD(&aes_algs[i].cra_list);
		err = crypto_register_alg(&aes_algs[i]);
		if (err)
			goto err_aes_algs;
	}

	for (k = 0; k < ARRAY_SIZE(sha_v3_algs); k++) {
		err = crypto_register_ahash(&sha_v3_algs[k]);
		if (err)
			goto err_sha_v3_algs;
	}

	if (dev->version > SAHARA_VERSION_3)
		for (l = 0; l < ARRAY_SIZE(sha_v4_algs); l++) {
			err = crypto_register_ahash(&sha_v4_algs[l]);
			if (err)
				goto err_sha_v4_algs;
		}

	return 0;

err_sha_v4_algs:
	for (j = 0; j < l; j++)
		crypto_unregister_ahash(&sha_v4_algs[j]);

err_sha_v3_algs:
	for (j = 0; j < k; j++)
		crypto_unregister_ahash(&sha_v3_algs[j]);

err_aes_algs:
	for (j = 0; j < i; j++)
		crypto_unregister_alg(&aes_algs[j]);

	return err;
}
static void sahara_unregister_algs(struct sahara_dev *dev)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(aes_algs); i++)
		crypto_unregister_alg(&aes_algs[i]);

	for (i = 0; i < ARRAY_SIZE(sha_v3_algs); i++)
		crypto_unregister_ahash(&sha_v3_algs[i]);

	if (dev->version > SAHARA_VERSION_3)
		for (i = 0; i < ARRAY_SIZE(sha_v4_algs); i++)
			crypto_unregister_ahash(&sha_v4_algs[i]);
}
static const struct platform_device_id sahara_platform_ids[] = {
	{ .name = "sahara-imx27" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(platform, sahara_platform_ids);

static const struct of_device_id sahara_dt_ids[] = {
	{ .compatible = "fsl,imx53-sahara" },
	{ .compatible = "fsl,imx27-sahara" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, sahara_dt_ids);
static int sahara_probe(struct platform_device *pdev)
{
	struct sahara_dev *dev;
	struct resource *res;
	u32 version;
	int irq;
	int err;
	int i;

	dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;

	dev->device = &pdev->dev;
	platform_set_drvdata(pdev, dev);

	/* Get the base address */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	dev->regs_base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(dev->regs_base))
		return PTR_ERR(dev->regs_base);

	/* Get the IRQ */
	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_err(&pdev->dev, "failed to get irq resource\n");
		return irq;
	}

	err = devm_request_irq(&pdev->dev, irq, sahara_irq_handler,
			       0, dev_name(&pdev->dev), dev);
	if (err) {
		dev_err(&pdev->dev, "failed to request irq\n");
		return err;
	}

	/* clocks */
	dev->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
	if (IS_ERR(dev->clk_ipg)) {
		dev_err(&pdev->dev, "Could not get ipg clock\n");
		return PTR_ERR(dev->clk_ipg);
	}

	dev->clk_ahb = devm_clk_get(&pdev->dev, "ahb");
	if (IS_ERR(dev->clk_ahb)) {
		dev_err(&pdev->dev, "Could not get ahb clock\n");
		return PTR_ERR(dev->clk_ahb);
	}

	/* Allocate HW descriptors */
	dev->hw_desc[0] = dmam_alloc_coherent(&pdev->dev,
			SAHARA_MAX_HW_DESC * sizeof(struct sahara_hw_desc),
			&dev->hw_phys_desc[0], GFP_KERNEL);
	if (!dev->hw_desc[0]) {
		dev_err(&pdev->dev, "Could not allocate hw descriptors\n");
		return -ENOMEM;
	}
	dev->hw_desc[1] = dev->hw_desc[0] + 1;
	dev->hw_phys_desc[1] = dev->hw_phys_desc[0] +
			       sizeof(struct sahara_hw_desc);

	/* Allocate space for iv and key */
	dev->key_base = dmam_alloc_coherent(&pdev->dev, 2 * AES_KEYSIZE_128,
					    &dev->key_phys_base, GFP_KERNEL);
	if (!dev->key_base) {
		dev_err(&pdev->dev, "Could not allocate memory for key\n");
		return -ENOMEM;
	}
	dev->iv_base = dev->key_base + AES_KEYSIZE_128;
	dev->iv_phys_base = dev->key_phys_base + AES_KEYSIZE_128;

	/* Allocate space for context: largest digest + message length field */
	dev->context_base = dmam_alloc_coherent(&pdev->dev,
					SHA256_DIGEST_SIZE + 4,
					&dev->context_phys_base, GFP_KERNEL);
	if (!dev->context_base) {
		dev_err(&pdev->dev, "Could not allocate memory for MDHA context\n");
		return -ENOMEM;
	}

	/* Allocate space for HW links */
	dev->hw_link[0] = dmam_alloc_coherent(&pdev->dev,
			SAHARA_MAX_HW_LINK * sizeof(struct sahara_hw_link),
			&dev->hw_phys_link[0], GFP_KERNEL);
	if (!dev->hw_link[0]) {
		dev_err(&pdev->dev, "Could not allocate hw links\n");
		return -ENOMEM;
	}
	for (i = 1; i < SAHARA_MAX_HW_LINK; i++) {
		dev->hw_phys_link[i] = dev->hw_phys_link[i - 1] +
				       sizeof(struct sahara_hw_link);
		dev->hw_link[i] = dev->hw_link[i - 1] + 1;
	}

	crypto_init_queue(&dev->queue, SAHARA_QUEUE_LENGTH);

	mutex_init(&dev->queue_mutex);

	dev_ptr = dev;

	dev->kthread = kthread_run(sahara_queue_manage, dev, "sahara_crypto");
	if (IS_ERR(dev->kthread)) {
		return PTR_ERR(dev->kthread);
	}

	init_completion(&dev->dma_completion);

	err = clk_prepare_enable(dev->clk_ipg);
	if (err)
		return err;
	err = clk_prepare_enable(dev->clk_ahb);
	if (err)
		goto clk_ipg_disable;
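	/*
	 * Version discovery: SAHARA v3 (i.MX27) reports its version in the
	 * low byte of the VERSION register, while v4 (i.MX53) reports it in
	 * bits 15:8, hence the shift below before comparing and storing.
	 */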
	version = sahara_read(dev, SAHARA_REG_VERSION);
	if (of_device_is_compatible(pdev->dev.of_node, "fsl,imx27-sahara")) {
		if (version != SAHARA_VERSION_3)
			err = -ENODEV;
	} else if (of_device_is_compatible(pdev->dev.of_node,
			"fsl,imx53-sahara")) {
		if (((version >> 8) & 0xff) != SAHARA_VERSION_4)
			err = -ENODEV;
		version = (version >> 8) & 0xff;
	}
	if (err == -ENODEV) {
		dev_err(&pdev->dev, "SAHARA version %d not supported\n",
			version);
		goto err_algs;
	}

	dev->version = version;

	sahara_write(dev, SAHARA_CMD_RESET | SAHARA_CMD_MODE_BATCH,
		     SAHARA_REG_CMD);
	sahara_write(dev, SAHARA_CONTROL_SET_THROTTLE(0) |
			SAHARA_CONTROL_SET_MAXBURST(8) |
			SAHARA_CONTROL_RNG_AUTORSD |
			SAHARA_CONTROL_ENABLE_INT,
			SAHARA_REG_CONTROL);

	err = sahara_register_algs(dev);
	if (err)
		goto err_algs;

	dev_info(&pdev->dev, "SAHARA version %d initialized\n", version);

	return 0;

err_algs:
	kthread_stop(dev->kthread);

	clk_disable_unprepare(dev->clk_ahb);
clk_ipg_disable:
	clk_disable_unprepare(dev->clk_ipg);

	return err;
}
static int sahara_remove(struct platform_device *pdev)
{
	struct sahara_dev *dev = platform_get_drvdata(pdev);

	kthread_stop(dev->kthread);

	sahara_unregister_algs(dev);

	clk_disable_unprepare(dev->clk_ipg);
	clk_disable_unprepare(dev->clk_ahb);

	return 0;
}
static struct platform_driver sahara_driver = {
	.probe		= sahara_probe,
	.remove		= sahara_remove,
	.driver		= {
		.name	= SAHARA_NAME,
		.of_match_table = sahara_dt_ids,
	},
	.id_table = sahara_platform_ids,
};

module_platform_driver(sahara_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Javier Martin <javier.martin@vista-silicon.com>");
MODULE_AUTHOR("Steffen Trumtrar <s.trumtrar@pengutronix.de>");
MODULE_DESCRIPTION("SAHARA2 HW crypto accelerator");