/*
 * caam - Freescale FSL CAAM support for ahash functions of crypto API
 *
 * Copyright 2011 Freescale Semiconductor, Inc.
 *
 * Based on caamalg.c crypto API driver.
 *
 * relationship of digest job descriptor or first job descriptor after init to
 * shared descriptors:
 *
 * ---------------                     ---------------
 * | JobDesc #1  |-------------------->|  ShareDesc  |
 * | *(packet 1) |                     |  (hashKey)  |
 * ---------------                     | (operation) |
 *                                     ---------------
 *
 * relationship of subsequent job descriptors to shared descriptors:
 *
 * ---------------                     ---------------
 * | JobDesc #2  |-------------------->|  ShareDesc  |
 * | *(packet 2) |      |------------->|  (hashKey)  |
 * ---------------      |         |--->| (operation) |
 *       .              |         |    | (load ctx2) |
 *       .              |         |    ---------------
 * ---------------      |         |
 * | JobDesc #3  |------|         |
 * | *(packet 3) |                |
 * ---------------                |
 * ---------------                |
 * | JobDesc #4  |----------------|
 * | *(packet 4) |
 * ---------------
 *
 * The SharedDesc never changes for a connection unless rekeyed, but
 * each packet will likely be in a different place. So all we need
 * to know to process the packet is where the input is, where the
 * output goes, and what context we want to process with. Context is
 * in the SharedDesc, packet references in the JobDesc.
 *
 * So, a job desc looks like:
 *
 * ---------------------
 * | Header            |
 * | ShareDesc Pointer |
 * | SEQ_OUT_PTR       |
 * | (output buffer)   |
 * | SEQ_IN_PTR        |
 * | (input buffer)    |
 * | (input length)    |
 * ---------------------
 */
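
/*
 * Illustrative sketch (a reading aid, not part of the driver proper):
 * with the desc_constr.h helpers used throughout this file, a job
 * descriptor with the layout shown above is assembled roughly like
 * this, where sh_desc_dma, dst_dma and src_dma stand in for real DMA
 * handles:
 *
 *	init_job_desc_shared(desc, sh_desc_dma, desc_len(sh_desc),
 *			     HDR_SHARE_DEFER | HDR_REVERSE);
 *	append_seq_out_ptr(desc, dst_dma, out_len, 0);
 *	append_seq_in_ptr(desc, src_dma, in_len, 0);
 *
 * This is the pattern ahash_edesc_alloc() and the per-request
 * functions below follow.
 */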
#include "compat.h"
#include "regs.h"
#include "intern.h"
#include "desc_constr.h"
#include "jr.h"
#include "error.h"
#include "sg_sw_sec4.h"
#include "key_gen.h"
#define CAAM_CRA_PRIORITY		3000

/* max hash key is max split key size */
#define CAAM_MAX_HASH_KEY_SIZE		(SHA512_DIGEST_SIZE * 2)

#define CAAM_MAX_HASH_BLOCK_SIZE	SHA512_BLOCK_SIZE
#define CAAM_MAX_HASH_DIGEST_SIZE	SHA512_DIGEST_SIZE
/* length of descriptors text */
#define DESC_AHASH_BASE			(4 * CAAM_CMD_SZ)
#define DESC_AHASH_UPDATE_LEN		(6 * CAAM_CMD_SZ)
#define DESC_AHASH_UPDATE_FIRST_LEN	(DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
#define DESC_AHASH_FINAL_LEN		(DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
#define DESC_AHASH_FINUP_LEN		(DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
#define DESC_AHASH_DIGEST_LEN		(DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)

#define DESC_HASH_MAX_USED_BYTES	(DESC_AHASH_FINAL_LEN + \
					 CAAM_MAX_HASH_KEY_SIZE)
#define DESC_HASH_MAX_USED_LEN		(DESC_HASH_MAX_USED_BYTES / CAAM_CMD_SZ)
/* caam context sizes for hashes: running digest + 8 */
#define HASH_MSG_LEN			8
#define MAX_CTX_LEN			(HASH_MSG_LEN + SHA512_DIGEST_SIZE)
/* for print_hex_dumps with line references */
#ifdef DEBUG
#define debug(format, arg...) printk(format, arg)
#else
#define debug(format, arg...)
#endif
static struct list_head hash_list;
/* ahash per-session context */
struct caam_hash_ctx {
	u32 sh_desc_update[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	u32 sh_desc_update_first[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	u32 sh_desc_fin[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	u32 sh_desc_digest[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	u32 sh_desc_finup[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	dma_addr_t sh_desc_update_dma ____cacheline_aligned;
	dma_addr_t sh_desc_update_first_dma;
	dma_addr_t sh_desc_fin_dma;
	dma_addr_t sh_desc_digest_dma;
	dma_addr_t sh_desc_finup_dma;
	struct device *jrdev;
	u32 alg_type;
	u32 alg_op;
	u8 key[CAAM_MAX_HASH_KEY_SIZE];
	dma_addr_t key_dma;
	int ctx_len;
	unsigned int split_key_len;
	unsigned int split_key_pad_len;
};
/* ahash state */
struct caam_hash_state {
	dma_addr_t buf_dma;
	dma_addr_t ctx_dma;
	u8 buf_0[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
	int buflen_0;
	u8 buf_1[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
	int buflen_1;
	u8 caam_ctx[MAX_CTX_LEN] ____cacheline_aligned;
	int (*update)(struct ahash_request *req);
	int (*final)(struct ahash_request *req);
	int (*finup)(struct ahash_request *req);
	int current_buf;
};
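
/*
 * Note on the two buffers above (a reading aid, not upstream text):
 * buf_0/buf_1 hold the sub-blocksize tail of the data stream between
 * update calls, and current_buf selects which one holds the pending
 * tail.  Each job submission copies the new tail into the *other*
 * buffer and flips current_buf, as the update paths below do:
 *
 *	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
 *	u8 *next_buf = state->current_buf ? state->buf_0 : state->buf_1;
 *	...
 *	state->current_buf = !state->current_buf;
 */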
struct caam_export_state {
	u8 buf[CAAM_MAX_HASH_BLOCK_SIZE];
	u8 caam_ctx[MAX_CTX_LEN];
	int buflen;
	int (*update)(struct ahash_request *req);
	int (*final)(struct ahash_request *req);
	int (*finup)(struct ahash_request *req);
};
/* Common job descriptor seq in/out ptr routines */

/* Map state->caam_ctx, and append seq_out_ptr command that points to it */
static inline int map_seq_out_ptr_ctx(u32 *desc, struct device *jrdev,
				      struct caam_hash_state *state,
				      int ctx_len)
{
	state->ctx_dma = dma_map_single(jrdev, state->caam_ctx,
					ctx_len, DMA_FROM_DEVICE);
	if (dma_mapping_error(jrdev, state->ctx_dma)) {
		dev_err(jrdev, "unable to map ctx\n");
		return -ENOMEM;
	}

	append_seq_out_ptr(desc, state->ctx_dma, ctx_len, 0);

	return 0;
}
/* Map req->result, and append seq_out_ptr command that points to it */
static inline dma_addr_t map_seq_out_ptr_result(u32 *desc, struct device *jrdev,
						u8 *result, int digestsize)
{
	dma_addr_t dst_dma;

	dst_dma = dma_map_single(jrdev, result, digestsize, DMA_FROM_DEVICE);
	append_seq_out_ptr(desc, dst_dma, digestsize, 0);

	return dst_dma;
}
/* Map current buffer in state and put it in link table */
static inline dma_addr_t buf_map_to_sec4_sg(struct device *jrdev,
					    struct sec4_sg_entry *sec4_sg,
					    u8 *buf, int buflen)
{
	dma_addr_t buf_dma;

	buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE);
	dma_to_sec4_sg_one(sec4_sg, buf_dma, buflen, 0);

	return buf_dma;
}
/*
 * Only put the buffer in the link table if it contains data. A
 * previously used buffer may still be mapped and must be unmapped
 * first.
 */
static inline dma_addr_t
try_buf_map_to_sec4_sg(struct device *jrdev, struct sec4_sg_entry *sec4_sg,
		       u8 *buf, dma_addr_t buf_dma, int buflen,
		       int last_buflen)
{
	if (buf_dma && !dma_mapping_error(jrdev, buf_dma))
		dma_unmap_single(jrdev, buf_dma, last_buflen, DMA_TO_DEVICE);
	if (buflen)
		buf_dma = buf_map_to_sec4_sg(jrdev, sec4_sg, buf, buflen);
	else
		buf_dma = 0;

	return buf_dma;
}
/* Map state->caam_ctx, and add it to link table */
static inline int ctx_map_to_sec4_sg(u32 *desc, struct device *jrdev,
				     struct caam_hash_state *state, int ctx_len,
				     struct sec4_sg_entry *sec4_sg, u32 flag)
{
	state->ctx_dma = dma_map_single(jrdev, state->caam_ctx, ctx_len, flag);
	if (dma_mapping_error(jrdev, state->ctx_dma)) {
		dev_err(jrdev, "unable to map ctx\n");
		return -ENOMEM;
	}

	dma_to_sec4_sg_one(sec4_sg, state->ctx_dma, ctx_len, 0);

	return 0;
}
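
/*
 * Taken together, the mapping helpers above build the input link table
 * for a job.  For an update that has both a running context and a
 * partial buffer, the table laid out in ahash_update_ctx() looks like
 * (sketch, a reading aid only):
 *
 *	sec4_sg[0]:  state->caam_ctx   (ctx_map_to_sec4_sg)
 *	sec4_sg[1]:  buffered tail     (try_buf_map_to_sec4_sg)
 *	sec4_sg[2..]: req->src pages   (sg_to_sec4_sg_last)
 */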
/* Common shared descriptor commands */
static inline void append_key_ahash(u32 *desc, struct caam_hash_ctx *ctx)
{
	append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
			  ctx->split_key_len, CLASS_2 |
			  KEY_DEST_MDHA_SPLIT | KEY_ENC);
}
/* Append key if it has been set */
static inline void init_sh_desc_key_ahash(u32 *desc, struct caam_hash_ctx *ctx)
{
	u32 *key_jump_cmd;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	if (ctx->split_key_len) {
		/* Skip if already shared */
		key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
					   JUMP_COND_SHRD);

		append_key_ahash(desc, ctx);

		set_jump_tgt_here(desc, key_jump_cmd);
	}

	/* Propagate errors from shared to job descriptor */
	append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
}
/*
 * For ahash read data from seqin following state->caam_ctx,
 * and write resulting class2 context to seqout, which may be
 * state->caam_ctx or req->result
 */
static inline void ahash_append_load_str(u32 *desc, int digestsize)
{
	/* Calculate remaining bytes to read */
	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);

	/* Read remaining bytes */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_LAST2 |
			     FIFOLD_TYPE_MSG | KEY_VLF);

	/* Store class2 context bytes */
	append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);
}
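
/*
 * Sketch of the resulting command stream for a finalizing shared
 * descriptor (derived from the helpers above and below; a reading aid,
 * not literal descriptor output):
 *
 *	[ KEY        <- HMAC split key (skipped if already shared) ]
 *	[ SEQ LOAD   -> class 2 context (running digest)           ]
 *	[ OPERATION  -> MDHA, OP_ALG_AS_FINALIZE                   ]
 *	[ VARSEQINLEN = SEQINLEN; SEQ FIFO LOAD (message bytes)    ]
 *	[ SEQ STORE  <- class 2 context (final digest)             ]
 */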
/*
 * For ahash update, final and finup, import context, read and write to seqout
 */
static inline void ahash_ctx_data_to_out(u32 *desc, u32 op, u32 state,
					 int digestsize,
					 struct caam_hash_ctx *ctx)
{
	init_sh_desc_key_ahash(desc, ctx);

	/* Import context from software */
	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
		   LDST_CLASS_2_CCB | ctx->ctx_len);

	/* Class 2 operation */
	append_operation(desc, op | state | OP_ALG_ENCRYPT);

	/*
	 * Load from buf and/or src and write to req->result or state->context
	 */
	ahash_append_load_str(desc, digestsize);
}
/* For ahash firsts and digest, read and write to seqout */
static inline void ahash_data_to_out(u32 *desc, u32 op, u32 state,
				     int digestsize, struct caam_hash_ctx *ctx)
{
	init_sh_desc_key_ahash(desc, ctx);

	/* Class 2 operation */
	append_operation(desc, op | state | OP_ALG_ENCRYPT);

	/*
	 * Load from buf and/or src and write to req->result or state->context
	 */
	ahash_append_load_str(desc, digestsize);
}
static int ahash_set_sh_desc(struct crypto_ahash *ahash)
{
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	int digestsize = crypto_ahash_digestsize(ahash);
	struct device *jrdev = ctx->jrdev;
	u32 have_key = 0;
	u32 *desc;

	if (ctx->split_key_len)
		have_key = OP_ALG_AAI_HMAC_PRECOMP;

	/* ahash_update shared descriptor */
	desc = ctx->sh_desc_update;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Import context from software */
	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
		   LDST_CLASS_2_CCB | ctx->ctx_len);

	/* Class 2 operation */
	append_operation(desc, ctx->alg_type | OP_ALG_AS_UPDATE |
			 OP_ALG_ENCRYPT);

	/* Load data and write to result or context */
	ahash_append_load_str(desc, ctx->ctx_len);

	ctx->sh_desc_update_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
						 DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_update_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ahash update shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	/* ahash_update_first shared descriptor */
	desc = ctx->sh_desc_update_first;

	ahash_data_to_out(desc, have_key | ctx->alg_type, OP_ALG_AS_INIT,
			  ctx->ctx_len, ctx);

	ctx->sh_desc_update_first_dma = dma_map_single(jrdev, desc,
						       desc_bytes(desc),
						       DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_update_first_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ahash update first shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	/* ahash_final shared descriptor */
	desc = ctx->sh_desc_fin;

	ahash_ctx_data_to_out(desc, have_key | ctx->alg_type,
			      OP_ALG_AS_FINALIZE, digestsize, ctx);

	ctx->sh_desc_fin_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_fin_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ahash final shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/* ahash_finup shared descriptor */
	desc = ctx->sh_desc_finup;

	ahash_ctx_data_to_out(desc, have_key | ctx->alg_type,
			      OP_ALG_AS_FINALIZE, digestsize, ctx);

	ctx->sh_desc_finup_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
						DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_finup_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ahash finup shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/* ahash_digest shared descriptor */
	desc = ctx->sh_desc_digest;

	ahash_data_to_out(desc, have_key | ctx->alg_type, OP_ALG_AS_INITFINAL,
			  digestsize, ctx);

	ctx->sh_desc_digest_dma = dma_map_single(jrdev, desc,
						 desc_bytes(desc),
						 DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_digest_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ahash digest shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	return 0;
}
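
/*
 * Summary (reading aid): the five shared descriptors built above map
 * to the crypto API entry points as follows:
 *
 *	sh_desc_update        -> .update (context in and out)
 *	sh_desc_update_first  -> first .update after .init (no context in)
 *	sh_desc_fin           -> .final
 *	sh_desc_finup         -> .finup
 *	sh_desc_digest        -> .digest (one-shot init + finalize)
 */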
static int gen_split_hash_key(struct caam_hash_ctx *ctx, const u8 *key_in,
			      u32 keylen)
{
	return gen_split_key(ctx->jrdev, ctx->key, ctx->split_key_len,
			     ctx->split_key_pad_len, key_in, keylen,
			     ctx->alg_op);
}
/* Digest the key if it is larger than the algorithm block size */
static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
			   u32 *keylen, u8 *key_out, u32 digestsize)
{
	struct device *jrdev = ctx->jrdev;
	u32 *desc;
	struct split_key_result result;
	dma_addr_t src_dma, dst_dma;
	int ret;

	desc = kmalloc(CAAM_CMD_SZ * 8 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA);
	if (!desc) {
		dev_err(jrdev, "unable to allocate key input memory\n");
		return -ENOMEM;
	}

	init_job_desc(desc, 0);

	src_dma = dma_map_single(jrdev, (void *)key_in, *keylen,
				 DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, src_dma)) {
		dev_err(jrdev, "unable to map key input memory\n");
		kfree(desc);
		return -ENOMEM;
	}
	dst_dma = dma_map_single(jrdev, (void *)key_out, digestsize,
				 DMA_FROM_DEVICE);
	if (dma_mapping_error(jrdev, dst_dma)) {
		dev_err(jrdev, "unable to map key output memory\n");
		dma_unmap_single(jrdev, src_dma, *keylen, DMA_TO_DEVICE);
		kfree(desc);
		return -ENOMEM;
	}

	/* Job descriptor to perform unkeyed hash on key_in */
	append_operation(desc, ctx->alg_type | OP_ALG_ENCRYPT |
			 OP_ALG_AS_INITFINAL);
	append_seq_in_ptr(desc, src_dma, *keylen, 0);
	append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 |
			     FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_MSG);
	append_seq_out_ptr(desc, dst_dma, digestsize, 0);
	append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key_in@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key_in, *keylen, 1);
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	result.err = 0;
	init_completion(&result.completion);

	ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result);
	if (!ret) {
		/* in progress */
		wait_for_completion(&result.completion);
		ret = result.err;
#ifdef DEBUG
		print_hex_dump(KERN_ERR,
			       "digested key@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, key_in,
			       digestsize, 1);
#endif
	}
	dma_unmap_single(jrdev, src_dma, *keylen, DMA_TO_DEVICE);
	dma_unmap_single(jrdev, dst_dma, digestsize, DMA_FROM_DEVICE);

	*keylen = digestsize;

	kfree(desc);

	return ret;
}
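
/*
 * Example of when the pre-hash above triggers (illustrative numbers):
 * per the HMAC construction, a key longer than the algorithm block
 * size is first digested.  For hmac(sha256) (64-byte blocks), a
 * 100-byte key is reduced to its 32-byte SHA-256 digest before the
 * split key is generated; a 64-byte key is used as-is.
 */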
static int ahash_setkey(struct crypto_ahash *ahash,
			const u8 *key, unsigned int keylen)
{
	/* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
	static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct device *jrdev = ctx->jrdev;
	int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
	int digestsize = crypto_ahash_digestsize(ahash);
	int ret;
	u8 *hashed_key = NULL;
#ifdef DEBUG
	printk(KERN_ERR "keylen %d\n", keylen);
#endif

	if (keylen > blocksize) {
		hashed_key = kmalloc_array(digestsize,
					   sizeof(*hashed_key),
					   GFP_KERNEL | GFP_DMA);
		if (!hashed_key)
			return -ENOMEM;
		ret = hash_digest_key(ctx, key, &keylen, hashed_key,
				      digestsize);
		if (ret)
			goto bad_free_key;
		key = hashed_key;
	}

	/* Pick class 2 key length from algorithm submask */
	ctx->split_key_len = mdpadlen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
				      OP_ALG_ALGSEL_SHIFT] * 2;
	ctx->split_key_pad_len = ALIGN(ctx->split_key_len, 16);
#ifdef DEBUG
	printk(KERN_ERR "split_key_len %d split_key_pad_len %d\n",
	       ctx->split_key_len, ctx->split_key_pad_len);
	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	ret = gen_split_hash_key(ctx, key, keylen);
	if (ret)
		goto bad_free_key;

	ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len,
				      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->key_dma)) {
		dev_err(jrdev, "unable to map key i/o memory\n");
		ret = -ENOMEM;
		goto error_free_key;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
		       ctx->split_key_pad_len, 1);
#endif

	ret = ahash_set_sh_desc(ahash);
	if (ret)
		dma_unmap_single(jrdev, ctx->key_dma, ctx->split_key_pad_len,
				 DMA_TO_DEVICE);
 error_free_key:
	kfree(hashed_key);
	return ret;
 bad_free_key:
	kfree(hashed_key);
	crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}
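
/*
 * Minimal usage sketch from a hypothetical caller (standard kernel
 * crypto API, not part of this driver); error handling elided:
 *
 *	struct crypto_ahash *tfm;
 *	struct ahash_request *req;
 *	struct scatterlist sg;
 *	u8 digest[SHA256_DIGEST_SIZE];
 *
 *	tfm = crypto_alloc_ahash("hmac(sha256)", 0, 0);
 *	crypto_ahash_setkey(tfm, key, keylen);	// lands in ahash_setkey()
 *	req = ahash_request_alloc(tfm, GFP_KERNEL);
 *	sg_init_one(&sg, data, datalen);
 *	ahash_request_set_crypt(req, &sg, digest, datalen);
 *	crypto_ahash_digest(req);	// async; completes via callback
 */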
/*
 * ahash_edesc - s/w-extended ahash descriptor
 * @dst_dma: physical mapped address of req->result
 * @sec4_sg_dma: physical mapped address of h/w link table
 * @src_nents: number of segments in input scatterlist
 * @sec4_sg_bytes: length of dma mapped sec4_sg space
 * @hw_desc: the h/w job descriptor followed by any referenced link tables
 * @sec4_sg: h/w link table
 */
struct ahash_edesc {
	dma_addr_t dst_dma;
	dma_addr_t sec4_sg_dma;
	int src_nents;
	int sec4_sg_bytes;
	u32 hw_desc[DESC_JOB_IO_LEN / sizeof(u32)] ____cacheline_aligned;
	struct sec4_sg_entry sec4_sg[0];
};
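
/*
 * Note (reading aid): sec4_sg[0] is a flexible array member, so the
 * link table lives in the same allocation as the descriptor.  This is
 * how ahash_edesc_alloc() below sizes the block:
 *
 *	edesc = kzalloc(sizeof(*edesc) +
 *			sg_num * sizeof(struct sec4_sg_entry),
 *			GFP_DMA | flags);
 */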
static inline void ahash_unmap(struct device *dev,
			struct ahash_edesc *edesc,
			struct ahash_request *req, int dst_len)
{
	if (edesc->src_nents)
		dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);
	if (edesc->dst_dma)
		dma_unmap_single(dev, edesc->dst_dma, dst_len, DMA_FROM_DEVICE);

	if (edesc->sec4_sg_bytes)
		dma_unmap_single(dev, edesc->sec4_sg_dma,
				 edesc->sec4_sg_bytes, DMA_TO_DEVICE);
}
static inline void ahash_unmap_ctx(struct device *dev,
			struct ahash_edesc *edesc,
			struct ahash_request *req, int dst_len, u32 flag)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);

	if (state->ctx_dma) {
		dma_unmap_single(dev, state->ctx_dma, ctx->ctx_len, flag);
		state->ctx_dma = 0;
	}
	ahash_unmap(dev, edesc, req, dst_len);
}
static void ahash_done(struct device *jrdev, u32 *desc, u32 err,
		       void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	int digestsize = crypto_ahash_digestsize(ahash);
#ifdef DEBUG
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = (struct ahash_edesc *)((char *)desc -
		 offsetof(struct ahash_edesc, hw_desc));
	if (err)
		caam_jr_strstatus(jrdev, err);

	ahash_unmap(jrdev, edesc, req, digestsize);
	kfree(edesc);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
		       ctx->ctx_len, 1);
	print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
		       digestsize, 1);
#endif

	req->base.complete(&req->base, err);
}
static void ahash_done_bi(struct device *jrdev, u32 *desc, u32 err,
			  void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
#ifdef DEBUG
	struct caam_hash_state *state = ahash_request_ctx(req);
	int digestsize = crypto_ahash_digestsize(ahash);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = (struct ahash_edesc *)((char *)desc -
		 offsetof(struct ahash_edesc, hw_desc));
	if (err)
		caam_jr_strstatus(jrdev, err);

	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
	kfree(edesc);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
		       ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
			       digestsize, 1);
#endif

	req->base.complete(&req->base, err);
}
static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err,
			       void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	int digestsize = crypto_ahash_digestsize(ahash);
#ifdef DEBUG
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = (struct ahash_edesc *)((char *)desc -
		 offsetof(struct ahash_edesc, hw_desc));
	if (err)
		caam_jr_strstatus(jrdev, err);

	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_TO_DEVICE);
	kfree(edesc);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
		       ctx->ctx_len, 1);
	print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
		       digestsize, 1);
#endif

	req->base.complete(&req->base, err);
}
static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err,
			       void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
#ifdef DEBUG
	struct caam_hash_state *state = ahash_request_ctx(req);
	int digestsize = crypto_ahash_digestsize(ahash);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = (struct ahash_edesc *)((char *)desc -
		 offsetof(struct ahash_edesc, hw_desc));
	if (err)
		caam_jr_strstatus(jrdev, err);

	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_FROM_DEVICE);
	kfree(edesc);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
		       ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
			       digestsize, 1);
#endif

	req->base.complete(&req->base, err);
}
/*
 * Allocate an extended descriptor, which contains the hardware
 * descriptor and space for a hardware scatter table containing sg_num
 * entries.
 */
static struct ahash_edesc *ahash_edesc_alloc(struct caam_hash_ctx *ctx,
					     int sg_num, u32 *sh_desc,
					     dma_addr_t sh_desc_dma,
					     gfp_t flags)
{
	struct ahash_edesc *edesc;
	unsigned int sg_size = sg_num * sizeof(struct sec4_sg_entry);

	edesc = kzalloc(sizeof(*edesc) + sg_size, GFP_DMA | flags);
	if (!edesc) {
		dev_err(ctx->jrdev, "could not allocate extended descriptor\n");
		return NULL;
	}

	init_job_desc_shared(edesc->hw_desc, sh_desc_dma, desc_len(sh_desc),
			     HDR_SHARE_DEFER | HDR_REVERSE);

	return edesc;
}
static int ahash_edesc_add_src(struct caam_hash_ctx *ctx,
			       struct ahash_edesc *edesc,
			       struct ahash_request *req, int nents,
			       unsigned int first_sg,
			       unsigned int first_bytes, size_t to_hash)
{
	dma_addr_t src_dma;
	u32 options;

	if (nents > 1 || first_sg) {
		struct sec4_sg_entry *sg = edesc->sec4_sg;
		unsigned int sgsize = sizeof(*sg) * (first_sg + nents);

		sg_to_sec4_sg_last(req->src, nents, sg + first_sg, 0);

		src_dma = dma_map_single(ctx->jrdev, sg, sgsize, DMA_TO_DEVICE);
		if (dma_mapping_error(ctx->jrdev, src_dma)) {
			dev_err(ctx->jrdev, "unable to map S/G table\n");
			return -ENOMEM;
		}

		edesc->sec4_sg_bytes = sgsize;
		edesc->sec4_sg_dma = src_dma;
		options = LDST_SGF;
	} else {
		src_dma = sg_dma_address(req->src);
		options = 0;
	}

	append_seq_in_ptr(edesc->hw_desc, src_dma, first_bytes + to_hash,
			  options);

	return 0;
}
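
/*
 * Design note (reading aid): when the mapped source is a single
 * segment and nothing precedes it, the helper above programs SEQ IN
 * with the segment address directly and skips the link table; the
 * LDST_SGF option bit is set only when a sec4 S/G table is used.
 */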
/* submit update job descriptor */
static int ahash_update_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int *buflen = state->current_buf ? &state->buflen_1 : &state->buflen_0;
	u8 *next_buf = state->current_buf ? state->buf_0 : state->buf_1;
	int *next_buflen = state->current_buf ? &state->buflen_0 :
			   &state->buflen_1, last_buflen;
	int in_len = *buflen + req->nbytes, to_hash;
	u32 *desc;
	int src_nents, mapped_nents, sec4_sg_bytes, sec4_sg_src_index;
	struct ahash_edesc *edesc;
	int ret = 0;

	last_buflen = *next_buflen;
	*next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
	to_hash = in_len - *next_buflen;

	if (to_hash) {
		src_nents = sg_nents_for_len(req->src,
					     req->nbytes - (*next_buflen));
		if (src_nents < 0) {
			dev_err(jrdev, "Invalid number of src SG.\n");
			return src_nents;
		}
		if (src_nents) {
			mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
						  DMA_TO_DEVICE);
			if (!mapped_nents) {
				dev_err(jrdev, "unable to DMA map source\n");
				return -ENOMEM;
			}
		} else {
			mapped_nents = 0;
		}

		sec4_sg_src_index = 1 + (*buflen ? 1 : 0);
		sec4_sg_bytes = (sec4_sg_src_index + mapped_nents) *
				 sizeof(struct sec4_sg_entry);

		/*
		 * allocate space for base edesc and hw desc commands,
		 * link tables
		 */
		edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents,
					  ctx->sh_desc_update,
					  ctx->sh_desc_update_dma, flags);
		if (!edesc) {
			dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;
		edesc->sec4_sg_bytes = sec4_sg_bytes;

		ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
					 edesc->sec4_sg, DMA_BIDIRECTIONAL);
		if (ret)
			goto unmap_ctx;

		state->buf_dma = try_buf_map_to_sec4_sg(jrdev,
							edesc->sec4_sg + 1,
							buf, state->buf_dma,
							*buflen, last_buflen);

		if (mapped_nents) {
			sg_to_sec4_sg_last(req->src, mapped_nents,
					   edesc->sec4_sg + sec4_sg_src_index,
					   0);
			if (*next_buflen)
				scatterwalk_map_and_copy(next_buf, req->src,
							 to_hash - *buflen,
							 *next_buflen, 0);
		} else {
			(edesc->sec4_sg + sec4_sg_src_index - 1)->len |=
				cpu_to_caam32(SEC4_SG_LEN_FIN);
		}

		state->current_buf = !state->current_buf;

		desc = edesc->hw_desc;

		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
						    sec4_sg_bytes,
						    DMA_TO_DEVICE);
		if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
			dev_err(jrdev, "unable to map S/G table\n");
			ret = -ENOMEM;
			goto unmap_ctx;
		}

		append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len +
				       to_hash, LDST_SGF);
		append_seq_out_ptr(desc, state->ctx_dma, ctx->ctx_len, 0);
#ifdef DEBUG
		print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, desc,
			       desc_bytes(desc), 1);
#endif

		ret = caam_jr_enqueue(jrdev, desc, ahash_done_bi, req);
		if (ret)
			goto unmap_ctx;

		ret = -EINPROGRESS;
	} else if (*next_buflen) {
		scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
					 req->nbytes, 0);
		*buflen = *next_buflen;
		*next_buflen = last_buflen;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
	print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
		       *next_buflen, 1);
#endif

	return ret;
 unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
	kfree(edesc);
	return ret;
}
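
/*
 * Worked example for the buffering math above (illustrative numbers):
 * with a 64-byte block size, 10 bytes already buffered and a 100-byte
 * update, in_len = 110, *next_buflen = 110 % 64 = 46 and to_hash = 64;
 * one block is hashed now and 46 bytes are carried in next_buf for the
 * following call.
 */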
static int ahash_final_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
	int last_buflen = state->current_buf ? state->buflen_0 :
			  state->buflen_1;
	int sec4_sg_bytes, sec4_sg_src_index;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	u32 *desc;
	int ret;

	sec4_sg_src_index = 1 + (buflen ? 1 : 0);
	sec4_sg_bytes = sec4_sg_src_index * sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index,
				  ctx->sh_desc_fin, ctx->sh_desc_fin_dma,
				  flags);
	if (!edesc)
		return -ENOMEM;

	desc = edesc->hw_desc;

	edesc->sec4_sg_bytes = sec4_sg_bytes;
	edesc->src_nents = 0;

	ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
				 edesc->sec4_sg, DMA_TO_DEVICE);
	if (ret)
		goto unmap_ctx;

	state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1,
						buf, state->buf_dma, buflen,
						last_buflen);
	(edesc->sec4_sg + sec4_sg_src_index - 1)->len |=
		cpu_to_caam32(SEC4_SG_LEN_FIN);

	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
		dev_err(jrdev, "unable to map S/G table\n");
		ret = -ENOMEM;
		goto unmap_ctx;
	}

	append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + buflen,
			  LDST_SGF);

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);
	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
		dev_err(jrdev, "unable to map dst\n");
		ret = -ENOMEM;
		goto unmap_ctx;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req);
	if (ret)
		goto unmap_ctx;

	return -EINPROGRESS;
 unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
	kfree(edesc);
	return ret;
}
static int ahash_finup_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
	int last_buflen = state->current_buf ? state->buflen_0 :
			  state->buflen_1;
	int sec4_sg_src_index;
	int src_nents, mapped_nents;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	u32 *desc;
	int ret;

	src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (src_nents < 0) {
		dev_err(jrdev, "Invalid number of src SG.\n");
		return src_nents;
	}
	if (src_nents) {
		mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
					  DMA_TO_DEVICE);
		if (!mapped_nents) {
			dev_err(jrdev, "unable to DMA map source\n");
			return -ENOMEM;
		}
	} else {
		mapped_nents = 0;
	}

	sec4_sg_src_index = 1 + (buflen ? 1 : 0);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents,
				  ctx->sh_desc_finup, ctx->sh_desc_finup_dma,
				  flags);
	if (!edesc) {
		dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	desc = edesc->hw_desc;

	edesc->src_nents = src_nents;

	ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
				 edesc->sec4_sg, DMA_TO_DEVICE);
	if (ret)
		goto unmap_ctx;

	state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1,
						buf, state->buf_dma, buflen,
						last_buflen);

	ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents,
				  sec4_sg_src_index, ctx->ctx_len + buflen,
				  req->nbytes);
	if (ret)
		goto unmap_ctx;

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);
	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
		dev_err(jrdev, "unable to map dst\n");
		ret = -ENOMEM;
		goto unmap_ctx;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req);
	if (ret)
		goto unmap_ctx;

	return -EINPROGRESS;
 unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
	kfree(edesc);
	return ret;
}
static int ahash_digest(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u32 *desc;
	int digestsize = crypto_ahash_digestsize(ahash);
	int src_nents, mapped_nents;
	struct ahash_edesc *edesc;
	int ret;

	src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (src_nents < 0) {
		dev_err(jrdev, "Invalid number of src SG.\n");
		return src_nents;
	}
	if (src_nents) {
		mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
					  DMA_TO_DEVICE);
		if (!mapped_nents) {
			dev_err(jrdev, "unable to map source for DMA\n");
			return -ENOMEM;
		}
	} else {
		mapped_nents = 0;
	}

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(ctx, mapped_nents > 1 ? mapped_nents : 0,
				  ctx->sh_desc_digest, ctx->sh_desc_digest_dma,
				  flags);
	if (!edesc) {
		dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	edesc->src_nents = src_nents;

	ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 0, 0,
				  req->nbytes);
	if (ret) {
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
		return ret;
	}

	desc = edesc->hw_desc;

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);
	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
		dev_err(jrdev, "unable to map dst\n");
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
	}

	return ret;
}
/* submit ahash final if it is the first job descriptor */
static int ahash_final_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
	u32 *desc;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret;

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(ctx, 0, ctx->sh_desc_digest,
				  ctx->sh_desc_digest_dma, flags);
	if (!edesc)
		return -ENOMEM;

	desc = edesc->hw_desc;

	state->buf_dma = dma_map_single(jrdev, buf, buflen,
					DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, state->buf_dma)) {
		dev_err(jrdev, "unable to map src\n");
		goto unmap;
	}

	append_seq_in_ptr(desc, state->buf_dma, buflen, 0);

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);
	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
		dev_err(jrdev, "unable to map dst\n");
		goto unmap;
	}
	edesc->src_nents = 0;
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
	}

	return ret;
 unmap:
	ahash_unmap(jrdev, edesc, req, digestsize);
	kfree(edesc);
	return -ENOMEM;
}
/* submit ahash update if it is the first job descriptor after update */
static int ahash_update_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int *buflen = state->current_buf ? &state->buflen_1 : &state->buflen_0;
	u8 *next_buf = state->current_buf ? state->buf_0 : state->buf_1;
	int *next_buflen = state->current_buf ? &state->buflen_0 :
			   &state->buflen_1;
	int in_len = *buflen + req->nbytes, to_hash;
	int sec4_sg_bytes, src_nents, mapped_nents;
	struct ahash_edesc *edesc;
	u32 *desc;
	int ret = 0;

	*next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
	to_hash = in_len - *next_buflen;

	if (to_hash) {
		src_nents = sg_nents_for_len(req->src,
					     req->nbytes - *next_buflen);
		if (src_nents < 0) {
			dev_err(jrdev, "Invalid number of src SG.\n");
			return src_nents;
		}
		if (src_nents) {
			mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
						  DMA_TO_DEVICE);
			if (!mapped_nents) {
				dev_err(jrdev, "unable to DMA map source\n");
				return -ENOMEM;
			}
		} else {
			mapped_nents = 0;
		}

		sec4_sg_bytes = (1 + mapped_nents) *
				sizeof(struct sec4_sg_entry);

		/*
		 * allocate space for base edesc and hw desc commands,
		 * link tables
		 */
		edesc = ahash_edesc_alloc(ctx, 1 + mapped_nents,
					  ctx->sh_desc_update_first,
					  ctx->sh_desc_update_first_dma,
					  flags);
		if (!edesc) {
			dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;
		edesc->sec4_sg_bytes = sec4_sg_bytes;
		edesc->dst_dma = 0;

		state->buf_dma = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg,
						    buf, *buflen);
		sg_to_sec4_sg_last(req->src, mapped_nents,
				   edesc->sec4_sg + 1, 0);

		if (*next_buflen)
			scatterwalk_map_and_copy(next_buf, req->src,
						 to_hash - *buflen,
						 *next_buflen, 0);

		state->current_buf = !state->current_buf;

		desc = edesc->hw_desc;

		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
						    sec4_sg_bytes,
						    DMA_TO_DEVICE);
		if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
			dev_err(jrdev, "unable to map S/G table\n");
			ret = -ENOMEM;
			goto unmap_ctx;
		}

		append_seq_in_ptr(desc, edesc->sec4_sg_dma, to_hash, LDST_SGF);

		ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
		if (ret)
			goto unmap_ctx;
#ifdef DEBUG
		print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, desc,
			       desc_bytes(desc), 1);
#endif

		ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req);
		if (ret)
			goto unmap_ctx;

		ret = -EINPROGRESS;
		state->update = ahash_update_ctx;
		state->finup = ahash_finup_ctx;
		state->final = ahash_final_ctx;
	} else if (*next_buflen) {
		scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
					 req->nbytes, 0);
		*buflen = *next_buflen;
		*next_buflen = 0;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
	print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
		       *next_buflen, 1);
#endif

	return ret;
 unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
	kfree(edesc);
	return ret;
}
/* submit ahash finup if it is the first job descriptor after update */
static int ahash_finup_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
	int last_buflen = state->current_buf ? state->buflen_0 :
			  state->buflen_1;
	int sec4_sg_bytes, sec4_sg_src_index, src_nents, mapped_nents;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	u32 *desc;
	int ret;

	src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (src_nents < 0) {
		dev_err(jrdev, "Invalid number of src SG.\n");
		return src_nents;
	}
	if (src_nents) {
		mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
					  DMA_TO_DEVICE);
		if (!mapped_nents) {
			dev_err(jrdev, "unable to DMA map source\n");
			return -ENOMEM;
		}
	} else {
		mapped_nents = 0;
	}

	sec4_sg_src_index = 2;
	sec4_sg_bytes = (sec4_sg_src_index + mapped_nents) *
			 sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents,
				  ctx->sh_desc_digest, ctx->sh_desc_digest_dma,
				  flags);
	if (!edesc) {
		dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	desc = edesc->hw_desc;

	edesc->src_nents = src_nents;
	edesc->sec4_sg_bytes = sec4_sg_bytes;

	state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, buf,
						state->buf_dma, buflen,
						last_buflen);

	ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 1, buflen,
				  req->nbytes);
	if (ret) {
		dev_err(jrdev, "unable to map S/G table\n");
		goto unmap;
	}

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);
	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
		dev_err(jrdev, "unable to map dst\n");
		goto unmap;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
	}

	return ret;
 unmap:
	ahash_unmap(jrdev, edesc, req, digestsize);
	kfree(edesc);
	return -ENOMEM;
}
/* submit first update job descriptor after init */
static int ahash_update_first(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *next_buf = state->current_buf ? state->buf_1 : state->buf_0;
	int *next_buflen = state->current_buf ?
		&state->buflen_1 : &state->buflen_0;
	int to_hash;
	u32 *desc;
	int src_nents, mapped_nents;
	struct ahash_edesc *edesc;
	int ret = 0;

	*next_buflen = req->nbytes & (crypto_tfm_alg_blocksize(&ahash->base) -
				      1);
	to_hash = req->nbytes - *next_buflen;

	if (to_hash) {
		src_nents = sg_nents_for_len(req->src,
					     req->nbytes - *next_buflen);
		if (src_nents < 0) {
			dev_err(jrdev, "Invalid number of src SG.\n");
			return src_nents;
		}
		if (src_nents) {
			mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
						  DMA_TO_DEVICE);
			if (!mapped_nents) {
				dev_err(jrdev, "unable to map source for DMA\n");
				return -ENOMEM;
			}
		} else {
			mapped_nents = 0;
		}

		/*
		 * allocate space for base edesc and hw desc commands,
		 * link tables
		 */
		edesc = ahash_edesc_alloc(ctx, mapped_nents > 1 ?
					  mapped_nents : 0,
					  ctx->sh_desc_update_first,
					  ctx->sh_desc_update_first_dma,
					  flags);
		if (!edesc) {
			dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;
		edesc->dst_dma = 0;

		ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 0, 0,
					  to_hash);
		if (ret)
			goto unmap_ctx;

		if (*next_buflen)
			scatterwalk_map_and_copy(next_buf, req->src, to_hash,
						 *next_buflen, 0);

		desc = edesc->hw_desc;

		ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
		if (ret)
			goto unmap_ctx;
#ifdef DEBUG
		print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, desc,
			       desc_bytes(desc), 1);
#endif

		ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req);
		if (ret)
			goto unmap_ctx;

		ret = -EINPROGRESS;
		state->update = ahash_update_ctx;
		state->finup = ahash_finup_ctx;
		state->final = ahash_final_ctx;
	} else if (*next_buflen) {
		state->update = ahash_update_no_ctx;
		state->finup = ahash_finup_no_ctx;
		state->final = ahash_final_no_ctx;
		scatterwalk_map_and_copy(next_buf, req->src, 0,
					 req->nbytes, 0);
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
		       *next_buflen, 1);
#endif

	return ret;
 unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
	kfree(edesc);
	return ret;
}
static int ahash_finup_first(struct ahash_request *req)
{
	return ahash_digest(req);
}

static int ahash_init(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	state->update = ahash_update_first;
	state->finup = ahash_finup_first;
	state->final = ahash_final_no_ctx;

	state->current_buf = 0;
	state->buf_dma = 0;
	state->buflen_0 = 0;
	state->buflen_1 = 0;

	return 0;
}
static int ahash_update(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	return state->update(req);
}

static int ahash_finup(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	return state->finup(req);
}

static int ahash_final(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	return state->final(req);
}
static int ahash_export(struct ahash_request *req, void *out)
{
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct caam_export_state *export = out;
	int len;
	u8 *buf;

	if (state->current_buf) {
		buf = state->buf_1;
		len = state->buflen_1;
	} else {
		buf = state->buf_0;
		len = state->buflen_0;
	}

	memcpy(export->buf, buf, len);
	memcpy(export->caam_ctx, state->caam_ctx, sizeof(export->caam_ctx));
	export->buflen = len;
	export->update = state->update;
	export->final = state->final;
	export->finup = state->finup;

	return 0;
}
static int ahash_import(struct ahash_request *req, const void *in)
{
	struct caam_hash_state *state = ahash_request_ctx(req);
	const struct caam_export_state *export = in;

	memset(state, 0, sizeof(*state));
	memcpy(state->buf_0, export->buf, export->buflen);
	memcpy(state->caam_ctx, export->caam_ctx, sizeof(state->caam_ctx));
	state->buflen_0 = export->buflen;
	state->update = export->update;
	state->final = export->final;
	state->finup = export->finup;

	return 0;
}
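
/*
 * Usage sketch (hypothetical caller): export/import let a partial hash
 * state migrate between requests via the standard crypto API, e.g.:
 *
 *	char st[sizeof(struct caam_export_state)];
 *
 *	crypto_ahash_export(req, st);	// snapshot after some updates
 *	...
 *	crypto_ahash_import(req2, st);	// resume on another request
 *	crypto_ahash_final(req2);
 */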
struct caam_hash_template {
	char name[CRYPTO_MAX_ALG_NAME];
	char driver_name[CRYPTO_MAX_ALG_NAME];
	char hmac_name[CRYPTO_MAX_ALG_NAME];
	char hmac_driver_name[CRYPTO_MAX_ALG_NAME];
	unsigned int blocksize;
	struct ahash_alg template_ahash;
	u32 alg_type;
	u32 alg_op;
};
/* ahash descriptors */
static struct caam_hash_template driver_hash[] = {
	{
		.name = "sha1",
		.driver_name = "sha1-caam",
		.hmac_name = "hmac(sha1)",
		.hmac_driver_name = "hmac-sha1-caam",
		.blocksize = SHA1_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA1_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA1,
		.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
	}, {
		.name = "sha224",
		.driver_name = "sha224-caam",
		.hmac_name = "hmac(sha224)",
		.hmac_driver_name = "hmac-sha224-caam",
		.blocksize = SHA224_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA224_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA224,
		.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
	}, {
		.name = "sha256",
		.driver_name = "sha256-caam",
		.hmac_name = "hmac(sha256)",
		.hmac_driver_name = "hmac-sha256-caam",
		.blocksize = SHA256_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA256_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA256,
		.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
	}, {
		.name = "sha384",
		.driver_name = "sha384-caam",
		.hmac_name = "hmac(sha384)",
		.hmac_driver_name = "hmac-sha384-caam",
		.blocksize = SHA384_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA384_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA384,
		.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
	}, {
		.name = "sha512",
		.driver_name = "sha512-caam",
		.hmac_name = "hmac(sha512)",
		.hmac_driver_name = "hmac-sha512-caam",
		.blocksize = SHA512_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA512_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA512,
		.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
	}, {
		.name = "md5",
		.driver_name = "md5-caam",
		.hmac_name = "hmac(md5)",
		.hmac_driver_name = "hmac-md5-caam",
		.blocksize = MD5_BLOCK_WORDS * 4,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = MD5_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_MD5,
		.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
	},
};
struct caam_hash_alg {
	struct list_head entry;
	int alg_type;
	int alg_op;
	struct ahash_alg ahash_alg;
};
static int caam_hash_cra_init(struct crypto_tfm *tfm)
{
	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
	struct crypto_alg *base = tfm->__crt_alg;
	struct hash_alg_common *halg =
		 container_of(base, struct hash_alg_common, base);
	struct ahash_alg *alg =
		 container_of(halg, struct ahash_alg, halg);
	struct caam_hash_alg *caam_hash =
		 container_of(alg, struct caam_hash_alg, ahash_alg);
	struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
	/* Sizes for MDHA running digests: MD5, SHA1, 224, 256, 384, 512 */
	static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE,
					 HASH_MSG_LEN + SHA1_DIGEST_SIZE,
					 HASH_MSG_LEN + 32,
					 HASH_MSG_LEN + SHA256_DIGEST_SIZE,
					 HASH_MSG_LEN + 64,
					 HASH_MSG_LEN + SHA512_DIGEST_SIZE };

	/*
	 * Get a Job ring from Job Ring driver to ensure in-order
	 * crypto request processing per tfm
	 */
	ctx->jrdev = caam_jr_alloc();
	if (IS_ERR(ctx->jrdev)) {
		pr_err("Job Ring Device allocation for transform failed\n");
		return PTR_ERR(ctx->jrdev);
	}
	/* copy descriptor header template value */
	ctx->alg_type = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;
	ctx->alg_op = OP_TYPE_CLASS2_ALG | caam_hash->alg_op;

	ctx->ctx_len = runninglen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
				  OP_ALG_ALGSEL_SHIFT];

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct caam_hash_state));
	return ahash_set_sh_desc(ahash);
}
static void caam_hash_cra_exit(struct crypto_tfm *tfm)
{
	struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);

	if (ctx->sh_desc_update_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_update_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_update_dma,
				 desc_bytes(ctx->sh_desc_update),
				 DMA_TO_DEVICE);
	if (ctx->sh_desc_update_first_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_update_first_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_update_first_dma,
				 desc_bytes(ctx->sh_desc_update_first),
				 DMA_TO_DEVICE);
	if (ctx->sh_desc_fin_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_fin_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_fin_dma,
				 desc_bytes(ctx->sh_desc_fin), DMA_TO_DEVICE);
	if (ctx->sh_desc_digest_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_digest_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_digest_dma,
				 desc_bytes(ctx->sh_desc_digest),
				 DMA_TO_DEVICE);
	if (ctx->sh_desc_finup_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_finup_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_finup_dma,
				 desc_bytes(ctx->sh_desc_finup), DMA_TO_DEVICE);

	caam_jr_free(ctx->jrdev);
}
static void __exit caam_algapi_hash_exit(void)
{
	struct caam_hash_alg *t_alg, *n;

	if (!hash_list.next)
		return;

	list_for_each_entry_safe(t_alg, n, &hash_list, entry) {
		crypto_unregister_ahash(&t_alg->ahash_alg);
		list_del(&t_alg->entry);
		kfree(t_alg);
	}
}
static struct caam_hash_alg *
caam_hash_alloc(struct caam_hash_template *template,
		bool keyed)
{
	struct caam_hash_alg *t_alg;
	struct ahash_alg *halg;
	struct crypto_alg *alg;

	t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
	if (!t_alg) {
		pr_err("failed to allocate t_alg\n");
		return ERR_PTR(-ENOMEM);
	}

	t_alg->ahash_alg = template->template_ahash;
	halg = &t_alg->ahash_alg;
	alg = &halg->halg.base;

	if (keyed) {
		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->hmac_name);
		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->hmac_driver_name);
	} else {
		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->name);
		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->driver_name);
		t_alg->ahash_alg.setkey = NULL;
	}
	alg->cra_module = THIS_MODULE;
	alg->cra_init = caam_hash_cra_init;
	alg->cra_exit = caam_hash_cra_exit;
	alg->cra_ctxsize = sizeof(struct caam_hash_ctx);
	alg->cra_priority = CAAM_CRA_PRIORITY;
	alg->cra_blocksize = template->blocksize;
	alg->cra_alignmask = 0;
	alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_TYPE_AHASH;
	alg->cra_type = &crypto_ahash_type;

	t_alg->alg_type = template->alg_type;
	t_alg->alg_op = template->alg_op;

	return t_alg;
}
static int __init caam_algapi_hash_init(void)
{
	struct device_node *dev_node;
	struct platform_device *pdev;
	struct device *ctrldev;
	int i = 0, err = 0;
	struct caam_drv_private *priv;
	unsigned int md_limit = SHA512_DIGEST_SIZE;
	u32 cha_inst, cha_vid;

	dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
	if (!dev_node) {
		dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
		if (!dev_node)
			return -ENODEV;
	}

	pdev = of_find_device_by_node(dev_node);
	if (!pdev) {
		of_node_put(dev_node);
		return -ENODEV;
	}

	ctrldev = &pdev->dev;
	priv = dev_get_drvdata(ctrldev);
	of_node_put(dev_node);

	/*
	 * If priv is NULL, it's probably because the caam driver wasn't
	 * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
	 */
	if (!priv)
		return -ENODEV;

	/*
	 * Register crypto algorithms the device supports.  First, identify
	 * presence and attributes of MD block.
	 */
	cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
	cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);

	/*
	 * Skip registration of any hashing algorithms if MD block
	 * is not present.
	 */
	if (!((cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT))
		return -ENODEV;

	/* Limit digest size based on LP256 */
	if ((cha_vid & CHA_ID_LS_MD_MASK) == CHA_ID_LS_MD_LP256)
		md_limit = SHA256_DIGEST_SIZE;

	INIT_LIST_HEAD(&hash_list);

	/* register crypto algorithms the device supports */
	for (i = 0; i < ARRAY_SIZE(driver_hash); i++) {
		struct caam_hash_alg *t_alg;
		struct caam_hash_template *alg = driver_hash + i;

		/* If MD size is not supported by device, skip registration */
		if (alg->template_ahash.halg.digestsize > md_limit)
			continue;

		/* register hmac version */
		t_alg = caam_hash_alloc(alg, true);
		if (IS_ERR(t_alg)) {
			err = PTR_ERR(t_alg);
			pr_warn("%s alg allocation failed\n", alg->driver_name);
			continue;
		}

		err = crypto_register_ahash(&t_alg->ahash_alg);
		if (err) {
			pr_warn("%s alg registration failed: %d\n",
				t_alg->ahash_alg.halg.base.cra_driver_name,
				err);
			kfree(t_alg);
		} else
			list_add_tail(&t_alg->entry, &hash_list);

		/* register unkeyed version */
		t_alg = caam_hash_alloc(alg, false);
		if (IS_ERR(t_alg)) {
			err = PTR_ERR(t_alg);
			pr_warn("%s alg allocation failed\n", alg->driver_name);
			continue;
		}

		err = crypto_register_ahash(&t_alg->ahash_alg);
		if (err) {
			pr_warn("%s alg registration failed: %d\n",
				t_alg->ahash_alg.halg.base.cra_driver_name,
				err);
			kfree(t_alg);
		} else
			list_add_tail(&t_alg->entry, &hash_list);
	}

	return err;
}
module_init(caam_algapi_hash_init);
module_exit(caam_algapi_hash_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("FSL CAAM support for ahash functions of crypto API");
MODULE_AUTHOR("Freescale Semiconductor - NMG");