// SPDX-License-Identifier: GPL-2.0+
/*
 * caam - Freescale FSL CAAM support for ahash functions of crypto API
 *
 * Copyright 2011 Freescale Semiconductor, Inc.
 * Copyright 2018-2019, 2023 NXP
 *
 * Based on caamalg.c crypto API driver.
 *
 * relationship of digest job descriptor or first job descriptor after init to
 * shared descriptors:
 *
 * ---------------                     ---------------
 * | JobDesc #1  |-------------------->|  ShareDesc  |
 * | *(packet 1) |                     |  (hashKey)  |
 * ---------------                     | (operation) |
 *                                     ---------------
 *
 * relationship of subsequent job descriptors to shared descriptors:
 *
 * ---------------                     ---------------
 * | JobDesc #2  |-------------------->|  ShareDesc  |
 * | *(packet 2) |      |------------->|  (hashKey)  |
 * ---------------      |         |--->| (operation) |
 *       .              |         |    | (load ctx2) |
 *       .              |         |    ---------------
 * ---------------      |         |
 * | JobDesc #3  |------|         |
 * | *(packet 3) |                |
 * ---------------                |
 *       .                        |
 *       .                        |
 * ---------------                |
 * | JobDesc #4  |----------------|
 * | *(packet x) |
 * ---------------
 *
 * The SharedDesc never changes for a connection unless rekeyed, but
 * each packet will likely be in a different place. So all we need
 * to know to process the packet is where the input is, where the
 * output goes, and what context we want to process with. Context is
 * in the SharedDesc, packet references in the JobDesc.
 *
 * So, a job desc looks like:
 *
 * ---------------------
 * | Header            |
 * | ShareDesc Pointer |
 * | SEQ_OUT_PTR       |
 * | (output buffer)   |
 * | (output length)   |
 * | SEQ_IN_PTR        |
 * | (input buffer)    |
 * | (input length)    |
 * ---------------------
 */
#include "compat.h"
#include "regs.h"
#include "intern.h"
#include "desc_constr.h"
#include "jr.h"
#include "error.h"
#include "sg_sw_sec4.h"
#include "key_gen.h"
#include "caamhash_desc.h"
#include <crypto/internal/engine.h>
#include <crypto/internal/hash.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/string.h>
#define CAAM_CRA_PRIORITY		3000

/* max hash key is max split key size */
#define CAAM_MAX_HASH_KEY_SIZE		(SHA512_DIGEST_SIZE * 2)

#define CAAM_MAX_HASH_BLOCK_SIZE	SHA512_BLOCK_SIZE
#define CAAM_MAX_HASH_DIGEST_SIZE	SHA512_DIGEST_SIZE

#define DESC_HASH_MAX_USED_BYTES	(DESC_AHASH_FINAL_LEN + \
					 CAAM_MAX_HASH_KEY_SIZE)
#define DESC_HASH_MAX_USED_LEN		(DESC_HASH_MAX_USED_BYTES / CAAM_CMD_SZ)

/* caam context sizes for hashes: running digest + 8 */
#define HASH_MSG_LEN			8
#define MAX_CTX_LEN			(HASH_MSG_LEN + SHA512_DIGEST_SIZE)
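/*
 * Worked example: for SHA-256 the running context kept in caam_ctx is
 * HASH_MSG_LEN + SHA256_DIGEST_SIZE = 8 + 32 = 40 bytes (the extra 8 bytes
 * hold the running message length). MAX_CTX_LEN covers the largest case,
 * SHA-512: 8 + 64 = 72 bytes.
 */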
static struct list_head hash_list;

/* ahash per-session context */
struct caam_hash_ctx {
	u32 sh_desc_update[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	u32 sh_desc_update_first[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	u32 sh_desc_fin[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	u32 sh_desc_digest[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	u8 key[CAAM_MAX_HASH_KEY_SIZE] ____cacheline_aligned;
	dma_addr_t sh_desc_update_dma ____cacheline_aligned;
	dma_addr_t sh_desc_update_first_dma;
	dma_addr_t sh_desc_fin_dma;
	dma_addr_t sh_desc_digest_dma;
	enum dma_data_direction dir;
	enum dma_data_direction key_dir;
	struct device *jrdev;
	int ctx_len;
	struct alginfo adata;
};
/* ahash per-request state */
struct caam_hash_state {
	dma_addr_t buf_dma;
	dma_addr_t ctx_dma;
	int ctx_dma_len;
	u8 buf[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
	int buflen;
	int next_buflen;
	u8 caam_ctx[MAX_CTX_LEN] ____cacheline_aligned;
	int (*update)(struct ahash_request *req) ____cacheline_aligned;
	int (*final)(struct ahash_request *req);
	int (*finup)(struct ahash_request *req);
	struct ahash_edesc *edesc;
	void (*ahash_op_done)(struct device *jrdev, u32 *desc, u32 err,
			      void *context);
};

struct caam_export_state {
	u8 buf[CAAM_MAX_HASH_BLOCK_SIZE];
	u8 caam_ctx[MAX_CTX_LEN];
	int buflen;
	int (*update)(struct ahash_request *req);
	int (*final)(struct ahash_request *req);
	int (*finup)(struct ahash_request *req);
};
static inline bool is_cmac_aes(u32 algtype)
{
	return (algtype & (OP_ALG_ALGSEL_MASK | OP_ALG_AAI_MASK)) ==
	       (OP_ALG_ALGSEL_AES | OP_ALG_AAI_CMAC);
}
/* Common job descriptor seq in/out ptr routines */

/* Map state->caam_ctx, and append seq_out_ptr command that points to it */
static inline int map_seq_out_ptr_ctx(u32 *desc, struct device *jrdev,
				      struct caam_hash_state *state,
				      int ctx_len)
{
	state->ctx_dma_len = ctx_len;
	state->ctx_dma = dma_map_single(jrdev, state->caam_ctx,
					ctx_len, DMA_FROM_DEVICE);
	if (dma_mapping_error(jrdev, state->ctx_dma)) {
		dev_err(jrdev, "unable to map ctx\n");
		state->ctx_dma = 0;
		return -ENOMEM;
	}

	append_seq_out_ptr(desc, state->ctx_dma, ctx_len, 0);

	return 0;
}
/* Map current buffer in state (if length > 0) and put it in link table */
static inline int buf_map_to_sec4_sg(struct device *jrdev,
				     struct sec4_sg_entry *sec4_sg,
				     struct caam_hash_state *state)
{
	int buflen = state->buflen;

	if (!buflen)
		return 0;

	state->buf_dma = dma_map_single(jrdev, state->buf, buflen,
					DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, state->buf_dma)) {
		dev_err(jrdev, "unable to map buf\n");
		state->buf_dma = 0;
		return -ENOMEM;
	}

	dma_to_sec4_sg_one(sec4_sg, state->buf_dma, buflen, 0);

	return 0;
}
/* Map state->caam_ctx, and add it to link table */
static inline int ctx_map_to_sec4_sg(struct device *jrdev,
				     struct caam_hash_state *state, int ctx_len,
				     struct sec4_sg_entry *sec4_sg, u32 flag)
{
	state->ctx_dma_len = ctx_len;
	state->ctx_dma = dma_map_single(jrdev, state->caam_ctx, ctx_len, flag);
	if (dma_mapping_error(jrdev, state->ctx_dma)) {
		dev_err(jrdev, "unable to map ctx\n");
		state->ctx_dma = 0;
		return -ENOMEM;
	}

	dma_to_sec4_sg_one(sec4_sg, state->ctx_dma, ctx_len, 0);

	return 0;
}
static int ahash_set_sh_desc(struct crypto_ahash *ahash)
{
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	int digestsize = crypto_ahash_digestsize(ahash);
	struct device *jrdev = ctx->jrdev;
	struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
	u32 *desc;

	ctx->adata.key_virt = ctx->key;

	/* ahash_update shared descriptor */
	desc = ctx->sh_desc_update;
	cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_UPDATE, ctx->ctx_len,
			  ctx->ctx_len, true, ctrlpriv->era);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma,
				   desc_bytes(desc), ctx->dir);

	print_hex_dump_debug("ahash update shdesc@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);

	/* ahash_update_first shared descriptor */
	desc = ctx->sh_desc_update_first;
	cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len,
			  ctx->ctx_len, false, ctrlpriv->era);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma,
				   desc_bytes(desc), ctx->dir);
	print_hex_dump_debug("ahash update first shdesc@"__stringify(__LINE__)
			     ": ", DUMP_PREFIX_ADDRESS, 16, 4, desc,
			     desc_bytes(desc), 1);

	/* ahash_final shared descriptor */
	desc = ctx->sh_desc_fin;
	cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_FINALIZE, digestsize,
			  ctx->ctx_len, true, ctrlpriv->era);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma,
				   desc_bytes(desc), ctx->dir);

	print_hex_dump_debug("ahash final shdesc@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc,
			     desc_bytes(desc), 1);

	/* ahash_digest shared descriptor */
	desc = ctx->sh_desc_digest;
	cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INITFINAL, digestsize,
			  ctx->ctx_len, false, ctrlpriv->era);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma,
				   desc_bytes(desc), ctx->dir);

	print_hex_dump_debug("ahash digest shdesc@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc,
			     desc_bytes(desc), 1);

	return 0;
}
static int axcbc_set_sh_desc(struct crypto_ahash *ahash)
{
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	int digestsize = crypto_ahash_digestsize(ahash);
	struct device *jrdev = ctx->jrdev;
	u32 *desc;

	/* shared descriptor for ahash_update */
	desc = ctx->sh_desc_update;
	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_UPDATE,
			    ctx->ctx_len, ctx->ctx_len);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma,
				   desc_bytes(desc), ctx->dir);
	print_hex_dump_debug("axcbc update shdesc@" __stringify(__LINE__)" : ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);

	/* shared descriptor for ahash_{final,finup} */
	desc = ctx->sh_desc_fin;
	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_FINALIZE,
			    digestsize, ctx->ctx_len);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma,
				   desc_bytes(desc), ctx->dir);
	print_hex_dump_debug("axcbc finup shdesc@" __stringify(__LINE__)" : ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);

	/* key is immediate data for INIT and INITFINAL states */
	ctx->adata.key_virt = ctx->key;

	/* shared descriptor for first invocation of ahash_update */
	desc = ctx->sh_desc_update_first;
	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len,
			    ctx->ctx_len);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma,
				   desc_bytes(desc), ctx->dir);
	print_hex_dump_debug("axcbc update first shdesc@" __stringify(__LINE__)
			     " : ", DUMP_PREFIX_ADDRESS, 16, 4, desc,
			     desc_bytes(desc), 1);

	/* shared descriptor for ahash_digest */
	desc = ctx->sh_desc_digest;
	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INITFINAL,
			    digestsize, ctx->ctx_len);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma,
				   desc_bytes(desc), ctx->dir);
	print_hex_dump_debug("axcbc digest shdesc@" __stringify(__LINE__)" : ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);

	return 0;
}
static int acmac_set_sh_desc(struct crypto_ahash *ahash)
{
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	int digestsize = crypto_ahash_digestsize(ahash);
	struct device *jrdev = ctx->jrdev;
	u32 *desc;

	/* shared descriptor for ahash_update */
	desc = ctx->sh_desc_update;
	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_UPDATE,
			    ctx->ctx_len, ctx->ctx_len);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma,
				   desc_bytes(desc), ctx->dir);
	print_hex_dump_debug("acmac update shdesc@" __stringify(__LINE__)" : ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc,
			     desc_bytes(desc), 1);

	/* shared descriptor for ahash_{final,finup} */
	desc = ctx->sh_desc_fin;
	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_FINALIZE,
			    digestsize, ctx->ctx_len);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma,
				   desc_bytes(desc), ctx->dir);
	print_hex_dump_debug("acmac finup shdesc@" __stringify(__LINE__)" : ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc,
			     desc_bytes(desc), 1);

	/* shared descriptor for first invocation of ahash_update */
	desc = ctx->sh_desc_update_first;
	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len,
			    ctx->ctx_len);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma,
				   desc_bytes(desc), ctx->dir);
	print_hex_dump_debug("acmac update first shdesc@" __stringify(__LINE__)
			     " : ", DUMP_PREFIX_ADDRESS, 16, 4, desc,
			     desc_bytes(desc), 1);

	/* shared descriptor for ahash_digest */
	desc = ctx->sh_desc_digest;
	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INITFINAL,
			    digestsize, ctx->ctx_len);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma,
				   desc_bytes(desc), ctx->dir);
	print_hex_dump_debug("acmac digest shdesc@" __stringify(__LINE__)" : ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc,
			     desc_bytes(desc), 1);

	return 0;
}
/* Digest the hash key if it is too large */
static int hash_digest_key(struct caam_hash_ctx *ctx, u32 *keylen, u8 *key,
			   int digestsize)
{
	struct device *jrdev = ctx->jrdev;
	u32 *desc;
	struct split_key_result result;
	dma_addr_t key_dma;
	int ret;

	desc = kmalloc(CAAM_CMD_SZ * 8 + CAAM_PTR_SZ * 2, GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	init_job_desc(desc, 0);

	key_dma = dma_map_single(jrdev, key, *keylen, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(jrdev, key_dma)) {
		dev_err(jrdev, "unable to map key memory\n");
		kfree(desc);
		return -ENOMEM;
	}

	/* Job descriptor to perform unkeyed hash on key_in */
	append_operation(desc, ctx->adata.algtype | OP_ALG_ENCRYPT |
			 OP_ALG_AS_INITFINAL);
	append_seq_in_ptr(desc, key_dma, *keylen, 0);
	append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 |
			     FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_MSG);
	append_seq_out_ptr(desc, key_dma, digestsize, 0);
	append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	print_hex_dump_debug("key_in@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, key, *keylen, 1);
	print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);

	result.err = 0;
	init_completion(&result.completion);

	ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result);
	if (ret == -EINPROGRESS) {
		/* in progress */
		wait_for_completion(&result.completion);
		ret = result.err;

		print_hex_dump_debug("digested key@"__stringify(__LINE__)": ",
				     DUMP_PREFIX_ADDRESS, 16, 4, key,
				     digestsize, 1);
	}
	dma_unmap_single(jrdev, key_dma, *keylen, DMA_BIDIRECTIONAL);

	*keylen = digestsize;

	kfree(desc);

	return ret;
}
static int ahash_setkey(struct crypto_ahash *ahash,
			const u8 *key, unsigned int keylen)
{
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	struct device *jrdev = ctx->jrdev;
	int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
	int digestsize = crypto_ahash_digestsize(ahash);
	struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent);
	int ret;
	u8 *hashed_key = NULL;

	dev_dbg(jrdev, "keylen %d\n", keylen);

	if (keylen > blocksize) {
		unsigned int aligned_len =
			ALIGN(keylen, dma_get_cache_alignment());

		if (aligned_len < keylen)
			return -EOVERFLOW;

		hashed_key = kmemdup(key, aligned_len, GFP_KERNEL);
		if (!hashed_key)
			return -ENOMEM;
		ret = hash_digest_key(ctx, &keylen, hashed_key, digestsize);
		if (ret)
			goto bad_free_key;
		key = hashed_key;
	}

	/*
	 * If DKP is supported, use it in the shared descriptor to generate
	 * the split key.
	 */
	if (ctrlpriv->era >= 6) {
		ctx->adata.key_inline = true;
		ctx->adata.keylen = keylen;
		ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
						      OP_ALG_ALGSEL_MASK);

		if (ctx->adata.keylen_pad > CAAM_MAX_HASH_KEY_SIZE)
			goto bad_free_key;

		memcpy(ctx->key, key, keylen);

		/*
		 * In case |user key| > |derived key|, using DKP<imm,imm>
		 * would result in invalid opcodes (last bytes of user key) in
		 * the resulting descriptor. Use DKP<ptr,imm> instead => both
		 * virtual and dma key addresses are needed.
		 */
		if (keylen > ctx->adata.keylen_pad)
			dma_sync_single_for_device(ctx->jrdev,
						   ctx->adata.key_dma,
						   ctx->adata.keylen_pad,
						   DMA_TO_DEVICE);
	} else {
		ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, key,
				    keylen, CAAM_MAX_HASH_KEY_SIZE);
		if (ret)
			goto bad_free_key;
	}

	kfree(hashed_key);
	return ahash_set_sh_desc(ahash);
bad_free_key:
	kfree(hashed_key);
	return -EINVAL;
}
static int axcbc_setkey(struct crypto_ahash *ahash, const u8 *key,
			unsigned int keylen)
{
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	struct device *jrdev = ctx->jrdev;

	if (keylen != AES_KEYSIZE_128)
		return -EINVAL;

	memcpy(ctx->key, key, keylen);
	dma_sync_single_for_device(jrdev, ctx->adata.key_dma, keylen,
				   DMA_TO_DEVICE);
	ctx->adata.keylen = keylen;

	print_hex_dump_debug("axcbc ctx.key@" __stringify(__LINE__)" : ",
			     DUMP_PREFIX_ADDRESS, 16, 4, ctx->key, keylen, 1);

	return axcbc_set_sh_desc(ahash);
}
static int acmac_setkey(struct crypto_ahash *ahash, const u8 *key,
			unsigned int keylen)
{
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	int err;

	err = aes_check_keylen(keylen);
	if (err)
		return err;

	/* key is immediate data for all cmac shared descriptors */
	ctx->adata.key_virt = key;
	ctx->adata.keylen = keylen;

	print_hex_dump_debug("acmac ctx.key@" __stringify(__LINE__)" : ",
			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);

	return acmac_set_sh_desc(ahash);
}
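/*
 * Usage sketch (hypothetical caller, not part of this driver): keys reach
 * the setkey handlers above through the generic crypto API. A minimal
 * sketch, assuming "cmac(aes)" resolves to this driver's "cmac-aes-caam":
 *
 *	struct crypto_ahash *tfm = crypto_alloc_ahash("cmac(aes)", 0, 0);
 *	static const u8 key[AES_KEYSIZE_128];	// placeholder key material
 *	int err;
 *
 *	if (!IS_ERR(tfm)) {
 *		err = crypto_ahash_setkey(tfm, key, sizeof(key));
 *		crypto_free_ahash(tfm);
 *	}
 */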
/*
 * ahash_edesc - s/w-extended ahash descriptor
 * @sec4_sg_dma: physical mapped address of h/w link table
 * @src_nents: number of segments in input scatterlist
 * @sec4_sg_bytes: length of dma mapped sec4_sg space
 * @bklog: stored to determine if the request needs backlog
 * @hw_desc: the h/w job descriptor followed by any referenced link tables
 * @sec4_sg: h/w link table
 */
struct ahash_edesc {
	dma_addr_t sec4_sg_dma;
	int src_nents;
	int sec4_sg_bytes;
	bool bklog;
	u32 hw_desc[DESC_JOB_IO_LEN_MAX / sizeof(u32)] ____cacheline_aligned;
	struct sec4_sg_entry sec4_sg[];
};
static inline void ahash_unmap(struct device *dev,
			       struct ahash_edesc *edesc,
			       struct ahash_request *req, int dst_len)
{
	struct caam_hash_state *state = ahash_request_ctx_dma(req);

	if (edesc->src_nents)
		dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);

	if (edesc->sec4_sg_bytes)
		dma_unmap_single(dev, edesc->sec4_sg_dma,
				 edesc->sec4_sg_bytes, DMA_TO_DEVICE);

	if (state->buf_dma) {
		dma_unmap_single(dev, state->buf_dma, state->buflen,
				 DMA_TO_DEVICE);
		state->buf_dma = 0;
	}
}
static inline void ahash_unmap_ctx(struct device *dev,
				   struct ahash_edesc *edesc,
				   struct ahash_request *req, int dst_len,
				   u32 flag)
{
	struct caam_hash_state *state = ahash_request_ctx_dma(req);

	if (state->ctx_dma) {
		dma_unmap_single(dev, state->ctx_dma, state->ctx_dma_len, flag);
		state->ctx_dma = 0;
	}

	ahash_unmap(dev, edesc, req, dst_len);
}
static inline void ahash_done_cpy(struct device *jrdev, u32 *desc, u32 err,
				  void *context, enum dma_data_direction dir)
{
	struct ahash_request *req = context;
	struct caam_drv_private_jr *jrp = dev_get_drvdata(jrdev);
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	int digestsize = crypto_ahash_digestsize(ahash);
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	int ecode = 0;
	bool has_bklog;

	dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);

	edesc = state->edesc;
	has_bklog = edesc->bklog;

	if (err)
		ecode = caam_jr_strstatus(jrdev, err);

	ahash_unmap_ctx(jrdev, edesc, req, digestsize, dir);
	memcpy(req->result, state->caam_ctx, digestsize);
	kfree(edesc);

	print_hex_dump_debug("ctx@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
			     ctx->ctx_len, 1);

	/*
	 * If no backlog flag, the completion of the request is done
	 * by CAAM, not crypto engine.
	 */
	if (!has_bklog)
		ahash_request_complete(req, ecode);
	else
		crypto_finalize_hash_request(jrp->engine, req, ecode);
}
static void ahash_done(struct device *jrdev, u32 *desc, u32 err,
		       void *context)
{
	ahash_done_cpy(jrdev, desc, err, context, DMA_FROM_DEVICE);
}

static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err,
			       void *context)
{
	ahash_done_cpy(jrdev, desc, err, context, DMA_BIDIRECTIONAL);
}
static inline void ahash_done_switch(struct device *jrdev, u32 *desc, u32 err,
				     void *context, enum dma_data_direction dir)
{
	struct ahash_request *req = context;
	struct caam_drv_private_jr *jrp = dev_get_drvdata(jrdev);
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	int digestsize = crypto_ahash_digestsize(ahash);
	int ecode = 0;
	bool has_bklog;

	dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);

	edesc = state->edesc;
	has_bklog = edesc->bklog;

	if (err)
		ecode = caam_jr_strstatus(jrdev, err);

	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, dir);
	kfree(edesc);

	scatterwalk_map_and_copy(state->buf, req->src,
				 req->nbytes - state->next_buflen,
				 state->next_buflen, 0);
	state->buflen = state->next_buflen;

	print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, state->buf,
			     state->buflen, 1);

	print_hex_dump_debug("ctx@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
			     ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump_debug("result@"__stringify(__LINE__)": ",
				     DUMP_PREFIX_ADDRESS, 16, 4, req->result,
				     digestsize, 1);

	/*
	 * If no backlog flag, the completion of the request is done
	 * by CAAM, not crypto engine.
	 */
	if (!has_bklog)
		ahash_request_complete(req, ecode);
	else
		crypto_finalize_hash_request(jrp->engine, req, ecode);
}
static void ahash_done_bi(struct device *jrdev, u32 *desc, u32 err,
			  void *context)
{
	ahash_done_switch(jrdev, desc, err, context, DMA_BIDIRECTIONAL);
}

static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err,
			       void *context)
{
	ahash_done_switch(jrdev, desc, err, context, DMA_FROM_DEVICE);
}
/*
 * Allocate an enhanced descriptor, which contains the hardware descriptor
 * and space for a hardware scatter table containing sg_num entries.
 */
static struct ahash_edesc *ahash_edesc_alloc(struct ahash_request *req,
					     int sg_num, u32 *sh_desc,
					     dma_addr_t sh_desc_dma)
{
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	struct ahash_edesc *edesc;

	edesc = kzalloc(struct_size(edesc, sec4_sg, sg_num), flags);
	if (!edesc)
		return NULL;

	state->edesc = edesc;

	init_job_desc_shared(edesc->hw_desc, sh_desc_dma, desc_len(sh_desc),
			     HDR_SHARE_DEFER | HDR_REVERSE);

	return edesc;
}
static int ahash_edesc_add_src(struct caam_hash_ctx *ctx,
			       struct ahash_edesc *edesc,
			       struct ahash_request *req, int nents,
			       unsigned int first_sg,
			       unsigned int first_bytes, size_t to_hash)
{
	dma_addr_t src_dma;
	u32 options;

	if (nents > 1 || first_sg) {
		struct sec4_sg_entry *sg = edesc->sec4_sg;
		unsigned int sgsize = sizeof(*sg) *
				      pad_sg_nents(first_sg + nents);

		sg_to_sec4_sg_last(req->src, to_hash, sg + first_sg, 0);

		src_dma = dma_map_single(ctx->jrdev, sg, sgsize, DMA_TO_DEVICE);
		if (dma_mapping_error(ctx->jrdev, src_dma)) {
			dev_err(ctx->jrdev, "unable to map S/G table\n");
			return -ENOMEM;
		}

		edesc->sec4_sg_bytes = sgsize;
		edesc->sec4_sg_dma = src_dma;
		options = LDST_SGF;
	} else {
		src_dma = sg_dma_address(req->src);
		options = 0;
	}

	append_seq_in_ptr(edesc->hw_desc, src_dma, first_bytes + to_hash,
			  options);

	return 0;
}
static int ahash_do_one_req(struct crypto_engine *engine, void *areq)
{
	struct ahash_request *req = ahash_request_cast(areq);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(crypto_ahash_reqtfm(req));
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	struct device *jrdev = ctx->jrdev;
	u32 *desc = state->edesc->hw_desc;
	int ret;

	state->edesc->bklog = true;

	ret = caam_jr_enqueue(jrdev, desc, state->ahash_op_done, req);

	if (ret == -ENOSPC && engine->retry_support)
		return ret;

	if (ret != -EINPROGRESS) {
		ahash_unmap(jrdev, state->edesc, req, 0);
		kfree(state->edesc);
	} else {
		ret = 0;
	}

	return ret;
}
static int ahash_enqueue_req(struct device *jrdev,
			     void (*cbk)(struct device *jrdev, u32 *desc,
					 u32 err, void *context),
			     struct ahash_request *req,
			     int dst_len, enum dma_data_direction dir)
{
	struct caam_drv_private_jr *jrpriv = dev_get_drvdata(jrdev);
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	struct ahash_edesc *edesc = state->edesc;
	u32 *desc = edesc->hw_desc;
	int ret;

	state->ahash_op_done = cbk;

	/*
	 * Only backlog requests are sent to crypto-engine, since the others
	 * can be handled by CAAM, if free, especially since the JR has up to
	 * 1024 entries (more than the 10 entries from crypto-engine).
	 */
	if (req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)
		ret = crypto_transfer_hash_request_to_engine(jrpriv->engine,
							     req);
	else
		ret = caam_jr_enqueue(jrdev, desc, cbk, req);

	if ((ret != -EINPROGRESS) && (ret != -EBUSY)) {
		ahash_unmap_ctx(jrdev, edesc, req, dst_len, dir);
		kfree(edesc);
	}

	return ret;
}
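/*
 * Illustration (hypothetical caller, not driver code): the crypto-engine
 * backlog path above is taken only when the submitter opted in via
 * CRYPTO_TFM_REQ_MAY_BACKLOG when setting up its request, e.g.:
 *
 *	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *				   my_completion_cb, my_cb_ctx);
 *
 * where my_completion_cb/my_cb_ctx stand in for the caller's completion
 * handler and its private context.
 */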
/* submit update job descriptor */
static int ahash_update_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	struct device *jrdev = ctx->jrdev;
	u8 *buf = state->buf;
	int *buflen = &state->buflen;
	int *next_buflen = &state->next_buflen;
	int blocksize = crypto_ahash_blocksize(ahash);
	int in_len = *buflen + req->nbytes, to_hash;
	u32 *desc;
	int src_nents, mapped_nents, sec4_sg_bytes, sec4_sg_src_index;
	struct ahash_edesc *edesc;
	int ret = 0;

	*next_buflen = in_len & (blocksize - 1);
	to_hash = in_len - *next_buflen;

	/*
	 * For XCBC and CMAC, if to_hash is a multiple of the block size,
	 * keep the last block in the internal buffer
	 */
	if ((is_xcbc_aes(ctx->adata.algtype) ||
	     is_cmac_aes(ctx->adata.algtype)) && to_hash >= blocksize &&
	    (*next_buflen == 0)) {
		*next_buflen = blocksize;
		to_hash -= blocksize;
	}
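	/*
	 * Worked example: for cmac(aes) (blocksize 16), an update of 48 bytes
	 * with an empty buffer gives in_len = 48 and next_buflen = 48 & 15 = 0;
	 * the branch above then holds the last block back (next_buflen = 16,
	 * to_hash = 32), so the final block is still available when the MAC
	 * is finalized.
	 */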
	if (to_hash) {
		int pad_nents;
		int src_len = req->nbytes - *next_buflen;

		src_nents = sg_nents_for_len(req->src, src_len);
		if (src_nents < 0) {
			dev_err(jrdev, "Invalid number of src SG.\n");
			return src_nents;
		}

		if (src_nents) {
			mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
						  DMA_TO_DEVICE);
			if (!mapped_nents) {
				dev_err(jrdev, "unable to DMA map source\n");
				return -ENOMEM;
			}
		} else {
			mapped_nents = 0;
		}

		sec4_sg_src_index = 1 + (*buflen ? 1 : 0);
		pad_nents = pad_sg_nents(sec4_sg_src_index + mapped_nents);
		sec4_sg_bytes = pad_nents * sizeof(struct sec4_sg_entry);

		/*
		 * allocate space for base edesc and hw desc commands,
		 * link tables
		 */
		edesc = ahash_edesc_alloc(req, pad_nents, ctx->sh_desc_update,
					  ctx->sh_desc_update_dma);
		if (!edesc) {
			dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;
		edesc->sec4_sg_bytes = sec4_sg_bytes;

		ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
					 edesc->sec4_sg, DMA_BIDIRECTIONAL);
		if (ret)
			goto unmap_ctx;

		ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state);
		if (ret)
			goto unmap_ctx;

		if (mapped_nents)
			sg_to_sec4_sg_last(req->src, src_len,
					   edesc->sec4_sg + sec4_sg_src_index,
					   0);
		else
			sg_to_sec4_set_last(edesc->sec4_sg + sec4_sg_src_index -
					    1);

		desc = edesc->hw_desc;

		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
						    sec4_sg_bytes,
						    DMA_TO_DEVICE);
		if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
			dev_err(jrdev, "unable to map S/G table\n");
			ret = -ENOMEM;
			goto unmap_ctx;
		}

		append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len +
				  to_hash, LDST_SGF);

		append_seq_out_ptr(desc, state->ctx_dma, ctx->ctx_len, 0);

		print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
				     DUMP_PREFIX_ADDRESS, 16, 4, desc,
				     desc_bytes(desc), 1);

		ret = ahash_enqueue_req(jrdev, ahash_done_bi, req,
					ctx->ctx_len, DMA_BIDIRECTIONAL);
	} else if (*next_buflen) {
		scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
					 req->nbytes, 0);
		*buflen = *next_buflen;

		print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
				     DUMP_PREFIX_ADDRESS, 16, 4, buf,
				     *buflen, 1);
	}

	return ret;
unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
	kfree(edesc);
	return ret;
}
static int ahash_final_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	struct device *jrdev = ctx->jrdev;
	int buflen = state->buflen;
	u32 *desc;
	int sec4_sg_bytes;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret;

	sec4_sg_bytes = pad_sg_nents(1 + (buflen ? 1 : 0)) *
			sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(req, 4, ctx->sh_desc_fin,
				  ctx->sh_desc_fin_dma);
	if (!edesc)
		return -ENOMEM;

	desc = edesc->hw_desc;

	edesc->sec4_sg_bytes = sec4_sg_bytes;

	ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
				 edesc->sec4_sg, DMA_BIDIRECTIONAL);
	if (ret)
		goto unmap_ctx;

	ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state);
	if (ret)
		goto unmap_ctx;

	sg_to_sec4_set_last(edesc->sec4_sg + (buflen ? 1 : 0));

	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
		dev_err(jrdev, "unable to map S/G table\n");
		ret = -ENOMEM;
		goto unmap_ctx;
	}

	append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + buflen,
			  LDST_SGF);
	append_seq_out_ptr(desc, state->ctx_dma, digestsize, 0);

	print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);

	return ahash_enqueue_req(jrdev, ahash_done_ctx_src, req,
				 digestsize, DMA_BIDIRECTIONAL);
unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_BIDIRECTIONAL);
	kfree(edesc);
	return ret;
}
static int ahash_finup_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	struct device *jrdev = ctx->jrdev;
	int buflen = state->buflen;
	u32 *desc;
	int sec4_sg_src_index;
	int src_nents, mapped_nents;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret;

	src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (src_nents < 0) {
		dev_err(jrdev, "Invalid number of src SG.\n");
		return src_nents;
	}

	if (src_nents) {
		mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
					  DMA_TO_DEVICE);
		if (!mapped_nents) {
			dev_err(jrdev, "unable to DMA map source\n");
			return -ENOMEM;
		}
	} else {
		mapped_nents = 0;
	}

	sec4_sg_src_index = 1 + (buflen ? 1 : 0);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(req, sec4_sg_src_index + mapped_nents,
				  ctx->sh_desc_fin, ctx->sh_desc_fin_dma);
	if (!edesc) {
		dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	desc = edesc->hw_desc;

	edesc->src_nents = src_nents;

	ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
				 edesc->sec4_sg, DMA_BIDIRECTIONAL);
	if (ret)
		goto unmap_ctx;

	ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state);
	if (ret)
		goto unmap_ctx;

	ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents,
				  sec4_sg_src_index, ctx->ctx_len + buflen,
				  req->nbytes);
	if (ret)
		goto unmap_ctx;

	append_seq_out_ptr(desc, state->ctx_dma, digestsize, 0);

	print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);

	return ahash_enqueue_req(jrdev, ahash_done_ctx_src, req,
				 digestsize, DMA_BIDIRECTIONAL);
unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_BIDIRECTIONAL);
	kfree(edesc);
	return ret;
}
static int ahash_digest(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	struct device *jrdev = ctx->jrdev;
	u32 *desc;
	int digestsize = crypto_ahash_digestsize(ahash);
	int src_nents, mapped_nents;
	struct ahash_edesc *edesc;
	int ret;

	state->buf_dma = 0;

	src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (src_nents < 0) {
		dev_err(jrdev, "Invalid number of src SG.\n");
		return src_nents;
	}

	if (src_nents) {
		mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
					  DMA_TO_DEVICE);
		if (!mapped_nents) {
			dev_err(jrdev, "unable to map source for DMA\n");
			return -ENOMEM;
		}
	} else {
		mapped_nents = 0;
	}

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(req, mapped_nents > 1 ? mapped_nents : 0,
				  ctx->sh_desc_digest, ctx->sh_desc_digest_dma);
	if (!edesc) {
		dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	edesc->src_nents = src_nents;

	ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 0, 0,
				  req->nbytes);
	if (ret) {
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
		return ret;
	}

	desc = edesc->hw_desc;

	ret = map_seq_out_ptr_ctx(desc, jrdev, state, digestsize);
	if (ret) {
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
		return -ENOMEM;
	}

	print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);

	return ahash_enqueue_req(jrdev, ahash_done, req, digestsize,
				 DMA_FROM_DEVICE);
}
/* submit ahash final if it is the first job descriptor */
static int ahash_final_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	struct device *jrdev = ctx->jrdev;
	u8 *buf = state->buf;
	int buflen = state->buflen;
	u32 *desc;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret = -ENOMEM;

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(req, 0, ctx->sh_desc_digest,
				  ctx->sh_desc_digest_dma);
	if (!edesc)
		return ret;

	desc = edesc->hw_desc;

	if (buflen) {
		state->buf_dma = dma_map_single(jrdev, buf, buflen,
						DMA_TO_DEVICE);
		if (dma_mapping_error(jrdev, state->buf_dma)) {
			dev_err(jrdev, "unable to map src\n");
			goto unmap;
		}

		append_seq_in_ptr(desc, state->buf_dma, buflen, 0);
	}

	ret = map_seq_out_ptr_ctx(desc, jrdev, state, digestsize);
	if (ret)
		goto unmap;

	print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);

	return ahash_enqueue_req(jrdev, ahash_done, req,
				 digestsize, DMA_FROM_DEVICE);
unmap:
	ahash_unmap(jrdev, edesc, req, digestsize);
	kfree(edesc);
	return ret;
}
/* submit ahash update if it is the first job descriptor after update */
static int ahash_update_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	struct device *jrdev = ctx->jrdev;
	u8 *buf = state->buf;
	int *buflen = &state->buflen;
	int *next_buflen = &state->next_buflen;
	int blocksize = crypto_ahash_blocksize(ahash);
	int in_len = *buflen + req->nbytes, to_hash;
	int sec4_sg_bytes, src_nents, mapped_nents;
	struct ahash_edesc *edesc;
	u32 *desc;
	int ret = 0;

	*next_buflen = in_len & (blocksize - 1);
	to_hash = in_len - *next_buflen;

	/*
	 * For XCBC and CMAC, if to_hash is a multiple of the block size,
	 * keep the last block in the internal buffer
	 */
	if ((is_xcbc_aes(ctx->adata.algtype) ||
	     is_cmac_aes(ctx->adata.algtype)) && to_hash >= blocksize &&
	    (*next_buflen == 0)) {
		*next_buflen = blocksize;
		to_hash -= blocksize;
	}

	if (to_hash) {
		int pad_nents;
		int src_len = req->nbytes - *next_buflen;

		src_nents = sg_nents_for_len(req->src, src_len);
		if (src_nents < 0) {
			dev_err(jrdev, "Invalid number of src SG.\n");
			return src_nents;
		}

		if (src_nents) {
			mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
						  DMA_TO_DEVICE);
			if (!mapped_nents) {
				dev_err(jrdev, "unable to DMA map source\n");
				return -ENOMEM;
			}
		} else {
			mapped_nents = 0;
		}

		pad_nents = pad_sg_nents(1 + mapped_nents);
		sec4_sg_bytes = pad_nents * sizeof(struct sec4_sg_entry);

		/*
		 * allocate space for base edesc and hw desc commands,
		 * link tables
		 */
		edesc = ahash_edesc_alloc(req, pad_nents,
					  ctx->sh_desc_update_first,
					  ctx->sh_desc_update_first_dma);
		if (!edesc) {
			dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;
		edesc->sec4_sg_bytes = sec4_sg_bytes;

		ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, state);
		if (ret)
			goto unmap_ctx;

		sg_to_sec4_sg_last(req->src, src_len, edesc->sec4_sg + 1, 0);

		desc = edesc->hw_desc;

		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
						    sec4_sg_bytes,
						    DMA_TO_DEVICE);
		if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
			dev_err(jrdev, "unable to map S/G table\n");
			ret = -ENOMEM;
			goto unmap_ctx;
		}

		append_seq_in_ptr(desc, edesc->sec4_sg_dma, to_hash, LDST_SGF);

		ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
		if (ret)
			goto unmap_ctx;

		print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
				     DUMP_PREFIX_ADDRESS, 16, 4, desc,
				     desc_bytes(desc), 1);

		ret = ahash_enqueue_req(jrdev, ahash_done_ctx_dst, req,
					ctx->ctx_len, DMA_TO_DEVICE);
		if ((ret != -EINPROGRESS) && (ret != -EBUSY))
			return ret;
		state->update = ahash_update_ctx;
		state->finup = ahash_finup_ctx;
		state->final = ahash_final_ctx;
	} else if (*next_buflen) {
		scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
					 req->nbytes, 0);
		*buflen = *next_buflen;

		print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
				     DUMP_PREFIX_ADDRESS, 16, 4, buf,
				     *buflen, 1);
	}

	return ret;
unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
	kfree(edesc);
	return ret;
}
/* submit ahash finup if it is the first job descriptor after update */
static int ahash_finup_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	struct device *jrdev = ctx->jrdev;
	int buflen = state->buflen;
	u32 *desc;
	int sec4_sg_bytes, sec4_sg_src_index, src_nents, mapped_nents;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret;

	src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (src_nents < 0) {
		dev_err(jrdev, "Invalid number of src SG.\n");
		return src_nents;
	}

	if (src_nents) {
		mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
					  DMA_TO_DEVICE);
		if (!mapped_nents) {
			dev_err(jrdev, "unable to DMA map source\n");
			return -ENOMEM;
		}
	} else {
		mapped_nents = 0;
	}

	sec4_sg_src_index = 2;
	sec4_sg_bytes = (sec4_sg_src_index + mapped_nents) *
			sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(req, sec4_sg_src_index + mapped_nents,
				  ctx->sh_desc_digest, ctx->sh_desc_digest_dma);
	if (!edesc) {
		dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	desc = edesc->hw_desc;

	edesc->src_nents = src_nents;
	edesc->sec4_sg_bytes = sec4_sg_bytes;

	ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, state);
	if (ret)
		goto unmap;

	ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 1, buflen,
				  req->nbytes);
	if (ret) {
		dev_err(jrdev, "unable to map S/G table\n");
		goto unmap;
	}

	ret = map_seq_out_ptr_ctx(desc, jrdev, state, digestsize);
	if (ret)
		goto unmap;

	print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);

	return ahash_enqueue_req(jrdev, ahash_done, req,
				 digestsize, DMA_FROM_DEVICE);
unmap:
	ahash_unmap(jrdev, edesc, req, digestsize);
	kfree(edesc);
	return -ENOMEM;
}
/* submit first update job descriptor after init */
static int ahash_update_first(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	struct device *jrdev = ctx->jrdev;
	u8 *buf = state->buf;
	int *buflen = &state->buflen;
	int *next_buflen = &state->next_buflen;
	int to_hash;
	int blocksize = crypto_ahash_blocksize(ahash);
	u32 *desc;
	int src_nents, mapped_nents;
	struct ahash_edesc *edesc;
	int ret = 0;

	*next_buflen = req->nbytes & (blocksize - 1);
	to_hash = req->nbytes - *next_buflen;

	/*
	 * For XCBC and CMAC, if to_hash is a multiple of the block size,
	 * keep the last block in the internal buffer
	 */
	if ((is_xcbc_aes(ctx->adata.algtype) ||
	     is_cmac_aes(ctx->adata.algtype)) && to_hash >= blocksize &&
	    (*next_buflen == 0)) {
		*next_buflen = blocksize;
		to_hash -= blocksize;
	}

	if (to_hash) {
		src_nents = sg_nents_for_len(req->src,
					     req->nbytes - *next_buflen);
		if (src_nents < 0) {
			dev_err(jrdev, "Invalid number of src SG.\n");
			return src_nents;
		}

		if (src_nents) {
			mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
						  DMA_TO_DEVICE);
			if (!mapped_nents) {
				dev_err(jrdev, "unable to map source for DMA\n");
				return -ENOMEM;
			}
		} else {
			mapped_nents = 0;
		}

		/*
		 * allocate space for base edesc and hw desc commands,
		 * link tables
		 */
		edesc = ahash_edesc_alloc(req, mapped_nents > 1 ?
					  mapped_nents : 0,
					  ctx->sh_desc_update_first,
					  ctx->sh_desc_update_first_dma);
		if (!edesc) {
			dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;

		ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 0, 0,
					  to_hash);
		if (ret)
			goto unmap_ctx;

		desc = edesc->hw_desc;

		ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
		if (ret)
			goto unmap_ctx;

		print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
				     DUMP_PREFIX_ADDRESS, 16, 4, desc,
				     desc_bytes(desc), 1);

		ret = ahash_enqueue_req(jrdev, ahash_done_ctx_dst, req,
					ctx->ctx_len, DMA_TO_DEVICE);
		if ((ret != -EINPROGRESS) && (ret != -EBUSY))
			return ret;
		state->update = ahash_update_ctx;
		state->finup = ahash_finup_ctx;
		state->final = ahash_final_ctx;
	} else if (*next_buflen) {
		state->update = ahash_update_no_ctx;
		state->finup = ahash_finup_no_ctx;
		state->final = ahash_final_no_ctx;
		scatterwalk_map_and_copy(buf, req->src, 0,
					 req->nbytes, 0);
		*buflen = *next_buflen;

		print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
				     DUMP_PREFIX_ADDRESS, 16, 4, buf,
				     *buflen, 1);
	}

	return ret;
unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
	kfree(edesc);
	return ret;
}
static int ahash_finup_first(struct ahash_request *req)
{
	return ahash_digest(req);
}

static int ahash_init(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx_dma(req);

	state->update = ahash_update_first;
	state->finup = ahash_finup_first;
	state->final = ahash_final_no_ctx;

	state->ctx_dma = 0;
	state->ctx_dma_len = 0;
	state->buf_dma = 0;
	state->buflen = 0;
	state->next_buflen = 0;

	return 0;
}
static int ahash_update(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx_dma(req);

	return state->update(req);
}

static int ahash_finup(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx_dma(req);

	return state->finup(req);
}

static int ahash_final(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx_dma(req);

	return state->final(req);
}
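/*
 * Usage sketch (hypothetical caller, not part of this driver): a one-shot
 * digest through the generic ahash API lands in ahash_digest() above. A
 * minimal sketch, assuming DMA-able source memory in data/len and
 * synchronous waiting via a crypto_wait:
 *
 *	struct crypto_ahash *tfm = crypto_alloc_ahash("sha256", 0, 0);
 *	struct ahash_request *req = ahash_request_alloc(tfm, GFP_KERNEL);
 *	DECLARE_CRYPTO_WAIT(wait);
 *	struct scatterlist sg;
 *	u8 out[SHA256_DIGEST_SIZE];
 *
 *	sg_init_one(&sg, data, len);
 *	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *				   crypto_req_done, &wait);
 *	ahash_request_set_crypt(req, &sg, out, len);
 *	crypto_wait_req(crypto_ahash_digest(req), &wait);
 */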
static int ahash_export(struct ahash_request *req, void *out)
{
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	struct caam_export_state *export = out;
	u8 *buf = state->buf;
	int len = state->buflen;

	memcpy(export->buf, buf, len);
	memcpy(export->caam_ctx, state->caam_ctx, sizeof(export->caam_ctx));
	export->buflen = len;
	export->update = state->update;
	export->final = state->final;
	export->finup = state->finup;

	return 0;
}

static int ahash_import(struct ahash_request *req, const void *in)
{
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	const struct caam_export_state *export = in;

	memset(state, 0, sizeof(*state));
	memcpy(state->buf, export->buf, export->buflen);
	memcpy(state->caam_ctx, export->caam_ctx, sizeof(state->caam_ctx));
	state->buflen = export->buflen;
	state->update = export->update;
	state->final = export->final;
	state->finup = export->finup;

	return 0;
}
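/*
 * Illustration (hypothetical caller): export/import let a partial hash state
 * move between requests. A sketch, assuming "state" points to a buffer of at
 * least crypto_ahash_statesize(tfm) bytes:
 *
 *	crypto_ahash_export(req, state);	// after one or more updates
 *	crypto_ahash_import(req2, state);	// resume on another request
 *
 * The caam_export_state layout above is what travels inside that buffer.
 */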
struct caam_hash_template {
	char name[CRYPTO_MAX_ALG_NAME];
	char driver_name[CRYPTO_MAX_ALG_NAME];
	char hmac_name[CRYPTO_MAX_ALG_NAME];
	char hmac_driver_name[CRYPTO_MAX_ALG_NAME];
	unsigned int blocksize;
	struct ahash_alg template_ahash;
	u32 alg_type;
};
/* ahash descriptors */
static struct caam_hash_template driver_hash[] = {
	{
		.name = "sha1",
		.driver_name = "sha1-caam",
		.hmac_name = "hmac(sha1)",
		.hmac_driver_name = "hmac-sha1-caam",
		.blocksize = SHA1_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA1_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA1,
	}, {
		.name = "sha224",
		.driver_name = "sha224-caam",
		.hmac_name = "hmac(sha224)",
		.hmac_driver_name = "hmac-sha224-caam",
		.blocksize = SHA224_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA224_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA224,
	}, {
		.name = "sha256",
		.driver_name = "sha256-caam",
		.hmac_name = "hmac(sha256)",
		.hmac_driver_name = "hmac-sha256-caam",
		.blocksize = SHA256_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA256_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA256,
	}, {
		.name = "sha384",
		.driver_name = "sha384-caam",
		.hmac_name = "hmac(sha384)",
		.hmac_driver_name = "hmac-sha384-caam",
		.blocksize = SHA384_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA384_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA384,
	}, {
		.name = "sha512",
		.driver_name = "sha512-caam",
		.hmac_name = "hmac(sha512)",
		.hmac_driver_name = "hmac-sha512-caam",
		.blocksize = SHA512_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA512_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA512,
	}, {
		.name = "md5",
		.driver_name = "md5-caam",
		.hmac_name = "hmac(md5)",
		.hmac_driver_name = "hmac-md5-caam",
		.blocksize = MD5_BLOCK_WORDS * 4,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = MD5_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_MD5,
	}, {
		.hmac_name = "xcbc(aes)",
		.hmac_driver_name = "xcbc-aes-caam",
		.blocksize = AES_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = axcbc_setkey,
			.halg = {
				.digestsize = AES_BLOCK_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XCBC_MAC,
	}, {
		.hmac_name = "cmac(aes)",
		.hmac_driver_name = "cmac-aes-caam",
		.blocksize = AES_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = acmac_setkey,
			.halg = {
				.digestsize = AES_BLOCK_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CMAC,
	},
};
struct caam_hash_alg {
	struct list_head entry;
	int alg_type;
	bool is_hmac;
	struct ahash_engine_alg ahash_alg;
};
static int caam_hash_cra_init(struct crypto_tfm *tfm)
{
	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
	struct crypto_alg *base = tfm->__crt_alg;
	struct hash_alg_common *halg =
		 container_of(base, struct hash_alg_common, base);
	struct ahash_alg *alg =
		 container_of(halg, struct ahash_alg, halg);
	struct caam_hash_alg *caam_hash =
		 container_of(alg, struct caam_hash_alg, ahash_alg.base);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	/* Sizes for MDHA running digests: MD5, SHA1, 224, 256, 384, 512 */
	static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE,
					 HASH_MSG_LEN + SHA1_DIGEST_SIZE,
					 HASH_MSG_LEN + 32,
					 HASH_MSG_LEN + SHA256_DIGEST_SIZE,
					 HASH_MSG_LEN + 64,
					 HASH_MSG_LEN + SHA512_DIGEST_SIZE };
	const size_t sh_desc_update_offset = offsetof(struct caam_hash_ctx,
						      sh_desc_update);
	dma_addr_t dma_addr;
	struct caam_drv_private *priv;

	/*
	 * Get a Job ring from Job Ring driver to ensure in-order
	 * crypto request processing per tfm
	 */
	ctx->jrdev = caam_jr_alloc();
	if (IS_ERR(ctx->jrdev)) {
		pr_err("Job Ring Device allocation for transform failed\n");
		return PTR_ERR(ctx->jrdev);
	}

	priv = dev_get_drvdata(ctx->jrdev->parent);

	if (is_xcbc_aes(caam_hash->alg_type)) {
		ctx->dir = DMA_TO_DEVICE;
		ctx->key_dir = DMA_BIDIRECTIONAL;
		ctx->adata.algtype = OP_TYPE_CLASS1_ALG | caam_hash->alg_type;
		ctx->ctx_len = 48;
	} else if (is_cmac_aes(caam_hash->alg_type)) {
		ctx->dir = DMA_TO_DEVICE;
		ctx->key_dir = DMA_NONE;
		ctx->adata.algtype = OP_TYPE_CLASS1_ALG | caam_hash->alg_type;
		ctx->ctx_len = 32;
	} else {
		if (priv->era >= 6) {
			ctx->dir = DMA_BIDIRECTIONAL;
			ctx->key_dir = caam_hash->is_hmac ? DMA_TO_DEVICE :
							    DMA_NONE;
		} else {
			ctx->dir = DMA_TO_DEVICE;
			ctx->key_dir = DMA_NONE;
		}
		ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;
		ctx->ctx_len = runninglen[(ctx->adata.algtype &
					   OP_ALG_ALGSEL_SUBMASK) >>
					  OP_ALG_ALGSEL_SHIFT];
	}

	if (ctx->key_dir != DMA_NONE) {
		ctx->adata.key_dma = dma_map_single_attrs(ctx->jrdev, ctx->key,
							  ARRAY_SIZE(ctx->key),
							  ctx->key_dir,
							  DMA_ATTR_SKIP_CPU_SYNC);
		if (dma_mapping_error(ctx->jrdev, ctx->adata.key_dma)) {
			dev_err(ctx->jrdev, "unable to map key\n");
			caam_jr_free(ctx->jrdev);
			return -ENOMEM;
		}
	}

	dma_addr = dma_map_single_attrs(ctx->jrdev, ctx->sh_desc_update,
					offsetof(struct caam_hash_ctx, key) -
					sh_desc_update_offset,
					ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
	if (dma_mapping_error(ctx->jrdev, dma_addr)) {
		dev_err(ctx->jrdev, "unable to map shared descriptors\n");

		if (ctx->key_dir != DMA_NONE)
			dma_unmap_single_attrs(ctx->jrdev, ctx->adata.key_dma,
					       ARRAY_SIZE(ctx->key),
					       ctx->key_dir,
					       DMA_ATTR_SKIP_CPU_SYNC);

		caam_jr_free(ctx->jrdev);
		return -ENOMEM;
	}

	ctx->sh_desc_update_dma = dma_addr;
	ctx->sh_desc_update_first_dma = dma_addr +
					offsetof(struct caam_hash_ctx,
						 sh_desc_update_first) -
					sh_desc_update_offset;
	ctx->sh_desc_fin_dma = dma_addr + offsetof(struct caam_hash_ctx,
						   sh_desc_fin) -
					sh_desc_update_offset;
	ctx->sh_desc_digest_dma = dma_addr + offsetof(struct caam_hash_ctx,
						      sh_desc_digest) -
					sh_desc_update_offset;

	crypto_ahash_set_reqsize_dma(ahash, sizeof(struct caam_hash_state));

	/*
	 * For keyed hash algorithms, shared descriptors
	 * will be created later in the setkey() callback
	 */
	return caam_hash->is_hmac ? 0 : ahash_set_sh_desc(ahash);
}
static void caam_hash_cra_exit(struct crypto_tfm *tfm)
{
	struct caam_hash_ctx *ctx = crypto_tfm_ctx_dma(tfm);

	dma_unmap_single_attrs(ctx->jrdev, ctx->sh_desc_update_dma,
			       offsetof(struct caam_hash_ctx, key) -
			       offsetof(struct caam_hash_ctx, sh_desc_update),
			       ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
	if (ctx->key_dir != DMA_NONE)
		dma_unmap_single_attrs(ctx->jrdev, ctx->adata.key_dma,
				       ARRAY_SIZE(ctx->key), ctx->key_dir,
				       DMA_ATTR_SKIP_CPU_SYNC);
	caam_jr_free(ctx->jrdev);
}
void caam_algapi_hash_exit(void)
{
	struct caam_hash_alg *t_alg, *n;

	if (!hash_list.next)
		return;

	list_for_each_entry_safe(t_alg, n, &hash_list, entry) {
		crypto_engine_unregister_ahash(&t_alg->ahash_alg);
		list_del(&t_alg->entry);
		kfree(t_alg);
	}
}
static struct caam_hash_alg *
caam_hash_alloc(struct caam_hash_template *template,
		bool keyed)
{
	struct caam_hash_alg *t_alg;
	struct ahash_alg *halg;
	struct crypto_alg *alg;

	t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
	if (!t_alg)
		return ERR_PTR(-ENOMEM);

	t_alg->ahash_alg.base = template->template_ahash;
	halg = &t_alg->ahash_alg.base;
	alg = &halg->halg.base;

	if (keyed) {
		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->hmac_name);
		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->hmac_driver_name);
		t_alg->is_hmac = true;
	} else {
		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->name);
		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->driver_name);
		halg->setkey = NULL;
		t_alg->is_hmac = false;
	}
	alg->cra_module = THIS_MODULE;
	alg->cra_init = caam_hash_cra_init;
	alg->cra_exit = caam_hash_cra_exit;
	alg->cra_ctxsize = sizeof(struct caam_hash_ctx) + crypto_dma_padding();
	alg->cra_priority = CAAM_CRA_PRIORITY;
	alg->cra_blocksize = template->blocksize;
	alg->cra_alignmask = 0;
	alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY;

	t_alg->alg_type = template->alg_type;
	t_alg->ahash_alg.op.do_one_request = ahash_do_one_req;

	return t_alg;
}
int caam_algapi_hash_init(struct device *ctrldev)
{
	int i = 0, err = 0;
	struct caam_drv_private *priv = dev_get_drvdata(ctrldev);
	unsigned int md_limit = SHA512_DIGEST_SIZE;
	u32 md_inst, md_vid;

	/*
	 * Register crypto algorithms the device supports. First, identify
	 * presence and attributes of the MD block.
	 */
	if (priv->era < 10) {
		struct caam_perfmon __iomem *perfmon = &priv->jr[0]->perfmon;

		md_vid = (rd_reg32(&perfmon->cha_id_ls) &
			  CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
		md_inst = (rd_reg32(&perfmon->cha_num_ls) &
			   CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
	} else {
		u32 mdha = rd_reg32(&priv->jr[0]->vreg.mdha);

		md_vid = (mdha & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT;
		md_inst = mdha & CHA_VER_NUM_MASK;
	}

	/*
	 * Skip registration of any hashing algorithms if the MD block
	 * is not present.
	 */
	if (!md_inst)
		return 0;

	/* Limit digest size based on LP256 */
	if (md_vid == CHA_VER_VID_MD_LP256)
		md_limit = SHA256_DIGEST_SIZE;

	INIT_LIST_HEAD(&hash_list);

	/* register crypto algorithms the device supports */
	for (i = 0; i < ARRAY_SIZE(driver_hash); i++) {
		struct caam_hash_alg *t_alg;
		struct caam_hash_template *alg = driver_hash + i;

		/* If MD size is not supported by device, skip registration */
		if (is_mdha(alg->alg_type) &&
		    alg->template_ahash.halg.digestsize > md_limit)
			continue;

		/* register hmac version */
		t_alg = caam_hash_alloc(alg, true);
		if (IS_ERR(t_alg)) {
			err = PTR_ERR(t_alg);
			pr_warn("%s alg allocation failed\n",
				alg->hmac_driver_name);
			continue;
		}

		err = crypto_engine_register_ahash(&t_alg->ahash_alg);
		if (err) {
			pr_warn("%s alg registration failed: %d\n",
				t_alg->ahash_alg.base.halg.base.cra_driver_name,
				err);
			kfree(t_alg);
		} else
			list_add_tail(&t_alg->entry, &hash_list);

		if ((alg->alg_type & OP_ALG_ALGSEL_MASK) == OP_ALG_ALGSEL_AES)
			continue;

		/* register unkeyed version */
		t_alg = caam_hash_alloc(alg, false);
		if (IS_ERR(t_alg)) {
			err = PTR_ERR(t_alg);
			pr_warn("%s alg allocation failed\n", alg->driver_name);
			continue;
		}

		err = crypto_engine_register_ahash(&t_alg->ahash_alg);
		if (err) {
			pr_warn("%s alg registration failed: %d\n",
				t_alg->ahash_alg.base.halg.base.cra_driver_name,
				err);
			kfree(t_alg);
		} else
			list_add_tail(&t_alg->entry, &hash_list);
	}

	return err;
}