/*
 * AMD Cryptographic Coprocessor (CCP) driver
 *
 * Copyright (C) 2013,2017 Advanced Micro Devices, Inc.
 *
 * Author: Tom Lendacky <thomas.lendacky@amd.com>
 * Author: Gary R Hook <gary.hook@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <crypto/scatterwalk.h>
#include <crypto/des.h>
#include <linux/ccp.h>

#include "ccp-dev.h"
/* SHA initial context values */
static const __be32 ccp_sha1_init[SHA1_DIGEST_SIZE / sizeof(__be32)] = {
	cpu_to_be32(SHA1_H0), cpu_to_be32(SHA1_H1),
	cpu_to_be32(SHA1_H2), cpu_to_be32(SHA1_H3),
	cpu_to_be32(SHA1_H4),
};

static const __be32 ccp_sha224_init[SHA256_DIGEST_SIZE / sizeof(__be32)] = {
	cpu_to_be32(SHA224_H0), cpu_to_be32(SHA224_H1),
	cpu_to_be32(SHA224_H2), cpu_to_be32(SHA224_H3),
	cpu_to_be32(SHA224_H4), cpu_to_be32(SHA224_H5),
	cpu_to_be32(SHA224_H6), cpu_to_be32(SHA224_H7),
};

static const __be32 ccp_sha256_init[SHA256_DIGEST_SIZE / sizeof(__be32)] = {
	cpu_to_be32(SHA256_H0), cpu_to_be32(SHA256_H1),
	cpu_to_be32(SHA256_H2), cpu_to_be32(SHA256_H3),
	cpu_to_be32(SHA256_H4), cpu_to_be32(SHA256_H5),
	cpu_to_be32(SHA256_H6), cpu_to_be32(SHA256_H7),
};

static const __be64 ccp_sha384_init[SHA512_DIGEST_SIZE / sizeof(__be64)] = {
	cpu_to_be64(SHA384_H0), cpu_to_be64(SHA384_H1),
	cpu_to_be64(SHA384_H2), cpu_to_be64(SHA384_H3),
	cpu_to_be64(SHA384_H4), cpu_to_be64(SHA384_H5),
	cpu_to_be64(SHA384_H6), cpu_to_be64(SHA384_H7),
};

static const __be64 ccp_sha512_init[SHA512_DIGEST_SIZE / sizeof(__be64)] = {
	cpu_to_be64(SHA512_H0), cpu_to_be64(SHA512_H1),
	cpu_to_be64(SHA512_H2), cpu_to_be64(SHA512_H3),
	cpu_to_be64(SHA512_H4), cpu_to_be64(SHA512_H5),
	cpu_to_be64(SHA512_H6), cpu_to_be64(SHA512_H7),
};
#define	CCP_NEW_JOBID(ccp)	((ccp->vdata->version == CCP_VERSION(3, 0)) ? \
					ccp_gen_jobid(ccp) : 0)
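
/* Job IDs tag commands so their completion can be tracked. Only version 3
 * devices need a non-zero ID, which is why CCP_NEW_JOBID() above hands out
 * 0 on newer hardware.
 */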
static u32 ccp_gen_jobid(struct ccp_device *ccp)
{
	return atomic_inc_return(&ccp->current_id) & CCP_JOBID_MASK;
}
static void ccp_sg_free(struct ccp_sg_workarea *wa)
{
	if (wa->dma_count)
		dma_unmap_sg(wa->dma_dev, wa->dma_sg_head, wa->nents, wa->dma_dir);

	wa->dma_count = 0;
}

static int ccp_init_sg_workarea(struct ccp_sg_workarea *wa, struct device *dev,
				struct scatterlist *sg, u64 len,
				enum dma_data_direction dma_dir)
{
	memset(wa, 0, sizeof(*wa));

	wa->sg = sg;
	if (!sg)
		return 0;

	wa->nents = sg_nents_for_len(sg, len);
	if (wa->nents < 0)
		return wa->nents;

	wa->bytes_left = len;
	wa->sg_used = 0;

	if (len == 0)
		return 0;

	if (dma_dir == DMA_NONE)
		return 0;

	wa->dma_sg = sg;
	wa->dma_sg_head = sg;
	wa->dma_dev = dev;
	wa->dma_dir = dma_dir;
	wa->dma_count = dma_map_sg(dev, sg, wa->nents, dma_dir);
	if (!wa->dma_count)
		return -ENOMEM;

	return 0;
}
static void ccp_update_sg_workarea(struct ccp_sg_workarea *wa, unsigned int len)
{
	unsigned int nbytes = min_t(u64, len, wa->bytes_left);
	unsigned int sg_combined_len = 0;

	if (!wa->sg)
		return;

	wa->sg_used += nbytes;
	wa->bytes_left -= nbytes;
	if (wa->sg_used == sg_dma_len(wa->dma_sg)) {
		/* Advance to the next DMA scatterlist entry */
		wa->dma_sg = sg_next(wa->dma_sg);

		/* In the case that the DMA mapped scatterlist has entries
		 * that have been merged, the non-DMA mapped scatterlist
		 * must be advanced multiple times for each merged entry.
		 * This ensures that the current non-DMA mapped entry
		 * corresponds to the current DMA mapped entry.
		 */
		do {
			sg_combined_len += wa->sg->length;
			wa->sg = sg_next(wa->sg);
		} while (wa->sg_used > sg_combined_len);

		wa->sg_used = 0;
	}
}
static void ccp_dm_free(struct ccp_dm_workarea *wa)
{
	if (wa->length <= CCP_DMAPOOL_MAX_SIZE) {
		if (wa->dma_pool)
			dma_pool_free(wa->dma_pool, wa->address,
				      wa->dma.address);
	} else {
		if (wa->dma.address)
			dma_unmap_single(wa->dev, wa->dma.address, wa->length,
					 wa->dma.dir);
		kfree(wa->address);
	}

	wa->address = NULL;
	wa->dma.address = 0;
}

static int ccp_init_dm_workarea(struct ccp_dm_workarea *wa,
				struct ccp_cmd_queue *cmd_q,
				unsigned int len,
				enum dma_data_direction dir)
{
	memset(wa, 0, sizeof(*wa));

	if (!len)
		return 0;

	wa->dev = cmd_q->ccp->dev;
	wa->length = len;

	if (len <= CCP_DMAPOOL_MAX_SIZE) {
		wa->dma_pool = cmd_q->dma_pool;

		wa->address = dma_pool_alloc(wa->dma_pool, GFP_KERNEL,
					     &wa->dma.address);
		if (!wa->address)
			return -ENOMEM;

		wa->dma.length = CCP_DMAPOOL_MAX_SIZE;

		memset(wa->address, 0, CCP_DMAPOOL_MAX_SIZE);
	} else {
		wa->address = kzalloc(len, GFP_KERNEL);
		if (!wa->address)
			return -ENOMEM;

		wa->dma.address = dma_map_single(wa->dev, wa->address, len,
						 dir);
		if (dma_mapping_error(wa->dev, wa->dma.address)) {
			kfree(wa->address);
			wa->address = NULL;
			return -ENOMEM;
		}

		wa->dma.length = len;
	}
	wa->dma.dir = dir;

	return 0;
}
static int ccp_set_dm_area(struct ccp_dm_workarea *wa, unsigned int wa_offset,
			   struct scatterlist *sg, unsigned int sg_offset,
			   unsigned int len)
{
	WARN_ON(!wa->address);

	if (len > (wa->length - wa_offset))
		return -EINVAL;

	scatterwalk_map_and_copy(wa->address + wa_offset, sg, sg_offset, len,
				 0);
	return 0;
}

static void ccp_get_dm_area(struct ccp_dm_workarea *wa, unsigned int wa_offset,
			    struct scatterlist *sg, unsigned int sg_offset,
			    unsigned int len)
{
	WARN_ON(!wa->address);

	scatterwalk_map_and_copy(wa->address + wa_offset, sg, sg_offset, len,
				 1);
}

static int ccp_reverse_set_dm_area(struct ccp_dm_workarea *wa,
				   unsigned int wa_offset,
				   struct scatterlist *sg,
				   unsigned int sg_offset,
				   unsigned int len)
{
	u8 *p, *q;
	int rc;

	rc = ccp_set_dm_area(wa, wa_offset, sg, sg_offset, len);
	if (rc)
		return rc;

	/* Reverse the byte order of the copied-in data in place */
	p = wa->address + wa_offset;
	q = p + len - 1;
	while (p < q) {
		swap(*p, *q);
		p++;
		q--;
	}
	return 0;
}

static void ccp_reverse_get_dm_area(struct ccp_dm_workarea *wa,
				    unsigned int wa_offset,
				    struct scatterlist *sg,
				    unsigned int sg_offset,
				    unsigned int len)
{
	u8 *p, *q;

	/* Reverse the byte order in place before copying out */
	p = wa->address + wa_offset;
	q = p + len - 1;
	while (p < q) {
		swap(*p, *q);
		p++;
		q--;
	}

	ccp_get_dm_area(wa, wa_offset, sg, sg_offset, len);
}
static void ccp_free_data(struct ccp_data *data, struct ccp_cmd_queue *cmd_q)
{
	ccp_dm_free(&data->dm_wa);
	ccp_sg_free(&data->sg_wa);
}

static int ccp_init_data(struct ccp_data *data, struct ccp_cmd_queue *cmd_q,
			 struct scatterlist *sg, u64 sg_len,
			 unsigned int dm_len,
			 enum dma_data_direction dir)
{
	int ret;

	memset(data, 0, sizeof(*data));

	ret = ccp_init_sg_workarea(&data->sg_wa, cmd_q->ccp->dev, sg, sg_len,
				   dir);
	if (ret)
		goto e_err;

	ret = ccp_init_dm_workarea(&data->dm_wa, cmd_q, dm_len, dir);
	if (ret)
		goto e_err;

	return 0;

e_err:
	ccp_free_data(data, cmd_q);

	return ret;
}
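
/* Move data between the scatterlist and the DM bounce buffer: from == 0
 * fills the buffer from the scatterlist (ccp_fill_queue_buf() below),
 * from == 1 drains it back out (ccp_empty_queue_buf()). Returns the
 * number of bytes transferred.
 */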
static unsigned int ccp_queue_buf(struct ccp_data *data, unsigned int from)
{
	struct ccp_sg_workarea *sg_wa = &data->sg_wa;
	struct ccp_dm_workarea *dm_wa = &data->dm_wa;
	unsigned int buf_count, nbytes;

	/* Clear the buffer if setting it */
	if (!from)
		memset(dm_wa->address, 0, dm_wa->length);

	if (!sg_wa->sg)
		return 0;

	/* Perform the copy operation
	 *   nbytes will always be <= UINT_MAX because dm_wa->length is
	 *   an unsigned int
	 */
	nbytes = min_t(u64, sg_wa->bytes_left, dm_wa->length);
	scatterwalk_map_and_copy(dm_wa->address, sg_wa->sg, sg_wa->sg_used,
				 nbytes, from);

	/* Update the structures and generate the count */
	buf_count = 0;
	while (sg_wa->bytes_left && (buf_count < dm_wa->length)) {
		nbytes = min(sg_dma_len(sg_wa->dma_sg) - sg_wa->sg_used,
			     dm_wa->length - buf_count);
		nbytes = min_t(u64, sg_wa->bytes_left, nbytes);

		buf_count += nbytes;
		ccp_update_sg_workarea(sg_wa, nbytes);
	}

	return buf_count;
}
static unsigned int ccp_fill_queue_buf(struct ccp_data *data)
{
	return ccp_queue_buf(data, 0);
}

static unsigned int ccp_empty_queue_buf(struct ccp_data *data)
{
	return ccp_queue_buf(data, 1);
}
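
/* Set up the source and destination DMA addresses for one CCP operation,
 * bouncing through the DM workarea whenever a scatterlist entry holds
 * less than a full block. dst may be NULL for one-way operations such as
 * SHA and CMAC.
 */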
static void ccp_prepare_data(struct ccp_data *src, struct ccp_data *dst,
			     struct ccp_op *op, unsigned int block_size,
			     bool blocksize_op)
{
	unsigned int sg_src_len, sg_dst_len, op_len;

	/* The CCP can only DMA from/to one address each per operation. This
	 * requires that we find the smallest DMA area between the source
	 * and destination. The resulting len values will always be <= UINT_MAX
	 * because the dma length is an unsigned int.
	 */
	sg_src_len = sg_dma_len(src->sg_wa.dma_sg) - src->sg_wa.sg_used;
	sg_src_len = min_t(u64, src->sg_wa.bytes_left, sg_src_len);

	if (dst) {
		sg_dst_len = sg_dma_len(dst->sg_wa.dma_sg) - dst->sg_wa.sg_used;
		sg_dst_len = min_t(u64, src->sg_wa.bytes_left, sg_dst_len);
		op_len = min(sg_src_len, sg_dst_len);
	} else {
		op_len = sg_src_len;
	}

	/* The data operation length will be at least block_size in length
	 * or the smaller of available sg room remaining for the source or
	 * destination
	 */
	op_len = max(op_len, block_size);

	/* Unless we have to buffer data, there's no reason to wait */
	op->soc = 0;

	if (sg_src_len < block_size) {
		/* Not enough data in the sg element, so it
		 * needs to be buffered into a blocksize chunk
		 */
		int cp_len = ccp_fill_queue_buf(src);

		op->soc = 1;
		op->src.u.dma.address = src->dm_wa.dma.address;
		op->src.u.dma.offset = 0;
		op->src.u.dma.length = (blocksize_op) ? block_size : cp_len;
	} else {
		/* Enough data in the sg element, but we need to
		 * adjust for any previously copied data
		 */
		op->src.u.dma.address = sg_dma_address(src->sg_wa.dma_sg);
		op->src.u.dma.offset = src->sg_wa.sg_used;
		op->src.u.dma.length = op_len & ~(block_size - 1);

		ccp_update_sg_workarea(&src->sg_wa, op->src.u.dma.length);
	}

	if (dst) {
		if (sg_dst_len < block_size) {
			/* Not enough room in the sg element or we're on the
			 * last piece of data (when using padding), so the
			 * output needs to be buffered into a blocksize chunk
			 */
			op->soc = 1;
			op->dst.u.dma.address = dst->dm_wa.dma.address;
			op->dst.u.dma.offset = 0;
			op->dst.u.dma.length = op->src.u.dma.length;
		} else {
			/* Enough room in the sg element, but we need to
			 * adjust for any previously used area
			 */
			op->dst.u.dma.address = sg_dma_address(dst->sg_wa.dma_sg);
			op->dst.u.dma.offset = dst->sg_wa.sg_used;
			op->dst.u.dma.length = op->src.u.dma.length;
		}
	}
}
static void ccp_process_data(struct ccp_data *src, struct ccp_data *dst,
			     struct ccp_op *op)
{
	op->init = 0;

	if (dst) {
		if (op->dst.u.dma.address == dst->dm_wa.dma.address)
			ccp_empty_queue_buf(dst);
		else
			ccp_update_sg_workarea(&dst->sg_wa,
					       op->dst.u.dma.length);
	}
}
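
/* Copy a key/context workarea into, or back out of, a 32-byte LSB storage
 * block slot using a passthru operation, optionally byte-swapping on the
 * way through.
 */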
static int ccp_copy_to_from_sb(struct ccp_cmd_queue *cmd_q,
			       struct ccp_dm_workarea *wa, u32 jobid, u32 sb,
			       u32 byte_swap, bool from)
{
	struct ccp_op op;

	memset(&op, 0, sizeof(op));

	op.cmd_q = cmd_q;
	op.jobid = jobid;
	op.eom = 1;

	if (from) {
		op.soc = 1;
		op.src.type = CCP_MEMTYPE_SB;
		op.src.u.sb = sb;
		op.dst.type = CCP_MEMTYPE_SYSTEM;
		op.dst.u.dma.address = wa->dma.address;
		op.dst.u.dma.length = wa->length;
	} else {
		op.src.type = CCP_MEMTYPE_SYSTEM;
		op.src.u.dma.address = wa->dma.address;
		op.src.u.dma.length = wa->length;
		op.dst.type = CCP_MEMTYPE_SB;
		op.dst.u.sb = sb;
	}

	op.u.passthru.byte_swap = byte_swap;

	return cmd_q->ccp->vdata->perform->passthru(&op);
}
static int ccp_copy_to_sb(struct ccp_cmd_queue *cmd_q,
			  struct ccp_dm_workarea *wa, u32 jobid, u32 sb,
			  u32 byte_swap)
{
	return ccp_copy_to_from_sb(cmd_q, wa, jobid, sb, byte_swap, false);
}

static int ccp_copy_from_sb(struct ccp_cmd_queue *cmd_q,
			    struct ccp_dm_workarea *wa, u32 jobid, u32 sb,
			    u32 byte_swap)
{
	return ccp_copy_to_from_sb(cmd_q, wa, jobid, sb, byte_swap, true);
}
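
/* Run an AES-CMAC operation: load the key and IV ("context") into the
 * LSB, then feed the message through the AES engine one block-sized chunk
 * at a time, swapping in the caller-supplied K1/K2 subkey before the
 * final block. The resulting MAC is read back out of the context slot.
 */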
static noinline_for_stack int
ccp_run_aes_cmac_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
	struct ccp_aes_engine *aes = &cmd->u.aes;
	struct ccp_dm_workarea key, ctx;
	struct ccp_data src;
	struct ccp_op op;
	unsigned int dm_offset;
	int ret;

	if (!((aes->key_len == AES_KEYSIZE_128) ||
	      (aes->key_len == AES_KEYSIZE_192) ||
	      (aes->key_len == AES_KEYSIZE_256)))
		return -EINVAL;

	if (aes->src_len & (AES_BLOCK_SIZE - 1))
		return -EINVAL;

	if (aes->iv_len != AES_BLOCK_SIZE)
		return -EINVAL;

	if (!aes->key || !aes->iv || !aes->src)
		return -EINVAL;

	if (aes->cmac_final) {
		if (aes->cmac_key_len != AES_BLOCK_SIZE)
			return -EINVAL;

		if (!aes->cmac_key)
			return -EINVAL;
	}

	BUILD_BUG_ON(CCP_AES_KEY_SB_COUNT != 1);
	BUILD_BUG_ON(CCP_AES_CTX_SB_COUNT != 1);

	memset(&op, 0, sizeof(op));
	op.cmd_q = cmd_q;
	op.jobid = CCP_NEW_JOBID(cmd_q->ccp);
	op.sb_key = cmd_q->sb_key;
	op.sb_ctx = cmd_q->sb_ctx;
	op.init = 1;
	op.u.aes.type = aes->type;
	op.u.aes.mode = aes->mode;
	op.u.aes.action = aes->action;

	/* All supported key sizes fit in a single (32-byte) SB entry
	 * and must be in little endian format. Use the 256-bit byte
	 * swap passthru option to convert from big endian to little
	 * endian.
	 */
	ret = ccp_init_dm_workarea(&key, cmd_q,
				   CCP_AES_KEY_SB_COUNT * CCP_SB_BYTES,
				   DMA_TO_DEVICE);
	if (ret)
		return ret;

	dm_offset = CCP_SB_BYTES - aes->key_len;
	ret = ccp_set_dm_area(&key, dm_offset, aes->key, 0, aes->key_len);
	if (ret)
		goto e_key;
	ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key,
			     CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_key;
	}

	/* The AES context fits in a single (32-byte) SB entry and
	 * must be in little endian format. Use the 256-bit byte swap
	 * passthru option to convert from big endian to little endian.
	 */
	ret = ccp_init_dm_workarea(&ctx, cmd_q,
				   CCP_AES_CTX_SB_COUNT * CCP_SB_BYTES,
				   DMA_BIDIRECTIONAL);
	if (ret)
		goto e_key;

	dm_offset = CCP_SB_BYTES - AES_BLOCK_SIZE;
	ret = ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
	if (ret)
		goto e_ctx;
	ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
			     CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_ctx;
	}

	/* Send data to the CCP AES engine */
	ret = ccp_init_data(&src, cmd_q, aes->src, aes->src_len,
			    AES_BLOCK_SIZE, DMA_TO_DEVICE);
	if (ret)
		goto e_ctx;

	while (src.sg_wa.bytes_left) {
		ccp_prepare_data(&src, NULL, &op, AES_BLOCK_SIZE, true);
		if (aes->cmac_final && !src.sg_wa.bytes_left) {
			op.eom = 1;

			/* Push the K1/K2 key to the CCP now */
			ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid,
					       op.sb_ctx,
					       CCP_PASSTHRU_BYTESWAP_256BIT);
			if (ret) {
				cmd->engine_error = cmd_q->cmd_error;
				goto e_src;
			}

			ret = ccp_set_dm_area(&ctx, 0, aes->cmac_key, 0,
					      aes->cmac_key_len);
			if (ret)
				goto e_src;
			ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
					     CCP_PASSTHRU_BYTESWAP_256BIT);
			if (ret) {
				cmd->engine_error = cmd_q->cmd_error;
				goto e_src;
			}
		}

		ret = cmd_q->ccp->vdata->perform->aes(&op);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			goto e_src;
		}

		ccp_process_data(&src, NULL, &op);
	}

	/* Retrieve the AES context - convert from LE to BE using
	 * 32-byte (256-bit) byteswapping
	 */
	ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
			       CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_src;
	}

	/* ...but we only need AES_BLOCK_SIZE bytes */
	dm_offset = CCP_SB_BYTES - AES_BLOCK_SIZE;
	ccp_get_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);

e_src:
	ccp_free_data(&src, cmd_q);

e_ctx:
	ccp_dm_free(&ctx);

e_key:
	ccp_dm_free(&key);

	return ret;
}
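
/* Run an AES-GCM operation in four steps: GHASH the AAD, GCTR the
 * plaintext/ciphertext, restore the original IV, then GHASH the
 * concatenated AAD/message bit lengths. Encryption appends the tag to the
 * output; decryption compares it against the tag supplied in the input.
 */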
static noinline_for_stack int
ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
	struct ccp_aes_engine *aes = &cmd->u.aes;
	struct ccp_dm_workarea key, ctx, final_wa, tag;
	struct ccp_data src, dst;
	struct ccp_data aad;
	struct ccp_op op;
	__be64 *final;
	unsigned int dm_offset;
	unsigned int authsize;
	unsigned int jobid;
	unsigned int ilen;
	bool in_place = true; /* Default value */
	int ret;

	struct scatterlist *p_inp, sg_inp[2];
	struct scatterlist *p_tag, sg_tag[2];
	struct scatterlist *p_outp, sg_outp[2];
	struct scatterlist *p_aad;

	if (!aes->iv)
		return -EINVAL;

	if (!((aes->key_len == AES_KEYSIZE_128) ||
	      (aes->key_len == AES_KEYSIZE_192) ||
	      (aes->key_len == AES_KEYSIZE_256)))
		return -EINVAL;

	if (!aes->key) /* Gotta have a key SGL */
		return -EINVAL;

	/* Zero defaults to 16 bytes, the maximum size */
	authsize = aes->authsize ? aes->authsize : AES_BLOCK_SIZE;
	switch (authsize) {
	case 16:
	case 15:
	case 14:
	case 13:
	case 12:
	case 8:
	case 4:
		break;
	default:
		return -EINVAL;
	}

	/* First, decompose the source buffer into AAD & PT,
	 * and the destination buffer into AAD, CT & tag, or
	 * the input into CT & tag.
	 * It is expected that the input and output SGs will
	 * be valid, even if the AAD and input lengths are 0.
	 */
	p_aad = aes->src;
	p_inp = scatterwalk_ffwd(sg_inp, aes->src, aes->aad_len);
	p_outp = scatterwalk_ffwd(sg_outp, aes->dst, aes->aad_len);
	if (aes->action == CCP_AES_ACTION_ENCRYPT) {
		ilen = aes->src_len;
		p_tag = scatterwalk_ffwd(sg_tag, p_outp, ilen);
	} else {
		/* Input length for decryption includes tag */
		ilen = aes->src_len - authsize;
		p_tag = scatterwalk_ffwd(sg_tag, p_inp, ilen);
	}

	jobid = CCP_NEW_JOBID(cmd_q->ccp);

	memset(&op, 0, sizeof(op));
	op.cmd_q = cmd_q;
	op.jobid = jobid;
	op.sb_key = cmd_q->sb_key; /* Pre-allocated */
	op.sb_ctx = cmd_q->sb_ctx; /* Pre-allocated */
	op.init = 1;
	op.u.aes.type = aes->type;

	/* Copy the key to the LSB */
	ret = ccp_init_dm_workarea(&key, cmd_q,
				   CCP_AES_CTX_SB_COUNT * CCP_SB_BYTES,
				   DMA_TO_DEVICE);
	if (ret)
		return ret;

	dm_offset = CCP_SB_BYTES - aes->key_len;
	ret = ccp_set_dm_area(&key, dm_offset, aes->key, 0, aes->key_len);
	if (ret)
		goto e_key;
	ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key,
			     CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_key;
	}

	/* Copy the context (IV) to the LSB.
	 * There is an assumption here that the IV is 96 bits in length, plus
	 * a nonce of 32 bits. If no IV is present, use a zeroed buffer.
	 */
	ret = ccp_init_dm_workarea(&ctx, cmd_q,
				   CCP_AES_CTX_SB_COUNT * CCP_SB_BYTES,
				   DMA_BIDIRECTIONAL);
	if (ret)
		goto e_key;

	dm_offset = CCP_AES_CTX_SB_COUNT * CCP_SB_BYTES - aes->iv_len;
	ret = ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
	if (ret)
		goto e_ctx;

	ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
			     CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_ctx;
	}

	if (aes->aad_len > 0) {
		/* Step 1: Run a GHASH over the Additional Authenticated Data */
		ret = ccp_init_data(&aad, cmd_q, p_aad, aes->aad_len,
				    AES_BLOCK_SIZE,
				    DMA_TO_DEVICE);
		if (ret)
			goto e_ctx;

		op.u.aes.mode = CCP_AES_MODE_GHASH;
		op.u.aes.action = CCP_AES_GHASHAAD;

		while (aad.sg_wa.bytes_left) {
			ccp_prepare_data(&aad, NULL, &op, AES_BLOCK_SIZE, true);

			ret = cmd_q->ccp->vdata->perform->aes(&op);
			if (ret) {
				cmd->engine_error = cmd_q->cmd_error;
				goto e_aad;
			}

			ccp_process_data(&aad, NULL, &op);
		}
	}

	op.u.aes.mode = CCP_AES_MODE_GCTR;
	op.u.aes.action = aes->action;

	if (ilen > 0) {
		/* Step 2: Run a GCTR over the plaintext */
		in_place = (sg_virt(p_inp) == sg_virt(p_outp));

		ret = ccp_init_data(&src, cmd_q, p_inp, ilen,
				    AES_BLOCK_SIZE,
				    in_place ? DMA_BIDIRECTIONAL
					     : DMA_TO_DEVICE);
		if (ret)
			goto e_aad;

		if (in_place) {
			dst = src;
		} else {
			ret = ccp_init_data(&dst, cmd_q, p_outp, ilen,
					    AES_BLOCK_SIZE, DMA_FROM_DEVICE);
			if (ret)
				goto e_src;
		}

		while (src.sg_wa.bytes_left) {
			ccp_prepare_data(&src, &dst, &op, AES_BLOCK_SIZE, true);
			if (!src.sg_wa.bytes_left) {
				unsigned int nbytes = ilen % AES_BLOCK_SIZE;

				if (nbytes)
					op.eom = 1;
				op.u.aes.size = (nbytes * 8) - 1;
			}

			ret = cmd_q->ccp->vdata->perform->aes(&op);
			if (ret) {
				cmd->engine_error = cmd_q->cmd_error;
				goto e_dst;
			}

			ccp_process_data(&src, &dst, &op);
		}
	}

	/* Step 3: Update the IV portion of the context with the original IV */
	ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
			       CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_dst;
	}

	ret = ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
	if (ret)
		goto e_dst;

	ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
			     CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_dst;
	}

	/* Step 4: Concatenate the lengths of the AAD and source, and
	 * hash that 16 byte buffer.
	 */
	ret = ccp_init_dm_workarea(&final_wa, cmd_q, AES_BLOCK_SIZE,
				   DMA_BIDIRECTIONAL);
	if (ret)
		goto e_dst;
	final = (__be64 *)final_wa.address;
	final[0] = cpu_to_be64(aes->aad_len * 8);
	final[1] = cpu_to_be64(ilen * 8);

	memset(&op, 0, sizeof(op));
	op.cmd_q = cmd_q;
	op.jobid = jobid;
	op.sb_key = cmd_q->sb_key; /* Pre-allocated */
	op.sb_ctx = cmd_q->sb_ctx; /* Pre-allocated */
	op.init = 1;
	op.u.aes.type = aes->type;
	op.u.aes.mode = CCP_AES_MODE_GHASH;
	op.u.aes.action = CCP_AES_GHASHFINAL;
	op.src.type = CCP_MEMTYPE_SYSTEM;
	op.src.u.dma.address = final_wa.dma.address;
	op.src.u.dma.length = AES_BLOCK_SIZE;
	op.dst.type = CCP_MEMTYPE_SYSTEM;
	op.dst.u.dma.address = final_wa.dma.address;
	op.dst.u.dma.length = AES_BLOCK_SIZE;
	op.eom = 1;
	op.u.aes.size = 0;
	ret = cmd_q->ccp->vdata->perform->aes(&op);
	if (ret)
		goto e_final_wa;

	if (aes->action == CCP_AES_ACTION_ENCRYPT) {
		/* Put the ciphered tag after the ciphertext. */
		ccp_get_dm_area(&final_wa, 0, p_tag, 0, authsize);
	} else {
		/* Does this ciphered tag match the input? */
		ret = ccp_init_dm_workarea(&tag, cmd_q, authsize,
					   DMA_BIDIRECTIONAL);
		if (ret)
			goto e_final_wa;
		ret = ccp_set_dm_area(&tag, 0, p_tag, 0, authsize);
		if (ret) {
			ccp_dm_free(&tag);
			goto e_final_wa;
		}

		ret = crypto_memneq(tag.address, final_wa.address,
				    authsize) ? -EBADMSG : 0;
		ccp_dm_free(&tag);
	}

e_final_wa:
	ccp_dm_free(&final_wa);

e_dst:
	if (ilen > 0 && !in_place)
		ccp_free_data(&dst, cmd_q);

e_src:
	if (ilen > 0)
		ccp_free_data(&src, cmd_q);

e_aad:
	if (aes->aad_len)
		ccp_free_data(&aad, cmd_q);

e_ctx:
	ccp_dm_free(&ctx);

e_key:
	ccp_dm_free(&key);

	return ret;
}
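
/* Run a generic AES operation (ECB/CBC/CFB/CTR): the key and, for non-ECB
 * modes, the IV are staged through the LSB, then the data is streamed
 * through the engine block by block. Non-ECB modes read the updated IV
 * back out when done.
 */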
static noinline_for_stack int
ccp_run_aes_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
	struct ccp_aes_engine *aes = &cmd->u.aes;
	struct ccp_dm_workarea key, ctx;
	struct ccp_data src, dst;
	struct ccp_op op;
	unsigned int dm_offset;
	bool in_place = false;
	int ret;

	if (!((aes->key_len == AES_KEYSIZE_128) ||
	      (aes->key_len == AES_KEYSIZE_192) ||
	      (aes->key_len == AES_KEYSIZE_256)))
		return -EINVAL;

	if (((aes->mode == CCP_AES_MODE_ECB) ||
	     (aes->mode == CCP_AES_MODE_CBC) ||
	     (aes->mode == CCP_AES_MODE_CFB)) &&
	    (aes->src_len & (AES_BLOCK_SIZE - 1)))
		return -EINVAL;

	if (!aes->key || !aes->src || !aes->dst)
		return -EINVAL;

	if (aes->mode != CCP_AES_MODE_ECB) {
		if (aes->iv_len != AES_BLOCK_SIZE)
			return -EINVAL;

		if (!aes->iv)
			return -EINVAL;
	}

	BUILD_BUG_ON(CCP_AES_KEY_SB_COUNT != 1);
	BUILD_BUG_ON(CCP_AES_CTX_SB_COUNT != 1);

	memset(&op, 0, sizeof(op));
	op.cmd_q = cmd_q;
	op.jobid = CCP_NEW_JOBID(cmd_q->ccp);
	op.sb_key = cmd_q->sb_key;
	op.sb_ctx = cmd_q->sb_ctx;
	op.init = (aes->mode == CCP_AES_MODE_ECB) ? 0 : 1;
	op.u.aes.type = aes->type;
	op.u.aes.mode = aes->mode;
	op.u.aes.action = aes->action;

	/* All supported key sizes fit in a single (32-byte) SB entry
	 * and must be in little endian format. Use the 256-bit byte
	 * swap passthru option to convert from big endian to little
	 * endian.
	 */
	ret = ccp_init_dm_workarea(&key, cmd_q,
				   CCP_AES_KEY_SB_COUNT * CCP_SB_BYTES,
				   DMA_TO_DEVICE);
	if (ret)
		return ret;

	dm_offset = CCP_SB_BYTES - aes->key_len;
	ret = ccp_set_dm_area(&key, dm_offset, aes->key, 0, aes->key_len);
	if (ret)
		goto e_key;
	ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key,
			     CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_key;
	}

	/* The AES context fits in a single (32-byte) SB entry and
	 * must be in little endian format. Use the 256-bit byte swap
	 * passthru option to convert from big endian to little endian.
	 */
	ret = ccp_init_dm_workarea(&ctx, cmd_q,
				   CCP_AES_CTX_SB_COUNT * CCP_SB_BYTES,
				   DMA_BIDIRECTIONAL);
	if (ret)
		goto e_key;

	if (aes->mode != CCP_AES_MODE_ECB) {
		/* Load the AES context - convert to LE */
		dm_offset = CCP_SB_BYTES - AES_BLOCK_SIZE;
		ret = ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
		if (ret)
			goto e_ctx;
		ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
				     CCP_PASSTHRU_BYTESWAP_256BIT);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			goto e_ctx;
		}
	}
	switch (aes->mode) {
	case CCP_AES_MODE_CFB: /* CFB128 only */
	case CCP_AES_MODE_CTR:
		op.u.aes.size = AES_BLOCK_SIZE * BITS_PER_BYTE - 1;
		break;
	default:
		op.u.aes.size = 0;
	}

	/* Prepare the input and output data workareas. For in-place
	 * operations we need to set the dma direction to BIDIRECTIONAL
	 * and copy the src workarea to the dst workarea.
	 */
	if (sg_virt(aes->src) == sg_virt(aes->dst))
		in_place = true;

	ret = ccp_init_data(&src, cmd_q, aes->src, aes->src_len,
			    AES_BLOCK_SIZE,
			    in_place ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
	if (ret)
		goto e_ctx;

	if (in_place) {
		dst = src;
	} else {
		ret = ccp_init_data(&dst, cmd_q, aes->dst, aes->src_len,
				    AES_BLOCK_SIZE, DMA_FROM_DEVICE);
		if (ret)
			goto e_src;
	}

	/* Send data to the CCP AES engine */
	while (src.sg_wa.bytes_left) {
		ccp_prepare_data(&src, &dst, &op, AES_BLOCK_SIZE, true);
		if (!src.sg_wa.bytes_left) {
			op.eom = 1;

			/* Since we don't retrieve the AES context in ECB
			 * mode we have to wait for the operation to complete
			 * on the last piece of data
			 */
			if (aes->mode == CCP_AES_MODE_ECB)
				op.soc = 1;
		}

		ret = cmd_q->ccp->vdata->perform->aes(&op);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			goto e_dst;
		}

		ccp_process_data(&src, &dst, &op);
	}

	if (aes->mode != CCP_AES_MODE_ECB) {
		/* Retrieve the AES context - convert from LE to BE using
		 * 32-byte (256-bit) byteswapping
		 */
		ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
				       CCP_PASSTHRU_BYTESWAP_256BIT);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			goto e_dst;
		}

		/* ...but we only need AES_BLOCK_SIZE bytes */
		dm_offset = CCP_SB_BYTES - AES_BLOCK_SIZE;
		ccp_get_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
	}

e_dst:
	if (!in_place)
		ccp_free_data(&dst, cmd_q);

e_src:
	ccp_free_data(&src, cmd_q);

e_ctx:
	ccp_dm_free(&ctx);

e_key:
	ccp_dm_free(&key);

	return ret;
}
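
/* Run an XTS-AES operation. Version 3 devices take a single 128-bit key
 * in one LSB slot; version 5 devices expect the two key halves zero-padded
 * into a 512-bit (two-slot) vector. The tweak (IV) is already little
 * endian, so it is loaded without byte swapping.
 */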
static noinline_for_stack int
ccp_run_xts_aes_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
	struct ccp_xts_aes_engine *xts = &cmd->u.xts;
	struct ccp_dm_workarea key, ctx;
	struct ccp_data src, dst;
	struct ccp_op op;
	unsigned int unit_size, dm_offset;
	bool in_place = false;
	unsigned int sb_count;
	enum ccp_aes_type aestype;
	int ret;

	switch (xts->unit_size) {
	case CCP_XTS_AES_UNIT_SIZE_16:
		unit_size = 16;
		break;
	case CCP_XTS_AES_UNIT_SIZE_512:
		unit_size = 512;
		break;
	case CCP_XTS_AES_UNIT_SIZE_1024:
		unit_size = 1024;
		break;
	case CCP_XTS_AES_UNIT_SIZE_2048:
		unit_size = 2048;
		break;
	case CCP_XTS_AES_UNIT_SIZE_4096:
		unit_size = 4096;
		break;

	default:
		return -EINVAL;
	}

	if (xts->key_len == AES_KEYSIZE_128)
		aestype = CCP_AES_TYPE_128;
	else if (xts->key_len == AES_KEYSIZE_256)
		aestype = CCP_AES_TYPE_256;
	else
		return -EINVAL;

	if (!xts->final && (xts->src_len & (AES_BLOCK_SIZE - 1)))
		return -EINVAL;

	if (xts->iv_len != AES_BLOCK_SIZE)
		return -EINVAL;

	if (!xts->key || !xts->iv || !xts->src || !xts->dst)
		return -EINVAL;

	BUILD_BUG_ON(CCP_XTS_AES_KEY_SB_COUNT != 1);
	BUILD_BUG_ON(CCP_XTS_AES_CTX_SB_COUNT != 1);

	memset(&op, 0, sizeof(op));
	op.cmd_q = cmd_q;
	op.jobid = CCP_NEW_JOBID(cmd_q->ccp);
	op.sb_key = cmd_q->sb_key;
	op.sb_ctx = cmd_q->sb_ctx;
	op.init = 1;
	op.u.xts.type = aestype;
	op.u.xts.action = xts->action;
	op.u.xts.unit_size = xts->unit_size;

	/* A version 3 device only supports 128-bit keys, which fits into a
	 * single SB entry. A version 5 device uses a 512-bit vector, so two
	 * SB entries.
	 */
	if (cmd_q->ccp->vdata->version == CCP_VERSION(3, 0))
		sb_count = CCP_XTS_AES_KEY_SB_COUNT;
	else
		sb_count = CCP5_XTS_AES_KEY_SB_COUNT;
	ret = ccp_init_dm_workarea(&key, cmd_q,
				   sb_count * CCP_SB_BYTES,
				   DMA_TO_DEVICE);
	if (ret)
		return ret;

	if (cmd_q->ccp->vdata->version == CCP_VERSION(3, 0)) {
		/* All supported key sizes must be in little endian format.
		 * Use the 256-bit byte swap passthru option to convert from
		 * big endian to little endian.
		 */
		dm_offset = CCP_SB_BYTES - AES_KEYSIZE_128;
		ret = ccp_set_dm_area(&key, dm_offset, xts->key, 0, xts->key_len);
		if (ret)
			goto e_key;
		ret = ccp_set_dm_area(&key, 0, xts->key, xts->key_len, xts->key_len);
		if (ret)
			goto e_key;
	} else {
		/* Version 5 CCPs use a 512-bit space for the key: each portion
		 * occupies 256 bits, or one entire slot, and is zero-padded.
		 */
		unsigned int pad;

		dm_offset = CCP_SB_BYTES;
		pad = dm_offset - xts->key_len;
		ret = ccp_set_dm_area(&key, pad, xts->key, 0, xts->key_len);
		if (ret)
			goto e_key;
		ret = ccp_set_dm_area(&key, dm_offset + pad, xts->key,
				      xts->key_len, xts->key_len);
		if (ret)
			goto e_key;
	}
	ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key,
			     CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_key;
	}

	/* The AES context fits in a single (32-byte) SB entry and
	 * for XTS is already in little endian format so no byte swapping
	 * is needed.
	 */
	ret = ccp_init_dm_workarea(&ctx, cmd_q,
				   CCP_XTS_AES_CTX_SB_COUNT * CCP_SB_BYTES,
				   DMA_BIDIRECTIONAL);
	if (ret)
		goto e_key;

	ret = ccp_set_dm_area(&ctx, 0, xts->iv, 0, xts->iv_len);
	if (ret)
		goto e_ctx;
	ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
			     CCP_PASSTHRU_BYTESWAP_NOOP);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_ctx;
	}

	/* Prepare the input and output data workareas. For in-place
	 * operations we need to set the dma direction to BIDIRECTIONAL
	 * and copy the src workarea to the dst workarea.
	 */
	if (sg_virt(xts->src) == sg_virt(xts->dst))
		in_place = true;

	ret = ccp_init_data(&src, cmd_q, xts->src, xts->src_len,
			    unit_size,
			    in_place ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
	if (ret)
		goto e_ctx;

	if (in_place) {
		dst = src;
	} else {
		ret = ccp_init_data(&dst, cmd_q, xts->dst, xts->src_len,
				    unit_size, DMA_FROM_DEVICE);
		if (ret)
			goto e_src;
	}

	/* Send data to the CCP AES engine */
	while (src.sg_wa.bytes_left) {
		ccp_prepare_data(&src, &dst, &op, unit_size, true);
		if (!src.sg_wa.bytes_left)
			op.eom = 1;

		ret = cmd_q->ccp->vdata->perform->xts_aes(&op);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			goto e_dst;
		}

		ccp_process_data(&src, &dst, &op);
	}

	/* Retrieve the AES context - convert from LE to BE using
	 * 32-byte (256-bit) byteswapping
	 */
	ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
			       CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_dst;
	}

	/* ...but we only need AES_BLOCK_SIZE bytes */
	dm_offset = CCP_SB_BYTES - AES_BLOCK_SIZE;
	ccp_get_dm_area(&ctx, dm_offset, xts->iv, 0, xts->iv_len);

e_dst:
	if (!in_place)
		ccp_free_data(&dst, cmd_q);

e_src:
	ccp_free_data(&src, cmd_q);

e_ctx:
	ccp_dm_free(&ctx);

e_key:
	ccp_dm_free(&key);

	return ret;
}
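
/* Run a 3DES operation (version 5 and later devices only). The three key
 * pieces arrive in the reverse of the engine's expected order, so they
 * are copied into the workarea individually before being pushed to the
 * LSB.
 */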
static noinline_for_stack int
ccp_run_des3_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
	struct ccp_des3_engine *des3 = &cmd->u.des3;

	struct ccp_dm_workarea key, ctx;
	struct ccp_data src, dst;
	struct ccp_op op;
	unsigned int dm_offset;
	unsigned int len_singlekey;
	bool in_place = false;
	int ret;

	/* Error checks */
	if (cmd_q->ccp->vdata->version < CCP_VERSION(5, 0))
		return -EINVAL;

	if (!cmd_q->ccp->vdata->perform->des3)
		return -EINVAL;

	if (des3->key_len != DES3_EDE_KEY_SIZE)
		return -EINVAL;

	if (((des3->mode == CCP_DES3_MODE_ECB) ||
	     (des3->mode == CCP_DES3_MODE_CBC)) &&
	    (des3->src_len & (DES3_EDE_BLOCK_SIZE - 1)))
		return -EINVAL;

	if (!des3->key || !des3->src || !des3->dst)
		return -EINVAL;

	if (des3->mode != CCP_DES3_MODE_ECB) {
		if (des3->iv_len != DES3_EDE_BLOCK_SIZE)
			return -EINVAL;

		if (!des3->iv)
			return -EINVAL;
	}

	/* Zero out all the fields of the command desc */
	memset(&op, 0, sizeof(op));

	/* Set up the Function field */
	op.cmd_q = cmd_q;
	op.jobid = CCP_NEW_JOBID(cmd_q->ccp);
	op.sb_key = cmd_q->sb_key;

	op.init = (des3->mode == CCP_DES3_MODE_ECB) ? 0 : 1;
	op.u.des3.type = des3->type;
	op.u.des3.mode = des3->mode;
	op.u.des3.action = des3->action;

	/*
	 * All supported key sizes fit in a single (32-byte) KSB entry and
	 * (like AES) must be in little endian format. Use the 256-bit byte
	 * swap passthru option to convert from big endian to little endian.
	 */
	ret = ccp_init_dm_workarea(&key, cmd_q,
				   CCP_DES3_KEY_SB_COUNT * CCP_SB_BYTES,
				   DMA_TO_DEVICE);
	if (ret)
		return ret;

	/*
	 * The contents of the key triplet are in the reverse order of what
	 * is required by the engine. Copy the 3 pieces individually to put
	 * them where they belong.
	 */
	dm_offset = CCP_SB_BYTES - des3->key_len; /* Basic offset */

	len_singlekey = des3->key_len / 3;
	ret = ccp_set_dm_area(&key, dm_offset + 2 * len_singlekey,
			      des3->key, 0, len_singlekey);
	if (ret)
		goto e_key;
	ret = ccp_set_dm_area(&key, dm_offset + len_singlekey,
			      des3->key, len_singlekey, len_singlekey);
	if (ret)
		goto e_key;
	ret = ccp_set_dm_area(&key, dm_offset,
			      des3->key, 2 * len_singlekey, len_singlekey);
	if (ret)
		goto e_key;

	/* Copy the key to the SB */
	ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key,
			     CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_key;
	}

	/*
	 * The DES3 context fits in a single (32-byte) KSB entry and
	 * must be in little endian format. Use the 256-bit byte swap
	 * passthru option to convert from big endian to little endian.
	 */
	if (des3->mode != CCP_DES3_MODE_ECB) {
		op.sb_ctx = cmd_q->sb_ctx;

		ret = ccp_init_dm_workarea(&ctx, cmd_q,
					   CCP_DES3_CTX_SB_COUNT * CCP_SB_BYTES,
					   DMA_BIDIRECTIONAL);
		if (ret)
			goto e_key;

		/* Load the context into the LSB */
		dm_offset = CCP_SB_BYTES - des3->iv_len;
		ret = ccp_set_dm_area(&ctx, dm_offset, des3->iv, 0,
				      des3->iv_len);
		if (ret)
			goto e_ctx;

		ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
				     CCP_PASSTHRU_BYTESWAP_256BIT);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			goto e_ctx;
		}
	}

	/*
	 * Prepare the input and output data workareas. For in-place
	 * operations we need to set the dma direction to BIDIRECTIONAL
	 * and copy the src workarea to the dst workarea.
	 */
	if (sg_virt(des3->src) == sg_virt(des3->dst))
		in_place = true;

	ret = ccp_init_data(&src, cmd_q, des3->src, des3->src_len,
			    DES3_EDE_BLOCK_SIZE,
			    in_place ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
	if (ret)
		goto e_ctx;

	if (in_place) {
		dst = src;
	} else {
		ret = ccp_init_data(&dst, cmd_q, des3->dst, des3->src_len,
				    DES3_EDE_BLOCK_SIZE, DMA_FROM_DEVICE);
		if (ret)
			goto e_src;
	}

	/* Send data to the CCP DES3 engine */
	while (src.sg_wa.bytes_left) {
		ccp_prepare_data(&src, &dst, &op, DES3_EDE_BLOCK_SIZE, true);
		if (!src.sg_wa.bytes_left) {
			op.eom = 1;

			/* Since we don't retrieve the context in ECB mode
			 * we have to wait for the operation to complete
			 * on the last piece of data
			 */
			op.soc = 0;
		}

		ret = cmd_q->ccp->vdata->perform->des3(&op);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			goto e_dst;
		}

		ccp_process_data(&src, &dst, &op);
	}

	if (des3->mode != CCP_DES3_MODE_ECB) {
		/* Retrieve the context and make BE */
		ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
				       CCP_PASSTHRU_BYTESWAP_256BIT);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			goto e_dst;
		}

		/* ...but we only need the last DES3_EDE_BLOCK_SIZE bytes */
		ccp_get_dm_area(&ctx, dm_offset, des3->iv, 0,
				DES3_EDE_BLOCK_SIZE);
	}

e_dst:
	if (!in_place)
		ccp_free_data(&dst, cmd_q);

e_src:
	ccp_free_data(&src, cmd_q);

e_ctx:
	if (des3->mode != CCP_DES3_MODE_ECB)
		ccp_dm_free(&ctx);

e_key:
	ccp_dm_free(&key);

	return ret;
}
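
/* Run a SHA operation. The intermediate digest lives in one LSB slot
 * (SHA-1/224/256) or two (SHA-384/512, stored as swapped halves); it is
 * seeded with the standard initial values on the first pass, restored
 * from sha->ctx otherwise, and stashed back (or finalized, including the
 * optional recursive HMAC opad pass) at the end.
 */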
static noinline_for_stack int
ccp_run_sha_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
	struct ccp_sha_engine *sha = &cmd->u.sha;
	struct ccp_dm_workarea ctx;
	struct ccp_data src;
	struct ccp_op op;
	unsigned int ioffset, ooffset;
	unsigned int digest_size;
	int sb_count;
	const void *init;
	u64 block_size;
	int ctx_size;
	int ret;

	switch (sha->type) {
	case CCP_SHA_TYPE_1:
		if (sha->ctx_len < SHA1_DIGEST_SIZE)
			return -EINVAL;
		block_size = SHA1_BLOCK_SIZE;
		break;
	case CCP_SHA_TYPE_224:
		if (sha->ctx_len < SHA224_DIGEST_SIZE)
			return -EINVAL;
		block_size = SHA224_BLOCK_SIZE;
		break;
	case CCP_SHA_TYPE_256:
		if (sha->ctx_len < SHA256_DIGEST_SIZE)
			return -EINVAL;
		block_size = SHA256_BLOCK_SIZE;
		break;
	case CCP_SHA_TYPE_384:
		if (cmd_q->ccp->vdata->version < CCP_VERSION(4, 0)
		    || sha->ctx_len < SHA384_DIGEST_SIZE)
			return -EINVAL;
		block_size = SHA384_BLOCK_SIZE;
		break;
	case CCP_SHA_TYPE_512:
		if (cmd_q->ccp->vdata->version < CCP_VERSION(4, 0)
		    || sha->ctx_len < SHA512_DIGEST_SIZE)
			return -EINVAL;
		block_size = SHA512_BLOCK_SIZE;
		break;
	default:
		return -EINVAL;
	}

	if (!sha->ctx)
		return -EINVAL;

	if (!sha->final && (sha->src_len & (block_size - 1)))
		return -EINVAL;

	/* The version 3 device can't handle zero-length input */
	if (cmd_q->ccp->vdata->version == CCP_VERSION(3, 0)) {

		if (!sha->src_len) {
			unsigned int digest_len;
			const u8 *sha_zero;

			/* Not final, just return */
			if (!sha->final)
				return 0;

			/* CCP can't do a zero length sha operation so the
			 * caller must buffer the data.
			 */
			if (sha->msg_bits)
				return -EINVAL;

			/* The CCP cannot perform zero-length sha operations
			 * so the caller is required to buffer data for the
			 * final operation. However, a sha operation for a
			 * message with a total length of zero is valid so
			 * known values are required to supply the result.
			 */
			switch (sha->type) {
			case CCP_SHA_TYPE_1:
				sha_zero = sha1_zero_message_hash;
				digest_len = SHA1_DIGEST_SIZE;
				break;
			case CCP_SHA_TYPE_224:
				sha_zero = sha224_zero_message_hash;
				digest_len = SHA224_DIGEST_SIZE;
				break;
			case CCP_SHA_TYPE_256:
				sha_zero = sha256_zero_message_hash;
				digest_len = SHA256_DIGEST_SIZE;
				break;
			default:
				return -EINVAL;
			}

			scatterwalk_map_and_copy((void *)sha_zero, sha->ctx, 0,
						 digest_len, 1);

			return 0;
		}
	}

	/* Set variables used throughout */
	switch (sha->type) {
	case CCP_SHA_TYPE_1:
		digest_size = SHA1_DIGEST_SIZE;
		init = (void *) ccp_sha1_init;
		ctx_size = SHA1_DIGEST_SIZE;
		sb_count = 1;
		if (cmd_q->ccp->vdata->version != CCP_VERSION(3, 0))
			ooffset = ioffset = CCP_SB_BYTES - SHA1_DIGEST_SIZE;
		else
			ooffset = ioffset = 0;
		break;
	case CCP_SHA_TYPE_224:
		digest_size = SHA224_DIGEST_SIZE;
		init = (void *) ccp_sha224_init;
		ctx_size = SHA256_DIGEST_SIZE;
		sb_count = 1;
		ioffset = 0;
		if (cmd_q->ccp->vdata->version != CCP_VERSION(3, 0))
			ooffset = CCP_SB_BYTES - SHA224_DIGEST_SIZE;
		else
			ooffset = 0;
		break;
	case CCP_SHA_TYPE_256:
		digest_size = SHA256_DIGEST_SIZE;
		init = (void *) ccp_sha256_init;
		ctx_size = SHA256_DIGEST_SIZE;
		sb_count = 1;
		ooffset = ioffset = 0;
		break;
	case CCP_SHA_TYPE_384:
		digest_size = SHA384_DIGEST_SIZE;
		init = (void *) ccp_sha384_init;
		ctx_size = SHA512_DIGEST_SIZE;
		sb_count = 2;
		ioffset = 0;
		ooffset = 2 * CCP_SB_BYTES - SHA384_DIGEST_SIZE;
		break;
	case CCP_SHA_TYPE_512:
		digest_size = SHA512_DIGEST_SIZE;
		init = (void *) ccp_sha512_init;
		ctx_size = SHA512_DIGEST_SIZE;
		sb_count = 2;
		ooffset = ioffset = 0;
		break;
	default:
		ret = -EINVAL;
		goto e_data;
	}

	/* For zero-length plaintext the src pointer is ignored;
	 * otherwise both parts must be valid
	 */
	if (sha->src_len && !sha->src)
		return -EINVAL;

	memset(&op, 0, sizeof(op));
	op.cmd_q = cmd_q;
	op.jobid = CCP_NEW_JOBID(cmd_q->ccp);
	op.sb_ctx = cmd_q->sb_ctx; /* Pre-allocated */
	op.u.sha.type = sha->type;
	op.u.sha.msg_bits = sha->msg_bits;

	/* For SHA1/224/256 the context fits in a single (32-byte) SB entry;
	 * SHA384/512 require 2 adjacent SB slots, with the right half in the
	 * first slot, and the left half in the second. Each portion must then
	 * be in little endian format: use the 256-bit byte swap option.
	 */
	ret = ccp_init_dm_workarea(&ctx, cmd_q, sb_count * CCP_SB_BYTES,
				   DMA_BIDIRECTIONAL);
	if (ret)
		return ret;

	if (sha->first) {
		switch (sha->type) {
		case CCP_SHA_TYPE_1:
		case CCP_SHA_TYPE_224:
		case CCP_SHA_TYPE_256:
			memcpy(ctx.address + ioffset, init, ctx_size);
			break;
		case CCP_SHA_TYPE_384:
		case CCP_SHA_TYPE_512:
			memcpy(ctx.address + ctx_size / 2, init,
			       ctx_size / 2);
			memcpy(ctx.address, init + ctx_size / 2,
			       ctx_size / 2);
			break;
		default:
			ret = -EINVAL;
			goto e_ctx;
		}
	} else {
		/* Restore the context */
		ret = ccp_set_dm_area(&ctx, 0, sha->ctx, 0,
				      sb_count * CCP_SB_BYTES);
		if (ret)
			goto e_ctx;
	}

	ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
			     CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_ctx;
	}

	if (sha->src) {
		/* Send data to the CCP SHA engine; block_size is set above */
		ret = ccp_init_data(&src, cmd_q, sha->src, sha->src_len,
				    block_size, DMA_TO_DEVICE);
		if (ret)
			goto e_ctx;

		while (src.sg_wa.bytes_left) {
			ccp_prepare_data(&src, NULL, &op, block_size, false);
			if (sha->final && !src.sg_wa.bytes_left)
				op.eom = 1;

			ret = cmd_q->ccp->vdata->perform->sha(&op);
			if (ret) {
				cmd->engine_error = cmd_q->cmd_error;
				goto e_data;
			}

			ccp_process_data(&src, NULL, &op);
		}
	} else {
		op.eom = 1;
		ret = cmd_q->ccp->vdata->perform->sha(&op);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			goto e_data;
		}
	}

	/* Retrieve the SHA context - convert from LE to BE using
	 * 32-byte (256-bit) byteswapping to BE
	 */
	ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
			       CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_data;
	}

	if (sha->final) {
		/* Finishing up, so get the digest */
		switch (sha->type) {
		case CCP_SHA_TYPE_1:
		case CCP_SHA_TYPE_224:
		case CCP_SHA_TYPE_256:
			ccp_get_dm_area(&ctx, ooffset,
					sha->ctx, 0,
					digest_size);
			break;
		case CCP_SHA_TYPE_384:
		case CCP_SHA_TYPE_512:
			ccp_get_dm_area(&ctx, 0,
					sha->ctx, LSB_ITEM_SIZE - ooffset,
					LSB_ITEM_SIZE);
			ccp_get_dm_area(&ctx, LSB_ITEM_SIZE + ooffset,
					sha->ctx, 0,
					LSB_ITEM_SIZE - ooffset);
			break;
		default:
			ret = -EINVAL;
			goto e_data;
		}
	} else {
		/* Stash the context */
		ccp_get_dm_area(&ctx, 0, sha->ctx, 0,
				sb_count * CCP_SB_BYTES);
	}

	if (sha->final && sha->opad) {
		/* HMAC operation, recursively perform final SHA */
		struct ccp_cmd hmac_cmd;
		struct scatterlist sg;
		u8 *hmac_buf;

		if (sha->opad_len != block_size) {
			ret = -EINVAL;
			goto e_data;
		}

		hmac_buf = kmalloc(block_size + digest_size, GFP_KERNEL);
		if (!hmac_buf) {
			ret = -ENOMEM;
			goto e_data;
		}
		sg_init_one(&sg, hmac_buf, block_size + digest_size);

		scatterwalk_map_and_copy(hmac_buf, sha->opad, 0, block_size, 0);
		switch (sha->type) {
		case CCP_SHA_TYPE_1:
		case CCP_SHA_TYPE_224:
		case CCP_SHA_TYPE_256:
			memcpy(hmac_buf + block_size,
			       ctx.address + ooffset,
			       digest_size);
			break;
		case CCP_SHA_TYPE_384:
		case CCP_SHA_TYPE_512:
			memcpy(hmac_buf + block_size,
			       ctx.address + LSB_ITEM_SIZE + ooffset,
			       LSB_ITEM_SIZE - ooffset);
			memcpy(hmac_buf + block_size +
			       (LSB_ITEM_SIZE - ooffset),
			       ctx.address, LSB_ITEM_SIZE);
			break;
		default:
			kfree(hmac_buf);
			ret = -EINVAL;
			goto e_data;
		}

		memset(&hmac_cmd, 0, sizeof(hmac_cmd));
		hmac_cmd.engine = CCP_ENGINE_SHA;
		hmac_cmd.u.sha.type = sha->type;
		hmac_cmd.u.sha.ctx = sha->ctx;
		hmac_cmd.u.sha.ctx_len = sha->ctx_len;
		hmac_cmd.u.sha.src = &sg;
		hmac_cmd.u.sha.src_len = block_size + digest_size;
		hmac_cmd.u.sha.opad = NULL;
		hmac_cmd.u.sha.opad_len = 0;
		hmac_cmd.u.sha.first = 1;
		hmac_cmd.u.sha.final = 1;
		hmac_cmd.u.sha.msg_bits = (block_size + digest_size) << 3;

		ret = ccp_run_sha_cmd(cmd_q, &hmac_cmd);
		if (ret)
			cmd->engine_error = hmac_cmd.engine_error;

		kfree(hmac_buf);
	}

e_data:
	if (sha->src)
		ccp_free_data(&src, cmd_q);

e_ctx:
	ccp_dm_free(&ctx);

	return ret;
}
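
/* Run an RSA modular exponentiation. The modulus and message are reversed
 * into little endian form and concatenated in one DMA buffer; the
 * exponent either occupies pre-allocated LSB slots (version 3) or is
 * fetched directly from memory (version 5 and later).
 */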
static noinline_for_stack int
ccp_run_rsa_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
	struct ccp_rsa_engine *rsa = &cmd->u.rsa;
	struct ccp_dm_workarea exp, src, dst;
	struct ccp_op op;
	unsigned int sb_count, i_len, o_len;
	int ret;

	/* Check against the maximum allowable size, in bits */
	if (rsa->key_size > cmd_q->ccp->vdata->rsamax)
		return -EINVAL;

	if (!rsa->exp || !rsa->mod || !rsa->src || !rsa->dst)
		return -EINVAL;

	memset(&op, 0, sizeof(op));
	op.cmd_q = cmd_q;
	op.jobid = CCP_NEW_JOBID(cmd_q->ccp);

	/* The RSA modulus must precede the message being acted upon, so
	 * it must be copied to a DMA area where the message and the
	 * modulus can be concatenated.  Therefore the input buffer
	 * length required is twice the output buffer length (which
	 * must be a multiple of 256-bits).  Compute o_len, i_len in bytes.
	 * Buffer sizes must be a multiple of 32 bytes; rounding up may be
	 * required.
	 */
	o_len = 32 * ((rsa->key_size + 255) / 256);
	i_len = o_len * 2;

	sb_count = 0;
	if (cmd_q->ccp->vdata->version < CCP_VERSION(5, 0)) {
		/* sb_count is the number of storage block slots required
		 * for the modulus.
		 */
		sb_count = o_len / CCP_SB_BYTES;
		op.sb_key = cmd_q->ccp->vdata->perform->sballoc(cmd_q,
								sb_count);
		if (!op.sb_key)
			return -EIO;
	} else {
		/* A version 5 device allows a modulus size that will not fit
		 * in the LSB, so the command will transfer it from memory.
		 * Set the sb key to the default, even though it's not used.
		 */
		op.sb_key = cmd_q->sb_key;
	}

	/* The RSA exponent must be in little endian format. Reverse its
	 * byte order.
	 */
	ret = ccp_init_dm_workarea(&exp, cmd_q, o_len, DMA_TO_DEVICE);
	if (ret)
		goto e_sb;

	ret = ccp_reverse_set_dm_area(&exp, 0, rsa->exp, 0, rsa->exp_len);
	if (ret)
		goto e_exp;

	if (cmd_q->ccp->vdata->version < CCP_VERSION(5, 0)) {
		/* Copy the exponent to the local storage block, using
		 * as many 32-byte blocks as were allocated above. It's
		 * already little endian, so no further change is required.
		 */
		ret = ccp_copy_to_sb(cmd_q, &exp, op.jobid, op.sb_key,
				     CCP_PASSTHRU_BYTESWAP_NOOP);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			goto e_exp;
		}
	} else {
		/* The exponent can be retrieved from memory via DMA. */
		op.exp.u.dma.address = exp.dma.address;
		op.exp.u.dma.offset = 0;
	}

	/* Concatenate the modulus and the message. Both the modulus and
	 * the operands must be in little endian format.  Since the input
	 * is in big endian format it must be converted.
	 */
	ret = ccp_init_dm_workarea(&src, cmd_q, i_len, DMA_TO_DEVICE);
	if (ret)
		goto e_exp;

	ret = ccp_reverse_set_dm_area(&src, 0, rsa->mod, 0, rsa->mod_len);
	if (ret)
		goto e_src;
	ret = ccp_reverse_set_dm_area(&src, o_len, rsa->src, 0, rsa->src_len);
	if (ret)
		goto e_src;

	/* Prepare the output area for the operation */
	ret = ccp_init_dm_workarea(&dst, cmd_q, o_len, DMA_FROM_DEVICE);
	if (ret)
		goto e_src;

	op.soc = 1;
	op.src.u.dma.address = src.dma.address;
	op.src.u.dma.offset = 0;
	op.src.u.dma.length = i_len;
	op.dst.u.dma.address = dst.dma.address;
	op.dst.u.dma.offset = 0;
	op.dst.u.dma.length = o_len;

	op.u.rsa.mod_size = rsa->key_size;
	op.u.rsa.input_len = i_len;

	ret = cmd_q->ccp->vdata->perform->rsa(&op);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_dst;
	}

	ccp_reverse_get_dm_area(&dst, 0, rsa->dst, 0, rsa->mod_len);

e_dst:
	ccp_dm_free(&dst);

e_src:
	ccp_dm_free(&src);

e_exp:
	ccp_dm_free(&exp);

e_sb:
	if (sb_count)
		cmd_q->ccp->vdata->perform->sbfree(cmd_q, op.sb_key, sb_count);

	return ret;
}
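
/* Run a passthru (memory-to-memory copy, with optional bitwise masking)
 * operation. The engine handles one source/destination DMA address pair
 * at a time, so each mapped source entry must fit within the remaining
 * space of the current destination entry.
 */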
static noinline_for_stack int
ccp_run_passthru_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
	struct ccp_passthru_engine *pt = &cmd->u.passthru;
	struct ccp_dm_workarea mask;
	struct ccp_data src, dst;
	struct ccp_op op;
	bool in_place = false;
	unsigned int i;
	int ret = 0;

	if (!pt->final && (pt->src_len & (CCP_PASSTHRU_BLOCKSIZE - 1)))
		return -EINVAL;

	if (!pt->src || !pt->dst)
		return -EINVAL;

	if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP) {
		if (pt->mask_len != CCP_PASSTHRU_MASKSIZE)
			return -EINVAL;
		if (!pt->mask)
			return -EINVAL;
	}

	BUILD_BUG_ON(CCP_PASSTHRU_SB_COUNT != 1);

	memset(&op, 0, sizeof(op));
	op.cmd_q = cmd_q;
	op.jobid = CCP_NEW_JOBID(cmd_q->ccp);

	if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP) {
		/* Load the mask */
		op.sb_key = cmd_q->sb_key;

		ret = ccp_init_dm_workarea(&mask, cmd_q,
					   CCP_PASSTHRU_SB_COUNT *
					   CCP_SB_BYTES,
					   DMA_TO_DEVICE);
		if (ret)
			return ret;

		ret = ccp_set_dm_area(&mask, 0, pt->mask, 0, pt->mask_len);
		if (ret)
			goto e_mask;
		ret = ccp_copy_to_sb(cmd_q, &mask, op.jobid, op.sb_key,
				     CCP_PASSTHRU_BYTESWAP_NOOP);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			goto e_mask;
		}
	}

	/* Prepare the input and output data workareas. For in-place
	 * operations we need to set the dma direction to BIDIRECTIONAL
	 * and copy the src workarea to the dst workarea.
	 */
	if (sg_virt(pt->src) == sg_virt(pt->dst))
		in_place = true;

	ret = ccp_init_data(&src, cmd_q, pt->src, pt->src_len,
			    CCP_PASSTHRU_MASKSIZE,
			    in_place ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
	if (ret)
		goto e_mask;

	if (in_place) {
		dst = src;
	} else {
		ret = ccp_init_data(&dst, cmd_q, pt->dst, pt->src_len,
				    CCP_PASSTHRU_MASKSIZE, DMA_FROM_DEVICE);
		if (ret)
			goto e_src;
	}

	/* Send data to the CCP Passthru engine
	 *   Because the CCP engine works on a single source and destination
	 *   dma address at a time, each entry in the source scatterlist
	 *   (after the dma_map_sg call) must be less than or equal to the
	 *   (remaining) length in the destination scatterlist entry and the
	 *   length must be a multiple of CCP_PASSTHRU_BLOCKSIZE
	 */
	dst.sg_wa.sg_used = 0;
	for (i = 1; i <= src.sg_wa.dma_count; i++) {
		if (!dst.sg_wa.sg ||
		    (sg_dma_len(dst.sg_wa.sg) < sg_dma_len(src.sg_wa.sg))) {
			ret = -EINVAL;
			goto e_dst;
		}

		if (i == src.sg_wa.dma_count) {
			op.eom = 1;
			op.soc = 1;
		}

		op.src.type = CCP_MEMTYPE_SYSTEM;
		op.src.u.dma.address = sg_dma_address(src.sg_wa.sg);
		op.src.u.dma.offset = 0;
		op.src.u.dma.length = sg_dma_len(src.sg_wa.sg);

		op.dst.type = CCP_MEMTYPE_SYSTEM;
		op.dst.u.dma.address = sg_dma_address(dst.sg_wa.sg);
		op.dst.u.dma.offset = dst.sg_wa.sg_used;
		op.dst.u.dma.length = op.src.u.dma.length;

		ret = cmd_q->ccp->vdata->perform->passthru(&op);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			goto e_dst;
		}

		dst.sg_wa.sg_used += sg_dma_len(src.sg_wa.sg);
		if (dst.sg_wa.sg_used == sg_dma_len(dst.sg_wa.sg)) {
			dst.sg_wa.sg = sg_next(dst.sg_wa.sg);
			dst.sg_wa.sg_used = 0;
		}
		src.sg_wa.sg = sg_next(src.sg_wa.sg);
	}

e_dst:
	if (!in_place)
		ccp_free_data(&dst, cmd_q);

e_src:
	ccp_free_data(&src, cmd_q);

e_mask:
	if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP)
		ccp_dm_free(&mask);

	return ret;
}
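
/* As above, but for callers that have already DMA-mapped their buffers;
 * the mask, source and destination are used as raw DMA addresses with no
 * scatterlist walking.
 */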
static noinline_for_stack int
ccp_run_passthru_nomap_cmd(struct ccp_cmd_queue *cmd_q,
			   struct ccp_cmd *cmd)
{
	struct ccp_passthru_nomap_engine *pt = &cmd->u.passthru_nomap;
	struct ccp_dm_workarea mask;
	struct ccp_op op;
	int ret;

	if (!pt->final && (pt->src_len & (CCP_PASSTHRU_BLOCKSIZE - 1)))
		return -EINVAL;

	if (!pt->src_dma || !pt->dst_dma)
		return -EINVAL;

	if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP) {
		if (pt->mask_len != CCP_PASSTHRU_MASKSIZE)
			return -EINVAL;
		if (!pt->mask)
			return -EINVAL;
	}

	BUILD_BUG_ON(CCP_PASSTHRU_SB_COUNT != 1);

	memset(&op, 0, sizeof(op));
	op.cmd_q = cmd_q;
	op.jobid = CCP_NEW_JOBID(cmd_q->ccp);

	if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP) {
		/* Load the mask */
		op.sb_key = cmd_q->sb_key;

		mask.length = pt->mask_len;
		mask.dma.address = pt->mask;
		mask.dma.length = pt->mask_len;

		ret = ccp_copy_to_sb(cmd_q, &mask, op.jobid, op.sb_key,
				     CCP_PASSTHRU_BYTESWAP_NOOP);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			return ret;
		}
	}

	/* Send data to the CCP Passthru engine */
	op.eom = 1;
	op.soc = 1;

	op.src.type = CCP_MEMTYPE_SYSTEM;
	op.src.u.dma.address = pt->src_dma;
	op.src.u.dma.offset = 0;
	op.src.u.dma.length = pt->src_len;

	op.dst.type = CCP_MEMTYPE_SYSTEM;
	op.dst.u.dma.address = pt->dst_dma;
	op.dst.u.dma.offset = 0;
	op.dst.u.dma.length = pt->src_len;

	ret = cmd_q->ccp->vdata->perform->passthru(&op);
	if (ret)
		cmd->engine_error = cmd_q->cmd_error;

	return ret;
}
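
/* Run an ECC modular-math function (multiply/add/invert over a prime
 * field). Operands are byte-reversed into fixed CCP_ECC_OPERAND_SIZE
 * fields behind the modulus, and the result is validated via the
 * CCP_ECC_RESULT_SUCCESS bit before being copied out.
 */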
static int ccp_run_ecc_mm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
	struct ccp_ecc_engine *ecc = &cmd->u.ecc;
	struct ccp_dm_workarea src, dst;
	struct ccp_op op;
	int ret;
	u8 *save;

	if (!ecc->u.mm.operand_1 ||
	    (ecc->u.mm.operand_1_len > CCP_ECC_MODULUS_BYTES))
		return -EINVAL;

	if (ecc->function != CCP_ECC_FUNCTION_MINV_384BIT)
		if (!ecc->u.mm.operand_2 ||
		    (ecc->u.mm.operand_2_len > CCP_ECC_MODULUS_BYTES))
			return -EINVAL;

	if (!ecc->u.mm.result ||
	    (ecc->u.mm.result_len < CCP_ECC_MODULUS_BYTES))
		return -EINVAL;

	memset(&op, 0, sizeof(op));
	op.cmd_q = cmd_q;
	op.jobid = CCP_NEW_JOBID(cmd_q->ccp);

	/* Concatenate the modulus and the operands. Both the modulus and
	 * the operands must be in little endian format.  Since the input
	 * is in big endian format it must be converted and placed in a
	 * fixed length buffer.
	 */
	ret = ccp_init_dm_workarea(&src, cmd_q, CCP_ECC_SRC_BUF_SIZE,
				   DMA_TO_DEVICE);
	if (ret)
		return ret;

	/* Save the workarea address since it is updated in order to perform
	 * the concatenation
	 */
	save = src.address;

	/* Copy the ECC modulus */
	ret = ccp_reverse_set_dm_area(&src, 0, ecc->mod, 0, ecc->mod_len);
	if (ret)
		goto e_src;
	src.address += CCP_ECC_OPERAND_SIZE;

	/* Copy the first operand */
	ret = ccp_reverse_set_dm_area(&src, 0, ecc->u.mm.operand_1, 0,
				      ecc->u.mm.operand_1_len);
	if (ret)
		goto e_src;
	src.address += CCP_ECC_OPERAND_SIZE;

	if (ecc->function != CCP_ECC_FUNCTION_MINV_384BIT) {
		/* Copy the second operand */
		ret = ccp_reverse_set_dm_area(&src, 0, ecc->u.mm.operand_2, 0,
					      ecc->u.mm.operand_2_len);
		if (ret)
			goto e_src;
		src.address += CCP_ECC_OPERAND_SIZE;
	}

	/* Restore the workarea address */
	src.address = save;

	/* Prepare the output area for the operation */
	ret = ccp_init_dm_workarea(&dst, cmd_q, CCP_ECC_DST_BUF_SIZE,
				   DMA_FROM_DEVICE);
	if (ret)
		goto e_src;

	op.soc = 1;
	op.src.u.dma.address = src.dma.address;
	op.src.u.dma.offset = 0;
	op.src.u.dma.length = src.length;
	op.dst.u.dma.address = dst.dma.address;
	op.dst.u.dma.offset = 0;
	op.dst.u.dma.length = dst.length;

	op.u.ecc.function = cmd->u.ecc.function;

	ret = cmd_q->ccp->vdata->perform->ecc(&op);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_dst;
	}

	ecc->ecc_result = le16_to_cpup(
		(const __le16 *)(dst.address + CCP_ECC_RESULT_OFFSET));
	if (!(ecc->ecc_result & CCP_ECC_RESULT_SUCCESS)) {
		ret = -EIO;
		goto e_dst;
	}

	/* Save the ECC result */
	ccp_reverse_get_dm_area(&dst, 0, ecc->u.mm.result, 0,
				CCP_ECC_MODULUS_BYTES);

e_dst:
	ccp_dm_free(&dst);

e_src:
	ccp_dm_free(&src);

	return ret;
}
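
/* Run an ECC point-math function (point add/multiply/double). Points are
 * loaded as X/Y coordinates with an implicit Z = 1, followed by the curve
 * "a" parameter or the scalar, depending on the requested function.
 */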
static int ccp_run_ecc_pm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
	struct ccp_ecc_engine *ecc = &cmd->u.ecc;
	struct ccp_dm_workarea src, dst;
	struct ccp_op op;
	int ret;
	u8 *save;

	if (!ecc->u.pm.point_1.x ||
	    (ecc->u.pm.point_1.x_len > CCP_ECC_MODULUS_BYTES) ||
	    !ecc->u.pm.point_1.y ||
	    (ecc->u.pm.point_1.y_len > CCP_ECC_MODULUS_BYTES))
		return -EINVAL;

	if (ecc->function == CCP_ECC_FUNCTION_PADD_384BIT) {
		if (!ecc->u.pm.point_2.x ||
		    (ecc->u.pm.point_2.x_len > CCP_ECC_MODULUS_BYTES) ||
		    !ecc->u.pm.point_2.y ||
		    (ecc->u.pm.point_2.y_len > CCP_ECC_MODULUS_BYTES))
			return -EINVAL;
	}

	if (!ecc->u.pm.domain_a ||
	    (ecc->u.pm.domain_a_len > CCP_ECC_MODULUS_BYTES))
		return -EINVAL;

	if (ecc->function == CCP_ECC_FUNCTION_PMUL_384BIT)
		if (!ecc->u.pm.scalar ||
		    (ecc->u.pm.scalar_len > CCP_ECC_MODULUS_BYTES))
			return -EINVAL;

	if (!ecc->u.pm.result.x ||
	    (ecc->u.pm.result.x_len < CCP_ECC_MODULUS_BYTES) ||
	    !ecc->u.pm.result.y ||
	    (ecc->u.pm.result.y_len < CCP_ECC_MODULUS_BYTES))
		return -EINVAL;

	memset(&op, 0, sizeof(op));
	op.cmd_q = cmd_q;
	op.jobid = CCP_NEW_JOBID(cmd_q->ccp);

	/* Concatenate the modulus and the operands. Both the modulus and
	 * the operands must be in little endian format.  Since the input
	 * is in big endian format it must be converted and placed in a
	 * fixed length buffer.
	 */
	ret = ccp_init_dm_workarea(&src, cmd_q, CCP_ECC_SRC_BUF_SIZE,
				   DMA_TO_DEVICE);
	if (ret)
		return ret;

	/* Save the workarea address since it is updated in order to perform
	 * the concatenation
	 */
	save = src.address;

	/* Copy the ECC modulus */
	ret = ccp_reverse_set_dm_area(&src, 0, ecc->mod, 0, ecc->mod_len);
	if (ret)
		goto e_src;
	src.address += CCP_ECC_OPERAND_SIZE;

	/* Copy the first point X and Y coordinate */
	ret = ccp_reverse_set_dm_area(&src, 0, ecc->u.pm.point_1.x, 0,
				      ecc->u.pm.point_1.x_len);
	if (ret)
		goto e_src;
	src.address += CCP_ECC_OPERAND_SIZE;
	ret = ccp_reverse_set_dm_area(&src, 0, ecc->u.pm.point_1.y, 0,
				      ecc->u.pm.point_1.y_len);
	if (ret)
		goto e_src;
	src.address += CCP_ECC_OPERAND_SIZE;

	/* Set the first point Z coordinate to 1 */
	*src.address = 0x01;
	src.address += CCP_ECC_OPERAND_SIZE;

	if (ecc->function == CCP_ECC_FUNCTION_PADD_384BIT) {
		/* Copy the second point X and Y coordinate */
		ret = ccp_reverse_set_dm_area(&src, 0, ecc->u.pm.point_2.x, 0,
					      ecc->u.pm.point_2.x_len);
		if (ret)
			goto e_src;
		src.address += CCP_ECC_OPERAND_SIZE;
		ret = ccp_reverse_set_dm_area(&src, 0, ecc->u.pm.point_2.y, 0,
					      ecc->u.pm.point_2.y_len);
		if (ret)
			goto e_src;
		src.address += CCP_ECC_OPERAND_SIZE;

		/* Set the second point Z coordinate to 1 */
		*src.address = 0x01;
		src.address += CCP_ECC_OPERAND_SIZE;
	} else {
		/* Copy the Domain "a" parameter */
		ret = ccp_reverse_set_dm_area(&src, 0, ecc->u.pm.domain_a, 0,
					      ecc->u.pm.domain_a_len);
		if (ret)
			goto e_src;
		src.address += CCP_ECC_OPERAND_SIZE;

		if (ecc->function == CCP_ECC_FUNCTION_PMUL_384BIT) {
			/* Copy the scalar value */
			ret = ccp_reverse_set_dm_area(&src, 0,
						      ecc->u.pm.scalar, 0,
						      ecc->u.pm.scalar_len);
			if (ret)
				goto e_src;
			src.address += CCP_ECC_OPERAND_SIZE;
		}
	}

	/* Restore the workarea address */
	src.address = save;

	/* Prepare the output area for the operation */
	ret = ccp_init_dm_workarea(&dst, cmd_q, CCP_ECC_DST_BUF_SIZE,
				   DMA_FROM_DEVICE);
	if (ret)
		goto e_src;

	op.soc = 1;
	op.src.u.dma.address = src.dma.address;
	op.src.u.dma.offset = 0;
	op.src.u.dma.length = src.length;
	op.dst.u.dma.address = dst.dma.address;
	op.dst.u.dma.offset = 0;
	op.dst.u.dma.length = dst.length;

	op.u.ecc.function = cmd->u.ecc.function;

	ret = cmd_q->ccp->vdata->perform->ecc(&op);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_dst;
	}

	ecc->ecc_result = le16_to_cpup(
		(const __le16 *)(dst.address + CCP_ECC_RESULT_OFFSET));
	if (!(ecc->ecc_result & CCP_ECC_RESULT_SUCCESS)) {
		ret = -EIO;
		goto e_dst;
	}

	/* Save the workarea address since it is updated as we walk through
	 * to copy the point math result
	 */
	save = dst.address;

	/* Save the ECC result X and Y coordinates */
	ccp_reverse_get_dm_area(&dst, 0, ecc->u.pm.result.x, 0,
				CCP_ECC_MODULUS_BYTES);
	dst.address += CCP_ECC_OUTPUT_SIZE;
	ccp_reverse_get_dm_area(&dst, 0, ecc->u.pm.result.y, 0,
				CCP_ECC_MODULUS_BYTES);
	dst.address += CCP_ECC_OUTPUT_SIZE;

	/* Restore the workarea address */
	dst.address = save;

e_dst:
	ccp_dm_free(&dst);

e_src:
	ccp_dm_free(&src);

	return ret;
}
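
/* Validate the modulus and route the ECC request to the modular-math or
 * point-math handler.
 */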
static noinline_for_stack int
ccp_run_ecc_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
	struct ccp_ecc_engine *ecc = &cmd->u.ecc;

	ecc->ecc_result = 0;

	if (!ecc->mod ||
	    (ecc->mod_len > CCP_ECC_MODULUS_BYTES))
		return -EINVAL;

	switch (ecc->function) {
	case CCP_ECC_FUNCTION_MMUL_384BIT:
	case CCP_ECC_FUNCTION_MADD_384BIT:
	case CCP_ECC_FUNCTION_MINV_384BIT:
		return ccp_run_ecc_mm_cmd(cmd_q, cmd);

	case CCP_ECC_FUNCTION_PADD_384BIT:
	case CCP_ECC_FUNCTION_PMUL_384BIT:
	case CCP_ECC_FUNCTION_PDBL_384BIT:
		return ccp_run_ecc_pm_cmd(cmd_q, cmd);

	default:
		return -EINVAL;
	}
}
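
/* ccp_run_cmd() - dispatch one command to the appropriate engine handler.
 *
 * A minimal usage sketch (hypothetical caller-side values; in-kernel users
 * normally submit through ccp_enqueue_cmd() from <linux/ccp.h>, which
 * queues the command and later invokes cmd->callback, rather than calling
 * this function directly):
 *
 *	struct ccp_cmd cmd;
 *
 *	memset(&cmd, 0, sizeof(cmd));
 *	cmd.engine = CCP_ENGINE_AES;
 *	cmd.u.aes.type = CCP_AES_TYPE_128;
 *	cmd.u.aes.mode = CCP_AES_MODE_ECB;
 *	cmd.u.aes.action = CCP_AES_ACTION_ENCRYPT;
 *	cmd.u.aes.key = key_sg;		// caller-provided scatterlists
 *	cmd.u.aes.key_len = AES_KEYSIZE_128;
 *	cmd.u.aes.src = src_sg;
 *	cmd.u.aes.src_len = len;
 *	cmd.u.aes.dst = dst_sg;
 *	ret = ccp_enqueue_cmd(&cmd);	// typically -EINPROGRESS on submit
 */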
int ccp_run_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
	int ret;

	cmd->engine_error = 0;
	cmd_q->cmd_error = 0;
	cmd_q->int_rcvd = 0;
	cmd_q->free_slots = cmd_q->ccp->vdata->perform->get_free_slots(cmd_q);

	switch (cmd->engine) {
	case CCP_ENGINE_AES:
		switch (cmd->u.aes.mode) {
		case CCP_AES_MODE_CMAC:
			ret = ccp_run_aes_cmac_cmd(cmd_q, cmd);
			break;
		case CCP_AES_MODE_GCM:
			ret = ccp_run_aes_gcm_cmd(cmd_q, cmd);
			break;
		default:
			ret = ccp_run_aes_cmd(cmd_q, cmd);
			break;
		}
		break;
	case CCP_ENGINE_XTS_AES_128:
		ret = ccp_run_xts_aes_cmd(cmd_q, cmd);
		break;
	case CCP_ENGINE_DES3:
		ret = ccp_run_des3_cmd(cmd_q, cmd);
		break;
	case CCP_ENGINE_SHA:
		ret = ccp_run_sha_cmd(cmd_q, cmd);
		break;
	case CCP_ENGINE_RSA:
		ret = ccp_run_rsa_cmd(cmd_q, cmd);
		break;
	case CCP_ENGINE_PASSTHRU:
		if (cmd->flags & CCP_CMD_PASSTHRU_NO_DMA_MAP)
			ret = ccp_run_passthru_nomap_cmd(cmd_q, cmd);
		else
			ret = ccp_run_passthru_cmd(cmd_q, cmd);
		break;
	case CCP_ENGINE_ECC:
		ret = ccp_run_ecc_cmd(cmd_q, cmd);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}