/*
 * AMD Cryptographic Coprocessor (CCP) driver
 *
 * Copyright (C) 2013,2016 Advanced Micro Devices, Inc.
 *
 * Author: Tom Lendacky <thomas.lendacky@amd.com>
 * Author: Gary R Hook <gary.hook@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <crypto/scatterwalk.h>
#include <linux/ccp.h>

#include "ccp-dev.h"

/* SHA initial context values */
static const __be32 ccp_sha1_init[SHA1_DIGEST_SIZE / sizeof(__be32)] = {
	cpu_to_be32(SHA1_H0), cpu_to_be32(SHA1_H1),
	cpu_to_be32(SHA1_H2), cpu_to_be32(SHA1_H3),
	cpu_to_be32(SHA1_H4),
};

static const __be32 ccp_sha224_init[SHA256_DIGEST_SIZE / sizeof(__be32)] = {
	cpu_to_be32(SHA224_H0), cpu_to_be32(SHA224_H1),
	cpu_to_be32(SHA224_H2), cpu_to_be32(SHA224_H3),
	cpu_to_be32(SHA224_H4), cpu_to_be32(SHA224_H5),
	cpu_to_be32(SHA224_H6), cpu_to_be32(SHA224_H7),
};

static const __be32 ccp_sha256_init[SHA256_DIGEST_SIZE / sizeof(__be32)] = {
	cpu_to_be32(SHA256_H0), cpu_to_be32(SHA256_H1),
	cpu_to_be32(SHA256_H2), cpu_to_be32(SHA256_H3),
	cpu_to_be32(SHA256_H4), cpu_to_be32(SHA256_H5),
	cpu_to_be32(SHA256_H6), cpu_to_be32(SHA256_H7),
};

#define	CCP_NEW_JOBID(ccp)	((ccp->vdata->version == CCP_VERSION(3, 0)) ? \
					ccp_gen_jobid(ccp) : 0)

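/* Per the macro above, only a version 3 CCP needs a freshly generated
 * job ID for each command; later devices always pass 0. The generated
 * IDs simply increment and wrap within CCP_JOBID_MASK.
 */
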
static u32 ccp_gen_jobid(struct ccp_device *ccp)
{
	return atomic_inc_return(&ccp->current_id) & CCP_JOBID_MASK;
}

static void ccp_sg_free(struct ccp_sg_workarea *wa)
{
	if (wa->dma_count)
		dma_unmap_sg(wa->dma_dev, wa->dma_sg_head, wa->nents, wa->dma_dir);

	wa->dma_count = 0;
}

static int ccp_init_sg_workarea(struct ccp_sg_workarea *wa, struct device *dev,
				struct scatterlist *sg, u64 len,
				enum dma_data_direction dma_dir)
{
	memset(wa, 0, sizeof(*wa));

	wa->sg = sg;
	if (!sg)
		return 0;

	wa->nents = sg_nents_for_len(sg, len);
	if (wa->nents < 0)
		return wa->nents;

	wa->bytes_left = len;
	wa->sg_used = 0;

	if (len == 0)
		return 0;

	if (dma_dir == DMA_NONE)
		return 0;

	wa->dma_sg = sg;
	wa->dma_sg_head = sg;
	wa->dma_dev = dev;
	wa->dma_dir = dma_dir;
	wa->dma_count = dma_map_sg(dev, sg, wa->nents, dma_dir);
	if (!wa->dma_count)
		return -ENOMEM;

	return 0;
}

static void ccp_update_sg_workarea(struct ccp_sg_workarea *wa, unsigned int len)
{
	unsigned int nbytes = min_t(u64, len, wa->bytes_left);
	unsigned int sg_combined_len = 0;

	if (!wa->sg)
		return;

	wa->sg_used += nbytes;
	wa->bytes_left -= nbytes;
	if (wa->sg_used == sg_dma_len(wa->dma_sg)) {
		/* Advance to the next DMA scatterlist entry */
		wa->dma_sg = sg_next(wa->dma_sg);

		/* In the case that the DMA mapped scatterlist has entries
		 * that have been merged, the non-DMA mapped scatterlist
		 * must be advanced multiple times for each merged entry.
		 * This ensures that the current non-DMA mapped entry
		 * corresponds to the current DMA mapped entry.
		 */
		do {
			sg_combined_len += wa->sg->length;
			wa->sg = sg_next(wa->sg);
		} while (wa->sg_used > sg_combined_len);

		wa->sg_used = 0;
	}
}

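/* Illustrative example of the merged-entry walk above: if dma_map_sg()
 * merged two 2KB scatterlist entries into one 4KB DMA entry, then when
 * the 4KB DMA entry is fully consumed the do/while loop advances wa->sg
 * twice (2KB + 2KB) so that it lines up with the new wa->dma_sg.
 */
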
static void ccp_dm_free(struct ccp_dm_workarea *wa)
{
	if (wa->length <= CCP_DMAPOOL_MAX_SIZE) {
		if (wa->address)
			dma_pool_free(wa->dma_pool, wa->address,
				      wa->dma.address);
	} else {
		if (wa->dma.address)
			dma_unmap_single(wa->dev, wa->dma.address, wa->length,
					 wa->dma.dir);
		kfree(wa->address);
	}

	wa->address = NULL;
	wa->dma.address = 0;
}

static int ccp_init_dm_workarea(struct ccp_dm_workarea *wa,
				struct ccp_cmd_queue *cmd_q,
				unsigned int len,
				enum dma_data_direction dir)
{
	memset(wa, 0, sizeof(*wa));

	if (!len)
		return 0;

	wa->dev = cmd_q->ccp->dev;
	wa->length = len;

	if (len <= CCP_DMAPOOL_MAX_SIZE) {
		wa->dma_pool = cmd_q->dma_pool;

		wa->address = dma_pool_alloc(wa->dma_pool, GFP_KERNEL,
					     &wa->dma.address);
		if (!wa->address)
			return -ENOMEM;

		wa->dma.length = CCP_DMAPOOL_MAX_SIZE;

		memset(wa->address, 0, CCP_DMAPOOL_MAX_SIZE);
	} else {
		wa->address = kzalloc(len, GFP_KERNEL);
		if (!wa->address)
			return -ENOMEM;

		wa->dma.address = dma_map_single(wa->dev, wa->address, len,
						 dir);
		if (!wa->dma.address)
			return -ENOMEM;

		wa->dma.length = len;
	}
	wa->dma.dir = dir;

	return 0;
}

static void ccp_set_dm_area(struct ccp_dm_workarea *wa, unsigned int wa_offset,
			    struct scatterlist *sg, unsigned int sg_offset,
			    unsigned int len)
{
	WARN_ON(!wa->address);

	scatterwalk_map_and_copy(wa->address + wa_offset, sg, sg_offset, len,
				 0);
}

static void ccp_get_dm_area(struct ccp_dm_workarea *wa, unsigned int wa_offset,
			    struct scatterlist *sg, unsigned int sg_offset,
			    unsigned int len)
{
	WARN_ON(!wa->address);

	scatterwalk_map_and_copy(wa->address + wa_offset, sg, sg_offset, len,
				 1);
}

static int ccp_reverse_set_dm_area(struct ccp_dm_workarea *wa,
				   struct scatterlist *sg,
				   unsigned int len, unsigned int se_len,
				   bool sign_extend)
{
	unsigned int nbytes, sg_offset, dm_offset, sb_len, i;
	u8 buffer[CCP_REVERSE_BUF_SIZE];

	if (WARN_ON(se_len > sizeof(buffer)))
		return -EINVAL;

	sg_offset = len;
	dm_offset = 0;
	nbytes = len;
	while (nbytes) {
		sb_len = min_t(unsigned int, nbytes, se_len);
		sg_offset -= sb_len;

		scatterwalk_map_and_copy(buffer, sg, sg_offset, sb_len, 0);
		for (i = 0; i < sb_len; i++)
			wa->address[dm_offset + i] = buffer[sb_len - i - 1];

		dm_offset += sb_len;
		nbytes -= sb_len;

		if ((sb_len != se_len) && sign_extend) {
			/* Must sign-extend to nearest sign-extend length */
			if (wa->address[dm_offset - 1] & 0x80)
				memset(wa->address + dm_offset, 0xff,
				       se_len - sb_len);
		}
	}

	return 0;
}

static void ccp_reverse_get_dm_area(struct ccp_dm_workarea *wa,
				    struct scatterlist *sg,
				    unsigned int len)
{
	unsigned int nbytes, sg_offset, dm_offset, sb_len, i;
	u8 buffer[CCP_REVERSE_BUF_SIZE];

	sg_offset = 0;
	dm_offset = len;
	nbytes = len;
	while (nbytes) {
		sb_len = min_t(unsigned int, nbytes, sizeof(buffer));
		dm_offset -= sb_len;

		for (i = 0; i < sb_len; i++)
			buffer[sb_len - i - 1] = wa->address[dm_offset + i];
		scatterwalk_map_and_copy(buffer, sg, sg_offset, sb_len, 1);

		sg_offset += sb_len;
		nbytes -= sb_len;
	}
}

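/* Taken together, ccp_reverse_set_dm_area() and ccp_reverse_get_dm_area()
 * round-trip big endian caller buffers through the little endian work
 * area: e.g. (illustrative) the bytes 0x11 0x22 0x33 0x44 are stored as
 * 0x44 0x33 0x22 0x11 and reversed again on the way back out.
 */
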
static void ccp_free_data(struct ccp_data *data, struct ccp_cmd_queue *cmd_q)
{
	ccp_dm_free(&data->dm_wa);
	ccp_sg_free(&data->sg_wa);
}

static int ccp_init_data(struct ccp_data *data, struct ccp_cmd_queue *cmd_q,
			 struct scatterlist *sg, u64 sg_len,
			 unsigned int dm_len,
			 enum dma_data_direction dir)
{
	int ret;

	memset(data, 0, sizeof(*data));

	ret = ccp_init_sg_workarea(&data->sg_wa, cmd_q->ccp->dev, sg, sg_len,
				   dir);
	if (ret)
		goto e_err;

	ret = ccp_init_dm_workarea(&data->dm_wa, cmd_q, dm_len, dir);
	if (ret)
		goto e_err;

	return 0;

e_err:
	ccp_free_data(data, cmd_q);

	return ret;
}

static unsigned int ccp_queue_buf(struct ccp_data *data, unsigned int from)
{
	struct ccp_sg_workarea *sg_wa = &data->sg_wa;
	struct ccp_dm_workarea *dm_wa = &data->dm_wa;
	unsigned int buf_count, nbytes;

	/* Clear the buffer if setting it */
	if (!from)
		memset(dm_wa->address, 0, dm_wa->length);

	if (!sg_wa->sg)
		return 0;

	/* Perform the copy operation
	 * nbytes will always be <= UINT_MAX because dm_wa->length is
	 * an unsigned int
	 */
	nbytes = min_t(u64, sg_wa->bytes_left, dm_wa->length);
	scatterwalk_map_and_copy(dm_wa->address, sg_wa->sg, sg_wa->sg_used,
				 nbytes, from);

	/* Update the structures and generate the count */
	buf_count = 0;
	while (sg_wa->bytes_left && (buf_count < dm_wa->length)) {
		nbytes = min(sg_dma_len(sg_wa->dma_sg) - sg_wa->sg_used,
			     dm_wa->length - buf_count);
		nbytes = min_t(u64, sg_wa->bytes_left, nbytes);

		buf_count += nbytes;
		ccp_update_sg_workarea(sg_wa, nbytes);
	}

	return buf_count;
}

static unsigned int ccp_fill_queue_buf(struct ccp_data *data)
{
	return ccp_queue_buf(data, 0);
}

static unsigned int ccp_empty_queue_buf(struct ccp_data *data)
{
	return ccp_queue_buf(data, 1);
}

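/* ccp_fill_queue_buf() and ccp_empty_queue_buf() are thin wrappers around
 * ccp_queue_buf(): "fill" copies caller data into the bounce buffer before
 * an operation, "empty" copies device output back to the caller afterwards.
 */
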
static void ccp_prepare_data(struct ccp_data *src, struct ccp_data *dst,
			     struct ccp_op *op, unsigned int block_size,
			     bool blocksize_op)
{
	unsigned int sg_src_len, sg_dst_len, op_len;

	/* The CCP can only DMA from/to one address each per operation. This
	 * requires that we find the smallest DMA area between the source
	 * and destination. The resulting len values will always be <= UINT_MAX
	 * because the dma length is an unsigned int.
	 */
	sg_src_len = sg_dma_len(src->sg_wa.dma_sg) - src->sg_wa.sg_used;
	sg_src_len = min_t(u64, src->sg_wa.bytes_left, sg_src_len);

	if (dst) {
		sg_dst_len = sg_dma_len(dst->sg_wa.dma_sg) - dst->sg_wa.sg_used;
		sg_dst_len = min_t(u64, src->sg_wa.bytes_left, sg_dst_len);
		op_len = min(sg_src_len, sg_dst_len);
	} else {
		op_len = sg_src_len;
	}

	/* The data operation length will be at least block_size in length
	 * or the smaller of available sg room remaining for the source or
	 * destination
	 */
	op_len = max(op_len, block_size);

	/* Unless we have to buffer data, there's no reason to wait */
	op->soc = 0;

	if (sg_src_len < block_size) {
		/* Not enough data in the sg element, so it
		 * needs to be buffered into a blocksize chunk
		 */
		int cp_len = ccp_fill_queue_buf(src);

		op->soc = 1;
		op->src.u.dma.address = src->dm_wa.dma.address;
		op->src.u.dma.offset = 0;
		op->src.u.dma.length = (blocksize_op) ? block_size : cp_len;
	} else {
		/* Enough data in the sg element, but we need to
		 * adjust for any previously copied data
		 */
		op->src.u.dma.address = sg_dma_address(src->sg_wa.dma_sg);
		op->src.u.dma.offset = src->sg_wa.sg_used;
		op->src.u.dma.length = op_len & ~(block_size - 1);

		ccp_update_sg_workarea(&src->sg_wa, op->src.u.dma.length);
	}

	if (dst) {
		if (sg_dst_len < block_size) {
			/* Not enough room in the sg element or we're on the
			 * last piece of data (when using padding), so the
			 * output needs to be buffered into a blocksize chunk
			 */
			op->soc = 1;
			op->dst.u.dma.address = dst->dm_wa.dma.address;
			op->dst.u.dma.offset = 0;
			op->dst.u.dma.length = op->src.u.dma.length;
		} else {
			/* Enough room in the sg element, but we need to
			 * adjust for any previously used area
			 */
			op->dst.u.dma.address = sg_dma_address(dst->sg_wa.dma_sg);
			op->dst.u.dma.offset = dst->sg_wa.sg_used;
			op->dst.u.dma.length = op->src.u.dma.length;
		}
	}
}

static void ccp_process_data(struct ccp_data *src, struct ccp_data *dst,
			     struct ccp_op *op)
{
	op->init = 0;

	if (dst) {
		if (op->dst.u.dma.address == dst->dm_wa.dma.address)
			ccp_empty_queue_buf(dst);
		else
			ccp_update_sg_workarea(&dst->sg_wa,
					       op->dst.u.dma.length);
	}
}

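/* Typical engine loop built on the two helpers above (sketch, mirroring
 * the AES/XTS/SHA command code below):
 *
 *	while (src.sg_wa.bytes_left) {
 *		ccp_prepare_data(&src, &dst, &op, block_size, true);
 *		ret = cmd_q->ccp->vdata->perform->aes(&op);
 *		if (ret)
 *			break;
 *		ccp_process_data(&src, &dst, &op);
 *	}
 */
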
static int ccp_copy_to_from_sb(struct ccp_cmd_queue *cmd_q,
			       struct ccp_dm_workarea *wa, u32 jobid, u32 sb,
			       u32 byte_swap, bool from)
{
	struct ccp_op op;

	memset(&op, 0, sizeof(op));

	op.cmd_q = cmd_q;
	op.jobid = jobid;
	op.eom = 1;

	if (from) {
		op.soc = 1;
		op.src.type = CCP_MEMTYPE_SB;
		op.src.u.sb = sb;
		op.dst.type = CCP_MEMTYPE_SYSTEM;
		op.dst.u.dma.address = wa->dma.address;
		op.dst.u.dma.length = wa->length;
	} else {
		op.src.type = CCP_MEMTYPE_SYSTEM;
		op.src.u.dma.address = wa->dma.address;
		op.src.u.dma.length = wa->length;
		op.dst.type = CCP_MEMTYPE_SB;
		op.dst.u.sb = sb;
	}

	op.u.passthru.byte_swap = byte_swap;

	return cmd_q->ccp->vdata->perform->passthru(&op);
}

static int ccp_copy_to_sb(struct ccp_cmd_queue *cmd_q,
			  struct ccp_dm_workarea *wa, u32 jobid, u32 sb,
			  u32 byte_swap)
{
	return ccp_copy_to_from_sb(cmd_q, wa, jobid, sb, byte_swap, false);
}

static int ccp_copy_from_sb(struct ccp_cmd_queue *cmd_q,
			    struct ccp_dm_workarea *wa, u32 jobid, u32 sb,
			    u32 byte_swap)
{
	return ccp_copy_to_from_sb(cmd_q, wa, jobid, sb, byte_swap, true);
}

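/* ccp_copy_to_sb()/ccp_copy_from_sb() move a work area into or out of a
 * storage block (SB) entry using a passthru operation, optionally byte
 * swapping so keys and contexts land in the endianness the engine expects.
 */
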
static int ccp_run_aes_cmac_cmd(struct ccp_cmd_queue *cmd_q,
				struct ccp_cmd *cmd)
{
	struct ccp_aes_engine *aes = &cmd->u.aes;
	struct ccp_dm_workarea key, ctx;
	struct ccp_data src;
	struct ccp_op op;
	unsigned int dm_offset;
	int ret;

	if (!((aes->key_len == AES_KEYSIZE_128) ||
	      (aes->key_len == AES_KEYSIZE_192) ||
	      (aes->key_len == AES_KEYSIZE_256)))
		return -EINVAL;

	if (aes->src_len & (AES_BLOCK_SIZE - 1))
		return -EINVAL;

	if (aes->iv_len != AES_BLOCK_SIZE)
		return -EINVAL;

	if (!aes->key || !aes->iv || !aes->src)
		return -EINVAL;

	if (aes->cmac_final) {
		if (aes->cmac_key_len != AES_BLOCK_SIZE)
			return -EINVAL;

		if (!aes->cmac_key)
			return -EINVAL;
	}

	BUILD_BUG_ON(CCP_AES_KEY_SB_COUNT != 1);
	BUILD_BUG_ON(CCP_AES_CTX_SB_COUNT != 1);

	ret = -EIO;
	memset(&op, 0, sizeof(op));
	op.cmd_q = cmd_q;
	op.jobid = CCP_NEW_JOBID(cmd_q->ccp);
	op.sb_key = cmd_q->sb_key;
	op.sb_ctx = cmd_q->sb_ctx;
	op.init = 1;
	op.u.aes.type = aes->type;
	op.u.aes.mode = aes->mode;
	op.u.aes.action = aes->action;

	/* All supported key sizes fit in a single (32-byte) SB entry
	 * and must be in little endian format. Use the 256-bit byte
	 * swap passthru option to convert from big endian to little
	 * endian.
	 */
	ret = ccp_init_dm_workarea(&key, cmd_q,
				   CCP_AES_KEY_SB_COUNT * CCP_SB_BYTES,
				   DMA_TO_DEVICE);
	if (ret)
		return ret;

	dm_offset = CCP_SB_BYTES - aes->key_len;
	ccp_set_dm_area(&key, dm_offset, aes->key, 0, aes->key_len);
	ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key,
			     CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_key;
	}

	/* The AES context fits in a single (32-byte) SB entry and
	 * must be in little endian format. Use the 256-bit byte swap
	 * passthru option to convert from big endian to little endian.
	 */
	ret = ccp_init_dm_workarea(&ctx, cmd_q,
				   CCP_AES_CTX_SB_COUNT * CCP_SB_BYTES,
				   DMA_BIDIRECTIONAL);
	if (ret)
		goto e_key;

	dm_offset = CCP_SB_BYTES - AES_BLOCK_SIZE;
	ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
	ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
			     CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_ctx;
	}

	/* Send data to the CCP AES engine */
	ret = ccp_init_data(&src, cmd_q, aes->src, aes->src_len,
			    AES_BLOCK_SIZE, DMA_TO_DEVICE);
	if (ret)
		goto e_ctx;

	while (src.sg_wa.bytes_left) {
		ccp_prepare_data(&src, NULL, &op, AES_BLOCK_SIZE, true);
		if (aes->cmac_final && !src.sg_wa.bytes_left) {
			op.eom = 1;

			/* Push the K1/K2 key to the CCP now */
			ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid,
					       op.sb_ctx,
					       CCP_PASSTHRU_BYTESWAP_256BIT);
			if (ret) {
				cmd->engine_error = cmd_q->cmd_error;
				goto e_src;
			}

			ccp_set_dm_area(&ctx, 0, aes->cmac_key, 0,
					aes->cmac_key_len);
			ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
					     CCP_PASSTHRU_BYTESWAP_256BIT);
			if (ret) {
				cmd->engine_error = cmd_q->cmd_error;
				goto e_src;
			}
		}

		ret = cmd_q->ccp->vdata->perform->aes(&op);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			goto e_src;
		}

		ccp_process_data(&src, NULL, &op);
	}

	/* Retrieve the AES context - convert from LE to BE using
	 * 32-byte (256-bit) byteswapping
	 */
	ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
			       CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_src;
	}

	/* ...but we only need AES_BLOCK_SIZE bytes */
	dm_offset = CCP_SB_BYTES - AES_BLOCK_SIZE;
	ccp_get_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);

e_src:
	ccp_free_data(&src, cmd_q);

e_ctx:
	ccp_dm_free(&ctx);

e_key:
	ccp_dm_free(&key);

	return ret;
}

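/* Handle a non-CMAC AES operation: load the key and (except for ECB mode)
 * the IV into storage block entries, then stream the source through the
 * engine one DMA-able chunk at a time, retrieving the updated IV at the
 * end for modes that use one.
 */
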
static int ccp_run_aes_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
	struct ccp_aes_engine *aes = &cmd->u.aes;
	struct ccp_dm_workarea key, ctx;
	struct ccp_data src, dst;
	struct ccp_op op;
	unsigned int dm_offset;
	bool in_place = false;
	int ret;

	if (aes->mode == CCP_AES_MODE_CMAC)
		return ccp_run_aes_cmac_cmd(cmd_q, cmd);

	if (!((aes->key_len == AES_KEYSIZE_128) ||
	      (aes->key_len == AES_KEYSIZE_192) ||
	      (aes->key_len == AES_KEYSIZE_256)))
		return -EINVAL;

	if (((aes->mode == CCP_AES_MODE_ECB) ||
	     (aes->mode == CCP_AES_MODE_CBC) ||
	     (aes->mode == CCP_AES_MODE_CFB)) &&
	    (aes->src_len & (AES_BLOCK_SIZE - 1)))
		return -EINVAL;

	if (!aes->key || !aes->src || !aes->dst)
		return -EINVAL;

	if (aes->mode != CCP_AES_MODE_ECB) {
		if (aes->iv_len != AES_BLOCK_SIZE)
			return -EINVAL;

		if (!aes->iv)
			return -EINVAL;
	}

	BUILD_BUG_ON(CCP_AES_KEY_SB_COUNT != 1);
	BUILD_BUG_ON(CCP_AES_CTX_SB_COUNT != 1);

	ret = -EIO;
	memset(&op, 0, sizeof(op));
	op.cmd_q = cmd_q;
	op.jobid = CCP_NEW_JOBID(cmd_q->ccp);
	op.sb_key = cmd_q->sb_key;
	op.sb_ctx = cmd_q->sb_ctx;
	op.init = (aes->mode == CCP_AES_MODE_ECB) ? 0 : 1;
	op.u.aes.type = aes->type;
	op.u.aes.mode = aes->mode;
	op.u.aes.action = aes->action;

	/* All supported key sizes fit in a single (32-byte) SB entry
	 * and must be in little endian format. Use the 256-bit byte
	 * swap passthru option to convert from big endian to little
	 * endian.
	 */
	ret = ccp_init_dm_workarea(&key, cmd_q,
				   CCP_AES_KEY_SB_COUNT * CCP_SB_BYTES,
				   DMA_TO_DEVICE);
	if (ret)
		return ret;

	dm_offset = CCP_SB_BYTES - aes->key_len;
	ccp_set_dm_area(&key, dm_offset, aes->key, 0, aes->key_len);
	ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key,
			     CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_key;
	}

	/* The AES context fits in a single (32-byte) SB entry and
	 * must be in little endian format. Use the 256-bit byte swap
	 * passthru option to convert from big endian to little endian.
	 */
	ret = ccp_init_dm_workarea(&ctx, cmd_q,
				   CCP_AES_CTX_SB_COUNT * CCP_SB_BYTES,
				   DMA_BIDIRECTIONAL);
	if (ret)
		goto e_key;

	if (aes->mode != CCP_AES_MODE_ECB) {
		/* Load the AES context - convert to LE */
		dm_offset = CCP_SB_BYTES - AES_BLOCK_SIZE;
		ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
		ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
				     CCP_PASSTHRU_BYTESWAP_256BIT);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			goto e_ctx;
		}
	}
	switch (aes->mode) {
	case CCP_AES_MODE_CFB: /* CFB128 only */
	case CCP_AES_MODE_CTR:
		op.u.aes.size = AES_BLOCK_SIZE * BITS_PER_BYTE - 1;
		break;
	default:
		op.u.aes.size = 0;
	}

	/* Prepare the input and output data workareas. For in-place
	 * operations we need to set the dma direction to BIDIRECTIONAL
	 * and copy the src workarea to the dst workarea.
	 */
	if (sg_virt(aes->src) == sg_virt(aes->dst))
		in_place = true;

	ret = ccp_init_data(&src, cmd_q, aes->src, aes->src_len,
			    AES_BLOCK_SIZE,
			    in_place ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
	if (ret)
		goto e_ctx;

	if (in_place) {
		dst = src;
	} else {
		ret = ccp_init_data(&dst, cmd_q, aes->dst, aes->src_len,
				    AES_BLOCK_SIZE, DMA_FROM_DEVICE);
		if (ret)
			goto e_src;
	}

	/* Send data to the CCP AES engine */
	while (src.sg_wa.bytes_left) {
		ccp_prepare_data(&src, &dst, &op, AES_BLOCK_SIZE, true);
		if (!src.sg_wa.bytes_left) {
			op.eom = 1;

			/* Since we don't retrieve the AES context in ECB
			 * mode we have to wait for the operation to complete
			 * on the last piece of data
			 */
			if (aes->mode == CCP_AES_MODE_ECB)
				op.soc = 1;
		}

		ret = cmd_q->ccp->vdata->perform->aes(&op);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			goto e_dst;
		}

		ccp_process_data(&src, &dst, &op);
	}

	if (aes->mode != CCP_AES_MODE_ECB) {
		/* Retrieve the AES context - convert from LE to BE using
		 * 32-byte (256-bit) byteswapping
		 */
		ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
				       CCP_PASSTHRU_BYTESWAP_256BIT);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			goto e_dst;
		}

		/* ...but we only need AES_BLOCK_SIZE bytes */
		dm_offset = CCP_SB_BYTES - AES_BLOCK_SIZE;
		ccp_get_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
	}

e_dst:
	if (!in_place)
		ccp_free_data(&dst, cmd_q);

e_src:
	ccp_free_data(&src, cmd_q);

e_ctx:
	ccp_dm_free(&ctx);

e_key:
	ccp_dm_free(&key);

	return ret;
}

static int ccp_run_xts_aes_cmd(struct ccp_cmd_queue *cmd_q,
			       struct ccp_cmd *cmd)
{
	struct ccp_xts_aes_engine *xts = &cmd->u.xts;
	struct ccp_dm_workarea key, ctx;
	struct ccp_data src, dst;
	struct ccp_op op;
	unsigned int unit_size, dm_offset;
	bool in_place = false;
	unsigned int sb_count;
	enum ccp_aes_type aestype;
	int ret;

	switch (xts->unit_size) {
	case CCP_XTS_AES_UNIT_SIZE_16:
		unit_size = 16;
		break;
	case CCP_XTS_AES_UNIT_SIZE_512:
		unit_size = 512;
		break;
	case CCP_XTS_AES_UNIT_SIZE_1024:
		unit_size = 1024;
		break;
	case CCP_XTS_AES_UNIT_SIZE_2048:
		unit_size = 2048;
		break;
	case CCP_XTS_AES_UNIT_SIZE_4096:
		unit_size = 4096;
		break;

	default:
		return -EINVAL;
	}

	if (xts->key_len == AES_KEYSIZE_128)
		aestype = CCP_AES_TYPE_128;
	else
		return -EINVAL;

	if (!xts->final && (xts->src_len & (AES_BLOCK_SIZE - 1)))
		return -EINVAL;

	if (xts->iv_len != AES_BLOCK_SIZE)
		return -EINVAL;

	if (!xts->key || !xts->iv || !xts->src || !xts->dst)
		return -EINVAL;

	BUILD_BUG_ON(CCP_XTS_AES_KEY_SB_COUNT != 1);
	BUILD_BUG_ON(CCP_XTS_AES_CTX_SB_COUNT != 1);

	ret = -EIO;
	memset(&op, 0, sizeof(op));
	op.cmd_q = cmd_q;
	op.jobid = CCP_NEW_JOBID(cmd_q->ccp);
	op.sb_key = cmd_q->sb_key;
	op.sb_ctx = cmd_q->sb_ctx;
	op.init = 1;
	op.u.xts.type = aestype;
	op.u.xts.action = xts->action;
	op.u.xts.unit_size = xts->unit_size;

	/* A version 3 device only supports 128-bit keys, which fits into a
	 * single SB entry. A version 5 device uses a 512-bit vector, so two
	 * SB entries are needed.
	 */
	if (cmd_q->ccp->vdata->version == CCP_VERSION(3, 0))
		sb_count = CCP_XTS_AES_KEY_SB_COUNT;
	else
		sb_count = CCP5_XTS_AES_KEY_SB_COUNT;
	ret = ccp_init_dm_workarea(&key, cmd_q,
				   sb_count * CCP_SB_BYTES,
				   DMA_TO_DEVICE);
	if (ret)
		return ret;

	if (cmd_q->ccp->vdata->version == CCP_VERSION(3, 0)) {
		/* All supported key sizes must be in little endian format.
		 * Use the 256-bit byte swap passthru option to convert from
		 * big endian to little endian.
		 */
		dm_offset = CCP_SB_BYTES - AES_KEYSIZE_128;
		ccp_set_dm_area(&key, dm_offset, xts->key, 0, xts->key_len);
		ccp_set_dm_area(&key, 0, xts->key, xts->key_len, xts->key_len);
	} else {
		/* Version 5 CCPs use a 512-bit space for the key: each portion
		 * occupies 256 bits, or one entire slot, and is zero-padded.
		 */
		unsigned int pad;

		dm_offset = CCP_SB_BYTES;
		pad = dm_offset - xts->key_len;
		ccp_set_dm_area(&key, pad, xts->key, 0, xts->key_len);
		ccp_set_dm_area(&key, dm_offset + pad, xts->key, xts->key_len,
				xts->key_len);
	}
	ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key,
			     CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_key;
	}

	/* The AES context fits in a single (32-byte) SB entry and
	 * for XTS is already in little endian format so no byte swapping
	 * is needed.
	 */
	ret = ccp_init_dm_workarea(&ctx, cmd_q,
				   CCP_XTS_AES_CTX_SB_COUNT * CCP_SB_BYTES,
				   DMA_BIDIRECTIONAL);
	if (ret)
		goto e_key;

	ccp_set_dm_area(&ctx, 0, xts->iv, 0, xts->iv_len);
	ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
			     CCP_PASSTHRU_BYTESWAP_NOOP);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_ctx;
	}

	/* Prepare the input and output data workareas. For in-place
	 * operations we need to set the dma direction to BIDIRECTIONAL
	 * and copy the src workarea to the dst workarea.
	 */
	if (sg_virt(xts->src) == sg_virt(xts->dst))
		in_place = true;

	ret = ccp_init_data(&src, cmd_q, xts->src, xts->src_len,
			    unit_size,
			    in_place ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
	if (ret)
		goto e_ctx;

	if (in_place) {
		dst = src;
	} else {
		ret = ccp_init_data(&dst, cmd_q, xts->dst, xts->src_len,
				    unit_size, DMA_FROM_DEVICE);
		if (ret)
			goto e_src;
	}

	/* Send data to the CCP AES engine */
	while (src.sg_wa.bytes_left) {
		ccp_prepare_data(&src, &dst, &op, unit_size, true);
		if (!src.sg_wa.bytes_left)
			op.eom = 1;

		ret = cmd_q->ccp->vdata->perform->xts_aes(&op);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			goto e_dst;
		}

		ccp_process_data(&src, &dst, &op);
	}

	/* Retrieve the AES context - convert from LE to BE using
	 * 32-byte (256-bit) byteswapping
	 */
	ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
			       CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_dst;
	}

	/* ...but we only need AES_BLOCK_SIZE bytes */
	dm_offset = CCP_SB_BYTES - AES_BLOCK_SIZE;
	ccp_get_dm_area(&ctx, dm_offset, xts->iv, 0, xts->iv_len);

e_dst:
	if (!in_place)
		ccp_free_data(&dst, cmd_q);

e_src:
	ccp_free_data(&src, cmd_q);

e_ctx:
	ccp_dm_free(&ctx);

e_key:
	ccp_dm_free(&key);

	return ret;
}

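/* XTS key layout for ccp_run_xts_aes_cmd() above (illustrative sketch for
 * a 128-bit key pair, before the 256-bit byteswap passthru is applied):
 *
 *   version 3: one 32-byte slot:  [second key (16) | first key (16)]
 *   version 5: two 32-byte slots, each zero-padded at the front:
 *              [pad (16) | first key (16)] [pad (16) | second key (16)]
 */
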
static int ccp_run_sha_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
	struct ccp_sha_engine *sha = &cmd->u.sha;
	struct ccp_dm_workarea ctx;
	struct ccp_data src;
	struct ccp_op op;
	unsigned int ioffset, ooffset;
	unsigned int digest_size;
	int sb_count;
	const void *init;
	u64 block_size;
	int ctx_size;
	int ret;

	switch (sha->type) {
	case CCP_SHA_TYPE_1:
		if (sha->ctx_len < SHA1_DIGEST_SIZE)
			return -EINVAL;
		block_size = SHA1_BLOCK_SIZE;
		break;
	case CCP_SHA_TYPE_224:
		if (sha->ctx_len < SHA224_DIGEST_SIZE)
			return -EINVAL;
		block_size = SHA224_BLOCK_SIZE;
		break;
	case CCP_SHA_TYPE_256:
		if (sha->ctx_len < SHA256_DIGEST_SIZE)
			return -EINVAL;
		block_size = SHA256_BLOCK_SIZE;
		break;
	default:
		return -EINVAL;
	}

	if (!sha->ctx)
		return -EINVAL;

	if (!sha->final && (sha->src_len & (block_size - 1)))
		return -EINVAL;

	/* The version 3 device can't handle zero-length input */
	if (cmd_q->ccp->vdata->version == CCP_VERSION(3, 0)) {

		if (!sha->src_len) {
			unsigned int digest_len;
			const u8 *sha_zero;

			/* Not final, just return */
			if (!sha->final)
				return 0;

			/* CCP can't do a zero length sha operation so the
			 * caller must buffer the data.
			 */
			if (sha->msg_bits)
				return -EINVAL;

			/* The CCP cannot perform zero-length sha operations
			 * so the caller is required to buffer data for the
			 * final operation. However, a sha operation for a
			 * message with a total length of zero is valid so
			 * known values are required to supply the result.
			 */
			switch (sha->type) {
			case CCP_SHA_TYPE_1:
				sha_zero = sha1_zero_message_hash;
				digest_len = SHA1_DIGEST_SIZE;
				break;
			case CCP_SHA_TYPE_224:
				sha_zero = sha224_zero_message_hash;
				digest_len = SHA224_DIGEST_SIZE;
				break;
			case CCP_SHA_TYPE_256:
				sha_zero = sha256_zero_message_hash;
				digest_len = SHA256_DIGEST_SIZE;
				break;
			default:
				return -EINVAL;
			}

			scatterwalk_map_and_copy((void *)sha_zero, sha->ctx, 0,
						 digest_len, 1);

			return 0;
		}
	}

	/* Set variables used throughout */
	switch (sha->type) {
	case CCP_SHA_TYPE_1:
		digest_size = SHA1_DIGEST_SIZE;
		init = (void *) ccp_sha1_init;
		ctx_size = SHA1_DIGEST_SIZE;
		sb_count = 1;
		if (cmd_q->ccp->vdata->version != CCP_VERSION(3, 0))
			ooffset = ioffset = CCP_SB_BYTES - SHA1_DIGEST_SIZE;
		else
			ooffset = ioffset = 0;
		break;
	case CCP_SHA_TYPE_224:
		digest_size = SHA224_DIGEST_SIZE;
		init = (void *) ccp_sha224_init;
		ctx_size = SHA256_DIGEST_SIZE;
		sb_count = 1;
		ioffset = 0;
		if (cmd_q->ccp->vdata->version != CCP_VERSION(3, 0))
			ooffset = CCP_SB_BYTES - SHA224_DIGEST_SIZE;
		else
			ooffset = 0;
		break;
	case CCP_SHA_TYPE_256:
		digest_size = SHA256_DIGEST_SIZE;
		init = (void *) ccp_sha256_init;
		ctx_size = SHA256_DIGEST_SIZE;
		sb_count = 1;
		ooffset = ioffset = 0;
		break;
	default:
		ret = -EINVAL;
		goto e_data;
	}

	/* For zero-length plaintext the src pointer is ignored;
	 * otherwise both parts must be valid
	 */
	if (sha->src_len && !sha->src)
		return -EINVAL;

	memset(&op, 0, sizeof(op));
	op.cmd_q = cmd_q;
	op.jobid = CCP_NEW_JOBID(cmd_q->ccp);
	op.sb_ctx = cmd_q->sb_ctx; /* Pre-allocated */
	op.u.sha.type = sha->type;
	op.u.sha.msg_bits = sha->msg_bits;

	ret = ccp_init_dm_workarea(&ctx, cmd_q, sb_count * CCP_SB_BYTES,
				   DMA_BIDIRECTIONAL);
	if (ret)
		return ret;
	if (sha->first) {
		switch (sha->type) {
		case CCP_SHA_TYPE_1:
		case CCP_SHA_TYPE_224:
		case CCP_SHA_TYPE_256:
			memcpy(ctx.address + ioffset, init, ctx_size);
			break;
		default:
			ret = -EINVAL;
			goto e_ctx;
		}
	} else {
		/* Restore the context */
		ccp_set_dm_area(&ctx, 0, sha->ctx, 0,
				sb_count * CCP_SB_BYTES);
	}

	ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
			     CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_ctx;
	}

	if (sha->src) {
		/* Send data to the CCP SHA engine; block_size is set above */
		ret = ccp_init_data(&src, cmd_q, sha->src, sha->src_len,
				    block_size, DMA_TO_DEVICE);
		if (ret)
			goto e_ctx;

		while (src.sg_wa.bytes_left) {
			ccp_prepare_data(&src, NULL, &op, block_size, false);
			if (sha->final && !src.sg_wa.bytes_left)
				op.eom = 1;

			ret = cmd_q->ccp->vdata->perform->sha(&op);
			if (ret) {
				cmd->engine_error = cmd_q->cmd_error;
				goto e_data;
			}

			ccp_process_data(&src, NULL, &op);
		}
	} else {
		op.eom = 1;
		ret = cmd_q->ccp->vdata->perform->sha(&op);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			goto e_data;
		}
	}

	/* Retrieve the SHA context - convert from LE to BE using
	 * 32-byte (256-bit) byteswapping to BE
	 */
	ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
			       CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_data;
	}

	if (sha->final) {
		/* Finishing up, so get the digest */
		switch (sha->type) {
		case CCP_SHA_TYPE_1:
		case CCP_SHA_TYPE_224:
		case CCP_SHA_TYPE_256:
			ccp_get_dm_area(&ctx, ooffset,
					sha->ctx, 0,
					digest_size);
			break;
		default:
			ret = -EINVAL;
			goto e_ctx;
		}
	} else {
		/* Stash the context */
		ccp_get_dm_area(&ctx, 0, sha->ctx, 0,
				sb_count * CCP_SB_BYTES);
	}

	if (sha->final && sha->opad) {
		/* HMAC operation, recursively perform final SHA */
		struct ccp_cmd hmac_cmd;
		struct scatterlist sg;
		u8 *hmac_buf;

		if (sha->opad_len != block_size) {
			ret = -EINVAL;
			goto e_data;
		}

		hmac_buf = kmalloc(block_size + digest_size, GFP_KERNEL);
		if (!hmac_buf) {
			ret = -ENOMEM;
			goto e_data;
		}
		sg_init_one(&sg, hmac_buf, block_size + digest_size);

		scatterwalk_map_and_copy(hmac_buf, sha->opad, 0, block_size, 0);
		switch (sha->type) {
		case CCP_SHA_TYPE_1:
		case CCP_SHA_TYPE_224:
		case CCP_SHA_TYPE_256:
			memcpy(hmac_buf + block_size,
			       ctx.address + ooffset,
			       digest_size);
			break;
		default:
			ret = -EINVAL;
			kfree(hmac_buf);
			goto e_data;
		}

		memset(&hmac_cmd, 0, sizeof(hmac_cmd));
		hmac_cmd.engine = CCP_ENGINE_SHA;
		hmac_cmd.u.sha.type = sha->type;
		hmac_cmd.u.sha.ctx = sha->ctx;
		hmac_cmd.u.sha.ctx_len = sha->ctx_len;
		hmac_cmd.u.sha.src = &sg;
		hmac_cmd.u.sha.src_len = block_size + digest_size;
		hmac_cmd.u.sha.opad = NULL;
		hmac_cmd.u.sha.opad_len = 0;
		hmac_cmd.u.sha.first = 1;
		hmac_cmd.u.sha.final = 1;
		hmac_cmd.u.sha.msg_bits = (block_size + digest_size) << 3;

		ret = ccp_run_sha_cmd(cmd_q, &hmac_cmd);
		if (ret)
			cmd->engine_error = hmac_cmd.engine_error;

		kfree(hmac_buf);
	}

e_data:
	if (sha->src)
		ccp_free_data(&src, cmd_q);

e_ctx:
	ccp_dm_free(&ctx);

	return ret;
}

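/* Handle an RSA operation: the exponent is loaded into storage block
 * entries, the modulus and message are reverse-copied (big endian to
 * little endian) into one concatenated source buffer, and the result is
 * reversed back out of a bounce buffer.
 */
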
static int ccp_run_rsa_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
	struct ccp_rsa_engine *rsa = &cmd->u.rsa;
	struct ccp_dm_workarea exp, src;
	struct ccp_data dst;
	struct ccp_op op;
	unsigned int sb_count, i_len, o_len;
	int ret;

	if (rsa->key_size > CCP_RSA_MAX_WIDTH)
		return -EINVAL;

	if (!rsa->exp || !rsa->mod || !rsa->src || !rsa->dst)
		return -EINVAL;

	/* The RSA modulus must precede the message being acted upon, so
	 * it must be copied to a DMA area where the message and the
	 * modulus can be concatenated. Therefore the input buffer
	 * length required is twice the output buffer length (which
	 * must be a multiple of 256-bits).
	 */
	o_len = ((rsa->key_size + 255) / 256) * 32;
	i_len = o_len * 2;

	sb_count = o_len / CCP_SB_BYTES;

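	/* Sizing example (illustrative): a 2048-bit key gives
	 * o_len = ((2048 + 255) / 256) * 32 = 256 bytes, so i_len is
	 * 512 bytes and sb_count is 256 / 32 = 8 SB entries.
	 */
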
	memset(&op, 0, sizeof(op));
	op.cmd_q = cmd_q;
	op.jobid = ccp_gen_jobid(cmd_q->ccp);
	op.sb_key = cmd_q->ccp->vdata->perform->sballoc(cmd_q, sb_count);

	if (!op.sb_key)
		return -EIO;

	/* The RSA exponent may span multiple (32-byte) SB entries and must
	 * be in little endian format. Reverse copy each 32-byte chunk
	 * of the exponent (En chunk to E0 chunk, E(n-1) chunk to E1 chunk)
	 * and each byte within that chunk and do not perform any byte swap
	 * operations on the passthru operation.
	 */
	ret = ccp_init_dm_workarea(&exp, cmd_q, o_len, DMA_TO_DEVICE);
	if (ret)
		goto e_sb;

	ret = ccp_reverse_set_dm_area(&exp, rsa->exp, rsa->exp_len,
				      CCP_SB_BYTES, false);
	if (ret)
		goto e_exp;
	ret = ccp_copy_to_sb(cmd_q, &exp, op.jobid, op.sb_key,
			     CCP_PASSTHRU_BYTESWAP_NOOP);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_exp;
	}

	/* Concatenate the modulus and the message. Both the modulus and
	 * the operands must be in little endian format. Since the input
	 * is in big endian format it must be converted.
	 */
	ret = ccp_init_dm_workarea(&src, cmd_q, i_len, DMA_TO_DEVICE);
	if (ret)
		goto e_exp;

	ret = ccp_reverse_set_dm_area(&src, rsa->mod, rsa->mod_len,
				      CCP_SB_BYTES, false);
	if (ret)
		goto e_src;
	src.address += o_len;	/* Adjust the address for the copy operation */
	ret = ccp_reverse_set_dm_area(&src, rsa->src, rsa->src_len,
				      CCP_SB_BYTES, false);
	if (ret)
		goto e_src;
	src.address -= o_len;	/* Reset the address to original value */

	/* Prepare the output area for the operation */
	ret = ccp_init_data(&dst, cmd_q, rsa->dst, rsa->mod_len,
			    o_len, DMA_FROM_DEVICE);
	if (ret)
		goto e_src;

	op.soc = 1;
	op.src.u.dma.address = src.dma.address;
	op.src.u.dma.offset = 0;
	op.src.u.dma.length = i_len;
	op.dst.u.dma.address = dst.dm_wa.dma.address;
	op.dst.u.dma.offset = 0;
	op.dst.u.dma.length = o_len;

	op.u.rsa.mod_size = rsa->key_size;
	op.u.rsa.input_len = i_len;

	ret = cmd_q->ccp->vdata->perform->rsa(&op);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_dst;
	}

	ccp_reverse_get_dm_area(&dst.dm_wa, rsa->dst, rsa->mod_len);

e_dst:
	ccp_free_data(&dst, cmd_q);

e_src:
	ccp_dm_free(&src);

e_exp:
	ccp_dm_free(&exp);

e_sb:
	cmd_q->ccp->vdata->perform->sbfree(cmd_q, op.sb_key, sb_count);

	return ret;
}

static int ccp_run_passthru_cmd(struct ccp_cmd_queue *cmd_q,
				struct ccp_cmd *cmd)
{
	struct ccp_passthru_engine *pt = &cmd->u.passthru;
	struct ccp_dm_workarea mask;
	struct ccp_data src, dst;
	struct ccp_op op;
	bool in_place = false;
	unsigned int i;
	int ret = 0;

	if (!pt->final && (pt->src_len & (CCP_PASSTHRU_BLOCKSIZE - 1)))
		return -EINVAL;

	if (!pt->src || !pt->dst)
		return -EINVAL;

	if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP) {
		if (pt->mask_len != CCP_PASSTHRU_MASKSIZE)
			return -EINVAL;
		if (!pt->mask)
			return -EINVAL;
	}

	BUILD_BUG_ON(CCP_PASSTHRU_SB_COUNT != 1);

	memset(&op, 0, sizeof(op));
	op.cmd_q = cmd_q;
	op.jobid = CCP_NEW_JOBID(cmd_q->ccp);

	if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP) {
		/* Load the mask */
		op.sb_key = cmd_q->sb_key;

		ret = ccp_init_dm_workarea(&mask, cmd_q,
					   CCP_PASSTHRU_SB_COUNT *
					   CCP_SB_BYTES,
					   DMA_TO_DEVICE);
		if (ret)
			return ret;

		ccp_set_dm_area(&mask, 0, pt->mask, 0, pt->mask_len);
		ret = ccp_copy_to_sb(cmd_q, &mask, op.jobid, op.sb_key,
				     CCP_PASSTHRU_BYTESWAP_NOOP);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			goto e_mask;
		}
	}

	/* Prepare the input and output data workareas. For in-place
	 * operations we need to set the dma direction to BIDIRECTIONAL
	 * and copy the src workarea to the dst workarea.
	 */
	if (sg_virt(pt->src) == sg_virt(pt->dst))
		in_place = true;

	ret = ccp_init_data(&src, cmd_q, pt->src, pt->src_len,
			    CCP_PASSTHRU_MASKSIZE,
			    in_place ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
	if (ret)
		goto e_mask;

	if (in_place) {
		dst = src;
	} else {
		ret = ccp_init_data(&dst, cmd_q, pt->dst, pt->src_len,
				    CCP_PASSTHRU_MASKSIZE, DMA_FROM_DEVICE);
		if (ret)
			goto e_src;
	}

	/* Send data to the CCP Passthru engine
	 * Because the CCP engine works on a single source and destination
	 * dma address at a time, each entry in the source scatterlist
	 * (after the dma_map_sg call) must be less than or equal to the
	 * (remaining) length in the destination scatterlist entry and the
	 * length must be a multiple of CCP_PASSTHRU_BLOCKSIZE
	 */
	dst.sg_wa.sg_used = 0;
	for (i = 1; i <= src.sg_wa.dma_count; i++) {
		if (!dst.sg_wa.sg ||
		    (sg_dma_len(dst.sg_wa.sg) < sg_dma_len(src.sg_wa.sg))) {
			ret = -EINVAL;
			goto e_dst;
		}

		if (i == src.sg_wa.dma_count) {
			op.eom = 1;
			op.soc = 1;
		}

		op.src.type = CCP_MEMTYPE_SYSTEM;
		op.src.u.dma.address = sg_dma_address(src.sg_wa.sg);
		op.src.u.dma.offset = 0;
		op.src.u.dma.length = sg_dma_len(src.sg_wa.sg);

		op.dst.type = CCP_MEMTYPE_SYSTEM;
		op.dst.u.dma.address = sg_dma_address(dst.sg_wa.sg);
		op.dst.u.dma.offset = dst.sg_wa.sg_used;
		op.dst.u.dma.length = op.src.u.dma.length;

		ret = cmd_q->ccp->vdata->perform->passthru(&op);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			goto e_dst;
		}

		dst.sg_wa.sg_used += sg_dma_len(src.sg_wa.sg);
		if (dst.sg_wa.sg_used == sg_dma_len(dst.sg_wa.sg)) {
			dst.sg_wa.sg = sg_next(dst.sg_wa.sg);
			dst.sg_wa.sg_used = 0;
		}
		src.sg_wa.sg = sg_next(src.sg_wa.sg);
	}

e_dst:
	if (!in_place)
		ccp_free_data(&dst, cmd_q);

e_src:
	ccp_free_data(&src, cmd_q);

e_mask:
	if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP)
		ccp_dm_free(&mask);

	return ret;
}

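/* As ccp_run_passthru_cmd() above, but for callers that have already DMA
 * mapped their buffers: the given source/destination DMA addresses are
 * used directly and no scatterlist handling is needed.
 */
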
static int ccp_run_passthru_nomap_cmd(struct ccp_cmd_queue *cmd_q,
				      struct ccp_cmd *cmd)
{
	struct ccp_passthru_nomap_engine *pt = &cmd->u.passthru_nomap;
	struct ccp_dm_workarea mask;
	struct ccp_op op;
	int ret;

	if (!pt->final && (pt->src_len & (CCP_PASSTHRU_BLOCKSIZE - 1)))
		return -EINVAL;

	if (!pt->src_dma || !pt->dst_dma)
		return -EINVAL;

	if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP) {
		if (pt->mask_len != CCP_PASSTHRU_MASKSIZE)
			return -EINVAL;
		if (!pt->mask)
			return -EINVAL;
	}

	BUILD_BUG_ON(CCP_PASSTHRU_SB_COUNT != 1);

	memset(&op, 0, sizeof(op));
	op.cmd_q = cmd_q;
	op.jobid = ccp_gen_jobid(cmd_q->ccp);

	if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP) {
		/* Load the mask */
		op.sb_key = cmd_q->sb_key;

		mask.length = pt->mask_len;
		mask.dma.address = pt->mask;
		mask.dma.length = pt->mask_len;

		ret = ccp_copy_to_sb(cmd_q, &mask, op.jobid, op.sb_key,
				     CCP_PASSTHRU_BYTESWAP_NOOP);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			return ret;
		}
	}

	/* Send data to the CCP Passthru engine */
	op.eom = 1;
	op.soc = 1;

	op.src.type = CCP_MEMTYPE_SYSTEM;
	op.src.u.dma.address = pt->src_dma;
	op.src.u.dma.offset = 0;
	op.src.u.dma.length = pt->src_len;

	op.dst.type = CCP_MEMTYPE_SYSTEM;
	op.dst.u.dma.address = pt->dst_dma;
	op.dst.u.dma.offset = 0;
	op.dst.u.dma.length = pt->src_len;

	ret = cmd_q->ccp->vdata->perform->passthru(&op);
	if (ret)
		cmd->engine_error = cmd_q->cmd_error;

	return ret;
}

static int ccp_run_ecc_mm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
	struct ccp_ecc_engine *ecc = &cmd->u.ecc;
	struct ccp_dm_workarea src, dst;
	struct ccp_op op;
	int ret;
	u8 *save;

	if (!ecc->u.mm.operand_1 ||
	    (ecc->u.mm.operand_1_len > CCP_ECC_MODULUS_BYTES))
		return -EINVAL;

	if (ecc->function != CCP_ECC_FUNCTION_MINV_384BIT)
		if (!ecc->u.mm.operand_2 ||
		    (ecc->u.mm.operand_2_len > CCP_ECC_MODULUS_BYTES))
			return -EINVAL;

	if (!ecc->u.mm.result ||
	    (ecc->u.mm.result_len < CCP_ECC_MODULUS_BYTES))
		return -EINVAL;

	memset(&op, 0, sizeof(op));
	op.cmd_q = cmd_q;
	op.jobid = CCP_NEW_JOBID(cmd_q->ccp);

	/* Concatenate the modulus and the operands. Both the modulus and
	 * the operands must be in little endian format. Since the input
	 * is in big endian format it must be converted and placed in a
	 * fixed length buffer.
	 */
	ret = ccp_init_dm_workarea(&src, cmd_q, CCP_ECC_SRC_BUF_SIZE,
				   DMA_TO_DEVICE);
	if (ret)
		return ret;

	/* Save the workarea address since it is updated in order to perform
	 * the concatenation
	 */
	save = src.address;

	/* Copy the ECC modulus */
	ret = ccp_reverse_set_dm_area(&src, ecc->mod, ecc->mod_len,
				      CCP_ECC_OPERAND_SIZE, false);
	if (ret)
		goto e_src;
	src.address += CCP_ECC_OPERAND_SIZE;

	/* Copy the first operand */
	ret = ccp_reverse_set_dm_area(&src, ecc->u.mm.operand_1,
				      ecc->u.mm.operand_1_len,
				      CCP_ECC_OPERAND_SIZE, false);
	if (ret)
		goto e_src;
	src.address += CCP_ECC_OPERAND_SIZE;

	if (ecc->function != CCP_ECC_FUNCTION_MINV_384BIT) {
		/* Copy the second operand */
		ret = ccp_reverse_set_dm_area(&src, ecc->u.mm.operand_2,
					      ecc->u.mm.operand_2_len,
					      CCP_ECC_OPERAND_SIZE, false);
		if (ret)
			goto e_src;
		src.address += CCP_ECC_OPERAND_SIZE;
	}

	/* Restore the workarea address */
	src.address = save;

	/* Prepare the output area for the operation */
	ret = ccp_init_dm_workarea(&dst, cmd_q, CCP_ECC_DST_BUF_SIZE,
				   DMA_FROM_DEVICE);
	if (ret)
		goto e_src;

	op.soc = 1;
	op.src.u.dma.address = src.dma.address;
	op.src.u.dma.offset = 0;
	op.src.u.dma.length = src.length;
	op.dst.u.dma.address = dst.dma.address;
	op.dst.u.dma.offset = 0;
	op.dst.u.dma.length = dst.length;

	op.u.ecc.function = cmd->u.ecc.function;

	ret = cmd_q->ccp->vdata->perform->ecc(&op);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_dst;
	}

	ecc->ecc_result = le16_to_cpup(
		(const __le16 *)(dst.address + CCP_ECC_RESULT_OFFSET));
	if (!(ecc->ecc_result & CCP_ECC_RESULT_SUCCESS)) {
		ret = -EIO;
		goto e_dst;
	}

	/* Save the ECC result */
	ccp_reverse_get_dm_area(&dst, ecc->u.mm.result, CCP_ECC_MODULUS_BYTES);

e_dst:
	ccp_dm_free(&dst);

e_src:
	ccp_dm_free(&src);

	return ret;
}

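/* Source buffer layout used by ccp_run_ecc_mm_cmd() above (sketch):
 *
 *   [modulus | operand 1 | operand 2 (absent for MINV)]
 *
 * with each field reverse-copied into little endian and padded to
 * CCP_ECC_OPERAND_SIZE bytes.
 */
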
static int ccp_run_ecc_pm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
	struct ccp_ecc_engine *ecc = &cmd->u.ecc;
	struct ccp_dm_workarea src, dst;
	struct ccp_op op;
	int ret;
	u8 *save;

	if (!ecc->u.pm.point_1.x ||
	    (ecc->u.pm.point_1.x_len > CCP_ECC_MODULUS_BYTES) ||
	    !ecc->u.pm.point_1.y ||
	    (ecc->u.pm.point_1.y_len > CCP_ECC_MODULUS_BYTES))
		return -EINVAL;

	if (ecc->function == CCP_ECC_FUNCTION_PADD_384BIT) {
		if (!ecc->u.pm.point_2.x ||
		    (ecc->u.pm.point_2.x_len > CCP_ECC_MODULUS_BYTES) ||
		    !ecc->u.pm.point_2.y ||
		    (ecc->u.pm.point_2.y_len > CCP_ECC_MODULUS_BYTES))
			return -EINVAL;
	} else {
		if (!ecc->u.pm.domain_a ||
		    (ecc->u.pm.domain_a_len > CCP_ECC_MODULUS_BYTES))
			return -EINVAL;

		if (ecc->function == CCP_ECC_FUNCTION_PMUL_384BIT)
			if (!ecc->u.pm.scalar ||
			    (ecc->u.pm.scalar_len > CCP_ECC_MODULUS_BYTES))
				return -EINVAL;
	}

	if (!ecc->u.pm.result.x ||
	    (ecc->u.pm.result.x_len < CCP_ECC_MODULUS_BYTES) ||
	    !ecc->u.pm.result.y ||
	    (ecc->u.pm.result.y_len < CCP_ECC_MODULUS_BYTES))
		return -EINVAL;

	memset(&op, 0, sizeof(op));
	op.cmd_q = cmd_q;
	op.jobid = CCP_NEW_JOBID(cmd_q->ccp);

	/* Concatenate the modulus and the operands. Both the modulus and
	 * the operands must be in little endian format. Since the input
	 * is in big endian format it must be converted and placed in a
	 * fixed length buffer.
	 */
	ret = ccp_init_dm_workarea(&src, cmd_q, CCP_ECC_SRC_BUF_SIZE,
				   DMA_TO_DEVICE);
	if (ret)
		return ret;

	/* Save the workarea address since it is updated in order to perform
	 * the concatenation
	 */
	save = src.address;

	/* Copy the ECC modulus */
	ret = ccp_reverse_set_dm_area(&src, ecc->mod, ecc->mod_len,
				      CCP_ECC_OPERAND_SIZE, false);
	if (ret)
		goto e_src;
	src.address += CCP_ECC_OPERAND_SIZE;

	/* Copy the first point X and Y coordinate */
	ret = ccp_reverse_set_dm_area(&src, ecc->u.pm.point_1.x,
				      ecc->u.pm.point_1.x_len,
				      CCP_ECC_OPERAND_SIZE, false);
	if (ret)
		goto e_src;
	src.address += CCP_ECC_OPERAND_SIZE;
	ret = ccp_reverse_set_dm_area(&src, ecc->u.pm.point_1.y,
				      ecc->u.pm.point_1.y_len,
				      CCP_ECC_OPERAND_SIZE, false);
	if (ret)
		goto e_src;
	src.address += CCP_ECC_OPERAND_SIZE;

	/* Set the first point Z coordinate to 1 */
	*src.address = 0x01;
	src.address += CCP_ECC_OPERAND_SIZE;

	if (ecc->function == CCP_ECC_FUNCTION_PADD_384BIT) {
		/* Copy the second point X and Y coordinate */
		ret = ccp_reverse_set_dm_area(&src, ecc->u.pm.point_2.x,
					      ecc->u.pm.point_2.x_len,
					      CCP_ECC_OPERAND_SIZE, false);
		if (ret)
			goto e_src;
		src.address += CCP_ECC_OPERAND_SIZE;
		ret = ccp_reverse_set_dm_area(&src, ecc->u.pm.point_2.y,
					      ecc->u.pm.point_2.y_len,
					      CCP_ECC_OPERAND_SIZE, false);
		if (ret)
			goto e_src;
		src.address += CCP_ECC_OPERAND_SIZE;

		/* Set the second point Z coordinate to 1 */
		*src.address = 0x01;
		src.address += CCP_ECC_OPERAND_SIZE;
	} else {
		/* Copy the Domain "a" parameter */
		ret = ccp_reverse_set_dm_area(&src, ecc->u.pm.domain_a,
					      ecc->u.pm.domain_a_len,
					      CCP_ECC_OPERAND_SIZE, false);
		if (ret)
			goto e_src;
		src.address += CCP_ECC_OPERAND_SIZE;

		if (ecc->function == CCP_ECC_FUNCTION_PMUL_384BIT) {
			/* Copy the scalar value */
			ret = ccp_reverse_set_dm_area(&src, ecc->u.pm.scalar,
						      ecc->u.pm.scalar_len,
						      CCP_ECC_OPERAND_SIZE,
						      false);
			if (ret)
				goto e_src;
			src.address += CCP_ECC_OPERAND_SIZE;
		}
	}

	/* Restore the workarea address */
	src.address = save;

	/* Prepare the output area for the operation */
	ret = ccp_init_dm_workarea(&dst, cmd_q, CCP_ECC_DST_BUF_SIZE,
				   DMA_FROM_DEVICE);
	if (ret)
		goto e_src;

	op.soc = 1;
	op.src.u.dma.address = src.dma.address;
	op.src.u.dma.offset = 0;
	op.src.u.dma.length = src.length;
	op.dst.u.dma.address = dst.dma.address;
	op.dst.u.dma.offset = 0;
	op.dst.u.dma.length = dst.length;

	op.u.ecc.function = cmd->u.ecc.function;

	ret = cmd_q->ccp->vdata->perform->ecc(&op);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_dst;
	}

	ecc->ecc_result = le16_to_cpup(
		(const __le16 *)(dst.address + CCP_ECC_RESULT_OFFSET));
	if (!(ecc->ecc_result & CCP_ECC_RESULT_SUCCESS)) {
		ret = -EIO;
		goto e_dst;
	}

	/* Save the workarea address since it is updated as we walk through
	 * to copy the point math result
	 */
	save = dst.address;

	/* Save the ECC result X and Y coordinates */
	ccp_reverse_get_dm_area(&dst, ecc->u.pm.result.x,
				CCP_ECC_MODULUS_BYTES);
	dst.address += CCP_ECC_OUTPUT_SIZE;
	ccp_reverse_get_dm_area(&dst, ecc->u.pm.result.y,
				CCP_ECC_MODULUS_BYTES);
	dst.address += CCP_ECC_OUTPUT_SIZE;

	/* Restore the workarea address */
	dst.address = save;

e_dst:
	ccp_dm_free(&dst);

e_src:
	ccp_dm_free(&src);

	return ret;
}

static int ccp_run_ecc_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
	struct ccp_ecc_engine *ecc = &cmd->u.ecc;

	ecc->ecc_result = 0;

	if (!ecc->mod ||
	    (ecc->mod_len > CCP_ECC_MODULUS_BYTES))
		return -EINVAL;

	switch (ecc->function) {
	case CCP_ECC_FUNCTION_MMUL_384BIT:
	case CCP_ECC_FUNCTION_MADD_384BIT:
	case CCP_ECC_FUNCTION_MINV_384BIT:
		return ccp_run_ecc_mm_cmd(cmd_q, cmd);

	case CCP_ECC_FUNCTION_PADD_384BIT:
	case CCP_ECC_FUNCTION_PMUL_384BIT:
	case CCP_ECC_FUNCTION_PDBL_384BIT:
		return ccp_run_ecc_pm_cmd(cmd_q, cmd);

	default:
		return -EINVAL;
	}
}

int ccp_run_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
	int ret;

	cmd->engine_error = 0;
	cmd_q->cmd_error = 0;
	cmd_q->int_rcvd = 0;
	cmd_q->free_slots = cmd_q->ccp->vdata->perform->get_free_slots(cmd_q);

	switch (cmd->engine) {
	case CCP_ENGINE_AES:
		ret = ccp_run_aes_cmd(cmd_q, cmd);
		break;
	case CCP_ENGINE_XTS_AES_128:
		ret = ccp_run_xts_aes_cmd(cmd_q, cmd);
		break;
	case CCP_ENGINE_SHA:
		ret = ccp_run_sha_cmd(cmd_q, cmd);
		break;
	case CCP_ENGINE_RSA:
		ret = ccp_run_rsa_cmd(cmd_q, cmd);
		break;
	case CCP_ENGINE_PASSTHRU:
		if (cmd->flags & CCP_CMD_PASSTHRU_NO_DMA_MAP)
			ret = ccp_run_passthru_nomap_cmd(cmd_q, cmd);
		else
			ret = ccp_run_passthru_cmd(cmd_q, cmd);
		break;
	case CCP_ENGINE_ECC:
		ret = ccp_run_ecc_cmd(cmd_q, cmd);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}