2 * Copyright (C) 2012-2017 ARM Limited or its affiliates.
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
13 * You should have received a copy of the GNU General Public License
14 * along with this program; if not, see <http://www.gnu.org/licenses/>.
17 #include <linux/kernel.h>
18 #include <linux/module.h>
19 #include <linux/platform_device.h>
20 #include <crypto/algapi.h>
21 #include <crypto/hash.h>
22 #include <crypto/sha.h>
23 #include <crypto/md5.h>
24 #include <crypto/internal/hash.h>
26 #include "ssi_config.h"
27 #include "ssi_driver.h"
28 #include "ssi_request_mgr.h"
29 #include "ssi_buffer_mgr.h"
30 #include "ssi_sysfs.h"
32 #include "ssi_sram_mgr.h"
/* Maximum number of HW descriptors built for a single ahash operation */
34 #define SSI_MAX_AHASH_SEQ_LEN 12
/* opad/tmp-key scratch must hold either a full hash block or the three
 * AES-block-sized derived keys used by XCBC-MAC, whichever is larger.
 */
35 #define SSI_MAX_HASH_OPAD_TMP_KEYS_SIZE MAX(SSI_MAX_HASH_BLCK_SIZE, 3 * AES_BLOCK_SIZE)
/* Driver-wide hash subsystem handle: SRAM addresses of the constant
 * larval-digest/length tables plus the list of registered hash algs.
 * NOTE(review): struct is truncated in this chunk (closing brace not visible).
 */
37 struct ssi_hash_handle {
38 ssi_sram_addr_t digest_len_sram_addr; /* const value in SRAM*/
39 ssi_sram_addr_t larval_digest_sram_addr; /* const value in SRAM */
40 struct list_head hash_list; /* registered hash algorithms */
41 struct completion init_comp;
/* Constant larval (initial) digest values and initial length words that
 * get loaded into CC SRAM. digest_len_init encodes the initial hash
 * length field for <=256-bit digests; the sha512 variant below is the
 * 128-bit length word used for SHA-384/512.
 */
44 static const u32 digest_len_init[] = {
45 0x00000040, 0x00000000, 0x00000000, 0x00000000 };
/* MD5 IV words equal the SHA-1 H0..H3 constants in reverse order, so the
 * SHA1_H* macros are reused here on purpose.
 */
46 static const u32 md5_init[] = {
47 SHA1_H3, SHA1_H2, SHA1_H1, SHA1_H0 };
48 static const u32 sha1_init[] = {
49 SHA1_H4, SHA1_H3, SHA1_H2, SHA1_H1, SHA1_H0 };
50 static const u32 sha224_init[] = {
51 SHA224_H7, SHA224_H6, SHA224_H5, SHA224_H4,
52 SHA224_H3, SHA224_H2, SHA224_H1, SHA224_H0 };
53 static const u32 sha256_init[] = {
54 SHA256_H7, SHA256_H6, SHA256_H5, SHA256_H4,
55 SHA256_H3, SHA256_H2, SHA256_H1, SHA256_H0 };
/* SHA-384/512 tables only exist when the HW supports >256-bit digests */
56 #if (DX_DEV_SHA_MAX > 256)
57 static const u32 digest_len_sha512_init[] = {
58 0x00000080, 0x00000000, 0x00000000, 0x00000000 };
59 static const u64 sha384_init[] = {
60 SHA384_H7, SHA384_H6, SHA384_H5, SHA384_H4,
61 SHA384_H3, SHA384_H2, SHA384_H1, SHA384_H0 };
62 static const u64 sha512_init[] = {
63 SHA512_H7, SHA512_H6, SHA512_H5, SHA512_H4,
64 SHA512_H3, SHA512_H2, SHA512_H1, SHA512_H0 };
/* Forward declarations: builders of the HW descriptor setup sequences for
 * the AES-XCBC-MAC and AES-CMAC modes (defined later in this file).
 */
67 static void ssi_hash_create_xcbc_setup(
68 struct ahash_request *areq,
69 struct cc_hw_desc desc[],
70 unsigned int *seq_size);
72 static void ssi_hash_create_cmac_setup(struct ahash_request *areq,
73 struct cc_hw_desc desc[],
74 unsigned int *seq_size);
/* NOTE(review): interior of the per-algorithm wrapper struct; its opening
 * line is outside the visible chunk. Links the alg into the driver's
 * hash_list and pairs the crypto API alg with its driver data.
 */
77 struct list_head entry;
81 struct ssi_drvdata *drvdata;
82 struct ahash_alg ahash_alg;
/* DMA mapping of a user-supplied HMAC key while setkey is in flight.
 * NOTE(review): struct is truncated in this chunk.
 */
85 struct hash_key_req_ctx {
87 dma_addr_t key_dma_addr;
90 /* hash per-session context (tfm context); cacheline-aligned DMA buffers */
92 struct ssi_drvdata *drvdata;
93 /* holds the origin digest; the digest after "setkey" if HMAC,*
94 * the initial digest if HASH.
96 u8 digest_buff[SSI_MAX_HASH_DIGEST_SIZE] ____cacheline_aligned;
97 u8 opad_tmp_keys_buff[SSI_MAX_HASH_OPAD_TMP_KEYS_SIZE] ____cacheline_aligned;
99 dma_addr_t opad_tmp_keys_dma_addr ____cacheline_aligned;
100 dma_addr_t digest_buff_dma_addr;
101 /* used for HMAC with a key larger than the mode's block size */
102 struct hash_key_req_ctx key_params;
105 int inter_digestsize;
106 struct completion setkey_comp;
/* Forward declaration: builds the data-processing descriptors for a hash
 * flow (defined later in this file).
 */
110 static void ssi_hash_create_data_desc(
111 struct ahash_req_ctx *areq_ctx,
112 struct ssi_hash_ctx *ctx,
113 unsigned int flow_mode, struct cc_hw_desc desc[],
114 bool is_not_last_data,
115 unsigned int *seq_size);
/* Configure result endianity on a descriptor: MD5/SHA-384/SHA-512 need
 * byte-swapped, little-endian digest output from the HW engine.
 * NOTE(review): function body is truncated in this chunk (else branch /
 * closing braces not visible).
 */
117 static inline void ssi_set_hash_endianity(u32 mode, struct cc_hw_desc *desc)
119 if (unlikely((mode == DRV_HASH_MD5) ||
120 (mode == DRV_HASH_SHA384) ||
121 (mode == DRV_HASH_SHA512))) {
122 set_bytes_swap(desc, 1);
124 set_cipher_config0(desc, HASH_DIGEST_RESULT_LITTLE_ENDIAN);
/* DMA-map the request's digest result buffer for the HW to write into.
 * Returns 0 on success (error path truncated in this chunk).
 */
128 static int ssi_hash_map_result(struct device *dev,
129 struct ahash_req_ctx *state,
130 unsigned int digestsize)
132 state->digest_result_dma_addr =
133 dma_map_single(dev, (void *)state->digest_result_buff,
136 if (unlikely(dma_mapping_error(dev, state->digest_result_dma_addr))) {
137 SSI_LOG_ERR("Mapping digest result buffer %u B for DMA failed\n",
141 SSI_LOG_DEBUG("Mapped digest result buffer %u B "
142 "at va=%pK to dma=%pad\n",
143 digestsize, state->digest_result_buff,
144 state->digest_result_dma_addr);
/* Allocate and DMA-map the per-request state buffers (working buffers,
 * digest, running length, HMAC opad digest), seed the intermediate digest
 * either from the tfm context (HMAC/MAC) or from the larval digest held
 * in CC SRAM (plain hash), and reset the buffered-data counters.
 * Error unwinding is goto-based; several labels/returns are not visible
 * in this chunk.
 */
149 static int ssi_hash_map_request(struct device *dev,
150 struct ahash_req_ctx *state,
151 struct ssi_hash_ctx *ctx)
153 bool is_hmac = ctx->is_hmac;
154 ssi_sram_addr_t larval_digest_addr = ssi_ahash_get_larval_digest_sram_addr(
155 ctx->drvdata, ctx->hash_mode);
156 struct ssi_crypto_req ssi_req = {};
157 struct cc_hw_desc desc;
/* GFP_DMA: buffers are targets of HW DMA */
160 state->buff0 = kzalloc(SSI_MAX_HASH_BLCK_SIZE, GFP_KERNEL | GFP_DMA);
162 SSI_LOG_ERR("Allocating buff0 in context failed\n");
165 state->buff1 = kzalloc(SSI_MAX_HASH_BLCK_SIZE, GFP_KERNEL | GFP_DMA);
167 SSI_LOG_ERR("Allocating buff1 in context failed\n");
170 state->digest_result_buff = kzalloc(SSI_MAX_HASH_DIGEST_SIZE, GFP_KERNEL | GFP_DMA);
171 if (!state->digest_result_buff) {
172 SSI_LOG_ERR("Allocating digest_result_buff in context failed\n");
175 state->digest_buff = kzalloc(ctx->inter_digestsize, GFP_KERNEL | GFP_DMA);
176 if (!state->digest_buff) {
177 SSI_LOG_ERR("Allocating digest-buffer in context failed\n");
178 goto fail_digest_result_buff;
181 SSI_LOG_DEBUG("Allocated digest-buffer in context ctx->digest_buff=@%p\n", state->digest_buff);
/* XCBC-MAC tracks no running byte length in HW; skip the length buffer */
182 if (ctx->hw_mode != DRV_CIPHER_XCBC_MAC) {
183 state->digest_bytes_len = kzalloc(HASH_LEN_SIZE, GFP_KERNEL | GFP_DMA);
184 if (!state->digest_bytes_len) {
185 SSI_LOG_ERR("Allocating digest-bytes-len in context failed\n");
188 SSI_LOG_DEBUG("Allocated digest-bytes-len in context state->>digest_bytes_len=@%p\n", state->digest_bytes_len);
190 state->digest_bytes_len = NULL;
193 state->opad_digest_buff = kzalloc(ctx->inter_digestsize, GFP_KERNEL | GFP_DMA);
194 if (!state->opad_digest_buff) {
195 SSI_LOG_ERR("Allocating opad-digest-buffer in context failed\n");
198 SSI_LOG_DEBUG("Allocated opad-digest-buffer in context state->digest_bytes_len=@%p\n", state->opad_digest_buff);
200 state->digest_buff_dma_addr = dma_map_single(dev, (void *)state->digest_buff, ctx->inter_digestsize, DMA_BIDIRECTIONAL);
201 if (dma_mapping_error(dev, state->digest_buff_dma_addr)) {
202 SSI_LOG_ERR("Mapping digest len %d B at va=%pK for DMA failed\n",
203 ctx->inter_digestsize, state->digest_buff);
206 SSI_LOG_DEBUG("Mapped digest %d B at va=%pK to dma=%pad\n",
207 ctx->inter_digestsize, state->digest_buff,
208 state->digest_buff_dma_addr);
/* Sync the tfm-context buffer for CPU access before copying from it */
211 dma_sync_single_for_cpu(dev, ctx->digest_buff_dma_addr, ctx->inter_digestsize, DMA_BIDIRECTIONAL);
212 if ((ctx->hw_mode == DRV_CIPHER_XCBC_MAC) || (ctx->hw_mode == DRV_CIPHER_CMAC)) {
213 memset(state->digest_buff, 0, ctx->inter_digestsize);
215 memcpy(state->digest_buff, ctx->digest_buff, ctx->inter_digestsize);
216 #if (DX_DEV_SHA_MAX > 256)
/* SHA-384/512 use the 128-bit initial length word */
217 if (unlikely((ctx->hash_mode == DRV_HASH_SHA512) || (ctx->hash_mode == DRV_HASH_SHA384)))
218 memcpy(state->digest_bytes_len, digest_len_sha512_init, HASH_LEN_SIZE);
220 memcpy(state->digest_bytes_len, digest_len_init, HASH_LEN_SIZE);
222 memcpy(state->digest_bytes_len, digest_len_init, HASH_LEN_SIZE);
225 dma_sync_single_for_device(dev, state->digest_buff_dma_addr, ctx->inter_digestsize, DMA_BIDIRECTIONAL);
227 if (ctx->hash_mode != DRV_HASH_NULL) {
228 dma_sync_single_for_cpu(dev, ctx->opad_tmp_keys_dma_addr, ctx->inter_digestsize, DMA_BIDIRECTIONAL);
229 memcpy(state->opad_digest_buff, ctx->opad_tmp_keys_buff, ctx->inter_digestsize);
232 /* Copy the initial digests if hash flow. The SRAM contains the
233 * initial digests in the expected order for all SHA*
/* BYPASS descriptor: DMA the larval digest SRAM -> state digest buffer */
236 set_din_sram(&desc, larval_digest_addr, ctx->inter_digestsize);
237 set_dout_dlli(&desc, state->digest_buff_dma_addr,
238 ctx->inter_digestsize, NS_BIT, 0);
239 set_flow_mode(&desc, BYPASS);
241 rc = send_request(ctx->drvdata, &ssi_req, &desc, 1, 0);
242 if (unlikely(rc != 0)) {
243 SSI_LOG_ERR("send_request() failed (rc=%d)\n", rc);
248 if (ctx->hw_mode != DRV_CIPHER_XCBC_MAC) {
249 state->digest_bytes_len_dma_addr = dma_map_single(dev, (void *)state->digest_bytes_len, HASH_LEN_SIZE, DMA_BIDIRECTIONAL);
250 if (dma_mapping_error(dev, state->digest_bytes_len_dma_addr)) {
251 SSI_LOG_ERR("Mapping digest len %u B at va=%pK for DMA failed\n",
252 HASH_LEN_SIZE, state->digest_bytes_len);
255 SSI_LOG_DEBUG("Mapped digest len %u B at va=%pK to dma=%pad\n",
256 HASH_LEN_SIZE, state->digest_bytes_len,
257 state->digest_bytes_len_dma_addr);
259 state->digest_bytes_len_dma_addr = 0;
262 if (is_hmac && ctx->hash_mode != DRV_HASH_NULL) {
263 state->opad_digest_dma_addr = dma_map_single(dev, (void *)state->opad_digest_buff, ctx->inter_digestsize, DMA_BIDIRECTIONAL);
264 if (dma_mapping_error(dev, state->opad_digest_dma_addr)) {
265 SSI_LOG_ERR("Mapping opad digest %d B at va=%pK for DMA failed\n",
266 ctx->inter_digestsize,
267 state->opad_digest_buff);
270 SSI_LOG_DEBUG("Mapped opad digest %d B at va=%pK to dma=%pad\n",
271 ctx->inter_digestsize, state->opad_digest_buff,
272 state->opad_digest_dma_addr);
274 state->opad_digest_dma_addr = 0;
/* Fresh request: nothing buffered yet, no MLLI pool selected */
276 state->buff0_cnt = 0;
277 state->buff1_cnt = 0;
278 state->buff_index = 0;
279 state->mlli_params.curr_pool = NULL;
/* --- error unwinding: unmap then free in reverse allocation order --- */
284 if (state->digest_bytes_len_dma_addr != 0) {
285 dma_unmap_single(dev, state->digest_bytes_len_dma_addr, HASH_LEN_SIZE, DMA_BIDIRECTIONAL);
286 state->digest_bytes_len_dma_addr = 0;
289 if (state->digest_buff_dma_addr != 0) {
290 dma_unmap_single(dev, state->digest_buff_dma_addr, ctx->inter_digestsize, DMA_BIDIRECTIONAL);
291 state->digest_buff_dma_addr = 0;
294 kfree(state->opad_digest_buff);
296 kfree(state->digest_bytes_len);
298 kfree(state->digest_buff);
299 fail_digest_result_buff:
300 kfree(state->digest_result_buff);
301 state->digest_result_buff = NULL;
/* Undo ssi_hash_map_request(): unmap every DMA mapping that was created
 * (non-zero dma_addr acts as the "was mapped" flag) and free the state
 * buffers. kfree(NULL) is safe, so unconditional frees are fine here.
 */
312 static void ssi_hash_unmap_request(struct device *dev,
313 struct ahash_req_ctx *state,
314 struct ssi_hash_ctx *ctx)
316 if (state->digest_buff_dma_addr != 0) {
317 dma_unmap_single(dev, state->digest_buff_dma_addr,
318 ctx->inter_digestsize, DMA_BIDIRECTIONAL);
319 SSI_LOG_DEBUG("Unmapped digest-buffer: digest_buff_dma_addr=%pad\n",
320 state->digest_buff_dma_addr);
321 state->digest_buff_dma_addr = 0;
323 if (state->digest_bytes_len_dma_addr != 0) {
324 dma_unmap_single(dev, state->digest_bytes_len_dma_addr,
325 HASH_LEN_SIZE, DMA_BIDIRECTIONAL);
326 SSI_LOG_DEBUG("Unmapped digest-bytes-len buffer: digest_bytes_len_dma_addr=%pad\n",
327 state->digest_bytes_len_dma_addr);
328 state->digest_bytes_len_dma_addr = 0;
330 if (state->opad_digest_dma_addr != 0) {
331 dma_unmap_single(dev, state->opad_digest_dma_addr,
332 ctx->inter_digestsize, DMA_BIDIRECTIONAL);
333 SSI_LOG_DEBUG("Unmapped opad-digest: opad_digest_dma_addr=%pad\n",
334 state->opad_digest_dma_addr);
335 state->opad_digest_dma_addr = 0;
338 kfree(state->opad_digest_buff);
339 kfree(state->digest_bytes_len);
340 kfree(state->digest_buff);
341 kfree(state->digest_result_buff);
/* Unmap the digest result buffer; the truncated lines presumably copy the
 * HW-written digest into the caller's result buffer before clearing the
 * mapping -- TODO confirm against the full source.
 */
346 static void ssi_hash_unmap_result(struct device *dev,
347 struct ahash_req_ctx *state,
348 unsigned int digestsize, u8 *result)
350 if (state->digest_result_dma_addr != 0) {
351 dma_unmap_single(dev,
352 state->digest_result_dma_addr,
355 SSI_LOG_DEBUG("unmpa digest result buffer "
356 "va (%pK) pa (%pad) len %u\n",
357 state->digest_result_buff,
358 state->digest_result_dma_addr,
361 state->digest_result_buff,
364 state->digest_result_dma_addr = 0;
/* Async completion callback for hash update: release the source-data
 * mapping and report success to the crypto API caller.
 */
367 static void ssi_hash_update_complete(struct device *dev, void *ssi_req, void __iomem *cc_base)
369 struct ahash_request *req = (struct ahash_request *)ssi_req;
370 struct ahash_req_ctx *state = ahash_request_ctx(req);
372 SSI_LOG_DEBUG("req=%pK\n", req);
374 ssi_buffer_mgr_unmap_hash_request(dev, state, req->src, false);
375 req->base.complete(&req->base, 0);
/* Async completion callback for one-shot digest: tear down source, result
 * and request-state mappings, then complete the request.
 */
378 static void ssi_hash_digest_complete(struct device *dev, void *ssi_req, void __iomem *cc_base)
380 struct ahash_request *req = (struct ahash_request *)ssi_req;
381 struct ahash_req_ctx *state = ahash_request_ctx(req);
382 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
383 struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
384 u32 digestsize = crypto_ahash_digestsize(tfm);
386 SSI_LOG_DEBUG("req=%pK\n", req);
388 ssi_buffer_mgr_unmap_hash_request(dev, state, req->src, false);
389 ssi_hash_unmap_result(dev, state, digestsize, req->result);
390 ssi_hash_unmap_request(dev, state, ctx);
391 req->base.complete(&req->base, 0);
/* Async completion callback for final/finup: identical teardown to
 * ssi_hash_digest_complete (source, result, state), then complete.
 */
394 static void ssi_hash_complete(struct device *dev, void *ssi_req, void __iomem *cc_base)
396 struct ahash_request *req = (struct ahash_request *)ssi_req;
397 struct ahash_req_ctx *state = ahash_request_ctx(req);
398 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
399 struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
400 u32 digestsize = crypto_ahash_digestsize(tfm);
402 SSI_LOG_DEBUG("req=%pK\n", req);
404 ssi_buffer_mgr_unmap_hash_request(dev, state, req->src, false);
405 ssi_hash_unmap_result(dev, state, digestsize, req->result);
406 ssi_hash_unmap_request(dev, state, ctx);
407 req->base.complete(&req->base, 0);
/* One-shot digest of @src (@nbytes) into @result. Maps request state,
 * result and source buffers, builds the HW descriptor chain (initial
 * digest load, length load, data, and -- for HMAC -- the outer-hash pass
 * over ipad-result + opad key), then submits it either asynchronously
 * (completion in ssi_hash_digest_complete) or synchronously.
 * NOTE(review): several branch/return lines are not visible in this chunk.
 */
410 static int ssi_hash_digest(struct ahash_req_ctx *state,
411 struct ssi_hash_ctx *ctx,
412 unsigned int digestsize,
413 struct scatterlist *src,
414 unsigned int nbytes, u8 *result,
417 struct device *dev = &ctx->drvdata->plat_dev->dev;
418 bool is_hmac = ctx->is_hmac;
419 struct ssi_crypto_req ssi_req = {};
420 struct cc_hw_desc desc[SSI_MAX_AHASH_SEQ_LEN];
421 ssi_sram_addr_t larval_digest_addr = ssi_ahash_get_larval_digest_sram_addr(
422 ctx->drvdata, ctx->hash_mode);
426 SSI_LOG_DEBUG("===== %s-digest (%d) ====\n", is_hmac ? "hmac" : "hash", nbytes);
428 if (unlikely(ssi_hash_map_request(dev, state, ctx) != 0)) {
429 SSI_LOG_ERR("map_ahash_source() failed\n");
433 if (unlikely(ssi_hash_map_result(dev, state, digestsize) != 0)) {
434 SSI_LOG_ERR("map_ahash_digest() failed\n");
438 if (unlikely(ssi_buffer_mgr_map_hash_request_final(ctx->drvdata, state, src, nbytes, 1) != 0)) {
439 SSI_LOG_ERR("map_ahash_request_final() failed\n");
444 /* Setup DX request structure */
445 ssi_req.user_cb = (void *)ssi_hash_digest_complete;
446 ssi_req.user_arg = (void *)async_req;
449 /* If HMAC then load hash IPAD xor key, if HASH then load initial digest */
450 hw_desc_init(&desc[idx]);
451 set_cipher_mode(&desc[idx], ctx->hw_mode);
453 set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
454 ctx->inter_digestsize, NS_BIT);
456 set_din_sram(&desc[idx], larval_digest_addr,
457 ctx->inter_digestsize);
459 set_flow_mode(&desc[idx], S_DIN_to_HASH);
460 set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
463 /* Load the hash current length */
464 hw_desc_init(&desc[idx]);
465 set_cipher_mode(&desc[idx], ctx->hw_mode);
468 set_din_type(&desc[idx], DMA_DLLI,
469 state->digest_bytes_len_dma_addr, HASH_LEN_SIZE,
472 set_din_const(&desc[idx], 0, HASH_LEN_SIZE);
473 if (likely(nbytes != 0))
474 set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
476 set_cipher_do(&desc[idx], DO_PAD);
478 set_flow_mode(&desc[idx], S_DIN_to_HASH);
479 set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
482 ssi_hash_create_data_desc(state, ctx, DIN_HASH, desc, false, &idx);
485 /* HW last hash block padding (aka. "DO_PAD") */
486 hw_desc_init(&desc[idx]);
487 set_cipher_mode(&desc[idx], ctx->hw_mode);
488 set_dout_dlli(&desc[idx], state->digest_buff_dma_addr,
489 HASH_LEN_SIZE, NS_BIT, 0);
490 set_flow_mode(&desc[idx], S_HASH_to_DOUT);
491 set_setup_mode(&desc[idx], SETUP_WRITE_STATE1);
492 set_cipher_do(&desc[idx], DO_PAD);
495 /* store the hash digest result in the context */
496 hw_desc_init(&desc[idx]);
497 set_cipher_mode(&desc[idx], ctx->hw_mode);
498 set_dout_dlli(&desc[idx], state->digest_buff_dma_addr,
499 digestsize, NS_BIT, 0);
500 set_flow_mode(&desc[idx], S_HASH_to_DOUT);
501 ssi_set_hash_endianity(ctx->hash_mode, &desc[idx]);
502 set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
505 /* Loading hash opad xor key state */
506 hw_desc_init(&desc[idx]);
507 set_cipher_mode(&desc[idx], ctx->hw_mode);
508 set_din_type(&desc[idx], DMA_DLLI, state->opad_digest_dma_addr,
509 ctx->inter_digestsize, NS_BIT);
510 set_flow_mode(&desc[idx], S_DIN_to_HASH);
511 set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
514 /* Load the hash current length */
515 hw_desc_init(&desc[idx]);
516 set_cipher_mode(&desc[idx], ctx->hw_mode);
517 set_din_sram(&desc[idx],
518 ssi_ahash_get_initial_digest_len_sram_addr(
519 ctx->drvdata, ctx->hash_mode), HASH_LEN_SIZE);
520 set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
521 set_flow_mode(&desc[idx], S_DIN_to_HASH);
522 set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
525 /* Memory Barrier: wait for IPAD/OPAD axi write to complete */
526 hw_desc_init(&desc[idx]);
527 set_din_no_dma(&desc[idx], 0, 0xfffff0);
528 set_dout_no_dma(&desc[idx], 0, 0, 1);
531 /* Perform HASH update */
532 hw_desc_init(&desc[idx]);
533 set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
535 set_flow_mode(&desc[idx], DIN_HASH);
539 /* Get final MAC result */
540 hw_desc_init(&desc[idx]);
541 set_cipher_mode(&desc[idx], ctx->hw_mode);
543 set_dout_dlli(&desc[idx], state->digest_result_dma_addr, digestsize,
544 NS_BIT, (async_req ? 1 : 0));
546 set_queue_last_ind(&desc[idx]);
547 set_flow_mode(&desc[idx], S_HASH_to_DOUT);
548 set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
549 set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
550 ssi_set_hash_endianity(ctx->hash_mode, &desc[idx]);
/* Async path: -EINPROGRESS expected; anything else unwinds all mappings */
554 rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 1);
555 if (unlikely(rc != -EINPROGRESS)) {
556 SSI_LOG_ERR("send_request() failed (rc=%d)\n", rc);
557 ssi_buffer_mgr_unmap_hash_request(dev, state, src, true);
558 ssi_hash_unmap_result(dev, state, digestsize, result);
559 ssi_hash_unmap_request(dev, state, ctx);
562 rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 0);
564 SSI_LOG_ERR("send_request() failed (rc=%d)\n", rc);
565 ssi_buffer_mgr_unmap_hash_request(dev, state, src, true);
567 ssi_buffer_mgr_unmap_hash_request(dev, state, src, false);
569 ssi_hash_unmap_result(dev, state, digestsize, result);
570 ssi_hash_unmap_request(dev, state, ctx);
/* Incremental update: buffer-or-process @nbytes from @src. If the buffer
 * manager decides no full block is available yet, no HW work is issued.
 * Otherwise restore the intermediate digest + running length into the HW,
 * hash the mapped data, and write both back to the request state.
 * NOTE(review): several branch/return lines are not visible in this chunk.
 */
575 static int ssi_hash_update(struct ahash_req_ctx *state,
576 struct ssi_hash_ctx *ctx,
577 unsigned int block_size,
578 struct scatterlist *src,
582 struct device *dev = &ctx->drvdata->plat_dev->dev;
583 struct ssi_crypto_req ssi_req = {};
584 struct cc_hw_desc desc[SSI_MAX_AHASH_SEQ_LEN];
588 SSI_LOG_DEBUG("===== %s-update (%d) ====\n", ctx->is_hmac ?
589 "hmac" : "hash", nbytes);
592 /* no real updates required */
596 rc = ssi_buffer_mgr_map_hash_request_update(ctx->drvdata, state, src, nbytes, block_size);
599 SSI_LOG_DEBUG(" data size not require HW update %x\n",
601 /* No hardware updates are required */
604 SSI_LOG_ERR("map_ahash_request_update() failed\n");
609 /* Setup DX request structure */
610 ssi_req.user_cb = (void *)ssi_hash_update_complete;
611 ssi_req.user_arg = async_req;
614 /* Restore hash digest */
615 hw_desc_init(&desc[idx]);
616 set_cipher_mode(&desc[idx], ctx->hw_mode);
617 set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
618 ctx->inter_digestsize, NS_BIT);
619 set_flow_mode(&desc[idx], S_DIN_to_HASH);
620 set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
622 /* Restore hash current length */
623 hw_desc_init(&desc[idx]);
624 set_cipher_mode(&desc[idx], ctx->hw_mode);
625 set_din_type(&desc[idx], DMA_DLLI, state->digest_bytes_len_dma_addr,
626 HASH_LEN_SIZE, NS_BIT);
627 set_flow_mode(&desc[idx], S_DIN_to_HASH);
628 set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
631 ssi_hash_create_data_desc(state, ctx, DIN_HASH, desc, false, &idx);
633 /* store the hash digest result in context */
634 hw_desc_init(&desc[idx]);
635 set_cipher_mode(&desc[idx], ctx->hw_mode);
636 set_dout_dlli(&desc[idx], state->digest_buff_dma_addr,
637 ctx->inter_digestsize, NS_BIT, 0);
638 set_flow_mode(&desc[idx], S_HASH_to_DOUT);
639 set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
642 /* store current hash length in context */
643 hw_desc_init(&desc[idx]);
644 set_cipher_mode(&desc[idx], ctx->hw_mode);
645 set_dout_dlli(&desc[idx], state->digest_bytes_len_dma_addr,
646 HASH_LEN_SIZE, NS_BIT, (async_req ? 1 : 0));
648 set_queue_last_ind(&desc[idx]);
649 set_flow_mode(&desc[idx], S_HASH_to_DOUT);
650 set_setup_mode(&desc[idx], SETUP_WRITE_STATE1);
/* Async path: -EINPROGRESS expected; anything else unwinds the mapping */
654 rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 1);
655 if (unlikely(rc != -EINPROGRESS)) {
656 SSI_LOG_ERR("send_request() failed (rc=%d)\n", rc);
657 ssi_buffer_mgr_unmap_hash_request(dev, state, src, true);
660 rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 0);
662 SSI_LOG_ERR("send_request() failed (rc=%d)\n", rc);
663 ssi_buffer_mgr_unmap_hash_request(dev, state, src, true);
665 ssi_buffer_mgr_unmap_hash_request(dev, state, src, false);
/* finup: hash the remaining @src data and produce the final digest in one
 * submission. Restores digest + length (with HW padding enabled), hashes
 * the data, and for HMAC runs the outer hash over opad state before the
 * final result write.
 * NOTE(review): several branch/return lines are not visible in this chunk.
 */
671 static int ssi_hash_finup(struct ahash_req_ctx *state,
672 struct ssi_hash_ctx *ctx,
673 unsigned int digestsize,
674 struct scatterlist *src,
679 struct device *dev = &ctx->drvdata->plat_dev->dev;
680 bool is_hmac = ctx->is_hmac;
681 struct ssi_crypto_req ssi_req = {};
682 struct cc_hw_desc desc[SSI_MAX_AHASH_SEQ_LEN];
686 SSI_LOG_DEBUG("===== %s-finup (%d) ====\n", is_hmac ? "hmac" : "hash", nbytes);
688 if (unlikely(ssi_buffer_mgr_map_hash_request_final(ctx->drvdata, state, src, nbytes, 1) != 0)) {
689 SSI_LOG_ERR("map_ahash_request_final() failed\n");
692 if (unlikely(ssi_hash_map_result(dev, state, digestsize) != 0)) {
693 SSI_LOG_ERR("map_ahash_digest() failed\n");
698 /* Setup DX request structure */
699 ssi_req.user_cb = (void *)ssi_hash_complete;
700 ssi_req.user_arg = async_req;
703 /* Restore hash digest */
704 hw_desc_init(&desc[idx]);
705 set_cipher_mode(&desc[idx], ctx->hw_mode);
706 set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
707 ctx->inter_digestsize, NS_BIT);
708 set_flow_mode(&desc[idx], S_DIN_to_HASH);
709 set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
712 /* Restore hash current length */
713 hw_desc_init(&desc[idx]);
714 set_cipher_mode(&desc[idx], ctx->hw_mode);
715 set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
716 set_din_type(&desc[idx], DMA_DLLI, state->digest_bytes_len_dma_addr,
717 HASH_LEN_SIZE, NS_BIT);
718 set_flow_mode(&desc[idx], S_DIN_to_HASH);
719 set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
722 ssi_hash_create_data_desc(state, ctx, DIN_HASH, desc, false, &idx);
725 /* Store the hash digest result in the context */
726 hw_desc_init(&desc[idx]);
727 set_cipher_mode(&desc[idx], ctx->hw_mode);
728 set_dout_dlli(&desc[idx], state->digest_buff_dma_addr,
729 digestsize, NS_BIT, 0);
730 ssi_set_hash_endianity(ctx->hash_mode, &desc[idx]);
731 set_flow_mode(&desc[idx], S_HASH_to_DOUT);
732 set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
735 /* Loading hash OPAD xor key state */
736 hw_desc_init(&desc[idx]);
737 set_cipher_mode(&desc[idx], ctx->hw_mode);
738 set_din_type(&desc[idx], DMA_DLLI, state->opad_digest_dma_addr,
739 ctx->inter_digestsize, NS_BIT);
740 set_flow_mode(&desc[idx], S_DIN_to_HASH);
741 set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
744 /* Load the hash current length */
745 hw_desc_init(&desc[idx]);
746 set_cipher_mode(&desc[idx], ctx->hw_mode);
747 set_din_sram(&desc[idx],
748 ssi_ahash_get_initial_digest_len_sram_addr(
749 ctx->drvdata, ctx->hash_mode), HASH_LEN_SIZE);
750 set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
751 set_flow_mode(&desc[idx], S_DIN_to_HASH);
752 set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
755 /* Memory Barrier: wait for IPAD/OPAD axi write to complete */
756 hw_desc_init(&desc[idx]);
757 set_din_no_dma(&desc[idx], 0, 0xfffff0);
758 set_dout_no_dma(&desc[idx], 0, 0, 1);
761 /* Perform HASH update on last digest */
762 hw_desc_init(&desc[idx]);
763 set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
765 set_flow_mode(&desc[idx], DIN_HASH);
769 /* Get final MAC result */
770 hw_desc_init(&desc[idx]);
772 set_dout_dlli(&desc[idx], state->digest_result_dma_addr, digestsize,
773 NS_BIT, (async_req ? 1 : 0));
775 set_queue_last_ind(&desc[idx]);
776 set_flow_mode(&desc[idx], S_HASH_to_DOUT);
777 set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
778 set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
779 ssi_set_hash_endianity(ctx->hash_mode, &desc[idx]);
780 set_cipher_mode(&desc[idx], ctx->hw_mode);
/* Async path: -EINPROGRESS expected; anything else unwinds mappings */
784 rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 1);
785 if (unlikely(rc != -EINPROGRESS)) {
786 SSI_LOG_ERR("send_request() failed (rc=%d)\n", rc);
787 ssi_buffer_mgr_unmap_hash_request(dev, state, src, true);
788 ssi_hash_unmap_result(dev, state, digestsize, result);
791 rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 0);
793 SSI_LOG_ERR("send_request() failed (rc=%d)\n", rc);
794 ssi_buffer_mgr_unmap_hash_request(dev, state, src, true);
795 ssi_hash_unmap_result(dev, state, digestsize, result);
797 ssi_buffer_mgr_unmap_hash_request(dev, state, src, false);
798 ssi_hash_unmap_result(dev, state, digestsize, result);
799 ssi_hash_unmap_request(dev, state, ctx);
/* final: flush whatever data is still buffered (request_final mapped with
 * update-mode 0) and produce the digest. Differs from finup mainly in
 * that padding is driven via the explicit DO_PAD write-back of the
 * running length rather than HW auto-padding during data processing.
 * NOTE(review): several branch/return lines are not visible in this chunk.
 */
805 static int ssi_hash_final(struct ahash_req_ctx *state,
806 struct ssi_hash_ctx *ctx,
807 unsigned int digestsize,
808 struct scatterlist *src,
813 struct device *dev = &ctx->drvdata->plat_dev->dev;
814 bool is_hmac = ctx->is_hmac;
815 struct ssi_crypto_req ssi_req = {};
816 struct cc_hw_desc desc[SSI_MAX_AHASH_SEQ_LEN];
820 SSI_LOG_DEBUG("===== %s-final (%d) ====\n", is_hmac ? "hmac" : "hash", nbytes);
822 if (unlikely(ssi_buffer_mgr_map_hash_request_final(ctx->drvdata, state, src, nbytes, 0) != 0)) {
823 SSI_LOG_ERR("map_ahash_request_final() failed\n");
827 if (unlikely(ssi_hash_map_result(dev, state, digestsize) != 0)) {
828 SSI_LOG_ERR("map_ahash_digest() failed\n");
833 /* Setup DX request structure */
834 ssi_req.user_cb = (void *)ssi_hash_complete;
835 ssi_req.user_arg = async_req;
838 /* Restore hash digest */
839 hw_desc_init(&desc[idx]);
840 set_cipher_mode(&desc[idx], ctx->hw_mode);
841 set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
842 ctx->inter_digestsize, NS_BIT);
843 set_flow_mode(&desc[idx], S_DIN_to_HASH);
844 set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
847 /* Restore hash current length */
848 hw_desc_init(&desc[idx]);
849 set_cipher_mode(&desc[idx], ctx->hw_mode);
850 set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
851 set_din_type(&desc[idx], DMA_DLLI, state->digest_bytes_len_dma_addr,
852 HASH_LEN_SIZE, NS_BIT);
853 set_flow_mode(&desc[idx], S_DIN_to_HASH);
854 set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
857 ssi_hash_create_data_desc(state, ctx, DIN_HASH, desc, false, &idx);
859 /* "DO-PAD" must be enabled only when writing current length to HW */
860 hw_desc_init(&desc[idx]);
861 set_cipher_do(&desc[idx], DO_PAD);
862 set_cipher_mode(&desc[idx], ctx->hw_mode);
863 set_dout_dlli(&desc[idx], state->digest_bytes_len_dma_addr,
864 HASH_LEN_SIZE, NS_BIT, 0);
865 set_setup_mode(&desc[idx], SETUP_WRITE_STATE1);
866 set_flow_mode(&desc[idx], S_HASH_to_DOUT);
870 /* Store the hash digest result in the context */
871 hw_desc_init(&desc[idx]);
872 set_cipher_mode(&desc[idx], ctx->hw_mode);
873 set_dout_dlli(&desc[idx], state->digest_buff_dma_addr,
874 digestsize, NS_BIT, 0);
875 ssi_set_hash_endianity(ctx->hash_mode, &desc[idx]);
876 set_flow_mode(&desc[idx], S_HASH_to_DOUT);
877 set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
880 /* Loading hash OPAD xor key state */
881 hw_desc_init(&desc[idx]);
882 set_cipher_mode(&desc[idx], ctx->hw_mode);
883 set_din_type(&desc[idx], DMA_DLLI, state->opad_digest_dma_addr,
884 ctx->inter_digestsize, NS_BIT);
885 set_flow_mode(&desc[idx], S_DIN_to_HASH);
886 set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
889 /* Load the hash current length */
890 hw_desc_init(&desc[idx]);
891 set_cipher_mode(&desc[idx], ctx->hw_mode);
892 set_din_sram(&desc[idx],
893 ssi_ahash_get_initial_digest_len_sram_addr(
894 ctx->drvdata, ctx->hash_mode), HASH_LEN_SIZE);
895 set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
896 set_flow_mode(&desc[idx], S_DIN_to_HASH);
897 set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
900 /* Memory Barrier: wait for IPAD/OPAD axi write to complete */
901 hw_desc_init(&desc[idx]);
902 set_din_no_dma(&desc[idx], 0, 0xfffff0);
903 set_dout_no_dma(&desc[idx], 0, 0, 1);
906 /* Perform HASH update on last digest */
907 hw_desc_init(&desc[idx]);
908 set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
910 set_flow_mode(&desc[idx], DIN_HASH);
914 /* Get final MAC result */
915 hw_desc_init(&desc[idx]);
916 set_dout_dlli(&desc[idx], state->digest_result_dma_addr, digestsize,
917 NS_BIT, (async_req ? 1 : 0));
919 set_queue_last_ind(&desc[idx]);
920 set_flow_mode(&desc[idx], S_HASH_to_DOUT);
921 set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
922 set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
923 ssi_set_hash_endianity(ctx->hash_mode, &desc[idx]);
924 set_cipher_mode(&desc[idx], ctx->hw_mode);
/* Async path: -EINPROGRESS expected; anything else unwinds mappings */
928 rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 1);
929 if (unlikely(rc != -EINPROGRESS)) {
930 SSI_LOG_ERR("send_request() failed (rc=%d)\n", rc);
931 ssi_buffer_mgr_unmap_hash_request(dev, state, src, true);
932 ssi_hash_unmap_result(dev, state, digestsize, result);
935 rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 0);
937 SSI_LOG_ERR("send_request() failed (rc=%d)\n", rc);
938 ssi_buffer_mgr_unmap_hash_request(dev, state, src, true);
939 ssi_hash_unmap_result(dev, state, digestsize, result);
941 ssi_buffer_mgr_unmap_hash_request(dev, state, src, false);
942 ssi_hash_unmap_result(dev, state, digestsize, result);
943 ssi_hash_unmap_request(dev, state, ctx);
/* ahash .init: reset the XCBC block counter and (re)build the per-request
 * mapped state. NOTE(review): the return value handling of
 * ssi_hash_map_request() is not visible in this chunk -- confirm the
 * mapping failure is propagated in the full source.
 */
949 static int ssi_hash_init(struct ahash_req_ctx *state, struct ssi_hash_ctx *ctx)
951 struct device *dev = &ctx->drvdata->plat_dev->dev;
953 state->xcbc_count = 0;
955 ssi_hash_map_request(dev, state, ctx);
960 static int ssi_hash_setkey(void *hash,
965 unsigned int hmac_pad_const[2] = { HMAC_IPAD_CONST, HMAC_OPAD_CONST };
966 struct ssi_crypto_req ssi_req = {};
967 struct ssi_hash_ctx *ctx = NULL;
970 int i, idx = 0, rc = 0;
971 struct cc_hw_desc desc[SSI_MAX_AHASH_SEQ_LEN];
972 ssi_sram_addr_t larval_addr;
974 SSI_LOG_DEBUG("start keylen: %d", keylen);
976 ctx = crypto_ahash_ctx(((struct crypto_ahash *)hash));
977 blocksize = crypto_tfm_alg_blocksize(&((struct crypto_ahash *)hash)->base);
978 digestsize = crypto_ahash_digestsize(((struct crypto_ahash *)hash));
980 larval_addr = ssi_ahash_get_larval_digest_sram_addr(
981 ctx->drvdata, ctx->hash_mode);
983 /* The keylen value distinguishes HASH in case keylen is ZERO bytes,
984 * any NON-ZERO value utilizes HMAC flow
986 ctx->key_params.keylen = keylen;
987 ctx->key_params.key_dma_addr = 0;
991 ctx->key_params.key_dma_addr = dma_map_single(
992 &ctx->drvdata->plat_dev->dev,
994 keylen, DMA_TO_DEVICE);
995 if (unlikely(dma_mapping_error(&ctx->drvdata->plat_dev->dev,
996 ctx->key_params.key_dma_addr))) {
997 SSI_LOG_ERR("Mapping key va=0x%p len=%u for"
998 " DMA failed\n", key, keylen);
1001 SSI_LOG_DEBUG("mapping key-buffer: key_dma_addr=%pad "
1002 "keylen=%u\n", ctx->key_params.key_dma_addr,
1003 ctx->key_params.keylen);
1005 if (keylen > blocksize) {
1006 /* Load hash initial state */
1007 hw_desc_init(&desc[idx]);
1008 set_cipher_mode(&desc[idx], ctx->hw_mode);
1009 set_din_sram(&desc[idx], larval_addr,
1010 ctx->inter_digestsize);
1011 set_flow_mode(&desc[idx], S_DIN_to_HASH);
1012 set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
1015 /* Load the hash current length*/
1016 hw_desc_init(&desc[idx]);
1017 set_cipher_mode(&desc[idx], ctx->hw_mode);
1018 set_din_const(&desc[idx], 0, HASH_LEN_SIZE);
1019 set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
1020 set_flow_mode(&desc[idx], S_DIN_to_HASH);
1021 set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
1024 hw_desc_init(&desc[idx]);
1025 set_din_type(&desc[idx], DMA_DLLI,
1026 ctx->key_params.key_dma_addr, keylen,
1028 set_flow_mode(&desc[idx], DIN_HASH);
1031 /* Get hashed key */
1032 hw_desc_init(&desc[idx]);
1033 set_cipher_mode(&desc[idx], ctx->hw_mode);
1034 set_dout_dlli(&desc[idx], ctx->opad_tmp_keys_dma_addr,
1035 digestsize, NS_BIT, 0);
1036 set_flow_mode(&desc[idx], S_HASH_to_DOUT);
1037 set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
1038 set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
1039 ssi_set_hash_endianity(ctx->hash_mode, &desc[idx]);
1042 hw_desc_init(&desc[idx]);
1043 set_din_const(&desc[idx], 0, (blocksize - digestsize));
1044 set_flow_mode(&desc[idx], BYPASS);
1045 set_dout_dlli(&desc[idx], (ctx->opad_tmp_keys_dma_addr +
1047 (blocksize - digestsize), NS_BIT, 0);
1050 hw_desc_init(&desc[idx]);
1051 set_din_type(&desc[idx], DMA_DLLI,
1052 ctx->key_params.key_dma_addr, keylen,
1054 set_flow_mode(&desc[idx], BYPASS);
1055 set_dout_dlli(&desc[idx], ctx->opad_tmp_keys_dma_addr,
1059 if ((blocksize - keylen) != 0) {
1060 hw_desc_init(&desc[idx]);
1061 set_din_const(&desc[idx], 0,
1062 (blocksize - keylen));
1063 set_flow_mode(&desc[idx], BYPASS);
1064 set_dout_dlli(&desc[idx],
1065 (ctx->opad_tmp_keys_dma_addr +
1066 keylen), (blocksize - keylen),
1072 hw_desc_init(&desc[idx]);
1073 set_din_const(&desc[idx], 0, blocksize);
1074 set_flow_mode(&desc[idx], BYPASS);
1075 set_dout_dlli(&desc[idx], (ctx->opad_tmp_keys_dma_addr),
1076 blocksize, NS_BIT, 0);
1080 rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 0);
1081 if (unlikely(rc != 0)) {
1082 SSI_LOG_ERR("send_request() failed (rc=%d)\n", rc);
1086 /* calc derived HMAC key */
1087 for (idx = 0, i = 0; i < 2; i++) {
1088 /* Load hash initial state */
1089 hw_desc_init(&desc[idx]);
1090 set_cipher_mode(&desc[idx], ctx->hw_mode);
1091 set_din_sram(&desc[idx], larval_addr, ctx->inter_digestsize);
1092 set_flow_mode(&desc[idx], S_DIN_to_HASH);
1093 set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
1096 /* Load the hash current length*/
1097 hw_desc_init(&desc[idx]);
1098 set_cipher_mode(&desc[idx], ctx->hw_mode);
1099 set_din_const(&desc[idx], 0, HASH_LEN_SIZE);
1100 set_flow_mode(&desc[idx], S_DIN_to_HASH);
1101 set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
1104 /* Prepare ipad key */
1105 hw_desc_init(&desc[idx]);
1106 set_xor_val(&desc[idx], hmac_pad_const[i]);
1107 set_cipher_mode(&desc[idx], ctx->hw_mode);
1108 set_flow_mode(&desc[idx], S_DIN_to_HASH);
1109 set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
1112 /* Perform HASH update */
1113 hw_desc_init(&desc[idx]);
1114 set_din_type(&desc[idx], DMA_DLLI, ctx->opad_tmp_keys_dma_addr,
1116 set_cipher_mode(&desc[idx], ctx->hw_mode);
1117 set_xor_active(&desc[idx]);
1118 set_flow_mode(&desc[idx], DIN_HASH);
1121 /* Get the IPAD/OPAD xor key (Note, IPAD is the initial digest of the first HASH "update" state) */
1122 hw_desc_init(&desc[idx]);
1123 set_cipher_mode(&desc[idx], ctx->hw_mode);
1124 if (i > 0) /* Not first iteration */
1125 set_dout_dlli(&desc[idx], ctx->opad_tmp_keys_dma_addr,
1126 ctx->inter_digestsize, NS_BIT, 0);
1127 else /* First iteration */
1128 set_dout_dlli(&desc[idx], ctx->digest_buff_dma_addr,
1129 ctx->inter_digestsize, NS_BIT, 0);
1130 set_flow_mode(&desc[idx], S_HASH_to_DOUT);
1131 set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
1135 rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 0);
1139 crypto_ahash_set_flags((struct crypto_ahash *)hash, CRYPTO_TFM_RES_BAD_KEY_LEN);
1141 if (ctx->key_params.key_dma_addr) {
1142 dma_unmap_single(&ctx->drvdata->plat_dev->dev,
1143 ctx->key_params.key_dma_addr,
1144 ctx->key_params.keylen, DMA_TO_DEVICE);
1145 SSI_LOG_DEBUG("Unmapped key-buffer: key_dma_addr=%pad keylen=%u\n",
1146 ctx->key_params.key_dma_addr,
1147 ctx->key_params.keylen);
/*
 * ssi_xcbc_setkey() - set the key for an AES-XCBC-MAC ahash transform.
 *
 * Accepts the standard AES key sizes (see the visible case labels), DMA-maps
 * the caller's key, then derives the three XCBC subkeys K1/K2/K3 by having
 * the engine ECB-encrypt the constants 0x01..01, 0x02..02 and 0x03..03 with
 * the user key.  The derived subkeys are written into the context's
 * opad_tmp_keys buffer at XCBC_MAC_K{1,2,3}_OFFSET.  The temporary key
 * mapping is released before returning.
 *
 * NOTE(review): several interior lines (switch header, error-path braces,
 * returns) are missing from this extract; comments describe only the
 * visible statements.
 */
1152 static int ssi_xcbc_setkey(struct crypto_ahash *ahash,
1153 const u8 *key, unsigned int keylen)
1155 struct ssi_crypto_req ssi_req = {};
1156 struct ssi_hash_ctx *ctx = crypto_ahash_ctx(ahash);
1157 int idx = 0, rc = 0;
1158 struct cc_hw_desc desc[SSI_MAX_AHASH_SEQ_LEN];
1160 SSI_LOG_DEBUG("===== setkey (%d) ====\n", keylen);
/* Only the three standard AES key sizes are accepted. */
1163 case AES_KEYSIZE_128:
1164 case AES_KEYSIZE_192:
1165 case AES_KEYSIZE_256:
1171 ctx->key_params.keylen = keylen;
/* Map the user key so the engine can read it during subkey derivation. */
1173 ctx->key_params.key_dma_addr = dma_map_single(
1174 &ctx->drvdata->plat_dev->dev,
1176 keylen, DMA_TO_DEVICE);
1177 if (unlikely(dma_mapping_error(&ctx->drvdata->plat_dev->dev,
1178 ctx->key_params.key_dma_addr))) {
1179 SSI_LOG_ERR("Mapping key va=0x%p len=%u for"
1180 " DMA failed\n", key, keylen);
1183 SSI_LOG_DEBUG("mapping key-buffer: key_dma_addr=%pad "
1185 ctx->key_params.key_dma_addr,
1186 ctx->key_params.keylen);
1188 ctx->is_hmac = true;
1189 /* 1. Load the AES key */
1190 hw_desc_init(&desc[idx]);
1191 set_din_type(&desc[idx], DMA_DLLI, ctx->key_params.key_dma_addr,
1193 set_cipher_mode(&desc[idx], DRV_CIPHER_ECB);
1194 set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
1195 set_key_size_aes(&desc[idx], keylen);
1196 set_flow_mode(&desc[idx], S_DIN_to_AES);
1197 set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
/* K1 = AES-ECB(key, 0x01..01) -> opad_tmp_keys + XCBC_MAC_K1_OFFSET */
1200 hw_desc_init(&desc[idx]);
1201 set_din_const(&desc[idx], 0x01010101, CC_AES_128_BIT_KEY_SIZE);
1202 set_flow_mode(&desc[idx], DIN_AES_DOUT);
1203 set_dout_dlli(&desc[idx], (ctx->opad_tmp_keys_dma_addr +
1204 XCBC_MAC_K1_OFFSET),
1205 CC_AES_128_BIT_KEY_SIZE, NS_BIT, 0);
/* K2 = AES-ECB(key, 0x02..02) -> opad_tmp_keys + XCBC_MAC_K2_OFFSET */
1208 hw_desc_init(&desc[idx]);
1209 set_din_const(&desc[idx], 0x02020202, CC_AES_128_BIT_KEY_SIZE);
1210 set_flow_mode(&desc[idx], DIN_AES_DOUT);
1211 set_dout_dlli(&desc[idx], (ctx->opad_tmp_keys_dma_addr +
1212 XCBC_MAC_K2_OFFSET),
1213 CC_AES_128_BIT_KEY_SIZE, NS_BIT, 0);
/* K3 = AES-ECB(key, 0x03..03) -> opad_tmp_keys + XCBC_MAC_K3_OFFSET */
1216 hw_desc_init(&desc[idx]);
1217 set_din_const(&desc[idx], 0x03030303, CC_AES_128_BIT_KEY_SIZE);
1218 set_flow_mode(&desc[idx], DIN_AES_DOUT);
1219 set_dout_dlli(&desc[idx], (ctx->opad_tmp_keys_dma_addr +
1220 XCBC_MAC_K3_OFFSET),
1221 CC_AES_128_BIT_KEY_SIZE, NS_BIT, 0);
/* Run the derivation sequence synchronously (last arg 0 = no async cb). */
1224 rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 0);
/* On failure, report a bad key to the crypto API via the tfm flags. */
1227 crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
/* The key mapping is only needed for the derivation above; drop it now. */
1229 dma_unmap_single(&ctx->drvdata->plat_dev->dev,
1230 ctx->key_params.key_dma_addr,
1231 ctx->key_params.keylen, DMA_TO_DEVICE);
1232 SSI_LOG_DEBUG("Unmapped key-buffer: key_dma_addr=%pad keylen=%u\n",
1233 ctx->key_params.key_dma_addr,
1234 ctx->key_params.keylen);
/*
 * ssi_cmac_setkey() - set the key for an AES-CMAC ahash transform.
 *
 * Unlike XCBC, CMAC needs no engine round-trip at setkey time: the raw key
 * is simply copied into the context's DMA-mapped opad_tmp_keys buffer,
 * bracketed by sync-for-cpu / sync-for-device so the device sees the update.
 *
 * NOTE(review): the switch header, braces and return statements are missing
 * from this extract; comments describe only the visible statements.
 */
1240 static int ssi_cmac_setkey(struct crypto_ahash *ahash,
1241 const u8 *key, unsigned int keylen)
1243 struct ssi_hash_ctx *ctx = crypto_ahash_ctx(ahash);
1245 SSI_LOG_DEBUG("===== setkey (%d) ====\n", keylen);
/* is_hmac doubles as a "key has been set" marker for the MAC modes. */
1247 ctx->is_hmac = true;
/* Only the three standard AES key sizes are accepted. */
1250 case AES_KEYSIZE_128:
1251 case AES_KEYSIZE_192:
1252 case AES_KEYSIZE_256:
1258 ctx->key_params.keylen = keylen;
1260 /* STAT_PHASE_1: Copy key to ctx */
1262 dma_sync_single_for_cpu(&ctx->drvdata->plat_dev->dev,
1263 ctx->opad_tmp_keys_dma_addr,
1264 keylen, DMA_TO_DEVICE);
1266 memcpy(ctx->opad_tmp_keys_buff, key, keylen);
/*
 * Zero-pads from byte 24 up to the max AES key size — presumably only for
 * 192-bit keys; the guarding condition is not visible in this extract.
 * TODO(review): confirm this runs only when keylen == 24.
 */
1268 memset(ctx->opad_tmp_keys_buff + 24, 0, CC_AES_KEY_SIZE_MAX - 24);
1270 dma_sync_single_for_device(&ctx->drvdata->plat_dev->dev,
1271 ctx->opad_tmp_keys_dma_addr,
1272 keylen, DMA_TO_DEVICE);
1274 ctx->key_params.keylen = keylen;
/*
 * ssi_hash_free_ctx() - tear down the per-tfm DMA mappings.
 *
 * Unmaps the digest scratch buffer and the opad/derived-key buffer (if they
 * were mapped), zeroes the stored DMA addresses so a double free is a no-op,
 * and clears the cached key length.
 */
1280 static void ssi_hash_free_ctx(struct ssi_hash_ctx *ctx)
1282 struct device *dev = &ctx->drvdata->plat_dev->dev;
1284 if (ctx->digest_buff_dma_addr != 0) {
1285 dma_unmap_single(dev, ctx->digest_buff_dma_addr,
1286 sizeof(ctx->digest_buff), DMA_BIDIRECTIONAL);
1287 SSI_LOG_DEBUG("Unmapped digest-buffer: "
1288 "digest_buff_dma_addr=%pad\n",
1289 ctx->digest_buff_dma_addr);
/* Clearing the address marks the buffer as unmapped. */
1290 ctx->digest_buff_dma_addr = 0;
1292 if (ctx->opad_tmp_keys_dma_addr != 0) {
1293 dma_unmap_single(dev, ctx->opad_tmp_keys_dma_addr,
1294 sizeof(ctx->opad_tmp_keys_buff),
1296 SSI_LOG_DEBUG("Unmapped opad-digest: "
1297 "opad_tmp_keys_dma_addr=%pad\n",
1298 ctx->opad_tmp_keys_dma_addr);
1299 ctx->opad_tmp_keys_dma_addr = 0;
1302 ctx->key_params.keylen = 0;
/*
 * ssi_hash_alloc_ctx() - DMA-map the per-tfm scratch buffers.
 *
 * Maps digest_buff (intermediate digest state) and opad_tmp_keys_buff
 * (HMAC opad / derived MAC keys) bidirectionally.  On a mapping failure the
 * already-established mappings are released via ssi_hash_free_ctx() (the
 * error-path control flow is partially missing from this extract).
 */
1305 static int ssi_hash_alloc_ctx(struct ssi_hash_ctx *ctx)
1307 struct device *dev = &ctx->drvdata->plat_dev->dev;
1309 ctx->key_params.keylen = 0;
1311 ctx->digest_buff_dma_addr = dma_map_single(dev, (void *)ctx->digest_buff, sizeof(ctx->digest_buff), DMA_BIDIRECTIONAL);
1312 if (dma_mapping_error(dev, ctx->digest_buff_dma_addr)) {
1313 SSI_LOG_ERR("Mapping digest len %zu B at va=%pK for DMA failed\n",
1314 sizeof(ctx->digest_buff), ctx->digest_buff);
1317 SSI_LOG_DEBUG("Mapped digest %zu B at va=%pK to dma=%pad\n",
1318 sizeof(ctx->digest_buff), ctx->digest_buff,
1319 ctx->digest_buff_dma_addr);
1321 ctx->opad_tmp_keys_dma_addr = dma_map_single(dev, (void *)ctx->opad_tmp_keys_buff, sizeof(ctx->opad_tmp_keys_buff), DMA_BIDIRECTIONAL);
1322 if (dma_mapping_error(dev, ctx->opad_tmp_keys_dma_addr)) {
1323 SSI_LOG_ERR("Mapping opad digest %zu B at va=%pK for DMA failed\n",
1324 sizeof(ctx->opad_tmp_keys_buff),
1325 ctx->opad_tmp_keys_buff);
1328 SSI_LOG_DEBUG("Mapped opad_tmp_keys %zu B at va=%pK to dma=%pad\n",
1329 sizeof(ctx->opad_tmp_keys_buff), ctx->opad_tmp_keys_buff,
1330 ctx->opad_tmp_keys_dma_addr);
/* Fresh context: no HMAC key installed yet. */
1332 ctx->is_hmac = false;
/* Error path: release whatever was mapped before the failure. */
1336 ssi_hash_free_ctx(ctx);
1340 static int ssi_ahash_cra_init(struct crypto_tfm *tfm)
1342 struct ssi_hash_ctx *ctx = crypto_tfm_ctx(tfm);
1343 struct hash_alg_common *hash_alg_common =
1344 container_of(tfm->__crt_alg, struct hash_alg_common, base);
1345 struct ahash_alg *ahash_alg =
1346 container_of(hash_alg_common, struct ahash_alg, halg);
1347 struct ssi_hash_alg *ssi_alg =
1348 container_of(ahash_alg, struct ssi_hash_alg, ahash_alg);
1350 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
1351 sizeof(struct ahash_req_ctx));
1353 ctx->hash_mode = ssi_alg->hash_mode;
1354 ctx->hw_mode = ssi_alg->hw_mode;
1355 ctx->inter_digestsize = ssi_alg->inter_digestsize;
1356 ctx->drvdata = ssi_alg->drvdata;
1358 return ssi_hash_alloc_ctx(ctx);
/*
 * ssi_hash_cra_exit() - per-transform teardown hook.
 *
 * Releases the DMA mappings established by ssi_ahash_cra_init().
 */
static void ssi_hash_cra_exit(struct crypto_tfm *tfm)
{
	struct ssi_hash_ctx *ctx = crypto_tfm_ctx(tfm);

	SSI_LOG_DEBUG("ssi_hash_cra_exit");

	ssi_hash_free_ctx(ctx);
}
/*
 * ssi_mac_update() - incremental update for the XCBC/CMAC ahash flows.
 *
 * Zero-length updates return early.  Otherwise the request data is mapped;
 * if the buffer manager decides no full block is available yet the data is
 * only buffered and no descriptors are submitted.  For a real update the
 * function builds: mode setup (XCBC or CMAC), a data descriptor, and a
 * final descriptor that writes the intermediate MAC state back into
 * state->digest_buff.  Completion is asynchronous via
 * ssi_hash_update_complete.
 *
 * NOTE(review): early-return statements and some braces are missing from
 * this extract.
 */
1369 static int ssi_mac_update(struct ahash_request *req)
1371 struct ahash_req_ctx *state = ahash_request_ctx(req);
1372 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1373 struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
1374 struct device *dev = &ctx->drvdata->plat_dev->dev;
1375 unsigned int block_size = crypto_tfm_alg_blocksize(&tfm->base);
1376 struct ssi_crypto_req ssi_req = {};
1377 struct cc_hw_desc desc[SSI_MAX_AHASH_SEQ_LEN];
1381 if (req->nbytes == 0) {
1382 /* no real updates required */
/* Count HW-backed updates; used by final() to pick the finish path. */
1386 state->xcbc_count++;
1388 rc = ssi_buffer_mgr_map_hash_request_update(ctx->drvdata, state, req->src, req->nbytes, block_size);
1391 SSI_LOG_DEBUG(" data size not require HW update %x\n",
1393 /* No hardware updates are required */
1396 SSI_LOG_ERR("map_ahash_request_update() failed\n");
/* Select the engine setup matching the configured MAC mode. */
1400 if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC)
1401 ssi_hash_create_xcbc_setup(req, desc, &idx);
1403 ssi_hash_create_cmac_setup(req, desc, &idx);
1405 ssi_hash_create_data_desc(state, ctx, DIN_AES_DOUT, desc, true, &idx);
1407 /* store the hash digest result in context */
1408 hw_desc_init(&desc[idx]);
1409 set_cipher_mode(&desc[idx], ctx->hw_mode);
1410 set_dout_dlli(&desc[idx], state->digest_buff_dma_addr,
1411 ctx->inter_digestsize, NS_BIT, 1);
1412 set_queue_last_ind(&desc[idx]);
1413 set_flow_mode(&desc[idx], S_AES_to_DOUT);
1414 set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
1417 /* Setup DX request structure */
1418 ssi_req.user_cb = (void *)ssi_hash_update_complete;
1419 ssi_req.user_arg = (void *)req;
/* Asynchronous submission: -EINPROGRESS is the expected success result. */
1421 rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 1);
1422 if (unlikely(rc != -EINPROGRESS)) {
1423 SSI_LOG_ERR("send_request() failed (rc=%d)\n", rc);
1424 ssi_buffer_mgr_unmap_hash_request(dev, state, req->src, true);
/*
 * ssi_mac_final() - finish an XCBC/CMAC computation over buffered data.
 *
 * rem_cnt is how many bytes remain in the active staging buffer.  When
 * previous updates exist and the data ended exactly on a block boundary
 * (rem_cnt == 0), the stored state is first ECB-decrypted with K1 to
 * recover "previous state XOR M[n]" before the final pass (see the inline
 * comments).  Zero total data uses the engine's dedicated size-0 CMAC mode.
 * The final MAC is written to state->digest_result_dma_addr and completion
 * is asynchronous via ssi_hash_complete.
 *
 * NOTE(review): error-path returns and several braces are missing from this
 * extract; comments describe only the visible statements.
 */
1429 static int ssi_mac_final(struct ahash_request *req)
1431 struct ahash_req_ctx *state = ahash_request_ctx(req);
1432 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1433 struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
1434 struct device *dev = &ctx->drvdata->plat_dev->dev;
1435 struct ssi_crypto_req ssi_req = {};
1436 struct cc_hw_desc desc[SSI_MAX_AHASH_SEQ_LEN];
1439 u32 key_size, key_len;
1440 u32 digestsize = crypto_ahash_digestsize(tfm);
1442 u32 rem_cnt = state->buff_index ? state->buff1_cnt :
1445 if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC) {
/* XCBC always operates with 128-bit derived subkeys. */
1446 key_size = CC_AES_128_BIT_KEY_SIZE;
1447 key_len = CC_AES_128_BIT_KEY_SIZE;
/* CMAC: a 24-byte key is stored zero-padded to the max AES key size. */
1449 key_size = (ctx->key_params.keylen == 24) ? AES_MAX_KEY_SIZE :
1450 ctx->key_params.keylen;
1451 key_len = ctx->key_params.keylen;
1454 SSI_LOG_DEBUG("===== final xcbc reminder (%d) ====\n", rem_cnt);
1456 if (unlikely(ssi_buffer_mgr_map_hash_request_final(ctx->drvdata, state, req->src, req->nbytes, 0) != 0)) {
1457 SSI_LOG_ERR("map_ahash_request_final() failed\n");
1461 if (unlikely(ssi_hash_map_result(dev, state, digestsize) != 0)) {
1462 SSI_LOG_ERR("map_ahash_digest() failed\n");
1466 /* Setup DX request structure */
1467 ssi_req.user_cb = (void *)ssi_hash_complete;
1468 ssi_req.user_arg = (void *)req;
/* Data ended exactly on a block boundary after at least one update. */
1470 if (state->xcbc_count && (rem_cnt == 0)) {
1471 /* Load key for ECB decryption */
1472 hw_desc_init(&desc[idx]);
1473 set_cipher_mode(&desc[idx], DRV_CIPHER_ECB);
1474 set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_DECRYPT);
1475 set_din_type(&desc[idx], DMA_DLLI,
1476 (ctx->opad_tmp_keys_dma_addr +
1477 XCBC_MAC_K1_OFFSET), key_size, NS_BIT);
1478 set_key_size_aes(&desc[idx], key_len);
1479 set_flow_mode(&desc[idx], S_DIN_to_AES);
1480 set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
1483 /* Initiate decryption of block state to previous block_state-XOR-M[n] */
1484 hw_desc_init(&desc[idx]);
1485 set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
1486 CC_AES_BLOCK_SIZE, NS_BIT);
1487 set_dout_dlli(&desc[idx], state->digest_buff_dma_addr,
1488 CC_AES_BLOCK_SIZE, NS_BIT, 0);
1489 set_flow_mode(&desc[idx], DIN_AES_DOUT);
1492 /* Memory Barrier: wait for axi write to complete */
1493 hw_desc_init(&desc[idx]);
1494 set_din_no_dma(&desc[idx], 0, 0xfffff0);
1495 set_dout_no_dma(&desc[idx], 0, 0, 1);
/* Re-load the MAC state/keys for the finishing pass. */
1499 if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC)
1500 ssi_hash_create_xcbc_setup(req, desc, &idx);
1502 ssi_hash_create_cmac_setup(req, desc, &idx);
/* No data at all: use the engine's dedicated size-0 CMAC mode. */
1504 if (state->xcbc_count == 0) {
1505 hw_desc_init(&desc[idx]);
1506 set_cipher_mode(&desc[idx], ctx->hw_mode);
1507 set_key_size_aes(&desc[idx], key_len);
1508 set_cmac_size0_mode(&desc[idx]);
1509 set_flow_mode(&desc[idx], S_DIN_to_AES);
/* Partial block remains: feed the buffered tail through the engine. */
1511 } else if (rem_cnt > 0) {
1512 ssi_hash_create_data_desc(state, ctx, DIN_AES_DOUT, desc, false, &idx);
/* Block-aligned case: push one zero block to trigger finalization. */
1514 hw_desc_init(&desc[idx]);
1515 set_din_const(&desc[idx], 0x00, CC_AES_BLOCK_SIZE);
1516 set_flow_mode(&desc[idx], DIN_AES_DOUT);
1520 /* Get final MAC result */
1521 hw_desc_init(&desc[idx]);
1523 set_dout_dlli(&desc[idx], state->digest_result_dma_addr,
1524 digestsize, NS_BIT, 1);
1525 set_queue_last_ind(&desc[idx]);
1526 set_flow_mode(&desc[idx], S_AES_to_DOUT);
1527 set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
1528 set_cipher_mode(&desc[idx], ctx->hw_mode);
/* Asynchronous submission: -EINPROGRESS is the expected success result. */
1531 rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 1);
1532 if (unlikely(rc != -EINPROGRESS)) {
1533 SSI_LOG_ERR("send_request() failed (rc=%d)\n", rc);
1534 ssi_buffer_mgr_unmap_hash_request(dev, state, req->src, true);
1535 ssi_hash_unmap_result(dev, state, digestsize, req->result);
/*
 * ssi_mac_finup() - combined update+final for the XCBC/CMAC ahash flows.
 *
 * If previous updates exist but no new bytes were supplied, the request is
 * delegated to ssi_mac_final().  Otherwise the remaining data is mapped and
 * a single descriptor chain performs the setup, data pass (or size-0 CMAC
 * for an empty request) and the final MAC write-out.  Completion is
 * asynchronous via ssi_hash_complete.
 *
 * NOTE(review): error-path returns and some braces are missing from this
 * extract.
 */
1540 static int ssi_mac_finup(struct ahash_request *req)
1542 struct ahash_req_ctx *state = ahash_request_ctx(req);
1543 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1544 struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
1545 struct device *dev = &ctx->drvdata->plat_dev->dev;
1546 struct ssi_crypto_req ssi_req = {};
1547 struct cc_hw_desc desc[SSI_MAX_AHASH_SEQ_LEN];
1551 u32 digestsize = crypto_ahash_digestsize(tfm);
1553 SSI_LOG_DEBUG("===== finup xcbc(%d) ====\n", req->nbytes);
1554 if (state->xcbc_count > 0 && req->nbytes == 0) {
1555 SSI_LOG_DEBUG("No data to update. Call to fdx_mac_final\n");
1556 return ssi_mac_final(req);
1559 if (unlikely(ssi_buffer_mgr_map_hash_request_final(ctx->drvdata, state, req->src, req->nbytes, 1) != 0)) {
1560 SSI_LOG_ERR("map_ahash_request_final() failed\n");
1563 if (unlikely(ssi_hash_map_result(dev, state, digestsize) != 0)) {
1564 SSI_LOG_ERR("map_ahash_digest() failed\n");
1568 /* Setup DX request structure */
1569 ssi_req.user_cb = (void *)ssi_hash_complete;
1570 ssi_req.user_arg = (void *)req;
/* Select the engine setup matching the configured MAC mode. */
1572 if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC) {
1573 key_len = CC_AES_128_BIT_KEY_SIZE;
1574 ssi_hash_create_xcbc_setup(req, desc, &idx);
1576 key_len = ctx->key_params.keylen;
1577 ssi_hash_create_cmac_setup(req, desc, &idx);
/* Empty request: use the engine's dedicated size-0 CMAC mode. */
1580 if (req->nbytes == 0) {
1581 hw_desc_init(&desc[idx]);
1582 set_cipher_mode(&desc[idx], ctx->hw_mode);
1583 set_key_size_aes(&desc[idx], key_len);
1584 set_cmac_size0_mode(&desc[idx]);
1585 set_flow_mode(&desc[idx], S_DIN_to_AES);
1588 ssi_hash_create_data_desc(state, ctx, DIN_AES_DOUT, desc, false, &idx);
1591 /* Get final MAC result */
1592 hw_desc_init(&desc[idx]);
1594 set_dout_dlli(&desc[idx], state->digest_result_dma_addr,
1595 digestsize, NS_BIT, 1);
1596 set_queue_last_ind(&desc[idx]);
1597 set_flow_mode(&desc[idx], S_AES_to_DOUT);
1598 set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
1599 set_cipher_mode(&desc[idx], ctx->hw_mode);
/* Asynchronous submission: -EINPROGRESS is the expected success result. */
1602 rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 1);
1603 if (unlikely(rc != -EINPROGRESS)) {
1604 SSI_LOG_ERR("send_request() failed (rc=%d)\n", rc);
1605 ssi_buffer_mgr_unmap_hash_request(dev, state, req->src, true);
1606 ssi_hash_unmap_result(dev, state, digestsize, req->result);
/*
 * ssi_mac_digest() - one-shot XCBC/CMAC over the whole request.
 *
 * Maps the request state, result buffer and source data, then builds a
 * single descriptor chain: MAC setup, data pass (or the size-0 CMAC mode
 * for an empty request) and final MAC write-out.  Completion is
 * asynchronous via ssi_hash_digest_complete.
 *
 * NOTE(review): error-path returns and some braces are missing from this
 * extract.
 */
1611 static int ssi_mac_digest(struct ahash_request *req)
1613 struct ahash_req_ctx *state = ahash_request_ctx(req);
1614 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1615 struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
1616 struct device *dev = &ctx->drvdata->plat_dev->dev;
1617 u32 digestsize = crypto_ahash_digestsize(tfm);
1618 struct ssi_crypto_req ssi_req = {};
1619 struct cc_hw_desc desc[SSI_MAX_AHASH_SEQ_LEN];
1624 SSI_LOG_DEBUG("===== -digest mac (%d) ====\n", req->nbytes);
1626 if (unlikely(ssi_hash_map_request(dev, state, ctx) != 0)) {
1627 SSI_LOG_ERR("map_ahash_source() failed\n");
1630 if (unlikely(ssi_hash_map_result(dev, state, digestsize) != 0)) {
1631 SSI_LOG_ERR("map_ahash_digest() failed\n");
1635 if (unlikely(ssi_buffer_mgr_map_hash_request_final(ctx->drvdata, state, req->src, req->nbytes, 1) != 0)) {
1636 SSI_LOG_ERR("map_ahash_request_final() failed\n");
1640 /* Setup DX request structure */
1641 ssi_req.user_cb = (void *)ssi_hash_digest_complete;
1642 ssi_req.user_arg = (void *)req;
/* Select the engine setup matching the configured MAC mode. */
1644 if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC) {
1645 key_len = CC_AES_128_BIT_KEY_SIZE;
1646 ssi_hash_create_xcbc_setup(req, desc, &idx);
1648 key_len = ctx->key_params.keylen;
1649 ssi_hash_create_cmac_setup(req, desc, &idx);
/* Empty request: use the engine's dedicated size-0 CMAC mode. */
1652 if (req->nbytes == 0) {
1653 hw_desc_init(&desc[idx]);
1654 set_cipher_mode(&desc[idx], ctx->hw_mode);
1655 set_key_size_aes(&desc[idx], key_len);
1656 set_cmac_size0_mode(&desc[idx]);
1657 set_flow_mode(&desc[idx], S_DIN_to_AES);
1660 ssi_hash_create_data_desc(state, ctx, DIN_AES_DOUT, desc, false, &idx);
1663 /* Get final MAC result */
1664 hw_desc_init(&desc[idx]);
1665 set_dout_dlli(&desc[idx], state->digest_result_dma_addr,
1666 CC_AES_BLOCK_SIZE, NS_BIT, 1);
1667 set_queue_last_ind(&desc[idx]);
1668 set_flow_mode(&desc[idx], S_AES_to_DOUT);
1669 set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
1670 set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
1671 set_cipher_mode(&desc[idx], ctx->hw_mode);
/* Asynchronous submission: -EINPROGRESS is the expected success result. */
1674 rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 1);
1675 if (unlikely(rc != -EINPROGRESS)) {
1676 SSI_LOG_ERR("send_request() failed (rc=%d)\n", rc);
1677 ssi_buffer_mgr_unmap_hash_request(dev, state, req->src, true);
1678 ssi_hash_unmap_result(dev, state, digestsize, req->result);
1679 ssi_hash_unmap_request(dev, state, ctx);
1684 /* ahash wrapper entry points: thin adapters from the crypto API's ahash hooks to the common ssi_hash_* implementation */
1685 static int ssi_ahash_digest(struct ahash_request *req)
1687 struct ahash_req_ctx *state = ahash_request_ctx(req);
1688 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1689 struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
1690 u32 digestsize = crypto_ahash_digestsize(tfm);
1692 return ssi_hash_digest(state, ctx, digestsize, req->src, req->nbytes, req->result, (void *)req);
1695 static int ssi_ahash_update(struct ahash_request *req)
1697 struct ahash_req_ctx *state = ahash_request_ctx(req);
1698 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1699 struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
1700 unsigned int block_size = crypto_tfm_alg_blocksize(&tfm->base);
1702 return ssi_hash_update(state, ctx, block_size, req->src, req->nbytes, (void *)req);
1705 static int ssi_ahash_finup(struct ahash_request *req)
1707 struct ahash_req_ctx *state = ahash_request_ctx(req);
1708 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1709 struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
1710 u32 digestsize = crypto_ahash_digestsize(tfm);
1712 return ssi_hash_finup(state, ctx, digestsize, req->src, req->nbytes, req->result, (void *)req);
1715 static int ssi_ahash_final(struct ahash_request *req)
1717 struct ahash_req_ctx *state = ahash_request_ctx(req);
1718 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1719 struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
1720 u32 digestsize = crypto_ahash_digestsize(tfm);
1722 return ssi_hash_final(state, ctx, digestsize, req->src, req->nbytes, req->result, (void *)req);
1725 static int ssi_ahash_init(struct ahash_request *req)
1727 struct ahash_req_ctx *state = ahash_request_ctx(req);
1728 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1729 struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
1731 SSI_LOG_DEBUG("===== init (%d) ====\n", req->nbytes);
1733 return ssi_hash_init(state, ctx);
/*
 * ssi_ahash_export() - serialize the partial-hash state into @out.
 *
 * Layout (matches CC_STATE_SIZE and ssi_ahash_import): a CC_EXPORT_MAGIC
 * u32, the intermediate digest, the digest byte-length field (0x5F-poisoned
 * when the mode keeps no length), the staging-buffer byte count, then the
 * staged bytes.  CPU-side syncs are taken before reading each DMA buffer.
 */
1736 static int ssi_ahash_export(struct ahash_request *req, void *out)
1738 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
1739 struct ssi_hash_ctx *ctx = crypto_ahash_ctx(ahash);
1740 struct device *dev = &ctx->drvdata->plat_dev->dev;
1741 struct ahash_req_ctx *state = ahash_request_ctx(req);
/* buff_index selects which of the two staging buffers is active. */
1742 u8 *curr_buff = state->buff_index ? state->buff1 : state->buff0;
1743 u32 curr_buff_cnt = state->buff_index ? state->buff1_cnt :
1745 const u32 tmp = CC_EXPORT_MAGIC;
/* Magic first, so import can reject foreign blobs. */
1747 memcpy(out, &tmp, sizeof(u32));
1750 dma_sync_single_for_cpu(dev, state->digest_buff_dma_addr,
1751 ctx->inter_digestsize, DMA_BIDIRECTIONAL);
1752 memcpy(out, state->digest_buff, ctx->inter_digestsize);
1753 out += ctx->inter_digestsize;
1755 if (state->digest_bytes_len_dma_addr) {
1756 dma_sync_single_for_cpu(dev, state->digest_bytes_len_dma_addr,
1757 HASH_LEN_SIZE, DMA_BIDIRECTIONAL);
1758 memcpy(out, state->digest_bytes_len, HASH_LEN_SIZE);
1760 /* Poison the unused exported digest len field. */
1761 memset(out, 0x5F, HASH_LEN_SIZE);
1763 out += HASH_LEN_SIZE;
1765 memcpy(out, &curr_buff_cnt, sizeof(u32));
1768 memcpy(out, curr_buff, curr_buff_cnt);
1770 /* No sync for device needed since we did not change the data,
/*
 * ssi_ahash_import() - restore a partial-hash state serialized by
 * ssi_ahash_export().
 *
 * Validates the CC_EXPORT_MAGIC marker, re-initializes the request state,
 * copies back the intermediate digest and (if tracked) the digest byte
 * length with CPU/device sync bracketing, then restores the staging buffer
 * after bounds-checking its count against SSI_MAX_HASH_BLCK_SIZE.
 *
 * NOTE(review): error-return statements are missing from this extract.
 */
1777 static int ssi_ahash_import(struct ahash_request *req, const void *in)
1779 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
1780 struct ssi_hash_ctx *ctx = crypto_ahash_ctx(ahash);
1781 struct device *dev = &ctx->drvdata->plat_dev->dev;
1782 struct ahash_req_ctx *state = ahash_request_ctx(req);
/* Reject blobs that were not produced by ssi_ahash_export(). */
1786 memcpy(&tmp, in, sizeof(u32));
1787 if (tmp != CC_EXPORT_MAGIC) {
1793 rc = ssi_hash_init(state, ctx);
1797 dma_sync_single_for_cpu(dev, state->digest_buff_dma_addr,
1798 ctx->inter_digestsize, DMA_BIDIRECTIONAL);
1799 memcpy(state->digest_buff, in, ctx->inter_digestsize);
1800 in += ctx->inter_digestsize;
1802 if (state->digest_bytes_len_dma_addr) {
1803 dma_sync_single_for_cpu(dev, state->digest_bytes_len_dma_addr,
1804 HASH_LEN_SIZE, DMA_BIDIRECTIONAL);
1805 memcpy(state->digest_bytes_len, in, HASH_LEN_SIZE);
/* The length slot is always present in the blob, even when unused. */
1807 in += HASH_LEN_SIZE;
/* Hand the refreshed buffers back to the device. */
1809 dma_sync_single_for_device(dev, state->digest_buff_dma_addr,
1810 ctx->inter_digestsize, DMA_BIDIRECTIONAL);
1812 if (state->digest_bytes_len_dma_addr)
1813 dma_sync_single_for_device(dev,
1814 state->digest_bytes_len_dma_addr,
1815 HASH_LEN_SIZE, DMA_BIDIRECTIONAL);
/* Import always resumes with staging buffer 0 as the active one. */
1817 state->buff_index = 0;
1819 /* Sanity check the data as much as possible */
1820 memcpy(&tmp, in, sizeof(u32));
1821 if (tmp > SSI_MAX_HASH_BLCK_SIZE) {
1827 state->buff0_cnt = tmp;
1828 memcpy(state->buff0, in, state->buff0_cnt);
1834 static int ssi_ahash_setkey(struct crypto_ahash *ahash,
1835 const u8 *key, unsigned int keylen)
1837 return ssi_hash_setkey((void *)ahash, key, keylen, false);
/*
 * struct ssi_hash_template - static description of one hash/MAC algorithm,
 * used by ssi_hash_create_alg() to build the registered crypto_alg.
 */
1840 struct ssi_hash_template {
/* Plain-hash algorithm name ("sha1") and driver name ("sha1-dx"). */
1841 char name[CRYPTO_MAX_ALG_NAME];
1842 char driver_name[CRYPTO_MAX_ALG_NAME];
/* Keyed (HMAC/MAC) variant names, used when registering with keyed=true. */
1843 char mac_name[CRYPTO_MAX_ALG_NAME];
1844 char mac_driver_name[CRYPTO_MAX_ALG_NAME];
1845 unsigned int blocksize;
/* Template copied into the allocated ssi_hash_alg's ahash_alg. */
1847 struct ahash_alg template_ahash;
/* Size of the intermediate digest kept in the context (may exceed the
 * exposed digestsize, e.g. SHA-224 keeps a full SHA-256 state). */
1850 int inter_digestsize;
1851 struct ssi_drvdata *drvdata;
/*
 * Exported-state size for a digest of _x bytes: digest + hash length field
 * + staging buffer + two u32 words (export magic and buffer count) —
 * matches the layout written by ssi_ahash_export().
 */
1854 #define CC_STATE_SIZE(_x) \
1855 ((_x) + HASH_LEN_SIZE + SSI_MAX_HASH_BLCK_SIZE + (2 * sizeof(u32)))
/*
 * hash descriptors: one template per supported algorithm.  The hash entries
 * (SHA-1/224/256[/384/512], MD5) register both a plain and an HMAC variant;
 * the XCBC and CMAC entries are keyed-only.  Note SHA-224 and SHA-384 run
 * on the wider SHA-256/SHA-512 hardware modes, so their inter_digestsize is
 * the full wide state.
 */
1858 static struct ssi_hash_template driver_hash[] = {
1859 /* Asynchronous hash templates */
1862 .driver_name = "sha1-dx",
1863 .mac_name = "hmac(sha1)",
1864 .mac_driver_name = "hmac-sha1-dx",
1865 .blocksize = SHA1_BLOCK_SIZE,
1866 .synchronize = false,
1868 .init = ssi_ahash_init,
1869 .update = ssi_ahash_update,
1870 .final = ssi_ahash_final,
1871 .finup = ssi_ahash_finup,
1872 .digest = ssi_ahash_digest,
1873 .export = ssi_ahash_export,
1874 .import = ssi_ahash_import,
1875 .setkey = ssi_ahash_setkey,
1877 .digestsize = SHA1_DIGEST_SIZE,
1878 .statesize = CC_STATE_SIZE(SHA1_DIGEST_SIZE),
1881 .hash_mode = DRV_HASH_SHA1,
1882 .hw_mode = DRV_HASH_HW_SHA1,
1883 .inter_digestsize = SHA1_DIGEST_SIZE,
1887 .driver_name = "sha256-dx",
1888 .mac_name = "hmac(sha256)",
1889 .mac_driver_name = "hmac-sha256-dx",
1890 .blocksize = SHA256_BLOCK_SIZE,
1892 .init = ssi_ahash_init,
1893 .update = ssi_ahash_update,
1894 .final = ssi_ahash_final,
1895 .finup = ssi_ahash_finup,
1896 .digest = ssi_ahash_digest,
1897 .export = ssi_ahash_export,
1898 .import = ssi_ahash_import,
1899 .setkey = ssi_ahash_setkey,
1901 .digestsize = SHA256_DIGEST_SIZE,
1902 .statesize = CC_STATE_SIZE(SHA256_DIGEST_SIZE)
1905 .hash_mode = DRV_HASH_SHA256,
1906 .hw_mode = DRV_HASH_HW_SHA256,
1907 .inter_digestsize = SHA256_DIGEST_SIZE,
1911 .driver_name = "sha224-dx",
1912 .mac_name = "hmac(sha224)",
1913 .mac_driver_name = "hmac-sha224-dx",
1914 .blocksize = SHA224_BLOCK_SIZE,
1916 .init = ssi_ahash_init,
1917 .update = ssi_ahash_update,
1918 .final = ssi_ahash_final,
1919 .finup = ssi_ahash_finup,
1920 .digest = ssi_ahash_digest,
1921 .export = ssi_ahash_export,
1922 .import = ssi_ahash_import,
1923 .setkey = ssi_ahash_setkey,
1925 .digestsize = SHA224_DIGEST_SIZE,
1926 .statesize = CC_STATE_SIZE(SHA224_DIGEST_SIZE),
1929 .hash_mode = DRV_HASH_SHA224,
/* SHA-224 is computed on the SHA-256 engine with a truncated output. */
1930 .hw_mode = DRV_HASH_HW_SHA256,
1931 .inter_digestsize = SHA256_DIGEST_SIZE,
1933 #if (DX_DEV_SHA_MAX > 256)
1936 .driver_name = "sha384-dx",
1937 .mac_name = "hmac(sha384)",
1938 .mac_driver_name = "hmac-sha384-dx",
1939 .blocksize = SHA384_BLOCK_SIZE,
1941 .init = ssi_ahash_init,
1942 .update = ssi_ahash_update,
1943 .final = ssi_ahash_final,
1944 .finup = ssi_ahash_finup,
1945 .digest = ssi_ahash_digest,
1946 .export = ssi_ahash_export,
1947 .import = ssi_ahash_import,
1948 .setkey = ssi_ahash_setkey,
1950 .digestsize = SHA384_DIGEST_SIZE,
1951 .statesize = CC_STATE_SIZE(SHA384_DIGEST_SIZE),
1954 .hash_mode = DRV_HASH_SHA384,
/* SHA-384 is computed on the SHA-512 engine with a truncated output. */
1955 .hw_mode = DRV_HASH_HW_SHA512,
1956 .inter_digestsize = SHA512_DIGEST_SIZE,
1960 .driver_name = "sha512-dx",
1961 .mac_name = "hmac(sha512)",
1962 .mac_driver_name = "hmac-sha512-dx",
1963 .blocksize = SHA512_BLOCK_SIZE,
1965 .init = ssi_ahash_init,
1966 .update = ssi_ahash_update,
1967 .final = ssi_ahash_final,
1968 .finup = ssi_ahash_finup,
1969 .digest = ssi_ahash_digest,
1970 .export = ssi_ahash_export,
1971 .import = ssi_ahash_import,
1972 .setkey = ssi_ahash_setkey,
1974 .digestsize = SHA512_DIGEST_SIZE,
1975 .statesize = CC_STATE_SIZE(SHA512_DIGEST_SIZE),
1978 .hash_mode = DRV_HASH_SHA512,
1979 .hw_mode = DRV_HASH_HW_SHA512,
1980 .inter_digestsize = SHA512_DIGEST_SIZE,
1985 .driver_name = "md5-dx",
1986 .mac_name = "hmac(md5)",
1987 .mac_driver_name = "hmac-md5-dx",
1988 .blocksize = MD5_HMAC_BLOCK_SIZE,
1990 .init = ssi_ahash_init,
1991 .update = ssi_ahash_update,
1992 .final = ssi_ahash_final,
1993 .finup = ssi_ahash_finup,
1994 .digest = ssi_ahash_digest,
1995 .export = ssi_ahash_export,
1996 .import = ssi_ahash_import,
1997 .setkey = ssi_ahash_setkey,
1999 .digestsize = MD5_DIGEST_SIZE,
2000 .statesize = CC_STATE_SIZE(MD5_DIGEST_SIZE),
2003 .hash_mode = DRV_HASH_MD5,
2004 .hw_mode = DRV_HASH_HW_MD5,
2005 .inter_digestsize = MD5_DIGEST_SIZE,
/* Keyed-only MAC templates: no plain-hash variant, MAC-specific hooks. */
2008 .mac_name = "xcbc(aes)",
2009 .mac_driver_name = "xcbc-aes-dx",
2010 .blocksize = AES_BLOCK_SIZE,
2012 .init = ssi_ahash_init,
2013 .update = ssi_mac_update,
2014 .final = ssi_mac_final,
2015 .finup = ssi_mac_finup,
2016 .digest = ssi_mac_digest,
2017 .setkey = ssi_xcbc_setkey,
2018 .export = ssi_ahash_export,
2019 .import = ssi_ahash_import,
2021 .digestsize = AES_BLOCK_SIZE,
2022 .statesize = CC_STATE_SIZE(AES_BLOCK_SIZE),
2025 .hash_mode = DRV_HASH_NULL,
2026 .hw_mode = DRV_CIPHER_XCBC_MAC,
2027 .inter_digestsize = AES_BLOCK_SIZE,
2031 .mac_name = "cmac(aes)",
2032 .mac_driver_name = "cmac-aes-dx",
2033 .blocksize = AES_BLOCK_SIZE,
2035 .init = ssi_ahash_init,
2036 .update = ssi_mac_update,
2037 .final = ssi_mac_final,
2038 .finup = ssi_mac_finup,
2039 .digest = ssi_mac_digest,
2040 .setkey = ssi_cmac_setkey,
2041 .export = ssi_ahash_export,
2042 .import = ssi_ahash_import,
2044 .digestsize = AES_BLOCK_SIZE,
2045 .statesize = CC_STATE_SIZE(AES_BLOCK_SIZE),
2048 .hash_mode = DRV_HASH_NULL,
2049 .hw_mode = DRV_CIPHER_CMAC,
2050 .inter_digestsize = AES_BLOCK_SIZE,
/*
 * ssi_hash_create_alg() - build a registerable ssi_hash_alg from a template.
 * @template: static algorithm description from driver_hash[].
 * @keyed: true selects the HMAC/MAC names and keeps setkey; false uses the
 *         plain-hash names and clears setkey.
 *
 * Returns the allocated wrapper or ERR_PTR(-ENOMEM).  The caller owns the
 * allocation.
 *
 * NOTE(review): the if (keyed)/else scaffolding and one snprintf argument
 * line are missing from this extract.
 */
2056 static struct ssi_hash_alg *
2057 ssi_hash_create_alg(struct ssi_hash_template *template, bool keyed)
2059 struct ssi_hash_alg *t_crypto_alg;
2060 struct crypto_alg *alg;
2061 struct ahash_alg *halg;
2063 t_crypto_alg = kzalloc(sizeof(*t_crypto_alg), GFP_KERNEL);
2064 if (!t_crypto_alg) {
2065 SSI_LOG_ERR("failed to allocate t_crypto_alg\n");
2066 return ERR_PTR(-ENOMEM);
/* Start from the template's ahash ops, then fill in the common fields. */
2069 t_crypto_alg->ahash_alg = template->template_ahash;
2070 halg = &t_crypto_alg->ahash_alg;
2071 alg = &halg->halg.base;
/* Keyed variant: advertise the mac names. */
2074 snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
2075 template->mac_name);
2076 snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
2077 template->mac_driver_name);
/* Plain variant: no setkey, plain-hash names. */
2079 halg->setkey = NULL;
2080 snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
2082 snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
2083 template->driver_name);
2085 alg->cra_module = THIS_MODULE;
2086 alg->cra_ctxsize = sizeof(struct ssi_hash_ctx);
2087 alg->cra_priority = SSI_CRA_PRIO;
2088 alg->cra_blocksize = template->blocksize;
2089 alg->cra_alignmask = 0;
2090 alg->cra_exit = ssi_hash_cra_exit;
2092 alg->cra_init = ssi_ahash_cra_init;
2093 alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_TYPE_AHASH |
2094 CRYPTO_ALG_KERN_DRIVER_ONLY;
2095 alg->cra_type = &crypto_ahash_type;
/* Hardware parameters consumed later by ssi_ahash_cra_init(). */
2097 t_crypto_alg->hash_mode = template->hash_mode;
2098 t_crypto_alg->hw_mode = template->hw_mode;
2099 t_crypto_alg->inter_digestsize = template->inter_digestsize;
2101 return t_crypto_alg;
/*
 * ssi_hash_init_sram_digest_consts() - stream the driver's constant hash
 * tables into device SRAM at init time: first the digest-length constants,
 * then the larval (initial) digests for MD5/SHA1/SHA224/SHA256, and — when
 * DX_DEV_SHA_MAX > 256 — the SHA384/SHA512 digest-length and larval tables.
 * Each table is converted to a short HW descriptor sequence by
 * ssi_sram_mgr_const2sram_desc() and pushed with send_request_init().
 *
 * \param drvdata  driver context; its hash_handle holds the SRAM base
 *                 reserved earlier by ssi_hash_alloc()
 * \return 0 on success, negative error code from send_request_init()
 *
 * NOTE(review): this excerpt is missing several lines (the `rc`/`i`
 * declarations, some closing braces, and a few `&larval_seq_len`
 * arguments) — compare against the full file before relying on it.
 */
2104 int ssi_hash_init_sram_digest_consts(struct ssi_drvdata *drvdata)
2106 struct ssi_hash_handle *hash_handle = drvdata->hash_handle;
/* SRAM write cursor; starts at the digest-len area reserved in ssi_hash_alloc() */
2107 ssi_sram_addr_t sram_buff_ofs = hash_handle->digest_len_sram_addr;
2108 unsigned int larval_seq_len = 0;
/* Scratch descriptor sequence, reused (larval_seq_len reset) per table */
2109 struct cc_hw_desc larval_seq[CC_DIGEST_SIZE_MAX / sizeof(u32)];
2111 #if (DX_DEV_SHA_MAX > 256)
2115 /* Copy-to-sram digest-len */
2116 ssi_sram_mgr_const2sram_desc(digest_len_init, sram_buff_ofs,
2117 ARRAY_SIZE(digest_len_init),
2118 larval_seq, &larval_seq_len);
2119 rc = send_request_init(drvdata, larval_seq, larval_seq_len);
2120 if (unlikely(rc != 0))
2121 goto init_digest_const_err;
/* Advance the SRAM cursor past the table just written */
2123 sram_buff_ofs += sizeof(digest_len_init);
2126 #if (DX_DEV_SHA_MAX > 256)
2127 /* Copy-to-sram digest-len for sha384/512 */
2128 ssi_sram_mgr_const2sram_desc(digest_len_sha512_init, sram_buff_ofs,
2129 ARRAY_SIZE(digest_len_sha512_init),
2130 larval_seq, &larval_seq_len);
2131 rc = send_request_init(drvdata, larval_seq, larval_seq_len);
2132 if (unlikely(rc != 0))
2133 goto init_digest_const_err;
2135 sram_buff_ofs += sizeof(digest_len_sha512_init);
/* Record where the larval digests begin; read back later by
 * ssi_ahash_get_larval_digest_sram_addr() */
2139 /* The initial digests offset */
2140 hash_handle->larval_digest_sram_addr = sram_buff_ofs;
2142 /* Copy-to-sram initial SHA* digests */
2143 ssi_sram_mgr_const2sram_desc(md5_init, sram_buff_ofs,
2144 ARRAY_SIZE(md5_init), larval_seq,
2146 rc = send_request_init(drvdata, larval_seq, larval_seq_len);
2147 if (unlikely(rc != 0))
2148 goto init_digest_const_err;
2149 sram_buff_ofs += sizeof(md5_init);
2152 ssi_sram_mgr_const2sram_desc(sha1_init, sram_buff_ofs,
2153 ARRAY_SIZE(sha1_init), larval_seq,
2155 rc = send_request_init(drvdata, larval_seq, larval_seq_len);
2156 if (unlikely(rc != 0))
2157 goto init_digest_const_err;
2158 sram_buff_ofs += sizeof(sha1_init);
2161 ssi_sram_mgr_const2sram_desc(sha224_init, sram_buff_ofs,
2162 ARRAY_SIZE(sha224_init), larval_seq,
2164 rc = send_request_init(drvdata, larval_seq, larval_seq_len);
2165 if (unlikely(rc != 0))
2166 goto init_digest_const_err;
2167 sram_buff_ofs += sizeof(sha224_init);
2170 ssi_sram_mgr_const2sram_desc(sha256_init, sram_buff_ofs,
2171 ARRAY_SIZE(sha256_init), larval_seq,
2173 rc = send_request_init(drvdata, larval_seq, larval_seq_len);
2174 if (unlikely(rc != 0))
2175 goto init_digest_const_err;
2176 sram_buff_ofs += sizeof(sha256_init);
2179 #if (DX_DEV_SHA_MAX > 256)
2180 /* We are forced to swap each double-word larval before copying to sram */
2181 for (i = 0; i < ARRAY_SIZE(sha384_init); i++) {
/* Split each 64-bit larval word into two u32 halves, written in
 * swapped order (high half first).
 * NOTE(review): this pointer type-punning is endianness-dependent —
 * presumably correct only for little-endian hosts; confirm. */
2182 const u32 const0 = ((u32 *)((u64 *)&sha384_init[i]))[1];
2183 const u32 const1 = ((u32 *)((u64 *)&sha384_init[i]))[0];
2185 ssi_sram_mgr_const2sram_desc(&const0, sram_buff_ofs, 1,
2186 larval_seq, &larval_seq_len);
2187 sram_buff_ofs += sizeof(u32);
2188 ssi_sram_mgr_const2sram_desc(&const1, sram_buff_ofs, 1,
2189 larval_seq, &larval_seq_len);
2190 sram_buff_ofs += sizeof(u32);
2192 rc = send_request_init(drvdata, larval_seq, larval_seq_len);
2193 if (unlikely(rc != 0)) {
2194 SSI_LOG_ERR("send_request() failed (rc = %d)\n", rc);
2195 goto init_digest_const_err;
/* Same per-dword swap for the SHA512 larval digests */
2199 for (i = 0; i < ARRAY_SIZE(sha512_init); i++) {
2200 const u32 const0 = ((u32 *)((u64 *)&sha512_init[i]))[1];
2201 const u32 const1 = ((u32 *)((u64 *)&sha512_init[i]))[0];
2203 ssi_sram_mgr_const2sram_desc(&const0, sram_buff_ofs, 1,
2204 larval_seq, &larval_seq_len);
2205 sram_buff_ofs += sizeof(u32);
2206 ssi_sram_mgr_const2sram_desc(&const1, sram_buff_ofs, 1,
2207 larval_seq, &larval_seq_len);
2208 sram_buff_ofs += sizeof(u32);
2210 rc = send_request_init(drvdata, larval_seq, larval_seq_len);
2211 if (unlikely(rc != 0)) {
2212 SSI_LOG_ERR("send_request() failed (rc = %d)\n", rc);
2213 goto init_digest_const_err;
/* Single exit point for all failure paths above */
2217 init_digest_const_err:
/*
 * ssi_hash_alloc() - one-time hash subsystem setup. Allocates the hash
 * handle, reserves SRAM for the constant digest tables, loads those
 * constants (ssi_hash_init_sram_digest_consts), then registers every
 * template in driver_hash[] with the crypto API — first the HMAC/MAC
 * variant, then (for non-XCBC/CMAC modes) the plain hash variant.
 *
 * \param drvdata  driver context; on success drvdata->hash_handle is set
 * \return 0 on success, negative error code on failure (handle freed)
 *
 * NOTE(review): the error-path labels/gotos and the `rc`/`alg`
 * declarations are not visible in this excerpt.
 */
2221 int ssi_hash_alloc(struct ssi_drvdata *drvdata)
2223 struct ssi_hash_handle *hash_handle;
2224 ssi_sram_addr_t sram_buff;
2225 u32 sram_size_to_alloc;
2229 hash_handle = kzalloc(sizeof(*hash_handle), GFP_KERNEL);
2231 SSI_LOG_ERR("kzalloc failed to allocate %zu B\n",
2232 sizeof(*hash_handle));
2237 drvdata->hash_handle = hash_handle;
/* Total SRAM needed: digest-len table(s) plus all larval digests.
 * Must match the write sequence in ssi_hash_init_sram_digest_consts(). */
2239 sram_size_to_alloc = sizeof(digest_len_init) +
2240 #if (DX_DEV_SHA_MAX > 256)
2241 sizeof(digest_len_sha512_init) +
2242 sizeof(sha384_init) +
2243 sizeof(sha512_init) +
2247 sizeof(sha224_init) +
2248 sizeof(sha256_init);
2250 sram_buff = ssi_sram_mgr_alloc(drvdata, sram_size_to_alloc);
2251 if (sram_buff == NULL_SRAM_ADDR) {
2252 SSI_LOG_ERR("SRAM pool exhausted\n");
2257 /* The initial digest-len offset */
2258 hash_handle->digest_len_sram_addr = sram_buff;
2260 /*must be set before the alg registration as it is being used there*/
2261 rc = ssi_hash_init_sram_digest_consts(drvdata);
2262 if (unlikely(rc != 0)) {
2263 SSI_LOG_ERR("Init digest CONST failed (rc=%d)\n", rc);
2267 INIT_LIST_HEAD(&hash_handle->hash_list);
2269 /* ahash registration */
2270 for (alg = 0; alg < ARRAY_SIZE(driver_hash); alg++) {
2271 struct ssi_hash_alg *t_alg;
2272 int hw_mode = driver_hash[alg].hw_mode;
2274 /* register hmac version */
2275 t_alg = ssi_hash_create_alg(&driver_hash[alg], true);
2276 if (IS_ERR(t_alg)) {
2277 rc = PTR_ERR(t_alg);
2278 SSI_LOG_ERR("%s alg allocation failed\n",
2279 driver_hash[alg].driver_name);
2282 t_alg->drvdata = drvdata;
2284 rc = crypto_register_ahash(&t_alg->ahash_alg);
2286 SSI_LOG_ERR("%s alg registration failed\n",
2287 driver_hash[alg].driver_name);
/* Track registered algs so ssi_hash_free() can unregister them */
2291 list_add_tail(&t_alg->entry,
2292 &hash_handle->hash_list);
/* XCBC-MAC/CMAC are keyed-only modes — no plain-hash variant exists */
2295 if ((hw_mode == DRV_CIPHER_XCBC_MAC) ||
2296 (hw_mode == DRV_CIPHER_CMAC))
2299 /* register hash version */
2300 t_alg = ssi_hash_create_alg(&driver_hash[alg], false);
2301 if (IS_ERR(t_alg)) {
2302 rc = PTR_ERR(t_alg);
2303 SSI_LOG_ERR("%s alg allocation failed\n",
2304 driver_hash[alg].driver_name);
2307 t_alg->drvdata = drvdata;
2309 rc = crypto_register_ahash(&t_alg->ahash_alg);
2311 SSI_LOG_ERR("%s alg registration failed\n",
2312 driver_hash[alg].driver_name);
2316 list_add_tail(&t_alg->entry, &hash_handle->hash_list);
/* Error path: release the handle and clear the back-pointer */
2323 kfree(drvdata->hash_handle);
2324 drvdata->hash_handle = NULL;
/*
 * ssi_hash_free() - teardown counterpart of ssi_hash_alloc(). Unregisters
 * every ahash alg tracked on hash_handle->hash_list and detaches the
 * handle from drvdata.
 *
 * NOTE(review): the kfree() of each list entry and of hash_handle itself
 * is not visible in this excerpt — verify against the full file that the
 * allocations from ssi_hash_alloc() are released here.
 */
2328 int ssi_hash_free(struct ssi_drvdata *drvdata)
2330 struct ssi_hash_alg *t_hash_alg, *hash_n;
2331 struct ssi_hash_handle *hash_handle = drvdata->hash_handle;
/* _safe variant: entries are removed while iterating */
2334 list_for_each_entry_safe(t_hash_alg, hash_n, &hash_handle->hash_list, entry) {
2335 crypto_unregister_ahash(&t_hash_alg->ahash_alg);
2336 list_del(&t_hash_alg->entry);
2341 drvdata->hash_handle = NULL;
/*
 * ssi_hash_create_xcbc_setup() - append the AES-XCBC-MAC setup descriptors
 * to desc[]: load derived keys K1 (as the AES key), K2 and K3 (into HW
 * state slots 1 and 2), then the running MAC digest into state slot 0.
 * All keys live in the ctx's opad_tmp_keys DMA buffer at fixed offsets.
 *
 * \param areq      ahash request (source of per-request state/ctx)
 * \param desc      descriptor array being built
 * \param seq_size  in/out: current sequence length
 *
 * NOTE(review): the idx++ after each descriptor and the final
 * `*seq_size = idx;` write-back are not visible in this excerpt.
 */
2346 static void ssi_hash_create_xcbc_setup(struct ahash_request *areq,
2347 struct cc_hw_desc desc[],
2348 unsigned int *seq_size)
2350 unsigned int idx = *seq_size;
2351 struct ahash_req_ctx *state = ahash_request_ctx(areq);
2352 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
2353 struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
2355 /* Setup XCBC MAC K1 */
2356 hw_desc_init(&desc[idx]);
2357 set_din_type(&desc[idx], DMA_DLLI, (ctx->opad_tmp_keys_dma_addr +
2358 XCBC_MAC_K1_OFFSET),
2359 CC_AES_128_BIT_KEY_SIZE, NS_BIT);
2360 set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
2361 set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
2362 set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
2363 set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
2364 set_flow_mode(&desc[idx], S_DIN_to_AES);
2367 /* Setup XCBC MAC K2 */
2368 hw_desc_init(&desc[idx]);
2369 set_din_type(&desc[idx], DMA_DLLI, (ctx->opad_tmp_keys_dma_addr +
2370 XCBC_MAC_K2_OFFSET),
2371 CC_AES_128_BIT_KEY_SIZE, NS_BIT);
2372 set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
2373 set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
2374 set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
2375 set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
2376 set_flow_mode(&desc[idx], S_DIN_to_AES);
2379 /* Setup XCBC MAC K3 */
2380 hw_desc_init(&desc[idx]);
2381 set_din_type(&desc[idx], DMA_DLLI, (ctx->opad_tmp_keys_dma_addr +
2382 XCBC_MAC_K3_OFFSET),
2383 CC_AES_128_BIT_KEY_SIZE, NS_BIT);
2384 set_setup_mode(&desc[idx], SETUP_LOAD_STATE2);
2385 set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
2386 set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
2387 set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
2388 set_flow_mode(&desc[idx], S_DIN_to_AES);
2391 /* Loading MAC state */
2392 hw_desc_init(&desc[idx]);
2393 set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
2394 CC_AES_BLOCK_SIZE, NS_BIT);
2395 set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
2396 set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
2397 set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
2398 set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
2399 set_flow_mode(&desc[idx], S_DIN_to_AES);
/*
 * ssi_hash_create_cmac_setup() - append the AES-CMAC setup descriptors to
 * desc[]: load the user key from the opad_tmp_keys DMA buffer, then the
 * running MAC digest into HW state slot 0.
 *
 * \param areq      ahash request (source of per-request state/ctx)
 * \param desc      descriptor array being built
 * \param seq_size  in/out: current sequence length
 *
 * NOTE(review): the idx++ increments and the final `*seq_size = idx;`
 * write-back are not visible in this excerpt.
 */
2404 static void ssi_hash_create_cmac_setup(struct ahash_request *areq,
2405 struct cc_hw_desc desc[],
2406 unsigned int *seq_size)
2408 unsigned int idx = *seq_size;
2409 struct ahash_req_ctx *state = ahash_request_ctx(areq);
2410 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
2411 struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
2413 /* Setup CMAC Key */
2414 hw_desc_init(&desc[idx]);
/* 24-byte (AES-192) keys are stored padded to AES_MAX_KEY_SIZE in the
 * DMA buffer, so the DIN length differs from key_params.keylen there */
2415 set_din_type(&desc[idx], DMA_DLLI, ctx->opad_tmp_keys_dma_addr,
2416 ((ctx->key_params.keylen == 24) ? AES_MAX_KEY_SIZE :
2417 ctx->key_params.keylen), NS_BIT);
2418 set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
2419 set_cipher_mode(&desc[idx], DRV_CIPHER_CMAC);
2420 set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
2421 set_key_size_aes(&desc[idx], ctx->key_params.keylen);
2422 set_flow_mode(&desc[idx], S_DIN_to_AES);
2425 /* Load MAC state */
2426 hw_desc_init(&desc[idx]);
2427 set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
2428 CC_AES_BLOCK_SIZE, NS_BIT);
2429 set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
2430 set_cipher_mode(&desc[idx], DRV_CIPHER_CMAC);
2431 set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
2432 set_key_size_aes(&desc[idx], ctx->key_params.keylen);
2433 set_flow_mode(&desc[idx], S_DIN_to_AES);
/*
 * ssi_hash_create_data_desc() - append the data-input descriptor(s) for
 * the request's mapped buffer. Three cases by DMA buffer type:
 *  - SSI_DMA_BUF_DLLI: one direct descriptor for the single SG entry;
 *  - SSI_DMA_BUF_NULL: no data, nothing appended;
 *  - otherwise (MLLI): BYPASS-copy the MLLI table into SRAM, then one
 *    DMA_MLLI descriptor referencing it.
 *
 * \param areq_ctx          per-request context (buffer type, SG, MLLI params)
 * \param ctx               hash ctx (for drvdata->mlli_sram_addr)
 * \param flow_mode         HW flow for the data descriptor
 * \param desc              descriptor array being built
 * \param is_not_last_data  true if more data follows; marks the last
 *                          appended descriptor as not-last
 * \param seq_size          in/out: current sequence length
 *
 * NOTE(review): idx++ increments, else/return lines and the final
 * `*seq_size = idx;` are not visible in this excerpt.
 */
2438 static void ssi_hash_create_data_desc(struct ahash_req_ctx *areq_ctx,
2439 struct ssi_hash_ctx *ctx,
2440 unsigned int flow_mode,
2441 struct cc_hw_desc desc[],
2442 bool is_not_last_data,
2443 unsigned int *seq_size)
2445 unsigned int idx = *seq_size;
2447 if (likely(areq_ctx->data_dma_buf_type == SSI_DMA_BUF_DLLI)) {
2448 hw_desc_init(&desc[idx]);
2449 set_din_type(&desc[idx], DMA_DLLI,
2450 sg_dma_address(areq_ctx->curr_sg),
2451 areq_ctx->curr_sg->length, NS_BIT);
2452 set_flow_mode(&desc[idx], flow_mode);
2455 if (areq_ctx->data_dma_buf_type == SSI_DMA_BUF_NULL) {
2456 SSI_LOG_DEBUG(" NULL mode\n");
2457 /* nothing to build */
/* MLLI case: first copy the MLLI link table into SRAM via BYPASS... */
2461 hw_desc_init(&desc[idx]);
2462 set_din_type(&desc[idx], DMA_DLLI,
2463 areq_ctx->mlli_params.mlli_dma_addr,
2464 areq_ctx->mlli_params.mlli_len, NS_BIT);
2465 set_dout_sram(&desc[idx], ctx->drvdata->mlli_sram_addr,
2466 areq_ctx->mlli_params.mlli_len);
2467 set_flow_mode(&desc[idx], BYPASS);
/* ...then feed the data through the MLLI table just placed in SRAM */
2470 hw_desc_init(&desc[idx]);
2471 set_din_type(&desc[idx], DMA_MLLI,
2472 ctx->drvdata->mlli_sram_addr,
2473 areq_ctx->mlli_nents, NS_BIT);
2474 set_flow_mode(&desc[idx], flow_mode);
/* Mark the last appended descriptor when more data will follow */
2477 if (is_not_last_data)
2478 set_din_not_last_indication(&desc[(idx - 1)]);
2479 /* return updated desc sequence size */
/*!
 * Gets the address of the initial digest in SRAM
 * according to the given hash mode
 *
 * \param drvdata Driver data (opaque struct ssi_drvdata pointer)
 * \param mode The Hash mode. Supported modes: MD5/SHA1/SHA224/SHA256
 *             (plus SHA384/SHA512 when DX_DEV_SHA_MAX > 256)
 *
 * \return u32 The address of the initial digest in SRAM
 */
2492 ssi_sram_addr_t ssi_ahash_get_larval_digest_sram_addr(void *drvdata, u32 mode)
2494 struct ssi_drvdata *_drvdata = (struct ssi_drvdata *)drvdata;
2495 struct ssi_hash_handle *hash_handle = _drvdata->hash_handle;
/* Each case returns base + sizes of all tables laid out before it;
 * must match the write order in ssi_hash_init_sram_digest_consts().
 * NOTE(review): several case labels and sizeof() terms are missing
 * from this excerpt. */
2501 return (hash_handle->larval_digest_sram_addr);
2503 return (hash_handle->larval_digest_sram_addr +
2505 case DRV_HASH_SHA224:
2506 return (hash_handle->larval_digest_sram_addr +
2509 case DRV_HASH_SHA256:
2510 return (hash_handle->larval_digest_sram_addr +
2513 sizeof(sha224_init));
2514 #if (DX_DEV_SHA_MAX > 256)
2515 case DRV_HASH_SHA384:
2516 return (hash_handle->larval_digest_sram_addr +
2519 sizeof(sha224_init) +
2520 sizeof(sha256_init));
2521 case DRV_HASH_SHA512:
2522 return (hash_handle->larval_digest_sram_addr +
2525 sizeof(sha224_init) +
2526 sizeof(sha256_init) +
2527 sizeof(sha384_init));
2530 SSI_LOG_ERR("Invalid hash mode (%d)\n", mode);
2533 /*This is valid wrong value to avoid kernel crash*/
2534 return hash_handle->larval_digest_sram_addr;
/*
 * ssi_ahash_get_initial_digest_len_sram_addr() - return the SRAM address
 * of the digest-length constant for the given hash mode: the base table
 * for MD5/SHA1/SHA224/SHA256, the SHA512 table (laid out immediately
 * after it) for SHA384/SHA512. Falls back to the base address for
 * unknown modes rather than failing.
 */
2538 ssi_ahash_get_initial_digest_len_sram_addr(void *drvdata, u32 mode)
2540 struct ssi_drvdata *_drvdata = (struct ssi_drvdata *)drvdata;
2541 struct ssi_hash_handle *hash_handle = _drvdata->hash_handle;
2542 ssi_sram_addr_t digest_len_addr = hash_handle->digest_len_sram_addr;
/* NOTE(review): the switch(mode) line and some case labels are missing
 * from this excerpt. */
2546 case DRV_HASH_SHA224:
2547 case DRV_HASH_SHA256:
2549 return digest_len_addr;
2550 #if (DX_DEV_SHA_MAX > 256)
2551 case DRV_HASH_SHA384:
2552 case DRV_HASH_SHA512:
/* SHA384/512 length constants sit right after the base table */
2553 return digest_len_addr + sizeof(digest_len_init);
2556 return digest_len_addr; /*to avoid kernel crash*/