2 * Copyright (C) 2012-2017 ARM Limited or its affiliates.
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
13 * You should have received a copy of the GNU General Public License
14 * along with this program; if not, see <http://www.gnu.org/licenses/>.
17 #include <linux/kernel.h>
18 #include <linux/module.h>
19 #include <linux/platform_device.h>
20 #include <crypto/algapi.h>
21 #include <crypto/internal/skcipher.h>
22 #include <crypto/internal/hash.h>
23 #include <crypto/internal/aead.h>
24 #include <crypto/sha.h>
25 #include <crypto/ctr.h>
26 #include <crypto/authenc.h>
27 #include <crypto/aes.h>
28 #include <crypto/des.h>
29 #include <linux/rtnetlink.h>
30 #include <linux/version.h>
31 #include "ssi_config.h"
32 #include "ssi_driver.h"
33 #include "ssi_buffer_mgr.h"
35 #include "ssi_request_mgr.h"
37 #include "ssi_sysfs.h"
38 #include "ssi_sram_mgr.h"
/* Shorthand for the aead member of the crypto template union. */
40 #define template_aead template_u.aead
/* Max number of HW descriptors used by the setkey / process sequences. */
42 #define MAX_AEAD_SETKEY_SEQ 12
43 #define MAX_AEAD_PROCESS_SEQ 23
/* Largest HMAC digest/block supported here is SHA-256's. */
45 #define MAX_HMAC_DIGEST_SIZE (SHA256_DIGEST_SIZE)
46 #define MAX_HMAC_BLOCK_SIZE (SHA256_BLOCK_SIZE)
/* RFC 4309 CCM carries a 3-byte implicit nonce at the end of the key blob. */
48 #define AES_CCM_RFC4309_NONCE_SIZE 3
49 #define MAX_NONCE_SIZE CTR_RFC3686_NONCE_SIZE
51 /* Value of each ICV_CMP byte (of 8) in case of success */
52 #define ICV_VERIF_OK 0x01
/*
 * NOTE(review): lines are missing from this extract - the fields below appear
 * to span several definitions (the driver-wide AEAD handle, the per-tfm
 * context and its HMAC/XCBC auth-state structs) whose closing braces and
 * intervening declarations are not visible here. Confirm against the full
 * source before editing.
 */
54 struct ssi_aead_handle {
/* SRAM scratch area used by the HMAC digest-scheme sequences. */
55 ssi_sram_addr_t sram_workspace_addr;
/* Registered AEAD algorithm instances owned by this handle. */
56 struct list_head aead_list;
/* HMAC state: precomputed IPAD/OPAD digests and padded auth key. */
61 u8 *ipad_opad; /* IPAD, OPAD*/
62 dma_addr_t padded_authkey_dma_addr;
63 dma_addr_t ipad_opad_dma_addr;
/* XCBC-MAC state: derived K1/K2/K3 keys (contiguous DMA buffer). */
67 u8 *xcbc_keys; /* K1,K2,K3 */
68 dma_addr_t xcbc_keys_dma_addr;
/* Per-tfm context fields. */
72 struct ssi_drvdata *drvdata;
73 u8 ctr_nonce[MAX_NONCE_SIZE]; /* used for ctr3686 iv and aes ccm */
75 dma_addr_t enckey_dma_addr;
77 struct cc_hmac_s hmac;
78 struct cc_xcbc_s xcbc;
80 unsigned int enc_keylen;
81 unsigned int auth_keylen;
82 unsigned int authsize; /* Actual (reduced?) size of the MAC/ICv */
83 enum drv_cipher_mode cipher_mode;
84 enum cc_flow_mode flow_mode;
85 enum drv_hash_mode auth_mode;
88 static inline bool valid_assoclen(struct aead_request *req)
90 return ((req->assoclen == 16) || (req->assoclen == 20));
/*
 * ssi_aead_exit() - tfm teardown: release the DMA-coherent key buffers that
 * ssi_aead_init() allocated (enckey always; XCBC keys or HMAC ipad/opad +
 * padded authkey depending on ctx->auth_mode).
 *
 * NOTE(review): closing braces and some lines are missing from this extract
 * (e.g. the dma_free_coherent() calls below lack their data-pointer argument
 * lines); compare with the full source before modifying.
 */
93 static void ssi_aead_exit(struct crypto_aead *tfm)
95 struct device *dev = NULL;
96 struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
98 SSI_LOG_DEBUG("Clearing context @%p for %s\n",
99 crypto_aead_ctx(tfm), crypto_tfm_alg_name(&tfm->base));
101 dev = &ctx->drvdata->plat_dev->dev;
102 /* Unmap enckey buffer */
104 dma_free_coherent(dev, AES_MAX_KEY_SIZE, ctx->enckey, ctx->enckey_dma_addr);
105 SSI_LOG_DEBUG("Freed enckey DMA buffer enckey_dma_addr=%pad\n",
106 ctx->enckey_dma_addr);
/* Clear the stale handle so a double teardown cannot re-free it. */
107 ctx->enckey_dma_addr = 0;
111 if (ctx->auth_mode == DRV_HASH_XCBC_MAC) { /* XCBC authetication */
112 struct cc_xcbc_s *xcbc = &ctx->auth_state.xcbc;
114 if (xcbc->xcbc_keys) {
/* K1+K2+K3 live in one 3*16-byte coherent buffer. */
115 dma_free_coherent(dev, CC_AES_128_BIT_KEY_SIZE * 3,
117 xcbc->xcbc_keys_dma_addr);
119 SSI_LOG_DEBUG("Freed xcbc_keys DMA buffer xcbc_keys_dma_addr=%pad\n",
120 xcbc->xcbc_keys_dma_addr);
121 xcbc->xcbc_keys_dma_addr = 0;
122 xcbc->xcbc_keys = NULL;
123 } else if (ctx->auth_mode != DRV_HASH_NULL) { /* HMAC auth. */
124 struct cc_hmac_s *hmac = &ctx->auth_state.hmac;
126 if (hmac->ipad_opad) {
/* IPAD and OPAD digests are stored back to back. */
127 dma_free_coherent(dev, 2 * MAX_HMAC_DIGEST_SIZE,
129 hmac->ipad_opad_dma_addr);
130 SSI_LOG_DEBUG("Freed ipad_opad DMA buffer ipad_opad_dma_addr=%pad\n",
131 hmac->ipad_opad_dma_addr);
132 hmac->ipad_opad_dma_addr = 0;
133 hmac->ipad_opad = NULL;
135 if (hmac->padded_authkey) {
136 dma_free_coherent(dev, MAX_HMAC_BLOCK_SIZE,
137 hmac->padded_authkey,
138 hmac->padded_authkey_dma_addr);
139 SSI_LOG_DEBUG("Freed padded_authkey DMA buffer padded_authkey_dma_addr=%pad\n",
140 hmac->padded_authkey_dma_addr);
141 hmac->padded_authkey_dma_addr = 0;
142 hmac->padded_authkey = NULL;
/*
 * ssi_aead_init() - tfm constructor: copy the algorithm's cipher/flow/auth
 * modes into the per-tfm context and allocate the DMA-coherent buffers the
 * HW sequences need (enckey always; XCBC key triplet or HMAC ipad/opad +
 * padded authkey depending on auth_mode).
 *
 * NOTE(review): error paths, return statements and several argument lines
 * are missing from this extract; verify cleanup-on-failure ordering against
 * the full source.
 */
147 static int ssi_aead_init(struct crypto_aead *tfm)
150 struct aead_alg *alg = crypto_aead_alg(tfm);
151 struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
152 struct ssi_crypto_alg *ssi_alg =
153 container_of(alg, struct ssi_crypto_alg, aead_alg);
154 SSI_LOG_DEBUG("Initializing context @%p for %s\n", ctx, crypto_tfm_alg_name(&tfm->base));
156 /* Initialize modes in instance */
157 ctx->cipher_mode = ssi_alg->cipher_mode;
158 ctx->flow_mode = ssi_alg->flow_mode;
159 ctx->auth_mode = ssi_alg->auth_mode;
160 ctx->drvdata = ssi_alg->drvdata;
161 dev = &ctx->drvdata->plat_dev->dev;
/* Per-request state is carried in struct aead_req_ctx. */
162 crypto_aead_set_reqsize(tfm, sizeof(struct aead_req_ctx));
164 /* Allocate key buffer, cache line aligned */
165 ctx->enckey = dma_alloc_coherent(dev, AES_MAX_KEY_SIZE,
166 &ctx->enckey_dma_addr, GFP_KERNEL);
168 SSI_LOG_ERR("Failed allocating key buffer\n");
171 SSI_LOG_DEBUG("Allocated enckey buffer in context ctx->enckey=@%p\n", ctx->enckey);
173 /* Set default authlen value */
175 if (ctx->auth_mode == DRV_HASH_XCBC_MAC) { /* XCBC authetication */
176 struct cc_xcbc_s *xcbc = &ctx->auth_state.xcbc;
177 const unsigned int key_size = CC_AES_128_BIT_KEY_SIZE * 3;
179 /* Allocate dma-coherent buffer for XCBC's K1+K2+K3 */
180 /* (and temporary for user key - up to 256b) */
181 xcbc->xcbc_keys = dma_alloc_coherent(dev, key_size,
182 &xcbc->xcbc_keys_dma_addr,
184 if (!xcbc->xcbc_keys) {
185 SSI_LOG_ERR("Failed allocating buffer for XCBC keys\n");
188 } else if (ctx->auth_mode != DRV_HASH_NULL) { /* HMAC authentication */
189 struct cc_hmac_s *hmac = &ctx->auth_state.hmac;
190 const unsigned int digest_size = 2 * MAX_HMAC_DIGEST_SIZE;
191 dma_addr_t *pkey_dma = &hmac->padded_authkey_dma_addr;
193 /* Allocate dma-coherent buffer for IPAD + OPAD */
194 hmac->ipad_opad = dma_alloc_coherent(dev, digest_size,
195 &hmac->ipad_opad_dma_addr,
198 if (!hmac->ipad_opad) {
199 SSI_LOG_ERR("Failed allocating IPAD/OPAD buffer\n");
203 SSI_LOG_DEBUG("Allocated authkey buffer in context ctx->authkey=@%p\n",
206 hmac->padded_authkey = dma_alloc_coherent(dev,
211 if (!hmac->padded_authkey) {
212 SSI_LOG_ERR("failed to allocate padded_authkey\n");
/* Non-authenc (e.g. CCM/GCM): no auth-key state to allocate. */
216 ctx->auth_state.hmac.ipad_opad = NULL;
217 ctx->auth_state.hmac.padded_authkey = NULL;
/*
 * ssi_aead_complete() - completion callback for an AEAD HW request: unmap the
 * request's DMA buffers, restore the caller's IV pointer, verify the ICV on
 * decrypt (zeroing the output on mismatch so plaintext is never revealed),
 * copy a fragmented ICV back on encrypt, return a generated IV if one was
 * requested, and finally complete the crypto request.
 *
 * NOTE(review): the declaration of `err` and some braces are missing from
 * this extract.
 */
227 static void ssi_aead_complete(struct device *dev, void *ssi_req, void __iomem *cc_base)
229 struct aead_request *areq = (struct aead_request *)ssi_req;
230 struct aead_req_ctx *areq_ctx = aead_request_ctx(areq);
231 struct crypto_aead *tfm = crypto_aead_reqtfm(ssi_req);
232 struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
235 ssi_buffer_mgr_unmap_aead_request(dev, areq);
237 /* Restore ordinary iv pointer */
238 areq->iv = areq_ctx->backup_iv;
240 if (areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) {
/* Constant ICV comparison of computed MAC vs. received MAC. */
241 if (memcmp(areq_ctx->mac_buf, areq_ctx->icv_virt_addr,
242 ctx->authsize) != 0) {
243 SSI_LOG_DEBUG("Payload authentication failure, "
244 "(auth-size=%d, cipher=%d).\n",
245 ctx->authsize, ctx->cipher_mode);
246 /* In case of payload authentication failure, MUST NOT
247 * revealed the decrypted message --> zero its memory.
249 ssi_buffer_mgr_zero_sgl(areq->dst, areq_ctx->cryptlen);
/* Encrypt path: if HW wrote the ICV into mac_buf (fragmented dst SGL),
 * copy it out to the proper place in the destination scatterlist.
 */
253 if (unlikely(areq_ctx->is_icv_fragmented))
254 ssi_buffer_mgr_copy_scatterlist_portion(
255 areq_ctx->mac_buf, areq_ctx->dst_sgl, areq->cryptlen + areq_ctx->dst_offset,
256 areq->cryptlen + areq_ctx->dst_offset + ctx->authsize, SSI_SG_FROM_BUF);
258 /* If an IV was generated, copy it back to the user provided buffer. */
259 if (areq_ctx->backup_giv) {
260 if (ctx->cipher_mode == DRV_CIPHER_CTR)
261 memcpy(areq_ctx->backup_giv, areq_ctx->ctr_iv + CTR_RFC3686_NONCE_SIZE, CTR_RFC3686_IV_SIZE);
262 else if (ctx->cipher_mode == DRV_CIPHER_CCM)
263 memcpy(areq_ctx->backup_giv, areq_ctx->ctr_iv + CCM_BLOCK_IV_OFFSET, CCM_BLOCK_IV_SIZE);
267 aead_request_complete(areq, err);
/*
 * xcbc_setkey() - build the 4-descriptor HW sequence that derives the three
 * XCBC-MAC subkeys K1/K2/K3: load the user AES key, then ECB-encrypt the
 * constants 0x01.., 0x02.., 0x03.. into consecutive 16-byte slots of the
 * xcbc_keys DMA buffer (RFC 3566 key derivation).
 *
 * NOTE(review): several argument lines (e.g. the NS_BIT of desc[0] and the
 * "+ AES_KEYSIZE_128" offset of desc[2]) and the return statement are
 * missing from this extract.
 */
270 static int xcbc_setkey(struct cc_hw_desc *desc, struct ssi_aead_ctx *ctx)
272 /* Load the AES key */
273 hw_desc_init(&desc[0]);
274 /* We are using for the source/user key the same buffer as for the output keys,
275 * because after this key loading it is not needed anymore
277 set_din_type(&desc[0], DMA_DLLI,
278 ctx->auth_state.xcbc.xcbc_keys_dma_addr, ctx->auth_keylen,
280 set_cipher_mode(&desc[0], DRV_CIPHER_ECB);
281 set_cipher_config0(&desc[0], DRV_CRYPTO_DIRECTION_ENCRYPT);
282 set_key_size_aes(&desc[0], ctx->auth_keylen);
283 set_flow_mode(&desc[0], S_DIN_to_AES);
284 set_setup_mode(&desc[0], SETUP_LOAD_KEY0);
/* K1 = E(K, 0x01 repeated) -> slot 0 */
286 hw_desc_init(&desc[1]);
287 set_din_const(&desc[1], 0x01010101, CC_AES_128_BIT_KEY_SIZE);
288 set_flow_mode(&desc[1], DIN_AES_DOUT);
289 set_dout_dlli(&desc[1], ctx->auth_state.xcbc.xcbc_keys_dma_addr,
290 AES_KEYSIZE_128, NS_BIT, 0);
/* K2 = E(K, 0x02 repeated) -> slot 1 */
292 hw_desc_init(&desc[2]);
293 set_din_const(&desc[2], 0x02020202, CC_AES_128_BIT_KEY_SIZE);
294 set_flow_mode(&desc[2], DIN_AES_DOUT);
295 set_dout_dlli(&desc[2], (ctx->auth_state.xcbc.xcbc_keys_dma_addr
297 AES_KEYSIZE_128, NS_BIT, 0);
/* K3 = E(K, 0x03 repeated) -> slot 2 */
299 hw_desc_init(&desc[3]);
300 set_din_const(&desc[3], 0x03030303, CC_AES_128_BIT_KEY_SIZE);
301 set_flow_mode(&desc[3], DIN_AES_DOUT);
302 set_dout_dlli(&desc[3], (ctx->auth_state.xcbc.xcbc_keys_dma_addr
303 + 2 * AES_KEYSIZE_128),
304 AES_KEYSIZE_128, NS_BIT, 0);
/*
 * hmac_setkey() - build the HW sequence that precomputes the HMAC inner and
 * outer digests: for each of IPAD/OPAD, load the hash IV, zero the length
 * counter, XOR the padded key with the pad constant, hash one block, and
 * store the intermediate digest into ipad_opad (inner at offset 0, outer at
 * digest_size). Supports SHA-1 and SHA-256 only.
 *
 * NOTE(review): loop-index declarations, idx increments and the return
 * statement are missing from this extract.
 */
309 static int hmac_setkey(struct cc_hw_desc *desc, struct ssi_aead_ctx *ctx)
311 unsigned int hmac_pad_const[2] = { HMAC_IPAD_CONST, HMAC_OPAD_CONST };
312 unsigned int digest_ofs = 0;
313 unsigned int hash_mode = (ctx->auth_mode == DRV_HASH_SHA1) ?
314 DRV_HASH_HW_SHA1 : DRV_HASH_HW_SHA256;
315 unsigned int digest_size = (ctx->auth_mode == DRV_HASH_SHA1) ?
316 CC_SHA1_DIGEST_SIZE : CC_SHA256_DIGEST_SIZE;
317 struct cc_hmac_s *hmac = &ctx->auth_state.hmac;
322 /* calc derived HMAC key */
/* Pass 0 computes the IPAD digest, pass 1 the OPAD digest. */
323 for (i = 0; i < 2; i++) {
324 /* Load hash initial state */
325 hw_desc_init(&desc[idx]);
326 set_cipher_mode(&desc[idx], hash_mode);
327 set_din_sram(&desc[idx],
328 ssi_ahash_get_larval_digest_sram_addr(
329 ctx->drvdata, ctx->auth_mode),
331 set_flow_mode(&desc[idx], S_DIN_to_HASH);
332 set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
335 /* Load the hash current length*/
336 hw_desc_init(&desc[idx]);
337 set_cipher_mode(&desc[idx], hash_mode);
338 set_din_const(&desc[idx], 0, HASH_LEN_SIZE);
339 set_flow_mode(&desc[idx], S_DIN_to_HASH);
340 set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
343 /* Prepare ipad key */
/* XOR constant (0x36.. or 0x5c..) applied to the key block below. */
344 hw_desc_init(&desc[idx]);
345 set_xor_val(&desc[idx], hmac_pad_const[i]);
346 set_cipher_mode(&desc[idx], hash_mode);
347 set_flow_mode(&desc[idx], S_DIN_to_HASH);
348 set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
351 /* Perform HASH update */
352 hw_desc_init(&desc[idx]);
353 set_din_type(&desc[idx], DMA_DLLI,
354 hmac->padded_authkey_dma_addr,
355 SHA256_BLOCK_SIZE, NS_BIT);
356 set_cipher_mode(&desc[idx], hash_mode);
357 set_xor_active(&desc[idx]);
358 set_flow_mode(&desc[idx], DIN_HASH);
/* Store the intermediate digest (padding disabled) into ipad_opad. */
362 hw_desc_init(&desc[idx]);
363 set_cipher_mode(&desc[idx], hash_mode);
364 set_dout_dlli(&desc[idx],
365 (hmac->ipad_opad_dma_addr + digest_ofs),
366 digest_size, NS_BIT, 0);
367 set_flow_mode(&desc[idx], S_HASH_to_DOUT);
368 set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
369 set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
372 digest_ofs += digest_size;
/*
 * validate_keys_sizes() - sanity-check the split key lengths stored in the
 * context: the auth key must match the auth_mode (AES sizes for XCBC, zero
 * for non-authenc modes) and the cipher key must be a legal 3DES or AES
 * size depending on flow_mode. Returns 0 when all checks pass.
 *
 * NOTE(review): several case labels, break statements, error returns and
 * closing braces are missing from this extract.
 */
378 static int validate_keys_sizes(struct ssi_aead_ctx *ctx)
380 SSI_LOG_DEBUG("enc_keylen=%u authkeylen=%u\n",
381 ctx->enc_keylen, ctx->auth_keylen);
383 switch (ctx->auth_mode) {
385 case DRV_HASH_SHA256:
387 case DRV_HASH_XCBC_MAC:
/* XCBC accepts any standard AES key size for the MAC key. */
388 if ((ctx->auth_keylen != AES_KEYSIZE_128) &&
389 (ctx->auth_keylen != AES_KEYSIZE_192) &&
390 (ctx->auth_keylen != AES_KEYSIZE_256))
393 case DRV_HASH_NULL: /* Not authenc (e.g., CCM) - no auth_key) */
394 if (ctx->auth_keylen > 0)
398 SSI_LOG_ERR("Invalid auth_mode=%d\n", ctx->auth_mode);
401 /* Check cipher key size */
402 if (unlikely(ctx->flow_mode == S_DIN_to_DES)) {
403 if (ctx->enc_keylen != DES3_EDE_KEY_SIZE) {
404 SSI_LOG_ERR("Invalid cipher(3DES) key size: %u\n",
408 } else { /* Default assumed to be AES ciphers */
409 if ((ctx->enc_keylen != AES_KEYSIZE_128) &&
410 (ctx->enc_keylen != AES_KEYSIZE_192) &&
411 (ctx->enc_keylen != AES_KEYSIZE_256)) {
412 SSI_LOG_ERR("Invalid cipher(AES) key size: %u\n",
418 return 0; /* All tests of keys sizes passed */
421 /* This function prepers the user key so it can pass to the hmac processing
422 * (copy to intenral buffer or hash in case of key longer than block
/*
 * ssi_get_plain_hmac_key() - prepare the HMAC key in padded_authkey:
 *  - key longer than the hash block: hash it down to digest size, then
 *    zero-pad to block size;
 *  - key up to block size: copy it and zero-pad the remainder;
 *  - empty key: fill the whole block with zeros.
 * The work is submitted as one synchronous HW descriptor sequence.
 *
 * NOTE(review): case labels, idx increments, some argument lines, the `rc`
 * declaration and the return statement are missing from this extract.
 */
425 ssi_get_plain_hmac_key(struct crypto_aead *tfm, const u8 *key, unsigned int keylen)
427 dma_addr_t key_dma_addr = 0;
428 struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
429 struct device *dev = &ctx->drvdata->plat_dev->dev;
430 u32 larval_addr = ssi_ahash_get_larval_digest_sram_addr(
431 ctx->drvdata, ctx->auth_mode);
432 struct ssi_crypto_req ssi_req = {};
433 unsigned int blocksize;
434 unsigned int digestsize;
435 unsigned int hashmode;
436 unsigned int idx = 0;
438 struct cc_hw_desc desc[MAX_AEAD_SETKEY_SEQ];
439 dma_addr_t padded_authkey_dma_addr =
440 ctx->auth_state.hmac.padded_authkey_dma_addr;
442 switch (ctx->auth_mode) { /* auth_key required and >0 */
444 blocksize = SHA1_BLOCK_SIZE;
445 digestsize = SHA1_DIGEST_SIZE;
446 hashmode = DRV_HASH_HW_SHA1;
448 case DRV_HASH_SHA256:
450 blocksize = SHA256_BLOCK_SIZE;
451 digestsize = SHA256_DIGEST_SIZE;
452 hashmode = DRV_HASH_HW_SHA256;
455 if (likely(keylen != 0)) {
/* Map the caller's key for device reads during the sequence. */
456 key_dma_addr = dma_map_single(dev, (void *)key, keylen, DMA_TO_DEVICE);
457 if (unlikely(dma_mapping_error(dev, key_dma_addr))) {
458 SSI_LOG_ERR("Mapping key va=0x%p len=%u for"
459 " DMA failed\n", key, keylen);
462 if (keylen > blocksize) {
463 /* Load hash initial state */
464 hw_desc_init(&desc[idx]);
465 set_cipher_mode(&desc[idx], hashmode);
466 set_din_sram(&desc[idx], larval_addr, digestsize);
467 set_flow_mode(&desc[idx], S_DIN_to_HASH);
468 set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
471 /* Load the hash current length*/
472 hw_desc_init(&desc[idx]);
473 set_cipher_mode(&desc[idx], hashmode);
474 set_din_const(&desc[idx], 0, HASH_LEN_SIZE);
475 set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
476 set_flow_mode(&desc[idx], S_DIN_to_HASH);
477 set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
/* Hash the oversized key down to digest size. */
480 hw_desc_init(&desc[idx]);
481 set_din_type(&desc[idx], DMA_DLLI,
482 key_dma_addr, keylen, NS_BIT);
483 set_flow_mode(&desc[idx], DIN_HASH);
/* Write the digest to the start of padded_authkey. */
487 hw_desc_init(&desc[idx]);
488 set_cipher_mode(&desc[idx], hashmode);
489 set_dout_dlli(&desc[idx], padded_authkey_dma_addr,
490 digestsize, NS_BIT, 0);
491 set_flow_mode(&desc[idx], S_HASH_to_DOUT);
492 set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
493 set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
494 set_cipher_config0(&desc[idx],
495 HASH_DIGEST_RESULT_LITTLE_ENDIAN);
/* Zero-pad the remainder of the block. */
498 hw_desc_init(&desc[idx]);
499 set_din_const(&desc[idx], 0, (blocksize - digestsize));
500 set_flow_mode(&desc[idx], BYPASS);
501 set_dout_dlli(&desc[idx], (padded_authkey_dma_addr +
502 digestsize), (blocksize - digestsize),
/* Key fits in a block: copy it verbatim via BYPASS ... */
506 hw_desc_init(&desc[idx]);
507 set_din_type(&desc[idx], DMA_DLLI, key_dma_addr,
509 set_flow_mode(&desc[idx], BYPASS);
510 set_dout_dlli(&desc[idx], padded_authkey_dma_addr,
/* ... then zero the unused tail, if any. */
514 if ((blocksize - keylen) != 0) {
515 hw_desc_init(&desc[idx]);
516 set_din_const(&desc[idx], 0,
517 (blocksize - keylen));
518 set_flow_mode(&desc[idx], BYPASS);
519 set_dout_dlli(&desc[idx],
520 (padded_authkey_dma_addr +
522 (blocksize - keylen), NS_BIT, 0);
/* keylen == 0: zero the entire padded key block. */
527 hw_desc_init(&desc[idx]);
528 set_din_const(&desc[idx], 0, (blocksize - keylen));
529 set_flow_mode(&desc[idx], BYPASS);
530 set_dout_dlli(&desc[idx], padded_authkey_dma_addr,
531 blocksize, NS_BIT, 0);
/* Submit synchronously, then release the key mapping. */
535 rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 0);
536 if (unlikely(rc != 0))
537 SSI_LOG_ERR("send_request() failed (rc=%d)\n", rc);
539 if (likely(key_dma_addr != 0))
540 dma_unmap_single(dev, key_dma_addr, keylen, DMA_TO_DEVICE);
/*
 * ssi_aead_setkey() - .setkey entry point. For authenc() algorithms the key
 * blob is an rtattr-framed authenc key (enc key length parameter, then auth
 * key, then enc key); split it, extract the RFC 3686 nonce for CTR mode,
 * validate sizes, copy the enc key into the DMA buffer, derive the auth key
 * material (XCBC subkeys or plain HMAC key), and submit the key-setup HW
 * sequence. Returns 0 on success.
 *
 * NOTE(review): several error-path gotos/returns, the badkey label body and
 * closing braces are missing from this extract.
 */
546 ssi_aead_setkey(struct crypto_aead *tfm, const u8 *key, unsigned int keylen)
548 struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
549 struct rtattr *rta = (struct rtattr *)key;
550 struct ssi_crypto_req ssi_req = {};
551 struct crypto_authenc_key_param *param;
552 struct cc_hw_desc desc[MAX_AEAD_SETKEY_SEQ];
553 int seq_len = 0, rc = -EINVAL;
555 SSI_LOG_DEBUG("Setting key in context @%p for %s. key=%p keylen=%u\n",
556 ctx, crypto_tfm_alg_name(crypto_aead_tfm(tfm)),
559 /* STAT_PHASE_0: Init and sanity checks */
561 if (ctx->auth_mode != DRV_HASH_NULL) { /* authenc() alg. */
/* Parse the crypto_authenc rtattr framing around the key blob. */
562 if (!RTA_OK(rta, keylen))
564 if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
566 if (RTA_PAYLOAD(rta) < sizeof(*param))
568 param = RTA_DATA(rta);
569 ctx->enc_keylen = be32_to_cpu(param->enckeylen);
570 key += RTA_ALIGN(rta->rta_len);
571 keylen -= RTA_ALIGN(rta->rta_len);
572 if (keylen < ctx->enc_keylen)
574 ctx->auth_keylen = keylen - ctx->enc_keylen;
576 if (ctx->cipher_mode == DRV_CIPHER_CTR) {
577 /* the nonce is stored in bytes at end of key */
578 if (ctx->enc_keylen <
579 (AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE))
581 /* Copy nonce from last 4 bytes in CTR key to
582 * first 4 bytes in CTR IV
584 memcpy(ctx->ctr_nonce, key + ctx->auth_keylen + ctx->enc_keylen -
585 CTR_RFC3686_NONCE_SIZE, CTR_RFC3686_NONCE_SIZE);
586 /* Set CTR key size */
587 ctx->enc_keylen -= CTR_RFC3686_NONCE_SIZE;
589 } else { /* non-authenc - has just one key */
590 ctx->enc_keylen = keylen;
591 ctx->auth_keylen = 0;
594 rc = validate_keys_sizes(ctx);
595 if (unlikely(rc != 0))
598 /* STAT_PHASE_1: Copy key to ctx */
600 /* Get key material */
601 memcpy(ctx->enckey, key + ctx->auth_keylen, ctx->enc_keylen);
/* A 192-bit AES key is zero-extended to the HW's max key width. */
602 if (ctx->enc_keylen == 24)
603 memset(ctx->enckey + 24, 0, CC_AES_KEY_SIZE_MAX - 24);
604 if (ctx->auth_mode == DRV_HASH_XCBC_MAC) {
605 memcpy(ctx->auth_state.xcbc.xcbc_keys, key, ctx->auth_keylen);
606 } else if (ctx->auth_mode != DRV_HASH_NULL) { /* HMAC */
607 rc = ssi_get_plain_hmac_key(tfm, key, ctx->auth_keylen);
612 /* STAT_PHASE_2: Create sequence */
614 switch (ctx->auth_mode) {
616 case DRV_HASH_SHA256:
617 seq_len = hmac_setkey(desc, ctx);
619 case DRV_HASH_XCBC_MAC:
620 seq_len = xcbc_setkey(desc, ctx);
622 case DRV_HASH_NULL: /* non-authenc modes, e.g., CCM */
623 break; /* No auth. key setup */
625 SSI_LOG_ERR("Unsupported authenc (%d)\n", ctx->auth_mode);
630 /* STAT_PHASE_3: Submit sequence to HW */
632 if (seq_len > 0) { /* For CCM there is no sequence to setup the key */
633 rc = send_request(ctx->drvdata, &ssi_req, desc, seq_len, 0);
634 if (unlikely(rc != 0)) {
635 SSI_LOG_ERR("send_request() failed (rc=%d)\n", rc);
640 /* Update STAT_PHASE_3 */
/* badkey-style failure path: flag the bad key length to the caller. */
644 crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
650 #if SSI_CC_HAS_AES_CCM
/*
 * ssi_rfc4309_ccm_setkey() - RFC 4309 setkey: the last 3 bytes of the key
 * blob are the implicit CCM nonce; stash them in ctx->ctr_nonce and pass the
 * remaining key through the common setkey path.
 *
 * NOTE(review): the keylen adjustment (keylen -= 3) and declarations appear
 * to be on lines missing from this extract.
 */
651 static int ssi_rfc4309_ccm_setkey(struct crypto_aead *tfm, const u8 *key, unsigned int keylen)
653 struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
660 memcpy(ctx->ctr_nonce, key + keylen, 3);
662 rc = ssi_aead_setkey(tfm, key, keylen);
666 #endif /*SSI_CC_HAS_AES_CCM*/
668 static int ssi_aead_setauthsize(
669 struct crypto_aead *authenc,
670 unsigned int authsize)
672 struct ssi_aead_ctx *ctx = crypto_aead_ctx(authenc);
674 /* Unsupported auth. sizes */
675 if ((authsize == 0) ||
676 (authsize > crypto_aead_maxauthsize(authenc))) {
680 ctx->authsize = authsize;
681 SSI_LOG_DEBUG("authlen=%d\n", ctx->authsize);
686 #if SSI_CC_HAS_AES_CCM
/*
 * RFC 4309 restricts the ICV to 8/12/16 bytes; plain CCM allows the even
 * sizes 4..16. Both wrappers validate, then defer to ssi_aead_setauthsize().
 *
 * NOTE(review): the switch statements performing the validation are on lines
 * missing from this extract.
 */
687 static int ssi_rfc4309_ccm_setauthsize(struct crypto_aead *authenc,
688 unsigned int authsize)
699 return ssi_aead_setauthsize(authenc, authsize);
702 static int ssi_ccm_setauthsize(struct crypto_aead *authenc,
703 unsigned int authsize)
718 return ssi_aead_setauthsize(authenc, authsize);
720 #endif /*SSI_CC_HAS_AES_CCM*/
/*
 * ssi_aead_create_assoc_desc() - append one descriptor feeding the request's
 * associated data into @flow_mode, as DLLI (contiguous) or MLLI (scattered)
 * per the buffer type chosen by the buffer manager. For XCBC with payload
 * data still to come, mark the DIN as not-last so the MAC isn't finalized.
 *
 * NOTE(review): break statements, error handling, *seq_size update and
 * closing braces are missing from this extract; note the two statements
 * jammed onto one line at the DLLI case.
 */
723 ssi_aead_create_assoc_desc(
724 struct aead_request *areq,
725 unsigned int flow_mode,
726 struct cc_hw_desc desc[],
727 unsigned int *seq_size)
729 struct crypto_aead *tfm = crypto_aead_reqtfm(areq);
730 struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
731 struct aead_req_ctx *areq_ctx = aead_request_ctx(areq);
732 enum ssi_req_dma_buf_type assoc_dma_type = areq_ctx->assoc_buff_type;
733 unsigned int idx = *seq_size;
735 switch (assoc_dma_type) {
736 case SSI_DMA_BUF_DLLI:
737 SSI_LOG_DEBUG("ASSOC buffer type DLLI\n");
738 hw_desc_init(&desc[idx]);
739 set_din_type(&desc[idx], DMA_DLLI, sg_dma_address(areq->src),
740 areq->assoclen, NS_BIT); set_flow_mode(&desc[idx],
742 if ((ctx->auth_mode == DRV_HASH_XCBC_MAC) &&
743 (areq_ctx->cryptlen > 0))
744 set_din_not_last_indication(&desc[idx]);
746 case SSI_DMA_BUF_MLLI:
747 SSI_LOG_DEBUG("ASSOC buffer type MLLI\n");
748 hw_desc_init(&desc[idx]);
749 set_din_type(&desc[idx], DMA_MLLI, areq_ctx->assoc.sram_addr,
750 areq_ctx->assoc.mlli_nents, NS_BIT);
751 set_flow_mode(&desc[idx], flow_mode);
752 if ((ctx->auth_mode == DRV_HASH_XCBC_MAC) &&
753 (areq_ctx->cryptlen > 0))
754 set_din_not_last_indication(&desc[idx]);
756 case SSI_DMA_BUF_NULL:
758 SSI_LOG_ERR("Invalid ASSOC buffer type\n");
/*
 * ssi_aead_process_authenc_data_desc() - append one descriptor feeding the
 * ciphertext region into the authentication engine. For DLLI the side to
 * authenticate (dst on encrypt, src on decrypt) is addressed directly; for
 * MLLI a single-pass flow uses the dst/src MLLI table while the default
 * double-pass flow reuses the compact assoc+iv+data table.
 *
 * NOTE(review): the `direct` parameter declaration, break statements,
 * *seq_size update and closing braces are missing from this extract.
 */
765 ssi_aead_process_authenc_data_desc(
766 struct aead_request *areq,
767 unsigned int flow_mode,
768 struct cc_hw_desc desc[],
769 unsigned int *seq_size,
772 struct aead_req_ctx *areq_ctx = aead_request_ctx(areq);
773 enum ssi_req_dma_buf_type data_dma_type = areq_ctx->data_buff_type;
774 unsigned int idx = *seq_size;
776 switch (data_dma_type) {
777 case SSI_DMA_BUF_DLLI:
/* Authenticate the ciphertext side of the operation. */
779 struct scatterlist *cipher =
780 (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
781 areq_ctx->dst_sgl : areq_ctx->src_sgl;
783 unsigned int offset =
784 (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
785 areq_ctx->dst_offset : areq_ctx->src_offset;
786 SSI_LOG_DEBUG("AUTHENC: SRC/DST buffer type DLLI\n");
787 hw_desc_init(&desc[idx]);
788 set_din_type(&desc[idx], DMA_DLLI,
789 (sg_dma_address(cipher) + offset),
790 areq_ctx->cryptlen, NS_BIT);
791 set_flow_mode(&desc[idx], flow_mode);
794 case SSI_DMA_BUF_MLLI:
796 /* DOUBLE-PASS flow (as default)
797 * assoc. + iv + data -compact in one table
798 * if assoclen is ZERO only IV perform
800 ssi_sram_addr_t mlli_addr = areq_ctx->assoc.sram_addr;
801 u32 mlli_nents = areq_ctx->assoc.mlli_nents;
803 if (likely(areq_ctx->is_single_pass)) {
804 if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
805 mlli_addr = areq_ctx->dst.sram_addr;
806 mlli_nents = areq_ctx->dst.mlli_nents;
808 mlli_addr = areq_ctx->src.sram_addr;
809 mlli_nents = areq_ctx->src.mlli_nents;
813 SSI_LOG_DEBUG("AUTHENC: SRC/DST buffer type MLLI\n");
814 hw_desc_init(&desc[idx]);
815 set_din_type(&desc[idx], DMA_MLLI, mlli_addr, mlli_nents,
817 set_flow_mode(&desc[idx], flow_mode);
820 case SSI_DMA_BUF_NULL:
822 SSI_LOG_ERR("AUTHENC: Invalid SRC/DST buffer type\n");
/*
 * ssi_aead_process_cipher_data_desc() - append one descriptor streaming the
 * payload from src to dst through the cipher engine (@flow_mode), in DLLI or
 * MLLI addressing. A zero-length payload requires no descriptor.
 *
 * NOTE(review): break statements, *seq_size update and closing braces are
 * missing from this extract.
 */
829 ssi_aead_process_cipher_data_desc(
830 struct aead_request *areq,
831 unsigned int flow_mode,
832 struct cc_hw_desc desc[],
833 unsigned int *seq_size)
835 unsigned int idx = *seq_size;
836 struct aead_req_ctx *areq_ctx = aead_request_ctx(areq);
837 enum ssi_req_dma_buf_type data_dma_type = areq_ctx->data_buff_type;
839 if (areq_ctx->cryptlen == 0)
840 return; /*null processing*/
842 switch (data_dma_type) {
843 case SSI_DMA_BUF_DLLI:
844 SSI_LOG_DEBUG("CIPHER: SRC/DST buffer type DLLI\n");
845 hw_desc_init(&desc[idx]);
846 set_din_type(&desc[idx], DMA_DLLI,
847 (sg_dma_address(areq_ctx->src_sgl) +
848 areq_ctx->src_offset), areq_ctx->cryptlen, NS_BIT);
849 set_dout_dlli(&desc[idx],
850 (sg_dma_address(areq_ctx->dst_sgl) +
851 areq_ctx->dst_offset),
852 areq_ctx->cryptlen, NS_BIT, 0);
853 set_flow_mode(&desc[idx], flow_mode);
855 case SSI_DMA_BUF_MLLI:
856 SSI_LOG_DEBUG("CIPHER: SRC/DST buffer type MLLI\n");
857 hw_desc_init(&desc[idx]);
858 set_din_type(&desc[idx], DMA_MLLI, areq_ctx->src.sram_addr,
859 areq_ctx->src.mlli_nents, NS_BIT);
860 set_dout_mlli(&desc[idx], areq_ctx->dst.sram_addr,
861 areq_ctx->dst.mlli_nents, NS_BIT, 0);
862 set_flow_mode(&desc[idx], flow_mode);
864 case SSI_DMA_BUF_NULL:
866 SSI_LOG_ERR("CIPHER: Invalid SRC/DST buffer type\n");
/*
 * ssi_aead_process_digest_result_desc() - append the descriptor that reads
 * the final ICV out of the hash/MAC engine. On encrypt it is written
 * straight to the request's ICV location; on decrypt it goes to mac_buf so
 * the completion callback can compare it against the received ICV.
 *
 * NOTE(review): the else branches' braces, the last DOUT argument of the
 * encrypt-path set_dout_dlli(), *seq_size update and closing braces are
 * missing from this extract.
 */
872 static inline void ssi_aead_process_digest_result_desc(
873 struct aead_request *req,
874 struct cc_hw_desc desc[],
875 unsigned int *seq_size)
877 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
878 struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
879 struct aead_req_ctx *req_ctx = aead_request_ctx(req);
880 unsigned int idx = *seq_size;
881 unsigned int hash_mode = (ctx->auth_mode == DRV_HASH_SHA1) ?
882 DRV_HASH_HW_SHA1 : DRV_HASH_HW_SHA256;
883 int direct = req_ctx->gen_ctx.op_type;
885 /* Get final ICV result */
886 if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
887 hw_desc_init(&desc[idx]);
888 set_flow_mode(&desc[idx], S_HASH_to_DOUT);
889 set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
890 set_dout_dlli(&desc[idx], req_ctx->icv_dma_addr, ctx->authsize,
892 set_queue_last_ind(&desc[idx]);
893 if (ctx->auth_mode == DRV_HASH_XCBC_MAC) {
894 set_aes_not_hash_mode(&desc[idx]);
895 set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
897 set_cipher_config0(&desc[idx],
898 HASH_DIGEST_RESULT_LITTLE_ENDIAN);
899 set_cipher_mode(&desc[idx], hash_mode);
902 /* Get ICV out from hardware */
903 hw_desc_init(&desc[idx]);
904 set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
905 set_flow_mode(&desc[idx], S_HASH_to_DOUT);
906 set_dout_dlli(&desc[idx], req_ctx->mac_buf_dma_addr,
907 ctx->authsize, NS_BIT, 1);
908 set_queue_last_ind(&desc[idx]);
909 set_cipher_config0(&desc[idx],
910 HASH_DIGEST_RESULT_LITTLE_ENDIAN);
911 set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
912 if (ctx->auth_mode == DRV_HASH_XCBC_MAC) {
913 set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
914 set_aes_not_hash_mode(&desc[idx]);
916 set_cipher_mode(&desc[idx], hash_mode);
/*
 * ssi_aead_setup_cipher_desc() - append the two descriptors that prime the
 * cipher engine: first the IV/state load (STATE1 for CTR's counter block,
 * STATE0 otherwise), then the encryption key load (AES key-size handling vs.
 * DES). The direction and flow mode come from the request context / tfm ctx.
 *
 * NOTE(review): idx increments, the hw_iv_size argument line of the first
 * set_din_type(), *seq_size update and closing braces are missing from this
 * extract.
 */
923 static inline void ssi_aead_setup_cipher_desc(
924 struct aead_request *req,
925 struct cc_hw_desc desc[],
926 unsigned int *seq_size)
928 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
929 struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
930 struct aead_req_ctx *req_ctx = aead_request_ctx(req);
931 unsigned int hw_iv_size = req_ctx->hw_iv_size;
932 unsigned int idx = *seq_size;
933 int direct = req_ctx->gen_ctx.op_type;
935 /* Setup cipher state */
936 hw_desc_init(&desc[idx]);
937 set_cipher_config0(&desc[idx], direct);
938 set_flow_mode(&desc[idx], ctx->flow_mode);
939 set_din_type(&desc[idx], DMA_DLLI, req_ctx->gen_ctx.iv_dma_addr,
941 if (ctx->cipher_mode == DRV_CIPHER_CTR)
942 set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
944 set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
945 set_cipher_mode(&desc[idx], ctx->cipher_mode);
/* Load the encryption key. */
949 hw_desc_init(&desc[idx]);
950 set_cipher_config0(&desc[idx], direct);
951 set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
952 set_flow_mode(&desc[idx], ctx->flow_mode);
953 if (ctx->flow_mode == S_DIN_to_AES) {
/* 192-bit AES keys were zero-extended at setkey; load full width. */
954 set_din_type(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
955 ((ctx->enc_keylen == 24) ? CC_AES_KEY_SIZE_MAX :
956 ctx->enc_keylen), NS_BIT);
957 set_key_size_aes(&desc[idx], ctx->enc_keylen);
959 set_din_type(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
960 ctx->enc_keylen, NS_BIT);
961 set_key_size_des(&desc[idx], ctx->enc_keylen);
963 set_cipher_mode(&desc[idx], ctx->cipher_mode);
/*
 * ssi_aead_process_cipher() - emit the full cipher sub-sequence: engine
 * setup (IV + key) followed by the data-movement descriptor. On encrypt a
 * dummy no-DMA completion descriptor is appended so the engine waits for all
 * ciphertext to be written before the MAC reads it.
 *
 * NOTE(review): idx increments, *seq_size update and closing braces are
 * missing from this extract.
 */
969 static inline void ssi_aead_process_cipher(
970 struct aead_request *req,
971 struct cc_hw_desc desc[],
972 unsigned int *seq_size,
973 unsigned int data_flow_mode)
975 struct aead_req_ctx *req_ctx = aead_request_ctx(req);
976 int direct = req_ctx->gen_ctx.op_type;
977 unsigned int idx = *seq_size;
979 if (req_ctx->cryptlen == 0)
980 return; /*null processing*/
982 ssi_aead_setup_cipher_desc(req, desc, &idx);
983 ssi_aead_process_cipher_data_desc(req, data_flow_mode, desc, &idx);
984 if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
985 /* We must wait for DMA to write all cipher */
986 hw_desc_init(&desc[idx]);
987 set_din_no_dma(&desc[idx], 0, 0xfffff0);
988 set_dout_no_dma(&desc[idx], 0, 0, 1);
/*
 * ssi_aead_hmac_setup_digest_desc() - prime the hash engine for the inner
 * HMAC pass: load the precomputed ipad digest (from setkey) as the hash
 * state, then load the initial digest length from SRAM. SHA-1 or SHA-256.
 *
 * NOTE(review): idx increments, an argument line of set_din_sram(),
 * *seq_size update and the closing brace are missing from this extract.
 */
995 static inline void ssi_aead_hmac_setup_digest_desc(
996 struct aead_request *req,
997 struct cc_hw_desc desc[],
998 unsigned int *seq_size)
1000 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1001 struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
1002 unsigned int hash_mode = (ctx->auth_mode == DRV_HASH_SHA1) ?
1003 DRV_HASH_HW_SHA1 : DRV_HASH_HW_SHA256;
1004 unsigned int digest_size = (ctx->auth_mode == DRV_HASH_SHA1) ?
1005 CC_SHA1_DIGEST_SIZE : CC_SHA256_DIGEST_SIZE;
1006 unsigned int idx = *seq_size;
1008 /* Loading hash ipad xor key state */
1009 hw_desc_init(&desc[idx]);
1010 set_cipher_mode(&desc[idx], hash_mode);
1011 set_din_type(&desc[idx], DMA_DLLI,
1012 ctx->auth_state.hmac.ipad_opad_dma_addr, digest_size,
1014 set_flow_mode(&desc[idx], S_DIN_to_HASH);
1015 set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
1018 /* Load init. digest len (64 bytes) */
1019 hw_desc_init(&desc[idx]);
1020 set_cipher_mode(&desc[idx], hash_mode);
1021 set_din_sram(&desc[idx],
1022 ssi_ahash_get_initial_digest_len_sram_addr(ctx->drvdata,
1025 set_flow_mode(&desc[idx], S_DIN_to_HASH);
1026 set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
/*
 * ssi_aead_xcbc_setup_digest_desc() - prime the AES engine for XCBC-MAC:
 * zero the MAC state, then load the three derived subkeys K1 (KEY0),
 * K2 (STATE1) and K3 (STATE2) from the contiguous xcbc_keys DMA buffer.
 *
 * NOTE(review): idx increments, *seq_size update and the closing brace are
 * missing from this extract.
 */
1032 static inline void ssi_aead_xcbc_setup_digest_desc(
1033 struct aead_request *req,
1034 struct cc_hw_desc desc[],
1035 unsigned int *seq_size)
1037 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1038 struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
1039 unsigned int idx = *seq_size;
1041 /* Loading MAC state */
1042 hw_desc_init(&desc[idx]);
1043 set_din_const(&desc[idx], 0, CC_AES_BLOCK_SIZE);
1044 set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
1045 set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
1046 set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
1047 set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
1048 set_flow_mode(&desc[idx], S_DIN_to_HASH);
1049 set_aes_not_hash_mode(&desc[idx]);
1052 /* Setup XCBC MAC K1 */
1053 hw_desc_init(&desc[idx]);
1054 set_din_type(&desc[idx], DMA_DLLI,
1055 ctx->auth_state.xcbc.xcbc_keys_dma_addr,
1056 AES_KEYSIZE_128, NS_BIT);
1057 set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
1058 set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
1059 set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
1060 set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
1061 set_flow_mode(&desc[idx], S_DIN_to_HASH);
1062 set_aes_not_hash_mode(&desc[idx]);
1065 /* Setup XCBC MAC K2 */
1066 hw_desc_init(&desc[idx]);
1067 set_din_type(&desc[idx], DMA_DLLI,
1068 (ctx->auth_state.xcbc.xcbc_keys_dma_addr +
1069 AES_KEYSIZE_128), AES_KEYSIZE_128, NS_BIT);
1070 set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
1071 set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
1072 set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
1073 set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
1074 set_flow_mode(&desc[idx], S_DIN_to_HASH);
1075 set_aes_not_hash_mode(&desc[idx]);
1078 /* Setup XCBC MAC K3 */
1079 hw_desc_init(&desc[idx]);
1080 set_din_type(&desc[idx], DMA_DLLI,
1081 (ctx->auth_state.xcbc.xcbc_keys_dma_addr +
1082 2 * AES_KEYSIZE_128), AES_KEYSIZE_128, NS_BIT);
1083 set_setup_mode(&desc[idx], SETUP_LOAD_STATE2);
1084 set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
1085 set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
1086 set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
1087 set_flow_mode(&desc[idx], S_DIN_to_HASH);
1088 set_aes_not_hash_mode(&desc[idx]);
/*
 * ssi_aead_process_digest_header_desc() - hash the associated data (header)
 * into the MAC, if any; delegates descriptor construction to
 * ssi_aead_create_assoc_desc() with the DIN_HASH flow.
 *
 * NOTE(review): the *seq_size write-back and closing brace are missing from
 * this extract.
 */
1094 static inline void ssi_aead_process_digest_header_desc(
1095 struct aead_request *req,
1096 struct cc_hw_desc desc[],
1097 unsigned int *seq_size)
1099 unsigned int idx = *seq_size;
1100 /* Hash associated data */
1101 if (req->assoclen > 0)
1102 ssi_aead_create_assoc_desc(req, DIN_HASH, desc, &idx);
/*
 * ssi_aead_process_digest_scheme_desc() - finish an HMAC: pad and park the
 * inner-hash result in the SRAM workspace, reload the engine with the
 * precomputed opad state and initial length, then hash the inner digest to
 * produce the final outer HMAC.
 *
 * NOTE(review): idx increments, some size-argument lines of the
 * set_dout_sram()/set_din_sram() calls, *seq_size update and the closing
 * brace are missing from this extract.
 */
1108 static inline void ssi_aead_process_digest_scheme_desc(
1109 struct aead_request *req,
1110 struct cc_hw_desc desc[],
1111 unsigned int *seq_size)
1113 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1114 struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
1115 struct ssi_aead_handle *aead_handle = ctx->drvdata->aead_handle;
1116 unsigned int hash_mode = (ctx->auth_mode == DRV_HASH_SHA1) ?
1117 DRV_HASH_HW_SHA1 : DRV_HASH_HW_SHA256;
1118 unsigned int digest_size = (ctx->auth_mode == DRV_HASH_SHA1) ?
1119 CC_SHA1_DIGEST_SIZE : CC_SHA256_DIGEST_SIZE;
1120 unsigned int idx = *seq_size;
/* Pad the inner hash and stash its state in the SRAM workspace. */
1122 hw_desc_init(&desc[idx]);
1123 set_cipher_mode(&desc[idx], hash_mode);
1124 set_dout_sram(&desc[idx], aead_handle->sram_workspace_addr,
1126 set_flow_mode(&desc[idx], S_HASH_to_DOUT);
1127 set_setup_mode(&desc[idx], SETUP_WRITE_STATE1);
1128 set_cipher_do(&desc[idx], DO_PAD);
1131 /* Get final ICV result */
1132 hw_desc_init(&desc[idx]);
1133 set_dout_sram(&desc[idx], aead_handle->sram_workspace_addr,
1135 set_flow_mode(&desc[idx], S_HASH_to_DOUT);
1136 set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
1137 set_cipher_config0(&desc[idx], HASH_DIGEST_RESULT_LITTLE_ENDIAN);
1138 set_cipher_mode(&desc[idx], hash_mode);
1141 /* Loading hash opad xor key state */
1142 hw_desc_init(&desc[idx]);
1143 set_cipher_mode(&desc[idx], hash_mode);
1144 set_din_type(&desc[idx], DMA_DLLI,
1145 (ctx->auth_state.hmac.ipad_opad_dma_addr + digest_size),
1146 digest_size, NS_BIT);
1147 set_flow_mode(&desc[idx], S_DIN_to_HASH);
1148 set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
1151 /* Load init. digest len (64 bytes) */
1152 hw_desc_init(&desc[idx]);
1153 set_cipher_mode(&desc[idx], hash_mode);
1154 set_din_sram(&desc[idx],
1155 ssi_ahash_get_initial_digest_len_sram_addr(ctx->drvdata,
1158 set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
1159 set_flow_mode(&desc[idx], S_DIN_to_HASH);
1160 set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
1163 /* Perform HASH update */
1164 hw_desc_init(&desc[idx]);
1165 set_din_sram(&desc[idx], aead_handle->sram_workspace_addr,
1167 set_flow_mode(&desc[idx], DIN_HASH);
/*
 * If any of the request's buffers are mapped as MLLI (multi-link-list)
 * tables -- or the request is not single-pass -- emit a BYPASS
 * descriptor that copies the MLLI table from host memory into the
 * driver's reserved SRAM slot, so subsequent data descriptors can
 * reference it from SRAM.
 * NOTE(review): the opening of the if-condition (likely/unlikely
 * wrapper) and *seq_size increment are elided in this chunk.
 */
1173 static inline void ssi_aead_load_mlli_to_sram(
1174 	struct aead_request *req,
1175 	struct cc_hw_desc desc[],
1176 	unsigned int *seq_size)
1178 	struct aead_req_ctx *req_ctx = aead_request_ctx(req);
1179 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1180 	struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
1183 	(req_ctx->assoc_buff_type == SSI_DMA_BUF_MLLI) ||
1184 	(req_ctx->data_buff_type == SSI_DMA_BUF_MLLI) ||
1185 	!req_ctx->is_single_pass)) {
1186 	SSI_LOG_DEBUG("Copy-to-sram: mlli_dma=%08x, mlli_size=%u\n",
1187 	(unsigned int)ctx->drvdata->mlli_sram_addr,
1188 	req_ctx->mlli_params.mlli_len);
1189 	/* Copy MLLI table host-to-sram */
1190 	hw_desc_init(&desc[*seq_size]);
1191 	set_din_type(&desc[*seq_size], DMA_DLLI,
1192 	req_ctx->mlli_params.mlli_dma_addr,
1193 	req_ctx->mlli_params.mlli_len, NS_BIT);
1194 	set_dout_sram(&desc[*seq_size],
1195 	ctx->drvdata->mlli_sram_addr,
1196 	req_ctx->mlli_params.mlli_len);
1197 	set_flow_mode(&desc[*seq_size], BYPASS);
/*
 * Select the HW data flow mode for the cipher+hash data pass.
 * Single-pass requests combine cipher and hash engines
 * (AES/DES_to_HASH_and_DOUT on encrypt, AES/DES_and_HASH on decrypt);
 * two-pass requests use plain DIN_AES_DOUT / DIN_DES_DOUT.
 * NOTE(review): the `else` lines of both inner conditionals are elided
 * in this chunk -- the DES branches presumably hang off them.
 */
1202 static inline enum cc_flow_mode ssi_aead_get_data_flow_mode(
1203 	enum drv_crypto_direction direct,
1204 	enum cc_flow_mode setup_flow_mode,
1205 	bool is_single_pass)
1207 	enum cc_flow_mode data_flow_mode;
1209 	if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
1210 	if (setup_flow_mode == S_DIN_to_AES)
1211 	data_flow_mode = likely(is_single_pass) ?
1212 	AES_to_HASH_and_DOUT : DIN_AES_DOUT;
1214 	data_flow_mode = likely(is_single_pass) ?
1215 	DES_to_HASH_and_DOUT : DIN_DES_DOUT;
1216 	} else { /* Decrypt */
1217 	if (setup_flow_mode == S_DIN_to_AES)
1218 	data_flow_mode = likely(is_single_pass) ?
1219 	AES_and_HASH : DIN_AES_DOUT;
1221 	data_flow_mode = likely(is_single_pass) ?
1222 	DES_and_HASH : DIN_DES_DOUT;
1225 	return data_flow_mode;
/*
 * Build the full descriptor sequence for HMAC-based authenc
 * (e.g. authenc(hmac(sha1/sha256),cbc(aes))).
 * Single-pass: one combined cipher+hash walk over the data.
 * Two-pass fallback (non-word-multiple assoc data): encrypt-then-MAC
 * on encrypt, MAC-then-decrypt on decrypt; the final digest-result
 * descriptor must follow the cipher so the completion bit is last.
 */
1228 static inline void ssi_aead_hmac_authenc(
1229 	struct aead_request *req,
1230 	struct cc_hw_desc desc[],
1231 	unsigned int *seq_size)
1233 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1234 	struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
1235 	struct aead_req_ctx *req_ctx = aead_request_ctx(req);
1236 	int direct = req_ctx->gen_ctx.op_type;
1237 	unsigned int data_flow_mode = ssi_aead_get_data_flow_mode(
1238 	direct, ctx->flow_mode, req_ctx->is_single_pass);
1240 	if (req_ctx->is_single_pass) {
1244 	ssi_aead_hmac_setup_digest_desc(req, desc, seq_size);
1245 	ssi_aead_setup_cipher_desc(req, desc, seq_size);
1246 	ssi_aead_process_digest_header_desc(req, desc, seq_size);
1247 	ssi_aead_process_cipher_data_desc(req, data_flow_mode, desc, seq_size);
1248 	ssi_aead_process_digest_scheme_desc(req, desc, seq_size);
1249 	ssi_aead_process_digest_result_desc(req, desc, seq_size);
1255 	* Fallback for unsupported single-pass modes,
1256 	* i.e. using assoc. data of non-word-multiple
1258 	if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
1259 	/* encrypt first.. */
1260 	ssi_aead_process_cipher(req, desc, seq_size, data_flow_mode);
1261 	/* authenc after..*/
1262 	ssi_aead_hmac_setup_digest_desc(req, desc, seq_size);
1263 	ssi_aead_process_authenc_data_desc(req, DIN_HASH, desc, seq_size, direct);
1264 	ssi_aead_process_digest_scheme_desc(req, desc, seq_size);
1265 	ssi_aead_process_digest_result_desc(req, desc, seq_size);
1267 	} else { /*DECRYPT*/
1268 	/* authenc first..*/
1269 	ssi_aead_hmac_setup_digest_desc(req, desc, seq_size);
1270 	ssi_aead_process_authenc_data_desc(req, DIN_HASH, desc, seq_size, direct);
1271 	ssi_aead_process_digest_scheme_desc(req, desc, seq_size);
1272 	/* decrypt after.. */
1273 	ssi_aead_process_cipher(req, desc, seq_size, data_flow_mode);
1274 	/* read the digest result with setting the completion bit
1275 	* must be after the cipher operation
1277 	ssi_aead_process_digest_result_desc(req, desc, seq_size);
/*
 * Build the descriptor sequence for XCBC-MAC-based authenc
 * (authenc(xcbc(aes),...)).  Mirrors ssi_aead_hmac_authenc() but with
 * no separate digest-scheme pass (XCBC has no opad finalization).
 * NOTE(review): the `static inline void` return-type line of this
 * definition is elided in this chunk.
 */
1282 ssi_aead_xcbc_authenc(
1283 	struct aead_request *req,
1284 	struct cc_hw_desc desc[],
1285 	unsigned int *seq_size)
1287 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1288 	struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
1289 	struct aead_req_ctx *req_ctx = aead_request_ctx(req);
1290 	int direct = req_ctx->gen_ctx.op_type;
1291 	unsigned int data_flow_mode = ssi_aead_get_data_flow_mode(
1292 	direct, ctx->flow_mode, req_ctx->is_single_pass);
1294 	if (req_ctx->is_single_pass) {
1298 	ssi_aead_xcbc_setup_digest_desc(req, desc, seq_size);
1299 	ssi_aead_setup_cipher_desc(req, desc, seq_size);
1300 	ssi_aead_process_digest_header_desc(req, desc, seq_size);
1301 	ssi_aead_process_cipher_data_desc(req, data_flow_mode, desc, seq_size);
1302 	ssi_aead_process_digest_result_desc(req, desc, seq_size);
1308 	* Fallback for unsupported single-pass modes,
1309 	* i.e. using assoc. data of non-word-multiple
1311 	if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
1312 	/* encrypt first.. */
1313 	ssi_aead_process_cipher(req, desc, seq_size, data_flow_mode);
1314 	/* authenc after.. */
1315 	ssi_aead_xcbc_setup_digest_desc(req, desc, seq_size);
1316 	ssi_aead_process_authenc_data_desc(req, DIN_HASH, desc, seq_size, direct);
1317 	ssi_aead_process_digest_result_desc(req, desc, seq_size);
1318 	} else { /*DECRYPT*/
1319 	/* authenc first.. */
1320 	ssi_aead_xcbc_setup_digest_desc(req, desc, seq_size);
1321 	ssi_aead_process_authenc_data_desc(req, DIN_HASH, desc, seq_size, direct);
1322 	/* decrypt after..*/
1323 	ssi_aead_process_cipher(req, desc, seq_size, data_flow_mode);
1324 	/* read the digest result with setting the completion bit
1325 	* must be after the cipher operation
1327 	ssi_aead_process_digest_result_desc(req, desc, seq_size);
/*
 * Validate request lengths for the configured flow/cipher mode and
 * decide whether the fast single-pass flow can be used.
 * On decrypt, cryptlen includes the ICV, so the payload length is
 * req->cryptlen - ctx->authsize.  Misaligned assoc/cipher lengths
 * force the two-pass fallback (is_single_pass = false); block-size
 * violations are hard errors.
 * NOTE(review): return statements, `break`s and the S_DIN_to_DES case
 * label are elided in this chunk -- switch structure inferred.
 */
1331 static int validate_data_size(struct ssi_aead_ctx *ctx,
1332 	enum drv_crypto_direction direct,
1333 	struct aead_request *req)
1335 	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
1336 	unsigned int assoclen = req->assoclen;
1337 	unsigned int cipherlen = (direct == DRV_CRYPTO_DIRECTION_DECRYPT) ?
1338 	(req->cryptlen - ctx->authsize) : req->cryptlen;
	/* Decrypt must carry at least the ICV */
1340 	if (unlikely((direct == DRV_CRYPTO_DIRECTION_DECRYPT) &&
1341 	(req->cryptlen < ctx->authsize)))
1344 	areq_ctx->is_single_pass = true; /*defaulted to fast flow*/
1346 	switch (ctx->flow_mode) {
1348 	if (unlikely((ctx->cipher_mode == DRV_CIPHER_CBC) &&
1349 	!IS_ALIGNED(cipherlen, AES_BLOCK_SIZE)))
1351 	if (ctx->cipher_mode == DRV_CIPHER_CCM)
1353 	if (ctx->cipher_mode == DRV_CIPHER_GCTR) {
1354 	if (areq_ctx->plaintext_authenticate_only)
1355 	areq_ctx->is_single_pass = false;
1359 	if (!IS_ALIGNED(assoclen, sizeof(u32)))
1360 	areq_ctx->is_single_pass = false;
1362 	if ((ctx->cipher_mode == DRV_CIPHER_CTR) &&
1363 	!IS_ALIGNED(cipherlen, sizeof(u32)))
1364 	areq_ctx->is_single_pass = false;
1368 	if (unlikely(!IS_ALIGNED(cipherlen, DES_BLOCK_SIZE)))
1370 	if (unlikely(!IS_ALIGNED(assoclen, DES_BLOCK_SIZE)))
1371 	areq_ctx->is_single_pass = false;
1374 	SSI_LOG_ERR("Unexpected flow mode (%d)\n", ctx->flow_mode);
1384 #if SSI_CC_HAS_AES_CCM
/*
 * Encode the CCM A0 length field (l(a), per RFC 3610 / NIST SP 800-38C):
 * 2-byte big-endian form when header_size < 2^16 - 2^8, otherwise the
 * 6-byte form with the 0xFF 0xFE prefix.  Returns the number of bytes
 * written (0 when there is no associated data).
 * NOTE(review): the prefix-byte assignments and `len` updates/return
 * are elided in this chunk.
 */
1385 static unsigned int format_ccm_a0(u8 *pa0_buff, u32 header_size)
1387 	unsigned int len = 0;
1389 	if (header_size == 0)
1392 	if (header_size < ((1UL << 16) - (1UL << 8))) {
1395 	pa0_buff[0] = (header_size >> 8) & 0xFF;
1396 	pa0_buff[1] = header_size & 0xFF;
1402 	pa0_buff[2] = (header_size >> 24) & 0xFF;
1403 	pa0_buff[3] = (header_size >> 16) & 0xFF;
1404 	pa0_buff[4] = (header_size >> 8) & 0xFF;
1405 	pa0_buff[5] = header_size & 0xFF;
/*
 * Write msglen into the last csize bytes of an AES block (big-endian),
 * as required by the CCM B0 block length field (taken from
 * crypto/ccm.c).  Fails when msglen does not fit in csize bytes.
 * NOTE(review): the `__be32 data` declaration, the csize clamp and the
 * return statements are elided in this chunk; `block` is expected to
 * point one past the end of the length field.
 */
1411 static int set_msg_len(u8 *block, unsigned int msglen, unsigned int csize)
1415 	memset(block, 0, csize);
1420 	else if (msglen > (1 << (8 * csize)))
1423 	data = cpu_to_be32(msglen);
1424 	memcpy(block - csize, (u8 *)&data + 4 - csize, csize);
/*
 * Build the complete AES-CCM descriptor sequence: load CTR key/state,
 * load CBC-MAC key/state, hash the formatted A0+AAD, run the combined
 * cipher+MAC data pass, read back the temporary MAC, then encrypt the
 * MAC with counter block 0 to produce the final tag.
 * On decrypt the tag goes to mac_buf (for later comparison); on
 * encrypt it goes straight to the ICV location in the destination.
 * NOTE(review): idx++ increments and the final *seq_size/return are
 * elided in this chunk.
 */
1429 static inline int ssi_aead_ccm(
1430 	struct aead_request *req,
1431 	struct cc_hw_desc desc[],
1432 	unsigned int *seq_size)
1434 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1435 	struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
1436 	struct aead_req_ctx *req_ctx = aead_request_ctx(req);
1437 	unsigned int idx = *seq_size;
1438 	unsigned int cipher_flow_mode;
1439 	dma_addr_t mac_result;
1441 	if (req_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) {
1442 	cipher_flow_mode = AES_to_HASH_and_DOUT;
1443 	mac_result = req_ctx->mac_buf_dma_addr;
1444 	} else { /* Encrypt */
1445 	cipher_flow_mode = AES_and_HASH;
1446 	mac_result = req_ctx->icv_dma_addr;
	/* Load CTR encryption key (192-bit keys are padded to max size) */
1450 	hw_desc_init(&desc[idx]);
1451 	set_cipher_mode(&desc[idx], DRV_CIPHER_CTR);
1452 	set_din_type(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
1453 	((ctx->enc_keylen == 24) ? CC_AES_KEY_SIZE_MAX :
1454 	ctx->enc_keylen), NS_BIT);
1455 	set_key_size_aes(&desc[idx], ctx->enc_keylen);
1456 	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
1457 	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
1458 	set_flow_mode(&desc[idx], S_DIN_to_AES);
1461 	/* load ctr state */
1462 	hw_desc_init(&desc[idx]);
1463 	set_cipher_mode(&desc[idx], DRV_CIPHER_CTR);
1464 	set_key_size_aes(&desc[idx], ctx->enc_keylen);
1465 	set_din_type(&desc[idx], DMA_DLLI,
1466 	req_ctx->gen_ctx.iv_dma_addr, AES_BLOCK_SIZE, NS_BIT);
1467 	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
1468 	set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
1469 	set_flow_mode(&desc[idx], S_DIN_to_AES);
	/* Load the same key into the CBC-MAC (hash-side AES) engine */
1473 	hw_desc_init(&desc[idx]);
1474 	set_cipher_mode(&desc[idx], DRV_CIPHER_CBC_MAC);
1475 	set_din_type(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
1476 	((ctx->enc_keylen == 24) ? CC_AES_KEY_SIZE_MAX :
1477 	ctx->enc_keylen), NS_BIT);
1478 	set_key_size_aes(&desc[idx], ctx->enc_keylen);
1479 	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
1480 	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
1481 	set_flow_mode(&desc[idx], S_DIN_to_HASH);
1482 	set_aes_not_hash_mode(&desc[idx]);
1485 	/* load MAC state */
1486 	hw_desc_init(&desc[idx]);
1487 	set_cipher_mode(&desc[idx], DRV_CIPHER_CBC_MAC);
1488 	set_key_size_aes(&desc[idx], ctx->enc_keylen);
1489 	set_din_type(&desc[idx], DMA_DLLI, req_ctx->mac_buf_dma_addr,
1490 	AES_BLOCK_SIZE, NS_BIT);
1491 	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
1492 	set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
1493 	set_flow_mode(&desc[idx], S_DIN_to_HASH);
1494 	set_aes_not_hash_mode(&desc[idx]);
1497 	/* process assoc data */
1498 	if (req->assoclen > 0) {
1499 	ssi_aead_create_assoc_desc(req, DIN_HASH, desc, &idx);
1501 	hw_desc_init(&desc[idx]);
1502 	set_din_type(&desc[idx], DMA_DLLI,
1503 	sg_dma_address(&req_ctx->ccm_adata_sg),
1504 	AES_BLOCK_SIZE + req_ctx->ccm_hdr_size, NS_BIT);
1505 	set_flow_mode(&desc[idx], DIN_HASH);
1509 	/* process the cipher */
1510 	if (req_ctx->cryptlen != 0)
1511 	ssi_aead_process_cipher_data_desc(req, cipher_flow_mode, desc, &idx);
1513 	/* Read temporal MAC */
1514 	hw_desc_init(&desc[idx]);
1515 	set_cipher_mode(&desc[idx], DRV_CIPHER_CBC_MAC);
1516 	set_dout_dlli(&desc[idx], req_ctx->mac_buf_dma_addr, ctx->authsize,
1518 	set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
1519 	set_cipher_config0(&desc[idx], HASH_DIGEST_RESULT_LITTLE_ENDIAN);
1520 	set_flow_mode(&desc[idx], S_HASH_to_DOUT);
1521 	set_aes_not_hash_mode(&desc[idx]);
1524 	/* load AES-CTR state (for last MAC calculation)*/
1525 	hw_desc_init(&desc[idx]);
1526 	set_cipher_mode(&desc[idx], DRV_CIPHER_CTR);
1527 	set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
1528 	set_din_type(&desc[idx], DMA_DLLI, req_ctx->ccm_iv0_dma_addr,
1529 	AES_BLOCK_SIZE, NS_BIT);
1530 	set_key_size_aes(&desc[idx], ctx->enc_keylen);
1531 	set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
1532 	set_flow_mode(&desc[idx], S_DIN_to_AES);
	/* Memory barrier descriptor before the final tag encryption */
1535 	hw_desc_init(&desc[idx]);
1536 	set_din_no_dma(&desc[idx], 0, 0xfffff0);
1537 	set_dout_no_dma(&desc[idx], 0, 0, 1);
1540 	/* encrypt the "T" value and store MAC in mac_state */
1541 	hw_desc_init(&desc[idx]);
1542 	set_din_type(&desc[idx], DMA_DLLI, req_ctx->mac_buf_dma_addr,
1543 	ctx->authsize, NS_BIT);
1544 	set_dout_dlli(&desc[idx], mac_result, ctx->authsize, NS_BIT, 1);
1545 	set_queue_last_ind(&desc[idx]);
1546 	set_flow_mode(&desc[idx], DIN_AES_DOUT);
/*
 * Format the CCM control blocks (B0, A0, counter-0) per RFC 3610 /
 * NIST SP 800-38C into req_ctx->ccm_config, using req->iv[0] as L'.
 * Validates 2 <= L <= 8, encodes flags and message length into B0,
 * encodes l(a) into A0, and builds counter block 0 (same IV with the
 * count byte zeroed) for the final tag encryption.
 * NOTE(review): the `int rc` declaration, early-return paths and the
 * final return are elided in this chunk.
 */
1553 static int config_ccm_adata(struct aead_request *req)
1555 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1556 	struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
1557 	struct aead_req_ctx *req_ctx = aead_request_ctx(req);
1558 	//unsigned int size_of_a = 0, rem_a_size = 0;
1559 	unsigned int lp = req->iv[0];
1560 	/* Note: The code assume that req->iv[0] already contains the value of L' of RFC3610 */
1561 	unsigned int l = lp + 1; /* This is L' of RFC 3610. */
1562 	unsigned int m = ctx->authsize; /* This is M' of RFC 3610. */
1563 	u8 *b0 = req_ctx->ccm_config + CCM_B0_OFFSET;
1564 	u8 *a0 = req_ctx->ccm_config + CCM_A0_OFFSET;
1565 	u8 *ctr_count_0 = req_ctx->ccm_config + CCM_CTR_COUNT_0_OFFSET;
1566 	unsigned int cryptlen = (req_ctx->gen_ctx.op_type ==
1567 	DRV_CRYPTO_DIRECTION_ENCRYPT) ?
1569 	(req->cryptlen - ctx->authsize);
1572 	memset(req_ctx->mac_buf, 0, AES_BLOCK_SIZE);
1573 	memset(req_ctx->ccm_config, 0, AES_BLOCK_SIZE * 3);
1575 	/* taken from crypto/ccm.c */
1576 	/* 2 <= L <= 8, so 1 <= L' <= 7. */
1577 	if (l < 2 || l > 8) {
1578 	SSI_LOG_ERR("illegal iv value %X\n", req->iv[0]);
1581 	memcpy(b0, req->iv, AES_BLOCK_SIZE);
1583 	/* format control info per RFC 3610 and
1584 	* NIST Special Publication 800-38C
1586 	*b0 |= (8 * ((m - 2) / 2));
1587 	if (req->assoclen > 0)
1588 	*b0 |= 64; /* Enable bit 6 if Adata exists. */
1590 	rc = set_msg_len(b0 + 16 - l, cryptlen, l); /* Write L'. */
1593 	/* END of "taken from crypto/ccm.c" */
1595 	/* l(a) - size of associated data. */
1596 	req_ctx->ccm_hdr_size = format_ccm_a0(a0, req->assoclen);
1598 	memset(req->iv + 15 - req->iv[0], 0, req->iv[0] + 1);
1601 	memcpy(ctr_count_0, req->iv, AES_BLOCK_SIZE);
1602 	ctr_count_0[15] = 0;
/*
 * Convert an RFC 4309 (CCM-ESP) request to plain CCM form:
 * build the 16-byte counter IV from L'=3, the 3-byte keyed nonce and
 * the 8-byte per-request IV, then point req->iv at it and strip the
 * IV bytes from assoclen (RFC 4309 counts them as associated data).
 */
1607 static void ssi_rfc4309_ccm_process(struct aead_request *req)
1609 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1610 	struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
1611 	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
1614 	memset(areq_ctx->ctr_iv, 0, AES_BLOCK_SIZE);
1615 	areq_ctx->ctr_iv[0] = 3; /* For RFC 4309, always use 4 bytes for message length (at most 2^32-1 bytes). */
1617 	/* In RFC 4309 there is an 11-bytes nonce+IV part, that we build here. */
1618 	memcpy(areq_ctx->ctr_iv + CCM_BLOCK_NONCE_OFFSET, ctx->ctr_nonce, CCM_BLOCK_NONCE_SIZE);
1619 	memcpy(areq_ctx->ctr_iv + CCM_BLOCK_IV_OFFSET, req->iv, CCM_BLOCK_IV_SIZE);
1620 	req->iv = areq_ctx->ctr_iv;
1621 	req->assoclen -= CCM_BLOCK_IV_SIZE;
1623 #endif /*SSI_CC_HAS_AES_CCM*/
1625 #if SSI_CC_HAS_AES_GCM
/*
 * Prepare the GHASH engine for GCM: encrypt one zero block with the
 * AES key (ECB) to derive the hash subkey H, load H into the hash
 * engine, issue the HW-designer-mandated GHASH-select descriptor, and
 * load the zero initial GHASH state.
 * NOTE(review): idx++ increments and the *seq_size write-back are
 * elided in this chunk.
 */
1627 static inline void ssi_aead_gcm_setup_ghash_desc(
1628 	struct aead_request *req,
1629 	struct cc_hw_desc desc[],
1630 	unsigned int *seq_size)
1632 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1633 	struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
1634 	struct aead_req_ctx *req_ctx = aead_request_ctx(req);
1635 	unsigned int idx = *seq_size;
1637 	/* load key to AES*/
1638 	hw_desc_init(&desc[idx]);
1639 	set_cipher_mode(&desc[idx], DRV_CIPHER_ECB);
1640 	set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
1641 	set_din_type(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
1642 	ctx->enc_keylen, NS_BIT);
1643 	set_key_size_aes(&desc[idx], ctx->enc_keylen);
1644 	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
1645 	set_flow_mode(&desc[idx], S_DIN_to_AES);
1648 	/* process one zero block to generate hkey */
1649 	hw_desc_init(&desc[idx]);
1650 	set_din_const(&desc[idx], 0x0, AES_BLOCK_SIZE);
1651 	set_dout_dlli(&desc[idx], req_ctx->hkey_dma_addr, AES_BLOCK_SIZE,
1653 	set_flow_mode(&desc[idx], DIN_AES_DOUT);
1656 	/* Memory Barrier */
1657 	hw_desc_init(&desc[idx]);
1658 	set_din_no_dma(&desc[idx], 0, 0xfffff0);
1659 	set_dout_no_dma(&desc[idx], 0, 0, 1);
1662 	/* Load GHASH subkey */
1663 	hw_desc_init(&desc[idx]);
1664 	set_din_type(&desc[idx], DMA_DLLI, req_ctx->hkey_dma_addr,
1665 	AES_BLOCK_SIZE, NS_BIT);
1666 	set_dout_no_dma(&desc[idx], 0, 0, 1);
1667 	set_flow_mode(&desc[idx], S_DIN_to_HASH);
1668 	set_aes_not_hash_mode(&desc[idx]);
1669 	set_cipher_mode(&desc[idx], DRV_HASH_HW_GHASH);
1670 	set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
1671 	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
1674 	/* Configure Hash Engine to work with GHASH.
1675 	* Since it was not possible to extend HASH submodes to add GHASH,
1676 	* The following command is necessary in order to
1677 	* select GHASH (according to HW designers)
1679 	hw_desc_init(&desc[idx]);
1680 	set_din_no_dma(&desc[idx], 0, 0xfffff0);
1681 	set_dout_no_dma(&desc[idx], 0, 0, 1);
1682 	set_flow_mode(&desc[idx], S_DIN_to_HASH);
1683 	set_aes_not_hash_mode(&desc[idx]);
1684 	set_cipher_mode(&desc[idx], DRV_HASH_HW_GHASH);
1685 	set_cipher_do(&desc[idx], 1); //1=AES_SK RKEK
1686 	set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
1687 	set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
1688 	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
1691 	/* Load GHASH initial STATE (which is 0). (for any hash there is an initial state) */
1692 	hw_desc_init(&desc[idx]);
1693 	set_din_const(&desc[idx], 0x0, AES_BLOCK_SIZE);
1694 	set_dout_no_dma(&desc[idx], 0, 0, 1);
1695 	set_flow_mode(&desc[idx], S_DIN_to_HASH);
1696 	set_aes_not_hash_mode(&desc[idx]);
1697 	set_cipher_mode(&desc[idx], DRV_HASH_HW_GHASH);
1698 	set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
1699 	set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
/*
 * Prepare the GCTR (AES counter) engine: load the encryption key and,
 * when there is payload to encrypt (not RFC 4543
 * plaintext-authenticate-only), load the J0+2 counter value used for
 * the data pass (J0+1 is reserved for the final tag).
 * NOTE(review): idx++ increments and the *seq_size write-back are
 * elided in this chunk.
 */
1705 static inline void ssi_aead_gcm_setup_gctr_desc(
1706 	struct aead_request *req,
1707 	struct cc_hw_desc desc[],
1708 	unsigned int *seq_size)
1710 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1711 	struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
1712 	struct aead_req_ctx *req_ctx = aead_request_ctx(req);
1713 	unsigned int idx = *seq_size;
1715 	/* load key to AES*/
1716 	hw_desc_init(&desc[idx]);
1717 	set_cipher_mode(&desc[idx], DRV_CIPHER_GCTR);
1718 	set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
1719 	set_din_type(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
1720 	ctx->enc_keylen, NS_BIT);
1721 	set_key_size_aes(&desc[idx], ctx->enc_keylen);
1722 	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
1723 	set_flow_mode(&desc[idx], S_DIN_to_AES);
1726 	if ((req_ctx->cryptlen != 0) && (!req_ctx->plaintext_authenticate_only)) {
1727 	/* load AES/CTR initial CTR value inc by 2*/
1728 	hw_desc_init(&desc[idx]);
1729 	set_cipher_mode(&desc[idx], DRV_CIPHER_GCTR);
1730 	set_key_size_aes(&desc[idx], ctx->enc_keylen);
1731 	set_din_type(&desc[idx], DMA_DLLI,
1732 	req_ctx->gcm_iv_inc2_dma_addr, AES_BLOCK_SIZE,
1734 	set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
1735 	set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
1736 	set_flow_mode(&desc[idx], S_DIN_to_AES);
/*
 * Finalize GCM: GHASH the lengths block (len(A)||len(C)), store the
 * GHASH state to mac_buf, reload the J0+1 counter into GCTR, then
 * encrypt the GHASH result to produce the final tag -- written to
 * mac_buf on decrypt (for comparison) or to the ICV location on
 * encrypt.  The last descriptor sets the queue-completion bit.
 * NOTE(review): idx++ increments and the *seq_size write-back are
 * elided in this chunk.
 */
1743 static inline void ssi_aead_process_gcm_result_desc(
1744 	struct aead_request *req,
1745 	struct cc_hw_desc desc[],
1746 	unsigned int *seq_size)
1748 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1749 	struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
1750 	struct aead_req_ctx *req_ctx = aead_request_ctx(req);
1751 	dma_addr_t mac_result;
1752 	unsigned int idx = *seq_size;
1754 	if (req_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) {
1755 	mac_result = req_ctx->mac_buf_dma_addr;
1756 	} else { /* Encrypt */
1757 	mac_result = req_ctx->icv_dma_addr;
1760 	/* process(ghash) gcm_block_len */
1761 	hw_desc_init(&desc[idx]);
1762 	set_din_type(&desc[idx], DMA_DLLI, req_ctx->gcm_block_len_dma_addr,
1763 	AES_BLOCK_SIZE, NS_BIT);
1764 	set_flow_mode(&desc[idx], DIN_HASH);
1767 	/* Store GHASH state after GHASH(Associated Data + Cipher +LenBlock) */
1768 	hw_desc_init(&desc[idx]);
1769 	set_cipher_mode(&desc[idx], DRV_HASH_HW_GHASH);
1770 	set_din_no_dma(&desc[idx], 0, 0xfffff0);
1771 	set_dout_dlli(&desc[idx], req_ctx->mac_buf_dma_addr, AES_BLOCK_SIZE,
1773 	set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
1774 	set_flow_mode(&desc[idx], S_HASH_to_DOUT);
1775 	set_aes_not_hash_mode(&desc[idx]);
1779 	/* load AES/CTR initial CTR value inc by 1*/
1780 	hw_desc_init(&desc[idx]);
1781 	set_cipher_mode(&desc[idx], DRV_CIPHER_GCTR);
1782 	set_key_size_aes(&desc[idx], ctx->enc_keylen);
1783 	set_din_type(&desc[idx], DMA_DLLI, req_ctx->gcm_iv_inc1_dma_addr,
1784 	AES_BLOCK_SIZE, NS_BIT);
1785 	set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
1786 	set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
1787 	set_flow_mode(&desc[idx], S_DIN_to_AES);
1790 	/* Memory Barrier */
1791 	hw_desc_init(&desc[idx]);
1792 	set_din_no_dma(&desc[idx], 0, 0xfffff0);
1793 	set_dout_no_dma(&desc[idx], 0, 0, 1);
1796 	/* process GCTR on stored GHASH and store MAC in mac_state*/
1797 	hw_desc_init(&desc[idx]);
1798 	set_cipher_mode(&desc[idx], DRV_CIPHER_GCTR);
1799 	set_din_type(&desc[idx], DMA_DLLI, req_ctx->mac_buf_dma_addr,
1800 	AES_BLOCK_SIZE, NS_BIT);
1801 	set_dout_dlli(&desc[idx], mac_result, ctx->authsize, NS_BIT, 1);
1802 	set_queue_last_ind(&desc[idx]);
1803 	set_flow_mode(&desc[idx], DIN_AES_DOUT);
/*
 * Top-level GCM sequence builder.  RFC 4543 requests
 * (plaintext_authenticate_only) copy data src->dst via BYPASS and only
 * GHASH it; GCM/RFC 4106 requests run GHASH setup, AAD hashing, GCTR
 * setup, the combined cipher+hash data pass, and tag finalization.
 * Encrypt uses AES_to_HASH_and_DOUT (hash the ciphertext), decrypt
 * uses AES_and_HASH.
 * NOTE(review): `unsigned int idx` appears unused in the visible lines
 * and return/early-exit statements are elided -- confirm against full file.
 */
1809 static inline int ssi_aead_gcm(
1810 	struct aead_request *req,
1811 	struct cc_hw_desc desc[],
1812 	unsigned int *seq_size)
1814 	struct aead_req_ctx *req_ctx = aead_request_ctx(req);
1815 	unsigned int idx = *seq_size;
1816 	unsigned int cipher_flow_mode;
1818 	if (req_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) {
1819 	cipher_flow_mode = AES_and_HASH;
1820 	} else { /* Encrypt */
1821 	cipher_flow_mode = AES_to_HASH_and_DOUT;
1824 	//in RFC4543 no data to encrypt. just copy data from src to dest.
1825 	if (req_ctx->plaintext_authenticate_only) {
1826 	ssi_aead_process_cipher_data_desc(req, BYPASS, desc, seq_size);
1827 	ssi_aead_gcm_setup_ghash_desc(req, desc, seq_size);
1828 	/* process(ghash) assoc data */
1829 	ssi_aead_create_assoc_desc(req, DIN_HASH, desc, seq_size);
1830 	ssi_aead_gcm_setup_gctr_desc(req, desc, seq_size);
1831 	ssi_aead_process_gcm_result_desc(req, desc, seq_size);
1836 	// for gcm and rfc4106.
1837 	ssi_aead_gcm_setup_ghash_desc(req, desc, seq_size);
1838 	/* process(ghash) assoc data */
1839 	if (req->assoclen > 0)
1840 	ssi_aead_create_assoc_desc(req, DIN_HASH, desc, seq_size);
1841 	ssi_aead_gcm_setup_gctr_desc(req, desc, seq_size);
1842 	/* process(gctr+ghash) */
1843 	if (req_ctx->cryptlen != 0)
1844 	ssi_aead_process_cipher_data_desc(req, cipher_flow_mode, desc, seq_size);
1845 	ssi_aead_process_gcm_result_desc(req, desc, seq_size);
/*
 * Debug helper: dump GCM request state (key, IVs, hkey, mac buffer,
 * length block, src/dst contents) via SSI_LOG_DEBUG/dump_byte_array.
 * No-op for non-GCTR cipher modes.
 * NOTE(review): the `const char *title` parameter line and an early
 * return are elided in this chunk; `title` is used at line "1865".
 */
1852 static inline void ssi_aead_dump_gcm(
1854 	struct aead_request *req)
1856 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1857 	struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
1858 	struct aead_req_ctx *req_ctx = aead_request_ctx(req);
1860 	if (ctx->cipher_mode != DRV_CIPHER_GCTR)
1864 	SSI_LOG_DEBUG("----------------------------------------------------------------------------------");
1865 	SSI_LOG_DEBUG("%s\n", title);
1868 	SSI_LOG_DEBUG("cipher_mode %d, authsize %d, enc_keylen %d, assoclen %d, cryptlen %d\n",
1869 	ctx->cipher_mode, ctx->authsize, ctx->enc_keylen,
1870 	req->assoclen, req_ctx->cryptlen);
1873 	dump_byte_array("mac key", ctx->enckey, 16);
1875 	dump_byte_array("req->iv", req->iv, AES_BLOCK_SIZE);
1877 	dump_byte_array("gcm_iv_inc1", req_ctx->gcm_iv_inc1, AES_BLOCK_SIZE);
1879 	dump_byte_array("gcm_iv_inc2", req_ctx->gcm_iv_inc2, AES_BLOCK_SIZE);
1881 	dump_byte_array("hkey", req_ctx->hkey, AES_BLOCK_SIZE);
1883 	dump_byte_array("mac_buf", req_ctx->mac_buf, AES_BLOCK_SIZE);
1885 	dump_byte_array("gcm_len_block", req_ctx->gcm_len_block.len_a, AES_BLOCK_SIZE);
1887 	if (req->src && req->cryptlen)
1888 	dump_byte_array("req->src", sg_virt(req->src), req->cryptlen + req->assoclen);
1891 	dump_byte_array("req->dst", sg_virt(req->dst), req->cryptlen + ctx->authsize + req->assoclen);
/*
 * Prepare per-request GCM context: zero hkey and mac_buf, derive the
 * J0+2 and J0+1 counter blocks by stamping big-endian 2 then 1 into
 * iv[12..15], and build the GHASH lengths block.  For plain GCM /
 * RFC 4106 the block is len(A)||len(C) in bits; for RFC 4543 all data
 * (AAD+IV+plaintext) counts as AAD and nothing is encrypted.
 * NOTE(review): the `__be64 temp64` declarations and return statement
 * are elided in this chunk.
 */
1895 static int config_gcm_context(struct aead_request *req)
1897 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1898 	struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
1899 	struct aead_req_ctx *req_ctx = aead_request_ctx(req);
1901 	unsigned int cryptlen = (req_ctx->gen_ctx.op_type ==
1902 	DRV_CRYPTO_DIRECTION_ENCRYPT) ?
1904 	(req->cryptlen - ctx->authsize);
1905 	__be32 counter = cpu_to_be32(2);
1907 	SSI_LOG_DEBUG("%s() cryptlen = %d, req->assoclen = %d ctx->authsize = %d\n", __func__, cryptlen, req->assoclen, ctx->authsize);
1909 	memset(req_ctx->hkey, 0, AES_BLOCK_SIZE);
1911 	memset(req_ctx->mac_buf, 0, AES_BLOCK_SIZE);
1913 	memcpy(req->iv + 12, &counter, 4);
1914 	memcpy(req_ctx->gcm_iv_inc2, req->iv, 16);
1916 	counter = cpu_to_be32(1);
1917 	memcpy(req->iv + 12, &counter, 4);
1918 	memcpy(req_ctx->gcm_iv_inc1, req->iv, 16);
1920 	if (!req_ctx->plaintext_authenticate_only) {
1923 	temp64 = cpu_to_be64(req->assoclen * 8);
1924 	memcpy(&req_ctx->gcm_len_block.len_a, &temp64, sizeof(temp64));
1925 	temp64 = cpu_to_be64(cryptlen * 8);
1926 	memcpy(&req_ctx->gcm_len_block.len_c, &temp64, 8);
1927 	} else { //rfc4543=> all data(AAD,IV,Plain) are considered additional data that is nothing is encrypted.
1930 	temp64 = cpu_to_be64((req->assoclen + GCM_BLOCK_RFC4_IV_SIZE + cryptlen) * 8);
1931 	memcpy(&req_ctx->gcm_len_block.len_a, &temp64, sizeof(temp64));
1933 	memcpy(&req_ctx->gcm_len_block.len_c, &temp64, 8);
/*
 * Convert an RFC 4106/4543 request to plain GCM form: splice the
 * 4-byte keyed nonce and the 8-byte per-request IV into ctr_iv, point
 * req->iv at it, and strip the IV bytes from assoclen (the RFC counts
 * them as associated data).
 */
1939 static void ssi_rfc4_gcm_process(struct aead_request *req)
1941 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1942 	struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
1943 	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
1945 	memcpy(areq_ctx->ctr_iv + GCM_BLOCK_RFC4_NONCE_OFFSET, ctx->ctr_nonce, GCM_BLOCK_RFC4_NONCE_SIZE);
1946 	memcpy(areq_ctx->ctr_iv + GCM_BLOCK_RFC4_IV_OFFSET, req->iv, GCM_BLOCK_RFC4_IV_SIZE);
1947 	req->iv = areq_ctx->ctr_iv;
1948 	req->assoclen -= GCM_BLOCK_RFC4_IV_SIZE;
1951 #endif /*SSI_CC_HAS_AES_GCM*/
/*
 * Main AEAD request path.  Validates lengths, prepares the request
 * context and IV (CTR-3686 nonce splice, CCM/GCM IV staging), formats
 * CCM adata / GCM context as needed, DMA-maps all buffers, optionally
 * wires up HW IV generation targets, builds the descriptor sequence
 * for the configured auth mode (HMAC / XCBC / CCM / GCM), and pushes
 * it to the queue via send_request().  Returns -EINPROGRESS on
 * successful async submission; unmaps buffers on any failure after
 * mapping.
 * NOTE(review): `int rc`/`seq_len` declarations, several `break`/`goto
 * exit` lines, case labels (DRV_HASH_SHA1, DRV_HASH_NULL) and the final
 * return are elided in this chunk -- structure inferred from context.
 */
1953 static int ssi_aead_process(struct aead_request *req, enum drv_crypto_direction direct)
1957 	struct cc_hw_desc desc[MAX_AEAD_PROCESS_SEQ];
1958 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1959 	struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
1960 	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
1961 	struct device *dev = &ctx->drvdata->plat_dev->dev;
1962 	struct ssi_crypto_req ssi_req = {};
1964 	SSI_LOG_DEBUG("%s context=%p req=%p iv=%p src=%p src_ofs=%d dst=%p dst_ofs=%d cryptolen=%d\n",
1965 	((direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ? "Encrypt" : "Decrypt"),
1966 	ctx, req, req->iv, sg_virt(req->src), req->src->offset,
1967 	sg_virt(req->dst), req->dst->offset, req->cryptlen);
1969 	/* STAT_PHASE_0: Init and sanity checks */
1971 	/* Check data length according to mode */
1972 	if (unlikely(validate_data_size(ctx, direct, req) != 0)) {
1973 	SSI_LOG_ERR("Unsupported crypt/assoc len %d/%d.\n",
1974 	req->cryptlen, req->assoclen);
1975 	crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_BLOCK_LEN);
1979 	/* Setup DX request structure */
1980 	ssi_req.user_cb = (void *)ssi_aead_complete;
1981 	ssi_req.user_arg = (void *)req;
1983 	/* Setup request context */
1984 	areq_ctx->gen_ctx.op_type = direct;
1985 	areq_ctx->req_authsize = ctx->authsize;
1986 	areq_ctx->cipher_mode = ctx->cipher_mode;
1988 	/* STAT_PHASE_1: Map buffers */
1990 	if (ctx->cipher_mode == DRV_CIPHER_CTR) {
1991 	/* Build CTR IV - Copy nonce from last 4 bytes in
1992 	* CTR key to first 4 bytes in CTR IV
1994 	memcpy(areq_ctx->ctr_iv, ctx->ctr_nonce, CTR_RFC3686_NONCE_SIZE);
1995 	if (!areq_ctx->backup_giv) /*User none-generated IV*/
1996 	memcpy(areq_ctx->ctr_iv + CTR_RFC3686_NONCE_SIZE,
1997 	req->iv, CTR_RFC3686_IV_SIZE);
1998 	/* Initialize counter portion of counter block */
1999 	*(__be32 *)(areq_ctx->ctr_iv + CTR_RFC3686_NONCE_SIZE +
2000 	CTR_RFC3686_IV_SIZE) = cpu_to_be32(1);
2002 	/* Replace with counter iv */
2003 	req->iv = areq_ctx->ctr_iv;
2004 	areq_ctx->hw_iv_size = CTR_RFC3686_BLOCK_SIZE;
2005 	} else if ((ctx->cipher_mode == DRV_CIPHER_CCM) ||
2006 	(ctx->cipher_mode == DRV_CIPHER_GCTR)) {
2007 	areq_ctx->hw_iv_size = AES_BLOCK_SIZE;
2008 	if (areq_ctx->ctr_iv != req->iv) {
2009 	memcpy(areq_ctx->ctr_iv, req->iv, crypto_aead_ivsize(tfm));
2010 	req->iv = areq_ctx->ctr_iv;
2013 	areq_ctx->hw_iv_size = crypto_aead_ivsize(tfm);
2016 	#if SSI_CC_HAS_AES_CCM
2017 	if (ctx->cipher_mode == DRV_CIPHER_CCM) {
2018 	rc = config_ccm_adata(req);
2019 	if (unlikely(rc != 0)) {
2020 	SSI_LOG_ERR("config_ccm_adata() returned with a failure %d!", rc);
2024 	areq_ctx->ccm_hdr_size = ccm_header_size_null;
2027 	areq_ctx->ccm_hdr_size = ccm_header_size_null;
2028 	#endif /*SSI_CC_HAS_AES_CCM*/
2030 	#if SSI_CC_HAS_AES_GCM
2031 	if (ctx->cipher_mode == DRV_CIPHER_GCTR) {
2032 	rc = config_gcm_context(req);
2033 	if (unlikely(rc != 0)) {
2034 	SSI_LOG_ERR("config_gcm_context() returned with a failure %d!", rc);
2038 	#endif /*SSI_CC_HAS_AES_GCM*/
2040 	rc = ssi_buffer_mgr_map_aead_request(ctx->drvdata, req);
2041 	if (unlikely(rc != 0)) {
2042 	SSI_LOG_ERR("map_request() failed\n");
2046 	/* do we need to generate IV? */
2047 	if (areq_ctx->backup_giv) {
2048 	/* set the DMA mapped IV address*/
2049 	if (ctx->cipher_mode == DRV_CIPHER_CTR) {
2050 	ssi_req.ivgen_dma_addr[0] = areq_ctx->gen_ctx.iv_dma_addr + CTR_RFC3686_NONCE_SIZE;
2051 	ssi_req.ivgen_dma_addr_len = 1;
2052 	} else if (ctx->cipher_mode == DRV_CIPHER_CCM) {
2053 	/* In ccm, the IV needs to exist both inside B0 and inside the counter.
2054 	* It is also copied to iv_dma_addr for other reasons (like returning
2056 	* So, using 3 (identical) IV outputs.
2058 	ssi_req.ivgen_dma_addr[0] = areq_ctx->gen_ctx.iv_dma_addr + CCM_BLOCK_IV_OFFSET;
2059 	ssi_req.ivgen_dma_addr[1] = sg_dma_address(&areq_ctx->ccm_adata_sg) + CCM_B0_OFFSET + CCM_BLOCK_IV_OFFSET;
2060 	ssi_req.ivgen_dma_addr[2] = sg_dma_address(&areq_ctx->ccm_adata_sg) + CCM_CTR_COUNT_0_OFFSET + CCM_BLOCK_IV_OFFSET;
2061 	ssi_req.ivgen_dma_addr_len = 3;
2063 	ssi_req.ivgen_dma_addr[0] = areq_ctx->gen_ctx.iv_dma_addr;
2064 	ssi_req.ivgen_dma_addr_len = 1;
2067 	/* set the IV size (8/16 B long)*/
2068 	ssi_req.ivgen_size = crypto_aead_ivsize(tfm);
2071 	/* STAT_PHASE_2: Create sequence */
2073 	/* Load MLLI tables to SRAM if necessary */
2074 	ssi_aead_load_mlli_to_sram(req, desc, &seq_len);
2076 	/*TODO: move seq len by reference */
2077 	switch (ctx->auth_mode) {
2079 	case DRV_HASH_SHA256:
2080 	ssi_aead_hmac_authenc(req, desc, &seq_len);
2082 	case DRV_HASH_XCBC_MAC:
2083 	ssi_aead_xcbc_authenc(req, desc, &seq_len);
2085 	#if (SSI_CC_HAS_AES_CCM || SSI_CC_HAS_AES_GCM)
2087 	#if SSI_CC_HAS_AES_CCM
2088 	if (ctx->cipher_mode == DRV_CIPHER_CCM)
2089 	ssi_aead_ccm(req, desc, &seq_len);
2090 	#endif /*SSI_CC_HAS_AES_CCM*/
2091 	#if SSI_CC_HAS_AES_GCM
2092 	if (ctx->cipher_mode == DRV_CIPHER_GCTR)
2093 	ssi_aead_gcm(req, desc, &seq_len);
2094 	#endif /*SSI_CC_HAS_AES_GCM*/
2098 	SSI_LOG_ERR("Unsupported authenc (%d)\n", ctx->auth_mode);
2099 	ssi_buffer_mgr_unmap_aead_request(dev, req);
2104 	/* STAT_PHASE_3: Lock HW and push sequence */
2106 	rc = send_request(ctx->drvdata, &ssi_req, desc, seq_len, 1);
2108 	if (unlikely(rc != -EINPROGRESS)) {
2109 	SSI_LOG_ERR("send_request() failed (rc=%d)\n", rc);
2110 	ssi_buffer_mgr_unmap_aead_request(dev, req);
/*
 * crypto_aead .encrypt entry point for plain (non-RFC-wrapped) modes:
 * back up the caller's IV, disable IV generation and GCM4543/RFC4543
 * handling, submit, and restore the IV pointer unless the request is
 * in flight (-EINPROGRESS).
 */
2117 static int ssi_aead_encrypt(struct aead_request *req)
2119 	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
2122 	/* No generated IV required */
2123 	areq_ctx->backup_iv = req->iv;
2124 	areq_ctx->backup_giv = NULL;
2125 	areq_ctx->is_gcm4543 = false;
2127 	areq_ctx->plaintext_authenticate_only = false;
2129 	rc = ssi_aead_process(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
2130 	if (rc != -EINPROGRESS)
2131 	req->iv = areq_ctx->backup_iv;
2136 #if SSI_CC_HAS_AES_CCM
2137 static int ssi_rfc4309_ccm_encrypt(struct aead_request *req)
2139 /* Very similar to ssi_aead_encrypt() above. */
2141 struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
2144 if (!valid_assoclen(req)) {
2145 SSI_LOG_ERR("invalid Assoclen:%u\n", req->assoclen);
2149 /* No generated IV required */
2150 areq_ctx->backup_iv = req->iv;
2151 areq_ctx->backup_giv = NULL;
2152 areq_ctx->is_gcm4543 = true;
2154 ssi_rfc4309_ccm_process(req);
2156 rc = ssi_aead_process(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
2157 if (rc != -EINPROGRESS)
2158 req->iv = areq_ctx->backup_iv;
2162 #endif /* SSI_CC_HAS_AES_CCM */
2164 static int ssi_aead_decrypt(struct aead_request *req)
2166 struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
2169 /* No generated IV required */
2170 areq_ctx->backup_iv = req->iv;
2171 areq_ctx->backup_giv = NULL;
2172 areq_ctx->is_gcm4543 = false;
2174 areq_ctx->plaintext_authenticate_only = false;
2176 rc = ssi_aead_process(req, DRV_CRYPTO_DIRECTION_DECRYPT);
2177 if (rc != -EINPROGRESS)
2178 req->iv = areq_ctx->backup_iv;
2183 #if SSI_CC_HAS_AES_CCM
2184 static int ssi_rfc4309_ccm_decrypt(struct aead_request *req)
2186 /* Very similar to ssi_aead_decrypt() above. */
2188 struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
2191 if (!valid_assoclen(req)) {
2192 SSI_LOG_ERR("invalid Assoclen:%u\n", req->assoclen);
2196 /* No generated IV required */
2197 areq_ctx->backup_iv = req->iv;
2198 areq_ctx->backup_giv = NULL;
2200 areq_ctx->is_gcm4543 = true;
2201 ssi_rfc4309_ccm_process(req);
2203 rc = ssi_aead_process(req, DRV_CRYPTO_DIRECTION_DECRYPT);
2204 if (rc != -EINPROGRESS)
2205 req->iv = areq_ctx->backup_iv;
2210 #endif /* SSI_CC_HAS_AES_CCM */
2212 #if SSI_CC_HAS_AES_GCM
2214 static int ssi_rfc4106_gcm_setkey(struct crypto_aead *tfm, const u8 *key, unsigned int keylen)
2216 struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
2219 SSI_LOG_DEBUG("%s() keylen %d, key %p\n", __func__, keylen, key);
2225 memcpy(ctx->ctr_nonce, key + keylen, 4);
2227 rc = ssi_aead_setkey(tfm, key, keylen);
2232 static int ssi_rfc4543_gcm_setkey(struct crypto_aead *tfm, const u8 *key, unsigned int keylen)
2234 struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
2237 SSI_LOG_DEBUG("%s() keylen %d, key %p\n", __func__, keylen, key);
2243 memcpy(ctx->ctr_nonce, key + keylen, 4);
2245 rc = ssi_aead_setkey(tfm, key, keylen);
/*
 * gcm(aes): record the requested ICV (tag) length via the common
 * ssi_aead_setauthsize() helper.
 * NOTE(review): tag-size validation for plain GCM is not visible in this
 * excerpt — confirm it precedes this delegation.
 */
static int ssi_gcm_setauthsize(struct crypto_aead *authenc,
			       unsigned int authsize)
	return ssi_aead_setauthsize(authenc, authsize);
/*
 * rfc4106(gcm(aes)): log and record the requested ICV length through the
 * common ssi_aead_setauthsize() helper.
 * NOTE(review): the RFC 4106 tag-size whitelist is not visible in this
 * excerpt — confirm invalid sizes are rejected before this call.
 */
static int ssi_rfc4106_gcm_setauthsize(struct crypto_aead *authenc,
				       unsigned int authsize)
	SSI_LOG_DEBUG("authsize %d\n", authsize);
	return ssi_aead_setauthsize(authenc, authsize);
/*
 * rfc4543(gcm(aes)) (GMAC): log and record the requested ICV length
 * through the common ssi_aead_setauthsize() helper.
 * NOTE(review): RFC 4543 mandates a full-length tag; the size check is
 * not visible in this excerpt — confirm it precedes this call.
 */
static int ssi_rfc4543_gcm_setauthsize(struct crypto_aead *authenc,
				       unsigned int authsize)
	SSI_LOG_DEBUG("authsize %d\n", authsize);
	return ssi_aead_setauthsize(authenc, authsize);
2297 static int ssi_rfc4106_gcm_encrypt(struct aead_request *req)
2299 /* Very similar to ssi_aead_encrypt() above. */
2301 struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
2304 if (!valid_assoclen(req)) {
2305 SSI_LOG_ERR("invalid Assoclen:%u\n", req->assoclen);
2309 /* No generated IV required */
2310 areq_ctx->backup_iv = req->iv;
2311 areq_ctx->backup_giv = NULL;
2313 areq_ctx->plaintext_authenticate_only = false;
2315 ssi_rfc4_gcm_process(req);
2316 areq_ctx->is_gcm4543 = true;
2318 rc = ssi_aead_process(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
2319 if (rc != -EINPROGRESS)
2320 req->iv = areq_ctx->backup_iv;
2325 static int ssi_rfc4543_gcm_encrypt(struct aead_request *req)
2327 /* Very similar to ssi_aead_encrypt() above. */
2329 struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
2332 //plaintext is not encryped with rfc4543
2333 areq_ctx->plaintext_authenticate_only = true;
2335 /* No generated IV required */
2336 areq_ctx->backup_iv = req->iv;
2337 areq_ctx->backup_giv = NULL;
2339 ssi_rfc4_gcm_process(req);
2340 areq_ctx->is_gcm4543 = true;
2342 rc = ssi_aead_process(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
2343 if (rc != -EINPROGRESS)
2344 req->iv = areq_ctx->backup_iv;
2349 static int ssi_rfc4106_gcm_decrypt(struct aead_request *req)
2351 /* Very similar to ssi_aead_decrypt() above. */
2353 struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
2356 if (!valid_assoclen(req)) {
2357 SSI_LOG_ERR("invalid Assoclen:%u\n", req->assoclen);
2361 /* No generated IV required */
2362 areq_ctx->backup_iv = req->iv;
2363 areq_ctx->backup_giv = NULL;
2365 areq_ctx->plaintext_authenticate_only = false;
2367 ssi_rfc4_gcm_process(req);
2368 areq_ctx->is_gcm4543 = true;
2370 rc = ssi_aead_process(req, DRV_CRYPTO_DIRECTION_DECRYPT);
2371 if (rc != -EINPROGRESS)
2372 req->iv = areq_ctx->backup_iv;
2377 static int ssi_rfc4543_gcm_decrypt(struct aead_request *req)
2379 /* Very similar to ssi_aead_decrypt() above. */
2381 struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
2384 //plaintext is not decryped with rfc4543
2385 areq_ctx->plaintext_authenticate_only = true;
2387 /* No generated IV required */
2388 areq_ctx->backup_iv = req->iv;
2389 areq_ctx->backup_giv = NULL;
2391 ssi_rfc4_gcm_process(req);
2392 areq_ctx->is_gcm4543 = true;
2394 rc = ssi_aead_process(req, DRV_CRYPTO_DIRECTION_DECRYPT);
2395 if (rc != -EINPROGRESS)
2396 req->iv = areq_ctx->backup_iv;
2400 #endif /* SSI_CC_HAS_AES_GCM */
2402 /* DX Block aead alg */
/*
 * Template table consumed by ssi_aead_create_alg()/ssi_aead_alloc().
 * Each entry pairs the crypto-API facing identity (names, block/IV/tag
 * sizes, aead_alg callbacks) with the CryptoCell programming parameters
 * (cipher_mode / flow_mode / auth_mode).
 */
static struct ssi_alg_template aead_algs[] = {
	/* AES-CBC authenticated with HMAC-SHA1 */
	.name = "authenc(hmac(sha1),cbc(aes))",
	.driver_name = "authenc-hmac-sha1-cbc-aes-dx",
	.blocksize = AES_BLOCK_SIZE,
	.type = CRYPTO_ALG_TYPE_AEAD,
	.setkey = ssi_aead_setkey,
	.setauthsize = ssi_aead_setauthsize,
	.encrypt = ssi_aead_encrypt,
	.decrypt = ssi_aead_decrypt,
	.init = ssi_aead_init,
	.exit = ssi_aead_exit,
	.ivsize = AES_BLOCK_SIZE,
	.maxauthsize = SHA1_DIGEST_SIZE,
	.cipher_mode = DRV_CIPHER_CBC,
	.flow_mode = S_DIN_to_AES,
	.auth_mode = DRV_HASH_SHA1,
	/* 3DES-CBC authenticated with HMAC-SHA1 */
	.name = "authenc(hmac(sha1),cbc(des3_ede))",
	.driver_name = "authenc-hmac-sha1-cbc-des3-dx",
	.blocksize = DES3_EDE_BLOCK_SIZE,
	.type = CRYPTO_ALG_TYPE_AEAD,
	.setkey = ssi_aead_setkey,
	.setauthsize = ssi_aead_setauthsize,
	.encrypt = ssi_aead_encrypt,
	.decrypt = ssi_aead_decrypt,
	.init = ssi_aead_init,
	.exit = ssi_aead_exit,
	.ivsize = DES3_EDE_BLOCK_SIZE,
	.maxauthsize = SHA1_DIGEST_SIZE,
	.cipher_mode = DRV_CIPHER_CBC,
	.flow_mode = S_DIN_to_DES,
	.auth_mode = DRV_HASH_SHA1,
	/* AES-CBC authenticated with HMAC-SHA256 */
	.name = "authenc(hmac(sha256),cbc(aes))",
	.driver_name = "authenc-hmac-sha256-cbc-aes-dx",
	.blocksize = AES_BLOCK_SIZE,
	.type = CRYPTO_ALG_TYPE_AEAD,
	.setkey = ssi_aead_setkey,
	.setauthsize = ssi_aead_setauthsize,
	.encrypt = ssi_aead_encrypt,
	.decrypt = ssi_aead_decrypt,
	.init = ssi_aead_init,
	.exit = ssi_aead_exit,
	.ivsize = AES_BLOCK_SIZE,
	.maxauthsize = SHA256_DIGEST_SIZE,
	.cipher_mode = DRV_CIPHER_CBC,
	.flow_mode = S_DIN_to_AES,
	.auth_mode = DRV_HASH_SHA256,
	/* 3DES-CBC authenticated with HMAC-SHA256 */
	.name = "authenc(hmac(sha256),cbc(des3_ede))",
	.driver_name = "authenc-hmac-sha256-cbc-des3-dx",
	.blocksize = DES3_EDE_BLOCK_SIZE,
	.type = CRYPTO_ALG_TYPE_AEAD,
	.setkey = ssi_aead_setkey,
	.setauthsize = ssi_aead_setauthsize,
	.encrypt = ssi_aead_encrypt,
	.decrypt = ssi_aead_decrypt,
	.init = ssi_aead_init,
	.exit = ssi_aead_exit,
	.ivsize = DES3_EDE_BLOCK_SIZE,
	.maxauthsize = SHA256_DIGEST_SIZE,
	.cipher_mode = DRV_CIPHER_CBC,
	.flow_mode = S_DIN_to_DES,
	.auth_mode = DRV_HASH_SHA256,
	/* AES-CBC authenticated with AES-XCBC-MAC */
	.name = "authenc(xcbc(aes),cbc(aes))",
	.driver_name = "authenc-xcbc-aes-cbc-aes-dx",
	.blocksize = AES_BLOCK_SIZE,
	.type = CRYPTO_ALG_TYPE_AEAD,
	.setkey = ssi_aead_setkey,
	.setauthsize = ssi_aead_setauthsize,
	.encrypt = ssi_aead_encrypt,
	.decrypt = ssi_aead_decrypt,
	.init = ssi_aead_init,
	.exit = ssi_aead_exit,
	.ivsize = AES_BLOCK_SIZE,
	.maxauthsize = AES_BLOCK_SIZE,
	.cipher_mode = DRV_CIPHER_CBC,
	.flow_mode = S_DIN_to_AES,
	.auth_mode = DRV_HASH_XCBC_MAC,
	/* AES-CTR (RFC 3686) authenticated with HMAC-SHA1 */
	.name = "authenc(hmac(sha1),rfc3686(ctr(aes)))",
	.driver_name = "authenc-hmac-sha1-rfc3686-ctr-aes-dx",
	.type = CRYPTO_ALG_TYPE_AEAD,
	.setkey = ssi_aead_setkey,
	.setauthsize = ssi_aead_setauthsize,
	.encrypt = ssi_aead_encrypt,
	.decrypt = ssi_aead_decrypt,
	.init = ssi_aead_init,
	.exit = ssi_aead_exit,
	.ivsize = CTR_RFC3686_IV_SIZE,
	.maxauthsize = SHA1_DIGEST_SIZE,
	.cipher_mode = DRV_CIPHER_CTR,
	.flow_mode = S_DIN_to_AES,
	.auth_mode = DRV_HASH_SHA1,
	/* AES-CTR (RFC 3686) authenticated with HMAC-SHA256 */
	.name = "authenc(hmac(sha256),rfc3686(ctr(aes)))",
	.driver_name = "authenc-hmac-sha256-rfc3686-ctr-aes-dx",
	.type = CRYPTO_ALG_TYPE_AEAD,
	.setkey = ssi_aead_setkey,
	.setauthsize = ssi_aead_setauthsize,
	.encrypt = ssi_aead_encrypt,
	.decrypt = ssi_aead_decrypt,
	.init = ssi_aead_init,
	.exit = ssi_aead_exit,
	.ivsize = CTR_RFC3686_IV_SIZE,
	.maxauthsize = SHA256_DIGEST_SIZE,
	.cipher_mode = DRV_CIPHER_CTR,
	.flow_mode = S_DIN_to_AES,
	.auth_mode = DRV_HASH_SHA256,
	/* AES-CTR (RFC 3686) authenticated with AES-XCBC-MAC */
	.name = "authenc(xcbc(aes),rfc3686(ctr(aes)))",
	.driver_name = "authenc-xcbc-aes-rfc3686-ctr-aes-dx",
	.type = CRYPTO_ALG_TYPE_AEAD,
	.setkey = ssi_aead_setkey,
	.setauthsize = ssi_aead_setauthsize,
	.encrypt = ssi_aead_encrypt,
	.decrypt = ssi_aead_decrypt,
	.init = ssi_aead_init,
	.exit = ssi_aead_exit,
	.ivsize = CTR_RFC3686_IV_SIZE,
	.maxauthsize = AES_BLOCK_SIZE,
	.cipher_mode = DRV_CIPHER_CTR,
	.flow_mode = S_DIN_to_AES,
	.auth_mode = DRV_HASH_XCBC_MAC,
#if SSI_CC_HAS_AES_CCM
	/* Plain AES-CCM */
	.driver_name = "ccm-aes-dx",
	.type = CRYPTO_ALG_TYPE_AEAD,
	.setkey = ssi_aead_setkey,
	.setauthsize = ssi_ccm_setauthsize,
	.encrypt = ssi_aead_encrypt,
	.decrypt = ssi_aead_decrypt,
	.init = ssi_aead_init,
	.exit = ssi_aead_exit,
	.ivsize = AES_BLOCK_SIZE,
	.maxauthsize = AES_BLOCK_SIZE,
	.cipher_mode = DRV_CIPHER_CCM,
	.flow_mode = S_DIN_to_AES,
	.auth_mode = DRV_HASH_NULL,
	/* AES-CCM with RFC 4309 implicit-nonce wrapping (IPsec ESP) */
	.name = "rfc4309(ccm(aes))",
	.driver_name = "rfc4309-ccm-aes-dx",
	.type = CRYPTO_ALG_TYPE_AEAD,
	.setkey = ssi_rfc4309_ccm_setkey,
	.setauthsize = ssi_rfc4309_ccm_setauthsize,
	.encrypt = ssi_rfc4309_ccm_encrypt,
	.decrypt = ssi_rfc4309_ccm_decrypt,
	.init = ssi_aead_init,
	.exit = ssi_aead_exit,
	.ivsize = CCM_BLOCK_IV_SIZE,
	.maxauthsize = AES_BLOCK_SIZE,
	.cipher_mode = DRV_CIPHER_CCM,
	.flow_mode = S_DIN_to_AES,
	.auth_mode = DRV_HASH_NULL,
#endif /*SSI_CC_HAS_AES_CCM*/
#if SSI_CC_HAS_AES_GCM
	/* Plain AES-GCM */
	.driver_name = "gcm-aes-dx",
	.type = CRYPTO_ALG_TYPE_AEAD,
	.setkey = ssi_aead_setkey,
	.setauthsize = ssi_gcm_setauthsize,
	.encrypt = ssi_aead_encrypt,
	.decrypt = ssi_aead_decrypt,
	.init = ssi_aead_init,
	.exit = ssi_aead_exit,
	.maxauthsize = AES_BLOCK_SIZE,
	.cipher_mode = DRV_CIPHER_GCTR,
	.flow_mode = S_DIN_to_AES,
	.auth_mode = DRV_HASH_NULL,
	/* AES-GCM with RFC 4106 implicit-nonce wrapping (IPsec ESP) */
	.name = "rfc4106(gcm(aes))",
	.driver_name = "rfc4106-gcm-aes-dx",
	.type = CRYPTO_ALG_TYPE_AEAD,
	.setkey = ssi_rfc4106_gcm_setkey,
	.setauthsize = ssi_rfc4106_gcm_setauthsize,
	.encrypt = ssi_rfc4106_gcm_encrypt,
	.decrypt = ssi_rfc4106_gcm_decrypt,
	.init = ssi_aead_init,
	.exit = ssi_aead_exit,
	.ivsize = GCM_BLOCK_RFC4_IV_SIZE,
	.maxauthsize = AES_BLOCK_SIZE,
	.cipher_mode = DRV_CIPHER_GCTR,
	.flow_mode = S_DIN_to_AES,
	.auth_mode = DRV_HASH_NULL,
	/* AES-GMAC (RFC 4543): authentication-only GCM variant */
	.name = "rfc4543(gcm(aes))",
	.driver_name = "rfc4543-gcm-aes-dx",
	.type = CRYPTO_ALG_TYPE_AEAD,
	.setkey = ssi_rfc4543_gcm_setkey,
	.setauthsize = ssi_rfc4543_gcm_setauthsize,
	.encrypt = ssi_rfc4543_gcm_encrypt,
	.decrypt = ssi_rfc4543_gcm_decrypt,
	.init = ssi_aead_init,
	.exit = ssi_aead_exit,
	.ivsize = GCM_BLOCK_RFC4_IV_SIZE,
	.maxauthsize = AES_BLOCK_SIZE,
	.cipher_mode = DRV_CIPHER_GCTR,
	.flow_mode = S_DIN_to_AES,
	.auth_mode = DRV_HASH_NULL,
#endif /*SSI_CC_HAS_AES_GCM*/
2657 static struct ssi_crypto_alg *ssi_aead_create_alg(struct ssi_alg_template *template)
2659 struct ssi_crypto_alg *t_alg;
2660 struct aead_alg *alg;
2662 t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
2664 SSI_LOG_ERR("failed to allocate t_alg\n");
2665 return ERR_PTR(-ENOMEM);
2667 alg = &template->template_aead;
2669 snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->name);
2670 snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
2671 template->driver_name);
2672 alg->base.cra_module = THIS_MODULE;
2673 alg->base.cra_priority = SSI_CRA_PRIO;
2675 alg->base.cra_ctxsize = sizeof(struct ssi_aead_ctx);
2676 alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
2678 alg->init = ssi_aead_init;
2679 alg->exit = ssi_aead_exit;
2681 t_alg->aead_alg = *alg;
2683 t_alg->cipher_mode = template->cipher_mode;
2684 t_alg->flow_mode = template->flow_mode;
2685 t_alg->auth_mode = template->auth_mode;
2690 int ssi_aead_free(struct ssi_drvdata *drvdata)
2692 struct ssi_crypto_alg *t_alg, *n;
2693 struct ssi_aead_handle *aead_handle =
2694 (struct ssi_aead_handle *)drvdata->aead_handle;
2697 /* Remove registered algs */
2698 list_for_each_entry_safe(t_alg, n, &aead_handle->aead_list, entry) {
2699 crypto_unregister_aead(&t_alg->aead_alg);
2700 list_del(&t_alg->entry);
2704 drvdata->aead_handle = NULL;
/*
 * Bring up the AEAD subsystem: allocate the handle, reserve an SRAM
 * workspace for HMAC intermediate digests, then create and register one
 * crypto-API algorithm per aead_algs[] template, tracking each on
 * aead_handle->aead_list so ssi_aead_free() can unwind them.
 * (Error-unwind paths of this function fall outside this excerpt.)
 */
int ssi_aead_alloc(struct ssi_drvdata *drvdata)
	struct ssi_aead_handle *aead_handle;
	struct ssi_crypto_alg *t_alg;
	aead_handle = kmalloc(sizeof(*aead_handle), GFP_KERNEL);
	drvdata->aead_handle = aead_handle;
	/* Reserve SRAM scratch space sized for the largest HMAC digest */
	aead_handle->sram_workspace_addr = ssi_sram_mgr_alloc(
		drvdata, MAX_HMAC_DIGEST_SIZE);
	if (aead_handle->sram_workspace_addr == NULL_SRAM_ADDR) {
		SSI_LOG_ERR("SRAM pool exhausted\n");
	INIT_LIST_HEAD(&aead_handle->aead_list);
	/* Instantiate and register every template; keep wrappers on the list */
	for (alg = 0; alg < ARRAY_SIZE(aead_algs); alg++) {
		t_alg = ssi_aead_create_alg(&aead_algs[alg]);
		if (IS_ERR(t_alg)) {
			rc = PTR_ERR(t_alg);
			SSI_LOG_ERR("%s alg allocation failed\n",
				    aead_algs[alg].driver_name);
		t_alg->drvdata = drvdata;
		rc = crypto_register_aead(&t_alg->aead_alg);
		if (unlikely(rc != 0)) {
			SSI_LOG_ERR("%s alg registration failed\n",
				    t_alg->aead_alg.base.cra_driver_name);
		list_add_tail(&t_alg->entry, &aead_handle->aead_list);
		SSI_LOG_DEBUG("Registered %s\n", t_alg->aead_alg.base.cra_driver_name);
	/* Failure path: unwind everything registered so far */
	ssi_aead_free(drvdata);