// SPDX-License-Identifier: GPL-2.0
/* Marvell OcteonTX CPT driver
 *
 * Copyright (C) 2019 Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <crypto/aes.h>
#include <crypto/authenc.h>
#include <crypto/cryptd.h>
#include <crypto/des.h>
#include <crypto/internal/aead.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>
#include <crypto/xts.h>
#include <crypto/scatterwalk.h>
#include <linux/rtnetlink.h>
#include <linux/sort.h>
#include <linux/module.h>
#include "otx_cptvf.h"
#include "otx_cptvf_algs.h"
#include "otx_cptvf_reqmgr.h"

#define CPT_MAX_VF_NUM 64
/* Size of salt in AES GCM mode */
#define AES_GCM_SALT_SIZE 4
/* Size of IV in AES GCM mode */
#define AES_GCM_IV_SIZE 8
/* Size of ICV (Integrity Check Value) in AES GCM mode */
#define AES_GCM_ICV_SIZE 16
/* Offset of IV in AES GCM mode */
#define AES_GCM_IV_OFFSET 8
#define CONTROL_WORD_LEN 8
#define KEY2_OFFSET 48
#define DMA_MODE_FLAG(dma_mode) \
        (((dma_mode) == OTX_CPT_DMA_GATHER_SCATTER) ? (1 << 7) : 0)
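/*
 * Note: the request paths below always OR DMA_MODE_FLAG() into the opcode
 * major field, so bit 7 of the major opcode appears to be how the CPT
 * microcode is told that DPTR/RPTR reference gather/scatter component lists
 * rather than flat buffers (an assumption based on usage here, not on a
 * documented opcode map).
 */
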
/* Truncated SHA digest size */
#define SHA1_TRUNC_DIGEST_SIZE 12
#define SHA256_TRUNC_DIGEST_SIZE 16
#define SHA384_TRUNC_DIGEST_SIZE 24
#define SHA512_TRUNC_DIGEST_SIZE 32
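/*
 * The truncated sizes above correspond to the truncated HMAC lengths used by
 * IPsec-style authenc transforms (e.g. 12 bytes == HMAC-SHA1-96), which is
 * why otx_cpt_aead_set_authsize() accepts them alongside the full digest
 * sizes.
 */
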
static DEFINE_MUTEX(mutex);
static int is_crypto_registered;

struct cpt_device_desc {
        enum otx_cptpf_type pf_type;
        struct pci_dev *dev;
        int num_queues;
};

struct cpt_device_table {
        atomic_t count;
        struct cpt_device_desc desc[CPT_MAX_VF_NUM];
};

static struct cpt_device_table se_devices = {
        .count = ATOMIC_INIT(0)
};

static struct cpt_device_table ae_devices = {
        .count = ATOMIC_INIT(0)
};

static inline int get_se_device(struct pci_dev **pdev, int *cpu_num)
{
        int count, ret = 0;

        count = atomic_read(&se_devices.count);
        if (count < 1)
                return -ENODEV;

        *cpu_num = get_cpu();

        if (se_devices.desc[0].pf_type == OTX_CPT_SE) {
                /*
                 * On OcteonTX platform there is one CPT instruction queue
                 * bound to each VF. We get maximum performance if one CPT
                 * queue is available for each cpu, otherwise CPT queues need
                 * to be shared between cpus.
                 */
                if (*cpu_num >= count)
                        *cpu_num %= count;
                *pdev = se_devices.desc[*cpu_num].dev;
        } else {
                pr_err("Unknown PF type %d\n", se_devices.desc[0].pf_type);
                ret = -EINVAL;
        }
        put_cpu();

        return ret;
}
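/*
 * Example of the cpu -> queue mapping done by get_se_device(): with four SE
 * VFs registered, cpu 2 is served by se_devices.desc[2], while cpu 5 wraps
 * around to desc[1] (5 % 4). The modulo only kicks in once there are more
 * CPUs than CPT queues.
 */
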
static inline int validate_hmac_cipher_null(struct otx_cpt_req_info *cpt_req)
{
        struct otx_cpt_req_ctx *rctx;
        struct aead_request *req;
        struct crypto_aead *tfm;

        req = container_of(cpt_req->areq, struct aead_request, base);
        tfm = crypto_aead_reqtfm(req);
        rctx = aead_request_ctx_dma(req);
        if (memcmp(rctx->fctx.hmac.s.hmac_calc,
                   rctx->fctx.hmac.s.hmac_recv,
                   crypto_aead_authsize(tfm)) != 0)
                return -EBADMSG;

        return 0;
}

static void otx_cpt_aead_callback(int status, void *arg1, void *arg2)
{
        struct otx_cpt_info_buffer *cpt_info = arg2;
        struct crypto_async_request *areq = arg1;
        struct otx_cpt_req_info *cpt_req;
        struct pci_dev *pdev;

        if (!cpt_info)
                goto complete;

        cpt_req = cpt_info->req;
        if (!status) {
                /*
                 * When selected cipher is NULL we need to manually
                 * verify whether calculated hmac value matches
                 * received hmac value
                 */
                if (cpt_req->req_type == OTX_CPT_AEAD_ENC_DEC_NULL_REQ &&
                    !cpt_req->is_enc)
                        status = validate_hmac_cipher_null(cpt_req);
        }
        pdev = cpt_info->pdev;
        do_request_cleanup(pdev, cpt_info);

complete:
        if (areq)
                crypto_request_complete(areq, status);
}

static void output_iv_copyback(struct crypto_async_request *areq)
{
        struct otx_cpt_req_info *req_info;
        struct skcipher_request *sreq;
        struct crypto_skcipher *stfm;
        struct otx_cpt_req_ctx *rctx;
        struct otx_cpt_enc_ctx *ctx;
        u32 start, ivsize;

        sreq = container_of(areq, struct skcipher_request, base);
        stfm = crypto_skcipher_reqtfm(sreq);
        ctx = crypto_skcipher_ctx(stfm);
        if (ctx->cipher_type == OTX_CPT_AES_CBC ||
            ctx->cipher_type == OTX_CPT_DES3_CBC) {
                rctx = skcipher_request_ctx_dma(sreq);
                req_info = &rctx->cpt_req;
                ivsize = crypto_skcipher_ivsize(stfm);
                start = sreq->cryptlen - ivsize;

                if (req_info->is_enc) {
                        scatterwalk_map_and_copy(sreq->iv, sreq->dst, start,
                                                 ivsize, 0);
                } else {
                        if (sreq->src != sreq->dst) {
                                scatterwalk_map_and_copy(sreq->iv, sreq->src,
                                                         start, ivsize, 0);
                        } else {
                                memcpy(sreq->iv, req_info->iv_out, ivsize);
                                kfree(req_info->iv_out);
                        }
                }
        }
}

static void otx_cpt_skcipher_callback(int status, void *arg1, void *arg2)
{
        struct otx_cpt_info_buffer *cpt_info = arg2;
        struct crypto_async_request *areq = arg1;
        struct pci_dev *pdev;

        if (areq) {
                if (!status)
                        output_iv_copyback(areq);
                if (cpt_info) {
                        pdev = cpt_info->pdev;
                        do_request_cleanup(pdev, cpt_info);
                }
                crypto_request_complete(areq, status);
        }
}

static inline void update_input_data(struct otx_cpt_req_info *req_info,
                                     struct scatterlist *inp_sg,
                                     u32 nbytes, u32 *argcnt)
{
        req_info->req.dlen += nbytes;

        while (nbytes) {
                u32 len = min(nbytes, inp_sg->length);
                u8 *ptr = sg_virt(inp_sg);

                req_info->in[*argcnt].vptr = (void *)ptr;
                req_info->in[*argcnt].size = len;
                nbytes -= len;
                ++(*argcnt);
                inp_sg = sg_next(inp_sg);
        }
}

static inline void update_output_data(struct otx_cpt_req_info *req_info,
                                      struct scatterlist *outp_sg,
                                      u32 offset, u32 nbytes, u32 *argcnt)
{
        req_info->rlen += nbytes;

        while (nbytes) {
                u32 len = min(nbytes, outp_sg->length - offset);
                u8 *ptr = sg_virt(outp_sg);

                req_info->out[*argcnt].vptr = (void *) (ptr + offset);
                req_info->out[*argcnt].size = len;
                nbytes -= len;
                ++(*argcnt);
                offset = 0;
                outp_sg = sg_next(outp_sg);
        }
}

static inline u32 create_ctx_hdr(struct skcipher_request *req, u32 enc,
                                 u32 *argcnt)
{
        struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
        struct otx_cpt_req_ctx *rctx = skcipher_request_ctx_dma(req);
        struct otx_cpt_req_info *req_info = &rctx->cpt_req;
        struct crypto_tfm *tfm = crypto_skcipher_tfm(stfm);
        struct otx_cpt_enc_ctx *ctx = crypto_tfm_ctx(tfm);
        struct otx_cpt_fc_ctx *fctx = &rctx->fctx;
        int ivsize = crypto_skcipher_ivsize(stfm);
        u32 start = req->cryptlen - ivsize;
        gfp_t flags;

        flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
                        GFP_KERNEL : GFP_ATOMIC;
        req_info->ctrl.s.dma_mode = OTX_CPT_DMA_GATHER_SCATTER;
        req_info->ctrl.s.se_req = OTX_CPT_SE_CORE_REQ;

        req_info->req.opcode.s.major = OTX_CPT_MAJOR_OP_FC |
                                DMA_MODE_FLAG(OTX_CPT_DMA_GATHER_SCATTER);
        if (enc)
                req_info->req.opcode.s.minor = 2;
        else
                req_info->req.opcode.s.minor = 3;
        if ((ctx->cipher_type == OTX_CPT_AES_CBC ||
             ctx->cipher_type == OTX_CPT_DES3_CBC) &&
            req->src == req->dst) {
                req_info->iv_out = kmalloc(ivsize, flags);
                if (!req_info->iv_out)
                        return -ENOMEM;

                scatterwalk_map_and_copy(req_info->iv_out, req->src,
                                         start, ivsize, 0);
        }
        /* Encryption data length */
        req_info->req.param1 = req->cryptlen;
        /* Authentication data length */
        req_info->req.param2 = 0;

        fctx->enc.enc_ctrl.e.enc_cipher = ctx->cipher_type;
        fctx->enc.enc_ctrl.e.aes_key = ctx->key_type;
        fctx->enc.enc_ctrl.e.iv_source = OTX_CPT_FROM_CPTR;

        if (ctx->cipher_type == OTX_CPT_AES_XTS)
                memcpy(fctx->enc.encr_key, ctx->enc_key, ctx->key_len * 2);
        else
                memcpy(fctx->enc.encr_key, ctx->enc_key, ctx->key_len);

        memcpy(fctx->enc.encr_iv, req->iv, crypto_skcipher_ivsize(stfm));

        fctx->enc.enc_ctrl.flags = cpu_to_be64(fctx->enc.enc_ctrl.cflags);

        /*
         * Storing Packet Data Information in offset
         * Control Word First 8 bytes
         */
        req_info->in[*argcnt].vptr = (u8 *)&rctx->ctrl_word;
        req_info->in[*argcnt].size = CONTROL_WORD_LEN;
        req_info->req.dlen += CONTROL_WORD_LEN;
        ++(*argcnt);

        req_info->in[*argcnt].vptr = (u8 *)fctx;
        req_info->in[*argcnt].size = sizeof(struct otx_cpt_fc_ctx);
        req_info->req.dlen += sizeof(struct otx_cpt_fc_ctx);
        ++(*argcnt);

        return 0;
}
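/*
 * On return from create_ctx_hdr() the gather list is laid out as:
 *   in[0]   - 8-byte control word
 *   in[1]   - struct otx_cpt_fc_ctx (cipher config, key material, IV)
 *   in[2..] - data segments appended afterwards by update_input_data()
 */
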
static inline u32 create_input_list(struct skcipher_request *req, u32 enc,
                                    u32 enc_iv_len)
{
        struct otx_cpt_req_ctx *rctx = skcipher_request_ctx_dma(req);
        struct otx_cpt_req_info *req_info = &rctx->cpt_req;
        u32 argcnt = 0;
        int ret;

        ret = create_ctx_hdr(req, enc, &argcnt);
        if (ret)
                return ret;

        update_input_data(req_info, req->src, req->cryptlen, &argcnt);
        req_info->incnt = argcnt;

        return 0;
}

static inline void create_output_list(struct skcipher_request *req,
                                      u32 enc_iv_len)
{
        struct otx_cpt_req_ctx *rctx = skcipher_request_ctx_dma(req);
        struct otx_cpt_req_info *req_info = &rctx->cpt_req;
        u32 argcnt = 0;

        /*
         * OUTPUT Buffer Processing
         * AES encryption/decryption output would be
         * received in the following format
         *
         * ------IV--------|------ENCRYPTED/DECRYPTED DATA-----|
         * [ 16 Bytes/     [   Request Enc/Dec/ DATA Len AES CBC ]
         */
        update_output_data(req_info, req->dst, 0, req->cryptlen, &argcnt);
        req_info->outcnt = argcnt;
}
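/*
 * The scatter list above covers exactly req->cryptlen bytes; the CBC
 * chaining value needed by a subsequent request is not part of it and is
 * instead recovered in output_iv_copyback() once the request completes.
 */
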
static inline int cpt_enc_dec(struct skcipher_request *req, u32 enc)
{
        struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
        struct otx_cpt_req_ctx *rctx = skcipher_request_ctx_dma(req);
        struct otx_cpt_req_info *req_info = &rctx->cpt_req;
        u32 enc_iv_len = crypto_skcipher_ivsize(stfm);
        struct pci_dev *pdev;
        int status, cpu_num;

        /* Validate that request doesn't exceed maximum CPT supported size */
        if (req->cryptlen > OTX_CPT_MAX_REQ_SIZE)
                return -E2BIG;

        /* Clear control words */
        rctx->ctrl_word.flags = 0;
        rctx->fctx.enc.enc_ctrl.flags = 0;

        status = create_input_list(req, enc, enc_iv_len);
        if (status)
                return status;
        create_output_list(req, enc_iv_len);

        status = get_se_device(&pdev, &cpu_num);
        if (status)
                return status;

        req_info->callback = (void *)otx_cpt_skcipher_callback;
        req_info->areq = &req->base;
        req_info->req_type = OTX_CPT_ENC_DEC_REQ;
        req_info->is_enc = enc;
        req_info->is_trunc_hmac = false;
        req_info->ctrl.s.grp = 0;

        /*
         * We perform an asynchronous send and once
         * the request is completed the driver will
         * notify through the registered callback functions
         */
        status = otx_cpt_do_request(pdev, req_info, cpu_num);

        return status;
}

static int otx_cpt_skcipher_encrypt(struct skcipher_request *req)
{
        return cpt_enc_dec(req, true);
}

static int otx_cpt_skcipher_decrypt(struct skcipher_request *req)
{
        return cpt_enc_dec(req, false);
}

static int otx_cpt_skcipher_xts_setkey(struct crypto_skcipher *tfm,
                                       const u8 *key, u32 keylen)
{
        struct otx_cpt_enc_ctx *ctx = crypto_skcipher_ctx(tfm);
        const u8 *key2 = key + (keylen / 2);
        const u8 *key1 = key;
        int ret;

        ret = xts_verify_key(tfm, key, keylen);
        if (ret)
                return ret;
        ctx->key_len = keylen;
        memcpy(ctx->enc_key, key1, keylen / 2);
        memcpy(ctx->enc_key + KEY2_OFFSET, key2, keylen / 2);
        ctx->cipher_type = OTX_CPT_AES_XTS;
        switch (ctx->key_len) {
        case 2 * AES_KEYSIZE_128:
                ctx->key_type = OTX_CPT_AES_128_BIT;
                break;
        case 2 * AES_KEYSIZE_256:
                ctx->key_type = OTX_CPT_AES_256_BIT;
                break;
        default:
                return -EINVAL;
        }

        return 0;
}

static int cpt_des_setkey(struct crypto_skcipher *tfm, const u8 *key,
                          u32 keylen, u8 cipher_type)
{
        struct otx_cpt_enc_ctx *ctx = crypto_skcipher_ctx(tfm);

        if (keylen != DES3_EDE_KEY_SIZE)
                return -EINVAL;

        ctx->key_len = keylen;
        ctx->cipher_type = cipher_type;

        memcpy(ctx->enc_key, key, keylen);

        return 0;
}

static int cpt_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
                          u32 keylen, u8 cipher_type)
{
        struct otx_cpt_enc_ctx *ctx = crypto_skcipher_ctx(tfm);

        switch (keylen) {
        case AES_KEYSIZE_128:
                ctx->key_type = OTX_CPT_AES_128_BIT;
                break;
        case AES_KEYSIZE_192:
                ctx->key_type = OTX_CPT_AES_192_BIT;
                break;
        case AES_KEYSIZE_256:
                ctx->key_type = OTX_CPT_AES_256_BIT;
                break;
        default:
                return -EINVAL;
        }
        ctx->key_len = keylen;
        ctx->cipher_type = cipher_type;

        memcpy(ctx->enc_key, key, keylen);

        return 0;
}

static int otx_cpt_skcipher_cbc_aes_setkey(struct crypto_skcipher *tfm,
                                           const u8 *key, u32 keylen)
{
        return cpt_aes_setkey(tfm, key, keylen, OTX_CPT_AES_CBC);
}

static int otx_cpt_skcipher_ecb_aes_setkey(struct crypto_skcipher *tfm,
                                           const u8 *key, u32 keylen)
{
        return cpt_aes_setkey(tfm, key, keylen, OTX_CPT_AES_ECB);
}

static int otx_cpt_skcipher_cfb_aes_setkey(struct crypto_skcipher *tfm,
                                           const u8 *key, u32 keylen)
{
        return cpt_aes_setkey(tfm, key, keylen, OTX_CPT_AES_CFB);
}

static int otx_cpt_skcipher_cbc_des3_setkey(struct crypto_skcipher *tfm,
                                            const u8 *key, u32 keylen)
{
        return cpt_des_setkey(tfm, key, keylen, OTX_CPT_DES3_CBC);
}

static int otx_cpt_skcipher_ecb_des3_setkey(struct crypto_skcipher *tfm,
                                            const u8 *key, u32 keylen)
{
        return cpt_des_setkey(tfm, key, keylen, OTX_CPT_DES3_ECB);
}

static int otx_cpt_enc_dec_init(struct crypto_skcipher *tfm)
{
        struct otx_cpt_enc_ctx *ctx = crypto_skcipher_ctx(tfm);

        memset(ctx, 0, sizeof(*ctx));
        /*
         * Additional memory for skcipher_request is
         * allocated since the cryptd daemon uses
         * this memory for request_ctx information
         */
        crypto_skcipher_set_reqsize_dma(
                tfm, sizeof(struct otx_cpt_req_ctx) +
                     sizeof(struct skcipher_request));

        return 0;
}

static int cpt_aead_init(struct crypto_aead *tfm, u8 cipher_type, u8 mac_type)
{
        struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(tfm);

        ctx->cipher_type = cipher_type;
        ctx->mac_type = mac_type;

        /*
         * When selected cipher is NULL we use HMAC opcode instead of
         * FLEXICRYPTO opcode therefore we don't need to use HASH algorithms
         * for calculating ipad and opad
         */
        if (ctx->cipher_type != OTX_CPT_CIPHER_NULL) {
                switch (ctx->mac_type) {
                case OTX_CPT_SHA1:
                        ctx->hashalg = crypto_alloc_shash("sha1", 0,
                                                          CRYPTO_ALG_ASYNC);
                        if (IS_ERR(ctx->hashalg))
                                return PTR_ERR(ctx->hashalg);
                        break;
                case OTX_CPT_SHA256:
                        ctx->hashalg = crypto_alloc_shash("sha256", 0,
                                                          CRYPTO_ALG_ASYNC);
                        if (IS_ERR(ctx->hashalg))
                                return PTR_ERR(ctx->hashalg);
                        break;
                case OTX_CPT_SHA384:
                        ctx->hashalg = crypto_alloc_shash("sha384", 0,
                                                          CRYPTO_ALG_ASYNC);
                        if (IS_ERR(ctx->hashalg))
                                return PTR_ERR(ctx->hashalg);
                        break;
                case OTX_CPT_SHA512:
                        ctx->hashalg = crypto_alloc_shash("sha512", 0,
                                                          CRYPTO_ALG_ASYNC);
                        if (IS_ERR(ctx->hashalg))
                                return PTR_ERR(ctx->hashalg);
                        break;
                }
        }

        crypto_aead_set_reqsize_dma(tfm, sizeof(struct otx_cpt_req_ctx));

        return 0;
}

static int otx_cpt_aead_cbc_aes_sha1_init(struct crypto_aead *tfm)
{
        return cpt_aead_init(tfm, OTX_CPT_AES_CBC, OTX_CPT_SHA1);
}

static int otx_cpt_aead_cbc_aes_sha256_init(struct crypto_aead *tfm)
{
        return cpt_aead_init(tfm, OTX_CPT_AES_CBC, OTX_CPT_SHA256);
}

static int otx_cpt_aead_cbc_aes_sha384_init(struct crypto_aead *tfm)
{
        return cpt_aead_init(tfm, OTX_CPT_AES_CBC, OTX_CPT_SHA384);
}

static int otx_cpt_aead_cbc_aes_sha512_init(struct crypto_aead *tfm)
{
        return cpt_aead_init(tfm, OTX_CPT_AES_CBC, OTX_CPT_SHA512);
}

static int otx_cpt_aead_ecb_null_sha1_init(struct crypto_aead *tfm)
{
        return cpt_aead_init(tfm, OTX_CPT_CIPHER_NULL, OTX_CPT_SHA1);
}

static int otx_cpt_aead_ecb_null_sha256_init(struct crypto_aead *tfm)
{
        return cpt_aead_init(tfm, OTX_CPT_CIPHER_NULL, OTX_CPT_SHA256);
}

static int otx_cpt_aead_ecb_null_sha384_init(struct crypto_aead *tfm)
{
        return cpt_aead_init(tfm, OTX_CPT_CIPHER_NULL, OTX_CPT_SHA384);
}

static int otx_cpt_aead_ecb_null_sha512_init(struct crypto_aead *tfm)
{
        return cpt_aead_init(tfm, OTX_CPT_CIPHER_NULL, OTX_CPT_SHA512);
}

static int otx_cpt_aead_gcm_aes_init(struct crypto_aead *tfm)
{
        return cpt_aead_init(tfm, OTX_CPT_AES_GCM, OTX_CPT_MAC_NULL);
}

static void otx_cpt_aead_exit(struct crypto_aead *tfm)
{
        struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(tfm);

        kfree(ctx->ipad);
        kfree(ctx->opad);
        if (ctx->hashalg)
                crypto_free_shash(ctx->hashalg);
        kfree(ctx->sdesc);
}

/*
 * This is the Integrity Check Value validation (aka the authentication tag
 * length)
 */
static int otx_cpt_aead_set_authsize(struct crypto_aead *tfm,
                                     unsigned int authsize)
{
        struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(tfm);

        switch (ctx->mac_type) {
        case OTX_CPT_SHA1:
                if (authsize != SHA1_DIGEST_SIZE &&
                    authsize != SHA1_TRUNC_DIGEST_SIZE)
                        return -EINVAL;

                if (authsize == SHA1_TRUNC_DIGEST_SIZE)
                        ctx->is_trunc_hmac = true;
                break;
        case OTX_CPT_SHA256:
                if (authsize != SHA256_DIGEST_SIZE &&
                    authsize != SHA256_TRUNC_DIGEST_SIZE)
                        return -EINVAL;

                if (authsize == SHA256_TRUNC_DIGEST_SIZE)
                        ctx->is_trunc_hmac = true;
                break;
        case OTX_CPT_SHA384:
                if (authsize != SHA384_DIGEST_SIZE &&
                    authsize != SHA384_TRUNC_DIGEST_SIZE)
                        return -EINVAL;

                if (authsize == SHA384_TRUNC_DIGEST_SIZE)
                        ctx->is_trunc_hmac = true;
                break;
        case OTX_CPT_SHA512:
                if (authsize != SHA512_DIGEST_SIZE &&
                    authsize != SHA512_TRUNC_DIGEST_SIZE)
                        return -EINVAL;

                if (authsize == SHA512_TRUNC_DIGEST_SIZE)
                        ctx->is_trunc_hmac = true;
                break;
        case OTX_CPT_MAC_NULL:
                if (ctx->cipher_type == OTX_CPT_AES_GCM) {
                        if (authsize != AES_GCM_ICV_SIZE)
                                return -EINVAL;
                } else
                        return -EINVAL;
                break;
        default:
                return -EINVAL;
        }

        tfm->authsize = authsize;
        return 0;
}

static struct otx_cpt_sdesc *alloc_sdesc(struct crypto_shash *alg)
{
        struct otx_cpt_sdesc *sdesc;
        int size;

        size = sizeof(struct shash_desc) + crypto_shash_descsize(alg);
        sdesc = kmalloc(size, GFP_KERNEL);
        if (!sdesc)
                return NULL;

        sdesc->shash.tfm = alg;

        return sdesc;
}

static inline void swap_data32(void *buf, u32 len)
{
        cpu_to_be32_array(buf, buf, len / 4);
}

static inline void swap_data64(void *buf, u32 len)
{
        __be64 *dst = buf;
        u64 *src = buf;
        int i = 0;

        for (i = 0 ; i < len / 8; i++, src++, dst++)
                *dst = cpu_to_be64p(src);
}

static int copy_pad(u8 mac_type, u8 *out_pad, u8 *in_pad)
{
        struct sha512_state *sha512;
        struct sha256_state *sha256;
        struct sha1_state *sha1;

        switch (mac_type) {
        case OTX_CPT_SHA1:
                sha1 = (struct sha1_state *) in_pad;
                swap_data32(sha1->state, SHA1_DIGEST_SIZE);
                memcpy(out_pad, &sha1->state, SHA1_DIGEST_SIZE);
                break;
        case OTX_CPT_SHA256:
                sha256 = (struct sha256_state *) in_pad;
                swap_data32(sha256->state, SHA256_DIGEST_SIZE);
                memcpy(out_pad, &sha256->state, SHA256_DIGEST_SIZE);
                break;
        case OTX_CPT_SHA384:
        case OTX_CPT_SHA512:
                sha512 = (struct sha512_state *) in_pad;
                swap_data64(sha512->state, SHA512_DIGEST_SIZE);
                memcpy(out_pad, &sha512->state, SHA512_DIGEST_SIZE);
                break;
        default:
                return -EINVAL;
        }

        return 0;
}
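/*
 * copy_pad() stores the exported shash state words in big-endian order.
 * Presumably the CPT microcode resumes the HMAC from raw SHA state in its
 * canonical big-endian representation, hence the swap on little-endian
 * hosts (cpu_to_be*() is a no-op on big-endian ones).
 */
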
static int aead_hmac_init(struct crypto_aead *cipher)
{
        struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(cipher);
        int state_size = crypto_shash_statesize(ctx->hashalg);
        int ds = crypto_shash_digestsize(ctx->hashalg);
        int bs = crypto_shash_blocksize(ctx->hashalg);
        int authkeylen = ctx->auth_key_len;
        u8 *ipad = NULL, *opad = NULL;
        int ret = 0, icount = 0;

        ctx->sdesc = alloc_sdesc(ctx->hashalg);
        if (!ctx->sdesc)
                return -ENOMEM;

        ctx->ipad = kzalloc(bs, GFP_KERNEL);
        if (!ctx->ipad) {
                ret = -ENOMEM;
                goto calc_fail;
        }

        ctx->opad = kzalloc(bs, GFP_KERNEL);
        if (!ctx->opad) {
                ret = -ENOMEM;
                goto calc_fail;
        }

        ipad = kzalloc(state_size, GFP_KERNEL);
        if (!ipad) {
                ret = -ENOMEM;
                goto calc_fail;
        }

        opad = kzalloc(state_size, GFP_KERNEL);
        if (!opad) {
                ret = -ENOMEM;
                goto calc_fail;
        }

        if (authkeylen > bs) {
                ret = crypto_shash_digest(&ctx->sdesc->shash, ctx->key,
                                          authkeylen, ipad);
                if (ret)
                        goto calc_fail;

                authkeylen = ds;
        } else {
                memcpy(ipad, ctx->key, authkeylen);
        }

        memset(ipad + authkeylen, 0, bs - authkeylen);
        memcpy(opad, ipad, bs);

        for (icount = 0; icount < bs; icount++) {
                ipad[icount] ^= 0x36;
                opad[icount] ^= 0x5c;
        }

        /*
         * Partial Hash calculated from the software
         * algorithm is retrieved for IPAD & OPAD
         */

        /* IPAD Calculation */
        crypto_shash_init(&ctx->sdesc->shash);
        crypto_shash_update(&ctx->sdesc->shash, ipad, bs);
        crypto_shash_export(&ctx->sdesc->shash, ipad);
        ret = copy_pad(ctx->mac_type, ctx->ipad, ipad);
        if (ret)
                goto calc_fail;

        /* OPAD Calculation */
        crypto_shash_init(&ctx->sdesc->shash);
        crypto_shash_update(&ctx->sdesc->shash, opad, bs);
        crypto_shash_export(&ctx->sdesc->shash, opad);
        ret = copy_pad(ctx->mac_type, ctx->opad, opad);

calc_fail:
        kfree(ipad);
        kfree(opad);

        return ret;
}
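/*
 * aead_hmac_init() is the textbook HMAC precomputation: hash one block of
 * key ^ 0x36 (ipad) and one block of key ^ 0x5c (opad), then export the
 * partial states so the hardware can resume from them instead of re-hashing
 * the key for every request.
 */
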
static int otx_cpt_aead_cbc_aes_sha_setkey(struct crypto_aead *cipher,
                                           const unsigned char *key,
                                           unsigned int keylen)
{
        struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(cipher);
        struct crypto_authenc_key_param *param;
        int enckeylen = 0, authkeylen = 0;
        struct rtattr *rta = (void *)key;
        int status = -EINVAL;

        if (!RTA_OK(rta, keylen))
                goto badkey;

        if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
                goto badkey;

        if (RTA_PAYLOAD(rta) < sizeof(*param))
                goto badkey;

        param = RTA_DATA(rta);
        enckeylen = be32_to_cpu(param->enckeylen);
        key += RTA_ALIGN(rta->rta_len);
        keylen -= RTA_ALIGN(rta->rta_len);
        if (keylen < enckeylen)
                goto badkey;

        if (keylen > OTX_CPT_MAX_KEY_SIZE)
                goto badkey;

        authkeylen = keylen - enckeylen;
        memcpy(ctx->key, key, keylen);

        switch (enckeylen) {
        case AES_KEYSIZE_128:
                ctx->key_type = OTX_CPT_AES_128_BIT;
                break;
        case AES_KEYSIZE_192:
                ctx->key_type = OTX_CPT_AES_192_BIT;
                break;
        case AES_KEYSIZE_256:
                ctx->key_type = OTX_CPT_AES_256_BIT;
                break;
        default:
                /* Invalid key length */
                goto badkey;
        }

        ctx->enc_key_len = enckeylen;
        ctx->auth_key_len = authkeylen;

        status = aead_hmac_init(cipher);
        if (status)
                goto badkey;

        return 0;
badkey:
        return status;
}
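/*
 * The key blob parsed above is the rtattr-encoded authenc() format: a
 * CRYPTO_AUTHENC_KEYA_PARAM attribute carrying enckeylen, followed by the
 * authentication key and then the encryption key (the same layout that
 * crypto_authenc_extractkeys() in the crypto core parses).
 */
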
static int otx_cpt_aead_ecb_null_sha_setkey(struct crypto_aead *cipher,
                                            const unsigned char *key,
                                            unsigned int keylen)
{
        struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(cipher);
        struct crypto_authenc_key_param *param;
        struct rtattr *rta = (void *)key;
        int enckeylen = 0;

        if (!RTA_OK(rta, keylen))
                goto badkey;

        if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
                goto badkey;

        if (RTA_PAYLOAD(rta) < sizeof(*param))
                goto badkey;

        param = RTA_DATA(rta);
        enckeylen = be32_to_cpu(param->enckeylen);
        key += RTA_ALIGN(rta->rta_len);
        keylen -= RTA_ALIGN(rta->rta_len);
        if (enckeylen != 0)
                goto badkey;

        if (keylen > OTX_CPT_MAX_KEY_SIZE)
                goto badkey;

        memcpy(ctx->key, key, keylen);
        ctx->enc_key_len = enckeylen;
        ctx->auth_key_len = keylen;

        return 0;
badkey:
        return -EINVAL;
}

static int otx_cpt_aead_gcm_aes_setkey(struct crypto_aead *cipher,
                                       const unsigned char *key,
                                       unsigned int keylen)
{
        struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(cipher);

        /*
         * For aes gcm we expect to get encryption key (16, 24, 32 bytes)
         * and salt (4 bytes)
         */
        switch (keylen) {
        case AES_KEYSIZE_128 + AES_GCM_SALT_SIZE:
                ctx->key_type = OTX_CPT_AES_128_BIT;
                ctx->enc_key_len = AES_KEYSIZE_128;
                break;
        case AES_KEYSIZE_192 + AES_GCM_SALT_SIZE:
                ctx->key_type = OTX_CPT_AES_192_BIT;
                ctx->enc_key_len = AES_KEYSIZE_192;
                break;
        case AES_KEYSIZE_256 + AES_GCM_SALT_SIZE:
                ctx->key_type = OTX_CPT_AES_256_BIT;
                ctx->enc_key_len = AES_KEYSIZE_256;
                break;
        default:
                /* Invalid key and salt length */
                return -EINVAL;
        }

        /* Store encryption key and salt */
        memcpy(ctx->key, key, keylen);

        return 0;
}
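/*
 * Per RFC 4106 the last four bytes of the key blob are not AES key material
 * but the nonce (salt) that is prepended to the 8-byte per-request IV,
 * which is why every valid keylen above is an AES key size plus
 * AES_GCM_SALT_SIZE.
 */
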
static inline u32 create_aead_ctx_hdr(struct aead_request *req, u32 enc,
                                      u32 *argcnt)
{
        struct otx_cpt_req_ctx *rctx = aead_request_ctx_dma(req);
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(tfm);
        struct otx_cpt_req_info *req_info = &rctx->cpt_req;
        struct otx_cpt_fc_ctx *fctx = &rctx->fctx;
        int mac_len = crypto_aead_authsize(tfm);
        int ds;

        rctx->ctrl_word.e.enc_data_offset = req->assoclen;

        switch (ctx->cipher_type) {
        case OTX_CPT_AES_CBC:
                fctx->enc.enc_ctrl.e.iv_source = OTX_CPT_FROM_CPTR;
                /* Copy encryption key to context */
                memcpy(fctx->enc.encr_key, ctx->key + ctx->auth_key_len,
                       ctx->enc_key_len);
                /* Copy IV to context */
                memcpy(fctx->enc.encr_iv, req->iv, crypto_aead_ivsize(tfm));

                ds = crypto_shash_digestsize(ctx->hashalg);
                if (ctx->mac_type == OTX_CPT_SHA384)
                        ds = SHA512_DIGEST_SIZE;
                if (ctx->ipad)
                        memcpy(fctx->hmac.e.ipad, ctx->ipad, ds);
                if (ctx->opad)
                        memcpy(fctx->hmac.e.opad, ctx->opad, ds);
                break;

        case OTX_CPT_AES_GCM:
                fctx->enc.enc_ctrl.e.iv_source = OTX_CPT_FROM_DPTR;
                /* Copy encryption key to context */
                memcpy(fctx->enc.encr_key, ctx->key, ctx->enc_key_len);
                /* Copy salt to context */
                memcpy(fctx->enc.encr_iv, ctx->key + ctx->enc_key_len,
                       AES_GCM_SALT_SIZE);

                rctx->ctrl_word.e.iv_offset = req->assoclen - AES_GCM_IV_OFFSET;
                break;

        default:
                /* Unknown cipher type */
                return -EINVAL;
        }
        rctx->ctrl_word.flags = cpu_to_be64(rctx->ctrl_word.cflags);

        req_info->ctrl.s.dma_mode = OTX_CPT_DMA_GATHER_SCATTER;
        req_info->ctrl.s.se_req = OTX_CPT_SE_CORE_REQ;
        req_info->req.opcode.s.major = OTX_CPT_MAJOR_OP_FC |
                                 DMA_MODE_FLAG(OTX_CPT_DMA_GATHER_SCATTER);
        if (enc) {
                req_info->req.opcode.s.minor = 2;
                req_info->req.param1 = req->cryptlen;
                req_info->req.param2 = req->cryptlen + req->assoclen;
        } else {
                req_info->req.opcode.s.minor = 3;
                req_info->req.param1 = req->cryptlen - mac_len;
                req_info->req.param2 = req->cryptlen + req->assoclen - mac_len;
        }

        fctx->enc.enc_ctrl.e.enc_cipher = ctx->cipher_type;
        fctx->enc.enc_ctrl.e.aes_key = ctx->key_type;
        fctx->enc.enc_ctrl.e.mac_type = ctx->mac_type;
        fctx->enc.enc_ctrl.e.mac_len = mac_len;
        fctx->enc.enc_ctrl.flags = cpu_to_be64(fctx->enc.enc_ctrl.cflags);

        /*
         * Storing Packet Data Information in offset
         * Control Word First 8 bytes
         */
        req_info->in[*argcnt].vptr = (u8 *)&rctx->ctrl_word;
        req_info->in[*argcnt].size = CONTROL_WORD_LEN;
        req_info->req.dlen += CONTROL_WORD_LEN;
        ++(*argcnt);

        req_info->in[*argcnt].vptr = (u8 *)fctx;
        req_info->in[*argcnt].size = sizeof(struct otx_cpt_fc_ctx);
        req_info->req.dlen += sizeof(struct otx_cpt_fc_ctx);
        ++(*argcnt);

        return 0;
}

static inline u32 create_hmac_ctx_hdr(struct aead_request *req, u32 *argcnt,
                                      u32 enc)
{
        struct otx_cpt_req_ctx *rctx = aead_request_ctx_dma(req);
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(tfm);
        struct otx_cpt_req_info *req_info = &rctx->cpt_req;

        req_info->ctrl.s.dma_mode = OTX_CPT_DMA_GATHER_SCATTER;
        req_info->ctrl.s.se_req = OTX_CPT_SE_CORE_REQ;
        req_info->req.opcode.s.major = OTX_CPT_MAJOR_OP_HMAC |
                                 DMA_MODE_FLAG(OTX_CPT_DMA_GATHER_SCATTER);
        req_info->is_trunc_hmac = ctx->is_trunc_hmac;

        req_info->req.opcode.s.minor = 0;
        req_info->req.param1 = ctx->auth_key_len;
        req_info->req.param2 = ctx->mac_type << 8;

        /* Add authentication key */
        req_info->in[*argcnt].vptr = ctx->key;
        req_info->in[*argcnt].size = round_up(ctx->auth_key_len, 8);
        req_info->req.dlen += round_up(ctx->auth_key_len, 8);
        ++(*argcnt);

        return 0;
}

static inline u32 create_aead_input_list(struct aead_request *req, u32 enc)
{
        struct otx_cpt_req_ctx *rctx = aead_request_ctx_dma(req);
        struct otx_cpt_req_info *req_info = &rctx->cpt_req;
        u32 inputlen = req->cryptlen + req->assoclen;
        u32 status, argcnt = 0;

        status = create_aead_ctx_hdr(req, enc, &argcnt);
        if (status)
                return status;
        update_input_data(req_info, req->src, inputlen, &argcnt);
        req_info->incnt = argcnt;

        return 0;
}

static inline u32 create_aead_output_list(struct aead_request *req, u32 enc,
                                          u32 mac_len)
{
        struct otx_cpt_req_ctx *rctx = aead_request_ctx_dma(req);
        struct otx_cpt_req_info *req_info = &rctx->cpt_req;
        u32 argcnt = 0, outputlen = 0;

        if (enc)
                outputlen = req->cryptlen + req->assoclen + mac_len;
        else
                outputlen = req->cryptlen + req->assoclen - mac_len;

        update_output_data(req_info, req->dst, 0, outputlen, &argcnt);
        req_info->outcnt = argcnt;

        return 0;
}

static inline u32 create_aead_null_input_list(struct aead_request *req,
                                              u32 enc, u32 mac_len)
{
        struct otx_cpt_req_ctx *rctx = aead_request_ctx_dma(req);
        struct otx_cpt_req_info *req_info = &rctx->cpt_req;
        u32 inputlen, argcnt = 0;

        if (enc)
                inputlen = req->cryptlen + req->assoclen;
        else
                inputlen = req->cryptlen + req->assoclen - mac_len;

        create_hmac_ctx_hdr(req, &argcnt, enc);
        update_input_data(req_info, req->src, inputlen, &argcnt);
        req_info->incnt = argcnt;

        return 0;
}

static inline u32 create_aead_null_output_list(struct aead_request *req,
                                               u32 enc, u32 mac_len)
{
        struct otx_cpt_req_ctx *rctx = aead_request_ctx_dma(req);
        struct otx_cpt_req_info *req_info = &rctx->cpt_req;
        struct scatterlist *dst;
        u8 *ptr = NULL;
        int argcnt = 0, status, offset;
        u32 inputlen;

        if (enc)
                inputlen = req->cryptlen + req->assoclen;
        else
                inputlen = req->cryptlen + req->assoclen - mac_len;

        /*
         * If source and destination are different
         * then copy payload to destination
         */
        if (req->src != req->dst) {

                ptr = kmalloc(inputlen, (req_info->areq->flags &
                                         CRYPTO_TFM_REQ_MAY_SLEEP) ?
                                         GFP_KERNEL : GFP_ATOMIC);
                if (!ptr) {
                        status = -ENOMEM;
                        goto error;
                }

                status = sg_copy_to_buffer(req->src, sg_nents(req->src), ptr,
                                           inputlen);
                if (status != inputlen) {
                        status = -EINVAL;
                        goto error_free;
                }
                status = sg_copy_from_buffer(req->dst, sg_nents(req->dst), ptr,
                                             inputlen);
                if (status != inputlen) {
                        status = -EINVAL;
                        goto error_free;
                }
                kfree(ptr);
        }

        if (enc) {
                /*
                 * In an encryption scenario hmac needs
                 * to be appended after payload
                 */
                dst = req->dst;
                offset = inputlen;
                while (offset >= dst->length) {
                        offset -= dst->length;
                        dst = sg_next(dst);
                        if (!dst) {
                                status = -ENOENT;
                                goto error;
                        }
                }

                update_output_data(req_info, dst, offset, mac_len, &argcnt);
        } else {
                /*
                 * In a decryption scenario the hmac calculated for the
                 * received payload needs to be compared with the hmac
                 * received
                 */
                status = sg_copy_buffer(req->src, sg_nents(req->src),
                                        rctx->fctx.hmac.s.hmac_recv, mac_len,
                                        inputlen, true);
                if (status != mac_len) {
                        status = -EINVAL;
                        goto error;
                }

                req_info->out[argcnt].vptr = rctx->fctx.hmac.s.hmac_calc;
                req_info->out[argcnt].size = mac_len;
                argcnt++;
        }

        req_info->outcnt = argcnt;
        return 0;

error_free:
        kfree(ptr);
error:
        return status;
}
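/*
 * For a NULL-cipher decryption the hardware writes its computed digest to
 * hmac_calc while the digest received with the payload was staged into
 * hmac_recv above; otx_cpt_aead_callback() then compares the two via
 * validate_hmac_cipher_null().
 */
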
static u32 cpt_aead_enc_dec(struct aead_request *req, u8 reg_type, u8 enc)
{
        struct otx_cpt_req_ctx *rctx = aead_request_ctx_dma(req);
        struct otx_cpt_req_info *req_info = &rctx->cpt_req;
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct pci_dev *pdev;
        u32 status, cpu_num;

        /* Clear control words */
        rctx->ctrl_word.flags = 0;
        rctx->fctx.enc.enc_ctrl.flags = 0;

        req_info->callback = otx_cpt_aead_callback;
        req_info->areq = &req->base;
        req_info->req_type = reg_type;
        req_info->is_enc = enc;
        req_info->is_trunc_hmac = false;

        switch (reg_type) {
        case OTX_CPT_AEAD_ENC_DEC_REQ:
                status = create_aead_input_list(req, enc);
                if (status)
                        return status;
                status = create_aead_output_list(req, enc,
                                                 crypto_aead_authsize(tfm));
                if (status)
                        return status;
                break;

        case OTX_CPT_AEAD_ENC_DEC_NULL_REQ:
                status = create_aead_null_input_list(req, enc,
                                                     crypto_aead_authsize(tfm));
                if (status)
                        return status;
                status = create_aead_null_output_list(req, enc,
                                                crypto_aead_authsize(tfm));
                if (status)
                        return status;
                break;

        default:
                return -EINVAL;
        }

        /* Validate that request doesn't exceed maximum CPT supported size */
        if (req_info->req.param1 > OTX_CPT_MAX_REQ_SIZE ||
            req_info->req.param2 > OTX_CPT_MAX_REQ_SIZE)
                return -E2BIG;

        status = get_se_device(&pdev, &cpu_num);
        if (status)
                return status;

        req_info->ctrl.s.grp = 0;

        status = otx_cpt_do_request(pdev, req_info, cpu_num);
        /*
         * We perform an asynchronous send and once
         * the request is completed the driver will
         * notify through the registered callback functions
         */
        return status;
}

static int otx_cpt_aead_encrypt(struct aead_request *req)
{
        return cpt_aead_enc_dec(req, OTX_CPT_AEAD_ENC_DEC_REQ, true);
}

static int otx_cpt_aead_decrypt(struct aead_request *req)
{
        return cpt_aead_enc_dec(req, OTX_CPT_AEAD_ENC_DEC_REQ, false);
}

static int otx_cpt_aead_null_encrypt(struct aead_request *req)
{
        return cpt_aead_enc_dec(req, OTX_CPT_AEAD_ENC_DEC_NULL_REQ, true);
}

static int otx_cpt_aead_null_decrypt(struct aead_request *req)
{
        return cpt_aead_enc_dec(req, OTX_CPT_AEAD_ENC_DEC_NULL_REQ, false);
}

static struct skcipher_alg otx_cpt_skciphers[] = { {
        .base.cra_name = "xts(aes)",
        .base.cra_driver_name = "cpt_xts_aes",
        .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
        .base.cra_blocksize = AES_BLOCK_SIZE,
        .base.cra_ctxsize = sizeof(struct otx_cpt_enc_ctx),
        .base.cra_alignmask = 7,
        .base.cra_priority = 4001,
        .base.cra_module = THIS_MODULE,

        .init = otx_cpt_enc_dec_init,
        .ivsize = AES_BLOCK_SIZE,
        .min_keysize = 2 * AES_MIN_KEY_SIZE,
        .max_keysize = 2 * AES_MAX_KEY_SIZE,
        .setkey = otx_cpt_skcipher_xts_setkey,
        .encrypt = otx_cpt_skcipher_encrypt,
        .decrypt = otx_cpt_skcipher_decrypt,
}, {
        .base.cra_name = "cbc(aes)",
        .base.cra_driver_name = "cpt_cbc_aes",
        .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
        .base.cra_blocksize = AES_BLOCK_SIZE,
        .base.cra_ctxsize = sizeof(struct otx_cpt_enc_ctx),
        .base.cra_alignmask = 7,
        .base.cra_priority = 4001,
        .base.cra_module = THIS_MODULE,

        .init = otx_cpt_enc_dec_init,
        .ivsize = AES_BLOCK_SIZE,
        .min_keysize = AES_MIN_KEY_SIZE,
        .max_keysize = AES_MAX_KEY_SIZE,
        .setkey = otx_cpt_skcipher_cbc_aes_setkey,
        .encrypt = otx_cpt_skcipher_encrypt,
        .decrypt = otx_cpt_skcipher_decrypt,
}, {
        .base.cra_name = "ecb(aes)",
        .base.cra_driver_name = "cpt_ecb_aes",
        .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
        .base.cra_blocksize = AES_BLOCK_SIZE,
        .base.cra_ctxsize = sizeof(struct otx_cpt_enc_ctx),
        .base.cra_alignmask = 7,
        .base.cra_priority = 4001,
        .base.cra_module = THIS_MODULE,

        .init = otx_cpt_enc_dec_init,
        .ivsize = 0,
        .min_keysize = AES_MIN_KEY_SIZE,
        .max_keysize = AES_MAX_KEY_SIZE,
        .setkey = otx_cpt_skcipher_ecb_aes_setkey,
        .encrypt = otx_cpt_skcipher_encrypt,
        .decrypt = otx_cpt_skcipher_decrypt,
}, {
        .base.cra_name = "cfb(aes)",
        .base.cra_driver_name = "cpt_cfb_aes",
        .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
        .base.cra_blocksize = AES_BLOCK_SIZE,
        .base.cra_ctxsize = sizeof(struct otx_cpt_enc_ctx),
        .base.cra_alignmask = 7,
        .base.cra_priority = 4001,
        .base.cra_module = THIS_MODULE,

        .init = otx_cpt_enc_dec_init,
        .ivsize = AES_BLOCK_SIZE,
        .min_keysize = AES_MIN_KEY_SIZE,
        .max_keysize = AES_MAX_KEY_SIZE,
        .setkey = otx_cpt_skcipher_cfb_aes_setkey,
        .encrypt = otx_cpt_skcipher_encrypt,
        .decrypt = otx_cpt_skcipher_decrypt,
}, {
        .base.cra_name = "cbc(des3_ede)",
        .base.cra_driver_name = "cpt_cbc_des3_ede",
        .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
        .base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
        .base.cra_ctxsize = sizeof(struct otx_cpt_des3_ctx),
        .base.cra_alignmask = 7,
        .base.cra_priority = 4001,
        .base.cra_module = THIS_MODULE,

        .init = otx_cpt_enc_dec_init,
        .min_keysize = DES3_EDE_KEY_SIZE,
        .max_keysize = DES3_EDE_KEY_SIZE,
        .ivsize = DES_BLOCK_SIZE,
        .setkey = otx_cpt_skcipher_cbc_des3_setkey,
        .encrypt = otx_cpt_skcipher_encrypt,
        .decrypt = otx_cpt_skcipher_decrypt,
}, {
        .base.cra_name = "ecb(des3_ede)",
        .base.cra_driver_name = "cpt_ecb_des3_ede",
        .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
        .base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
        .base.cra_ctxsize = sizeof(struct otx_cpt_des3_ctx),
        .base.cra_alignmask = 7,
        .base.cra_priority = 4001,
        .base.cra_module = THIS_MODULE,

        .init = otx_cpt_enc_dec_init,
        .min_keysize = DES3_EDE_KEY_SIZE,
        .max_keysize = DES3_EDE_KEY_SIZE,
        .ivsize = 0,
        .setkey = otx_cpt_skcipher_ecb_des3_setkey,
        .encrypt = otx_cpt_skcipher_encrypt,
        .decrypt = otx_cpt_skcipher_decrypt,
} };
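/*
 * cra_priority 4001 ranks these implementations above the generic software
 * ciphers (priority 100) and above most optimized software ones, so the
 * crypto API prefers the CPT offload whenever an SE device is registered.
 */
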
static struct aead_alg otx_cpt_aeads[] = { {
        .base = {
                .cra_name = "authenc(hmac(sha1),cbc(aes))",
                .cra_driver_name = "cpt_hmac_sha1_cbc_aes",
                .cra_blocksize = AES_BLOCK_SIZE,
                .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
                .cra_ctxsize = sizeof(struct otx_cpt_aead_ctx) + CRYPTO_DMA_PADDING,
                .cra_priority = 4001,
                .cra_alignmask = 0,
                .cra_module = THIS_MODULE,
        },
        .init = otx_cpt_aead_cbc_aes_sha1_init,
        .exit = otx_cpt_aead_exit,
        .setkey = otx_cpt_aead_cbc_aes_sha_setkey,
        .setauthsize = otx_cpt_aead_set_authsize,
        .encrypt = otx_cpt_aead_encrypt,
        .decrypt = otx_cpt_aead_decrypt,
        .ivsize = AES_BLOCK_SIZE,
        .maxauthsize = SHA1_DIGEST_SIZE,
}, {
        .base = {
                .cra_name = "authenc(hmac(sha256),cbc(aes))",
                .cra_driver_name = "cpt_hmac_sha256_cbc_aes",
                .cra_blocksize = AES_BLOCK_SIZE,
                .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
                .cra_ctxsize = sizeof(struct otx_cpt_aead_ctx) + CRYPTO_DMA_PADDING,
                .cra_priority = 4001,
                .cra_alignmask = 0,
                .cra_module = THIS_MODULE,
        },
        .init = otx_cpt_aead_cbc_aes_sha256_init,
        .exit = otx_cpt_aead_exit,
        .setkey = otx_cpt_aead_cbc_aes_sha_setkey,
        .setauthsize = otx_cpt_aead_set_authsize,
        .encrypt = otx_cpt_aead_encrypt,
        .decrypt = otx_cpt_aead_decrypt,
        .ivsize = AES_BLOCK_SIZE,
        .maxauthsize = SHA256_DIGEST_SIZE,
}, {
        .base = {
                .cra_name = "authenc(hmac(sha384),cbc(aes))",
                .cra_driver_name = "cpt_hmac_sha384_cbc_aes",
                .cra_blocksize = AES_BLOCK_SIZE,
                .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
                .cra_ctxsize = sizeof(struct otx_cpt_aead_ctx) + CRYPTO_DMA_PADDING,
                .cra_priority = 4001,
                .cra_alignmask = 0,
                .cra_module = THIS_MODULE,
        },
        .init = otx_cpt_aead_cbc_aes_sha384_init,
        .exit = otx_cpt_aead_exit,
        .setkey = otx_cpt_aead_cbc_aes_sha_setkey,
        .setauthsize = otx_cpt_aead_set_authsize,
        .encrypt = otx_cpt_aead_encrypt,
        .decrypt = otx_cpt_aead_decrypt,
        .ivsize = AES_BLOCK_SIZE,
        .maxauthsize = SHA384_DIGEST_SIZE,
}, {
        .base = {
                .cra_name = "authenc(hmac(sha512),cbc(aes))",
                .cra_driver_name = "cpt_hmac_sha512_cbc_aes",
                .cra_blocksize = AES_BLOCK_SIZE,
                .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
                .cra_ctxsize = sizeof(struct otx_cpt_aead_ctx) + CRYPTO_DMA_PADDING,
                .cra_priority = 4001,
                .cra_alignmask = 0,
                .cra_module = THIS_MODULE,
        },
        .init = otx_cpt_aead_cbc_aes_sha512_init,
        .exit = otx_cpt_aead_exit,
        .setkey = otx_cpt_aead_cbc_aes_sha_setkey,
        .setauthsize = otx_cpt_aead_set_authsize,
        .encrypt = otx_cpt_aead_encrypt,
        .decrypt = otx_cpt_aead_decrypt,
        .ivsize = AES_BLOCK_SIZE,
        .maxauthsize = SHA512_DIGEST_SIZE,
}, {
        .base = {
                .cra_name = "authenc(hmac(sha1),ecb(cipher_null))",
                .cra_driver_name = "cpt_hmac_sha1_ecb_null",
                .cra_blocksize = 1,
                .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
                .cra_ctxsize = sizeof(struct otx_cpt_aead_ctx) + CRYPTO_DMA_PADDING,
                .cra_priority = 4001,
                .cra_alignmask = 0,
                .cra_module = THIS_MODULE,
        },
        .init = otx_cpt_aead_ecb_null_sha1_init,
        .exit = otx_cpt_aead_exit,
        .setkey = otx_cpt_aead_ecb_null_sha_setkey,
        .setauthsize = otx_cpt_aead_set_authsize,
        .encrypt = otx_cpt_aead_null_encrypt,
        .decrypt = otx_cpt_aead_null_decrypt,
        .ivsize = 0,
        .maxauthsize = SHA1_DIGEST_SIZE,
}, {
        .base = {
                .cra_name = "authenc(hmac(sha256),ecb(cipher_null))",
                .cra_driver_name = "cpt_hmac_sha256_ecb_null",
                .cra_blocksize = 1,
                .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
                .cra_ctxsize = sizeof(struct otx_cpt_aead_ctx) + CRYPTO_DMA_PADDING,
                .cra_priority = 4001,
                .cra_alignmask = 0,
                .cra_module = THIS_MODULE,
        },
        .init = otx_cpt_aead_ecb_null_sha256_init,
        .exit = otx_cpt_aead_exit,
        .setkey = otx_cpt_aead_ecb_null_sha_setkey,
        .setauthsize = otx_cpt_aead_set_authsize,
        .encrypt = otx_cpt_aead_null_encrypt,
        .decrypt = otx_cpt_aead_null_decrypt,
        .ivsize = 0,
        .maxauthsize = SHA256_DIGEST_SIZE,
}, {
        .base = {
                .cra_name = "authenc(hmac(sha384),ecb(cipher_null))",
                .cra_driver_name = "cpt_hmac_sha384_ecb_null",
                .cra_blocksize = 1,
                .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
                .cra_ctxsize = sizeof(struct otx_cpt_aead_ctx) + CRYPTO_DMA_PADDING,
                .cra_priority = 4001,
                .cra_alignmask = 0,
                .cra_module = THIS_MODULE,
        },
        .init = otx_cpt_aead_ecb_null_sha384_init,
        .exit = otx_cpt_aead_exit,
        .setkey = otx_cpt_aead_ecb_null_sha_setkey,
        .setauthsize = otx_cpt_aead_set_authsize,
        .encrypt = otx_cpt_aead_null_encrypt,
        .decrypt = otx_cpt_aead_null_decrypt,
        .ivsize = 0,
        .maxauthsize = SHA384_DIGEST_SIZE,
}, {
        .base = {
                .cra_name = "authenc(hmac(sha512),ecb(cipher_null))",
                .cra_driver_name = "cpt_hmac_sha512_ecb_null",
                .cra_blocksize = 1,
                .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
                .cra_ctxsize = sizeof(struct otx_cpt_aead_ctx) + CRYPTO_DMA_PADDING,
                .cra_priority = 4001,
                .cra_alignmask = 0,
                .cra_module = THIS_MODULE,
        },
        .init = otx_cpt_aead_ecb_null_sha512_init,
        .exit = otx_cpt_aead_exit,
        .setkey = otx_cpt_aead_ecb_null_sha_setkey,
        .setauthsize = otx_cpt_aead_set_authsize,
        .encrypt = otx_cpt_aead_null_encrypt,
        .decrypt = otx_cpt_aead_null_decrypt,
        .ivsize = 0,
        .maxauthsize = SHA512_DIGEST_SIZE,
}, {
        .base = {
                .cra_name = "rfc4106(gcm(aes))",
                .cra_driver_name = "cpt_rfc4106_gcm_aes",
                .cra_blocksize = 1,
                .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
                .cra_ctxsize = sizeof(struct otx_cpt_aead_ctx) + CRYPTO_DMA_PADDING,
                .cra_priority = 4001,
                .cra_alignmask = 0,
                .cra_module = THIS_MODULE,
        },
        .init = otx_cpt_aead_gcm_aes_init,
        .exit = otx_cpt_aead_exit,
        .setkey = otx_cpt_aead_gcm_aes_setkey,
        .setauthsize = otx_cpt_aead_set_authsize,
        .encrypt = otx_cpt_aead_encrypt,
        .decrypt = otx_cpt_aead_decrypt,
        .ivsize = AES_GCM_IV_SIZE,
        .maxauthsize = AES_GCM_ICV_SIZE,
} };

static inline int is_any_alg_used(void)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(otx_cpt_skciphers); i++)
                if (refcount_read(&otx_cpt_skciphers[i].base.cra_refcnt) != 1)
                        return true;
        for (i = 0; i < ARRAY_SIZE(otx_cpt_aeads); i++)
                if (refcount_read(&otx_cpt_aeads[i].base.cra_refcnt) != 1)
                        return true;
        return false;
}

static inline int cpt_register_algs(void)
{
        int i, err = 0;

        if (!IS_ENABLED(CONFIG_DM_CRYPT)) {
                for (i = 0; i < ARRAY_SIZE(otx_cpt_skciphers); i++)
                        otx_cpt_skciphers[i].base.cra_flags &= ~CRYPTO_ALG_DEAD;

                err = crypto_register_skciphers(otx_cpt_skciphers,
                                                ARRAY_SIZE(otx_cpt_skciphers));
                if (err)
                        return err;
        }

        for (i = 0; i < ARRAY_SIZE(otx_cpt_aeads); i++)
                otx_cpt_aeads[i].base.cra_flags &= ~CRYPTO_ALG_DEAD;

        err = crypto_register_aeads(otx_cpt_aeads, ARRAY_SIZE(otx_cpt_aeads));
        if (err) {
                crypto_unregister_skciphers(otx_cpt_skciphers,
                                            ARRAY_SIZE(otx_cpt_skciphers));
                return err;
        }

        return 0;
}

static inline void cpt_unregister_algs(void)
{
        crypto_unregister_skciphers(otx_cpt_skciphers,
                                    ARRAY_SIZE(otx_cpt_skciphers));
        crypto_unregister_aeads(otx_cpt_aeads, ARRAY_SIZE(otx_cpt_aeads));
}

static int compare_func(const void *lptr, const void *rptr)
{
        struct cpt_device_desc *ldesc = (struct cpt_device_desc *) lptr;
        struct cpt_device_desc *rdesc = (struct cpt_device_desc *) rptr;

        if (ldesc->dev->devfn < rdesc->dev->devfn)
                return -1;
        if (ldesc->dev->devfn > rdesc->dev->devfn)
                return 1;
        return 0;
}

static void swap_func(void *lptr, void *rptr, int size)
{
        struct cpt_device_desc *ldesc = (struct cpt_device_desc *) lptr;
        struct cpt_device_desc *rdesc = (struct cpt_device_desc *) rptr;

        swap(*ldesc, *rdesc);
}
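/*
 * Keeping desc[] sorted by PCI devfn gives get_se_device() a stable
 * cpu -> device assignment that does not depend on VF probe order.
 */
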
int otx_cpt_crypto_init(struct pci_dev *pdev, struct module *mod,
                        enum otx_cptpf_type pf_type,
                        enum otx_cptvf_type engine_type,
                        int num_queues, int num_devices)
{
        int ret = 0;
        int count;

        mutex_lock(&mutex);
        switch (engine_type) {
        case OTX_CPT_SE_TYPES:
                count = atomic_read(&se_devices.count);
                if (count >= CPT_MAX_VF_NUM) {
                        dev_err(&pdev->dev, "No space to add a new device\n");
                        ret = -ENOSPC;
                        goto err;
                }
                se_devices.desc[count].pf_type = pf_type;
                se_devices.desc[count].num_queues = num_queues;
                se_devices.desc[count++].dev = pdev;
                atomic_inc(&se_devices.count);

                if (atomic_read(&se_devices.count) == num_devices &&
                    is_crypto_registered == false) {
                        if (cpt_register_algs()) {
                                dev_err(&pdev->dev,
                                        "Error in registering crypto algorithms\n");
                                ret = -EINVAL;
                                goto err;
                        }
                        try_module_get(mod);
                        is_crypto_registered = true;
                }
                sort(se_devices.desc, count, sizeof(struct cpt_device_desc),
                     compare_func, swap_func);
                break;

        case OTX_CPT_AE_TYPES:
                count = atomic_read(&ae_devices.count);
                if (count >= CPT_MAX_VF_NUM) {
                        dev_err(&pdev->dev, "No space to add a new device\n");
                        ret = -ENOSPC;
                        goto err;
                }
                ae_devices.desc[count].pf_type = pf_type;
                ae_devices.desc[count].num_queues = num_queues;
                ae_devices.desc[count++].dev = pdev;
                atomic_inc(&ae_devices.count);
                sort(ae_devices.desc, count, sizeof(struct cpt_device_desc),
                     compare_func, swap_func);
                break;

        default:
                dev_err(&pdev->dev, "Unknown VF type %d\n", engine_type);
                ret = BAD_OTX_CPTVF_TYPE;
        }
err:
        mutex_unlock(&mutex);
        return ret;
}

void otx_cpt_crypto_exit(struct pci_dev *pdev, struct module *mod,
                         enum otx_cptvf_type engine_type)
{
        struct cpt_device_table *dev_tbl;
        bool dev_found = false;
        int i, j, count;

        mutex_lock(&mutex);

        dev_tbl = (engine_type == OTX_CPT_AE_TYPES) ? &ae_devices : &se_devices;
        count = atomic_read(&dev_tbl->count);
        for (i = 0; i < count; i++)
                if (pdev == dev_tbl->desc[i].dev) {
                        for (j = i; j < count-1; j++)
                                dev_tbl->desc[j] = dev_tbl->desc[j+1];
                        dev_found = true;
                        break;
                }

        if (!dev_found) {
                dev_err(&pdev->dev, "%s device not found\n", __func__);
                goto exit;
        }

        if (engine_type != OTX_CPT_AE_TYPES) {
                if (atomic_dec_and_test(&se_devices.count) &&
                    !is_any_alg_used()) {
                        cpt_unregister_algs();
                        module_put(mod);
                        is_crypto_registered = false;
                }
        } else
                atomic_dec(&ae_devices.count);
exit:
        mutex_unlock(&mutex);
}