// SPDX-License-Identifier: GPL-2.0
/* Marvell OcteonTX CPT driver
 *
 * Copyright (C) 2019 Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <crypto/aes.h>
#include <crypto/authenc.h>
#include <crypto/cryptd.h>
#include <crypto/des.h>
#include <crypto/internal/aead.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>
#include <crypto/xts.h>
#include <crypto/scatterwalk.h>
#include <linux/rtnetlink.h>
#include <linux/sort.h>
#include <linux/module.h>
#include "otx_cptvf.h"
#include "otx_cptvf_algs.h"
#include "otx_cptvf_reqmgr.h"

#define CPT_MAX_VF_NUM 64
/* Size of salt in AES GCM mode */
#define AES_GCM_SALT_SIZE 4
/* Size of IV in AES GCM mode */
#define AES_GCM_IV_SIZE 8
/* Size of ICV (Integrity Check Value) in AES GCM mode */
#define AES_GCM_ICV_SIZE 16
/* Offset of IV in AES GCM mode */
#define AES_GCM_IV_OFFSET 8
#define CONTROL_WORD_LEN 8
#define KEY2_OFFSET 48
#define DMA_MODE_FLAG(dma_mode) \
	(((dma_mode) == OTX_CPT_DMA_GATHER_SCATTER) ? (1 << 7) : 0)

/* Truncated SHA digest size */
#define SHA1_TRUNC_DIGEST_SIZE 12
#define SHA256_TRUNC_DIGEST_SIZE 16
#define SHA384_TRUNC_DIGEST_SIZE 24
#define SHA512_TRUNC_DIGEST_SIZE 32

static DEFINE_MUTEX(mutex);
static int is_crypto_registered;

struct cpt_device_desc {
	enum otx_cptpf_type pf_type;
	struct pci_dev *dev;
	int num_queues;
};

struct cpt_device_table {
	atomic_t count;
	struct cpt_device_desc desc[CPT_MAX_VF_NUM];
};

static struct cpt_device_table se_devices = {
	.count = ATOMIC_INIT(0)
};

static struct cpt_device_table ae_devices = {
	.count = ATOMIC_INIT(0)
};

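/*
 * Pick the SE device that serves the calling CPU. Each VF owns one CPT
 * instruction queue, so the CPU number is wrapped modulo the queue count
 * when there are more CPUs than queues and a queue ends up shared.
 */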
static inline int get_se_device(struct pci_dev **pdev, int *cpu_num)
{
	int count, ret = 0;

	count = atomic_read(&se_devices.count);
	if (count < 1)
		return -ENODEV;

	*cpu_num = get_cpu();

	if (se_devices.desc[0].pf_type == OTX_CPT_SE) {
		/*
		 * On OcteonTX platform there is one CPT instruction queue bound
		 * to each VF. We get maximum performance if one CPT queue
		 * is available for each cpu otherwise CPT queues need to be
		 * shared between cpus.
		 */
		if (*cpu_num >= count)
			*cpu_num %= count;
		*pdev = se_devices.desc[*cpu_num].dev;
	} else {
		pr_err("Unknown PF type %d\n", se_devices.desc[0].pf_type);
		ret = -EINVAL;
	}
	put_cpu();

	return ret;
}

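/*
 * For the NULL-cipher AEAD path the hardware only computes the HMAC, so
 * the calculated and received authentication tags are compared here in
 * software once the request completes.
 */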
static inline int validate_hmac_cipher_null(struct otx_cpt_req_info *cpt_req)
{
	struct otx_cpt_req_ctx *rctx;
	struct aead_request *req;
	struct crypto_aead *tfm;

	req = container_of(cpt_req->areq, struct aead_request, base);
	tfm = crypto_aead_reqtfm(req);
	rctx = aead_request_ctx_dma(req);
	if (memcmp(rctx->fctx.hmac.s.hmac_calc,
		   rctx->fctx.hmac.s.hmac_recv,
		   crypto_aead_authsize(tfm)) != 0)
		return -EBADMSG;

	return 0;
}

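/* Completion callback for AEAD requests submitted to the CPT queue. */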
static void otx_cpt_aead_callback(int status, void *arg1, void *arg2)
{
	struct otx_cpt_info_buffer *cpt_info = arg2;
	struct crypto_async_request *areq = arg1;
	struct otx_cpt_req_info *cpt_req;
	struct pci_dev *pdev;

	if (!cpt_info)
		goto complete;

	cpt_req = cpt_info->req;
	if (!status) {
		/*
		 * When selected cipher is NULL we need to manually
		 * verify whether calculated hmac value matches
		 * received hmac value
		 */
		if (cpt_req->req_type == OTX_CPT_AEAD_ENC_DEC_NULL_REQ &&
		    !cpt_req->is_enc)
			status = validate_hmac_cipher_null(cpt_req);
	}
	pdev = cpt_info->pdev;
	do_request_cleanup(pdev, cpt_info);

complete:
	if (areq)
		crypto_request_complete(areq, status);
}

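/*
 * CBC modes must return the last ciphertext block as the IV for the next
 * request. For in-place decryption that block was saved in iv_out before
 * the hardware overwrote it; otherwise it is copied from src/dst here.
 */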
static void output_iv_copyback(struct crypto_async_request *areq)
{
	struct otx_cpt_req_info *req_info;
	struct skcipher_request *sreq;
	struct crypto_skcipher *stfm;
	struct otx_cpt_req_ctx *rctx;
	struct otx_cpt_enc_ctx *ctx;
	u32 start, ivsize;

	sreq = container_of(areq, struct skcipher_request, base);
	stfm = crypto_skcipher_reqtfm(sreq);
	ctx = crypto_skcipher_ctx(stfm);
	if (ctx->cipher_type == OTX_CPT_AES_CBC ||
	    ctx->cipher_type == OTX_CPT_DES3_CBC) {
		rctx = skcipher_request_ctx_dma(sreq);
		req_info = &rctx->cpt_req;
		ivsize = crypto_skcipher_ivsize(stfm);
		start = sreq->cryptlen - ivsize;

		if (req_info->is_enc) {
			scatterwalk_map_and_copy(sreq->iv, sreq->dst, start,
						 ivsize, 0);
		} else {
			if (sreq->src != sreq->dst) {
				scatterwalk_map_and_copy(sreq->iv, sreq->src,
							 start, ivsize, 0);
			} else {
				memcpy(sreq->iv, req_info->iv_out, ivsize);
				kfree(req_info->iv_out);
			}
		}
	}
}

static void otx_cpt_skcipher_callback(int status, void *arg1, void *arg2)
{
	struct otx_cpt_info_buffer *cpt_info = arg2;
	struct crypto_async_request *areq = arg1;
	struct pci_dev *pdev;

	if (areq) {
		if (!status)
			output_iv_copyback(areq);
		if (cpt_info) {
			pdev = cpt_info->pdev;
			do_request_cleanup(pdev, cpt_info);
		}
		crypto_request_complete(areq, status);
	}
}

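/*
 * The two helpers below walk a scatterlist and append each fragment to
 * the request's gather (input) or scatter (output) pointer list,
 * accounting the total data length and advancing the argument counter.
 */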
static inline void update_input_data(struct otx_cpt_req_info *req_info,
				     struct scatterlist *inp_sg,
				     u32 nbytes, u32 *argcnt)
{
	req_info->req.dlen += nbytes;

	while (nbytes) {
		u32 len = min(nbytes, inp_sg->length);
		u8 *ptr = sg_virt(inp_sg);

		req_info->in[*argcnt].vptr = (void *)ptr;
		req_info->in[*argcnt].size = len;
		nbytes -= len;
		++(*argcnt);
		inp_sg = sg_next(inp_sg);
	}
}

static inline void update_output_data(struct otx_cpt_req_info *req_info,
				      struct scatterlist *outp_sg,
				      u32 offset, u32 nbytes, u32 *argcnt)
{
	req_info->rlen += nbytes;

	while (nbytes) {
		u32 len = min(nbytes, outp_sg->length - offset);
		u8 *ptr = sg_virt(outp_sg);

		req_info->out[*argcnt].vptr = (void *) (ptr + offset);
		req_info->out[*argcnt].size = len;
		nbytes -= len;
		++(*argcnt);
		offset = 0;
		outp_sg = sg_next(outp_sg);
	}
}

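/*
 * Build the FLEXICRYPTO context header for an skcipher request: opcode,
 * data lengths, cipher and key type, key and IV, plus the leading
 * control word and context structure prepended to the gather list.
 */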
static inline u32 create_ctx_hdr(struct skcipher_request *req, u32 enc,
				 u32 *argcnt)
{
	struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
	struct otx_cpt_req_ctx *rctx = skcipher_request_ctx_dma(req);
	struct otx_cpt_req_info *req_info = &rctx->cpt_req;
	struct crypto_tfm *tfm = crypto_skcipher_tfm(stfm);
	struct otx_cpt_enc_ctx *ctx = crypto_tfm_ctx(tfm);
	struct otx_cpt_fc_ctx *fctx = &rctx->fctx;
	int ivsize = crypto_skcipher_ivsize(stfm);
	u32 start = req->cryptlen - ivsize;
	gfp_t flags;

	flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
			GFP_KERNEL : GFP_ATOMIC;
	req_info->ctrl.s.dma_mode = OTX_CPT_DMA_GATHER_SCATTER;
	req_info->ctrl.s.se_req = OTX_CPT_SE_CORE_REQ;

	req_info->req.opcode.s.major = OTX_CPT_MAJOR_OP_FC |
				DMA_MODE_FLAG(OTX_CPT_DMA_GATHER_SCATTER);
	if (enc)
		req_info->req.opcode.s.minor = 2;
	else
		req_info->req.opcode.s.minor = 3;
	if ((ctx->cipher_type == OTX_CPT_AES_CBC ||
	     ctx->cipher_type == OTX_CPT_DES3_CBC) &&
	    req->src == req->dst) {
		req_info->iv_out = kmalloc(ivsize, flags);
		if (!req_info->iv_out)
			return -ENOMEM;

		scatterwalk_map_and_copy(req_info->iv_out, req->src,
					 start, ivsize, 0);
	}
	/* Encryption data length */
	req_info->req.param1 = req->cryptlen;
	/* Authentication data length */
	req_info->req.param2 = 0;

	fctx->enc.enc_ctrl.e.enc_cipher = ctx->cipher_type;
	fctx->enc.enc_ctrl.e.aes_key = ctx->key_type;
	fctx->enc.enc_ctrl.e.iv_source = OTX_CPT_FROM_CPTR;

	if (ctx->cipher_type == OTX_CPT_AES_XTS)
		memcpy(fctx->enc.encr_key, ctx->enc_key, ctx->key_len * 2);
	else
		memcpy(fctx->enc.encr_key, ctx->enc_key, ctx->key_len);

	memcpy(fctx->enc.encr_iv, req->iv, crypto_skcipher_ivsize(stfm));

	fctx->enc.enc_ctrl.flags = cpu_to_be64(fctx->enc.enc_ctrl.cflags);

	/*
	 * Storing Packet Data Information in offset
	 * Control Word First 8 bytes
	 */
	req_info->in[*argcnt].vptr = (u8 *)&rctx->ctrl_word;
	req_info->in[*argcnt].size = CONTROL_WORD_LEN;
	req_info->req.dlen += CONTROL_WORD_LEN;
	++(*argcnt);

	req_info->in[*argcnt].vptr = (u8 *)fctx;
	req_info->in[*argcnt].size = sizeof(struct otx_cpt_fc_ctx);
	req_info->req.dlen += sizeof(struct otx_cpt_fc_ctx);

	++(*argcnt);

	return 0;
}

static inline u32 create_input_list(struct skcipher_request *req, u32 enc,
				    u32 enc_iv_len)
{
	struct otx_cpt_req_ctx *rctx = skcipher_request_ctx_dma(req);
	struct otx_cpt_req_info *req_info = &rctx->cpt_req;
	u32 argcnt = 0;
	int ret;

	ret = create_ctx_hdr(req, enc, &argcnt);
	if (ret)
		return ret;

	update_input_data(req_info, req->src, req->cryptlen, &argcnt);
	req_info->incnt = argcnt;

	return 0;
}

static inline void create_output_list(struct skcipher_request *req,
				      u32 enc_iv_len)
{
	struct otx_cpt_req_ctx *rctx = skcipher_request_ctx_dma(req);
	struct otx_cpt_req_info *req_info = &rctx->cpt_req;
	u32 argcnt = 0;

	/*
	 * OUTPUT Buffer Processing
	 * AES encryption/decryption output would be
	 * received in the following format
	 *
	 * ------IV--------|------ENCRYPTED/DECRYPTED DATA-----|
	 * [ 16 Bytes/     [   Request Enc/Dec/ DATA Len AES CBC ]
	 */
	update_output_data(req_info, req->dst, 0, req->cryptlen, &argcnt);
	req_info->outcnt = argcnt;
}

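/* Common submit path for skcipher encrypt and decrypt requests. */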
static inline int cpt_enc_dec(struct skcipher_request *req, u32 enc)
{
	struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
	struct otx_cpt_req_ctx *rctx = skcipher_request_ctx_dma(req);
	struct otx_cpt_req_info *req_info = &rctx->cpt_req;
	u32 enc_iv_len = crypto_skcipher_ivsize(stfm);
	struct pci_dev *pdev;
	int status, cpu_num;

	/* Validate that request doesn't exceed maximum CPT supported size */
	if (req->cryptlen > OTX_CPT_MAX_REQ_SIZE)
		return -E2BIG;

	/* Clear control words */
	rctx->ctrl_word.flags = 0;
	rctx->fctx.enc.enc_ctrl.flags = 0;

	status = create_input_list(req, enc, enc_iv_len);
	if (status)
		return status;
	create_output_list(req, enc_iv_len);

	status = get_se_device(&pdev, &cpu_num);
	if (status)
		return status;

	req_info->callback = (void *)otx_cpt_skcipher_callback;
	req_info->areq = &req->base;
	req_info->req_type = OTX_CPT_ENC_DEC_REQ;
	req_info->is_enc = enc;
	req_info->is_trunc_hmac = false;
	req_info->ctrl.s.grp = 0;

	/*
	 * We perform an asynchronous send and once
	 * the request is completed the driver notifies
	 * us through the registered callback functions
	 */
	status = otx_cpt_do_request(pdev, req_info, cpu_num);

	return status;
}

static int otx_cpt_skcipher_encrypt(struct skcipher_request *req)
{
	return cpt_enc_dec(req, true);
}

static int otx_cpt_skcipher_decrypt(struct skcipher_request *req)
{
	return cpt_enc_dec(req, false);
}

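/*
 * XTS takes two keys of equal size; key1 is stored at the start of
 * enc_key and key2 at the fixed KEY2_OFFSET expected by the microcode.
 */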
static int otx_cpt_skcipher_xts_setkey(struct crypto_skcipher *tfm,
				       const u8 *key, u32 keylen)
{
	struct otx_cpt_enc_ctx *ctx = crypto_skcipher_ctx(tfm);
	const u8 *key2 = key + (keylen / 2);
	const u8 *key1 = key;
	int ret;

	ret = xts_verify_key(tfm, key, keylen);
	if (ret)
		return ret;
	ctx->key_len = keylen;
	memcpy(ctx->enc_key, key1, keylen / 2);
	memcpy(ctx->enc_key + KEY2_OFFSET, key2, keylen / 2);
	ctx->cipher_type = OTX_CPT_AES_XTS;
	switch (ctx->key_len) {
	case 2 * AES_KEYSIZE_128:
		ctx->key_type = OTX_CPT_AES_128_BIT;
		break;
	case 2 * AES_KEYSIZE_256:
		ctx->key_type = OTX_CPT_AES_256_BIT;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int cpt_des_setkey(struct crypto_skcipher *tfm, const u8 *key,
			  u32 keylen, u8 cipher_type)
{
	struct otx_cpt_enc_ctx *ctx = crypto_skcipher_ctx(tfm);

	if (keylen != DES3_EDE_KEY_SIZE)
		return -EINVAL;

	ctx->key_len = keylen;
	ctx->cipher_type = cipher_type;

	memcpy(ctx->enc_key, key, keylen);

	return 0;
}

static int cpt_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
			  u32 keylen, u8 cipher_type)
{
	struct otx_cpt_enc_ctx *ctx = crypto_skcipher_ctx(tfm);

	switch (keylen) {
	case AES_KEYSIZE_128:
		ctx->key_type = OTX_CPT_AES_128_BIT;
		break;
	case AES_KEYSIZE_192:
		ctx->key_type = OTX_CPT_AES_192_BIT;
		break;
	case AES_KEYSIZE_256:
		ctx->key_type = OTX_CPT_AES_256_BIT;
		break;
	default:
		return -EINVAL;
	}
	ctx->key_len = keylen;
	ctx->cipher_type = cipher_type;

	memcpy(ctx->enc_key, key, keylen);

	return 0;
}

static int otx_cpt_skcipher_cbc_aes_setkey(struct crypto_skcipher *tfm,
					   const u8 *key, u32 keylen)
{
	return cpt_aes_setkey(tfm, key, keylen, OTX_CPT_AES_CBC);
}

static int otx_cpt_skcipher_ecb_aes_setkey(struct crypto_skcipher *tfm,
					   const u8 *key, u32 keylen)
{
	return cpt_aes_setkey(tfm, key, keylen, OTX_CPT_AES_ECB);
}

static int otx_cpt_skcipher_cbc_des3_setkey(struct crypto_skcipher *tfm,
					    const u8 *key, u32 keylen)
{
	return cpt_des_setkey(tfm, key, keylen, OTX_CPT_DES3_CBC);
}

static int otx_cpt_skcipher_ecb_des3_setkey(struct crypto_skcipher *tfm,
					    const u8 *key, u32 keylen)
{
	return cpt_des_setkey(tfm, key, keylen, OTX_CPT_DES3_ECB);
}

static int otx_cpt_enc_dec_init(struct crypto_skcipher *tfm)
{
	struct otx_cpt_enc_ctx *ctx = crypto_skcipher_ctx(tfm);

	memset(ctx, 0, sizeof(*ctx));
	/*
	 * Additional memory for skcipher_request is
	 * allocated since the cryptd daemon uses
	 * this memory for request_ctx information
	 */
	crypto_skcipher_set_reqsize_dma(
		tfm, sizeof(struct otx_cpt_req_ctx) +
		     sizeof(struct skcipher_request));

	return 0;
}

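/*
 * Common AEAD init: record the cipher and MAC type and, for non-NULL
 * ciphers, allocate the software shash later used to precompute the
 * HMAC ipad/opad partial hashes.
 */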
static int cpt_aead_init(struct crypto_aead *tfm, u8 cipher_type, u8 mac_type)
{
	struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(tfm);

	ctx->cipher_type = cipher_type;
	ctx->mac_type = mac_type;

	/*
	 * When selected cipher is NULL we use HMAC opcode instead of
	 * FLEXICRYPTO opcode therefore we don't need to use HASH algorithms
	 * for calculating ipad and opad
	 */
	if (ctx->cipher_type != OTX_CPT_CIPHER_NULL) {
		switch (ctx->mac_type) {
		case OTX_CPT_SHA1:
			ctx->hashalg = crypto_alloc_shash("sha1", 0,
							  CRYPTO_ALG_ASYNC);
			if (IS_ERR(ctx->hashalg))
				return PTR_ERR(ctx->hashalg);
			break;

		case OTX_CPT_SHA256:
			ctx->hashalg = crypto_alloc_shash("sha256", 0,
							  CRYPTO_ALG_ASYNC);
			if (IS_ERR(ctx->hashalg))
				return PTR_ERR(ctx->hashalg);
			break;

		case OTX_CPT_SHA384:
			ctx->hashalg = crypto_alloc_shash("sha384", 0,
							  CRYPTO_ALG_ASYNC);
			if (IS_ERR(ctx->hashalg))
				return PTR_ERR(ctx->hashalg);
			break;

		case OTX_CPT_SHA512:
			ctx->hashalg = crypto_alloc_shash("sha512", 0,
							  CRYPTO_ALG_ASYNC);
			if (IS_ERR(ctx->hashalg))
				return PTR_ERR(ctx->hashalg);
			break;
		}
	}

	crypto_aead_set_reqsize_dma(tfm, sizeof(struct otx_cpt_req_ctx));

	return 0;
}

static int otx_cpt_aead_cbc_aes_sha1_init(struct crypto_aead *tfm)
{
	return cpt_aead_init(tfm, OTX_CPT_AES_CBC, OTX_CPT_SHA1);
}

static int otx_cpt_aead_cbc_aes_sha256_init(struct crypto_aead *tfm)
{
	return cpt_aead_init(tfm, OTX_CPT_AES_CBC, OTX_CPT_SHA256);
}

static int otx_cpt_aead_cbc_aes_sha384_init(struct crypto_aead *tfm)
{
	return cpt_aead_init(tfm, OTX_CPT_AES_CBC, OTX_CPT_SHA384);
}

static int otx_cpt_aead_cbc_aes_sha512_init(struct crypto_aead *tfm)
{
	return cpt_aead_init(tfm, OTX_CPT_AES_CBC, OTX_CPT_SHA512);
}

static int otx_cpt_aead_ecb_null_sha1_init(struct crypto_aead *tfm)
{
	return cpt_aead_init(tfm, OTX_CPT_CIPHER_NULL, OTX_CPT_SHA1);
}

static int otx_cpt_aead_ecb_null_sha256_init(struct crypto_aead *tfm)
{
	return cpt_aead_init(tfm, OTX_CPT_CIPHER_NULL, OTX_CPT_SHA256);
}

static int otx_cpt_aead_ecb_null_sha384_init(struct crypto_aead *tfm)
{
	return cpt_aead_init(tfm, OTX_CPT_CIPHER_NULL, OTX_CPT_SHA384);
}

static int otx_cpt_aead_ecb_null_sha512_init(struct crypto_aead *tfm)
{
	return cpt_aead_init(tfm, OTX_CPT_CIPHER_NULL, OTX_CPT_SHA512);
}

static int otx_cpt_aead_gcm_aes_init(struct crypto_aead *tfm)
{
	return cpt_aead_init(tfm, OTX_CPT_AES_GCM, OTX_CPT_MAC_NULL);
}

static void otx_cpt_aead_exit(struct crypto_aead *tfm)
{
	struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(tfm);

	kfree(ctx->ipad);
	kfree(ctx->opad);
	if (ctx->hashalg)
		crypto_free_shash(ctx->hashalg);
	kfree(ctx->sdesc);
}

/*
 * This is the Integrity Check Value validation (aka the authentication tag
 * length)
 */
static int otx_cpt_aead_set_authsize(struct crypto_aead *tfm,
				     unsigned int authsize)
{
	struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(tfm);

	switch (ctx->mac_type) {
	case OTX_CPT_SHA1:
		if (authsize != SHA1_DIGEST_SIZE &&
		    authsize != SHA1_TRUNC_DIGEST_SIZE)
			return -EINVAL;

		if (authsize == SHA1_TRUNC_DIGEST_SIZE)
			ctx->is_trunc_hmac = true;
		break;

	case OTX_CPT_SHA256:
		if (authsize != SHA256_DIGEST_SIZE &&
		    authsize != SHA256_TRUNC_DIGEST_SIZE)
			return -EINVAL;

		if (authsize == SHA256_TRUNC_DIGEST_SIZE)
			ctx->is_trunc_hmac = true;
		break;

	case OTX_CPT_SHA384:
		if (authsize != SHA384_DIGEST_SIZE &&
		    authsize != SHA384_TRUNC_DIGEST_SIZE)
			return -EINVAL;

		if (authsize == SHA384_TRUNC_DIGEST_SIZE)
			ctx->is_trunc_hmac = true;
		break;

	case OTX_CPT_SHA512:
		if (authsize != SHA512_DIGEST_SIZE &&
		    authsize != SHA512_TRUNC_DIGEST_SIZE)
			return -EINVAL;

		if (authsize == SHA512_TRUNC_DIGEST_SIZE)
			ctx->is_trunc_hmac = true;
		break;

	case OTX_CPT_MAC_NULL:
		if (ctx->cipher_type == OTX_CPT_AES_GCM) {
			if (authsize != AES_GCM_ICV_SIZE)
				return -EINVAL;
		} else
			return -EINVAL;
		break;

	default:
		return -EINVAL;
	}

	tfm->authsize = authsize;
	return 0;
}

static struct otx_cpt_sdesc *alloc_sdesc(struct crypto_shash *alg)
{
	struct otx_cpt_sdesc *sdesc;
	int size;

	size = sizeof(struct shash_desc) + crypto_shash_descsize(alg);
	sdesc = kmalloc(size, GFP_KERNEL);
	if (!sdesc)
		return NULL;

	sdesc->shash.tfm = alg;

	return sdesc;
}

static inline void swap_data32(void *buf, u32 len)
{
	cpu_to_be32_array(buf, buf, len / 4);
}

static inline void swap_data64(void *buf, u32 len)
{
	__be64 *dst = buf;
	u64 *src = buf;
	int i = 0;

	for (i = 0 ; i < len / 8; i++, src++, dst++)
		*dst = cpu_to_be64p(src);
}

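/*
 * Copy an exported partial hash state out in the big-endian word layout
 * expected by the CPT microcode.
 */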
static int copy_pad(u8 mac_type, u8 *out_pad, u8 *in_pad)
{
	struct sha512_state *sha512;
	struct sha256_state *sha256;
	struct sha1_state *sha1;

	switch (mac_type) {
	case OTX_CPT_SHA1:
		sha1 = (struct sha1_state *) in_pad;
		swap_data32(sha1->state, SHA1_DIGEST_SIZE);
		memcpy(out_pad, &sha1->state, SHA1_DIGEST_SIZE);
		break;

	case OTX_CPT_SHA256:
		sha256 = (struct sha256_state *) in_pad;
		swap_data32(sha256->state, SHA256_DIGEST_SIZE);
		memcpy(out_pad, &sha256->state, SHA256_DIGEST_SIZE);
		break;

	case OTX_CPT_SHA384:
	case OTX_CPT_SHA512:
		sha512 = (struct sha512_state *) in_pad;
		swap_data64(sha512->state, SHA512_DIGEST_SIZE);
		memcpy(out_pad, &sha512->state, SHA512_DIGEST_SIZE);
		break;

	default:
		return -EINVAL;
	}

	return 0;
}

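/*
 * Precompute the HMAC inner and outer pads: hash the key XORed with
 * 0x36/0x5c through the software shash and export the partial states
 * for the hardware to resume from.
 */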
static int aead_hmac_init(struct crypto_aead *cipher)
{
	struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(cipher);
	int state_size = crypto_shash_statesize(ctx->hashalg);
	int ds = crypto_shash_digestsize(ctx->hashalg);
	int bs = crypto_shash_blocksize(ctx->hashalg);
	int authkeylen = ctx->auth_key_len;
	u8 *ipad = NULL, *opad = NULL;
	int ret = 0, icount = 0;

	ctx->sdesc = alloc_sdesc(ctx->hashalg);
	if (!ctx->sdesc)
		return -ENOMEM;

	ctx->ipad = kzalloc(bs, GFP_KERNEL);
	if (!ctx->ipad) {
		ret = -ENOMEM;
		goto calc_fail;
	}

	ctx->opad = kzalloc(bs, GFP_KERNEL);
	if (!ctx->opad) {
		ret = -ENOMEM;
		goto calc_fail;
	}

	ipad = kzalloc(state_size, GFP_KERNEL);
	if (!ipad) {
		ret = -ENOMEM;
		goto calc_fail;
	}

	opad = kzalloc(state_size, GFP_KERNEL);
	if (!opad) {
		ret = -ENOMEM;
		goto calc_fail;
	}

	if (authkeylen > bs) {
		ret = crypto_shash_digest(&ctx->sdesc->shash, ctx->key,
					  authkeylen, ipad);
		if (ret)
			goto calc_fail;

		authkeylen = ds;
	} else {
		memcpy(ipad, ctx->key, authkeylen);
	}

	memset(ipad + authkeylen, 0, bs - authkeylen);
	memcpy(opad, ipad, bs);

	for (icount = 0; icount < bs; icount++) {
		ipad[icount] ^= 0x36;
		opad[icount] ^= 0x5c;
	}

	/*
	 * Partial Hash calculated from the software
	 * algorithm is retrieved for IPAD & OPAD
	 */

	/* IPAD Calculation */
	crypto_shash_init(&ctx->sdesc->shash);
	crypto_shash_update(&ctx->sdesc->shash, ipad, bs);
	crypto_shash_export(&ctx->sdesc->shash, ipad);
	ret = copy_pad(ctx->mac_type, ctx->ipad, ipad);
	if (ret)
		goto calc_fail;

	/* OPAD Calculation */
	crypto_shash_init(&ctx->sdesc->shash);
	crypto_shash_update(&ctx->sdesc->shash, opad, bs);
	crypto_shash_export(&ctx->sdesc->shash, opad);
	ret = copy_pad(ctx->mac_type, ctx->opad, opad);
	if (ret)
		goto calc_fail;

	kfree(ipad);
	kfree(opad);

	return 0;

calc_fail:
	kfree(ctx->ipad);
	ctx->ipad = NULL;
	kfree(ctx->opad);
	ctx->opad = NULL;
	kfree(ipad);
	kfree(opad);
	kfree(ctx->sdesc);
	ctx->sdesc = NULL;

	return ret;
}

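/*
 * The authenc() key blob carries an rtattr header holding the encryption
 * key length; split the blob into the AES key and the authentication key.
 */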
static int otx_cpt_aead_cbc_aes_sha_setkey(struct crypto_aead *cipher,
					   const unsigned char *key,
					   unsigned int keylen)
{
	struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(cipher);
	struct crypto_authenc_key_param *param;
	int enckeylen = 0, authkeylen = 0;
	struct rtattr *rta = (void *)key;
	int status = -EINVAL;

	if (!RTA_OK(rta, keylen))
		goto badkey;

	if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
		goto badkey;

	if (RTA_PAYLOAD(rta) < sizeof(*param))
		goto badkey;

	param = RTA_DATA(rta);
	enckeylen = be32_to_cpu(param->enckeylen);
	key += RTA_ALIGN(rta->rta_len);
	keylen -= RTA_ALIGN(rta->rta_len);
	if (keylen < enckeylen)
		goto badkey;

	if (keylen > OTX_CPT_MAX_KEY_SIZE)
		goto badkey;

	authkeylen = keylen - enckeylen;
	memcpy(ctx->key, key, keylen);

	switch (enckeylen) {
	case AES_KEYSIZE_128:
		ctx->key_type = OTX_CPT_AES_128_BIT;
		break;
	case AES_KEYSIZE_192:
		ctx->key_type = OTX_CPT_AES_192_BIT;
		break;
	case AES_KEYSIZE_256:
		ctx->key_type = OTX_CPT_AES_256_BIT;
		break;
	default:
		/* Invalid key length */
		goto badkey;
	}

	ctx->enc_key_len = enckeylen;
	ctx->auth_key_len = authkeylen;

	status = aead_hmac_init(cipher);
	if (status)
		goto badkey;

	return 0;
badkey:
	return status;
}

static int otx_cpt_aead_ecb_null_sha_setkey(struct crypto_aead *cipher,
					    const unsigned char *key,
					    unsigned int keylen)
{
	struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(cipher);
	struct crypto_authenc_key_param *param;
	struct rtattr *rta = (void *)key;
	int enckeylen = 0;

	if (!RTA_OK(rta, keylen))
		goto badkey;

	if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
		goto badkey;

	if (RTA_PAYLOAD(rta) < sizeof(*param))
		goto badkey;

	param = RTA_DATA(rta);
	enckeylen = be32_to_cpu(param->enckeylen);
	key += RTA_ALIGN(rta->rta_len);
	keylen -= RTA_ALIGN(rta->rta_len);
	if (enckeylen != 0)
		goto badkey;

	if (keylen > OTX_CPT_MAX_KEY_SIZE)
		goto badkey;

	memcpy(ctx->key, key, keylen);
	ctx->enc_key_len = enckeylen;
	ctx->auth_key_len = keylen;
	return 0;
badkey:
	return -EINVAL;
}

static int otx_cpt_aead_gcm_aes_setkey(struct crypto_aead *cipher,
				       const unsigned char *key,
				       unsigned int keylen)
{
	struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(cipher);

	/*
	 * For aes gcm we expect to get encryption key (16, 24, 32 bytes)
	 * and salt (4 bytes)
	 */
	switch (keylen) {
	case AES_KEYSIZE_128 + AES_GCM_SALT_SIZE:
		ctx->key_type = OTX_CPT_AES_128_BIT;
		ctx->enc_key_len = AES_KEYSIZE_128;
		break;
	case AES_KEYSIZE_192 + AES_GCM_SALT_SIZE:
		ctx->key_type = OTX_CPT_AES_192_BIT;
		ctx->enc_key_len = AES_KEYSIZE_192;
		break;
	case AES_KEYSIZE_256 + AES_GCM_SALT_SIZE:
		ctx->key_type = OTX_CPT_AES_256_BIT;
		ctx->enc_key_len = AES_KEYSIZE_256;
		break;
	default:
		/* Invalid key and salt length */
		return -EINVAL;
	}

	/* Store encryption key and salt */
	memcpy(ctx->key, key, keylen);

	return 0;
}

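/*
 * Build the FLEXICRYPTO context header for an AEAD request. For CBC the
 * precomputed ipad/opad partial hashes are loaded; for GCM the salt from
 * setkey seeds the nonce and the per-request IV is taken from DPTR.
 */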
static inline u32 create_aead_ctx_hdr(struct aead_request *req, u32 enc,
				      u32 *argcnt)
{
	struct otx_cpt_req_ctx *rctx = aead_request_ctx_dma(req);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(tfm);
	struct otx_cpt_req_info *req_info = &rctx->cpt_req;
	struct otx_cpt_fc_ctx *fctx = &rctx->fctx;
	int mac_len = crypto_aead_authsize(tfm);
	int ds;

	rctx->ctrl_word.e.enc_data_offset = req->assoclen;

	switch (ctx->cipher_type) {
	case OTX_CPT_AES_CBC:
		fctx->enc.enc_ctrl.e.iv_source = OTX_CPT_FROM_CPTR;
		/* Copy encryption key to context */
		memcpy(fctx->enc.encr_key, ctx->key + ctx->auth_key_len,
		       ctx->enc_key_len);
		/* Copy IV to context */
		memcpy(fctx->enc.encr_iv, req->iv, crypto_aead_ivsize(tfm));

		ds = crypto_shash_digestsize(ctx->hashalg);
		if (ctx->mac_type == OTX_CPT_SHA384)
			ds = SHA512_DIGEST_SIZE;
		if (ctx->ipad)
			memcpy(fctx->hmac.e.ipad, ctx->ipad, ds);
		if (ctx->opad)
			memcpy(fctx->hmac.e.opad, ctx->opad, ds);
		break;

	case OTX_CPT_AES_GCM:
		fctx->enc.enc_ctrl.e.iv_source = OTX_CPT_FROM_DPTR;
		/* Copy encryption key to context */
		memcpy(fctx->enc.encr_key, ctx->key, ctx->enc_key_len);
		/* Copy salt to context */
		memcpy(fctx->enc.encr_iv, ctx->key + ctx->enc_key_len,
		       AES_GCM_SALT_SIZE);

		rctx->ctrl_word.e.iv_offset = req->assoclen - AES_GCM_IV_OFFSET;
		break;

	default:
		/* Unknown cipher type */
		return -EINVAL;
	}
	rctx->ctrl_word.flags = cpu_to_be64(rctx->ctrl_word.cflags);

	req_info->ctrl.s.dma_mode = OTX_CPT_DMA_GATHER_SCATTER;
	req_info->ctrl.s.se_req = OTX_CPT_SE_CORE_REQ;
	req_info->req.opcode.s.major = OTX_CPT_MAJOR_OP_FC |
				 DMA_MODE_FLAG(OTX_CPT_DMA_GATHER_SCATTER);
	if (enc) {
		req_info->req.opcode.s.minor = 2;
		req_info->req.param1 = req->cryptlen;
		req_info->req.param2 = req->cryptlen + req->assoclen;
	} else {
		req_info->req.opcode.s.minor = 3;
		req_info->req.param1 = req->cryptlen - mac_len;
		req_info->req.param2 = req->cryptlen + req->assoclen - mac_len;
	}

	fctx->enc.enc_ctrl.e.enc_cipher = ctx->cipher_type;
	fctx->enc.enc_ctrl.e.aes_key = ctx->key_type;
	fctx->enc.enc_ctrl.e.mac_type = ctx->mac_type;
	fctx->enc.enc_ctrl.e.mac_len = mac_len;
	fctx->enc.enc_ctrl.flags = cpu_to_be64(fctx->enc.enc_ctrl.cflags);

	/*
	 * Storing Packet Data Information in offset
	 * Control Word First 8 bytes
	 */
	req_info->in[*argcnt].vptr = (u8 *)&rctx->ctrl_word;
	req_info->in[*argcnt].size = CONTROL_WORD_LEN;
	req_info->req.dlen += CONTROL_WORD_LEN;
	++(*argcnt);

	req_info->in[*argcnt].vptr = (u8 *)fctx;
	req_info->in[*argcnt].size = sizeof(struct otx_cpt_fc_ctx);
	req_info->req.dlen += sizeof(struct otx_cpt_fc_ctx);

	++(*argcnt);

	return 0;
}

static inline u32 create_hmac_ctx_hdr(struct aead_request *req, u32 *argcnt,
				      u32 enc)
{
	struct otx_cpt_req_ctx *rctx = aead_request_ctx_dma(req);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(tfm);
	struct otx_cpt_req_info *req_info = &rctx->cpt_req;

	req_info->ctrl.s.dma_mode = OTX_CPT_DMA_GATHER_SCATTER;
	req_info->ctrl.s.se_req = OTX_CPT_SE_CORE_REQ;
	req_info->req.opcode.s.major = OTX_CPT_MAJOR_OP_HMAC |
				 DMA_MODE_FLAG(OTX_CPT_DMA_GATHER_SCATTER);
	req_info->is_trunc_hmac = ctx->is_trunc_hmac;

	req_info->req.opcode.s.minor = 0;
	req_info->req.param1 = ctx->auth_key_len;
	req_info->req.param2 = ctx->mac_type << 8;

	/* Add authentication key */
	req_info->in[*argcnt].vptr = ctx->key;
	req_info->in[*argcnt].size = round_up(ctx->auth_key_len, 8);
	req_info->req.dlen += round_up(ctx->auth_key_len, 8);
	++(*argcnt);

	return 0;
}

static inline u32 create_aead_input_list(struct aead_request *req, u32 enc)
{
	struct otx_cpt_req_ctx *rctx = aead_request_ctx_dma(req);
	struct otx_cpt_req_info *req_info = &rctx->cpt_req;
	u32 inputlen = req->cryptlen + req->assoclen;
	u32 status, argcnt = 0;

	status = create_aead_ctx_hdr(req, enc, &argcnt);
	if (status)
		return status;
	update_input_data(req_info, req->src, inputlen, &argcnt);
	req_info->incnt = argcnt;

	return 0;
}

static inline u32 create_aead_output_list(struct aead_request *req, u32 enc,
					  u32 mac_len)
{
	struct otx_cpt_req_ctx *rctx = aead_request_ctx_dma(req);
	struct otx_cpt_req_info *req_info = &rctx->cpt_req;
	u32 argcnt = 0, outputlen = 0;

	if (enc)
		outputlen = req->cryptlen + req->assoclen + mac_len;
	else
		outputlen = req->cryptlen + req->assoclen - mac_len;

	update_output_data(req_info, req->dst, 0, outputlen, &argcnt);
	req_info->outcnt = argcnt;

	return 0;
}

static inline u32 create_aead_null_input_list(struct aead_request *req,
					      u32 enc, u32 mac_len)
{
	struct otx_cpt_req_ctx *rctx = aead_request_ctx_dma(req);
	struct otx_cpt_req_info *req_info = &rctx->cpt_req;
	u32 inputlen, argcnt = 0;

	if (enc)
		inputlen = req->cryptlen + req->assoclen;
	else
		inputlen = req->cryptlen + req->assoclen - mac_len;

	create_hmac_ctx_hdr(req, &argcnt, enc);
	update_input_data(req_info, req->src, inputlen, &argcnt);
	req_info->incnt = argcnt;

	return 0;
}

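/*
 * NULL-cipher output list: on encryption only the HMAC is written,
 * appended right after the payload; on decryption the received HMAC is
 * copied aside so the callback can compare it with the calculated one.
 */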
static inline u32 create_aead_null_output_list(struct aead_request *req,
					       u32 enc, u32 mac_len)
{
	struct otx_cpt_req_ctx *rctx = aead_request_ctx_dma(req);
	struct otx_cpt_req_info *req_info = &rctx->cpt_req;
	struct scatterlist *dst;
	u8 *ptr = NULL;
	int argcnt = 0, status, offset;
	u32 inputlen;

	if (enc)
		inputlen = req->cryptlen + req->assoclen;
	else
		inputlen = req->cryptlen + req->assoclen - mac_len;

	/*
	 * If source and destination are different
	 * then copy payload to destination
	 */
	if (req->src != req->dst) {

		ptr = kmalloc(inputlen, (req_info->areq->flags &
					 CRYPTO_TFM_REQ_MAY_SLEEP) ?
					 GFP_KERNEL : GFP_ATOMIC);
		if (!ptr) {
			status = -ENOMEM;
			goto error;
		}

		status = sg_copy_to_buffer(req->src, sg_nents(req->src), ptr,
					   inputlen);
		if (status != inputlen) {
			status = -EINVAL;
			goto error_free;
		}
		status = sg_copy_from_buffer(req->dst, sg_nents(req->dst), ptr,
					     inputlen);
		if (status != inputlen) {
			status = -EINVAL;
			goto error_free;
		}
		kfree(ptr);
	}

	if (enc) {
		/*
		 * In an encryption scenario hmac needs
		 * to be appended after payload
		 */
		dst = req->dst;
		offset = inputlen;
		while (offset >= dst->length) {
			offset -= dst->length;
			dst = sg_next(dst);
			if (!dst) {
				status = -ENOENT;
				goto error;
			}
		}

		update_output_data(req_info, dst, offset, mac_len, &argcnt);
	} else {
		/*
		 * In a decryption scenario the calculated hmac for the
		 * received payload needs to be compared with the hmac received
		 */
		status = sg_copy_buffer(req->src, sg_nents(req->src),
					rctx->fctx.hmac.s.hmac_recv, mac_len,
					inputlen, true);
		if (status != mac_len) {
			status = -EINVAL;
			goto error;
		}

		req_info->out[argcnt].vptr = rctx->fctx.hmac.s.hmac_calc;
		req_info->out[argcnt].size = mac_len;
		argcnt++;
	}

	req_info->outcnt = argcnt;
	return 0;

error_free:
	kfree(ptr);
error:
	return status;
}

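/* Common submit path for AEAD requests, both FC and HMAC-only types. */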
static u32 cpt_aead_enc_dec(struct aead_request *req, u8 reg_type, u8 enc)
{
	struct otx_cpt_req_ctx *rctx = aead_request_ctx_dma(req);
	struct otx_cpt_req_info *req_info = &rctx->cpt_req;
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct pci_dev *pdev;
	u32 status, cpu_num;

	/* Clear control words */
	rctx->ctrl_word.flags = 0;
	rctx->fctx.enc.enc_ctrl.flags = 0;

	req_info->callback = otx_cpt_aead_callback;
	req_info->areq = &req->base;
	req_info->req_type = reg_type;
	req_info->is_enc = enc;
	req_info->is_trunc_hmac = false;

	switch (reg_type) {
	case OTX_CPT_AEAD_ENC_DEC_REQ:
		status = create_aead_input_list(req, enc);
		if (status)
			return status;
		status = create_aead_output_list(req, enc,
						 crypto_aead_authsize(tfm));
		if (status)
			return status;
		break;

	case OTX_CPT_AEAD_ENC_DEC_NULL_REQ:
		status = create_aead_null_input_list(req, enc,
						     crypto_aead_authsize(tfm));
		if (status)
			return status;
		status = create_aead_null_output_list(req, enc,
						crypto_aead_authsize(tfm));
		if (status)
			return status;
		break;

	default:
		return -EINVAL;
	}

	/* Validate that request doesn't exceed maximum CPT supported size */
	if (req_info->req.param1 > OTX_CPT_MAX_REQ_SIZE ||
	    req_info->req.param2 > OTX_CPT_MAX_REQ_SIZE)
		return -E2BIG;

	status = get_se_device(&pdev, &cpu_num);
	if (status)
		return status;

	req_info->ctrl.s.grp = 0;

	status = otx_cpt_do_request(pdev, req_info, cpu_num);
	/*
	 * We perform an asynchronous send and once
	 * the request is completed the driver notifies
	 * us through the registered callback functions
	 */
	return status;
}

static int otx_cpt_aead_encrypt(struct aead_request *req)
{
	return cpt_aead_enc_dec(req, OTX_CPT_AEAD_ENC_DEC_REQ, true);
}

static int otx_cpt_aead_decrypt(struct aead_request *req)
{
	return cpt_aead_enc_dec(req, OTX_CPT_AEAD_ENC_DEC_REQ, false);
}

static int otx_cpt_aead_null_encrypt(struct aead_request *req)
{
	return cpt_aead_enc_dec(req, OTX_CPT_AEAD_ENC_DEC_NULL_REQ, true);
}

static int otx_cpt_aead_null_decrypt(struct aead_request *req)
{
	return cpt_aead_enc_dec(req, OTX_CPT_AEAD_ENC_DEC_NULL_REQ, false);
}

static struct skcipher_alg otx_cpt_skciphers[] = { {
	.base.cra_name = "xts(aes)",
	.base.cra_driver_name = "cpt_xts_aes",
	.base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
	.base.cra_blocksize = AES_BLOCK_SIZE,
	.base.cra_ctxsize = sizeof(struct otx_cpt_enc_ctx),
	.base.cra_alignmask = 7,
	.base.cra_priority = 4001,
	.base.cra_module = THIS_MODULE,

	.init = otx_cpt_enc_dec_init,
	.ivsize = AES_BLOCK_SIZE,
	.min_keysize = 2 * AES_MIN_KEY_SIZE,
	.max_keysize = 2 * AES_MAX_KEY_SIZE,
	.setkey = otx_cpt_skcipher_xts_setkey,
	.encrypt = otx_cpt_skcipher_encrypt,
	.decrypt = otx_cpt_skcipher_decrypt,
}, {
	.base.cra_name = "cbc(aes)",
	.base.cra_driver_name = "cpt_cbc_aes",
	.base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
	.base.cra_blocksize = AES_BLOCK_SIZE,
	.base.cra_ctxsize = sizeof(struct otx_cpt_enc_ctx),
	.base.cra_alignmask = 7,
	.base.cra_priority = 4001,
	.base.cra_module = THIS_MODULE,

	.init = otx_cpt_enc_dec_init,
	.ivsize = AES_BLOCK_SIZE,
	.min_keysize = AES_MIN_KEY_SIZE,
	.max_keysize = AES_MAX_KEY_SIZE,
	.setkey = otx_cpt_skcipher_cbc_aes_setkey,
	.encrypt = otx_cpt_skcipher_encrypt,
	.decrypt = otx_cpt_skcipher_decrypt,
}, {
	.base.cra_name = "ecb(aes)",
	.base.cra_driver_name = "cpt_ecb_aes",
	.base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
	.base.cra_blocksize = AES_BLOCK_SIZE,
	.base.cra_ctxsize = sizeof(struct otx_cpt_enc_ctx),
	.base.cra_alignmask = 7,
	.base.cra_priority = 4001,
	.base.cra_module = THIS_MODULE,

	.init = otx_cpt_enc_dec_init,
	.ivsize = 0,
	.min_keysize = AES_MIN_KEY_SIZE,
	.max_keysize = AES_MAX_KEY_SIZE,
	.setkey = otx_cpt_skcipher_ecb_aes_setkey,
	.encrypt = otx_cpt_skcipher_encrypt,
	.decrypt = otx_cpt_skcipher_decrypt,
}, {
	.base.cra_name = "cbc(des3_ede)",
	.base.cra_driver_name = "cpt_cbc_des3_ede",
	.base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
	.base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
	.base.cra_ctxsize = sizeof(struct otx_cpt_des3_ctx),
	.base.cra_alignmask = 7,
	.base.cra_priority = 4001,
	.base.cra_module = THIS_MODULE,

	.init = otx_cpt_enc_dec_init,
	.min_keysize = DES3_EDE_KEY_SIZE,
	.max_keysize = DES3_EDE_KEY_SIZE,
	.ivsize = DES_BLOCK_SIZE,
	.setkey = otx_cpt_skcipher_cbc_des3_setkey,
	.encrypt = otx_cpt_skcipher_encrypt,
	.decrypt = otx_cpt_skcipher_decrypt,
}, {
	.base.cra_name = "ecb(des3_ede)",
	.base.cra_driver_name = "cpt_ecb_des3_ede",
	.base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
	.base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
	.base.cra_ctxsize = sizeof(struct otx_cpt_des3_ctx),
	.base.cra_alignmask = 7,
	.base.cra_priority = 4001,
	.base.cra_module = THIS_MODULE,

	.init = otx_cpt_enc_dec_init,
	.min_keysize = DES3_EDE_KEY_SIZE,
	.max_keysize = DES3_EDE_KEY_SIZE,
	.ivsize = 0,
	.setkey = otx_cpt_skcipher_ecb_des3_setkey,
	.encrypt = otx_cpt_skcipher_encrypt,
	.decrypt = otx_cpt_skcipher_decrypt,
} };

static struct aead_alg otx_cpt_aeads[] = { {
	.base = {
		.cra_name = "authenc(hmac(sha1),cbc(aes))",
		.cra_driver_name = "cpt_hmac_sha1_cbc_aes",
		.cra_blocksize = AES_BLOCK_SIZE,
		.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
		.cra_ctxsize = sizeof(struct otx_cpt_aead_ctx) + CRYPTO_DMA_PADDING,
		.cra_priority = 4001,
		.cra_alignmask = 0,
		.cra_module = THIS_MODULE,
	},
	.init = otx_cpt_aead_cbc_aes_sha1_init,
	.exit = otx_cpt_aead_exit,
	.setkey = otx_cpt_aead_cbc_aes_sha_setkey,
	.setauthsize = otx_cpt_aead_set_authsize,
	.encrypt = otx_cpt_aead_encrypt,
	.decrypt = otx_cpt_aead_decrypt,
	.ivsize = AES_BLOCK_SIZE,
	.maxauthsize = SHA1_DIGEST_SIZE,
}, {
	.base = {
		.cra_name = "authenc(hmac(sha256),cbc(aes))",
		.cra_driver_name = "cpt_hmac_sha256_cbc_aes",
		.cra_blocksize = AES_BLOCK_SIZE,
		.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
		.cra_ctxsize = sizeof(struct otx_cpt_aead_ctx) + CRYPTO_DMA_PADDING,
		.cra_priority = 4001,
		.cra_alignmask = 0,
		.cra_module = THIS_MODULE,
	},
	.init = otx_cpt_aead_cbc_aes_sha256_init,
	.exit = otx_cpt_aead_exit,
	.setkey = otx_cpt_aead_cbc_aes_sha_setkey,
	.setauthsize = otx_cpt_aead_set_authsize,
	.encrypt = otx_cpt_aead_encrypt,
	.decrypt = otx_cpt_aead_decrypt,
	.ivsize = AES_BLOCK_SIZE,
	.maxauthsize = SHA256_DIGEST_SIZE,
}, {
	.base = {
		.cra_name = "authenc(hmac(sha384),cbc(aes))",
		.cra_driver_name = "cpt_hmac_sha384_cbc_aes",
		.cra_blocksize = AES_BLOCK_SIZE,
		.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
		.cra_ctxsize = sizeof(struct otx_cpt_aead_ctx) + CRYPTO_DMA_PADDING,
		.cra_priority = 4001,
		.cra_alignmask = 0,
		.cra_module = THIS_MODULE,
	},
	.init = otx_cpt_aead_cbc_aes_sha384_init,
	.exit = otx_cpt_aead_exit,
	.setkey = otx_cpt_aead_cbc_aes_sha_setkey,
	.setauthsize = otx_cpt_aead_set_authsize,
	.encrypt = otx_cpt_aead_encrypt,
	.decrypt = otx_cpt_aead_decrypt,
	.ivsize = AES_BLOCK_SIZE,
	.maxauthsize = SHA384_DIGEST_SIZE,
}, {
	.base = {
		.cra_name = "authenc(hmac(sha512),cbc(aes))",
		.cra_driver_name = "cpt_hmac_sha512_cbc_aes",
		.cra_blocksize = AES_BLOCK_SIZE,
		.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
		.cra_ctxsize = sizeof(struct otx_cpt_aead_ctx) + CRYPTO_DMA_PADDING,
		.cra_priority = 4001,
		.cra_alignmask = 0,
		.cra_module = THIS_MODULE,
	},
	.init = otx_cpt_aead_cbc_aes_sha512_init,
	.exit = otx_cpt_aead_exit,
	.setkey = otx_cpt_aead_cbc_aes_sha_setkey,
	.setauthsize = otx_cpt_aead_set_authsize,
	.encrypt = otx_cpt_aead_encrypt,
	.decrypt = otx_cpt_aead_decrypt,
	.ivsize = AES_BLOCK_SIZE,
	.maxauthsize = SHA512_DIGEST_SIZE,
}, {
	.base = {
		.cra_name = "authenc(hmac(sha1),ecb(cipher_null))",
		.cra_driver_name = "cpt_hmac_sha1_ecb_null",
		.cra_blocksize = 1,
		.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
		.cra_ctxsize = sizeof(struct otx_cpt_aead_ctx) + CRYPTO_DMA_PADDING,
		.cra_priority = 4001,
		.cra_alignmask = 0,
		.cra_module = THIS_MODULE,
	},
	.init = otx_cpt_aead_ecb_null_sha1_init,
	.exit = otx_cpt_aead_exit,
	.setkey = otx_cpt_aead_ecb_null_sha_setkey,
	.setauthsize = otx_cpt_aead_set_authsize,
	.encrypt = otx_cpt_aead_null_encrypt,
	.decrypt = otx_cpt_aead_null_decrypt,
	.ivsize = 0,
	.maxauthsize = SHA1_DIGEST_SIZE,
}, {
	.base = {
		.cra_name = "authenc(hmac(sha256),ecb(cipher_null))",
		.cra_driver_name = "cpt_hmac_sha256_ecb_null",
		.cra_blocksize = 1,
		.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
		.cra_ctxsize = sizeof(struct otx_cpt_aead_ctx) + CRYPTO_DMA_PADDING,
		.cra_priority = 4001,
		.cra_alignmask = 0,
		.cra_module = THIS_MODULE,
	},
	.init = otx_cpt_aead_ecb_null_sha256_init,
	.exit = otx_cpt_aead_exit,
	.setkey = otx_cpt_aead_ecb_null_sha_setkey,
	.setauthsize = otx_cpt_aead_set_authsize,
	.encrypt = otx_cpt_aead_null_encrypt,
	.decrypt = otx_cpt_aead_null_decrypt,
	.ivsize = 0,
	.maxauthsize = SHA256_DIGEST_SIZE,
}, {
	.base = {
		.cra_name = "authenc(hmac(sha384),ecb(cipher_null))",
		.cra_driver_name = "cpt_hmac_sha384_ecb_null",
		.cra_blocksize = 1,
		.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
		.cra_ctxsize = sizeof(struct otx_cpt_aead_ctx) + CRYPTO_DMA_PADDING,
		.cra_priority = 4001,
		.cra_alignmask = 0,
		.cra_module = THIS_MODULE,
	},
	.init = otx_cpt_aead_ecb_null_sha384_init,
	.exit = otx_cpt_aead_exit,
	.setkey = otx_cpt_aead_ecb_null_sha_setkey,
	.setauthsize = otx_cpt_aead_set_authsize,
	.encrypt = otx_cpt_aead_null_encrypt,
	.decrypt = otx_cpt_aead_null_decrypt,
	.ivsize = 0,
	.maxauthsize = SHA384_DIGEST_SIZE,
}, {
	.base = {
		.cra_name = "authenc(hmac(sha512),ecb(cipher_null))",
		.cra_driver_name = "cpt_hmac_sha512_ecb_null",
		.cra_blocksize = 1,
		.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
		.cra_ctxsize = sizeof(struct otx_cpt_aead_ctx) + CRYPTO_DMA_PADDING,
		.cra_priority = 4001,
		.cra_alignmask = 0,
		.cra_module = THIS_MODULE,
	},
	.init = otx_cpt_aead_ecb_null_sha512_init,
	.exit = otx_cpt_aead_exit,
	.setkey = otx_cpt_aead_ecb_null_sha_setkey,
	.setauthsize = otx_cpt_aead_set_authsize,
	.encrypt = otx_cpt_aead_null_encrypt,
	.decrypt = otx_cpt_aead_null_decrypt,
	.ivsize = 0,
	.maxauthsize = SHA512_DIGEST_SIZE,
}, {
	.base = {
		.cra_name = "rfc4106(gcm(aes))",
		.cra_driver_name = "cpt_rfc4106_gcm_aes",
		.cra_blocksize = 1,
		.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
		.cra_ctxsize = sizeof(struct otx_cpt_aead_ctx) + CRYPTO_DMA_PADDING,
		.cra_priority = 4001,
		.cra_alignmask = 0,
		.cra_module = THIS_MODULE,
	},
	.init = otx_cpt_aead_gcm_aes_init,
	.exit = otx_cpt_aead_exit,
	.setkey = otx_cpt_aead_gcm_aes_setkey,
	.setauthsize = otx_cpt_aead_set_authsize,
	.encrypt = otx_cpt_aead_encrypt,
	.decrypt = otx_cpt_aead_decrypt,
	.ivsize = AES_GCM_IV_SIZE,
	.maxauthsize = AES_GCM_ICV_SIZE,
} };

static inline int is_any_alg_used(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(otx_cpt_skciphers); i++)
		if (refcount_read(&otx_cpt_skciphers[i].base.cra_refcnt) != 1)
			return true;
	for (i = 0; i < ARRAY_SIZE(otx_cpt_aeads); i++)
		if (refcount_read(&otx_cpt_aeads[i].base.cra_refcnt) != 1)
			return true;
	return false;
}

static inline int cpt_register_algs(void)
{
	int i, err = 0;

	if (!IS_ENABLED(CONFIG_DM_CRYPT)) {
		for (i = 0; i < ARRAY_SIZE(otx_cpt_skciphers); i++)
			otx_cpt_skciphers[i].base.cra_flags &= ~CRYPTO_ALG_DEAD;

		err = crypto_register_skciphers(otx_cpt_skciphers,
						ARRAY_SIZE(otx_cpt_skciphers));
		if (err)
			return err;
	}

	for (i = 0; i < ARRAY_SIZE(otx_cpt_aeads); i++)
		otx_cpt_aeads[i].base.cra_flags &= ~CRYPTO_ALG_DEAD;

	err = crypto_register_aeads(otx_cpt_aeads, ARRAY_SIZE(otx_cpt_aeads));
	if (err) {
		crypto_unregister_skciphers(otx_cpt_skciphers,
					    ARRAY_SIZE(otx_cpt_skciphers));
		return err;
	}

	return 0;
}

static inline void cpt_unregister_algs(void)
{
	crypto_unregister_skciphers(otx_cpt_skciphers,
				    ARRAY_SIZE(otx_cpt_skciphers));
	crypto_unregister_aeads(otx_cpt_aeads, ARRAY_SIZE(otx_cpt_aeads));
}

static int compare_func(const void *lptr, const void *rptr)
{
	struct cpt_device_desc *ldesc = (struct cpt_device_desc *) lptr;
	struct cpt_device_desc *rdesc = (struct cpt_device_desc *) rptr;

	if (ldesc->dev->devfn < rdesc->dev->devfn)
		return -1;
	if (ldesc->dev->devfn > rdesc->dev->devfn)
		return 1;
	return 0;
}

static void swap_func(void *lptr, void *rptr, int size)
{
	struct cpt_device_desc *ldesc = (struct cpt_device_desc *) lptr;
	struct cpt_device_desc *rdesc = (struct cpt_device_desc *) rptr;

	swap(*ldesc, *rdesc);
}

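/*
 * Register a VF with the algorithm layer. The crypto algorithms are
 * registered once the expected number of SE devices has appeared, and
 * the device tables are kept sorted by devfn so queue selection stays
 * stable across probe order.
 */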
int otx_cpt_crypto_init(struct pci_dev *pdev, struct module *mod,
			enum otx_cptpf_type pf_type,
			enum otx_cptvf_type engine_type,
			int num_queues, int num_devices)
{
	int ret = 0;
	int count;

	mutex_lock(&mutex);
	switch (engine_type) {
	case OTX_CPT_SE_TYPES:
		count = atomic_read(&se_devices.count);
		if (count >= CPT_MAX_VF_NUM) {
			dev_err(&pdev->dev, "No space to add a new device\n");
			ret = -ENOSPC;
			goto err;
		}
		se_devices.desc[count].pf_type = pf_type;
		se_devices.desc[count].num_queues = num_queues;
		se_devices.desc[count++].dev = pdev;
		atomic_inc(&se_devices.count);

		if (atomic_read(&se_devices.count) == num_devices &&
		    is_crypto_registered == false) {
			if (cpt_register_algs()) {
				dev_err(&pdev->dev,
					"Error in registering crypto algorithms\n");
				ret = -EINVAL;
				goto err;
			}
			try_module_get(mod);
			is_crypto_registered = true;
		}
		sort(se_devices.desc, count, sizeof(struct cpt_device_desc),
		     compare_func, swap_func);
		break;

	case OTX_CPT_AE_TYPES:
		count = atomic_read(&ae_devices.count);
		if (count >= CPT_MAX_VF_NUM) {
			dev_err(&pdev->dev, "No space to add a new device\n");
			ret = -ENOSPC;
			goto err;
		}
		ae_devices.desc[count].pf_type = pf_type;
		ae_devices.desc[count].num_queues = num_queues;
		ae_devices.desc[count++].dev = pdev;
		atomic_inc(&ae_devices.count);
		sort(ae_devices.desc, count, sizeof(struct cpt_device_desc),
		     compare_func, swap_func);
		break;

	default:
		dev_err(&pdev->dev, "Unknown VF type %d\n", engine_type);
		ret = BAD_OTX_CPTVF_TYPE;
	}
err:
	mutex_unlock(&mutex);
	return ret;
}

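/*
 * Remove a VF from its device table; the last SE device going away
 * unregisters the algorithms, provided no tfm is still using them.
 */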
void otx_cpt_crypto_exit(struct pci_dev *pdev, struct module *mod,
			 enum otx_cptvf_type engine_type)
{
	struct cpt_device_table *dev_tbl;
	bool dev_found = false;
	int i, j, count;

	mutex_lock(&mutex);

	dev_tbl = (engine_type == OTX_CPT_AE_TYPES) ? &ae_devices : &se_devices;
	count = atomic_read(&dev_tbl->count);
	for (i = 0; i < count; i++)
		if (pdev == dev_tbl->desc[i].dev) {
			for (j = i; j < count-1; j++)
				dev_tbl->desc[j] = dev_tbl->desc[j+1];
			dev_found = true;
			break;
		}

	if (!dev_found) {
		dev_err(&pdev->dev, "%s device not found\n", __func__);
		goto exit;
	}

	if (engine_type != OTX_CPT_AE_TYPES) {
		if (atomic_dec_and_test(&se_devices.count) &&
		    !is_any_alg_used()) {
			cpt_unregister_algs();
			module_put(mod);
			is_crypto_registered = false;
		}
	} else
		atomic_dec(&ae_devices.count);
exit:
	mutex_unlock(&mutex);
}