/*
  This file is provided under a dual BSD/GPLv2 license.  When using or
  redistributing this file, you may do so under either license.

  GPL LICENSE SUMMARY
  Copyright(c) 2014 Intel Corporation.
  This program is free software; you can redistribute it and/or modify
  it under the terms of version 2 of the GNU General Public License as
  published by the Free Software Foundation.

  This program is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  General Public License for more details.

  BSD LICENSE
  Copyright(c) 2014 Intel Corporation.
  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions
  are met:

    * Redistributions of source code must retain the above copyright
      notice, this list of conditions and the following disclaimer.
    * Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in
      the documentation and/or other materials provided with the
      distribution.
    * Neither the name of Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived
      from this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
  A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT
  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/crypto.h>
#include <crypto/internal/aead.h>
#include <crypto/aes.h>
#include <crypto/sha.h>
#include <crypto/hash.h>
#include <crypto/algapi.h>
#include <crypto/authenc.h>
#include <linux/dma-mapping.h>
#include "adf_accel_devices.h"
#include "adf_transport.h"
#include "adf_common_drv.h"
#include "qat_crypto.h"
#include "icp_qat_hw.h"
#include "icp_qat_fw.h"
#include "icp_qat_fw_la.h"
#define QAT_AES_HW_CONFIG_ENC(alg, mode) \
	ICP_QAT_HW_CIPHER_CONFIG_BUILD(mode, alg, \
				       ICP_QAT_HW_CIPHER_NO_CONVERT, \
				       ICP_QAT_HW_CIPHER_ENCRYPT)

#define QAT_AES_HW_CONFIG_DEC(alg, mode) \
	ICP_QAT_HW_CIPHER_CONFIG_BUILD(mode, alg, \
				       ICP_QAT_HW_CIPHER_KEY_CONVERT, \
				       ICP_QAT_HW_CIPHER_DECRYPT)
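
/*
 * Note on the two templates above: the encrypt side uses the AES key
 * as supplied (NO_CONVERT), while the decrypt side asks the hardware
 * to derive the decryption key schedule from it (KEY_CONVERT).
 * Illustrative use, assuming ICP_QAT_HW_CIPHER_CONFIG_BUILD packs the
 * mode/algorithm/convert/direction fields into one config word:
 *
 *	cd->aes.cipher_config.val =
 *		QAT_AES_HW_CONFIG_ENC(ICP_QAT_HW_CIPHER_ALGO_AES128,
 *				      ICP_QAT_HW_CIPHER_CBC_MODE);
 */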

static DEFINE_MUTEX(algs_lock);
static unsigned int active_devs;

struct qat_alg_buf_list {
	uint32_t num_bufs;
	uint32_t num_mapped_bufs;
	struct qat_alg_buf bufers[];
} __packed __aligned(64);

/* Common content descriptor */
struct qat_alg_cd {
	union {
		struct qat_enc { /* Encrypt content desc */
			struct icp_qat_hw_cipher_algo_blk cipher;
			struct icp_qat_hw_auth_algo_blk hash;
		} qat_enc_cd;
		struct qat_dec { /* Decrypt content desc */
			struct icp_qat_hw_auth_algo_blk hash;
			struct icp_qat_hw_cipher_algo_blk cipher;
		} qat_dec_cd;
	};
} __aligned(64);

struct qat_alg_aead_ctx {
	struct qat_alg_cd *enc_cd;
	struct qat_alg_cd *dec_cd;
	dma_addr_t enc_cd_paddr;
	dma_addr_t dec_cd_paddr;
	struct icp_qat_fw_la_bulk_req enc_fw_req;
	struct icp_qat_fw_la_bulk_req dec_fw_req;
	struct crypto_shash *hash_tfm;
	enum icp_qat_hw_auth_algo qat_hash_alg;
	struct qat_crypto_instance *inst;
};

struct qat_alg_ablkcipher_ctx {
	struct icp_qat_hw_cipher_algo_blk *enc_cd;
	struct icp_qat_hw_cipher_algo_blk *dec_cd;
	dma_addr_t enc_cd_paddr;
	dma_addr_t dec_cd_paddr;
	struct icp_qat_fw_la_bulk_req enc_fw_req;
	struct icp_qat_fw_la_bulk_req dec_fw_req;
	struct qat_crypto_instance *inst;
	struct crypto_tfm *tfm;
	spinlock_t lock;	/* protects qat_alg_ablkcipher_ctx struct */
};

static int qat_get_inter_state_size(enum icp_qat_hw_auth_algo qat_hash_alg)
{
	switch (qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		return ICP_QAT_HW_SHA1_STATE1_SZ;
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		return ICP_QAT_HW_SHA256_STATE1_SZ;
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		return ICP_QAT_HW_SHA512_STATE1_SZ;
	default:
		return -EFAULT;
	}
}

static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash,
				  struct qat_alg_aead_ctx *ctx,
				  const uint8_t *auth_key,
				  unsigned int auth_keylen)
{
	SHASH_DESC_ON_STACK(shash, ctx->hash_tfm);
	struct sha1_state sha1;
	struct sha256_state sha256;
	struct sha512_state sha512;
	int block_size = crypto_shash_blocksize(ctx->hash_tfm);
	int digest_size = crypto_shash_digestsize(ctx->hash_tfm);
	char ipad[block_size];
	char opad[block_size];
	__be32 *hash_state_out;
	__be64 *hash512_state_out;
	int i, offset;

	memset(ipad, 0, block_size);
	memset(opad, 0, block_size);
	shash->tfm = ctx->hash_tfm;

	if (auth_keylen > block_size) {
		int ret = crypto_shash_digest(shash, auth_key,
					      auth_keylen, ipad);
		if (ret)
			return ret;

		memcpy(opad, ipad, digest_size);
	} else {
		memcpy(ipad, auth_key, auth_keylen);
		memcpy(opad, auth_key, auth_keylen);
	}

	for (i = 0; i < block_size; i++) {
		char *ipad_ptr = ipad + i;
		char *opad_ptr = opad + i;
		*ipad_ptr ^= 0x36;
		*opad_ptr ^= 0x5c;
	}

	if (crypto_shash_init(shash))
		return -EFAULT;

	if (crypto_shash_update(shash, ipad, block_size))
		return -EFAULT;

	hash_state_out = (__be32 *)hash->sha.state1;
	hash512_state_out = (__be64 *)hash_state_out;

	switch (ctx->qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		if (crypto_shash_export(shash, &sha1))
			return -EFAULT;
		for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
			*hash_state_out = cpu_to_be32(*(sha1.state + i));
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		if (crypto_shash_export(shash, &sha256))
			return -EFAULT;
		for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
			*hash_state_out = cpu_to_be32(*(sha256.state + i));
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		if (crypto_shash_export(shash, &sha512))
			return -EFAULT;
		for (i = 0; i < digest_size >> 3; i++, hash512_state_out++)
			*hash512_state_out = cpu_to_be64(*(sha512.state + i));
		break;
	default:
		return -EFAULT;
	}

	if (crypto_shash_init(shash))
		return -EFAULT;

	if (crypto_shash_update(shash, opad, block_size))
		return -EFAULT;

	offset = round_up(qat_get_inter_state_size(ctx->qat_hash_alg), 8);
	hash_state_out = (__be32 *)(hash->sha.state1 + offset);
	hash512_state_out = (__be64 *)hash_state_out;

	switch (ctx->qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		if (crypto_shash_export(shash, &sha1))
			return -EFAULT;
		for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
			*hash_state_out = cpu_to_be32(*(sha1.state + i));
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		if (crypto_shash_export(shash, &sha256))
			return -EFAULT;
		for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
			*hash_state_out = cpu_to_be32(*(sha256.state + i));
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		if (crypto_shash_export(shash, &sha512))
			return -EFAULT;
		for (i = 0; i < digest_size >> 3; i++, hash512_state_out++)
			*hash512_state_out = cpu_to_be64(*(sha512.state + i));
		break;
	default:
		return -EFAULT;
	}
	memzero_explicit(ipad, block_size);
	memzero_explicit(opad, block_size);
	return 0;
}
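
/*
 * What the precompute above buys us: HMAC(K, m) =
 * H((K ^ opad) || H((K ^ ipad) || m)), so the driver exports the
 * partial digests of H(K ^ ipad) and H(K ^ opad) into the content
 * descriptor once per setkey, and the hardware only has to continue
 * the two hashes per request. A minimal software model (illustrative
 * sketch with a hypothetical helper name, not driver code):
 */
#if 0
static int hmac_precompute_sketch(struct crypto_shash *tfm,
				  const u8 *key, unsigned int keylen,
				  void *istate, void *ostate)
{
	SHASH_DESC_ON_STACK(desc, tfm);
	unsigned int bs = crypto_shash_blocksize(tfm);
	u8 ipad[SHA512_BLOCK_SIZE] = { 0 };
	u8 opad[SHA512_BLOCK_SIZE] = { 0 };
	int i;

	/* Keys longer than one block would first be hashed down. */
	memcpy(ipad, key, keylen);
	memcpy(opad, key, keylen);
	for (i = 0; i < bs; i++) {
		ipad[i] ^= 0x36;
		opad[i] ^= 0x5c;
	}
	desc->tfm = tfm;
	/* Export the two partial states instead of finishing the hashes. */
	return crypto_shash_init(desc) ?:
	       crypto_shash_update(desc, ipad, bs) ?:
	       crypto_shash_export(desc, istate) ?:
	       crypto_shash_init(desc) ?:
	       crypto_shash_update(desc, opad, bs) ?:
	       crypto_shash_export(desc, ostate);
}
#endif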

static void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header)
{
	header->hdr_flags =
		ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET);
	header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_LA;
	header->comn_req_flags =
		ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_CD_FLD_TYPE_64BIT_ADR,
					    QAT_COMN_PTR_TYPE_SGL);
	ICP_QAT_FW_LA_PARTIAL_SET(header->serv_specif_flags,
				  ICP_QAT_FW_LA_PARTIAL_NONE);
	ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags,
					   ICP_QAT_FW_CIPH_IV_16BYTE_DATA);
	ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
				ICP_QAT_FW_LA_NO_PROTO);
	ICP_QAT_FW_LA_UPDATE_STATE_SET(header->serv_specif_flags,
				       ICP_QAT_FW_LA_NO_UPDATE_STATE);
}

static int qat_alg_aead_init_enc_session(struct crypto_aead *aead_tfm,
					 int alg,
					 struct crypto_authenc_keys *keys,
					 int mode)
{
	struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(aead_tfm);
	unsigned int digestsize = crypto_aead_authsize(aead_tfm);
	struct qat_enc *enc_ctx = &ctx->enc_cd->qat_enc_cd;
	struct icp_qat_hw_cipher_algo_blk *cipher = &enc_ctx->cipher;
	struct icp_qat_hw_auth_algo_blk *hash =
		(struct icp_qat_hw_auth_algo_blk *)((char *)enc_ctx +
		sizeof(struct icp_qat_hw_auth_setup) + keys->enckeylen);
	struct icp_qat_fw_la_bulk_req *req_tmpl = &ctx->enc_fw_req;
	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
	struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
	void *ptr = &req_tmpl->cd_ctrl;
	struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
	struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;

	/* CD setup */
	cipher->aes.cipher_config.val = QAT_AES_HW_CONFIG_ENC(alg, mode);
	memcpy(cipher->aes.key, keys->enckey, keys->enckeylen);
	hash->sha.inner_setup.auth_config.config =
		ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
					     ctx->qat_hash_alg, digestsize);
	hash->sha.inner_setup.auth_counter.counter =
		cpu_to_be32(crypto_shash_blocksize(ctx->hash_tfm));

	if (qat_alg_do_precomputes(hash, ctx, keys->authkey, keys->authkeylen))
		return -EFAULT;

	/* Request setup */
	qat_alg_init_common_hdr(header);
	header->service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER_HASH;
	ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
					   ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
	ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
				   ICP_QAT_FW_LA_RET_AUTH_RES);
	ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
				   ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
	cd_pars->u.s.content_desc_addr = ctx->enc_cd_paddr;
	cd_pars->u.s.content_desc_params_sz = sizeof(struct qat_alg_cd) >> 3;

	/* Cipher CD config setup */
	cipher_cd_ctrl->cipher_key_sz = keys->enckeylen >> 3;
	cipher_cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
	cipher_cd_ctrl->cipher_cfg_offset = 0;
	ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
	ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
	/* Auth CD config setup */
	hash_cd_ctrl->hash_cfg_offset = ((char *)hash - (char *)cipher) >> 3;
	hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
	hash_cd_ctrl->inner_res_sz = digestsize;
	hash_cd_ctrl->final_sz = digestsize;

	switch (ctx->qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		hash_cd_ctrl->inner_state1_sz =
			round_up(ICP_QAT_HW_SHA1_STATE1_SZ, 8);
		hash_cd_ctrl->inner_state2_sz =
			round_up(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA256_STATE1_SZ;
		hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA256_STATE2_SZ;
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA512_STATE1_SZ;
		hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA512_STATE2_SZ;
		break;
	default:
		return -EFAULT;
	}
	hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
			((sizeof(struct icp_qat_hw_auth_setup) +
			  round_up(hash_cd_ctrl->inner_state1_sz, 8)) >> 3);
	ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
	ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
	return 0;
}

static int qat_alg_aead_init_dec_session(struct crypto_aead *aead_tfm,
					 int alg,
					 struct crypto_authenc_keys *keys,
					 int mode)
{
	struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(aead_tfm);
	unsigned int digestsize = crypto_aead_authsize(aead_tfm);
	struct qat_dec *dec_ctx = &ctx->dec_cd->qat_dec_cd;
	struct icp_qat_hw_auth_algo_blk *hash = &dec_ctx->hash;
	struct icp_qat_hw_cipher_algo_blk *cipher =
		(struct icp_qat_hw_cipher_algo_blk *)((char *)dec_ctx +
		sizeof(struct icp_qat_hw_auth_setup) +
		roundup(crypto_shash_digestsize(ctx->hash_tfm), 8) * 2);
	struct icp_qat_fw_la_bulk_req *req_tmpl = &ctx->dec_fw_req;
	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
	struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
	void *ptr = &req_tmpl->cd_ctrl;
	struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
	struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
	struct icp_qat_fw_la_auth_req_params *auth_param =
		(struct icp_qat_fw_la_auth_req_params *)
		((char *)&req_tmpl->serv_specif_rqpars +
		sizeof(struct icp_qat_fw_la_cipher_req_params));

	/* CD setup */
	cipher->aes.cipher_config.val = QAT_AES_HW_CONFIG_DEC(alg, mode);
	memcpy(cipher->aes.key, keys->enckey, keys->enckeylen);
	hash->sha.inner_setup.auth_config.config =
		ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
					     ctx->qat_hash_alg,
					     digestsize);
	hash->sha.inner_setup.auth_counter.counter =
		cpu_to_be32(crypto_shash_blocksize(ctx->hash_tfm));

	if (qat_alg_do_precomputes(hash, ctx, keys->authkey, keys->authkeylen))
		return -EFAULT;

	/* Request setup */
	qat_alg_init_common_hdr(header);
	header->service_cmd_id = ICP_QAT_FW_LA_CMD_HASH_CIPHER;
	ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
					   ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
	ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
				   ICP_QAT_FW_LA_NO_RET_AUTH_RES);
	ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
				   ICP_QAT_FW_LA_CMP_AUTH_RES);
	cd_pars->u.s.content_desc_addr = ctx->dec_cd_paddr;
	cd_pars->u.s.content_desc_params_sz = sizeof(struct qat_alg_cd) >> 3;

	/* Cipher CD config setup */
	cipher_cd_ctrl->cipher_key_sz = keys->enckeylen >> 3;
	cipher_cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
	cipher_cd_ctrl->cipher_cfg_offset =
		(sizeof(struct icp_qat_hw_auth_setup) +
		 roundup(crypto_shash_digestsize(ctx->hash_tfm), 8) * 2) >> 3;
	ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
	ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);

	/* Auth CD config setup */
	hash_cd_ctrl->hash_cfg_offset = 0;
	hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
	hash_cd_ctrl->inner_res_sz = digestsize;
	hash_cd_ctrl->final_sz = digestsize;

	switch (ctx->qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		hash_cd_ctrl->inner_state1_sz =
			round_up(ICP_QAT_HW_SHA1_STATE1_SZ, 8);
		hash_cd_ctrl->inner_state2_sz =
			round_up(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA256_STATE1_SZ;
		hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA256_STATE2_SZ;
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA512_STATE1_SZ;
		hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA512_STATE2_SZ;
		break;
	default:
		return -EFAULT;
	}

	hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
			((sizeof(struct icp_qat_hw_auth_setup) +
			  round_up(hash_cd_ctrl->inner_state1_sz, 8)) >> 3);
	auth_param->auth_res_sz = digestsize;
	ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
	ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
	return 0;
}
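
/*
 * Summary of the two content descriptor layouts built above (derived
 * from the code, not an authoritative hardware spec). All offsets
 * handed to the firmware are in 8-byte words, hence the ">> 3" shifts:
 *
 *   encrypt CD (CIPHER_HASH):
 *	[ cipher config | AES key | auth setup | state1 | state2 ]
 *	cipher slice runs first, auth slice second, digest appended;
 *
 *   decrypt CD (HASH_CIPHER):
 *	[ auth setup | state1 | state2 | cipher config | AES key ]
 *	auth slice runs first, so the tag is verified (CMP_AUTH_RES)
 *	while the cipher slice decrypts.
 */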

static void qat_alg_ablkcipher_init_com(struct qat_alg_ablkcipher_ctx *ctx,
					struct icp_qat_fw_la_bulk_req *req,
					struct icp_qat_hw_cipher_algo_blk *cd,
					const uint8_t *key, unsigned int keylen)
{
	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
	struct icp_qat_fw_comn_req_hdr *header = &req->comn_hdr;
	struct icp_qat_fw_cipher_cd_ctrl_hdr *cd_ctrl = (void *)&req->cd_ctrl;

	memcpy(cd->aes.key, key, keylen);
	qat_alg_init_common_hdr(header);
	header->service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER;
	cd_pars->u.s.content_desc_params_sz =
				sizeof(struct icp_qat_hw_cipher_algo_blk) >> 3;
	/* Cipher CD config setup */
	cd_ctrl->cipher_key_sz = keylen >> 3;
	cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
	cd_ctrl->cipher_cfg_offset = 0;
	ICP_QAT_FW_COMN_CURR_ID_SET(cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
	ICP_QAT_FW_COMN_NEXT_ID_SET(cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
}

static void qat_alg_ablkcipher_init_enc(struct qat_alg_ablkcipher_ctx *ctx,
					int alg, const uint8_t *key,
					unsigned int keylen, int mode)
{
	struct icp_qat_hw_cipher_algo_blk *enc_cd = ctx->enc_cd;
	struct icp_qat_fw_la_bulk_req *req = &ctx->enc_fw_req;
	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;

	qat_alg_ablkcipher_init_com(ctx, req, enc_cd, key, keylen);
	cd_pars->u.s.content_desc_addr = ctx->enc_cd_paddr;
	enc_cd->aes.cipher_config.val = QAT_AES_HW_CONFIG_ENC(alg, mode);
}

static void qat_alg_ablkcipher_init_dec(struct qat_alg_ablkcipher_ctx *ctx,
					int alg, const uint8_t *key,
					unsigned int keylen, int mode)
{
	struct icp_qat_hw_cipher_algo_blk *dec_cd = ctx->dec_cd;
	struct icp_qat_fw_la_bulk_req *req = &ctx->dec_fw_req;
	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;

	qat_alg_ablkcipher_init_com(ctx, req, dec_cd, key, keylen);
	cd_pars->u.s.content_desc_addr = ctx->dec_cd_paddr;

	if (mode != ICP_QAT_HW_CIPHER_CTR_MODE)
		dec_cd->aes.cipher_config.val =
					QAT_AES_HW_CONFIG_DEC(alg, mode);
	else
		dec_cd->aes.cipher_config.val =
					QAT_AES_HW_CONFIG_ENC(alg, mode);
}
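
/*
 * Why CTR decrypt is programmed as an encrypt: CTR mode turns AES into
 * a stream cipher, and both directions compute
 *
 *	C[i] = P[i] ^ AES_ENC(key, counter_i)
 *
 * so the AES core always runs in the encrypt direction and no decrypt
 * key schedule (KEY_CONVERT) is needed. Block modes such as CBC and
 * XTS do run AES in the decrypt direction, hence QAT_AES_HW_CONFIG_DEC
 * above.
 */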

static int qat_alg_validate_key(int key_len, int *alg, int mode)
{
	if (mode != ICP_QAT_HW_CIPHER_XTS_MODE) {
		switch (key_len) {
		case AES_KEYSIZE_128:
			*alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
			break;
		case AES_KEYSIZE_192:
			*alg = ICP_QAT_HW_CIPHER_ALGO_AES192;
			break;
		case AES_KEYSIZE_256:
			*alg = ICP_QAT_HW_CIPHER_ALGO_AES256;
			break;
		default:
			return -EINVAL;
		}
	} else {
		switch (key_len) {
		case AES_KEYSIZE_128 << 1:
			*alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
			break;
		case AES_KEYSIZE_256 << 1:
			*alg = ICP_QAT_HW_CIPHER_ALGO_AES256;
			break;
		default:
			return -EINVAL;
		}
	}
	return 0;
}
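
/*
 * XTS keys carry two AES keys back to back (the data key and the tweak
 * key), so the valid XTS lengths are doubled: 32 bytes for AES-128-XTS
 * and 64 bytes for AES-256-XTS, with no AES-192 variant. A hypothetical
 * caller-side check (illustrative only):
 *
 *	int alg;
 *
 *	if (qat_alg_validate_key(keylen, &alg, ICP_QAT_HW_CIPHER_XTS_MODE))
 *		return -EINVAL;	/* keylen must be 32 or 64 *​/
 */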

static int qat_alg_aead_init_sessions(struct crypto_aead *tfm, const u8 *key,
				      unsigned int keylen, int mode)
{
	struct crypto_authenc_keys keys;
	int alg;

	if (crypto_authenc_extractkeys(&keys, key, keylen))
		goto bad_key;

	if (qat_alg_validate_key(keys.enckeylen, &alg, mode))
		goto bad_key;

	if (qat_alg_aead_init_enc_session(tfm, alg, &keys, mode))
		goto error;

	if (qat_alg_aead_init_dec_session(tfm, alg, &keys, mode))
		goto error;

	return 0;
bad_key:
	crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
error:
	return -EFAULT;
}
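
/*
 * The key blob parsed by crypto_authenc_extractkeys() above is the
 * standard authenc() format (sketch only; crypto/authenc.c is the
 * authoritative definition): an rtattr of type
 * CRYPTO_AUTHENC_KEYA_PARAM carrying the encryption key length,
 * followed by the two raw keys:
 *
 *	[ rtattr { __be32 enckeylen } ]
 *	[ authkey (remaining bytes) ][ enckey (enckeylen bytes) ]
 */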

static int qat_alg_ablkcipher_init_sessions(struct qat_alg_ablkcipher_ctx *ctx,
					    const u8 *key, unsigned int keylen,
					    int mode)
{
	int alg;

	if (qat_alg_validate_key(keylen, &alg, mode))
		goto bad_key;

	qat_alg_ablkcipher_init_enc(ctx, alg, key, keylen, mode);
	qat_alg_ablkcipher_init_dec(ctx, alg, key, keylen, mode);
	return 0;
bad_key:
	crypto_tfm_set_flags(ctx->tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}

static int qat_alg_aead_setkey(struct crypto_aead *tfm, const uint8_t *key,
			       unsigned int keylen)
{
	struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct device *dev;

	if (ctx->enc_cd) {
		/* rekeying */
		dev = &GET_DEV(ctx->inst->accel_dev);
		memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
		memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
		memset(&ctx->enc_fw_req, 0, sizeof(ctx->enc_fw_req));
		memset(&ctx->dec_fw_req, 0, sizeof(ctx->dec_fw_req));
	} else {
		/* new key */
		int node = get_current_node();
		struct qat_crypto_instance *inst =
				qat_crypto_get_instance_node(node);
		if (!inst)
			return -EINVAL;

		dev = &GET_DEV(inst->accel_dev);
		ctx->inst = inst;
		ctx->enc_cd = dma_zalloc_coherent(dev, sizeof(*ctx->enc_cd),
						  &ctx->enc_cd_paddr,
						  GFP_ATOMIC);
		if (!ctx->enc_cd)
			return -ENOMEM;

		ctx->dec_cd = dma_zalloc_coherent(dev, sizeof(*ctx->dec_cd),
						  &ctx->dec_cd_paddr,
						  GFP_ATOMIC);
		if (!ctx->dec_cd)
			goto out_free_enc;
	}
	if (qat_alg_aead_init_sessions(tfm, key, keylen,
				       ICP_QAT_HW_CIPHER_CBC_MODE))
		goto out_free_all;

	return 0;

out_free_all:
	memset(ctx->dec_cd, 0, sizeof(struct qat_alg_cd));
	dma_free_coherent(dev, sizeof(struct qat_alg_cd),
			  ctx->dec_cd, ctx->dec_cd_paddr);
	ctx->dec_cd = NULL;
out_free_enc:
	memset(ctx->enc_cd, 0, sizeof(struct qat_alg_cd));
	dma_free_coherent(dev, sizeof(struct qat_alg_cd),
			  ctx->enc_cd, ctx->enc_cd_paddr);
	ctx->enc_cd = NULL;
	return -ENOMEM;
}

static void qat_alg_free_bufl(struct qat_crypto_instance *inst,
			      struct qat_crypto_request *qat_req)
{
	struct device *dev = &GET_DEV(inst->accel_dev);
	struct qat_alg_buf_list *bl = qat_req->buf.bl;
	struct qat_alg_buf_list *blout = qat_req->buf.blout;
	dma_addr_t blp = qat_req->buf.blp;
	dma_addr_t blpout = qat_req->buf.bloutp;
	size_t sz = qat_req->buf.sz;
	size_t sz_out = qat_req->buf.sz_out;
	int i;

	for (i = 0; i < bl->num_bufs; i++)
		dma_unmap_single(dev, bl->bufers[i].addr,
				 bl->bufers[i].len, DMA_BIDIRECTIONAL);

	dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
	kfree(bl);
	if (blp != blpout) {
		/* If out of place operation dma unmap only data */
		int bufless = blout->num_bufs - blout->num_mapped_bufs;

		for (i = bufless; i < blout->num_bufs; i++) {
			dma_unmap_single(dev, blout->bufers[i].addr,
					 blout->bufers[i].len,
					 DMA_BIDIRECTIONAL);
		}
		dma_unmap_single(dev, blpout, sz_out, DMA_TO_DEVICE);
		kfree(blout);
	}
}

static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
			       struct scatterlist *sgl,
			       struct scatterlist *sglout,
			       struct qat_crypto_request *qat_req)
{
	struct device *dev = &GET_DEV(inst->accel_dev);
	int i, sg_nctr = 0;
	int n = sg_nents(sgl);
	struct qat_alg_buf_list *bufl;
	struct qat_alg_buf_list *buflout = NULL;
	dma_addr_t blp;
	dma_addr_t bloutp = 0;
	struct scatterlist *sg;
	size_t sz_out, sz = sizeof(struct qat_alg_buf_list) +
			((1 + n) * sizeof(struct qat_alg_buf));

	if (unlikely(!n))
		return -EINVAL;

	bufl = kzalloc_node(sz, GFP_ATOMIC,
			    dev_to_node(&GET_DEV(inst->accel_dev)));
	if (unlikely(!bufl))
		return -ENOMEM;

	blp = dma_map_single(dev, bufl, sz, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, blp)))
		goto err;

	for_each_sg(sgl, sg, n, i) {
		int y = sg_nctr;

		if (!sg->length)
			continue;

		bufl->bufers[y].addr = dma_map_single(dev, sg_virt(sg),
						      sg->length,
						      DMA_BIDIRECTIONAL);
		bufl->bufers[y].len = sg->length;
		if (unlikely(dma_mapping_error(dev, bufl->bufers[y].addr)))
			goto err;
		sg_nctr++;
	}
	bufl->num_bufs = sg_nctr;
	qat_req->buf.bl = bufl;
	qat_req->buf.blp = blp;
	qat_req->buf.sz = sz;
	/* Handle out of place operation */
	if (sgl != sglout) {
		struct qat_alg_buf *bufers;

		n = sg_nents(sglout);
		sz_out = sizeof(struct qat_alg_buf_list) +
			((1 + n) * sizeof(struct qat_alg_buf));
		sg_nctr = 0;
		buflout = kzalloc_node(sz_out, GFP_ATOMIC,
				       dev_to_node(&GET_DEV(inst->accel_dev)));
		if (unlikely(!buflout))
			goto err;
		bloutp = dma_map_single(dev, buflout, sz_out, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(dev, bloutp)))
			goto err;
		bufers = buflout->bufers;
		for_each_sg(sglout, sg, n, i) {
			int y = sg_nctr;

			if (!sg->length)
				continue;

			bufers[y].addr = dma_map_single(dev, sg_virt(sg),
							sg->length,
							DMA_BIDIRECTIONAL);
			if (unlikely(dma_mapping_error(dev, bufers[y].addr)))
				goto err;
			bufers[y].len = sg->length;
			sg_nctr++;
		}
		buflout->num_bufs = sg_nctr;
		buflout->num_mapped_bufs = sg_nctr;
		qat_req->buf.blout = buflout;
		qat_req->buf.bloutp = bloutp;
		qat_req->buf.sz_out = sz_out;
	} else {
		/* Otherwise set the src and dst to the same address */
		qat_req->buf.bloutp = qat_req->buf.blp;
		qat_req->buf.sz_out = 0;
	}
	return 0;

err:
	dev_err(dev, "Failed to map buf for dma\n");
	for (i = 0; i < n; i++)
		if (!dma_mapping_error(dev, bufl->bufers[i].addr))
			dma_unmap_single(dev, bufl->bufers[i].addr,
					 bufl->bufers[i].len,
					 DMA_BIDIRECTIONAL);

	if (!dma_mapping_error(dev, blp))
		dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
	kfree(bufl);
	if (sgl != sglout && buflout) {
		n = sg_nents(sglout);
		for (i = 0; i < n; i++)
			if (!dma_mapping_error(dev, buflout->bufers[i].addr))
				dma_unmap_single(dev, buflout->bufers[i].addr,
						 buflout->bufers[i].len,
						 DMA_BIDIRECTIONAL);
		if (!dma_mapping_error(dev, bloutp))
			dma_unmap_single(dev, bloutp, sz_out, DMA_TO_DEVICE);
		kfree(buflout);
	}
	return -ENOMEM;
}
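
/*
 * Shape of the flattened firmware buffer list built above: one header
 * plus one entry per non-empty scatterlist element, all DMA-mapped
 * (informal sketch of the structs defined earlier in this file):
 *
 *	struct qat_alg_buf_list          struct qat_alg_buf
 *	+-----------------+              +------+-----+
 *	| num_bufs = N    |   bufers ->  | addr | len |  x N
 *	| num_mapped_bufs |              +------+-----+
 *	+-----------------+
 *
 * The header itself is mapped DMA_TO_DEVICE (the firmware only reads
 * it), while the data buffers are DMA_BIDIRECTIONAL because in-place
 * crypto writes the result back through the same mapping.
 */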

static void qat_aead_alg_callback(struct icp_qat_fw_la_resp *qat_resp,
				  struct qat_crypto_request *qat_req)
{
	struct qat_alg_aead_ctx *ctx = qat_req->aead_ctx;
	struct qat_crypto_instance *inst = ctx->inst;
	struct aead_request *areq = qat_req->aead_req;
	uint8_t stat_field = qat_resp->comn_resp.comn_status;
	int res = 0, qat_res = ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(stat_field);

	qat_alg_free_bufl(inst, qat_req);
	if (unlikely(qat_res != ICP_QAT_FW_COMN_STATUS_FLAG_OK))
		res = -EBADMSG;
	areq->base.complete(&areq->base, res);
}

static void qat_ablkcipher_alg_callback(struct icp_qat_fw_la_resp *qat_resp,
					struct qat_crypto_request *qat_req)
{
	struct qat_alg_ablkcipher_ctx *ctx = qat_req->ablkcipher_ctx;
	struct qat_crypto_instance *inst = ctx->inst;
	struct ablkcipher_request *areq = qat_req->ablkcipher_req;
	uint8_t stat_field = qat_resp->comn_resp.comn_status;
	int res = 0, qat_res = ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(stat_field);

	qat_alg_free_bufl(inst, qat_req);
	if (unlikely(qat_res != ICP_QAT_FW_COMN_STATUS_FLAG_OK))
		res = -EINVAL;
	areq->base.complete(&areq->base, res);
}

void qat_alg_callback(void *resp)
{
	struct icp_qat_fw_la_resp *qat_resp = resp;
	struct qat_crypto_request *qat_req =
	     (void *)(__force long)qat_resp->opaque_data;

	qat_req->cb(qat_resp, qat_req);
}
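
/*
 * Round trip of the request pointer through the firmware descriptor
 * (see qat_alg_aead_enc/dec below): the driver stores the pointer in
 * the 64-bit opaque_data field on submit,
 *
 *	qat_req->req.comn_mid.opaque_data =
 *		(uint64_t)(__force long)qat_req;
 *
 * and the firmware echoes it back untouched in the response, which is
 * how qat_alg_callback() above recovers the right
 * struct qat_crypto_request without any lookup table.
 */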

static int qat_alg_aead_dec(struct aead_request *areq)
{
	struct crypto_aead *aead_tfm = crypto_aead_reqtfm(areq);
	struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
	struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm);
	struct qat_crypto_request *qat_req = aead_request_ctx(areq);
	struct icp_qat_fw_la_cipher_req_params *cipher_param;
	struct icp_qat_fw_la_auth_req_params *auth_param;
	struct icp_qat_fw_la_bulk_req *msg;
	int digst_size = crypto_aead_authsize(aead_tfm);
	int ret, ctr = 0;
	u32 cipher_len;

	cipher_len = areq->cryptlen - digst_size;
	if (cipher_len % AES_BLOCK_SIZE != 0)
		return -EINVAL;

	ret = qat_alg_sgl_to_bufl(ctx->inst, areq->src, areq->dst, qat_req);
	if (unlikely(ret))
		return ret;

	msg = &qat_req->req;
	*msg = ctx->dec_fw_req;
	qat_req->aead_ctx = ctx;
	qat_req->aead_req = areq;
	qat_req->cb = qat_aead_alg_callback;
	qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
	qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
	qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
	cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
	cipher_param->cipher_length = cipher_len;
	cipher_param->cipher_offset = areq->assoclen;
	memcpy(cipher_param->u.cipher_IV_array, areq->iv, AES_BLOCK_SIZE);
	auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param));
	auth_param->auth_off = 0;
	auth_param->auth_len = areq->assoclen + cipher_param->cipher_length;
	do {
		ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
	} while (ret == -EAGAIN && ctr++ < 10);

	if (ret == -EAGAIN) {
		qat_alg_free_bufl(ctx->inst, qat_req);
		return -EBUSY;
	}
	return -EINPROGRESS;
}

static int qat_alg_aead_enc(struct aead_request *areq)
{
	struct crypto_aead *aead_tfm = crypto_aead_reqtfm(areq);
	struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
	struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm);
	struct qat_crypto_request *qat_req = aead_request_ctx(areq);
	struct icp_qat_fw_la_cipher_req_params *cipher_param;
	struct icp_qat_fw_la_auth_req_params *auth_param;
	struct icp_qat_fw_la_bulk_req *msg;
	uint8_t *iv = areq->iv;
	int ret, ctr = 0;

	if (areq->cryptlen % AES_BLOCK_SIZE != 0)
		return -EINVAL;

	ret = qat_alg_sgl_to_bufl(ctx->inst, areq->src, areq->dst, qat_req);
	if (unlikely(ret))
		return ret;

	msg = &qat_req->req;
	*msg = ctx->enc_fw_req;
	qat_req->aead_ctx = ctx;
	qat_req->aead_req = areq;
	qat_req->cb = qat_aead_alg_callback;
	qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
	qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
	qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
	cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
	auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param));

	memcpy(cipher_param->u.cipher_IV_array, iv, AES_BLOCK_SIZE);
	cipher_param->cipher_length = areq->cryptlen;
	cipher_param->cipher_offset = areq->assoclen;

	auth_param->auth_off = 0;
	auth_param->auth_len = areq->assoclen + areq->cryptlen;

	do {
		ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
	} while (ret == -EAGAIN && ctr++ < 10);

	if (ret == -EAGAIN) {
		qat_alg_free_bufl(ctx->inst, qat_req);
		return -EBUSY;
	}
	return -EINPROGRESS;
}

static int qat_alg_ablkcipher_setkey(struct crypto_ablkcipher *tfm,
				     const u8 *key, unsigned int keylen,
				     int mode)
{
	struct qat_alg_ablkcipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
	struct device *dev;

	spin_lock(&ctx->lock);
	if (ctx->enc_cd) {
		/* rekeying */
		dev = &GET_DEV(ctx->inst->accel_dev);
		memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
		memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
		memset(&ctx->enc_fw_req, 0, sizeof(ctx->enc_fw_req));
		memset(&ctx->dec_fw_req, 0, sizeof(ctx->dec_fw_req));
	} else {
		/* new key */
		int node = get_current_node();
		struct qat_crypto_instance *inst =
				qat_crypto_get_instance_node(node);
		if (!inst) {
			spin_unlock(&ctx->lock);
			return -EINVAL;
		}

		dev = &GET_DEV(inst->accel_dev);
		ctx->inst = inst;
		ctx->enc_cd = dma_zalloc_coherent(dev, sizeof(*ctx->enc_cd),
						  &ctx->enc_cd_paddr,
						  GFP_ATOMIC);
		if (!ctx->enc_cd) {
			spin_unlock(&ctx->lock);
			return -ENOMEM;
		}
		ctx->dec_cd = dma_zalloc_coherent(dev, sizeof(*ctx->dec_cd),
						  &ctx->dec_cd_paddr,
						  GFP_ATOMIC);
		if (!ctx->dec_cd) {
			spin_unlock(&ctx->lock);
			goto out_free_enc;
		}
	}
	spin_unlock(&ctx->lock);
	if (qat_alg_ablkcipher_init_sessions(ctx, key, keylen, mode))
		goto out_free_all;

	return 0;

out_free_all:
	memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
	dma_free_coherent(dev, sizeof(*ctx->dec_cd),
			  ctx->dec_cd, ctx->dec_cd_paddr);
	ctx->dec_cd = NULL;
out_free_enc:
	memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
	dma_free_coherent(dev, sizeof(*ctx->enc_cd),
			  ctx->enc_cd, ctx->enc_cd_paddr);
	ctx->enc_cd = NULL;
	return -ENOMEM;
}

static int qat_alg_ablkcipher_cbc_setkey(struct crypto_ablkcipher *tfm,
					 const u8 *key, unsigned int keylen)
{
	return qat_alg_ablkcipher_setkey(tfm, key, keylen,
					 ICP_QAT_HW_CIPHER_CBC_MODE);
}

static int qat_alg_ablkcipher_ctr_setkey(struct crypto_ablkcipher *tfm,
					 const u8 *key, unsigned int keylen)
{
	return qat_alg_ablkcipher_setkey(tfm, key, keylen,
					 ICP_QAT_HW_CIPHER_CTR_MODE);
}

static int qat_alg_ablkcipher_xts_setkey(struct crypto_ablkcipher *tfm,
					 const u8 *key, unsigned int keylen)
{
	return qat_alg_ablkcipher_setkey(tfm, key, keylen,
					 ICP_QAT_HW_CIPHER_XTS_MODE);
}

static int qat_alg_ablkcipher_encrypt(struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *atfm = crypto_ablkcipher_reqtfm(req);
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(atfm);
	struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
	struct qat_crypto_request *qat_req = ablkcipher_request_ctx(req);
	struct icp_qat_fw_la_cipher_req_params *cipher_param;
	struct icp_qat_fw_la_bulk_req *msg;
	int ret, ctr = 0;

	ret = qat_alg_sgl_to_bufl(ctx->inst, req->src, req->dst, qat_req);
	if (unlikely(ret))
		return ret;

	msg = &qat_req->req;
	*msg = ctx->enc_fw_req;
	qat_req->ablkcipher_ctx = ctx;
	qat_req->ablkcipher_req = req;
	qat_req->cb = qat_ablkcipher_alg_callback;
	qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
	qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
	qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
	cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
	cipher_param->cipher_length = req->nbytes;
	cipher_param->cipher_offset = 0;
	memcpy(cipher_param->u.cipher_IV_array, req->info, AES_BLOCK_SIZE);
	do {
		ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
	} while (ret == -EAGAIN && ctr++ < 10);

	if (ret == -EAGAIN) {
		qat_alg_free_bufl(ctx->inst, qat_req);
		return -EBUSY;
	}
	return -EINPROGRESS;
}

static int qat_alg_ablkcipher_decrypt(struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *atfm = crypto_ablkcipher_reqtfm(req);
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(atfm);
	struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
	struct qat_crypto_request *qat_req = ablkcipher_request_ctx(req);
	struct icp_qat_fw_la_cipher_req_params *cipher_param;
	struct icp_qat_fw_la_bulk_req *msg;
	int ret, ctr = 0;

	ret = qat_alg_sgl_to_bufl(ctx->inst, req->src, req->dst, qat_req);
	if (unlikely(ret))
		return ret;

	msg = &qat_req->req;
	*msg = ctx->dec_fw_req;
	qat_req->ablkcipher_ctx = ctx;
	qat_req->ablkcipher_req = req;
	qat_req->cb = qat_ablkcipher_alg_callback;
	qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
	qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
	qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
	cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
	cipher_param->cipher_length = req->nbytes;
	cipher_param->cipher_offset = 0;
	memcpy(cipher_param->u.cipher_IV_array, req->info, AES_BLOCK_SIZE);
	do {
		ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
	} while (ret == -EAGAIN && ctr++ < 10);

	if (ret == -EAGAIN) {
		qat_alg_free_bufl(ctx->inst, qat_req);
		return -EBUSY;
	}
	return -EINPROGRESS;
}

static int qat_alg_aead_init(struct crypto_aead *tfm,
			     enum icp_qat_hw_auth_algo hash,
			     const char *hash_name)
{
	struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);

	ctx->hash_tfm = crypto_alloc_shash(hash_name, 0, 0);
	if (IS_ERR(ctx->hash_tfm))
		return PTR_ERR(ctx->hash_tfm);
	ctx->qat_hash_alg = hash;
	crypto_aead_set_reqsize(tfm, sizeof(struct qat_crypto_request));
	return 0;
}

static int qat_alg_aead_sha1_init(struct crypto_aead *tfm)
{
	return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA1, "sha1");
}

static int qat_alg_aead_sha256_init(struct crypto_aead *tfm)
{
	return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA256, "sha256");
}

static int qat_alg_aead_sha512_init(struct crypto_aead *tfm)
{
	return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA512, "sha512");
}

static void qat_alg_aead_exit(struct crypto_aead *tfm)
{
	struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct qat_crypto_instance *inst = ctx->inst;
	struct device *dev;

	crypto_free_shash(ctx->hash_tfm);

	if (!inst)
		return;

	dev = &GET_DEV(inst->accel_dev);
	if (ctx->enc_cd) {
		memset(ctx->enc_cd, 0, sizeof(struct qat_alg_cd));
		dma_free_coherent(dev, sizeof(struct qat_alg_cd),
				  ctx->enc_cd, ctx->enc_cd_paddr);
	}
	if (ctx->dec_cd) {
		memset(ctx->dec_cd, 0, sizeof(struct qat_alg_cd));
		dma_free_coherent(dev, sizeof(struct qat_alg_cd),
				  ctx->dec_cd, ctx->dec_cd_paddr);
	}
	qat_crypto_put_instance(inst);
}

static int qat_alg_ablkcipher_init(struct crypto_tfm *tfm)
{
	struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);

	spin_lock_init(&ctx->lock);
	tfm->crt_ablkcipher.reqsize = sizeof(struct qat_crypto_request);
	ctx->tfm = tfm;
	return 0;
}

static void qat_alg_ablkcipher_exit(struct crypto_tfm *tfm)
{
	struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
	struct qat_crypto_instance *inst = ctx->inst;
	struct device *dev;

	if (!inst)
		return;

	dev = &GET_DEV(inst->accel_dev);
	if (ctx->enc_cd) {
		memset(ctx->enc_cd, 0,
		       sizeof(struct icp_qat_hw_cipher_algo_blk));
		dma_free_coherent(dev,
				  sizeof(struct icp_qat_hw_cipher_algo_blk),
				  ctx->enc_cd, ctx->enc_cd_paddr);
	}
	if (ctx->dec_cd) {
		memset(ctx->dec_cd, 0,
		       sizeof(struct icp_qat_hw_cipher_algo_blk));
		dma_free_coherent(dev,
				  sizeof(struct icp_qat_hw_cipher_algo_blk),
				  ctx->dec_cd, ctx->dec_cd_paddr);
	}
	qat_crypto_put_instance(inst);
}

static struct aead_alg qat_aeads[] = { {
	.base = {
		.cra_name = "authenc(hmac(sha1),cbc(aes))",
		.cra_driver_name = "qat_aes_cbc_hmac_sha1",
		.cra_priority = 4001,
		.cra_flags = CRYPTO_ALG_ASYNC,
		.cra_blocksize = AES_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
		.cra_module = THIS_MODULE,
	},
	.init = qat_alg_aead_sha1_init,
	.exit = qat_alg_aead_exit,
	.setkey = qat_alg_aead_setkey,
	.decrypt = qat_alg_aead_dec,
	.encrypt = qat_alg_aead_enc,
	.ivsize = AES_BLOCK_SIZE,
	.maxauthsize = SHA1_DIGEST_SIZE,
}, {
	.base = {
		.cra_name = "authenc(hmac(sha256),cbc(aes))",
		.cra_driver_name = "qat_aes_cbc_hmac_sha256",
		.cra_priority = 4001,
		.cra_flags = CRYPTO_ALG_ASYNC,
		.cra_blocksize = AES_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
		.cra_module = THIS_MODULE,
	},
	.init = qat_alg_aead_sha256_init,
	.exit = qat_alg_aead_exit,
	.setkey = qat_alg_aead_setkey,
	.decrypt = qat_alg_aead_dec,
	.encrypt = qat_alg_aead_enc,
	.ivsize = AES_BLOCK_SIZE,
	.maxauthsize = SHA256_DIGEST_SIZE,
}, {
	.base = {
		.cra_name = "authenc(hmac(sha512),cbc(aes))",
		.cra_driver_name = "qat_aes_cbc_hmac_sha512",
		.cra_priority = 4001,
		.cra_flags = CRYPTO_ALG_ASYNC,
		.cra_blocksize = AES_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
		.cra_module = THIS_MODULE,
	},
	.init = qat_alg_aead_sha512_init,
	.exit = qat_alg_aead_exit,
	.setkey = qat_alg_aead_setkey,
	.decrypt = qat_alg_aead_dec,
	.encrypt = qat_alg_aead_enc,
	.ivsize = AES_BLOCK_SIZE,
	.maxauthsize = SHA512_DIGEST_SIZE,
} };

static struct crypto_alg qat_algs[] = { {
	.cra_name = "cbc(aes)",
	.cra_driver_name = "qat_aes_cbc",
	.cra_priority = 4001,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct qat_alg_ablkcipher_ctx),
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_init = qat_alg_ablkcipher_init,
	.cra_exit = qat_alg_ablkcipher_exit,
	.cra_u = {
		.ablkcipher = {
			.setkey = qat_alg_ablkcipher_cbc_setkey,
			.decrypt = qat_alg_ablkcipher_decrypt,
			.encrypt = qat_alg_ablkcipher_encrypt,
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
		},
	},
}, {
	.cra_name = "ctr(aes)",
	.cra_driver_name = "qat_aes_ctr",
	.cra_priority = 4001,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct qat_alg_ablkcipher_ctx),
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_init = qat_alg_ablkcipher_init,
	.cra_exit = qat_alg_ablkcipher_exit,
	.cra_u = {
		.ablkcipher = {
			.setkey = qat_alg_ablkcipher_ctr_setkey,
			.decrypt = qat_alg_ablkcipher_decrypt,
			.encrypt = qat_alg_ablkcipher_encrypt,
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
		},
	},
}, {
	.cra_name = "xts(aes)",
	.cra_driver_name = "qat_aes_xts",
	.cra_priority = 4001,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct qat_alg_ablkcipher_ctx),
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_init = qat_alg_ablkcipher_init,
	.cra_exit = qat_alg_ablkcipher_exit,
	.cra_u = {
		.ablkcipher = {
			.setkey = qat_alg_ablkcipher_xts_setkey,
			.decrypt = qat_alg_ablkcipher_decrypt,
			.encrypt = qat_alg_ablkcipher_encrypt,
			.min_keysize = 2 * AES_MIN_KEY_SIZE,
			.max_keysize = 2 * AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
		},
	},
} };
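
/*
 * A minimal kernel-side consumer of one of these algorithms
 * (illustrative sketch only; a real user would live in another
 * module). With priority 4001 the QAT implementation wins over the
 * generic software templates whenever a device is registered:
 */
#if 0
	struct crypto_ablkcipher *tfm =
		crypto_alloc_ablkcipher("cbc(aes)", 0, 0);

	if (!IS_ERR(tfm)) {
		/* "qat_aes_cbc" if a QAT device is up, else a fallback */
		pr_info("cbc(aes) driver: %s\n",
			crypto_tfm_alg_driver_name(crypto_ablkcipher_tfm(tfm)));
		crypto_free_ablkcipher(tfm);
	}
#endif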

int qat_algs_register(void)
{
	int ret = 0, i;

	mutex_lock(&algs_lock);
	if (++active_devs != 1)
		goto unlock;

	for (i = 0; i < ARRAY_SIZE(qat_algs); i++)
		qat_algs[i].cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
					CRYPTO_ALG_ASYNC;

	ret = crypto_register_algs(qat_algs, ARRAY_SIZE(qat_algs));
	if (ret)
		goto unlock;

	for (i = 0; i < ARRAY_SIZE(qat_aeads); i++)
		qat_aeads[i].base.cra_flags = CRYPTO_ALG_ASYNC;

	ret = crypto_register_aeads(qat_aeads, ARRAY_SIZE(qat_aeads));
	if (ret)
		goto unreg_algs;

unlock:
	mutex_unlock(&algs_lock);
	return ret;

unreg_algs:
	crypto_unregister_algs(qat_algs, ARRAY_SIZE(qat_algs));
	goto unlock;
}

void qat_algs_unregister(void)
{
	mutex_lock(&algs_lock);
	if (--active_devs != 0)
		goto unlock;

	crypto_unregister_aeads(qat_aeads, ARRAY_SIZE(qat_aeads));
	crypto_unregister_algs(qat_algs, ARRAY_SIZE(qat_algs));

unlock:
	mutex_unlock(&algs_lock);
}
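
/*
 * active_devs makes registration a refcount across all QAT devices:
 * qat_algs_register() really registers the algorithms only for the
 * first accelerator that comes up, and qat_algs_unregister() tears
 * them down only when the last one goes away; every call in between
 * just moves the counter under algs_lock.
 */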