1 // SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
2 /* Copyright(c) 2014 - 2020 Intel Corporation */
3 #include <linux/module.h>
4 #include <linux/slab.h>
5 #include <linux/crypto.h>
6 #include <crypto/internal/aead.h>
7 #include <crypto/internal/skcipher.h>
8 #include <crypto/aes.h>
9 #include <crypto/sha.h>
10 #include <crypto/hash.h>
11 #include <crypto/hmac.h>
12 #include <crypto/algapi.h>
13 #include <crypto/authenc.h>
14 #include <crypto/xts.h>
15 #include <linux/dma-mapping.h>
16 #include "adf_accel_devices.h"
17 #include "adf_transport.h"
18 #include "adf_common_drv.h"
19 #include "qat_crypto.h"
20 #include "icp_qat_hw.h"
21 #include "icp_qat_fw.h"
22 #include "icp_qat_fw_la.h"
/*
 * Build the HW cipher-config word for an AES session.
 * ENC variant: no key conversion, encrypt direction.
 */
24 #define QAT_AES_HW_CONFIG_ENC(alg, mode) \
25 ICP_QAT_HW_CIPHER_CONFIG_BUILD(mode, alg, \
26 ICP_QAT_HW_CIPHER_NO_CONVERT, \
27 ICP_QAT_HW_CIPHER_ENCRYPT)
/*
 * DEC variant: KEY_CONVERT asks the HW to derive the decryption key
 * schedule from the encryption key; direction is decrypt.
 */
29 #define QAT_AES_HW_CONFIG_DEC(alg, mode) \
30 ICP_QAT_HW_CIPHER_CONFIG_BUILD(mode, alg, \
31 ICP_QAT_HW_CIPHER_KEY_CONVERT, \
32 ICP_QAT_HW_CIPHER_DECRYPT)
/* Serializes algorithm registration/unregistration (file-scope state). */
34 static DEFINE_MUTEX(algs_lock);
/* Presumably a count of accel devices using these algs - TODO confirm (users not visible in this chunk). */
35 static unsigned int active_devs;
37 /* Common content descriptor */
/*
 * NOTE(review): the embedded numbering jumps (42->44, 46->51, 60->62)
 * show struct closers / a wrapping union are elided from this view;
 * verify the full layout against the complete source.
 */
40 struct qat_enc { /* Encrypt content desc */
41 struct icp_qat_hw_cipher_algo_blk cipher;
42 struct icp_qat_hw_auth_algo_blk hash;
44 struct qat_dec { /* Decrypt content desc */
45 struct icp_qat_hw_auth_algo_blk hash;
46 struct icp_qat_hw_cipher_algo_blk cipher;
/* Per-tfm context for the authenc (AEAD) algorithms. */
51 struct qat_alg_aead_ctx {
52 struct qat_alg_cd *enc_cd;
53 struct qat_alg_cd *dec_cd;
/* DMA addresses of the coherent enc/dec content descriptors */
54 dma_addr_t enc_cd_paddr;
55 dma_addr_t dec_cd_paddr;
/* Pre-built firmware request templates, copied per request */
56 struct icp_qat_fw_la_bulk_req enc_fw_req;
57 struct icp_qat_fw_la_bulk_req dec_fw_req;
/* Software shash used for the HMAC ipad/opad precomputes */
58 struct crypto_shash *hash_tfm;
59 enum icp_qat_hw_auth_algo qat_hash_alg;
60 struct qat_crypto_instance *inst;
/* Exported partial-hash states used during precompute */
62 struct sha1_state sha1;
63 struct sha256_state sha256;
64 struct sha512_state sha512;
66 char ipad[SHA512_BLOCK_SIZE]; /* sufficient for SHA-1/SHA-256 as well */
67 char opad[SHA512_BLOCK_SIZE];
/* Per-tfm context for the plain skcipher (CBC/CTR/XTS) algorithms. */
70 struct qat_alg_skcipher_ctx {
71 struct icp_qat_hw_cipher_algo_blk *enc_cd;
72 struct icp_qat_hw_cipher_algo_blk *dec_cd;
73 dma_addr_t enc_cd_paddr;
74 dma_addr_t dec_cd_paddr;
/* Pre-built firmware request templates, copied per request */
75 struct icp_qat_fw_la_bulk_req enc_fw_req;
76 struct icp_qat_fw_la_bulk_req dec_fw_req;
77 struct qat_crypto_instance *inst;
/* Software fallback tfm, used for XTS with AES-192 keys (see xts_setkey) */
78 struct crypto_skcipher *ftfm;
/*
 * Return the HW intermediate (state1) size for the given auth algorithm.
 * NOTE(review): the default case and function tail are elided in this
 * view (numbering jumps 90->97) - verify error return in full source.
 */
82 static int qat_get_inter_state_size(enum icp_qat_hw_auth_algo qat_hash_alg)
84 switch (qat_hash_alg) {
85 case ICP_QAT_HW_AUTH_ALGO_SHA1:
86 return ICP_QAT_HW_SHA1_STATE1_SZ;
87 case ICP_QAT_HW_AUTH_ALGO_SHA256:
88 return ICP_QAT_HW_SHA256_STATE1_SZ;
89 case ICP_QAT_HW_AUTH_ALGO_SHA512:
90 return ICP_QAT_HW_SHA512_STATE1_SZ;
/*
 * Precompute the HMAC inner (ipad) and outer (opad) partial-hash states
 * and store them into hash->sha.state1 in the big-endian layout the QAT
 * firmware expects. Scratch pads are wiped before returning.
 * NOTE(review): error-return and break statements are elided in this
 * view of the file (numbering jumps) - logic below is the visible skeleton.
 */
97 static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash,
98 struct qat_alg_aead_ctx *ctx,
100 unsigned int auth_keylen)
102 SHASH_DESC_ON_STACK(shash, ctx->hash_tfm);
103 int block_size = crypto_shash_blocksize(ctx->hash_tfm);
104 int digest_size = crypto_shash_digestsize(ctx->hash_tfm);
105 __be32 *hash_state_out;
106 __be64 *hash512_state_out;
109 memset(ctx->ipad, 0, block_size);
110 memset(ctx->opad, 0, block_size);
111 shash->tfm = ctx->hash_tfm;
/* Per RFC 2104: keys longer than the block size are digested first */
113 if (auth_keylen > block_size) {
114 int ret = crypto_shash_digest(shash, auth_key,
115 auth_keylen, ctx->ipad);
119 memcpy(ctx->opad, ctx->ipad, digest_size);
121 memcpy(ctx->ipad, auth_key, auth_keylen);
122 memcpy(ctx->opad, auth_key, auth_keylen);
/* XOR the key material with the standard HMAC pad constants */
125 for (i = 0; i < block_size; i++) {
126 char *ipad_ptr = ctx->ipad + i;
127 char *opad_ptr = ctx->opad + i;
128 *ipad_ptr ^= HMAC_IPAD_VALUE;
129 *opad_ptr ^= HMAC_OPAD_VALUE;
/* Inner hash: absorb the ipad block, then export the partial state */
132 if (crypto_shash_init(shash))
135 if (crypto_shash_update(shash, ctx->ipad, block_size))
138 hash_state_out = (__be32 *)hash->sha.state1;
139 hash512_state_out = (__be64 *)hash_state_out;
141 switch (ctx->qat_hash_alg) {
142 case ICP_QAT_HW_AUTH_ALGO_SHA1:
143 if (crypto_shash_export(shash, &ctx->sha1))
/* SHA-1/SHA-256 words are 32-bit; store big-endian for the HW */
145 for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
146 *hash_state_out = cpu_to_be32(ctx->sha1.state[i]);
148 case ICP_QAT_HW_AUTH_ALGO_SHA256:
149 if (crypto_shash_export(shash, &ctx->sha256))
151 for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
152 *hash_state_out = cpu_to_be32(ctx->sha256.state[i]);
154 case ICP_QAT_HW_AUTH_ALGO_SHA512:
155 if (crypto_shash_export(shash, &ctx->sha512))
/* SHA-512 words are 64-bit, hence the >> 3 and __be64 stores */
157 for (i = 0; i < digest_size >> 3; i++, hash512_state_out++)
158 *hash512_state_out = cpu_to_be64(ctx->sha512.state[i]);
/* Outer hash: absorb the opad block and export its partial state */
164 if (crypto_shash_init(shash))
167 if (crypto_shash_update(shash, ctx->opad, block_size))
/* Outer state lives after the 8-byte-aligned inner state in state1 */
170 offset = round_up(qat_get_inter_state_size(ctx->qat_hash_alg), 8);
174 hash_state_out = (__be32 *)(hash->sha.state1 + offset);
175 hash512_state_out = (__be64 *)hash_state_out;
177 switch (ctx->qat_hash_alg) {
178 case ICP_QAT_HW_AUTH_ALGO_SHA1:
179 if (crypto_shash_export(shash, &ctx->sha1))
181 for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
182 *hash_state_out = cpu_to_be32(ctx->sha1.state[i]);
184 case ICP_QAT_HW_AUTH_ALGO_SHA256:
185 if (crypto_shash_export(shash, &ctx->sha256))
187 for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
188 *hash_state_out = cpu_to_be32(ctx->sha256.state[i]);
190 case ICP_QAT_HW_AUTH_ALGO_SHA512:
191 if (crypto_shash_export(shash, &ctx->sha512))
193 for (i = 0; i < digest_size >> 3; i++, hash512_state_out++)
194 *hash512_state_out = cpu_to_be64(ctx->sha512.state[i]);
/* Wipe key-derived pads; memzero_explicit resists dead-store elimination */
199 memzero_explicit(ctx->ipad, block_size);
200 memzero_explicit(ctx->opad, block_size);
/* Request header flags: 64-bit IV pointer, FW updates cipher state (IV). */
204 static void qat_alg_init_hdr_iv_updt(struct icp_qat_fw_comn_req_hdr *header)
206 ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags,
207 ICP_QAT_FW_CIPH_IV_64BIT_PTR);
208 ICP_QAT_FW_LA_UPDATE_STATE_SET(header->serv_specif_flags,
209 ICP_QAT_FW_LA_UPDATE_STATE);
/* Request header flags: IV inline as 16-byte data, no FW state update. */
212 static void qat_alg_init_hdr_no_iv_updt(struct icp_qat_fw_comn_req_hdr *header)
214 ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags,
215 ICP_QAT_FW_CIPH_IV_16BYTE_DATA);
216 ICP_QAT_FW_LA_UPDATE_STATE_SET(header->serv_specif_flags,
217 ICP_QAT_FW_LA_NO_UPDATE_STATE);
/*
 * Fill the common FW request header: LA service, SGL pointers, 64-bit CD
 * address, no partial processing, no protocol.
 * NOTE(review): a second parameter and the condition selecting between the
 * no-IV-update and IV-update variants are elided in this view (callers pass
 * a second argument, e.g. qat_alg_init_common_hdr(header, 1)) - confirm
 * against full source.
 */
220 static void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header,
224 ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET);
225 header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_LA;
226 header->comn_req_flags =
227 ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_CD_FLD_TYPE_64BIT_ADR,
228 QAT_COMN_PTR_TYPE_SGL);
229 ICP_QAT_FW_LA_PARTIAL_SET(header->serv_specif_flags,
230 ICP_QAT_FW_LA_PARTIAL_NONE);
232 qat_alg_init_hdr_no_iv_updt(header);
234 qat_alg_init_hdr_iv_updt(header);
235 ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
236 ICP_QAT_FW_LA_NO_PROTO);
/*
 * Build the encrypt-direction content descriptor and FW request template
 * for an authenc session: cipher block first, then the auth block placed
 * immediately after the cipher key, with CIPHER -> AUTH slice chaining
 * (encrypt-then-MAC order for encryption).
 */
239 static int qat_alg_aead_init_enc_session(struct crypto_aead *aead_tfm,
241 struct crypto_authenc_keys *keys,
244 struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(aead_tfm);
245 unsigned int digestsize = crypto_aead_authsize(aead_tfm);
246 struct qat_enc *enc_ctx = &ctx->enc_cd->qat_enc_cd;
247 struct icp_qat_hw_cipher_algo_blk *cipher = &enc_ctx->cipher;
/* Auth block lives right after the cipher setup + variable-length key */
248 struct icp_qat_hw_auth_algo_blk *hash =
249 (struct icp_qat_hw_auth_algo_blk *)((char *)enc_ctx +
250 sizeof(struct icp_qat_hw_auth_setup) + keys->enckeylen);
251 struct icp_qat_fw_la_bulk_req *req_tmpl = &ctx->enc_fw_req;
252 struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
253 struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
/* cd_ctrl is shared: viewed as both cipher and auth control headers */
254 void *ptr = &req_tmpl->cd_ctrl;
255 struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
256 struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
259 cipher->aes.cipher_config.val = QAT_AES_HW_CONFIG_ENC(alg, mode);
260 memcpy(cipher->aes.key, keys->enckey, keys->enckeylen);
261 hash->sha.inner_setup.auth_config.config =
262 ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
263 ctx->qat_hash_alg, digestsize);
264 hash->sha.inner_setup.auth_counter.counter =
265 cpu_to_be32(crypto_shash_blocksize(ctx->hash_tfm));
/* Derive the HMAC ipad/opad partial states into the auth block */
267 if (qat_alg_do_precomputes(hash, ctx, keys->authkey, keys->authkeylen))
271 qat_alg_init_common_hdr(header, 1);
272 header->service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER_HASH;
273 ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
274 ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
/* Encrypt path: return the computed auth tag, do not compare it */
275 ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
276 ICP_QAT_FW_LA_RET_AUTH_RES);
277 ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
278 ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
279 cd_pars->u.s.content_desc_addr = ctx->enc_cd_paddr;
280 cd_pars->u.s.content_desc_params_sz = sizeof(struct qat_alg_cd) >> 3;
282 /* Cipher CD config setup */
283 cipher_cd_ctrl->cipher_key_sz = keys->enckeylen >> 3;
284 cipher_cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
285 cipher_cd_ctrl->cipher_cfg_offset = 0;
286 ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
287 ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
288 /* Auth CD config setup */
289 hash_cd_ctrl->hash_cfg_offset = ((char *)hash - (char *)cipher) >> 3;
290 hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
291 hash_cd_ctrl->inner_res_sz = digestsize;
292 hash_cd_ctrl->final_sz = digestsize;
/* Per-algorithm inner state sizes (SHA-1 needs 8-byte rounding) */
294 switch (ctx->qat_hash_alg) {
295 case ICP_QAT_HW_AUTH_ALGO_SHA1:
296 hash_cd_ctrl->inner_state1_sz =
297 round_up(ICP_QAT_HW_SHA1_STATE1_SZ, 8);
298 hash_cd_ctrl->inner_state2_sz =
299 round_up(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
301 case ICP_QAT_HW_AUTH_ALGO_SHA256:
302 hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA256_STATE1_SZ;
303 hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA256_STATE2_SZ;
305 case ICP_QAT_HW_AUTH_ALGO_SHA512:
306 hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA512_STATE1_SZ;
307 hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA512_STATE2_SZ;
/* state2 follows the auth setup struct plus aligned state1, in quadwords */
312 hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
313 ((sizeof(struct icp_qat_hw_auth_setup) +
314 round_up(hash_cd_ctrl->inner_state1_sz, 8)) >> 3);
315 ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
316 ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
/*
 * Build the decrypt-direction content descriptor and FW request template:
 * auth block first (verify the tag), then the cipher block placed after
 * the two 8-byte-rounded digest-sized state areas; AUTH -> CIPHER chain.
 */
320 static int qat_alg_aead_init_dec_session(struct crypto_aead *aead_tfm,
322 struct crypto_authenc_keys *keys,
325 struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(aead_tfm);
326 unsigned int digestsize = crypto_aead_authsize(aead_tfm);
327 struct qat_dec *dec_ctx = &ctx->dec_cd->qat_dec_cd;
328 struct icp_qat_hw_auth_algo_blk *hash = &dec_ctx->hash;
/* Cipher block follows auth setup + 2 x rounded digest-size state areas */
329 struct icp_qat_hw_cipher_algo_blk *cipher =
330 (struct icp_qat_hw_cipher_algo_blk *)((char *)dec_ctx +
331 sizeof(struct icp_qat_hw_auth_setup) +
332 roundup(crypto_shash_digestsize(ctx->hash_tfm), 8) * 2);
333 struct icp_qat_fw_la_bulk_req *req_tmpl = &ctx->dec_fw_req;
334 struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
335 struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
336 void *ptr = &req_tmpl->cd_ctrl;
337 struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
338 struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
/* Auth request params sit right after the cipher request params */
339 struct icp_qat_fw_la_auth_req_params *auth_param =
340 (struct icp_qat_fw_la_auth_req_params *)
341 ((char *)&req_tmpl->serv_specif_rqpars +
342 sizeof(struct icp_qat_fw_la_cipher_req_params));
345 cipher->aes.cipher_config.val = QAT_AES_HW_CONFIG_DEC(alg, mode);
346 memcpy(cipher->aes.key, keys->enckey, keys->enckeylen);
347 hash->sha.inner_setup.auth_config.config =
348 ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
351 hash->sha.inner_setup.auth_counter.counter =
352 cpu_to_be32(crypto_shash_blocksize(ctx->hash_tfm));
354 if (qat_alg_do_precomputes(hash, ctx, keys->authkey, keys->authkeylen))
358 qat_alg_init_common_hdr(header, 1);
359 header->service_cmd_id = ICP_QAT_FW_LA_CMD_HASH_CIPHER;
360 ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
361 ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
/* Decrypt path: HW compares the auth tag instead of returning it */
362 ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
363 ICP_QAT_FW_LA_NO_RET_AUTH_RES);
364 ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
365 ICP_QAT_FW_LA_CMP_AUTH_RES);
366 cd_pars->u.s.content_desc_addr = ctx->dec_cd_paddr;
367 cd_pars->u.s.content_desc_params_sz = sizeof(struct qat_alg_cd) >> 3;
369 /* Cipher CD config setup */
370 cipher_cd_ctrl->cipher_key_sz = keys->enckeylen >> 3;
371 cipher_cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
/* Offset (in quadwords) of the cipher block within the CD */
372 cipher_cd_ctrl->cipher_cfg_offset =
373 (sizeof(struct icp_qat_hw_auth_setup) +
374 roundup(crypto_shash_digestsize(ctx->hash_tfm), 8) * 2) >> 3;
375 ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
376 ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
378 /* Auth CD config setup */
379 hash_cd_ctrl->hash_cfg_offset = 0;
380 hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
381 hash_cd_ctrl->inner_res_sz = digestsize;
382 hash_cd_ctrl->final_sz = digestsize;
384 switch (ctx->qat_hash_alg) {
385 case ICP_QAT_HW_AUTH_ALGO_SHA1:
386 hash_cd_ctrl->inner_state1_sz =
387 round_up(ICP_QAT_HW_SHA1_STATE1_SZ, 8);
388 hash_cd_ctrl->inner_state2_sz =
389 round_up(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
391 case ICP_QAT_HW_AUTH_ALGO_SHA256:
392 hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA256_STATE1_SZ;
393 hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA256_STATE2_SZ;
395 case ICP_QAT_HW_AUTH_ALGO_SHA512:
396 hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA512_STATE1_SZ;
397 hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA512_STATE2_SZ;
403 hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
404 ((sizeof(struct icp_qat_hw_auth_setup) +
405 round_up(hash_cd_ctrl->inner_state1_sz, 8)) >> 3);
406 auth_param->auth_res_sz = digestsize;
407 ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
408 ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
/*
 * Common skcipher session setup shared by the enc and dec paths: copy the
 * key into the CD and fill the cipher-only FW request template.
 */
412 static void qat_alg_skcipher_init_com(struct qat_alg_skcipher_ctx *ctx,
413 struct icp_qat_fw_la_bulk_req *req,
414 struct icp_qat_hw_cipher_algo_blk *cd,
415 const u8 *key, unsigned int keylen)
417 struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
418 struct icp_qat_fw_comn_req_hdr *header = &req->comn_hdr;
419 struct icp_qat_fw_cipher_cd_ctrl_hdr *cd_ctrl = (void *)&req->cd_ctrl;
421 memcpy(cd->aes.key, key, keylen);
422 qat_alg_init_common_hdr(header, 0);
423 header->service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER;
424 cd_pars->u.s.content_desc_params_sz =
425 sizeof(struct icp_qat_hw_cipher_algo_blk) >> 3;
426 /* Cipher CD config setup */
427 cd_ctrl->cipher_key_sz = keylen >> 3;
428 cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
429 cd_ctrl->cipher_cfg_offset = 0;
430 ICP_QAT_FW_COMN_CURR_ID_SET(cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
431 ICP_QAT_FW_COMN_NEXT_ID_SET(cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
/* Encrypt-direction skcipher session: common setup + ENC cipher config. */
434 static void qat_alg_skcipher_init_enc(struct qat_alg_skcipher_ctx *ctx,
435 int alg, const u8 *key,
436 unsigned int keylen, int mode)
438 struct icp_qat_hw_cipher_algo_blk *enc_cd = ctx->enc_cd;
439 struct icp_qat_fw_la_bulk_req *req = &ctx->enc_fw_req;
440 struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
442 qat_alg_skcipher_init_com(ctx, req, enc_cd, key, keylen);
443 cd_pars->u.s.content_desc_addr = ctx->enc_cd_paddr;
444 enc_cd->aes.cipher_config.val = QAT_AES_HW_CONFIG_ENC(alg, mode);
/*
 * Decrypt-direction skcipher session. CTR is a stream mode where
 * decryption is the same keystream operation as encryption, so it uses
 * the ENC config; all other modes use the DEC (key-convert) config.
 */
447 static void qat_alg_skcipher_init_dec(struct qat_alg_skcipher_ctx *ctx,
448 int alg, const u8 *key,
449 unsigned int keylen, int mode)
451 struct icp_qat_hw_cipher_algo_blk *dec_cd = ctx->dec_cd;
452 struct icp_qat_fw_la_bulk_req *req = &ctx->dec_fw_req;
453 struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
455 qat_alg_skcipher_init_com(ctx, req, dec_cd, key, keylen);
456 cd_pars->u.s.content_desc_addr = ctx->dec_cd_paddr;
458 if (mode != ICP_QAT_HW_CIPHER_CTR_MODE)
459 dec_cd->aes.cipher_config.val =
460 QAT_AES_HW_CONFIG_DEC(alg, mode);
462 dec_cd->aes.cipher_config.val =
463 QAT_AES_HW_CONFIG_ENC(alg, mode);
/*
 * Map a key length to the HW AES algorithm id. XTS keys carry two AES
 * keys concatenated, hence the doubled sizes in that branch.
 * NOTE(review): default/error cases are elided in this view.
 */
466 static int qat_alg_validate_key(int key_len, int *alg, int mode)
468 if (mode != ICP_QAT_HW_CIPHER_XTS_MODE) {
470 case AES_KEYSIZE_128:
471 *alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
473 case AES_KEYSIZE_192:
474 *alg = ICP_QAT_HW_CIPHER_ALGO_AES192;
476 case AES_KEYSIZE_256:
477 *alg = ICP_QAT_HW_CIPHER_ALGO_AES256;
484 case AES_KEYSIZE_128 << 1:
485 *alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
487 case AES_KEYSIZE_256 << 1:
488 *alg = ICP_QAT_HW_CIPHER_ALGO_AES256;
/*
 * Split the authenc blob into enc/auth keys, validate the cipher key
 * length, then build both enc and dec sessions. The extracted key
 * material is wiped on every exit path.
 */
497 static int qat_alg_aead_init_sessions(struct crypto_aead *tfm, const u8 *key,
498 unsigned int keylen, int mode)
500 struct crypto_authenc_keys keys;
503 if (crypto_authenc_extractkeys(&keys, key, keylen))
506 if (qat_alg_validate_key(keys.enckeylen, &alg, mode))
509 if (qat_alg_aead_init_enc_session(tfm, alg, &keys, mode))
512 if (qat_alg_aead_init_dec_session(tfm, alg, &keys, mode))
/* Success and both error paths scrub the on-stack key copies */
515 memzero_explicit(&keys, sizeof(keys));
518 memzero_explicit(&keys, sizeof(keys));
521 memzero_explicit(&keys, sizeof(keys));
/* Validate the key length, then build both skcipher session templates. */
525 static int qat_alg_skcipher_init_sessions(struct qat_alg_skcipher_ctx *ctx,
532 if (qat_alg_validate_key(keylen, &alg, mode))
535 qat_alg_skcipher_init_enc(ctx, alg, key, keylen, mode);
536 qat_alg_skcipher_init_dec(ctx, alg, key, keylen, mode);
/*
 * Re-key an already-initialized AEAD tfm: zero the existing content
 * descriptors and FW templates, then rebuild the CBC-HMAC sessions.
 */
540 static int qat_alg_aead_rekey(struct crypto_aead *tfm, const u8 *key,
543 struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
545 memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
546 memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
547 memset(&ctx->enc_fw_req, 0, sizeof(ctx->enc_fw_req));
548 memset(&ctx->dec_fw_req, 0, sizeof(ctx->dec_fw_req));
550 return qat_alg_aead_init_sessions(tfm, key, keylen,
551 ICP_QAT_HW_CIPHER_CBC_MODE);
/*
 * First-time key setup for an AEAD tfm: grab a crypto instance on the
 * current NUMA node, allocate DMA-coherent enc/dec content descriptors,
 * and initialize the sessions. On failure the CDs are zeroed before
 * being freed and the instance is released.
 */
554 static int qat_alg_aead_newkey(struct crypto_aead *tfm, const u8 *key,
557 struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
558 struct qat_crypto_instance *inst = NULL;
559 int node = get_current_node();
563 inst = qat_crypto_get_instance_node(node);
566 dev = &GET_DEV(inst->accel_dev);
568 ctx->enc_cd = dma_alloc_coherent(dev, sizeof(*ctx->enc_cd),
575 ctx->dec_cd = dma_alloc_coherent(dev, sizeof(*ctx->dec_cd),
583 ret = qat_alg_aead_init_sessions(tfm, key, keylen,
584 ICP_QAT_HW_CIPHER_CBC_MODE);
/* Error unwind: scrub key material from the CDs before freeing them */
591 memset(ctx->dec_cd, 0, sizeof(struct qat_alg_cd));
592 dma_free_coherent(dev, sizeof(struct qat_alg_cd),
593 ctx->dec_cd, ctx->dec_cd_paddr);
596 memset(ctx->enc_cd, 0, sizeof(struct qat_alg_cd));
597 dma_free_coherent(dev, sizeof(struct qat_alg_cd),
598 ctx->enc_cd, ctx->enc_cd_paddr);
602 qat_crypto_put_instance(inst);
/*
 * AEAD setkey entry point: dispatch to rekey or first-time newkey.
 * NOTE(review): the selecting condition is elided in this view -
 * presumably it checks whether ctx->enc_cd is already allocated; confirm.
 */
606 static int qat_alg_aead_setkey(struct crypto_aead *tfm, const u8 *key,
609 struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
612 return qat_alg_aead_rekey(tfm, key, keylen);
614 return qat_alg_aead_newkey(tfm, key, keylen);
/*
 * Undo qat_alg_sgl_to_bufl(): unmap every source buffer and the source
 * buffer list; for out-of-place operations also unmap the destination
 * data buffers and list. kmalloc'd lists (used only when the request
 * exceeded the preallocated descriptor count) are freed.
 */
617 static void qat_alg_free_bufl(struct qat_crypto_instance *inst,
618 struct qat_crypto_request *qat_req)
620 struct device *dev = &GET_DEV(inst->accel_dev);
621 struct qat_alg_buf_list *bl = qat_req->buf.bl;
622 struct qat_alg_buf_list *blout = qat_req->buf.blout;
623 dma_addr_t blp = qat_req->buf.blp;
624 dma_addr_t blpout = qat_req->buf.bloutp;
625 size_t sz = qat_req->buf.sz;
626 size_t sz_out = qat_req->buf.sz_out;
/* In-place (src == dst) buffers were mapped bidirectional */
630 bl_dma_dir = blp != blpout ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
632 for (i = 0; i < bl->num_bufs; i++)
633 dma_unmap_single(dev, bl->bufers[i].addr,
634 bl->bufers[i].len, bl_dma_dir);
636 dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
/* Free the source list only if it was heap-allocated */
638 if (!qat_req->buf.sgl_src_valid)
642 /* If out of place operation dma unmap only data */
643 int bufless = blout->num_bufs - blout->num_mapped_bufs;
645 for (i = bufless; i < blout->num_bufs; i++) {
646 dma_unmap_single(dev, blout->bufers[i].addr,
647 blout->bufers[i].len,
650 dma_unmap_single(dev, blpout, sz_out, DMA_TO_DEVICE);
652 if (!qat_req->buf.sgl_dst_valid)
/*
 * DMA-map the source (and, for out-of-place requests, destination)
 * scatterlists into QAT buffer lists the firmware can walk. Small
 * requests reuse the preallocated lists embedded in qat_req; larger
 * ones fall back to GFP_ATOMIC heap allocation. On any mapping
 * failure everything mapped so far is unwound.
 */
657 static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
658 struct scatterlist *sgl,
659 struct scatterlist *sglout,
660 struct qat_crypto_request *qat_req)
662 struct device *dev = &GET_DEV(inst->accel_dev);
664 int n = sg_nents(sgl);
665 struct qat_alg_buf_list *bufl;
666 struct qat_alg_buf_list *buflout = NULL;
667 dma_addr_t blp = DMA_MAPPING_ERROR;
668 dma_addr_t bloutp = DMA_MAPPING_ERROR;
669 struct scatterlist *sg;
670 size_t sz_out, sz = struct_size(bufl, bufers, n);
671 int node = dev_to_node(&GET_DEV(inst->accel_dev));
677 qat_req->buf.sgl_src_valid = false;
678 qat_req->buf.sgl_dst_valid = false;
/* Preallocated list holds up to QAT_MAX_BUFF_DESC entries; else kzalloc */
680 if (n > QAT_MAX_BUFF_DESC) {
681 bufl = kzalloc_node(sz, GFP_ATOMIC, node);
685 bufl = &qat_req->buf.sgl_src.sgl_hdr;
686 memset(bufl, 0, sizeof(struct qat_alg_buf_list));
687 qat_req->buf.sgl_src_valid = true;
/* In-place requests (src == dst) need bidirectional mappings */
690 bufl_dma_dir = sgl != sglout ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
/* Pre-poison addrs so the error unwind can tell mapped from unmapped */
692 for_each_sg(sgl, sg, n, i)
693 bufl->bufers[i].addr = DMA_MAPPING_ERROR;
695 for_each_sg(sgl, sg, n, i) {
701 bufl->bufers[y].addr = dma_map_single(dev, sg_virt(sg),
704 bufl->bufers[y].len = sg->length;
705 if (unlikely(dma_mapping_error(dev, bufl->bufers[y].addr)))
709 bufl->num_bufs = sg_nctr;
/* Map the buffer-list structure itself for the firmware */
710 blp = dma_map_single(dev, bufl, sz, DMA_TO_DEVICE);
711 if (unlikely(dma_mapping_error(dev, blp)))
713 qat_req->buf.bl = bufl;
714 qat_req->buf.blp = blp;
715 qat_req->buf.sz = sz;
716 /* Handle out of place operation */
718 struct qat_alg_buf *bufers;
720 n = sg_nents(sglout);
721 sz_out = struct_size(buflout, bufers, n);
724 if (n > QAT_MAX_BUFF_DESC) {
725 buflout = kzalloc_node(sz_out, GFP_ATOMIC, node);
726 if (unlikely(!buflout))
729 buflout = &qat_req->buf.sgl_dst.sgl_hdr;
730 memset(buflout, 0, sizeof(struct qat_alg_buf_list));
731 qat_req->buf.sgl_dst_valid = true;
734 bufers = buflout->bufers;
735 for_each_sg(sglout, sg, n, i)
736 bufers[i].addr = DMA_MAPPING_ERROR;
738 for_each_sg(sglout, sg, n, i) {
744 bufers[y].addr = dma_map_single(dev, sg_virt(sg),
747 if (unlikely(dma_mapping_error(dev, bufers[y].addr)))
749 bufers[y].len = sg->length;
752 buflout->num_bufs = sg_nctr;
753 buflout->num_mapped_bufs = sg_nctr;
754 bloutp = dma_map_single(dev, buflout, sz_out, DMA_TO_DEVICE);
755 if (unlikely(dma_mapping_error(dev, bloutp)))
757 qat_req->buf.blout = buflout;
758 qat_req->buf.bloutp = bloutp;
759 qat_req->buf.sz_out = sz_out;
761 /* Otherwise set the src and dst to the same address */
762 qat_req->buf.bloutp = qat_req->buf.blp;
763 qat_req->buf.sz_out = 0;
/* Error unwind: unmap dst list and buffers, then src, freeing as needed */
768 if (!dma_mapping_error(dev, bloutp))
769 dma_unmap_single(dev, bloutp, sz_out, DMA_TO_DEVICE);
771 n = sg_nents(sglout);
772 for (i = 0; i < n; i++)
773 if (!dma_mapping_error(dev, buflout->bufers[i].addr))
774 dma_unmap_single(dev, buflout->bufers[i].addr,
775 buflout->bufers[i].len,
778 if (!qat_req->buf.sgl_dst_valid)
782 if (!dma_mapping_error(dev, blp))
783 dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
786 for (i = 0; i < n; i++)
787 if (!dma_mapping_error(dev, bufl->bufers[i].addr))
788 dma_unmap_single(dev, bufl->bufers[i].addr,
792 if (!qat_req->buf.sgl_src_valid)
795 dev_err(dev, "Failed to map buf for dma\n");
/*
 * FW response handler for AEAD requests: release DMA mappings, translate
 * the firmware status into an errno, and complete the aead request.
 */
799 static void qat_aead_alg_callback(struct icp_qat_fw_la_resp *qat_resp,
800 struct qat_crypto_request *qat_req)
802 struct qat_alg_aead_ctx *ctx = qat_req->aead_ctx;
803 struct qat_crypto_instance *inst = ctx->inst;
804 struct aead_request *areq = qat_req->aead_req;
805 u8 stat_filed = qat_resp->comn_resp.comn_status;
806 int res = 0, qat_res = ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(stat_filed);
808 qat_alg_free_bufl(inst, qat_req);
/* Non-OK status maps to an error result (exact errno elided here) */
809 if (unlikely(qat_res != ICP_QAT_FW_COMN_STATUS_FLAG_OK))
811 areq->base.complete(&areq->base, res);
/*
 * FW response handler for skcipher requests: release DMA mappings, copy
 * the (FW-updated) IV back into the request, free the coherent IV buffer,
 * and complete the skcipher request with the translated status.
 */
814 static void qat_skcipher_alg_callback(struct icp_qat_fw_la_resp *qat_resp,
815 struct qat_crypto_request *qat_req)
817 struct qat_alg_skcipher_ctx *ctx = qat_req->skcipher_ctx;
818 struct qat_crypto_instance *inst = ctx->inst;
819 struct skcipher_request *sreq = qat_req->skcipher_req;
820 u8 stat_filed = qat_resp->comn_resp.comn_status;
821 struct device *dev = &GET_DEV(ctx->inst->accel_dev);
822 int res = 0, qat_res = ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(stat_filed);
824 qat_alg_free_bufl(inst, qat_req);
825 if (unlikely(qat_res != ICP_QAT_FW_COMN_STATUS_FLAG_OK))
828 memcpy(sreq->iv, qat_req->iv, AES_BLOCK_SIZE);
829 dma_free_coherent(dev, AES_BLOCK_SIZE, qat_req->iv,
832 sreq->base.complete(&sreq->base, res);
/*
 * Top-level ring callback: recover the originating request from the
 * response's opaque data and dispatch to its per-type handler.
 */
835 void qat_alg_callback(void *resp)
837 struct icp_qat_fw_la_resp *qat_resp = resp;
838 struct qat_crypto_request *qat_req =
839 (void *)(__force long)qat_resp->opaque_data;
841 qat_req->cb(qat_resp, qat_req);
/*
 * AEAD decrypt: ciphertext length (cryptlen minus the auth tag) must be
 * a whole number of AES blocks. Maps the SGLs, clones the dec template,
 * fills per-request params, and submits with a bounded -EAGAIN retry.
 */
844 static int qat_alg_aead_dec(struct aead_request *areq)
846 struct crypto_aead *aead_tfm = crypto_aead_reqtfm(areq);
847 struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
848 struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm);
849 struct qat_crypto_request *qat_req = aead_request_ctx(areq);
850 struct icp_qat_fw_la_cipher_req_params *cipher_param;
851 struct icp_qat_fw_la_auth_req_params *auth_param;
852 struct icp_qat_fw_la_bulk_req *msg;
853 int digst_size = crypto_aead_authsize(aead_tfm);
857 cipher_len = areq->cryptlen - digst_size;
858 if (cipher_len % AES_BLOCK_SIZE != 0)
861 ret = qat_alg_sgl_to_bufl(ctx->inst, areq->src, areq->dst, qat_req);
/* Start from the prebuilt decrypt template, then fill request specifics */
866 *msg = ctx->dec_fw_req;
867 qat_req->aead_ctx = ctx;
868 qat_req->aead_req = areq;
869 qat_req->cb = qat_aead_alg_callback;
870 qat_req->req.comn_mid.opaque_data = (u64)(__force long)qat_req;
871 qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
872 qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
873 cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
874 cipher_param->cipher_length = cipher_len;
875 cipher_param->cipher_offset = areq->assoclen;
876 memcpy(cipher_param->u.cipher_IV_array, areq->iv, AES_BLOCK_SIZE);
877 auth_param = (void *)((u8 *)cipher_param + sizeof(*cipher_param));
878 auth_param->auth_off = 0;
/* Auth covers AAD plus ciphertext */
879 auth_param->auth_len = areq->assoclen + cipher_param->cipher_length;
/* Bounded retry while the ring is full */
881 ret = adf_send_message(ctx->inst->sym_tx, (u32 *)msg);
882 } while (ret == -EAGAIN && ctr++ < 10);
884 if (ret == -EAGAIN) {
885 qat_alg_free_bufl(ctx->inst, qat_req);
/*
 * AEAD encrypt: plaintext must be a whole number of AES blocks. Mirrors
 * qat_alg_aead_dec() but uses the encrypt template and hashes over
 * AAD + plaintext length.
 */
893 static int qat_alg_aead_enc(struct aead_request *areq)
895 struct crypto_aead *aead_tfm = crypto_aead_reqtfm(areq);
896 struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
897 struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm);
898 struct qat_crypto_request *qat_req = aead_request_ctx(areq);
899 struct icp_qat_fw_la_cipher_req_params *cipher_param;
900 struct icp_qat_fw_la_auth_req_params *auth_param;
901 struct icp_qat_fw_la_bulk_req *msg;
903 if (areq->cryptlen % AES_BLOCK_SIZE != 0)
906 ret = qat_alg_sgl_to_bufl(ctx->inst, areq->src, areq->dst, qat_req);
911 *msg = ctx->enc_fw_req;
912 qat_req->aead_ctx = ctx;
913 qat_req->aead_req = areq;
914 qat_req->cb = qat_aead_alg_callback;
915 qat_req->req.comn_mid.opaque_data = (u64)(__force long)qat_req;
916 qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
917 qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
918 cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
919 auth_param = (void *)((u8 *)cipher_param + sizeof(*cipher_param));
921 memcpy(cipher_param->u.cipher_IV_array, iv, AES_BLOCK_SIZE);
922 cipher_param->cipher_length = areq->cryptlen;
923 cipher_param->cipher_offset = areq->assoclen;
925 auth_param->auth_off = 0;
926 auth_param->auth_len = areq->assoclen + areq->cryptlen;
/* Bounded retry while the ring is full */
929 ret = adf_send_message(ctx->inst->sym_tx, (u32 *)msg);
930 } while (ret == -EAGAIN && ctr++ < 10);
932 if (ret == -EAGAIN) {
933 qat_alg_free_bufl(ctx->inst, qat_req);
/* Re-key an initialized skcipher tfm: zero CDs/templates, rebuild sessions. */
939 static int qat_alg_skcipher_rekey(struct qat_alg_skcipher_ctx *ctx,
940 const u8 *key, unsigned int keylen,
943 memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
944 memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
945 memset(&ctx->enc_fw_req, 0, sizeof(ctx->enc_fw_req));
946 memset(&ctx->dec_fw_req, 0, sizeof(ctx->dec_fw_req));
948 return qat_alg_skcipher_init_sessions(ctx, key, keylen, mode);
/*
 * First-time skcipher key setup: get a crypto instance on the current
 * NUMA node, allocate coherent enc/dec CDs, init sessions; on failure
 * scrub and free the CDs and release the instance.
 */
951 static int qat_alg_skcipher_newkey(struct qat_alg_skcipher_ctx *ctx,
952 const u8 *key, unsigned int keylen,
955 struct qat_crypto_instance *inst = NULL;
957 int node = get_current_node();
960 inst = qat_crypto_get_instance_node(node);
963 dev = &GET_DEV(inst->accel_dev);
965 ctx->enc_cd = dma_alloc_coherent(dev, sizeof(*ctx->enc_cd),
970 goto out_free_instance;
972 ctx->dec_cd = dma_alloc_coherent(dev, sizeof(*ctx->dec_cd),
980 ret = qat_alg_skcipher_init_sessions(ctx, key, keylen, mode);
/* Error unwind: scrub key material before freeing the coherent CDs */
987 memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
988 dma_free_coherent(dev, sizeof(*ctx->dec_cd),
989 ctx->dec_cd, ctx->dec_cd_paddr);
992 memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
993 dma_free_coherent(dev, sizeof(*ctx->enc_cd),
994 ctx->enc_cd, ctx->enc_cd_paddr);
998 qat_crypto_put_instance(inst);
/*
 * skcipher setkey dispatcher: rekey vs first-time newkey.
 * NOTE(review): the selecting condition is elided in this view -
 * presumably it checks whether ctx->enc_cd is already allocated; confirm.
 */
1002 static int qat_alg_skcipher_setkey(struct crypto_skcipher *tfm,
1003 const u8 *key, unsigned int keylen,
1006 struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
1009 return qat_alg_skcipher_rekey(ctx, key, keylen, mode);
1011 return qat_alg_skcipher_newkey(ctx, key, keylen, mode);
/* setkey for AES-CBC. */
1014 static int qat_alg_skcipher_cbc_setkey(struct crypto_skcipher *tfm,
1015 const u8 *key, unsigned int keylen)
1017 return qat_alg_skcipher_setkey(tfm, key, keylen,
1018 ICP_QAT_HW_CIPHER_CBC_MODE);
/* setkey for AES-CTR. */
1021 static int qat_alg_skcipher_ctr_setkey(struct crypto_skcipher *tfm,
1022 const u8 *key, unsigned int keylen)
1024 return qat_alg_skcipher_setkey(tfm, key, keylen,
1025 ICP_QAT_HW_CIPHER_CTR_MODE);
/*
 * setkey for AES-XTS. The HW does not handle the AES-192 XTS key size
 * (2 x 192-bit), so that case is routed to the software fallback tfm
 * and ctx->fallback is latched for the encrypt/decrypt paths.
 */
1028 static int qat_alg_skcipher_xts_setkey(struct crypto_skcipher *tfm,
1029 const u8 *key, unsigned int keylen)
1031 struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
/* Enforces the XTS rule that the two half-keys must differ */
1034 ret = xts_verify_key(tfm, key, keylen);
1038 if (keylen >> 1 == AES_KEYSIZE_192) {
1039 ret = crypto_skcipher_setkey(ctx->ftfm, key, keylen);
1043 ctx->fallback = true;
1048 ctx->fallback = false;
1050 return qat_alg_skcipher_setkey(tfm, key, keylen,
1051 ICP_QAT_HW_CIPHER_XTS_MODE);
/*
 * skcipher encrypt: zero-length requests short-circuit; the IV is copied
 * into a per-request DMA-coherent buffer referenced by pointer (the FW
 * updates it in place - see the IV-update header flags). Submits with a
 * bounded -EAGAIN retry; completion is asynchronous (-EINPROGRESS).
 */
1054 static int qat_alg_skcipher_encrypt(struct skcipher_request *req)
1056 struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
1057 struct crypto_tfm *tfm = crypto_skcipher_tfm(stfm);
1058 struct qat_alg_skcipher_ctx *ctx = crypto_tfm_ctx(tfm);
1059 struct qat_crypto_request *qat_req = skcipher_request_ctx(req);
1060 struct icp_qat_fw_la_cipher_req_params *cipher_param;
1061 struct icp_qat_fw_la_bulk_req *msg;
1062 struct device *dev = &GET_DEV(ctx->inst->accel_dev);
1065 if (req->cryptlen == 0)
1068 qat_req->iv = dma_alloc_coherent(dev, AES_BLOCK_SIZE,
1069 &qat_req->iv_paddr, GFP_ATOMIC);
1073 ret = qat_alg_sgl_to_bufl(ctx->inst, req->src, req->dst, qat_req);
1074 if (unlikely(ret)) {
1075 dma_free_coherent(dev, AES_BLOCK_SIZE, qat_req->iv,
1080 msg = &qat_req->req;
1081 *msg = ctx->enc_fw_req;
1082 qat_req->skcipher_ctx = ctx;
1083 qat_req->skcipher_req = req;
1084 qat_req->cb = qat_skcipher_alg_callback;
1085 qat_req->req.comn_mid.opaque_data = (u64)(__force long)qat_req;
1086 qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
1087 qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
1088 cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
1089 cipher_param->cipher_length = req->cryptlen;
1090 cipher_param->cipher_offset = 0;
/* IV passed by 64-bit DMA pointer rather than inline */
1091 cipher_param->u.s.cipher_IV_ptr = qat_req->iv_paddr;
1092 memcpy(qat_req->iv, req->iv, AES_BLOCK_SIZE);
1094 ret = adf_send_message(ctx->inst->sym_tx, (u32 *)msg);
1095 } while (ret == -EAGAIN && ctr++ < 10);
1097 if (ret == -EAGAIN) {
1098 qat_alg_free_bufl(ctx->inst, qat_req);
1099 dma_free_coherent(dev, AES_BLOCK_SIZE, qat_req->iv,
1103 return -EINPROGRESS;
/* Block-mode encrypt wrapper: reject lengths not a multiple of the AES block. */
1106 static int qat_alg_skcipher_blk_encrypt(struct skcipher_request *req)
1108 if (req->cryptlen % AES_BLOCK_SIZE != 0)
1111 return qat_alg_skcipher_encrypt(req);
/*
 * XTS encrypt: requires at least one XTS block; routes to the software
 * fallback tfm when the key size forced fallback mode (AES-192 XTS).
 */
1114 static int qat_alg_skcipher_xts_encrypt(struct skcipher_request *req)
1116 struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
1117 struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(stfm);
1118 struct skcipher_request *nreq = skcipher_request_ctx(req);
1120 if (req->cryptlen < XTS_BLOCK_SIZE)
1123 if (ctx->fallback) {
1124 memcpy(nreq, req, sizeof(*req));
1125 skcipher_request_set_tfm(nreq, ctx->ftfm);
1126 return crypto_skcipher_encrypt(nreq);
1129 return qat_alg_skcipher_encrypt(req);
/*
 * skcipher decrypt: mirrors qat_alg_skcipher_encrypt() but clones the
 * decrypt FW template. IV travels via a per-request DMA-coherent buffer;
 * completion is asynchronous (-EINPROGRESS).
 */
1132 static int qat_alg_skcipher_decrypt(struct skcipher_request *req)
1134 struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
1135 struct crypto_tfm *tfm = crypto_skcipher_tfm(stfm);
1136 struct qat_alg_skcipher_ctx *ctx = crypto_tfm_ctx(tfm);
1137 struct qat_crypto_request *qat_req = skcipher_request_ctx(req);
1138 struct icp_qat_fw_la_cipher_req_params *cipher_param;
1139 struct icp_qat_fw_la_bulk_req *msg;
1140 struct device *dev = &GET_DEV(ctx->inst->accel_dev);
1143 if (req->cryptlen == 0)
1146 qat_req->iv = dma_alloc_coherent(dev, AES_BLOCK_SIZE,
1147 &qat_req->iv_paddr, GFP_ATOMIC);
1151 ret = qat_alg_sgl_to_bufl(ctx->inst, req->src, req->dst, qat_req);
1152 if (unlikely(ret)) {
1153 dma_free_coherent(dev, AES_BLOCK_SIZE, qat_req->iv,
1158 msg = &qat_req->req;
1159 *msg = ctx->dec_fw_req;
1160 qat_req->skcipher_ctx = ctx;
1161 qat_req->skcipher_req = req;
1162 qat_req->cb = qat_skcipher_alg_callback;
1163 qat_req->req.comn_mid.opaque_data = (u64)(__force long)qat_req;
1164 qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
1165 qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
1166 cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
1167 cipher_param->cipher_length = req->cryptlen;
1168 cipher_param->cipher_offset = 0;
1169 cipher_param->u.s.cipher_IV_ptr = qat_req->iv_paddr;
1170 memcpy(qat_req->iv, req->iv, AES_BLOCK_SIZE);
/* Bounded retry while the ring is full */
1172 ret = adf_send_message(ctx->inst->sym_tx, (u32 *)msg);
1173 } while (ret == -EAGAIN && ctr++ < 10);
1175 if (ret == -EAGAIN) {
1176 qat_alg_free_bufl(ctx->inst, qat_req);
1177 dma_free_coherent(dev, AES_BLOCK_SIZE, qat_req->iv,
1181 return -EINPROGRESS;
1184 static int qat_alg_skcipher_blk_decrypt(struct skcipher_request *req)
1186 if (req->cryptlen % AES_BLOCK_SIZE != 0)
1189 return qat_alg_skcipher_decrypt(req);
1192 static int qat_alg_skcipher_xts_decrypt(struct skcipher_request *req)
1194 struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
1195 struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(stfm);
1196 struct skcipher_request *nreq = skcipher_request_ctx(req);
1198 if (req->cryptlen < XTS_BLOCK_SIZE)
1201 if (ctx->fallback) {
1202 memcpy(nreq, req, sizeof(*req));
1203 skcipher_request_set_tfm(nreq, ctx->ftfm);
1204 return crypto_skcipher_decrypt(nreq);
1207 return qat_alg_skcipher_decrypt(req);
1210 static int qat_alg_aead_init(struct crypto_aead *tfm,
1211 enum icp_qat_hw_auth_algo hash,
1212 const char *hash_name)
1214 struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
1216 ctx->hash_tfm = crypto_alloc_shash(hash_name, 0, 0);
1217 if (IS_ERR(ctx->hash_tfm))
1218 return PTR_ERR(ctx->hash_tfm);
1219 ctx->qat_hash_alg = hash;
1220 crypto_aead_set_reqsize(tfm, sizeof(struct qat_crypto_request));
1224 static int qat_alg_aead_sha1_init(struct crypto_aead *tfm)
1226 return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA1, "sha1");
1229 static int qat_alg_aead_sha256_init(struct crypto_aead *tfm)
1231 return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA256, "sha256");
1234 static int qat_alg_aead_sha512_init(struct crypto_aead *tfm)
1236 return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA512, "sha512");
1239 static void qat_alg_aead_exit(struct crypto_aead *tfm)
1241 struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
1242 struct qat_crypto_instance *inst = ctx->inst;
1245 crypto_free_shash(ctx->hash_tfm);
1250 dev = &GET_DEV(inst->accel_dev);
1252 memset(ctx->enc_cd, 0, sizeof(struct qat_alg_cd));
1253 dma_free_coherent(dev, sizeof(struct qat_alg_cd),
1254 ctx->enc_cd, ctx->enc_cd_paddr);
1257 memset(ctx->dec_cd, 0, sizeof(struct qat_alg_cd));
1258 dma_free_coherent(dev, sizeof(struct qat_alg_cd),
1259 ctx->dec_cd, ctx->dec_cd_paddr);
1261 qat_crypto_put_instance(inst);
1264 static int qat_alg_skcipher_init_tfm(struct crypto_skcipher *tfm)
1266 crypto_skcipher_set_reqsize(tfm, sizeof(struct qat_crypto_request));
1270 static int qat_alg_skcipher_init_xts_tfm(struct crypto_skcipher *tfm)
1272 struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
1275 ctx->ftfm = crypto_alloc_skcipher("xts(aes)", 0,
1276 CRYPTO_ALG_NEED_FALLBACK);
1277 if (IS_ERR(ctx->ftfm))
1278 return PTR_ERR(ctx->ftfm);
1280 reqsize = max(sizeof(struct qat_crypto_request),
1281 sizeof(struct skcipher_request) +
1282 crypto_skcipher_reqsize(ctx->ftfm));
1283 crypto_skcipher_set_reqsize(tfm, reqsize);
1288 static void qat_alg_skcipher_exit_tfm(struct crypto_skcipher *tfm)
1290 struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
1291 struct qat_crypto_instance *inst = ctx->inst;
1297 dev = &GET_DEV(inst->accel_dev);
1299 memset(ctx->enc_cd, 0,
1300 sizeof(struct icp_qat_hw_cipher_algo_blk));
1301 dma_free_coherent(dev,
1302 sizeof(struct icp_qat_hw_cipher_algo_blk),
1303 ctx->enc_cd, ctx->enc_cd_paddr);
1306 memset(ctx->dec_cd, 0,
1307 sizeof(struct icp_qat_hw_cipher_algo_blk));
1308 dma_free_coherent(dev,
1309 sizeof(struct icp_qat_hw_cipher_algo_blk),
1310 ctx->dec_cd, ctx->dec_cd_paddr);
1312 qat_crypto_put_instance(inst);
1315 static void qat_alg_skcipher_exit_xts_tfm(struct crypto_skcipher *tfm)
1317 struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
1320 crypto_free_skcipher(ctx->ftfm);
1322 qat_alg_skcipher_exit_tfm(tfm);
1325 static struct aead_alg qat_aeads[] = { {
1327 .cra_name = "authenc(hmac(sha1),cbc(aes))",
1328 .cra_driver_name = "qat_aes_cbc_hmac_sha1",
1329 .cra_priority = 4001,
1330 .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
1331 .cra_blocksize = AES_BLOCK_SIZE,
1332 .cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
1333 .cra_module = THIS_MODULE,
1335 .init = qat_alg_aead_sha1_init,
1336 .exit = qat_alg_aead_exit,
1337 .setkey = qat_alg_aead_setkey,
1338 .decrypt = qat_alg_aead_dec,
1339 .encrypt = qat_alg_aead_enc,
1340 .ivsize = AES_BLOCK_SIZE,
1341 .maxauthsize = SHA1_DIGEST_SIZE,
1344 .cra_name = "authenc(hmac(sha256),cbc(aes))",
1345 .cra_driver_name = "qat_aes_cbc_hmac_sha256",
1346 .cra_priority = 4001,
1347 .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
1348 .cra_blocksize = AES_BLOCK_SIZE,
1349 .cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
1350 .cra_module = THIS_MODULE,
1352 .init = qat_alg_aead_sha256_init,
1353 .exit = qat_alg_aead_exit,
1354 .setkey = qat_alg_aead_setkey,
1355 .decrypt = qat_alg_aead_dec,
1356 .encrypt = qat_alg_aead_enc,
1357 .ivsize = AES_BLOCK_SIZE,
1358 .maxauthsize = SHA256_DIGEST_SIZE,
1361 .cra_name = "authenc(hmac(sha512),cbc(aes))",
1362 .cra_driver_name = "qat_aes_cbc_hmac_sha512",
1363 .cra_priority = 4001,
1364 .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
1365 .cra_blocksize = AES_BLOCK_SIZE,
1366 .cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
1367 .cra_module = THIS_MODULE,
1369 .init = qat_alg_aead_sha512_init,
1370 .exit = qat_alg_aead_exit,
1371 .setkey = qat_alg_aead_setkey,
1372 .decrypt = qat_alg_aead_dec,
1373 .encrypt = qat_alg_aead_enc,
1374 .ivsize = AES_BLOCK_SIZE,
1375 .maxauthsize = SHA512_DIGEST_SIZE,
1378 static struct skcipher_alg qat_skciphers[] = { {
1379 .base.cra_name = "cbc(aes)",
1380 .base.cra_driver_name = "qat_aes_cbc",
1381 .base.cra_priority = 4001,
1382 .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
1383 .base.cra_blocksize = AES_BLOCK_SIZE,
1384 .base.cra_ctxsize = sizeof(struct qat_alg_skcipher_ctx),
1385 .base.cra_alignmask = 0,
1386 .base.cra_module = THIS_MODULE,
1388 .init = qat_alg_skcipher_init_tfm,
1389 .exit = qat_alg_skcipher_exit_tfm,
1390 .setkey = qat_alg_skcipher_cbc_setkey,
1391 .decrypt = qat_alg_skcipher_blk_decrypt,
1392 .encrypt = qat_alg_skcipher_blk_encrypt,
1393 .min_keysize = AES_MIN_KEY_SIZE,
1394 .max_keysize = AES_MAX_KEY_SIZE,
1395 .ivsize = AES_BLOCK_SIZE,
1397 .base.cra_name = "ctr(aes)",
1398 .base.cra_driver_name = "qat_aes_ctr",
1399 .base.cra_priority = 4001,
1400 .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
1401 .base.cra_blocksize = 1,
1402 .base.cra_ctxsize = sizeof(struct qat_alg_skcipher_ctx),
1403 .base.cra_alignmask = 0,
1404 .base.cra_module = THIS_MODULE,
1406 .init = qat_alg_skcipher_init_tfm,
1407 .exit = qat_alg_skcipher_exit_tfm,
1408 .setkey = qat_alg_skcipher_ctr_setkey,
1409 .decrypt = qat_alg_skcipher_decrypt,
1410 .encrypt = qat_alg_skcipher_encrypt,
1411 .min_keysize = AES_MIN_KEY_SIZE,
1412 .max_keysize = AES_MAX_KEY_SIZE,
1413 .ivsize = AES_BLOCK_SIZE,
1415 .base.cra_name = "xts(aes)",
1416 .base.cra_driver_name = "qat_aes_xts",
1417 .base.cra_priority = 4001,
1418 .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK |
1419 CRYPTO_ALG_ALLOCATES_MEMORY,
1420 .base.cra_blocksize = AES_BLOCK_SIZE,
1421 .base.cra_ctxsize = sizeof(struct qat_alg_skcipher_ctx),
1422 .base.cra_alignmask = 0,
1423 .base.cra_module = THIS_MODULE,
1425 .init = qat_alg_skcipher_init_xts_tfm,
1426 .exit = qat_alg_skcipher_exit_xts_tfm,
1427 .setkey = qat_alg_skcipher_xts_setkey,
1428 .decrypt = qat_alg_skcipher_xts_decrypt,
1429 .encrypt = qat_alg_skcipher_xts_encrypt,
1430 .min_keysize = 2 * AES_MIN_KEY_SIZE,
1431 .max_keysize = 2 * AES_MAX_KEY_SIZE,
1432 .ivsize = AES_BLOCK_SIZE,
1435 int qat_algs_register(void)
1439 mutex_lock(&algs_lock);
1440 if (++active_devs != 1)
1443 ret = crypto_register_skciphers(qat_skciphers,
1444 ARRAY_SIZE(qat_skciphers));
1448 ret = crypto_register_aeads(qat_aeads, ARRAY_SIZE(qat_aeads));
1453 mutex_unlock(&algs_lock);
1457 crypto_unregister_skciphers(qat_skciphers, ARRAY_SIZE(qat_skciphers));
1461 void qat_algs_unregister(void)
1463 mutex_lock(&algs_lock);
1464 if (--active_devs != 0)
1467 crypto_unregister_aeads(qat_aeads, ARRAY_SIZE(qat_aeads));
1468 crypto_unregister_skciphers(qat_skciphers, ARRAY_SIZE(qat_skciphers));
1471 mutex_unlock(&algs_lock);