// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
 */

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <crypto/aes.h>
#include <crypto/internal/des.h>
#include <crypto/internal/skcipher.h>

#include "cipher.h"

static unsigned int aes_sw_max_len = CONFIG_CRYPTO_DEV_QCE_SW_MAX_LEN;
module_param(aes_sw_max_len, uint, 0644);
MODULE_PARM_DESC(aes_sw_max_len,
                 "Only use hardware for AES requests larger than this "
                 "[0=always use hardware; anything <16 breaks AES-GCM; default="
                 __stringify(CONFIG_CRYPTO_DEV_QCE_SW_MAX_LEN)"]");

static LIST_HEAD(skcipher_algs);

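/*
 * DMA completion callback. Tears down the DMA channel and the source and
 * destination mappings set up in qce_skcipher_async_req_handle(), saves the
 * updated IV that the engine wrote into the result dump buffer, and reports
 * the request status back to the crypto core.
 */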
static void qce_skcipher_done(void *data)
{
        struct crypto_async_request *async_req = data;
        struct skcipher_request *req = skcipher_request_cast(async_req);
        struct qce_cipher_reqctx *rctx = skcipher_request_ctx(req);
        struct qce_alg_template *tmpl = to_cipher_tmpl(crypto_skcipher_reqtfm(req));
        struct qce_device *qce = tmpl->qce;
        struct qce_result_dump *result_buf = qce->dma.result_buf;
        enum dma_data_direction dir_src, dir_dst;
        u32 status;
        int error;
        bool diff_dst;

        diff_dst = (req->src != req->dst);
        dir_src = diff_dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
        dir_dst = diff_dst ? DMA_FROM_DEVICE : DMA_BIDIRECTIONAL;

        error = qce_dma_terminate_all(&qce->dma);
        if (error)
                dev_dbg(qce->dev, "skcipher dma termination error (%d)\n",
                        error);

        if (diff_dst)
                dma_unmap_sg(qce->dev, rctx->src_sg, rctx->src_nents, dir_src);
        dma_unmap_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst);

        sg_free_table(&rctx->dst_tbl);

        error = qce_check_status(qce, &status);
        if (error < 0)
                dev_dbg(qce->dev, "skcipher operation error (%x)\n", status);

        memcpy(rctx->iv, result_buf->encr_cntr_iv, rctx->ivsize);
        qce->async_req_done(tmpl->qce, error);
}

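/*
 * Prepare and kick off one skcipher request on the crypto engine. The
 * destination scatterlist is rebuilt in rctx->dst_tbl with one extra entry
 * appended for qce->dma.result_buf, where the hardware dumps its status and
 * the updated counter/IV; qce_skcipher_done() reads them back from there.
 */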
static int
qce_skcipher_async_req_handle(struct crypto_async_request *async_req)
{
        struct skcipher_request *req = skcipher_request_cast(async_req);
        struct qce_cipher_reqctx *rctx = skcipher_request_ctx(req);
        struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
        struct qce_alg_template *tmpl = to_cipher_tmpl(skcipher);
        struct qce_device *qce = tmpl->qce;
        enum dma_data_direction dir_src, dir_dst;
        struct scatterlist *sg;
        bool diff_dst;
        gfp_t gfp;
        int dst_nents, src_nents, ret;

        rctx->iv = req->iv;
        rctx->ivsize = crypto_skcipher_ivsize(skcipher);
        rctx->cryptlen = req->cryptlen;

        diff_dst = (req->src != req->dst);
        dir_src = diff_dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
        dir_dst = diff_dst ? DMA_FROM_DEVICE : DMA_BIDIRECTIONAL;

        rctx->src_nents = sg_nents_for_len(req->src, req->cryptlen);
        if (diff_dst)
                rctx->dst_nents = sg_nents_for_len(req->dst, req->cryptlen);
        else
                rctx->dst_nents = rctx->src_nents;
        if (rctx->src_nents < 0) {
                dev_err(qce->dev, "Invalid numbers of src SG.\n");
                return rctx->src_nents;
        }
        if (rctx->dst_nents < 0) {
                dev_err(qce->dev, "Invalid numbers of dst SG.\n");
                return rctx->dst_nents;
        }

        /* make room for an extra entry holding the result buffer */
        rctx->dst_nents += 1;

        gfp = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
                GFP_KERNEL : GFP_ATOMIC;

        ret = sg_alloc_table(&rctx->dst_tbl, rctx->dst_nents, gfp);
        if (ret)
                return ret;

        sg_init_one(&rctx->result_sg, qce->dma.result_buf, QCE_RESULT_BUF_SZ);

        sg = qce_sgtable_add(&rctx->dst_tbl, req->dst, req->cryptlen);
        if (IS_ERR(sg)) {
                ret = PTR_ERR(sg);
                goto error_free;
        }

        sg = qce_sgtable_add(&rctx->dst_tbl, &rctx->result_sg,
                             QCE_RESULT_BUF_SZ);
        if (IS_ERR(sg)) {
                ret = PTR_ERR(sg);
                goto error_free;
        }

        sg_mark_end(sg);
        rctx->dst_sg = rctx->dst_tbl.sgl;

        /* dma_map_sg() returns 0 on failure, never a negative value */
        dst_nents = dma_map_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst);
        if (!dst_nents) {
                ret = -EIO;
                goto error_free;
        }

        if (diff_dst) {
                src_nents = dma_map_sg(qce->dev, req->src, rctx->src_nents, dir_src);
                if (!src_nents) {
                        ret = -EIO;
                        goto error_unmap_dst;
                }
                rctx->src_sg = req->src;
        } else {
                rctx->src_sg = rctx->dst_sg;
                src_nents = dst_nents - 1;
        }

        ret = qce_dma_prep_sgs(&qce->dma, rctx->src_sg, src_nents,
                               rctx->dst_sg, dst_nents,
                               qce_skcipher_done, async_req);
        if (ret)
                goto error_unmap_src;

        qce_dma_issue_pending(&qce->dma);

        ret = qce_start(async_req, tmpl->crypto_alg_type, req->cryptlen, 0);
        if (ret)
                goto error_terminate;

        return 0;

error_terminate:
        qce_dma_terminate_all(&qce->dma);
error_unmap_src:
        if (diff_dst)
                dma_unmap_sg(qce->dev, req->src, rctx->src_nents, dir_src);
error_unmap_dst:
        dma_unmap_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst);
error_free:
        sg_free_table(&rctx->dst_tbl);
        return ret;
}

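/*
 * AES setkey. Only 128- and 256-bit keys (per half for XTS) can be
 * programmed into the hardware; the key is also installed into the fallback
 * tfm so that requests routed to software in qce_skcipher_crypt() use the
 * same key.
 */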
static int qce_skcipher_setkey(struct crypto_skcipher *ablk, const u8 *key,
                               unsigned int keylen)
{
        struct crypto_tfm *tfm = crypto_skcipher_tfm(ablk);
        struct qce_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
        unsigned long flags = to_cipher_tmpl(ablk)->alg_flags;
        int ret;

        if (!key || !keylen)
                return -EINVAL;

        switch (IS_XTS(flags) ? keylen >> 1 : keylen) {
        case AES_KEYSIZE_128:
        case AES_KEYSIZE_256:
                memcpy(ctx->enc_key, key, keylen);
                break;
        }

        ret = crypto_skcipher_setkey(ctx->fallback, key, keylen);
        if (!ret)
                ctx->enc_keylen = keylen;

        return ret;
}

static int qce_des_setkey(struct crypto_skcipher *ablk, const u8 *key,
                          unsigned int keylen)
{
        struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(ablk);
        int err;

        err = verify_skcipher_des_key(ablk, key);
        if (err)
                return err;

        ctx->enc_keylen = keylen;
        memcpy(ctx->enc_key, key, keylen);
        return 0;
}

static int qce_des3_setkey(struct crypto_skcipher *ablk, const u8 *key,
                           unsigned int keylen)
{
        struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(ablk);
        int err;

        err = verify_skcipher_des3_key(ablk, key);
        if (err)
                return err;

        ctx->enc_keylen = keylen;
        memcpy(ctx->enc_key, key, keylen);
        return 0;
}

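/*
 * Route a request either to the engine or to the software fallback. The
 * fallback is used for AES key sizes the hardware cannot handle (anything
 * other than 128/256 bit), for short requests up to aes_sw_max_len where
 * the setup cost outweighs the offload, and for AES-XTS lengths that are
 * larger than QCE_SECTOR_SIZE but not a multiple of it.
 */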
static int qce_skcipher_crypt(struct skcipher_request *req, int encrypt)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
        struct qce_cipher_reqctx *rctx = skcipher_request_ctx(req);
        struct qce_alg_template *tmpl = to_cipher_tmpl(tfm);
        unsigned int keylen;
        int ret;

        rctx->flags = tmpl->alg_flags;
        rctx->flags |= encrypt ? QCE_ENCRYPT : QCE_DECRYPT;
        keylen = IS_XTS(rctx->flags) ? ctx->enc_keylen >> 1 : ctx->enc_keylen;

        /*
         * The qce hangs when an AES-XTS request is longer than
         * QCE_SECTOR_SIZE and not a multiple of it; pass such requests
         * to the fallback.
         */
        if (IS_AES(rctx->flags) &&
            (((keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_256) ||
              req->cryptlen <= aes_sw_max_len) ||
             (IS_XTS(rctx->flags) && req->cryptlen > QCE_SECTOR_SIZE &&
              req->cryptlen % QCE_SECTOR_SIZE))) {
                skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
                skcipher_request_set_callback(&rctx->fallback_req,
                                              req->base.flags,
                                              req->base.complete,
                                              req->base.data);
                skcipher_request_set_crypt(&rctx->fallback_req, req->src,
                                           req->dst, req->cryptlen, req->iv);
                ret = encrypt ? crypto_skcipher_encrypt(&rctx->fallback_req) :
                                crypto_skcipher_decrypt(&rctx->fallback_req);
                return ret;
        }

        return tmpl->qce->async_req_enqueue(tmpl->qce, &req->base);
}

static int qce_skcipher_encrypt(struct skcipher_request *req)
{
        return qce_skcipher_crypt(req, 1);
}

static int qce_skcipher_decrypt(struct skcipher_request *req)
{
        return qce_skcipher_crypt(req, 0);
}

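/*
 * The request context of non-AES transforms ends before the embedded
 * fallback skcipher_request; AES transforms reserve extra room for it in
 * qce_skcipher_init_fallback() below.
 */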
static int qce_skcipher_init(struct crypto_skcipher *tfm)
{
        /* take the size without the fallback skcipher_request at the end */
        crypto_skcipher_set_reqsize(tfm, offsetof(struct qce_cipher_reqctx,
                                                  fallback_req));
        return 0;
}

static int qce_skcipher_init_fallback(struct crypto_skcipher *tfm)
{
        struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);

        ctx->fallback = crypto_alloc_skcipher(crypto_tfm_alg_name(&tfm->base),
                                              0, CRYPTO_ALG_NEED_FALLBACK);
        if (IS_ERR(ctx->fallback))
                return PTR_ERR(ctx->fallback);

        crypto_skcipher_set_reqsize(tfm, sizeof(struct qce_cipher_reqctx) +
                                         crypto_skcipher_reqsize(ctx->fallback));
        return 0;
}

static void qce_skcipher_exit(struct crypto_skcipher *tfm)
{
        struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);

        crypto_free_skcipher(ctx->fallback);
}

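/* Static description of one algorithm variant handled by this driver. */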
struct qce_skcipher_def {
        unsigned long flags;
        const char *name;
        const char *drv_name;
        unsigned int blocksize;
        unsigned int chunksize;
        unsigned int ivsize;
        unsigned int min_keysize;
        unsigned int max_keysize;
};

static const struct qce_skcipher_def skcipher_def[] = {
        {
                .flags          = QCE_ALG_AES | QCE_MODE_ECB,
                .name           = "ecb(aes)",
                .drv_name       = "ecb-aes-qce",
                .blocksize      = AES_BLOCK_SIZE,
                .ivsize         = AES_BLOCK_SIZE,
                .min_keysize    = AES_MIN_KEY_SIZE,
                .max_keysize    = AES_MAX_KEY_SIZE,
        },
        {
                .flags          = QCE_ALG_AES | QCE_MODE_CBC,
                .name           = "cbc(aes)",
                .drv_name       = "cbc-aes-qce",
                .blocksize      = AES_BLOCK_SIZE,
                .ivsize         = AES_BLOCK_SIZE,
                .min_keysize    = AES_MIN_KEY_SIZE,
                .max_keysize    = AES_MAX_KEY_SIZE,
        },
        {
                .flags          = QCE_ALG_AES | QCE_MODE_CTR,
                .name           = "ctr(aes)",
                .drv_name       = "ctr-aes-qce",
                .blocksize      = 1,
                .chunksize      = AES_BLOCK_SIZE,
                .ivsize         = AES_BLOCK_SIZE,
                .min_keysize    = AES_MIN_KEY_SIZE,
                .max_keysize    = AES_MAX_KEY_SIZE,
        },
        {
                .flags          = QCE_ALG_AES | QCE_MODE_XTS,
                .name           = "xts(aes)",
                .drv_name       = "xts-aes-qce",
                .blocksize      = AES_BLOCK_SIZE,
                .ivsize         = AES_BLOCK_SIZE,
                .min_keysize    = AES_MIN_KEY_SIZE * 2,
                .max_keysize    = AES_MAX_KEY_SIZE * 2,
        },
        {
                .flags          = QCE_ALG_DES | QCE_MODE_ECB,
                .name           = "ecb(des)",
                .drv_name       = "ecb-des-qce",
                .blocksize      = DES_BLOCK_SIZE,
                .ivsize         = 0,
                .min_keysize    = DES_KEY_SIZE,
                .max_keysize    = DES_KEY_SIZE,
        },
        {
                .flags          = QCE_ALG_DES | QCE_MODE_CBC,
                .name           = "cbc(des)",
                .drv_name       = "cbc-des-qce",
                .blocksize      = DES_BLOCK_SIZE,
                .ivsize         = DES_BLOCK_SIZE,
                .min_keysize    = DES_KEY_SIZE,
                .max_keysize    = DES_KEY_SIZE,
        },
        {
                .flags          = QCE_ALG_3DES | QCE_MODE_ECB,
                .name           = "ecb(des3_ede)",
                .drv_name       = "ecb-3des-qce",
                .blocksize      = DES3_EDE_BLOCK_SIZE,
                .ivsize         = 0,
                .min_keysize    = DES3_EDE_KEY_SIZE,
                .max_keysize    = DES3_EDE_KEY_SIZE,
        },
        {
                .flags          = QCE_ALG_3DES | QCE_MODE_CBC,
                .name           = "cbc(des3_ede)",
                .drv_name       = "cbc-3des-qce",
                .blocksize      = DES3_EDE_BLOCK_SIZE,
                .ivsize         = DES3_EDE_BLOCK_SIZE,
                .min_keysize    = DES3_EDE_KEY_SIZE,
                .max_keysize    = DES3_EDE_KEY_SIZE,
        },
};

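/*
 * Allocate a template, fill in the skcipher_alg from the static definition,
 * and register it with the crypto API. AES variants are flagged
 * CRYPTO_ALG_NEED_FALLBACK and get a fallback cipher allocated in their
 * init callback.
 */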
static int qce_skcipher_register_one(const struct qce_skcipher_def *def,
                                     struct qce_device *qce)
{
        struct qce_alg_template *tmpl;
        struct skcipher_alg *alg;
        int ret;

        tmpl = kzalloc(sizeof(*tmpl), GFP_KERNEL);
        if (!tmpl)
                return -ENOMEM;

        alg = &tmpl->alg.skcipher;

        snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
        snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
                 def->drv_name);

        alg->base.cra_blocksize = def->blocksize;
        alg->chunksize = def->chunksize;
        alg->ivsize = def->ivsize;
        alg->min_keysize = def->min_keysize;
        alg->max_keysize = def->max_keysize;
        alg->setkey = IS_3DES(def->flags) ? qce_des3_setkey :
                      IS_DES(def->flags) ? qce_des_setkey :
                      qce_skcipher_setkey;
        alg->encrypt = qce_skcipher_encrypt;
        alg->decrypt = qce_skcipher_decrypt;

        alg->base.cra_priority = 300;
        alg->base.cra_flags = CRYPTO_ALG_ASYNC |
                              CRYPTO_ALG_ALLOCATES_MEMORY |
                              CRYPTO_ALG_KERN_DRIVER_ONLY;
        alg->base.cra_ctxsize = sizeof(struct qce_cipher_ctx);
        alg->base.cra_alignmask = 0;
        alg->base.cra_module = THIS_MODULE;

        if (IS_AES(def->flags)) {
                alg->base.cra_flags |= CRYPTO_ALG_NEED_FALLBACK;
                alg->init = qce_skcipher_init_fallback;
                alg->exit = qce_skcipher_exit;
        } else {
                alg->init = qce_skcipher_init;
        }

        INIT_LIST_HEAD(&tmpl->entry);
        tmpl->crypto_alg_type = CRYPTO_ALG_TYPE_SKCIPHER;
        tmpl->alg_flags = def->flags;
        tmpl->qce = qce;

        ret = crypto_register_skcipher(alg);
        if (ret) {
                dev_err(qce->dev, "%s registration failed\n", alg->base.cra_name);
                kfree(tmpl);
                return ret;
        }

        list_add_tail(&tmpl->entry, &skcipher_algs);
        dev_dbg(qce->dev, "%s is registered\n", alg->base.cra_name);
        return 0;
}

static void qce_skcipher_unregister(struct qce_device *qce)
{
        struct qce_alg_template *tmpl, *n;

        list_for_each_entry_safe(tmpl, n, &skcipher_algs, entry) {
                crypto_unregister_skcipher(&tmpl->alg.skcipher);
                list_del(&tmpl->entry);
                kfree(tmpl);
        }
}

static int qce_skcipher_register(struct qce_device *qce)
{
        int ret, i;

        for (i = 0; i < ARRAY_SIZE(skcipher_def); i++) {
                ret = qce_skcipher_register_one(&skcipher_def[i], qce);
                if (ret)
                        goto err;
        }

        return 0;
err:
        qce_skcipher_unregister(qce);
        return ret;
}

const struct qce_algo_ops skcipher_ops = {
        .type = CRYPTO_ALG_TYPE_SKCIPHER,
        .register_algs = qce_skcipher_register,
        .unregister_algs = qce_skcipher_unregister,
        .async_req_handle = qce_skcipher_async_req_handle,
};