/*
 * Copyright (C) 2017 Marvell
 *
 * Antoine Tenart <antoine.tenart@free-electrons.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>

#include <crypto/aes.h>
#include <crypto/skcipher.h>
#include <crypto/internal/skcipher.h>

#include "safexcel.h"
enum safexcel_cipher_direction {
	SAFEXCEL_ENCRYPT,
	SAFEXCEL_DECRYPT,
};

struct safexcel_cipher_ctx {
	struct safexcel_context base;
	struct safexcel_crypto_priv *priv;

	enum safexcel_cipher_direction direction;
	u32 mode;

	__le32 key[8];
	unsigned int key_len;
};

struct safexcel_cipher_req {
	bool needs_inv;
};
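/*
 * Build the EIP197 token for one request. For CBC the IV is copied into
 * the first four token words; a single DIRECTION instruction then covers
 * the whole payload.
 */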
static void safexcel_cipher_token(struct safexcel_cipher_ctx *ctx,
				  struct crypto_async_request *async,
				  struct safexcel_command_desc *cdesc,
				  u32 length)
{
	struct skcipher_request *req = skcipher_request_cast(async);
	struct safexcel_token *token;
	unsigned offset = 0;

	if (ctx->mode == CONTEXT_CONTROL_CRYPTO_MODE_CBC) {
		offset = AES_BLOCK_SIZE / sizeof(u32);
		memcpy(cdesc->control_data.token, req->iv, AES_BLOCK_SIZE);

		cdesc->control_data.options |= EIP197_OPTION_4_TOKEN_IV_CMD;
	}

	token = (struct safexcel_token *)(cdesc->control_data.token + offset);

	token[0].opcode = EIP197_TOKEN_OPCODE_DIRECTION;
	token[0].packet_length = length;
	token[0].stat = EIP197_TOKEN_STAT_LAST_PACKET;
	token[0].instructions = EIP197_TOKEN_INS_LAST |
				EIP197_TOKEN_INS_TYPE_CRYTO |
				EIP197_TOKEN_INS_TYPE_OUTPUT;
}
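/*
 * Changing the key while a context record may still be cached by the
 * engine requires invalidating that record. setkey only flags the need
 * for invalidation here; the actual invalidation request is issued on
 * the next send.
 */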
static int safexcel_aes_setkey(struct crypto_skcipher *ctfm, const u8 *key,
			       unsigned int len)
{
	struct crypto_tfm *tfm = crypto_skcipher_tfm(ctfm);
	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_aes_ctx aes;
	int ret, i;

	ret = crypto_aes_expand_key(&aes, key, len);
	if (ret) {
		crypto_skcipher_set_flags(ctfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return ret;
	}

	for (i = 0; i < len / sizeof(u32); i++) {
		if (ctx->key[i] != cpu_to_le32(aes.key_enc[i])) {
			ctx->base.needs_inv = true;
			break;
		}
	}

	for (i = 0; i < len / sizeof(u32); i++)
		ctx->key[i] = cpu_to_le32(aes.key_enc[i]);

	ctx->key_len = len;

	memzero_explicit(&aes, sizeof(aes));

	return 0;
}
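/*
 * Fill in the control words of the first command descriptor: direction,
 * cipher mode and a context size derived from the key length.
 */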
static int safexcel_context_control(struct safexcel_cipher_ctx *ctx,
				    struct safexcel_command_desc *cdesc)
{
	struct safexcel_crypto_priv *priv = ctx->priv;
	int ctrl_size;

	if (ctx->direction == SAFEXCEL_ENCRYPT)
		cdesc->control_data.control0 |= CONTEXT_CONTROL_TYPE_CRYPTO_OUT;
	else
		cdesc->control_data.control0 |= CONTEXT_CONTROL_TYPE_CRYPTO_IN;

	cdesc->control_data.control0 |= CONTEXT_CONTROL_KEY_EN;
	cdesc->control_data.control1 |= ctx->mode;

	/* ctrl_size is the context record size in 32-bit words, i.e. the
	 * size of the AES key.
	 */
	switch (ctx->key_len) {
	case AES_KEYSIZE_128:
		cdesc->control_data.control0 |= CONTEXT_CONTROL_CRYPTO_ALG_AES128;
		ctrl_size = 4;
		break;
	case AES_KEYSIZE_192:
		cdesc->control_data.control0 |= CONTEXT_CONTROL_CRYPTO_ALG_AES192;
		ctrl_size = 6;
		break;
	case AES_KEYSIZE_256:
		cdesc->control_data.control0 |= CONTEXT_CONTROL_CRYPTO_ALG_AES256;
		ctrl_size = 8;
		break;
	default:
		dev_err(priv->dev, "aes keysize not supported: %u\n",
			ctx->key_len);
		return -EINVAL;
	}

	cdesc->control_data.control0 |= CONTEXT_CONTROL_SIZE(ctrl_size);

	return 0;
}
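/*
 * Collect the result descriptors of a completed cipher request, report
 * any engine error and unmap the source/destination scatterlists.
 */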
static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int ring,
				      struct crypto_async_request *async,
				      bool *should_complete, int *ret)
{
	struct skcipher_request *req = skcipher_request_cast(async);
	struct safexcel_result_desc *rdesc;
	int ndesc = 0;

	*ret = 0;

	spin_lock_bh(&priv->ring[ring].egress_lock);
	do {
		rdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].rdr);
		if (IS_ERR(rdesc)) {
			dev_err(priv->dev,
				"cipher: result: could not retrieve the result descriptor\n");
			*ret = PTR_ERR(rdesc);
			break;
		}

		if (rdesc->result_data.error_code) {
			dev_err(priv->dev,
				"cipher: result: result descriptor error (%d)\n",
				rdesc->result_data.error_code);
			*ret = -EIO;
		}

		ndesc++;
	} while (!rdesc->last_seg);

	safexcel_complete(priv, ring);
	spin_unlock_bh(&priv->ring[ring].egress_lock);

	if (req->src == req->dst) {
		dma_unmap_sg(priv->dev, req->src,
			     sg_nents_for_len(req->src, req->cryptlen),
			     DMA_BIDIRECTIONAL);
	} else {
		dma_unmap_sg(priv->dev, req->src,
			     sg_nents_for_len(req->src, req->cryptlen),
			     DMA_TO_DEVICE);
		dma_unmap_sg(priv->dev, req->dst,
			     sg_nents_for_len(req->dst, req->cryptlen),
			     DMA_FROM_DEVICE);
	}

	*should_complete = true;

	return ndesc;
}
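/*
 * Turn a cipher request into chains of command and result descriptors.
 * If either ring runs out of space, every descriptor written so far is
 * rolled back and the scatterlists are unmapped.
 */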
static int safexcel_aes_send(struct crypto_async_request *async,
			     int ring, struct safexcel_request *request,
			     int *commands, int *results)
{
	struct skcipher_request *req = skcipher_request_cast(async);
	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct safexcel_crypto_priv *priv = ctx->priv;
	struct safexcel_command_desc *cdesc;
	struct safexcel_result_desc *rdesc;
	struct scatterlist *sg;
	int nr_src, nr_dst, n_cdesc = 0, n_rdesc = 0, queued = req->cryptlen;
	int i, ret = 0;

	if (req->src == req->dst) {
		nr_src = dma_map_sg(priv->dev, req->src,
				    sg_nents_for_len(req->src, req->cryptlen),
				    DMA_BIDIRECTIONAL);
		nr_dst = nr_src;
		if (!nr_src)
			return -EINVAL;
	} else {
		nr_src = dma_map_sg(priv->dev, req->src,
				    sg_nents_for_len(req->src, req->cryptlen),
				    DMA_TO_DEVICE);
		if (!nr_src)
			return -EINVAL;

		nr_dst = dma_map_sg(priv->dev, req->dst,
				    sg_nents_for_len(req->dst, req->cryptlen),
				    DMA_FROM_DEVICE);
		if (!nr_dst) {
			dma_unmap_sg(priv->dev, req->src,
				     sg_nents_for_len(req->src, req->cryptlen),
				     DMA_TO_DEVICE);
			return -EINVAL;
		}
	}

	memcpy(ctx->base.ctxr->data, ctx->key, ctx->key_len);

	spin_lock_bh(&priv->ring[ring].egress_lock);

	/* command descriptors */
	for_each_sg(req->src, sg, nr_src, i) {
		int len = sg_dma_len(sg);

		/* Do not overflow the request */
		if (queued - len < 0)
			len = queued;

		cdesc = safexcel_add_cdesc(priv, ring, !n_cdesc, !(queued - len),
					   sg_dma_address(sg), len, req->cryptlen,
					   ctx->base.ctxr_dma);
		if (IS_ERR(cdesc)) {
			/* No space left in the command descriptor ring */
			ret = PTR_ERR(cdesc);
			goto cdesc_rollback;
		}
		n_cdesc++;

		/* the control data and token only go in the first descriptor */
		if (n_cdesc == 1) {
			safexcel_context_control(ctx, cdesc);
			safexcel_cipher_token(ctx, async, cdesc, req->cryptlen);
		}

		queued -= len;
		if (!queued)
			break;
	}

	/* result descriptors */
	for_each_sg(req->dst, sg, nr_dst, i) {
		bool first = !i, last = (i == nr_dst - 1);
		u32 len = sg_dma_len(sg);

		rdesc = safexcel_add_rdesc(priv, ring, first, last,
					   sg_dma_address(sg), len);
		if (IS_ERR(rdesc)) {
			/* No space left in the result descriptor ring */
			ret = PTR_ERR(rdesc);
			goto rdesc_rollback;
		}
		n_rdesc++;
	}

	spin_unlock_bh(&priv->ring[ring].egress_lock);

	request->req = &req->base;

	*commands = n_cdesc;
	*results = n_rdesc;

	return 0;

rdesc_rollback:
	for (i = 0; i < n_rdesc; i++)
		safexcel_ring_rollback_wptr(priv, &priv->ring[ring].rdr);
cdesc_rollback:
	for (i = 0; i < n_cdesc; i++)
		safexcel_ring_rollback_wptr(priv, &priv->ring[ring].cdr);

	spin_unlock_bh(&priv->ring[ring].egress_lock);

	if (req->src == req->dst) {
		dma_unmap_sg(priv->dev, req->src,
			     sg_nents_for_len(req->src, req->cryptlen),
			     DMA_BIDIRECTIONAL);
	} else {
		dma_unmap_sg(priv->dev, req->src,
			     sg_nents_for_len(req->src, req->cryptlen),
			     DMA_TO_DEVICE);
		dma_unmap_sg(priv->dev, req->dst,
			     sg_nents_for_len(req->dst, req->cryptlen),
			     DMA_FROM_DEVICE);
	}

	return ret;
}
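/*
 * Handle the result of a context invalidation: free the context record
 * if the tfm is going away, otherwise move the context to a fresh ring
 * and re-queue the request that triggered the invalidation.
 */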
static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv,
				      int ring,
				      struct crypto_async_request *async,
				      bool *should_complete, int *ret)
{
	struct skcipher_request *req = skcipher_request_cast(async);
	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct safexcel_result_desc *rdesc;
	int ndesc = 0, enq_ret;

	*ret = 0;

	spin_lock_bh(&priv->ring[ring].egress_lock);
	do {
		rdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].rdr);
		if (IS_ERR(rdesc)) {
			dev_err(priv->dev,
				"cipher: invalidate: could not retrieve the result descriptor\n");
			*ret = PTR_ERR(rdesc);
			break;
		}

		if (rdesc->result_data.error_code) {
			dev_err(priv->dev, "cipher: invalidate: result descriptor error (%d)\n",
				rdesc->result_data.error_code);
			*ret = -EIO;
		}

		ndesc++;
	} while (!rdesc->last_seg);

	safexcel_complete(priv, ring);
	spin_unlock_bh(&priv->ring[ring].egress_lock);

	if (ctx->base.exit_inv) {
		dma_pool_free(priv->context_pool, ctx->base.ctxr,
			      ctx->base.ctxr_dma);

		*should_complete = true;

		return ndesc;
	}

	ring = safexcel_select_ring(priv);
	ctx->base.ring = ring;

	spin_lock_bh(&priv->ring[ring].queue_lock);
	enq_ret = crypto_enqueue_request(&priv->ring[ring].queue, async);
	spin_unlock_bh(&priv->ring[ring].queue_lock);

	if (enq_ret != -EINPROGRESS)
		*ret = enq_ret;

	if (!priv->ring[ring].need_dequeue)
		safexcel_dequeue(priv, ring);

	*should_complete = false;

	return ndesc;
}
static int safexcel_handle_result(struct safexcel_crypto_priv *priv, int ring,
				  struct crypto_async_request *async,
				  bool *should_complete, int *ret)
{
	struct skcipher_request *req = skcipher_request_cast(async);
	struct safexcel_cipher_req *sreq = skcipher_request_ctx(req);
	int err;

	if (sreq->needs_inv) {
		sreq->needs_inv = false;
		err = safexcel_handle_inv_result(priv, ring, async,
						 should_complete, ret);
	} else {
		err = safexcel_handle_req_result(priv, ring, async,
						 should_complete, ret);
	}

	return err;
}
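/* Queue an invalidation command for this context's record. */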
static int safexcel_cipher_send_inv(struct crypto_async_request *async,
				    int ring, struct safexcel_request *request,
				    int *commands, int *results)
{
	struct skcipher_request *req = skcipher_request_cast(async);
	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct safexcel_crypto_priv *priv = ctx->priv;
	int ret;

	ret = safexcel_invalidate_cache(async, &ctx->base, priv,
					ctx->base.ctxr_dma, ring, request);
	if (unlikely(ret))
		return ret;

	*commands = 1;
	*results = 1;

	return 0;
}
static int safexcel_send(struct crypto_async_request *async,
			 int ring, struct safexcel_request *request,
			 int *commands, int *results)
{
	struct skcipher_request *req = skcipher_request_cast(async);
	struct safexcel_cipher_req *sreq = skcipher_request_ctx(req);
	int ret;

	if (sreq->needs_inv)
		ret = safexcel_cipher_send_inv(async, ring, request,
					       commands, results);
	else
		ret = safexcel_aes_send(async, ring, request,
					commands, results);

	return ret;
}
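/*
 * Synchronously invalidate a context record on tfm exit: queue an
 * invalidation request on the context's ring and sleep until its
 * completion callback fires.
 */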
static int safexcel_cipher_exit_inv(struct crypto_tfm *tfm)
{
	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
	struct safexcel_crypto_priv *priv = ctx->priv;
	SKCIPHER_REQUEST_ON_STACK(req, __crypto_skcipher_cast(tfm));
	struct safexcel_cipher_req *sreq = skcipher_request_ctx(req);
	struct safexcel_inv_result result = {};
	int ring = ctx->base.ring;

	memset(req, 0, sizeof(struct skcipher_request));

	/* create invalidation request */
	init_completion(&result.completion);
	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				      safexcel_inv_complete, &result);

	skcipher_request_set_tfm(req, __crypto_skcipher_cast(tfm));
	ctx = crypto_tfm_ctx(req->base.tfm);
	ctx->base.exit_inv = true;
	sreq->needs_inv = true;

	spin_lock_bh(&priv->ring[ring].queue_lock);
	crypto_enqueue_request(&priv->ring[ring].queue, &req->base);
	spin_unlock_bh(&priv->ring[ring].queue_lock);

	if (!priv->ring[ring].need_dequeue)
		safexcel_dequeue(priv, ring);

	wait_for_completion(&result.completion);

	if (result.error) {
		dev_warn(priv->dev,
			 "cipher: sync: invalidate: completion error %d\n",
			 result.error);
		return result.error;
	}

	return 0;
}
static int safexcel_aes(struct skcipher_request *req,
			enum safexcel_cipher_direction dir, u32 mode)
{
	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct safexcel_cipher_req *sreq = skcipher_request_ctx(req);
	struct safexcel_crypto_priv *priv = ctx->priv;
	int ret, ring;

	sreq->needs_inv = false;
	ctx->direction = dir;
	ctx->mode = mode;

	if (ctx->base.ctxr) {
		if (ctx->base.needs_inv) {
			sreq->needs_inv = true;
			ctx->base.needs_inv = false;
		}
	} else {
		ctx->base.ring = safexcel_select_ring(priv);
		ctx->base.ctxr = dma_pool_zalloc(priv->context_pool,
						 EIP197_GFP_FLAGS(req->base),
						 &ctx->base.ctxr_dma);
		if (!ctx->base.ctxr)
			return -ENOMEM;
	}

	ring = ctx->base.ring;

	spin_lock_bh(&priv->ring[ring].queue_lock);
	ret = crypto_enqueue_request(&priv->ring[ring].queue, &req->base);
	spin_unlock_bh(&priv->ring[ring].queue_lock);

	if (!priv->ring[ring].need_dequeue)
		safexcel_dequeue(priv, ring);

	return ret;
}
static int safexcel_ecb_aes_encrypt(struct skcipher_request *req)
{
	return safexcel_aes(req, SAFEXCEL_ENCRYPT,
			    CONTEXT_CONTROL_CRYPTO_MODE_ECB);
}

static int safexcel_ecb_aes_decrypt(struct skcipher_request *req)
{
	return safexcel_aes(req, SAFEXCEL_DECRYPT,
			    CONTEXT_CONTROL_CRYPTO_MODE_ECB);
}
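/*
 * Wire the template's send/handle_result hooks into the context and
 * reserve per-request space for the invalidation flag.
 */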
static int safexcel_skcipher_cra_init(struct crypto_tfm *tfm)
{
	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
	struct safexcel_alg_template *tmpl =
		container_of(tfm->__crt_alg, struct safexcel_alg_template,
			     alg.skcipher.base);

	ctx->priv = tmpl->priv;
	ctx->base.send = safexcel_send;
	ctx->base.handle_result = safexcel_handle_result;

	crypto_skcipher_set_reqsize(__crypto_skcipher_cast(tfm),
				    sizeof(struct safexcel_cipher_req));

	return 0;
}
static void safexcel_skcipher_cra_exit(struct crypto_tfm *tfm)
{
	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
	struct safexcel_crypto_priv *priv = ctx->priv;
	int ret;

	memzero_explicit(ctx->key, 8 * sizeof(u32));

	/* context not allocated, skip invalidation */
	if (!ctx->base.ctxr)
		return;

	memzero_explicit(ctx->base.ctxr->data, 8 * sizeof(u32));

	ret = safexcel_cipher_exit_inv(tfm);
	if (ret)
		dev_warn(priv->dev, "cipher: invalidation error %d\n", ret);
}
struct safexcel_alg_template safexcel_alg_ecb_aes = {
	.type = SAFEXCEL_ALG_TYPE_SKCIPHER,
	.alg.skcipher = {
		.setkey = safexcel_aes_setkey,
		.encrypt = safexcel_ecb_aes_encrypt,
		.decrypt = safexcel_ecb_aes_decrypt,
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
		.base = {
			.cra_name = "ecb(aes)",
			.cra_driver_name = "safexcel-ecb-aes",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
			.cra_alignmask = 0,
			.cra_init = safexcel_skcipher_cra_init,
			.cra_exit = safexcel_skcipher_cra_exit,
			.cra_module = THIS_MODULE,
		},
	},
};
static int safexcel_cbc_aes_encrypt(struct skcipher_request *req)
{
	return safexcel_aes(req, SAFEXCEL_ENCRYPT,
			    CONTEXT_CONTROL_CRYPTO_MODE_CBC);
}

static int safexcel_cbc_aes_decrypt(struct skcipher_request *req)
{
	return safexcel_aes(req, SAFEXCEL_DECRYPT,
			    CONTEXT_CONTROL_CRYPTO_MODE_CBC);
}
struct safexcel_alg_template safexcel_alg_cbc_aes = {
	.type = SAFEXCEL_ALG_TYPE_SKCIPHER,
	.alg.skcipher = {
		.setkey = safexcel_aes_setkey,
		.encrypt = safexcel_cbc_aes_encrypt,
		.decrypt = safexcel_cbc_aes_decrypt,
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
		.ivsize = AES_BLOCK_SIZE,
		.base = {
			.cra_name = "cbc(aes)",
			.cra_driver_name = "safexcel-cbc-aes",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
			.cra_alignmask = 0,
			.cra_init = safexcel_skcipher_cra_init,
			.cra_exit = safexcel_skcipher_cra_exit,
			.cra_module = THIS_MODULE,
		},
	},
};