// SPDX-License-Identifier: GPL-2.0
/*
 * sun8i-ce-cipher.c - hardware cryptographic offloader for
 * Allwinner H3/A64/H5/H2+/H6/R40 SoC
 *
 * Copyright (C) 2016-2019 Corentin LABBE <clabbe.montjoie@gmail.com>
 *
 * This file adds support for AES ciphers with 128/192/256-bit keys in
 * CBC and ECB mode.
 *
 * You can find a link to the datasheet in Documentation/arm/sunxi.rst
 */

#include <linux/bottom_half.h>
#include <linux/crypto.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/pm_runtime.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/des.h>
#include <crypto/internal/skcipher.h>
#include "sun8i-ce.h"
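
/*
 * sun8i_ce_cipher_need_fallback() decides whether a request can be
 * handled by the CE hardware or must go to the software fallback:
 * the scatterlists must fit in one task descriptor (MAX_SG entries),
 * the length must be a non-zero multiple of 16 bytes no shorter than
 * the IV, and every SG entry must be word-sized and word-aligned.
 */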
static int sun8i_ce_cipher_need_fallback(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct scatterlist *sg;

	if (sg_nents(areq->src) > MAX_SG || sg_nents(areq->dst) > MAX_SG)
		return true;

	if (areq->cryptlen < crypto_skcipher_ivsize(tfm))
		return true;

	if (areq->cryptlen == 0 || areq->cryptlen % 16)
		return true;

	sg = areq->src;
	while (sg) {
		if (sg->length % 4 || !IS_ALIGNED(sg->offset, sizeof(u32)))
			return true;
		sg = sg_next(sg);
	}
	sg = areq->dst;
	while (sg) {
		if (sg->length % 4 || !IS_ALIGNED(sg->offset, sizeof(u32)))
			return true;
		sg = sg_next(sg);
	}
	return false;
}
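
/*
 * Run the request on the software fallback transform, preserving the
 * caller's flags, completion callback and requested direction.
 */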
static int sun8i_ce_cipher_fallback(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
	int err;
#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
	struct sun8i_ce_alg_template *algt;

	algt = container_of(alg, struct sun8i_ce_alg_template, alg.skcipher);
	algt->stat_fb++;
#endif

	skcipher_request_set_tfm(&rctx->fallback_req, op->fallback_tfm);
	skcipher_request_set_callback(&rctx->fallback_req, areq->base.flags,
				      areq->base.complete, areq->base.data);
	skcipher_request_set_crypt(&rctx->fallback_req, areq->src, areq->dst,
				   areq->cryptlen, areq->iv);
	if (rctx->op_dir & CE_DECRYPTION)
		err = crypto_skcipher_decrypt(&rctx->fallback_req);
	else
		err = crypto_skcipher_encrypt(&rctx->fallback_req);
	return err;
}
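
/*
 * Build the CE task descriptor for one skcipher request: program the
 * algorithm, direction and key size, then DMA-map the key, the IV and
 * the source/destination scatterlists.
 */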
static int sun8i_ce_cipher_prepare(struct crypto_engine *engine, void *async_req)
{
	struct skcipher_request *areq = container_of(async_req, struct skcipher_request, base);
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun8i_ce_dev *ce = op->ce;
	struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
	struct sun8i_ce_alg_template *algt;
	struct sun8i_ce_flow *chan;
	struct ce_task *cet;
	struct scatterlist *sg;
	unsigned int todo, len, offset, ivsize;
	u32 common, sym;
	int flow, i;
	int nr_sgs = 0;
	int nr_sgd = 0;
	int err = 0;

	algt = container_of(alg, struct sun8i_ce_alg_template, alg.skcipher);

	dev_dbg(ce->dev, "%s %s %u %x IV(%p %u) key=%u\n", __func__,
		crypto_tfm_alg_name(areq->base.tfm),
		areq->cryptlen,
		rctx->op_dir, areq->iv, crypto_skcipher_ivsize(tfm),
		op->keylen);

#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
	algt->stat_req++;
#endif

	flow = rctx->flow;

	chan = &ce->chanlist[flow];

	cet = chan->tl;
	memset(cet, 0, sizeof(struct ce_task));

	cet->t_id = cpu_to_le32(flow);
	common = ce->variant->alg_cipher[algt->ce_algo_id];
	common |= rctx->op_dir | CE_COMM_INT;
	cet->t_common_ctl = cpu_to_le32(common);
	/* CTS and recent CE (H6) need the length in bytes, in words otherwise */
	if (ce->variant->cipher_t_dlen_in_bytes)
		cet->t_dlen = cpu_to_le32(areq->cryptlen);
	else
		cet->t_dlen = cpu_to_le32(areq->cryptlen / 4);

	sym = ce->variant->op_mode[algt->ce_blockmode];
	len = op->keylen;
	switch (len) {
	case 128 / 8:
		sym |= CE_AES_128BITS;
		break;
	case 192 / 8:
		sym |= CE_AES_192BITS;
		break;
	case 256 / 8:
		sym |= CE_AES_256BITS;
		break;
	}

	cet->t_sym_ctl = cpu_to_le32(sym);
	cet->t_asym_ctl = 0;
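
	/* Map the raw key for the engine and point the descriptor at it. */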
	rctx->addr_key = dma_map_single(ce->dev, op->key, op->keylen, DMA_TO_DEVICE);
	if (dma_mapping_error(ce->dev, rctx->addr_key)) {
		dev_err(ce->dev, "Cannot DMA MAP KEY\n");
		err = -EFAULT;
		goto theend;
	}
	cet->t_key = cpu_to_le32(rctx->addr_key);
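
	/*
	 * The IV is copied into a DMA-able bounce buffer. For decryption,
	 * the last ciphertext block is saved first: it becomes the next IV,
	 * and an in-place operation would otherwise overwrite it.
	 */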
	ivsize = crypto_skcipher_ivsize(tfm);
	if (areq->iv && crypto_skcipher_ivsize(tfm) > 0) {
		rctx->ivlen = ivsize;
		rctx->bounce_iv = kzalloc(ivsize, GFP_KERNEL | GFP_DMA);
		if (!rctx->bounce_iv) {
			err = -ENOMEM;
			goto theend_key;
		}
		if (rctx->op_dir & CE_DECRYPTION) {
			rctx->backup_iv = kzalloc(ivsize, GFP_KERNEL);
			if (!rctx->backup_iv) {
				err = -ENOMEM;
				goto theend_key;
			}
			offset = areq->cryptlen - ivsize;
			scatterwalk_map_and_copy(rctx->backup_iv, areq->src,
						 offset, ivsize, 0);
		}
		memcpy(rctx->bounce_iv, areq->iv, ivsize);
		rctx->addr_iv = dma_map_single(ce->dev, rctx->bounce_iv, rctx->ivlen,
					       DMA_TO_DEVICE);
		if (dma_mapping_error(ce->dev, rctx->addr_iv)) {
			dev_err(ce->dev, "Cannot DMA MAP IV\n");
			err = -ENOMEM;
			goto theend_iv;
		}
		cet->t_iv = cpu_to_le32(rctx->addr_iv);
	}
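
	/*
	 * Map the data: an in-place request (src == dst) uses a single
	 * bidirectional mapping, otherwise src and dst are mapped
	 * separately.
	 */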
	if (areq->src == areq->dst) {
		nr_sgs = dma_map_sg(ce->dev, areq->src, sg_nents(areq->src),
				    DMA_BIDIRECTIONAL);
		if (nr_sgs <= 0 || nr_sgs > MAX_SG) {
			dev_err(ce->dev, "Invalid sg number %d\n", nr_sgs);
			err = -EINVAL;
			goto theend_iv;
		}
		nr_sgd = nr_sgs;
	} else {
		nr_sgs = dma_map_sg(ce->dev, areq->src, sg_nents(areq->src),
				    DMA_TO_DEVICE);
		if (nr_sgs <= 0 || nr_sgs > MAX_SG) {
			dev_err(ce->dev, "Invalid sg number %d\n", nr_sgs);
			err = -EINVAL;
			goto theend_iv;
		}
		nr_sgd = dma_map_sg(ce->dev, areq->dst, sg_nents(areq->dst),
				    DMA_FROM_DEVICE);
		if (nr_sgd <= 0 || nr_sgd > MAX_SG) {
			dev_err(ce->dev, "Invalid sg number %d\n", nr_sgd);
			err = -EINVAL;
			goto theend_sgs;
		}
	}
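
	/*
	 * Fill the source/destination entries of the task descriptor.
	 * The hardware takes each SG length in 32-bit words.
	 */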
	len = areq->cryptlen;
	for_each_sg(areq->src, sg, nr_sgs, i) {
		cet->t_src[i].addr = cpu_to_le32(sg_dma_address(sg));
		todo = min(len, sg_dma_len(sg));
		cet->t_src[i].len = cpu_to_le32(todo / 4);
		dev_dbg(ce->dev, "%s total=%u SG(%d %u off=%d) todo=%u\n", __func__,
			areq->cryptlen, i, cet->t_src[i].len, sg->offset, todo);
		len -= todo;
	}
	if (len > 0) {
		dev_err(ce->dev, "remaining len %d\n", len);
		err = -EINVAL;
		goto theend_sgs;
	}

	len = areq->cryptlen;
	for_each_sg(areq->dst, sg, nr_sgd, i) {
		cet->t_dst[i].addr = cpu_to_le32(sg_dma_address(sg));
		todo = min(len, sg_dma_len(sg));
		cet->t_dst[i].len = cpu_to_le32(todo / 4);
		dev_dbg(ce->dev, "%s total=%u SG(%d %u off=%d) todo=%u\n", __func__,
			areq->cryptlen, i, cet->t_dst[i].len, sg->offset, todo);
		len -= todo;
	}
	if (len > 0) {
		dev_err(ce->dev, "remaining len %d\n", len);
		err = -EINVAL;
		goto theend_sgs;
	}

	chan->timeout = areq->cryptlen;
	rctx->nr_sgs = nr_sgs;
	rctx->nr_sgd = nr_sgd;
	return 0;
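
/* Error paths: undo the mappings in reverse order of setup. */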
theend_sgs:
	if (areq->src == areq->dst) {
		dma_unmap_sg(ce->dev, areq->src, nr_sgs, DMA_BIDIRECTIONAL);
	} else {
		if (nr_sgs > 0)
			dma_unmap_sg(ce->dev, areq->src, nr_sgs, DMA_TO_DEVICE);
		dma_unmap_sg(ce->dev, areq->dst, nr_sgd, DMA_FROM_DEVICE);
	}

theend_iv:
	if (areq->iv && ivsize > 0) {
		if (rctx->addr_iv)
			dma_unmap_single(ce->dev, rctx->addr_iv, rctx->ivlen, DMA_TO_DEVICE);
		offset = areq->cryptlen - ivsize;
		if (rctx->op_dir & CE_DECRYPTION) {
			memcpy(areq->iv, rctx->backup_iv, ivsize);
			kfree_sensitive(rctx->backup_iv);
		} else {
			scatterwalk_map_and_copy(areq->iv, areq->dst, offset,
						 ivsize, 0);
		}
		kfree(rctx->bounce_iv);
	}

theend_key:
	dma_unmap_single(ce->dev, rctx->addr_key, op->keylen, DMA_TO_DEVICE);

theend:
	return err;
}
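
/*
 * Hand the prepared task to the hardware and wait for completion, then
 * let the crypto engine finalize the request. Bottom halves are disabled
 * around the finalize call so completion callbacks run in the expected
 * softirq-like context.
 */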
static int sun8i_ce_cipher_run(struct crypto_engine *engine, void *areq)
{
	struct skcipher_request *breq = container_of(areq, struct skcipher_request, base);
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(breq);
	struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun8i_ce_dev *ce = op->ce;
	struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(breq);
	int flow, err;

	flow = rctx->flow;
	err = sun8i_ce_run_task(ce, flow, crypto_tfm_alg_name(breq->base.tfm));
	local_bh_disable();
	crypto_finalize_skcipher_request(engine, breq, err);
	local_bh_enable();
	return 0;
}
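
/*
 * Undo everything sun8i_ce_cipher_prepare() set up and return the next
 * IV to the request: the saved ciphertext block on decryption, the last
 * output block on encryption.
 */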
static int sun8i_ce_cipher_unprepare(struct crypto_engine *engine, void *async_req)
{
	struct skcipher_request *areq = container_of(async_req, struct skcipher_request, base);
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun8i_ce_dev *ce = op->ce;
	struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
	struct sun8i_ce_flow *chan;
	struct ce_task *cet;
	unsigned int ivsize, offset;
	int nr_sgs = rctx->nr_sgs;
	int nr_sgd = rctx->nr_sgd;
	int flow;

	flow = rctx->flow;
	chan = &ce->chanlist[flow];
	cet = chan->tl;
	ivsize = crypto_skcipher_ivsize(tfm);

	if (areq->src == areq->dst) {
		dma_unmap_sg(ce->dev, areq->src, nr_sgs, DMA_BIDIRECTIONAL);
	} else {
		if (nr_sgs > 0)
			dma_unmap_sg(ce->dev, areq->src, nr_sgs, DMA_TO_DEVICE);
		dma_unmap_sg(ce->dev, areq->dst, nr_sgd, DMA_FROM_DEVICE);
	}

	if (areq->iv && ivsize > 0) {
		if (rctx->addr_iv)
			dma_unmap_single(ce->dev, rctx->addr_iv, rctx->ivlen, DMA_TO_DEVICE);
		offset = areq->cryptlen - ivsize;
		if (rctx->op_dir & CE_DECRYPTION) {
			memcpy(areq->iv, rctx->backup_iv, ivsize);
			kfree_sensitive(rctx->backup_iv);
		} else {
			scatterwalk_map_and_copy(areq->iv, areq->dst, offset,
						 ivsize, 0);
		}
		kfree(rctx->bounce_iv);
	}

	dma_unmap_single(ce->dev, rctx->addr_key, op->keylen, DMA_TO_DEVICE);

	return 0;
}
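
/*
 * skcipher entry points: record the direction, punt unsupported layouts
 * to the fallback, otherwise pick a flow and queue the request on its
 * crypto engine.
 */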
int sun8i_ce_skdecrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
	struct crypto_engine *engine;
	int e;

	rctx->op_dir = CE_DECRYPTION;
	if (sun8i_ce_cipher_need_fallback(areq))
		return sun8i_ce_cipher_fallback(areq);

	e = sun8i_ce_get_engine_number(op->ce);
	rctx->flow = e;
	engine = op->ce->chanlist[e].engine;

	return crypto_transfer_skcipher_request_to_engine(engine, areq);
}

int sun8i_ce_skencrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
	struct crypto_engine *engine;
	int e;

	rctx->op_dir = CE_ENCRYPTION;
	if (sun8i_ce_cipher_need_fallback(areq))
		return sun8i_ce_cipher_fallback(areq);

	e = sun8i_ce_get_engine_number(op->ce);
	rctx->flow = e;
	engine = op->ce->chanlist[e].engine;

	return crypto_transfer_skcipher_request_to_engine(engine, areq);
}
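
/*
 * Transform init: allocate the software fallback, size the request
 * context to hold the fallback request, register the crypto_engine
 * callbacks and take a runtime PM reference on the device.
 */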
int sun8i_ce_cipher_init(struct crypto_tfm *tfm)
{
	struct sun8i_cipher_tfm_ctx *op = crypto_tfm_ctx(tfm);
	struct sun8i_ce_alg_template *algt;
	const char *name = crypto_tfm_alg_name(tfm);
	struct crypto_skcipher *sktfm = __crypto_skcipher_cast(tfm);
	struct skcipher_alg *alg = crypto_skcipher_alg(sktfm);
	int err;

	memset(op, 0, sizeof(struct sun8i_cipher_tfm_ctx));

	algt = container_of(alg, struct sun8i_ce_alg_template, alg.skcipher);
	op->ce = algt->ce;

	op->fallback_tfm = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(op->fallback_tfm)) {
		dev_err(op->ce->dev, "ERROR: Cannot allocate fallback for %s %ld\n",
			name, PTR_ERR(op->fallback_tfm));
		return PTR_ERR(op->fallback_tfm);
	}

	sktfm->reqsize = sizeof(struct sun8i_cipher_req_ctx) +
			 crypto_skcipher_reqsize(op->fallback_tfm);

	dev_info(op->ce->dev, "Fallback for %s is %s\n",
		 crypto_tfm_alg_driver_name(&sktfm->base),
		 crypto_tfm_alg_driver_name(crypto_skcipher_tfm(op->fallback_tfm)));

	op->enginectx.op.do_one_request = sun8i_ce_cipher_run;
	op->enginectx.op.prepare_request = sun8i_ce_cipher_prepare;
	op->enginectx.op.unprepare_request = sun8i_ce_cipher_unprepare;

	err = pm_runtime_get_sync(op->ce->dev);
	if (err < 0)
		goto error_pm;

	return 0;
error_pm:
	pm_runtime_put_noidle(op->ce->dev);
	crypto_free_skcipher(op->fallback_tfm);
	return err;
}

void sun8i_ce_cipher_exit(struct crypto_tfm *tfm)
{
	struct sun8i_cipher_tfm_ctx *op = crypto_tfm_ctx(tfm);

	kfree_sensitive(op->key);
	crypto_free_skcipher(op->fallback_tfm);
	pm_runtime_put_sync_suspend(op->ce->dev);
}
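
/*
 * The key is kept in a GFP_DMA kernel copy so it can be DMA-mapped for
 * the engine; it is also programmed into the fallback tfm so both paths
 * stay in sync.
 */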
int sun8i_ce_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
			unsigned int keylen)
{
	struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun8i_ce_dev *ce = op->ce;

	switch (keylen) {
	case 128 / 8:
	case 192 / 8:
	case 256 / 8:
		break;
	default:
		dev_dbg(ce->dev, "ERROR: Invalid keylen %u\n", keylen);
		return -EINVAL;
	}
	kfree_sensitive(op->key);
	op->keylen = keylen;
	op->key = kmemdup(key, keylen, GFP_KERNEL | GFP_DMA);
	if (!op->key)
		return -ENOMEM;

	crypto_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);

	return crypto_skcipher_setkey(op->fallback_tfm, key, keylen);
}

int sun8i_ce_des3_setkey(struct crypto_skcipher *tfm, const u8 *key,
			 unsigned int keylen)
{
	struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	int err;

	err = verify_skcipher_des3_key(tfm, key);
	if (err)
		return err;

	kfree_sensitive(op->key);
	op->keylen = keylen;
	op->key = kmemdup(key, keylen, GFP_KERNEL | GFP_DMA);
	if (!op->key)
		return -ENOMEM;

	crypto_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);

	return crypto_skcipher_setkey(op->fallback_tfm, key, keylen);
}