2 * sun4i-ss-cipher.c - hardware cryptographic accelerator for Allwinner A20 SoC
4 * Copyright (C) 2013-2015 Corentin LABBE <clabbe.montjoie@gmail.com>
 * This file adds support for the AES cipher with 128, 192 and 256 bit
 * keysize in CBC and ECB mode.
 * It also adds support for DES and 3DES in CBC and ECB mode.
 * You can find the datasheet in Documentation/arm/sunxi/README
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2 of the License, or
15 * (at your option) any later version.
/*
 * Optimized PIO path: push the whole request through the SS FIFOs.
 * Only usable when every source/destination SG entry has a 4-byte
 * aligned offset and length (checked by sun4i_ss_cipher_poll() before
 * it dispatches here), so no linearization buffer is needed.
 */
static int sun4i_ss_opti_poll(struct skcipher_request *areq)
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_ss_ctx *ss = op->ss;
	unsigned int ivsize = crypto_skcipher_ivsize(tfm);
	struct sun4i_cipher_req_ctx *ctx = skcipher_request_ctx(areq);
	/* when activating SS, the default FIFO space is SS_RX_DEFAULT(32) */
	u32 rx_cnt = SS_RX_DEFAULT;
	unsigned int ileft = areq->cryptlen;	/* input still to feed */
	unsigned int oleft = areq->cryptlen;	/* output still to drain */
	unsigned long pi = 0, po = 0; /* progress for in and out */
	struct sg_mapping_iter mi, mo;
	unsigned int oi, oo; /* offset for in and out */

	/* an IV-using mode must be given an IV by the caller */
	dev_err_ratelimited(ss->dev, "ERROR: Empty IV\n");
	if (!areq->src || !areq->dst) {
		dev_err_ratelimited(ss->dev, "ERROR: Some SGs are NULL\n");

	/* the SS is a single shared engine: serialize all register access */
	spin_lock_irqsave(&ss->slock, flags);

	/* load the key, one 32-bit word at a time, into SS_KEY0.. */
	for (i = 0; i < op->keylen / 4; i++)
		writesl(ss->base + SS_KEY0 + i * 4, &op->key[i], 1);

	/* load the IV (at most 4 words) into SS_IV0.. */
	for (i = 0; i < 4 && i < ivsize / 4; i++) {
		v = *(u32 *)(areq->iv + i * 4);
		writesl(ss->base + SS_IV0 + i * 4, &v, 1);

	/* start the engine with the requested algorithm/mode/direction */
	writel(mode, ss->base + SS_CTL);

	/* from here on, ileft/oleft count 4-byte words, not bytes */
	ileft = areq->cryptlen / 4;
	oleft = areq->cryptlen / 4;

	/*
	 * Input side: map the current source chunk (atomic kmap) and
	 * resume at progress point pi.
	 */
	sg_miter_start(&mi, areq->src, sg_nents(areq->src),
		       SG_MITER_FROM_SG | SG_MITER_ATOMIC);
	sg_miter_skip(&mi, pi);
	miter_err = sg_miter_next(&mi);
	if (!miter_err || !mi.addr) {
		dev_err_ratelimited(ss->dev, "ERROR: sg_miter return null\n");

	/* write no more words than the RX FIFO and the chunk allow */
	todo = min(rx_cnt, ileft);
	todo = min_t(size_t, todo, (mi.length - oi) / 4);
	writesl(ss->base + SS_RXFIFO, mi.addr + oi, todo);
	/* current chunk fully consumed: advance to the next SG entry */
	if (oi == mi.length) {

	/* re-read FIFO occupancy to pace the PIO loop */
	spaces = readl(ss->base + SS_FCSR);
	rx_cnt = SS_RXFIFO_SPACES(spaces);
	tx_cnt = SS_TXFIFO_SPACES(spaces);

	/* output side: map the current destination chunk at progress po */
	sg_miter_start(&mo, areq->dst, sg_nents(areq->dst),
		       SG_MITER_TO_SG | SG_MITER_ATOMIC);
	sg_miter_skip(&mo, po);
	miter_err = sg_miter_next(&mo);
	if (!miter_err || !mo.addr) {
		dev_err_ratelimited(ss->dev, "ERROR: sg_miter return null\n");

	/* read no more words than the TX FIFO holds and the chunk takes */
	todo = min(tx_cnt, oleft);
	todo = min_t(size_t, todo, (mo.length - oo) / 4);
	readsl(ss->base + SS_TXFIFO, mo.addr + oo, todo);
	if (oo == mo.length) {

	/* copy the updated IV back to areq->iv (presumably for chaining) */
	for (i = 0; i < 4 && i < ivsize / 4; i++) {
		v = readl(ss->base + SS_IV0 + i * 4);
		*(u32 *)(areq->iv + i * 4) = v;

	/* stop the engine and release it for the next user */
	writel(0, ss->base + SS_CTL);
	spin_unlock_irqrestore(&ss->slock, flags);
/*
 * Generic function that supports SGs with size not a multiple of 4.
 * Unaligned chunks are staged through the on-stack buffers buf/bufo so
 * the FIFOs are still accessed in whole 32-bit words; when every SG
 * entry is 4-byte aligned it delegates to sun4i_ss_opti_poll() instead.
 */
static int sun4i_ss_cipher_poll(struct skcipher_request *areq)
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_ss_ctx *ss = op->ss;
	struct scatterlist *in_sg = areq->src;
	struct scatterlist *out_sg = areq->dst;
	unsigned int ivsize = crypto_skcipher_ivsize(tfm);
	struct sun4i_cipher_req_ctx *ctx = skcipher_request_ctx(areq);
	u32 mode = ctx->mode;	/* SS_CTL value prepared by the mode wrapper */
	/* when activating SS, the default FIFO space is SS_RX_DEFAULT(32) */
	u32 rx_cnt = SS_RX_DEFAULT;
	unsigned int ileft = areq->cryptlen;	/* input still to feed */
	unsigned int oleft = areq->cryptlen;	/* output still to drain */
	struct sg_mapping_iter mi, mo;
	unsigned long pi = 0, po = 0; /* progress for in and out */
	unsigned int oi, oo; /* offset for in and out */
	char buf[4 * SS_RX_MAX];/* buffer for linearize SG src */
	char bufo[4 * SS_TX_MAX]; /* buffer for linearize SG dst */
	unsigned int ob = 0; /* offset in buf */
	unsigned int obo = 0; /* offset in bufo */
	unsigned int obl = 0; /* length of data in bufo */

	/* an IV-using mode must be given an IV by the caller */
	dev_err_ratelimited(ss->dev, "ERROR: Empty IV\n");
	if (!areq->src || !areq->dst) {
		dev_err_ratelimited(ss->dev, "ERROR: Some SGs are NULL\n");

	/*
	 * if we have only SGs with size multiple of 4,
	 * we can use the SS optimized function
	 */
	while (in_sg && no_chunk == 1) {
		if ((in_sg->length | in_sg->offset) & 3u)
		in_sg = sg_next(in_sg);
	while (out_sg && no_chunk == 1) {
		if ((out_sg->length | out_sg->offset) & 3u)
		out_sg = sg_next(out_sg);

	return sun4i_ss_opti_poll(areq);

	/* serialize access to the shared SS engine */
	spin_lock_irqsave(&ss->slock, flags);

	/* load the key words into SS_KEY0.. */
	for (i = 0; i < op->keylen / 4; i++)
		writesl(ss->base + SS_KEY0 + i * 4, &op->key[i], 1);

	/* load the IV (at most 4 words) into SS_IV0.. */
	for (i = 0; i < 4 && i < ivsize / 4; i++) {
		v = *(u32 *)(areq->iv + i * 4);
		writesl(ss->base + SS_IV0 + i * 4, &v, 1);

	/* start the engine with the requested algorithm/mode/direction */
	writel(mode, ss->base + SS_CTL);

	/* here, unlike the optimized path, ileft/oleft stay in bytes */
	ileft = areq->cryptlen;
	oleft = areq->cryptlen;

	/* input side: map the current source chunk at progress pi */
	sg_miter_start(&mi, areq->src, sg_nents(areq->src),
		       SG_MITER_FROM_SG | SG_MITER_ATOMIC);
	sg_miter_skip(&mi, pi);
	miter_err = sg_miter_next(&mi);
	if (!miter_err || !mi.addr) {
		dev_err_ratelimited(ss->dev, "ERROR: sg_miter return null\n");

	/*
	 * todo is the number of consecutive 4byte words that we
	 * can read from the current SG
	 */
	todo = min(rx_cnt, ileft / 4);
	todo = min_t(size_t, todo, (mi.length - oi) / 4);
	writesl(ss->base + SS_RXFIFO, mi.addr + oi,

	/*
	 * not enough consecutive bytes, so we need to
	 * linearize in buf. todo is in bytes.
	 * After that copy, if we have a multiple of 4
	 * we need to be able to write all of buf in one
	 * pass, which is why we min() with rx_cnt
	 */
	todo = min(rx_cnt * 4 - ob, ileft);
	todo = min_t(size_t, todo, mi.length - oi);
	memcpy(buf + ob, mi.addr + oi, todo);
	/* flush the staged whole words from buf into the RX FIFO */
	writesl(ss->base + SS_RXFIFO, buf,
	/* current chunk fully consumed: advance to the next SG entry */
	if (oi == mi.length) {

	/* re-read FIFO occupancy to pace the PIO loop */
	spaces = readl(ss->base + SS_FCSR);
	rx_cnt = SS_RXFIFO_SPACES(spaces);
	tx_cnt = SS_TXFIFO_SPACES(spaces);

	/* output side: map the current destination chunk at progress po */
	sg_miter_start(&mo, areq->dst, sg_nents(areq->dst),
		       SG_MITER_TO_SG | SG_MITER_ATOMIC);
	sg_miter_skip(&mo, po);
	miter_err = sg_miter_next(&mo);
	if (!miter_err || !mo.addr) {
		dev_err_ratelimited(ss->dev, "ERROR: sg_miter return null\n");

	/* todo in 4byte words */
	todo = min(tx_cnt, oleft / 4);
	todo = min_t(size_t, todo, (mo.length - oo) / 4);
	readsl(ss->base + SS_TXFIFO, mo.addr + oo, todo);
	if (oo == mo.length) {

	/*
	 * read obl bytes into bufo; we read the maximum available in
	 * order to empty the device
	 */
	readsl(ss->base + SS_TXFIFO, bufo, tx_cnt);

	/*
	 * how many bytes can we copy?
	 * no more than the remaining SG size
	 * no more than the remaining buffer
	 * no need to test against oleft
	 */
	mo.length - oo, obl - obo);
	memcpy(mo.addr + oo, bufo + obo, todo);
	if (oo == mo.length) {

	/* bufo must be fully used here */

	/* copy the updated IV back to areq->iv (presumably for chaining) */
	for (i = 0; i < 4 && i < ivsize / 4; i++) {
		v = readl(ss->base + SS_IV0 + i * 4);
		*(u32 *)(areq->iv + i * 4) = v;

	/* stop the engine and release it for the next user */
	writel(0, ss->base + SS_CTL);
	spin_unlock_irqrestore(&ss->slock, flags);
/* CBC AES encryption: build the SS_CTL mode word, then run the engine */
int sun4i_ss_cbc_aes_encrypt(struct skcipher_request *areq)
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

	/* algorithm | chaining mode | enable | direction (keysize bits OR'ed in) */
	rctx->mode = SS_OP_AES | SS_CBC | SS_ENABLED | SS_ENCRYPTION |
	return sun4i_ss_cipher_poll(areq);
/* CBC AES decryption: build the SS_CTL mode word, then run the engine */
int sun4i_ss_cbc_aes_decrypt(struct skcipher_request *areq)
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

	/* algorithm | chaining mode | enable | direction (keysize bits OR'ed in) */
	rctx->mode = SS_OP_AES | SS_CBC | SS_ENABLED | SS_DECRYPTION |
	return sun4i_ss_cipher_poll(areq);
/* ECB AES encryption: build the SS_CTL mode word, then run the engine */
int sun4i_ss_ecb_aes_encrypt(struct skcipher_request *areq)
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

	/* algorithm | chaining mode | enable | direction (keysize bits OR'ed in) */
	rctx->mode = SS_OP_AES | SS_ECB | SS_ENABLED | SS_ENCRYPTION |
	return sun4i_ss_cipher_poll(areq);
/* ECB AES decryption: build the SS_CTL mode word, then run the engine */
int sun4i_ss_ecb_aes_decrypt(struct skcipher_request *areq)
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

	/* algorithm | chaining mode | enable | direction (keysize bits OR'ed in) */
	rctx->mode = SS_OP_AES | SS_ECB | SS_ENABLED | SS_DECRYPTION |
	return sun4i_ss_cipher_poll(areq);
/* CBC DES encryption: build the SS_CTL mode word, then run the engine */
int sun4i_ss_cbc_des_encrypt(struct skcipher_request *areq)
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

	/* algorithm | chaining mode | enable | direction */
	rctx->mode = SS_OP_DES | SS_CBC | SS_ENABLED | SS_ENCRYPTION |
	return sun4i_ss_cipher_poll(areq);
/* CBC DES decryption: build the SS_CTL mode word, then run the engine */
int sun4i_ss_cbc_des_decrypt(struct skcipher_request *areq)
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

	/* algorithm | chaining mode | enable | direction */
	rctx->mode = SS_OP_DES | SS_CBC | SS_ENABLED | SS_DECRYPTION |
	return sun4i_ss_cipher_poll(areq);
/* ECB DES encryption: build the SS_CTL mode word, then run the engine */
int sun4i_ss_ecb_des_encrypt(struct skcipher_request *areq)
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

	/* algorithm | chaining mode | enable | direction */
	rctx->mode = SS_OP_DES | SS_ECB | SS_ENABLED | SS_ENCRYPTION |
	return sun4i_ss_cipher_poll(areq);
/* ECB DES decryption: build the SS_CTL mode word, then run the engine */
int sun4i_ss_ecb_des_decrypt(struct skcipher_request *areq)
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

	/* algorithm | chaining mode | enable | direction */
	rctx->mode = SS_OP_DES | SS_ECB | SS_ENABLED | SS_DECRYPTION |
	return sun4i_ss_cipher_poll(areq);
/* CBC 3DES encryption: build the SS_CTL mode word, then run the engine */
int sun4i_ss_cbc_des3_encrypt(struct skcipher_request *areq)
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

	/* algorithm | chaining mode | enable | direction */
	rctx->mode = SS_OP_3DES | SS_CBC | SS_ENABLED | SS_ENCRYPTION |
	return sun4i_ss_cipher_poll(areq);
/* CBC 3DES decryption: build the SS_CTL mode word, then run the engine */
int sun4i_ss_cbc_des3_decrypt(struct skcipher_request *areq)
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

	/* algorithm | chaining mode | enable | direction */
	rctx->mode = SS_OP_3DES | SS_CBC | SS_ENABLED | SS_DECRYPTION |
	return sun4i_ss_cipher_poll(areq);
/* ECB 3DES encryption: build the SS_CTL mode word, then run the engine */
int sun4i_ss_ecb_des3_encrypt(struct skcipher_request *areq)
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

	/* algorithm | chaining mode | enable | direction */
	rctx->mode = SS_OP_3DES | SS_ECB | SS_ENABLED | SS_ENCRYPTION |
	return sun4i_ss_cipher_poll(areq);
/* ECB 3DES decryption: build the SS_CTL mode word, then run the engine */
int sun4i_ss_ecb_des3_decrypt(struct skcipher_request *areq)
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

	/* algorithm | chaining mode | enable | direction */
	rctx->mode = SS_OP_3DES | SS_ECB | SS_ENABLED | SS_DECRYPTION |
	return sun4i_ss_cipher_poll(areq);
/*
 * Per-transform initialization: zero the tfm context, locate the
 * enclosing algorithm template via container_of(), and reserve
 * per-request space for struct sun4i_cipher_req_ctx.
 */
int sun4i_ss_cipher_init(struct crypto_tfm *tfm)
	struct sun4i_tfm_ctx *op = crypto_tfm_ctx(tfm);
	struct sun4i_ss_alg_template *algt;

	memset(op, 0, sizeof(struct sun4i_tfm_ctx));

	/* walk back from the generic alg to the driver's template */
	algt = container_of(tfm->__crt_alg, struct sun4i_ss_alg_template,

	crypto_skcipher_set_reqsize(__crypto_skcipher_cast(tfm),
				    sizeof(struct sun4i_cipher_req_ctx));
/* check and set the AES key, prepare the mode to be used */
int sun4i_ss_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_ss_ctx *ss = op->ss;

	/* translate keylen into the hardware keysize field of SS_CTL */
	op->keymode = SS_AES_128BITS;
	op->keymode = SS_AES_192BITS;
	op->keymode = SS_AES_256BITS;
	/* any other length is rejected with a bad-key-len error */
	dev_err(ss->dev, "ERROR: Invalid keylen %u\n", keylen);
	crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);

	/* keep a copy; it is loaded into SS_KEY0.. at request time */
	memcpy(op->key, key, keylen);
/* check and set the DES key, prepare the mode to be used */
int sun4i_ss_des_setkey(struct crypto_skcipher *tfm, const u8 *key,
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_ss_ctx *ss = op->ss;
	u32 tmp[DES_EXPKEY_WORDS];	/* scratch for des_ekey() expansion */

	/* DES takes exactly one 8-byte key */
	if (unlikely(keylen != DES_KEY_SIZE)) {
		dev_err(ss->dev, "Invalid keylen %u\n", keylen);
		crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);

	flags = crypto_skcipher_get_flags(tfm);

	/*
	 * des_ekey() returning 0 indicates a weak key; flag it only when
	 * the user requested weak-key checking (CRYPTO_TFM_REQ_WEAK_KEY).
	 */
	ret = des_ekey(tmp, key);
	if (unlikely(!ret) && (flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
		crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_WEAK_KEY);
		dev_dbg(ss->dev, "Weak key %u\n", keylen);

	/* keep a copy; it is loaded into SS_KEY0.. at request time */
	memcpy(op->key, key, keylen);
/* check and set the 3DES key, prepare the mode to be used */
int sun4i_ss_des3_setkey(struct crypto_skcipher *tfm, const u8 *key,
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_ss_ctx *ss = op->ss;

	/* 3DES takes exactly three DES keys back to back */
	if (unlikely(keylen != 3 * DES_KEY_SIZE)) {
		dev_err(ss->dev, "Invalid keylen %u\n", keylen);
		crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);

	/* keep a copy; it is loaded into SS_KEY0.. at request time */
	memcpy(op->key, key, keylen);