GNU Linux-libre 4.19.245-gnu1
drivers/crypto/hisilicon/sec/sec_algs.c
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2016-2017 Hisilicon Limited. */
#include <linux/crypto.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>

#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/des.h>
#include <crypto/skcipher.h>
#include <crypto/xts.h>
#include <crypto/internal/skcipher.h>

#include "sec_drv.h"

#define SEC_MAX_CIPHER_KEY		64
#define SEC_REQ_LIMIT SZ_32M

struct sec_c_alg_cfg {
        unsigned c_alg          : 3;
        unsigned c_mode         : 3;
        unsigned key_len        : 2;
        unsigned c_width        : 2;
};

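/*
 * Per-algorithm cipher configuration, indexed by enum sec_cipher_alg.
 * These fields are packed into the relevant BD (buffer descriptor)
 * words by sec_alg_skcipher_init_template() below.
 */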
static const struct sec_c_alg_cfg sec_c_alg_cfgs[] = {
        [SEC_C_DES_ECB_64] = {
                .c_alg = SEC_C_ALG_DES,
                .c_mode = SEC_C_MODE_ECB,
                .key_len = SEC_KEY_LEN_DES,
        },
        [SEC_C_DES_CBC_64] = {
                .c_alg = SEC_C_ALG_DES,
                .c_mode = SEC_C_MODE_CBC,
                .key_len = SEC_KEY_LEN_DES,
        },
        [SEC_C_3DES_ECB_192_3KEY] = {
                .c_alg = SEC_C_ALG_3DES,
                .c_mode = SEC_C_MODE_ECB,
                .key_len = SEC_KEY_LEN_3DES_3_KEY,
        },
        [SEC_C_3DES_ECB_192_2KEY] = {
                .c_alg = SEC_C_ALG_3DES,
                .c_mode = SEC_C_MODE_ECB,
                .key_len = SEC_KEY_LEN_3DES_2_KEY,
        },
        [SEC_C_3DES_CBC_192_3KEY] = {
                .c_alg = SEC_C_ALG_3DES,
                .c_mode = SEC_C_MODE_CBC,
                .key_len = SEC_KEY_LEN_3DES_3_KEY,
        },
        [SEC_C_3DES_CBC_192_2KEY] = {
                .c_alg = SEC_C_ALG_3DES,
                .c_mode = SEC_C_MODE_CBC,
                .key_len = SEC_KEY_LEN_3DES_2_KEY,
        },
        [SEC_C_AES_ECB_128] = {
                .c_alg = SEC_C_ALG_AES,
                .c_mode = SEC_C_MODE_ECB,
                .key_len = SEC_KEY_LEN_AES_128,
        },
        [SEC_C_AES_ECB_192] = {
                .c_alg = SEC_C_ALG_AES,
                .c_mode = SEC_C_MODE_ECB,
                .key_len = SEC_KEY_LEN_AES_192,
        },
        [SEC_C_AES_ECB_256] = {
                .c_alg = SEC_C_ALG_AES,
                .c_mode = SEC_C_MODE_ECB,
                .key_len = SEC_KEY_LEN_AES_256,
        },
        [SEC_C_AES_CBC_128] = {
                .c_alg = SEC_C_ALG_AES,
                .c_mode = SEC_C_MODE_CBC,
                .key_len = SEC_KEY_LEN_AES_128,
        },
        [SEC_C_AES_CBC_192] = {
                .c_alg = SEC_C_ALG_AES,
                .c_mode = SEC_C_MODE_CBC,
                .key_len = SEC_KEY_LEN_AES_192,
        },
        [SEC_C_AES_CBC_256] = {
                .c_alg = SEC_C_ALG_AES,
                .c_mode = SEC_C_MODE_CBC,
                .key_len = SEC_KEY_LEN_AES_256,
        },
        [SEC_C_AES_CTR_128] = {
                .c_alg = SEC_C_ALG_AES,
                .c_mode = SEC_C_MODE_CTR,
                .key_len = SEC_KEY_LEN_AES_128,
        },
        [SEC_C_AES_CTR_192] = {
                .c_alg = SEC_C_ALG_AES,
                .c_mode = SEC_C_MODE_CTR,
                .key_len = SEC_KEY_LEN_AES_192,
        },
        [SEC_C_AES_CTR_256] = {
                .c_alg = SEC_C_ALG_AES,
                .c_mode = SEC_C_MODE_CTR,
                .key_len = SEC_KEY_LEN_AES_256,
        },
        [SEC_C_AES_XTS_128] = {
                .c_alg = SEC_C_ALG_AES,
                .c_mode = SEC_C_MODE_XTS,
                .key_len = SEC_KEY_LEN_AES_128,
        },
        [SEC_C_AES_XTS_256] = {
                .c_alg = SEC_C_ALG_AES,
                .c_mode = SEC_C_MODE_XTS,
                .key_len = SEC_KEY_LEN_AES_256,
        },
        [SEC_C_NULL] = {
        },
};

/*
 * Mutex used to protect the reference count of the algorithm
 * providers.
 */
static DEFINE_MUTEX(algs_lock);
static unsigned int active_devs;

static void sec_alg_skcipher_init_template(struct sec_alg_tfm_ctx *ctx,
                                           struct sec_bd_info *req,
                                           enum sec_cipher_alg alg)
{
        const struct sec_c_alg_cfg *cfg = &sec_c_alg_cfgs[alg];

        memset(req, 0, sizeof(*req));
        req->w0 |= cfg->c_mode << SEC_BD_W0_C_MODE_S;
        req->w1 |= cfg->c_alg << SEC_BD_W1_C_ALG_S;
        req->w3 |= cfg->key_len << SEC_BD_W3_C_KEY_LEN_S;
        req->w0 |= cfg->c_width << SEC_BD_W0_C_WIDTH_S;

        req->cipher_key_addr_lo = lower_32_bits(ctx->pkey);
        req->cipher_key_addr_hi = upper_32_bits(ctx->pkey);
}

static void sec_alg_skcipher_init_context(struct crypto_skcipher *atfm,
                                          const u8 *key,
                                          unsigned int keylen,
                                          enum sec_cipher_alg alg)
{
        struct crypto_tfm *tfm = crypto_skcipher_tfm(atfm);
        struct sec_alg_tfm_ctx *ctx = crypto_tfm_ctx(tfm);

        ctx->cipher_alg = alg;
        memcpy(ctx->key, key, keylen);
        sec_alg_skcipher_init_template(ctx, &ctx->req_template,
                                       ctx->cipher_alg);
}

static void sec_free_hw_sgl(struct sec_hw_sgl *hw_sgl,
                            dma_addr_t psec_sgl, struct sec_dev_info *info)
{
        struct sec_hw_sgl *sgl_current, *sgl_next;
        dma_addr_t sgl_next_dma;

        sgl_current = hw_sgl;
        while (sgl_current) {
                sgl_next = sgl_current->next;
                sgl_next_dma = sgl_current->next_sgl;

                dma_pool_free(info->hw_sgl_pool, sgl_current, psec_sgl);

                sgl_current = sgl_next;
                psec_sgl = sgl_next_dma;
        }
}

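/*
 * Build the hardware scatter-gather list mirroring a DMA-mapped software
 * scatterlist. Each sec_hw_sgl holds up to SEC_MAX_SGE_NUM entries;
 * longer lists are chained through the next / next_sgl (CPU / DMA)
 * pointers, with the total entry count stored in the head element.
 */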
static int sec_alloc_and_fill_hw_sgl(struct sec_hw_sgl **sec_sgl,
                                     dma_addr_t *psec_sgl,
                                     struct scatterlist *sgl,
                                     int count,
                                     struct sec_dev_info *info,
                                     gfp_t gfp)
{
        struct sec_hw_sgl *sgl_current = NULL;
        struct sec_hw_sgl *sgl_next;
        dma_addr_t sgl_next_dma;
        struct scatterlist *sg;
        int ret, sge_index, i;

        if (!count)
                return -EINVAL;

        for_each_sg(sgl, sg, count, i) {
                sge_index = i % SEC_MAX_SGE_NUM;
                if (sge_index == 0) {
                        sgl_next = dma_pool_zalloc(info->hw_sgl_pool,
                                                   gfp, &sgl_next_dma);
                        if (!sgl_next) {
                                ret = -ENOMEM;
                                goto err_free_hw_sgls;
                        }

                        if (!sgl_current) { /* First one */
                                *psec_sgl = sgl_next_dma;
                                *sec_sgl = sgl_next;
                        } else { /* Chained */
                                sgl_current->entry_sum_in_sgl = SEC_MAX_SGE_NUM;
                                sgl_current->next_sgl = sgl_next_dma;
                                sgl_current->next = sgl_next;
                        }
                        sgl_current = sgl_next;
                }
                sgl_current->sge_entries[sge_index].buf = sg_dma_address(sg);
                sgl_current->sge_entries[sge_index].len = sg_dma_len(sg);
                sgl_current->data_bytes_in_sgl += sg_dma_len(sg);
        }
        sgl_current->entry_sum_in_sgl = count % SEC_MAX_SGE_NUM;
        sgl_current->next_sgl = 0;
        (*sec_sgl)->entry_sum_in_chain = count;

        return 0;

err_free_hw_sgls:
        sec_free_hw_sgl(*sec_sgl, *psec_sgl, info);
        *psec_sgl = 0;

        return ret;
}

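/*
 * Common setkey handling: the key lives in a single DMA-coherent buffer
 * shared with the device, allocated on first use and zeroed then
 * overwritten on rekeying, so the BD template can keep pointing at the
 * same DMA address (ctx->pkey).
 */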
static int sec_alg_skcipher_setkey(struct crypto_skcipher *tfm,
                                   const u8 *key, unsigned int keylen,
                                   enum sec_cipher_alg alg)
{
        struct sec_alg_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
        struct device *dev = ctx->queue->dev_info->dev;

        mutex_lock(&ctx->lock);
        if (ctx->key) {
                /* rekeying */
                memset(ctx->key, 0, SEC_MAX_CIPHER_KEY);
        } else {
                /* new key */
                ctx->key = dma_zalloc_coherent(dev, SEC_MAX_CIPHER_KEY,
                                               &ctx->pkey, GFP_KERNEL);
                if (!ctx->key) {
                        mutex_unlock(&ctx->lock);
                        return -ENOMEM;
                }
        }
        mutex_unlock(&ctx->lock);
        sec_alg_skcipher_init_context(tfm, key, keylen, alg);

        return 0;
}

static int sec_alg_skcipher_setkey_aes_ecb(struct crypto_skcipher *tfm,
                                           const u8 *key, unsigned int keylen)
{
        enum sec_cipher_alg alg;

        switch (keylen) {
        case AES_KEYSIZE_128:
                alg = SEC_C_AES_ECB_128;
                break;
        case AES_KEYSIZE_192:
                alg = SEC_C_AES_ECB_192;
                break;
        case AES_KEYSIZE_256:
                alg = SEC_C_AES_ECB_256;
                break;
        default:
                return -EINVAL;
        }

        return sec_alg_skcipher_setkey(tfm, key, keylen, alg);
}

static int sec_alg_skcipher_setkey_aes_cbc(struct crypto_skcipher *tfm,
                                           const u8 *key, unsigned int keylen)
{
        enum sec_cipher_alg alg;

        switch (keylen) {
        case AES_KEYSIZE_128:
                alg = SEC_C_AES_CBC_128;
                break;
        case AES_KEYSIZE_192:
                alg = SEC_C_AES_CBC_192;
                break;
        case AES_KEYSIZE_256:
                alg = SEC_C_AES_CBC_256;
                break;
        default:
                return -EINVAL;
        }

        return sec_alg_skcipher_setkey(tfm, key, keylen, alg);
}

static int sec_alg_skcipher_setkey_aes_ctr(struct crypto_skcipher *tfm,
                                           const u8 *key, unsigned int keylen)
{
        enum sec_cipher_alg alg;

        switch (keylen) {
        case AES_KEYSIZE_128:
                alg = SEC_C_AES_CTR_128;
                break;
        case AES_KEYSIZE_192:
                alg = SEC_C_AES_CTR_192;
                break;
        case AES_KEYSIZE_256:
                alg = SEC_C_AES_CTR_256;
                break;
        default:
                return -EINVAL;
        }

        return sec_alg_skcipher_setkey(tfm, key, keylen, alg);
}

static int sec_alg_skcipher_setkey_aes_xts(struct crypto_skcipher *tfm,
                                           const u8 *key, unsigned int keylen)
{
        enum sec_cipher_alg alg;
        int ret;

        ret = xts_verify_key(tfm, key, keylen);
        if (ret)
                return ret;

        switch (keylen) {
        case AES_KEYSIZE_128 * 2:
                alg = SEC_C_AES_XTS_128;
                break;
        case AES_KEYSIZE_256 * 2:
                alg = SEC_C_AES_XTS_256;
                break;
        default:
                return -EINVAL;
        }

        return sec_alg_skcipher_setkey(tfm, key, keylen, alg);
}

static int sec_alg_skcipher_setkey_des_ecb(struct crypto_skcipher *tfm,
                                           const u8 *key, unsigned int keylen)
{
        if (keylen != DES_KEY_SIZE)
                return -EINVAL;

        return sec_alg_skcipher_setkey(tfm, key, keylen, SEC_C_DES_ECB_64);
}

static int sec_alg_skcipher_setkey_des_cbc(struct crypto_skcipher *tfm,
                                           const u8 *key, unsigned int keylen)
{
        if (keylen != DES_KEY_SIZE)
                return -EINVAL;

        return sec_alg_skcipher_setkey(tfm, key, keylen, SEC_C_DES_CBC_64);
}

static int sec_alg_skcipher_setkey_3des_ecb(struct crypto_skcipher *tfm,
                                            const u8 *key, unsigned int keylen)
{
        if (keylen != DES3_EDE_KEY_SIZE)
                return -EINVAL;

        return sec_alg_skcipher_setkey(tfm, key, keylen,
                                       SEC_C_3DES_ECB_192_3KEY);
}

static int sec_alg_skcipher_setkey_3des_cbc(struct crypto_skcipher *tfm,
                                            const u8 *key, unsigned int keylen)
{
        if (keylen != DES3_EDE_KEY_SIZE)
                return -EINVAL;

        return sec_alg_skcipher_setkey(tfm, key, keylen,
                                       SEC_C_3DES_CBC_192_3KEY);
}

static void sec_alg_free_el(struct sec_request_el *el,
                            struct sec_dev_info *info)
{
        sec_free_hw_sgl(el->out, el->dma_out, info);
        sec_free_hw_sgl(el->in, el->dma_in, info);
        kfree(el->sgl_in);
        kfree(el->sgl_out);
        kfree(el);
}

/* queuelock must be held */
static int sec_send_request(struct sec_request *sec_req, struct sec_queue *queue)
{
        struct sec_request_el *el, *temp;
        int ret = 0;

        mutex_lock(&sec_req->lock);
        list_for_each_entry_safe(el, temp, &sec_req->elements, head) {
                /*
                 * Add to the hardware queue only under the following
                 * circumstances:
                 * 1) Software and hardware queue empty so no chain
                 *    dependencies.
                 * 2) No dependencies as this element starts a new IV (still
                 *    check the software queue is empty to maintain ordering).
                 * 3) No dependencies because the mode does no chaining.
                 *
                 * In all other cases first insert onto the software queue,
                 * which is then emptied as requests complete.
                 */
                if (!queue->havesoftqueue ||
                    (kfifo_is_empty(&queue->softqueue) &&
                     sec_queue_empty(queue))) {
                        ret = sec_queue_send(queue, &el->req, sec_req);
                        if (ret == -EAGAIN) {
                                /* Wait until we can send, then try again */
                                /* DEAD if here - should not happen */
                                ret = -EBUSY;
                                goto err_unlock;
                        }
                } else {
                        kfifo_put(&queue->softqueue, el);
                }
        }
err_unlock:
        mutex_unlock(&sec_req->lock);

        return ret;
}

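/*
 * Completion handler for a single BD element. It propagates the IV for
 * chained modes, feeds the hardware queue from the software queue or the
 * backlog where possible, and completes the skcipher request once the
 * last of its elements is done.
 */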
static void sec_skcipher_alg_callback(struct sec_bd_info *sec_resp,
                                      struct crypto_async_request *req_base)
{
        struct skcipher_request *skreq = container_of(req_base,
                                                      struct skcipher_request,
                                                      base);
        struct sec_request *sec_req = skcipher_request_ctx(skreq);
        struct sec_request *backlog_req;
        struct sec_request_el *sec_req_el, *nextrequest;
        struct sec_alg_tfm_ctx *ctx = sec_req->tfm_ctx;
        struct crypto_skcipher *atfm = crypto_skcipher_reqtfm(skreq);
        struct device *dev = ctx->queue->dev_info->dev;
        int icv_or_skey_en, ret;
        bool done;

        sec_req_el = list_first_entry(&sec_req->elements, struct sec_request_el,
                                      head);
        icv_or_skey_en = (sec_resp->w0 & SEC_BD_W0_ICV_OR_SKEY_EN_M) >>
                SEC_BD_W0_ICV_OR_SKEY_EN_S;
        if (sec_resp->w1 & SEC_BD_W1_BD_INVALID || icv_or_skey_en == 3) {
                dev_err(dev, "Got an invalid answer %lu %d\n",
                        sec_resp->w1 & SEC_BD_W1_BD_INVALID,
                        icv_or_skey_en);
                sec_req->err = -EINVAL;
                /*
                 * We need to muddle on to avoid getting stuck with elements
                 * on the queue. The error will be reported to the requester,
                 * which should be able to handle it appropriately.
                 */
        }

459
460         mutex_lock(&ctx->queue->queuelock);
461         /* Put the IV in place for chained cases */
462         switch (ctx->cipher_alg) {
463         case SEC_C_AES_CBC_128:
464         case SEC_C_AES_CBC_192:
465         case SEC_C_AES_CBC_256:
466                 if (sec_req_el->req.w0 & SEC_BD_W0_DE)
467                         sg_pcopy_to_buffer(sec_req_el->sgl_out,
468                                            sg_nents(sec_req_el->sgl_out),
469                                            skreq->iv,
470                                            crypto_skcipher_ivsize(atfm),
471                                            sec_req_el->el_length -
472                                            crypto_skcipher_ivsize(atfm));
473                 else
474                         sg_pcopy_to_buffer(sec_req_el->sgl_in,
475                                            sg_nents(sec_req_el->sgl_in),
476                                            skreq->iv,
477                                            crypto_skcipher_ivsize(atfm),
478                                            sec_req_el->el_length -
479                                            crypto_skcipher_ivsize(atfm));
480                 /* No need to sync to the device as coherent DMA */
481                 break;
482         case SEC_C_AES_CTR_128:
483         case SEC_C_AES_CTR_192:
484         case SEC_C_AES_CTR_256:
485                 crypto_inc(skreq->iv, 16);
486                 break;
487         default:
488                 /* Do not update */
489                 break;
490         }
491
492         if (ctx->queue->havesoftqueue &&
493             !kfifo_is_empty(&ctx->queue->softqueue) &&
494             sec_queue_empty(ctx->queue)) {
495                 ret = kfifo_get(&ctx->queue->softqueue, &nextrequest);
496                 if (ret <= 0)
497                         dev_err(dev,
498                                 "Error getting next element from kfifo %d\n",
499                                 ret);
500                 else
501                         /* We know there is space so this cannot fail */
502                         sec_queue_send(ctx->queue, &nextrequest->req,
503                                        nextrequest->sec_req);
504         } else if (!list_empty(&ctx->backlog)) {
505                 /* Need to verify there is room first */
506                 backlog_req = list_first_entry(&ctx->backlog,
507                                                typeof(*backlog_req),
508                                                backlog_head);
509                 if (sec_queue_can_enqueue(ctx->queue,
510                     backlog_req->num_elements) ||
511                     (ctx->queue->havesoftqueue &&
512                      kfifo_avail(&ctx->queue->softqueue) >
513                      backlog_req->num_elements)) {
514                         sec_send_request(backlog_req, ctx->queue);
515                         backlog_req->req_base->complete(backlog_req->req_base,
516                                                         -EINPROGRESS);
517                         list_del(&backlog_req->backlog_head);
518                 }
519         }
520         mutex_unlock(&ctx->queue->queuelock);
521
522         mutex_lock(&sec_req->lock);
523         list_del(&sec_req_el->head);
524         mutex_unlock(&sec_req->lock);
525         sec_alg_free_el(sec_req_el, ctx->queue->dev_info);
526
527         /*
528          * Request is done.
529          * The dance is needed as the lock is freed in the completion
530          */
531         mutex_lock(&sec_req->lock);
532         done = list_empty(&sec_req->elements);
533         mutex_unlock(&sec_req->lock);
534         if (done) {
535                 if (crypto_skcipher_ivsize(atfm)) {
536                         dma_unmap_single(dev, sec_req->dma_iv,
537                                          crypto_skcipher_ivsize(atfm),
538                                          DMA_TO_DEVICE);
539                 }
540                 dma_unmap_sg(dev, skreq->src, sec_req->len_in,
541                              DMA_BIDIRECTIONAL);
542                 if (skreq->src != skreq->dst)
543                         dma_unmap_sg(dev, skreq->dst, sec_req->len_out,
544                                      DMA_BIDIRECTIONAL);
545                 skreq->base.complete(&skreq->base, sec_req->err);
546         }
547 }

void sec_alg_callback(struct sec_bd_info *resp, void *shadow)
{
        struct sec_request *sec_req = shadow;

        sec_req->cb(resp, sec_req->req_base);
}

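/*
 * Worked example: a 70 MiB request with SEC_REQ_LIMIT of SZ_32M gives
 * steps = roundup(70M, 32M) / 32M = 3, with split sizes of
 * { 32 MiB, 32 MiB, 6 MiB }.
 */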
static int sec_alg_alloc_and_calc_split_sizes(int length, size_t **split_sizes,
                                              int *steps, gfp_t gfp)
{
        size_t *sizes;
        int i;

        /* Split into suitably sized blocks */
        *steps = roundup(length, SEC_REQ_LIMIT) / SEC_REQ_LIMIT;
        sizes = kcalloc(*steps, sizeof(*sizes), gfp);
        if (!sizes)
                return -ENOMEM;

        for (i = 0; i < *steps - 1; i++)
                sizes[i] = SEC_REQ_LIMIT;
        sizes[*steps - 1] = length - SEC_REQ_LIMIT * (*steps - 1);
        *split_sizes = sizes;

        return 0;
}

static int sec_map_and_split_sg(struct scatterlist *sgl, size_t *split_sizes,
                                int steps, struct scatterlist ***splits,
                                int **splits_nents,
                                int sgl_len_in,
                                struct device *dev, gfp_t gfp)
{
        int ret, count;

        count = dma_map_sg(dev, sgl, sgl_len_in, DMA_BIDIRECTIONAL);
        if (!count)
                return -EINVAL;

        *splits = kcalloc(steps, sizeof(struct scatterlist *), gfp);
        if (!*splits) {
                ret = -ENOMEM;
                goto err_unmap_sg;
        }
        *splits_nents = kcalloc(steps, sizeof(int), gfp);
        if (!*splits_nents) {
                ret = -ENOMEM;
                goto err_free_splits;
        }

        /* Split the mapped scatterlist into steps chunks of split_sizes */
        ret = sg_split(sgl, count, 0, steps, split_sizes,
                       *splits, *splits_nents, gfp);
        if (ret) {
                ret = -ENOMEM;
                goto err_free_splits_nents;
        }

        return 0;

err_free_splits_nents:
        kfree(*splits_nents);
err_free_splits:
        kfree(*splits);
err_unmap_sg:
        dma_unmap_sg(dev, sgl, sgl_len_in, DMA_BIDIRECTIONAL);

        return ret;
}

/*
 * Reverses the sec_map_and_split_sg call for messages not yet added to
 * the queues.
 */
static void sec_unmap_sg_on_err(struct scatterlist *sgl, int steps,
                                struct scatterlist **splits, int *splits_nents,
                                int sgl_len_in, struct device *dev)
{
        int i;

        for (i = 0; i < steps; i++)
                kfree(splits[i]);
        kfree(splits_nents);
        kfree(splits);

        dma_unmap_sg(dev, sgl, sgl_len_in, DMA_BIDIRECTIONAL);
}

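/*
 * Allocate and populate one request element (BD plus hardware SGLs)
 * from the transform's BD template: set the cipher direction, write the
 * granule size across the w0/w2 fields, and wire up the source and (for
 * a different destination) destination hardware SGLs.
 */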
static struct sec_request_el
*sec_alg_alloc_and_fill_el(struct sec_bd_info *template, int encrypt,
                           int el_size, bool different_dest,
                           struct scatterlist *sgl_in, int n_ents_in,
                           struct scatterlist *sgl_out, int n_ents_out,
                           struct sec_dev_info *info, gfp_t gfp)
{
        struct sec_request_el *el;
        struct sec_bd_info *req;
        int ret;

        el = kzalloc(sizeof(*el), gfp);
        if (!el)
                return ERR_PTR(-ENOMEM);
        el->el_length = el_size;
        req = &el->req;
        memcpy(req, template, sizeof(*req));

        req->w0 &= ~SEC_BD_W0_CIPHER_M;
        if (encrypt)
                req->w0 |= SEC_CIPHER_ENCRYPT << SEC_BD_W0_CIPHER_S;
        else
                req->w0 |= SEC_CIPHER_DECRYPT << SEC_BD_W0_CIPHER_S;

        req->w0 &= ~SEC_BD_W0_C_GRAN_SIZE_19_16_M;
        req->w0 |= ((el_size >> 16) << SEC_BD_W0_C_GRAN_SIZE_19_16_S) &
                SEC_BD_W0_C_GRAN_SIZE_19_16_M;

        req->w0 &= ~SEC_BD_W0_C_GRAN_SIZE_21_20_M;
        req->w0 |= ((el_size >> 20) << SEC_BD_W0_C_GRAN_SIZE_21_20_S) &
                SEC_BD_W0_C_GRAN_SIZE_21_20_M;

        /* Writing whole u32 so no need to take care of masking */
        req->w2 = ((1 << SEC_BD_W2_GRAN_NUM_S) & SEC_BD_W2_GRAN_NUM_M) |
                ((el_size << SEC_BD_W2_C_GRAN_SIZE_15_0_S) &
                 SEC_BD_W2_C_GRAN_SIZE_15_0_M);

        req->w3 &= ~SEC_BD_W3_CIPHER_LEN_OFFSET_M;
        req->w1 |= SEC_BD_W1_ADDR_TYPE;

        el->sgl_in = sgl_in;

        ret = sec_alloc_and_fill_hw_sgl(&el->in, &el->dma_in, el->sgl_in,
                                        n_ents_in, info, gfp);
        if (ret)
                goto err_free_el;

        req->data_addr_lo = lower_32_bits(el->dma_in);
        req->data_addr_hi = upper_32_bits(el->dma_in);

        if (different_dest) {
                el->sgl_out = sgl_out;
                ret = sec_alloc_and_fill_hw_sgl(&el->out, &el->dma_out,
                                                el->sgl_out,
                                                n_ents_out, info, gfp);
                if (ret)
                        goto err_free_hw_sgl_in;

                req->w0 |= SEC_BD_W0_DE;
                req->cipher_destin_addr_lo = lower_32_bits(el->dma_out);
                req->cipher_destin_addr_hi = upper_32_bits(el->dma_out);

        } else {
                req->w0 &= ~SEC_BD_W0_DE;
                req->cipher_destin_addr_lo = lower_32_bits(el->dma_in);
                req->cipher_destin_addr_hi = upper_32_bits(el->dma_in);
        }

        return el;

err_free_hw_sgl_in:
        sec_free_hw_sgl(el->in, el->dma_in, info);
err_free_el:
        kfree(el);

        return ERR_PTR(ret);
}

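/*
 * Main entry point for encrypt/decrypt. The request is split into
 * SEC_REQ_LIMIT sized chunks, each becoming one BD element, and the
 * whole set is queued atomically: either every element is accepted
 * (hardware queue, software queue or backlog) or the request fails.
 */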
static int sec_alg_skcipher_crypto(struct skcipher_request *skreq,
                                   bool encrypt)
{
        struct crypto_skcipher *atfm = crypto_skcipher_reqtfm(skreq);
        struct crypto_tfm *tfm = crypto_skcipher_tfm(atfm);
        struct sec_alg_tfm_ctx *ctx = crypto_tfm_ctx(tfm);
        struct sec_queue *queue = ctx->queue;
        struct sec_request *sec_req = skcipher_request_ctx(skreq);
        struct sec_dev_info *info = queue->dev_info;
        int i, ret, steps;
        size_t *split_sizes;
        struct scatterlist **splits_in;
        struct scatterlist **splits_out = NULL;
        int *splits_in_nents;
        int *splits_out_nents = NULL;
        struct sec_request_el *el, *temp;
        bool split = skreq->src != skreq->dst;
        gfp_t gfp = skreq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
                        GFP_KERNEL : GFP_ATOMIC;

        mutex_init(&sec_req->lock);
        sec_req->req_base = &skreq->base;
        sec_req->err = 0;
        /* SGL mapping out here to allow us to break it up as necessary */
        sec_req->len_in = sg_nents(skreq->src);

        ret = sec_alg_alloc_and_calc_split_sizes(skreq->cryptlen, &split_sizes,
                                                 &steps, gfp);
        if (ret)
                return ret;
        sec_req->num_elements = steps;
        ret = sec_map_and_split_sg(skreq->src, split_sizes, steps, &splits_in,
                                   &splits_in_nents, sec_req->len_in,
                                   info->dev, gfp);
        if (ret)
                goto err_free_split_sizes;

        if (split) {
                sec_req->len_out = sg_nents(skreq->dst);
                ret = sec_map_and_split_sg(skreq->dst, split_sizes, steps,
                                           &splits_out, &splits_out_nents,
                                           sec_req->len_out, info->dev, gfp);
                if (ret)
                        goto err_unmap_in_sg;
        }
        /* Shared info stored in sec_req - applies to all BDs */
        sec_req->tfm_ctx = ctx;
        sec_req->cb = sec_skcipher_alg_callback;
        INIT_LIST_HEAD(&sec_req->elements);

        /*
         * Future optimization.
         * In the chaining case we can't use a dma pool bounce buffer,
         * but in the case where we know there is no chaining we can.
         */
        if (crypto_skcipher_ivsize(atfm)) {
                sec_req->dma_iv = dma_map_single(info->dev, skreq->iv,
                                                 crypto_skcipher_ivsize(atfm),
                                                 DMA_TO_DEVICE);
                if (dma_mapping_error(info->dev, sec_req->dma_iv)) {
                        ret = -ENOMEM;
                        goto err_unmap_out_sg;
                }
        }

        /* Set them all up then queue - cleaner error handling. */
        for (i = 0; i < steps; i++) {
                el = sec_alg_alloc_and_fill_el(&ctx->req_template,
                                               encrypt ? 1 : 0,
                                               split_sizes[i],
                                               skreq->src != skreq->dst,
                                               splits_in[i], splits_in_nents[i],
                                               split ? splits_out[i] : NULL,
                                               split ? splits_out_nents[i] : 0,
                                               info, gfp);
                if (IS_ERR(el)) {
                        ret = PTR_ERR(el);
                        goto err_free_elements;
                }
                el->req.cipher_iv_addr_lo = lower_32_bits(sec_req->dma_iv);
                el->req.cipher_iv_addr_hi = upper_32_bits(sec_req->dma_iv);
                el->sec_req = sec_req;
                list_add_tail(&el->head, &sec_req->elements);
        }

        /*
         * Only attempt to queue if the whole lot can fit in the queue -
         * we can't successfully clean up after a partial queueing so this
         * must succeed or fail atomically.
         *
         * Big hammer test of both software and hardware queues - could be
         * more refined but this is unlikely to happen so no need.
         */

        /* Grab a big lock for a long time to avoid concurrency issues */
        mutex_lock(&queue->queuelock);

        /*
         * Can go on to queue if we have space in either:
         * 1) The hardware queue and no software queue
         * 2) The software queue
         * AND there is nothing in the backlog. If there is anything in the
         * backlog we must queue only to the backlog list and return busy.
         */
        if ((!sec_queue_can_enqueue(queue, steps) &&
             (!queue->havesoftqueue ||
              kfifo_avail(&queue->softqueue) < steps)) ||
            !list_empty(&ctx->backlog)) {
                ret = -EBUSY;
                if ((skreq->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
                        list_add_tail(&sec_req->backlog_head, &ctx->backlog);
                        mutex_unlock(&queue->queuelock);
                        goto out;
                }

                mutex_unlock(&queue->queuelock);
                goto err_free_elements;
        }
        ret = sec_send_request(sec_req, queue);
        mutex_unlock(&queue->queuelock);
        if (ret)
                goto err_free_elements;

        ret = -EINPROGRESS;
out:
        /* Cleanup - all elements in pointer arrays have been copied */
        kfree(splits_in_nents);
        kfree(splits_in);
        kfree(splits_out_nents);
        kfree(splits_out);
        kfree(split_sizes);
        return ret;

err_free_elements:
        list_for_each_entry_safe(el, temp, &sec_req->elements, head) {
                list_del(&el->head);
                sec_alg_free_el(el, info);
        }
        if (crypto_skcipher_ivsize(atfm))
                dma_unmap_single(info->dev, sec_req->dma_iv,
                                 crypto_skcipher_ivsize(atfm),
                                 DMA_TO_DEVICE);
err_unmap_out_sg:
        if (split)
                sec_unmap_sg_on_err(skreq->dst, steps, splits_out,
                                    splits_out_nents, sec_req->len_out,
                                    info->dev);
err_unmap_in_sg:
        sec_unmap_sg_on_err(skreq->src, steps, splits_in, splits_in_nents,
                            sec_req->len_in, info->dev);
err_free_split_sizes:
        kfree(split_sizes);

        return ret;
}

static int sec_alg_skcipher_encrypt(struct skcipher_request *req)
{
        return sec_alg_skcipher_crypto(req, true);
}

static int sec_alg_skcipher_decrypt(struct skcipher_request *req)
{
        return sec_alg_skcipher_crypto(req, false);
}

static int sec_alg_skcipher_init(struct crypto_skcipher *tfm)
{
        struct sec_alg_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);

        mutex_init(&ctx->lock);
        INIT_LIST_HEAD(&ctx->backlog);
        crypto_skcipher_set_reqsize(tfm, sizeof(struct sec_request));

        ctx->queue = sec_queue_alloc_start_safe();
        if (IS_ERR(ctx->queue))
                return PTR_ERR(ctx->queue);

        mutex_init(&ctx->queue->queuelock);
        ctx->queue->havesoftqueue = false;

        return 0;
}

static void sec_alg_skcipher_exit(struct crypto_skcipher *tfm)
{
        struct sec_alg_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
        struct device *dev = ctx->queue->dev_info->dev;

        if (ctx->key) {
                memzero_explicit(ctx->key, SEC_MAX_CIPHER_KEY);
                dma_free_coherent(dev, SEC_MAX_CIPHER_KEY, ctx->key,
                                  ctx->pkey);
        }
        sec_queue_stop_release(ctx->queue);
}

static int sec_alg_skcipher_init_with_queue(struct crypto_skcipher *tfm)
{
        struct sec_alg_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
        int ret;

        ret = sec_alg_skcipher_init(tfm);
        if (ret)
                return ret;

        INIT_KFIFO(ctx->queue->softqueue);
        ret = kfifo_alloc(&ctx->queue->softqueue, 512, GFP_KERNEL);
        if (ret) {
                sec_alg_skcipher_exit(tfm);
                return ret;
        }
        ctx->queue->havesoftqueue = true;

        return 0;
}

static void sec_alg_skcipher_exit_with_queue(struct crypto_skcipher *tfm)
{
        struct sec_alg_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);

        kfifo_free(&ctx->queue->softqueue);
        sec_alg_skcipher_exit(tfm);
}

static struct skcipher_alg sec_algs[] = {
        {
                .base = {
                        .cra_name = "ecb(aes)",
                        .cra_driver_name = "hisi_sec_aes_ecb",
                        .cra_priority = 4001,
                        .cra_flags = CRYPTO_ALG_ASYNC,
                        .cra_blocksize = AES_BLOCK_SIZE,
                        .cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
                        .cra_alignmask = 0,
                        .cra_module = THIS_MODULE,
                },
                .init = sec_alg_skcipher_init,
                .exit = sec_alg_skcipher_exit,
                .setkey = sec_alg_skcipher_setkey_aes_ecb,
                .decrypt = sec_alg_skcipher_decrypt,
                .encrypt = sec_alg_skcipher_encrypt,
                .min_keysize = AES_MIN_KEY_SIZE,
                .max_keysize = AES_MAX_KEY_SIZE,
                .ivsize = 0,
        }, {
                .base = {
                        .cra_name = "cbc(aes)",
                        .cra_driver_name = "hisi_sec_aes_cbc",
                        .cra_priority = 4001,
                        .cra_flags = CRYPTO_ALG_ASYNC,
                        .cra_blocksize = AES_BLOCK_SIZE,
                        .cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
                        .cra_alignmask = 0,
                        .cra_module = THIS_MODULE,
                },
                .init = sec_alg_skcipher_init_with_queue,
                .exit = sec_alg_skcipher_exit_with_queue,
                .setkey = sec_alg_skcipher_setkey_aes_cbc,
                .decrypt = sec_alg_skcipher_decrypt,
                .encrypt = sec_alg_skcipher_encrypt,
                .min_keysize = AES_MIN_KEY_SIZE,
                .max_keysize = AES_MAX_KEY_SIZE,
                .ivsize = AES_BLOCK_SIZE,
        }, {
                .base = {
                        .cra_name = "ctr(aes)",
                        .cra_driver_name = "hisi_sec_aes_ctr",
                        .cra_priority = 4001,
                        .cra_flags = CRYPTO_ALG_ASYNC,
                        .cra_blocksize = AES_BLOCK_SIZE,
                        .cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
                        .cra_alignmask = 0,
                        .cra_module = THIS_MODULE,
                },
                .init = sec_alg_skcipher_init_with_queue,
                .exit = sec_alg_skcipher_exit_with_queue,
                .setkey = sec_alg_skcipher_setkey_aes_ctr,
                .decrypt = sec_alg_skcipher_decrypt,
                .encrypt = sec_alg_skcipher_encrypt,
                .min_keysize = AES_MIN_KEY_SIZE,
                .max_keysize = AES_MAX_KEY_SIZE,
                .ivsize = AES_BLOCK_SIZE,
        }, {
                .base = {
                        .cra_name = "xts(aes)",
                        .cra_driver_name = "hisi_sec_aes_xts",
                        .cra_priority = 4001,
                        .cra_flags = CRYPTO_ALG_ASYNC,
                        .cra_blocksize = AES_BLOCK_SIZE,
                        .cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
                        .cra_alignmask = 0,
                        .cra_module = THIS_MODULE,
                },
                .init = sec_alg_skcipher_init,
                .exit = sec_alg_skcipher_exit,
                .setkey = sec_alg_skcipher_setkey_aes_xts,
                .decrypt = sec_alg_skcipher_decrypt,
                .encrypt = sec_alg_skcipher_encrypt,
                .min_keysize = 2 * AES_MIN_KEY_SIZE,
                .max_keysize = 2 * AES_MAX_KEY_SIZE,
                .ivsize = AES_BLOCK_SIZE,
        }, {
        /* Unable to find any test vectors so untested */
                .base = {
                        .cra_name = "ecb(des)",
                        .cra_driver_name = "hisi_sec_des_ecb",
                        .cra_priority = 4001,
                        .cra_flags = CRYPTO_ALG_ASYNC,
                        .cra_blocksize = DES_BLOCK_SIZE,
                        .cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
                        .cra_alignmask = 0,
                        .cra_module = THIS_MODULE,
                },
                .init = sec_alg_skcipher_init,
                .exit = sec_alg_skcipher_exit,
                .setkey = sec_alg_skcipher_setkey_des_ecb,
                .decrypt = sec_alg_skcipher_decrypt,
                .encrypt = sec_alg_skcipher_encrypt,
                .min_keysize = DES_KEY_SIZE,
                .max_keysize = DES_KEY_SIZE,
                .ivsize = 0,
        }, {
                .base = {
                        .cra_name = "cbc(des)",
                        .cra_driver_name = "hisi_sec_des_cbc",
                        .cra_priority = 4001,
                        .cra_flags = CRYPTO_ALG_ASYNC,
                        .cra_blocksize = DES_BLOCK_SIZE,
                        .cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
                        .cra_alignmask = 0,
                        .cra_module = THIS_MODULE,
                },
                .init = sec_alg_skcipher_init_with_queue,
                .exit = sec_alg_skcipher_exit_with_queue,
                .setkey = sec_alg_skcipher_setkey_des_cbc,
                .decrypt = sec_alg_skcipher_decrypt,
                .encrypt = sec_alg_skcipher_encrypt,
                .min_keysize = DES_KEY_SIZE,
                .max_keysize = DES_KEY_SIZE,
                .ivsize = DES_BLOCK_SIZE,
        }, {
                .base = {
                        .cra_name = "cbc(des3_ede)",
                        .cra_driver_name = "hisi_sec_3des_cbc",
                        .cra_priority = 4001,
                        .cra_flags = CRYPTO_ALG_ASYNC,
                        .cra_blocksize = DES3_EDE_BLOCK_SIZE,
                        .cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
                        .cra_alignmask = 0,
                        .cra_module = THIS_MODULE,
                },
                .init = sec_alg_skcipher_init_with_queue,
                .exit = sec_alg_skcipher_exit_with_queue,
                .setkey = sec_alg_skcipher_setkey_3des_cbc,
                .decrypt = sec_alg_skcipher_decrypt,
                .encrypt = sec_alg_skcipher_encrypt,
                .min_keysize = DES3_EDE_KEY_SIZE,
                .max_keysize = DES3_EDE_KEY_SIZE,
                .ivsize = DES3_EDE_BLOCK_SIZE,
        }, {
                .base = {
                        .cra_name = "ecb(des3_ede)",
                        .cra_driver_name = "hisi_sec_3des_ecb",
                        .cra_priority = 4001,
                        .cra_flags = CRYPTO_ALG_ASYNC,
                        .cra_blocksize = DES3_EDE_BLOCK_SIZE,
                        .cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
                        .cra_alignmask = 0,
                        .cra_module = THIS_MODULE,
                },
                .init = sec_alg_skcipher_init,
                .exit = sec_alg_skcipher_exit,
                .setkey = sec_alg_skcipher_setkey_3des_ecb,
                .decrypt = sec_alg_skcipher_decrypt,
                .encrypt = sec_alg_skcipher_encrypt,
                .min_keysize = DES3_EDE_KEY_SIZE,
                .max_keysize = DES3_EDE_KEY_SIZE,
                .ivsize = 0,
        }
};

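/*
 * Illustrative sketch only, not part of this driver: kernel users reach
 * the algorithms above through the generic skcipher API rather than
 * calling into this file. The function below is hypothetical, assumes
 * the usual crypto_wait_req() synchronous-wait pattern, and trims error
 * handling for brevity.
 */
#if 0
static int example_cbc_aes_encrypt(u8 *buf, unsigned int len,
                                   const u8 *key, unsigned int keylen,
                                   u8 *iv)
{
        struct crypto_skcipher *tfm;
        struct skcipher_request *req;
        struct scatterlist sg;
        DECLARE_CRYPTO_WAIT(wait);
        int ret;

        /* May bind to hisi_sec_aes_cbc if it is the highest priority */
        tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        ret = crypto_skcipher_setkey(tfm, key, keylen);
        if (ret)
                goto out_free_tfm;

        req = skcipher_request_alloc(tfm, GFP_KERNEL);
        if (!req) {
                ret = -ENOMEM;
                goto out_free_tfm;
        }

        skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
                                      CRYPTO_TFM_REQ_MAY_SLEEP,
                                      crypto_req_done, &wait);
        sg_init_one(&sg, buf, len);
        skcipher_request_set_crypt(req, &sg, &sg, len, iv);
        ret = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

        skcipher_request_free(req);
out_free_tfm:
        crypto_free_skcipher(tfm);
        return ret;
}
#endif
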
int sec_algs_register(void)
{
        int ret = 0;

        mutex_lock(&algs_lock);
        if (++active_devs != 1)
                goto unlock;

        ret = crypto_register_skciphers(sec_algs, ARRAY_SIZE(sec_algs));
        if (ret)
                --active_devs;
unlock:
        mutex_unlock(&algs_lock);

        return ret;
}

void sec_algs_unregister(void)
{
        mutex_lock(&algs_lock);
        if (--active_devs != 0)
                goto unlock;
        crypto_unregister_skciphers(sec_algs, ARRAY_SIZE(sec_algs));

unlock:
        mutex_unlock(&algs_lock);
}