// SPDX-License-Identifier: GPL-2.0
/*
 * sun8i-ss-hash.c - hardware cryptographic offloader for
 * Allwinner A80/A83T SoC
 *
 * Copyright (C) 2015-2020 Corentin Labbe <clabbe@baylibre.com>
 *
 * This file adds support for MD5 and SHA1/SHA224/SHA256.
 *
 * You can find the datasheet in Documentation/arm/sunxi.rst
 */
#include <linux/bottom_half.h>
#include <linux/dma-mapping.h>
#include <linux/pm_runtime.h>
#include <linux/scatterlist.h>
#include <crypto/internal/hash.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>
#include <crypto/md5.h>
#include "sun8i-ss.h"

int sun8i_ss_hash_crainit(struct crypto_tfm *tfm)
{
	struct sun8i_ss_hash_tfm_ctx *op = crypto_tfm_ctx(tfm);
	struct ahash_alg *alg = __crypto_ahash_alg(tfm->__crt_alg);
	struct sun8i_ss_alg_template *algt;
	int err;

	memset(op, 0, sizeof(struct sun8i_ss_hash_tfm_ctx));

	algt = container_of(alg, struct sun8i_ss_alg_template, alg.hash);
	op->ss = algt->ss;

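	/* requests for this tfm are handled one at a time by the crypto engine */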
	op->enginectx.op.do_one_request = sun8i_ss_hash_run;
	op->enginectx.op.prepare_request = NULL;
	op->enginectx.op.unprepare_request = NULL;

	/* allocate a software fallback for requests the hardware cannot handle */
	op->fallback_tfm = crypto_alloc_ahash(crypto_tfm_alg_name(tfm), 0,
					      CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(op->fallback_tfm)) {
		dev_err(algt->ss->dev, "Fallback driver could not be loaded\n");
		return PTR_ERR(op->fallback_tfm);
	}

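	/*
	 * The fallback may need more state than this driver advertises;
	 * expose the larger of the two sizes so export()/import() through
	 * the fallback always has enough room.
	 */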
	if (algt->alg.hash.halg.statesize < crypto_ahash_statesize(op->fallback_tfm))
		algt->alg.hash.halg.statesize = crypto_ahash_statesize(op->fallback_tfm);

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct sun8i_ss_hash_reqctx) +
				 crypto_ahash_reqsize(op->fallback_tfm));

	dev_info(op->ss->dev, "Fallback for %s is %s\n",
		 crypto_tfm_alg_driver_name(tfm),
		 crypto_tfm_alg_driver_name(&op->fallback_tfm->base));
	err = pm_runtime_get_sync(op->ss->dev);
	if (err < 0)
		goto error_pm;
	return 0;
error_pm:
	pm_runtime_put_noidle(op->ss->dev);
	crypto_free_ahash(op->fallback_tfm);
	return err;
}

void sun8i_ss_hash_craexit(struct crypto_tfm *tfm)
{
	struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_tfm_ctx(tfm);

	crypto_free_ahash(tfmctx->fallback_tfm);
	pm_runtime_put_sync_suspend(tfmctx->ss->dev);
}

int sun8i_ss_hash_init(struct ahash_request *areq)
{
	struct sun8i_ss_hash_reqctx *rctx = ahash_request_ctx(areq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);

	memset(rctx, 0, sizeof(struct sun8i_ss_hash_reqctx));

	ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
	rctx->fallback_req.base.flags = areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;

	return crypto_ahash_init(&rctx->fallback_req);
}

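/*
 * The SS has no way to export or resume a partial hash state, so init(),
 * update(), final(), finup(), export() and import() all delegate to the
 * software fallback; only digest() can take the hardware path.
 */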
int sun8i_ss_hash_export(struct ahash_request *areq, void *out)
{
	struct sun8i_ss_hash_reqctx *rctx = ahash_request_ctx(areq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
	rctx->fallback_req.base.flags = areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;

	return crypto_ahash_export(&rctx->fallback_req, out);
}

int sun8i_ss_hash_import(struct ahash_request *areq, const void *in)
{
	struct sun8i_ss_hash_reqctx *rctx = ahash_request_ctx(areq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
	rctx->fallback_req.base.flags = areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;

	return crypto_ahash_import(&rctx->fallback_req, in);
}

int sun8i_ss_hash_final(struct ahash_request *areq)
{
	struct sun8i_ss_hash_reqctx *rctx = ahash_request_ctx(areq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG
	struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
	struct sun8i_ss_alg_template *algt;
#endif

	ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
	rctx->fallback_req.base.flags = areq->base.flags &
					CRYPTO_TFM_REQ_MAY_SLEEP;
	rctx->fallback_req.result = areq->result;

#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG
	algt = container_of(alg, struct sun8i_ss_alg_template, alg.hash);
	algt->stat_fb++;
#endif

	return crypto_ahash_final(&rctx->fallback_req);
}

int sun8i_ss_hash_update(struct ahash_request *areq)
{
	struct sun8i_ss_hash_reqctx *rctx = ahash_request_ctx(areq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
	rctx->fallback_req.base.flags = areq->base.flags &
					CRYPTO_TFM_REQ_MAY_SLEEP;
	rctx->fallback_req.nbytes = areq->nbytes;
	rctx->fallback_req.src = areq->src;

	return crypto_ahash_update(&rctx->fallback_req);
}

int sun8i_ss_hash_finup(struct ahash_request *areq)
{
	struct sun8i_ss_hash_reqctx *rctx = ahash_request_ctx(areq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG
	struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
	struct sun8i_ss_alg_template *algt;
#endif

	ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
	rctx->fallback_req.base.flags = areq->base.flags &
					CRYPTO_TFM_REQ_MAY_SLEEP;

	rctx->fallback_req.nbytes = areq->nbytes;
	rctx->fallback_req.src = areq->src;
	rctx->fallback_req.result = areq->result;
#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG
	algt = container_of(alg, struct sun8i_ss_alg_template, alg.hash);
	algt->stat_fb++;
#endif

	return crypto_ahash_finup(&rctx->fallback_req);
}

static int sun8i_ss_hash_digest_fb(struct ahash_request *areq)
{
	struct sun8i_ss_hash_reqctx *rctx = ahash_request_ctx(areq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG
	struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
	struct sun8i_ss_alg_template *algt;
#endif

	ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
	rctx->fallback_req.base.flags = areq->base.flags &
					CRYPTO_TFM_REQ_MAY_SLEEP;

	rctx->fallback_req.nbytes = areq->nbytes;
	rctx->fallback_req.src = areq->src;
	rctx->fallback_req.result = areq->result;
#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG
	algt = container_of(alg, struct sun8i_ss_alg_template, alg.hash);
	algt->stat_fb++;
#endif

	return crypto_ahash_digest(&rctx->fallback_req);
}

static int sun8i_ss_run_hash_task(struct sun8i_ss_dev *ss,
				  struct sun8i_ss_hash_reqctx *rctx,
				  const char *name)
{
	int flow = rctx->flow;
	u32 v = SS_START;
	int i;

#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG
	ss->flows[flow].stat_req++;
#endif

	/* choose between stream0/stream1 */
	if (flow)
		v |= SS_FLOW1;
	else
		v |= SS_FLOW0;

	v |= rctx->method;

	for (i = 0; i < MAX_SG; i++) {
		if (!rctx->t_dst[i].addr)
			break;

		mutex_lock(&ss->mlock);
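		/*
		 * Chain this chunk to the previous one: point the KEY/IV
		 * address registers at the partial digest the previous chunk
		 * wrote out (BIT(17) appears to enable loading that initial
		 * state) so the engine resumes instead of starting over.
		 */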
		if (i > 0) {
			v |= BIT(17);
			writel(rctx->t_dst[i - 1].addr, ss->base + SS_KEY_ADR_REG);
			writel(rctx->t_dst[i - 1].addr, ss->base + SS_IV_ADR_REG);
		}

		dev_dbg(ss->dev,
			"Processing SG %d on flow %d %s ctl=%x %d to %d method=%x src=%x dst=%x\n",
			i, flow, name, v,
			rctx->t_src[i].len, rctx->t_dst[i].len,
			rctx->method, rctx->t_src[i].addr, rctx->t_dst[i].addr);

		writel(rctx->t_src[i].addr, ss->base + SS_SRC_ADR_REG);
		writel(rctx->t_dst[i].addr, ss->base + SS_DST_ADR_REG);
		writel(rctx->t_src[i].len, ss->base + SS_LEN_ADR_REG);
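		/* re-arm the interrupt enables (one bit per flow) before starting */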
		writel(BIT(0) | BIT(1), ss->base + SS_INT_CTL_REG);

		reinit_completion(&ss->flows[flow].complete);
		ss->flows[flow].status = 0;
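		/* the completion re-init above must land before the engine starts */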
		wmb();

		writel(v, ss->base + SS_CTL_REG);
		mutex_unlock(&ss->mlock);
		wait_for_completion_interruptible_timeout(&ss->flows[flow].complete,
							  msecs_to_jiffies(2000));
		if (ss->flows[flow].status == 0) {
			dev_err(ss->dev, "DMA timeout for %s\n", name);
			return -EFAULT;
		}
	}

	return 0;
}

static bool sun8i_ss_hash_need_fallback(struct ahash_request *areq)
{
	struct scatterlist *sg;

	if (areq->nbytes == 0)
		return true;
	/* one SG slot must stay free for the padding SG */
	if (sg_nents(areq->src) > MAX_SG - 1)
		return true;
	sg = areq->src;
	while (sg) {
		/*
		 * The SS can only hash whole blocks. Since it supports only
		 * MD5, SHA1, SHA224 and SHA256, the block size is always 64
		 * bytes.
		 * TODO: handle a request whose last SG is not a multiple of
		 * 64 bytes by copying the tail into a fresh 64-byte SG.
		 */
		if (sg->length % 64 || !IS_ALIGNED(sg->offset, sizeof(u32)))
			return true;
		sg = sg_next(sg);
	}
	return false;
}

int sun8i_ss_hash_digest(struct ahash_request *areq)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
	struct sun8i_ss_hash_reqctx *rctx = ahash_request_ctx(areq);
	struct sun8i_ss_alg_template *algt;
	struct sun8i_ss_dev *ss;
	struct crypto_engine *engine;
	struct scatterlist *sg;
	int nr_sgs, e, i;

	if (sun8i_ss_hash_need_fallback(areq))
		return sun8i_ss_hash_digest_fb(areq);

	nr_sgs = sg_nents(areq->src);
	if (nr_sgs > MAX_SG - 1)
		return sun8i_ss_hash_digest_fb(areq);

	for_each_sg(areq->src, sg, nr_sgs, i) {
		if (sg->length % 4 || !IS_ALIGNED(sg->offset, sizeof(u32)))
			return sun8i_ss_hash_digest_fb(areq);
	}

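	/* the request is hardware-friendly: pick a flow and queue it on that flow's engine */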
	algt = container_of(alg, struct sun8i_ss_alg_template, alg.hash);
	ss = algt->ss;

	e = sun8i_ss_get_engine_number(ss);
	rctx->flow = e;
	engine = ss->flows[e].engine;

	return crypto_transfer_hash_request_to_engine(engine, areq);
}

/*
 * sun8i_ss_hash_run - run an ahash request
 * Send the data of the request to the SS along with an extra SG with padding
 */
int sun8i_ss_hash_run(struct crypto_engine *engine, void *breq)
{
	struct ahash_request *areq = container_of(breq, struct ahash_request, base);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
	struct sun8i_ss_hash_reqctx *rctx = ahash_request_ctx(areq);
	struct sun8i_ss_alg_template *algt;
	struct sun8i_ss_dev *ss;
	struct scatterlist *sg;
	int nr_sgs, err, digestsize;
	unsigned int len;
	u64 fill, min_fill, byte_count;
	void *pad, *result;
	int j, i, todo;
	__be64 *bebits;
	__le64 *lebits;
	dma_addr_t addr_res, addr_pad;
	__le32 *bf;

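	/*
	 * Map every source SG to a hardware task, append one extra task for
	 * the padding block, run the whole chain, then copy the digest back
	 * into the request.
	 */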
	algt = container_of(alg, struct sun8i_ss_alg_template, alg.hash);
	ss = algt->ss;

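	/*
	 * SHA224 runs on the SHA256 engine and the hardware writes out the
	 * full SHA256 state; size the result buffer accordingly, the final
	 * memcpy() truncates to the real digest size.
	 */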
	digestsize = algt->alg.hash.halg.digestsize;
	if (digestsize == SHA224_DIGEST_SIZE)
		digestsize = SHA256_DIGEST_SIZE;

	/* the padding can span up to two blocks */
	pad = kzalloc(algt->alg.hash.halg.base.cra_blocksize * 2, GFP_KERNEL | GFP_DMA);
	if (!pad)
		return -ENOMEM;
	bf = (__le32 *)pad;

	result = kzalloc(digestsize, GFP_KERNEL | GFP_DMA);
	if (!result) {
		kfree(pad);
		return -ENOMEM;
	}

	for (i = 0; i < MAX_SG; i++) {
		rctx->t_dst[i].addr = 0;
		rctx->t_dst[i].len = 0;
	}

#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG
	algt->stat_req++;
#endif

	rctx->method = ss->variant->alg_hash[algt->ss_algo_id];

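	/* map the source scatterlist for the device; one task per SG entry */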
	nr_sgs = dma_map_sg(ss->dev, areq->src, sg_nents(areq->src), DMA_TO_DEVICE);
	if (nr_sgs <= 0 || nr_sgs > MAX_SG) {
		dev_err(ss->dev, "Invalid sg number %d\n", nr_sgs);
		err = -EINVAL;
		goto theend;
	}

	addr_res = dma_map_single(ss->dev, result, digestsize, DMA_FROM_DEVICE);
	if (dma_mapping_error(ss->dev, addr_res)) {
		dev_err(ss->dev, "DMA map dest\n");
		err = -EINVAL;
		goto theend;
	}

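	/*
	 * Build the per-SG tasks: lengths are given in 32-bit words, and
	 * every task points its destination at the same result buffer.
	 */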
	len = areq->nbytes;
	for_each_sg(areq->src, sg, nr_sgs, i) {
		rctx->t_src[i].addr = sg_dma_address(sg);
		todo = min(len, sg_dma_len(sg));
		rctx->t_src[i].len = todo / 4;
		len -= todo;
		rctx->t_dst[i].addr = addr_res;
		rctx->t_dst[i].len = digestsize / 4;
	}
	if (len > 0) {
		dev_err(ss->dev, "remaining len %d\n", len);
		err = -EINVAL;
		goto theend;
	}

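	/*
	 * Build the MD5/SHA padding in "pad": a 0x80 byte right after the
	 * message, zeroes up to the last 8 bytes of a block, then the total
	 * message length in bits as a 64-bit integer (little-endian for MD5,
	 * big-endian for SHA1/SHA224/SHA256). min_fill is the smallest room
	 * the padding needs: one u32 holding the 0x80 marker plus the u64
	 * length; if less than that remains in the block, the padding spills
	 * into a second block. j counts the padding length in u32 words.
	 */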
	byte_count = areq->nbytes;
	j = 0;
	bf[j++] = cpu_to_le32(0x80);

	fill = 64 - (byte_count % 64);
	min_fill = 3 * sizeof(u32);

	if (fill < min_fill)
		fill += 64;

	j += (fill - min_fill) / sizeof(u32);

	switch (algt->ss_algo_id) {
	case SS_ID_HASH_MD5:
		lebits = (__le64 *)&bf[j];
		*lebits = cpu_to_le64(byte_count << 3);
		j += 2;
		break;
	case SS_ID_HASH_SHA1:
	case SS_ID_HASH_SHA224:
	case SS_ID_HASH_SHA256:
		bebits = (__be64 *)&bf[j];
		*bebits = cpu_to_be64(byte_count << 3);
		j += 2;
		break;
	}

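	/* append the padding as the final task; after the loop above, i indexes the first free slot */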
	addr_pad = dma_map_single(ss->dev, pad, j * 4, DMA_TO_DEVICE);
	rctx->t_src[i].addr = addr_pad;
	rctx->t_src[i].len = j;
	rctx->t_dst[i].addr = addr_res;
	rctx->t_dst[i].len = digestsize / 4;
	if (dma_mapping_error(ss->dev, addr_pad)) {
		dev_err(ss->dev, "DMA error on padding SG\n");
		err = -EINVAL;
		goto theend;
	}

	err = sun8i_ss_run_hash_task(ss, rctx, crypto_tfm_alg_name(areq->base.tfm));

	dma_unmap_single(ss->dev, addr_pad, j * 4, DMA_TO_DEVICE);
	dma_unmap_sg(ss->dev, areq->src, sg_nents(areq->src),
		     DMA_TO_DEVICE);
	dma_unmap_single(ss->dev, addr_res, digestsize, DMA_FROM_DEVICE);

	memcpy(areq->result, result, algt->alg.hash.halg.digestsize);
theend:
	kfree(pad);
	kfree(result);
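	/* the completion callback may interact with softirq users; finalize with BHs disabled */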
	local_bh_disable();
	crypto_finalize_hash_request(engine, breq, err);
	local_bh_enable();
	return 0;
}