drivers/crypto/rockchip/rk3288_crypto_ahash.c (GNU Linux-libre 6.7.9-gnu)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Crypto acceleration support for Rockchip RK3288
 *
 * Copyright (c) 2015, Fuzhou Rockchip Electronics Co., Ltd
 *
 * Author: Zain Wang <zain.wang@rock-chips.com>
 *
 * Some ideas are from marvell/cesa.c and s5p-sss.c driver.
 */

#include <asm/unaligned.h>
#include <crypto/internal/hash.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/string.h>
#include "rk3288_crypto.h"

/*
 * The hardware cannot hash a zero-length message, so when the request is
 * empty we return the precomputed hash of the empty message instead.
 */

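/*
 * The DMA path can only handle scatterlist entries whose offset and length
 * are 32-bit aligned; anything else must go through the software fallback.
 */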
static bool rk_ahash_need_fallback(struct ahash_request *req)
{
        struct scatterlist *sg;

        sg = req->src;
        while (sg) {
                if (!IS_ALIGNED(sg->offset, sizeof(u32)))
                        return true;
                if (sg->length % 4)
                        return true;
                sg = sg_next(sg);
        }
        return false;
}

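/*
 * Process the whole request on the software fallback transform, counting it
 * in the driver's fallback statistics.
 */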
static int rk_ahash_digest_fb(struct ahash_request *areq)
{
        struct rk_ahash_rctx *rctx = ahash_request_ctx(areq);
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
        struct rk_ahash_ctx *tfmctx = crypto_ahash_ctx(tfm);
        struct ahash_alg *alg = crypto_ahash_alg(tfm);
        struct rk_crypto_tmp *algt = container_of(alg, struct rk_crypto_tmp, alg.hash.base);

        algt->stat_fb++;

        ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
        rctx->fallback_req.base.flags = areq->base.flags &
                                        CRYPTO_TFM_REQ_MAY_SLEEP;

        rctx->fallback_req.nbytes = areq->nbytes;
        rctx->fallback_req.src = areq->src;
        rctx->fallback_req.result = areq->result;

        return crypto_ahash_digest(&rctx->fallback_req);
}

static int zero_message_process(struct ahash_request *req)
{
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        int rk_digest_size = crypto_ahash_digestsize(tfm);

        switch (rk_digest_size) {
        case SHA1_DIGEST_SIZE:
                memcpy(req->result, sha1_zero_message_hash, rk_digest_size);
                break;
        case SHA256_DIGEST_SIZE:
                memcpy(req->result, sha256_zero_message_hash, rk_digest_size);
                break;
        case MD5_DIGEST_SIZE:
                memcpy(req->result, md5_zero_message_hash, rk_digest_size);
                break;
        default:
                return -EINVAL;
        }

        return 0;
}

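/*
 * Prepare the hash unit for a new request: flush it, clear the previous
 * digest output, enable and acknowledge the hash DMA interrupts, select the
 * hash mode and byte swapping, and program the total message length.
 */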
static void rk_ahash_reg_init(struct ahash_request *req,
                              struct rk_crypto_info *dev)
{
        struct rk_ahash_rctx *rctx = ahash_request_ctx(req);
        int reg_status;

        reg_status = CRYPTO_READ(dev, RK_CRYPTO_CTRL) |
                     RK_CRYPTO_HASH_FLUSH | _SBF(0xffff, 16);
        CRYPTO_WRITE(dev, RK_CRYPTO_CTRL, reg_status);

        reg_status = CRYPTO_READ(dev, RK_CRYPTO_CTRL);
        reg_status &= (~RK_CRYPTO_HASH_FLUSH);
        reg_status |= _SBF(0xffff, 16);
        CRYPTO_WRITE(dev, RK_CRYPTO_CTRL, reg_status);

        memset_io(dev->reg + RK_CRYPTO_HASH_DOUT_0, 0, 32);

        CRYPTO_WRITE(dev, RK_CRYPTO_INTENA, RK_CRYPTO_HRDMA_ERR_ENA |
                                            RK_CRYPTO_HRDMA_DONE_ENA);

        CRYPTO_WRITE(dev, RK_CRYPTO_INTSTS, RK_CRYPTO_HRDMA_ERR_INT |
                                            RK_CRYPTO_HRDMA_DONE_INT);

        CRYPTO_WRITE(dev, RK_CRYPTO_HASH_CTRL, rctx->mode |
                                               RK_CRYPTO_HASH_SWAP_DO);

        CRYPTO_WRITE(dev, RK_CRYPTO_CONF, RK_CRYPTO_BYTESWAP_HRFIFO |
                                          RK_CRYPTO_BYTESWAP_BRFIFO |
                                          RK_CRYPTO_BYTESWAP_BTFIFO);

        CRYPTO_WRITE(dev, RK_CRYPTO_HASH_MSG_LEN, req->nbytes);
}

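/*
 * The init/update/final/finup/import/export operations below are simply
 * forwarded to the software fallback; only the one-shot digest() path is
 * handled by the hardware.
 */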
static int rk_ahash_init(struct ahash_request *req)
{
        struct rk_ahash_rctx *rctx = ahash_request_ctx(req);
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct rk_ahash_ctx *ctx = crypto_ahash_ctx(tfm);

        ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
        rctx->fallback_req.base.flags = req->base.flags &
                                        CRYPTO_TFM_REQ_MAY_SLEEP;

        return crypto_ahash_init(&rctx->fallback_req);
}

static int rk_ahash_update(struct ahash_request *req)
{
        struct rk_ahash_rctx *rctx = ahash_request_ctx(req);
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct rk_ahash_ctx *ctx = crypto_ahash_ctx(tfm);

        ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
        rctx->fallback_req.base.flags = req->base.flags &
                                        CRYPTO_TFM_REQ_MAY_SLEEP;
        rctx->fallback_req.nbytes = req->nbytes;
        rctx->fallback_req.src = req->src;

        return crypto_ahash_update(&rctx->fallback_req);
}

static int rk_ahash_final(struct ahash_request *req)
{
        struct rk_ahash_rctx *rctx = ahash_request_ctx(req);
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct rk_ahash_ctx *ctx = crypto_ahash_ctx(tfm);

        ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
        rctx->fallback_req.base.flags = req->base.flags &
                                        CRYPTO_TFM_REQ_MAY_SLEEP;
        rctx->fallback_req.result = req->result;

        return crypto_ahash_final(&rctx->fallback_req);
}

static int rk_ahash_finup(struct ahash_request *req)
{
        struct rk_ahash_rctx *rctx = ahash_request_ctx(req);
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct rk_ahash_ctx *ctx = crypto_ahash_ctx(tfm);

        ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
        rctx->fallback_req.base.flags = req->base.flags &
                                        CRYPTO_TFM_REQ_MAY_SLEEP;

        rctx->fallback_req.nbytes = req->nbytes;
        rctx->fallback_req.src = req->src;
        rctx->fallback_req.result = req->result;

        return crypto_ahash_finup(&rctx->fallback_req);
}

static int rk_ahash_import(struct ahash_request *req, const void *in)
{
        struct rk_ahash_rctx *rctx = ahash_request_ctx(req);
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct rk_ahash_ctx *ctx = crypto_ahash_ctx(tfm);

        ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
        rctx->fallback_req.base.flags = req->base.flags &
                                        CRYPTO_TFM_REQ_MAY_SLEEP;

        return crypto_ahash_import(&rctx->fallback_req, in);
}

static int rk_ahash_export(struct ahash_request *req, void *out)
{
        struct rk_ahash_rctx *rctx = ahash_request_ctx(req);
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct rk_ahash_ctx *ctx = crypto_ahash_ctx(tfm);

        ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
        rctx->fallback_req.base.flags = req->base.flags &
                                        CRYPTO_TFM_REQ_MAY_SLEEP;

        return crypto_ahash_export(&rctx->fallback_req, out);
}

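/*
 * Digest entry point: requests the hardware cannot handle go to the software
 * fallback, an empty message gets its precomputed digest, and everything else
 * is queued on the crypto engine.
 */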
static int rk_ahash_digest(struct ahash_request *req)
{
        struct rk_ahash_rctx *rctx = ahash_request_ctx(req);
        struct rk_crypto_info *dev;
        struct crypto_engine *engine;

        if (rk_ahash_need_fallback(req))
                return rk_ahash_digest_fb(req);

        if (!req->nbytes)
                return zero_message_process(req);

        dev = get_rk_crypto();

        rctx->dev = dev;
        engine = dev->engine;

        return crypto_transfer_hash_request_to_engine(engine, req);
}

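/*
 * Point the hash DMA at one scatterlist entry (length is given in 32-bit
 * words) and start the hash operation.
 */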
static void crypto_ahash_dma_start(struct rk_crypto_info *dev, struct scatterlist *sg)
{
        CRYPTO_WRITE(dev, RK_CRYPTO_HRDMAS, sg_dma_address(sg));
        CRYPTO_WRITE(dev, RK_CRYPTO_HRDMAL, sg_dma_len(sg) / 4);
        CRYPTO_WRITE(dev, RK_CRYPTO_CTRL, RK_CRYPTO_HASH_START |
                                          (RK_CRYPTO_HASH_START << 16));
}

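/*
 * Map the source scatterlist for DMA and remember how many entries were
 * mapped so rk_hash_unprepare() can undo the mapping.
 */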
static int rk_hash_prepare(struct crypto_engine *engine, void *breq)
{
        struct ahash_request *areq = container_of(breq, struct ahash_request, base);
        struct rk_ahash_rctx *rctx = ahash_request_ctx(areq);
        struct rk_crypto_info *rkc = rctx->dev;
        int ret;

        ret = dma_map_sg(rkc->dev, areq->src, sg_nents(areq->src), DMA_TO_DEVICE);
        if (ret <= 0)
                return -EINVAL;

        rctx->nrsg = ret;

        return 0;
}

static void rk_hash_unprepare(struct crypto_engine *engine, void *breq)
{
        struct ahash_request *areq = container_of(breq, struct ahash_request, base);
        struct rk_ahash_rctx *rctx = ahash_request_ctx(areq);
        struct rk_crypto_info *rkc = rctx->dev;

        dma_unmap_sg(rkc->dev, areq->src, rctx->nrsg, DMA_TO_DEVICE);
}

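/*
 * crypto_engine callback doing the actual work: map the source data, set up
 * the hash unit, feed each scatterlist entry through DMA, wait for the final
 * digest, copy it to the result buffer and finalize the request.
 */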
static int rk_hash_run(struct crypto_engine *engine, void *breq)
{
        struct ahash_request *areq = container_of(breq, struct ahash_request, base);
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
        struct rk_ahash_rctx *rctx = ahash_request_ctx(areq);
        struct ahash_alg *alg = crypto_ahash_alg(tfm);
        struct rk_crypto_tmp *algt = container_of(alg, struct rk_crypto_tmp, alg.hash.base);
        struct scatterlist *sg = areq->src;
        struct rk_crypto_info *rkc = rctx->dev;
        int err;
        int i;
        u32 v;

        err = pm_runtime_resume_and_get(rkc->dev);
        if (err)
                return err;

        err = rk_hash_prepare(engine, breq);
        if (err)
                goto theend;

        rctx->mode = 0;

        algt->stat_req++;
        rkc->nreq++;

        switch (crypto_ahash_digestsize(tfm)) {
        case SHA1_DIGEST_SIZE:
                rctx->mode = RK_CRYPTO_HASH_SHA1;
                break;
        case SHA256_DIGEST_SIZE:
                rctx->mode = RK_CRYPTO_HASH_SHA256;
                break;
        case MD5_DIGEST_SIZE:
                rctx->mode = RK_CRYPTO_HASH_MD5;
                break;
        default:
                err = -EINVAL;
                goto theend;
        }

        rk_ahash_reg_init(areq, rkc);

        while (sg) {
                reinit_completion(&rkc->complete);
                rkc->status = 0;
                crypto_ahash_dma_start(rkc, sg);
                wait_for_completion_interruptible_timeout(&rkc->complete,
                                                          msecs_to_jiffies(2000));
                if (!rkc->status) {
                        dev_err(rkc->dev, "DMA timeout\n");
                        err = -EFAULT;
                        goto theend;
                }
                sg = sg_next(sg);
        }

        /*
         * The engine needs some time to finish processing the data after the
         * last DMA transfer completes, and that time depends on the length of
         * the final chunk, so a fixed delay cannot be used here. Polling every
         * 10us avoids hammering the register while still reacting quickly once
         * the hash is ready.
         */
        readl_poll_timeout(rkc->reg + RK_CRYPTO_HASH_STS, v, v == 0, 10, 1000);

        for (i = 0; i < crypto_ahash_digestsize(tfm) / 4; i++) {
                v = readl(rkc->reg + RK_CRYPTO_HASH_DOUT_0 + i * 4);
                put_unaligned_le32(v, areq->result + i * 4);
        }

theend:
        pm_runtime_put_autosuspend(rkc->dev);

        local_bh_disable();
        crypto_finalize_hash_request(engine, breq, err);
        local_bh_enable();

        rk_hash_unprepare(engine, breq);

        return 0;
}

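/*
 * Allocate the software fallback transform and size the request context so
 * it can also hold the fallback's own request.
 */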
static int rk_hash_init_tfm(struct crypto_ahash *tfm)
{
        struct rk_ahash_ctx *tctx = crypto_ahash_ctx(tfm);
        const char *alg_name = crypto_ahash_alg_name(tfm);
        struct ahash_alg *alg = crypto_ahash_alg(tfm);
        struct rk_crypto_tmp *algt = container_of(alg, struct rk_crypto_tmp, alg.hash.base);

        /* for fallback */
        tctx->fallback_tfm = crypto_alloc_ahash(alg_name, 0,
                                                CRYPTO_ALG_NEED_FALLBACK);
        if (IS_ERR(tctx->fallback_tfm)) {
                dev_err(algt->dev->dev, "Could not load fallback driver.\n");
                return PTR_ERR(tctx->fallback_tfm);
        }

        crypto_ahash_set_reqsize(tfm,
                                 sizeof(struct rk_ahash_rctx) +
                                 crypto_ahash_reqsize(tctx->fallback_tfm));

        return 0;
}

static void rk_hash_exit_tfm(struct crypto_ahash *tfm)
{
        struct rk_ahash_ctx *tctx = crypto_ahash_ctx(tfm);

        crypto_free_ahash(tctx->fallback_tfm);
}

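/*
 * Exported algorithm definitions: SHA-1, SHA-256 and MD5, each registered at
 * priority 300 as an asynchronous hash with a required software fallback.
 */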
struct rk_crypto_tmp rk_ahash_sha1 = {
        .type = CRYPTO_ALG_TYPE_AHASH,
        .alg.hash.base = {
                .init = rk_ahash_init,
                .update = rk_ahash_update,
                .final = rk_ahash_final,
                .finup = rk_ahash_finup,
                .export = rk_ahash_export,
                .import = rk_ahash_import,
                .digest = rk_ahash_digest,
                .init_tfm = rk_hash_init_tfm,
                .exit_tfm = rk_hash_exit_tfm,
                .halg = {
                         .digestsize = SHA1_DIGEST_SIZE,
                         .statesize = sizeof(struct sha1_state),
                         .base = {
                                  .cra_name = "sha1",
                                  .cra_driver_name = "rk-sha1",
                                  .cra_priority = 300,
                                  .cra_flags = CRYPTO_ALG_ASYNC |
                                               CRYPTO_ALG_NEED_FALLBACK,
                                  .cra_blocksize = SHA1_BLOCK_SIZE,
                                  .cra_ctxsize = sizeof(struct rk_ahash_ctx),
                                  .cra_module = THIS_MODULE,
                        }
                }
        },
        .alg.hash.op = {
                .do_one_request = rk_hash_run,
        },
};

struct rk_crypto_tmp rk_ahash_sha256 = {
        .type = CRYPTO_ALG_TYPE_AHASH,
        .alg.hash.base = {
                .init = rk_ahash_init,
                .update = rk_ahash_update,
                .final = rk_ahash_final,
                .finup = rk_ahash_finup,
                .export = rk_ahash_export,
                .import = rk_ahash_import,
                .digest = rk_ahash_digest,
                .init_tfm = rk_hash_init_tfm,
                .exit_tfm = rk_hash_exit_tfm,
                .halg = {
                         .digestsize = SHA256_DIGEST_SIZE,
                         .statesize = sizeof(struct sha256_state),
                         .base = {
                                  .cra_name = "sha256",
                                  .cra_driver_name = "rk-sha256",
                                  .cra_priority = 300,
                                  .cra_flags = CRYPTO_ALG_ASYNC |
                                               CRYPTO_ALG_NEED_FALLBACK,
                                  .cra_blocksize = SHA256_BLOCK_SIZE,
                                  .cra_ctxsize = sizeof(struct rk_ahash_ctx),
                                  .cra_module = THIS_MODULE,
                        }
                }
        },
        .alg.hash.op = {
                .do_one_request = rk_hash_run,
        },
};

struct rk_crypto_tmp rk_ahash_md5 = {
        .type = CRYPTO_ALG_TYPE_AHASH,
        .alg.hash.base = {
                .init = rk_ahash_init,
                .update = rk_ahash_update,
                .final = rk_ahash_final,
                .finup = rk_ahash_finup,
                .export = rk_ahash_export,
                .import = rk_ahash_import,
                .digest = rk_ahash_digest,
                .init_tfm = rk_hash_init_tfm,
                .exit_tfm = rk_hash_exit_tfm,
                .halg = {
                         .digestsize = MD5_DIGEST_SIZE,
                         .statesize = sizeof(struct md5_state),
                         .base = {
                                  .cra_name = "md5",
                                  .cra_driver_name = "rk-md5",
                                  .cra_priority = 300,
                                  .cra_flags = CRYPTO_ALG_ASYNC |
                                               CRYPTO_ALG_NEED_FALLBACK,
                                  .cra_blocksize = SHA1_BLOCK_SIZE,
                                  .cra_ctxsize = sizeof(struct rk_ahash_ctx),
                                  .cra_module = THIS_MODULE,
                        }
                }
        },
        .alg.hash.op = {
                .do_one_request = rk_hash_run,
        },
};