// SPDX-License-Identifier: GPL-2.0
/*
 * sun8i-ss-hash.c - hardware cryptographic offloader for
 * Allwinner A80/A83T SoC
 *
 * Copyright (C) 2015-2020 Corentin Labbe <clabbe@baylibre.com>
 *
 * This file adds support for MD5 and SHA1/SHA224/SHA256.
 *
 * You can find the datasheet in Documentation/arm/sunxi.rst
 */
#include <linux/bottom_half.h>
#include <linux/dma-mapping.h>
#include <linux/pm_runtime.h>
#include <linux/scatterlist.h>
#include <crypto/internal/hash.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>
#include <crypto/md5.h>
#include "sun8i-ss.h"
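
/*
 * Only one-shot digest() requests are offloaded to the hardware;
 * init()/update()/final()/finup()/export()/import() always delegate to a
 * software fallback, as the hardware path implemented here cannot resume
 * from an intermediate state. Callers are unaffected since they go through
 * the generic ahash API. A minimal sketch of such a caller (illustrative
 * only, not part of this driver; "data" and "len" are placeholders and
 * error handling is elided):
 *
 *	struct crypto_ahash *tfm = crypto_alloc_ahash("sha256", 0, 0);
 *	struct ahash_request *req = ahash_request_alloc(tfm, GFP_KERNEL);
 *	u8 result[SHA256_DIGEST_SIZE];
 *	struct scatterlist sg;
 *	DECLARE_CRYPTO_WAIT(wait);
 *	int err;
 *
 *	sg_init_one(&sg, data, len);
 *	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *				   crypto_req_done, &wait);
 *	ahash_request_set_crypt(req, &sg, result, len);
 *	err = crypto_wait_req(crypto_ahash_digest(req), &wait);
 *	ahash_request_free(req);
 *	crypto_free_ahash(tfm);
 */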
int sun8i_ss_hash_crainit(struct crypto_tfm *tfm)
{
	struct sun8i_ss_hash_tfm_ctx *op = crypto_tfm_ctx(tfm);
	struct ahash_alg *alg = __crypto_ahash_alg(tfm->__crt_alg);
	struct sun8i_ss_alg_template *algt;
	int err;

	memset(op, 0, sizeof(struct sun8i_ss_hash_tfm_ctx));

	algt = container_of(alg, struct sun8i_ss_alg_template, alg.hash);
	op->ss = algt->ss;

	op->enginectx.op.do_one_request = sun8i_ss_hash_run;
	op->enginectx.op.prepare_request = NULL;
	op->enginectx.op.unprepare_request = NULL;

	/* FALLBACK */
	op->fallback_tfm = crypto_alloc_ahash(crypto_tfm_alg_name(tfm), 0,
					      CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(op->fallback_tfm)) {
		dev_err(algt->ss->dev, "Fallback driver could not be loaded\n");
		return PTR_ERR(op->fallback_tfm);
	}

	/*
	 * export()/import() are delegated to the fallback, so the advertised
	 * statesize must be able to hold the fallback's state blob.
	 */
	if (algt->alg.hash.halg.statesize < crypto_ahash_statesize(op->fallback_tfm))
		algt->alg.hash.halg.statesize = crypto_ahash_statesize(op->fallback_tfm);

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct sun8i_ss_hash_reqctx) +
				 crypto_ahash_reqsize(op->fallback_tfm));

	dev_info(op->ss->dev, "Fallback for %s is %s\n",
		 crypto_tfm_alg_driver_name(tfm),
		 crypto_tfm_alg_driver_name(&op->fallback_tfm->base));

	err = pm_runtime_get_sync(op->ss->dev);
	if (err < 0)
		goto error_pm;
	return 0;
error_pm:
	pm_runtime_put_noidle(op->ss->dev);
	crypto_free_ahash(op->fallback_tfm);
	return err;
}
void sun8i_ss_hash_craexit(struct crypto_tfm *tfm)
{
	struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_tfm_ctx(tfm);

	crypto_free_ahash(tfmctx->fallback_tfm);
	pm_runtime_put_sync_suspend(tfmctx->ss->dev);
}
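
/*
 * The ahash entry points below (init/export/import/final/update/finup and
 * the digest fallback) never drive the hardware: each one mirrors the
 * request onto the fallback tfm, keeping only CRYPTO_TFM_REQ_MAY_SLEEP
 * from the caller's flags so that no completion callback is inherited by
 * the fallback request.
 */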
int sun8i_ss_hash_init(struct ahash_request *areq)
{
	struct sun8i_ss_hash_reqctx *rctx = ahash_request_ctx(areq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);

	memset(rctx, 0, sizeof(struct sun8i_ss_hash_reqctx));

	ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
	rctx->fallback_req.base.flags = areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;

	return crypto_ahash_init(&rctx->fallback_req);
}
int sun8i_ss_hash_export(struct ahash_request *areq, void *out)
{
	struct sun8i_ss_hash_reqctx *rctx = ahash_request_ctx(areq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
	rctx->fallback_req.base.flags = areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;

	return crypto_ahash_export(&rctx->fallback_req, out);
}
int sun8i_ss_hash_import(struct ahash_request *areq, const void *in)
{
	struct sun8i_ss_hash_reqctx *rctx = ahash_request_ctx(areq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
	rctx->fallback_req.base.flags = areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;

	return crypto_ahash_import(&rctx->fallback_req, in);
}
int sun8i_ss_hash_final(struct ahash_request *areq)
{
	struct sun8i_ss_hash_reqctx *rctx = ahash_request_ctx(areq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG
	struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
	struct sun8i_ss_alg_template *algt;
#endif

	ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
	rctx->fallback_req.base.flags = areq->base.flags &
					CRYPTO_TFM_REQ_MAY_SLEEP;
	rctx->fallback_req.result = areq->result;

#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG
	algt = container_of(alg, struct sun8i_ss_alg_template, alg.hash);
	algt->stat_fb++;
#endif

	return crypto_ahash_final(&rctx->fallback_req);
}
int sun8i_ss_hash_update(struct ahash_request *areq)
{
	struct sun8i_ss_hash_reqctx *rctx = ahash_request_ctx(areq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
	rctx->fallback_req.base.flags = areq->base.flags &
					CRYPTO_TFM_REQ_MAY_SLEEP;
	rctx->fallback_req.nbytes = areq->nbytes;
	rctx->fallback_req.src = areq->src;

	return crypto_ahash_update(&rctx->fallback_req);
}
int sun8i_ss_hash_finup(struct ahash_request *areq)
{
	struct sun8i_ss_hash_reqctx *rctx = ahash_request_ctx(areq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG
	struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
	struct sun8i_ss_alg_template *algt;
#endif

	ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
	rctx->fallback_req.base.flags = areq->base.flags &
					CRYPTO_TFM_REQ_MAY_SLEEP;

	rctx->fallback_req.nbytes = areq->nbytes;
	rctx->fallback_req.src = areq->src;
	rctx->fallback_req.result = areq->result;
#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG
	algt = container_of(alg, struct sun8i_ss_alg_template, alg.hash);
	algt->stat_fb++;
#endif

	return crypto_ahash_finup(&rctx->fallback_req);
}
static int sun8i_ss_hash_digest_fb(struct ahash_request *areq)
{
	struct sun8i_ss_hash_reqctx *rctx = ahash_request_ctx(areq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG
	struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
	struct sun8i_ss_alg_template *algt;
#endif

	ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
	rctx->fallback_req.base.flags = areq->base.flags &
					CRYPTO_TFM_REQ_MAY_SLEEP;

	rctx->fallback_req.nbytes = areq->nbytes;
	rctx->fallback_req.src = areq->src;
	rctx->fallback_req.result = areq->result;
#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG
	algt = container_of(alg, struct sun8i_ss_alg_template, alg.hash);
	algt->stat_fb++;
#endif

	return crypto_ahash_digest(&rctx->fallback_req);
}
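
/*
 * sun8i_ss_run_hash_task() programs the rctx->t_src/t_dst pairs one at a
 * time under ss->mlock (the DMA registers are shared by both flows). Every
 * pair writes its intermediate digest to the same result buffer; for pair
 * i > 0 that digest is fed back through SS_KEY_ADR_REG/SS_IV_ADR_REG with
 * BIT(17) set in the control word, so the engine resumes from the state
 * left by the previous chunk.
 */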
static int sun8i_ss_run_hash_task(struct sun8i_ss_dev *ss,
				  struct sun8i_ss_hash_reqctx *rctx,
				  const char *name)
{
	int flow = rctx->flow;
	u32 v = SS_START;
	int i;

#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG
	ss->flows[flow].stat_req++;
#endif

	/* choose between stream0/stream1 */
	if (flow)
		v |= SS_FLOW1;
	else
		v |= SS_FLOW0;

	v |= rctx->method;

	for (i = 0; i < MAX_SG; i++) {
		if (!rctx->t_dst[i].addr)
			break;

		mutex_lock(&ss->mlock);
		if (i > 0) {
			v |= BIT(17);
			writel(rctx->t_dst[i - 1].addr, ss->base + SS_KEY_ADR_REG);
			writel(rctx->t_dst[i - 1].addr, ss->base + SS_IV_ADR_REG);
		}

		dev_dbg(ss->dev,
			"Processing SG %d on flow %d %s ctl=%x %d to %d method=%x src=%x dst=%x\n",
			i, flow, name, v,
			rctx->t_src[i].len, rctx->t_dst[i].len,
			rctx->method, rctx->t_src[i].addr, rctx->t_dst[i].addr);

		writel(rctx->t_src[i].addr, ss->base + SS_SRC_ADR_REG);
		writel(rctx->t_dst[i].addr, ss->base + SS_DST_ADR_REG);
		writel(rctx->t_src[i].len, ss->base + SS_LEN_ADR_REG);
		writel(BIT(0) | BIT(1), ss->base + SS_INT_CTL_REG);

		reinit_completion(&ss->flows[flow].complete);
		ss->flows[flow].status = 0;

		writel(v, ss->base + SS_CTL_REG);
		mutex_unlock(&ss->mlock);
		wait_for_completion_interruptible_timeout(&ss->flows[flow].complete,
							  msecs_to_jiffies(2000));
		if (ss->flows[flow].status == 0) {
			dev_err(ss->dev, "DMA timeout for %s\n", name);
			return -EFAULT;
		}
	}

	return 0;
}
static bool sun8i_ss_hash_need_fallback(struct ahash_request *areq)
{
	struct scatterlist *sg;

	if (areq->nbytes == 0)
		return true;
	/* we need to reserve one SG for the padding one */
	if (sg_nents(areq->src) > MAX_SG - 1)
		return true;
	sg = areq->src;
	while (sg) {
		/* The SS can only hash full blocks, and since it supports
		 * only MD5, SHA1, SHA224 and SHA256, the block size is
		 * always 64 bytes.
		 * TODO: handle requests whose last SG is not a multiple of
		 * 64 bytes long by copying the tail into a new 64-byte SG.
		 */
		if (sg->length % 64 || !IS_ALIGNED(sg->offset, sizeof(u32)))
			return true;
		sg = sg_next(sg);
	}
	return false;
}
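
/*
 * Illustration of the checks above: a request of three SGs of 64/128/64
 * bytes with word-aligned offsets can go to the hardware; a single
 * 100-byte SG cannot (100 % 64 != 0) and is handled by the fallback, as
 * is any zero-length request or one with more than MAX_SG - 1 entries.
 */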
int sun8i_ss_hash_digest(struct ahash_request *areq)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
	struct sun8i_ss_hash_reqctx *rctx = ahash_request_ctx(areq);
	struct sun8i_ss_alg_template *algt;
	struct sun8i_ss_dev *ss;
	struct crypto_engine *engine;
	struct scatterlist *sg;
	int nr_sgs, e, i;

	if (sun8i_ss_hash_need_fallback(areq))
		return sun8i_ss_hash_digest_fb(areq);

	nr_sgs = sg_nents(areq->src);
	if (nr_sgs > MAX_SG - 1)
		return sun8i_ss_hash_digest_fb(areq);

	for_each_sg(areq->src, sg, nr_sgs, i) {
		if (sg->length % 4 || !IS_ALIGNED(sg->offset, sizeof(u32)))
			return sun8i_ss_hash_digest_fb(areq);
	}

	algt = container_of(alg, struct sun8i_ss_alg_template, alg.hash);
	ss = algt->ss;

	e = sun8i_ss_get_engine_number(ss);
	rctx->flow = e;
	engine = ss->flows[e].engine;

	return crypto_transfer_hash_request_to_engine(engine, areq);
}
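
/*
 * From here the request sits in the chosen flow's crypto_engine queue;
 * the engine worker later calls sun8i_ss_hash_run() (registered as
 * do_one_request in sun8i_ss_hash_crainit()) to actually drive the
 * hardware.
 */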
/* sun8i_ss_hash_run - run an ahash request
 * Send the data of the request to the SS along with an extra SG with padding
 */
int sun8i_ss_hash_run(struct crypto_engine *engine, void *breq)
{
	struct ahash_request *areq = container_of(breq, struct ahash_request, base);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
	struct sun8i_ss_hash_reqctx *rctx = ahash_request_ctx(areq);
	struct sun8i_ss_alg_template *algt;
	struct sun8i_ss_dev *ss;
	struct scatterlist *sg;
	int nr_sgs, err, digestsize;
	unsigned int len;
	u64 fill, min_fill, byte_count;
	void *pad, *result;
	int j, i, todo;
	__be64 *bebits;
	__le64 *lebits;
	dma_addr_t addr_res, addr_pad;
	__le32 *bf;

	algt = container_of(alg, struct sun8i_ss_alg_template, alg.hash);
	ss = algt->ss;

	/* SHA224 uses the SHA256 engine, which always emits a 32-byte digest */
	digestsize = algt->alg.hash.halg.digestsize;
	if (digestsize == SHA224_DIGEST_SIZE)
		digestsize = SHA256_DIGEST_SIZE;

	/* the padding can be up to two blocks long */
	pad = kzalloc(algt->alg.hash.halg.base.cra_blocksize * 2, GFP_KERNEL | GFP_DMA);
	if (!pad)
		return -ENOMEM;
	bf = (__le32 *)pad;

	result = kzalloc(digestsize, GFP_KERNEL | GFP_DMA);
	if (!result) {
		kfree(pad);
		return -ENOMEM;
	}
	for (i = 0; i < MAX_SG; i++) {
		rctx->t_dst[i].addr = 0;
		rctx->t_dst[i].len = 0;
	}

#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG
	algt->stat_req++;
#endif

	rctx->method = ss->variant->alg_hash[algt->ss_algo_id];

	nr_sgs = dma_map_sg(ss->dev, areq->src, sg_nents(areq->src), DMA_TO_DEVICE);
	if (nr_sgs <= 0 || nr_sgs > MAX_SG) {
		dev_err(ss->dev, "Invalid sg number %d\n", nr_sgs);
		err = -EINVAL;
		goto theend;
	}

	addr_res = dma_map_single(ss->dev, result, digestsize, DMA_FROM_DEVICE);
	if (dma_mapping_error(ss->dev, addr_res)) {
		dev_err(ss->dev, "DMA map dest\n");
		err = -EINVAL;
		goto theend;
	}

	len = areq->nbytes;
	for_each_sg(areq->src, sg, nr_sgs, i) {
		rctx->t_src[i].addr = sg_dma_address(sg);
		todo = min(len, sg_dma_len(sg));
		rctx->t_src[i].len = todo / 4;
		len -= todo;
		rctx->t_dst[i].addr = addr_res;
		rctx->t_dst[i].len = digestsize / 4;
	}
	if (len > 0) {
		dev_err(ss->dev, "remaining len %d\n", len);
		err = -EINVAL;
		goto theend;
	}
	byte_count = areq->nbytes;
	j = 0;
	bf[j++] = cpu_to_le32(0x80);

	fill = 64 - (byte_count % 64);
	min_fill = 3 * sizeof(u32);

	if (fill < min_fill)
		fill += 64;

	j += (fill - min_fill) / sizeof(u32);

	switch (algt->ss_algo_id) {
	case SS_ID_HASH_MD5:
		lebits = (__le64 *)&bf[j];
		*lebits = cpu_to_le64(byte_count << 3);
		j += 2;
		break;
	case SS_ID_HASH_SHA1:
	case SS_ID_HASH_SHA224:
	case SS_ID_HASH_SHA256:
		bebits = (__be64 *)&bf[j];
		*bebits = cpu_to_be64(byte_count << 3);
		j += 2;
		break;
	}

	/* the padding is sent as one extra SG, chained after the data */
	addr_pad = dma_map_single(ss->dev, pad, j * 4, DMA_TO_DEVICE);
	rctx->t_src[i].addr = addr_pad;
	rctx->t_src[i].len = j;
	rctx->t_dst[i].addr = addr_res;
	rctx->t_dst[i].len = digestsize / 4;
	if (dma_mapping_error(ss->dev, addr_pad)) {
		dev_err(ss->dev, "DMA error on padding SG\n");
		err = -EINVAL;
		goto theend;
	}
	err = sun8i_ss_run_hash_task(ss, rctx, crypto_tfm_alg_name(areq->base.tfm));

	dma_unmap_single(ss->dev, addr_pad, j * 4, DMA_TO_DEVICE);
	dma_unmap_sg(ss->dev, areq->src, sg_nents(areq->src),
		     DMA_TO_DEVICE);
	dma_unmap_single(ss->dev, addr_res, digestsize, DMA_FROM_DEVICE);

	/* copy only the announced digest size (28 bytes for SHA224) */
	memcpy(areq->result, result, algt->alg.hash.halg.digestsize);
theend:
	kfree(pad);
	kfree(result);
	local_bh_disable();
	crypto_finalize_hash_request(engine, breq, err);
	local_bh_enable();
	return 0;
}