/*
 * Multi buffer SHA1 algorithm Glue Code
 *
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2014 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * Contact Information:
 *	Tim Chen <tim.c.chen@linux.intel.com>
 *
 * BSD LICENSE
 *
 * Copyright(c) 2014 Intel Corporation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#include <crypto/internal/hash.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/cryptohash.h>
#include <linux/types.h>
#include <linux/list.h>
#include <crypto/scatterwalk.h>
#include <crypto/sha.h>
#include <crypto/mcryptd.h>
#include <crypto/crypto_wq.h>
#include <asm/byteorder.h>
#include <linux/hardirq.h>
#include <asm/fpu/api.h>
#include "sha1_mb_ctx.h"

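/*
 * Requests queued to a multi-buffer lane can stall if no sibling jobs
 * arrive to fill the remaining lanes. Any job older than FLUSH_INTERVAL
 * is therefore force-completed by the per-cpu flusher (sha1_mb_flusher()
 * below), so a lone request never waits indefinitely.
 */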
#define FLUSH_INTERVAL 1000 /* in usec */

static struct mcryptd_alg_state sha1_mb_alg_state;

struct sha1_mb_ctx {
	struct mcryptd_ahash *mcryptd_tfm;
};

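/*
 * A sha1_hash_ctx lives in the __ctx[] tail of an ahash_request that is
 * itself embedded in an mcryptd_hash_request_ctx, so the two casts below
 * are plain container_of() walks up that nesting.
 */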
static inline struct mcryptd_hash_request_ctx
		*cast_hash_to_mcryptd_ctx(struct sha1_hash_ctx *hash_ctx)
{
	struct ahash_request *areq;

	areq = container_of((void *) hash_ctx, struct ahash_request, __ctx);
	return container_of(areq, struct mcryptd_hash_request_ctx, areq);
}

static inline struct ahash_request
		*cast_mcryptd_ctx_to_req(struct mcryptd_hash_request_ctx *ctx)
{
	return container_of((void *) ctx, struct ahash_request, __ctx);
}

static void req_ctx_init(struct mcryptd_hash_request_ctx *rctx,
			 struct ahash_request *areq)
{
	rctx->flag = HASH_UPDATE;
}

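/*
 * Entry points into the assembly job manager. They are function pointers
 * rather than direct calls; sha1_mb_mod_init() binds them to the AVX2
 * implementations once the required CPU features have been verified.
 */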
static asmlinkage void (*sha1_job_mgr_init)(struct sha1_mb_mgr *state);
static asmlinkage struct job_sha1* (*sha1_job_mgr_submit)
			(struct sha1_mb_mgr *state, struct job_sha1 *job);
static asmlinkage struct job_sha1* (*sha1_job_mgr_flush)
			(struct sha1_mb_mgr *state);
static asmlinkage struct job_sha1* (*sha1_job_mgr_get_comp_job)
			(struct sha1_mb_mgr *state);

static inline void sha1_init_digest(uint32_t *digest)
{
	static const uint32_t initial_digest[SHA1_DIGEST_LENGTH] = {SHA1_H0,
			SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4 };

	memcpy(digest, initial_digest, sizeof(initial_digest));
}

static inline uint32_t sha1_pad(uint8_t padblock[SHA1_BLOCK_SIZE * 2],
				uint32_t total_len)
{
	uint32_t i = total_len & (SHA1_BLOCK_SIZE - 1);

	memset(&padblock[i], 0, SHA1_BLOCK_SIZE);
	padblock[i] = 0x80;

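	/*
	 * Advance i to the end of the padding: round total_len up to the
	 * next block boundary that still leaves room for the 0x80
	 * terminator byte plus the 64-bit big-endian bit-length field
	 * required by SHA-1 padding.
	 */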
	i += ((SHA1_BLOCK_SIZE - 1) &
	      (0 - (total_len + SHA1_PADLENGTHFIELD_SIZE + 1)))
	     + 1 + SHA1_PADLENGTHFIELD_SIZE;

#if SHA1_PADLENGTHFIELD_SIZE == 16
	*((uint64_t *) &padblock[i - 16]) = 0;
#endif

	*((uint64_t *) &padblock[i - 8]) = cpu_to_be64(total_len << 3);

	/* Number of extra blocks to hash */
	return i >> SHA1_LOG2_BLOCK_SIZE;
}

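/*
 * Drive a context forward after the job manager hands it back: keep
 * resubmitting whole blocks (and, once HASH_CTX_STS_LAST is set, the
 * padded extra blocks) until the context either completes or must wait
 * for more user data.
 */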
static struct sha1_hash_ctx *sha1_ctx_mgr_resubmit(struct sha1_ctx_mgr *mgr,
						struct sha1_hash_ctx *ctx)
{
	while (ctx) {
		if (ctx->status & HASH_CTX_STS_COMPLETE) {
			/* Clear PROCESSING bit */
			ctx->status = HASH_CTX_STS_COMPLETE;
			return ctx;
		}

		/*
		 * If the extra blocks are empty, begin hashing what remains
		 * in the user's buffer.
		 */
		if (ctx->partial_block_buffer_length == 0 &&
		    ctx->incoming_buffer_length) {

			const void *buffer = ctx->incoming_buffer;
			uint32_t len = ctx->incoming_buffer_length;
			uint32_t copy_len;

			/*
			 * Only entire blocks can be hashed.
			 * Copy remainder to extra blocks buffer.
			 */
			copy_len = len & (SHA1_BLOCK_SIZE-1);

			if (copy_len) {
				len -= copy_len;
				memcpy(ctx->partial_block_buffer,
				       ((const char *) buffer + len),
				       copy_len);
				ctx->partial_block_buffer_length = copy_len;
			}

			ctx->incoming_buffer_length = 0;

			/* len should be a multiple of the block size now */
			assert((len % SHA1_BLOCK_SIZE) == 0);

			/* Set len to the number of blocks to be hashed */
			len >>= SHA1_LOG2_BLOCK_SIZE;

			if (len) {
				ctx->job.buffer = (uint8_t *) buffer;
				ctx->job.len = len;
				ctx = (struct sha1_hash_ctx *)sha1_job_mgr_submit(&mgr->mgr,
										  &ctx->job);
				continue;
			}
		}

		/*
		 * If the extra blocks are not empty, then we are
		 * either on the last block(s) or we need more
		 * user input before continuing.
		 */
		if (ctx->status & HASH_CTX_STS_LAST) {
			uint8_t *buf = ctx->partial_block_buffer;
			uint32_t n_extra_blocks =
					sha1_pad(buf, ctx->total_length);

			ctx->status = (HASH_CTX_STS_PROCESSING |
				       HASH_CTX_STS_COMPLETE);
			ctx->job.buffer = buf;
			ctx->job.len = (uint32_t) n_extra_blocks;
			ctx = (struct sha1_hash_ctx *)
				sha1_job_mgr_submit(&mgr->mgr, &ctx->job);
			continue;
		}

		/* Waiting for more user data; park the context. */
		ctx->status = HASH_CTX_STS_IDLE;
		return ctx;
	}

	return NULL;
}

static struct sha1_hash_ctx
		*sha1_ctx_mgr_get_comp_ctx(struct sha1_ctx_mgr *mgr)
{
	/*
	 * If get_comp_job returns NULL, there are no jobs complete.
	 * If get_comp_job returns a job, verify that it is safe to return to
	 * the user.
	 * If it is not ready, resubmit the job to finish processing.
	 * If sha1_ctx_mgr_resubmit returned a job, it is ready to be returned.
	 * Otherwise, all jobs currently being managed by the hash_ctx_mgr
	 * still need processing.
	 */
	struct sha1_hash_ctx *ctx;

	ctx = (struct sha1_hash_ctx *) sha1_job_mgr_get_comp_job(&mgr->mgr);
	return sha1_ctx_mgr_resubmit(mgr, ctx);
}

static void sha1_ctx_mgr_init(struct sha1_ctx_mgr *mgr)
{
	sha1_job_mgr_init(&mgr->mgr);
}

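/*
 * Submission state machine: HASH_FIRST re-initializes the digest and
 * counters, HASH_UPDATE feeds more data, and HASH_LAST triggers padding.
 * Partial input is staged in partial_block_buffer until a whole
 * SHA1_BLOCK_SIZE chunk is available, since the lanes only consume
 * complete blocks.
 */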
static struct sha1_hash_ctx *sha1_ctx_mgr_submit(struct sha1_ctx_mgr *mgr,
					  struct sha1_hash_ctx *ctx,
					  const void *buffer,
					  uint32_t len,
					  int flags)
{
	if (flags & (~HASH_ENTIRE)) {
		/*
		 * User should not pass anything other than FIRST, UPDATE, or
		 * LAST
		 */
		ctx->error = HASH_CTX_ERROR_INVALID_FLAGS;
		return ctx;
	}

	if (ctx->status & HASH_CTX_STS_PROCESSING) {
		/* Cannot submit to a currently processing job. */
		ctx->error = HASH_CTX_ERROR_ALREADY_PROCESSING;
		return ctx;
	}

	if ((ctx->status & HASH_CTX_STS_COMPLETE) && !(flags & HASH_FIRST)) {
		/* Cannot update a finished job. */
		ctx->error = HASH_CTX_ERROR_ALREADY_COMPLETED;
		return ctx;
	}

	if (flags & HASH_FIRST) {
		/* Init digest */
		sha1_init_digest(ctx->job.result_digest);

		/* Reset byte counter */
		ctx->total_length = 0;

		/* Clear extra blocks */
		ctx->partial_block_buffer_length = 0;
	}

	/*
	 * If we made it here, there were no errors during this call to
	 * submit
	 */
	ctx->error = HASH_CTX_ERROR_NONE;

	/* Store buffer ptr info from user */
	ctx->incoming_buffer = buffer;
	ctx->incoming_buffer_length = len;

	/*
	 * Store the user's request flags and mark this ctx as currently
	 * being processed.
	 */
	ctx->status = (flags & HASH_LAST) ?
			(HASH_CTX_STS_PROCESSING | HASH_CTX_STS_LAST) :
			HASH_CTX_STS_PROCESSING;

	/* Advance byte counter */
	ctx->total_length += len;

	/*
	 * If there is anything currently buffered in the extra blocks,
	 * append to it until it contains a whole block.
	 * Or if the user's buffer contains less than a whole block,
	 * append as much as possible to the extra block.
	 */
	if (ctx->partial_block_buffer_length || len < SHA1_BLOCK_SIZE) {
		/*
		 * Compute how many bytes to copy from user buffer into
		 * extra block
		 */
		uint32_t copy_len = SHA1_BLOCK_SIZE -
					ctx->partial_block_buffer_length;
		if (len < copy_len)
			copy_len = len;

		if (copy_len) {
			/* Copy and update relevant pointers and counters */
			memcpy(&ctx->partial_block_buffer[ctx->partial_block_buffer_length],
				buffer, copy_len);

			ctx->partial_block_buffer_length += copy_len;
			ctx->incoming_buffer = (const void *)
					((const char *)buffer + copy_len);
			ctx->incoming_buffer_length = len - copy_len;
		}

		/*
		 * The extra block should never contain more than 1 block
		 * here
		 */
		assert(ctx->partial_block_buffer_length <= SHA1_BLOCK_SIZE);

		/*
		 * If the extra block buffer contains exactly 1 block, it can
		 * be hashed.
		 */
		if (ctx->partial_block_buffer_length >= SHA1_BLOCK_SIZE) {
			ctx->partial_block_buffer_length = 0;

			ctx->job.buffer = ctx->partial_block_buffer;
			ctx->job.len = 1;
			ctx = (struct sha1_hash_ctx *)
				sha1_job_mgr_submit(&mgr->mgr, &ctx->job);
		}
	}

	return sha1_ctx_mgr_resubmit(mgr, ctx);
}

static struct sha1_hash_ctx *sha1_ctx_mgr_flush(struct sha1_ctx_mgr *mgr)
{
	struct sha1_hash_ctx *ctx;

	while (1) {
		ctx = (struct sha1_hash_ctx *) sha1_job_mgr_flush(&mgr->mgr);

		/* If flush returned 0, there are no more jobs in flight. */
		if (!ctx)
			return NULL;

		/*
		 * If flush returned a job, resubmit the job to finish
		 * processing.
		 */
		ctx = sha1_ctx_mgr_resubmit(mgr, ctx);

		/*
		 * If sha1_ctx_mgr_resubmit returned a job, it is ready to be
		 * returned. Otherwise, all jobs currently being managed by the
		 * sha1_ctx_mgr still need processing. Loop.
		 */
		if (ctx)
			return ctx;
	}
}

static int sha1_mb_init(struct ahash_request *areq)
{
	struct sha1_hash_ctx *sctx = ahash_request_ctx(areq);

	hash_ctx_init(sctx);
	sctx->job.result_digest[0] = SHA1_H0;
	sctx->job.result_digest[1] = SHA1_H1;
	sctx->job.result_digest[2] = SHA1_H2;
	sctx->job.result_digest[3] = SHA1_H3;
	sctx->job.result_digest[4] = SHA1_H4;
	sctx->total_length = 0;
	sctx->partial_block_buffer_length = 0;
	sctx->status = HASH_CTX_STS_IDLE;

	return 0;
}

static int sha1_mb_set_results(struct mcryptd_hash_request_ctx *rctx)
{
	int	i;
	struct	sha1_hash_ctx *sctx = ahash_request_ctx(&rctx->areq);
	__be32	*dst = (__be32 *) rctx->out;

	for (i = 0; i < 5; ++i)
		dst[i] = cpu_to_be32(sctx->job.result_digest[i]);

	return 0;
}

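/*
 * Walk the remaining scatterlist data for a request the job manager has
 * handed back, resubmitting each chunk until the walk is exhausted; with
 * "flush" set, stalled lanes are flushed instead of waiting for lane-mates.
 */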
static int sha_finish_walk(struct mcryptd_hash_request_ctx **ret_rctx,
			struct mcryptd_alg_cstate *cstate, bool flush)
{
	int flag = HASH_UPDATE;
	int nbytes, err = 0;
	struct mcryptd_hash_request_ctx *rctx = *ret_rctx;
	struct sha1_hash_ctx *sha_ctx;

	/* more work ? */
	while (!(rctx->flag & HASH_DONE)) {
		nbytes = crypto_ahash_walk_done(&rctx->walk, 0);
		if (nbytes < 0) {
			err = nbytes;
			goto out;
		}
		/* check if the walk is done */
		if (crypto_ahash_walk_last(&rctx->walk)) {
			rctx->flag |= HASH_DONE;
			if (rctx->flag & HASH_FINAL)
				flag |= HASH_LAST;
		}

		sha_ctx = (struct sha1_hash_ctx *)
					ahash_request_ctx(&rctx->areq);
		kernel_fpu_begin();
		sha_ctx = sha1_ctx_mgr_submit(cstate->mgr, sha_ctx,
					rctx->walk.data, nbytes, flag);
		if (!sha_ctx && flush)
			sha_ctx = sha1_ctx_mgr_flush(cstate->mgr);
		kernel_fpu_end();

		if (!sha_ctx)
			goto out;

		rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
	}

	/* copy the results */
	if (rctx->flag & HASH_FINAL)
		sha1_mb_set_results(rctx);

out:
	*ret_rctx = rctx;
	return err;
}

static int sha_complete_job(struct mcryptd_hash_request_ctx *rctx,
			    struct mcryptd_alg_cstate *cstate,
			    int err)
{
	struct ahash_request *req = cast_mcryptd_ctx_to_req(rctx);
	struct sha1_hash_ctx *sha_ctx;
	struct mcryptd_hash_request_ctx *req_ctx;
	int ret;

	/* remove from work list */
	spin_lock(&cstate->work_lock);
	list_del(&rctx->waiter);
	spin_unlock(&cstate->work_lock);

	if (irqs_disabled())
		rctx->complete(&req->base, err);
	else {
		local_bh_disable();
		rctx->complete(&req->base, err);
		local_bh_enable();
	}

	/* check to see if there are other jobs that are done */
	sha_ctx = sha1_ctx_mgr_get_comp_ctx(cstate->mgr);
	while (sha_ctx) {
		req_ctx = cast_hash_to_mcryptd_ctx(sha_ctx);
		ret = sha_finish_walk(&req_ctx, cstate, false);
		if (req_ctx) {
			spin_lock(&cstate->work_lock);
			list_del(&req_ctx->waiter);
			spin_unlock(&cstate->work_lock);

			req = cast_mcryptd_ctx_to_req(req_ctx);
			if (irqs_disabled())
				req_ctx->complete(&req->base, ret);
			else {
				local_bh_disable();
				req_ctx->complete(&req->base, ret);
				local_bh_enable();
			}
		}
		sha_ctx = sha1_ctx_mgr_get_comp_ctx(cstate->mgr);
	}

	return 0;
}

static void sha1_mb_add_list(struct mcryptd_hash_request_ctx *rctx,
		struct mcryptd_alg_cstate *cstate)
{
	unsigned long next_flush;
	unsigned long delay = usecs_to_jiffies(FLUSH_INTERVAL);

	/* initialize tag */
	rctx->tag.arrival = jiffies;	/* tag the arrival time */
	rctx->tag.seq_num = cstate->next_seq_num++;
	next_flush = rctx->tag.arrival + delay;
	rctx->tag.expire = next_flush;

	spin_lock(&cstate->work_lock);
	list_add_tail(&rctx->waiter, &cstate->work_list);
	spin_unlock(&cstate->work_lock);

	mcryptd_arm_flusher(cstate, delay);
}

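/*
 * The update/finup/final handlers below run on the mcryptd work queue of
 * the submitting cpu. A submit may return NULL (the job is parked in a
 * lane awaiting lane-mates, so -EINPROGRESS is returned to the caller) or
 * a completed context, possibly belonging to a *different* request, which
 * is then finished via sha_finish_walk()/sha_complete_job().
 */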
static int sha1_mb_update(struct ahash_request *areq)
{
	struct mcryptd_hash_request_ctx *rctx =
		container_of(areq, struct mcryptd_hash_request_ctx, areq);
	struct mcryptd_alg_cstate *cstate =
				this_cpu_ptr(sha1_mb_alg_state.alg_cstate);

	struct ahash_request *req = cast_mcryptd_ctx_to_req(rctx);
	struct sha1_hash_ctx *sha_ctx;
	int ret = 0, nbytes;

	/* sanity check */
	if (rctx->tag.cpu != smp_processor_id()) {
		pr_err("mcryptd error: cpu clash\n");
		goto done;
	}

	/* need to init context */
	req_ctx_init(rctx, areq);

	nbytes = crypto_ahash_walk_first(req, &rctx->walk);
	if (nbytes < 0) {
		ret = nbytes;
		goto done;
	}

	if (crypto_ahash_walk_last(&rctx->walk))
		rctx->flag |= HASH_DONE;

	/* submit */
	sha_ctx = (struct sha1_hash_ctx *) ahash_request_ctx(areq);
	sha1_mb_add_list(rctx, cstate);
	kernel_fpu_begin();
	sha_ctx = sha1_ctx_mgr_submit(cstate->mgr, sha_ctx, rctx->walk.data,
							nbytes, HASH_UPDATE);
	kernel_fpu_end();

	/* check if anything is returned */
	if (!sha_ctx)
		return -EINPROGRESS;

	if (sha_ctx->error) {
		ret = sha_ctx->error;
		rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
		goto done;
	}

	rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
	ret = sha_finish_walk(&rctx, cstate, false);
	if (!rctx)
		return -EINPROGRESS;
done:
	sha_complete_job(rctx, cstate, ret);
	return ret;
}

static int sha1_mb_finup(struct ahash_request *areq)
{
	struct mcryptd_hash_request_ctx *rctx =
		container_of(areq, struct mcryptd_hash_request_ctx, areq);
	struct mcryptd_alg_cstate *cstate =
				this_cpu_ptr(sha1_mb_alg_state.alg_cstate);

	struct ahash_request *req = cast_mcryptd_ctx_to_req(rctx);
	struct sha1_hash_ctx *sha_ctx;
	int ret = 0, flag = HASH_UPDATE, nbytes;

	/* sanity check */
	if (rctx->tag.cpu != smp_processor_id()) {
		pr_err("mcryptd error: cpu clash\n");
		goto done;
	}

	/* need to init context */
	req_ctx_init(rctx, areq);

	nbytes = crypto_ahash_walk_first(req, &rctx->walk);
	if (nbytes < 0) {
		ret = nbytes;
		goto done;
	}

	if (crypto_ahash_walk_last(&rctx->walk)) {
		rctx->flag |= HASH_DONE;
		flag = HASH_LAST;
	}

	/* submit */
	rctx->flag |= HASH_FINAL;
	sha_ctx = (struct sha1_hash_ctx *) ahash_request_ctx(areq);
	sha1_mb_add_list(rctx, cstate);

	kernel_fpu_begin();
	sha_ctx = sha1_ctx_mgr_submit(cstate->mgr, sha_ctx, rctx->walk.data,
								nbytes, flag);
	kernel_fpu_end();

	/* check if anything is returned */
	if (!sha_ctx)
		return -EINPROGRESS;

	if (sha_ctx->error) {
		ret = sha_ctx->error;
		goto done;
	}

	rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
	ret = sha_finish_walk(&rctx, cstate, false);
	if (!rctx)
		return -EINPROGRESS;
done:
	sha_complete_job(rctx, cstate, ret);
	return ret;
}

static int sha1_mb_final(struct ahash_request *areq)
{
	struct mcryptd_hash_request_ctx *rctx =
		container_of(areq, struct mcryptd_hash_request_ctx, areq);
	struct mcryptd_alg_cstate *cstate =
				this_cpu_ptr(sha1_mb_alg_state.alg_cstate);

	struct sha1_hash_ctx *sha_ctx;
	int ret = 0;
	u8 data;

	/* sanity check */
	if (rctx->tag.cpu != smp_processor_id()) {
		pr_err("mcryptd error: cpu clash\n");
		goto done;
	}

	/* need to init context */
	req_ctx_init(rctx, areq);

	rctx->flag |= HASH_DONE | HASH_FINAL;

	sha_ctx = (struct sha1_hash_ctx *) ahash_request_ctx(areq);
	/* flag HASH_FINAL and 0 data size */
	sha1_mb_add_list(rctx, cstate);
	kernel_fpu_begin();
	sha_ctx = sha1_ctx_mgr_submit(cstate->mgr, sha_ctx, &data, 0,
								HASH_LAST);
	kernel_fpu_end();

	/* check if anything is returned */
	if (!sha_ctx)
		return -EINPROGRESS;

	if (sha_ctx->error) {
		ret = sha_ctx->error;
		rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
		goto done;
	}

	rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
	ret = sha_finish_walk(&rctx, cstate, false);
	if (!rctx)
		return -EINPROGRESS;
done:
	sha_complete_job(rctx, cstate, ret);
	return ret;
}

static int sha1_mb_export(struct ahash_request *areq, void *out)
{
	struct sha1_hash_ctx *sctx = ahash_request_ctx(areq);

	memcpy(out, sctx, sizeof(*sctx));

	return 0;
}

static int sha1_mb_import(struct ahash_request *areq, const void *in)
{
	struct sha1_hash_ctx *sctx = ahash_request_ctx(areq);

	memcpy(sctx, in, sizeof(*sctx));

	return 0;
}

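/*
 * Everything below implements the outer, user-visible "sha1" algorithm.
 * It is a thin shim that re-targets each request at the mcryptd transform,
 * which queues it to the internal "__sha1-mb" algorithm defined above.
 */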
static int sha1_mb_async_init_tfm(struct crypto_tfm *tfm)
{
	struct mcryptd_ahash *mcryptd_tfm;
	struct sha1_mb_ctx *ctx = crypto_tfm_ctx(tfm);
	struct mcryptd_hash_ctx *mctx;

	mcryptd_tfm = mcryptd_alloc_ahash("__intel_sha1-mb",
						CRYPTO_ALG_INTERNAL,
						CRYPTO_ALG_INTERNAL);
	if (IS_ERR(mcryptd_tfm))
		return PTR_ERR(mcryptd_tfm);
	mctx = crypto_ahash_ctx(&mcryptd_tfm->base);
	mctx->alg_state = &sha1_mb_alg_state;
	ctx->mcryptd_tfm = mcryptd_tfm;
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				sizeof(struct ahash_request) +
				crypto_ahash_reqsize(&mcryptd_tfm->base));

	return 0;
}

static void sha1_mb_async_exit_tfm(struct crypto_tfm *tfm)
{
	struct sha1_mb_ctx *ctx = crypto_tfm_ctx(tfm);

	mcryptd_free_ahash(ctx->mcryptd_tfm);
}

static int sha1_mb_areq_init_tfm(struct crypto_tfm *tfm)
{
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				sizeof(struct ahash_request) +
				sizeof(struct sha1_hash_ctx));

	return 0;
}

static void sha1_mb_areq_exit_tfm(struct crypto_tfm *tfm)
{
	struct sha1_mb_ctx *ctx = crypto_tfm_ctx(tfm);

	mcryptd_free_ahash(ctx->mcryptd_tfm);
}

static struct ahash_alg sha1_mb_areq_alg = {
	.init		= sha1_mb_init,
	.update		= sha1_mb_update,
	.final		= sha1_mb_final,
	.finup		= sha1_mb_finup,
	.export		= sha1_mb_export,
	.import		= sha1_mb_import,
	.halg		= {
		.digestsize	= SHA1_DIGEST_SIZE,
		.statesize	= sizeof(struct sha1_hash_ctx),
		.base		= {
			.cra_name	 = "__sha1-mb",
			.cra_driver_name = "__intel_sha1-mb",
			.cra_priority	 = 100,
			/*
			 * use ASYNC flag as some buffers in multi-buffer
			 * algo may not have completed before hashing thread
			 * sleep
			 */
			.cra_flags	 = CRYPTO_ALG_TYPE_AHASH |
						CRYPTO_ALG_ASYNC |
						CRYPTO_ALG_INTERNAL,
			.cra_blocksize	 = SHA1_BLOCK_SIZE,
			.cra_module	 = THIS_MODULE,
			.cra_list	 = LIST_HEAD_INIT
					(sha1_mb_areq_alg.halg.base.cra_list),
			.cra_init	 = sha1_mb_areq_init_tfm,
			.cra_exit	 = sha1_mb_areq_exit_tfm,
			.cra_ctxsize	 = sizeof(struct sha1_hash_ctx),
		}
	}
};

static int sha1_mb_async_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct sha1_mb_ctx *ctx = crypto_ahash_ctx(tfm);
	struct ahash_request *mcryptd_req = ahash_request_ctx(req);
	struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;

	memcpy(mcryptd_req, req, sizeof(*req));
	ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
	return crypto_ahash_init(mcryptd_req);
}

static int sha1_mb_async_update(struct ahash_request *req)
{
	struct ahash_request *mcryptd_req = ahash_request_ctx(req);

	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct sha1_mb_ctx *ctx = crypto_ahash_ctx(tfm);
	struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;

	memcpy(mcryptd_req, req, sizeof(*req));
	ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
	return crypto_ahash_update(mcryptd_req);
}

static int sha1_mb_async_finup(struct ahash_request *req)
{
	struct ahash_request *mcryptd_req = ahash_request_ctx(req);

	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct sha1_mb_ctx *ctx = crypto_ahash_ctx(tfm);
	struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;

	memcpy(mcryptd_req, req, sizeof(*req));
	ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
	return crypto_ahash_finup(mcryptd_req);
}

static int sha1_mb_async_final(struct ahash_request *req)
{
	struct ahash_request *mcryptd_req = ahash_request_ctx(req);

	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct sha1_mb_ctx *ctx = crypto_ahash_ctx(tfm);
	struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;

	memcpy(mcryptd_req, req, sizeof(*req));
	ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
	return crypto_ahash_final(mcryptd_req);
}

static int sha1_mb_async_digest(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct sha1_mb_ctx *ctx = crypto_ahash_ctx(tfm);
	struct ahash_request *mcryptd_req = ahash_request_ctx(req);
	struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;

	memcpy(mcryptd_req, req, sizeof(*req));
	ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
	return crypto_ahash_digest(mcryptd_req);
}

static int sha1_mb_async_export(struct ahash_request *req, void *out)
{
	struct ahash_request *mcryptd_req = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct sha1_mb_ctx *ctx = crypto_ahash_ctx(tfm);
	struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;

	memcpy(mcryptd_req, req, sizeof(*req));
	ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
	return crypto_ahash_export(mcryptd_req, out);
}

static int sha1_mb_async_import(struct ahash_request *req, const void *in)
{
	struct ahash_request *mcryptd_req = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct sha1_mb_ctx *ctx = crypto_ahash_ctx(tfm);
	struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;
	struct crypto_ahash *child = mcryptd_ahash_child(mcryptd_tfm);
	struct mcryptd_hash_request_ctx *rctx;
	struct ahash_request *areq;

	memcpy(mcryptd_req, req, sizeof(*req));
	ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
	rctx = ahash_request_ctx(mcryptd_req);
	areq = &rctx->areq;

	ahash_request_set_tfm(areq, child);
	ahash_request_set_callback(areq, CRYPTO_TFM_REQ_MAY_SLEEP,
					rctx->complete, req);

	return crypto_ahash_import(mcryptd_req, in);
}

static struct ahash_alg sha1_mb_async_alg = {
	.init		= sha1_mb_async_init,
	.update		= sha1_mb_async_update,
	.final		= sha1_mb_async_final,
	.finup		= sha1_mb_async_finup,
	.digest		= sha1_mb_async_digest,
	.export		= sha1_mb_async_export,
	.import		= sha1_mb_async_import,
	.halg = {
		.digestsize	= SHA1_DIGEST_SIZE,
		.statesize	= sizeof(struct sha1_hash_ctx),
		.base = {
			.cra_name	 = "sha1",
			.cra_driver_name = "sha1_mb",
			.cra_priority	 = 200,
			.cra_flags	 = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC,
			.cra_blocksize	 = SHA1_BLOCK_SIZE,
			.cra_type	 = &crypto_ahash_type,
			.cra_module	 = THIS_MODULE,
			.cra_list	 = LIST_HEAD_INIT(sha1_mb_async_alg.halg.base.cra_list),
			.cra_init	 = sha1_mb_async_init_tfm,
			.cra_exit	 = sha1_mb_async_exit_tfm,
			.cra_ctxsize	 = sizeof(struct sha1_mb_ctx),
		},
	},
};

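/*
 * Periodic flusher, run from the mcryptd per-cpu delayed work: force out
 * any job that has been queued longer than FLUSH_INTERVAL, then re-arm
 * itself for the next-expiring entry still on the work list.
 */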
static unsigned long sha1_mb_flusher(struct mcryptd_alg_cstate *cstate)
{
	struct mcryptd_hash_request_ctx *rctx;
	unsigned long cur_time;
	unsigned long next_flush = 0;
	struct sha1_hash_ctx *sha_ctx;

	cur_time = jiffies;

	while (!list_empty(&cstate->work_list)) {
		rctx = list_entry(cstate->work_list.next,
				struct mcryptd_hash_request_ctx, waiter);
		if (time_before(cur_time, rctx->tag.expire))
			break;
		kernel_fpu_begin();
		sha_ctx = (struct sha1_hash_ctx *)
					sha1_ctx_mgr_flush(cstate->mgr);
		kernel_fpu_end();
		if (!sha_ctx) {
			pr_err("sha1_mb error: nothing got flushed for non-empty list\n");
			break;
		}
		rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
		sha_finish_walk(&rctx, cstate, true);
		sha_complete_job(rctx, cstate, 0);
	}

	if (!list_empty(&cstate->work_list)) {
		rctx = list_entry(cstate->work_list.next,
				struct mcryptd_hash_request_ctx, waiter);
		/* get the hash context and then flush time */
		next_flush = rctx->tag.expire;
		mcryptd_arm_flusher(cstate, get_delay(next_flush));
	}
	return next_flush;
}

static int __init sha1_mb_mod_init(void)
{
	int cpu;
	int err;
	struct mcryptd_alg_cstate *cpu_state;

	/* check for dependent cpu features */
	if (!boot_cpu_has(X86_FEATURE_AVX2) ||
	    !boot_cpu_has(X86_FEATURE_BMI2))
		return -ENODEV;

	/* initialize multibuffer structures */
	sha1_mb_alg_state.alg_cstate = alloc_percpu(struct mcryptd_alg_cstate);

	sha1_job_mgr_init = sha1_mb_mgr_init_avx2;
	sha1_job_mgr_submit = sha1_mb_mgr_submit_avx2;
	sha1_job_mgr_flush = sha1_mb_mgr_flush_avx2;
	sha1_job_mgr_get_comp_job = sha1_mb_mgr_get_comp_job_avx2;

	if (!sha1_mb_alg_state.alg_cstate)
		return -ENOMEM;
	for_each_possible_cpu(cpu) {
		cpu_state = per_cpu_ptr(sha1_mb_alg_state.alg_cstate, cpu);
		cpu_state->next_flush = 0;
		cpu_state->next_seq_num = 0;
		cpu_state->flusher_engaged = false;
		INIT_DELAYED_WORK(&cpu_state->flush, mcryptd_flusher);
		cpu_state->cpu = cpu;
		cpu_state->alg_state = &sha1_mb_alg_state;
		cpu_state->mgr = kzalloc(sizeof(struct sha1_ctx_mgr),
					GFP_KERNEL);
		if (!cpu_state->mgr)
			goto err2;
		sha1_ctx_mgr_init(cpu_state->mgr);
		INIT_LIST_HEAD(&cpu_state->work_list);
		spin_lock_init(&cpu_state->work_lock);
	}
	sha1_mb_alg_state.flusher = &sha1_mb_flusher;

	err = crypto_register_ahash(&sha1_mb_areq_alg);
	if (err)
		goto err2;
	err = crypto_register_ahash(&sha1_mb_async_alg);
	if (err)
		goto err1;

	return 0;
err1:
	crypto_unregister_ahash(&sha1_mb_areq_alg);
err2:
	for_each_possible_cpu(cpu) {
		cpu_state = per_cpu_ptr(sha1_mb_alg_state.alg_cstate, cpu);
		kfree(cpu_state->mgr);
	}
	free_percpu(sha1_mb_alg_state.alg_cstate);
	return -ENODEV;
}

static void __exit sha1_mb_mod_fini(void)
{
	int cpu;
	struct mcryptd_alg_cstate *cpu_state;

	crypto_unregister_ahash(&sha1_mb_async_alg);
	crypto_unregister_ahash(&sha1_mb_areq_alg);
	for_each_possible_cpu(cpu) {
		cpu_state = per_cpu_ptr(sha1_mb_alg_state.alg_cstate, cpu);
		kfree(cpu_state->mgr);
	}
	free_percpu(sha1_mb_alg_state.alg_cstate);
}

module_init(sha1_mb_mod_init);
module_exit(sha1_mb_mod_fini);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm, multi buffer accelerated");

MODULE_ALIAS_CRYPTO("sha1");