// SPDX-License-Identifier: GPL-2.0+
/*
 * caam - Freescale FSL CAAM support for ahash functions of crypto API
 *
 * Copyright 2011 Freescale Semiconductor, Inc.
 * Copyright 2018-2019 NXP
 *
 * Based on caamalg.c crypto API driver.
 *
 * relationship of digest job descriptor or first job descriptor after init to
 * shared descriptors:
 *
 * ---------------                     ---------------
 * | JobDesc #1  |-------------------->|  ShareDesc  |
 * | *(packet 1) |                     |  (hashKey)  |
 * ---------------                     | (operation) |
 *                                     ---------------
 *
 * relationship of subsequent job descriptors to shared descriptors:
 *
 * ---------------                     ---------------
 * | JobDesc #2  |-------------------->|  ShareDesc  |
 * | *(packet 2) |      |------------->|  (hashKey)  |
 * ---------------      |    |-------->| (operation) |
 *       .              |    |         | (load ctx2) |
 *       .              |    |         ---------------
 * ---------------      |    |
 * | JobDesc #3  |------|    |
 * | *(packet 3) |           |
 * ---------------           |
 *       .                   |
 *       .                   |
 * ---------------           |
 * | JobDesc #4  |------------
 * | *(packet 4) |
 * ---------------
 *
 * The SharedDesc never changes for a connection unless rekeyed, but
 * each packet will likely be in a different place. So all we need
 * to know to process the packet is where the input is, where the
 * output goes, and what context we want to process with. Context is
 * in the SharedDesc, packet references in the JobDesc.
 *
 * So, a job desc looks like:
 *
 * ---------------------
 * | Header            |
 * | ShareDesc Pointer |
 * | SEQ_OUT_PTR       |
 * | (output buffer)   |
 * | (output length)   |
 * | SEQ_IN_PTR        |
 * | (input buffer)    |
 * | (input length)    |
 * ---------------------
 */
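
/*
 * Added for illustration (not part of the upstream file): a minimal sketch
 * of how a kernel caller would drive this code through the generic ahash
 * API, assuming the "sha256" lookup resolves to this driver's
 * "sha256-caam" implementation; my_done_cb/my_priv are hypothetical names:
 *
 *     struct crypto_ahash *tfm = crypto_alloc_ahash("sha256", 0, 0);
 *     struct ahash_request *req = ahash_request_alloc(tfm, GFP_KERNEL);
 *
 *     ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *                                my_done_cb, my_priv);
 *     ahash_request_set_crypt(req, src_sg, digest_buf, nbytes);
 *     err = crypto_ahash_digest(req);
 *     (err == -EINPROGRESS means completion arrives via my_done_cb)
 *
 *     ahash_request_free(req);
 *     crypto_free_ahash(tfm);
 */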

#include "compat.h"

#include "regs.h"
#include "intern.h"
#include "desc_constr.h"
#include "jr.h"
#include "error.h"
#include "sg_sw_sec4.h"
#include "key_gen.h"
#include "caamhash_desc.h"
#include <crypto/engine.h>

#define CAAM_CRA_PRIORITY               3000

/* max hash key is max split key size */
#define CAAM_MAX_HASH_KEY_SIZE          (SHA512_DIGEST_SIZE * 2)

#define CAAM_MAX_HASH_BLOCK_SIZE        SHA512_BLOCK_SIZE
#define CAAM_MAX_HASH_DIGEST_SIZE       SHA512_DIGEST_SIZE

#define DESC_HASH_MAX_USED_BYTES        (DESC_AHASH_FINAL_LEN + \
                                         CAAM_MAX_HASH_KEY_SIZE)
#define DESC_HASH_MAX_USED_LEN          (DESC_HASH_MAX_USED_BYTES / CAAM_CMD_SZ)

/* caam context sizes for hashes: running digest + 8 */
#define HASH_MSG_LEN                    8
#define MAX_CTX_LEN                     (HASH_MSG_LEN + SHA512_DIGEST_SIZE)
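
/*
 * Added note (worked example): for SHA-256 the running hardware context is
 * HASH_MSG_LEN + SHA256_DIGEST_SIZE = 8 + 32 = 40 bytes -- the 64-bit
 * running message-length word plus the intermediate digest.
 */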

static struct list_head hash_list;

/* ahash per-session context */
struct caam_hash_ctx {
        struct crypto_engine_ctx enginectx;
        u32 sh_desc_update[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
        u32 sh_desc_update_first[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
        u32 sh_desc_fin[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
        u32 sh_desc_digest[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
        u8 key[CAAM_MAX_HASH_KEY_SIZE] ____cacheline_aligned;
        dma_addr_t sh_desc_update_dma ____cacheline_aligned;
        dma_addr_t sh_desc_update_first_dma;
        dma_addr_t sh_desc_fin_dma;
        dma_addr_t sh_desc_digest_dma;
        enum dma_data_direction dir;
        enum dma_data_direction key_dir;
        struct device *jrdev;
        int ctx_len;
        struct alginfo adata;
};

/* ahash state */
struct caam_hash_state {
        dma_addr_t buf_dma;
        dma_addr_t ctx_dma;
        int ctx_dma_len;
        u8 buf[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
        int buflen;
        int next_buflen;
        u8 caam_ctx[MAX_CTX_LEN] ____cacheline_aligned;
        int (*update)(struct ahash_request *req) ____cacheline_aligned;
        int (*final)(struct ahash_request *req);
        int (*finup)(struct ahash_request *req);
        struct ahash_edesc *edesc;
        void (*ahash_op_done)(struct device *jrdev, u32 *desc, u32 err,
                              void *context);
};
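
/*
 * Added note: partial blocks are staged in buf above; buflen counts the
 * bytes currently buffered, while next_buflen is the request tail that is
 * carried over once the in-flight job completes (copied back in
 * ahash_done_switch() and the update paths below).
 */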

struct caam_export_state {
        u8 buf[CAAM_MAX_HASH_BLOCK_SIZE];
        u8 caam_ctx[MAX_CTX_LEN];
        int buflen;
        int (*update)(struct ahash_request *req);
        int (*final)(struct ahash_request *req);
        int (*finup)(struct ahash_request *req);
};

static inline bool is_cmac_aes(u32 algtype)
{
        return (algtype & (OP_ALG_ALGSEL_MASK | OP_ALG_AAI_MASK)) ==
               (OP_ALG_ALGSEL_AES | OP_ALG_AAI_CMAC);
}

/* Common job descriptor seq in/out ptr routines */

/* Map state->caam_ctx, and append seq_out_ptr command that points to it */
static inline int map_seq_out_ptr_ctx(u32 *desc, struct device *jrdev,
                                      struct caam_hash_state *state,
                                      int ctx_len)
{
        state->ctx_dma_len = ctx_len;
        state->ctx_dma = dma_map_single(jrdev, state->caam_ctx,
                                        ctx_len, DMA_FROM_DEVICE);
        if (dma_mapping_error(jrdev, state->ctx_dma)) {
                dev_err(jrdev, "unable to map ctx\n");
                state->ctx_dma = 0;
                return -ENOMEM;
        }

        append_seq_out_ptr(desc, state->ctx_dma, ctx_len, 0);

        return 0;
}

/* Map current buffer in state (if length > 0) and put it in link table */
static inline int buf_map_to_sec4_sg(struct device *jrdev,
                                     struct sec4_sg_entry *sec4_sg,
                                     struct caam_hash_state *state)
{
        int buflen = state->buflen;

        if (!buflen)
                return 0;

        state->buf_dma = dma_map_single(jrdev, state->buf, buflen,
                                        DMA_TO_DEVICE);
        if (dma_mapping_error(jrdev, state->buf_dma)) {
                dev_err(jrdev, "unable to map buf\n");
                state->buf_dma = 0;
                return -ENOMEM;
        }

        dma_to_sec4_sg_one(sec4_sg, state->buf_dma, buflen, 0);

        return 0;
}

/* Map state->caam_ctx, and add it to link table */
static inline int ctx_map_to_sec4_sg(struct device *jrdev,
                                     struct caam_hash_state *state, int ctx_len,
                                     struct sec4_sg_entry *sec4_sg, u32 flag)
{
        state->ctx_dma_len = ctx_len;
        state->ctx_dma = dma_map_single(jrdev, state->caam_ctx, ctx_len, flag);
        if (dma_mapping_error(jrdev, state->ctx_dma)) {
                dev_err(jrdev, "unable to map ctx\n");
                state->ctx_dma = 0;
                return -ENOMEM;
        }

        dma_to_sec4_sg_one(sec4_sg, state->ctx_dma, ctx_len, 0);

        return 0;
}
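
/*
 * Added note: the update path lays its hardware link table out as
 * [running ctx][buffered tail][src S/G entries] using the helpers above;
 * see ahash_update_ctx() below.
 */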

static int ahash_set_sh_desc(struct crypto_ahash *ahash)
{
        struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
        int digestsize = crypto_ahash_digestsize(ahash);
        struct device *jrdev = ctx->jrdev;
        struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
        u32 *desc;

        ctx->adata.key_virt = ctx->key;

        /* ahash_update shared descriptor */
        desc = ctx->sh_desc_update;
        cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_UPDATE, ctx->ctx_len,
                          ctx->ctx_len, true, ctrlpriv->era);
        dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma,
                                   desc_bytes(desc), ctx->dir);

        print_hex_dump_debug("ahash update shdesc@"__stringify(__LINE__)": ",
                             DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
                             1);

        /* ahash_update_first shared descriptor */
        desc = ctx->sh_desc_update_first;
        cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len,
                          ctx->ctx_len, false, ctrlpriv->era);
        dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma,
                                   desc_bytes(desc), ctx->dir);
        print_hex_dump_debug("ahash update first shdesc@"__stringify(__LINE__)
                             ": ", DUMP_PREFIX_ADDRESS, 16, 4, desc,
                             desc_bytes(desc), 1);

        /* ahash_final shared descriptor */
        desc = ctx->sh_desc_fin;
        cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_FINALIZE, digestsize,
                          ctx->ctx_len, true, ctrlpriv->era);
        dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma,
                                   desc_bytes(desc), ctx->dir);

        print_hex_dump_debug("ahash final shdesc@"__stringify(__LINE__)": ",
                             DUMP_PREFIX_ADDRESS, 16, 4, desc,
                             desc_bytes(desc), 1);

        /* ahash_digest shared descriptor */
        desc = ctx->sh_desc_digest;
        cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INITFINAL, digestsize,
                          ctx->ctx_len, false, ctrlpriv->era);
        dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma,
                                   desc_bytes(desc), ctx->dir);

        print_hex_dump_debug("ahash digest shdesc@"__stringify(__LINE__)": ",
                             DUMP_PREFIX_ADDRESS, 16, 4, desc,
                             desc_bytes(desc), 1);

        return 0;
}

static int axcbc_set_sh_desc(struct crypto_ahash *ahash)
{
        struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
        int digestsize = crypto_ahash_digestsize(ahash);
        struct device *jrdev = ctx->jrdev;
        u32 *desc;

        /* shared descriptor for ahash_update */
        desc = ctx->sh_desc_update;
        cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_UPDATE,
                            ctx->ctx_len, ctx->ctx_len);
        dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma,
                                   desc_bytes(desc), ctx->dir);
        print_hex_dump_debug("axcbc update shdesc@" __stringify(__LINE__)" : ",
                             DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
                             1);

        /* shared descriptor for ahash_{final,finup} */
        desc = ctx->sh_desc_fin;
        cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_FINALIZE,
                            digestsize, ctx->ctx_len);
        dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma,
                                   desc_bytes(desc), ctx->dir);
        print_hex_dump_debug("axcbc finup shdesc@" __stringify(__LINE__)" : ",
                             DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
                             1);

        /* key is immediate data for INIT and INITFINAL states */
        ctx->adata.key_virt = ctx->key;

        /* shared descriptor for first invocation of ahash_update */
        desc = ctx->sh_desc_update_first;
        cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len,
                            ctx->ctx_len);
        dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma,
                                   desc_bytes(desc), ctx->dir);
        print_hex_dump_debug("axcbc update first shdesc@" __stringify(__LINE__)
                             " : ", DUMP_PREFIX_ADDRESS, 16, 4, desc,
                             desc_bytes(desc), 1);

        /* shared descriptor for ahash_digest */
        desc = ctx->sh_desc_digest;
        cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INITFINAL,
                            digestsize, ctx->ctx_len);
        dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma,
                                   desc_bytes(desc), ctx->dir);
        print_hex_dump_debug("axcbc digest shdesc@" __stringify(__LINE__)" : ",
                             DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
                             1);
        return 0;
}

static int acmac_set_sh_desc(struct crypto_ahash *ahash)
{
        struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
        int digestsize = crypto_ahash_digestsize(ahash);
        struct device *jrdev = ctx->jrdev;
        u32 *desc;

        /* shared descriptor for ahash_update */
        desc = ctx->sh_desc_update;
        cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_UPDATE,
                            ctx->ctx_len, ctx->ctx_len);
        dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma,
                                   desc_bytes(desc), ctx->dir);
        print_hex_dump_debug("acmac update shdesc@" __stringify(__LINE__)" : ",
                             DUMP_PREFIX_ADDRESS, 16, 4, desc,
                             desc_bytes(desc), 1);

        /* shared descriptor for ahash_{final,finup} */
        desc = ctx->sh_desc_fin;
        cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_FINALIZE,
                            digestsize, ctx->ctx_len);
        dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma,
                                   desc_bytes(desc), ctx->dir);
        print_hex_dump_debug("acmac finup shdesc@" __stringify(__LINE__)" : ",
                             DUMP_PREFIX_ADDRESS, 16, 4, desc,
                             desc_bytes(desc), 1);

        /* shared descriptor for first invocation of ahash_update */
        desc = ctx->sh_desc_update_first;
        cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len,
                            ctx->ctx_len);
        dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma,
                                   desc_bytes(desc), ctx->dir);
        print_hex_dump_debug("acmac update first shdesc@" __stringify(__LINE__)
                             " : ", DUMP_PREFIX_ADDRESS, 16, 4, desc,
                             desc_bytes(desc), 1);

        /* shared descriptor for ahash_digest */
        desc = ctx->sh_desc_digest;
        cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INITFINAL,
                            digestsize, ctx->ctx_len);
        dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma,
                                   desc_bytes(desc), ctx->dir);
        print_hex_dump_debug("acmac digest shdesc@" __stringify(__LINE__)" : ",
                             DUMP_PREFIX_ADDRESS, 16, 4, desc,
                             desc_bytes(desc), 1);

        return 0;
}

/* Digest the key if it is too large to be used directly (keylen > block size) */
static int hash_digest_key(struct caam_hash_ctx *ctx, u32 *keylen, u8 *key,
                           u32 digestsize)
{
        struct device *jrdev = ctx->jrdev;
        u32 *desc;
        struct split_key_result result;
        dma_addr_t key_dma;
        int ret;

        desc = kmalloc(CAAM_CMD_SZ * 8 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA);
        if (!desc) {
                dev_err(jrdev, "unable to allocate key input memory\n");
                return -ENOMEM;
        }

        init_job_desc(desc, 0);

        key_dma = dma_map_single(jrdev, key, *keylen, DMA_BIDIRECTIONAL);
        if (dma_mapping_error(jrdev, key_dma)) {
                dev_err(jrdev, "unable to map key memory\n");
                kfree(desc);
                return -ENOMEM;
        }

        /* Job descriptor to perform unkeyed hash on key_in */
        append_operation(desc, ctx->adata.algtype | OP_ALG_ENCRYPT |
                         OP_ALG_AS_INITFINAL);
        append_seq_in_ptr(desc, key_dma, *keylen, 0);
        append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 |
                             FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_MSG);
        append_seq_out_ptr(desc, key_dma, digestsize, 0);
        append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
                         LDST_SRCDST_BYTE_CONTEXT);

        print_hex_dump_debug("key_in@"__stringify(__LINE__)": ",
                             DUMP_PREFIX_ADDRESS, 16, 4, key, *keylen, 1);
        print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
                             DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
                             1);

        result.err = 0;
        init_completion(&result.completion);

        ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result);
        if (ret == -EINPROGRESS) {
                /* in progress */
                wait_for_completion(&result.completion);
                ret = result.err;

                print_hex_dump_debug("digested key@"__stringify(__LINE__)": ",
                                     DUMP_PREFIX_ADDRESS, 16, 4, key,
                                     digestsize, 1);
        }
        dma_unmap_single(jrdev, key_dma, *keylen, DMA_BIDIRECTIONAL);

        *keylen = digestsize;

        kfree(desc);

        return ret;
}

static int ahash_setkey(struct crypto_ahash *ahash,
                        const u8 *key, unsigned int keylen)
{
        struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
        struct device *jrdev = ctx->jrdev;
        int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
        int digestsize = crypto_ahash_digestsize(ahash);
        struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent);
        int ret;
        u8 *hashed_key = NULL;

        dev_dbg(jrdev, "keylen %d\n", keylen);

        if (keylen > blocksize) {
                hashed_key = kmemdup(key, keylen, GFP_KERNEL | GFP_DMA);
                if (!hashed_key)
                        return -ENOMEM;
                ret = hash_digest_key(ctx, &keylen, hashed_key, digestsize);
                if (ret)
                        goto bad_free_key;
                key = hashed_key;
        }
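
        /*
         * Added note: pre-hashing over-long keys above matches the HMAC
         * definition (RFC 2104), where a key longer than the block size
         * is replaced by its digest before being padded.
         */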

        /*
         * If DKP is supported, use it in the shared descriptor to generate
         * the split key.
         */
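        /*
         * Added background (assumption, not from the upstream comment): the
         * "split key" is the pair of precomputed HMAC ipad/opad hash states
         * the CAAM consumes instead of the raw key; on Era >= 6 the DKP
         * (Derived Key Protocol) derives it inside the shared descriptor,
         * while older parts precompute it via gen_split_key().
         */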
        if (ctrlpriv->era >= 6) {
                ctx->adata.key_inline = true;
                ctx->adata.keylen = keylen;
                ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
                                                      OP_ALG_ALGSEL_MASK);

                if (ctx->adata.keylen_pad > CAAM_MAX_HASH_KEY_SIZE)
                        goto bad_free_key;

                memcpy(ctx->key, key, keylen);

                /*
                 * In case |user key| > |derived key|, using DKP<imm,imm>
                 * would result in invalid opcodes (last bytes of user key) in
                 * the resulting descriptor. Use DKP<ptr,imm> instead => both
                 * virtual and dma key addresses are needed.
                 */
                if (keylen > ctx->adata.keylen_pad)
                        dma_sync_single_for_device(ctx->jrdev,
                                                   ctx->adata.key_dma,
                                                   ctx->adata.keylen_pad,
                                                   DMA_TO_DEVICE);
        } else {
                ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, key,
                                    keylen, CAAM_MAX_HASH_KEY_SIZE);
                if (ret)
                        goto bad_free_key;
        }

        kfree(hashed_key);
        return ahash_set_sh_desc(ahash);
 bad_free_key:
        kfree(hashed_key);
        return -EINVAL;
}

static int axcbc_setkey(struct crypto_ahash *ahash, const u8 *key,
                        unsigned int keylen)
{
        struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
        struct device *jrdev = ctx->jrdev;

        if (keylen != AES_KEYSIZE_128)
                return -EINVAL;

        memcpy(ctx->key, key, keylen);
        dma_sync_single_for_device(jrdev, ctx->adata.key_dma, keylen,
                                   DMA_TO_DEVICE);
        ctx->adata.keylen = keylen;

        print_hex_dump_debug("axcbc ctx.key@" __stringify(__LINE__)" : ",
                             DUMP_PREFIX_ADDRESS, 16, 4, ctx->key, keylen, 1);

        return axcbc_set_sh_desc(ahash);
}

static int acmac_setkey(struct crypto_ahash *ahash, const u8 *key,
                        unsigned int keylen)
{
        struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
        int err;

        err = aes_check_keylen(keylen);
        if (err)
                return err;

        /* key is immediate data for all cmac shared descriptors */
        ctx->adata.key_virt = key;
        ctx->adata.keylen = keylen;

        print_hex_dump_debug("acmac ctx.key@" __stringify(__LINE__)" : ",
                             DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);

        return acmac_set_sh_desc(ahash);
}

/*
 * ahash_edesc - s/w-extended ahash descriptor
 * @sec4_sg_dma: physical mapped address of h/w link table
 * @src_nents: number of segments in input scatterlist
 * @sec4_sg_bytes: length of dma mapped sec4_sg space
 * @bklog: stored to determine if the request needs backlog
 * @hw_desc: the h/w job descriptor followed by any referenced link tables
 * @sec4_sg: h/w link table
 */
struct ahash_edesc {
        dma_addr_t sec4_sg_dma;
        int src_nents;
        int sec4_sg_bytes;
        bool bklog;
        u32 hw_desc[DESC_JOB_IO_LEN_MAX / sizeof(u32)] ____cacheline_aligned;
        struct sec4_sg_entry sec4_sg[];
};
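
/*
 * Added note: each ahash_edesc is allocated as one block of
 * sizeof(struct ahash_edesc) + sg_num * sizeof(struct sec4_sg_entry),
 * with the link table landing in the trailing flexible array; see
 * ahash_edesc_alloc() below.
 */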

static inline void ahash_unmap(struct device *dev,
                        struct ahash_edesc *edesc,
                        struct ahash_request *req, int dst_len)
{
        struct caam_hash_state *state = ahash_request_ctx(req);

        if (edesc->src_nents)
                dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);

        if (edesc->sec4_sg_bytes)
                dma_unmap_single(dev, edesc->sec4_sg_dma,
                                 edesc->sec4_sg_bytes, DMA_TO_DEVICE);

        if (state->buf_dma) {
                dma_unmap_single(dev, state->buf_dma, state->buflen,
                                 DMA_TO_DEVICE);
                state->buf_dma = 0;
        }
}

static inline void ahash_unmap_ctx(struct device *dev,
                        struct ahash_edesc *edesc,
                        struct ahash_request *req, int dst_len, u32 flag)
{
        struct caam_hash_state *state = ahash_request_ctx(req);

        if (state->ctx_dma) {
                dma_unmap_single(dev, state->ctx_dma, state->ctx_dma_len, flag);
                state->ctx_dma = 0;
        }
        ahash_unmap(dev, edesc, req, dst_len);
}

static inline void ahash_done_cpy(struct device *jrdev, u32 *desc, u32 err,
                                  void *context, enum dma_data_direction dir)
{
        struct ahash_request *req = context;
        struct caam_drv_private_jr *jrp = dev_get_drvdata(jrdev);
        struct ahash_edesc *edesc;
        struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
        int digestsize = crypto_ahash_digestsize(ahash);
        struct caam_hash_state *state = ahash_request_ctx(req);
        struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
        int ecode = 0;
        bool has_bklog;

        dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);

        edesc = state->edesc;
        has_bklog = edesc->bklog;

        if (err)
                ecode = caam_jr_strstatus(jrdev, err);

        ahash_unmap_ctx(jrdev, edesc, req, digestsize, dir);
        memcpy(req->result, state->caam_ctx, digestsize);
        kfree(edesc);

        print_hex_dump_debug("ctx@"__stringify(__LINE__)": ",
                             DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
                             ctx->ctx_len, 1);

        /*
         * If no backlog flag, the completion of the request is done
         * by CAAM, not crypto engine.
         */
        if (!has_bklog)
                req->base.complete(&req->base, ecode);
        else
                crypto_finalize_hash_request(jrp->engine, req, ecode);
}

static void ahash_done(struct device *jrdev, u32 *desc, u32 err,
                       void *context)
{
        ahash_done_cpy(jrdev, desc, err, context, DMA_FROM_DEVICE);
}

static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err,
                               void *context)
{
        ahash_done_cpy(jrdev, desc, err, context, DMA_BIDIRECTIONAL);
}

static inline void ahash_done_switch(struct device *jrdev, u32 *desc, u32 err,
                                     void *context, enum dma_data_direction dir)
{
        struct ahash_request *req = context;
        struct caam_drv_private_jr *jrp = dev_get_drvdata(jrdev);
        struct ahash_edesc *edesc;
        struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
        struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
        struct caam_hash_state *state = ahash_request_ctx(req);
        int digestsize = crypto_ahash_digestsize(ahash);
        int ecode = 0;
        bool has_bklog;

        dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);

        edesc = state->edesc;
        has_bklog = edesc->bklog;
        if (err)
                ecode = caam_jr_strstatus(jrdev, err);

        ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, dir);
        kfree(edesc);

        scatterwalk_map_and_copy(state->buf, req->src,
                                 req->nbytes - state->next_buflen,
                                 state->next_buflen, 0);
        state->buflen = state->next_buflen;

        print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
                             DUMP_PREFIX_ADDRESS, 16, 4, state->buf,
                             state->buflen, 1);

        print_hex_dump_debug("ctx@"__stringify(__LINE__)": ",
                             DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
                             ctx->ctx_len, 1);
        if (req->result)
                print_hex_dump_debug("result@"__stringify(__LINE__)": ",
                                     DUMP_PREFIX_ADDRESS, 16, 4, req->result,
                                     digestsize, 1);

        /*
         * If no backlog flag, the completion of the request is done
         * by CAAM, not crypto engine.
         */
        if (!has_bklog)
                req->base.complete(&req->base, ecode);
        else
                crypto_finalize_hash_request(jrp->engine, req, ecode);
}

static void ahash_done_bi(struct device *jrdev, u32 *desc, u32 err,
                          void *context)
{
        ahash_done_switch(jrdev, desc, err, context, DMA_BIDIRECTIONAL);
}

static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err,
                               void *context)
{
        ahash_done_switch(jrdev, desc, err, context, DMA_FROM_DEVICE);
}
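
/*
 * Added summary: ahash_done()/ahash_done_ctx_src() (via ahash_done_cpy())
 * finish a request by copying the final digest out of state->caam_ctx,
 * while ahash_done_bi()/ahash_done_ctx_dst() (via ahash_done_switch())
 * complete intermediate update jobs, carrying the buffered tail over to
 * the next request.
 */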

/*
 * Allocate an enhanced descriptor, which contains the hardware descriptor
 * and space for hardware scatter table containing sg_num entries.
 */
static struct ahash_edesc *ahash_edesc_alloc(struct ahash_request *req,
                                             int sg_num, u32 *sh_desc,
                                             dma_addr_t sh_desc_dma)
{
        struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
        struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
        struct caam_hash_state *state = ahash_request_ctx(req);
        gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
                       GFP_KERNEL : GFP_ATOMIC;
        struct ahash_edesc *edesc;
        unsigned int sg_size = sg_num * sizeof(struct sec4_sg_entry);

        edesc = kzalloc(sizeof(*edesc) + sg_size, GFP_DMA | flags);
        if (!edesc) {
                dev_err(ctx->jrdev, "could not allocate extended descriptor\n");
                return NULL;
        }

        state->edesc = edesc;

        init_job_desc_shared(edesc->hw_desc, sh_desc_dma, desc_len(sh_desc),
                             HDR_SHARE_DEFER | HDR_REVERSE);

        return edesc;
}

static int ahash_edesc_add_src(struct caam_hash_ctx *ctx,
                               struct ahash_edesc *edesc,
                               struct ahash_request *req, int nents,
                               unsigned int first_sg,
                               unsigned int first_bytes, size_t to_hash)
{
        dma_addr_t src_dma;
        u32 options;

        if (nents > 1 || first_sg) {
                struct sec4_sg_entry *sg = edesc->sec4_sg;
                unsigned int sgsize = sizeof(*sg) *
                                      pad_sg_nents(first_sg + nents);

                sg_to_sec4_sg_last(req->src, to_hash, sg + first_sg, 0);

                src_dma = dma_map_single(ctx->jrdev, sg, sgsize, DMA_TO_DEVICE);
                if (dma_mapping_error(ctx->jrdev, src_dma)) {
                        dev_err(ctx->jrdev, "unable to map S/G table\n");
                        return -ENOMEM;
                }

                edesc->sec4_sg_bytes = sgsize;
                edesc->sec4_sg_dma = src_dma;
                options = LDST_SGF;
        } else {
                src_dma = sg_dma_address(req->src);
                options = 0;
        }

        append_seq_in_ptr(edesc->hw_desc, src_dma, first_bytes + to_hash,
                          options);

        return 0;
}

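/*
 * Added note: every request path below follows the same shape:
 * ahash_edesc_alloc() -> DMA-map source/context/buffer (building the
 * sec4_sg link table when needed) -> append SEQ IN/OUT PTR commands ->
 * ahash_enqueue_req().
 */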
static int ahash_do_one_req(struct crypto_engine *engine, void *areq)
{
        struct ahash_request *req = ahash_request_cast(areq);
        struct caam_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
        struct caam_hash_state *state = ahash_request_ctx(req);
        struct device *jrdev = ctx->jrdev;
        u32 *desc = state->edesc->hw_desc;
        int ret;

        state->edesc->bklog = true;

        ret = caam_jr_enqueue(jrdev, desc, state->ahash_op_done, req);

        if (ret == -ENOSPC && engine->retry_support)
                return ret;

        if (ret != -EINPROGRESS) {
                ahash_unmap(jrdev, state->edesc, req, 0);
                kfree(state->edesc);
        } else {
                ret = 0;
        }

        return ret;
}

static int ahash_enqueue_req(struct device *jrdev,
                             void (*cbk)(struct device *jrdev, u32 *desc,
                                         u32 err, void *context),
                             struct ahash_request *req,
                             int dst_len, enum dma_data_direction dir)
{
        struct caam_drv_private_jr *jrpriv = dev_get_drvdata(jrdev);
        struct caam_hash_state *state = ahash_request_ctx(req);
        struct ahash_edesc *edesc = state->edesc;
        u32 *desc = edesc->hw_desc;
        int ret;

        state->ahash_op_done = cbk;

        /*
         * Only backlog requests are sent to the crypto engine, since the
         * others can be handled by CAAM, if free, especially since JR has
         * up to 1024 entries (more than the 10 entries from crypto-engine).
         */
        if (req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)
                ret = crypto_transfer_hash_request_to_engine(jrpriv->engine,
                                                             req);
        else
                ret = caam_jr_enqueue(jrdev, desc, cbk, req);

        if ((ret != -EINPROGRESS) && (ret != -EBUSY)) {
                ahash_unmap_ctx(jrdev, edesc, req, dst_len, dir);
                kfree(edesc);
        }

        return ret;
}
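
/*
 * Added note: -EINPROGRESS (or -EBUSY for a backlogged request) from
 * ahash_enqueue_req() means the job is in flight and will be completed
 * from the callback; any other return means it was never enqueued, so the
 * edesc has already been unmapped and freed above.
 */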

/* submit update job descriptor */
static int ahash_update_ctx(struct ahash_request *req)
{
        struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
        struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
        struct caam_hash_state *state = ahash_request_ctx(req);
        struct device *jrdev = ctx->jrdev;
        u8 *buf = state->buf;
        int *buflen = &state->buflen;
        int *next_buflen = &state->next_buflen;
        int blocksize = crypto_ahash_blocksize(ahash);
        int in_len = *buflen + req->nbytes, to_hash;
        u32 *desc;
        int src_nents, mapped_nents, sec4_sg_bytes, sec4_sg_src_index;
        struct ahash_edesc *edesc;
        int ret = 0;

        *next_buflen = in_len & (blocksize - 1);
        to_hash = in_len - *next_buflen;

        /*
         * For XCBC and CMAC, if to_hash is a multiple of the block size,
         * keep the last block in the internal buffer: these MACs transform
         * the final message block (XOR with a derived subkey) at FINALIZE
         * time, so it must not be consumed during UPDATE.
         */
        if ((is_xcbc_aes(ctx->adata.algtype) ||
             is_cmac_aes(ctx->adata.algtype)) && to_hash >= blocksize &&
             (*next_buflen == 0)) {
                *next_buflen = blocksize;
                to_hash -= blocksize;
        }

        if (to_hash) {
                int pad_nents;
                int src_len = req->nbytes - *next_buflen;

                src_nents = sg_nents_for_len(req->src, src_len);
                if (src_nents < 0) {
                        dev_err(jrdev, "Invalid number of src SG.\n");
                        return src_nents;
                }

                if (src_nents) {
                        mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
                                                  DMA_TO_DEVICE);
                        if (!mapped_nents) {
                                dev_err(jrdev, "unable to DMA map source\n");
                                return -ENOMEM;
                        }
                } else {
                        mapped_nents = 0;
                }

                sec4_sg_src_index = 1 + (*buflen ? 1 : 0);
                pad_nents = pad_sg_nents(sec4_sg_src_index + mapped_nents);
                sec4_sg_bytes = pad_nents * sizeof(struct sec4_sg_entry);

                /*
                 * allocate space for base edesc and hw desc commands,
                 * link tables
                 */
                edesc = ahash_edesc_alloc(req, pad_nents, ctx->sh_desc_update,
                                          ctx->sh_desc_update_dma);
                if (!edesc) {
                        dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
                        return -ENOMEM;
                }

                edesc->src_nents = src_nents;
                edesc->sec4_sg_bytes = sec4_sg_bytes;

                ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
                                         edesc->sec4_sg, DMA_BIDIRECTIONAL);
                if (ret)
                        goto unmap_ctx;

                ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state);
                if (ret)
                        goto unmap_ctx;

                if (mapped_nents)
                        sg_to_sec4_sg_last(req->src, src_len,
                                           edesc->sec4_sg + sec4_sg_src_index,
                                           0);
                else
                        sg_to_sec4_set_last(edesc->sec4_sg + sec4_sg_src_index -
                                            1);

                desc = edesc->hw_desc;

                edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
                                                    sec4_sg_bytes,
                                                    DMA_TO_DEVICE);
                if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
                        dev_err(jrdev, "unable to map S/G table\n");
                        ret = -ENOMEM;
                        goto unmap_ctx;
                }

                append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len +
                                  to_hash, LDST_SGF);

                append_seq_out_ptr(desc, state->ctx_dma, ctx->ctx_len, 0);

                print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
                                     DUMP_PREFIX_ADDRESS, 16, 4, desc,
                                     desc_bytes(desc), 1);

                ret = ahash_enqueue_req(jrdev, ahash_done_bi, req,
                                        ctx->ctx_len, DMA_BIDIRECTIONAL);
        } else if (*next_buflen) {
                scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
                                         req->nbytes, 0);
                *buflen = *next_buflen;

                print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
                                     DUMP_PREFIX_ADDRESS, 16, 4, buf,
                                     *buflen, 1);
        }

        return ret;
unmap_ctx:
        ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
        kfree(edesc);
        return ret;
}

static int ahash_final_ctx(struct ahash_request *req)
{
        struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
        struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
        struct caam_hash_state *state = ahash_request_ctx(req);
        struct device *jrdev = ctx->jrdev;
        int buflen = state->buflen;
        u32 *desc;
        int sec4_sg_bytes;
        int digestsize = crypto_ahash_digestsize(ahash);
        struct ahash_edesc *edesc;
        int ret;

        sec4_sg_bytes = pad_sg_nents(1 + (buflen ? 1 : 0)) *
                        sizeof(struct sec4_sg_entry);

        /* allocate space for base edesc and hw desc commands, link tables */
        edesc = ahash_edesc_alloc(req, 4, ctx->sh_desc_fin,
                                  ctx->sh_desc_fin_dma);
        if (!edesc)
                return -ENOMEM;

        desc = edesc->hw_desc;

        edesc->sec4_sg_bytes = sec4_sg_bytes;

        ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
                                 edesc->sec4_sg, DMA_BIDIRECTIONAL);
        if (ret)
                goto unmap_ctx;

        ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state);
        if (ret)
                goto unmap_ctx;

        sg_to_sec4_set_last(edesc->sec4_sg + (buflen ? 1 : 0));

        edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
                                            sec4_sg_bytes, DMA_TO_DEVICE);
        if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
                dev_err(jrdev, "unable to map S/G table\n");
                ret = -ENOMEM;
                goto unmap_ctx;
        }

        append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + buflen,
                          LDST_SGF);
        append_seq_out_ptr(desc, state->ctx_dma, digestsize, 0);

        print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
                             DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
                             1);

        return ahash_enqueue_req(jrdev, ahash_done_ctx_src, req,
                                 digestsize, DMA_BIDIRECTIONAL);
 unmap_ctx:
        ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_BIDIRECTIONAL);
        kfree(edesc);
        return ret;
}

static int ahash_finup_ctx(struct ahash_request *req)
{
        struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
        struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
        struct caam_hash_state *state = ahash_request_ctx(req);
        struct device *jrdev = ctx->jrdev;
        int buflen = state->buflen;
        u32 *desc;
        int sec4_sg_src_index;
        int src_nents, mapped_nents;
        int digestsize = crypto_ahash_digestsize(ahash);
        struct ahash_edesc *edesc;
        int ret;

        src_nents = sg_nents_for_len(req->src, req->nbytes);
        if (src_nents < 0) {
                dev_err(jrdev, "Invalid number of src SG.\n");
                return src_nents;
        }

        if (src_nents) {
                mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
                                          DMA_TO_DEVICE);
                if (!mapped_nents) {
                        dev_err(jrdev, "unable to DMA map source\n");
                        return -ENOMEM;
                }
        } else {
                mapped_nents = 0;
        }

        sec4_sg_src_index = 1 + (buflen ? 1 : 0);

        /* allocate space for base edesc and hw desc commands, link tables */
        edesc = ahash_edesc_alloc(req, sec4_sg_src_index + mapped_nents,
                                  ctx->sh_desc_fin, ctx->sh_desc_fin_dma);
        if (!edesc) {
                dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
                return -ENOMEM;
        }

        desc = edesc->hw_desc;

        edesc->src_nents = src_nents;

        ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
                                 edesc->sec4_sg, DMA_BIDIRECTIONAL);
        if (ret)
                goto unmap_ctx;

        ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state);
        if (ret)
                goto unmap_ctx;

        ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents,
                                  sec4_sg_src_index, ctx->ctx_len + buflen,
                                  req->nbytes);
        if (ret)
                goto unmap_ctx;

        append_seq_out_ptr(desc, state->ctx_dma, digestsize, 0);

        print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
                             DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
                             1);

        return ahash_enqueue_req(jrdev, ahash_done_ctx_src, req,
                                 digestsize, DMA_BIDIRECTIONAL);
 unmap_ctx:
        ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_BIDIRECTIONAL);
        kfree(edesc);
        return ret;
}

static int ahash_digest(struct ahash_request *req)
{
        struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
        struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
        struct caam_hash_state *state = ahash_request_ctx(req);
        struct device *jrdev = ctx->jrdev;
        u32 *desc;
        int digestsize = crypto_ahash_digestsize(ahash);
        int src_nents, mapped_nents;
        struct ahash_edesc *edesc;
        int ret;

        state->buf_dma = 0;

        src_nents = sg_nents_for_len(req->src, req->nbytes);
        if (src_nents < 0) {
                dev_err(jrdev, "Invalid number of src SG.\n");
                return src_nents;
        }

        if (src_nents) {
                mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
                                          DMA_TO_DEVICE);
                if (!mapped_nents) {
                        dev_err(jrdev, "unable to map source for DMA\n");
                        return -ENOMEM;
                }
        } else {
                mapped_nents = 0;
        }

        /* allocate space for base edesc and hw desc commands, link tables */
        edesc = ahash_edesc_alloc(req, mapped_nents > 1 ? mapped_nents : 0,
                                  ctx->sh_desc_digest, ctx->sh_desc_digest_dma);
        if (!edesc) {
                dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
                return -ENOMEM;
        }

        edesc->src_nents = src_nents;

        ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 0, 0,
                                  req->nbytes);
        if (ret) {
                ahash_unmap(jrdev, edesc, req, digestsize);
                kfree(edesc);
                return ret;
        }

        desc = edesc->hw_desc;

        ret = map_seq_out_ptr_ctx(desc, jrdev, state, digestsize);
        if (ret) {
                ahash_unmap(jrdev, edesc, req, digestsize);
                kfree(edesc);
                return -ENOMEM;
        }

        print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
                             DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
                             1);

        return ahash_enqueue_req(jrdev, ahash_done, req, digestsize,
                                 DMA_FROM_DEVICE);
}

/* submit ahash final if it is the first job descriptor */
static int ahash_final_no_ctx(struct ahash_request *req)
{
        struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
        struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
        struct caam_hash_state *state = ahash_request_ctx(req);
        struct device *jrdev = ctx->jrdev;
        u8 *buf = state->buf;
        int buflen = state->buflen;
        u32 *desc;
        int digestsize = crypto_ahash_digestsize(ahash);
        struct ahash_edesc *edesc;
        int ret;

        /* allocate space for base edesc and hw desc commands, link tables */
        edesc = ahash_edesc_alloc(req, 0, ctx->sh_desc_digest,
                                  ctx->sh_desc_digest_dma);
        if (!edesc)
                return -ENOMEM;

        desc = edesc->hw_desc;

        if (buflen) {
                state->buf_dma = dma_map_single(jrdev, buf, buflen,
                                                DMA_TO_DEVICE);
                if (dma_mapping_error(jrdev, state->buf_dma)) {
                        dev_err(jrdev, "unable to map src\n");
                        goto unmap;
                }

                append_seq_in_ptr(desc, state->buf_dma, buflen, 0);
        }

        ret = map_seq_out_ptr_ctx(desc, jrdev, state, digestsize);
        if (ret)
                goto unmap;

        print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
                             DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
                             1);

        return ahash_enqueue_req(jrdev, ahash_done, req,
                                 digestsize, DMA_FROM_DEVICE);
 unmap:
        ahash_unmap(jrdev, edesc, req, digestsize);
        kfree(edesc);
        return -ENOMEM;
}

/* submit ahash update if it is the first job descriptor after update */
static int ahash_update_no_ctx(struct ahash_request *req)
{
        struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
        struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
        struct caam_hash_state *state = ahash_request_ctx(req);
        struct device *jrdev = ctx->jrdev;
        u8 *buf = state->buf;
        int *buflen = &state->buflen;
        int *next_buflen = &state->next_buflen;
        int blocksize = crypto_ahash_blocksize(ahash);
        int in_len = *buflen + req->nbytes, to_hash;
        int sec4_sg_bytes, src_nents, mapped_nents;
        struct ahash_edesc *edesc;
        u32 *desc;
        int ret = 0;

        *next_buflen = in_len & (blocksize - 1);
        to_hash = in_len - *next_buflen;

        /*
         * For XCBC and CMAC, if to_hash is multiple of block size,
         * keep last block in internal buffer
         */
        if ((is_xcbc_aes(ctx->adata.algtype) ||
             is_cmac_aes(ctx->adata.algtype)) && to_hash >= blocksize &&
             (*next_buflen == 0)) {
                *next_buflen = blocksize;
                to_hash -= blocksize;
        }

        if (to_hash) {
                int pad_nents;
                int src_len = req->nbytes - *next_buflen;

                src_nents = sg_nents_for_len(req->src, src_len);
                if (src_nents < 0) {
                        dev_err(jrdev, "Invalid number of src SG.\n");
                        return src_nents;
                }

                if (src_nents) {
                        mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
                                                  DMA_TO_DEVICE);
                        if (!mapped_nents) {
                                dev_err(jrdev, "unable to DMA map source\n");
                                return -ENOMEM;
                        }
                } else {
                        mapped_nents = 0;
                }

                pad_nents = pad_sg_nents(1 + mapped_nents);
                sec4_sg_bytes = pad_nents * sizeof(struct sec4_sg_entry);

                /*
                 * allocate space for base edesc and hw desc commands,
                 * link tables
                 */
                edesc = ahash_edesc_alloc(req, pad_nents,
                                          ctx->sh_desc_update_first,
                                          ctx->sh_desc_update_first_dma);
                if (!edesc) {
                        dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
                        return -ENOMEM;
                }

                edesc->src_nents = src_nents;
                edesc->sec4_sg_bytes = sec4_sg_bytes;

                ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, state);
                if (ret)
                        goto unmap_ctx;

                sg_to_sec4_sg_last(req->src, src_len, edesc->sec4_sg + 1, 0);

                desc = edesc->hw_desc;

                edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
                                                    sec4_sg_bytes,
                                                    DMA_TO_DEVICE);
                if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
                        dev_err(jrdev, "unable to map S/G table\n");
                        ret = -ENOMEM;
                        goto unmap_ctx;
                }

                append_seq_in_ptr(desc, edesc->sec4_sg_dma, to_hash, LDST_SGF);

                ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
                if (ret)
                        goto unmap_ctx;

                print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
                                     DUMP_PREFIX_ADDRESS, 16, 4, desc,
                                     desc_bytes(desc), 1);

                ret = ahash_enqueue_req(jrdev, ahash_done_ctx_dst, req,
                                        ctx->ctx_len, DMA_TO_DEVICE);
                if ((ret != -EINPROGRESS) && (ret != -EBUSY))
                        return ret;
                state->update = ahash_update_ctx;
                state->finup = ahash_finup_ctx;
                state->final = ahash_final_ctx;
        } else if (*next_buflen) {
                scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
                                         req->nbytes, 0);
                *buflen = *next_buflen;

                print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
                                     DUMP_PREFIX_ADDRESS, 16, 4, buf,
                                     *buflen, 1);
        }

        return ret;
 unmap_ctx:
        ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
        kfree(edesc);
        return ret;
}

/* submit ahash finup if it is the first job descriptor after update */
1312 static int ahash_finup_no_ctx(struct ahash_request *req)
1313 {
1314         struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
1315         struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
1316         struct caam_hash_state *state = ahash_request_ctx(req);
1317         struct device *jrdev = ctx->jrdev;
1318         int buflen = state->buflen;
1319         u32 *desc;
1320         int sec4_sg_bytes, sec4_sg_src_index, src_nents, mapped_nents;
1321         int digestsize = crypto_ahash_digestsize(ahash);
1322         struct ahash_edesc *edesc;
1323         int ret;
1324
1325         src_nents = sg_nents_for_len(req->src, req->nbytes);
1326         if (src_nents < 0) {
1327                 dev_err(jrdev, "Invalid number of src SG.\n");
1328                 return src_nents;
1329         }
1330
1331         if (src_nents) {
1332                 mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
1333                                           DMA_TO_DEVICE);
1334                 if (!mapped_nents) {
1335                         dev_err(jrdev, "unable to DMA map source\n");
1336                         return -ENOMEM;
1337                 }
1338         } else {
1339                 mapped_nents = 0;
1340         }
1341
1342         sec4_sg_src_index = 2;
1343         sec4_sg_bytes = (sec4_sg_src_index + mapped_nents) *
1344                          sizeof(struct sec4_sg_entry);
1345
1346         /* allocate space for base edesc and hw desc commands, link tables */
1347         edesc = ahash_edesc_alloc(req, sec4_sg_src_index + mapped_nents,
1348                                   ctx->sh_desc_digest, ctx->sh_desc_digest_dma);
1349         if (!edesc) {
1350                 dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
1351                 return -ENOMEM;
1352         }
1353
1354         desc = edesc->hw_desc;
1355
1356         edesc->src_nents = src_nents;
1357         edesc->sec4_sg_bytes = sec4_sg_bytes;
1358
1359         ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, state);
1360         if (ret)
1361                 goto unmap;
1362
1363         ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 1, buflen,
1364                                   req->nbytes);
1365         if (ret) {
1366                 dev_err(jrdev, "unable to map S/G table\n");
1367                 goto unmap;
1368         }
1369
1370         ret = map_seq_out_ptr_ctx(desc, jrdev, state, digestsize);
1371         if (ret)
1372                 goto unmap;
1373
1374         print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
1375                              DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
1376                              1);
1377
1378         return ahash_enqueue_req(jrdev, ahash_done, req,
1379                                  digestsize, DMA_FROM_DEVICE);
1380  unmap:
1381         ahash_unmap(jrdev, edesc, req, digestsize);
1382         kfree(edesc);
1383         return -ENOMEM;
1385 }
1386
1387 /* submit first update job descriptor after init */
1388 static int ahash_update_first(struct ahash_request *req)
1389 {
1390         struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
1391         struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
1392         struct caam_hash_state *state = ahash_request_ctx(req);
1393         struct device *jrdev = ctx->jrdev;
1394         u8 *buf = state->buf;
1395         int *buflen = &state->buflen;
1396         int *next_buflen = &state->next_buflen;
1397         int to_hash;
1398         int blocksize = crypto_ahash_blocksize(ahash);
1399         u32 *desc;
1400         int src_nents, mapped_nents;
1401         struct ahash_edesc *edesc;
1402         int ret = 0;
1403
1404         *next_buflen = req->nbytes & (blocksize - 1);
1405         to_hash = req->nbytes - *next_buflen;
1406
1407         /*
1408          * XCBC and CMAC finalize the last block differently; if to_hash
1409          * is a multiple of the block size, keep the last block buffered
1410          */
1411         if ((is_xcbc_aes(ctx->adata.algtype) ||
1412              is_cmac_aes(ctx->adata.algtype)) && to_hash >= blocksize &&
1413              (*next_buflen == 0)) {
1414                 *next_buflen = blocksize;
1415                 to_hash -= blocksize;
1416         }
1417
1418         if (to_hash) {
1419                 src_nents = sg_nents_for_len(req->src,
1420                                              req->nbytes - *next_buflen);
1421                 if (src_nents < 0) {
1422                         dev_err(jrdev, "Invalid number of src SG.\n");
1423                         return src_nents;
1424                 }
1425
1426                 if (src_nents) {
1427                         mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
1428                                                   DMA_TO_DEVICE);
1429                         if (!mapped_nents) {
1430                                 dev_err(jrdev, "unable to map source for DMA\n");
1431                                 return -ENOMEM;
1432                         }
1433                 } else {
1434                         mapped_nents = 0;
1435                 }
1436
1437                 /*
1438                  * allocate space for base edesc and hw desc commands,
1439                  * link tables
1440                  */
1441                 edesc = ahash_edesc_alloc(req, mapped_nents > 1 ?
1442                                           mapped_nents : 0,
1443                                           ctx->sh_desc_update_first,
1444                                           ctx->sh_desc_update_first_dma);
1445                 if (!edesc) {
1446                         dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
1447                         return -ENOMEM;
1448                 }
1449
1450                 edesc->src_nents = src_nents;
1451
1452                 ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 0, 0,
1453                                           to_hash);
1454                 if (ret)
1455                         goto unmap_ctx;
1456
1457                 desc = edesc->hw_desc;
1458
1459                 ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
1460                 if (ret)
1461                         goto unmap_ctx;
1462
1463                 print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
1464                                      DUMP_PREFIX_ADDRESS, 16, 4, desc,
1465                                      desc_bytes(desc), 1);
1466
1467                 ret = ahash_enqueue_req(jrdev, ahash_done_ctx_dst, req,
1468                                         ctx->ctx_len, DMA_TO_DEVICE);
1469                 if ((ret != -EINPROGRESS) && (ret != -EBUSY))
1470                         return ret;
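                     /* first block(s) hashed; switch to the context-based handlers */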
1471                 state->update = ahash_update_ctx;
1472                 state->finup = ahash_finup_ctx;
1473                 state->final = ahash_final_ctx;
1474         } else if (*next_buflen) {
1475                 state->update = ahash_update_no_ctx;
1476                 state->finup = ahash_finup_no_ctx;
1477                 state->final = ahash_final_no_ctx;
1478                 scatterwalk_map_and_copy(buf, req->src, 0,
1479                                          req->nbytes, 0);
1480                 *buflen = *next_buflen;
1481
1482                 print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
1483                                      DUMP_PREFIX_ADDRESS, 16, 4, buf,
1484                                      *buflen, 1);
1485         }
1486
1487         return ret;
1488  unmap_ctx:
1489         ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
1490         kfree(edesc);
1491         return ret;
1492 }
1493
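     /* finup with no prior update has no saved context: just do a one-shot digest */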
1494 static int ahash_finup_first(struct ahash_request *req)
1495 {
1496         return ahash_digest(req);
1497 }
1498
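     /*
      * arm the state machine for a fresh request: the first update/finup
      * run without a saved device context
      */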
1499 static int ahash_init(struct ahash_request *req)
1500 {
1501         struct caam_hash_state *state = ahash_request_ctx(req);
1502
1503         state->update = ahash_update_first;
1504         state->finup = ahash_finup_first;
1505         state->final = ahash_final_no_ctx;
1506
1507         state->ctx_dma = 0;
1508         state->ctx_dma_len = 0;
1509         state->buf_dma = 0;
1510         state->buflen = 0;
1511         state->next_buflen = 0;
1512
1513         return 0;
1514 }
1515
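     /*
      * The entry points below dispatch to whichever handlers the state
      * machine last installed.  A typical flow through the generic
      * crypto API looks like this (sketch):
      *
      *     ahash_request_set_crypt(req, src_sg, out, nbytes);
      *     crypto_ahash_init(req);    -> ahash_init
      *     crypto_ahash_update(req);  -> state->update (ahash_update_first,
      *                                   then ahash_update_[no_]ctx)
      *     crypto_ahash_final(req);   -> state->final
      */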
1516 static int ahash_update(struct ahash_request *req)
1517 {
1518         struct caam_hash_state *state = ahash_request_ctx(req);
1519
1520         return state->update(req);
1521 }
1522
1523 static int ahash_finup(struct ahash_request *req)
1524 {
1525         struct caam_hash_state *state = ahash_request_ctx(req);
1526
1527         return state->finup(req);
1528 }
1529
1530 static int ahash_final(struct ahash_request *req)
1531 {
1532         struct caam_hash_state *state = ahash_request_ctx(req);
1533
1534         return state->final(req);
1535 }
1536
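     /*
      * export/import serialize the partial-block buffer, the running
      * CAAM context and the installed handlers, so a request can be
      * suspended and later resumed
      */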
1537 static int ahash_export(struct ahash_request *req, void *out)
1538 {
1539         struct caam_hash_state *state = ahash_request_ctx(req);
1540         struct caam_export_state *export = out;
1541         u8 *buf = state->buf;
1542         int len = state->buflen;
1543
1544         memcpy(export->buf, buf, len);
1545         memcpy(export->caam_ctx, state->caam_ctx, sizeof(export->caam_ctx));
1546         export->buflen = len;
1547         export->update = state->update;
1548         export->final = state->final;
1549         export->finup = state->finup;
1550
1551         return 0;
1552 }
1553
1554 static int ahash_import(struct ahash_request *req, const void *in)
1555 {
1556         struct caam_hash_state *state = ahash_request_ctx(req);
1557         const struct caam_export_state *export = in;
1558
1559         memset(state, 0, sizeof(*state));
1560         memcpy(state->buf, export->buf, export->buflen);
1561         memcpy(state->caam_ctx, export->caam_ctx, sizeof(state->caam_ctx));
1562         state->buflen = export->buflen;
1563         state->update = export->update;
1564         state->final = export->final;
1565         state->finup = export->finup;
1566
1567         return 0;
1568 }
1569
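     /*
      * template describing one hash algorithm, instantiated as an
      * unkeyed and/or a keyed (hmac) variant
      */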
1570 struct caam_hash_template {
1571         char name[CRYPTO_MAX_ALG_NAME];
1572         char driver_name[CRYPTO_MAX_ALG_NAME];
1573         char hmac_name[CRYPTO_MAX_ALG_NAME];
1574         char hmac_driver_name[CRYPTO_MAX_ALG_NAME];
1575         unsigned int blocksize;
1576         struct ahash_alg template_ahash;
1577         u32 alg_type;
1578 };
1579
1580 /* ahash algorithm templates */
1581 static struct caam_hash_template driver_hash[] = {
1582         {
1583                 .name = "sha1",
1584                 .driver_name = "sha1-caam",
1585                 .hmac_name = "hmac(sha1)",
1586                 .hmac_driver_name = "hmac-sha1-caam",
1587                 .blocksize = SHA1_BLOCK_SIZE,
1588                 .template_ahash = {
1589                         .init = ahash_init,
1590                         .update = ahash_update,
1591                         .final = ahash_final,
1592                         .finup = ahash_finup,
1593                         .digest = ahash_digest,
1594                         .export = ahash_export,
1595                         .import = ahash_import,
1596                         .setkey = ahash_setkey,
1597                         .halg = {
1598                                 .digestsize = SHA1_DIGEST_SIZE,
1599                                 .statesize = sizeof(struct caam_export_state),
1600                         },
1601                 },
1602                 .alg_type = OP_ALG_ALGSEL_SHA1,
1603         }, {
1604                 .name = "sha224",
1605                 .driver_name = "sha224-caam",
1606                 .hmac_name = "hmac(sha224)",
1607                 .hmac_driver_name = "hmac-sha224-caam",
1608                 .blocksize = SHA224_BLOCK_SIZE,
1609                 .template_ahash = {
1610                         .init = ahash_init,
1611                         .update = ahash_update,
1612                         .final = ahash_final,
1613                         .finup = ahash_finup,
1614                         .digest = ahash_digest,
1615                         .export = ahash_export,
1616                         .import = ahash_import,
1617                         .setkey = ahash_setkey,
1618                         .halg = {
1619                                 .digestsize = SHA224_DIGEST_SIZE,
1620                                 .statesize = sizeof(struct caam_export_state),
1621                         },
1622                 },
1623                 .alg_type = OP_ALG_ALGSEL_SHA224,
1624         }, {
1625                 .name = "sha256",
1626                 .driver_name = "sha256-caam",
1627                 .hmac_name = "hmac(sha256)",
1628                 .hmac_driver_name = "hmac-sha256-caam",
1629                 .blocksize = SHA256_BLOCK_SIZE,
1630                 .template_ahash = {
1631                         .init = ahash_init,
1632                         .update = ahash_update,
1633                         .final = ahash_final,
1634                         .finup = ahash_finup,
1635                         .digest = ahash_digest,
1636                         .export = ahash_export,
1637                         .import = ahash_import,
1638                         .setkey = ahash_setkey,
1639                         .halg = {
1640                                 .digestsize = SHA256_DIGEST_SIZE,
1641                                 .statesize = sizeof(struct caam_export_state),
1642                         },
1643                 },
1644                 .alg_type = OP_ALG_ALGSEL_SHA256,
1645         }, {
1646                 .name = "sha384",
1647                 .driver_name = "sha384-caam",
1648                 .hmac_name = "hmac(sha384)",
1649                 .hmac_driver_name = "hmac-sha384-caam",
1650                 .blocksize = SHA384_BLOCK_SIZE,
1651                 .template_ahash = {
1652                         .init = ahash_init,
1653                         .update = ahash_update,
1654                         .final = ahash_final,
1655                         .finup = ahash_finup,
1656                         .digest = ahash_digest,
1657                         .export = ahash_export,
1658                         .import = ahash_import,
1659                         .setkey = ahash_setkey,
1660                         .halg = {
1661                                 .digestsize = SHA384_DIGEST_SIZE,
1662                                 .statesize = sizeof(struct caam_export_state),
1663                         },
1664                 },
1665                 .alg_type = OP_ALG_ALGSEL_SHA384,
1666         }, {
1667                 .name = "sha512",
1668                 .driver_name = "sha512-caam",
1669                 .hmac_name = "hmac(sha512)",
1670                 .hmac_driver_name = "hmac-sha512-caam",
1671                 .blocksize = SHA512_BLOCK_SIZE,
1672                 .template_ahash = {
1673                         .init = ahash_init,
1674                         .update = ahash_update,
1675                         .final = ahash_final,
1676                         .finup = ahash_finup,
1677                         .digest = ahash_digest,
1678                         .export = ahash_export,
1679                         .import = ahash_import,
1680                         .setkey = ahash_setkey,
1681                         .halg = {
1682                                 .digestsize = SHA512_DIGEST_SIZE,
1683                                 .statesize = sizeof(struct caam_export_state),
1684                         },
1685                 },
1686                 .alg_type = OP_ALG_ALGSEL_SHA512,
1687         }, {
1688                 .name = "md5",
1689                 .driver_name = "md5-caam",
1690                 .hmac_name = "hmac(md5)",
1691                 .hmac_driver_name = "hmac-md5-caam",
1692                 .blocksize = MD5_BLOCK_WORDS * 4,
1693                 .template_ahash = {
1694                         .init = ahash_init,
1695                         .update = ahash_update,
1696                         .final = ahash_final,
1697                         .finup = ahash_finup,
1698                         .digest = ahash_digest,
1699                         .export = ahash_export,
1700                         .import = ahash_import,
1701                         .setkey = ahash_setkey,
1702                         .halg = {
1703                                 .digestsize = MD5_DIGEST_SIZE,
1704                                 .statesize = sizeof(struct caam_export_state),
1705                         },
1706                 },
1707                 .alg_type = OP_ALG_ALGSEL_MD5,
1708         }, {
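             /* keyed-only: no unkeyed variant of xcbc(aes) is registered */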
1709                 .hmac_name = "xcbc(aes)",
1710                 .hmac_driver_name = "xcbc-aes-caam",
1711                 .blocksize = AES_BLOCK_SIZE,
1712                 .template_ahash = {
1713                         .init = ahash_init,
1714                         .update = ahash_update,
1715                         .final = ahash_final,
1716                         .finup = ahash_finup,
1717                         .digest = ahash_digest,
1718                         .export = ahash_export,
1719                         .import = ahash_import,
1720                         .setkey = axcbc_setkey,
1721                         .halg = {
1722                                 .digestsize = AES_BLOCK_SIZE,
1723                                 .statesize = sizeof(struct caam_export_state),
1724                         },
1725                 },
1726                 .alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XCBC_MAC,
1727         }, {
1728                 .hmac_name = "cmac(aes)",
1729                 .hmac_driver_name = "cmac-aes-caam",
1730                 .blocksize = AES_BLOCK_SIZE,
1731                 .template_ahash = {
1732                         .init = ahash_init,
1733                         .update = ahash_update,
1734                         .final = ahash_final,
1735                         .finup = ahash_finup,
1736                         .digest = ahash_digest,
1737                         .export = ahash_export,
1738                         .import = ahash_import,
1739                         .setkey = acmac_setkey,
1740                         .halg = {
1741                                 .digestsize = AES_BLOCK_SIZE,
1742                                 .statesize = sizeof(struct caam_export_state),
1743                         },
1744                 },
1745                 .alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CMAC,
1746         },
1747 };
1748
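     /* a registered algorithm instance, linked into hash_list for cleanup */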
1749 struct caam_hash_alg {
1750         struct list_head entry;
1751         int alg_type;
1752         struct ahash_alg ahash_alg;
1753 };
1754
1755 static int caam_hash_cra_init(struct crypto_tfm *tfm)
1756 {
1757         struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
1758         struct crypto_alg *base = tfm->__crt_alg;
1759         struct hash_alg_common *halg =
1760                  container_of(base, struct hash_alg_common, base);
1761         struct ahash_alg *alg =
1762                  container_of(halg, struct ahash_alg, halg);
1763         struct caam_hash_alg *caam_hash =
1764                  container_of(alg, struct caam_hash_alg, ahash_alg);
1765         struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
1766         /* Sizes for MDHA running digests: MD5, SHA1, 224, 256, 384, 512.
              * SHA-224 and SHA-384 carry the full SHA-256/SHA-512 internal
              * state, hence the 32- and 64-byte entries below.
              */
1767         static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE,
1768                                          HASH_MSG_LEN + SHA1_DIGEST_SIZE,
1769                                          HASH_MSG_LEN + 32,
1770                                          HASH_MSG_LEN + SHA256_DIGEST_SIZE,
1771                                          HASH_MSG_LEN + 64,
1772                                          HASH_MSG_LEN + SHA512_DIGEST_SIZE };
1773         const size_t sh_desc_update_offset = offsetof(struct caam_hash_ctx,
1774                                                       sh_desc_update);
1775         dma_addr_t dma_addr;
1776         struct caam_drv_private *priv;
1777
1778         /*
1779          * Get a Job ring from Job Ring driver to ensure in-order
1780          * crypto request processing per tfm
1781          */
1782         ctx->jrdev = caam_jr_alloc();
1783         if (IS_ERR(ctx->jrdev)) {
1784                 pr_err("Job Ring Device allocation for transform failed\n");
1785                 return PTR_ERR(ctx->jrdev);
1786         }
1787
1788         priv = dev_get_drvdata(ctx->jrdev->parent);
1789
1790         if (is_xcbc_aes(caam_hash->alg_type)) {
1791                 ctx->dir = DMA_TO_DEVICE;
1792                 ctx->key_dir = DMA_BIDIRECTIONAL;
1793                 ctx->adata.algtype = OP_TYPE_CLASS1_ALG | caam_hash->alg_type;
1794                 ctx->ctx_len = 48;
1795         } else if (is_cmac_aes(caam_hash->alg_type)) {
1796                 ctx->dir = DMA_TO_DEVICE;
1797                 ctx->key_dir = DMA_NONE;
1798                 ctx->adata.algtype = OP_TYPE_CLASS1_ALG | caam_hash->alg_type;
1799                 ctx->ctx_len = 32;
1800         } else {
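                     /*
                      * Era 6+ derives the split key via DKP, which writes it
                      * back into the shared descriptor, hence the bidirectional
                      * mapping of descriptor memory
                      */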
1801                 if (priv->era >= 6) {
1802                         ctx->dir = DMA_BIDIRECTIONAL;
1803                         ctx->key_dir = alg->setkey ? DMA_TO_DEVICE : DMA_NONE;
1804                 } else {
1805                         ctx->dir = DMA_TO_DEVICE;
1806                         ctx->key_dir = DMA_NONE;
1807                 }
1808                 ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;
1809                 ctx->ctx_len = runninglen[(ctx->adata.algtype &
1810                                            OP_ALG_ALGSEL_SUBMASK) >>
1811                                           OP_ALG_ALGSEL_SHIFT];
1812         }
1813
1814         if (ctx->key_dir != DMA_NONE) {
1815                 ctx->adata.key_dma = dma_map_single_attrs(ctx->jrdev, ctx->key,
1816                                                           ARRAY_SIZE(ctx->key),
1817                                                           ctx->key_dir,
1818                                                           DMA_ATTR_SKIP_CPU_SYNC);
1819                 if (dma_mapping_error(ctx->jrdev, ctx->adata.key_dma)) {
1820                         dev_err(ctx->jrdev, "unable to map key\n");
1821                         caam_jr_free(ctx->jrdev);
1822                         return -ENOMEM;
1823                 }
1824         }
1825
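             /*
              * map all four shared descriptors as a single region; the
              * per-descriptor DMA addresses below are offsets into it
              */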
1826         dma_addr = dma_map_single_attrs(ctx->jrdev, ctx->sh_desc_update,
1827                                         offsetof(struct caam_hash_ctx, key) -
1828                                         sh_desc_update_offset,
1829                                         ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
1830         if (dma_mapping_error(ctx->jrdev, dma_addr)) {
1831                 dev_err(ctx->jrdev, "unable to map shared descriptors\n");
1832
1833                 if (ctx->key_dir != DMA_NONE)
1834                         dma_unmap_single_attrs(ctx->jrdev, ctx->adata.key_dma,
1835                                                ARRAY_SIZE(ctx->key),
1836                                                ctx->key_dir,
1837                                                DMA_ATTR_SKIP_CPU_SYNC);
1838
1839                 caam_jr_free(ctx->jrdev);
1840                 return -ENOMEM;
1841         }
1842
1843         ctx->sh_desc_update_dma = dma_addr;
1844         ctx->sh_desc_update_first_dma = dma_addr +
1845                                         offsetof(struct caam_hash_ctx,
1846                                                  sh_desc_update_first) -
1847                                         sh_desc_update_offset;
1848         ctx->sh_desc_fin_dma = dma_addr + offsetof(struct caam_hash_ctx,
1849                                                    sh_desc_fin) -
1850                                         sh_desc_update_offset;
1851         ctx->sh_desc_digest_dma = dma_addr + offsetof(struct caam_hash_ctx,
1852                                                       sh_desc_digest) -
1853                                         sh_desc_update_offset;
1854
1855         ctx->enginectx.op.do_one_request = ahash_do_one_req;
1856
1857         crypto_ahash_set_reqsize(ahash, sizeof(struct caam_hash_state));
1859
1860         /*
1861          * For keyed hash algorithms shared descriptors
1862          * will be created later in setkey() callback
1863          */
1864         return alg->setkey ? 0 : ahash_set_sh_desc(ahash);
1865 }
1866
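     /* undo the DMA mappings and job ring allocation done in cra_init */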
1867 static void caam_hash_cra_exit(struct crypto_tfm *tfm)
1868 {
1869         struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
1870
1871         dma_unmap_single_attrs(ctx->jrdev, ctx->sh_desc_update_dma,
1872                                offsetof(struct caam_hash_ctx, key) -
1873                                offsetof(struct caam_hash_ctx, sh_desc_update),
1874                                ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
1875         if (ctx->key_dir != DMA_NONE)
1876                 dma_unmap_single_attrs(ctx->jrdev, ctx->adata.key_dma,
1877                                        ARRAY_SIZE(ctx->key), ctx->key_dir,
1878                                        DMA_ATTR_SKIP_CPU_SYNC);
1879         caam_jr_free(ctx->jrdev);
1880 }
1881
1882 void caam_algapi_hash_exit(void)
1883 {
1884         struct caam_hash_alg *t_alg, *n;
1885
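             /* hash_list is initialized only if an MD block was found in caam_algapi_hash_init() */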
1886         if (!hash_list.next)
1887                 return;
1888
1889         list_for_each_entry_safe(t_alg, n, &hash_list, entry) {
1890                 crypto_unregister_ahash(&t_alg->ahash_alg);
1891                 list_del(&t_alg->entry);
1892                 kfree(t_alg);
1893         }
1894 }
1895
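     /* build a caam_hash_alg from a template, keyed (hmac) or unkeyed */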
1896 static struct caam_hash_alg *
1897 caam_hash_alloc(struct caam_hash_template *template,
1898                 bool keyed)
1899 {
1900         struct caam_hash_alg *t_alg;
1901         struct ahash_alg *halg;
1902         struct crypto_alg *alg;
1903
1904         t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
1905         if (!t_alg) {
1906                 pr_err("failed to allocate t_alg\n");
1907                 return ERR_PTR(-ENOMEM);
1908         }
1909
1910         t_alg->ahash_alg = template->template_ahash;
1911         halg = &t_alg->ahash_alg;
1912         alg = &halg->halg.base;
1913
1914         if (keyed) {
1915                 snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
1916                          template->hmac_name);
1917                 snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
1918                          template->hmac_driver_name);
1919         } else {
1920                 snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
1921                          template->name);
1922                 snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
1923                          template->driver_name);
1924                 t_alg->ahash_alg.setkey = NULL;
1925         }
1926         alg->cra_module = THIS_MODULE;
1927         alg->cra_init = caam_hash_cra_init;
1928         alg->cra_exit = caam_hash_cra_exit;
1929         alg->cra_ctxsize = sizeof(struct caam_hash_ctx);
1930         alg->cra_priority = CAAM_CRA_PRIORITY;
1931         alg->cra_blocksize = template->blocksize;
1932         alg->cra_alignmask = 0;
1933         alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY;
1934
1935         t_alg->alg_type = template->alg_type;
1936
1937         return t_alg;
1938 }
1939
1940 int caam_algapi_hash_init(struct device *ctrldev)
1941 {
1942         int i = 0, err = 0;
1943         struct caam_drv_private *priv = dev_get_drvdata(ctrldev);
1944         unsigned int md_limit = SHA512_DIGEST_SIZE;
1945         u32 md_inst, md_vid;
1946
1947         /*
1948          * Register crypto algorithms the device supports.  First, identify
1949          * presence and attributes of MD block.
1950          */
1951         if (priv->era < 10) {
1952                 md_vid = (rd_reg32(&priv->ctrl->perfmon.cha_id_ls) &
1953                           CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
1954                 md_inst = (rd_reg32(&priv->ctrl->perfmon.cha_num_ls) &
1955                            CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
1956         } else {
1957                 u32 mdha = rd_reg32(&priv->ctrl->vreg.mdha);
1958
1959                 md_vid = (mdha & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT;
1960                 md_inst = mdha & CHA_VER_NUM_MASK;
1961         }
1962
1963         /*
1964          * Skip registration of any hashing algorithms if MD block
1965          * is not present.
1966          */
1967         if (!md_inst)
1968                 return 0;
1969
1970         /* LP256 (low-power) MD block only supports digests up to SHA-256 */
1971         if (md_vid == CHA_VER_VID_MD_LP256)
1972                 md_limit = SHA256_DIGEST_SIZE;
1973
1974         INIT_LIST_HEAD(&hash_list);
1975
1976         /* register crypto algorithms the device supports */
1977         for (i = 0; i < ARRAY_SIZE(driver_hash); i++) {
1978                 struct caam_hash_alg *t_alg;
1979                 struct caam_hash_template *alg = driver_hash + i;
1980
1981                 /* If MD size is not supported by device, skip registration */
1982                 if (is_mdha(alg->alg_type) &&
1983                     alg->template_ahash.halg.digestsize > md_limit)
1984                         continue;
1985
1986                 /* register hmac version */
1987                 t_alg = caam_hash_alloc(alg, true);
1988                 if (IS_ERR(t_alg)) {
1989                         err = PTR_ERR(t_alg);
1990                         pr_warn("%s alg allocation failed\n",
1991                                 alg->hmac_driver_name);
1992                         continue;
1993                 }
1994
1995                 err = crypto_register_ahash(&t_alg->ahash_alg);
1996                 if (err) {
1997                         pr_warn("%s alg registration failed: %d\n",
1998                                 t_alg->ahash_alg.halg.base.cra_driver_name,
1999                                 err);
2000                         kfree(t_alg);
2001                 } else {
2002                         list_add_tail(&t_alg->entry, &hash_list);
                     }
2003
2004                 if ((alg->alg_type & OP_ALG_ALGSEL_MASK) == OP_ALG_ALGSEL_AES)
2005                         continue;
2006
2007                 /* register unkeyed version */
2008                 t_alg = caam_hash_alloc(alg, false);
2009                 if (IS_ERR(t_alg)) {
2010                         err = PTR_ERR(t_alg);
2011                         pr_warn("%s alg allocation failed\n", alg->driver_name);
2012                         continue;
2013                 }
2014
2015                 err = crypto_register_ahash(&t_alg->ahash_alg);
2016                 if (err) {
2017                         pr_warn("%s alg registration failed: %d\n",
2018                                 t_alg->ahash_alg.halg.base.cra_driver_name,
2019                                 err);
2020                         kfree(t_alg);
2021                 } else {
2022                         list_add_tail(&t_alg->entry, &hash_list);
                     }
2023         }
2024
2025         return err;
2026 }