// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
 * Copyright 2015-2016 Freescale Semiconductor Inc.
 * Copyright 2017-2019 NXP
#include "caamalg_qi2.h"
#include "dpseci_cmd.h"
#include "desc_constr.h"
#include "sg_sw_sec4.h"
#include "sg_sw_qm2.h"
#include "caamalg_desc.h"
#include "caamhash_desc.h"
#include "dpseci-debugfs.h"
#include <linux/fsl/mc.h>
#include <soc/fsl/dpaa2-io.h>
#include <soc/fsl/dpaa2-fd.h>
#include <crypto/xts.h>
#include <asm/unaligned.h>
#define CAAM_CRA_PRIORITY	2000
/* max key is sum of AES_MAX_KEY_SIZE, max split key size */
#define CAAM_MAX_KEY_SIZE	(AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE + \
				 SHA512_DIGEST_SIZE * 2)
 * This is a cache of buffers, from which the users of the CAAM QI driver
 * can allocate short buffers. It's speedier than doing kmalloc on the hotpath.
 * NOTE: A more elegant solution would be to have some headroom in the frames
 * being processed. This can be added by the dpaa2-eth driver. This would
 * pose a problem for userspace application processing which cannot
 * know of this limitation. So for now, this will work.
 * NOTE: The memcache is SMP-safe. No need to handle spinlocks here.
static struct kmem_cache *qi_cache;
struct caam_alg_entry {
struct caam_aead_alg {
	struct caam_alg_entry caam;
struct caam_skcipher_alg {
	struct skcipher_alg skcipher;
	struct caam_alg_entry caam;
 * struct caam_ctx - per-session context
 * @flc: Flow Contexts array
 * @key: [authentication key], encryption key
 * @flc_dma: I/O virtual addresses of the Flow Contexts
 * @key_dma: I/O virtual address of the key
 * @dir: DMA direction for mapping key and Flow Contexts
 * @adata: authentication algorithm details
 * @cdata: encryption algorithm details
 * @authsize: authentication tag (a.k.a. ICV / MAC) size
	struct caam_flc flc[NUM_OP];
	u8 key[CAAM_MAX_KEY_SIZE];
	dma_addr_t flc_dma[NUM_OP];
	enum dma_data_direction dir;
	unsigned int authsize;
	bool xts_key_fallback;
	struct crypto_skcipher *fallback;
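/*
 * The helper below translates an I/O virtual address handed back by the
 * DPAA2 hardware into a CPU virtual address: when the device sits behind
 * an IOMMU domain, the IOVA is first translated to a physical address via
 * iommu_iova_to_phys(); otherwise the IOVA already is the physical address.
 */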
static void *dpaa2_caam_iova_to_virt(struct dpaa2_caam_priv *priv,
	phys_addr_t phys_addr;
	phys_addr = priv->domain ? iommu_iova_to_phys(priv->domain, iova_addr) :
	return phys_to_virt(phys_addr);
 * qi_cache_zalloc - Allocate buffers from CAAM-QI cache
 * Allocate data on the hotpath. Instead of using kzalloc, one can use the
 * services of the CAAM QI memory cache (backed by kmem_cache). The buffers
 * will have a size of CAAM_QI_MEMCACHE_SIZE, which should be sufficient for
 * hosting 16 SG entries.
 * @flags - flags that would be used for the equivalent kzalloc(..) call
 * Returns a pointer to a retrieved buffer on success or NULL on failure.
static inline void *qi_cache_zalloc(gfp_t flags)
	return kmem_cache_zalloc(qi_cache, flags);
 * qi_cache_free - Frees buffers allocated from the CAAM-QI cache
 * @obj - buffer previously allocated by qi_cache_zalloc
 * No checking is done; the call is passed straight through to
 * kmem_cache_free(...)
static inline void qi_cache_free(void *obj)
	kmem_cache_free(qi_cache, obj);
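/*
 * Illustrative usage (sketch only, mirroring the extended-descriptor
 * allocations later in this file):
 *
 *	struct aead_edesc *edesc;
 *
 *	edesc = qi_cache_zalloc(GFP_ATOMIC);
 *	if (unlikely(!edesc))
 *		return ERR_PTR(-ENOMEM);
 *	...
 *	qi_cache_free(edesc);
 */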
static struct caam_request *to_caam_req(struct crypto_async_request *areq)
	switch (crypto_tfm_alg_type(areq->tfm)) {
	case CRYPTO_ALG_TYPE_SKCIPHER:
		return skcipher_request_ctx(skcipher_request_cast(areq));
	case CRYPTO_ALG_TYPE_AEAD:
		return aead_request_ctx(container_of(areq, struct aead_request,
	case CRYPTO_ALG_TYPE_AHASH:
		return ahash_request_ctx(ahash_request_cast(areq));
		return ERR_PTR(-EINVAL);
static void caam_unmap(struct device *dev, struct scatterlist *src,
		       struct scatterlist *dst, int src_nents,
		       int dst_nents, dma_addr_t iv_dma, int ivsize,
		       enum dma_data_direction iv_dir, dma_addr_t qm_sg_dma,
		dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
		dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
		dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
		dma_unmap_single(dev, iv_dma, ivsize, iv_dir);
		dma_unmap_single(dev, qm_sg_dma, qm_sg_bytes, DMA_TO_DEVICE);
static int aead_set_sh_desc(struct crypto_aead *aead)
	struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	struct device *dev = ctx->dev;
	struct dpaa2_caam_priv *priv = dev_get_drvdata(dev);
	struct caam_flc *flc;
	unsigned int data_len[2];
	const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
			       OP_ALG_AAI_CTR_MOD128);
	const bool is_rfc3686 = alg->caam.rfc3686;
	if (!ctx->cdata.keylen || !ctx->authsize)
	 * AES-CTR needs to load the IV in the CONTEXT1 reg
	 * at an offset of 128 bits (16 bytes)
	 * CONTEXT1[255:128] = IV
	 * CONTEXT1[255:128] = {NONCE, IV, COUNTER}
		ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
		nonce = (u32 *)((void *)ctx->key + ctx->adata.keylen_pad +
				ctx->cdata.keylen - CTR_RFC3686_NONCE_SIZE);
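		/*
		 * Layout of ctx->key implied by the pointer arithmetic
		 * above (RFC3686 case) - the 4-byte nonce lives in the
		 * last bytes of the encryption key material:
		 *
		 *	+------------------------+-----------+-------+
		 *	| split auth key, padded | AES key   | nonce |
		 *	+------------------------+-----------+-------+
		 *	|<-- adata.keylen_pad -->|<-- cdata.keylen -->|
		 */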
	 * In case |user key| > |derived key|, using DKP<imm,imm> would result
	 * in invalid opcodes (last bytes of user key) in the resulting
	 * descriptor. Use DKP<ptr,imm> instead => both virtual and dma key
	 * addresses are needed.
	ctx->adata.key_virt = ctx->key;
	ctx->adata.key_dma = ctx->key_dma;
	ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
	ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
	data_len[0] = ctx->adata.keylen_pad;
	data_len[1] = ctx->cdata.keylen;
	/* aead_encrypt shared descriptor */
	if (desc_inline_query((alg->caam.geniv ? DESC_QI_AEAD_GIVENC_LEN :
			       DESC_QI_AEAD_ENC_LEN) +
			      (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
			      DESC_JOB_IO_LEN, data_len, &inl_mask,
			      ARRAY_SIZE(data_len)) < 0)
	ctx->adata.key_inline = !!(inl_mask & 1);
	ctx->cdata.key_inline = !!(inl_mask & 2);
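	/*
	 * The bits of inl_mask appear to follow the order of data_len[]:
	 * bit 0 says the (split) authentication key fits inline in the
	 * descriptor, bit 1 says the encryption key does; otherwise the
	 * key is referenced through its DMA address instead.
	 */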
	flc = &ctx->flc[ENCRYPT];
		cnstr_shdsc_aead_givencap(desc, &ctx->cdata, &ctx->adata,
					  ivsize, ctx->authsize, is_rfc3686,
					  nonce, ctx1_iv_off, true,
		cnstr_shdsc_aead_encap(desc, &ctx->cdata, &ctx->adata,
				       ivsize, ctx->authsize, is_rfc3686, nonce,
				       ctx1_iv_off, true, priv->sec_attr.era);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
	/* aead_decrypt shared descriptor */
	if (desc_inline_query(DESC_QI_AEAD_DEC_LEN +
			      (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
			      DESC_JOB_IO_LEN, data_len, &inl_mask,
			      ARRAY_SIZE(data_len)) < 0)
	ctx->adata.key_inline = !!(inl_mask & 1);
	ctx->cdata.key_inline = !!(inl_mask & 2);
	flc = &ctx->flc[DECRYPT];
	cnstr_shdsc_aead_decap(desc, &ctx->cdata, &ctx->adata,
			       ivsize, ctx->authsize, alg->caam.geniv,
			       is_rfc3686, nonce, ctx1_iv_off, true,
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
static int aead_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);
	ctx->authsize = authsize;
	aead_set_sh_desc(authenc);
static int aead_setkey(struct crypto_aead *aead, const u8 *key,
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *dev = ctx->dev;
	struct crypto_authenc_keys keys;
	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
	dev_dbg(dev, "keylen %d enckeylen %d authkeylen %d\n",
		keys.authkeylen + keys.enckeylen, keys.enckeylen,
	print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
	ctx->adata.keylen = keys.authkeylen;
	ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
	if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE)
	memcpy(ctx->key, keys.authkey, keys.authkeylen);
	memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
	dma_sync_single_for_device(dev, ctx->key_dma, ctx->adata.keylen_pad +
				   keys.enckeylen, ctx->dir);
	print_hex_dump_debug("ctx.key@" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
			     ctx->adata.keylen_pad + keys.enckeylen, 1);
	ctx->cdata.keylen = keys.enckeylen;
	memzero_explicit(&keys, sizeof(keys));
	return aead_set_sh_desc(aead);
	memzero_explicit(&keys, sizeof(keys));
static int des3_aead_setkey(struct crypto_aead *aead, const u8 *key,
	struct crypto_authenc_keys keys;
	err = crypto_authenc_extractkeys(&keys, key, keylen);
	if (keys.enckeylen != DES3_EDE_KEY_SIZE)
	err = crypto_des3_ede_verify_key(crypto_aead_tfm(aead), keys.enckey) ?:
	      aead_setkey(aead, key, keylen);
	memzero_explicit(&keys, sizeof(keys));
static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_request *req_ctx = aead_request_ctx(req);
	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
	struct device *dev = ctx->dev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		      GFP_KERNEL : GFP_ATOMIC;
	int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
	int src_len, dst_len = 0;
	struct aead_edesc *edesc;
	dma_addr_t qm_sg_dma, iv_dma = 0;
	unsigned int authsize = ctx->authsize;
	int qm_sg_index = 0, qm_sg_nents = 0, qm_sg_bytes;
	struct dpaa2_sg_entry *sg_table;
	/* allocate space for base edesc, link tables and IV */
	edesc = qi_cache_zalloc(GFP_DMA | flags);
	if (unlikely(!edesc)) {
		dev_err(dev, "could not allocate extended descriptor\n");
		return ERR_PTR(-ENOMEM);
	if (unlikely(req->dst != req->src)) {
		src_len = req->assoclen + req->cryptlen;
		dst_len = src_len + (encrypt ? authsize : (-authsize));
		src_nents = sg_nents_for_len(req->src, src_len);
		if (unlikely(src_nents < 0)) {
			dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
			qi_cache_free(edesc);
			return ERR_PTR(src_nents);
		dst_nents = sg_nents_for_len(req->dst, dst_len);
		if (unlikely(dst_nents < 0)) {
			dev_err(dev, "Insufficient bytes (%d) in dst S/G\n",
			qi_cache_free(edesc);
			return ERR_PTR(dst_nents);
			mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
			if (unlikely(!mapped_src_nents)) {
				dev_err(dev, "unable to map source\n");
				qi_cache_free(edesc);
				return ERR_PTR(-ENOMEM);
			mapped_src_nents = 0;
			mapped_dst_nents = dma_map_sg(dev, req->dst, dst_nents,
			if (unlikely(!mapped_dst_nents)) {
				dev_err(dev, "unable to map destination\n");
				dma_unmap_sg(dev, req->src, src_nents,
				qi_cache_free(edesc);
				return ERR_PTR(-ENOMEM);
			mapped_dst_nents = 0;
		src_len = req->assoclen + req->cryptlen +
			  (encrypt ? authsize : 0);
		src_nents = sg_nents_for_len(req->src, src_len);
		if (unlikely(src_nents < 0)) {
			dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
			qi_cache_free(edesc);
			return ERR_PTR(src_nents);
		mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
		if (unlikely(!mapped_src_nents)) {
			dev_err(dev, "unable to map source\n");
			qi_cache_free(edesc);
			return ERR_PTR(-ENOMEM);
	if ((alg->caam.rfc3686 && encrypt) || !alg->caam.geniv)
		ivsize = crypto_aead_ivsize(aead);
	 * Create S/G table: req->assoclen, [IV,] req->src [, req->dst].
	 * Input is not contiguous.
	 * HW reads 4 S/G entries at a time; make sure the reads don't go beyond
	 * the end of the table by allocating more S/G entries. Logic:
	 * if (src != dst && output S/G)
	 *	pad output S/G, if needed
	 * else if (src == dst && S/G)
	 *	overlapping S/Gs; pad one of them
	 * else if (input S/G) ...
	 *	pad input S/G, if needed
	qm_sg_nents = 1 + !!ivsize + mapped_src_nents;
	if (mapped_dst_nents > 1)
		qm_sg_nents += pad_sg_nents(mapped_dst_nents);
	else if ((req->src == req->dst) && (mapped_src_nents > 1))
		qm_sg_nents = max(pad_sg_nents(qm_sg_nents),
				  pad_sg_nents(mapped_src_nents));
		qm_sg_nents = pad_sg_nents(qm_sg_nents);
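	/*
	 * Worked example of the padding above, assuming pad_sg_nents()
	 * rounds up to the 4-entry read size named in the comment: with
	 * src != dst, an IV, 2 mapped src entries and 5 mapped dst
	 * entries, the input side needs 1 (assoclen) + 1 (IV) + 2 (src)
	 * = 4 entries, the output side 5 entries padded up to 8, so the
	 * table holds 12 entries in total.
	 */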
	sg_table = &edesc->sgt[0];
	qm_sg_bytes = qm_sg_nents * sizeof(*sg_table);
	if (unlikely(offsetof(struct aead_edesc, sgt) + qm_sg_bytes + ivsize >
		     CAAM_QI_MEMCACHE_SIZE)) {
		dev_err(dev, "No space for %d S/G entries and/or %dB IV\n",
			qm_sg_nents, ivsize);
		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
		qi_cache_free(edesc);
		return ERR_PTR(-ENOMEM);
		u8 *iv = (u8 *)(sg_table + qm_sg_nents);
		/* Make sure IV is located in a DMAable area */
		memcpy(iv, req->iv, ivsize);
		iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, iv_dma)) {
			dev_err(dev, "unable to map IV\n");
			caam_unmap(dev, req->src, req->dst, src_nents,
				   dst_nents, 0, 0, DMA_NONE, 0, 0);
			qi_cache_free(edesc);
			return ERR_PTR(-ENOMEM);
	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;
	edesc->iv_dma = iv_dma;
	if ((alg->caam.class1_alg_type & OP_ALG_ALGSEL_MASK) ==
	    OP_ALG_ALGSEL_CHACHA20 && ivsize != CHACHAPOLY_IV_SIZE)
		 * The associated data already comes with the IV, but we need
		 * to skip it when we authenticate or encrypt...
		edesc->assoclen = cpu_to_caam32(req->assoclen - ivsize);
		edesc->assoclen = cpu_to_caam32(req->assoclen);
	edesc->assoclen_dma = dma_map_single(dev, &edesc->assoclen, 4,
	if (dma_mapping_error(dev, edesc->assoclen_dma)) {
		dev_err(dev, "unable to map assoclen\n");
		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
			   iv_dma, ivsize, DMA_TO_DEVICE, 0, 0);
		qi_cache_free(edesc);
		return ERR_PTR(-ENOMEM);
	dma_to_qm_sg_one(sg_table, edesc->assoclen_dma, 4, 0);
		dma_to_qm_sg_one(sg_table + qm_sg_index, iv_dma, ivsize, 0);
	sg_to_qm_sg_last(req->src, src_len, sg_table + qm_sg_index, 0);
	qm_sg_index += mapped_src_nents;
	if (mapped_dst_nents > 1)
		sg_to_qm_sg_last(req->dst, dst_len, sg_table + qm_sg_index, 0);
	qm_sg_dma = dma_map_single(dev, sg_table, qm_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, qm_sg_dma)) {
		dev_err(dev, "unable to map S/G table\n");
		dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
			   iv_dma, ivsize, DMA_TO_DEVICE, 0, 0);
		qi_cache_free(edesc);
		return ERR_PTR(-ENOMEM);
	edesc->qm_sg_dma = qm_sg_dma;
	edesc->qm_sg_bytes = qm_sg_bytes;
	out_len = req->assoclen + req->cryptlen +
		  (encrypt ? ctx->authsize : (-ctx->authsize));
	in_len = 4 + ivsize + req->assoclen + req->cryptlen;
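	/*
	 * The leading 4 bytes of the input length account for the assoclen
	 * word mapped above (dma_map_single(..., &edesc->assoclen, 4, ...))
	 * and placed as the first S/G entry, which the engine consumes
	 * before the IV and the actual data.
	 */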
	memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
	dpaa2_fl_set_final(in_fle, true);
	dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
	dpaa2_fl_set_addr(in_fle, qm_sg_dma);
	dpaa2_fl_set_len(in_fle, in_len);
	if (req->dst == req->src) {
		if (mapped_src_nents == 1) {
			dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
			dpaa2_fl_set_addr(out_fle, sg_dma_address(req->src));
			dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
			dpaa2_fl_set_addr(out_fle, qm_sg_dma +
					  (1 + !!ivsize) * sizeof(*sg_table));
	} else if (!mapped_dst_nents) {
		 * The crypto engine requires the output entry to be present
		 * when a "frame list" FD is used.
		 * Since the engine does not support FMT=2'b11 (unused entry
		 * type), leaving out_fle zeroized is the best option.
	} else if (mapped_dst_nents == 1) {
		dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
		dpaa2_fl_set_addr(out_fle, sg_dma_address(req->dst));
		dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
		dpaa2_fl_set_addr(out_fle, qm_sg_dma + qm_sg_index *
	dpaa2_fl_set_len(out_fle, out_len);
static int chachapoly_set_sh_desc(struct crypto_aead *aead)
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	struct device *dev = ctx->dev;
	struct caam_flc *flc;
	if (!ctx->cdata.keylen || !ctx->authsize)
	flc = &ctx->flc[ENCRYPT];
	cnstr_shdsc_chachapoly(desc, &ctx->cdata, &ctx->adata, ivsize,
			       ctx->authsize, true, true);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
	flc = &ctx->flc[DECRYPT];
	cnstr_shdsc_chachapoly(desc, &ctx->cdata, &ctx->adata, ivsize,
			       ctx->authsize, false, true);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
static int chachapoly_setauthsize(struct crypto_aead *aead,
				  unsigned int authsize)
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	if (authsize != POLY1305_DIGEST_SIZE)
	ctx->authsize = authsize;
	return chachapoly_set_sh_desc(aead);
static int chachapoly_setkey(struct crypto_aead *aead, const u8 *key,
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	unsigned int saltlen = CHACHAPOLY_IV_SIZE - ivsize;
	if (keylen != CHACHA_KEY_SIZE + saltlen)
	memcpy(ctx->key, key, keylen);
	ctx->cdata.key_virt = ctx->key;
	ctx->cdata.keylen = keylen - saltlen;
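	/*
	 * For rfc7539(chacha20,poly1305) the IV is the full 12 bytes and
	 * saltlen is 0; for rfc7539esp the IV is 8 bytes, so the remaining
	 * 4 bytes of key material are the salt that stays in ctx->key
	 * after the ChaCha20 key proper.
	 */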
	return chachapoly_set_sh_desc(aead);
static int gcm_set_sh_desc(struct crypto_aead *aead)
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *dev = ctx->dev;
	unsigned int ivsize = crypto_aead_ivsize(aead);
	struct caam_flc *flc;
	int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
	if (!ctx->cdata.keylen || !ctx->authsize)
	 * AES GCM encrypt shared descriptor
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	if (rem_bytes >= DESC_QI_GCM_ENC_LEN) {
		ctx->cdata.key_inline = true;
		ctx->cdata.key_virt = ctx->key;
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	flc = &ctx->flc[ENCRYPT];
	cnstr_shdsc_gcm_encap(desc, &ctx->cdata, ivsize, ctx->authsize, true);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	if (rem_bytes >= DESC_QI_GCM_DEC_LEN) {
		ctx->cdata.key_inline = true;
		ctx->cdata.key_virt = ctx->key;
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	flc = &ctx->flc[DECRYPT];
	cnstr_shdsc_gcm_decap(desc, &ctx->cdata, ivsize, ctx->authsize, true);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);
	err = crypto_gcm_check_authsize(authsize);
	ctx->authsize = authsize;
	gcm_set_sh_desc(authenc);
static int gcm_setkey(struct crypto_aead *aead,
		      const u8 *key, unsigned int keylen)
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *dev = ctx->dev;
	ret = aes_check_keylen(keylen);
	print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
	memcpy(ctx->key, key, keylen);
	dma_sync_single_for_device(dev, ctx->key_dma, keylen, ctx->dir);
	ctx->cdata.keylen = keylen;
	return gcm_set_sh_desc(aead);
static int rfc4106_set_sh_desc(struct crypto_aead *aead)
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *dev = ctx->dev;
	unsigned int ivsize = crypto_aead_ivsize(aead);
	struct caam_flc *flc;
	int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
	if (!ctx->cdata.keylen || !ctx->authsize)
	ctx->cdata.key_virt = ctx->key;
	 * RFC4106 encrypt shared descriptor
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	if (rem_bytes >= DESC_QI_RFC4106_ENC_LEN) {
		ctx->cdata.key_inline = true;
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	flc = &ctx->flc[ENCRYPT];
	cnstr_shdsc_rfc4106_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	if (rem_bytes >= DESC_QI_RFC4106_DEC_LEN) {
		ctx->cdata.key_inline = true;
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	flc = &ctx->flc[DECRYPT];
	cnstr_shdsc_rfc4106_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
static int rfc4106_setauthsize(struct crypto_aead *authenc,
			       unsigned int authsize)
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);
	err = crypto_rfc4106_check_authsize(authsize);
	ctx->authsize = authsize;
	rfc4106_set_sh_desc(authenc);
static int rfc4106_setkey(struct crypto_aead *aead,
			  const u8 *key, unsigned int keylen)
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *dev = ctx->dev;
	ret = aes_check_keylen(keylen - 4);
	print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
	memcpy(ctx->key, key, keylen);
	 * The last four bytes of the key material are used as the salt value
	 * in the nonce. Update the AES key length.
	ctx->cdata.keylen = keylen - 4;
	dma_sync_single_for_device(dev, ctx->key_dma, ctx->cdata.keylen,
	return rfc4106_set_sh_desc(aead);
static int rfc4543_set_sh_desc(struct crypto_aead *aead)
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *dev = ctx->dev;
	unsigned int ivsize = crypto_aead_ivsize(aead);
	struct caam_flc *flc;
	int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
	if (!ctx->cdata.keylen || !ctx->authsize)
	ctx->cdata.key_virt = ctx->key;
	 * RFC4543 encrypt shared descriptor
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	if (rem_bytes >= DESC_QI_RFC4543_ENC_LEN) {
		ctx->cdata.key_inline = true;
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	flc = &ctx->flc[ENCRYPT];
	cnstr_shdsc_rfc4543_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	if (rem_bytes >= DESC_QI_RFC4543_DEC_LEN) {
		ctx->cdata.key_inline = true;
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	flc = &ctx->flc[DECRYPT];
	cnstr_shdsc_rfc4543_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
static int rfc4543_setauthsize(struct crypto_aead *authenc,
			       unsigned int authsize)
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);
	ctx->authsize = authsize;
	rfc4543_set_sh_desc(authenc);
static int rfc4543_setkey(struct crypto_aead *aead,
			  const u8 *key, unsigned int keylen)
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *dev = ctx->dev;
	ret = aes_check_keylen(keylen - 4);
	print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
	memcpy(ctx->key, key, keylen);
	 * The last four bytes of the key material are used as the salt value
	 * in the nonce. Update the AES key length.
	ctx->cdata.keylen = keylen - 4;
	dma_sync_single_for_device(dev, ctx->key_dma, ctx->cdata.keylen,
	return rfc4543_set_sh_desc(aead);
static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
			   unsigned int keylen, const u32 ctx1_iv_off)
	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
	struct caam_skcipher_alg *alg =
		container_of(crypto_skcipher_alg(skcipher),
			     struct caam_skcipher_alg, skcipher);
	struct device *dev = ctx->dev;
	struct caam_flc *flc;
	unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
	const bool is_rfc3686 = alg->caam.rfc3686;
	print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
	ctx->cdata.keylen = keylen;
	ctx->cdata.key_virt = key;
	ctx->cdata.key_inline = true;
	/* skcipher_encrypt shared descriptor */
	flc = &ctx->flc[ENCRYPT];
	cnstr_shdsc_skcipher_encap(desc, &ctx->cdata, ivsize, is_rfc3686,
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
	/* skcipher_decrypt shared descriptor */
	flc = &ctx->flc[DECRYPT];
	cnstr_shdsc_skcipher_decap(desc, &ctx->cdata, ivsize, is_rfc3686,
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
static int aes_skcipher_setkey(struct crypto_skcipher *skcipher,
			       const u8 *key, unsigned int keylen)
	err = aes_check_keylen(keylen);
	return skcipher_setkey(skcipher, key, keylen, 0);
static int rfc3686_skcipher_setkey(struct crypto_skcipher *skcipher,
				   const u8 *key, unsigned int keylen)
	 * | CONTEXT1[255:128] = {NONCE, IV, COUNTER}
	 * | *key = {KEY, NONCE}
	ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
	keylen -= CTR_RFC3686_NONCE_SIZE;
	err = aes_check_keylen(keylen);
	return skcipher_setkey(skcipher, key, keylen, ctx1_iv_off);
static int ctr_skcipher_setkey(struct crypto_skcipher *skcipher,
			       const u8 *key, unsigned int keylen)
	 * AES-CTR needs to load the IV in the CONTEXT1 reg
	 * at an offset of 128 bits (16 bytes)
	 * CONTEXT1[255:128] = IV
	err = aes_check_keylen(keylen);
	return skcipher_setkey(skcipher, key, keylen, ctx1_iv_off);
static int chacha20_skcipher_setkey(struct crypto_skcipher *skcipher,
				    const u8 *key, unsigned int keylen)
	if (keylen != CHACHA_KEY_SIZE)
	return skcipher_setkey(skcipher, key, keylen, 0);
static int des_skcipher_setkey(struct crypto_skcipher *skcipher,
			       const u8 *key, unsigned int keylen)
	return verify_skcipher_des_key(skcipher, key) ?:
	       skcipher_setkey(skcipher, key, keylen, 0);
static int des3_skcipher_setkey(struct crypto_skcipher *skcipher,
				const u8 *key, unsigned int keylen)
	return verify_skcipher_des3_key(skcipher, key) ?:
	       skcipher_setkey(skcipher, key, keylen, 0);
static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
			       unsigned int keylen)
	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
	struct device *dev = ctx->dev;
	struct dpaa2_caam_priv *priv = dev_get_drvdata(dev);
	struct caam_flc *flc;
	err = xts_verify_key(skcipher, key, keylen);
		dev_dbg(dev, "key size mismatch\n");
	if (keylen != 2 * AES_KEYSIZE_128 && keylen != 2 * AES_KEYSIZE_256)
		ctx->xts_key_fallback = true;
	if (priv->sec_attr.era <= 8 || ctx->xts_key_fallback) {
		err = crypto_skcipher_setkey(ctx->fallback, key, keylen);
	ctx->cdata.keylen = keylen;
	ctx->cdata.key_virt = key;
	ctx->cdata.key_inline = true;
	/* xts_skcipher_encrypt shared descriptor */
	flc = &ctx->flc[ENCRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_xts_skcipher_encap(desc, &ctx->cdata);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
	/* xts_skcipher_decrypt shared descriptor */
	flc = &ctx->flc[DECRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_xts_skcipher_decap(desc, &ctx->cdata);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req)
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct caam_request *req_ctx = skcipher_request_ctx(req);
	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
	struct device *dev = ctx->dev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		      GFP_KERNEL : GFP_ATOMIC;
	int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
	struct skcipher_edesc *edesc;
	int ivsize = crypto_skcipher_ivsize(skcipher);
	int dst_sg_idx, qm_sg_ents, qm_sg_bytes;
	struct dpaa2_sg_entry *sg_table;
	src_nents = sg_nents_for_len(req->src, req->cryptlen);
	if (unlikely(src_nents < 0)) {
		dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
		return ERR_PTR(src_nents);
	if (unlikely(req->dst != req->src)) {
		dst_nents = sg_nents_for_len(req->dst, req->cryptlen);
		if (unlikely(dst_nents < 0)) {
			dev_err(dev, "Insufficient bytes (%d) in dst S/G\n",
			return ERR_PTR(dst_nents);
		mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
		if (unlikely(!mapped_src_nents)) {
			dev_err(dev, "unable to map source\n");
			return ERR_PTR(-ENOMEM);
		mapped_dst_nents = dma_map_sg(dev, req->dst, dst_nents,
		if (unlikely(!mapped_dst_nents)) {
			dev_err(dev, "unable to map destination\n");
			dma_unmap_sg(dev, req->src, src_nents, DMA_TO_DEVICE);
			return ERR_PTR(-ENOMEM);
		mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
		if (unlikely(!mapped_src_nents)) {
			dev_err(dev, "unable to map source\n");
			return ERR_PTR(-ENOMEM);
	qm_sg_ents = 1 + mapped_src_nents;
	dst_sg_idx = qm_sg_ents;
	 * Input, output HW S/G tables: [IV, src][dst, IV]
	 * IV entries point to the same buffer
	 * If src == dst, S/G entries are reused (S/G tables overlap)
	 * HW reads 4 S/G entries at a time; make sure the reads don't go beyond
	 * the end of the table by allocating more S/G entries.
	if (req->src != req->dst)
		qm_sg_ents += pad_sg_nents(mapped_dst_nents + 1);
		qm_sg_ents = 1 + pad_sg_nents(qm_sg_ents);
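	/*
	 * Resulting table layout (indices into sg_table), per the comment
	 * above: entry 0 is the IV, entries 1..mapped_src_nents cover src;
	 * when src != dst, entries starting at dst_sg_idx cover dst,
	 * followed by one more IV entry for the engine's output IV.
	 */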
	qm_sg_bytes = qm_sg_ents * sizeof(struct dpaa2_sg_entry);
	if (unlikely(offsetof(struct skcipher_edesc, sgt) + qm_sg_bytes +
		     ivsize > CAAM_QI_MEMCACHE_SIZE)) {
		dev_err(dev, "No space for %d S/G entries and/or %dB IV\n",
			qm_sg_ents, ivsize);
		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
		return ERR_PTR(-ENOMEM);
	/* allocate space for base edesc, link tables and IV */
	edesc = qi_cache_zalloc(GFP_DMA | flags);
	if (unlikely(!edesc)) {
		dev_err(dev, "could not allocate extended descriptor\n");
		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
		return ERR_PTR(-ENOMEM);
	/* Make sure IV is located in a DMAable area */
	sg_table = &edesc->sgt[0];
	iv = (u8 *)(sg_table + qm_sg_ents);
	memcpy(iv, req->iv, ivsize);
	iv_dma = dma_map_single(dev, iv, ivsize, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, iv_dma)) {
		dev_err(dev, "unable to map IV\n");
		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
		qi_cache_free(edesc);
		return ERR_PTR(-ENOMEM);
	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;
	edesc->iv_dma = iv_dma;
	edesc->qm_sg_bytes = qm_sg_bytes;
	dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
	sg_to_qm_sg(req->src, req->cryptlen, sg_table + 1, 0);
	if (req->src != req->dst)
		sg_to_qm_sg(req->dst, req->cryptlen, sg_table + dst_sg_idx, 0);
	dma_to_qm_sg_one(sg_table + dst_sg_idx + mapped_dst_nents, iv_dma,
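	/*
	 * The trailing IV entry above is why the IV buffer is mapped
	 * DMA_BIDIRECTIONAL: the engine reads the input IV through the
	 * first entry and writes the output IV (last ciphertext block or
	 * counter) back through this one, to be copied to req->iv on
	 * completion.
	 */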
	edesc->qm_sg_dma = dma_map_single(dev, sg_table, edesc->qm_sg_bytes,
	if (dma_mapping_error(dev, edesc->qm_sg_dma)) {
		dev_err(dev, "unable to map S/G table\n");
		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
			   iv_dma, ivsize, DMA_BIDIRECTIONAL, 0, 0);
		qi_cache_free(edesc);
		return ERR_PTR(-ENOMEM);
	memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
	dpaa2_fl_set_final(in_fle, true);
	dpaa2_fl_set_len(in_fle, req->cryptlen + ivsize);
	dpaa2_fl_set_len(out_fle, req->cryptlen + ivsize);
	dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
	dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
	dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
	if (req->src == req->dst)
		dpaa2_fl_set_addr(out_fle, edesc->qm_sg_dma +
		dpaa2_fl_set_addr(out_fle, edesc->qm_sg_dma + dst_sg_idx *
static void aead_unmap(struct device *dev, struct aead_edesc *edesc,
		       struct aead_request *req)
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	int ivsize = crypto_aead_ivsize(aead);
	caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
		   edesc->iv_dma, ivsize, DMA_TO_DEVICE, edesc->qm_sg_dma,
		   edesc->qm_sg_bytes);
	dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
static void skcipher_unmap(struct device *dev, struct skcipher_edesc *edesc,
			   struct skcipher_request *req)
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	int ivsize = crypto_skcipher_ivsize(skcipher);
	caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
		   edesc->iv_dma, ivsize, DMA_BIDIRECTIONAL, edesc->qm_sg_dma,
		   edesc->qm_sg_bytes);
static void aead_encrypt_done(void *cbk_ctx, u32 status)
	struct crypto_async_request *areq = cbk_ctx;
	struct aead_request *req = container_of(areq, struct aead_request,
	struct caam_request *req_ctx = to_caam_req(areq);
	struct aead_edesc *edesc = req_ctx->edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
	if (unlikely(status))
		ecode = caam_qi2_strstatus(ctx->dev, status);
	aead_unmap(ctx->dev, edesc, req);
	qi_cache_free(edesc);
	aead_request_complete(req, ecode);
static void aead_decrypt_done(void *cbk_ctx, u32 status)
	struct crypto_async_request *areq = cbk_ctx;
	struct aead_request *req = container_of(areq, struct aead_request,
	struct caam_request *req_ctx = to_caam_req(areq);
	struct aead_edesc *edesc = req_ctx->edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
	if (unlikely(status))
		ecode = caam_qi2_strstatus(ctx->dev, status);
	aead_unmap(ctx->dev, edesc, req);
	qi_cache_free(edesc);
	aead_request_complete(req, ecode);
static int aead_encrypt(struct aead_request *req)
	struct aead_edesc *edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct caam_request *caam_req = aead_request_ctx(req);
	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, true);
		return PTR_ERR(edesc);
	caam_req->flc = &ctx->flc[ENCRYPT];
	caam_req->flc_dma = ctx->flc_dma[ENCRYPT];
	caam_req->cbk = aead_encrypt_done;
	caam_req->ctx = &req->base;
	caam_req->edesc = edesc;
	ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
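	/*
	 * Enqueue semantics follow the crypto API convention: -EINPROGRESS
	 * means the request was accepted and will complete asynchronously
	 * via aead_encrypt_done(); -EBUSY together with
	 * CRYPTO_TFM_REQ_MAY_BACKLOG means it was queued on the backlog.
	 * Anything else is a real failure, so the mappings and the
	 * extended descriptor are torn down below.
	 */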
	if (ret != -EINPROGRESS &&
	    !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
		aead_unmap(ctx->dev, edesc, req);
		qi_cache_free(edesc);
static int aead_decrypt(struct aead_request *req)
	struct aead_edesc *edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct caam_request *caam_req = aead_request_ctx(req);
	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, false);
		return PTR_ERR(edesc);
	caam_req->flc = &ctx->flc[DECRYPT];
	caam_req->flc_dma = ctx->flc_dma[DECRYPT];
	caam_req->cbk = aead_decrypt_done;
	caam_req->ctx = &req->base;
	caam_req->edesc = edesc;
	ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
	if (ret != -EINPROGRESS &&
	    !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
		aead_unmap(ctx->dev, edesc, req);
		qi_cache_free(edesc);
static int ipsec_gcm_encrypt(struct aead_request *req)
	return crypto_ipsec_check_assoclen(req->assoclen) ? : aead_encrypt(req);
static int ipsec_gcm_decrypt(struct aead_request *req)
	return crypto_ipsec_check_assoclen(req->assoclen) ? : aead_decrypt(req);
static void skcipher_encrypt_done(void *cbk_ctx, u32 status)
	struct crypto_async_request *areq = cbk_ctx;
	struct skcipher_request *req = skcipher_request_cast(areq);
	struct caam_request *req_ctx = to_caam_req(areq);
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
	struct skcipher_edesc *edesc = req_ctx->edesc;
	int ivsize = crypto_skcipher_ivsize(skcipher);
	dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
	if (unlikely(status))
		ecode = caam_qi2_strstatus(ctx->dev, status);
	print_hex_dump_debug("dstiv @" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
			     edesc->src_nents > 1 ? 100 : ivsize, 1);
	caam_dump_sg("dst @" __stringify(__LINE__)": ",
		     DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
		     edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);
	skcipher_unmap(ctx->dev, edesc, req);
	 * The crypto API expects us to set the IV (req->iv) to the last
	 * ciphertext block (CBC mode) or last counter (CTR mode).
	 * This is used e.g. by the CTS mode.
	memcpy(req->iv, (u8 *)&edesc->sgt[0] + edesc->qm_sg_bytes,
	qi_cache_free(edesc);
	skcipher_request_complete(req, ecode);
static void skcipher_decrypt_done(void *cbk_ctx, u32 status)
	struct crypto_async_request *areq = cbk_ctx;
	struct skcipher_request *req = skcipher_request_cast(areq);
	struct caam_request *req_ctx = to_caam_req(areq);
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
	struct skcipher_edesc *edesc = req_ctx->edesc;
	int ivsize = crypto_skcipher_ivsize(skcipher);
	dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
	if (unlikely(status))
		ecode = caam_qi2_strstatus(ctx->dev, status);
	print_hex_dump_debug("dstiv @" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
			     edesc->src_nents > 1 ? 100 : ivsize, 1);
	caam_dump_sg("dst @" __stringify(__LINE__)": ",
		     DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
		     edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);
	skcipher_unmap(ctx->dev, edesc, req);
	 * The crypto API expects us to set the IV (req->iv) to the last
	 * ciphertext block (CBC mode) or last counter (CTR mode).
	 * This is used e.g. by the CTS mode.
	memcpy(req->iv, (u8 *)&edesc->sgt[0] + edesc->qm_sg_bytes,
	qi_cache_free(edesc);
	skcipher_request_complete(req, ecode);
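/*
 * xts_skcipher_ivsize() below reports whether the upper half of the XTS
 * IV (the high 64 bits of the sector index) is non-zero. The encrypt and
 * decrypt paths use this, on SEC eras <= 8, to divert such requests to
 * the software fallback instead of the hardware.
 */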
static inline bool xts_skcipher_ivsize(struct skcipher_request *req)
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
	return !!get_unaligned((u64 *)(req->iv + (ivsize / 2)));
static int skcipher_encrypt(struct skcipher_request *req)
	struct skcipher_edesc *edesc;
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
	struct caam_request *caam_req = skcipher_request_ctx(req);
	struct dpaa2_caam_priv *priv = dev_get_drvdata(ctx->dev);
	 * XTS is expected to return an error even for input length = 0;
	 * note that the case input length < block size will be caught
	 * during HW offloading and an error returned.
	if (!req->cryptlen && !ctx->fallback)
	if (ctx->fallback && ((priv->sec_attr.era <= 8 && xts_skcipher_ivsize(req)) ||
			      ctx->xts_key_fallback)) {
		skcipher_request_set_tfm(&caam_req->fallback_req, ctx->fallback);
		skcipher_request_set_callback(&caam_req->fallback_req,
		skcipher_request_set_crypt(&caam_req->fallback_req, req->src,
					   req->dst, req->cryptlen, req->iv);
		return crypto_skcipher_encrypt(&caam_req->fallback_req);
	/* allocate extended descriptor */
	edesc = skcipher_edesc_alloc(req);
		return PTR_ERR(edesc);
	caam_req->flc = &ctx->flc[ENCRYPT];
	caam_req->flc_dma = ctx->flc_dma[ENCRYPT];
	caam_req->cbk = skcipher_encrypt_done;
	caam_req->ctx = &req->base;
	caam_req->edesc = edesc;
	ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
	if (ret != -EINPROGRESS &&
	    !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
		skcipher_unmap(ctx->dev, edesc, req);
		qi_cache_free(edesc);
static int skcipher_decrypt(struct skcipher_request *req)
	struct skcipher_edesc *edesc;
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
	struct caam_request *caam_req = skcipher_request_ctx(req);
	struct dpaa2_caam_priv *priv = dev_get_drvdata(ctx->dev);
	 * XTS is expected to return an error even for input length = 0;
	 * note that the case input length < block size will be caught
	 * during HW offloading and an error returned.
	if (!req->cryptlen && !ctx->fallback)
	if (ctx->fallback && ((priv->sec_attr.era <= 8 && xts_skcipher_ivsize(req)) ||
			      ctx->xts_key_fallback)) {
		skcipher_request_set_tfm(&caam_req->fallback_req, ctx->fallback);
		skcipher_request_set_callback(&caam_req->fallback_req,
		skcipher_request_set_crypt(&caam_req->fallback_req, req->src,
					   req->dst, req->cryptlen, req->iv);
		return crypto_skcipher_decrypt(&caam_req->fallback_req);
	/* allocate extended descriptor */
	edesc = skcipher_edesc_alloc(req);
		return PTR_ERR(edesc);
	caam_req->flc = &ctx->flc[DECRYPT];
	caam_req->flc_dma = ctx->flc_dma[DECRYPT];
	caam_req->cbk = skcipher_decrypt_done;
	caam_req->ctx = &req->base;
	caam_req->edesc = edesc;
	ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
	if (ret != -EINPROGRESS &&
	    !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
		skcipher_unmap(ctx->dev, edesc, req);
		qi_cache_free(edesc);
static int caam_cra_init(struct caam_ctx *ctx, struct caam_alg_entry *caam,
	dma_addr_t dma_addr;
	/* copy descriptor header template value */
	ctx->cdata.algtype = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
	ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam->class2_alg_type;
	ctx->dev = caam->dev;
	ctx->dir = uses_dkp ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
	dma_addr = dma_map_single_attrs(ctx->dev, ctx->flc,
					offsetof(struct caam_ctx, flc_dma),
					ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
	if (dma_mapping_error(ctx->dev, dma_addr)) {
		dev_err(ctx->dev, "unable to map key, shared descriptors\n");
	for (i = 0; i < NUM_OP; i++)
		ctx->flc_dma[i] = dma_addr + i * sizeof(ctx->flc[i]);
	ctx->key_dma = dma_addr + NUM_OP * sizeof(ctx->flc[0]);
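	/*
	 * A single DMA mapping spans everything from ctx->flc up to (but
	 * not including) ctx->flc_dma, i.e. the flc[NUM_OP] array followed
	 * by key[] in struct caam_ctx; the per-operation flc_dma[] entries
	 * and key_dma are just offsets into that one mapping.
	 */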
1599 static int caam_cra_init_skcipher(struct crypto_skcipher *tfm)
1601 struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
1602 struct caam_skcipher_alg *caam_alg =
1603 container_of(alg, typeof(*caam_alg), skcipher);
1604 struct caam_ctx *ctx = crypto_skcipher_ctx(tfm);
1605 u32 alg_aai = caam_alg->caam.class1_alg_type & OP_ALG_AAI_MASK;
1608 if (alg_aai == OP_ALG_AAI_XTS) {
1609 const char *tfm_name = crypto_tfm_alg_name(&tfm->base);
1610 struct crypto_skcipher *fallback;
1612 fallback = crypto_alloc_skcipher(tfm_name, 0,
1613 CRYPTO_ALG_NEED_FALLBACK);
1614 if (IS_ERR(fallback)) {
1615 dev_err(caam_alg->caam.dev,
1616 "Failed to allocate %s fallback: %ld\n",
1617 tfm_name, PTR_ERR(fallback));
1618 return PTR_ERR(fallback);
1621 ctx->fallback = fallback;
1622 crypto_skcipher_set_reqsize(tfm, sizeof(struct caam_request) +
1623 crypto_skcipher_reqsize(fallback));
1625 crypto_skcipher_set_reqsize(tfm, sizeof(struct caam_request));
1628 ret = caam_cra_init(ctx, &caam_alg->caam, false);
1629 if (ret && ctx->fallback)
1630 crypto_free_skcipher(ctx->fallback);
1635 static int caam_cra_init_aead(struct crypto_aead *tfm)
1637 struct aead_alg *alg = crypto_aead_alg(tfm);
1638 struct caam_aead_alg *caam_alg = container_of(alg, typeof(*caam_alg),
1641 crypto_aead_set_reqsize(tfm, sizeof(struct caam_request));
1642 return caam_cra_init(crypto_aead_ctx(tfm), &caam_alg->caam,
1643 !caam_alg->caam.nodkp);
1646 static void caam_exit_common(struct caam_ctx *ctx)
1648 dma_unmap_single_attrs(ctx->dev, ctx->flc_dma[0],
1649 offsetof(struct caam_ctx, flc_dma), ctx->dir,
1650 DMA_ATTR_SKIP_CPU_SYNC);
1653 static void caam_cra_exit(struct crypto_skcipher *tfm)
1655 struct caam_ctx *ctx = crypto_skcipher_ctx(tfm);
1658 crypto_free_skcipher(ctx->fallback);
1659 caam_exit_common(ctx);
1662 static void caam_cra_exit_aead(struct crypto_aead *tfm)
1664 caam_exit_common(crypto_aead_ctx(tfm));
1667 static struct caam_skcipher_alg driver_algs[] = {
1671 .cra_name = "cbc(aes)",
1672 .cra_driver_name = "cbc-aes-caam-qi2",
1673 .cra_blocksize = AES_BLOCK_SIZE,
1675 .setkey = aes_skcipher_setkey,
1676 .encrypt = skcipher_encrypt,
1677 .decrypt = skcipher_decrypt,
1678 .min_keysize = AES_MIN_KEY_SIZE,
1679 .max_keysize = AES_MAX_KEY_SIZE,
1680 .ivsize = AES_BLOCK_SIZE,
1682 .caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1687 .cra_name = "cbc(des3_ede)",
1688 .cra_driver_name = "cbc-3des-caam-qi2",
1689 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
1691 .setkey = des3_skcipher_setkey,
1692 .encrypt = skcipher_encrypt,
1693 .decrypt = skcipher_decrypt,
1694 .min_keysize = DES3_EDE_KEY_SIZE,
1695 .max_keysize = DES3_EDE_KEY_SIZE,
1696 .ivsize = DES3_EDE_BLOCK_SIZE,
1698 .caam.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
1703 .cra_name = "cbc(des)",
1704 .cra_driver_name = "cbc-des-caam-qi2",
1705 .cra_blocksize = DES_BLOCK_SIZE,
1707 .setkey = des_skcipher_setkey,
1708 .encrypt = skcipher_encrypt,
1709 .decrypt = skcipher_decrypt,
1710 .min_keysize = DES_KEY_SIZE,
1711 .max_keysize = DES_KEY_SIZE,
1712 .ivsize = DES_BLOCK_SIZE,
1714 .caam.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
1719 .cra_name = "ctr(aes)",
1720 .cra_driver_name = "ctr-aes-caam-qi2",
1723 .setkey = ctr_skcipher_setkey,
1724 .encrypt = skcipher_encrypt,
1725 .decrypt = skcipher_decrypt,
1726 .min_keysize = AES_MIN_KEY_SIZE,
1727 .max_keysize = AES_MAX_KEY_SIZE,
1728 .ivsize = AES_BLOCK_SIZE,
1729 .chunksize = AES_BLOCK_SIZE,
1731 .caam.class1_alg_type = OP_ALG_ALGSEL_AES |
1732 OP_ALG_AAI_CTR_MOD128,
1737 .cra_name = "rfc3686(ctr(aes))",
1738 .cra_driver_name = "rfc3686-ctr-aes-caam-qi2",
1741 .setkey = rfc3686_skcipher_setkey,
1742 .encrypt = skcipher_encrypt,
1743 .decrypt = skcipher_decrypt,
1744 .min_keysize = AES_MIN_KEY_SIZE +
1745 CTR_RFC3686_NONCE_SIZE,
1746 .max_keysize = AES_MAX_KEY_SIZE +
1747 CTR_RFC3686_NONCE_SIZE,
1748 .ivsize = CTR_RFC3686_IV_SIZE,
1749 .chunksize = AES_BLOCK_SIZE,
1752 .class1_alg_type = OP_ALG_ALGSEL_AES |
1753 OP_ALG_AAI_CTR_MOD128,
1760 .cra_name = "xts(aes)",
1761 .cra_driver_name = "xts-aes-caam-qi2",
1762 .cra_flags = CRYPTO_ALG_NEED_FALLBACK,
1763 .cra_blocksize = AES_BLOCK_SIZE,
1765 .setkey = xts_skcipher_setkey,
1766 .encrypt = skcipher_encrypt,
1767 .decrypt = skcipher_decrypt,
1768 .min_keysize = 2 * AES_MIN_KEY_SIZE,
1769 .max_keysize = 2 * AES_MAX_KEY_SIZE,
1770 .ivsize = AES_BLOCK_SIZE,
1772 .caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS,
1777 .cra_name = "chacha20",
1778 .cra_driver_name = "chacha20-caam-qi2",
1781 .setkey = chacha20_skcipher_setkey,
1782 .encrypt = skcipher_encrypt,
1783 .decrypt = skcipher_decrypt,
1784 .min_keysize = CHACHA_KEY_SIZE,
1785 .max_keysize = CHACHA_KEY_SIZE,
1786 .ivsize = CHACHA_IV_SIZE,
1788 .caam.class1_alg_type = OP_ALG_ALGSEL_CHACHA20,
1792 static struct caam_aead_alg driver_aeads[] = {
1796 .cra_name = "rfc4106(gcm(aes))",
1797 .cra_driver_name = "rfc4106-gcm-aes-caam-qi2",
1800 .setkey = rfc4106_setkey,
1801 .setauthsize = rfc4106_setauthsize,
1802 .encrypt = ipsec_gcm_encrypt,
1803 .decrypt = ipsec_gcm_decrypt,
1805 .maxauthsize = AES_BLOCK_SIZE,
1808 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
1815 .cra_name = "rfc4543(gcm(aes))",
1816 .cra_driver_name = "rfc4543-gcm-aes-caam-qi2",
1819 .setkey = rfc4543_setkey,
1820 .setauthsize = rfc4543_setauthsize,
1821 .encrypt = ipsec_gcm_encrypt,
1822 .decrypt = ipsec_gcm_decrypt,
1824 .maxauthsize = AES_BLOCK_SIZE,
1827 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
1831 /* Galois Counter Mode */
1835 .cra_name = "gcm(aes)",
1836 .cra_driver_name = "gcm-aes-caam-qi2",
1839 .setkey = gcm_setkey,
1840 .setauthsize = gcm_setauthsize,
1841 .encrypt = aead_encrypt,
1842 .decrypt = aead_decrypt,
1844 .maxauthsize = AES_BLOCK_SIZE,
1847 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
1851 /* single-pass ipsec_esp descriptor */
1855 .cra_name = "authenc(hmac(md5),cbc(aes))",
1856 .cra_driver_name = "authenc-hmac-md5-"
1858 .cra_blocksize = AES_BLOCK_SIZE,
1860 .setkey = aead_setkey,
1861 .setauthsize = aead_setauthsize,
1862 .encrypt = aead_encrypt,
1863 .decrypt = aead_decrypt,
1864 .ivsize = AES_BLOCK_SIZE,
1865 .maxauthsize = MD5_DIGEST_SIZE,
1868 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1869 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
1870 OP_ALG_AAI_HMAC_PRECOMP,
1876 .cra_name = "echainiv(authenc(hmac(md5),"
1878 .cra_driver_name = "echainiv-authenc-hmac-md5-"
1880 .cra_blocksize = AES_BLOCK_SIZE,
1882 .setkey = aead_setkey,
1883 .setauthsize = aead_setauthsize,
1884 .encrypt = aead_encrypt,
1885 .decrypt = aead_decrypt,
1886 .ivsize = AES_BLOCK_SIZE,
1887 .maxauthsize = MD5_DIGEST_SIZE,
1890 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1891 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
1892 OP_ALG_AAI_HMAC_PRECOMP,
1899 .cra_name = "authenc(hmac(sha1),cbc(aes))",
1900 .cra_driver_name = "authenc-hmac-sha1-"
1902 .cra_blocksize = AES_BLOCK_SIZE,
1904 .setkey = aead_setkey,
1905 .setauthsize = aead_setauthsize,
1906 .encrypt = aead_encrypt,
1907 .decrypt = aead_decrypt,
1908 .ivsize = AES_BLOCK_SIZE,
1909 .maxauthsize = SHA1_DIGEST_SIZE,
1912 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1913 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
1914 OP_ALG_AAI_HMAC_PRECOMP,
1920 .cra_name = "echainiv(authenc(hmac(sha1),"
1922 .cra_driver_name = "echainiv-authenc-"
1923 "hmac-sha1-cbc-aes-caam-qi2",
1924 .cra_blocksize = AES_BLOCK_SIZE,
1926 .setkey = aead_setkey,
1927 .setauthsize = aead_setauthsize,
1928 .encrypt = aead_encrypt,
1929 .decrypt = aead_decrypt,
1930 .ivsize = AES_BLOCK_SIZE,
1931 .maxauthsize = SHA1_DIGEST_SIZE,
1934 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1935 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
1936 OP_ALG_AAI_HMAC_PRECOMP,
1943 .cra_name = "authenc(hmac(sha224),cbc(aes))",
1944 .cra_driver_name = "authenc-hmac-sha224-"
1946 .cra_blocksize = AES_BLOCK_SIZE,
1948 .setkey = aead_setkey,
1949 .setauthsize = aead_setauthsize,
1950 .encrypt = aead_encrypt,
1951 .decrypt = aead_decrypt,
1952 .ivsize = AES_BLOCK_SIZE,
1953 .maxauthsize = SHA224_DIGEST_SIZE,
1956 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1957 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
1958 OP_ALG_AAI_HMAC_PRECOMP,
1964 .cra_name = "echainiv(authenc(hmac(sha224),"
1966 .cra_driver_name = "echainiv-authenc-"
1967 "hmac-sha224-cbc-aes-caam-qi2",
1968 .cra_blocksize = AES_BLOCK_SIZE,
1970 .setkey = aead_setkey,
1971 .setauthsize = aead_setauthsize,
1972 .encrypt = aead_encrypt,
1973 .decrypt = aead_decrypt,
1974 .ivsize = AES_BLOCK_SIZE,
1975 .maxauthsize = SHA224_DIGEST_SIZE,
1978 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1979 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
1980 OP_ALG_AAI_HMAC_PRECOMP,
1987 .cra_name = "authenc(hmac(sha256),cbc(aes))",
1988 .cra_driver_name = "authenc-hmac-sha256-"
1990 .cra_blocksize = AES_BLOCK_SIZE,
1992 .setkey = aead_setkey,
1993 .setauthsize = aead_setauthsize,
1994 .encrypt = aead_encrypt,
1995 .decrypt = aead_decrypt,
1996 .ivsize = AES_BLOCK_SIZE,
1997 .maxauthsize = SHA256_DIGEST_SIZE,
2000 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2001 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2002 OP_ALG_AAI_HMAC_PRECOMP,
2008 .cra_name = "echainiv(authenc(hmac(sha256),"
2010 .cra_driver_name = "echainiv-authenc-"
2011 "hmac-sha256-cbc-aes-"
2013 .cra_blocksize = AES_BLOCK_SIZE,
2015 .setkey = aead_setkey,
2016 .setauthsize = aead_setauthsize,
2017 .encrypt = aead_encrypt,
2018 .decrypt = aead_decrypt,
2019 .ivsize = AES_BLOCK_SIZE,
2020 .maxauthsize = SHA256_DIGEST_SIZE,
2023 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2024 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2025 OP_ALG_AAI_HMAC_PRECOMP,
2032 .cra_name = "authenc(hmac(sha384),cbc(aes))",
2033 .cra_driver_name = "authenc-hmac-sha384-"
2035 .cra_blocksize = AES_BLOCK_SIZE,
2037 .setkey = aead_setkey,
2038 .setauthsize = aead_setauthsize,
2039 .encrypt = aead_encrypt,
2040 .decrypt = aead_decrypt,
2041 .ivsize = AES_BLOCK_SIZE,
2042 .maxauthsize = SHA384_DIGEST_SIZE,
2045 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2046 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2047 OP_ALG_AAI_HMAC_PRECOMP,
2053 .cra_name = "echainiv(authenc(hmac(sha384),"
2055 .cra_driver_name = "echainiv-authenc-"
2056 "hmac-sha384-cbc-aes-"
2058 .cra_blocksize = AES_BLOCK_SIZE,
2060 .setkey = aead_setkey,
2061 .setauthsize = aead_setauthsize,
2062 .encrypt = aead_encrypt,
2063 .decrypt = aead_decrypt,
2064 .ivsize = AES_BLOCK_SIZE,
2065 .maxauthsize = SHA384_DIGEST_SIZE,
2068 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2069 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2070 OP_ALG_AAI_HMAC_PRECOMP,
2077 .cra_name = "authenc(hmac(sha512),cbc(aes))",
2078 .cra_driver_name = "authenc-hmac-sha512-"
2080 .cra_blocksize = AES_BLOCK_SIZE,
2082 .setkey = aead_setkey,
2083 .setauthsize = aead_setauthsize,
2084 .encrypt = aead_encrypt,
2085 .decrypt = aead_decrypt,
2086 .ivsize = AES_BLOCK_SIZE,
2087 .maxauthsize = SHA512_DIGEST_SIZE,
2090 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2091 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2092 OP_ALG_AAI_HMAC_PRECOMP,
2098 .cra_name = "echainiv(authenc(hmac(sha512),"
2100 .cra_driver_name = "echainiv-authenc-"
2101 "hmac-sha512-cbc-aes-"
2103 .cra_blocksize = AES_BLOCK_SIZE,
2105 .setkey = aead_setkey,
2106 .setauthsize = aead_setauthsize,
2107 .encrypt = aead_encrypt,
2108 .decrypt = aead_decrypt,
2109 .ivsize = AES_BLOCK_SIZE,
2110 .maxauthsize = SHA512_DIGEST_SIZE,
2113 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2114 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2115 OP_ALG_AAI_HMAC_PRECOMP,
2122 .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
2123 .cra_driver_name = "authenc-hmac-md5-"
2124 "cbc-des3_ede-caam-qi2",
2125 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2127 .setkey = des3_aead_setkey,
2128 .setauthsize = aead_setauthsize,
2129 .encrypt = aead_encrypt,
2130 .decrypt = aead_decrypt,
2131 .ivsize = DES3_EDE_BLOCK_SIZE,
2132 .maxauthsize = MD5_DIGEST_SIZE,
2135 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2136 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
2137 OP_ALG_AAI_HMAC_PRECOMP,
2143 .cra_name = "echainiv(authenc(hmac(md5),"
2145 .cra_driver_name = "echainiv-authenc-hmac-md5-"
2146 "cbc-des3_ede-caam-qi2",
2147 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2149 .setkey = des3_aead_setkey,
2150 .setauthsize = aead_setauthsize,
2151 .encrypt = aead_encrypt,
2152 .decrypt = aead_decrypt,
2153 .ivsize = DES3_EDE_BLOCK_SIZE,
2154 .maxauthsize = MD5_DIGEST_SIZE,
2157 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2158 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
2159 OP_ALG_AAI_HMAC_PRECOMP,
2166 .cra_name = "authenc(hmac(sha1),"
2168 .cra_driver_name = "authenc-hmac-sha1-"
2169 "cbc-des3_ede-caam-qi2",
2170 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2172 .setkey = des3_aead_setkey,
2173 .setauthsize = aead_setauthsize,
2174 .encrypt = aead_encrypt,
2175 .decrypt = aead_decrypt,
2176 .ivsize = DES3_EDE_BLOCK_SIZE,
2177 .maxauthsize = SHA1_DIGEST_SIZE,
2180 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2181 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2182 OP_ALG_AAI_HMAC_PRECOMP,
2188 .cra_name = "echainiv(authenc(hmac(sha1),"
2190 .cra_driver_name = "echainiv-authenc-"
2192 "cbc-des3_ede-caam-qi2",
2193 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2195 .setkey = des3_aead_setkey,
2196 .setauthsize = aead_setauthsize,
2197 .encrypt = aead_encrypt,
2198 .decrypt = aead_decrypt,
2199 .ivsize = DES3_EDE_BLOCK_SIZE,
2200 .maxauthsize = SHA1_DIGEST_SIZE,
2203 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2204 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2205 OP_ALG_AAI_HMAC_PRECOMP,
2212 .cra_name = "authenc(hmac(sha224),"
2213 "cbc(des3_ede))",
2214 .cra_driver_name = "authenc-hmac-sha224-"
2215 "cbc-des3_ede-caam-qi2",
2216 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2218 .setkey = des3_aead_setkey,
2219 .setauthsize = aead_setauthsize,
2220 .encrypt = aead_encrypt,
2221 .decrypt = aead_decrypt,
2222 .ivsize = DES3_EDE_BLOCK_SIZE,
2223 .maxauthsize = SHA224_DIGEST_SIZE,
2226 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2227 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2228 OP_ALG_AAI_HMAC_PRECOMP,
2234 .cra_name = "echainiv(authenc(hmac(sha224),"
2235 "cbc(des3_ede)))",
2236 .cra_driver_name = "echainiv-authenc-"
2237 "hmac-sha224-"
2238 "cbc-des3_ede-caam-qi2",
2239 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2241 .setkey = des3_aead_setkey,
2242 .setauthsize = aead_setauthsize,
2243 .encrypt = aead_encrypt,
2244 .decrypt = aead_decrypt,
2245 .ivsize = DES3_EDE_BLOCK_SIZE,
2246 .maxauthsize = SHA224_DIGEST_SIZE,
2249 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2250 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2251 OP_ALG_AAI_HMAC_PRECOMP,
2258 .cra_name = "authenc(hmac(sha256),"
2259 "cbc(des3_ede))",
2260 .cra_driver_name = "authenc-hmac-sha256-"
2261 "cbc-des3_ede-caam-qi2",
2262 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2264 .setkey = des3_aead_setkey,
2265 .setauthsize = aead_setauthsize,
2266 .encrypt = aead_encrypt,
2267 .decrypt = aead_decrypt,
2268 .ivsize = DES3_EDE_BLOCK_SIZE,
2269 .maxauthsize = SHA256_DIGEST_SIZE,
2272 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2273 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2274 OP_ALG_AAI_HMAC_PRECOMP,
2280 .cra_name = "echainiv(authenc(hmac(sha256),"
2281 "cbc(des3_ede)))",
2282 .cra_driver_name = "echainiv-authenc-"
2283 "hmac-sha256-"
2284 "cbc-des3_ede-caam-qi2",
2285 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2287 .setkey = des3_aead_setkey,
2288 .setauthsize = aead_setauthsize,
2289 .encrypt = aead_encrypt,
2290 .decrypt = aead_decrypt,
2291 .ivsize = DES3_EDE_BLOCK_SIZE,
2292 .maxauthsize = SHA256_DIGEST_SIZE,
2295 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2296 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2297 OP_ALG_AAI_HMAC_PRECOMP,
2304 .cra_name = "authenc(hmac(sha384),"
2305 "cbc(des3_ede))",
2306 .cra_driver_name = "authenc-hmac-sha384-"
2307 "cbc-des3_ede-caam-qi2",
2308 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2310 .setkey = des3_aead_setkey,
2311 .setauthsize = aead_setauthsize,
2312 .encrypt = aead_encrypt,
2313 .decrypt = aead_decrypt,
2314 .ivsize = DES3_EDE_BLOCK_SIZE,
2315 .maxauthsize = SHA384_DIGEST_SIZE,
2318 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2319 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2320 OP_ALG_AAI_HMAC_PRECOMP,
2326 .cra_name = "echainiv(authenc(hmac(sha384),"
2327 "cbc(des3_ede)))",
2328 .cra_driver_name = "echainiv-authenc-"
2329 "hmac-sha384-"
2330 "cbc-des3_ede-caam-qi2",
2331 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2333 .setkey = des3_aead_setkey,
2334 .setauthsize = aead_setauthsize,
2335 .encrypt = aead_encrypt,
2336 .decrypt = aead_decrypt,
2337 .ivsize = DES3_EDE_BLOCK_SIZE,
2338 .maxauthsize = SHA384_DIGEST_SIZE,
2341 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2342 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2343 OP_ALG_AAI_HMAC_PRECOMP,
2350 .cra_name = "authenc(hmac(sha512),"
2351 "cbc(des3_ede))",
2352 .cra_driver_name = "authenc-hmac-sha512-"
2353 "cbc-des3_ede-caam-qi2",
2354 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2356 .setkey = des3_aead_setkey,
2357 .setauthsize = aead_setauthsize,
2358 .encrypt = aead_encrypt,
2359 .decrypt = aead_decrypt,
2360 .ivsize = DES3_EDE_BLOCK_SIZE,
2361 .maxauthsize = SHA512_DIGEST_SIZE,
2364 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2365 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2366 OP_ALG_AAI_HMAC_PRECOMP,
2372 .cra_name = "echainiv(authenc(hmac(sha512),"
2373 "cbc(des3_ede)))",
2374 .cra_driver_name = "echainiv-authenc-"
2375 "hmac-sha512-"
2376 "cbc-des3_ede-caam-qi2",
2377 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2379 .setkey = des3_aead_setkey,
2380 .setauthsize = aead_setauthsize,
2381 .encrypt = aead_encrypt,
2382 .decrypt = aead_decrypt,
2383 .ivsize = DES3_EDE_BLOCK_SIZE,
2384 .maxauthsize = SHA512_DIGEST_SIZE,
2387 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2388 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2389 OP_ALG_AAI_HMAC_PRECOMP,
2396 .cra_name = "authenc(hmac(md5),cbc(des))",
2397 .cra_driver_name = "authenc-hmac-md5-"
2398 "cbc-des-caam-qi2",
2399 .cra_blocksize = DES_BLOCK_SIZE,
2401 .setkey = aead_setkey,
2402 .setauthsize = aead_setauthsize,
2403 .encrypt = aead_encrypt,
2404 .decrypt = aead_decrypt,
2405 .ivsize = DES_BLOCK_SIZE,
2406 .maxauthsize = MD5_DIGEST_SIZE,
2409 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2410 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
2411 OP_ALG_AAI_HMAC_PRECOMP,
2417 .cra_name = "echainiv(authenc(hmac(md5),"
2418 "cbc(des)))",
2419 .cra_driver_name = "echainiv-authenc-hmac-md5-"
2420 "cbc-des-caam-qi2",
2421 .cra_blocksize = DES_BLOCK_SIZE,
2423 .setkey = aead_setkey,
2424 .setauthsize = aead_setauthsize,
2425 .encrypt = aead_encrypt,
2426 .decrypt = aead_decrypt,
2427 .ivsize = DES_BLOCK_SIZE,
2428 .maxauthsize = MD5_DIGEST_SIZE,
2431 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2432 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
2433 OP_ALG_AAI_HMAC_PRECOMP,
2440 .cra_name = "authenc(hmac(sha1),cbc(des))",
2441 .cra_driver_name = "authenc-hmac-sha1-"
2442 "cbc-des-caam-qi2",
2443 .cra_blocksize = DES_BLOCK_SIZE,
2445 .setkey = aead_setkey,
2446 .setauthsize = aead_setauthsize,
2447 .encrypt = aead_encrypt,
2448 .decrypt = aead_decrypt,
2449 .ivsize = DES_BLOCK_SIZE,
2450 .maxauthsize = SHA1_DIGEST_SIZE,
2453 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2454 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2455 OP_ALG_AAI_HMAC_PRECOMP,
2461 .cra_name = "echainiv(authenc(hmac(sha1),"
2462 "cbc(des)))",
2463 .cra_driver_name = "echainiv-authenc-"
2464 "hmac-sha1-cbc-des-caam-qi2",
2465 .cra_blocksize = DES_BLOCK_SIZE,
2467 .setkey = aead_setkey,
2468 .setauthsize = aead_setauthsize,
2469 .encrypt = aead_encrypt,
2470 .decrypt = aead_decrypt,
2471 .ivsize = DES_BLOCK_SIZE,
2472 .maxauthsize = SHA1_DIGEST_SIZE,
2475 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2476 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2477 OP_ALG_AAI_HMAC_PRECOMP,
2484 .cra_name = "authenc(hmac(sha224),cbc(des))",
2485 .cra_driver_name = "authenc-hmac-sha224-"
2486 "cbc-des-caam-qi2",
2487 .cra_blocksize = DES_BLOCK_SIZE,
2489 .setkey = aead_setkey,
2490 .setauthsize = aead_setauthsize,
2491 .encrypt = aead_encrypt,
2492 .decrypt = aead_decrypt,
2493 .ivsize = DES_BLOCK_SIZE,
2494 .maxauthsize = SHA224_DIGEST_SIZE,
2497 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2498 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2499 OP_ALG_AAI_HMAC_PRECOMP,
2505 .cra_name = "echainiv(authenc(hmac(sha224),"
2506 "cbc(des)))",
2507 .cra_driver_name = "echainiv-authenc-"
2508 "hmac-sha224-cbc-des-"
2510 .cra_blocksize = DES_BLOCK_SIZE,
2512 .setkey = aead_setkey,
2513 .setauthsize = aead_setauthsize,
2514 .encrypt = aead_encrypt,
2515 .decrypt = aead_decrypt,
2516 .ivsize = DES_BLOCK_SIZE,
2517 .maxauthsize = SHA224_DIGEST_SIZE,
2520 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2521 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2522 OP_ALG_AAI_HMAC_PRECOMP,
2529 .cra_name = "authenc(hmac(sha256),cbc(des))",
2530 .cra_driver_name = "authenc-hmac-sha256-"
2531 "cbc-des-caam-qi2",
2532 .cra_blocksize = DES_BLOCK_SIZE,
2534 .setkey = aead_setkey,
2535 .setauthsize = aead_setauthsize,
2536 .encrypt = aead_encrypt,
2537 .decrypt = aead_decrypt,
2538 .ivsize = DES_BLOCK_SIZE,
2539 .maxauthsize = SHA256_DIGEST_SIZE,
2542 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2543 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2544 OP_ALG_AAI_HMAC_PRECOMP,
2550 .cra_name = "echainiv(authenc(hmac(sha256),"
2551 "cbc(des)))",
2552 .cra_driver_name = "echainiv-authenc-"
2553 "hmac-sha256-cbc-des-"
2555 .cra_blocksize = DES_BLOCK_SIZE,
2557 .setkey = aead_setkey,
2558 .setauthsize = aead_setauthsize,
2559 .encrypt = aead_encrypt,
2560 .decrypt = aead_decrypt,
2561 .ivsize = DES_BLOCK_SIZE,
2562 .maxauthsize = SHA256_DIGEST_SIZE,
2565 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2566 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2567 OP_ALG_AAI_HMAC_PRECOMP,
2574 .cra_name = "authenc(hmac(sha384),cbc(des))",
2575 .cra_driver_name = "authenc-hmac-sha384-"
2576 "cbc-des-caam-qi2",
2577 .cra_blocksize = DES_BLOCK_SIZE,
2579 .setkey = aead_setkey,
2580 .setauthsize = aead_setauthsize,
2581 .encrypt = aead_encrypt,
2582 .decrypt = aead_decrypt,
2583 .ivsize = DES_BLOCK_SIZE,
2584 .maxauthsize = SHA384_DIGEST_SIZE,
2587 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2588 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2589 OP_ALG_AAI_HMAC_PRECOMP,
2595 .cra_name = "echainiv(authenc(hmac(sha384),"
2596 "cbc(des)))",
2597 .cra_driver_name = "echainiv-authenc-"
2598 "hmac-sha384-cbc-des-"
2600 .cra_blocksize = DES_BLOCK_SIZE,
2602 .setkey = aead_setkey,
2603 .setauthsize = aead_setauthsize,
2604 .encrypt = aead_encrypt,
2605 .decrypt = aead_decrypt,
2606 .ivsize = DES_BLOCK_SIZE,
2607 .maxauthsize = SHA384_DIGEST_SIZE,
2610 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2611 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2612 OP_ALG_AAI_HMAC_PRECOMP,
2619 .cra_name = "authenc(hmac(sha512),cbc(des))",
2620 .cra_driver_name = "authenc-hmac-sha512-"
2621 "cbc-des-caam-qi2",
2622 .cra_blocksize = DES_BLOCK_SIZE,
2624 .setkey = aead_setkey,
2625 .setauthsize = aead_setauthsize,
2626 .encrypt = aead_encrypt,
2627 .decrypt = aead_decrypt,
2628 .ivsize = DES_BLOCK_SIZE,
2629 .maxauthsize = SHA512_DIGEST_SIZE,
2632 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2633 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2634 OP_ALG_AAI_HMAC_PRECOMP,
2640 .cra_name = "echainiv(authenc(hmac(sha512),"
2641 "cbc(des)))",
2642 .cra_driver_name = "echainiv-authenc-"
2643 "hmac-sha512-cbc-des-"
2645 .cra_blocksize = DES_BLOCK_SIZE,
2647 .setkey = aead_setkey,
2648 .setauthsize = aead_setauthsize,
2649 .encrypt = aead_encrypt,
2650 .decrypt = aead_decrypt,
2651 .ivsize = DES_BLOCK_SIZE,
2652 .maxauthsize = SHA512_DIGEST_SIZE,
2655 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2656 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2657 OP_ALG_AAI_HMAC_PRECOMP,
2664 .cra_name = "authenc(hmac(md5),"
2665 "rfc3686(ctr(aes)))",
2666 .cra_driver_name = "authenc-hmac-md5-"
2667 "rfc3686-ctr-aes-caam-qi2",
2670 .setkey = aead_setkey,
2671 .setauthsize = aead_setauthsize,
2672 .encrypt = aead_encrypt,
2673 .decrypt = aead_decrypt,
2674 .ivsize = CTR_RFC3686_IV_SIZE,
2675 .maxauthsize = MD5_DIGEST_SIZE,
2678 .class1_alg_type = OP_ALG_ALGSEL_AES |
2679 OP_ALG_AAI_CTR_MOD128,
2680 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
2681 OP_ALG_AAI_HMAC_PRECOMP,
2688 .cra_name = "seqiv(authenc("
2689 "hmac(md5),rfc3686(ctr(aes))))",
2690 .cra_driver_name = "seqiv-authenc-hmac-md5-"
2691 "rfc3686-ctr-aes-caam-qi2",
2694 .setkey = aead_setkey,
2695 .setauthsize = aead_setauthsize,
2696 .encrypt = aead_encrypt,
2697 .decrypt = aead_decrypt,
2698 .ivsize = CTR_RFC3686_IV_SIZE,
2699 .maxauthsize = MD5_DIGEST_SIZE,
2702 .class1_alg_type = OP_ALG_ALGSEL_AES |
2703 OP_ALG_AAI_CTR_MOD128,
2704 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
2705 OP_ALG_AAI_HMAC_PRECOMP,
2713 .cra_name = "authenc(hmac(sha1),"
2714 "rfc3686(ctr(aes)))",
2715 .cra_driver_name = "authenc-hmac-sha1-"
2716 "rfc3686-ctr-aes-caam-qi2",
2719 .setkey = aead_setkey,
2720 .setauthsize = aead_setauthsize,
2721 .encrypt = aead_encrypt,
2722 .decrypt = aead_decrypt,
2723 .ivsize = CTR_RFC3686_IV_SIZE,
2724 .maxauthsize = SHA1_DIGEST_SIZE,
2727 .class1_alg_type = OP_ALG_ALGSEL_AES |
2728 OP_ALG_AAI_CTR_MOD128,
2729 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2730 OP_ALG_AAI_HMAC_PRECOMP,
2737 .cra_name = "seqiv(authenc("
2738 "hmac(sha1),rfc3686(ctr(aes))))",
2739 .cra_driver_name = "seqiv-authenc-hmac-sha1-"
2740 "rfc3686-ctr-aes-caam-qi2",
2743 .setkey = aead_setkey,
2744 .setauthsize = aead_setauthsize,
2745 .encrypt = aead_encrypt,
2746 .decrypt = aead_decrypt,
2747 .ivsize = CTR_RFC3686_IV_SIZE,
2748 .maxauthsize = SHA1_DIGEST_SIZE,
2751 .class1_alg_type = OP_ALG_ALGSEL_AES |
2752 OP_ALG_AAI_CTR_MOD128,
2753 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2754 OP_ALG_AAI_HMAC_PRECOMP,
2762 .cra_name = "authenc(hmac(sha224),"
2763 "rfc3686(ctr(aes)))",
2764 .cra_driver_name = "authenc-hmac-sha224-"
2765 "rfc3686-ctr-aes-caam-qi2",
2768 .setkey = aead_setkey,
2769 .setauthsize = aead_setauthsize,
2770 .encrypt = aead_encrypt,
2771 .decrypt = aead_decrypt,
2772 .ivsize = CTR_RFC3686_IV_SIZE,
2773 .maxauthsize = SHA224_DIGEST_SIZE,
2776 .class1_alg_type = OP_ALG_ALGSEL_AES |
2777 OP_ALG_AAI_CTR_MOD128,
2778 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2779 OP_ALG_AAI_HMAC_PRECOMP,
2786 .cra_name = "seqiv(authenc("
2787 "hmac(sha224),rfc3686(ctr(aes))))",
2788 .cra_driver_name = "seqiv-authenc-hmac-sha224-"
2789 "rfc3686-ctr-aes-caam-qi2",
2792 .setkey = aead_setkey,
2793 .setauthsize = aead_setauthsize,
2794 .encrypt = aead_encrypt,
2795 .decrypt = aead_decrypt,
2796 .ivsize = CTR_RFC3686_IV_SIZE,
2797 .maxauthsize = SHA224_DIGEST_SIZE,
2800 .class1_alg_type = OP_ALG_ALGSEL_AES |
2801 OP_ALG_AAI_CTR_MOD128,
2802 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2803 OP_ALG_AAI_HMAC_PRECOMP,
2811 .cra_name = "authenc(hmac(sha256),"
2812 "rfc3686(ctr(aes)))",
2813 .cra_driver_name = "authenc-hmac-sha256-"
2814 "rfc3686-ctr-aes-caam-qi2",
2817 .setkey = aead_setkey,
2818 .setauthsize = aead_setauthsize,
2819 .encrypt = aead_encrypt,
2820 .decrypt = aead_decrypt,
2821 .ivsize = CTR_RFC3686_IV_SIZE,
2822 .maxauthsize = SHA256_DIGEST_SIZE,
2825 .class1_alg_type = OP_ALG_ALGSEL_AES |
2826 OP_ALG_AAI_CTR_MOD128,
2827 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2828 OP_ALG_AAI_HMAC_PRECOMP,
2835 .cra_name = "seqiv(authenc(hmac(sha256),"
2836 "rfc3686(ctr(aes))))",
2837 .cra_driver_name = "seqiv-authenc-hmac-sha256-"
2838 "rfc3686-ctr-aes-caam-qi2",
2841 .setkey = aead_setkey,
2842 .setauthsize = aead_setauthsize,
2843 .encrypt = aead_encrypt,
2844 .decrypt = aead_decrypt,
2845 .ivsize = CTR_RFC3686_IV_SIZE,
2846 .maxauthsize = SHA256_DIGEST_SIZE,
2849 .class1_alg_type = OP_ALG_ALGSEL_AES |
2850 OP_ALG_AAI_CTR_MOD128,
2851 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2852 OP_ALG_AAI_HMAC_PRECOMP,
2860 .cra_name = "authenc(hmac(sha384),"
2861 "rfc3686(ctr(aes)))",
2862 .cra_driver_name = "authenc-hmac-sha384-"
2863 "rfc3686-ctr-aes-caam-qi2",
2866 .setkey = aead_setkey,
2867 .setauthsize = aead_setauthsize,
2868 .encrypt = aead_encrypt,
2869 .decrypt = aead_decrypt,
2870 .ivsize = CTR_RFC3686_IV_SIZE,
2871 .maxauthsize = SHA384_DIGEST_SIZE,
2874 .class1_alg_type = OP_ALG_ALGSEL_AES |
2875 OP_ALG_AAI_CTR_MOD128,
2876 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2877 OP_ALG_AAI_HMAC_PRECOMP,
2884 .cra_name = "seqiv(authenc(hmac(sha384),"
2885 "rfc3686(ctr(aes))))",
2886 .cra_driver_name = "seqiv-authenc-hmac-sha384-"
2887 "rfc3686-ctr-aes-caam-qi2",
2890 .setkey = aead_setkey,
2891 .setauthsize = aead_setauthsize,
2892 .encrypt = aead_encrypt,
2893 .decrypt = aead_decrypt,
2894 .ivsize = CTR_RFC3686_IV_SIZE,
2895 .maxauthsize = SHA384_DIGEST_SIZE,
2898 .class1_alg_type = OP_ALG_ALGSEL_AES |
2899 OP_ALG_AAI_CTR_MOD128,
2900 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2901 OP_ALG_AAI_HMAC_PRECOMP,
2909 .cra_name = "rfc7539(chacha20,poly1305)",
2910 .cra_driver_name = "rfc7539-chacha20-poly1305-"
2911 "caam-qi2",
2914 .setkey = chachapoly_setkey,
2915 .setauthsize = chachapoly_setauthsize,
2916 .encrypt = aead_encrypt,
2917 .decrypt = aead_decrypt,
2918 .ivsize = CHACHAPOLY_IV_SIZE,
2919 .maxauthsize = POLY1305_DIGEST_SIZE,
2922 .class1_alg_type = OP_ALG_ALGSEL_CHACHA20 |
2923 OP_ALG_AAI_AEAD,
2924 .class2_alg_type = OP_ALG_ALGSEL_POLY1305 |
2925 OP_ALG_AAI_AEAD,
2932 .cra_name = "rfc7539esp(chacha20,poly1305)",
2933 .cra_driver_name = "rfc7539esp-chacha20-"
2934 "poly1305-caam-qi2",
2937 .setkey = chachapoly_setkey,
2938 .setauthsize = chachapoly_setauthsize,
2939 .encrypt = aead_encrypt,
2940 .decrypt = aead_decrypt,
2942 .maxauthsize = POLY1305_DIGEST_SIZE,
2945 .class1_alg_type = OP_ALG_ALGSEL_CHACHA20 |
2946 OP_ALG_AAI_AEAD,
2947 .class2_alg_type = OP_ALG_ALGSEL_POLY1305 |
2948 OP_ALG_AAI_AEAD,
2955 .cra_name = "authenc(hmac(sha512),"
2956 "rfc3686(ctr(aes)))",
2957 .cra_driver_name = "authenc-hmac-sha512-"
2958 "rfc3686-ctr-aes-caam-qi2",
2961 .setkey = aead_setkey,
2962 .setauthsize = aead_setauthsize,
2963 .encrypt = aead_encrypt,
2964 .decrypt = aead_decrypt,
2965 .ivsize = CTR_RFC3686_IV_SIZE,
2966 .maxauthsize = SHA512_DIGEST_SIZE,
2969 .class1_alg_type = OP_ALG_ALGSEL_AES |
2970 OP_ALG_AAI_CTR_MOD128,
2971 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2972 OP_ALG_AAI_HMAC_PRECOMP,
2979 .cra_name = "seqiv(authenc(hmac(sha512),"
2980 "rfc3686(ctr(aes))))",
2981 .cra_driver_name = "seqiv-authenc-hmac-sha512-"
2982 "rfc3686-ctr-aes-caam-qi2",
2985 .setkey = aead_setkey,
2986 .setauthsize = aead_setauthsize,
2987 .encrypt = aead_encrypt,
2988 .decrypt = aead_decrypt,
2989 .ivsize = CTR_RFC3686_IV_SIZE,
2990 .maxauthsize = SHA512_DIGEST_SIZE,
2993 .class1_alg_type = OP_ALG_ALGSEL_AES |
2994 OP_ALG_AAI_CTR_MOD128,
2995 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2996 OP_ALG_AAI_HMAC_PRECOMP,
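/*
 * Example (editor's sketch, not part of the driver): the templates above
 * are reached by name through the generic crypto API; the caam-qi2
 * implementation is selected when CAAM_CRA_PRIORITY (2000) beats the
 * other registered providers for that name:
 *
 *	struct crypto_aead *tfm;
 *
 *	tfm = crypto_alloc_aead("authenc(hmac(sha256),cbc(aes))", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	crypto_aead_setkey(tfm, key, keylen);	<- handled by aead_setkey()
 *	crypto_aead_setauthsize(tfm, 16);	<- must be <= .maxauthsize
 */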
3003 static void caam_skcipher_alg_init(struct caam_skcipher_alg *t_alg)
3005 struct skcipher_alg *alg = &t_alg->skcipher;
3007 alg->base.cra_module = THIS_MODULE;
3008 alg->base.cra_priority = CAAM_CRA_PRIORITY;
3009 alg->base.cra_ctxsize = sizeof(struct caam_ctx);
3010 alg->base.cra_flags |= (CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY |
3011 CRYPTO_ALG_KERN_DRIVER_ONLY);
3013 alg->init = caam_cra_init_skcipher;
3014 alg->exit = caam_cra_exit;
3017 static void caam_aead_alg_init(struct caam_aead_alg *t_alg)
3019 struct aead_alg *alg = &t_alg->aead;
3021 alg->base.cra_module = THIS_MODULE;
3022 alg->base.cra_priority = CAAM_CRA_PRIORITY;
3023 alg->base.cra_ctxsize = sizeof(struct caam_ctx);
3024 alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY |
3025 CRYPTO_ALG_KERN_DRIVER_ONLY;
3027 alg->init = caam_cra_init_aead;
3028 alg->exit = caam_cra_exit_aead;
3031 /* max hash key is max split key size */
3032 #define CAAM_MAX_HASH_KEY_SIZE (SHA512_DIGEST_SIZE * 2)
3034 #define CAAM_MAX_HASH_BLOCK_SIZE SHA512_BLOCK_SIZE
3036 /* caam context sizes for hashes: running digest + 8 */
3037 #define HASH_MSG_LEN 8
3038 #define MAX_CTX_LEN (HASH_MSG_LEN + SHA512_DIGEST_SIZE)
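/*
 * Worked example: MD5 keeps a 16-byte running digest, so its context is
 * 16 + HASH_MSG_LEN = 24 bytes; SHA-512 needs 64 + 8 = 72 bytes, which
 * is exactly MAX_CTX_LEN.
 */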
3048 /**
3049 * struct caam_hash_ctx - ahash per-session context
3050 * @flc: Flow Contexts array
3051 * @key: authentication key
3052 * @flc_dma: I/O virtual addresses of the Flow Contexts
3053 * @dev: dpseci device
3054 * @ctx_len: size of Context Register
3055 * @adata: hashing algorithm details
3056 */
3057 struct caam_hash_ctx {
3058 struct caam_flc flc[HASH_NUM_OP];
3059 u8 key[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
3060 dma_addr_t flc_dma[HASH_NUM_OP];
3063 struct alginfo adata;
3067 struct caam_hash_state {
3068 struct caam_request caam_req;
3072 u8 buf[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
3075 u8 caam_ctx[MAX_CTX_LEN] ____cacheline_aligned;
3076 int (*update)(struct ahash_request *req);
3077 int (*final)(struct ahash_request *req);
3078 int (*finup)(struct ahash_request *req);
3081 struct caam_export_state {
3082 u8 buf[CAAM_MAX_HASH_BLOCK_SIZE];
3083 u8 caam_ctx[MAX_CTX_LEN];
3085 int (*update)(struct ahash_request *req);
3086 int (*final)(struct ahash_request *req);
3087 int (*finup)(struct ahash_request *req);
3090 /* Map current buffer in state (if length > 0) and put it in link table */
3091 static inline int buf_map_to_qm_sg(struct device *dev,
3092 struct dpaa2_sg_entry *qm_sg,
3093 struct caam_hash_state *state)
3095 int buflen = state->buflen;
3100 state->buf_dma = dma_map_single(dev, state->buf, buflen,
3101 DMA_TO_DEVICE);
3102 if (dma_mapping_error(dev, state->buf_dma)) {
3103 dev_err(dev, "unable to map buf\n");
3108 dma_to_qm_sg_one(qm_sg, state->buf_dma, buflen, 0);
3113 /* Map state->caam_ctx, and add it to link table */
3114 static inline int ctx_map_to_qm_sg(struct device *dev,
3115 struct caam_hash_state *state, int ctx_len,
3116 struct dpaa2_sg_entry *qm_sg, u32 flag)
3118 state->ctx_dma_len = ctx_len;
3119 state->ctx_dma = dma_map_single(dev, state->caam_ctx, ctx_len, flag);
3120 if (dma_mapping_error(dev, state->ctx_dma)) {
3121 dev_err(dev, "unable to map ctx\n");
3126 dma_to_qm_sg_one(qm_sg, state->ctx_dma, ctx_len, 0);
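/*
 * Illustrative layout (editor's note) of a link table built with the two
 * helpers above for an update that has both a running context and
 * buffered bytes (see their use further below):
 *
 *	sg_table[0]   ctx_map_to_qm_sg()  -> state->caam_ctx
 *	sg_table[1]   buf_map_to_qm_sg()  -> state->buf   (if buflen > 0)
 *	sg_table[2..] sg_to_qm_sg_last()  -> req->src segments
 */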
3131 static int ahash_set_sh_desc(struct crypto_ahash *ahash)
3133 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3134 int digestsize = crypto_ahash_digestsize(ahash);
3135 struct dpaa2_caam_priv *priv = dev_get_drvdata(ctx->dev);
3136 struct caam_flc *flc;
3139 /* ahash_update shared descriptor */
3140 flc = &ctx->flc[UPDATE];
3141 desc = flc->sh_desc;
3142 cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_UPDATE, ctx->ctx_len,
3143 ctx->ctx_len, true, priv->sec_attr.era);
3144 flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
3145 dma_sync_single_for_device(ctx->dev, ctx->flc_dma[UPDATE],
3146 desc_bytes(desc), DMA_BIDIRECTIONAL);
3147 print_hex_dump_debug("ahash update shdesc@" __stringify(__LINE__)": ",
3148 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
3149 1);
3151 /* ahash_update_first shared descriptor */
3152 flc = &ctx->flc[UPDATE_FIRST];
3153 desc = flc->sh_desc;
3154 cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len,
3155 ctx->ctx_len, false, priv->sec_attr.era);
3156 flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
3157 dma_sync_single_for_device(ctx->dev, ctx->flc_dma[UPDATE_FIRST],
3158 desc_bytes(desc), DMA_BIDIRECTIONAL);
3159 print_hex_dump_debug("ahash update first shdesc@" __stringify(__LINE__)": ",
3160 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
3161 1);
3163 /* ahash_final shared descriptor */
3164 flc = &ctx->flc[FINALIZE];
3165 desc = flc->sh_desc;
3166 cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_FINALIZE, digestsize,
3167 ctx->ctx_len, true, priv->sec_attr.era);
3168 flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
3169 dma_sync_single_for_device(ctx->dev, ctx->flc_dma[FINALIZE],
3170 desc_bytes(desc), DMA_BIDIRECTIONAL);
3171 print_hex_dump_debug("ahash final shdesc@" __stringify(__LINE__)": ",
3172 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
3173 1);
3175 /* ahash_digest shared descriptor */
3176 flc = &ctx->flc[DIGEST];
3177 desc = flc->sh_desc;
3178 cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INITFINAL, digestsize,
3179 ctx->ctx_len, false, priv->sec_attr.era);
3180 flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
3181 dma_sync_single_for_device(ctx->dev, ctx->flc_dma[DIGEST],
3182 desc_bytes(desc), DMA_BIDIRECTIONAL);
3183 print_hex_dump_debug("ahash digest shdesc@" __stringify(__LINE__)": ",
3184 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
3185 1);
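/*
 * Summary (editor's note): the four shared descriptors constructed above
 * map onto the ahash API as follows -
 *	UPDATE       (AS_UPDATE,    ctx in/out)  non-first .update() chunks
 *	UPDATE_FIRST (AS_INIT,      ctx out)     first .update() of a request
 *	FINALIZE     (AS_FINALIZE,  digest out)  .final()/.finup()
 *	DIGEST       (AS_INITFINAL, digest out)  one-shot .digest()
 */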
3190 struct split_key_sh_result {
3191 struct completion completion;
3196 static void split_key_sh_done(void *cbk_ctx, u32 err)
3198 struct split_key_sh_result *res = cbk_ctx;
3200 dev_dbg(res->dev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
3202 res->err = err ? caam_qi2_strstatus(res->dev, err) : 0;
3203 complete(&res->completion);
3206 /* Digest the key if it is longer than the algorithm's block size */
3207 static int hash_digest_key(struct caam_hash_ctx *ctx, u32 *keylen, u8 *key,
3210 struct caam_request *req_ctx;
3212 struct split_key_sh_result result;
3214 struct caam_flc *flc;
3217 struct dpaa2_fl_entry *in_fle, *out_fle;
3219 req_ctx = kzalloc(sizeof(*req_ctx), GFP_KERNEL | GFP_DMA);
3223 in_fle = &req_ctx->fd_flt[1];
3224 out_fle = &req_ctx->fd_flt[0];
3226 flc = kzalloc(sizeof(*flc), GFP_KERNEL | GFP_DMA);
3230 key_dma = dma_map_single(ctx->dev, key, *keylen, DMA_BIDIRECTIONAL);
3231 if (dma_mapping_error(ctx->dev, key_dma)) {
3232 dev_err(ctx->dev, "unable to map key memory\n");
3236 desc = flc->sh_desc;
3238 init_sh_desc(desc, 0);
3240 /* descriptor to perform unkeyed hash on key_in */
3241 append_operation(desc, ctx->adata.algtype | OP_ALG_ENCRYPT |
3242 OP_ALG_AS_INITFINAL);
3243 append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 |
3244 FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_MSG);
3245 append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
3246 LDST_SRCDST_BYTE_CONTEXT);
3248 flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
3249 flc_dma = dma_map_single(ctx->dev, flc, sizeof(flc->flc) +
3250 desc_bytes(desc), DMA_TO_DEVICE);
3251 if (dma_mapping_error(ctx->dev, flc_dma)) {
3252 dev_err(ctx->dev, "unable to map shared descriptor\n");
3256 dpaa2_fl_set_final(in_fle, true);
3257 dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
3258 dpaa2_fl_set_addr(in_fle, key_dma);
3259 dpaa2_fl_set_len(in_fle, *keylen);
3260 dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
3261 dpaa2_fl_set_addr(out_fle, key_dma);
3262 dpaa2_fl_set_len(out_fle, digestsize);
3264 print_hex_dump_debug("key_in@" __stringify(__LINE__)": ",
3265 DUMP_PREFIX_ADDRESS, 16, 4, key, *keylen, 1);
3266 print_hex_dump_debug("shdesc@" __stringify(__LINE__)": ",
3267 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
3268 1);
3271 init_completion(&result.completion);
3272 result.dev = ctx->dev;
3275 req_ctx->flc_dma = flc_dma;
3276 req_ctx->cbk = split_key_sh_done;
3277 req_ctx->ctx = &result;
3279 ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
3280 if (ret == -EINPROGRESS) {
3282 wait_for_completion(&result.completion);
3284 print_hex_dump_debug("digested key@" __stringify(__LINE__)": ",
3285 DUMP_PREFIX_ADDRESS, 16, 4, key,
3286 digestsize, 1);
3289 dma_unmap_single(ctx->dev, flc_dma, sizeof(flc->flc) + desc_bytes(desc),
3290 DMA_TO_DEVICE);
3292 dma_unmap_single(ctx->dev, key_dma, *keylen, DMA_BIDIRECTIONAL);
3298 *keylen = digestsize;
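/*
 * Example: per HMAC (RFC 2104), a key longer than the block size is
 * replaced by its digest. A 100-byte key for hmac(sha256) (64-byte
 * block) therefore leaves this function as the 32-byte SHA-256 digest,
 * with *keylen updated accordingly.
 */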
3303 static int ahash_setkey(struct crypto_ahash *ahash, const u8 *key,
3304 unsigned int keylen)
3306 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3307 unsigned int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
3308 unsigned int digestsize = crypto_ahash_digestsize(ahash);
3310 u8 *hashed_key = NULL;
3312 dev_dbg(ctx->dev, "keylen %d blocksize %d\n", keylen, blocksize);
3314 if (keylen > blocksize) {
3315 hashed_key = kmemdup(key, keylen, GFP_KERNEL | GFP_DMA);
3318 ret = hash_digest_key(ctx, &keylen, hashed_key, digestsize);
3324 ctx->adata.keylen = keylen;
3325 ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
3326 OP_ALG_ALGSEL_MASK);
3327 if (ctx->adata.keylen_pad > CAAM_MAX_HASH_KEY_SIZE)
3330 ctx->adata.key_virt = key;
3331 ctx->adata.key_inline = true;
3334 * In case |user key| > |derived key|, using DKP<imm,imm> would result
3335 * in invalid opcodes (last bytes of user key) in the resulting
3336 * descriptor. Use DKP<ptr,imm> instead => both virtual and dma key
3337 * addresses are needed.
3339 if (keylen > ctx->adata.keylen_pad) {
3340 memcpy(ctx->key, key, keylen);
3341 dma_sync_single_for_device(ctx->dev, ctx->adata.key_dma,
3342 ctx->adata.keylen_pad,
3343 DMA_TO_DEVICE);
3346 ret = ahash_set_sh_desc(ahash);
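/*
 * Caller-side sketch for the setkey path above (editor's example;
 * hypothetical buffer names, error handling elided):
 *
 *	struct crypto_ahash *tfm;
 *
 *	tfm = crypto_alloc_ahash("hmac(sha256)", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	ret = crypto_ahash_setkey(tfm, key, keylen);  <- ahash_setkey()
 */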
3354 static inline void ahash_unmap(struct device *dev, struct ahash_edesc *edesc,
3355 struct ahash_request *req)
3357 struct caam_hash_state *state = ahash_request_ctx(req);
3359 if (edesc->src_nents)
3360 dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);
3362 if (edesc->qm_sg_bytes)
3363 dma_unmap_single(dev, edesc->qm_sg_dma, edesc->qm_sg_bytes,
3364 DMA_TO_DEVICE);
3366 if (state->buf_dma) {
3367 dma_unmap_single(dev, state->buf_dma, state->buflen,
3368 DMA_TO_DEVICE);
3373 static inline void ahash_unmap_ctx(struct device *dev,
3374 struct ahash_edesc *edesc,
3375 struct ahash_request *req, u32 flag)
3377 struct caam_hash_state *state = ahash_request_ctx(req);
3379 if (state->ctx_dma) {
3380 dma_unmap_single(dev, state->ctx_dma, state->ctx_dma_len, flag);
3383 ahash_unmap(dev, edesc, req);
3386 static void ahash_done(void *cbk_ctx, u32 status)
3388 struct crypto_async_request *areq = cbk_ctx;
3389 struct ahash_request *req = ahash_request_cast(areq);
3390 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3391 struct caam_hash_state *state = ahash_request_ctx(req);
3392 struct ahash_edesc *edesc = state->caam_req.edesc;
3393 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3394 int digestsize = crypto_ahash_digestsize(ahash);
3397 dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
3399 if (unlikely(status))
3400 ecode = caam_qi2_strstatus(ctx->dev, status);
3402 ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
3403 memcpy(req->result, state->caam_ctx, digestsize);
3404 qi_cache_free(edesc);
3406 print_hex_dump_debug("ctx@" __stringify(__LINE__)": ",
3407 DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
3408 crypto_ahash_digestsize(ahash), 1);
3410 req->base.complete(&req->base, ecode);
3413 static void ahash_done_bi(void *cbk_ctx, u32 status)
3415 struct crypto_async_request *areq = cbk_ctx;
3416 struct ahash_request *req = ahash_request_cast(areq);
3417 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3418 struct caam_hash_state *state = ahash_request_ctx(req);
3419 struct ahash_edesc *edesc = state->caam_req.edesc;
3420 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3423 dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
3425 if (unlikely(status))
3426 ecode = caam_qi2_strstatus(ctx->dev, status);
3428 ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
3429 qi_cache_free(edesc);
3431 scatterwalk_map_and_copy(state->buf, req->src,
3432 req->nbytes - state->next_buflen,
3433 state->next_buflen, 0);
3434 state->buflen = state->next_buflen;
3436 print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
3437 DUMP_PREFIX_ADDRESS, 16, 4, state->buf,
3438 state->buflen, 1);
3440 print_hex_dump_debug("ctx@" __stringify(__LINE__)": ",
3441 DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
3442 ctx->ctx_len, 1);
3444 print_hex_dump_debug("result@" __stringify(__LINE__)": ",
3445 DUMP_PREFIX_ADDRESS, 16, 4, req->result,
3446 crypto_ahash_digestsize(ahash), 1);
3448 req->base.complete(&req->base, ecode);
3451 static void ahash_done_ctx_src(void *cbk_ctx, u32 status)
3453 struct crypto_async_request *areq = cbk_ctx;
3454 struct ahash_request *req = ahash_request_cast(areq);
3455 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3456 struct caam_hash_state *state = ahash_request_ctx(req);
3457 struct ahash_edesc *edesc = state->caam_req.edesc;
3458 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3459 int digestsize = crypto_ahash_digestsize(ahash);
3462 dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
3464 if (unlikely(status))
3465 ecode = caam_qi2_strstatus(ctx->dev, status);
3467 ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
3468 memcpy(req->result, state->caam_ctx, digestsize);
3469 qi_cache_free(edesc);
3471 print_hex_dump_debug("ctx@" __stringify(__LINE__)": ",
3472 DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
3473 crypto_ahash_digestsize(ahash), 1);
3475 req->base.complete(&req->base, ecode);
3478 static void ahash_done_ctx_dst(void *cbk_ctx, u32 status)
3480 struct crypto_async_request *areq = cbk_ctx;
3481 struct ahash_request *req = ahash_request_cast(areq);
3482 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3483 struct caam_hash_state *state = ahash_request_ctx(req);
3484 struct ahash_edesc *edesc = state->caam_req.edesc;
3485 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3488 dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
3490 if (unlikely(status))
3491 ecode = caam_qi2_strstatus(ctx->dev, status);
3493 ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
3494 qi_cache_free(edesc);
3496 scatterwalk_map_and_copy(state->buf, req->src,
3497 req->nbytes - state->next_buflen,
3498 state->next_buflen, 0);
3499 state->buflen = state->next_buflen;
3501 print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
3502 DUMP_PREFIX_ADDRESS, 16, 4, state->buf,
3503 state->buflen, 1);
3505 print_hex_dump_debug("ctx@" __stringify(__LINE__)": ",
3506 DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
3507 ctx->ctx_len, 1);
3509 print_hex_dump_debug("result@" __stringify(__LINE__)": ",
3510 DUMP_PREFIX_ADDRESS, 16, 4, req->result,
3511 crypto_ahash_digestsize(ahash), 1);
3513 req->base.complete(&req->base, ecode);
3516 static int ahash_update_ctx(struct ahash_request *req)
3518 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3519 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3520 struct caam_hash_state *state = ahash_request_ctx(req);
3521 struct caam_request *req_ctx = &state->caam_req;
3522 struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
3523 struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
3524 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
3525 GFP_KERNEL : GFP_ATOMIC;
3526 u8 *buf = state->buf;
3527 int *buflen = &state->buflen;
3528 int *next_buflen = &state->next_buflen;
3529 int in_len = *buflen + req->nbytes, to_hash;
3530 int src_nents, mapped_nents, qm_sg_bytes, qm_sg_src_index;
3531 struct ahash_edesc *edesc;
3534 *next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
3535 to_hash = in_len - *next_buflen;
3538 struct dpaa2_sg_entry *sg_table;
3539 int src_len = req->nbytes - *next_buflen;
3541 src_nents = sg_nents_for_len(req->src, src_len);
3542 if (src_nents < 0) {
3543 dev_err(ctx->dev, "Invalid number of src SG.\n");
3548 mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
3549 DMA_TO_DEVICE);
3550 if (!mapped_nents) {
3551 dev_err(ctx->dev, "unable to DMA map source\n");
3558 /* allocate space for base edesc and link tables */
3559 edesc = qi_cache_zalloc(GFP_DMA | flags);
3561 dma_unmap_sg(ctx->dev, req->src, src_nents,
3562 DMA_TO_DEVICE);
3566 edesc->src_nents = src_nents;
3567 qm_sg_src_index = 1 + (*buflen ? 1 : 0);
3568 qm_sg_bytes = pad_sg_nents(qm_sg_src_index + mapped_nents) *
3569 sizeof(*sg_table);
3570 sg_table = &edesc->sgt[0];
3572 ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table,
3573 DMA_BIDIRECTIONAL);
3577 ret = buf_map_to_qm_sg(ctx->dev, sg_table + 1, state);
3582 sg_to_qm_sg_last(req->src, src_len,
3583 sg_table + qm_sg_src_index, 0);
3585 dpaa2_sg_set_final(sg_table + qm_sg_src_index - 1,
3586 true);
3589 edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
3590 qm_sg_bytes, DMA_TO_DEVICE);
3591 if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
3592 dev_err(ctx->dev, "unable to map S/G table\n");
3596 edesc->qm_sg_bytes = qm_sg_bytes;
3598 memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
3599 dpaa2_fl_set_final(in_fle, true);
3600 dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
3601 dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
3602 dpaa2_fl_set_len(in_fle, ctx->ctx_len + to_hash);
3603 dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
3604 dpaa2_fl_set_addr(out_fle, state->ctx_dma);
3605 dpaa2_fl_set_len(out_fle, ctx->ctx_len);
3607 req_ctx->flc = &ctx->flc[UPDATE];
3608 req_ctx->flc_dma = ctx->flc_dma[UPDATE];
3609 req_ctx->cbk = ahash_done_bi;
3610 req_ctx->ctx = &req->base;
3611 req_ctx->edesc = edesc;
3613 ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
3614 if (ret != -EINPROGRESS &&
3615 !(ret == -EBUSY &&
3616 req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
3618 } else if (*next_buflen) {
3619 scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
3620 req->nbytes, 0);
3621 *buflen = *next_buflen;
3623 print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
3624 DUMP_PREFIX_ADDRESS, 16, 4, buf,
3625 *buflen, 1);
3630 ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
3631 qi_cache_free(edesc);
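/*
 * Worked example for the to_hash/next_buflen split above: with a
 * 64-byte block size, state->buflen = 10 and req->nbytes = 100,
 * in_len = 110, so next_buflen = 110 & 63 = 46 and to_hash = 64;
 * one full block goes to the engine now and the trailing 46 bytes of
 * req->src are copied back into state->buf for the next call.
 */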
3635 static int ahash_final_ctx(struct ahash_request *req)
3637 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3638 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3639 struct caam_hash_state *state = ahash_request_ctx(req);
3640 struct caam_request *req_ctx = &state->caam_req;
3641 struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
3642 struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
3643 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
3644 GFP_KERNEL : GFP_ATOMIC;
3645 int buflen = state->buflen;
3647 int digestsize = crypto_ahash_digestsize(ahash);
3648 struct ahash_edesc *edesc;
3649 struct dpaa2_sg_entry *sg_table;
3652 /* allocate space for base edesc and link tables */
3653 edesc = qi_cache_zalloc(GFP_DMA | flags);
3657 qm_sg_bytes = pad_sg_nents(1 + (buflen ? 1 : 0)) * sizeof(*sg_table);
3658 sg_table = &edesc->sgt[0];
3660 ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table,
3661 DMA_BIDIRECTIONAL);
3665 ret = buf_map_to_qm_sg(ctx->dev, sg_table + 1, state);
3669 dpaa2_sg_set_final(sg_table + (buflen ? 1 : 0), true);
3671 edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes,
3672 DMA_TO_DEVICE);
3673 if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
3674 dev_err(ctx->dev, "unable to map S/G table\n");
3678 edesc->qm_sg_bytes = qm_sg_bytes;
3680 memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
3681 dpaa2_fl_set_final(in_fle, true);
3682 dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
3683 dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
3684 dpaa2_fl_set_len(in_fle, ctx->ctx_len + buflen);
3685 dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
3686 dpaa2_fl_set_addr(out_fle, state->ctx_dma);
3687 dpaa2_fl_set_len(out_fle, digestsize);
3689 req_ctx->flc = &ctx->flc[FINALIZE];
3690 req_ctx->flc_dma = ctx->flc_dma[FINALIZE];
3691 req_ctx->cbk = ahash_done_ctx_src;
3692 req_ctx->ctx = &req->base;
3693 req_ctx->edesc = edesc;
3695 ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
3696 if (ret == -EINPROGRESS ||
3697 (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
3701 ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
3702 qi_cache_free(edesc);
3706 static int ahash_finup_ctx(struct ahash_request *req)
3708 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3709 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3710 struct caam_hash_state *state = ahash_request_ctx(req);
3711 struct caam_request *req_ctx = &state->caam_req;
3712 struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
3713 struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
3714 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
3715 GFP_KERNEL : GFP_ATOMIC;
3716 int buflen = state->buflen;
3717 int qm_sg_bytes, qm_sg_src_index;
3718 int src_nents, mapped_nents;
3719 int digestsize = crypto_ahash_digestsize(ahash);
3720 struct ahash_edesc *edesc;
3721 struct dpaa2_sg_entry *sg_table;
3724 src_nents = sg_nents_for_len(req->src, req->nbytes);
3725 if (src_nents < 0) {
3726 dev_err(ctx->dev, "Invalid number of src SG.\n");
3731 mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
3732 DMA_TO_DEVICE);
3733 if (!mapped_nents) {
3734 dev_err(ctx->dev, "unable to DMA map source\n");
3741 /* allocate space for base edesc and link tables */
3742 edesc = qi_cache_zalloc(GFP_DMA | flags);
3744 dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE);
3748 edesc->src_nents = src_nents;
3749 qm_sg_src_index = 1 + (buflen ? 1 : 0);
3750 qm_sg_bytes = pad_sg_nents(qm_sg_src_index + mapped_nents) *
3752 sg_table = &edesc->sgt[0];
3754 ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table,
3755 DMA_BIDIRECTIONAL);
3759 ret = buf_map_to_qm_sg(ctx->dev, sg_table + 1, state);
3763 sg_to_qm_sg_last(req->src, req->nbytes, sg_table + qm_sg_src_index, 0);
3765 edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes,
3766 DMA_TO_DEVICE);
3767 if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
3768 dev_err(ctx->dev, "unable to map S/G table\n");
3772 edesc->qm_sg_bytes = qm_sg_bytes;
3774 memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
3775 dpaa2_fl_set_final(in_fle, true);
3776 dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
3777 dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
3778 dpaa2_fl_set_len(in_fle, ctx->ctx_len + buflen + req->nbytes);
3779 dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
3780 dpaa2_fl_set_addr(out_fle, state->ctx_dma);
3781 dpaa2_fl_set_len(out_fle, digestsize);
3783 req_ctx->flc = &ctx->flc[FINALIZE];
3784 req_ctx->flc_dma = ctx->flc_dma[FINALIZE];
3785 req_ctx->cbk = ahash_done_ctx_src;
3786 req_ctx->ctx = &req->base;
3787 req_ctx->edesc = edesc;
3789 ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
3790 if (ret == -EINPROGRESS ||
3791 (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
3795 ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
3796 qi_cache_free(edesc);
3800 static int ahash_digest(struct ahash_request *req)
3802 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3803 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3804 struct caam_hash_state *state = ahash_request_ctx(req);
3805 struct caam_request *req_ctx = &state->caam_req;
3806 struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
3807 struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
3808 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
3809 GFP_KERNEL : GFP_ATOMIC;
3810 int digestsize = crypto_ahash_digestsize(ahash);
3811 int src_nents, mapped_nents;
3812 struct ahash_edesc *edesc;
3817 src_nents = sg_nents_for_len(req->src, req->nbytes);
3818 if (src_nents < 0) {
3819 dev_err(ctx->dev, "Invalid number of src SG.\n");
3824 mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
3825 DMA_TO_DEVICE);
3826 if (!mapped_nents) {
3827 dev_err(ctx->dev, "unable to map source for DMA\n");
3834 /* allocate space for base edesc and link tables */
3835 edesc = qi_cache_zalloc(GFP_DMA | flags);
3837 dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE);
3841 edesc->src_nents = src_nents;
3842 memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
3844 if (mapped_nents > 1) {
3846 struct dpaa2_sg_entry *sg_table = &edesc->sgt[0];
3848 qm_sg_bytes = pad_sg_nents(mapped_nents) * sizeof(*sg_table);
3849 sg_to_qm_sg_last(req->src, req->nbytes, sg_table, 0);
3850 edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
3851 qm_sg_bytes, DMA_TO_DEVICE);
3852 if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
3853 dev_err(ctx->dev, "unable to map S/G table\n");
3856 edesc->qm_sg_bytes = qm_sg_bytes;
3857 dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
3858 dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
3860 dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
3861 dpaa2_fl_set_addr(in_fle, sg_dma_address(req->src));
3864 state->ctx_dma_len = digestsize;
3865 state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx, digestsize,
3866 DMA_FROM_DEVICE);
3867 if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
3868 dev_err(ctx->dev, "unable to map ctx\n");
3873 dpaa2_fl_set_final(in_fle, true);
3874 dpaa2_fl_set_len(in_fle, req->nbytes);
3875 dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
3876 dpaa2_fl_set_addr(out_fle, state->ctx_dma);
3877 dpaa2_fl_set_len(out_fle, digestsize);
3879 req_ctx->flc = &ctx->flc[DIGEST];
3880 req_ctx->flc_dma = ctx->flc_dma[DIGEST];
3881 req_ctx->cbk = ahash_done;
3882 req_ctx->ctx = &req->base;
3883 req_ctx->edesc = edesc;
3884 ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
3885 if (ret == -EINPROGRESS ||
3886 (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
3890 ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
3891 qi_cache_free(edesc);
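/*
 * Note (editor's summary) on the frame list built in ahash_digest():
 * in_fle points either at the single mapped source segment or at the
 * S/G table, out_fle always at state->caam_ctx; ahash_done() then
 * copies the digest from there into req->result.
 */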
3895 static int ahash_final_no_ctx(struct ahash_request *req)
3897 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3898 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3899 struct caam_hash_state *state = ahash_request_ctx(req);
3900 struct caam_request *req_ctx = &state->caam_req;
3901 struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
3902 struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
3903 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
3904 GFP_KERNEL : GFP_ATOMIC;
3905 u8 *buf = state->buf;
3906 int buflen = state->buflen;
3907 int digestsize = crypto_ahash_digestsize(ahash);
3908 struct ahash_edesc *edesc;
3911 /* allocate space for base edesc and link tables */
3912 edesc = qi_cache_zalloc(GFP_DMA | flags);
3917 state->buf_dma = dma_map_single(ctx->dev, buf, buflen,
3918 DMA_TO_DEVICE);
3919 if (dma_mapping_error(ctx->dev, state->buf_dma)) {
3920 dev_err(ctx->dev, "unable to map src\n");
3925 state->ctx_dma_len = digestsize;
3926 state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx, digestsize,
3927 DMA_FROM_DEVICE);
3928 if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
3929 dev_err(ctx->dev, "unable to map ctx\n");
3934 memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
3935 dpaa2_fl_set_final(in_fle, true);
3936 /*
3937 * crypto engine requires the input entry to be present when
3938 * "frame list" FD is used.
3939 * Since engine does not support FMT=2'b11 (unused entry type), leaving
3940 * in_fle zeroized (except for "Final" flag) is the best option.
3941 */
3943 dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
3944 dpaa2_fl_set_addr(in_fle, state->buf_dma);
3945 dpaa2_fl_set_len(in_fle, buflen);
3947 dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
3948 dpaa2_fl_set_addr(out_fle, state->ctx_dma);
3949 dpaa2_fl_set_len(out_fle, digestsize);
3951 req_ctx->flc = &ctx->flc[DIGEST];
3952 req_ctx->flc_dma = ctx->flc_dma[DIGEST];
3953 req_ctx->cbk = ahash_done;
3954 req_ctx->ctx = &req->base;
3955 req_ctx->edesc = edesc;
3957 ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
3958 if (ret == -EINPROGRESS ||
3959 (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
3963 ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
3964 qi_cache_free(edesc);
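/*
 * Example (editor's note, hedged): an ahash_final() with no buffered
 * data (nothing was ever updated) thus submits a DIGEST operation whose
 * input frame-list entry has length 0, and the engine returns the hash
 * of the empty message.
 */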
3968 static int ahash_update_no_ctx(struct ahash_request *req)
3970 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3971 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3972 struct caam_hash_state *state = ahash_request_ctx(req);
3973 struct caam_request *req_ctx = &state->caam_req;
3974 struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
3975 struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
3976 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
3977 GFP_KERNEL : GFP_ATOMIC;
3978 u8 *buf = state->buf;
3979 int *buflen = &state->buflen;
3980 int *next_buflen = &state->next_buflen;
3981 int in_len = *buflen + req->nbytes, to_hash;
3982 int qm_sg_bytes, src_nents, mapped_nents;
3983 struct ahash_edesc *edesc;
3986 *next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
3987 to_hash = in_len - *next_buflen;
3990 struct dpaa2_sg_entry *sg_table;
3991 int src_len = req->nbytes - *next_buflen;
3993 src_nents = sg_nents_for_len(req->src, src_len);
3994 if (src_nents < 0) {
3995 dev_err(ctx->dev, "Invalid number of src SG.\n");
4000 mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
4001 DMA_TO_DEVICE);
4002 if (!mapped_nents) {
4003 dev_err(ctx->dev, "unable to DMA map source\n");
4010 /* allocate space for base edesc and link tables */
4011 edesc = qi_cache_zalloc(GFP_DMA | flags);
4013 dma_unmap_sg(ctx->dev, req->src, src_nents,
4014 DMA_TO_DEVICE);
4018 edesc->src_nents = src_nents;
4019 qm_sg_bytes = pad_sg_nents(1 + mapped_nents) *
4020 sizeof(*sg_table);
4021 sg_table = &edesc->sgt[0];
4023 ret = buf_map_to_qm_sg(ctx->dev, sg_table, state);
4027 sg_to_qm_sg_last(req->src, src_len, sg_table + 1, 0);
4029 edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
4030 qm_sg_bytes, DMA_TO_DEVICE);
4031 if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
4032 dev_err(ctx->dev, "unable to map S/G table\n");
4036 edesc->qm_sg_bytes = qm_sg_bytes;
4038 state->ctx_dma_len = ctx->ctx_len;
4039 state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx,
4040 ctx->ctx_len, DMA_FROM_DEVICE);
4041 if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
4042 dev_err(ctx->dev, "unable to map ctx\n");
4048 memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
4049 dpaa2_fl_set_final(in_fle, true);
4050 dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
4051 dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
4052 dpaa2_fl_set_len(in_fle, to_hash);
4053 dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
4054 dpaa2_fl_set_addr(out_fle, state->ctx_dma);
4055 dpaa2_fl_set_len(out_fle, ctx->ctx_len);
4057 req_ctx->flc = &ctx->flc[UPDATE_FIRST];
4058 req_ctx->flc_dma = ctx->flc_dma[UPDATE_FIRST];
4059 req_ctx->cbk = ahash_done_ctx_dst;
4060 req_ctx->ctx = &req->base;
4061 req_ctx->edesc = edesc;
4063 ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
4064 if (ret != -EINPROGRESS &&
4065 !(ret == -EBUSY &&
4066 req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
4069 state->update = ahash_update_ctx;
4070 state->finup = ahash_finup_ctx;
4071 state->final = ahash_final_ctx;
4072 } else if (*next_buflen) {
4073 scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
4074 req->nbytes, 0);
4075 *buflen = *next_buflen;
4077 print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
4078 DUMP_PREFIX_ADDRESS, 16, 4, buf,
4079 *buflen, 1);
4084 ahash_unmap_ctx(ctx->dev, edesc, req, DMA_TO_DEVICE);
4085 qi_cache_free(edesc);
4089 static int ahash_finup_no_ctx(struct ahash_request *req)
4091 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
4092 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
4093 struct caam_hash_state *state = ahash_request_ctx(req);
4094 struct caam_request *req_ctx = &state->caam_req;
4095 struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
4096 struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
4097 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
4098 GFP_KERNEL : GFP_ATOMIC;
4099 int buflen = state->buflen;
4100 int qm_sg_bytes, src_nents, mapped_nents;
4101 int digestsize = crypto_ahash_digestsize(ahash);
4102 struct ahash_edesc *edesc;
4103 struct dpaa2_sg_entry *sg_table;
4106 src_nents = sg_nents_for_len(req->src, req->nbytes);
4107 if (src_nents < 0) {
4108 dev_err(ctx->dev, "Invalid number of src SG.\n");
4113 mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
4114 DMA_TO_DEVICE);
4115 if (!mapped_nents) {
4116 dev_err(ctx->dev, "unable to DMA map source\n");
4123 /* allocate space for base edesc and link tables */
4124 edesc = qi_cache_zalloc(GFP_DMA | flags);
4126 dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE);
4130 edesc->src_nents = src_nents;
4131 qm_sg_bytes = pad_sg_nents(2 + mapped_nents) * sizeof(*sg_table);
4132 sg_table = &edesc->sgt[0];
4134 ret = buf_map_to_qm_sg(ctx->dev, sg_table, state);
4138 sg_to_qm_sg_last(req->src, req->nbytes, sg_table + 1, 0);
4140 edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes,
4141 DMA_TO_DEVICE);
4142 if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
4143 dev_err(ctx->dev, "unable to map S/G table\n");
4147 edesc->qm_sg_bytes = qm_sg_bytes;
4149 state->ctx_dma_len = digestsize;
4150 state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx, digestsize,
4151 DMA_FROM_DEVICE);
4152 if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
4153 dev_err(ctx->dev, "unable to map ctx\n");
4159 memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
4160 dpaa2_fl_set_final(in_fle, true);
4161 dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
4162 dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
4163 dpaa2_fl_set_len(in_fle, buflen + req->nbytes);
4164 dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
4165 dpaa2_fl_set_addr(out_fle, state->ctx_dma);
4166 dpaa2_fl_set_len(out_fle, digestsize);
4168 req_ctx->flc = &ctx->flc[DIGEST];
4169 req_ctx->flc_dma = ctx->flc_dma[DIGEST];
4170 req_ctx->cbk = ahash_done;
4171 req_ctx->ctx = &req->base;
4172 req_ctx->edesc = edesc;
4173 ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
4174 if (ret != -EINPROGRESS &&
4175 !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
4180 ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
4181 qi_cache_free(edesc);
4185 static int ahash_update_first(struct ahash_request *req)
4187 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
4188 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
4189 struct caam_hash_state *state = ahash_request_ctx(req);
4190 struct caam_request *req_ctx = &state->caam_req;
4191 struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
4192 struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
4193 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
4194 GFP_KERNEL : GFP_ATOMIC;
4195 u8 *buf = state->buf;
4196 int *buflen = &state->buflen;
4197 int *next_buflen = &state->next_buflen;
4199 int src_nents, mapped_nents;
4200 struct ahash_edesc *edesc;
4203 *next_buflen = req->nbytes & (crypto_tfm_alg_blocksize(&ahash->base) -
4204 1);
4205 to_hash = req->nbytes - *next_buflen;
4208 struct dpaa2_sg_entry *sg_table;
4209 int src_len = req->nbytes - *next_buflen;
4211 src_nents = sg_nents_for_len(req->src, src_len);
4212 if (src_nents < 0) {
4213 dev_err(ctx->dev, "Invalid number of src SG.\n");
4218 mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
4219 DMA_TO_DEVICE);
4220 if (!mapped_nents) {
4221 dev_err(ctx->dev, "unable to map source for DMA\n");
4228 /* allocate space for base edesc and link tables */
4229 edesc = qi_cache_zalloc(GFP_DMA | flags);
4231 dma_unmap_sg(ctx->dev, req->src, src_nents,
4232 DMA_TO_DEVICE);
4236 edesc->src_nents = src_nents;
4237 sg_table = &edesc->sgt[0];
4239 memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
4240 dpaa2_fl_set_final(in_fle, true);
4241 dpaa2_fl_set_len(in_fle, to_hash);
4243 if (mapped_nents > 1) {
4246 sg_to_qm_sg_last(req->src, src_len, sg_table, 0);
4247 qm_sg_bytes = pad_sg_nents(mapped_nents) *
4248 sizeof(*sg_table);
4249 edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
4250 qm_sg_bytes,
4251 DMA_TO_DEVICE);
4252 if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
4253 dev_err(ctx->dev, "unable to map S/G table\n");
4257 edesc->qm_sg_bytes = qm_sg_bytes;
4258 dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
4259 dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
4261 dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
4262 dpaa2_fl_set_addr(in_fle, sg_dma_address(req->src));
4265 state->ctx_dma_len = ctx->ctx_len;
4266 state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx,
4267 ctx->ctx_len, DMA_FROM_DEVICE);
4268 if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
4269 dev_err(ctx->dev, "unable to map ctx\n");
4275 dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
4276 dpaa2_fl_set_addr(out_fle, state->ctx_dma);
4277 dpaa2_fl_set_len(out_fle, ctx->ctx_len);
4279 req_ctx->flc = &ctx->flc[UPDATE_FIRST];
4280 req_ctx->flc_dma = ctx->flc_dma[UPDATE_FIRST];
4281 req_ctx->cbk = ahash_done_ctx_dst;
4282 req_ctx->ctx = &req->base;
4283 req_ctx->edesc = edesc;
4285 ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
4286 if (ret != -EINPROGRESS &&
4287 !(ret == -EBUSY && req->base.flags &
4288 CRYPTO_TFM_REQ_MAY_BACKLOG))
4291 state->update = ahash_update_ctx;
4292 state->finup = ahash_finup_ctx;
4293 state->final = ahash_final_ctx;
4294 } else if (*next_buflen) {
4295 state->update = ahash_update_no_ctx;
4296 state->finup = ahash_finup_no_ctx;
4297 state->final = ahash_final_no_ctx;
4298 scatterwalk_map_and_copy(buf, req->src, 0,
4299 *next_buflen, 0);
4300 *buflen = *next_buflen;
4302 print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
4303 DUMP_PREFIX_ADDRESS, 16, 4, buf,
4304 *buflen, 1);
4309 ahash_unmap_ctx(ctx->dev, edesc, req, DMA_TO_DEVICE);
4310 qi_cache_free(edesc);
4314 static int ahash_finup_first(struct ahash_request *req)
4316 return ahash_digest(req);
4319 static int ahash_init(struct ahash_request *req)
4321 struct caam_hash_state *state = ahash_request_ctx(req);
4323 state->update = ahash_update_first;
4324 state->finup = ahash_finup_first;
4325 state->final = ahash_final_no_ctx;
4328 state->ctx_dma_len = 0;
4331 state->next_buflen = 0;
4336 static int ahash_update(struct ahash_request *req)
4338 struct caam_hash_state *state = ahash_request_ctx(req);
4340 return state->update(req);
4343 static int ahash_finup(struct ahash_request *req)
4345 struct caam_hash_state *state = ahash_request_ctx(req);
4347 return state->finup(req);
4350 static int ahash_final(struct ahash_request *req)
4352 struct caam_hash_state *state = ahash_request_ctx(req);
4354 return state->final(req);
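/*
 * Per-request state machine (editor's summary): ahash_init() installs
 * the "no context yet" hooks; once a first chunk has been hashed,
 * ahash_update_first()/ahash_update_no_ctx() switch them to the *_ctx
 * variants. A typical multi-call flow:
 *
 *	crypto_ahash_init(req)    -> ahash_init()
 *	crypto_ahash_update(req)  -> ahash_update_first()
 *	crypto_ahash_update(req)  -> ahash_update_ctx()   (repeated)
 *	crypto_ahash_final(req)   -> ahash_final_ctx()
 */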
4357 static int ahash_export(struct ahash_request *req, void *out)
4359 struct caam_hash_state *state = ahash_request_ctx(req);
4360 struct caam_export_state *export = out;
4361 u8 *buf = state->buf;
4362 int len = state->buflen;
4364 memcpy(export->buf, buf, len);
4365 memcpy(export->caam_ctx, state->caam_ctx, sizeof(export->caam_ctx));
4366 export->buflen = len;
4367 export->update = state->update;
4368 export->final = state->final;
4369 export->finup = state->finup;
4374 static int ahash_import(struct ahash_request *req, const void *in)
4376 struct caam_hash_state *state = ahash_request_ctx(req);
4377 const struct caam_export_state *export = in;
4379 memset(state, 0, sizeof(*state));
4380 memcpy(state->buf, export->buf, export->buflen);
4381 memcpy(state->caam_ctx, export->caam_ctx, sizeof(state->caam_ctx));
4382 state->buflen = export->buflen;
4383 state->update = export->update;
4384 state->final = export->final;
4385 state->finup = export->finup;
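/*
 * Sketch (editor's example) of suspending and resuming a hash through
 * the export/import hooks above; hypothetical names, error handling
 * elided:
 *
 *	u8 st[512];	/\* must be >= crypto_ahash_statesize(tfm) *\/
 *
 *	crypto_ahash_init(req);
 *	crypto_ahash_update(req);	<- first part of the message
 *	crypto_ahash_export(req, st);	-> ahash_export()
 *	...
 *	crypto_ahash_import(req, st);	-> ahash_import()
 *	crypto_ahash_final(req);	<- digest of the whole message
 */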
4390 struct caam_hash_template {
4391 char name[CRYPTO_MAX_ALG_NAME];
4392 char driver_name[CRYPTO_MAX_ALG_NAME];
4393 char hmac_name[CRYPTO_MAX_ALG_NAME];
4394 char hmac_driver_name[CRYPTO_MAX_ALG_NAME];
4395 unsigned int blocksize;
4396 struct ahash_alg template_ahash;
4400 /* ahash descriptors */
4401 static struct caam_hash_template driver_hash[] = {
4404 .driver_name = "sha1-caam-qi2",
4405 .hmac_name = "hmac(sha1)",
4406 .hmac_driver_name = "hmac-sha1-caam-qi2",
4407 .blocksize = SHA1_BLOCK_SIZE,
4410 .update = ahash_update,
4411 .final = ahash_final,
4412 .finup = ahash_finup,
4413 .digest = ahash_digest,
4414 .export = ahash_export,
4415 .import = ahash_import,
4416 .setkey = ahash_setkey,
4418 .digestsize = SHA1_DIGEST_SIZE,
4419 .statesize = sizeof(struct caam_export_state),
4422 .alg_type = OP_ALG_ALGSEL_SHA1,
4425 .driver_name = "sha224-caam-qi2",
4426 .hmac_name = "hmac(sha224)",
4427 .hmac_driver_name = "hmac-sha224-caam-qi2",
4428 .blocksize = SHA224_BLOCK_SIZE,
4431 .update = ahash_update,
4432 .final = ahash_final,
4433 .finup = ahash_finup,
4434 .digest = ahash_digest,
4435 .export = ahash_export,
4436 .import = ahash_import,
4437 .setkey = ahash_setkey,
4439 .digestsize = SHA224_DIGEST_SIZE,
4440 .statesize = sizeof(struct caam_export_state),
4443 .alg_type = OP_ALG_ALGSEL_SHA224,
4446 .driver_name = "sha256-caam-qi2",
4447 .hmac_name = "hmac(sha256)",
4448 .hmac_driver_name = "hmac-sha256-caam-qi2",
4449 .blocksize = SHA256_BLOCK_SIZE,
4452 .update = ahash_update,
4453 .final = ahash_final,
4454 .finup = ahash_finup,
4455 .digest = ahash_digest,
4456 .export = ahash_export,
4457 .import = ahash_import,
4458 .setkey = ahash_setkey,
4460 .digestsize = SHA256_DIGEST_SIZE,
4461 .statesize = sizeof(struct caam_export_state),
4464 .alg_type = OP_ALG_ALGSEL_SHA256,
4467 .driver_name = "sha384-caam-qi2",
4468 .hmac_name = "hmac(sha384)",
4469 .hmac_driver_name = "hmac-sha384-caam-qi2",
4470 .blocksize = SHA384_BLOCK_SIZE,
4473 .update = ahash_update,
4474 .final = ahash_final,
4475 .finup = ahash_finup,
4476 .digest = ahash_digest,
4477 .export = ahash_export,
4478 .import = ahash_import,
4479 .setkey = ahash_setkey,
4481 .digestsize = SHA384_DIGEST_SIZE,
4482 .statesize = sizeof(struct caam_export_state),
4485 .alg_type = OP_ALG_ALGSEL_SHA384,
4488 .driver_name = "sha512-caam-qi2",
4489 .hmac_name = "hmac(sha512)",
4490 .hmac_driver_name = "hmac-sha512-caam-qi2",
4491 .blocksize = SHA512_BLOCK_SIZE,
4494 .update = ahash_update,
4495 .final = ahash_final,
4496 .finup = ahash_finup,
4497 .digest = ahash_digest,
4498 .export = ahash_export,
4499 .import = ahash_import,
4500 .setkey = ahash_setkey,
4502 .digestsize = SHA512_DIGEST_SIZE,
4503 .statesize = sizeof(struct caam_export_state),
4506 .alg_type = OP_ALG_ALGSEL_SHA512,
4509 .driver_name = "md5-caam-qi2",
4510 .hmac_name = "hmac(md5)",
4511 .hmac_driver_name = "hmac-md5-caam-qi2",
4512 .blocksize = MD5_BLOCK_WORDS * 4,
4515 .update = ahash_update,
4516 .final = ahash_final,
4517 .finup = ahash_finup,
4518 .digest = ahash_digest,
4519 .export = ahash_export,
4520 .import = ahash_import,
4521 .setkey = ahash_setkey,
4523 .digestsize = MD5_DIGEST_SIZE,
4524 .statesize = sizeof(struct caam_export_state),
4527 .alg_type = OP_ALG_ALGSEL_MD5,
struct caam_hash_alg {
	struct list_head entry;
	struct device *dev;
	int alg_type;
	struct ahash_alg ahash_alg;
};

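/*
 * One-time per-tfm setup: map the key and flow context arrays for DMA and
 * derive the MDHA running digest length from the selected algorithm.
 * Keyed (hmac) algorithms postpone descriptor creation to setkey().
 */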
static int caam_hash_cra_init(struct crypto_tfm *tfm)
{
	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
	struct crypto_alg *base = tfm->__crt_alg;
	struct hash_alg_common *halg =
		 container_of(base, struct hash_alg_common, base);
	struct ahash_alg *alg =
		 container_of(halg, struct ahash_alg, halg);
	struct caam_hash_alg *caam_hash =
		 container_of(alg, struct caam_hash_alg, ahash_alg);
	struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
	/* Sizes for MDHA running digests: MD5, SHA1, 224, 256, 384, 512 */
	static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE,
					 HASH_MSG_LEN + SHA1_DIGEST_SIZE,
					 HASH_MSG_LEN + 32,
					 HASH_MSG_LEN + SHA256_DIGEST_SIZE,
					 HASH_MSG_LEN + 64,
					 HASH_MSG_LEN + SHA512_DIGEST_SIZE };
	dma_addr_t dma_addr;
	int i;

	ctx->dev = caam_hash->dev;

	if (alg->setkey) {
		ctx->adata.key_dma = dma_map_single_attrs(ctx->dev, ctx->key,
							  ARRAY_SIZE(ctx->key),
							  DMA_TO_DEVICE,
							  DMA_ATTR_SKIP_CPU_SYNC);
		if (dma_mapping_error(ctx->dev, ctx->adata.key_dma)) {
			dev_err(ctx->dev, "unable to map key\n");
			return -ENOMEM;
		}
	}

	dma_addr = dma_map_single_attrs(ctx->dev, ctx->flc, sizeof(ctx->flc),
					DMA_BIDIRECTIONAL,
					DMA_ATTR_SKIP_CPU_SYNC);
	if (dma_mapping_error(ctx->dev, dma_addr)) {
		dev_err(ctx->dev, "unable to map shared descriptors\n");
		if (ctx->adata.key_dma)
			dma_unmap_single_attrs(ctx->dev, ctx->adata.key_dma,
					       ARRAY_SIZE(ctx->key),
					       DMA_TO_DEVICE,
					       DMA_ATTR_SKIP_CPU_SYNC);
		return -ENOMEM;
	}

	for (i = 0; i < HASH_NUM_OP; i++)
		ctx->flc_dma[i] = dma_addr + i * sizeof(ctx->flc[i]);

	/* copy descriptor header template value */
	ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;

	ctx->ctx_len = runninglen[(ctx->adata.algtype &
				   OP_ALG_ALGSEL_SUBMASK) >>
				  OP_ALG_ALGSEL_SHIFT];

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct caam_hash_state));

	/*
	 * For keyed hash algorithms shared descriptors
	 * will be created later in setkey() callback
	 */
	return alg->setkey ? 0 : ahash_set_sh_desc(ahash);
}

static void caam_hash_cra_exit(struct crypto_tfm *tfm)
{
	struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);

	dma_unmap_single_attrs(ctx->dev, ctx->flc_dma[0], sizeof(ctx->flc),
			       DMA_BIDIRECTIONAL, DMA_ATTR_SKIP_CPU_SYNC);
	if (ctx->adata.key_dma)
		dma_unmap_single_attrs(ctx->dev, ctx->adata.key_dma,
				       ARRAY_SIZE(ctx->key), DMA_TO_DEVICE,
				       DMA_ATTR_SKIP_CPU_SYNC);
}

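/*
 * Instantiate either the hmac(...) or the unkeyed variant of a hash
 * template; the unkeyed variant drops the setkey() callback entirely.
 */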
static struct caam_hash_alg *caam_hash_alloc(struct device *dev,
	struct caam_hash_template *template, bool keyed)
{
	struct caam_hash_alg *t_alg;
	struct ahash_alg *halg;
	struct crypto_alg *alg;

	t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
	if (!t_alg)
		return ERR_PTR(-ENOMEM);

	t_alg->ahash_alg = template->template_ahash;
	halg = &t_alg->ahash_alg;
	alg = &halg->halg.base;

	if (keyed) {
		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->hmac_name);
		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->hmac_driver_name);
	} else {
		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->name);
		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->driver_name);
		t_alg->ahash_alg.setkey = NULL;
	}

	alg->cra_module = THIS_MODULE;
	alg->cra_init = caam_hash_cra_init;
	alg->cra_exit = caam_hash_cra_exit;
	alg->cra_ctxsize = sizeof(struct caam_hash_ctx);
	alg->cra_priority = CAAM_CRA_PRIORITY;
	alg->cra_blocksize = template->blocksize;
	alg->cra_alignmask = 0;
	alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY;

	t_alg->alg_type = template->alg_type;
	t_alg->dev = dev;

	return t_alg;
}

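/* FQDAN (frame queue data available notification) callback: kick NAPI */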
static void dpaa2_caam_fqdan_cb(struct dpaa2_io_notification_ctx *nctx)
{
	struct dpaa2_caam_priv_per_cpu *ppriv;

	ppriv = container_of(nctx, struct dpaa2_caam_priv_per_cpu, nctx);
	napi_schedule_irqoff(&ppriv->napi);
}

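/*
 * Register a notification context and a dequeue store on the affine DPIO
 * of each online CPU (up to num_pairs). A missing DPIO is reported as
 * -EPROBE_DEFER so the probe can be retried once DPIO devices show up.
 */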
static int __cold dpaa2_dpseci_dpio_setup(struct dpaa2_caam_priv *priv)
{
	struct device *dev = priv->dev;
	struct dpaa2_io_notification_ctx *nctx;
	struct dpaa2_caam_priv_per_cpu *ppriv;
	int err, i = 0, cpu;

	for_each_online_cpu(cpu) {
		ppriv = per_cpu_ptr(priv->ppriv, cpu);
		ppriv->priv = priv;
		nctx = &ppriv->nctx;
		nctx->is_cdan = 0;
		nctx->id = ppriv->rsp_fqid;
		nctx->desired_cpu = cpu;
		nctx->cb = dpaa2_caam_fqdan_cb;

		/* Register notification callbacks */
		ppriv->dpio = dpaa2_io_service_select(cpu);
		err = dpaa2_io_service_register(ppriv->dpio, nctx, dev);
		if (unlikely(err)) {
			dev_dbg(dev, "No affine DPIO for cpu %d\n", cpu);
			nctx->cb = NULL;
			/*
			 * If no affine DPIO for this core, there's probably
			 * none available for next cores either. Signal we want
			 * to retry later, in case the DPIO devices weren't
			 * probed yet.
			 */
			err = -EPROBE_DEFER;
			goto err;
		}

		ppriv->store = dpaa2_io_store_create(DPAA2_CAAM_STORE_SIZE,
						     dev);
		if (unlikely(!ppriv->store)) {
			dev_err(dev, "dpaa2_io_store_create() failed\n");
			err = -ENOMEM;
			goto err;
		}

		if (++i == priv->num_pairs)
			break;
	}

	return 0;

err:
	for_each_online_cpu(cpu) {
		ppriv = per_cpu_ptr(priv->ppriv, cpu);
		if (!ppriv->nctx.cb)
			break;
		dpaa2_io_service_deregister(ppriv->dpio, &ppriv->nctx, dev);
	}

	for_each_online_cpu(cpu) {
		ppriv = per_cpu_ptr(priv->ppriv, cpu);
		if (!ppriv->store)
			break;
		dpaa2_io_store_destroy(ppriv->store);
	}

	return err;
}

static void __cold dpaa2_dpseci_dpio_free(struct dpaa2_caam_priv *priv)
{
	struct dpaa2_caam_priv_per_cpu *ppriv;
	int i = 0, cpu;

	for_each_online_cpu(cpu) {
		ppriv = per_cpu_ptr(priv->ppriv, cpu);
		dpaa2_io_service_deregister(ppriv->dpio, &ppriv->nctx,
					    priv->dev);
		dpaa2_io_store_destroy(ppriv->store);

		if (++i == priv->num_pairs)
			return;
	}
}

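/*
 * Point each DPSECI Rx queue at the DPIO registered for it, so frame
 * availability on the response FQ triggers dpaa2_caam_fqdan_cb().
 */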
static int dpaa2_dpseci_bind(struct dpaa2_caam_priv *priv)
{
	struct dpseci_rx_queue_cfg rx_queue_cfg;
	struct device *dev = priv->dev;
	struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
	struct dpaa2_caam_priv_per_cpu *ppriv;
	int err = 0, i = 0, cpu;

	/* Configure Rx queues */
	for_each_online_cpu(cpu) {
		ppriv = per_cpu_ptr(priv->ppriv, cpu);

		rx_queue_cfg.options = DPSECI_QUEUE_OPT_DEST |
				       DPSECI_QUEUE_OPT_USER_CTX;
		rx_queue_cfg.order_preservation_en = 0;
		rx_queue_cfg.dest_cfg.dest_type = DPSECI_DEST_DPIO;
		rx_queue_cfg.dest_cfg.dest_id = ppriv->nctx.dpio_id;
		/*
		 * Rx priority (WQ) doesn't really matter, since we use
		 * pull mode, i.e. volatile dequeues from specific FQs
		 */
		rx_queue_cfg.dest_cfg.priority = 0;
		rx_queue_cfg.user_ctx = ppriv->nctx.qman64;

		err = dpseci_set_rx_queue(priv->mc_io, 0, ls_dev->mc_handle, i,
					  &rx_queue_cfg);
		if (err) {
			dev_err(dev, "dpseci_set_rx_queue() failed with err %d\n",
				err);
			return err;
		}

		if (++i == priv->num_pairs)
			break;
	}

	return err;
}

static void dpaa2_dpseci_congestion_free(struct dpaa2_caam_priv *priv)
{
	struct device *dev = priv->dev;

	if (!priv->cscn_mem)
		return;

	dma_unmap_single(dev, priv->cscn_dma, DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
	kfree(priv->cscn_mem);
}

static void dpaa2_dpseci_free(struct dpaa2_caam_priv *priv)
{
	struct device *dev = priv->dev;
	struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
	int err;

	if (DPSECI_VER(priv->major_ver, priv->minor_ver) > DPSECI_VER(5, 3)) {
		err = dpseci_reset(priv->mc_io, 0, ls_dev->mc_handle);
		if (err)
			dev_err(dev, "dpseci_reset() failed\n");
	}

	dpaa2_dpseci_congestion_free(priv);
	dpseci_close(priv->mc_io, 0, ls_dev->mc_handle);
}

static void dpaa2_caam_process_fd(struct dpaa2_caam_priv *priv,
				  const struct dpaa2_fd *fd)
{
	struct caam_request *req;
	u32 fd_err;

	if (dpaa2_fd_get_format(fd) != dpaa2_fd_list) {
		dev_err(priv->dev, "Only Frame List FD format is supported!\n");
		return;
	}

	fd_err = dpaa2_fd_get_ctrl(fd) & FD_CTRL_ERR_MASK;
	if (unlikely(fd_err))
		dev_err_ratelimited(priv->dev, "FD error: %08x\n", fd_err);

	/*
	 * FD[ADDR] is guaranteed to be valid, irrespective of errors reported
	 * in FD[ERR] or FD[FRC].
	 */
	req = dpaa2_caam_iova_to_virt(priv, dpaa2_fd_get_addr(fd));
	dma_unmap_single(priv->dev, req->fd_flt_dma, sizeof(req->fd_flt),
			 DMA_BIDIRECTIONAL);
	req->cbk(req->ctx, dpaa2_fd_get_frc(fd));
}

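/* Issue a volatile dequeue command for the response FQ into the store */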
static int dpaa2_caam_pull_fq(struct dpaa2_caam_priv_per_cpu *ppriv)
{
	int err;

	/* Retry while portal is busy */
	do {
		err = dpaa2_io_service_pull_fq(ppriv->dpio, ppriv->rsp_fqid,
					       ppriv->store);
	} while (err == -EBUSY);

	if (unlikely(err))
		dev_err(ppriv->priv->dev, "dpaa2_io_service_pull err %d", err);

	return err;
}

static int dpaa2_caam_store_consume(struct dpaa2_caam_priv_per_cpu *ppriv)
{
	struct dpaa2_dq *dq;
	int cleaned = 0, is_last;

	do {
		dq = dpaa2_io_store_next(ppriv->store, &is_last);
		if (unlikely(!dq)) {
			if (unlikely(!is_last)) {
				dev_dbg(ppriv->priv->dev,
					"FQ %d returned no valid frames\n",
					ppriv->rsp_fqid);
				/*
				 * MUST retry until we get some sort of
				 * valid response token (be it "empty dequeue"
				 * or a valid frame).
				 */
				continue;
			}
			break;
		}

		/* Process FD */
		dpaa2_caam_process_fd(ppriv->priv, dpaa2_dq_fd(dq));
		cleaned++;
	} while (!is_last);

	return cleaned;
}

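/*
 * NAPI poll: consume what the previous pull brought in, then keep pulling
 * until either the FQ drains or another full store would no longer fit in
 * the budget; only then complete NAPI and rearm notifications.
 */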
static int dpaa2_dpseci_poll(struct napi_struct *napi, int budget)
{
	struct dpaa2_caam_priv_per_cpu *ppriv;
	struct dpaa2_caam_priv *priv;
	int err, cleaned = 0, store_cleaned;

	ppriv = container_of(napi, struct dpaa2_caam_priv_per_cpu, napi);
	priv = ppriv->priv;

	if (unlikely(dpaa2_caam_pull_fq(ppriv)))
		return 0;

	do {
		store_cleaned = dpaa2_caam_store_consume(ppriv);
		cleaned += store_cleaned;

		if (store_cleaned == 0 ||
		    cleaned > budget - DPAA2_CAAM_STORE_SIZE)
			break;

		/* Try to dequeue some more */
		err = dpaa2_caam_pull_fq(ppriv);
		if (unlikely(err))
			break;
	} while (1);

	if (cleaned < budget) {
		napi_complete_done(napi, cleaned);
		err = dpaa2_io_service_rearm(ppriv->dpio, &ppriv->nctx);
		if (unlikely(err))
			dev_err(priv->dev, "Notification rearm failed: %d\n",
				err);
	}

	return cleaned;
}

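/*
 * Have the DPSECI write a congestion state change notification (CSCN) to
 * cscn_mem whenever the group enters or exits congestion; the enqueue
 * path polls this area and drops requests while congested.
 */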
static int dpaa2_dpseci_congestion_setup(struct dpaa2_caam_priv *priv,
					 u16 token)
{
	struct dpseci_congestion_notification_cfg cong_notif_cfg = { 0 };
	struct device *dev = priv->dev;
	int err;

	/*
	 * Congestion group feature supported starting with DPSECI API v5.1
	 * and only when object has been created with this capability.
	 */
	if ((DPSECI_VER(priv->major_ver, priv->minor_ver) < DPSECI_VER(5, 1)) ||
	    !(priv->dpseci_attr.options & DPSECI_OPT_HAS_CG))
		return 0;

	priv->cscn_mem = kzalloc(DPAA2_CSCN_SIZE + DPAA2_CSCN_ALIGN,
				 GFP_KERNEL | GFP_DMA);
	if (!priv->cscn_mem)
		return -ENOMEM;

	priv->cscn_mem_aligned = PTR_ALIGN(priv->cscn_mem, DPAA2_CSCN_ALIGN);
	priv->cscn_dma = dma_map_single(dev, priv->cscn_mem_aligned,
					DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, priv->cscn_dma)) {
		dev_err(dev, "Error mapping CSCN memory area\n");
		err = -ENOMEM;
		goto err_dma_map;
	}

	cong_notif_cfg.units = DPSECI_CONGESTION_UNIT_BYTES;
	cong_notif_cfg.threshold_entry = DPAA2_SEC_CONG_ENTRY_THRESH;
	cong_notif_cfg.threshold_exit = DPAA2_SEC_CONG_EXIT_THRESH;
	cong_notif_cfg.message_ctx = (uintptr_t)priv;
	cong_notif_cfg.message_iova = priv->cscn_dma;
	cong_notif_cfg.notification_mode = DPSECI_CGN_MODE_WRITE_MEM_ON_ENTER |
					DPSECI_CGN_MODE_WRITE_MEM_ON_EXIT |
					DPSECI_CGN_MODE_COHERENT_WRITE;

	err = dpseci_set_congestion_notification(priv->mc_io, 0, token,
						 &cong_notif_cfg);
	if (err) {
		dev_err(dev, "dpseci_set_congestion_notification failed\n");
		goto err_set_cong;
	}

	return 0;

err_set_cong:
	dma_unmap_single(dev, priv->cscn_dma, DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
err_dma_map:
	kfree(priv->cscn_mem);

	return err;
}

static int __cold dpaa2_dpseci_setup(struct fsl_mc_device *ls_dev)
{
	struct device *dev = &ls_dev->dev;
	struct dpaa2_caam_priv *priv;
	struct dpaa2_caam_priv_per_cpu *ppriv;
	int err, cpu;
	u8 i;

	priv = dev_get_drvdata(dev);

	priv->dev = dev;
	priv->dpsec_id = ls_dev->obj_desc.id;

	/* Get a handle for the DPSECI this interface is associated with */
	err = dpseci_open(priv->mc_io, 0, priv->dpsec_id, &ls_dev->mc_handle);
	if (err) {
		dev_err(dev, "dpseci_open() failed: %d\n", err);
		goto err_open;
	}

	err = dpseci_get_api_version(priv->mc_io, 0, &priv->major_ver,
				     &priv->minor_ver);
	if (err) {
		dev_err(dev, "dpseci_get_api_version() failed\n");
		goto err_get_vers;
	}

	dev_info(dev, "dpseci v%d.%d\n", priv->major_ver, priv->minor_ver);

	if (DPSECI_VER(priv->major_ver, priv->minor_ver) > DPSECI_VER(5, 3)) {
		err = dpseci_reset(priv->mc_io, 0, ls_dev->mc_handle);
		if (err) {
			dev_err(dev, "dpseci_reset() failed\n");
			goto err_get_vers;
		}
	}

	err = dpseci_get_attributes(priv->mc_io, 0, ls_dev->mc_handle,
				    &priv->dpseci_attr);
	if (err) {
		dev_err(dev, "dpseci_get_attributes() failed\n");
		goto err_get_vers;
	}

	err = dpseci_get_sec_attr(priv->mc_io, 0, ls_dev->mc_handle,
				  &priv->sec_attr);
	if (err) {
		dev_err(dev, "dpseci_get_sec_attr() failed\n");
		goto err_get_vers;
	}

	err = dpaa2_dpseci_congestion_setup(priv, ls_dev->mc_handle);
	if (err) {
		dev_err(dev, "setup_congestion() failed\n");
		goto err_get_vers;
	}

	priv->num_pairs = min(priv->dpseci_attr.num_rx_queues,
			      priv->dpseci_attr.num_tx_queues);
	if (priv->num_pairs > num_online_cpus()) {
		dev_warn(dev, "%d queues won't be used\n",
			 priv->num_pairs - num_online_cpus());
		priv->num_pairs = num_online_cpus();
	}

	for (i = 0; i < priv->dpseci_attr.num_rx_queues; i++) {
		err = dpseci_get_rx_queue(priv->mc_io, 0, ls_dev->mc_handle, i,
					  &priv->rx_queue_attr[i]);
		if (err) {
			dev_err(dev, "dpseci_get_rx_queue() failed\n");
			goto err_get_rx_queue;
		}
	}

	for (i = 0; i < priv->dpseci_attr.num_tx_queues; i++) {
		err = dpseci_get_tx_queue(priv->mc_io, 0, ls_dev->mc_handle, i,
					  &priv->tx_queue_attr[i]);
		if (err) {
			dev_err(dev, "dpseci_get_tx_queue() failed\n");
			goto err_get_rx_queue;
		}
	}

	i = 0;
	for_each_online_cpu(cpu) {
		u8 j;

		j = i % priv->num_pairs;

		ppriv = per_cpu_ptr(priv->ppriv, cpu);
		ppriv->req_fqid = priv->tx_queue_attr[j].fqid;

		/*
		 * Allow all cores to enqueue, while only some of them
		 * will take part in dequeuing.
		 */
		if (++i > priv->num_pairs)
			continue;

		ppriv->rsp_fqid = priv->rx_queue_attr[j].fqid;
		ppriv->prio = j;

		dev_dbg(dev, "pair %d: rx queue %d, tx queue %d\n", j,
			priv->rx_queue_attr[j].fqid,
			priv->tx_queue_attr[j].fqid);

		ppriv->net_dev.dev = *dev;
		INIT_LIST_HEAD(&ppriv->net_dev.napi_list);
		netif_napi_add(&ppriv->net_dev, &ppriv->napi, dpaa2_dpseci_poll,
			       DPAA2_CAAM_NAPI_WEIGHT);
	}

	return 0;

err_get_rx_queue:
	dpaa2_dpseci_congestion_free(priv);
err_get_vers:
	dpseci_close(priv->mc_io, 0, ls_dev->mc_handle);
err_open:
	return err;
}

static int dpaa2_dpseci_enable(struct dpaa2_caam_priv *priv)
{
	struct device *dev = priv->dev;
	struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
	struct dpaa2_caam_priv_per_cpu *ppriv;
	int i;

	for (i = 0; i < priv->num_pairs; i++) {
		ppriv = per_cpu_ptr(priv->ppriv, i);
		napi_enable(&ppriv->napi);
	}

	return dpseci_enable(priv->mc_io, 0, ls_dev->mc_handle);
}

static int __cold dpaa2_dpseci_disable(struct dpaa2_caam_priv *priv)
{
	struct device *dev = priv->dev;
	struct dpaa2_caam_priv_per_cpu *ppriv;
	struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
	int i, err = 0, enabled;

	err = dpseci_disable(priv->mc_io, 0, ls_dev->mc_handle);
	if (err) {
		dev_err(dev, "dpseci_disable() failed\n");
		return err;
	}

	err = dpseci_is_enabled(priv->mc_io, 0, ls_dev->mc_handle, &enabled);
	if (err) {
		dev_err(dev, "dpseci_is_enabled() failed\n");
		return err;
	}

	dev_dbg(dev, "disable: %s\n", enabled ? "false" : "true");

	for (i = 0; i < priv->num_pairs; i++) {
		ppriv = per_cpu_ptr(priv->ppriv, i);
		napi_disable(&ppriv->napi);
		netif_napi_del(&ppriv->napi);
	}

	return 0;
}

static struct list_head hash_list;

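/*
 * Probe order: SEC buffer cache, DMA mask, MC portal, per-CPU state,
 * DPSECI object setup, DPIO setup and binding, DPSECI enable - then
 * register every skcipher/aead/ahash algorithm the SEC instance
 * advertises support for.
 */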
static int dpaa2_caam_probe(struct fsl_mc_device *dpseci_dev)
{
	struct device *dev;
	struct dpaa2_caam_priv *priv;
	int i, err = 0;
	bool registered = false;

	/*
	 * There is no way to get CAAM endianness - there is no direct register
	 * space access and MC f/w does not provide this attribute.
	 * All DPAA2-based SoCs have little endian CAAM, thus hard-code this
	 * property.
	 */
	caam_little_end = true;

	caam_imx = false;

	dev = &dpseci_dev->dev;

	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	dev_set_drvdata(dev, priv);

	priv->domain = iommu_get_domain_for_dev(dev);

	qi_cache = kmem_cache_create("dpaa2_caamqicache", CAAM_QI_MEMCACHE_SIZE,
				     0, SLAB_CACHE_DMA, NULL);
	if (!qi_cache) {
		dev_err(dev, "Can't allocate SEC cache\n");
		return -ENOMEM;
	}

	err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(49));
	if (err) {
		dev_err(dev, "dma_set_mask_and_coherent() failed\n");
		goto err_dma_mask;
	}

	/* Obtain a MC portal */
	err = fsl_mc_portal_allocate(dpseci_dev, 0, &priv->mc_io);
	if (err) {
		if (err == -ENXIO)
			err = -EPROBE_DEFER;
		else
			dev_err(dev, "MC portal allocation failed\n");

		goto err_dma_mask;
	}

	priv->ppriv = alloc_percpu(*priv->ppriv);
	if (!priv->ppriv) {
		dev_err(dev, "alloc_percpu() failed\n");
		err = -ENOMEM;
		goto err_alloc_ppriv;
	}

	/* DPSECI initialization */
	err = dpaa2_dpseci_setup(dpseci_dev);
	if (err) {
		dev_err(dev, "dpaa2_dpseci_setup() failed\n");
		goto err_dpseci_setup;
	}

	/* DPIO */
	err = dpaa2_dpseci_dpio_setup(priv);
	if (err) {
		dev_err_probe(dev, err, "dpaa2_dpseci_dpio_setup() failed\n");
		goto err_dpio_setup;
	}

	/* DPSECI binding to DPIO */
	err = dpaa2_dpseci_bind(priv);
	if (err) {
		dev_err(dev, "dpaa2_dpseci_bind() failed\n");
		goto err_bind;
	}

	/* DPSECI enable */
	err = dpaa2_dpseci_enable(priv);
	if (err) {
		dev_err(dev, "dpaa2_dpseci_enable() failed\n");
		goto err_bind;
	}

	dpaa2_dpseci_debugfs_init(priv);

	/* register crypto algorithms the device supports */
	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		struct caam_skcipher_alg *t_alg = driver_algs + i;
		u32 alg_sel = t_alg->caam.class1_alg_type & OP_ALG_ALGSEL_MASK;

		/* Skip DES algorithms if not supported by device */
		if (!priv->sec_attr.des_acc_num &&
		    (alg_sel == OP_ALG_ALGSEL_3DES ||
		     alg_sel == OP_ALG_ALGSEL_DES))
			continue;

		/* Skip AES algorithms if not supported by device */
		if (!priv->sec_attr.aes_acc_num &&
		    alg_sel == OP_ALG_ALGSEL_AES)
			continue;

		/* Skip CHACHA20 algorithms if not supported by device */
		if (alg_sel == OP_ALG_ALGSEL_CHACHA20 &&
		    !priv->sec_attr.ccha_acc_num)
			continue;

		t_alg->caam.dev = dev;
		caam_skcipher_alg_init(t_alg);

		err = crypto_register_skcipher(&t_alg->skcipher);
		if (err) {
			dev_warn(dev, "%s alg registration failed: %d\n",
				 t_alg->skcipher.base.cra_driver_name, err);
			continue;
		}

		t_alg->registered = true;
		registered = true;
	}

	for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
		struct caam_aead_alg *t_alg = driver_aeads + i;
		u32 c1_alg_sel = t_alg->caam.class1_alg_type &
				 OP_ALG_ALGSEL_MASK;
		u32 c2_alg_sel = t_alg->caam.class2_alg_type &
				 OP_ALG_ALGSEL_MASK;

		/* Skip DES algorithms if not supported by device */
		if (!priv->sec_attr.des_acc_num &&
		    (c1_alg_sel == OP_ALG_ALGSEL_3DES ||
		     c1_alg_sel == OP_ALG_ALGSEL_DES))
			continue;

		/* Skip AES algorithms if not supported by device */
		if (!priv->sec_attr.aes_acc_num &&
		    c1_alg_sel == OP_ALG_ALGSEL_AES)
			continue;

		/* Skip CHACHA20 algorithms if not supported by device */
		if (c1_alg_sel == OP_ALG_ALGSEL_CHACHA20 &&
		    !priv->sec_attr.ccha_acc_num)
			continue;

		/* Skip POLY1305 algorithms if not supported by device */
		if (c2_alg_sel == OP_ALG_ALGSEL_POLY1305 &&
		    !priv->sec_attr.ptha_acc_num)
			continue;

		/*
		 * Skip algorithms requiring message digests
		 * if MD not supported by device.
		 */
		if ((c2_alg_sel & ~OP_ALG_ALGSEL_SUBMASK) == 0x40 &&
		    !priv->sec_attr.md_acc_num)
			continue;

		t_alg->caam.dev = dev;
		caam_aead_alg_init(t_alg);

		err = crypto_register_aead(&t_alg->aead);
		if (err) {
			dev_warn(dev, "%s alg registration failed: %d\n",
				 t_alg->aead.base.cra_driver_name, err);
			continue;
		}

		t_alg->registered = true;
		registered = true;
	}

	if (registered)
		dev_info(dev, "algorithms registered in /proc/crypto\n");

	/* register hash algorithms the device supports */
	INIT_LIST_HEAD(&hash_list);

	/*
	 * Skip registration of any hashing algorithms if MD block
	 * is not present.
	 */
	if (!priv->sec_attr.md_acc_num)
		return 0;

	for (i = 0; i < ARRAY_SIZE(driver_hash); i++) {
		struct caam_hash_alg *t_alg;
		struct caam_hash_template *alg = driver_hash + i;

		/* register hmac version */
		t_alg = caam_hash_alloc(dev, alg, true);
		if (IS_ERR(t_alg)) {
			err = PTR_ERR(t_alg);
			dev_warn(dev, "%s hash alg allocation failed: %d\n",
				 alg->hmac_driver_name, err);
			continue;
		}

		err = crypto_register_ahash(&t_alg->ahash_alg);
		if (err) {
			dev_warn(dev, "%s alg registration failed: %d\n",
				 t_alg->ahash_alg.halg.base.cra_driver_name,
				 err);
			kfree(t_alg);
		} else {
			list_add_tail(&t_alg->entry, &hash_list);
		}

		/* register unkeyed version */
		t_alg = caam_hash_alloc(dev, alg, false);
		if (IS_ERR(t_alg)) {
			err = PTR_ERR(t_alg);
			dev_warn(dev, "%s alg allocation failed: %d\n",
				 alg->driver_name, err);
			continue;
		}

		err = crypto_register_ahash(&t_alg->ahash_alg);
		if (err) {
			dev_warn(dev, "%s alg registration failed: %d\n",
				 t_alg->ahash_alg.halg.base.cra_driver_name,
				 err);
			kfree(t_alg);
		} else {
			list_add_tail(&t_alg->entry, &hash_list);
		}
	}

	if (!list_empty(&hash_list))
		dev_info(dev, "hash algorithms registered in /proc/crypto\n");

	return err;

err_bind:
	dpaa2_dpseci_dpio_free(priv);
err_dpio_setup:
	dpaa2_dpseci_free(priv);
err_dpseci_setup:
	free_percpu(priv->ppriv);
err_alloc_ppriv:
	fsl_mc_portal_free(priv->mc_io);
err_dma_mask:
	kmem_cache_destroy(qi_cache);

	return err;
}

static int __cold dpaa2_caam_remove(struct fsl_mc_device *ls_dev)
{
	struct device *dev;
	struct dpaa2_caam_priv *priv;
	int i;

	dev = &ls_dev->dev;
	priv = dev_get_drvdata(dev);

	dpaa2_dpseci_debugfs_exit(priv);

	for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
		struct caam_aead_alg *t_alg = driver_aeads + i;

		if (t_alg->registered)
			crypto_unregister_aead(&t_alg->aead);
	}

	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		struct caam_skcipher_alg *t_alg = driver_algs + i;

		if (t_alg->registered)
			crypto_unregister_skcipher(&t_alg->skcipher);
	}

	if (hash_list.next) {
		struct caam_hash_alg *t_hash_alg, *p;

		list_for_each_entry_safe(t_hash_alg, p, &hash_list, entry) {
			crypto_unregister_ahash(&t_hash_alg->ahash_alg);
			list_del(&t_hash_alg->entry);
			kfree(t_hash_alg);
		}
	}

	dpaa2_dpseci_disable(priv);
	dpaa2_dpseci_dpio_free(priv);
	dpaa2_dpseci_free(priv);
	free_percpu(priv->ppriv);
	fsl_mc_portal_free(priv->mc_io);
	kmem_cache_destroy(qi_cache);

	return 0;
}

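/**
 * dpaa2_caam_enqueue - enqueue a crypto request to the DPSECI Tx queue
 * @dev: device of the DPSECI object
 * @req: previously prepared CAAM request
 *
 * Returns -EINPROGRESS if the frame was handed to hardware, -EBUSY when the
 * congestion group is congested, or -EIO on DMA mapping/enqueue failure;
 * completion is eventually signaled through req->cbk().
 */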
int dpaa2_caam_enqueue(struct device *dev, struct caam_request *req)
{
	struct dpaa2_fd fd;
	struct dpaa2_caam_priv *priv = dev_get_drvdata(dev);
	struct dpaa2_caam_priv_per_cpu *ppriv;
	int err = 0, i;

	if (IS_ERR(req))
		return PTR_ERR(req);

	if (priv->cscn_mem) {
		dma_sync_single_for_cpu(priv->dev, priv->cscn_dma,
					DPAA2_CSCN_SIZE,
					DMA_FROM_DEVICE);
		if (unlikely(dpaa2_cscn_state_congested(priv->cscn_mem_aligned))) {
			dev_dbg_ratelimited(dev, "Dropping request\n");
			return -EBUSY;
		}
	}

	dpaa2_fl_set_flc(&req->fd_flt[1], req->flc_dma);

	req->fd_flt_dma = dma_map_single(dev, req->fd_flt, sizeof(req->fd_flt),
					 DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, req->fd_flt_dma)) {
		dev_err(dev, "DMA mapping error for QI enqueue request\n");
		goto err_out;
	}

	memset(&fd, 0, sizeof(fd));
	dpaa2_fd_set_format(&fd, dpaa2_fd_list);
	dpaa2_fd_set_addr(&fd, req->fd_flt_dma);
	dpaa2_fd_set_len(&fd, dpaa2_fl_get_len(&req->fd_flt[1]));
	dpaa2_fd_set_flc(&fd, req->flc_dma);

	ppriv = raw_cpu_ptr(priv->ppriv);
	for (i = 0; i < (priv->dpseci_attr.num_tx_queues << 1); i++) {
		err = dpaa2_io_service_enqueue_fq(ppriv->dpio, ppriv->req_fqid,
						  &fd);
		if (err != -EBUSY)
			break;

		cpu_relax();
	}

	if (unlikely(err)) {
		dev_err_ratelimited(dev, "Error enqueuing frame: %d\n", err);
		goto err_out;
	}

	return -EINPROGRESS;

err_out:
	dma_unmap_single(dev, req->fd_flt_dma, sizeof(req->fd_flt),
			 DMA_BIDIRECTIONAL);
	return -EIO;
}
EXPORT_SYMBOL(dpaa2_caam_enqueue);

static const struct fsl_mc_device_id dpaa2_caam_match_id_table[] = {
	{
		.vendor = FSL_MC_VENDOR_FREESCALE,
		.obj_type = "dpseci",
	},
	{ .vendor = 0x0 }
};
MODULE_DEVICE_TABLE(fslmc, dpaa2_caam_match_id_table);

static struct fsl_mc_driver dpaa2_caam_driver = {
	.driver = {
		.name = KBUILD_MODNAME,
		.owner = THIS_MODULE,
	},
	.probe = dpaa2_caam_probe,
	.remove = dpaa2_caam_remove,
	.match_id_table = dpaa2_caam_match_id_table
};

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Freescale Semiconductor, Inc");
MODULE_DESCRIPTION("Freescale DPAA2 CAAM Driver");

module_fsl_mc_driver(dpaa2_caam_driver);