1 // SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
3 * caam - Freescale FSL CAAM support for Public Key Cryptography
5 * Copyright 2016 Freescale Semiconductor, Inc.
6 * Copyright 2018-2019 NXP
8 * There is no Shared Descriptor for PKC, so the Job Descriptor must carry
9 * all the desired key parameters, input and output pointers.
16 #include "desc_constr.h"
17 #include "sg_sw_sec4.h"
20 #define DESC_RSA_PUB_LEN (2 * CAAM_CMD_SZ + SIZEOF_RSA_PUB_PDB)
21 #define DESC_RSA_PRIV_F1_LEN (2 * CAAM_CMD_SZ + \
22 SIZEOF_RSA_PRIV_F1_PDB)
23 #define DESC_RSA_PRIV_F2_LEN (2 * CAAM_CMD_SZ + \
24 SIZEOF_RSA_PRIV_F2_PDB)
25 #define DESC_RSA_PRIV_F3_LEN (2 * CAAM_CMD_SZ + \
26 SIZEOF_RSA_PRIV_F3_PDB)
27 #define CAAM_RSA_MAX_INPUT_SIZE 512 /* for a 4096-bit modulus */
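/*
 * Note: each DESC_RSA_*_LEN above is two command words (the job descriptor
 * header and the final PROTOCOL operation command) plus the corresponding
 * Protocol Data Block, which is what the init_rsa_*_desc() helpers emit into
 * hw_desc.
 *
 * CAAM_RSA_MAX_INPUT_SIZE is in bytes: 512 * 8 = 4096-bit modulus. The
 * zero_buffer below is allocated CAAM_RSA_MAX_INPUT_SIZE - 1 bytes, the
 * worst-case amount of left padding needed when the source holds a single
 * non-zero byte.
 */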
29 /* buffer filled with zeros, used for padding */
30 static u8 *zero_buffer;
33 * variable used to avoid double free of resources in case
34 * algorithm registration was unsuccessful
36 static bool init_done;
38 struct caam_akcipher_alg {
39 struct akcipher_alg akcipher;
43 static void rsa_io_unmap(struct device *dev, struct rsa_edesc *edesc,
44 struct akcipher_request *req)
46 struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
48 dma_unmap_sg(dev, req->dst, edesc->dst_nents, DMA_FROM_DEVICE);
49 dma_unmap_sg(dev, req_ctx->fixup_src, edesc->src_nents, DMA_TO_DEVICE);
51 if (edesc->sec4_sg_bytes)
52 dma_unmap_single(dev, edesc->sec4_sg_dma, edesc->sec4_sg_bytes,
56 static void rsa_pub_unmap(struct device *dev, struct rsa_edesc *edesc,
57 struct akcipher_request *req)
59 struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
60 struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
61 struct caam_rsa_key *key = &ctx->key;
62 struct rsa_pub_pdb *pdb = &edesc->pdb.pub;
64 dma_unmap_single(dev, pdb->n_dma, key->n_sz, DMA_TO_DEVICE);
65 dma_unmap_single(dev, pdb->e_dma, key->e_sz, DMA_TO_DEVICE);
68 static void rsa_priv_f1_unmap(struct device *dev, struct rsa_edesc *edesc,
69 struct akcipher_request *req)
71 struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
72 struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
73 struct caam_rsa_key *key = &ctx->key;
74 struct rsa_priv_f1_pdb *pdb = &edesc->pdb.priv_f1;
76 dma_unmap_single(dev, pdb->n_dma, key->n_sz, DMA_TO_DEVICE);
77 dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE);
80 static void rsa_priv_f2_unmap(struct device *dev, struct rsa_edesc *edesc,
81 struct akcipher_request *req)
83 struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
84 struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
85 struct caam_rsa_key *key = &ctx->key;
86 struct rsa_priv_f2_pdb *pdb = &edesc->pdb.priv_f2;
87 size_t p_sz = key->p_sz;
88 size_t q_sz = key->q_sz;
90 dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE);
91 dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
92 dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
93 dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
94 dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_BIDIRECTIONAL);
97 static void rsa_priv_f3_unmap(struct device *dev, struct rsa_edesc *edesc,
98 struct akcipher_request *req)
100 struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
101 struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
102 struct caam_rsa_key *key = &ctx->key;
103 struct rsa_priv_f3_pdb *pdb = &edesc->pdb.priv_f3;
104 size_t p_sz = key->p_sz;
105 size_t q_sz = key->q_sz;
107 dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
108 dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
109 dma_unmap_single(dev, pdb->dp_dma, p_sz, DMA_TO_DEVICE);
110 dma_unmap_single(dev, pdb->dq_dma, q_sz, DMA_TO_DEVICE);
111 dma_unmap_single(dev, pdb->c_dma, p_sz, DMA_TO_DEVICE);
112 dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
113 dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_BIDIRECTIONAL);
116 /* RSA Job Completion handler */
117 static void rsa_pub_done(struct device *dev, u32 *desc, u32 err, void *context)
119 struct akcipher_request *req = context;
120 struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
121 struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
122 struct rsa_edesc *edesc;
127 ecode = caam_jr_strstatus(dev, err);
129 edesc = req_ctx->edesc;
130 has_bklog = edesc->bklog;
132 rsa_pub_unmap(dev, edesc, req);
133 rsa_io_unmap(dev, edesc, req);
137 * If the backlog flag is not set, the completion of the request is done
138 * by CAAM, not by the crypto engine.
141 akcipher_request_complete(req, ecode);
143 crypto_finalize_akcipher_request(jrp->engine, req, ecode);
146 static void rsa_priv_f_done(struct device *dev, u32 *desc, u32 err,
149 struct akcipher_request *req = context;
150 struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
151 struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
152 struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
153 struct caam_rsa_key *key = &ctx->key;
154 struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
155 struct rsa_edesc *edesc;
160 ecode = caam_jr_strstatus(dev, err);
162 edesc = req_ctx->edesc;
163 has_bklog = edesc->bklog;
165 switch (key->priv_form) {
167 rsa_priv_f1_unmap(dev, edesc, req);
170 rsa_priv_f2_unmap(dev, edesc, req);
173 rsa_priv_f3_unmap(dev, edesc, req);
176 rsa_io_unmap(dev, edesc, req);
180 * If the backlog flag is not set, the completion of the request is done
181 * by CAAM, not by the crypto engine.
184 akcipher_request_complete(req, ecode);
186 crypto_finalize_akcipher_request(jrp->engine, req, ecode);
190 * caam_rsa_count_leading_zeros - Count the leading zero bytes that need to
191 * be stripped from a given scatterlist
193 * @sgl : scatterlist to count zeros from
194 * @nbytes: maximum number of leading zero bytes to strip
195 * @flags : operation flags
197 static int caam_rsa_count_leading_zeros(struct scatterlist *sgl,
201 struct sg_mapping_iter miter;
204 unsigned int tbytes = nbytes;
207 ents = sg_nents_for_len(sgl, nbytes);
211 sg_miter_start(&miter, sgl, ents, SG_MITER_FROM_SG | flags);
216 /* do not strip more than given bytes */
217 while (len && !*buff && lzeros < nbytes) {
226 sg_miter_next(&miter);
234 miter.consumed = lzeros;
235 sg_miter_stop(&miter);
238 return tbytes - nbytes;
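/*
 * Illustrative example (hypothetical values): for a 256-byte modulus and a
 * 260-byte source that starts with four 0x00 bytes, the caller passes
 * nbytes = 4 and this helper returns 4, so rsa_edesc_alloc() fast-forwards
 * the scatterlist by four bytes and the effective input length becomes 256.
 */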
241 static struct rsa_edesc *rsa_edesc_alloc(struct akcipher_request *req,
244 struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
245 struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
246 struct device *dev = ctx->dev;
247 struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
248 struct caam_rsa_key *key = &ctx->key;
249 struct rsa_edesc *edesc;
250 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
251 GFP_KERNEL : GFP_ATOMIC;
252 int sg_flags = (flags == GFP_ATOMIC) ? SG_MITER_ATOMIC : 0;
253 int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
254 int src_nents, dst_nents;
255 int mapped_src_nents, mapped_dst_nents;
256 unsigned int diff_size = 0;
259 if (req->src_len > key->n_sz) {
261 * strip leading zeros and
262 * return the number of zeros to skip
264 lzeros = caam_rsa_count_leading_zeros(req->src, req->src_len -
265 key->n_sz, sg_flags);
267 return ERR_PTR(lzeros);
269 req_ctx->fixup_src = scatterwalk_ffwd(req_ctx->src, req->src,
271 req_ctx->fixup_src_len = req->src_len - lzeros;
274 * input src is shorter than the key modulus n,
275 * so it will be zero padded
277 diff_size = key->n_sz - req->src_len;
278 req_ctx->fixup_src = req->src;
279 req_ctx->fixup_src_len = req->src_len;
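/*
 * At this point fixup_src/fixup_src_len describe the effective input:
 * sources longer than the modulus had their leading zero bytes stripped
 * above, while shorter sources will be left-padded with diff_size zero
 * bytes sourced from the pre-mapped ctx->padding_dma buffer when the
 * input S/G table is built below.
 */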
282 src_nents = sg_nents_for_len(req_ctx->fixup_src,
283 req_ctx->fixup_src_len);
284 dst_nents = sg_nents_for_len(req->dst, req->dst_len);
286 mapped_src_nents = dma_map_sg(dev, req_ctx->fixup_src, src_nents,
288 if (unlikely(!mapped_src_nents)) {
289 dev_err(dev, "unable to map source\n");
290 return ERR_PTR(-ENOMEM);
292 mapped_dst_nents = dma_map_sg(dev, req->dst, dst_nents,
294 if (unlikely(!mapped_dst_nents)) {
295 dev_err(dev, "unable to map destination\n");
299 if (!diff_size && mapped_src_nents == 1)
300 sec4_sg_len = 0; /* no need for an input hw s/g table */
302 sec4_sg_len = mapped_src_nents + !!diff_size;
303 sec4_sg_index = sec4_sg_len;
305 if (mapped_dst_nents > 1)
306 sec4_sg_len += pad_sg_nents(mapped_dst_nents);
308 sec4_sg_len = pad_sg_nents(sec4_sg_len);
310 sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);
312 /* allocate space for base edesc, hw desc commands and link tables */
313 edesc = kzalloc(sizeof(*edesc) + desclen + sec4_sg_bytes,
318 edesc->sec4_sg = (void *)edesc + sizeof(*edesc) + desclen;
320 dma_to_sec4_sg_one(edesc->sec4_sg, ctx->padding_dma, diff_size,
324 sg_to_sec4_sg_last(req_ctx->fixup_src, req_ctx->fixup_src_len,
325 edesc->sec4_sg + !!diff_size, 0);
327 if (mapped_dst_nents > 1)
328 sg_to_sec4_sg_last(req->dst, req->dst_len,
329 edesc->sec4_sg + sec4_sg_index, 0);
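/*
 * Resulting hw S/G table layout (when used): an optional leading entry
 * pointing at padding_dma for diff_size zero bytes, then the source
 * entries (the last one flagged as final), then, starting at
 * sec4_sg_index, the destination entries when the destination is not
 * contiguous.
 */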
331 /* Save nents for later use in Job Descriptor */
332 edesc->src_nents = src_nents;
333 edesc->dst_nents = dst_nents;
335 req_ctx->edesc = edesc;
340 edesc->mapped_src_nents = mapped_src_nents;
341 edesc->mapped_dst_nents = mapped_dst_nents;
343 edesc->sec4_sg_dma = dma_map_single(dev, edesc->sec4_sg,
344 sec4_sg_bytes, DMA_TO_DEVICE);
345 if (dma_mapping_error(dev, edesc->sec4_sg_dma)) {
346 dev_err(dev, "unable to map S/G table\n");
350 edesc->sec4_sg_bytes = sec4_sg_bytes;
352 print_hex_dump_debug("caampkc sec4_sg@" __stringify(__LINE__) ": ",
353 DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg,
354 edesc->sec4_sg_bytes, 1);
361 dma_unmap_sg(dev, req->dst, dst_nents, DMA_FROM_DEVICE);
363 dma_unmap_sg(dev, req_ctx->fixup_src, src_nents, DMA_TO_DEVICE);
364 return ERR_PTR(-ENOMEM);
367 static int akcipher_do_one_req(struct crypto_engine *engine, void *areq)
369 struct akcipher_request *req = container_of(areq,
370 struct akcipher_request,
372 struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
373 struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
374 struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
375 struct device *jrdev = ctx->dev;
376 u32 *desc = req_ctx->edesc->hw_desc;
379 req_ctx->edesc->bklog = true;
381 ret = caam_jr_enqueue(jrdev, desc, req_ctx->akcipher_op_done, req);
383 if (ret == -ENOSPC && engine->retry_support)
386 if (ret != -EINPROGRESS) {
387 rsa_pub_unmap(jrdev, req_ctx->edesc, req);
388 rsa_io_unmap(jrdev, req_ctx->edesc, req);
389 kfree(req_ctx->edesc);
397 static int set_rsa_pub_pdb(struct akcipher_request *req,
398 struct rsa_edesc *edesc)
400 struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
401 struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
402 struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
403 struct caam_rsa_key *key = &ctx->key;
404 struct device *dev = ctx->dev;
405 struct rsa_pub_pdb *pdb = &edesc->pdb.pub;
406 int sec4_sg_index = 0;
408 pdb->n_dma = dma_map_single(dev, key->n, key->n_sz, DMA_TO_DEVICE);
409 if (dma_mapping_error(dev, pdb->n_dma)) {
410 dev_err(dev, "Unable to map RSA modulus memory\n");
414 pdb->e_dma = dma_map_single(dev, key->e, key->e_sz, DMA_TO_DEVICE);
415 if (dma_mapping_error(dev, pdb->e_dma)) {
416 dev_err(dev, "Unable to map RSA public exponent memory\n");
417 dma_unmap_single(dev, pdb->n_dma, key->n_sz, DMA_TO_DEVICE);
421 if (edesc->mapped_src_nents > 1) {
422 pdb->sgf |= RSA_PDB_SGF_F;
423 pdb->f_dma = edesc->sec4_sg_dma;
424 sec4_sg_index += edesc->mapped_src_nents;
426 pdb->f_dma = sg_dma_address(req_ctx->fixup_src);
429 if (edesc->mapped_dst_nents > 1) {
430 pdb->sgf |= RSA_PDB_SGF_G;
431 pdb->g_dma = edesc->sec4_sg_dma +
432 sec4_sg_index * sizeof(struct sec4_sg_entry);
434 pdb->g_dma = sg_dma_address(req->dst);
437 pdb->sgf |= (key->e_sz << RSA_PDB_E_SHIFT) | key->n_sz;
438 pdb->f_len = req_ctx->fixup_src_len;
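/*
 * pdb->sgf packs the key sizes (e_sz shifted up by RSA_PDB_E_SHIFT, n_sz
 * in the low bits) together with the RSA_PDB_SGF_F/G flags that tell the
 * engine whether f_dma/g_dma point at scatter/gather tables or at
 * contiguous buffers; f_len carries the (possibly zero-stripped) input
 * length.
 */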
443 static int set_rsa_priv_f1_pdb(struct akcipher_request *req,
444 struct rsa_edesc *edesc)
446 struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
447 struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
448 struct caam_rsa_key *key = &ctx->key;
449 struct device *dev = ctx->dev;
450 struct rsa_priv_f1_pdb *pdb = &edesc->pdb.priv_f1;
451 int sec4_sg_index = 0;
453 pdb->n_dma = dma_map_single(dev, key->n, key->n_sz, DMA_TO_DEVICE);
454 if (dma_mapping_error(dev, pdb->n_dma)) {
455 dev_err(dev, "Unable to map modulus memory\n");
459 pdb->d_dma = dma_map_single(dev, key->d, key->d_sz, DMA_TO_DEVICE);
460 if (dma_mapping_error(dev, pdb->d_dma)) {
461 dev_err(dev, "Unable to map RSA private exponent memory\n");
462 dma_unmap_single(dev, pdb->n_dma, key->n_sz, DMA_TO_DEVICE);
466 if (edesc->mapped_src_nents > 1) {
467 pdb->sgf |= RSA_PRIV_PDB_SGF_G;
468 pdb->g_dma = edesc->sec4_sg_dma;
469 sec4_sg_index += edesc->mapped_src_nents;
472 struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
474 pdb->g_dma = sg_dma_address(req_ctx->fixup_src);
477 if (edesc->mapped_dst_nents > 1) {
478 pdb->sgf |= RSA_PRIV_PDB_SGF_F;
479 pdb->f_dma = edesc->sec4_sg_dma +
480 sec4_sg_index * sizeof(struct sec4_sg_entry);
482 pdb->f_dma = sg_dma_address(req->dst);
485 pdb->sgf |= (key->d_sz << RSA_PDB_D_SHIFT) | key->n_sz;
490 static int set_rsa_priv_f2_pdb(struct akcipher_request *req,
491 struct rsa_edesc *edesc)
493 struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
494 struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
495 struct caam_rsa_key *key = &ctx->key;
496 struct device *dev = ctx->dev;
497 struct rsa_priv_f2_pdb *pdb = &edesc->pdb.priv_f2;
498 int sec4_sg_index = 0;
499 size_t p_sz = key->p_sz;
500 size_t q_sz = key->q_sz;
502 pdb->d_dma = dma_map_single(dev, key->d, key->d_sz, DMA_TO_DEVICE);
503 if (dma_mapping_error(dev, pdb->d_dma)) {
504 dev_err(dev, "Unable to map RSA private exponent memory\n");
508 pdb->p_dma = dma_map_single(dev, key->p, p_sz, DMA_TO_DEVICE);
509 if (dma_mapping_error(dev, pdb->p_dma)) {
510 dev_err(dev, "Unable to map RSA prime factor p memory\n");
514 pdb->q_dma = dma_map_single(dev, key->q, q_sz, DMA_TO_DEVICE);
515 if (dma_mapping_error(dev, pdb->q_dma)) {
516 dev_err(dev, "Unable to map RSA prime factor q memory\n");
520 pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_BIDIRECTIONAL);
521 if (dma_mapping_error(dev, pdb->tmp1_dma)) {
522 dev_err(dev, "Unable to map RSA tmp1 memory\n");
526 pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_BIDIRECTIONAL);
527 if (dma_mapping_error(dev, pdb->tmp2_dma)) {
528 dev_err(dev, "Unable to map RSA tmp2 memory\n");
532 if (edesc->mapped_src_nents > 1) {
533 pdb->sgf |= RSA_PRIV_PDB_SGF_G;
534 pdb->g_dma = edesc->sec4_sg_dma;
535 sec4_sg_index += edesc->mapped_src_nents;
537 struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
539 pdb->g_dma = sg_dma_address(req_ctx->fixup_src);
542 if (edesc->mapped_dst_nents > 1) {
543 pdb->sgf |= RSA_PRIV_PDB_SGF_F;
544 pdb->f_dma = edesc->sec4_sg_dma +
545 sec4_sg_index * sizeof(struct sec4_sg_entry);
547 pdb->f_dma = sg_dma_address(req->dst);
550 pdb->sgf |= (key->d_sz << RSA_PDB_D_SHIFT) | key->n_sz;
551 pdb->p_q_len = (q_sz << RSA_PDB_Q_SHIFT) | p_sz;
556 dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
558 dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
560 dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
562 dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE);
567 static int set_rsa_priv_f3_pdb(struct akcipher_request *req,
568 struct rsa_edesc *edesc)
570 struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
571 struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
572 struct caam_rsa_key *key = &ctx->key;
573 struct device *dev = ctx->dev;
574 struct rsa_priv_f3_pdb *pdb = &edesc->pdb.priv_f3;
575 int sec4_sg_index = 0;
576 size_t p_sz = key->p_sz;
577 size_t q_sz = key->q_sz;
579 pdb->p_dma = dma_map_single(dev, key->p, p_sz, DMA_TO_DEVICE);
580 if (dma_mapping_error(dev, pdb->p_dma)) {
581 dev_err(dev, "Unable to map RSA prime factor p memory\n");
585 pdb->q_dma = dma_map_single(dev, key->q, q_sz, DMA_TO_DEVICE);
586 if (dma_mapping_error(dev, pdb->q_dma)) {
587 dev_err(dev, "Unable to map RSA prime factor q memory\n");
591 pdb->dp_dma = dma_map_single(dev, key->dp, p_sz, DMA_TO_DEVICE);
592 if (dma_mapping_error(dev, pdb->dp_dma)) {
593 dev_err(dev, "Unable to map RSA exponent dp memory\n");
597 pdb->dq_dma = dma_map_single(dev, key->dq, q_sz, DMA_TO_DEVICE);
598 if (dma_mapping_error(dev, pdb->dq_dma)) {
599 dev_err(dev, "Unable to map RSA exponent dq memory\n");
603 pdb->c_dma = dma_map_single(dev, key->qinv, p_sz, DMA_TO_DEVICE);
604 if (dma_mapping_error(dev, pdb->c_dma)) {
605 dev_err(dev, "Unable to map RSA CRT coefficient qinv memory\n");
609 pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_BIDIRECTIONAL);
610 if (dma_mapping_error(dev, pdb->tmp1_dma)) {
611 dev_err(dev, "Unable to map RSA tmp1 memory\n");
615 pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_BIDIRECTIONAL);
616 if (dma_mapping_error(dev, pdb->tmp2_dma)) {
617 dev_err(dev, "Unable to map RSA tmp2 memory\n");
621 if (edesc->mapped_src_nents > 1) {
622 pdb->sgf |= RSA_PRIV_PDB_SGF_G;
623 pdb->g_dma = edesc->sec4_sg_dma;
624 sec4_sg_index += edesc->mapped_src_nents;
626 struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
628 pdb->g_dma = sg_dma_address(req_ctx->fixup_src);
631 if (edesc->mapped_dst_nents > 1) {
632 pdb->sgf |= RSA_PRIV_PDB_SGF_F;
633 pdb->f_dma = edesc->sec4_sg_dma +
634 sec4_sg_index * sizeof(struct sec4_sg_entry);
636 pdb->f_dma = sg_dma_address(req->dst);
639 pdb->sgf |= key->n_sz;
640 pdb->p_q_len = (q_sz << RSA_PDB_Q_SHIFT) | p_sz;
645 dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
647 dma_unmap_single(dev, pdb->c_dma, p_sz, DMA_TO_DEVICE);
649 dma_unmap_single(dev, pdb->dq_dma, q_sz, DMA_TO_DEVICE);
651 dma_unmap_single(dev, pdb->dp_dma, p_sz, DMA_TO_DEVICE);
653 dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
655 dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
660 static int akcipher_enqueue_req(struct device *jrdev,
661 void (*cbk)(struct device *jrdev, u32 *desc,
662 u32 err, void *context),
663 struct akcipher_request *req)
665 struct caam_drv_private_jr *jrpriv = dev_get_drvdata(jrdev);
666 struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
667 struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
668 struct caam_rsa_key *key = &ctx->key;
669 struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
670 struct rsa_edesc *edesc = req_ctx->edesc;
671 u32 *desc = edesc->hw_desc;
674 req_ctx->akcipher_op_done = cbk;
676 * Only backlogged requests are sent to crypto-engine since the others
677 * can be handled by CAAM, if free, especially since the JR has up to 1024
678 * entries (more than the 10 entries of the crypto-engine queue).
680 if (req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)
681 ret = crypto_transfer_akcipher_request_to_engine(jrpriv->engine,
684 ret = caam_jr_enqueue(jrdev, desc, cbk, req);
686 if ((ret != -EINPROGRESS) && (ret != -EBUSY)) {
687 switch (key->priv_form) {
689 rsa_priv_f1_unmap(jrdev, edesc, req);
692 rsa_priv_f2_unmap(jrdev, edesc, req);
695 rsa_priv_f3_unmap(jrdev, edesc, req);
698 rsa_pub_unmap(jrdev, edesc, req);
700 rsa_io_unmap(jrdev, edesc, req);
707 static int caam_rsa_enc(struct akcipher_request *req)
709 struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
710 struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
711 struct caam_rsa_key *key = &ctx->key;
712 struct device *jrdev = ctx->dev;
713 struct rsa_edesc *edesc;
716 if (unlikely(!key->n || !key->e))
719 if (req->dst_len < key->n_sz) {
720 req->dst_len = key->n_sz;
721 dev_err(jrdev, "Output buffer length less than parameter n\n");
725 /* Allocate extended descriptor */
726 edesc = rsa_edesc_alloc(req, DESC_RSA_PUB_LEN);
728 return PTR_ERR(edesc);
730 /* Set RSA Encrypt Protocol Data Block */
731 ret = set_rsa_pub_pdb(req, edesc);
735 /* Initialize Job Descriptor */
736 init_rsa_pub_desc(edesc->hw_desc, &edesc->pdb.pub);
738 return akcipher_enqueue_req(jrdev, rsa_pub_done, req);
741 rsa_io_unmap(jrdev, edesc, req);
746 static int caam_rsa_dec_priv_f1(struct akcipher_request *req)
748 struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
749 struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
750 struct device *jrdev = ctx->dev;
751 struct rsa_edesc *edesc;
754 /* Allocate extended descriptor */
755 edesc = rsa_edesc_alloc(req, DESC_RSA_PRIV_F1_LEN);
757 return PTR_ERR(edesc);
759 /* Set RSA Decrypt Protocol Data Block - Private Key Form #1 */
760 ret = set_rsa_priv_f1_pdb(req, edesc);
764 /* Initialize Job Descriptor */
765 init_rsa_priv_f1_desc(edesc->hw_desc, &edesc->pdb.priv_f1);
767 return akcipher_enqueue_req(jrdev, rsa_priv_f_done, req);
770 rsa_io_unmap(jrdev, edesc, req);
775 static int caam_rsa_dec_priv_f2(struct akcipher_request *req)
777 struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
778 struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
779 struct device *jrdev = ctx->dev;
780 struct rsa_edesc *edesc;
783 /* Allocate extended descriptor */
784 edesc = rsa_edesc_alloc(req, DESC_RSA_PRIV_F2_LEN);
786 return PTR_ERR(edesc);
788 /* Set RSA Decrypt Protocol Data Block - Private Key Form #2 */
789 ret = set_rsa_priv_f2_pdb(req, edesc);
793 /* Initialize Job Descriptor */
794 init_rsa_priv_f2_desc(edesc->hw_desc, &edesc->pdb.priv_f2);
796 return akcipher_enqueue_req(jrdev, rsa_priv_f_done, req);
799 rsa_io_unmap(jrdev, edesc, req);
804 static int caam_rsa_dec_priv_f3(struct akcipher_request *req)
806 struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
807 struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
808 struct device *jrdev = ctx->dev;
809 struct rsa_edesc *edesc;
812 /* Allocate extended descriptor */
813 edesc = rsa_edesc_alloc(req, DESC_RSA_PRIV_F3_LEN);
815 return PTR_ERR(edesc);
817 /* Set RSA Decrypt Protocol Data Block - Private Key Form #3 */
818 ret = set_rsa_priv_f3_pdb(req, edesc);
822 /* Initialize Job Descriptor */
823 init_rsa_priv_f3_desc(edesc->hw_desc, &edesc->pdb.priv_f3);
825 return akcipher_enqueue_req(jrdev, rsa_priv_f_done, req);
828 rsa_io_unmap(jrdev, edesc, req);
833 static int caam_rsa_dec(struct akcipher_request *req)
835 struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
836 struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
837 struct caam_rsa_key *key = &ctx->key;
840 if (unlikely(!key->n || !key->d))
843 if (req->dst_len < key->n_sz) {
844 req->dst_len = key->n_sz;
845 dev_err(ctx->dev, "Output buffer length less than parameter n\n");
849 if (key->priv_form == FORM3)
850 ret = caam_rsa_dec_priv_f3(req);
851 else if (key->priv_form == FORM2)
852 ret = caam_rsa_dec_priv_f2(req);
854 ret = caam_rsa_dec_priv_f1(req);
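/*
 * Note on private key forms: FORM1 uses only (n, d), FORM2 additionally
 * carries the prime factors (p, q) plus the tmp1/tmp2 scratch buffers, and
 * FORM3 is the full CRT form (p, q, dP, dQ, qInv) that needs no private
 * exponent in the descriptor; see the corresponding set_rsa_priv_f*_pdb()
 * helpers above.
 */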
859 static void caam_rsa_free_key(struct caam_rsa_key *key)
861 kfree_sensitive(key->d);
862 kfree_sensitive(key->p);
863 kfree_sensitive(key->q);
864 kfree_sensitive(key->dp);
865 kfree_sensitive(key->dq);
866 kfree_sensitive(key->qinv);
867 kfree_sensitive(key->tmp1);
868 kfree_sensitive(key->tmp2);
871 memset(key, 0, sizeof(*key));
874 static void caam_rsa_drop_leading_zeros(const u8 **ptr, size_t *nbytes)
876 while (!**ptr && *nbytes) {
883 * caam_read_rsa_crt - Used for reading dP, dQ, qInv CRT members.
884 * dP, dQ and qInv could decode to less than corresponding p, q length, as the
885 * BER-encoding requires that the minimum number of bytes be used to encode the
886 * integer. dP, dQ, qInv decoded values have to be zero-padded to the appropriate length.
889 * @ptr : pointer to {dP, dQ, qInv} CRT member
890 * @nbytes: length in bytes of {dP, dQ, qInv} CRT member
891 * @dstlen: length in bytes of corresponding p or q prime factor
893 static u8 *caam_read_rsa_crt(const u8 *ptr, size_t nbytes, size_t dstlen)
897 caam_rsa_drop_leading_zeros(&ptr, &nbytes);
901 dst = kzalloc(dstlen, GFP_DMA | GFP_KERNEL);
905 memcpy(dst + (dstlen - nbytes), ptr, nbytes);
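/*
 * Example (hypothetical sizes): a dP value that BER-decodes to 127 bytes
 * for a 128-byte p is copied at offset 1 of the zeroed 128-byte dst, so
 * the CAAM sees the fixed-width, left-zero-padded form it expects.
 */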
911 * caam_read_raw_data - Read a raw byte stream as a positive integer.
912 * The function skips the buffer's leading zeros, copies the remaining data
913 * to a buffer allocated in the GFP_DMA | GFP_KERNEL zone and returns
914 * the address of the new buffer.
916 * @buf : The data to read
917 * @nbytes: The amount of data to read
919 static inline u8 *caam_read_raw_data(const u8 *buf, size_t *nbytes)
922 caam_rsa_drop_leading_zeros(&buf, nbytes);
926 return kmemdup(buf, *nbytes, GFP_DMA | GFP_KERNEL);
929 static int caam_rsa_check_key_length(unsigned int len)
936 static int caam_rsa_set_pub_key(struct crypto_akcipher *tfm, const void *key,
939 struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
940 struct rsa_key raw_key = {NULL};
941 struct caam_rsa_key *rsa_key = &ctx->key;
944 /* Free the old RSA key if any */
945 caam_rsa_free_key(rsa_key);
947 ret = rsa_parse_pub_key(&raw_key, key, keylen);
951 /* Copy key in DMA zone */
952 rsa_key->e = kmemdup(raw_key.e, raw_key.e_sz, GFP_DMA | GFP_KERNEL);
957 * Skip leading zeros and copy the positive integer to a buffer
958 * allocated in the GFP_DMA | GFP_KERNEL zone. The decryption descriptor
959 * expects a positive integer for the RSA modulus and uses its length as
960 * decryption output length.
962 rsa_key->n = caam_read_raw_data(raw_key.n, &raw_key.n_sz);
966 if (caam_rsa_check_key_length(raw_key.n_sz << 3)) {
967 caam_rsa_free_key(rsa_key);
971 rsa_key->e_sz = raw_key.e_sz;
972 rsa_key->n_sz = raw_key.n_sz;
976 caam_rsa_free_key(rsa_key);
980 static void caam_rsa_set_priv_key_form(struct caam_rsa_ctx *ctx,
981 struct rsa_key *raw_key)
983 struct caam_rsa_key *rsa_key = &ctx->key;
984 size_t p_sz = raw_key->p_sz;
985 size_t q_sz = raw_key->q_sz;
987 rsa_key->p = caam_read_raw_data(raw_key->p, &p_sz);
990 rsa_key->p_sz = p_sz;
992 rsa_key->q = caam_read_raw_data(raw_key->q, &q_sz);
995 rsa_key->q_sz = q_sz;
997 rsa_key->tmp1 = kzalloc(raw_key->p_sz, GFP_DMA | GFP_KERNEL);
1001 rsa_key->tmp2 = kzalloc(raw_key->q_sz, GFP_DMA | GFP_KERNEL);
1005 rsa_key->priv_form = FORM2;
1007 rsa_key->dp = caam_read_rsa_crt(raw_key->dp, raw_key->dp_sz, p_sz);
1011 rsa_key->dq = caam_read_rsa_crt(raw_key->dq, raw_key->dq_sz, q_sz);
1015 rsa_key->qinv = caam_read_rsa_crt(raw_key->qinv, raw_key->qinv_sz,
1020 rsa_key->priv_form = FORM3;
1025 kfree_sensitive(rsa_key->dq);
1027 kfree_sensitive(rsa_key->dp);
1029 kfree_sensitive(rsa_key->tmp2);
1031 kfree_sensitive(rsa_key->tmp1);
1033 kfree_sensitive(rsa_key->q);
1035 kfree_sensitive(rsa_key->p);
1038 static int caam_rsa_set_priv_key(struct crypto_akcipher *tfm, const void *key,
1039 unsigned int keylen)
1041 struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
1042 struct rsa_key raw_key = {NULL};
1043 struct caam_rsa_key *rsa_key = &ctx->key;
1046 /* Free the old RSA key if any */
1047 caam_rsa_free_key(rsa_key);
1049 ret = rsa_parse_priv_key(&raw_key, key, keylen);
1053 /* Copy key in DMA zone */
1054 rsa_key->d = kmemdup(raw_key.d, raw_key.d_sz, GFP_DMA | GFP_KERNEL);
1058 rsa_key->e = kmemdup(raw_key.e, raw_key.e_sz, GFP_DMA | GFP_KERNEL);
1063 * Skip leading zeros and copy the positive integer to a buffer
1064 * allocated in the GFP_DMA | GFP_KERNEL zone. The decryption descriptor
1065 * expects a positive integer for the RSA modulus and uses its length as
1066 * decryption output length.
1068 rsa_key->n = caam_read_raw_data(raw_key.n, &raw_key.n_sz);
1072 if (caam_rsa_check_key_length(raw_key.n_sz << 3)) {
1073 caam_rsa_free_key(rsa_key);
1077 rsa_key->d_sz = raw_key.d_sz;
1078 rsa_key->e_sz = raw_key.e_sz;
1079 rsa_key->n_sz = raw_key.n_sz;
1081 caam_rsa_set_priv_key_form(ctx, &raw_key);
1086 caam_rsa_free_key(rsa_key);
1090 static unsigned int caam_rsa_max_size(struct crypto_akcipher *tfm)
1092 struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
1094 return ctx->key.n_sz;
1097 /* Per-session PKC driver context creation function */
1098 static int caam_rsa_init_tfm(struct crypto_akcipher *tfm)
1100 struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
1102 ctx->dev = caam_jr_alloc();
1104 if (IS_ERR(ctx->dev)) {
1105 pr_err("Job Ring Device allocation for transform failed\n");
1106 return PTR_ERR(ctx->dev);
1109 ctx->padding_dma = dma_map_single(ctx->dev, zero_buffer,
1110 CAAM_RSA_MAX_INPUT_SIZE - 1,
1112 if (dma_mapping_error(ctx->dev, ctx->padding_dma)) {
1113 dev_err(ctx->dev, "unable to map padding\n");
1114 caam_jr_free(ctx->dev);
1118 ctx->enginectx.op.do_one_request = akcipher_do_one_req;
1123 /* Per-session PKC driver context cleanup function */
1124 static void caam_rsa_exit_tfm(struct crypto_akcipher *tfm)
1126 struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
1127 struct caam_rsa_key *key = &ctx->key;
1129 dma_unmap_single(ctx->dev, ctx->padding_dma, CAAM_RSA_MAX_INPUT_SIZE -
1131 caam_rsa_free_key(key);
1132 caam_jr_free(ctx->dev);
1135 static struct caam_akcipher_alg caam_rsa = {
1137 .encrypt = caam_rsa_enc,
1138 .decrypt = caam_rsa_dec,
1139 .set_pub_key = caam_rsa_set_pub_key,
1140 .set_priv_key = caam_rsa_set_priv_key,
1141 .max_size = caam_rsa_max_size,
1142 .init = caam_rsa_init_tfm,
1143 .exit = caam_rsa_exit_tfm,
1144 .reqsize = sizeof(struct caam_rsa_req_ctx),
1147 .cra_driver_name = "rsa-caam",
1148 .cra_priority = 3000,
1149 .cra_module = THIS_MODULE,
1150 .cra_ctxsize = sizeof(struct caam_rsa_ctx),
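/*
 * Minimal caller-side sketch (illustrative only, error handling omitted):
 * once this algorithm is registered, a kernel user reaches the handlers
 * above through the generic akcipher API, e.g.:
 *
 *	struct crypto_akcipher *tfm = crypto_alloc_akcipher("rsa", 0, 0);
 *	struct akcipher_request *req = akcipher_request_alloc(tfm, GFP_KERNEL);
 *
 *	crypto_akcipher_set_pub_key(tfm, der_key, der_key_len);
 *	akcipher_request_set_crypt(req, src_sg, dst_sg, src_len, dst_len);
 *	akcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *				      done_cb, done_ctx);
 *	crypto_akcipher_encrypt(req);	// ends up in caam_rsa_enc()
 *
 * der_key, src_sg/dst_sg and the completion callback are the caller's own;
 * with cra_priority 3000 an "rsa" lookup normally resolves to "rsa-caam"
 * when the hardware is present.
 */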
1155 /* Public Key Cryptography module initialization handler */
1156 int caam_pkc_init(struct device *ctrldev)
1158 struct caam_drv_private *priv = dev_get_drvdata(ctrldev);
1163 /* Determine public key hardware accelerator presence. */
1164 if (priv->era < 10) {
1165 pk_inst = (rd_reg32(&priv->ctrl->perfmon.cha_num_ls) &
1166 CHA_ID_LS_PK_MASK) >> CHA_ID_LS_PK_SHIFT;
1168 pkha = rd_reg32(&priv->ctrl->vreg.pkha);
1169 pk_inst = pkha & CHA_VER_NUM_MASK;
1172 * Newer CAAMs support partially disabled functionality. If this is the
1173 * case, the number is non-zero, but this bit is set to indicate that
1174 * no encryption or decryption is supported. Only signing and verifying are supported.
1177 if (pkha & CHA_VER_MISC_PKHA_NO_CRYPT)
1181 /* Do not register algorithms if PKHA is not present. */
1185 /* allocate zero buffer, used for padding input */
1186 zero_buffer = kzalloc(CAAM_RSA_MAX_INPUT_SIZE - 1, GFP_DMA |
1191 err = crypto_register_akcipher(&caam_rsa.akcipher);
1195 dev_warn(ctrldev, "%s alg registration failed\n",
1196 caam_rsa.akcipher.base.cra_driver_name);
1199 caam_rsa.registered = true;
1200 dev_info(ctrldev, "caam pkc algorithms registered in /proc/crypto\n");
1206 void caam_pkc_exit(void)
1211 if (caam_rsa.registered)
1212 crypto_unregister_akcipher(&caam_rsa.akcipher);