// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
/* Copyright(c) 2014 - 2020 Intel Corporation */
#include <linux/module.h>
#include <crypto/internal/rsa.h>
#include <crypto/internal/akcipher.h>
#include <crypto/akcipher.h>
#include <crypto/kpp.h>
#include <crypto/internal/kpp.h>
#include <crypto/dh.h>
#include <linux/dma-mapping.h>
#include <linux/fips.h>
#include <crypto/scatterwalk.h>
#include "icp_qat_fw_pke.h"
#include "adf_accel_devices.h"
#include "adf_transport.h"
#include "adf_common_drv.h"
#include "qat_crypto.h"
static DEFINE_MUTEX(algs_lock);
static unsigned int active_devs;
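
/*
 * The parameter-list structures below mirror the flat buffers consumed by
 * the PKE firmware: each member holds the 64-bit bus address of one
 * big-number operand, and in_tab[]/out_tab[] alias the same storage as a
 * zero-terminated table. They are __packed and 64-byte aligned to match
 * the alignment the request handlers enforce with PTR_ALIGN() below.
 */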
struct qat_rsa_input_params {
	union {
		struct {
			u64 m;
			u64 e;
			u64 n;
		} enc;
		struct {
			u64 c;
			u64 d;
			u64 n;
		} dec;
		struct {
			u64 c;
			u64 p;
			u64 q;
			u64 dp;
			u64 dq;
			u64 qinv;
		} dec_crt;
		u64 in_tab[8];
	};
} __packed __aligned(64);
struct qat_rsa_output_params {
	union {
		struct {
			u64 c;
		} enc;
		struct {
			u64 m;
		} dec;
		u64 out_tab[8];
	};
} __packed __aligned(64);
struct qat_rsa_ctx {
	char *n;
	char *e;
	char *d;
	char *p;
	char *q;
	char *dp;
	char *dq;
	char *qinv;
	dma_addr_t dma_n;
	dma_addr_t dma_e;
	dma_addr_t dma_d;
	dma_addr_t dma_p;
	dma_addr_t dma_q;
	dma_addr_t dma_dp;
	dma_addr_t dma_dq;
	dma_addr_t dma_qinv;
	unsigned int key_sz;
	bool crt_mode;
	struct qat_crypto_instance *inst;
} __packed __aligned(64);
struct qat_dh_input_params {
	union {
		struct {
			u64 b;
			u64 xa;
			u64 p;
		} in;
		struct {
			u64 xa;
			u64 p;
		} in_g2;
		u64 in_tab[8];
	};
} __packed __aligned(64);
struct qat_dh_output_params {
	union {
		u64 r;
		u64 out_tab[8];
	};
} __packed __aligned(64);
struct qat_dh_ctx {
	char *g;
	char *xa;
	char *p;
	dma_addr_t dma_g;
	dma_addr_t dma_xa;
	dma_addr_t dma_p;
	unsigned int p_size;
	bool g2;
	struct qat_crypto_instance *inst;
} __packed __aligned(64);
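
/*
 * Per-request bookkeeping, carved out of the crypto request context.
 * The .reqsize of the algorithm definitions at the bottom of this file
 * reserves sizeof(struct qat_asym_request) + 64 bytes so the handlers
 * can PTR_ALIGN() this structure to the 64-byte boundary the firmware
 * parameter tables require.
 */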
struct qat_asym_request {
	union {
		struct qat_rsa_input_params rsa;
		struct qat_dh_input_params dh;
	} in;
	union {
		struct qat_rsa_output_params rsa;
		struct qat_dh_output_params dh;
	} out;
	dma_addr_t phy_in;
	dma_addr_t phy_out;
	char *src_align;
	char *dst_align;
	struct icp_qat_fw_pke_request req;
	union {
		struct qat_rsa_ctx *rsa;
		struct qat_dh_ctx *dh;
	} ctx;
	union {
		struct akcipher_request *rsa;
		struct kpp_request *dh;
	} areq;
	int err;
	void (*cb)(struct icp_qat_fw_pke_resp *resp);
} __aligned(64);
static void qat_dh_cb(struct icp_qat_fw_pke_resp *resp)
{
	struct qat_asym_request *req = (void *)(__force long)resp->opaque;
	struct kpp_request *areq = req->areq.dh;
	struct device *dev = &GET_DEV(req->ctx.dh->inst->accel_dev);
	int err = ICP_QAT_FW_PKE_RESP_PKE_STAT_GET(
				resp->pke_resp_hdr.comn_resp_flags);

	err = (err == ICP_QAT_FW_COMN_STATUS_FLAG_OK) ? 0 : -EINVAL;

	if (areq->src) {
		if (req->src_align)
			dma_free_coherent(dev, req->ctx.dh->p_size,
					  req->src_align, req->in.dh.in.b);
		else
			dma_unmap_single(dev, req->in.dh.in.b,
					 req->ctx.dh->p_size, DMA_TO_DEVICE);
	}

	areq->dst_len = req->ctx.dh->p_size;
	if (req->dst_align) {
		scatterwalk_map_and_copy(req->dst_align, areq->dst, 0,
					 areq->dst_len, 1);

		dma_free_coherent(dev, req->ctx.dh->p_size, req->dst_align,
				  req->out.dh.r);
	} else {
		dma_unmap_single(dev, req->out.dh.r, req->ctx.dh->p_size,
				 DMA_FROM_DEVICE);
	}

	dma_unmap_single(dev, req->phy_in, sizeof(struct qat_dh_input_params),
			 DMA_TO_DEVICE);
	dma_unmap_single(dev, req->phy_out,
			 sizeof(struct qat_dh_output_params),
			 DMA_TO_DEVICE);

	kpp_request_complete(areq, err);
}
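
/*
 * Firmware function IDs for the Diffie-Hellman service, one per supported
 * modulus size. The _G2_ variants select an optimized path for generator
 * g = 2, which needs no explicit base operand (see qat_dh_fn_id() and the
 * in_g2 parameter list above). The numeric values are opaque handles
 * defined by the QAT firmware interface.
 */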
#define PKE_DH_1536 0x390c1a49
#define PKE_DH_G2_1536 0x2e0b1a3e
#define PKE_DH_2048 0x4d0c1a60
#define PKE_DH_G2_2048 0x3e0b1a55
#define PKE_DH_3072 0x510c1a77
#define PKE_DH_G2_3072 0x3a0b1a6c
#define PKE_DH_4096 0x690c1a8e
#define PKE_DH_G2_4096 0x4a0b1a83
static unsigned long qat_dh_fn_id(unsigned int len, bool g2)
{
	unsigned int bitslen = len << 3;

	switch (bitslen) {
	case 1536:
		return g2 ? PKE_DH_G2_1536 : PKE_DH_1536;
	case 2048:
		return g2 ? PKE_DH_G2_2048 : PKE_DH_2048;
	case 3072:
		return g2 ? PKE_DH_G2_3072 : PKE_DH_3072;
	case 4096:
		return g2 ? PKE_DH_G2_4096 : PKE_DH_4096;
	default:
		return 0;
	}
}
static inline struct qat_dh_ctx *qat_dh_get_params(struct crypto_kpp *tfm)
{
	return kpp_tfm_ctx(tfm);
}
static int qat_dh_compute_value(struct kpp_request *req)
{
	struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
	struct qat_dh_ctx *ctx = kpp_tfm_ctx(tfm);
	struct qat_crypto_instance *inst = ctx->inst;
	struct device *dev = &GET_DEV(inst->accel_dev);
	struct qat_asym_request *qat_req =
			PTR_ALIGN(kpp_request_ctx(req), 64);
	struct icp_qat_fw_pke_request *msg = &qat_req->req;
	int ret, ctr = 0;
	int n_input_params = 0;

	if (unlikely(!ctx->xa))
		return -EINVAL;

	if (req->dst_len < ctx->p_size) {
		req->dst_len = ctx->p_size;
		return -EOVERFLOW;
	}
	memset(msg, '\0', sizeof(*msg));
	ICP_QAT_FW_PKE_HDR_VALID_FLAG_SET(msg->pke_hdr,
					  ICP_QAT_FW_COMN_REQ_FLAG_SET);

	msg->pke_hdr.cd_pars.func_id = qat_dh_fn_id(ctx->p_size,
						    !req->src && ctx->g2);
	if (unlikely(!msg->pke_hdr.cd_pars.func_id))
		return -EINVAL;

	qat_req->cb = qat_dh_cb;
	qat_req->ctx.dh = ctx;
	qat_req->areq.dh = req;
	msg->pke_hdr.service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_PKE;
	msg->pke_hdr.comn_req_flags =
		ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_PTR_TYPE_FLAT,
					    QAT_COMN_CD_FLD_TYPE_64BIT_ADR);

	/*
	 * If no source is provided, use g as the base.
	 */
	if (req->src) {
		qat_req->in.dh.in.xa = ctx->dma_xa;
		qat_req->in.dh.in.p = ctx->dma_p;
		n_input_params = 3;
	} else {
		if (ctx->g2) {
			qat_req->in.dh.in_g2.xa = ctx->dma_xa;
			qat_req->in.dh.in_g2.p = ctx->dma_p;
			n_input_params = 2;
		} else {
			qat_req->in.dh.in.b = ctx->dma_g;
			qat_req->in.dh.in.xa = ctx->dma_xa;
			qat_req->in.dh.in.p = ctx->dma_p;
			n_input_params = 3;
		}
	}

	ret = -ENOMEM;
	if (req->src) {
		/*
		 * src can be of any size in valid range, but HW expects it to
		 * be the same as modulo p so in case it is different we need
		 * to allocate a new buf and copy src data.
		 * In other case we just need to map the user provided buffer.
		 * Also need to make sure that it is in contiguous buffer.
		 */
		if (sg_is_last(req->src) && req->src_len == ctx->p_size) {
			qat_req->src_align = NULL;
			qat_req->in.dh.in.b = dma_map_single(dev,
							     sg_virt(req->src),
							     req->src_len,
							     DMA_TO_DEVICE);
			if (unlikely(dma_mapping_error(dev,
						       qat_req->in.dh.in.b)))
				return ret;

		} else {
			int shift = ctx->p_size - req->src_len;

			qat_req->src_align = dma_alloc_coherent(dev,
								ctx->p_size,
								&qat_req->in.dh.in.b,
								GFP_KERNEL);
			if (unlikely(!qat_req->src_align))
				return ret;

			scatterwalk_map_and_copy(qat_req->src_align + shift,
						 req->src, 0, req->src_len, 0);
		}
	}
	/*
	 * dst can be of any size in valid range, but HW expects it to be the
	 * same as modulo p so in case it is different we need to allocate a
	 * new buf and copy the result back to dst afterwards.
	 * In other case we just need to map the user provided buffer.
	 * Also need to make sure that it is in contiguous buffer.
	 */
	if (sg_is_last(req->dst) && req->dst_len == ctx->p_size) {
		qat_req->dst_align = NULL;
		qat_req->out.dh.r = dma_map_single(dev, sg_virt(req->dst),
						   req->dst_len,
						   DMA_FROM_DEVICE);

		if (unlikely(dma_mapping_error(dev, qat_req->out.dh.r)))
			goto unmap_src;

	} else {
		qat_req->dst_align = dma_alloc_coherent(dev, ctx->p_size,
							&qat_req->out.dh.r,
							GFP_KERNEL);
		if (unlikely(!qat_req->dst_align))
			goto unmap_src;
	}

	qat_req->in.dh.in_tab[n_input_params] = 0;
	qat_req->out.dh.out_tab[1] = 0;
	/* Mapping in.in.b or in.in_g2.xa is the same */
	qat_req->phy_in = dma_map_single(dev, &qat_req->in.dh.in.b,
					 sizeof(struct qat_dh_input_params),
					 DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, qat_req->phy_in)))
		goto unmap_dst;

	qat_req->phy_out = dma_map_single(dev, &qat_req->out.dh.r,
					  sizeof(struct qat_dh_output_params),
					  DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, qat_req->phy_out)))
		goto unmap_in_params;

	msg->pke_mid.src_data_addr = qat_req->phy_in;
	msg->pke_mid.dest_data_addr = qat_req->phy_out;
	msg->pke_mid.opaque = (u64)(__force long)qat_req;
	msg->input_param_count = n_input_params;
	msg->output_param_count = 1;

	/* Post to the PKE ring; retry a bounded number of times if full */
	do {
		ret = adf_send_message(ctx->inst->pke_tx, (u32 *)msg);
	} while (ret == -EBUSY && ctr++ < 100);

	if (!ret)
		return -EINPROGRESS;

	if (!dma_mapping_error(dev, qat_req->phy_out))
		dma_unmap_single(dev, qat_req->phy_out,
				 sizeof(struct qat_dh_output_params),
				 DMA_TO_DEVICE);
unmap_in_params:
	if (!dma_mapping_error(dev, qat_req->phy_in))
		dma_unmap_single(dev, qat_req->phy_in,
				 sizeof(struct qat_dh_input_params),
				 DMA_TO_DEVICE);
unmap_dst:
	if (qat_req->dst_align)
		dma_free_coherent(dev, ctx->p_size, qat_req->dst_align,
				  qat_req->out.dh.r);
	else
		if (!dma_mapping_error(dev, qat_req->out.dh.r))
			dma_unmap_single(dev, qat_req->out.dh.r, ctx->p_size,
					 DMA_FROM_DEVICE);
unmap_src:
	if (req->src) {
		if (qat_req->src_align)
			dma_free_coherent(dev, ctx->p_size, qat_req->src_align,
					  qat_req->in.dh.in.b);
		else
			if (!dma_mapping_error(dev, qat_req->in.dh.in.b))
				dma_unmap_single(dev, qat_req->in.dh.in.b,
						 ctx->p_size,
						 DMA_TO_DEVICE);
	}
	return ret;
}
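
/*
 * Only the modulus sizes the firmware provides function IDs for (1536,
 * 2048, 3072 and 4096 bits) are accepted; anything else is rejected
 * before any DMA buffer is allocated.
 */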
static int qat_dh_check_params_length(unsigned int p_len)
{
	switch (p_len) {
	case 1536:
	case 2048:
	case 3072:
	case 4096:
		return 0;
	}
	return -EINVAL;
}
static int qat_dh_set_params(struct qat_dh_ctx *ctx, struct dh *params)
{
	struct qat_crypto_instance *inst = ctx->inst;
	struct device *dev = &GET_DEV(inst->accel_dev);

	if (qat_dh_check_params_length(params->p_size << 3))
		return -EINVAL;

	ctx->p_size = params->p_size;
	ctx->p = dma_alloc_coherent(dev, ctx->p_size, &ctx->dma_p, GFP_KERNEL);
	if (!ctx->p)
		return -ENOMEM;
	memcpy(ctx->p, params->p, ctx->p_size);

	/* If g equals 2 don't copy it */
	if (params->g_size == 1 && *(char *)params->g == 0x02) {
		ctx->g2 = true;
		return 0;
	}

	ctx->g = dma_alloc_coherent(dev, ctx->p_size, &ctx->dma_g, GFP_KERNEL);
	if (!ctx->g)
		return -ENOMEM;
	memcpy(ctx->g + (ctx->p_size - params->g_size), params->g,
	       params->g_size);

	return 0;
}
static void qat_dh_clear_ctx(struct device *dev, struct qat_dh_ctx *ctx)
{
	if (ctx->g) {
		dma_free_coherent(dev, ctx->p_size, ctx->g, ctx->dma_g);
		ctx->g = NULL;
	}
	if (ctx->xa) {
		dma_free_coherent(dev, ctx->p_size, ctx->xa, ctx->dma_xa);
		ctx->xa = NULL;
	}
	if (ctx->p) {
		dma_free_coherent(dev, ctx->p_size, ctx->p, ctx->dma_p);
		ctx->p = NULL;
	}
	ctx->p_size = 0;
	ctx->g2 = false;
}
static int qat_dh_set_secret(struct crypto_kpp *tfm, const void *buf,
			     unsigned int len)
{
	struct qat_dh_ctx *ctx = kpp_tfm_ctx(tfm);
	struct device *dev = &GET_DEV(ctx->inst->accel_dev);
	struct dh params;
	int ret;

	if (crypto_dh_decode_key(buf, len, &params) < 0)
		return -EINVAL;

	/* Free old secret if any */
	qat_dh_clear_ctx(dev, ctx);

	ret = qat_dh_set_params(ctx, &params);
	if (ret < 0)
		goto err_clear_ctx;

	ctx->xa = dma_alloc_coherent(dev, ctx->p_size, &ctx->dma_xa,
				     GFP_KERNEL);
	if (!ctx->xa) {
		ret = -ENOMEM;
		goto err_clear_ctx;
	}
	memcpy(ctx->xa + (ctx->p_size - params.key_size), params.key,
	       params.key_size);

	return 0;

err_clear_ctx:
	qat_dh_clear_ctx(dev, ctx);
	return ret;
}
static unsigned int qat_dh_max_size(struct crypto_kpp *tfm)
{
	struct qat_dh_ctx *ctx = kpp_tfm_ctx(tfm);

	return ctx->p_size;
}
static int qat_dh_init_tfm(struct crypto_kpp *tfm)
{
	struct qat_dh_ctx *ctx = kpp_tfm_ctx(tfm);
	struct qat_crypto_instance *inst =
			qat_crypto_get_instance_node(get_current_node());

	if (!inst)
		return -EINVAL;

	ctx->p_size = 0;
	ctx->g2 = false;
	ctx->inst = inst;
	return 0;
}
static void qat_dh_exit_tfm(struct crypto_kpp *tfm)
{
	struct qat_dh_ctx *ctx = kpp_tfm_ctx(tfm);
	struct device *dev = &GET_DEV(ctx->inst->accel_dev);

	qat_dh_clear_ctx(dev, ctx);
	qat_crypto_put_instance(ctx->inst);
}
static void qat_rsa_cb(struct icp_qat_fw_pke_resp *resp)
{
	struct qat_asym_request *req = (void *)(__force long)resp->opaque;
	struct akcipher_request *areq = req->areq.rsa;
	struct device *dev = &GET_DEV(req->ctx.rsa->inst->accel_dev);
	int err = ICP_QAT_FW_PKE_RESP_PKE_STAT_GET(
				resp->pke_resp_hdr.comn_resp_flags);

	err = (err == ICP_QAT_FW_COMN_STATUS_FLAG_OK) ? 0 : -EINVAL;

	if (req->src_align)
		dma_free_coherent(dev, req->ctx.rsa->key_sz, req->src_align,
				  req->in.rsa.enc.m);
	else
		dma_unmap_single(dev, req->in.rsa.enc.m, req->ctx.rsa->key_sz,
				 DMA_TO_DEVICE);

	areq->dst_len = req->ctx.rsa->key_sz;
	if (req->dst_align) {
		scatterwalk_map_and_copy(req->dst_align, areq->dst, 0,
					 areq->dst_len, 1);

		dma_free_coherent(dev, req->ctx.rsa->key_sz, req->dst_align,
				  req->out.rsa.enc.c);
	} else {
		dma_unmap_single(dev, req->out.rsa.enc.c, req->ctx.rsa->key_sz,
				 DMA_FROM_DEVICE);
	}

	dma_unmap_single(dev, req->phy_in, sizeof(struct qat_rsa_input_params),
			 DMA_TO_DEVICE);
	dma_unmap_single(dev, req->phy_out,
			 sizeof(struct qat_rsa_output_params),
			 DMA_TO_DEVICE);

	akcipher_request_complete(areq, err);
}
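
/*
 * Common PKE response entry point: the firmware echoes back the opaque
 * value stored in the request descriptor, which is the address of the
 * qat_asym_request, and ->cb dispatches to qat_rsa_cb() or qat_dh_cb().
 */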
void qat_alg_asym_callback(void *_resp)
{
	struct icp_qat_fw_pke_resp *resp = _resp;
	struct qat_asym_request *areq = (void *)(__force long)resp->opaque;

	areq->cb(resp);
}
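
/*
 * Firmware function IDs for the RSA primitives, one per supported key
 * size. The EP/DP1/DP2 names appear to follow the PKCS #1 primitive
 * terminology: public-key encryption, private-key decryption in the
 * plain (d, n) form, and private-key decryption in CRT form.
 */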
#define PKE_RSA_EP_512 0x1c161b21
#define PKE_RSA_EP_1024 0x35111bf7
#define PKE_RSA_EP_1536 0x4d111cdc
#define PKE_RSA_EP_2048 0x6e111dba
#define PKE_RSA_EP_3072 0x7d111ea3
#define PKE_RSA_EP_4096 0xa5101f7e
static unsigned long qat_rsa_enc_fn_id(unsigned int len)
{
	unsigned int bitslen = len << 3;

	switch (bitslen) {
	case 512:
		return PKE_RSA_EP_512;
	case 1024:
		return PKE_RSA_EP_1024;
	case 1536:
		return PKE_RSA_EP_1536;
	case 2048:
		return PKE_RSA_EP_2048;
	case 3072:
		return PKE_RSA_EP_3072;
	case 4096:
		return PKE_RSA_EP_4096;
	default:
		return 0;
	}
}
#define PKE_RSA_DP1_512 0x1c161b3c
#define PKE_RSA_DP1_1024 0x35111c12
#define PKE_RSA_DP1_1536 0x4d111cf7
#define PKE_RSA_DP1_2048 0x6e111dda
#define PKE_RSA_DP1_3072 0x7d111ebe
#define PKE_RSA_DP1_4096 0xa5101f98
static unsigned long qat_rsa_dec_fn_id(unsigned int len)
{
	unsigned int bitslen = len << 3;

	switch (bitslen) {
	case 512:
		return PKE_RSA_DP1_512;
	case 1024:
		return PKE_RSA_DP1_1024;
	case 1536:
		return PKE_RSA_DP1_1536;
	case 2048:
		return PKE_RSA_DP1_2048;
	case 3072:
		return PKE_RSA_DP1_3072;
	case 4096:
		return PKE_RSA_DP1_4096;
	default:
		return 0;
	}
}
#define PKE_RSA_DP2_512 0x1c131b57
#define PKE_RSA_DP2_1024 0x26131c2d
#define PKE_RSA_DP2_1536 0x45111d12
#define PKE_RSA_DP2_2048 0x59121dfa
#define PKE_RSA_DP2_3072 0x81121ed9
#define PKE_RSA_DP2_4096 0xb1111fb2
static unsigned long qat_rsa_dec_fn_id_crt(unsigned int len)
{
	unsigned int bitslen = len << 3;

	switch (bitslen) {
	case 512:
		return PKE_RSA_DP2_512;
	case 1024:
		return PKE_RSA_DP2_1024;
	case 1536:
		return PKE_RSA_DP2_1536;
	case 2048:
		return PKE_RSA_DP2_2048;
	case 3072:
		return PKE_RSA_DP2_3072;
	case 4096:
		return PKE_RSA_DP2_4096;
	default:
		return 0;
	}
}
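
/*
 * qat_rsa_enc() and qat_rsa_dec() below share the same shape: build a PKE
 * descriptor, map the source and destination (bouncing into a
 * right-aligned DMA buffer when the caller's buffer is not contiguous or
 * not exactly key_sz bytes), then post the message to the PKE ring,
 * retrying a bounded number of times while the ring is full (-EBUSY). On
 * success the request completes asynchronously via qat_rsa_cb().
 */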
static int qat_rsa_enc(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct qat_crypto_instance *inst = ctx->inst;
	struct device *dev = &GET_DEV(inst->accel_dev);
	struct qat_asym_request *qat_req =
			PTR_ALIGN(akcipher_request_ctx(req), 64);
	struct icp_qat_fw_pke_request *msg = &qat_req->req;
	int ret, ctr = 0;

	if (unlikely(!ctx->n || !ctx->e))
		return -EINVAL;

	if (req->dst_len < ctx->key_sz) {
		req->dst_len = ctx->key_sz;
		return -EOVERFLOW;
	}
	memset(msg, '\0', sizeof(*msg));
	ICP_QAT_FW_PKE_HDR_VALID_FLAG_SET(msg->pke_hdr,
					  ICP_QAT_FW_COMN_REQ_FLAG_SET);
	msg->pke_hdr.cd_pars.func_id = qat_rsa_enc_fn_id(ctx->key_sz);
	if (unlikely(!msg->pke_hdr.cd_pars.func_id))
		return -EINVAL;

	qat_req->cb = qat_rsa_cb;
	qat_req->ctx.rsa = ctx;
	qat_req->areq.rsa = req;
	msg->pke_hdr.service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_PKE;
	msg->pke_hdr.comn_req_flags =
		ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_PTR_TYPE_FLAT,
					    QAT_COMN_CD_FLD_TYPE_64BIT_ADR);

	qat_req->in.rsa.enc.e = ctx->dma_e;
	qat_req->in.rsa.enc.n = ctx->dma_n;
	ret = -ENOMEM;

	/*
	 * src can be of any size in valid range, but HW expects it to be the
	 * same as modulo n so in case it is different we need to allocate a
	 * new buf and copy src data.
	 * In other case we just need to map the user provided buffer.
	 * Also need to make sure that it is in contiguous buffer.
	 */
	if (sg_is_last(req->src) && req->src_len == ctx->key_sz) {
		qat_req->src_align = NULL;
		qat_req->in.rsa.enc.m = dma_map_single(dev, sg_virt(req->src),
						       req->src_len,
						       DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(dev, qat_req->in.rsa.enc.m)))
			return ret;

	} else {
		int shift = ctx->key_sz - req->src_len;

		qat_req->src_align = dma_alloc_coherent(dev, ctx->key_sz,
							&qat_req->in.rsa.enc.m,
							GFP_KERNEL);
		if (unlikely(!qat_req->src_align))
			return ret;

		scatterwalk_map_and_copy(qat_req->src_align + shift, req->src,
					 0, req->src_len, 0);
	}
	if (sg_is_last(req->dst) && req->dst_len == ctx->key_sz) {
		qat_req->dst_align = NULL;
		qat_req->out.rsa.enc.c = dma_map_single(dev, sg_virt(req->dst),
							req->dst_len,
							DMA_FROM_DEVICE);

		if (unlikely(dma_mapping_error(dev, qat_req->out.rsa.enc.c)))
			goto unmap_src;

	} else {
		qat_req->dst_align = dma_alloc_coherent(dev, ctx->key_sz,
							&qat_req->out.rsa.enc.c,
							GFP_KERNEL);
		if (unlikely(!qat_req->dst_align))
			goto unmap_src;
	}
	qat_req->in.rsa.in_tab[3] = 0;
	qat_req->out.rsa.out_tab[1] = 0;
	qat_req->phy_in = dma_map_single(dev, &qat_req->in.rsa.enc.m,
					 sizeof(struct qat_rsa_input_params),
					 DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, qat_req->phy_in)))
		goto unmap_dst;

	qat_req->phy_out = dma_map_single(dev, &qat_req->out.rsa.enc.c,
					  sizeof(struct qat_rsa_output_params),
					  DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, qat_req->phy_out)))
		goto unmap_in_params;

	msg->pke_mid.src_data_addr = qat_req->phy_in;
	msg->pke_mid.dest_data_addr = qat_req->phy_out;
	msg->pke_mid.opaque = (u64)(__force long)qat_req;
	msg->input_param_count = 3;
	msg->output_param_count = 1;

	do {
		ret = adf_send_message(ctx->inst->pke_tx, (u32 *)msg);
	} while (ret == -EBUSY && ctr++ < 100);

	if (!ret)
		return -EINPROGRESS;

	if (!dma_mapping_error(dev, qat_req->phy_out))
		dma_unmap_single(dev, qat_req->phy_out,
				 sizeof(struct qat_rsa_output_params),
				 DMA_TO_DEVICE);
unmap_in_params:
	if (!dma_mapping_error(dev, qat_req->phy_in))
		dma_unmap_single(dev, qat_req->phy_in,
				 sizeof(struct qat_rsa_input_params),
				 DMA_TO_DEVICE);
unmap_dst:
	if (qat_req->dst_align)
		dma_free_coherent(dev, ctx->key_sz, qat_req->dst_align,
				  qat_req->out.rsa.enc.c);
	else
		if (!dma_mapping_error(dev, qat_req->out.rsa.enc.c))
			dma_unmap_single(dev, qat_req->out.rsa.enc.c,
					 ctx->key_sz, DMA_FROM_DEVICE);
unmap_src:
	if (qat_req->src_align)
		dma_free_coherent(dev, ctx->key_sz, qat_req->src_align,
				  qat_req->in.rsa.enc.m);
	else
		if (!dma_mapping_error(dev, qat_req->in.rsa.enc.m))
			dma_unmap_single(dev, qat_req->in.rsa.enc.m,
					 ctx->key_sz, DMA_TO_DEVICE);
	return ret;
}
static int qat_rsa_dec(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct qat_crypto_instance *inst = ctx->inst;
	struct device *dev = &GET_DEV(inst->accel_dev);
	struct qat_asym_request *qat_req =
			PTR_ALIGN(akcipher_request_ctx(req), 64);
	struct icp_qat_fw_pke_request *msg = &qat_req->req;
	int ret, ctr = 0;

	if (unlikely(!ctx->n || !ctx->d))
		return -EINVAL;

	if (req->dst_len < ctx->key_sz) {
		req->dst_len = ctx->key_sz;
		return -EOVERFLOW;
	}
	memset(msg, '\0', sizeof(*msg));
	ICP_QAT_FW_PKE_HDR_VALID_FLAG_SET(msg->pke_hdr,
					  ICP_QAT_FW_COMN_REQ_FLAG_SET);
	msg->pke_hdr.cd_pars.func_id = ctx->crt_mode ?
		qat_rsa_dec_fn_id_crt(ctx->key_sz) :
		qat_rsa_dec_fn_id(ctx->key_sz);
	if (unlikely(!msg->pke_hdr.cd_pars.func_id))
		return -EINVAL;

	qat_req->cb = qat_rsa_cb;
	qat_req->ctx.rsa = ctx;
	qat_req->areq.rsa = req;
	msg->pke_hdr.service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_PKE;
	msg->pke_hdr.comn_req_flags =
		ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_PTR_TYPE_FLAT,
					    QAT_COMN_CD_FLD_TYPE_64BIT_ADR);

	if (ctx->crt_mode) {
		qat_req->in.rsa.dec_crt.p = ctx->dma_p;
		qat_req->in.rsa.dec_crt.q = ctx->dma_q;
		qat_req->in.rsa.dec_crt.dp = ctx->dma_dp;
		qat_req->in.rsa.dec_crt.dq = ctx->dma_dq;
		qat_req->in.rsa.dec_crt.qinv = ctx->dma_qinv;
	} else {
		qat_req->in.rsa.dec.d = ctx->dma_d;
		qat_req->in.rsa.dec.n = ctx->dma_n;
	}
	ret = -ENOMEM;

	/*
	 * src can be of any size in valid range, but HW expects it to be the
	 * same as modulo n so in case it is different we need to allocate a
	 * new buf and copy src data.
	 * In other case we just need to map the user provided buffer.
	 * Also need to make sure that it is in contiguous buffer.
	 */
	if (sg_is_last(req->src) && req->src_len == ctx->key_sz) {
		qat_req->src_align = NULL;
		qat_req->in.rsa.dec.c = dma_map_single(dev, sg_virt(req->src),
						       req->src_len,
						       DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(dev, qat_req->in.rsa.dec.c)))
			return ret;

	} else {
		int shift = ctx->key_sz - req->src_len;

		qat_req->src_align = dma_alloc_coherent(dev, ctx->key_sz,
							&qat_req->in.rsa.dec.c,
							GFP_KERNEL);
		if (unlikely(!qat_req->src_align))
			return ret;

		scatterwalk_map_and_copy(qat_req->src_align + shift, req->src,
					 0, req->src_len, 0);
	}
	if (sg_is_last(req->dst) && req->dst_len == ctx->key_sz) {
		qat_req->dst_align = NULL;
		qat_req->out.rsa.dec.m = dma_map_single(dev, sg_virt(req->dst),
							req->dst_len,
							DMA_FROM_DEVICE);

		if (unlikely(dma_mapping_error(dev, qat_req->out.rsa.dec.m)))
			goto unmap_src;

	} else {
		qat_req->dst_align = dma_alloc_coherent(dev, ctx->key_sz,
							&qat_req->out.rsa.dec.m,
							GFP_KERNEL);
		if (unlikely(!qat_req->dst_align))
			goto unmap_src;
	}

	if (ctx->crt_mode)
		qat_req->in.rsa.in_tab[6] = 0;
	else
		qat_req->in.rsa.in_tab[3] = 0;
	qat_req->out.rsa.out_tab[1] = 0;
	qat_req->phy_in = dma_map_single(dev, &qat_req->in.rsa.dec.c,
					 sizeof(struct qat_rsa_input_params),
					 DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, qat_req->phy_in)))
		goto unmap_dst;

	qat_req->phy_out = dma_map_single(dev, &qat_req->out.rsa.dec.m,
					  sizeof(struct qat_rsa_output_params),
					  DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, qat_req->phy_out)))
		goto unmap_in_params;

	msg->pke_mid.src_data_addr = qat_req->phy_in;
	msg->pke_mid.dest_data_addr = qat_req->phy_out;
	msg->pke_mid.opaque = (u64)(__force long)qat_req;
	if (ctx->crt_mode)
		msg->input_param_count = 6;
	else
		msg->input_param_count = 3;

	msg->output_param_count = 1;

	do {
		ret = adf_send_message(ctx->inst->pke_tx, (u32 *)msg);
	} while (ret == -EBUSY && ctr++ < 100);

	if (!ret)
		return -EINPROGRESS;

	if (!dma_mapping_error(dev, qat_req->phy_out))
		dma_unmap_single(dev, qat_req->phy_out,
				 sizeof(struct qat_rsa_output_params),
				 DMA_TO_DEVICE);
unmap_in_params:
	if (!dma_mapping_error(dev, qat_req->phy_in))
		dma_unmap_single(dev, qat_req->phy_in,
				 sizeof(struct qat_rsa_input_params),
				 DMA_TO_DEVICE);
unmap_dst:
	if (qat_req->dst_align)
		dma_free_coherent(dev, ctx->key_sz, qat_req->dst_align,
				  qat_req->out.rsa.dec.m);
	else
		if (!dma_mapping_error(dev, qat_req->out.rsa.dec.m))
			dma_unmap_single(dev, qat_req->out.rsa.dec.m,
					 ctx->key_sz, DMA_FROM_DEVICE);
unmap_src:
	if (qat_req->src_align)
		dma_free_coherent(dev, ctx->key_sz, qat_req->src_align,
				  qat_req->in.rsa.dec.c);
	else
		if (!dma_mapping_error(dev, qat_req->in.rsa.dec.c))
			dma_unmap_single(dev, qat_req->in.rsa.dec.c,
					 ctx->key_sz, DMA_TO_DEVICE);
	return ret;
}
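
/*
 * Key-component setters. The crypto API hands over big-endian integers
 * that may carry leading zero bytes; these are stripped, and the value
 * is copied right-aligned (zero-padded at the front) into a DMA buffer
 * of the full key size, matching the fixed operand size the firmware
 * works with.
 */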
static int qat_rsa_set_n(struct qat_rsa_ctx *ctx, const char *value,
			 size_t vlen)
{
	struct qat_crypto_instance *inst = ctx->inst;
	struct device *dev = &GET_DEV(inst->accel_dev);
	const char *ptr = value;
	int ret;

	while (!*ptr && vlen) {
		ptr++;
		vlen--;
	}

	ctx->key_sz = vlen;
	ret = -EINVAL;
	/* invalid key size provided */
	if (!qat_rsa_enc_fn_id(ctx->key_sz))
		goto err;

	ret = -ENOMEM;
	ctx->n = dma_alloc_coherent(dev, ctx->key_sz, &ctx->dma_n, GFP_KERNEL);
	if (!ctx->n)
		goto err;

	memcpy(ctx->n, ptr, ctx->key_sz);
	return 0;
err:
	ctx->key_sz = 0;
	ctx->n = NULL;
	return ret;
}
static int qat_rsa_set_e(struct qat_rsa_ctx *ctx, const char *value,
			 size_t vlen)
{
	struct qat_crypto_instance *inst = ctx->inst;
	struct device *dev = &GET_DEV(inst->accel_dev);
	const char *ptr = value;

	while (!*ptr && vlen) {
		ptr++;
		vlen--;
	}

	if (!ctx->key_sz || !vlen || vlen > ctx->key_sz) {
		ctx->e = NULL;
		return -EINVAL;
	}

	ctx->e = dma_alloc_coherent(dev, ctx->key_sz, &ctx->dma_e, GFP_KERNEL);
	if (!ctx->e)
		return -ENOMEM;

	memcpy(ctx->e + (ctx->key_sz - vlen), ptr, vlen);
	return 0;
}
static int qat_rsa_set_d(struct qat_rsa_ctx *ctx, const char *value,
			 size_t vlen)
{
	struct qat_crypto_instance *inst = ctx->inst;
	struct device *dev = &GET_DEV(inst->accel_dev);
	const char *ptr = value;
	int ret;

	while (!*ptr && vlen) {
		ptr++;
		vlen--;
	}

	ret = -EINVAL;
	if (!ctx->key_sz || !vlen || vlen > ctx->key_sz)
		goto err;

	ret = -ENOMEM;
	ctx->d = dma_alloc_coherent(dev, ctx->key_sz, &ctx->dma_d, GFP_KERNEL);
	if (!ctx->d)
		goto err;

	memcpy(ctx->d + (ctx->key_sz - vlen), ptr, vlen);
	return 0;
err:
	ctx->d = NULL;
	return ret;
}
static void qat_rsa_drop_leading_zeros(const char **ptr, unsigned int *len)
{
	while (!**ptr && *len) {
		(*ptr)++;
		(*len)--;
	}
}
static void qat_rsa_setkey_crt(struct qat_rsa_ctx *ctx, struct rsa_key *rsa_key)
{
	struct qat_crypto_instance *inst = ctx->inst;
	struct device *dev = &GET_DEV(inst->accel_dev);
	const char *ptr;
	unsigned int len;
	unsigned int half_key_sz = ctx->key_sz / 2;

	/* p */
	ptr = rsa_key->p;
	len = rsa_key->p_sz;
	qat_rsa_drop_leading_zeros(&ptr, &len);
	if (!len)
		goto err;
	ctx->p = dma_alloc_coherent(dev, half_key_sz, &ctx->dma_p, GFP_KERNEL);
	if (!ctx->p)
		goto err;
	memcpy(ctx->p + (half_key_sz - len), ptr, len);

	/* q */
	ptr = rsa_key->q;
	len = rsa_key->q_sz;
	qat_rsa_drop_leading_zeros(&ptr, &len);
	if (!len)
		goto free_p;
	ctx->q = dma_alloc_coherent(dev, half_key_sz, &ctx->dma_q, GFP_KERNEL);
	if (!ctx->q)
		goto free_p;
	memcpy(ctx->q + (half_key_sz - len), ptr, len);

	/* dp */
	ptr = rsa_key->dp;
	len = rsa_key->dp_sz;
	qat_rsa_drop_leading_zeros(&ptr, &len);
	if (!len)
		goto free_q;
	ctx->dp = dma_alloc_coherent(dev, half_key_sz, &ctx->dma_dp,
				     GFP_KERNEL);
	if (!ctx->dp)
		goto free_q;
	memcpy(ctx->dp + (half_key_sz - len), ptr, len);

	/* dq */
	ptr = rsa_key->dq;
	len = rsa_key->dq_sz;
	qat_rsa_drop_leading_zeros(&ptr, &len);
	if (!len)
		goto free_dp;
	ctx->dq = dma_alloc_coherent(dev, half_key_sz, &ctx->dma_dq,
				     GFP_KERNEL);
	if (!ctx->dq)
		goto free_dp;
	memcpy(ctx->dq + (half_key_sz - len), ptr, len);

	/* qinv */
	ptr = rsa_key->qinv;
	len = rsa_key->qinv_sz;
	qat_rsa_drop_leading_zeros(&ptr, &len);
	if (!len)
		goto free_dq;
	ctx->qinv = dma_alloc_coherent(dev, half_key_sz, &ctx->dma_qinv,
				       GFP_KERNEL);
	if (!ctx->qinv)
		goto free_dq;
	memcpy(ctx->qinv + (half_key_sz - len), ptr, len);

	ctx->crt_mode = true;
	return;

free_dq:
	memset(ctx->dq, '\0', half_key_sz);
	dma_free_coherent(dev, half_key_sz, ctx->dq, ctx->dma_dq);
	ctx->dq = NULL;
free_dp:
	memset(ctx->dp, '\0', half_key_sz);
	dma_free_coherent(dev, half_key_sz, ctx->dp, ctx->dma_dp);
	ctx->dp = NULL;
free_q:
	memset(ctx->q, '\0', half_key_sz);
	dma_free_coherent(dev, half_key_sz, ctx->q, ctx->dma_q);
	ctx->q = NULL;
free_p:
	memset(ctx->p, '\0', half_key_sz);
	dma_free_coherent(dev, half_key_sz, ctx->p, ctx->dma_p);
	ctx->p = NULL;
err:
	ctx->crt_mode = false;
}
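
/*
 * qat_rsa_setkey_crt() deliberately returns void: if any CRT component
 * is missing or cannot be allocated, it wipes and frees whatever was set
 * up and leaves crt_mode false, so qat_rsa_dec() silently falls back to
 * the plain (d, n) private-key path.
 */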
static void qat_rsa_clear_ctx(struct device *dev, struct qat_rsa_ctx *ctx)
{
	unsigned int half_key_sz = ctx->key_sz / 2;

	/* Free the old key if any */
	if (ctx->n)
		dma_free_coherent(dev, ctx->key_sz, ctx->n, ctx->dma_n);
	if (ctx->e)
		dma_free_coherent(dev, ctx->key_sz, ctx->e, ctx->dma_e);
	if (ctx->d) {
		memset(ctx->d, '\0', ctx->key_sz);
		dma_free_coherent(dev, ctx->key_sz, ctx->d, ctx->dma_d);
	}
	if (ctx->p) {
		memset(ctx->p, '\0', half_key_sz);
		dma_free_coherent(dev, half_key_sz, ctx->p, ctx->dma_p);
	}
	if (ctx->q) {
		memset(ctx->q, '\0', half_key_sz);
		dma_free_coherent(dev, half_key_sz, ctx->q, ctx->dma_q);
	}
	if (ctx->dp) {
		memset(ctx->dp, '\0', half_key_sz);
		dma_free_coherent(dev, half_key_sz, ctx->dp, ctx->dma_dp);
	}
	if (ctx->dq) {
		memset(ctx->dq, '\0', half_key_sz);
		dma_free_coherent(dev, half_key_sz, ctx->dq, ctx->dma_dq);
	}
	if (ctx->qinv) {
		memset(ctx->qinv, '\0', half_key_sz);
		dma_free_coherent(dev, half_key_sz, ctx->qinv, ctx->dma_qinv);
	}

	ctx->n = NULL;
	ctx->e = NULL;
	ctx->d = NULL;
	ctx->p = NULL;
	ctx->q = NULL;
	ctx->dp = NULL;
	ctx->dq = NULL;
	ctx->qinv = NULL;
	ctx->crt_mode = false;
	ctx->key_sz = 0;
}
static int qat_rsa_setkey(struct crypto_akcipher *tfm, const void *key,
			  unsigned int keylen, bool private)
{
	struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct device *dev = &GET_DEV(ctx->inst->accel_dev);
	struct rsa_key rsa_key;
	int ret;

	qat_rsa_clear_ctx(dev, ctx);

	if (private)
		ret = rsa_parse_priv_key(&rsa_key, key, keylen);
	else
		ret = rsa_parse_pub_key(&rsa_key, key, keylen);
	if (ret < 0)
		goto free;

	ret = qat_rsa_set_n(ctx, rsa_key.n, rsa_key.n_sz);
	if (ret < 0)
		goto free;
	ret = qat_rsa_set_e(ctx, rsa_key.e, rsa_key.e_sz);
	if (ret < 0)
		goto free;
	if (private) {
		ret = qat_rsa_set_d(ctx, rsa_key.d, rsa_key.d_sz);
		if (ret < 0)
			goto free;
		qat_rsa_setkey_crt(ctx, &rsa_key);
	}

	if (!ctx->n || !ctx->e) {
		/* invalid key provided */
		ret = -EINVAL;
		goto free;
	}
	if (private && !ctx->d) {
		/* invalid private key provided */
		ret = -EINVAL;
		goto free;
	}

	return 0;
free:
	qat_rsa_clear_ctx(dev, ctx);
	return ret;
}
static int qat_rsa_setpubkey(struct crypto_akcipher *tfm, const void *key,
			     unsigned int keylen)
{
	return qat_rsa_setkey(tfm, key, keylen, false);
}
static int qat_rsa_setprivkey(struct crypto_akcipher *tfm, const void *key,
			      unsigned int keylen)
{
	return qat_rsa_setkey(tfm, key, keylen, true);
}
static unsigned int qat_rsa_max_size(struct crypto_akcipher *tfm)
{
	struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);

	return ctx->key_sz;
}
static int qat_rsa_init_tfm(struct crypto_akcipher *tfm)
{
	struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct qat_crypto_instance *inst =
			qat_crypto_get_instance_node(get_current_node());

	if (!inst)
		return -EINVAL;

	ctx->key_sz = 0;
	ctx->inst = inst;
	return 0;
}
static void qat_rsa_exit_tfm(struct crypto_akcipher *tfm)
{
	struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct device *dev = &GET_DEV(ctx->inst->accel_dev);

	if (ctx->n)
		dma_free_coherent(dev, ctx->key_sz, ctx->n, ctx->dma_n);
	if (ctx->e)
		dma_free_coherent(dev, ctx->key_sz, ctx->e, ctx->dma_e);
	if (ctx->d) {
		memset(ctx->d, '\0', ctx->key_sz);
		dma_free_coherent(dev, ctx->key_sz, ctx->d, ctx->dma_d);
	}
	qat_crypto_put_instance(ctx->inst);
	ctx->n = NULL;
	ctx->e = NULL;
	ctx->d = NULL;
}
static struct akcipher_alg rsa = {
	.encrypt = qat_rsa_enc,
	.decrypt = qat_rsa_dec,
	.set_pub_key = qat_rsa_setpubkey,
	.set_priv_key = qat_rsa_setprivkey,
	.max_size = qat_rsa_max_size,
	.init = qat_rsa_init_tfm,
	.exit = qat_rsa_exit_tfm,
	.reqsize = sizeof(struct qat_asym_request) + 64,
	.base = {
		.cra_name = "rsa",
		.cra_driver_name = "qat-rsa",
		.cra_priority = 1000,
		.cra_module = THIS_MODULE,
		.cra_ctxsize = sizeof(struct qat_rsa_ctx),
	},
};
static struct kpp_alg dh = {
	.set_secret = qat_dh_set_secret,
	.generate_public_key = qat_dh_compute_value,
	.compute_shared_secret = qat_dh_compute_value,
	.max_size = qat_dh_max_size,
	.init = qat_dh_init_tfm,
	.exit = qat_dh_exit_tfm,
	.reqsize = sizeof(struct qat_asym_request) + 64,
	.base = {
		.cra_name = "dh",
		.cra_driver_name = "qat-dh",
		.cra_priority = 1000,
		.cra_module = THIS_MODULE,
		.cra_ctxsize = sizeof(struct qat_dh_ctx),
	},
};
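
/*
 * The algorithms are registered once, when the first accelerator comes
 * up, and unregistered when the last one goes away; active_devs is the
 * refcount and algs_lock serializes updates to it.
 */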
int qat_asym_algs_register(void)
{
	int ret = 0;

	mutex_lock(&algs_lock);
	if (++active_devs == 1) {
		rsa.base.cra_flags = 0;
		ret = crypto_register_akcipher(&rsa);
		if (ret)
			goto unlock;
		ret = crypto_register_kpp(&dh);
	}
unlock:
	mutex_unlock(&algs_lock);
	return ret;
}
void qat_asym_algs_unregister(void)
{
	mutex_lock(&algs_lock);
	if (--active_devs == 0) {
		crypto_unregister_akcipher(&rsa);
		crypto_unregister_kpp(&dh);
	}
	mutex_unlock(&algs_lock);
}