1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
4 #include <crypto/internal/aead.h>
5 #include <crypto/authenc.h>
6 #include <crypto/scatterwalk.h>
7 #include <linux/dmapool.h>
8 #include <linux/dma-mapping.h>
10 #include "cc_buffer_mgr.h"
11 #include "cc_lli_defs.h"
12 #include "cc_cipher.h"
16 union buffer_array_entry {
17 struct scatterlist *sgl;
18 dma_addr_t buffer_dma;
22 unsigned int num_of_buffers;
23 union buffer_array_entry entry[MAX_NUM_OF_BUFFERS_IN_MLLI];
24 unsigned int offset[MAX_NUM_OF_BUFFERS_IN_MLLI];
25 int nents[MAX_NUM_OF_BUFFERS_IN_MLLI];
26 int total_data_len[MAX_NUM_OF_BUFFERS_IN_MLLI];
27 bool is_last[MAX_NUM_OF_BUFFERS_IN_MLLI];
28 u32 *mlli_nents[MAX_NUM_OF_BUFFERS_IN_MLLI];
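/* A buffer_array gathers up to MAX_NUM_OF_BUFFERS_IN_MLLI scatterlists (or
 * flat DMA buffers) together with their offsets, lengths and per-table entry
 * counters; cc_generate_mlli() later flattens them into a single MLLI table.
 */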
31 static inline char *cc_dma_buf_type(enum cc_req_dma_buf_type type)
46 * cc_copy_mac() - Copy MAC to temporary location
49 * @req: aead request object
50 * @dir: [IN] copy from/to sgl
52 static void cc_copy_mac(struct device *dev, struct aead_request *req,
53 enum cc_sg_cpy_direct dir)
55 struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
56 u32 skip = req->assoclen + req->cryptlen;
58 cc_copy_sg_portion(dev, areq_ctx->backup_mac, req->src,
59 (skip - areq_ctx->req_authsize), skip, dir);
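/* On decrypt req->cryptlen includes the MAC, so the MAC occupies the last
 * req_authsize bytes of req->src, i.e. the byte range [skip - authsize, skip).
 */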
63 * cc_get_sgl_nents() - Get scatterlist number of entries.
67 * @nbytes: [IN] Total SGL data bytes.
68 * @lbytes: [OUT] Returns the number of bytes in the last entry
71 * Number of entries in the scatterlist
73 static unsigned int cc_get_sgl_nents(struct device *dev,
74 struct scatterlist *sg_list,
75 unsigned int nbytes, u32 *lbytes)
77 unsigned int nents = 0;
81 while (nbytes && sg_list) {
83 /* get the number of bytes in the last entry */
85 nbytes -= (sg_list->length > nbytes) ?
86 nbytes : sg_list->length;
87 sg_list = sg_next(sg_list);
90 dev_dbg(dev, "nents %d last bytes %d\n", nents, *lbytes);
95 * cc_copy_sg_portion() - Copy scatterlist data, from to_skip to end,
96 * to dest, or the other way around
99 * @dest: Buffer to copy to/from
101 * @to_skip: Number of bytes to skip before copying
102 * @end: Offset of last byte to copy
103 * @direct: Transfer direction (true == from SG list to buffer, false == from
106 void cc_copy_sg_portion(struct device *dev, u8 *dest, struct scatterlist *sg,
107 u32 to_skip, u32 end, enum cc_sg_cpy_direct direct)
111 nents = sg_nents_for_len(sg, end);
112 sg_copy_buffer(sg, nents, dest, (end - to_skip + 1), to_skip,
113 (direct == CC_SG_TO_BUF));
116 static int cc_render_buff_to_mlli(struct device *dev, dma_addr_t buff_dma,
117 u32 buff_size, u32 *curr_nents,
120 u32 *mlli_entry_p = *mlli_entry_pp;
123 /* Verify there is no memory overflow */
124 new_nents = (*curr_nents + buff_size / CC_MAX_MLLI_ENTRY_SIZE + 1);
125 if (new_nents > MAX_NUM_OF_TOTAL_MLLI_ENTRIES) {
126 dev_err(dev, "Too many mlli entries. current %d max %d\n",
127 new_nents, MAX_NUM_OF_TOTAL_MLLI_ENTRIES);
131 /* Handle a buffer longer than 64 KB */
132 while (buff_size > CC_MAX_MLLI_ENTRY_SIZE) {
133 cc_lli_set_addr(mlli_entry_p, buff_dma);
134 cc_lli_set_size(mlli_entry_p, CC_MAX_MLLI_ENTRY_SIZE);
135 dev_dbg(dev, "entry[%d]: single_buff=0x%08X size=%08X\n",
136 *curr_nents, mlli_entry_p[LLI_WORD0_OFFSET],
137 mlli_entry_p[LLI_WORD1_OFFSET]);
138 buff_dma += CC_MAX_MLLI_ENTRY_SIZE;
139 buff_size -= CC_MAX_MLLI_ENTRY_SIZE;
140 mlli_entry_p = mlli_entry_p + 2;
144 cc_lli_set_addr(mlli_entry_p, buff_dma);
145 cc_lli_set_size(mlli_entry_p, buff_size);
146 dev_dbg(dev, "entry[%d]: single_buff=0x%08X size=%08X\n",
147 *curr_nents, mlli_entry_p[LLI_WORD0_OFFSET],
148 mlli_entry_p[LLI_WORD1_OFFSET]);
149 mlli_entry_p = mlli_entry_p + 2;
150 *mlli_entry_pp = mlli_entry_p;
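/* The final entry covers the remainder (at most CC_MAX_MLLI_ENTRY_SIZE bytes)
 * and the caller's MLLI write pointer is advanced past everything just
 * emitted.
 */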
155 static int cc_render_sg_to_mlli(struct device *dev, struct scatterlist *sgl,
156 u32 sgl_data_len, u32 sgl_offset,
157 u32 *curr_nents, u32 **mlli_entry_pp)
159 struct scatterlist *curr_sgl = sgl;
160 u32 *mlli_entry_p = *mlli_entry_pp;
163 for ( ; (curr_sgl && sgl_data_len);
164 curr_sgl = sg_next(curr_sgl)) {
166 (sgl_data_len > sg_dma_len(curr_sgl) - sgl_offset) ?
167 sg_dma_len(curr_sgl) - sgl_offset :
169 sgl_data_len -= entry_data_len;
170 rc = cc_render_buff_to_mlli(dev, sg_dma_address(curr_sgl) +
171 sgl_offset, entry_data_len,
172 curr_nents, &mlli_entry_p);
178 *mlli_entry_pp = mlli_entry_p;
182 static int cc_generate_mlli(struct device *dev, struct buffer_array *sg_data,
183 struct mlli_params *mlli_params, gfp_t flags)
186 u32 total_nents = 0, prev_total_nents = 0;
189 dev_dbg(dev, "NUM of SG's = %d\n", sg_data->num_of_buffers);
191 /* Allocate memory from the selected DMA pool */
192 mlli_params->mlli_virt_addr =
193 dma_pool_alloc(mlli_params->curr_pool, flags,
194 &mlli_params->mlli_dma_addr);
195 if (!mlli_params->mlli_virt_addr) {
196 dev_err(dev, "dma_pool_alloc() failed\n");
198 goto build_mlli_exit;
200 /* Point to start of MLLI */
201 mlli_p = mlli_params->mlli_virt_addr;
202 /* Go over all SG lists and link them into one MLLI table */
203 for (i = 0; i < sg_data->num_of_buffers; i++) {
204 union buffer_array_entry *entry = &sg_data->entry[i];
205 u32 tot_len = sg_data->total_data_len[i];
206 u32 offset = sg_data->offset[i];
208 rc = cc_render_sg_to_mlli(dev, entry->sgl, tot_len, offset,
209 &total_nents, &mlli_p);
213 /* set last bit in the current table */
214 if (sg_data->mlli_nents[i]) {
215 /* Calculate the current MLLI table length for the
216 * length field in the descriptor */
218 *sg_data->mlli_nents[i] +=
219 (total_nents - prev_total_nents);
220 prev_total_nents = total_nents;
224 /* Set MLLI size for the bypass operation */
225 mlli_params->mlli_len = (total_nents * LLI_ENTRY_BYTE_SIZE);
227 dev_dbg(dev, "MLLI params: virt_addr=%pK dma_addr=%pad mlli_len=0x%X\n",
228 mlli_params->mlli_virt_addr, &mlli_params->mlli_dma_addr,
229 mlli_params->mlli_len);
235 static void cc_add_sg_entry(struct device *dev, struct buffer_array *sgl_data,
236 unsigned int nents, struct scatterlist *sgl,
237 unsigned int data_len, unsigned int data_offset,
238 bool is_last_table, u32 *mlli_nents)
240 unsigned int index = sgl_data->num_of_buffers;
242 dev_dbg(dev, "index=%u nents=%u sgl=%pK data_len=0x%08X is_last=%d\n",
243 index, nents, sgl, data_len, is_last_table);
244 sgl_data->nents[index] = nents;
245 sgl_data->entry[index].sgl = sgl;
246 sgl_data->offset[index] = data_offset;
247 sgl_data->total_data_len[index] = data_len;
248 sgl_data->is_last[index] = is_last_table;
249 sgl_data->mlli_nents[index] = mlli_nents;
250 if (sgl_data->mlli_nents[index])
251 *sgl_data->mlli_nents[index] = 0;
252 sgl_data->num_of_buffers++;
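/* Note that cc_add_sg_entry() zeroes the caller's mlli_nents counter so that
 * cc_generate_mlli() can later accumulate the number of LLI entries produced
 * for this table.
 */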
255 static int cc_map_sg(struct device *dev, struct scatterlist *sg,
256 unsigned int nbytes, int direction, u32 *nents,
257 u32 max_sg_nents, u32 *lbytes, u32 *mapped_nents)
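/* Note: *nents is the number of SG entries needed to cover nbytes and is what
 * dma_unmap_sg() must later be called with; dma_map_sg() may return fewer
 * mapped entries if adjacent segments get merged, which is reported
 * separately through *mapped_nents.
 */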
268 *nents = cc_get_sgl_nents(dev, sg, nbytes, lbytes);
269 if (*nents > max_sg_nents) {
271 dev_err(dev, "Too many fragments. current %d max %d\n",
272 *nents, max_sg_nents);
276 ret = dma_map_sg(dev, sg, *nents, direction);
277 if (dma_mapping_error(dev, ret)) {
279 dev_err(dev, "dma_map_sg() sg buffer failed %d\n", ret);
289 cc_set_aead_conf_buf(struct device *dev, struct aead_req_ctx *areq_ctx,
290 u8 *config_data, struct buffer_array *sg_data,
291 unsigned int assoclen)
293 dev_dbg(dev, " handle additional data config set to DLLI\n");
294 /* create sg for the current buffer */
295 sg_init_one(&areq_ctx->ccm_adata_sg, config_data,
296 AES_BLOCK_SIZE + areq_ctx->ccm_hdr_size);
297 if (dma_map_sg(dev, &areq_ctx->ccm_adata_sg, 1, DMA_TO_DEVICE) != 1) {
298 dev_err(dev, "dma_map_sg() config buffer failed\n");
301 dev_dbg(dev, "Mapped curr_buff: dma_address=%pad page=%p addr=%pK offset=%u length=%u\n",
302 &sg_dma_address(&areq_ctx->ccm_adata_sg),
303 sg_page(&areq_ctx->ccm_adata_sg),
304 sg_virt(&areq_ctx->ccm_adata_sg),
305 areq_ctx->ccm_adata_sg.offset, areq_ctx->ccm_adata_sg.length);
306 /* prepare for case of MLLI */
308 cc_add_sg_entry(dev, sg_data, 1, &areq_ctx->ccm_adata_sg,
309 (AES_BLOCK_SIZE + areq_ctx->ccm_hdr_size),
315 static int cc_set_hash_buf(struct device *dev, struct ahash_req_ctx *areq_ctx,
316 u8 *curr_buff, u32 curr_buff_cnt,
317 struct buffer_array *sg_data)
319 dev_dbg(dev, " handle curr buff %x set to DLLI\n", curr_buff_cnt);
320 /* create sg for the current buffer */
321 sg_init_one(areq_ctx->buff_sg, curr_buff, curr_buff_cnt);
322 if (dma_map_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE) != 1) {
323 dev_err(dev, "dma_map_sg() src buffer failed\n");
326 dev_dbg(dev, "Mapped curr_buff: dma_address=%pad page=%p addr=%pK offset=%u length=%u\n",
327 &sg_dma_address(areq_ctx->buff_sg), sg_page(areq_ctx->buff_sg),
328 sg_virt(areq_ctx->buff_sg), areq_ctx->buff_sg->offset,
329 areq_ctx->buff_sg->length);
330 areq_ctx->data_dma_buf_type = CC_DMA_BUF_DLLI;
331 areq_ctx->curr_sg = areq_ctx->buff_sg;
332 areq_ctx->in_nents = 0;
333 /* prepare for case of MLLI */
334 cc_add_sg_entry(dev, sg_data, 1, areq_ctx->buff_sg, curr_buff_cnt, 0,
339 void cc_unmap_cipher_request(struct device *dev, void *ctx,
340 unsigned int ivsize, struct scatterlist *src,
341 struct scatterlist *dst)
343 struct cipher_req_ctx *req_ctx = (struct cipher_req_ctx *)ctx;
345 if (req_ctx->gen_ctx.iv_dma_addr) {
346 dev_dbg(dev, "Unmapped iv: iv_dma_addr=%pad iv_size=%u\n",
347 &req_ctx->gen_ctx.iv_dma_addr, ivsize);
348 dma_unmap_single(dev, req_ctx->gen_ctx.iv_dma_addr,
349 ivsize, DMA_BIDIRECTIONAL);
352 if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI &&
353 req_ctx->mlli_params.mlli_virt_addr) {
354 dma_pool_free(req_ctx->mlli_params.curr_pool,
355 req_ctx->mlli_params.mlli_virt_addr,
356 req_ctx->mlli_params.mlli_dma_addr);
359 dma_unmap_sg(dev, src, req_ctx->in_nents, DMA_BIDIRECTIONAL);
360 dev_dbg(dev, "Unmapped req->src=%pK\n", sg_virt(src));
363 dma_unmap_sg(dev, dst, req_ctx->out_nents, DMA_BIDIRECTIONAL);
364 dev_dbg(dev, "Unmapped req->dst=%pK\n", sg_virt(dst));
368 int cc_map_cipher_request(struct cc_drvdata *drvdata, void *ctx,
369 unsigned int ivsize, unsigned int nbytes,
370 void *info, struct scatterlist *src,
371 struct scatterlist *dst, gfp_t flags)
373 struct cipher_req_ctx *req_ctx = (struct cipher_req_ctx *)ctx;
374 struct mlli_params *mlli_params = &req_ctx->mlli_params;
375 struct device *dev = drvdata_to_dev(drvdata);
376 struct buffer_array sg_data;
379 u32 mapped_nents = 0;
381 req_ctx->dma_buf_type = CC_DMA_BUF_DLLI;
382 mlli_params->curr_pool = NULL;
383 sg_data.num_of_buffers = 0;
387 dump_byte_array("iv", info, ivsize);
388 req_ctx->gen_ctx.iv_dma_addr =
389 dma_map_single(dev, info, ivsize, DMA_BIDIRECTIONAL);
390 if (dma_mapping_error(dev, req_ctx->gen_ctx.iv_dma_addr)) {
391 dev_err(dev, "Mapping iv %u B at va=%pK for DMA failed\n",
395 dev_dbg(dev, "Mapped iv %u B at va=%pK to dma=%pad\n",
396 ivsize, info, &req_ctx->gen_ctx.iv_dma_addr);
398 req_ctx->gen_ctx.iv_dma_addr = 0;
401 /* Map the src SGL */
402 rc = cc_map_sg(dev, src, nbytes, DMA_BIDIRECTIONAL, &req_ctx->in_nents,
403 LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy, &mapped_nents);
406 if (mapped_nents > 1)
407 req_ctx->dma_buf_type = CC_DMA_BUF_MLLI;
410 /* Handle inplace operation */
411 if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI) {
412 req_ctx->out_nents = 0;
413 cc_add_sg_entry(dev, &sg_data, req_ctx->in_nents, src,
415 &req_ctx->in_mlli_nents);
419 rc = cc_map_sg(dev, dst, nbytes, DMA_BIDIRECTIONAL,
420 &req_ctx->out_nents, LLI_MAX_NUM_OF_DATA_ENTRIES,
421 &dummy, &mapped_nents);
424 if (mapped_nents > 1)
425 req_ctx->dma_buf_type = CC_DMA_BUF_MLLI;
427 if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI) {
428 cc_add_sg_entry(dev, &sg_data, req_ctx->in_nents, src,
430 &req_ctx->in_mlli_nents);
431 cc_add_sg_entry(dev, &sg_data, req_ctx->out_nents, dst,
433 &req_ctx->out_mlli_nents);
437 if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI) {
438 mlli_params->curr_pool = drvdata->mlli_buffs_pool;
439 rc = cc_generate_mlli(dev, &sg_data, mlli_params, flags);
444 dev_dbg(dev, "areq_ctx->dma_buf_type = %s\n",
445 cc_dma_buf_type(req_ctx->dma_buf_type));
450 cc_unmap_cipher_request(dev, req_ctx, ivsize, src, dst);
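/* Typical call sequence from the cipher path (illustrative sketch only):
 *
 *	rc = cc_map_cipher_request(drvdata, req_ctx, ivsize, nbytes,
 *				   req->iv, req->src, req->dst, flags);
 *	if (rc)
 *		return rc;
 *	...queue HW descriptors referencing req_ctx->mlli_params...
 *	cc_unmap_cipher_request(dev, req_ctx, ivsize, req->src, req->dst);
 *
 * The unmap is performed on completion or on any error after the map.
 */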
454 void cc_unmap_aead_request(struct device *dev, struct aead_request *req)
456 struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
457 unsigned int hw_iv_size = areq_ctx->hw_iv_size;
458 struct cc_drvdata *drvdata = dev_get_drvdata(dev);
460 if (areq_ctx->mac_buf_dma_addr) {
461 dma_unmap_single(dev, areq_ctx->mac_buf_dma_addr,
462 MAX_MAC_SIZE, DMA_BIDIRECTIONAL);
465 if (areq_ctx->cipher_mode == DRV_CIPHER_GCTR) {
466 if (areq_ctx->hkey_dma_addr) {
467 dma_unmap_single(dev, areq_ctx->hkey_dma_addr,
468 AES_BLOCK_SIZE, DMA_BIDIRECTIONAL);
471 if (areq_ctx->gcm_block_len_dma_addr) {
472 dma_unmap_single(dev, areq_ctx->gcm_block_len_dma_addr,
473 AES_BLOCK_SIZE, DMA_TO_DEVICE);
476 if (areq_ctx->gcm_iv_inc1_dma_addr) {
477 dma_unmap_single(dev, areq_ctx->gcm_iv_inc1_dma_addr,
478 AES_BLOCK_SIZE, DMA_TO_DEVICE);
481 if (areq_ctx->gcm_iv_inc2_dma_addr) {
482 dma_unmap_single(dev, areq_ctx->gcm_iv_inc2_dma_addr,
483 AES_BLOCK_SIZE, DMA_TO_DEVICE);
487 if (areq_ctx->ccm_hdr_size != ccm_header_size_null) {
488 if (areq_ctx->ccm_iv0_dma_addr) {
489 dma_unmap_single(dev, areq_ctx->ccm_iv0_dma_addr,
490 AES_BLOCK_SIZE, DMA_TO_DEVICE);
493 dma_unmap_sg(dev, &areq_ctx->ccm_adata_sg, 1, DMA_TO_DEVICE);
495 if (areq_ctx->gen_ctx.iv_dma_addr) {
496 dma_unmap_single(dev, areq_ctx->gen_ctx.iv_dma_addr,
497 hw_iv_size, DMA_BIDIRECTIONAL);
498 kfree_sensitive(areq_ctx->gen_ctx.iv);
502 if ((areq_ctx->assoc_buff_type == CC_DMA_BUF_MLLI ||
503 areq_ctx->data_buff_type == CC_DMA_BUF_MLLI) &&
504 (areq_ctx->mlli_params.mlli_virt_addr)) {
505 dev_dbg(dev, "free MLLI buffer: dma=%pad virt=%pK\n",
506 &areq_ctx->mlli_params.mlli_dma_addr,
507 areq_ctx->mlli_params.mlli_virt_addr);
508 dma_pool_free(areq_ctx->mlli_params.curr_pool,
509 areq_ctx->mlli_params.mlli_virt_addr,
510 areq_ctx->mlli_params.mlli_dma_addr);
513 dev_dbg(dev, "Unmapping src sgl: req->src=%pK areq_ctx->src.nents=%u areq_ctx->assoc.nents=%u assoclen:%u cryptlen=%u\n",
514 sg_virt(req->src), areq_ctx->src.nents, areq_ctx->assoc.nents,
515 areq_ctx->assoclen, req->cryptlen);
517 dma_unmap_sg(dev, req->src, areq_ctx->src.mapped_nents,
519 if (req->src != req->dst) {
520 dev_dbg(dev, "Unmapping dst sgl: req->dst=%pK\n",
522 dma_unmap_sg(dev, req->dst, areq_ctx->dst.mapped_nents,
525 if (drvdata->coherent &&
526 areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT &&
527 req->src == req->dst) {
528 /* copy back the MAC from its temporary location to deal with possible
529 * data overwrites caused by cache coherency issues
532 cc_copy_mac(dev, req, CC_SG_FROM_BUF);
536 static bool cc_is_icv_frag(unsigned int sgl_nents, unsigned int authsize,
537 u32 last_entry_data_size)
539 return ((sgl_nents > 1) && (last_entry_data_size < authsize));
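/* The ICV is considered fragmented when the SG list has more than one entry
 * and the last entry holds fewer bytes than the full ICV, i.e. the tag
 * straddles an SG entry boundary.
 */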
542 static int cc_aead_chain_iv(struct cc_drvdata *drvdata,
543 struct aead_request *req,
544 struct buffer_array *sg_data,
545 bool is_last, bool do_chain)
547 struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
548 unsigned int hw_iv_size = areq_ctx->hw_iv_size;
549 struct device *dev = drvdata_to_dev(drvdata);
550 gfp_t flags = cc_gfp_flags(&req->base);
554 areq_ctx->gen_ctx.iv_dma_addr = 0;
555 areq_ctx->gen_ctx.iv = NULL;
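/* req->iv may not be DMA-able (it can, for instance, live on the caller's
 * stack), so take a private copy and map that instead.
 */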
559 areq_ctx->gen_ctx.iv = kmemdup(req->iv, hw_iv_size, flags);
560 if (!areq_ctx->gen_ctx.iv)
563 areq_ctx->gen_ctx.iv_dma_addr =
564 dma_map_single(dev, areq_ctx->gen_ctx.iv, hw_iv_size,
566 if (dma_mapping_error(dev, areq_ctx->gen_ctx.iv_dma_addr)) {
567 dev_err(dev, "Mapping iv %u B at va=%pK for DMA failed\n",
568 hw_iv_size, req->iv);
569 kfree_sensitive(areq_ctx->gen_ctx.iv);
570 areq_ctx->gen_ctx.iv = NULL;
575 dev_dbg(dev, "Mapped iv %u B at va=%pK to dma=%pad\n",
576 hw_iv_size, req->iv, &areq_ctx->gen_ctx.iv_dma_addr);
582 static int cc_aead_chain_assoc(struct cc_drvdata *drvdata,
583 struct aead_request *req,
584 struct buffer_array *sg_data,
585 bool is_last, bool do_chain)
587 struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
589 int mapped_nents = 0;
590 struct device *dev = drvdata_to_dev(drvdata);
594 goto chain_assoc_exit;
597 if (areq_ctx->assoclen == 0) {
598 areq_ctx->assoc_buff_type = CC_DMA_BUF_NULL;
599 areq_ctx->assoc.nents = 0;
600 areq_ctx->assoc.mlli_nents = 0;
601 dev_dbg(dev, "Chain assoc of length 0: buff_type=%s nents=%u\n",
602 cc_dma_buf_type(areq_ctx->assoc_buff_type),
603 areq_ctx->assoc.nents);
604 goto chain_assoc_exit;
607 mapped_nents = sg_nents_for_len(req->src, areq_ctx->assoclen);
608 if (mapped_nents < 0)
611 if (mapped_nents > LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES) {
612 dev_err(dev, "Too many fragments. current %d max %d\n",
613 mapped_nents, LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES);
616 areq_ctx->assoc.nents = mapped_nents;
618 /* In the CCM case we have an additional entry for the
619 * CCM header configuration
621 if (areq_ctx->ccm_hdr_size != ccm_header_size_null) {
622 if ((mapped_nents + 1) > LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES) {
623 dev_err(dev, "CCM case.Too many fragments. Current %d max %d\n",
624 (areq_ctx->assoc.nents + 1),
625 LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES);
627 goto chain_assoc_exit;
631 if (mapped_nents == 1 && areq_ctx->ccm_hdr_size == ccm_header_size_null)
632 areq_ctx->assoc_buff_type = CC_DMA_BUF_DLLI;
634 areq_ctx->assoc_buff_type = CC_DMA_BUF_MLLI;
636 if (do_chain || areq_ctx->assoc_buff_type == CC_DMA_BUF_MLLI) {
637 dev_dbg(dev, "Chain assoc: buff_type=%s nents=%u\n",
638 cc_dma_buf_type(areq_ctx->assoc_buff_type),
639 areq_ctx->assoc.nents);
640 cc_add_sg_entry(dev, sg_data, areq_ctx->assoc.nents, req->src,
641 areq_ctx->assoclen, 0, is_last,
642 &areq_ctx->assoc.mlli_nents);
643 areq_ctx->assoc_buff_type = CC_DMA_BUF_MLLI;
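/* When chaining is requested the assoc data is always described via MLLI,
 * even if it would fit a single DLLI entry, so that the data tables built
 * later can be linked after it (see cc_update_aead_mlli_nents()).
 */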
650 static void cc_prepare_aead_data_dlli(struct aead_request *req,
651 u32 *src_last_bytes, u32 *dst_last_bytes)
653 struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
654 enum drv_crypto_direction direct = areq_ctx->gen_ctx.op_type;
655 unsigned int authsize = areq_ctx->req_authsize;
656 struct scatterlist *sg;
659 areq_ctx->is_icv_fragmented = false;
661 if ((req->src == req->dst) || direct == DRV_CRYPTO_DIRECTION_DECRYPT) {
662 sg = areq_ctx->src_sgl;
663 offset = *src_last_bytes - authsize;
665 sg = areq_ctx->dst_sgl;
666 offset = *dst_last_bytes - authsize;
669 areq_ctx->icv_dma_addr = sg_dma_address(sg) + offset;
670 areq_ctx->icv_virt_addr = sg_virt(sg) + offset;
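/* In the DLLI case the ICV is contiguous, so it is enough to record its
 * position at the tail of the data SG entry.
 */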
673 static void cc_prepare_aead_data_mlli(struct cc_drvdata *drvdata,
674 struct aead_request *req,
675 struct buffer_array *sg_data,
676 u32 *src_last_bytes, u32 *dst_last_bytes,
679 struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
680 enum drv_crypto_direction direct = areq_ctx->gen_ctx.op_type;
681 unsigned int authsize = areq_ctx->req_authsize;
682 struct device *dev = drvdata_to_dev(drvdata);
683 struct scatterlist *sg;
685 if (req->src == req->dst) {
687 cc_add_sg_entry(dev, sg_data, areq_ctx->src.nents,
688 areq_ctx->src_sgl, areq_ctx->cryptlen,
689 areq_ctx->src_offset, is_last_table,
690 &areq_ctx->src.mlli_nents);
692 areq_ctx->is_icv_fragmented =
693 cc_is_icv_frag(areq_ctx->src.nents, authsize,
696 if (areq_ctx->is_icv_fragmented) {
697 /* Backup happens only when the ICV is fragmented; ICV
698 * verification is then done by a CPU compare in order to
699 * simplify MAC verification upon request completion
701 if (direct == DRV_CRYPTO_DIRECTION_DECRYPT) {
702 /* On coherent platforms (e.g. ACP) the ICV
703 * has already been copied for any in-place
704 * decrypt operation, so we must skip the
705 * copy here.
707 if (!drvdata->coherent)
708 cc_copy_mac(dev, req, CC_SG_TO_BUF);
710 areq_ctx->icv_virt_addr = areq_ctx->backup_mac;
712 areq_ctx->icv_virt_addr = areq_ctx->mac_buf;
713 areq_ctx->icv_dma_addr =
714 areq_ctx->mac_buf_dma_addr;
716 } else { /* Contig. ICV */
717 sg = &areq_ctx->src_sgl[areq_ctx->src.nents - 1];
718 /* Should handle the case where the sg is not contiguous. */
719 areq_ctx->icv_dma_addr = sg_dma_address(sg) +
720 (*src_last_bytes - authsize);
721 areq_ctx->icv_virt_addr = sg_virt(sg) +
722 (*src_last_bytes - authsize);
725 } else if (direct == DRV_CRYPTO_DIRECTION_DECRYPT) {
726 /* NON-INPLACE and DECRYPT */
727 cc_add_sg_entry(dev, sg_data, areq_ctx->src.nents,
728 areq_ctx->src_sgl, areq_ctx->cryptlen,
729 areq_ctx->src_offset, is_last_table,
730 &areq_ctx->src.mlli_nents);
731 cc_add_sg_entry(dev, sg_data, areq_ctx->dst.nents,
732 areq_ctx->dst_sgl, areq_ctx->cryptlen,
733 areq_ctx->dst_offset, is_last_table,
734 &areq_ctx->dst.mlli_nents);
736 areq_ctx->is_icv_fragmented =
737 cc_is_icv_frag(areq_ctx->src.nents, authsize,
739 /* Backup happens only when the ICV is fragmented; ICV
741 * verification is then done by a CPU compare in order to simplify
742 * MAC verification upon request completion
744 if (areq_ctx->is_icv_fragmented) {
745 cc_copy_mac(dev, req, CC_SG_TO_BUF);
746 areq_ctx->icv_virt_addr = areq_ctx->backup_mac;
748 } else { /* Contig. ICV */
749 sg = &areq_ctx->src_sgl[areq_ctx->src.nents - 1];
750 /* Should handle the case where the sg is not contiguous. */
751 areq_ctx->icv_dma_addr = sg_dma_address(sg) +
752 (*src_last_bytes - authsize);
753 areq_ctx->icv_virt_addr = sg_virt(sg) +
754 (*src_last_bytes - authsize);
758 /* NON-INPLACE and ENCRYPT */
759 cc_add_sg_entry(dev, sg_data, areq_ctx->dst.nents,
760 areq_ctx->dst_sgl, areq_ctx->cryptlen,
761 areq_ctx->dst_offset, is_last_table,
762 &areq_ctx->dst.mlli_nents);
763 cc_add_sg_entry(dev, sg_data, areq_ctx->src.nents,
764 areq_ctx->src_sgl, areq_ctx->cryptlen,
765 areq_ctx->src_offset, is_last_table,
766 &areq_ctx->src.mlli_nents);
768 areq_ctx->is_icv_fragmented =
769 cc_is_icv_frag(areq_ctx->dst.nents, authsize,
772 if (!areq_ctx->is_icv_fragmented) {
773 sg = &areq_ctx->dst_sgl[areq_ctx->dst.nents - 1];
775 areq_ctx->icv_dma_addr = sg_dma_address(sg) +
776 (*dst_last_bytes - authsize);
777 areq_ctx->icv_virt_addr = sg_virt(sg) +
778 (*dst_last_bytes - authsize);
780 areq_ctx->icv_dma_addr = areq_ctx->mac_buf_dma_addr;
781 areq_ctx->icv_virt_addr = areq_ctx->mac_buf;
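/* For non-inplace encrypt with a fragmented ICV the HW writes the tag into
 * the contiguous mac_buf; the AEAD completion handler then copies it out to
 * the destination SGL.
 */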
786 static int cc_aead_chain_data(struct cc_drvdata *drvdata,
787 struct aead_request *req,
788 struct buffer_array *sg_data,
789 bool is_last_table, bool do_chain)
791 struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
792 struct device *dev = drvdata_to_dev(drvdata);
793 enum drv_crypto_direction direct = areq_ctx->gen_ctx.op_type;
794 unsigned int authsize = areq_ctx->req_authsize;
795 unsigned int src_last_bytes = 0, dst_last_bytes = 0;
797 u32 src_mapped_nents = 0, dst_mapped_nents = 0;
799 /* non-inplace mode */
800 unsigned int size_for_map = req->assoclen + req->cryptlen;
802 u32 size_to_skip = req->assoclen;
803 struct scatterlist *sgl;
805 offset = size_to_skip;
810 areq_ctx->src_sgl = req->src;
811 areq_ctx->dst_sgl = req->dst;
813 size_for_map += (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
815 src_mapped_nents = cc_get_sgl_nents(dev, req->src, size_for_map,
817 sg_index = areq_ctx->src_sgl->length;
818 /* check where the data starts */
819 while (src_mapped_nents && (sg_index <= size_to_skip)) {
821 offset -= areq_ctx->src_sgl->length;
822 sgl = sg_next(areq_ctx->src_sgl);
825 areq_ctx->src_sgl = sgl;
826 sg_index += areq_ctx->src_sgl->length;
828 if (src_mapped_nents > LLI_MAX_NUM_OF_DATA_ENTRIES) {
829 dev_err(dev, "Too many fragments. current %d max %d\n",
830 src_mapped_nents, LLI_MAX_NUM_OF_DATA_ENTRIES);
834 areq_ctx->src.nents = src_mapped_nents;
836 areq_ctx->src_offset = offset;
838 if (req->src != req->dst) {
839 size_for_map = req->assoclen + req->cryptlen;
841 if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT)
842 size_for_map += authsize;
844 size_for_map -= authsize;
846 rc = cc_map_sg(dev, req->dst, size_for_map, DMA_BIDIRECTIONAL,
847 &areq_ctx->dst.mapped_nents,
848 LLI_MAX_NUM_OF_DATA_ENTRIES, &dst_last_bytes,
851 goto chain_data_exit;
854 dst_mapped_nents = cc_get_sgl_nents(dev, req->dst, size_for_map,
856 sg_index = areq_ctx->dst_sgl->length;
857 offset = size_to_skip;
859 /* check where the data starts */
860 while (dst_mapped_nents && sg_index <= size_to_skip) {
862 offset -= areq_ctx->dst_sgl->length;
863 sgl = sg_next(areq_ctx->dst_sgl);
866 areq_ctx->dst_sgl = sgl;
867 sg_index += areq_ctx->dst_sgl->length;
869 if (dst_mapped_nents > LLI_MAX_NUM_OF_DATA_ENTRIES) {
870 dev_err(dev, "Too many fragments. current %d max %d\n",
871 dst_mapped_nents, LLI_MAX_NUM_OF_DATA_ENTRIES);
874 areq_ctx->dst.nents = dst_mapped_nents;
875 areq_ctx->dst_offset = offset;
876 if (src_mapped_nents > 1 ||
877 dst_mapped_nents > 1 ||
879 areq_ctx->data_buff_type = CC_DMA_BUF_MLLI;
880 cc_prepare_aead_data_mlli(drvdata, req, sg_data,
881 &src_last_bytes, &dst_last_bytes,
884 areq_ctx->data_buff_type = CC_DMA_BUF_DLLI;
885 cc_prepare_aead_data_dlli(req, &src_last_bytes,
893 static void cc_update_aead_mlli_nents(struct cc_drvdata *drvdata,
894 struct aead_request *req)
896 struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
897 u32 curr_mlli_size = 0;
899 if (areq_ctx->assoc_buff_type == CC_DMA_BUF_MLLI) {
900 areq_ctx->assoc.sram_addr = drvdata->mlli_sram_addr;
901 curr_mlli_size = areq_ctx->assoc.mlli_nents *
905 if (areq_ctx->data_buff_type == CC_DMA_BUF_MLLI) {
906 /* In-place case: dst nents equal src nents */
907 if (req->src == req->dst) {
908 areq_ctx->dst.mlli_nents = areq_ctx->src.mlli_nents;
909 areq_ctx->src.sram_addr = drvdata->mlli_sram_addr +
911 areq_ctx->dst.sram_addr = areq_ctx->src.sram_addr;
912 if (!areq_ctx->is_single_pass)
913 areq_ctx->assoc.mlli_nents +=
914 areq_ctx->src.mlli_nents;
916 if (areq_ctx->gen_ctx.op_type ==
917 DRV_CRYPTO_DIRECTION_DECRYPT) {
918 areq_ctx->src.sram_addr =
919 drvdata->mlli_sram_addr +
921 areq_ctx->dst.sram_addr =
922 areq_ctx->src.sram_addr +
923 areq_ctx->src.mlli_nents *
925 if (!areq_ctx->is_single_pass)
926 areq_ctx->assoc.mlli_nents +=
927 areq_ctx->src.mlli_nents;
929 areq_ctx->dst.sram_addr =
930 drvdata->mlli_sram_addr +
932 areq_ctx->src.sram_addr =
933 areq_ctx->dst.sram_addr +
934 areq_ctx->dst.mlli_nents *
936 if (!areq_ctx->is_single_pass)
937 areq_ctx->assoc.mlli_nents +=
938 areq_ctx->dst.mlli_nents;
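/* Resulting MLLI layout in SRAM: [assoc][src == dst] for in-place,
 * [assoc][src][dst] for non-inplace decrypt and [assoc][dst][src] for
 * non-inplace encrypt.
 */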
944 int cc_map_aead_request(struct cc_drvdata *drvdata, struct aead_request *req)
946 struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
947 struct mlli_params *mlli_params = &areq_ctx->mlli_params;
948 struct device *dev = drvdata_to_dev(drvdata);
949 struct buffer_array sg_data;
950 unsigned int authsize = areq_ctx->req_authsize;
953 u32 mapped_nents = 0;
954 u32 dummy = 0; /*used for the assoc data fragments */
956 gfp_t flags = cc_gfp_flags(&req->base);
958 mlli_params->curr_pool = NULL;
959 sg_data.num_of_buffers = 0;
961 /* copy the MAC to a temporary location to deal with possible
962 * data overwrites caused by cache coherency issues.
964 if (drvdata->coherent &&
965 areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT &&
966 req->src == req->dst)
967 cc_copy_mac(dev, req, CC_SG_TO_BUF);
969 /* Calculate the cipher data size; on decrypt, exclude the ICV */
970 areq_ctx->cryptlen = (areq_ctx->gen_ctx.op_type ==
971 DRV_CRYPTO_DIRECTION_ENCRYPT) ?
973 (req->cryptlen - authsize);
975 dma_addr = dma_map_single(dev, areq_ctx->mac_buf, MAX_MAC_SIZE,
977 if (dma_mapping_error(dev, dma_addr)) {
978 dev_err(dev, "Mapping mac_buf %u B at va=%pK for DMA failed\n",
979 MAX_MAC_SIZE, areq_ctx->mac_buf);
981 goto aead_map_failure;
983 areq_ctx->mac_buf_dma_addr = dma_addr;
985 if (areq_ctx->ccm_hdr_size != ccm_header_size_null) {
986 void *addr = areq_ctx->ccm_config + CCM_CTR_COUNT_0_OFFSET;
988 dma_addr = dma_map_single(dev, addr, AES_BLOCK_SIZE,
991 if (dma_mapping_error(dev, dma_addr)) {
992 dev_err(dev, "Mapping mac_buf %u B at va=%pK for DMA failed\n",
993 AES_BLOCK_SIZE, addr);
994 areq_ctx->ccm_iv0_dma_addr = 0;
996 goto aead_map_failure;
998 areq_ctx->ccm_iv0_dma_addr = dma_addr;
1000 rc = cc_set_aead_conf_buf(dev, areq_ctx, areq_ctx->ccm_config,
1001 &sg_data, areq_ctx->assoclen);
1003 goto aead_map_failure;
1006 if (areq_ctx->cipher_mode == DRV_CIPHER_GCTR) {
1007 dma_addr = dma_map_single(dev, areq_ctx->hkey, AES_BLOCK_SIZE,
1009 if (dma_mapping_error(dev, dma_addr)) {
1010 dev_err(dev, "Mapping hkey %u B at va=%pK for DMA failed\n",
1011 AES_BLOCK_SIZE, areq_ctx->hkey);
1013 goto aead_map_failure;
1015 areq_ctx->hkey_dma_addr = dma_addr;
1017 dma_addr = dma_map_single(dev, &areq_ctx->gcm_len_block,
1018 AES_BLOCK_SIZE, DMA_TO_DEVICE);
1019 if (dma_mapping_error(dev, dma_addr)) {
1020 dev_err(dev, "Mapping gcm_len_block %u B at va=%pK for DMA failed\n",
1021 AES_BLOCK_SIZE, &areq_ctx->gcm_len_block);
1023 goto aead_map_failure;
1025 areq_ctx->gcm_block_len_dma_addr = dma_addr;
1027 dma_addr = dma_map_single(dev, areq_ctx->gcm_iv_inc1,
1028 AES_BLOCK_SIZE, DMA_TO_DEVICE);
1030 if (dma_mapping_error(dev, dma_addr)) {
1031 dev_err(dev, "Mapping gcm_iv_inc1 %u B at va=%pK for DMA failed\n",
1032 AES_BLOCK_SIZE, (areq_ctx->gcm_iv_inc1));
1033 areq_ctx->gcm_iv_inc1_dma_addr = 0;
1035 goto aead_map_failure;
1037 areq_ctx->gcm_iv_inc1_dma_addr = dma_addr;
1039 dma_addr = dma_map_single(dev, areq_ctx->gcm_iv_inc2,
1040 AES_BLOCK_SIZE, DMA_TO_DEVICE);
1042 if (dma_mapping_error(dev, dma_addr)) {
1043 dev_err(dev, "Mapping gcm_iv_inc2 %u B at va=%pK for DMA failed\n",
1044 AES_BLOCK_SIZE, (areq_ctx->gcm_iv_inc2));
1045 areq_ctx->gcm_iv_inc2_dma_addr = 0;
1047 goto aead_map_failure;
1049 areq_ctx->gcm_iv_inc2_dma_addr = dma_addr;
1052 size_to_map = req->cryptlen + req->assoclen;
1053 /* If we do in-place encryption, we also need the auth tag */
1054 if ((areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_ENCRYPT) &&
1055 (req->src == req->dst)) {
1056 size_to_map += authsize;
1059 rc = cc_map_sg(dev, req->src, size_to_map, DMA_BIDIRECTIONAL,
1060 &areq_ctx->src.mapped_nents,
1061 (LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES +
1062 LLI_MAX_NUM_OF_DATA_ENTRIES),
1063 &dummy, &mapped_nents);
1065 goto aead_map_failure;
1067 if (areq_ctx->is_single_pass) {
1069 * Create MLLI table for:
1072 * Note: IV is a contiguous buffer (not an SGL)
1074 rc = cc_aead_chain_assoc(drvdata, req, &sg_data, true, false);
1076 goto aead_map_failure;
1077 rc = cc_aead_chain_iv(drvdata, req, &sg_data, true, false);
1079 goto aead_map_failure;
1080 rc = cc_aead_chain_data(drvdata, req, &sg_data, true, false);
1082 goto aead_map_failure;
1083 } else { /* DOUBLE-PASS flow */
1085 * Prepare MLLI table(s) in this order:
1087 * If ENCRYPT/DECRYPT (inplace):
1088 * (1) MLLI table for assoc
1089 * (2) IV entry (chained right after end of assoc)
1090 * (3) MLLI for src/dst (inplace operation)
1092 * If ENCRYPT (non-inplace)
1093 * (1) MLLI table for assoc
1094 * (2) IV entry (chained right after end of assoc)
1098 * If DECRYPT (non-inplace)
1099 * (1) MLLI table for assoc
1100 * (2) IV entry (chained right after end of assoc)
1104 rc = cc_aead_chain_assoc(drvdata, req, &sg_data, false, true);
1106 goto aead_map_failure;
1107 rc = cc_aead_chain_iv(drvdata, req, &sg_data, false, true);
1109 goto aead_map_failure;
1110 rc = cc_aead_chain_data(drvdata, req, &sg_data, true, true);
1112 goto aead_map_failure;
1115 /* MLLI support - start building the MLLI according to the above
1118 if (areq_ctx->assoc_buff_type == CC_DMA_BUF_MLLI ||
1119 areq_ctx->data_buff_type == CC_DMA_BUF_MLLI) {
1120 mlli_params->curr_pool = drvdata->mlli_buffs_pool;
1121 rc = cc_generate_mlli(dev, &sg_data, mlli_params, flags);
1123 goto aead_map_failure;
1125 cc_update_aead_mlli_nents(drvdata, req);
1126 dev_dbg(dev, "assoc params mn %d\n",
1127 areq_ctx->assoc.mlli_nents);
1128 dev_dbg(dev, "src params mn %d\n", areq_ctx->src.mlli_nents);
1129 dev_dbg(dev, "dst params mn %d\n", areq_ctx->dst.mlli_nents);
1134 cc_unmap_aead_request(dev, req);
1138 int cc_map_hash_request_final(struct cc_drvdata *drvdata, void *ctx,
1139 struct scatterlist *src, unsigned int nbytes,
1140 bool do_update, gfp_t flags)
1142 struct ahash_req_ctx *areq_ctx = (struct ahash_req_ctx *)ctx;
1143 struct device *dev = drvdata_to_dev(drvdata);
1144 u8 *curr_buff = cc_hash_buf(areq_ctx);
1145 u32 *curr_buff_cnt = cc_hash_buf_cnt(areq_ctx);
1146 struct mlli_params *mlli_params = &areq_ctx->mlli_params;
1147 struct buffer_array sg_data;
1150 u32 mapped_nents = 0;
1152 dev_dbg(dev, "final params : curr_buff=%pK curr_buff_cnt=0x%X nbytes = 0x%X src=%pK curr_index=%u\n",
1153 curr_buff, *curr_buff_cnt, nbytes, src, areq_ctx->buff_index);
1154 /* Init the type of the dma buffer */
1155 areq_ctx->data_dma_buf_type = CC_DMA_BUF_NULL;
1156 mlli_params->curr_pool = NULL;
1157 sg_data.num_of_buffers = 0;
1158 areq_ctx->in_nents = 0;
1160 if (nbytes == 0 && *curr_buff_cnt == 0) {
1165 /* map the previous buffer */
1166 if (*curr_buff_cnt) {
1167 rc = cc_set_hash_buf(dev, areq_ctx, curr_buff, *curr_buff_cnt,
1173 if (src && nbytes > 0 && do_update) {
1174 rc = cc_map_sg(dev, src, nbytes, DMA_TO_DEVICE,
1175 &areq_ctx->in_nents, LLI_MAX_NUM_OF_DATA_ENTRIES,
1176 &dummy, &mapped_nents);
1178 goto unmap_curr_buff;
1179 if (src && mapped_nents == 1 &&
1180 areq_ctx->data_dma_buf_type == CC_DMA_BUF_NULL) {
1181 memcpy(areq_ctx->buff_sg, src,
1182 sizeof(struct scatterlist));
1183 areq_ctx->buff_sg->length = nbytes;
1184 areq_ctx->curr_sg = areq_ctx->buff_sg;
1185 areq_ctx->data_dma_buf_type = CC_DMA_BUF_DLLI;
1187 areq_ctx->data_dma_buf_type = CC_DMA_BUF_MLLI;
1192 if (areq_ctx->data_dma_buf_type == CC_DMA_BUF_MLLI) {
1193 mlli_params->curr_pool = drvdata->mlli_buffs_pool;
1194 /* add the src data to the sg_data */
1195 cc_add_sg_entry(dev, &sg_data, areq_ctx->in_nents, src, nbytes,
1196 0, true, &areq_ctx->mlli_nents);
1197 rc = cc_generate_mlli(dev, &sg_data, mlli_params, flags);
1199 goto fail_unmap_din;
1201 /* change the buffer index for the unmap function */
1202 areq_ctx->buff_index = (areq_ctx->buff_index ^ 1);
1203 dev_dbg(dev, "areq_ctx->data_dma_buf_type = %s\n",
1204 cc_dma_buf_type(areq_ctx->data_dma_buf_type));
1208 dma_unmap_sg(dev, src, areq_ctx->in_nents, DMA_TO_DEVICE);
1212 dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE);
1217 int cc_map_hash_request_update(struct cc_drvdata *drvdata, void *ctx,
1218 struct scatterlist *src, unsigned int nbytes,
1219 unsigned int block_size, gfp_t flags)
1221 struct ahash_req_ctx *areq_ctx = (struct ahash_req_ctx *)ctx;
1222 struct device *dev = drvdata_to_dev(drvdata);
1223 u8 *curr_buff = cc_hash_buf(areq_ctx);
1224 u32 *curr_buff_cnt = cc_hash_buf_cnt(areq_ctx);
1225 u8 *next_buff = cc_next_buf(areq_ctx);
1226 u32 *next_buff_cnt = cc_next_buf_cnt(areq_ctx);
1227 struct mlli_params *mlli_params = &areq_ctx->mlli_params;
1228 unsigned int update_data_len;
1229 u32 total_in_len = nbytes + *curr_buff_cnt;
1230 struct buffer_array sg_data;
1231 unsigned int swap_index = 0;
1234 u32 mapped_nents = 0;
1236 dev_dbg(dev, " update params : curr_buff=%pK curr_buff_cnt=0x%X nbytes=0x%X src=%pK curr_index=%u\n",
1237 curr_buff, *curr_buff_cnt, nbytes, src, areq_ctx->buff_index);
1238 /* Init the type of the dma buffer */
1239 areq_ctx->data_dma_buf_type = CC_DMA_BUF_NULL;
1240 mlli_params->curr_pool = NULL;
1241 areq_ctx->curr_sg = NULL;
1242 sg_data.num_of_buffers = 0;
1243 areq_ctx->in_nents = 0;
1245 if (total_in_len < block_size) {
1246 dev_dbg(dev, " less than one block: curr_buff=%pK *curr_buff_cnt=0x%X copy_to=%pK\n",
1247 curr_buff, *curr_buff_cnt, &curr_buff[*curr_buff_cnt]);
1248 areq_ctx->in_nents = sg_nents_for_len(src, nbytes);
1249 sg_copy_to_buffer(src, areq_ctx->in_nents,
1250 &curr_buff[*curr_buff_cnt], nbytes);
1251 *curr_buff_cnt += nbytes;
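/* Less than one full block so far - just stage the data in curr_buff; no DMA
 * mapping or HW descriptor is needed for this update.
 */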
1255 /* Calculate the residue size */
1256 *next_buff_cnt = total_in_len & (block_size - 1);
1257 /* update data len */
1258 update_data_len = total_in_len - *next_buff_cnt;
1260 dev_dbg(dev, " temp length : *next_buff_cnt=0x%X update_data_len=0x%X\n",
1261 *next_buff_cnt, update_data_len);
1263 /* Copy the new residue to the next buffer */
1264 if (*next_buff_cnt) {
1265 dev_dbg(dev, " handle residue: next buff %pK skip data %u residue %u\n",
1266 next_buff, (update_data_len - *curr_buff_cnt),
1268 cc_copy_sg_portion(dev, next_buff, src,
1269 (update_data_len - *curr_buff_cnt),
1270 nbytes, CC_SG_TO_BUF);
1271 /* change the buffer index for next operation */
1275 if (*curr_buff_cnt) {
1276 rc = cc_set_hash_buf(dev, areq_ctx, curr_buff, *curr_buff_cnt,
1280 /* change the buffer index for next operation */
1284 if (update_data_len > *curr_buff_cnt) {
1285 rc = cc_map_sg(dev, src, (update_data_len - *curr_buff_cnt),
1286 DMA_TO_DEVICE, &areq_ctx->in_nents,
1287 LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy,
1290 goto unmap_curr_buff;
1291 if (mapped_nents == 1 &&
1292 areq_ctx->data_dma_buf_type == CC_DMA_BUF_NULL) {
1293 /* only one entry in the SG and no previous data */
1294 memcpy(areq_ctx->buff_sg, src,
1295 sizeof(struct scatterlist));
1296 areq_ctx->buff_sg->length = update_data_len;
1297 areq_ctx->data_dma_buf_type = CC_DMA_BUF_DLLI;
1298 areq_ctx->curr_sg = areq_ctx->buff_sg;
1300 areq_ctx->data_dma_buf_type = CC_DMA_BUF_MLLI;
1304 if (areq_ctx->data_dma_buf_type == CC_DMA_BUF_MLLI) {
1305 mlli_params->curr_pool = drvdata->mlli_buffs_pool;
1306 /* add the src data to the sg_data */
1307 cc_add_sg_entry(dev, &sg_data, areq_ctx->in_nents, src,
1308 (update_data_len - *curr_buff_cnt), 0, true,
1309 &areq_ctx->mlli_nents);
1310 rc = cc_generate_mlli(dev, &sg_data, mlli_params, flags);
1312 goto fail_unmap_din;
1314 areq_ctx->buff_index = (areq_ctx->buff_index ^ swap_index);
1319 dma_unmap_sg(dev, src, areq_ctx->in_nents, DMA_TO_DEVICE);
1323 dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE);
1328 void cc_unmap_hash_request(struct device *dev, void *ctx,
1329 struct scatterlist *src, bool do_revert)
1331 struct ahash_req_ctx *areq_ctx = (struct ahash_req_ctx *)ctx;
1332 u32 *prev_len = cc_next_buf_cnt(areq_ctx);
1334 /* If a pool was set, a table was
1335 * allocated and should be released
1337 if (areq_ctx->mlli_params.curr_pool) {
1338 dev_dbg(dev, "free MLLI buffer: dma=%pad virt=%pK\n",
1339 &areq_ctx->mlli_params.mlli_dma_addr,
1340 areq_ctx->mlli_params.mlli_virt_addr);
1341 dma_pool_free(areq_ctx->mlli_params.curr_pool,
1342 areq_ctx->mlli_params.mlli_virt_addr,
1343 areq_ctx->mlli_params.mlli_dma_addr);
1346 if (src && areq_ctx->in_nents) {
1347 dev_dbg(dev, "Unmapped sg src: virt=%pK dma=%pad len=0x%X\n",
1348 sg_virt(src), &sg_dma_address(src), sg_dma_len(src));
1349 dma_unmap_sg(dev, src,
1350 areq_ctx->in_nents, DMA_TO_DEVICE);
1354 dev_dbg(dev, "Unmapped buffer: areq_ctx->buff_sg=%pK dma=%pad len 0x%X\n",
1355 sg_virt(areq_ctx->buff_sg),
1356 &sg_dma_address(areq_ctx->buff_sg),
1357 sg_dma_len(areq_ctx->buff_sg));
1358 dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE);
1360 /* clean the previous data length for update
1365 areq_ctx->buff_index ^= 1;
1370 int cc_buffer_mgr_init(struct cc_drvdata *drvdata)
1372 struct device *dev = drvdata_to_dev(drvdata);
1374 drvdata->mlli_buffs_pool =
1375 dma_pool_create("dx_single_mlli_tables", dev,
1376 MAX_NUM_OF_TOTAL_MLLI_ENTRIES *
1377 LLI_ENTRY_BYTE_SIZE,
1378 MLLI_TABLE_MIN_ALIGNMENT, 0);
1380 if (!drvdata->mlli_buffs_pool)
1386 int cc_buffer_mgr_fini(struct cc_drvdata *drvdata)
1388 dma_pool_destroy(drvdata->mlli_buffs_pool);