// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2020 Marvell. */

#include "otx2_cptvf.h"
#include "otx2_cpt_common.h"

/* SG list header size in bytes */
#define SG_LIST_HDR_SIZE 8

/* Default timeout when waiting for free pending entry in us */
#define CPT_PENTRY_TIMEOUT 1000
#define CPT_PENTRY_STEP 50

/* Default threshold for stopping and resuming sender requests */
#define CPT_IQ_STOP_MARGIN 128
#define CPT_IQ_RESUME_MARGIN 512

/* Default command timeout in seconds */
#define CPT_COMMAND_TIMEOUT 4
#define CPT_TIME_IN_RESET_COUNT 5
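
/*
 * Dump the gather/scatter buffer lists of a request. This is a no-op
 * unless dynamic debug output for pr_debug()/print_hex_dump_debug()
 * is enabled.
 */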
static void otx2_cpt_dump_sg_list(struct pci_dev *pdev,
				  struct otx2_cpt_req_info *req)
{
	int i;

	pr_debug("Gather list size %d\n", req->in_cnt);
	for (i = 0; i < req->in_cnt; i++) {
		pr_debug("Buffer %d size %d, vptr 0x%p, dmaptr 0x%p\n", i,
			 req->in[i].size, req->in[i].vptr,
			 (void *) req->in[i].dma_addr);
		pr_debug("Buffer hexdump (%d bytes)\n",
			 req->in[i].size);
		print_hex_dump_debug("", DUMP_PREFIX_NONE, 16, 1,
				     req->in[i].vptr, req->in[i].size, false);
	}
	pr_debug("Scatter list size %d\n", req->out_cnt);
	for (i = 0; i < req->out_cnt; i++) {
		pr_debug("Buffer %d size %d, vptr 0x%p, dmaptr 0x%p\n", i,
			 req->out[i].size, req->out[i].vptr,
			 (void *) req->out[i].dma_addr);
		pr_debug("Buffer hexdump (%d bytes)\n", req->out[i].size);
		print_hex_dump_debug("", DUMP_PREFIX_NONE, 16, 1,
				     req->out[i].vptr, req->out[i].size,
				     false);
	}
}
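
/*
 * Claim the entry at the ring's rear index, if it is not still busy,
 * and advance rear with wraparound. Caller must hold pqueue->lock.
 */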
static inline struct otx2_cpt_pending_entry *get_free_pending_entry(
					struct otx2_cpt_pending_queue *q,
					int qlen)
{
	struct otx2_cpt_pending_entry *ent = NULL;

	ent = &q->head[q->rear];
	if (unlikely(ent->busy))
		return NULL;

	q->rear++;
	if (unlikely(q->rear == qlen))
		q->rear = 0;

	return ent;
}

/* Advance a ring index by inc positions, wrapping at length */
static inline u32 modulo_inc(u32 index, u32 length, u32 inc)
{
	if (WARN_ON(inc > length))
		inc = length;

	index += inc;
	if (unlikely(index >= length))
		index -= length;

	return index;
}

/* Release a pending queue entry so it can be reused by a new request */
static inline void free_pentry(struct otx2_cpt_pending_entry *pentry)
{
	pentry->completion_addr = NULL;
	pentry->info = NULL;
	pentry->callback = NULL;
	pentry->areq = NULL;
	pentry->resume_sender = false;
	pentry->busy = false;
}
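
/*
 * DMA-map each buffer in the list and pack it into hardware SG
 * components, four (length, pointer) pairs per component with all
 * fields big-endian. On a mapping failure everything mapped so far
 * is unmapped again.
 */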
static inline int setup_sgio_components(struct pci_dev *pdev,
					struct otx2_cpt_buf_ptr *list,
					int buf_count, u8 *buffer)
{
	struct otx2_cpt_sglist_component *sg_ptr = NULL;
	int ret = 0, i, j;
	int components;

	if (unlikely(!list)) {
		dev_err(&pdev->dev, "Input list pointer is NULL\n");
		return -EFAULT;
	}

	for (i = 0; i < buf_count; i++) {
		if (unlikely(!list[i].vptr))
			continue;
		list[i].dma_addr = dma_map_single(&pdev->dev, list[i].vptr,
						  list[i].size,
						  DMA_BIDIRECTIONAL);
		if (unlikely(dma_mapping_error(&pdev->dev, list[i].dma_addr))) {
			dev_err(&pdev->dev, "Dma mapping failed\n");
			ret = -EIO;
			goto sg_cleanup;
		}
	}
	components = buf_count / 4;
	sg_ptr = (struct otx2_cpt_sglist_component *)buffer;
	for (i = 0; i < components; i++) {
		sg_ptr->len0 = cpu_to_be16(list[i * 4 + 0].size);
		sg_ptr->len1 = cpu_to_be16(list[i * 4 + 1].size);
		sg_ptr->len2 = cpu_to_be16(list[i * 4 + 2].size);
		sg_ptr->len3 = cpu_to_be16(list[i * 4 + 3].size);
		sg_ptr->ptr0 = cpu_to_be64(list[i * 4 + 0].dma_addr);
		sg_ptr->ptr1 = cpu_to_be64(list[i * 4 + 1].dma_addr);
		sg_ptr->ptr2 = cpu_to_be64(list[i * 4 + 2].dma_addr);
		sg_ptr->ptr3 = cpu_to_be64(list[i * 4 + 3].dma_addr);
		sg_ptr++;
	}
	components = buf_count % 4;

	switch (components) {
	case 3:
		sg_ptr->len2 = cpu_to_be16(list[i * 4 + 2].size);
		sg_ptr->ptr2 = cpu_to_be64(list[i * 4 + 2].dma_addr);
		fallthrough;
	case 2:
		sg_ptr->len1 = cpu_to_be16(list[i * 4 + 1].size);
		sg_ptr->ptr1 = cpu_to_be64(list[i * 4 + 1].dma_addr);
		fallthrough;
	case 1:
		sg_ptr->len0 = cpu_to_be16(list[i * 4 + 0].size);
		sg_ptr->ptr0 = cpu_to_be64(list[i * 4 + 0].dma_addr);
		break;
	default:
		break;
	}
	return ret;

sg_cleanup:
	/* Undo the mappings created before the failure */
	for (j = 0; j < i; j++) {
		if (list[j].dma_addr) {
			dma_unmap_single(&pdev->dev, list[j].dma_addr,
					 list[j].size, DMA_BIDIRECTIONAL);
		}

		list[j].dma_addr = 0;
	}
	return ret;
}
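
/*
 * Allocate and populate the per-request instruction info as a single
 * kzalloc() region holding the info struct itself, the DPTR input
 * buffer (SG list header plus gather and scatter component lists)
 * and the otx2_cpt_res_s word the hardware writes the completion
 * status into.
 */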
static inline struct otx2_cpt_inst_info *info_create(struct pci_dev *pdev,
					      struct otx2_cpt_req_info *req,
					      gfp_t gfp)
{
	int align = OTX2_CPT_DMA_MINALIGN;
	struct otx2_cpt_inst_info *info;
	u32 dlen, align_dlen, info_len;
	u16 g_sz_bytes, s_sz_bytes;
	u32 total_mem_len;

	if (unlikely(req->in_cnt > OTX2_CPT_MAX_SG_IN_CNT ||
		     req->out_cnt > OTX2_CPT_MAX_SG_OUT_CNT)) {
		dev_err(&pdev->dev, "Error too many sg components\n");
		return NULL;
	}

	g_sz_bytes = ((req->in_cnt + 3) / 4) *
		      sizeof(struct otx2_cpt_sglist_component);
	s_sz_bytes = ((req->out_cnt + 3) / 4) *
		      sizeof(struct otx2_cpt_sglist_component);

	dlen = g_sz_bytes + s_sz_bytes + SG_LIST_HDR_SIZE;
	align_dlen = ALIGN(dlen, align);
	info_len = ALIGN(sizeof(*info), align);
	total_mem_len = align_dlen + info_len + sizeof(union otx2_cpt_res_s);

	info = kzalloc(total_mem_len, gfp);
	if (unlikely(!info))
		return NULL;

	info->dlen = dlen;
	info->in_buffer = (u8 *)info + info_len;

	/* SG list header: scatter and gather counts, swapped to big-endian */
	((u16 *)info->in_buffer)[0] = req->out_cnt;
	((u16 *)info->in_buffer)[1] = req->in_cnt;
	((u16 *)info->in_buffer)[2] = 0;
	((u16 *)info->in_buffer)[3] = 0;
	cpu_to_be64s((u64 *)info->in_buffer);

	/* Setup gather (input) components */
	if (setup_sgio_components(pdev, req->in, req->in_cnt,
				  &info->in_buffer[8])) {
		dev_err(&pdev->dev, "Failed to setup gather list\n");
		goto destroy_info;
	}

	/* Setup scatter (output) components */
	if (setup_sgio_components(pdev, req->out, req->out_cnt,
				  &info->in_buffer[8 + g_sz_bytes])) {
		dev_err(&pdev->dev, "Failed to setup scatter list\n");
		goto destroy_info;
	}

	info->dma_len = total_mem_len - info_len;
	info->dptr_baddr = dma_map_single(&pdev->dev, info->in_buffer,
					  info->dma_len, DMA_BIDIRECTIONAL);
	if (unlikely(dma_mapping_error(&pdev->dev, info->dptr_baddr))) {
		dev_err(&pdev->dev, "DMA Mapping failed for cpt req\n");
		goto destroy_info;
	}
	/*
	 * Get buffer for union otx2_cpt_res_s response
	 * structure and its physical address
	 */
	info->completion_addr = info->in_buffer + align_dlen;
	info->comp_baddr = info->dptr_baddr + align_dlen;

	return info;

destroy_info:
	otx2_cpt_info_destroy(pdev, info);
	return NULL;
}
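
/*
 * Build a CPT instruction for the request, reserve a pending queue
 * entry and submit the instruction, all under pqueue->lock so that
 * the pending queue and the hardware instruction queue stay in the
 * same order. Returns -EINPROGRESS on success, or -EBUSY when the
 * caller should stop sending until it is notified to resume.
 */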
static int process_request(struct pci_dev *pdev, struct otx2_cpt_req_info *req,
			   struct otx2_cpt_pending_queue *pqueue,
			   struct otx2_cptlf_info *lf)
{
	struct otx2_cptvf_request *cpt_req = &req->req;
	struct otx2_cpt_pending_entry *pentry = NULL;
	union otx2_cpt_ctrl_info *ctrl = &req->ctrl;
	struct otx2_cpt_inst_info *info = NULL;
	union otx2_cpt_res_s *result = NULL;
	struct otx2_cpt_iq_command iq_cmd;
	union otx2_cpt_inst_s cptinst;
	int retry, ret = 0;
	u8 resume_sender;
	gfp_t gfp;

	gfp = (req->areq->flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? GFP_KERNEL :
							      GFP_ATOMIC;
	if (unlikely(!otx2_cptlf_started(lf->lfs)))
		return -ENODEV;

	info = info_create(pdev, req, gfp);
	if (unlikely(!info)) {
		dev_err(&pdev->dev, "Setting up cpt inst info failed");
		return -ENOMEM;
	}
	cpt_req->dlen = info->dlen;

	result = info->completion_addr;
	result->s.compcode = OTX2_CPT_COMPLETION_CODE_INIT;

	spin_lock_bh(&pqueue->lock);
	pentry = get_free_pending_entry(pqueue, pqueue->qlen);
	retry = CPT_PENTRY_TIMEOUT / CPT_PENTRY_STEP;
	while (unlikely(!pentry) && retry--) {
		spin_unlock_bh(&pqueue->lock);
		udelay(CPT_PENTRY_STEP);
		spin_lock_bh(&pqueue->lock);
		pentry = get_free_pending_entry(pqueue, pqueue->qlen);
	}

	if (unlikely(!pentry)) {
		ret = -ENOSPC;
		goto destroy_info;
	}

	/*
	 * Check if we are close to filling in entire pending queue,
	 * if so then tell the sender to stop/sleep by returning -EBUSY
	 * We do it only for context which can sleep (GFP_KERNEL)
	 */
	if (gfp == GFP_KERNEL &&
	    pqueue->pending_count > (pqueue->qlen - CPT_IQ_STOP_MARGIN)) {
		pentry->resume_sender = true;
	} else
		pentry->resume_sender = false;
	resume_sender = pentry->resume_sender;
	pqueue->pending_count++;

	pentry->completion_addr = info->completion_addr;
	pentry->info = info;
	pentry->callback = req->callback;
	pentry->areq = req->areq;
	pentry->busy = true;
	info->pentry = pentry;
	info->time_in = jiffies;
	info->req = req;

	/* Fill in the command */
	iq_cmd.cmd.u = 0;
	iq_cmd.cmd.s.opcode = cpu_to_be16(cpt_req->opcode.flags);
	iq_cmd.cmd.s.param1 = cpu_to_be16(cpt_req->param1);
	iq_cmd.cmd.s.param2 = cpu_to_be16(cpt_req->param2);
	iq_cmd.cmd.s.dlen = cpu_to_be16(cpt_req->dlen);

	/* 64-bit swap for microcode data reads, not needed for addresses */
	cpu_to_be64s(&iq_cmd.cmd.u);
	iq_cmd.dptr = info->dptr_baddr;
	iq_cmd.rptr = 0;
	iq_cmd.cptr.u = 0;
	iq_cmd.cptr.s.grp = ctrl->s.grp;

	/* Fill in the CPT_INST_S type command for HW interpretation */
	otx2_cpt_fill_inst(&cptinst, &iq_cmd, info->comp_baddr);

	/* Print debug info if enabled */
	otx2_cpt_dump_sg_list(pdev, req);
	pr_debug("Cpt_inst_s hexdump (%d bytes)\n", OTX2_CPT_INST_SIZE);
	print_hex_dump_debug("", 0, 16, 1, &cptinst, OTX2_CPT_INST_SIZE, false);
	pr_debug("Dptr hexdump (%d bytes)\n", cpt_req->dlen);
	print_hex_dump_debug("", 0, 16, 1, info->in_buffer,
			     cpt_req->dlen, false);

	/* Send CPT command */
	lf->lfs->ops->send_cmd(&cptinst, 1, lf);

	/*
	 * We allocate and prepare pending queue entry in critical section
	 * together with submitting CPT instruction to CPT instruction queue
	 * to make sure that order of CPT requests is the same in both
	 * pending and instruction queues
	 */
	spin_unlock_bh(&pqueue->lock);

	ret = resume_sender ? -EBUSY : -EINPROGRESS;
	return ret;

destroy_info:
	spin_unlock_bh(&pqueue->lock);
	otx2_cpt_info_destroy(pdev, info);
	return ret;
}
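
/* Submit a request on the logical function (LF) assigned to cpu_num */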
int otx2_cpt_do_request(struct pci_dev *pdev, struct otx2_cpt_req_info *req,
			int cpu_num)
{
	struct otx2_cptvf_dev *cptvf = pci_get_drvdata(pdev);
	struct otx2_cptlfs_info *lfs = &cptvf->lfs;

	return process_request(lfs->pdev, req, &lfs->lf[cpu_num].pqueue,
			       &lfs->lf[cpu_num]);
}
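
/*
 * Map hardware and microcode completion codes to the error code
 * passed to the request callback. Returns 1 if the hardware has not
 * finished the request yet, 0 when the entry can be completed.
 */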
static int cpt_process_ccode(struct otx2_cptlfs_info *lfs,
			     union otx2_cpt_res_s *cpt_status,
			     struct otx2_cpt_inst_info *info,
			     u32 *res_code)
{
	u8 uc_ccode = lfs->ops->cpt_get_uc_compcode(cpt_status);
	u8 ccode = lfs->ops->cpt_get_compcode(cpt_status);
	struct pci_dev *pdev = lfs->pdev;

	switch (ccode) {
	case OTX2_CPT_COMP_E_FAULT:
		dev_err(&pdev->dev,
			"Request failed with DMA fault\n");
		otx2_cpt_dump_sg_list(pdev, info->req);
		break;

	case OTX2_CPT_COMP_E_HWERR:
		dev_err(&pdev->dev,
			"Request failed with hardware error\n");
		otx2_cpt_dump_sg_list(pdev, info->req);
		break;

	case OTX2_CPT_COMP_E_INSTERR:
		dev_err(&pdev->dev,
			"Request failed with instruction error\n");
		otx2_cpt_dump_sg_list(pdev, info->req);
		break;

	case OTX2_CPT_COMP_E_NOTDONE:
		/* check for timeout */
		if (time_after_eq(jiffies, info->time_in +
				  CPT_COMMAND_TIMEOUT * HZ))
			dev_warn(&pdev->dev,
				 "Request timed out 0x%p", info->req);
		else if (info->extra_time < CPT_TIME_IN_RESET_COUNT) {
			info->time_in = jiffies;
			info->extra_time++;
		}
		return 1;

	case OTX2_CPT_COMP_E_GOOD:
	case OTX2_CPT_COMP_E_WARN:
		/*
		 * Check microcode completion code, it is only valid
		 * when completion code is CPT_COMP_E::GOOD
		 */
		if (uc_ccode != OTX2_CPT_UCC_SUCCESS) {
			/*
			 * If requested hmac is truncated and ucode returns
			 * s/g write length error then we report success
			 * because ucode writes as many bytes of calculated
			 * hmac as available in gather buffer and reports
			 * s/g write length error if number of bytes in gather
			 * buffer is less than full hmac size.
			 */
			if (info->req->is_trunc_hmac &&
			    uc_ccode == OTX2_CPT_UCC_SG_WRITE_LENGTH) {
				*res_code = 0;
				break;
			}

			dev_err(&pdev->dev,
				"Request failed with software error code 0x%x\n",
				cpt_status->s.uc_compcode);
			otx2_cpt_dump_sg_list(pdev, info->req);
			break;
		}
		/* Request has been processed with success */
		*res_code = 0;
		break;

	default:
		dev_err(&pdev->dev,
			"Request returned invalid status %d\n", ccode);
		break;
	}
	return 0;
}
static inline void process_pending_queue(struct otx2_cptlfs_info *lfs,
					 struct otx2_cpt_pending_queue *pqueue)
{
	struct otx2_cpt_pending_entry *resume_pentry = NULL;
	void (*callback)(int status, void *arg, void *req);
	struct otx2_cpt_pending_entry *pentry = NULL;
	union otx2_cpt_res_s *cpt_status = NULL;
	struct otx2_cpt_inst_info *info = NULL;
	struct otx2_cpt_req_info *req = NULL;
	struct crypto_async_request *areq;
	struct pci_dev *pdev = lfs->pdev;
	u32 res_code, resume_index;

	while (1) {
		spin_lock_bh(&pqueue->lock);
		pentry = &pqueue->head[pqueue->front];

		if (WARN_ON(!pentry)) {
			spin_unlock_bh(&pqueue->lock);
			break;
		}

		res_code = -EINVAL;
		if (unlikely(!pentry->busy)) {
			spin_unlock_bh(&pqueue->lock);
			break;
		}

		if (unlikely(!pentry->callback)) {
			dev_err(&pdev->dev, "Callback NULL\n");
			goto process_pentry;
		}

		info = pentry->info;
		if (unlikely(!info)) {
			dev_err(&pdev->dev, "Pending entry post arg NULL\n");
			goto process_pentry;
		}

		req = info->req;
		if (unlikely(!req)) {
			dev_err(&pdev->dev, "Request NULL\n");
			goto process_pentry;
		}

		cpt_status = pentry->completion_addr;
		if (unlikely(!cpt_status)) {
			dev_err(&pdev->dev, "Completion address NULL\n");
			goto process_pentry;
		}

		if (cpt_process_ccode(lfs, cpt_status, info, &res_code)) {
			spin_unlock_bh(&pqueue->lock);
			return;
		}
		info->pdev = pdev;

process_pentry:
		/*
		 * Check if we should inform sending side to resume
		 * We do it CPT_IQ_RESUME_MARGIN elements in advance before
		 * pending queue becomes empty
		 */
		resume_index = modulo_inc(pqueue->front, pqueue->qlen,
					  CPT_IQ_RESUME_MARGIN);
		resume_pentry = &pqueue->head[resume_index];
		if (resume_pentry &&
		    resume_pentry->resume_sender) {
			resume_pentry->resume_sender = false;
			callback = resume_pentry->callback;
			areq = resume_pentry->areq;

			if (callback) {
				spin_unlock_bh(&pqueue->lock);

				/*
				 * EINPROGRESS is an indication for sending
				 * side that it can resume sending requests
				 */
				callback(-EINPROGRESS, areq, info);
				spin_lock_bh(&pqueue->lock);
			}
		}

		callback = pentry->callback;
		areq = pentry->areq;
		free_pentry(pentry);

		pqueue->pending_count--;
		pqueue->front = modulo_inc(pqueue->front, pqueue->qlen, 1);
		spin_unlock_bh(&pqueue->lock);

		/*
		 * Call callback after current pending entry has been
		 * processed, we don't do it if the callback pointer is
		 * invalid.
		 */
		if (callback)
			callback(res_code, areq, info);
	}
}
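
/* Per-LF completion work: drain that LF's pending queue */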
void otx2_cpt_post_process(struct otx2_cptlf_wqe *wqe)
{
	process_pending_queue(wqe->lfs,
			      &wqe->lfs->lf[wqe->lf_num].pqueue);
}

/* Return the engine group number used for kernel crypto requests */
int otx2_cpt_get_kcrypto_eng_grp_num(struct pci_dev *pdev)
{
	struct otx2_cptvf_dev *cptvf = pci_get_drvdata(pdev);

	return cptvf->lfs.kcrypto_eng_grp_num;
}