1 /* SPDX-License-Identifier: GPL-2.0-only
2 * Copyright (C) 2020 Marvell.
3 */
5 #ifndef __OTX2_CPT_REQMGR_H
6 #define __OTX2_CPT_REQMGR_H
8 #include "otx2_cpt_common.h"
10 /* Completion code size and initial value */
/*
 * The completion word is 8 bytes wide and starts out as NOTDONE.
 * NOTE(review): OTX2_CPT_COMP_E_NOTDONE is presumably declared in
 * otx2_cpt_common.h and is presumably overwritten by hardware when the
 * request completes — confirm against the enqueue/poll paths.
 */
11 #define OTX2_CPT_COMPLETION_CODE_SIZE 8
12 #define OTX2_CPT_COMPLETION_CODE_INIT OTX2_CPT_COMP_E_NOTDONE
13 /*
14 * Maximum total number of SG buffers is 100, we divide it equally
15 * between input and output
16 */
/* 50 input + 50 output entries = 100 total SG buffers */
17 #define OTX2_CPT_MAX_SG_IN_CNT 50
18 #define OTX2_CPT_MAX_SG_OUT_CNT 50
20 /* DMA mode direct or SG */
21 #define OTX2_CPT_DMA_MODE_DIRECT 0
22 #define OTX2_CPT_DMA_MODE_SG 1
24 /* Context source CPTR or DPTR */
25 #define OTX2_CPT_FROM_CPTR 0
26 #define OTX2_CPT_FROM_DPTR 1
/* Upper bound on a single request's length; 65535 fits exactly in a u16 */
28 #define OTX2_CPT_MAX_REQ_SIZE 65535
/*
 * Opcode for a CPT command.
 * NOTE(review): the union's members (original lines 31-37) are not
 * visible in this excerpt.
 */
30 union otx2_cpt_opcode {
/*
 * Core-specific request data handed to the CPT VF. Only the opcode
 * member is visible here; the remaining members (original lines 39-41,
 * 43-44) are missing from this excerpt.
 */
38 struct otx2_cptvf_request {
42 union otx2_cpt_opcode opcode;
45 /*
46 * CPT_INST_S software command definitions
47 */
/* Word 0 of the IQ command; union layout not visible in this excerpt */
49 union otx2_cpt_iq_cmd_word0 {
/* Word 3 of the IQ command; union layout not visible in this excerpt */
59 union otx2_cpt_iq_cmd_word3 {
/*
 * Software image of an instruction-queue command. The members between
 * 'cmd' (word 0) and 'cptr' (word 3) — original lines 69-70, presumably
 * the data/result pointer words — are missing from this excerpt.
 */
67 struct otx2_cpt_iq_command {
68 union otx2_cpt_iq_cmd_word0 cmd;
71 union otx2_cpt_iq_cmd_word3 cptr;
/*
 * Bookkeeping for one outstanding (in-flight) CPT request.
 * NOTE(review): original line 76 (one member) and the closing brace are
 * missing from this excerpt.
 */
74 struct otx2_cpt_pending_entry {
75 void *completion_addr; /* Completion address */
77 /* Kernel async request callback */
78 void (*callback)(int status, void *arg1, void *arg2);
79 struct crypto_async_request *areq; /* Async request callback arg */
80 u8 resume_sender; /* Notify sender to resume sending requests */
81 u8 busy; /* Entry status (free/busy) */
/*
 * Ring of pending entries: new work is appended at 'rear' and processed
 * from 'front', with all fields guarded by 'lock'.
 * NOTE(review): the closing brace (original line 91) is missing from
 * this excerpt.
 */
84 struct otx2_cpt_pending_queue {
85 struct otx2_cpt_pending_entry *head; /* Head of the queue */
86 u32 front; /* Process work from here */
87 u32 rear; /* Append new work here */
88 u32 pending_count; /* Pending requests count */
89 u32 qlen; /* Queue length */
90 spinlock_t lock; /* Queue lock */
/*
 * Data buffer descriptor used by the in[]/out[] arrays of
 * otx2_cpt_req_info. Members (original lines 94-97; the code below
 * references at least .dma_addr) are not visible in this excerpt.
 */
93 struct otx2_cpt_buf_ptr {
/*
 * User-supplied control word for a request. The bitfield layout is
 * endianness-dependent so both orderings are spelled out.
 * NOTE(review): the union's flat view (original lines 100-101), the
 * #else directive (original line 107) and the closing lines (112+) are
 * missing from this excerpt.
 */
99 union otx2_cpt_ctrl_info {
102 #if defined(__BIG_ENDIAN_BITFIELD)
103 u32 reserved_6_31:26;
104 u32 grp:3; /* Group bits */
105 u32 dma_mode:2; /* DMA mode */
106 u32 se_req:1; /* To SE core */
/* Little-endian ordering: same fields, reversed */
108 u32 se_req:1; /* To SE core */
109 u32 dma_mode:2; /* DMA mode */
110 u32 grp:3; /* Group bits */
111 u32 reserved_6_31:26;
/*
 * Request descriptor submitted via otx2_cpt_do_request().
 * NOTE(review): the closing brace (original line 131) is missing from
 * this excerpt.
 */
116 struct otx2_cpt_req_info {
117 /* Kernel async request callback */
118 void (*callback)(int status, void *arg1, void *arg2);
119 struct crypto_async_request *areq; /* Async request callback arg */
120 struct otx2_cptvf_request req;/* Request information (core specific) */
121 union otx2_cpt_ctrl_info ctrl;/* User control information */
/* Input data buffers; only the first in_cnt entries are valid */
122 struct otx2_cpt_buf_ptr in[OTX2_CPT_MAX_SG_IN_CNT];
/* Output data buffers; only the first out_cnt entries are valid */
123 struct otx2_cpt_buf_ptr out[OTX2_CPT_MAX_SG_OUT_CNT];
124 u8 *iv_out; /* IV to send back */
125 u16 rlen; /* Output length */
126 u8 in_cnt; /* Number of input buffers */
127 u8 out_cnt; /* Number of output buffers */
128 u8 req_type; /* Type of request */
129 u8 is_enc; /* Is a request an encryption request */
130 u8 is_trunc_hmac;/* Is truncated hmac used */
/*
 * Per-instruction tracking info tying a request to its DMA mappings.
 * NOTE(review): some members (original lines 138-139 and 144 onward,
 * including the dma_len field used by otx2_cpt_info_destroy() below)
 * are missing from this excerpt.
 */
133 struct otx2_cpt_inst_info {
/* Pending-queue entry that owns this instruction */
134 struct otx2_cpt_pending_entry *pentry;
/* Originating request */
135 struct otx2_cpt_req_info *req;
/* Device the DMA mappings were created against */
136 struct pci_dev *pdev;
137 void *completion_addr;
/* DMA addresses: input area (dptr), result area (rptr), completion word */
140 dma_addr_t dptr_baddr;
141 dma_addr_t rptr_baddr;
142 dma_addr_t comp_baddr;
/* Enqueue timestamp (presumably jiffies — confirm at the producer side) */
143 unsigned long time_in;
/*
 * Hardware scatter/gather list element. Member layout (original lines
 * 150 onward) is not visible in this excerpt.
 */
149 struct otx2_cpt_sglist_component {
/*
 * otx2_cpt_info_destroy() - release DMA mappings held by a request's
 * instruction info.
 * @pdev: PCI device the mappings were created against
 * @info: instruction info whose mappings are to be unmapped
 *
 * Unmaps the dptr area (if mapped) and every input/output data buffer
 * that has a DMA address.
 * NOTE(review): several interior lines are missing from this excerpt
 * (e.g. the assignment of 'req', the per-buffer unmap arguments, the
 * loop-closing braces and the final release of 'info') — do not treat
 * this listing as the complete function body.
 */
160 static inline void otx2_cpt_info_destroy(struct pci_dev *pdev,
161 struct otx2_cpt_inst_info *info)
163 struct otx2_cpt_req_info *req;
/* Unmap the dptr DMA area only if it was actually mapped */
166 if (info->dptr_baddr)
167 dma_unmap_single(&pdev->dev, info->dptr_baddr,
168 info->dma_len, DMA_BIDIRECTIONAL);
/* Unmap each output buffer that carries a DMA address */
172 for (i = 0; i < req->out_cnt; i++) {
173 if (req->out[i].dma_addr)
174 dma_unmap_single(&pdev->dev,
175 req->out[i].dma_addr,
/* Unmap each input buffer that carries a DMA address */
180 for (i = 0; i < req->in_cnt; i++) {
181 if (req->in[i].dma_addr)
182 dma_unmap_single(&pdev->dev,
/* Forward declaration: LF work-queue entry type, defined elsewhere */
191 struct otx2_cptlf_wqe;
/*
 * Submit @req to the CPT device.
 * NOTE(review): this prototype's continuation (original line 193, the
 * remaining parameter list) is missing from this excerpt.
 */
192 int otx2_cpt_do_request(struct pci_dev *pdev, struct otx2_cpt_req_info *req,
/* Post-process a completed work-queue entry (completion path) */
194 void otx2_cpt_post_process(struct otx2_cptlf_wqe *wqe);
/* Look up the engine-group number used for kernel-crypto requests */
195 int otx2_cpt_get_kcrypto_eng_grp_num(struct pci_dev *pdev);
197 #endif /* __OTX2_CPT_REQMGR_H */