/* SPDX-License-Identifier: GPL-2.0
 * Marvell OcteonTX CPT driver
 *
 * Copyright (C) 2019 Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
11 #ifndef __OTX_CPTVF_REQUEST_MANAGER_H
12 #define __OTX_CPTVF_REQUEST_MANAGER_H
14 #include <linux/types.h>
15 #include <linux/crypto.h>
16 #include <linux/pci.h>
17 #include "otx_cpt_hw_types.h"
/*
 * Maximum total number of SG buffers is 100, we divide it equally
 * between input and output
 */
#define OTX_CPT_MAX_SG_IN_CNT 50
#define OTX_CPT_MAX_SG_OUT_CNT 50

/* DMA mode direct or SG */
#define OTX_CPT_DMA_DIRECT_DIRECT 0
#define OTX_CPT_DMA_GATHER_SCATTER 1

/* Context source CPTR or DPTR */
#define OTX_CPT_FROM_CPTR 0
#define OTX_CPT_FROM_DPTR 1

/* CPT instruction queue alignment */
#define OTX_CPT_INST_Q_ALIGNMENT 128
#define OTX_CPT_MAX_REQ_SIZE 65535

/* Default command timeout in seconds */
#define OTX_CPT_COMMAND_TIMEOUT 4
/* NOTE(review): HOLD/COUNT values are hardware coalescing parameters;
 * exact register units are defined in otx_cpt_hw_types.h — confirm there.
 */
#define OTX_CPT_TIMER_HOLD 0x03F
#define OTX_CPT_COUNT_HOLD 32
#define OTX_CPT_TIME_IN_RESET_COUNT 5

/* Minimum and maximum values for interrupt coalescing */
#define OTX_CPT_COALESC_MIN_TIME_WAIT 0x0
#define OTX_CPT_COALESC_MAX_TIME_WAIT ((1<<16)-1)
#define OTX_CPT_COALESC_MIN_NUM_WAIT 0x0
#define OTX_CPT_COALESC_MAX_NUM_WAIT ((1<<20)-1)
50 union otx_cpt_opcode_info {
58 struct otx_cptvf_request {
62 union otx_cpt_opcode_info opcode;
65 struct otx_cpt_buf_ptr {
71 union otx_cpt_ctrl_info {
74 #if defined(__BIG_ENDIAN_BITFIELD)
76 u32 grp:3; /* Group bits */
77 u32 dma_mode:2; /* DMA mode */
78 u32 se_req:1; /* To SE core */
80 u32 se_req:1; /* To SE core */
81 u32 dma_mode:2; /* DMA mode */
82 u32 grp:3; /* Group bits */
/*
 * CPT_INST_S software command definitions
 */
92 union otx_cpt_iq_cmd_word0 {
102 union otx_cpt_iq_cmd_word3 {
105 #if defined(__BIG_ENDIAN_BITFIELD)
115 struct otx_cpt_iq_cmd {
116 union otx_cpt_iq_cmd_word0 cmd;
119 union otx_cpt_iq_cmd_word3 cptr;
122 struct otx_cpt_sglist_component {
138 struct otx_cpt_pending_entry {
139 u64 *completion_addr; /* Completion address */
140 struct otx_cpt_info_buffer *info;
141 /* Kernel async request callback */
142 void (*callback)(int status, void *arg1, void *arg2);
143 struct crypto_async_request *areq; /* Async request callback arg */
144 u8 resume_sender; /* Notify sender to resume sending requests */
145 u8 busy; /* Entry status (free/busy) */
148 struct otx_cpt_pending_queue {
149 struct otx_cpt_pending_entry *head; /* Head of the queue */
150 u32 front; /* Process work from here */
151 u32 rear; /* Append new work here */
152 u32 pending_count; /* Pending requests count */
153 u32 qlen; /* Queue length */
154 spinlock_t lock; /* Queue lock */
157 struct otx_cpt_req_info {
158 /* Kernel async request callback */
159 void (*callback)(int status, void *arg1, void *arg2);
160 struct crypto_async_request *areq; /* Async request callback arg */
161 struct otx_cptvf_request req;/* Request information (core specific) */
162 union otx_cpt_ctrl_info ctrl;/* User control information */
163 struct otx_cpt_buf_ptr in[OTX_CPT_MAX_SG_IN_CNT];
164 struct otx_cpt_buf_ptr out[OTX_CPT_MAX_SG_OUT_CNT];
165 u8 *iv_out; /* IV to send back */
166 u16 rlen; /* Output length */
167 u8 incnt; /* Number of input buffers */
168 u8 outcnt; /* Number of output buffers */
169 u8 req_type; /* Type of request */
170 u8 is_enc; /* Is a request an encryption request */
171 u8 is_trunc_hmac;/* Is truncated hmac used */
174 struct otx_cpt_info_buffer {
175 struct otx_cpt_pending_entry *pentry;
176 struct otx_cpt_req_info *req;
177 struct pci_dev *pdev;
178 u64 *completion_addr;
181 dma_addr_t dptr_baddr;
182 dma_addr_t rptr_baddr;
183 dma_addr_t comp_baddr;
184 unsigned long time_in;
190 static inline void do_request_cleanup(struct pci_dev *pdev,
191 struct otx_cpt_info_buffer *info)
193 struct otx_cpt_req_info *req;
196 if (info->dptr_baddr)
197 dma_unmap_single(&pdev->dev, info->dptr_baddr,
198 info->dma_len, DMA_BIDIRECTIONAL);
202 for (i = 0; i < req->outcnt; i++) {
203 if (req->out[i].dma_addr)
204 dma_unmap_single(&pdev->dev,
205 req->out[i].dma_addr,
210 for (i = 0; i < req->incnt; i++) {
211 if (req->in[i].dma_addr)
212 dma_unmap_single(&pdev->dev,
218 kfree_sensitive(info);
struct otx_cptvf_wqe;

void otx_cpt_dump_sg_list(struct pci_dev *pdev, struct otx_cpt_req_info *req);
void otx_cpt_post_process(struct otx_cptvf_wqe *wqe);
/* NOTE(review): trailing parameter restored after extraction
 * truncation — verify against the upstream driver.
 */
int otx_cpt_do_request(struct pci_dev *pdev, struct otx_cpt_req_info *req,
		       int cpu_num);
227 #endif /* __OTX_CPTVF_REQUEST_MANAGER_H */