// SPDX-License-Identifier: GPL-2.0-only
/*
 * Provide TDMA helper functions used by cipher and hash algorithm
 * implementations.
 *
 * Author: Boris Brezillon <boris.brezillon@free-electrons.com>
 * Author: Arnaud Ebalard <arno@natisbad.org>
 *
 * This work is based on an initial version written by
 * Sebastian Andrzej Siewior < sebastian at breakpoint dot cc >
 */

#include "cesa.h"
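
/*
 * Advance the scatterlist DMA iterator by @len bytes. Returns true while
 * more data remains for the current operation, false once the scatterlist
 * or the operation length is exhausted.
 */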
bool mv_cesa_req_dma_iter_next_transfer(struct mv_cesa_dma_iter *iter,
					struct mv_cesa_sg_dma_iter *sgiter,
					unsigned int len)
{
	if (!sgiter->sg)
		return false;

	sgiter->op_offset += len;
	sgiter->offset += len;
	if (sgiter->offset == sg_dma_len(sgiter->sg)) {
		if (sg_is_last(sgiter->sg))
			return false;
		sgiter->offset = 0;
		sgiter->sg = sg_next(sgiter->sg);
	}

	if (sgiter->op_offset == iter->op_len)
		return false;

	return true;
}
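
/*
 * Program the TDMA and crypto-engine registers and start processing the
 * descriptor chain attached to @dreq. The accelerator must be idle when
 * this is called (checked by the WARN_ON on CESA_SA_CMD).
 */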
void mv_cesa_dma_step(struct mv_cesa_req *dreq)
{
	struct mv_cesa_engine *engine = dreq->engine;

	writel_relaxed(0, engine->regs + CESA_SA_CFG);

	mv_cesa_set_int_mask(engine, CESA_SA_INT_ACC0_IDMA_DONE);
	writel_relaxed(CESA_TDMA_DST_BURST_128B | CESA_TDMA_SRC_BURST_128B |
		       CESA_TDMA_NO_BYTE_SWAP | CESA_TDMA_EN,
		       engine->regs + CESA_TDMA_CONTROL);

	writel_relaxed(CESA_SA_CFG_ACT_CH0_IDMA | CESA_SA_CFG_MULTI_PKT |
		       CESA_SA_CFG_CH0_W_IDMA | CESA_SA_CFG_PARA_DIS,
		       engine->regs + CESA_SA_CFG);
	writel_relaxed(dreq->chain.first->cur_dma,
		       engine->regs + CESA_TDMA_NEXT_ADDR);
	WARN_ON(readl(engine->regs + CESA_SA_CMD) &
		CESA_SA_CMD_EN_CESA_SA_ACCL0);
	writel(CESA_SA_CMD_EN_CESA_SA_ACCL0, engine->regs + CESA_SA_CMD);
}
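
/*
 * Walk the request's descriptor chain and return every descriptor (and any
 * attached operation context) to its DMA pool, then reset the chain.
 */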
void mv_cesa_dma_cleanup(struct mv_cesa_req *dreq)
{
	struct mv_cesa_tdma_desc *tdma;

	for (tdma = dreq->chain.first; tdma;) {
		struct mv_cesa_tdma_desc *old_tdma = tdma;
		u32 type = tdma->flags & CESA_TDMA_TYPE_MSK;

		if (type == CESA_TDMA_OP)
			dma_pool_free(cesa_dev->dma->op_pool, tdma->op,
				      le32_to_cpu(tdma->src));

		tdma = tdma->next;
		dma_pool_free(cesa_dev->dma->tdma_desc_pool, old_tdma,
			      old_tdma->cur_dma);
	}

	dreq->chain.first = NULL;
	dreq->chain.last = NULL;
}
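
/*
 * Turn the SRAM-relative addresses stored in each descriptor into absolute
 * DMA addresses for @engine, and let operation contexts be adjusted for
 * this engine before the chain is handed to the hardware.
 */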
void mv_cesa_dma_prepare(struct mv_cesa_req *dreq,
			 struct mv_cesa_engine *engine)
{
	struct mv_cesa_tdma_desc *tdma;

	for (tdma = dreq->chain.first; tdma; tdma = tdma->next) {
		if (tdma->flags & CESA_TDMA_DST_IN_SRAM)
			tdma->dst = cpu_to_le32(tdma->dst_dma + engine->sram_dma);

		if (tdma->flags & CESA_TDMA_SRC_IN_SRAM)
			tdma->src = cpu_to_le32(tdma->src_dma + engine->sram_dma);

		if ((tdma->flags & CESA_TDMA_TYPE_MSK) == CESA_TDMA_OP)
			mv_cesa_adjust_op(engine, tdma->op);
	}
}
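
/*
 * Append the request's descriptor chain to the engine's chain. The two are
 * also linked in hardware (via next_dma) unless a chain break or an
 * IV/state setup is required in between.
 */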
void mv_cesa_tdma_chain(struct mv_cesa_engine *engine,
			struct mv_cesa_req *dreq)
{
	if (engine->chain.first == NULL && engine->chain.last == NULL) {
		engine->chain.first = dreq->chain.first;
		engine->chain.last = dreq->chain.last;
	} else {
		struct mv_cesa_tdma_desc *last;

		last = engine->chain.last;
		last->next = dreq->chain.first;
		engine->chain.last = dreq->chain.last;

		/*
		 * Break the DMA chain if the CESA_TDMA_BREAK_CHAIN is set on
		 * the last element of the current chain, or if the request
		 * being queued needs the IV regs to be set before launching
		 * the request.
		 */
		if (!(last->flags & CESA_TDMA_BREAK_CHAIN) &&
		    !(dreq->chain.first->flags & CESA_TDMA_SET_STATE))
			last->next_dma = cpu_to_le32(dreq->chain.first->cur_dma);
	}
}
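
/*
 * Interrupt-time processing: walk the engine chain up to the descriptor the
 * hardware is currently working on, completing every finished request on
 * the way. Returns 0 on success or the first error reported by a request.
 */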
int mv_cesa_tdma_process(struct mv_cesa_engine *engine, u32 status)
{
	struct crypto_async_request *req = NULL;
	struct mv_cesa_tdma_desc *tdma = NULL, *next = NULL;
	dma_addr_t tdma_cur;
	int res = 0;

	tdma_cur = readl(engine->regs + CESA_TDMA_CUR);

	for (tdma = engine->chain.first; tdma; tdma = next) {
		spin_lock_bh(&engine->lock);
		next = tdma->next;
		spin_unlock_bh(&engine->lock);

		if (tdma->flags & CESA_TDMA_END_OF_REQ) {
			struct crypto_async_request *backlog = NULL;
			struct mv_cesa_ctx *ctx;
			u32 current_status;

			spin_lock_bh(&engine->lock);
			/*
			 * if req is NULL, this means we're processing the
			 * request in engine->req.
			 */
			if (!req)
				req = engine->req;
			else
				req = mv_cesa_dequeue_req_locked(engine,
								 &backlog);

			/* Re-chaining to the next request */
			engine->chain.first = tdma->next;
			tdma->next = NULL;

			/* If this is the last request, clear the chain */
			if (engine->chain.first == NULL)
				engine->chain.last = NULL;
			spin_unlock_bh(&engine->lock);

			ctx = crypto_tfm_ctx(req->tfm);
			current_status = (tdma->cur_dma == tdma_cur) ?
					 status : CESA_SA_INT_ACC0_IDMA_DONE;
			res = ctx->ops->process(req, current_status);
			ctx->ops->complete(req);

			if (res == 0)
				mv_cesa_engine_enqueue_complete_request(engine,
									req);

			if (backlog)
				backlog->complete(backlog, -EINPROGRESS);
		}

		if (res || tdma->cur_dma == tdma_cur)
			break;
	}

	/*
	 * Save the last request in error to engine->req, so that the core
	 * knows which request was faulty
	 */
	if (res) {
		spin_lock_bh(&engine->lock);
		engine->req = req;
		spin_unlock_bh(&engine->lock);
	}

	return res;
}
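
/*
 * Allocate a zeroed TDMA descriptor from the DMA pool and append it to
 * @chain, recording its own DMA address in cur_dma so descriptors can be
 * linked together and freed later.
 */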
static struct mv_cesa_tdma_desc *
mv_cesa_dma_add_desc(struct mv_cesa_tdma_chain *chain, gfp_t flags)
{
	struct mv_cesa_tdma_desc *new_tdma = NULL;
	dma_addr_t dma_handle;

	new_tdma = dma_pool_zalloc(cesa_dev->dma->tdma_desc_pool, flags,
				   &dma_handle);
	if (!new_tdma)
		return ERR_PTR(-ENOMEM);

	new_tdma->cur_dma = dma_handle;
	if (chain->last) {
		chain->last->next_dma = cpu_to_le32(dma_handle);
		chain->last->next = new_tdma;
	} else {
		chain->first = new_tdma;
	}

	chain->last = new_tdma;

	return new_tdma;
}
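
/*
 * Append a CESA_TDMA_RESULT descriptor copying @size bytes of result data
 * from @src back into the operation context of the first CESA_TDMA_OP
 * descriptor found in the chain.
 */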
int mv_cesa_dma_add_result_op(struct mv_cesa_tdma_chain *chain, dma_addr_t src,
			      u32 size, u32 flags, gfp_t gfp_flags)
{
	struct mv_cesa_tdma_desc *tdma, *op_desc;

	tdma = mv_cesa_dma_add_desc(chain, gfp_flags);
	if (IS_ERR(tdma))
		return PTR_ERR(tdma);

	/* We re-use an existing op_desc object to retrieve the context
	 * and result instead of allocating a new one.
	 * There is at least one object of this type in a CESA crypto
	 * req, just pick the first one in the chain.
	 */
	for (op_desc = chain->first; op_desc; op_desc = op_desc->next) {
		u32 type = op_desc->flags & CESA_TDMA_TYPE_MSK;

		if (type == CESA_TDMA_OP)
			break;
	}

	if (!op_desc)
		return -EIO;

	tdma->byte_cnt = cpu_to_le32(size | BIT(31));
	tdma->src_dma = src;
	tdma->dst_dma = op_desc->src_dma;
	tdma->op = op_desc->op;

	flags &= (CESA_TDMA_DST_IN_SRAM | CESA_TDMA_SRC_IN_SRAM);
	tdma->flags = flags | CESA_TDMA_RESULT;
	return 0;
}
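
/*
 * Append a CESA_TDMA_OP descriptor: allocate an operation context from the
 * op pool, copy @op_templ into it and emit a transfer that pushes it into
 * the engine SRAM (skipping the context part when @skip_ctx is set).
 */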
struct mv_cesa_op_ctx *mv_cesa_dma_add_op(struct mv_cesa_tdma_chain *chain,
					  const struct mv_cesa_op_ctx *op_templ,
					  bool skip_ctx, gfp_t flags)
{
	struct mv_cesa_tdma_desc *tdma;
	struct mv_cesa_op_ctx *op;
	dma_addr_t dma_handle;
	unsigned int size;

	tdma = mv_cesa_dma_add_desc(chain, flags);
	if (IS_ERR(tdma))
		return ERR_CAST(tdma);

	op = dma_pool_alloc(cesa_dev->dma->op_pool, flags, &dma_handle);
	if (!op)
		return ERR_PTR(-ENOMEM);

	*op = *op_templ;

	size = skip_ctx ? sizeof(op->desc) : sizeof(*op);

	tdma->op = op;
	tdma->byte_cnt = cpu_to_le32(size | BIT(31));
	tdma->src = cpu_to_le32(dma_handle);
	tdma->dst_dma = CESA_SA_CFG_SRAM_OFFSET;
	tdma->flags = CESA_TDMA_DST_IN_SRAM | CESA_TDMA_OP;

	return op;
}
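
/*
 * Append a plain CESA_TDMA_DATA descriptor moving @size bytes from @src to
 * @dst; only the IN_SRAM bits of @flags are kept.
 */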
int mv_cesa_dma_add_data_transfer(struct mv_cesa_tdma_chain *chain,
				  dma_addr_t dst, dma_addr_t src, u32 size,
				  u32 flags, gfp_t gfp_flags)
{
	struct mv_cesa_tdma_desc *tdma;

	tdma = mv_cesa_dma_add_desc(chain, gfp_flags);
	if (IS_ERR(tdma))
		return PTR_ERR(tdma);

	tdma->byte_cnt = cpu_to_le32(size | BIT(31));
	tdma->src_dma = src;
	tdma->dst_dma = dst;

	flags &= (CESA_TDMA_DST_IN_SRAM | CESA_TDMA_SRC_IN_SRAM);
	tdma->flags = flags | CESA_TDMA_DATA;

	return 0;
}
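
/*
 * Append an empty (all-zero) descriptor, used as a launch point when
 * requests are chained back-to-back on the engine.
 */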
int mv_cesa_dma_add_dummy_launch(struct mv_cesa_tdma_chain *chain, gfp_t flags)
{
	struct mv_cesa_tdma_desc *tdma;

	tdma = mv_cesa_dma_add_desc(chain, flags);
	return PTR_ERR_OR_ZERO(tdma);
}
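
/*
 * Append a terminating descriptor whose byte_cnt carries only the BIT(31)
 * marker and no transfer, closing the chain for the engine.
 */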
int mv_cesa_dma_add_dummy_end(struct mv_cesa_tdma_chain *chain, gfp_t flags)
{
	struct mv_cesa_tdma_desc *tdma;

	tdma = mv_cesa_dma_add_desc(chain, flags);
	if (IS_ERR(tdma))
		return PTR_ERR(tdma);

	tdma->byte_cnt = cpu_to_le32(BIT(31));

	return 0;
}
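
/*
 * Emit one data-transfer descriptor per scatterlist chunk covered by the
 * current operation, moving data between the scatterlist and the engine's
 * data SRAM in the direction given by sgiter->dir.
 */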
int mv_cesa_dma_add_op_transfers(struct mv_cesa_tdma_chain *chain,
				 struct mv_cesa_dma_iter *dma_iter,
				 struct mv_cesa_sg_dma_iter *sgiter,
				 gfp_t gfp_flags)
{
	u32 flags = sgiter->dir == DMA_TO_DEVICE ?
		    CESA_TDMA_DST_IN_SRAM : CESA_TDMA_SRC_IN_SRAM;
	unsigned int len;

	do {
		dma_addr_t dst, src;
		int ret;

		len = mv_cesa_req_dma_iter_transfer_len(dma_iter, sgiter);
		if (sgiter->dir == DMA_TO_DEVICE) {
			dst = CESA_SA_DATA_SRAM_OFFSET + sgiter->op_offset;
			src = sg_dma_address(sgiter->sg) + sgiter->offset;
		} else {
			dst = sg_dma_address(sgiter->sg) + sgiter->offset;
			src = CESA_SA_DATA_SRAM_OFFSET + sgiter->op_offset;
		}

		ret = mv_cesa_dma_add_data_transfer(chain, dst, src, len,
						    flags, gfp_flags);
		if (ret)
			return ret;
	} while (mv_cesa_req_dma_iter_next_transfer(dma_iter, sgiter, len));

	return 0;
}
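
/*
 * CPU copy of @buflen bytes between @sgl (starting at @skip) and the engine
 * SRAM at @sram_off; the direction is chosen by @to_sram. Returns the
 * number of bytes actually copied.
 */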
size_t mv_cesa_sg_copy(struct mv_cesa_engine *engine,
		       struct scatterlist *sgl, unsigned int nents,
		       unsigned int sram_off, size_t buflen, off_t skip,
		       bool to_sram)
{
	unsigned int sg_flags = SG_MITER_ATOMIC;
	struct sg_mapping_iter miter;
	unsigned int offset = 0;

	if (to_sram)
		sg_flags |= SG_MITER_FROM_SG;
	else
		sg_flags |= SG_MITER_TO_SG;

	sg_miter_start(&miter, sgl, nents, sg_flags);

	if (!sg_miter_skip(&miter, skip))
		return 0;

	while ((offset < buflen) && sg_miter_next(&miter)) {
		unsigned int len;

		len = min(miter.length, buflen - offset);

		if (to_sram) {
			if (engine->pool)
				memcpy(engine->sram_pool + sram_off + offset,
				       miter.addr, len);
			else
				memcpy_toio(engine->sram + sram_off + offset,
					    miter.addr, len);
		} else {
			if (engine->pool)
				memcpy(miter.addr,
				       engine->sram_pool + sram_off + offset,
				       len);
			else
				memcpy_fromio(miter.addr,
					      engine->sram + sram_off + offset,
					      len);
		}

		offset += len;
	}

	sg_miter_stop(&miter);

	return offset;
}