// SPDX-License-Identifier: GPL-2.0
#include <linux/gfp.h>
#include <linux/workqueue.h>
#include <crypto/internal/skcipher.h>

#include "nitrox_dev.h"
#include "nitrox_req.h"
#include "nitrox_csr.h"

/* SLC_STORE_INFO */
#define MIN_UDD_LEN 16
/* PKT_IN_HDR + SLC_STORE_INFO */
#define FDATA_SIZE 32
/* Base destination port for the solicited requests */
#define SOLICIT_BASE_DPORT 256
#define PENDING_SIG	0xFFFFFFFFFFFFFFFFUL

#define REQ_NOT_POSTED 1
#define REQ_BACKLOG    2
#define REQ_POSTED     3
/**
 * Response codes from SE microcode
 * 0x00 - Success
 *	Completion with no error
 * 0x43 - ERR_GC_DATA_LEN_INVALID
 *	Invalid Data length if Encryption Data length is
 *	less than 16 bytes for AES-XTS and AES-CTS.
 * 0x45 - ERR_GC_CTX_LEN_INVALID
 *	Invalid context length: CTXL != 23 words.
 * 0x4F - ERR_GC_DOCSIS_CIPHER_INVALID
 *	DOCSIS support is enabled with other than
 *	AES/DES-CBC mode encryption.
 * 0x50 - ERR_GC_DOCSIS_OFFSET_INVALID
 *	Authentication offset is other than 0 with
 *	Encryption IV source = 0.
 *	Authentication offset is other than 8 (DES)/16 (AES)
 *	with Encryption IV source = 1
 * 0x51 - ERR_GC_CRC32_INVALID_SELECTION
 *	CRC32 is enabled for other than DOCSIS encryption.
 * 0x52 - ERR_GC_AES_CCM_FLAG_INVALID
 *	Invalid flag options in AES-CCM IV.
 */
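/*
 * Note: the completion code above is reported in the low byte of the
 * ORH; process_response_list() below extracts it with (orh & 0xff).
 */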
static inline int incr_index(int index, int count, int max)
{
	if ((index + count) >= max)
		index = index + count - max;
	else
		index += count;

	return index;
}
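/*
 * Example: with a ring of qlen 8, incr_index(7, 1, 8) wraps to 0 while
 * incr_index(3, 1, 8) simply advances to 4.
 */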
/**
 * softreq_unmap_sgbufs - unmap and free the sg lists.
 * @sr: Request structure
 */
static void softreq_unmap_sgbufs(struct nitrox_softreq *sr)
{
	struct nitrox_device *ndev = sr->ndev;
	struct device *dev = DEV(ndev);
	struct nitrox_sglist *sglist;

	/* unmap in sgbuf */
	sglist = sr->in.sglist;
	if (!sglist)
		goto out_unmap;

	/* unmap iv */
	dma_unmap_single(dev, sglist->dma, sglist->len, DMA_BIDIRECTIONAL);
	/* unmap src sglist */
	dma_unmap_sg(dev, sr->in.buf, (sr->in.map_bufs_cnt - 1), sr->in.dir);
	/* unmap gather component */
	dma_unmap_single(dev, sr->in.dma, sr->in.len, DMA_TO_DEVICE);
	kfree(sr->in.sglist);
	kfree(sr->in.sgcomp);
	sr->in.sglist = NULL;
	sr->in.buf = NULL;
	sr->in.map_bufs_cnt = 0;
out_unmap:
	/* unmap out sgbuf */
	sglist = sr->out.sglist;
	if (!sglist)
		return;

	/* unmap orh */
	dma_unmap_single(dev, sr->resp.orh_dma, ORH_HLEN, sr->out.dir);

	/* unmap dst sglist */
	if (!sr->inplace) {
		dma_unmap_sg(dev, sr->out.buf, (sr->out.map_bufs_cnt - 3),
			     sr->out.dir);
	}
	/* unmap completion */
	dma_unmap_single(dev, sr->resp.completion_dma, COMP_HLEN, sr->out.dir);

	/* unmap scatter component */
	dma_unmap_single(dev, sr->out.dma, sr->out.len, DMA_TO_DEVICE);
	kfree(sr->out.sglist);
	kfree(sr->out.sgcomp);
	sr->out.sglist = NULL;
	sr->out.buf = NULL;
	sr->out.map_bufs_cnt = 0;
}
static void softreq_destroy(struct nitrox_softreq *sr)
{
	softreq_unmap_sgbufs(sr);
	kfree(sr);
}
/**
 * create_sg_component - create SG components for N5 device.
 * @sr: Request structure
 * @sgtbl: SG table
 * @map_nents: number of mapped SG entries
 *
 * Component structure
 *
 *   63     48 47      32 31    16 15      0
 *   --------------------------------------
 *   |   LEN0  |  LEN1  |  LEN2  |  LEN3  |
 *   |-------------------------------------
 *   |               PTR0                 |
 *   --------------------------------------
 *   |               PTR1                 |
 *   --------------------------------------
 *   |               PTR2                 |
 *   --------------------------------------
 *   |               PTR3                 |
 *   --------------------------------------
 *
 *   Returns 0 if success or a negative errno code on error.
 */
static int create_sg_component(struct nitrox_softreq *sr,
			       struct nitrox_sgtable *sgtbl, int map_nents)
{
	struct nitrox_device *ndev = sr->ndev;
	struct nitrox_sgcomp *sgcomp;
	struct nitrox_sglist *sglist;
	dma_addr_t dma;
	size_t sz_comp;
	int i, j, nr_sgcomp;

	nr_sgcomp = roundup(map_nents, 4) / 4;

	/* each component holds 4 dma pointers */
	sz_comp = nr_sgcomp * sizeof(*sgcomp);
	sgcomp = kzalloc(sz_comp, sr->gfp);
	if (!sgcomp)
		return -ENOMEM;

	sgtbl->sgcomp = sgcomp;
	sgtbl->nr_sgcomp = nr_sgcomp;

	sglist = sgtbl->sglist;
	/* populate device sg component */
	for (i = 0; i < nr_sgcomp; i++) {
		for (j = 0; j < 4; j++) {
			sgcomp->len[j] = cpu_to_be16(sglist->len);
			sgcomp->dma[j] = cpu_to_be64(sglist->dma);
			sglist++;
		}
		sgcomp++;
	}

	/* map the device sg component */
	dma = dma_map_single(DEV(ndev), sgtbl->sgcomp, sz_comp, DMA_TO_DEVICE);
	if (dma_mapping_error(DEV(ndev), dma)) {
		kfree(sgtbl->sgcomp);
		sgtbl->sgcomp = NULL;
		return -EINVAL;
	}

	sgtbl->dma = dma;
	sgtbl->len = sz_comp;

	return 0;
}
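/*
 * Example: a request mapping an IV plus five source entries needs six
 * sglist slots, so roundup(6, 4) / 4 = 2 components are allocated; the
 * two unused trailing len/dma slots stay zero thanks to kzalloc().
 */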
/**
 * dma_map_inbufs - DMA map input sglist and create sglist component
 *                  for N5 device.
 * @sr: Request structure
 * @req: Crypto request structure
 *
 * Returns 0 if successful or a negative errno code on error.
 */
static int dma_map_inbufs(struct nitrox_softreq *sr,
			  struct se_crypto_request *req)
{
	struct device *dev = DEV(sr->ndev);
	struct scatterlist *sg = req->src;
	struct nitrox_sglist *glist;
	int i, nents, ret = 0;
	dma_addr_t dma;
	size_t sz;

	nents = sg_nents(req->src);

	/* create gather list IV and src entries */
	sz = roundup((1 + nents), 4) * sizeof(*glist);
	glist = kzalloc(sz, sr->gfp);
	if (!glist)
		return -ENOMEM;

	sr->in.sglist = glist;
	/* map IV */
	dma = dma_map_single(dev, &req->iv, req->ivsize, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, dma)) {
		ret = -EINVAL;
		goto iv_map_err;
	}

	sr->in.dir = (req->src == req->dst) ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
	/* map src entries */
	nents = dma_map_sg(dev, req->src, nents, sr->in.dir);
	if (!nents) {
		ret = -EINVAL;
		goto dma_map_err;
	}
	sr->in.buf = req->src;

	/* store the mappings */
	glist->len = req->ivsize;
	glist->dma = dma;
	glist++;
	sr->in.total_bytes += req->ivsize;

	for_each_sg(req->src, sg, nents, i) {
		glist->len = sg_dma_len(sg);
		glist->dma = sg_dma_address(sg);
		sr->in.total_bytes += glist->len;
		glist++;
	}
	/* roundup map count to align with entries in sg component */
	sr->in.map_bufs_cnt = (1 + nents);

	/* create NITROX gather component */
	ret = create_sg_component(sr, &sr->in, sr->in.map_bufs_cnt);
	if (ret)
		goto incomp_err;

	return 0;

incomp_err:
	dma_unmap_sg(dev, req->src, nents, sr->in.dir);
	sr->in.map_bufs_cnt = 0;
dma_map_err:
	dma_unmap_single(dev, dma, req->ivsize, DMA_BIDIRECTIONAL);
iv_map_err:
	kfree(sr->in.sglist);
	sr->in.sglist = NULL;
	return ret;
}
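/*
 * The gather list built above is laid out as
 *   [ IV ][ src[0] ][ src[1] ] ... [ src[nents - 1] ]
 * and is packed four entries per component by create_sg_component().
 */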
static int dma_map_outbufs(struct nitrox_softreq *sr,
			   struct se_crypto_request *req)
{
	struct device *dev = DEV(sr->ndev);
	struct nitrox_sglist *glist = sr->in.sglist;
	struct nitrox_sglist *slist;
	struct scatterlist *sg;
	int i, nents, map_bufs_cnt, ret = 0;
	size_t sz;

	nents = sg_nents(req->dst);

	/* create scatter list ORH, IV, dst entries and Completion header */
	sz = roundup((3 + nents), 4) * sizeof(*slist);
	slist = kzalloc(sz, sr->gfp);
	if (!slist)
		return -ENOMEM;

	sr->out.sglist = slist;
	sr->out.dir = DMA_BIDIRECTIONAL;
	/* map ORH */
	sr->resp.orh_dma = dma_map_single(dev, &sr->resp.orh, ORH_HLEN,
					  sr->out.dir);
	if (dma_mapping_error(dev, sr->resp.orh_dma)) {
		ret = -EINVAL;
		goto orh_map_err;
	}
	/* map completion */
	sr->resp.completion_dma = dma_map_single(dev, &sr->resp.completion,
						 COMP_HLEN, sr->out.dir);
	if (dma_mapping_error(dev, sr->resp.completion_dma)) {
		ret = -EINVAL;
		goto compl_map_err;
	}

	sr->inplace = (req->src == req->dst) ? true : false;
	/* out of place */
	if (!sr->inplace) {
		nents = dma_map_sg(dev, req->dst, nents, sr->out.dir);
		if (!nents) {
			ret = -EINVAL;
			goto dst_map_err;
		}
	}
	sr->out.buf = req->dst;

	/* store the mappings */
	/* orh */
	slist->len = ORH_HLEN;
	slist->dma = sr->resp.orh_dma;
	slist++;

	/* copy the glist mappings */
	if (sr->inplace) {
		nents = sr->in.map_bufs_cnt - 1;
		map_bufs_cnt = sr->in.map_bufs_cnt;
		while (map_bufs_cnt--) {
			slist->len = glist->len;
			slist->dma = glist->dma;
			slist++;
			glist++;
		}
	} else {
		/* copy iv mapping */
		slist->len = glist->len;
		slist->dma = glist->dma;
		slist++;
		/* copy remaining maps */
		for_each_sg(req->dst, sg, nents, i) {
			slist->len = sg_dma_len(sg);
			slist->dma = sg_dma_address(sg);
			slist++;
		}
	}

	/* completion */
	slist->len = COMP_HLEN;
	slist->dma = sr->resp.completion_dma;

	sr->out.map_bufs_cnt = (3 + nents);

	ret = create_sg_component(sr, &sr->out, sr->out.map_bufs_cnt);
	if (ret)
		goto outcomp_map_err;

	return 0;

outcomp_map_err:
	if (!sr->inplace)
		dma_unmap_sg(dev, req->dst, nents, sr->out.dir);
	sr->out.map_bufs_cnt = 0;
	sr->out.buf = NULL;
dst_map_err:
	dma_unmap_single(dev, sr->resp.completion_dma, COMP_HLEN, sr->out.dir);
	sr->resp.completion_dma = 0;
compl_map_err:
	dma_unmap_single(dev, sr->resp.orh_dma, ORH_HLEN, sr->out.dir);
	sr->resp.orh_dma = 0;
orh_map_err:
	kfree(sr->out.sglist);
	sr->out.sglist = NULL;
	return ret;
}
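/*
 * The scatter list built above is laid out as
 *   [ ORH ][ IV ][ dst[0] ] ... [ dst[nents - 1] ][ COMPLETION ]
 * which is why out.map_bufs_cnt is (3 + nents).
 */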
static inline int softreq_map_iobuf(struct nitrox_softreq *sr,
				    struct se_crypto_request *creq)
{
	int ret;

	ret = dma_map_inbufs(sr, creq);
	if (ret)
		return ret;

	ret = dma_map_outbufs(sr, creq);
	if (ret)
		softreq_unmap_sgbufs(sr);

	return ret;
}
static inline void backlog_list_add(struct nitrox_softreq *sr,
				    struct nitrox_cmdq *cmdq)
{
	INIT_LIST_HEAD(&sr->backlog);

	spin_lock_bh(&cmdq->backlog_lock);
	list_add_tail(&sr->backlog, &cmdq->backlog_head);
	atomic_inc(&cmdq->backlog_count);
	atomic_set(&sr->status, REQ_BACKLOG);
	spin_unlock_bh(&cmdq->backlog_lock);
}
static inline void response_list_add(struct nitrox_softreq *sr,
				     struct nitrox_cmdq *cmdq)
{
	INIT_LIST_HEAD(&sr->response);

	spin_lock_bh(&cmdq->response_lock);
	list_add_tail(&sr->response, &cmdq->response_head);
	spin_unlock_bh(&cmdq->response_lock);
}
static inline void response_list_del(struct nitrox_softreq *sr,
				     struct nitrox_cmdq *cmdq)
{
	spin_lock_bh(&cmdq->response_lock);
	list_del(&sr->response);
	spin_unlock_bh(&cmdq->response_lock);
}
static struct nitrox_softreq *
get_first_response_entry(struct nitrox_cmdq *cmdq)
{
	return list_first_entry_or_null(&cmdq->response_head,
					struct nitrox_softreq, response);
}
static inline bool cmdq_full(struct nitrox_cmdq *cmdq, int qlen)
{
	if (atomic_inc_return(&cmdq->pending_count) > qlen) {
		atomic_dec(&cmdq->pending_count);
		/* sync with other cpus */
		smp_mb__after_atomic();
		return true;
	}
	return false;
}
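/*
 * Note: the pending count is incremented speculatively; if the queue is
 * already full the increment is rolled back, so a failed check leaves
 * no stale reservation behind.
 */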
/**
 * post_se_instr - Post SE instruction to Packet Input ring
 * @sr: Request structure
 * @cmdq: Command queue structure
 *
 * Posts the instruction and rings the doorbell under the command queue
 * lock; callers must first check for ring space with cmdq_full().
 */
static void post_se_instr(struct nitrox_softreq *sr,
			  struct nitrox_cmdq *cmdq)
{
	struct nitrox_device *ndev = sr->ndev;
	int idx;
	u8 *ent;

	spin_lock_bh(&cmdq->cmdq_lock);

	idx = cmdq->write_idx;
	/* copy the instruction */
	ent = cmdq->head + (idx * cmdq->instr_size);
	memcpy(ent, &sr->instr, cmdq->instr_size);

	atomic_set(&sr->status, REQ_POSTED);
	response_list_add(sr, cmdq);
	sr->tstamp = jiffies;
	/* flush the command queue updates */
	dma_wmb();

	/* Ring doorbell with count 1 */
	writeq(1, cmdq->dbell_csr_addr);
	/* orders the doorbell rings */
	mmiowb();

	cmdq->write_idx = incr_index(idx, 1, ndev->qlen);

	spin_unlock_bh(&cmdq->cmdq_lock);
}
static int post_backlog_cmds(struct nitrox_cmdq *cmdq)
{
	struct nitrox_device *ndev = cmdq->ndev;
	struct nitrox_softreq *sr, *tmp;
	int ret = 0;

	if (!atomic_read(&cmdq->backlog_count))
		return 0;

	spin_lock_bh(&cmdq->backlog_lock);

	list_for_each_entry_safe(sr, tmp, &cmdq->backlog_head, backlog) {
		struct skcipher_request *skreq;

		/* submit until space available */
		if (unlikely(cmdq_full(cmdq, ndev->qlen))) {
			ret = -ENOSPC;
			break;
		}
		/* delete from backlog list */
		list_del(&sr->backlog);
		atomic_dec(&cmdq->backlog_count);
		/* sync with other cpus */
		smp_mb__after_atomic();

		skreq = sr->skreq;
		/* post the command */
		post_se_instr(sr, cmdq);

		/* backlog requests are posted, wakeup with -EINPROGRESS */
		skcipher_request_complete(skreq, -EINPROGRESS);
	}
	spin_unlock_bh(&cmdq->backlog_lock);

	return ret;
}
static int nitrox_enqueue_request(struct nitrox_softreq *sr)
{
	struct nitrox_cmdq *cmdq = sr->cmdq;
	struct nitrox_device *ndev = sr->ndev;

	/* try to post backlog requests */
	post_backlog_cmds(cmdq);

	if (unlikely(cmdq_full(cmdq, ndev->qlen))) {
		if (!(sr->flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
			return -ENOSPC;
		/* add to backlog list */
		backlog_list_add(sr, cmdq);
		return -EBUSY;
	}
	post_se_instr(sr, cmdq);

	return -EINPROGRESS;
}
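/*
 * -EINPROGRESS and -EBUSY are the asynchronous "request in flight"
 * codes expected by the crypto API; -ENOSPC is returned only when the
 * ring is full and CRYPTO_TFM_REQ_MAY_BACKLOG was not set.
 */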
/**
 * nitrox_process_se_request - Send request to SE core
 * @ndev: NITROX device
 * @req: Crypto request
 * @callback: Completion callback
 * @skreq: skcipher request to complete
 *
 * Returns -EINPROGRESS (or -EBUSY if backlogged) when the request is
 * queued, or a negative error code on failure.
 */
int nitrox_process_se_request(struct nitrox_device *ndev,
			      struct se_crypto_request *req,
			      completion_t callback,
			      struct skcipher_request *skreq)
{
	struct nitrox_softreq *sr;
	dma_addr_t ctx_handle = 0;
	int qno, ret = 0;

	if (!nitrox_ready(ndev))
		return -ENODEV;

	sr = kzalloc(sizeof(*sr), req->gfp);
	if (!sr)
		return -ENOMEM;

	sr->ndev = ndev;
	sr->flags = req->flags;
	sr->gfp = req->gfp;
	sr->callback = callback;
	sr->skreq = skreq;

	atomic_set(&sr->status, REQ_NOT_POSTED);

	WRITE_ONCE(sr->resp.orh, PENDING_SIG);
	WRITE_ONCE(sr->resp.completion, PENDING_SIG);

	ret = softreq_map_iobuf(sr, req);
	if (ret) {
		kfree(sr);
		return ret;
	}
	/* get the context handle */
	if (req->ctx_handle) {
		struct ctx_hdr *hdr;
		u8 *ctx_ptr;

		ctx_ptr = (u8 *)(uintptr_t)req->ctx_handle;
		hdr = (struct ctx_hdr *)(ctx_ptr - sizeof(struct ctx_hdr));
		ctx_handle = hdr->ctx_dma;
	}

	/* select the queue */
	qno = smp_processor_id() % ndev->nr_queues;

	sr->cmdq = &ndev->pkt_cmdqs[qno];
	/*
	 * 64-Byte Instruction Format
	 *
	 *  ----------------------
	 *  |      DPTR0         | 8 bytes
	 *  ----------------------
	 *  |  PKT_IN_INSTR_HDR  | 8 bytes
	 *  ----------------------
	 *  |    PKT_IN_HDR      | 16 bytes
	 *  ----------------------
	 *  |    SLC_INFO        | 16 bytes
	 *  ----------------------
	 *  |   Front data       | 16 bytes
	 *  ----------------------
	 */
	/* fill the packet instruction */
	/* word 0 */
	sr->instr.dptr0 = cpu_to_be64(sr->in.dma);

	/* word 1 */
	sr->instr.ih.value = 0;
	sr->instr.ih.s.g = 1;
	sr->instr.ih.s.gsz = sr->in.map_bufs_cnt;
	sr->instr.ih.s.ssz = sr->out.map_bufs_cnt;
	sr->instr.ih.s.fsz = FDATA_SIZE + sizeof(struct gphdr);
	sr->instr.ih.s.tlen = sr->instr.ih.s.fsz + sr->in.total_bytes;
	sr->instr.ih.value = cpu_to_be64(sr->instr.ih.value);

	/* word 2 */
	sr->instr.irh.value[0] = 0;
	sr->instr.irh.s.uddl = MIN_UDD_LEN;
	/* context length in 64-bit words */
	sr->instr.irh.s.ctxl = (req->ctrl.s.ctxl / 8);
	/* offset from solicit base port 256 */
	sr->instr.irh.s.destport = SOLICIT_BASE_DPORT + qno;
	sr->instr.irh.s.ctxc = req->ctrl.s.ctxc;
	sr->instr.irh.s.arg = req->ctrl.s.arg;
	sr->instr.irh.s.opcode = req->opcode;
	sr->instr.irh.value[0] = cpu_to_be64(sr->instr.irh.value[0]);

	/* word 3 */
	sr->instr.irh.s.ctxp = cpu_to_be64(ctx_handle);

	/* word 4 */
	sr->instr.slc.value[0] = 0;
	sr->instr.slc.s.ssz = sr->out.map_bufs_cnt;
	sr->instr.slc.value[0] = cpu_to_be64(sr->instr.slc.value[0]);

	/* word 5 */
	sr->instr.slc.s.rptr = cpu_to_be64(sr->out.dma);

	/*
	 * No conversion for front data,
	 * it goes into the payload as-is;
	 * put the GP header in front data.
	 */
	sr->instr.fdata[0] = *((u64 *)&req->gph);
	sr->instr.fdata[1] = 0;

	ret = nitrox_enqueue_request(sr);
	if (ret == -ENOSPC)
		goto send_fail;

	return ret;

send_fail:
	softreq_destroy(sr);
	return ret;
}
EXPORT_SYMBOL_GPL(nitrox_process_se_request);
static inline int cmd_timeout(unsigned long tstamp, unsigned long timeout)
{
	return time_after_eq(jiffies, (tstamp + timeout));
}
void backlog_qflush_work(struct work_struct *work)
{
	struct nitrox_cmdq *cmdq;

	cmdq = container_of(work, struct nitrox_cmdq, backlog_qflush);
	post_backlog_cmds(cmdq);
}
/**
 * process_response_list - process completed requests
 * @cmdq: Command queue to operate on
 *
 * Walks the response list, completes finished requests and invokes
 * their callbacks.
 */
static void process_response_list(struct nitrox_cmdq *cmdq)
{
	struct nitrox_device *ndev = cmdq->ndev;
	struct nitrox_softreq *sr;
	struct skcipher_request *skreq;
	completion_t callback;
	int req_completed = 0, err = 0, budget;

	/* check all pending requests */
	budget = atomic_read(&cmdq->pending_count);

	while (req_completed < budget) {
		sr = get_first_response_entry(cmdq);
		if (!sr)
			break;

		if (atomic_read(&sr->status) != REQ_POSTED)
			break;

		/* check orh and completion bytes updates */
		if (READ_ONCE(sr->resp.orh) == READ_ONCE(sr->resp.completion)) {
			/* request not completed, check for timeout */
			if (!cmd_timeout(sr->tstamp, ndev->timeout))
				break;
			dev_err_ratelimited(DEV(ndev),
					    "Request timeout, orh 0x%016llx\n",
					    READ_ONCE(sr->resp.orh));
		}
		atomic_dec(&cmdq->pending_count);
		/* sync with other cpus */
		smp_mb__after_atomic();
		/* remove from response list */
		response_list_del(sr, cmdq);

		callback = sr->callback;
		skreq = sr->skreq;

		/* ORH error code */
		err = READ_ONCE(sr->resp.orh) & 0xff;
		softreq_destroy(sr);

		if (callback)
			callback(skreq, err);

		req_completed++;
	}
}
/**
 * pkt_slc_resp_handler - post processing of SE responses
 */
void pkt_slc_resp_handler(unsigned long data)
{
	struct bh_data *bh = (void *)(uintptr_t)(data);
	struct nitrox_cmdq *cmdq = bh->cmdq;
	union nps_pkt_slc_cnts pkt_slc_cnts;

	/* read completion count */
	pkt_slc_cnts.value = readq(bh->completion_cnt_csr_addr);
	/* resend the interrupt if more work to do */
	pkt_slc_cnts.s.resend = 1;

	process_response_list(cmdq);

	/*
	 * clear the interrupt with resend bit enabled,
	 * an MSI-X interrupt is generated if completion count > threshold
	 */
	writeq(pkt_slc_cnts.value, bh->completion_cnt_csr_addr);
	/* order the writes */
	mmiowb();

	if (atomic_read(&cmdq->backlog_count))
		schedule_work(&cmdq->backlog_qflush);
}