1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (C) 2020 Marvell. */
4 #include "otx2_cpt_common.h"
5 #include "otx2_cptpf.h"
8 /* Fastpath ipsec opcode with inplace processing */
9 #define CPT_INLINE_RX_OPCODE (0x26 | (1 << 6))
10 #define CN10K_CPT_INLINE_RX_OPCODE (0x29 | (1 << 6))
/*
 * Select the inline-IPsec RX opcode for the device generation:
 * OcteonTX2 (CN9x) and CN10K use different fastpath opcodes.
 * Statement-expression macro; evaluates to a u8 opcode.
 * NOTE(review): listing was truncated — the ({ ... }) wrapper and the
 * else arm are reconstructed from the surviving branch bodies.
 */
#define cpt_inline_rx_opcode(pdev)                      \
({                                                      \
	u8 opcode;                                      \
	if (is_dev_otx2(pdev))                          \
		opcode = CPT_INLINE_RX_OPCODE;          \
	else                                            \
		opcode = CN10K_CPT_INLINE_RX_OPCODE;    \
	(opcode);                                       \
})
/*
 * CPT PF driver version, It will be incremented by 1 for every feature
 * addition in CPT mailbox messages.
 */
26 #define OTX2_CPT_PF_DRV_VERSION 0x1
28 static int forward_to_af(struct otx2_cptpf_dev *cptpf,
29 struct otx2_cptvf_info *vf,
30 struct mbox_msghdr *req, int size)
32 struct mbox_msghdr *msg;
35 mutex_lock(&cptpf->lock);
36 msg = otx2_mbox_alloc_msg(&cptpf->afpf_mbox, 0, size);
38 mutex_unlock(&cptpf->lock);
42 memcpy((uint8_t *)msg + sizeof(struct mbox_msghdr),
43 (uint8_t *)req + sizeof(struct mbox_msghdr), size);
45 msg->pcifunc = req->pcifunc;
49 ret = otx2_cpt_sync_mbox_msg(&cptpf->afpf_mbox);
50 /* Error code -EIO indicate there is a communication failure
51 * to the AF. Rest of the error codes indicate that AF processed
52 * VF messages and set the error codes in response messages
53 * (if any) so simply forward responses to VF.
56 dev_warn(&cptpf->pdev->dev,
57 "AF not responding to VF%d messages\n", vf->vf_id);
58 mutex_unlock(&cptpf->lock);
61 mutex_unlock(&cptpf->lock);
65 static int handle_msg_get_caps(struct otx2_cptpf_dev *cptpf,
66 struct otx2_cptvf_info *vf,
67 struct mbox_msghdr *req)
69 struct otx2_cpt_caps_rsp *rsp;
71 rsp = (struct otx2_cpt_caps_rsp *)
72 otx2_mbox_alloc_msg(&cptpf->vfpf_mbox, vf->vf_id,
77 rsp->hdr.id = MBOX_MSG_GET_CAPS;
78 rsp->hdr.sig = OTX2_MBOX_RSP_SIG;
79 rsp->hdr.pcifunc = req->pcifunc;
80 rsp->cpt_pf_drv_version = OTX2_CPT_PF_DRV_VERSION;
81 rsp->cpt_revision = cptpf->pdev->revision;
82 memcpy(&rsp->eng_caps, &cptpf->eng_caps, sizeof(rsp->eng_caps));
87 static int handle_msg_get_eng_grp_num(struct otx2_cptpf_dev *cptpf,
88 struct otx2_cptvf_info *vf,
89 struct mbox_msghdr *req)
91 struct otx2_cpt_egrp_num_msg *grp_req;
92 struct otx2_cpt_egrp_num_rsp *rsp;
94 grp_req = (struct otx2_cpt_egrp_num_msg *)req;
95 rsp = (struct otx2_cpt_egrp_num_rsp *)
96 otx2_mbox_alloc_msg(&cptpf->vfpf_mbox, vf->vf_id, sizeof(*rsp));
100 rsp->hdr.id = MBOX_MSG_GET_ENG_GRP_NUM;
101 rsp->hdr.sig = OTX2_MBOX_RSP_SIG;
102 rsp->hdr.pcifunc = req->pcifunc;
103 rsp->eng_type = grp_req->eng_type;
104 rsp->eng_grp_num = otx2_cpt_get_eng_grp(&cptpf->eng_grps,
110 static int handle_msg_kvf_limits(struct otx2_cptpf_dev *cptpf,
111 struct otx2_cptvf_info *vf,
112 struct mbox_msghdr *req)
114 struct otx2_cpt_kvf_limits_rsp *rsp;
116 rsp = (struct otx2_cpt_kvf_limits_rsp *)
117 otx2_mbox_alloc_msg(&cptpf->vfpf_mbox, vf->vf_id, sizeof(*rsp));
121 rsp->hdr.id = MBOX_MSG_GET_KVF_LIMITS;
122 rsp->hdr.sig = OTX2_MBOX_RSP_SIG;
123 rsp->hdr.pcifunc = req->pcifunc;
124 rsp->kvf_limits = cptpf->kvf_limits;
129 static int send_inline_ipsec_inbound_msg(struct otx2_cptpf_dev *cptpf,
130 int sso_pf_func, u8 slot)
132 struct cpt_inline_ipsec_cfg_msg *req;
133 struct pci_dev *pdev = cptpf->pdev;
135 req = (struct cpt_inline_ipsec_cfg_msg *)
136 otx2_mbox_alloc_msg_rsp(&cptpf->afpf_mbox, 0,
137 sizeof(*req), sizeof(struct msg_rsp));
139 dev_err(&pdev->dev, "RVU MBOX failed to get message.\n");
142 memset(req, 0, sizeof(*req));
143 req->hdr.id = MBOX_MSG_CPT_INLINE_IPSEC_CFG;
144 req->hdr.sig = OTX2_MBOX_REQ_SIG;
145 req->hdr.pcifunc = OTX2_CPT_RVU_PFFUNC(cptpf->pf_id, 0);
146 req->dir = CPT_INLINE_INBOUND;
148 req->sso_pf_func_ovrd = cptpf->sso_pf_func_ovrd;
149 req->sso_pf_func = sso_pf_func;
152 return otx2_cpt_send_mbox_msg(&cptpf->afpf_mbox, pdev);
155 static int rx_inline_ipsec_lf_cfg(struct otx2_cptpf_dev *cptpf, u8 egrp,
156 struct otx2_cpt_rx_inline_lf_cfg *req)
158 struct nix_inline_ipsec_cfg *nix_req;
159 struct pci_dev *pdev = cptpf->pdev;
162 nix_req = (struct nix_inline_ipsec_cfg *)
163 otx2_mbox_alloc_msg_rsp(&cptpf->afpf_mbox, 0,
165 sizeof(struct msg_rsp));
166 if (nix_req == NULL) {
167 dev_err(&pdev->dev, "RVU MBOX failed to get message.\n");
170 memset(nix_req, 0, sizeof(*nix_req));
171 nix_req->hdr.id = MBOX_MSG_NIX_INLINE_IPSEC_CFG;
172 nix_req->hdr.sig = OTX2_MBOX_REQ_SIG;
174 if (!req->credit || req->credit > OTX2_CPT_INST_QLEN_MSGS)
175 nix_req->cpt_credit = OTX2_CPT_INST_QLEN_MSGS - 1;
177 nix_req->cpt_credit = req->credit - 1;
178 nix_req->gen_cfg.egrp = egrp;
180 nix_req->gen_cfg.opcode = req->opcode;
182 nix_req->gen_cfg.opcode = cpt_inline_rx_opcode(pdev);
183 nix_req->gen_cfg.param1 = req->param1;
184 nix_req->gen_cfg.param2 = req->param2;
185 nix_req->inst_qsel.cpt_pf_func = OTX2_CPT_RVU_PFFUNC(cptpf->pf_id, 0);
186 nix_req->inst_qsel.cpt_slot = 0;
187 ret = otx2_cpt_send_mbox_msg(&cptpf->afpf_mbox, pdev);
191 if (cptpf->has_cpt1) {
192 ret = send_inline_ipsec_inbound_msg(cptpf, req->sso_pf_func, 1);
197 return send_inline_ipsec_inbound_msg(cptpf, req->sso_pf_func, 0);
200 static int handle_msg_rx_inline_ipsec_lf_cfg(struct otx2_cptpf_dev *cptpf,
201 struct mbox_msghdr *req)
203 struct otx2_cpt_rx_inline_lf_cfg *cfg_req;
207 cfg_req = (struct otx2_cpt_rx_inline_lf_cfg *)req;
208 if (cptpf->lfs.lfs_num) {
209 dev_err(&cptpf->pdev->dev,
210 "LF is already configured for RX inline ipsec.\n");
214 * Allow LFs to execute requests destined to only grp IE_TYPES and
215 * set queue priority of each LF to high
217 egrp = otx2_cpt_get_eng_grp(&cptpf->eng_grps, OTX2_CPT_IE_TYPES);
218 if (egrp == OTX2_CPT_INVALID_CRYPTO_ENG_GRP) {
219 dev_err(&cptpf->pdev->dev,
220 "Engine group for inline ipsec is not available\n");
224 otx2_cptlf_set_dev_info(&cptpf->lfs, cptpf->pdev, cptpf->reg_base,
225 &cptpf->afpf_mbox, BLKADDR_CPT0);
226 ret = otx2_cptlf_init(&cptpf->lfs, 1 << egrp, OTX2_CPT_QUEUE_HI_PRIO,
229 dev_err(&cptpf->pdev->dev,
230 "LF configuration failed for RX inline ipsec.\n");
234 if (cptpf->has_cpt1) {
235 cptpf->rsrc_req_blkaddr = BLKADDR_CPT1;
236 otx2_cptlf_set_dev_info(&cptpf->cpt1_lfs, cptpf->pdev,
237 cptpf->reg_base, &cptpf->afpf_mbox,
239 ret = otx2_cptlf_init(&cptpf->cpt1_lfs, 1 << egrp,
240 OTX2_CPT_QUEUE_HI_PRIO, 1);
242 dev_err(&cptpf->pdev->dev,
243 "LF configuration failed for RX inline ipsec.\n");
246 cptpf->rsrc_req_blkaddr = 0;
249 ret = rx_inline_ipsec_lf_cfg(cptpf, egrp, cfg_req);
256 otx2_cptlf_shutdown(&cptpf->cpt1_lfs);
258 otx2_cptlf_shutdown(&cptpf->lfs);
262 static int cptpf_handle_vf_req(struct otx2_cptpf_dev *cptpf,
263 struct otx2_cptvf_info *vf,
264 struct mbox_msghdr *req, int size)
268 /* Check if msg is valid, if not reply with an invalid msg */
269 if (req->sig != OTX2_MBOX_REQ_SIG)
273 case MBOX_MSG_GET_ENG_GRP_NUM:
274 err = handle_msg_get_eng_grp_num(cptpf, vf, req);
276 case MBOX_MSG_GET_CAPS:
277 err = handle_msg_get_caps(cptpf, vf, req);
279 case MBOX_MSG_GET_KVF_LIMITS:
280 err = handle_msg_kvf_limits(cptpf, vf, req);
282 case MBOX_MSG_RX_INLINE_IPSEC_LF_CFG:
283 err = handle_msg_rx_inline_ipsec_lf_cfg(cptpf, req);
287 err = forward_to_af(cptpf, vf, req, size);
293 otx2_reply_invalid_msg(&cptpf->vfpf_mbox, vf->vf_id, 0, req->id);
294 otx2_mbox_msg_send(&cptpf->vfpf_mbox, vf->vf_id);
298 irqreturn_t otx2_cptpf_vfpf_mbox_intr(int __always_unused irq, void *arg)
300 struct otx2_cptpf_dev *cptpf = arg;
301 struct otx2_cptvf_info *vf;
306 * Check which VF has raised an interrupt and schedule
307 * corresponding work queue to process the messages
309 for (i = 0; i < 2; i++) {
310 /* Read the interrupt bits */
311 intr = otx2_cpt_read64(cptpf->reg_base, BLKADDR_RVUM, 0,
312 RVU_PF_VFPF_MBOX_INTX(i));
314 for (vf_idx = i * 64; vf_idx < cptpf->enabled_vfs; vf_idx++) {
315 vf = &cptpf->vf[vf_idx];
316 if (intr & (1ULL << vf->intr_idx)) {
317 queue_work(cptpf->vfpf_mbox_wq,
318 &vf->vfpf_mbox_work);
319 /* Clear the interrupt */
320 otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM,
321 0, RVU_PF_VFPF_MBOX_INTX(i),
322 BIT_ULL(vf->intr_idx));
329 void otx2_cptpf_vfpf_mbox_handler(struct work_struct *work)
331 struct otx2_cptpf_dev *cptpf;
332 struct otx2_cptvf_info *vf;
333 struct otx2_mbox_dev *mdev;
334 struct mbox_hdr *req_hdr;
335 struct mbox_msghdr *msg;
336 struct otx2_mbox *mbox;
339 vf = container_of(work, struct otx2_cptvf_info, vfpf_mbox_work);
341 mbox = &cptpf->vfpf_mbox;
342 /* sync with mbox memory region */
344 mdev = &mbox->dev[vf->vf_id];
345 /* Process received mbox messages */
346 req_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
347 offset = mbox->rx_start + ALIGN(sizeof(*req_hdr), MBOX_MSG_ALIGN);
349 for (i = 0; i < req_hdr->num_msgs; i++) {
350 msg = (struct mbox_msghdr *)(mdev->mbase + offset);
352 /* Set which VF sent this message based on mbox IRQ */
353 msg->pcifunc = ((u16)cptpf->pf_id << RVU_PFVF_PF_SHIFT) |
354 ((vf->vf_id + 1) & RVU_PFVF_FUNC_MASK);
356 err = cptpf_handle_vf_req(cptpf, vf, msg,
357 msg->next_msgoff - offset);
359 * Behave as the AF, drop the msg if there is
360 * no memory, timeout handling also goes here
362 if (err == -ENOMEM || err == -EIO)
364 offset = msg->next_msgoff;
365 /* Write barrier required for VF responses which are handled by
366 * PF driver and not forwarded to AF.
370 /* Send mbox responses to VF */
372 otx2_mbox_msg_send(mbox, vf->vf_id);
375 irqreturn_t otx2_cptpf_afpf_mbox_intr(int __always_unused irq, void *arg)
377 struct otx2_cptpf_dev *cptpf = arg;
378 struct otx2_mbox_dev *mdev;
379 struct otx2_mbox *mbox;
380 struct mbox_hdr *hdr;
383 /* Read the interrupt bits */
384 intr = otx2_cpt_read64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_INT);
387 mbox = &cptpf->afpf_mbox;
388 mdev = &mbox->dev[0];
389 hdr = mdev->mbase + mbox->rx_start;
391 /* Schedule work queue function to process the MBOX request */
392 queue_work(cptpf->afpf_mbox_wq, &cptpf->afpf_mbox_work);
394 mbox = &cptpf->afpf_mbox_up;
395 mdev = &mbox->dev[0];
396 hdr = mdev->mbase + mbox->rx_start;
398 /* Schedule work queue function to process the MBOX request */
399 queue_work(cptpf->afpf_mbox_wq, &cptpf->afpf_mbox_up_work);
400 /* Clear and ack the interrupt */
401 otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_INT,
407 static void process_afpf_mbox_msg(struct otx2_cptpf_dev *cptpf,
408 struct mbox_msghdr *msg)
410 struct otx2_cptlfs_info *lfs = &cptpf->lfs;
411 struct device *dev = &cptpf->pdev->dev;
412 struct cpt_rd_wr_reg_msg *rsp_rd_wr;
414 if (msg->id >= MBOX_MSG_MAX) {
415 dev_err(dev, "MBOX msg with unknown ID %d\n", msg->id);
418 if (msg->sig != OTX2_MBOX_RSP_SIG) {
419 dev_err(dev, "MBOX msg with wrong signature %x, ID %d\n",
423 if (cptpf->rsrc_req_blkaddr == BLKADDR_CPT1)
424 lfs = &cptpf->cpt1_lfs;
428 cptpf->pf_id = (msg->pcifunc >> RVU_PFVF_PF_SHIFT) &
431 case MBOX_MSG_CPT_RD_WR_REGISTER:
432 rsp_rd_wr = (struct cpt_rd_wr_reg_msg *)msg;
434 dev_err(dev, "Reg %llx rd/wr(%d) failed %d\n",
435 rsp_rd_wr->reg_offset, rsp_rd_wr->is_write,
439 if (!rsp_rd_wr->is_write)
440 *rsp_rd_wr->ret_val = rsp_rd_wr->val;
442 case MBOX_MSG_ATTACH_RESOURCES:
444 lfs->are_lfs_attached = 1;
446 case MBOX_MSG_DETACH_RESOURCES:
448 lfs->are_lfs_attached = 0;
450 case MBOX_MSG_CPT_INLINE_IPSEC_CFG:
451 case MBOX_MSG_NIX_INLINE_IPSEC_CFG:
456 "Unsupported msg %d received.\n", msg->id);
461 static void forward_to_vf(struct otx2_cptpf_dev *cptpf, struct mbox_msghdr *msg,
464 struct otx2_mbox *vfpf_mbox;
465 struct mbox_msghdr *fwd;
467 if (msg->id >= MBOX_MSG_MAX) {
468 dev_err(&cptpf->pdev->dev,
469 "MBOX msg with unknown ID %d\n", msg->id);
472 if (msg->sig != OTX2_MBOX_RSP_SIG) {
473 dev_err(&cptpf->pdev->dev,
474 "MBOX msg with wrong signature %x, ID %d\n",
478 vfpf_mbox = &cptpf->vfpf_mbox;
480 if (vf_id >= cptpf->enabled_vfs) {
481 dev_err(&cptpf->pdev->dev,
482 "MBOX msg to unknown VF: %d >= %d\n",
483 vf_id, cptpf->enabled_vfs);
486 if (msg->id == MBOX_MSG_VF_FLR)
489 fwd = otx2_mbox_alloc_msg(vfpf_mbox, vf_id, size);
491 dev_err(&cptpf->pdev->dev,
492 "Forwarding to VF%d failed.\n", vf_id);
495 memcpy((uint8_t *)fwd + sizeof(struct mbox_msghdr),
496 (uint8_t *)msg + sizeof(struct mbox_msghdr), size);
498 fwd->pcifunc = msg->pcifunc;
504 /* Handle mailbox messages received from AF */
505 void otx2_cptpf_afpf_mbox_handler(struct work_struct *work)
507 struct otx2_cptpf_dev *cptpf;
508 struct otx2_mbox *afpf_mbox;
509 struct otx2_mbox_dev *mdev;
510 struct mbox_hdr *rsp_hdr;
511 struct mbox_msghdr *msg;
512 int offset, vf_id, i;
514 cptpf = container_of(work, struct otx2_cptpf_dev, afpf_mbox_work);
515 afpf_mbox = &cptpf->afpf_mbox;
516 mdev = &afpf_mbox->dev[0];
517 /* Sync mbox data into memory */
520 rsp_hdr = (struct mbox_hdr *)(mdev->mbase + afpf_mbox->rx_start);
521 offset = ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN);
523 for (i = 0; i < rsp_hdr->num_msgs; i++) {
524 msg = (struct mbox_msghdr *)(mdev->mbase + afpf_mbox->rx_start +
526 vf_id = (msg->pcifunc >> RVU_PFVF_FUNC_SHIFT) &
529 forward_to_vf(cptpf, msg, vf_id,
530 msg->next_msgoff - offset);
532 process_afpf_mbox_msg(cptpf, msg);
534 offset = msg->next_msgoff;
535 /* Sync VF response ready to be sent */
539 otx2_mbox_reset(afpf_mbox, 0);
542 static void handle_msg_cpt_inst_lmtst(struct otx2_cptpf_dev *cptpf,
543 struct mbox_msghdr *msg)
545 struct cpt_inst_lmtst_req *req = (struct cpt_inst_lmtst_req *)msg;
546 struct otx2_cptlfs_info *lfs = &cptpf->lfs;
549 if (cptpf->lfs.lfs_num)
550 lfs->ops->send_cmd((union otx2_cpt_inst_s *)req->inst, 1,
553 rsp = (struct msg_rsp *)otx2_mbox_alloc_msg(&cptpf->afpf_mbox_up, 0,
558 rsp->hdr.id = msg->id;
559 rsp->hdr.sig = OTX2_MBOX_RSP_SIG;
560 rsp->hdr.pcifunc = 0;
564 static void process_afpf_mbox_up_msg(struct otx2_cptpf_dev *cptpf,
565 struct mbox_msghdr *msg)
567 if (msg->id >= MBOX_MSG_MAX) {
568 dev_err(&cptpf->pdev->dev,
569 "MBOX msg with unknown ID %d\n", msg->id);
574 case MBOX_MSG_CPT_INST_LMTST:
575 handle_msg_cpt_inst_lmtst(cptpf, msg);
578 otx2_reply_invalid_msg(&cptpf->afpf_mbox_up, 0, 0, msg->id);
582 void otx2_cptpf_afpf_mbox_up_handler(struct work_struct *work)
584 struct otx2_cptpf_dev *cptpf;
585 struct otx2_mbox_dev *mdev;
586 struct mbox_hdr *rsp_hdr;
587 struct mbox_msghdr *msg;
588 struct otx2_mbox *mbox;
591 cptpf = container_of(work, struct otx2_cptpf_dev, afpf_mbox_up_work);
592 mbox = &cptpf->afpf_mbox_up;
593 mdev = &mbox->dev[0];
594 /* Sync mbox data into memory */
597 rsp_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
598 offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN);
600 for (i = 0; i < rsp_hdr->num_msgs; i++) {
601 msg = (struct mbox_msghdr *)(mdev->mbase + offset);
603 process_afpf_mbox_up_msg(cptpf, msg);
605 offset = mbox->rx_start + msg->next_msgoff;
607 otx2_mbox_msg_send(mbox, 0);