// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2016 Cavium, Inc.
 */
#include <linux/module.h>

#include "cptpf.h"
8 static void cpt_send_msg_to_vf(struct cpt_device *cpt, int vf,
11 /* Writing mbox(0) causes interrupt */
12 cpt_write_csr64(cpt->reg_base, CPTX_PF_VFX_MBOXX(0, vf, 1),
14 cpt_write_csr64(cpt->reg_base, CPTX_PF_VFX_MBOXX(0, vf, 0), mbx->msg);
17 /* ACKs VF's mailbox message
18 * @vf: VF to which ACK to be sent
20 static void cpt_mbox_send_ack(struct cpt_device *cpt, int vf,
24 mbx->msg = CPT_MBOX_MSG_TYPE_ACK;
25 cpt_send_msg_to_vf(cpt, vf, mbx);
28 static void cpt_clear_mbox_intr(struct cpt_device *cpt, u32 vf)
31 cpt_write_csr64(cpt->reg_base, CPTX_PF_MBOX_INTX(0, 0), (1 << vf));
35 * Configure QLEN/Chunk sizes for VF
37 static void cpt_cfg_qlen_for_vf(struct cpt_device *cpt, int vf, u32 size)
39 union cptx_pf_qx_ctl pf_qx_ctl;
41 pf_qx_ctl.u = cpt_read_csr64(cpt->reg_base, CPTX_PF_QX_CTL(0, vf));
42 pf_qx_ctl.s.size = size;
43 pf_qx_ctl.s.cont_err = true;
44 cpt_write_csr64(cpt->reg_base, CPTX_PF_QX_CTL(0, vf), pf_qx_ctl.u);
48 * Configure VQ priority
50 static void cpt_cfg_vq_priority(struct cpt_device *cpt, int vf, u32 pri)
52 union cptx_pf_qx_ctl pf_qx_ctl;
54 pf_qx_ctl.u = cpt_read_csr64(cpt->reg_base, CPTX_PF_QX_CTL(0, vf));
55 pf_qx_ctl.s.pri = pri;
56 cpt_write_csr64(cpt->reg_base, CPTX_PF_QX_CTL(0, vf), pf_qx_ctl.u);
59 static int cpt_bind_vq_to_grp(struct cpt_device *cpt, u8 q, u8 grp)
61 struct microcode *mcode = cpt->mcode;
62 union cptx_pf_qx_ctl pf_qx_ctl;
63 struct device *dev = &cpt->pdev->dev;
65 if (q >= CPT_MAX_VF_NUM) {
66 dev_err(dev, "Queues are more than cores in the group");
69 if (grp >= CPT_MAX_CORE_GROUPS) {
70 dev_err(dev, "Request group is more than possible groups");
73 if (grp >= cpt->next_mc_idx) {
74 dev_err(dev, "Request group is higher than available functional groups");
77 pf_qx_ctl.u = cpt_read_csr64(cpt->reg_base, CPTX_PF_QX_CTL(0, q));
78 pf_qx_ctl.s.grp = mcode[grp].group;
79 cpt_write_csr64(cpt->reg_base, CPTX_PF_QX_CTL(0, q), pf_qx_ctl.u);
80 dev_dbg(dev, "VF %d TYPE %s", q, (mcode[grp].is_ae ? "AE" : "SE"));
82 return mcode[grp].is_ae ? AE_TYPES : SE_TYPES;
85 /* Interrupt handler to handle mailbox messages from VFs */
86 static void cpt_handle_mbox_intr(struct cpt_device *cpt, int vf)
88 struct cpt_vf_info *vfx = &cpt->vfinfo[vf];
89 struct cpt_mbox mbx = {};
91 struct device *dev = &cpt->pdev->dev;
93 * MBOX[0] contains msg
94 * MBOX[1] contains data
96 mbx.msg = cpt_read_csr64(cpt->reg_base, CPTX_PF_VFX_MBOXX(0, vf, 0));
97 mbx.data = cpt_read_csr64(cpt->reg_base, CPTX_PF_VFX_MBOXX(0, vf, 1));
98 dev_dbg(dev, "%s: Mailbox msg 0x%llx from VF%d", __func__, mbx.msg, vf);
101 vfx->state = VF_STATE_UP;
102 try_module_get(THIS_MODULE);
103 cpt_mbox_send_ack(cpt, vf, &mbx);
106 mbx.msg = CPT_MSG_READY;
108 cpt_send_msg_to_vf(cpt, vf, &mbx);
110 case CPT_MSG_VF_DOWN:
111 /* First msg in VF teardown sequence */
112 vfx->state = VF_STATE_DOWN;
113 module_put(THIS_MODULE);
114 cpt_mbox_send_ack(cpt, vf, &mbx);
117 vfx->qlen = mbx.data;
118 cpt_cfg_qlen_for_vf(cpt, vf, vfx->qlen);
119 cpt_mbox_send_ack(cpt, vf, &mbx);
121 case CPT_MSG_QBIND_GRP:
122 vftype = cpt_bind_vq_to_grp(cpt, vf, (u8)mbx.data);
123 if ((vftype != AE_TYPES) && (vftype != SE_TYPES))
124 dev_err(dev, "Queue %d binding to group %llu failed",
127 dev_dbg(dev, "Queue %d binding to group %llu successful",
129 mbx.msg = CPT_MSG_QBIND_GRP;
131 cpt_send_msg_to_vf(cpt, vf, &mbx);
134 case CPT_MSG_VQ_PRIORITY:
135 vfx->priority = mbx.data;
136 cpt_cfg_vq_priority(cpt, vf, vfx->priority);
137 cpt_mbox_send_ack(cpt, vf, &mbx);
140 dev_err(&cpt->pdev->dev, "Invalid msg from VF%d, msg 0x%llx\n",
146 void cpt_mbox_intr_handler (struct cpt_device *cpt, int mbx)
151 intr = cpt_read_csr64(cpt->reg_base, CPTX_PF_MBOX_INTX(0, 0));
152 dev_dbg(&cpt->pdev->dev, "PF interrupt Mbox%d 0x%llx\n", mbx, intr);
153 for (vf = 0; vf < CPT_MAX_VF_NUM; vf++) {
154 if (intr & (1ULL << vf)) {
155 dev_dbg(&cpt->pdev->dev, "Intr from VF %d\n", vf);
156 cpt_handle_mbox_intr(cpt, vf);
157 cpt_clear_mbox_intr(cpt, vf);