// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2020 Marvell. */

#include "otx2_cpt_common.h"
#include "otx2_cptlf.h"

#define CPT_TIMER_HOLD 0x03F
#define CPT_COUNT_HOLD 32

static void cptlf_do_set_done_time_wait(struct otx2_cptlf_info *lf,
					int time_wait)
{
	union otx2_cptx_lf_done_wait done_wait;

	done_wait.u = otx2_cpt_read64(lf->lfs->reg_base, BLKADDR_CPT0, lf->slot,
				      OTX2_CPT_LF_DONE_WAIT);
	done_wait.s.time_wait = time_wait;
	otx2_cpt_write64(lf->lfs->reg_base, BLKADDR_CPT0, lf->slot,
			 OTX2_CPT_LF_DONE_WAIT, done_wait.u);
}

static void cptlf_do_set_done_num_wait(struct otx2_cptlf_info *lf, int num_wait)
{
	union otx2_cptx_lf_done_wait done_wait;

	done_wait.u = otx2_cpt_read64(lf->lfs->reg_base, BLKADDR_CPT0, lf->slot,
				      OTX2_CPT_LF_DONE_WAIT);
	done_wait.s.num_wait = num_wait;
	otx2_cpt_write64(lf->lfs->reg_base, BLKADDR_CPT0, lf->slot,
			 OTX2_CPT_LF_DONE_WAIT, done_wait.u);
}

static void cptlf_set_done_time_wait(struct otx2_cptlfs_info *lfs,
				     int time_wait)
{
	int slot;

	for (slot = 0; slot < lfs->lfs_num; slot++)
		cptlf_do_set_done_time_wait(&lfs->lf[slot], time_wait);
}

static void cptlf_set_done_num_wait(struct otx2_cptlfs_info *lfs, int num_wait)
{
	int slot;

	for (slot = 0; slot < lfs->lfs_num; slot++)
		cptlf_do_set_done_num_wait(&lfs->lf[slot], num_wait);
}

static int cptlf_set_pri(struct otx2_cptlf_info *lf, int pri)
{
	struct otx2_cptlfs_info *lfs = lf->lfs;
	union otx2_cptx_af_lf_ctrl lf_ctrl;
	int ret;

	ret = otx2_cpt_read_af_reg(lfs->mbox, lfs->pdev,
				   CPT_AF_LFX_CTL(lf->slot),
				   &lf_ctrl.u, lfs->blkaddr);
	if (ret)
		return ret;

	lf_ctrl.s.pri = pri ? 1 : 0;

	ret = otx2_cpt_write_af_reg(lfs->mbox, lfs->pdev,
				    CPT_AF_LFX_CTL(lf->slot),
				    lf_ctrl.u, lfs->blkaddr);
	return ret;
}

static int cptlf_set_eng_grps_mask(struct otx2_cptlf_info *lf,
				   int eng_grps_mask)
{
	struct otx2_cptlfs_info *lfs = lf->lfs;
	union otx2_cptx_af_lf_ctrl lf_ctrl;
	int ret;

	ret = otx2_cpt_read_af_reg(lfs->mbox, lfs->pdev,
				   CPT_AF_LFX_CTL(lf->slot),
				   &lf_ctrl.u, lfs->blkaddr);
	if (ret)
		return ret;

	lf_ctrl.s.grp = eng_grps_mask;

	ret = otx2_cpt_write_af_reg(lfs->mbox, lfs->pdev,
				    CPT_AF_LFX_CTL(lf->slot),
				    lf_ctrl.u, lfs->blkaddr);
	return ret;
}

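/* Apply the engine group mask and queue priority to every allocated LF. */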
static int cptlf_set_grp_and_pri(struct otx2_cptlfs_info *lfs,
				 int eng_grp_mask, int pri)
{
	int slot, ret = 0;

	for (slot = 0; slot < lfs->lfs_num; slot++) {
		ret = cptlf_set_pri(&lfs->lf[slot], pri);
		if (ret)
			return ret;

		ret = cptlf_set_eng_grps_mask(&lfs->lf[slot], eng_grp_mask);
		if (ret)
			return ret;
	}
	return ret;
}

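/*
 * Basic hardware setup for all LFs: program the instruction queue base
 * addresses and sizes, set the done-interrupt coalescing parameters and
 * enable the instruction queues.
 */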
static void cptlf_hw_init(struct otx2_cptlfs_info *lfs)
{
	/* Disable instruction queues */
	otx2_cptlf_disable_iqueues(lfs);

	/* Set instruction queues base addresses */
	otx2_cptlf_set_iqueues_base_addr(lfs);

	/* Set instruction queues sizes */
	otx2_cptlf_set_iqueues_size(lfs);

	/* Set done interrupts time wait */
	cptlf_set_done_time_wait(lfs, CPT_TIMER_HOLD);

	/* Set done interrupts num wait */
	cptlf_set_done_num_wait(lfs, CPT_COUNT_HOLD);

	/* Enable instruction queues */
	otx2_cptlf_enable_iqueues(lfs);
}

static void cptlf_hw_cleanup(struct otx2_cptlfs_info *lfs)
{
	/* Disable instruction queues */
	otx2_cptlf_disable_iqueues(lfs);
}

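/* Enable or disable the miscellaneous (error) interrupts on every LF. */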
static void cptlf_set_misc_intrs(struct otx2_cptlfs_info *lfs, u8 enable)
{
	union otx2_cptx_lf_misc_int_ena_w1s irq_misc = { .u = 0x0 };
	u64 reg = enable ? OTX2_CPT_LF_MISC_INT_ENA_W1S :
			   OTX2_CPT_LF_MISC_INT_ENA_W1C;
	int slot;

	irq_misc.s.fault = 0x1;
	irq_misc.s.hwerr = 0x1;
	irq_misc.s.irde = 0x1;
	irq_misc.s.nqerr = 0x1;
	irq_misc.s.nwrp = 0x1;

	for (slot = 0; slot < lfs->lfs_num; slot++)
		otx2_cpt_write64(lfs->reg_base, BLKADDR_CPT0, slot, reg,
				 irq_misc.u);
}

static void cptlf_enable_intrs(struct otx2_cptlfs_info *lfs)
{
	int slot;

	/* Enable done interrupts */
	for (slot = 0; slot < lfs->lfs_num; slot++)
		otx2_cpt_write64(lfs->reg_base, BLKADDR_CPT0, slot,
				 OTX2_CPT_LF_DONE_INT_ENA_W1S, 0x1);
	/* Enable Misc interrupts */
	cptlf_set_misc_intrs(lfs, true);
}

static void cptlf_disable_intrs(struct otx2_cptlfs_info *lfs)
{
	int slot;

	for (slot = 0; slot < lfs->lfs_num; slot++)
		otx2_cpt_write64(lfs->reg_base, BLKADDR_CPT0, slot,
				 OTX2_CPT_LF_DONE_INT_ENA_W1C, 0x1);
	cptlf_set_misc_intrs(lfs, false);
}

static inline int cptlf_read_done_cnt(struct otx2_cptlf_info *lf)
{
	union otx2_cptx_lf_done irq_cnt;

	irq_cnt.u = otx2_cpt_read64(lf->lfs->reg_base, BLKADDR_CPT0, lf->slot,
				    OTX2_CPT_LF_DONE);
	return irq_cnt.s.done;
}

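/*
 * Miscellaneous (error) interrupt handler: report the error condition and
 * acknowledge the corresponding bit in CPT_LF_MISC_INT.
 */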
static irqreturn_t cptlf_misc_intr_handler(int __always_unused irq, void *arg)
{
	union otx2_cptx_lf_misc_int irq_misc, irq_misc_ack;
	struct otx2_cptlf_info *lf = arg;
	struct device *dev;

	dev = &lf->lfs->pdev->dev;
	irq_misc.u = otx2_cpt_read64(lf->lfs->reg_base, BLKADDR_CPT0, lf->slot,
				     OTX2_CPT_LF_MISC_INT);
	irq_misc_ack.u = 0x0;

	if (irq_misc.s.fault) {
		dev_err(dev, "Memory error detected while executing CPT_INST_S, LF %d.\n",
			lf->slot);
		irq_misc_ack.s.fault = 0x1;

	} else if (irq_misc.s.hwerr) {
		dev_err(dev, "HW error from an engine executing CPT_INST_S, LF %d.\n",
			lf->slot);
		irq_misc_ack.s.hwerr = 0x1;

	} else if (irq_misc.s.nwrp) {
		dev_err(dev, "SMMU fault while writing CPT_RES_S to CPT_INST_S[RES_ADDR], LF %d.\n",
			lf->slot);
		irq_misc_ack.s.nwrp = 0x1;

	} else if (irq_misc.s.irde) {
		dev_err(dev, "Memory error when accessing instruction memory queue CPT_LF_Q_BASE[ADDR].\n");
		irq_misc_ack.s.irde = 0x1;

	} else if (irq_misc.s.nqerr) {
		dev_err(dev, "Error enqueuing an instruction received at CPT_LF_NQ.\n");
		irq_misc_ack.s.nqerr = 0x1;

	} else {
		dev_err(dev, "Unhandled interrupt in CPT LF %d\n", lf->slot);
		return IRQ_NONE;
	}

	/* Acknowledge interrupts */
	otx2_cpt_write64(lf->lfs->reg_base, BLKADDR_CPT0, lf->slot,
			 OTX2_CPT_LF_MISC_INT, irq_misc_ack.u);

	return IRQ_HANDLED;
}

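/*
 * Done interrupt handler: acknowledge the completed instructions and
 * schedule the tasklet which post-processes them.
 */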
static irqreturn_t cptlf_done_intr_handler(int irq, void *arg)
{
	union otx2_cptx_lf_done_wait done_wait;
	struct otx2_cptlf_info *lf = arg;
	int irq_cnt;

	/* Read the number of completed requests */
	irq_cnt = cptlf_read_done_cnt(lf);
	if (irq_cnt) {
		done_wait.u = otx2_cpt_read64(lf->lfs->reg_base, BLKADDR_CPT0,
					      lf->slot, OTX2_CPT_LF_DONE_WAIT);
		/* Acknowledge the number of completed requests */
		otx2_cpt_write64(lf->lfs->reg_base, BLKADDR_CPT0, lf->slot,
				 OTX2_CPT_LF_DONE_ACK, irq_cnt);

		otx2_cpt_write64(lf->lfs->reg_base, BLKADDR_CPT0, lf->slot,
				 OTX2_CPT_LF_DONE_WAIT, done_wait.u);
		if (unlikely(!lf->wqe)) {
			dev_err(&lf->lfs->pdev->dev, "No work for LF %d\n",
				lf->slot);
			return IRQ_NONE;
		}

		/* Schedule processing of completed requests */
		tasklet_hi_schedule(&lf->wqe->work);
	}
	return IRQ_HANDLED;
}

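/* Free all IRQs that were requested for the LFs and mask LF interrupts. */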
void otx2_cptlf_unregister_interrupts(struct otx2_cptlfs_info *lfs)
{
	int i, offs, vector;

	for (i = 0; i < lfs->lfs_num; i++) {
		for (offs = 0; offs < OTX2_CPT_LF_MSIX_VECTORS; offs++) {
			if (!lfs->lf[i].is_irq_reg[offs])
				continue;

			vector = pci_irq_vector(lfs->pdev,
						lfs->lf[i].msix_offset + offs);
			free_irq(vector, &lfs->lf[i]);
			lfs->lf[i].is_irq_reg[offs] = false;
		}
	}
	cptlf_disable_intrs(lfs);
}

static int cptlf_do_register_interrrupts(struct otx2_cptlfs_info *lfs,
					 int lf_num, int irq_offset,
					 irq_handler_t handler)
{
	int ret, vector;

	vector = pci_irq_vector(lfs->pdev, lfs->lf[lf_num].msix_offset +
				irq_offset);
	ret = request_irq(vector, handler, 0,
			  lfs->lf[lf_num].irq_name[irq_offset],
			  &lfs->lf[lf_num]);
	if (ret)
		return ret;

	lfs->lf[lf_num].is_irq_reg[irq_offset] = true;

	return ret;
}

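/*
 * Request the MISC and DONE interrupt vectors for every LF and enable LF
 * interrupts; on failure, unwind whatever was already registered.
 */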
int otx2_cptlf_register_interrupts(struct otx2_cptlfs_info *lfs)
{
	int irq_offs, ret, i;

	for (i = 0; i < lfs->lfs_num; i++) {
		irq_offs = OTX2_CPT_LF_INT_VEC_E_MISC;
		snprintf(lfs->lf[i].irq_name[irq_offs], 32, "CPTLF Misc%d", i);
		ret = cptlf_do_register_interrrupts(lfs, i, irq_offs,
						    cptlf_misc_intr_handler);
		if (ret)
			goto free_irq;

		irq_offs = OTX2_CPT_LF_INT_VEC_E_DONE;
		snprintf(lfs->lf[i].irq_name[irq_offs], 32, "OTX2_CPTLF Done%d",
			 i);
		ret = cptlf_do_register_interrrupts(lfs, i, irq_offs,
						    cptlf_done_intr_handler);
		if (ret)
			goto free_irq;
	}
	cptlf_enable_intrs(lfs);
	return 0;

free_irq:
	otx2_cptlf_unregister_interrupts(lfs);
	return ret;
}

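/* Clear the IRQ affinity hints and free the per-LF affinity cpumasks. */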
void otx2_cptlf_free_irqs_affinity(struct otx2_cptlfs_info *lfs)
{
	int slot, offs;

	for (slot = 0; slot < lfs->lfs_num; slot++) {
		for (offs = 0; offs < OTX2_CPT_LF_MSIX_VECTORS; offs++)
			irq_set_affinity_hint(pci_irq_vector(lfs->pdev,
					      lfs->lf[slot].msix_offset +
					      offs), NULL);
		free_cpumask_var(lfs->lf[slot].affinity_mask);
	}
}

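/* Pin each LF's MSI-X vectors to a CPU local to the device's NUMA node. */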
int otx2_cptlf_set_irqs_affinity(struct otx2_cptlfs_info *lfs)
{
	struct otx2_cptlf_info *lf = lfs->lf;
	int slot, offs, ret;

	for (slot = 0; slot < lfs->lfs_num; slot++) {
		if (!zalloc_cpumask_var(&lf[slot].affinity_mask, GFP_KERNEL)) {
			dev_err(&lfs->pdev->dev,
				"cpumask allocation failed for LF %d", slot);
			ret = -ENOMEM;
			goto free_affinity_mask;
		}

		cpumask_set_cpu(cpumask_local_spread(slot,
				dev_to_node(&lfs->pdev->dev)),
				lf[slot].affinity_mask);

		for (offs = 0; offs < OTX2_CPT_LF_MSIX_VECTORS; offs++) {
			ret = irq_set_affinity_hint(pci_irq_vector(lfs->pdev,
						lf[slot].msix_offset + offs),
						lf[slot].affinity_mask);
			if (ret)
				goto free_affinity_mask;
		}
	}
	return 0;

free_affinity_mask:
	otx2_cptlf_free_irqs_affinity(lfs);
	return ret;
}

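/*
 * Attach lfs_num LFs to this function, allocate their instruction queues,
 * initialize the hardware and program the allowed engine groups and queue
 * priority for each LF.
 */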
int otx2_cptlf_init(struct otx2_cptlfs_info *lfs, u8 eng_grp_mask, int pri,
		    int lfs_num)
{
	int slot, ret;

	if (!lfs->pdev || !lfs->reg_base)
		return -EINVAL;

	lfs->lfs_num = lfs_num;
	for (slot = 0; slot < lfs->lfs_num; slot++) {
		lfs->lf[slot].lfs = lfs;
		lfs->lf[slot].slot = slot;
		if (lfs->lmt_base)
			lfs->lf[slot].lmtline = lfs->lmt_base +
						(slot * LMTLINE_SIZE);
		else
			lfs->lf[slot].lmtline = lfs->reg_base +
				OTX2_CPT_RVU_FUNC_ADDR_S(BLKADDR_LMT, slot,
						 OTX2_CPT_LMT_LF_LMTLINEX(0));

		lfs->lf[slot].ioreg = lfs->reg_base +
			OTX2_CPT_RVU_FUNC_ADDR_S(BLKADDR_CPT0, slot,
						 OTX2_CPT_LF_NQX(0));
	}
	/* Send request to attach LFs */
	ret = otx2_cpt_attach_rscrs_msg(lfs);
	if (ret)
		goto clear_lfs_num;

	ret = otx2_cpt_alloc_instruction_queues(lfs);
	if (ret) {
		dev_err(&lfs->pdev->dev,
			"Allocating instruction queues failed\n");
		goto detach_rsrcs;
	}
	cptlf_hw_init(lfs);
	/*
	 * Allow each LF to execute requests destined to any of 8 engine
	 * groups and set queue priority of each LF to high
	 */
	ret = cptlf_set_grp_and_pri(lfs, eng_grp_mask, pri);
	if (ret)
		goto free_iq;

	return 0;

free_iq:
	otx2_cpt_free_instruction_queues(lfs);
	cptlf_hw_cleanup(lfs);
detach_rsrcs:
	otx2_cpt_detach_rsrcs_msg(lfs);
clear_lfs_num:
	lfs->lfs_num = 0;
	return ret;
}

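/* Tear down the LFs: disable the instruction queues and detach the LFs. */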
void otx2_cptlf_shutdown(struct otx2_cptlfs_info *lfs)
{
	lfs->lfs_num = 0;
	/* Cleanup LFs hardware side */
	cptlf_hw_cleanup(lfs);
	/* Send request to detach LFs */
	otx2_cpt_detach_rsrcs_msg(lfs);
}