1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (C) 2020 Marvell. */
4 #include <linux/firmware.h>
5 #include "otx2_cpt_hw_types.h"
6 #include "otx2_cpt_common.h"
7 #include "otx2_cpt_devlink.h"
8 #include "otx2_cptpf_ucode.h"
9 #include "otx2_cptpf.h"
10 #include "cn10k_cpt.h"
/* Driver name/description used for PCI registration and module metadata. */
13 #define OTX2_CPT_DRV_NAME "rvu_cptpf"
14 #define OTX2_CPT_DRV_STRING "Marvell RVU CPT Physical Function Driver"
/* PCI revision ID of CN9K B0 silicon; gates the sso_pf_func_ovrd sysfs knob. */
16 #define CPT_UC_RID_CN9K_B0 1
/*
 * cptpf_enable_vfpf_mbox_intr() - enable VF->PF mailbox interrupts.
 * Clears any stale mailbox interrupt bits in both INTX registers, then
 * write-1-sets the per-VF enable bits: register 0 covers VFs 0-63,
 * register 1 covers VFs 64-127.
 */
18 static void cptpf_enable_vfpf_mbox_intr(struct otx2_cptpf_dev *cptpf,
23 /* Clear any pending interrupts */
24 otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
25 RVU_PF_VFPF_MBOX_INTX(0), ~0x0ULL);
26 otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
27 RVU_PF_VFPF_MBOX_INTX(1), ~0x0ULL);
/* Enable VF interrupts for VFs from 0 to 63; highest set bit is (num_vfs-1)%64 */
29 /* Enable VF interrupts for VFs from 0 to 63 */
30 ena_bits = ((num_vfs - 1) % 64);
31 otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
32 RVU_PF_VFPF_MBOX_INT_ENA_W1SX(0),
33 GENMASK_ULL(ena_bits, 0));
/* NOTE(review): a num_vfs > 64 guard presumably precedes this — confirm in full source */
36 /* Enable VF interrupts for VFs from 64 to 127 */
37 ena_bits = num_vfs - 64 - 1;
38 otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
39 RVU_PF_VFPF_MBOX_INT_ENA_W1SX(1),
40 GENMASK_ULL(ena_bits, 0));
/*
 * cptpf_disable_vfpf_mbox_intr() - disable VF->PF mailbox interrupts and
 * release the two mailbox IRQ vectors.  Enables are write-1-cleared for
 * both banks, pending bits are acked, then each vector's handler is freed.
 */
44 static void cptpf_disable_vfpf_mbox_intr(struct otx2_cptpf_dev *cptpf,
49 /* Disable VF-PF interrupts */
50 otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
51 RVU_PF_VFPF_MBOX_INT_ENA_W1CX(0), ~0ULL);
52 otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
53 RVU_PF_VFPF_MBOX_INT_ENA_W1CX(1), ~0ULL);
54 /* Clear any pending interrupts */
55 otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
56 RVU_PF_VFPF_MBOX_INTX(0), ~0ULL);
/* Free the mailbox-0 vector (VFs 0-63) */
58 vector = pci_irq_vector(cptpf->pdev, RVU_PF_INT_VEC_VFPF_MBOX0);
59 free_irq(vector, cptpf);
/* Ack and free the mailbox-1 vector (VFs 64-127) */
62 otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
63 RVU_PF_VFPF_MBOX_INTX(1), ~0ULL);
64 vector = pci_irq_vector(cptpf->pdev, RVU_PF_INT_VEC_VFPF_MBOX1);
65 free_irq(vector, cptpf);
/*
 * cptpf_enable_vf_flr_me_intrs() - enable VF FLR (function level reset) and
 * ME (master enable) interrupts.  Pending bits are acked first, then the
 * enables are write-1-set using an INTR_MASK sized to num_vfs; the second
 * bank covers VFs 64-127.
 */
69 static void cptpf_enable_vf_flr_me_intrs(struct otx2_cptpf_dev *cptpf,
72 /* Clear FLR interrupt if any */
73 otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_VFFLR_INTX(0),
76 /* Enable VF FLR interrupts */
77 otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
78 RVU_PF_VFFLR_INT_ENA_W1SX(0), INTR_MASK(num_vfs));
79 /* Clear ME interrupt if any */
80 otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_VFME_INTX(0),
82 /* Enable VF ME interrupts */
83 otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
84 RVU_PF_VFME_INT_ENA_W1SX(0), INTR_MASK(num_vfs));
/* Second bank for VFs 64-127 (NOTE(review): presumably behind a num_vfs > 64 guard) */
89 otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_VFFLR_INTX(1),
90 INTR_MASK(num_vfs - 64));
91 otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
92 RVU_PF_VFFLR_INT_ENA_W1SX(1), INTR_MASK(num_vfs - 64));
94 otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_VFME_INTX(1),
95 INTR_MASK(num_vfs - 64));
96 otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
97 RVU_PF_VFME_INT_ENA_W1SX(1), INTR_MASK(num_vfs - 64));
/*
 * cptpf_disable_vf_flr_me_intrs() - disable VF FLR/ME interrupts and free
 * their IRQ vectors.  Mirrors cptpf_enable_vf_flr_me_intrs(): bank 0 first,
 * then the second bank for VFs 64-127.
 */
100 static void cptpf_disable_vf_flr_me_intrs(struct otx2_cptpf_dev *cptpf,
105 /* Disable VF FLR interrupts */
106 otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
107 RVU_PF_VFFLR_INT_ENA_W1CX(0), INTR_MASK(num_vfs));
108 vector = pci_irq_vector(cptpf->pdev, RVU_PF_INT_VEC_VFFLR0);
109 free_irq(vector, cptpf);
111 /* Disable VF ME interrupts */
112 otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
113 RVU_PF_VFME_INT_ENA_W1CX(0), INTR_MASK(num_vfs));
114 vector = pci_irq_vector(cptpf->pdev, RVU_PF_INT_VEC_VFME0);
115 free_irq(vector, cptpf);
/* Second bank (VFs 64-127): FLR1 then ME1 */
120 otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
121 RVU_PF_VFFLR_INT_ENA_W1CX(1), INTR_MASK(num_vfs - 64));
122 vector = pci_irq_vector(cptpf->pdev, RVU_PF_INT_VEC_VFFLR1);
123 free_irq(vector, cptpf);
125 otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
126 RVU_PF_VFME_INT_ENA_W1CX(1), INTR_MASK(num_vfs - 64));
127 vector = pci_irq_vector(cptpf->pdev, RVU_PF_INT_VEC_VFME1);
128 free_irq(vector, cptpf);
/*
 * cptpf_flr_wq_handler() - workqueue handler that completes a VF FLR.
 * Identifies the VF from the position of the work item in pf->flr_work,
 * sends a MBOX_MSG_VF_FLR request to the AF (holding pf->lock around the
 * mailbox transaction), then clears the VF's transaction-pending bit and
 * re-enables its FLR interrupt.
 */
131 static void cptpf_flr_wq_handler(struct work_struct *work)
133 struct cptpf_flr_work *flr_work;
134 struct otx2_cptpf_dev *pf;
135 struct mbox_msghdr *req;
136 struct otx2_mbox *mbox;
139 flr_work = container_of(work, struct cptpf_flr_work, work);
141 mbox = &pf->afpf_mbox;
/* Work items are laid out one per VF, so pointer difference gives the VF index */
143 vf = flr_work - pf->flr_work;
145 mutex_lock(&pf->lock);
146 req = otx2_mbox_alloc_msg_rsp(mbox, 0, sizeof(*req),
147 sizeof(struct msg_rsp));
/* NOTE(review): this unlock is presumably the allocation-failure path — confirm */
149 mutex_unlock(&pf->lock);
/* Target pcifunc is VF index + 1 (func 0 is the PF itself) */
153 req->sig = OTX2_MBOX_REQ_SIG;
154 req->id = MBOX_MSG_VF_FLR;
155 req->pcifunc &= RVU_PFVF_FUNC_MASK;
156 req->pcifunc |= (vf + 1) & RVU_PFVF_FUNC_MASK;
158 otx2_cpt_send_mbox_msg(mbox, pf->pdev);
159 if (!otx2_cpt_sync_mbox_msg(&pf->afpf_mbox)) {
165 /* Clear transaction pending register */
166 otx2_cpt_write64(pf->reg_base, BLKADDR_RVUM, 0,
167 RVU_PF_VFTRPENDX(reg), BIT_ULL(vf));
/* Re-enable the FLR interrupt that the hard-IRQ handler disabled */
168 otx2_cpt_write64(pf->reg_base, BLKADDR_RVUM, 0,
169 RVU_PF_VFFLR_INT_ENA_W1SX(reg), BIT_ULL(vf));
171 mutex_unlock(&pf->lock);
/*
 * cptpf_vf_flr_intr() - hard-IRQ handler for VF FLR interrupts.
 * Scans one interrupt register (two when max_vfs > 64), and for each set
 * bit queues the per-VF FLR work, acks the interrupt, and disables it until
 * the workqueue handler finishes the FLR and re-enables it.
 */
174 static irqreturn_t cptpf_vf_flr_intr(int __always_unused irq, void *arg)
176 int reg, dev, vf, start_vf, num_reg = 1;
177 struct otx2_cptpf_dev *cptpf = arg;
/* With more than 64 VFs a second FLR interrupt register exists */
180 if (cptpf->max_vfs > 64)
183 for (reg = 0; reg < num_reg; reg++) {
184 intr = otx2_cpt_read64(cptpf->reg_base, BLKADDR_RVUM, 0,
185 RVU_PF_VFFLR_INTX(reg));
189 for (vf = 0; vf < 64; vf++) {
190 if (!(intr & BIT_ULL(vf)))
/* Defer the mailbox work to process context */
193 queue_work(cptpf->flr_wq, &cptpf->flr_work[dev].work);
194 /* Clear interrupt */
195 otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
196 RVU_PF_VFFLR_INTX(reg), BIT_ULL(vf));
197 /* Disable the interrupt */
198 otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
199 RVU_PF_VFFLR_INT_ENA_W1CX(reg),
/*
 * cptpf_vf_me_intr() - hard-IRQ handler for VF ME (master enable) events.
 * For each VF whose ME bit is set, clears the VF's transaction-pending bit
 * and acks the ME interrupt.  A second register bank is scanned when
 * max_vfs > 64.
 */
206 static irqreturn_t cptpf_vf_me_intr(int __always_unused irq, void *arg)
208 struct otx2_cptpf_dev *cptpf = arg;
209 int reg, vf, num_reg = 1;
212 if (cptpf->max_vfs > 64)
215 for (reg = 0; reg < num_reg; reg++) {
216 intr = otx2_cpt_read64(cptpf->reg_base, BLKADDR_RVUM, 0,
217 RVU_PF_VFME_INTX(reg));
220 for (vf = 0; vf < 64; vf++) {
221 if (!(intr & BIT_ULL(vf)))
/* Clear the VF's transaction-pending bit */
223 otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
224 RVU_PF_VFTRPENDX(reg), BIT_ULL(vf));
225 /* Clear interrupt */
226 otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
227 RVU_PF_VFME_INTX(reg), BIT_ULL(vf));
/*
 * cptpf_unregister_vfpf_intr() - tear down all VF<->PF interrupts:
 * mailbox interrupts first, then FLR/ME interrupts (reverse of
 * cptpf_register_vfpf_intr()).
 */
233 static void cptpf_unregister_vfpf_intr(struct otx2_cptpf_dev *cptpf,
236 cptpf_disable_vfpf_mbox_intr(cptpf, num_vfs);
237 cptpf_disable_vf_flr_me_intrs(cptpf, num_vfs);
240 static int cptpf_register_vfpf_intr(struct otx2_cptpf_dev *cptpf, int num_vfs)
242 struct pci_dev *pdev = cptpf->pdev;
243 struct device *dev = &pdev->dev;
246 vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFPF_MBOX0);
247 /* Register VF-PF mailbox interrupt handler */
248 ret = request_irq(vector, otx2_cptpf_vfpf_mbox_intr, 0, "CPTVFPF Mbox0",
252 "IRQ registration failed for PFVF mbox0 irq\n");
255 vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFFLR0);
256 /* Register VF FLR interrupt handler */
257 ret = request_irq(vector, cptpf_vf_flr_intr, 0, "CPTPF FLR0", cptpf);
260 "IRQ registration failed for VFFLR0 irq\n");
263 vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFME0);
264 /* Register VF ME interrupt handler */
265 ret = request_irq(vector, cptpf_vf_me_intr, 0, "CPTPF ME0", cptpf);
268 "IRQ registration failed for PFVF mbox0 irq\n");
273 vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFPF_MBOX1);
274 ret = request_irq(vector, otx2_cptpf_vfpf_mbox_intr, 0,
275 "CPTVFPF Mbox1", cptpf);
278 "IRQ registration failed for PFVF mbox1 irq\n");
281 vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFFLR1);
282 /* Register VF FLR interrupt handler */
283 ret = request_irq(vector, cptpf_vf_flr_intr, 0, "CPTPF FLR1",
287 "IRQ registration failed for VFFLR1 irq\n");
290 vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFME1);
291 /* Register VF FLR interrupt handler */
292 ret = request_irq(vector, cptpf_vf_me_intr, 0, "CPTPF ME1",
296 "IRQ registration failed for VFFLR1 irq\n");
300 cptpf_enable_vfpf_mbox_intr(cptpf, num_vfs);
301 cptpf_enable_vf_flr_me_intrs(cptpf, num_vfs);
306 vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFFLR1);
307 free_irq(vector, cptpf);
309 vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFPF_MBOX1);
310 free_irq(vector, cptpf);
312 vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFME0);
313 free_irq(vector, cptpf);
315 vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFFLR0);
316 free_irq(vector, cptpf);
318 vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFPF_MBOX0);
319 free_irq(vector, cptpf);
/*
 * cptpf_flr_wq_destroy() - destroy the FLR workqueue created by
 * cptpf_flr_wq_init().
 */
323 static void cptpf_flr_wq_destroy(struct otx2_cptpf_dev *pf)
327 destroy_workqueue(pf->flr_wq);
/*
 * cptpf_flr_wq_init() - allocate the ordered FLR workqueue and one
 * cptpf_flr_work item per VF, each initialized to run
 * cptpf_flr_wq_handler().  The workqueue is destroyed again on the
 * allocation-failure path.
 */
332 static int cptpf_flr_wq_init(struct otx2_cptpf_dev *cptpf, int num_vfs)
336 cptpf->flr_wq = alloc_ordered_workqueue("cptpf_flr_wq", 0);
340 cptpf->flr_work = kcalloc(num_vfs, sizeof(struct cptpf_flr_work),
342 if (!cptpf->flr_work)
345 for (vf = 0; vf < num_vfs; vf++) {
346 cptpf->flr_work[vf].pf = cptpf;
347 INIT_WORK(&cptpf->flr_work[vf].work, cptpf_flr_wq_handler);
/* Error path: undo the workqueue allocation */
352 destroy_workqueue(cptpf->flr_wq);
/*
 * cptpf_vfpf_mbox_init() - set up the VF<->PF mailbox.
 * Allocates an ordered high-priority workqueue, maps the mailbox memory
 * region (address register differs on CN10K vs OcteonTX2), initializes the
 * otx2 mailbox in PF->VF direction, and initializes per-VF bookkeeping and
 * work items.
 */
356 static int cptpf_vfpf_mbox_init(struct otx2_cptpf_dev *cptpf, int num_vfs)
358 struct device *dev = &cptpf->pdev->dev;
362 cptpf->vfpf_mbox_wq =
363 alloc_ordered_workqueue("cpt_vfpf_mailbox",
364 WQ_HIGHPRI | WQ_MEM_RECLAIM);
365 if (!cptpf->vfpf_mbox_wq)
368 /* Map VF-PF mailbox memory */
/* CN10K exposes the mailbox base via RVU_PF_VF_MBOX_ADDR, older parts via BAR4 */
369 if (test_bit(CN10K_MBOX, &cptpf->cap_flag))
370 vfpf_mbox_base = readq(cptpf->reg_base + RVU_PF_VF_MBOX_ADDR);
372 vfpf_mbox_base = readq(cptpf->reg_base + RVU_PF_VF_BAR4_ADDR);
374 if (!vfpf_mbox_base) {
375 dev_err(dev, "VF-PF mailbox address not configured\n");
/* One MBOX_SIZE region per possible VF */
379 cptpf->vfpf_mbox_base = devm_ioremap_wc(dev, vfpf_mbox_base,
380 MBOX_SIZE * cptpf->max_vfs);
381 if (!cptpf->vfpf_mbox_base) {
382 dev_err(dev, "Mapping of VF-PF mailbox address failed\n");
386 err = otx2_mbox_init(&cptpf->vfpf_mbox, cptpf->vfpf_mbox_base,
387 cptpf->pdev, cptpf->reg_base, MBOX_DIR_PFVF,
/* Per-VF state: id, back-pointer, interrupt bit index within its 64-bit bank */
392 for (i = 0; i < num_vfs; i++) {
393 cptpf->vf[i].vf_id = i;
394 cptpf->vf[i].cptpf = cptpf;
395 cptpf->vf[i].intr_idx = i % 64;
396 INIT_WORK(&cptpf->vf[i].vfpf_mbox_work,
397 otx2_cptpf_vfpf_mbox_handler);
/* Error path: undo the workqueue allocation */
402 destroy_workqueue(cptpf->vfpf_mbox_wq);
/*
 * cptpf_vfpf_mbox_destroy() - tear down the VF<->PF mailbox: drain and
 * destroy the workqueue first, then destroy the mailbox itself.
 */
406 static void cptpf_vfpf_mbox_destroy(struct otx2_cptpf_dev *cptpf)
408 destroy_workqueue(cptpf->vfpf_mbox_wq);
409 otx2_mbox_destroy(&cptpf->vfpf_mbox);
/*
 * cptpf_disable_afpf_mbox_intr() - mask the AF->PF mailbox interrupt and
 * ack any pending bit.
 */
412 static void cptpf_disable_afpf_mbox_intr(struct otx2_cptpf_dev *cptpf)
414 /* Disable AF-PF interrupt */
415 otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_INT_ENA_W1C,
417 /* Clear interrupt if any */
418 otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_INT, 0x1ULL);
/*
 * cptpf_register_afpf_mbox_intr() - request the AF->PF mailbox vector
 * (devm-managed), enable the interrupt, and send a READY message to the
 * AF.  If the AF does not respond, the interrupt is disabled again and
 * -EPROBE_DEFER is returned so probing retries once the AF is up.
 */
421 static int cptpf_register_afpf_mbox_intr(struct otx2_cptpf_dev *cptpf)
423 struct pci_dev *pdev = cptpf->pdev;
424 struct device *dev = &pdev->dev;
427 irq = pci_irq_vector(pdev, RVU_PF_INT_VEC_AFPF_MBOX);
428 /* Register AF-PF mailbox interrupt handler */
429 ret = devm_request_irq(dev, irq, otx2_cptpf_afpf_mbox_intr, 0,
430 "CPTAFPF Mbox", cptpf);
433 "IRQ registration failed for PFAF mbox irq\n");
436 /* Clear interrupt if any, to avoid spurious interrupts */
437 otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_INT, 0x1ULL);
438 /* Enable AF-PF interrupt */
439 otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_INT_ENA_W1S,
/* Handshake with the AF; failure here means the AF driver isn't ready yet */
442 ret = otx2_cpt_send_ready_msg(&cptpf->afpf_mbox, cptpf->pdev);
445 "AF not responding to mailbox, deferring probe\n");
446 cptpf_disable_afpf_mbox_intr(cptpf);
447 return -EPROBE_DEFER;
/*
 * cptpf_afpf_mbox_init() - set up the AF<->PF mailbox.
 * Allocates an ordered high-priority workqueue, maps the mailbox BAR, and
 * initializes both mailbox directions (PF->AF requests and AF->PF
 * notifications "up" channel) plus their work handlers and the shared
 * cptpf->lock.
 */
452 static int cptpf_afpf_mbox_init(struct otx2_cptpf_dev *cptpf)
454 struct pci_dev *pdev = cptpf->pdev;
455 resource_size_t offset;
458 cptpf->afpf_mbox_wq =
459 alloc_ordered_workqueue("cpt_afpf_mailbox",
460 WQ_HIGHPRI | WQ_MEM_RECLAIM);
461 if (!cptpf->afpf_mbox_wq)
464 offset = pci_resource_start(pdev, PCI_MBOX_BAR_NUM);
465 /* Map AF-PF mailbox memory */
466 cptpf->afpf_mbox_base = devm_ioremap_wc(&pdev->dev, offset, MBOX_SIZE);
467 if (!cptpf->afpf_mbox_base) {
468 dev_err(&pdev->dev, "Unable to map BAR4\n");
/* Request direction: PF -> AF */
473 err = otx2_mbox_init(&cptpf->afpf_mbox, cptpf->afpf_mbox_base,
474 pdev, cptpf->reg_base, MBOX_DIR_PFAF, 1);
/* Notification ("up") direction: AF -> PF */
478 err = otx2_mbox_init(&cptpf->afpf_mbox_up, cptpf->afpf_mbox_base,
479 pdev, cptpf->reg_base, MBOX_DIR_PFAF_UP, 1);
483 INIT_WORK(&cptpf->afpf_mbox_work, otx2_cptpf_afpf_mbox_handler);
484 INIT_WORK(&cptpf->afpf_mbox_up_work, otx2_cptpf_afpf_mbox_up_handler);
485 mutex_init(&cptpf->lock);
/* Error paths: unwind in reverse order of setup */
490 otx2_mbox_destroy(&cptpf->afpf_mbox);
492 destroy_workqueue(cptpf->afpf_mbox_wq);
/*
 * cptpf_afpf_mbox_destroy() - tear down the AF<->PF mailbox: drain the
 * workqueue, then destroy both mailbox directions.
 */
496 static void cptpf_afpf_mbox_destroy(struct otx2_cptpf_dev *cptpf)
498 destroy_workqueue(cptpf->afpf_mbox_wq);
499 otx2_mbox_destroy(&cptpf->afpf_mbox);
500 otx2_mbox_destroy(&cptpf->afpf_mbox_up);
/* sysfs show: report the current sso_pf_func_ovrd value. */
503 static ssize_t sso_pf_func_ovrd_show(struct device *dev,
504 struct device_attribute *attr, char *buf)
506 struct otx2_cptpf_dev *cptpf = dev_get_drvdata(dev);
508 return sprintf(buf, "%d\n", cptpf->sso_pf_func_ovrd);
/*
 * sysfs store: update sso_pf_func_ovrd.  The knob only applies on CN9K B0
 * silicon; on other revisions the write is silently ignored.
 */
511 static ssize_t sso_pf_func_ovrd_store(struct device *dev,
512 struct device_attribute *attr,
513 const char *buf, size_t count)
515 struct otx2_cptpf_dev *cptpf = dev_get_drvdata(dev);
/* Only meaningful on CN9K B0 (style nit: reads clearer as `revision != ...`) */
518 if (!(cptpf->pdev->revision == CPT_UC_RID_CN9K_B0))
521 if (kstrtou8(buf, 0, &sso_pf_func_ovrd))
524 cptpf->sso_pf_func_ovrd = sso_pf_func_ovrd;
/* sysfs show: report the kernel-VF LF limit. */
529 static ssize_t kvf_limits_show(struct device *dev,
530 struct device_attribute *attr, char *buf)
532 struct otx2_cptpf_dev *cptpf = dev_get_drvdata(dev);
534 return sprintf(buf, "%d\n", cptpf->kvf_limits);
/*
 * sysfs store: set the kernel-VF LF limit.  Accepts values in
 * [1, num_online_cpus()]; anything else is rejected with an error message.
 */
537 static ssize_t kvf_limits_store(struct device *dev,
538 struct device_attribute *attr,
539 const char *buf, size_t count)
541 struct otx2_cptpf_dev *cptpf = dev_get_drvdata(dev);
545 ret = kstrtoint(buf, 0, &lfs_num);
548 if (lfs_num < 1 || lfs_num > num_online_cpus()) {
549 dev_err(dev, "lfs count %d must be in range [1 - %d]\n",
550 lfs_num, num_online_cpus());
553 cptpf->kvf_limits = lfs_num;
/* sysfs attributes exposed on the PF device (created in probe). */
558 static DEVICE_ATTR_RW(kvf_limits);
559 static DEVICE_ATTR_RW(sso_pf_func_ovrd);
561 static struct attribute *cptpf_attrs[] = {
562 &dev_attr_kvf_limits.attr,
563 &dev_attr_sso_pf_func_ovrd.attr,
567 static const struct attribute_group cptpf_sysfs_group = {
568 .attrs = cptpf_attrs,
/*
 * cpt_is_pf_usable() - check whether the AF has initialized the RVUM block.
 * Reads the block-discovery register and extracts the revision field
 * (bits 19:12); a zero revision means the AF driver has not come up yet,
 * so the probe is deferred.
 */
571 static int cpt_is_pf_usable(struct otx2_cptpf_dev *cptpf)
575 rev = otx2_cpt_read64(cptpf->reg_base, BLKADDR_RVUM, 0,
576 RVU_PF_BLOCK_ADDRX_DISC(BLKADDR_RVUM));
577 rev = (rev >> 12) & 0xFF;
579 * Check if AF has setup revision for RVUM block, otherwise
580 * driver probe should be deferred until AF driver comes up
583 dev_warn(&cptpf->pdev->dev,
584 "AF is not initialized, deferring probe\n");
585 return -EPROBE_DEFER;
590 static int cptx_device_reset(struct otx2_cptpf_dev *cptpf, int blkaddr)
592 int timeout = 10, ret;
595 ret = otx2_cpt_write_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
596 CPT_AF_BLK_RST, 0x1, blkaddr);
601 ret = otx2_cpt_read_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
602 CPT_AF_BLK_RST, ®, blkaddr);
606 if (!((reg >> 63) & 0x1))
609 usleep_range(10000, 20000);
/*
 * cptpf_device_reset() - reset all implemented CPT blocks: CPT1 first when
 * present, then CPT0.
 */
617 static int cptpf_device_reset(struct otx2_cptpf_dev *cptpf)
621 if (cptpf->has_cpt1) {
622 ret = cptx_device_reset(cptpf, BLKADDR_CPT1);
626 return cptx_device_reset(cptpf, BLKADDR_CPT0);
/*
 * cptpf_check_block_implemented() - probe the discovery register for CPT1
 * and latch has_cpt1 when the 'implemented' bit (bit 11) is set.
 */
629 static void cptpf_check_block_implemented(struct otx2_cptpf_dev *cptpf)
633 cfg = otx2_cpt_read64(cptpf->reg_base, BLKADDR_RVUM, 0,
634 RVU_PF_BLOCK_ADDRX_DISC(BLKADDR_CPT1));
635 if (cfg & BIT_ULL(11))
636 cptpf->has_cpt1 = true;
/*
 * cptpf_device_init() - one-time CPT hardware init at probe.
 * Detects CPT1, resets the CPT block(s), reads CPT_AF_CONSTANTS1 to learn
 * the available SE/IE/AE engine counts, and disables all engine cores.
 */
639 static int cptpf_device_init(struct otx2_cptpf_dev *cptpf)
641 union otx2_cptx_af_constants1 af_cnsts1 = {0};
644 /* check if 'implemented' bit is set for block BLKADDR_CPT1 */
645 cptpf_check_block_implemented(cptpf);
646 /* Reset the CPT PF device */
647 ret = cptpf_device_reset(cptpf);
651 /* Get number of SE, IE and AE engines */
652 ret = otx2_cpt_read_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
653 CPT_AF_CONSTANTS1, &af_cnsts1.u,
/* Record maximum engine counts per type for engine-group allocation */
658 cptpf->eng_grps.avail.max_se_cnt = af_cnsts1.s.se;
659 cptpf->eng_grps.avail.max_ie_cnt = af_cnsts1.s.ie;
660 cptpf->eng_grps.avail.max_ae_cnt = af_cnsts1.s.ae;
662 /* Disable all cores */
663 ret = otx2_cpt_disable_all_cores(cptpf);
/*
 * cptpf_sriov_disable() - undo cptpf_sriov_enable(): disable SR-IOV,
 * unregister VF<->PF interrupts, destroy the FLR workqueue and VF<->PF
 * mailbox, drop the module reference, and clear enabled_vfs.
 */
668 static int cptpf_sriov_disable(struct pci_dev *pdev)
670 struct otx2_cptpf_dev *cptpf = pci_get_drvdata(pdev);
671 int num_vfs = pci_num_vf(pdev);
676 pci_disable_sriov(pdev);
677 cptpf_unregister_vfpf_intr(cptpf, num_vfs);
678 cptpf_flr_wq_destroy(cptpf);
679 cptpf_vfpf_mbox_destroy(cptpf);
/* Balance the try_module_get() taken when VFs were enabled */
680 module_put(THIS_MODULE);
681 cptpf->enabled_vfs = 0;
/*
 * cptpf_sriov_enable() - bring up num_vfs virtual functions.
 * Order: VF<->PF mailbox, FLR workqueue, VF<->PF interrupts, engine
 * capability discovery, engine-group creation, then pci_enable_sriov().
 * Error paths unwind in reverse order.
 */
686 static int cptpf_sriov_enable(struct pci_dev *pdev, int num_vfs)
688 struct otx2_cptpf_dev *cptpf = pci_get_drvdata(pdev);
691 /* Initialize VF<=>PF mailbox */
692 ret = cptpf_vfpf_mbox_init(cptpf, num_vfs);
696 ret = cptpf_flr_wq_init(cptpf, num_vfs);
699 /* Register VF<=>PF mailbox interrupt */
700 ret = cptpf_register_vfpf_intr(cptpf, num_vfs);
704 /* Get CPT HW capabilities using LOAD_FVC operation. */
705 ret = otx2_cpt_discover_eng_capabilities(cptpf);
709 ret = otx2_cpt_create_eng_grps(cptpf, &cptpf->eng_grps);
/* enabled_vfs must be set before VFs come up and start using the mailbox */
713 cptpf->enabled_vfs = num_vfs;
714 ret = pci_enable_sriov(pdev, num_vfs);
718 dev_notice(&cptpf->pdev->dev, "VFs enabled: %d\n", num_vfs);
/* Pin the module while VFs are active; released in cptpf_sriov_disable() */
720 try_module_get(THIS_MODULE);
/* Error paths: unwind in reverse order of setup */
724 cptpf_unregister_vfpf_intr(cptpf, num_vfs);
725 cptpf->enabled_vfs = 0;
727 cptpf_flr_wq_destroy(cptpf);
729 cptpf_vfpf_mbox_destroy(cptpf);
/*
 * otx2_cptpf_sriov_configure() - PCI sriov_configure callback: a non-zero
 * num_vfs enables that many VFs, zero disables SR-IOV.
 */
733 static int otx2_cptpf_sriov_configure(struct pci_dev *pdev, int num_vfs)
736 return cptpf_sriov_enable(pdev, num_vfs);
738 return cptpf_sriov_disable(pdev);
/*
 * otx2_cptpf_probe() - PCI probe for the CPT PF.
 * Sets up PCI (device enable, 48-bit DMA mask, BAR mapping, bus
 * mastering), defers if the AF is not up, allocates MSI-X vectors,
 * initializes the AF<->PF mailbox and its interrupt, LMTST (CN10K),
 * the CPT device, engine groups, the sysfs group, and devlink.
 * Error paths unwind in reverse order of setup.
 */
742 static int otx2_cptpf_probe(struct pci_dev *pdev,
743 const struct pci_device_id *ent)
745 struct device *dev = &pdev->dev;
746 struct otx2_cptpf_dev *cptpf;
749 cptpf = devm_kzalloc(dev, sizeof(*cptpf), GFP_KERNEL);
753 err = pcim_enable_device(pdev);
755 dev_err(dev, "Failed to enable PCI device\n");
/* Hardware supports 48-bit DMA addressing */
759 err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
761 dev_err(dev, "Unable to get usable DMA configuration\n");
764 /* Map PF's configuration registers */
765 err = pcim_iomap_regions_request_all(pdev, 1 << PCI_PF_REG_BAR_NUM,
768 dev_err(dev, "Couldn't get PCI resources 0x%x\n", err);
771 pci_set_master(pdev);
772 pci_set_drvdata(pdev, cptpf);
775 cptpf->reg_base = pcim_iomap_table(pdev)[PCI_PF_REG_BAR_NUM];
777 /* Check if AF driver is up, otherwise defer probe */
778 err = cpt_is_pf_usable(cptpf);
/* All RVU PF interrupt vectors are required; fail if fewer are granted */
782 err = pci_alloc_irq_vectors(pdev, RVU_PF_INT_VEC_CNT,
783 RVU_PF_INT_VEC_CNT, PCI_IRQ_MSIX);
785 dev_err(dev, "Request for %d msix vectors failed\n",
789 otx2_cpt_set_hw_caps(pdev, &cptpf->cap_flag);
790 /* Initialize AF-PF mailbox */
791 err = cptpf_afpf_mbox_init(cptpf);
794 /* Register mailbox interrupt */
795 err = cptpf_register_afpf_mbox_intr(cptpf);
797 goto destroy_afpf_mbox;
799 cptpf->max_vfs = pci_sriov_get_totalvfs(pdev);
/* CN10K-only LMTST setup (no-op / trivial on older silicon) */
801 err = cn10k_cptpf_lmtst_init(cptpf);
803 goto unregister_intr;
805 /* Initialize CPT PF device */
806 err = cptpf_device_init(cptpf);
808 goto unregister_intr;
810 /* Initialize engine groups */
811 err = otx2_cpt_init_eng_grps(pdev, &cptpf->eng_grps);
813 goto unregister_intr;
815 err = sysfs_create_group(&dev->kobj, &cptpf_sysfs_group);
817 goto cleanup_eng_grps;
819 err = otx2_cpt_register_dl(cptpf);
/* Error paths: unwind in reverse order of setup */
826 sysfs_remove_group(&dev->kobj, &cptpf_sysfs_group);
828 otx2_cpt_cleanup_eng_grps(pdev, &cptpf->eng_grps);
830 cptpf_disable_afpf_mbox_intr(cptpf);
832 cptpf_afpf_mbox_destroy(cptpf);
834 pci_set_drvdata(pdev, NULL);
/*
 * otx2_cptpf_remove() - PCI remove: tear down in reverse order of probe —
 * SR-IOV, devlink, sysfs, engine groups, AF-PF mailbox interrupt and
 * mailbox, then clear drvdata.  devm handles the rest.
 */
838 static void otx2_cptpf_remove(struct pci_dev *pdev)
840 struct otx2_cptpf_dev *cptpf = pci_get_drvdata(pdev);
845 cptpf_sriov_disable(pdev);
846 otx2_cpt_unregister_dl(cptpf);
847 /* Delete sysfs entry created for kernel VF limits */
848 sysfs_remove_group(&pdev->dev.kobj, &cptpf_sysfs_group);
849 /* Cleanup engine groups */
850 otx2_cpt_cleanup_eng_grps(pdev, &cptpf->eng_grps);
851 /* Disable AF-PF mailbox interrupt */
852 cptpf_disable_afpf_mbox_intr(cptpf);
853 /* Destroy AF-PF mbox */
854 cptpf_afpf_mbox_destroy(cptpf);
855 pci_set_drvdata(pdev, NULL);
858 /* Supported devices */
/* OcteonTX2 (CN9K) and CN10K CPT PF PCI device IDs */
859 static const struct pci_device_id otx2_cpt_id_table[] = {
860 { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, OTX2_CPT_PCI_PF_DEVICE_ID) },
861 { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, CN10K_CPT_PCI_PF_DEVICE_ID) },
862 { 0, } /* end of table */
/* PCI driver ops; sriov_configure hooks SR-IOV enable/disable via sysfs. */
865 static struct pci_driver otx2_cpt_pci_driver = {
866 .name = OTX2_CPT_DRV_NAME,
867 .id_table = otx2_cpt_id_table,
868 .probe = otx2_cptpf_probe,
869 .remove = otx2_cptpf_remove,
870 .sriov_configure = otx2_cptpf_sriov_configure
875 MODULE_IMPORT_NS(CRYPTO_DEV_OCTEONTX2_CPT);
877 MODULE_AUTHOR("Marvell");
878 MODULE_DESCRIPTION(OTX2_CPT_DRV_STRING);
879 MODULE_LICENSE("GPL v2");
880 MODULE_DEVICE_TABLE(pci, otx2_cpt_id_table);