1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (C) 2020 Marvell. */
4 #include <linux/firmware.h>
5 #include "otx2_cpt_hw_types.h"
6 #include "otx2_cpt_common.h"
7 #include "otx2_cptpf_ucode.h"
8 #include "otx2_cptpf.h"
/* Driver name and human-readable description reported via the PCI driver
 * registration and MODULE_DESCRIPTION() below.
 */
12 #define OTX2_CPT_DRV_NAME "rvu_cptpf"
13 #define OTX2_CPT_DRV_STRING "Marvell RVU CPT Physical Function Driver"
/*
 * Clear any latched VF->PF mailbox interrupts, then enable them for the
 * first num_vfs VFs.  INTX(0)/ENA_W1SX(0) covers VFs 0-63, the (1)
 * registers cover VFs 64-127.
 * NOTE(review): this listing is missing interior lines (second parameter
 * line, opening brace, the 'ena_bits' declaration and, presumably, an
 * 'if (num_vfs > 64)' guard before the second enable) — confirm against
 * the complete source file.
 */
15 static void cptpf_enable_vfpf_mbox_intr(struct otx2_cptpf_dev *cptpf,
20 /* Clear any pending interrupts */
21 otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
22 RVU_PF_VFPF_MBOX_INTX(0), ~0x0ULL);
23 otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
24 RVU_PF_VFPF_MBOX_INTX(1), ~0x0ULL);
26 /* Enable VF interrupts for VFs from 0 to 63 */
27 ena_bits = ((num_vfs - 1) % 64);
28 otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
29 RVU_PF_VFPF_MBOX_INT_ENA_W1SX(0),
30 GENMASK_ULL(ena_bits, 0));
33 /* Enable VF interrupts for VFs from 64 to 127 */
34 ena_bits = num_vfs - 64 - 1;
35 otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
36 RVU_PF_VFPF_MBOX_INT_ENA_W1SX(1),
37 GENMASK_ULL(ena_bits, 0));
/*
 * Mask and acknowledge the VF->PF mailbox interrupts and release both
 * mailbox IRQ vectors (MBOX0 and MBOX1).
 * NOTE(review): listing is missing interior lines (opening brace, the
 * 'vector' declaration and, presumably, a num_vfs > 64 guard around the
 * MBOX1 teardown) — confirm against the complete source file.
 */
41 static void cptpf_disable_vfpf_mbox_intr(struct otx2_cptpf_dev *cptpf,
46 /* Disable VF-PF interrupts */
47 otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
48 RVU_PF_VFPF_MBOX_INT_ENA_W1CX(0), ~0ULL);
49 otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
50 RVU_PF_VFPF_MBOX_INT_ENA_W1CX(1), ~0ULL);
51 /* Clear any pending interrupts */
52 otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
53 RVU_PF_VFPF_MBOX_INTX(0), ~0ULL);
55 vector = pci_irq_vector(cptpf->pdev, RVU_PF_INT_VEC_VFPF_MBOX0);
56 free_irq(vector, cptpf);
59 otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
60 RVU_PF_VFPF_MBOX_INTX(1), ~0ULL);
61 vector = pci_irq_vector(cptpf->pdev, RVU_PF_INT_VEC_VFPF_MBOX1);
62 free_irq(vector, cptpf);
/*
 * Acknowledge any latched VF FLR (Function Level Reset) and ME (Master
 * Enable) interrupts, then enable them for num_vfs VFs.  The (1)
 * registers cover VFs 64-127.
 * NOTE(review): listing is missing interior lines (opening brace, the
 * INTR_MASK(num_vfs) arguments for the first two INTX writes and,
 * presumably, an 'if (num_vfs > 64)' guard before the second register
 * set) — confirm against the complete source file.
 */
66 static void cptpf_enable_vf_flr_me_intrs(struct otx2_cptpf_dev *cptpf,
69 /* Clear FLR interrupt if any */
70 otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_VFFLR_INTX(0),
73 /* Enable VF FLR interrupts */
74 otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
75 RVU_PF_VFFLR_INT_ENA_W1SX(0), INTR_MASK(num_vfs));
76 /* Clear ME interrupt if any */
77 otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_VFME_INTX(0),
79 /* Enable VF ME interrupts */
80 otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
81 RVU_PF_VFME_INT_ENA_W1SX(0), INTR_MASK(num_vfs));
86 otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_VFFLR_INTX(1),
87 INTR_MASK(num_vfs - 64));
88 otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
89 RVU_PF_VFFLR_INT_ENA_W1SX(1), INTR_MASK(num_vfs - 64));
91 otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_VFME_INTX(1),
92 INTR_MASK(num_vfs - 64));
93 otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
94 RVU_PF_VFME_INT_ENA_W1SX(1), INTR_MASK(num_vfs - 64));
/*
 * Mask the VF FLR and ME interrupts and release the corresponding IRQ
 * vectors (FLR0/ME0, then FLR1/ME1 for VFs 64-127).
 * NOTE(review): listing is missing interior lines (opening brace, the
 * 'vector' declaration and, presumably, a num_vfs > 64 guard around the
 * FLR1/ME1 teardown) — confirm against the complete source file.
 */
97 static void cptpf_disable_vf_flr_me_intrs(struct otx2_cptpf_dev *cptpf,
102 /* Disable VF FLR interrupts */
103 otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
104 RVU_PF_VFFLR_INT_ENA_W1CX(0), INTR_MASK(num_vfs));
105 vector = pci_irq_vector(cptpf->pdev, RVU_PF_INT_VEC_VFFLR0);
106 free_irq(vector, cptpf);
108 /* Disable VF ME interrupts */
109 otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
110 RVU_PF_VFME_INT_ENA_W1CX(0), INTR_MASK(num_vfs));
111 vector = pci_irq_vector(cptpf->pdev, RVU_PF_INT_VEC_VFME0);
112 free_irq(vector, cptpf);
117 otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
118 RVU_PF_VFFLR_INT_ENA_W1CX(1), INTR_MASK(num_vfs - 64));
119 vector = pci_irq_vector(cptpf->pdev, RVU_PF_INT_VEC_VFFLR1);
120 free_irq(vector, cptpf);
122 otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
123 RVU_PF_VFME_INT_ENA_W1CX(1), INTR_MASK(num_vfs - 64));
124 vector = pci_irq_vector(cptpf->pdev, RVU_PF_INT_VEC_VFME1);
125 free_irq(vector, cptpf);
/*
 * Deferred (workqueue) handler for a VF Function Level Reset: forward a
 * MBOX_MSG_VF_FLR message to the AF over the AF-PF mailbox, then clear
 * the VF's transaction-pending bit and re-enable its FLR interrupt,
 * which was masked in cptpf_vf_flr_intr().
 * NOTE(review): listing is missing interior lines ('vf'/'reg'
 * declarations, the 'pf = flr_work->pf' assignment and the NULL check on
 * the otx2_mbox_alloc_msg_rsp() result) — confirm against the full file.
 */
128 static void cptpf_flr_wq_handler(struct work_struct *work)
130 struct cptpf_flr_work *flr_work;
131 struct otx2_cptpf_dev *pf;
132 struct mbox_msghdr *req;
133 struct otx2_mbox *mbox;
136 flr_work = container_of(work, struct cptpf_flr_work, work);
138 mbox = &pf->afpf_mbox;
/* Work items are allocated as an array, so pointer difference == VF index */
140 vf = flr_work - pf->flr_work;
142 req = otx2_mbox_alloc_msg_rsp(mbox, 0, sizeof(*req),
143 sizeof(struct msg_rsp));
147 req->sig = OTX2_MBOX_REQ_SIG;
148 req->id = MBOX_MSG_VF_FLR;
/* Encode the target VF in the FUNC field of pcifunc (VF index + 1) */
149 req->pcifunc &= RVU_PFVF_FUNC_MASK;
150 req->pcifunc |= (vf + 1) & RVU_PFVF_FUNC_MASK;
152 otx2_cpt_send_mbox_msg(mbox, pf->pdev);
158 /* Clear transaction pending register */
159 otx2_cpt_write64(pf->reg_base, BLKADDR_RVUM, 0,
160 RVU_PF_VFTRPENDX(reg), BIT_ULL(vf));
161 otx2_cpt_write64(pf->reg_base, BLKADDR_RVUM, 0,
162 RVU_PF_VFFLR_INT_ENA_W1SX(reg), BIT_ULL(vf));
/*
 * VF FLR interrupt handler.  For every VF bit set in RVU_PF_VFFLR_INTX,
 * queue the matching FLR work item, acknowledge the interrupt and mask
 * it; the work handler re-enables it after notifying the AF.
 * NOTE(review): listing is missing interior lines (the 'intr'
 * declaration, 'num_reg = 2' under the max_vfs > 64 check, the 'dev'
 * computation and the ending 'return IRQ_HANDLED;') — confirm against
 * the complete source file.
 */
165 static irqreturn_t cptpf_vf_flr_intr(int __always_unused irq, void *arg)
167 int reg, dev, vf, start_vf, num_reg = 1;
168 struct otx2_cptpf_dev *cptpf = arg;
/* A second FLR interrupt register exists when more than 64 VFs are used */
171 if (cptpf->max_vfs > 64)
174 for (reg = 0; reg < num_reg; reg++) {
175 intr = otx2_cpt_read64(cptpf->reg_base, BLKADDR_RVUM, 0,
176 RVU_PF_VFFLR_INTX(reg));
180 for (vf = 0; vf < 64; vf++) {
181 if (!(intr & BIT_ULL(vf)))
184 queue_work(cptpf->flr_wq, &cptpf->flr_work[dev].work);
185 /* Clear interrupt */
186 otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
187 RVU_PF_VFFLR_INTX(reg), BIT_ULL(vf));
188 /* Disable the interrupt */
189 otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
190 RVU_PF_VFFLR_INT_ENA_W1CX(reg),
/*
 * VF ME (Master Enable) interrupt handler.  For every VF bit set in
 * RVU_PF_VFME_INTX, clear the VF's transaction-pending bit and
 * acknowledge the ME interrupt.
 * NOTE(review): listing is missing interior lines (the 'intr'
 * declaration, 'num_reg = 2' under the max_vfs > 64 check and the
 * ending 'return IRQ_HANDLED;') — confirm against the full file.
 */
197 static irqreturn_t cptpf_vf_me_intr(int __always_unused irq, void *arg)
199 struct otx2_cptpf_dev *cptpf = arg;
200 int reg, vf, num_reg = 1;
203 if (cptpf->max_vfs > 64)
206 for (reg = 0; reg < num_reg; reg++) {
207 intr = otx2_cpt_read64(cptpf->reg_base, BLKADDR_RVUM, 0,
208 RVU_PF_VFME_INTX(reg));
211 for (vf = 0; vf < 64; vf++) {
212 if (!(intr & BIT_ULL(vf)))
214 otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
215 RVU_PF_VFTRPENDX(reg), BIT_ULL(vf));
216 /* Clear interrupt */
217 otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
218 RVU_PF_VFME_INTX(reg), BIT_ULL(vf));
/* Tear down all VF-related interrupts: VF->PF mailbox, then FLR/ME. */
224 static void cptpf_unregister_vfpf_intr(struct otx2_cptpf_dev *cptpf,
227 cptpf_disable_vfpf_mbox_intr(cptpf, num_vfs);
228 cptpf_disable_vf_flr_me_intrs(cptpf, num_vfs);
/*
 * Register the VF->PF mailbox, VF FLR and VF ME interrupt handlers and
 * enable those interrupts for num_vfs VFs.  The MBOX1/FLR1/ME1 vectors
 * are only registered for configurations with more than 64 VFs.
 * Returns 0 on success; on failure the already-requested vectors are
 * freed via the unwind labels at the bottom (partly missing from this
 * listing).
 *
 * Fixed: the VFME0 and VFME1 request_irq() failure messages were
 * copy-pasted from the mbox0/VFFLR1 paths and named the wrong IRQ, and
 * the comment above the ME1 registration said "FLR" instead of "ME".
 */
231 static int cptpf_register_vfpf_intr(struct otx2_cptpf_dev *cptpf, int num_vfs)
233 struct pci_dev *pdev = cptpf->pdev;
234 struct device *dev = &pdev->dev;
237 vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFPF_MBOX0);
238 /* Register VF-PF mailbox interrupt handler */
239 ret = request_irq(vector, otx2_cptpf_vfpf_mbox_intr, 0, "CPTVFPF Mbox0",
243 "IRQ registration failed for PFVF mbox0 irq\n");
246 vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFFLR0);
247 /* Register VF FLR interrupt handler */
248 ret = request_irq(vector, cptpf_vf_flr_intr, 0, "CPTPF FLR0", cptpf);
251 "IRQ registration failed for VFFLR0 irq\n");
254 vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFME0);
255 /* Register VF ME interrupt handler */
256 ret = request_irq(vector, cptpf_vf_me_intr, 0, "CPTPF ME0", cptpf);
259 "IRQ registration failed for VFME0 irq\n");
264 vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFPF_MBOX1);
265 ret = request_irq(vector, otx2_cptpf_vfpf_mbox_intr, 0,
266 "CPTVFPF Mbox1", cptpf);
269 "IRQ registration failed for PFVF mbox1 irq\n");
272 vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFFLR1);
273 /* Register VF FLR interrupt handler */
274 ret = request_irq(vector, cptpf_vf_flr_intr, 0, "CPTPF FLR1",
278 "IRQ registration failed for VFFLR1 irq\n");
281 vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFME1);
282 /* Register VF ME interrupt handler */
283 ret = request_irq(vector, cptpf_vf_me_intr, 0, "CPTPF ME1",
287 "IRQ registration failed for VFME1 irq\n");
/* All handlers in place: turn the interrupt sources on */
291 cptpf_enable_vfpf_mbox_intr(cptpf, num_vfs);
292 cptpf_enable_vf_flr_me_intrs(cptpf, num_vfs);
/* Error unwind: free vectors in reverse order of registration */
297 vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFFLR1);
298 free_irq(vector, cptpf);
300 vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFPF_MBOX1);
301 free_irq(vector, cptpf);
303 vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFME0);
304 free_irq(vector, cptpf);
306 vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFFLR0);
307 free_irq(vector, cptpf);
309 vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFPF_MBOX0);
310 free_irq(vector, cptpf);
/*
 * Destroy the FLR workqueue.
 * NOTE(review): listing is missing interior lines here (presumably the
 * release of pf->flr_work allocated in cptpf_flr_wq_init()) — confirm
 * against the complete source file.
 */
314 static void cptpf_flr_wq_destroy(struct otx2_cptpf_dev *pf)
318 destroy_workqueue(pf->flr_wq);
/*
 * Allocate an ordered workqueue for handling VF FLR events plus one
 * cptpf_flr_work item per VF, each initialized to run
 * cptpf_flr_wq_handler().
 * NOTE(review): listing is missing interior lines (NULL check on the
 * workqueue, GFP flags, return statements and the error label that
 * reaches the trailing destroy_workqueue()) — confirm against the
 * complete source file.
 */
323 static int cptpf_flr_wq_init(struct otx2_cptpf_dev *cptpf, int num_vfs)
327 cptpf->flr_wq = alloc_ordered_workqueue("cptpf_flr_wq", 0);
331 cptpf->flr_work = kcalloc(num_vfs, sizeof(struct cptpf_flr_work),
333 if (!cptpf->flr_work)
336 for (vf = 0; vf < num_vfs; vf++) {
337 cptpf->flr_work[vf].pf = cptpf;
338 INIT_WORK(&cptpf->flr_work[vf].work, cptpf_flr_wq_handler);
343 destroy_workqueue(cptpf->flr_wq);
/*
 * Set up the VF<->PF mailbox: allocate its workqueue, map the mailbox
 * memory region (address source differs between CN10K and OcteonTX2),
 * initialize the otx2_mbox in PF->VF direction and prepare one
 * per-VF work item running otx2_cptpf_vfpf_mbox_handler().
 * NOTE(review): listing is missing interior lines (declarations, error
 * returns and the label reaching the trailing destroy_workqueue()) —
 * confirm against the complete source file.
 */
347 static int cptpf_vfpf_mbox_init(struct otx2_cptpf_dev *cptpf, int num_vfs)
349 struct device *dev = &cptpf->pdev->dev;
353 cptpf->vfpf_mbox_wq = alloc_workqueue("cpt_vfpf_mailbox",
354 WQ_UNBOUND | WQ_HIGHPRI |
356 if (!cptpf->vfpf_mbox_wq)
359 /* Map VF-PF mailbox memory */
360 if (test_bit(CN10K_MBOX, &cptpf->cap_flag))
361 vfpf_mbox_base = readq(cptpf->reg_base + RVU_PF_VF_MBOX_ADDR);
363 vfpf_mbox_base = readq(cptpf->reg_base + RVU_PF_VF_BAR4_ADDR);
365 if (!vfpf_mbox_base) {
366 dev_err(dev, "VF-PF mailbox address not configured\n");
/* One MBOX_SIZE region per possible VF */
370 cptpf->vfpf_mbox_base = devm_ioremap_wc(dev, vfpf_mbox_base,
371 MBOX_SIZE * cptpf->max_vfs);
372 if (!cptpf->vfpf_mbox_base) {
373 dev_err(dev, "Mapping of VF-PF mailbox address failed\n");
377 err = otx2_mbox_init(&cptpf->vfpf_mbox, cptpf->vfpf_mbox_base,
378 cptpf->pdev, cptpf->reg_base, MBOX_DIR_PFVF,
383 for (i = 0; i < num_vfs; i++) {
384 cptpf->vf[i].vf_id = i;
385 cptpf->vf[i].cptpf = cptpf;
/* Bit position of this VF within its 64-bit interrupt register */
386 cptpf->vf[i].intr_idx = i % 64;
387 INIT_WORK(&cptpf->vf[i].vfpf_mbox_work,
388 otx2_cptpf_vfpf_mbox_handler);
393 destroy_workqueue(cptpf->vfpf_mbox_wq);
/* Tear down the VF<->PF mailbox workqueue and mailbox state. */
397 static void cptpf_vfpf_mbox_destroy(struct otx2_cptpf_dev *cptpf)
399 destroy_workqueue(cptpf->vfpf_mbox_wq);
400 otx2_mbox_destroy(&cptpf->vfpf_mbox);
/* Mask the AF->PF mailbox interrupt and acknowledge any pending one. */
403 static void cptpf_disable_afpf_mbox_intr(struct otx2_cptpf_dev *cptpf)
405 /* Disable AF-PF interrupt */
406 otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_INT_ENA_W1C,
408 /* Clear interrupt if any */
409 otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_INT, 0x1ULL);
/*
 * Register the AF->PF mailbox interrupt handler (device-managed, so it is
 * released automatically on driver detach), enable the interrupt and send
 * a READY message to the AF.  If the AF does not answer, the interrupt is
 * masked again and -EPROBE_DEFER is returned so probe is retried once the
 * AF driver is up.
 * NOTE(review): listing is missing interior lines (declarations, error
 * return after the failed request and the final 'return 0;') — confirm
 * against the complete source file.
 */
412 static int cptpf_register_afpf_mbox_intr(struct otx2_cptpf_dev *cptpf)
414 struct pci_dev *pdev = cptpf->pdev;
415 struct device *dev = &pdev->dev;
418 irq = pci_irq_vector(pdev, RVU_PF_INT_VEC_AFPF_MBOX);
419 /* Register AF-PF mailbox interrupt handler */
420 ret = devm_request_irq(dev, irq, otx2_cptpf_afpf_mbox_intr, 0,
421 "CPTAFPF Mbox", cptpf);
424 "IRQ registration failed for PFAF mbox irq\n");
427 /* Clear interrupt if any, to avoid spurious interrupts */
428 otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_INT, 0x1ULL);
429 /* Enable AF-PF interrupt */
430 otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_INT_ENA_W1S,
433 ret = otx2_cpt_send_ready_msg(&cptpf->afpf_mbox, cptpf->pdev);
436 "AF not responding to mailbox, deferring probe\n");
437 cptpf_disable_afpf_mbox_intr(cptpf);
438 return -EPROBE_DEFER;
/*
 * Set up the AF<->PF mailbox: allocate its workqueue, map the mailbox
 * BAR region, initialize the otx2_mbox in PF->AF direction and prepare
 * the work item running otx2_cptpf_afpf_mbox_handler().
 * NOTE(review): listing is missing interior lines (error returns, the
 * label reaching the trailing destroy_workqueue() and the final
 * 'return 0;') — confirm against the complete source file.
 */
443 static int cptpf_afpf_mbox_init(struct otx2_cptpf_dev *cptpf)
445 struct pci_dev *pdev = cptpf->pdev;
446 resource_size_t offset;
449 cptpf->afpf_mbox_wq = alloc_workqueue("cpt_afpf_mailbox",
450 WQ_UNBOUND | WQ_HIGHPRI |
452 if (!cptpf->afpf_mbox_wq)
455 offset = pci_resource_start(pdev, PCI_MBOX_BAR_NUM);
456 /* Map AF-PF mailbox memory */
457 cptpf->afpf_mbox_base = devm_ioremap_wc(&pdev->dev, offset, MBOX_SIZE);
458 if (!cptpf->afpf_mbox_base) {
459 dev_err(&pdev->dev, "Unable to map BAR4\n");
464 err = otx2_mbox_init(&cptpf->afpf_mbox, cptpf->afpf_mbox_base,
465 pdev, cptpf->reg_base, MBOX_DIR_PFAF, 1);
469 INIT_WORK(&cptpf->afpf_mbox_work, otx2_cptpf_afpf_mbox_handler);
473 destroy_workqueue(cptpf->afpf_mbox_wq);
/* Tear down the AF<->PF mailbox workqueue and mailbox state. */
477 static void cptpf_afpf_mbox_destroy(struct otx2_cptpf_dev *cptpf)
479 destroy_workqueue(cptpf->afpf_mbox_wq);
480 otx2_mbox_destroy(&cptpf->afpf_mbox);
/* sysfs 'kvf_limits' read: report the current kernel-VF LF limit. */
483 static ssize_t kvf_limits_show(struct device *dev,
484 struct device_attribute *attr, char *buf)
486 struct otx2_cptpf_dev *cptpf = dev_get_drvdata(dev);
488 return sprintf(buf, "%d\n", cptpf->kvf_limits);
/*
 * sysfs 'kvf_limits' write: parse an integer and accept it only in the
 * range [1, num_online_cpus()].
 * NOTE(review): listing is missing interior lines (declarations, the
 * error returns after kstrtoint()/range check and the 'return count;') —
 * confirm against the complete source file.
 */
491 static ssize_t kvf_limits_store(struct device *dev,
492 struct device_attribute *attr,
493 const char *buf, size_t count)
495 struct otx2_cptpf_dev *cptpf = dev_get_drvdata(dev);
499 ret = kstrtoint(buf, 0, &lfs_num);
502 if (lfs_num < 1 || lfs_num > num_online_cpus()) {
503 dev_err(dev, "lfs count %d must be in range [1 - %d]\n",
504 lfs_num, num_online_cpus());
507 cptpf->kvf_limits = lfs_num;
/* sysfs plumbing: expose the read-write 'kvf_limits' attribute. */
512 static DEVICE_ATTR_RW(kvf_limits);
513 static struct attribute *cptpf_attrs[] = {
514 &dev_attr_kvf_limits.attr,
518 static const struct attribute_group cptpf_sysfs_group = {
519 .attrs = cptpf_attrs,
/*
 * Check whether the AF has already programmed a revision for the RVUM
 * block (bits [19:12] of the block-discovery register).  A zero revision
 * means the AF driver is not up yet, so probe must be deferred.
 * NOTE(review): the 'if (!rev)' line and the final 'return 0;' are
 * missing from this listing — confirm against the complete source file.
 */
522 static int cpt_is_pf_usable(struct otx2_cptpf_dev *cptpf)
526 rev = otx2_cpt_read64(cptpf->reg_base, BLKADDR_RVUM, 0,
527 RVU_PF_BLOCK_ADDRX_DISC(BLKADDR_RVUM));
528 rev = (rev >> 12) & 0xFF;
530 * Check if AF has setup revision for RVUM block, otherwise
531 * driver probe should be deferred until AF driver comes up
534 dev_warn(&cptpf->pdev->dev,
535 "AF is not initialized, deferring probe\n");
536 return -EPROBE_DEFER;
/*
 * Reset one CPT block (blkaddr selects CPT0 or CPT1): write the reset
 * bit to CPT_AF_BLK_RST through the AF, then poll bit 63 of the same
 * register until it clears, sleeping 10-20 ms between reads with a
 * bounded number of attempts ('timeout').
 * NOTE(review): the poll loop's braces, error checks and timeout
 * handling are missing from this listing — confirm against the full
 * source file.
 *
 * Fixed: the otx2_cpt_read_af_reg() output argument had been corrupted
 * by an encoding error — '®' is the mojibake rendering of the HTML
 * entity for '&reg', i.e. the address of the local 'reg' variable.
 */
541 static int cptx_device_reset(struct otx2_cptpf_dev *cptpf, int blkaddr)
543 int timeout = 10, ret;
546 ret = otx2_cpt_write_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
547 CPT_AF_BLK_RST, 0x1, blkaddr);
552 ret = otx2_cpt_read_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
553 CPT_AF_BLK_RST, &reg, blkaddr);
557 if (!((reg >> 63) & 0x1))
560 usleep_range(10000, 20000);
/*
 * Reset the CPT block(s): CPT1 first when present, then CPT0.
 * NOTE(review): the error check after the CPT1 reset is missing from
 * this listing — confirm against the complete source file.
 */
568 static int cptpf_device_reset(struct otx2_cptpf_dev *cptpf)
572 if (cptpf->has_cpt1) {
573 ret = cptx_device_reset(cptpf, BLKADDR_CPT1);
577 return cptx_device_reset(cptpf, BLKADDR_CPT0);
/*
 * Probe the block-discovery register for BLKADDR_CPT1; bit 11 set means
 * a second CPT block is implemented on this silicon.
 */
580 static void cptpf_check_block_implemented(struct otx2_cptpf_dev *cptpf)
584 cfg = otx2_cpt_read64(cptpf->reg_base, BLKADDR_RVUM, 0,
585 RVU_PF_BLOCK_ADDRX_DISC(BLKADDR_CPT1));
586 if (cfg & BIT_ULL(11))
587 cptpf->has_cpt1 = true;
/*
 * One-time CPT device initialization: detect CPT1, reset the block(s),
 * read the SE/IE/AE engine counts from CPT_AF_CONSTANTS1 into the
 * engine-groups bookkeeping, and disable all engine cores.
 * NOTE(review): error-return lines after the reset and register read,
 * and the final return, are missing from this listing — confirm against
 * the complete source file.
 */
590 static int cptpf_device_init(struct otx2_cptpf_dev *cptpf)
592 union otx2_cptx_af_constants1 af_cnsts1 = {0};
595 /* check if 'implemented' bit is set for block BLKADDR_CPT1 */
596 cptpf_check_block_implemented(cptpf);
597 /* Reset the CPT PF device */
598 ret = cptpf_device_reset(cptpf);
602 /* Get number of SE, IE and AE engines */
603 ret = otx2_cpt_read_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
604 CPT_AF_CONSTANTS1, &af_cnsts1.u,
609 cptpf->eng_grps.avail.max_se_cnt = af_cnsts1.s.se;
610 cptpf->eng_grps.avail.max_ie_cnt = af_cnsts1.s.ie;
611 cptpf->eng_grps.avail.max_ae_cnt = af_cnsts1.s.ae;
613 /* Disable all cores */
614 ret = otx2_cpt_disable_all_cores(cptpf);
/*
 * Disable SR-IOV: turn off the VFs, unregister VF interrupts, destroy the
 * FLR workqueue and the VF<->PF mailbox, drop the module reference taken
 * in cptpf_sriov_enable() and clear the enabled-VF count.
 * NOTE(review): an early-return when no VFs are enabled appears to be
 * missing from this listing — confirm against the complete source file.
 */
619 static int cptpf_sriov_disable(struct pci_dev *pdev)
621 struct otx2_cptpf_dev *cptpf = pci_get_drvdata(pdev);
622 int num_vfs = pci_num_vf(pdev);
627 pci_disable_sriov(pdev);
628 cptpf_unregister_vfpf_intr(cptpf, num_vfs);
629 cptpf_flr_wq_destroy(cptpf);
630 cptpf_vfpf_mbox_destroy(cptpf);
631 module_put(THIS_MODULE);
632 cptpf->enabled_vfs = 0;
/*
 * Enable SR-IOV with num_vfs VFs: set up the VF<->PF mailbox, FLR
 * workqueue and VF interrupts, discover engine capabilities, create the
 * engine groups, then enable the VFs and pin the module.
 * NOTE(review): the error checks/gotos between steps and the unwind
 * labels' names are partly missing from this listing — confirm against
 * the complete source file.
 */
637 static int cptpf_sriov_enable(struct pci_dev *pdev, int num_vfs)
639 struct otx2_cptpf_dev *cptpf = pci_get_drvdata(pdev);
642 /* Initialize VF<=>PF mailbox */
643 ret = cptpf_vfpf_mbox_init(cptpf, num_vfs);
647 ret = cptpf_flr_wq_init(cptpf, num_vfs);
650 /* Register VF<=>PF mailbox interrupt */
651 ret = cptpf_register_vfpf_intr(cptpf, num_vfs);
655 /* Get CPT HW capabilities using LOAD_FVC operation. */
656 ret = otx2_cpt_discover_eng_capabilities(cptpf);
660 ret = otx2_cpt_create_eng_grps(cptpf, &cptpf->eng_grps);
664 cptpf->enabled_vfs = num_vfs;
665 ret = pci_enable_sriov(pdev, num_vfs);
669 dev_notice(&cptpf->pdev->dev, "VFs enabled: %d\n", num_vfs);
/* Keep the module pinned while VFs are active; released in sriov_disable */
671 try_module_get(THIS_MODULE);
/* Error unwind: undo the steps above in reverse order */
675 cptpf_unregister_vfpf_intr(cptpf, num_vfs);
676 cptpf->enabled_vfs = 0;
678 cptpf_flr_wq_destroy(cptpf);
680 cptpf_vfpf_mbox_destroy(cptpf);
/*
 * PCI sriov_configure callback: a non-zero num_vfs enables SR-IOV,
 * zero disables it.  (The 'if (num_vfs > 0)' line is missing from this
 * listing.)
 */
684 static int otx2_cptpf_sriov_configure(struct pci_dev *pdev, int num_vfs)
687 return cptpf_sriov_enable(pdev, num_vfs);
689 return cptpf_sriov_disable(pdev);
/*
 * PCI probe: allocate the per-device context (devm, freed automatically),
 * enable the device, set a 48-bit DMA mask, map the PF register BAR,
 * verify the AF is up (else defer), allocate MSI-X vectors, bring up the
 * AF<->PF mailbox and its interrupt, initialize LMTST (CN10K), the CPT
 * device and engine groups, and finally create the sysfs group.
 * NOTE(review): error checks/gotos between steps and several unwind
 * labels are missing from this listing — confirm against the full file.
 */
693 static int otx2_cptpf_probe(struct pci_dev *pdev,
694 const struct pci_device_id *ent)
696 struct device *dev = &pdev->dev;
697 struct otx2_cptpf_dev *cptpf;
700 cptpf = devm_kzalloc(dev, sizeof(*cptpf), GFP_KERNEL);
704 err = pcim_enable_device(pdev);
706 dev_err(dev, "Failed to enable PCI device\n");
710 err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
712 dev_err(dev, "Unable to get usable DMA configuration\n");
715 /* Map PF's configuration registers */
716 err = pcim_iomap_regions_request_all(pdev, 1 << PCI_PF_REG_BAR_NUM,
719 dev_err(dev, "Couldn't get PCI resources 0x%x\n", err);
722 pci_set_master(pdev);
723 pci_set_drvdata(pdev, cptpf);
726 cptpf->reg_base = pcim_iomap_table(pdev)[PCI_PF_REG_BAR_NUM];
728 /* Check if AF driver is up, otherwise defer probe */
729 err = cpt_is_pf_usable(cptpf);
733 err = pci_alloc_irq_vectors(pdev, RVU_PF_INT_VEC_CNT,
734 RVU_PF_INT_VEC_CNT, PCI_IRQ_MSIX);
736 dev_err(dev, "Request for %d msix vectors failed\n",
740 otx2_cpt_set_hw_caps(pdev, &cptpf->cap_flag);
741 /* Initialize AF-PF mailbox */
742 err = cptpf_afpf_mbox_init(cptpf);
745 /* Register mailbox interrupt */
746 err = cptpf_register_afpf_mbox_intr(cptpf);
748 goto destroy_afpf_mbox;
750 cptpf->max_vfs = pci_sriov_get_totalvfs(pdev);
752 err = cn10k_cptpf_lmtst_init(cptpf);
754 goto unregister_intr;
756 /* Initialize CPT PF device */
757 err = cptpf_device_init(cptpf);
759 goto unregister_intr;
761 /* Initialize engine groups */
762 err = otx2_cpt_init_eng_grps(pdev, &cptpf->eng_grps);
764 goto unregister_intr;
766 err = sysfs_create_group(&dev->kobj, &cptpf_sysfs_group);
768 goto cleanup_eng_grps;
/* Error unwind */
772 otx2_cpt_cleanup_eng_grps(pdev, &cptpf->eng_grps);
774 cptpf_disable_afpf_mbox_intr(cptpf);
776 cptpf_afpf_mbox_destroy(cptpf);
778 pci_set_drvdata(pdev, NULL);
/*
 * PCI remove: mirror of probe — disable SR-IOV, remove the sysfs group,
 * clean up engine groups, mask the AF<->PF mailbox interrupt and destroy
 * the mailbox.  Device-managed (devm/pcim) resources are released
 * automatically after this returns.
 * NOTE(review): an early NULL-drvdata check appears to be missing from
 * this listing — confirm against the complete source file.
 */
782 static void otx2_cptpf_remove(struct pci_dev *pdev)
784 struct otx2_cptpf_dev *cptpf = pci_get_drvdata(pdev);
789 cptpf_sriov_disable(pdev);
790 /* Delete sysfs entry created for kernel VF limits */
791 sysfs_remove_group(&pdev->dev.kobj, &cptpf_sysfs_group);
792 /* Cleanup engine groups */
793 otx2_cpt_cleanup_eng_grps(pdev, &cptpf->eng_grps);
794 /* Disable AF-PF mailbox interrupt */
795 cptpf_disable_afpf_mbox_intr(cptpf);
796 /* Destroy AF-PF mbox */
797 cptpf_afpf_mbox_destroy(cptpf);
798 pci_set_drvdata(pdev, NULL);
801 /* Supported devices */
802 static const struct pci_device_id otx2_cpt_id_table[] = {
803 { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, OTX2_CPT_PCI_PF_DEVICE_ID) },
804 { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, CN10K_CPT_PCI_PF_DEVICE_ID) },
805 { 0, } /* end of table */
/* PCI driver glue: probe/remove plus runtime SR-IOV configuration */
808 static struct pci_driver otx2_cpt_pci_driver = {
809 .name = OTX2_CPT_DRV_NAME,
810 .id_table = otx2_cpt_id_table,
811 .probe = otx2_cptpf_probe,
812 .remove = otx2_cptpf_remove,
813 .sriov_configure = otx2_cptpf_sriov_configure
816 module_pci_driver(otx2_cpt_pci_driver);
818 MODULE_AUTHOR("Marvell");
819 MODULE_DESCRIPTION(OTX2_CPT_DRV_STRING);
820 MODULE_LICENSE("GPL v2");
821 MODULE_DEVICE_TABLE(pci, otx2_cpt_id_table);