/*
 * Copyright (C) 2016 Cavium, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 */

#include <linux/interrupt.h>
#include <linux/module.h>

#include "cptvf.h"

#define DRV_NAME	"thunder-cptvf"
#define DRV_VERSION	"1.0"

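/*
 * Per-queue bottom-half context. Each virtual queue (VQ) gets a tasklet
 * that drains completed instructions; the wqe carries the owning VF and
 * queue number so the handler knows what to post-process.
 */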
struct cptvf_wqe {
	struct tasklet_struct twork;
	struct cpt_vf *cptvf;
	u32 qno;
};

struct cptvf_wqe_info {
	struct cptvf_wqe vq_wqe[CPT_NUM_QS_PER_VF];
};

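/*
 * Tasklet callback: runs in softirq context and post-processes the
 * completions of the VQ it was scheduled for.
 */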
static void vq_work_handler(unsigned long data)
{
	struct cptvf_wqe_info *cwqe_info = (struct cptvf_wqe_info *)data;
	struct cptvf_wqe *cwqe = &cwqe_info->vq_wqe[0];

	vq_post_process(cwqe->cptvf, cwqe->qno);
}

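/*
 * Allocate the per-VF wqe info block and arm one tasklet per queue.
 * Called from cptvf_sw_init(); undone by cleanup_worker_threads().
 */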
static int init_worker_threads(struct cpt_vf *cptvf)
{
	struct pci_dev *pdev = cptvf->pdev;
	struct cptvf_wqe_info *cwqe_info;
	int i;

	cwqe_info = kzalloc(sizeof(*cwqe_info), GFP_KERNEL);
	if (!cwqe_info)
		return -ENOMEM;

	if (cptvf->nr_queues) {
		dev_info(&pdev->dev, "Creating VQ worker threads (%d)\n",
			 cptvf->nr_queues);
	}

	for (i = 0; i < cptvf->nr_queues; i++) {
		tasklet_init(&cwqe_info->vq_wqe[i].twork, vq_work_handler,
			     (unsigned long)cwqe_info);
		cwqe_info->vq_wqe[i].qno = i;
		cwqe_info->vq_wqe[i].cptvf = cptvf;
	}

	cptvf->wqe_info = cwqe_info;

	return 0;
}

static void cleanup_worker_threads(struct cpt_vf *cptvf)
{
	struct cptvf_wqe_info *cwqe_info;
	struct pci_dev *pdev = cptvf->pdev;
	int i;

	cwqe_info = (struct cptvf_wqe_info *)cptvf->wqe_info;
	if (!cwqe_info)
		return;

	if (cptvf->nr_queues) {
		dev_info(&pdev->dev, "Cleaning VQ worker threads (%u)\n",
			 cptvf->nr_queues);
	}

	for (i = 0; i < cptvf->nr_queues; i++)
		tasklet_kill(&cwqe_info->vq_wqe[i].twork);

	kzfree(cwqe_info);
	cptvf->wqe_info = NULL;
}

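/*
 * Pending queues track instructions that have been submitted to the VQ
 * but not yet completed; one software queue per hardware queue.
 */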
static void free_pending_queues(struct pending_qinfo *pqinfo)
{
	int i;
	struct pending_queue *queue;

	for_each_pending_queue(pqinfo, queue, i) {
		if (!queue->head)
			continue;

		/* free single queue */
		kzfree(queue->head);
		queue->front = 0;
		queue->rear = 0;
	}

	pqinfo->nr_queues = 0;
}

static int alloc_pending_queues(struct pending_qinfo *pqinfo, u32 qlen,
				u32 nr_queues)
{
	u32 i;
	size_t size;
	int ret;
	struct pending_queue *queue = NULL;

	pqinfo->nr_queues = nr_queues;
	pqinfo->qlen = qlen;

	size = (qlen * sizeof(struct pending_entry));

	for_each_pending_queue(pqinfo, queue, i) {
		queue->head = kzalloc((size), GFP_KERNEL);
		if (!queue->head) {
			ret = -ENOMEM;
			goto pending_qfail;
		}

		queue->front = 0;
		queue->rear = 0;
		atomic64_set((&queue->pending_count), (0));

		/* init queue spin lock */
		spin_lock_init(&queue->lock);
	}

	return 0;

pending_qfail:
	free_pending_queues(pqinfo);

	return ret;
}

static int init_pending_queues(struct cpt_vf *cptvf, u32 qlen, u32 nr_queues)
{
	struct pci_dev *pdev = cptvf->pdev;
	int ret;

	if (!nr_queues)
		return 0;

	ret = alloc_pending_queues(&cptvf->pqinfo, qlen, nr_queues);
	if (ret) {
		dev_err(&pdev->dev, "failed to setup pending queues (%u)\n",
			nr_queues);
		return ret;
	}

	return 0;
}

static void cleanup_pending_queues(struct cpt_vf *cptvf)
{
	struct pci_dev *pdev = cptvf->pdev;

	if (!cptvf->nr_queues)
		return;

	dev_info(&pdev->dev, "Cleaning VQ pending queue (%u)\n",
		 cptvf->nr_queues);
	free_pending_queues(&cptvf->pqinfo);
}

static void free_command_queues(struct cpt_vf *cptvf,
				struct command_qinfo *cqinfo)
{
	int i;
	struct command_queue *queue = NULL;
	struct command_chunk *chunk = NULL;
	struct pci_dev *pdev = cptvf->pdev;
	struct hlist_node *node;

	/* clean up for each queue */
	for (i = 0; i < cptvf->nr_queues; i++) {
		queue = &cqinfo->queue[i];
		if (hlist_empty(&cqinfo->queue[i].chead))
			continue;

		hlist_for_each_entry_safe(chunk, node, &cqinfo->queue[i].chead,
					  nextchunk) {
			dma_free_coherent(&pdev->dev, chunk->size,
					  chunk->head, chunk->dma_addr);
			chunk->head = NULL;
			chunk->dma_addr = 0;
			hlist_del(&chunk->nextchunk);
			kzfree(chunk);
		}

		queue->nchunks = 0;
		queue->idx = 0;
	}
	/* common cleanup */
	cqinfo->cmd_size = 0;
}

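/*
 * A command queue is built from DMA-coherent chunks linked into a list.
 * Each chunk reserves CPT_NEXT_CHUNK_PTR_SIZE bytes at its tail for the
 * bus address of the next chunk, and the last chunk points back at the
 * first so the hardware sees one circular queue.
 */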
static int alloc_command_queues(struct cpt_vf *cptvf,
				struct command_qinfo *cqinfo, size_t cmd_size,
				u32 qlen)
{
	int i;
	size_t q_size;
	struct command_queue *queue = NULL;
	struct pci_dev *pdev = cptvf->pdev;

	/* common init */
	cqinfo->cmd_size = cmd_size;
	/* Qsize in dwords, needed for SADDR config, 1-next chunk pointer */
	cptvf->qsize = min(qlen, cqinfo->qchunksize) *
			CPT_NEXT_CHUNK_PTR_SIZE + 1;
	/* Qsize in bytes to create space for alignment */
	q_size = qlen * cqinfo->cmd_size;

	/* per queue initialization */
	for (i = 0; i < cptvf->nr_queues; i++) {
		size_t c_size = 0;
		size_t rem_q_size = q_size;
		struct command_chunk *curr = NULL, *first = NULL, *last = NULL;
		u32 qcsize_bytes = cqinfo->qchunksize * cqinfo->cmd_size;

		queue = &cqinfo->queue[i];
		INIT_HLIST_HEAD(&cqinfo->queue[i].chead);
		do {
			curr = kzalloc(sizeof(*curr), GFP_KERNEL);
			if (!curr)
				goto cmd_qfail;

			c_size = (rem_q_size > qcsize_bytes) ? qcsize_bytes :
					rem_q_size;
			curr->head = (u8 *)dma_zalloc_coherent(&pdev->dev,
					c_size + CPT_NEXT_CHUNK_PTR_SIZE,
					&curr->dma_addr, GFP_KERNEL);
			if (!curr->head) {
				dev_err(&pdev->dev, "Command Q (%d) chunk (%d) allocation failed\n",
					i, queue->nchunks);
				kfree(curr);
				goto cmd_qfail;
			}

			curr->size = c_size;
			if (queue->nchunks == 0) {
				hlist_add_head(&curr->nextchunk,
					       &cqinfo->queue[i].chead);
				first = curr;
			} else {
				hlist_add_behind(&curr->nextchunk,
						 &last->nextchunk);
			}

			queue->nchunks++;
			rem_q_size -= c_size;
			if (last)
				*((u64 *)(&last->head[last->size])) = (u64)curr->dma_addr;

			last = curr;
		} while (rem_q_size);

		/* Make the queue circular */
		/* Tie back last chunk entry to head */
		curr = first;
		*((u64 *)(&last->head[last->size])) = (u64)curr->dma_addr;
		queue->qhead = curr;
		spin_lock_init(&queue->lock);
	}
	return 0;

cmd_qfail:
	free_command_queues(cptvf, cqinfo);
	return -ENOMEM;
}

static int init_command_queues(struct cpt_vf *cptvf, u32 qlen)
{
	struct pci_dev *pdev = cptvf->pdev;
	int ret;

	/* setup AE command queues */
	ret = alloc_command_queues(cptvf, &cptvf->cqinfo, CPT_INST_SIZE,
				   qlen);
	if (ret) {
		dev_err(&pdev->dev, "failed to allocate AE command queues (%u)\n",
			cptvf->nr_queues);
		return ret;
	}

	return ret;
}

static void cleanup_command_queues(struct cpt_vf *cptvf)
{
	struct pci_dev *pdev = cptvf->pdev;

	if (!cptvf->nr_queues)
		return;

	dev_info(&pdev->dev, "Cleaning VQ command queue (%u)\n",
		 cptvf->nr_queues);
	free_command_queues(cptvf, &cptvf->cqinfo);
}

static void cptvf_sw_cleanup(struct cpt_vf *cptvf)
{
	cleanup_worker_threads(cptvf);
	cleanup_pending_queues(cptvf);
	cleanup_command_queues(cptvf);
}

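/*
 * Allocate all software resources for this VF: command queues, pending
 * queues and the per-queue worker tasklets, unwinding in reverse on error.
 */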
static int cptvf_sw_init(struct cpt_vf *cptvf, u32 qlen, u32 nr_queues)
{
	struct pci_dev *pdev = cptvf->pdev;
	int ret = 0;
	u32 max_dev_queues = 0;

	max_dev_queues = CPT_NUM_QS_PER_VF;
	nr_queues = min_t(u32, nr_queues, max_dev_queues);
	cptvf->nr_queues = nr_queues;

	ret = init_command_queues(cptvf, qlen);
	if (ret) {
		dev_err(&pdev->dev, "Failed to setup command queues (%u)\n",
			nr_queues);
		return ret;
	}

	ret = init_pending_queues(cptvf, qlen, nr_queues);
	if (ret) {
		dev_err(&pdev->dev, "Failed to setup pending queues (%u)\n",
			nr_queues);
		goto setup_pqfail;
	}

	/* Create worker threads for BH processing */
	ret = init_worker_threads(cptvf);
	if (ret) {
		dev_err(&pdev->dev, "Failed to setup worker threads\n");
		goto init_wqfail;
	}

	return 0;

init_wqfail:
	cleanup_worker_threads(cptvf);
	cleanup_pending_queues(cptvf);

setup_pqfail:
	cleanup_command_queues(cptvf);

	return ret;
}

static void cptvf_free_irq_affinity(struct cpt_vf *cptvf, int vec)
{
	irq_set_affinity_hint(pci_irq_vector(cptvf->pdev, vec), NULL);
	free_cpumask_var(cptvf->affinity_mask[vec]);
}

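/*
 * The helpers below follow a common read-modify-write pattern on the
 * per-VQ CSRs: read the 64-bit register, update one bitfield through the
 * union view, and write the whole register back.
 */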
static void cptvf_write_vq_ctl(struct cpt_vf *cptvf, bool val)
{
	union cptx_vqx_ctl vqx_ctl;

	vqx_ctl.u = cpt_read_csr64(cptvf->reg_base, CPTX_VQX_CTL(0, 0));
	vqx_ctl.s.ena = val;
	cpt_write_csr64(cptvf->reg_base, CPTX_VQX_CTL(0, 0), vqx_ctl.u);
}

void cptvf_write_vq_doorbell(struct cpt_vf *cptvf, u32 val)
{
	union cptx_vqx_doorbell vqx_dbell;

	vqx_dbell.u = cpt_read_csr64(cptvf->reg_base,
				     CPTX_VQX_DOORBELL(0, 0));
	vqx_dbell.s.dbell_cnt = val * 8; /* Num of Instructions * 8 words */
	cpt_write_csr64(cptvf->reg_base, CPTX_VQX_DOORBELL(0, 0),
			vqx_dbell.u);
}

static void cptvf_write_vq_inprog(struct cpt_vf *cptvf, u8 val)
{
	union cptx_vqx_inprog vqx_inprg;

	vqx_inprg.u = cpt_read_csr64(cptvf->reg_base, CPTX_VQX_INPROG(0, 0));
	vqx_inprg.s.inflight = val;
	cpt_write_csr64(cptvf->reg_base, CPTX_VQX_INPROG(0, 0), vqx_inprg.u);
}

static void cptvf_write_vq_done_numwait(struct cpt_vf *cptvf, u32 val)
{
	union cptx_vqx_done_wait vqx_dwait;

	vqx_dwait.u = cpt_read_csr64(cptvf->reg_base,
				     CPTX_VQX_DONE_WAIT(0, 0));
	vqx_dwait.s.num_wait = val;
	cpt_write_csr64(cptvf->reg_base, CPTX_VQX_DONE_WAIT(0, 0),
			vqx_dwait.u);
}

static void cptvf_write_vq_done_timewait(struct cpt_vf *cptvf, u16 time)
{
	union cptx_vqx_done_wait vqx_dwait;

	vqx_dwait.u = cpt_read_csr64(cptvf->reg_base,
				     CPTX_VQX_DONE_WAIT(0, 0));
	vqx_dwait.s.time_wait = time;
	cpt_write_csr64(cptvf->reg_base, CPTX_VQX_DONE_WAIT(0, 0),
			vqx_dwait.u);
}

static void cptvf_enable_swerr_interrupts(struct cpt_vf *cptvf)
{
	union cptx_vqx_misc_ena_w1s vqx_misc_ena;

	vqx_misc_ena.u = cpt_read_csr64(cptvf->reg_base,
					CPTX_VQX_MISC_ENA_W1S(0, 0));
	/* Enable SWERR interrupts for the requested VF */
	vqx_misc_ena.s.swerr = 1;
	cpt_write_csr64(cptvf->reg_base, CPTX_VQX_MISC_ENA_W1S(0, 0),
			vqx_misc_ena.u);
}

static void cptvf_enable_mbox_interrupts(struct cpt_vf *cptvf)
{
	union cptx_vqx_misc_ena_w1s vqx_misc_ena;

	vqx_misc_ena.u = cpt_read_csr64(cptvf->reg_base,
					CPTX_VQX_MISC_ENA_W1S(0, 0));
	/* Enable mbox(0) interrupts for the requested VF */
	vqx_misc_ena.s.mbox = 1;
	cpt_write_csr64(cptvf->reg_base, CPTX_VQX_MISC_ENA_W1S(0, 0),
			vqx_misc_ena.u);
}

static void cptvf_enable_done_interrupts(struct cpt_vf *cptvf)
{
	union cptx_vqx_done_ena_w1s vqx_done_ena;

	vqx_done_ena.u = cpt_read_csr64(cptvf->reg_base,
					CPTX_VQX_DONE_ENA_W1S(0, 0));
	/* Enable DONE interrupts for the requested VF */
	vqx_done_ena.s.done = 1;
	cpt_write_csr64(cptvf->reg_base, CPTX_VQX_DONE_ENA_W1S(0, 0),
			vqx_done_ena.u);
}

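/*
 * The MISC_INT bits are write-one-to-clear (W1C); each helper below acks
 * exactly one interrupt cause.
 */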
static void cptvf_clear_dovf_intr(struct cpt_vf *cptvf)
{
	union cptx_vqx_misc_int vqx_misc_int;

	vqx_misc_int.u = cpt_read_csr64(cptvf->reg_base,
					CPTX_VQX_MISC_INT(0, 0));
	/* W1C for the VF */
	vqx_misc_int.s.dovf = 1;
	cpt_write_csr64(cptvf->reg_base, CPTX_VQX_MISC_INT(0, 0),
			vqx_misc_int.u);
}

static void cptvf_clear_irde_intr(struct cpt_vf *cptvf)
{
	union cptx_vqx_misc_int vqx_misc_int;

	vqx_misc_int.u = cpt_read_csr64(cptvf->reg_base,
					CPTX_VQX_MISC_INT(0, 0));
	/* W1C for the VF */
	vqx_misc_int.s.irde = 1;
	cpt_write_csr64(cptvf->reg_base, CPTX_VQX_MISC_INT(0, 0),
			vqx_misc_int.u);
}

static void cptvf_clear_nwrp_intr(struct cpt_vf *cptvf)
{
	union cptx_vqx_misc_int vqx_misc_int;

	vqx_misc_int.u = cpt_read_csr64(cptvf->reg_base,
					CPTX_VQX_MISC_INT(0, 0));
	/* W1C for the VF */
	vqx_misc_int.s.nwrp = 1;
	cpt_write_csr64(cptvf->reg_base, CPTX_VQX_MISC_INT(0, 0),
			vqx_misc_int.u);
}

static void cptvf_clear_mbox_intr(struct cpt_vf *cptvf)
{
	union cptx_vqx_misc_int vqx_misc_int;

	vqx_misc_int.u = cpt_read_csr64(cptvf->reg_base,
					CPTX_VQX_MISC_INT(0, 0));
	/* W1C for the VF */
	vqx_misc_int.s.mbox = 1;
	cpt_write_csr64(cptvf->reg_base, CPTX_VQX_MISC_INT(0, 0),
			vqx_misc_int.u);
}

static void cptvf_clear_swerr_intr(struct cpt_vf *cptvf)
{
	union cptx_vqx_misc_int vqx_misc_int;

	vqx_misc_int.u = cpt_read_csr64(cptvf->reg_base,
					CPTX_VQX_MISC_INT(0, 0));
	/* W1C for the VF */
	vqx_misc_int.s.swerr = 1;
	cpt_write_csr64(cptvf->reg_base, CPTX_VQX_MISC_INT(0, 0),
			vqx_misc_int.u);
}

static u64 cptvf_read_vf_misc_intr_status(struct cpt_vf *cptvf)
{
	return cpt_read_csr64(cptvf->reg_base, CPTX_VQX_MISC_INT(0, 0));
}

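/*
 * MISC interrupt handler: demultiplexes mailbox, doorbell-overflow, NCB
 * read/write and software error causes, acking each one it services.
 */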
static irqreturn_t cptvf_misc_intr_handler(int irq, void *cptvf_irq)
{
	struct cpt_vf *cptvf = (struct cpt_vf *)cptvf_irq;
	struct pci_dev *pdev = cptvf->pdev;
	u64 intr;

	intr = cptvf_read_vf_misc_intr_status(cptvf);
	/* Check for MISC interrupt types */
	if (likely(intr & CPT_VF_INTR_MBOX_MASK)) {
		dev_dbg(&pdev->dev, "Mailbox interrupt 0x%llx on CPT VF %d\n",
			intr, cptvf->vfid);
		cptvf_handle_mbox_intr(cptvf);
		cptvf_clear_mbox_intr(cptvf);
	} else if (unlikely(intr & CPT_VF_INTR_DOVF_MASK)) {
		cptvf_clear_dovf_intr(cptvf);
		/* Clear doorbell count */
		cptvf_write_vq_doorbell(cptvf, 0);
		dev_err(&pdev->dev, "Doorbell overflow error interrupt 0x%llx on CPT VF %d\n",
			intr, cptvf->vfid);
	} else if (unlikely(intr & CPT_VF_INTR_IRDE_MASK)) {
		cptvf_clear_irde_intr(cptvf);
		dev_err(&pdev->dev, "Instruction NCB read error interrupt 0x%llx on CPT VF %d\n",
			intr, cptvf->vfid);
	} else if (unlikely(intr & CPT_VF_INTR_NWRP_MASK)) {
		cptvf_clear_nwrp_intr(cptvf);
		dev_err(&pdev->dev, "NCB response write error interrupt 0x%llx on CPT VF %d\n",
			intr, cptvf->vfid);
	} else if (unlikely(intr & CPT_VF_INTR_SERR_MASK)) {
		cptvf_clear_swerr_intr(cptvf);
		dev_err(&pdev->dev, "Software error interrupt 0x%llx on CPT VF %d\n",
			intr, cptvf->vfid);
	} else {
		dev_err(&pdev->dev, "Unhandled interrupt in CPT VF %d\n",
			cptvf->vfid);
	}

	return IRQ_HANDLED;
}

static inline struct cptvf_wqe *get_cptvf_vq_wqe(struct cpt_vf *cptvf,
						 int qno)
{
	struct cptvf_wqe_info *nwqe_info;

	if (unlikely(qno >= cptvf->nr_queues))
		return NULL;
	nwqe_info = (struct cptvf_wqe_info *)cptvf->wqe_info;

	return &nwqe_info->vq_wqe[qno];
}

static inline u32 cptvf_read_vq_done_count(struct cpt_vf *cptvf)
{
	union cptx_vqx_done vqx_done;

	vqx_done.u = cpt_read_csr64(cptvf->reg_base, CPTX_VQX_DONE(0, 0));
	return vqx_done.s.done;
}

static inline void cptvf_write_vq_done_ack(struct cpt_vf *cptvf,
					   u32 ackcnt)
{
	union cptx_vqx_done_ack vqx_dack_cnt;

	vqx_dack_cnt.u = cpt_read_csr64(cptvf->reg_base,
					CPTX_VQX_DONE_ACK(0, 0));
	vqx_dack_cnt.s.done_ack = ackcnt;
	cpt_write_csr64(cptvf->reg_base, CPTX_VQX_DONE_ACK(0, 0),
			vqx_dack_cnt.u);
}

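/*
 * DONE interrupt handler: reads the completion count, acks it, and kicks
 * the queue 0 tasklet so the actual post-processing runs in bottom half.
 */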
static irqreturn_t cptvf_done_intr_handler(int irq, void *cptvf_irq)
{
	struct cpt_vf *cptvf = (struct cpt_vf *)cptvf_irq;
	struct pci_dev *pdev = cptvf->pdev;
	/* Read the number of completions */
	u32 intr = cptvf_read_vq_done_count(cptvf);

	if (intr) {
		struct cptvf_wqe *wqe;

		/* Acknowledge the number of
		 * scheduled completions for processing
		 */
		cptvf_write_vq_done_ack(cptvf, intr);
		wqe = get_cptvf_vq_wqe(cptvf, 0);
		if (unlikely(!wqe)) {
			dev_err(&pdev->dev, "No work to schedule for VF (%d)",
				cptvf->vfid);
			return IRQ_NONE;
		}
		tasklet_hi_schedule(&wqe->twork);
	}

	return IRQ_HANDLED;
}

static void cptvf_set_irq_affinity(struct cpt_vf *cptvf, int vec)
{
	struct pci_dev *pdev = cptvf->pdev;
	int cpu;

	if (!zalloc_cpumask_var(&cptvf->affinity_mask[vec],
				GFP_KERNEL)) {
		dev_err(&pdev->dev, "Allocation failed for affinity_mask for VF %d",
			cptvf->vfid);
		return;
	}

	cpu = cptvf->vfid % num_online_cpus();
	cpumask_set_cpu(cpumask_local_spread(cpu, cptvf->node),
			cptvf->affinity_mask[vec]);
	irq_set_affinity_hint(pci_irq_vector(pdev, vec),
			      cptvf->affinity_mask[vec]);
}

static void cptvf_write_vq_saddr(struct cpt_vf *cptvf, u64 val)
{
	union cptx_vqx_saddr vqx_saddr;

	vqx_saddr.u = val;
	cpt_write_csr64(cptvf->reg_base, CPTX_VQX_SADDR(0, 0), vqx_saddr.u);
}

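/*
 * Bring the VQ up: disable it, clear doorbell/inflight state, program the
 * command queue base address and completion coalescing, then re-enable it
 * and mark the device ready.
 */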
void cptvf_device_init(struct cpt_vf *cptvf)
{
	u64 base_addr = 0;

	/* Disable the VQ */
	cptvf_write_vq_ctl(cptvf, 0);
	/* Reset the doorbell */
	cptvf_write_vq_doorbell(cptvf, 0);
	/* Clear inflight count */
	cptvf_write_vq_inprog(cptvf, 0);
	/* Write VQ SADDR */
	/* TODO: for now only one queue, so hard coded */
	base_addr = (u64)(cptvf->cqinfo.queue[0].qhead->dma_addr);
	cptvf_write_vq_saddr(cptvf, base_addr);
	/* Configure timerhold / coalescence */
	cptvf_write_vq_done_timewait(cptvf, CPT_TIMER_THOLD);
	cptvf_write_vq_done_numwait(cptvf, 1);
	/* Enable the VQ */
	cptvf_write_vq_ctl(cptvf, 1);
	/* Flag the VF ready */
	cptvf->flags |= CPT_FLAG_DEVICE_READY;
}

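/*
 * Probe sequence: enable PCI resources, map BAR0 CSRs, set up MSI-X and
 * the MISC vector, handshake with the PF (READY/QLEN/GRP/PRIO), allocate
 * software queues, then hook the DONE vector and register crypto algs.
 */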
static int cptvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct device *dev = &pdev->dev;
	struct cpt_vf *cptvf;
	int err;

	cptvf = devm_kzalloc(dev, sizeof(*cptvf), GFP_KERNEL);
	if (!cptvf)
		return -ENOMEM;

	pci_set_drvdata(pdev, cptvf);
	cptvf->pdev = pdev;
	err = pci_enable_device(pdev);
	if (err) {
		dev_err(dev, "Failed to enable PCI device\n");
		pci_set_drvdata(pdev, NULL);
		return err;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(dev, "PCI request regions failed 0x%x\n", err);
		goto cptvf_err_disable_device;
	}
	/* Mark as VF driver */
	cptvf->flags |= CPT_FLAG_VF_DRIVER;
	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(48));
	if (err) {
		dev_err(dev, "Unable to get usable DMA configuration\n");
		goto cptvf_err_release_regions;
	}

	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(48));
	if (err) {
		dev_err(dev, "Unable to get 48-bit DMA for consistent allocations\n");
		goto cptvf_err_release_regions;
	}

	/* MAP PF's configuration registers */
	cptvf->reg_base = pcim_iomap(pdev, 0, 0);
	if (!cptvf->reg_base) {
		dev_err(dev, "Cannot map config register space, aborting\n");
		err = -ENOMEM;
		goto cptvf_err_release_regions;
	}

	cptvf->node = dev_to_node(&pdev->dev);
	err = pci_alloc_irq_vectors(pdev, CPT_VF_MSIX_VECTORS,
				    CPT_VF_MSIX_VECTORS, PCI_IRQ_MSIX);
	if (err < 0) {
		dev_err(dev, "Request for #%d msix vectors failed\n",
			CPT_VF_MSIX_VECTORS);
		goto cptvf_err_release_regions;
	}

	err = request_irq(pci_irq_vector(pdev, CPT_VF_INT_VEC_E_MISC),
			  cptvf_misc_intr_handler, 0, "CPT VF misc intr",
			  cptvf);
	if (err) {
		dev_err(dev, "Request misc irq failed");
		goto cptvf_free_vectors;
	}

	/* Enable mailbox interrupt */
	cptvf_enable_mbox_interrupts(cptvf);
	cptvf_enable_swerr_interrupts(cptvf);

	/* Check ready with PF */
	/* Gets chip ID / device Id from PF if ready */
	err = cptvf_check_pf_ready(cptvf);
	if (err) {
		dev_err(dev, "PF not responding to READY msg");
		goto cptvf_free_misc_irq;
	}

	/* CPT VF software resources initialization */
	cptvf->cqinfo.qchunksize = CPT_CMD_QCHUNK_SIZE;
	err = cptvf_sw_init(cptvf, CPT_CMD_QLEN, CPT_NUM_QS_PER_VF);
	if (err) {
		dev_err(dev, "cptvf_sw_init() failed");
		goto cptvf_free_misc_irq;
	}
	/* Convey VQ LEN to PF */
	err = cptvf_send_vq_size_msg(cptvf);
	if (err) {
		dev_err(dev, "PF not responding to QLEN msg");
		goto cptvf_free_misc_irq;
	}

	/* CPT VF device initialization */
	cptvf_device_init(cptvf);
	/* Send msg to PF to assign current Q to required group */
	err = cptvf_send_vf_to_grp_msg(cptvf);
	if (err) {
		dev_err(dev, "PF not responding to VF_GRP msg");
		goto cptvf_free_misc_irq;
	}

	err = cptvf_send_vf_priority_msg(cptvf);
	if (err) {
		dev_err(dev, "PF not responding to VF_PRIO msg");
		goto cptvf_free_misc_irq;
	}

	err = request_irq(pci_irq_vector(pdev, CPT_VF_INT_VEC_E_DONE),
			  cptvf_done_intr_handler, 0, "CPT VF done intr",
			  cptvf);
	if (err) {
		dev_err(dev, "Request done irq failed\n");
		goto cptvf_free_misc_irq;
	}

	/* Enable done interrupt */
	cptvf_enable_done_interrupts(cptvf);

	/* Set irq affinity masks */
	cptvf_set_irq_affinity(cptvf, CPT_VF_INT_VEC_E_MISC);
	cptvf_set_irq_affinity(cptvf, CPT_VF_INT_VEC_E_DONE);

	err = cptvf_send_vf_up(cptvf);
	if (err) {
		dev_err(dev, "PF not responding to UP msg");
		goto cptvf_free_irq_affinity;
	}

	err = cvm_crypto_init(cptvf);
	if (err) {
		dev_err(dev, "Algorithm register failed\n");
		goto cptvf_free_irq_affinity;
	}

	return 0;

cptvf_free_irq_affinity:
	cptvf_free_irq_affinity(cptvf, CPT_VF_INT_VEC_E_DONE);
	cptvf_free_irq_affinity(cptvf, CPT_VF_INT_VEC_E_MISC);
	/* The DONE vector was requested above; release it on this path too */
	free_irq(pci_irq_vector(pdev, CPT_VF_INT_VEC_E_DONE), cptvf);
cptvf_free_misc_irq:
	free_irq(pci_irq_vector(pdev, CPT_VF_INT_VEC_E_MISC), cptvf);
cptvf_free_vectors:
	pci_free_irq_vectors(cptvf->pdev);
cptvf_err_release_regions:
	pci_release_regions(pdev);
cptvf_err_disable_device:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

	return err;
}

static void cptvf_remove(struct pci_dev *pdev)
{
	struct cpt_vf *cptvf = pci_get_drvdata(pdev);

	if (!cptvf) {
		dev_err(&pdev->dev, "Invalid CPT-VF device\n");
		return;
	}

	/* Convey DOWN to PF */
	if (cptvf_send_vf_down(cptvf)) {
		dev_err(&pdev->dev, "PF not responding to DOWN msg");
	} else {
		cptvf_free_irq_affinity(cptvf, CPT_VF_INT_VEC_E_DONE);
		cptvf_free_irq_affinity(cptvf, CPT_VF_INT_VEC_E_MISC);
		free_irq(pci_irq_vector(pdev, CPT_VF_INT_VEC_E_DONE), cptvf);
		free_irq(pci_irq_vector(pdev, CPT_VF_INT_VEC_E_MISC), cptvf);
		pci_free_irq_vectors(cptvf->pdev);
		cptvf_sw_cleanup(cptvf);
		pci_set_drvdata(pdev, NULL);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		cvm_crypto_exit();
	}
}

static void cptvf_shutdown(struct pci_dev *pdev)
{
	cptvf_remove(pdev);
}

/* Supported devices */
static const struct pci_device_id cptvf_id_table[] = {
	{PCI_VDEVICE(CAVIUM, CPT_81XX_PCI_VF_DEVICE_ID), 0},
	{ 0, }	/* end of table */
};

static struct pci_driver cptvf_pci_driver = {
	.name = DRV_NAME,
	.id_table = cptvf_id_table,
	.probe = cptvf_probe,
	.remove = cptvf_remove,
	.shutdown = cptvf_shutdown,
};

module_pci_driver(cptvf_pci_driver);

MODULE_AUTHOR("George Cherian <george.cherian@cavium.com>");
MODULE_DESCRIPTION("Cavium Thunder CPT Virtual Function Driver");
MODULE_LICENSE("GPL v2");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, cptvf_id_table);