// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2016 Cavium, Inc.
 */

#include <linux/interrupt.h>
#include <linux/module.h>

#include "cptvf.h"

#define DRV_NAME	"thunder-cptvf"
#define DRV_VERSION	"1.0"

struct cptvf_wqe {
	struct tasklet_struct twork;
	void *cptvf;
	u32 qno;
};

struct cptvf_wqe_info {
	struct cptvf_wqe vq_wqe[CPT_NUM_QS_PER_VF];
};

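/*
 * Completion processing model: the DONE interrupt handler only acknowledges
 * the hardware completion count and schedules the per-queue tasklet below;
 * the actual post-processing of finished instructions happens in bottom-half
 * context via vq_post_process().
 */
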
static void vq_work_handler(unsigned long data)
{
	struct cptvf_wqe_info *cwqe_info = (struct cptvf_wqe_info *)data;
	struct cptvf_wqe *cwqe = &cwqe_info->vq_wqe[0];

	vq_post_process(cwqe->cptvf, cwqe->qno);
}

static int init_worker_threads(struct cpt_vf *cptvf)
{
	struct pci_dev *pdev = cptvf->pdev;
	struct cptvf_wqe_info *cwqe_info;
	int i;

	cwqe_info = kzalloc(sizeof(*cwqe_info), GFP_KERNEL);
	if (!cwqe_info)
		return -ENOMEM;

	if (cptvf->nr_queues) {
		dev_info(&pdev->dev, "Creating VQ worker threads (%d)\n",
			 cptvf->nr_queues);
	}

	for (i = 0; i < cptvf->nr_queues; i++) {
		tasklet_init(&cwqe_info->vq_wqe[i].twork, vq_work_handler,
			     (u64)cwqe_info);
		cwqe_info->vq_wqe[i].qno = i;
		cwqe_info->vq_wqe[i].cptvf = cptvf;
	}

	cptvf->wqe_info = cwqe_info;

	return 0;
}

static void cleanup_worker_threads(struct cpt_vf *cptvf)
{
	struct cptvf_wqe_info *cwqe_info;
	struct pci_dev *pdev = cptvf->pdev;
	int i;

	cwqe_info = (struct cptvf_wqe_info *)cptvf->wqe_info;
	if (!cwqe_info)
		return;

	if (cptvf->nr_queues) {
		dev_info(&pdev->dev, "Cleaning VQ worker threads (%u)\n",
			 cptvf->nr_queues);
	}

	for (i = 0; i < cptvf->nr_queues; i++)
		tasklet_kill(&cwqe_info->vq_wqe[i].twork);

	kfree_sensitive(cwqe_info);
	cptvf->wqe_info = NULL;
}

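/*
 * Pending queues are software-only rings of pending_entry slots, one ring
 * per VQ, tracking requests submitted to hardware until their completions
 * have been post-processed by the request manager. Each ring is guarded by
 * its own spinlock; pending_count is an atomic64, presumably so it can be
 * inspected without taking the lock.
 */
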
static void free_pending_queues(struct pending_qinfo *pqinfo)
{
	int i;
	struct pending_queue *queue;

	for_each_pending_queue(pqinfo, queue, i) {
		if (!queue->head)
			continue;

		/* free single queue */
		kfree_sensitive(queue->head);
		queue->front = 0;
		queue->rear = 0;
	}

	pqinfo->qlen = 0;
	pqinfo->nr_queues = 0;
}

static int alloc_pending_queues(struct pending_qinfo *pqinfo, u32 qlen,
				u32 nr_queues)
{
	u32 i;
	int ret;
	size_t size;
	struct pending_queue *queue = NULL;

	pqinfo->nr_queues = nr_queues;
	pqinfo->qlen = qlen;

	size = qlen * sizeof(struct pending_entry);

	for_each_pending_queue(pqinfo, queue, i) {
		queue->head = kzalloc(size, GFP_KERNEL);
		if (!queue->head) {
			ret = -ENOMEM;
			goto pending_qfail;
		}

		queue->front = 0;
		queue->rear = 0;
		atomic64_set(&queue->pending_count, 0);

		/* init queue spin lock */
		spin_lock_init(&queue->lock);
	}

	return 0;

pending_qfail:
	free_pending_queues(pqinfo);

	return ret;
}

static int init_pending_queues(struct cpt_vf *cptvf, u32 qlen, u32 nr_queues)
{
	struct pci_dev *pdev = cptvf->pdev;
	int ret;

	if (!nr_queues)
		return 0;

	ret = alloc_pending_queues(&cptvf->pqinfo, qlen, nr_queues);
	if (ret) {
		dev_err(&pdev->dev, "failed to setup pending queues (%u)\n",
			nr_queues);
		return ret;
	}

	return 0;
}

static void cleanup_pending_queues(struct cpt_vf *cptvf)
{
	struct pci_dev *pdev = cptvf->pdev;

	if (!cptvf->nr_queues)
		return;

	dev_info(&pdev->dev, "Cleaning VQ pending queue (%u)\n",
		 cptvf->nr_queues);
	free_pending_queues(&cptvf->pqinfo);
}

static void free_command_queues(struct cpt_vf *cptvf,
				struct command_qinfo *cqinfo)
{
	int i;
	struct command_queue *queue = NULL;
	struct command_chunk *chunk = NULL;
	struct pci_dev *pdev = cptvf->pdev;
	struct hlist_node *node;

	/* clean up for each queue */
	for (i = 0; i < cptvf->nr_queues; i++) {
		queue = &cqinfo->queue[i];
		if (hlist_empty(&cqinfo->queue[i].chead))
			continue;

		hlist_for_each_entry_safe(chunk, node, &cqinfo->queue[i].chead,
					  nextchunk) {
			dma_free_coherent(&pdev->dev, chunk->size,
					  chunk->head,
					  chunk->dma_addr);
			chunk->head = NULL;
			chunk->dma_addr = 0;
			hlist_del(&chunk->nextchunk);
			kfree_sensitive(chunk);
		}

		queue->nchunks = 0;
		queue->idx = 0;
	}

	/* common cleanup */
	cqinfo->cmd_size = 0;
}

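/*
 * Command queue layout: each VQ is built from DMA-coherent chunks of
 * qchunksize commands. Every chunk is allocated with CPT_NEXT_CHUNK_PTR_SIZE
 * extra bytes at its tail, into which the DMA address of the next chunk is
 * written; the last chunk is tied back to the first, making the queue
 * circular so the hardware can walk it indefinitely. The first chunk's DMA
 * address is later programmed into VQX_SADDR by cptvf_device_init().
 */
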
static int alloc_command_queues(struct cpt_vf *cptvf,
				struct command_qinfo *cqinfo, size_t cmd_size,
				u32 qlen)
{
	int i;
	size_t q_size;
	struct command_queue *queue = NULL;
	struct pci_dev *pdev = cptvf->pdev;

	/* common init */
	cqinfo->cmd_size = cmd_size;
	/* Qsize in dwords, needed for SADDR config, 1-next chunk pointer */
	cptvf->qsize = min(qlen, cqinfo->qchunksize) *
			CPT_NEXT_CHUNK_PTR_SIZE + 1;
	/* Qsize in bytes to create space for alignment */
	q_size = qlen * cqinfo->cmd_size;

	/* per queue initialization */
	for (i = 0; i < cptvf->nr_queues; i++) {
		size_t c_size = 0;
		size_t rem_q_size = q_size;
		struct command_chunk *curr = NULL, *first = NULL, *last = NULL;
		u32 qcsize_bytes = cqinfo->qchunksize * cqinfo->cmd_size;

		queue = &cqinfo->queue[i];
		INIT_HLIST_HEAD(&cqinfo->queue[i].chead);
		do {
			curr = kzalloc(sizeof(*curr), GFP_KERNEL);
			if (!curr)
				goto cmd_qfail;

			c_size = (rem_q_size > qcsize_bytes) ? qcsize_bytes :
					rem_q_size;
			curr->head = dma_alloc_coherent(&pdev->dev,
							c_size + CPT_NEXT_CHUNK_PTR_SIZE,
							&curr->dma_addr,
							GFP_KERNEL);
			if (!curr->head) {
				dev_err(&pdev->dev, "Command Q (%d) chunk (%d) allocation failed\n",
					i, queue->nchunks);
				kfree(curr);
				goto cmd_qfail;
			}

			curr->size = c_size;
			if (queue->nchunks == 0) {
				hlist_add_head(&curr->nextchunk,
					       &cqinfo->queue[i].chead);
				first = curr;
			} else {
				hlist_add_behind(&curr->nextchunk,
						 &last->nextchunk);
			}

			queue->nchunks++;
			rem_q_size -= c_size;
			if (last)
				*((u64 *)(&last->head[last->size])) =
					(u64)curr->dma_addr;

			last = curr;
		} while (rem_q_size);

		/* Make the queue circular */
		/* Tie back last chunk entry to head */
		curr = first;
		*((u64 *)(&last->head[last->size])) = (u64)curr->dma_addr;
		queue->qhead = curr;
		spin_lock_init(&queue->lock);
	}
	return 0;

cmd_qfail:
	free_command_queues(cptvf, cqinfo);

	return -ENOMEM;
}

static int init_command_queues(struct cpt_vf *cptvf, u32 qlen)
{
	struct pci_dev *pdev = cptvf->pdev;
	int ret;

	/* setup AE command queues */
	ret = alloc_command_queues(cptvf, &cptvf->cqinfo, CPT_INST_SIZE,
				   qlen);
	if (ret) {
		dev_err(&pdev->dev, "failed to allocate AE command queues (%u)\n",
			cptvf->nr_queues);
		return ret;
	}

	return ret;
}

static void cleanup_command_queues(struct cpt_vf *cptvf)
{
	struct pci_dev *pdev = cptvf->pdev;

	if (!cptvf->nr_queues)
		return;

	dev_info(&pdev->dev, "Cleaning VQ command queue (%u)\n",
		 cptvf->nr_queues);
	free_command_queues(cptvf, &cptvf->cqinfo);
}

static void cptvf_sw_cleanup(struct cpt_vf *cptvf)
{
	cleanup_worker_threads(cptvf);
	cleanup_pending_queues(cptvf);
	cleanup_command_queues(cptvf);
}

static int cptvf_sw_init(struct cpt_vf *cptvf, u32 qlen, u32 nr_queues)
{
	struct pci_dev *pdev = cptvf->pdev;
	int ret = 0;
	u32 max_dev_queues = 0;

	max_dev_queues = CPT_NUM_QS_PER_VF;

	nr_queues = min_t(u32, nr_queues, max_dev_queues);
	cptvf->nr_queues = nr_queues;

	ret = init_command_queues(cptvf, qlen);
	if (ret) {
		dev_err(&pdev->dev, "Failed to setup command queues (%u)\n",
			nr_queues);
		return ret;
	}

	ret = init_pending_queues(cptvf, qlen, nr_queues);
	if (ret) {
		dev_err(&pdev->dev, "Failed to setup pending queues (%u)\n",
			nr_queues);
		goto setup_pqfail;
	}

	/* Create worker threads for BH processing */
	ret = init_worker_threads(cptvf);
	if (ret) {
		dev_err(&pdev->dev, "Failed to setup worker threads\n");
		goto init_work_fail;
	}

	return 0;

init_work_fail:
	cleanup_worker_threads(cptvf);
	cleanup_pending_queues(cptvf);

setup_pqfail:
	cleanup_command_queues(cptvf);

	return ret;
}

static void cptvf_free_irq_affinity(struct cpt_vf *cptvf, int vec)
{
	irq_set_affinity_hint(pci_irq_vector(cptvf->pdev, vec), NULL);
	free_cpumask_var(cptvf->affinity_mask[vec]);
}

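/*
 * The helpers below are thin read-modify-write accessors for the VF's VQ
 * CSRs, using the bitfield overlays from the cptx_vqx_* unions. A VF always
 * addresses its own queue through the fixed (0, 0) block/queue indices of
 * its BAR.
 */
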
static void cptvf_write_vq_ctl(struct cpt_vf *cptvf, bool val)
{
	union cptx_vqx_ctl vqx_ctl;

	vqx_ctl.u = cpt_read_csr64(cptvf->reg_base, CPTX_VQX_CTL(0, 0));
	vqx_ctl.s.ena = val;
	cpt_write_csr64(cptvf->reg_base, CPTX_VQX_CTL(0, 0), vqx_ctl.u);
}

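/*
 * The doorbell counts 64-bit instruction words rather than instructions;
 * each CPT instruction occupies eight such words (CPT_INST_SIZE bytes),
 * hence the val * 8 below.
 */
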
void cptvf_write_vq_doorbell(struct cpt_vf *cptvf, u32 val)
{
	union cptx_vqx_doorbell vqx_dbell;

	vqx_dbell.u = cpt_read_csr64(cptvf->reg_base,
				     CPTX_VQX_DOORBELL(0, 0));
	vqx_dbell.s.dbell_cnt = val * 8; /* Num of Instructions * 8 words */
	cpt_write_csr64(cptvf->reg_base, CPTX_VQX_DOORBELL(0, 0),
			vqx_dbell.u);
}

static void cptvf_write_vq_inprog(struct cpt_vf *cptvf, u8 val)
{
	union cptx_vqx_inprog vqx_inprg;

	vqx_inprg.u = cpt_read_csr64(cptvf->reg_base, CPTX_VQX_INPROG(0, 0));
	vqx_inprg.s.inflight = val;
	cpt_write_csr64(cptvf->reg_base, CPTX_VQX_INPROG(0, 0), vqx_inprg.u);
}

static void cptvf_write_vq_done_numwait(struct cpt_vf *cptvf, u32 val)
{
	union cptx_vqx_done_wait vqx_dwait;

	vqx_dwait.u = cpt_read_csr64(cptvf->reg_base,
				     CPTX_VQX_DONE_WAIT(0, 0));
	vqx_dwait.s.num_wait = val;
	cpt_write_csr64(cptvf->reg_base, CPTX_VQX_DONE_WAIT(0, 0),
			vqx_dwait.u);
}

static void cptvf_write_vq_done_timewait(struct cpt_vf *cptvf, u16 time)
{
	union cptx_vqx_done_wait vqx_dwait;

	vqx_dwait.u = cpt_read_csr64(cptvf->reg_base,
				     CPTX_VQX_DONE_WAIT(0, 0));
	vqx_dwait.s.time_wait = time;
	cpt_write_csr64(cptvf->reg_base, CPTX_VQX_DONE_WAIT(0, 0),
			vqx_dwait.u);
}

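/*
 * Interrupt enable registers use W1S (write-1-to-set) semantics, so only
 * the bit being enabled needs to be written; the corresponding MISC_INT
 * cause bits further down are W1C (write-1-to-clear).
 */
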
static void cptvf_enable_swerr_interrupts(struct cpt_vf *cptvf)
{
	union cptx_vqx_misc_ena_w1s vqx_misc_ena;

	vqx_misc_ena.u = cpt_read_csr64(cptvf->reg_base,
					CPTX_VQX_MISC_ENA_W1S(0, 0));
	/* Enable SWERR interrupts for the requested VF */
	vqx_misc_ena.s.swerr = 1;
	cpt_write_csr64(cptvf->reg_base, CPTX_VQX_MISC_ENA_W1S(0, 0),
			vqx_misc_ena.u);
}

static void cptvf_enable_mbox_interrupts(struct cpt_vf *cptvf)
{
	union cptx_vqx_misc_ena_w1s vqx_misc_ena;

	vqx_misc_ena.u = cpt_read_csr64(cptvf->reg_base,
					CPTX_VQX_MISC_ENA_W1S(0, 0));
	/* Enable mbox(0) interrupts for the requested VF */
	vqx_misc_ena.s.mbox = 1;
	cpt_write_csr64(cptvf->reg_base, CPTX_VQX_MISC_ENA_W1S(0, 0),
			vqx_misc_ena.u);
}

static void cptvf_enable_done_interrupts(struct cpt_vf *cptvf)
{
	union cptx_vqx_done_ena_w1s vqx_done_ena;

	vqx_done_ena.u = cpt_read_csr64(cptvf->reg_base,
					CPTX_VQX_DONE_ENA_W1S(0, 0));
	/* Enable DONE interrupts for the requested VF */
	vqx_done_ena.s.done = 1;
	cpt_write_csr64(cptvf->reg_base, CPTX_VQX_DONE_ENA_W1S(0, 0),
			vqx_done_ena.u);
}

static void cptvf_clear_dovf_intr(struct cpt_vf *cptvf)
{
	union cptx_vqx_misc_int vqx_misc_int;

	vqx_misc_int.u = cpt_read_csr64(cptvf->reg_base,
					CPTX_VQX_MISC_INT(0, 0));
	/* W1C for the VF */
	vqx_misc_int.s.dovf = 1;
	cpt_write_csr64(cptvf->reg_base, CPTX_VQX_MISC_INT(0, 0),
			vqx_misc_int.u);
}

static void cptvf_clear_irde_intr(struct cpt_vf *cptvf)
{
	union cptx_vqx_misc_int vqx_misc_int;

	vqx_misc_int.u = cpt_read_csr64(cptvf->reg_base,
					CPTX_VQX_MISC_INT(0, 0));
	/* W1C for the VF */
	vqx_misc_int.s.irde = 1;
	cpt_write_csr64(cptvf->reg_base, CPTX_VQX_MISC_INT(0, 0),
			vqx_misc_int.u);
}

static void cptvf_clear_nwrp_intr(struct cpt_vf *cptvf)
{
	union cptx_vqx_misc_int vqx_misc_int;

	vqx_misc_int.u = cpt_read_csr64(cptvf->reg_base,
					CPTX_VQX_MISC_INT(0, 0));
	/* W1C for the VF */
	vqx_misc_int.s.nwrp = 1;
	cpt_write_csr64(cptvf->reg_base,
			CPTX_VQX_MISC_INT(0, 0), vqx_misc_int.u);
}

static void cptvf_clear_mbox_intr(struct cpt_vf *cptvf)
{
	union cptx_vqx_misc_int vqx_misc_int;

	vqx_misc_int.u = cpt_read_csr64(cptvf->reg_base,
					CPTX_VQX_MISC_INT(0, 0));
	/* W1C for the VF */
	vqx_misc_int.s.mbox = 1;
	cpt_write_csr64(cptvf->reg_base, CPTX_VQX_MISC_INT(0, 0),
			vqx_misc_int.u);
}

static void cptvf_clear_swerr_intr(struct cpt_vf *cptvf)
{
	union cptx_vqx_misc_int vqx_misc_int;

	vqx_misc_int.u = cpt_read_csr64(cptvf->reg_base,
					CPTX_VQX_MISC_INT(0, 0));
	/* W1C for the VF */
	vqx_misc_int.s.swerr = 1;
	cpt_write_csr64(cptvf->reg_base, CPTX_VQX_MISC_INT(0, 0),
			vqx_misc_int.u);
}

static u64 cptvf_read_vf_misc_intr_status(struct cpt_vf *cptvf)
{
	return cpt_read_csr64(cptvf->reg_base, CPTX_VQX_MISC_INT(0, 0));
}

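/*
 * The MISC MSI-X vector multiplexes several causes: mailbox traffic from
 * the PF (the common case), doorbell overflow (DOVF), instruction NCB read
 * errors (IRDE), NCB response write errors (NWRP) and software errors
 * (SWERR). Each cause is acknowledged through its W1C helper above; a
 * doorbell overflow additionally resets the doorbell count to zero.
 */
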
static irqreturn_t cptvf_misc_intr_handler(int irq, void *cptvf_irq)
{
	struct cpt_vf *cptvf = (struct cpt_vf *)cptvf_irq;
	struct pci_dev *pdev = cptvf->pdev;
	u64 intr;

	intr = cptvf_read_vf_misc_intr_status(cptvf);
	/* Check for MISC interrupt types */
	if (likely(intr & CPT_VF_INTR_MBOX_MASK)) {
		dev_dbg(&pdev->dev, "Mailbox interrupt 0x%llx on CPT VF %d\n",
			intr, cptvf->vfid);
		cptvf_handle_mbox_intr(cptvf);
		cptvf_clear_mbox_intr(cptvf);
	} else if (unlikely(intr & CPT_VF_INTR_DOVF_MASK)) {
		cptvf_clear_dovf_intr(cptvf);
		/* Clear doorbell count */
		cptvf_write_vq_doorbell(cptvf, 0);
		dev_err(&pdev->dev, "Doorbell overflow error interrupt 0x%llx on CPT VF %d\n",
			intr, cptvf->vfid);
	} else if (unlikely(intr & CPT_VF_INTR_IRDE_MASK)) {
		cptvf_clear_irde_intr(cptvf);
		dev_err(&pdev->dev, "Instruction NCB read error interrupt 0x%llx on CPT VF %d\n",
			intr, cptvf->vfid);
	} else if (unlikely(intr & CPT_VF_INTR_NWRP_MASK)) {
		cptvf_clear_nwrp_intr(cptvf);
		dev_err(&pdev->dev, "NCB response write error interrupt 0x%llx on CPT VF %d\n",
			intr, cptvf->vfid);
	} else if (unlikely(intr & CPT_VF_INTR_SERR_MASK)) {
		cptvf_clear_swerr_intr(cptvf);
		dev_err(&pdev->dev, "Software error interrupt 0x%llx on CPT VF %d\n",
			intr, cptvf->vfid);
	} else {
		dev_err(&pdev->dev, "Unhandled interrupt in CPT VF %d\n",
			cptvf->vfid);
	}

	return IRQ_HANDLED;
}

static inline struct cptvf_wqe *get_cptvf_vq_wqe(struct cpt_vf *cptvf,
						 int qno)
{
	struct cptvf_wqe_info *nwqe_info;

	if (unlikely(qno >= cptvf->nr_queues))
		return NULL;
	nwqe_info = (struct cptvf_wqe_info *)cptvf->wqe_info;

	return &nwqe_info->vq_wqe[qno];
}

static inline u32 cptvf_read_vq_done_count(struct cpt_vf *cptvf)
{
	union cptx_vqx_done vqx_done;

	vqx_done.u = cpt_read_csr64(cptvf->reg_base, CPTX_VQX_DONE(0, 0));
	return vqx_done.s.done;
}

static inline void cptvf_write_vq_done_ack(struct cpt_vf *cptvf,
					   u32 ackcnt)
{
	union cptx_vqx_done_ack vqx_dack_cnt;

	vqx_dack_cnt.u = cpt_read_csr64(cptvf->reg_base,
					CPTX_VQX_DONE_ACK(0, 0));
	vqx_dack_cnt.s.done_ack = ackcnt;
	cpt_write_csr64(cptvf->reg_base, CPTX_VQX_DONE_ACK(0, 0),
			vqx_dack_cnt.u);
}

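/*
 * VQX_DONE holds the count of completed instructions not yet acknowledged;
 * writing that count back through VQX_DONE_ACK subtracts it. When the count
 * (or the timer configured via VQX_DONE_WAIT) crosses the programmed
 * threshold, the DONE interrupt fires and the handler below hands the work
 * to the queue's tasklet.
 */
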
static irqreturn_t cptvf_done_intr_handler(int irq, void *cptvf_irq)
{
	struct cpt_vf *cptvf = (struct cpt_vf *)cptvf_irq;
	struct pci_dev *pdev = cptvf->pdev;
	/* Read the number of completions */
	u32 intr = cptvf_read_vq_done_count(cptvf);

	if (intr) {
		struct cptvf_wqe *wqe;

		/* Acknowledge the scheduled completions for processing */
		cptvf_write_vq_done_ack(cptvf, intr);
		wqe = get_cptvf_vq_wqe(cptvf, 0);
		if (unlikely(!wqe)) {
			dev_err(&pdev->dev, "No work to schedule for VF (%d)\n",
				cptvf->vfid);
			return IRQ_NONE;
		}
		tasklet_hi_schedule(&wqe->twork);
	}

	return IRQ_HANDLED;
}

static void cptvf_set_irq_affinity(struct cpt_vf *cptvf, int vec)
{
	struct pci_dev *pdev = cptvf->pdev;
	int cpu;

	if (!zalloc_cpumask_var(&cptvf->affinity_mask[vec],
				GFP_KERNEL)) {
		dev_err(&pdev->dev, "Allocation failed for affinity_mask for VF %d\n",
			cptvf->vfid);
		return;
	}

	cpu = cptvf->vfid % num_online_cpus();
	cpumask_set_cpu(cpumask_local_spread(cpu, cptvf->node),
			cptvf->affinity_mask[vec]);
	irq_set_affinity_hint(pci_irq_vector(pdev, vec),
			      cptvf->affinity_mask[vec]);
}

static void cptvf_write_vq_saddr(struct cpt_vf *cptvf, u64 val)
{
	union cptx_vqx_saddr vqx_saddr;

	vqx_saddr.u = val;
	cpt_write_csr64(cptvf->reg_base, CPTX_VQX_SADDR(0, 0), vqx_saddr.u);
}

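/*
 * Hardware bring-up order: quiesce the VQ (disable it, zero the doorbell
 * and in-flight count), point VQX_SADDR at the first command chunk, set the
 * completion coalescing thresholds, then enable the VQ and mark the device
 * ready.
 */
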
static void cptvf_device_init(struct cpt_vf *cptvf)
{
	u64 base_addr = 0;

	/* Disable the VQ */
	cptvf_write_vq_ctl(cptvf, 0);
	/* Reset the doorbell */
	cptvf_write_vq_doorbell(cptvf, 0);
	/* Clear inflight */
	cptvf_write_vq_inprog(cptvf, 0);
	/* Write VQ SADDR */
	/* TODO: for now only one queue, so hard coded */
	base_addr = (u64)(cptvf->cqinfo.queue[0].qhead->dma_addr);
	cptvf_write_vq_saddr(cptvf, base_addr);
	/* Configure timerhold / coalescence */
	cptvf_write_vq_done_timewait(cptvf, CPT_TIMER_THOLD);
	cptvf_write_vq_done_numwait(cptvf, 1);
	/* Enable the VQ */
	cptvf_write_vq_ctl(cptvf, 1);
	/* Flag the VF ready */
	cptvf->flags |= CPT_FLAG_DEVICE_READY;
}

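/*
 * Probe sequence: enable and map the PCI device, set the 48-bit DMA mask,
 * allocate the two MSI-X vectors (MISC and DONE), then perform the mailbox
 * handshake with the PF: READY check, QLEN, group and priority assignment,
 * and finally the UP message once interrupts and software queues are in
 * place. Any failure unwinds through the labels at the bottom.
 */
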
static int cptvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct device *dev = &pdev->dev;
	struct cpt_vf *cptvf;
	int err;

	cptvf = devm_kzalloc(dev, sizeof(*cptvf), GFP_KERNEL);
	if (!cptvf)
		return -ENOMEM;

	pci_set_drvdata(pdev, cptvf);
	cptvf->pdev = pdev;
	err = pci_enable_device(pdev);
	if (err) {
		dev_err(dev, "Failed to enable PCI device\n");
		pci_set_drvdata(pdev, NULL);
		return err;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(dev, "PCI request regions failed 0x%x\n", err);
		goto cptvf_err_disable_device;
	}
	/* Mark as VF driver */
	cptvf->flags |= CPT_FLAG_VF_DRIVER;
	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48));
	if (err) {
		dev_err(dev, "Unable to get usable 48-bit DMA configuration\n");
		goto cptvf_err_release_regions;
	}

	/* MAP PF's configuration registers */
	cptvf->reg_base = pcim_iomap(pdev, 0, 0);
	if (!cptvf->reg_base) {
		dev_err(dev, "Cannot map config register space, aborting\n");
		err = -ENOMEM;
		goto cptvf_err_release_regions;
	}

	cptvf->node = dev_to_node(&pdev->dev);
	err = pci_alloc_irq_vectors(pdev, CPT_VF_MSIX_VECTORS,
				    CPT_VF_MSIX_VECTORS, PCI_IRQ_MSIX);
	if (err < 0) {
		dev_err(dev, "Request for #%d msix vectors failed\n",
			CPT_VF_MSIX_VECTORS);
		goto cptvf_err_release_regions;
	}

	err = request_irq(pci_irq_vector(pdev, CPT_VF_INT_VEC_E_MISC),
			  cptvf_misc_intr_handler, 0, "CPT VF misc intr",
			  cptvf);
	if (err) {
		dev_err(dev, "Request misc irq failed\n");
		goto cptvf_free_vectors;
	}

	/* Enable mailbox interrupt */
	cptvf_enable_mbox_interrupts(cptvf);
	cptvf_enable_swerr_interrupts(cptvf);

	/* Check ready with PF */
	/* Gets chip ID / device Id from PF if ready */
	err = cptvf_check_pf_ready(cptvf);
	if (err) {
		dev_err(dev, "PF not responding to READY msg\n");
		goto cptvf_free_misc_irq;
	}

	/* CPT VF software resources initialization */
	cptvf->cqinfo.qchunksize = CPT_CMD_QCHUNK_SIZE;
	err = cptvf_sw_init(cptvf, CPT_CMD_QLEN, CPT_NUM_QS_PER_VF);
	if (err) {
		dev_err(dev, "cptvf_sw_init() failed\n");
		goto cptvf_free_misc_irq;
	}
	/* Convey VQ LEN to PF */
	err = cptvf_send_vq_size_msg(cptvf);
	if (err) {
		dev_err(dev, "PF not responding to QLEN msg\n");
		goto cptvf_free_misc_irq;
	}

	/* CPT VF device initialization */
	cptvf_device_init(cptvf);
	/* Send msg to PF to assign current Q to required group */
	cptvf->vfgrp = 1;
	err = cptvf_send_vf_to_grp_msg(cptvf);
	if (err) {
		dev_err(dev, "PF not responding to VF_GRP msg\n");
		goto cptvf_free_misc_irq;
	}

	cptvf->priority = 1;
	err = cptvf_send_vf_priority_msg(cptvf);
	if (err) {
		dev_err(dev, "PF not responding to VF_PRIO msg\n");
		goto cptvf_free_misc_irq;
	}

	err = request_irq(pci_irq_vector(pdev, CPT_VF_INT_VEC_E_DONE),
			  cptvf_done_intr_handler, 0, "CPT VF done intr",
			  cptvf);
	if (err) {
		dev_err(dev, "Request done irq failed\n");
		goto cptvf_free_misc_irq;
	}

	/* Enable done interrupt */
	cptvf_enable_done_interrupts(cptvf);

	/* Set irq affinity masks */
	cptvf_set_irq_affinity(cptvf, CPT_VF_INT_VEC_E_MISC);
	cptvf_set_irq_affinity(cptvf, CPT_VF_INT_VEC_E_DONE);

	err = cptvf_send_vf_up(cptvf);
	if (err) {
		dev_err(dev, "PF not responding to UP msg\n");
		goto cptvf_free_irq_affinity;
	}
	err = cvm_crypto_init(cptvf);
	if (err) {
		dev_err(dev, "Algorithm register failed\n");
		goto cptvf_free_irq_affinity;
	}
	return 0;

cptvf_free_irq_affinity:
	cptvf_free_irq_affinity(cptvf, CPT_VF_INT_VEC_E_DONE);
	cptvf_free_irq_affinity(cptvf, CPT_VF_INT_VEC_E_MISC);
cptvf_free_misc_irq:
	free_irq(pci_irq_vector(pdev, CPT_VF_INT_VEC_E_MISC), cptvf);
cptvf_free_vectors:
	pci_free_irq_vectors(cptvf->pdev);
cptvf_err_release_regions:
	pci_release_regions(pdev);
cptvf_err_disable_device:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

	return err;
}

static void cptvf_remove(struct pci_dev *pdev)
{
	struct cpt_vf *cptvf = pci_get_drvdata(pdev);

	if (!cptvf) {
		dev_err(&pdev->dev, "Invalid CPT-VF device\n");
		return;
	}

	/* Convey DOWN to PF */
	if (cptvf_send_vf_down(cptvf)) {
		dev_err(&pdev->dev, "PF not responding to DOWN msg\n");
	} else {
		cptvf_free_irq_affinity(cptvf, CPT_VF_INT_VEC_E_DONE);
		cptvf_free_irq_affinity(cptvf, CPT_VF_INT_VEC_E_MISC);
		free_irq(pci_irq_vector(pdev, CPT_VF_INT_VEC_E_DONE), cptvf);
		free_irq(pci_irq_vector(pdev, CPT_VF_INT_VEC_E_MISC), cptvf);
		pci_free_irq_vectors(cptvf->pdev);
		cptvf_sw_cleanup(cptvf);
		pci_set_drvdata(pdev, NULL);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		cvm_crypto_exit();
	}
}

static void cptvf_shutdown(struct pci_dev *pdev)
{
	cptvf_remove(pdev);
}

/* Supported devices */
static const struct pci_device_id cptvf_id_table[] = {
	{PCI_VDEVICE(CAVIUM, CPT_81XX_PCI_VF_DEVICE_ID), 0},
	{ 0, }  /* end of table */
};

static struct pci_driver cptvf_pci_driver = {
	.name = DRV_NAME,
	.id_table = cptvf_id_table,
	.probe = cptvf_probe,
	.remove = cptvf_remove,
	.shutdown = cptvf_shutdown,
};

module_pci_driver(cptvf_pci_driver);

MODULE_AUTHOR("George Cherian <george.cherian@cavium.com>");
MODULE_DESCRIPTION("Cavium Thunder CPT Virtual Function Driver");
MODULE_LICENSE("GPL v2");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, cptvf_id_table);