// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)

#include <linux/aer.h>
#include <linux/bitmap.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/nvme.h>
#include <linux/pci.h>
#include <linux/wait.h>
#include <linux/sched/signal.h>

#include "fun_queue.h"
#include "fun_dev.h"

#define FUN_ADMIN_CMD_TO_MS 3000

enum {
	AQA_ASQS_SHIFT = 0,
	AQA_ACQS_SHIFT = 16,
	AQA_MIN_QUEUE_SIZE = 2,
	AQA_MAX_QUEUE_SIZE = 4096
};

/* context for admin commands */
struct fun_cmd_ctx {
	fun_admin_callback_t cb;	/* callback to invoke on completion */
	void *cb_data;			/* user data provided to callback */
	int cpu;			/* CPU where the cmd's tag was allocated */
};

/* Context for synchronous admin commands. */
struct fun_sync_cmd_ctx {
	struct completion compl;	/* signaled on command completion */
	u8 *rsp_buf;			/* caller provided response buffer */
	unsigned int rsp_len;		/* response buffer size */
	u8 rsp_status;			/* command response status */
};

/* Wait for the CSTS.RDY bit to match @enabled. */
static int fun_wait_ready(struct fun_dev *fdev, bool enabled)
{
	unsigned int cap_to = NVME_CAP_TIMEOUT(fdev->cap_reg);
	u32 bit = enabled ? NVME_CSTS_RDY : 0;
	unsigned long deadline;

	deadline = ((cap_to + 1) * HZ / 2) + jiffies; /* CAP.TO is in 500ms units */

	for (;;) {
		u32 csts = readl(fdev->bar + NVME_REG_CSTS);

		if (csts == ~0) {
			dev_err(fdev->dev, "CSTS register read %#x\n", csts);
			return -EIO;
		}

		if ((csts & NVME_CSTS_RDY) == bit)
			return 0;

		if (time_is_before_jiffies(deadline))
			break;

		msleep(100);
	}

	dev_err(fdev->dev,
		"Timed out waiting for device to indicate RDY %u; aborting %s\n",
		enabled, enabled ? "initialization" : "reset");
	return -ETIMEDOUT;
}

/* Check CSTS and return an error if it is unreadable or has an unexpected
 * RDY value.
 */
static int fun_check_csts_rdy(struct fun_dev *fdev, unsigned int expected_rdy)
{
	u32 csts = readl(fdev->bar + NVME_REG_CSTS);
	u32 actual_rdy = csts & NVME_CSTS_RDY;

	if (csts == ~0) {
		dev_err(fdev->dev, "CSTS register read %#x\n", csts);
		return -EIO;
	}
	if (actual_rdy != expected_rdy) {
		dev_err(fdev->dev, "Unexpected CSTS RDY %u\n", actual_rdy);
		return -EINVAL;
	}
	return 0;
}

/* Check that CSTS RDY has the expected value. Then write a new value to the CC
 * register and wait for CSTS RDY to match the new CC ENABLE state.
 */
static int fun_update_cc_enable(struct fun_dev *fdev, unsigned int initial_rdy)
{
	int rc = fun_check_csts_rdy(fdev, initial_rdy);

	if (rc)
		return rc;
	writel(fdev->cc_reg, fdev->bar + NVME_REG_CC);
	return fun_wait_ready(fdev, !!(fdev->cc_reg & NVME_CC_ENABLE));
}

/* Disable the controller: clear shutdown/enable and wait for RDY to drop. */
static int fun_disable_ctrl(struct fun_dev *fdev)
{
	fdev->cc_reg &= ~(NVME_CC_SHN_MASK | NVME_CC_ENABLE);
	return fun_update_cc_enable(fdev, 1);
}

/* Enable the controller with the admin queue entry sizes and wait for RDY. */
static int fun_enable_ctrl(struct fun_dev *fdev, u32 admin_cqesz_log2,
			   u32 admin_sqesz_log2)
{
	fdev->cc_reg = (admin_cqesz_log2 << NVME_CC_IOCQES_SHIFT) |
		       (admin_sqesz_log2 << NVME_CC_IOSQES_SHIFT) |
		       ((PAGE_SHIFT - 12) << NVME_CC_MPS_SHIFT) |
		       NVME_CC_ENABLE;

	return fun_update_cc_enable(fdev, 0);
}

static int fun_map_bars(struct fun_dev *fdev, const char *name)
{
	struct pci_dev *pdev = to_pci_dev(fdev->dev);
	int err;

	err = pci_request_mem_regions(pdev, name);
	if (err) {
		dev_err(&pdev->dev,
			"Couldn't get PCI memory resources, err %d\n", err);
		return err;
	}

	fdev->bar = pci_ioremap_bar(pdev, 0);
	if (!fdev->bar) {
		dev_err(&pdev->dev, "Couldn't map BAR 0\n");
		pci_release_mem_regions(pdev);
		return -ENOMEM;
	}

	return 0;
}

static void fun_unmap_bars(struct fun_dev *fdev)
{
	struct pci_dev *pdev = to_pci_dev(fdev->dev);

	if (fdev->bar) {
		iounmap(fdev->bar);
		fdev->bar = NULL;
		pci_release_mem_regions(pdev);
	}
}

static int fun_set_dma_masks(struct device *dev)
{
	int err;

	err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
	if (err)
		dev_err(dev, "DMA mask configuration failed, err %d\n", err);
	return err;
}

/* IRQ handler for the admin queue's CQ: drain any available CQEs. */
static irqreturn_t fun_admin_irq(int irq, void *data)
{
	struct fun_queue *funq = data;

	return fun_process_cq(funq, 0) ? IRQ_HANDLED : IRQ_NONE;
}

static void fun_complete_admin_cmd(struct fun_queue *funq, void *data,
				   void *entry, const struct fun_cqe_info *info)
{
	const struct fun_admin_rsp_common *rsp_common = entry;
	struct fun_dev *fdev = funq->fdev;
	struct fun_cmd_ctx *cmd_ctx;
	int cpu;
	u16 cid;

	if (info->sqhd == cpu_to_be16(0xffff)) {
		dev_dbg(fdev->dev, "adminq event");
		if (fdev->adminq_cb)
			fdev->adminq_cb(fdev, entry);
		return;
	}

	cid = be16_to_cpu(rsp_common->cid);
	dev_dbg(fdev->dev, "admin CQE cid %u, op %u, ret %u\n", cid,
		rsp_common->op, rsp_common->ret);

	cmd_ctx = &fdev->cmd_ctx[cid];
	if (cmd_ctx->cpu < 0) {
		dev_err(fdev->dev,
			"admin CQE with CID=%u, op=%u does not match a pending command\n",
			cid, rsp_common->op);
		return;
	}

	if (cmd_ctx->cb)
		cmd_ctx->cb(fdev, entry, xchg(&cmd_ctx->cb_data, NULL));

	cpu = cmd_ctx->cpu;
	cmd_ctx->cpu = -1;
	sbitmap_queue_clear(&fdev->admin_sbq, cid, cpu);
}

static int fun_init_cmd_ctx(struct fun_dev *fdev, unsigned int ntags)
{
	unsigned int i;

	fdev->cmd_ctx = kvcalloc(ntags, sizeof(*fdev->cmd_ctx), GFP_KERNEL);
	if (!fdev->cmd_ctx)
		return -ENOMEM;

	for (i = 0; i < ntags; i++)
		fdev->cmd_ctx[i].cpu = -1;

	return 0;
}

/* Allocate and enable an admin queue and assign it the first IRQ vector. */
static int fun_enable_admin_queue(struct fun_dev *fdev,
				  const struct fun_dev_params *areq)
{
	struct fun_queue_alloc_req qreq = {
		.cqe_size_log2 = areq->cqe_size_log2,
		.sqe_size_log2 = areq->sqe_size_log2,
		.cq_depth = areq->cq_depth,
		.sq_depth = areq->sq_depth,
		.rq_depth = areq->rq_depth,
	};
	unsigned int ntags = areq->sq_depth - 1;
	struct fun_queue *funq;
	int rc;

	if (fdev->admin_q)
		return -EEXIST;

	if (areq->sq_depth < AQA_MIN_QUEUE_SIZE ||
	    areq->sq_depth > AQA_MAX_QUEUE_SIZE ||
	    areq->cq_depth < AQA_MIN_QUEUE_SIZE ||
	    areq->cq_depth > AQA_MAX_QUEUE_SIZE)
		return -EINVAL;

	fdev->admin_q = fun_alloc_queue(fdev, 0, &qreq);
	if (!fdev->admin_q)
		return -ENOMEM;

	rc = fun_init_cmd_ctx(fdev, ntags);
	if (rc)
		goto free_q;

	rc = sbitmap_queue_init_node(&fdev->admin_sbq, ntags, -1, false,
				     GFP_KERNEL, dev_to_node(fdev->dev));
	if (rc)
		goto free_cmd_ctx;

	funq = fdev->admin_q;
	funq->cq_vector = 0;
	rc = fun_request_irq(funq, dev_name(fdev->dev), fun_admin_irq, funq);
	if (rc)
		goto free_sbq;

	fun_set_cq_callback(funq, fun_complete_admin_cmd, NULL);
	fdev->adminq_cb = areq->event_cb;

	writel((funq->sq_depth - 1) << AQA_ASQS_SHIFT |
	       (funq->cq_depth - 1) << AQA_ACQS_SHIFT,
	       fdev->bar + NVME_REG_AQA);

	writeq(funq->sq_dma_addr, fdev->bar + NVME_REG_ASQ);
	writeq(funq->cq_dma_addr, fdev->bar + NVME_REG_ACQ);

	rc = fun_enable_ctrl(fdev, areq->cqe_size_log2, areq->sqe_size_log2);
	if (rc)
		goto free_irq;

	if (areq->rq_depth) {
		rc = fun_create_rq(funq);
		if (rc)
			goto disable_ctrl;

		funq_rq_post(funq);
	}

	return 0;

disable_ctrl:
	fun_disable_ctrl(fdev);
free_irq:
	fun_free_irq(funq);
free_sbq:
	sbitmap_queue_free(&fdev->admin_sbq);
free_cmd_ctx:
	kvfree(fdev->cmd_ctx);
	fdev->cmd_ctx = NULL;
free_q:
	fun_free_queue(fdev->admin_q);
	fdev->admin_q = NULL;
	return rc;
}

static void fun_disable_admin_queue(struct fun_dev *fdev)
{
	struct fun_queue *admq = fdev->admin_q;

	if (!admq)
		return;

	fun_disable_ctrl(fdev);

	fun_free_irq(admq);
	__fun_process_cq(admq, 0);

	sbitmap_queue_free(&fdev->admin_sbq);

	kvfree(fdev->cmd_ctx);
	fdev->cmd_ctx = NULL;

	fun_free_queue(admq);
	fdev->admin_q = NULL;
}

/* Return %true if the admin queue has stopped servicing commands as can be
 * detected through registers. This isn't exhaustive and may provide false
 * negatives.
 */
static bool fun_adminq_stopped(struct fun_dev *fdev)
{
	u32 csts = readl(fdev->bar + NVME_REG_CSTS);

	return (csts & (NVME_CSTS_CFS | NVME_CSTS_RDY)) != NVME_CSTS_RDY;
}

/* Wait uninterruptibly for a free admin command tag, failing early if command
 * submission has been suppressed.
 */
static int fun_wait_for_tag(struct fun_dev *fdev, int *cpup)
{
	struct sbitmap_queue *sbq = &fdev->admin_sbq;
	struct sbq_wait_state *ws = &sbq->ws[0];
	DEFINE_SBQ_WAIT(wait);
	int tag;

	for (;;) {
		sbitmap_prepare_to_wait(sbq, ws, &wait, TASK_UNINTERRUPTIBLE);
		if (fdev->suppress_cmds) {
			tag = -ESHUTDOWN;
			break;
		}
		tag = sbitmap_queue_get(sbq, cpup);
		if (tag >= 0)
			break;
		schedule();
	}

	sbitmap_finish_wait(sbq, ws, &wait);
	return tag;
}

/* Submit an asynchronous admin command. Caller is responsible for implementing
 * any waiting or timeout. Upon command completion the callback @cb is called.
 */
int fun_submit_admin_cmd(struct fun_dev *fdev, struct fun_admin_req_common *cmd,
			 fun_admin_callback_t cb, void *cb_data, bool wait_ok)
{
	struct fun_queue *funq = fdev->admin_q;
	unsigned int cmdsize = cmd->len8 * 8;
	struct fun_cmd_ctx *cmd_ctx;
	int tag, cpu, rc = 0;

	if (WARN_ON(cmdsize > (1 << funq->sqe_size_log2)))
		return -EMSGSIZE;

	tag = sbitmap_queue_get(&fdev->admin_sbq, &cpu);
	if (tag < 0) {
		if (!wait_ok)
			return -EAGAIN;
		tag = fun_wait_for_tag(fdev, &cpu);
		if (tag < 0)
			return tag;
	}

	cmd->cid = cpu_to_be16(tag);

	cmd_ctx = &fdev->cmd_ctx[tag];
	cmd_ctx->cb = cb;
	cmd_ctx->cb_data = cb_data;

	spin_lock(&funq->sq_lock);

	if (unlikely(fdev->suppress_cmds)) {
		rc = -ESHUTDOWN;
		sbitmap_queue_clear(&fdev->admin_sbq, tag, cpu);
	} else {
		cmd_ctx->cpu = cpu;
		memcpy(fun_sqe_at(funq, funq->sq_tail), cmd, cmdsize);

		dev_dbg(fdev->dev, "admin cmd @ %u: %8ph\n", funq->sq_tail,
			cmd);

		if (++funq->sq_tail == funq->sq_depth)
			funq->sq_tail = 0;
		writel(funq->sq_tail, funq->sq_db);
	}
	spin_unlock(&funq->sq_lock);
	return rc;
}

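/* A minimal asynchronous-submission sketch (illustration only, not part of
 * this driver). The callback runs from admin CQ processing context; my_cb,
 * my_req, and my_state are hypothetical names.
 *
 *	static void my_cb(struct fun_dev *fdev, void *rsp, void *cb_data)
 *	{
 *		struct my_state *st = cb_data;	// data given at submission
 *		...				// examine rsp, wake a waiter
 *	}
 *
 *	rc = fun_submit_admin_cmd(fdev, &my_req.common, my_cb, &my_state,
 *				  true);	// true: may sleep for a tag
 */
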
/* Abandon a pending admin command by clearing the issuer's callback data.
 * Failure indicates that the command either has already completed or its
 * completion is racing with this call.
 */
static bool fun_abandon_admin_cmd(struct fun_dev *fd,
				  const struct fun_admin_req_common *cmd,
				  void *cb_data)
{
	u16 cid = be16_to_cpu(cmd->cid);
	struct fun_cmd_ctx *cmd_ctx = &fd->cmd_ctx[cid];

	return cmpxchg(&cmd_ctx->cb_data, cb_data, NULL) == cb_data;
}

/* Stop submission of new admin commands and wake up any processes waiting for
 * tags. Already submitted commands are left to complete or time out.
 */
static void fun_admin_stop(struct fun_dev *fdev)
{
	spin_lock(&fdev->admin_q->sq_lock);
	fdev->suppress_cmds = true;
	spin_unlock(&fdev->admin_q->sq_lock);
	sbitmap_queue_wake_all(&fdev->admin_sbq);
}

/* The callback for synchronous execution of admin commands. It copies the
 * command response to the caller's buffer and signals completion.
 */
static void fun_admin_cmd_sync_cb(struct fun_dev *fd, void *rsp, void *cb_data)
{
	const struct fun_admin_rsp_common *rsp_common = rsp;
	struct fun_sync_cmd_ctx *ctx = cb_data;

	if (!ctx)
		return;		/* command issuer timed out and left */
	if (ctx->rsp_buf) {
		unsigned int rsp_len = rsp_common->len8 * 8;

		if (unlikely(rsp_len > ctx->rsp_len)) {
			dev_err(fd->dev,
				"response for op %u is %uB > response buffer %uB\n",
				rsp_common->op, rsp_len, ctx->rsp_len);
			rsp_len = ctx->rsp_len;
		}
		memcpy(ctx->rsp_buf, rsp, rsp_len);
	}
	ctx->rsp_status = rsp_common->ret;
	complete(&ctx->compl);
}

/* Submit a synchronous admin command. */
int fun_submit_admin_sync_cmd(struct fun_dev *fdev,
			      struct fun_admin_req_common *cmd, void *rsp,
			      size_t rspsize, unsigned int timeout)
{
	struct fun_sync_cmd_ctx ctx = {
		.compl = COMPLETION_INITIALIZER_ONSTACK(ctx.compl),
		.rsp_buf = rsp,
		.rsp_len = rspsize,
	};
	unsigned int cmdlen = cmd->len8 * 8;
	unsigned long jiffies_left;
	int ret;

	ret = fun_submit_admin_cmd(fdev, cmd, fun_admin_cmd_sync_cb, &ctx,
				   true);
	if (ret)
		return ret;

	if (!timeout)
		timeout = FUN_ADMIN_CMD_TO_MS;

	jiffies_left = wait_for_completion_timeout(&ctx.compl,
						   msecs_to_jiffies(timeout));
	if (!jiffies_left) {
		/* The command timed out. Attempt to cancel it so we can return.
		 * But if the command is in the process of completing we'll
		 * wait for it.
		 */
		if (fun_abandon_admin_cmd(fdev, cmd, &ctx)) {
			dev_err(fdev->dev, "admin command timed out: %*ph\n",
				cmdlen, cmd);
			fun_admin_stop(fdev);
			/* see if the timeout was due to a queue failure */
			if (fun_adminq_stopped(fdev))
				dev_err(fdev->dev,
					"device does not accept admin commands\n");

			return -ETIMEDOUT;
		}
		wait_for_completion(&ctx.compl);
	}

	if (ctx.rsp_status) {
		dev_err(fdev->dev, "admin command failed, err %d: %*ph\n",
			ctx.rsp_status, cmdlen, cmd);
	}

	return -ctx.rsp_status;
}
EXPORT_SYMBOL_GPL(fun_submit_admin_sync_cmd);

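/* A synchronous-call sketch (illustration only): a NULL @rsp with @rspsize 0
 * discards any response payload, and a 0 @timeout selects the default
 * FUN_ADMIN_CMD_TO_MS. fun_get_res_count() below shows the variant that
 * retrieves a response.
 *
 *	rc = fun_submit_admin_sync_cmd(fdev, &my_req.common, NULL, 0, 0);
 *	if (rc)
 *		...	// -ETIMEDOUT, a submission error, or the negated
 *			// command status from the device
 */
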
/* Return the number of device resources of the requested type. */
int fun_get_res_count(struct fun_dev *fdev, enum fun_admin_op res)
{
	union {
		struct fun_admin_res_count_req req;
		struct fun_admin_res_count_rsp rsp;
	} cmd;
	int rc;

	cmd.req.common = FUN_ADMIN_REQ_COMMON_INIT2(res, sizeof(cmd.req));
	cmd.req.count = FUN_ADMIN_SIMPLE_SUBOP_INIT(FUN_ADMIN_SUBOP_RES_COUNT,
						    0, 0);

	rc = fun_submit_admin_sync_cmd(fdev, &cmd.req.common, &cmd.rsp,
				       sizeof(cmd), 0);
	return rc ? rc : be32_to_cpu(cmd.rsp.count.data);
}
EXPORT_SYMBOL_GPL(fun_get_res_count);

/* Request that the instance of resource @res with the given id be deleted. */
int fun_res_destroy(struct fun_dev *fdev, enum fun_admin_op res,
		    unsigned int flags, u32 id)
{
	struct fun_admin_generic_destroy_req req = {
		.common = FUN_ADMIN_REQ_COMMON_INIT2(res, sizeof(req)),
		.destroy = FUN_ADMIN_SIMPLE_SUBOP_INIT(FUN_ADMIN_SUBOP_DESTROY,
						       flags, id)
	};

	return fun_submit_admin_sync_cmd(fdev, &req.common, NULL, 0, 0);
}
EXPORT_SYMBOL_GPL(fun_res_destroy);

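/* Example (illustration only): destroying a completion queue whose HCI id,
 * here the hypothetical my_cqid, was obtained when the resource was created.
 *
 *	rc = fun_res_destroy(fdev, FUN_ADMIN_OP_EPCQ, 0, my_cqid);
 */
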
/* Bind two entities of the given types and IDs. */
int fun_bind(struct fun_dev *fdev, enum fun_admin_bind_type type0,
	     unsigned int id0, enum fun_admin_bind_type type1,
	     unsigned int id1)
{
	struct {
		struct fun_admin_bind_req req;
		struct fun_admin_bind_entry entry[2];
	} cmd = {
		.req.common = FUN_ADMIN_REQ_COMMON_INIT2(FUN_ADMIN_OP_BIND,
							 sizeof(cmd)),
		.entry[0] = FUN_ADMIN_BIND_ENTRY_INIT(type0, id0),
		.entry[1] = FUN_ADMIN_BIND_ENTRY_INIT(type1, id1),
	};

	return fun_submit_admin_sync_cmd(fdev, &cmd.req.common, NULL, 0, 0);
}
EXPORT_SYMBOL_GPL(fun_bind);

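/* A bind sketch (illustration only, assuming the EPSQ/EPCQ bind types from
 * fun_hci.h): associating a send queue with its completion queue. my_sqid
 * and my_cqid are hypothetical.
 *
 *	rc = fun_bind(fdev, FUN_ADMIN_BIND_TYPE_EPSQ, my_sqid,
 *		      FUN_ADMIN_BIND_TYPE_EPCQ, my_cqid);
 */
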
/* Query the device's CQ/SQ resource counts and derive the usable QID range. */
static int fun_get_dev_limits(struct fun_dev *fdev)
{
	struct pci_dev *pdev = to_pci_dev(fdev->dev);
	unsigned int cq_count, sq_count, num_dbs;
	int rc;

	rc = fun_get_res_count(fdev, FUN_ADMIN_OP_EPCQ);
	if (rc < 0)
		return rc;
	cq_count = rc;

	rc = fun_get_res_count(fdev, FUN_ADMIN_OP_EPSQ);
	if (rc < 0)
		return rc;
	sq_count = rc;

	/* The admin queue consumes 1 CQ and at least 1 SQ. To be usable the
	 * device must provide additional queues.
	 */
	if (cq_count < 2 || sq_count < 2 + !!fdev->admin_q->rq_depth)
		return -EINVAL;

	/* Calculate the max QID based on SQ/CQ/doorbell counts.
	 * SQ/CQ doorbells alternate.
	 */
	num_dbs = (pci_resource_len(pdev, 0) - NVME_REG_DBS) >>
		  (2 + NVME_CAP_STRIDE(fdev->cap_reg));
	fdev->max_qid = min3(cq_count, sq_count, num_dbs / 2) - 1;
	fdev->kern_end_qid = fdev->max_qid + 1;
	return 0;
}

/* Allocate all MSI-X vectors available on a function and at least @min_vecs. */
static int fun_alloc_irqs(struct pci_dev *pdev, unsigned int min_vecs)
{
	int vecs, num_msix = pci_msix_vec_count(pdev);

	if (num_msix < 0)
		return num_msix;
	if (min_vecs > num_msix)
		return -ERANGE;

	vecs = pci_alloc_irq_vectors(pdev, min_vecs, num_msix, PCI_IRQ_MSIX);
	if (vecs > 0) {
		dev_info(&pdev->dev,
			 "Allocated %d IRQ vectors of %d requested\n",
			 vecs, num_msix);
	} else {
		dev_err(&pdev->dev,
			"Unable to allocate at least %u IRQ vectors\n",
			min_vecs);
	}
	return vecs;
}

/* Allocate and initialize the IRQ manager state. */
static int fun_alloc_irq_mgr(struct fun_dev *fdev)
{
	fdev->irq_map = bitmap_zalloc(fdev->num_irqs, GFP_KERNEL);
	if (!fdev->irq_map)
		return -ENOMEM;

	spin_lock_init(&fdev->irqmgr_lock);
	/* mark IRQ 0 allocated, it is used by the admin queue */
	__set_bit(0, fdev->irq_map);
	fdev->irqs_avail = fdev->num_irqs - 1;
	return 0;
}

/* Reserve @nirqs of the currently available IRQs and return their indices. */
int fun_reserve_irqs(struct fun_dev *fdev, unsigned int nirqs, u16 *irq_indices)
{
	unsigned int b, n = 0;
	int err = -ENOSPC;

	if (!nirqs)
		return -EINVAL;

	spin_lock(&fdev->irqmgr_lock);
	if (nirqs > fdev->irqs_avail)
		goto unlock;

	for_each_clear_bit(b, fdev->irq_map, fdev->num_irqs) {
		__set_bit(b, fdev->irq_map);
		irq_indices[n++] = b;
		if (n >= nirqs)
			break;
	}

	WARN_ON(n < nirqs);
	fdev->irqs_avail -= n;
	err = n;
unlock:
	spin_unlock(&fdev->irqmgr_lock);
	return err;
}
EXPORT_SYMBOL(fun_reserve_irqs);

/* Release @nirqs previously reserved IRQs with the supplied indices. */
void fun_release_irqs(struct fun_dev *fdev, unsigned int nirqs,
		      u16 *irq_indices)
{
	unsigned int i;

	spin_lock(&fdev->irqmgr_lock);
	for (i = 0; i < nirqs; i++)
		__clear_bit(irq_indices[i], fdev->irq_map);
	fdev->irqs_avail += nirqs;
	spin_unlock(&fdev->irqmgr_lock);
}
EXPORT_SYMBOL(fun_release_irqs);

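/* A reserve/release sketch (illustration only): obtaining two vectors for a
 * pair of hypothetical I/O queues and returning them on teardown. The indices
 * refer to this function's MSI-X vector table, so the Linux IRQ number comes
 * from pci_irq_vector().
 *
 *	u16 irqs[2];
 *	int rc = fun_reserve_irqs(fdev, 2, irqs);
 *
 *	if (rc < 0)
 *		return rc;	// not enough free vectors
 *	...			// request_irq(pci_irq_vector(pdev, irqs[i]), ...)
 *	fun_release_irqs(fdev, 2, irqs);
 */
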
/* Run the driver-supplied service callback unless servicing is disabled. */
static void fun_serv_handler(struct work_struct *work)
{
	struct fun_dev *fd = container_of(work, struct fun_dev, service_task);

	if (test_bit(FUN_SERV_DISABLED, &fd->service_flags))
		return;
	if (fd->serv_cb)
		fd->serv_cb(fd);
}

void fun_serv_stop(struct fun_dev *fd)
{
	set_bit(FUN_SERV_DISABLED, &fd->service_flags);
	cancel_work_sync(&fd->service_task);
}
EXPORT_SYMBOL_GPL(fun_serv_stop);

void fun_serv_restart(struct fun_dev *fd)
{
	clear_bit(FUN_SERV_DISABLED, &fd->service_flags);
	if (fd->service_flags)
		schedule_work(&fd->service_task);
}
EXPORT_SYMBOL_GPL(fun_serv_restart);

void fun_serv_sched(struct fun_dev *fd)
{
	if (!test_bit(FUN_SERV_DISABLED, &fd->service_flags))
		schedule_work(&fd->service_task);
}
EXPORT_SYMBOL_GPL(fun_serv_sched);

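/* A service-task sketch (illustration only): an event or IRQ handler records
 * pending work in service_flags and schedules the task; the serv_cb supplied
 * via struct fun_dev_params then runs in process context. MY_SERV_EVENT is a
 * hypothetical driver-defined flag.
 *
 *	set_bit(MY_SERV_EVENT, &fdev->service_flags);
 *	fun_serv_sched(fdev);	// no-op while FUN_SERV_DISABLED is set
 */
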
/* Check and try to get the device into a proper state for initialization,
 * i.e., CSTS.RDY = CC.EN = 0.
 */
static int sanitize_dev(struct fun_dev *fdev)
{
	int rc;

	fdev->cap_reg = readq(fdev->bar + NVME_REG_CAP);
	fdev->cc_reg = readl(fdev->bar + NVME_REG_CC);

	/* First get RDY to agree with the current EN. Give RDY the opportunity
	 * to complete a potential recent EN change.
	 */
	rc = fun_wait_ready(fdev, fdev->cc_reg & NVME_CC_ENABLE);
	if (rc)
		return rc;

	/* Next, reset the device if EN is currently 1. */
	if (fdev->cc_reg & NVME_CC_ENABLE)
		rc = fun_disable_ctrl(fdev);

	return rc;
}

/* Undo the device initialization of fun_dev_enable(). */
void fun_dev_disable(struct fun_dev *fdev)
{
	struct pci_dev *pdev = to_pci_dev(fdev->dev);

	pci_set_drvdata(pdev, NULL);

	if (fdev->fw_handle != FUN_HCI_ID_INVALID) {
		fun_res_destroy(fdev, FUN_ADMIN_OP_SWUPGRADE, 0,
				fdev->fw_handle);
		fdev->fw_handle = FUN_HCI_ID_INVALID;
	}

	fun_disable_admin_queue(fdev);

	bitmap_free(fdev->irq_map);
	pci_free_irq_vectors(pdev);

	pci_clear_master(pdev);
	pci_disable_pcie_error_reporting(pdev);
	pci_disable_device(pdev);

	fun_unmap_bars(fdev);
}
EXPORT_SYMBOL(fun_dev_disable);

/* Perform basic initialization of a device, including
 * - PCI config space setup and BAR0 mapping
 * - interrupt management initialization
 * - 1 admin queue setup
 * - determination of some device limits, such as number of queues.
 */
int fun_dev_enable(struct fun_dev *fdev, struct pci_dev *pdev,
		   const struct fun_dev_params *areq, const char *name)
{
	int rc;

	fdev->dev = &pdev->dev;
	rc = fun_map_bars(fdev, name);
	if (rc)
		return rc;

	rc = fun_set_dma_masks(fdev->dev);
	if (rc)
		goto unmap;

	rc = pci_enable_device_mem(pdev);
	if (rc) {
		dev_err(&pdev->dev, "Couldn't enable device, err %d\n", rc);
		goto unmap;
	}

	pci_enable_pcie_error_reporting(pdev);

	rc = sanitize_dev(fdev);
	if (rc)
		goto disable_dev;

	fdev->fw_handle = FUN_HCI_ID_INVALID;
	fdev->q_depth = NVME_CAP_MQES(fdev->cap_reg) + 1;
	fdev->db_stride = 1 << NVME_CAP_STRIDE(fdev->cap_reg);
	fdev->dbs = fdev->bar + NVME_REG_DBS;

	INIT_WORK(&fdev->service_task, fun_serv_handler);
	fdev->service_flags = FUN_SERV_DISABLED;
	fdev->serv_cb = areq->serv_cb;

	rc = fun_alloc_irqs(pdev, areq->min_msix + 1); /* +1 for admin CQ */
	if (rc < 0)
		goto disable_dev;
	fdev->num_irqs = rc;

	rc = fun_alloc_irq_mgr(fdev);
	if (rc)
		goto free_irqs;

	pci_set_master(pdev);
	rc = fun_enable_admin_queue(fdev, areq);
	if (rc)
		goto free_irq_mgr;

	rc = fun_get_dev_limits(fdev);
	if (rc < 0)
		goto disable_admin;

	pci_save_state(pdev);
	pci_set_drvdata(pdev, fdev);
	pcie_print_link_status(pdev);
	dev_dbg(fdev->dev, "q_depth %u, db_stride %u, max qid %d kern_end_qid %d\n",
		fdev->q_depth, fdev->db_stride, fdev->max_qid,
		fdev->kern_end_qid);
	return 0;

disable_admin:
	fun_disable_admin_queue(fdev);
free_irq_mgr:
	pci_clear_master(pdev);
	bitmap_free(fdev->irq_map);
free_irqs:
	pci_free_irq_vectors(pdev);
disable_dev:
	pci_disable_pcie_error_reporting(pdev);
	pci_disable_device(pdev);
unmap:
	fun_unmap_bars(fdev);
	return rc;
}
EXPORT_SYMBOL(fun_dev_enable);

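/* A probe-time sketch (illustration only): enabling a device with an admin
 * queue and pairing it with fun_dev_disable() on teardown. The depths, entry
 * sizes, and vector count are hypothetical.
 *
 *	struct fun_dev_params areq = {
 *		.cqe_size_log2 = 6,	// 64B admin CQEs
 *		.sqe_size_log2 = 6,	// 64B admin SQEs
 *		.cq_depth = 64,
 *		.sq_depth = 64,
 *		.min_msix = 2,		// I/O vectors; admin CQ adds one more
 *	};
 *
 *	rc = fun_dev_enable(fdev, pdev, &areq, KBUILD_MODNAME);
 *	if (rc)
 *		return rc;
 *	...
 *	fun_dev_disable(fdev);	// undoes fun_dev_enable()
 */
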
MODULE_AUTHOR("Dimitris Michailidis <dmichail@fungible.com>");
MODULE_DESCRIPTION("Core services driver for Fungible devices");
MODULE_LICENSE("Dual BSD/GPL");