/*
 * Copyright (c) 2012-2016 VMware, Inc.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of EITHER the GNU General Public License
 * version 2 as published by the Free Software Foundation or the BSD
 * 2-Clause License. This program is distributed in the hope that it
 * will be useful, but WITHOUT ANY WARRANTY; WITHOUT EVEN THE IMPLIED
 * WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License version 2 for more details at
 * http://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program available in the file COPYING in the main
 * directory of this source tree.
 *
 * The BSD 2-Clause License
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 *  - Redistributions of source code must retain the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer.
 *
 *  - Redistributions in binary form must reproduce the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer in the documentation and/or other materials
 *    provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
 * OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/errno.h>
#include <linux/inetdevice.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_user_verbs.h>
#include <net/addrconf.h>

#include "pvrdma.h"

#define DRV_NAME	"vmw_pvrdma"
#define DRV_VERSION	"1.0.1.0-k"

static DEFINE_MUTEX(pvrdma_device_list_lock);
static LIST_HEAD(pvrdma_device_list);
static struct workqueue_struct *event_wq;

static int pvrdma_add_gid(const struct ib_gid_attr *attr, void **context);
static int pvrdma_del_gid(const struct ib_gid_attr *attr, void **context);

static ssize_t hca_type_show(struct device *device,
			     struct device_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "VMW_PVRDMA-%s\n", DRV_VERSION);
}
static DEVICE_ATTR_RO(hca_type);

static ssize_t hw_rev_show(struct device *device,
			   struct device_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%d\n", PVRDMA_REV_ID);
}
static DEVICE_ATTR_RO(hw_rev);

static ssize_t board_id_show(struct device *device,
			     struct device_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%d\n", PVRDMA_BOARD_ID);
}
static DEVICE_ATTR_RO(board_id);

static struct attribute *pvrdma_class_attributes[] = {
	&dev_attr_hw_rev.attr,
	&dev_attr_hca_type.attr,
	&dev_attr_board_id.attr,
	NULL,
};

static const struct attribute_group pvrdma_attr_group = {
	.attrs = pvrdma_class_attributes,
};

static void pvrdma_get_fw_ver_str(struct ib_device *device, char *str)
{
	struct pvrdma_dev *dev =
		container_of(device, struct pvrdma_dev, ib_dev);
	snprintf(str, IB_FW_VERSION_NAME_MAX, "%d.%d.%d\n",
		 (int) (dev->dsr->caps.fw_ver >> 32),
		 (int) (dev->dsr->caps.fw_ver >> 16) & 0xffff,
		 (int) dev->dsr->caps.fw_ver & 0xffff);
}

static int pvrdma_init_device(struct pvrdma_dev *dev)
{
	/* Initialize some device related stuff */
	spin_lock_init(&dev->cmd_lock);
	sema_init(&dev->cmd_sema, 1);
	atomic_set(&dev->num_qps, 0);
	atomic_set(&dev->num_srqs, 0);
	atomic_set(&dev->num_cqs, 0);
	atomic_set(&dev->num_pds, 0);
	atomic_set(&dev->num_ahs, 0);

	return 0;
}

static int pvrdma_port_immutable(struct ib_device *ibdev, u32 port_num,
				 struct ib_port_immutable *immutable)
{
	struct pvrdma_dev *dev = to_vdev(ibdev);
	struct ib_port_attr attr;
	int err;

	if (dev->dsr->caps.gid_types == PVRDMA_GID_TYPE_FLAG_ROCE_V1)
		immutable->core_cap_flags |= RDMA_CORE_PORT_IBA_ROCE;
	else if (dev->dsr->caps.gid_types == PVRDMA_GID_TYPE_FLAG_ROCE_V2)
		immutable->core_cap_flags |= RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;

	err = ib_query_port(ibdev, port_num, &attr);
	if (err)
		return err;

	immutable->pkey_tbl_len = attr.pkey_tbl_len;
	immutable->gid_tbl_len = attr.gid_tbl_len;
	immutable->max_mad_size = IB_MGMT_MAD_SIZE;
	return 0;
}

static const struct ib_device_ops pvrdma_dev_ops = {
	.owner = THIS_MODULE,
	.driver_id = RDMA_DRIVER_VMW_PVRDMA,
	.uverbs_abi_ver = PVRDMA_UVERBS_ABI_VERSION,

	.add_gid = pvrdma_add_gid,
	.alloc_mr = pvrdma_alloc_mr,
	.alloc_pd = pvrdma_alloc_pd,
	.alloc_ucontext = pvrdma_alloc_ucontext,
	.create_ah = pvrdma_create_ah,
	.create_cq = pvrdma_create_cq,
	.create_qp = pvrdma_create_qp,
	.dealloc_pd = pvrdma_dealloc_pd,
	.dealloc_ucontext = pvrdma_dealloc_ucontext,
	.del_gid = pvrdma_del_gid,
	.dereg_mr = pvrdma_dereg_mr,
	.destroy_ah = pvrdma_destroy_ah,
	.destroy_cq = pvrdma_destroy_cq,
	.destroy_qp = pvrdma_destroy_qp,
	.device_group = &pvrdma_attr_group,
	.get_dev_fw_str = pvrdma_get_fw_ver_str,
	.get_dma_mr = pvrdma_get_dma_mr,
	.get_link_layer = pvrdma_port_link_layer,
	.get_port_immutable = pvrdma_port_immutable,
	.map_mr_sg = pvrdma_map_mr_sg,
	.mmap = pvrdma_mmap,
	.modify_port = pvrdma_modify_port,
	.modify_qp = pvrdma_modify_qp,
	.poll_cq = pvrdma_poll_cq,
	.post_recv = pvrdma_post_recv,
	.post_send = pvrdma_post_send,
	.query_device = pvrdma_query_device,
	.query_gid = pvrdma_query_gid,
	.query_pkey = pvrdma_query_pkey,
	.query_port = pvrdma_query_port,
	.query_qp = pvrdma_query_qp,
	.reg_user_mr = pvrdma_reg_user_mr,
	.req_notify_cq = pvrdma_req_notify_cq,

	INIT_RDMA_OBJ_SIZE(ib_ah, pvrdma_ah, ibah),
	INIT_RDMA_OBJ_SIZE(ib_cq, pvrdma_cq, ibcq),
	INIT_RDMA_OBJ_SIZE(ib_pd, pvrdma_pd, ibpd),
	INIT_RDMA_OBJ_SIZE(ib_qp, pvrdma_qp, ibqp),
	INIT_RDMA_OBJ_SIZE(ib_ucontext, pvrdma_ucontext, ibucontext),
};

static const struct ib_device_ops pvrdma_dev_srq_ops = {
	.create_srq = pvrdma_create_srq,
	.destroy_srq = pvrdma_destroy_srq,
	.modify_srq = pvrdma_modify_srq,
	.query_srq = pvrdma_query_srq,

	INIT_RDMA_OBJ_SIZE(ib_srq, pvrdma_srq, ibsrq),
};

static int pvrdma_register_device(struct pvrdma_dev *dev)
{
	int ret = -1;

	dev->ib_dev.node_guid = dev->dsr->caps.node_guid;
	dev->sys_image_guid = dev->dsr->caps.sys_image_guid;
	dev->ib_dev.num_comp_vectors = 1;
	dev->ib_dev.dev.parent = &dev->pdev->dev;

	dev->ib_dev.node_type = RDMA_NODE_IB_CA;
	dev->ib_dev.phys_port_cnt = dev->dsr->caps.phys_port_cnt;

	ib_set_device_ops(&dev->ib_dev, &pvrdma_dev_ops);

	mutex_init(&dev->port_mutex);
	spin_lock_init(&dev->desc_lock);

	dev->cq_tbl = kcalloc(dev->dsr->caps.max_cq, sizeof(struct pvrdma_cq *),
			      GFP_KERNEL);
	if (!dev->cq_tbl)
		return ret;
	spin_lock_init(&dev->cq_tbl_lock);

	dev->qp_tbl = kcalloc(dev->dsr->caps.max_qp, sizeof(struct pvrdma_qp *),
			      GFP_KERNEL);
	if (!dev->qp_tbl)
		goto err_cq_free;
	spin_lock_init(&dev->qp_tbl_lock);

	/* Check if SRQ is supported by backend */
	if (dev->dsr->caps.max_srq) {
		ib_set_device_ops(&dev->ib_dev, &pvrdma_dev_srq_ops);

		dev->srq_tbl = kcalloc(dev->dsr->caps.max_srq,
				       sizeof(struct pvrdma_srq *),
				       GFP_KERNEL);
		if (!dev->srq_tbl)
			goto err_qp_free;
	}
	ret = ib_device_set_netdev(&dev->ib_dev, dev->netdev, 1);
	if (ret)
		goto err_srq_free;
	spin_lock_init(&dev->srq_tbl_lock);

	ret = ib_register_device(&dev->ib_dev, "vmw_pvrdma%d", &dev->pdev->dev);
	if (ret)
		goto err_srq_free;

	dev->ib_active = true;

	return 0;

err_srq_free:
	kfree(dev->srq_tbl);
err_qp_free:
	kfree(dev->qp_tbl);
err_cq_free:
	kfree(dev->cq_tbl);

	return ret;
}

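/*
 * Interrupt handlers. Vector 0 carries command-channel responses,
 * vector 1 carries asynchronous device/QP/CQ/SRQ events, and any
 * remaining vectors carry CQ completion notifications (see
 * pvrdma_alloc_intrs() below for how the vectors are requested).
 */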
static irqreturn_t pvrdma_intr0_handler(int irq, void *dev_id)
{
	u32 icr = PVRDMA_INTR_CAUSE_RESPONSE;
	struct pvrdma_dev *dev = dev_id;

	dev_dbg(&dev->pdev->dev, "interrupt 0 (response) handler\n");

	if (!dev->pdev->msix_enabled) {
		/* Legacy intr */
		icr = pvrdma_read_reg(dev, PVRDMA_REG_ICR);
		if (icr == 0)
			return IRQ_NONE;
	}

	if (icr == PVRDMA_INTR_CAUSE_RESPONSE)
		complete(&dev->cmd_done);

	return IRQ_HANDLED;
}

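/*
 * The QP/CQ/SRQ event helpers below take a reference on the object
 * under the corresponding table lock before invoking the consumer's
 * event handler, and signal the object's completion when the last
 * reference drops so that the destroy path can wait for in-flight
 * events.
 */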
static void pvrdma_qp_event(struct pvrdma_dev *dev, u32 qpn, int type)
{
	struct pvrdma_qp *qp;
	unsigned long flags;

	spin_lock_irqsave(&dev->qp_tbl_lock, flags);
	qp = dev->qp_tbl[qpn % dev->dsr->caps.max_qp];
	if (qp)
		refcount_inc(&qp->refcnt);
	spin_unlock_irqrestore(&dev->qp_tbl_lock, flags);

	if (qp && qp->ibqp.event_handler) {
		struct ib_qp *ibqp = &qp->ibqp;
		struct ib_event e;

		e.device = ibqp->device;
		e.element.qp = ibqp;
		e.event = type; /* 1:1 mapping for now. */
		ibqp->event_handler(&e, ibqp->qp_context);
	}
	if (qp) {
		if (refcount_dec_and_test(&qp->refcnt))
			complete(&qp->free);
	}
}

static void pvrdma_cq_event(struct pvrdma_dev *dev, u32 cqn, int type)
{
	struct pvrdma_cq *cq;
	unsigned long flags;

	spin_lock_irqsave(&dev->cq_tbl_lock, flags);
	cq = dev->cq_tbl[cqn % dev->dsr->caps.max_cq];
	if (cq)
		refcount_inc(&cq->refcnt);
	spin_unlock_irqrestore(&dev->cq_tbl_lock, flags);

	if (cq && cq->ibcq.event_handler) {
		struct ib_cq *ibcq = &cq->ibcq;
		struct ib_event e;

		e.device = ibcq->device;
		e.element.cq = ibcq;
		e.event = type; /* 1:1 mapping for now. */
		ibcq->event_handler(&e, ibcq->cq_context);
	}
	if (cq) {
		if (refcount_dec_and_test(&cq->refcnt))
			complete(&cq->free);
	}
}

static void pvrdma_srq_event(struct pvrdma_dev *dev, u32 srqn, int type)
{
	struct pvrdma_srq *srq;
	unsigned long flags;

	spin_lock_irqsave(&dev->srq_tbl_lock, flags);
	if (dev->srq_tbl)
		srq = dev->srq_tbl[srqn % dev->dsr->caps.max_srq];
	else
		srq = NULL;
	if (srq)
		refcount_inc(&srq->refcnt);
	spin_unlock_irqrestore(&dev->srq_tbl_lock, flags);

	if (srq && srq->ibsrq.event_handler) {
		struct ib_srq *ibsrq = &srq->ibsrq;
		struct ib_event e;

		e.device = ibsrq->device;
		e.element.srq = ibsrq;
		e.event = type; /* 1:1 mapping for now. */
		ibsrq->event_handler(&e, ibsrq->srq_context);
	}
	if (srq) {
		if (refcount_dec_and_test(&srq->refcnt))
			complete(&srq->free);
	}
}

static void pvrdma_dispatch_event(struct pvrdma_dev *dev, int port,
				  enum ib_event_type event)
{
	struct ib_event ib_event;

	memset(&ib_event, 0, sizeof(ib_event));
	ib_event.device = &dev->ib_dev;
	ib_event.element.port_num = port;
	ib_event.event = event;
	ib_dispatch_event(&ib_event);
}

static void pvrdma_dev_event(struct pvrdma_dev *dev, u8 port, int type)
{
	if (port < 1 || port > dev->dsr->caps.phys_port_cnt) {
		dev_warn(&dev->pdev->dev, "event on port %d\n", port);
		return;
	}

	pvrdma_dispatch_event(dev, port, type);
}

static inline struct pvrdma_eqe *get_eqe(struct pvrdma_dev *dev, unsigned int i)
{
	return (struct pvrdma_eqe *)pvrdma_page_dir_get_ptr(
					&dev->async_pdir,
					PAGE_SIZE +
					sizeof(struct pvrdma_eqe) * i);
}

static irqreturn_t pvrdma_intr1_handler(int irq, void *dev_id)
{
	struct pvrdma_dev *dev = dev_id;
	struct pvrdma_ring *ring = &dev->async_ring_state->rx;
	int ring_slots = (dev->dsr->async_ring_pages.num_pages - 1) *
			 PAGE_SIZE / sizeof(struct pvrdma_eqe);
	unsigned int head;

	dev_dbg(&dev->pdev->dev, "interrupt 1 (async event) handler\n");

	/*
	 * Don't process events until the IB device is registered. Otherwise
	 * we'll try to ib_dispatch_event() on an invalid device.
	 */
	if (!dev->ib_active)
		return IRQ_HANDLED;

	while (pvrdma_idx_ring_has_data(ring, ring_slots, &head) > 0) {
		struct pvrdma_eqe *eqe;

		eqe = get_eqe(dev, head);

		switch (eqe->type) {
		case PVRDMA_EVENT_QP_FATAL:
		case PVRDMA_EVENT_QP_REQ_ERR:
		case PVRDMA_EVENT_QP_ACCESS_ERR:
		case PVRDMA_EVENT_COMM_EST:
		case PVRDMA_EVENT_SQ_DRAINED:
		case PVRDMA_EVENT_PATH_MIG:
		case PVRDMA_EVENT_PATH_MIG_ERR:
		case PVRDMA_EVENT_QP_LAST_WQE_REACHED:
			pvrdma_qp_event(dev, eqe->info, eqe->type);
			break;

		case PVRDMA_EVENT_CQ_ERR:
			pvrdma_cq_event(dev, eqe->info, eqe->type);
			break;

		case PVRDMA_EVENT_SRQ_ERR:
		case PVRDMA_EVENT_SRQ_LIMIT_REACHED:
			pvrdma_srq_event(dev, eqe->info, eqe->type);
			break;

		case PVRDMA_EVENT_PORT_ACTIVE:
		case PVRDMA_EVENT_PORT_ERR:
		case PVRDMA_EVENT_LID_CHANGE:
		case PVRDMA_EVENT_PKEY_CHANGE:
		case PVRDMA_EVENT_SM_CHANGE:
		case PVRDMA_EVENT_CLIENT_REREGISTER:
		case PVRDMA_EVENT_GID_CHANGE:
			pvrdma_dev_event(dev, eqe->info, eqe->type);
			break;

		case PVRDMA_EVENT_DEVICE_FATAL:
			pvrdma_dev_event(dev, 1, eqe->type);
			break;

		default:
			break;
		}

		pvrdma_idx_ring_inc(&ring->cons_head, ring_slots);
	}

	return IRQ_HANDLED;
}

static inline struct pvrdma_cqne *get_cqne(struct pvrdma_dev *dev,
					   unsigned int i)
{
	return (struct pvrdma_cqne *)pvrdma_page_dir_get_ptr(
					&dev->cq_pdir,
					PAGE_SIZE +
					sizeof(struct pvrdma_cqne) * i);
}

static irqreturn_t pvrdma_intrx_handler(int irq, void *dev_id)
{
	struct pvrdma_dev *dev = dev_id;
	struct pvrdma_ring *ring = &dev->cq_ring_state->rx;
	int ring_slots = (dev->dsr->cq_ring_pages.num_pages - 1) * PAGE_SIZE /
			 sizeof(struct pvrdma_cqne);
	unsigned int head;

	dev_dbg(&dev->pdev->dev, "interrupt x (completion) handler\n");

	while (pvrdma_idx_ring_has_data(ring, ring_slots, &head) > 0) {
		struct pvrdma_cqne *cqne;
		struct pvrdma_cq *cq;

		cqne = get_cqne(dev, head);
		spin_lock(&dev->cq_tbl_lock);
		cq = dev->cq_tbl[cqne->info % dev->dsr->caps.max_cq];
		if (cq)
			refcount_inc(&cq->refcnt);
		spin_unlock(&dev->cq_tbl_lock);

		if (cq && cq->ibcq.comp_handler)
			cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
		if (cq) {
			if (refcount_dec_and_test(&cq->refcnt))
				complete(&cq->free);
		}
		pvrdma_idx_ring_inc(&ring->cons_head, ring_slots);
	}

	return IRQ_HANDLED;
}

static void pvrdma_free_irq(struct pvrdma_dev *dev)
{
	int i;

	dev_dbg(&dev->pdev->dev, "freeing interrupts\n");
	for (i = 0; i < dev->nr_vectors; i++)
		free_irq(pci_irq_vector(dev->pdev, i), dev);
}

static void pvrdma_enable_intrs(struct pvrdma_dev *dev)
{
	dev_dbg(&dev->pdev->dev, "enable interrupts\n");
	pvrdma_write_reg(dev, PVRDMA_REG_IMR, 0);
}

static void pvrdma_disable_intrs(struct pvrdma_dev *dev)
{
	dev_dbg(&dev->pdev->dev, "disable interrupts\n");
	pvrdma_write_reg(dev, PVRDMA_REG_IMR, ~0);
}

static int pvrdma_alloc_intrs(struct pvrdma_dev *dev)
{
	struct pci_dev *pdev = dev->pdev;
	int ret = 0, i;

	ret = pci_alloc_irq_vectors(pdev, 1, PVRDMA_MAX_INTERRUPTS,
				    PCI_IRQ_MSIX);
	if (ret < 0) {
		ret = pci_alloc_irq_vectors(pdev, 1, 1,
					    PCI_IRQ_MSI | PCI_IRQ_LEGACY);
		if (ret < 0)
			return ret;
	}
	dev->nr_vectors = ret;

	ret = request_irq(pci_irq_vector(dev->pdev, 0), pvrdma_intr0_handler,
			  pdev->msix_enabled ? 0 : IRQF_SHARED, DRV_NAME, dev);
	if (ret) {
		dev_err(&dev->pdev->dev,
			"failed to request interrupt 0\n");
		goto out_free_vectors;
	}

	for (i = 1; i < dev->nr_vectors; i++) {
		ret = request_irq(pci_irq_vector(dev->pdev, i),
				  i == 1 ? pvrdma_intr1_handler :
					   pvrdma_intrx_handler,
				  0, DRV_NAME, dev);
		if (ret) {
			dev_err(&dev->pdev->dev,
				"failed to request interrupt %d\n", i);
			goto free_irqs;
		}
	}

	return 0;

free_irqs:
	while (--i >= 0)
		free_irq(pci_irq_vector(dev->pdev, i), dev);
out_free_vectors:
	pci_free_irq_vectors(pdev);
	return ret;
}

static void pvrdma_free_slots(struct pvrdma_dev *dev)
{
	struct pci_dev *pdev = dev->pdev;

	if (dev->resp_slot)
		dma_free_coherent(&pdev->dev, PAGE_SIZE, dev->resp_slot,
				  dev->dsr->resp_slot_dma);
	if (dev->cmd_slot)
		dma_free_coherent(&pdev->dev, PAGE_SIZE, dev->cmd_slot,
				  dev->dsr->cmd_slot_dma);
}

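/*
 * GID table management. Adding or removing a GID is forwarded to the
 * device as a PVRDMA_CMD_CREATE_BIND/PVRDMA_CMD_DESTROY_BIND command,
 * and the driver mirrors the entry in its local sgid_tbl.
 */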
static int pvrdma_add_gid_at_index(struct pvrdma_dev *dev,
				   const union ib_gid *gid,
				   u8 gid_type,
				   int index)
{
	int ret;
	union pvrdma_cmd_req req;
	struct pvrdma_cmd_create_bind *cmd_bind = &req.create_bind;

	if (!dev->sgid_tbl) {
		dev_warn(&dev->pdev->dev, "sgid table not initialized\n");
		return -EINVAL;
	}

	memset(cmd_bind, 0, sizeof(*cmd_bind));
	cmd_bind->hdr.cmd = PVRDMA_CMD_CREATE_BIND;
	memcpy(cmd_bind->new_gid, gid->raw, 16);
	cmd_bind->mtu = ib_mtu_enum_to_int(IB_MTU_1024);
	cmd_bind->vlan = 0xfff;
	cmd_bind->index = index;
	cmd_bind->gid_type = gid_type;

	ret = pvrdma_cmd_post(dev, &req, NULL, 0);
	if (ret < 0) {
		dev_warn(&dev->pdev->dev,
			 "could not create binding, error: %d\n", ret);
		return -EFAULT;
	}
	memcpy(&dev->sgid_tbl[index], gid, sizeof(*gid));
	return 0;
}

static int pvrdma_add_gid(const struct ib_gid_attr *attr, void **context)
{
	struct pvrdma_dev *dev = to_vdev(attr->device);

	return pvrdma_add_gid_at_index(dev, &attr->gid,
				       ib_gid_type_to_pvrdma(attr->gid_type),
				       attr->index);
}

static int pvrdma_del_gid_at_index(struct pvrdma_dev *dev, int index)
{
	int ret;
	union pvrdma_cmd_req req;
	struct pvrdma_cmd_destroy_bind *cmd_dest = &req.destroy_bind;

	/* Update sgid table. */
	if (!dev->sgid_tbl) {
		dev_warn(&dev->pdev->dev, "sgid table not initialized\n");
		return -EINVAL;
	}

	memset(cmd_dest, 0, sizeof(*cmd_dest));
	cmd_dest->hdr.cmd = PVRDMA_CMD_DESTROY_BIND;
	memcpy(cmd_dest->dest_gid, &dev->sgid_tbl[index], 16);
	cmd_dest->index = index;

	ret = pvrdma_cmd_post(dev, &req, NULL, 0);
	if (ret < 0) {
		dev_warn(&dev->pdev->dev,
			 "could not destroy binding, error: %d\n", ret);
		return ret;
	}
	memset(&dev->sgid_tbl[index], 0, 16);
	return 0;
}

static int pvrdma_del_gid(const struct ib_gid_attr *attr, void **context)
{
	struct pvrdma_dev *dev = to_vdev(attr->device);

	dev_dbg(&dev->pdev->dev, "removing gid at index %u from %s",
		attr->index, dev->netdev->name);

	return pvrdma_del_gid_at_index(dev, attr->index);
}

static void pvrdma_netdevice_event_handle(struct pvrdma_dev *dev,
					  struct net_device *ndev,
					  unsigned long event)
{
	struct pci_dev *pdev_net;
	unsigned int slot;

	switch (event) {
	case NETDEV_REBOOT:
	case NETDEV_DOWN:
		pvrdma_dispatch_event(dev, 1, IB_EVENT_PORT_ERR);
		break;
	case NETDEV_UP:
		pvrdma_write_reg(dev, PVRDMA_REG_CTL,
				 PVRDMA_DEVICE_CTL_UNQUIESCE);

		mb();

		if (pvrdma_read_reg(dev, PVRDMA_REG_ERR))
			dev_err(&dev->pdev->dev,
				"failed to activate device during link up\n");
		else
			pvrdma_dispatch_event(dev, 1, IB_EVENT_PORT_ACTIVE);
		break;
	case NETDEV_UNREGISTER:
		ib_device_set_netdev(&dev->ib_dev, NULL, 1);
		dev_put(dev->netdev);
		dev->netdev = NULL;
		break;
	case NETDEV_REGISTER:
		/* vmxnet3 will have same bus, slot. But func will be 0 */
		slot = PCI_SLOT(dev->pdev->devfn);
		pdev_net = pci_get_slot(dev->pdev->bus,
					PCI_DEVFN(slot, 0));
		if ((dev->netdev == NULL) &&
		    (pci_get_drvdata(pdev_net) == ndev)) {
			/* this is our netdev */
			ib_device_set_netdev(&dev->ib_dev, ndev, 1);
			dev->netdev = ndev;
			dev_hold(ndev);
		}
		pci_dev_put(pdev_net);
		break;

	default:
		dev_dbg(&dev->pdev->dev, "ignore netdevice event %ld on %s\n",
			event, dev_name(&dev->ib_dev.dev));
		break;
	}
}

static void pvrdma_netdevice_event_work(struct work_struct *work)
{
	struct pvrdma_netdevice_work *netdev_work;
	struct pvrdma_dev *dev;

	netdev_work = container_of(work, struct pvrdma_netdevice_work, work);

	mutex_lock(&pvrdma_device_list_lock);
	list_for_each_entry(dev, &pvrdma_device_list, device_link) {
		if ((netdev_work->event == NETDEV_REGISTER) ||
		    (dev->netdev == netdev_work->event_netdev)) {
			pvrdma_netdevice_event_handle(dev,
						      netdev_work->event_netdev,
						      netdev_work->event);
			break;
		}
	}
	mutex_unlock(&pvrdma_device_list_lock);

	kfree(netdev_work);
}

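/*
 * Netdevice notifier callback: capture the event with a small
 * GFP_ATOMIC allocation and defer the real handling to the ordered
 * event workqueue so the notifier path itself does no heavy work.
 */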
static int pvrdma_netdevice_event(struct notifier_block *this,
				  unsigned long event, void *ptr)
{
	struct net_device *event_netdev = netdev_notifier_info_to_dev(ptr);
	struct pvrdma_netdevice_work *netdev_work;

	netdev_work = kmalloc(sizeof(*netdev_work), GFP_ATOMIC);
	if (!netdev_work)
		return NOTIFY_BAD;

	INIT_WORK(&netdev_work->work, pvrdma_netdevice_event_work);
	netdev_work->event_netdev = event_netdev;
	netdev_work->event = event;
	queue_work(event_wq, &netdev_work->work);

	return NOTIFY_DONE;
}

static int pvrdma_pci_probe(struct pci_dev *pdev,
			    const struct pci_device_id *id)
{
	struct pci_dev *pdev_net;
	struct pvrdma_dev *dev;
	int ret;
	unsigned long start;
	unsigned long len;
	dma_addr_t slot_dma = 0;

	dev_dbg(&pdev->dev, "initializing driver %s\n", pci_name(pdev));

	/* Allocate zero-out device */
	dev = ib_alloc_device(pvrdma_dev, ib_dev);
	if (!dev) {
		dev_err(&pdev->dev, "failed to allocate IB device\n");
		return -ENOMEM;
	}

	mutex_lock(&pvrdma_device_list_lock);
	list_add(&dev->device_link, &pvrdma_device_list);
	mutex_unlock(&pvrdma_device_list_lock);

	ret = pvrdma_init_device(dev);
	if (ret)
		goto err_free_device;

	dev->pdev = pdev;
	pci_set_drvdata(pdev, dev);

	ret = pci_enable_device(pdev);
	if (ret) {
		dev_err(&pdev->dev, "cannot enable PCI device\n");
		goto err_free_device;
	}

	dev_dbg(&pdev->dev, "PCI resource flags BAR0 %#lx\n",
		pci_resource_flags(pdev, 0));
	dev_dbg(&pdev->dev, "PCI resource len %#llx\n",
		(unsigned long long)pci_resource_len(pdev, 0));
	dev_dbg(&pdev->dev, "PCI resource start %#llx\n",
		(unsigned long long)pci_resource_start(pdev, 0));
	dev_dbg(&pdev->dev, "PCI resource flags BAR1 %#lx\n",
		pci_resource_flags(pdev, 1));
	dev_dbg(&pdev->dev, "PCI resource len %#llx\n",
		(unsigned long long)pci_resource_len(pdev, 1));
	dev_dbg(&pdev->dev, "PCI resource start %#llx\n",
		(unsigned long long)pci_resource_start(pdev, 1));

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM) ||
	    !(pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) {
		dev_err(&pdev->dev, "PCI BAR region not MMIO\n");
		ret = -ENOMEM;
		goto err_disable_pdev;
	}

	ret = pci_request_regions(pdev, DRV_NAME);
	if (ret) {
		dev_err(&pdev->dev, "cannot request PCI resources\n");
		goto err_disable_pdev;
	}

	/* Enable 64-Bit DMA */
	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (ret) {
		dev_err(&pdev->dev, "dma_set_mask failed\n");
		goto err_free_resource;
	}
	dma_set_max_seg_size(&pdev->dev, UINT_MAX);
	pci_set_master(pdev);

	/* Map register space */
	start = pci_resource_start(dev->pdev, PVRDMA_PCI_RESOURCE_REG);
	len = pci_resource_len(dev->pdev, PVRDMA_PCI_RESOURCE_REG);
	dev->regs = ioremap(start, len);
	if (!dev->regs) {
		dev_err(&pdev->dev, "register mapping failed\n");
		ret = -ENOMEM;
		goto err_free_resource;
	}

	/* Setup per-device UAR. */
	dev->driver_uar.index = 0;
	dev->driver_uar.pfn =
		pci_resource_start(dev->pdev, PVRDMA_PCI_RESOURCE_UAR) >>
		PAGE_SHIFT;
	dev->driver_uar.map =
		ioremap(dev->driver_uar.pfn << PAGE_SHIFT, PAGE_SIZE);
	if (!dev->driver_uar.map) {
		dev_err(&pdev->dev, "failed to remap UAR pages\n");
		ret = -ENOMEM;
		goto err_unmap_regs;
	}

	dev->dsr_version = pvrdma_read_reg(dev, PVRDMA_REG_VERSION);
	dev_info(&pdev->dev, "device version %d, driver version %d\n",
		 dev->dsr_version, PVRDMA_VERSION);

	dev->dsr = dma_alloc_coherent(&pdev->dev, sizeof(*dev->dsr),
				      &dev->dsrbase, GFP_KERNEL);
	if (!dev->dsr) {
		dev_err(&pdev->dev, "failed to allocate shared region\n");
		ret = -ENOMEM;
		goto err_uar_unmap;
	}

	/* Setup the shared region */
	dev->dsr->driver_version = PVRDMA_VERSION;
	dev->dsr->gos_info.gos_bits = sizeof(void *) == 4 ?
		PVRDMA_GOS_BITS_32 :
		PVRDMA_GOS_BITS_64;
	dev->dsr->gos_info.gos_type = PVRDMA_GOS_TYPE_LINUX;
	dev->dsr->gos_info.gos_ver = 1;

	if (dev->dsr_version < PVRDMA_PPN64_VERSION)
		dev->dsr->uar_pfn = dev->driver_uar.pfn;
	else
		dev->dsr->uar_pfn64 = dev->driver_uar.pfn;

	dev->cmd_slot = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
					   &slot_dma, GFP_KERNEL);
	if (!dev->cmd_slot) {
		ret = -ENOMEM;
		goto err_free_dsr;
	}

	dev->dsr->cmd_slot_dma = (u64)slot_dma;

	dev->resp_slot = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
					    &slot_dma, GFP_KERNEL);
	if (!dev->resp_slot) {
		ret = -ENOMEM;
		goto err_free_slots;
	}

	dev->dsr->resp_slot_dma = (u64)slot_dma;

	/* Async event ring */
	dev->dsr->async_ring_pages.num_pages = PVRDMA_NUM_RING_PAGES;
	ret = pvrdma_page_dir_init(dev, &dev->async_pdir,
				   dev->dsr->async_ring_pages.num_pages, true);
	if (ret)
		goto err_free_slots;
	dev->async_ring_state = dev->async_pdir.pages[0];
	dev->dsr->async_ring_pages.pdir_dma = dev->async_pdir.dir_dma;

	/* CQ notification ring */
	dev->dsr->cq_ring_pages.num_pages = PVRDMA_NUM_RING_PAGES;
	ret = pvrdma_page_dir_init(dev, &dev->cq_pdir,
				   dev->dsr->cq_ring_pages.num_pages, true);
	if (ret)
		goto err_free_async_ring;
	dev->cq_ring_state = dev->cq_pdir.pages[0];
	dev->dsr->cq_ring_pages.pdir_dma = dev->cq_pdir.dir_dma;

	/*
	 * Write the PA of the shared region to the device. The writes must be
	 * ordered such that the high bits are written last. When the writes
	 * complete, the device will have filled out the capabilities.
	 */

	pvrdma_write_reg(dev, PVRDMA_REG_DSRLOW, (u32)dev->dsrbase);
	pvrdma_write_reg(dev, PVRDMA_REG_DSRHIGH,
			 (u32)((u64)(dev->dsrbase) >> 32));

	/* Make sure the write is complete before reading status. */
	mb();

	/* The driver supports RoCE V1 and V2. */
	if (!PVRDMA_SUPPORTED(dev)) {
		dev_err(&pdev->dev, "driver needs RoCE v1 or v2 support\n");
		ret = -EFAULT;
		goto err_free_cq_ring;
	}

	/* Paired vmxnet3 will have same bus, slot. But func will be 0 */
	pdev_net = pci_get_slot(pdev->bus, PCI_DEVFN(PCI_SLOT(pdev->devfn), 0));
	if (!pdev_net) {
		dev_err(&pdev->dev, "failed to find paired net device\n");
		ret = -ENODEV;
		goto err_free_cq_ring;
	}

	if (pdev_net->vendor != PCI_VENDOR_ID_VMWARE ||
	    pdev_net->device != PCI_DEVICE_ID_VMWARE_VMXNET3) {
		dev_err(&pdev->dev, "failed to find paired vmxnet3 device\n");
		pci_dev_put(pdev_net);
		ret = -ENODEV;
		goto err_free_cq_ring;
	}

	dev->netdev = pci_get_drvdata(pdev_net);
	pci_dev_put(pdev_net);
	if (!dev->netdev) {
		dev_err(&pdev->dev, "failed to get vmxnet3 device\n");
		ret = -ENODEV;
		goto err_free_cq_ring;
	}
	dev_hold(dev->netdev);

	dev_info(&pdev->dev, "paired device to %s\n", dev->netdev->name);

	/* Interrupt setup */
	ret = pvrdma_alloc_intrs(dev);
	if (ret) {
		dev_err(&pdev->dev, "failed to allocate interrupts\n");
		ret = -ENOMEM;
		goto err_free_cq_ring;
	}

	/* Allocate UAR table. */
	ret = pvrdma_uar_table_init(dev);
	if (ret) {
		dev_err(&pdev->dev, "failed to allocate UAR table\n");
		ret = -ENOMEM;
		goto err_free_intrs;
	}

	/* Allocate GID table */
	dev->sgid_tbl = kcalloc(dev->dsr->caps.gid_tbl_len,
				sizeof(union ib_gid), GFP_KERNEL);
	if (!dev->sgid_tbl) {
		ret = -ENOMEM;
		goto err_free_uar_table;
	}
	dev_dbg(&pdev->dev, "gid table len %d\n", dev->dsr->caps.gid_tbl_len);

	pvrdma_enable_intrs(dev);

	/* Activate pvrdma device */
	pvrdma_write_reg(dev, PVRDMA_REG_CTL, PVRDMA_DEVICE_CTL_ACTIVATE);

	/* Make sure the write is complete before reading status. */
	mb();

	/* Check if device was successfully activated */
	ret = pvrdma_read_reg(dev, PVRDMA_REG_ERR);
	if (ret != 0) {
		dev_err(&pdev->dev, "failed to activate device\n");
		ret = -EFAULT;
		goto err_disable_intr;
	}

	/* Register IB device */
	ret = pvrdma_register_device(dev);
	if (ret) {
		dev_err(&pdev->dev, "failed to register IB device\n");
		goto err_disable_intr;
	}

	dev->nb_netdev.notifier_call = pvrdma_netdevice_event;
	ret = register_netdevice_notifier(&dev->nb_netdev);
	if (ret) {
		dev_err(&pdev->dev, "failed to register netdevice events\n");
		goto err_unreg_ibdev;
	}

	dev_info(&pdev->dev, "attached to device\n");
	return 0;

err_unreg_ibdev:
	ib_unregister_device(&dev->ib_dev);
err_disable_intr:
	pvrdma_disable_intrs(dev);
	kfree(dev->sgid_tbl);
err_free_uar_table:
	pvrdma_uar_table_cleanup(dev);
err_free_intrs:
	pvrdma_free_irq(dev);
	pci_free_irq_vectors(pdev);
err_free_cq_ring:
	dev_put(dev->netdev);
	pvrdma_page_dir_cleanup(dev, &dev->cq_pdir);
err_free_async_ring:
	pvrdma_page_dir_cleanup(dev, &dev->async_pdir);
err_free_slots:
	pvrdma_free_slots(dev);
err_free_dsr:
	dma_free_coherent(&pdev->dev, sizeof(*dev->dsr), dev->dsr,
			  dev->dsrbase);
err_uar_unmap:
	iounmap(dev->driver_uar.map);
err_unmap_regs:
	iounmap(dev->regs);
err_free_resource:
	pci_release_regions(pdev);
err_disable_pdev:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
err_free_device:
	mutex_lock(&pvrdma_device_list_lock);
	list_del(&dev->device_link);
	mutex_unlock(&pvrdma_device_list_lock);
	ib_dealloc_device(&dev->ib_dev);
	return ret;
}

static void pvrdma_pci_remove(struct pci_dev *pdev)
{
	struct pvrdma_dev *dev = pci_get_drvdata(pdev);

	if (!dev)
		return;

	dev_info(&pdev->dev, "detaching from device\n");

	unregister_netdevice_notifier(&dev->nb_netdev);
	dev->nb_netdev.notifier_call = NULL;

	flush_workqueue(event_wq);

	dev_put(dev->netdev);
	dev->netdev = NULL;

	/* Unregister ib device */
	ib_unregister_device(&dev->ib_dev);

	mutex_lock(&pvrdma_device_list_lock);
	list_del(&dev->device_link);
	mutex_unlock(&pvrdma_device_list_lock);

	pvrdma_disable_intrs(dev);
	pvrdma_free_irq(dev);
	pci_free_irq_vectors(pdev);

	/* Deactivate pvrdma device */
	pvrdma_write_reg(dev, PVRDMA_REG_CTL, PVRDMA_DEVICE_CTL_RESET);
	pvrdma_page_dir_cleanup(dev, &dev->cq_pdir);
	pvrdma_page_dir_cleanup(dev, &dev->async_pdir);
	pvrdma_free_slots(dev);
	dma_free_coherent(&pdev->dev, sizeof(*dev->dsr), dev->dsr,
			  dev->dsrbase);

	iounmap(dev->regs);
	kfree(dev->sgid_tbl);
	kfree(dev->cq_tbl);
	kfree(dev->srq_tbl);
	kfree(dev->qp_tbl);
	pvrdma_uar_table_cleanup(dev);
	iounmap(dev->driver_uar.map);

	ib_dealloc_device(&dev->ib_dev);

	/* Free pci resources */
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}

static const struct pci_device_id pvrdma_pci_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_VMWARE, PCI_DEVICE_ID_VMWARE_PVRDMA), },
	{ 0 },
};

MODULE_DEVICE_TABLE(pci, pvrdma_pci_table);

static struct pci_driver pvrdma_driver = {
	.name		= DRV_NAME,
	.id_table	= pvrdma_pci_table,
	.probe		= pvrdma_pci_probe,
	.remove		= pvrdma_pci_remove,
};

static int __init pvrdma_init(void)
{
	int err;

	event_wq = alloc_ordered_workqueue("pvrdma_event_wq", WQ_MEM_RECLAIM);
	if (!event_wq)
		return -ENOMEM;

	err = pci_register_driver(&pvrdma_driver);
	if (err)
		destroy_workqueue(event_wq);

	return err;
}

static void __exit pvrdma_cleanup(void)
{
	pci_unregister_driver(&pvrdma_driver);

	destroy_workqueue(event_wq);
}

module_init(pvrdma_init);
module_exit(pvrdma_cleanup);

MODULE_AUTHOR("VMware, Inc");
MODULE_DESCRIPTION("VMware Paravirtual RDMA driver");
MODULE_LICENSE("Dual BSD/GPL");