/*
 * Copyright (c) 2012-2016 VMware, Inc.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of EITHER the GNU General Public License
 * version 2 as published by the Free Software Foundation or the BSD
 * 2-Clause License. This program is distributed in the hope that it
 * will be useful, but WITHOUT ANY WARRANTY; WITHOUT EVEN THE IMPLIED
 * WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License version 2 for more details at
 * http://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program available in the file COPYING in the main
 * directory of this source tree.
 *
 * The BSD 2-Clause License
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
 * OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/errno.h>
#include <linux/inetdevice.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_user_verbs.h>
#include <net/addrconf.h>

#include "pvrdma.h"

#define DRV_NAME	"vmw_pvrdma"
#define DRV_VERSION	"1.0.1.0-k"

static DEFINE_MUTEX(pvrdma_device_list_lock);
static LIST_HEAD(pvrdma_device_list);
static struct workqueue_struct *event_wq;

static int pvrdma_add_gid(const struct ib_gid_attr *attr, void **context);
static int pvrdma_del_gid(const struct ib_gid_attr *attr, void **context);

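/* sysfs attributes exposing basic device identity through the IB core. */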
static ssize_t show_hca(struct device *device, struct device_attribute *attr,
			char *buf)
{
	return sprintf(buf, "VMW_PVRDMA-%s\n", DRV_VERSION);
}

static ssize_t show_rev(struct device *device, struct device_attribute *attr,
			char *buf)
{
	return sprintf(buf, "%d\n", PVRDMA_REV_ID);
}

static ssize_t show_board(struct device *device, struct device_attribute *attr,
			  char *buf)
{
	return sprintf(buf, "%d\n", PVRDMA_BOARD_ID);
}

static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
static DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL);
static DEVICE_ATTR(board_id, S_IRUGO, show_board, NULL);

static struct device_attribute *pvrdma_class_attributes[] = {
	&dev_attr_hw_rev,
	&dev_attr_hca_type,
	&dev_attr_board_id
};

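/*
 * The device reports its firmware version as a packed 64-bit value:
 * the major number in the upper 32 bits, followed by 16-bit minor and
 * patch fields.
 */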
static void pvrdma_get_fw_ver_str(struct ib_device *device, char *str)
{
	struct pvrdma_dev *dev =
		container_of(device, struct pvrdma_dev, ib_dev);
	snprintf(str, IB_FW_VERSION_NAME_MAX, "%d.%d.%d\n",
		 (int) (dev->dsr->caps.fw_ver >> 32),
		 (int) (dev->dsr->caps.fw_ver >> 16) & 0xffff,
		 (int) dev->dsr->caps.fw_ver & 0xffff);
}

static int pvrdma_init_device(struct pvrdma_dev *dev)
{
	/* Initialize some device related stuff */
	spin_lock_init(&dev->cmd_lock);
	sema_init(&dev->cmd_sema, 1);
	atomic_set(&dev->num_qps, 0);
	atomic_set(&dev->num_srqs, 0);
	atomic_set(&dev->num_cqs, 0);
	atomic_set(&dev->num_pds, 0);
	atomic_set(&dev->num_ahs, 0);

	return 0;
}

static int pvrdma_port_immutable(struct ib_device *ibdev, u8 port_num,
				 struct ib_port_immutable *immutable)
{
	struct pvrdma_dev *dev = to_vdev(ibdev);
	struct ib_port_attr attr;
	int err;

	if (dev->dsr->caps.gid_types == PVRDMA_GID_TYPE_FLAG_ROCE_V1)
		immutable->core_cap_flags |= RDMA_CORE_PORT_IBA_ROCE;
	else if (dev->dsr->caps.gid_types == PVRDMA_GID_TYPE_FLAG_ROCE_V2)
		immutable->core_cap_flags |= RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;

	err = ib_query_port(ibdev, port_num, &attr);
	if (err)
		return err;

	immutable->pkey_tbl_len = attr.pkey_tbl_len;
	immutable->gid_tbl_len = attr.gid_tbl_len;
	immutable->max_mad_size = IB_MGMT_MAD_SIZE;
	return 0;
}

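/*
 * Return the paired vmxnet3 net_device for port 1, with a reference
 * held for the caller.
 */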
static struct net_device *pvrdma_get_netdev(struct ib_device *ibdev,
					    u8 port_num)
{
	struct net_device *netdev;
	struct pvrdma_dev *dev = to_vdev(ibdev);

	if (port_num != 1)
		return NULL;

	rcu_read_lock();
	netdev = dev->netdev;
	if (netdev)
		dev_hold(netdev);
	rcu_read_unlock();

	return netdev;
}

static int pvrdma_register_device(struct pvrdma_dev *dev)
{
	int ret = -1;
	int i = 0;

	strlcpy(dev->ib_dev.name, "vmw_pvrdma%d", IB_DEVICE_NAME_MAX);
	dev->ib_dev.node_guid = dev->dsr->caps.node_guid;
	dev->sys_image_guid = dev->dsr->caps.sys_image_guid;
	dev->flags = 0;
	dev->ib_dev.owner = THIS_MODULE;
	dev->ib_dev.num_comp_vectors = 1;
	dev->ib_dev.dev.parent = &dev->pdev->dev;
	dev->ib_dev.uverbs_abi_ver = PVRDMA_UVERBS_ABI_VERSION;
	dev->ib_dev.uverbs_cmd_mask =
		(1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
		(1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
		(1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
		(1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
		(1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
		(1ull << IB_USER_VERBS_CMD_REG_MR) |
		(1ull << IB_USER_VERBS_CMD_DEREG_MR) |
		(1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
		(1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
		(1ull << IB_USER_VERBS_CMD_POLL_CQ) |
		(1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ) |
		(1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
		(1ull << IB_USER_VERBS_CMD_CREATE_QP) |
		(1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
		(1ull << IB_USER_VERBS_CMD_QUERY_QP) |
		(1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
		(1ull << IB_USER_VERBS_CMD_POST_SEND) |
		(1ull << IB_USER_VERBS_CMD_POST_RECV) |
		(1ull << IB_USER_VERBS_CMD_CREATE_AH) |
		(1ull << IB_USER_VERBS_CMD_DESTROY_AH);

	dev->ib_dev.node_type = RDMA_NODE_IB_CA;
	dev->ib_dev.phys_port_cnt = dev->dsr->caps.phys_port_cnt;

	dev->ib_dev.query_device = pvrdma_query_device;
	dev->ib_dev.query_port = pvrdma_query_port;
	dev->ib_dev.query_gid = pvrdma_query_gid;
	dev->ib_dev.query_pkey = pvrdma_query_pkey;
	dev->ib_dev.modify_port = pvrdma_modify_port;
	dev->ib_dev.alloc_ucontext = pvrdma_alloc_ucontext;
	dev->ib_dev.dealloc_ucontext = pvrdma_dealloc_ucontext;
	dev->ib_dev.mmap = pvrdma_mmap;
	dev->ib_dev.alloc_pd = pvrdma_alloc_pd;
	dev->ib_dev.dealloc_pd = pvrdma_dealloc_pd;
	dev->ib_dev.create_ah = pvrdma_create_ah;
	dev->ib_dev.destroy_ah = pvrdma_destroy_ah;
	dev->ib_dev.create_qp = pvrdma_create_qp;
	dev->ib_dev.modify_qp = pvrdma_modify_qp;
	dev->ib_dev.query_qp = pvrdma_query_qp;
	dev->ib_dev.destroy_qp = pvrdma_destroy_qp;
	dev->ib_dev.post_send = pvrdma_post_send;
	dev->ib_dev.post_recv = pvrdma_post_recv;
	dev->ib_dev.create_cq = pvrdma_create_cq;
	dev->ib_dev.destroy_cq = pvrdma_destroy_cq;
	dev->ib_dev.poll_cq = pvrdma_poll_cq;
	dev->ib_dev.req_notify_cq = pvrdma_req_notify_cq;
	dev->ib_dev.get_dma_mr = pvrdma_get_dma_mr;
	dev->ib_dev.reg_user_mr = pvrdma_reg_user_mr;
	dev->ib_dev.dereg_mr = pvrdma_dereg_mr;
	dev->ib_dev.alloc_mr = pvrdma_alloc_mr;
	dev->ib_dev.map_mr_sg = pvrdma_map_mr_sg;
	dev->ib_dev.add_gid = pvrdma_add_gid;
	dev->ib_dev.del_gid = pvrdma_del_gid;
	dev->ib_dev.get_netdev = pvrdma_get_netdev;
	dev->ib_dev.get_port_immutable = pvrdma_port_immutable;
	dev->ib_dev.get_link_layer = pvrdma_port_link_layer;
	dev->ib_dev.get_dev_fw_str = pvrdma_get_fw_ver_str;

	mutex_init(&dev->port_mutex);
	spin_lock_init(&dev->desc_lock);

	dev->cq_tbl = kcalloc(dev->dsr->caps.max_cq, sizeof(struct pvrdma_cq *),
			      GFP_KERNEL);
	if (!dev->cq_tbl)
		return ret;
	spin_lock_init(&dev->cq_tbl_lock);

	dev->qp_tbl = kcalloc(dev->dsr->caps.max_qp, sizeof(struct pvrdma_qp *),
			      GFP_KERNEL);
	if (!dev->qp_tbl)
		goto err_cq_free;
	spin_lock_init(&dev->qp_tbl_lock);

	/* Check if SRQ is supported by backend */
	if (dev->dsr->caps.max_srq) {
		dev->ib_dev.uverbs_cmd_mask |=
			(1ull << IB_USER_VERBS_CMD_CREATE_SRQ) |
			(1ull << IB_USER_VERBS_CMD_MODIFY_SRQ) |
			(1ull << IB_USER_VERBS_CMD_QUERY_SRQ) |
			(1ull << IB_USER_VERBS_CMD_DESTROY_SRQ) |
			(1ull << IB_USER_VERBS_CMD_POST_SRQ_RECV);

		dev->ib_dev.create_srq = pvrdma_create_srq;
		dev->ib_dev.modify_srq = pvrdma_modify_srq;
		dev->ib_dev.query_srq = pvrdma_query_srq;
		dev->ib_dev.destroy_srq = pvrdma_destroy_srq;

		dev->srq_tbl = kcalloc(dev->dsr->caps.max_srq,
				       sizeof(struct pvrdma_srq *),
				       GFP_KERNEL);
		if (!dev->srq_tbl)
			goto err_qp_free;
	}
	dev->ib_dev.driver_id = RDMA_DRIVER_VMW_PVRDMA;
	spin_lock_init(&dev->srq_tbl_lock);

	ret = ib_register_device(&dev->ib_dev, NULL);
	if (ret)
		goto err_srq_free;

	for (i = 0; i < ARRAY_SIZE(pvrdma_class_attributes); ++i) {
		ret = device_create_file(&dev->ib_dev.dev,
					 pvrdma_class_attributes[i]);
		if (ret)
			goto err_class;
	}

	dev->ib_active = true;

	return 0;

err_class:
	ib_unregister_device(&dev->ib_dev);
err_srq_free:
	kfree(dev->srq_tbl);
err_qp_free:
	kfree(dev->qp_tbl);
err_cq_free:
	kfree(dev->cq_tbl);

	return ret;
}

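/*
 * Interrupt vector layout: vector 0 carries command responses, vector 1
 * carries async events, and any remaining vectors carry CQ notifications.
 */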
static irqreturn_t pvrdma_intr0_handler(int irq, void *dev_id)
{
	u32 icr = PVRDMA_INTR_CAUSE_RESPONSE;
	struct pvrdma_dev *dev = dev_id;

	dev_dbg(&dev->pdev->dev, "interrupt 0 (response) handler\n");

	if (!dev->pdev->msix_enabled) {
		/* Legacy intr */
		icr = pvrdma_read_reg(dev, PVRDMA_REG_ICR);
		if (icr == 0)
			return IRQ_NONE;
	}

	if (icr == PVRDMA_INTR_CAUSE_RESPONSE)
		complete(&dev->cmd_done);

	return IRQ_HANDLED;
}

static void pvrdma_qp_event(struct pvrdma_dev *dev, u32 qpn, int type)
{
	struct pvrdma_qp *qp;
	unsigned long flags;

	spin_lock_irqsave(&dev->qp_tbl_lock, flags);
	qp = dev->qp_tbl[qpn % dev->dsr->caps.max_qp];
	if (qp)
		refcount_inc(&qp->refcnt);
	spin_unlock_irqrestore(&dev->qp_tbl_lock, flags);

	if (qp && qp->ibqp.event_handler) {
		struct ib_qp *ibqp = &qp->ibqp;
		struct ib_event e;

		e.device = ibqp->device;
		e.element.qp = ibqp;
		e.event = type; /* 1:1 mapping for now. */
		ibqp->event_handler(&e, ibqp->qp_context);
	}
	if (qp) {
		if (refcount_dec_and_test(&qp->refcnt))
			complete(&qp->free);
	}
}

static void pvrdma_cq_event(struct pvrdma_dev *dev, u32 cqn, int type)
{
	struct pvrdma_cq *cq;
	unsigned long flags;

	spin_lock_irqsave(&dev->cq_tbl_lock, flags);
	cq = dev->cq_tbl[cqn % dev->dsr->caps.max_cq];
	if (cq)
		refcount_inc(&cq->refcnt);
	spin_unlock_irqrestore(&dev->cq_tbl_lock, flags);

	if (cq && cq->ibcq.event_handler) {
		struct ib_cq *ibcq = &cq->ibcq;
		struct ib_event e;

		e.device = ibcq->device;
		e.element.cq = ibcq;
		e.event = type; /* 1:1 mapping for now. */
		ibcq->event_handler(&e, ibcq->cq_context);
	}
	if (cq) {
		if (refcount_dec_and_test(&cq->refcnt))
			complete(&cq->free);
	}
}

static void pvrdma_srq_event(struct pvrdma_dev *dev, u32 srqn, int type)
{
	struct pvrdma_srq *srq;
	unsigned long flags;

	spin_lock_irqsave(&dev->srq_tbl_lock, flags);
	if (dev->srq_tbl)
		srq = dev->srq_tbl[srqn % dev->dsr->caps.max_srq];
	else
		srq = NULL;
	if (srq)
		refcount_inc(&srq->refcnt);
	spin_unlock_irqrestore(&dev->srq_tbl_lock, flags);

	if (srq && srq->ibsrq.event_handler) {
		struct ib_srq *ibsrq = &srq->ibsrq;
		struct ib_event e;

		e.device = ibsrq->device;
		e.element.srq = ibsrq;
		e.event = type; /* 1:1 mapping for now. */
		ibsrq->event_handler(&e, ibsrq->srq_context);
	}
	if (srq) {
		if (refcount_dec_and_test(&srq->refcnt))
			complete(&srq->free);
	}
}

static void pvrdma_dispatch_event(struct pvrdma_dev *dev, int port,
				  enum ib_event_type event)
{
	struct ib_event ib_event;

	memset(&ib_event, 0, sizeof(ib_event));
	ib_event.device = &dev->ib_dev;
	ib_event.element.port_num = port;
	ib_event.event = event;
	ib_dispatch_event(&ib_event);
}

static void pvrdma_dev_event(struct pvrdma_dev *dev, u8 port, int type)
{
	if (port < 1 || port > dev->dsr->caps.phys_port_cnt) {
		dev_warn(&dev->pdev->dev, "event on port %d\n", port);
		return;
	}

	pvrdma_dispatch_event(dev, port, type);
}

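/*
 * Async event queue entries follow the ring-state page in the async
 * page directory.
 */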
static inline struct pvrdma_eqe *get_eqe(struct pvrdma_dev *dev, unsigned int i)
{
	return (struct pvrdma_eqe *)pvrdma_page_dir_get_ptr(
					&dev->async_pdir,
					PAGE_SIZE +
					sizeof(struct pvrdma_eqe) * i);
}

static irqreturn_t pvrdma_intr1_handler(int irq, void *dev_id)
{
	struct pvrdma_dev *dev = dev_id;
	struct pvrdma_ring *ring = &dev->async_ring_state->rx;
	int ring_slots = (dev->dsr->async_ring_pages.num_pages - 1) *
			 PAGE_SIZE / sizeof(struct pvrdma_eqe);
	unsigned int head;

	dev_dbg(&dev->pdev->dev, "interrupt 1 (async event) handler\n");

	/*
	 * Don't process events until the IB device is registered. Otherwise
	 * we'll try to ib_dispatch_event() on an invalid device.
	 */
	if (!dev->ib_active)
		return IRQ_HANDLED;

	while (pvrdma_idx_ring_has_data(ring, ring_slots, &head) > 0) {
		struct pvrdma_eqe *eqe;

		eqe = get_eqe(dev, head);

		switch (eqe->type) {
		case PVRDMA_EVENT_QP_FATAL:
		case PVRDMA_EVENT_QP_REQ_ERR:
		case PVRDMA_EVENT_QP_ACCESS_ERR:
		case PVRDMA_EVENT_COMM_EST:
		case PVRDMA_EVENT_SQ_DRAINED:
		case PVRDMA_EVENT_PATH_MIG:
		case PVRDMA_EVENT_PATH_MIG_ERR:
		case PVRDMA_EVENT_QP_LAST_WQE_REACHED:
			pvrdma_qp_event(dev, eqe->info, eqe->type);
			break;

		case PVRDMA_EVENT_CQ_ERR:
			pvrdma_cq_event(dev, eqe->info, eqe->type);
			break;

		case PVRDMA_EVENT_SRQ_ERR:
		case PVRDMA_EVENT_SRQ_LIMIT_REACHED:
			pvrdma_srq_event(dev, eqe->info, eqe->type);
			break;

		case PVRDMA_EVENT_PORT_ACTIVE:
		case PVRDMA_EVENT_PORT_ERR:
		case PVRDMA_EVENT_LID_CHANGE:
		case PVRDMA_EVENT_PKEY_CHANGE:
		case PVRDMA_EVENT_SM_CHANGE:
		case PVRDMA_EVENT_CLIENT_REREGISTER:
		case PVRDMA_EVENT_GID_CHANGE:
			pvrdma_dev_event(dev, eqe->info, eqe->type);
			break;

		case PVRDMA_EVENT_DEVICE_FATAL:
			pvrdma_dev_event(dev, 1, eqe->type);
			break;

		default:
			break;
		}

		pvrdma_idx_ring_inc(&ring->cons_head, ring_slots);
	}

	return IRQ_HANDLED;
}

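/*
 * CQ notification entries are laid out like the async EQEs, in the CQ
 * page directory.
 */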
static inline struct pvrdma_cqne *get_cqne(struct pvrdma_dev *dev,
					   unsigned int i)
{
	return (struct pvrdma_cqne *)pvrdma_page_dir_get_ptr(
					&dev->cq_pdir,
					PAGE_SIZE +
					sizeof(struct pvrdma_cqne) * i);
}

static irqreturn_t pvrdma_intrx_handler(int irq, void *dev_id)
{
	struct pvrdma_dev *dev = dev_id;
	struct pvrdma_ring *ring = &dev->cq_ring_state->rx;
	int ring_slots = (dev->dsr->cq_ring_pages.num_pages - 1) * PAGE_SIZE /
			 sizeof(struct pvrdma_cqne);
	unsigned int head;
	unsigned long flags;

	dev_dbg(&dev->pdev->dev, "interrupt x (completion) handler\n");

	while (pvrdma_idx_ring_has_data(ring, ring_slots, &head) > 0) {
		struct pvrdma_cqne *cqne;
		struct pvrdma_cq *cq;

		cqne = get_cqne(dev, head);
		spin_lock_irqsave(&dev->cq_tbl_lock, flags);
		cq = dev->cq_tbl[cqne->info % dev->dsr->caps.max_cq];
		if (cq)
			refcount_inc(&cq->refcnt);
		spin_unlock_irqrestore(&dev->cq_tbl_lock, flags);

		if (cq && cq->ibcq.comp_handler)
			cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
		if (cq) {
			if (refcount_dec_and_test(&cq->refcnt))
				complete(&cq->free);
		}
		pvrdma_idx_ring_inc(&ring->cons_head, ring_slots);
	}

	return IRQ_HANDLED;
}

static void pvrdma_free_irq(struct pvrdma_dev *dev)
{
	int i;

	dev_dbg(&dev->pdev->dev, "freeing interrupts\n");
	for (i = 0; i < dev->nr_vectors; i++)
		free_irq(pci_irq_vector(dev->pdev, i), dev);
}

static void pvrdma_enable_intrs(struct pvrdma_dev *dev)
{
	dev_dbg(&dev->pdev->dev, "enable interrupts\n");
	pvrdma_write_reg(dev, PVRDMA_REG_IMR, 0);
}

static void pvrdma_disable_intrs(struct pvrdma_dev *dev)
{
	dev_dbg(&dev->pdev->dev, "disable interrupts\n");
	pvrdma_write_reg(dev, PVRDMA_REG_IMR, ~0);
}

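/*
 * Try MSI-X with up to PVRDMA_MAX_INTERRUPTS vectors first, then fall
 * back to a single MSI or legacy INTx vector.
 */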
static int pvrdma_alloc_intrs(struct pvrdma_dev *dev)
{
	struct pci_dev *pdev = dev->pdev;
	int ret = 0, i;

	ret = pci_alloc_irq_vectors(pdev, 1, PVRDMA_MAX_INTERRUPTS,
				    PCI_IRQ_MSIX);
	if (ret < 0) {
		ret = pci_alloc_irq_vectors(pdev, 1, 1,
					    PCI_IRQ_MSI | PCI_IRQ_LEGACY);
		if (ret < 0)
			return ret;
	}
	dev->nr_vectors = ret;

	ret = request_irq(pci_irq_vector(dev->pdev, 0), pvrdma_intr0_handler,
			  pdev->msix_enabled ? 0 : IRQF_SHARED, DRV_NAME, dev);
	if (ret) {
		dev_err(&dev->pdev->dev,
			"failed to request interrupt 0\n");
		goto out_free_vectors;
	}

	for (i = 1; i < dev->nr_vectors; i++) {
		ret = request_irq(pci_irq_vector(dev->pdev, i),
				  i == 1 ? pvrdma_intr1_handler :
					   pvrdma_intrx_handler,
				  0, DRV_NAME, dev);
		if (ret) {
			dev_err(&dev->pdev->dev,
				"failed to request interrupt %d\n", i);
			goto free_irqs;
		}
	}

	return 0;

free_irqs:
	while (--i >= 0)
		free_irq(pci_irq_vector(dev->pdev, i), dev);
out_free_vectors:
	pci_free_irq_vectors(pdev);
	return ret;
}

static void pvrdma_free_slots(struct pvrdma_dev *dev)
{
	struct pci_dev *pdev = dev->pdev;

	if (dev->resp_slot)
		dma_free_coherent(&pdev->dev, PAGE_SIZE, dev->resp_slot,
				  dev->dsr->resp_slot_dma);
	if (dev->cmd_slot)
		dma_free_coherent(&pdev->dev, PAGE_SIZE, dev->cmd_slot,
				  dev->dsr->cmd_slot_dma);
}

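/*
 * Bind a GID at the given table index by posting a PVRDMA_CMD_CREATE_BIND
 * command to the device, then mirror the entry into the local sgid table.
 */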
static int pvrdma_add_gid_at_index(struct pvrdma_dev *dev,
				   const union ib_gid *gid,
				   u8 gid_type,
				   int index)
{
	int ret;
	union pvrdma_cmd_req req;
	struct pvrdma_cmd_create_bind *cmd_bind = &req.create_bind;

	if (!dev->sgid_tbl) {
		dev_warn(&dev->pdev->dev, "sgid table not initialized\n");
		return -EINVAL;
	}

	memset(cmd_bind, 0, sizeof(*cmd_bind));
	cmd_bind->hdr.cmd = PVRDMA_CMD_CREATE_BIND;
	memcpy(cmd_bind->new_gid, gid->raw, 16);
	cmd_bind->mtu = ib_mtu_enum_to_int(IB_MTU_1024);
	cmd_bind->vlan = 0xfff;
	cmd_bind->index = index;
	cmd_bind->gid_type = gid_type;

	ret = pvrdma_cmd_post(dev, &req, NULL, 0);
	if (ret < 0) {
		dev_warn(&dev->pdev->dev,
			 "could not create binding, error: %d\n", ret);
		return -EFAULT;
	}
	memcpy(&dev->sgid_tbl[index], gid, sizeof(*gid));
	return 0;
}

static int pvrdma_add_gid(const struct ib_gid_attr *attr, void **context)
{
	struct pvrdma_dev *dev = to_vdev(attr->device);

	return pvrdma_add_gid_at_index(dev, &attr->gid,
				       ib_gid_type_to_pvrdma(attr->gid_type),
				       attr->index);
}

static int pvrdma_del_gid_at_index(struct pvrdma_dev *dev, int index)
{
	int ret;
	union pvrdma_cmd_req req;
	struct pvrdma_cmd_destroy_bind *cmd_dest = &req.destroy_bind;

	/* Update sgid table. */
	if (!dev->sgid_tbl) {
		dev_warn(&dev->pdev->dev, "sgid table not initialized\n");
		return -EINVAL;
	}

	memset(cmd_dest, 0, sizeof(*cmd_dest));
	cmd_dest->hdr.cmd = PVRDMA_CMD_DESTROY_BIND;
	memcpy(cmd_dest->dest_gid, &dev->sgid_tbl[index], 16);
	cmd_dest->index = index;

	ret = pvrdma_cmd_post(dev, &req, NULL, 0);
	if (ret < 0) {
		dev_warn(&dev->pdev->dev,
			 "could not destroy binding, error: %d\n", ret);
		return ret;
	}
	memset(&dev->sgid_tbl[index], 0, 16);
	return 0;
}

static int pvrdma_del_gid(const struct ib_gid_attr *attr, void **context)
{
	struct pvrdma_dev *dev = to_vdev(attr->device);

	dev_dbg(&dev->pdev->dev, "removing gid at index %u from %s",
		attr->index, dev->netdev->name);

	return pvrdma_del_gid_at_index(dev, attr->index);
}

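/*
 * The PVRDMA device is paired with a vmxnet3 NIC in the same PCI slot
 * (function 0); track that net_device so link state can be mirrored to
 * the IB port.
 */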
static void pvrdma_netdevice_event_handle(struct pvrdma_dev *dev,
					  struct net_device *ndev,
					  unsigned long event)
{
	struct pci_dev *pdev_net;
	unsigned int slot;

	switch (event) {
	case NETDEV_REBOOT:
	case NETDEV_DOWN:
		pvrdma_dispatch_event(dev, 1, IB_EVENT_PORT_ERR);
		break;
	case NETDEV_UP:
		pvrdma_write_reg(dev, PVRDMA_REG_CTL,
				 PVRDMA_DEVICE_CTL_UNQUIESCE);

		mb();

		if (pvrdma_read_reg(dev, PVRDMA_REG_ERR))
			dev_err(&dev->pdev->dev,
				"failed to activate device during link up\n");
		else
			pvrdma_dispatch_event(dev, 1, IB_EVENT_PORT_ACTIVE);
		break;
	case NETDEV_UNREGISTER:
		dev_put(dev->netdev);
		dev->netdev = NULL;
		break;
	case NETDEV_REGISTER:
		/* vmxnet3 will have same bus, slot. But func will be 0 */
		slot = PCI_SLOT(dev->pdev->devfn);
		pdev_net = pci_get_slot(dev->pdev->bus,
					PCI_DEVFN(slot, 0));
		if ((dev->netdev == NULL) &&
		    (pci_get_drvdata(pdev_net) == ndev)) {
			/* this is our netdev */
			dev->netdev = ndev;
			dev_hold(ndev);
		}
		pci_dev_put(pdev_net);
		break;

	default:
		dev_dbg(&dev->pdev->dev, "ignore netdevice event %ld on %s\n",
			event, dev->ib_dev.name);
		break;
	}
}

static void pvrdma_netdevice_event_work(struct work_struct *work)
{
	struct pvrdma_netdevice_work *netdev_work;
	struct pvrdma_dev *dev;

	netdev_work = container_of(work, struct pvrdma_netdevice_work, work);

	mutex_lock(&pvrdma_device_list_lock);
	list_for_each_entry(dev, &pvrdma_device_list, device_link) {
		if ((netdev_work->event == NETDEV_REGISTER) ||
		    (dev->netdev == netdev_work->event_netdev)) {
			pvrdma_netdevice_event_handle(dev,
						      netdev_work->event_netdev,
						      netdev_work->event);
			break;
		}
	}
	mutex_unlock(&pvrdma_device_list_lock);

	kfree(netdev_work);
}

static int pvrdma_netdevice_event(struct notifier_block *this,
				  unsigned long event, void *ptr)
{
	struct net_device *event_netdev = netdev_notifier_info_to_dev(ptr);
	struct pvrdma_netdevice_work *netdev_work;

	netdev_work = kmalloc(sizeof(*netdev_work), GFP_ATOMIC);
	if (!netdev_work)
		return NOTIFY_BAD;

	INIT_WORK(&netdev_work->work, pvrdma_netdevice_event_work);
	netdev_work->event_netdev = event_netdev;
	netdev_work->event = event;
	queue_work(event_wq, &netdev_work->work);

	return NOTIFY_DONE;
}

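/*
 * Probe: map the BARs, allocate the device-shared region and event rings,
 * pair with the vmxnet3 netdev, set up interrupts, then activate the
 * device and register it with the IB core.
 */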
static int pvrdma_pci_probe(struct pci_dev *pdev,
			    const struct pci_device_id *id)
{
	struct pci_dev *pdev_net;
	struct pvrdma_dev *dev;
	int ret;
	unsigned long start;
	unsigned long len;
	dma_addr_t slot_dma = 0;

	dev_dbg(&pdev->dev, "initializing driver %s\n", pci_name(pdev));

	/* Allocate zero-out device */
	dev = (struct pvrdma_dev *)ib_alloc_device(sizeof(*dev));
	if (!dev) {
		dev_err(&pdev->dev, "failed to allocate IB device\n");
		return -ENOMEM;
	}

	mutex_lock(&pvrdma_device_list_lock);
	list_add(&dev->device_link, &pvrdma_device_list);
	mutex_unlock(&pvrdma_device_list_lock);

	ret = pvrdma_init_device(dev);
	if (ret)
		goto err_free_device;

	dev->pdev = pdev;
	pci_set_drvdata(pdev, dev);

	ret = pci_enable_device(pdev);
	if (ret) {
		dev_err(&pdev->dev, "cannot enable PCI device\n");
		goto err_free_device;
	}

	dev_dbg(&pdev->dev, "PCI resource flags BAR0 %#lx\n",
		pci_resource_flags(pdev, 0));
	dev_dbg(&pdev->dev, "PCI resource len %#llx\n",
		(unsigned long long)pci_resource_len(pdev, 0));
	dev_dbg(&pdev->dev, "PCI resource start %#llx\n",
		(unsigned long long)pci_resource_start(pdev, 0));
	dev_dbg(&pdev->dev, "PCI resource flags BAR1 %#lx\n",
		pci_resource_flags(pdev, 1));
	dev_dbg(&pdev->dev, "PCI resource len %#llx\n",
		(unsigned long long)pci_resource_len(pdev, 1));
	dev_dbg(&pdev->dev, "PCI resource start %#llx\n",
		(unsigned long long)pci_resource_start(pdev, 1));

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM) ||
	    !(pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) {
		dev_err(&pdev->dev, "PCI BAR region not MMIO\n");
		ret = -ENOMEM;
		goto err_disable_pdev;
	}

	ret = pci_request_regions(pdev, DRV_NAME);
	if (ret) {
		dev_err(&pdev->dev, "cannot request PCI resources\n");
		goto err_disable_pdev;
	}

	/* Enable 64-Bit DMA */
	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
		ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
		if (ret != 0) {
			dev_err(&pdev->dev,
				"pci_set_consistent_dma_mask failed\n");
			goto err_free_resource;
		}
	} else {
		ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (ret != 0) {
			dev_err(&pdev->dev,
				"pci_set_dma_mask failed\n");
			goto err_free_resource;
		}
	}

	pci_set_master(pdev);

	/* Map register space */
	start = pci_resource_start(dev->pdev, PVRDMA_PCI_RESOURCE_REG);
	len = pci_resource_len(dev->pdev, PVRDMA_PCI_RESOURCE_REG);
	dev->regs = ioremap(start, len);
	if (!dev->regs) {
		dev_err(&pdev->dev, "register mapping failed\n");
		ret = -ENOMEM;
		goto err_free_resource;
	}

	/* Setup per-device UAR. */
	dev->driver_uar.index = 0;
	dev->driver_uar.pfn =
		pci_resource_start(dev->pdev, PVRDMA_PCI_RESOURCE_UAR) >>
		PAGE_SHIFT;
	dev->driver_uar.map =
		ioremap(dev->driver_uar.pfn << PAGE_SHIFT, PAGE_SIZE);
	if (!dev->driver_uar.map) {
		dev_err(&pdev->dev, "failed to remap UAR pages\n");
		ret = -ENOMEM;
		goto err_unmap_regs;
	}

	dev->dsr_version = pvrdma_read_reg(dev, PVRDMA_REG_VERSION);
	dev_info(&pdev->dev, "device version %d, driver version %d\n",
		 dev->dsr_version, PVRDMA_VERSION);

	dev->dsr = dma_zalloc_coherent(&pdev->dev, sizeof(*dev->dsr),
				       &dev->dsrbase, GFP_KERNEL);
	if (!dev->dsr) {
		dev_err(&pdev->dev, "failed to allocate shared region\n");
		ret = -ENOMEM;
		goto err_uar_unmap;
	}

	/* Setup the shared region */
	dev->dsr->driver_version = PVRDMA_VERSION;
	dev->dsr->gos_info.gos_bits = sizeof(void *) == 4 ?
		PVRDMA_GOS_BITS_32 :
		PVRDMA_GOS_BITS_64;
	dev->dsr->gos_info.gos_type = PVRDMA_GOS_TYPE_LINUX;
	dev->dsr->gos_info.gos_ver = 1;
	dev->dsr->uar_pfn = dev->driver_uar.pfn;

	/* Command slot. */
	dev->cmd_slot = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
					   &slot_dma, GFP_KERNEL);
	if (!dev->cmd_slot) {
		ret = -ENOMEM;
		goto err_free_dsr;
	}

	dev->dsr->cmd_slot_dma = (u64)slot_dma;

	/* Response slot. */
	dev->resp_slot = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
					    &slot_dma, GFP_KERNEL);
	if (!dev->resp_slot) {
		ret = -ENOMEM;
		goto err_free_slots;
	}

	dev->dsr->resp_slot_dma = (u64)slot_dma;

	/* Async event ring */
	dev->dsr->async_ring_pages.num_pages = PVRDMA_NUM_RING_PAGES;
	ret = pvrdma_page_dir_init(dev, &dev->async_pdir,
				   dev->dsr->async_ring_pages.num_pages, true);
	if (ret)
		goto err_free_slots;
	dev->async_ring_state = dev->async_pdir.pages[0];
	dev->dsr->async_ring_pages.pdir_dma = dev->async_pdir.dir_dma;

	/* CQ notification ring */
	dev->dsr->cq_ring_pages.num_pages = PVRDMA_NUM_RING_PAGES;
	ret = pvrdma_page_dir_init(dev, &dev->cq_pdir,
				   dev->dsr->cq_ring_pages.num_pages, true);
	if (ret)
		goto err_free_async_ring;
	dev->cq_ring_state = dev->cq_pdir.pages[0];
	dev->dsr->cq_ring_pages.pdir_dma = dev->cq_pdir.dir_dma;

	/*
	 * Write the PA of the shared region to the device. The writes must be
	 * ordered such that the high bits are written last. When the writes
	 * complete, the device will have filled out the capabilities.
	 */

	pvrdma_write_reg(dev, PVRDMA_REG_DSRLOW, (u32)dev->dsrbase);
	pvrdma_write_reg(dev, PVRDMA_REG_DSRHIGH,
			 (u32)((u64)(dev->dsrbase) >> 32));

	/* Make sure the write is complete before reading status. */
	mb();

	/* The driver supports RoCE V1 and V2. */
	if (!PVRDMA_SUPPORTED(dev)) {
		dev_err(&pdev->dev, "driver needs RoCE v1 or v2 support\n");
		ret = -EFAULT;
		goto err_free_cq_ring;
	}

	/* Paired vmxnet3 will have same bus, slot. But func will be 0 */
	pdev_net = pci_get_slot(pdev->bus, PCI_DEVFN(PCI_SLOT(pdev->devfn), 0));
	if (!pdev_net) {
		dev_err(&pdev->dev, "failed to find paired net device\n");
		ret = -ENODEV;
		goto err_free_cq_ring;
	}

	if (pdev_net->vendor != PCI_VENDOR_ID_VMWARE ||
	    pdev_net->device != PCI_DEVICE_ID_VMWARE_VMXNET3) {
		dev_err(&pdev->dev, "failed to find paired vmxnet3 device\n");
		pci_dev_put(pdev_net);
		ret = -ENODEV;
		goto err_free_cq_ring;
	}

	dev->netdev = pci_get_drvdata(pdev_net);
	pci_dev_put(pdev_net);
	if (!dev->netdev) {
		dev_err(&pdev->dev, "failed to get vmxnet3 device\n");
		ret = -ENODEV;
		goto err_free_cq_ring;
	}
	dev_hold(dev->netdev);

	dev_info(&pdev->dev, "paired device to %s\n", dev->netdev->name);

	/* Interrupt setup */
	ret = pvrdma_alloc_intrs(dev);
	if (ret) {
		dev_err(&pdev->dev, "failed to allocate interrupts\n");
		ret = -ENOMEM;
		goto err_free_cq_ring;
	}

	/* Allocate UAR table. */
	ret = pvrdma_uar_table_init(dev);
	if (ret) {
		dev_err(&pdev->dev, "failed to allocate UAR table\n");
		ret = -ENOMEM;
		goto err_free_intrs;
	}

	/* Allocate GID table */
	dev->sgid_tbl = kcalloc(dev->dsr->caps.gid_tbl_len,
				sizeof(union ib_gid), GFP_KERNEL);
	if (!dev->sgid_tbl) {
		ret = -ENOMEM;
		goto err_free_uar_table;
	}
	dev_dbg(&pdev->dev, "gid table len %d\n", dev->dsr->caps.gid_tbl_len);

	pvrdma_enable_intrs(dev);

	/* Activate pvrdma device */
	pvrdma_write_reg(dev, PVRDMA_REG_CTL, PVRDMA_DEVICE_CTL_ACTIVATE);

	/* Make sure the write is complete before reading status. */
	mb();

	/* Check if device was successfully activated */
	ret = pvrdma_read_reg(dev, PVRDMA_REG_ERR);
	if (ret != 0) {
		dev_err(&pdev->dev, "failed to activate device\n");
		ret = -EFAULT;
		goto err_disable_intr;
	}

	/* Register IB device */
	ret = pvrdma_register_device(dev);
	if (ret) {
		dev_err(&pdev->dev, "failed to register IB device\n");
		goto err_disable_intr;
	}

	dev->nb_netdev.notifier_call = pvrdma_netdevice_event;
	ret = register_netdevice_notifier(&dev->nb_netdev);
	if (ret) {
		dev_err(&pdev->dev, "failed to register netdevice events\n");
		goto err_unreg_ibdev;
	}

	dev_info(&pdev->dev, "attached to device\n");
	return 0;

err_unreg_ibdev:
	ib_unregister_device(&dev->ib_dev);
err_disable_intr:
	pvrdma_disable_intrs(dev);
	kfree(dev->sgid_tbl);
err_free_uar_table:
	pvrdma_uar_table_cleanup(dev);
err_free_intrs:
	pvrdma_free_irq(dev);
	pci_free_irq_vectors(pdev);
err_free_cq_ring:
	if (dev->netdev) {
		dev_put(dev->netdev);
		dev->netdev = NULL;
	}
	pvrdma_page_dir_cleanup(dev, &dev->cq_pdir);
err_free_async_ring:
	pvrdma_page_dir_cleanup(dev, &dev->async_pdir);
err_free_slots:
	pvrdma_free_slots(dev);
err_free_dsr:
	dma_free_coherent(&pdev->dev, sizeof(*dev->dsr), dev->dsr,
			  dev->dsrbase);
err_uar_unmap:
	iounmap(dev->driver_uar.map);
err_unmap_regs:
	iounmap(dev->regs);
err_free_resource:
	pci_release_regions(pdev);
err_disable_pdev:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
err_free_device:
	mutex_lock(&pvrdma_device_list_lock);
	list_del(&dev->device_link);
	mutex_unlock(&pvrdma_device_list_lock);
	ib_dealloc_device(&dev->ib_dev);
	return ret;
}

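/*
 * Tear down in roughly the reverse order of probe; the device is reset
 * before its rings and slots are freed.
 */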
static void pvrdma_pci_remove(struct pci_dev *pdev)
{
	struct pvrdma_dev *dev = pci_get_drvdata(pdev);

	if (!dev)
		return;

	dev_info(&pdev->dev, "detaching from device\n");

	unregister_netdevice_notifier(&dev->nb_netdev);
	dev->nb_netdev.notifier_call = NULL;

	flush_workqueue(event_wq);

	if (dev->netdev) {
		dev_put(dev->netdev);
		dev->netdev = NULL;
	}

	/* Unregister ib device */
	ib_unregister_device(&dev->ib_dev);

	mutex_lock(&pvrdma_device_list_lock);
	list_del(&dev->device_link);
	mutex_unlock(&pvrdma_device_list_lock);

	pvrdma_disable_intrs(dev);
	pvrdma_free_irq(dev);
	pci_free_irq_vectors(pdev);

	/* Deactivate pvrdma device */
	pvrdma_write_reg(dev, PVRDMA_REG_CTL, PVRDMA_DEVICE_CTL_RESET);
	pvrdma_page_dir_cleanup(dev, &dev->cq_pdir);
	pvrdma_page_dir_cleanup(dev, &dev->async_pdir);
	pvrdma_free_slots(dev);
	dma_free_coherent(&pdev->dev, sizeof(*dev->dsr), dev->dsr,
			  dev->dsrbase);

	iounmap(dev->regs);
	kfree(dev->sgid_tbl);
	kfree(dev->cq_tbl);
	kfree(dev->srq_tbl);
	kfree(dev->qp_tbl);
	pvrdma_uar_table_cleanup(dev);
	iounmap(dev->driver_uar.map);

	ib_dealloc_device(&dev->ib_dev);

	/* Free pci resources */
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}

static const struct pci_device_id pvrdma_pci_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_VMWARE, PCI_DEVICE_ID_VMWARE_PVRDMA), },
	{ 0 },
};

MODULE_DEVICE_TABLE(pci, pvrdma_pci_table);

static struct pci_driver pvrdma_driver = {
	.name		= DRV_NAME,
	.id_table	= pvrdma_pci_table,
	.probe		= pvrdma_pci_probe,
	.remove		= pvrdma_pci_remove,
};

static int __init pvrdma_init(void)
{
	int err;

	event_wq = alloc_ordered_workqueue("pvrdma_event_wq", WQ_MEM_RECLAIM);
	if (!event_wq)
		return -ENOMEM;

	err = pci_register_driver(&pvrdma_driver);
	if (err)
		destroy_workqueue(event_wq);

	return err;
}

static void __exit pvrdma_cleanup(void)
{
	pci_unregister_driver(&pvrdma_driver);

	destroy_workqueue(event_wq);
}

module_init(pvrdma_init);
module_exit(pvrdma_cleanup);

MODULE_AUTHOR("VMware, Inc");
MODULE_DESCRIPTION("VMware Paravirtual RDMA driver");
MODULE_LICENSE("Dual BSD/GPL");