/* QLogic qedr NIC Driver
 * Copyright (c) 2015-2016 QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/dma-mapping.h>
#include <linux/crc32.h>
#include <linux/iommu.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/iw_cm.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_cache.h>
#include <linux/qed/qed_if.h>
#include <rdma/qedr-abi.h>

#define DB_ADDR_SHIFT(addr)	((addr) << DB_PWM_ADDR_OFFSET_SHIFT)
int qedr_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
{
	if (index >= QEDR_ROCE_PKEY_TABLE_LEN)
		return -EINVAL;

	*pkey = QEDR_ROCE_PKEY_DEFAULT;
	return 0;
}

int qedr_query_gid(struct ib_device *ibdev, u8 port, int index,
		   union ib_gid *sgid)
	struct qedr_dev *dev = get_qedr_dev(ibdev);

	if (!rdma_cap_roce_gid_table(ibdev, port))

	rc = ib_get_cached_gid(ibdev, port, index, sgid, NULL);
		memcpy(sgid, &zgid, sizeof(*sgid));

	DP_DEBUG(dev, QEDR_MSG_INIT, "query gid: index=%d %llx:%llx\n", index,
		 sgid->global.interface_id, sgid->global.subnet_prefix);

int qedr_add_gid(struct ib_device *device, u8 port_num,
		 unsigned int index, const union ib_gid *gid,
		 const struct ib_gid_attr *attr, void **context)
	if (!rdma_cap_roce_gid_table(device, port_num))
		return -EINVAL;

	if (port_num > QEDR_MAX_PORT)
		return -EINVAL;

int qedr_del_gid(struct ib_device *device, u8 port_num,
		 unsigned int index, void **context)
	if (!rdma_cap_roce_gid_table(device, port_num))
		return -EINVAL;

	if (port_num > QEDR_MAX_PORT)
		return -EINVAL;
int qedr_query_device(struct ib_device *ibdev,
		      struct ib_device_attr *attr, struct ib_udata *udata)
	struct qedr_dev *dev = get_qedr_dev(ibdev);
	struct qedr_device_attr *qattr = &dev->attr;

	if (!dev->rdma_ctx) {
		DP_ERR(dev,
		       "qedr_query_device called with invalid params rdma_ctx=%p\n",
		       dev->rdma_ctx);

	memset(attr, 0, sizeof(*attr));

	attr->fw_ver = qattr->fw_ver;
	attr->sys_image_guid = qattr->sys_image_guid;
	attr->max_mr_size = qattr->max_mr_size;
	attr->page_size_cap = qattr->page_size_caps;
	attr->vendor_id = qattr->vendor_id;
	attr->vendor_part_id = qattr->vendor_part_id;
	attr->hw_ver = qattr->hw_ver;
	attr->max_qp = qattr->max_qp;
	attr->max_qp_wr = max_t(u32, qattr->max_sqe, qattr->max_rqe);
	attr->device_cap_flags = IB_DEVICE_CURR_QP_STATE_MOD |
	    IB_DEVICE_RC_RNR_NAK_GEN |
	    IB_DEVICE_LOCAL_DMA_LKEY | IB_DEVICE_MEM_MGT_EXTENSIONS;

	attr->max_sge = qattr->max_sge;
	attr->max_sge_rd = qattr->max_sge;
	attr->max_cq = qattr->max_cq;
	attr->max_cqe = qattr->max_cqe;
	attr->max_mr = qattr->max_mr;
	attr->max_mw = qattr->max_mw;
	attr->max_pd = qattr->max_pd;
	attr->atomic_cap = dev->atomic_cap;
	attr->max_fmr = qattr->max_fmr;
	attr->max_map_per_fmr = 16;
	attr->max_qp_init_rd_atom =
	    1 << (fls(qattr->max_qp_req_rd_atomic_resc) - 1);
	attr->max_qp_rd_atom =
	    min(1 << (fls(qattr->max_qp_resp_rd_atomic_resc) - 1),
		attr->max_qp_init_rd_atom);
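	/* The fls()-based math rounds the reported HW limits down to a power
	 * of two: e.g. if max_qp_req_rd_atomic_resc is 24, the device
	 * advertises 1 << (fls(24) - 1) = 16.
	 */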
	attr->max_srq = qattr->max_srq;
	attr->max_srq_sge = qattr->max_srq_sge;
	attr->max_srq_wr = qattr->max_srq_wr;

	attr->local_ca_ack_delay = qattr->dev_ack_delay;
	attr->max_fast_reg_page_list_len = qattr->max_mr / 8;
	attr->max_pkeys = QEDR_ROCE_PKEY_MAX;
	attr->max_ah = qattr->max_ah;

#define QEDR_SPEED_SDR		(1)
#define QEDR_SPEED_DDR		(2)
#define QEDR_SPEED_QDR		(4)
#define QEDR_SPEED_FDR10	(8)
#define QEDR_SPEED_FDR		(16)
#define QEDR_SPEED_EDR		(32)

static inline void get_link_speed_and_width(int speed, u8 *ib_speed,
					    u8 *ib_width)
		*ib_speed = QEDR_SPEED_SDR;
		*ib_width = IB_WIDTH_1X;

		*ib_speed = QEDR_SPEED_QDR;
		*ib_width = IB_WIDTH_1X;

		*ib_speed = QEDR_SPEED_DDR;
		*ib_width = IB_WIDTH_4X;

		*ib_speed = QEDR_SPEED_EDR;
		*ib_width = IB_WIDTH_1X;

		*ib_speed = QEDR_SPEED_QDR;
		*ib_width = IB_WIDTH_4X;

		*ib_speed = QEDR_SPEED_QDR;
		*ib_width = IB_WIDTH_4X;

		*ib_speed = QEDR_SPEED_EDR;
		*ib_width = IB_WIDTH_4X;

		*ib_speed = QEDR_SPEED_SDR;
		*ib_width = IB_WIDTH_1X;
int qedr_query_port(struct ib_device *ibdev, u8 port, struct ib_port_attr *attr)
	struct qedr_dev *dev;
	struct qed_rdma_port *rdma_port;

	dev = get_qedr_dev(ibdev);
		DP_ERR(dev, "invalid_port=0x%x\n", port);

	if (!dev->rdma_ctx) {
		DP_ERR(dev, "rdma_ctx is NULL\n");

	rdma_port = dev->ops->rdma_query_port(dev->rdma_ctx);
	memset(attr, 0, sizeof(*attr));

	if (rdma_port->port_state == QED_RDMA_PORT_UP) {
		attr->state = IB_PORT_ACTIVE;
		attr->phys_state = 5;
	} else {
		attr->state = IB_PORT_DOWN;
		attr->phys_state = 3;
	}

	attr->max_mtu = IB_MTU_4096;
	attr->active_mtu = iboe_get_mtu(dev->ndev->mtu);
	attr->port_cap_flags = IB_PORT_IP_BASED_GIDS;
	attr->gid_tbl_len = QEDR_MAX_SGID;
	attr->pkey_tbl_len = QEDR_ROCE_PKEY_TABLE_LEN;
	attr->bad_pkey_cntr = rdma_port->pkey_bad_counter;
	attr->qkey_viol_cntr = 0;
	get_link_speed_and_width(rdma_port->link_speed,
				 &attr->active_speed, &attr->active_width);
	attr->max_msg_sz = rdma_port->max_msg_size;
	attr->max_vl_num = 4;

int qedr_modify_port(struct ib_device *ibdev, u8 port, int mask,
		     struct ib_port_modify *props)
	struct qedr_dev *dev;

	dev = get_qedr_dev(ibdev);
		DP_ERR(dev, "invalid_port=0x%x\n", port);
static int qedr_add_mmap(struct qedr_ucontext *uctx, u64 phy_addr,
			 unsigned long len)
	mm = kzalloc(sizeof(*mm), GFP_KERNEL);
	if (!mm)
		return -ENOMEM;

	mm->key.phy_addr = phy_addr;
	/* This function might be called with a length which is not a multiple
	 * of PAGE_SIZE, while the mapping is PAGE_SIZE grained and the kernel
	 * forces this granularity by increasing the requested size if needed.
	 * When qedr_mmap is called, it will search the list with the updated
	 * length as a key. To prevent search failures, the length is rounded up
	 * in advance to PAGE_SIZE.
	 */
	mm->key.len = roundup(len, PAGE_SIZE);
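	/* e.g. with 4 KB pages, a request of len = 100 is stored (and later
	 * looked up by qedr_mmap) with key.len = 4096.
	 */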
	INIT_LIST_HEAD(&mm->entry);

	mutex_lock(&uctx->mm_list_lock);
	list_add(&mm->entry, &uctx->mm_head);
	mutex_unlock(&uctx->mm_list_lock);

	DP_DEBUG(uctx->dev, QEDR_MSG_MISC,
		 "added (addr=0x%llx,len=0x%lx) for ctx=%p\n",
		 (unsigned long long)mm->key.phy_addr,
		 (unsigned long)mm->key.len, uctx);

static bool qedr_search_mmap(struct qedr_ucontext *uctx, u64 phy_addr,
			     unsigned long len)
	mutex_lock(&uctx->mm_list_lock);
	list_for_each_entry(mm, &uctx->mm_head, entry) {
		if (len != mm->key.len || phy_addr != mm->key.phy_addr)

	mutex_unlock(&uctx->mm_list_lock);
	DP_DEBUG(uctx->dev, QEDR_MSG_MISC,
		 "searched for (addr=0x%llx,len=0x%lx) for ctx=%p, result=%d\n",
		 mm->key.phy_addr, mm->key.len, uctx, found);
struct ib_ucontext *qedr_alloc_ucontext(struct ib_device *ibdev,
					struct ib_udata *udata)
	struct qedr_ucontext *ctx;
	struct qedr_alloc_ucontext_resp uresp;
	struct qedr_dev *dev = get_qedr_dev(ibdev);
	struct qed_rdma_add_user_out_params oparams;

	if (!udata)
		return ERR_PTR(-EFAULT);

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return ERR_PTR(-ENOMEM);

	rc = dev->ops->rdma_add_user(dev->rdma_ctx, &oparams);
	if (rc) {
		DP_ERR(dev,
		       "failed to allocate a DPI for a new RoCE application, rc=%d. To overcome this, consider increasing the number of DPIs, increasing the doorbell BAR size, or closing unnecessary RoCE applications. To increase the number of DPIs, consult the qedr readme\n",
		       rc);

	ctx->dpi = oparams.dpi;
	ctx->dpi_addr = oparams.dpi_addr;
	ctx->dpi_phys_addr = oparams.dpi_phys_addr;
	ctx->dpi_size = oparams.dpi_size;
	INIT_LIST_HEAD(&ctx->mm_head);
	mutex_init(&ctx->mm_list_lock);

	memset(&uresp, 0, sizeof(uresp));

	uresp.db_pa = ctx->dpi_phys_addr;
	uresp.db_size = ctx->dpi_size;
	uresp.max_send_wr = dev->attr.max_sqe;
	uresp.max_recv_wr = dev->attr.max_rqe;
	uresp.max_srq_wr = dev->attr.max_srq_wr;
	uresp.sges_per_send_wr = QEDR_MAX_SQE_ELEMENTS_PER_SQE;
	uresp.sges_per_recv_wr = QEDR_MAX_RQE_ELEMENTS_PER_RQE;
	uresp.sges_per_srq_wr = dev->attr.max_srq_sge;
	uresp.max_cqes = QEDR_MAX_CQES;

	rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp));

	rc = qedr_add_mmap(ctx, ctx->dpi_phys_addr, ctx->dpi_size);

	DP_DEBUG(dev, QEDR_MSG_INIT, "Allocating user context %p\n",
	return &ctx->ibucontext;
int qedr_dealloc_ucontext(struct ib_ucontext *ibctx)
	struct qedr_ucontext *uctx = get_qedr_ucontext(ibctx);
	struct qedr_mm *mm, *tmp;

	DP_DEBUG(uctx->dev, QEDR_MSG_INIT, "Deallocating user context %p\n",
	uctx->dev->ops->rdma_remove_user(uctx->dev->rdma_ctx, uctx->dpi);

	list_for_each_entry_safe(mm, tmp, &uctx->mm_head, entry) {
		DP_DEBUG(uctx->dev, QEDR_MSG_MISC,
			 "deleted (addr=0x%llx,len=0x%lx) for ctx=%p\n",
			 mm->key.phy_addr, mm->key.len, uctx);
		list_del(&mm->entry);

int qedr_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
	struct qedr_ucontext *ucontext = get_qedr_ucontext(context);
	struct qedr_dev *dev = get_qedr_dev(context->device);
	unsigned long vm_page = vma->vm_pgoff << PAGE_SHIFT;
	u64 unmapped_db = dev->db_phys_addr;
	unsigned long len = (vma->vm_end - vma->vm_start);

	DP_DEBUG(dev, QEDR_MSG_INIT,
		 "qedr_mmap called vm_page=0x%lx vm_pgoff=0x%lx unmapped_db=0x%llx db_size=%x, len=%lx\n",
		 vm_page, vma->vm_pgoff, unmapped_db, dev->db_size, len);
	if (vma->vm_start & (PAGE_SIZE - 1)) {
		DP_ERR(dev, "Vma_start not page aligned = %ld\n",
		       vma->vm_start);

	found = qedr_search_mmap(ucontext, vm_page, len);
	if (!found) {
		DP_ERR(dev, "Vma_pgoff not found in mapped array = %ld\n",
		       vma->vm_pgoff);

	if ((vm_page >= unmapped_db) && (vm_page <= (unmapped_db +
						     dev->db_size))) {
		DP_DEBUG(dev, QEDR_MSG_INIT, "Mapping doorbell bar\n");
		if (vma->vm_flags & VM_READ) {
			DP_ERR(dev, "Trying to map doorbell bar for read\n");

		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

		rc = io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
					PAGE_SIZE, vma->vm_page_prot);
	} else {
		DP_DEBUG(dev, QEDR_MSG_INIT, "Mapping chains\n");
		rc = remap_pfn_range(vma, vma->vm_start,
				     vma->vm_pgoff, len, vma->vm_page_prot);
	}
	DP_DEBUG(dev, QEDR_MSG_INIT, "qedr_mmap return code: %d\n", rc);
struct ib_pd *qedr_alloc_pd(struct ib_device *ibdev,
			    struct ib_ucontext *context, struct ib_udata *udata)
	struct qedr_dev *dev = get_qedr_dev(ibdev);

	DP_DEBUG(dev, QEDR_MSG_INIT, "Function called from: %s\n",
		 (udata && context) ? "User Lib" : "Kernel");

	if (!dev->rdma_ctx) {
		DP_ERR(dev, "invalid RDMA context\n");
		return ERR_PTR(-EINVAL);

	pd = kzalloc(sizeof(*pd), GFP_KERNEL);
	if (!pd)
		return ERR_PTR(-ENOMEM);

	rc = dev->ops->rdma_alloc_pd(dev->rdma_ctx, &pd_id);

	if (udata && context) {
		struct qedr_alloc_pd_uresp uresp;

		rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
		if (rc) {
			DP_ERR(dev, "copy error pd_id=0x%x.\n", pd_id);
			dev->ops->rdma_dealloc_pd(dev->rdma_ctx, pd_id);

		pd->uctx = get_qedr_ucontext(context);

int qedr_dealloc_pd(struct ib_pd *ibpd)
	struct qedr_dev *dev = get_qedr_dev(ibpd->device);
	struct qedr_pd *pd = get_qedr_pd(ibpd);

		pr_err("Invalid PD received in dealloc_pd\n");

	DP_DEBUG(dev, QEDR_MSG_INIT, "Deallocating PD %d\n", pd->pd_id);
	dev->ops->rdma_dealloc_pd(dev->rdma_ctx, pd->pd_id);
static void qedr_free_pbl(struct qedr_dev *dev,
			  struct qedr_pbl_info *pbl_info, struct qedr_pbl *pbl)
	struct pci_dev *pdev = dev->pdev;

	for (i = 0; i < pbl_info->num_pbls; i++) {
		dma_free_coherent(&pdev->dev, pbl_info->pbl_size,
				  pbl[i].va, pbl[i].pa);

#define MIN_FW_PBL_PAGE_SIZE (4 * 1024)
#define MAX_FW_PBL_PAGE_SIZE (64 * 1024)

#define NUM_PBES_ON_PAGE(_page_size) (_page_size / sizeof(u64))
#define MAX_PBES_ON_PAGE NUM_PBES_ON_PAGE(MAX_FW_PBL_PAGE_SIZE)
#define MAX_PBES_TWO_LAYER (MAX_PBES_ON_PAGE * MAX_PBES_ON_PAGE)
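/* Concretely: a 64 KB PBL page holds 64K / 8 = 8192 PBEs, so a single-layer
 * PBL maps at most 8192 pages and a two-layer PBL maps up to
 * 8192 * 8192 = 67,108,864 pages.
 */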
static struct qedr_pbl *qedr_alloc_pbl_tbl(struct qedr_dev *dev,
					   struct qedr_pbl_info *pbl_info,
					   gfp_t flags)
	struct pci_dev *pdev = dev->pdev;
	struct qedr_pbl *pbl_table;
	dma_addr_t *pbl_main_tbl;

	pbl_table = kcalloc(pbl_info->num_pbls, sizeof(*pbl_table), flags);
	if (!pbl_table)
		return ERR_PTR(-ENOMEM);

	for (i = 0; i < pbl_info->num_pbls; i++) {
		va = dma_alloc_coherent(&pdev->dev, pbl_info->pbl_size,
					&pa, flags);

		memset(va, 0, pbl_info->pbl_size);
		pbl_table[i].va = va;
		pbl_table[i].pa = pa;

	/* Two-layer PBLs: if we have more than one PBL, initialize the first
	 * one with physical pointers to all of the rest.
	 */
	pbl_main_tbl = (dma_addr_t *)pbl_table[0].va;
	for (i = 0; i < pbl_info->num_pbls - 1; i++)
		pbl_main_tbl[i] = pbl_table[i + 1].pa;

	for (i--; i >= 0; i--)
		dma_free_coherent(&pdev->dev, pbl_info->pbl_size,
				  pbl_table[i].va, pbl_table[i].pa);

	qedr_free_pbl(dev, pbl_info, pbl_table);

	return ERR_PTR(-ENOMEM);
static int qedr_prepare_pbl_tbl(struct qedr_dev *dev,
				struct qedr_pbl_info *pbl_info,
				u32 num_pbes, int two_layer_capable)
	if ((num_pbes > MAX_PBES_ON_PAGE) && two_layer_capable) {
		if (num_pbes > MAX_PBES_TWO_LAYER) {
			DP_ERR(dev, "prepare pbl table: too many pages %d\n",
			       num_pbes);

		/* calculate required pbl page size */
		pbl_size = MIN_FW_PBL_PAGE_SIZE;
		pbl_capacity = NUM_PBES_ON_PAGE(pbl_size) *
		    NUM_PBES_ON_PAGE(pbl_size);

		while (pbl_capacity < num_pbes) {
			pbl_size *= 2;
			pbl_capacity = pbl_size / sizeof(u64);
			pbl_capacity = pbl_capacity * pbl_capacity;
		}

		num_pbls = DIV_ROUND_UP(num_pbes, NUM_PBES_ON_PAGE(pbl_size));
		num_pbls++;	/* One for the layer0 (points to the pbls) */
		pbl_info->two_layered = true;
	} else {
		/* One layered PBL */
		num_pbls = 1;
		pbl_size = max_t(u32, MIN_FW_PBL_PAGE_SIZE,
				 roundup_pow_of_two((num_pbes * sizeof(u64))));
		pbl_info->two_layered = false;
	}

	pbl_info->num_pbls = num_pbls;
	pbl_info->pbl_size = pbl_size;
	pbl_info->num_pbes = num_pbes;

	DP_DEBUG(dev, QEDR_MSG_MR,
		 "prepare pbl table: num_pbes=%d, num_pbls=%d, pbl_size=%d\n",
		 pbl_info->num_pbes, pbl_info->num_pbls, pbl_info->pbl_size);
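	/* Worked example: registering 16 MB of 4 KB pages gives
	 * num_pbes = 4096 <= MAX_PBES_ON_PAGE (8192), so a single-layer PBL
	 * is used with pbl_size = max(4 KB, roundup_pow_of_two(4096 * 8)) =
	 * 32 KB and num_pbls = 1.
	 */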
static void qedr_populate_pbls(struct qedr_dev *dev, struct ib_umem *umem,
			       struct qedr_pbl *pbl,
			       struct qedr_pbl_info *pbl_info)
	int shift, pg_cnt, pages, pbe_cnt, total_num_pbes = 0;
	struct qedr_pbl *pbl_tbl;
	struct scatterlist *sg;

	if (!pbl_info->num_pbes)

	/* If we have a two-layered pbl, the first pbl points to the rest of
	 * the pbls and the first entry lies in the second pbl in the table.
	 */
	if (pbl_info->two_layered)

	pbe = (struct regpair *)pbl_tbl->va;
	if (!pbe) {
		DP_ERR(dev, "cannot populate PBL due to a NULL PBE\n");

	shift = ilog2(umem->page_size);

	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
		pages = sg_dma_len(sg) >> shift;
		for (pg_cnt = 0; pg_cnt < pages; pg_cnt++) {
			/* store the page address in pbe */
			pbe->lo = cpu_to_le32(sg_dma_address(sg) +
					      umem->page_size * pg_cnt);
			addr = upper_32_bits(sg_dma_address(sg) +
					     umem->page_size * pg_cnt);
			pbe->hi = cpu_to_le32(addr);

			if (total_num_pbes == pbl_info->num_pbes)

			/* If the given pbl is full storing the pbes,
			 * move to the next pbl.
			 */
			if (pbe_cnt == (pbl_info->pbl_size / sizeof(u64))) {
				pbl_tbl++;
				pbe = (struct regpair *)pbl_tbl->va;
				pbe_cnt = 0;
static int qedr_copy_cq_uresp(struct qedr_dev *dev,
			      struct qedr_cq *cq, struct ib_udata *udata)
	struct qedr_create_cq_uresp uresp;

	memset(&uresp, 0, sizeof(uresp));

	uresp.db_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_UCM_RDMA_CQ_CONS_32BIT);
	uresp.icid = cq->icid;

	rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
		DP_ERR(dev, "copy error cqid=0x%x.\n", cq->icid);

static void consume_cqe(struct qedr_cq *cq)
	if (cq->latest_cqe == cq->toggle_cqe)
		cq->pbl_toggle ^= RDMA_CQE_REQUESTER_TOGGLE_BIT_MASK;

	cq->latest_cqe = qed_chain_consume(&cq->pbl);

static inline int qedr_align_cq_entries(int entries)
	u64 size, aligned_size;

	/* We allocate an extra entry that we don't report to the FW. */
	size = (entries + 1) * QEDR_CQE_SIZE;
	aligned_size = ALIGN(size, PAGE_SIZE);

	return aligned_size / QEDR_CQE_SIZE;
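/* Illustration (assuming a 32-byte CQE and 4 KB pages): a request for 200
 * entries becomes size = 201 * 32 = 6432 bytes, which is aligned up to 8192,
 * so 8192 / 32 = 256 entries are actually allocated.
 */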
static inline int qedr_init_user_queue(struct ib_ucontext *ib_ctx,
				       struct qedr_dev *dev,
				       struct qedr_userq *q,
				       u64 buf_addr, size_t buf_len,
				       int access, int dmasync)
	q->buf_addr = buf_addr;
	q->buf_len = buf_len;
	q->umem = ib_umem_get(ib_ctx, q->buf_addr, q->buf_len, access, dmasync);
	if (IS_ERR(q->umem)) {
		DP_ERR(dev, "create user queue: failed ib_umem_get, got %ld\n",
		       PTR_ERR(q->umem));
		return PTR_ERR(q->umem);

	page_cnt = ib_umem_page_count(q->umem);
	rc = qedr_prepare_pbl_tbl(dev, &q->pbl_info, page_cnt, 0);

	q->pbl_tbl = qedr_alloc_pbl_tbl(dev, &q->pbl_info, GFP_KERNEL);
	if (IS_ERR_OR_NULL(q->pbl_tbl))

	qedr_populate_pbls(dev, q->umem, q->pbl_tbl, &q->pbl_info);

	ib_umem_release(q->umem);

static inline void qedr_init_cq_params(struct qedr_cq *cq,
				       struct qedr_ucontext *ctx,
				       struct qedr_dev *dev, int vector,
				       int chain_entries, int page_cnt,
				       u64 pbl_ptr,
				       struct qed_rdma_create_cq_in_params
				       *params)
	memset(params, 0, sizeof(*params));
	params->cq_handle_hi = upper_32_bits((uintptr_t)cq);
	params->cq_handle_lo = lower_32_bits((uintptr_t)cq);
	params->cnq_id = vector;
	params->cq_size = chain_entries - 1;
	params->dpi = (ctx) ? ctx->dpi : dev->dpi;
	params->pbl_num_pages = page_cnt;
	params->pbl_ptr = pbl_ptr;
	params->pbl_two_level = 0;

static void doorbell_cq(struct qedr_cq *cq, u32 cons, u8 flags)
	/* Flush data before signalling doorbell */
	wmb();
	cq->db.data.agg_flags = flags;
	cq->db.data.value = cpu_to_le32(cons);
	writeq(cq->db.raw, cq->db_addr);

	/* Make sure write would stick */
	mmiowb();

int qedr_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
	struct qedr_cq *cq = get_qedr_cq(ibcq);
	unsigned long sflags;

	if (cq->cq_type == QEDR_CQ_TYPE_GSI)

	spin_lock_irqsave(&cq->cq_lock, sflags);

	if (flags & IB_CQ_SOLICITED)
		cq->arm_flags |= DQ_UCM_ROCE_CQ_ARM_SE_CF_CMD;

	if (flags & IB_CQ_NEXT_COMP)
		cq->arm_flags |= DQ_UCM_ROCE_CQ_ARM_CF_CMD;

	doorbell_cq(cq, cq->cq_cons - 1, cq->arm_flags);

	spin_unlock_irqrestore(&cq->cq_lock, sflags);
struct ib_cq *qedr_create_cq(struct ib_device *ibdev,
			     const struct ib_cq_init_attr *attr,
			     struct ib_ucontext *ib_ctx, struct ib_udata *udata)
	struct qedr_ucontext *ctx = get_qedr_ucontext(ib_ctx);
	struct qed_rdma_destroy_cq_out_params destroy_oparams;
	struct qed_rdma_destroy_cq_in_params destroy_iparams;
	struct qedr_dev *dev = get_qedr_dev(ibdev);
	struct qed_rdma_create_cq_in_params params;
	struct qedr_create_cq_ureq ureq;
	int vector = attr->comp_vector;
	int entries = attr->cqe;

	DP_DEBUG(dev, QEDR_MSG_INIT,
		 "create_cq: called from %s. entries=%d, vector=%d\n",
		 udata ? "User Lib" : "Kernel", entries, vector);

	if (entries > QEDR_MAX_CQES) {
		DP_ERR(dev,
		       "create cq: the number of entries %d is too high. Must be equal or below %d.\n",
		       entries, QEDR_MAX_CQES);
		return ERR_PTR(-EINVAL);
	}

	chain_entries = qedr_align_cq_entries(entries);
	chain_entries = min_t(int, chain_entries, QEDR_MAX_CQES);

	cq = kzalloc(sizeof(*cq), GFP_KERNEL);
	if (!cq)
		return ERR_PTR(-ENOMEM);

	if (udata) {
		memset(&ureq, 0, sizeof(ureq));
		if (ib_copy_from_udata(&ureq, udata, sizeof(ureq))) {
			DP_ERR(dev,
			       "create cq: problem copying data from user space\n");

			DP_ERR(dev,
			       "create cq: cannot create a cq with 0 entries\n");

		cq->cq_type = QEDR_CQ_TYPE_USER;

		rc = qedr_init_user_queue(ib_ctx, dev, &cq->q, ureq.addr,
					  ureq.len, IB_ACCESS_LOCAL_WRITE, 1);

		pbl_ptr = cq->q.pbl_tbl->pa;
		page_cnt = cq->q.pbl_info.num_pbes;
	} else {
		cq->cq_type = QEDR_CQ_TYPE_KERNEL;

		rc = dev->ops->common->chain_alloc(dev->cdev,
						   QED_CHAIN_USE_TO_CONSUME,
						   QED_CHAIN_MODE_PBL,
						   QED_CHAIN_CNT_TYPE_U32,
						   chain_entries,
						   sizeof(union rdma_cqe),
						   &cq->pbl);

		page_cnt = qed_chain_get_page_cnt(&cq->pbl);
		pbl_ptr = qed_chain_get_pbl_phys(&cq->pbl);
	}

	qedr_init_cq_params(cq, ctx, dev, vector, chain_entries, page_cnt,
			    pbl_ptr, &params);

	rc = dev->ops->rdma_create_cq(dev->rdma_ctx, &params, &icid);

	cq->icid = icid;
	cq->sig = QEDR_CQ_MAGIC_NUMBER;
	spin_lock_init(&cq->cq_lock);

	if (udata) {
		rc = qedr_copy_cq_uresp(dev, cq, udata);
	} else {
		/* Generate doorbell address. */
		cq->db_addr = dev->db_addr +
		    DB_ADDR_SHIFT(DQ_PWM_OFFSET_UCM_RDMA_CQ_CONS_32BIT);
		cq->db.data.icid = cq->icid;
		cq->db.data.params = DB_AGG_CMD_SET <<
		    RDMA_PWM_VAL32_DATA_AGG_CMD_SHIFT;

		/* point to the very last element, passing it we will toggle */
		cq->toggle_cqe = qed_chain_get_last_elem(&cq->pbl);
		cq->pbl_toggle = RDMA_CQE_REQUESTER_TOGGLE_BIT_MASK;
		cq->latest_cqe = NULL;

		cq->cq_cons = qed_chain_get_cons_idx_u32(&cq->pbl);
	}

	DP_DEBUG(dev, QEDR_MSG_CQ,
		 "create cq: icid=0x%0x, addr=%p, size(entries)=0x%0x\n",
		 cq->icid, cq, params.cq_size);

	return &cq->ibcq;

	destroy_iparams.icid = cq->icid;
	dev->ops->rdma_destroy_cq(dev->rdma_ctx, &destroy_iparams,
				  &destroy_oparams);

	qedr_free_pbl(dev, &cq->q.pbl_info, cq->q.pbl_tbl);

	dev->ops->common->chain_free(dev->cdev, &cq->pbl);

	ib_umem_release(cq->q.umem);

	return ERR_PTR(-EINVAL);
int qedr_resize_cq(struct ib_cq *ibcq, int new_cnt, struct ib_udata *udata)
	struct qedr_dev *dev = get_qedr_dev(ibcq->device);
	struct qedr_cq *cq = get_qedr_cq(ibcq);

	DP_ERR(dev, "cq %p RESIZE NOT SUPPORTED\n", cq);

int qedr_destroy_cq(struct ib_cq *ibcq)
	struct qedr_dev *dev = get_qedr_dev(ibcq->device);
	struct qed_rdma_destroy_cq_out_params oparams;
	struct qed_rdma_destroy_cq_in_params iparams;
	struct qedr_cq *cq = get_qedr_cq(ibcq);

	DP_DEBUG(dev, QEDR_MSG_CQ, "destroy cq: cq_id %d", cq->icid);

	/* GSI CQs are handled by the driver, so they don't exist in the FW */
	if (cq->cq_type != QEDR_CQ_TYPE_GSI) {
		iparams.icid = cq->icid;
		dev->ops->rdma_destroy_cq(dev->rdma_ctx, &iparams, &oparams);
		dev->ops->common->chain_free(dev->cdev, &cq->pbl);
	}

	if (ibcq->uobject && ibcq->uobject->context) {
		qedr_free_pbl(dev, &cq->q.pbl_info, cq->q.pbl_tbl);
		ib_umem_release(cq->q.umem);
	}
static inline int get_gid_info_from_table(struct ib_qp *ibqp,
					  struct ib_qp_attr *attr,
					  int attr_mask,
					  struct qed_rdma_modify_qp_in_params
					  *qp_params)
	enum rdma_network_type nw_type;
	struct ib_gid_attr gid_attr;

	rc = ib_get_cached_gid(ibqp->device, attr->ah_attr.port_num,
			       attr->ah_attr.grh.sgid_index, &gid, &gid_attr);

	if (!memcmp(&gid, &zgid, sizeof(gid)))

	if (gid_attr.ndev) {
		qp_params->vlan_id = rdma_vlan_dev_vlan_id(gid_attr.ndev);

		dev_put(gid_attr.ndev);
		nw_type = ib_gid_to_network_type(gid_attr.gid_type, &gid);
		switch (nw_type) {
		case RDMA_NETWORK_IPV6:
			memcpy(&qp_params->sgid.bytes[0], &gid.raw[0],
			       sizeof(qp_params->sgid));
			memcpy(&qp_params->dgid.bytes[0],
			       &attr->ah_attr.grh.dgid,
			       sizeof(qp_params->dgid));
			qp_params->roce_mode = ROCE_V2_IPV6;
			SET_FIELD(qp_params->modify_flags,
				  QED_ROCE_MODIFY_QP_VALID_ROCE_MODE, 1);
			break;
		case RDMA_NETWORK_IB:
			memcpy(&qp_params->sgid.bytes[0], &gid.raw[0],
			       sizeof(qp_params->sgid));
			memcpy(&qp_params->dgid.bytes[0],
			       &attr->ah_attr.grh.dgid,
			       sizeof(qp_params->dgid));
			qp_params->roce_mode = ROCE_V1;
			break;
		case RDMA_NETWORK_IPV4:
			memset(&qp_params->sgid, 0, sizeof(qp_params->sgid));
			memset(&qp_params->dgid, 0, sizeof(qp_params->dgid));
			ipv4_addr = qedr_get_ipv4_from_gid(gid.raw);
			qp_params->sgid.ipv4_addr = ipv4_addr;
			ipv4_addr =
			    qedr_get_ipv4_from_gid(attr->ah_attr.grh.dgid.raw);
			qp_params->dgid.ipv4_addr = ipv4_addr;
			SET_FIELD(qp_params->modify_flags,
				  QED_ROCE_MODIFY_QP_VALID_ROCE_MODE, 1);
			qp_params->roce_mode = ROCE_V2_IPV4;
			break;
		}
	}

	for (i = 0; i < 4; i++) {
		qp_params->sgid.dwords[i] = ntohl(qp_params->sgid.dwords[i]);
		qp_params->dgid.dwords[i] = ntohl(qp_params->dgid.dwords[i]);
	}

	if (qp_params->vlan_id >= VLAN_CFI_MASK)
		qp_params->vlan_id = 0;
static void qedr_cleanup_user_sq(struct qedr_dev *dev, struct qedr_qp *qp)
	qedr_free_pbl(dev, &qp->usq.pbl_info, qp->usq.pbl_tbl);
	ib_umem_release(qp->usq.umem);

static void qedr_cleanup_user_rq(struct qedr_dev *dev, struct qedr_qp *qp)
	qedr_free_pbl(dev, &qp->urq.pbl_info, qp->urq.pbl_tbl);
	ib_umem_release(qp->urq.umem);

static void qedr_cleanup_kernel_sq(struct qedr_dev *dev, struct qedr_qp *qp)
	dev->ops->common->chain_free(dev->cdev, &qp->sq.pbl);
	kfree(qp->wqe_wr_id);

static void qedr_cleanup_kernel_rq(struct qedr_dev *dev, struct qedr_qp *qp)
	dev->ops->common->chain_free(dev->cdev, &qp->rq.pbl);
	kfree(qp->rqe_wr_id);

static int qedr_check_qp_attrs(struct ib_pd *ibpd, struct qedr_dev *dev,
			       struct ib_qp_init_attr *attrs)
	struct qedr_device_attr *qattr = &dev->attr;

	/* QP0... attrs->qp_type == IB_QPT_GSI */
	if (attrs->qp_type != IB_QPT_RC && attrs->qp_type != IB_QPT_GSI) {
		DP_DEBUG(dev, QEDR_MSG_QP,
			 "create qp: unsupported qp type=0x%x requested\n",
			 attrs->qp_type);

	if (attrs->cap.max_send_wr > qattr->max_sqe) {
		DP_ERR(dev,
		       "create qp: cannot create a SQ with %d elements (max_send_wr=0x%x)\n",
		       attrs->cap.max_send_wr, qattr->max_sqe);

	if (attrs->cap.max_inline_data > qattr->max_inline) {
		DP_ERR(dev,
		       "create qp: unsupported inline data size=0x%x requested (max_inline=0x%x)\n",
		       attrs->cap.max_inline_data, qattr->max_inline);

	if (attrs->cap.max_send_sge > qattr->max_sge) {
		DP_ERR(dev,
		       "create qp: unsupported send_sge=0x%x requested (max_send_sge=0x%x)\n",
		       attrs->cap.max_send_sge, qattr->max_sge);

	if (attrs->cap.max_recv_sge > qattr->max_sge) {
		DP_ERR(dev,
		       "create qp: unsupported recv_sge=0x%x requested (max_recv_sge=0x%x)\n",
		       attrs->cap.max_recv_sge, qattr->max_sge);

	/* Unprivileged user space cannot create special QP */
	if (ibpd->uobject && attrs->qp_type == IB_QPT_GSI) {
		DP_ERR(dev,
		       "create qp: userspace can't create special QPs of type=0x%x\n",
		       attrs->qp_type);
static void qedr_copy_rq_uresp(struct qedr_create_qp_uresp *uresp,
			       struct qedr_qp *qp)
	uresp->rq_db_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_ROCE_RQ_PROD);
	uresp->rq_icid = qp->icid;

static void qedr_copy_sq_uresp(struct qedr_create_qp_uresp *uresp,
			       struct qedr_qp *qp)
	uresp->sq_db_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD);
	uresp->sq_icid = qp->icid + 1;

static int qedr_copy_qp_uresp(struct qedr_dev *dev,
			      struct qedr_qp *qp, struct ib_udata *udata)
	struct qedr_create_qp_uresp uresp;

	memset(&uresp, 0, sizeof(uresp));
	qedr_copy_sq_uresp(&uresp, qp);
	qedr_copy_rq_uresp(&uresp, qp);

	uresp.atomic_supported = dev->atomic_cap != IB_ATOMIC_NONE;
	uresp.qp_id = qp->qp_id;

	rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
	if (rc)
		DP_ERR(dev,
		       "create qp: failed a copy to user space with qp icid=0x%x.\n",
		       qp->icid);

static void qedr_set_qp_init_params(struct qedr_dev *dev,
				    struct qedr_qp *qp,
				    struct qedr_pd *pd,
				    struct ib_qp_init_attr *attrs)
	spin_lock_init(&qp->q_lock);

	qp->qp_type = attrs->qp_type;
	qp->max_inline_data = attrs->cap.max_inline_data;
	qp->sq.max_sges = attrs->cap.max_send_sge;
	qp->state = QED_ROCE_QP_STATE_RESET;
	qp->signaled = (attrs->sq_sig_type == IB_SIGNAL_ALL_WR) ? true : false;
	qp->sq_cq = get_qedr_cq(attrs->send_cq);
	qp->rq_cq = get_qedr_cq(attrs->recv_cq);

	DP_DEBUG(dev, QEDR_MSG_QP,
		 "QP params:\tpd = %d, qp_type = %d, max_inline_data = %d, state = %d, signaled = %d, use_srq=%d\n",
		 pd->pd_id, qp->qp_type, qp->max_inline_data,
		 qp->state, qp->signaled, (attrs->srq) ? 1 : 0);
	DP_DEBUG(dev, QEDR_MSG_QP,
		 "SQ params:\tsq_max_sges = %d, sq_cq_id = %d\n",
		 qp->sq.max_sges, qp->sq_cq->icid);
	qp->rq.max_sges = attrs->cap.max_recv_sge;
	DP_DEBUG(dev, QEDR_MSG_QP,
		 "RQ params:\trq_max_sges = %d, rq_cq_id = %d\n",
		 qp->rq.max_sges, qp->rq_cq->icid);
static inline void
qedr_init_qp_user_params(struct qed_rdma_create_qp_in_params *params,
			 struct qedr_create_qp_ureq *ureq)
	/* QP handle to be written in CQE */
	params->qp_handle_lo = ureq->qp_handle_lo;
	params->qp_handle_hi = ureq->qp_handle_hi;

static inline void
qedr_init_qp_kernel_doorbell_sq(struct qedr_dev *dev, struct qedr_qp *qp)
	qp->sq.db = dev->db_addr +
		    DB_ADDR_SHIFT(DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD);
	qp->sq.db_data.data.icid = qp->icid + 1;

static inline void
qedr_init_qp_kernel_doorbell_rq(struct qedr_dev *dev, struct qedr_qp *qp)
	qp->rq.db = dev->db_addr +
		    DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_ROCE_RQ_PROD);
	qp->rq.db_data.data.icid = qp->icid;

static inline int
qedr_init_qp_kernel_params_rq(struct qedr_dev *dev,
			      struct qedr_qp *qp, struct ib_qp_init_attr *attrs)
	/* Allocate driver internal RQ array */
	qp->rqe_wr_id = kcalloc(qp->rq.max_wr, sizeof(*qp->rqe_wr_id),
				GFP_KERNEL);

	DP_DEBUG(dev, QEDR_MSG_QP, "RQ max_wr set to %d.\n", qp->rq.max_wr);

static inline int
qedr_init_qp_kernel_params_sq(struct qedr_dev *dev,
			      struct qedr_qp *qp,
			      struct ib_qp_init_attr *attrs,
			      struct qed_rdma_create_qp_in_params *params)
	/* Allocate driver internal SQ array */
	temp_max_wr = attrs->cap.max_send_wr * dev->wq_multiplier;
	temp_max_wr = min_t(u32, temp_max_wr, dev->attr.max_sqe);

	/* temp_max_wr < attr->max_sqe < u16 so the casting is safe */
	qp->sq.max_wr = (u16)temp_max_wr;
	qp->wqe_wr_id = kcalloc(qp->sq.max_wr, sizeof(*qp->wqe_wr_id),
				GFP_KERNEL);

	DP_DEBUG(dev, QEDR_MSG_QP, "SQ max_wr set to %d.\n", qp->sq.max_wr);

	/* QP handle to be written in CQE */
	params->qp_handle_lo = lower_32_bits((uintptr_t)qp);
	params->qp_handle_hi = upper_32_bits((uintptr_t)qp);

static inline int qedr_init_qp_kernel_sq(struct qedr_dev *dev,
					 struct qedr_qp *qp,
					 struct ib_qp_init_attr *attrs)
	u32 n_sq_elems, n_sq_entries;

	/* A single work request may take up to QEDR_MAX_SQ_WQE_SIZE elements in
	 * the ring. The ring should allow at least a single WR, even if the
	 * user requested none, due to allocation issues.
	 */
	n_sq_entries = attrs->cap.max_send_wr;
	n_sq_entries = min_t(u32, n_sq_entries, dev->attr.max_sqe);
	n_sq_entries = max_t(u32, n_sq_entries, 1);
	n_sq_elems = n_sq_entries * QEDR_MAX_SQE_ELEMENTS_PER_SQE;
	rc = dev->ops->common->chain_alloc(dev->cdev,
					   QED_CHAIN_USE_TO_PRODUCE,
					   QED_CHAIN_MODE_PBL,
					   QED_CHAIN_CNT_TYPE_U32,
					   n_sq_elems,
					   QEDR_SQE_ELEMENT_SIZE,
					   &qp->sq.pbl);
	if (rc) {
		DP_ERR(dev, "failed to allocate QP %p SQ\n", qp);
		return rc;
	}

	DP_DEBUG(dev, QEDR_MSG_SQ,
		 "SQ Pbl base addr = %llx max_send_wr=%d max_wr=%d capacity=%d, rc=%d\n",
		 qed_chain_get_pbl_phys(&qp->sq.pbl), attrs->cap.max_send_wr,
		 n_sq_entries, qed_chain_get_capacity(&qp->sq.pbl), rc);
static inline int qedr_init_qp_kernel_rq(struct qedr_dev *dev,
					 struct qedr_qp *qp,
					 struct ib_qp_init_attr *attrs)
	u32 n_rq_elems, n_rq_entries;

	/* A single work request may take up to QEDR_MAX_RQ_WQE_SIZE elements in
	 * the ring. The ring should allow at least a single WR, even if the
	 * user requested none, due to allocation issues.
	 */
	n_rq_entries = max_t(u32, attrs->cap.max_recv_wr, 1);
	n_rq_elems = n_rq_entries * QEDR_MAX_RQE_ELEMENTS_PER_RQE;
	rc = dev->ops->common->chain_alloc(dev->cdev,
					   QED_CHAIN_USE_TO_CONSUME_PRODUCE,
					   QED_CHAIN_MODE_PBL,
					   QED_CHAIN_CNT_TYPE_U32,
					   n_rq_elems,
					   QEDR_RQE_ELEMENT_SIZE,
					   &qp->rq.pbl);
	if (rc) {
		DP_ERR(dev, "failed to allocate memory for QP %p RQ\n", qp);
		return rc;
	}

	DP_DEBUG(dev, QEDR_MSG_RQ,
		 "RQ Pbl base addr = %llx max_recv_wr=%d max_wr=%d capacity=%d, rc=%d\n",
		 qed_chain_get_pbl_phys(&qp->rq.pbl), attrs->cap.max_recv_wr,
		 n_rq_entries, qed_chain_get_capacity(&qp->rq.pbl), rc);

	/* n_rq_entries < u16 so the casting is safe */
	qp->rq.max_wr = (u16)n_rq_entries;
static inline void
qedr_init_qp_in_params_sq(struct qedr_dev *dev,
			  struct qedr_pd *pd,
			  struct qedr_qp *qp,
			  struct ib_qp_init_attr *attrs,
			  struct ib_udata *udata,
			  struct qed_rdma_create_qp_in_params *params)
	/* QP handle to be written in an async event */
	params->qp_handle_async_lo = lower_32_bits((uintptr_t)qp);
	params->qp_handle_async_hi = upper_32_bits((uintptr_t)qp);

	params->signal_all = (attrs->sq_sig_type == IB_SIGNAL_ALL_WR);
	params->fmr_and_reserved_lkey = !udata;
	params->pd = pd->pd_id;
	params->dpi = pd->uctx ? pd->uctx->dpi : dev->dpi;
	params->sq_cq_id = get_qedr_cq(attrs->send_cq)->icid;
	params->max_sq_sges = 0;
	params->stats_queue = 0;

	if (udata) {
		params->sq_num_pages = qp->usq.pbl_info.num_pbes;
		params->sq_pbl_ptr = qp->usq.pbl_tbl->pa;
	} else {
		params->sq_num_pages = qed_chain_get_page_cnt(&qp->sq.pbl);
		params->sq_pbl_ptr = qed_chain_get_pbl_phys(&qp->sq.pbl);
	}

static inline void
qedr_init_qp_in_params_rq(struct qedr_qp *qp,
			  struct ib_qp_init_attr *attrs,
			  struct ib_udata *udata,
			  struct qed_rdma_create_qp_in_params *params)
	params->rq_cq_id = get_qedr_cq(attrs->recv_cq)->icid;

	params->use_srq = false;

	if (udata) {
		params->rq_num_pages = qp->urq.pbl_info.num_pbes;
		params->rq_pbl_ptr = qp->urq.pbl_tbl->pa;
	} else {
		params->rq_num_pages = qed_chain_get_page_cnt(&qp->rq.pbl);
		params->rq_pbl_ptr = qed_chain_get_pbl_phys(&qp->rq.pbl);
	}

static inline void qedr_qp_user_print(struct qedr_dev *dev, struct qedr_qp *qp)
	DP_DEBUG(dev, QEDR_MSG_QP,
		 "create qp: successfully created user QP. qp=%p, sq_addr=0x%llx, sq_len=%zd, rq_addr=0x%llx, rq_len=%zd\n",
		 qp, qp->usq.buf_addr, qp->usq.buf_len, qp->urq.buf_addr,
		 qp->urq.buf_len);

static inline int qedr_init_user_qp(struct ib_ucontext *ib_ctx,
				    struct qedr_dev *dev,
				    struct qedr_qp *qp,
				    struct qedr_create_qp_ureq *ureq)
	/* SQ - read access only (0), dma sync not required (0) */
	rc = qedr_init_user_queue(ib_ctx, dev, &qp->usq, ureq->sq_addr,
				  ureq->sq_len, 0, 0);
	if (rc)
		return rc;

	/* RQ - read access only (0), dma sync not required (0) */
	rc = qedr_init_user_queue(ib_ctx, dev, &qp->urq, ureq->rq_addr,
				  ureq->rq_len, 0, 0);
	if (rc)
		qedr_cleanup_user_sq(dev, qp);

static inline int
qedr_init_kernel_qp(struct qedr_dev *dev,
		    struct qedr_qp *qp,
		    struct ib_qp_init_attr *attrs,
		    struct qed_rdma_create_qp_in_params *params)
	rc = qedr_init_qp_kernel_sq(dev, qp, attrs);
	if (rc) {
		DP_ERR(dev, "failed to init kernel QP %p SQ\n", qp);
		return rc;
	}

	rc = qedr_init_qp_kernel_params_sq(dev, qp, attrs, params);
	if (rc) {
		dev->ops->common->chain_free(dev->cdev, &qp->sq.pbl);
		DP_ERR(dev, "failed to init kernel QP %p SQ params\n", qp);
		return rc;
	}

	rc = qedr_init_qp_kernel_rq(dev, qp, attrs);
	if (rc) {
		qedr_cleanup_kernel_sq(dev, qp);
		DP_ERR(dev, "failed to init kernel QP %p RQ\n", qp);
		return rc;
	}

	rc = qedr_init_qp_kernel_params_rq(dev, qp, attrs);
	if (rc) {
		DP_ERR(dev, "failed to init kernel QP %p RQ params\n", qp);
		qedr_cleanup_kernel_sq(dev, qp);
		dev->ops->common->chain_free(dev->cdev, &qp->rq.pbl);
		return rc;
	}
struct ib_qp *qedr_create_qp(struct ib_pd *ibpd,
			     struct ib_qp_init_attr *attrs,
			     struct ib_udata *udata)
	struct qedr_dev *dev = get_qedr_dev(ibpd->device);
	struct qed_rdma_create_qp_out_params out_params;
	struct qed_rdma_create_qp_in_params in_params;
	struct qedr_pd *pd = get_qedr_pd(ibpd);
	struct ib_ucontext *ib_ctx = NULL;
	struct qedr_ucontext *ctx = NULL;
	struct qedr_create_qp_ureq ureq;

	DP_DEBUG(dev, QEDR_MSG_QP, "create qp: called from %s, pd=%p\n",
		 udata ? "user library" : "kernel", pd);

	rc = qedr_check_qp_attrs(ibpd, dev, attrs);
	if (rc)
		return ERR_PTR(rc);

	qp = kzalloc(sizeof(*qp), GFP_KERNEL);
	if (!qp)
		return ERR_PTR(-ENOMEM);

		return ERR_PTR(-EINVAL);

	DP_DEBUG(dev, QEDR_MSG_QP,
		 "create qp: sq_cq=%p, sq_icid=%d, rq_cq=%p, rq_icid=%d\n",
		 get_qedr_cq(attrs->send_cq),
		 get_qedr_cq(attrs->send_cq)->icid,
		 get_qedr_cq(attrs->recv_cq),
		 get_qedr_cq(attrs->recv_cq)->icid);

	qedr_set_qp_init_params(dev, qp, pd, attrs);

	if (attrs->qp_type == IB_QPT_GSI) {
		if (udata)
			DP_ERR(dev,
			       "create qp: unexpected udata when creating GSI QP\n");

		return qedr_create_gsi_qp(dev, attrs, qp);
	}

	memset(&in_params, 0, sizeof(in_params));

	if (udata) {
		if (!(udata && ibpd->uobject && ibpd->uobject->context))

		ib_ctx = ibpd->uobject->context;
		ctx = get_qedr_ucontext(ib_ctx);

		memset(&ureq, 0, sizeof(ureq));
		if (ib_copy_from_udata(&ureq, udata, sizeof(ureq))) {
			DP_ERR(dev,
			       "create qp: problem copying data from user space\n");

		rc = qedr_init_user_qp(ib_ctx, dev, qp, &ureq);

		qedr_init_qp_user_params(&in_params, &ureq);
	} else {
		rc = qedr_init_kernel_qp(dev, qp, attrs, &in_params);
	}

	qedr_init_qp_in_params_sq(dev, pd, qp, attrs, udata, &in_params);
	qedr_init_qp_in_params_rq(qp, attrs, udata, &in_params);

	qp->qed_qp = dev->ops->rdma_create_qp(dev->rdma_ctx,
					      &in_params, &out_params);

	qp->qp_id = out_params.qp_id;
	qp->icid = out_params.icid;
	qp->ibqp.qp_num = qp->qp_id;

	if (udata) {
		rc = qedr_copy_qp_uresp(dev, qp, udata);

		qedr_qp_user_print(dev, qp);
	} else {
		qedr_init_qp_kernel_doorbell_sq(dev, qp);
		qedr_init_qp_kernel_doorbell_rq(dev, qp);
	}

	DP_DEBUG(dev, QEDR_MSG_QP, "created %s space QP %p\n",
		 udata ? "user" : "kernel", qp);

	return &qp->ibqp;

	rc = dev->ops->rdma_destroy_qp(dev->rdma_ctx, qp->qed_qp);
	if (rc)
		DP_ERR(dev, "create qp: fatal fault. rc=%d", rc);

	qedr_cleanup_user_sq(dev, qp);
	qedr_cleanup_user_rq(dev, qp);

	qedr_cleanup_kernel_sq(dev, qp);
	qedr_cleanup_kernel_rq(dev, qp);

	return ERR_PTR(-EFAULT);
enum ib_qp_state qedr_get_ibqp_state(enum qed_roce_qp_state qp_state)
	switch (qp_state) {
	case QED_ROCE_QP_STATE_RESET:
		return IB_QPS_RESET;
	case QED_ROCE_QP_STATE_INIT:
		return IB_QPS_INIT;
	case QED_ROCE_QP_STATE_RTR:
		return IB_QPS_RTR;
	case QED_ROCE_QP_STATE_RTS:
		return IB_QPS_RTS;
	case QED_ROCE_QP_STATE_SQD:
		return IB_QPS_SQD;
	case QED_ROCE_QP_STATE_ERR:
		return IB_QPS_ERR;
	case QED_ROCE_QP_STATE_SQE:
		return IB_QPS_SQE;
	}

enum qed_roce_qp_state qedr_get_state_from_ibqp(enum ib_qp_state qp_state)
	switch (qp_state) {
	case IB_QPS_RESET:
		return QED_ROCE_QP_STATE_RESET;
	case IB_QPS_INIT:
		return QED_ROCE_QP_STATE_INIT;
	case IB_QPS_RTR:
		return QED_ROCE_QP_STATE_RTR;
	case IB_QPS_RTS:
		return QED_ROCE_QP_STATE_RTS;
	case IB_QPS_SQD:
		return QED_ROCE_QP_STATE_SQD;
	case IB_QPS_ERR:
		return QED_ROCE_QP_STATE_ERR;
	default:
		return QED_ROCE_QP_STATE_ERR;
	}

static void qedr_reset_qp_hwq_info(struct qedr_qp_hwq_info *qph)
	qed_chain_reset(&qph->pbl);

	qph->db_data.data.value = cpu_to_le16(0);
static int qedr_update_qp_state(struct qedr_dev *dev,
				struct qedr_qp *qp,
				enum qed_roce_qp_state new_state)
	if (new_state == qp->state)
		return 0;

	switch (qp->state) {
	case QED_ROCE_QP_STATE_RESET:
		switch (new_state) {
		case QED_ROCE_QP_STATE_INIT:
			qp->prev_wqe_size = 0;
			qedr_reset_qp_hwq_info(&qp->sq);
			qedr_reset_qp_hwq_info(&qp->rq);
			break;
		}
		break;
	case QED_ROCE_QP_STATE_INIT:
		switch (new_state) {
		case QED_ROCE_QP_STATE_RTR:
			/* Update doorbell (in case post_recv was
			 * done before move to RTR)
			 */
			wmb();
			writel(qp->rq.db_data.raw, qp->rq.db);
			/* Make sure write takes effect */
			mmiowb();
			break;
		case QED_ROCE_QP_STATE_ERR:
			break;
		default:
			/* Invalid state change. */
			break;
		}
		break;
	case QED_ROCE_QP_STATE_RTR:
		switch (new_state) {
		case QED_ROCE_QP_STATE_RTS:
			break;
		case QED_ROCE_QP_STATE_ERR:
			break;
		default:
			/* Invalid state change. */
			break;
		}
		break;
	case QED_ROCE_QP_STATE_RTS:
		switch (new_state) {
		case QED_ROCE_QP_STATE_SQD:
			break;
		case QED_ROCE_QP_STATE_ERR:
			break;
		default:
			/* Invalid state change. */
			break;
		}
		break;
	case QED_ROCE_QP_STATE_SQD:
		switch (new_state) {
		case QED_ROCE_QP_STATE_RTS:
		case QED_ROCE_QP_STATE_ERR:
			break;
		default:
			/* Invalid state change. */
			break;
		}
		break;
	case QED_ROCE_QP_STATE_ERR:
		switch (new_state) {
		case QED_ROCE_QP_STATE_RESET:
			if ((qp->rq.prod != qp->rq.cons) ||
			    (qp->sq.prod != qp->sq.cons)) {
				DP_NOTICE(dev,
					  "Error->Reset with rq/sq not empty rq.prod=%x rq.cons=%x sq.prod=%x sq.cons=%x\n",
					  qp->rq.prod, qp->rq.cons, qp->sq.prod,
					  qp->sq.cons);
int qedr_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		   int attr_mask, struct ib_udata *udata)
	struct qedr_qp *qp = get_qedr_qp(ibqp);
	struct qed_rdma_modify_qp_in_params qp_params = { 0 };
	struct qedr_dev *dev = get_qedr_dev(&qp->dev->ibdev);
	enum ib_qp_state old_qp_state, new_qp_state;

	DP_DEBUG(dev, QEDR_MSG_QP,
		 "modify qp: qp %p attr_mask=0x%x, state=%d", qp, attr_mask,
		 attr->qp_state);

	old_qp_state = qedr_get_ibqp_state(qp->state);
	if (attr_mask & IB_QP_STATE)
		new_qp_state = attr->qp_state;
	else
		new_qp_state = old_qp_state;

	if (!ib_modify_qp_is_ok
	    (old_qp_state, new_qp_state, ibqp->qp_type, attr_mask,
	     IB_LINK_LAYER_ETHERNET)) {
		DP_ERR(dev,
		       "modify qp: invalid attribute mask=0x%x specified for\n"
		       "qpn=0x%x of type=0x%x old_qp_state=0x%x, new_qp_state=0x%x\n",
		       attr_mask, qp->qp_id, ibqp->qp_type, old_qp_state,
		       new_qp_state);

	/* Translate the masks... */
	if (attr_mask & IB_QP_STATE) {
		SET_FIELD(qp_params.modify_flags,
			  QED_RDMA_MODIFY_QP_VALID_NEW_STATE, 1);
		qp_params.new_state = qedr_get_state_from_ibqp(attr->qp_state);
	}

	if (attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY)
		qp_params.sqd_async = true;

	if (attr_mask & IB_QP_PKEY_INDEX) {
		SET_FIELD(qp_params.modify_flags,
			  QED_ROCE_MODIFY_QP_VALID_PKEY, 1);
		if (attr->pkey_index >= QEDR_ROCE_PKEY_TABLE_LEN) {

		qp_params.pkey = QEDR_ROCE_PKEY_DEFAULT;
	}

	if (attr_mask & IB_QP_QKEY)
		qp->qkey = attr->qkey;

	if (attr_mask & IB_QP_ACCESS_FLAGS) {
		SET_FIELD(qp_params.modify_flags,
			  QED_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN, 1);
		qp_params.incoming_rdma_read_en = attr->qp_access_flags &
						  IB_ACCESS_REMOTE_READ;
		qp_params.incoming_rdma_write_en = attr->qp_access_flags &
						   IB_ACCESS_REMOTE_WRITE;
		qp_params.incoming_atomic_en = attr->qp_access_flags &
					       IB_ACCESS_REMOTE_ATOMIC;
	}

	if (attr_mask & (IB_QP_AV | IB_QP_PATH_MTU)) {
		if (attr_mask & IB_QP_PATH_MTU) {
			if (attr->path_mtu < IB_MTU_256 ||
			    attr->path_mtu > IB_MTU_4096) {
				pr_err("error: Only MTU sizes of 256, 512, 1024, 2048 and 4096 are supported by RoCE\n");

			qp->mtu = min(ib_mtu_enum_to_int(attr->path_mtu),
				      ib_mtu_enum_to_int(iboe_get_mtu
							 (dev->ndev->mtu)));
		}

		if (!qp->mtu) {
			qp->mtu =
			    ib_mtu_enum_to_int(iboe_get_mtu(dev->ndev->mtu));
			pr_err("Fixing zeroed MTU to qp->mtu = %d\n", qp->mtu);
		}

		SET_FIELD(qp_params.modify_flags,
			  QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR, 1);

		qp_params.traffic_class_tos = attr->ah_attr.grh.traffic_class;
		qp_params.flow_label = attr->ah_attr.grh.flow_label;
		qp_params.hop_limit_ttl = attr->ah_attr.grh.hop_limit;

		qp->sgid_idx = attr->ah_attr.grh.sgid_index;

		rc = get_gid_info_from_table(ibqp, attr, attr_mask, &qp_params);
		if (rc) {
			DP_ERR(dev,
			       "modify qp: problems with GID index %d (rc=%d)\n",
			       attr->ah_attr.grh.sgid_index, rc);
			return rc;
		}

		rc = qedr_get_dmac(dev, &attr->ah_attr,
				   qp_params.remote_mac_addr);

		qp_params.use_local_mac = true;
		ether_addr_copy(qp_params.local_mac_addr, dev->ndev->dev_addr);

		DP_DEBUG(dev, QEDR_MSG_QP, "dgid=%x:%x:%x:%x\n",
			 qp_params.dgid.dwords[0], qp_params.dgid.dwords[1],
			 qp_params.dgid.dwords[2], qp_params.dgid.dwords[3]);
		DP_DEBUG(dev, QEDR_MSG_QP, "sgid=%x:%x:%x:%x\n",
			 qp_params.sgid.dwords[0], qp_params.sgid.dwords[1],
			 qp_params.sgid.dwords[2], qp_params.sgid.dwords[3]);
		DP_DEBUG(dev, QEDR_MSG_QP, "remote_mac=[%pM]\n",
			 qp_params.remote_mac_addr);

		qp_params.mtu = qp->mtu;
		qp_params.lb_indication = false;
	}

	if (!qp_params.mtu) {
		/* Stay with current MTU */
		if (qp->mtu)
			qp_params.mtu = qp->mtu;
		else
			qp_params.mtu =
			    ib_mtu_enum_to_int(iboe_get_mtu(dev->ndev->mtu));
	}

	if (attr_mask & IB_QP_TIMEOUT) {
		SET_FIELD(qp_params.modify_flags,
			  QED_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT, 1);

		/* The received timeout value is an exponent used like this:
		 * "12.7.34 LOCAL ACK TIMEOUT
		 * Value representing the transport (ACK) timeout for use by
		 * the remote, expressed as: 4.096 * 2^timeout [usec]"
		 * The FW expects timeout in msec so we need to divide the usec
		 * result by 1000. We'll approximate 1000~2^10, and 4.096 ~ 2^2,
		 * so we get: 2^2 * 2^timeout / 2^10 = 2^(timeout - 8).
		 * The value of zero means infinite so we use a 'max_t' to make
		 * sure that sub 1 msec values will be configured as 1 msec.
		 */
		if (attr->timeout)
			qp_params.ack_timeout =
			    1 << max_t(int, attr->timeout - 8, 0);
		else
			qp_params.ack_timeout = 0;
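		/* e.g. attr->timeout = 14 encodes 4.096 usec * 2^14 ~= 67 msec
		 * and the driver programs 1 << (14 - 8) = 64 msec, while a
		 * small value such as attr->timeout = 5 (~131 usec) is clamped
		 * up to 1 msec by the max_t() above.
		 */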
	}

	if (attr_mask & IB_QP_RETRY_CNT) {
		SET_FIELD(qp_params.modify_flags,
			  QED_ROCE_MODIFY_QP_VALID_RETRY_CNT, 1);
		qp_params.retry_cnt = attr->retry_cnt;
	}

	if (attr_mask & IB_QP_RNR_RETRY) {
		SET_FIELD(qp_params.modify_flags,
			  QED_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT, 1);
		qp_params.rnr_retry_cnt = attr->rnr_retry;
	}

	if (attr_mask & IB_QP_RQ_PSN) {
		SET_FIELD(qp_params.modify_flags,
			  QED_ROCE_MODIFY_QP_VALID_RQ_PSN, 1);
		qp_params.rq_psn = attr->rq_psn;
		qp->rq_psn = attr->rq_psn;
	}

	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
		if (attr->max_rd_atomic > dev->attr.max_qp_req_rd_atomic_resc) {
			DP_ERR(dev,
			       "unsupported max_rd_atomic=%d, supported=%d\n",
			       attr->max_rd_atomic,
			       dev->attr.max_qp_req_rd_atomic_resc);
		}

		SET_FIELD(qp_params.modify_flags,
			  QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ, 1);
		qp_params.max_rd_atomic_req = attr->max_rd_atomic;
	}

	if (attr_mask & IB_QP_MIN_RNR_TIMER) {
		SET_FIELD(qp_params.modify_flags,
			  QED_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER, 1);
		qp_params.min_rnr_nak_timer = attr->min_rnr_timer;
	}

	if (attr_mask & IB_QP_SQ_PSN) {
		SET_FIELD(qp_params.modify_flags,
			  QED_ROCE_MODIFY_QP_VALID_SQ_PSN, 1);
		qp_params.sq_psn = attr->sq_psn;
		qp->sq_psn = attr->sq_psn;
	}

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
		if (attr->max_dest_rd_atomic >
		    dev->attr.max_qp_resp_rd_atomic_resc) {
			DP_ERR(dev,
			       "unsupported max_dest_rd_atomic=%d, supported=%d\n",
			       attr->max_dest_rd_atomic,
			       dev->attr.max_qp_resp_rd_atomic_resc);
		}

		SET_FIELD(qp_params.modify_flags,
			  QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP, 1);
		qp_params.max_rd_atomic_resp = attr->max_dest_rd_atomic;
	}

	if (attr_mask & IB_QP_DEST_QPN) {
		SET_FIELD(qp_params.modify_flags,
			  QED_ROCE_MODIFY_QP_VALID_DEST_QP, 1);

		qp_params.dest_qp = attr->dest_qp_num;
		qp->dest_qp_num = attr->dest_qp_num;
	}

	if (qp->qp_type != IB_QPT_GSI)
		rc = dev->ops->rdma_modify_qp(dev->rdma_ctx,
					      qp->qed_qp, &qp_params);

	if (attr_mask & IB_QP_STATE) {
		if ((qp->qp_type != IB_QPT_GSI) && (!udata))
			qedr_update_qp_state(dev, qp, qp_params.new_state);
		qp->state = qp_params.new_state;
	}
static int qedr_to_ib_qp_acc_flags(struct qed_rdma_query_qp_out_params *params)
	int ib_qp_acc_flags = 0;

	if (params->incoming_rdma_write_en)
		ib_qp_acc_flags |= IB_ACCESS_REMOTE_WRITE;
	if (params->incoming_rdma_read_en)
		ib_qp_acc_flags |= IB_ACCESS_REMOTE_READ;
	if (params->incoming_atomic_en)
		ib_qp_acc_flags |= IB_ACCESS_REMOTE_ATOMIC;
	ib_qp_acc_flags |= IB_ACCESS_LOCAL_WRITE;
	return ib_qp_acc_flags;

int qedr_query_qp(struct ib_qp *ibqp,
		  struct ib_qp_attr *qp_attr,
		  int attr_mask, struct ib_qp_init_attr *qp_init_attr)
	struct qed_rdma_query_qp_out_params params;
	struct qedr_qp *qp = get_qedr_qp(ibqp);
	struct qedr_dev *dev = qp->dev;

	memset(&params, 0, sizeof(params));
	memset(qp_attr, 0, sizeof(*qp_attr));
	memset(qp_init_attr, 0, sizeof(*qp_init_attr));

	if (qp->qp_type != IB_QPT_GSI) {
		rc = dev->ops->rdma_query_qp(dev->rdma_ctx, qp->qed_qp, &params);

		qp_attr->qp_state = qedr_get_ibqp_state(params.state);
	} else {
		qp_attr->qp_state = qedr_get_ibqp_state(QED_ROCE_QP_STATE_RTS);
	}

	qp_attr->cur_qp_state = qedr_get_ibqp_state(params.state);
	qp_attr->path_mtu = iboe_get_mtu(params.mtu);
	qp_attr->path_mig_state = IB_MIG_MIGRATED;
	qp_attr->rq_psn = params.rq_psn;
	qp_attr->sq_psn = params.sq_psn;
	qp_attr->dest_qp_num = params.dest_qp;

	qp_attr->qp_access_flags = qedr_to_ib_qp_acc_flags(&params);

	qp_attr->cap.max_send_wr = qp->sq.max_wr;
	qp_attr->cap.max_recv_wr = qp->rq.max_wr;
	qp_attr->cap.max_send_sge = qp->sq.max_sges;
	qp_attr->cap.max_recv_sge = qp->rq.max_sges;
	qp_attr->cap.max_inline_data = ROCE_REQ_MAX_INLINE_DATA_SIZE;
	qp_init_attr->cap = qp_attr->cap;

	memcpy(&qp_attr->ah_attr.grh.dgid.raw[0], &params.dgid.bytes[0],
	       sizeof(qp_attr->ah_attr.grh.dgid.raw));

	qp_attr->ah_attr.grh.flow_label = params.flow_label;
	qp_attr->ah_attr.grh.sgid_index = qp->sgid_idx;
	qp_attr->ah_attr.grh.hop_limit = params.hop_limit_ttl;
	qp_attr->ah_attr.grh.traffic_class = params.traffic_class_tos;

	qp_attr->ah_attr.ah_flags = IB_AH_GRH;
	qp_attr->ah_attr.port_num = 1;
	qp_attr->ah_attr.sl = 0;
	qp_attr->timeout = params.timeout;
	qp_attr->rnr_retry = params.rnr_retry;
	qp_attr->retry_cnt = params.retry_cnt;
	qp_attr->min_rnr_timer = params.min_rnr_nak_timer;
	qp_attr->pkey_index = params.pkey_index;
	qp_attr->port_num = 1;
	qp_attr->ah_attr.src_path_bits = 0;
	qp_attr->ah_attr.static_rate = 0;
	qp_attr->alt_pkey_index = 0;
	qp_attr->alt_port_num = 0;
	qp_attr->alt_timeout = 0;
	memset(&qp_attr->alt_ah_attr, 0, sizeof(qp_attr->alt_ah_attr));

	qp_attr->sq_draining = (params.state == QED_ROCE_QP_STATE_SQD) ? 1 : 0;
	qp_attr->max_dest_rd_atomic = params.max_dest_rd_atomic;
	qp_attr->max_rd_atomic = params.max_rd_atomic;
	qp_attr->en_sqd_async_notify = (params.sqd_async) ? 1 : 0;

	DP_DEBUG(dev, QEDR_MSG_QP, "QEDR_QUERY_QP: max_inline_data=%d\n",
		 qp_attr->cap.max_inline_data);
int qedr_destroy_qp(struct ib_qp *ibqp)
	struct qedr_qp *qp = get_qedr_qp(ibqp);
	struct qedr_dev *dev = qp->dev;
	struct ib_qp_attr attr;

	DP_DEBUG(dev, QEDR_MSG_QP, "destroy qp: destroying %p, qp type=%d\n",
		 qp, qp->qp_type);

	if (qp->state != QED_ROCE_QP_STATE_RESET &&
	    qp->state != QED_ROCE_QP_STATE_ERR &&
	    qp->state != QED_ROCE_QP_STATE_INIT) {
		attr.qp_state = IB_QPS_ERR;
		attr_mask |= IB_QP_STATE;

		/* Change the QP state to ERROR */
		qedr_modify_qp(ibqp, &attr, attr_mask, NULL);
	}

	if (qp->qp_type != IB_QPT_GSI) {
		rc = dev->ops->rdma_destroy_qp(dev->rdma_ctx, qp->qed_qp);
	} else {
		qedr_destroy_gsi_qp(dev);
	}

	if (ibqp->uobject && ibqp->uobject->context) {
		qedr_cleanup_user_sq(dev, qp);
		qedr_cleanup_user_rq(dev, qp);
	} else {
		qedr_cleanup_kernel_sq(dev, qp);
		qedr_cleanup_kernel_rq(dev, qp);
	}

struct ib_ah *qedr_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr)
	ah = kzalloc(sizeof(*ah), GFP_ATOMIC);
	if (!ah)
		return ERR_PTR(-ENOMEM);

int qedr_destroy_ah(struct ib_ah *ibah)
	struct qedr_ah *ah = get_qedr_ah(ibah);

	kfree(ah);
2146 struct qedr_pbl *pbl, *tmp;
2148 if (info->pbl_table)
2149 list_add_tail(&info->pbl_table->list_entry,
2150 &info->free_pbl_list);
2152 if (!list_empty(&info->inuse_pbl_list))
2153 list_splice(&info->inuse_pbl_list, &info->free_pbl_list);
2155 list_for_each_entry_safe(pbl, tmp, &info->free_pbl_list, list_entry) {
2156 list_del(&pbl->list_entry);
2157 qedr_free_pbl(dev, &info->pbl_info, pbl);
2161 static int init_mr_info(struct qedr_dev *dev, struct mr_info *info,
2162 size_t page_list_len, bool two_layered)
2164 struct qedr_pbl *tmp;
2167 INIT_LIST_HEAD(&info->free_pbl_list);
2168 INIT_LIST_HEAD(&info->inuse_pbl_list);
2170 rc = qedr_prepare_pbl_tbl(dev, &info->pbl_info,
2171 page_list_len, two_layered);
2175 info->pbl_table = qedr_alloc_pbl_tbl(dev, &info->pbl_info, GFP_KERNEL);
2176 if (!info->pbl_table) {
2181 DP_DEBUG(dev, QEDR_MSG_MR, "pbl_table_pa = %pa\n",
2182 &info->pbl_table->pa);
2184 /* in usual case we use 2 PBLs, so we add one to free
2185 * list and allocating another one
2187 tmp = qedr_alloc_pbl_tbl(dev, &info->pbl_info, GFP_KERNEL);
2189 DP_DEBUG(dev, QEDR_MSG_MR, "Extra PBL is not allocated\n");
2193 list_add_tail(&tmp->list_entry, &info->free_pbl_list);
2195 DP_DEBUG(dev, QEDR_MSG_MR, "extra pbl_table_pa = %pa\n", &tmp->pa);
2199 free_mr_info(dev, info);
struct ib_mr *qedr_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
			       u64 usr_addr, int acc, struct ib_udata *udata)
	struct qedr_dev *dev = get_qedr_dev(ibpd->device);

	pd = get_qedr_pd(ibpd);
	DP_DEBUG(dev, QEDR_MSG_MR,
		 "qedr_register user mr pd = %d start = %lld, len = %lld, usr_addr = %lld, acc = %d\n",
		 pd->pd_id, start, len, usr_addr, acc);

	if (acc & IB_ACCESS_REMOTE_WRITE && !(acc & IB_ACCESS_LOCAL_WRITE))
		return ERR_PTR(-EINVAL);

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);

	mr->type = QEDR_MR_USER;

	mr->umem = ib_umem_get(ibpd->uobject->context, start, len, acc, 0);
	if (IS_ERR(mr->umem)) {

	rc = init_mr_info(dev, &mr->info, ib_umem_page_count(mr->umem), 1);

	qedr_populate_pbls(dev, mr->umem, mr->info.pbl_table,
			   &mr->info.pbl_info);

	rc = dev->ops->rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid);
	if (rc) {
		DP_ERR(dev, "roce alloc tid returned an error %d\n", rc);

	/* Index only, 18 bit long, lkey = itid << 8 | key */
	mr->hw_mr.tid_type = QED_RDMA_TID_REGISTERED_MR;

	mr->hw_mr.pd = pd->pd_id;
	mr->hw_mr.local_read = 1;
	mr->hw_mr.local_write = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0;
	mr->hw_mr.remote_read = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
	mr->hw_mr.remote_write = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
	mr->hw_mr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
	mr->hw_mr.mw_bind = false;
	mr->hw_mr.pbl_ptr = mr->info.pbl_table[0].pa;
	mr->hw_mr.pbl_two_level = mr->info.pbl_info.two_layered;
	mr->hw_mr.pbl_page_size_log = ilog2(mr->info.pbl_info.pbl_size);
	mr->hw_mr.page_size_log = ilog2(mr->umem->page_size);
	mr->hw_mr.fbo = ib_umem_offset(mr->umem);
	mr->hw_mr.length = len;
	mr->hw_mr.vaddr = usr_addr;
	mr->hw_mr.zbva = false;
	mr->hw_mr.phy_mr = false;
	mr->hw_mr.dma_mr = false;

	rc = dev->ops->rdma_register_tid(dev->rdma_ctx, &mr->hw_mr);
	if (rc) {
		DP_ERR(dev, "roce register tid returned an error %d\n", rc);

	mr->ibmr.lkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
	if (mr->hw_mr.remote_write || mr->hw_mr.remote_read ||
	    mr->hw_mr.remote_atomic)
		mr->ibmr.rkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
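	/* With this layout, itid 0x17 and key 0xab compose lkey/rkey 0x17ab. */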
	DP_DEBUG(dev, QEDR_MSG_MR, "register user mr lkey: %x\n",
		 mr->ibmr.lkey);

	dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);

	qedr_free_pbl(dev, &mr->info.pbl_info, mr->info.pbl_table);

int qedr_dereg_mr(struct ib_mr *ib_mr)
	struct qedr_mr *mr = get_qedr_mr(ib_mr);
	struct qedr_dev *dev = get_qedr_dev(ib_mr->device);

	rc = dev->ops->rdma_deregister_tid(dev->rdma_ctx, mr->hw_mr.itid);

	dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);

	if ((mr->type != QEDR_MR_DMA) && (mr->type != QEDR_MR_FRMR))
		qedr_free_pbl(dev, &mr->info.pbl_info, mr->info.pbl_table);

	/* it could be user-registered memory */
	if (mr->umem)
		ib_umem_release(mr->umem);
2314 struct qedr_mr *__qedr_alloc_mr(struct ib_pd *ibpd, int max_page_list_len)
2316 struct qedr_pd *pd = get_qedr_pd(ibpd);
2317 struct qedr_dev *dev = get_qedr_dev(ibpd->device);
2321 DP_DEBUG(dev, QEDR_MSG_MR,
2322 "qedr_alloc_frmr pd = %d max_page_list_len= %d\n", pd->pd_id,
2325 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
2330 mr->type = QEDR_MR_FRMR;
2332 rc = init_mr_info(dev, &mr->info, max_page_list_len, 1);
2336 rc = dev->ops->rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid);
2338 DP_ERR(dev, "roce alloc tid returned an error %d\n", rc);
2342 /* Index only, 18 bit long, lkey = itid << 8 | key */
2343 mr->hw_mr.tid_type = QED_RDMA_TID_FMR;
2345 mr->hw_mr.pd = pd->pd_id;
2346 mr->hw_mr.local_read = 1;
2347 mr->hw_mr.local_write = 0;
2348 mr->hw_mr.remote_read = 0;
2349 mr->hw_mr.remote_write = 0;
2350 mr->hw_mr.remote_atomic = 0;
2351 mr->hw_mr.mw_bind = false;
2352 mr->hw_mr.pbl_ptr = 0;
2353 mr->hw_mr.pbl_two_level = mr->info.pbl_info.two_layered;
2354 mr->hw_mr.pbl_page_size_log = ilog2(mr->info.pbl_info.pbl_size);
2356 mr->hw_mr.length = 0;
2357 mr->hw_mr.vaddr = 0;
2358 mr->hw_mr.zbva = false;
2359 mr->hw_mr.phy_mr = true;
2360 mr->hw_mr.dma_mr = false;
2362 rc = dev->ops->rdma_register_tid(dev->rdma_ctx, &mr->hw_mr);
2364 DP_ERR(dev, "roce register tid returned an error %d\n", rc);
2368 mr->ibmr.lkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
2369 mr->ibmr.rkey = mr->ibmr.lkey;
2371 DP_DEBUG(dev, QEDR_MSG_MR, "alloc frmr: %x\n", mr->ibmr.lkey);
2375 dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
struct ib_mr *qedr_alloc_mr(struct ib_pd *ibpd,
			    enum ib_mr_type mr_type, u32 max_num_sg)
{
	struct qedr_dev *dev;
	struct qedr_mr *mr;

	if (mr_type != IB_MR_TYPE_MEM_REG)
		return ERR_PTR(-EINVAL);

	mr = __qedr_alloc_mr(ibpd, max_num_sg);
	if (IS_ERR(mr))
		return ERR_PTR(-EINVAL);

	dev = mr->dev;

	return &mr->ibmr;
}
static int qedr_set_page(struct ib_mr *ibmr, u64 addr)
{
	struct qedr_mr *mr = get_qedr_mr(ibmr);
	struct qedr_pbl *pbl_table;
	struct regpair *pbe;
	u32 pbes_in_page;

	if (unlikely(mr->npages == mr->info.pbl_info.num_pbes)) {
		DP_ERR(mr->dev, "qedr_set_page fails when %d\n", mr->npages);
		return -ENOMEM;
	}

	DP_DEBUG(mr->dev, QEDR_MSG_MR, "qedr_set_page pages[%d] = 0x%llx\n",
		 mr->npages, addr);

	pbes_in_page = mr->info.pbl_info.pbl_size / sizeof(u64);
	pbl_table = mr->info.pbl_table + (mr->npages / pbes_in_page);
	pbe = (struct regpair *)pbl_table->va;
	pbe += mr->npages % pbes_in_page;
	pbe->lo = cpu_to_le32((u32)addr);
	pbe->hi = cpu_to_le32((u32)upper_32_bits(addr));

	mr->npages++;

	return 0;
}
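/* Note on the PBL indexing above: with a hypothetical pbl_size of 4096 bytes
 * there are 512 8-byte PBEs per PBL page, so page N of the MR lands in
 * pbl_table[N / 512], entry N % 512, stored as a little-endian lo/hi pair.
 */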
static void handle_completed_mrs(struct qedr_dev *dev, struct mr_info *info)
{
	int work = info->completed - info->completed_handled - 1;

	DP_DEBUG(dev, QEDR_MSG_MR, "Special FMR work = %d\n", work);
	while (work-- > 0 && !list_empty(&info->inuse_pbl_list)) {
		struct qedr_pbl *pbl;

		/* Free all the page lists that can be freed (all those that
		 * were invalidated), under the assumption that if an FMR
		 * completed successfully, then any invalidate operation
		 * posted before it completed successfully as well.
		 */
		pbl = list_first_entry(&info->inuse_pbl_list,
				       struct qedr_pbl, list_entry);
		list_del(&pbl->list_entry);
		list_add_tail(&pbl->list_entry, &info->free_pbl_list);
		info->completed_handled++;
	}
}
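/* The "completed - completed_handled - 1" window above appears deliberate:
 * it always leaves the most recently completed PBL on the in-use list and
 * only recycles older ones, erring on the side of caution if a completion
 * races with a new fast-registration.
 */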
int qedr_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
		   int sg_nents, unsigned int *sg_offset)
{
	struct qedr_mr *mr = get_qedr_mr(ibmr);

	mr->npages = 0;

	handle_completed_mrs(mr->dev, &mr->info);
	return ib_sg_to_pages(ibmr, sg, sg_nents, NULL, qedr_set_page);
}
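/* ib_sg_to_pages() walks the scatterlist and calls qedr_set_page() once per
 * device page, so after it returns the MR's PBL holds the full page list and
 * mr->npages reflects how many entries were written.
 */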
struct ib_mr *qedr_get_dma_mr(struct ib_pd *ibpd, int acc)
{
	struct qedr_dev *dev = get_qedr_dev(ibpd->device);
	struct qedr_pd *pd = get_qedr_pd(ibpd);
	struct qedr_mr *mr;
	int rc;

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	mr->type = QEDR_MR_DMA;

	rc = dev->ops->rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid);
	if (rc) {
		DP_ERR(dev, "roce alloc tid returned an error %d\n", rc);
		goto err1;
	}

	/* index only, 18 bit long, lkey = itid << 8 | key */
	mr->hw_mr.tid_type = QED_RDMA_TID_REGISTERED_MR;
	mr->hw_mr.pd = pd->pd_id;
	mr->hw_mr.local_read = 1;
	mr->hw_mr.local_write = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0;
	mr->hw_mr.remote_read = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
	mr->hw_mr.remote_write = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
	mr->hw_mr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
	mr->hw_mr.dma_mr = true;

	rc = dev->ops->rdma_register_tid(dev->rdma_ctx, &mr->hw_mr);
	if (rc) {
		DP_ERR(dev, "roce register tid returned an error %d\n", rc);
		goto err2;
	}

	mr->ibmr.lkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
	if (mr->hw_mr.remote_write || mr->hw_mr.remote_read ||
	    mr->hw_mr.remote_atomic)
		mr->ibmr.rkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;

	DP_DEBUG(dev, QEDR_MSG_MR, "get dma mr: lkey = %x\n", mr->ibmr.lkey);
	return &mr->ibmr;

err2:
	dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
err1:
	kfree(mr);
	return ERR_PTR(rc);
}
static inline int qedr_wq_is_full(struct qedr_qp_hwq_info *wq)
{
	return (((wq->prod + 1) % wq->max_wr) == wq->cons);
}
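/* Example: with max_wr = 8, prod = 6 and cons = 7, (6 + 1) % 8 == 7 == cons,
 * so the queue reports full. One slot is intentionally left unused so that
 * a full ring can be distinguished from an empty one (prod == cons).
 */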
static int sge_data_len(struct ib_sge *sg_list, int num_sge)
{
	int i, len = 0;

	for (i = 0; i < num_sge; i++)
		len += sg_list[i].length;

	return len;
}
static void swap_wqe_data64(u64 *p)
{
	int i;

	for (i = 0; i < QEDR_SQE_ELEMENT_SIZE / sizeof(u64); i++, p++)
		*p = cpu_to_be64(cpu_to_le64(*p));
}
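/* On a little-endian host cpu_to_le64() is a no-op and cpu_to_be64() byte
 * swaps; on big-endian the roles reverse. Either way the composition above
 * amounts to an unconditional 64-bit byte swap of each inline-data word,
 * which is the ordering the firmware expects for inline payloads.
 */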
static u32 qedr_prepare_sq_inline_data(struct qedr_dev *dev,
				       struct qedr_qp *qp, u8 *wqe_size,
				       struct ib_send_wr *wr,
				       struct ib_send_wr **bad_wr, u8 *bits,
				       u8 bit)
{
	u32 data_size = sge_data_len(wr->sg_list, wr->num_sge);
	char *seg_prt, *wqe;
	int i, seg_siz;

	if (data_size > ROCE_REQ_MAX_INLINE_DATA_SIZE) {
		DP_ERR(dev, "Too much inline data in WR: %d\n", data_size);
		*bad_wr = wr;
		return 0;
	}

	if (!data_size)
		return data_size;

	/* set the bit */
	*bits |= bit;

	seg_prt = NULL;
	wqe = NULL;
	seg_siz = 0;

	/* Copy data inline */
	for (i = 0; i < wr->num_sge; i++) {
		u32 len = wr->sg_list[i].length;
		void *src = (void *)(uintptr_t)wr->sg_list[i].addr;

		while (len > 0) {
			u32 cur;

			/* New segment required */
			if (!seg_siz) {
				wqe = (char *)qed_chain_produce(&qp->sq.pbl);
				seg_prt = wqe;
				seg_siz = sizeof(struct rdma_sq_common_wqe);
				(*wqe_size)++;
			}

			/* Calculate currently allowed length */
			cur = min_t(u32, len, seg_siz);
			memcpy(seg_prt, src, cur);

			/* Update segment variables */
			seg_prt += cur;
			seg_siz -= cur;

			/* Update sge variables */
			src += cur;
			len -= cur;

			/* Swap fully-completed segments */
			if (!seg_siz)
				swap_wqe_data64((u64 *)wqe);
		}
	}

	/* swap last not completed segment */
	if (seg_siz)
		swap_wqe_data64((u64 *)wqe);

	return data_size;
}
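/* Worked example (assuming, hypothetically, 16-byte WQE segments): a 40-byte
 * inline payload is copied as 16 + 16 + 8 bytes. Three chain elements are
 * produced, *wqe_size is incremented three times, the two full segments are
 * swapped as they complete, and the trailing 8-byte remainder is swapped by
 * the final "last not completed segment" step above.
 */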
#define RQ_SGE_SET(sge, vaddr, vlength, vflags)			\
	do {							\
		DMA_REGPAIR_LE(sge->addr, vaddr);		\
		(sge)->length = cpu_to_le32(vlength);		\
		(sge)->flags = cpu_to_le32(vflags);		\
	} while (0)

#define SRQ_HDR_SET(hdr, vwr_id, num_sge)			\
	do {							\
		DMA_REGPAIR_LE(hdr->wr_id, vwr_id);		\
		(hdr)->num_sges = num_sge;			\
	} while (0)

#define SRQ_SGE_SET(sge, vaddr, vlength, vlkey)			\
	do {							\
		DMA_REGPAIR_LE(sge->addr, vaddr);		\
		(sge)->length = cpu_to_le32(vlength);		\
		(sge)->l_key = cpu_to_le32(vlkey);		\
	} while (0)
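/* The do { ... } while (0) wrappers make each macro expand to a single
 * statement, so e.g. "if (cond) RQ_SGE_SET(rqe, a, l, f); else ..." parses
 * as intended even though the macro body contains several statements.
 */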
static u32 qedr_prepare_sq_sges(struct qedr_qp *qp, u8 *wqe_size,
				struct ib_send_wr *wr)
{
	u32 data_size = 0;
	int i;

	for (i = 0; i < wr->num_sge; i++) {
		struct rdma_sq_sge *sge = qed_chain_produce(&qp->sq.pbl);

		DMA_REGPAIR_LE(sge->addr, wr->sg_list[i].addr);
		sge->l_key = cpu_to_le32(wr->sg_list[i].lkey);
		sge->length = cpu_to_le32(wr->sg_list[i].length);
		data_size += wr->sg_list[i].length;
	}

	if (wqe_size)
		*wqe_size += wr->num_sge;

	return data_size;
}
static u32 qedr_prepare_sq_rdma_data(struct qedr_dev *dev,
				     struct qedr_qp *qp,
				     struct rdma_sq_rdma_wqe_1st *rwqe,
				     struct rdma_sq_rdma_wqe_2nd *rwqe2,
				     struct ib_send_wr *wr,
				     struct ib_send_wr **bad_wr)
{
	rwqe2->r_key = cpu_to_le32(rdma_wr(wr)->rkey);
	DMA_REGPAIR_LE(rwqe2->remote_va, rdma_wr(wr)->remote_addr);

	if (wr->send_flags & IB_SEND_INLINE) {
		u8 flags = 0;

		SET_FIELD2(flags, RDMA_SQ_RDMA_WQE_1ST_INLINE_FLG, 1);
		return qedr_prepare_sq_inline_data(dev, qp, &rwqe->wqe_size, wr,
						   bad_wr, &rwqe->flags, flags);
	}

	return qedr_prepare_sq_sges(qp, &rwqe->wqe_size, wr);
}
static u32 qedr_prepare_sq_send_data(struct qedr_dev *dev,
				     struct qedr_qp *qp,
				     struct rdma_sq_send_wqe_1st *swqe,
				     struct rdma_sq_send_wqe_2st *swqe2,
				     struct ib_send_wr *wr,
				     struct ib_send_wr **bad_wr)
{
	memset(swqe2, 0, sizeof(*swqe2));
	if (wr->send_flags & IB_SEND_INLINE) {
		u8 flags = 0;

		SET_FIELD2(flags, RDMA_SQ_SEND_WQE_INLINE_FLG, 1);
		return qedr_prepare_sq_inline_data(dev, qp, &swqe->wqe_size, wr,
						   bad_wr, &swqe->flags, flags);
	}

	return qedr_prepare_sq_sges(qp, &swqe->wqe_size, wr);
}
static int qedr_prepare_reg(struct qedr_qp *qp,
			    struct rdma_sq_fmr_wqe_1st *fwqe1,
			    struct ib_reg_wr *wr)
{
	struct qedr_mr *mr = get_qedr_mr(wr->mr);
	struct rdma_sq_fmr_wqe_2nd *fwqe2;

	fwqe2 = (struct rdma_sq_fmr_wqe_2nd *)qed_chain_produce(&qp->sq.pbl);
	fwqe1->addr.hi = upper_32_bits(mr->ibmr.iova);
	fwqe1->addr.lo = lower_32_bits(mr->ibmr.iova);
	fwqe1->l_key = wr->key;

	SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_REMOTE_READ,
		   !!(wr->access & IB_ACCESS_REMOTE_READ));
	SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_REMOTE_WRITE,
		   !!(wr->access & IB_ACCESS_REMOTE_WRITE));
	SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_ENABLE_ATOMIC,
		   !!(wr->access & IB_ACCESS_REMOTE_ATOMIC));
	SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_LOCAL_READ, 1);
	SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_LOCAL_WRITE,
		   !!(wr->access & IB_ACCESS_LOCAL_WRITE));
	fwqe2->fmr_ctrl = 0;

	SET_FIELD2(fwqe2->fmr_ctrl, RDMA_SQ_FMR_WQE_2ND_PAGE_SIZE_LOG,
		   ilog2(mr->ibmr.page_size) - 12);

	fwqe2->length_hi = 0;
	fwqe2->length_lo = mr->ibmr.length;
	fwqe2->pbl_addr.hi = upper_32_bits(mr->info.pbl_table->pa);
	fwqe2->pbl_addr.lo = lower_32_bits(mr->info.pbl_table->pa);

	qp->wqe_wr_id[qp->sq.prod].mr = mr;

	return 0;
}
enum ib_wc_opcode qedr_ib_to_wc_opcode(enum ib_wr_opcode opcode)
{
	switch (opcode) {
	case IB_WR_RDMA_WRITE:
	case IB_WR_RDMA_WRITE_WITH_IMM:
		return IB_WC_RDMA_WRITE;
	case IB_WR_SEND_WITH_IMM:
	case IB_WR_SEND:
	case IB_WR_SEND_WITH_INV:
		return IB_WC_SEND;
	case IB_WR_RDMA_READ:
		return IB_WC_RDMA_READ;
	case IB_WR_ATOMIC_CMP_AND_SWP:
		return IB_WC_COMP_SWAP;
	case IB_WR_ATOMIC_FETCH_AND_ADD:
		return IB_WC_FETCH_ADD;
	case IB_WR_REG_MR:
		return IB_WC_REG_MR;
	case IB_WR_LOCAL_INV:
		return IB_WC_LOCAL_INV;
	default:
		return IB_WC_SEND;
	}
}
inline bool qedr_can_post_send(struct qedr_qp *qp, struct ib_send_wr *wr)
{
	int wq_is_full, err_wr, pbl_is_full;
	struct qedr_dev *dev = qp->dev;

	/* prevent SQ overflow and/or processing of a bad WR */
	err_wr = wr->num_sge > qp->sq.max_sges;
	wq_is_full = qedr_wq_is_full(&qp->sq);
	pbl_is_full = qed_chain_get_elem_left_u32(&qp->sq.pbl) <
		      QEDR_MAX_SQE_ELEMENTS_PER_SQE;
	if (wq_is_full || err_wr || pbl_is_full) {
		if (wq_is_full && !(qp->err_bitmap & QEDR_QP_ERR_SQ_FULL)) {
			DP_ERR(dev,
			       "error: WQ is full. Post send on QP %p failed (this error appears only once)\n",
			       qp);
			qp->err_bitmap |= QEDR_QP_ERR_SQ_FULL;
		}

		if (err_wr && !(qp->err_bitmap & QEDR_QP_ERR_BAD_SR)) {
			DP_ERR(dev,
			       "error: WR is bad. Post send on QP %p failed (this error appears only once)\n",
			       qp);
			qp->err_bitmap |= QEDR_QP_ERR_BAD_SR;
		}

		if (pbl_is_full &&
		    !(qp->err_bitmap & QEDR_QP_ERR_SQ_PBL_FULL)) {
			DP_ERR(dev,
			       "error: WQ PBL is full. Post send on QP %p failed (this error appears only once)\n",
			       qp);
			qp->err_bitmap |= QEDR_QP_ERR_SQ_PBL_FULL;
		}
		return false;
	}
	return true;
}
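/* Each QEDR_QP_ERR_* bit latches after its first DP_ERR so a user spinning
 * on a full or misconfigured SQ cannot flood the kernel log; the bitmap is
 * only intended to throttle the prints, not to change the return value.
 */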
int __qedr_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
		     struct ib_send_wr **bad_wr)
{
	struct qedr_dev *dev = get_qedr_dev(ibqp->device);
	struct qedr_qp *qp = get_qedr_qp(ibqp);
	struct rdma_sq_atomic_wqe_1st *awqe1;
	struct rdma_sq_atomic_wqe_2nd *awqe2;
	struct rdma_sq_atomic_wqe_3rd *awqe3;
	struct rdma_sq_send_wqe_2st *swqe2;
	struct rdma_sq_local_inv_wqe *iwqe;
	struct rdma_sq_rdma_wqe_2nd *rwqe2;
	struct rdma_sq_send_wqe_1st *swqe;
	struct rdma_sq_rdma_wqe_1st *rwqe;
	struct rdma_sq_fmr_wqe_1st *fwqe1;
	struct rdma_sq_common_wqe *wqe;
	u32 length;
	int rc = 0;
	bool comp;

	if (!qedr_can_post_send(qp, wr)) {
		rc = -ENOMEM;
		*bad_wr = wr;
		return rc;
	}

	wqe = qed_chain_produce(&qp->sq.pbl);
	qp->wqe_wr_id[qp->sq.prod].signaled =
		!!(wr->send_flags & IB_SEND_SIGNALED) || qp->signaled;

	wqe->flags = 0;
	SET_FIELD2(wqe->flags, RDMA_SQ_SEND_WQE_SE_FLG,
		   !!(wr->send_flags & IB_SEND_SOLICITED));
	comp = (!!(wr->send_flags & IB_SEND_SIGNALED)) || qp->signaled;
	SET_FIELD2(wqe->flags, RDMA_SQ_SEND_WQE_COMP_FLG, comp);
	SET_FIELD2(wqe->flags, RDMA_SQ_SEND_WQE_RD_FENCE_FLG,
		   !!(wr->send_flags & IB_SEND_FENCE));
	wqe->prev_wqe_size = qp->prev_wqe_size;

	qp->wqe_wr_id[qp->sq.prod].opcode = qedr_ib_to_wc_opcode(wr->opcode);
	switch (wr->opcode) {
	case IB_WR_SEND_WITH_IMM:
		if (unlikely(rdma_protocol_iwarp(&dev->ibdev, 1))) {
			rc = -EINVAL;
			*bad_wr = wr;
			break;
		}
		wqe->req_type = RDMA_SQ_REQ_TYPE_SEND_WITH_IMM;
		swqe = (struct rdma_sq_send_wqe_1st *)wqe;
		swqe->wqe_size = 2;
		swqe2 = qed_chain_produce(&qp->sq.pbl);

		swqe->inv_key_or_imm_data = cpu_to_le32(wr->ex.imm_data);
		length = qedr_prepare_sq_send_data(dev, qp, swqe, swqe2,
						   wr, bad_wr);
		swqe->length = cpu_to_le32(length);
		qp->wqe_wr_id[qp->sq.prod].wqe_size = swqe->wqe_size;
		qp->prev_wqe_size = swqe->wqe_size;
		qp->wqe_wr_id[qp->sq.prod].bytes_len = swqe->length;
		break;
	case IB_WR_SEND:
		wqe->req_type = RDMA_SQ_REQ_TYPE_SEND;
		swqe = (struct rdma_sq_send_wqe_1st *)wqe;

		swqe->wqe_size = 2;
		swqe2 = qed_chain_produce(&qp->sq.pbl);
		length = qedr_prepare_sq_send_data(dev, qp, swqe, swqe2,
						   wr, bad_wr);
		swqe->length = cpu_to_le32(length);
		qp->wqe_wr_id[qp->sq.prod].wqe_size = swqe->wqe_size;
		qp->prev_wqe_size = swqe->wqe_size;
		qp->wqe_wr_id[qp->sq.prod].bytes_len = swqe->length;
		break;
	case IB_WR_SEND_WITH_INV:
		wqe->req_type = RDMA_SQ_REQ_TYPE_SEND_WITH_INVALIDATE;
		swqe = (struct rdma_sq_send_wqe_1st *)wqe;
		swqe2 = qed_chain_produce(&qp->sq.pbl);
		swqe->wqe_size = 2;
		swqe->inv_key_or_imm_data = cpu_to_le32(wr->ex.invalidate_rkey);
		length = qedr_prepare_sq_send_data(dev, qp, swqe, swqe2,
						   wr, bad_wr);
		swqe->length = cpu_to_le32(length);
		qp->wqe_wr_id[qp->sq.prod].wqe_size = swqe->wqe_size;
		qp->prev_wqe_size = swqe->wqe_size;
		qp->wqe_wr_id[qp->sq.prod].bytes_len = swqe->length;
		break;

	case IB_WR_RDMA_WRITE_WITH_IMM:
		if (unlikely(rdma_protocol_iwarp(&dev->ibdev, 1))) {
			rc = -EINVAL;
			*bad_wr = wr;
			break;
		}
		wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_WR_WITH_IMM;
		rwqe = (struct rdma_sq_rdma_wqe_1st *)wqe;
		rwqe->wqe_size = 2;

		rwqe->imm_data = htonl(cpu_to_le32(wr->ex.imm_data));
		rwqe2 = qed_chain_produce(&qp->sq.pbl);
		length = qedr_prepare_sq_rdma_data(dev, qp, rwqe, rwqe2,
						   wr, bad_wr);
		rwqe->length = cpu_to_le32(length);
		qp->wqe_wr_id[qp->sq.prod].wqe_size = rwqe->wqe_size;
		qp->prev_wqe_size = rwqe->wqe_size;
		qp->wqe_wr_id[qp->sq.prod].bytes_len = rwqe->length;
		break;
	case IB_WR_RDMA_WRITE:
		wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_WR;
		rwqe = (struct rdma_sq_rdma_wqe_1st *)wqe;

		rwqe->wqe_size = 2;
		rwqe2 = qed_chain_produce(&qp->sq.pbl);
		length = qedr_prepare_sq_rdma_data(dev, qp, rwqe, rwqe2,
						   wr, bad_wr);
		rwqe->length = cpu_to_le32(length);
		qp->wqe_wr_id[qp->sq.prod].wqe_size = rwqe->wqe_size;
		qp->prev_wqe_size = rwqe->wqe_size;
		qp->wqe_wr_id[qp->sq.prod].bytes_len = rwqe->length;
		break;
	case IB_WR_RDMA_READ_WITH_INV:
		DP_ERR(dev,
		       "RDMA READ WITH INVALIDATE not supported\n");
		*bad_wr = wr;
		rc = -EINVAL;
		break;

	case IB_WR_RDMA_READ:
		wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_RD;
		rwqe = (struct rdma_sq_rdma_wqe_1st *)wqe;

		rwqe->wqe_size = 2;
		rwqe2 = qed_chain_produce(&qp->sq.pbl);
		length = qedr_prepare_sq_rdma_data(dev, qp, rwqe, rwqe2,
						   wr, bad_wr);
		rwqe->length = cpu_to_le32(length);
		qp->wqe_wr_id[qp->sq.prod].wqe_size = rwqe->wqe_size;
		qp->prev_wqe_size = rwqe->wqe_size;
		qp->wqe_wr_id[qp->sq.prod].bytes_len = rwqe->length;
		break;

	case IB_WR_ATOMIC_CMP_AND_SWP:
	case IB_WR_ATOMIC_FETCH_AND_ADD:
		awqe1 = (struct rdma_sq_atomic_wqe_1st *)wqe;
		awqe1->wqe_size = 4;

		awqe2 = qed_chain_produce(&qp->sq.pbl);
		DMA_REGPAIR_LE(awqe2->remote_va, atomic_wr(wr)->remote_addr);
		awqe2->r_key = cpu_to_le32(atomic_wr(wr)->rkey);

		awqe3 = qed_chain_produce(&qp->sq.pbl);

		if (wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
			wqe->req_type = RDMA_SQ_REQ_TYPE_ATOMIC_ADD;
			DMA_REGPAIR_LE(awqe3->swap_data,
				       atomic_wr(wr)->compare_add);
		} else {
			wqe->req_type = RDMA_SQ_REQ_TYPE_ATOMIC_CMP_AND_SWAP;
			DMA_REGPAIR_LE(awqe3->swap_data,
				       atomic_wr(wr)->swap);
			DMA_REGPAIR_LE(awqe3->cmp_data,
				       atomic_wr(wr)->compare_add);
		}

		qedr_prepare_sq_sges(qp, NULL, wr);

		qp->wqe_wr_id[qp->sq.prod].wqe_size = awqe1->wqe_size;
		qp->prev_wqe_size = awqe1->wqe_size;
		break;

	case IB_WR_LOCAL_INV:
		iwqe = (struct rdma_sq_local_inv_wqe *)wqe;
		iwqe->wqe_size = 1;

		iwqe->req_type = RDMA_SQ_REQ_TYPE_LOCAL_INVALIDATE;
		iwqe->inv_l_key = wr->ex.invalidate_rkey;
		qp->wqe_wr_id[qp->sq.prod].wqe_size = iwqe->wqe_size;
		qp->prev_wqe_size = iwqe->wqe_size;
		break;
	case IB_WR_REG_MR:
		DP_DEBUG(dev, QEDR_MSG_CQ, "REG_MR\n");
		wqe->req_type = RDMA_SQ_REQ_TYPE_FAST_MR;
		fwqe1 = (struct rdma_sq_fmr_wqe_1st *)wqe;
		fwqe1->wqe_size = 2;

		rc = qedr_prepare_reg(qp, fwqe1, reg_wr(wr));
		if (rc) {
			DP_ERR(dev, "IB_REG_MR failed rc=%d\n", rc);
			*bad_wr = wr;
			break;
		}

		qp->wqe_wr_id[qp->sq.prod].wqe_size = fwqe1->wqe_size;
		qp->prev_wqe_size = fwqe1->wqe_size;
		break;
	default:
		DP_ERR(dev, "invalid opcode 0x%x!\n", wr->opcode);
		rc = -EINVAL;
		*bad_wr = wr;
		break;
	}

	if (*bad_wr) {
		u16 value;

		/* Restore prod to its position before
		 * this WR was processed
		 */
		value = le16_to_cpu(qp->sq.db_data.data.value);
		qed_chain_set_prod(&qp->sq.pbl, value, wqe);

		/* Restore prev_wqe_size */
		qp->prev_wqe_size = wqe->prev_wqe_size;
		rc = -EINVAL;
		DP_ERR(dev, "POST SEND FAILED\n");
	}

	return rc;
}
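/* Rollback note: qp->sq.db_data.data.value holds the producer index most
 * recently published to the doorbell, so resetting the chain producer to it
 * discards exactly the elements consumed for the failed WQE above without
 * disturbing WQEs the hardware may already be processing.
 */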
int qedr_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
		   struct ib_send_wr **bad_wr)
{
	struct qedr_dev *dev = get_qedr_dev(ibqp->device);
	struct qedr_qp *qp = get_qedr_qp(ibqp);
	unsigned long flags;
	int rc = 0;

	*bad_wr = NULL;

	if (qp->qp_type == IB_QPT_GSI)
		return qedr_gsi_post_send(ibqp, wr, bad_wr);
	spin_lock_irqsave(&qp->q_lock, flags);

	if ((qp->state == QED_ROCE_QP_STATE_RESET) ||
	    (qp->state == QED_ROCE_QP_STATE_ERR)) {
		spin_unlock_irqrestore(&qp->q_lock, flags);
		*bad_wr = wr;
		DP_DEBUG(dev, QEDR_MSG_CQ,
			 "QP in wrong state! QP icid=0x%x state %d\n",
			 qp->icid, qp->state);
		return -EINVAL;
	}
	if (!wr) {
		DP_ERR(dev, "Got an empty post send.\n");
		spin_unlock_irqrestore(&qp->q_lock, flags);
		return -EINVAL;
	}
	while (wr) {
		rc = __qedr_post_send(ibqp, wr, bad_wr);
		if (rc)
			break;

		qp->wqe_wr_id[qp->sq.prod].wr_id = wr->wr_id;

		qedr_inc_sw_prod(&qp->sq);

		qp->sq.db_data.data.value++;

		wr = wr->next;
	}
	/* Trigger doorbell.
	 * If there was a failure in the first WR then it will be triggered in
	 * vain. However this is not harmful (as long as the producer value is
	 * unchanged). For performance reasons we avoid checking for this
	 * redundant doorbell.
	 */
	wmb();
	writel(qp->sq.db_data.raw, qp->sq.db);
	/* Make sure write sticks */
	mmiowb();

	spin_unlock_irqrestore(&qp->q_lock, flags);

	return rc;
}
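/* Ordering recap for the doorbell sequence above: wmb() makes the WQE
 * stores visible before the doorbell write, and mmiowb() keeps the MMIO
 * write from leaking past the spin-unlock so doorbells from different CPUs
 * reach the device in lock-acquisition order.
 */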
int qedr_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
		   struct ib_recv_wr **bad_wr)
{
	struct qedr_qp *qp = get_qedr_qp(ibqp);
	struct qedr_dev *dev = qp->dev;
	unsigned long flags;
	int status = 0;

	if (qp->qp_type == IB_QPT_GSI)
		return qedr_gsi_post_recv(ibqp, wr, bad_wr);

	spin_lock_irqsave(&qp->q_lock, flags);

	if ((qp->state == QED_ROCE_QP_STATE_RESET) ||
	    (qp->state == QED_ROCE_QP_STATE_ERR)) {
		spin_unlock_irqrestore(&qp->q_lock, flags);
		*bad_wr = wr;
		return -EINVAL;
	}

	while (wr) {
		int i;

		if (qed_chain_get_elem_left_u32(&qp->rq.pbl) <
		    QEDR_MAX_RQE_ELEMENTS_PER_RQE ||
		    wr->num_sge > qp->rq.max_sges) {
			DP_ERR(dev, "Can't post WR (%d < %d) || (%d > %d)\n",
			       qed_chain_get_elem_left_u32(&qp->rq.pbl),
			       QEDR_MAX_RQE_ELEMENTS_PER_RQE, wr->num_sge,
			       qp->rq.max_sges);
			status = -ENOMEM;
			*bad_wr = wr;
			break;
		}
		for (i = 0; i < wr->num_sge; i++) {
			u32 flags = 0;
			struct rdma_rq_sge *rqe =
			    qed_chain_produce(&qp->rq.pbl);
			/* First one must include the number
			 * of SGEs in the list
			 */
			if (!i)
				SET_FIELD(flags, RDMA_RQ_SGE_NUM_SGES,
					  wr->num_sge);

			SET_FIELD(flags, RDMA_RQ_SGE_L_KEY,
				  wr->sg_list[i].lkey);

			RQ_SGE_SET(rqe, wr->sg_list[i].addr,
				   wr->sg_list[i].length, flags);
		}

		/* Special case of no SGEs. The FW requires between 1-4 SGEs;
		 * in this case we post one SGE with length zero, because an
		 * RDMA WRITE with immediate still consumes an RQE.
		 */
		if (!wr->num_sge) {
			u32 flags = 0;
			struct rdma_rq_sge *rqe =
			    qed_chain_produce(&qp->rq.pbl);

			/* First one must include the number
			 * of SGEs in the list
			 */
			SET_FIELD(flags, RDMA_RQ_SGE_L_KEY, 0);
			SET_FIELD(flags, RDMA_RQ_SGE_NUM_SGES, 1);

			RQ_SGE_SET(rqe, 0, 0, flags);
			i = 1;
		}

		qp->rqe_wr_id[qp->rq.prod].wr_id = wr->wr_id;
		qp->rqe_wr_id[qp->rq.prod].wqe_size = i;

		qedr_inc_sw_prod(&qp->rq);

		/* Flush all the writes before signalling doorbell */
		wmb();

		qp->rq.db_data.data.value++;

		writel(qp->rq.db_data.raw, qp->rq.db);

		/* Make sure write sticks */
		mmiowb();

		wr = wr->next;
	}

	spin_unlock_irqrestore(&qp->q_lock, flags);

	return status;
}
static int is_valid_cqe(struct qedr_cq *cq, union rdma_cqe *cqe)
{
	struct rdma_cqe_requester *resp_cqe = &cqe->req;

	return (resp_cqe->flags & RDMA_CQE_REQUESTER_TOGGLE_BIT_MASK) ==
		cq->pbl_toggle;
}
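/* The firmware flips the CQE toggle bit on every pass around the CQ ring
 * and the driver tracks the expected phase in cq->pbl_toggle, so a stale
 * CQE left over from the previous lap fails this comparison and the ring
 * never needs to be zeroed between laps.
 */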
static struct qedr_qp *cqe_get_qp(union rdma_cqe *cqe)
{
	struct rdma_cqe_requester *resp_cqe = &cqe->req;
	struct qedr_qp *qp;

	qp = (struct qedr_qp *)(uintptr_t)HILO_GEN(resp_cqe->qp_handle.hi,
						   resp_cqe->qp_handle.lo,
						   u64);
	return qp;
}
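/* The driver stored the qedr_qp pointer, split into 32-bit halves, in the
 * QP context at creation time; the firmware echoes it back in every CQE and
 * HILO_GEN() reassembles the halves into the original 64-bit pointer.
 */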
static enum rdma_cqe_type cqe_get_type(union rdma_cqe *cqe)
{
	struct rdma_cqe_requester *resp_cqe = &cqe->req;

	return GET_FIELD(resp_cqe->flags, RDMA_CQE_REQUESTER_TYPE);
}
/* Return latest CQE (needs processing) */
static union rdma_cqe *get_cqe(struct qedr_cq *cq)
{
	return cq->latest_cqe;
}
/* For FMR we need to increment the completed counter used by the FMR
 * algorithm that decides whether a PBL can be freed. This must be done
 * whether or not the work request was signaled, which is why this function
 * is called from the condition that checks if a WR should be skipped: it
 * ensures an unsignaled FMR operation is never missed.
 */
static inline void qedr_chk_if_fmr(struct qedr_qp *qp)
{
	if (qp->wqe_wr_id[qp->sq.cons].opcode == IB_WC_REG_MR)
		qp->wqe_wr_id[qp->sq.cons].mr->info.completed++;
}
static int process_req(struct qedr_dev *dev, struct qedr_qp *qp,
		       struct qedr_cq *cq, int num_entries,
		       struct ib_wc *wc, u16 hw_cons, enum ib_wc_status status,
		       int force)
{
	u16 cnt = 0;

	while (num_entries && qp->sq.wqe_cons != hw_cons) {
		if (!qp->wqe_wr_id[qp->sq.cons].signaled && !force) {
			qedr_chk_if_fmr(qp);
			/* skip WC */
			goto next_cqe;
		}

		/* fill WC */
		wc->status = status;
		wc->wc_flags = 0;
		wc->src_qp = qp->id;
		wc->qp = &qp->ibqp;

		wc->wr_id = qp->wqe_wr_id[qp->sq.cons].wr_id;
		wc->opcode = qp->wqe_wr_id[qp->sq.cons].opcode;

		switch (wc->opcode) {
		case IB_WC_RDMA_WRITE:
			wc->byte_len = qp->wqe_wr_id[qp->sq.cons].bytes_len;
			break;
		case IB_WC_COMP_SWAP:
		case IB_WC_FETCH_ADD:
			wc->byte_len = 8;
			break;
		case IB_WC_REG_MR:
			qp->wqe_wr_id[qp->sq.cons].mr->info.completed++;
			break;
		default:
			break;
		}

		num_entries--;
		wc++;
		cnt++;
next_cqe:
		while (qp->wqe_wr_id[qp->sq.cons].wqe_size--)
			qed_chain_consume(&qp->sq.pbl);
		qedr_inc_sw_cons(&qp->sq);
	}

	return cnt;
}
static int qedr_poll_cq_req(struct qedr_dev *dev,
			    struct qedr_qp *qp, struct qedr_cq *cq,
			    int num_entries, struct ib_wc *wc,
			    struct rdma_cqe_requester *req)
{
	int cnt = 0;

	switch (req->status) {
	case RDMA_CQE_REQ_STS_OK:
		cnt = process_req(dev, qp, cq, num_entries, wc, req->sq_cons,
				  IB_WC_SUCCESS, 0);
		break;
	case RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR:
		if (qp->state != QED_ROCE_QP_STATE_ERR)
			DP_ERR(dev,
			       "Error: POLL CQ with RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR. CQ icid=0x%x, QP icid=0x%x\n",
			       cq->icid, qp->icid);
		cnt = process_req(dev, qp, cq, num_entries, wc, req->sq_cons,
				  IB_WC_WR_FLUSH_ERR, 0);
		break;
	default:
		/* process all WQEs before the consumer */
		qp->state = QED_ROCE_QP_STATE_ERR;
		cnt = process_req(dev, qp, cq, num_entries, wc,
				  req->sq_cons - 1, IB_WC_SUCCESS, 0);
		/* if we have extra WC fill it with actual error info */
		if (cnt < num_entries) {
			enum ib_wc_status wc_status;

			switch (req->status) {
			case RDMA_CQE_REQ_STS_BAD_RESPONSE_ERR:
				DP_ERR(dev,
				       "Error: POLL CQ with RDMA_CQE_REQ_STS_BAD_RESPONSE_ERR. CQ icid=0x%x, QP icid=0x%x\n",
				       cq->icid, qp->icid);
				wc_status = IB_WC_BAD_RESP_ERR;
				break;
			case RDMA_CQE_REQ_STS_LOCAL_LENGTH_ERR:
				DP_ERR(dev,
				       "Error: POLL CQ with RDMA_CQE_REQ_STS_LOCAL_LENGTH_ERR. CQ icid=0x%x, QP icid=0x%x\n",
				       cq->icid, qp->icid);
				wc_status = IB_WC_LOC_LEN_ERR;
				break;
			case RDMA_CQE_REQ_STS_LOCAL_QP_OPERATION_ERR:
				DP_ERR(dev,
				       "Error: POLL CQ with RDMA_CQE_REQ_STS_LOCAL_QP_OPERATION_ERR. CQ icid=0x%x, QP icid=0x%x\n",
				       cq->icid, qp->icid);
				wc_status = IB_WC_LOC_QP_OP_ERR;
				break;
			case RDMA_CQE_REQ_STS_LOCAL_PROTECTION_ERR:
				DP_ERR(dev,
				       "Error: POLL CQ with RDMA_CQE_REQ_STS_LOCAL_PROTECTION_ERR. CQ icid=0x%x, QP icid=0x%x\n",
				       cq->icid, qp->icid);
				wc_status = IB_WC_LOC_PROT_ERR;
				break;
			case RDMA_CQE_REQ_STS_MEMORY_MGT_OPERATION_ERR:
				DP_ERR(dev,
				       "Error: POLL CQ with RDMA_CQE_REQ_STS_MEMORY_MGT_OPERATION_ERR. CQ icid=0x%x, QP icid=0x%x\n",
				       cq->icid, qp->icid);
				wc_status = IB_WC_MW_BIND_ERR;
				break;
			case RDMA_CQE_REQ_STS_REMOTE_INVALID_REQUEST_ERR:
				DP_ERR(dev,
				       "Error: POLL CQ with RDMA_CQE_REQ_STS_REMOTE_INVALID_REQUEST_ERR. CQ icid=0x%x, QP icid=0x%x\n",
				       cq->icid, qp->icid);
				wc_status = IB_WC_REM_INV_REQ_ERR;
				break;
			case RDMA_CQE_REQ_STS_REMOTE_ACCESS_ERR:
				DP_ERR(dev,
				       "Error: POLL CQ with RDMA_CQE_REQ_STS_REMOTE_ACCESS_ERR. CQ icid=0x%x, QP icid=0x%x\n",
				       cq->icid, qp->icid);
				wc_status = IB_WC_REM_ACCESS_ERR;
				break;
			case RDMA_CQE_REQ_STS_REMOTE_OPERATION_ERR:
				DP_ERR(dev,
				       "Error: POLL CQ with RDMA_CQE_REQ_STS_REMOTE_OPERATION_ERR. CQ icid=0x%x, QP icid=0x%x\n",
				       cq->icid, qp->icid);
				wc_status = IB_WC_REM_OP_ERR;
				break;
			case RDMA_CQE_REQ_STS_RNR_NAK_RETRY_CNT_ERR:
				DP_ERR(dev,
				       "Error: POLL CQ with RDMA_CQE_REQ_STS_RNR_NAK_RETRY_CNT_ERR. CQ icid=0x%x, QP icid=0x%x\n",
				       cq->icid, qp->icid);
				wc_status = IB_WC_RNR_RETRY_EXC_ERR;
				break;
			case RDMA_CQE_REQ_STS_TRANSPORT_RETRY_CNT_ERR:
				DP_ERR(dev,
				       "Error: POLL CQ with ROCE_CQE_REQ_STS_TRANSPORT_RETRY_CNT_ERR. CQ icid=0x%x, QP icid=0x%x\n",
				       cq->icid, qp->icid);
				wc_status = IB_WC_RETRY_EXC_ERR;
				break;
			default:
				DP_ERR(dev,
				       "Error: POLL CQ with IB_WC_GENERAL_ERR. CQ icid=0x%x, QP icid=0x%x\n",
				       cq->icid, qp->icid);
				wc_status = IB_WC_GENERAL_ERR;
			}
			cnt += process_req(dev, qp, cq, 1, wc, req->sq_cons,
					   wc_status, 1 /* force use of WC */);
		}
	}

	return cnt;
}
static void __process_resp_one(struct qedr_dev *dev, struct qedr_qp *qp,
			       struct qedr_cq *cq, struct ib_wc *wc,
			       struct rdma_cqe_responder *resp, u64 wr_id)
{
	enum ib_wc_status wc_status = IB_WC_SUCCESS;
	u8 flags;

	wc->opcode = IB_WC_RECV;
	wc->wc_flags = 0;

	switch (resp->status) {
	case RDMA_CQE_RESP_STS_LOCAL_ACCESS_ERR:
		wc_status = IB_WC_LOC_ACCESS_ERR;
		break;
	case RDMA_CQE_RESP_STS_LOCAL_LENGTH_ERR:
		wc_status = IB_WC_LOC_LEN_ERR;
		break;
	case RDMA_CQE_RESP_STS_LOCAL_QP_OPERATION_ERR:
		wc_status = IB_WC_LOC_QP_OP_ERR;
		break;
	case RDMA_CQE_RESP_STS_LOCAL_PROTECTION_ERR:
		wc_status = IB_WC_LOC_PROT_ERR;
		break;
	case RDMA_CQE_RESP_STS_MEMORY_MGT_OPERATION_ERR:
		wc_status = IB_WC_MW_BIND_ERR;
		break;
	case RDMA_CQE_RESP_STS_REMOTE_INVALID_REQUEST_ERR:
		wc_status = IB_WC_REM_INV_RD_REQ_ERR;
		break;
	case RDMA_CQE_RESP_STS_OK:
		wc_status = IB_WC_SUCCESS;
		wc->byte_len = le32_to_cpu(resp->length);

		flags = resp->flags & QEDR_RESP_RDMA_IMM;

		if (flags == QEDR_RESP_RDMA_IMM)
			wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;

		if (flags == QEDR_RESP_RDMA_IMM || flags == QEDR_RESP_IMM) {
			wc->ex.imm_data =
				le32_to_cpu(resp->imm_data_or_inv_r_Key);
			wc->wc_flags |= IB_WC_WITH_IMM;
		}
		break;
	default:
		wc->status = IB_WC_GENERAL_ERR;
		DP_ERR(dev, "Invalid CQE status detected\n");
	}

	/* fill WC */
	wc->status = wc_status;
	wc->src_qp = qp->id;
	wc->qp = &qp->ibqp;
	wc->wr_id = wr_id;
}
static int process_resp_one(struct qedr_dev *dev, struct qedr_qp *qp,
			    struct qedr_cq *cq, struct ib_wc *wc,
			    struct rdma_cqe_responder *resp)
{
	u64 wr_id = qp->rqe_wr_id[qp->rq.cons].wr_id;

	__process_resp_one(dev, qp, cq, wc, resp, wr_id);

	while (qp->rqe_wr_id[qp->rq.cons].wqe_size--)
		qed_chain_consume(&qp->rq.pbl);
	qedr_inc_sw_cons(&qp->rq);

	return 1;
}
static int process_resp_flush(struct qedr_qp *qp, struct qedr_cq *cq,
			      int num_entries, struct ib_wc *wc, u16 hw_cons)
{
	u16 cnt = 0;

	while (num_entries && qp->rq.wqe_cons != hw_cons) {
		/* fill WC */
		wc->status = IB_WC_WR_FLUSH_ERR;
		wc->wc_flags = 0;
		wc->src_qp = qp->id;
		wc->byte_len = 0;
		wc->wr_id = qp->rqe_wr_id[qp->rq.cons].wr_id;
		wc->qp = &qp->ibqp;
		num_entries--;
		wc++;
		cnt++;
		while (qp->rqe_wr_id[qp->rq.cons].wqe_size--)
			qed_chain_consume(&qp->rq.pbl);
		qedr_inc_sw_cons(&qp->rq);
	}

	return cnt;
}
static void try_consume_resp_cqe(struct qedr_cq *cq, struct qedr_qp *qp,
				 struct rdma_cqe_responder *resp, int *update)
{
	if (le16_to_cpu(resp->rq_cons) == qp->rq.wqe_cons) {
		consume_cqe(cq);
		*update |= 1;
	}
}
static int qedr_poll_cq_resp(struct qedr_dev *dev, struct qedr_qp *qp,
			     struct qedr_cq *cq, int num_entries,
			     struct ib_wc *wc, struct rdma_cqe_responder *resp,
			     int *update)
{
	int cnt;

	if (resp->status == RDMA_CQE_RESP_STS_WORK_REQUEST_FLUSHED_ERR) {
		cnt = process_resp_flush(qp, cq, num_entries, wc,
					 resp->rq_cons);
		try_consume_resp_cqe(cq, qp, resp, update);
	} else {
		cnt = process_resp_one(dev, qp, cq, wc, resp);
		consume_cqe(cq);
		*update |= 1;
	}

	return cnt;
}
static void try_consume_req_cqe(struct qedr_cq *cq, struct qedr_qp *qp,
				struct rdma_cqe_requester *req, int *update)
{
	if (le16_to_cpu(req->sq_cons) == qp->sq.wqe_cons) {
		consume_cqe(cq);
		*update |= 1;
	}
}
int qedr_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
	struct qedr_dev *dev = get_qedr_dev(ibcq->device);
	struct qedr_cq *cq = get_qedr_cq(ibcq);
	union rdma_cqe *cqe;
	u32 old_cons, new_cons;
	unsigned long flags;
	int update = 0;
	int done = 0;

	if (cq->cq_type == QEDR_CQ_TYPE_GSI)
		return qedr_gsi_poll_cq(ibcq, num_entries, wc);

	spin_lock_irqsave(&cq->cq_lock, flags);
	cqe = cq->latest_cqe;
	old_cons = qed_chain_get_cons_idx_u32(&cq->pbl);
	while (num_entries && is_valid_cqe(cq, cqe)) {
		struct qedr_qp *qp;
		int cnt = 0;

		/* prevent speculative reads of any field of CQE */
		rmb();

		qp = cqe_get_qp(cqe);
		if (!qp) {
			WARN(1, "Error: CQE QP pointer is NULL. CQE=%p\n", cqe);
			break;
		}

		wc->qp = &qp->ibqp;

		switch (cqe_get_type(cqe)) {
		case RDMA_CQE_TYPE_REQUESTER:
			cnt = qedr_poll_cq_req(dev, qp, cq, num_entries, wc,
					       &cqe->req);
			try_consume_req_cqe(cq, qp, &cqe->req, &update);
			break;
		case RDMA_CQE_TYPE_RESPONDER_RQ:
			cnt = qedr_poll_cq_resp(dev, qp, cq, num_entries, wc,
						&cqe->resp, &update);
			break;
		case RDMA_CQE_TYPE_INVALID:
		default:
			DP_ERR(dev, "Error: invalid CQE type = %d\n",
			       cqe_get_type(cqe));
		}
		num_entries -= cnt;
		wc += cnt;
		done += cnt;

		cqe = get_cqe(cq);
	}
	new_cons = qed_chain_get_cons_idx_u32(&cq->pbl);

	cq->cq_cons += new_cons - old_cons;
	if (update)
		/* doorbell notifies about the latest VALID entry,
		 * but the chain already points to the next INVALID one
		 */
		doorbell_cq(cq, cq->cq_cons - 1, cq->arm_flags);

	spin_unlock_irqrestore(&cq->cq_lock, flags);
	return done;
}
int qedr_process_mad(struct ib_device *ibdev, int process_mad_flags,
		     u8 port_num,
		     const struct ib_wc *in_wc,
		     const struct ib_grh *in_grh,
		     const struct ib_mad_hdr *mad_hdr,
		     size_t in_mad_size, struct ib_mad_hdr *out_mad,
		     size_t *out_mad_size, u16 *out_mad_pkey_index)
{
	struct qedr_dev *dev = get_qedr_dev(ibdev);

	DP_DEBUG(dev, QEDR_MSG_GSI,
		 "QEDR_PROCESS_MAD in_mad %x %x %x %x %x %x %x %x\n",
		 mad_hdr->attr_id, mad_hdr->base_version, mad_hdr->attr_mod,
		 mad_hdr->class_specific, mad_hdr->class_version,
		 mad_hdr->method, mad_hdr->mgmt_class, mad_hdr->status);
	return IB_MAD_RESULT_SUCCESS;
}
int qedr_port_immutable(struct ib_device *ibdev, u8 port_num,
			struct ib_port_immutable *immutable)
{
	struct ib_port_attr attr;
	int err;

	err = qedr_query_port(ibdev, port_num, &attr);
	if (err)
		return err;

	immutable->pkey_tbl_len = attr.pkey_tbl_len;
	immutable->gid_tbl_len = attr.gid_tbl_len;
	immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE |
				    RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
	immutable->max_mad_size = IB_MGMT_MAD_SIZE;

	return 0;
}