1 /* QLogic qedr NIC Driver
2 * Copyright (c) 2015-2016 QLogic Corporation
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and /or other materials
21 * provided with the distribution.
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 #include <linux/dma-mapping.h>
33 #include <linux/crc32.h>
37 #include <linux/iommu.h>
39 #include <rdma/ib_verbs.h>
40 #include <rdma/ib_user_verbs.h>
41 #include <rdma/iw_cm.h>
42 #include <rdma/ib_umem.h>
43 #include <rdma/ib_addr.h>
44 #include <rdma/ib_cache.h>
45 #include <rdma/uverbs_ioctl.h>
47 #include <linux/qed/common_hsi.h>
48 #include "qedr_hsi_rdma.h"
49 #include <linux/qed/qed_if.h>
52 #include <rdma/qedr-abi.h>
53 #include "qedr_roce_cm.h"
54 #include "qedr_iw_cm.h"
56 #define QEDR_SRQ_WQE_ELEM_SIZE sizeof(union rdma_srq_elm)
57 #define RDMA_MAX_SGE_PER_SRQ (4)
58 #define RDMA_MAX_SRQ_WQE_SIZE (RDMA_MAX_SGE_PER_SRQ + 1)
60 #define DB_ADDR_SHIFT(addr) ((addr) << DB_PWM_ADDR_OFFSET_SHIFT)
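/* Hedged illustration of DB_ADDR_SHIFT(): it turns a PWM doorbell offset
 * constant into the byte offset later added to the DPI base (user space) or
 * to dev->db_addr (kernel). Assuming DB_PWM_ADDR_OFFSET_SHIFT is 3, i.e. PWM
 * offsets are counted in 8-byte units (an assumption, not taken from this
 * file), DB_ADDR_SHIFT(0x10) would evaluate to byte offset 0x80.
 */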
63 QEDR_USER_MMAP_IO_WC = 0,
64 QEDR_USER_MMAP_PHYS_PAGE,
67 static inline int qedr_ib_copy_to_udata(struct ib_udata *udata, void *src,
70 size_t min_len = min_t(size_t, len, udata->outlen);
72 return ib_copy_to_udata(udata, src, min_len);
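/* Clamping the copy length to udata->outlen means a user library that was
 * built against an older, smaller response struct only receives the fields
 * it knows about instead of having its buffer overrun.
 */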
75 int qedr_query_pkey(struct ib_device *ibdev, u32 port, u16 index, u16 *pkey)
77 if (index >= QEDR_ROCE_PKEY_TABLE_LEN)
80 *pkey = QEDR_ROCE_PKEY_DEFAULT;
84 int qedr_iw_query_gid(struct ib_device *ibdev, u32 port,
85 int index, union ib_gid *sgid)
87 struct qedr_dev *dev = get_qedr_dev(ibdev);
89 memset(sgid->raw, 0, sizeof(sgid->raw));
90 ether_addr_copy(sgid->raw, dev->ndev->dev_addr);
92 DP_DEBUG(dev, QEDR_MSG_INIT, "QUERY sgid[%d]=%llx:%llx\n", index,
93 sgid->global.interface_id, sgid->global.subnet_prefix);
98 int qedr_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr)
100 struct qedr_dev *dev = get_qedr_dev(ibsrq->device);
101 struct qedr_device_attr *qattr = &dev->attr;
102 struct qedr_srq *srq = get_qedr_srq(ibsrq);
104 srq_attr->srq_limit = srq->srq_limit;
105 srq_attr->max_wr = qattr->max_srq_wr;
106 srq_attr->max_sge = qattr->max_sge;
111 int qedr_query_device(struct ib_device *ibdev,
112 struct ib_device_attr *attr, struct ib_udata *udata)
114 struct qedr_dev *dev = get_qedr_dev(ibdev);
115 struct qedr_device_attr *qattr = &dev->attr;
117 if (!dev->rdma_ctx) {
119 "qedr_query_device called with invalid params rdma_ctx=%p\n",
124 memset(attr, 0, sizeof(*attr));
126 attr->fw_ver = qattr->fw_ver;
127 attr->sys_image_guid = qattr->sys_image_guid;
128 attr->max_mr_size = qattr->max_mr_size;
129 attr->page_size_cap = qattr->page_size_caps;
130 attr->vendor_id = qattr->vendor_id;
131 attr->vendor_part_id = qattr->vendor_part_id;
132 attr->hw_ver = qattr->hw_ver;
133 attr->max_qp = qattr->max_qp;
134 attr->max_qp_wr = max_t(u32, qattr->max_sqe, qattr->max_rqe);
135 attr->device_cap_flags = IB_DEVICE_CURR_QP_STATE_MOD |
136 IB_DEVICE_RC_RNR_NAK_GEN |
137 IB_DEVICE_MEM_MGT_EXTENSIONS;
138 attr->kernel_cap_flags = IBK_LOCAL_DMA_LKEY;
140 if (!rdma_protocol_iwarp(&dev->ibdev, 1))
141 attr->device_cap_flags |= IB_DEVICE_XRC;
142 attr->max_send_sge = qattr->max_sge;
143 attr->max_recv_sge = qattr->max_sge;
144 attr->max_sge_rd = qattr->max_sge;
145 attr->max_cq = qattr->max_cq;
146 attr->max_cqe = qattr->max_cqe;
147 attr->max_mr = qattr->max_mr;
148 attr->max_mw = qattr->max_mw;
149 attr->max_pd = qattr->max_pd;
150 attr->atomic_cap = dev->atomic_cap;
151 attr->max_qp_init_rd_atom =
152 1 << (fls(qattr->max_qp_req_rd_atomic_resc) - 1);
153 attr->max_qp_rd_atom =
154 min(1 << (fls(qattr->max_qp_resp_rd_atomic_resc) - 1),
155 attr->max_qp_init_rd_atom);
157 attr->max_srq = qattr->max_srq;
158 attr->max_srq_sge = qattr->max_srq_sge;
159 attr->max_srq_wr = qattr->max_srq_wr;
161 attr->local_ca_ack_delay = qattr->dev_ack_delay;
162 attr->max_fast_reg_page_list_len = qattr->max_mr / 8;
163 attr->max_pkeys = qattr->max_pkey;
164 attr->max_ah = qattr->max_ah;
169 static inline void get_link_speed_and_width(int speed, u16 *ib_speed,
174 *ib_speed = IB_SPEED_SDR;
175 *ib_width = IB_WIDTH_1X;
178 *ib_speed = IB_SPEED_QDR;
179 *ib_width = IB_WIDTH_1X;
183 *ib_speed = IB_SPEED_DDR;
184 *ib_width = IB_WIDTH_4X;
188 *ib_speed = IB_SPEED_EDR;
189 *ib_width = IB_WIDTH_1X;
193 *ib_speed = IB_SPEED_QDR;
194 *ib_width = IB_WIDTH_4X;
198 *ib_speed = IB_SPEED_HDR;
199 *ib_width = IB_WIDTH_1X;
203 *ib_speed = IB_SPEED_EDR;
204 *ib_width = IB_WIDTH_4X;
209 *ib_speed = IB_SPEED_SDR;
210 *ib_width = IB_WIDTH_1X;
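/* This helper picks the IB (speed, width) pair whose product approximates
 * the Ethernet link speed, e.g. a 100G link corresponds to IB_SPEED_EDR
 * (~25 Gb/s per lane) times IB_WIDTH_4X; speeds it does not recognize fall
 * back to SDR x1.
 */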
214 int qedr_query_port(struct ib_device *ibdev, u32 port,
215 struct ib_port_attr *attr)
217 struct qedr_dev *dev;
218 struct qed_rdma_port *rdma_port;
220 dev = get_qedr_dev(ibdev);
222 if (!dev->rdma_ctx) {
223 DP_ERR(dev, "rdma_ctx is NULL\n");
227 rdma_port = dev->ops->rdma_query_port(dev->rdma_ctx);
229	/* *attr is zeroed by the caller, so avoid zeroing it here */
230 if (rdma_port->port_state == QED_RDMA_PORT_UP) {
231 attr->state = IB_PORT_ACTIVE;
232 attr->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
234 attr->state = IB_PORT_DOWN;
235 attr->phys_state = IB_PORT_PHYS_STATE_DISABLED;
237 attr->max_mtu = IB_MTU_4096;
242 attr->ip_gids = true;
243 if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
244 attr->active_mtu = iboe_get_mtu(dev->iwarp_max_mtu);
245 attr->gid_tbl_len = 1;
247 attr->active_mtu = iboe_get_mtu(dev->ndev->mtu);
248 attr->gid_tbl_len = QEDR_MAX_SGID;
249 attr->pkey_tbl_len = QEDR_ROCE_PKEY_TABLE_LEN;
251 attr->bad_pkey_cntr = rdma_port->pkey_bad_counter;
252 attr->qkey_viol_cntr = 0;
253 get_link_speed_and_width(rdma_port->link_speed,
254 &attr->active_speed, &attr->active_width);
255 attr->max_msg_sz = rdma_port->max_msg_size;
256 attr->max_vl_num = 4;
261 int qedr_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata)
263 struct ib_device *ibdev = uctx->device;
265 struct qedr_ucontext *ctx = get_qedr_ucontext(uctx);
266 struct qedr_alloc_ucontext_resp uresp = {};
267 struct qedr_alloc_ucontext_req ureq = {};
268 struct qedr_dev *dev = get_qedr_dev(ibdev);
269 struct qed_rdma_add_user_out_params oparams;
270 struct qedr_user_mmap_entry *entry;
276 rc = ib_copy_from_udata(&ureq, udata,
277 min(sizeof(ureq), udata->inlen));
279 DP_ERR(dev, "Problem copying data from user space\n");
282 ctx->edpm_mode = !!(ureq.context_flags &
283 QEDR_ALLOC_UCTX_EDPM_MODE);
284 ctx->db_rec = !!(ureq.context_flags & QEDR_ALLOC_UCTX_DB_REC);
287 rc = dev->ops->rdma_add_user(dev->rdma_ctx, &oparams);
290 "failed to allocate a DPI for a new RoCE application, rc=%d. To overcome this consider to increase the number of DPIs, increase the doorbell BAR size or just close unnecessary RoCE applications. In order to increase the number of DPIs consult the qedr readme\n",
295 ctx->dpi = oparams.dpi;
296 ctx->dpi_addr = oparams.dpi_addr;
297 ctx->dpi_phys_addr = oparams.dpi_phys_addr;
298 ctx->dpi_size = oparams.dpi_size;
299 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
305 entry->io_address = ctx->dpi_phys_addr;
306 entry->length = ctx->dpi_size;
307 entry->mmap_flag = QEDR_USER_MMAP_IO_WC;
308 entry->dpi = ctx->dpi;
310 rc = rdma_user_mmap_entry_insert(uctx, &entry->rdma_entry,
316 ctx->db_mmap_entry = &entry->rdma_entry;
318 if (!dev->user_dpm_enabled)
320 else if (rdma_protocol_iwarp(&dev->ibdev, 1))
321 uresp.dpm_flags = QEDR_DPM_TYPE_IWARP_LEGACY;
323 uresp.dpm_flags = QEDR_DPM_TYPE_ROCE_ENHANCED |
324 QEDR_DPM_TYPE_ROCE_LEGACY |
325 QEDR_DPM_TYPE_ROCE_EDPM_MODE;
327 if (ureq.context_flags & QEDR_SUPPORT_DPM_SIZES) {
328 uresp.dpm_flags |= QEDR_DPM_SIZES_SET;
329 uresp.ldpm_limit_size = QEDR_LDPM_MAX_SIZE;
330 uresp.edpm_trans_size = QEDR_EDPM_TRANS_SIZE;
331 uresp.edpm_limit_size = QEDR_EDPM_MAX_SIZE;
334 uresp.wids_enabled = 1;
335 uresp.wid_count = oparams.wid_count;
336 uresp.db_pa = rdma_user_mmap_get_offset(ctx->db_mmap_entry);
337 uresp.db_size = ctx->dpi_size;
338 uresp.max_send_wr = dev->attr.max_sqe;
339 uresp.max_recv_wr = dev->attr.max_rqe;
340 uresp.max_srq_wr = dev->attr.max_srq_wr;
341 uresp.sges_per_send_wr = QEDR_MAX_SQE_ELEMENTS_PER_SQE;
342 uresp.sges_per_recv_wr = QEDR_MAX_RQE_ELEMENTS_PER_RQE;
343 uresp.sges_per_srq_wr = dev->attr.max_srq_sge;
344 uresp.max_cqes = QEDR_MAX_CQES;
346 rc = qedr_ib_copy_to_udata(udata, &uresp, sizeof(uresp));
352 DP_DEBUG(dev, QEDR_MSG_INIT, "Allocating user context %p\n",
357 if (!ctx->db_mmap_entry)
358 dev->ops->rdma_remove_user(dev->rdma_ctx, ctx->dpi);
360 rdma_user_mmap_entry_remove(ctx->db_mmap_entry);
365 void qedr_dealloc_ucontext(struct ib_ucontext *ibctx)
367 struct qedr_ucontext *uctx = get_qedr_ucontext(ibctx);
369 DP_DEBUG(uctx->dev, QEDR_MSG_INIT, "Deallocating user context %p\n",
372 rdma_user_mmap_entry_remove(uctx->db_mmap_entry);
375 void qedr_mmap_free(struct rdma_user_mmap_entry *rdma_entry)
377 struct qedr_user_mmap_entry *entry = get_qedr_mmap_entry(rdma_entry);
378 struct qedr_dev *dev = entry->dev;
380 if (entry->mmap_flag == QEDR_USER_MMAP_PHYS_PAGE)
381 free_page((unsigned long)entry->address);
382 else if (entry->mmap_flag == QEDR_USER_MMAP_IO_WC)
383 dev->ops->rdma_remove_user(dev->rdma_ctx, entry->dpi);
388 int qedr_mmap(struct ib_ucontext *ucontext, struct vm_area_struct *vma)
390 struct ib_device *dev = ucontext->device;
391 size_t length = vma->vm_end - vma->vm_start;
392 struct rdma_user_mmap_entry *rdma_entry;
393 struct qedr_user_mmap_entry *entry;
398 "start %#lx, end %#lx, length = %#zx, pgoff = %#lx\n",
399 vma->vm_start, vma->vm_end, length, vma->vm_pgoff);
401 rdma_entry = rdma_user_mmap_entry_get(ucontext, vma);
403 ibdev_dbg(dev, "pgoff[%#lx] does not have valid entry\n",
407 entry = get_qedr_mmap_entry(rdma_entry);
409 "Mapping address[%#llx], length[%#zx], mmap_flag[%d]\n",
410 entry->io_address, length, entry->mmap_flag);
412 switch (entry->mmap_flag) {
413 case QEDR_USER_MMAP_IO_WC:
414 pfn = entry->io_address >> PAGE_SHIFT;
415 rc = rdma_user_mmap_io(ucontext, vma, pfn, length,
416 pgprot_writecombine(vma->vm_page_prot),
419 case QEDR_USER_MMAP_PHYS_PAGE:
420 rc = vm_insert_page(vma, vma->vm_start,
421 virt_to_page(entry->address));
429 "Couldn't mmap address[%#llx] length[%#zx] mmap_flag[%d] err[%d]\n",
430 entry->io_address, length, entry->mmap_flag, rc);
432 rdma_user_mmap_entry_put(rdma_entry);
436 int qedr_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
438 struct ib_device *ibdev = ibpd->device;
439 struct qedr_dev *dev = get_qedr_dev(ibdev);
440 struct qedr_pd *pd = get_qedr_pd(ibpd);
444 DP_DEBUG(dev, QEDR_MSG_INIT, "Function called from: %s\n",
445 udata ? "User Lib" : "Kernel");
447 if (!dev->rdma_ctx) {
448 DP_ERR(dev, "invalid RDMA context\n");
452 rc = dev->ops->rdma_alloc_pd(dev->rdma_ctx, &pd_id);
459 struct qedr_alloc_pd_uresp uresp = {
462 struct qedr_ucontext *context = rdma_udata_to_drv_context(
463 udata, struct qedr_ucontext, ibucontext);
465 rc = qedr_ib_copy_to_udata(udata, &uresp, sizeof(uresp));
467 DP_ERR(dev, "copy error pd_id=0x%x.\n", pd_id);
468 dev->ops->rdma_dealloc_pd(dev->rdma_ctx, pd_id);
479 int qedr_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
481 struct qedr_dev *dev = get_qedr_dev(ibpd->device);
482 struct qedr_pd *pd = get_qedr_pd(ibpd);
484 DP_DEBUG(dev, QEDR_MSG_INIT, "Deallocating PD %d\n", pd->pd_id);
485 dev->ops->rdma_dealloc_pd(dev->rdma_ctx, pd->pd_id);
490 int qedr_alloc_xrcd(struct ib_xrcd *ibxrcd, struct ib_udata *udata)
492 struct qedr_dev *dev = get_qedr_dev(ibxrcd->device);
493 struct qedr_xrcd *xrcd = get_qedr_xrcd(ibxrcd);
495 return dev->ops->rdma_alloc_xrcd(dev->rdma_ctx, &xrcd->xrcd_id);
498 int qedr_dealloc_xrcd(struct ib_xrcd *ibxrcd, struct ib_udata *udata)
500 struct qedr_dev *dev = get_qedr_dev(ibxrcd->device);
501 u16 xrcd_id = get_qedr_xrcd(ibxrcd)->xrcd_id;
503 dev->ops->rdma_dealloc_xrcd(dev->rdma_ctx, xrcd_id);
506 static void qedr_free_pbl(struct qedr_dev *dev,
507 struct qedr_pbl_info *pbl_info, struct qedr_pbl *pbl)
509 struct pci_dev *pdev = dev->pdev;
512 for (i = 0; i < pbl_info->num_pbls; i++) {
515 dma_free_coherent(&pdev->dev, pbl_info->pbl_size,
516 pbl[i].va, pbl[i].pa);
522 #define MIN_FW_PBL_PAGE_SIZE (4 * 1024)
523 #define MAX_FW_PBL_PAGE_SIZE (64 * 1024)
525 #define NUM_PBES_ON_PAGE(_page_size) (_page_size / sizeof(u64))
526 #define MAX_PBES_ON_PAGE NUM_PBES_ON_PAGE(MAX_FW_PBL_PAGE_SIZE)
527 #define MAX_PBES_TWO_LAYER (MAX_PBES_ON_PAGE * MAX_PBES_ON_PAGE)
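/* Worked numbers for the limits above: a PBE is a u64, so a 4 KiB PBL page
 * holds 4096 / 8 = 512 PBEs and a 64 KiB page holds 8192 (MAX_PBES_ON_PAGE).
 * A two-layer table can therefore address up to 8192 * 8192 = 67,108,864
 * PBEs, i.e. 256 GiB when each PBE points at a 4 KiB page.
 */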
529 static struct qedr_pbl *qedr_alloc_pbl_tbl(struct qedr_dev *dev,
530 struct qedr_pbl_info *pbl_info,
533 struct pci_dev *pdev = dev->pdev;
534 struct qedr_pbl *pbl_table;
535 dma_addr_t *pbl_main_tbl;
540 pbl_table = kcalloc(pbl_info->num_pbls, sizeof(*pbl_table), flags);
542 return ERR_PTR(-ENOMEM);
544 for (i = 0; i < pbl_info->num_pbls; i++) {
545 va = dma_alloc_coherent(&pdev->dev, pbl_info->pbl_size, &pa,
550 pbl_table[i].va = va;
551 pbl_table[i].pa = pa;
554	/* Two-layer PBLs: if we have more than one pbl we need to initialize
555	 * the first one with physical pointers to all of the rest
557 pbl_main_tbl = (dma_addr_t *)pbl_table[0].va;
558 for (i = 0; i < pbl_info->num_pbls - 1; i++)
559 pbl_main_tbl[i] = pbl_table[i + 1].pa;
564 for (i--; i >= 0; i--)
565 dma_free_coherent(&pdev->dev, pbl_info->pbl_size,
566 pbl_table[i].va, pbl_table[i].pa);
568 qedr_free_pbl(dev, pbl_info, pbl_table);
570 return ERR_PTR(-ENOMEM);
573 static int qedr_prepare_pbl_tbl(struct qedr_dev *dev,
574 struct qedr_pbl_info *pbl_info,
575 u32 num_pbes, int two_layer_capable)
581 if ((num_pbes > MAX_PBES_ON_PAGE) && two_layer_capable) {
582 if (num_pbes > MAX_PBES_TWO_LAYER) {
583 DP_ERR(dev, "prepare pbl table: too many pages %d\n",
588 /* calculate required pbl page size */
589 pbl_size = MIN_FW_PBL_PAGE_SIZE;
590 pbl_capacity = NUM_PBES_ON_PAGE(pbl_size) *
591 NUM_PBES_ON_PAGE(pbl_size);
593 while (pbl_capacity < num_pbes) {
595 pbl_capacity = pbl_size / sizeof(u64);
596 pbl_capacity = pbl_capacity * pbl_capacity;
599 num_pbls = DIV_ROUND_UP(num_pbes, NUM_PBES_ON_PAGE(pbl_size));
600		num_pbls++;	/* One for layer 0 (points to the pbls) */
601 pbl_info->two_layered = true;
603 /* One layered PBL */
605 pbl_size = max_t(u32, MIN_FW_PBL_PAGE_SIZE,
606 roundup_pow_of_two((num_pbes * sizeof(u64))));
607 pbl_info->two_layered = false;
610 pbl_info->num_pbls = num_pbls;
611 pbl_info->pbl_size = pbl_size;
612 pbl_info->num_pbes = num_pbes;
614 DP_DEBUG(dev, QEDR_MSG_MR,
615 "prepare pbl table: num_pbes=%d, num_pbls=%d, pbl_size=%d\n",
616 pbl_info->num_pbes, pbl_info->num_pbls, pbl_info->pbl_size);
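/* Example of the sizing logic above, using the constants defined earlier in
 * this file: num_pbes = 100,000 with two_layer_capable exceeds
 * MAX_PBES_ON_PAGE, but 512 * 512 = 262,144 >= 100,000, so pbl_size stays at
 * 4 KiB and num_pbls = DIV_ROUND_UP(100000, 512) + 1 = 197 (one layer-0 page
 * plus 196 leaf pages). A small region of 1,000 PBEs instead gets a
 * single-layer PBL of max(4 KiB, roundup_pow_of_two(8000)) = 8 KiB.
 */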
621 static void qedr_populate_pbls(struct qedr_dev *dev, struct ib_umem *umem,
622 struct qedr_pbl *pbl,
623 struct qedr_pbl_info *pbl_info, u32 pg_shift)
625 int pbe_cnt, total_num_pbes = 0;
626 struct qedr_pbl *pbl_tbl;
627 struct ib_block_iter biter;
630 if (!pbl_info->num_pbes)
633	/* If we have a two-layered pbl, the first pbl points to the rest
634	 * of the pbls and the first entry lies in the second pbl of the table
636 if (pbl_info->two_layered)
641 pbe = (struct regpair *)pbl_tbl->va;
643 DP_ERR(dev, "cannot populate PBL due to a NULL PBE\n");
649 rdma_umem_for_each_dma_block (umem, &biter, BIT(pg_shift)) {
650 u64 pg_addr = rdma_block_iter_dma_address(&biter);
652 pbe->lo = cpu_to_le32(pg_addr);
653 pbe->hi = cpu_to_le32(upper_32_bits(pg_addr));
659 if (total_num_pbes == pbl_info->num_pbes)
662 /* If the given pbl is full storing the pbes, move to next pbl.
664 if (pbe_cnt == (pbl_info->pbl_size / sizeof(u64))) {
666 pbe = (struct regpair *)pbl_tbl->va;
672 static int qedr_db_recovery_add(struct qedr_dev *dev,
673 void __iomem *db_addr,
675 enum qed_db_rec_width db_width,
676 enum qed_db_rec_space db_space)
679 DP_DEBUG(dev, QEDR_MSG_INIT, "avoiding db rec since old lib\n");
683 return dev->ops->common->db_recovery_add(dev->cdev, db_addr, db_data,
687 static void qedr_db_recovery_del(struct qedr_dev *dev,
688 void __iomem *db_addr,
692 DP_DEBUG(dev, QEDR_MSG_INIT, "avoiding db rec since old lib\n");
696 /* Ignore return code as there is not much we can do about it. Error
697 * log will be printed inside.
699 dev->ops->common->db_recovery_del(dev->cdev, db_addr, db_data);
702 static int qedr_copy_cq_uresp(struct qedr_dev *dev,
703 struct qedr_cq *cq, struct ib_udata *udata,
706 struct qedr_create_cq_uresp uresp;
709 memset(&uresp, 0, sizeof(uresp));
711 uresp.db_offset = db_offset;
712 uresp.icid = cq->icid;
713 if (cq->q.db_mmap_entry)
715 rdma_user_mmap_get_offset(cq->q.db_mmap_entry);
717 rc = qedr_ib_copy_to_udata(udata, &uresp, sizeof(uresp));
719 DP_ERR(dev, "copy error cqid=0x%x.\n", cq->icid);
724 static void consume_cqe(struct qedr_cq *cq)
726 if (cq->latest_cqe == cq->toggle_cqe)
727 cq->pbl_toggle ^= RDMA_CQE_REQUESTER_TOGGLE_BIT_MASK;
729 cq->latest_cqe = qed_chain_consume(&cq->pbl);
732 static inline int qedr_align_cq_entries(int entries)
734 u64 size, aligned_size;
736 /* We allocate an extra entry that we don't report to the FW. */
737 size = (entries + 1) * QEDR_CQE_SIZE;
738 aligned_size = ALIGN(size, PAGE_SIZE);
740 return aligned_size / QEDR_CQE_SIZE;
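/* Rough example, assuming QEDR_CQE_SIZE is 32 bytes (sizeof(union rdma_cqe))
 * and a 4 KiB PAGE_SIZE -- values assumed here, not taken from this excerpt:
 * a request for 100 entries becomes (100 + 1) * 32 = 3232 bytes, aligned up
 * to 4096, so 128 CQEs are actually provisioned.
 */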
743 static int qedr_init_user_db_rec(struct ib_udata *udata,
744 struct qedr_dev *dev, struct qedr_userq *q,
745 bool requires_db_rec)
747 struct qedr_ucontext *uctx =
748 rdma_udata_to_drv_context(udata, struct qedr_ucontext,
750 struct qedr_user_mmap_entry *entry;
753	/* Abort for a non-doorbell userqueue (SRQ) or a non-supporting lib */
754 if (requires_db_rec == 0 || !uctx->db_rec)
757 /* Allocate a page for doorbell recovery, add to mmap */
758 q->db_rec_data = (void *)get_zeroed_page(GFP_USER);
759 if (!q->db_rec_data) {
760 DP_ERR(dev, "get_zeroed_page failed\n");
764 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
766 goto err_free_db_data;
768 entry->address = q->db_rec_data;
769 entry->length = PAGE_SIZE;
770 entry->mmap_flag = QEDR_USER_MMAP_PHYS_PAGE;
771 rc = rdma_user_mmap_entry_insert(&uctx->ibucontext,
777 q->db_mmap_entry = &entry->rdma_entry;
785 free_page((unsigned long)q->db_rec_data);
786 q->db_rec_data = NULL;
790 static inline int qedr_init_user_queue(struct ib_udata *udata,
791 struct qedr_dev *dev,
792 struct qedr_userq *q, u64 buf_addr,
793 size_t buf_len, bool requires_db_rec,
800 q->buf_addr = buf_addr;
801 q->buf_len = buf_len;
802 q->umem = ib_umem_get(&dev->ibdev, q->buf_addr, q->buf_len, access);
803 if (IS_ERR(q->umem)) {
804 DP_ERR(dev, "create user queue: failed ib_umem_get, got %ld\n",
806 return PTR_ERR(q->umem);
809 fw_pages = ib_umem_num_dma_blocks(q->umem, 1 << FW_PAGE_SHIFT);
810 rc = qedr_prepare_pbl_tbl(dev, &q->pbl_info, fw_pages, 0);
814 if (alloc_and_init) {
815 q->pbl_tbl = qedr_alloc_pbl_tbl(dev, &q->pbl_info, GFP_KERNEL);
816 if (IS_ERR(q->pbl_tbl)) {
817 rc = PTR_ERR(q->pbl_tbl);
820 qedr_populate_pbls(dev, q->umem, q->pbl_tbl, &q->pbl_info,
823 q->pbl_tbl = kzalloc(sizeof(*q->pbl_tbl), GFP_KERNEL);
830 /* mmap the user address used to store doorbell data for recovery */
831 return qedr_init_user_db_rec(udata, dev, q, requires_db_rec);
834 ib_umem_release(q->umem);
840 static inline void qedr_init_cq_params(struct qedr_cq *cq,
841 struct qedr_ucontext *ctx,
842 struct qedr_dev *dev, int vector,
843 int chain_entries, int page_cnt,
845 struct qed_rdma_create_cq_in_params
848 memset(params, 0, sizeof(*params));
849 params->cq_handle_hi = upper_32_bits((uintptr_t)cq);
850 params->cq_handle_lo = lower_32_bits((uintptr_t)cq);
851 params->cnq_id = vector;
852 params->cq_size = chain_entries - 1;
853 params->dpi = (ctx) ? ctx->dpi : dev->dpi;
854 params->pbl_num_pages = page_cnt;
855 params->pbl_ptr = pbl_ptr;
856 params->pbl_two_level = 0;
859 static void doorbell_cq(struct qedr_cq *cq, u32 cons, u8 flags)
861 cq->db.data.agg_flags = flags;
862 cq->db.data.value = cpu_to_le32(cons);
863 writeq(cq->db.raw, cq->db_addr);
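/* The consumer index and the aggregation/arm flags share one 64-bit doorbell
 * record, so a single writeq() presents them to the device as one consistent
 * update rather than two partial ones.
 */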
866 int qedr_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
868 struct qedr_cq *cq = get_qedr_cq(ibcq);
869 unsigned long sflags;
870 struct qedr_dev *dev;
872 dev = get_qedr_dev(ibcq->device);
876 "warning: arm was invoked after destroy for cq %p (icid=%d)\n",
882 if (cq->cq_type == QEDR_CQ_TYPE_GSI)
885 spin_lock_irqsave(&cq->cq_lock, sflags);
889 if (flags & IB_CQ_SOLICITED)
890 cq->arm_flags |= DQ_UCM_ROCE_CQ_ARM_SE_CF_CMD;
892 if (flags & IB_CQ_NEXT_COMP)
893 cq->arm_flags |= DQ_UCM_ROCE_CQ_ARM_CF_CMD;
895 doorbell_cq(cq, cq->cq_cons - 1, cq->arm_flags);
897 spin_unlock_irqrestore(&cq->cq_lock, sflags);
902 int qedr_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
903 struct ib_udata *udata)
905 struct ib_device *ibdev = ibcq->device;
906 struct qedr_ucontext *ctx = rdma_udata_to_drv_context(
907 udata, struct qedr_ucontext, ibucontext);
908 struct qed_rdma_destroy_cq_out_params destroy_oparams;
909 struct qed_rdma_destroy_cq_in_params destroy_iparams;
910 struct qed_chain_init_params chain_params = {
911 .mode = QED_CHAIN_MODE_PBL,
912 .intended_use = QED_CHAIN_USE_TO_CONSUME,
913 .cnt_type = QED_CHAIN_CNT_TYPE_U32,
914 .elem_size = sizeof(union rdma_cqe),
916 struct qedr_dev *dev = get_qedr_dev(ibdev);
917 struct qed_rdma_create_cq_in_params params;
918 struct qedr_create_cq_ureq ureq = {};
919 int vector = attr->comp_vector;
920 int entries = attr->cqe;
921 struct qedr_cq *cq = get_qedr_cq(ibcq);
929 DP_DEBUG(dev, QEDR_MSG_INIT,
930 "create_cq: called from %s. entries=%d, vector=%d\n",
931 udata ? "User Lib" : "Kernel", entries, vector);
936 if (entries > QEDR_MAX_CQES) {
938 "create cq: the number of entries %d is too high. Must be equal or below %d.\n",
939 entries, QEDR_MAX_CQES);
943 chain_entries = qedr_align_cq_entries(entries);
944 chain_entries = min_t(int, chain_entries, QEDR_MAX_CQES);
945 chain_params.num_elems = chain_entries;
947 /* calc db offset. user will add DPI base, kernel will add db addr */
948 db_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_UCM_RDMA_CQ_CONS_32BIT);
951 if (ib_copy_from_udata(&ureq, udata, min(sizeof(ureq),
954 "create cq: problem copying data from user space\n");
960 "create cq: cannot create a cq with 0 entries\n");
964 cq->cq_type = QEDR_CQ_TYPE_USER;
966 rc = qedr_init_user_queue(udata, dev, &cq->q, ureq.addr,
967 ureq.len, true, IB_ACCESS_LOCAL_WRITE,
972 pbl_ptr = cq->q.pbl_tbl->pa;
973 page_cnt = cq->q.pbl_info.num_pbes;
975 cq->ibcq.cqe = chain_entries;
976 cq->q.db_addr = ctx->dpi_addr + db_offset;
978 cq->cq_type = QEDR_CQ_TYPE_KERNEL;
980 rc = dev->ops->common->chain_alloc(dev->cdev, &cq->pbl,
985 page_cnt = qed_chain_get_page_cnt(&cq->pbl);
986 pbl_ptr = qed_chain_get_pbl_phys(&cq->pbl);
987 cq->ibcq.cqe = cq->pbl.capacity;
990 qedr_init_cq_params(cq, ctx, dev, vector, chain_entries, page_cnt,
993	rc = dev->ops->rdma_create_cq(dev->rdma_ctx, &params, &icid);
998 cq->sig = QEDR_CQ_MAGIC_NUMBER;
999 spin_lock_init(&cq->cq_lock);
1002 rc = qedr_copy_cq_uresp(dev, cq, udata, db_offset);
1006 rc = qedr_db_recovery_add(dev, cq->q.db_addr,
1007 &cq->q.db_rec_data->db_data,
1014 /* Generate doorbell address. */
1015 cq->db.data.icid = cq->icid;
1016 cq->db_addr = dev->db_addr + db_offset;
1017 cq->db.data.params = DB_AGG_CMD_MAX <<
1018 RDMA_PWM_VAL32_DATA_AGG_CMD_SHIFT;
1020		/* point to the very last element; once we pass it we will toggle */
1021 cq->toggle_cqe = qed_chain_get_last_elem(&cq->pbl);
1022 cq->pbl_toggle = RDMA_CQE_REQUESTER_TOGGLE_BIT_MASK;
1023 cq->latest_cqe = NULL;
1025 cq->cq_cons = qed_chain_get_cons_idx_u32(&cq->pbl);
1027 rc = qedr_db_recovery_add(dev, cq->db_addr, &cq->db.data,
1028 DB_REC_WIDTH_64B, DB_REC_KERNEL);
1033 DP_DEBUG(dev, QEDR_MSG_CQ,
1034 "create cq: icid=0x%0x, addr=%p, size(entries)=0x%0x\n",
1035 cq->icid, cq, params.cq_size);
1040 destroy_iparams.icid = cq->icid;
1041 dev->ops->rdma_destroy_cq(dev->rdma_ctx, &destroy_iparams,
1045 qedr_free_pbl(dev, &cq->q.pbl_info, cq->q.pbl_tbl);
1046 ib_umem_release(cq->q.umem);
1047 if (cq->q.db_mmap_entry)
1048 rdma_user_mmap_entry_remove(cq->q.db_mmap_entry);
1050 dev->ops->common->chain_free(dev->cdev, &cq->pbl);
1056 #define QEDR_DESTROY_CQ_MAX_ITERATIONS (10)
1057 #define QEDR_DESTROY_CQ_ITER_DURATION (10)
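/* With both constants set to 10, the destroy path below first busy-waits
 * (udelay) for at most 10 * 10 us ~= 100 us and then sleeps (msleep) for at
 * most 10 * 10 ms ~= 100 ms while waiting for the remaining CNQ
 * notifications to arrive.
 */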
1059 int qedr_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
1061 struct qedr_dev *dev = get_qedr_dev(ibcq->device);
1062 struct qed_rdma_destroy_cq_out_params oparams;
1063 struct qed_rdma_destroy_cq_in_params iparams;
1064 struct qedr_cq *cq = get_qedr_cq(ibcq);
1067 DP_DEBUG(dev, QEDR_MSG_CQ, "destroy cq %p (icid=%d)\n", cq, cq->icid);
1071	/* GSI CQs are handled by the driver, so they don't exist in the FW */
1072 if (cq->cq_type == QEDR_CQ_TYPE_GSI) {
1073 qedr_db_recovery_del(dev, cq->db_addr, &cq->db.data);
1077 iparams.icid = cq->icid;
1078 dev->ops->rdma_destroy_cq(dev->rdma_ctx, &iparams, &oparams);
1079 dev->ops->common->chain_free(dev->cdev, &cq->pbl);
1082 qedr_free_pbl(dev, &cq->q.pbl_info, cq->q.pbl_tbl);
1083 ib_umem_release(cq->q.umem);
1085 if (cq->q.db_rec_data) {
1086 qedr_db_recovery_del(dev, cq->q.db_addr,
1087 &cq->q.db_rec_data->db_data);
1088 rdma_user_mmap_entry_remove(cq->q.db_mmap_entry);
1091 qedr_db_recovery_del(dev, cq->db_addr, &cq->db.data);
1094 /* We don't want the IRQ handler to handle a non-existing CQ so we
1095 * wait until all CNQ interrupts, if any, are received. This will always
1096 * happen and will always happen very fast. If not, then a serious error
1097	 * has occurred. That is why we can use a long delay.
1098	 * We spin for a short time so we don't lose time on context switching
1099 * in case all the completions are handled in that span. Otherwise
1100 * we sleep for a while and check again. Since the CNQ may be
1101 * associated with (only) the current CPU we use msleep to allow the
1102 * current CPU to be freed.
1103 * The CNQ notification is increased in qedr_irq_handler().
1105 iter = QEDR_DESTROY_CQ_MAX_ITERATIONS;
1106 while (oparams.num_cq_notif != READ_ONCE(cq->cnq_notif) && iter) {
1107 udelay(QEDR_DESTROY_CQ_ITER_DURATION);
1111 iter = QEDR_DESTROY_CQ_MAX_ITERATIONS;
1112 while (oparams.num_cq_notif != READ_ONCE(cq->cnq_notif) && iter) {
1113 msleep(QEDR_DESTROY_CQ_ITER_DURATION);
1117 /* Note that we don't need to have explicit code to wait for the
1118 * completion of the event handler because it is invoked from the EQ.
1119 * Since the destroy CQ ramrod has also been received on the EQ we can
1120 * be certain that there's no event handler in process.
1125 static inline int get_gid_info_from_table(struct ib_qp *ibqp,
1126 struct ib_qp_attr *attr,
1128 struct qed_rdma_modify_qp_in_params
1131 const struct ib_gid_attr *gid_attr;
1132 enum rdma_network_type nw_type;
1133 const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
1138 gid_attr = grh->sgid_attr;
1139 ret = rdma_read_gid_l2_fields(gid_attr, &qp_params->vlan_id, NULL);
1143 nw_type = rdma_gid_attr_network_type(gid_attr);
1145 case RDMA_NETWORK_IPV6:
1146 memcpy(&qp_params->sgid.bytes[0], &gid_attr->gid.raw[0],
1147 sizeof(qp_params->sgid));
1148 memcpy(&qp_params->dgid.bytes[0],
1150 sizeof(qp_params->dgid));
1151 qp_params->roce_mode = ROCE_V2_IPV6;
1152 SET_FIELD(qp_params->modify_flags,
1153 QED_ROCE_MODIFY_QP_VALID_ROCE_MODE, 1);
1155 case RDMA_NETWORK_ROCE_V1:
1156 memcpy(&qp_params->sgid.bytes[0], &gid_attr->gid.raw[0],
1157 sizeof(qp_params->sgid));
1158 memcpy(&qp_params->dgid.bytes[0],
1160 sizeof(qp_params->dgid));
1161 qp_params->roce_mode = ROCE_V1;
1163 case RDMA_NETWORK_IPV4:
1164 memset(&qp_params->sgid, 0, sizeof(qp_params->sgid));
1165 memset(&qp_params->dgid, 0, sizeof(qp_params->dgid));
1166 ipv4_addr = qedr_get_ipv4_from_gid(gid_attr->gid.raw);
1167 qp_params->sgid.ipv4_addr = ipv4_addr;
1169 qedr_get_ipv4_from_gid(grh->dgid.raw);
1170 qp_params->dgid.ipv4_addr = ipv4_addr;
1171 SET_FIELD(qp_params->modify_flags,
1172 QED_ROCE_MODIFY_QP_VALID_ROCE_MODE, 1);
1173 qp_params->roce_mode = ROCE_V2_IPV4;
1179 for (i = 0; i < 4; i++) {
1180 qp_params->sgid.dwords[i] = ntohl(qp_params->sgid.dwords[i]);
1181 qp_params->dgid.dwords[i] = ntohl(qp_params->dgid.dwords[i]);
1184 if (qp_params->vlan_id >= VLAN_CFI_MASK)
1185 qp_params->vlan_id = 0;
1190 static int qedr_check_qp_attrs(struct ib_pd *ibpd, struct qedr_dev *dev,
1191 struct ib_qp_init_attr *attrs,
1192 struct ib_udata *udata)
1194 struct qedr_device_attr *qattr = &dev->attr;
1196 /* QP0... attrs->qp_type == IB_QPT_GSI */
1197 if (attrs->qp_type != IB_QPT_RC &&
1198 attrs->qp_type != IB_QPT_GSI &&
1199 attrs->qp_type != IB_QPT_XRC_INI &&
1200 attrs->qp_type != IB_QPT_XRC_TGT) {
1201 DP_DEBUG(dev, QEDR_MSG_QP,
1202 "create qp: unsupported qp type=0x%x requested\n",
1207 if (attrs->cap.max_send_wr > qattr->max_sqe) {
1209 "create qp: cannot create a SQ with %d elements (max_send_wr=0x%x)\n",
1210 attrs->cap.max_send_wr, qattr->max_sqe);
1214 if (attrs->cap.max_inline_data > qattr->max_inline) {
1216 "create qp: unsupported inline data size=0x%x requested (max_inline=0x%x)\n",
1217 attrs->cap.max_inline_data, qattr->max_inline);
1221 if (attrs->cap.max_send_sge > qattr->max_sge) {
1223 "create qp: unsupported send_sge=0x%x requested (max_send_sge=0x%x)\n",
1224 attrs->cap.max_send_sge, qattr->max_sge);
1228 if (attrs->cap.max_recv_sge > qattr->max_sge) {
1230 "create qp: unsupported recv_sge=0x%x requested (max_recv_sge=0x%x)\n",
1231 attrs->cap.max_recv_sge, qattr->max_sge);
1235 /* verify consumer QPs are not trying to use GSI QP's CQ.
1236 * TGT QP isn't associated with RQ/SQ
1238 if ((attrs->qp_type != IB_QPT_GSI) && (dev->gsi_qp_created) &&
1239 (attrs->qp_type != IB_QPT_XRC_TGT) &&
1240 (attrs->qp_type != IB_QPT_XRC_INI)) {
1241 struct qedr_cq *send_cq = get_qedr_cq(attrs->send_cq);
1242 struct qedr_cq *recv_cq = get_qedr_cq(attrs->recv_cq);
1244 if ((send_cq->cq_type == QEDR_CQ_TYPE_GSI) ||
1245 (recv_cq->cq_type == QEDR_CQ_TYPE_GSI)) {
1247 "create qp: consumer QP cannot use GSI CQs.\n");
1255 static int qedr_copy_srq_uresp(struct qedr_dev *dev,
1256 struct qedr_srq *srq, struct ib_udata *udata)
1258 struct qedr_create_srq_uresp uresp = {};
1261 uresp.srq_id = srq->srq_id;
1263 rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
1265 DP_ERR(dev, "create srq: problem copying data to user space\n");
1270 static void qedr_copy_rq_uresp(struct qedr_dev *dev,
1271 struct qedr_create_qp_uresp *uresp,
1274 /* iWARP requires two doorbells per RQ. */
1275 if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
1276 uresp->rq_db_offset =
1277 DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_IWARP_RQ_PROD);
1278 uresp->rq_db2_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_FLAGS);
1280 uresp->rq_db_offset =
1281 DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_ROCE_RQ_PROD);
1284 uresp->rq_icid = qp->icid;
1285 if (qp->urq.db_mmap_entry)
1286 uresp->rq_db_rec_addr =
1287 rdma_user_mmap_get_offset(qp->urq.db_mmap_entry);
1290 static void qedr_copy_sq_uresp(struct qedr_dev *dev,
1291 struct qedr_create_qp_uresp *uresp,
1294 uresp->sq_db_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD);
1296 /* iWARP uses the same cid for rq and sq */
1297 if (rdma_protocol_iwarp(&dev->ibdev, 1))
1298 uresp->sq_icid = qp->icid;
1300 uresp->sq_icid = qp->icid + 1;
1302 if (qp->usq.db_mmap_entry)
1303 uresp->sq_db_rec_addr =
1304 rdma_user_mmap_get_offset(qp->usq.db_mmap_entry);
1307 static int qedr_copy_qp_uresp(struct qedr_dev *dev,
1308 struct qedr_qp *qp, struct ib_udata *udata,
1309 struct qedr_create_qp_uresp *uresp)
1313 memset(uresp, 0, sizeof(*uresp));
1315 if (qedr_qp_has_sq(qp))
1316 qedr_copy_sq_uresp(dev, uresp, qp);
1318 if (qedr_qp_has_rq(qp))
1319 qedr_copy_rq_uresp(dev, uresp, qp);
1321 uresp->atomic_supported = dev->atomic_cap != IB_ATOMIC_NONE;
1322 uresp->qp_id = qp->qp_id;
1324 rc = qedr_ib_copy_to_udata(udata, uresp, sizeof(*uresp));
1327 "create qp: failed a copy to user space with qp icid=0x%x.\n",
1333 static void qedr_reset_qp_hwq_info(struct qedr_qp_hwq_info *qph)
1335 qed_chain_reset(&qph->pbl);
1339 qph->db_data.data.value = cpu_to_le16(0);
1342 static void qedr_set_common_qp_params(struct qedr_dev *dev,
1345 struct ib_qp_init_attr *attrs)
1347 spin_lock_init(&qp->q_lock);
1348 if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
1349 kref_init(&qp->refcnt);
1350 init_completion(&qp->iwarp_cm_comp);
1351 init_completion(&qp->qp_rel_comp);
1355 qp->qp_type = attrs->qp_type;
1356 qp->max_inline_data = attrs->cap.max_inline_data;
1357 qp->state = QED_ROCE_QP_STATE_RESET;
1359 qp->prev_wqe_size = 0;
1361 qp->signaled = attrs->sq_sig_type == IB_SIGNAL_ALL_WR;
1363 if (qedr_qp_has_sq(qp)) {
1364 qedr_reset_qp_hwq_info(&qp->sq);
1365 qp->sq.max_sges = attrs->cap.max_send_sge;
1366 qp->sq_cq = get_qedr_cq(attrs->send_cq);
1367 DP_DEBUG(dev, QEDR_MSG_QP,
1368 "SQ params:\tsq_max_sges = %d, sq_cq_id = %d\n",
1369 qp->sq.max_sges, qp->sq_cq->icid);
1373 qp->srq = get_qedr_srq(attrs->srq);
1375 if (qedr_qp_has_rq(qp)) {
1376 qedr_reset_qp_hwq_info(&qp->rq);
1377 qp->rq_cq = get_qedr_cq(attrs->recv_cq);
1378 qp->rq.max_sges = attrs->cap.max_recv_sge;
1379 DP_DEBUG(dev, QEDR_MSG_QP,
1380 "RQ params:\trq_max_sges = %d, rq_cq_id = %d\n",
1381 qp->rq.max_sges, qp->rq_cq->icid);
1384 DP_DEBUG(dev, QEDR_MSG_QP,
1385 "QP params:\tpd = %d, qp_type = %d, max_inline_data = %d, state = %d, signaled = %d, use_srq=%d\n",
1386 pd->pd_id, qp->qp_type, qp->max_inline_data,
1387 qp->state, qp->signaled, (attrs->srq) ? 1 : 0);
1388 DP_DEBUG(dev, QEDR_MSG_QP,
1389 "SQ params:\tsq_max_sges = %d, sq_cq_id = %d\n",
1390 qp->sq.max_sges, qp->sq_cq->icid);
1393 static int qedr_set_roce_db_info(struct qedr_dev *dev, struct qedr_qp *qp)
1397 if (qedr_qp_has_sq(qp)) {
1398 qp->sq.db = dev->db_addr +
1399 DB_ADDR_SHIFT(DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD);
1400 qp->sq.db_data.data.icid = qp->icid + 1;
1401 rc = qedr_db_recovery_add(dev, qp->sq.db, &qp->sq.db_data,
1402 DB_REC_WIDTH_32B, DB_REC_KERNEL);
1407 if (qedr_qp_has_rq(qp)) {
1408 qp->rq.db = dev->db_addr +
1409 DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_ROCE_RQ_PROD);
1410 qp->rq.db_data.data.icid = qp->icid;
1411 rc = qedr_db_recovery_add(dev, qp->rq.db, &qp->rq.db_data,
1412 DB_REC_WIDTH_32B, DB_REC_KERNEL);
1413 if (rc && qedr_qp_has_sq(qp))
1414 qedr_db_recovery_del(dev, qp->sq.db, &qp->sq.db_data);
1420 static int qedr_check_srq_params(struct qedr_dev *dev,
1421 struct ib_srq_init_attr *attrs,
1422 struct ib_udata *udata)
1424 struct qedr_device_attr *qattr = &dev->attr;
1426 if (attrs->attr.max_wr > qattr->max_srq_wr) {
1428 "create srq: unsupported srq_wr=0x%x requested (max_srq_wr=0x%x)\n",
1429 attrs->attr.max_wr, qattr->max_srq_wr);
1433 if (attrs->attr.max_sge > qattr->max_sge) {
1435 "create srq: unsupported sge=0x%x requested (max_srq_sge=0x%x)\n",
1436 attrs->attr.max_sge, qattr->max_sge);
1439 if (!udata && attrs->srq_type == IB_SRQT_XRC) {
1440 DP_ERR(dev, "XRC SRQs are not supported in kernel-space\n");
1447 static void qedr_free_srq_user_params(struct qedr_srq *srq)
1449 qedr_free_pbl(srq->dev, &srq->usrq.pbl_info, srq->usrq.pbl_tbl);
1450 ib_umem_release(srq->usrq.umem);
1451 ib_umem_release(srq->prod_umem);
1454 static void qedr_free_srq_kernel_params(struct qedr_srq *srq)
1456 struct qedr_srq_hwq_info *hw_srq = &srq->hw_srq;
1457 struct qedr_dev *dev = srq->dev;
1459 dev->ops->common->chain_free(dev->cdev, &hw_srq->pbl);
1461 dma_free_coherent(&dev->pdev->dev, sizeof(struct rdma_srq_producers),
1462 hw_srq->virt_prod_pair_addr,
1463 hw_srq->phy_prod_pair_addr);
1466 static int qedr_init_srq_user_params(struct ib_udata *udata,
1467 struct qedr_srq *srq,
1468 struct qedr_create_srq_ureq *ureq,
1471 struct scatterlist *sg;
1474 rc = qedr_init_user_queue(udata, srq->dev, &srq->usrq, ureq->srq_addr,
1475 ureq->srq_len, false, access, 1);
1479 srq->prod_umem = ib_umem_get(srq->ibsrq.device, ureq->prod_pair_addr,
1480 sizeof(struct rdma_srq_producers), access);
1481 if (IS_ERR(srq->prod_umem)) {
1482 qedr_free_pbl(srq->dev, &srq->usrq.pbl_info, srq->usrq.pbl_tbl);
1483 ib_umem_release(srq->usrq.umem);
1485 "create srq: failed ib_umem_get for producer, got %ld\n",
1486 PTR_ERR(srq->prod_umem));
1487 return PTR_ERR(srq->prod_umem);
1490 sg = srq->prod_umem->sgt_append.sgt.sgl;
1491 srq->hw_srq.phy_prod_pair_addr = sg_dma_address(sg);
1496 static int qedr_alloc_srq_kernel_params(struct qedr_srq *srq,
1497 struct qedr_dev *dev,
1498 struct ib_srq_init_attr *init_attr)
1500 struct qedr_srq_hwq_info *hw_srq = &srq->hw_srq;
1501 struct qed_chain_init_params params = {
1502 .mode = QED_CHAIN_MODE_PBL,
1503 .intended_use = QED_CHAIN_USE_TO_CONSUME_PRODUCE,
1504 .cnt_type = QED_CHAIN_CNT_TYPE_U32,
1505 .elem_size = QEDR_SRQ_WQE_ELEM_SIZE,
1507 dma_addr_t phy_prod_pair_addr;
1512 va = dma_alloc_coherent(&dev->pdev->dev,
1513 sizeof(struct rdma_srq_producers),
1514 &phy_prod_pair_addr, GFP_KERNEL);
1517 "create srq: failed to allocate dma memory for producer\n");
1521 hw_srq->phy_prod_pair_addr = phy_prod_pair_addr;
1522 hw_srq->virt_prod_pair_addr = va;
1524 num_elems = init_attr->attr.max_wr * RDMA_MAX_SRQ_WQE_SIZE;
1525 params.num_elems = num_elems;
1527	rc = dev->ops->common->chain_alloc(dev->cdev, &hw_srq->pbl, &params);
1531 hw_srq->num_elems = num_elems;
1536 dma_free_coherent(&dev->pdev->dev, sizeof(struct rdma_srq_producers),
1537 va, phy_prod_pair_addr);
1541 int qedr_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *init_attr,
1542 struct ib_udata *udata)
1544 struct qed_rdma_destroy_srq_in_params destroy_in_params;
1545 struct qed_rdma_create_srq_in_params in_params = {};
1546 struct qedr_dev *dev = get_qedr_dev(ibsrq->device);
1547 struct qed_rdma_create_srq_out_params out_params;
1548 struct qedr_pd *pd = get_qedr_pd(ibsrq->pd);
1549 struct qedr_create_srq_ureq ureq = {};
1550 u64 pbl_base_addr, phy_prod_pair_addr;
1551 struct qedr_srq_hwq_info *hw_srq;
1552 u32 page_cnt, page_size;
1553 struct qedr_srq *srq = get_qedr_srq(ibsrq);
1556 DP_DEBUG(dev, QEDR_MSG_QP,
1557 "create SRQ called from %s (pd %p)\n",
1558 (udata) ? "User lib" : "kernel", pd);
1560 if (init_attr->srq_type != IB_SRQT_BASIC &&
1561 init_attr->srq_type != IB_SRQT_XRC)
1564 rc = qedr_check_srq_params(dev, init_attr, udata);
1569 srq->is_xrc = (init_attr->srq_type == IB_SRQT_XRC);
1570 hw_srq = &srq->hw_srq;
1571 spin_lock_init(&srq->lock);
1573 hw_srq->max_wr = init_attr->attr.max_wr;
1574 hw_srq->max_sges = init_attr->attr.max_sge;
1577 if (ib_copy_from_udata(&ureq, udata, min(sizeof(ureq),
1580 "create srq: problem copying data from user space\n");
1584 rc = qedr_init_srq_user_params(udata, srq, &ureq, 0);
1588 page_cnt = srq->usrq.pbl_info.num_pbes;
1589 pbl_base_addr = srq->usrq.pbl_tbl->pa;
1590 phy_prod_pair_addr = hw_srq->phy_prod_pair_addr;
1591 page_size = PAGE_SIZE;
1593 struct qed_chain *pbl;
1595 rc = qedr_alloc_srq_kernel_params(srq, dev, init_attr);
1600 page_cnt = qed_chain_get_page_cnt(pbl);
1601 pbl_base_addr = qed_chain_get_pbl_phys(pbl);
1602 phy_prod_pair_addr = hw_srq->phy_prod_pair_addr;
1603 page_size = QED_CHAIN_PAGE_SIZE;
1606 in_params.pd_id = pd->pd_id;
1607 in_params.pbl_base_addr = pbl_base_addr;
1608 in_params.prod_pair_addr = phy_prod_pair_addr;
1609 in_params.num_pages = page_cnt;
1610 in_params.page_size = page_size;
1612 struct qedr_xrcd *xrcd = get_qedr_xrcd(init_attr->ext.xrc.xrcd);
1613 struct qedr_cq *cq = get_qedr_cq(init_attr->ext.cq);
1615 in_params.is_xrc = 1;
1616 in_params.xrcd_id = xrcd->xrcd_id;
1617 in_params.cq_cid = cq->icid;
1620 rc = dev->ops->rdma_create_srq(dev->rdma_ctx, &in_params, &out_params);
1624 srq->srq_id = out_params.srq_id;
1627 rc = qedr_copy_srq_uresp(dev, srq, udata);
1632 rc = xa_insert_irq(&dev->srqs, srq->srq_id, srq, GFP_KERNEL);
1636 DP_DEBUG(dev, QEDR_MSG_SRQ,
1637 "create srq: created srq with srq_id=0x%0x\n", srq->srq_id);
1641 destroy_in_params.srq_id = srq->srq_id;
1643 dev->ops->rdma_destroy_srq(dev->rdma_ctx, &destroy_in_params);
1646 qedr_free_srq_user_params(srq);
1648 qedr_free_srq_kernel_params(srq);
1653 int qedr_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
1655 struct qed_rdma_destroy_srq_in_params in_params = {};
1656 struct qedr_dev *dev = get_qedr_dev(ibsrq->device);
1657 struct qedr_srq *srq = get_qedr_srq(ibsrq);
1659 xa_erase_irq(&dev->srqs, srq->srq_id);
1660 in_params.srq_id = srq->srq_id;
1661 in_params.is_xrc = srq->is_xrc;
1662 dev->ops->rdma_destroy_srq(dev->rdma_ctx, &in_params);
1665 qedr_free_srq_user_params(srq);
1667 qedr_free_srq_kernel_params(srq);
1669 DP_DEBUG(dev, QEDR_MSG_SRQ,
1670 "destroy srq: destroyed srq with srq_id=0x%0x\n",
1675 int qedr_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
1676 enum ib_srq_attr_mask attr_mask, struct ib_udata *udata)
1678 struct qed_rdma_modify_srq_in_params in_params = {};
1679 struct qedr_dev *dev = get_qedr_dev(ibsrq->device);
1680 struct qedr_srq *srq = get_qedr_srq(ibsrq);
1683 if (attr_mask & IB_SRQ_MAX_WR) {
1685 "modify srq: invalid attribute mask=0x%x specified for %p\n",
1690 if (attr_mask & IB_SRQ_LIMIT) {
1691 if (attr->srq_limit >= srq->hw_srq.max_wr) {
1693 "modify srq: invalid srq_limit=0x%x (max_srq_limit=0x%x)\n",
1694 attr->srq_limit, srq->hw_srq.max_wr);
1698 in_params.srq_id = srq->srq_id;
1699 in_params.wqe_limit = attr->srq_limit;
1700 rc = dev->ops->rdma_modify_srq(dev->rdma_ctx, &in_params);
1705 srq->srq_limit = attr->srq_limit;
1707 DP_DEBUG(dev, QEDR_MSG_SRQ,
1708 "modify srq: modified srq with srq_id=0x%0x\n", srq->srq_id);
1713 static enum qed_rdma_qp_type qedr_ib_to_qed_qp_type(enum ib_qp_type ib_qp_type)
1715 switch (ib_qp_type) {
1717 return QED_RDMA_QP_TYPE_RC;
1718 case IB_QPT_XRC_INI:
1719 return QED_RDMA_QP_TYPE_XRC_INI;
1720 case IB_QPT_XRC_TGT:
1721 return QED_RDMA_QP_TYPE_XRC_TGT;
1723 return QED_RDMA_QP_TYPE_INVAL;
1728 qedr_init_common_qp_in_params(struct qedr_dev *dev,
1731 struct ib_qp_init_attr *attrs,
1732 bool fmr_and_reserved_lkey,
1733 struct qed_rdma_create_qp_in_params *params)
1735 /* QP handle to be written in an async event */
1736 params->qp_handle_async_lo = lower_32_bits((uintptr_t) qp);
1737 params->qp_handle_async_hi = upper_32_bits((uintptr_t) qp);
1739 params->signal_all = (attrs->sq_sig_type == IB_SIGNAL_ALL_WR);
1740 params->fmr_and_reserved_lkey = fmr_and_reserved_lkey;
1741 params->qp_type = qedr_ib_to_qed_qp_type(attrs->qp_type);
1742 params->stats_queue = 0;
1745 params->pd = pd->pd_id;
1746 params->dpi = pd->uctx ? pd->uctx->dpi : dev->dpi;
1749 if (qedr_qp_has_sq(qp))
1750 params->sq_cq_id = get_qedr_cq(attrs->send_cq)->icid;
1752 if (qedr_qp_has_rq(qp))
1753 params->rq_cq_id = get_qedr_cq(attrs->recv_cq)->icid;
1755 if (qedr_qp_has_srq(qp)) {
1756 params->rq_cq_id = get_qedr_cq(attrs->recv_cq)->icid;
1757 params->srq_id = qp->srq->srq_id;
1758 params->use_srq = true;
1761 params->use_srq = false;
1765 static inline void qedr_qp_user_print(struct qedr_dev *dev, struct qedr_qp *qp)
1767 DP_DEBUG(dev, QEDR_MSG_QP, "create qp: successfully created user QP. "
1775 qedr_qp_has_sq(qp) ? qp->usq.buf_addr : 0x0,
1776 qedr_qp_has_sq(qp) ? qp->usq.buf_len : 0,
1777 qedr_qp_has_rq(qp) ? qp->urq.buf_addr : 0x0,
1778 qedr_qp_has_sq(qp) ? qp->urq.buf_len : 0);
1782 qedr_iwarp_populate_user_qp(struct qedr_dev *dev,
1784 struct qed_rdma_create_qp_out_params *out_params)
1786 qp->usq.pbl_tbl->va = out_params->sq_pbl_virt;
1787 qp->usq.pbl_tbl->pa = out_params->sq_pbl_phys;
1789 qedr_populate_pbls(dev, qp->usq.umem, qp->usq.pbl_tbl,
1790 &qp->usq.pbl_info, FW_PAGE_SHIFT);
1792 qp->urq.pbl_tbl->va = out_params->rq_pbl_virt;
1793 qp->urq.pbl_tbl->pa = out_params->rq_pbl_phys;
1796 qedr_populate_pbls(dev, qp->urq.umem, qp->urq.pbl_tbl,
1797 &qp->urq.pbl_info, FW_PAGE_SHIFT);
1800 static void qedr_cleanup_user(struct qedr_dev *dev,
1801 struct qedr_ucontext *ctx,
1804 if (qedr_qp_has_sq(qp)) {
1805 ib_umem_release(qp->usq.umem);
1806 qp->usq.umem = NULL;
1809 if (qedr_qp_has_rq(qp)) {
1810 ib_umem_release(qp->urq.umem);
1811 qp->urq.umem = NULL;
1814 if (rdma_protocol_roce(&dev->ibdev, 1)) {
1815 qedr_free_pbl(dev, &qp->usq.pbl_info, qp->usq.pbl_tbl);
1816 qedr_free_pbl(dev, &qp->urq.pbl_info, qp->urq.pbl_tbl);
1818 kfree(qp->usq.pbl_tbl);
1819 kfree(qp->urq.pbl_tbl);
1822 if (qp->usq.db_rec_data) {
1823 qedr_db_recovery_del(dev, qp->usq.db_addr,
1824 &qp->usq.db_rec_data->db_data);
1825 rdma_user_mmap_entry_remove(qp->usq.db_mmap_entry);
1828 if (qp->urq.db_rec_data) {
1829 qedr_db_recovery_del(dev, qp->urq.db_addr,
1830 &qp->urq.db_rec_data->db_data);
1831 rdma_user_mmap_entry_remove(qp->urq.db_mmap_entry);
1834 if (rdma_protocol_iwarp(&dev->ibdev, 1))
1835 qedr_db_recovery_del(dev, qp->urq.db_rec_db2_addr,
1836 &qp->urq.db_rec_db2_data);
1839 static int qedr_create_user_qp(struct qedr_dev *dev,
1842 struct ib_udata *udata,
1843 struct ib_qp_init_attr *attrs)
1845 struct qed_rdma_create_qp_in_params in_params;
1846 struct qed_rdma_create_qp_out_params out_params;
1847 struct qedr_create_qp_uresp uresp = {};
1848 struct qedr_create_qp_ureq ureq = {};
1849 int alloc_and_init = rdma_protocol_roce(&dev->ibdev, 1);
1850 struct qedr_ucontext *ctx = NULL;
1851 struct qedr_pd *pd = NULL;
1854 qp->create_type = QEDR_QP_CREATE_USER;
1857 pd = get_qedr_pd(ibpd);
1862 rc = ib_copy_from_udata(&ureq, udata, min(sizeof(ureq),
1865 DP_ERR(dev, "Problem copying data from user space\n");
1870 if (qedr_qp_has_sq(qp)) {
1871 /* SQ - read access only (0) */
1872 rc = qedr_init_user_queue(udata, dev, &qp->usq, ureq.sq_addr,
1873 ureq.sq_len, true, 0, alloc_and_init);
1878 if (qedr_qp_has_rq(qp)) {
1879 /* RQ - read access only (0) */
1880 rc = qedr_init_user_queue(udata, dev, &qp->urq, ureq.rq_addr,
1881 ureq.rq_len, true, 0, alloc_and_init);
1883 ib_umem_release(qp->usq.umem);
1884 qp->usq.umem = NULL;
1885 if (rdma_protocol_roce(&dev->ibdev, 1)) {
1886 qedr_free_pbl(dev, &qp->usq.pbl_info,
1889 kfree(qp->usq.pbl_tbl);
1895 memset(&in_params, 0, sizeof(in_params));
1896 qedr_init_common_qp_in_params(dev, pd, qp, attrs, false, &in_params);
1897 in_params.qp_handle_lo = ureq.qp_handle_lo;
1898 in_params.qp_handle_hi = ureq.qp_handle_hi;
1900 if (qp->qp_type == IB_QPT_XRC_TGT) {
1901 struct qedr_xrcd *xrcd = get_qedr_xrcd(attrs->xrcd);
1903 in_params.xrcd_id = xrcd->xrcd_id;
1904 in_params.qp_handle_lo = qp->qp_id;
1905 in_params.use_srq = 1;
1908 if (qedr_qp_has_sq(qp)) {
1909 in_params.sq_num_pages = qp->usq.pbl_info.num_pbes;
1910 in_params.sq_pbl_ptr = qp->usq.pbl_tbl->pa;
1913 if (qedr_qp_has_rq(qp)) {
1914 in_params.rq_num_pages = qp->urq.pbl_info.num_pbes;
1915 in_params.rq_pbl_ptr = qp->urq.pbl_tbl->pa;
1919 SET_FIELD(in_params.flags, QED_ROCE_EDPM_MODE, ctx->edpm_mode);
1921 qp->qed_qp = dev->ops->rdma_create_qp(dev->rdma_ctx,
1922 &in_params, &out_params);
1929 if (rdma_protocol_iwarp(&dev->ibdev, 1))
1930 qedr_iwarp_populate_user_qp(dev, qp, &out_params);
1932 qp->qp_id = out_params.qp_id;
1933 qp->icid = out_params.icid;
1936 rc = qedr_copy_qp_uresp(dev, qp, udata, &uresp);
1941 /* db offset was calculated in copy_qp_uresp, now set in the user q */
1942 if (qedr_qp_has_sq(qp)) {
1943 qp->usq.db_addr = ctx->dpi_addr + uresp.sq_db_offset;
1944 qp->sq.max_wr = attrs->cap.max_send_wr;
1945 rc = qedr_db_recovery_add(dev, qp->usq.db_addr,
1946 &qp->usq.db_rec_data->db_data,
1953 if (qedr_qp_has_rq(qp)) {
1954 qp->urq.db_addr = ctx->dpi_addr + uresp.rq_db_offset;
1955 qp->rq.max_wr = attrs->cap.max_recv_wr;
1956 rc = qedr_db_recovery_add(dev, qp->urq.db_addr,
1957 &qp->urq.db_rec_data->db_data,
1964 if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
1965 qp->urq.db_rec_db2_addr = ctx->dpi_addr + uresp.rq_db2_offset;
1967		/* calculate the db_rec_db2 data here since it is constant, so there
1968		 * is no need to reflect it from user space
1970 qp->urq.db_rec_db2_data.data.icid = cpu_to_le16(qp->icid);
1971 qp->urq.db_rec_db2_data.data.value =
1972 cpu_to_le16(DQ_TCM_IWARP_POST_RQ_CF_CMD);
1974 rc = qedr_db_recovery_add(dev, qp->urq.db_rec_db2_addr,
1975 &qp->urq.db_rec_db2_data,
1981 qedr_qp_user_print(dev, qp);
1984 rc = dev->ops->rdma_destroy_qp(dev->rdma_ctx, qp->qed_qp);
1986 DP_ERR(dev, "create qp: fatal fault. rc=%d", rc);
1989 qedr_cleanup_user(dev, ctx, qp);
1993 static int qedr_set_iwarp_db_info(struct qedr_dev *dev, struct qedr_qp *qp)
1997 qp->sq.db = dev->db_addr +
1998 DB_ADDR_SHIFT(DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD);
1999 qp->sq.db_data.data.icid = qp->icid;
2001 rc = qedr_db_recovery_add(dev, qp->sq.db,
2008 qp->rq.db = dev->db_addr +
2009 DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_IWARP_RQ_PROD);
2010 qp->rq.db_data.data.icid = qp->icid;
2011 qp->rq.iwarp_db2 = dev->db_addr +
2012 DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_FLAGS);
2013 qp->rq.iwarp_db2_data.data.icid = qp->icid;
2014 qp->rq.iwarp_db2_data.data.value = DQ_TCM_IWARP_POST_RQ_CF_CMD;
2016 rc = qedr_db_recovery_add(dev, qp->rq.db,
2023 rc = qedr_db_recovery_add(dev, qp->rq.iwarp_db2,
2024 &qp->rq.iwarp_db2_data,
2031 qedr_roce_create_kernel_qp(struct qedr_dev *dev,
2033 struct qed_rdma_create_qp_in_params *in_params,
2034 u32 n_sq_elems, u32 n_rq_elems)
2036 struct qed_rdma_create_qp_out_params out_params;
2037 struct qed_chain_init_params params = {
2038 .mode = QED_CHAIN_MODE_PBL,
2039 .cnt_type = QED_CHAIN_CNT_TYPE_U32,
2043 params.intended_use = QED_CHAIN_USE_TO_PRODUCE;
2044 params.num_elems = n_sq_elems;
2045 params.elem_size = QEDR_SQE_ELEMENT_SIZE;
2047	rc = dev->ops->common->chain_alloc(dev->cdev, &qp->sq.pbl, &params);
2051 in_params->sq_num_pages = qed_chain_get_page_cnt(&qp->sq.pbl);
2052 in_params->sq_pbl_ptr = qed_chain_get_pbl_phys(&qp->sq.pbl);
2054 params.intended_use = QED_CHAIN_USE_TO_CONSUME_PRODUCE;
2055 params.num_elems = n_rq_elems;
2056 params.elem_size = QEDR_RQE_ELEMENT_SIZE;
2058	rc = dev->ops->common->chain_alloc(dev->cdev, &qp->rq.pbl, &params);
2062 in_params->rq_num_pages = qed_chain_get_page_cnt(&qp->rq.pbl);
2063 in_params->rq_pbl_ptr = qed_chain_get_pbl_phys(&qp->rq.pbl);
2065 qp->qed_qp = dev->ops->rdma_create_qp(dev->rdma_ctx,
2066 in_params, &out_params);
2071 qp->qp_id = out_params.qp_id;
2072 qp->icid = out_params.icid;
2074 return qedr_set_roce_db_info(dev, qp);
2078 qedr_iwarp_create_kernel_qp(struct qedr_dev *dev,
2080 struct qed_rdma_create_qp_in_params *in_params,
2081 u32 n_sq_elems, u32 n_rq_elems)
2083 struct qed_rdma_create_qp_out_params out_params;
2084 struct qed_chain_init_params params = {
2085 .mode = QED_CHAIN_MODE_PBL,
2086 .cnt_type = QED_CHAIN_CNT_TYPE_U32,
2090 in_params->sq_num_pages = QED_CHAIN_PAGE_CNT(n_sq_elems,
2091 QEDR_SQE_ELEMENT_SIZE,
2092 QED_CHAIN_PAGE_SIZE,
2093 QED_CHAIN_MODE_PBL);
2094 in_params->rq_num_pages = QED_CHAIN_PAGE_CNT(n_rq_elems,
2095 QEDR_RQE_ELEMENT_SIZE,
2096 QED_CHAIN_PAGE_SIZE,
2097 QED_CHAIN_MODE_PBL);
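/* Note the ordering difference from the RoCE kernel-QP path above: for iWARP
 * the qed layer allocates the SQ/RQ PBLs inside rdma_create_qp() and hands
 * them back in out_params, so the chains are built afterwards on top of
 * ext_pbl_virt/ext_pbl_phys instead of being allocated up front.
 */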
2099 qp->qed_qp = dev->ops->rdma_create_qp(dev->rdma_ctx,
2100 in_params, &out_params);
2105 /* Now we allocate the chain */
2107 params.intended_use = QED_CHAIN_USE_TO_PRODUCE;
2108 params.num_elems = n_sq_elems;
2109 params.elem_size = QEDR_SQE_ELEMENT_SIZE;
2110 params.ext_pbl_virt = out_params.sq_pbl_virt;
2111 params.ext_pbl_phys = out_params.sq_pbl_phys;
2113	rc = dev->ops->common->chain_alloc(dev->cdev, &qp->sq.pbl, &params);
2117 params.intended_use = QED_CHAIN_USE_TO_CONSUME_PRODUCE;
2118 params.num_elems = n_rq_elems;
2119 params.elem_size = QEDR_RQE_ELEMENT_SIZE;
2120 params.ext_pbl_virt = out_params.rq_pbl_virt;
2121 params.ext_pbl_phys = out_params.rq_pbl_phys;
2123	rc = dev->ops->common->chain_alloc(dev->cdev, &qp->rq.pbl, &params);
2127 qp->qp_id = out_params.qp_id;
2128 qp->icid = out_params.icid;
2130 return qedr_set_iwarp_db_info(dev, qp);
2133 dev->ops->rdma_destroy_qp(dev->rdma_ctx, qp->qed_qp);
2138 static void qedr_cleanup_kernel(struct qedr_dev *dev, struct qedr_qp *qp)
2140 dev->ops->common->chain_free(dev->cdev, &qp->sq.pbl);
2141 kfree(qp->wqe_wr_id);
2143 dev->ops->common->chain_free(dev->cdev, &qp->rq.pbl);
2144 kfree(qp->rqe_wr_id);
2146	/* GSI QP is not registered with the db mechanism, so no need to delete */
2147 if (qp->qp_type == IB_QPT_GSI)
2150 qedr_db_recovery_del(dev, qp->sq.db, &qp->sq.db_data);
2153 qedr_db_recovery_del(dev, qp->rq.db, &qp->rq.db_data);
2155 if (rdma_protocol_iwarp(&dev->ibdev, 1))
2156 qedr_db_recovery_del(dev, qp->rq.iwarp_db2,
2157 &qp->rq.iwarp_db2_data);
2161 static int qedr_create_kernel_qp(struct qedr_dev *dev,
2164 struct ib_qp_init_attr *attrs)
2166 struct qed_rdma_create_qp_in_params in_params;
2167 struct qedr_pd *pd = get_qedr_pd(ibpd);
2173 memset(&in_params, 0, sizeof(in_params));
2174 qp->create_type = QEDR_QP_CREATE_KERNEL;
2176 /* A single work request may take up to QEDR_MAX_SQ_WQE_SIZE elements in
2177 * the ring. The ring should allow at least a single WR, even if the
2178 * user requested none, due to allocation issues.
2179 * We should add an extra WR since the prod and cons indices of
2180 * wqe_wr_id are managed in such a way that the WQ is considered full
2181 * when (prod+1)%max_wr==cons. We currently don't do that because we
2182	 * double the number of entries due to an iSER issue that pushes far more
2183 * WRs than indicated. If we decline its ib_post_send() then we get
2184 * error prints in the dmesg we'd like to avoid.
2186 qp->sq.max_wr = min_t(u32, attrs->cap.max_send_wr * dev->wq_multiplier,
2189 qp->wqe_wr_id = kcalloc(qp->sq.max_wr, sizeof(*qp->wqe_wr_id),
2191 if (!qp->wqe_wr_id) {
2192 DP_ERR(dev, "create qp: failed SQ shadow memory allocation\n");
2196 /* QP handle to be written in CQE */
2197 in_params.qp_handle_lo = lower_32_bits((uintptr_t) qp);
2198 in_params.qp_handle_hi = upper_32_bits((uintptr_t) qp);
2200 /* A single work request may take up to QEDR_MAX_RQ_WQE_SIZE elements in
2201	 * the ring. The ring should allow at least a single WR, even if the
2202 * user requested none, due to allocation issues.
2204 qp->rq.max_wr = (u16) max_t(u32, attrs->cap.max_recv_wr, 1);
2206 /* Allocate driver internal RQ array */
2207 qp->rqe_wr_id = kcalloc(qp->rq.max_wr, sizeof(*qp->rqe_wr_id),
2209 if (!qp->rqe_wr_id) {
2211 "create qp: failed RQ shadow memory allocation\n");
2212 kfree(qp->wqe_wr_id);
2216 qedr_init_common_qp_in_params(dev, pd, qp, attrs, true, &in_params);
2218 n_sq_entries = attrs->cap.max_send_wr;
2219 n_sq_entries = min_t(u32, n_sq_entries, dev->attr.max_sqe);
2220 n_sq_entries = max_t(u32, n_sq_entries, 1);
2221 n_sq_elems = n_sq_entries * QEDR_MAX_SQE_ELEMENTS_PER_SQE;
2223 n_rq_elems = qp->rq.max_wr * QEDR_MAX_RQE_ELEMENTS_PER_RQE;
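/* Illustrative sizing, assuming QEDR_MAX_SQE_ELEMENTS_PER_SQE is 4 and
 * QEDR_MAX_RQE_ELEMENTS_PER_RQE is 1 (both assumed, not visible in this
 * excerpt): a request for 256 send WRs on a device with a large enough
 * max_sqe yields n_sq_elems = 256 * 4 = 1024 chain elements, while the RQ
 * gets one element per WR.
 */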
2225 if (rdma_protocol_iwarp(&dev->ibdev, 1))
2226 rc = qedr_iwarp_create_kernel_qp(dev, qp, &in_params,
2227 n_sq_elems, n_rq_elems);
2229 rc = qedr_roce_create_kernel_qp(dev, qp, &in_params,
2230 n_sq_elems, n_rq_elems);
2232 qedr_cleanup_kernel(dev, qp);
2237 static int qedr_free_qp_resources(struct qedr_dev *dev, struct qedr_qp *qp,
2238 struct ib_udata *udata)
2240 struct qedr_ucontext *ctx =
2241 rdma_udata_to_drv_context(udata, struct qedr_ucontext,
2245 if (qp->qp_type != IB_QPT_GSI) {
2246 rc = dev->ops->rdma_destroy_qp(dev->rdma_ctx, qp->qed_qp);
2251 if (qp->create_type == QEDR_QP_CREATE_USER)
2252 qedr_cleanup_user(dev, ctx, qp);
2254 qedr_cleanup_kernel(dev, qp);
2259 int qedr_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *attrs,
2260 struct ib_udata *udata)
2262 struct qedr_xrcd *xrcd = NULL;
2263 struct ib_pd *ibpd = ibqp->pd;
2264 struct qedr_pd *pd = get_qedr_pd(ibpd);
2265 struct qedr_dev *dev = get_qedr_dev(ibqp->device);
2266 struct qedr_qp *qp = get_qedr_qp(ibqp);
2269 if (attrs->create_flags)
2272 if (attrs->qp_type == IB_QPT_XRC_TGT)
2273 xrcd = get_qedr_xrcd(attrs->xrcd);
2275 pd = get_qedr_pd(ibpd);
2277 DP_DEBUG(dev, QEDR_MSG_QP, "create qp: called from %s, pd=%p\n",
2278 udata ? "user library" : "kernel", pd);
2280 rc = qedr_check_qp_attrs(ibpd, dev, attrs, udata);
2284 DP_DEBUG(dev, QEDR_MSG_QP,
2285 "create qp: called from %s, event_handler=%p, eepd=%p sq_cq=%p, sq_icid=%d, rq_cq=%p, rq_icid=%d\n",
2286 udata ? "user library" : "kernel", attrs->event_handler, pd,
2287 get_qedr_cq(attrs->send_cq),
2288 get_qedr_cq(attrs->send_cq)->icid,
2289 get_qedr_cq(attrs->recv_cq),
2290 attrs->recv_cq ? get_qedr_cq(attrs->recv_cq)->icid : 0);
2292 qedr_set_common_qp_params(dev, qp, pd, attrs);
2294 if (attrs->qp_type == IB_QPT_GSI)
2295 return qedr_create_gsi_qp(dev, attrs, qp);
2298 rc = qedr_create_user_qp(dev, qp, ibpd, udata, attrs);
2300 rc = qedr_create_kernel_qp(dev, qp, ibpd, attrs);
2305 qp->ibqp.qp_num = qp->qp_id;
2307 if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
2308 rc = xa_insert(&dev->qps, qp->qp_id, qp, GFP_KERNEL);
2310 goto out_free_qp_resources;
2315 out_free_qp_resources:
2316 qedr_free_qp_resources(dev, qp, udata);
2320 static enum ib_qp_state qedr_get_ibqp_state(enum qed_roce_qp_state qp_state)
2323 case QED_ROCE_QP_STATE_RESET:
2324 return IB_QPS_RESET;
2325 case QED_ROCE_QP_STATE_INIT:
2327 case QED_ROCE_QP_STATE_RTR:
2329 case QED_ROCE_QP_STATE_RTS:
2331 case QED_ROCE_QP_STATE_SQD:
2333 case QED_ROCE_QP_STATE_ERR:
2335 case QED_ROCE_QP_STATE_SQE:
2341 static enum qed_roce_qp_state qedr_get_state_from_ibqp(
2342 enum ib_qp_state qp_state)
2346 return QED_ROCE_QP_STATE_RESET;
2348 return QED_ROCE_QP_STATE_INIT;
2350 return QED_ROCE_QP_STATE_RTR;
2352 return QED_ROCE_QP_STATE_RTS;
2354 return QED_ROCE_QP_STATE_SQD;
2356 return QED_ROCE_QP_STATE_ERR;
2358 return QED_ROCE_QP_STATE_ERR;
2362 static int qedr_update_qp_state(struct qedr_dev *dev,
2364 enum qed_roce_qp_state cur_state,
2365 enum qed_roce_qp_state new_state)
2369 if (new_state == cur_state)
2372 switch (cur_state) {
2373 case QED_ROCE_QP_STATE_RESET:
2374 switch (new_state) {
2375 case QED_ROCE_QP_STATE_INIT:
2382 case QED_ROCE_QP_STATE_INIT:
2383 switch (new_state) {
2384 case QED_ROCE_QP_STATE_RTR:
2385 /* Update doorbell (in case post_recv was
2386 * done before move to RTR)
2389 if (rdma_protocol_roce(&dev->ibdev, 1)) {
2390 writel(qp->rq.db_data.raw, qp->rq.db);
2393 case QED_ROCE_QP_STATE_ERR:
2396 /* Invalid state change. */
2401 case QED_ROCE_QP_STATE_RTR:
2403 switch (new_state) {
2404 case QED_ROCE_QP_STATE_RTS:
2406 case QED_ROCE_QP_STATE_ERR:
2409 /* Invalid state change. */
2414 case QED_ROCE_QP_STATE_RTS:
2416 switch (new_state) {
2417 case QED_ROCE_QP_STATE_SQD:
2419 case QED_ROCE_QP_STATE_ERR:
2422 /* Invalid state change. */
2427 case QED_ROCE_QP_STATE_SQD:
2429 switch (new_state) {
2430 case QED_ROCE_QP_STATE_RTS:
2431 case QED_ROCE_QP_STATE_ERR:
2434 /* Invalid state change. */
2439 case QED_ROCE_QP_STATE_ERR:
2441 switch (new_state) {
2442 case QED_ROCE_QP_STATE_RESET:
2443 if ((qp->rq.prod != qp->rq.cons) ||
2444 (qp->sq.prod != qp->sq.cons)) {
2446 "Error->Reset with rq/sq not empty rq.prod=%x rq.cons=%x sq.prod=%x sq.cons=%x\n",
2447 qp->rq.prod, qp->rq.cons, qp->sq.prod,
2465 int qedr_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
2466 int attr_mask, struct ib_udata *udata)
2468 struct qedr_qp *qp = get_qedr_qp(ibqp);
2469 struct qed_rdma_modify_qp_in_params qp_params = { 0 };
2470 struct qedr_dev *dev = get_qedr_dev(&qp->dev->ibdev);
2471 const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
2472 enum ib_qp_state old_qp_state, new_qp_state;
2473 enum qed_roce_qp_state cur_state;
2476 DP_DEBUG(dev, QEDR_MSG_QP,
2477 "modify qp: qp %p attr_mask=0x%x, state=%d", qp, attr_mask,
2480 if (attr_mask & ~IB_QP_ATTR_STANDARD_BITS)
2483 old_qp_state = qedr_get_ibqp_state(qp->state);
2484 if (attr_mask & IB_QP_STATE)
2485 new_qp_state = attr->qp_state;
2487 new_qp_state = old_qp_state;
2489 if (rdma_protocol_roce(&dev->ibdev, 1)) {
2490 if (!ib_modify_qp_is_ok(old_qp_state, new_qp_state,
2491 ibqp->qp_type, attr_mask)) {
2493 "modify qp: invalid attribute mask=0x%x specified for\n"
2494 "qpn=0x%x of type=0x%x old_qp_state=0x%x, new_qp_state=0x%x\n",
2495 attr_mask, qp->qp_id, ibqp->qp_type,
2496 old_qp_state, new_qp_state);
2502 /* Translate the masks... */
2503 if (attr_mask & IB_QP_STATE) {
2504 SET_FIELD(qp_params.modify_flags,
2505 QED_RDMA_MODIFY_QP_VALID_NEW_STATE, 1);
2506 qp_params.new_state = qedr_get_state_from_ibqp(attr->qp_state);
2509 if (attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY)
2510 qp_params.sqd_async = true;
2512 if (attr_mask & IB_QP_PKEY_INDEX) {
2513 SET_FIELD(qp_params.modify_flags,
2514 QED_ROCE_MODIFY_QP_VALID_PKEY, 1);
2515 if (attr->pkey_index >= QEDR_ROCE_PKEY_TABLE_LEN) {
2520 qp_params.pkey = QEDR_ROCE_PKEY_DEFAULT;
2523 if (attr_mask & IB_QP_QKEY)
2524 qp->qkey = attr->qkey;
2526 if (attr_mask & IB_QP_ACCESS_FLAGS) {
2527 SET_FIELD(qp_params.modify_flags,
2528 QED_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN, 1);
2529 qp_params.incoming_rdma_read_en = attr->qp_access_flags &
2530 IB_ACCESS_REMOTE_READ;
2531 qp_params.incoming_rdma_write_en = attr->qp_access_flags &
2532 IB_ACCESS_REMOTE_WRITE;
2533 qp_params.incoming_atomic_en = attr->qp_access_flags &
2534 IB_ACCESS_REMOTE_ATOMIC;
2537 if (attr_mask & (IB_QP_AV | IB_QP_PATH_MTU)) {
2538 if (rdma_protocol_iwarp(&dev->ibdev, 1))
2541 if (attr_mask & IB_QP_PATH_MTU) {
2542 if (attr->path_mtu < IB_MTU_256 ||
2543 attr->path_mtu > IB_MTU_4096) {
2544 pr_err("error: Only MTU sizes of 256, 512, 1024, 2048 and 4096 are supported by RoCE\n");
2548 qp->mtu = min(ib_mtu_enum_to_int(attr->path_mtu),
2549 ib_mtu_enum_to_int(iboe_get_mtu
2555 ib_mtu_enum_to_int(iboe_get_mtu(dev->ndev->mtu));
2556 pr_err("Fixing zeroed MTU to qp->mtu = %d\n", qp->mtu);
2559 SET_FIELD(qp_params.modify_flags,
2560 QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR, 1);
2562 qp_params.traffic_class_tos = grh->traffic_class;
2563 qp_params.flow_label = grh->flow_label;
2564 qp_params.hop_limit_ttl = grh->hop_limit;
2566 qp->sgid_idx = grh->sgid_index;
2568 rc = get_gid_info_from_table(ibqp, attr, attr_mask, &qp_params);
2571 "modify qp: problems with GID index %d (rc=%d)\n",
2572 grh->sgid_index, rc);
2576 rc = qedr_get_dmac(dev, &attr->ah_attr,
2577 qp_params.remote_mac_addr);
2581 qp_params.use_local_mac = true;
2582 ether_addr_copy(qp_params.local_mac_addr, dev->ndev->dev_addr);
2584 DP_DEBUG(dev, QEDR_MSG_QP, "dgid=%x:%x:%x:%x\n",
2585 qp_params.dgid.dwords[0], qp_params.dgid.dwords[1],
2586 qp_params.dgid.dwords[2], qp_params.dgid.dwords[3]);
2587 DP_DEBUG(dev, QEDR_MSG_QP, "sgid=%x:%x:%x:%x\n",
2588 qp_params.sgid.dwords[0], qp_params.sgid.dwords[1],
2589 qp_params.sgid.dwords[2], qp_params.sgid.dwords[3]);
2590 DP_DEBUG(dev, QEDR_MSG_QP, "remote_mac=[%pM]\n",
2591 qp_params.remote_mac_addr);
2593 qp_params.mtu = qp->mtu;
2594 qp_params.lb_indication = false;
2597 if (!qp_params.mtu) {
2598 /* Stay with current MTU */
2600 qp_params.mtu = qp->mtu;
2603 ib_mtu_enum_to_int(iboe_get_mtu(dev->ndev->mtu));
2606 if (attr_mask & IB_QP_TIMEOUT) {
2607 SET_FIELD(qp_params.modify_flags,
2608 QED_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT, 1);
2610 /* The received timeout value is an exponent used like this:
2611 * "12.7.34 LOCAL ACK TIMEOUT
2612 * Value representing the transport (ACK) timeout for use by
2613 * the remote, expressed as: 4.096 * 2^timeout [usec]"
2614 * The FW expects timeout in msec so we need to divide the usec
2615 * result by 1000. We'll approximate 1000~2^10, and 4.096 ~ 2^2,
2616 * so we get: 2^2 * 2^timeout / 2^10 = 2^(timeout - 8).
2617 * A value of zero means an infinite timeout and is handled separately;
2618 * the 'max_t' makes sure that sub-1-msec values are configured as 1 msec.
2621 qp_params.ack_timeout =
2622 1 << max_t(int, attr->timeout - 8, 0);
2624 qp_params.ack_timeout = 0;
2626 qp->timeout = attr->timeout;
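		/* A worked example with illustrative values (not taken from the
		 * code above): attr->timeout = 14 means 4.096 * 2^14 usec,
		 * roughly 67 msec, and the approximation configures
		 * 1 << (14 - 8) = 64 msec; a small value such as
		 * attr->timeout = 5 (~0.13 msec) is clamped by the 'max_t'
		 * to 1 << 0 = 1 msec.
		 */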
2629 if (attr_mask & IB_QP_RETRY_CNT) {
2630 SET_FIELD(qp_params.modify_flags,
2631 QED_ROCE_MODIFY_QP_VALID_RETRY_CNT, 1);
2632 qp_params.retry_cnt = attr->retry_cnt;
2635 if (attr_mask & IB_QP_RNR_RETRY) {
2636 SET_FIELD(qp_params.modify_flags,
2637 QED_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT, 1);
2638 qp_params.rnr_retry_cnt = attr->rnr_retry;
2641 if (attr_mask & IB_QP_RQ_PSN) {
2642 SET_FIELD(qp_params.modify_flags,
2643 QED_ROCE_MODIFY_QP_VALID_RQ_PSN, 1);
2644 qp_params.rq_psn = attr->rq_psn;
2645 qp->rq_psn = attr->rq_psn;
2648 if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
2649 if (attr->max_rd_atomic > dev->attr.max_qp_req_rd_atomic_resc) {
2652 "unsupported max_rd_atomic=%d, supported=%d\n",
2653 attr->max_rd_atomic,
2654 dev->attr.max_qp_req_rd_atomic_resc);
2658 SET_FIELD(qp_params.modify_flags,
2659 QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ, 1);
2660 qp_params.max_rd_atomic_req = attr->max_rd_atomic;
2663 if (attr_mask & IB_QP_MIN_RNR_TIMER) {
2664 SET_FIELD(qp_params.modify_flags,
2665 QED_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER, 1);
2666 qp_params.min_rnr_nak_timer = attr->min_rnr_timer;
2669 if (attr_mask & IB_QP_SQ_PSN) {
2670 SET_FIELD(qp_params.modify_flags,
2671 QED_ROCE_MODIFY_QP_VALID_SQ_PSN, 1);
2672 qp_params.sq_psn = attr->sq_psn;
2673 qp->sq_psn = attr->sq_psn;
2676 if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
2677 if (attr->max_dest_rd_atomic >
2678 dev->attr.max_qp_resp_rd_atomic_resc) {
2680 "unsupported max_dest_rd_atomic=%d, supported=%d\n",
2681 attr->max_dest_rd_atomic,
2682 dev->attr.max_qp_resp_rd_atomic_resc);
2688 SET_FIELD(qp_params.modify_flags,
2689 QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP, 1);
2690 qp_params.max_rd_atomic_resp = attr->max_dest_rd_atomic;
2693 if (attr_mask & IB_QP_DEST_QPN) {
2694 SET_FIELD(qp_params.modify_flags,
2695 QED_ROCE_MODIFY_QP_VALID_DEST_QP, 1);
2697 qp_params.dest_qp = attr->dest_qp_num;
2698 qp->dest_qp_num = attr->dest_qp_num;
2701 cur_state = qp->state;
2703 /* Update the QP state before the actual ramrod to prevent a race with
2704 * fast path. Modifying the QP state to error will cause the device to
2705 * flush the CQEs, and while polling, the flushed CQEs will be considered
2706 * a potential issue if the QP isn't in the error state.
2708 if ((attr_mask & IB_QP_STATE) && qp->qp_type != IB_QPT_GSI &&
2709 !udata && qp_params.new_state == QED_ROCE_QP_STATE_ERR)
2710 qp->state = QED_ROCE_QP_STATE_ERR;
2712 if (qp->qp_type != IB_QPT_GSI)
2713 rc = dev->ops->rdma_modify_qp(dev->rdma_ctx,
2714 qp->qed_qp, &qp_params);
2716 if (attr_mask & IB_QP_STATE) {
2717 if ((qp->qp_type != IB_QPT_GSI) && (!udata))
2718 rc = qedr_update_qp_state(dev, qp, cur_state,
2719 qp_params.new_state);
2720 qp->state = qp_params.new_state;
2727 static int qedr_to_ib_qp_acc_flags(struct qed_rdma_query_qp_out_params *params)
2729 int ib_qp_acc_flags = 0;
2731 if (params->incoming_rdma_write_en)
2732 ib_qp_acc_flags |= IB_ACCESS_REMOTE_WRITE;
2733 if (params->incoming_rdma_read_en)
2734 ib_qp_acc_flags |= IB_ACCESS_REMOTE_READ;
2735 if (params->incoming_atomic_en)
2736 ib_qp_acc_flags |= IB_ACCESS_REMOTE_ATOMIC;
2737 ib_qp_acc_flags |= IB_ACCESS_LOCAL_WRITE;
2738 return ib_qp_acc_flags;
2741 int qedr_query_qp(struct ib_qp *ibqp,
2742 struct ib_qp_attr *qp_attr,
2743 int attr_mask, struct ib_qp_init_attr *qp_init_attr)
2745 struct qed_rdma_query_qp_out_params params;
2746 struct qedr_qp *qp = get_qedr_qp(ibqp);
2747 struct qedr_dev *dev = qp->dev;
2750 memset(&params, 0, sizeof(params));
2751 memset(qp_attr, 0, sizeof(*qp_attr));
2752 memset(qp_init_attr, 0, sizeof(*qp_init_attr));
2754 if (qp->qp_type != IB_QPT_GSI) {
2755 rc = dev->ops->rdma_query_qp(dev->rdma_ctx, qp->qed_qp, &params);
2758 qp_attr->qp_state = qedr_get_ibqp_state(params.state);
2760 qp_attr->qp_state = qedr_get_ibqp_state(QED_ROCE_QP_STATE_RTS);
2763 qp_attr->cur_qp_state = qedr_get_ibqp_state(params.state);
2764 qp_attr->path_mtu = ib_mtu_int_to_enum(params.mtu);
2765 qp_attr->path_mig_state = IB_MIG_MIGRATED;
2766 qp_attr->rq_psn = params.rq_psn;
2767 qp_attr->sq_psn = params.sq_psn;
2768 qp_attr->dest_qp_num = params.dest_qp;
2770 qp_attr->qp_access_flags = qedr_to_ib_qp_acc_flags(&params);
2772 qp_attr->cap.max_send_wr = qp->sq.max_wr;
2773 qp_attr->cap.max_recv_wr = qp->rq.max_wr;
2774 qp_attr->cap.max_send_sge = qp->sq.max_sges;
2775 qp_attr->cap.max_recv_sge = qp->rq.max_sges;
2776 qp_attr->cap.max_inline_data = dev->attr.max_inline;
2777 qp_init_attr->cap = qp_attr->cap;
2779 qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
2780 rdma_ah_set_grh(&qp_attr->ah_attr, NULL,
2781 params.flow_label, qp->sgid_idx,
2782 params.hop_limit_ttl, params.traffic_class_tos);
2783 rdma_ah_set_dgid_raw(&qp_attr->ah_attr, &params.dgid.bytes[0]);
2784 rdma_ah_set_port_num(&qp_attr->ah_attr, 1);
2785 rdma_ah_set_sl(&qp_attr->ah_attr, 0);
2786 qp_attr->timeout = qp->timeout;
2787 qp_attr->rnr_retry = params.rnr_retry;
2788 qp_attr->retry_cnt = params.retry_cnt;
2789 qp_attr->min_rnr_timer = params.min_rnr_nak_timer;
2790 qp_attr->pkey_index = params.pkey_index;
2791 qp_attr->port_num = 1;
2792 rdma_ah_set_path_bits(&qp_attr->ah_attr, 0);
2793 rdma_ah_set_static_rate(&qp_attr->ah_attr, 0);
2794 qp_attr->alt_pkey_index = 0;
2795 qp_attr->alt_port_num = 0;
2796 qp_attr->alt_timeout = 0;
2797 memset(&qp_attr->alt_ah_attr, 0, sizeof(qp_attr->alt_ah_attr));
2799 qp_attr->sq_draining = (params.state == QED_ROCE_QP_STATE_SQD) ? 1 : 0;
2800 qp_attr->max_dest_rd_atomic = params.max_dest_rd_atomic;
2801 qp_attr->max_rd_atomic = params.max_rd_atomic;
2802 qp_attr->en_sqd_async_notify = (params.sqd_async) ? 1 : 0;
2804 DP_DEBUG(dev, QEDR_MSG_QP, "QEDR_QUERY_QP: max_inline_data=%d\n",
2805 qp_attr->cap.max_inline_data);
2811 int qedr_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
2813 struct qedr_qp *qp = get_qedr_qp(ibqp);
2814 struct qedr_dev *dev = qp->dev;
2815 struct ib_qp_attr attr;
2818 DP_DEBUG(dev, QEDR_MSG_QP, "destroy qp: destroying %p, qp type=%d\n",
2821 if (rdma_protocol_roce(&dev->ibdev, 1)) {
2822 if ((qp->state != QED_ROCE_QP_STATE_RESET) &&
2823 (qp->state != QED_ROCE_QP_STATE_ERR) &&
2824 (qp->state != QED_ROCE_QP_STATE_INIT)) {
2826 attr.qp_state = IB_QPS_ERR;
2827 attr_mask |= IB_QP_STATE;
2829 /* Change the QP state to ERROR */
2830 qedr_modify_qp(ibqp, &attr, attr_mask, NULL);
2833 /* If connection establishment started the WAIT_FOR_CONNECT
2834 * bit will be on and we need to wait for the establishment
2835 * to complete before destroying the qp.
2837 if (test_and_set_bit(QEDR_IWARP_CM_WAIT_FOR_CONNECT,
2838 &qp->iwarp_cm_flags))
2839 wait_for_completion(&qp->iwarp_cm_comp);
2841 /* If graceful disconnect started, the WAIT_FOR_DISCONNECT
2842 * bit will be on, and we need to wait for the disconnect to
2843 * complete before continuing. We can use the same completion,
2844 * iwarp_cm_comp, since this is the only place that waits for
2845 * this completion and it is sequential. In addition,
2846 * disconnect can't occur before the connection is fully
2847 * established, therefore if WAIT_FOR_DISCONNECT is on it
2848 * means WAIT_FOR_CONNECT is also on and the completion for
2849 * CONNECT already occurred.
2851 if (test_and_set_bit(QEDR_IWARP_CM_WAIT_FOR_DISCONNECT,
2852 &qp->iwarp_cm_flags))
2853 wait_for_completion(&qp->iwarp_cm_comp);
2856 if (qp->qp_type == IB_QPT_GSI)
2857 qedr_destroy_gsi_qp(dev);
2859 /* We need to remove the entry from the xarray before we release the
2860 * qp_id to avoid a race of the qp_id being reallocated and failing
2863 if (rdma_protocol_iwarp(&dev->ibdev, 1))
2864 xa_erase(&dev->qps, qp->qp_id);
2866 qedr_free_qp_resources(dev, qp, udata);
2868 if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
2869 qedr_iw_qp_rem_ref(&qp->ibqp);
2870 wait_for_completion(&qp->qp_rel_comp);
2876 int qedr_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr,
2877 struct ib_udata *udata)
2879 struct qedr_ah *ah = get_qedr_ah(ibah);
2881 rdma_copy_ah_attr(&ah->attr, init_attr->ah_attr);
2886 int qedr_destroy_ah(struct ib_ah *ibah, u32 flags)
2888 struct qedr_ah *ah = get_qedr_ah(ibah);
2890 rdma_destroy_ah_attr(&ah->attr);
2894 static void free_mr_info(struct qedr_dev *dev, struct mr_info *info)
2896 struct qedr_pbl *pbl, *tmp;
2898 if (info->pbl_table)
2899 list_add_tail(&info->pbl_table->list_entry,
2900 &info->free_pbl_list);
2902 if (!list_empty(&info->inuse_pbl_list))
2903 list_splice(&info->inuse_pbl_list, &info->free_pbl_list);
2905 list_for_each_entry_safe(pbl, tmp, &info->free_pbl_list, list_entry) {
2906 list_del(&pbl->list_entry);
2907 qedr_free_pbl(dev, &info->pbl_info, pbl);
2911 static int init_mr_info(struct qedr_dev *dev, struct mr_info *info,
2912 size_t page_list_len, bool two_layered)
2914 struct qedr_pbl *tmp;
2917 INIT_LIST_HEAD(&info->free_pbl_list);
2918 INIT_LIST_HEAD(&info->inuse_pbl_list);
2920 rc = qedr_prepare_pbl_tbl(dev, &info->pbl_info,
2921 page_list_len, two_layered);
2925 info->pbl_table = qedr_alloc_pbl_tbl(dev, &info->pbl_info, GFP_KERNEL);
2926 if (IS_ERR(info->pbl_table)) {
2927 rc = PTR_ERR(info->pbl_table);
2931 DP_DEBUG(dev, QEDR_MSG_MR, "pbl_table_pa = %pa\n",
2932 &info->pbl_table->pa);
2934 /* In the usual case we use 2 PBLs, so we add one to the free
2935 * list and allocate another one
2937 tmp = qedr_alloc_pbl_tbl(dev, &info->pbl_info, GFP_KERNEL);
2939 DP_DEBUG(dev, QEDR_MSG_MR, "Extra PBL is not allocated\n");
2943 list_add_tail(&tmp->list_entry, &info->free_pbl_list);
2945 DP_DEBUG(dev, QEDR_MSG_MR, "extra pbl_table_pa = %pa\n", &tmp->pa);
2949 free_mr_info(dev, info);
2954 struct ib_mr *qedr_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
2955 u64 usr_addr, int acc, struct ib_udata *udata)
2957 struct qedr_dev *dev = get_qedr_dev(ibpd->device);
2962 pd = get_qedr_pd(ibpd);
2963 DP_DEBUG(dev, QEDR_MSG_MR,
2964 "qedr_register user mr pd = %d start = %lld, len = %lld, usr_addr = %lld, acc = %d\n",
2965 pd->pd_id, start, len, usr_addr, acc);
2967 if (acc & IB_ACCESS_REMOTE_WRITE && !(acc & IB_ACCESS_LOCAL_WRITE))
2968 return ERR_PTR(-EINVAL);
2970 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
2974 mr->type = QEDR_MR_USER;
2976 mr->umem = ib_umem_get(ibpd->device, start, len, acc);
2977 if (IS_ERR(mr->umem)) {
2982 rc = init_mr_info(dev, &mr->info,
2983 ib_umem_num_dma_blocks(mr->umem, PAGE_SIZE), 1);
2987 qedr_populate_pbls(dev, mr->umem, mr->info.pbl_table,
2988 &mr->info.pbl_info, PAGE_SHIFT);
2990 rc = dev->ops->rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid);
2993 DP_ERR(dev, "Out of MR resources\n");
2995 DP_ERR(dev, "roce alloc tid returned error %d\n", rc);
3000 /* Index only, 18 bit long, lkey = itid << 8 | key */
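	/* Illustration with hypothetical values: itid = 0x0002 and key = 0x34
	 * yield lkey = (0x0002 << 8) | 0x34 = 0x0234, i.e. the low byte of
	 * the lkey is the key and the upper bits carry the 18-bit itid.
	 */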
3001 mr->hw_mr.tid_type = QED_RDMA_TID_REGISTERED_MR;
3003 mr->hw_mr.pd = pd->pd_id;
3004 mr->hw_mr.local_read = 1;
3005 mr->hw_mr.local_write = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0;
3006 mr->hw_mr.remote_read = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
3007 mr->hw_mr.remote_write = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
3008 mr->hw_mr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
3009 mr->hw_mr.mw_bind = false;
3010 mr->hw_mr.pbl_ptr = mr->info.pbl_table[0].pa;
3011 mr->hw_mr.pbl_two_level = mr->info.pbl_info.two_layered;
3012 mr->hw_mr.pbl_page_size_log = ilog2(mr->info.pbl_info.pbl_size);
3013 mr->hw_mr.page_size_log = PAGE_SHIFT;
3014 mr->hw_mr.length = len;
3015 mr->hw_mr.vaddr = usr_addr;
3016 mr->hw_mr.phy_mr = false;
3017 mr->hw_mr.dma_mr = false;
3019 rc = dev->ops->rdma_register_tid(dev->rdma_ctx, &mr->hw_mr);
3021 DP_ERR(dev, "roce register tid returned an error %d\n", rc);
3025 mr->ibmr.lkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
3026 if (mr->hw_mr.remote_write || mr->hw_mr.remote_read ||
3027 mr->hw_mr.remote_atomic)
3028 mr->ibmr.rkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
3030 DP_DEBUG(dev, QEDR_MSG_MR, "register user mr lkey: %x\n",
3035 dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
3037 qedr_free_pbl(dev, &mr->info.pbl_info, mr->info.pbl_table);
3043 int qedr_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata)
3045 struct qedr_mr *mr = get_qedr_mr(ib_mr);
3046 struct qedr_dev *dev = get_qedr_dev(ib_mr->device);
3049 rc = dev->ops->rdma_deregister_tid(dev->rdma_ctx, mr->hw_mr.itid);
3053 dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
3055 if (mr->type != QEDR_MR_DMA)
3056 free_mr_info(dev, &mr->info);
3058 /* it could be user registered memory. */
3059 ib_umem_release(mr->umem);
3066 static struct qedr_mr *__qedr_alloc_mr(struct ib_pd *ibpd,
3067 int max_page_list_len)
3069 struct qedr_pd *pd = get_qedr_pd(ibpd);
3070 struct qedr_dev *dev = get_qedr_dev(ibpd->device);
3074 DP_DEBUG(dev, QEDR_MSG_MR,
3075 "qedr_alloc_frmr pd = %d max_page_list_len= %d\n", pd->pd_id,
3078 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
3083 mr->type = QEDR_MR_FRMR;
3085 rc = init_mr_info(dev, &mr->info, max_page_list_len, 1);
3089 rc = dev->ops->rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid);
3092 DP_ERR(dev, "Out of MR resources\n");
3094 DP_ERR(dev, "roce alloc tid returned error %d\n", rc);
3099 /* Index only, 18 bit long, lkey = itid << 8 | key */
3100 mr->hw_mr.tid_type = QED_RDMA_TID_FMR;
3102 mr->hw_mr.pd = pd->pd_id;
3103 mr->hw_mr.local_read = 1;
3104 mr->hw_mr.local_write = 0;
3105 mr->hw_mr.remote_read = 0;
3106 mr->hw_mr.remote_write = 0;
3107 mr->hw_mr.remote_atomic = 0;
3108 mr->hw_mr.mw_bind = false;
3109 mr->hw_mr.pbl_ptr = 0;
3110 mr->hw_mr.pbl_two_level = mr->info.pbl_info.two_layered;
3111 mr->hw_mr.pbl_page_size_log = ilog2(mr->info.pbl_info.pbl_size);
3112 mr->hw_mr.length = 0;
3113 mr->hw_mr.vaddr = 0;
3114 mr->hw_mr.phy_mr = true;
3115 mr->hw_mr.dma_mr = false;
3117 rc = dev->ops->rdma_register_tid(dev->rdma_ctx, &mr->hw_mr);
3119 DP_ERR(dev, "roce register tid returned an error %d\n", rc);
3123 mr->ibmr.lkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
3124 mr->ibmr.rkey = mr->ibmr.lkey;
3126 DP_DEBUG(dev, QEDR_MSG_MR, "alloc frmr: %x\n", mr->ibmr.lkey);
3130 dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
3132 qedr_free_pbl(dev, &mr->info.pbl_info, mr->info.pbl_table);
3138 struct ib_mr *qedr_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type,
3143 if (mr_type != IB_MR_TYPE_MEM_REG)
3144 return ERR_PTR(-EINVAL);
3146 mr = __qedr_alloc_mr(ibpd, max_num_sg);
3149 return ERR_PTR(-EINVAL);
3154 static int qedr_set_page(struct ib_mr *ibmr, u64 addr)
3156 struct qedr_mr *mr = get_qedr_mr(ibmr);
3157 struct qedr_pbl *pbl_table;
3158 struct regpair *pbe;
3161 if (unlikely(mr->npages == mr->info.pbl_info.num_pbes)) {
3162 DP_ERR(mr->dev, "qedr_set_page fails when %d\n", mr->npages);
3166 DP_DEBUG(mr->dev, QEDR_MSG_MR, "qedr_set_page pages[%d] = 0x%llx\n",
3169 pbes_in_page = mr->info.pbl_info.pbl_size / sizeof(u64);
3170 pbl_table = mr->info.pbl_table + (mr->npages / pbes_in_page);
3171 pbe = (struct regpair *)pbl_table->va;
3172 pbe += mr->npages % pbes_in_page;
3173 pbe->lo = cpu_to_le32((u32)addr);
3174 pbe->hi = cpu_to_le32((u32)upper_32_bits(addr));
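	/* Illustration with hypothetical sizes: pbl_size = 4096 gives
	 * pbes_in_page = 4096 / 8 = 512, so page number 600 lands in
	 * pbl_table[600 / 512] = pbl_table[1] at entry 600 % 512 = 88.
	 */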
3181 static void handle_completed_mrs(struct qedr_dev *dev, struct mr_info *info)
3183 int work = info->completed - info->completed_handled - 1;
3185 DP_DEBUG(dev, QEDR_MSG_MR, "Special FMR work = %d\n", work);
3186 while (work-- > 0 && !list_empty(&info->inuse_pbl_list)) {
3187 struct qedr_pbl *pbl;
3189 /* Free all the page lists that can be freed (all the ones that
3190 * were invalidated), under the assumption that if an FMR completed
3191 * successfully, then any invalidate operation issued before it has
3192 * also completed.
3194 pbl = list_first_entry(&info->inuse_pbl_list,
3195 struct qedr_pbl, list_entry);
3196 list_move_tail(&pbl->list_entry, &info->free_pbl_list);
3197 info->completed_handled++;
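	/* Illustration of the accounting above, with hypothetical counts:
	 * completed = 5 and completed_handled = 2 give work = 5 - 2 - 1 = 2,
	 * so at most two PBLs move back to the free list per call; the '- 1'
	 * leaves a margin of one completion unhandled.
	 */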
3201 int qedr_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
3202 int sg_nents, unsigned int *sg_offset)
3204 struct qedr_mr *mr = get_qedr_mr(ibmr);
3208 handle_completed_mrs(mr->dev, &mr->info);
3209 return ib_sg_to_pages(ibmr, sg, sg_nents, NULL, qedr_set_page);
3212 struct ib_mr *qedr_get_dma_mr(struct ib_pd *ibpd, int acc)
3214 struct qedr_dev *dev = get_qedr_dev(ibpd->device);
3215 struct qedr_pd *pd = get_qedr_pd(ibpd);
3219 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
3221 return ERR_PTR(-ENOMEM);
3223 mr->type = QEDR_MR_DMA;
3225 rc = dev->ops->rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid);
3228 DP_ERR(dev, "Out of MR resources\n");
3230 DP_ERR(dev, "roce alloc tid returned error %d\n", rc);
3235 /* index only, 18 bit long, lkey = itid << 8 | key */
3236 mr->hw_mr.tid_type = QED_RDMA_TID_REGISTERED_MR;
3237 mr->hw_mr.pd = pd->pd_id;
3238 mr->hw_mr.local_read = 1;
3239 mr->hw_mr.local_write = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0;
3240 mr->hw_mr.remote_read = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
3241 mr->hw_mr.remote_write = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
3242 mr->hw_mr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
3243 mr->hw_mr.dma_mr = true;
3245 rc = dev->ops->rdma_register_tid(dev->rdma_ctx, &mr->hw_mr);
3247 DP_ERR(dev, "roce register tid returned an error %d\n", rc);
3251 mr->ibmr.lkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
3252 if (mr->hw_mr.remote_write || mr->hw_mr.remote_read ||
3253 mr->hw_mr.remote_atomic)
3254 mr->ibmr.rkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
3256 DP_DEBUG(dev, QEDR_MSG_MR, "get dma mr: lkey = %x\n", mr->ibmr.lkey);
3260 dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
3266 static inline int qedr_wq_is_full(struct qedr_qp_hwq_info *wq)
3268 return (((wq->prod + 1) % wq->max_wr) == wq->cons);
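	/* A minimal illustration with hypothetical values: max_wr = 4,
	 * prod = 3 and cons = 0 give (3 + 1) % 4 == 0 == cons, so the WQ
	 * reports full after three outstanding WRs; one slot is left unused
	 * so that a full ring can be told apart from an empty one
	 * (prod == cons).
	 */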
3271 static int sge_data_len(struct ib_sge *sg_list, int num_sge)
3275 for (i = 0; i < num_sge; i++)
3276 len += sg_list[i].length;
3281 static void swap_wqe_data64(u64 *p)
3285 for (i = 0; i < QEDR_SQE_ELEMENT_SIZE / sizeof(u64); i++, p++)
3286 *p = cpu_to_be64(cpu_to_le64(*p));
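	/* Note: cpu_to_be64(cpu_to_le64(x)) reduces to an unconditional
	 * 64-bit byte swap on both little- and big-endian hosts, since
	 * exactly one of the two conversions is a no-op on any given CPU.
	 */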
3289 static u32 qedr_prepare_sq_inline_data(struct qedr_dev *dev,
3290 struct qedr_qp *qp, u8 *wqe_size,
3291 const struct ib_send_wr *wr,
3292 const struct ib_send_wr **bad_wr,
3295 u32 data_size = sge_data_len(wr->sg_list, wr->num_sge);
3296 char *seg_prt, *wqe;
3299 if (data_size > ROCE_REQ_MAX_INLINE_DATA_SIZE) {
3300 DP_ERR(dev, "Too much inline data in WR: %d\n", data_size);
3314 /* Copy data inline */
3315 for (i = 0; i < wr->num_sge; i++) {
3316 u32 len = wr->sg_list[i].length;
3317 void *src = (void *)(uintptr_t)wr->sg_list[i].addr;
3322 /* New segment required */
3324 wqe = (char *)qed_chain_produce(&qp->sq.pbl);
3326 seg_siz = sizeof(struct rdma_sq_common_wqe);
3330 /* Calculate currently allowed length */
3331 cur = min_t(u32, len, seg_siz);
3332 memcpy(seg_prt, src, cur);
3334 /* Update segment variables */
3338 /* Update sge variables */
3342 /* Swap fully-completed segments */
3344 swap_wqe_data64((u64 *)wqe);
3348 /* Swap the last, not fully filled, segment */
3350 swap_wqe_data64((u64 *)wqe);
3355 #define RQ_SGE_SET(sge, vaddr, vlength, vflags) \
3357 DMA_REGPAIR_LE(sge->addr, vaddr); \
3358 (sge)->length = cpu_to_le32(vlength); \
3359 (sge)->flags = cpu_to_le32(vflags); \
3362 #define SRQ_HDR_SET(hdr, vwr_id, num_sge) \
3364 DMA_REGPAIR_LE(hdr->wr_id, vwr_id); \
3365 (hdr)->num_sges = num_sge; \
3368 #define SRQ_SGE_SET(sge, vaddr, vlength, vlkey) \
3370 DMA_REGPAIR_LE(sge->addr, vaddr); \
3371 (sge)->length = cpu_to_le32(vlength); \
3372 (sge)->l_key = cpu_to_le32(vlkey); \
3375 static u32 qedr_prepare_sq_sges(struct qedr_qp *qp, u8 *wqe_size,
3376 const struct ib_send_wr *wr)
3381 for (i = 0; i < wr->num_sge; i++) {
3382 struct rdma_sq_sge *sge = qed_chain_produce(&qp->sq.pbl);
3384 DMA_REGPAIR_LE(sge->addr, wr->sg_list[i].addr);
3385 sge->l_key = cpu_to_le32(wr->sg_list[i].lkey);
3386 sge->length = cpu_to_le32(wr->sg_list[i].length);
3387 data_size += wr->sg_list[i].length;
3391 *wqe_size += wr->num_sge;
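	/* Illustrative example: a WR with two SGEs of 1024 and 512 bytes
	 * produces two ring elements, returns data_size = 1536 and, when
	 * wqe_size is supplied, adds 2 to *wqe_size.
	 */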
3396 static u32 qedr_prepare_sq_rdma_data(struct qedr_dev *dev,
3398 struct rdma_sq_rdma_wqe_1st *rwqe,
3399 struct rdma_sq_rdma_wqe_2nd *rwqe2,
3400 const struct ib_send_wr *wr,
3401 const struct ib_send_wr **bad_wr)
3403 rwqe2->r_key = cpu_to_le32(rdma_wr(wr)->rkey);
3404 DMA_REGPAIR_LE(rwqe2->remote_va, rdma_wr(wr)->remote_addr);
3406 if (wr->send_flags & IB_SEND_INLINE &&
3407 (wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM ||
3408 wr->opcode == IB_WR_RDMA_WRITE)) {
3411 SET_FIELD2(flags, RDMA_SQ_RDMA_WQE_1ST_INLINE_FLG, 1);
3412 return qedr_prepare_sq_inline_data(dev, qp, &rwqe->wqe_size, wr,
3413 bad_wr, &rwqe->flags, flags);
3416 return qedr_prepare_sq_sges(qp, &rwqe->wqe_size, wr);
3419 static u32 qedr_prepare_sq_send_data(struct qedr_dev *dev,
3421 struct rdma_sq_send_wqe_1st *swqe,
3422 struct rdma_sq_send_wqe_2st *swqe2,
3423 const struct ib_send_wr *wr,
3424 const struct ib_send_wr **bad_wr)
3426 memset(swqe2, 0, sizeof(*swqe2));
3427 if (wr->send_flags & IB_SEND_INLINE) {
3430 SET_FIELD2(flags, RDMA_SQ_SEND_WQE_INLINE_FLG, 1);
3431 return qedr_prepare_sq_inline_data(dev, qp, &swqe->wqe_size, wr,
3432 bad_wr, &swqe->flags, flags);
3435 return qedr_prepare_sq_sges(qp, &swqe->wqe_size, wr);
3438 static int qedr_prepare_reg(struct qedr_qp *qp,
3439 struct rdma_sq_fmr_wqe_1st *fwqe1,
3440 const struct ib_reg_wr *wr)
3442 struct qedr_mr *mr = get_qedr_mr(wr->mr);
3443 struct rdma_sq_fmr_wqe_2nd *fwqe2;
3445 fwqe2 = (struct rdma_sq_fmr_wqe_2nd *)qed_chain_produce(&qp->sq.pbl);
3446 fwqe1->addr.hi = upper_32_bits(mr->ibmr.iova);
3447 fwqe1->addr.lo = lower_32_bits(mr->ibmr.iova);
3448 fwqe1->l_key = wr->key;
3450 fwqe2->access_ctrl = 0;
3452 SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_REMOTE_READ,
3453 !!(wr->access & IB_ACCESS_REMOTE_READ));
3454 SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_REMOTE_WRITE,
3455 !!(wr->access & IB_ACCESS_REMOTE_WRITE));
3456 SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_ENABLE_ATOMIC,
3457 !!(wr->access & IB_ACCESS_REMOTE_ATOMIC));
3458 SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_LOCAL_READ, 1);
3459 SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_LOCAL_WRITE,
3460 !!(wr->access & IB_ACCESS_LOCAL_WRITE));
3461 fwqe2->fmr_ctrl = 0;
3463 SET_FIELD2(fwqe2->fmr_ctrl, RDMA_SQ_FMR_WQE_2ND_PAGE_SIZE_LOG,
3464 ilog2(mr->ibmr.page_size) - 12);
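	/* Illustration: a 4 KiB MR page size gives ilog2(4096) - 12 = 0,
	 * i.e. the field is expressed relative to a 4 KiB base; a
	 * hypothetical 2 MiB page size would give 21 - 12 = 9.
	 */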
3466 fwqe2->length_hi = 0;
3467 fwqe2->length_lo = mr->ibmr.length;
3468 fwqe2->pbl_addr.hi = upper_32_bits(mr->info.pbl_table->pa);
3469 fwqe2->pbl_addr.lo = lower_32_bits(mr->info.pbl_table->pa);
3471 qp->wqe_wr_id[qp->sq.prod].mr = mr;
3476 static enum ib_wc_opcode qedr_ib_to_wc_opcode(enum ib_wr_opcode opcode)
3479 case IB_WR_RDMA_WRITE:
3480 case IB_WR_RDMA_WRITE_WITH_IMM:
3481 return IB_WC_RDMA_WRITE;
3482 case IB_WR_SEND_WITH_IMM:
3484 case IB_WR_SEND_WITH_INV:
3486 case IB_WR_RDMA_READ:
3487 case IB_WR_RDMA_READ_WITH_INV:
3488 return IB_WC_RDMA_READ;
3489 case IB_WR_ATOMIC_CMP_AND_SWP:
3490 return IB_WC_COMP_SWAP;
3491 case IB_WR_ATOMIC_FETCH_AND_ADD:
3492 return IB_WC_FETCH_ADD;
3494 return IB_WC_REG_MR;
3495 case IB_WR_LOCAL_INV:
3496 return IB_WC_LOCAL_INV;
3502 static inline bool qedr_can_post_send(struct qedr_qp *qp,
3503 const struct ib_send_wr *wr)
3505 int wq_is_full, err_wr, pbl_is_full;
3506 struct qedr_dev *dev = qp->dev;
3508 /* prevent SQ overflow and/or processing of a bad WR */
3509 err_wr = wr->num_sge > qp->sq.max_sges;
3510 wq_is_full = qedr_wq_is_full(&qp->sq);
3511 pbl_is_full = qed_chain_get_elem_left_u32(&qp->sq.pbl) <
3512 QEDR_MAX_SQE_ELEMENTS_PER_SQE;
3513 if (wq_is_full || err_wr || pbl_is_full) {
3514 if (wq_is_full && !(qp->err_bitmap & QEDR_QP_ERR_SQ_FULL)) {
3516 "error: WQ is full. Post send on QP %p failed (this error appears only once)\n",
3518 qp->err_bitmap |= QEDR_QP_ERR_SQ_FULL;
3521 if (err_wr && !(qp->err_bitmap & QEDR_QP_ERR_BAD_SR)) {
3523 "error: WR is bad. Post send on QP %p failed (this error appears only once)\n",
3525 qp->err_bitmap |= QEDR_QP_ERR_BAD_SR;
3529 !(qp->err_bitmap & QEDR_QP_ERR_SQ_PBL_FULL)) {
3531 "error: WQ PBL is full. Post send on QP %p failed (this error appears only once)\n",
3533 qp->err_bitmap |= QEDR_QP_ERR_SQ_PBL_FULL;
3540 static int __qedr_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
3541 const struct ib_send_wr **bad_wr)
3543 struct qedr_dev *dev = get_qedr_dev(ibqp->device);
3544 struct qedr_qp *qp = get_qedr_qp(ibqp);
3545 struct rdma_sq_atomic_wqe_1st *awqe1;
3546 struct rdma_sq_atomic_wqe_2nd *awqe2;
3547 struct rdma_sq_atomic_wqe_3rd *awqe3;
3548 struct rdma_sq_send_wqe_2st *swqe2;
3549 struct rdma_sq_local_inv_wqe *iwqe;
3550 struct rdma_sq_rdma_wqe_2nd *rwqe2;
3551 struct rdma_sq_send_wqe_1st *swqe;
3552 struct rdma_sq_rdma_wqe_1st *rwqe;
3553 struct rdma_sq_fmr_wqe_1st *fwqe1;
3554 struct rdma_sq_common_wqe *wqe;
3559 if (!qedr_can_post_send(qp, wr)) {
3564 wqe = qed_chain_produce(&qp->sq.pbl);
3565 qp->wqe_wr_id[qp->sq.prod].signaled =
3566 !!(wr->send_flags & IB_SEND_SIGNALED) || qp->signaled;
3569 SET_FIELD2(wqe->flags, RDMA_SQ_SEND_WQE_SE_FLG,
3570 !!(wr->send_flags & IB_SEND_SOLICITED));
3571 comp = (!!(wr->send_flags & IB_SEND_SIGNALED)) || qp->signaled;
3572 SET_FIELD2(wqe->flags, RDMA_SQ_SEND_WQE_COMP_FLG, comp);
3573 SET_FIELD2(wqe->flags, RDMA_SQ_SEND_WQE_RD_FENCE_FLG,
3574 !!(wr->send_flags & IB_SEND_FENCE));
3575 wqe->prev_wqe_size = qp->prev_wqe_size;
3577 qp->wqe_wr_id[qp->sq.prod].opcode = qedr_ib_to_wc_opcode(wr->opcode);
3579 switch (wr->opcode) {
3580 case IB_WR_SEND_WITH_IMM:
3581 if (unlikely(rdma_protocol_iwarp(&dev->ibdev, 1))) {
3586 wqe->req_type = RDMA_SQ_REQ_TYPE_SEND_WITH_IMM;
3587 swqe = (struct rdma_sq_send_wqe_1st *)wqe;
3589 swqe2 = qed_chain_produce(&qp->sq.pbl);
3591 swqe->inv_key_or_imm_data = cpu_to_le32(be32_to_cpu(wr->ex.imm_data));
3592 length = qedr_prepare_sq_send_data(dev, qp, swqe, swqe2,
3594 swqe->length = cpu_to_le32(length);
3595 qp->wqe_wr_id[qp->sq.prod].wqe_size = swqe->wqe_size;
3596 qp->prev_wqe_size = swqe->wqe_size;
3597 qp->wqe_wr_id[qp->sq.prod].bytes_len = swqe->length;
3600 wqe->req_type = RDMA_SQ_REQ_TYPE_SEND;
3601 swqe = (struct rdma_sq_send_wqe_1st *)wqe;
3604 swqe2 = qed_chain_produce(&qp->sq.pbl);
3605 length = qedr_prepare_sq_send_data(dev, qp, swqe, swqe2,
3607 swqe->length = cpu_to_le32(length);
3608 qp->wqe_wr_id[qp->sq.prod].wqe_size = swqe->wqe_size;
3609 qp->prev_wqe_size = swqe->wqe_size;
3610 qp->wqe_wr_id[qp->sq.prod].bytes_len = swqe->length;
3612 case IB_WR_SEND_WITH_INV:
3613 wqe->req_type = RDMA_SQ_REQ_TYPE_SEND_WITH_INVALIDATE;
3614 swqe = (struct rdma_sq_send_wqe_1st *)wqe;
3615 swqe2 = qed_chain_produce(&qp->sq.pbl);
3617 swqe->inv_key_or_imm_data = cpu_to_le32(wr->ex.invalidate_rkey);
3618 length = qedr_prepare_sq_send_data(dev, qp, swqe, swqe2,
3620 swqe->length = cpu_to_le32(length);
3621 qp->wqe_wr_id[qp->sq.prod].wqe_size = swqe->wqe_size;
3622 qp->prev_wqe_size = swqe->wqe_size;
3623 qp->wqe_wr_id[qp->sq.prod].bytes_len = swqe->length;
3626 case IB_WR_RDMA_WRITE_WITH_IMM:
3627 if (unlikely(rdma_protocol_iwarp(&dev->ibdev, 1))) {
3632 wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_WR_WITH_IMM;
3633 rwqe = (struct rdma_sq_rdma_wqe_1st *)wqe;
3636 rwqe->imm_data = htonl(cpu_to_le32(wr->ex.imm_data));
3637 rwqe2 = qed_chain_produce(&qp->sq.pbl);
3638 length = qedr_prepare_sq_rdma_data(dev, qp, rwqe, rwqe2,
3640 rwqe->length = cpu_to_le32(length);
3641 qp->wqe_wr_id[qp->sq.prod].wqe_size = rwqe->wqe_size;
3642 qp->prev_wqe_size = rwqe->wqe_size;
3643 qp->wqe_wr_id[qp->sq.prod].bytes_len = rwqe->length;
3645 case IB_WR_RDMA_WRITE:
3646 wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_WR;
3647 rwqe = (struct rdma_sq_rdma_wqe_1st *)wqe;
3650 rwqe2 = qed_chain_produce(&qp->sq.pbl);
3651 length = qedr_prepare_sq_rdma_data(dev, qp, rwqe, rwqe2,
3653 rwqe->length = cpu_to_le32(length);
3654 qp->wqe_wr_id[qp->sq.prod].wqe_size = rwqe->wqe_size;
3655 qp->prev_wqe_size = rwqe->wqe_size;
3656 qp->wqe_wr_id[qp->sq.prod].bytes_len = rwqe->length;
3658 case IB_WR_RDMA_READ_WITH_INV:
3659 SET_FIELD2(wqe->flags, RDMA_SQ_RDMA_WQE_1ST_READ_INV_FLG, 1);
3660 fallthrough; /* handled identically to RDMA READ */
3662 case IB_WR_RDMA_READ:
3663 wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_RD;
3664 rwqe = (struct rdma_sq_rdma_wqe_1st *)wqe;
3667 rwqe2 = qed_chain_produce(&qp->sq.pbl);
3668 length = qedr_prepare_sq_rdma_data(dev, qp, rwqe, rwqe2,
3670 rwqe->length = cpu_to_le32(length);
3671 qp->wqe_wr_id[qp->sq.prod].wqe_size = rwqe->wqe_size;
3672 qp->prev_wqe_size = rwqe->wqe_size;
3673 qp->wqe_wr_id[qp->sq.prod].bytes_len = rwqe->length;
3676 case IB_WR_ATOMIC_CMP_AND_SWP:
3677 case IB_WR_ATOMIC_FETCH_AND_ADD:
3678 awqe1 = (struct rdma_sq_atomic_wqe_1st *)wqe;
3679 awqe1->wqe_size = 4;
3681 awqe2 = qed_chain_produce(&qp->sq.pbl);
3682 DMA_REGPAIR_LE(awqe2->remote_va, atomic_wr(wr)->remote_addr);
3683 awqe2->r_key = cpu_to_le32(atomic_wr(wr)->rkey);
3685 awqe3 = qed_chain_produce(&qp->sq.pbl);
3687 if (wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
3688 wqe->req_type = RDMA_SQ_REQ_TYPE_ATOMIC_ADD;
3689 DMA_REGPAIR_LE(awqe3->swap_data,
3690 atomic_wr(wr)->compare_add);
3692 wqe->req_type = RDMA_SQ_REQ_TYPE_ATOMIC_CMP_AND_SWAP;
3693 DMA_REGPAIR_LE(awqe3->swap_data,
3694 atomic_wr(wr)->swap);
3695 DMA_REGPAIR_LE(awqe3->cmp_data,
3696 atomic_wr(wr)->compare_add);
3699 qedr_prepare_sq_sges(qp, NULL, wr);
3701 qp->wqe_wr_id[qp->sq.prod].wqe_size = awqe1->wqe_size;
3702 qp->prev_wqe_size = awqe1->wqe_size;
3705 case IB_WR_LOCAL_INV:
3706 iwqe = (struct rdma_sq_local_inv_wqe *)wqe;
3709 iwqe->req_type = RDMA_SQ_REQ_TYPE_LOCAL_INVALIDATE;
3710 iwqe->inv_l_key = wr->ex.invalidate_rkey;
3711 qp->wqe_wr_id[qp->sq.prod].wqe_size = iwqe->wqe_size;
3712 qp->prev_wqe_size = iwqe->wqe_size;
3715 DP_DEBUG(dev, QEDR_MSG_CQ, "REG_MR\n");
3716 wqe->req_type = RDMA_SQ_REQ_TYPE_FAST_MR;
3717 fwqe1 = (struct rdma_sq_fmr_wqe_1st *)wqe;
3718 fwqe1->wqe_size = 2;
3720 rc = qedr_prepare_reg(qp, fwqe1, reg_wr(wr));
3722 DP_ERR(dev, "IB_REG_MR failed rc=%d\n", rc);
3727 qp->wqe_wr_id[qp->sq.prod].wqe_size = fwqe1->wqe_size;
3728 qp->prev_wqe_size = fwqe1->wqe_size;
3731 DP_ERR(dev, "invalid opcode 0x%x!\n", wr->opcode);
3740 /* Restore prod to its position before
3741 * this WR was processed
3743 value = le16_to_cpu(qp->sq.db_data.data.value);
3744 qed_chain_set_prod(&qp->sq.pbl, value, wqe);
3746 /* Restore prev_wqe_size */
3747 qp->prev_wqe_size = wqe->prev_wqe_size;
3749 DP_ERR(dev, "POST SEND FAILED\n");
3755 int qedr_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
3756 const struct ib_send_wr **bad_wr)
3758 struct qedr_dev *dev = get_qedr_dev(ibqp->device);
3759 struct qedr_qp *qp = get_qedr_qp(ibqp);
3760 unsigned long flags;
3765 if (qp->qp_type == IB_QPT_GSI)
3766 return qedr_gsi_post_send(ibqp, wr, bad_wr);
3768 spin_lock_irqsave(&qp->q_lock, flags);
3770 if (rdma_protocol_roce(&dev->ibdev, 1)) {
3771 if ((qp->state != QED_ROCE_QP_STATE_RTS) &&
3772 (qp->state != QED_ROCE_QP_STATE_ERR) &&
3773 (qp->state != QED_ROCE_QP_STATE_SQD)) {
3774 spin_unlock_irqrestore(&qp->q_lock, flags);
3776 DP_DEBUG(dev, QEDR_MSG_CQ,
3777 "QP in wrong state! QP icid=0x%x state %d\n",
3778 qp->icid, qp->state);
3784 rc = __qedr_post_send(ibqp, wr, bad_wr);
3788 qp->wqe_wr_id[qp->sq.prod].wr_id = wr->wr_id;
3790 qedr_inc_sw_prod(&qp->sq);
3792 qp->sq.db_data.data.value++;
3798 * If there was a failure in the first WR then it will be triggered in
3799 * vain. However, this is not harmful (as long as the producer value is
3800 * unchanged). For performance reasons we avoid checking for this
3801 * redundant doorbell.
3803 * qp->wqe_wr_id is accessed during qedr_poll_cq, as
3804 * soon as we give the doorbell, we could get a completion
3805 * for this wr, therefore we need to make sure that the
3806 * memory is updated before giving the doorbell.
3807 * During qedr_poll_cq, rmb is called before accessing the
3808 * cqe. This covers for the smp_rmb as well.
3811 writel(qp->sq.db_data.raw, qp->sq.db);
3813 spin_unlock_irqrestore(&qp->q_lock, flags);
3818 static u32 qedr_srq_elem_left(struct qedr_srq_hwq_info *hw_srq)
3822 /* Calculate the number of elements used from the producer
3823 * and consumer counts and subtract it from the maximum number
3824 * of work requests supported to get the number of elements left.
3826 used = hw_srq->wr_prod_cnt - (u32)atomic_read(&hw_srq->wr_cons_cnt);
3828 return hw_srq->max_wr - used;
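	/* E.g., with hypothetical values max_wr = 128, wr_prod_cnt = 10 and
	 * wr_cons_cnt = 7: used = 3 and 125 elements are left; the unsigned
	 * subtraction keeps the result correct across counter wrap.
	 */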
3831 int qedr_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
3832 const struct ib_recv_wr **bad_wr)
3834 struct qedr_srq *srq = get_qedr_srq(ibsrq);
3835 struct qedr_srq_hwq_info *hw_srq;
3836 struct qedr_dev *dev = srq->dev;
3837 struct qed_chain *pbl;
3838 unsigned long flags;
3842 spin_lock_irqsave(&srq->lock, flags);
3844 hw_srq = &srq->hw_srq;
3845 pbl = &srq->hw_srq.pbl;
3847 struct rdma_srq_wqe_header *hdr;
3850 if (!qedr_srq_elem_left(hw_srq) ||
3851 wr->num_sge > srq->hw_srq.max_sges) {
3852 DP_ERR(dev, "Can't post WR (%d,%d) || (%d > %d)\n",
3853 hw_srq->wr_prod_cnt,
3854 atomic_read(&hw_srq->wr_cons_cnt),
3855 wr->num_sge, srq->hw_srq.max_sges);
3861 hdr = qed_chain_produce(pbl);
3862 num_sge = wr->num_sge;
3863 /* Set number of sge and work request id in header */
3864 SRQ_HDR_SET(hdr, wr->wr_id, num_sge);
3866 srq->hw_srq.wr_prod_cnt++;
3870 DP_DEBUG(dev, QEDR_MSG_SRQ,
3871 "SRQ WR: SGEs: %d with wr_id[%d] = %llx\n",
3872 wr->num_sge, hw_srq->wqe_prod, wr->wr_id);
3874 for (i = 0; i < wr->num_sge; i++) {
3875 struct rdma_srq_sge *srq_sge = qed_chain_produce(pbl);
3877 /* Set SGE length, lkey and address */
3878 SRQ_SGE_SET(srq_sge, wr->sg_list[i].addr,
3879 wr->sg_list[i].length, wr->sg_list[i].lkey);
3881 DP_DEBUG(dev, QEDR_MSG_SRQ,
3882 "[%d]: len %d key %x addr %x:%x\n",
3883 i, srq_sge->length, srq_sge->l_key,
3884 srq_sge->addr.hi, srq_sge->addr.lo);
3888 /* Update WQE and SGE information before
3889 * updating producer.
3893 /* SRQ producer is 8 bytes. Need to update SGE producer index
3894 * the first 4 bytes and the WQE producer in the next 4 bytes.
3897 srq->hw_srq.virt_prod_pair_addr->sge_prod = cpu_to_le32(hw_srq->sge_prod);
3898 /* Make sure sge producer is updated first */
3900 srq->hw_srq.virt_prod_pair_addr->wqe_prod = cpu_to_le32(hw_srq->wqe_prod);
3905 DP_DEBUG(dev, QEDR_MSG_SRQ, "POST: Elements in S-RQ: %d\n",
3906 qed_chain_get_elem_left(pbl));
3907 spin_unlock_irqrestore(&srq->lock, flags);
3912 int qedr_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
3913 const struct ib_recv_wr **bad_wr)
3915 struct qedr_qp *qp = get_qedr_qp(ibqp);
3916 struct qedr_dev *dev = qp->dev;
3917 unsigned long flags;
3920 if (qp->qp_type == IB_QPT_GSI)
3921 return qedr_gsi_post_recv(ibqp, wr, bad_wr);
3923 spin_lock_irqsave(&qp->q_lock, flags);
3928 if (qed_chain_get_elem_left_u32(&qp->rq.pbl) <
3929 QEDR_MAX_RQE_ELEMENTS_PER_RQE ||
3930 wr->num_sge > qp->rq.max_sges) {
3931 DP_ERR(dev, "Can't post WR (%d < %d) || (%d > %d)\n",
3932 qed_chain_get_elem_left_u32(&qp->rq.pbl),
3933 QEDR_MAX_RQE_ELEMENTS_PER_RQE, wr->num_sge,
3939 for (i = 0; i < wr->num_sge; i++) {
3941 struct rdma_rq_sge *rqe =
3942 qed_chain_produce(&qp->rq.pbl);
3944 /* First one must include the number
3945 * of SGE in the list
3948 SET_FIELD(flags, RDMA_RQ_SGE_NUM_SGES,
3951 SET_FIELD(flags, RDMA_RQ_SGE_L_KEY_LO,
3952 wr->sg_list[i].lkey);
3954 RQ_SGE_SET(rqe, wr->sg_list[i].addr,
3955 wr->sg_list[i].length, flags);
3958 /* Special case of no SGEs. The FW requires between 1 and 4 SGEs;
3959 * in this case we need to post one SGE with length zero. This is
3960 * because an RDMA write with immediate consumes an RQ entry.
3964 struct rdma_rq_sge *rqe =
3965 qed_chain_produce(&qp->rq.pbl);
3967 /* First one must include the number
3968 * of SGE in the list
3970 SET_FIELD(flags, RDMA_RQ_SGE_L_KEY_LO, 0);
3971 SET_FIELD(flags, RDMA_RQ_SGE_NUM_SGES, 1);
3973 RQ_SGE_SET(rqe, 0, 0, flags);
3977 qp->rqe_wr_id[qp->rq.prod].wr_id = wr->wr_id;
3978 qp->rqe_wr_id[qp->rq.prod].wqe_size = i;
3980 qedr_inc_sw_prod(&qp->rq);
3982 /* qp->rqe_wr_id is accessed during qedr_poll_cq, as
3983 * soon as we give the doorbell, we could get a completion
3984 * for this wr, therefore we need to make sure that the
3985 * memory is updated before giving the doorbell.
3986 * During qedr_poll_cq, rmb is called before accessing the
3987 * cqe. This covers for the smp_rmb as well.
3991 qp->rq.db_data.data.value++;
3993 writel(qp->rq.db_data.raw, qp->rq.db);
3995 if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
3996 writel(qp->rq.iwarp_db2_data.raw, qp->rq.iwarp_db2);
4002 spin_unlock_irqrestore(&qp->q_lock, flags);
4007 static int is_valid_cqe(struct qedr_cq *cq, union rdma_cqe *cqe)
4009 struct rdma_cqe_requester *resp_cqe = &cqe->req;
4011 return (resp_cqe->flags & RDMA_CQE_REQUESTER_TOGGLE_BIT_MASK) ==
4015 static struct qedr_qp *cqe_get_qp(union rdma_cqe *cqe)
4017 struct rdma_cqe_requester *resp_cqe = &cqe->req;
4020 qp = (struct qedr_qp *)(uintptr_t)HILO_GEN(resp_cqe->qp_handle.hi,
4021 resp_cqe->qp_handle.lo,
4026 static enum rdma_cqe_type cqe_get_type(union rdma_cqe *cqe)
4028 struct rdma_cqe_requester *resp_cqe = &cqe->req;
4030 return GET_FIELD(resp_cqe->flags, RDMA_CQE_REQUESTER_TYPE);
4033 /* Return latest CQE (needs processing) */
4034 static union rdma_cqe *get_cqe(struct qedr_cq *cq)
4036 return cq->latest_cqe;
4039 /* For FMR we need to increment the FMR-completed counter used by the FMR
4040 * algorithm to determine whether a PBL can be freed or not.
4041 * We need to do this whether or not the work request was signaled. For
4042 * this purpose we call this function from the condition that checks if a WR
4043 * should be skipped, to make sure we don't miss it (possibly this FMR
4044 * operation was not signaled).
4046 static inline void qedr_chk_if_fmr(struct qedr_qp *qp)
4048 if (qp->wqe_wr_id[qp->sq.cons].opcode == IB_WC_REG_MR)
4049 qp->wqe_wr_id[qp->sq.cons].mr->info.completed++;
4052 static int process_req(struct qedr_dev *dev, struct qedr_qp *qp,
4053 struct qedr_cq *cq, int num_entries,
4054 struct ib_wc *wc, u16 hw_cons, enum ib_wc_status status,
4059 while (num_entries && qp->sq.wqe_cons != hw_cons) {
4060 if (!qp->wqe_wr_id[qp->sq.cons].signaled && !force) {
4061 qedr_chk_if_fmr(qp);
4067 wc->status = status;
4070 wc->src_qp = qp->id;
4073 wc->wr_id = qp->wqe_wr_id[qp->sq.cons].wr_id;
4074 wc->opcode = qp->wqe_wr_id[qp->sq.cons].opcode;
4076 switch (wc->opcode) {
4077 case IB_WC_RDMA_WRITE:
4078 wc->byte_len = qp->wqe_wr_id[qp->sq.cons].bytes_len;
4080 case IB_WC_COMP_SWAP:
4081 case IB_WC_FETCH_ADD:
4085 qp->wqe_wr_id[qp->sq.cons].mr->info.completed++;
4087 case IB_WC_RDMA_READ:
4089 wc->byte_len = qp->wqe_wr_id[qp->sq.cons].bytes_len;
4099 while (qp->wqe_wr_id[qp->sq.cons].wqe_size--)
4100 qed_chain_consume(&qp->sq.pbl);
4101 qedr_inc_sw_cons(&qp->sq);
4107 static int qedr_poll_cq_req(struct qedr_dev *dev,
4108 struct qedr_qp *qp, struct qedr_cq *cq,
4109 int num_entries, struct ib_wc *wc,
4110 struct rdma_cqe_requester *req)
4114 switch (req->status) {
4115 case RDMA_CQE_REQ_STS_OK:
4116 cnt = process_req(dev, qp, cq, num_entries, wc, req->sq_cons,
4119 case RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR:
4120 if (qp->state != QED_ROCE_QP_STATE_ERR)
4121 DP_DEBUG(dev, QEDR_MSG_CQ,
4122 "Error: POLL CQ with RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR. CQ icid=0x%x, QP icid=0x%x\n",
4123 cq->icid, qp->icid);
4124 cnt = process_req(dev, qp, cq, num_entries, wc, req->sq_cons,
4125 IB_WC_WR_FLUSH_ERR, 1);
4128 /* Process all WQEs before the consumer */
4129 qp->state = QED_ROCE_QP_STATE_ERR;
4130 cnt = process_req(dev, qp, cq, num_entries, wc,
4131 req->sq_cons - 1, IB_WC_SUCCESS, 0);
4133 /* if we have extra WC fill it with actual error info */
4134 if (cnt < num_entries) {
4135 enum ib_wc_status wc_status;
4137 switch (req->status) {
4138 case RDMA_CQE_REQ_STS_BAD_RESPONSE_ERR:
4140 "Error: POLL CQ with RDMA_CQE_REQ_STS_BAD_RESPONSE_ERR. CQ icid=0x%x, QP icid=0x%x\n",
4141 cq->icid, qp->icid);
4142 wc_status = IB_WC_BAD_RESP_ERR;
4144 case RDMA_CQE_REQ_STS_LOCAL_LENGTH_ERR:
4146 "Error: POLL CQ with RDMA_CQE_REQ_STS_LOCAL_LENGTH_ERR. CQ icid=0x%x, QP icid=0x%x\n",
4147 cq->icid, qp->icid);
4148 wc_status = IB_WC_LOC_LEN_ERR;
4150 case RDMA_CQE_REQ_STS_LOCAL_QP_OPERATION_ERR:
4152 "Error: POLL CQ with RDMA_CQE_REQ_STS_LOCAL_QP_OPERATION_ERR. CQ icid=0x%x, QP icid=0x%x\n",
4153 cq->icid, qp->icid);
4154 wc_status = IB_WC_LOC_QP_OP_ERR;
4156 case RDMA_CQE_REQ_STS_LOCAL_PROTECTION_ERR:
4158 "Error: POLL CQ with RDMA_CQE_REQ_STS_LOCAL_PROTECTION_ERR. CQ icid=0x%x, QP icid=0x%x\n",
4159 cq->icid, qp->icid);
4160 wc_status = IB_WC_LOC_PROT_ERR;
4162 case RDMA_CQE_REQ_STS_MEMORY_MGT_OPERATION_ERR:
4164 "Error: POLL CQ with RDMA_CQE_REQ_STS_MEMORY_MGT_OPERATION_ERR. CQ icid=0x%x, QP icid=0x%x\n",
4165 cq->icid, qp->icid);
4166 wc_status = IB_WC_MW_BIND_ERR;
4168 case RDMA_CQE_REQ_STS_REMOTE_INVALID_REQUEST_ERR:
4170 "Error: POLL CQ with RDMA_CQE_REQ_STS_REMOTE_INVALID_REQUEST_ERR. CQ icid=0x%x, QP icid=0x%x\n",
4171 cq->icid, qp->icid);
4172 wc_status = IB_WC_REM_INV_REQ_ERR;
4174 case RDMA_CQE_REQ_STS_REMOTE_ACCESS_ERR:
4176 "Error: POLL CQ with RDMA_CQE_REQ_STS_REMOTE_ACCESS_ERR. CQ icid=0x%x, QP icid=0x%x\n",
4177 cq->icid, qp->icid);
4178 wc_status = IB_WC_REM_ACCESS_ERR;
4180 case RDMA_CQE_REQ_STS_REMOTE_OPERATION_ERR:
4182 "Error: POLL CQ with RDMA_CQE_REQ_STS_REMOTE_OPERATION_ERR. CQ icid=0x%x, QP icid=0x%x\n",
4183 cq->icid, qp->icid);
4184 wc_status = IB_WC_REM_OP_ERR;
4186 case RDMA_CQE_REQ_STS_RNR_NAK_RETRY_CNT_ERR:
4188 "Error: POLL CQ with RDMA_CQE_REQ_STS_RNR_NAK_RETRY_CNT_ERR. CQ icid=0x%x, QP icid=0x%x\n",
4189 cq->icid, qp->icid);
4190 wc_status = IB_WC_RNR_RETRY_EXC_ERR;
4192 case RDMA_CQE_REQ_STS_TRANSPORT_RETRY_CNT_ERR:
4194 "Error: POLL CQ with ROCE_CQE_REQ_STS_TRANSPORT_RETRY_CNT_ERR. CQ icid=0x%x, QP icid=0x%x\n",
4195 cq->icid, qp->icid);
4196 wc_status = IB_WC_RETRY_EXC_ERR;
4200 "Error: POLL CQ with IB_WC_GENERAL_ERR. CQ icid=0x%x, QP icid=0x%x\n",
4201 cq->icid, qp->icid);
4202 wc_status = IB_WC_GENERAL_ERR;
4204 cnt += process_req(dev, qp, cq, 1, wc, req->sq_cons,
4212 static inline int qedr_cqe_resp_status_to_ib(u8 status)
4215 case RDMA_CQE_RESP_STS_LOCAL_ACCESS_ERR:
4216 return IB_WC_LOC_ACCESS_ERR;
4217 case RDMA_CQE_RESP_STS_LOCAL_LENGTH_ERR:
4218 return IB_WC_LOC_LEN_ERR;
4219 case RDMA_CQE_RESP_STS_LOCAL_QP_OPERATION_ERR:
4220 return IB_WC_LOC_QP_OP_ERR;
4221 case RDMA_CQE_RESP_STS_LOCAL_PROTECTION_ERR:
4222 return IB_WC_LOC_PROT_ERR;
4223 case RDMA_CQE_RESP_STS_MEMORY_MGT_OPERATION_ERR:
4224 return IB_WC_MW_BIND_ERR;
4225 case RDMA_CQE_RESP_STS_REMOTE_INVALID_REQUEST_ERR:
4226 return IB_WC_REM_INV_RD_REQ_ERR;
4227 case RDMA_CQE_RESP_STS_OK:
4228 return IB_WC_SUCCESS;
4230 return IB_WC_GENERAL_ERR;
4234 static inline int qedr_set_ok_cqe_resp_wc(struct rdma_cqe_responder *resp,
4237 wc->status = IB_WC_SUCCESS;
4238 wc->byte_len = le32_to_cpu(resp->length);
4240 if (resp->flags & QEDR_RESP_IMM) {
4241 wc->ex.imm_data = cpu_to_be32(le32_to_cpu(resp->imm_data_or_inv_r_Key));
4242 wc->wc_flags |= IB_WC_WITH_IMM;
4244 if (resp->flags & QEDR_RESP_RDMA)
4245 wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
4247 if (resp->flags & QEDR_RESP_INV)
4250 } else if (resp->flags & QEDR_RESP_INV) {
4251 wc->ex.imm_data = le32_to_cpu(resp->imm_data_or_inv_r_Key);
4252 wc->wc_flags |= IB_WC_WITH_INVALIDATE;
4254 if (resp->flags & QEDR_RESP_RDMA)
4257 } else if (resp->flags & QEDR_RESP_RDMA) {
4264 static void __process_resp_one(struct qedr_dev *dev, struct qedr_qp *qp,
4265 struct qedr_cq *cq, struct ib_wc *wc,
4266 struct rdma_cqe_responder *resp, u64 wr_id)
4268 /* Must fill fields before qedr_set_ok_cqe_resp_wc() */
4269 wc->opcode = IB_WC_RECV;
4272 if (likely(resp->status == RDMA_CQE_RESP_STS_OK)) {
4273 if (qedr_set_ok_cqe_resp_wc(resp, wc))
4275 "CQ %p (icid=%d) has invalid CQE responder flags=0x%x\n",
4276 cq, cq->icid, resp->flags);
4279 wc->status = qedr_cqe_resp_status_to_ib(resp->status);
4280 if (wc->status == IB_WC_GENERAL_ERR)
4282 "CQ %p (icid=%d) contains an invalid CQE status %d\n",
4283 cq, cq->icid, resp->status);
4286 /* Fill the rest of the WC */
4288 wc->src_qp = qp->id;
4293 static int process_resp_one_srq(struct qedr_dev *dev, struct qedr_qp *qp,
4294 struct qedr_cq *cq, struct ib_wc *wc,
4295 struct rdma_cqe_responder *resp)
4297 struct qedr_srq *srq = qp->srq;
4300 wr_id = HILO_GEN(le32_to_cpu(resp->srq_wr_id.hi),
4301 le32_to_cpu(resp->srq_wr_id.lo), u64);
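	/* E.g.: srq_wr_id.hi = 0x1 and srq_wr_id.lo = 0x2 recompose to the
	 * 64-bit wr_id 0x0000000100000002 that was stored via SRQ_HDR_SET()
	 * at post time.
	 */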
4303 if (resp->status == RDMA_CQE_RESP_STS_WORK_REQUEST_FLUSHED_ERR) {
4304 wc->status = IB_WC_WR_FLUSH_ERR;
4308 wc->src_qp = qp->id;
4312 __process_resp_one(dev, qp, cq, wc, resp, wr_id);
4314 atomic_inc(&srq->hw_srq.wr_cons_cnt);
4318 static int process_resp_one(struct qedr_dev *dev, struct qedr_qp *qp,
4319 struct qedr_cq *cq, struct ib_wc *wc,
4320 struct rdma_cqe_responder *resp)
4322 u64 wr_id = qp->rqe_wr_id[qp->rq.cons].wr_id;
4324 __process_resp_one(dev, qp, cq, wc, resp, wr_id);
4326 while (qp->rqe_wr_id[qp->rq.cons].wqe_size--)
4327 qed_chain_consume(&qp->rq.pbl);
4328 qedr_inc_sw_cons(&qp->rq);
4333 static int process_resp_flush(struct qedr_qp *qp, struct qedr_cq *cq,
4334 int num_entries, struct ib_wc *wc, u16 hw_cons)
4338 while (num_entries && qp->rq.wqe_cons != hw_cons) {
4340 wc->status = IB_WC_WR_FLUSH_ERR;
4343 wc->src_qp = qp->id;
4345 wc->wr_id = qp->rqe_wr_id[qp->rq.cons].wr_id;
4350 while (qp->rqe_wr_id[qp->rq.cons].wqe_size--)
4351 qed_chain_consume(&qp->rq.pbl);
4352 qedr_inc_sw_cons(&qp->rq);
4358 static void try_consume_resp_cqe(struct qedr_cq *cq, struct qedr_qp *qp,
4359 struct rdma_cqe_responder *resp, int *update)
4361 if (le16_to_cpu(resp->rq_cons_or_srq_id) == qp->rq.wqe_cons) {
4367 static int qedr_poll_cq_resp_srq(struct qedr_dev *dev, struct qedr_qp *qp,
4368 struct qedr_cq *cq, int num_entries,
4370 struct rdma_cqe_responder *resp)
4374 cnt = process_resp_one_srq(dev, qp, cq, wc, resp);
4380 static int qedr_poll_cq_resp(struct qedr_dev *dev, struct qedr_qp *qp,
4381 struct qedr_cq *cq, int num_entries,
4382 struct ib_wc *wc, struct rdma_cqe_responder *resp,
4387 if (resp->status == RDMA_CQE_RESP_STS_WORK_REQUEST_FLUSHED_ERR) {
4388 cnt = process_resp_flush(qp, cq, num_entries, wc,
4389 resp->rq_cons_or_srq_id);
4390 try_consume_resp_cqe(cq, qp, resp, update);
4392 cnt = process_resp_one(dev, qp, cq, wc, resp);
4400 static void try_consume_req_cqe(struct qedr_cq *cq, struct qedr_qp *qp,
4401 struct rdma_cqe_requester *req, int *update)
4403 if (le16_to_cpu(req->sq_cons) == qp->sq.wqe_cons) {
4409 int qedr_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
4411 struct qedr_dev *dev = get_qedr_dev(ibcq->device);
4412 struct qedr_cq *cq = get_qedr_cq(ibcq);
4413 union rdma_cqe *cqe;
4414 u32 old_cons, new_cons;
4415 unsigned long flags;
4419 if (cq->destroyed) {
4421 "warning: poll was invoked after destroy for cq %p (icid=%d)\n",
4426 if (cq->cq_type == QEDR_CQ_TYPE_GSI)
4427 return qedr_gsi_poll_cq(ibcq, num_entries, wc);
4429 spin_lock_irqsave(&cq->cq_lock, flags);
4430 cqe = cq->latest_cqe;
4431 old_cons = qed_chain_get_cons_idx_u32(&cq->pbl);
4432 while (num_entries && is_valid_cqe(cq, cqe)) {
4436 /* prevent speculative reads of any field of CQE */
4439 qp = cqe_get_qp(cqe);
4441 WARN(1, "Error: CQE QP pointer is NULL. CQE=%p\n", cqe);
4447 switch (cqe_get_type(cqe)) {
4448 case RDMA_CQE_TYPE_REQUESTER:
4449 cnt = qedr_poll_cq_req(dev, qp, cq, num_entries, wc,
4451 try_consume_req_cqe(cq, qp, &cqe->req, &update);
4453 case RDMA_CQE_TYPE_RESPONDER_RQ:
4454 cnt = qedr_poll_cq_resp(dev, qp, cq, num_entries, wc,
4455 &cqe->resp, &update);
4457 case RDMA_CQE_TYPE_RESPONDER_SRQ:
4458 cnt = qedr_poll_cq_resp_srq(dev, qp, cq, num_entries,
4462 case RDMA_CQE_TYPE_INVALID:
4464 DP_ERR(dev, "Error: invalid CQE type = %d\n",
4473 new_cons = qed_chain_get_cons_idx_u32(&cq->pbl);
4475 cq->cq_cons += new_cons - old_cons;
4478 /* The doorbell notifies about the latest VALID entry,
4479 * but the chain already points to the next INVALID one
4481 doorbell_cq(cq, cq->cq_cons - 1, cq->arm_flags);
4483 spin_unlock_irqrestore(&cq->cq_lock, flags);
4487 int qedr_process_mad(struct ib_device *ibdev, int process_mad_flags,
4488 u32 port_num, const struct ib_wc *in_wc,
4489 const struct ib_grh *in_grh, const struct ib_mad *in,
4490 struct ib_mad *out_mad, size_t *out_mad_size,
4491 u16 *out_mad_pkey_index)
4493 return IB_MAD_RESULT_SUCCESS;