1 /* QLogic qedr NIC Driver
2 * Copyright (c) 2015-2016 QLogic Corporation
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 #include <linux/dma-mapping.h>
33 #include <linux/crc32.h>
37 #include <linux/iommu.h>
39 #include <rdma/ib_verbs.h>
40 #include <rdma/ib_user_verbs.h>
41 #include <rdma/iw_cm.h>
42 #include <rdma/ib_umem.h>
43 #include <rdma/ib_addr.h>
44 #include <rdma/ib_cache.h>
45 #include <rdma/uverbs_ioctl.h>
47 #include <linux/qed/common_hsi.h>
48 #include "qedr_hsi_rdma.h"
49 #include <linux/qed/qed_if.h>
52 #include <rdma/qedr-abi.h>
53 #include "qedr_roce_cm.h"
54 #include "qedr_iw_cm.h"
56 #define QEDR_SRQ_WQE_ELEM_SIZE sizeof(union rdma_srq_elm)
57 #define RDMA_MAX_SGE_PER_SRQ (4)
58 #define RDMA_MAX_SRQ_WQE_SIZE (RDMA_MAX_SGE_PER_SRQ + 1)
60 #define DB_ADDR_SHIFT(addr) ((addr) << DB_PWM_ADDR_OFFSET_SHIFT)
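/* A sketch of how this macro is used (inferred from the callers below): it
 * turns a DQ_PWM_OFFSET_* constant into a byte offset within a DPI. User
 * space adds the offset to its mapped DPI base; the kernel adds it to
 * dev->db_addr (see e.g. qedr_create_cq()).
 */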
63 QEDR_USER_MMAP_IO_WC = 0,
64 QEDR_USER_MMAP_PHYS_PAGE,
67 static inline int qedr_ib_copy_to_udata(struct ib_udata *udata, void *src,
70 size_t min_len = min_t(size_t, len, udata->outlen);
72 return ib_copy_to_udata(udata, src, min_len);
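/* Clamping to udata->outlen keeps older consumers working: a user-space
 * library built against a smaller response struct receives only the fields
 * it knows about, and the newer tail fields are simply not copied.
 */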
75 int qedr_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
77 if (index >= QEDR_ROCE_PKEY_TABLE_LEN)
80 *pkey = QEDR_ROCE_PKEY_DEFAULT;
84 int qedr_iw_query_gid(struct ib_device *ibdev, u8 port,
85 int index, union ib_gid *sgid)
87 struct qedr_dev *dev = get_qedr_dev(ibdev);
89 memset(sgid->raw, 0, sizeof(sgid->raw));
90 ether_addr_copy(sgid->raw, dev->ndev->dev_addr);
92 DP_DEBUG(dev, QEDR_MSG_INIT, "QUERY sgid[%d]=%llx:%llx\n", index,
93 sgid->global.interface_id, sgid->global.subnet_prefix);
98 int qedr_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr)
100 struct qedr_dev *dev = get_qedr_dev(ibsrq->device);
101 struct qedr_device_attr *qattr = &dev->attr;
102 struct qedr_srq *srq = get_qedr_srq(ibsrq);
104 srq_attr->srq_limit = srq->srq_limit;
105 srq_attr->max_wr = qattr->max_srq_wr;
106 srq_attr->max_sge = qattr->max_sge;
111 int qedr_query_device(struct ib_device *ibdev,
112 struct ib_device_attr *attr, struct ib_udata *udata)
114 struct qedr_dev *dev = get_qedr_dev(ibdev);
115 struct qedr_device_attr *qattr = &dev->attr;
117 if (!dev->rdma_ctx) {
119 "qedr_query_device called with invalid params rdma_ctx=%p\n",
124 memset(attr, 0, sizeof(*attr));
126 attr->fw_ver = qattr->fw_ver;
127 attr->sys_image_guid = qattr->sys_image_guid;
128 attr->max_mr_size = qattr->max_mr_size;
129 attr->page_size_cap = qattr->page_size_caps;
130 attr->vendor_id = qattr->vendor_id;
131 attr->vendor_part_id = qattr->vendor_part_id;
132 attr->hw_ver = qattr->hw_ver;
133 attr->max_qp = qattr->max_qp;
134 attr->max_qp_wr = max_t(u32, qattr->max_sqe, qattr->max_rqe);
135 attr->device_cap_flags = IB_DEVICE_CURR_QP_STATE_MOD |
136 IB_DEVICE_RC_RNR_NAK_GEN |
137 IB_DEVICE_LOCAL_DMA_LKEY | IB_DEVICE_MEM_MGT_EXTENSIONS;
139 if (!rdma_protocol_iwarp(&dev->ibdev, 1))
140 attr->device_cap_flags |= IB_DEVICE_XRC;
141 attr->max_send_sge = qattr->max_sge;
142 attr->max_recv_sge = qattr->max_sge;
143 attr->max_sge_rd = qattr->max_sge;
144 attr->max_cq = qattr->max_cq;
145 attr->max_cqe = qattr->max_cqe;
146 attr->max_mr = qattr->max_mr;
147 attr->max_mw = qattr->max_mw;
148 attr->max_pd = qattr->max_pd;
149 attr->atomic_cap = dev->atomic_cap;
150 attr->max_qp_init_rd_atom =
151 1 << (fls(qattr->max_qp_req_rd_atomic_resc) - 1);
152 attr->max_qp_rd_atom =
153 min(1 << (fls(qattr->max_qp_resp_rd_atomic_resc) - 1),
154 attr->max_qp_init_rd_atom);
156 attr->max_srq = qattr->max_srq;
157 attr->max_srq_sge = qattr->max_srq_sge;
158 attr->max_srq_wr = qattr->max_srq_wr;
160 attr->local_ca_ack_delay = qattr->dev_ack_delay;
161 attr->max_fast_reg_page_list_len = qattr->max_mr / 8;
162 attr->max_pkeys = qattr->max_pkey;
163 attr->max_ah = qattr->max_ah;
168 static inline void get_link_speed_and_width(int speed, u16 *ib_speed,
173 *ib_speed = IB_SPEED_SDR;
174 *ib_width = IB_WIDTH_1X;
177 *ib_speed = IB_SPEED_QDR;
178 *ib_width = IB_WIDTH_1X;
182 *ib_speed = IB_SPEED_DDR;
183 *ib_width = IB_WIDTH_4X;
187 *ib_speed = IB_SPEED_EDR;
188 *ib_width = IB_WIDTH_1X;
192 *ib_speed = IB_SPEED_QDR;
193 *ib_width = IB_WIDTH_4X;
197 *ib_speed = IB_SPEED_HDR;
198 *ib_width = IB_WIDTH_1X;
202 *ib_speed = IB_SPEED_EDR;
203 *ib_width = IB_WIDTH_4X;
208 *ib_speed = IB_SPEED_SDR;
209 *ib_width = IB_WIDTH_1X;
213 int qedr_query_port(struct ib_device *ibdev, u8 port, struct ib_port_attr *attr)
215 struct qedr_dev *dev;
216 struct qed_rdma_port *rdma_port;
218 dev = get_qedr_dev(ibdev);
220 if (!dev->rdma_ctx) {
221 DP_ERR(dev, "rdma_ctx is NULL\n");
225 rdma_port = dev->ops->rdma_query_port(dev->rdma_ctx);
227 /* *attr is zeroed by the caller; avoid zeroing it here */
228 if (rdma_port->port_state == QED_RDMA_PORT_UP) {
229 attr->state = IB_PORT_ACTIVE;
230 attr->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
232 attr->state = IB_PORT_DOWN;
233 attr->phys_state = IB_PORT_PHYS_STATE_DISABLED;
235 attr->max_mtu = IB_MTU_4096;
240 attr->ip_gids = true;
241 if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
242 attr->active_mtu = iboe_get_mtu(dev->iwarp_max_mtu);
243 attr->gid_tbl_len = 1;
245 attr->active_mtu = iboe_get_mtu(dev->ndev->mtu);
246 attr->gid_tbl_len = QEDR_MAX_SGID;
247 attr->pkey_tbl_len = QEDR_ROCE_PKEY_TABLE_LEN;
249 attr->bad_pkey_cntr = rdma_port->pkey_bad_counter;
250 attr->qkey_viol_cntr = 0;
251 get_link_speed_and_width(rdma_port->link_speed,
252 &attr->active_speed, &attr->active_width);
253 attr->max_msg_sz = rdma_port->max_msg_size;
254 attr->max_vl_num = 4;
259 int qedr_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata)
261 struct ib_device *ibdev = uctx->device;
263 struct qedr_ucontext *ctx = get_qedr_ucontext(uctx);
264 struct qedr_alloc_ucontext_resp uresp = {};
265 struct qedr_alloc_ucontext_req ureq = {};
266 struct qedr_dev *dev = get_qedr_dev(ibdev);
267 struct qed_rdma_add_user_out_params oparams;
268 struct qedr_user_mmap_entry *entry;
274 rc = ib_copy_from_udata(&ureq, udata,
275 min(sizeof(ureq), udata->inlen));
277 DP_ERR(dev, "Problem copying data from user space\n");
280 ctx->edpm_mode = !!(ureq.context_flags &
281 QEDR_ALLOC_UCTX_EDPM_MODE);
282 ctx->db_rec = !!(ureq.context_flags & QEDR_ALLOC_UCTX_DB_REC);
285 rc = dev->ops->rdma_add_user(dev->rdma_ctx, &oparams);
288 "failed to allocate a DPI for a new RoCE application, rc=%d. To overcome this consider to increase the number of DPIs, increase the doorbell BAR size or just close unnecessary RoCE applications. In order to increase the number of DPIs consult the qedr readme\n",
293 ctx->dpi = oparams.dpi;
294 ctx->dpi_addr = oparams.dpi_addr;
295 ctx->dpi_phys_addr = oparams.dpi_phys_addr;
296 ctx->dpi_size = oparams.dpi_size;
297 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
303 entry->io_address = ctx->dpi_phys_addr;
304 entry->length = ctx->dpi_size;
305 entry->mmap_flag = QEDR_USER_MMAP_IO_WC;
306 entry->dpi = ctx->dpi;
308 rc = rdma_user_mmap_entry_insert(uctx, &entry->rdma_entry,
314 ctx->db_mmap_entry = &entry->rdma_entry;
316 if (!dev->user_dpm_enabled)
318 else if (rdma_protocol_iwarp(&dev->ibdev, 1))
319 uresp.dpm_flags = QEDR_DPM_TYPE_IWARP_LEGACY;
321 uresp.dpm_flags = QEDR_DPM_TYPE_ROCE_ENHANCED |
322 QEDR_DPM_TYPE_ROCE_LEGACY |
323 QEDR_DPM_TYPE_ROCE_EDPM_MODE;
325 if (ureq.context_flags & QEDR_SUPPORT_DPM_SIZES) {
326 uresp.dpm_flags |= QEDR_DPM_SIZES_SET;
327 uresp.ldpm_limit_size = QEDR_LDPM_MAX_SIZE;
328 uresp.edpm_trans_size = QEDR_EDPM_TRANS_SIZE;
329 uresp.edpm_limit_size = QEDR_EDPM_MAX_SIZE;
332 uresp.wids_enabled = 1;
333 uresp.wid_count = oparams.wid_count;
334 uresp.db_pa = rdma_user_mmap_get_offset(ctx->db_mmap_entry);
335 uresp.db_size = ctx->dpi_size;
336 uresp.max_send_wr = dev->attr.max_sqe;
337 uresp.max_recv_wr = dev->attr.max_rqe;
338 uresp.max_srq_wr = dev->attr.max_srq_wr;
339 uresp.sges_per_send_wr = QEDR_MAX_SQE_ELEMENTS_PER_SQE;
340 uresp.sges_per_recv_wr = QEDR_MAX_RQE_ELEMENTS_PER_RQE;
341 uresp.sges_per_srq_wr = dev->attr.max_srq_sge;
342 uresp.max_cqes = QEDR_MAX_CQES;
344 rc = qedr_ib_copy_to_udata(udata, &uresp, sizeof(uresp));
350 DP_DEBUG(dev, QEDR_MSG_INIT, "Allocating user context %p\n",
355 if (!ctx->db_mmap_entry)
356 dev->ops->rdma_remove_user(dev->rdma_ctx, ctx->dpi);
358 rdma_user_mmap_entry_remove(ctx->db_mmap_entry);
363 void qedr_dealloc_ucontext(struct ib_ucontext *ibctx)
365 struct qedr_ucontext *uctx = get_qedr_ucontext(ibctx);
367 DP_DEBUG(uctx->dev, QEDR_MSG_INIT, "Deallocating user context %p\n",
370 rdma_user_mmap_entry_remove(uctx->db_mmap_entry);
373 void qedr_mmap_free(struct rdma_user_mmap_entry *rdma_entry)
375 struct qedr_user_mmap_entry *entry = get_qedr_mmap_entry(rdma_entry);
376 struct qedr_dev *dev = entry->dev;
378 if (entry->mmap_flag == QEDR_USER_MMAP_PHYS_PAGE)
379 free_page((unsigned long)entry->address);
380 else if (entry->mmap_flag == QEDR_USER_MMAP_IO_WC)
381 dev->ops->rdma_remove_user(dev->rdma_ctx, entry->dpi);
386 int qedr_mmap(struct ib_ucontext *ucontext, struct vm_area_struct *vma)
388 struct ib_device *dev = ucontext->device;
389 size_t length = vma->vm_end - vma->vm_start;
390 struct rdma_user_mmap_entry *rdma_entry;
391 struct qedr_user_mmap_entry *entry;
396 "start %#lx, end %#lx, length = %#zx, pgoff = %#lx\n",
397 vma->vm_start, vma->vm_end, length, vma->vm_pgoff);
399 rdma_entry = rdma_user_mmap_entry_get(ucontext, vma);
401 ibdev_dbg(dev, "pgoff[%#lx] does not have valid entry\n",
405 entry = get_qedr_mmap_entry(rdma_entry);
407 "Mapping address[%#llx], length[%#zx], mmap_flag[%d]\n",
408 entry->io_address, length, entry->mmap_flag);
410 switch (entry->mmap_flag) {
411 case QEDR_USER_MMAP_IO_WC:
412 pfn = entry->io_address >> PAGE_SHIFT;
413 rc = rdma_user_mmap_io(ucontext, vma, pfn, length,
414 pgprot_writecombine(vma->vm_page_prot),
417 case QEDR_USER_MMAP_PHYS_PAGE:
418 rc = vm_insert_page(vma, vma->vm_start,
419 virt_to_page(entry->address));
427 "Couldn't mmap address[%#llx] length[%#zx] mmap_flag[%d] err[%d]\n",
428 entry->io_address, length, entry->mmap_flag, rc);
430 rdma_user_mmap_entry_put(rdma_entry);
434 int qedr_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
436 struct ib_device *ibdev = ibpd->device;
437 struct qedr_dev *dev = get_qedr_dev(ibdev);
438 struct qedr_pd *pd = get_qedr_pd(ibpd);
442 DP_DEBUG(dev, QEDR_MSG_INIT, "Function called from: %s\n",
443 udata ? "User Lib" : "Kernel");
445 if (!dev->rdma_ctx) {
446 DP_ERR(dev, "invalid RDMA context\n");
450 rc = dev->ops->rdma_alloc_pd(dev->rdma_ctx, &pd_id);
457 struct qedr_alloc_pd_uresp uresp = {
460 struct qedr_ucontext *context = rdma_udata_to_drv_context(
461 udata, struct qedr_ucontext, ibucontext);
463 rc = qedr_ib_copy_to_udata(udata, &uresp, sizeof(uresp));
465 DP_ERR(dev, "copy error pd_id=0x%x.\n", pd_id);
466 dev->ops->rdma_dealloc_pd(dev->rdma_ctx, pd_id);
477 int qedr_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
479 struct qedr_dev *dev = get_qedr_dev(ibpd->device);
480 struct qedr_pd *pd = get_qedr_pd(ibpd);
482 DP_DEBUG(dev, QEDR_MSG_INIT, "Deallocating PD %d\n", pd->pd_id);
483 dev->ops->rdma_dealloc_pd(dev->rdma_ctx, pd->pd_id);
488 int qedr_alloc_xrcd(struct ib_xrcd *ibxrcd, struct ib_udata *udata)
490 struct qedr_dev *dev = get_qedr_dev(ibxrcd->device);
491 struct qedr_xrcd *xrcd = get_qedr_xrcd(ibxrcd);
493 return dev->ops->rdma_alloc_xrcd(dev->rdma_ctx, &xrcd->xrcd_id);
496 int qedr_dealloc_xrcd(struct ib_xrcd *ibxrcd, struct ib_udata *udata)
498 struct qedr_dev *dev = get_qedr_dev(ibxrcd->device);
499 u16 xrcd_id = get_qedr_xrcd(ibxrcd)->xrcd_id;
501 dev->ops->rdma_dealloc_xrcd(dev->rdma_ctx, xrcd_id);
504 static void qedr_free_pbl(struct qedr_dev *dev,
505 struct qedr_pbl_info *pbl_info, struct qedr_pbl *pbl)
507 struct pci_dev *pdev = dev->pdev;
510 for (i = 0; i < pbl_info->num_pbls; i++) {
513 dma_free_coherent(&pdev->dev, pbl_info->pbl_size,
514 pbl[i].va, pbl[i].pa);
520 #define MIN_FW_PBL_PAGE_SIZE (4 * 1024)
521 #define MAX_FW_PBL_PAGE_SIZE (64 * 1024)
523 #define NUM_PBES_ON_PAGE(_page_size) (_page_size / sizeof(u64))
524 #define MAX_PBES_ON_PAGE NUM_PBES_ON_PAGE(MAX_FW_PBL_PAGE_SIZE)
525 #define MAX_PBES_TWO_LAYER (MAX_PBES_ON_PAGE * MAX_PBES_ON_PAGE)
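/* Worked numbers for the limits above: with 8-byte PBEs (sizeof(u64)), a
 * 64 KiB PBL page holds 65536 / 8 = 8192 PBEs, so a two-layer PBL can
 * describe up to 8192 * 8192 = 67,108,864 pages -- 256 GiB at a 4 KiB page
 * size.
 */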
527 static struct qedr_pbl *qedr_alloc_pbl_tbl(struct qedr_dev *dev,
528 struct qedr_pbl_info *pbl_info,
531 struct pci_dev *pdev = dev->pdev;
532 struct qedr_pbl *pbl_table;
533 dma_addr_t *pbl_main_tbl;
538 pbl_table = kcalloc(pbl_info->num_pbls, sizeof(*pbl_table), flags);
540 return ERR_PTR(-ENOMEM);
542 for (i = 0; i < pbl_info->num_pbls; i++) {
543 va = dma_alloc_coherent(&pdev->dev, pbl_info->pbl_size, &pa,
548 pbl_table[i].va = va;
549 pbl_table[i].pa = pa;
552 /* Two-layer PBLs: if we have more than one PBL, we need to initialize
553 * the first one with physical pointers to all of the rest
555 pbl_main_tbl = (dma_addr_t *)pbl_table[0].va;
556 for (i = 0; i < pbl_info->num_pbls - 1; i++)
557 pbl_main_tbl[i] = pbl_table[i + 1].pa;
562 for (i--; i >= 0; i--)
563 dma_free_coherent(&pdev->dev, pbl_info->pbl_size,
564 pbl_table[i].va, pbl_table[i].pa);
566 qedr_free_pbl(dev, pbl_info, pbl_table);
568 return ERR_PTR(-ENOMEM);
571 static int qedr_prepare_pbl_tbl(struct qedr_dev *dev,
572 struct qedr_pbl_info *pbl_info,
573 u32 num_pbes, int two_layer_capable)
579 if ((num_pbes > MAX_PBES_ON_PAGE) && two_layer_capable) {
580 if (num_pbes > MAX_PBES_TWO_LAYER) {
581 DP_ERR(dev, "prepare pbl table: too many pages %d\n",
586 /* calculate required pbl page size */
587 pbl_size = MIN_FW_PBL_PAGE_SIZE;
588 pbl_capacity = NUM_PBES_ON_PAGE(pbl_size) *
589 NUM_PBES_ON_PAGE(pbl_size);
591 while (pbl_capacity < num_pbes) {
593 pbl_capacity = pbl_size / sizeof(u64);
594 pbl_capacity = pbl_capacity * pbl_capacity;
597 num_pbls = DIV_ROUND_UP(num_pbes, NUM_PBES_ON_PAGE(pbl_size));
598 num_pbls++; /* One for layer 0 (points to the PBLs) */
599 pbl_info->two_layered = true;
601 /* One layered PBL */
603 pbl_size = max_t(u32, MIN_FW_PBL_PAGE_SIZE,
604 roundup_pow_of_two((num_pbes * sizeof(u64))));
605 pbl_info->two_layered = false;
608 pbl_info->num_pbls = num_pbls;
609 pbl_info->pbl_size = pbl_size;
610 pbl_info->num_pbes = num_pbes;
612 DP_DEBUG(dev, QEDR_MSG_MR,
613 "prepare pbl table: num_pbes=%d, num_pbls=%d, pbl_size=%d\n",
614 pbl_info->num_pbes, pbl_info->num_pbls, pbl_info->pbl_size);
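/* Worked example (assuming the elided loop body doubles pbl_size each
 * iteration, consistent with the capacity recomputation above): for
 * num_pbes = 1,000,000, the 4 KiB starting size gives 512 * 512 = 262,144
 * < 1,000,000, so pbl_size grows to 8 KiB, where 1024 * 1024 = 1,048,576
 * suffices; then num_pbls = DIV_ROUND_UP(1000000, 1024) + 1 = 978.
 */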
619 static void qedr_populate_pbls(struct qedr_dev *dev, struct ib_umem *umem,
620 struct qedr_pbl *pbl,
621 struct qedr_pbl_info *pbl_info, u32 pg_shift)
623 int pbe_cnt, total_num_pbes = 0;
624 struct qedr_pbl *pbl_tbl;
625 struct ib_block_iter biter;
628 if (!pbl_info->num_pbes)
631 /* If we have a two-layered PBL, the first PBL points to the rest
632 * of the PBLs, and the first entry lies in the second PBL of the table
634 if (pbl_info->two_layered)
639 pbe = (struct regpair *)pbl_tbl->va;
641 DP_ERR(dev, "cannot populate PBL due to a NULL PBE\n");
647 rdma_umem_for_each_dma_block (umem, &biter, BIT(pg_shift)) {
648 u64 pg_addr = rdma_block_iter_dma_address(&biter);
650 pbe->lo = cpu_to_le32(pg_addr);
651 pbe->hi = cpu_to_le32(upper_32_bits(pg_addr));
657 if (total_num_pbes == pbl_info->num_pbes)
660 /* If the given PBL is full of PBEs, move to the next PBL.
662 if (pbe_cnt == (pbl_info->pbl_size / sizeof(u64))) {
664 pbe = (struct regpair *)pbl_tbl->va;
670 static int qedr_db_recovery_add(struct qedr_dev *dev,
671 void __iomem *db_addr,
673 enum qed_db_rec_width db_width,
674 enum qed_db_rec_space db_space)
677 DP_DEBUG(dev, QEDR_MSG_INIT, "avoiding db rec since old lib\n");
681 return dev->ops->common->db_recovery_add(dev->cdev, db_addr, db_data,
685 static void qedr_db_recovery_del(struct qedr_dev *dev,
686 void __iomem *db_addr,
690 DP_DEBUG(dev, QEDR_MSG_INIT, "avoiding db rec since old lib\n");
694 /* Ignore the return code, as there is not much we can do about it. An
695 * error log will be printed inside the callee.
697 dev->ops->common->db_recovery_del(dev->cdev, db_addr, db_data);
700 static int qedr_copy_cq_uresp(struct qedr_dev *dev,
701 struct qedr_cq *cq, struct ib_udata *udata,
704 struct qedr_create_cq_uresp uresp;
707 memset(&uresp, 0, sizeof(uresp));
709 uresp.db_offset = db_offset;
710 uresp.icid = cq->icid;
711 if (cq->q.db_mmap_entry)
713 rdma_user_mmap_get_offset(cq->q.db_mmap_entry);
715 rc = qedr_ib_copy_to_udata(udata, &uresp, sizeof(uresp));
717 DP_ERR(dev, "copy error cqid=0x%x.\n", cq->icid);
722 static void consume_cqe(struct qedr_cq *cq)
724 if (cq->latest_cqe == cq->toggle_cqe)
725 cq->pbl_toggle ^= RDMA_CQE_REQUESTER_TOGGLE_BIT_MASK;
727 cq->latest_cqe = qed_chain_consume(&cq->pbl);
730 static inline int qedr_align_cq_entries(int entries)
732 u64 size, aligned_size;
734 /* We allocate an extra entry that we don't report to the FW. */
735 size = (entries + 1) * QEDR_CQE_SIZE;
736 aligned_size = ALIGN(size, PAGE_SIZE);
738 return aligned_size / QEDR_CQE_SIZE;
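/* Example, assuming a 32-byte CQE and 4 KiB pages: entries = 1024 gives
 * size = 1025 * 32 = 32,800 bytes, which rounds up to 9 pages (36,864
 * bytes), so 36,864 / 32 = 1152 entries are actually allocated.
 */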
741 static int qedr_init_user_db_rec(struct ib_udata *udata,
742 struct qedr_dev *dev, struct qedr_userq *q,
743 bool requires_db_rec)
745 struct qedr_ucontext *uctx =
746 rdma_udata_to_drv_context(udata, struct qedr_ucontext,
748 struct qedr_user_mmap_entry *entry;
751 /* Abort for a non-doorbell user queue (SRQ) or a non-supporting lib */
752 if (requires_db_rec == 0 || !uctx->db_rec)
755 /* Allocate a page for doorbell recovery, add to mmap */
756 q->db_rec_data = (void *)get_zeroed_page(GFP_USER);
757 if (!q->db_rec_data) {
758 DP_ERR(dev, "get_zeroed_page failed\n");
762 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
764 goto err_free_db_data;
766 entry->address = q->db_rec_data;
767 entry->length = PAGE_SIZE;
768 entry->mmap_flag = QEDR_USER_MMAP_PHYS_PAGE;
769 rc = rdma_user_mmap_entry_insert(&uctx->ibucontext,
775 q->db_mmap_entry = &entry->rdma_entry;
783 free_page((unsigned long)q->db_rec_data);
784 q->db_rec_data = NULL;
788 static inline int qedr_init_user_queue(struct ib_udata *udata,
789 struct qedr_dev *dev,
790 struct qedr_userq *q, u64 buf_addr,
791 size_t buf_len, bool requires_db_rec,
798 q->buf_addr = buf_addr;
799 q->buf_len = buf_len;
800 q->umem = ib_umem_get(&dev->ibdev, q->buf_addr, q->buf_len, access);
801 if (IS_ERR(q->umem)) {
802 DP_ERR(dev, "create user queue: failed ib_umem_get, got %ld\n",
804 return PTR_ERR(q->umem);
807 fw_pages = ib_umem_num_dma_blocks(q->umem, 1 << FW_PAGE_SHIFT);
808 rc = qedr_prepare_pbl_tbl(dev, &q->pbl_info, fw_pages, 0);
812 if (alloc_and_init) {
813 q->pbl_tbl = qedr_alloc_pbl_tbl(dev, &q->pbl_info, GFP_KERNEL);
814 if (IS_ERR(q->pbl_tbl)) {
815 rc = PTR_ERR(q->pbl_tbl);
818 qedr_populate_pbls(dev, q->umem, q->pbl_tbl, &q->pbl_info,
821 q->pbl_tbl = kzalloc(sizeof(*q->pbl_tbl), GFP_KERNEL);
828 /* mmap the user address used to store doorbell data for recovery */
829 return qedr_init_user_db_rec(udata, dev, q, requires_db_rec);
832 ib_umem_release(q->umem);
838 static inline void qedr_init_cq_params(struct qedr_cq *cq,
839 struct qedr_ucontext *ctx,
840 struct qedr_dev *dev, int vector,
841 int chain_entries, int page_cnt,
843 struct qed_rdma_create_cq_in_params
846 memset(params, 0, sizeof(*params));
847 params->cq_handle_hi = upper_32_bits((uintptr_t)cq);
848 params->cq_handle_lo = lower_32_bits((uintptr_t)cq);
849 params->cnq_id = vector;
850 params->cq_size = chain_entries - 1;
851 params->dpi = (ctx) ? ctx->dpi : dev->dpi;
852 params->pbl_num_pages = page_cnt;
853 params->pbl_ptr = pbl_ptr;
854 params->pbl_two_level = 0;
857 static void doorbell_cq(struct qedr_cq *cq, u32 cons, u8 flags)
859 cq->db.data.agg_flags = flags;
860 cq->db.data.value = cpu_to_le32(cons);
861 writeq(cq->db.raw, cq->db_addr);
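/* cq->db.raw appears to alias cq->db.data (a union), so the single 64-bit
 * write above publishes the icid/params set at creation time together with
 * the new consumer value and arm flags in one access, rather than letting
 * the HW observe a half-updated doorbell.
 */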
864 int qedr_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
866 struct qedr_cq *cq = get_qedr_cq(ibcq);
867 unsigned long sflags;
868 struct qedr_dev *dev;
870 dev = get_qedr_dev(ibcq->device);
874 "warning: arm was invoked after destroy for cq %p (icid=%d)\n",
880 if (cq->cq_type == QEDR_CQ_TYPE_GSI)
883 spin_lock_irqsave(&cq->cq_lock, sflags);
887 if (flags & IB_CQ_SOLICITED)
888 cq->arm_flags |= DQ_UCM_ROCE_CQ_ARM_SE_CF_CMD;
890 if (flags & IB_CQ_NEXT_COMP)
891 cq->arm_flags |= DQ_UCM_ROCE_CQ_ARM_CF_CMD;
893 doorbell_cq(cq, cq->cq_cons - 1, cq->arm_flags);
895 spin_unlock_irqrestore(&cq->cq_lock, sflags);
900 int qedr_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
901 struct ib_udata *udata)
903 struct ib_device *ibdev = ibcq->device;
904 struct qedr_ucontext *ctx = rdma_udata_to_drv_context(
905 udata, struct qedr_ucontext, ibucontext);
906 struct qed_rdma_destroy_cq_out_params destroy_oparams;
907 struct qed_rdma_destroy_cq_in_params destroy_iparams;
908 struct qed_chain_init_params chain_params = {
909 .mode = QED_CHAIN_MODE_PBL,
910 .intended_use = QED_CHAIN_USE_TO_CONSUME,
911 .cnt_type = QED_CHAIN_CNT_TYPE_U32,
912 .elem_size = sizeof(union rdma_cqe),
914 struct qedr_dev *dev = get_qedr_dev(ibdev);
915 struct qed_rdma_create_cq_in_params params;
916 struct qedr_create_cq_ureq ureq = {};
917 int vector = attr->comp_vector;
918 int entries = attr->cqe;
919 struct qedr_cq *cq = get_qedr_cq(ibcq);
927 DP_DEBUG(dev, QEDR_MSG_INIT,
928 "create_cq: called from %s. entries=%d, vector=%d\n",
929 udata ? "User Lib" : "Kernel", entries, vector);
931 if (entries > QEDR_MAX_CQES) {
933 "create cq: the number of entries %d is too high. Must be equal or below %d.\n",
934 entries, QEDR_MAX_CQES);
938 chain_entries = qedr_align_cq_entries(entries);
939 chain_entries = min_t(int, chain_entries, QEDR_MAX_CQES);
940 chain_params.num_elems = chain_entries;
942 /* Calc db offset: user space will add the DPI base, the kernel will add the db addr */
943 db_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_UCM_RDMA_CQ_CONS_32BIT);
946 if (ib_copy_from_udata(&ureq, udata, min(sizeof(ureq),
949 "create cq: problem copying data from user space\n");
955 "create cq: cannot create a cq with 0 entries\n");
959 cq->cq_type = QEDR_CQ_TYPE_USER;
961 rc = qedr_init_user_queue(udata, dev, &cq->q, ureq.addr,
962 ureq.len, true, IB_ACCESS_LOCAL_WRITE,
967 pbl_ptr = cq->q.pbl_tbl->pa;
968 page_cnt = cq->q.pbl_info.num_pbes;
970 cq->ibcq.cqe = chain_entries;
971 cq->q.db_addr = ctx->dpi_addr + db_offset;
973 cq->cq_type = QEDR_CQ_TYPE_KERNEL;
975 rc = dev->ops->common->chain_alloc(dev->cdev, &cq->pbl,
980 page_cnt = qed_chain_get_page_cnt(&cq->pbl);
981 pbl_ptr = qed_chain_get_pbl_phys(&cq->pbl);
982 cq->ibcq.cqe = cq->pbl.capacity;
985 qedr_init_cq_params(cq, ctx, dev, vector, chain_entries, page_cnt,
988 rc = dev->ops->rdma_create_cq(dev->rdma_ctx, &params, &icid);
993 cq->sig = QEDR_CQ_MAGIC_NUMBER;
994 spin_lock_init(&cq->cq_lock);
997 rc = qedr_copy_cq_uresp(dev, cq, udata, db_offset);
1001 rc = qedr_db_recovery_add(dev, cq->q.db_addr,
1002 &cq->q.db_rec_data->db_data,
1009 /* Generate doorbell address. */
1010 cq->db.data.icid = cq->icid;
1011 cq->db_addr = dev->db_addr + db_offset;
1012 cq->db.data.params = DB_AGG_CMD_MAX <<
1013 RDMA_PWM_VAL32_DATA_AGG_CMD_SHIFT;
1015 /* Point to the very last element; once we pass it we will toggle */
1016 cq->toggle_cqe = qed_chain_get_last_elem(&cq->pbl);
1017 cq->pbl_toggle = RDMA_CQE_REQUESTER_TOGGLE_BIT_MASK;
1018 cq->latest_cqe = NULL;
1020 cq->cq_cons = qed_chain_get_cons_idx_u32(&cq->pbl);
1022 rc = qedr_db_recovery_add(dev, cq->db_addr, &cq->db.data,
1023 DB_REC_WIDTH_64B, DB_REC_KERNEL);
1028 DP_DEBUG(dev, QEDR_MSG_CQ,
1029 "create cq: icid=0x%0x, addr=%p, size(entries)=0x%0x\n",
1030 cq->icid, cq, params.cq_size);
1035 destroy_iparams.icid = cq->icid;
1036 dev->ops->rdma_destroy_cq(dev->rdma_ctx, &destroy_iparams,
1040 qedr_free_pbl(dev, &cq->q.pbl_info, cq->q.pbl_tbl);
1041 ib_umem_release(cq->q.umem);
1042 if (cq->q.db_mmap_entry)
1043 rdma_user_mmap_entry_remove(cq->q.db_mmap_entry);
1045 dev->ops->common->chain_free(dev->cdev, &cq->pbl);
1051 int qedr_resize_cq(struct ib_cq *ibcq, int new_cnt, struct ib_udata *udata)
1053 struct qedr_dev *dev = get_qedr_dev(ibcq->device);
1054 struct qedr_cq *cq = get_qedr_cq(ibcq);
1056 DP_ERR(dev, "cq %p RESIZE NOT SUPPORTED\n", cq);
1061 #define QEDR_DESTROY_CQ_MAX_ITERATIONS (10)
1062 #define QEDR_DESTROY_CQ_ITER_DURATION (10)
1064 int qedr_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
1066 struct qedr_dev *dev = get_qedr_dev(ibcq->device);
1067 struct qed_rdma_destroy_cq_out_params oparams;
1068 struct qed_rdma_destroy_cq_in_params iparams;
1069 struct qedr_cq *cq = get_qedr_cq(ibcq);
1072 DP_DEBUG(dev, QEDR_MSG_CQ, "destroy cq %p (icid=%d)\n", cq, cq->icid);
1076 /* GSI CQs are handled by the driver, so they don't exist in the FW */
1077 if (cq->cq_type == QEDR_CQ_TYPE_GSI) {
1078 qedr_db_recovery_del(dev, cq->db_addr, &cq->db.data);
1082 iparams.icid = cq->icid;
1083 dev->ops->rdma_destroy_cq(dev->rdma_ctx, &iparams, &oparams);
1084 dev->ops->common->chain_free(dev->cdev, &cq->pbl);
1087 qedr_free_pbl(dev, &cq->q.pbl_info, cq->q.pbl_tbl);
1088 ib_umem_release(cq->q.umem);
1090 if (cq->q.db_rec_data) {
1091 qedr_db_recovery_del(dev, cq->q.db_addr,
1092 &cq->q.db_rec_data->db_data);
1093 rdma_user_mmap_entry_remove(cq->q.db_mmap_entry);
1096 qedr_db_recovery_del(dev, cq->db_addr, &cq->db.data);
1099 /* We don't want the IRQ handler to handle a non-existing CQ so we
1100 * wait until all CNQ interrupts, if any, are received. This will always
1101 * happen and will always happen very fast. If not, then a serious error
1102 * has occurred. That is why we can use a long delay.
1103 * We spin for a short time so we don't lose time on context switching
1104 * in case all the completions are handled in that span. Otherwise
1105 * we sleep for a while and check again. Since the CNQ may be
1106 * associated with (only) the current CPU we use msleep to allow the
1107 * current CPU to be freed.
1108 * The CNQ notification is increased in qedr_irq_handler().
1110 iter = QEDR_DESTROY_CQ_MAX_ITERATIONS;
1111 while (oparams.num_cq_notif != READ_ONCE(cq->cnq_notif) && iter) {
1112 udelay(QEDR_DESTROY_CQ_ITER_DURATION);
1116 iter = QEDR_DESTROY_CQ_MAX_ITERATIONS;
1117 while (oparams.num_cq_notif != READ_ONCE(cq->cnq_notif) && iter) {
1118 msleep(QEDR_DESTROY_CQ_ITER_DURATION);
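/* With both constants set to 10, this bounds the wait at roughly
 * 10 * 10 us of spinning plus 10 * 10 ms of sleeping, i.e. about 100 ms
 * worst case before giving up.
 */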
1122 /* Note that we don't need to have explicit code to wait for the
1123 * completion of the event handler because it is invoked from the EQ.
1124 * Since the destroy CQ ramrod has also been received on the EQ we can
1125 * be certain that there's no event handler in process.
1130 static inline int get_gid_info_from_table(struct ib_qp *ibqp,
1131 struct ib_qp_attr *attr,
1133 struct qed_rdma_modify_qp_in_params
1136 const struct ib_gid_attr *gid_attr;
1137 enum rdma_network_type nw_type;
1138 const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
1143 gid_attr = grh->sgid_attr;
1144 ret = rdma_read_gid_l2_fields(gid_attr, &qp_params->vlan_id, NULL);
1148 nw_type = rdma_gid_attr_network_type(gid_attr);
1150 case RDMA_NETWORK_IPV6:
1151 memcpy(&qp_params->sgid.bytes[0], &gid_attr->gid.raw[0],
1152 sizeof(qp_params->sgid));
1153 memcpy(&qp_params->dgid.bytes[0],
1155 sizeof(qp_params->dgid));
1156 qp_params->roce_mode = ROCE_V2_IPV6;
1157 SET_FIELD(qp_params->modify_flags,
1158 QED_ROCE_MODIFY_QP_VALID_ROCE_MODE, 1);
1160 case RDMA_NETWORK_ROCE_V1:
1161 memcpy(&qp_params->sgid.bytes[0], &gid_attr->gid.raw[0],
1162 sizeof(qp_params->sgid));
1163 memcpy(&qp_params->dgid.bytes[0],
1165 sizeof(qp_params->dgid));
1166 qp_params->roce_mode = ROCE_V1;
1168 case RDMA_NETWORK_IPV4:
1169 memset(&qp_params->sgid, 0, sizeof(qp_params->sgid));
1170 memset(&qp_params->dgid, 0, sizeof(qp_params->dgid));
1171 ipv4_addr = qedr_get_ipv4_from_gid(gid_attr->gid.raw);
1172 qp_params->sgid.ipv4_addr = ipv4_addr;
1174 qedr_get_ipv4_from_gid(grh->dgid.raw);
1175 qp_params->dgid.ipv4_addr = ipv4_addr;
1176 SET_FIELD(qp_params->modify_flags,
1177 QED_ROCE_MODIFY_QP_VALID_ROCE_MODE, 1);
1178 qp_params->roce_mode = ROCE_V2_IPV4;
1184 for (i = 0; i < 4; i++) {
1185 qp_params->sgid.dwords[i] = ntohl(qp_params->sgid.dwords[i]);
1186 qp_params->dgid.dwords[i] = ntohl(qp_params->dgid.dwords[i]);
1189 if (qp_params->vlan_id >= VLAN_CFI_MASK)
1190 qp_params->vlan_id = 0;
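/* VLAN IDs are 12 bits; a value at or above VLAN_CFI_MASK (0x1000) means
 * no valid tag was reported for this GID, so treat it as untagged.
 */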
1195 static int qedr_check_qp_attrs(struct ib_pd *ibpd, struct qedr_dev *dev,
1196 struct ib_qp_init_attr *attrs,
1197 struct ib_udata *udata)
1199 struct qedr_device_attr *qattr = &dev->attr;
1201 /* QP0... attrs->qp_type == IB_QPT_GSI */
1202 if (attrs->qp_type != IB_QPT_RC &&
1203 attrs->qp_type != IB_QPT_GSI &&
1204 attrs->qp_type != IB_QPT_XRC_INI &&
1205 attrs->qp_type != IB_QPT_XRC_TGT) {
1206 DP_DEBUG(dev, QEDR_MSG_QP,
1207 "create qp: unsupported qp type=0x%x requested\n",
1212 if (attrs->cap.max_send_wr > qattr->max_sqe) {
1214 "create qp: cannot create a SQ with %d elements (max_send_wr=0x%x)\n",
1215 attrs->cap.max_send_wr, qattr->max_sqe);
1219 if (attrs->cap.max_inline_data > qattr->max_inline) {
1221 "create qp: unsupported inline data size=0x%x requested (max_inline=0x%x)\n",
1222 attrs->cap.max_inline_data, qattr->max_inline);
1226 if (attrs->cap.max_send_sge > qattr->max_sge) {
1228 "create qp: unsupported send_sge=0x%x requested (max_send_sge=0x%x)\n",
1229 attrs->cap.max_send_sge, qattr->max_sge);
1233 if (attrs->cap.max_recv_sge > qattr->max_sge) {
1235 "create qp: unsupported recv_sge=0x%x requested (max_recv_sge=0x%x)\n",
1236 attrs->cap.max_recv_sge, qattr->max_sge);
1240 /* verify consumer QPs are not trying to use GSI QP's CQ.
1241 * TGT QP isn't associated with RQ/SQ
1243 if ((attrs->qp_type != IB_QPT_GSI) && (dev->gsi_qp_created) &&
1244 (attrs->qp_type != IB_QPT_XRC_TGT) &&
1245 (attrs->qp_type != IB_QPT_XRC_INI)) {
1246 struct qedr_cq *send_cq = get_qedr_cq(attrs->send_cq);
1247 struct qedr_cq *recv_cq = get_qedr_cq(attrs->recv_cq);
1249 if ((send_cq->cq_type == QEDR_CQ_TYPE_GSI) ||
1250 (recv_cq->cq_type == QEDR_CQ_TYPE_GSI)) {
1252 "create qp: consumer QP cannot use GSI CQs.\n");
1260 static int qedr_copy_srq_uresp(struct qedr_dev *dev,
1261 struct qedr_srq *srq, struct ib_udata *udata)
1263 struct qedr_create_srq_uresp uresp = {};
1266 uresp.srq_id = srq->srq_id;
1268 rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
1270 DP_ERR(dev, "create srq: problem copying data to user space\n");
1275 static void qedr_copy_rq_uresp(struct qedr_dev *dev,
1276 struct qedr_create_qp_uresp *uresp,
1279 /* iWARP requires two doorbells per RQ. */
1280 if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
1281 uresp->rq_db_offset =
1282 DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_IWARP_RQ_PROD);
1283 uresp->rq_db2_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_FLAGS);
1285 uresp->rq_db_offset =
1286 DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_ROCE_RQ_PROD);
1289 uresp->rq_icid = qp->icid;
1290 if (qp->urq.db_mmap_entry)
1291 uresp->rq_db_rec_addr =
1292 rdma_user_mmap_get_offset(qp->urq.db_mmap_entry);
1295 static void qedr_copy_sq_uresp(struct qedr_dev *dev,
1296 struct qedr_create_qp_uresp *uresp,
1299 uresp->sq_db_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD);
1301 /* iWARP uses the same cid for rq and sq */
1302 if (rdma_protocol_iwarp(&dev->ibdev, 1))
1303 uresp->sq_icid = qp->icid;
1305 uresp->sq_icid = qp->icid + 1;
1307 if (qp->usq.db_mmap_entry)
1308 uresp->sq_db_rec_addr =
1309 rdma_user_mmap_get_offset(qp->usq.db_mmap_entry);
1312 static int qedr_copy_qp_uresp(struct qedr_dev *dev,
1313 struct qedr_qp *qp, struct ib_udata *udata,
1314 struct qedr_create_qp_uresp *uresp)
1318 memset(uresp, 0, sizeof(*uresp));
1320 if (qedr_qp_has_sq(qp))
1321 qedr_copy_sq_uresp(dev, uresp, qp);
1323 if (qedr_qp_has_rq(qp))
1324 qedr_copy_rq_uresp(dev, uresp, qp);
1326 uresp->atomic_supported = dev->atomic_cap != IB_ATOMIC_NONE;
1327 uresp->qp_id = qp->qp_id;
1329 rc = qedr_ib_copy_to_udata(udata, uresp, sizeof(*uresp));
1332 "create qp: failed a copy to user space with qp icid=0x%x.\n",
1338 static void qedr_set_common_qp_params(struct qedr_dev *dev,
1341 struct ib_qp_init_attr *attrs)
1343 spin_lock_init(&qp->q_lock);
1344 if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
1345 kref_init(&qp->refcnt);
1346 init_completion(&qp->iwarp_cm_comp);
1350 qp->qp_type = attrs->qp_type;
1351 qp->max_inline_data = attrs->cap.max_inline_data;
1352 qp->state = QED_ROCE_QP_STATE_RESET;
1353 qp->signaled = (attrs->sq_sig_type == IB_SIGNAL_ALL_WR) ? true : false;
1355 if (qedr_qp_has_sq(qp)) {
1356 qp->sq.max_sges = attrs->cap.max_send_sge;
1357 qp->sq_cq = get_qedr_cq(attrs->send_cq);
1358 DP_DEBUG(dev, QEDR_MSG_QP,
1359 "SQ params:\tsq_max_sges = %d, sq_cq_id = %d\n",
1360 qp->sq.max_sges, qp->sq_cq->icid);
1364 qp->srq = get_qedr_srq(attrs->srq);
1366 if (qedr_qp_has_rq(qp)) {
1367 qp->rq_cq = get_qedr_cq(attrs->recv_cq);
1368 qp->rq.max_sges = attrs->cap.max_recv_sge;
1369 DP_DEBUG(dev, QEDR_MSG_QP,
1370 "RQ params:\trq_max_sges = %d, rq_cq_id = %d\n",
1371 qp->rq.max_sges, qp->rq_cq->icid);
1374 DP_DEBUG(dev, QEDR_MSG_QP,
1375 "QP params:\tpd = %d, qp_type = %d, max_inline_data = %d, state = %d, signaled = %d, use_srq=%d\n",
1376 pd->pd_id, qp->qp_type, qp->max_inline_data,
1377 qp->state, qp->signaled, (attrs->srq) ? 1 : 0);
1378 DP_DEBUG(dev, QEDR_MSG_QP,
1379 "SQ params:\tsq_max_sges = %d, sq_cq_id = %d\n",
1380 qp->sq.max_sges, qp->sq_cq->icid);
1383 static int qedr_set_roce_db_info(struct qedr_dev *dev, struct qedr_qp *qp)
1387 if (qedr_qp_has_sq(qp)) {
1388 qp->sq.db = dev->db_addr +
1389 DB_ADDR_SHIFT(DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD);
1390 qp->sq.db_data.data.icid = qp->icid + 1;
1391 rc = qedr_db_recovery_add(dev, qp->sq.db, &qp->sq.db_data,
1392 DB_REC_WIDTH_32B, DB_REC_KERNEL);
1397 if (qedr_qp_has_rq(qp)) {
1398 qp->rq.db = dev->db_addr +
1399 DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_ROCE_RQ_PROD);
1400 qp->rq.db_data.data.icid = qp->icid;
1401 rc = qedr_db_recovery_add(dev, qp->rq.db, &qp->rq.db_data,
1402 DB_REC_WIDTH_32B, DB_REC_KERNEL);
1403 if (rc && qedr_qp_has_sq(qp))
1404 qedr_db_recovery_del(dev, qp->sq.db, &qp->sq.db_data);
1410 static int qedr_check_srq_params(struct qedr_dev *dev,
1411 struct ib_srq_init_attr *attrs,
1412 struct ib_udata *udata)
1414 struct qedr_device_attr *qattr = &dev->attr;
1416 if (attrs->attr.max_wr > qattr->max_srq_wr) {
1418 "create srq: unsupported srq_wr=0x%x requested (max_srq_wr=0x%x)\n",
1419 attrs->attr.max_wr, qattr->max_srq_wr);
1423 if (attrs->attr.max_sge > qattr->max_sge) {
1425 "create srq: unsupported sge=0x%x requested (max_srq_sge=0x%x)\n",
1426 attrs->attr.max_sge, qattr->max_sge);
1429 if (!udata && attrs->srq_type == IB_SRQT_XRC) {
1430 DP_ERR(dev, "XRC SRQs are not supported in kernel-space\n");
1437 static void qedr_free_srq_user_params(struct qedr_srq *srq)
1439 qedr_free_pbl(srq->dev, &srq->usrq.pbl_info, srq->usrq.pbl_tbl);
1440 ib_umem_release(srq->usrq.umem);
1441 ib_umem_release(srq->prod_umem);
1444 static void qedr_free_srq_kernel_params(struct qedr_srq *srq)
1446 struct qedr_srq_hwq_info *hw_srq = &srq->hw_srq;
1447 struct qedr_dev *dev = srq->dev;
1449 dev->ops->common->chain_free(dev->cdev, &hw_srq->pbl);
1451 dma_free_coherent(&dev->pdev->dev, sizeof(struct rdma_srq_producers),
1452 hw_srq->virt_prod_pair_addr,
1453 hw_srq->phy_prod_pair_addr);
1456 static int qedr_init_srq_user_params(struct ib_udata *udata,
1457 struct qedr_srq *srq,
1458 struct qedr_create_srq_ureq *ureq,
1461 struct scatterlist *sg;
1464 rc = qedr_init_user_queue(udata, srq->dev, &srq->usrq, ureq->srq_addr,
1465 ureq->srq_len, false, access, 1);
1469 srq->prod_umem = ib_umem_get(srq->ibsrq.device, ureq->prod_pair_addr,
1470 sizeof(struct rdma_srq_producers), access);
1471 if (IS_ERR(srq->prod_umem)) {
1472 qedr_free_pbl(srq->dev, &srq->usrq.pbl_info, srq->usrq.pbl_tbl);
1473 ib_umem_release(srq->usrq.umem);
1475 "create srq: failed ib_umem_get for producer, got %ld\n",
1476 PTR_ERR(srq->prod_umem));
1477 return PTR_ERR(srq->prod_umem);
1480 sg = srq->prod_umem->sg_head.sgl;
1481 srq->hw_srq.phy_prod_pair_addr = sg_dma_address(sg);
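/* The producer pair struct is only a few bytes, so it fits within the
 * first SG entry of the umem; that entry's DMA address is where the FW
 * will write the producers.
 */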
1486 static int qedr_alloc_srq_kernel_params(struct qedr_srq *srq,
1487 struct qedr_dev *dev,
1488 struct ib_srq_init_attr *init_attr)
1490 struct qedr_srq_hwq_info *hw_srq = &srq->hw_srq;
1491 struct qed_chain_init_params params = {
1492 .mode = QED_CHAIN_MODE_PBL,
1493 .intended_use = QED_CHAIN_USE_TO_CONSUME_PRODUCE,
1494 .cnt_type = QED_CHAIN_CNT_TYPE_U32,
1495 .elem_size = QEDR_SRQ_WQE_ELEM_SIZE,
1497 dma_addr_t phy_prod_pair_addr;
1502 va = dma_alloc_coherent(&dev->pdev->dev,
1503 sizeof(struct rdma_srq_producers),
1504 &phy_prod_pair_addr, GFP_KERNEL);
1507 "create srq: failed to allocate dma memory for producer\n");
1511 hw_srq->phy_prod_pair_addr = phy_prod_pair_addr;
1512 hw_srq->virt_prod_pair_addr = va;
1514 num_elems = init_attr->attr.max_wr * RDMA_MAX_SRQ_WQE_SIZE;
1515 params.num_elems = num_elems;
1517 rc = dev->ops->common->chain_alloc(dev->cdev, &hw_srq->pbl, &params);
1521 hw_srq->num_elems = num_elems;
1526 dma_free_coherent(&dev->pdev->dev, sizeof(struct rdma_srq_producers),
1527 va, phy_prod_pair_addr);
1531 int qedr_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *init_attr,
1532 struct ib_udata *udata)
1534 struct qed_rdma_destroy_srq_in_params destroy_in_params;
1535 struct qed_rdma_create_srq_in_params in_params = {};
1536 struct qedr_dev *dev = get_qedr_dev(ibsrq->device);
1537 struct qed_rdma_create_srq_out_params out_params;
1538 struct qedr_pd *pd = get_qedr_pd(ibsrq->pd);
1539 struct qedr_create_srq_ureq ureq = {};
1540 u64 pbl_base_addr, phy_prod_pair_addr;
1541 struct qedr_srq_hwq_info *hw_srq;
1542 u32 page_cnt, page_size;
1543 struct qedr_srq *srq = get_qedr_srq(ibsrq);
1546 DP_DEBUG(dev, QEDR_MSG_QP,
1547 "create SRQ called from %s (pd %p)\n",
1548 (udata) ? "User lib" : "kernel", pd);
1550 rc = qedr_check_srq_params(dev, init_attr, udata);
1555 srq->is_xrc = (init_attr->srq_type == IB_SRQT_XRC);
1556 hw_srq = &srq->hw_srq;
1557 spin_lock_init(&srq->lock);
1559 hw_srq->max_wr = init_attr->attr.max_wr;
1560 hw_srq->max_sges = init_attr->attr.max_sge;
1563 if (ib_copy_from_udata(&ureq, udata, min(sizeof(ureq),
1566 "create srq: problem copying data from user space\n");
1570 rc = qedr_init_srq_user_params(udata, srq, &ureq, 0);
1574 page_cnt = srq->usrq.pbl_info.num_pbes;
1575 pbl_base_addr = srq->usrq.pbl_tbl->pa;
1576 phy_prod_pair_addr = hw_srq->phy_prod_pair_addr;
1577 page_size = PAGE_SIZE;
1579 struct qed_chain *pbl;
1581 rc = qedr_alloc_srq_kernel_params(srq, dev, init_attr);
1586 page_cnt = qed_chain_get_page_cnt(pbl);
1587 pbl_base_addr = qed_chain_get_pbl_phys(pbl);
1588 phy_prod_pair_addr = hw_srq->phy_prod_pair_addr;
1589 page_size = QED_CHAIN_PAGE_SIZE;
1592 in_params.pd_id = pd->pd_id;
1593 in_params.pbl_base_addr = pbl_base_addr;
1594 in_params.prod_pair_addr = phy_prod_pair_addr;
1595 in_params.num_pages = page_cnt;
1596 in_params.page_size = page_size;
1598 struct qedr_xrcd *xrcd = get_qedr_xrcd(init_attr->ext.xrc.xrcd);
1599 struct qedr_cq *cq = get_qedr_cq(init_attr->ext.cq);
1601 in_params.is_xrc = 1;
1602 in_params.xrcd_id = xrcd->xrcd_id;
1603 in_params.cq_cid = cq->icid;
1606 rc = dev->ops->rdma_create_srq(dev->rdma_ctx, &in_params, &out_params);
1610 srq->srq_id = out_params.srq_id;
1613 rc = qedr_copy_srq_uresp(dev, srq, udata);
1618 rc = xa_insert_irq(&dev->srqs, srq->srq_id, srq, GFP_KERNEL);
1622 DP_DEBUG(dev, QEDR_MSG_SRQ,
1623 "create srq: created srq with srq_id=0x%0x\n", srq->srq_id);
1627 destroy_in_params.srq_id = srq->srq_id;
1629 dev->ops->rdma_destroy_srq(dev->rdma_ctx, &destroy_in_params);
1632 qedr_free_srq_user_params(srq);
1634 qedr_free_srq_kernel_params(srq);
1639 int qedr_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
1641 struct qed_rdma_destroy_srq_in_params in_params = {};
1642 struct qedr_dev *dev = get_qedr_dev(ibsrq->device);
1643 struct qedr_srq *srq = get_qedr_srq(ibsrq);
1645 xa_erase_irq(&dev->srqs, srq->srq_id);
1646 in_params.srq_id = srq->srq_id;
1647 in_params.is_xrc = srq->is_xrc;
1648 dev->ops->rdma_destroy_srq(dev->rdma_ctx, &in_params);
1651 qedr_free_srq_user_params(srq);
1653 qedr_free_srq_kernel_params(srq);
1655 DP_DEBUG(dev, QEDR_MSG_SRQ,
1656 "destroy srq: destroyed srq with srq_id=0x%0x\n",
1661 int qedr_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
1662 enum ib_srq_attr_mask attr_mask, struct ib_udata *udata)
1664 struct qed_rdma_modify_srq_in_params in_params = {};
1665 struct qedr_dev *dev = get_qedr_dev(ibsrq->device);
1666 struct qedr_srq *srq = get_qedr_srq(ibsrq);
1669 if (attr_mask & IB_SRQ_MAX_WR) {
1671 "modify srq: invalid attribute mask=0x%x specified for %p\n",
1676 if (attr_mask & IB_SRQ_LIMIT) {
1677 if (attr->srq_limit >= srq->hw_srq.max_wr) {
1679 "modify srq: invalid srq_limit=0x%x (max_srq_limit=0x%x)\n",
1680 attr->srq_limit, srq->hw_srq.max_wr);
1684 in_params.srq_id = srq->srq_id;
1685 in_params.wqe_limit = attr->srq_limit;
1686 rc = dev->ops->rdma_modify_srq(dev->rdma_ctx, &in_params);
1691 srq->srq_limit = attr->srq_limit;
1693 DP_DEBUG(dev, QEDR_MSG_SRQ,
1694 "modify srq: modified srq with srq_id=0x%0x\n", srq->srq_id);
1699 static enum qed_rdma_qp_type qedr_ib_to_qed_qp_type(enum ib_qp_type ib_qp_type)
1701 switch (ib_qp_type) {
1703 return QED_RDMA_QP_TYPE_RC;
1704 case IB_QPT_XRC_INI:
1705 return QED_RDMA_QP_TYPE_XRC_INI;
1706 case IB_QPT_XRC_TGT:
1707 return QED_RDMA_QP_TYPE_XRC_TGT;
1709 return QED_RDMA_QP_TYPE_INVAL;
1714 qedr_init_common_qp_in_params(struct qedr_dev *dev,
1717 struct ib_qp_init_attr *attrs,
1718 bool fmr_and_reserved_lkey,
1719 struct qed_rdma_create_qp_in_params *params)
1721 /* QP handle to be written in an async event */
1722 params->qp_handle_async_lo = lower_32_bits((uintptr_t) qp);
1723 params->qp_handle_async_hi = upper_32_bits((uintptr_t) qp);
1725 params->signal_all = (attrs->sq_sig_type == IB_SIGNAL_ALL_WR);
1726 params->fmr_and_reserved_lkey = fmr_and_reserved_lkey;
1727 params->qp_type = qedr_ib_to_qed_qp_type(attrs->qp_type);
1728 params->stats_queue = 0;
1731 params->pd = pd->pd_id;
1732 params->dpi = pd->uctx ? pd->uctx->dpi : dev->dpi;
1735 if (qedr_qp_has_sq(qp))
1736 params->sq_cq_id = get_qedr_cq(attrs->send_cq)->icid;
1738 if (qedr_qp_has_rq(qp))
1739 params->rq_cq_id = get_qedr_cq(attrs->recv_cq)->icid;
1741 if (qedr_qp_has_srq(qp)) {
1742 params->rq_cq_id = get_qedr_cq(attrs->recv_cq)->icid;
1743 params->srq_id = qp->srq->srq_id;
1744 params->use_srq = true;
1747 params->use_srq = false;
1751 static inline void qedr_qp_user_print(struct qedr_dev *dev, struct qedr_qp *qp)
1753 DP_DEBUG(dev, QEDR_MSG_QP, "create qp: successfully created user QP. "
1761 qedr_qp_has_sq(qp) ? qp->usq.buf_addr : 0x0,
1762 qedr_qp_has_sq(qp) ? qp->usq.buf_len : 0,
1763 qedr_qp_has_rq(qp) ? qp->urq.buf_addr : 0x0,
1764 qedr_qp_has_sq(qp) ? qp->urq.buf_len : 0);
1768 qedr_iwarp_populate_user_qp(struct qedr_dev *dev,
1770 struct qed_rdma_create_qp_out_params *out_params)
1772 qp->usq.pbl_tbl->va = out_params->sq_pbl_virt;
1773 qp->usq.pbl_tbl->pa = out_params->sq_pbl_phys;
1775 qedr_populate_pbls(dev, qp->usq.umem, qp->usq.pbl_tbl,
1776 &qp->usq.pbl_info, FW_PAGE_SHIFT);
1778 qp->urq.pbl_tbl->va = out_params->rq_pbl_virt;
1779 qp->urq.pbl_tbl->pa = out_params->rq_pbl_phys;
1782 qedr_populate_pbls(dev, qp->urq.umem, qp->urq.pbl_tbl,
1783 &qp->urq.pbl_info, FW_PAGE_SHIFT);
1786 static void qedr_cleanup_user(struct qedr_dev *dev,
1787 struct qedr_ucontext *ctx,
1790 if (qedr_qp_has_sq(qp)) {
1791 ib_umem_release(qp->usq.umem);
1792 qp->usq.umem = NULL;
1795 if (qedr_qp_has_rq(qp)) {
1796 ib_umem_release(qp->urq.umem);
1797 qp->urq.umem = NULL;
1800 if (rdma_protocol_roce(&dev->ibdev, 1)) {
1801 qedr_free_pbl(dev, &qp->usq.pbl_info, qp->usq.pbl_tbl);
1802 qedr_free_pbl(dev, &qp->urq.pbl_info, qp->urq.pbl_tbl);
1804 kfree(qp->usq.pbl_tbl);
1805 kfree(qp->urq.pbl_tbl);
1808 if (qp->usq.db_rec_data) {
1809 qedr_db_recovery_del(dev, qp->usq.db_addr,
1810 &qp->usq.db_rec_data->db_data);
1811 rdma_user_mmap_entry_remove(qp->usq.db_mmap_entry);
1814 if (qp->urq.db_rec_data) {
1815 qedr_db_recovery_del(dev, qp->urq.db_addr,
1816 &qp->urq.db_rec_data->db_data);
1817 rdma_user_mmap_entry_remove(qp->urq.db_mmap_entry);
1820 if (rdma_protocol_iwarp(&dev->ibdev, 1))
1821 qedr_db_recovery_del(dev, qp->urq.db_rec_db2_addr,
1822 &qp->urq.db_rec_db2_data);
1825 static int qedr_create_user_qp(struct qedr_dev *dev,
1828 struct ib_udata *udata,
1829 struct ib_qp_init_attr *attrs)
1831 struct qed_rdma_create_qp_in_params in_params;
1832 struct qed_rdma_create_qp_out_params out_params;
1833 struct qedr_create_qp_uresp uresp = {};
1834 struct qedr_create_qp_ureq ureq = {};
1835 int alloc_and_init = rdma_protocol_roce(&dev->ibdev, 1);
1836 struct qedr_ucontext *ctx = NULL;
1837 struct qedr_pd *pd = NULL;
1840 qp->create_type = QEDR_QP_CREATE_USER;
1843 pd = get_qedr_pd(ibpd);
1848 rc = ib_copy_from_udata(&ureq, udata, min(sizeof(ureq),
1851 DP_ERR(dev, "Problem copying data from user space\n");
1856 if (qedr_qp_has_sq(qp)) {
1857 /* SQ - read access only (0) */
1858 rc = qedr_init_user_queue(udata, dev, &qp->usq, ureq.sq_addr,
1859 ureq.sq_len, true, 0, alloc_and_init);
1864 if (qedr_qp_has_rq(qp)) {
1865 /* RQ - read access only (0) */
1866 rc = qedr_init_user_queue(udata, dev, &qp->urq, ureq.rq_addr,
1867 ureq.rq_len, true, 0, alloc_and_init);
1872 memset(&in_params, 0, sizeof(in_params));
1873 qedr_init_common_qp_in_params(dev, pd, qp, attrs, false, &in_params);
1874 in_params.qp_handle_lo = ureq.qp_handle_lo;
1875 in_params.qp_handle_hi = ureq.qp_handle_hi;
1877 if (qp->qp_type == IB_QPT_XRC_TGT) {
1878 struct qedr_xrcd *xrcd = get_qedr_xrcd(attrs->xrcd);
1880 in_params.xrcd_id = xrcd->xrcd_id;
1881 in_params.qp_handle_lo = qp->qp_id;
1882 in_params.use_srq = 1;
1885 if (qedr_qp_has_sq(qp)) {
1886 in_params.sq_num_pages = qp->usq.pbl_info.num_pbes;
1887 in_params.sq_pbl_ptr = qp->usq.pbl_tbl->pa;
1890 if (qedr_qp_has_rq(qp)) {
1891 in_params.rq_num_pages = qp->urq.pbl_info.num_pbes;
1892 in_params.rq_pbl_ptr = qp->urq.pbl_tbl->pa;
1896 SET_FIELD(in_params.flags, QED_ROCE_EDPM_MODE, ctx->edpm_mode);
1898 qp->qed_qp = dev->ops->rdma_create_qp(dev->rdma_ctx,
1899 &in_params, &out_params);
1906 if (rdma_protocol_iwarp(&dev->ibdev, 1))
1907 qedr_iwarp_populate_user_qp(dev, qp, &out_params);
1909 qp->qp_id = out_params.qp_id;
1910 qp->icid = out_params.icid;
1913 rc = qedr_copy_qp_uresp(dev, qp, udata, &uresp);
1918 /* The db offset was calculated in copy_qp_uresp(); now set it in the user queue */
1919 if (qedr_qp_has_sq(qp)) {
1920 qp->usq.db_addr = ctx->dpi_addr + uresp.sq_db_offset;
1921 qp->sq.max_wr = attrs->cap.max_send_wr;
1922 rc = qedr_db_recovery_add(dev, qp->usq.db_addr,
1923 &qp->usq.db_rec_data->db_data,
1930 if (qedr_qp_has_rq(qp)) {
1931 qp->urq.db_addr = ctx->dpi_addr + uresp.rq_db_offset;
1932 qp->rq.max_wr = attrs->cap.max_recv_wr;
1933 rc = qedr_db_recovery_add(dev, qp->urq.db_addr,
1934 &qp->urq.db_rec_data->db_data,
1941 if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
1942 qp->urq.db_rec_db2_addr = ctx->dpi_addr + uresp.rq_db2_offset;
1944 /* Calculate the db_rec_db2 data since it is constant, so there is
1945 * no need to reflect it from user space
1947 qp->urq.db_rec_db2_data.data.icid = cpu_to_le16(qp->icid);
1948 qp->urq.db_rec_db2_data.data.value =
1949 cpu_to_le16(DQ_TCM_IWARP_POST_RQ_CF_CMD);
1951 rc = qedr_db_recovery_add(dev, qp->urq.db_rec_db2_addr,
1952 &qp->urq.db_rec_db2_data,
1958 qedr_qp_user_print(dev, qp);
1961 rc = dev->ops->rdma_destroy_qp(dev->rdma_ctx, qp->qed_qp);
1963 DP_ERR(dev, "create qp: fatal fault. rc=%d", rc);
1966 qedr_cleanup_user(dev, ctx, qp);
1970 static int qedr_set_iwarp_db_info(struct qedr_dev *dev, struct qedr_qp *qp)
1974 qp->sq.db = dev->db_addr +
1975 DB_ADDR_SHIFT(DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD);
1976 qp->sq.db_data.data.icid = qp->icid;
1978 rc = qedr_db_recovery_add(dev, qp->sq.db,
1985 qp->rq.db = dev->db_addr +
1986 DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_IWARP_RQ_PROD);
1987 qp->rq.db_data.data.icid = qp->icid;
1988 qp->rq.iwarp_db2 = dev->db_addr +
1989 DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_FLAGS);
1990 qp->rq.iwarp_db2_data.data.icid = qp->icid;
1991 qp->rq.iwarp_db2_data.data.value = DQ_TCM_IWARP_POST_RQ_CF_CMD;
1993 rc = qedr_db_recovery_add(dev, qp->rq.db,
2000 rc = qedr_db_recovery_add(dev, qp->rq.iwarp_db2,
2001 &qp->rq.iwarp_db2_data,
2008 qedr_roce_create_kernel_qp(struct qedr_dev *dev,
2010 struct qed_rdma_create_qp_in_params *in_params,
2011 u32 n_sq_elems, u32 n_rq_elems)
2013 struct qed_rdma_create_qp_out_params out_params;
2014 struct qed_chain_init_params params = {
2015 .mode = QED_CHAIN_MODE_PBL,
2016 .cnt_type = QED_CHAIN_CNT_TYPE_U32,
2020 params.intended_use = QED_CHAIN_USE_TO_PRODUCE;
2021 params.num_elems = n_sq_elems;
2022 params.elem_size = QEDR_SQE_ELEMENT_SIZE;
2024 rc = dev->ops->common->chain_alloc(dev->cdev, &qp->sq.pbl, &params);
2028 in_params->sq_num_pages = qed_chain_get_page_cnt(&qp->sq.pbl);
2029 in_params->sq_pbl_ptr = qed_chain_get_pbl_phys(&qp->sq.pbl);
2031 params.intended_use = QED_CHAIN_USE_TO_CONSUME_PRODUCE;
2032 params.num_elems = n_rq_elems;
2033 params.elem_size = QEDR_RQE_ELEMENT_SIZE;
2035 rc = dev->ops->common->chain_alloc(dev->cdev, &qp->rq.pbl, &params);
2039 in_params->rq_num_pages = qed_chain_get_page_cnt(&qp->rq.pbl);
2040 in_params->rq_pbl_ptr = qed_chain_get_pbl_phys(&qp->rq.pbl);
2042 qp->qed_qp = dev->ops->rdma_create_qp(dev->rdma_ctx,
2043 in_params, &out_params);
2048 qp->qp_id = out_params.qp_id;
2049 qp->icid = out_params.icid;
2051 return qedr_set_roce_db_info(dev, qp);
2055 qedr_iwarp_create_kernel_qp(struct qedr_dev *dev,
2057 struct qed_rdma_create_qp_in_params *in_params,
2058 u32 n_sq_elems, u32 n_rq_elems)
2060 struct qed_rdma_create_qp_out_params out_params;
2061 struct qed_chain_init_params params = {
2062 .mode = QED_CHAIN_MODE_PBL,
2063 .cnt_type = QED_CHAIN_CNT_TYPE_U32,
2067 in_params->sq_num_pages = QED_CHAIN_PAGE_CNT(n_sq_elems,
2068 QEDR_SQE_ELEMENT_SIZE,
2069 QED_CHAIN_PAGE_SIZE,
2070 QED_CHAIN_MODE_PBL);
2071 in_params->rq_num_pages = QED_CHAIN_PAGE_CNT(n_rq_elems,
2072 QEDR_RQE_ELEMENT_SIZE,
2073 QED_CHAIN_PAGE_SIZE,
2074 QED_CHAIN_MODE_PBL);
2076 qp->qed_qp = dev->ops->rdma_create_qp(dev->rdma_ctx,
2077 in_params, &out_params);
2082 /* Now we allocate the chain */
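/* Note the ordering relative to the RoCE path above: for iWARP, qed
 * created the QP first and handed back PBL memory in out_params, so the
 * chains below are built on those external PBLs (ext_pbl_virt/ext_pbl_phys)
 * instead of freshly allocated ones.
 */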
2084 params.intended_use = QED_CHAIN_USE_TO_PRODUCE;
2085 params.num_elems = n_sq_elems;
2086 params.elem_size = QEDR_SQE_ELEMENT_SIZE;
2087 params.ext_pbl_virt = out_params.sq_pbl_virt;
2088 params.ext_pbl_phys = out_params.sq_pbl_phys;
2090 rc = dev->ops->common->chain_alloc(dev->cdev, &qp->sq.pbl, &params);
2094 params.intended_use = QED_CHAIN_USE_TO_CONSUME_PRODUCE;
2095 params.num_elems = n_rq_elems;
2096 params.elem_size = QEDR_RQE_ELEMENT_SIZE;
2097 params.ext_pbl_virt = out_params.rq_pbl_virt;
2098 params.ext_pbl_phys = out_params.rq_pbl_phys;
2100 rc = dev->ops->common->chain_alloc(dev->cdev, &qp->rq.pbl, &params);
2104 qp->qp_id = out_params.qp_id;
2105 qp->icid = out_params.icid;
2107 return qedr_set_iwarp_db_info(dev, qp);
2110 dev->ops->rdma_destroy_qp(dev->rdma_ctx, qp->qed_qp);
2115 static void qedr_cleanup_kernel(struct qedr_dev *dev, struct qedr_qp *qp)
2117 dev->ops->common->chain_free(dev->cdev, &qp->sq.pbl);
2118 kfree(qp->wqe_wr_id);
2120 dev->ops->common->chain_free(dev->cdev, &qp->rq.pbl);
2121 kfree(qp->rqe_wr_id);
2123 /* GSI qp is not registered with the db mechanism, so no need to delete */
2124 if (qp->qp_type == IB_QPT_GSI)
2127 qedr_db_recovery_del(dev, qp->sq.db, &qp->sq.db_data);
2130 qedr_db_recovery_del(dev, qp->rq.db, &qp->rq.db_data);
2132 if (rdma_protocol_iwarp(&dev->ibdev, 1))
2133 qedr_db_recovery_del(dev, qp->rq.iwarp_db2,
2134 &qp->rq.iwarp_db2_data);
2138 static int qedr_create_kernel_qp(struct qedr_dev *dev,
2141 struct ib_qp_init_attr *attrs)
2143 struct qed_rdma_create_qp_in_params in_params;
2144 struct qedr_pd *pd = get_qedr_pd(ibpd);
2150 memset(&in_params, 0, sizeof(in_params));
2151 qp->create_type = QEDR_QP_CREATE_KERNEL;
2153 /* A single work request may take up to QEDR_MAX_SQ_WQE_SIZE elements in
2154 * the ring. The ring should allow at least a single WR, even if the
2155 * user requested none, due to allocation issues.
2156 * We should add an extra WR since the prod and cons indices of
2157 * wqe_wr_id are managed in such a way that the WQ is considered full
2158 * when (prod+1)%max_wr==cons. We currently don't do that because we
2159 * double the number of entries due to an iSER issue that pushes far more
2160 * WRs than indicated. If we decline its ib_post_send() then we get
2161 * error prints in the dmesg we'd like to avoid.
2163 qp->sq.max_wr = min_t(u32, attrs->cap.max_send_wr * dev->wq_multiplier,
2166 qp->wqe_wr_id = kcalloc(qp->sq.max_wr, sizeof(*qp->wqe_wr_id),
2168 if (!qp->wqe_wr_id) {
2169 DP_ERR(dev, "create qp: failed SQ shadow memory allocation\n");
2173 /* QP handle to be written in CQE */
2174 in_params.qp_handle_lo = lower_32_bits((uintptr_t) qp);
2175 in_params.qp_handle_hi = upper_32_bits((uintptr_t) qp);
2177 /* A single work request may take up to QEDR_MAX_RQ_WQE_SIZE elements in
2178 * the ring. The ring should allow at least a single WR, even if the
2179 * user requested none, due to allocation issues.
2181 qp->rq.max_wr = (u16) max_t(u32, attrs->cap.max_recv_wr, 1);
2183 /* Allocate driver internal RQ array */
2184 qp->rqe_wr_id = kcalloc(qp->rq.max_wr, sizeof(*qp->rqe_wr_id),
2186 if (!qp->rqe_wr_id) {
2188 "create qp: failed RQ shadow memory allocation\n");
2189 kfree(qp->wqe_wr_id);
2193 qedr_init_common_qp_in_params(dev, pd, qp, attrs, true, &in_params);
2195 n_sq_entries = attrs->cap.max_send_wr;
2196 n_sq_entries = min_t(u32, n_sq_entries, dev->attr.max_sqe);
2197 n_sq_entries = max_t(u32, n_sq_entries, 1);
2198 n_sq_elems = n_sq_entries * QEDR_MAX_SQE_ELEMENTS_PER_SQE;
2200 n_rq_elems = qp->rq.max_wr * QEDR_MAX_RQE_ELEMENTS_PER_RQE;
2202 if (rdma_protocol_iwarp(&dev->ibdev, 1))
2203 rc = qedr_iwarp_create_kernel_qp(dev, qp, &in_params,
2204 n_sq_elems, n_rq_elems);
2206 rc = qedr_roce_create_kernel_qp(dev, qp, &in_params,
2207 n_sq_elems, n_rq_elems);
2209 qedr_cleanup_kernel(dev, qp);
2214 static int qedr_free_qp_resources(struct qedr_dev *dev, struct qedr_qp *qp,
2215 struct ib_udata *udata)
2217 struct qedr_ucontext *ctx =
2218 rdma_udata_to_drv_context(udata, struct qedr_ucontext,
2222 if (qp->qp_type != IB_QPT_GSI) {
2223 rc = dev->ops->rdma_destroy_qp(dev->rdma_ctx, qp->qed_qp);
2228 if (qp->create_type == QEDR_QP_CREATE_USER)
2229 qedr_cleanup_user(dev, ctx, qp);
2231 qedr_cleanup_kernel(dev, qp);
2236 struct ib_qp *qedr_create_qp(struct ib_pd *ibpd,
2237 struct ib_qp_init_attr *attrs,
2238 struct ib_udata *udata)
2240 struct qedr_xrcd *xrcd = NULL;
2241 struct qedr_pd *pd = NULL;
2242 struct qedr_dev *dev;
2247 if (attrs->qp_type == IB_QPT_XRC_TGT) {
2248 xrcd = get_qedr_xrcd(attrs->xrcd);
2249 dev = get_qedr_dev(xrcd->ibxrcd.device);
2251 pd = get_qedr_pd(ibpd);
2252 dev = get_qedr_dev(ibpd->device);
2255 DP_DEBUG(dev, QEDR_MSG_QP, "create qp: called from %s, pd=%p\n",
2256 udata ? "user library" : "kernel", pd);
2258 rc = qedr_check_qp_attrs(ibpd, dev, attrs, udata);
2262 DP_DEBUG(dev, QEDR_MSG_QP,
2263 "create qp: called from %s, event_handler=%p, eepd=%p sq_cq=%p, sq_icid=%d, rq_cq=%p, rq_icid=%d\n",
2264 udata ? "user library" : "kernel", attrs->event_handler, pd,
2265 get_qedr_cq(attrs->send_cq),
2266 get_qedr_cq(attrs->send_cq)->icid,
2267 get_qedr_cq(attrs->recv_cq),
2268 attrs->recv_cq ? get_qedr_cq(attrs->recv_cq)->icid : 0);
2270 qp = kzalloc(sizeof(*qp), GFP_KERNEL);
2272 DP_ERR(dev, "create qp: failed allocating memory\n");
2273 return ERR_PTR(-ENOMEM);
2276 qedr_set_common_qp_params(dev, qp, pd, attrs);
2278 if (attrs->qp_type == IB_QPT_GSI) {
2279 ibqp = qedr_create_gsi_qp(dev, attrs, qp);
2286 rc = qedr_create_user_qp(dev, qp, ibpd, udata, attrs);
2288 rc = qedr_create_kernel_qp(dev, qp, ibpd, attrs);
2293 qp->ibqp.qp_num = qp->qp_id;
2295 if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
2296 rc = xa_insert(&dev->qps, qp->qp_id, qp, GFP_KERNEL);
2298 goto out_free_qp_resources;
2303 out_free_qp_resources:
2304 qedr_free_qp_resources(dev, qp, udata);
2308 return ERR_PTR(-EFAULT);
2311 static enum ib_qp_state qedr_get_ibqp_state(enum qed_roce_qp_state qp_state)
2314 case QED_ROCE_QP_STATE_RESET:
2315 return IB_QPS_RESET;
2316 case QED_ROCE_QP_STATE_INIT:
2318 case QED_ROCE_QP_STATE_RTR:
2320 case QED_ROCE_QP_STATE_RTS:
2322 case QED_ROCE_QP_STATE_SQD:
2324 case QED_ROCE_QP_STATE_ERR:
2326 case QED_ROCE_QP_STATE_SQE:
2332 static enum qed_roce_qp_state qedr_get_state_from_ibqp(
2333 enum ib_qp_state qp_state)
2337 return QED_ROCE_QP_STATE_RESET;
2339 return QED_ROCE_QP_STATE_INIT;
2341 return QED_ROCE_QP_STATE_RTR;
2343 return QED_ROCE_QP_STATE_RTS;
2345 return QED_ROCE_QP_STATE_SQD;
2347 return QED_ROCE_QP_STATE_ERR;
2349 return QED_ROCE_QP_STATE_ERR;
2353 static void qedr_reset_qp_hwq_info(struct qedr_qp_hwq_info *qph)
2355 qed_chain_reset(&qph->pbl);
2359 qph->db_data.data.value = cpu_to_le16(0);
2362 static int qedr_update_qp_state(struct qedr_dev *dev,
2364 enum qed_roce_qp_state cur_state,
2365 enum qed_roce_qp_state new_state)
if (new_state == cur_state)
    return 0;
2372 switch (cur_state) {
2373 case QED_ROCE_QP_STATE_RESET:
2374 switch (new_state) {
2375 case QED_ROCE_QP_STATE_INIT:
2376 qp->prev_wqe_size = 0;
2377 qedr_reset_qp_hwq_info(&qp->sq);
2378 qedr_reset_qp_hwq_info(&qp->rq);
2385 case QED_ROCE_QP_STATE_INIT:
2386 switch (new_state) {
2387 case QED_ROCE_QP_STATE_RTR:
2388 /* Update doorbell (in case post_recv was
2389 * done before move to RTR)
2392 if (rdma_protocol_roce(&dev->ibdev, 1)) {
2393 writel(qp->rq.db_data.raw, qp->rq.db);
2396 case QED_ROCE_QP_STATE_ERR:
2399 /* Invalid state change. */
2404 case QED_ROCE_QP_STATE_RTR:
2406 switch (new_state) {
2407 case QED_ROCE_QP_STATE_RTS:
2409 case QED_ROCE_QP_STATE_ERR:
2412 /* Invalid state change. */
2417 case QED_ROCE_QP_STATE_RTS:
2419 switch (new_state) {
2420 case QED_ROCE_QP_STATE_SQD:
2422 case QED_ROCE_QP_STATE_ERR:
2425 /* Invalid state change. */
2430 case QED_ROCE_QP_STATE_SQD:
2432 switch (new_state) {
2433 case QED_ROCE_QP_STATE_RTS:
2434 case QED_ROCE_QP_STATE_ERR:
2437 /* Invalid state change. */
2442 case QED_ROCE_QP_STATE_ERR:
2444 switch (new_state) {
2445 case QED_ROCE_QP_STATE_RESET:
2446 if ((qp->rq.prod != qp->rq.cons) ||
2447 (qp->sq.prod != qp->sq.cons)) {
2449 "Error->Reset with rq/sq not empty rq.prod=%x rq.cons=%x sq.prod=%x sq.cons=%x\n",
qp->rq.prod, qp->rq.cons, qp->sq.prod,
qp->sq.cons);
2468 int qedr_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
2469 int attr_mask, struct ib_udata *udata)
2471 struct qedr_qp *qp = get_qedr_qp(ibqp);
2472 struct qed_rdma_modify_qp_in_params qp_params = { 0 };
2473 struct qedr_dev *dev = get_qedr_dev(&qp->dev->ibdev);
2474 const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
2475 enum ib_qp_state old_qp_state, new_qp_state;
2476 enum qed_roce_qp_state cur_state;
2479 DP_DEBUG(dev, QEDR_MSG_QP,
2480 "modify qp: qp %p attr_mask=0x%x, state=%d", qp, attr_mask,
2483 old_qp_state = qedr_get_ibqp_state(qp->state);
2484 if (attr_mask & IB_QP_STATE)
2485 new_qp_state = attr->qp_state;
2487 new_qp_state = old_qp_state;
2489 if (rdma_protocol_roce(&dev->ibdev, 1)) {
2490 if (!ib_modify_qp_is_ok(old_qp_state, new_qp_state,
2491 ibqp->qp_type, attr_mask)) {
2493 "modify qp: invalid attribute mask=0x%x specified for\n"
2494 "qpn=0x%x of type=0x%x old_qp_state=0x%x, new_qp_state=0x%x\n",
2495 attr_mask, qp->qp_id, ibqp->qp_type,
2496 old_qp_state, new_qp_state);
2502 /* Translate the masks... */
2503 if (attr_mask & IB_QP_STATE) {
2504 SET_FIELD(qp_params.modify_flags,
2505 QED_RDMA_MODIFY_QP_VALID_NEW_STATE, 1);
2506 qp_params.new_state = qedr_get_state_from_ibqp(attr->qp_state);
2509 if (attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY)
2510 qp_params.sqd_async = true;
2512 if (attr_mask & IB_QP_PKEY_INDEX) {
2513 SET_FIELD(qp_params.modify_flags,
2514 QED_ROCE_MODIFY_QP_VALID_PKEY, 1);
2515 if (attr->pkey_index >= QEDR_ROCE_PKEY_TABLE_LEN) {
2520 qp_params.pkey = QEDR_ROCE_PKEY_DEFAULT;
2523 if (attr_mask & IB_QP_QKEY)
2524 qp->qkey = attr->qkey;
2526 if (attr_mask & IB_QP_ACCESS_FLAGS) {
2527 SET_FIELD(qp_params.modify_flags,
2528 QED_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN, 1);
2529 qp_params.incoming_rdma_read_en = attr->qp_access_flags &
2530 IB_ACCESS_REMOTE_READ;
2531 qp_params.incoming_rdma_write_en = attr->qp_access_flags &
2532 IB_ACCESS_REMOTE_WRITE;
2533 qp_params.incoming_atomic_en = attr->qp_access_flags &
2534 IB_ACCESS_REMOTE_ATOMIC;
2537 if (attr_mask & (IB_QP_AV | IB_QP_PATH_MTU)) {
if (rdma_protocol_iwarp(&dev->ibdev, 1))
    return -EINVAL;
2541 if (attr_mask & IB_QP_PATH_MTU) {
2542 if (attr->path_mtu < IB_MTU_256 ||
2543 attr->path_mtu > IB_MTU_4096) {
2544 pr_err("error: Only MTU sizes of 256, 512, 1024, 2048 and 4096 are supported by RoCE\n");
2548 qp->mtu = min(ib_mtu_enum_to_int(attr->path_mtu),
ib_mtu_enum_to_int(iboe_get_mtu(dev->ndev->mtu)));
qp->mtu = ib_mtu_enum_to_int(iboe_get_mtu(dev->ndev->mtu));
2556 pr_err("Fixing zeroed MTU to qp->mtu = %d\n", qp->mtu);
2559 SET_FIELD(qp_params.modify_flags,
2560 QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR, 1);
2562 qp_params.traffic_class_tos = grh->traffic_class;
2563 qp_params.flow_label = grh->flow_label;
2564 qp_params.hop_limit_ttl = grh->hop_limit;
2566 qp->sgid_idx = grh->sgid_index;
2568 rc = get_gid_info_from_table(ibqp, attr, attr_mask, &qp_params);
2571 "modify qp: problems with GID index %d (rc=%d)\n",
2572 grh->sgid_index, rc);
2576 rc = qedr_get_dmac(dev, &attr->ah_attr,
2577 qp_params.remote_mac_addr);
2581 qp_params.use_local_mac = true;
2582 ether_addr_copy(qp_params.local_mac_addr, dev->ndev->dev_addr);
2584 DP_DEBUG(dev, QEDR_MSG_QP, "dgid=%x:%x:%x:%x\n",
2585 qp_params.dgid.dwords[0], qp_params.dgid.dwords[1],
2586 qp_params.dgid.dwords[2], qp_params.dgid.dwords[3]);
2587 DP_DEBUG(dev, QEDR_MSG_QP, "sgid=%x:%x:%x:%x\n",
2588 qp_params.sgid.dwords[0], qp_params.sgid.dwords[1],
2589 qp_params.sgid.dwords[2], qp_params.sgid.dwords[3]);
2590 DP_DEBUG(dev, QEDR_MSG_QP, "remote_mac=[%pM]\n",
2591 qp_params.remote_mac_addr);
2593 qp_params.mtu = qp->mtu;
2594 qp_params.lb_indication = false;
2597 if (!qp_params.mtu) {
/* Stay with current MTU */
if (qp->mtu)
    qp_params.mtu = qp->mtu;
else
    qp_params.mtu =
        ib_mtu_enum_to_int(iboe_get_mtu(dev->ndev->mtu));
2606 if (attr_mask & IB_QP_TIMEOUT) {
2607 SET_FIELD(qp_params.modify_flags,
2608 QED_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT, 1);
2610 /* The received timeout value is an exponent used like this:
2611 * "12.7.34 LOCAL ACK TIMEOUT
2612 * Value representing the transport (ACK) timeout for use by
2613 * the remote, expressed as: 4.096 * 2^timeout [usec]"
2614 * The FW expects timeout in msec so we need to divide the usec
2615 * result by 1000. We'll approximate 1000~2^10, and 4.096 ~ 2^2,
2616 * so we get: 2^2 * 2^timeout / 2^10 = 2^(timeout - 8).
2617 * The value of zero means infinite so we use a 'max_t' to make
2618 * sure that sub 1 msec values will be configured as 1 msec.
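 *
 * Worked example (illustrative): attr->timeout = 14 encodes
 * 4.096 usec * 2^14, roughly 67 msec, and the approximation gives
 * 1 << (14 - 8) = 64 msec; attr->timeout = 5 encodes ~131 usec,
 * which max_t() clamps up to 1 << 0 = 1 msec.
 */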
2621 qp_params.ack_timeout =
2622 1 << max_t(int, attr->timeout - 8, 0);
2624 qp_params.ack_timeout = 0;
2626 qp->timeout = attr->timeout;
2629 if (attr_mask & IB_QP_RETRY_CNT) {
2630 SET_FIELD(qp_params.modify_flags,
2631 QED_ROCE_MODIFY_QP_VALID_RETRY_CNT, 1);
2632 qp_params.retry_cnt = attr->retry_cnt;
2635 if (attr_mask & IB_QP_RNR_RETRY) {
2636 SET_FIELD(qp_params.modify_flags,
2637 QED_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT, 1);
2638 qp_params.rnr_retry_cnt = attr->rnr_retry;
2641 if (attr_mask & IB_QP_RQ_PSN) {
2642 SET_FIELD(qp_params.modify_flags,
2643 QED_ROCE_MODIFY_QP_VALID_RQ_PSN, 1);
2644 qp_params.rq_psn = attr->rq_psn;
2645 qp->rq_psn = attr->rq_psn;
2648 if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
2649 if (attr->max_rd_atomic > dev->attr.max_qp_req_rd_atomic_resc) {
2652 "unsupported max_rd_atomic=%d, supported=%d\n",
2653 attr->max_rd_atomic,
2654 dev->attr.max_qp_req_rd_atomic_resc);
2658 SET_FIELD(qp_params.modify_flags,
2659 QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ, 1);
2660 qp_params.max_rd_atomic_req = attr->max_rd_atomic;
2663 if (attr_mask & IB_QP_MIN_RNR_TIMER) {
2664 SET_FIELD(qp_params.modify_flags,
2665 QED_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER, 1);
2666 qp_params.min_rnr_nak_timer = attr->min_rnr_timer;
2669 if (attr_mask & IB_QP_SQ_PSN) {
2670 SET_FIELD(qp_params.modify_flags,
2671 QED_ROCE_MODIFY_QP_VALID_SQ_PSN, 1);
2672 qp_params.sq_psn = attr->sq_psn;
2673 qp->sq_psn = attr->sq_psn;
2676 if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
2677 if (attr->max_dest_rd_atomic >
2678 dev->attr.max_qp_resp_rd_atomic_resc) {
2680 "unsupported max_dest_rd_atomic=%d, supported=%d\n",
2681 attr->max_dest_rd_atomic,
2682 dev->attr.max_qp_resp_rd_atomic_resc);
2688 SET_FIELD(qp_params.modify_flags,
2689 QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP, 1);
2690 qp_params.max_rd_atomic_resp = attr->max_dest_rd_atomic;
2693 if (attr_mask & IB_QP_DEST_QPN) {
2694 SET_FIELD(qp_params.modify_flags,
2695 QED_ROCE_MODIFY_QP_VALID_DEST_QP, 1);
2697 qp_params.dest_qp = attr->dest_qp_num;
2698 qp->dest_qp_num = attr->dest_qp_num;
2701 cur_state = qp->state;
2703 /* Update the QP state before the actual ramrod to prevent a race with
2704 * fast path. Modifying the QP state to error will cause the device to
 * flush the CQEs, and CQEs polled while flushed would be considered a
 * potential issue if the QP weren't already in the error state.
 */
2708 if ((attr_mask & IB_QP_STATE) && qp->qp_type != IB_QPT_GSI &&
2709 !udata && qp_params.new_state == QED_ROCE_QP_STATE_ERR)
2710 qp->state = QED_ROCE_QP_STATE_ERR;
2712 if (qp->qp_type != IB_QPT_GSI)
2713 rc = dev->ops->rdma_modify_qp(dev->rdma_ctx,
2714 qp->qed_qp, &qp_params);
2716 if (attr_mask & IB_QP_STATE) {
2717 if ((qp->qp_type != IB_QPT_GSI) && (!udata))
2718 rc = qedr_update_qp_state(dev, qp, cur_state,
2719 qp_params.new_state);
2720 qp->state = qp_params.new_state;
2727 static int qedr_to_ib_qp_acc_flags(struct qed_rdma_query_qp_out_params *params)
2729 int ib_qp_acc_flags = 0;
2731 if (params->incoming_rdma_write_en)
2732 ib_qp_acc_flags |= IB_ACCESS_REMOTE_WRITE;
2733 if (params->incoming_rdma_read_en)
2734 ib_qp_acc_flags |= IB_ACCESS_REMOTE_READ;
2735 if (params->incoming_atomic_en)
2736 ib_qp_acc_flags |= IB_ACCESS_REMOTE_ATOMIC;
2737 ib_qp_acc_flags |= IB_ACCESS_LOCAL_WRITE;
2738 return ib_qp_acc_flags;
2741 int qedr_query_qp(struct ib_qp *ibqp,
2742 struct ib_qp_attr *qp_attr,
2743 int attr_mask, struct ib_qp_init_attr *qp_init_attr)
2745 struct qed_rdma_query_qp_out_params params;
2746 struct qedr_qp *qp = get_qedr_qp(ibqp);
2747 struct qedr_dev *dev = qp->dev;
memset(&params, 0, sizeof(params));
2751 memset(qp_attr, 0, sizeof(*qp_attr));
2752 memset(qp_init_attr, 0, sizeof(*qp_init_attr));
2754 if (qp->qp_type != IB_QPT_GSI) {
rc = dev->ops->rdma_query_qp(dev->rdma_ctx, qp->qed_qp, &params);
2758 qp_attr->qp_state = qedr_get_ibqp_state(params.state);
} else {
    qp_attr->qp_state = qedr_get_ibqp_state(QED_ROCE_QP_STATE_RTS);
}
2763 qp_attr->cur_qp_state = qedr_get_ibqp_state(params.state);
2764 qp_attr->path_mtu = ib_mtu_int_to_enum(params.mtu);
2765 qp_attr->path_mig_state = IB_MIG_MIGRATED;
2766 qp_attr->rq_psn = params.rq_psn;
2767 qp_attr->sq_psn = params.sq_psn;
2768 qp_attr->dest_qp_num = params.dest_qp;
qp_attr->qp_access_flags = qedr_to_ib_qp_acc_flags(&params);
2772 qp_attr->cap.max_send_wr = qp->sq.max_wr;
2773 qp_attr->cap.max_recv_wr = qp->rq.max_wr;
2774 qp_attr->cap.max_send_sge = qp->sq.max_sges;
2775 qp_attr->cap.max_recv_sge = qp->rq.max_sges;
2776 qp_attr->cap.max_inline_data = dev->attr.max_inline;
2777 qp_init_attr->cap = qp_attr->cap;
2779 qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
2780 rdma_ah_set_grh(&qp_attr->ah_attr, NULL,
2781 params.flow_label, qp->sgid_idx,
2782 params.hop_limit_ttl, params.traffic_class_tos);
rdma_ah_set_dgid_raw(&qp_attr->ah_attr, &params.dgid.bytes[0]);
2784 rdma_ah_set_port_num(&qp_attr->ah_attr, 1);
2785 rdma_ah_set_sl(&qp_attr->ah_attr, 0);
2786 qp_attr->timeout = qp->timeout;
2787 qp_attr->rnr_retry = params.rnr_retry;
2788 qp_attr->retry_cnt = params.retry_cnt;
2789 qp_attr->min_rnr_timer = params.min_rnr_nak_timer;
2790 qp_attr->pkey_index = params.pkey_index;
2791 qp_attr->port_num = 1;
2792 rdma_ah_set_path_bits(&qp_attr->ah_attr, 0);
2793 rdma_ah_set_static_rate(&qp_attr->ah_attr, 0);
2794 qp_attr->alt_pkey_index = 0;
2795 qp_attr->alt_port_num = 0;
2796 qp_attr->alt_timeout = 0;
2797 memset(&qp_attr->alt_ah_attr, 0, sizeof(qp_attr->alt_ah_attr));
2799 qp_attr->sq_draining = (params.state == QED_ROCE_QP_STATE_SQD) ? 1 : 0;
2800 qp_attr->max_dest_rd_atomic = params.max_dest_rd_atomic;
2801 qp_attr->max_rd_atomic = params.max_rd_atomic;
2802 qp_attr->en_sqd_async_notify = (params.sqd_async) ? 1 : 0;
2804 DP_DEBUG(dev, QEDR_MSG_QP, "QEDR_QUERY_QP: max_inline_data=%d\n",
2805 qp_attr->cap.max_inline_data);
2811 int qedr_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
2813 struct qedr_qp *qp = get_qedr_qp(ibqp);
2814 struct qedr_dev *dev = qp->dev;
2815 struct ib_qp_attr attr;
2818 DP_DEBUG(dev, QEDR_MSG_QP, "destroy qp: destroying %p, qp type=%d\n",
2821 if (rdma_protocol_roce(&dev->ibdev, 1)) {
2822 if ((qp->state != QED_ROCE_QP_STATE_RESET) &&
2823 (qp->state != QED_ROCE_QP_STATE_ERR) &&
2824 (qp->state != QED_ROCE_QP_STATE_INIT)) {
2826 attr.qp_state = IB_QPS_ERR;
2827 attr_mask |= IB_QP_STATE;
2829 /* Change the QP state to ERROR */
2830 qedr_modify_qp(ibqp, &attr, attr_mask, NULL);
/* If connection establishment started, the WAIT_FOR_CONNECT
 * bit will be on and we need to wait for the establishment
 * to complete before destroying the qp.
 */
2837 if (test_and_set_bit(QEDR_IWARP_CM_WAIT_FOR_CONNECT,
2838 &qp->iwarp_cm_flags))
2839 wait_for_completion(&qp->iwarp_cm_comp);
2841 /* If graceful disconnect started, the WAIT_FOR_DISCONNECT
2842 * bit will be on, and we need to wait for the disconnect to
2843 * complete before continuing. We can use the same completion,
2844 * iwarp_cm_comp, since this is the only place that waits for
2845 * this completion and it is sequential. In addition,
2846 * disconnect can't occur before the connection is fully
2847 * established, therefore if WAIT_FOR_DISCONNECT is on it
2848 * means WAIT_FOR_CONNECT is also on and the completion for
2849 * CONNECT already occurred.
2851 if (test_and_set_bit(QEDR_IWARP_CM_WAIT_FOR_DISCONNECT,
2852 &qp->iwarp_cm_flags))
2853 wait_for_completion(&qp->iwarp_cm_comp);
2856 if (qp->qp_type == IB_QPT_GSI)
2857 qedr_destroy_gsi_qp(dev);
2859 /* We need to remove the entry from the xarray before we release the
 * qp_id to avoid a race of the qp_id being reallocated and failing
 * on a subsequent xa_insert().
 */
2863 if (rdma_protocol_iwarp(&dev->ibdev, 1))
2864 xa_erase(&dev->qps, qp->qp_id);
2866 qedr_free_qp_resources(dev, qp, udata);
2868 if (rdma_protocol_iwarp(&dev->ibdev, 1))
2869 qedr_iw_qp_rem_ref(&qp->ibqp);
2876 int qedr_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr,
2877 struct ib_udata *udata)
2879 struct qedr_ah *ah = get_qedr_ah(ibah);
2881 rdma_copy_ah_attr(&ah->attr, init_attr->ah_attr);
2886 int qedr_destroy_ah(struct ib_ah *ibah, u32 flags)
2888 struct qedr_ah *ah = get_qedr_ah(ibah);
2890 rdma_destroy_ah_attr(&ah->attr);
2894 static void free_mr_info(struct qedr_dev *dev, struct mr_info *info)
2896 struct qedr_pbl *pbl, *tmp;
2898 if (info->pbl_table)
2899 list_add_tail(&info->pbl_table->list_entry,
2900 &info->free_pbl_list);
2902 if (!list_empty(&info->inuse_pbl_list))
2903 list_splice(&info->inuse_pbl_list, &info->free_pbl_list);
2905 list_for_each_entry_safe(pbl, tmp, &info->free_pbl_list, list_entry) {
2906 list_del(&pbl->list_entry);
2907 qedr_free_pbl(dev, &info->pbl_info, pbl);
2911 static int init_mr_info(struct qedr_dev *dev, struct mr_info *info,
2912 size_t page_list_len, bool two_layered)
2914 struct qedr_pbl *tmp;
2917 INIT_LIST_HEAD(&info->free_pbl_list);
2918 INIT_LIST_HEAD(&info->inuse_pbl_list);
2920 rc = qedr_prepare_pbl_tbl(dev, &info->pbl_info,
2921 page_list_len, two_layered);
2925 info->pbl_table = qedr_alloc_pbl_tbl(dev, &info->pbl_info, GFP_KERNEL);
2926 if (IS_ERR(info->pbl_table)) {
2927 rc = PTR_ERR(info->pbl_table);
2931 DP_DEBUG(dev, QEDR_MSG_MR, "pbl_table_pa = %pa\n",
2932 &info->pbl_table->pa);
/* In the usual case we use 2 PBLs, so we add one to the free
 * list and allocate another one.
 */
2937 tmp = qedr_alloc_pbl_tbl(dev, &info->pbl_info, GFP_KERNEL);
2939 DP_DEBUG(dev, QEDR_MSG_MR, "Extra PBL is not allocated\n");
2943 list_add_tail(&tmp->list_entry, &info->free_pbl_list);
2945 DP_DEBUG(dev, QEDR_MSG_MR, "extra pbl_table_pa = %pa\n", &tmp->pa);
2949 free_mr_info(dev, info);
2954 struct ib_mr *qedr_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
2955 u64 usr_addr, int acc, struct ib_udata *udata)
2957 struct qedr_dev *dev = get_qedr_dev(ibpd->device);
2962 pd = get_qedr_pd(ibpd);
2963 DP_DEBUG(dev, QEDR_MSG_MR,
2964 "qedr_register user mr pd = %d start = %lld, len = %lld, usr_addr = %lld, acc = %d\n",
2965 pd->pd_id, start, len, usr_addr, acc);
2967 if (acc & IB_ACCESS_REMOTE_WRITE && !(acc & IB_ACCESS_LOCAL_WRITE))
2968 return ERR_PTR(-EINVAL);
2970 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
2974 mr->type = QEDR_MR_USER;
2976 mr->umem = ib_umem_get(ibpd->device, start, len, acc);
2977 if (IS_ERR(mr->umem)) {
2982 rc = init_mr_info(dev, &mr->info,
2983 ib_umem_num_dma_blocks(mr->umem, PAGE_SIZE), 1);
2987 qedr_populate_pbls(dev, mr->umem, mr->info.pbl_table,
2988 &mr->info.pbl_info, PAGE_SHIFT);
2990 rc = dev->ops->rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid);
2993 DP_ERR(dev, "Out of MR resources\n");
2995 DP_ERR(dev, "roce alloc tid returned error %d\n", rc);
3000 /* Index only, 18 bit long, lkey = itid << 8 | key */
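/* E.g. (illustrative numbers only): itid = 0x1A2B with key = 0x05
 * composes lkey = (0x1A2B << 8) | 0x05 = 0x1A2B05.
 */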
3001 mr->hw_mr.tid_type = QED_RDMA_TID_REGISTERED_MR;
3003 mr->hw_mr.pd = pd->pd_id;
3004 mr->hw_mr.local_read = 1;
3005 mr->hw_mr.local_write = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0;
3006 mr->hw_mr.remote_read = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
3007 mr->hw_mr.remote_write = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
3008 mr->hw_mr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
3009 mr->hw_mr.mw_bind = false;
3010 mr->hw_mr.pbl_ptr = mr->info.pbl_table[0].pa;
3011 mr->hw_mr.pbl_two_level = mr->info.pbl_info.two_layered;
3012 mr->hw_mr.pbl_page_size_log = ilog2(mr->info.pbl_info.pbl_size);
3013 mr->hw_mr.page_size_log = PAGE_SHIFT;
3014 mr->hw_mr.length = len;
3015 mr->hw_mr.vaddr = usr_addr;
3016 mr->hw_mr.phy_mr = false;
3017 mr->hw_mr.dma_mr = false;
3019 rc = dev->ops->rdma_register_tid(dev->rdma_ctx, &mr->hw_mr);
3021 DP_ERR(dev, "roce register tid returned an error %d\n", rc);
3025 mr->ibmr.lkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
3026 if (mr->hw_mr.remote_write || mr->hw_mr.remote_read ||
3027 mr->hw_mr.remote_atomic)
3028 mr->ibmr.rkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
3030 DP_DEBUG(dev, QEDR_MSG_MR, "register user mr lkey: %x\n",
3035 dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
3037 qedr_free_pbl(dev, &mr->info.pbl_info, mr->info.pbl_table);
3043 int qedr_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata)
3045 struct qedr_mr *mr = get_qedr_mr(ib_mr);
3046 struct qedr_dev *dev = get_qedr_dev(ib_mr->device);
3049 rc = dev->ops->rdma_deregister_tid(dev->rdma_ctx, mr->hw_mr.itid);
3053 dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
3055 if (mr->type != QEDR_MR_DMA)
3056 free_mr_info(dev, &mr->info);
3058 /* it could be user registered memory. */
3059 ib_umem_release(mr->umem);
3066 static struct qedr_mr *__qedr_alloc_mr(struct ib_pd *ibpd,
3067 int max_page_list_len)
3069 struct qedr_pd *pd = get_qedr_pd(ibpd);
3070 struct qedr_dev *dev = get_qedr_dev(ibpd->device);
3074 DP_DEBUG(dev, QEDR_MSG_MR,
3075 "qedr_alloc_frmr pd = %d max_page_list_len= %d\n", pd->pd_id,
3078 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
3083 mr->type = QEDR_MR_FRMR;
3085 rc = init_mr_info(dev, &mr->info, max_page_list_len, 1);
3089 rc = dev->ops->rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid);
3092 DP_ERR(dev, "Out of MR resources\n");
3094 DP_ERR(dev, "roce alloc tid returned error %d\n", rc);
3099 /* Index only, 18 bit long, lkey = itid << 8 | key */
3100 mr->hw_mr.tid_type = QED_RDMA_TID_FMR;
3102 mr->hw_mr.pd = pd->pd_id;
3103 mr->hw_mr.local_read = 1;
3104 mr->hw_mr.local_write = 0;
3105 mr->hw_mr.remote_read = 0;
3106 mr->hw_mr.remote_write = 0;
3107 mr->hw_mr.remote_atomic = 0;
3108 mr->hw_mr.mw_bind = false;
3109 mr->hw_mr.pbl_ptr = 0;
3110 mr->hw_mr.pbl_two_level = mr->info.pbl_info.two_layered;
3111 mr->hw_mr.pbl_page_size_log = ilog2(mr->info.pbl_info.pbl_size);
3112 mr->hw_mr.length = 0;
3113 mr->hw_mr.vaddr = 0;
3114 mr->hw_mr.phy_mr = true;
3115 mr->hw_mr.dma_mr = false;
3117 rc = dev->ops->rdma_register_tid(dev->rdma_ctx, &mr->hw_mr);
3119 DP_ERR(dev, "roce register tid returned an error %d\n", rc);
3123 mr->ibmr.lkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
3124 mr->ibmr.rkey = mr->ibmr.lkey;
3126 DP_DEBUG(dev, QEDR_MSG_MR, "alloc frmr: %x\n", mr->ibmr.lkey);
3130 dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
3132 qedr_free_pbl(dev, &mr->info.pbl_info, mr->info.pbl_table);
3138 struct ib_mr *qedr_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type,
3143 if (mr_type != IB_MR_TYPE_MEM_REG)
3144 return ERR_PTR(-EINVAL);
3146 mr = __qedr_alloc_mr(ibpd, max_num_sg);
3149 return ERR_PTR(-EINVAL);
3154 static int qedr_set_page(struct ib_mr *ibmr, u64 addr)
3156 struct qedr_mr *mr = get_qedr_mr(ibmr);
3157 struct qedr_pbl *pbl_table;
3158 struct regpair *pbe;
3161 if (unlikely(mr->npages == mr->info.pbl_info.num_pbes)) {
3162 DP_ERR(mr->dev, "qedr_set_page fails when %d\n", mr->npages);
3166 DP_DEBUG(mr->dev, QEDR_MSG_MR, "qedr_set_page pages[%d] = 0x%llx\n",
3169 pbes_in_page = mr->info.pbl_info.pbl_size / sizeof(u64);
3170 pbl_table = mr->info.pbl_table + (mr->npages / pbes_in_page);
3171 pbe = (struct regpair *)pbl_table->va;
3172 pbe += mr->npages % pbes_in_page;
3173 pbe->lo = cpu_to_le32((u32)addr);
3174 pbe->hi = cpu_to_le32((u32)upper_32_bits(addr));
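/* Illustrative walk-through of the indexing above: a pbl_size of
 * 4096 bytes holds 4096 / 8 = 512 PBEs per table, so npages = 700
 * selects pbl_table[700 / 512] = pbl_table[1], entry 700 % 512 = 188.
 */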
3181 static void handle_completed_mrs(struct qedr_dev *dev, struct mr_info *info)
3183 int work = info->completed - info->completed_handled - 1;
3185 DP_DEBUG(dev, QEDR_MSG_MR, "Special FMR work = %d\n", work);
3186 while (work-- > 0 && !list_empty(&info->inuse_pbl_list)) {
3187 struct qedr_pbl *pbl;
/* Free all the PBLs that can be freed (all the ones that were
 * invalidated), under the assumption that if an FMR completed
 * successfully, any invalidate operation posted before it has
 * completed as well.
 */
3194 pbl = list_first_entry(&info->inuse_pbl_list,
3195 struct qedr_pbl, list_entry);
3196 list_move_tail(&pbl->list_entry, &info->free_pbl_list);
3197 info->completed_handled++;
3201 int qedr_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
3202 int sg_nents, unsigned int *sg_offset)
3204 struct qedr_mr *mr = get_qedr_mr(ibmr);
3208 handle_completed_mrs(mr->dev, &mr->info);
3209 return ib_sg_to_pages(ibmr, sg, sg_nents, NULL, qedr_set_page);
3212 struct ib_mr *qedr_get_dma_mr(struct ib_pd *ibpd, int acc)
3214 struct qedr_dev *dev = get_qedr_dev(ibpd->device);
3215 struct qedr_pd *pd = get_qedr_pd(ibpd);
3219 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
3221 return ERR_PTR(-ENOMEM);
3223 mr->type = QEDR_MR_DMA;
3225 rc = dev->ops->rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid);
3228 DP_ERR(dev, "Out of MR resources\n");
3230 DP_ERR(dev, "roce alloc tid returned error %d\n", rc);
3235 /* index only, 18 bit long, lkey = itid << 8 | key */
3236 mr->hw_mr.tid_type = QED_RDMA_TID_REGISTERED_MR;
3237 mr->hw_mr.pd = pd->pd_id;
3238 mr->hw_mr.local_read = 1;
3239 mr->hw_mr.local_write = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0;
3240 mr->hw_mr.remote_read = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
3241 mr->hw_mr.remote_write = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
3242 mr->hw_mr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
3243 mr->hw_mr.dma_mr = true;
3245 rc = dev->ops->rdma_register_tid(dev->rdma_ctx, &mr->hw_mr);
3247 DP_ERR(dev, "roce register tid returned an error %d\n", rc);
3251 mr->ibmr.lkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
3252 if (mr->hw_mr.remote_write || mr->hw_mr.remote_read ||
3253 mr->hw_mr.remote_atomic)
3254 mr->ibmr.rkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
3256 DP_DEBUG(dev, QEDR_MSG_MR, "get dma mr: lkey = %x\n", mr->ibmr.lkey);
3260 dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
3266 static inline int qedr_wq_is_full(struct qedr_qp_hwq_info *wq)
3268 return (((wq->prod + 1) % wq->max_wr) == wq->cons);
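/* Example (illustrative): with max_wr = 4, prod = 2, cons = 3 we get
 * (2 + 1) % 4 == 3, i.e. full; the ring keeps one slot unused as the
 * usual full-vs-empty disambiguation gap of a circular queue.
 */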
3271 static int sge_data_len(struct ib_sge *sg_list, int num_sge)
3275 for (i = 0; i < num_sge; i++)
3276 len += sg_list[i].length;
3281 static void swap_wqe_data64(u64 *p)
3285 for (i = 0; i < QEDR_SQE_ELEMENT_SIZE / sizeof(u64); i++, p++)
3286 *p = cpu_to_be64(cpu_to_le64(*p));
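/* Note on the double conversion above: cpu_to_le64() is a no-op on
 * little-endian hosts where cpu_to_be64() byte-swaps, and the reverse
 * holds on big-endian hosts, so the net effect is an unconditional
 * 64-bit byte swap of each inline-data word, presumably the ordering
 * the FW expects for inline payloads.
 */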
3289 static u32 qedr_prepare_sq_inline_data(struct qedr_dev *dev,
3290 struct qedr_qp *qp, u8 *wqe_size,
3291 const struct ib_send_wr *wr,
3292 const struct ib_send_wr **bad_wr,
3295 u32 data_size = sge_data_len(wr->sg_list, wr->num_sge);
3296 char *seg_prt, *wqe;
3299 if (data_size > ROCE_REQ_MAX_INLINE_DATA_SIZE) {
3300 DP_ERR(dev, "Too much inline data in WR: %d\n", data_size);
3314 /* Copy data inline */
3315 for (i = 0; i < wr->num_sge; i++) {
3316 u32 len = wr->sg_list[i].length;
3317 void *src = (void *)(uintptr_t)wr->sg_list[i].addr;
3322 /* New segment required */
3324 wqe = (char *)qed_chain_produce(&qp->sq.pbl);
3326 seg_siz = sizeof(struct rdma_sq_common_wqe);
3330 /* Calculate currently allowed length */
3331 cur = min_t(u32, len, seg_siz);
3332 memcpy(seg_prt, src, cur);
3334 /* Update segment variables */
3338 /* Update sge variables */
3342 /* Swap fully-completed segments */
3344 swap_wqe_data64((u64 *)wqe);
/* Swap the last, partially filled segment */
if (seg_siz)
    swap_wqe_data64((u64 *)wqe);
3355 #define RQ_SGE_SET(sge, vaddr, vlength, vflags) \
3357 DMA_REGPAIR_LE(sge->addr, vaddr); \
3358 (sge)->length = cpu_to_le32(vlength); \
3359 (sge)->flags = cpu_to_le32(vflags); \
3362 #define SRQ_HDR_SET(hdr, vwr_id, num_sge) \
3364 DMA_REGPAIR_LE(hdr->wr_id, vwr_id); \
3365 (hdr)->num_sges = num_sge; \
3368 #define SRQ_SGE_SET(sge, vaddr, vlength, vlkey) \
3370 DMA_REGPAIR_LE(sge->addr, vaddr); \
3371 (sge)->length = cpu_to_le32(vlength); \
3372 (sge)->l_key = cpu_to_le32(vlkey); \
3375 static u32 qedr_prepare_sq_sges(struct qedr_qp *qp, u8 *wqe_size,
3376 const struct ib_send_wr *wr)
3381 for (i = 0; i < wr->num_sge; i++) {
3382 struct rdma_sq_sge *sge = qed_chain_produce(&qp->sq.pbl);
3384 DMA_REGPAIR_LE(sge->addr, wr->sg_list[i].addr);
3385 sge->l_key = cpu_to_le32(wr->sg_list[i].lkey);
3386 sge->length = cpu_to_le32(wr->sg_list[i].length);
3387 data_size += wr->sg_list[i].length;
3391 *wqe_size += wr->num_sge;
3396 static u32 qedr_prepare_sq_rdma_data(struct qedr_dev *dev,
3398 struct rdma_sq_rdma_wqe_1st *rwqe,
3399 struct rdma_sq_rdma_wqe_2nd *rwqe2,
3400 const struct ib_send_wr *wr,
3401 const struct ib_send_wr **bad_wr)
3403 rwqe2->r_key = cpu_to_le32(rdma_wr(wr)->rkey);
3404 DMA_REGPAIR_LE(rwqe2->remote_va, rdma_wr(wr)->remote_addr);
3406 if (wr->send_flags & IB_SEND_INLINE &&
3407 (wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM ||
3408 wr->opcode == IB_WR_RDMA_WRITE)) {
3411 SET_FIELD2(flags, RDMA_SQ_RDMA_WQE_1ST_INLINE_FLG, 1);
3412 return qedr_prepare_sq_inline_data(dev, qp, &rwqe->wqe_size, wr,
3413 bad_wr, &rwqe->flags, flags);
3416 return qedr_prepare_sq_sges(qp, &rwqe->wqe_size, wr);
3419 static u32 qedr_prepare_sq_send_data(struct qedr_dev *dev,
3421 struct rdma_sq_send_wqe_1st *swqe,
3422 struct rdma_sq_send_wqe_2st *swqe2,
3423 const struct ib_send_wr *wr,
3424 const struct ib_send_wr **bad_wr)
3426 memset(swqe2, 0, sizeof(*swqe2));
3427 if (wr->send_flags & IB_SEND_INLINE) {
3430 SET_FIELD2(flags, RDMA_SQ_SEND_WQE_INLINE_FLG, 1);
3431 return qedr_prepare_sq_inline_data(dev, qp, &swqe->wqe_size, wr,
3432 bad_wr, &swqe->flags, flags);
3435 return qedr_prepare_sq_sges(qp, &swqe->wqe_size, wr);
3438 static int qedr_prepare_reg(struct qedr_qp *qp,
3439 struct rdma_sq_fmr_wqe_1st *fwqe1,
3440 const struct ib_reg_wr *wr)
3442 struct qedr_mr *mr = get_qedr_mr(wr->mr);
3443 struct rdma_sq_fmr_wqe_2nd *fwqe2;
3445 fwqe2 = (struct rdma_sq_fmr_wqe_2nd *)qed_chain_produce(&qp->sq.pbl);
3446 fwqe1->addr.hi = upper_32_bits(mr->ibmr.iova);
3447 fwqe1->addr.lo = lower_32_bits(mr->ibmr.iova);
3448 fwqe1->l_key = wr->key;
3450 fwqe2->access_ctrl = 0;
3452 SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_REMOTE_READ,
3453 !!(wr->access & IB_ACCESS_REMOTE_READ));
3454 SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_REMOTE_WRITE,
3455 !!(wr->access & IB_ACCESS_REMOTE_WRITE));
3456 SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_ENABLE_ATOMIC,
3457 !!(wr->access & IB_ACCESS_REMOTE_ATOMIC));
3458 SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_LOCAL_READ, 1);
3459 SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_LOCAL_WRITE,
3460 !!(wr->access & IB_ACCESS_LOCAL_WRITE));
3461 fwqe2->fmr_ctrl = 0;
3463 SET_FIELD2(fwqe2->fmr_ctrl, RDMA_SQ_FMR_WQE_2ND_PAGE_SIZE_LOG,
3464 ilog2(mr->ibmr.page_size) - 12);
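/* The "- 12" encodes the page size relative to the 4 KB (2^12) base,
 * presumably what the FW field expects: e.g. a 2 MB page, with
 * ilog2() == 21, would be programmed as 9.
 */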
3466 fwqe2->length_hi = 0;
3467 fwqe2->length_lo = mr->ibmr.length;
3468 fwqe2->pbl_addr.hi = upper_32_bits(mr->info.pbl_table->pa);
3469 fwqe2->pbl_addr.lo = lower_32_bits(mr->info.pbl_table->pa);
3471 qp->wqe_wr_id[qp->sq.prod].mr = mr;
3476 static enum ib_wc_opcode qedr_ib_to_wc_opcode(enum ib_wr_opcode opcode)
3479 case IB_WR_RDMA_WRITE:
3480 case IB_WR_RDMA_WRITE_WITH_IMM:
3481 return IB_WC_RDMA_WRITE;
case IB_WR_SEND_WITH_IMM:
case IB_WR_SEND:
case IB_WR_SEND_WITH_INV:
    return IB_WC_SEND;
3486 case IB_WR_RDMA_READ:
3487 case IB_WR_RDMA_READ_WITH_INV:
3488 return IB_WC_RDMA_READ;
3489 case IB_WR_ATOMIC_CMP_AND_SWP:
3490 return IB_WC_COMP_SWAP;
3491 case IB_WR_ATOMIC_FETCH_AND_ADD:
3492 return IB_WC_FETCH_ADD;
case IB_WR_REG_MR:
    return IB_WC_REG_MR;
3495 case IB_WR_LOCAL_INV:
3496 return IB_WC_LOCAL_INV;
3502 static inline bool qedr_can_post_send(struct qedr_qp *qp,
3503 const struct ib_send_wr *wr)
3505 int wq_is_full, err_wr, pbl_is_full;
3506 struct qedr_dev *dev = qp->dev;
3508 /* prevent SQ overflow and/or processing of a bad WR */
3509 err_wr = wr->num_sge > qp->sq.max_sges;
3510 wq_is_full = qedr_wq_is_full(&qp->sq);
3511 pbl_is_full = qed_chain_get_elem_left_u32(&qp->sq.pbl) <
3512 QEDR_MAX_SQE_ELEMENTS_PER_SQE;
3513 if (wq_is_full || err_wr || pbl_is_full) {
3514 if (wq_is_full && !(qp->err_bitmap & QEDR_QP_ERR_SQ_FULL)) {
3516 "error: WQ is full. Post send on QP %p failed (this error appears only once)\n",
3518 qp->err_bitmap |= QEDR_QP_ERR_SQ_FULL;
3521 if (err_wr && !(qp->err_bitmap & QEDR_QP_ERR_BAD_SR)) {
3523 "error: WR is bad. Post send on QP %p failed (this error appears only once)\n",
3525 qp->err_bitmap |= QEDR_QP_ERR_BAD_SR;
3529 !(qp->err_bitmap & QEDR_QP_ERR_SQ_PBL_FULL)) {
3531 "error: WQ PBL is full. Post send on QP %p failed (this error appears only once)\n",
3533 qp->err_bitmap |= QEDR_QP_ERR_SQ_PBL_FULL;
3540 static int __qedr_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
3541 const struct ib_send_wr **bad_wr)
3543 struct qedr_dev *dev = get_qedr_dev(ibqp->device);
3544 struct qedr_qp *qp = get_qedr_qp(ibqp);
3545 struct rdma_sq_atomic_wqe_1st *awqe1;
3546 struct rdma_sq_atomic_wqe_2nd *awqe2;
3547 struct rdma_sq_atomic_wqe_3rd *awqe3;
3548 struct rdma_sq_send_wqe_2st *swqe2;
3549 struct rdma_sq_local_inv_wqe *iwqe;
3550 struct rdma_sq_rdma_wqe_2nd *rwqe2;
3551 struct rdma_sq_send_wqe_1st *swqe;
3552 struct rdma_sq_rdma_wqe_1st *rwqe;
3553 struct rdma_sq_fmr_wqe_1st *fwqe1;
3554 struct rdma_sq_common_wqe *wqe;
3559 if (!qedr_can_post_send(qp, wr)) {
3564 wqe = qed_chain_produce(&qp->sq.pbl);
3565 qp->wqe_wr_id[qp->sq.prod].signaled =
3566 !!(wr->send_flags & IB_SEND_SIGNALED) || qp->signaled;
3569 SET_FIELD2(wqe->flags, RDMA_SQ_SEND_WQE_SE_FLG,
3570 !!(wr->send_flags & IB_SEND_SOLICITED));
3571 comp = (!!(wr->send_flags & IB_SEND_SIGNALED)) || qp->signaled;
3572 SET_FIELD2(wqe->flags, RDMA_SQ_SEND_WQE_COMP_FLG, comp);
3573 SET_FIELD2(wqe->flags, RDMA_SQ_SEND_WQE_RD_FENCE_FLG,
3574 !!(wr->send_flags & IB_SEND_FENCE));
3575 wqe->prev_wqe_size = qp->prev_wqe_size;
3577 qp->wqe_wr_id[qp->sq.prod].opcode = qedr_ib_to_wc_opcode(wr->opcode);
3579 switch (wr->opcode) {
3580 case IB_WR_SEND_WITH_IMM:
3581 if (unlikely(rdma_protocol_iwarp(&dev->ibdev, 1))) {
3586 wqe->req_type = RDMA_SQ_REQ_TYPE_SEND_WITH_IMM;
3587 swqe = (struct rdma_sq_send_wqe_1st *)wqe;
3589 swqe2 = qed_chain_produce(&qp->sq.pbl);
3591 swqe->inv_key_or_imm_data = cpu_to_le32(be32_to_cpu(wr->ex.imm_data));
3592 length = qedr_prepare_sq_send_data(dev, qp, swqe, swqe2,
3594 swqe->length = cpu_to_le32(length);
3595 qp->wqe_wr_id[qp->sq.prod].wqe_size = swqe->wqe_size;
3596 qp->prev_wqe_size = swqe->wqe_size;
3597 qp->wqe_wr_id[qp->sq.prod].bytes_len = swqe->length;
3600 wqe->req_type = RDMA_SQ_REQ_TYPE_SEND;
3601 swqe = (struct rdma_sq_send_wqe_1st *)wqe;
3604 swqe2 = qed_chain_produce(&qp->sq.pbl);
3605 length = qedr_prepare_sq_send_data(dev, qp, swqe, swqe2,
3607 swqe->length = cpu_to_le32(length);
3608 qp->wqe_wr_id[qp->sq.prod].wqe_size = swqe->wqe_size;
3609 qp->prev_wqe_size = swqe->wqe_size;
3610 qp->wqe_wr_id[qp->sq.prod].bytes_len = swqe->length;
3612 case IB_WR_SEND_WITH_INV:
3613 wqe->req_type = RDMA_SQ_REQ_TYPE_SEND_WITH_INVALIDATE;
3614 swqe = (struct rdma_sq_send_wqe_1st *)wqe;
3615 swqe2 = qed_chain_produce(&qp->sq.pbl);
3617 swqe->inv_key_or_imm_data = cpu_to_le32(wr->ex.invalidate_rkey);
3618 length = qedr_prepare_sq_send_data(dev, qp, swqe, swqe2,
3620 swqe->length = cpu_to_le32(length);
3621 qp->wqe_wr_id[qp->sq.prod].wqe_size = swqe->wqe_size;
3622 qp->prev_wqe_size = swqe->wqe_size;
3623 qp->wqe_wr_id[qp->sq.prod].bytes_len = swqe->length;
3626 case IB_WR_RDMA_WRITE_WITH_IMM:
3627 if (unlikely(rdma_protocol_iwarp(&dev->ibdev, 1))) {
3632 wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_WR_WITH_IMM;
3633 rwqe = (struct rdma_sq_rdma_wqe_1st *)wqe;
3636 rwqe->imm_data = htonl(cpu_to_le32(wr->ex.imm_data));
3637 rwqe2 = qed_chain_produce(&qp->sq.pbl);
3638 length = qedr_prepare_sq_rdma_data(dev, qp, rwqe, rwqe2,
3640 rwqe->length = cpu_to_le32(length);
3641 qp->wqe_wr_id[qp->sq.prod].wqe_size = rwqe->wqe_size;
3642 qp->prev_wqe_size = rwqe->wqe_size;
3643 qp->wqe_wr_id[qp->sq.prod].bytes_len = rwqe->length;
3645 case IB_WR_RDMA_WRITE:
3646 wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_WR;
3647 rwqe = (struct rdma_sq_rdma_wqe_1st *)wqe;
3650 rwqe2 = qed_chain_produce(&qp->sq.pbl);
3651 length = qedr_prepare_sq_rdma_data(dev, qp, rwqe, rwqe2,
3653 rwqe->length = cpu_to_le32(length);
3654 qp->wqe_wr_id[qp->sq.prod].wqe_size = rwqe->wqe_size;
3655 qp->prev_wqe_size = rwqe->wqe_size;
3656 qp->wqe_wr_id[qp->sq.prod].bytes_len = rwqe->length;
3658 case IB_WR_RDMA_READ_WITH_INV:
3659 SET_FIELD2(wqe->flags, RDMA_SQ_RDMA_WQE_1ST_READ_INV_FLG, 1);
fallthrough;	/* handled identically to RDMA READ from here on */
3662 case IB_WR_RDMA_READ:
3663 wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_RD;
3664 rwqe = (struct rdma_sq_rdma_wqe_1st *)wqe;
3667 rwqe2 = qed_chain_produce(&qp->sq.pbl);
3668 length = qedr_prepare_sq_rdma_data(dev, qp, rwqe, rwqe2,
3670 rwqe->length = cpu_to_le32(length);
3671 qp->wqe_wr_id[qp->sq.prod].wqe_size = rwqe->wqe_size;
3672 qp->prev_wqe_size = rwqe->wqe_size;
3673 qp->wqe_wr_id[qp->sq.prod].bytes_len = rwqe->length;
3676 case IB_WR_ATOMIC_CMP_AND_SWP:
3677 case IB_WR_ATOMIC_FETCH_AND_ADD:
3678 awqe1 = (struct rdma_sq_atomic_wqe_1st *)wqe;
3679 awqe1->wqe_size = 4;
3681 awqe2 = qed_chain_produce(&qp->sq.pbl);
3682 DMA_REGPAIR_LE(awqe2->remote_va, atomic_wr(wr)->remote_addr);
3683 awqe2->r_key = cpu_to_le32(atomic_wr(wr)->rkey);
3685 awqe3 = qed_chain_produce(&qp->sq.pbl);
3687 if (wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
3688 wqe->req_type = RDMA_SQ_REQ_TYPE_ATOMIC_ADD;
3689 DMA_REGPAIR_LE(awqe3->swap_data,
3690 atomic_wr(wr)->compare_add);
3692 wqe->req_type = RDMA_SQ_REQ_TYPE_ATOMIC_CMP_AND_SWAP;
3693 DMA_REGPAIR_LE(awqe3->swap_data,
3694 atomic_wr(wr)->swap);
3695 DMA_REGPAIR_LE(awqe3->cmp_data,
3696 atomic_wr(wr)->compare_add);
3699 qedr_prepare_sq_sges(qp, NULL, wr);
3701 qp->wqe_wr_id[qp->sq.prod].wqe_size = awqe1->wqe_size;
3702 qp->prev_wqe_size = awqe1->wqe_size;
3705 case IB_WR_LOCAL_INV:
3706 iwqe = (struct rdma_sq_local_inv_wqe *)wqe;
3709 iwqe->req_type = RDMA_SQ_REQ_TYPE_LOCAL_INVALIDATE;
3710 iwqe->inv_l_key = wr->ex.invalidate_rkey;
3711 qp->wqe_wr_id[qp->sq.prod].wqe_size = iwqe->wqe_size;
3712 qp->prev_wqe_size = iwqe->wqe_size;
3715 DP_DEBUG(dev, QEDR_MSG_CQ, "REG_MR\n");
3716 wqe->req_type = RDMA_SQ_REQ_TYPE_FAST_MR;
3717 fwqe1 = (struct rdma_sq_fmr_wqe_1st *)wqe;
3718 fwqe1->wqe_size = 2;
3720 rc = qedr_prepare_reg(qp, fwqe1, reg_wr(wr));
3722 DP_ERR(dev, "IB_REG_MR failed rc=%d\n", rc);
3727 qp->wqe_wr_id[qp->sq.prod].wqe_size = fwqe1->wqe_size;
3728 qp->prev_wqe_size = fwqe1->wqe_size;
3731 DP_ERR(dev, "invalid opcode 0x%x!\n", wr->opcode);
3740 /* Restore prod to its position before
3741 * this WR was processed
3743 value = le16_to_cpu(qp->sq.db_data.data.value);
3744 qed_chain_set_prod(&qp->sq.pbl, value, wqe);
3746 /* Restore prev_wqe_size */
3747 qp->prev_wqe_size = wqe->prev_wqe_size;
3749 DP_ERR(dev, "POST SEND FAILED\n");
3755 int qedr_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
3756 const struct ib_send_wr **bad_wr)
3758 struct qedr_dev *dev = get_qedr_dev(ibqp->device);
3759 struct qedr_qp *qp = get_qedr_qp(ibqp);
3760 unsigned long flags;
3765 if (qp->qp_type == IB_QPT_GSI)
3766 return qedr_gsi_post_send(ibqp, wr, bad_wr);
3768 spin_lock_irqsave(&qp->q_lock, flags);
3770 if (rdma_protocol_roce(&dev->ibdev, 1)) {
3771 if ((qp->state != QED_ROCE_QP_STATE_RTS) &&
3772 (qp->state != QED_ROCE_QP_STATE_ERR) &&
3773 (qp->state != QED_ROCE_QP_STATE_SQD)) {
3774 spin_unlock_irqrestore(&qp->q_lock, flags);
3776 DP_DEBUG(dev, QEDR_MSG_CQ,
3777 "QP in wrong state! QP icid=0x%x state %d\n",
3778 qp->icid, qp->state);
3784 rc = __qedr_post_send(ibqp, wr, bad_wr);
3788 qp->wqe_wr_id[qp->sq.prod].wr_id = wr->wr_id;
3790 qedr_inc_sw_prod(&qp->sq);
3792 qp->sq.db_data.data.value++;
 * If there was a failure in the first WR then it will be triggered in
 * vain. However, this is not harmful (as long as the producer value is
 * unchanged). For performance reasons we avoid checking for this
 * redundant doorbell.
3803 * qp->wqe_wr_id is accessed during qedr_poll_cq, as
3804 * soon as we give the doorbell, we could get a completion
3805 * for this wr, therefore we need to make sure that the
3806 * memory is updated before giving the doorbell.
3807 * During qedr_poll_cq, rmb is called before accessing the
3808 * cqe. This covers for the smp_rmb as well.
3811 writel(qp->sq.db_data.raw, qp->sq.db);
3813 spin_unlock_irqrestore(&qp->q_lock, flags);
3818 static u32 qedr_srq_elem_left(struct qedr_srq_hwq_info *hw_srq)
/* Calculate the number of elements in use from the producer and
 * consumer counts, and subtract it from the max WRs supported to
 * get the number of elements left.
 */
3826 used = hw_srq->wr_prod_cnt - (u32)atomic_read(&hw_srq->wr_cons_cnt);
3828 return hw_srq->max_wr - used;
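/* Example (illustrative): max_wr = 128, wr_prod_cnt = 40 and
 * wr_cons_cnt = 30 give used = 10 and 118 elements left; since the
 * subtraction is unsigned and modular, the result stays correct after
 * the producer counter wraps.
 */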
3831 int qedr_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
3832 const struct ib_recv_wr **bad_wr)
3834 struct qedr_srq *srq = get_qedr_srq(ibsrq);
3835 struct qedr_srq_hwq_info *hw_srq;
3836 struct qedr_dev *dev = srq->dev;
3837 struct qed_chain *pbl;
3838 unsigned long flags;
3842 spin_lock_irqsave(&srq->lock, flags);
3844 hw_srq = &srq->hw_srq;
3845 pbl = &srq->hw_srq.pbl;
3847 struct rdma_srq_wqe_header *hdr;
3850 if (!qedr_srq_elem_left(hw_srq) ||
3851 wr->num_sge > srq->hw_srq.max_sges) {
3852 DP_ERR(dev, "Can't post WR (%d,%d) || (%d > %d)\n",
3853 hw_srq->wr_prod_cnt,
3854 atomic_read(&hw_srq->wr_cons_cnt),
3855 wr->num_sge, srq->hw_srq.max_sges);
3861 hdr = qed_chain_produce(pbl);
3862 num_sge = wr->num_sge;
3863 /* Set number of sge and work request id in header */
3864 SRQ_HDR_SET(hdr, wr->wr_id, num_sge);
3866 srq->hw_srq.wr_prod_cnt++;
3870 DP_DEBUG(dev, QEDR_MSG_SRQ,
3871 "SRQ WR: SGEs: %d with wr_id[%d] = %llx\n",
3872 wr->num_sge, hw_srq->wqe_prod, wr->wr_id);
3874 for (i = 0; i < wr->num_sge; i++) {
3875 struct rdma_srq_sge *srq_sge = qed_chain_produce(pbl);
3877 /* Set SGE length, lkey and address */
3878 SRQ_SGE_SET(srq_sge, wr->sg_list[i].addr,
3879 wr->sg_list[i].length, wr->sg_list[i].lkey);
3881 DP_DEBUG(dev, QEDR_MSG_SRQ,
3882 "[%d]: len %d key %x addr %x:%x\n",
3883 i, srq_sge->length, srq_sge->l_key,
3884 srq_sge->addr.hi, srq_sge->addr.lo);
3888 /* Update WQE and SGE information before
3889 * updating producer.
/* SRQ producer is 8 bytes. Need to update SGE producer index
 * in the first 4 bytes and the WQE producer in the next 4 bytes.
 */
3897 srq->hw_srq.virt_prod_pair_addr->sge_prod = cpu_to_le32(hw_srq->sge_prod);
3898 /* Make sure sge producer is updated first */
3900 srq->hw_srq.virt_prod_pair_addr->wqe_prod = cpu_to_le32(hw_srq->wqe_prod);
3905 DP_DEBUG(dev, QEDR_MSG_SRQ, "POST: Elements in S-RQ: %d\n",
3906 qed_chain_get_elem_left(pbl));
3907 spin_unlock_irqrestore(&srq->lock, flags);
3912 int qedr_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
3913 const struct ib_recv_wr **bad_wr)
3915 struct qedr_qp *qp = get_qedr_qp(ibqp);
3916 struct qedr_dev *dev = qp->dev;
3917 unsigned long flags;
3920 if (qp->qp_type == IB_QPT_GSI)
3921 return qedr_gsi_post_recv(ibqp, wr, bad_wr);
3923 spin_lock_irqsave(&qp->q_lock, flags);
3925 if (qp->state == QED_ROCE_QP_STATE_RESET) {
3926 spin_unlock_irqrestore(&qp->q_lock, flags);
3934 if (qed_chain_get_elem_left_u32(&qp->rq.pbl) <
3935 QEDR_MAX_RQE_ELEMENTS_PER_RQE ||
3936 wr->num_sge > qp->rq.max_sges) {
3937 DP_ERR(dev, "Can't post WR (%d < %d) || (%d > %d)\n",
3938 qed_chain_get_elem_left_u32(&qp->rq.pbl),
3939 QEDR_MAX_RQE_ELEMENTS_PER_RQE, wr->num_sge,
3945 for (i = 0; i < wr->num_sge; i++) {
3947 struct rdma_rq_sge *rqe =
3948 qed_chain_produce(&qp->rq.pbl);
3950 /* First one must include the number
3951 * of SGE in the list
3954 SET_FIELD(flags, RDMA_RQ_SGE_NUM_SGES,
3957 SET_FIELD(flags, RDMA_RQ_SGE_L_KEY_LO,
3958 wr->sg_list[i].lkey);
3960 RQ_SGE_SET(rqe, wr->sg_list[i].addr,
3961 wr->sg_list[i].length, flags);
/* Special case of no SGEs. FW requires between 1-4 SGEs...
 * in this case we need to post 1 SGE with length zero. This is
 * because an RDMA WRITE with immediate consumes an RQ entry.
 */
3970 struct rdma_rq_sge *rqe =
3971 qed_chain_produce(&qp->rq.pbl);
3973 /* First one must include the number
3974 * of SGE in the list
3976 SET_FIELD(flags, RDMA_RQ_SGE_L_KEY_LO, 0);
3977 SET_FIELD(flags, RDMA_RQ_SGE_NUM_SGES, 1);
3979 RQ_SGE_SET(rqe, 0, 0, flags);
3983 qp->rqe_wr_id[qp->rq.prod].wr_id = wr->wr_id;
3984 qp->rqe_wr_id[qp->rq.prod].wqe_size = i;
3986 qedr_inc_sw_prod(&qp->rq);
3988 /* qp->rqe_wr_id is accessed during qedr_poll_cq, as
3989 * soon as we give the doorbell, we could get a completion
3990 * for this wr, therefore we need to make sure that the
 * memory is updated before giving the doorbell.
3992 * During qedr_poll_cq, rmb is called before accessing the
3993 * cqe. This covers for the smp_rmb as well.
3997 qp->rq.db_data.data.value++;
3999 writel(qp->rq.db_data.raw, qp->rq.db);
4001 if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
4002 writel(qp->rq.iwarp_db2_data.raw, qp->rq.iwarp_db2);
4008 spin_unlock_irqrestore(&qp->q_lock, flags);
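/* The CQE validity test below relies on a toggle-valid scheme: the
 * expected polarity of RDMA_CQE_REQUESTER_TOGGLE_BIT flips on every
 * pass over the CQ ring, so a CQE whose toggle flag matches
 * cq->pbl_toggle was written by HW after the consumer's previous
 * visit to that slot.
 */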
4013 static int is_valid_cqe(struct qedr_cq *cq, union rdma_cqe *cqe)
4015 struct rdma_cqe_requester *resp_cqe = &cqe->req;
return (resp_cqe->flags & RDMA_CQE_REQUESTER_TOGGLE_BIT_MASK) ==
    cq->pbl_toggle;
4021 static struct qedr_qp *cqe_get_qp(union rdma_cqe *cqe)
4023 struct rdma_cqe_requester *resp_cqe = &cqe->req;
4026 qp = (struct qedr_qp *)(uintptr_t)HILO_GEN(resp_cqe->qp_handle.hi,
4027 resp_cqe->qp_handle.lo,
4032 static enum rdma_cqe_type cqe_get_type(union rdma_cqe *cqe)
4034 struct rdma_cqe_requester *resp_cqe = &cqe->req;
4036 return GET_FIELD(resp_cqe->flags, RDMA_CQE_REQUESTER_TYPE);
4039 /* Return latest CQE (needs processing) */
4040 static union rdma_cqe *get_cqe(struct qedr_cq *cq)
4042 return cq->latest_cqe;
/* For FMR we need to increase the "completed" counter used by the FMR
 * algorithm to determine whether a PBL can be freed or not.
 * We need to perform this whether the work request was signaled or not.
 * For this purpose we call this function from the condition that checks
 * whether a WR should be skipped, to make sure we don't miss it
 * (possibly this FMR operation was not signaled).
 */
4052 static inline void qedr_chk_if_fmr(struct qedr_qp *qp)
4054 if (qp->wqe_wr_id[qp->sq.cons].opcode == IB_WC_REG_MR)
4055 qp->wqe_wr_id[qp->sq.cons].mr->info.completed++;
4058 static int process_req(struct qedr_dev *dev, struct qedr_qp *qp,
4059 struct qedr_cq *cq, int num_entries,
4060 struct ib_wc *wc, u16 hw_cons, enum ib_wc_status status,
4065 while (num_entries && qp->sq.wqe_cons != hw_cons) {
4066 if (!qp->wqe_wr_id[qp->sq.cons].signaled && !force) {
4067 qedr_chk_if_fmr(qp);
4073 wc->status = status;
4076 wc->src_qp = qp->id;
4079 wc->wr_id = qp->wqe_wr_id[qp->sq.cons].wr_id;
4080 wc->opcode = qp->wqe_wr_id[qp->sq.cons].opcode;
4082 switch (wc->opcode) {
4083 case IB_WC_RDMA_WRITE:
4084 wc->byte_len = qp->wqe_wr_id[qp->sq.cons].bytes_len;
4086 case IB_WC_COMP_SWAP:
4087 case IB_WC_FETCH_ADD:
4091 qp->wqe_wr_id[qp->sq.cons].mr->info.completed++;
4093 case IB_WC_RDMA_READ:
4095 wc->byte_len = qp->wqe_wr_id[qp->sq.cons].bytes_len;
4105 while (qp->wqe_wr_id[qp->sq.cons].wqe_size--)
4106 qed_chain_consume(&qp->sq.pbl);
4107 qedr_inc_sw_cons(&qp->sq);
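/* Note: each WR occupied wqe_size chain elements when it was posted
 * (see __qedr_post_send()), which is why the loop above pops that
 * many elements per completed WR before advancing the SW consumer.
 */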
4113 static int qedr_poll_cq_req(struct qedr_dev *dev,
4114 struct qedr_qp *qp, struct qedr_cq *cq,
4115 int num_entries, struct ib_wc *wc,
4116 struct rdma_cqe_requester *req)
4120 switch (req->status) {
4121 case RDMA_CQE_REQ_STS_OK:
4122 cnt = process_req(dev, qp, cq, num_entries, wc, req->sq_cons,
4125 case RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR:
4126 if (qp->state != QED_ROCE_QP_STATE_ERR)
4127 DP_DEBUG(dev, QEDR_MSG_CQ,
4128 "Error: POLL CQ with RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR. CQ icid=0x%x, QP icid=0x%x\n",
4129 cq->icid, qp->icid);
4130 cnt = process_req(dev, qp, cq, num_entries, wc, req->sq_cons,
4131 IB_WC_WR_FLUSH_ERR, 1);
/* Process all WQEs before the consumer */
4135 qp->state = QED_ROCE_QP_STATE_ERR;
4136 cnt = process_req(dev, qp, cq, num_entries, wc,
4137 req->sq_cons - 1, IB_WC_SUCCESS, 0);
/* If we have an extra WC, fill it with the actual error info */
4140 if (cnt < num_entries) {
4141 enum ib_wc_status wc_status;
4143 switch (req->status) {
4144 case RDMA_CQE_REQ_STS_BAD_RESPONSE_ERR:
4146 "Error: POLL CQ with RDMA_CQE_REQ_STS_BAD_RESPONSE_ERR. CQ icid=0x%x, QP icid=0x%x\n",
4147 cq->icid, qp->icid);
4148 wc_status = IB_WC_BAD_RESP_ERR;
4150 case RDMA_CQE_REQ_STS_LOCAL_LENGTH_ERR:
4152 "Error: POLL CQ with RDMA_CQE_REQ_STS_LOCAL_LENGTH_ERR. CQ icid=0x%x, QP icid=0x%x\n",
4153 cq->icid, qp->icid);
4154 wc_status = IB_WC_LOC_LEN_ERR;
4156 case RDMA_CQE_REQ_STS_LOCAL_QP_OPERATION_ERR:
4158 "Error: POLL CQ with RDMA_CQE_REQ_STS_LOCAL_QP_OPERATION_ERR. CQ icid=0x%x, QP icid=0x%x\n",
4159 cq->icid, qp->icid);
4160 wc_status = IB_WC_LOC_QP_OP_ERR;
4162 case RDMA_CQE_REQ_STS_LOCAL_PROTECTION_ERR:
4164 "Error: POLL CQ with RDMA_CQE_REQ_STS_LOCAL_PROTECTION_ERR. CQ icid=0x%x, QP icid=0x%x\n",
4165 cq->icid, qp->icid);
4166 wc_status = IB_WC_LOC_PROT_ERR;
4168 case RDMA_CQE_REQ_STS_MEMORY_MGT_OPERATION_ERR:
4170 "Error: POLL CQ with RDMA_CQE_REQ_STS_MEMORY_MGT_OPERATION_ERR. CQ icid=0x%x, QP icid=0x%x\n",
4171 cq->icid, qp->icid);
4172 wc_status = IB_WC_MW_BIND_ERR;
4174 case RDMA_CQE_REQ_STS_REMOTE_INVALID_REQUEST_ERR:
4176 "Error: POLL CQ with RDMA_CQE_REQ_STS_REMOTE_INVALID_REQUEST_ERR. CQ icid=0x%x, QP icid=0x%x\n",
4177 cq->icid, qp->icid);
4178 wc_status = IB_WC_REM_INV_REQ_ERR;
4180 case RDMA_CQE_REQ_STS_REMOTE_ACCESS_ERR:
4182 "Error: POLL CQ with RDMA_CQE_REQ_STS_REMOTE_ACCESS_ERR. CQ icid=0x%x, QP icid=0x%x\n",
4183 cq->icid, qp->icid);
4184 wc_status = IB_WC_REM_ACCESS_ERR;
4186 case RDMA_CQE_REQ_STS_REMOTE_OPERATION_ERR:
4188 "Error: POLL CQ with RDMA_CQE_REQ_STS_REMOTE_OPERATION_ERR. CQ icid=0x%x, QP icid=0x%x\n",
4189 cq->icid, qp->icid);
4190 wc_status = IB_WC_REM_OP_ERR;
4192 case RDMA_CQE_REQ_STS_RNR_NAK_RETRY_CNT_ERR:
4194 "Error: POLL CQ with RDMA_CQE_REQ_STS_RNR_NAK_RETRY_CNT_ERR. CQ icid=0x%x, QP icid=0x%x\n",
4195 cq->icid, qp->icid);
4196 wc_status = IB_WC_RNR_RETRY_EXC_ERR;
4198 case RDMA_CQE_REQ_STS_TRANSPORT_RETRY_CNT_ERR:
4200 "Error: POLL CQ with ROCE_CQE_REQ_STS_TRANSPORT_RETRY_CNT_ERR. CQ icid=0x%x, QP icid=0x%x\n",
4201 cq->icid, qp->icid);
4202 wc_status = IB_WC_RETRY_EXC_ERR;
4206 "Error: POLL CQ with IB_WC_GENERAL_ERR. CQ icid=0x%x, QP icid=0x%x\n",
4207 cq->icid, qp->icid);
4208 wc_status = IB_WC_GENERAL_ERR;
4210 cnt += process_req(dev, qp, cq, 1, wc, req->sq_cons,
4218 static inline int qedr_cqe_resp_status_to_ib(u8 status)
4221 case RDMA_CQE_RESP_STS_LOCAL_ACCESS_ERR:
4222 return IB_WC_LOC_ACCESS_ERR;
4223 case RDMA_CQE_RESP_STS_LOCAL_LENGTH_ERR:
4224 return IB_WC_LOC_LEN_ERR;
4225 case RDMA_CQE_RESP_STS_LOCAL_QP_OPERATION_ERR:
4226 return IB_WC_LOC_QP_OP_ERR;
4227 case RDMA_CQE_RESP_STS_LOCAL_PROTECTION_ERR:
4228 return IB_WC_LOC_PROT_ERR;
4229 case RDMA_CQE_RESP_STS_MEMORY_MGT_OPERATION_ERR:
4230 return IB_WC_MW_BIND_ERR;
4231 case RDMA_CQE_RESP_STS_REMOTE_INVALID_REQUEST_ERR:
4232 return IB_WC_REM_INV_RD_REQ_ERR;
4233 case RDMA_CQE_RESP_STS_OK:
4234 return IB_WC_SUCCESS;
4236 return IB_WC_GENERAL_ERR;
4240 static inline int qedr_set_ok_cqe_resp_wc(struct rdma_cqe_responder *resp,
4243 wc->status = IB_WC_SUCCESS;
4244 wc->byte_len = le32_to_cpu(resp->length);
4246 if (resp->flags & QEDR_RESP_IMM) {
4247 wc->ex.imm_data = cpu_to_be32(le32_to_cpu(resp->imm_data_or_inv_r_Key));
4248 wc->wc_flags |= IB_WC_WITH_IMM;
4250 if (resp->flags & QEDR_RESP_RDMA)
4251 wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
4253 if (resp->flags & QEDR_RESP_INV)
4256 } else if (resp->flags & QEDR_RESP_INV) {
4257 wc->ex.imm_data = le32_to_cpu(resp->imm_data_or_inv_r_Key);
4258 wc->wc_flags |= IB_WC_WITH_INVALIDATE;
4260 if (resp->flags & QEDR_RESP_RDMA)
4263 } else if (resp->flags & QEDR_RESP_RDMA) {
4270 static void __process_resp_one(struct qedr_dev *dev, struct qedr_qp *qp,
4271 struct qedr_cq *cq, struct ib_wc *wc,
4272 struct rdma_cqe_responder *resp, u64 wr_id)
4274 /* Must fill fields before qedr_set_ok_cqe_resp_wc() */
4275 wc->opcode = IB_WC_RECV;
4278 if (likely(resp->status == RDMA_CQE_RESP_STS_OK)) {
4279 if (qedr_set_ok_cqe_resp_wc(resp, wc))
4281 "CQ %p (icid=%d) has invalid CQE responder flags=0x%x\n",
4282 cq, cq->icid, resp->flags);
4285 wc->status = qedr_cqe_resp_status_to_ib(resp->status);
4286 if (wc->status == IB_WC_GENERAL_ERR)
4288 "CQ %p (icid=%d) contains an invalid CQE status %d\n",
4289 cq, cq->icid, resp->status);
4292 /* Fill the rest of the WC */
4294 wc->src_qp = qp->id;
4299 static int process_resp_one_srq(struct qedr_dev *dev, struct qedr_qp *qp,
4300 struct qedr_cq *cq, struct ib_wc *wc,
4301 struct rdma_cqe_responder *resp)
4303 struct qedr_srq *srq = qp->srq;
4306 wr_id = HILO_GEN(le32_to_cpu(resp->srq_wr_id.hi),
4307 le32_to_cpu(resp->srq_wr_id.lo), u64);
4309 if (resp->status == RDMA_CQE_RESP_STS_WORK_REQUEST_FLUSHED_ERR) {
4310 wc->status = IB_WC_WR_FLUSH_ERR;
4314 wc->src_qp = qp->id;
4318 __process_resp_one(dev, qp, cq, wc, resp, wr_id);
4320 atomic_inc(&srq->hw_srq.wr_cons_cnt);
4324 static int process_resp_one(struct qedr_dev *dev, struct qedr_qp *qp,
4325 struct qedr_cq *cq, struct ib_wc *wc,
4326 struct rdma_cqe_responder *resp)
4328 u64 wr_id = qp->rqe_wr_id[qp->rq.cons].wr_id;
4330 __process_resp_one(dev, qp, cq, wc, resp, wr_id);
4332 while (qp->rqe_wr_id[qp->rq.cons].wqe_size--)
4333 qed_chain_consume(&qp->rq.pbl);
4334 qedr_inc_sw_cons(&qp->rq);
4339 static int process_resp_flush(struct qedr_qp *qp, struct qedr_cq *cq,
4340 int num_entries, struct ib_wc *wc, u16 hw_cons)
4344 while (num_entries && qp->rq.wqe_cons != hw_cons) {
4346 wc->status = IB_WC_WR_FLUSH_ERR;
4349 wc->src_qp = qp->id;
4351 wc->wr_id = qp->rqe_wr_id[qp->rq.cons].wr_id;
4356 while (qp->rqe_wr_id[qp->rq.cons].wqe_size--)
4357 qed_chain_consume(&qp->rq.pbl);
4358 qedr_inc_sw_cons(&qp->rq);
4364 static void try_consume_resp_cqe(struct qedr_cq *cq, struct qedr_qp *qp,
4365 struct rdma_cqe_responder *resp, int *update)
4367 if (le16_to_cpu(resp->rq_cons_or_srq_id) == qp->rq.wqe_cons) {
4373 static int qedr_poll_cq_resp_srq(struct qedr_dev *dev, struct qedr_qp *qp,
4374 struct qedr_cq *cq, int num_entries,
4376 struct rdma_cqe_responder *resp)
4380 cnt = process_resp_one_srq(dev, qp, cq, wc, resp);
4386 static int qedr_poll_cq_resp(struct qedr_dev *dev, struct qedr_qp *qp,
4387 struct qedr_cq *cq, int num_entries,
4388 struct ib_wc *wc, struct rdma_cqe_responder *resp,
4393 if (resp->status == RDMA_CQE_RESP_STS_WORK_REQUEST_FLUSHED_ERR) {
4394 cnt = process_resp_flush(qp, cq, num_entries, wc,
4395 resp->rq_cons_or_srq_id);
4396 try_consume_resp_cqe(cq, qp, resp, update);
4398 cnt = process_resp_one(dev, qp, cq, wc, resp);
4406 static void try_consume_req_cqe(struct qedr_cq *cq, struct qedr_qp *qp,
4407 struct rdma_cqe_requester *req, int *update)
4409 if (le16_to_cpu(req->sq_cons) == qp->sq.wqe_cons) {
4415 int qedr_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
4417 struct qedr_dev *dev = get_qedr_dev(ibcq->device);
4418 struct qedr_cq *cq = get_qedr_cq(ibcq);
4419 union rdma_cqe *cqe;
4420 u32 old_cons, new_cons;
4421 unsigned long flags;
4425 if (cq->destroyed) {
4427 "warning: poll was invoked after destroy for cq %p (icid=%d)\n",
4432 if (cq->cq_type == QEDR_CQ_TYPE_GSI)
4433 return qedr_gsi_poll_cq(ibcq, num_entries, wc);
4435 spin_lock_irqsave(&cq->cq_lock, flags);
4436 cqe = cq->latest_cqe;
4437 old_cons = qed_chain_get_cons_idx_u32(&cq->pbl);
4438 while (num_entries && is_valid_cqe(cq, cqe)) {
4442 /* prevent speculative reads of any field of CQE */
4445 qp = cqe_get_qp(cqe);
4447 WARN(1, "Error: CQE QP pointer is NULL. CQE=%p\n", cqe);
4453 switch (cqe_get_type(cqe)) {
4454 case RDMA_CQE_TYPE_REQUESTER:
4455 cnt = qedr_poll_cq_req(dev, qp, cq, num_entries, wc,
4457 try_consume_req_cqe(cq, qp, &cqe->req, &update);
4459 case RDMA_CQE_TYPE_RESPONDER_RQ:
4460 cnt = qedr_poll_cq_resp(dev, qp, cq, num_entries, wc,
4461 &cqe->resp, &update);
4463 case RDMA_CQE_TYPE_RESPONDER_SRQ:
4464 cnt = qedr_poll_cq_resp_srq(dev, qp, cq, num_entries,
4468 case RDMA_CQE_TYPE_INVALID:
4470 DP_ERR(dev, "Error: invalid CQE type = %d\n",
4479 new_cons = qed_chain_get_cons_idx_u32(&cq->pbl);
4481 cq->cq_cons += new_cons - old_cons;
/* The doorbell notifies about the latest VALID entry,
 * but the chain already points to the next INVALID one.
 */
4487 doorbell_cq(cq, cq->cq_cons - 1, cq->arm_flags);
4489 spin_unlock_irqrestore(&cq->cq_lock, flags);
4493 int qedr_process_mad(struct ib_device *ibdev, int process_mad_flags,
4494 u8 port_num, const struct ib_wc *in_wc,
4495 const struct ib_grh *in_grh, const struct ib_mad *in,
4496 struct ib_mad *out_mad, size_t *out_mad_size,
4497 u16 *out_mad_pkey_index)
4499 return IB_MAD_RESULT_SUCCESS;