/* QLogic qedr NIC Driver
 * Copyright (c) 2015-2016  QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/dma-mapping.h>
#include <linux/crc32.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/udp.h>
#include <linux/iommu.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/iw_cm.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_cache.h>
#include <rdma/uverbs_ioctl.h>

#include <linux/qed/common_hsi.h>
#include "qedr_hsi_rdma.h"
#include <linux/qed/qed_if.h>
#include "qedr.h"
#include "verbs.h"
#include <rdma/qedr-abi.h>
#include "qedr_roce_cm.h"
#include "qedr_iw_cm.h"

#define QEDR_SRQ_WQE_ELEM_SIZE  sizeof(union rdma_srq_elm)
#define RDMA_MAX_SGE_PER_SRQ    (4)
#define RDMA_MAX_SRQ_WQE_SIZE   (RDMA_MAX_SGE_PER_SRQ + 1)

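/* Place a PWM doorbell offset at its position in the doorbell address space. */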
#define DB_ADDR_SHIFT(addr)             ((addr) << DB_PWM_ADDR_OFFSET_SHIFT)

enum {
        QEDR_USER_MMAP_IO_WC = 0,
        QEDR_USER_MMAP_PHYS_PAGE,
};

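/* Copy a response to user space, truncating to udata->outlen so that
 * older user libraries with a smaller response struct still work.
 */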
static inline int qedr_ib_copy_to_udata(struct ib_udata *udata, void *src,
                                        size_t len)
{
        size_t min_len = min_t(size_t, len, udata->outlen);

        return ib_copy_to_udata(udata, src, min_len);
}

int qedr_query_pkey(struct ib_device *ibdev, u32 port, u16 index, u16 *pkey)
{
        if (index >= QEDR_ROCE_PKEY_TABLE_LEN)
                return -EINVAL;

        *pkey = QEDR_ROCE_PKEY_DEFAULT;
        return 0;
}

int qedr_iw_query_gid(struct ib_device *ibdev, u32 port,
                      int index, union ib_gid *sgid)
{
        struct qedr_dev *dev = get_qedr_dev(ibdev);

        memset(sgid->raw, 0, sizeof(sgid->raw));
        ether_addr_copy(sgid->raw, dev->ndev->dev_addr);

        DP_DEBUG(dev, QEDR_MSG_INIT, "QUERY sgid[%d]=%llx:%llx\n", index,
                 sgid->global.interface_id, sgid->global.subnet_prefix);

        return 0;
}

int qedr_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr)
{
        struct qedr_dev *dev = get_qedr_dev(ibsrq->device);
        struct qedr_device_attr *qattr = &dev->attr;
        struct qedr_srq *srq = get_qedr_srq(ibsrq);

        srq_attr->srq_limit = srq->srq_limit;
        srq_attr->max_wr = qattr->max_srq_wr;
        srq_attr->max_sge = qattr->max_sge;

        return 0;
}

int qedr_query_device(struct ib_device *ibdev,
                      struct ib_device_attr *attr, struct ib_udata *udata)
{
        struct qedr_dev *dev = get_qedr_dev(ibdev);
        struct qedr_device_attr *qattr = &dev->attr;

        if (!dev->rdma_ctx) {
                DP_ERR(dev,
                       "qedr_query_device called with invalid params rdma_ctx=%p\n",
                       dev->rdma_ctx);
                return -EINVAL;
        }

        memset(attr, 0, sizeof(*attr));

        attr->fw_ver = qattr->fw_ver;
        attr->sys_image_guid = qattr->sys_image_guid;
        attr->max_mr_size = qattr->max_mr_size;
        attr->page_size_cap = qattr->page_size_caps;
        attr->vendor_id = qattr->vendor_id;
        attr->vendor_part_id = qattr->vendor_part_id;
        attr->hw_ver = qattr->hw_ver;
        attr->max_qp = qattr->max_qp;
        attr->max_qp_wr = max_t(u32, qattr->max_sqe, qattr->max_rqe);
        attr->device_cap_flags = IB_DEVICE_CURR_QP_STATE_MOD |
            IB_DEVICE_RC_RNR_NAK_GEN |
            IB_DEVICE_MEM_MGT_EXTENSIONS;
        attr->kernel_cap_flags = IBK_LOCAL_DMA_LKEY;

        if (!rdma_protocol_iwarp(&dev->ibdev, 1))
                attr->device_cap_flags |= IB_DEVICE_XRC;
        attr->max_send_sge = qattr->max_sge;
        attr->max_recv_sge = qattr->max_sge;
        attr->max_sge_rd = qattr->max_sge;
        attr->max_cq = qattr->max_cq;
        attr->max_cqe = qattr->max_cqe;
        attr->max_mr = qattr->max_mr;
        attr->max_mw = qattr->max_mw;
        attr->max_pd = qattr->max_pd;
        attr->atomic_cap = dev->atomic_cap;
        attr->max_qp_init_rd_atom =
            1 << (fls(qattr->max_qp_req_rd_atomic_resc) - 1);
        attr->max_qp_rd_atom =
            min(1 << (fls(qattr->max_qp_resp_rd_atomic_resc) - 1),
                attr->max_qp_init_rd_atom);

        attr->max_srq = qattr->max_srq;
        attr->max_srq_sge = qattr->max_srq_sge;
        attr->max_srq_wr = qattr->max_srq_wr;

        attr->local_ca_ack_delay = qattr->dev_ack_delay;
        attr->max_fast_reg_page_list_len = qattr->max_mr / 8;
        attr->max_pkeys = qattr->max_pkey;
        attr->max_ah = qattr->max_ah;

        return 0;
}

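/* Translate an Ethernet link speed (in Mbps) into the closest IB
 * speed/width pair; unknown speeds fall back to SDR x1.
 */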
static inline void get_link_speed_and_width(int speed, u16 *ib_speed,
                                            u8 *ib_width)
{
        switch (speed) {
        case 1000:
                *ib_speed = IB_SPEED_SDR;
                *ib_width = IB_WIDTH_1X;
                break;
        case 10000:
                *ib_speed = IB_SPEED_QDR;
                *ib_width = IB_WIDTH_1X;
                break;
        case 20000:
                *ib_speed = IB_SPEED_DDR;
                *ib_width = IB_WIDTH_4X;
                break;
        case 25000:
                *ib_speed = IB_SPEED_EDR;
                *ib_width = IB_WIDTH_1X;
                break;
        case 40000:
                *ib_speed = IB_SPEED_QDR;
                *ib_width = IB_WIDTH_4X;
                break;
        case 50000:
                *ib_speed = IB_SPEED_HDR;
                *ib_width = IB_WIDTH_1X;
                break;
        case 100000:
                *ib_speed = IB_SPEED_EDR;
                *ib_width = IB_WIDTH_4X;
                break;
        default:
                /* Unsupported */
                *ib_speed = IB_SPEED_SDR;
                *ib_width = IB_WIDTH_1X;
        }
}

int qedr_query_port(struct ib_device *ibdev, u32 port,
                    struct ib_port_attr *attr)
{
        struct qedr_dev *dev;
        struct qed_rdma_port *rdma_port;

        dev = get_qedr_dev(ibdev);

        if (!dev->rdma_ctx) {
                DP_ERR(dev, "rdma_ctx is NULL\n");
                return -EINVAL;
        }

        rdma_port = dev->ops->rdma_query_port(dev->rdma_ctx);

        /* *attr is zeroed by the caller; avoid zeroing it again here. */
        if (rdma_port->port_state == QED_RDMA_PORT_UP) {
                attr->state = IB_PORT_ACTIVE;
                attr->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
        } else {
                attr->state = IB_PORT_DOWN;
                attr->phys_state = IB_PORT_PHYS_STATE_DISABLED;
        }
        attr->max_mtu = IB_MTU_4096;
        attr->lid = 0;
        attr->lmc = 0;
        attr->sm_lid = 0;
        attr->sm_sl = 0;
        attr->ip_gids = true;
        if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
                attr->active_mtu = iboe_get_mtu(dev->iwarp_max_mtu);
                attr->gid_tbl_len = 1;
        } else {
                attr->active_mtu = iboe_get_mtu(dev->ndev->mtu);
                attr->gid_tbl_len = QEDR_MAX_SGID;
                attr->pkey_tbl_len = QEDR_ROCE_PKEY_TABLE_LEN;
        }
        attr->bad_pkey_cntr = rdma_port->pkey_bad_counter;
        attr->qkey_viol_cntr = 0;
        get_link_speed_and_width(rdma_port->link_speed,
                                 &attr->active_speed, &attr->active_width);
        attr->max_msg_sz = rdma_port->max_msg_size;
        attr->max_vl_num = 4;

        return 0;
}

int qedr_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata)
{
        struct ib_device *ibdev = uctx->device;
        int rc;
        struct qedr_ucontext *ctx = get_qedr_ucontext(uctx);
        struct qedr_alloc_ucontext_resp uresp = {};
        struct qedr_alloc_ucontext_req ureq = {};
        struct qedr_dev *dev = get_qedr_dev(ibdev);
        struct qed_rdma_add_user_out_params oparams;
        struct qedr_user_mmap_entry *entry;

        if (!udata)
                return -EFAULT;

        if (udata->inlen) {
                rc = ib_copy_from_udata(&ureq, udata,
                                        min(sizeof(ureq), udata->inlen));
                if (rc) {
                        DP_ERR(dev, "Problem copying data from user space\n");
                        return -EFAULT;
                }
                ctx->edpm_mode = !!(ureq.context_flags &
                                    QEDR_ALLOC_UCTX_EDPM_MODE);
                ctx->db_rec = !!(ureq.context_flags & QEDR_ALLOC_UCTX_DB_REC);
        }

        rc = dev->ops->rdma_add_user(dev->rdma_ctx, &oparams);
        if (rc) {
                DP_ERR(dev,
                       "failed to allocate a DPI for a new RoCE application, rc=%d. To overcome this, consider increasing the number of DPIs, increasing the doorbell BAR size or closing unnecessary RoCE applications. To increase the number of DPIs, consult the qedr readme\n",
                       rc);
                return rc;
        }

        ctx->dpi = oparams.dpi;
        ctx->dpi_addr = oparams.dpi_addr;
        ctx->dpi_phys_addr = oparams.dpi_phys_addr;
        ctx->dpi_size = oparams.dpi_size;
        entry = kzalloc(sizeof(*entry), GFP_KERNEL);
        if (!entry) {
                rc = -ENOMEM;
                goto err;
        }

        entry->io_address = ctx->dpi_phys_addr;
        entry->length = ctx->dpi_size;
        entry->mmap_flag = QEDR_USER_MMAP_IO_WC;
        entry->dpi = ctx->dpi;
        entry->dev = dev;
        rc = rdma_user_mmap_entry_insert(uctx, &entry->rdma_entry,
                                         ctx->dpi_size);
        if (rc) {
                kfree(entry);
                goto err;
        }
        ctx->db_mmap_entry = &entry->rdma_entry;

        if (!dev->user_dpm_enabled)
                uresp.dpm_flags = 0;
        else if (rdma_protocol_iwarp(&dev->ibdev, 1))
                uresp.dpm_flags = QEDR_DPM_TYPE_IWARP_LEGACY;
        else
                uresp.dpm_flags = QEDR_DPM_TYPE_ROCE_ENHANCED |
                                  QEDR_DPM_TYPE_ROCE_LEGACY |
                                  QEDR_DPM_TYPE_ROCE_EDPM_MODE;

        if (ureq.context_flags & QEDR_SUPPORT_DPM_SIZES) {
                uresp.dpm_flags |= QEDR_DPM_SIZES_SET;
                uresp.ldpm_limit_size = QEDR_LDPM_MAX_SIZE;
                uresp.edpm_trans_size = QEDR_EDPM_TRANS_SIZE;
                uresp.edpm_limit_size = QEDR_EDPM_MAX_SIZE;
        }

        uresp.wids_enabled = 1;
        uresp.wid_count = oparams.wid_count;
        uresp.db_pa = rdma_user_mmap_get_offset(ctx->db_mmap_entry);
        uresp.db_size = ctx->dpi_size;
        uresp.max_send_wr = dev->attr.max_sqe;
        uresp.max_recv_wr = dev->attr.max_rqe;
        uresp.max_srq_wr = dev->attr.max_srq_wr;
        uresp.sges_per_send_wr = QEDR_MAX_SQE_ELEMENTS_PER_SQE;
        uresp.sges_per_recv_wr = QEDR_MAX_RQE_ELEMENTS_PER_RQE;
        uresp.sges_per_srq_wr = dev->attr.max_srq_sge;
        uresp.max_cqes = QEDR_MAX_CQES;

        rc = qedr_ib_copy_to_udata(udata, &uresp, sizeof(uresp));
        if (rc)
                goto err;

        ctx->dev = dev;

        DP_DEBUG(dev, QEDR_MSG_INIT, "Allocating user context %p\n",
                 &ctx->ibucontext);
        return 0;

err:
        if (!ctx->db_mmap_entry)
                dev->ops->rdma_remove_user(dev->rdma_ctx, ctx->dpi);
        else
                rdma_user_mmap_entry_remove(ctx->db_mmap_entry);

        return rc;
}

void qedr_dealloc_ucontext(struct ib_ucontext *ibctx)
{
        struct qedr_ucontext *uctx = get_qedr_ucontext(ibctx);

        DP_DEBUG(uctx->dev, QEDR_MSG_INIT, "Deallocating user context %p\n",
                 uctx);

        rdma_user_mmap_entry_remove(uctx->db_mmap_entry);
}

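/* Release callback invoked when the last reference to an mmap entry is
 * dropped: free the doorbell-recovery page or release the DPI, then the
 * entry itself.
 */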
void qedr_mmap_free(struct rdma_user_mmap_entry *rdma_entry)
{
        struct qedr_user_mmap_entry *entry = get_qedr_mmap_entry(rdma_entry);
        struct qedr_dev *dev = entry->dev;

        if (entry->mmap_flag == QEDR_USER_MMAP_PHYS_PAGE)
                free_page((unsigned long)entry->address);
        else if (entry->mmap_flag == QEDR_USER_MMAP_IO_WC)
                dev->ops->rdma_remove_user(dev->rdma_ctx, entry->dpi);

        kfree(entry);
}

int qedr_mmap(struct ib_ucontext *ucontext, struct vm_area_struct *vma)
{
        struct ib_device *dev = ucontext->device;
        size_t length = vma->vm_end - vma->vm_start;
        struct rdma_user_mmap_entry *rdma_entry;
        struct qedr_user_mmap_entry *entry;
        int rc = 0;
        u64 pfn;

        ibdev_dbg(dev,
                  "start %#lx, end %#lx, length = %#zx, pgoff = %#lx\n",
                  vma->vm_start, vma->vm_end, length, vma->vm_pgoff);

        rdma_entry = rdma_user_mmap_entry_get(ucontext, vma);
        if (!rdma_entry) {
                ibdev_dbg(dev, "pgoff[%#lx] does not have valid entry\n",
                          vma->vm_pgoff);
                return -EINVAL;
        }
        entry = get_qedr_mmap_entry(rdma_entry);
        ibdev_dbg(dev,
                  "Mapping address[%#llx], length[%#zx], mmap_flag[%d]\n",
                  entry->io_address, length, entry->mmap_flag);

        switch (entry->mmap_flag) {
        case QEDR_USER_MMAP_IO_WC:
                pfn = entry->io_address >> PAGE_SHIFT;
                rc = rdma_user_mmap_io(ucontext, vma, pfn, length,
                                       pgprot_writecombine(vma->vm_page_prot),
                                       rdma_entry);
                break;
        case QEDR_USER_MMAP_PHYS_PAGE:
                rc = vm_insert_page(vma, vma->vm_start,
                                    virt_to_page(entry->address));
                break;
        default:
                rc = -EINVAL;
        }

        if (rc)
                ibdev_dbg(dev,
                          "Couldn't mmap address[%#llx] length[%#zx] mmap_flag[%d] err[%d]\n",
                          entry->io_address, length, entry->mmap_flag, rc);

        rdma_user_mmap_entry_put(rdma_entry);
        return rc;
}

int qedr_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
{
        struct ib_device *ibdev = ibpd->device;
        struct qedr_dev *dev = get_qedr_dev(ibdev);
        struct qedr_pd *pd = get_qedr_pd(ibpd);
        u16 pd_id;
        int rc;

        DP_DEBUG(dev, QEDR_MSG_INIT, "Function called from: %s\n",
                 udata ? "User Lib" : "Kernel");

        if (!dev->rdma_ctx) {
                DP_ERR(dev, "invalid RDMA context\n");
                return -EINVAL;
        }

        rc = dev->ops->rdma_alloc_pd(dev->rdma_ctx, &pd_id);
        if (rc)
                return rc;

        pd->pd_id = pd_id;

        if (udata) {
                struct qedr_alloc_pd_uresp uresp = {
                        .pd_id = pd_id,
                };
                struct qedr_ucontext *context = rdma_udata_to_drv_context(
                        udata, struct qedr_ucontext, ibucontext);

                rc = qedr_ib_copy_to_udata(udata, &uresp, sizeof(uresp));
                if (rc) {
                        DP_ERR(dev, "copy error pd_id=0x%x.\n", pd_id);
                        dev->ops->rdma_dealloc_pd(dev->rdma_ctx, pd_id);
                        return rc;
                }

                pd->uctx = context;
                pd->uctx->pd = pd;
        }

        return 0;
}

int qedr_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
{
        struct qedr_dev *dev = get_qedr_dev(ibpd->device);
        struct qedr_pd *pd = get_qedr_pd(ibpd);

        DP_DEBUG(dev, QEDR_MSG_INIT, "Deallocating PD %d\n", pd->pd_id);
        dev->ops->rdma_dealloc_pd(dev->rdma_ctx, pd->pd_id);
        return 0;
}

int qedr_alloc_xrcd(struct ib_xrcd *ibxrcd, struct ib_udata *udata)
{
        struct qedr_dev *dev = get_qedr_dev(ibxrcd->device);
        struct qedr_xrcd *xrcd = get_qedr_xrcd(ibxrcd);

        return dev->ops->rdma_alloc_xrcd(dev->rdma_ctx, &xrcd->xrcd_id);
}

int qedr_dealloc_xrcd(struct ib_xrcd *ibxrcd, struct ib_udata *udata)
{
        struct qedr_dev *dev = get_qedr_dev(ibxrcd->device);
        u16 xrcd_id = get_qedr_xrcd(ibxrcd)->xrcd_id;

        dev->ops->rdma_dealloc_xrcd(dev->rdma_ctx, xrcd_id);
        return 0;
}
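
/* Free each DMA-coherent page of a PBL table, then the table itself. */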
static void qedr_free_pbl(struct qedr_dev *dev,
                          struct qedr_pbl_info *pbl_info, struct qedr_pbl *pbl)
{
        struct pci_dev *pdev = dev->pdev;
        int i;

        for (i = 0; i < pbl_info->num_pbls; i++) {
                if (!pbl[i].va)
                        continue;
                dma_free_coherent(&pdev->dev, pbl_info->pbl_size,
                                  pbl[i].va, pbl[i].pa);
        }

        kfree(pbl);
}

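/* FW PBL pages are 4KB-64KB; a page holds u64 PBEs, giving the per-page
 * and two-layer capacity limits below.
 */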
#define MIN_FW_PBL_PAGE_SIZE (4 * 1024)
#define MAX_FW_PBL_PAGE_SIZE (64 * 1024)

#define NUM_PBES_ON_PAGE(_page_size) (_page_size / sizeof(u64))
#define MAX_PBES_ON_PAGE NUM_PBES_ON_PAGE(MAX_FW_PBL_PAGE_SIZE)
#define MAX_PBES_TWO_LAYER (MAX_PBES_ON_PAGE * MAX_PBES_ON_PAGE)

static struct qedr_pbl *qedr_alloc_pbl_tbl(struct qedr_dev *dev,
                                           struct qedr_pbl_info *pbl_info,
                                           gfp_t flags)
{
        struct pci_dev *pdev = dev->pdev;
        struct qedr_pbl *pbl_table;
        dma_addr_t *pbl_main_tbl;
        dma_addr_t pa;
        void *va;
        int i;

        pbl_table = kcalloc(pbl_info->num_pbls, sizeof(*pbl_table), flags);
        if (!pbl_table)
                return ERR_PTR(-ENOMEM);

        for (i = 0; i < pbl_info->num_pbls; i++) {
                va = dma_alloc_coherent(&pdev->dev, pbl_info->pbl_size, &pa,
                                        flags);
                if (!va)
                        goto err;

                pbl_table[i].va = va;
                pbl_table[i].pa = pa;
        }

        /* Two-layer PBLs: if we have more than one PBL, initialize the
         * first one with physical pointers to all of the rest.
         */
        pbl_main_tbl = (dma_addr_t *)pbl_table[0].va;
        for (i = 0; i < pbl_info->num_pbls - 1; i++)
                pbl_main_tbl[i] = pbl_table[i + 1].pa;

        return pbl_table;

err:
        for (i--; i >= 0; i--)
                dma_free_coherent(&pdev->dev, pbl_info->pbl_size,
                                  pbl_table[i].va, pbl_table[i].pa);

        /* The PBL pages were already freed just above; free only the table
         * itself, to avoid freeing the pages a second time.
         */
        kfree(pbl_table);

        return ERR_PTR(-ENOMEM);
}

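/* Size the PBL for num_pbes page entries: a single page when it fits,
 * otherwise a two-layer layout with one extra layer-0 page.
 */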
static int qedr_prepare_pbl_tbl(struct qedr_dev *dev,
                                struct qedr_pbl_info *pbl_info,
                                u32 num_pbes, int two_layer_capable)
{
        u32 pbl_capacity;
        u32 pbl_size;
        u32 num_pbls;

        if ((num_pbes > MAX_PBES_ON_PAGE) && two_layer_capable) {
                if (num_pbes > MAX_PBES_TWO_LAYER) {
                        DP_ERR(dev, "prepare pbl table: too many pages %d\n",
                               num_pbes);
                        return -EINVAL;
                }

                /* calculate required pbl page size */
                pbl_size = MIN_FW_PBL_PAGE_SIZE;
                pbl_capacity = NUM_PBES_ON_PAGE(pbl_size) *
                               NUM_PBES_ON_PAGE(pbl_size);

                while (pbl_capacity < num_pbes) {
                        pbl_size *= 2;
                        pbl_capacity = pbl_size / sizeof(u64);
                        pbl_capacity = pbl_capacity * pbl_capacity;
                }

                num_pbls = DIV_ROUND_UP(num_pbes, NUM_PBES_ON_PAGE(pbl_size));
                num_pbls++;     /* One more for layer 0, which points to the PBLs */
                pbl_info->two_layered = true;
        } else {
                /* One-layer PBL */
                num_pbls = 1;
                pbl_size = max_t(u32, MIN_FW_PBL_PAGE_SIZE,
                                 roundup_pow_of_two((num_pbes * sizeof(u64))));
                pbl_info->two_layered = false;
        }

        pbl_info->num_pbls = num_pbls;
        pbl_info->pbl_size = pbl_size;
        pbl_info->num_pbes = num_pbes;

        DP_DEBUG(dev, QEDR_MSG_MR,
                 "prepare pbl table: num_pbes=%d, num_pbls=%d, pbl_size=%d\n",
                 pbl_info->num_pbes, pbl_info->num_pbls, pbl_info->pbl_size);

        return 0;
}

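/* Walk the umem in pg_shift-sized DMA blocks and write each block
 * address into the PBL as a little-endian PBE, advancing to the next
 * PBL page whenever the current one fills up.
 */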
static void qedr_populate_pbls(struct qedr_dev *dev, struct ib_umem *umem,
                               struct qedr_pbl *pbl,
                               struct qedr_pbl_info *pbl_info, u32 pg_shift)
{
        int pbe_cnt, total_num_pbes = 0;
        struct qedr_pbl *pbl_tbl;
        struct ib_block_iter biter;
        struct regpair *pbe;

        if (!pbl_info->num_pbes)
                return;

        /* If we have a two-layered PBL, the first PBL points to the rest
         * of the PBLs, so the first data entry lies in the second PBL of
         * the table.
         */
        if (pbl_info->two_layered)
                pbl_tbl = &pbl[1];
        else
                pbl_tbl = pbl;

        pbe = (struct regpair *)pbl_tbl->va;
        if (!pbe) {
                DP_ERR(dev, "cannot populate PBL due to a NULL PBE\n");
                return;
        }

        pbe_cnt = 0;

        rdma_umem_for_each_dma_block (umem, &biter, BIT(pg_shift)) {
                u64 pg_addr = rdma_block_iter_dma_address(&biter);

                pbe->lo = cpu_to_le32(pg_addr);
                pbe->hi = cpu_to_le32(upper_32_bits(pg_addr));

                pbe_cnt++;
                total_num_pbes++;
                pbe++;

                if (total_num_pbes == pbl_info->num_pbes)
                        return;

                /* If the given PBL is full of PBEs, move to the next one. */
                if (pbe_cnt == (pbl_info->pbl_size / sizeof(u64))) {
                        pbl_tbl++;
                        pbe = (struct regpair *)pbl_tbl->va;
                        pbe_cnt = 0;
                }
        }
}

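/* Register a doorbell address/data pair with the recovery mechanism.
 * A NULL db_data means an old user library that doesn't support
 * doorbell recovery, in which case registration is skipped.
 */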
static int qedr_db_recovery_add(struct qedr_dev *dev,
                                void __iomem *db_addr,
                                void *db_data,
                                enum qed_db_rec_width db_width,
                                enum qed_db_rec_space db_space)
{
        if (!db_data) {
                DP_DEBUG(dev, QEDR_MSG_INIT, "avoiding db rec since old lib\n");
                return 0;
        }

        return dev->ops->common->db_recovery_add(dev->cdev, db_addr, db_data,
                                                 db_width, db_space);
}

static void qedr_db_recovery_del(struct qedr_dev *dev,
                                 void __iomem *db_addr,
                                 void *db_data)
{
        if (!db_data) {
                DP_DEBUG(dev, QEDR_MSG_INIT, "avoiding db rec since old lib\n");
                return;
        }

        /* Ignore the return code; there is not much we can do about it.
         * An error is logged inside the callee.
         */
        dev->ops->common->db_recovery_del(dev->cdev, db_addr, db_data);
}

static int qedr_copy_cq_uresp(struct qedr_dev *dev,
                              struct qedr_cq *cq, struct ib_udata *udata,
                              u32 db_offset)
{
        struct qedr_create_cq_uresp uresp;
        int rc;

        memset(&uresp, 0, sizeof(uresp));

        uresp.db_offset = db_offset;
        uresp.icid = cq->icid;
        if (cq->q.db_mmap_entry)
                uresp.db_rec_addr =
                        rdma_user_mmap_get_offset(cq->q.db_mmap_entry);

        rc = qedr_ib_copy_to_udata(udata, &uresp, sizeof(uresp));
        if (rc)
                DP_ERR(dev, "copy error cqid=0x%x.\n", cq->icid);

        return rc;
}

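/* Advance the CQ chain consumer, flipping the toggle bit each time the
 * chain wraps past its last element.
 */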
static void consume_cqe(struct qedr_cq *cq)
{
        if (cq->latest_cqe == cq->toggle_cqe)
                cq->pbl_toggle ^= RDMA_CQE_REQUESTER_TOGGLE_BIT_MASK;

        cq->latest_cqe = qed_chain_consume(&cq->pbl);
}

static inline int qedr_align_cq_entries(int entries)
{
        u64 size, aligned_size;

        /* We allocate an extra entry that we don't report to the FW. */
        size = (entries + 1) * QEDR_CQE_SIZE;
        aligned_size = ALIGN(size, PAGE_SIZE);

        return aligned_size / QEDR_CQE_SIZE;
}

static int qedr_init_user_db_rec(struct ib_udata *udata,
                                 struct qedr_dev *dev, struct qedr_userq *q,
                                 bool requires_db_rec)
{
        struct qedr_ucontext *uctx =
                rdma_udata_to_drv_context(udata, struct qedr_ucontext,
                                          ibucontext);
        struct qedr_user_mmap_entry *entry;
        int rc;

        /* Abort for a user queue with no doorbell (SRQ) or a user library
         * that doesn't support doorbell recovery.
         */
        if (!requires_db_rec || !uctx->db_rec)
                return 0;

        /* Allocate a page for doorbell recovery and add it to the mmap
         * entries.
         */
        q->db_rec_data = (void *)get_zeroed_page(GFP_USER);
        if (!q->db_rec_data) {
                DP_ERR(dev, "get_zeroed_page failed\n");
                return -ENOMEM;
        }

        entry = kzalloc(sizeof(*entry), GFP_KERNEL);
        if (!entry)
                goto err_free_db_data;

        entry->address = q->db_rec_data;
        entry->length = PAGE_SIZE;
        entry->mmap_flag = QEDR_USER_MMAP_PHYS_PAGE;
        rc = rdma_user_mmap_entry_insert(&uctx->ibucontext,
                                         &entry->rdma_entry,
                                         PAGE_SIZE);
        if (rc)
                goto err_free_entry;

        q->db_mmap_entry = &entry->rdma_entry;

        return 0;

err_free_entry:
        kfree(entry);

err_free_db_data:
        free_page((unsigned long)q->db_rec_data);
        q->db_rec_data = NULL;
        return -ENOMEM;
}

static inline int qedr_init_user_queue(struct ib_udata *udata,
                                       struct qedr_dev *dev,
                                       struct qedr_userq *q, u64 buf_addr,
                                       size_t buf_len, bool requires_db_rec,
                                       int access,
                                       int alloc_and_init)
{
        u32 fw_pages;
        int rc;

        q->buf_addr = buf_addr;
        q->buf_len = buf_len;
        q->umem = ib_umem_get(&dev->ibdev, q->buf_addr, q->buf_len, access);
        if (IS_ERR(q->umem)) {
                DP_ERR(dev, "create user queue: failed ib_umem_get, got %ld\n",
                       PTR_ERR(q->umem));
                return PTR_ERR(q->umem);
        }

        fw_pages = ib_umem_num_dma_blocks(q->umem, 1 << FW_PAGE_SHIFT);
        rc = qedr_prepare_pbl_tbl(dev, &q->pbl_info, fw_pages, 0);
        if (rc)
                goto err0;

        if (alloc_and_init) {
                q->pbl_tbl = qedr_alloc_pbl_tbl(dev, &q->pbl_info, GFP_KERNEL);
                if (IS_ERR(q->pbl_tbl)) {
                        rc = PTR_ERR(q->pbl_tbl);
                        goto err0;
                }
                qedr_populate_pbls(dev, q->umem, q->pbl_tbl, &q->pbl_info,
                                   FW_PAGE_SHIFT);
        } else {
                q->pbl_tbl = kzalloc(sizeof(*q->pbl_tbl), GFP_KERNEL);
                if (!q->pbl_tbl) {
                        rc = -ENOMEM;
                        goto err0;
                }
        }

        /* mmap the user address used to store doorbell data for recovery */
        return qedr_init_user_db_rec(udata, dev, q, requires_db_rec);

err0:
        ib_umem_release(q->umem);
        q->umem = NULL;

        return rc;
}

static inline void qedr_init_cq_params(struct qedr_cq *cq,
                                       struct qedr_ucontext *ctx,
                                       struct qedr_dev *dev, int vector,
                                       int chain_entries, int page_cnt,
                                       u64 pbl_ptr,
                                       struct qed_rdma_create_cq_in_params
                                       *params)
{
        memset(params, 0, sizeof(*params));
        params->cq_handle_hi = upper_32_bits((uintptr_t)cq);
        params->cq_handle_lo = lower_32_bits((uintptr_t)cq);
        params->cnq_id = vector;
        params->cq_size = chain_entries - 1;
        params->dpi = (ctx) ? ctx->dpi : dev->dpi;
        params->pbl_num_pages = page_cnt;
        params->pbl_ptr = pbl_ptr;
        params->pbl_two_level = 0;
}

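/* Ring the CQ doorbell with the given consumer index and arm flags. */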
static void doorbell_cq(struct qedr_cq *cq, u32 cons, u8 flags)
{
        cq->db.data.agg_flags = flags;
        cq->db.data.value = cpu_to_le32(cons);
        writeq(cq->db.raw, cq->db_addr);
}

int qedr_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
        struct qedr_cq *cq = get_qedr_cq(ibcq);
        unsigned long sflags;
        struct qedr_dev *dev;

        dev = get_qedr_dev(ibcq->device);

        if (cq->destroyed) {
                DP_ERR(dev,
                       "warning: arm was invoked after destroy for cq %p (icid=%d)\n",
                       cq, cq->icid);
                return -EINVAL;
        }

        if (cq->cq_type == QEDR_CQ_TYPE_GSI)
                return 0;

        spin_lock_irqsave(&cq->cq_lock, sflags);

        cq->arm_flags = 0;

        if (flags & IB_CQ_SOLICITED)
                cq->arm_flags |= DQ_UCM_ROCE_CQ_ARM_SE_CF_CMD;

        if (flags & IB_CQ_NEXT_COMP)
                cq->arm_flags |= DQ_UCM_ROCE_CQ_ARM_CF_CMD;

        doorbell_cq(cq, cq->cq_cons - 1, cq->arm_flags);

        spin_unlock_irqrestore(&cq->cq_lock, sflags);

        return 0;
}

int qedr_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
                   struct ib_udata *udata)
{
        struct ib_device *ibdev = ibcq->device;
        struct qedr_ucontext *ctx = rdma_udata_to_drv_context(
                udata, struct qedr_ucontext, ibucontext);
        struct qed_rdma_destroy_cq_out_params destroy_oparams;
        struct qed_rdma_destroy_cq_in_params destroy_iparams;
        struct qed_chain_init_params chain_params = {
                .mode           = QED_CHAIN_MODE_PBL,
                .intended_use   = QED_CHAIN_USE_TO_CONSUME,
                .cnt_type       = QED_CHAIN_CNT_TYPE_U32,
                .elem_size      = sizeof(union rdma_cqe),
        };
        struct qedr_dev *dev = get_qedr_dev(ibdev);
        struct qed_rdma_create_cq_in_params params;
        struct qedr_create_cq_ureq ureq = {};
        int vector = attr->comp_vector;
        int entries = attr->cqe;
        struct qedr_cq *cq = get_qedr_cq(ibcq);
        int chain_entries;
        u32 db_offset;
        int page_cnt;
        u64 pbl_ptr;
        u16 icid;
        int rc;

        DP_DEBUG(dev, QEDR_MSG_INIT,
                 "create_cq: called from %s. entries=%d, vector=%d\n",
                 udata ? "User Lib" : "Kernel", entries, vector);

        if (attr->flags)
                return -EOPNOTSUPP;

        if (entries > QEDR_MAX_CQES) {
                DP_ERR(dev,
                       "create cq: the number of entries %d is too high. Must be equal to or below %d.\n",
                       entries, QEDR_MAX_CQES);
                return -EINVAL;
        }

        chain_entries = qedr_align_cq_entries(entries);
        chain_entries = min_t(int, chain_entries, QEDR_MAX_CQES);
        chain_params.num_elems = chain_entries;

        /* Calculate the db offset. User space adds the DPI base, the
         * kernel adds the db address.
         */
        db_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_UCM_RDMA_CQ_CONS_32BIT);

        if (udata) {
                if (ib_copy_from_udata(&ureq, udata, min(sizeof(ureq),
                                                         udata->inlen))) {
                        DP_ERR(dev,
                               "create cq: problem copying data from user space\n");
                        goto err0;
                }

                if (!ureq.len) {
                        DP_ERR(dev,
                               "create cq: cannot create a cq with 0 entries\n");
                        goto err0;
                }

                cq->cq_type = QEDR_CQ_TYPE_USER;

                rc = qedr_init_user_queue(udata, dev, &cq->q, ureq.addr,
                                          ureq.len, true, IB_ACCESS_LOCAL_WRITE,
                                          1);
                if (rc)
                        goto err0;

                pbl_ptr = cq->q.pbl_tbl->pa;
                page_cnt = cq->q.pbl_info.num_pbes;

                cq->ibcq.cqe = chain_entries;
                cq->q.db_addr = ctx->dpi_addr + db_offset;
        } else {
                cq->cq_type = QEDR_CQ_TYPE_KERNEL;

                rc = dev->ops->common->chain_alloc(dev->cdev, &cq->pbl,
                                                   &chain_params);
                if (rc)
                        goto err0;

                page_cnt = qed_chain_get_page_cnt(&cq->pbl);
                pbl_ptr = qed_chain_get_pbl_phys(&cq->pbl);
                cq->ibcq.cqe = cq->pbl.capacity;
        }

        qedr_init_cq_params(cq, ctx, dev, vector, chain_entries, page_cnt,
                            pbl_ptr, &params);

        rc = dev->ops->rdma_create_cq(dev->rdma_ctx, &params, &icid);
        if (rc)
                goto err1;

        cq->icid = icid;
        cq->sig = QEDR_CQ_MAGIC_NUMBER;
        spin_lock_init(&cq->cq_lock);

        if (udata) {
                rc = qedr_copy_cq_uresp(dev, cq, udata, db_offset);
                if (rc)
                        goto err2;

                rc = qedr_db_recovery_add(dev, cq->q.db_addr,
                                          &cq->q.db_rec_data->db_data,
                                          DB_REC_WIDTH_64B,
                                          DB_REC_USER);
                if (rc)
                        goto err2;

        } else {
                /* Generate doorbell address. */
                cq->db.data.icid = cq->icid;
                cq->db_addr = dev->db_addr + db_offset;
                cq->db.data.params = DB_AGG_CMD_MAX <<
                    RDMA_PWM_VAL32_DATA_AGG_CMD_SHIFT;

                /* Point to the very last element; once we pass it, the
                 * toggle bit flips.
                 */
                cq->toggle_cqe = qed_chain_get_last_elem(&cq->pbl);
                cq->pbl_toggle = RDMA_CQE_REQUESTER_TOGGLE_BIT_MASK;
                cq->latest_cqe = NULL;
                consume_cqe(cq);
                cq->cq_cons = qed_chain_get_cons_idx_u32(&cq->pbl);

                rc = qedr_db_recovery_add(dev, cq->db_addr, &cq->db.data,
                                          DB_REC_WIDTH_64B, DB_REC_KERNEL);
                if (rc)
                        goto err2;
        }

        DP_DEBUG(dev, QEDR_MSG_CQ,
                 "create cq: icid=0x%0x, addr=%p, size(entries)=0x%0x\n",
                 cq->icid, cq, params.cq_size);

        return 0;

err2:
        destroy_iparams.icid = cq->icid;
        dev->ops->rdma_destroy_cq(dev->rdma_ctx, &destroy_iparams,
                                  &destroy_oparams);
err1:
        if (udata) {
                qedr_free_pbl(dev, &cq->q.pbl_info, cq->q.pbl_tbl);
                ib_umem_release(cq->q.umem);
                if (cq->q.db_mmap_entry)
                        rdma_user_mmap_entry_remove(cq->q.db_mmap_entry);
        } else {
                dev->ops->common->chain_free(dev->cdev, &cq->pbl);
        }
err0:
        return -EINVAL;
}

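/* Polling bounds for draining outstanding CNQ notifications on CQ
 * destroy: up to 10 busy-wait iterations of 10us, then up to 10 sleep
 * iterations of 10ms.
 */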
#define QEDR_DESTROY_CQ_MAX_ITERATIONS          (10)
#define QEDR_DESTROY_CQ_ITER_DURATION           (10)

int qedr_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
{
        struct qedr_dev *dev = get_qedr_dev(ibcq->device);
        struct qed_rdma_destroy_cq_out_params oparams;
        struct qed_rdma_destroy_cq_in_params iparams;
        struct qedr_cq *cq = get_qedr_cq(ibcq);
        int iter;

        DP_DEBUG(dev, QEDR_MSG_CQ, "destroy cq %p (icid=%d)\n", cq, cq->icid);

        cq->destroyed = 1;

        /* GSI CQs are handled by the driver, so they don't exist in the FW */
        if (cq->cq_type == QEDR_CQ_TYPE_GSI) {
                qedr_db_recovery_del(dev, cq->db_addr, &cq->db.data);
                return 0;
        }

        iparams.icid = cq->icid;
        dev->ops->rdma_destroy_cq(dev->rdma_ctx, &iparams, &oparams);
        dev->ops->common->chain_free(dev->cdev, &cq->pbl);

        if (udata) {
                qedr_free_pbl(dev, &cq->q.pbl_info, cq->q.pbl_tbl);
                ib_umem_release(cq->q.umem);

                if (cq->q.db_rec_data) {
                        qedr_db_recovery_del(dev, cq->q.db_addr,
                                             &cq->q.db_rec_data->db_data);
                        rdma_user_mmap_entry_remove(cq->q.db_mmap_entry);
                }
        } else {
                qedr_db_recovery_del(dev, cq->db_addr, &cq->db.data);
        }

        /* We don't want the IRQ handler to handle a non-existing CQ, so we
         * wait until all CNQ interrupts, if any, are received. This will
         * always happen, and very fast; if not, a serious error has
         * occurred, which is why we can afford a long delay.
         * We spin for a short time first so we don't lose time on context
         * switching in case all the completions are handled within that
         * span. Otherwise we sleep for a while and check again. Since the
         * CNQ may be associated with (only) the current CPU, we use msleep
         * so the current CPU can be freed.
         * The CNQ notification count is increased in qedr_irq_handler().
         */
        iter = QEDR_DESTROY_CQ_MAX_ITERATIONS;
        while (oparams.num_cq_notif != READ_ONCE(cq->cnq_notif) && iter) {
                udelay(QEDR_DESTROY_CQ_ITER_DURATION);
                iter--;
        }

        iter = QEDR_DESTROY_CQ_MAX_ITERATIONS;
        while (oparams.num_cq_notif != READ_ONCE(cq->cnq_notif) && iter) {
                msleep(QEDR_DESTROY_CQ_ITER_DURATION);
                iter--;
        }

        /* Note that we don't need explicit code to wait for the completion
         * of the event handler because it is invoked from the EQ. Since the
         * destroy CQ ramrod has also been received on the EQ, we can be
         * certain that there's no event handler in progress.
         */
        return 0;
}

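/* Fill sgid/dgid, VLAN and RoCE mode in qp_params from the AH's GID
 * attribute, based on the GID's network type (RoCE v1, v2/IPv4,
 * v2/IPv6).
 */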
static inline int get_gid_info_from_table(struct ib_qp *ibqp,
                                          struct ib_qp_attr *attr,
                                          int attr_mask,
                                          struct qed_rdma_modify_qp_in_params
                                          *qp_params)
{
        const struct ib_gid_attr *gid_attr;
        enum rdma_network_type nw_type;
        const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
        u32 ipv4_addr;
        int ret;
        int i;

        gid_attr = grh->sgid_attr;
        ret = rdma_read_gid_l2_fields(gid_attr, &qp_params->vlan_id, NULL);
        if (ret)
                return ret;

        nw_type = rdma_gid_attr_network_type(gid_attr);
        switch (nw_type) {
        case RDMA_NETWORK_IPV6:
                memcpy(&qp_params->sgid.bytes[0], &gid_attr->gid.raw[0],
                       sizeof(qp_params->sgid));
                memcpy(&qp_params->dgid.bytes[0],
                       &grh->dgid,
                       sizeof(qp_params->dgid));
                qp_params->roce_mode = ROCE_V2_IPV6;
                SET_FIELD(qp_params->modify_flags,
                          QED_ROCE_MODIFY_QP_VALID_ROCE_MODE, 1);
                break;
        case RDMA_NETWORK_ROCE_V1:
                memcpy(&qp_params->sgid.bytes[0], &gid_attr->gid.raw[0],
                       sizeof(qp_params->sgid));
                memcpy(&qp_params->dgid.bytes[0],
                       &grh->dgid,
                       sizeof(qp_params->dgid));
                qp_params->roce_mode = ROCE_V1;
                break;
        case RDMA_NETWORK_IPV4:
                memset(&qp_params->sgid, 0, sizeof(qp_params->sgid));
                memset(&qp_params->dgid, 0, sizeof(qp_params->dgid));
                ipv4_addr = qedr_get_ipv4_from_gid(gid_attr->gid.raw);
                qp_params->sgid.ipv4_addr = ipv4_addr;
                ipv4_addr =
                    qedr_get_ipv4_from_gid(grh->dgid.raw);
                qp_params->dgid.ipv4_addr = ipv4_addr;
                SET_FIELD(qp_params->modify_flags,
                          QED_ROCE_MODIFY_QP_VALID_ROCE_MODE, 1);
                qp_params->roce_mode = ROCE_V2_IPV4;
                break;
        default:
                return -EINVAL;
        }

        for (i = 0; i < 4; i++) {
                qp_params->sgid.dwords[i] = ntohl(qp_params->sgid.dwords[i]);
                qp_params->dgid.dwords[i] = ntohl(qp_params->dgid.dwords[i]);
        }

        if (qp_params->vlan_id >= VLAN_CFI_MASK)
                qp_params->vlan_id = 0;

        return 0;
}

static int qedr_check_qp_attrs(struct ib_pd *ibpd, struct qedr_dev *dev,
                               struct ib_qp_init_attr *attrs,
                               struct ib_udata *udata)
{
        struct qedr_device_attr *qattr = &dev->attr;

        /* QP0... attrs->qp_type == IB_QPT_GSI */
        if (attrs->qp_type != IB_QPT_RC &&
            attrs->qp_type != IB_QPT_GSI &&
            attrs->qp_type != IB_QPT_XRC_INI &&
            attrs->qp_type != IB_QPT_XRC_TGT) {
                DP_DEBUG(dev, QEDR_MSG_QP,
                         "create qp: unsupported qp type=0x%x requested\n",
                         attrs->qp_type);
                return -EOPNOTSUPP;
        }

        if (attrs->cap.max_send_wr > qattr->max_sqe) {
                DP_ERR(dev,
                       "create qp: cannot create a SQ with %d elements (max_send_wr=0x%x)\n",
                       attrs->cap.max_send_wr, qattr->max_sqe);
                return -EINVAL;
        }

        if (attrs->cap.max_inline_data > qattr->max_inline) {
                DP_ERR(dev,
                       "create qp: unsupported inline data size=0x%x requested (max_inline=0x%x)\n",
                       attrs->cap.max_inline_data, qattr->max_inline);
                return -EINVAL;
        }

        if (attrs->cap.max_send_sge > qattr->max_sge) {
                DP_ERR(dev,
                       "create qp: unsupported send_sge=0x%x requested (max_send_sge=0x%x)\n",
                       attrs->cap.max_send_sge, qattr->max_sge);
                return -EINVAL;
        }

        if (attrs->cap.max_recv_sge > qattr->max_sge) {
                DP_ERR(dev,
                       "create qp: unsupported recv_sge=0x%x requested (max_recv_sge=0x%x)\n",
                       attrs->cap.max_recv_sge, qattr->max_sge);
                return -EINVAL;
        }

        /* Verify consumer QPs are not trying to use the GSI QP's CQ.
         * A TGT QP isn't associated with an RQ/SQ.
         */
        if ((attrs->qp_type != IB_QPT_GSI) && (dev->gsi_qp_created) &&
            (attrs->qp_type != IB_QPT_XRC_TGT) &&
            (attrs->qp_type != IB_QPT_XRC_INI)) {
                struct qedr_cq *send_cq = get_qedr_cq(attrs->send_cq);
                struct qedr_cq *recv_cq = get_qedr_cq(attrs->recv_cq);

                if ((send_cq->cq_type == QEDR_CQ_TYPE_GSI) ||
                    (recv_cq->cq_type == QEDR_CQ_TYPE_GSI)) {
                        DP_ERR(dev,
                               "create qp: consumer QP cannot use GSI CQs.\n");
                        return -EINVAL;
                }
        }

        return 0;
}

static int qedr_copy_srq_uresp(struct qedr_dev *dev,
                               struct qedr_srq *srq, struct ib_udata *udata)
{
        struct qedr_create_srq_uresp uresp = {};
        int rc;

        uresp.srq_id = srq->srq_id;

        rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
        if (rc)
                DP_ERR(dev, "create srq: problem copying data to user space\n");

        return rc;
}

static void qedr_copy_rq_uresp(struct qedr_dev *dev,
                               struct qedr_create_qp_uresp *uresp,
                               struct qedr_qp *qp)
{
        /* iWARP requires two doorbells per RQ. */
        if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
                uresp->rq_db_offset =
                    DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_IWARP_RQ_PROD);
                uresp->rq_db2_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_FLAGS);
        } else {
                uresp->rq_db_offset =
                    DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_ROCE_RQ_PROD);
        }

        uresp->rq_icid = qp->icid;
        if (qp->urq.db_mmap_entry)
                uresp->rq_db_rec_addr =
                        rdma_user_mmap_get_offset(qp->urq.db_mmap_entry);
}

static void qedr_copy_sq_uresp(struct qedr_dev *dev,
                               struct qedr_create_qp_uresp *uresp,
                               struct qedr_qp *qp)
{
        uresp->sq_db_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD);

        /* iWARP uses the same cid for rq and sq */
        if (rdma_protocol_iwarp(&dev->ibdev, 1))
                uresp->sq_icid = qp->icid;
        else
                uresp->sq_icid = qp->icid + 1;

        if (qp->usq.db_mmap_entry)
                uresp->sq_db_rec_addr =
                        rdma_user_mmap_get_offset(qp->usq.db_mmap_entry);
}

static int qedr_copy_qp_uresp(struct qedr_dev *dev,
                              struct qedr_qp *qp, struct ib_udata *udata,
                              struct qedr_create_qp_uresp *uresp)
{
        int rc;

        memset(uresp, 0, sizeof(*uresp));

        if (qedr_qp_has_sq(qp))
                qedr_copy_sq_uresp(dev, uresp, qp);

        if (qedr_qp_has_rq(qp))
                qedr_copy_rq_uresp(dev, uresp, qp);

        uresp->atomic_supported = dev->atomic_cap != IB_ATOMIC_NONE;
        uresp->qp_id = qp->qp_id;

        rc = qedr_ib_copy_to_udata(udata, uresp, sizeof(*uresp));
        if (rc)
                DP_ERR(dev,
                       "create qp: failed a copy to user space with qp icid=0x%x.\n",
                       qp->icid);

        return rc;
}

static void qedr_reset_qp_hwq_info(struct qedr_qp_hwq_info *qph)
{
        qed_chain_reset(&qph->pbl);
        qph->prod = 0;
        qph->cons = 0;
        qph->wqe_cons = 0;
        qph->db_data.data.value = cpu_to_le16(0);
}

static void qedr_set_common_qp_params(struct qedr_dev *dev,
                                      struct qedr_qp *qp,
                                      struct qedr_pd *pd,
                                      struct ib_qp_init_attr *attrs)
{
        spin_lock_init(&qp->q_lock);
        if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
                kref_init(&qp->refcnt);
                init_completion(&qp->iwarp_cm_comp);
                init_completion(&qp->qp_rel_comp);
        }

        qp->pd = pd;
        qp->qp_type = attrs->qp_type;
        qp->max_inline_data = attrs->cap.max_inline_data;
        qp->state = QED_ROCE_QP_STATE_RESET;

        qp->prev_wqe_size = 0;

        qp->signaled = attrs->sq_sig_type == IB_SIGNAL_ALL_WR;
        qp->dev = dev;
        if (qedr_qp_has_sq(qp)) {
                qedr_reset_qp_hwq_info(&qp->sq);
                qp->sq.max_sges = attrs->cap.max_send_sge;
                qp->sq_cq = get_qedr_cq(attrs->send_cq);
                DP_DEBUG(dev, QEDR_MSG_QP,
                         "SQ params:\tsq_max_sges = %d, sq_cq_id = %d\n",
                         qp->sq.max_sges, qp->sq_cq->icid);
        }

        if (attrs->srq)
                qp->srq = get_qedr_srq(attrs->srq);

        if (qedr_qp_has_rq(qp)) {
                qedr_reset_qp_hwq_info(&qp->rq);
                qp->rq_cq = get_qedr_cq(attrs->recv_cq);
                qp->rq.max_sges = attrs->cap.max_recv_sge;
                DP_DEBUG(dev, QEDR_MSG_QP,
                         "RQ params:\trq_max_sges = %d, rq_cq_id = %d\n",
                         qp->rq.max_sges, qp->rq_cq->icid);
        }

        DP_DEBUG(dev, QEDR_MSG_QP,
                 "QP params:\tpd = %d, qp_type = %d, max_inline_data = %d, state = %d, signaled = %d, use_srq=%d\n",
                 pd->pd_id, qp->qp_type, qp->max_inline_data,
                 qp->state, qp->signaled, (attrs->srq) ? 1 : 0);
        DP_DEBUG(dev, QEDR_MSG_QP,
                 "SQ params:\tsq_max_sges = %d, sq_cq_id = %d\n",
                 qp->sq.max_sges, qp->sq_cq->icid);
}

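/* Set SQ/RQ doorbell addresses for a kernel RoCE QP and register them
 * with doorbell recovery; on RQ failure, unwind the SQ registration.
 */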
static int qedr_set_roce_db_info(struct qedr_dev *dev, struct qedr_qp *qp)
{
        int rc = 0;

        if (qedr_qp_has_sq(qp)) {
                qp->sq.db = dev->db_addr +
                            DB_ADDR_SHIFT(DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD);
                qp->sq.db_data.data.icid = qp->icid + 1;
                rc = qedr_db_recovery_add(dev, qp->sq.db, &qp->sq.db_data,
                                          DB_REC_WIDTH_32B, DB_REC_KERNEL);
                if (rc)
                        return rc;
        }

        if (qedr_qp_has_rq(qp)) {
                qp->rq.db = dev->db_addr +
                            DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_ROCE_RQ_PROD);
                qp->rq.db_data.data.icid = qp->icid;
                rc = qedr_db_recovery_add(dev, qp->rq.db, &qp->rq.db_data,
                                          DB_REC_WIDTH_32B, DB_REC_KERNEL);
                if (rc && qedr_qp_has_sq(qp))
                        qedr_db_recovery_del(dev, qp->sq.db, &qp->sq.db_data);
        }

        return rc;
}

static int qedr_check_srq_params(struct qedr_dev *dev,
                                 struct ib_srq_init_attr *attrs,
                                 struct ib_udata *udata)
{
        struct qedr_device_attr *qattr = &dev->attr;

        if (attrs->attr.max_wr > qattr->max_srq_wr) {
                DP_ERR(dev,
                       "create srq: unsupported srq_wr=0x%x requested (max_srq_wr=0x%x)\n",
                       attrs->attr.max_wr, qattr->max_srq_wr);
                return -EINVAL;
        }

        if (attrs->attr.max_sge > qattr->max_sge) {
                DP_ERR(dev,
                       "create srq: unsupported sge=0x%x requested (max_srq_sge=0x%x)\n",
                       attrs->attr.max_sge, qattr->max_sge);
        }

        if (!udata && attrs->srq_type == IB_SRQT_XRC) {
                DP_ERR(dev, "XRC SRQs are not supported in kernel-space\n");
                return -EINVAL;
        }

        return 0;
}

1447 static void qedr_free_srq_user_params(struct qedr_srq *srq)
1448 {
1449         qedr_free_pbl(srq->dev, &srq->usrq.pbl_info, srq->usrq.pbl_tbl);
1450         ib_umem_release(srq->usrq.umem);
1451         ib_umem_release(srq->prod_umem);
1452 }
1453
1454 static void qedr_free_srq_kernel_params(struct qedr_srq *srq)
1455 {
1456         struct qedr_srq_hwq_info *hw_srq = &srq->hw_srq;
1457         struct qedr_dev *dev = srq->dev;
1458
1459         dev->ops->common->chain_free(dev->cdev, &hw_srq->pbl);
1460
1461         dma_free_coherent(&dev->pdev->dev, sizeof(struct rdma_srq_producers),
1462                           hw_srq->virt_prod_pair_addr,
1463                           hw_srq->phy_prod_pair_addr);
1464 }
1465
1466 static int qedr_init_srq_user_params(struct ib_udata *udata,
1467                                      struct qedr_srq *srq,
1468                                      struct qedr_create_srq_ureq *ureq,
1469                                      int access)
1470 {
1471         struct scatterlist *sg;
1472         int rc;
1473
1474         rc = qedr_init_user_queue(udata, srq->dev, &srq->usrq, ureq->srq_addr,
1475                                   ureq->srq_len, false, access, 1);
1476         if (rc)
1477                 return rc;
1478
1479         srq->prod_umem = ib_umem_get(srq->ibsrq.device, ureq->prod_pair_addr,
1480                                      sizeof(struct rdma_srq_producers), access);
1481         if (IS_ERR(srq->prod_umem)) {
1482                 qedr_free_pbl(srq->dev, &srq->usrq.pbl_info, srq->usrq.pbl_tbl);
1483                 ib_umem_release(srq->usrq.umem);
1484                 DP_ERR(srq->dev,
1485                        "create srq: failed ib_umem_get for producer, got %ld\n",
1486                        PTR_ERR(srq->prod_umem));
1487                 return PTR_ERR(srq->prod_umem);
1488         }
1489
1490         sg = srq->prod_umem->sgt_append.sgt.sgl;
1491         srq->hw_srq.phy_prod_pair_addr = sg_dma_address(sg);
1492
1493         return 0;
1494 }
1495
1496 static int qedr_alloc_srq_kernel_params(struct qedr_srq *srq,
1497                                         struct qedr_dev *dev,
1498                                         struct ib_srq_init_attr *init_attr)
1499 {
1500         struct qedr_srq_hwq_info *hw_srq = &srq->hw_srq;
1501         struct qed_chain_init_params params = {
1502                 .mode           = QED_CHAIN_MODE_PBL,
1503                 .intended_use   = QED_CHAIN_USE_TO_CONSUME_PRODUCE,
1504                 .cnt_type       = QED_CHAIN_CNT_TYPE_U32,
1505                 .elem_size      = QEDR_SRQ_WQE_ELEM_SIZE,
1506         };
1507         dma_addr_t phy_prod_pair_addr;
1508         u32 num_elems;
1509         void *va;
1510         int rc;
1511
1512         va = dma_alloc_coherent(&dev->pdev->dev,
1513                                 sizeof(struct rdma_srq_producers),
1514                                 &phy_prod_pair_addr, GFP_KERNEL);
1515         if (!va) {
1516                 DP_ERR(dev,
1517                        "create srq: failed to allocate dma memory for producer\n");
1518                 return -ENOMEM;
1519         }
1520
1521         hw_srq->phy_prod_pair_addr = phy_prod_pair_addr;
1522         hw_srq->virt_prod_pair_addr = va;
1523
1524         num_elems = init_attr->attr.max_wr * RDMA_MAX_SRQ_WQE_SIZE;
1525         params.num_elems = num_elems;
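             /* Illustrative sizing note: each SRQ WR may consume up to
              * RDMA_MAX_SRQ_WQE_SIZE chain elements (RDMA_MAX_SGE_PER_SRQ
              * SGEs plus one extra element, presumably the WQE header), so
              * e.g. max_wr = 128 yields 128 * 5 = 640 elements.
              */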
1526
1527         rc = dev->ops->common->chain_alloc(dev->cdev, &hw_srq->pbl, &params);
1528         if (rc)
1529                 goto err0;
1530
1531         hw_srq->num_elems = num_elems;
1532
1533         return 0;
1534
1535 err0:
1536         dma_free_coherent(&dev->pdev->dev, sizeof(struct rdma_srq_producers),
1537                           va, phy_prod_pair_addr);
1538         return rc;
1539 }
1540
1541 int qedr_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *init_attr,
1542                     struct ib_udata *udata)
1543 {
1544         struct qed_rdma_destroy_srq_in_params destroy_in_params;
1545         struct qed_rdma_create_srq_in_params in_params = {};
1546         struct qedr_dev *dev = get_qedr_dev(ibsrq->device);
1547         struct qed_rdma_create_srq_out_params out_params;
1548         struct qedr_pd *pd = get_qedr_pd(ibsrq->pd);
1549         struct qedr_create_srq_ureq ureq = {};
1550         u64 pbl_base_addr, phy_prod_pair_addr;
1551         struct qedr_srq_hwq_info *hw_srq;
1552         u32 page_cnt, page_size;
1553         struct qedr_srq *srq = get_qedr_srq(ibsrq);
1554         int rc = 0;
1555
1556         DP_DEBUG(dev, QEDR_MSG_QP,
1557                  "create SRQ called from %s (pd %p)\n",
1558                  (udata) ? "User lib" : "kernel", pd);
1559
1560         if (init_attr->srq_type != IB_SRQT_BASIC &&
1561             init_attr->srq_type != IB_SRQT_XRC)
1562                 return -EOPNOTSUPP;
1563
1564         rc = qedr_check_srq_params(dev, init_attr, udata);
1565         if (rc)
1566                 return rc;
1567
1568         srq->dev = dev;
1569         srq->is_xrc = (init_attr->srq_type == IB_SRQT_XRC);
1570         hw_srq = &srq->hw_srq;
1571         spin_lock_init(&srq->lock);
1572
1573         hw_srq->max_wr = init_attr->attr.max_wr;
1574         hw_srq->max_sges = init_attr->attr.max_sge;
1575
1576         if (udata) {
1577                 if (ib_copy_from_udata(&ureq, udata, min(sizeof(ureq),
1578                                                          udata->inlen))) {
1579                         DP_ERR(dev,
1580                                "create srq: problem copying data from user space\n");
1581                         goto err0;
1582                 }
1583
1584                 rc = qedr_init_srq_user_params(udata, srq, &ureq, 0);
1585                 if (rc)
1586                         goto err0;
1587
1588                 page_cnt = srq->usrq.pbl_info.num_pbes;
1589                 pbl_base_addr = srq->usrq.pbl_tbl->pa;
1590                 phy_prod_pair_addr = hw_srq->phy_prod_pair_addr;
1591                 page_size = PAGE_SIZE;
1592         } else {
1593                 struct qed_chain *pbl;
1594
1595                 rc = qedr_alloc_srq_kernel_params(srq, dev, init_attr);
1596                 if (rc)
1597                         goto err0;
1598
1599                 pbl = &hw_srq->pbl;
1600                 page_cnt = qed_chain_get_page_cnt(pbl);
1601                 pbl_base_addr = qed_chain_get_pbl_phys(pbl);
1602                 phy_prod_pair_addr = hw_srq->phy_prod_pair_addr;
1603                 page_size = QED_CHAIN_PAGE_SIZE;
1604         }
1605
1606         in_params.pd_id = pd->pd_id;
1607         in_params.pbl_base_addr = pbl_base_addr;
1608         in_params.prod_pair_addr = phy_prod_pair_addr;
1609         in_params.num_pages = page_cnt;
1610         in_params.page_size = page_size;
1611         if (srq->is_xrc) {
1612                 struct qedr_xrcd *xrcd = get_qedr_xrcd(init_attr->ext.xrc.xrcd);
1613                 struct qedr_cq *cq = get_qedr_cq(init_attr->ext.cq);
1614
1615                 in_params.is_xrc = 1;
1616                 in_params.xrcd_id = xrcd->xrcd_id;
1617                 in_params.cq_cid = cq->icid;
1618         }
1619
1620         rc = dev->ops->rdma_create_srq(dev->rdma_ctx, &in_params, &out_params);
1621         if (rc)
1622                 goto err1;
1623
1624         srq->srq_id = out_params.srq_id;
1625
1626         if (udata) {
1627                 rc = qedr_copy_srq_uresp(dev, srq, udata);
1628                 if (rc)
1629                         goto err2;
1630         }
1631
1632         rc = xa_insert_irq(&dev->srqs, srq->srq_id, srq, GFP_KERNEL);
1633         if (rc)
1634                 goto err2;
1635
1636         DP_DEBUG(dev, QEDR_MSG_SRQ,
1637                  "create srq: created srq with srq_id=0x%0x\n", srq->srq_id);
1638         return 0;
1639
1640 err2:
1641         destroy_in_params.srq_id = srq->srq_id;
             destroy_in_params.is_xrc = srq->is_xrc;
1642
1643         dev->ops->rdma_destroy_srq(dev->rdma_ctx, &destroy_in_params);
1644 err1:
1645         if (udata)
1646                 qedr_free_srq_user_params(srq);
1647         else
1648                 qedr_free_srq_kernel_params(srq);
1649 err0:
1650         return -EFAULT;
1651 }
1652
1653 int qedr_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
1654 {
1655         struct qed_rdma_destroy_srq_in_params in_params = {};
1656         struct qedr_dev *dev = get_qedr_dev(ibsrq->device);
1657         struct qedr_srq *srq = get_qedr_srq(ibsrq);
1658
1659         xa_erase_irq(&dev->srqs, srq->srq_id);
1660         in_params.srq_id = srq->srq_id;
1661         in_params.is_xrc = srq->is_xrc;
1662         dev->ops->rdma_destroy_srq(dev->rdma_ctx, &in_params);
1663
1664         if (ibsrq->uobject)
1665                 qedr_free_srq_user_params(srq);
1666         else
1667                 qedr_free_srq_kernel_params(srq);
1668
1669         DP_DEBUG(dev, QEDR_MSG_SRQ,
1670                  "destroy srq: destroyed srq with srq_id=0x%0x\n",
1671                  srq->srq_id);
1672         return 0;
1673 }
1674
1675 int qedr_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
1676                     enum ib_srq_attr_mask attr_mask, struct ib_udata *udata)
1677 {
1678         struct qed_rdma_modify_srq_in_params in_params = {};
1679         struct qedr_dev *dev = get_qedr_dev(ibsrq->device);
1680         struct qedr_srq *srq = get_qedr_srq(ibsrq);
1681         int rc;
1682
1683         if (attr_mask & IB_SRQ_MAX_WR) {
1684                 DP_ERR(dev,
1685                        "modify srq: resizing (IB_SRQ_MAX_WR) is not supported, mask=0x%x srq=%p\n",
1686                        attr_mask, srq);
1687                 return -EINVAL;
1688         }
1689
1690         if (attr_mask & IB_SRQ_LIMIT) {
1691                 if (attr->srq_limit >= srq->hw_srq.max_wr) {
1692                         DP_ERR(dev,
1693                                "modify srq: invalid srq_limit=0x%x (max_srq_limit=0x%x)\n",
1694                                attr->srq_limit, srq->hw_srq.max_wr);
1695                         return -EINVAL;
1696                 }
1697
1698                 in_params.srq_id = srq->srq_id;
1699                 in_params.wqe_limit = attr->srq_limit;
1700                 rc = dev->ops->rdma_modify_srq(dev->rdma_ctx, &in_params);
1701                 if (rc)
1702                         return rc;
1703         }
1704
1705         srq->srq_limit = attr->srq_limit;
1706
1707         DP_DEBUG(dev, QEDR_MSG_SRQ,
1708                  "modify srq: modified srq with srq_id=0x%0x\n", srq->srq_id);
1709
1710         return 0;
1711 }
1712
1713 static enum qed_rdma_qp_type qedr_ib_to_qed_qp_type(enum ib_qp_type ib_qp_type)
1714 {
1715         switch (ib_qp_type) {
1716         case IB_QPT_RC:
1717                 return QED_RDMA_QP_TYPE_RC;
1718         case IB_QPT_XRC_INI:
1719                 return QED_RDMA_QP_TYPE_XRC_INI;
1720         case IB_QPT_XRC_TGT:
1721                 return QED_RDMA_QP_TYPE_XRC_TGT;
1722         default:
1723                 return QED_RDMA_QP_TYPE_INVAL;
1724         }
1725 }
1726
1727 static inline void
1728 qedr_init_common_qp_in_params(struct qedr_dev *dev,
1729                               struct qedr_pd *pd,
1730                               struct qedr_qp *qp,
1731                               struct ib_qp_init_attr *attrs,
1732                               bool fmr_and_reserved_lkey,
1733                               struct qed_rdma_create_qp_in_params *params)
1734 {
1735         /* QP handle to be written in an async event */
1736         params->qp_handle_async_lo = lower_32_bits((uintptr_t) qp);
1737         params->qp_handle_async_hi = upper_32_bits((uintptr_t) qp);
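             /* Illustrative note: the 64-bit qp pointer is split across two
              * 32-bit fields here; the async event handler can rebuild it as
              * ((u64)qp_handle_async_hi << 32) | qp_handle_async_lo.
              */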
1738
1739         params->signal_all = (attrs->sq_sig_type == IB_SIGNAL_ALL_WR);
1740         params->fmr_and_reserved_lkey = fmr_and_reserved_lkey;
1741         params->qp_type = qedr_ib_to_qed_qp_type(attrs->qp_type);
1742         params->stats_queue = 0;
1743
1744         if (pd) {
1745                 params->pd = pd->pd_id;
1746                 params->dpi = pd->uctx ? pd->uctx->dpi : dev->dpi;
1747         }
1748
1749         if (qedr_qp_has_sq(qp))
1750                 params->sq_cq_id = get_qedr_cq(attrs->send_cq)->icid;
1751
1752         if (qedr_qp_has_rq(qp))
1753                 params->rq_cq_id = get_qedr_cq(attrs->recv_cq)->icid;
1754
1755         if (qedr_qp_has_srq(qp)) {
1756                 params->rq_cq_id = get_qedr_cq(attrs->recv_cq)->icid;
1757                 params->srq_id = qp->srq->srq_id;
1758                 params->use_srq = true;
1759         } else {
1760                 params->srq_id = 0;
1761                 params->use_srq = false;
1762         }
1763 }
1764
1765 static inline void qedr_qp_user_print(struct qedr_dev *dev, struct qedr_qp *qp)
1766 {
1767         DP_DEBUG(dev, QEDR_MSG_QP, "create qp: successfully created user QP. "
1768                  "qp=%p. "
1769                  "sq_addr=0x%llx, "
1770                  "sq_len=%zd, "
1771                  "rq_addr=0x%llx, "
1772                  "rq_len=%zd"
1773                  "\n",
1774                  qp,
1775                  qedr_qp_has_sq(qp) ? qp->usq.buf_addr : 0x0,
1776                  qedr_qp_has_sq(qp) ? qp->usq.buf_len : 0,
1777                  qedr_qp_has_rq(qp) ? qp->urq.buf_addr : 0x0,
1778                  qedr_qp_has_rq(qp) ? qp->urq.buf_len : 0);
1779 }
1780
1781 static inline void
1782 qedr_iwarp_populate_user_qp(struct qedr_dev *dev,
1783                             struct qedr_qp *qp,
1784                             struct qed_rdma_create_qp_out_params *out_params)
1785 {
1786         qp->usq.pbl_tbl->va = out_params->sq_pbl_virt;
1787         qp->usq.pbl_tbl->pa = out_params->sq_pbl_phys;
1788
1789         qedr_populate_pbls(dev, qp->usq.umem, qp->usq.pbl_tbl,
1790                            &qp->usq.pbl_info, FW_PAGE_SHIFT);
1791         if (!qp->srq) {
1792                 qp->urq.pbl_tbl->va = out_params->rq_pbl_virt;
1793                 qp->urq.pbl_tbl->pa = out_params->rq_pbl_phys;
1794         }
1795
1796         qedr_populate_pbls(dev, qp->urq.umem, qp->urq.pbl_tbl,
1797                            &qp->urq.pbl_info, FW_PAGE_SHIFT);
1798 }
1799
1800 static void qedr_cleanup_user(struct qedr_dev *dev,
1801                               struct qedr_ucontext *ctx,
1802                               struct qedr_qp *qp)
1803 {
1804         if (qedr_qp_has_sq(qp)) {
1805                 ib_umem_release(qp->usq.umem);
1806                 qp->usq.umem = NULL;
1807         }
1808
1809         if (qedr_qp_has_rq(qp)) {
1810                 ib_umem_release(qp->urq.umem);
1811                 qp->urq.umem = NULL;
1812         }
1813
1814         if (rdma_protocol_roce(&dev->ibdev, 1)) {
1815                 qedr_free_pbl(dev, &qp->usq.pbl_info, qp->usq.pbl_tbl);
1816                 qedr_free_pbl(dev, &qp->urq.pbl_info, qp->urq.pbl_tbl);
1817         } else {
1818                 kfree(qp->usq.pbl_tbl);
1819                 kfree(qp->urq.pbl_tbl);
1820         }
1821
1822         if (qp->usq.db_rec_data) {
1823                 qedr_db_recovery_del(dev, qp->usq.db_addr,
1824                                      &qp->usq.db_rec_data->db_data);
1825                 rdma_user_mmap_entry_remove(qp->usq.db_mmap_entry);
1826         }
1827
1828         if (qp->urq.db_rec_data) {
1829                 qedr_db_recovery_del(dev, qp->urq.db_addr,
1830                                      &qp->urq.db_rec_data->db_data);
1831                 rdma_user_mmap_entry_remove(qp->urq.db_mmap_entry);
1832         }
1833
1834         if (rdma_protocol_iwarp(&dev->ibdev, 1))
1835                 qedr_db_recovery_del(dev, qp->urq.db_rec_db2_addr,
1836                                      &qp->urq.db_rec_db2_data);
1837 }
1838
1839 static int qedr_create_user_qp(struct qedr_dev *dev,
1840                                struct qedr_qp *qp,
1841                                struct ib_pd *ibpd,
1842                                struct ib_udata *udata,
1843                                struct ib_qp_init_attr *attrs)
1844 {
1845         struct qed_rdma_create_qp_in_params in_params;
1846         struct qed_rdma_create_qp_out_params out_params;
1847         struct qedr_create_qp_uresp uresp = {};
1848         struct qedr_create_qp_ureq ureq = {};
1849         int alloc_and_init = rdma_protocol_roce(&dev->ibdev, 1);
1850         struct qedr_ucontext *ctx = NULL;
1851         struct qedr_pd *pd = NULL;
1852         int rc = 0;
1853
1854         qp->create_type = QEDR_QP_CREATE_USER;
1855
1856         if (ibpd) {
1857                 pd = get_qedr_pd(ibpd);
1858                 ctx = pd->uctx;
1859         }
1860
1861         if (udata) {
1862                 rc = ib_copy_from_udata(&ureq, udata, min(sizeof(ureq),
1863                                         udata->inlen));
1864                 if (rc) {
1865                         DP_ERR(dev, "Problem copying data from user space\n");
1866                         return rc;
1867                 }
1868         }
1869
1870         if (qedr_qp_has_sq(qp)) {
1871                 /* SQ - read access only (0) */
1872                 rc = qedr_init_user_queue(udata, dev, &qp->usq, ureq.sq_addr,
1873                                           ureq.sq_len, true, 0, alloc_and_init);
1874                 if (rc)
1875                         return rc;
1876         }
1877
1878         if (qedr_qp_has_rq(qp)) {
1879                 /* RQ - read access only (0) */
1880                 rc = qedr_init_user_queue(udata, dev, &qp->urq, ureq.rq_addr,
1881                                           ureq.rq_len, true, 0, alloc_and_init);
1882                 if (rc) {
1883                         ib_umem_release(qp->usq.umem);
1884                         qp->usq.umem = NULL;
1885                         if (rdma_protocol_roce(&dev->ibdev, 1)) {
1886                                 qedr_free_pbl(dev, &qp->usq.pbl_info,
1887                                               qp->usq.pbl_tbl);
1888                         } else {
1889                                 kfree(qp->usq.pbl_tbl);
1890                         }
1891                         return rc;
1892                 }
1893         }
1894
1895         memset(&in_params, 0, sizeof(in_params));
1896         qedr_init_common_qp_in_params(dev, pd, qp, attrs, false, &in_params);
1897         in_params.qp_handle_lo = ureq.qp_handle_lo;
1898         in_params.qp_handle_hi = ureq.qp_handle_hi;
1899
1900         if (qp->qp_type == IB_QPT_XRC_TGT) {
1901                 struct qedr_xrcd *xrcd = get_qedr_xrcd(attrs->xrcd);
1902
1903                 in_params.xrcd_id = xrcd->xrcd_id;
1904                 in_params.qp_handle_lo = qp->qp_id;
1905                 in_params.use_srq = 1;
1906         }
1907
1908         if (qedr_qp_has_sq(qp)) {
1909                 in_params.sq_num_pages = qp->usq.pbl_info.num_pbes;
1910                 in_params.sq_pbl_ptr = qp->usq.pbl_tbl->pa;
1911         }
1912
1913         if (qedr_qp_has_rq(qp)) {
1914                 in_params.rq_num_pages = qp->urq.pbl_info.num_pbes;
1915                 in_params.rq_pbl_ptr = qp->urq.pbl_tbl->pa;
1916         }
1917
1918         if (ctx)
1919                 SET_FIELD(in_params.flags, QED_ROCE_EDPM_MODE, ctx->edpm_mode);
1920
1921         qp->qed_qp = dev->ops->rdma_create_qp(dev->rdma_ctx,
1922                                               &in_params, &out_params);
1923
1924         if (!qp->qed_qp) {
1925                 rc = -ENOMEM;
1926                 goto err1;
1927         }
1928
1929         if (rdma_protocol_iwarp(&dev->ibdev, 1))
1930                 qedr_iwarp_populate_user_qp(dev, qp, &out_params);
1931
1932         qp->qp_id = out_params.qp_id;
1933         qp->icid = out_params.icid;
1934
1935         if (udata) {
1936                 rc = qedr_copy_qp_uresp(dev, qp, udata, &uresp);
1937                 if (rc)
1938                         goto err;
1939         }
1940
1941         /* db offset was calculated in copy_qp_uresp, now set it in the user queue */
1942         if (qedr_qp_has_sq(qp)) {
1943                 qp->usq.db_addr = ctx->dpi_addr + uresp.sq_db_offset;
1944                 qp->sq.max_wr = attrs->cap.max_send_wr;
1945                 rc = qedr_db_recovery_add(dev, qp->usq.db_addr,
1946                                           &qp->usq.db_rec_data->db_data,
1947                                           DB_REC_WIDTH_32B,
1948                                           DB_REC_USER);
1949                 if (rc)
1950                         goto err;
1951         }
1952
1953         if (qedr_qp_has_rq(qp)) {
1954                 qp->urq.db_addr = ctx->dpi_addr + uresp.rq_db_offset;
1955                 qp->rq.max_wr = attrs->cap.max_recv_wr;
1956                 rc = qedr_db_recovery_add(dev, qp->urq.db_addr,
1957                                           &qp->urq.db_rec_data->db_data,
1958                                           DB_REC_WIDTH_32B,
1959                                           DB_REC_USER);
1960                 if (rc)
1961                         goto err;
1962         }
1963
1964         if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
1965                 qp->urq.db_rec_db2_addr = ctx->dpi_addr + uresp.rq_db2_offset;
1966
1967                 /* Calculate the db_rec_db2 data locally since it is
1968                  * constant; there is no need to reflect it from user space.
1969                  */
1970                 qp->urq.db_rec_db2_data.data.icid = cpu_to_le16(qp->icid);
1971                 qp->urq.db_rec_db2_data.data.value =
1972                         cpu_to_le16(DQ_TCM_IWARP_POST_RQ_CF_CMD);
1973
1974                 rc = qedr_db_recovery_add(dev, qp->urq.db_rec_db2_addr,
1975                                           &qp->urq.db_rec_db2_data,
1976                                           DB_REC_WIDTH_32B,
1977                                           DB_REC_USER);
1978                 if (rc)
1979                         goto err;
1980         }
1981         qedr_qp_user_print(dev, qp);
1982         return rc;
1983 err:
1984         rc = dev->ops->rdma_destroy_qp(dev->rdma_ctx, qp->qed_qp);
1985         if (rc)
1986                 DP_ERR(dev, "create qp: fatal fault. rc=%d\n", rc);
1987
1988 err1:
1989         qedr_cleanup_user(dev, ctx, qp);
1990         return rc;
1991 }
1992
1993 static int qedr_set_iwarp_db_info(struct qedr_dev *dev, struct qedr_qp *qp)
1994 {
1995         int rc;
1996
1997         qp->sq.db = dev->db_addr +
1998             DB_ADDR_SHIFT(DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD);
1999         qp->sq.db_data.data.icid = qp->icid;
2000
2001         rc = qedr_db_recovery_add(dev, qp->sq.db,
2002                                   &qp->sq.db_data,
2003                                   DB_REC_WIDTH_32B,
2004                                   DB_REC_KERNEL);
2005         if (rc)
2006                 return rc;
2007
2008         qp->rq.db = dev->db_addr +
2009                     DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_IWARP_RQ_PROD);
2010         qp->rq.db_data.data.icid = qp->icid;
2011         qp->rq.iwarp_db2 = dev->db_addr +
2012                            DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_FLAGS);
2013         qp->rq.iwarp_db2_data.data.icid = qp->icid;
2014         qp->rq.iwarp_db2_data.data.value = DQ_TCM_IWARP_POST_RQ_CF_CMD;
2015
2016         rc = qedr_db_recovery_add(dev, qp->rq.db,
2017                                   &qp->rq.db_data,
2018                                   DB_REC_WIDTH_32B,
2019                                   DB_REC_KERNEL);
2020         if (rc)
2021                 return rc;
2022
2023         rc = qedr_db_recovery_add(dev, qp->rq.iwarp_db2,
2024                                   &qp->rq.iwarp_db2_data,
2025                                   DB_REC_WIDTH_32B,
2026                                   DB_REC_KERNEL);
2027         return rc;
2028 }
2029
2030 static int
2031 qedr_roce_create_kernel_qp(struct qedr_dev *dev,
2032                            struct qedr_qp *qp,
2033                            struct qed_rdma_create_qp_in_params *in_params,
2034                            u32 n_sq_elems, u32 n_rq_elems)
2035 {
2036         struct qed_rdma_create_qp_out_params out_params;
2037         struct qed_chain_init_params params = {
2038                 .mode           = QED_CHAIN_MODE_PBL,
2039                 .cnt_type       = QED_CHAIN_CNT_TYPE_U32,
2040         };
2041         int rc;
2042
2043         params.intended_use = QED_CHAIN_USE_TO_PRODUCE;
2044         params.num_elems = n_sq_elems;
2045         params.elem_size = QEDR_SQE_ELEMENT_SIZE;
2046
2047         rc = dev->ops->common->chain_alloc(dev->cdev, &qp->sq.pbl, &params);
2048         if (rc)
2049                 return rc;
2050
2051         in_params->sq_num_pages = qed_chain_get_page_cnt(&qp->sq.pbl);
2052         in_params->sq_pbl_ptr = qed_chain_get_pbl_phys(&qp->sq.pbl);
2053
2054         params.intended_use = QED_CHAIN_USE_TO_CONSUME_PRODUCE;
2055         params.num_elems = n_rq_elems;
2056         params.elem_size = QEDR_RQE_ELEMENT_SIZE;
2057
2058         rc = dev->ops->common->chain_alloc(dev->cdev, &qp->rq.pbl, &params);
2059         if (rc)
2060                 return rc;
2061
2062         in_params->rq_num_pages = qed_chain_get_page_cnt(&qp->rq.pbl);
2063         in_params->rq_pbl_ptr = qed_chain_get_pbl_phys(&qp->rq.pbl);
2064
2065         qp->qed_qp = dev->ops->rdma_create_qp(dev->rdma_ctx,
2066                                               in_params, &out_params);
2067
2068         if (!qp->qed_qp)
2069                 return -EINVAL;
2070
2071         qp->qp_id = out_params.qp_id;
2072         qp->icid = out_params.icid;
2073
2074         return qedr_set_roce_db_info(dev, qp);
2075 }
2076
2077 static int
2078 qedr_iwarp_create_kernel_qp(struct qedr_dev *dev,
2079                             struct qedr_qp *qp,
2080                             struct qed_rdma_create_qp_in_params *in_params,
2081                             u32 n_sq_elems, u32 n_rq_elems)
2082 {
2083         struct qed_rdma_create_qp_out_params out_params;
2084         struct qed_chain_init_params params = {
2085                 .mode           = QED_CHAIN_MODE_PBL,
2086                 .cnt_type       = QED_CHAIN_CNT_TYPE_U32,
2087         };
2088         int rc;
2089
2090         in_params->sq_num_pages = QED_CHAIN_PAGE_CNT(n_sq_elems,
2091                                                      QEDR_SQE_ELEMENT_SIZE,
2092                                                      QED_CHAIN_PAGE_SIZE,
2093                                                      QED_CHAIN_MODE_PBL);
2094         in_params->rq_num_pages = QED_CHAIN_PAGE_CNT(n_rq_elems,
2095                                                      QEDR_RQE_ELEMENT_SIZE,
2096                                                      QED_CHAIN_PAGE_SIZE,
2097                                                      QED_CHAIN_MODE_PBL);
2098
2099         qp->qed_qp = dev->ops->rdma_create_qp(dev->rdma_ctx,
2100                                               in_params, &out_params);
2101
2102         if (!qp->qed_qp)
2103                 return -EINVAL;
2104
2105         /* Now we allocate the chain */
2106
2107         params.intended_use = QED_CHAIN_USE_TO_PRODUCE;
2108         params.num_elems = n_sq_elems;
2109         params.elem_size = QEDR_SQE_ELEMENT_SIZE;
2110         params.ext_pbl_virt = out_params.sq_pbl_virt;
2111         params.ext_pbl_phys = out_params.sq_pbl_phys;
2112
2113         rc = dev->ops->common->chain_alloc(dev->cdev, &qp->sq.pbl, &params);
2114         if (rc)
2115                 goto err;
2116
2117         params.intended_use = QED_CHAIN_USE_TO_CONSUME_PRODUCE;
2118         params.num_elems = n_rq_elems;
2119         params.elem_size = QEDR_RQE_ELEMENT_SIZE;
2120         params.ext_pbl_virt = out_params.rq_pbl_virt;
2121         params.ext_pbl_phys = out_params.rq_pbl_phys;
2122
2123         rc = dev->ops->common->chain_alloc(dev->cdev, &qp->rq.pbl, &params);
2124         if (rc)
2125                 goto err;
2126
2127         qp->qp_id = out_params.qp_id;
2128         qp->icid = out_params.icid;
2129
2130         return qedr_set_iwarp_db_info(dev, qp);
2131
2132 err:
2133         dev->ops->rdma_destroy_qp(dev->rdma_ctx, qp->qed_qp);
2134
2135         return rc;
2136 }
2137
2138 static void qedr_cleanup_kernel(struct qedr_dev *dev, struct qedr_qp *qp)
2139 {
2140         dev->ops->common->chain_free(dev->cdev, &qp->sq.pbl);
2141         kfree(qp->wqe_wr_id);
2142
2143         dev->ops->common->chain_free(dev->cdev, &qp->rq.pbl);
2144         kfree(qp->rqe_wr_id);
2145
2146         /* The GSI QP is not registered with the db mechanism; nothing to delete */
2147         if (qp->qp_type == IB_QPT_GSI)
2148                 return;
2149
2150         qedr_db_recovery_del(dev, qp->sq.db, &qp->sq.db_data);
2151
2152         if (!qp->srq) {
2153                 qedr_db_recovery_del(dev, qp->rq.db, &qp->rq.db_data);
2154
2155                 if (rdma_protocol_iwarp(&dev->ibdev, 1))
2156                         qedr_db_recovery_del(dev, qp->rq.iwarp_db2,
2157                                              &qp->rq.iwarp_db2_data);
2158         }
2159 }
2160
2161 static int qedr_create_kernel_qp(struct qedr_dev *dev,
2162                                  struct qedr_qp *qp,
2163                                  struct ib_pd *ibpd,
2164                                  struct ib_qp_init_attr *attrs)
2165 {
2166         struct qed_rdma_create_qp_in_params in_params;
2167         struct qedr_pd *pd = get_qedr_pd(ibpd);
2168         int rc = -EINVAL;
2169         u32 n_rq_elems;
2170         u32 n_sq_elems;
2171         u32 n_sq_entries;
2172
2173         memset(&in_params, 0, sizeof(in_params));
2174         qp->create_type = QEDR_QP_CREATE_KERNEL;
2175
2176         /* A single work request may take up to QEDR_MAX_SQ_WQE_SIZE elements in
2177          * the ring. The ring should allow at least a single WR, even if the
2178          * user requested none, due to allocation issues.
2179          * We should add an extra WR since the prod and cons indices of
2180          * wqe_wr_id are managed in such a way that the WQ is considered full
2181          * when (prod+1)%max_wr==cons. We currently don't do that because we
2182          * double the number of entries due to an iSER issue that pushes far more
2183          * WRs than indicated. If we decline its ib_post_send() then we get
2184          * error prints in the dmesg we'd like to avoid.
2185          */
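             /* Worked example of the "full" condition above (illustrative):
              * with max_wr = 4 and cons = 0, after three posts prod = 3 and
              * (3 + 1) % 4 == cons, so the queue already reports full with
              * only three outstanding WRs; hence the extra WR.
              */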
2186         qp->sq.max_wr = min_t(u32, attrs->cap.max_send_wr * dev->wq_multiplier,
2187                               dev->attr.max_sqe);
2188
2189         qp->wqe_wr_id = kcalloc(qp->sq.max_wr, sizeof(*qp->wqe_wr_id),
2190                                 GFP_KERNEL);
2191         if (!qp->wqe_wr_id) {
2192                 DP_ERR(dev, "create qp: failed SQ shadow memory allocation\n");
2193                 return -ENOMEM;
2194         }
2195
2196         /* QP handle to be written in CQE */
2197         in_params.qp_handle_lo = lower_32_bits((uintptr_t) qp);
2198         in_params.qp_handle_hi = upper_32_bits((uintptr_t) qp);
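             /* As with the async handle above, the completion path can
              * reassemble the qp pointer from the CQE as
              * ((u64)qp_handle_hi << 32) | qp_handle_lo (illustrative).
              */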
2199
2200         /* A single work request may take up to QEDR_MAX_RQ_WQE_SIZE elements in
2201          * the ring. The ring should allow at least a single WR, even if the
2202          * user requested none, due to allocation issues.
2203          */
2204         qp->rq.max_wr = (u16) max_t(u32, attrs->cap.max_recv_wr, 1);
2205
2206         /* Allocate driver internal RQ array */
2207         qp->rqe_wr_id = kcalloc(qp->rq.max_wr, sizeof(*qp->rqe_wr_id),
2208                                 GFP_KERNEL);
2209         if (!qp->rqe_wr_id) {
2210                 DP_ERR(dev,
2211                        "create qp: failed RQ shadow memory allocation\n");
2212                 kfree(qp->wqe_wr_id);
2213                 return -ENOMEM;
2214         }
2215
2216         qedr_init_common_qp_in_params(dev, pd, qp, attrs, true, &in_params);
2217
2218         n_sq_entries = attrs->cap.max_send_wr;
2219         n_sq_entries = min_t(u32, n_sq_entries, dev->attr.max_sqe);
2220         n_sq_entries = max_t(u32, n_sq_entries, 1);
2221         n_sq_elems = n_sq_entries * QEDR_MAX_SQE_ELEMENTS_PER_SQE;
2222
2223         n_rq_elems = qp->rq.max_wr * QEDR_MAX_RQE_ELEMENTS_PER_RQE;
2224
2225         if (rdma_protocol_iwarp(&dev->ibdev, 1))
2226                 rc = qedr_iwarp_create_kernel_qp(dev, qp, &in_params,
2227                                                  n_sq_elems, n_rq_elems);
2228         else
2229                 rc = qedr_roce_create_kernel_qp(dev, qp, &in_params,
2230                                                 n_sq_elems, n_rq_elems);
2231         if (rc)
2232                 qedr_cleanup_kernel(dev, qp);
2233
2234         return rc;
2235 }
2236
2237 static int qedr_free_qp_resources(struct qedr_dev *dev, struct qedr_qp *qp,
2238                                   struct ib_udata *udata)
2239 {
2240         struct qedr_ucontext *ctx =
2241                 rdma_udata_to_drv_context(udata, struct qedr_ucontext,
2242                                           ibucontext);
2243         int rc;
2244
2245         if (qp->qp_type != IB_QPT_GSI) {
2246                 rc = dev->ops->rdma_destroy_qp(dev->rdma_ctx, qp->qed_qp);
2247                 if (rc)
2248                         return rc;
2249         }
2250
2251         if (qp->create_type == QEDR_QP_CREATE_USER)
2252                 qedr_cleanup_user(dev, ctx, qp);
2253         else
2254                 qedr_cleanup_kernel(dev, qp);
2255
2256         return 0;
2257 }
2258
2259 int qedr_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *attrs,
2260                    struct ib_udata *udata)
2261 {
2262         struct qedr_xrcd *xrcd = NULL;
2263         struct ib_pd *ibpd = ibqp->pd;
2264         struct qedr_pd *pd = get_qedr_pd(ibpd);
2265         struct qedr_dev *dev = get_qedr_dev(ibqp->device);
2266         struct qedr_qp *qp = get_qedr_qp(ibqp);
2267         int rc = 0;
2268
2269         if (attrs->create_flags)
2270                 return -EOPNOTSUPP;
2271
2272         if (attrs->qp_type == IB_QPT_XRC_TGT)
2273                 xrcd = get_qedr_xrcd(attrs->xrcd);
2274         else
2275                 pd = get_qedr_pd(ibpd);
2276
2277         DP_DEBUG(dev, QEDR_MSG_QP, "create qp: called from %s, pd=%p\n",
2278                  udata ? "user library" : "kernel", pd);
2279
2280         rc = qedr_check_qp_attrs(ibpd, dev, attrs, udata);
2281         if (rc)
2282                 return rc;
2283
2284         DP_DEBUG(dev, QEDR_MSG_QP,
2285                  "create qp: called from %s, event_handler=%p, pd=%p sq_cq=%p, sq_icid=%d, rq_cq=%p, rq_icid=%d\n",
2286                  udata ? "user library" : "kernel", attrs->event_handler, pd,
2287                  get_qedr_cq(attrs->send_cq),
2288                  attrs->send_cq ? get_qedr_cq(attrs->send_cq)->icid : 0,
2289                  get_qedr_cq(attrs->recv_cq),
2290                  attrs->recv_cq ? get_qedr_cq(attrs->recv_cq)->icid : 0);
2291
2292         qedr_set_common_qp_params(dev, qp, pd, attrs);
2293
2294         if (attrs->qp_type == IB_QPT_GSI)
2295                 return qedr_create_gsi_qp(dev, attrs, qp);
2296
2297         if (udata || xrcd)
2298                 rc = qedr_create_user_qp(dev, qp, ibpd, udata, attrs);
2299         else
2300                 rc = qedr_create_kernel_qp(dev, qp, ibpd, attrs);
2301
2302         if (rc)
2303                 return rc;
2304
2305         qp->ibqp.qp_num = qp->qp_id;
2306
2307         if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
2308                 rc = xa_insert(&dev->qps, qp->qp_id, qp, GFP_KERNEL);
2309                 if (rc)
2310                         goto out_free_qp_resources;
2311         }
2312
2313         return 0;
2314
2315 out_free_qp_resources:
2316         qedr_free_qp_resources(dev, qp, udata);
2317         return rc;
2318 }
2319
2320 static enum ib_qp_state qedr_get_ibqp_state(enum qed_roce_qp_state qp_state)
2321 {
2322         switch (qp_state) {
2323         case QED_ROCE_QP_STATE_RESET:
2324                 return IB_QPS_RESET;
2325         case QED_ROCE_QP_STATE_INIT:
2326                 return IB_QPS_INIT;
2327         case QED_ROCE_QP_STATE_RTR:
2328                 return IB_QPS_RTR;
2329         case QED_ROCE_QP_STATE_RTS:
2330                 return IB_QPS_RTS;
2331         case QED_ROCE_QP_STATE_SQD:
2332                 return IB_QPS_SQD;
2333         case QED_ROCE_QP_STATE_ERR:
2334                 return IB_QPS_ERR;
2335         case QED_ROCE_QP_STATE_SQE:
2336                 return IB_QPS_SQE;
2337         }
2338         return IB_QPS_ERR;
2339 }
2340
2341 static enum qed_roce_qp_state qedr_get_state_from_ibqp(
2342                                         enum ib_qp_state qp_state)
2343 {
2344         switch (qp_state) {
2345         case IB_QPS_RESET:
2346                 return QED_ROCE_QP_STATE_RESET;
2347         case IB_QPS_INIT:
2348                 return QED_ROCE_QP_STATE_INIT;
2349         case IB_QPS_RTR:
2350                 return QED_ROCE_QP_STATE_RTR;
2351         case IB_QPS_RTS:
2352                 return QED_ROCE_QP_STATE_RTS;
2353         case IB_QPS_SQD:
2354                 return QED_ROCE_QP_STATE_SQD;
2355         case IB_QPS_ERR:
2356                 return QED_ROCE_QP_STATE_ERR;
2357         default:
2358                 return QED_ROCE_QP_STATE_ERR;
2359         }
2360 }
2361
2362 static int qedr_update_qp_state(struct qedr_dev *dev,
2363                                 struct qedr_qp *qp,
2364                                 enum qed_roce_qp_state cur_state,
2365                                 enum qed_roce_qp_state new_state)
2366 {
2367         int status = 0;
2368
2369         if (new_state == cur_state)
2370                 return 0;
2371
2372         switch (cur_state) {
2373         case QED_ROCE_QP_STATE_RESET:
2374                 switch (new_state) {
2375                 case QED_ROCE_QP_STATE_INIT:
2376                         break;
2377                 default:
2378                         status = -EINVAL;
2379                         break;
2380                 }
2381                 break;
2382         case QED_ROCE_QP_STATE_INIT:
2383                 switch (new_state) {
2384                 case QED_ROCE_QP_STATE_RTR:
2385                         /* Update doorbell (in case post_recv was
2386                          * done before move to RTR)
2387                          */
2388
2389                         if (rdma_protocol_roce(&dev->ibdev, 1))
2390                                 writel(qp->rq.db_data.raw, qp->rq.db);
2392                         break;
2393                 case QED_ROCE_QP_STATE_ERR:
2394                         break;
2395                 default:
2396                         /* Invalid state change. */
2397                         status = -EINVAL;
2398                         break;
2399                 }
2400                 break;
2401         case QED_ROCE_QP_STATE_RTR:
2402                 /* RTR->XXX */
2403                 switch (new_state) {
2404                 case QED_ROCE_QP_STATE_RTS:
2405                         break;
2406                 case QED_ROCE_QP_STATE_ERR:
2407                         break;
2408                 default:
2409                         /* Invalid state change. */
2410                         status = -EINVAL;
2411                         break;
2412                 }
2413                 break;
2414         case QED_ROCE_QP_STATE_RTS:
2415                 /* RTS->XXX */
2416                 switch (new_state) {
2417                 case QED_ROCE_QP_STATE_SQD:
2418                         break;
2419                 case QED_ROCE_QP_STATE_ERR:
2420                         break;
2421                 default:
2422                         /* Invalid state change. */
2423                         status = -EINVAL;
2424                         break;
2425                 }
2426                 break;
2427         case QED_ROCE_QP_STATE_SQD:
2428                 /* SQD->XXX */
2429                 switch (new_state) {
2430                 case QED_ROCE_QP_STATE_RTS:
2431                 case QED_ROCE_QP_STATE_ERR:
2432                         break;
2433                 default:
2434                         /* Invalid state change. */
2435                         status = -EINVAL;
2436                         break;
2437                 }
2438                 break;
2439         case QED_ROCE_QP_STATE_ERR:
2440                 /* ERR->XXX */
2441                 switch (new_state) {
2442                 case QED_ROCE_QP_STATE_RESET:
2443                         if ((qp->rq.prod != qp->rq.cons) ||
2444                             (qp->sq.prod != qp->sq.cons)) {
2445                                 DP_NOTICE(dev,
2446                                           "Error->Reset with rq/sq not empty rq.prod=%x rq.cons=%x sq.prod=%x sq.cons=%x\n",
2447                                           qp->rq.prod, qp->rq.cons, qp->sq.prod,
2448                                           qp->sq.cons);
2449                                 status = -EINVAL;
2450                         }
2451                         break;
2452                 default:
2453                         status = -EINVAL;
2454                         break;
2455                 }
2456                 break;
2457         default:
2458                 status = -EINVAL;
2459                 break;
2460         }
2461
2462         return status;
2463 }
2464
2465 int qedr_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
2466                    int attr_mask, struct ib_udata *udata)
2467 {
2468         struct qedr_qp *qp = get_qedr_qp(ibqp);
2469         struct qed_rdma_modify_qp_in_params qp_params = { 0 };
2470         struct qedr_dev *dev = get_qedr_dev(&qp->dev->ibdev);
2471         const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
2472         enum ib_qp_state old_qp_state, new_qp_state;
2473         enum qed_roce_qp_state cur_state;
2474         int rc = 0;
2475
2476         DP_DEBUG(dev, QEDR_MSG_QP,
2477                  "modify qp: qp %p attr_mask=0x%x, state=%d\n", qp, attr_mask,
2478                  attr->qp_state);
2479
2480         if (attr_mask & ~IB_QP_ATTR_STANDARD_BITS)
2481                 return -EOPNOTSUPP;
2482
2483         old_qp_state = qedr_get_ibqp_state(qp->state);
2484         if (attr_mask & IB_QP_STATE)
2485                 new_qp_state = attr->qp_state;
2486         else
2487                 new_qp_state = old_qp_state;
2488
2489         if (rdma_protocol_roce(&dev->ibdev, 1)) {
2490                 if (!ib_modify_qp_is_ok(old_qp_state, new_qp_state,
2491                                         ibqp->qp_type, attr_mask)) {
2492                         DP_ERR(dev,
2493                                "modify qp: invalid attribute mask=0x%x specified for\n"
2494                                "qpn=0x%x of type=0x%x old_qp_state=0x%x, new_qp_state=0x%x\n",
2495                                attr_mask, qp->qp_id, ibqp->qp_type,
2496                                old_qp_state, new_qp_state);
2497                         rc = -EINVAL;
2498                         goto err;
2499                 }
2500         }
2501
2502         /* Translate the masks... */
2503         if (attr_mask & IB_QP_STATE) {
2504                 SET_FIELD(qp_params.modify_flags,
2505                           QED_RDMA_MODIFY_QP_VALID_NEW_STATE, 1);
2506                 qp_params.new_state = qedr_get_state_from_ibqp(attr->qp_state);
2507         }
2508
2509         if (attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY)
2510                 qp_params.sqd_async = true;
2511
2512         if (attr_mask & IB_QP_PKEY_INDEX) {
2513                 SET_FIELD(qp_params.modify_flags,
2514                           QED_ROCE_MODIFY_QP_VALID_PKEY, 1);
2515                 if (attr->pkey_index >= QEDR_ROCE_PKEY_TABLE_LEN) {
2516                         rc = -EINVAL;
2517                         goto err;
2518                 }
2519
2520                 qp_params.pkey = QEDR_ROCE_PKEY_DEFAULT;
2521         }
2522
2523         if (attr_mask & IB_QP_QKEY)
2524                 qp->qkey = attr->qkey;
2525
2526         if (attr_mask & IB_QP_ACCESS_FLAGS) {
2527                 SET_FIELD(qp_params.modify_flags,
2528                           QED_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN, 1);
2529                 qp_params.incoming_rdma_read_en = attr->qp_access_flags &
2530                                                   IB_ACCESS_REMOTE_READ;
2531                 qp_params.incoming_rdma_write_en = attr->qp_access_flags &
2532                                                    IB_ACCESS_REMOTE_WRITE;
2533                 qp_params.incoming_atomic_en = attr->qp_access_flags &
2534                                                IB_ACCESS_REMOTE_ATOMIC;
2535         }
2536
2537         if (attr_mask & (IB_QP_AV | IB_QP_PATH_MTU)) {
2538                 if (rdma_protocol_iwarp(&dev->ibdev, 1))
2539                         return -EINVAL;
2540
2541                 if (attr_mask & IB_QP_PATH_MTU) {
2542                         if (attr->path_mtu < IB_MTU_256 ||
2543                             attr->path_mtu > IB_MTU_4096) {
2544                                 pr_err("error: Only MTU sizes of 256, 512, 1024, 2048 and 4096 are supported by RoCE\n");
2545                                 rc = -EINVAL;
2546                                 goto err;
2547                         }
2548                         qp->mtu = min(ib_mtu_enum_to_int(attr->path_mtu),
2549                                       ib_mtu_enum_to_int(iboe_get_mtu
2550                                                          (dev->ndev->mtu)));
2551                 }
2552
2553                 if (!qp->mtu) {
2554                         qp->mtu =
2555                         ib_mtu_enum_to_int(iboe_get_mtu(dev->ndev->mtu));
2556                         pr_err("Fixing zeroed MTU to qp->mtu = %d\n", qp->mtu);
2557                 }
2558
2559                 SET_FIELD(qp_params.modify_flags,
2560                           QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR, 1);
2561
2562                 qp_params.traffic_class_tos = grh->traffic_class;
2563                 qp_params.flow_label = grh->flow_label;
2564                 qp_params.hop_limit_ttl = grh->hop_limit;
2565
2566                 qp->sgid_idx = grh->sgid_index;
2567
2568                 rc = get_gid_info_from_table(ibqp, attr, attr_mask, &qp_params);
2569                 if (rc) {
2570                         DP_ERR(dev,
2571                                "modify qp: problems with GID index %d (rc=%d)\n",
2572                                grh->sgid_index, rc);
2573                         return rc;
2574                 }
2575
2576                 rc = qedr_get_dmac(dev, &attr->ah_attr,
2577                                    qp_params.remote_mac_addr);
2578                 if (rc)
2579                         return rc;
2580
2581                 qp_params.use_local_mac = true;
2582                 ether_addr_copy(qp_params.local_mac_addr, dev->ndev->dev_addr);
2583
2584                 DP_DEBUG(dev, QEDR_MSG_QP, "dgid=%x:%x:%x:%x\n",
2585                          qp_params.dgid.dwords[0], qp_params.dgid.dwords[1],
2586                          qp_params.dgid.dwords[2], qp_params.dgid.dwords[3]);
2587                 DP_DEBUG(dev, QEDR_MSG_QP, "sgid=%x:%x:%x:%x\n",
2588                          qp_params.sgid.dwords[0], qp_params.sgid.dwords[1],
2589                          qp_params.sgid.dwords[2], qp_params.sgid.dwords[3]);
2590                 DP_DEBUG(dev, QEDR_MSG_QP, "remote_mac=[%pM]\n",
2591                          qp_params.remote_mac_addr);
2592
2593                 qp_params.mtu = qp->mtu;
2594                 qp_params.lb_indication = false;
2595         }
2596
2597         if (!qp_params.mtu) {
2598                 /* Stay with current MTU */
2599                 if (qp->mtu)
2600                         qp_params.mtu = qp->mtu;
2601                 else
2602                         qp_params.mtu =
2603                             ib_mtu_enum_to_int(iboe_get_mtu(dev->ndev->mtu));
2604         }
2605
2606         if (attr_mask & IB_QP_TIMEOUT) {
2607                 SET_FIELD(qp_params.modify_flags,
2608                           QED_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT, 1);
2609
2610                 /* The received timeout value is an exponent used like this:
2611                  *    "12.7.34 LOCAL ACK TIMEOUT
2612                  *    Value representing the transport (ACK) timeout for use by
2613                  *    the remote, expressed as: 4.096 * 2^timeout [usec]"
2614                  * The FW expects timeout in msec so we need to divide the usec
2615                  * result by 1000. We'll approximate 1000~2^10, and 4.096 ~ 2^2,
2616                  * so we get: 2^2 * 2^timeout / 2^10 = 2^(timeout - 8).
2617                  * The value of zero means infinite so we use a 'max_t' to make
2618                  * sure that sub 1 msec values will be configured as 1 msec.
2619                  */
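                     /* For example (illustrative): attr->timeout = 14 means
                      * 4.096 * 2^14 usec ~= 67.1 msec, while the approximation
                      * configures 2^(14 - 8) = 64 msec; attr->timeout = 8
                      * (~1.05 msec) is configured as 2^0 = 1 msec.
                      */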
2620                 if (attr->timeout)
2621                         qp_params.ack_timeout =
2622                                         1 << max_t(int, attr->timeout - 8, 0);
2623                 else
2624                         qp_params.ack_timeout = 0;
2625
2626                 qp->timeout = attr->timeout;
2627         }
2628
2629         if (attr_mask & IB_QP_RETRY_CNT) {
2630                 SET_FIELD(qp_params.modify_flags,
2631                           QED_ROCE_MODIFY_QP_VALID_RETRY_CNT, 1);
2632                 qp_params.retry_cnt = attr->retry_cnt;
2633         }
2634
2635         if (attr_mask & IB_QP_RNR_RETRY) {
2636                 SET_FIELD(qp_params.modify_flags,
2637                           QED_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT, 1);
2638                 qp_params.rnr_retry_cnt = attr->rnr_retry;
2639         }
2640
2641         if (attr_mask & IB_QP_RQ_PSN) {
2642                 SET_FIELD(qp_params.modify_flags,
2643                           QED_ROCE_MODIFY_QP_VALID_RQ_PSN, 1);
2644                 qp_params.rq_psn = attr->rq_psn;
2645                 qp->rq_psn = attr->rq_psn;
2646         }
2647
2648         if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
2649                 if (attr->max_rd_atomic > dev->attr.max_qp_req_rd_atomic_resc) {
2650                         rc = -EINVAL;
2651                         DP_ERR(dev,
2652                                "unsupported max_rd_atomic=%d, supported=%d\n",
2653                                attr->max_rd_atomic,
2654                                dev->attr.max_qp_req_rd_atomic_resc);
2655                         goto err;
2656                 }
2657
2658                 SET_FIELD(qp_params.modify_flags,
2659                           QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ, 1);
2660                 qp_params.max_rd_atomic_req = attr->max_rd_atomic;
2661         }
2662
2663         if (attr_mask & IB_QP_MIN_RNR_TIMER) {
2664                 SET_FIELD(qp_params.modify_flags,
2665                           QED_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER, 1);
2666                 qp_params.min_rnr_nak_timer = attr->min_rnr_timer;
2667         }
2668
2669         if (attr_mask & IB_QP_SQ_PSN) {
2670                 SET_FIELD(qp_params.modify_flags,
2671                           QED_ROCE_MODIFY_QP_VALID_SQ_PSN, 1);
2672                 qp_params.sq_psn = attr->sq_psn;
2673                 qp->sq_psn = attr->sq_psn;
2674         }
2675
2676         if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
2677                 if (attr->max_dest_rd_atomic >
2678                     dev->attr.max_qp_resp_rd_atomic_resc) {
2679                         DP_ERR(dev,
2680                                "unsupported max_dest_rd_atomic=%d, supported=%d\n",
2681                                attr->max_dest_rd_atomic,
2682                                dev->attr.max_qp_resp_rd_atomic_resc);
2683
2684                         rc = -EINVAL;
2685                         goto err;
2686                 }
2687
2688                 SET_FIELD(qp_params.modify_flags,
2689                           QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP, 1);
2690                 qp_params.max_rd_atomic_resp = attr->max_dest_rd_atomic;
2691         }
2692
2693         if (attr_mask & IB_QP_DEST_QPN) {
2694                 SET_FIELD(qp_params.modify_flags,
2695                           QED_ROCE_MODIFY_QP_VALID_DEST_QP, 1);
2696
2697                 qp_params.dest_qp = attr->dest_qp_num;
2698                 qp->dest_qp_num = attr->dest_qp_num;
2699         }
2700
2701         cur_state = qp->state;
2702
2703         /* Update the QP state before the actual ramrod to prevent a race with
2704          * the fast path. Modifying the QP state to error will cause the device
2705          * to flush the CQEs, and polling the flushed CQEs would be treated as
2706          * a potential issue if the QP were not already in the error state.
2707          */
2708         if ((attr_mask & IB_QP_STATE) && qp->qp_type != IB_QPT_GSI &&
2709             !udata && qp_params.new_state == QED_ROCE_QP_STATE_ERR)
2710                 qp->state = QED_ROCE_QP_STATE_ERR;
2711
2712         if (qp->qp_type != IB_QPT_GSI)
2713                 rc = dev->ops->rdma_modify_qp(dev->rdma_ctx,
2714                                               qp->qed_qp, &qp_params);
2715
2716         if (attr_mask & IB_QP_STATE) {
2717                 if ((qp->qp_type != IB_QPT_GSI) && (!udata))
2718                         rc = qedr_update_qp_state(dev, qp, cur_state,
2719                                                   qp_params.new_state);
2720                 qp->state = qp_params.new_state;
2721         }
2722
2723 err:
2724         return rc;
2725 }
2726
2727 static int qedr_to_ib_qp_acc_flags(struct qed_rdma_query_qp_out_params *params)
2728 {
2729         int ib_qp_acc_flags = 0;
2730
2731         if (params->incoming_rdma_write_en)
2732                 ib_qp_acc_flags |= IB_ACCESS_REMOTE_WRITE;
2733         if (params->incoming_rdma_read_en)
2734                 ib_qp_acc_flags |= IB_ACCESS_REMOTE_READ;
2735         if (params->incoming_atomic_en)
2736                 ib_qp_acc_flags |= IB_ACCESS_REMOTE_ATOMIC;
2737         ib_qp_acc_flags |= IB_ACCESS_LOCAL_WRITE;
2738         return ib_qp_acc_flags;
2739 }
2740
2741 int qedr_query_qp(struct ib_qp *ibqp,
2742                   struct ib_qp_attr *qp_attr,
2743                   int attr_mask, struct ib_qp_init_attr *qp_init_attr)
2744 {
2745         struct qed_rdma_query_qp_out_params params;
2746         struct qedr_qp *qp = get_qedr_qp(ibqp);
2747         struct qedr_dev *dev = qp->dev;
2748         int rc = 0;
2749
2750         memset(&params, 0, sizeof(params));
2751         memset(qp_attr, 0, sizeof(*qp_attr));
2752         memset(qp_init_attr, 0, sizeof(*qp_init_attr));
2753
2754         if (qp->qp_type != IB_QPT_GSI) {
2755                 rc = dev->ops->rdma_query_qp(dev->rdma_ctx, qp->qed_qp, &params);
2756                 if (rc)
2757                         goto err;
2758                 qp_attr->qp_state = qedr_get_ibqp_state(params.state);
2759         } else {
2760                 qp_attr->qp_state = qedr_get_ibqp_state(QED_ROCE_QP_STATE_RTS);
2761         }
2762
2763         qp_attr->cur_qp_state = qedr_get_ibqp_state(params.state);
2764         qp_attr->path_mtu = ib_mtu_int_to_enum(params.mtu);
2765         qp_attr->path_mig_state = IB_MIG_MIGRATED;
2766         qp_attr->rq_psn = params.rq_psn;
2767         qp_attr->sq_psn = params.sq_psn;
2768         qp_attr->dest_qp_num = params.dest_qp;
2769
2770         qp_attr->qp_access_flags = qedr_to_ib_qp_acc_flags(&params);
2771
2772         qp_attr->cap.max_send_wr = qp->sq.max_wr;
2773         qp_attr->cap.max_recv_wr = qp->rq.max_wr;
2774         qp_attr->cap.max_send_sge = qp->sq.max_sges;
2775         qp_attr->cap.max_recv_sge = qp->rq.max_sges;
2776         qp_attr->cap.max_inline_data = dev->attr.max_inline;
2777         qp_init_attr->cap = qp_attr->cap;
2778
2779         qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
2780         rdma_ah_set_grh(&qp_attr->ah_attr, NULL,
2781                         params.flow_label, qp->sgid_idx,
2782                         params.hop_limit_ttl, params.traffic_class_tos);
2783         rdma_ah_set_dgid_raw(&qp_attr->ah_attr, &params.dgid.bytes[0]);
2784         rdma_ah_set_port_num(&qp_attr->ah_attr, 1);
2785         rdma_ah_set_sl(&qp_attr->ah_attr, 0);
2786         qp_attr->timeout = qp->timeout;
2787         qp_attr->rnr_retry = params.rnr_retry;
2788         qp_attr->retry_cnt = params.retry_cnt;
2789         qp_attr->min_rnr_timer = params.min_rnr_nak_timer;
2790         qp_attr->pkey_index = params.pkey_index;
2791         qp_attr->port_num = 1;
2792         rdma_ah_set_path_bits(&qp_attr->ah_attr, 0);
2793         rdma_ah_set_static_rate(&qp_attr->ah_attr, 0);
2794         qp_attr->alt_pkey_index = 0;
2795         qp_attr->alt_port_num = 0;
2796         qp_attr->alt_timeout = 0;
2797         memset(&qp_attr->alt_ah_attr, 0, sizeof(qp_attr->alt_ah_attr));
2798
2799         qp_attr->sq_draining = (params.state == QED_ROCE_QP_STATE_SQD) ? 1 : 0;
2800         qp_attr->max_dest_rd_atomic = params.max_dest_rd_atomic;
2801         qp_attr->max_rd_atomic = params.max_rd_atomic;
2802         qp_attr->en_sqd_async_notify = (params.sqd_async) ? 1 : 0;
2803
2804         DP_DEBUG(dev, QEDR_MSG_QP, "QEDR_QUERY_QP: max_inline_data=%d\n",
2805                  qp_attr->cap.max_inline_data);
2806
2807 err:
2808         return rc;
2809 }
2810
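/* Tear down a QP. RoCE QPs that are still active are first moved to the
 * error state so outstanding work is flushed; iWARP QPs must wait for
 * any in-flight connect/disconnect to finish before the resources are
 * freed.
 */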
2811 int qedr_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
2812 {
2813         struct qedr_qp *qp = get_qedr_qp(ibqp);
2814         struct qedr_dev *dev = qp->dev;
2815         struct ib_qp_attr attr;
2816         int attr_mask = 0;
2817
2818         DP_DEBUG(dev, QEDR_MSG_QP, "destroy qp: destroying %p, qp type=%d\n",
2819                  qp, qp->qp_type);
2820
2821         if (rdma_protocol_roce(&dev->ibdev, 1)) {
2822                 if ((qp->state != QED_ROCE_QP_STATE_RESET) &&
2823                     (qp->state != QED_ROCE_QP_STATE_ERR) &&
2824                     (qp->state != QED_ROCE_QP_STATE_INIT)) {
2825
2826                         attr.qp_state = IB_QPS_ERR;
2827                         attr_mask |= IB_QP_STATE;
2828
2829                         /* Change the QP state to ERROR */
2830                         qedr_modify_qp(ibqp, &attr, attr_mask, NULL);
2831                 }
2832         } else {
2833                 /* If connection establishment started, the WAIT_FOR_CONNECT
2834                  * bit will be on and we need to wait for the establishment
2835                  * to complete before destroying the qp.
2836                  */
2837                 if (test_and_set_bit(QEDR_IWARP_CM_WAIT_FOR_CONNECT,
2838                                      &qp->iwarp_cm_flags))
2839                         wait_for_completion(&qp->iwarp_cm_comp);
2840
2841                 /* If graceful disconnect started, the WAIT_FOR_DISCONNECT
2842                  * bit will be on, and we need to wait for the disconnect to
2843                  * complete before continuing. We can use the same completion,
2844                  * iwarp_cm_comp, since this is the only place that waits for
2845                  * this completion and it is sequential. In addition,
2846                  * disconnect can't occur before the connection is fully
2847                  * established, therefore if WAIT_FOR_DISCONNECT is on it
2848                  * means WAIT_FOR_CONNECT is also on and the completion for
2849                  * CONNECT already occurred.
2850                  */
2851                 if (test_and_set_bit(QEDR_IWARP_CM_WAIT_FOR_DISCONNECT,
2852                                      &qp->iwarp_cm_flags))
2853                         wait_for_completion(&qp->iwarp_cm_comp);
2854         }
2855
2856         if (qp->qp_type == IB_QPT_GSI)
2857                 qedr_destroy_gsi_qp(dev);
2858
2859         /* We need to remove the entry from the xarray before we release the
2860          * qp_id, to avoid a race where the qp_id is reallocated and a
2861          * subsequent xa_insert fails.
2862          */
2863         if (rdma_protocol_iwarp(&dev->ibdev, 1))
2864                 xa_erase(&dev->qps, qp->qp_id);
2865
2866         qedr_free_qp_resources(dev, qp, udata);
2867
2868         if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
2869                 qedr_iw_qp_rem_ref(&qp->ibqp);
2870                 wait_for_completion(&qp->qp_rel_comp);
2871         }
2872
2873         return 0;
2874 }
2875
2876 int qedr_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr,
2877                    struct ib_udata *udata)
2878 {
2879         struct qedr_ah *ah = get_qedr_ah(ibah);
2880
2881         rdma_copy_ah_attr(&ah->attr, init_attr->ah_attr);
2882
2883         return 0;
2884 }
2885
2886 int qedr_destroy_ah(struct ib_ah *ibah, u32 flags)
2887 {
2888         struct qedr_ah *ah = get_qedr_ah(ibah);
2889
2890         rdma_destroy_ah_attr(&ah->attr);
2891         return 0;
2892 }
2893
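/* Collect all of the MR's PBL tables (the primary one plus anything
 * still on the in-use list) onto the free list and free them.
 */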
2894 static void free_mr_info(struct qedr_dev *dev, struct mr_info *info)
2895 {
2896         struct qedr_pbl *pbl, *tmp;
2897
2898         if (info->pbl_table)
2899                 list_add_tail(&info->pbl_table->list_entry,
2900                               &info->free_pbl_list);
2901
2902         if (!list_empty(&info->inuse_pbl_list))
2903                 list_splice(&info->inuse_pbl_list, &info->free_pbl_list);
2904
2905         list_for_each_entry_safe(pbl, tmp, &info->free_pbl_list, list_entry) {
2906                 list_del(&pbl->list_entry);
2907                 qedr_free_pbl(dev, &info->pbl_info, pbl);
2908         }
2909 }
2910
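/* Set up the PBL bookkeeping for an MR: compute the table geometry for
 * page_list_len pages, allocate the primary PBL table and, as an
 * optimization, one spare table that is parked on the free list.
 */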
2911 static int init_mr_info(struct qedr_dev *dev, struct mr_info *info,
2912                         size_t page_list_len, bool two_layered)
2913 {
2914         struct qedr_pbl *tmp;
2915         int rc;
2916
2917         INIT_LIST_HEAD(&info->free_pbl_list);
2918         INIT_LIST_HEAD(&info->inuse_pbl_list);
2919
2920         rc = qedr_prepare_pbl_tbl(dev, &info->pbl_info,
2921                                   page_list_len, two_layered);
2922         if (rc)
2923                 goto done;
2924
2925         info->pbl_table = qedr_alloc_pbl_tbl(dev, &info->pbl_info, GFP_KERNEL);
2926         if (IS_ERR(info->pbl_table)) {
2927                 rc = PTR_ERR(info->pbl_table);
2928                 goto done;
2929         }
2930
2931         DP_DEBUG(dev, QEDR_MSG_MR, "pbl_table_pa = %pa\n",
2932                  &info->pbl_table->pa);
2933
2934         /* In the usual case we use 2 PBLs, so we add one to the free
2935          * list and allocate another one
2936          */
2937         tmp = qedr_alloc_pbl_tbl(dev, &info->pbl_info, GFP_KERNEL);
2938         if (IS_ERR(tmp)) {
2939                 DP_DEBUG(dev, QEDR_MSG_MR, "Extra PBL is not allocated\n");
2940                 goto done;
2941         }
2942
2943         list_add_tail(&tmp->list_entry, &info->free_pbl_list);
2944
2945         DP_DEBUG(dev, QEDR_MSG_MR, "extra pbl_table_pa = %pa\n", &tmp->pa);
2946
2947 done:
2948         if (rc)
2949                 free_mr_info(dev, info);
2950
2951         return rc;
2952 }
2953
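/* Register a user memory region: pin the pages with ib_umem_get(),
 * describe them with PBLs, then allocate a TID and register it with the
 * firmware. lkey/rkey are derived from the TID as itid << 8 | key, e.g.
 * itid 0x3 with key 0 yields lkey 0x300.
 */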
2954 struct ib_mr *qedr_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
2955                                u64 usr_addr, int acc, struct ib_udata *udata)
2956 {
2957         struct qedr_dev *dev = get_qedr_dev(ibpd->device);
2958         struct qedr_mr *mr;
2959         struct qedr_pd *pd;
2960         int rc = -ENOMEM;
2961
2962         pd = get_qedr_pd(ibpd);
2963         DP_DEBUG(dev, QEDR_MSG_MR,
2964                  "qedr_register user mr pd = %d start = %lld, len = %lld, usr_addr = %lld, acc = %d\n",
2965                  pd->pd_id, start, len, usr_addr, acc);
2966
2967         if (acc & IB_ACCESS_REMOTE_WRITE && !(acc & IB_ACCESS_LOCAL_WRITE))
2968                 return ERR_PTR(-EINVAL);
2969
2970         mr = kzalloc(sizeof(*mr), GFP_KERNEL);
2971         if (!mr)
2972                 return ERR_PTR(rc);
2973
2974         mr->type = QEDR_MR_USER;
2975
2976         mr->umem = ib_umem_get(ibpd->device, start, len, acc);
2977         if (IS_ERR(mr->umem)) {
2978                 rc = -EFAULT;
2979                 goto err0;
2980         }
2981
2982         rc = init_mr_info(dev, &mr->info,
2983                           ib_umem_num_dma_blocks(mr->umem, PAGE_SIZE), 1);
2984         if (rc)
2985                 goto err1;
2986
2987         qedr_populate_pbls(dev, mr->umem, mr->info.pbl_table,
2988                            &mr->info.pbl_info, PAGE_SHIFT);
2989
2990         rc = dev->ops->rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid);
2991         if (rc) {
2992                 if (rc == -EINVAL)
2993                         DP_ERR(dev, "Out of MR resources\n");
2994                 else
2995                         DP_ERR(dev, "roce alloc tid returned error %d\n", rc);
2996
2997                 goto err1;
2998         }
2999
3000         /* Index only, 18 bits long, lkey = itid << 8 | key */
3001         mr->hw_mr.tid_type = QED_RDMA_TID_REGISTERED_MR;
3002         mr->hw_mr.key = 0;
3003         mr->hw_mr.pd = pd->pd_id;
3004         mr->hw_mr.local_read = 1;
3005         mr->hw_mr.local_write = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0;
3006         mr->hw_mr.remote_read = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
3007         mr->hw_mr.remote_write = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
3008         mr->hw_mr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
3009         mr->hw_mr.mw_bind = false;
3010         mr->hw_mr.pbl_ptr = mr->info.pbl_table[0].pa;
3011         mr->hw_mr.pbl_two_level = mr->info.pbl_info.two_layered;
3012         mr->hw_mr.pbl_page_size_log = ilog2(mr->info.pbl_info.pbl_size);
3013         mr->hw_mr.page_size_log = PAGE_SHIFT;
3014         mr->hw_mr.length = len;
3015         mr->hw_mr.vaddr = usr_addr;
3016         mr->hw_mr.phy_mr = false;
3017         mr->hw_mr.dma_mr = false;
3018
3019         rc = dev->ops->rdma_register_tid(dev->rdma_ctx, &mr->hw_mr);
3020         if (rc) {
3021                 DP_ERR(dev, "roce register tid returned an error %d\n", rc);
3022                 goto err2;
3023         }
3024
3025         mr->ibmr.lkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
3026         if (mr->hw_mr.remote_write || mr->hw_mr.remote_read ||
3027             mr->hw_mr.remote_atomic)
3028                 mr->ibmr.rkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
3029
3030         DP_DEBUG(dev, QEDR_MSG_MR, "register user mr lkey: %x\n",
3031                  mr->ibmr.lkey);
3032         return &mr->ibmr;
3033
3034 err2:
3035         dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
3036 err1:
3037         qedr_free_pbl(dev, &mr->info.pbl_info, mr->info.pbl_table);
3038 err0:
3039         kfree(mr);
3040         return ERR_PTR(rc);
3041 }
3042
3043 int qedr_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata)
3044 {
3045         struct qedr_mr *mr = get_qedr_mr(ib_mr);
3046         struct qedr_dev *dev = get_qedr_dev(ib_mr->device);
3047         int rc = 0;
3048
3049         rc = dev->ops->rdma_deregister_tid(dev->rdma_ctx, mr->hw_mr.itid);
3050         if (rc)
3051                 return rc;
3052
3053         dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
3054
3055         if (mr->type != QEDR_MR_DMA)
3056                 free_mr_info(dev, &mr->info);
3057
3058         /* It could be user-registered memory; ib_umem_release() handles NULL. */
3059         ib_umem_release(mr->umem);
3060
3061         kfree(mr);
3062
3063         return rc;
3064 }
3065
3066 static struct qedr_mr *__qedr_alloc_mr(struct ib_pd *ibpd,
3067                                        int max_page_list_len)
3068 {
3069         struct qedr_pd *pd = get_qedr_pd(ibpd);
3070         struct qedr_dev *dev = get_qedr_dev(ibpd->device);
3071         struct qedr_mr *mr;
3072         int rc = -ENOMEM;
3073
3074         DP_DEBUG(dev, QEDR_MSG_MR,
3075                  "qedr_alloc_frmr pd = %d max_page_list_len= %d\n", pd->pd_id,
3076                  max_page_list_len);
3077
3078         mr = kzalloc(sizeof(*mr), GFP_KERNEL);
3079         if (!mr)
3080                 return ERR_PTR(rc);
3081
3082         mr->dev = dev;
3083         mr->type = QEDR_MR_FRMR;
3084
3085         rc = init_mr_info(dev, &mr->info, max_page_list_len, 1);
3086         if (rc)
3087                 goto err0;
3088
3089         rc = dev->ops->rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid);
3090         if (rc) {
3091                 if (rc == -EINVAL)
3092                         DP_ERR(dev, "Out of MR resources\n");
3093                 else
3094                         DP_ERR(dev, "roce alloc tid returned error %d\n", rc);
3095
3096                 goto err1;
3097         }
3098
3099         /* Index only, 18 bits long, lkey = itid << 8 | key */
3100         mr->hw_mr.tid_type = QED_RDMA_TID_FMR;
3101         mr->hw_mr.key = 0;
3102         mr->hw_mr.pd = pd->pd_id;
3103         mr->hw_mr.local_read = 1;
3104         mr->hw_mr.local_write = 0;
3105         mr->hw_mr.remote_read = 0;
3106         mr->hw_mr.remote_write = 0;
3107         mr->hw_mr.remote_atomic = 0;
3108         mr->hw_mr.mw_bind = false;
3109         mr->hw_mr.pbl_ptr = 0;
3110         mr->hw_mr.pbl_two_level = mr->info.pbl_info.two_layered;
3111         mr->hw_mr.pbl_page_size_log = ilog2(mr->info.pbl_info.pbl_size);
3112         mr->hw_mr.length = 0;
3113         mr->hw_mr.vaddr = 0;
3114         mr->hw_mr.phy_mr = true;
3115         mr->hw_mr.dma_mr = false;
3116
3117         rc = dev->ops->rdma_register_tid(dev->rdma_ctx, &mr->hw_mr);
3118         if (rc) {
3119                 DP_ERR(dev, "roce register tid returned an error %d\n", rc);
3120                 goto err2;
3121         }
3122
3123         mr->ibmr.lkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
3124         mr->ibmr.rkey = mr->ibmr.lkey;
3125
3126         DP_DEBUG(dev, QEDR_MSG_MR, "alloc frmr: %x\n", mr->ibmr.lkey);
3127         return mr;
3128
3129 err2:
3130         dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
3131 err1:
3132         qedr_free_pbl(dev, &mr->info.pbl_info, mr->info.pbl_table);
3133 err0:
3134         kfree(mr);
3135         return ERR_PTR(rc);
3136 }
3137
3138 struct ib_mr *qedr_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type,
3139                             u32 max_num_sg)
3140 {
3141         struct qedr_mr *mr;
3142
3143         if (mr_type != IB_MR_TYPE_MEM_REG)
3144                 return ERR_PTR(-EINVAL);
3145
3146         mr = __qedr_alloc_mr(ibpd, max_num_sg);
3147
3148         if (IS_ERR(mr))
3149                 return ERR_PTR(-EINVAL);
3150
3151         return &mr->ibmr;
3152 }
3153
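/* ib_sg_to_pages() callback: write one 64-bit page address (PBE) into
 * the MR's PBL, moving on to the next PBL table whenever the current
 * one is full. Fails once all num_pbes entries have been used.
 */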
3154 static int qedr_set_page(struct ib_mr *ibmr, u64 addr)
3155 {
3156         struct qedr_mr *mr = get_qedr_mr(ibmr);
3157         struct qedr_pbl *pbl_table;
3158         struct regpair *pbe;
3159         u32 pbes_in_page;
3160
3161         if (unlikely(mr->npages == mr->info.pbl_info.num_pbes)) {
3162                 DP_ERR(mr->dev, "qedr_set_page fails when %d\n", mr->npages);
3163                 return -ENOMEM;
3164         }
3165
3166         DP_DEBUG(mr->dev, QEDR_MSG_MR, "qedr_set_page pages[%d] = 0x%llx\n",
3167                  mr->npages, addr);
3168
3169         pbes_in_page = mr->info.pbl_info.pbl_size / sizeof(u64);
3170         pbl_table = mr->info.pbl_table + (mr->npages / pbes_in_page);
3171         pbe = (struct regpair *)pbl_table->va;
3172         pbe +=  mr->npages % pbes_in_page;
3173         pbe->lo = cpu_to_le32((u32)addr);
3174         pbe->hi = cpu_to_le32((u32)upper_32_bits(addr));
3175
3176         mr->npages++;
3177
3178         return 0;
3179 }
3180
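/* Move the PBL tables of completed (and therefore invalidated) FMR work
 * requests from the in-use list back to the free list. Note the '- 1':
 * one completion is deliberately held back, apparently as a safety
 * margin for the most recent operation.
 */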
3181 static void handle_completed_mrs(struct qedr_dev *dev, struct mr_info *info)
3182 {
3183         int work = info->completed - info->completed_handled - 1;
3184
3185         DP_DEBUG(dev, QEDR_MSG_MR, "Special FMR work = %d\n", work);
3186         while (work-- > 0 && !list_empty(&info->inuse_pbl_list)) {
3187                 struct qedr_pbl *pbl;
3188
3189                 /* Free all the page lists that can be freed (all the ones
3190                  * that were invalidated), under the assumption that if an
3191                  * FMR completed successfully, then any invalidate operation
3192                  * posted before it has completed as well.
3193                  */
3194                 pbl = list_first_entry(&info->inuse_pbl_list,
3195                                        struct qedr_pbl, list_entry);
3196                 list_move_tail(&pbl->list_entry, &info->free_pbl_list);
3197                 info->completed_handled++;
3198         }
3199 }
3200
3201 int qedr_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
3202                    int sg_nents, unsigned int *sg_offset)
3203 {
3204         struct qedr_mr *mr = get_qedr_mr(ibmr);
3205
3206         mr->npages = 0;
3207
3208         handle_completed_mrs(mr->dev, &mr->info);
3209         return ib_sg_to_pages(ibmr, sg, sg_nents, NULL, qedr_set_page);
3210 }
3211
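/* DMA MR: covers all of the memory visible to the device, so no PBL is
 * built; only a TID is allocated and registered with dma_mr set.
 */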
3212 struct ib_mr *qedr_get_dma_mr(struct ib_pd *ibpd, int acc)
3213 {
3214         struct qedr_dev *dev = get_qedr_dev(ibpd->device);
3215         struct qedr_pd *pd = get_qedr_pd(ibpd);
3216         struct qedr_mr *mr;
3217         int rc;
3218
3219         mr = kzalloc(sizeof(*mr), GFP_KERNEL);
3220         if (!mr)
3221                 return ERR_PTR(-ENOMEM);
3222
3223         mr->type = QEDR_MR_DMA;
3224
3225         rc = dev->ops->rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid);
3226         if (rc) {
3227                 if (rc == -EINVAL)
3228                         DP_ERR(dev, "Out of MR resources\n");
3229                 else
3230                         DP_ERR(dev, "roce alloc tid returned error %d\n", rc);
3231
3232                 goto err1;
3233         }
3234
3235         /* Index only, 18 bits long, lkey = itid << 8 | key */
3236         mr->hw_mr.tid_type = QED_RDMA_TID_REGISTERED_MR;
3237         mr->hw_mr.pd = pd->pd_id;
3238         mr->hw_mr.local_read = 1;
3239         mr->hw_mr.local_write = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0;
3240         mr->hw_mr.remote_read = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
3241         mr->hw_mr.remote_write = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
3242         mr->hw_mr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
3243         mr->hw_mr.dma_mr = true;
3244
3245         rc = dev->ops->rdma_register_tid(dev->rdma_ctx, &mr->hw_mr);
3246         if (rc) {
3247                 DP_ERR(dev, "roce register tid returned an error %d\n", rc);
3248                 goto err2;
3249         }
3250
3251         mr->ibmr.lkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
3252         if (mr->hw_mr.remote_write || mr->hw_mr.remote_read ||
3253             mr->hw_mr.remote_atomic)
3254                 mr->ibmr.rkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
3255
3256         DP_DEBUG(dev, QEDR_MSG_MR, "get dma mr: lkey = %x\n", mr->ibmr.lkey);
3257         return &mr->ibmr;
3258
3259 err2:
3260         dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
3261 err1:
3262         kfree(mr);
3263         return ERR_PTR(rc);
3264 }
3265
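/* Classic ring-buffer full test: the queue is full when advancing the
 * producer by one (modulo max_wr) would land on the consumer, i.e. one
 * slot is always left empty to distinguish "full" from "empty".
 */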
3266 static inline int qedr_wq_is_full(struct qedr_qp_hwq_info *wq)
3267 {
3268         return (((wq->prod + 1) % wq->max_wr) == wq->cons);
3269 }
3270
3271 static int sge_data_len(struct ib_sge *sg_list, int num_sge)
3272 {
3273         int i, len = 0;
3274
3275         for (i = 0; i < num_sge; i++)
3276                 len += sg_list[i].length;
3277
3278         return len;
3279 }
3280
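/* Byte-swap each 64-bit word of an inline-data segment. Called once for
 * every fully written segment and once more for a trailing partial one.
 */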
3281 static void swap_wqe_data64(u64 *p)
3282 {
3283         int i;
3284
3285         for (i = 0; i < QEDR_SQE_ELEMENT_SIZE / sizeof(u64); i++, p++)
3286                 *p = cpu_to_be64(cpu_to_le64(*p));
3287 }
3288
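/* Copy the WR's payload directly into the SQ instead of posting SGEs.
 * The data is packed into as many rdma_sq_common_wqe-sized segments as
 * needed, each one byte-swapped for the device once filled. Returns the
 * payload size; too much inline data sets *bad_wr and returns 0.
 */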
3289 static u32 qedr_prepare_sq_inline_data(struct qedr_dev *dev,
3290                                        struct qedr_qp *qp, u8 *wqe_size,
3291                                        const struct ib_send_wr *wr,
3292                                        const struct ib_send_wr **bad_wr,
3293                                        u8 *bits, u8 bit)
3294 {
3295         u32 data_size = sge_data_len(wr->sg_list, wr->num_sge);
3296         char *seg_prt, *wqe;
3297         int i, seg_siz;
3298
3299         if (data_size > ROCE_REQ_MAX_INLINE_DATA_SIZE) {
3300                 DP_ERR(dev, "Too much inline data in WR: %d\n", data_size);
3301                 *bad_wr = wr;
3302                 return 0;
3303         }
3304
3305         if (!data_size)
3306                 return data_size;
3307
3308         *bits |= bit;
3309
3310         seg_prt = NULL;
3311         wqe = NULL;
3312         seg_siz = 0;
3313
3314         /* Copy data inline */
3315         for (i = 0; i < wr->num_sge; i++) {
3316                 u32 len = wr->sg_list[i].length;
3317                 void *src = (void *)(uintptr_t)wr->sg_list[i].addr;
3318
3319                 while (len > 0) {
3320                         u32 cur;
3321
3322                         /* New segment required */
3323                         if (!seg_siz) {
3324                                 wqe = (char *)qed_chain_produce(&qp->sq.pbl);
3325                                 seg_prt = wqe;
3326                                 seg_siz = sizeof(struct rdma_sq_common_wqe);
3327                                 (*wqe_size)++;
3328                         }
3329
3330                         /* Calculate currently allowed length */
3331                         cur = min_t(u32, len, seg_siz);
3332                         memcpy(seg_prt, src, cur);
3333
3334                         /* Update segment variables */
3335                         seg_prt += cur;
3336                         seg_siz -= cur;
3337
3338                         /* Update sge variables */
3339                         src += cur;
3340                         len -= cur;
3341
3342                         /* Swap fully-completed segments */
3343                         if (!seg_siz)
3344                                 swap_wqe_data64((u64 *)wqe);
3345                 }
3346         }
3347
3348         /* Swap the last, partially filled segment */
3349         if (seg_siz)
3350                 swap_wqe_data64((u64 *)wqe);
3351
3352         return data_size;
3353 }
3354
3355 #define RQ_SGE_SET(sge, vaddr, vlength, vflags)                 \
3356         do {                                                    \
3357                 DMA_REGPAIR_LE((sge)->addr, vaddr);             \
3358                 (sge)->length = cpu_to_le32(vlength);           \
3359                 (sge)->flags = cpu_to_le32(vflags);             \
3360         } while (0)
3361
3362 #define SRQ_HDR_SET(hdr, vwr_id, num_sge)                       \
3363         do {                                                    \
3364                 DMA_REGPAIR_LE((hdr)->wr_id, vwr_id);           \
3365                 (hdr)->num_sges = num_sge;                      \
3366         } while (0)
3367
3368 #define SRQ_SGE_SET(sge, vaddr, vlength, vlkey)                 \
3369         do {                                                    \
3370                 DMA_REGPAIR_LE((sge)->addr, vaddr);             \
3371                 (sge)->length = cpu_to_le32(vlength);           \
3372                 (sge)->l_key = cpu_to_le32(vlkey);              \
3373         } while (0)
3374
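/* Produce one SQ SGE element per WR SGE and return the total payload
 * length in bytes; *wqe_size, when provided, is grown accordingly.
 */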
3375 static u32 qedr_prepare_sq_sges(struct qedr_qp *qp, u8 *wqe_size,
3376                                 const struct ib_send_wr *wr)
3377 {
3378         u32 data_size = 0;
3379         int i;
3380
3381         for (i = 0; i < wr->num_sge; i++) {
3382                 struct rdma_sq_sge *sge = qed_chain_produce(&qp->sq.pbl);
3383
3384                 DMA_REGPAIR_LE(sge->addr, wr->sg_list[i].addr);
3385                 sge->l_key = cpu_to_le32(wr->sg_list[i].lkey);
3386                 sge->length = cpu_to_le32(wr->sg_list[i].length);
3387                 data_size += wr->sg_list[i].length;
3388         }
3389
3390         if (wqe_size)
3391                 *wqe_size += wr->num_sge;
3392
3393         return data_size;
3394 }
3395
3396 static u32 qedr_prepare_sq_rdma_data(struct qedr_dev *dev,
3397                                      struct qedr_qp *qp,
3398                                      struct rdma_sq_rdma_wqe_1st *rwqe,
3399                                      struct rdma_sq_rdma_wqe_2nd *rwqe2,
3400                                      const struct ib_send_wr *wr,
3401                                      const struct ib_send_wr **bad_wr)
3402 {
3403         rwqe2->r_key = cpu_to_le32(rdma_wr(wr)->rkey);
3404         DMA_REGPAIR_LE(rwqe2->remote_va, rdma_wr(wr)->remote_addr);
3405
3406         if (wr->send_flags & IB_SEND_INLINE &&
3407             (wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM ||
3408              wr->opcode == IB_WR_RDMA_WRITE)) {
3409                 u8 flags = 0;
3410
3411                 SET_FIELD2(flags, RDMA_SQ_RDMA_WQE_1ST_INLINE_FLG, 1);
3412                 return qedr_prepare_sq_inline_data(dev, qp, &rwqe->wqe_size, wr,
3413                                                    bad_wr, &rwqe->flags, flags);
3414         }
3415
3416         return qedr_prepare_sq_sges(qp, &rwqe->wqe_size, wr);
3417 }
3418
3419 static u32 qedr_prepare_sq_send_data(struct qedr_dev *dev,
3420                                      struct qedr_qp *qp,
3421                                      struct rdma_sq_send_wqe_1st *swqe,
3422                                      struct rdma_sq_send_wqe_2st *swqe2,
3423                                      const struct ib_send_wr *wr,
3424                                      const struct ib_send_wr **bad_wr)
3425 {
3426         memset(swqe2, 0, sizeof(*swqe2));
3427         if (wr->send_flags & IB_SEND_INLINE) {
3428                 u8 flags = 0;
3429
3430                 SET_FIELD2(flags, RDMA_SQ_SEND_WQE_INLINE_FLG, 1);
3431                 return qedr_prepare_sq_inline_data(dev, qp, &swqe->wqe_size, wr,
3432                                                    bad_wr, &swqe->flags, flags);
3433         }
3434
3435         return qedr_prepare_sq_sges(qp, &swqe->wqe_size, wr);
3436 }
3437
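/* Build the two-element fast-MR WQE for an IB_WR_REG_MR: the first
 * element carries the iova and key, the second the access flags, the
 * page size and the PBL address that the hardware will walk.
 */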
3438 static int qedr_prepare_reg(struct qedr_qp *qp,
3439                             struct rdma_sq_fmr_wqe_1st *fwqe1,
3440                             const struct ib_reg_wr *wr)
3441 {
3442         struct qedr_mr *mr = get_qedr_mr(wr->mr);
3443         struct rdma_sq_fmr_wqe_2nd *fwqe2;
3444
3445         fwqe2 = (struct rdma_sq_fmr_wqe_2nd *)qed_chain_produce(&qp->sq.pbl);
3446         fwqe1->addr.hi = upper_32_bits(mr->ibmr.iova);
3447         fwqe1->addr.lo = lower_32_bits(mr->ibmr.iova);
3448         fwqe1->l_key = wr->key;
3449
3450         fwqe2->access_ctrl = 0;
3451
3452         SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_REMOTE_READ,
3453                    !!(wr->access & IB_ACCESS_REMOTE_READ));
3454         SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_REMOTE_WRITE,
3455                    !!(wr->access & IB_ACCESS_REMOTE_WRITE));
3456         SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_ENABLE_ATOMIC,
3457                    !!(wr->access & IB_ACCESS_REMOTE_ATOMIC));
3458         SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_LOCAL_READ, 1);
3459         SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_LOCAL_WRITE,
3460                    !!(wr->access & IB_ACCESS_LOCAL_WRITE));
3461         fwqe2->fmr_ctrl = 0;
3462
3463         SET_FIELD2(fwqe2->fmr_ctrl, RDMA_SQ_FMR_WQE_2ND_PAGE_SIZE_LOG,
3464                    ilog2(mr->ibmr.page_size) - 12);
3465
3466         fwqe2->length_hi = 0;
3467         fwqe2->length_lo = mr->ibmr.length;
3468         fwqe2->pbl_addr.hi = upper_32_bits(mr->info.pbl_table->pa);
3469         fwqe2->pbl_addr.lo = lower_32_bits(mr->info.pbl_table->pa);
3470
3471         qp->wqe_wr_id[qp->sq.prod].mr = mr;
3472
3473         return 0;
3474 }
3475
3476 static enum ib_wc_opcode qedr_ib_to_wc_opcode(enum ib_wr_opcode opcode)
3477 {
3478         switch (opcode) {
3479         case IB_WR_RDMA_WRITE:
3480         case IB_WR_RDMA_WRITE_WITH_IMM:
3481                 return IB_WC_RDMA_WRITE;
3482         case IB_WR_SEND_WITH_IMM:
3483         case IB_WR_SEND:
3484         case IB_WR_SEND_WITH_INV:
3485                 return IB_WC_SEND;
3486         case IB_WR_RDMA_READ:
3487         case IB_WR_RDMA_READ_WITH_INV:
3488                 return IB_WC_RDMA_READ;
3489         case IB_WR_ATOMIC_CMP_AND_SWP:
3490                 return IB_WC_COMP_SWAP;
3491         case IB_WR_ATOMIC_FETCH_AND_ADD:
3492                 return IB_WC_FETCH_ADD;
3493         case IB_WR_REG_MR:
3494                 return IB_WC_REG_MR;
3495         case IB_WR_LOCAL_INV:
3496                 return IB_WC_LOCAL_INV;
3497         default:
3498                 return IB_WC_SEND;
3499         }
3500 }
3501
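/* Validate a send WR against the SQ's capacity. Each failure reason is
 * logged only on its first occurrence (tracked in qp->err_bitmap) so
 * that a misbehaving consumer cannot flood the kernel log.
 */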
3502 static inline bool qedr_can_post_send(struct qedr_qp *qp,
3503                                       const struct ib_send_wr *wr)
3504 {
3505         int wq_is_full, err_wr, pbl_is_full;
3506         struct qedr_dev *dev = qp->dev;
3507
3508         /* prevent SQ overflow and/or processing of a bad WR */
3509         err_wr = wr->num_sge > qp->sq.max_sges;
3510         wq_is_full = qedr_wq_is_full(&qp->sq);
3511         pbl_is_full = qed_chain_get_elem_left_u32(&qp->sq.pbl) <
3512                       QEDR_MAX_SQE_ELEMENTS_PER_SQE;
3513         if (wq_is_full || err_wr || pbl_is_full) {
3514                 if (wq_is_full && !(qp->err_bitmap & QEDR_QP_ERR_SQ_FULL)) {
3515                         DP_ERR(dev,
3516                                "error: WQ is full. Post send on QP %p failed (this error appears only once)\n",
3517                                qp);
3518                         qp->err_bitmap |= QEDR_QP_ERR_SQ_FULL;
3519                 }
3520
3521                 if (err_wr && !(qp->err_bitmap & QEDR_QP_ERR_BAD_SR)) {
3522                         DP_ERR(dev,
3523                                "error: WR is bad. Post send on QP %p failed (this error appears only once)\n",
3524                                qp);
3525                         qp->err_bitmap |= QEDR_QP_ERR_BAD_SR;
3526                 }
3527
3528                 if (pbl_is_full &&
3529                     !(qp->err_bitmap & QEDR_QP_ERR_SQ_PBL_FULL)) {
3530                         DP_ERR(dev,
3531                                "error: WQ PBL is full. Post send on QP %p failed (this error appears only once)\n",
3532                                qp);
3533                         qp->err_bitmap |= QEDR_QP_ERR_SQ_PBL_FULL;
3534                 }
3535                 return false;
3536         }
3537         return true;
3538 }
3539
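/* Build and enqueue a single send WQE. If the WR turns out to be bad,
 * the SQ producer and prev_wqe_size are rolled back so the chain is
 * left exactly as it was before this WR was processed.
 */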
3540 static int __qedr_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
3541                             const struct ib_send_wr **bad_wr)
3542 {
3543         struct qedr_dev *dev = get_qedr_dev(ibqp->device);
3544         struct qedr_qp *qp = get_qedr_qp(ibqp);
3545         struct rdma_sq_atomic_wqe_1st *awqe1;
3546         struct rdma_sq_atomic_wqe_2nd *awqe2;
3547         struct rdma_sq_atomic_wqe_3rd *awqe3;
3548         struct rdma_sq_send_wqe_2st *swqe2;
3549         struct rdma_sq_local_inv_wqe *iwqe;
3550         struct rdma_sq_rdma_wqe_2nd *rwqe2;
3551         struct rdma_sq_send_wqe_1st *swqe;
3552         struct rdma_sq_rdma_wqe_1st *rwqe;
3553         struct rdma_sq_fmr_wqe_1st *fwqe1;
3554         struct rdma_sq_common_wqe *wqe;
3555         u32 length;
3556         int rc = 0;
3557         bool comp;
3558
3559         if (!qedr_can_post_send(qp, wr)) {
3560                 *bad_wr = wr;
3561                 return -ENOMEM;
3562         }
3563
3564         wqe = qed_chain_produce(&qp->sq.pbl);
3565         qp->wqe_wr_id[qp->sq.prod].signaled =
3566                 !!(wr->send_flags & IB_SEND_SIGNALED) || qp->signaled;
3567
3568         wqe->flags = 0;
3569         SET_FIELD2(wqe->flags, RDMA_SQ_SEND_WQE_SE_FLG,
3570                    !!(wr->send_flags & IB_SEND_SOLICITED));
3571         comp = (!!(wr->send_flags & IB_SEND_SIGNALED)) || qp->signaled;
3572         SET_FIELD2(wqe->flags, RDMA_SQ_SEND_WQE_COMP_FLG, comp);
3573         SET_FIELD2(wqe->flags, RDMA_SQ_SEND_WQE_RD_FENCE_FLG,
3574                    !!(wr->send_flags & IB_SEND_FENCE));
3575         wqe->prev_wqe_size = qp->prev_wqe_size;
3576
3577         qp->wqe_wr_id[qp->sq.prod].opcode = qedr_ib_to_wc_opcode(wr->opcode);
3578
3579         switch (wr->opcode) {
3580         case IB_WR_SEND_WITH_IMM:
3581                 if (unlikely(rdma_protocol_iwarp(&dev->ibdev, 1))) {
3582                         rc = -EINVAL;
3583                         *bad_wr = wr;
3584                         break;
3585                 }
3586                 wqe->req_type = RDMA_SQ_REQ_TYPE_SEND_WITH_IMM;
3587                 swqe = (struct rdma_sq_send_wqe_1st *)wqe;
3588                 swqe->wqe_size = 2;
3589                 swqe2 = qed_chain_produce(&qp->sq.pbl);
3590
3591                 swqe->inv_key_or_imm_data = cpu_to_le32(be32_to_cpu(wr->ex.imm_data));
3592                 length = qedr_prepare_sq_send_data(dev, qp, swqe, swqe2,
3593                                                    wr, bad_wr);
3594                 swqe->length = cpu_to_le32(length);
3595                 qp->wqe_wr_id[qp->sq.prod].wqe_size = swqe->wqe_size;
3596                 qp->prev_wqe_size = swqe->wqe_size;
3597                 qp->wqe_wr_id[qp->sq.prod].bytes_len = swqe->length;
3598                 break;
3599         case IB_WR_SEND:
3600                 wqe->req_type = RDMA_SQ_REQ_TYPE_SEND;
3601                 swqe = (struct rdma_sq_send_wqe_1st *)wqe;
3602
3603                 swqe->wqe_size = 2;
3604                 swqe2 = qed_chain_produce(&qp->sq.pbl);
3605                 length = qedr_prepare_sq_send_data(dev, qp, swqe, swqe2,
3606                                                    wr, bad_wr);
3607                 swqe->length = cpu_to_le32(length);
3608                 qp->wqe_wr_id[qp->sq.prod].wqe_size = swqe->wqe_size;
3609                 qp->prev_wqe_size = swqe->wqe_size;
3610                 qp->wqe_wr_id[qp->sq.prod].bytes_len = swqe->length;
3611                 break;
3612         case IB_WR_SEND_WITH_INV:
3613                 wqe->req_type = RDMA_SQ_REQ_TYPE_SEND_WITH_INVALIDATE;
3614                 swqe = (struct rdma_sq_send_wqe_1st *)wqe;
3615                 swqe2 = qed_chain_produce(&qp->sq.pbl);
3616                 swqe->wqe_size = 2;
3617                 swqe->inv_key_or_imm_data = cpu_to_le32(wr->ex.invalidate_rkey);
3618                 length = qedr_prepare_sq_send_data(dev, qp, swqe, swqe2,
3619                                                    wr, bad_wr);
3620                 swqe->length = cpu_to_le32(length);
3621                 qp->wqe_wr_id[qp->sq.prod].wqe_size = swqe->wqe_size;
3622                 qp->prev_wqe_size = swqe->wqe_size;
3623                 qp->wqe_wr_id[qp->sq.prod].bytes_len = swqe->length;
3624                 break;
3625
3626         case IB_WR_RDMA_WRITE_WITH_IMM:
3627                 if (unlikely(rdma_protocol_iwarp(&dev->ibdev, 1))) {
3628                         rc = -EINVAL;
3629                         *bad_wr = wr;
3630                         break;
3631                 }
3632                 wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_WR_WITH_IMM;
3633                 rwqe = (struct rdma_sq_rdma_wqe_1st *)wqe;
3634
3635                 rwqe->wqe_size = 2;
3636                 rwqe->imm_data = htonl(cpu_to_le32(wr->ex.imm_data));
3637                 rwqe2 = qed_chain_produce(&qp->sq.pbl);
3638                 length = qedr_prepare_sq_rdma_data(dev, qp, rwqe, rwqe2,
3639                                                    wr, bad_wr);
3640                 rwqe->length = cpu_to_le32(length);
3641                 qp->wqe_wr_id[qp->sq.prod].wqe_size = rwqe->wqe_size;
3642                 qp->prev_wqe_size = rwqe->wqe_size;
3643                 qp->wqe_wr_id[qp->sq.prod].bytes_len = rwqe->length;
3644                 break;
3645         case IB_WR_RDMA_WRITE:
3646                 wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_WR;
3647                 rwqe = (struct rdma_sq_rdma_wqe_1st *)wqe;
3648
3649                 rwqe->wqe_size = 2;
3650                 rwqe2 = qed_chain_produce(&qp->sq.pbl);
3651                 length = qedr_prepare_sq_rdma_data(dev, qp, rwqe, rwqe2,
3652                                                    wr, bad_wr);
3653                 rwqe->length = cpu_to_le32(length);
3654                 qp->wqe_wr_id[qp->sq.prod].wqe_size = rwqe->wqe_size;
3655                 qp->prev_wqe_size = rwqe->wqe_size;
3656                 qp->wqe_wr_id[qp->sq.prod].bytes_len = rwqe->length;
3657                 break;
3658         case IB_WR_RDMA_READ_WITH_INV:
3659                 SET_FIELD2(wqe->flags, RDMA_SQ_RDMA_WQE_1ST_READ_INV_FLG, 1);
3660                 fallthrough;    /* handled identically to RDMA_READ */
3661
3662         case IB_WR_RDMA_READ:
3663                 wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_RD;
3664                 rwqe = (struct rdma_sq_rdma_wqe_1st *)wqe;
3665
3666                 rwqe->wqe_size = 2;
3667                 rwqe2 = qed_chain_produce(&qp->sq.pbl);
3668                 length = qedr_prepare_sq_rdma_data(dev, qp, rwqe, rwqe2,
3669                                                    wr, bad_wr);
3670                 rwqe->length = cpu_to_le32(length);
3671                 qp->wqe_wr_id[qp->sq.prod].wqe_size = rwqe->wqe_size;
3672                 qp->prev_wqe_size = rwqe->wqe_size;
3673                 qp->wqe_wr_id[qp->sq.prod].bytes_len = rwqe->length;
3674                 break;
3675
3676         case IB_WR_ATOMIC_CMP_AND_SWP:
3677         case IB_WR_ATOMIC_FETCH_AND_ADD:
3678                 awqe1 = (struct rdma_sq_atomic_wqe_1st *)wqe;
3679                 awqe1->wqe_size = 4;
3680
3681                 awqe2 = qed_chain_produce(&qp->sq.pbl);
3682                 DMA_REGPAIR_LE(awqe2->remote_va, atomic_wr(wr)->remote_addr);
3683                 awqe2->r_key = cpu_to_le32(atomic_wr(wr)->rkey);
3684
3685                 awqe3 = qed_chain_produce(&qp->sq.pbl);
3686
3687                 if (wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
3688                         wqe->req_type = RDMA_SQ_REQ_TYPE_ATOMIC_ADD;
3689                         DMA_REGPAIR_LE(awqe3->swap_data,
3690                                        atomic_wr(wr)->compare_add);
3691                 } else {
3692                         wqe->req_type = RDMA_SQ_REQ_TYPE_ATOMIC_CMP_AND_SWAP;
3693                         DMA_REGPAIR_LE(awqe3->swap_data,
3694                                        atomic_wr(wr)->swap);
3695                         DMA_REGPAIR_LE(awqe3->cmp_data,
3696                                        atomic_wr(wr)->compare_add);
3697                 }
3698
3699                 qedr_prepare_sq_sges(qp, NULL, wr);
3700
3701                 qp->wqe_wr_id[qp->sq.prod].wqe_size = awqe1->wqe_size;
3702                 qp->prev_wqe_size = awqe1->wqe_size;
3703                 break;
3704
3705         case IB_WR_LOCAL_INV:
3706                 iwqe = (struct rdma_sq_local_inv_wqe *)wqe;
3707                 iwqe->wqe_size = 1;
3708
3709                 iwqe->req_type = RDMA_SQ_REQ_TYPE_LOCAL_INVALIDATE;
3710                 iwqe->inv_l_key = wr->ex.invalidate_rkey;
3711                 qp->wqe_wr_id[qp->sq.prod].wqe_size = iwqe->wqe_size;
3712                 qp->prev_wqe_size = iwqe->wqe_size;
3713                 break;
3714         case IB_WR_REG_MR:
3715                 DP_DEBUG(dev, QEDR_MSG_CQ, "REG_MR\n");
3716                 wqe->req_type = RDMA_SQ_REQ_TYPE_FAST_MR;
3717                 fwqe1 = (struct rdma_sq_fmr_wqe_1st *)wqe;
3718                 fwqe1->wqe_size = 2;
3719
3720                 rc = qedr_prepare_reg(qp, fwqe1, reg_wr(wr));
3721                 if (rc) {
3722                         DP_ERR(dev, "IB_REG_MR failed rc=%d\n", rc);
3723                         *bad_wr = wr;
3724                         break;
3725                 }
3726
3727                 qp->wqe_wr_id[qp->sq.prod].wqe_size = fwqe1->wqe_size;
3728                 qp->prev_wqe_size = fwqe1->wqe_size;
3729                 break;
3730         default:
3731                 DP_ERR(dev, "invalid opcode 0x%x!\n", wr->opcode);
3732                 rc = -EINVAL;
3733                 *bad_wr = wr;
3734                 break;
3735         }
3736
3737         if (*bad_wr) {
3738                 u16 value;
3739
3740                 /* Restore prod to its position before
3741                  * this WR was processed
3742                  */
3743                 value = le16_to_cpu(qp->sq.db_data.data.value);
3744                 qed_chain_set_prod(&qp->sq.pbl, value, wqe);
3745
3746                 /* Restore prev_wqe_size */
3747                 qp->prev_wqe_size = wqe->prev_wqe_size;
3748                 rc = -EINVAL;
3749                 DP_ERR(dev, "POST SEND FAILED\n");
3750         }
3751
3752         return rc;
3753 }
3754
3755 int qedr_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
3756                    const struct ib_send_wr **bad_wr)
3757 {
3758         struct qedr_dev *dev = get_qedr_dev(ibqp->device);
3759         struct qedr_qp *qp = get_qedr_qp(ibqp);
3760         unsigned long flags;
3761         int rc = 0;
3762
3763         *bad_wr = NULL;
3764
3765         if (qp->qp_type == IB_QPT_GSI)
3766                 return qedr_gsi_post_send(ibqp, wr, bad_wr);
3767
3768         spin_lock_irqsave(&qp->q_lock, flags);
3769
3770         if (rdma_protocol_roce(&dev->ibdev, 1)) {
3771                 if ((qp->state != QED_ROCE_QP_STATE_RTS) &&
3772                     (qp->state != QED_ROCE_QP_STATE_ERR) &&
3773                     (qp->state != QED_ROCE_QP_STATE_SQD)) {
3774                         spin_unlock_irqrestore(&qp->q_lock, flags);
3775                         *bad_wr = wr;
3776                         DP_DEBUG(dev, QEDR_MSG_CQ,
3777                                  "QP in wrong state! QP icid=0x%x state %d\n",
3778                                  qp->icid, qp->state);
3779                         return -EINVAL;
3780                 }
3781         }
3782
3783         while (wr) {
3784                 rc = __qedr_post_send(ibqp, wr, bad_wr);
3785                 if (rc)
3786                         break;
3787
3788                 qp->wqe_wr_id[qp->sq.prod].wr_id = wr->wr_id;
3789
3790                 qedr_inc_sw_prod(&qp->sq);
3791
3792                 qp->sq.db_data.data.value++;
3793
3794                 wr = wr->next;
3795         }
3796
3797         /* Trigger doorbell
3798          * If there was a failure in the first WR then it will be triggered in
3799          * vain. However, this is not harmful (as long as the producer value is
3800          * unchanged). For performance reasons we avoid checking for this
3801          * redundant doorbell.
3802          *
3803          * qp->wqe_wr_id is accessed during qedr_poll_cq, as
3804          * soon as we give the doorbell, we could get a completion
3805          * for this wr, therefore we need to make sure that the
3806          * memory is updated before giving the doorbell.
3807          * During qedr_poll_cq, rmb is called before accessing the
3808          * cqe. This covers for the smp_rmb as well.
3809          */
3810         smp_wmb();
3811         writel(qp->sq.db_data.raw, qp->sq.db);
3812
3813         spin_unlock_irqrestore(&qp->q_lock, flags);
3814
3815         return rc;
3816 }
3817
3818 static u32 qedr_srq_elem_left(struct qedr_srq_hwq_info *hw_srq)
3819 {
3820         u32 used;
3821
3822         /* Calculate the number of elements used from the producer and
3823          * consumer counts, and subtract it from the max number of work
3824          * requests supported to get the number of elements left.
3825          */
3826         used = hw_srq->wr_prod_cnt - (u32)atomic_read(&hw_srq->wr_cons_cnt);
3827
3828         return hw_srq->max_wr - used;
3829 }
3830
3831 int qedr_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
3832                        const struct ib_recv_wr **bad_wr)
3833 {
3834         struct qedr_srq *srq = get_qedr_srq(ibsrq);
3835         struct qedr_srq_hwq_info *hw_srq;
3836         struct qedr_dev *dev = srq->dev;
3837         struct qed_chain *pbl;
3838         unsigned long flags;
3839         int status = 0;
3840         u32 num_sge;
3841
3842         spin_lock_irqsave(&srq->lock, flags);
3843
3844         hw_srq = &srq->hw_srq;
3845         pbl = &srq->hw_srq.pbl;
3846         while (wr) {
3847                 struct rdma_srq_wqe_header *hdr;
3848                 int i;
3849
3850                 if (!qedr_srq_elem_left(hw_srq) ||
3851                     wr->num_sge > srq->hw_srq.max_sges) {
3852                         DP_ERR(dev, "Can't post WR  (%d,%d) || (%d > %d)\n",
3853                                hw_srq->wr_prod_cnt,
3854                                atomic_read(&hw_srq->wr_cons_cnt),
3855                                wr->num_sge, srq->hw_srq.max_sges);
3856                         status = -ENOMEM;
3857                         *bad_wr = wr;
3858                         break;
3859                 }
3860
3861                 hdr = qed_chain_produce(pbl);
3862                 num_sge = wr->num_sge;
3863                 /* Set the number of SGEs and the work request id in the header */
3864                 SRQ_HDR_SET(hdr, wr->wr_id, num_sge);
3865
3866                 srq->hw_srq.wr_prod_cnt++;
3867                 hw_srq->wqe_prod++;
3868                 hw_srq->sge_prod++;
3869
3870                 DP_DEBUG(dev, QEDR_MSG_SRQ,
3871                          "SRQ WR: SGEs: %d with wr_id[%d] = %llx\n",
3872                          wr->num_sge, hw_srq->wqe_prod, wr->wr_id);
3873
3874                 for (i = 0; i < wr->num_sge; i++) {
3875                         struct rdma_srq_sge *srq_sge = qed_chain_produce(pbl);
3876
3877                         /* Set SGE length, lkey and address */
3878                         SRQ_SGE_SET(srq_sge, wr->sg_list[i].addr,
3879                                     wr->sg_list[i].length, wr->sg_list[i].lkey);
3880
3881                         DP_DEBUG(dev, QEDR_MSG_SRQ,
3882                                  "[%d]: len %d key %x addr %x:%x\n",
3883                                  i, srq_sge->length, srq_sge->l_key,
3884                                  srq_sge->addr.hi, srq_sge->addr.lo);
3885                         hw_srq->sge_prod++;
3886                 }
3887
3888                 /* Update WQE and SGE information before
3889                  * updating producer.
3890                  */
3891                 dma_wmb();
3892
3893                 /* The SRQ producer is 8 bytes: the SGE producer index goes in
3894                  * the first 4 bytes and the WQE producer in the next
3895                  * 4 bytes.
3896                  */
3897                 srq->hw_srq.virt_prod_pair_addr->sge_prod = cpu_to_le32(hw_srq->sge_prod);
3898                 /* Make sure sge producer is updated first */
3899                 dma_wmb();
3900                 srq->hw_srq.virt_prod_pair_addr->wqe_prod = cpu_to_le32(hw_srq->wqe_prod);
3901
3902                 wr = wr->next;
3903         }
3904
3905         DP_DEBUG(dev, QEDR_MSG_SRQ, "POST: Elements in S-RQ: %d\n",
3906                  qed_chain_get_elem_left(pbl));
3907         spin_unlock_irqrestore(&srq->lock, flags);
3908
3909         return status;
3910 }
3911
3912 int qedr_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
3913                    const struct ib_recv_wr **bad_wr)
3914 {
3915         struct qedr_qp *qp = get_qedr_qp(ibqp);
3916         struct qedr_dev *dev = qp->dev;
3917         unsigned long flags;
3918         int status = 0;
3919
3920         if (qp->qp_type == IB_QPT_GSI)
3921                 return qedr_gsi_post_recv(ibqp, wr, bad_wr);
3922
3923         spin_lock_irqsave(&qp->q_lock, flags);
3924
3925         while (wr) {
3926                 int i;
3927
3928                 if (qed_chain_get_elem_left_u32(&qp->rq.pbl) <
3929                     QEDR_MAX_RQE_ELEMENTS_PER_RQE ||
3930                     wr->num_sge > qp->rq.max_sges) {
3931                         DP_ERR(dev, "Can't post WR  (%d < %d) || (%d > %d)\n",
3932                                qed_chain_get_elem_left_u32(&qp->rq.pbl),
3933                                QEDR_MAX_RQE_ELEMENTS_PER_RQE, wr->num_sge,
3934                                qp->rq.max_sges);
3935                         status = -ENOMEM;
3936                         *bad_wr = wr;
3937                         break;
3938                 }
3939                 for (i = 0; i < wr->num_sge; i++) {
3940                         u32 flags = 0;
3941                         struct rdma_rq_sge *rqe =
3942                             qed_chain_produce(&qp->rq.pbl);
3943
3944                         /* First one must include the number
3945                          * of SGEs in the list
3946                          */
3947                         if (!i)
3948                                 SET_FIELD(flags, RDMA_RQ_SGE_NUM_SGES,
3949                                           wr->num_sge);
3950
3951                         SET_FIELD(flags, RDMA_RQ_SGE_L_KEY_LO,
3952                                   wr->sg_list[i].lkey);
3953
3954                         RQ_SGE_SET(rqe, wr->sg_list[i].addr,
3955                                    wr->sg_list[i].length, flags);
3956                 }
3957
3958                 /* Special case of no SGEs: FW requires between 1-4 SGEs, so in
3959                  * this case we need to post one SGE with length zero. This is
3960                  * because an RDMA write with immediate consumes an RQ entry.
3961                  */
3962                 if (!wr->num_sge) {
3963                         u32 flags = 0;
3964                         struct rdma_rq_sge *rqe =
3965                             qed_chain_produce(&qp->rq.pbl);
3966
3967                         /* First one must include the number
3968                          * of SGEs in the list
3969                          */
3970                         SET_FIELD(flags, RDMA_RQ_SGE_L_KEY_LO, 0);
3971                         SET_FIELD(flags, RDMA_RQ_SGE_NUM_SGES, 1);
3972
3973                         RQ_SGE_SET(rqe, 0, 0, flags);
3974                         i = 1;
3975                 }
3976
3977                 qp->rqe_wr_id[qp->rq.prod].wr_id = wr->wr_id;
3978                 qp->rqe_wr_id[qp->rq.prod].wqe_size = i;
3979
3980                 qedr_inc_sw_prod(&qp->rq);
3981
3982                 /* qp->rqe_wr_id is accessed during qedr_poll_cq, as
3983                  * soon as we give the doorbell, we could get a completion
3984                  * for this wr, therefore we need to make sure that the
3985                  * memory is updated before giving the doorbell.
3986                  * During qedr_poll_cq, rmb is called before accessing the
3987                  * cqe. This covers for the smp_rmb as well.
3988                  */
3989                 smp_wmb();
3990
3991                 qp->rq.db_data.data.value++;
3992
3993                 writel(qp->rq.db_data.raw, qp->rq.db);
3994
3995                 if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
3996                         writel(qp->rq.iwarp_db2_data.raw, qp->rq.iwarp_db2);
3997                 }
3998
3999                 wr = wr->next;
4000         }
4001
4002         spin_unlock_irqrestore(&qp->q_lock, flags);
4003
4004         return status;
4005 }
4006
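/* A CQE is owned by software when its toggle bit matches the CQ's
 * current phase, which flips each time the CQ ring wraps around.
 */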
4007 static int is_valid_cqe(struct qedr_cq *cq, union rdma_cqe *cqe)
4008 {
4009         struct rdma_cqe_requester *resp_cqe = &cqe->req;
4010
4011         return (resp_cqe->flags & RDMA_CQE_REQUESTER_TOGGLE_BIT_MASK) ==
4012                 cq->pbl_toggle;
4013 }
4014
4015 static struct qedr_qp *cqe_get_qp(union rdma_cqe *cqe)
4016 {
4017         struct rdma_cqe_requester *resp_cqe = &cqe->req;
4018         struct qedr_qp *qp;
4019
4020         qp = (struct qedr_qp *)(uintptr_t)HILO_GEN(resp_cqe->qp_handle.hi,
4021                                                    resp_cqe->qp_handle.lo,
4022                                                    u64);
4023         return qp;
4024 }
4025
4026 static enum rdma_cqe_type cqe_get_type(union rdma_cqe *cqe)
4027 {
4028         struct rdma_cqe_requester *resp_cqe = &cqe->req;
4029
4030         return GET_FIELD(resp_cqe->flags, RDMA_CQE_REQUESTER_TYPE);
4031 }
4032
4033 /* Return latest CQE (needs processing) */
4034 static union rdma_cqe *get_cqe(struct qedr_cq *cq)
4035 {
4036         return cq->latest_cqe;
4037 }
4038
4039 /* For FMR we need to increase the 'completed' counter used by the
4040  * algorithm that decides whether a PBL can be freed or not. We need to
4041  * do this whether or not the work request was signaled; for that reason
4042  * this function is called from the condition that checks if a WC should
4043  * be skipped, to make sure we don't miss it (possibly this FMR
4044  * operation was not signaled).
4045  */
4046 static inline void qedr_chk_if_fmr(struct qedr_qp *qp)
4047 {
4048         if (qp->wqe_wr_id[qp->sq.cons].opcode == IB_WC_REG_MR)
4049                 qp->wqe_wr_id[qp->sq.cons].mr->info.completed++;
4050 }
4051
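/* Consume SQ work requests up to hw_cons. Unsignaled WRs are skipped
 * (while still accounting for FMR completions); signaled ones fill up
 * to num_entries work completions with the given status. 'force' makes
 * even unsignaled WRs generate a WC, as is needed when flushing.
 */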
4052 static int process_req(struct qedr_dev *dev, struct qedr_qp *qp,
4053                        struct qedr_cq *cq, int num_entries,
4054                        struct ib_wc *wc, u16 hw_cons, enum ib_wc_status status,
4055                        int force)
4056 {
4057         u16 cnt = 0;
4058
4059         while (num_entries && qp->sq.wqe_cons != hw_cons) {
4060                 if (!qp->wqe_wr_id[qp->sq.cons].signaled && !force) {
4061                         qedr_chk_if_fmr(qp);
4062                         /* skip WC */
4063                         goto next_cqe;
4064                 }
4065
4066                 /* fill WC */
4067                 wc->status = status;
4068                 wc->vendor_err = 0;
4069                 wc->wc_flags = 0;
4070                 wc->src_qp = qp->id;
4071                 wc->qp = &qp->ibqp;
4072
4073                 wc->wr_id = qp->wqe_wr_id[qp->sq.cons].wr_id;
4074                 wc->opcode = qp->wqe_wr_id[qp->sq.cons].opcode;
4075
4076                 switch (wc->opcode) {
4077                 case IB_WC_RDMA_WRITE:
4078                         wc->byte_len = qp->wqe_wr_id[qp->sq.cons].bytes_len;
4079                         break;
4080                 case IB_WC_COMP_SWAP:
4081                 case IB_WC_FETCH_ADD:
4082                         wc->byte_len = 8;
4083                         break;
4084                 case IB_WC_REG_MR:
4085                         qp->wqe_wr_id[qp->sq.cons].mr->info.completed++;
4086                         break;
4087                 case IB_WC_RDMA_READ:
4088                 case IB_WC_SEND:
4089                         wc->byte_len = qp->wqe_wr_id[qp->sq.cons].bytes_len;
4090                         break;
4091                 default:
4092                         break;
4093                 }
4094
4095                 num_entries--;
4096                 wc++;
4097                 cnt++;
4098 next_cqe:
4099                 while (qp->wqe_wr_id[qp->sq.cons].wqe_size--)
4100                         qed_chain_consume(&qp->sq.pbl);
4101                 qedr_inc_sw_cons(&qp->sq);
4102         }
4103
4104         return cnt;
4105 }
4106
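/* Process a requester CQE. Success and flush statuses complete WRs up
 * to req->sq_cons; for any other error, the preceding WRs are completed
 * as successful and the error itself is reported in one extra WC, with
 * the QP moved to the error state.
 */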
4107 static int qedr_poll_cq_req(struct qedr_dev *dev,
4108                             struct qedr_qp *qp, struct qedr_cq *cq,
4109                             int num_entries, struct ib_wc *wc,
4110                             struct rdma_cqe_requester *req)
4111 {
4112         int cnt = 0;
4113
4114         switch (req->status) {
4115         case RDMA_CQE_REQ_STS_OK:
4116                 cnt = process_req(dev, qp, cq, num_entries, wc, req->sq_cons,
4117                                   IB_WC_SUCCESS, 0);
4118                 break;
4119         case RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR:
4120                 if (qp->state != QED_ROCE_QP_STATE_ERR)
4121                         DP_DEBUG(dev, QEDR_MSG_CQ,
4122                                  "Error: POLL CQ with RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR. CQ icid=0x%x, QP icid=0x%x\n",
4123                                  cq->icid, qp->icid);
4124                 cnt = process_req(dev, qp, cq, num_entries, wc, req->sq_cons,
4125                                   IB_WC_WR_FLUSH_ERR, 1);
4126                 break;
4127         default:
4128                 /* Process all WQEs before the consumer */
4129                 qp->state = QED_ROCE_QP_STATE_ERR;
4130                 cnt = process_req(dev, qp, cq, num_entries, wc,
4131                                   req->sq_cons - 1, IB_WC_SUCCESS, 0);
4132                 wc += cnt;
4133                 /* if we have extra WC fill it with actual error info */
4134                 if (cnt < num_entries) {
4135                         enum ib_wc_status wc_status;
4136
4137                         switch (req->status) {
4138                         case RDMA_CQE_REQ_STS_BAD_RESPONSE_ERR:
4139                                 DP_ERR(dev,
4140                                        "Error: POLL CQ with RDMA_CQE_REQ_STS_BAD_RESPONSE_ERR. CQ icid=0x%x, QP icid=0x%x\n",
4141                                        cq->icid, qp->icid);
4142                                 wc_status = IB_WC_BAD_RESP_ERR;
4143                                 break;
4144                         case RDMA_CQE_REQ_STS_LOCAL_LENGTH_ERR:
4145                                 DP_ERR(dev,
4146                                        "Error: POLL CQ with RDMA_CQE_REQ_STS_LOCAL_LENGTH_ERR. CQ icid=0x%x, QP icid=0x%x\n",
4147                                        cq->icid, qp->icid);
4148                                 wc_status = IB_WC_LOC_LEN_ERR;
4149                                 break;
4150                         case RDMA_CQE_REQ_STS_LOCAL_QP_OPERATION_ERR:
4151                                 DP_ERR(dev,
4152                                        "Error: POLL CQ with RDMA_CQE_REQ_STS_LOCAL_QP_OPERATION_ERR. CQ icid=0x%x, QP icid=0x%x\n",
4153                                        cq->icid, qp->icid);
4154                                 wc_status = IB_WC_LOC_QP_OP_ERR;
4155                                 break;
4156                         case RDMA_CQE_REQ_STS_LOCAL_PROTECTION_ERR:
4157                                 DP_ERR(dev,
4158                                        "Error: POLL CQ with RDMA_CQE_REQ_STS_LOCAL_PROTECTION_ERR. CQ icid=0x%x, QP icid=0x%x\n",
4159                                        cq->icid, qp->icid);
4160                                 wc_status = IB_WC_LOC_PROT_ERR;
4161                                 break;
4162                         case RDMA_CQE_REQ_STS_MEMORY_MGT_OPERATION_ERR:
4163                                 DP_ERR(dev,
4164                                        "Error: POLL CQ with RDMA_CQE_REQ_STS_MEMORY_MGT_OPERATION_ERR. CQ icid=0x%x, QP icid=0x%x\n",
4165                                        cq->icid, qp->icid);
4166                                 wc_status = IB_WC_MW_BIND_ERR;
4167                                 break;
4168                         case RDMA_CQE_REQ_STS_REMOTE_INVALID_REQUEST_ERR:
4169                                 DP_ERR(dev,
4170                                        "Error: POLL CQ with RDMA_CQE_REQ_STS_REMOTE_INVALID_REQUEST_ERR. CQ icid=0x%x, QP icid=0x%x\n",
4171                                        cq->icid, qp->icid);
4172                                 wc_status = IB_WC_REM_INV_REQ_ERR;
4173                                 break;
4174                         case RDMA_CQE_REQ_STS_REMOTE_ACCESS_ERR:
4175                                 DP_ERR(dev,
4176                                        "Error: POLL CQ with RDMA_CQE_REQ_STS_REMOTE_ACCESS_ERR. CQ icid=0x%x, QP icid=0x%x\n",
4177                                        cq->icid, qp->icid);
4178                                 wc_status = IB_WC_REM_ACCESS_ERR;
4179                                 break;
4180                         case RDMA_CQE_REQ_STS_REMOTE_OPERATION_ERR:
4181                                 DP_ERR(dev,
4182                                        "Error: POLL CQ with RDMA_CQE_REQ_STS_REMOTE_OPERATION_ERR. CQ icid=0x%x, QP icid=0x%x\n",
4183                                        cq->icid, qp->icid);
4184                                 wc_status = IB_WC_REM_OP_ERR;
4185                                 break;
4186                         case RDMA_CQE_REQ_STS_RNR_NAK_RETRY_CNT_ERR:
4187                                 DP_ERR(dev,
4188                                        "Error: POLL CQ with RDMA_CQE_REQ_STS_RNR_NAK_RETRY_CNT_ERR. CQ icid=0x%x, QP icid=0x%x\n",
4189                                        cq->icid, qp->icid);
4190                                 wc_status = IB_WC_RNR_RETRY_EXC_ERR;
4191                                 break;
4192                         case RDMA_CQE_REQ_STS_TRANSPORT_RETRY_CNT_ERR:
4193                                 DP_ERR(dev,
4194                                        "Error: POLL CQ with RDMA_CQE_REQ_STS_TRANSPORT_RETRY_CNT_ERR. CQ icid=0x%x, QP icid=0x%x\n",
4195                                        cq->icid, qp->icid);
4196                                 wc_status = IB_WC_RETRY_EXC_ERR;
4197                                 break;
4198                         default:
4199                                 DP_ERR(dev,
4200                                        "Error: POLL CQ with IB_WC_GENERAL_ERR. CQ icid=0x%x, QP icid=0x%x\n",
4201                                        cq->icid, qp->icid);
4202                                 wc_status = IB_WC_GENERAL_ERR;
4203                         }
4204                         cnt += process_req(dev, qp, cq, 1, wc, req->sq_cons,
4205                                            wc_status, 1);
4206                 }
4207         }
4208
4209         return cnt;
4210 }
4211
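/* Map a hardware responder CQE status onto the corresponding IB work
 * completion status; unrecognized values become IB_WC_GENERAL_ERR.
 */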
4212 static inline int qedr_cqe_resp_status_to_ib(u8 status)
4213 {
4214         switch (status) {
4215         case RDMA_CQE_RESP_STS_LOCAL_ACCESS_ERR:
4216                 return IB_WC_LOC_ACCESS_ERR;
4217         case RDMA_CQE_RESP_STS_LOCAL_LENGTH_ERR:
4218                 return IB_WC_LOC_LEN_ERR;
4219         case RDMA_CQE_RESP_STS_LOCAL_QP_OPERATION_ERR:
4220                 return IB_WC_LOC_QP_OP_ERR;
4221         case RDMA_CQE_RESP_STS_LOCAL_PROTECTION_ERR:
4222                 return IB_WC_LOC_PROT_ERR;
4223         case RDMA_CQE_RESP_STS_MEMORY_MGT_OPERATION_ERR:
4224                 return IB_WC_MW_BIND_ERR;
4225         case RDMA_CQE_RESP_STS_REMOTE_INVALID_REQUEST_ERR:
4226                 return IB_WC_REM_INV_RD_REQ_ERR;
4227         case RDMA_CQE_RESP_STS_OK:
4228                 return IB_WC_SUCCESS;
4229         default:
4230                 return IB_WC_GENERAL_ERR;
4231         }
4232 }
4233
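/* Fill a successful responder WC from the CQE flags. Immediate data and
 * invalidation are mutually exclusive, and RDMA is only valid together
 * with immediate data; any other combination is a malformed CQE (-EINVAL).
 */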
4234 static inline int qedr_set_ok_cqe_resp_wc(struct rdma_cqe_responder *resp,
4235                                           struct ib_wc *wc)
4236 {
4237         wc->status = IB_WC_SUCCESS;
4238         wc->byte_len = le32_to_cpu(resp->length);
4239
4240         if (resp->flags & QEDR_RESP_IMM) {
4241                 wc->ex.imm_data = cpu_to_be32(le32_to_cpu(resp->imm_data_or_inv_r_Key));
4242                 wc->wc_flags |= IB_WC_WITH_IMM;
4243
4244                 if (resp->flags & QEDR_RESP_RDMA)
4245                         wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
4246
4247                 if (resp->flags & QEDR_RESP_INV)
4248                         return -EINVAL;
4249
4250         } else if (resp->flags & QEDR_RESP_INV) {
4251                 wc->ex.imm_data = le32_to_cpu(resp->imm_data_or_inv_r_Key);
4252                 wc->wc_flags |= IB_WC_WITH_INVALIDATE;
4253
4254                 if (resp->flags & QEDR_RESP_RDMA)
4255                         return -EINVAL;
4256
4257         } else if (resp->flags & QEDR_RESP_RDMA) {
4258                 return -EINVAL;
4259         }
4260
4261         return 0;
4262 }
4263
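/* Responder-CQE handling common to the SRQ and regular RQ paths. */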
4264 static void __process_resp_one(struct qedr_dev *dev, struct qedr_qp *qp,
4265                                struct qedr_cq *cq, struct ib_wc *wc,
4266                                struct rdma_cqe_responder *resp, u64 wr_id)
4267 {
4268         /* Must fill fields before qedr_set_ok_cqe_resp_wc() */
4269         wc->opcode = IB_WC_RECV;
4270         wc->wc_flags = 0;
4271
4272         if (likely(resp->status == RDMA_CQE_RESP_STS_OK)) {
4273                 if (qedr_set_ok_cqe_resp_wc(resp, wc))
4274                         DP_ERR(dev,
4275                                "CQ %p (icid=%d) has invalid CQE responder flags=0x%x\n",
4276                                cq, cq->icid, resp->flags);
4277
4278         } else {
4279                 wc->status = qedr_cqe_resp_status_to_ib(resp->status);
4280                 if (wc->status == IB_WC_GENERAL_ERR)
4281                         DP_ERR(dev,
4282                                "CQ %p (icid=%d) contains an invalid CQE status %d\n",
4283                                cq, cq->icid, resp->status);
4284         }
4285
4286         /* Fill the rest of the WC */
4287         wc->vendor_err = 0;
4288         wc->src_qp = qp->id;
4289         wc->qp = &qp->ibqp;
4290         wc->wr_id = wr_id;
4291 }
4292
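/* On the SRQ path the wr_id is recovered from the hi/lo words carried in
 * the CQE itself rather than from a per-QP receive ring.
 */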
4293 static int process_resp_one_srq(struct qedr_dev *dev, struct qedr_qp *qp,
4294                                 struct qedr_cq *cq, struct ib_wc *wc,
4295                                 struct rdma_cqe_responder *resp)
4296 {
4297         struct qedr_srq *srq = qp->srq;
4298         u64 wr_id;
4299
4300         wr_id = HILO_GEN(le32_to_cpu(resp->srq_wr_id.hi),
4301                          le32_to_cpu(resp->srq_wr_id.lo), u64);
4302
4303         if (resp->status == RDMA_CQE_RESP_STS_WORK_REQUEST_FLUSHED_ERR) {
4304                 wc->status = IB_WC_WR_FLUSH_ERR;
4305                 wc->vendor_err = 0;
4306                 wc->wr_id = wr_id;
4307                 wc->byte_len = 0;
4308                 wc->src_qp = qp->id;
4309                 wc->qp = &qp->ibqp;
4311         } else {
4312                 __process_resp_one(dev, qp, cq, wc, resp, wr_id);
4313         }
4314         atomic_inc(&srq->hw_srq.wr_cons_cnt);
4315
4316         return 1;
4317 }
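
/* Regular RQ path: the wr_id comes from the software receive ring, and the
 * consumed WQE's chain elements are popped before the software consumer is
 * advanced.
 */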
4318 static int process_resp_one(struct qedr_dev *dev, struct qedr_qp *qp,
4319                             struct qedr_cq *cq, struct ib_wc *wc,
4320                             struct rdma_cqe_responder *resp)
4321 {
4322         u64 wr_id = qp->rqe_wr_id[qp->rq.cons].wr_id;
4323
4324         __process_resp_one(dev, qp, cq, wc, resp, wr_id);
4325
4326         while (qp->rqe_wr_id[qp->rq.cons].wqe_size--)
4327                 qed_chain_consume(&qp->rq.pbl);
4328         qedr_inc_sw_cons(&qp->rq);
4329
4330         return 1;
4331 }
4332
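/* Generate flush completions for every RQ WQE still outstanding, up to the
 * hardware consumer index reported by the flushed CQE.
 */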
4333 static int process_resp_flush(struct qedr_qp *qp, struct qedr_cq *cq,
4334                               int num_entries, struct ib_wc *wc, u16 hw_cons)
4335 {
4336         u16 cnt = 0;
4337
4338         while (num_entries && qp->rq.wqe_cons != hw_cons) {
4339                 /* fill WC */
4340                 wc->status = IB_WC_WR_FLUSH_ERR;
4341                 wc->vendor_err = 0;
4342                 wc->wc_flags = 0;
4343                 wc->src_qp = qp->id;
4344                 wc->byte_len = 0;
4345                 wc->wr_id = qp->rqe_wr_id[qp->rq.cons].wr_id;
4346                 wc->qp = &qp->ibqp;
4347                 num_entries--;
4348                 wc++;
4349                 cnt++;
4350                 while (qp->rqe_wr_id[qp->rq.cons].wqe_size--)
4351                         qed_chain_consume(&qp->rq.pbl);
4352                 qedr_inc_sw_cons(&qp->rq);
4353         }
4354
4355         return cnt;
4356 }
4357
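/* A responder CQE may cover several WQEs; consume it only once the
 * software consumer index has caught up with the index the CQE reports.
 */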
4358 static void try_consume_resp_cqe(struct qedr_cq *cq, struct qedr_qp *qp,
4359                                  struct rdma_cqe_responder *resp, int *update)
4360 {
4361         if (le16_to_cpu(resp->rq_cons_or_srq_id) == qp->rq.wqe_cons) {
4362                 consume_cqe(cq);
4363                 *update |= 1;
4364         }
4365 }
4366
4367 static int qedr_poll_cq_resp_srq(struct qedr_dev *dev, struct qedr_qp *qp,
4368                                  struct qedr_cq *cq, int num_entries,
4369                                  struct ib_wc *wc,
4370                                  struct rdma_cqe_responder *resp)
4371 {
4372         int cnt;
4373
4374         cnt = process_resp_one_srq(dev, qp, cq, wc, resp);
4375         consume_cqe(cq);
4376
4377         return cnt;
4378 }
4379
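/* Dispatch a responder CQE: a flushed CQE may expand into one flush WC per
 * outstanding WQE, while a normal CQE always yields exactly one WC.
 */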
4380 static int qedr_poll_cq_resp(struct qedr_dev *dev, struct qedr_qp *qp,
4381                              struct qedr_cq *cq, int num_entries,
4382                              struct ib_wc *wc, struct rdma_cqe_responder *resp,
4383                              int *update)
4384 {
4385         int cnt;
4386
4387         if (resp->status == RDMA_CQE_RESP_STS_WORK_REQUEST_FLUSHED_ERR) {
4388                 cnt = process_resp_flush(qp, cq, num_entries, wc,
4389                                          resp->rq_cons_or_srq_id);
4390                 try_consume_resp_cqe(cq, qp, resp, update);
4391         } else {
4392                 cnt = process_resp_one(dev, qp, cq, wc, resp);
4393                 consume_cqe(cq);
4394                 *update |= 1;
4395         }
4396
4397         return cnt;
4398 }
4399
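/* As on the responder side, only consume the requester CQE once all the SQ
 * WQEs it covers have been completed.
 */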
4400 static void try_consume_req_cqe(struct qedr_cq *cq, struct qedr_qp *qp,
4401                                 struct rdma_cqe_requester *req, int *update)
4402 {
4403         if (le16_to_cpu(req->sq_cons) == qp->sq.wqe_cons) {
4404                 consume_cqe(cq);
4405                 *update |= 1;
4406         }
4407 }
4408
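/* Main poll loop: under cq_lock, walk the valid CQEs, dispatch each by type
 * (requester, responder RQ, responder SRQ) and finally ring the consumer
 * doorbell so the device knows how far software has advanced. GSI CQs are
 * polled entirely in software via qedr_gsi_poll_cq().
 */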
4409 int qedr_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
4410 {
4411         struct qedr_dev *dev = get_qedr_dev(ibcq->device);
4412         struct qedr_cq *cq = get_qedr_cq(ibcq);
4413         union rdma_cqe *cqe;
4414         u32 old_cons, new_cons;
4415         unsigned long flags;
4416         int update = 0;
4417         int done = 0;
4418
4419         if (cq->destroyed) {
4420                 DP_ERR(dev,
4421                        "warning: poll was invoked after destroy for cq %p (icid=%d)\n",
4422                        cq, cq->icid);
4423                 return 0;
4424         }
4425
4426         if (cq->cq_type == QEDR_CQ_TYPE_GSI)
4427                 return qedr_gsi_poll_cq(ibcq, num_entries, wc);
4428
4429         spin_lock_irqsave(&cq->cq_lock, flags);
4430         cqe = cq->latest_cqe;
4431         old_cons = qed_chain_get_cons_idx_u32(&cq->pbl);
4432         while (num_entries && is_valid_cqe(cq, cqe)) {
4433                 struct qedr_qp *qp;
4434                 int cnt = 0;
4435
4436                 /* prevent speculative reads of any field of CQE */
4437                 rmb();
4438
4439                 qp = cqe_get_qp(cqe);
4440                 if (!qp) {
4441                         WARN(1, "Error: CQE QP pointer is NULL. CQE=%p\n", cqe);
4442                         break;
4443                 }
4444
4445                 wc->qp = &qp->ibqp;
4446
4447                 switch (cqe_get_type(cqe)) {
4448                 case RDMA_CQE_TYPE_REQUESTER:
4449                         cnt = qedr_poll_cq_req(dev, qp, cq, num_entries, wc,
4450                                                &cqe->req);
4451                         try_consume_req_cqe(cq, qp, &cqe->req, &update);
4452                         break;
4453                 case RDMA_CQE_TYPE_RESPONDER_RQ:
4454                         cnt = qedr_poll_cq_resp(dev, qp, cq, num_entries, wc,
4455                                                 &cqe->resp, &update);
4456                         break;
4457                 case RDMA_CQE_TYPE_RESPONDER_SRQ:
4458                         cnt = qedr_poll_cq_resp_srq(dev, qp, cq, num_entries,
4459                                                     wc, &cqe->resp);
4460                         update = 1;
4461                         break;
4462                 case RDMA_CQE_TYPE_INVALID:
4463                 default:
4464                         DP_ERR(dev, "Error: invalid CQE type = %d\n",
4465                                cqe_get_type(cqe));
4466                 }
4467                 num_entries -= cnt;
4468                 wc += cnt;
4469                 done += cnt;
4470
4471                 cqe = get_cqe(cq);
4472         }
4473         new_cons = qed_chain_get_cons_idx_u32(&cq->pbl);
4474
4475         cq->cq_cons += new_cons - old_cons;
4476
4477         if (update)
4478                 /* The doorbell notifies about the latest VALID entry,
4479                  * but the chain already points to the next INVALID one.
4480                  */
4481                 doorbell_cq(cq, cq->cq_cons - 1, cq->arm_flags);
4482
4483         spin_unlock_irqrestore(&cq->cq_lock, flags);
4484         return done;
4485 }
4486
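/* MADs are not processed by this device; report success so the MAD layer
 * continues normally.
 */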
4487 int qedr_process_mad(struct ib_device *ibdev, int process_mad_flags,
4488                      u32 port_num, const struct ib_wc *in_wc,
4489                      const struct ib_grh *in_grh, const struct ib_mad *in,
4490                      struct ib_mad *out_mad, size_t *out_mad_size,
4491                      u16 *out_mad_pkey_index)
4492 {
4493         return IB_MAD_RESULT_SUCCESS;
4494 }