GNU Linux-libre 4.9.287-gnu1
drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
/* This file is part of the Emulex RoCE Device Driver for
 * RoCE (RDMA over Converged Ethernet) adapters.
 * Copyright (C) 2012-2015 Emulex. All rights reserved.
 * EMULEX and SLI are trademarks of Emulex.
 * www.emulex.com
 *
 * This software is available to you under a choice of one of two licenses.
 * You may choose to be licensed under the terms of the GNU General Public
 * License (GPL) Version 2, available from the file COPYING in the main
 * directory of this source tree, or the BSD license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright notice,
 *   this list of conditions and the following disclaimer.
 *
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in
 *   the documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/dma-mapping.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/iw_cm.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_cache.h>

#include "ocrdma.h"
#include "ocrdma_hw.h"
#include "ocrdma_verbs.h"
#include <rdma/ocrdma-abi.h>

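/* RoCE exposes a single full-membership partition key: only P_Key index 0
 * is valid, and it always reads back as the default key 0xffff.
 */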
int ocrdma_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
{
        if (index > 0)
                return -EINVAL;

        *pkey = 0xffff;
        return 0;
}

int ocrdma_query_gid(struct ib_device *ibdev, u8 port,
                     int index, union ib_gid *sgid)
{
        int ret;
        struct ocrdma_dev *dev;

        dev = get_ocrdma_dev(ibdev);
        memset(sgid, 0, sizeof(*sgid));
        if (index >= OCRDMA_MAX_SGID)
                return -EINVAL;

        ret = ib_get_cached_gid(ibdev, port, index, sgid, NULL);
        if (ret == -EAGAIN) {
                memcpy(sgid, &zgid, sizeof(*sgid));
                return 0;
        }

        return ret;
}

int ocrdma_add_gid(struct ib_device *device,
                   u8 port_num,
                   unsigned int index,
                   const union ib_gid *gid,
                   const struct ib_gid_attr *attr,
                   void **context)
{
        return 0;
}

int ocrdma_del_gid(struct ib_device *device,
                   u8 port_num,
                   unsigned int index,
                   void **context)
{
        return 0;
}

int ocrdma_query_device(struct ib_device *ibdev, struct ib_device_attr *attr,
                        struct ib_udata *uhw)
{
        struct ocrdma_dev *dev = get_ocrdma_dev(ibdev);

        if (uhw->inlen || uhw->outlen)
                return -EINVAL;

        memset(attr, 0, sizeof(*attr));
        memcpy(&attr->fw_ver, &dev->attr.fw_ver[0],
               min(sizeof(dev->attr.fw_ver), sizeof(attr->fw_ver)));
        ocrdma_get_guid(dev, (u8 *)&attr->sys_image_guid);
        attr->max_mr_size = dev->attr.max_mr_size;
        attr->page_size_cap = 0xffff000;
        attr->vendor_id = dev->nic_info.pdev->vendor;
        attr->vendor_part_id = dev->nic_info.pdev->device;
        attr->hw_ver = dev->asic_id;
        attr->max_qp = dev->attr.max_qp;
        attr->max_ah = OCRDMA_MAX_AH;
        attr->max_qp_wr = dev->attr.max_wqe;

        attr->device_cap_flags = IB_DEVICE_CURR_QP_STATE_MOD |
                                        IB_DEVICE_RC_RNR_NAK_GEN |
                                        IB_DEVICE_SHUTDOWN_PORT |
                                        IB_DEVICE_SYS_IMAGE_GUID |
                                        IB_DEVICE_LOCAL_DMA_LKEY |
                                        IB_DEVICE_MEM_MGT_EXTENSIONS;
        attr->max_sge = min(dev->attr.max_send_sge, dev->attr.max_recv_sge);
        attr->max_sge_rd = dev->attr.max_rdma_sge;
        attr->max_cq = dev->attr.max_cq;
        attr->max_cqe = dev->attr.max_cqe;
        attr->max_mr = dev->attr.max_mr;
        attr->max_mw = dev->attr.max_mw;
        attr->max_pd = dev->attr.max_pd;
        attr->atomic_cap = 0;
        attr->max_fmr = 0;
        attr->max_map_per_fmr = 0;
        attr->max_qp_rd_atom =
            min(dev->attr.max_ord_per_qp, dev->attr.max_ird_per_qp);
        attr->max_qp_init_rd_atom = dev->attr.max_ord_per_qp;
        attr->max_srq = dev->attr.max_srq;
        attr->max_srq_sge = dev->attr.max_srq_sge;
        attr->max_srq_wr = dev->attr.max_rqe;
        attr->local_ca_ack_delay = dev->attr.local_ca_ack_delay;
        attr->max_fast_reg_page_list_len = dev->attr.max_pages_per_frmr;
        attr->max_pkeys = 1;
        return 0;
}

struct net_device *ocrdma_get_netdev(struct ib_device *ibdev, u8 port_num)
{
        struct ocrdma_dev *dev;
        struct net_device *ndev = NULL;

        rcu_read_lock();

        dev = get_ocrdma_dev(ibdev);
        if (dev)
                ndev = dev->nic_info.netdev;
        if (ndev)
                dev_hold(ndev);

        rcu_read_unlock();

        return ndev;
}

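/* Map the PHY link speed reported by firmware onto an (IB speed, IB width)
 * pair with the same effective data rate: 10G -> QDR x1, 20G -> DDR x4,
 * 40G -> QDR x4. 1G has no exact IB equivalent and is reported as SDR x1,
 * which is also the fallback for unknown speeds or a failed query.
 */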
static inline void get_link_speed_and_width(struct ocrdma_dev *dev,
                                            u8 *ib_speed, u8 *ib_width)
{
        int status;
        u8 speed;

        status = ocrdma_mbx_get_link_speed(dev, &speed, NULL);
        if (status)
                speed = OCRDMA_PHYS_LINK_SPEED_ZERO;

        switch (speed) {
        case OCRDMA_PHYS_LINK_SPEED_1GBPS:
                *ib_speed = IB_SPEED_SDR;
                *ib_width = IB_WIDTH_1X;
                break;

        case OCRDMA_PHYS_LINK_SPEED_10GBPS:
                *ib_speed = IB_SPEED_QDR;
                *ib_width = IB_WIDTH_1X;
                break;

        case OCRDMA_PHYS_LINK_SPEED_20GBPS:
                *ib_speed = IB_SPEED_DDR;
                *ib_width = IB_WIDTH_4X;
                break;

        case OCRDMA_PHYS_LINK_SPEED_40GBPS:
                *ib_speed = IB_SPEED_QDR;
                *ib_width = IB_WIDTH_4X;
                break;

        default:
                /* Unsupported */
                *ib_speed = IB_SPEED_SDR;
                *ib_width = IB_WIDTH_1X;
        }
}

int ocrdma_query_port(struct ib_device *ibdev,
                      u8 port, struct ib_port_attr *props)
{
        enum ib_port_state port_state;
        struct ocrdma_dev *dev;
        struct net_device *netdev;

        dev = get_ocrdma_dev(ibdev);
        if (port > 1) {
                pr_err("%s(%d) invalid_port=0x%x\n", __func__,
                       dev->id, port);
                return -EINVAL;
        }
        netdev = dev->nic_info.netdev;
        if (netif_running(netdev) && netif_oper_up(netdev)) {
                port_state = IB_PORT_ACTIVE;
                props->phys_state = 5;
        } else {
                port_state = IB_PORT_DOWN;
                props->phys_state = 3;
        }
        props->max_mtu = IB_MTU_4096;
        props->active_mtu = iboe_get_mtu(netdev->mtu);
        props->lid = 0;
        props->lmc = 0;
        props->sm_lid = 0;
        props->sm_sl = 0;
        props->state = port_state;
        props->port_cap_flags =
            IB_PORT_CM_SUP |
            IB_PORT_REINIT_SUP |
            IB_PORT_DEVICE_MGMT_SUP | IB_PORT_VENDOR_CLASS_SUP |
            IB_PORT_IP_BASED_GIDS;
        props->gid_tbl_len = OCRDMA_MAX_SGID;
        props->pkey_tbl_len = 1;
        props->bad_pkey_cntr = 0;
        props->qkey_viol_cntr = 0;
        get_link_speed_and_width(dev, &props->active_speed,
                                 &props->active_width);
        props->max_msg_sz = 0x80000000;
        props->max_vl_num = 4;
        return 0;
}

int ocrdma_modify_port(struct ib_device *ibdev, u8 port, int mask,
                       struct ib_port_modify *props)
{
        struct ocrdma_dev *dev;

        dev = get_ocrdma_dev(ibdev);
        if (port > 1) {
                pr_err("%s(%d) invalid_port=0x%x\n", __func__, dev->id, port);
                return -EINVAL;
        }
        return 0;
}

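/* Each ucontext keeps a list of (physical address, length) keys naming the
 * regions userspace may mmap() (doorbell pages, DPP pages, queue memory).
 * ocrdma_mmap() validates incoming requests against this list.
 */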
static int ocrdma_add_mmap(struct ocrdma_ucontext *uctx, u64 phy_addr,
                           unsigned long len)
{
        struct ocrdma_mm *mm;

        mm = kzalloc(sizeof(*mm), GFP_KERNEL);
        if (mm == NULL)
                return -ENOMEM;
        mm->key.phy_addr = phy_addr;
        mm->key.len = len;
        INIT_LIST_HEAD(&mm->entry);

        mutex_lock(&uctx->mm_list_lock);
        list_add_tail(&mm->entry, &uctx->mm_head);
        mutex_unlock(&uctx->mm_list_lock);
        return 0;
}

static void ocrdma_del_mmap(struct ocrdma_ucontext *uctx, u64 phy_addr,
                            unsigned long len)
{
        struct ocrdma_mm *mm, *tmp;

        mutex_lock(&uctx->mm_list_lock);
        list_for_each_entry_safe(mm, tmp, &uctx->mm_head, entry) {
                if (len != mm->key.len && phy_addr != mm->key.phy_addr)
                        continue;

                list_del(&mm->entry);
                kfree(mm);
                break;
        }
        mutex_unlock(&uctx->mm_list_lock);
}

static bool ocrdma_search_mmap(struct ocrdma_ucontext *uctx, u64 phy_addr,
                               unsigned long len)
{
        bool found = false;
        struct ocrdma_mm *mm;

        mutex_lock(&uctx->mm_list_lock);
        list_for_each_entry(mm, &uctx->mm_head, entry) {
                if (len != mm->key.len && phy_addr != mm->key.phy_addr)
                        continue;

                found = true;
                break;
        }
        mutex_unlock(&uctx->mm_list_lock);
        return found;
}

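/* PD ids can be preallocated by firmware into two bitmap-tracked pools:
 * DPP (direct packet push) PDs and normal PDs. The _get/_put helpers below
 * assume dev->dev_lock is held; they also maintain a high-water mark
 * (pd_*_thrsh) of pool usage.
 */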
static u16 _ocrdma_pd_mgr_get_bitmap(struct ocrdma_dev *dev, bool dpp_pool)
{
        u16 pd_bitmap_idx = 0;
        const unsigned long *pd_bitmap;

        if (dpp_pool) {
                pd_bitmap = dev->pd_mgr->pd_dpp_bitmap;
                pd_bitmap_idx = find_first_zero_bit(pd_bitmap,
                                                    dev->pd_mgr->max_dpp_pd);
                __set_bit(pd_bitmap_idx, dev->pd_mgr->pd_dpp_bitmap);
                dev->pd_mgr->pd_dpp_count++;
                if (dev->pd_mgr->pd_dpp_count > dev->pd_mgr->pd_dpp_thrsh)
                        dev->pd_mgr->pd_dpp_thrsh = dev->pd_mgr->pd_dpp_count;
        } else {
                pd_bitmap = dev->pd_mgr->pd_norm_bitmap;
                pd_bitmap_idx = find_first_zero_bit(pd_bitmap,
                                                    dev->pd_mgr->max_normal_pd);
                __set_bit(pd_bitmap_idx, dev->pd_mgr->pd_norm_bitmap);
                dev->pd_mgr->pd_norm_count++;
                if (dev->pd_mgr->pd_norm_count > dev->pd_mgr->pd_norm_thrsh)
                        dev->pd_mgr->pd_norm_thrsh = dev->pd_mgr->pd_norm_count;
        }
        return pd_bitmap_idx;
}

static int _ocrdma_pd_mgr_put_bitmap(struct ocrdma_dev *dev, u16 pd_id,
                                     bool dpp_pool)
{
        u16 pd_count;
        u16 pd_bit_index;

        pd_count = dpp_pool ? dev->pd_mgr->pd_dpp_count :
                              dev->pd_mgr->pd_norm_count;
        if (pd_count == 0)
                return -EINVAL;

        if (dpp_pool) {
                pd_bit_index = pd_id - dev->pd_mgr->pd_dpp_start;
                if (pd_bit_index >= dev->pd_mgr->max_dpp_pd)
                        return -EINVAL;
                __clear_bit(pd_bit_index, dev->pd_mgr->pd_dpp_bitmap);
                dev->pd_mgr->pd_dpp_count--;
        } else {
                pd_bit_index = pd_id - dev->pd_mgr->pd_norm_start;
                if (pd_bit_index >= dev->pd_mgr->max_normal_pd)
                        return -EINVAL;
                __clear_bit(pd_bit_index, dev->pd_mgr->pd_norm_bitmap);
                dev->pd_mgr->pd_norm_count--;
        }

        return 0;
}

/* Return int, not u8: _ocrdma_pd_mgr_put_bitmap() can return -EINVAL and
 * the negative status must not be truncated on the way to the caller.
 */
static int ocrdma_put_pd_num(struct ocrdma_dev *dev, u16 pd_id,
                             bool dpp_pool)
{
        int status;

        mutex_lock(&dev->dev_lock);
        status = _ocrdma_pd_mgr_put_bitmap(dev, pd_id, dpp_pool);
        mutex_unlock(&dev->dev_lock);
        return status;
}

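/* Pick a PD id out of the preallocated pools. A DPP request is satisfied
 * from the DPP pool when possible and quietly degrades to a normal PD
 * (clearing pd->dpp_enabled) when that pool is exhausted.
 */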
static int ocrdma_get_pd_num(struct ocrdma_dev *dev, struct ocrdma_pd *pd)
{
        u16 pd_idx = 0;
        int status = 0;

        mutex_lock(&dev->dev_lock);
        if (pd->dpp_enabled) {
                /* try allocating DPP PD, if not available then normal PD */
                if (dev->pd_mgr->pd_dpp_count < dev->pd_mgr->max_dpp_pd) {
                        pd_idx = _ocrdma_pd_mgr_get_bitmap(dev, true);
                        pd->id = dev->pd_mgr->pd_dpp_start + pd_idx;
                        pd->dpp_page = dev->pd_mgr->dpp_page_index + pd_idx;
                } else if (dev->pd_mgr->pd_norm_count <
                           dev->pd_mgr->max_normal_pd) {
                        pd_idx = _ocrdma_pd_mgr_get_bitmap(dev, false);
                        pd->id = dev->pd_mgr->pd_norm_start + pd_idx;
                        pd->dpp_enabled = false;
                } else {
                        status = -EINVAL;
                }
        } else {
                if (dev->pd_mgr->pd_norm_count < dev->pd_mgr->max_normal_pd) {
                        pd_idx = _ocrdma_pd_mgr_get_bitmap(dev, false);
                        pd->id = dev->pd_mgr->pd_norm_start + pd_idx;
                } else {
                        status = -EINVAL;
                }
        }
        mutex_unlock(&dev->dev_lock);
        return status;
}

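/* Common PD allocator. DPP is only offered to user contexts on SKH-R ASICs.
 * When firmware preallocation is active the id comes from the bitmap pools
 * above; otherwise a mailbox command allocates it, retrying once without
 * DPP if the DPP allocation fails.
 */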
static struct ocrdma_pd *_ocrdma_alloc_pd(struct ocrdma_dev *dev,
                                          struct ocrdma_ucontext *uctx,
                                          struct ib_udata *udata)
{
        struct ocrdma_pd *pd = NULL;
        int status;

        pd = kzalloc(sizeof(*pd), GFP_KERNEL);
        if (!pd)
                return ERR_PTR(-ENOMEM);

        if (udata && uctx && dev->attr.max_dpp_pds) {
                pd->dpp_enabled =
                        ocrdma_get_asic_type(dev) == OCRDMA_ASIC_GEN_SKH_R;
                pd->num_dpp_qp =
                        pd->dpp_enabled ? (dev->nic_info.db_page_size /
                                           dev->attr.wqe_size) : 0;
        }

        if (dev->pd_mgr->pd_prealloc_valid) {
                status = ocrdma_get_pd_num(dev, pd);
                if (status == 0) {
                        return pd;
                } else {
                        kfree(pd);
                        return ERR_PTR(status);
                }
        }

retry:
        status = ocrdma_mbx_alloc_pd(dev, pd);
        if (status) {
                if (pd->dpp_enabled) {
                        pd->dpp_enabled = false;
                        pd->num_dpp_qp = 0;
                        goto retry;
                } else {
                        kfree(pd);
                        return ERR_PTR(status);
                }
        }

        return pd;
}

static inline int is_ucontext_pd(struct ocrdma_ucontext *uctx,
                                 struct ocrdma_pd *pd)
{
        return (uctx->cntxt_pd == pd);
}

static int _ocrdma_dealloc_pd(struct ocrdma_dev *dev,
                              struct ocrdma_pd *pd)
{
        int status;

        if (dev->pd_mgr->pd_prealloc_valid)
                status = ocrdma_put_pd_num(dev, pd->id, pd->dpp_enabled);
        else
                status = ocrdma_mbx_dealloc_pd(dev, pd);

        kfree(pd);
        return status;
}

static int ocrdma_alloc_ucontext_pd(struct ocrdma_dev *dev,
                                    struct ocrdma_ucontext *uctx,
                                    struct ib_udata *udata)
{
        int status = 0;

        uctx->cntxt_pd = _ocrdma_alloc_pd(dev, uctx, udata);
        if (IS_ERR(uctx->cntxt_pd)) {
                status = PTR_ERR(uctx->cntxt_pd);
                uctx->cntxt_pd = NULL;
                goto err;
        }

        uctx->cntxt_pd->uctx = uctx;
        uctx->cntxt_pd->ibpd.device = &dev->ibdev;
err:
        return status;
}

static int ocrdma_dealloc_ucontext_pd(struct ocrdma_ucontext *uctx)
{
        struct ocrdma_pd *pd = uctx->cntxt_pd;
        struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device);

        if (uctx->pd_in_use) {
                pr_err("%s(%d) Freeing in use pdid=0x%x.\n",
                       __func__, dev->id, pd->id);
        }
        uctx->cntxt_pd = NULL;
        (void)_ocrdma_dealloc_pd(dev, pd);
        return 0;
}

static struct ocrdma_pd *ocrdma_get_ucontext_pd(struct ocrdma_ucontext *uctx)
{
        struct ocrdma_pd *pd = NULL;

        mutex_lock(&uctx->mm_list_lock);
        if (!uctx->pd_in_use) {
                uctx->pd_in_use = true;
                pd = uctx->cntxt_pd;
        }
        mutex_unlock(&uctx->mm_list_lock);

        return pd;
}

static void ocrdma_release_ucontext_pd(struct ocrdma_ucontext *uctx)
{
        mutex_lock(&uctx->mm_list_lock);
        uctx->pd_in_use = false;
        mutex_unlock(&uctx->mm_list_lock);
}

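/* Set up a per-process ucontext: a DMA-coherent address-handle table is
 * allocated and exposed to userspace (via the mmap list), a context-wide
 * PD is reserved, and the device geometry userspace needs (WQE/RQE sizes,
 * inline limits) is copied back through udata.
 */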
struct ib_ucontext *ocrdma_alloc_ucontext(struct ib_device *ibdev,
                                          struct ib_udata *udata)
{
        int status;
        struct ocrdma_ucontext *ctx;
        struct ocrdma_alloc_ucontext_resp resp;
        struct ocrdma_dev *dev = get_ocrdma_dev(ibdev);
        struct pci_dev *pdev = dev->nic_info.pdev;
        u32 map_len = roundup(sizeof(u32) * 2048, PAGE_SIZE);

        if (!udata)
                return ERR_PTR(-EFAULT);
        ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
        if (!ctx)
                return ERR_PTR(-ENOMEM);
        INIT_LIST_HEAD(&ctx->mm_head);
        mutex_init(&ctx->mm_list_lock);

        ctx->ah_tbl.va = dma_alloc_coherent(&pdev->dev, map_len,
                                            &ctx->ah_tbl.pa, GFP_KERNEL);
        if (!ctx->ah_tbl.va) {
                kfree(ctx);
                return ERR_PTR(-ENOMEM);
        }
        memset(ctx->ah_tbl.va, 0, map_len);
        ctx->ah_tbl.len = map_len;

        memset(&resp, 0, sizeof(resp));
        resp.ah_tbl_len = ctx->ah_tbl.len;
        resp.ah_tbl_page = virt_to_phys(ctx->ah_tbl.va);

        status = ocrdma_add_mmap(ctx, resp.ah_tbl_page, resp.ah_tbl_len);
        if (status)
                goto map_err;

        status = ocrdma_alloc_ucontext_pd(dev, ctx, udata);
        if (status)
                goto pd_err;

        resp.dev_id = dev->id;
        resp.max_inline_data = dev->attr.max_inline_data;
        resp.wqe_size = dev->attr.wqe_size;
        resp.rqe_size = dev->attr.rqe_size;
        resp.dpp_wqe_size = dev->attr.wqe_size;

        memcpy(resp.fw_ver, dev->attr.fw_ver, sizeof(resp.fw_ver));
        status = ib_copy_to_udata(udata, &resp, sizeof(resp));
        if (status)
                goto cpy_err;
        return &ctx->ibucontext;

cpy_err:
pd_err:
        ocrdma_del_mmap(ctx, ctx->ah_tbl.pa, ctx->ah_tbl.len);
map_err:
        dma_free_coherent(&pdev->dev, ctx->ah_tbl.len, ctx->ah_tbl.va,
                          ctx->ah_tbl.pa);
        kfree(ctx);
        return ERR_PTR(status);
}

int ocrdma_dealloc_ucontext(struct ib_ucontext *ibctx)
{
        int status;
        struct ocrdma_mm *mm, *tmp;
        struct ocrdma_ucontext *uctx = get_ocrdma_ucontext(ibctx);
        struct ocrdma_dev *dev = get_ocrdma_dev(ibctx->device);
        struct pci_dev *pdev = dev->nic_info.pdev;

        status = ocrdma_dealloc_ucontext_pd(uctx);

        ocrdma_del_mmap(uctx, uctx->ah_tbl.pa, uctx->ah_tbl.len);
        dma_free_coherent(&pdev->dev, uctx->ah_tbl.len, uctx->ah_tbl.va,
                          uctx->ah_tbl.pa);

        list_for_each_entry_safe(mm, tmp, &uctx->mm_head, entry) {
                list_del(&mm->entry);
                kfree(mm);
        }
        kfree(uctx);
        return status;
}

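/* mmap handler for the regions registered above. Doorbell pages are mapped
 * uncached and DPP pages write-combined; both must be write-only from
 * userspace (VM_READ is refused). Anything else, such as queue memory,
 * gets a normal cached mapping.
 */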
int ocrdma_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
{
        struct ocrdma_ucontext *ucontext = get_ocrdma_ucontext(context);
        struct ocrdma_dev *dev = get_ocrdma_dev(context->device);
        unsigned long vm_page = vma->vm_pgoff << PAGE_SHIFT;
        u64 unmapped_db = (u64) dev->nic_info.unmapped_db;
        unsigned long len = (vma->vm_end - vma->vm_start);
        int status;
        bool found;

        if (vma->vm_start & (PAGE_SIZE - 1))
                return -EINVAL;
        found = ocrdma_search_mmap(ucontext, vma->vm_pgoff << PAGE_SHIFT, len);
        if (!found)
                return -EINVAL;

        if ((vm_page >= unmapped_db) && (vm_page <= (unmapped_db +
                dev->nic_info.db_total_size)) &&
                (len <= dev->nic_info.db_page_size)) {
                if (vma->vm_flags & VM_READ)
                        return -EPERM;

                vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
                status = io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
                                            len, vma->vm_page_prot);
        } else if (dev->nic_info.dpp_unmapped_len &&
                (vm_page >= (u64) dev->nic_info.dpp_unmapped_addr) &&
                (vm_page <= (u64) (dev->nic_info.dpp_unmapped_addr +
                        dev->nic_info.dpp_unmapped_len)) &&
                (len <= dev->nic_info.dpp_unmapped_len)) {
                if (vma->vm_flags & VM_READ)
                        return -EPERM;

                vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
                status = io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
                                            len, vma->vm_page_prot);
        } else {
                status = remap_pfn_range(vma, vma->vm_start,
                                         vma->vm_pgoff, len, vma->vm_page_prot);
        }
        return status;
}

static int ocrdma_copy_pd_uresp(struct ocrdma_dev *dev, struct ocrdma_pd *pd,
                                struct ib_ucontext *ib_ctx,
                                struct ib_udata *udata)
{
        int status;
        u64 db_page_addr;
        u64 dpp_page_addr = 0;
        u32 db_page_size;
        struct ocrdma_alloc_pd_uresp rsp;
        struct ocrdma_ucontext *uctx = get_ocrdma_ucontext(ib_ctx);

        memset(&rsp, 0, sizeof(rsp));
        rsp.id = pd->id;
        rsp.dpp_enabled = pd->dpp_enabled;
        db_page_addr = ocrdma_get_db_addr(dev, pd->id);
        db_page_size = dev->nic_info.db_page_size;

        status = ocrdma_add_mmap(uctx, db_page_addr, db_page_size);
        if (status)
                return status;

        if (pd->dpp_enabled) {
                dpp_page_addr = dev->nic_info.dpp_unmapped_addr +
                                (pd->id * PAGE_SIZE);
                status = ocrdma_add_mmap(uctx, dpp_page_addr,
                                         PAGE_SIZE);
                if (status)
                        goto dpp_map_err;
                rsp.dpp_page_addr_hi = upper_32_bits(dpp_page_addr);
                rsp.dpp_page_addr_lo = dpp_page_addr;
        }

        status = ib_copy_to_udata(udata, &rsp, sizeof(rsp));
        if (status)
                goto ucopy_err;

        pd->uctx = uctx;
        return 0;

ucopy_err:
        /* use the local uctx here: pd->uctx is only assigned on success */
        if (pd->dpp_enabled)
                ocrdma_del_mmap(uctx, dpp_page_addr, PAGE_SIZE);
dpp_map_err:
        ocrdma_del_mmap(uctx, db_page_addr, db_page_size);
        return status;
}

struct ib_pd *ocrdma_alloc_pd(struct ib_device *ibdev,
                              struct ib_ucontext *context,
                              struct ib_udata *udata)
{
        struct ocrdma_dev *dev = get_ocrdma_dev(ibdev);
        struct ocrdma_pd *pd;
        struct ocrdma_ucontext *uctx = NULL;
        int status;
        bool is_uctx_pd = false;

        if (udata && context) {
                uctx = get_ocrdma_ucontext(context);
                pd = ocrdma_get_ucontext_pd(uctx);
                if (pd) {
                        is_uctx_pd = true;
                        goto pd_mapping;
                }
        }

        pd = _ocrdma_alloc_pd(dev, uctx, udata);
        if (IS_ERR(pd)) {
                status = PTR_ERR(pd);
                goto exit;
        }

pd_mapping:
        if (udata && context) {
                status = ocrdma_copy_pd_uresp(dev, pd, context, udata);
                if (status)
                        goto err;
        }
        return &pd->ibpd;

err:
        if (is_uctx_pd) {
                ocrdma_release_ucontext_pd(uctx);
        } else {
                status = _ocrdma_dealloc_pd(dev, pd);
        }
exit:
        return ERR_PTR(status);
}

int ocrdma_dealloc_pd(struct ib_pd *ibpd)
{
        struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
        struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
        struct ocrdma_ucontext *uctx = NULL;
        int status = 0;
        u64 usr_db;

        uctx = pd->uctx;
        if (uctx) {
                u64 dpp_db = dev->nic_info.dpp_unmapped_addr +
                        (pd->id * PAGE_SIZE);
                if (pd->dpp_enabled)
                        ocrdma_del_mmap(pd->uctx, dpp_db, PAGE_SIZE);
                usr_db = ocrdma_get_db_addr(dev, pd->id);
                ocrdma_del_mmap(pd->uctx, usr_db, dev->nic_info.db_page_size);

                if (is_ucontext_pd(uctx, pd)) {
                        ocrdma_release_ucontext_pd(uctx);
                        return status;
                }
        }
        status = _ocrdma_dealloc_pd(dev, pd);
        return status;
}

static int ocrdma_alloc_lkey(struct ocrdma_dev *dev, struct ocrdma_mr *mr,
                             u32 pdid, int acc, u32 num_pbls, u32 addr_check)
{
        int status;

        mr->hwmr.fr_mr = 0;
        mr->hwmr.local_rd = 1;
        mr->hwmr.remote_rd = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
        mr->hwmr.remote_wr = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
        mr->hwmr.local_wr = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0;
        mr->hwmr.mw_bind = (acc & IB_ACCESS_MW_BIND) ? 1 : 0;
        mr->hwmr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
        mr->hwmr.num_pbls = num_pbls;

        status = ocrdma_mbx_alloc_lkey(dev, &mr->hwmr, pdid, addr_check);
        if (status)
                return status;

        mr->ibmr.lkey = mr->hwmr.lkey;
        if (mr->hwmr.remote_wr || mr->hwmr.remote_rd)
                mr->ibmr.rkey = mr->hwmr.lkey;
        return 0;
}

struct ib_mr *ocrdma_get_dma_mr(struct ib_pd *ibpd, int acc)
{
        int status;
        struct ocrdma_mr *mr;
        struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
        struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);

        if (acc & IB_ACCESS_REMOTE_WRITE && !(acc & IB_ACCESS_LOCAL_WRITE)) {
                pr_err("%s err, invalid access rights\n", __func__);
                return ERR_PTR(-EINVAL);
        }

        mr = kzalloc(sizeof(*mr), GFP_KERNEL);
        if (!mr)
                return ERR_PTR(-ENOMEM);

        status = ocrdma_alloc_lkey(dev, mr, pd->id, acc, 0,
                                   OCRDMA_ADDR_CHECK_DISABLE);
        if (status) {
                kfree(mr);
                return ERR_PTR(status);
        }

        return &mr->ibmr;
}

static void ocrdma_free_mr_pbl_tbl(struct ocrdma_dev *dev,
                                   struct ocrdma_hw_mr *mr)
{
        struct pci_dev *pdev = dev->nic_info.pdev;
        int i = 0;

        if (mr->pbl_table) {
                for (i = 0; i < mr->num_pbls; i++) {
                        if (!mr->pbl_table[i].va)
                                continue;
                        dma_free_coherent(&pdev->dev, mr->pbl_size,
                                          mr->pbl_table[i].va,
                                          mr->pbl_table[i].pa);
                }
                kfree(mr->pbl_table);
                mr->pbl_table = NULL;
        }
}

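/* Choose how many page-buffer lists (PBLs) of what size are needed to hold
 * num_pbes 64-bit page entries. The candidate PBL size doubles from
 * OCRDMA_MIN_HPAGE_SIZE upward until the resulting PBL count drops below
 * the device limit. Illustrative example, assuming a 4K minimum PBL size:
 * one 4K PBL holds 4096/8 = 512 PBEs, so 2000 PBEs need
 * roundup(2000, 512)/512 = 4 PBLs; if 4 < max_num_mr_pbl, the loop stops
 * with pbl_size = 4K and num_pbls = 4.
 */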
static int ocrdma_get_pbl_info(struct ocrdma_dev *dev, struct ocrdma_mr *mr,
                               u32 num_pbes)
{
        u32 num_pbls = 0;
        u32 idx = 0;
        int status = 0;
        u32 pbl_size;

        do {
                pbl_size = OCRDMA_MIN_HPAGE_SIZE * (1 << idx);
                if (pbl_size > MAX_OCRDMA_PBL_SIZE) {
                        status = -EFAULT;
                        break;
                }
                num_pbls = roundup(num_pbes, (pbl_size / sizeof(u64)));
                num_pbls = num_pbls / (pbl_size / sizeof(u64));
                idx++;
        } while (num_pbls >= dev->attr.max_num_mr_pbl);

        mr->hwmr.num_pbes = num_pbes;
        mr->hwmr.num_pbls = num_pbls;
        mr->hwmr.pbl_size = pbl_size;
        return status;
}

static int ocrdma_build_pbl_tbl(struct ocrdma_dev *dev, struct ocrdma_hw_mr *mr)
{
        int status = 0;
        int i;
        u32 dma_len = mr->pbl_size;
        struct pci_dev *pdev = dev->nic_info.pdev;
        void *va;
        dma_addr_t pa;

        mr->pbl_table = kcalloc(mr->num_pbls, sizeof(struct ocrdma_pbl),
                                GFP_KERNEL);
        if (!mr->pbl_table)
                return -ENOMEM;

        for (i = 0; i < mr->num_pbls; i++) {
                va = dma_alloc_coherent(&pdev->dev, dma_len, &pa, GFP_KERNEL);
                if (!va) {
                        ocrdma_free_mr_pbl_tbl(dev, mr);
                        status = -ENOMEM;
                        break;
                }
                memset(va, 0, dma_len);
                mr->pbl_table[i].va = va;
                mr->pbl_table[i].pa = pa;
        }
        return status;
}

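/* Walk the umem scatterlist and emit one little-endian 64-bit PBE per HW
 * page, spilling into the next PBL whenever the current one is full
 * (pbl_size / sizeof(u64) entries per PBL).
 */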
static void build_user_pbes(struct ocrdma_dev *dev, struct ocrdma_mr *mr,
                            u32 num_pbes)
{
        struct ocrdma_pbe *pbe;
        struct scatterlist *sg;
        struct ocrdma_pbl *pbl_tbl = mr->hwmr.pbl_table;
        struct ib_umem *umem = mr->umem;
        int shift, pg_cnt, pages, pbe_cnt, entry, total_num_pbes = 0;

        if (!mr->hwmr.num_pbes)
                return;

        pbe = (struct ocrdma_pbe *)pbl_tbl->va;
        pbe_cnt = 0;

        shift = ilog2(umem->page_size);

        for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
                pages = sg_dma_len(sg) >> shift;
                for (pg_cnt = 0; pg_cnt < pages; pg_cnt++) {
                        /* store the page address in pbe */
                        pbe->pa_lo =
                            cpu_to_le32(sg_dma_address(sg) +
                                        (umem->page_size * pg_cnt));
                        pbe->pa_hi =
                            cpu_to_le32(upper_32_bits(sg_dma_address(sg) +
                                        umem->page_size * pg_cnt));
                        pbe_cnt += 1;
                        total_num_pbes += 1;
                        pbe++;

                        /* all pbes built; the caller issues the mbx cmd. */
                        if (total_num_pbes == num_pbes)
                                return;

                        /* if the given pbl is full storing the pbes,
                         * move to next pbl.
                         */
                        if (pbe_cnt == (mr->hwmr.pbl_size / sizeof(u64))) {
                                pbl_tbl++;
                                pbe = (struct ocrdma_pbe *)pbl_tbl->va;
                                pbe_cnt = 0;
                        }
                }
        }
}

struct ib_mr *ocrdma_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
                                 u64 usr_addr, int acc, struct ib_udata *udata)
{
        int status = -ENOMEM;
        struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
        struct ocrdma_mr *mr;
        struct ocrdma_pd *pd;
        u32 num_pbes;

        pd = get_ocrdma_pd(ibpd);

        if (acc & IB_ACCESS_REMOTE_WRITE && !(acc & IB_ACCESS_LOCAL_WRITE))
                return ERR_PTR(-EINVAL);

        mr = kzalloc(sizeof(*mr), GFP_KERNEL);
        if (!mr)
                return ERR_PTR(status);
        mr->umem = ib_umem_get(ibpd->uobject->context, start, len, acc, 0);
        if (IS_ERR(mr->umem)) {
                status = -EFAULT;
                goto umem_err;
        }
        num_pbes = ib_umem_page_count(mr->umem);
        status = ocrdma_get_pbl_info(dev, mr, num_pbes);
        if (status)
                goto umem_err;

        mr->hwmr.pbe_size = mr->umem->page_size;
        mr->hwmr.fbo = ib_umem_offset(mr->umem);
        mr->hwmr.va = usr_addr;
        mr->hwmr.len = len;
        mr->hwmr.remote_wr = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
        mr->hwmr.remote_rd = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
        mr->hwmr.local_wr = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0;
        mr->hwmr.local_rd = 1;
        mr->hwmr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
        status = ocrdma_build_pbl_tbl(dev, &mr->hwmr);
        if (status)
                goto umem_err;
        build_user_pbes(dev, mr, num_pbes);
        status = ocrdma_reg_mr(dev, &mr->hwmr, pd->id, acc);
        if (status)
                goto mbx_err;
        mr->ibmr.lkey = mr->hwmr.lkey;
        if (mr->hwmr.remote_wr || mr->hwmr.remote_rd)
                mr->ibmr.rkey = mr->hwmr.lkey;

        return &mr->ibmr;

mbx_err:
        ocrdma_free_mr_pbl_tbl(dev, &mr->hwmr);
umem_err:
        kfree(mr);
        return ERR_PTR(status);
}

int ocrdma_dereg_mr(struct ib_mr *ib_mr)
{
        struct ocrdma_mr *mr = get_ocrdma_mr(ib_mr);
        struct ocrdma_dev *dev = get_ocrdma_dev(ib_mr->device);

        (void) ocrdma_mbx_dealloc_lkey(dev, mr->hwmr.fr_mr, mr->hwmr.lkey);

        kfree(mr->pages);
        ocrdma_free_mr_pbl_tbl(dev, &mr->hwmr);

        /* it could be user registered memory. */
        if (mr->umem)
                ib_umem_release(mr->umem);
        kfree(mr);

        /* Don't stop cleanup, in case FW is unresponsive */
        if (dev->mqe_ctx.fw_error_state) {
                pr_err("%s(%d) fw not responding.\n",
                       __func__, dev->id);
        }
        return 0;
}

static int ocrdma_copy_cq_uresp(struct ocrdma_dev *dev, struct ocrdma_cq *cq,
                                struct ib_udata *udata,
                                struct ib_ucontext *ib_ctx)
{
        int status;
        struct ocrdma_ucontext *uctx = get_ocrdma_ucontext(ib_ctx);
        struct ocrdma_create_cq_uresp uresp;

        memset(&uresp, 0, sizeof(uresp));
        uresp.cq_id = cq->id;
        uresp.page_size = PAGE_ALIGN(cq->len);
        uresp.num_pages = 1;
        uresp.max_hw_cqe = cq->max_hw_cqe;
        uresp.page_addr[0] = virt_to_phys(cq->va);
        uresp.db_page_addr = ocrdma_get_db_addr(dev, uctx->cntxt_pd->id);
        uresp.db_page_size = dev->nic_info.db_page_size;
        uresp.phase_change = cq->phase_change ? 1 : 0;
        status = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
        if (status) {
                pr_err("%s(%d) copy error cqid=0x%x.\n",
                       __func__, dev->id, cq->id);
                goto err;
        }
        status = ocrdma_add_mmap(uctx, uresp.db_page_addr, uresp.db_page_size);
        if (status)
                goto err;
        status = ocrdma_add_mmap(uctx, uresp.page_addr[0], uresp.page_size);
        if (status) {
                ocrdma_del_mmap(uctx, uresp.db_page_addr, uresp.db_page_size);
                goto err;
        }
        cq->ucontext = uctx;
err:
        return status;
}

struct ib_cq *ocrdma_create_cq(struct ib_device *ibdev,
                               const struct ib_cq_init_attr *attr,
                               struct ib_ucontext *ib_ctx,
                               struct ib_udata *udata)
{
        int entries = attr->cqe;
        struct ocrdma_cq *cq;
        struct ocrdma_dev *dev = get_ocrdma_dev(ibdev);
        struct ocrdma_ucontext *uctx = NULL;
        u16 pd_id = 0;
        int status;
        struct ocrdma_create_cq_ureq ureq;

        if (attr->flags)
                return ERR_PTR(-EINVAL);

        if (udata) {
                if (ib_copy_from_udata(&ureq, udata, sizeof(ureq)))
                        return ERR_PTR(-EFAULT);
        } else {
                ureq.dpp_cq = 0;
        }
        cq = kzalloc(sizeof(*cq), GFP_KERNEL);
        if (!cq)
                return ERR_PTR(-ENOMEM);

        spin_lock_init(&cq->cq_lock);
        spin_lock_init(&cq->comp_handler_lock);
        INIT_LIST_HEAD(&cq->sq_head);
        INIT_LIST_HEAD(&cq->rq_head);

        if (ib_ctx) {
                uctx = get_ocrdma_ucontext(ib_ctx);
                pd_id = uctx->cntxt_pd->id;
        }

        status = ocrdma_mbx_create_cq(dev, cq, entries, ureq.dpp_cq, pd_id);
        if (status) {
                kfree(cq);
                return ERR_PTR(status);
        }
        if (ib_ctx) {
                status = ocrdma_copy_cq_uresp(dev, cq, udata, ib_ctx);
                if (status)
                        goto ctx_err;
        }
        cq->phase = OCRDMA_CQE_VALID;
        dev->cq_tbl[cq->id] = cq;
        return &cq->ibcq;

ctx_err:
        ocrdma_mbx_destroy_cq(dev, cq);
        kfree(cq);
        return ERR_PTR(status);
}

int ocrdma_resize_cq(struct ib_cq *ibcq, int new_cnt,
                     struct ib_udata *udata)
{
        int status = 0;
        struct ocrdma_cq *cq = get_ocrdma_cq(ibcq);

        if (new_cnt < 1 || new_cnt > cq->max_hw_cqe) {
                status = -EINVAL;
                return status;
        }
        ibcq->cqe = new_cnt;
        return status;
}

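/* Discard any completions still sitting in the CQ: count the valid CQEs
 * and ring the CQ doorbell for that many without re-arming, under cq_lock
 * to serialize with a polling thread the last IRQ may have scheduled.
 */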
static void ocrdma_flush_cq(struct ocrdma_cq *cq)
{
        int cqe_cnt;
        int valid_count = 0;
        unsigned long flags;

        struct ocrdma_dev *dev = get_ocrdma_dev(cq->ibcq.device);
        struct ocrdma_cqe *cqe = NULL;

        cqe = cq->va;
        cqe_cnt = cq->cqe_cnt;

        /* The last irq might have scheduled a polling thread;
         * sync up with it before hard flushing.
         */
        spin_lock_irqsave(&cq->cq_lock, flags);
        while (cqe_cnt) {
                if (is_cqe_valid(cq, cqe))
                        valid_count++;
                cqe++;
                cqe_cnt--;
        }
        ocrdma_ring_cq_db(dev, cq->id, false, false, valid_count);
        spin_unlock_irqrestore(&cq->cq_lock, flags);
}

int ocrdma_destroy_cq(struct ib_cq *ibcq)
{
        struct ocrdma_cq *cq = get_ocrdma_cq(ibcq);
        struct ocrdma_eq *eq = NULL;
        struct ocrdma_dev *dev = get_ocrdma_dev(ibcq->device);
        int pdid = 0;
        u32 irq, indx;

        dev->cq_tbl[cq->id] = NULL;
        indx = ocrdma_get_eq_table_index(dev, cq->eqn);
        if (indx == -EINVAL)
                BUG();

        eq = &dev->eq_tbl[indx];
        irq = ocrdma_get_irq(dev, eq);
        synchronize_irq(irq);
        ocrdma_flush_cq(cq);

        (void)ocrdma_mbx_destroy_cq(dev, cq);
        if (cq->ucontext) {
                pdid = cq->ucontext->cntxt_pd->id;
                ocrdma_del_mmap(cq->ucontext, (u64) cq->pa,
                                PAGE_ALIGN(cq->len));
                ocrdma_del_mmap(cq->ucontext,
                                ocrdma_get_db_addr(dev, pdid),
                                dev->nic_info.db_page_size);
        }

        kfree(cq);
        return 0;
}

static int ocrdma_add_qpn_map(struct ocrdma_dev *dev, struct ocrdma_qp *qp)
{
        int status = -EINVAL;

        if (qp->id < OCRDMA_MAX_QP && dev->qp_tbl[qp->id] == NULL) {
                dev->qp_tbl[qp->id] = qp;
                status = 0;
        }
        return status;
}

static void ocrdma_del_qpn_map(struct ocrdma_dev *dev, struct ocrdma_qp *qp)
{
        dev->qp_tbl[qp->id] = NULL;
}

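/* Validate ib_qp_init_attr against the device limits before creating a QP:
 * only GSI/RC/UC/UD types are supported, WR/SGE/inline sizes must fit the
 * reported attributes, userspace may not create the GSI QP, only one GSI
 * QP may exist, and consumer QPs may not share the GSI QP's CQs.
 */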
static int ocrdma_check_qp_params(struct ib_pd *ibpd, struct ocrdma_dev *dev,
                                  struct ib_qp_init_attr *attrs)
{
        if ((attrs->qp_type != IB_QPT_GSI) &&
            (attrs->qp_type != IB_QPT_RC) &&
            (attrs->qp_type != IB_QPT_UC) &&
            (attrs->qp_type != IB_QPT_UD)) {
                pr_err("%s(%d) unsupported qp type=0x%x requested\n",
                       __func__, dev->id, attrs->qp_type);
                return -EINVAL;
        }
        /* Skip the check for QP1 to support CM size of 128 */
        if ((attrs->qp_type != IB_QPT_GSI) &&
            (attrs->cap.max_send_wr > dev->attr.max_wqe)) {
                pr_err("%s(%d) unsupported send_wr=0x%x requested\n",
                       __func__, dev->id, attrs->cap.max_send_wr);
                pr_err("%s(%d) supported send_wr=0x%x\n",
                       __func__, dev->id, dev->attr.max_wqe);
                return -EINVAL;
        }
        if (!attrs->srq && (attrs->cap.max_recv_wr > dev->attr.max_rqe)) {
                pr_err("%s(%d) unsupported recv_wr=0x%x requested\n",
                       __func__, dev->id, attrs->cap.max_recv_wr);
                pr_err("%s(%d) supported recv_wr=0x%x\n",
                       __func__, dev->id, dev->attr.max_rqe);
                return -EINVAL;
        }
        if (attrs->cap.max_inline_data > dev->attr.max_inline_data) {
                pr_err("%s(%d) unsupported inline data size=0x%x requested\n",
                       __func__, dev->id, attrs->cap.max_inline_data);
                pr_err("%s(%d) supported inline data size=0x%x\n",
                       __func__, dev->id, dev->attr.max_inline_data);
                return -EINVAL;
        }
        if (attrs->cap.max_send_sge > dev->attr.max_send_sge) {
                pr_err("%s(%d) unsupported send_sge=0x%x requested\n",
                       __func__, dev->id, attrs->cap.max_send_sge);
                pr_err("%s(%d) supported send_sge=0x%x\n",
                       __func__, dev->id, dev->attr.max_send_sge);
                return -EINVAL;
        }
        if (attrs->cap.max_recv_sge > dev->attr.max_recv_sge) {
                pr_err("%s(%d) unsupported recv_sge=0x%x requested\n",
                       __func__, dev->id, attrs->cap.max_recv_sge);
                pr_err("%s(%d) supported recv_sge=0x%x\n",
                       __func__, dev->id, dev->attr.max_recv_sge);
                return -EINVAL;
        }
        /* unprivileged user space cannot create special QP */
        if (ibpd->uobject && attrs->qp_type == IB_QPT_GSI) {
                pr_err("%s(%d) Userspace can't create special QPs of type=0x%x\n",
                       __func__, dev->id, attrs->qp_type);
                return -EINVAL;
        }
        /* allow creating only one GSI type of QP */
        if (attrs->qp_type == IB_QPT_GSI && dev->gsi_qp_created) {
                pr_err("%s(%d) GSI special QPs already created.\n",
                       __func__, dev->id);
                return -EINVAL;
        }
        /* verify consumer QPs are not trying to use GSI QP's CQ */
        if ((attrs->qp_type != IB_QPT_GSI) && (dev->gsi_qp_created)) {
                if ((dev->gsi_sqcq == get_ocrdma_cq(attrs->send_cq)) ||
                    (dev->gsi_rqcq == get_ocrdma_cq(attrs->recv_cq))) {
                        pr_err("%s(%d) Consumer QP cannot use GSI CQs.\n",
                               __func__, dev->id);
                        return -EINVAL;
                }
        }
        return 0;
}

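/* Copy the create-QP response back to userspace and register the SQ page
 * and (unless the QP uses an SRQ) the RQ page with the ucontext's mmap
 * list; the doorbell page itself was already registered when the PD's
 * uresp was copied.
 */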
static int ocrdma_copy_qp_uresp(struct ocrdma_qp *qp,
                                struct ib_udata *udata, int dpp_offset,
                                int dpp_credit_lmt, int srq)
{
        int status;
        u64 usr_db;
        struct ocrdma_create_qp_uresp uresp;
        struct ocrdma_pd *pd = qp->pd;
        struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device);

        memset(&uresp, 0, sizeof(uresp));
        usr_db = dev->nic_info.unmapped_db +
                        (pd->id * dev->nic_info.db_page_size);
        uresp.qp_id = qp->id;
        uresp.sq_dbid = qp->sq.dbid;
        uresp.num_sq_pages = 1;
        uresp.sq_page_size = PAGE_ALIGN(qp->sq.len);
        uresp.sq_page_addr[0] = virt_to_phys(qp->sq.va);
        uresp.num_wqe_allocated = qp->sq.max_cnt;
        if (!srq) {
                uresp.rq_dbid = qp->rq.dbid;
                uresp.num_rq_pages = 1;
                uresp.rq_page_size = PAGE_ALIGN(qp->rq.len);
                uresp.rq_page_addr[0] = virt_to_phys(qp->rq.va);
                uresp.num_rqe_allocated = qp->rq.max_cnt;
        }
        uresp.db_page_addr = usr_db;
        uresp.db_page_size = dev->nic_info.db_page_size;
        uresp.db_sq_offset = OCRDMA_DB_GEN2_SQ_OFFSET;
        uresp.db_rq_offset = OCRDMA_DB_GEN2_RQ_OFFSET;
        uresp.db_shift = OCRDMA_DB_RQ_SHIFT;

        if (qp->dpp_enabled) {
                uresp.dpp_credit = dpp_credit_lmt;
                uresp.dpp_offset = dpp_offset;
        }
        status = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
        if (status) {
                pr_err("%s(%d) user copy error.\n", __func__, dev->id);
                goto err;
        }
        status = ocrdma_add_mmap(pd->uctx, uresp.sq_page_addr[0],
                                 uresp.sq_page_size);
        if (status)
                goto err;

        if (!srq) {
                status = ocrdma_add_mmap(pd->uctx, uresp.rq_page_addr[0],
                                         uresp.rq_page_size);
                if (status)
                        goto rq_map_err;
        }
        return status;
rq_map_err:
        ocrdma_del_mmap(pd->uctx, uresp.sq_page_addr[0], uresp.sq_page_size);
err:
        return status;
}

static void ocrdma_set_qp_db(struct ocrdma_dev *dev, struct ocrdma_qp *qp,
                             struct ocrdma_pd *pd)
{
        if (ocrdma_get_asic_type(dev) == OCRDMA_ASIC_GEN_SKH_R) {
                qp->sq_db = dev->nic_info.db +
                        (pd->id * dev->nic_info.db_page_size) +
                        OCRDMA_DB_GEN2_SQ_OFFSET;
                qp->rq_db = dev->nic_info.db +
                        (pd->id * dev->nic_info.db_page_size) +
                        OCRDMA_DB_GEN2_RQ_OFFSET;
        } else {
                qp->sq_db = dev->nic_info.db +
                        (pd->id * dev->nic_info.db_page_size) +
                        OCRDMA_DB_SQ_OFFSET;
                qp->rq_db = dev->nic_info.db +
                        (pd->id * dev->nic_info.db_page_size) +
                        OCRDMA_DB_RQ_OFFSET;
        }
}

static int ocrdma_alloc_wr_id_tbl(struct ocrdma_qp *qp)
{
        qp->wqe_wr_id_tbl =
            kcalloc(qp->sq.max_cnt, sizeof(*(qp->wqe_wr_id_tbl)),
                    GFP_KERNEL);
        if (qp->wqe_wr_id_tbl == NULL)
                return -ENOMEM;
        qp->rqe_wr_id_tbl =
            kcalloc(qp->rq.max_cnt, sizeof(u64), GFP_KERNEL);
        if (qp->rqe_wr_id_tbl == NULL)
                return -ENOMEM;

        return 0;
}

static void ocrdma_set_qp_init_params(struct ocrdma_qp *qp,
                                      struct ocrdma_pd *pd,
                                      struct ib_qp_init_attr *attrs)
{
        qp->pd = pd;
        spin_lock_init(&qp->q_lock);
        INIT_LIST_HEAD(&qp->sq_entry);
        INIT_LIST_HEAD(&qp->rq_entry);

        qp->qp_type = attrs->qp_type;
        qp->cap_flags = OCRDMA_QP_INB_RD | OCRDMA_QP_INB_WR;
        qp->max_inline_data = attrs->cap.max_inline_data;
        qp->sq.max_sges = attrs->cap.max_send_sge;
        qp->rq.max_sges = attrs->cap.max_recv_sge;
        qp->state = OCRDMA_QPS_RST;
        qp->signaled = (attrs->sq_sig_type == IB_SIGNAL_ALL_WR) ? true : false;
}

static void ocrdma_store_gsi_qp_cq(struct ocrdma_dev *dev,
                                   struct ib_qp_init_attr *attrs)
{
        if (attrs->qp_type == IB_QPT_GSI) {
                dev->gsi_qp_created = 1;
                dev->gsi_sqcq = get_ocrdma_cq(attrs->send_cq);
                dev->gsi_rqcq = get_ocrdma_cq(attrs->recv_cq);
        }
}

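/* Verbs create-QP entry point. Order of operations: validate attributes,
 * create the QP via mailbox command, allocate wr_id tables (kernel QPs
 * only; user QPs track wr_ids in the library), publish the QP in the qpn
 * map, set the doorbell addresses, and finally copy the uresp for user QPs.
 */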
1406 struct ib_qp *ocrdma_create_qp(struct ib_pd *ibpd,
1407                                struct ib_qp_init_attr *attrs,
1408                                struct ib_udata *udata)
1409 {
1410         int status;
1411         struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
1412         struct ocrdma_qp *qp;
1413         struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
1414         struct ocrdma_create_qp_ureq ureq;
1415         u16 dpp_credit_lmt, dpp_offset;
1416
1417         status = ocrdma_check_qp_params(ibpd, dev, attrs);
1418         if (status)
1419                 goto gen_err;
1420
1421         memset(&ureq, 0, sizeof(ureq));
1422         if (udata) {
1423                 if (ib_copy_from_udata(&ureq, udata, sizeof(ureq)))
1424                         return ERR_PTR(-EFAULT);
1425         }
1426         qp = kzalloc(sizeof(*qp), GFP_KERNEL);
1427         if (!qp) {
1428                 status = -ENOMEM;
1429                 goto gen_err;
1430         }
1431         ocrdma_set_qp_init_params(qp, pd, attrs);
1432         if (udata == NULL)
1433                 qp->cap_flags |= (OCRDMA_QP_MW_BIND | OCRDMA_QP_LKEY0 |
1434                                         OCRDMA_QP_FAST_REG);
1435
1436         mutex_lock(&dev->dev_lock);
1437         status = ocrdma_mbx_create_qp(qp, attrs, ureq.enable_dpp_cq,
1438                                         ureq.dpp_cq_id,
1439                                         &dpp_offset, &dpp_credit_lmt);
1440         if (status)
1441                 goto mbx_err;
1442
1443         /* user space QP's wr_id table are managed in library */
1444         if (udata == NULL) {
1445                 status = ocrdma_alloc_wr_id_tbl(qp);
1446                 if (status)
1447                         goto map_err;
1448         }
1449
1450         status = ocrdma_add_qpn_map(dev, qp);
1451         if (status)
1452                 goto map_err;
1453         ocrdma_set_qp_db(dev, qp, pd);
1454         if (udata) {
1455                 status = ocrdma_copy_qp_uresp(qp, udata, dpp_offset,
1456                                               dpp_credit_lmt,
1457                                               (attrs->srq != NULL));
1458                 if (status)
1459                         goto cpy_err;
1460         }
1461         ocrdma_store_gsi_qp_cq(dev, attrs);
1462         qp->ibqp.qp_num = qp->id;
1463         mutex_unlock(&dev->dev_lock);
1464         return &qp->ibqp;
1465
1466 cpy_err:
1467         ocrdma_del_qpn_map(dev, qp);
1468 map_err:
1469         ocrdma_mbx_destroy_qp(dev, qp);
1470 mbx_err:
1471         mutex_unlock(&dev->dev_lock);
1472         kfree(qp->wqe_wr_id_tbl);
1473         kfree(qp->rqe_wr_id_tbl);
1474         kfree(qp);
1475         pr_err("%s(%d) error=%d\n", __func__, dev->id, status);
1476 gen_err:
1477         return ERR_PTR(status);
1478 }
1479
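/* Low-level modify helper: update the driver's software QP state
 * first, then push the attributes to the firmware through the mailbox.
 * ocrdma_modify_qp() below additionally validates the transition and
 * serializes concurrent modify contexts via dev_lock.
 */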
1480 int _ocrdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
1481                       int attr_mask)
1482 {
1483         int status = 0;
1484         struct ocrdma_qp *qp;
1485         struct ocrdma_dev *dev;
1486         enum ib_qp_state old_qps;
1487
1488         qp = get_ocrdma_qp(ibqp);
1489         dev = get_ocrdma_dev(ibqp->device);
1490         if (attr_mask & IB_QP_STATE)
1491                 status = ocrdma_qp_state_change(qp, attr->qp_state, &old_qps);
1492         /* if the new and previous states are the same, the hw doesn't
1493          * need to know about it.
1494          */
1495         if (status < 0)
1496                 return status;
1497         return ocrdma_mbx_modify_qp(dev, qp, attr, attr_mask);
1498 }
1499
1500 int ocrdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
1501                      int attr_mask, struct ib_udata *udata)
1502 {
1503         unsigned long flags;
1504         int status = -EINVAL;
1505         struct ocrdma_qp *qp;
1506         struct ocrdma_dev *dev;
1507         enum ib_qp_state old_qps, new_qps;
1508
1509         qp = get_ocrdma_qp(ibqp);
1510         dev = get_ocrdma_dev(ibqp->device);
1511
1512         /* synchronize with multiple contexts trying to change or retrieve the qp state */
1513         mutex_lock(&dev->dev_lock);
1514         /* synchronize with wqe, rqe posting and cqe processing contexts */
1515         spin_lock_irqsave(&qp->q_lock, flags);
1516         old_qps = get_ibqp_state(qp->state);
1517         if (attr_mask & IB_QP_STATE)
1518                 new_qps = attr->qp_state;
1519         else
1520                 new_qps = old_qps;
1521         spin_unlock_irqrestore(&qp->q_lock, flags);
1522
1523         if (!ib_modify_qp_is_ok(old_qps, new_qps, ibqp->qp_type, attr_mask,
1524                                 IB_LINK_LAYER_ETHERNET)) {
1525                 pr_err("%s(%d) invalid attribute mask=0x%x specified for "
1526                        "qpn=0x%x of type=0x%x old_qps=0x%x, new_qps=0x%x\n",
1527                        __func__, dev->id, attr_mask, qp->id, ibqp->qp_type,
1528                        old_qps, new_qps);
1529                 goto param_err;
1530         }
1531
1532         status = _ocrdma_modify_qp(ibqp, attr, attr_mask);
1533         if (status > 0)
1534                 status = 0;
1535 param_err:
1536         mutex_unlock(&dev->dev_lock);
1537         return status;
1538 }
1539
1540 static enum ib_mtu ocrdma_mtu_int_to_enum(u16 mtu)
1541 {
1542         switch (mtu) {
1543         case 256:
1544                 return IB_MTU_256;
1545         case 512:
1546                 return IB_MTU_512;
1547         case 1024:
1548                 return IB_MTU_1024;
1549         case 2048:
1550                 return IB_MTU_2048;
1551         case 4096:
1552                 return IB_MTU_4096;
1553         default:
1554                 return IB_MTU_1024;
1555         }
1556 }
1557
1558 static int ocrdma_to_ib_qp_acc_flags(int qp_cap_flags)
1559 {
1560         int ib_qp_acc_flags = 0;
1561
1562         if (qp_cap_flags & OCRDMA_QP_INB_WR)
1563                 ib_qp_acc_flags |= IB_ACCESS_REMOTE_WRITE;
1564         if (qp_cap_flags & OCRDMA_QP_INB_RD)
1565                 ib_qp_acc_flags |= IB_ACCESS_LOCAL_WRITE;
1566         return ib_qp_acc_flags;
1567 }
1568
1569 int ocrdma_query_qp(struct ib_qp *ibqp,
1570                     struct ib_qp_attr *qp_attr,
1571                     int attr_mask, struct ib_qp_init_attr *qp_init_attr)
1572 {
1573         int status;
1574         u32 qp_state;
1575         struct ocrdma_qp_params params;
1576         struct ocrdma_qp *qp = get_ocrdma_qp(ibqp);
1577         struct ocrdma_dev *dev = get_ocrdma_dev(ibqp->device);
1578
1579         memset(&params, 0, sizeof(params));
1580         mutex_lock(&dev->dev_lock);
1581         status = ocrdma_mbx_query_qp(dev, qp, &params);
1582         mutex_unlock(&dev->dev_lock);
1583         if (status)
1584                 goto mbx_err;
1585         if (qp->qp_type == IB_QPT_UD)
1586                 qp_attr->qkey = params.qkey;
1587         qp_attr->path_mtu =
1588                 ocrdma_mtu_int_to_enum((params.path_mtu_pkey_indx &
1589                                 OCRDMA_QP_PARAMS_PATH_MTU_MASK) >>
1590                                 OCRDMA_QP_PARAMS_PATH_MTU_SHIFT);
1591         qp_attr->path_mig_state = IB_MIG_MIGRATED;
1592         qp_attr->rq_psn = params.hop_lmt_rq_psn & OCRDMA_QP_PARAMS_RQ_PSN_MASK;
1593         qp_attr->sq_psn = params.tclass_sq_psn & OCRDMA_QP_PARAMS_SQ_PSN_MASK;
1594         qp_attr->dest_qp_num =
1595             params.ack_to_rnr_rtc_dest_qpn & OCRDMA_QP_PARAMS_DEST_QPN_MASK;
1596
1597         qp_attr->qp_access_flags = ocrdma_to_ib_qp_acc_flags(qp->cap_flags);
1598         qp_attr->cap.max_send_wr = qp->sq.max_cnt - 1;
1599         qp_attr->cap.max_recv_wr = qp->rq.max_cnt - 1;
1600         qp_attr->cap.max_send_sge = qp->sq.max_sges;
1601         qp_attr->cap.max_recv_sge = qp->rq.max_sges;
1602         qp_attr->cap.max_inline_data = qp->max_inline_data;
1603         qp_init_attr->cap = qp_attr->cap;
1604         memcpy(&qp_attr->ah_attr.grh.dgid, &params.dgid[0],
1605                sizeof(params.dgid));
1606         qp_attr->ah_attr.grh.flow_label = params.rnt_rc_sl_fl &
1607             OCRDMA_QP_PARAMS_FLOW_LABEL_MASK;
1608         qp_attr->ah_attr.grh.sgid_index = qp->sgid_idx;
1609         qp_attr->ah_attr.grh.hop_limit = (params.hop_lmt_rq_psn &
1610                                           OCRDMA_QP_PARAMS_HOP_LMT_MASK) >>
1611                                                 OCRDMA_QP_PARAMS_HOP_LMT_SHIFT;
1612         qp_attr->ah_attr.grh.traffic_class = (params.tclass_sq_psn &
1613                                               OCRDMA_QP_PARAMS_TCLASS_MASK) >>
1614                                                 OCRDMA_QP_PARAMS_TCLASS_SHIFT;
1615
1616         qp_attr->ah_attr.ah_flags = IB_AH_GRH;
1617         qp_attr->ah_attr.port_num = 1;
1618         qp_attr->ah_attr.sl = (params.rnt_rc_sl_fl &
1619                                OCRDMA_QP_PARAMS_SL_MASK) >>
1620                                 OCRDMA_QP_PARAMS_SL_SHIFT;
1621         qp_attr->timeout = (params.ack_to_rnr_rtc_dest_qpn &
1622                             OCRDMA_QP_PARAMS_ACK_TIMEOUT_MASK) >>
1623                                 OCRDMA_QP_PARAMS_ACK_TIMEOUT_SHIFT;
1624         qp_attr->rnr_retry = (params.ack_to_rnr_rtc_dest_qpn &
1625                               OCRDMA_QP_PARAMS_RNR_RETRY_CNT_MASK) >>
1626                                 OCRDMA_QP_PARAMS_RNR_RETRY_CNT_SHIFT;
1627         qp_attr->retry_cnt =
1628             (params.rnt_rc_sl_fl & OCRDMA_QP_PARAMS_RETRY_CNT_MASK) >>
1629                 OCRDMA_QP_PARAMS_RETRY_CNT_SHIFT;
1630         qp_attr->min_rnr_timer = 0;
1631         qp_attr->pkey_index = 0;
1632         qp_attr->port_num = 1;
1633         qp_attr->ah_attr.src_path_bits = 0;
1634         qp_attr->ah_attr.static_rate = 0;
1635         qp_attr->alt_pkey_index = 0;
1636         qp_attr->alt_port_num = 0;
1637         qp_attr->alt_timeout = 0;
1638         memset(&qp_attr->alt_ah_attr, 0, sizeof(qp_attr->alt_ah_attr));
1639         qp_state = (params.max_sge_recv_flags & OCRDMA_QP_PARAMS_STATE_MASK) >>
1640                     OCRDMA_QP_PARAMS_STATE_SHIFT;
1641         qp_attr->qp_state = get_ibqp_state(qp_state);
1642         qp_attr->cur_qp_state = qp_attr->qp_state;
1643         qp_attr->sq_draining = (qp_state == OCRDMA_QPS_SQ_DRAINING) ? 1 : 0;
1644         qp_attr->max_dest_rd_atomic =
1645             params.max_ord_ird >> OCRDMA_QP_PARAMS_MAX_ORD_SHIFT;
1646         qp_attr->max_rd_atomic =
1647             params.max_ord_ird & OCRDMA_QP_PARAMS_MAX_IRD_MASK;
1648         qp_attr->en_sqd_async_notify = (params.max_sge_recv_flags &
1649                                 OCRDMA_QP_PARAMS_FLAGS_SQD_ASYNC) ? 1 : 0;
1650         /* Sync driver QP state with FW */
1651         ocrdma_qp_state_change(qp, qp_attr->qp_state, NULL);
1652 mbx_err:
1653         return status;
1654 }
1655
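/* The SRQ wr_id shadow table is index-addressed: a set bit in
 * idx_bit_fields marks a free slot. Toggling a bit either allocates a
 * slot (see ocrdma_srq_get_idx()) or frees it once its CQE is reaped.
 */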
1656 static void ocrdma_srq_toggle_bit(struct ocrdma_srq *srq, unsigned int idx)
1657 {
1658         unsigned int i = idx / 32;
1659         u32 mask = (1U << (idx % 32));
1660
1661         srq->idx_bit_fields[i] ^= mask;
1662 }
1663
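/* Circular hw queue helpers: head advances as WQEs/RQEs are posted and
 * tail advances as their CQEs are reaped; both wrap using the
 * max_wqe_idx mask. The free count keeps head from overrunning tail.
 */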
1664 static int ocrdma_hwq_free_cnt(struct ocrdma_qp_hwq_info *q)
1665 {
1666         return ((q->max_wqe_idx - q->head) + q->tail) % q->max_cnt;
1667 }
1668
1669 static int is_hw_sq_empty(struct ocrdma_qp *qp)
1670 {
1671         return (qp->sq.tail == qp->sq.head);
1672 }
1673
1674 static int is_hw_rq_empty(struct ocrdma_qp *qp)
1675 {
1676         return (qp->rq.tail == qp->rq.head);
1677 }
1678
1679 static void *ocrdma_hwq_head(struct ocrdma_qp_hwq_info *q)
1680 {
1681         return q->va + (q->head * q->entry_size);
1682 }
1683
1684 static void *ocrdma_hwq_head_from_idx(struct ocrdma_qp_hwq_info *q,
1685                                       u32 idx)
1686 {
1687         return q->va + (idx * q->entry_size);
1688 }
1689
1690 static void ocrdma_hwq_inc_head(struct ocrdma_qp_hwq_info *q)
1691 {
1692         q->head = (q->head + 1) & q->max_wqe_idx;
1693 }
1694
1695 static void ocrdma_hwq_inc_tail(struct ocrdma_qp_hwq_info *q)
1696 {
1697         q->tail = (q->tail + 1) & q->max_wqe_idx;
1698 }
1699
1700 /* discard the cqe for a given QP */
1701 static void ocrdma_discard_cqes(struct ocrdma_qp *qp, struct ocrdma_cq *cq)
1702 {
1703         unsigned long cq_flags;
1704         unsigned long flags;
1705         int discard_cnt = 0;
1706         u32 cur_getp, stop_getp;
1707         struct ocrdma_cqe *cqe;
1708         u32 qpn = 0, wqe_idx = 0;
1709
1710         spin_lock_irqsave(&cq->cq_lock, cq_flags);
1711
1712         /* traverse the CQEs in the hw CQ,
1713          * find the CQEs matching the given qp,
1714          * and mark them discarded by clearing the qpn.
1715          * the doorbell is rung in poll_cq() since
1716          * we don't complete cqes out of order.
1717          */
1718
1719         cur_getp = cq->getp;
1720         /* find up to where we reap the cq. */
1721         stop_getp = cur_getp;
1722         do {
1723                 if (is_hw_sq_empty(qp) && (!qp->srq && is_hw_rq_empty(qp)))
1724                         break;
1725
1726                 cqe = cq->va + cur_getp;
1727                 /* exit if (a) we are done reaping the whole hw cq, or
1728                  *    (b) the qp's sq/rq becomes empty
1729                  * (both conditions are checked by the loop itself).
1730                  */
1731                 qpn = cqe->cmn.qpn & OCRDMA_CQE_QPN_MASK;
1732                 /* if previously discarded cqe found, skip that too. */
1733                 /* check for matching qp */
1734                 if (qpn == 0 || qpn != qp->id)
1735                         goto skip_cqe;
1736
1737                 if (is_cqe_for_sq(cqe)) {
1738                         ocrdma_hwq_inc_tail(&qp->sq);
1739                 } else {
1740                         if (qp->srq) {
1741                                 wqe_idx = (le32_to_cpu(cqe->rq.buftag_qpn) >>
1742                                         OCRDMA_CQE_BUFTAG_SHIFT) &
1743                                         qp->srq->rq.max_wqe_idx;
1744                                 if (wqe_idx < 1)
1745                                         BUG();
1746                                 spin_lock_irqsave(&qp->srq->q_lock, flags);
1747                                 ocrdma_hwq_inc_tail(&qp->srq->rq);
1748                                 ocrdma_srq_toggle_bit(qp->srq, wqe_idx - 1);
1749                                 spin_unlock_irqrestore(&qp->srq->q_lock, flags);
1750
1751                         } else {
1752                                 ocrdma_hwq_inc_tail(&qp->rq);
1753                         }
1754                 }
1755                 /* mark cqe discarded so that it is not picked up later
1756                  * in the poll_cq().
1757                  */
1758                 discard_cnt += 1;
1759                 cqe->cmn.qpn = 0;
1760 skip_cqe:
1761                 cur_getp = (cur_getp + 1) % cq->max_hw_cqe;
1762         } while (cur_getp != stop_getp);
1763         spin_unlock_irqrestore(&cq->cq_lock, cq_flags);
1764 }
1765
1766 void ocrdma_del_flush_qp(struct ocrdma_qp *qp)
1767 {
1768         bool found = false;
1769         unsigned long flags;
1770         struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device);
1771         /* sync with any active CQ poll */
1772
1773         spin_lock_irqsave(&dev->flush_q_lock, flags);
1774         found = ocrdma_is_qp_in_sq_flushlist(qp->sq_cq, qp);
1775         if (found)
1776                 list_del(&qp->sq_entry);
1777         if (!qp->srq) {
1778                 found = ocrdma_is_qp_in_rq_flushlist(qp->rq_cq, qp);
1779                 if (found)
1780                         list_del(&qp->rq_entry);
1781         }
1782         spin_unlock_irqrestore(&dev->flush_q_lock, flags);
1783 }
1784
1785 int ocrdma_destroy_qp(struct ib_qp *ibqp)
1786 {
1787         struct ocrdma_pd *pd;
1788         struct ocrdma_qp *qp;
1789         struct ocrdma_dev *dev;
1790         struct ib_qp_attr attrs;
1791         int attr_mask;
1792         unsigned long flags;
1793
1794         qp = get_ocrdma_qp(ibqp);
1795         dev = get_ocrdma_dev(ibqp->device);
1796
1797         pd = qp->pd;
1798
1799         /* change the QP state to ERROR */
1800         if (qp->state != OCRDMA_QPS_RST) {
1801                 attrs.qp_state = IB_QPS_ERR;
1802                 attr_mask = IB_QP_STATE;
1803                 _ocrdma_modify_qp(ibqp, &attrs, attr_mask);
1804         }
1805         /* ensure that CQEs for a newly created QP (whose id may be the same
1806          * as that of the QP just being destroyed) don't get discarded until
1807          * the old QP's CQEs are discarded.
1808          */
1809         mutex_lock(&dev->dev_lock);
1810         (void) ocrdma_mbx_destroy_qp(dev, qp);
1811
1812         /*
1813          * acquire CQ lock while destroy is in progress, in order to
1814          * protect against processing in-flight CQEs for this QP.
1815          */
1816         spin_lock_irqsave(&qp->sq_cq->cq_lock, flags);
1817         if (qp->rq_cq && (qp->rq_cq != qp->sq_cq))
1818                 spin_lock(&qp->rq_cq->cq_lock);
1819
1820         ocrdma_del_qpn_map(dev, qp);
1821
1822         if (qp->rq_cq && (qp->rq_cq != qp->sq_cq))
1823                 spin_unlock(&qp->rq_cq->cq_lock);
1824         spin_unlock_irqrestore(&qp->sq_cq->cq_lock, flags);
1825
1826         if (!pd->uctx) {
1827                 ocrdma_discard_cqes(qp, qp->sq_cq);
1828                 ocrdma_discard_cqes(qp, qp->rq_cq);
1829         }
1830         mutex_unlock(&dev->dev_lock);
1831
1832         if (pd->uctx) {
1833                 ocrdma_del_mmap(pd->uctx, (u64) qp->sq.pa,
1834                                 PAGE_ALIGN(qp->sq.len));
1835                 if (!qp->srq)
1836                         ocrdma_del_mmap(pd->uctx, (u64) qp->rq.pa,
1837                                         PAGE_ALIGN(qp->rq.len));
1838         }
1839
1840         ocrdma_del_flush_qp(qp);
1841
1842         kfree(qp->wqe_wr_id_tbl);
1843         kfree(qp->rqe_wr_id_tbl);
1844         kfree(qp);
1845         return 0;
1846 }
1847
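/* Tell user space where the SRQ lives: the RQ buffer physical address
 * (also added to the ucontext's mmap list), the doorbell page and the
 * ASIC-specific doorbell offset/shift, so the user library can post
 * RQEs directly.
 */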
1848 static int ocrdma_copy_srq_uresp(struct ocrdma_dev *dev, struct ocrdma_srq *srq,
1849                                 struct ib_udata *udata)
1850 {
1851         int status;
1852         struct ocrdma_create_srq_uresp uresp;
1853
1854         memset(&uresp, 0, sizeof(uresp));
1855         uresp.rq_dbid = srq->rq.dbid;
1856         uresp.num_rq_pages = 1;
1857         uresp.rq_page_addr[0] = virt_to_phys(srq->rq.va);
1858         uresp.rq_page_size = srq->rq.len;
1859         uresp.db_page_addr = dev->nic_info.unmapped_db +
1860             (srq->pd->id * dev->nic_info.db_page_size);
1861         uresp.db_page_size = dev->nic_info.db_page_size;
1862         uresp.num_rqe_allocated = srq->rq.max_cnt;
1863         if (ocrdma_get_asic_type(dev) == OCRDMA_ASIC_GEN_SKH_R) {
1864                 uresp.db_rq_offset = OCRDMA_DB_GEN2_RQ_OFFSET;
1865                 uresp.db_shift = 24;
1866         } else {
1867                 uresp.db_rq_offset = OCRDMA_DB_RQ_OFFSET;
1868                 uresp.db_shift = 16;
1869         }
1870
1871         status = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
1872         if (status)
1873                 return status;
1874         return ocrdma_add_mmap(srq->pd->uctx, uresp.rq_page_addr[0],
1875                                  uresp.rq_page_size);
1879 }
1880
1881 struct ib_srq *ocrdma_create_srq(struct ib_pd *ibpd,
1882                                  struct ib_srq_init_attr *init_attr,
1883                                  struct ib_udata *udata)
1884 {
1885         int status = -ENOMEM;
1886         struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
1887         struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
1888         struct ocrdma_srq *srq;
1889
1890         if (init_attr->attr.max_sge > dev->attr.max_recv_sge)
1891                 return ERR_PTR(-EINVAL);
1892         if (init_attr->attr.max_wr > dev->attr.max_rqe)
1893                 return ERR_PTR(-EINVAL);
1894
1895         srq = kzalloc(sizeof(*srq), GFP_KERNEL);
1896         if (!srq)
1897                 return ERR_PTR(status);
1898
1899         spin_lock_init(&srq->q_lock);
1900         srq->pd = pd;
1901         srq->db = dev->nic_info.db + (pd->id * dev->nic_info.db_page_size);
1902         status = ocrdma_mbx_create_srq(dev, srq, init_attr, pd);
1903         if (status)
1904                 goto err;
1905
1906         if (udata == NULL) {
1907                 srq->rqe_wr_id_tbl = kcalloc(srq->rq.max_cnt, sizeof(u64),
1908                             GFP_KERNEL);
1909                 if (srq->rqe_wr_id_tbl == NULL)
1910                         goto arm_err;
1911
1912                 srq->bit_fields_len = (srq->rq.max_cnt / 32) +
1913                     (srq->rq.max_cnt % 32 ? 1 : 0);
1914                 srq->idx_bit_fields = kmalloc_array(srq->bit_fields_len,
1915                                             sizeof(u32), GFP_KERNEL);
1916                 if (srq->idx_bit_fields == NULL)
1917                         goto arm_err;
1918                 memset(srq->idx_bit_fields, 0xff,
1919                        srq->bit_fields_len * sizeof(u32));
1920         }
1921
1922         if (init_attr->attr.srq_limit) {
1923                 status = ocrdma_mbx_modify_srq(srq, &init_attr->attr);
1924                 if (status)
1925                         goto arm_err;
1926         }
1927
1928         if (udata) {
1929                 status = ocrdma_copy_srq_uresp(dev, srq, udata);
1930                 if (status)
1931                         goto arm_err;
1932         }
1933
1934         return &srq->ibsrq;
1935
1936 arm_err:
1937         ocrdma_mbx_destroy_srq(dev, srq);
1938 err:
1939         kfree(srq->rqe_wr_id_tbl);
1940         kfree(srq->idx_bit_fields);
1941         kfree(srq);
1942         return ERR_PTR(status);
1943 }
1944
1945 int ocrdma_modify_srq(struct ib_srq *ibsrq,
1946                       struct ib_srq_attr *srq_attr,
1947                       enum ib_srq_attr_mask srq_attr_mask,
1948                       struct ib_udata *udata)
1949 {
1950         int status;
1951         struct ocrdma_srq *srq;
1952
1953         srq = get_ocrdma_srq(ibsrq);
1954         if (srq_attr_mask & IB_SRQ_MAX_WR)
1955                 status = -EINVAL;
1956         else
1957                 status = ocrdma_mbx_modify_srq(srq, srq_attr);
1958         return status;
1959 }
1960
1961 int ocrdma_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr)
1962 {
1963         int status;
1964         struct ocrdma_srq *srq;
1965
1966         srq = get_ocrdma_srq(ibsrq);
1967         status = ocrdma_mbx_query_srq(srq, srq_attr);
1968         return status;
1969 }
1970
1971 int ocrdma_destroy_srq(struct ib_srq *ibsrq)
1972 {
1973         int status;
1974         struct ocrdma_srq *srq;
1975         struct ocrdma_dev *dev = get_ocrdma_dev(ibsrq->device);
1976
1977         srq = get_ocrdma_srq(ibsrq);
1978
1979         status = ocrdma_mbx_destroy_srq(dev, srq);
1980
1981         if (srq->pd->uctx)
1982                 ocrdma_del_mmap(srq->pd->uctx, (u64) srq->rq.pa,
1983                                 PAGE_ALIGN(srq->rq.len));
1984
1985         kfree(srq->idx_bit_fields);
1986         kfree(srq->rqe_wr_id_tbl);
1987         kfree(srq);
1988         return status;
1989 }
1990
1991 /* unprivileged verbs and their support functions. */
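/* UD and GSI sends carry an extended WQE header holding the
 * destination QPN, the qkey and the AH id; GSI traffic always uses the
 * QP's own qkey.
 */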
1992 static void ocrdma_build_ud_hdr(struct ocrdma_qp *qp,
1993                                 struct ocrdma_hdr_wqe *hdr,
1994                                 struct ib_send_wr *wr)
1995 {
1996         struct ocrdma_ewqe_ud_hdr *ud_hdr =
1997                 (struct ocrdma_ewqe_ud_hdr *)(hdr + 1);
1998         struct ocrdma_ah *ah = get_ocrdma_ah(ud_wr(wr)->ah);
1999
2000         ud_hdr->rsvd_dest_qpn = ud_wr(wr)->remote_qpn;
2001         if (qp->qp_type == IB_QPT_GSI)
2002                 ud_hdr->qkey = qp->qkey;
2003         else
2004                 ud_hdr->qkey = ud_wr(wr)->remote_qkey;
2005         ud_hdr->rsvd_ahid = ah->id;
2006         ud_hdr->hdr_type = ah->hdr_type;
2007         if (ah->av->valid & OCRDMA_AV_VLAN_VALID)
2008                 hdr->cw |= (OCRDMA_FLAG_AH_VLAN_PR << OCRDMA_WQE_FLAGS_SHIFT);
2009 }
2010
2011 static void ocrdma_build_sges(struct ocrdma_hdr_wqe *hdr,
2012                               struct ocrdma_sge *sge, int num_sge,
2013                               struct ib_sge *sg_list)
2014 {
2015         int i;
2016
2017         for (i = 0; i < num_sge; i++) {
2018                 sge[i].lrkey = sg_list[i].lkey;
2019                 sge[i].addr_lo = sg_list[i].addr;
2020                 sge[i].addr_hi = upper_32_bits(sg_list[i].addr);
2021                 sge[i].len = sg_list[i].length;
2022                 hdr->total_len += sg_list[i].length;
2023         }
2024         if (num_sge == 0)
2025                 memset(sge, 0, sizeof(*sge));
2026 }
2027
2028 static inline uint32_t ocrdma_sglist_len(struct ib_sge *sg_list, int num_sge)
2029 {
2030         uint32_t total_len = 0, i;
2031
2032         for (i = 0; i < num_sge; i++)
2033                 total_len += sg_list[i].length;
2034         return total_len;
2035 }
2036
2037
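/* Inline sends copy the payload bytes directly into the WQE instead of
 * referencing them through SGEs; requests larger than max_inline_data,
 * and all UD sends, take the ordinary lkey/SGE path instead.
 */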
2038 static int ocrdma_build_inline_sges(struct ocrdma_qp *qp,
2039                                     struct ocrdma_hdr_wqe *hdr,
2040                                     struct ocrdma_sge *sge,
2041                                     struct ib_send_wr *wr, u32 wqe_size)
2042 {
2043         int i;
2044         char *dpp_addr;
2045
2046         if ((wr->send_flags & IB_SEND_INLINE) && qp->qp_type != IB_QPT_UD) {
2047                 hdr->total_len = ocrdma_sglist_len(wr->sg_list, wr->num_sge);
2048                 if (unlikely(hdr->total_len > qp->max_inline_data)) {
2049                         pr_err("%s() supported_len=0x%x, unsupported len req=0x%x\n",
2050                                __func__, qp->max_inline_data,
2051                                hdr->total_len);
2052                         return -EINVAL;
2053                 }
2054                 dpp_addr = (char *)sge;
2055                 for (i = 0; i < wr->num_sge; i++) {
2056                         memcpy(dpp_addr,
2057                                (void *)(unsigned long)wr->sg_list[i].addr,
2058                                wr->sg_list[i].length);
2059                         dpp_addr += wr->sg_list[i].length;
2060                 }
2061
2062                 wqe_size += roundup(hdr->total_len, OCRDMA_WQE_ALIGN_BYTES);
2063                 if (hdr->total_len == 0)
2064                         wqe_size += sizeof(struct ocrdma_sge);
2065                 hdr->cw |= (OCRDMA_TYPE_INLINE << OCRDMA_WQE_TYPE_SHIFT);
2066         } else {
2067                 ocrdma_build_sges(hdr, sge, wr->num_sge, wr->sg_list);
2068                 if (wr->num_sge)
2069                         wqe_size += (wr->num_sge * sizeof(struct ocrdma_sge));
2070                 else
2071                         wqe_size += sizeof(struct ocrdma_sge);
2072                 hdr->cw |= (OCRDMA_TYPE_LKEY << OCRDMA_WQE_TYPE_SHIFT);
2073         }
2074         hdr->cw |= ((wqe_size / OCRDMA_WQE_STRIDE) << OCRDMA_WQE_SIZE_SHIFT);
2075         return 0;
2076 }
2077
2078 static int ocrdma_build_send(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
2079                              struct ib_send_wr *wr)
2080 {
2081         int status;
2082         struct ocrdma_sge *sge;
2083         u32 wqe_size = sizeof(*hdr);
2084
2085         if (qp->qp_type == IB_QPT_UD || qp->qp_type == IB_QPT_GSI) {
2086                 ocrdma_build_ud_hdr(qp, hdr, wr);
2087                 sge = (struct ocrdma_sge *)(hdr + 2);
2088                 wqe_size += sizeof(struct ocrdma_ewqe_ud_hdr);
2089         } else {
2090                 sge = (struct ocrdma_sge *)(hdr + 1);
2091         }
2092
2093         status = ocrdma_build_inline_sges(qp, hdr, sge, wr, wqe_size);
2094         return status;
2095 }
2096
2097 static int ocrdma_build_write(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
2098                               struct ib_send_wr *wr)
2099 {
2100         int status;
2101         struct ocrdma_sge *ext_rw = (struct ocrdma_sge *)(hdr + 1);
2102         struct ocrdma_sge *sge = ext_rw + 1;
2103         u32 wqe_size = sizeof(*hdr) + sizeof(*ext_rw);
2104
2105         status = ocrdma_build_inline_sges(qp, hdr, sge, wr, wqe_size);
2106         if (status)
2107                 return status;
2108         ext_rw->addr_lo = rdma_wr(wr)->remote_addr;
2109         ext_rw->addr_hi = upper_32_bits(rdma_wr(wr)->remote_addr);
2110         ext_rw->lrkey = rdma_wr(wr)->rkey;
2111         ext_rw->len = hdr->total_len;
2112         return 0;
2113 }
2114
2115 static void ocrdma_build_read(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
2116                               struct ib_send_wr *wr)
2117 {
2118         struct ocrdma_sge *ext_rw = (struct ocrdma_sge *)(hdr + 1);
2119         struct ocrdma_sge *sge = ext_rw + 1;
2120         u32 wqe_size = ((wr->num_sge + 1) * sizeof(struct ocrdma_sge)) +
2121             sizeof(struct ocrdma_hdr_wqe);
2122
2123         ocrdma_build_sges(hdr, sge, wr->num_sge, wr->sg_list);
2124         hdr->cw |= ((wqe_size / OCRDMA_WQE_STRIDE) << OCRDMA_WQE_SIZE_SHIFT);
2125         hdr->cw |= (OCRDMA_READ << OCRDMA_WQE_OPCODE_SHIFT);
2126         hdr->cw |= (OCRDMA_TYPE_LKEY << OCRDMA_WQE_TYPE_SHIFT);
2127
2128         ext_rw->addr_lo = rdma_wr(wr)->remote_addr;
2129         ext_rw->addr_hi = upper_32_bits(rdma_wr(wr)->remote_addr);
2130         ext_rw->lrkey = rdma_wr(wr)->rkey;
2131         ext_rw->len = hdr->total_len;
2132 }
2133
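/* The encoding below is log2(pg_sz / 4096): 4096 -> 0, 8192 -> 1, ...,
 * 4096 << 16 (256MB) -> 16.
 */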
2134 static int get_encoded_page_size(int pg_sz)
2135 {
2136         /* Max encodable page size is 256MB, i.e. 4096 << 16 */
2137         int i = 0;
2138         for (; i < 17; i++)
2139                 if (pg_sz == (4096 << i))
2140                         break;
2141         return i;
2142 }
2143
2144 static int ocrdma_build_reg(struct ocrdma_qp *qp,
2145                             struct ocrdma_hdr_wqe *hdr,
2146                             struct ib_reg_wr *wr)
2147 {
2148         u64 fbo;
2149         struct ocrdma_ewqe_fr *fast_reg = (struct ocrdma_ewqe_fr *)(hdr + 1);
2150         struct ocrdma_mr *mr = get_ocrdma_mr(wr->mr);
2151         struct ocrdma_pbl *pbl_tbl = mr->hwmr.pbl_table;
2152         struct ocrdma_pbe *pbe;
2153         u32 wqe_size = sizeof(*fast_reg) + sizeof(*hdr);
2154         int num_pbes = 0, i;
2155
2156         wqe_size = roundup(wqe_size, OCRDMA_WQE_ALIGN_BYTES);
2157
2158         hdr->cw |= (OCRDMA_FR_MR << OCRDMA_WQE_OPCODE_SHIFT);
2159         hdr->cw |= ((wqe_size / OCRDMA_WQE_STRIDE) << OCRDMA_WQE_SIZE_SHIFT);
2160
2161         if (wr->access & IB_ACCESS_LOCAL_WRITE)
2162                 hdr->rsvd_lkey_flags |= OCRDMA_LKEY_FLAG_LOCAL_WR;
2163         if (wr->access & IB_ACCESS_REMOTE_WRITE)
2164                 hdr->rsvd_lkey_flags |= OCRDMA_LKEY_FLAG_REMOTE_WR;
2165         if (wr->access & IB_ACCESS_REMOTE_READ)
2166                 hdr->rsvd_lkey_flags |= OCRDMA_LKEY_FLAG_REMOTE_RD;
2167         hdr->lkey = wr->key;
2168         hdr->total_len = mr->ibmr.length;
2169
2170         fbo = mr->ibmr.iova - mr->pages[0];
2171
2172         fast_reg->va_hi = upper_32_bits(mr->ibmr.iova);
2173         fast_reg->va_lo = (u32) (mr->ibmr.iova & 0xffffffff);
2174         fast_reg->fbo_hi = upper_32_bits(fbo);
2175         fast_reg->fbo_lo = (u32) fbo & 0xffffffff;
2176         fast_reg->num_sges = mr->npages;
2177         fast_reg->size_sge = get_encoded_page_size(mr->ibmr.page_size);
2178
2179         pbe = pbl_tbl->va;
2180         for (i = 0; i < mr->npages; i++) {
2181                 u64 buf_addr = mr->pages[i];
2182
2183                 pbe->pa_lo = cpu_to_le32((u32) (buf_addr & PAGE_MASK));
2184                 pbe->pa_hi = cpu_to_le32((u32) upper_32_bits(buf_addr));
2185                 num_pbes += 1;
2186                 pbe++;
2187
2188                 /* if this pbl is full of pbes, move to the next pbl and
2189                  * restart the per-pbl count.
2190                  */
2191                 if (num_pbes == (mr->hwmr.pbl_size / sizeof(u64))) {
2192                         pbl_tbl++;
2193                         pbe = (struct ocrdma_pbe *)pbl_tbl->va;
                             num_pbes = 0;
2194                 }
2195         }
2196
2197         return 0;
2198 }
2199
2200 static void ocrdma_ring_sq_db(struct ocrdma_qp *qp)
2201 {
2202         u32 val = qp->sq.dbid | (1 << OCRDMA_DB_SQ_SHIFT);
2203
2204         iowrite32(val, qp->sq_db);
2205 }
2206
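/* Post-send fast path: for each WR build the WQE at sq.head, record
 * the wr_id and signaled bit in the shadow table, convert the WQE to
 * little endian, then a write barrier followed by a doorbell write
 * hands it to the adapter.
 */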
2207 int ocrdma_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
2208                      struct ib_send_wr **bad_wr)
2209 {
2210         int status = 0;
2211         struct ocrdma_qp *qp = get_ocrdma_qp(ibqp);
2212         struct ocrdma_hdr_wqe *hdr;
2213         unsigned long flags;
2214
2215         spin_lock_irqsave(&qp->q_lock, flags);
2216         if (qp->state != OCRDMA_QPS_RTS && qp->state != OCRDMA_QPS_SQD) {
2217                 spin_unlock_irqrestore(&qp->q_lock, flags);
2218                 *bad_wr = wr;
2219                 return -EINVAL;
2220         }
2221
2222         while (wr) {
2223                 if (qp->qp_type == IB_QPT_UD &&
2224                     (wr->opcode != IB_WR_SEND &&
2225                      wr->opcode != IB_WR_SEND_WITH_IMM)) {
2226                         *bad_wr = wr;
2227                         status = -EINVAL;
2228                         break;
2229                 }
2230                 if (ocrdma_hwq_free_cnt(&qp->sq) == 0 ||
2231                     wr->num_sge > qp->sq.max_sges) {
2232                         *bad_wr = wr;
2233                         status = -ENOMEM;
2234                         break;
2235                 }
2236                 hdr = ocrdma_hwq_head(&qp->sq);
2237                 hdr->cw = 0;
2238                 if (wr->send_flags & IB_SEND_SIGNALED || qp->signaled)
2239                         hdr->cw |= (OCRDMA_FLAG_SIG << OCRDMA_WQE_FLAGS_SHIFT);
2240                 if (wr->send_flags & IB_SEND_FENCE)
2241                         hdr->cw |=
2242                             (OCRDMA_FLAG_FENCE_L << OCRDMA_WQE_FLAGS_SHIFT);
2243                 if (wr->send_flags & IB_SEND_SOLICITED)
2244                         hdr->cw |=
2245                             (OCRDMA_FLAG_SOLICIT << OCRDMA_WQE_FLAGS_SHIFT);
2246                 hdr->total_len = 0;
2247                 switch (wr->opcode) {
2248                 case IB_WR_SEND_WITH_IMM:
2249                         hdr->cw |= (OCRDMA_FLAG_IMM << OCRDMA_WQE_FLAGS_SHIFT);
2250                         hdr->immdt = ntohl(wr->ex.imm_data);
                             /* fall through */
2251                 case IB_WR_SEND:
2252                         hdr->cw |= (OCRDMA_SEND << OCRDMA_WQE_OPCODE_SHIFT);
2253                         status = ocrdma_build_send(qp, hdr, wr);
2254                         break;
2255                 case IB_WR_SEND_WITH_INV:
2256                         hdr->cw |= (OCRDMA_FLAG_INV << OCRDMA_WQE_FLAGS_SHIFT);
2257                         hdr->cw |= (OCRDMA_SEND << OCRDMA_WQE_OPCODE_SHIFT);
2258                         hdr->lkey = wr->ex.invalidate_rkey;
2259                         status = ocrdma_build_send(qp, hdr, wr);
2260                         break;
2261                 case IB_WR_RDMA_WRITE_WITH_IMM:
2262                         hdr->cw |= (OCRDMA_FLAG_IMM << OCRDMA_WQE_FLAGS_SHIFT);
2263                         hdr->immdt = ntohl(wr->ex.imm_data);
                             /* fall through */
2264                 case IB_WR_RDMA_WRITE:
2265                         hdr->cw |= (OCRDMA_WRITE << OCRDMA_WQE_OPCODE_SHIFT);
2266                         status = ocrdma_build_write(qp, hdr, wr);
2267                         break;
2268                 case IB_WR_RDMA_READ:
2269                         ocrdma_build_read(qp, hdr, wr);
2270                         break;
2271                 case IB_WR_LOCAL_INV:
2272                         hdr->cw |=
2273                             (OCRDMA_LKEY_INV << OCRDMA_WQE_OPCODE_SHIFT);
2274                         hdr->cw |= ((sizeof(struct ocrdma_hdr_wqe) +
2275                                         sizeof(struct ocrdma_sge)) /
2276                                 OCRDMA_WQE_STRIDE) << OCRDMA_WQE_SIZE_SHIFT;
2277                         hdr->lkey = wr->ex.invalidate_rkey;
2278                         break;
2279                 case IB_WR_REG_MR:
2280                         status = ocrdma_build_reg(qp, hdr, reg_wr(wr));
2281                         break;
2282                 default:
2283                         status = -EINVAL;
2284                         break;
2285                 }
2286                 if (status) {
2287                         *bad_wr = wr;
2288                         break;
2289                 }
2290                 if (wr->send_flags & IB_SEND_SIGNALED || qp->signaled)
2291                         qp->wqe_wr_id_tbl[qp->sq.head].signaled = 1;
2292                 else
2293                         qp->wqe_wr_id_tbl[qp->sq.head].signaled = 0;
2294                 qp->wqe_wr_id_tbl[qp->sq.head].wrid = wr->wr_id;
2295                 ocrdma_cpu_to_le32(hdr, ((hdr->cw >> OCRDMA_WQE_SIZE_SHIFT) &
2296                                    OCRDMA_WQE_SIZE_MASK) * OCRDMA_WQE_STRIDE);
2297                 /* make sure wqe is written before adapter can access it */
2298                 wmb();
2299                 /* inform hw to start processing it */
2300                 ocrdma_ring_sq_db(qp);
2301
2302                 /* update pointer, counter for next wr */
2303                 ocrdma_hwq_inc_head(&qp->sq);
2304                 wr = wr->next;
2305         }
2306         spin_unlock_irqrestore(&qp->q_lock, flags);
2307         return status;
2308 }
2309
2310 static void ocrdma_ring_rq_db(struct ocrdma_qp *qp)
2311 {
2312         u32 val = qp->rq.dbid | (1 << OCRDMA_DB_RQ_SHIFT);
2313
2314         iowrite32(val, qp->rq_db);
2315 }
2316
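/* RQEs carry only SGEs plus a tag: for SRQs the tag indexes the shadow
 * wr_id table; ordinary RQs pass a zero tag and key the wr_id by ring
 * index instead.
 */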
2317 static void ocrdma_build_rqe(struct ocrdma_hdr_wqe *rqe, struct ib_recv_wr *wr,
2318                              u16 tag)
2319 {
2320         u32 wqe_size = 0;
2321         struct ocrdma_sge *sge;
2322         if (wr->num_sge)
2323                 wqe_size = (wr->num_sge * sizeof(*sge)) + sizeof(*rqe);
2324         else
2325                 wqe_size = sizeof(*sge) + sizeof(*rqe);
2326
2327         rqe->cw = ((wqe_size / OCRDMA_WQE_STRIDE) <<
2328                                 OCRDMA_WQE_SIZE_SHIFT);
2329         rqe->cw |= (OCRDMA_FLAG_SIG << OCRDMA_WQE_FLAGS_SHIFT);
2330         rqe->cw |= (OCRDMA_TYPE_LKEY << OCRDMA_WQE_TYPE_SHIFT);
2331         rqe->total_len = 0;
2332         rqe->rsvd_tag = tag;
2333         sge = (struct ocrdma_sge *)(rqe + 1);
2334         ocrdma_build_sges(rqe, sge, wr->num_sge, wr->sg_list);
2335         ocrdma_cpu_to_le32(rqe, wqe_size);
2336 }
2337
2338 int ocrdma_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
2339                      struct ib_recv_wr **bad_wr)
2340 {
2341         int status = 0;
2342         unsigned long flags;
2343         struct ocrdma_qp *qp = get_ocrdma_qp(ibqp);
2344         struct ocrdma_hdr_wqe *rqe;
2345
2346         spin_lock_irqsave(&qp->q_lock, flags);
2347         if (qp->state == OCRDMA_QPS_RST || qp->state == OCRDMA_QPS_ERR) {
2348                 spin_unlock_irqrestore(&qp->q_lock, flags);
2349                 *bad_wr = wr;
2350                 return -EINVAL;
2351         }
2352         while (wr) {
2353                 if (ocrdma_hwq_free_cnt(&qp->rq) == 0 ||
2354                     wr->num_sge > qp->rq.max_sges) {
2355                         *bad_wr = wr;
2356                         status = -ENOMEM;
2357                         break;
2358                 }
2359                 rqe = ocrdma_hwq_head(&qp->rq);
2360                 ocrdma_build_rqe(rqe, wr, 0);
2361
2362                 qp->rqe_wr_id_tbl[qp->rq.head] = wr->wr_id;
2363                 /* make sure rqe is written before adapter can access it */
2364                 wmb();
2365
2366                 /* inform hw to start processing it */
2367                 ocrdma_ring_rq_db(qp);
2368
2369                 /* update pointer, counter for next wr */
2370                 ocrdma_hwq_inc_head(&qp->rq);
2371                 wr = wr->next;
2372         }
2373         spin_unlock_irqrestore(&qp->q_lock, flags);
2374         return status;
2375 }
2376
2377 /* cqe for srq's rqe can potentially arrive out of order.
2378  * index gives the entry in the shadow table where to store
2379  * the wr_id. tag/index is returned in cqe to reference back
2380  * for a given rqe.
2381  */
2382 static int ocrdma_srq_get_idx(struct ocrdma_srq *srq)
2383 {
2384         int row = 0;
2385         int indx = 0;
2386
2387         for (row = 0; row < srq->bit_fields_len; row++) {
2388                 if (srq->idx_bit_fields[row]) {
2389                         indx = ffs(srq->idx_bit_fields[row]);
2390                         indx = (row * 32) + (indx - 1);
2391                         if (indx >= srq->rq.max_cnt)
2392                                 BUG();
2393                         ocrdma_srq_toggle_bit(srq, indx);
2394                         break;
2395                 }
2396         }
2397
2398         if (row == srq->bit_fields_len)
2399                 BUG();
2400         return indx + 1; /* Use from index 1 */
2401 }
2402
2403 static void ocrdma_ring_srq_db(struct ocrdma_srq *srq)
2404 {
2405         u32 val = srq->rq.dbid | (1 << 16);
2406
2407         iowrite32(val, srq->db + OCRDMA_DB_GEN2_SRQ_OFFSET);
2408 }
2409
2410 int ocrdma_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
2411                          struct ib_recv_wr **bad_wr)
2412 {
2413         int status = 0;
2414         unsigned long flags;
2415         struct ocrdma_srq *srq;
2416         struct ocrdma_hdr_wqe *rqe;
2417         u16 tag;
2418
2419         srq = get_ocrdma_srq(ibsrq);
2420
2421         spin_lock_irqsave(&srq->q_lock, flags);
2422         while (wr) {
2423                 if (ocrdma_hwq_free_cnt(&srq->rq) == 0 ||
2424                     wr->num_sge > srq->rq.max_sges) {
2425                         status = -ENOMEM;
2426                         *bad_wr = wr;
2427                         break;
2428                 }
2429                 tag = ocrdma_srq_get_idx(srq);
2430                 rqe = ocrdma_hwq_head(&srq->rq);
2431                 ocrdma_build_rqe(rqe, wr, tag);
2432
2433                 srq->rqe_wr_id_tbl[tag] = wr->wr_id;
2434                 /* make sure rqe is written before adapter can perform DMA */
2435                 wmb();
2436                 /* inform hw to start processing it */
2437                 ocrdma_ring_srq_db(srq);
2438                 /* update pointer, counter for next wr */
2439                 ocrdma_hwq_inc_head(&srq->rq);
2440                 wr = wr->next;
2441         }
2442         spin_unlock_irqrestore(&srq->q_lock, flags);
2443         return status;
2444 }
2445
2446 static enum ib_wc_status ocrdma_to_ibwc_err(u16 status)
2447 {
2448         enum ib_wc_status ibwc_status;
2449
2450         switch (status) {
2451         case OCRDMA_CQE_GENERAL_ERR:
2452                 ibwc_status = IB_WC_GENERAL_ERR;
2453                 break;
2454         case OCRDMA_CQE_LOC_LEN_ERR:
2455                 ibwc_status = IB_WC_LOC_LEN_ERR;
2456                 break;
2457         case OCRDMA_CQE_LOC_QP_OP_ERR:
2458                 ibwc_status = IB_WC_LOC_QP_OP_ERR;
2459                 break;
2460         case OCRDMA_CQE_LOC_EEC_OP_ERR:
2461                 ibwc_status = IB_WC_LOC_EEC_OP_ERR;
2462                 break;
2463         case OCRDMA_CQE_LOC_PROT_ERR:
2464                 ibwc_status = IB_WC_LOC_PROT_ERR;
2465                 break;
2466         case OCRDMA_CQE_WR_FLUSH_ERR:
2467                 ibwc_status = IB_WC_WR_FLUSH_ERR;
2468                 break;
2469         case OCRDMA_CQE_MW_BIND_ERR:
2470                 ibwc_status = IB_WC_MW_BIND_ERR;
2471                 break;
2472         case OCRDMA_CQE_BAD_RESP_ERR:
2473                 ibwc_status = IB_WC_BAD_RESP_ERR;
2474                 break;
2475         case OCRDMA_CQE_LOC_ACCESS_ERR:
2476                 ibwc_status = IB_WC_LOC_ACCESS_ERR;
2477                 break;
2478         case OCRDMA_CQE_REM_INV_REQ_ERR:
2479                 ibwc_status = IB_WC_REM_INV_REQ_ERR;
2480                 break;
2481         case OCRDMA_CQE_REM_ACCESS_ERR:
2482                 ibwc_status = IB_WC_REM_ACCESS_ERR;
2483                 break;
2484         case OCRDMA_CQE_REM_OP_ERR:
2485                 ibwc_status = IB_WC_REM_OP_ERR;
2486                 break;
2487         case OCRDMA_CQE_RETRY_EXC_ERR:
2488                 ibwc_status = IB_WC_RETRY_EXC_ERR;
2489                 break;
2490         case OCRDMA_CQE_RNR_RETRY_EXC_ERR:
2491                 ibwc_status = IB_WC_RNR_RETRY_EXC_ERR;
2492                 break;
2493         case OCRDMA_CQE_LOC_RDD_VIOL_ERR:
2494                 ibwc_status = IB_WC_LOC_RDD_VIOL_ERR;
2495                 break;
2496         case OCRDMA_CQE_REM_INV_RD_REQ_ERR:
2497                 ibwc_status = IB_WC_REM_INV_RD_REQ_ERR;
2498                 break;
2499         case OCRDMA_CQE_REM_ABORT_ERR:
2500                 ibwc_status = IB_WC_REM_ABORT_ERR;
2501                 break;
2502         case OCRDMA_CQE_INV_EECN_ERR:
2503                 ibwc_status = IB_WC_INV_EECN_ERR;
2504                 break;
2505         case OCRDMA_CQE_INV_EEC_STATE_ERR:
2506                 ibwc_status = IB_WC_INV_EEC_STATE_ERR;
2507                 break;
2508         case OCRDMA_CQE_FATAL_ERR:
2509                 ibwc_status = IB_WC_FATAL_ERR;
2510                 break;
2511         case OCRDMA_CQE_RESP_TIMEOUT_ERR:
2512                 ibwc_status = IB_WC_RESP_TIMEOUT_ERR;
2513                 break;
2514         default:
2515                 ibwc_status = IB_WC_GENERAL_ERR;
2516                 break;
2517         }
2518         return ibwc_status;
2519 }
2520
2521 static void ocrdma_update_wc(struct ocrdma_qp *qp, struct ib_wc *ibwc,
2522                       u32 wqe_idx)
2523 {
2524         struct ocrdma_hdr_wqe *hdr;
2525         struct ocrdma_sge *rw;
2526         int opcode;
2527
2528         hdr = ocrdma_hwq_head_from_idx(&qp->sq, wqe_idx);
2529
2530         ibwc->wr_id = qp->wqe_wr_id_tbl[wqe_idx].wrid;
2531         /* Undo the hdr->cw swap */
2532         opcode = le32_to_cpu(hdr->cw) & OCRDMA_WQE_OPCODE_MASK;
2533         switch (opcode) {
2534         case OCRDMA_WRITE:
2535                 ibwc->opcode = IB_WC_RDMA_WRITE;
2536                 break;
2537         case OCRDMA_READ:
2538                 rw = (struct ocrdma_sge *)(hdr + 1);
2539                 ibwc->opcode = IB_WC_RDMA_READ;
2540                 ibwc->byte_len = rw->len;
2541                 break;
2542         case OCRDMA_SEND:
2543                 ibwc->opcode = IB_WC_SEND;
2544                 break;
2545         case OCRDMA_FR_MR:
2546                 ibwc->opcode = IB_WC_REG_MR;
2547                 break;
2548         case OCRDMA_LKEY_INV:
2549                 ibwc->opcode = IB_WC_LOCAL_INV;
2550                 break;
2551         default:
2552                 ibwc->status = IB_WC_GENERAL_ERR;
2553                 pr_err("%s() invalid opcode received = 0x%x\n",
2554                        __func__, opcode);
2555                 break;
2556         }
2557 }
2558
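/* Rewrite the CQE status field (UD CQEs keep it in a different bit
 * range) to WR_FLUSH_ERR, so that the same CQE can be replayed as a
 * flush completion for every WQE/RQE still outstanding on the QP.
 */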
2559 static void ocrdma_set_cqe_status_flushed(struct ocrdma_qp *qp,
2560                                                 struct ocrdma_cqe *cqe)
2561 {
2562         if (is_cqe_for_sq(cqe)) {
2563                 cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
2564                                 cqe->flags_status_srcqpn) &
2565                                         ~OCRDMA_CQE_STATUS_MASK);
2566                 cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
2567                                 cqe->flags_status_srcqpn) |
2568                                 (OCRDMA_CQE_WR_FLUSH_ERR <<
2569                                         OCRDMA_CQE_STATUS_SHIFT));
2570         } else {
2571                 if (qp->qp_type == IB_QPT_UD || qp->qp_type == IB_QPT_GSI) {
2572                         cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
2573                                         cqe->flags_status_srcqpn) &
2574                                                 ~OCRDMA_CQE_UD_STATUS_MASK);
2575                         cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
2576                                         cqe->flags_status_srcqpn) |
2577                                         (OCRDMA_CQE_WR_FLUSH_ERR <<
2578                                                 OCRDMA_CQE_UD_STATUS_SHIFT));
2579                 } else {
2580                         cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
2581                                         cqe->flags_status_srcqpn) &
2582                                                 ~OCRDMA_CQE_STATUS_MASK);
2583                         cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
2584                                         cqe->flags_status_srcqpn) |
2585                                         (OCRDMA_CQE_WR_FLUSH_ERR <<
2586                                                 OCRDMA_CQE_STATUS_SHIFT));
2587                 }
2588         }
2589 }
2590
2591 static bool ocrdma_update_err_cqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe,
2592                                   struct ocrdma_qp *qp, int status)
2593 {
2594         bool expand = false;
2595
2596         ibwc->byte_len = 0;
2597         ibwc->qp = &qp->ibqp;
2598         ibwc->status = ocrdma_to_ibwc_err(status);
2599
2600         ocrdma_flush_qp(qp);
2601         ocrdma_qp_state_change(qp, IB_QPS_ERR, NULL);
2602
2603         /* if wqe/rqe pending for which cqe needs to be returned,
2604          * trigger inflating it.
2605          */
2606         if (!is_hw_rq_empty(qp) || !is_hw_sq_empty(qp)) {
2607                 expand = true;
2608                 ocrdma_set_cqe_status_flushed(qp, cqe);
2609         }
2610         return expand;
2611 }
2612
2613 static int ocrdma_update_err_rcqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe,
2614                                   struct ocrdma_qp *qp, int status)
2615 {
2616         ibwc->opcode = IB_WC_RECV;
2617         ibwc->wr_id = qp->rqe_wr_id_tbl[qp->rq.tail];
2618         ocrdma_hwq_inc_tail(&qp->rq);
2619
2620         return ocrdma_update_err_cqe(ibwc, cqe, qp, status);
2621 }
2622
2623 static int ocrdma_update_err_scqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe,
2624                                   struct ocrdma_qp *qp, int status)
2625 {
2626         ocrdma_update_wc(qp, ibwc, qp->sq.tail);
2627         ocrdma_hwq_inc_tail(&qp->sq);
2628
2629         return ocrdma_update_err_cqe(ibwc, cqe, qp, status);
2630 }
2631
2632
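/* Error-CQE polling protocol: *polled tells the caller whether a work
 * completion was produced, *stop aborts the poll loop leaving the CQE
 * for the buddy CQ, and the return value ("expand") keeps the current
 * CQE so it can be replayed as flush completions for the remaining
 * WQEs/RQEs.
 */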
2633 static bool ocrdma_poll_err_scqe(struct ocrdma_qp *qp,
2634                                  struct ocrdma_cqe *cqe, struct ib_wc *ibwc,
2635                                  bool *polled, bool *stop)
2636 {
2637         bool expand;
2638         struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device);
2639         int status = (le32_to_cpu(cqe->flags_status_srcqpn) &
2640                 OCRDMA_CQE_STATUS_MASK) >> OCRDMA_CQE_STATUS_SHIFT;
2641         if (status < OCRDMA_MAX_CQE_ERR)
2642                 atomic_inc(&dev->cqe_err_stats[status]);
2643
2644         /* when the hw sq is empty but the rq is not, keep the cqe
2645          * in order to get the cq event again.
2646          */
2647         if (is_hw_sq_empty(qp) && !is_hw_rq_empty(qp)) {
2648                 /* when cq for rq and sq is same, it is safe to return
2649                  * flush cqe for RQEs.
2650                  */
2651                 if (!qp->srq && (qp->sq_cq == qp->rq_cq)) {
2652                         *polled = true;
2653                         status = OCRDMA_CQE_WR_FLUSH_ERR;
2654                         expand = ocrdma_update_err_rcqe(ibwc, cqe, qp, status);
2655                 } else {
2656                         /* stop processing further cqe as this cqe is used for
2657                          * triggering cq event on buddy cq of RQ.
2658                          * When QP is destroyed, this cqe will be removed
2659                          * from the cq's hardware q.
2660                          */
2661                         *polled = false;
2662                         *stop = true;
2663                         expand = false;
2664                 }
2665         } else if (is_hw_sq_empty(qp)) {
2666                 /* Do nothing */
2667                 expand = false;
2668                 *polled = false;
2669                 *stop = false;
2670         } else {
2671                 *polled = true;
2672                 expand = ocrdma_update_err_scqe(ibwc, cqe, qp, status);
2673         }
2674         return expand;
2675 }
2676
2677 static bool ocrdma_poll_success_scqe(struct ocrdma_qp *qp,
2678                                      struct ocrdma_cqe *cqe,
2679                                      struct ib_wc *ibwc, bool *polled)
2680 {
2681         bool expand = false;
2682         int tail = qp->sq.tail;
2683         u32 wqe_idx;
2684
2685         if (!qp->wqe_wr_id_tbl[tail].signaled) {
2686                 *polled = false;    /* WC cannot be consumed yet */
2687         } else {
2688                 ibwc->status = IB_WC_SUCCESS;
2689                 ibwc->wc_flags = 0;
2690                 ibwc->qp = &qp->ibqp;
2691                 ocrdma_update_wc(qp, ibwc, tail);
2692                 *polled = true;
2693         }
2694         wqe_idx = (le32_to_cpu(cqe->wq.wqeidx) &
2695                         OCRDMA_CQE_WQEIDX_MASK) & qp->sq.max_wqe_idx;
2696         if (tail != wqe_idx)
2697                 expand = true; /* Coalesced CQE can't be consumed yet */
2698
2699         ocrdma_hwq_inc_tail(&qp->sq);
2700         return expand;
2701 }
2702
2703 static bool ocrdma_poll_scqe(struct ocrdma_qp *qp, struct ocrdma_cqe *cqe,
2704                              struct ib_wc *ibwc, bool *polled, bool *stop)
2705 {
2706         int status;
2707         bool expand;
2708
2709         status = (le32_to_cpu(cqe->flags_status_srcqpn) &
2710                 OCRDMA_CQE_STATUS_MASK) >> OCRDMA_CQE_STATUS_SHIFT;
2711
2712         if (status == OCRDMA_CQE_SUCCESS)
2713                 expand = ocrdma_poll_success_scqe(qp, cqe, ibwc, polled);
2714         else
2715                 expand = ocrdma_poll_err_scqe(qp, cqe, ibwc, polled, stop);
2716         return expand;
2717 }
2718
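/* UD receive CQEs pack the status, source QP, transfer length and, on
 * UDP-encap capable devices, the L3 header type into bit fields.
 */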
2719 static int ocrdma_update_ud_rcqe(struct ocrdma_dev *dev, struct ib_wc *ibwc,
2720                                  struct ocrdma_cqe *cqe)
2721 {
2722         int status;
2723         u16 hdr_type = 0;
2724
2725         status = (le32_to_cpu(cqe->flags_status_srcqpn) &
2726                 OCRDMA_CQE_UD_STATUS_MASK) >> OCRDMA_CQE_UD_STATUS_SHIFT;
2727         ibwc->src_qp = le32_to_cpu(cqe->flags_status_srcqpn) &
2728                                                 OCRDMA_CQE_SRCQP_MASK;
2729         ibwc->pkey_index = 0;
2730         ibwc->wc_flags = IB_WC_GRH;
2731         ibwc->byte_len = (le32_to_cpu(cqe->ud.rxlen_pkey) >>
2732                           OCRDMA_CQE_UD_XFER_LEN_SHIFT) &
2733                           OCRDMA_CQE_UD_XFER_LEN_MASK;
2734
2735         if (ocrdma_is_udp_encap_supported(dev)) {
2736                 hdr_type = (le32_to_cpu(cqe->ud.rxlen_pkey) >>
2737                             OCRDMA_CQE_UD_L3TYPE_SHIFT) &
2738                             OCRDMA_CQE_UD_L3TYPE_MASK;
2739                 ibwc->wc_flags |= IB_WC_WITH_NETWORK_HDR_TYPE;
2740                 ibwc->network_hdr_type = hdr_type;
2741         }
2742
2743         return status;
2744 }
2745
2746 static void ocrdma_update_free_srq_cqe(struct ib_wc *ibwc,
2747                                        struct ocrdma_cqe *cqe,
2748                                        struct ocrdma_qp *qp)
2749 {
2750         unsigned long flags;
2751         struct ocrdma_srq *srq;
2752         u32 wqe_idx;
2753
2754         srq = get_ocrdma_srq(qp->ibqp.srq);
2755         wqe_idx = (le32_to_cpu(cqe->rq.buftag_qpn) >>
2756                 OCRDMA_CQE_BUFTAG_SHIFT) & srq->rq.max_wqe_idx;
2757         if (wqe_idx < 1)
2758                 BUG();
2759
2760         ibwc->wr_id = srq->rqe_wr_id_tbl[wqe_idx];
2761         spin_lock_irqsave(&srq->q_lock, flags);
2762         ocrdma_srq_toggle_bit(srq, wqe_idx - 1);
2763         spin_unlock_irqrestore(&srq->q_lock, flags);
2764         ocrdma_hwq_inc_tail(&srq->rq);
2765 }
2766
2767 static bool ocrdma_poll_err_rcqe(struct ocrdma_qp *qp, struct ocrdma_cqe *cqe,
2768                                 struct ib_wc *ibwc, bool *polled, bool *stop,
2769                                 int status)
2770 {
2771         bool expand;
2772         struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device);
2773
2774         if (status < OCRDMA_MAX_CQE_ERR)
2775                 atomic_inc(&dev->cqe_err_stats[status]);
2776
2777         /* when the hw rq is empty but the sq is not, keep the cqe
2778          * to get the cq event again.
2779          */
2780         if (is_hw_rq_empty(qp) && !is_hw_sq_empty(qp)) {
2781                 if (!qp->srq && (qp->sq_cq == qp->rq_cq)) {
2782                         *polled = true;
2783                         status = OCRDMA_CQE_WR_FLUSH_ERR;
2784                         expand = ocrdma_update_err_scqe(ibwc, cqe, qp, status);
2785                 } else {
2786                         *polled = false;
2787                         *stop = true;
2788                         expand = false;
2789                 }
2790         } else if (is_hw_rq_empty(qp)) {
2791                 /* Do nothing */
2792                 expand = false;
2793                 *polled = false;
2794                 *stop = false;
2795         } else {
2796                 *polled = true;
2797                 expand = ocrdma_update_err_rcqe(ibwc, cqe, qp, status);
2798         }
2799         return expand;
2800 }
2801
static void ocrdma_poll_success_rcqe(struct ocrdma_qp *qp,
                                     struct ocrdma_cqe *cqe, struct ib_wc *ibwc)
{
        struct ocrdma_dev *dev;

        dev = get_ocrdma_dev(qp->ibqp.device);
        ibwc->opcode = IB_WC_RECV;
        ibwc->qp = &qp->ibqp;
        ibwc->status = IB_WC_SUCCESS;

        if (qp->qp_type == IB_QPT_UD || qp->qp_type == IB_QPT_GSI)
                ocrdma_update_ud_rcqe(dev, ibwc, cqe);
        else
                ibwc->byte_len = le32_to_cpu(cqe->rq.rxlen);

        if (is_cqe_imm(cqe)) {
                ibwc->ex.imm_data = htonl(le32_to_cpu(cqe->rq.lkey_immdt));
                ibwc->wc_flags |= IB_WC_WITH_IMM;
        } else if (is_cqe_wr_imm(cqe)) {
                ibwc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
                ibwc->ex.imm_data = htonl(le32_to_cpu(cqe->rq.lkey_immdt));
                ibwc->wc_flags |= IB_WC_WITH_IMM;
        } else if (is_cqe_invalidated(cqe)) {
                ibwc->ex.invalidate_rkey = le32_to_cpu(cqe->rq.lkey_immdt);
                ibwc->wc_flags |= IB_WC_WITH_INVALIDATE;
        }
        if (qp->ibqp.srq) {
                ocrdma_update_free_srq_cqe(ibwc, cqe, qp);
        } else {
                ibwc->wr_id = qp->rqe_wr_id_tbl[qp->rq.tail];
                ocrdma_hwq_inc_tail(&qp->rq);
        }
}

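/*
 * Illustrative only (not part of the driver): a verbs consumer would
 * inspect the flags set above roughly as follows.  handle_imm() and
 * note_invalidated_rkey() are placeholders, not real kernel helpers.
 *
 *	struct ib_wc *wc = ...;		// filled in by ib_poll_cq()
 *
 *	if (wc->wc_flags & IB_WC_WITH_IMM)
 *		handle_imm(ntohl(wc->ex.imm_data));
 *	else if (wc->wc_flags & IB_WC_WITH_INVALIDATE)
 *		note_invalidated_rkey(wc->ex.invalidate_rkey);
 */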
static bool ocrdma_poll_rcqe(struct ocrdma_qp *qp, struct ocrdma_cqe *cqe,
                             struct ib_wc *ibwc, bool *polled, bool *stop)
{
        int status;
        bool expand = false;

        ibwc->wc_flags = 0;
        if (qp->qp_type == IB_QPT_UD || qp->qp_type == IB_QPT_GSI) {
                status = (le32_to_cpu(cqe->flags_status_srcqpn) &
                                        OCRDMA_CQE_UD_STATUS_MASK) >>
                                        OCRDMA_CQE_UD_STATUS_SHIFT;
        } else {
                status = (le32_to_cpu(cqe->flags_status_srcqpn) &
                             OCRDMA_CQE_STATUS_MASK) >> OCRDMA_CQE_STATUS_SHIFT;
        }

        if (status == OCRDMA_CQE_SUCCESS) {
                *polled = true;
                ocrdma_poll_success_rcqe(qp, cqe, ibwc);
        } else {
                expand = ocrdma_poll_err_rcqe(qp, cqe, ibwc, polled, stop,
                                              status);
        }
        return expand;
}

static void ocrdma_change_cq_phase(struct ocrdma_cq *cq, struct ocrdma_cqe *cqe,
                                   u16 cur_getp)
{
        if (cq->phase_change) {
                if (cur_getp == 0)
                        cq->phase = (~cq->phase & OCRDMA_CQE_VALID);
        } else {
                /* clear valid bit */
                cqe->flags_status_srcqpn = 0;
        }
}

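/*
 * Walk the CQ ring starting at the software get pointer.  A CQE is
 * consumed only while its valid bit matches cq->phase; on adapters with
 * phase_change set, the expected polarity is flipped each time the ring
 * wraps instead of zeroing every consumed entry (see
 * ocrdma_change_cq_phase() above).
 */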
static int ocrdma_poll_hwcq(struct ocrdma_cq *cq, int num_entries,
                            struct ib_wc *ibwc)
{
        u16 qpn = 0;
        int i = 0;
        bool expand = false;
        int polled_hw_cqes = 0;
        struct ocrdma_qp *qp = NULL;
        struct ocrdma_dev *dev = get_ocrdma_dev(cq->ibcq.device);
        struct ocrdma_cqe *cqe;
        u16 cur_getp;
        bool polled = false;
        bool stop = false;

        cur_getp = cq->getp;
        while (num_entries) {
                cqe = cq->va + cur_getp;
                /* check whether the CQE is valid */
                if (!is_cqe_valid(cq, cqe))
                        break;
                qpn = (le32_to_cpu(cqe->cmn.qpn) & OCRDMA_CQE_QPN_MASK);
                /* ignore discarded cqe */
                if (qpn == 0)
                        goto skip_cqe;
                qp = dev->qp_tbl[qpn];
                BUG_ON(qp == NULL);

                if (is_cqe_for_sq(cqe)) {
                        expand = ocrdma_poll_scqe(qp, cqe, ibwc, &polled,
                                                  &stop);
                } else {
                        expand = ocrdma_poll_rcqe(qp, cqe, ibwc, &polled,
                                                  &stop);
                }
                if (expand)
                        goto expand_cqe;
                if (stop)
                        goto stop_cqe;
                /* clear qpn to avoid duplicate processing by discard_cqe() */
                cqe->cmn.qpn = 0;
skip_cqe:
                polled_hw_cqes += 1;
                cur_getp = (cur_getp + 1) % cq->max_hw_cqe;
                ocrdma_change_cq_phase(cq, cqe, cur_getp);
expand_cqe:
                if (polled) {
                        num_entries -= 1;
                        i += 1;
                        ibwc = ibwc + 1;
                        polled = false;
                }
        }
stop_cqe:
        cq->getp = cur_getp;

        if (polled_hw_cqes)
                ocrdma_ring_cq_db(dev, cq->id, false, false, polled_hw_cqes);

        return i;
}

/* Insert a flush-error CQE for each pending WQE of the QP whose SQ or RQ
 * uses the CQ currently being polled.
 */
static int ocrdma_add_err_cqe(struct ocrdma_cq *cq, int num_entries,
                              struct ocrdma_qp *qp, struct ib_wc *ibwc)
{
        int err_cqes = 0;

        while (num_entries) {
                if (is_hw_sq_empty(qp) && is_hw_rq_empty(qp))
                        break;
                if (!is_hw_sq_empty(qp) && qp->sq_cq == cq) {
                        ocrdma_update_wc(qp, ibwc, qp->sq.tail);
                        ocrdma_hwq_inc_tail(&qp->sq);
                } else if (!is_hw_rq_empty(qp) && qp->rq_cq == cq) {
                        ibwc->wr_id = qp->rqe_wr_id_tbl[qp->rq.tail];
                        ocrdma_hwq_inc_tail(&qp->rq);
                } else {
                        return err_cqes;
                }
                ibwc->byte_len = 0;
                ibwc->status = IB_WC_WR_FLUSH_ERR;
                ibwc = ibwc + 1;
                err_cqes += 1;
                num_entries -= 1;
        }
        return err_cqes;
}

int ocrdma_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
        int cqes_to_poll = num_entries;
        struct ocrdma_cq *cq = get_ocrdma_cq(ibcq);
        struct ocrdma_dev *dev = get_ocrdma_dev(ibcq->device);
        int num_os_cqe = 0, err_cqes = 0;
        struct ocrdma_qp *qp;
        unsigned long flags;

        /* poll cqes from adapter CQ */
        spin_lock_irqsave(&cq->cq_lock, flags);
        num_os_cqe = ocrdma_poll_hwcq(cq, cqes_to_poll, wc);
        spin_unlock_irqrestore(&cq->cq_lock, flags);
        cqes_to_poll -= num_os_cqe;

        if (cqes_to_poll) {
                wc = wc + num_os_cqe;
                /* The adapter returns a single error CQE when a QP moves
                 * to the error state, so synthesize additional CQEs with
                 * a FLUSHED status for every WQE and RQE still pending on
                 * the QP's SQ and RQ that use this CQ (see the sketch
                 * after this function).
                 */
                spin_lock_irqsave(&dev->flush_q_lock, flags);
                list_for_each_entry(qp, &cq->sq_head, sq_entry) {
                        if (cqes_to_poll == 0)
                                break;
                        err_cqes = ocrdma_add_err_cqe(cq, cqes_to_poll, qp, wc);
                        cqes_to_poll -= err_cqes;
                        num_os_cqe += err_cqes;
                        wc = wc + err_cqes;
                }
                spin_unlock_irqrestore(&dev->flush_q_lock, flags);
        }
        return num_os_cqe;
}

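/*
 * Illustrative only: a sketch of how a consumer drains this CQ after an
 * error, seeing both the adapter's error CQE and the flush completions
 * synthesized above.  process_completion() is a placeholder.
 *
 *	struct ib_wc wc[8];
 *	int i, n;
 *
 *	while ((n = ib_poll_cq(cq, 8, wc)) > 0) {
 *		for (i = 0; i < n; i++) {
 *			if (wc[i].status == IB_WC_WR_FLUSH_ERR) {
 *				// flushed by the driver; wr_id still valid
 *				continue;
 *			}
 *			process_completion(&wc[i]);
 *		}
 *	}
 */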
int ocrdma_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags cq_flags)
{
        struct ocrdma_cq *cq = get_ocrdma_cq(ibcq);
        struct ocrdma_dev *dev = get_ocrdma_dev(ibcq->device);
        u16 cq_id;
        unsigned long flags;
        bool arm_needed = false, sol_needed = false;

        cq_id = cq->id;

        spin_lock_irqsave(&cq->cq_lock, flags);
        if (cq_flags & (IB_CQ_NEXT_COMP | IB_CQ_SOLICITED))
                arm_needed = true;
        if (cq_flags & IB_CQ_SOLICITED)
                sol_needed = true;

        ocrdma_ring_cq_db(dev, cq_id, arm_needed, sol_needed, 0);
        spin_unlock_irqrestore(&cq->cq_lock, flags);

        return 0;
}

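/*
 * Illustrative only: how a consumer arms this CQ through the core API.
 * Since ocrdma_arm_cq() always returns 0 (missed events are never
 * reported), the usual pattern is to re-poll after arming to close the
 * race with completions that arrived before the doorbell was rung.
 * process_completion() is a placeholder.
 *
 *	struct ib_wc wc;
 *
 *	ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
 *	while (ib_poll_cq(cq, 1, &wc) > 0)
 *		process_completion(&wc);
 */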
struct ib_mr *ocrdma_alloc_mr(struct ib_pd *ibpd,
                              enum ib_mr_type mr_type,
                              u32 max_num_sg)
{
        int status;
        struct ocrdma_mr *mr;
        struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
        struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);

        if (mr_type != IB_MR_TYPE_MEM_REG)
                return ERR_PTR(-EINVAL);

        if (max_num_sg > dev->attr.max_pages_per_frmr)
                return ERR_PTR(-EINVAL);

        mr = kzalloc(sizeof(*mr), GFP_KERNEL);
        if (!mr)
                return ERR_PTR(-ENOMEM);

        mr->pages = kcalloc(max_num_sg, sizeof(u64), GFP_KERNEL);
        if (!mr->pages) {
                status = -ENOMEM;
                goto pl_err;
        }

        status = ocrdma_get_pbl_info(dev, mr, max_num_sg);
        if (status)
                goto pbl_err;
        mr->hwmr.fr_mr = 1;
        mr->hwmr.remote_rd = 0;
        mr->hwmr.remote_wr = 0;
        mr->hwmr.local_rd = 0;
        mr->hwmr.local_wr = 0;
        mr->hwmr.mw_bind = 0;
        status = ocrdma_build_pbl_tbl(dev, &mr->hwmr);
        if (status)
                goto pbl_err;
        status = ocrdma_reg_mr(dev, &mr->hwmr, pd->id, 0);
        if (status)
                goto mbx_err;
        mr->ibmr.rkey = mr->hwmr.lkey;
        mr->ibmr.lkey = mr->hwmr.lkey;
        dev->stag_arr[(mr->hwmr.lkey >> 8) & (OCRDMA_MAX_STAG - 1)] =
                (unsigned long) mr;
        return &mr->ibmr;
mbx_err:
        ocrdma_free_mr_pbl_tbl(dev, &mr->hwmr);
pbl_err:
        kfree(mr->pages);
pl_err:
        kfree(mr);
        /* propagate the real cause instead of folding everything to -ENOMEM */
        return ERR_PTR(status);
}

/* ib_sg_to_pages() callback: record one page address; fail once the
 * page list built for the HW PBL is full.
 */
static int ocrdma_set_page(struct ib_mr *ibmr, u64 addr)
{
        struct ocrdma_mr *mr = get_ocrdma_mr(ibmr);

        if (unlikely(mr->npages == mr->hwmr.num_pbes))
                return -ENOMEM;

        mr->pages[mr->npages++] = addr;

        return 0;
}

int ocrdma_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
                     unsigned int *sg_offset)
{
        struct ocrdma_mr *mr = get_ocrdma_mr(ibmr);

        mr->npages = 0;

        return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, ocrdma_set_page);
}
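
/*
 * Illustrative only: how an upper-layer protocol would reach the two
 * entry points above through the core verbs API.  "pd", "sgl" and
 * "nents" are assumed to exist in the caller; nothing here is part of
 * this driver.
 *
 *	struct ib_mr *mr;
 *	int n;
 *
 *	mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, 16);
 *	if (IS_ERR(mr))
 *		return PTR_ERR(mr);
 *
 *	n = ib_map_mr_sg(mr, sgl, nents, NULL, PAGE_SIZE);
 *	if (n != nents)
 *		goto unwind;	// partial mapping; placeholder label
 */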