GNU Linux-libre 4.14.324-gnu1
drivers/infiniband/hw/cxgb4/cq.c
/*
 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "iw_cxgb4.h"

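/*
 * Post a FW_RI_RES_WR RESET so the firmware tears down the CQ, then free
 * the software queue, the DMA queue memory and the CQ id.
 */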
static int destroy_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
                      struct c4iw_dev_ucontext *uctx, struct sk_buff *skb)
{
        struct fw_ri_res_wr *res_wr;
        struct fw_ri_res *res;
        int wr_len;
        struct c4iw_wr_wait wr_wait;
        int ret;

        wr_len = sizeof *res_wr + sizeof *res;
        set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);

        res_wr = __skb_put_zero(skb, wr_len);
        res_wr->op_nres = cpu_to_be32(
                        FW_WR_OP_V(FW_RI_RES_WR) |
                        FW_RI_RES_WR_NRES_V(1) |
                        FW_WR_COMPL_F);
        res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
        res_wr->cookie = (uintptr_t)&wr_wait;
        res = res_wr->res;
        res->u.cq.restype = FW_RI_RES_TYPE_CQ;
        res->u.cq.op = FW_RI_RES_OP_RESET;
        res->u.cq.iqid = cpu_to_be32(cq->cqid);

        c4iw_init_wr_wait(&wr_wait);
        ret = c4iw_ofld_send(rdev, skb);
        if (!ret)
                ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, 0, __func__);

        kfree(cq->sw_queue);
        dma_free_coherent(&(rdev->lldi.pdev->dev),
                          cq->memsize, cq->queue,
                          dma_unmap_addr(cq, mapping));
        c4iw_put_cqid(rdev, cq->cqid, uctx);
        return ret;
}

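/*
 * Allocate a CQ id, the software queue (kernel CQs only) and the DMA queue
 * memory, then post a FW_RI_RES_WR WRITE so the firmware creates the
 * ingress queue context for the CQ.
 */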
static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
                     struct c4iw_dev_ucontext *uctx)
{
        struct fw_ri_res_wr *res_wr;
        struct fw_ri_res *res;
        int wr_len;
        int user = (uctx != &rdev->uctx);
        struct c4iw_wr_wait wr_wait;
        int ret;
        struct sk_buff *skb;

        cq->cqid = c4iw_get_cqid(rdev, uctx);
        if (!cq->cqid) {
                ret = -ENOMEM;
                goto err1;
        }

        if (!user) {
                cq->sw_queue = kzalloc(cq->memsize, GFP_KERNEL);
                if (!cq->sw_queue) {
                        ret = -ENOMEM;
                        goto err2;
                }
        }
        cq->queue = dma_alloc_coherent(&rdev->lldi.pdev->dev, cq->memsize,
                                       &cq->dma_addr, GFP_KERNEL);
        if (!cq->queue) {
                ret = -ENOMEM;
                goto err3;
        }
        dma_unmap_addr_set(cq, mapping, cq->dma_addr);
        memset(cq->queue, 0, cq->memsize);

        /* build fw_ri_res_wr */
        wr_len = sizeof *res_wr + sizeof *res;

        skb = alloc_skb(wr_len, GFP_KERNEL);
        if (!skb) {
                ret = -ENOMEM;
                goto err4;
        }
        set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);

        res_wr = __skb_put_zero(skb, wr_len);
        res_wr->op_nres = cpu_to_be32(
                        FW_WR_OP_V(FW_RI_RES_WR) |
                        FW_RI_RES_WR_NRES_V(1) |
                        FW_WR_COMPL_F);
        res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
        res_wr->cookie = (uintptr_t)&wr_wait;
        res = res_wr->res;
        res->u.cq.restype = FW_RI_RES_TYPE_CQ;
        res->u.cq.op = FW_RI_RES_OP_WRITE;
        res->u.cq.iqid = cpu_to_be32(cq->cqid);
        res->u.cq.iqandst_to_iqandstindex = cpu_to_be32(
                        FW_RI_RES_WR_IQANUS_V(0) |
                        FW_RI_RES_WR_IQANUD_V(1) |
                        FW_RI_RES_WR_IQANDST_F |
                        FW_RI_RES_WR_IQANDSTINDEX_V(
                                rdev->lldi.ciq_ids[cq->vector]));
        res->u.cq.iqdroprss_to_iqesize = cpu_to_be16(
                        FW_RI_RES_WR_IQDROPRSS_F |
                        FW_RI_RES_WR_IQPCIECH_V(2) |
                        FW_RI_RES_WR_IQINTCNTTHRESH_V(0) |
                        FW_RI_RES_WR_IQO_F |
                        FW_RI_RES_WR_IQESIZE_V(1));
        res->u.cq.iqsize = cpu_to_be16(cq->size);
        res->u.cq.iqaddr = cpu_to_be64(cq->dma_addr);

        c4iw_init_wr_wait(&wr_wait);

        ret = c4iw_ofld_send(rdev, skb);
        if (ret)
                goto err4;
        pr_debug("%s wait_event wr_wait %p\n", __func__, &wr_wait);
        ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, 0, __func__);
        if (ret)
                goto err4;

        cq->gen = 1;
        cq->gts = rdev->lldi.gts_reg;
        cq->rdev = rdev;

        cq->bar2_va = c4iw_bar2_addrs(rdev, cq->cqid, T4_BAR2_QTYPE_INGRESS,
                                      &cq->bar2_qid,
                                      user ? &cq->bar2_pa : NULL);
        if (user && !cq->bar2_pa) {
                pr_warn("%s: cqid %u not in BAR2 range\n",
                        pci_name(rdev->lldi.pdev), cq->cqid);
                ret = -EINVAL;
                goto err4;
        }
        return 0;
err4:
        dma_free_coherent(&rdev->lldi.pdev->dev, cq->memsize, cq->queue,
                          dma_unmap_addr(cq, mapping));
err3:
        kfree(cq->sw_queue);
err2:
        c4iw_put_cqid(rdev, cq->cqid, uctx);
err1:
        return ret;
}

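/*
 * Insert a software-generated T4_ERR_SWFLUSH CQE for one RQ WR into the
 * software CQ.
 */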
static void insert_recv_cqe(struct t4_wq *wq, struct t4_cq *cq)
{
        struct t4_cqe cqe;

        pr_debug("%s wq %p cq %p sw_cidx %u sw_pidx %u\n", __func__,
                 wq, cq, cq->sw_cidx, cq->sw_pidx);
        memset(&cqe, 0, sizeof(cqe));
        cqe.header = cpu_to_be32(CQE_STATUS_V(T4_ERR_SWFLUSH) |
                                 CQE_OPCODE_V(FW_RI_SEND) |
                                 CQE_TYPE_V(0) |
                                 CQE_SWCQE_V(1) |
                                 CQE_QPID_V(wq->sq.qid));
        cqe.bits_type_ts = cpu_to_be64(CQE_GENBIT_V((u64)cq->gen));
        cq->sw_queue[cq->sw_pidx] = cqe;
        t4_swcq_produce(cq);
}

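/*
 * Flush the RQ WRs still in use, beyond the skip count already accounted
 * for, into the software CQ.  Returns the number of WRs flushed.
 */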
int c4iw_flush_rq(struct t4_wq *wq, struct t4_cq *cq, int count)
{
        int flushed = 0;
        int in_use = wq->rq.in_use - count;

        BUG_ON(in_use < 0);
        pr_debug("%s wq %p cq %p rq.in_use %u skip count %u\n", __func__,
                 wq, cq, wq->rq.in_use, count);
        while (in_use--) {
                insert_recv_cqe(wq, cq);
                flushed++;
        }
        return flushed;
}

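/*
 * Insert a software-generated T4_ERR_SWFLUSH CQE for one SQ WR into the
 * software CQ.
 */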
static void insert_sq_cqe(struct t4_wq *wq, struct t4_cq *cq,
                          struct t4_swsqe *swcqe)
{
        struct t4_cqe cqe;

        pr_debug("%s wq %p cq %p sw_cidx %u sw_pidx %u\n", __func__,
                 wq, cq, cq->sw_cidx, cq->sw_pidx);
        memset(&cqe, 0, sizeof(cqe));
        cqe.header = cpu_to_be32(CQE_STATUS_V(T4_ERR_SWFLUSH) |
                                 CQE_OPCODE_V(swcqe->opcode) |
                                 CQE_TYPE_V(1) |
                                 CQE_SWCQE_V(1) |
                                 CQE_QPID_V(wq->sq.qid));
        CQE_WRID_SQ_IDX(&cqe) = swcqe->idx;
        cqe.bits_type_ts = cpu_to_be64(CQE_GENBIT_V((u64)cq->gen));
        cq->sw_queue[cq->sw_pidx] = cqe;
        t4_swcq_produce(cq);
}

static void advance_oldest_read(struct t4_wq *wq);

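/*
 * Flush every SQ WR from flush_cidx up to the producer index into the
 * software CQ, advancing oldest_read past any flushed read requests.
 * Returns the number of WRs flushed.
 */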
int c4iw_flush_sq(struct c4iw_qp *qhp)
{
        int flushed = 0;
        struct t4_wq *wq = &qhp->wq;
        struct c4iw_cq *chp = to_c4iw_cq(qhp->ibqp.send_cq);
        struct t4_cq *cq = &chp->cq;
        int idx;
        struct t4_swsqe *swsqe;

        if (wq->sq.flush_cidx == -1)
                wq->sq.flush_cidx = wq->sq.cidx;
        idx = wq->sq.flush_cidx;
        BUG_ON(idx >= wq->sq.size);
        while (idx != wq->sq.pidx) {
                swsqe = &wq->sq.sw_sq[idx];
                BUG_ON(swsqe->flushed);
                swsqe->flushed = 1;
                insert_sq_cqe(wq, cq, swsqe);
                if (wq->sq.oldest_read == swsqe) {
                        BUG_ON(swsqe->opcode != FW_RI_READ_REQ);
                        advance_oldest_read(wq);
                }
                flushed++;
                if (++idx == wq->sq.size)
                        idx = 0;
        }
        wq->sq.flush_cidx += flushed;
        if (wq->sq.flush_cidx >= wq->sq.size)
                wq->sq.flush_cidx -= wq->sq.size;
        return flushed;
}

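/*
 * Starting at flush_cidx, move any SQ CQEs that have completed and are now
 * in order into the software CQ, skipping unsignaled WRs and stopping at
 * the first signaled WR that has not completed yet.
 */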
static void flush_completed_wrs(struct t4_wq *wq, struct t4_cq *cq)
{
        struct t4_swsqe *swsqe;
        int cidx;

        if (wq->sq.flush_cidx == -1)
                wq->sq.flush_cidx = wq->sq.cidx;
        cidx = wq->sq.flush_cidx;
        BUG_ON(cidx > wq->sq.size);

        while (cidx != wq->sq.pidx) {
                swsqe = &wq->sq.sw_sq[cidx];
                if (!swsqe->signaled) {
                        if (++cidx == wq->sq.size)
                                cidx = 0;
                } else if (swsqe->complete) {

                        BUG_ON(swsqe->flushed);

                        /*
                         * Insert this completed cqe into the swcq.
                         */
                        pr_debug("%s moving cqe into swcq sq idx %u cq idx %u\n",
                                 __func__, cidx, cq->sw_pidx);
                        swsqe->cqe.header |= htonl(CQE_SWCQE_V(1));
                        cq->sw_queue[cq->sw_pidx] = swsqe->cqe;
                        t4_swcq_produce(cq);
                        swsqe->flushed = 1;
                        if (++cidx == wq->sq.size)
                                cidx = 0;
                        wq->sq.flush_cidx = cidx;
                } else
                        break;
        }
}

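/*
 * Build a READ_REQ SQ CQE in local memory from the hardware READ_RESP CQE
 * and the oldest outstanding read WR.
 */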
static void create_read_req_cqe(struct t4_wq *wq, struct t4_cqe *hw_cqe,
                struct t4_cqe *read_cqe)
{
        read_cqe->u.scqe.cidx = wq->sq.oldest_read->idx;
        read_cqe->len = htonl(wq->sq.oldest_read->read_len);
        read_cqe->header = htonl(CQE_QPID_V(CQE_QPID(hw_cqe)) |
                        CQE_SWCQE_V(SW_CQE(hw_cqe)) |
                        CQE_OPCODE_V(FW_RI_READ_REQ) |
                        CQE_TYPE_V(1));
        read_cqe->bits_type_ts = hw_cqe->bits_type_ts;
}

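/*
 * Advance oldest_read to the next READ_REQ WR in the software SQ, or set
 * it to NULL if no outstanding read requests remain.
 */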
static void advance_oldest_read(struct t4_wq *wq)
{
        u32 rptr = wq->sq.oldest_read - wq->sq.sw_sq + 1;

        if (rptr == wq->sq.size)
                rptr = 0;
        while (rptr != wq->sq.pidx) {
                wq->sq.oldest_read = &wq->sq.sw_sq[rptr];

                if (wq->sq.oldest_read->opcode == FW_RI_READ_REQ)
                        return;
                if (++rptr == wq->sq.size)
                        rptr = 0;
        }
        wq->sq.oldest_read = NULL;
}

/*
 * Move all CQEs from the HWCQ into the SWCQ.
 * Deal with out-of-order completions and with completions that
 * complete prior unsignaled WRs.
 */
void c4iw_flush_hw_cq(struct c4iw_cq *chp, struct c4iw_qp *flush_qhp)
{
        struct t4_cqe *hw_cqe, *swcqe, read_cqe;
        struct c4iw_qp *qhp;
        struct t4_swsqe *swsqe;
        int ret;

        pr_debug("%s  cqid 0x%x\n", __func__, chp->cq.cqid);
        ret = t4_next_hw_cqe(&chp->cq, &hw_cqe);

        /*
         * This logic is similar to poll_cq(), but not quite the same
         * unfortunately.  Need to move pertinent HW CQEs to the SW CQ but
         * also do any translation magic that poll_cq() normally does.
         */
        while (!ret) {
                qhp = get_qhp(chp->rhp, CQE_QPID(hw_cqe));

                /*
                 * drop CQEs with no associated QP
                 */
                if (qhp == NULL)
                        goto next_cqe;

                if (flush_qhp != qhp) {
                        spin_lock(&qhp->lock);

                        if (qhp->wq.flushed == 1)
                                goto next_cqe;
                }

                if (CQE_OPCODE(hw_cqe) == FW_RI_TERMINATE)
                        goto next_cqe;

                if (CQE_OPCODE(hw_cqe) == FW_RI_READ_RESP) {

                        /* If we have reached here because of async
                         * event or other error, and have egress error
                         * then drop
                         */
                        if (CQE_TYPE(hw_cqe) == 1)
                                goto next_cqe;

                        /* drop peer2peer RTR reads.
                         */
                        if (CQE_WRID_STAG(hw_cqe) == 1)
                                goto next_cqe;

                        /*
                         * Eat completions for unsignaled read WRs.
                         */
                        if (!qhp->wq.sq.oldest_read->signaled) {
                                advance_oldest_read(&qhp->wq);
                                goto next_cqe;
                        }

                        /*
                         * Don't write to the HWCQ, create a new read req CQE
                         * in local memory and move it into the swcq.
                         */
                        create_read_req_cqe(&qhp->wq, hw_cqe, &read_cqe);
                        hw_cqe = &read_cqe;
                        advance_oldest_read(&qhp->wq);
                }

                /* if it's an SQ completion, then do the magic to move all the
                 * unsignaled and now in-order completions into the swcq.
                 */
                if (SQ_TYPE(hw_cqe)) {
                        swsqe = &qhp->wq.sq.sw_sq[CQE_WRID_SQ_IDX(hw_cqe)];
                        swsqe->cqe = *hw_cqe;
                        swsqe->complete = 1;
                        flush_completed_wrs(&qhp->wq, &chp->cq);
                } else {
                        swcqe = &chp->cq.sw_queue[chp->cq.sw_pidx];
                        *swcqe = *hw_cqe;
                        swcqe->header |= cpu_to_be32(CQE_SWCQE_V(1));
                        t4_swcq_produce(&chp->cq);
                }
next_cqe:
                t4_hwcq_consume(&chp->cq);
                ret = t4_next_hw_cqe(&chp->cq, &hw_cqe);
                if (qhp && flush_qhp != qhp)
                        spin_unlock(&qhp->lock);
        }
}

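/*
 * Return 1 if this CQE completes a WR posted on @wq, 0 if it should not be
 * counted (drain, terminate, or a CQE with nothing to complete).
 */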
static int cqe_completes_wr(struct t4_cqe *cqe, struct t4_wq *wq)
{
        if (DRAIN_CQE(cqe)) {
                WARN_ONCE(1, "Unexpected DRAIN CQE qp id %u!\n", wq->sq.qid);
                return 0;
        }

        if (CQE_OPCODE(cqe) == FW_RI_TERMINATE)
                return 0;

        if ((CQE_OPCODE(cqe) == FW_RI_RDMA_WRITE) && RQ_TYPE(cqe))
                return 0;

        if ((CQE_OPCODE(cqe) == FW_RI_READ_RESP) && SQ_TYPE(cqe))
                return 0;

        if (CQE_SEND_OPCODE(cqe) && RQ_TYPE(cqe) && t4_rq_empty(wq))
                return 0;
        return 1;
}

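/*
 * Count the RQ completions sitting in the software CQ that belong to @wq
 * and actually complete a WR.
 */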
void c4iw_count_rcqes(struct t4_cq *cq, struct t4_wq *wq, int *count)
{
        struct t4_cqe *cqe;
        u32 ptr;

        *count = 0;
        pr_debug("%s count zero %d\n", __func__, *count);
        ptr = cq->sw_cidx;
        while (ptr != cq->sw_pidx) {
                cqe = &cq->sw_queue[ptr];
                if (RQ_TYPE(cqe) && (CQE_OPCODE(cqe) != FW_RI_READ_RESP) &&
                    (CQE_QPID(cqe) == wq->sq.qid) && cqe_completes_wr(cqe, wq))
                        (*count)++;
                if (++ptr == cq->size)
                        ptr = 0;
        }
        pr_debug("%s cq %p count %d\n", __func__, cq, *count);
}

/*
 * poll_cq
 *
 * Caller must:
 *     check the validity of the first CQE,
 *     supply the wq associated with the qpid.
 *
 * credit: cq credit to return to sge.
 * cqe_flushed: 1 iff the CQE is flushed.
 * cqe: copy of the polled CQE.
 *
 * return value:
 *    0             CQE returned ok.
 *    -EAGAIN       CQE skipped, try again.
 *    -EOVERFLOW    CQ overflow detected.
 */
static int poll_cq(struct t4_wq *wq, struct t4_cq *cq, struct t4_cqe *cqe,
                   u8 *cqe_flushed, u64 *cookie, u32 *credit)
{
        int ret = 0;
        struct t4_cqe *hw_cqe, read_cqe;

        *cqe_flushed = 0;
        *credit = 0;
        ret = t4_next_cqe(cq, &hw_cqe);
        if (ret)
                return ret;

        pr_debug("%s CQE OVF %u qpid 0x%0x genbit %u type %u status 0x%0x opcode 0x%0x len 0x%0x wrid_hi_stag 0x%x wrid_low_msn 0x%x\n",
                 __func__, CQE_OVFBIT(hw_cqe), CQE_QPID(hw_cqe),
                 CQE_GENBIT(hw_cqe), CQE_TYPE(hw_cqe), CQE_STATUS(hw_cqe),
                 CQE_OPCODE(hw_cqe), CQE_LEN(hw_cqe), CQE_WRID_HI(hw_cqe),
                 CQE_WRID_LOW(hw_cqe));

        /*
         * skip cqe's not affiliated with a QP.
         */
        if (wq == NULL) {
                ret = -EAGAIN;
                goto skip_cqe;
        }

        /*
         * skip hw cqe's if the wq is flushed.
         */
        if (wq->flushed && !SW_CQE(hw_cqe)) {
                ret = -EAGAIN;
                goto skip_cqe;
        }

        /*
         * skip TERMINATE cqes...
         */
        if (CQE_OPCODE(hw_cqe) == FW_RI_TERMINATE) {
                ret = -EAGAIN;
                goto skip_cqe;
        }

        /*
         * Special cqe for drain WR completions...
         */
        if (DRAIN_CQE(hw_cqe)) {
                *cookie = CQE_DRAIN_COOKIE(hw_cqe);
                *cqe = *hw_cqe;
                goto skip_cqe;
        }

        /*
         * Gotta tweak READ completions:
         *      1) the cqe doesn't contain the sq_wptr from the wr.
         *      2) opcode not reflected from the wr.
         *      3) read_len not reflected from the wr.
         *      4) cq_type is RQ_TYPE not SQ_TYPE.
         */
        if (RQ_TYPE(hw_cqe) && (CQE_OPCODE(hw_cqe) == FW_RI_READ_RESP)) {

                /* If we have reached here because of async
                 * event or other error, and have egress error
                 * then drop
                 */
                if (CQE_TYPE(hw_cqe) == 1) {
                        if (CQE_STATUS(hw_cqe))
                                t4_set_wq_in_error(wq);
                        ret = -EAGAIN;
                        goto skip_cqe;
                }

                /* If this is an unsolicited read response, then the read
                 * was generated by the kernel driver as part of peer-2-peer
                 * connection setup.  So ignore the completion.
                 */
                if (CQE_WRID_STAG(hw_cqe) == 1) {
                        if (CQE_STATUS(hw_cqe))
                                t4_set_wq_in_error(wq);
                        ret = -EAGAIN;
                        goto skip_cqe;
                }

                /*
                 * Eat completions for unsignaled read WRs.
                 */
                if (!wq->sq.oldest_read->signaled) {
                        advance_oldest_read(wq);
                        ret = -EAGAIN;
                        goto skip_cqe;
                }

                /*
                 * Don't write to the HWCQ, so create a new read req CQE
                 * in local memory.
                 */
                create_read_req_cqe(wq, hw_cqe, &read_cqe);
                hw_cqe = &read_cqe;
                advance_oldest_read(wq);
        }

        if (CQE_STATUS(hw_cqe) || t4_wq_in_error(wq)) {
                *cqe_flushed = (CQE_STATUS(hw_cqe) == T4_ERR_SWFLUSH);
                t4_set_wq_in_error(wq);
        }

        /*
         * RECV completion.
         */
        if (RQ_TYPE(hw_cqe)) {

                /*
                 * HW only validates 4 bits of MSN.  So we must validate that
                 * the MSN in the SEND is the next expected MSN.  If it's not,
                 * then we complete this with T4_ERR_MSN and mark the wq in
                 * error.
                 */

                if (t4_rq_empty(wq)) {
                        t4_set_wq_in_error(wq);
                        ret = -EAGAIN;
                        goto skip_cqe;
                }
                if (unlikely(!CQE_STATUS(hw_cqe) &&
                             CQE_WRID_MSN(hw_cqe) != wq->rq.msn)) {
                        t4_set_wq_in_error(wq);
                        hw_cqe->header |= cpu_to_be32(CQE_STATUS_V(T4_ERR_MSN));
                }
                goto proc_cqe;
        }

        /*
         * If we get here it's a send completion.
         *
         * Handle out of order completion. These get stuffed
         * in the SW SQ. Then the SW SQ is walked to move any
         * now in-order completions into the SW CQ.  This handles
         * 2 cases:
         *      1) reaping unsignaled WRs when the first subsequent
         *         signaled WR is completed.
         *      2) out of order read completions.
         */
        if (!SW_CQE(hw_cqe) && (CQE_WRID_SQ_IDX(hw_cqe) != wq->sq.cidx)) {
                struct t4_swsqe *swsqe;

                pr_debug("%s out of order completion going in sw_sq at idx %u\n",
                         __func__, CQE_WRID_SQ_IDX(hw_cqe));
                swsqe = &wq->sq.sw_sq[CQE_WRID_SQ_IDX(hw_cqe)];
                swsqe->cqe = *hw_cqe;
                swsqe->complete = 1;
                ret = -EAGAIN;
                goto flush_wq;
        }

proc_cqe:
        *cqe = *hw_cqe;

        /*
         * Reap the associated WR(s) that are freed up with this
         * completion.
         */
        if (SQ_TYPE(hw_cqe)) {
                int idx = CQE_WRID_SQ_IDX(hw_cqe);

                BUG_ON(idx >= wq->sq.size);

                /*
                 * Account for any unsignaled completions completed by
                 * this signaled completion.  In this case, cidx points
                 * to the first unsignaled one, and idx points to the
                 * signaled one.  So adjust in_use based on this delta.
                 * If this is not completing any unsignaled WRs, then the
                 * delta will be 0. Handle wrapping also!
                 */
                if (idx < wq->sq.cidx)
                        wq->sq.in_use -= wq->sq.size + idx - wq->sq.cidx;
                else
                        wq->sq.in_use -= idx - wq->sq.cidx;
                BUG_ON(wq->sq.in_use <= 0 || wq->sq.in_use >= wq->sq.size);

                wq->sq.cidx = (uint16_t)idx;
                pr_debug("%s completing sq idx %u\n", __func__, wq->sq.cidx);
                *cookie = wq->sq.sw_sq[wq->sq.cidx].wr_id;
                if (c4iw_wr_log)
                        c4iw_log_wr_stats(wq, hw_cqe);
                t4_sq_consume(wq);
        } else {
                pr_debug("%s completing rq idx %u\n", __func__, wq->rq.cidx);
                *cookie = wq->rq.sw_rq[wq->rq.cidx].wr_id;
                BUG_ON(t4_rq_empty(wq));
                if (c4iw_wr_log)
                        c4iw_log_wr_stats(wq, hw_cqe);
                t4_rq_consume(wq);
                goto skip_cqe;
        }

flush_wq:
        /*
         * Flush any completed cqes that are now in-order.
         */
        flush_completed_wrs(wq, cq);

skip_cqe:
        if (SW_CQE(hw_cqe)) {
                pr_debug("%s cq %p cqid 0x%x skip sw cqe cidx %u\n",
                         __func__, cq, cq->cqid, cq->sw_cidx);
                t4_swcq_consume(cq);
        } else {
                pr_debug("%s cq %p cqid 0x%x skip hw cqe cidx %u\n",
                         __func__, cq, cq->cqid, cq->cidx);
                t4_hwcq_consume(cq);
        }
        return ret;
}

/*
 * Get one cq entry from c4iw and map it to openib.
 *
 * Returns:
 *      0                       cqe returned
 *      -ENODATA                CQ empty
 *      -EAGAIN                 caller must try again
 *      any other -errno        fatal error
 */
static int c4iw_poll_cq_one(struct c4iw_cq *chp, struct ib_wc *wc)
{
        struct c4iw_qp *qhp = NULL;
        struct t4_cqe uninitialized_var(cqe), *rd_cqe;
        struct t4_wq *wq;
        u32 credit = 0;
        u8 cqe_flushed;
        u64 cookie = 0;
        int ret;

        ret = t4_next_cqe(&chp->cq, &rd_cqe);

        if (ret)
                return ret;

        qhp = get_qhp(chp->rhp, CQE_QPID(rd_cqe));
        if (!qhp) {
                wq = NULL;
        } else {
                spin_lock(&qhp->lock);
                wq = &(qhp->wq);
        }
        ret = poll_cq(wq, &(chp->cq), &cqe, &cqe_flushed, &cookie, &credit);
        if (ret)
                goto out;

        wc->wr_id = cookie;
        wc->qp = &qhp->ibqp;
        wc->vendor_err = CQE_STATUS(&cqe);
        wc->wc_flags = 0;

        pr_debug("%s qpid 0x%x type %d opcode %d status 0x%x len %u wrid hi 0x%x lo 0x%x cookie 0x%llx\n",
                 __func__, CQE_QPID(&cqe),
                 CQE_TYPE(&cqe), CQE_OPCODE(&cqe),
                 CQE_STATUS(&cqe), CQE_LEN(&cqe),
                 CQE_WRID_HI(&cqe), CQE_WRID_LOW(&cqe),
                 (unsigned long long)cookie);

        if (CQE_TYPE(&cqe) == 0) {
                if (!CQE_STATUS(&cqe))
                        wc->byte_len = CQE_LEN(&cqe);
                else
                        wc->byte_len = 0;
                wc->opcode = IB_WC_RECV;
                if (CQE_OPCODE(&cqe) == FW_RI_SEND_WITH_INV ||
                    CQE_OPCODE(&cqe) == FW_RI_SEND_WITH_SE_INV) {
                        wc->ex.invalidate_rkey = CQE_WRID_STAG(&cqe);
                        wc->wc_flags |= IB_WC_WITH_INVALIDATE;
                        c4iw_invalidate_mr(qhp->rhp, wc->ex.invalidate_rkey);
                }
        } else {
                switch (CQE_OPCODE(&cqe)) {
                case FW_RI_RDMA_WRITE:
                        wc->opcode = IB_WC_RDMA_WRITE;
                        break;
                case FW_RI_READ_REQ:
                        wc->opcode = IB_WC_RDMA_READ;
                        wc->byte_len = CQE_LEN(&cqe);
                        break;
                case FW_RI_SEND_WITH_INV:
                case FW_RI_SEND_WITH_SE_INV:
                        wc->opcode = IB_WC_SEND;
                        wc->wc_flags |= IB_WC_WITH_INVALIDATE;
                        break;
                case FW_RI_SEND:
                case FW_RI_SEND_WITH_SE:
                        wc->opcode = IB_WC_SEND;
                        break;
                case FW_RI_LOCAL_INV:
                        wc->opcode = IB_WC_LOCAL_INV;
                        break;
                case FW_RI_FAST_REGISTER:
                        wc->opcode = IB_WC_REG_MR;

                        /* Invalidate the MR if the fastreg failed */
                        if (CQE_STATUS(&cqe) != T4_ERR_SUCCESS)
                                c4iw_invalidate_mr(qhp->rhp,
                                                   CQE_WRID_FR_STAG(&cqe));
                        break;
                default:
                        pr_err("Unexpected opcode %d in the CQE received for QPID=0x%0x\n",
                               CQE_OPCODE(&cqe), CQE_QPID(&cqe));
                        ret = -EINVAL;
                        goto out;
                }
        }

        if (cqe_flushed) {
                wc->status = IB_WC_WR_FLUSH_ERR;
        } else {
                switch (CQE_STATUS(&cqe)) {
                case T4_ERR_SUCCESS:
                        wc->status = IB_WC_SUCCESS;
                        break;
                case T4_ERR_STAG:
                        wc->status = IB_WC_LOC_ACCESS_ERR;
                        break;
                case T4_ERR_PDID:
                        wc->status = IB_WC_LOC_PROT_ERR;
                        break;
                case T4_ERR_QPID:
                case T4_ERR_ACCESS:
                        wc->status = IB_WC_LOC_ACCESS_ERR;
                        break;
                case T4_ERR_WRAP:
                        wc->status = IB_WC_GENERAL_ERR;
                        break;
                case T4_ERR_BOUND:
                        wc->status = IB_WC_LOC_LEN_ERR;
                        break;
                case T4_ERR_INVALIDATE_SHARED_MR:
                case T4_ERR_INVALIDATE_MR_WITH_MW_BOUND:
                        wc->status = IB_WC_MW_BIND_ERR;
                        break;
                case T4_ERR_CRC:
                case T4_ERR_MARKER:
                case T4_ERR_PDU_LEN_ERR:
                case T4_ERR_OUT_OF_RQE:
                case T4_ERR_DDP_VERSION:
                case T4_ERR_RDMA_VERSION:
                case T4_ERR_DDP_QUEUE_NUM:
                case T4_ERR_MSN:
                case T4_ERR_TBIT:
                case T4_ERR_MO:
                case T4_ERR_MSN_RANGE:
                case T4_ERR_IRD_OVERFLOW:
                case T4_ERR_OPCODE:
                case T4_ERR_INTERNAL_ERR:
                        wc->status = IB_WC_FATAL_ERR;
                        break;
                case T4_ERR_SWFLUSH:
                        wc->status = IB_WC_WR_FLUSH_ERR;
                        break;
                default:
                        pr_err("Unexpected cqe_status 0x%x for QPID=0x%0x\n",
                               CQE_STATUS(&cqe), CQE_QPID(&cqe));
                        wc->status = IB_WC_FATAL_ERR;
                }
        }
out:
        if (wq)
                spin_unlock(&qhp->lock);
        return ret;
}

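/*
 * Poll up to num_entries completions from the CQ into the wc array.
 * Returns the number of completions polled, or a negative errno.
 */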
int c4iw_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
        struct c4iw_cq *chp;
        unsigned long flags;
        int npolled;
        int err = 0;

        chp = to_c4iw_cq(ibcq);

        spin_lock_irqsave(&chp->lock, flags);
        for (npolled = 0; npolled < num_entries; ++npolled) {
                do {
                        err = c4iw_poll_cq_one(chp, wc + npolled);
                } while (err == -EAGAIN);
                if (err)
                        break;
        }
        spin_unlock_irqrestore(&chp->lock, flags);
        return !err || err == -ENODATA ? npolled : err;
}

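/*
 * Remove the CQ from the id table, wait for the reference count to drop to
 * zero, then destroy the hardware CQ and free the chp.
 */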
int c4iw_destroy_cq(struct ib_cq *ib_cq)
{
        struct c4iw_cq *chp;
        struct c4iw_ucontext *ucontext;

        pr_debug("%s ib_cq %p\n", __func__, ib_cq);
        chp = to_c4iw_cq(ib_cq);

        remove_handle(chp->rhp, &chp->rhp->cqidr, chp->cq.cqid);
        atomic_dec(&chp->refcnt);
        wait_event(chp->wait, !atomic_read(&chp->refcnt));

        ucontext = ib_cq->uobject ? to_c4iw_ucontext(ib_cq->uobject->context)
                                  : NULL;
        destroy_cq(&chp->rhp->rdev, &chp->cq,
                   ucontext ? &ucontext->uctx : &chp->cq.rdev->uctx,
                   chp->destroy_skb);
        chp->destroy_skb = NULL;
        kfree(chp);
        return 0;
}

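/*
 * Create a CQ: size the hardware ingress queue, create it in hardware, and
 * for user CQs hand the queue and doorbell mmap keys back to the caller
 * via udata.
 */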
struct ib_cq *c4iw_create_cq(struct ib_device *ibdev,
                             const struct ib_cq_init_attr *attr,
                             struct ib_ucontext *ib_context,
                             struct ib_udata *udata)
{
        int entries = attr->cqe;
        int vector = attr->comp_vector;
        struct c4iw_dev *rhp;
        struct c4iw_cq *chp;
        struct c4iw_create_cq_resp uresp;
        struct c4iw_ucontext *ucontext = NULL;
        int ret, wr_len;
        size_t memsize, hwentries;
        struct c4iw_mm_entry *mm, *mm2;

        pr_debug("%s ib_dev %p entries %d\n", __func__, ibdev, entries);
        if (attr->flags)
                return ERR_PTR(-EINVAL);

        rhp = to_c4iw_dev(ibdev);

        if (entries < 1 || entries > ibdev->attrs.max_cqe)
                return ERR_PTR(-EINVAL);

        if (vector >= rhp->rdev.lldi.nciq)
                return ERR_PTR(-EINVAL);

        chp = kzalloc(sizeof(*chp), GFP_KERNEL);
        if (!chp)
                return ERR_PTR(-ENOMEM);

        wr_len = sizeof(struct fw_ri_res_wr) + sizeof(struct fw_ri_res);
        chp->destroy_skb = alloc_skb(wr_len, GFP_KERNEL);
        if (!chp->destroy_skb) {
                ret = -ENOMEM;
                goto err1;
        }

        if (ib_context)
                ucontext = to_c4iw_ucontext(ib_context);

        /* account for the status page. */
        entries++;

        /* IQ needs one extra entry to differentiate full vs empty. */
        entries++;

        /*
         * entries must be multiple of 16 for HW.
         */
        entries = roundup(entries, 16);

        /*
         * Make actual HW queue 2x to avoid cidx_inc overflows.
         */
        hwentries = min(entries * 2, rhp->rdev.hw_queue.t4_max_iq_size);

        /*
         * Make HW queue at least 64 entries so GTS updates aren't too
         * frequent.
         */
        if (hwentries < 64)
                hwentries = 64;

        memsize = hwentries * sizeof *chp->cq.queue;

        /*
         * memsize must be a multiple of the page size if it's a user cq.
         */
        if (ucontext)
                memsize = roundup(memsize, PAGE_SIZE);
        chp->cq.size = hwentries;
        chp->cq.memsize = memsize;
        chp->cq.vector = vector;

        ret = create_cq(&rhp->rdev, &chp->cq,
                        ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
        if (ret)
                goto err2;

        chp->rhp = rhp;
        chp->cq.size--;                         /* status page */
        chp->ibcq.cqe = entries - 2;
        spin_lock_init(&chp->lock);
        spin_lock_init(&chp->comp_handler_lock);
        atomic_set(&chp->refcnt, 1);
        init_waitqueue_head(&chp->wait);
        ret = insert_handle(rhp, &rhp->cqidr, chp, chp->cq.cqid);
        if (ret)
                goto err3;

        if (ucontext) {
                ret = -ENOMEM;
                mm = kmalloc(sizeof *mm, GFP_KERNEL);
                if (!mm)
                        goto err4;
                mm2 = kmalloc(sizeof *mm2, GFP_KERNEL);
                if (!mm2)
                        goto err5;

                uresp.qid_mask = rhp->rdev.cqmask;
                uresp.cqid = chp->cq.cqid;
                uresp.size = chp->cq.size;
                uresp.memsize = chp->cq.memsize;
                spin_lock(&ucontext->mmap_lock);
                uresp.key = ucontext->key;
                ucontext->key += PAGE_SIZE;
                uresp.gts_key = ucontext->key;
                ucontext->key += PAGE_SIZE;
                spin_unlock(&ucontext->mmap_lock);
                ret = ib_copy_to_udata(udata, &uresp,
                                       sizeof(uresp) - sizeof(uresp.reserved));
                if (ret)
                        goto err6;

                mm->key = uresp.key;
                mm->addr = virt_to_phys(chp->cq.queue);
                mm->len = chp->cq.memsize;
                insert_mmap(ucontext, mm);

                mm2->key = uresp.gts_key;
                mm2->addr = chp->cq.bar2_pa;
                mm2->len = PAGE_SIZE;
                insert_mmap(ucontext, mm2);
        }
        pr_debug("%s cqid 0x%0x chp %p size %u memsize %zu, dma_addr 0x%0llx\n",
                 __func__, chp->cq.cqid, chp, chp->cq.size,
                 chp->cq.memsize, (unsigned long long)chp->cq.dma_addr);
        return &chp->ibcq;
err6:
        kfree(mm2);
err5:
        kfree(mm);
err4:
        remove_handle(rhp, &rhp->cqidr, chp->cq.cqid);
err3:
        destroy_cq(&chp->rhp->rdev, &chp->cq,
                   ucontext ? &ucontext->uctx : &rhp->rdev.uctx,
                   chp->destroy_skb);
err2:
        kfree_skb(chp->destroy_skb);
err1:
        kfree(chp);
        return ERR_PTR(ret);
}

int c4iw_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata)
{
        return -ENOSYS;
}

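/*
 * Arm the CQ for the next notification (solicited-only if requested) and,
 * when IB_CQ_REPORT_MISSED_EVENTS is set, report whether completions are
 * already pending.
 */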
int c4iw_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
        struct c4iw_cq *chp;
        int ret = 0;
        unsigned long flag;

        chp = to_c4iw_cq(ibcq);
        spin_lock_irqsave(&chp->lock, flag);
        t4_arm_cq(&chp->cq,
                  (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED);
        if (flags & IB_CQ_REPORT_MISSED_EVENTS)
                ret = t4_cq_notempty(&chp->cq);
        spin_unlock_irqrestore(&chp->lock, flag);
        return ret;
}