GNU Linux-libre 4.14.302-gnu1
drivers/infiniband/sw/rxe/rxe_resp.c
/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/skbuff.h>

#include "rxe.h"
#include "rxe_loc.h"
#include "rxe_queue.h"

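/* The responder is implemented as a state machine. rxe_responder() at the
 * bottom of this file steps through these states for each inbound request:
 * validate the PSN and opcode sequence, claim a receive resource, check the
 * rkey, execute the operation, then complete, acknowledge and clean up.
 */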
enum resp_states {
	RESPST_NONE,
	RESPST_GET_REQ,
	RESPST_CHK_PSN,
	RESPST_CHK_OP_SEQ,
	RESPST_CHK_OP_VALID,
	RESPST_CHK_RESOURCE,
	RESPST_CHK_LENGTH,
	RESPST_CHK_RKEY,
	RESPST_EXECUTE,
	RESPST_READ_REPLY,
	RESPST_COMPLETE,
	RESPST_ACKNOWLEDGE,
	RESPST_CLEANUP,
	RESPST_DUPLICATE_REQUEST,
	RESPST_ERR_MALFORMED_WQE,
	RESPST_ERR_UNSUPPORTED_OPCODE,
	RESPST_ERR_MISALIGNED_ATOMIC,
	RESPST_ERR_PSN_OUT_OF_SEQ,
	RESPST_ERR_MISSING_OPCODE_FIRST,
	RESPST_ERR_MISSING_OPCODE_LAST_C,
	RESPST_ERR_MISSING_OPCODE_LAST_D1E,
	RESPST_ERR_TOO_MANY_RDMA_ATM_REQ,
	RESPST_ERR_RNR,
	RESPST_ERR_RKEY_VIOLATION,
	RESPST_ERR_LENGTH,
	RESPST_ERR_CQ_OVERFLOW,
	RESPST_ERROR,
	RESPST_RESET,
	RESPST_DONE,
	RESPST_EXIT,
};

static char *resp_state_name[] = {
	[RESPST_NONE]				= "NONE",
	[RESPST_GET_REQ]			= "GET_REQ",
	[RESPST_CHK_PSN]			= "CHK_PSN",
	[RESPST_CHK_OP_SEQ]			= "CHK_OP_SEQ",
	[RESPST_CHK_OP_VALID]			= "CHK_OP_VALID",
	[RESPST_CHK_RESOURCE]			= "CHK_RESOURCE",
	[RESPST_CHK_LENGTH]			= "CHK_LENGTH",
	[RESPST_CHK_RKEY]			= "CHK_RKEY",
	[RESPST_EXECUTE]			= "EXECUTE",
	[RESPST_READ_REPLY]			= "READ_REPLY",
	[RESPST_COMPLETE]			= "COMPLETE",
	[RESPST_ACKNOWLEDGE]			= "ACKNOWLEDGE",
	[RESPST_CLEANUP]			= "CLEANUP",
	[RESPST_DUPLICATE_REQUEST]		= "DUPLICATE_REQUEST",
	[RESPST_ERR_MALFORMED_WQE]		= "ERR_MALFORMED_WQE",
	[RESPST_ERR_UNSUPPORTED_OPCODE]		= "ERR_UNSUPPORTED_OPCODE",
	[RESPST_ERR_MISALIGNED_ATOMIC]		= "ERR_MISALIGNED_ATOMIC",
	[RESPST_ERR_PSN_OUT_OF_SEQ]		= "ERR_PSN_OUT_OF_SEQ",
	[RESPST_ERR_MISSING_OPCODE_FIRST]	= "ERR_MISSING_OPCODE_FIRST",
	[RESPST_ERR_MISSING_OPCODE_LAST_C]	= "ERR_MISSING_OPCODE_LAST_C",
	[RESPST_ERR_MISSING_OPCODE_LAST_D1E]	= "ERR_MISSING_OPCODE_LAST_D1E",
	[RESPST_ERR_TOO_MANY_RDMA_ATM_REQ]	= "ERR_TOO_MANY_RDMA_ATM_REQ",
	[RESPST_ERR_RNR]			= "ERR_RNR",
	[RESPST_ERR_RKEY_VIOLATION]		= "ERR_RKEY_VIOLATION",
	[RESPST_ERR_LENGTH]			= "ERR_LENGTH",
	[RESPST_ERR_CQ_OVERFLOW]		= "ERR_CQ_OVERFLOW",
	[RESPST_ERROR]				= "ERROR",
	[RESPST_RESET]				= "RESET",
	[RESPST_DONE]				= "DONE",
	[RESPST_EXIT]				= "EXIT",
};

/* rxe_recv calls here to add a request packet to the input queue */
void rxe_resp_queue_pkt(struct rxe_dev *rxe, struct rxe_qp *qp,
			struct sk_buff *skb)
{
	int must_sched;
	struct rxe_pkt_info *pkt = SKB_TO_PKT(skb);

	skb_queue_tail(&qp->req_pkts, skb);

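	/* An RDMA read request, or a backlog of queued packets, can take a
	 * long time to serve, so punt those to the responder task instead
	 * of running inline in this context.
	 */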
	must_sched = (pkt->opcode == IB_OPCODE_RC_RDMA_READ_REQUEST) ||
			(skb_queue_len(&qp->req_pkts) > 1);

	rxe_run_task(&qp->resp.task, must_sched);
}

static inline enum resp_states get_req(struct rxe_qp *qp,
				       struct rxe_pkt_info **pkt_p)
{
	struct sk_buff *skb;

	if (qp->resp.state == QP_STATE_ERROR) {
		skb = skb_dequeue(&qp->req_pkts);
		if (skb) {
			/* drain request packet queue */
			rxe_drop_ref(qp);
			kfree_skb(skb);
			return RESPST_GET_REQ;
		}

		/* go drain recv wr queue */
		return RESPST_CHK_RESOURCE;
	}

	skb = skb_peek(&qp->req_pkts);
	if (!skb)
		return RESPST_EXIT;

	*pkt_p = SKB_TO_PKT(skb);

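	/* a non-NULL qp->resp.res means an RDMA read reply is still in
	 * progress; resume it before validating the next request
	 */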
	return (qp->resp.res) ? RESPST_READ_REPLY : RESPST_CHK_PSN;
}

static enum resp_states check_psn(struct rxe_qp *qp,
				  struct rxe_pkt_info *pkt)
{
	int diff = psn_compare(pkt->psn, qp->resp.psn);
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);

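	/* psn_compare() does serial-number arithmetic on the 24-bit PSN
	 * space: diff > 0 means the packet is ahead of the expected PSN
	 * (a gap, so NAK it), diff < 0 means it is behind (a duplicate).
	 */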
	switch (qp_type(qp)) {
	case IB_QPT_RC:
		if (diff > 0) {
			if (qp->resp.sent_psn_nak)
				return RESPST_CLEANUP;

			qp->resp.sent_psn_nak = 1;
			rxe_counter_inc(rxe, RXE_CNT_OUT_OF_SEQ_REQ);
			return RESPST_ERR_PSN_OUT_OF_SEQ;

		} else if (diff < 0) {
			rxe_counter_inc(rxe, RXE_CNT_DUP_REQ);
			return RESPST_DUPLICATE_REQUEST;
		}

		if (qp->resp.sent_psn_nak)
			qp->resp.sent_psn_nak = 0;

		break;

	case IB_QPT_UC:
		if (qp->resp.drop_msg || diff != 0) {
			if (pkt->mask & RXE_START_MASK) {
				qp->resp.drop_msg = 0;
				return RESPST_CHK_OP_SEQ;
			}

			qp->resp.drop_msg = 1;
			return RESPST_CLEANUP;
		}
		break;
	default:
		break;
	}

	return RESPST_CHK_OP_SEQ;
}

static enum resp_states check_op_seq(struct rxe_qp *qp,
				     struct rxe_pkt_info *pkt)
{
	switch (qp_type(qp)) {
	case IB_QPT_RC:
		switch (qp->resp.opcode) {
		case IB_OPCODE_RC_SEND_FIRST:
		case IB_OPCODE_RC_SEND_MIDDLE:
			switch (pkt->opcode) {
			case IB_OPCODE_RC_SEND_MIDDLE:
			case IB_OPCODE_RC_SEND_LAST:
			case IB_OPCODE_RC_SEND_LAST_WITH_IMMEDIATE:
			case IB_OPCODE_RC_SEND_LAST_WITH_INVALIDATE:
				return RESPST_CHK_OP_VALID;
			default:
				return RESPST_ERR_MISSING_OPCODE_LAST_C;
			}

		case IB_OPCODE_RC_RDMA_WRITE_FIRST:
		case IB_OPCODE_RC_RDMA_WRITE_MIDDLE:
			switch (pkt->opcode) {
			case IB_OPCODE_RC_RDMA_WRITE_MIDDLE:
			case IB_OPCODE_RC_RDMA_WRITE_LAST:
			case IB_OPCODE_RC_RDMA_WRITE_LAST_WITH_IMMEDIATE:
				return RESPST_CHK_OP_VALID;
			default:
				return RESPST_ERR_MISSING_OPCODE_LAST_C;
			}

		default:
			switch (pkt->opcode) {
			case IB_OPCODE_RC_SEND_MIDDLE:
			case IB_OPCODE_RC_SEND_LAST:
			case IB_OPCODE_RC_SEND_LAST_WITH_IMMEDIATE:
			case IB_OPCODE_RC_SEND_LAST_WITH_INVALIDATE:
			case IB_OPCODE_RC_RDMA_WRITE_MIDDLE:
			case IB_OPCODE_RC_RDMA_WRITE_LAST:
			case IB_OPCODE_RC_RDMA_WRITE_LAST_WITH_IMMEDIATE:
				return RESPST_ERR_MISSING_OPCODE_FIRST;
			default:
				return RESPST_CHK_OP_VALID;
			}
		}
		break;

	case IB_QPT_UC:
		switch (qp->resp.opcode) {
		case IB_OPCODE_UC_SEND_FIRST:
		case IB_OPCODE_UC_SEND_MIDDLE:
			switch (pkt->opcode) {
			case IB_OPCODE_UC_SEND_MIDDLE:
			case IB_OPCODE_UC_SEND_LAST:
			case IB_OPCODE_UC_SEND_LAST_WITH_IMMEDIATE:
				return RESPST_CHK_OP_VALID;
			default:
				return RESPST_ERR_MISSING_OPCODE_LAST_D1E;
			}

		case IB_OPCODE_UC_RDMA_WRITE_FIRST:
		case IB_OPCODE_UC_RDMA_WRITE_MIDDLE:
			switch (pkt->opcode) {
			case IB_OPCODE_UC_RDMA_WRITE_MIDDLE:
			case IB_OPCODE_UC_RDMA_WRITE_LAST:
			case IB_OPCODE_UC_RDMA_WRITE_LAST_WITH_IMMEDIATE:
				return RESPST_CHK_OP_VALID;
			default:
				return RESPST_ERR_MISSING_OPCODE_LAST_D1E;
			}

		default:
			switch (pkt->opcode) {
			case IB_OPCODE_UC_SEND_MIDDLE:
			case IB_OPCODE_UC_SEND_LAST:
			case IB_OPCODE_UC_SEND_LAST_WITH_IMMEDIATE:
			case IB_OPCODE_UC_RDMA_WRITE_MIDDLE:
			case IB_OPCODE_UC_RDMA_WRITE_LAST:
			case IB_OPCODE_UC_RDMA_WRITE_LAST_WITH_IMMEDIATE:
				qp->resp.drop_msg = 1;
				return RESPST_CLEANUP;
			default:
				return RESPST_CHK_OP_VALID;
			}
		}
		break;

	default:
		return RESPST_CHK_OP_VALID;
	}
}

static enum resp_states check_op_valid(struct rxe_qp *qp,
				       struct rxe_pkt_info *pkt)
{
	switch (qp_type(qp)) {
	case IB_QPT_RC:
		if (((pkt->mask & RXE_READ_MASK) &&
		     !(qp->attr.qp_access_flags & IB_ACCESS_REMOTE_READ)) ||
		    ((pkt->mask & RXE_WRITE_MASK) &&
		     !(qp->attr.qp_access_flags & IB_ACCESS_REMOTE_WRITE)) ||
		    ((pkt->mask & RXE_ATOMIC_MASK) &&
		     !(qp->attr.qp_access_flags & IB_ACCESS_REMOTE_ATOMIC))) {
			return RESPST_ERR_UNSUPPORTED_OPCODE;
		}

		break;

	case IB_QPT_UC:
		if ((pkt->mask & RXE_WRITE_MASK) &&
		    !(qp->attr.qp_access_flags & IB_ACCESS_REMOTE_WRITE)) {
			qp->resp.drop_msg = 1;
			return RESPST_CLEANUP;
		}

		break;

	case IB_QPT_UD:
	case IB_QPT_SMI:
	case IB_QPT_GSI:
		break;

	default:
		WARN_ON_ONCE(1);
		break;
	}

	return RESPST_CHK_RESOURCE;
}

static enum resp_states get_srq_wqe(struct rxe_qp *qp)
{
	struct rxe_srq *srq = qp->srq;
	struct rxe_queue *q = srq->rq.queue;
	struct rxe_recv_wqe *wqe;
	struct ib_event ev;

	if (srq->error)
		return RESPST_ERR_RNR;

	spin_lock_bh(&srq->rq.consumer_lock);

	wqe = queue_head(q);
	if (!wqe) {
		spin_unlock_bh(&srq->rq.consumer_lock);
		return RESPST_ERR_RNR;
	}

	/* note kernel and user space recv wqes have same size */
	memcpy(&qp->resp.srq_wqe, wqe, sizeof(qp->resp.srq_wqe));

	qp->resp.wqe = &qp->resp.srq_wqe.wqe;
	advance_consumer(q);

	if (srq->limit && srq->ibsrq.event_handler &&
	    (queue_count(q) < srq->limit)) {
		srq->limit = 0;
		goto event;
	}

	spin_unlock_bh(&srq->rq.consumer_lock);
	return RESPST_CHK_LENGTH;

event:
	spin_unlock_bh(&srq->rq.consumer_lock);
	ev.device = qp->ibqp.device;
	ev.element.srq = qp->ibqp.srq;
	ev.event = IB_EVENT_SRQ_LIMIT_REACHED;
	srq->ibsrq.event_handler(&ev, srq->ibsrq.srq_context);
	return RESPST_CHK_LENGTH;
}

static enum resp_states check_resource(struct rxe_qp *qp,
				       struct rxe_pkt_info *pkt)
{
	struct rxe_srq *srq = qp->srq;

	if (qp->resp.state == QP_STATE_ERROR) {
		if (qp->resp.wqe) {
			qp->resp.status = IB_WC_WR_FLUSH_ERR;
			return RESPST_COMPLETE;
		} else if (!srq) {
			qp->resp.wqe = queue_head(qp->rq.queue);
			if (qp->resp.wqe) {
				qp->resp.status = IB_WC_WR_FLUSH_ERR;
				return RESPST_COMPLETE;
			} else {
				return RESPST_EXIT;
			}
		} else {
			return RESPST_EXIT;
		}
	}

	if (pkt->mask & RXE_READ_OR_ATOMIC) {
		/* it is the requester's job not to send
		 * too many read/atomic ops; we just
		 * recycle the responder resource queue
		 */
		if (likely(qp->attr.max_dest_rd_atomic > 0))
			return RESPST_CHK_LENGTH;
		else
			return RESPST_ERR_TOO_MANY_RDMA_ATM_REQ;
	}

	if (pkt->mask & RXE_RWR_MASK) {
		if (srq)
			return get_srq_wqe(qp);

		qp->resp.wqe = queue_head(qp->rq.queue);
		return (qp->resp.wqe) ? RESPST_CHK_LENGTH : RESPST_ERR_RNR;
	}

	return RESPST_CHK_LENGTH;
}

static enum resp_states check_length(struct rxe_qp *qp,
				     struct rxe_pkt_info *pkt)
{
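	/* No length checking is done here; every QP type falls through to
	 * the rkey check, where the payload length is validated against
	 * the MTU and the remaining transfer length.
	 */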
	switch (qp_type(qp)) {
	case IB_QPT_RC:
		return RESPST_CHK_RKEY;

	case IB_QPT_UC:
		return RESPST_CHK_RKEY;

	default:
		return RESPST_CHK_RKEY;
	}
}

static enum resp_states check_rkey(struct rxe_qp *qp,
				   struct rxe_pkt_info *pkt)
{
	struct rxe_mem *mem = NULL;
	u64 va;
	u32 rkey;
	u32 resid;
	u32 pktlen;
	int mtu = qp->mtu;
	enum resp_states state;
	int access;

	if (pkt->mask & (RXE_READ_MASK | RXE_WRITE_MASK)) {
		if (pkt->mask & RXE_RETH_MASK) {
			qp->resp.va = reth_va(pkt);
			qp->resp.rkey = reth_rkey(pkt);
			qp->resp.resid = reth_len(pkt);
			qp->resp.length = reth_len(pkt);
		}
		access = (pkt->mask & RXE_READ_MASK) ? IB_ACCESS_REMOTE_READ
						     : IB_ACCESS_REMOTE_WRITE;
	} else if (pkt->mask & RXE_ATOMIC_MASK) {
		qp->resp.va = atmeth_va(pkt);
		qp->resp.rkey = atmeth_rkey(pkt);
		qp->resp.resid = sizeof(u64);
		access = IB_ACCESS_REMOTE_ATOMIC;
	} else {
		return RESPST_EXECUTE;
	}

	/* A zero-byte op is not required to set an addr or rkey. */
	if ((pkt->mask & (RXE_READ_MASK | RXE_WRITE_OR_SEND)) &&
	    (pkt->mask & RXE_RETH_MASK) &&
	    reth_len(pkt) == 0) {
		return RESPST_EXECUTE;
	}

	va	= qp->resp.va;
	rkey	= qp->resp.rkey;
	resid	= qp->resp.resid;
	pktlen	= payload_size(pkt);

	mem = lookup_mem(qp->pd, access, rkey, lookup_remote);
	if (!mem) {
		state = RESPST_ERR_RKEY_VIOLATION;
		goto err;
	}

	if (unlikely(mem->state == RXE_MEM_STATE_FREE)) {
		state = RESPST_ERR_RKEY_VIOLATION;
		goto err;
	}

	if (mem_check_range(mem, va, resid)) {
		state = RESPST_ERR_RKEY_VIOLATION;
		goto err;
	}

	if (pkt->mask & RXE_WRITE_MASK)	 {
		if (resid > mtu) {
			if (pktlen != mtu || bth_pad(pkt)) {
				state = RESPST_ERR_LENGTH;
				goto err;
			}
		} else {
			if (pktlen != resid) {
				state = RESPST_ERR_LENGTH;
				goto err;
			}
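			/* bth_pad() must equal the number of bytes that
			 * round resid up to a 4-byte boundary, which is
			 * (-resid) & 0x3; e.g. resid = 5 needs 3 pad bytes.
			 */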
			if ((bth_pad(pkt) != (0x3 & (-resid)))) {
				/* This may not be exactly the right error
				 * code, but nothing else fits.
				 */
				state = RESPST_ERR_LENGTH;
				goto err;
			}
		}
	}

	WARN_ON_ONCE(qp->resp.mr);

	qp->resp.mr = mem;
	return RESPST_EXECUTE;

err:
	if (mem)
		rxe_drop_ref(mem);
	return state;
}

static enum resp_states send_data_in(struct rxe_qp *qp, void *data_addr,
				     int data_len)
{
	int err;
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);

	err = copy_data(rxe, qp->pd, IB_ACCESS_LOCAL_WRITE, &qp->resp.wqe->dma,
			data_addr, data_len, to_mem_obj, NULL);
	if (unlikely(err))
		return (err == -ENOSPC) ? RESPST_ERR_LENGTH
					: RESPST_ERR_MALFORMED_WQE;

	return RESPST_NONE;
}

static enum resp_states write_data_in(struct rxe_qp *qp,
				      struct rxe_pkt_info *pkt)
{
	enum resp_states rc = RESPST_NONE;
	int	err;
	int data_len = payload_size(pkt);

	err = rxe_mem_copy(qp->resp.mr, qp->resp.va, payload_addr(pkt),
			   data_len, to_mem_obj, NULL);
	if (err) {
		rc = RESPST_ERR_RKEY_VIOLATION;
		goto out;
	}

	qp->resp.va += data_len;
	qp->resp.resid -= data_len;

out:
	return rc;
}

/* Guarantee atomicity of atomic operations at the machine level. */
static DEFINE_SPINLOCK(atomic_ops_lock);

static enum resp_states process_atomic(struct rxe_qp *qp,
				       struct rxe_pkt_info *pkt)
{
	u64 iova = atmeth_va(pkt);
	u64 *vaddr;
	enum resp_states ret;
	struct rxe_mem *mr = qp->resp.mr;

	if (mr->state != RXE_MEM_STATE_VALID) {
		ret = RESPST_ERR_RKEY_VIOLATION;
		goto out;
	}

	vaddr = iova_to_vaddr(mr, iova, sizeof(u64));

	/* check vaddr is 8 bytes aligned. */
	if (!vaddr || (uintptr_t)vaddr & 7) {
		ret = RESPST_ERR_MISALIGNED_ATOMIC;
		goto out;
	}

	spin_lock_bh(&atomic_ops_lock);

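	/* save the original value: it is returned to the requester in the
	 * ATOMIC ACK and must survive to answer duplicate requests
	 */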
	qp->resp.atomic_orig = *vaddr;

	if (pkt->opcode == IB_OPCODE_RC_COMPARE_SWAP ||
	    pkt->opcode == IB_OPCODE_RD_COMPARE_SWAP) {
		if (*vaddr == atmeth_comp(pkt))
			*vaddr = atmeth_swap_add(pkt);
	} else {
		*vaddr += atmeth_swap_add(pkt);
	}

	spin_unlock_bh(&atomic_ops_lock);

	ret = RESPST_NONE;
out:
	return ret;
}

static struct sk_buff *prepare_ack_packet(struct rxe_qp *qp,
					  struct rxe_pkt_info *pkt,
					  struct rxe_pkt_info *ack,
					  int opcode,
					  int payload,
					  u32 psn,
					  u8 syndrome,
					  u32 *crcp)
{
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
	struct sk_buff *skb;
	u32 crc = 0;
	u32 *p;
	int paylen;
	int pad;
	int err;

	/*
	 * allocate packet
	 */
	pad = (-payload) & 0x3;
	paylen = rxe_opcode[opcode].length + payload + pad + RXE_ICRC_SIZE;

	skb = rxe_init_packet(rxe, &qp->pri_av, paylen, ack);
	if (!skb)
		return NULL;

	ack->qp = qp;
	ack->opcode = opcode;
	ack->mask = rxe_opcode[opcode].mask;
	ack->offset = pkt->offset;
	ack->paylen = paylen;

	/* fill in bth using the request packet headers */
	memcpy(ack->hdr, pkt->hdr, pkt->offset + RXE_BTH_BYTES);

	bth_set_opcode(ack, opcode);
	bth_set_qpn(ack, qp->attr.dest_qp_num);
	bth_set_pad(ack, pad);
	bth_set_se(ack, 0);
	bth_set_psn(ack, psn);
	bth_set_ack(ack, 0);
	ack->psn = psn;

	if (ack->mask & RXE_AETH_MASK) {
		aeth_set_syn(ack, syndrome);
		aeth_set_msn(ack, qp->resp.msn);
	}

	if (ack->mask & RXE_ATMACK_MASK)
		atmack_set_orig(ack, qp->resp.atomic_orig);

	err = rxe_prepare(rxe, ack, skb, &crc);
	if (err) {
		kfree_skb(skb);
		return NULL;
	}

	if (crcp) {
		/* CRC computation will be continued by the caller */
		*crcp = crc;
	} else {
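		/* the ICRC trailer carries the bitwise complement of the
		 * running CRC computed over the packet
		 */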
		p = payload_addr(ack) + payload + bth_pad(ack);
		*p = ~crc;
	}

	return skb;
}

/* RDMA read response. If res is not NULL, then we have a current RDMA request
 * being processed or replayed.
 */
static enum resp_states read_reply(struct rxe_qp *qp,
				   struct rxe_pkt_info *req_pkt)
{
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
	struct rxe_pkt_info ack_pkt;
	struct sk_buff *skb;
	int mtu = qp->mtu;
	enum resp_states state;
	int payload;
	int opcode;
	int err;
	struct resp_res *res = qp->resp.res;
	u32 icrc;
	u32 *p;

	if (!res) {
		/* This is the first time we process this request. Get a
		 * resource.
		 */
		res = &qp->resp.resources[qp->resp.res_head];

		free_rd_atomic_resource(qp, res);
		rxe_advance_resp_resource(qp);

		res->type		= RXE_READ_MASK;
		res->replay		= 0;

		res->read.va		= qp->resp.va;
		res->read.va_org	= qp->resp.va;

		res->first_psn		= req_pkt->psn;

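		/* one response packet per MTU: last_psn is first_psn plus
		 * ceil(len / mtu) - 1, masked into the 24-bit PSN space
		 */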
		if (reth_len(req_pkt)) {
			res->last_psn	= (req_pkt->psn +
					   (reth_len(req_pkt) + mtu - 1) /
					   mtu - 1) & BTH_PSN_MASK;
		} else {
			res->last_psn	= res->first_psn;
		}
		res->cur_psn		= req_pkt->psn;

		res->read.resid		= qp->resp.resid;
		res->read.length	= qp->resp.resid;
		res->read.rkey		= qp->resp.rkey;

		/* note res inherits the reference to mr from qp */
		res->read.mr		= qp->resp.mr;
		qp->resp.mr		= NULL;

		qp->resp.res		= res;
		res->state		= rdatm_res_state_new;
	}

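	/* pick the response opcode: ONLY if a new reply fits in one MTU,
	 * FIRST if it does not, MIDDLE/LAST for a reply already in progress
	 */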
	if (res->state == rdatm_res_state_new) {
		if (res->read.resid <= mtu)
			opcode = IB_OPCODE_RC_RDMA_READ_RESPONSE_ONLY;
		else
			opcode = IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST;
	} else {
		if (res->read.resid > mtu)
			opcode = IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE;
		else
			opcode = IB_OPCODE_RC_RDMA_READ_RESPONSE_LAST;
	}

	res->state = rdatm_res_state_next;

	payload = min_t(int, res->read.resid, mtu);

	skb = prepare_ack_packet(qp, req_pkt, &ack_pkt, opcode, payload,
				 res->cur_psn, AETH_ACK_UNLIMITED, &icrc);
	if (!skb)
		return RESPST_ERR_RNR;

	err = rxe_mem_copy(res->read.mr, res->read.va, payload_addr(&ack_pkt),
			   payload, from_mem_obj, &icrc);
	if (err)
		pr_err("Failed copying memory\n");

	if (bth_pad(&ack_pkt)) {
		struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
		u8 *pad = payload_addr(&ack_pkt) + payload;

		memset(pad, 0, bth_pad(&ack_pkt));
		icrc = rxe_crc32(rxe, icrc, pad, bth_pad(&ack_pkt));
	}
	p = payload_addr(&ack_pkt) + payload + bth_pad(&ack_pkt);
	*p = ~icrc;

	err = rxe_xmit_packet(rxe, qp, &ack_pkt, skb);
	if (err) {
		pr_err("Failed sending RDMA reply.\n");
		return RESPST_ERR_RNR;
	}

	res->read.va += payload;
	res->read.resid -= payload;
	res->cur_psn = (res->cur_psn + 1) & BTH_PSN_MASK;

	if (res->read.resid > 0) {
		state = RESPST_DONE;
	} else {
		qp->resp.res = NULL;
		if (!res->replay)
			qp->resp.opcode = -1;
		if (psn_compare(res->cur_psn, qp->resp.psn) >= 0)
			qp->resp.psn = res->cur_psn;
		state = RESPST_CLEANUP;
	}

	return state;
}

static void build_rdma_network_hdr(union rdma_network_hdr *hdr,
				   struct rxe_pkt_info *pkt)
{
	struct sk_buff *skb = PKT_TO_SKB(pkt);

	memset(hdr, 0, sizeof(*hdr));
	if (skb->protocol == htons(ETH_P_IP))
		memcpy(&hdr->roce4grh, ip_hdr(skb), sizeof(hdr->roce4grh));
	else if (skb->protocol == htons(ETH_P_IPV6))
		memcpy(&hdr->ibgrh, ipv6_hdr(skb), sizeof(hdr->ibgrh));
}

/* Executes a new request. A retried request never reaches this function
 * (sends and writes are discarded; reads and atomics are retried elsewhere).
 */
static enum resp_states execute(struct rxe_qp *qp, struct rxe_pkt_info *pkt)
{
	enum resp_states err;

	if (pkt->mask & RXE_SEND_MASK) {
		if (qp_type(qp) == IB_QPT_UD ||
		    qp_type(qp) == IB_QPT_SMI ||
		    qp_type(qp) == IB_QPT_GSI) {
			union rdma_network_hdr hdr;

			build_rdma_network_hdr(&hdr, pkt);

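			/* for UD-type QPs the GRH (or the IP header recast
			 * as one) is delivered at the start of the receive
			 * buffer, ahead of the payload
			 */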
			err = send_data_in(qp, &hdr, sizeof(hdr));
			if (err)
				return err;
		}
		err = send_data_in(qp, payload_addr(pkt), payload_size(pkt));
		if (err)
			return err;
	} else if (pkt->mask & RXE_WRITE_MASK) {
		err = write_data_in(qp, pkt);
		if (err)
			return err;
	} else if (pkt->mask & RXE_READ_MASK) {
		/* For RDMA Read we can increment the msn now. See C9-148. */
		qp->resp.msn++;
		return RESPST_READ_REPLY;
	} else if (pkt->mask & RXE_ATOMIC_MASK) {
		err = process_atomic(qp, pkt);
		if (err)
			return err;
	} else {
		/* Unreachable */
		WARN_ON_ONCE(1);
	}

	/* next expected psn, read handles this separately */
	qp->resp.psn = (pkt->psn + 1) & BTH_PSN_MASK;
	qp->resp.ack_psn = qp->resp.psn;

	qp->resp.opcode = pkt->opcode;
	qp->resp.status = IB_WC_SUCCESS;

	if (pkt->mask & RXE_COMP_MASK) {
		/* We successfully processed this new request. */
		qp->resp.msn++;
		return RESPST_COMPLETE;
	} else if (qp_type(qp) == IB_QPT_RC)
		return RESPST_ACKNOWLEDGE;
	else
		return RESPST_CLEANUP;
}

static enum resp_states do_complete(struct rxe_qp *qp,
				    struct rxe_pkt_info *pkt)
{
	struct rxe_cqe cqe;
	struct ib_wc *wc = &cqe.ibwc;
	struct ib_uverbs_wc *uwc = &cqe.uibwc;
	struct rxe_recv_wqe *wqe = qp->resp.wqe;

	if (unlikely(!wqe))
		return RESPST_CLEANUP;

	memset(&cqe, 0, sizeof(cqe));

	if (qp->rcq->is_user) {
		uwc->status		= qp->resp.status;
		uwc->qp_num		= qp->ibqp.qp_num;
		uwc->wr_id		= wqe->wr_id;
	} else {
		wc->status		= qp->resp.status;
		wc->qp			= &qp->ibqp;
		wc->wr_id		= wqe->wr_id;
	}

	if (wc->status == IB_WC_SUCCESS) {
		wc->opcode = (pkt->mask & RXE_IMMDT_MASK &&
				pkt->mask & RXE_WRITE_MASK) ?
					IB_WC_RECV_RDMA_WITH_IMM : IB_WC_RECV;
		wc->vendor_err = 0;
		wc->byte_len = (pkt->mask & RXE_IMMDT_MASK &&
				pkt->mask & RXE_WRITE_MASK) ?
					qp->resp.length : wqe->dma.length - wqe->dma.resid;

		/* fields after byte_len are different between kernel and user
		 * space
		 */
		if (qp->rcq->is_user) {
			uwc->wc_flags = IB_WC_GRH;

			if (pkt->mask & RXE_IMMDT_MASK) {
				uwc->wc_flags |= IB_WC_WITH_IMM;
				uwc->ex.imm_data =
					(__u32 __force)immdt_imm(pkt);
			}

			if (pkt->mask & RXE_IETH_MASK) {
				uwc->wc_flags |= IB_WC_WITH_INVALIDATE;
				uwc->ex.invalidate_rkey = ieth_rkey(pkt);
			}

			uwc->qp_num		= qp->ibqp.qp_num;

			if (pkt->mask & RXE_DETH_MASK)
				uwc->src_qp = deth_sqp(pkt);

			uwc->port_num		= qp->attr.port_num;
		} else {
			struct sk_buff *skb = PKT_TO_SKB(pkt);

			wc->wc_flags = IB_WC_GRH | IB_WC_WITH_NETWORK_HDR_TYPE;
			if (skb->protocol == htons(ETH_P_IP))
				wc->network_hdr_type = RDMA_NETWORK_IPV4;
			else
				wc->network_hdr_type = RDMA_NETWORK_IPV6;

			if (pkt->mask & RXE_IMMDT_MASK) {
				wc->wc_flags |= IB_WC_WITH_IMM;
				wc->ex.imm_data = immdt_imm(pkt);
			}

			if (pkt->mask & RXE_IETH_MASK) {
				struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
				struct rxe_mem *rmr;

				wc->wc_flags |= IB_WC_WITH_INVALIDATE;
				wc->ex.invalidate_rkey = ieth_rkey(pkt);

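				/* the low 8 bits of an rkey are the key
				 * portion; the rest is the MR pool index
				 */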
				rmr = rxe_pool_get_index(&rxe->mr_pool,
							 wc->ex.invalidate_rkey >> 8);
				if (unlikely(!rmr)) {
					pr_err("Bad rkey %#x invalidation\n",
					       wc->ex.invalidate_rkey);
					return RESPST_ERROR;
				}
				rmr->state = RXE_MEM_STATE_FREE;
				rxe_drop_ref(rmr);
			}

			wc->qp			= &qp->ibqp;

			if (pkt->mask & RXE_DETH_MASK)
				wc->src_qp = deth_sqp(pkt);

			wc->port_num		= qp->attr.port_num;
		}
	}

	/* srq holds a copy of the wqe; for !srq the wqe is still at the
	 * head of the rq, so advance past it now
	 */
	if (!qp->srq)
		advance_consumer(qp->rq.queue);

	qp->resp.wqe = NULL;

	if (rxe_cq_post(qp->rcq, &cqe, pkt ? bth_se(pkt) : 1))
		return RESPST_ERR_CQ_OVERFLOW;

	if (qp->resp.state == QP_STATE_ERROR)
		return RESPST_CHK_RESOURCE;

	if (!pkt)
		return RESPST_DONE;
	else if (qp_type(qp) == IB_QPT_RC)
		return RESPST_ACKNOWLEDGE;
	else
		return RESPST_CLEANUP;
}

static int send_ack(struct rxe_qp *qp, struct rxe_pkt_info *pkt,
		    u8 syndrome, u32 psn)
{
	int err = 0;
	struct rxe_pkt_info ack_pkt;
	struct sk_buff *skb;
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);

	skb = prepare_ack_packet(qp, pkt, &ack_pkt, IB_OPCODE_RC_ACKNOWLEDGE,
				 0, psn, syndrome, NULL);
	if (!skb) {
		err = -ENOMEM;
		goto err1;
	}

	err = rxe_xmit_packet(rxe, qp, &ack_pkt, skb);
	if (err)
		pr_err_ratelimited("Failed sending ack\n");

err1:
	return err;
}

static int send_atomic_ack(struct rxe_qp *qp, struct rxe_pkt_info *pkt,
			   u8 syndrome)
{
	int rc = 0;
	struct rxe_pkt_info ack_pkt;
	struct sk_buff *skb;
	struct sk_buff *skb_copy;
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
	struct resp_res *res;

	skb = prepare_ack_packet(qp, pkt, &ack_pkt,
				 IB_OPCODE_RC_ATOMIC_ACKNOWLEDGE, 0, pkt->psn,
				 syndrome, NULL);
	if (!skb) {
		rc = -ENOMEM;
		goto out;
	}

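	/* keep the original skb in the responder resource so a duplicate
	 * atomic request can be answered by resending it; transmit a clone
	 */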
	skb_copy = skb_clone(skb, GFP_ATOMIC);
	if (skb_copy)
		rxe_add_ref(qp); /* for the new SKB */
	else {
		pr_warn("Could not clone atomic response\n");
		kfree_skb(skb);
		rc = -ENOMEM;
		goto out;
	}

	res = &qp->resp.resources[qp->resp.res_head];
	free_rd_atomic_resource(qp, res);
	rxe_advance_resp_resource(qp);

	memcpy(SKB_TO_PKT(skb), &ack_pkt, sizeof(ack_pkt));
	memset((unsigned char *)SKB_TO_PKT(skb) + sizeof(ack_pkt), 0,
	       sizeof(skb->cb) - sizeof(ack_pkt));

	res->type = RXE_ATOMIC_MASK;
	res->atomic.skb = skb;
	res->first_psn = ack_pkt.psn;
	res->last_psn  = ack_pkt.psn;
	res->cur_psn   = ack_pkt.psn;

	rc = rxe_xmit_packet(rxe, qp, &ack_pkt, skb_copy);
	if (rc) {
		pr_err_ratelimited("Failed sending ack\n");
		rxe_drop_ref(qp);
		kfree_skb(skb_copy);
	}

out:
	return rc;
}

static enum resp_states acknowledge(struct rxe_qp *qp,
				    struct rxe_pkt_info *pkt)
{
	if (qp_type(qp) != IB_QPT_RC)
		return RESPST_CLEANUP;

	if (qp->resp.aeth_syndrome != AETH_ACK_UNLIMITED)
		send_ack(qp, pkt, qp->resp.aeth_syndrome, pkt->psn);
	else if (pkt->mask & RXE_ATOMIC_MASK)
		send_atomic_ack(qp, pkt, AETH_ACK_UNLIMITED);
	else if (bth_ack(pkt))
		send_ack(qp, pkt, AETH_ACK_UNLIMITED, pkt->psn);

	return RESPST_CLEANUP;
}

static enum resp_states cleanup(struct rxe_qp *qp,
				struct rxe_pkt_info *pkt)
{
	struct sk_buff *skb;

	if (pkt) {
		skb = skb_dequeue(&qp->req_pkts);
		rxe_drop_ref(qp);
		kfree_skb(skb);
	}

	if (qp->resp.mr) {
		rxe_drop_ref(qp->resp.mr);
		qp->resp.mr = NULL;
	}

	return RESPST_DONE;
}

static struct resp_res *find_resource(struct rxe_qp *qp, u32 psn)
{
	int i;

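	/* return the rd/atomic responder resource whose PSN range covers
	 * the given psn, if any
	 */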
	for (i = 0; i < qp->attr.max_dest_rd_atomic; i++) {
		struct resp_res *res = &qp->resp.resources[i];

		if (res->type == 0)
			continue;

		if (psn_compare(psn, res->first_psn) >= 0 &&
		    psn_compare(psn, res->last_psn) <= 0) {
			return res;
		}
	}

	return NULL;
}

static enum resp_states duplicate_request(struct rxe_qp *qp,
					  struct rxe_pkt_info *pkt)
{
	enum resp_states rc;
	u32 prev_psn = (qp->resp.ack_psn - 1) & BTH_PSN_MASK;

	if (pkt->mask & RXE_SEND_MASK ||
	    pkt->mask & RXE_WRITE_MASK) {
		/* SEND or WRITE. Ack again and clean up. C9-105. */
		if (bth_ack(pkt))
			send_ack(qp, pkt, AETH_ACK_UNLIMITED, prev_psn);
		rc = RESPST_CLEANUP;
		goto out;
	} else if (pkt->mask & RXE_READ_MASK) {
		struct resp_res *res;

		res = find_resource(qp, pkt->psn);
		if (!res) {
			/* Resource not found. Class D error. Drop the
			 * request.
			 */
			rc = RESPST_CLEANUP;
			goto out;
		} else {
			/* Ensure this new request is the same as the previous
			 * one or a subset of it.
			 */
			u64 iova = reth_va(pkt);
			u32 resid = reth_len(pkt);

			if (iova < res->read.va_org ||
			    resid > res->read.length ||
			    (iova + resid) > (res->read.va_org +
					      res->read.length)) {
				rc = RESPST_CLEANUP;
				goto out;
			}

			if (reth_rkey(pkt) != res->read.rkey) {
				rc = RESPST_CLEANUP;
				goto out;
			}

			res->cur_psn = pkt->psn;
			res->state = (pkt->psn == res->first_psn) ?
					rdatm_res_state_new :
					rdatm_res_state_replay;
			res->replay = 1;

			/* Reset the resource, except length. */
			res->read.va_org = iova;
			res->read.va = iova;
			res->read.resid = resid;

			/* Replay the RDMA read reply. */
			qp->resp.res = res;
			rc = RESPST_READ_REPLY;
			goto out;
		}
	} else {
		struct resp_res *res;

		/* Find the operation in our list of responder resources. */
		res = find_resource(qp, pkt->psn);
		if (res) {
			struct sk_buff *skb_copy;

			skb_copy = skb_clone(res->atomic.skb, GFP_ATOMIC);
			if (skb_copy) {
				rxe_add_ref(qp); /* for the new SKB */
			} else {
				pr_warn("Couldn't clone atomic resp\n");
				rc = RESPST_CLEANUP;
				goto out;
			}

			/* Resend the result. */
			rc = rxe_xmit_packet(to_rdev(qp->ibqp.device), qp,
					     pkt, skb_copy);
			if (rc) {
				pr_err("Failed resending result. This flow is not handled - skb ignored\n");
				rxe_drop_ref(qp);
				rc = RESPST_CLEANUP;
				goto out;
			}
		}

		/* Either the resource was not found (Class D error) or the
		 * result was resent; drop the duplicate and clean up.
		 */
		rc = RESPST_CLEANUP;
		goto out;
	}
out:
	return rc;
}

/* Process a Class A or C error. Both are treated the same in this
 * implementation.
 */
static void do_class_ac_error(struct rxe_qp *qp, u8 syndrome,
			      enum ib_wc_status status)
{
	qp->resp.aeth_syndrome	= syndrome;
	qp->resp.status		= status;

	/* indicate that we should go through the ERROR state */
	qp->resp.goto_error	= 1;
}

static enum resp_states do_class_d1e_error(struct rxe_qp *qp)
{
	/* UC */
	if (qp->srq) {
		/* Class E */
		qp->resp.drop_msg = 1;
		if (qp->resp.wqe) {
			qp->resp.status = IB_WC_REM_INV_REQ_ERR;
			return RESPST_COMPLETE;
		} else {
			return RESPST_CLEANUP;
		}
	} else {
		/* Class D1. This packet may be the start of a
		 * new message and could be valid. The previous
		 * message is invalid and ignored. Reset the
		 * recv wr to its original state.
		 */
		if (qp->resp.wqe) {
			qp->resp.wqe->dma.resid = qp->resp.wqe->dma.length;
			qp->resp.wqe->dma.cur_sge = 0;
			qp->resp.wqe->dma.sge_offset = 0;
			qp->resp.opcode = -1;
		}

		if (qp->resp.mr) {
			rxe_drop_ref(qp->resp.mr);
			qp->resp.mr = NULL;
		}

		return RESPST_CLEANUP;
	}
}

static void rxe_drain_req_pkts(struct rxe_qp *qp, bool notify)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(&qp->req_pkts))) {
		rxe_drop_ref(qp);
		kfree_skb(skb);
	}

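	/* when notify is set the remaining recv wqes are left in place so
	 * the responder can flush them with error completions; otherwise
	 * they are discarded silently
	 */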
	if (notify)
		return;

	while (!qp->srq && qp->rq.queue && queue_head(qp->rq.queue))
		advance_consumer(qp->rq.queue);
}

int rxe_responder(void *arg)
{
	struct rxe_qp *qp = (struct rxe_qp *)arg;
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
	enum resp_states state;
	struct rxe_pkt_info *pkt = NULL;
	int ret = 0;

	rxe_add_ref(qp);

	qp->resp.aeth_syndrome = AETH_ACK_UNLIMITED;

	if (!qp->valid) {
		ret = -EINVAL;
		goto done;
	}

	switch (qp->resp.state) {
	case QP_STATE_RESET:
		state = RESPST_RESET;
		break;

	default:
		state = RESPST_GET_REQ;
		break;
	}

	while (1) {
		pr_debug("qp#%d state = %s\n", qp_num(qp),
			 resp_state_name[state]);
		switch (state) {
		case RESPST_GET_REQ:
			state = get_req(qp, &pkt);
			break;
		case RESPST_CHK_PSN:
			state = check_psn(qp, pkt);
			break;
		case RESPST_CHK_OP_SEQ:
			state = check_op_seq(qp, pkt);
			break;
		case RESPST_CHK_OP_VALID:
			state = check_op_valid(qp, pkt);
			break;
		case RESPST_CHK_RESOURCE:
			state = check_resource(qp, pkt);
			break;
		case RESPST_CHK_LENGTH:
			state = check_length(qp, pkt);
			break;
		case RESPST_CHK_RKEY:
			state = check_rkey(qp, pkt);
			break;
		case RESPST_EXECUTE:
			state = execute(qp, pkt);
			break;
		case RESPST_COMPLETE:
			state = do_complete(qp, pkt);
			break;
		case RESPST_READ_REPLY:
			state = read_reply(qp, pkt);
			break;
		case RESPST_ACKNOWLEDGE:
			state = acknowledge(qp, pkt);
			break;
		case RESPST_CLEANUP:
			state = cleanup(qp, pkt);
			break;
		case RESPST_DUPLICATE_REQUEST:
			state = duplicate_request(qp, pkt);
			break;
		case RESPST_ERR_PSN_OUT_OF_SEQ:
			/* RC only - Class B. Drop packet. */
			send_ack(qp, pkt, AETH_NAK_PSN_SEQ_ERROR, qp->resp.psn);
			state = RESPST_CLEANUP;
			break;

		case RESPST_ERR_TOO_MANY_RDMA_ATM_REQ:
		case RESPST_ERR_MISSING_OPCODE_FIRST:
		case RESPST_ERR_MISSING_OPCODE_LAST_C:
		case RESPST_ERR_UNSUPPORTED_OPCODE:
		case RESPST_ERR_MISALIGNED_ATOMIC:
			/* RC only - Class C. */
			do_class_ac_error(qp, AETH_NAK_INVALID_REQ,
					  IB_WC_REM_INV_REQ_ERR);
			state = RESPST_COMPLETE;
			break;

		case RESPST_ERR_MISSING_OPCODE_LAST_D1E:
			state = do_class_d1e_error(qp);
			break;
		case RESPST_ERR_RNR:
			if (qp_type(qp) == IB_QPT_RC) {
				rxe_counter_inc(rxe, RXE_CNT_SND_RNR);
				/* RC - Class B */
				send_ack(qp, pkt, AETH_RNR_NAK |
					 (~AETH_TYPE_MASK &
					 qp->attr.min_rnr_timer),
					 pkt->psn);
			} else {
				/* UD/UC - Class D */
				qp->resp.drop_msg = 1;
			}
			state = RESPST_CLEANUP;
			break;

		case RESPST_ERR_RKEY_VIOLATION:
			if (qp_type(qp) == IB_QPT_RC) {
				/* Class C */
				do_class_ac_error(qp, AETH_NAK_REM_ACC_ERR,
						  IB_WC_REM_ACCESS_ERR);
				state = RESPST_COMPLETE;
			} else {
				qp->resp.drop_msg = 1;
				if (qp->srq) {
					/* UC/SRQ - Class D */
					qp->resp.status = IB_WC_REM_ACCESS_ERR;
					state = RESPST_COMPLETE;
				} else {
					/* UC/non-SRQ - Class E. */
					state = RESPST_CLEANUP;
				}
			}
			break;

		case RESPST_ERR_LENGTH:
			if (qp_type(qp) == IB_QPT_RC) {
				/* Class C */
				do_class_ac_error(qp, AETH_NAK_INVALID_REQ,
						  IB_WC_REM_INV_REQ_ERR);
				state = RESPST_COMPLETE;
			} else if (qp->srq) {
				/* UC/UD - Class E */
				qp->resp.status = IB_WC_REM_INV_REQ_ERR;
				state = RESPST_COMPLETE;
			} else {
				/* UC/UD - Class D */
				qp->resp.drop_msg = 1;
				state = RESPST_CLEANUP;
			}
			break;

		case RESPST_ERR_MALFORMED_WQE:
			/* All - Class A. */
			do_class_ac_error(qp, AETH_NAK_REM_OP_ERR,
					  IB_WC_LOC_QP_OP_ERR);
			state = RESPST_COMPLETE;
			break;

		case RESPST_ERR_CQ_OVERFLOW:
			/* All - Class G */
			state = RESPST_ERROR;
			break;

		case RESPST_DONE:
			if (qp->resp.goto_error) {
				state = RESPST_ERROR;
				break;
			}

			goto done;

		case RESPST_EXIT:
			if (qp->resp.goto_error) {
				state = RESPST_ERROR;
				break;
			}

			goto exit;

		case RESPST_RESET:
			rxe_drain_req_pkts(qp, false);
			qp->resp.wqe = NULL;
			goto exit;

		case RESPST_ERROR:
			qp->resp.goto_error = 0;
			pr_warn("qp#%d moved to error state\n", qp_num(qp));
			rxe_qp_error(qp);
			goto exit;

		default:
			WARN_ON_ONCE(1);
		}
	}

exit:
	ret = -EAGAIN;
done:
	rxe_drop_ref(qp);
	return ret;
}