/* drivers/infiniband/sw/rxe/rxe_qp.c (GNU Linux-libre 4.14.295-gnu1) */
/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *         Redistribution and use in source and binary forms, with or
 *         without modification, are permitted provided that the following
 *         conditions are met:
 *
 *              - Redistributions of source code must retain the above
 *                copyright notice, this list of conditions and the following
 *                disclaimer.
 *
 *              - Redistributions in binary form must reproduce the above
 *                copyright notice, this list of conditions and the following
 *                disclaimer in the documentation and/or other materials
 *                provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/skbuff.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>

#include "rxe.h"
#include "rxe_loc.h"
#include "rxe_queue.h"
#include "rxe_task.h"

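/* Human-readable names for the rxe QP states, indexed by the
 * QP_STATE_* values of enum rxe_qp_state.
 */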
char *rxe_qp_state_name[] = {
        [QP_STATE_RESET]        = "RESET",
        [QP_STATE_INIT]         = "INIT",
        [QP_STATE_READY]        = "READY",
        [QP_STATE_DRAIN]        = "DRAIN",
        [QP_STATE_DRAINED]      = "DRAINED",
        [QP_STATE_ERROR]        = "ERROR",
};

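/* Check the requested queue sizes against the device limits advertised
 * in rxe->attr; returns 0 if acceptable or -EINVAL otherwise.
 */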
static int rxe_qp_chk_cap(struct rxe_dev *rxe, struct ib_qp_cap *cap,
                          int has_srq)
{
        if (cap->max_send_wr > rxe->attr.max_qp_wr) {
                pr_warn("invalid send wr = %d > %d\n",
                        cap->max_send_wr, rxe->attr.max_qp_wr);
                goto err1;
        }

        if (cap->max_send_sge > rxe->attr.max_sge) {
                pr_warn("invalid send sge = %d > %d\n",
                        cap->max_send_sge, rxe->attr.max_sge);
                goto err1;
        }

        if (!has_srq) {
                if (cap->max_recv_wr > rxe->attr.max_qp_wr) {
                        pr_warn("invalid recv wr = %d > %d\n",
                                cap->max_recv_wr, rxe->attr.max_qp_wr);
                        goto err1;
                }

                if (cap->max_recv_sge > rxe->attr.max_sge) {
                        pr_warn("invalid recv sge = %d > %d\n",
                                cap->max_recv_sge, rxe->attr.max_sge);
                        goto err1;
                }
        }

        if (cap->max_inline_data > rxe->max_inline_data) {
                pr_warn("invalid max inline data = %d > %d\n",
                        cap->max_inline_data, rxe->max_inline_data);
                goto err1;
        }

        return 0;

err1:
        return -EINVAL;
}

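/* Validate ib_qp_init_attr before a qp is created: both CQs must be
 * present, the capacities must fit the device limits, and at most one
 * SMI and one GSI qp may exist per port.
 */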
int rxe_qp_chk_init(struct rxe_dev *rxe, struct ib_qp_init_attr *init)
{
        struct ib_qp_cap *cap = &init->cap;
        struct rxe_port *port;
        int port_num = init->port_num;

        if (!init->recv_cq || !init->send_cq) {
                pr_warn("missing cq\n");
                goto err1;
        }

        if (rxe_qp_chk_cap(rxe, cap, !!init->srq))
                goto err1;

        if (init->qp_type == IB_QPT_SMI || init->qp_type == IB_QPT_GSI) {
                if (port_num != 1) {
                        pr_warn("invalid port = %d\n", port_num);
                        goto err1;
                }

                port = &rxe->port;

                if (init->qp_type == IB_QPT_SMI && port->qp_smi_index) {
                        pr_warn("SMI QP exists for port %d\n", port_num);
                        goto err1;
                }

                if (init->qp_type == IB_QPT_GSI && port->qp_gsi_index) {
                        pr_warn("GSI QP exists for port %d\n", port_num);
                        goto err1;
                }
        }

        return 0;

err1:
        return -EINVAL;
}

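/* Responder resources hold the state needed to replay replies to
 * inbound RDMA READ and atomic operations; one slot per outstanding
 * request, sized from max_dest_rd_atomic in rxe_qp_from_attr().
 */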
static int alloc_rd_atomic_resources(struct rxe_qp *qp, unsigned int n)
{
        qp->resp.res_head = 0;
        qp->resp.res_tail = 0;
        qp->resp.resources = kcalloc(n, sizeof(struct resp_res), GFP_KERNEL);

        if (!qp->resp.resources)
                return -ENOMEM;

        return 0;
}

static void free_rd_atomic_resources(struct rxe_qp *qp)
{
        if (qp->resp.resources) {
                int i;

                for (i = 0; i < qp->attr.max_dest_rd_atomic; i++) {
                        struct resp_res *res = &qp->resp.resources[i];

                        free_rd_atomic_resource(qp, res);
                }
                kfree(qp->resp.resources);
                qp->resp.resources = NULL;
        }
}

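/* Drop whatever a single responder resource still holds: an atomic
 * resource pins the qp and its response skb, a read resource may pin
 * the target MR.
 */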
void free_rd_atomic_resource(struct rxe_qp *qp, struct resp_res *res)
{
        if (res->type == RXE_ATOMIC_MASK) {
                rxe_drop_ref(qp);
                kfree_skb(res->atomic.skb);
        } else if (res->type == RXE_READ_MASK) {
                if (res->read.mr)
                        rxe_drop_ref(res->read.mr);
        }
        res->type = 0;
}

static void cleanup_rd_atomic_resources(struct rxe_qp *qp)
{
        int i;
        struct resp_res *res;

        if (qp->resp.resources) {
                for (i = 0; i < qp->attr.max_dest_rd_atomic; i++) {
                        res = &qp->resp.resources[i];
                        free_rd_atomic_resource(qp, res);
                }
        }
}

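/* Initialize state common to all qp types. SMI and GSI qps keep the
 * well-known qp numbers 0 and 1 while their pool index is recorded in
 * the port; every other qp uses its pool index as the qp number.
 */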
static void rxe_qp_init_misc(struct rxe_dev *rxe, struct rxe_qp *qp,
                             struct ib_qp_init_attr *init)
{
        struct rxe_port *port;
        u32 qpn;

        qp->sq_sig_type         = init->sq_sig_type;
        qp->attr.path_mtu       = 1;
        qp->mtu                 = ib_mtu_enum_to_int(qp->attr.path_mtu);

        qpn                     = qp->pelem.index;
        port                    = &rxe->port;

        switch (init->qp_type) {
        case IB_QPT_SMI:
                qp->ibqp.qp_num         = 0;
                port->qp_smi_index      = qpn;
                qp->attr.port_num       = init->port_num;
                break;

        case IB_QPT_GSI:
                qp->ibqp.qp_num         = 1;
                port->qp_gsi_index      = qpn;
                qp->attr.port_num       = init->port_num;
                break;

        default:
                qp->ibqp.qp_num         = qpn;
                break;
        }

        INIT_LIST_HEAD(&qp->grp_list);

        skb_queue_head_init(&qp->send_pkts);

        spin_lock_init(&qp->grp_lock);
        spin_lock_init(&qp->state_lock);

        spin_lock_init(&qp->req.task.state_lock);
        spin_lock_init(&qp->resp.task.state_lock);
        spin_lock_init(&qp->comp.task.state_lock);

        spin_lock_init(&qp->sq.sq_lock);
        spin_lock_init(&qp->rq.producer_lock);
        spin_lock_init(&qp->rq.consumer_lock);

        atomic_set(&qp->ssn, 0);
        atomic_set(&qp->skb_out, 0);
}

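/* Set up the send side of the qp: a kernel datagram socket used on the
 * transmit path (see rxe_net.c), the send queue ring, which
 * do_mmap_info() may map into user space, and the requester and
 * completer tasks.
 */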
static int rxe_qp_init_req(struct rxe_dev *rxe, struct rxe_qp *qp,
                           struct ib_qp_init_attr *init,
                           struct ib_ucontext *context, struct ib_udata *udata)
{
        int err;
        int wqe_size;

        err = sock_create_kern(&init_net, AF_INET, SOCK_DGRAM, 0, &qp->sk);
        if (err < 0)
                return err;
        qp->sk->sk->sk_user_data = qp;

        qp->sq.max_wr           = init->cap.max_send_wr;
        qp->sq.max_sge          = init->cap.max_send_sge;
        qp->sq.max_inline       = init->cap.max_inline_data;

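        /* A send WQE is the fixed header plus the larger of the SG list
         * and the inline data buffer; e.g. max_send_sge = 4 with no
         * inline data gives sizeof(struct rxe_send_wqe) +
         * 4 * sizeof(struct ib_sge).
         */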
        wqe_size = max_t(int, sizeof(struct rxe_send_wqe) +
                         qp->sq.max_sge * sizeof(struct ib_sge),
                         sizeof(struct rxe_send_wqe) +
                         qp->sq.max_inline);

        qp->sq.queue = rxe_queue_init(rxe,
                                      &qp->sq.max_wr,
                                      wqe_size);
        if (!qp->sq.queue)
                return -ENOMEM;

        err = do_mmap_info(rxe, udata, true,
                           context, qp->sq.queue->buf,
                           qp->sq.queue->buf_size, &qp->sq.queue->ip);

        if (err) {
                vfree(qp->sq.queue->buf);
                kfree(qp->sq.queue);
                qp->sq.queue = NULL;
                return err;
        }

        qp->req.wqe_index       = producer_index(qp->sq.queue);
        qp->req.state           = QP_STATE_RESET;
        qp->req.opcode          = -1;
        qp->comp.opcode         = -1;

        skb_queue_head_init(&qp->req_pkts);

        rxe_init_task(rxe, &qp->req.task, qp,
                      rxe_requester, "req");
        rxe_init_task(rxe, &qp->comp.task, qp,
                      rxe_completer, "comp");

        qp->qp_timeout_jiffies = 0; /* Can't be set for UD/UC in modify_qp */
        if (init->qp_type == IB_QPT_RC) {
                setup_timer(&qp->rnr_nak_timer, rnr_nak_timer, (unsigned long)qp);
                setup_timer(&qp->retrans_timer, retransmit_timer, (unsigned long)qp);
        }
        return 0;
}

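/* Set up the receive side of the qp: the receive queue ring (skipped
 * when the qp is attached to an SRQ) and the responder task.
 */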
static int rxe_qp_init_resp(struct rxe_dev *rxe, struct rxe_qp *qp,
                            struct ib_qp_init_attr *init,
                            struct ib_ucontext *context, struct ib_udata *udata)
{
        int err;
        int wqe_size;

        if (!qp->srq) {
                qp->rq.max_wr           = init->cap.max_recv_wr;
                qp->rq.max_sge          = init->cap.max_recv_sge;

                wqe_size = rcv_wqe_size(qp->rq.max_sge);

                pr_debug("qp#%d max_wr = %d, max_sge = %d, wqe_size = %d\n",
                         qp_num(qp), qp->rq.max_wr, qp->rq.max_sge, wqe_size);

                qp->rq.queue = rxe_queue_init(rxe,
                                              &qp->rq.max_wr,
                                              wqe_size);
                if (!qp->rq.queue)
                        return -ENOMEM;

                err = do_mmap_info(rxe, udata, false, context,
                                   qp->rq.queue->buf,
                                   qp->rq.queue->buf_size,
                                   &qp->rq.queue->ip);
                if (err) {
                        vfree(qp->rq.queue->buf);
                        kfree(qp->rq.queue);
                        qp->rq.queue = NULL;
                        return err;
                }
        }

        skb_queue_head_init(&qp->resp_pkts);

        rxe_init_task(rxe, &qp->resp.task, qp,
                      rxe_responder, "resp");

        qp->resp.opcode         = OPCODE_NONE;
        qp->resp.msn            = 0;
        qp->resp.state          = QP_STATE_RESET;

        return 0;
}

/* called by the create qp verb */
int rxe_qp_from_init(struct rxe_dev *rxe, struct rxe_qp *qp, struct rxe_pd *pd,
                     struct ib_qp_init_attr *init, struct ib_udata *udata,
                     struct ib_pd *ibpd)
{
        int err;
        struct rxe_cq *rcq = to_rcq(init->recv_cq);
        struct rxe_cq *scq = to_rcq(init->send_cq);
        struct rxe_srq *srq = init->srq ? to_rsrq(init->srq) : NULL;
        struct ib_ucontext *context = udata ? ibpd->uobject->context : NULL;

        rxe_add_ref(pd);
        rxe_add_ref(rcq);
        rxe_add_ref(scq);
        if (srq)
                rxe_add_ref(srq);

        qp->pd                  = pd;
        qp->rcq                 = rcq;
        qp->scq                 = scq;
        qp->srq                 = srq;

        rxe_qp_init_misc(rxe, qp, init);

        err = rxe_qp_init_req(rxe, qp, init, context, udata);
        if (err)
                goto err1;

        err = rxe_qp_init_resp(rxe, qp, init, context, udata);
        if (err)
                goto err2;

        qp->attr.qp_state = IB_QPS_RESET;
        qp->valid = 1;

        return 0;

err2:
        rxe_queue_cleanup(qp->sq.queue);
err1:
        qp->pd = NULL;
        qp->rcq = NULL;
        qp->scq = NULL;
        qp->srq = NULL;

        if (srq)
                rxe_drop_ref(srq);
        rxe_drop_ref(scq);
        rxe_drop_ref(rcq);
        rxe_drop_ref(pd);

        return err;
}

/* called by the query qp verb */
int rxe_qp_to_init(struct rxe_qp *qp, struct ib_qp_init_attr *init)
{
        init->event_handler             = qp->ibqp.event_handler;
        init->qp_context                = qp->ibqp.qp_context;
        init->send_cq                   = qp->ibqp.send_cq;
        init->recv_cq                   = qp->ibqp.recv_cq;
        init->srq                       = qp->ibqp.srq;

        init->cap.max_send_wr           = qp->sq.max_wr;
        init->cap.max_send_sge          = qp->sq.max_sge;
        init->cap.max_inline_data       = qp->sq.max_inline;

        if (!qp->srq) {
                init->cap.max_recv_wr           = qp->rq.max_wr;
                init->cap.max_recv_sge          = qp->rq.max_sge;
        }

        init->sq_sig_type               = qp->sq_sig_type;

        init->qp_type                   = qp->ibqp.qp_type;
        init->port_num                  = 1;

        return 0;
}

/* called by the modify qp verb, this routine checks all the parameters before
 * making any changes
 */
int rxe_qp_chk_attr(struct rxe_dev *rxe, struct rxe_qp *qp,
                    struct ib_qp_attr *attr, int mask)
{
        enum ib_qp_state cur_state = (mask & IB_QP_CUR_STATE) ?
                                        attr->cur_qp_state : qp->attr.qp_state;
        enum ib_qp_state new_state = (mask & IB_QP_STATE) ?
                                        attr->qp_state : cur_state;

        if (!ib_modify_qp_is_ok(cur_state, new_state, qp_type(qp), mask,
                                IB_LINK_LAYER_ETHERNET)) {
                pr_warn("invalid mask or state for qp\n");
                goto err1;
        }

        if (mask & IB_QP_STATE) {
                if (cur_state == IB_QPS_SQD) {
                        if (qp->req.state == QP_STATE_DRAIN &&
                            new_state != IB_QPS_ERR)
                                goto err1;
                }
        }

        if (mask & IB_QP_PORT) {
                if (attr->port_num != 1) {
                        pr_warn("invalid port %d\n", attr->port_num);
                        goto err1;
                }
        }

        if (mask & IB_QP_CAP && rxe_qp_chk_cap(rxe, &attr->cap, !!qp->srq))
                goto err1;

        if (mask & IB_QP_AV && rxe_av_chk_attr(rxe, &attr->ah_attr))
                goto err1;

        if (mask & IB_QP_ALT_PATH) {
                if (rxe_av_chk_attr(rxe, &attr->alt_ah_attr))
                        goto err1;
                if (attr->alt_port_num != 1) {
                        pr_warn("invalid alt port %d\n", attr->alt_port_num);
                        goto err1;
                }
                if (attr->alt_timeout > 31) {
                        pr_warn("invalid QP alt timeout %d > 31\n",
                                attr->alt_timeout);
                        goto err1;
                }
        }

        if (mask & IB_QP_PATH_MTU) {
                struct rxe_port *port = &rxe->port;

                enum ib_mtu max_mtu = port->attr.max_mtu;
                enum ib_mtu mtu = attr->path_mtu;

                if (mtu > max_mtu) {
                        pr_debug("invalid mtu (%d) > (%d)\n",
                                 ib_mtu_enum_to_int(mtu),
                                 ib_mtu_enum_to_int(max_mtu));
                        goto err1;
                }
        }

        if (mask & IB_QP_MAX_QP_RD_ATOMIC) {
                if (attr->max_rd_atomic > rxe->attr.max_qp_rd_atom) {
                        pr_warn("invalid max_rd_atomic %d > %d\n",
                                attr->max_rd_atomic,
                                rxe->attr.max_qp_rd_atom);
                        goto err1;
                }
        }

        if (mask & IB_QP_TIMEOUT) {
                if (attr->timeout > 31) {
                        pr_warn("invalid QP timeout %d > 31\n",
                                attr->timeout);
                        goto err1;
                }
        }

        return 0;

err1:
        return -EINVAL;
}

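/* Move the qp to the reset state. Each task is disabled, run once
 * synchronously via __rxe_do_task() so that it observes the RESET
 * state and drains its work and packet queues, and then re-enabled.
 */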
static void rxe_qp_reset(struct rxe_qp *qp)
{
        /* stop tasks from running */
        rxe_disable_task(&qp->resp.task);

        /* stop request/comp */
        if (qp->sq.queue) {
                if (qp_type(qp) == IB_QPT_RC)
                        rxe_disable_task(&qp->comp.task);
                rxe_disable_task(&qp->req.task);
        }

        /* move qp to the reset state */
        qp->req.state = QP_STATE_RESET;
        qp->resp.state = QP_STATE_RESET;

        /* let the state machines reset themselves and drain their work
         * and packet queues
         */
        __rxe_do_task(&qp->resp.task);

        if (qp->sq.queue) {
                __rxe_do_task(&qp->comp.task);
                __rxe_do_task(&qp->req.task);
                rxe_queue_reset(qp->sq.queue);
        }

        /* cleanup attributes */
        atomic_set(&qp->ssn, 0);
        qp->req.opcode = -1;
        qp->req.need_retry = 0;
        qp->req.noack_pkts = 0;
        qp->resp.msn = 0;
        qp->resp.opcode = -1;
        qp->resp.drop_msg = 0;
        qp->resp.goto_error = 0;
        qp->resp.sent_psn_nak = 0;

        if (qp->resp.mr) {
                rxe_drop_ref(qp->resp.mr);
                qp->resp.mr = NULL;
        }

        cleanup_rd_atomic_resources(qp);

        /* reenable tasks */
        rxe_enable_task(&qp->resp.task);

        if (qp->sq.queue) {
                if (qp_type(qp) == IB_QPT_RC)
                        rxe_enable_task(&qp->comp.task);

                rxe_enable_task(&qp->req.task);
        }
}

/* drain the send queue */
static void rxe_qp_drain(struct rxe_qp *qp)
{
        if (qp->sq.queue) {
                if (qp->req.state != QP_STATE_DRAINED) {
                        qp->req.state = QP_STATE_DRAIN;
                        if (qp_type(qp) == IB_QPT_RC)
                                rxe_run_task(&qp->comp.task, 1);
                        else
                                __rxe_do_task(&qp->comp.task);
                        rxe_run_task(&qp->req.task, 1);
                }
        }
}

/* move the qp to the error state */
void rxe_qp_error(struct rxe_qp *qp)
{
        qp->req.state = QP_STATE_ERROR;
        qp->resp.state = QP_STATE_ERROR;
        qp->attr.qp_state = IB_QPS_ERR;

        /* drain work and packet queues */
        rxe_run_task(&qp->resp.task, 1);

        if (qp_type(qp) == IB_QPT_RC)
                rxe_run_task(&qp->comp.task, 1);
        else
                __rxe_do_task(&qp->comp.task);
        rxe_run_task(&qp->req.task, 1);
}

/* called by the modify qp verb */
int rxe_qp_from_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask,
                     struct ib_udata *udata)
{
        int err;
        struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
        union ib_gid sgid;
        struct ib_gid_attr sgid_attr;

        if (mask & IB_QP_MAX_QP_RD_ATOMIC) {
                int max_rd_atomic = attr->max_rd_atomic ?
                        roundup_pow_of_two(attr->max_rd_atomic) : 0;

                qp->attr.max_rd_atomic = max_rd_atomic;
                atomic_set(&qp->req.rd_atomic, max_rd_atomic);
        }

        if (mask & IB_QP_MAX_DEST_RD_ATOMIC) {
                int max_dest_rd_atomic = attr->max_dest_rd_atomic ?
                        roundup_pow_of_two(attr->max_dest_rd_atomic) : 0;

                qp->attr.max_dest_rd_atomic = max_dest_rd_atomic;

                free_rd_atomic_resources(qp);

                err = alloc_rd_atomic_resources(qp, max_dest_rd_atomic);
                if (err)
                        return err;
        }

        if (mask & IB_QP_CUR_STATE)
                qp->attr.cur_qp_state = attr->qp_state;

        if (mask & IB_QP_EN_SQD_ASYNC_NOTIFY)
                qp->attr.en_sqd_async_notify = attr->en_sqd_async_notify;

        if (mask & IB_QP_ACCESS_FLAGS)
                qp->attr.qp_access_flags = attr->qp_access_flags;

        if (mask & IB_QP_PKEY_INDEX)
                qp->attr.pkey_index = attr->pkey_index;

        if (mask & IB_QP_PORT)
                qp->attr.port_num = attr->port_num;

        if (mask & IB_QP_QKEY)
                qp->attr.qkey = attr->qkey;

        if (mask & IB_QP_AV) {
                ib_get_cached_gid(&rxe->ib_dev, 1,
                                  rdma_ah_read_grh(&attr->ah_attr)->sgid_index,
                                  &sgid, &sgid_attr);
                rxe_av_from_attr(rxe, attr->port_num, &qp->pri_av,
                                 &attr->ah_attr);
                rxe_av_fill_ip_info(rxe, &qp->pri_av, &attr->ah_attr,
                                    &sgid_attr, &sgid);
                if (sgid_attr.ndev)
                        dev_put(sgid_attr.ndev);
        }

        if (mask & IB_QP_ALT_PATH) {
                u8 sgid_index =
                        rdma_ah_read_grh(&attr->alt_ah_attr)->sgid_index;

                ib_get_cached_gid(&rxe->ib_dev, 1, sgid_index,
                                  &sgid, &sgid_attr);

                rxe_av_from_attr(rxe, attr->alt_port_num, &qp->alt_av,
                                 &attr->alt_ah_attr);
                rxe_av_fill_ip_info(rxe, &qp->alt_av, &attr->alt_ah_attr,
                                    &sgid_attr, &sgid);
                if (sgid_attr.ndev)
                        dev_put(sgid_attr.ndev);

                qp->attr.alt_port_num = attr->alt_port_num;
                qp->attr.alt_pkey_index = attr->alt_pkey_index;
                qp->attr.alt_timeout = attr->alt_timeout;
        }

        if (mask & IB_QP_PATH_MTU) {
                qp->attr.path_mtu = attr->path_mtu;
                qp->mtu = ib_mtu_enum_to_int(attr->path_mtu);
        }

        if (mask & IB_QP_TIMEOUT) {
                qp->attr.timeout = attr->timeout;
                if (attr->timeout == 0) {
                        qp->qp_timeout_jiffies = 0;
                } else {
                        /* According to the spec, timeout = 4.096 * 2 ^ attr->timeout [us] */
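                        /* e.g. attr->timeout = 14 gives 4.096us * 2^14,
                         * i.e. about 67ms. nsecs_to_jiffies() truncates,
                         * so small timeouts could round down to zero;
                         * the "j ? j : 1" below keeps at least one jiffy.
                         */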
                        int j = nsecs_to_jiffies(4096ULL << attr->timeout);

                        qp->qp_timeout_jiffies = j ? j : 1;
                }
        }

        if (mask & IB_QP_RETRY_CNT) {
                qp->attr.retry_cnt = attr->retry_cnt;
                qp->comp.retry_cnt = attr->retry_cnt;
                pr_debug("qp#%d set retry count = %d\n", qp_num(qp),
                         attr->retry_cnt);
        }

        if (mask & IB_QP_RNR_RETRY) {
                qp->attr.rnr_retry = attr->rnr_retry;
                qp->comp.rnr_retry = attr->rnr_retry;
                pr_debug("qp#%d set rnr retry count = %d\n", qp_num(qp),
                         attr->rnr_retry);
        }

        if (mask & IB_QP_RQ_PSN) {
                qp->attr.rq_psn = (attr->rq_psn & BTH_PSN_MASK);
                qp->resp.psn = qp->attr.rq_psn;
                pr_debug("qp#%d set resp psn = 0x%x\n", qp_num(qp),
                         qp->resp.psn);
        }

        if (mask & IB_QP_MIN_RNR_TIMER) {
                qp->attr.min_rnr_timer = attr->min_rnr_timer;
                pr_debug("qp#%d set min rnr timer = 0x%x\n", qp_num(qp),
                         attr->min_rnr_timer);
        }

        if (mask & IB_QP_SQ_PSN) {
                qp->attr.sq_psn = (attr->sq_psn & BTH_PSN_MASK);
                qp->req.psn = qp->attr.sq_psn;
                qp->comp.psn = qp->attr.sq_psn;
                pr_debug("qp#%d set req psn = 0x%x\n", qp_num(qp), qp->req.psn);
        }

        if (mask & IB_QP_PATH_MIG_STATE)
                qp->attr.path_mig_state = attr->path_mig_state;

        if (mask & IB_QP_DEST_QPN)
                qp->attr.dest_qp_num = attr->dest_qp_num;

        if (mask & IB_QP_STATE) {
                qp->attr.qp_state = attr->qp_state;

                switch (attr->qp_state) {
                case IB_QPS_RESET:
                        pr_debug("qp#%d state -> RESET\n", qp_num(qp));
                        rxe_qp_reset(qp);
                        break;

                case IB_QPS_INIT:
                        pr_debug("qp#%d state -> INIT\n", qp_num(qp));
                        qp->req.state = QP_STATE_INIT;
                        qp->resp.state = QP_STATE_INIT;
                        break;

                case IB_QPS_RTR:
                        pr_debug("qp#%d state -> RTR\n", qp_num(qp));
                        qp->resp.state = QP_STATE_READY;
                        break;

                case IB_QPS_RTS:
                        pr_debug("qp#%d state -> RTS\n", qp_num(qp));
                        qp->req.state = QP_STATE_READY;
                        break;

                case IB_QPS_SQD:
                        pr_debug("qp#%d state -> SQD\n", qp_num(qp));
                        rxe_qp_drain(qp);
                        break;

                case IB_QPS_SQE:
                        pr_warn("qp#%d state -> SQE !!?\n", qp_num(qp));
                        /* Not possible from modify_qp. */
                        break;

                case IB_QPS_ERR:
                        pr_debug("qp#%d state -> ERR\n", qp_num(qp));
                        rxe_qp_error(qp);
                        break;
                }
        }

        return 0;
}

/* called by the query qp verb */
int rxe_qp_to_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask)
{
        struct rxe_dev *rxe = to_rdev(qp->ibqp.device);

        *attr = qp->attr;

        attr->rq_psn                            = qp->resp.psn;
        attr->sq_psn                            = qp->req.psn;

        attr->cap.max_send_wr                   = qp->sq.max_wr;
        attr->cap.max_send_sge                  = qp->sq.max_sge;
        attr->cap.max_inline_data               = qp->sq.max_inline;

        if (!qp->srq) {
                attr->cap.max_recv_wr           = qp->rq.max_wr;
                attr->cap.max_recv_sge          = qp->rq.max_sge;
        }

        rxe_av_to_attr(rxe, &qp->pri_av, &attr->ah_attr);
        rxe_av_to_attr(rxe, &qp->alt_av, &attr->alt_ah_attr);

        if (qp->req.state == QP_STATE_DRAIN) {
                attr->sq_draining = 1;
                /* applications that get this state
                 * typically spin on it. yield the
                 * processor
                 */
                cond_resched();
        } else {
                attr->sq_draining = 0;
        }

        pr_debug("attr->sq_draining = %d\n", attr->sq_draining);

        return 0;
}

/* called by the destroy qp verb */
void rxe_qp_destroy(struct rxe_qp *qp)
{
        qp->valid = 0;
        qp->qp_timeout_jiffies = 0;
        rxe_cleanup_task(&qp->resp.task);

        if (qp_type(qp) == IB_QPT_RC) {
                del_timer_sync(&qp->retrans_timer);
                del_timer_sync(&qp->rnr_nak_timer);
        }

        rxe_cleanup_task(&qp->req.task);
        rxe_cleanup_task(&qp->comp.task);

        /* flush out any receive wr's or pending requests */
        __rxe_do_task(&qp->req.task);
        if (qp->sq.queue) {
                __rxe_do_task(&qp->comp.task);
                __rxe_do_task(&qp->req.task);
        }
}

/* called when the last reference to the qp is dropped */
static void rxe_qp_do_cleanup(struct work_struct *work)
{
        struct rxe_qp *qp = container_of(work, typeof(*qp), cleanup_work.work);

        rxe_drop_all_mcast_groups(qp);

        if (qp->sq.queue)
                rxe_queue_cleanup(qp->sq.queue);

        if (qp->srq)
                rxe_drop_ref(qp->srq);

        if (qp->rq.queue)
                rxe_queue_cleanup(qp->rq.queue);

        if (qp->scq)
                rxe_drop_ref(qp->scq);
        if (qp->rcq)
                rxe_drop_ref(qp->rcq);
        if (qp->pd)
                rxe_drop_ref(qp->pd);

        if (qp->resp.mr) {
                rxe_drop_ref(qp->resp.mr);
                qp->resp.mr = NULL;
        }

        if (qp_type(qp) == IB_QPT_RC)
                sk_dst_reset(qp->sk->sk);

        free_rd_atomic_resources(qp);

        kernel_sock_shutdown(qp->sk, SHUT_RDWR);
        sock_release(qp->sk);
}

/* called when the last reference to the qp is dropped */
void rxe_qp_cleanup(struct rxe_pool_entry *arg)
{
        struct rxe_qp *qp = container_of(arg, typeof(*qp), pelem);

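        /* Runs rxe_qp_do_cleanup() immediately when called from process
         * context, otherwise defers it to a work item, since the cleanup
         * releases a socket and may sleep.
         */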
        execute_in_process_context(rxe_qp_do_cleanup, &qp->cleanup_work);
}