GNU Linux-libre 4.14.257-gnu1
[releases.git] / drivers / infiniband / hw / usnic / usnic_ib_qp_grp.c
1 /*
2  * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
3  *
4  * This software is available to you under a choice of one of two
5  * licenses.  You may choose to be licensed under the terms of the GNU
6  * General Public License (GPL) Version 2, available from the file
7  * COPYING in the main directory of this source tree, or the
8  * BSD license below:
9  *
10  *     Redistribution and use in source and binary forms, with or
11  *     without modification, are permitted provided that the following
12  *     conditions are met:
13  *
14  *      - Redistributions of source code must retain the above
15  *        copyright notice, this list of conditions and the following
16  *        disclaimer.
17  *
18  *      - Redistributions in binary form must reproduce the above
19  *        copyright notice, this list of conditions and the following
20  *        disclaimer in the documentation and/or other materials
21  *        provided with the distribution.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30  * SOFTWARE.
31  *
32  */
33 #include <linux/bug.h>
34 #include <linux/errno.h>
35 #include <linux/module.h>
36 #include <linux/spinlock.h>
37
38 #include "usnic_log.h"
39 #include "usnic_vnic.h"
40 #include "usnic_fwd.h"
41 #include "usnic_uiom.h"
42 #include "usnic_debugfs.h"
43 #include "usnic_ib_qp_grp.h"
44 #include "usnic_ib_sysfs.h"
45 #include "usnic_transport.h"
46
47 #define DFLT_RQ_IDX     0
48
49 const char *usnic_ib_qp_grp_state_to_string(enum ib_qp_state state)
50 {
51         switch (state) {
52         case IB_QPS_RESET:
53                 return "Rst";
54         case IB_QPS_INIT:
55                 return "Init";
56         case IB_QPS_RTR:
57                 return "RTR";
58         case IB_QPS_RTS:
59                 return "RTS";
60         case IB_QPS_SQD:
61                 return "SQD";
62         case IB_QPS_SQE:
63                 return "SQE";
64         case IB_QPS_ERR:
65                 return "ERR";
66         default:
67                 return "UNKNOWN STATE";
68
69         }
70 }
71
/*
 * Write the column header for the QP-group table into buf.
 * Returns the number of characters written (scnprintf semantics).
 */
int usnic_ib_qp_grp_dump_hdr(char *buf, int buf_sz)
{
	static const char hdr[] = "|QPN\t|State\t|PID\t|VF Idx\t|Fil ID";

	return scnprintf(buf, buf_sz, "%s", hdr);
}
76
77 int usnic_ib_qp_grp_dump_rows(void *obj, char *buf, int buf_sz)
78 {
79         struct usnic_ib_qp_grp *qp_grp = obj;
80         struct usnic_ib_qp_grp_flow *default_flow;
81         if (obj) {
82                 default_flow = list_first_entry(&qp_grp->flows_lst,
83                                         struct usnic_ib_qp_grp_flow, link);
84                 return scnprintf(buf, buf_sz, "|%d\t|%s\t|%d\t|%hu\t|%d",
85                                         qp_grp->ibqp.qp_num,
86                                         usnic_ib_qp_grp_state_to_string(
87                                                         qp_grp->state),
88                                         qp_grp->owner_pid,
89                                         usnic_vnic_get_index(qp_grp->vf->vnic),
90                                         default_flow->flow->flow_id);
91         } else {
92                 return scnprintf(buf, buf_sz, "|N/A\t|N/A\t|N/A\t|N/A\t|N/A");
93         }
94 }
95
/*
 * Return the RQ resource chunk of a QP group; QP indices are derived
 * from the RQ indices.  Caller must hold qp_grp->lock.
 * Returns an ERR_PTR on failure (propagated from usnic_ib_qp_grp_get_chunk).
 */
static struct usnic_vnic_res_chunk *
get_qp_res_chunk(struct usnic_ib_qp_grp *qp_grp)
{
	lockdep_assert_held(&qp_grp->lock);
	/*
	 * The QP res chunk, used to derive qp indices,
	 * are just indices of the RQs
	 */
	return usnic_ib_qp_grp_get_chunk(qp_grp, USNIC_VNIC_RES_TYPE_RQ);
}
106
107 static int enable_qp_grp(struct usnic_ib_qp_grp *qp_grp)
108 {
109
110         int status;
111         int i, vnic_idx;
112         struct usnic_vnic_res_chunk *res_chunk;
113         struct usnic_vnic_res *res;
114
115         lockdep_assert_held(&qp_grp->lock);
116
117         vnic_idx = usnic_vnic_get_index(qp_grp->vf->vnic);
118
119         res_chunk = get_qp_res_chunk(qp_grp);
120         if (IS_ERR(res_chunk)) {
121                 usnic_err("Unable to get qp res with err %ld\n",
122                                 PTR_ERR(res_chunk));
123                 return PTR_ERR(res_chunk);
124         }
125
126         for (i = 0; i < res_chunk->cnt; i++) {
127                 res = res_chunk->res[i];
128                 status = usnic_fwd_enable_qp(qp_grp->ufdev, vnic_idx,
129                                                 res->vnic_idx);
130                 if (status) {
131                         usnic_err("Failed to enable qp %d of %s:%d\n with err %d\n",
132                                         res->vnic_idx, qp_grp->ufdev->name,
133                                         vnic_idx, status);
134                         goto out_err;
135                 }
136         }
137
138         return 0;
139
140 out_err:
141         for (i--; i >= 0; i--) {
142                 res = res_chunk->res[i];
143                 usnic_fwd_disable_qp(qp_grp->ufdev, vnic_idx,
144                                         res->vnic_idx);
145         }
146
147         return status;
148 }
149
150 static int disable_qp_grp(struct usnic_ib_qp_grp *qp_grp)
151 {
152         int i, vnic_idx;
153         struct usnic_vnic_res_chunk *res_chunk;
154         struct usnic_vnic_res *res;
155         int status = 0;
156
157         lockdep_assert_held(&qp_grp->lock);
158         vnic_idx = usnic_vnic_get_index(qp_grp->vf->vnic);
159
160         res_chunk = get_qp_res_chunk(qp_grp);
161         if (IS_ERR(res_chunk)) {
162                 usnic_err("Unable to get qp res with err %ld\n",
163                         PTR_ERR(res_chunk));
164                 return PTR_ERR(res_chunk);
165         }
166
167         for (i = 0; i < res_chunk->cnt; i++) {
168                 res = res_chunk->res[i];
169                 status = usnic_fwd_disable_qp(qp_grp->ufdev, vnic_idx,
170                                                 res->vnic_idx);
171                 if (status) {
172                         usnic_err("Failed to disable rq %d of %s:%d\n with err %d\n",
173                                         res->vnic_idx,
174                                         qp_grp->ufdev->name,
175                                         vnic_idx, status);
176                 }
177         }
178
179         return status;
180
181 }
182
183 static int init_filter_action(struct usnic_ib_qp_grp *qp_grp,
184                                 struct usnic_filter_action *uaction)
185 {
186         struct usnic_vnic_res_chunk *res_chunk;
187
188         res_chunk = usnic_ib_qp_grp_get_chunk(qp_grp, USNIC_VNIC_RES_TYPE_RQ);
189         if (IS_ERR(res_chunk)) {
190                 usnic_err("Unable to get %s with err %ld\n",
191                         usnic_vnic_res_type_to_str(USNIC_VNIC_RES_TYPE_RQ),
192                         PTR_ERR(res_chunk));
193                 return PTR_ERR(res_chunk);
194         }
195
196         uaction->vnic_idx = usnic_vnic_get_index(qp_grp->vf->vnic);
197         uaction->action.type = FILTER_ACTION_RQ_STEERING;
198         uaction->action.u.rq_idx = res_chunk->res[DFLT_RQ_IDX]->vnic_idx;
199
200         return 0;
201 }
202
/*
 * Build a usNIC-custom-RoCE flow for the QP group: reserve the transport
 * port, program a forwarding filter that steers the port's traffic to
 * the group's default RQ, and wrap it all in a qp_grp_flow handle.
 *
 * GFP_ATOMIC is used because callers (create_and_add_flow() via
 * usnic_ib_qp_grp_modify()) invoke this while holding qp_grp->lock.
 *
 * Returns the new flow handle or an ERR_PTR; all partially acquired
 * resources are released on failure.
 */
static struct usnic_ib_qp_grp_flow*
create_roce_custom_flow(struct usnic_ib_qp_grp *qp_grp,
			struct usnic_transport_spec *trans_spec)
{
	uint16_t port_num;
	int err;
	struct filter filter;
	struct usnic_filter_action uaction;
	struct usnic_ib_qp_grp_flow *qp_flow;
	struct usnic_fwd_flow *flow;
	enum usnic_transport_type trans_type;

	trans_type = trans_spec->trans_type;
	port_num = trans_spec->usnic_roce.port_num;

	/* Reserve Port (0 from the transport layer means no port available) */
	port_num = usnic_transport_rsrv_port(trans_type, port_num);
	if (port_num == 0)
		return ERR_PTR(-EINVAL);

	/* Create Flow */
	usnic_fwd_init_usnic_filter(&filter, port_num);
	err = init_filter_action(qp_grp, &uaction);
	if (err)
		goto out_unreserve_port;

	flow = usnic_fwd_alloc_flow(qp_grp->ufdev, &filter, &uaction);
	if (IS_ERR_OR_NULL(flow)) {
		err = flow ? PTR_ERR(flow) : -EFAULT;
		goto out_unreserve_port;
	}

	/* Create Flow Handle */
	qp_flow = kzalloc(sizeof(*qp_flow), GFP_ATOMIC);
	if (!qp_flow) {
		err = -ENOMEM;
		goto out_dealloc_flow;
	}
	qp_flow->flow = flow;
	qp_flow->trans_type = trans_type;
	qp_flow->usnic_roce.port_num = port_num;
	qp_flow->qp_grp = qp_grp;
	return qp_flow;

out_dealloc_flow:
	usnic_fwd_dealloc_flow(flow);
out_unreserve_port:
	usnic_transport_unrsrv_port(trans_type, port_num);
	return ERR_PTR(err);
}
253
/*
 * Undo create_roce_custom_flow(): free the forwarding flow, release the
 * reserved transport port, then free the flow handle itself.
 */
static void release_roce_custom_flow(struct usnic_ib_qp_grp_flow *qp_flow)
{
	usnic_fwd_dealloc_flow(qp_flow->flow);
	usnic_transport_unrsrv_port(qp_flow->trans_type,
					qp_flow->usnic_roce.port_num);
	kfree(qp_flow);
}
261
/*
 * Build a UDP flow for the QP group from a caller-supplied socket fd:
 * take a reference on the socket, read back its bound address/port,
 * verify it really is UDP, and program a forwarding filter steering
 * that address/port to the group's default RQ.
 *
 * The socket reference is held for the lifetime of the flow and dropped
 * in release_udp_flow().  GFP_ATOMIC is used because callers invoke this
 * while holding qp_grp->lock (see usnic_ib_qp_grp_modify()).
 *
 * Returns the new flow handle or an ERR_PTR; the socket reference and
 * any allocated flow are released on failure.
 */
static struct usnic_ib_qp_grp_flow*
create_udp_flow(struct usnic_ib_qp_grp *qp_grp,
		struct usnic_transport_spec *trans_spec)
{
	struct socket *sock;
	int sock_fd;
	int err;
	struct filter filter;
	struct usnic_filter_action uaction;
	struct usnic_ib_qp_grp_flow *qp_flow;
	struct usnic_fwd_flow *flow;
	enum usnic_transport_type trans_type;
	uint32_t addr;
	uint16_t port_num;
	int proto;

	trans_type = trans_spec->trans_type;
	sock_fd = trans_spec->udp.sock_fd;

	/* Get and check socket */
	sock = usnic_transport_get_socket(sock_fd);
	if (IS_ERR_OR_NULL(sock))
		return ERR_CAST(sock);

	err = usnic_transport_sock_get_addr(sock, &proto, &addr, &port_num);
	if (err)
		goto out_put_sock;

	if (proto != IPPROTO_UDP) {
		usnic_err("Protocol for fd %d is not UDP", sock_fd);
		err = -EPERM;
		goto out_put_sock;
	}

	/* Create flow */
	usnic_fwd_init_udp_filter(&filter, addr, port_num);
	err = init_filter_action(qp_grp, &uaction);
	if (err)
		goto out_put_sock;

	flow = usnic_fwd_alloc_flow(qp_grp->ufdev, &filter, &uaction);
	if (IS_ERR_OR_NULL(flow)) {
		err = flow ? PTR_ERR(flow) : -EFAULT;
		goto out_put_sock;
	}

	/* Create qp_flow */
	qp_flow = kzalloc(sizeof(*qp_flow), GFP_ATOMIC);
	if (!qp_flow) {
		err = -ENOMEM;
		goto out_dealloc_flow;
	}
	qp_flow->flow = flow;
	qp_flow->trans_type = trans_type;
	qp_flow->udp.sock = sock;
	qp_flow->qp_grp = qp_grp;
	return qp_flow;

out_dealloc_flow:
	usnic_fwd_dealloc_flow(flow);
out_put_sock:
	usnic_transport_put_socket(sock);
	return ERR_PTR(err);
}
326
/*
 * Undo create_udp_flow(): free the forwarding flow, drop the socket
 * reference taken at creation, then free the flow handle itself.
 */
static void release_udp_flow(struct usnic_ib_qp_grp_flow *qp_flow)
{
	usnic_fwd_dealloc_flow(qp_flow->flow);
	usnic_transport_put_socket(qp_flow->udp.sock);
	kfree(qp_flow);
}
333
334 static struct usnic_ib_qp_grp_flow*
335 create_and_add_flow(struct usnic_ib_qp_grp *qp_grp,
336                         struct usnic_transport_spec *trans_spec)
337 {
338         struct usnic_ib_qp_grp_flow *qp_flow;
339         enum usnic_transport_type trans_type;
340
341         trans_type = trans_spec->trans_type;
342         switch (trans_type) {
343         case USNIC_TRANSPORT_ROCE_CUSTOM:
344                 qp_flow = create_roce_custom_flow(qp_grp, trans_spec);
345                 break;
346         case USNIC_TRANSPORT_IPV4_UDP:
347                 qp_flow = create_udp_flow(qp_grp, trans_spec);
348                 break;
349         default:
350                 usnic_err("Unsupported transport %u\n",
351                                 trans_spec->trans_type);
352                 return ERR_PTR(-EINVAL);
353         }
354
355         if (!IS_ERR_OR_NULL(qp_flow)) {
356                 list_add_tail(&qp_flow->link, &qp_grp->flows_lst);
357                 usnic_debugfs_flow_add(qp_flow);
358         }
359
360
361         return qp_flow;
362 }
363
/*
 * Unlink a flow from its QP group (debugfs entry and flow list) and
 * release it via the transport-specific teardown helper.  An unknown
 * transport type triggers a WARN but does not free the handle.
 */
static void release_and_remove_flow(struct usnic_ib_qp_grp_flow *qp_flow)
{
	usnic_debugfs_flow_remove(qp_flow);
	list_del(&qp_flow->link);

	switch (qp_flow->trans_type) {
	case USNIC_TRANSPORT_ROCE_CUSTOM:
		release_roce_custom_flow(qp_flow);
		break;
	case USNIC_TRANSPORT_IPV4_UDP:
		release_udp_flow(qp_flow);
		break;
	default:
		WARN(1, "Unsupported transport %u\n",
				qp_flow->trans_type);
		break;
	}
}
382
/* Release every flow on the QP group; _safe iteration because each flow
 * is unlinked (list_del) inside release_and_remove_flow().
 */
static void release_and_remove_all_flows(struct usnic_ib_qp_grp *qp_grp)
{
	struct usnic_ib_qp_grp_flow *qp_flow, *tmp;
	list_for_each_entry_safe(qp_flow, tmp, &qp_grp->flows_lst, link)
		release_and_remove_flow(qp_flow);
}
389
/*
 * Transition a QP group to new_state, performing the side effects each
 * edge requires (adding/releasing flows, enabling/disabling queues,
 * firing IB fatal events).  For transitions into INIT, *data may carry a
 * usnic_transport_spec describing a flow to add.
 *
 * The whole transition runs under qp_grp->lock; qp_grp->state is only
 * committed (after unlock) when the transition succeeded.
 * Returns 0 on success, -EINVAL for an unsupported transition, or a
 * negative errno from the underlying operation.
 *
 * NOTE(review): old_state is sampled before the lock is taken — assumes
 * callers serialize modifies externally; verify against the ib_verbs
 * caller.
 */
int usnic_ib_qp_grp_modify(struct usnic_ib_qp_grp *qp_grp,
				enum ib_qp_state new_state,
				void *data)
{
	int status = 0;
	int vnic_idx;
	struct ib_event ib_event;
	enum ib_qp_state old_state;
	struct usnic_transport_spec *trans_spec;
	struct usnic_ib_qp_grp_flow *qp_flow;

	old_state = qp_grp->state;
	vnic_idx = usnic_vnic_get_index(qp_grp->vf->vnic);
	trans_spec = (struct usnic_transport_spec *) data;

	spin_lock(&qp_grp->lock);
	switch (new_state) {
	case IB_QPS_RESET:
		switch (old_state) {
		case IB_QPS_RESET:
			/* NO-OP */
			break;
		case IB_QPS_INIT:
			/* Queues were never enabled; just drop the flows. */
			release_and_remove_all_flows(qp_grp);
			status = 0;
			break;
		case IB_QPS_RTR:
		case IB_QPS_RTS:
		case IB_QPS_ERR:
			/* Quiesce hardware queues before dropping flows. */
			status = disable_qp_grp(qp_grp);
			release_and_remove_all_flows(qp_grp);
			break;
		default:
			status = -EINVAL;
		}
		break;
	case IB_QPS_INIT:
		switch (old_state) {
		case IB_QPS_RESET:
			if (trans_spec) {
				qp_flow = create_and_add_flow(qp_grp,
								trans_spec);
				if (IS_ERR_OR_NULL(qp_flow)) {
					status = qp_flow ? PTR_ERR(qp_flow) : -EFAULT;
					break;
				}
			} else {
				/*
				 * Optional to specify filters.
				 */
				status = 0;
			}
			break;
		case IB_QPS_INIT:
			if (trans_spec) {
				qp_flow = create_and_add_flow(qp_grp,
								trans_spec);
				if (IS_ERR_OR_NULL(qp_flow)) {
					status = qp_flow ? PTR_ERR(qp_flow) : -EFAULT;
					break;
				}
			} else {
				/*
				 * Doesn't make sense to go into INIT state
				 * from INIT state w/o adding filters.
				 */
				status = -EINVAL;
			}
			break;
		case IB_QPS_RTR:
			status = disable_qp_grp(qp_grp);
			break;
		case IB_QPS_RTS:
			status = disable_qp_grp(qp_grp);
			break;
		default:
			status = -EINVAL;
		}
		break;
	case IB_QPS_RTR:
		switch (old_state) {
		case IB_QPS_INIT:
			/* First transition that turns the queues on. */
			status = enable_qp_grp(qp_grp);
			break;
		default:
			status = -EINVAL;
		}
		break;
	case IB_QPS_RTS:
		switch (old_state) {
		case IB_QPS_RTR:
			/* NO-OP FOR NOW */
			break;
		default:
			status = -EINVAL;
		}
		break;
	case IB_QPS_ERR:
		/* Deliver IB_EVENT_QP_FATAL to the consumer on every valid edge. */
		ib_event.device = &qp_grp->vf->pf->ib_dev;
		ib_event.element.qp = &qp_grp->ibqp;
		ib_event.event = IB_EVENT_QP_FATAL;

		switch (old_state) {
		case IB_QPS_RESET:
			qp_grp->ibqp.event_handler(&ib_event,
					qp_grp->ibqp.qp_context);
			break;
		case IB_QPS_INIT:
			release_and_remove_all_flows(qp_grp);
			qp_grp->ibqp.event_handler(&ib_event,
					qp_grp->ibqp.qp_context);
			break;
		case IB_QPS_RTR:
		case IB_QPS_RTS:
			status = disable_qp_grp(qp_grp);
			release_and_remove_all_flows(qp_grp);
			qp_grp->ibqp.event_handler(&ib_event,
					qp_grp->ibqp.qp_context);
			break;
		default:
			status = -EINVAL;
		}
		break;
	default:
		status = -EINVAL;
	}
	spin_unlock(&qp_grp->lock);

	if (!status) {
		qp_grp->state = new_state;
		usnic_info("Transitioned %u from %s to %s",
		qp_grp->grp_id,
		usnic_ib_qp_grp_state_to_string(old_state),
		usnic_ib_qp_grp_state_to_string(new_state));
	} else {
		usnic_err("Failed to transition %u from %s to %s",
		qp_grp->grp_id,
		usnic_ib_qp_grp_state_to_string(old_state),
		usnic_ib_qp_grp_state_to_string(new_state));
	}

	return status;
}
533
534 static struct usnic_vnic_res_chunk**
535 alloc_res_chunk_list(struct usnic_vnic *vnic,
536                         struct usnic_vnic_res_spec *res_spec, void *owner_obj)
537 {
538         enum usnic_vnic_res_type res_type;
539         struct usnic_vnic_res_chunk **res_chunk_list;
540         int err, i, res_cnt, res_lst_sz;
541
542         for (res_lst_sz = 0;
543                 res_spec->resources[res_lst_sz].type != USNIC_VNIC_RES_TYPE_EOL;
544                 res_lst_sz++) {
545                 /* Do Nothing */
546         }
547
548         res_chunk_list = kzalloc(sizeof(*res_chunk_list)*(res_lst_sz+1),
549                                         GFP_ATOMIC);
550         if (!res_chunk_list)
551                 return ERR_PTR(-ENOMEM);
552
553         for (i = 0; res_spec->resources[i].type != USNIC_VNIC_RES_TYPE_EOL;
554                 i++) {
555                 res_type = res_spec->resources[i].type;
556                 res_cnt = res_spec->resources[i].cnt;
557
558                 res_chunk_list[i] = usnic_vnic_get_resources(vnic, res_type,
559                                         res_cnt, owner_obj);
560                 if (IS_ERR_OR_NULL(res_chunk_list[i])) {
561                         err = res_chunk_list[i] ?
562                                         PTR_ERR(res_chunk_list[i]) : -ENOMEM;
563                         usnic_err("Failed to get %s from %s with err %d\n",
564                                 usnic_vnic_res_type_to_str(res_type),
565                                 usnic_vnic_pci_name(vnic),
566                                 err);
567                         goto out_free_res;
568                 }
569         }
570
571         return res_chunk_list;
572
573 out_free_res:
574         for (i--; i >= 0; i--)
575                 usnic_vnic_put_resources(res_chunk_list[i]);
576         kfree(res_chunk_list);
577         return ERR_PTR(err);
578 }
579
/*
 * Return every chunk of a NULL-terminated resource-chunk array to the
 * vnic, then free the array itself (counterpart of alloc_res_chunk_list).
 */
static void free_qp_grp_res(struct usnic_vnic_res_chunk **res_chunk_list)
{
	struct usnic_vnic_res_chunk **chunk;

	for (chunk = res_chunk_list; *chunk; chunk++)
		usnic_vnic_put_resources(*chunk);
	kfree(res_chunk_list);
}
587
/*
 * Bind a QP group to a VF and its PD.  The first QP group bound to a VF
 * attaches the VF's PCI device to the PD's IOMMU domain; subsequent
 * binds only bump qp_grp_ref_cnt.  All QP groups on one VF must share
 * the same PD (WARN_ON below).  Caller must hold vf->lock.
 * Returns 0 on success or the error from the IOMMU attach.
 */
static int qp_grp_and_vf_bind(struct usnic_ib_vf *vf,
				struct usnic_ib_pd *pd,
				struct usnic_ib_qp_grp *qp_grp)
{
	int err;
	struct pci_dev *pdev;

	lockdep_assert_held(&vf->lock);

	pdev = usnic_vnic_get_pdev(vf->vnic);
	if (vf->qp_grp_ref_cnt == 0) {
		/* First group on this VF: attach device to the PD's domain. */
		err = usnic_uiom_attach_dev_to_pd(pd->umem_pd, &pdev->dev);
		if (err) {
			usnic_err("Failed to attach %s to domain\n",
					pci_name(pdev));
			return err;
		}
		vf->pd = pd;
	}
	vf->qp_grp_ref_cnt++;

	WARN_ON(vf->pd != pd);
	qp_grp->vf = vf;

	return 0;
}
614
/*
 * Undo qp_grp_and_vf_bind(): drop the VF's QP-group refcount and, when
 * it reaches zero, detach the VF's PCI device from the PD's IOMMU
 * domain and clear vf->pd.  Caller must hold qp_grp->vf->lock.
 */
static void qp_grp_and_vf_unbind(struct usnic_ib_qp_grp *qp_grp)
{
	struct pci_dev *pdev;
	struct usnic_ib_pd *pd;

	lockdep_assert_held(&qp_grp->vf->lock);

	pd = qp_grp->vf->pd;
	pdev = usnic_vnic_get_pdev(qp_grp->vf->vnic);
	if (--qp_grp->vf->qp_grp_ref_cnt == 0) {
		qp_grp->vf->pd = NULL;
		usnic_uiom_detach_dev_from_pd(pd->umem_pd, &pdev->dev);
	}
	qp_grp->vf = NULL;
}
630
/* Render a resource spec into a scratch buffer and emit it at debug level. */
static void log_spec(struct usnic_vnic_res_spec *res_spec)
{
	char description[512];

	usnic_vnic_spec_dump(description, sizeof(description), res_spec);
	usnic_dbg("%s\n", description);
}
637
/*
 * Derive the QP group id (used as the IB QP number) from a flow: the
 * reserved port for custom RoCE, or the socket's bound UDP port.
 * Returns 0 on success, a negative errno if the socket address cannot
 * be read or the transport is unsupported.
 */
static int qp_grp_id_from_flow(struct usnic_ib_qp_grp_flow *qp_flow,
				uint32_t *id)
{
	enum usnic_transport_type trans_type = qp_flow->trans_type;
	int err;
	uint16_t port_num = 0;

	switch (trans_type) {
	case USNIC_TRANSPORT_ROCE_CUSTOM:
		*id = qp_flow->usnic_roce.port_num;
		break;
	case USNIC_TRANSPORT_IPV4_UDP:
		err = usnic_transport_sock_get_addr(qp_flow->udp.sock,
							NULL, NULL,
							&port_num);
		if (err)
			return err;
		/*
		 * Copy port_num to stack first and then to *id,
		 * so that the short to int cast works for little
		 * and big endian systems.
		 */
		*id = port_num;
		break;
	default:
		usnic_err("Unsupported transport %u\n", trans_type);
		return -EINVAL;
	}

	return 0;
}
669
/*
 * Allocate and initialize a QP group: validate the resource spec
 * against the transport's minimum, grab the vnic resources, bind the
 * group to the VF/PD, create the initial flow from transport_spec, and
 * derive the group id / IB QP number from that flow.
 * Caller must hold vf->lock.
 *
 * Returns the new group, or an ERR_PTR on most failures.
 * NOTE(review): the kzalloc failure path returns NULL rather than
 * ERR_PTR(-ENOMEM), so callers must use an IS_ERR_OR_NULL-style check —
 * verify against usnic_ib_verbs.c.
 */
struct usnic_ib_qp_grp *
usnic_ib_qp_grp_create(struct usnic_fwd_dev *ufdev, struct usnic_ib_vf *vf,
			struct usnic_ib_pd *pd,
			struct usnic_vnic_res_spec *res_spec,
			struct usnic_transport_spec *transport_spec)
{
	struct usnic_ib_qp_grp *qp_grp;
	int err;
	enum usnic_transport_type transport = transport_spec->trans_type;
	struct usnic_ib_qp_grp_flow *qp_flow;

	lockdep_assert_held(&vf->lock);

	/* Reject specs below the transport's minimum resource requirements. */
	err = usnic_vnic_res_spec_satisfied(&min_transport_spec[transport],
						res_spec);
	if (err) {
		usnic_err("Spec does not meet miniumum req for transport %d\n",
				transport);
		log_spec(res_spec);
		return ERR_PTR(err);
	}

	qp_grp = kzalloc(sizeof(*qp_grp), GFP_ATOMIC);
	if (!qp_grp)
		return NULL;

	qp_grp->res_chunk_list = alloc_res_chunk_list(vf->vnic, res_spec,
							qp_grp);
	if (IS_ERR_OR_NULL(qp_grp->res_chunk_list)) {
		err = qp_grp->res_chunk_list ?
				PTR_ERR(qp_grp->res_chunk_list) : -ENOMEM;
		goto out_free_qp_grp;
	}

	err = qp_grp_and_vf_bind(vf, pd, qp_grp);
	if (err)
		goto out_free_res;

	INIT_LIST_HEAD(&qp_grp->flows_lst);
	spin_lock_init(&qp_grp->lock);
	qp_grp->ufdev = ufdev;
	qp_grp->state = IB_QPS_RESET;
	qp_grp->owner_pid = current->pid;

	qp_flow = create_and_add_flow(qp_grp, transport_spec);
	if (IS_ERR_OR_NULL(qp_flow)) {
		usnic_err("Unable to create and add flow with err %ld\n",
				PTR_ERR(qp_flow));
		err = qp_flow ? PTR_ERR(qp_flow) : -EFAULT;
		goto out_qp_grp_vf_unbind;
	}

	/* The group id doubles as the IB QP number exposed to consumers. */
	err = qp_grp_id_from_flow(qp_flow, &qp_grp->grp_id);
	if (err)
		goto out_release_flow;
	qp_grp->ibqp.qp_num = qp_grp->grp_id;

	usnic_ib_sysfs_qpn_add(qp_grp);

	return qp_grp;

out_release_flow:
	release_and_remove_flow(qp_flow);
out_qp_grp_vf_unbind:
	qp_grp_and_vf_unbind(qp_grp);
out_free_res:
	free_qp_grp_res(qp_grp->res_chunk_list);
out_free_qp_grp:
	kfree(qp_grp);

	return ERR_PTR(err);
}
742
/*
 * Tear down a QP group in reverse order of usnic_ib_qp_grp_create():
 * flows, sysfs entry, VF/PD binding, vnic resources, then the group
 * itself.  The group must already be in RESET (queues disabled) and the
 * caller must hold qp_grp->vf->lock.
 */
void usnic_ib_qp_grp_destroy(struct usnic_ib_qp_grp *qp_grp)
{

	WARN_ON(qp_grp->state != IB_QPS_RESET);
	lockdep_assert_held(&qp_grp->vf->lock);

	release_and_remove_all_flows(qp_grp);
	usnic_ib_sysfs_qpn_remove(qp_grp);
	qp_grp_and_vf_unbind(qp_grp);
	free_qp_grp_res(qp_grp->res_chunk_list);
	kfree(qp_grp);
}
755
756 struct usnic_vnic_res_chunk*
757 usnic_ib_qp_grp_get_chunk(struct usnic_ib_qp_grp *qp_grp,
758                                 enum usnic_vnic_res_type res_type)
759 {
760         int i;
761
762         for (i = 0; qp_grp->res_chunk_list[i]; i++) {
763                 if (qp_grp->res_chunk_list[i]->type == res_type)
764                         return qp_grp->res_chunk_list[i];
765         }
766
767         return ERR_PTR(-EINVAL);
768 }