/*
 * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
33 #include <linux/bug.h>
34 #include <linux/errno.h>
35 #include <linux/module.h>
36 #include <linux/spinlock.h>
38 #include "usnic_log.h"
39 #include "usnic_vnic.h"
40 #include "usnic_fwd.h"
41 #include "usnic_uiom.h"
42 #include "usnic_debugfs.h"
43 #include "usnic_ib_qp_grp.h"
44 #include "usnic_ib_sysfs.h"
45 #include "usnic_transport.h"
49 const char *usnic_ib_qp_grp_state_to_string(enum ib_qp_state state)
67 return "UNKNOWN STATE";
/* Emit the debugfs table header matching the columns of _dump_rows(). */
int usnic_ib_qp_grp_dump_hdr(char *buf, int buf_sz)
{
	return scnprintf(buf, buf_sz, "|QPN\t|State\t|PID\t|VF Idx\t|Fil ID");
}
77 int usnic_ib_qp_grp_dump_rows(void *obj, char *buf, int buf_sz)
79 struct usnic_ib_qp_grp *qp_grp = obj;
80 struct usnic_ib_qp_grp_flow *default_flow;
82 default_flow = list_first_entry(&qp_grp->flows_lst,
83 struct usnic_ib_qp_grp_flow, link);
84 return scnprintf(buf, buf_sz, "|%d\t|%s\t|%d\t|%hu\t|%d",
86 usnic_ib_qp_grp_state_to_string(
89 usnic_vnic_get_index(qp_grp->vf->vnic),
90 default_flow->flow->flow_id);
92 return scnprintf(buf, buf_sz, "|N/A\t|N/A\t|N/A\t|N/A\t|N/A");
96 static struct usnic_vnic_res_chunk *
97 get_qp_res_chunk(struct usnic_ib_qp_grp *qp_grp)
99 lockdep_assert_held(&qp_grp->lock);
101 * The QP res chunk, used to derive qp indices,
102 * are just indices of the RQs
104 return usnic_ib_qp_grp_get_chunk(qp_grp, USNIC_VNIC_RES_TYPE_RQ);
107 static int enable_qp_grp(struct usnic_ib_qp_grp *qp_grp)
112 struct usnic_vnic_res_chunk *res_chunk;
113 struct usnic_vnic_res *res;
115 lockdep_assert_held(&qp_grp->lock);
117 vnic_idx = usnic_vnic_get_index(qp_grp->vf->vnic);
119 res_chunk = get_qp_res_chunk(qp_grp);
120 if (IS_ERR(res_chunk)) {
121 usnic_err("Unable to get qp res with err %ld\n",
123 return PTR_ERR(res_chunk);
126 for (i = 0; i < res_chunk->cnt; i++) {
127 res = res_chunk->res[i];
128 status = usnic_fwd_enable_qp(qp_grp->ufdev, vnic_idx,
131 usnic_err("Failed to enable qp %d of %s:%d\n with err %d\n",
132 res->vnic_idx, qp_grp->ufdev->name,
141 for (i--; i >= 0; i--) {
142 res = res_chunk->res[i];
143 usnic_fwd_disable_qp(qp_grp->ufdev, vnic_idx,
150 static int disable_qp_grp(struct usnic_ib_qp_grp *qp_grp)
153 struct usnic_vnic_res_chunk *res_chunk;
154 struct usnic_vnic_res *res;
157 lockdep_assert_held(&qp_grp->lock);
158 vnic_idx = usnic_vnic_get_index(qp_grp->vf->vnic);
160 res_chunk = get_qp_res_chunk(qp_grp);
161 if (IS_ERR(res_chunk)) {
162 usnic_err("Unable to get qp res with err %ld\n",
164 return PTR_ERR(res_chunk);
167 for (i = 0; i < res_chunk->cnt; i++) {
168 res = res_chunk->res[i];
169 status = usnic_fwd_disable_qp(qp_grp->ufdev, vnic_idx,
172 usnic_err("Failed to disable rq %d of %s:%d\n with err %d\n",
183 static int init_filter_action(struct usnic_ib_qp_grp *qp_grp,
184 struct usnic_filter_action *uaction)
186 struct usnic_vnic_res_chunk *res_chunk;
188 res_chunk = usnic_ib_qp_grp_get_chunk(qp_grp, USNIC_VNIC_RES_TYPE_RQ);
189 if (IS_ERR(res_chunk)) {
190 usnic_err("Unable to get %s with err %ld\n",
191 usnic_vnic_res_type_to_str(USNIC_VNIC_RES_TYPE_RQ),
193 return PTR_ERR(res_chunk);
196 uaction->vnic_idx = usnic_vnic_get_index(qp_grp->vf->vnic);
197 uaction->action.type = FILTER_ACTION_RQ_STEERING;
198 uaction->action.u.rq_idx = res_chunk->res[DFLT_RQ_IDX]->vnic_idx;
203 static struct usnic_ib_qp_grp_flow*
204 create_roce_custom_flow(struct usnic_ib_qp_grp *qp_grp,
205 struct usnic_transport_spec *trans_spec)
209 struct filter filter;
210 struct usnic_filter_action uaction;
211 struct usnic_ib_qp_grp_flow *qp_flow;
212 struct usnic_fwd_flow *flow;
213 enum usnic_transport_type trans_type;
215 trans_type = trans_spec->trans_type;
216 port_num = trans_spec->usnic_roce.port_num;
219 port_num = usnic_transport_rsrv_port(trans_type, port_num);
221 return ERR_PTR(-EINVAL);
224 usnic_fwd_init_usnic_filter(&filter, port_num);
225 err = init_filter_action(qp_grp, &uaction);
227 goto out_unreserve_port;
229 flow = usnic_fwd_alloc_flow(qp_grp->ufdev, &filter, &uaction);
230 if (IS_ERR_OR_NULL(flow)) {
231 err = flow ? PTR_ERR(flow) : -EFAULT;
232 goto out_unreserve_port;
235 /* Create Flow Handle */
236 qp_flow = kzalloc(sizeof(*qp_flow), GFP_ATOMIC);
239 goto out_dealloc_flow;
241 qp_flow->flow = flow;
242 qp_flow->trans_type = trans_type;
243 qp_flow->usnic_roce.port_num = port_num;
244 qp_flow->qp_grp = qp_grp;
248 usnic_fwd_dealloc_flow(flow);
250 usnic_transport_unrsrv_port(trans_type, port_num);
254 static void release_roce_custom_flow(struct usnic_ib_qp_grp_flow *qp_flow)
256 usnic_fwd_dealloc_flow(qp_flow->flow);
257 usnic_transport_unrsrv_port(qp_flow->trans_type,
258 qp_flow->usnic_roce.port_num);
262 static struct usnic_ib_qp_grp_flow*
263 create_udp_flow(struct usnic_ib_qp_grp *qp_grp,
264 struct usnic_transport_spec *trans_spec)
269 struct filter filter;
270 struct usnic_filter_action uaction;
271 struct usnic_ib_qp_grp_flow *qp_flow;
272 struct usnic_fwd_flow *flow;
273 enum usnic_transport_type trans_type;
278 trans_type = trans_spec->trans_type;
279 sock_fd = trans_spec->udp.sock_fd;
281 /* Get and check socket */
282 sock = usnic_transport_get_socket(sock_fd);
283 if (IS_ERR_OR_NULL(sock))
284 return ERR_CAST(sock);
286 err = usnic_transport_sock_get_addr(sock, &proto, &addr, &port_num);
290 if (proto != IPPROTO_UDP) {
291 usnic_err("Protocol for fd %d is not UDP", sock_fd);
297 usnic_fwd_init_udp_filter(&filter, addr, port_num);
298 err = init_filter_action(qp_grp, &uaction);
302 flow = usnic_fwd_alloc_flow(qp_grp->ufdev, &filter, &uaction);
303 if (IS_ERR_OR_NULL(flow)) {
304 err = flow ? PTR_ERR(flow) : -EFAULT;
309 qp_flow = kzalloc(sizeof(*qp_flow), GFP_ATOMIC);
312 goto out_dealloc_flow;
314 qp_flow->flow = flow;
315 qp_flow->trans_type = trans_type;
316 qp_flow->udp.sock = sock;
317 qp_flow->qp_grp = qp_grp;
321 usnic_fwd_dealloc_flow(flow);
323 usnic_transport_put_socket(sock);
327 static void release_udp_flow(struct usnic_ib_qp_grp_flow *qp_flow)
329 usnic_fwd_dealloc_flow(qp_flow->flow);
330 usnic_transport_put_socket(qp_flow->udp.sock);
334 static struct usnic_ib_qp_grp_flow*
335 create_and_add_flow(struct usnic_ib_qp_grp *qp_grp,
336 struct usnic_transport_spec *trans_spec)
338 struct usnic_ib_qp_grp_flow *qp_flow;
339 enum usnic_transport_type trans_type;
341 trans_type = trans_spec->trans_type;
342 switch (trans_type) {
343 case USNIC_TRANSPORT_ROCE_CUSTOM:
344 qp_flow = create_roce_custom_flow(qp_grp, trans_spec);
346 case USNIC_TRANSPORT_IPV4_UDP:
347 qp_flow = create_udp_flow(qp_grp, trans_spec);
350 usnic_err("Unsupported transport %u\n",
351 trans_spec->trans_type);
352 return ERR_PTR(-EINVAL);
355 if (!IS_ERR_OR_NULL(qp_flow)) {
356 list_add_tail(&qp_flow->link, &qp_grp->flows_lst);
357 usnic_debugfs_flow_add(qp_flow);
364 static void release_and_remove_flow(struct usnic_ib_qp_grp_flow *qp_flow)
366 usnic_debugfs_flow_remove(qp_flow);
367 list_del(&qp_flow->link);
369 switch (qp_flow->trans_type) {
370 case USNIC_TRANSPORT_ROCE_CUSTOM:
371 release_roce_custom_flow(qp_flow);
373 case USNIC_TRANSPORT_IPV4_UDP:
374 release_udp_flow(qp_flow);
377 WARN(1, "Unsupported transport %u\n",
378 qp_flow->trans_type);
383 static void release_and_remove_all_flows(struct usnic_ib_qp_grp *qp_grp)
385 struct usnic_ib_qp_grp_flow *qp_flow, *tmp;
386 list_for_each_entry_safe(qp_flow, tmp, &qp_grp->flows_lst, link)
387 release_and_remove_flow(qp_flow);
390 int usnic_ib_qp_grp_modify(struct usnic_ib_qp_grp *qp_grp,
391 enum ib_qp_state new_state,
396 struct ib_event ib_event;
397 enum ib_qp_state old_state;
398 struct usnic_transport_spec *trans_spec;
399 struct usnic_ib_qp_grp_flow *qp_flow;
401 old_state = qp_grp->state;
402 vnic_idx = usnic_vnic_get_index(qp_grp->vf->vnic);
403 trans_spec = (struct usnic_transport_spec *) data;
405 spin_lock(&qp_grp->lock);
413 release_and_remove_all_flows(qp_grp);
419 status = disable_qp_grp(qp_grp);
420 release_and_remove_all_flows(qp_grp);
430 qp_flow = create_and_add_flow(qp_grp,
432 if (IS_ERR_OR_NULL(qp_flow)) {
433 status = qp_flow ? PTR_ERR(qp_flow) : -EFAULT;
438 * Optional to specify filters.
445 qp_flow = create_and_add_flow(qp_grp,
447 if (IS_ERR_OR_NULL(qp_flow)) {
448 status = qp_flow ? PTR_ERR(qp_flow) : -EFAULT;
453 * Doesn't make sense to go into INIT state
454 * from INIT state w/o adding filters.
460 status = disable_qp_grp(qp_grp);
463 status = disable_qp_grp(qp_grp);
472 status = enable_qp_grp(qp_grp);
488 ib_event.device = &qp_grp->vf->pf->ib_dev;
489 ib_event.element.qp = &qp_grp->ibqp;
490 ib_event.event = IB_EVENT_QP_FATAL;
494 qp_grp->ibqp.event_handler(&ib_event,
495 qp_grp->ibqp.qp_context);
498 release_and_remove_all_flows(qp_grp);
499 qp_grp->ibqp.event_handler(&ib_event,
500 qp_grp->ibqp.qp_context);
504 status = disable_qp_grp(qp_grp);
505 release_and_remove_all_flows(qp_grp);
506 qp_grp->ibqp.event_handler(&ib_event,
507 qp_grp->ibqp.qp_context);
516 spin_unlock(&qp_grp->lock);
519 qp_grp->state = new_state;
520 usnic_info("Transitioned %u from %s to %s",
522 usnic_ib_qp_grp_state_to_string(old_state),
523 usnic_ib_qp_grp_state_to_string(new_state));
525 usnic_err("Failed to transition %u from %s to %s",
527 usnic_ib_qp_grp_state_to_string(old_state),
528 usnic_ib_qp_grp_state_to_string(new_state));
534 static struct usnic_vnic_res_chunk**
535 alloc_res_chunk_list(struct usnic_vnic *vnic,
536 struct usnic_vnic_res_spec *res_spec, void *owner_obj)
538 enum usnic_vnic_res_type res_type;
539 struct usnic_vnic_res_chunk **res_chunk_list;
540 int err, i, res_cnt, res_lst_sz;
543 res_spec->resources[res_lst_sz].type != USNIC_VNIC_RES_TYPE_EOL;
548 res_chunk_list = kzalloc(sizeof(*res_chunk_list)*(res_lst_sz+1),
551 return ERR_PTR(-ENOMEM);
553 for (i = 0; res_spec->resources[i].type != USNIC_VNIC_RES_TYPE_EOL;
555 res_type = res_spec->resources[i].type;
556 res_cnt = res_spec->resources[i].cnt;
558 res_chunk_list[i] = usnic_vnic_get_resources(vnic, res_type,
560 if (IS_ERR_OR_NULL(res_chunk_list[i])) {
561 err = res_chunk_list[i] ?
562 PTR_ERR(res_chunk_list[i]) : -ENOMEM;
563 usnic_err("Failed to get %s from %s with err %d\n",
564 usnic_vnic_res_type_to_str(res_type),
565 usnic_vnic_pci_name(vnic),
571 return res_chunk_list;
574 for (i--; i >= 0; i--)
575 usnic_vnic_put_resources(res_chunk_list[i]);
576 kfree(res_chunk_list);
/* Release each chunk in a NULL-terminated chunk list, then the list. */
static void free_qp_grp_res(struct usnic_vnic_res_chunk **res_chunk_list)
{
	int i;

	for (i = 0; res_chunk_list[i]; i++)
		usnic_vnic_put_resources(res_chunk_list[i]);
	kfree(res_chunk_list);
}
588 static int qp_grp_and_vf_bind(struct usnic_ib_vf *vf,
589 struct usnic_ib_pd *pd,
590 struct usnic_ib_qp_grp *qp_grp)
593 struct pci_dev *pdev;
595 lockdep_assert_held(&vf->lock);
597 pdev = usnic_vnic_get_pdev(vf->vnic);
598 if (vf->qp_grp_ref_cnt == 0) {
599 err = usnic_uiom_attach_dev_to_pd(pd->umem_pd, &pdev->dev);
601 usnic_err("Failed to attach %s to domain\n",
607 vf->qp_grp_ref_cnt++;
609 WARN_ON(vf->pd != pd);
615 static void qp_grp_and_vf_unbind(struct usnic_ib_qp_grp *qp_grp)
617 struct pci_dev *pdev;
618 struct usnic_ib_pd *pd;
620 lockdep_assert_held(&qp_grp->vf->lock);
623 pdev = usnic_vnic_get_pdev(qp_grp->vf->vnic);
624 if (--qp_grp->vf->qp_grp_ref_cnt == 0) {
625 qp_grp->vf->pd = NULL;
626 usnic_uiom_detach_dev_from_pd(pd->umem_pd, &pdev->dev);
/* Dump a resource spec to the debug log. */
static void log_spec(struct usnic_vnic_res_spec *res_spec)
{
	char buf[512];

	usnic_vnic_spec_dump(buf, sizeof(buf), res_spec);
	usnic_dbg("%s\n", buf);
}
638 static int qp_grp_id_from_flow(struct usnic_ib_qp_grp_flow *qp_flow,
641 enum usnic_transport_type trans_type = qp_flow->trans_type;
643 uint16_t port_num = 0;
645 switch (trans_type) {
646 case USNIC_TRANSPORT_ROCE_CUSTOM:
647 *id = qp_flow->usnic_roce.port_num;
649 case USNIC_TRANSPORT_IPV4_UDP:
650 err = usnic_transport_sock_get_addr(qp_flow->udp.sock,
656 * Copy port_num to stack first and then to *id,
657 * so that the short to int cast works for little
658 * and big endian systems.
663 usnic_err("Unsupported transport %u\n", trans_type);
670 struct usnic_ib_qp_grp *
671 usnic_ib_qp_grp_create(struct usnic_fwd_dev *ufdev, struct usnic_ib_vf *vf,
672 struct usnic_ib_pd *pd,
673 struct usnic_vnic_res_spec *res_spec,
674 struct usnic_transport_spec *transport_spec)
676 struct usnic_ib_qp_grp *qp_grp;
678 enum usnic_transport_type transport = transport_spec->trans_type;
679 struct usnic_ib_qp_grp_flow *qp_flow;
681 lockdep_assert_held(&vf->lock);
683 err = usnic_vnic_res_spec_satisfied(&min_transport_spec[transport],
686 usnic_err("Spec does not meet miniumum req for transport %d\n",
692 qp_grp = kzalloc(sizeof(*qp_grp), GFP_ATOMIC);
696 qp_grp->res_chunk_list = alloc_res_chunk_list(vf->vnic, res_spec,
698 if (IS_ERR_OR_NULL(qp_grp->res_chunk_list)) {
699 err = qp_grp->res_chunk_list ?
700 PTR_ERR(qp_grp->res_chunk_list) : -ENOMEM;
701 goto out_free_qp_grp;
704 err = qp_grp_and_vf_bind(vf, pd, qp_grp);
708 INIT_LIST_HEAD(&qp_grp->flows_lst);
709 spin_lock_init(&qp_grp->lock);
710 qp_grp->ufdev = ufdev;
711 qp_grp->state = IB_QPS_RESET;
712 qp_grp->owner_pid = current->pid;
714 qp_flow = create_and_add_flow(qp_grp, transport_spec);
715 if (IS_ERR_OR_NULL(qp_flow)) {
716 usnic_err("Unable to create and add flow with err %ld\n",
718 err = qp_flow ? PTR_ERR(qp_flow) : -EFAULT;
719 goto out_qp_grp_vf_unbind;
722 err = qp_grp_id_from_flow(qp_flow, &qp_grp->grp_id);
724 goto out_release_flow;
725 qp_grp->ibqp.qp_num = qp_grp->grp_id;
727 usnic_ib_sysfs_qpn_add(qp_grp);
732 release_and_remove_flow(qp_flow);
733 out_qp_grp_vf_unbind:
734 qp_grp_and_vf_unbind(qp_grp);
736 free_qp_grp_res(qp_grp->res_chunk_list);
743 void usnic_ib_qp_grp_destroy(struct usnic_ib_qp_grp *qp_grp)
746 WARN_ON(qp_grp->state != IB_QPS_RESET);
747 lockdep_assert_held(&qp_grp->vf->lock);
749 release_and_remove_all_flows(qp_grp);
750 usnic_ib_sysfs_qpn_remove(qp_grp);
751 qp_grp_and_vf_unbind(qp_grp);
752 free_qp_grp_res(qp_grp->res_chunk_list);
756 struct usnic_vnic_res_chunk*
757 usnic_ib_qp_grp_get_chunk(struct usnic_ib_qp_grp *qp_grp,
758 enum usnic_vnic_res_type res_type)
762 for (i = 0; qp_grp->res_chunk_list[i]; i++) {
763 if (qp_grp->res_chunk_list[i]->type == res_type)
764 return qp_grp->res_chunk_list[i];
767 return ERR_PTR(-EINVAL);