/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Voltaire, Inc.  All rights reserved.
 * Copyright (c) 2006 Intel Corporation.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/init.h>
#include <linux/err.h>
#include <linux/random.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <linux/kref.h>
#include <linux/xarray.h>
#include <linux/workqueue.h>
#include <uapi/linux/if_ether.h>
#include <rdma/ib_pack.h>
#include <rdma/ib_cache.h>
#include <rdma/rdma_netlink.h>
#include <net/netlink.h>
#include <uapi/rdma/ib_user_sa.h>
#include <rdma/ib_marshall.h>
#include <rdma/ib_addr.h>
#include <rdma/opa_addr.h>
#include <rdma/rdma_cm.h>

#include "core_priv.h"

#define IB_SA_LOCAL_SVC_TIMEOUT_MIN		100
#define IB_SA_LOCAL_SVC_TIMEOUT_DEFAULT		2000
#define IB_SA_LOCAL_SVC_TIMEOUT_MAX		200000
#define IB_SA_CPI_MAX_RETRY_CNT			3
#define IB_SA_CPI_RETRY_WAIT			1000 /* msecs */
static int sa_local_svc_timeout_ms = IB_SA_LOCAL_SVC_TIMEOUT_DEFAULT;
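
/*
 * Path queries may first be punted to a userspace "local service"
 * resolver (e.g. ibacm) over netlink; sa_local_svc_timeout_ms bounds how
 * long we wait for that answer before falling back to sending the MAD to
 * the SA. The value is adjustable at runtime via
 * ib_nl_handle_set_timeout().
 */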

enum rdma_class_port_info_type {
	RDMA_CLASS_PORT_INFO_IB,
	RDMA_CLASS_PORT_INFO_OPA
};

struct rdma_class_port_info {
	enum rdma_class_port_info_type type;
	union {
		struct ib_class_port_info ib;
		struct opa_class_port_info opa;
	};
};

struct ib_sa_classport_cache {
	bool valid;
	int retry_cnt;
	struct rdma_class_port_info data;
};

struct ib_sa_port {
	struct ib_mad_agent *agent;
	struct ib_sa_sm_ah  *sm_ah;
	struct work_struct   update_task;
	struct ib_sa_classport_cache classport_info;
	struct delayed_work ib_cpi_work;
	spinlock_t	     classport_lock; /* protects class port info set */

	spinlock_t	     ah_lock;
	u32		     port_num;
};

struct ib_sa_device {
	int			start_port, end_port;
	struct ib_event_handler event_handler;
	struct ib_sa_port	port[];
};

struct ib_sa_query {
	void (*callback)(struct ib_sa_query *sa_query, int status,
			 struct ib_sa_mad *mad);
	void (*release)(struct ib_sa_query *);
	struct ib_sa_client    *client;
	struct ib_sa_port      *port;
	struct ib_mad_send_buf *mad_buf;
	struct ib_sa_sm_ah     *sm_ah;
	int			id;
	u32			flags;
	struct list_head	list; /* Local svc request list */
	u32			seq; /* Local svc request sequence number */
	unsigned long		timeout; /* Local svc timeout */
	u8			path_use; /* How will the pathrecord be used */
};

#define IB_SA_ENABLE_LOCAL_SERVICE	0x00000001
#define IB_SA_CANCEL			0x00000002
#define IB_SA_QUERY_OPA			0x00000004

struct ib_sa_path_query {
	void (*callback)(int status, struct sa_path_rec *rec,
			 unsigned int num_paths, void *context);
	void *context;
	struct ib_sa_query sa_query;
	struct sa_path_rec *conv_pr;
};

struct ib_sa_guidinfo_query {
	void (*callback)(int, struct ib_sa_guidinfo_rec *, void *);
	void *context;
	struct ib_sa_query sa_query;
};

struct ib_sa_classport_info_query {
	void (*callback)(void *);
	void *context;
	struct ib_sa_query sa_query;
};

struct ib_sa_mcmember_query {
	void (*callback)(int, struct ib_sa_mcmember_rec *, void *);
	void *context;
	struct ib_sa_query sa_query;
};

static LIST_HEAD(ib_nl_request_list);
static DEFINE_SPINLOCK(ib_nl_request_lock);
static atomic_t ib_nl_sa_request_seq;
static struct workqueue_struct *ib_nl_wq;
static struct delayed_work ib_nl_timed_work;
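
/*
 * Outstanding netlink resolve requests live on ib_nl_request_list under
 * ib_nl_request_lock, each stamped with an absolute jiffies timeout.
 * ib_nl_timed_work (running on ib_nl_wq) expires the head of the list
 * and re-arms itself for the next pending request.
 */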

static const struct nla_policy ib_nl_policy[LS_NLA_TYPE_MAX] = {
	[LS_NLA_TYPE_PATH_RECORD]	= {.type = NLA_BINARY,
					   .len = sizeof(struct ib_path_rec_data)},
	[LS_NLA_TYPE_TIMEOUT]		= {.type = NLA_U32},
	[LS_NLA_TYPE_SERVICE_ID]	= {.type = NLA_U64},
	[LS_NLA_TYPE_DGID]		= {.type = NLA_BINARY,
					   .len = sizeof(struct rdma_nla_ls_gid)},
	[LS_NLA_TYPE_SGID]		= {.type = NLA_BINARY,
					   .len = sizeof(struct rdma_nla_ls_gid)},
	[LS_NLA_TYPE_TCLASS]		= {.type = NLA_U8},
	[LS_NLA_TYPE_PKEY]		= {.type = NLA_U16},
	[LS_NLA_TYPE_QOS_CLASS]		= {.type = NLA_U16},
};

static int ib_sa_add_one(struct ib_device *device);
static void ib_sa_remove_one(struct ib_device *device, void *client_data);

static struct ib_client sa_client = {
	.name   = "sa",
	.add    = ib_sa_add_one,
	.remove = ib_sa_remove_one
};

/* In-flight queries, keyed by the id returned from send_mad() */
static DEFINE_XARRAY_FLAGS(queries, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);

static DEFINE_SPINLOCK(tid_lock);
static u32 tid;

#define PATH_REC_FIELD(field) \
	.struct_offset_bytes = offsetof(struct sa_path_rec, field),	\
	.struct_size_bytes   = sizeof_field(struct sa_path_rec, field),	\
	.field_name          = "sa_path_rec:" #field
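
/*
 * The tables below drive ib_pack()/ib_unpack(): each struct ib_field
 * entry names a struct member (via the macro above) and, in its
 * initializers, gives the word/bit offset and width of that field inside
 * the big-endian SA attribute, so a single table describes both
 * directions of the wire conversion.
 */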

static const struct ib_field path_rec_table[] = {
	{ PATH_REC_FIELD(service_id),
	{ PATH_REC_FIELD(dgid),
	{ PATH_REC_FIELD(sgid),
	{ PATH_REC_FIELD(ib.dlid),
	{ PATH_REC_FIELD(ib.slid),
	{ PATH_REC_FIELD(ib.raw_traffic),
	{ PATH_REC_FIELD(flow_label),
	{ PATH_REC_FIELD(hop_limit),
	{ PATH_REC_FIELD(traffic_class),
	{ PATH_REC_FIELD(reversible),
	{ PATH_REC_FIELD(numb_path),
	{ PATH_REC_FIELD(pkey),
	{ PATH_REC_FIELD(qos_class),
	{ PATH_REC_FIELD(sl),
	{ PATH_REC_FIELD(mtu_selector),
	{ PATH_REC_FIELD(mtu),
	{ PATH_REC_FIELD(rate_selector),
	{ PATH_REC_FIELD(rate),
	{ PATH_REC_FIELD(packet_life_time_selector),
	{ PATH_REC_FIELD(packet_life_time),
	{ PATH_REC_FIELD(preference),
};

#define OPA_PATH_REC_FIELD(field) \
	.struct_offset_bytes = \
		offsetof(struct sa_path_rec, field), \
	.struct_size_bytes   = \
		sizeof_field(struct sa_path_rec, field), \
	.field_name          = "sa_path_rec:" #field

static const struct ib_field opa_path_rec_table[] = {
	{ OPA_PATH_REC_FIELD(service_id),
	{ OPA_PATH_REC_FIELD(dgid),
	{ OPA_PATH_REC_FIELD(sgid),
	{ OPA_PATH_REC_FIELD(opa.dlid),
	{ OPA_PATH_REC_FIELD(opa.slid),
	{ OPA_PATH_REC_FIELD(opa.raw_traffic),
	{ OPA_PATH_REC_FIELD(flow_label),
	{ OPA_PATH_REC_FIELD(hop_limit),
	{ OPA_PATH_REC_FIELD(traffic_class),
	{ OPA_PATH_REC_FIELD(reversible),
	{ OPA_PATH_REC_FIELD(numb_path),
	{ OPA_PATH_REC_FIELD(pkey),
	{ OPA_PATH_REC_FIELD(opa.l2_8B),
	{ OPA_PATH_REC_FIELD(opa.l2_10B),
	{ OPA_PATH_REC_FIELD(opa.l2_9B),
	{ OPA_PATH_REC_FIELD(opa.l2_16B),
	{ OPA_PATH_REC_FIELD(opa.qos_type),
	{ OPA_PATH_REC_FIELD(opa.qos_priority),
	{ OPA_PATH_REC_FIELD(sl),
	{ OPA_PATH_REC_FIELD(mtu_selector),
	{ OPA_PATH_REC_FIELD(mtu),
	{ OPA_PATH_REC_FIELD(rate_selector),
	{ OPA_PATH_REC_FIELD(rate),
	{ OPA_PATH_REC_FIELD(packet_life_time_selector),
	{ OPA_PATH_REC_FIELD(packet_life_time),
	{ OPA_PATH_REC_FIELD(preference),
};

#define MCMEMBER_REC_FIELD(field) \
	.struct_offset_bytes = offsetof(struct ib_sa_mcmember_rec, field),	\
	.struct_size_bytes   = sizeof_field(struct ib_sa_mcmember_rec, field),	\
	.field_name          = "sa_mcmember_rec:" #field

static const struct ib_field mcmember_rec_table[] = {
	{ MCMEMBER_REC_FIELD(mgid),
	{ MCMEMBER_REC_FIELD(port_gid),
	{ MCMEMBER_REC_FIELD(qkey),
	{ MCMEMBER_REC_FIELD(mlid),
	{ MCMEMBER_REC_FIELD(mtu_selector),
	{ MCMEMBER_REC_FIELD(mtu),
	{ MCMEMBER_REC_FIELD(traffic_class),
	{ MCMEMBER_REC_FIELD(pkey),
	{ MCMEMBER_REC_FIELD(rate_selector),
	{ MCMEMBER_REC_FIELD(rate),
	{ MCMEMBER_REC_FIELD(packet_life_time_selector),
	{ MCMEMBER_REC_FIELD(packet_life_time),
	{ MCMEMBER_REC_FIELD(sl),
	{ MCMEMBER_REC_FIELD(flow_label),
	{ MCMEMBER_REC_FIELD(hop_limit),
	{ MCMEMBER_REC_FIELD(scope),
	{ MCMEMBER_REC_FIELD(join_state),
	{ MCMEMBER_REC_FIELD(proxy_join),
};

#define CLASSPORTINFO_REC_FIELD(field) \
	.struct_offset_bytes = offsetof(struct ib_class_port_info, field),	\
	.struct_size_bytes   = sizeof_field(struct ib_class_port_info, field),	\
	.field_name          = "ib_class_port_info:" #field

static const struct ib_field ib_classport_info_rec_table[] = {
	{ CLASSPORTINFO_REC_FIELD(base_version),
	{ CLASSPORTINFO_REC_FIELD(class_version),
	{ CLASSPORTINFO_REC_FIELD(capability_mask),
	{ CLASSPORTINFO_REC_FIELD(cap_mask2_resp_time),
	{ CLASSPORTINFO_REC_FIELD(redirect_gid),
	{ CLASSPORTINFO_REC_FIELD(redirect_tcslfl),
	{ CLASSPORTINFO_REC_FIELD(redirect_lid),
	{ CLASSPORTINFO_REC_FIELD(redirect_pkey),
	{ CLASSPORTINFO_REC_FIELD(redirect_qp),
	{ CLASSPORTINFO_REC_FIELD(redirect_qkey),
	{ CLASSPORTINFO_REC_FIELD(trap_gid),
	{ CLASSPORTINFO_REC_FIELD(trap_tcslfl),
	{ CLASSPORTINFO_REC_FIELD(trap_lid),
	{ CLASSPORTINFO_REC_FIELD(trap_pkey),
	{ CLASSPORTINFO_REC_FIELD(trap_hlqp),
	{ CLASSPORTINFO_REC_FIELD(trap_qkey),
};

#define OPA_CLASSPORTINFO_REC_FIELD(field) \
	.struct_offset_bytes = \
		offsetof(struct opa_class_port_info, field), \
	.struct_size_bytes   = \
		sizeof_field(struct opa_class_port_info, field), \
	.field_name          = "opa_class_port_info:" #field

static const struct ib_field opa_classport_info_rec_table[] = {
	{ OPA_CLASSPORTINFO_REC_FIELD(base_version),
	{ OPA_CLASSPORTINFO_REC_FIELD(class_version),
	{ OPA_CLASSPORTINFO_REC_FIELD(cap_mask),
	{ OPA_CLASSPORTINFO_REC_FIELD(cap_mask2_resp_time),
	{ OPA_CLASSPORTINFO_REC_FIELD(redirect_gid),
	{ OPA_CLASSPORTINFO_REC_FIELD(redirect_tc_fl),
	{ OPA_CLASSPORTINFO_REC_FIELD(redirect_lid),
	{ OPA_CLASSPORTINFO_REC_FIELD(redirect_sl_qp),
	{ OPA_CLASSPORTINFO_REC_FIELD(redirect_qkey),
	{ OPA_CLASSPORTINFO_REC_FIELD(trap_gid),
	{ OPA_CLASSPORTINFO_REC_FIELD(trap_tc_fl),
	{ OPA_CLASSPORTINFO_REC_FIELD(trap_lid),
	{ OPA_CLASSPORTINFO_REC_FIELD(trap_hl_qp),
	{ OPA_CLASSPORTINFO_REC_FIELD(trap_qkey),
	{ OPA_CLASSPORTINFO_REC_FIELD(trap_pkey),
	{ OPA_CLASSPORTINFO_REC_FIELD(redirect_pkey),
	{ OPA_CLASSPORTINFO_REC_FIELD(trap_sl_rsvd),
};

#define GUIDINFO_REC_FIELD(field) \
	.struct_offset_bytes = offsetof(struct ib_sa_guidinfo_rec, field),	\
	.struct_size_bytes   = sizeof_field(struct ib_sa_guidinfo_rec, field),	\
	.field_name          = "sa_guidinfo_rec:" #field

static const struct ib_field guidinfo_rec_table[] = {
	{ GUIDINFO_REC_FIELD(lid),
	{ GUIDINFO_REC_FIELD(block_num),
	{ GUIDINFO_REC_FIELD(res1),
	{ GUIDINFO_REC_FIELD(res2),
	{ GUIDINFO_REC_FIELD(guid_info_list),
};

#define RDMA_PRIMARY_PATH_MAX_REC_NUM	3
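
/*
 * RDMA_PRIMARY_PATH_MAX_REC_NUM bounds the recs[] array used when
 * unpacking netlink resolve replies: at most this many path records are
 * returned to the caller per reply.
 */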

static inline void ib_sa_disable_local_svc(struct ib_sa_query *query)
{
	query->flags &= ~IB_SA_ENABLE_LOCAL_SERVICE;
}

static inline int ib_sa_query_cancelled(struct ib_sa_query *query)
{
	return (query->flags & IB_SA_CANCEL);
}

static void ib_nl_set_path_rec_attrs(struct sk_buff *skb,
				     struct ib_sa_query *query)
{
	struct sa_path_rec *sa_rec = query->mad_buf->context[1];
	struct ib_sa_mad *mad = query->mad_buf->mad;
	ib_sa_comp_mask comp_mask = mad->sa_hdr.comp_mask;
	u64 val64;
	u16 val16;
	struct rdma_ls_resolve_header *header;

	query->mad_buf->context[1] = NULL;

	/* Construct the family header first */
	header = skb_put(skb, NLMSG_ALIGN(sizeof(*header)));
	strscpy_pad(header->device_name,
		    dev_name(&query->port->agent->device->dev),
		    LS_DEVICE_NAME_MAX);
	header->port_num = query->port->port_num;

	if ((comp_mask & IB_SA_PATH_REC_REVERSIBLE) &&
	    sa_rec->reversible != 0)
		query->path_use = LS_RESOLVE_PATH_USE_ALL;
	else
		query->path_use = LS_RESOLVE_PATH_USE_UNIDIRECTIONAL;
	header->path_use = query->path_use;

	/* Now build the attributes */
	if (comp_mask & IB_SA_PATH_REC_SERVICE_ID) {
		val64 = be64_to_cpu(sa_rec->service_id);
		nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_SERVICE_ID,
			sizeof(val64), &val64);
	}
	if (comp_mask & IB_SA_PATH_REC_DGID)
		nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_DGID,
			sizeof(sa_rec->dgid), &sa_rec->dgid);
	if (comp_mask & IB_SA_PATH_REC_SGID)
		nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_SGID,
			sizeof(sa_rec->sgid), &sa_rec->sgid);
	if (comp_mask & IB_SA_PATH_REC_TRAFFIC_CLASS)
		nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_TCLASS,
			sizeof(sa_rec->traffic_class), &sa_rec->traffic_class);

	if (comp_mask & IB_SA_PATH_REC_PKEY) {
		val16 = be16_to_cpu(sa_rec->pkey);
		nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_PKEY,
			sizeof(val16), &val16);
	}
	if (comp_mask & IB_SA_PATH_REC_QOS_CLASS) {
		val16 = be16_to_cpu(sa_rec->qos_class);
		nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_QOS_CLASS,
			sizeof(val16), &val16);
	}
}

static int ib_nl_get_path_rec_attrs_len(ib_sa_comp_mask comp_mask)
{
	int len = 0;

	if (comp_mask & IB_SA_PATH_REC_SERVICE_ID)
		len += nla_total_size(sizeof(u64));
	if (comp_mask & IB_SA_PATH_REC_DGID)
		len += nla_total_size(sizeof(struct rdma_nla_ls_gid));
	if (comp_mask & IB_SA_PATH_REC_SGID)
		len += nla_total_size(sizeof(struct rdma_nla_ls_gid));
	if (comp_mask & IB_SA_PATH_REC_TRAFFIC_CLASS)
		len += nla_total_size(sizeof(u8));
	if (comp_mask & IB_SA_PATH_REC_PKEY)
		len += nla_total_size(sizeof(u16));
	if (comp_mask & IB_SA_PATH_REC_QOS_CLASS)
		len += nla_total_size(sizeof(u16));

	/*
	 * Make sure that at least some of the required comp_mask bits are
	 * set.
	 */
	if (WARN_ON(len == 0))
		return -EINVAL;

	/* Add the family header */
	len += NLMSG_ALIGN(sizeof(struct rdma_ls_resolve_header));

	return len;
}

static int ib_nl_make_request(struct ib_sa_query *query, gfp_t gfp_mask)
{
	struct sk_buff *skb = NULL;
	struct nlmsghdr *nlh;
	void *data;
	struct ib_sa_mad *mad;
	int len;
	unsigned long flags;
	unsigned long delay;
	gfp_t gfp_flag;
	int ret;

	INIT_LIST_HEAD(&query->list);
	query->seq = (u32)atomic_inc_return(&ib_nl_sa_request_seq);

	mad = query->mad_buf->mad;
	len = ib_nl_get_path_rec_attrs_len(mad->sa_hdr.comp_mask);
	if (len <= 0)
		return -EMSGSIZE;

	skb = nlmsg_new(len, gfp_mask);
	if (!skb)
		return -ENOMEM;

	/* Put nlmsg header only for now */
	data = ibnl_put_msg(skb, &nlh, query->seq, 0, RDMA_NL_LS,
			    RDMA_NL_LS_OP_RESOLVE, NLM_F_REQUEST);
	if (!data) {
		nlmsg_free(skb);
		return -EMSGSIZE;
	}

	/* Now put the attributes */
	ib_nl_set_path_rec_attrs(skb, query);

	/* Repair the nlmsg header length */
	nlmsg_end(skb, nlh);

	gfp_flag = ((gfp_mask & GFP_ATOMIC) == GFP_ATOMIC) ? GFP_ATOMIC :
		GFP_NOWAIT;

	spin_lock_irqsave(&ib_nl_request_lock, flags);
	ret = rdma_nl_multicast(&init_net, skb, RDMA_NL_GROUP_LS, gfp_flag);
	if (ret)
		goto out;

	/* Put the request on the list. */
	delay = msecs_to_jiffies(sa_local_svc_timeout_ms);
	query->timeout = delay + jiffies;
	list_add_tail(&query->list, &ib_nl_request_list);
	/* Start the timeout if this is the only request */
	if (ib_nl_request_list.next == &query->list)
		queue_delayed_work(ib_nl_wq, &ib_nl_timed_work, delay);

out:
	spin_unlock_irqrestore(&ib_nl_request_lock, flags);

	return ret;
}
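
/*
 * Cancelling a queued netlink request only marks it IB_SA_CANCEL and
 * forces an immediate run of the timeout worker; the callback and the
 * list teardown happen in ib_nl_request_timeout() so that there is a
 * single completion path.
 */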
static int ib_nl_cancel_request(struct ib_sa_query *query)
{
	unsigned long flags;
	struct ib_sa_query *wait_query;
	int found = 0;

	spin_lock_irqsave(&ib_nl_request_lock, flags);
	list_for_each_entry(wait_query, &ib_nl_request_list, list) {
		/* Let the timeout routine take care of the callback */
		if (query == wait_query) {
			query->flags |= IB_SA_CANCEL;
			query->timeout = jiffies;
			list_move(&query->list, &ib_nl_request_list);
			found = 1;
			mod_delayed_work(ib_nl_wq, &ib_nl_timed_work, 1);
			break;
		}
	}
	spin_unlock_irqrestore(&ib_nl_request_lock, flags);

	return found;
}

static void send_handler(struct ib_mad_agent *agent,
			 struct ib_mad_send_wc *mad_send_wc);

static void ib_nl_process_good_resolve_rsp(struct ib_sa_query *query,
					   const struct nlmsghdr *nlh)
{
	struct sa_path_rec recs[RDMA_PRIMARY_PATH_MAX_REC_NUM];
	struct ib_sa_path_query *path_query;
	struct ib_path_rec_data *rec_data;
	struct ib_mad_send_wc mad_send_wc;
	const struct nlattr *head, *curr;
	struct ib_sa_mad *mad = NULL;
	int len, rem, status = -EIO;
	unsigned int num_prs = 0;
	u32 mask = 0;

	if (!query->callback)
		goto out;

	path_query = container_of(query, struct ib_sa_path_query, sa_query);
	mad = query->mad_buf->mad;

	head = (const struct nlattr *) nlmsg_data(nlh);
	len = nlmsg_len(nlh);
	switch (query->path_use) {
	case LS_RESOLVE_PATH_USE_UNIDIRECTIONAL:
		mask = IB_PATH_PRIMARY | IB_PATH_OUTBOUND;
		break;
	case LS_RESOLVE_PATH_USE_ALL:
		mask = IB_PATH_PRIMARY;
		break;
	case LS_RESOLVE_PATH_USE_GMP:
	default:
		mask = IB_PATH_PRIMARY | IB_PATH_GMP |
			IB_PATH_BIDIRECTIONAL;
	}

	nla_for_each_attr(curr, head, len, rem) {
		if (curr->nla_type != LS_NLA_TYPE_PATH_RECORD)
			continue;

		rec_data = nla_data(curr);
		if ((rec_data->flags & mask) != mask)
			continue;

		if ((query->flags & IB_SA_QUERY_OPA) ||
		    path_query->conv_pr) {
			mad->mad_hdr.method |= IB_MGMT_METHOD_RESP;
			memcpy(mad->data, rec_data->path_rec,
			       sizeof(rec_data->path_rec));
			query->callback(query, 0, mad);
			goto out;
		}

		status = 0;
		ib_unpack(path_rec_table, ARRAY_SIZE(path_rec_table),
			  rec_data->path_rec, &recs[num_prs]);
		recs[num_prs].flags = rec_data->flags;
		recs[num_prs].rec_type = SA_PATH_REC_TYPE_IB;
		sa_path_set_dmac_zero(&recs[num_prs]);

		num_prs++;
		if (num_prs >= RDMA_PRIMARY_PATH_MAX_REC_NUM)
			break;
	}

	if (!status) {
		mad->mad_hdr.method |= IB_MGMT_METHOD_RESP;
		path_query->callback(status, recs, num_prs,
				     path_query->context);
	} else
		query->callback(query, status, mad);

out:
	mad_send_wc.send_buf = query->mad_buf;
	mad_send_wc.status = IB_WC_SUCCESS;
	send_handler(query->mad_buf->mad_agent, &mad_send_wc);
}
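
/*
 * Note that a good netlink reply completes the query without the MAD
 * ever being posted: the callback runs above and a synthetic
 * IB_WC_SUCCESS send completion is fed to send_handler() so the usual
 * teardown path releases the query.
 */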

static void ib_nl_request_timeout(struct work_struct *work)
{
	unsigned long flags;
	struct ib_sa_query *query;
	unsigned long delay;
	struct ib_mad_send_wc mad_send_wc;
	int ret;

	spin_lock_irqsave(&ib_nl_request_lock, flags);
	while (!list_empty(&ib_nl_request_list)) {
		query = list_entry(ib_nl_request_list.next,
				   struct ib_sa_query, list);

		if (time_after(query->timeout, jiffies)) {
			delay = query->timeout - jiffies;
			if ((long)delay <= 0)
				delay = 1;
			queue_delayed_work(ib_nl_wq, &ib_nl_timed_work, delay);
			break;
		}

		list_del(&query->list);
		ib_sa_disable_local_svc(query);
		/* Hold the lock to protect against query cancellation */
		if (ib_sa_query_cancelled(query))
			ret = -1;
		else
			ret = ib_post_send_mad(query->mad_buf, NULL);
		if (ret) {
			mad_send_wc.send_buf = query->mad_buf;
			mad_send_wc.status = IB_WC_WR_FLUSH_ERR;
			spin_unlock_irqrestore(&ib_nl_request_lock, flags);
			send_handler(query->port->agent, &mad_send_wc);
			spin_lock_irqsave(&ib_nl_request_lock, flags);
		}
	}
	spin_unlock_irqrestore(&ib_nl_request_lock, flags);
}

int ib_nl_handle_set_timeout(struct sk_buff *skb,
			     struct nlmsghdr *nlh,
			     struct netlink_ext_ack *extack)
{
	int timeout, delta, abs_delta;
	const struct nlattr *attr;
	unsigned long flags;
	struct ib_sa_query *query;
	long delay = 0;
	struct nlattr *tb[LS_NLA_TYPE_MAX];
	int ret;

	if (!(nlh->nlmsg_flags & NLM_F_REQUEST) ||
	    !(NETLINK_CB(skb).sk))
		return -EPERM;

	ret = nla_parse_deprecated(tb, LS_NLA_TYPE_MAX - 1, nlmsg_data(nlh),
				   nlmsg_len(nlh), ib_nl_policy, NULL);
	attr = (const struct nlattr *)tb[LS_NLA_TYPE_TIMEOUT];
	if (ret || !attr)
		goto settimeout_out;

	timeout = *(int *) nla_data(attr);
	if (timeout < IB_SA_LOCAL_SVC_TIMEOUT_MIN)
		timeout = IB_SA_LOCAL_SVC_TIMEOUT_MIN;
	if (timeout > IB_SA_LOCAL_SVC_TIMEOUT_MAX)
		timeout = IB_SA_LOCAL_SVC_TIMEOUT_MAX;

	delta = timeout - sa_local_svc_timeout_ms;
	if (delta < 0)
		abs_delta = -delta;
	else
		abs_delta = delta;

	if (delta != 0) {
		spin_lock_irqsave(&ib_nl_request_lock, flags);
		sa_local_svc_timeout_ms = timeout;
		list_for_each_entry(query, &ib_nl_request_list, list) {
			if (delta < 0 && abs_delta > query->timeout)
				query->timeout = 0;
			else
				query->timeout += delta;

			/* Get the new delay from the first entry */
			if (!delay) {
				delay = query->timeout - jiffies;
				if (delay <= 0)
					delay = 1;
			}
		}
		if (delay)
			mod_delayed_work(ib_nl_wq, &ib_nl_timed_work,
					 (unsigned long)delay);
		spin_unlock_irqrestore(&ib_nl_request_lock, flags);
	}

settimeout_out:
	return 0;
}

static inline int ib_nl_is_good_resolve_resp(const struct nlmsghdr *nlh)
{
	struct nlattr *tb[LS_NLA_TYPE_MAX];
	int ret;

	if (nlh->nlmsg_flags & RDMA_NL_LS_F_ERR)
		return 0;

	ret = nla_parse_deprecated(tb, LS_NLA_TYPE_MAX - 1, nlmsg_data(nlh),
				   nlmsg_len(nlh), ib_nl_policy, NULL);
	if (ret)
		return 0;

	return 1;
}

int ib_nl_handle_resolve_resp(struct sk_buff *skb,
			      struct nlmsghdr *nlh,
			      struct netlink_ext_ack *extack)
{
	unsigned long flags;
	struct ib_sa_query *query = NULL, *iter;
	struct ib_mad_send_buf *send_buf;
	struct ib_mad_send_wc mad_send_wc;
	int ret;

	if ((nlh->nlmsg_flags & NLM_F_REQUEST) ||
	    !(NETLINK_CB(skb).sk))
		return -EPERM;

	spin_lock_irqsave(&ib_nl_request_lock, flags);
	list_for_each_entry(iter, &ib_nl_request_list, list) {
		/*
		 * If the query is cancelled, let the timeout routine
		 * take care of it.
		 */
		if (nlh->nlmsg_seq == iter->seq) {
			if (!ib_sa_query_cancelled(iter)) {
				list_del(&iter->list);
				query = iter;
			}
			break;
		}
	}

	if (!query) {
		spin_unlock_irqrestore(&ib_nl_request_lock, flags);
		goto resp_out;
	}

	send_buf = query->mad_buf;

	if (!ib_nl_is_good_resolve_resp(nlh)) {
		/* if the result is a failure, send out the packet via IB */
		ib_sa_disable_local_svc(query);
		ret = ib_post_send_mad(query->mad_buf, NULL);
		spin_unlock_irqrestore(&ib_nl_request_lock, flags);
		if (ret) {
			mad_send_wc.send_buf = send_buf;
			mad_send_wc.status = IB_WC_GENERAL_ERR;
			send_handler(query->port->agent, &mad_send_wc);
		}
	} else {
		spin_unlock_irqrestore(&ib_nl_request_lock, flags);
		ib_nl_process_good_resolve_rsp(query, nlh);
	}

resp_out:
	return 0;
}

static void free_sm_ah(struct kref *kref)
{
	struct ib_sa_sm_ah *sm_ah = container_of(kref, struct ib_sa_sm_ah, ref);

	rdma_destroy_ah(sm_ah->ah, 0);
	kfree(sm_ah);
}

void ib_sa_register_client(struct ib_sa_client *client)
{
	atomic_set(&client->users, 1);
	init_completion(&client->comp);
}
EXPORT_SYMBOL(ib_sa_register_client);

void ib_sa_unregister_client(struct ib_sa_client *client)
{
	ib_sa_client_put(client);
	wait_for_completion(&client->comp);
}
EXPORT_SYMBOL(ib_sa_unregister_client);

/**
 * ib_sa_cancel_query - try to cancel an SA query
 * @id: ID of query to cancel
 * @query: query pointer to cancel
 *
 * Try to cancel an SA query. If the id and query don't match up or
 * the query has already completed, nothing is done. Otherwise the
 * query is canceled and will complete with a status of -EINTR.
 */
void ib_sa_cancel_query(int id, struct ib_sa_query *query)
{
	unsigned long flags;
	struct ib_mad_send_buf *mad_buf;

	xa_lock_irqsave(&queries, flags);
	if (xa_load(&queries, id) != query) {
		xa_unlock_irqrestore(&queries, flags);
		return;
	}
	mad_buf = query->mad_buf;
	xa_unlock_irqrestore(&queries, flags);

	/*
	 * If the query is still on the netlink request list, schedule
	 * it to be cancelled by the timeout routine. Otherwise it has been
	 * sent to the MAD layer and has to be cancelled from there.
	 */
	if (!ib_nl_cancel_request(query))
		ib_cancel_mad(mad_buf);
}
EXPORT_SYMBOL(ib_sa_cancel_query);
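
/*
 * Typical cancellation, sketched for illustration (query_id is the
 * non-negative value returned by a query call such as
 * ib_sa_path_rec_get()):
 *
 *	if (query_id >= 0)
 *		ib_sa_cancel_query(query_id, sa_query);
 *
 * The caller's callback still runs, with status -EINTR.
 */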

static u8 get_src_path_mask(struct ib_device *device, u32 port_num)
{
	struct ib_sa_device *sa_dev;
	struct ib_sa_port *port;
	unsigned long flags;
	u8 src_path_mask;

	sa_dev = ib_get_client_data(device, &sa_client);
	if (!sa_dev)
		return 0x7f;

	port = &sa_dev->port[port_num - sa_dev->start_port];
	spin_lock_irqsave(&port->ah_lock, flags);
	src_path_mask = port->sm_ah ? port->sm_ah->src_path_mask : 0x7f;
	spin_unlock_irqrestore(&port->ah_lock, flags);

	return src_path_mask;
}

static int init_ah_attr_grh_fields(struct ib_device *device, u32 port_num,
				   struct sa_path_rec *rec,
				   struct rdma_ah_attr *ah_attr,
				   const struct ib_gid_attr *gid_attr)
{
	enum ib_gid_type type = sa_conv_pathrec_to_gid_type(rec);

	if (!gid_attr) {
		gid_attr = rdma_find_gid_by_port(device, &rec->sgid, type,
						 port_num, NULL);
		if (IS_ERR(gid_attr))
			return PTR_ERR(gid_attr);
	} else
		rdma_hold_gid_attr(gid_attr);

	rdma_move_grh_sgid_attr(ah_attr, &rec->dgid,
				be32_to_cpu(rec->flow_label),
				rec->hop_limit, rec->traffic_class,
				gid_attr);
	return 0;
}

/**
 * ib_init_ah_attr_from_path - Initialize address handle attributes based on
 *   an SA path record.
 * @device: Device the address handle attributes are initialized for.
 * @port_num: Port on the specified device.
 * @rec: path record entry to use for ah attributes initialization.
 * @ah_attr: address handle attributes to initialize from the path record.
 * @gid_attr: SGID attribute to consider during initialization.
 *
 * When ib_init_ah_attr_from_path() returns success,
 * (a) for the IB link layer it optionally contains a reference to the SGID
 * attribute when a GRH is present;
 * (b) for the RoCE link layer it contains a reference to the SGID attribute.
 * The user must invoke rdma_destroy_ah_attr() to release references to SGID
 * attributes which are initialized using ib_init_ah_attr_from_path().
 */
int ib_init_ah_attr_from_path(struct ib_device *device, u32 port_num,
			      struct sa_path_rec *rec,
			      struct rdma_ah_attr *ah_attr,
			      const struct ib_gid_attr *gid_attr)
{
	int ret = 0;

	memset(ah_attr, 0, sizeof(*ah_attr));
	ah_attr->type = rdma_ah_find_type(device, port_num);
	rdma_ah_set_sl(ah_attr, rec->sl);
	rdma_ah_set_port_num(ah_attr, port_num);
	rdma_ah_set_static_rate(ah_attr, rec->rate);

	if (sa_path_is_roce(rec)) {
		ret = roce_resolve_route_from_path(rec, gid_attr);
		if (ret)
			return ret;

		memcpy(ah_attr->roce.dmac, sa_path_get_dmac(rec), ETH_ALEN);
	} else {
		rdma_ah_set_dlid(ah_attr, be32_to_cpu(sa_path_get_dlid(rec)));
		if (sa_path_is_opa(rec) &&
		    rdma_ah_get_dlid(ah_attr) == be16_to_cpu(IB_LID_PERMISSIVE))
			rdma_ah_set_make_grd(ah_attr, true);

		rdma_ah_set_path_bits(ah_attr,
				      be32_to_cpu(sa_path_get_slid(rec)) &
				      get_src_path_mask(device, port_num));
	}

	if (rec->hop_limit > 0 || sa_path_is_roce(rec))
		ret = init_ah_attr_grh_fields(device, port_num,
					      rec, ah_attr, gid_attr);
	return ret;
}
EXPORT_SYMBOL(ib_init_ah_attr_from_path);
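
/*
 * Illustrative caller pattern (a sketch; 'pd' and 'rec' are hypothetical):
 * initialize the attributes from a resolved path, create the AH, then
 * drop the SGID reference as the kernel-doc above requires.
 *
 *	struct rdma_ah_attr ah_attr;
 *	struct ib_ah *ah;
 *
 *	if (!ib_init_ah_attr_from_path(device, port_num, rec, &ah_attr, NULL)) {
 *		ah = rdma_create_ah(pd, &ah_attr, RDMA_CREATE_AH_SLEEPABLE);
 *		rdma_destroy_ah_attr(&ah_attr);
 *	}
 */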

static int alloc_mad(struct ib_sa_query *query, gfp_t gfp_mask)
{
	struct rdma_ah_attr ah_attr;
	unsigned long flags;

	spin_lock_irqsave(&query->port->ah_lock, flags);
	if (!query->port->sm_ah) {
		spin_unlock_irqrestore(&query->port->ah_lock, flags);
		return -EAGAIN;
	}
	kref_get(&query->port->sm_ah->ref);
	query->sm_ah = query->port->sm_ah;
	spin_unlock_irqrestore(&query->port->ah_lock, flags);

	/*
	 * Always check that sm_ah has a valid dlid assigned
	 * before querying for class port info.
	 */
	if ((rdma_query_ah(query->sm_ah->ah, &ah_attr) < 0) ||
	    !rdma_is_valid_unicast_lid(&ah_attr)) {
		kref_put(&query->sm_ah->ref, free_sm_ah);
		return -EAGAIN;
	}
	query->mad_buf = ib_create_send_mad(query->port->agent, 1,
					    query->sm_ah->pkey_index,
					    0, IB_MGMT_SA_HDR, IB_MGMT_SA_DATA,
					    gfp_mask,
					    ((query->flags & IB_SA_QUERY_OPA) ?
					     OPA_MGMT_BASE_VERSION :
					     IB_MGMT_BASE_VERSION));
	if (IS_ERR(query->mad_buf)) {
		kref_put(&query->sm_ah->ref, free_sm_ah);
		return -ENOMEM;
	}

	query->mad_buf->ah = query->sm_ah->ah;

	return 0;
}

static void free_mad(struct ib_sa_query *query)
{
	ib_free_send_mad(query->mad_buf);
	kref_put(&query->sm_ah->ref, free_sm_ah);
}

static void init_mad(struct ib_sa_query *query, struct ib_mad_agent *agent)
{
	struct ib_sa_mad *mad = query->mad_buf->mad;
	unsigned long flags;

	memset(mad, 0, sizeof *mad);

	if (query->flags & IB_SA_QUERY_OPA) {
		mad->mad_hdr.base_version  = OPA_MGMT_BASE_VERSION;
		mad->mad_hdr.class_version = OPA_SA_CLASS_VERSION;
	} else {
		mad->mad_hdr.base_version  = IB_MGMT_BASE_VERSION;
		mad->mad_hdr.class_version = IB_SA_CLASS_VERSION;
	}
	mad->mad_hdr.mgmt_class    = IB_MGMT_CLASS_SUBN_ADM;
	spin_lock_irqsave(&tid_lock, flags);
	mad->mad_hdr.tid           =
		cpu_to_be64(((u64) agent->hi_tid) << 32 | tid++);
	spin_unlock_irqrestore(&tid_lock, flags);
}
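
/*
 * The transaction ID pairs the MAD agent's hi_tid in the upper 32 bits
 * with a locally incremented counter (seeded with random bytes in
 * ib_sa_init()) in the lower 32 bits, so responses can be matched back
 * to the issuing agent and query.
 */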

static int send_mad(struct ib_sa_query *query, unsigned long timeout_ms,
		    gfp_t gfp_mask)
{
	unsigned long flags;
	int ret, id;
	const int nmbr_sa_query_retries = 10;

	xa_lock_irqsave(&queries, flags);
	ret = __xa_alloc(&queries, &id, query, xa_limit_32b, gfp_mask);
	xa_unlock_irqrestore(&queries, flags);
	if (ret < 0)
		return ret;

	query->mad_buf->timeout_ms  = timeout_ms / nmbr_sa_query_retries;
	query->mad_buf->retries = nmbr_sa_query_retries;
	if (!query->mad_buf->timeout_ms) {
		/* Special case, very small timeout_ms */
		query->mad_buf->timeout_ms = 1;
		query->mad_buf->retries = timeout_ms;
	}
	query->mad_buf->context[0] = query;
	query->id = id;

	if ((query->flags & IB_SA_ENABLE_LOCAL_SERVICE) &&
	    (!(query->flags & IB_SA_QUERY_OPA))) {
		if (rdma_nl_chk_listeners(RDMA_NL_GROUP_LS)) {
			if (!ib_nl_make_request(query, gfp_mask))
				return id;
		}
		ib_sa_disable_local_svc(query);
	}

	ret = ib_post_send_mad(query->mad_buf, NULL);
	if (ret) {
		xa_lock_irqsave(&queries, flags);
		__xa_erase(&queries, id);
		xa_unlock_irqrestore(&queries, flags);
	}

	/*
	 * It's not safe to dereference query any more, because the
	 * send may already have completed and freed the query in
	 * send_handler.
	 */
	return ret ? ret : id;
}

void ib_sa_unpack_path(void *attribute, struct sa_path_rec *rec)
{
	ib_unpack(path_rec_table, ARRAY_SIZE(path_rec_table), attribute, rec);
}
EXPORT_SYMBOL(ib_sa_unpack_path);

void ib_sa_pack_path(struct sa_path_rec *rec, void *attribute)
{
	ib_pack(path_rec_table, ARRAY_SIZE(path_rec_table), rec, attribute);
}
EXPORT_SYMBOL(ib_sa_pack_path);
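
/*
 * Round-trip sketch (the buffer name is hypothetical): packing a
 * struct sa_path_rec into an IB_MGMT_SA_DATA-sized buffer and unpacking
 * it again reproduces the packed fields.
 *
 *	u8 wire[IB_MGMT_SA_DATA];
 *	struct sa_path_rec out;
 *
 *	ib_sa_pack_path(&rec, wire);
 *	ib_sa_unpack_path(wire, &out);
 */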

static bool ib_sa_opa_pathrecord_support(struct ib_sa_client *client,
					 struct ib_sa_device *sa_dev,
					 u32 port_num)
{
	struct ib_sa_port *port;
	unsigned long flags;
	bool ret = false;

	port = &sa_dev->port[port_num - sa_dev->start_port];
	spin_lock_irqsave(&port->classport_lock, flags);
	if (!port->classport_info.valid)
		goto ret;

	if (port->classport_info.data.type == RDMA_CLASS_PORT_INFO_OPA)
		ret = opa_get_cpi_capmask2(&port->classport_info.data.opa) &
			OPA_CLASS_PORT_INFO_PR_SUPPORT;
ret:
	spin_unlock_irqrestore(&port->classport_lock, flags);
	return ret;
}

enum opa_pr_supported {
	PR_NOT_SUPPORTED,
	PR_OPA_SUPPORTED,
	PR_IB_SUPPORTED
};

/*
 * opa_pr_query_possible - Check if the current PR query can be an OPA query.
 *
 * Returns PR_NOT_SUPPORTED if a path record query is not
 * possible, PR_OPA_SUPPORTED if an OPA path record query
 * is possible and PR_IB_SUPPORTED if an IB path record
 * query is possible.
 */
static int opa_pr_query_possible(struct ib_sa_client *client,
				 struct ib_sa_device *sa_dev,
				 struct ib_device *device, u32 port_num)
{
	struct ib_port_attr port_attr;

	if (ib_query_port(device, port_num, &port_attr))
		return PR_NOT_SUPPORTED;

	if (ib_sa_opa_pathrecord_support(client, sa_dev, port_num))
		return PR_OPA_SUPPORTED;

	if (port_attr.lid >= be16_to_cpu(IB_MULTICAST_LID_BASE))
		return PR_NOT_SUPPORTED;
	else
		return PR_IB_SUPPORTED;
}

static void ib_sa_path_rec_callback(struct ib_sa_query *sa_query,
				    int status, struct ib_sa_mad *mad)
{
	struct ib_sa_path_query *query =
		container_of(sa_query, struct ib_sa_path_query, sa_query);
	struct sa_path_rec rec = {};

	if (!mad) {
		query->callback(status, NULL, 0, query->context);
		return;
	}

	if (sa_query->flags & IB_SA_QUERY_OPA) {
		ib_unpack(opa_path_rec_table, ARRAY_SIZE(opa_path_rec_table),
			  mad->data, &rec);
		rec.rec_type = SA_PATH_REC_TYPE_OPA;
		query->callback(status, &rec, 1, query->context);
		return;
	}

	ib_unpack(path_rec_table, ARRAY_SIZE(path_rec_table),
		  mad->data, &rec);
	rec.rec_type = SA_PATH_REC_TYPE_IB;
	sa_path_set_dmac_zero(&rec);

	if (query->conv_pr) {
		struct sa_path_rec opa;

		memset(&opa, 0, sizeof(struct sa_path_rec));
		sa_convert_path_ib_to_opa(&opa, &rec);
		query->callback(status, &opa, 1, query->context);
	} else {
		query->callback(status, &rec, 1, query->context);
	}
}

static void ib_sa_path_rec_release(struct ib_sa_query *sa_query)
{
	struct ib_sa_path_query *query =
		container_of(sa_query, struct ib_sa_path_query, sa_query);

	kfree(query->conv_pr);
	kfree(query);
}

/**
 * ib_sa_path_rec_get - Start a Path get query
 * @client: SA client
 * @device: device to send query on
 * @port_num: port number to send query on
 * @rec: Path Record to send in query
 * @comp_mask: component mask to send in query
 * @timeout_ms: time to wait for response
 * @gfp_mask: GFP mask to use for internal allocations
 * @callback: function called when query completes, times out or is
 * canceled
 * @context: opaque user context passed to callback
 * @sa_query: query context, used to cancel query
 *
 * Send a Path Record Get query to the SA to look up a path. The
 * callback function will be called when the query completes (or
 * fails); status is 0 for a successful response, -EINTR if the query
 * is canceled, -ETIMEDOUT if the query timed out, or -EIO if an error
 * occurred sending the query. The resp parameter of the callback is
 * only valid if status is 0.
 *
 * If the return value of ib_sa_path_rec_get() is negative, it is an
 * error code. Otherwise it is a query ID that can be used to cancel
 * the query.
 */
int ib_sa_path_rec_get(struct ib_sa_client *client,
		       struct ib_device *device, u32 port_num,
		       struct sa_path_rec *rec,
		       ib_sa_comp_mask comp_mask,
		       unsigned long timeout_ms, gfp_t gfp_mask,
		       void (*callback)(int status,
					struct sa_path_rec *resp,
					unsigned int num_paths, void *context),
		       void *context,
		       struct ib_sa_query **sa_query)
{
	struct ib_sa_path_query *query;
	struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
	struct ib_sa_port   *port;
	struct ib_mad_agent *agent;
	struct ib_sa_mad *mad;
	enum opa_pr_supported status;
	int ret;

	if (!sa_dev)
		return -ENODEV;

	if ((rec->rec_type != SA_PATH_REC_TYPE_IB) &&
	    (rec->rec_type != SA_PATH_REC_TYPE_OPA))
		return -EINVAL;

	port  = &sa_dev->port[port_num - sa_dev->start_port];
	agent = port->agent;

	query = kzalloc(sizeof(*query), gfp_mask);
	if (!query)
		return -ENOMEM;

	query->sa_query.port = port;
	if (rec->rec_type == SA_PATH_REC_TYPE_OPA) {
		status = opa_pr_query_possible(client, sa_dev, device, port_num);
		if (status == PR_NOT_SUPPORTED) {
			ret = -EINVAL;
			goto err1;
		} else if (status == PR_OPA_SUPPORTED) {
			query->sa_query.flags |= IB_SA_QUERY_OPA;
		} else {
			query->conv_pr =
				kmalloc(sizeof(*query->conv_pr), gfp_mask);
			if (!query->conv_pr) {
				ret = -ENOMEM;
				goto err1;
			}
		}
	}

	ret = alloc_mad(&query->sa_query, gfp_mask);
	if (ret)
		goto err2;

	ib_sa_client_get(client);
	query->sa_query.client = client;
	query->callback        = callback;
	query->context         = context;

	mad = query->sa_query.mad_buf->mad;
	init_mad(&query->sa_query, agent);

	query->sa_query.callback = callback ? ib_sa_path_rec_callback : NULL;
	query->sa_query.release  = ib_sa_path_rec_release;
	mad->mad_hdr.method	 = IB_MGMT_METHOD_GET;
	mad->mad_hdr.attr_id	 = cpu_to_be16(IB_SA_ATTR_PATH_REC);
	mad->sa_hdr.comp_mask	 = comp_mask;

	if (query->sa_query.flags & IB_SA_QUERY_OPA) {
		ib_pack(opa_path_rec_table, ARRAY_SIZE(opa_path_rec_table),
			rec, mad->data);
	} else if (query->conv_pr) {
		sa_convert_path_opa_to_ib(query->conv_pr, rec);
		ib_pack(path_rec_table, ARRAY_SIZE(path_rec_table),
			query->conv_pr, mad->data);
	} else {
		ib_pack(path_rec_table, ARRAY_SIZE(path_rec_table),
			rec, mad->data);
	}

	*sa_query = &query->sa_query;

	query->sa_query.flags |= IB_SA_ENABLE_LOCAL_SERVICE;
	query->sa_query.mad_buf->context[1] = (query->conv_pr) ?
						query->conv_pr : rec;

	ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
	if (ret < 0)
		goto err3;

	return ret;

err3:
	*sa_query = NULL;
	ib_sa_client_put(query->sa_query.client);
	free_mad(&query->sa_query);
err2:
	kfree(query->conv_pr);
err1:
	kfree(query);
	return ret;
}
EXPORT_SYMBOL(ib_sa_path_rec_get);
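
/*
 * Minimal caller sketch (callback, context and client names are
 * hypothetical): query a path by SGID/DGID, keeping the returned id for
 * ib_sa_cancel_query().
 *
 *	static void my_path_cb(int status, struct sa_path_rec *resp,
 *			       unsigned int num_paths, void *context)
 *	{
 *		...
 *	}
 *
 *	id = ib_sa_path_rec_get(&my_sa_client, device, port_num, &rec,
 *				IB_SA_PATH_REC_DGID | IB_SA_PATH_REC_SGID,
 *				3000, GFP_KERNEL, my_path_cb, my_ctx,
 *				&sa_query);
 */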

static void ib_sa_mcmember_rec_callback(struct ib_sa_query *sa_query,
					int status, struct ib_sa_mad *mad)
{
	struct ib_sa_mcmember_query *query =
		container_of(sa_query, struct ib_sa_mcmember_query, sa_query);

	if (mad) {
		struct ib_sa_mcmember_rec rec;

		ib_unpack(mcmember_rec_table, ARRAY_SIZE(mcmember_rec_table),
			  mad->data, &rec);
		query->callback(status, &rec, query->context);
	} else
		query->callback(status, NULL, query->context);
}

static void ib_sa_mcmember_rec_release(struct ib_sa_query *sa_query)
{
	kfree(container_of(sa_query, struct ib_sa_mcmember_query, sa_query));
}

int ib_sa_mcmember_rec_query(struct ib_sa_client *client,
			     struct ib_device *device, u32 port_num,
			     u8 method,
			     struct ib_sa_mcmember_rec *rec,
			     ib_sa_comp_mask comp_mask,
			     unsigned long timeout_ms, gfp_t gfp_mask,
			     void (*callback)(int status,
					      struct ib_sa_mcmember_rec *resp,
					      void *context),
			     void *context,
			     struct ib_sa_query **sa_query)
{
	struct ib_sa_mcmember_query *query;
	struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
	struct ib_sa_port   *port;
	struct ib_mad_agent *agent;
	struct ib_sa_mad *mad;
	int ret;

	if (!sa_dev)
		return -ENODEV;

	port  = &sa_dev->port[port_num - sa_dev->start_port];
	agent = port->agent;

	query = kzalloc(sizeof(*query), gfp_mask);
	if (!query)
		return -ENOMEM;

	query->sa_query.port = port;
	ret = alloc_mad(&query->sa_query, gfp_mask);
	if (ret)
		goto err1;

	ib_sa_client_get(client);
	query->sa_query.client = client;
	query->callback        = callback;
	query->context         = context;

	mad = query->sa_query.mad_buf->mad;
	init_mad(&query->sa_query, agent);

	query->sa_query.callback = callback ? ib_sa_mcmember_rec_callback : NULL;
	query->sa_query.release  = ib_sa_mcmember_rec_release;
	mad->mad_hdr.method	 = method;
	mad->mad_hdr.attr_id	 = cpu_to_be16(IB_SA_ATTR_MC_MEMBER_REC);
	mad->sa_hdr.comp_mask	 = comp_mask;

	ib_pack(mcmember_rec_table, ARRAY_SIZE(mcmember_rec_table),
		rec, mad->data);

	*sa_query = &query->sa_query;

	ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
	if (ret < 0)
		goto err2;

	return ret;

err2:
	*sa_query = NULL;
	ib_sa_client_put(query->sa_query.client);
	free_mad(&query->sa_query);
err1:
	kfree(query);
	return ret;
}

/* Support GuidInfoRecord */
static void ib_sa_guidinfo_rec_callback(struct ib_sa_query *sa_query,
					int status, struct ib_sa_mad *mad)
{
	struct ib_sa_guidinfo_query *query =
		container_of(sa_query, struct ib_sa_guidinfo_query, sa_query);

	if (mad) {
		struct ib_sa_guidinfo_rec rec;

		ib_unpack(guidinfo_rec_table, ARRAY_SIZE(guidinfo_rec_table),
			  mad->data, &rec);
		query->callback(status, &rec, query->context);
	} else
		query->callback(status, NULL, query->context);
}

static void ib_sa_guidinfo_rec_release(struct ib_sa_query *sa_query)
{
	kfree(container_of(sa_query, struct ib_sa_guidinfo_query, sa_query));
}

int ib_sa_guid_info_rec_query(struct ib_sa_client *client,
			      struct ib_device *device, u32 port_num,
			      struct ib_sa_guidinfo_rec *rec,
			      ib_sa_comp_mask comp_mask, u8 method,
			      unsigned long timeout_ms, gfp_t gfp_mask,
			      void (*callback)(int status,
					       struct ib_sa_guidinfo_rec *resp,
					       void *context),
			      void *context,
			      struct ib_sa_query **sa_query)
{
	struct ib_sa_guidinfo_query *query;
	struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
	struct ib_sa_port *port;
	struct ib_mad_agent *agent;
	struct ib_sa_mad *mad;
	int ret;

	if (!sa_dev)
		return -ENODEV;

	if (method != IB_MGMT_METHOD_GET &&
	    method != IB_MGMT_METHOD_SET &&
	    method != IB_SA_METHOD_DELETE) {
		return -EINVAL;
	}

	port  = &sa_dev->port[port_num - sa_dev->start_port];
	agent = port->agent;

	query = kzalloc(sizeof(*query), gfp_mask);
	if (!query)
		return -ENOMEM;

	query->sa_query.port = port;
	ret = alloc_mad(&query->sa_query, gfp_mask);
	if (ret)
		goto err1;

	ib_sa_client_get(client);
	query->sa_query.client = client;
	query->callback        = callback;
	query->context         = context;

	mad = query->sa_query.mad_buf->mad;
	init_mad(&query->sa_query, agent);

	query->sa_query.callback = callback ? ib_sa_guidinfo_rec_callback : NULL;
	query->sa_query.release  = ib_sa_guidinfo_rec_release;

	mad->mad_hdr.method	 = method;
	mad->mad_hdr.attr_id	 = cpu_to_be16(IB_SA_ATTR_GUID_INFO_REC);
	mad->sa_hdr.comp_mask	 = comp_mask;

	ib_pack(guidinfo_rec_table, ARRAY_SIZE(guidinfo_rec_table), rec,
		mad->data);

	*sa_query = &query->sa_query;

	ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
	if (ret < 0)
		goto err2;

	return ret;

err2:
	*sa_query = NULL;
	ib_sa_client_put(query->sa_query.client);
	free_mad(&query->sa_query);
err1:
	kfree(query);
	return ret;
}
EXPORT_SYMBOL(ib_sa_guid_info_rec_query);
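
/*
 * Unlike path queries, GuidInfoRecord queries may also SET or DELETE
 * records at the SM (used, e.g., for alias-GUID programming), which is
 * why the method is validated above rather than hardwired to
 * IB_MGMT_METHOD_GET.
 */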

struct ib_classport_info_context {
	struct completion	done;
	struct ib_sa_query	*sa_query;
};

static void ib_classportinfo_cb(void *context)
{
	struct ib_classport_info_context *cb_ctx = context;

	complete(&cb_ctx->done);
}

static void ib_sa_classport_info_rec_callback(struct ib_sa_query *sa_query,
					      int status, struct ib_sa_mad *mad)
{
	unsigned long flags;
	struct ib_sa_classport_info_query *query =
		container_of(sa_query, struct ib_sa_classport_info_query, sa_query);
	struct ib_sa_classport_cache *info = &sa_query->port->classport_info;

	if (mad) {
		if (sa_query->flags & IB_SA_QUERY_OPA) {
			struct opa_class_port_info rec;

			ib_unpack(opa_classport_info_rec_table,
				  ARRAY_SIZE(opa_classport_info_rec_table),
				  mad->data, &rec);

			spin_lock_irqsave(&sa_query->port->classport_lock,
					  flags);
			if (!status && !info->valid) {
				memcpy(&info->data.opa, &rec,
				       sizeof(info->data.opa));

				info->valid = true;
				info->data.type = RDMA_CLASS_PORT_INFO_OPA;
			}
			spin_unlock_irqrestore(&sa_query->port->classport_lock,
					       flags);

		} else {
			struct ib_class_port_info rec;

			ib_unpack(ib_classport_info_rec_table,
				  ARRAY_SIZE(ib_classport_info_rec_table),
				  mad->data, &rec);

			spin_lock_irqsave(&sa_query->port->classport_lock,
					  flags);
			if (!status && !info->valid) {
				memcpy(&info->data.ib, &rec,
				       sizeof(info->data.ib));

				info->valid = true;
				info->data.type = RDMA_CLASS_PORT_INFO_IB;
			}
			spin_unlock_irqrestore(&sa_query->port->classport_lock,
					       flags);
		}
	}
	query->callback(query->context);
}

static void ib_sa_classport_info_rec_release(struct ib_sa_query *sa_query)
{
	kfree(container_of(sa_query, struct ib_sa_classport_info_query,
			   sa_query));
}

static int ib_sa_classport_info_rec_query(struct ib_sa_port *port,
					  unsigned long timeout_ms,
					  void (*callback)(void *context),
					  void *context,
					  struct ib_sa_query **sa_query)
{
	struct ib_mad_agent *agent;
	struct ib_sa_classport_info_query *query;
	struct ib_sa_mad *mad;
	gfp_t gfp_mask = GFP_KERNEL;
	int ret;

	agent = port->agent;

	query = kzalloc(sizeof(*query), gfp_mask);
	if (!query)
		return -ENOMEM;

	query->sa_query.port = port;
	query->sa_query.flags |= rdma_cap_opa_ah(port->agent->device,
						 port->port_num) ?
				 IB_SA_QUERY_OPA : 0;
	ret = alloc_mad(&query->sa_query, gfp_mask);
	if (ret)
		goto err_free;

	query->callback = callback;
	query->context = context;

	mad = query->sa_query.mad_buf->mad;
	init_mad(&query->sa_query, agent);

	query->sa_query.callback = ib_sa_classport_info_rec_callback;
	query->sa_query.release  = ib_sa_classport_info_rec_release;
	mad->mad_hdr.method	 = IB_MGMT_METHOD_GET;
	mad->mad_hdr.attr_id	 = cpu_to_be16(IB_SA_ATTR_CLASS_PORTINFO);
	mad->sa_hdr.comp_mask	 = 0;
	*sa_query = &query->sa_query;

	ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
	if (ret < 0)
		goto err_free_mad;

	return ret;

err_free_mad:
	*sa_query = NULL;
	free_mad(&query->sa_query);

err_free:
	kfree(query);
	return ret;
}

static void update_ib_cpi(struct work_struct *work)
{
	struct ib_sa_port *port =
		container_of(work, struct ib_sa_port, ib_cpi_work.work);
	struct ib_classport_info_context *cb_context;
	unsigned long flags;
	int ret;

	/* If the classport info is valid, nothing
	 * to do here.
	 */
	spin_lock_irqsave(&port->classport_lock, flags);
	if (port->classport_info.valid) {
		spin_unlock_irqrestore(&port->classport_lock, flags);
		return;
	}
	spin_unlock_irqrestore(&port->classport_lock, flags);

	cb_context = kmalloc(sizeof(*cb_context), GFP_KERNEL);
	if (!cb_context)
		goto err_nomem;

	init_completion(&cb_context->done);

	ret = ib_sa_classport_info_rec_query(port, 3000,
					     ib_classportinfo_cb, cb_context,
					     &cb_context->sa_query);
	if (ret < 0)
		goto free_cb_err;
	wait_for_completion(&cb_context->done);
free_cb_err:
	kfree(cb_context);
	spin_lock_irqsave(&port->classport_lock, flags);

	/* If the classport info is still not valid, the query should have
	 * failed for some reason. Retry issuing the query.
	 */
	if (!port->classport_info.valid) {
		port->classport_info.retry_cnt++;
		if (port->classport_info.retry_cnt <=
		    IB_SA_CPI_MAX_RETRY_CNT) {
			unsigned long delay =
				msecs_to_jiffies(IB_SA_CPI_RETRY_WAIT);

			queue_delayed_work(ib_wq, &port->ib_cpi_work, delay);
		}
	}
	spin_unlock_irqrestore(&port->classport_lock, flags);

err_nomem:
	return;
}

static void send_handler(struct ib_mad_agent *agent,
			 struct ib_mad_send_wc *mad_send_wc)
{
	struct ib_sa_query *query = mad_send_wc->send_buf->context[0];
	unsigned long flags;

	if (query->callback)
		switch (mad_send_wc->status) {
		case IB_WC_SUCCESS:
			/* No callback -- already got recv */
			break;
		case IB_WC_RESP_TIMEOUT_ERR:
			query->callback(query, -ETIMEDOUT, NULL);
			break;
		case IB_WC_WR_FLUSH_ERR:
			query->callback(query, -EINTR, NULL);
			break;
		default:
			query->callback(query, -EIO, NULL);
			break;
		}

	xa_lock_irqsave(&queries, flags);
	__xa_erase(&queries, query->id);
	xa_unlock_irqrestore(&queries, flags);

	free_mad(query);
	if (query->client)
		ib_sa_client_put(query->client);
	query->release(query);
}

static void recv_handler(struct ib_mad_agent *mad_agent,
			 struct ib_mad_send_buf *send_buf,
			 struct ib_mad_recv_wc *mad_recv_wc)
{
	struct ib_sa_query *query;

	if (!send_buf)
		return;

	query = send_buf->context[0];
	if (query->callback) {
		if (mad_recv_wc->wc->status == IB_WC_SUCCESS)
			query->callback(query,
					mad_recv_wc->recv_buf.mad->mad_hdr.status ?
					-EINVAL : 0,
					(struct ib_sa_mad *) mad_recv_wc->recv_buf.mad);
		else
			query->callback(query, -EIO, NULL);
	}

	ib_free_recv_mad(mad_recv_wc);
}

static void update_sm_ah(struct work_struct *work)
{
	struct ib_sa_port *port =
		container_of(work, struct ib_sa_port, update_task);
	struct ib_sa_sm_ah *new_ah;
	struct ib_port_attr port_attr;
	struct rdma_ah_attr ah_attr;
	bool grh_required;

	if (ib_query_port(port->agent->device, port->port_num, &port_attr)) {
		pr_warn("Couldn't query port\n");
		return;
	}

	new_ah = kmalloc(sizeof(*new_ah), GFP_KERNEL);
	if (!new_ah)
		return;

	kref_init(&new_ah->ref);
	new_ah->src_path_mask = (1 << port_attr.lmc) - 1;

	new_ah->pkey_index = 0;
	if (ib_find_pkey(port->agent->device, port->port_num,
			 IB_DEFAULT_PKEY_FULL, &new_ah->pkey_index))
		pr_err("Couldn't find index for default PKey\n");

	memset(&ah_attr, 0, sizeof(ah_attr));
	ah_attr.type = rdma_ah_find_type(port->agent->device,
					 port->port_num);
	rdma_ah_set_dlid(&ah_attr, port_attr.sm_lid);
	rdma_ah_set_sl(&ah_attr, port_attr.sm_sl);
	rdma_ah_set_port_num(&ah_attr, port->port_num);

	grh_required = rdma_is_grh_required(port->agent->device,
					    port->port_num);

	/*
	 * The OPA sm_lid of 0xFFFF needs special handling so that it can be
	 * differentiated from a permissive LID of 0xFFFF. We set the
	 * grh_required flag here so the SA can program the DGID in the
	 * address handle appropriately.
	 */
	if (ah_attr.type == RDMA_AH_ATTR_TYPE_OPA &&
	    (grh_required ||
	     port_attr.sm_lid == be16_to_cpu(IB_LID_PERMISSIVE)))
		rdma_ah_set_make_grd(&ah_attr, true);

	if (ah_attr.type == RDMA_AH_ATTR_TYPE_IB && grh_required) {
		rdma_ah_set_ah_flags(&ah_attr, IB_AH_GRH);
		rdma_ah_set_subnet_prefix(&ah_attr,
					  cpu_to_be64(port_attr.subnet_prefix));
		rdma_ah_set_interface_id(&ah_attr,
					 cpu_to_be64(IB_SA_WELL_KNOWN_GUID));
	}

	new_ah->ah = rdma_create_ah(port->agent->qp->pd, &ah_attr,
				    RDMA_CREATE_AH_SLEEPABLE);
	if (IS_ERR(new_ah->ah)) {
		pr_warn("Couldn't create new SM AH\n");
		kfree(new_ah);
		return;
	}

	spin_lock_irq(&port->ah_lock);
	if (port->sm_ah)
		kref_put(&port->sm_ah->ref, free_sm_ah);
	port->sm_ah = new_ah;
	spin_unlock_irq(&port->ah_lock);
}

static void ib_sa_event(struct ib_event_handler *handler,
			struct ib_event *event)
{
	if (event->event == IB_EVENT_PORT_ERR    ||
	    event->event == IB_EVENT_PORT_ACTIVE ||
	    event->event == IB_EVENT_LID_CHANGE  ||
	    event->event == IB_EVENT_PKEY_CHANGE ||
	    event->event == IB_EVENT_SM_CHANGE   ||
	    event->event == IB_EVENT_CLIENT_REREGISTER) {
		unsigned long flags;
		struct ib_sa_device *sa_dev =
			container_of(handler, typeof(*sa_dev), event_handler);
		u32 port_num = event->element.port_num - sa_dev->start_port;
		struct ib_sa_port *port = &sa_dev->port[port_num];

		if (!rdma_cap_ib_sa(handler->device, port->port_num))
			return;

		spin_lock_irqsave(&port->ah_lock, flags);
		if (port->sm_ah)
			kref_put(&port->sm_ah->ref, free_sm_ah);
		port->sm_ah = NULL;
		spin_unlock_irqrestore(&port->ah_lock, flags);

		if (event->event == IB_EVENT_SM_CHANGE ||
		    event->event == IB_EVENT_CLIENT_REREGISTER ||
		    event->event == IB_EVENT_LID_CHANGE ||
		    event->event == IB_EVENT_PORT_ACTIVE) {
			unsigned long delay =
				msecs_to_jiffies(IB_SA_CPI_RETRY_WAIT);

			spin_lock_irqsave(&port->classport_lock, flags);
			port->classport_info.valid = false;
			port->classport_info.retry_cnt = 0;
			spin_unlock_irqrestore(&port->classport_lock, flags);

			queue_delayed_work(ib_wq,
					   &port->ib_cpi_work, delay);
		}
		queue_work(ib_wq, &sa_dev->port[port_num].update_task);
	}
}

static int ib_sa_add_one(struct ib_device *device)
{
	struct ib_sa_device *sa_dev;
	int s, e, i;
	int count = 0;
	int ret;

	s = rdma_start_port(device);
	e = rdma_end_port(device);

	sa_dev = kzalloc(struct_size(sa_dev, port,
				     size_add(size_sub(e, s), 1)),
			 GFP_KERNEL);
	if (!sa_dev)
		return -ENOMEM;

	sa_dev->start_port = s;
	sa_dev->end_port   = e;

	for (i = 0; i <= e - s; ++i) {
		spin_lock_init(&sa_dev->port[i].ah_lock);
		if (!rdma_cap_ib_sa(device, i + 1))
			continue;

		sa_dev->port[i].sm_ah    = NULL;
		sa_dev->port[i].port_num = i + s;

		spin_lock_init(&sa_dev->port[i].classport_lock);
		sa_dev->port[i].classport_info.valid = false;

		sa_dev->port[i].agent =
			ib_register_mad_agent(device, i + s, IB_QPT_GSI,
					      NULL, 0, send_handler,
					      recv_handler, sa_dev, 0);
		if (IS_ERR(sa_dev->port[i].agent)) {
			ret = PTR_ERR(sa_dev->port[i].agent);
			goto err;
		}

		INIT_WORK(&sa_dev->port[i].update_task, update_sm_ah);
		INIT_DELAYED_WORK(&sa_dev->port[i].ib_cpi_work,
				  update_ib_cpi);

		count++;
	}

	if (!count) {
		ret = -EOPNOTSUPP;
		goto free;
	}

	ib_set_client_data(device, &sa_client, sa_dev);

	/*
	 * We register our event handler after everything is set up,
	 * and then update our cached info after the event handler is
	 * registered to avoid any problems if a port changes state
	 * during our initialization.
	 */

	INIT_IB_EVENT_HANDLER(&sa_dev->event_handler, device, ib_sa_event);
	ib_register_event_handler(&sa_dev->event_handler);

	for (i = 0; i <= e - s; ++i) {
		if (rdma_cap_ib_sa(device, i + 1))
			update_sm_ah(&sa_dev->port[i].update_task);
	}

	return 0;

err:
	while (--i >= 0) {
		if (rdma_cap_ib_sa(device, i + 1))
			ib_unregister_mad_agent(sa_dev->port[i].agent);
	}
free:
	kfree(sa_dev);
	return ret;
}

static void ib_sa_remove_one(struct ib_device *device, void *client_data)
{
	struct ib_sa_device *sa_dev = client_data;
	int i;

	ib_unregister_event_handler(&sa_dev->event_handler);
	flush_workqueue(ib_wq);

	for (i = 0; i <= sa_dev->end_port - sa_dev->start_port; ++i) {
		if (rdma_cap_ib_sa(device, i + 1)) {
			cancel_delayed_work_sync(&sa_dev->port[i].ib_cpi_work);
			ib_unregister_mad_agent(sa_dev->port[i].agent);
			if (sa_dev->port[i].sm_ah)
				kref_put(&sa_dev->port[i].sm_ah->ref, free_sm_ah);
		}
	}

	kfree(sa_dev);
}

int ib_sa_init(void)
{
	int ret;

	get_random_bytes(&tid, sizeof tid);

	atomic_set(&ib_nl_sa_request_seq, 0);

	ret = ib_register_client(&sa_client);
	if (ret) {
		pr_err("Couldn't register ib_sa client\n");
		goto err1;
	}

	ret = mcast_init();
	if (ret) {
		pr_err("Couldn't initialize multicast handling\n");
		goto err2;
	}

	ib_nl_wq = alloc_ordered_workqueue("ib_nl_sa_wq", WQ_MEM_RECLAIM);
	if (!ib_nl_wq) {
		ret = -ENOMEM;
		goto err3;
	}

	INIT_DELAYED_WORK(&ib_nl_timed_work, ib_nl_request_timeout);

	return 0;

err3:
	mcast_cleanup();
err2:
	ib_unregister_client(&sa_client);
err1:
	return ret;
}

void ib_sa_cleanup(void)
{
	cancel_delayed_work(&ib_nl_timed_work);
	destroy_workqueue(ib_nl_wq);
	mcast_cleanup();
	ib_unregister_client(&sa_client);
	WARN_ON(!xa_empty(&queries));
}