// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2004-2007 Intel Corporation. All rights reserved.
 * Copyright (c) 2004 Topspin Corporation. All rights reserved.
 * Copyright (c) 2004, 2005 Voltaire Corporation. All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2019, Mellanox Technologies inc. All rights reserved.
 */

#include <linux/completion.h>
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/random.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
#include <linux/workqueue.h>
#include <linux/kdev_t.h>
#include <linux/etherdevice.h>

#include <rdma/ib_cache.h>
#include <rdma/ib_cm.h>
#include "cm_msgs.h"
#include "core_priv.h"
#include "cm_trace.h"

MODULE_AUTHOR("Sean Hefty");
MODULE_DESCRIPTION("InfiniBand CM");
MODULE_LICENSE("Dual BSD/GPL");

static const char * const ibcm_rej_reason_strs[] = {
	[IB_CM_REJ_NO_QP] = "no QP",
	[IB_CM_REJ_NO_EEC] = "no EEC",
	[IB_CM_REJ_NO_RESOURCES] = "no resources",
	[IB_CM_REJ_TIMEOUT] = "timeout",
	[IB_CM_REJ_UNSUPPORTED] = "unsupported",
	[IB_CM_REJ_INVALID_COMM_ID] = "invalid comm ID",
	[IB_CM_REJ_INVALID_COMM_INSTANCE] = "invalid comm instance",
	[IB_CM_REJ_INVALID_SERVICE_ID] = "invalid service ID",
	[IB_CM_REJ_INVALID_TRANSPORT_TYPE] = "invalid transport type",
	[IB_CM_REJ_STALE_CONN] = "stale conn",
	[IB_CM_REJ_RDC_NOT_EXIST] = "RDC not exist",
	[IB_CM_REJ_INVALID_GID] = "invalid GID",
	[IB_CM_REJ_INVALID_LID] = "invalid LID",
	[IB_CM_REJ_INVALID_SL] = "invalid SL",
	[IB_CM_REJ_INVALID_TRAFFIC_CLASS] = "invalid traffic class",
	[IB_CM_REJ_INVALID_HOP_LIMIT] = "invalid hop limit",
	[IB_CM_REJ_INVALID_PACKET_RATE] = "invalid packet rate",
	[IB_CM_REJ_INVALID_ALT_GID] = "invalid alt GID",
	[IB_CM_REJ_INVALID_ALT_LID] = "invalid alt LID",
	[IB_CM_REJ_INVALID_ALT_SL] = "invalid alt SL",
	[IB_CM_REJ_INVALID_ALT_TRAFFIC_CLASS] = "invalid alt traffic class",
	[IB_CM_REJ_INVALID_ALT_HOP_LIMIT] = "invalid alt hop limit",
	[IB_CM_REJ_INVALID_ALT_PACKET_RATE] = "invalid alt packet rate",
	[IB_CM_REJ_PORT_CM_REDIRECT] = "port CM redirect",
	[IB_CM_REJ_PORT_REDIRECT] = "port redirect",
	[IB_CM_REJ_INVALID_MTU] = "invalid MTU",
	[IB_CM_REJ_INSUFFICIENT_RESP_RESOURCES] = "insufficient resp resources",
	[IB_CM_REJ_CONSUMER_DEFINED] = "consumer defined",
	[IB_CM_REJ_INVALID_RNR_RETRY] = "invalid RNR retry",
	[IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID] = "duplicate local comm ID",
	[IB_CM_REJ_INVALID_CLASS_VERSION] = "invalid class version",
	[IB_CM_REJ_INVALID_FLOW_LABEL] = "invalid flow label",
	[IB_CM_REJ_INVALID_ALT_FLOW_LABEL] = "invalid alt flow label",
	[IB_CM_REJ_VENDOR_OPTION_NOT_SUPPORTED] =
		"vendor option is not supported",
};

const char *__attribute_const__ ibcm_reject_msg(int reason)
{
	size_t index = reason;

	if (index < ARRAY_SIZE(ibcm_rej_reason_strs) &&
	    ibcm_rej_reason_strs[index])
		return ibcm_rej_reason_strs[index];

	return "unrecognized reason";
}
EXPORT_SYMBOL(ibcm_reject_msg);

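/*
 * Illustrative note (not in the upstream file): the lookup above is a plain
 * bounds-checked table index, so e.g. ibcm_reject_msg(IB_CM_REJ_TIMEOUT)
 * returns "timeout", while any out-of-range value or unset slot falls back
 * to "unrecognized reason".
 */
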
struct cm_id_private;
struct cm_work;
static int cm_add_one(struct ib_device *device);
static void cm_remove_one(struct ib_device *device, void *client_data);
static void cm_process_work(struct cm_id_private *cm_id_priv,
			    struct cm_work *work);
static int cm_send_sidr_rep_locked(struct cm_id_private *cm_id_priv,
				   struct ib_cm_sidr_rep_param *param);
static int cm_send_dreq_locked(struct cm_id_private *cm_id_priv,
			       const void *private_data, u8 private_data_len);
static int cm_send_drep_locked(struct cm_id_private *cm_id_priv,
			       void *private_data, u8 private_data_len);
static int cm_send_rej_locked(struct cm_id_private *cm_id_priv,
			      enum ib_cm_rej_reason reason, void *ari,
			      u8 ari_length, const void *private_data,
			      u8 private_data_len);

static struct ib_client cm_client = {
	.name   = "cm",
	.add    = cm_add_one,
	.remove = cm_remove_one
};

static struct ib_cm {
	spinlock_t lock;
	struct list_head device_list;
	rwlock_t device_lock;
	struct rb_root listen_service_table;
	u64 listen_service_id;
	/* struct rb_root peer_service_table; todo: fix peer to peer */
	struct rb_root remote_qp_table;
	struct rb_root remote_id_table;
	struct rb_root remote_sidr_table;
	struct xarray local_id_table;
	u32 local_id_next;
	__be32 random_id_operand;
	struct list_head timewait_list;
	struct workqueue_struct *wq;
	/* Sync on cm change port state */
	spinlock_t state_lock;
} cm;

/* Counter indexes ordered by attribute ID */
enum {
	CM_REQ_COUNTER,
	CM_MRA_COUNTER,
	CM_REJ_COUNTER,
	CM_REP_COUNTER,
	CM_RTU_COUNTER,
	CM_DREQ_COUNTER,
	CM_DREP_COUNTER,
	CM_SIDR_REQ_COUNTER,
	CM_SIDR_REP_COUNTER,
	CM_LAP_COUNTER,
	CM_APR_COUNTER,
	CM_ATTR_COUNT,
	CM_ATTR_ID_OFFSET = 0x0010,
};

enum {
	CM_XMIT,
	CM_XMIT_RETRIES,
	CM_RECV,
	CM_RECV_DUPLICATES,
	CM_COUNTER_GROUPS
};

static char const counter_group_names[CM_COUNTER_GROUPS]
				     [sizeof("cm_rx_duplicates")] = {
	"cm_tx_msgs", "cm_tx_retries",
	"cm_rx_msgs", "cm_rx_duplicates"
};

struct cm_counter_group {
	struct kobject obj;
	atomic_long_t counter[CM_ATTR_COUNT];
};

struct cm_counter_attribute {
	struct attribute attr;
	int index;
};

#define CM_COUNTER_ATTR(_name, _index) \
struct cm_counter_attribute cm_##_name##_counter_attr = { \
	.attr = { .name = __stringify(_name), .mode = 0444 }, \
	.index = _index \
}

static CM_COUNTER_ATTR(req, CM_REQ_COUNTER);
static CM_COUNTER_ATTR(mra, CM_MRA_COUNTER);
static CM_COUNTER_ATTR(rej, CM_REJ_COUNTER);
static CM_COUNTER_ATTR(rep, CM_REP_COUNTER);
static CM_COUNTER_ATTR(rtu, CM_RTU_COUNTER);
static CM_COUNTER_ATTR(dreq, CM_DREQ_COUNTER);
static CM_COUNTER_ATTR(drep, CM_DREP_COUNTER);
static CM_COUNTER_ATTR(sidr_req, CM_SIDR_REQ_COUNTER);
static CM_COUNTER_ATTR(sidr_rep, CM_SIDR_REP_COUNTER);
static CM_COUNTER_ATTR(lap, CM_LAP_COUNTER);
static CM_COUNTER_ATTR(apr, CM_APR_COUNTER);

static struct attribute *cm_counter_default_attrs[] = {
	&cm_req_counter_attr.attr,
	&cm_mra_counter_attr.attr,
	&cm_rej_counter_attr.attr,
	&cm_rep_counter_attr.attr,
	&cm_rtu_counter_attr.attr,
	&cm_dreq_counter_attr.attr,
	&cm_drep_counter_attr.attr,
	&cm_sidr_req_counter_attr.attr,
	&cm_sidr_rep_counter_attr.attr,
	&cm_lap_counter_attr.attr,
	&cm_apr_counter_attr.attr,
	NULL
};

struct cm_port {
	struct cm_device *cm_dev;
	struct ib_mad_agent *mad_agent;
	u8 port_num;
	struct list_head cm_priv_prim_list;
	struct list_head cm_priv_altr_list;
	struct cm_counter_group counter_group[CM_COUNTER_GROUPS];
};

struct cm_device {
	struct list_head list;
	struct ib_device *ib_device;
	u8 ack_delay;
	int going_down;
	struct cm_port *port[];
};

struct cm_av {
	struct cm_port *port;
	union ib_gid dgid;
	struct rdma_ah_attr ah_attr;
	u16 pkey_index;
	u8 timeout;
};

struct cm_work {
	struct delayed_work work;
	struct list_head list;
	struct cm_port *port;
	struct ib_mad_recv_wc *mad_recv_wc;	/* Received MADs */
	__be32 local_id;			/* Established / timewait */
	__be32 remote_id;
	struct ib_cm_event cm_event;
	struct sa_path_rec path[];
};

struct cm_timewait_info {
	struct cm_work work;
	struct list_head list;
	struct rb_node remote_qp_node;
	struct rb_node remote_id_node;
	__be64 remote_ca_guid;
	__be32 remote_qpn;
	u8 inserted_remote_qp;
	u8 inserted_remote_id;
};

struct cm_id_private {
	struct ib_cm_id	id;

	struct rb_node service_node;
	struct rb_node sidr_id_node;
	spinlock_t lock;	/* Do not acquire inside cm.lock */
	struct completion comp;
	refcount_t refcount;
	/* Number of clients sharing this ib_cm_id. Only valid for listeners.
	 * Protected by the cm.lock spinlock. */
	int listen_sharecount;
	struct rcu_head rcu;

	struct ib_mad_send_buf *msg;
	struct cm_timewait_info *timewait_info;
	/* todo: use alternate port on send failure */
	struct cm_av av;
	struct cm_av alt_av;

	void *private_data;
	__be64 tid;
	__be32 local_qpn;
	__be32 remote_qpn;
	enum ib_qp_type qp_type;
	__be32 sq_psn;
	__be32 rq_psn;
	int timeout_ms;
	enum ib_mtu path_mtu;
	__be16 pkey;
	u8 private_data_len;
	u8 max_cm_retries;
	u8 responder_resources;
	u8 initiator_depth;
	u8 retry_count;
	u8 rnr_retry_count;
	u8 service_timeout;
	u8 target_ack_delay;

	struct list_head prim_list;
	struct list_head altr_list;
	/* Indicates that the send port mad is registered and av is set */
	int prim_send_port_not_ready;
	int altr_send_port_not_ready;

	struct list_head work_list;
	atomic_t work_count;

	struct rdma_ucm_ece ece;
};

static void cm_work_handler(struct work_struct *work);

static inline void cm_deref_id(struct cm_id_private *cm_id_priv)
{
	if (refcount_dec_and_test(&cm_id_priv->refcount))
		complete(&cm_id_priv->comp);
}

static int cm_alloc_msg(struct cm_id_private *cm_id_priv,
			struct ib_mad_send_buf **msg)
{
	struct ib_mad_agent *mad_agent;
	struct ib_mad_send_buf *m;
	struct ib_ah *ah;
	struct cm_av *av;
	unsigned long flags, flags2;
	int ret = 0;

	/* don't let the port be released till the agent is down */
	spin_lock_irqsave(&cm.state_lock, flags2);
	spin_lock_irqsave(&cm.lock, flags);
	if (!cm_id_priv->prim_send_port_not_ready)
		av = &cm_id_priv->av;
	else if (!cm_id_priv->altr_send_port_not_ready &&
		 (cm_id_priv->alt_av.port))
		av = &cm_id_priv->alt_av;
	else {
		pr_info("%s: not valid CM id\n", __func__);
		ret = -ENODEV;
		spin_unlock_irqrestore(&cm.lock, flags);
		goto out;
	}
	spin_unlock_irqrestore(&cm.lock, flags);
	/* Make sure the port hasn't released the mad agent yet */
	mad_agent = cm_id_priv->av.port->mad_agent;
	if (!mad_agent) {
		pr_info("%s: not a valid MAD agent\n", __func__);
		ret = -ENODEV;
		goto out;
	}
	ah = rdma_create_ah(mad_agent->qp->pd, &av->ah_attr, 0);
	if (IS_ERR(ah)) {
		ret = PTR_ERR(ah);
		goto out;
	}

	m = ib_create_send_mad(mad_agent, cm_id_priv->id.remote_cm_qpn,
			       av->pkey_index,
			       0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
			       GFP_ATOMIC,
			       IB_MGMT_BASE_VERSION);
	if (IS_ERR(m)) {
		rdma_destroy_ah(ah, 0);
		ret = PTR_ERR(m);
		goto out;
	}

	/* Timeout set by caller if response is expected. */
	m->ah = ah;
	m->retries = cm_id_priv->max_cm_retries;

	refcount_inc(&cm_id_priv->refcount);
	m->context[0] = cm_id_priv;
	*msg = m;

out:
	spin_unlock_irqrestore(&cm.state_lock, flags2);
	return ret;
}

static struct ib_mad_send_buf *cm_alloc_response_msg_no_ah(struct cm_port *port,
							   struct ib_mad_recv_wc *mad_recv_wc)
{
	return ib_create_send_mad(port->mad_agent, 1, mad_recv_wc->wc->pkey_index,
				  0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
				  GFP_ATOMIC,
				  IB_MGMT_BASE_VERSION);
}

static int cm_create_response_msg_ah(struct cm_port *port,
				     struct ib_mad_recv_wc *mad_recv_wc,
				     struct ib_mad_send_buf *msg)
{
	struct ib_ah *ah;

	ah = ib_create_ah_from_wc(port->mad_agent->qp->pd, mad_recv_wc->wc,
				  mad_recv_wc->recv_buf.grh, port->port_num);
	if (IS_ERR(ah))
		return PTR_ERR(ah);

	msg->ah = ah;
	return 0;
}

static void cm_free_msg(struct ib_mad_send_buf *msg)
{
	if (msg->ah)
		rdma_destroy_ah(msg->ah, 0);
	if (msg->context[0])
		cm_deref_id(msg->context[0]);
	ib_free_send_mad(msg);
}

static int cm_alloc_response_msg(struct cm_port *port,
				 struct ib_mad_recv_wc *mad_recv_wc,
				 struct ib_mad_send_buf **msg)
{
	struct ib_mad_send_buf *m;
	int ret;

	m = cm_alloc_response_msg_no_ah(port, mad_recv_wc);
	if (IS_ERR(m))
		return PTR_ERR(m);

	ret = cm_create_response_msg_ah(port, mad_recv_wc, m);
	if (ret) {
		cm_free_msg(m);
		return ret;
	}

	*msg = m;
	return 0;
}

static void *cm_copy_private_data(const void *private_data,
				  u8 private_data_len)
{
	void *data;

	if (!private_data || !private_data_len)
		return NULL;

	data = kmemdup(private_data, private_data_len, GFP_KERNEL);
	if (!data)
		return ERR_PTR(-ENOMEM);

	return data;
}

static void cm_set_private_data(struct cm_id_private *cm_id_priv,
				void *private_data, u8 private_data_len)
{
	if (cm_id_priv->private_data && cm_id_priv->private_data_len)
		kfree(cm_id_priv->private_data);

	cm_id_priv->private_data = private_data;
	cm_id_priv->private_data_len = private_data_len;
}

static int cm_init_av_for_lap(struct cm_port *port, struct ib_wc *wc,
			      struct ib_grh *grh, struct cm_av *av)
{
	struct rdma_ah_attr new_ah_attr;
	int ret;

	av->port = port;
	av->pkey_index = wc->pkey_index;

	/*
	 * av->ah_attr might be initialized based on past wc during incoming
	 * connect request or while sending out connect request. So initialize
	 * a new ah_attr on stack. If initialization fails, the old ah_attr is
	 * used for sending any responses. If initialization is successful,
	 * then the new ah_attr is used by overwriting the old one.
	 */
	ret = ib_init_ah_attr_from_wc(port->cm_dev->ib_device,
				      port->port_num, wc,
				      grh, &new_ah_attr);
	if (ret)
		return ret;

	rdma_move_ah_attr(&av->ah_attr, &new_ah_attr);
	return 0;
}

static int cm_init_av_for_response(struct cm_port *port, struct ib_wc *wc,
				   struct ib_grh *grh, struct cm_av *av)
{
	av->port = port;
	av->pkey_index = wc->pkey_index;
	return ib_init_ah_attr_from_wc(port->cm_dev->ib_device,
				       port->port_num, wc,
				       grh, &av->ah_attr);
}

static void add_cm_id_to_port_list(struct cm_id_private *cm_id_priv,
				   struct cm_av *av, struct cm_port *port)
{
	unsigned long flags;

	spin_lock_irqsave(&cm.lock, flags);
	if (&cm_id_priv->av == av)
		list_add_tail(&cm_id_priv->prim_list, &port->cm_priv_prim_list);
	else if (&cm_id_priv->alt_av == av)
		list_add_tail(&cm_id_priv->altr_list, &port->cm_priv_altr_list);
	else
		WARN_ON(true);
	spin_unlock_irqrestore(&cm.lock, flags);
}

static struct cm_port *
get_cm_port_from_path(struct sa_path_rec *path, const struct ib_gid_attr *attr)
{
	struct cm_device *cm_dev;
	struct cm_port *port = NULL;
	unsigned long flags;

	if (attr) {
		read_lock_irqsave(&cm.device_lock, flags);
		list_for_each_entry(cm_dev, &cm.device_list, list) {
			if (cm_dev->ib_device == attr->device) {
				port = cm_dev->port[attr->port_num - 1];
				break;
			}
		}
		read_unlock_irqrestore(&cm.device_lock, flags);
	} else {
		/* SGID attribute can be NULL in the following
		 * conditions.
		 * (a) Alternative path
		 * (b) IB link layer without GRH
		 * (c) LAP send messages
		 */
		read_lock_irqsave(&cm.device_lock, flags);
		list_for_each_entry(cm_dev, &cm.device_list, list) {
			attr = rdma_find_gid(cm_dev->ib_device,
					     &path->sgid,
					     sa_conv_pathrec_to_gid_type(path),
					     NULL);
			if (!IS_ERR(attr)) {
				port = cm_dev->port[attr->port_num - 1];
				break;
			}
		}
		read_unlock_irqrestore(&cm.device_lock, flags);
		if (port)
			rdma_put_gid_attr(attr);
	}
	return port;
}

static int cm_init_av_by_path(struct sa_path_rec *path,
			      const struct ib_gid_attr *sgid_attr,
			      struct cm_av *av,
			      struct cm_id_private *cm_id_priv)
{
	struct rdma_ah_attr new_ah_attr;
	struct cm_device *cm_dev;
	struct cm_port *port;
	int ret;

	port = get_cm_port_from_path(path, sgid_attr);
	if (!port)
		return -EINVAL;
	cm_dev = port->cm_dev;

	ret = ib_find_cached_pkey(cm_dev->ib_device, port->port_num,
				  be16_to_cpu(path->pkey), &av->pkey_index);
	if (ret)
		return ret;

	av->port = port;

	/*
	 * av->ah_attr might be initialized based on wc or during
	 * request processing time which might have reference to sgid_attr.
	 * So initialize a new ah_attr on stack.
	 * If initialization fails, the old ah_attr is used for sending any
	 * responses. If initialization is successful, then the new ah_attr
	 * is used by overwriting the old one, so that the right ah_attr
	 * can be used to return an error response.
	 */
	ret = ib_init_ah_attr_from_path(cm_dev->ib_device, port->port_num, path,
					&new_ah_attr, sgid_attr);
	if (ret)
		return ret;

	av->timeout = path->packet_life_time + 1;
	add_cm_id_to_port_list(cm_id_priv, av, port);
	rdma_move_ah_attr(&av->ah_attr, &new_ah_attr);
	return 0;
}

static u32 cm_local_id(__be32 local_id)
{
	return (__force u32) (local_id ^ cm.random_id_operand);
}

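/*
 * Illustrative note: cm_alloc_id_priv() stores id.local_id as
 * (xarray index) ^ cm.random_id_operand, so cm_local_id() just undoes that
 * XOR to recover the xarray index. The random operand makes the IDs that
 * appear on the wire less predictable without changing lookup cost.
 */
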
static struct cm_id_private *cm_acquire_id(__be32 local_id, __be32 remote_id)
{
	struct cm_id_private *cm_id_priv;

	rcu_read_lock();
	cm_id_priv = xa_load(&cm.local_id_table, cm_local_id(local_id));
	if (!cm_id_priv || cm_id_priv->id.remote_id != remote_id ||
	    !refcount_inc_not_zero(&cm_id_priv->refcount))
		cm_id_priv = NULL;
	rcu_read_unlock();

	return cm_id_priv;
}

/*
 * Trivial helpers to strip endian annotation and compare; the
 * endianness doesn't actually matter since we just need a stable
 * order for the RB tree.
 */
static int be32_lt(__be32 a, __be32 b)
{
	return (__force u32) a < (__force u32) b;
}

static int be32_gt(__be32 a, __be32 b)
{
	return (__force u32) a > (__force u32) b;
}

static int be64_lt(__be64 a, __be64 b)
{
	return (__force u64) a < (__force u64) b;
}

static int be64_gt(__be64 a, __be64 b)
{
	return (__force u64) a > (__force u64) b;
}

/*
 * Inserts a new cm_id_priv into the listen_service_table. Returns cm_id_priv
 * if the new ID was inserted, NULL if it could not be inserted due to a
 * collision, or the existing cm_id_priv ready for shared usage.
 */
static struct cm_id_private *cm_insert_listen(struct cm_id_private *cm_id_priv,
					      ib_cm_handler shared_handler)
{
	struct rb_node **link = &cm.listen_service_table.rb_node;
	struct rb_node *parent = NULL;
	struct cm_id_private *cur_cm_id_priv;
	__be64 service_id = cm_id_priv->id.service_id;
	__be64 service_mask = cm_id_priv->id.service_mask;
	unsigned long flags;

	spin_lock_irqsave(&cm.lock, flags);
	while (*link) {
		parent = *link;
		cur_cm_id_priv = rb_entry(parent, struct cm_id_private,
					  service_node);
		if ((cur_cm_id_priv->id.service_mask & service_id) ==
		    (service_mask & cur_cm_id_priv->id.service_id) &&
		    (cm_id_priv->id.device == cur_cm_id_priv->id.device)) {
			/*
			 * Sharing an ib_cm_id with different handlers is not
			 * supported
			 */
			if (cur_cm_id_priv->id.cm_handler != shared_handler ||
			    cur_cm_id_priv->id.context ||
			    WARN_ON(!cur_cm_id_priv->id.cm_handler)) {
				spin_unlock_irqrestore(&cm.lock, flags);
				return NULL;
			}
			refcount_inc(&cur_cm_id_priv->refcount);
			cur_cm_id_priv->listen_sharecount++;
			spin_unlock_irqrestore(&cm.lock, flags);
			return cur_cm_id_priv;
		}

		if (cm_id_priv->id.device < cur_cm_id_priv->id.device)
			link = &(*link)->rb_left;
		else if (cm_id_priv->id.device > cur_cm_id_priv->id.device)
			link = &(*link)->rb_right;
		else if (be64_lt(service_id, cur_cm_id_priv->id.service_id))
			link = &(*link)->rb_left;
		else if (be64_gt(service_id, cur_cm_id_priv->id.service_id))
			link = &(*link)->rb_right;
		else
			link = &(*link)->rb_right;
	}
	cm_id_priv->listen_sharecount++;
	rb_link_node(&cm_id_priv->service_node, parent, link);
	rb_insert_color(&cm_id_priv->service_node, &cm.listen_service_table);
	spin_unlock_irqrestore(&cm.lock, flags);
	return cm_id_priv;
}

static struct cm_id_private *cm_find_listen(struct ib_device *device,
					    __be64 service_id)
{
	struct rb_node *node = cm.listen_service_table.rb_node;
	struct cm_id_private *cm_id_priv;

	while (node) {
		cm_id_priv = rb_entry(node, struct cm_id_private, service_node);
		if ((cm_id_priv->id.service_mask & service_id) ==
		     cm_id_priv->id.service_id &&
		    (cm_id_priv->id.device == device)) {
			refcount_inc(&cm_id_priv->refcount);
			return cm_id_priv;
		}
		if (device < cm_id_priv->id.device)
			node = node->rb_left;
		else if (device > cm_id_priv->id.device)
			node = node->rb_right;
		else if (be64_lt(service_id, cm_id_priv->id.service_id))
			node = node->rb_left;
		else if (be64_gt(service_id, cm_id_priv->id.service_id))
			node = node->rb_right;
		else
			node = node->rb_right;
	}
	return NULL;
}

static struct cm_timewait_info *
cm_insert_remote_id(struct cm_timewait_info *timewait_info)
{
	struct rb_node **link = &cm.remote_id_table.rb_node;
	struct rb_node *parent = NULL;
	struct cm_timewait_info *cur_timewait_info;
	__be64 remote_ca_guid = timewait_info->remote_ca_guid;
	__be32 remote_id = timewait_info->work.remote_id;

	while (*link) {
		parent = *link;
		cur_timewait_info = rb_entry(parent, struct cm_timewait_info,
					     remote_id_node);
		if (be32_lt(remote_id, cur_timewait_info->work.remote_id))
			link = &(*link)->rb_left;
		else if (be32_gt(remote_id, cur_timewait_info->work.remote_id))
			link = &(*link)->rb_right;
		else if (be64_lt(remote_ca_guid, cur_timewait_info->remote_ca_guid))
			link = &(*link)->rb_left;
		else if (be64_gt(remote_ca_guid, cur_timewait_info->remote_ca_guid))
			link = &(*link)->rb_right;
		else
			return cur_timewait_info;
	}
	timewait_info->inserted_remote_id = 1;
	rb_link_node(&timewait_info->remote_id_node, parent, link);
	rb_insert_color(&timewait_info->remote_id_node, &cm.remote_id_table);
	return NULL;
}

static struct cm_id_private *cm_find_remote_id(__be64 remote_ca_guid,
					       __be32 remote_id)
{
	struct rb_node *node = cm.remote_id_table.rb_node;
	struct cm_timewait_info *timewait_info;
	struct cm_id_private *res = NULL;

	spin_lock_irq(&cm.lock);
	while (node) {
		timewait_info = rb_entry(node, struct cm_timewait_info,
					 remote_id_node);
		if (be32_lt(remote_id, timewait_info->work.remote_id))
			node = node->rb_left;
		else if (be32_gt(remote_id, timewait_info->work.remote_id))
			node = node->rb_right;
		else if (be64_lt(remote_ca_guid, timewait_info->remote_ca_guid))
			node = node->rb_left;
		else if (be64_gt(remote_ca_guid, timewait_info->remote_ca_guid))
			node = node->rb_right;
		else {
			res = cm_acquire_id(timewait_info->work.local_id,
					    timewait_info->work.remote_id);
			break;
		}
	}
	spin_unlock_irq(&cm.lock);
	return res;
}

static struct cm_timewait_info *
cm_insert_remote_qpn(struct cm_timewait_info *timewait_info)
{
	struct rb_node **link = &cm.remote_qp_table.rb_node;
	struct rb_node *parent = NULL;
	struct cm_timewait_info *cur_timewait_info;
	__be64 remote_ca_guid = timewait_info->remote_ca_guid;
	__be32 remote_qpn = timewait_info->remote_qpn;

	while (*link) {
		parent = *link;
		cur_timewait_info = rb_entry(parent, struct cm_timewait_info,
					     remote_qp_node);
		if (be32_lt(remote_qpn, cur_timewait_info->remote_qpn))
			link = &(*link)->rb_left;
		else if (be32_gt(remote_qpn, cur_timewait_info->remote_qpn))
			link = &(*link)->rb_right;
		else if (be64_lt(remote_ca_guid, cur_timewait_info->remote_ca_guid))
			link = &(*link)->rb_left;
		else if (be64_gt(remote_ca_guid, cur_timewait_info->remote_ca_guid))
			link = &(*link)->rb_right;
		else
			return cur_timewait_info;
	}
	timewait_info->inserted_remote_qp = 1;
	rb_link_node(&timewait_info->remote_qp_node, parent, link);
	rb_insert_color(&timewait_info->remote_qp_node, &cm.remote_qp_table);
	return NULL;
}

static struct cm_id_private *
cm_insert_remote_sidr(struct cm_id_private *cm_id_priv)
{
	struct rb_node **link = &cm.remote_sidr_table.rb_node;
	struct rb_node *parent = NULL;
	struct cm_id_private *cur_cm_id_priv;
	union ib_gid *port_gid = &cm_id_priv->av.dgid;
	__be32 remote_id = cm_id_priv->id.remote_id;

	while (*link) {
		parent = *link;
		cur_cm_id_priv = rb_entry(parent, struct cm_id_private,
					  sidr_id_node);
		if (be32_lt(remote_id, cur_cm_id_priv->id.remote_id))
			link = &(*link)->rb_left;
		else if (be32_gt(remote_id, cur_cm_id_priv->id.remote_id))
			link = &(*link)->rb_right;
		else {
			int cmp;

			cmp = memcmp(port_gid, &cur_cm_id_priv->av.dgid,
				     sizeof *port_gid);
			if (cmp < 0)
				link = &(*link)->rb_left;
			else if (cmp > 0)
				link = &(*link)->rb_right;
			else
				return cur_cm_id_priv;
		}
	}
	rb_link_node(&cm_id_priv->sidr_id_node, parent, link);
	rb_insert_color(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
	return NULL;
}

static struct cm_id_private *cm_alloc_id_priv(struct ib_device *device,
					      ib_cm_handler cm_handler,
					      void *context)
{
	struct cm_id_private *cm_id_priv;
	u32 id;
	int ret;

	cm_id_priv = kzalloc(sizeof *cm_id_priv, GFP_KERNEL);
	if (!cm_id_priv)
		return ERR_PTR(-ENOMEM);

	cm_id_priv->id.state = IB_CM_IDLE;
	cm_id_priv->id.device = device;
	cm_id_priv->id.cm_handler = cm_handler;
	cm_id_priv->id.context = context;
	cm_id_priv->id.remote_cm_qpn = 1;

	RB_CLEAR_NODE(&cm_id_priv->service_node);
	RB_CLEAR_NODE(&cm_id_priv->sidr_id_node);
	spin_lock_init(&cm_id_priv->lock);
	init_completion(&cm_id_priv->comp);
	INIT_LIST_HEAD(&cm_id_priv->work_list);
	INIT_LIST_HEAD(&cm_id_priv->prim_list);
	INIT_LIST_HEAD(&cm_id_priv->altr_list);
	atomic_set(&cm_id_priv->work_count, -1);
	refcount_set(&cm_id_priv->refcount, 1);

	ret = xa_alloc_cyclic(&cm.local_id_table, &id, NULL, xa_limit_32b,
			      &cm.local_id_next, GFP_KERNEL);
	if (ret < 0)
		goto error;
	cm_id_priv->id.local_id = (__force __be32)id ^ cm.random_id_operand;

	return cm_id_priv;

error:
	kfree(cm_id_priv);
	return ERR_PTR(ret);
}

/*
 * Make the ID visible to the MAD handlers and other threads that use the
 * xarray.
 */
static void cm_finalize_id(struct cm_id_private *cm_id_priv)
{
	xa_store(&cm.local_id_table, cm_local_id(cm_id_priv->id.local_id),
		 cm_id_priv, GFP_ATOMIC);
}

struct ib_cm_id *ib_create_cm_id(struct ib_device *device,
				 ib_cm_handler cm_handler,
				 void *context)
{
	struct cm_id_private *cm_id_priv;

	cm_id_priv = cm_alloc_id_priv(device, cm_handler, context);
	if (IS_ERR(cm_id_priv))
		return ERR_CAST(cm_id_priv);

	cm_finalize_id(cm_id_priv);
	return &cm_id_priv->id;
}
EXPORT_SYMBOL(ib_create_cm_id);

static struct cm_work *cm_dequeue_work(struct cm_id_private *cm_id_priv)
{
	struct cm_work *work;

	if (list_empty(&cm_id_priv->work_list))
		return NULL;

	work = list_entry(cm_id_priv->work_list.next, struct cm_work, list);
	list_del(&work->list);
	return work;
}

static void cm_free_work(struct cm_work *work)
{
	if (work->mad_recv_wc)
		ib_free_recv_mad(work->mad_recv_wc);
	kfree(work);
}

static void cm_queue_work_unlock(struct cm_id_private *cm_id_priv,
				 struct cm_work *work)
	__releases(&cm_id_priv->lock)
{
	bool immediate;

	/*
	 * To deliver the event to the user callback we have to drop the
	 * spinlock, however, we need to ensure that the user callback is
	 * single threaded and receives events in temporal order. If there
	 * are already events being processed then thread new events onto a
	 * list, the thread currently processing will pick them up.
	 */
	immediate = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!immediate) {
		list_add_tail(&work->list, &cm_id_priv->work_list);
		/*
		 * This routine always consumes the incoming reference. Once
		 * queued to the work_list then a reference is held by the
		 * thread currently running cm_process_work() and this
		 * reference is not needed.
		 */
		cm_deref_id(cm_id_priv);
	}
	spin_unlock_irq(&cm_id_priv->lock);

	if (immediate)
		cm_process_work(cm_id_priv, work);
}

static inline int cm_convert_to_ms(int iba_time)
{
	/* approximate conversion to ms from 4.096us x 2^iba_time */
	return 1 << max(iba_time - 8, 0);
}

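/*
 * Worked example (illustrative): for iba_time = 20 the exact value is
 * 4.096us * 2^20, about 4295 ms, while the approximation above returns
 * 1 << (20 - 8) = 4096 ms. Any iba_time <= 8 bottoms out at 1 ms.
 */
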
/*
 * calculate: 4.096x2^ack_timeout = 4.096x2^ack_delay + 2x4.096x2^life_time
 * Because of how ack_timeout is stored, adding one doubles the timeout.
 * To avoid large timeouts, select the max(ack_delay, life_time + 1), and
 * increment it (round up) only if the other is within 50%.
 */
static u8 cm_ack_timeout(u8 ca_ack_delay, u8 packet_life_time)
{
	int ack_timeout = packet_life_time + 1;

	if (ack_timeout >= ca_ack_delay)
		ack_timeout += (ca_ack_delay >= (ack_timeout - 1));
	else
		ack_timeout = ca_ack_delay +
			      (ack_timeout >= (ca_ack_delay - 1));

	return min(31, ack_timeout);
}

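/*
 * Worked example (illustrative): ca_ack_delay = 16 and packet_life_time = 14
 * start with ack_timeout = 15; since 15 < 16, the else branch computes
 * 16 + (15 >= 15) = 17, i.e. the result is rounded up because life_time + 1
 * is within one step of ack_delay. The min() clamps to 31, the largest
 * value representable in the 5-bit IBA timeout field.
 */
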
static void cm_remove_remote(struct cm_id_private *cm_id_priv)
{
	struct cm_timewait_info *timewait_info = cm_id_priv->timewait_info;

	if (timewait_info->inserted_remote_id) {
		rb_erase(&timewait_info->remote_id_node, &cm.remote_id_table);
		timewait_info->inserted_remote_id = 0;
	}

	if (timewait_info->inserted_remote_qp) {
		rb_erase(&timewait_info->remote_qp_node, &cm.remote_qp_table);
		timewait_info->inserted_remote_qp = 0;
	}
}

static struct cm_timewait_info *cm_create_timewait_info(__be32 local_id)
{
	struct cm_timewait_info *timewait_info;

	timewait_info = kzalloc(sizeof *timewait_info, GFP_KERNEL);
	if (!timewait_info)
		return ERR_PTR(-ENOMEM);

	timewait_info->work.local_id = local_id;
	INIT_DELAYED_WORK(&timewait_info->work.work, cm_work_handler);
	timewait_info->work.cm_event.event = IB_CM_TIMEWAIT_EXIT;
	return timewait_info;
}

static void cm_enter_timewait(struct cm_id_private *cm_id_priv)
{
	int wait_time;
	unsigned long flags;
	struct cm_device *cm_dev;

	lockdep_assert_held(&cm_id_priv->lock);

	cm_dev = ib_get_client_data(cm_id_priv->id.device, &cm_client);
	if (!cm_dev)
		return;

	spin_lock_irqsave(&cm.lock, flags);
	cm_remove_remote(cm_id_priv);
	list_add_tail(&cm_id_priv->timewait_info->list, &cm.timewait_list);
	spin_unlock_irqrestore(&cm.lock, flags);

	/*
	 * The cm_id could be destroyed by the user before we exit timewait.
	 * To protect against this, we search for the cm_id after exiting
	 * timewait before notifying the user that we've exited timewait.
	 */
	cm_id_priv->id.state = IB_CM_TIMEWAIT;
	wait_time = cm_convert_to_ms(cm_id_priv->av.timeout);

	/* Check if the device started its remove_one */
	spin_lock_irqsave(&cm.lock, flags);
	if (!cm_dev->going_down)
		queue_delayed_work(cm.wq, &cm_id_priv->timewait_info->work.work,
				   msecs_to_jiffies(wait_time));
	spin_unlock_irqrestore(&cm.lock, flags);

	/*
	 * The timewait_info is converted into a work and gets freed during
	 * cm_free_work() in cm_timewait_handler().
	 */
	BUILD_BUG_ON(offsetof(struct cm_timewait_info, work) != 0);
	cm_id_priv->timewait_info = NULL;
}

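/*
 * Illustrative note: the timewait duration derives from av.timeout, which
 * cm_init_av_by_path() set to packet_life_time + 1. For example, a path
 * with packet_life_time = 17 yields cm_convert_to_ms(18) = 1024 ms before
 * the delayed work runs and reports IB_CM_TIMEWAIT_EXIT.
 */
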
static void cm_reset_to_idle(struct cm_id_private *cm_id_priv)
{
	unsigned long flags;

	lockdep_assert_held(&cm_id_priv->lock);

	cm_id_priv->id.state = IB_CM_IDLE;
	if (cm_id_priv->timewait_info) {
		spin_lock_irqsave(&cm.lock, flags);
		cm_remove_remote(cm_id_priv);
		spin_unlock_irqrestore(&cm.lock, flags);
		kfree(cm_id_priv->timewait_info);
		cm_id_priv->timewait_info = NULL;
	}
}

static void cm_destroy_id(struct ib_cm_id *cm_id, int err)
{
	struct cm_id_private *cm_id_priv;
	struct cm_work *work;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irq(&cm_id_priv->lock);
retest:
	switch (cm_id->state) {
	case IB_CM_LISTEN:
		spin_lock(&cm.lock);
		if (--cm_id_priv->listen_sharecount > 0) {
			/* The id is still shared. */
			WARN_ON(refcount_read(&cm_id_priv->refcount) == 1);
			spin_unlock(&cm.lock);
			spin_unlock_irq(&cm_id_priv->lock);
			cm_deref_id(cm_id_priv);
			return;
		}
		cm_id->state = IB_CM_IDLE;
		rb_erase(&cm_id_priv->service_node, &cm.listen_service_table);
		RB_CLEAR_NODE(&cm_id_priv->service_node);
		spin_unlock(&cm.lock);
		break;
	case IB_CM_SIDR_REQ_SENT:
		cm_id->state = IB_CM_IDLE;
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		break;
	case IB_CM_SIDR_REQ_RCVD:
		cm_send_sidr_rep_locked(cm_id_priv,
					&(struct ib_cm_sidr_rep_param){
						.status = IB_SIDR_REJECT });
		/* cm_send_sidr_rep_locked will not move to IDLE if it fails */
		cm_id->state = IB_CM_IDLE;
		break;
	case IB_CM_REQ_SENT:
	case IB_CM_MRA_REQ_RCVD:
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		cm_send_rej_locked(cm_id_priv, IB_CM_REJ_TIMEOUT,
				   &cm_id_priv->id.device->node_guid,
				   sizeof(cm_id_priv->id.device->node_guid),
				   NULL, 0);
		break;
	case IB_CM_REQ_RCVD:
		if (err == -ENOMEM) {
			/* Do not reject to allow future retries. */
			cm_reset_to_idle(cm_id_priv);
		} else {
			cm_send_rej_locked(cm_id_priv,
					   IB_CM_REJ_CONSUMER_DEFINED, NULL, 0,
					   NULL, 0);
		}
		break;
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		cm_send_rej_locked(cm_id_priv, IB_CM_REJ_CONSUMER_DEFINED, NULL,
				   0, NULL, 0);
		goto retest;
	case IB_CM_MRA_REQ_SENT:
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
		cm_send_rej_locked(cm_id_priv, IB_CM_REJ_CONSUMER_DEFINED, NULL,
				   0, NULL, 0);
		break;
	case IB_CM_ESTABLISHED:
		if (cm_id_priv->qp_type == IB_QPT_XRC_TGT) {
			cm_id->state = IB_CM_IDLE;
			break;
		}
		cm_send_dreq_locked(cm_id_priv, NULL, 0);
		goto retest;
	case IB_CM_DREQ_SENT:
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		cm_enter_timewait(cm_id_priv);
		goto retest;
	case IB_CM_DREQ_RCVD:
		cm_send_drep_locked(cm_id_priv, NULL, 0);
		WARN_ON(cm_id->state != IB_CM_TIMEWAIT);
		goto retest;
	case IB_CM_TIMEWAIT:
		/*
		 * The cm_acquire_id in cm_timewait_handler will stop working
		 * once we do xa_erase below, so just move to idle here for
		 * consistency.
		 */
		cm_id->state = IB_CM_IDLE;
		break;
	case IB_CM_IDLE:
		break;
	}
	WARN_ON(cm_id->state != IB_CM_IDLE);

	spin_lock(&cm.lock);
	/* Required for cleanup paths related to cm_req_handler() */
	if (cm_id_priv->timewait_info) {
		cm_remove_remote(cm_id_priv);
		kfree(cm_id_priv->timewait_info);
		cm_id_priv->timewait_info = NULL;
	}
	if (!list_empty(&cm_id_priv->altr_list) &&
	    (!cm_id_priv->altr_send_port_not_ready))
		list_del(&cm_id_priv->altr_list);
	if (!list_empty(&cm_id_priv->prim_list) &&
	    (!cm_id_priv->prim_send_port_not_ready))
		list_del(&cm_id_priv->prim_list);
	WARN_ON(cm_id_priv->listen_sharecount);
	WARN_ON(!RB_EMPTY_NODE(&cm_id_priv->service_node));
	if (!RB_EMPTY_NODE(&cm_id_priv->sidr_id_node))
		rb_erase(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
	spin_unlock(&cm.lock);
	spin_unlock_irq(&cm_id_priv->lock);

	xa_erase(&cm.local_id_table, cm_local_id(cm_id->local_id));
	cm_deref_id(cm_id_priv);
	wait_for_completion(&cm_id_priv->comp);
	while ((work = cm_dequeue_work(cm_id_priv)) != NULL)
		cm_free_work(work);

	rdma_destroy_ah_attr(&cm_id_priv->av.ah_attr);
	rdma_destroy_ah_attr(&cm_id_priv->alt_av.ah_attr);
	kfree(cm_id_priv->private_data);
	kfree_rcu(cm_id_priv, rcu);
}

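/*
 * Illustrative note on the teardown ordering above: xa_erase() first makes
 * the ID invisible to cm_acquire_id(), the cm_deref_id() plus
 * wait_for_completion() pair then waits for all outstanding references to
 * drop, and only afterwards are queued works, AH attributes and private
 * data released. The final free is RCU-deferred (kfree_rcu) because
 * cm_acquire_id() dereferences the xarray entry under rcu_read_lock().
 */
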
void ib_destroy_cm_id(struct ib_cm_id *cm_id)
{
	cm_destroy_id(cm_id, 0);
}
EXPORT_SYMBOL(ib_destroy_cm_id);

static int cm_init_listen(struct cm_id_private *cm_id_priv, __be64 service_id,
			  __be64 service_mask)
{
	service_mask = service_mask ? service_mask : ~cpu_to_be64(0);
	service_id &= service_mask;
	if ((service_id & IB_SERVICE_ID_AGN_MASK) == IB_CM_ASSIGN_SERVICE_ID &&
	    (service_id != IB_CM_ASSIGN_SERVICE_ID))
		return -EINVAL;

	if (service_id == IB_CM_ASSIGN_SERVICE_ID) {
		cm_id_priv->id.service_id = cpu_to_be64(cm.listen_service_id++);
		cm_id_priv->id.service_mask = ~cpu_to_be64(0);
	} else {
		cm_id_priv->id.service_id = service_id;
		cm_id_priv->id.service_mask = service_mask;
	}
	return 0;
}

/**
 * ib_cm_listen - Initiates listening on the specified service ID for
 *   connection and service ID resolution requests.
 * @cm_id: Connection identifier associated with the listen request.
 * @service_id: Service identifier matched against incoming connection
 *   and service ID resolution requests. The service ID should be specified
 *   in network-byte order. If set to IB_CM_ASSIGN_SERVICE_ID, the CM will
 *   assign a service ID to the caller.
 * @service_mask: Mask applied to service ID used to listen across a
 *   range of service IDs. If set to 0, the service ID is matched
 *   exactly. This parameter is ignored if %service_id is set to
 *   IB_CM_ASSIGN_SERVICE_ID.
 */
int ib_cm_listen(struct ib_cm_id *cm_id, __be64 service_id,
		 __be64 service_mask)
{
	struct cm_id_private *cm_id_priv =
		container_of(cm_id, struct cm_id_private, id);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id_priv->id.state != IB_CM_IDLE) {
		ret = -EINVAL;
		goto out;
	}

	ret = cm_init_listen(cm_id_priv, service_id, service_mask);
	if (ret)
		goto out;

	if (!cm_insert_listen(cm_id_priv, NULL)) {
		ret = -EBUSY;
		goto out;
	}

	cm_id_priv->id.state = IB_CM_LISTEN;
	ret = 0;

out:
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(ib_cm_listen);

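/*
 * Minimal usage sketch (illustrative only, not part of the driver); the
 * handler body and service ID value are made-up placeholders:
 *
 *	static int my_cm_handler(struct ib_cm_id *cm_id,
 *				 const struct ib_cm_event *event)
 *	{
 *		return 0;	// nonzero would destroy the cm_id
 *	}
 *
 *	struct ib_cm_id *id = ib_create_cm_id(device, my_cm_handler, NULL);
 *
 *	if (!IS_ERR(id) && !ib_cm_listen(id, cpu_to_be64(0x1234), 0))
 *		...;	// listening; tear down later with ib_destroy_cm_id(id)
 */
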
/**
 * ib_cm_insert_listen - Create a new listening ib_cm_id and listen on
 *   the given service ID.
 *
 * If there's an existing ID listening on that same device and service ID,
 * return it.
 *
 * @device: Device associated with the cm_id. All related communication will
 *   be associated with the specified device.
 * @cm_handler: Callback invoked to notify the user of CM events.
 * @service_id: Service identifier matched against incoming connection
 *   and service ID resolution requests. The service ID should be specified
 *   in network-byte order. If set to IB_CM_ASSIGN_SERVICE_ID, the CM will
 *   assign a service ID to the caller.
 *
 * Callers should call ib_destroy_cm_id when done with the listener ID.
 */
struct ib_cm_id *ib_cm_insert_listen(struct ib_device *device,
				     ib_cm_handler cm_handler,
				     __be64 service_id)
{
	struct cm_id_private *listen_id_priv;
	struct cm_id_private *cm_id_priv;
	int err;

	/* Create an ID in advance, since the creation may sleep */
	cm_id_priv = cm_alloc_id_priv(device, cm_handler, NULL);
	if (IS_ERR(cm_id_priv))
		return ERR_CAST(cm_id_priv);

	err = cm_init_listen(cm_id_priv, service_id, 0);
	if (err) {
		ib_destroy_cm_id(&cm_id_priv->id);
		return ERR_PTR(err);
	}

	spin_lock_irq(&cm_id_priv->lock);
	listen_id_priv = cm_insert_listen(cm_id_priv, cm_handler);
	if (listen_id_priv != cm_id_priv) {
		spin_unlock_irq(&cm_id_priv->lock);
		ib_destroy_cm_id(&cm_id_priv->id);
		if (!listen_id_priv)
			return ERR_PTR(-EINVAL);
		return &listen_id_priv->id;
	}
	cm_id_priv->id.state = IB_CM_LISTEN;
	spin_unlock_irq(&cm_id_priv->lock);

	/*
	 * A listen ID does not need to be in the xarray since it does not
	 * receive mads, is not placed in the remote_id or remote_qpn rbtree,
	 * and does not enter timewait.
	 */

	return &cm_id_priv->id;
}
EXPORT_SYMBOL(ib_cm_insert_listen);

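/*
 * Illustrative contrast with ib_cm_listen() (note added for clarity): this
 * variant both allocates the ID and de-duplicates listeners, so two callers
 * passing the same device, handler and service ID share one refcounted
 * ib_cm_id (tracked via listen_sharecount), while a collision with a
 * different handler or context yields ERR_PTR(-EINVAL). Every successful
 * caller must still balance with ib_destroy_cm_id().
 */
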
static __be64 cm_form_tid(struct cm_id_private *cm_id_priv)
{
	u64 hi_tid, low_tid;

	hi_tid = ((u64) cm_id_priv->av.port->mad_agent->hi_tid) << 32;
	low_tid = (u64)cm_id_priv->id.local_id;
	return cpu_to_be64(hi_tid | low_tid);
}

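/*
 * Illustrative note: the transaction ID packs the MAD agent's hi_tid into
 * the upper 32 bits and the (randomized) local communication ID into the
 * lower 32 bits, so a reply's TID identifies both the owning agent and the
 * cm_id it belongs to.
 */
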
static void cm_format_mad_hdr(struct ib_mad_hdr *hdr,
			      __be16 attr_id, __be64 tid)
{
	hdr->base_version  = IB_MGMT_BASE_VERSION;
	hdr->mgmt_class	   = IB_MGMT_CLASS_CM;
	hdr->class_version = IB_CM_CLASS_VERSION;
	hdr->method	   = IB_MGMT_METHOD_SEND;
	hdr->attr_id	   = attr_id;
	hdr->tid	   = tid;
}

static void cm_format_mad_ece_hdr(struct ib_mad_hdr *hdr, __be16 attr_id,
				  __be64 tid, u32 attr_mod)
{
	cm_format_mad_hdr(hdr, attr_id, tid);
	hdr->attr_mod = cpu_to_be32(attr_mod);
}

static void cm_format_req(struct cm_req_msg *req_msg,
			  struct cm_id_private *cm_id_priv,
			  struct ib_cm_req_param *param)
{
	struct sa_path_rec *pri_path = param->primary_path;
	struct sa_path_rec *alt_path = param->alternate_path;
	bool pri_ext = false;

	if (pri_path->rec_type == SA_PATH_REC_TYPE_OPA)
		pri_ext = opa_is_extended_lid(pri_path->opa.dlid,
					      pri_path->opa.slid);

	cm_format_mad_ece_hdr(&req_msg->hdr, CM_REQ_ATTR_ID,
			      cm_form_tid(cm_id_priv), param->ece.attr_mod);

	IBA_SET(CM_REQ_LOCAL_COMM_ID, req_msg,
		be32_to_cpu(cm_id_priv->id.local_id));
	IBA_SET(CM_REQ_SERVICE_ID, req_msg, be64_to_cpu(param->service_id));
	IBA_SET(CM_REQ_LOCAL_CA_GUID, req_msg,
		be64_to_cpu(cm_id_priv->id.device->node_guid));
	IBA_SET(CM_REQ_LOCAL_QPN, req_msg, param->qp_num);
	IBA_SET(CM_REQ_INITIATOR_DEPTH, req_msg, param->initiator_depth);
	IBA_SET(CM_REQ_REMOTE_CM_RESPONSE_TIMEOUT, req_msg,
		param->remote_cm_response_timeout);
	cm_req_set_qp_type(req_msg, param->qp_type);
	IBA_SET(CM_REQ_END_TO_END_FLOW_CONTROL, req_msg, param->flow_control);
	IBA_SET(CM_REQ_STARTING_PSN, req_msg, param->starting_psn);
	IBA_SET(CM_REQ_LOCAL_CM_RESPONSE_TIMEOUT, req_msg,
		param->local_cm_response_timeout);
	IBA_SET(CM_REQ_PARTITION_KEY, req_msg,
		be16_to_cpu(param->primary_path->pkey));
	IBA_SET(CM_REQ_PATH_PACKET_PAYLOAD_MTU, req_msg,
		param->primary_path->mtu);
	IBA_SET(CM_REQ_MAX_CM_RETRIES, req_msg, param->max_cm_retries);

	if (param->qp_type != IB_QPT_XRC_INI) {
		IBA_SET(CM_REQ_RESPONDER_RESOURCES, req_msg,
			param->responder_resources);
		IBA_SET(CM_REQ_RETRY_COUNT, req_msg, param->retry_count);
		IBA_SET(CM_REQ_RNR_RETRY_COUNT, req_msg,
			param->rnr_retry_count);
		IBA_SET(CM_REQ_SRQ, req_msg, param->srq);
	}

	*IBA_GET_MEM_PTR(CM_REQ_PRIMARY_LOCAL_PORT_GID, req_msg) =
		pri_path->sgid;
	*IBA_GET_MEM_PTR(CM_REQ_PRIMARY_REMOTE_PORT_GID, req_msg) =
		pri_path->dgid;
	if (pri_ext) {
		IBA_GET_MEM_PTR(CM_REQ_PRIMARY_LOCAL_PORT_GID, req_msg)
			->global.interface_id =
			OPA_MAKE_ID(be32_to_cpu(pri_path->opa.slid));
		IBA_GET_MEM_PTR(CM_REQ_PRIMARY_REMOTE_PORT_GID, req_msg)
			->global.interface_id =
			OPA_MAKE_ID(be32_to_cpu(pri_path->opa.dlid));
	}
	if (pri_path->hop_limit <= 1) {
		IBA_SET(CM_REQ_PRIMARY_LOCAL_PORT_LID, req_msg,
			be16_to_cpu(pri_ext ? 0 :
					      htons(ntohl(sa_path_get_slid(
						      pri_path)))));
		IBA_SET(CM_REQ_PRIMARY_REMOTE_PORT_LID, req_msg,
			be16_to_cpu(pri_ext ? 0 :
					      htons(ntohl(sa_path_get_dlid(
						      pri_path)))));
	} else {
		/* Work-around until there's a way to obtain remote LID info */
		IBA_SET(CM_REQ_PRIMARY_LOCAL_PORT_LID, req_msg,
			be16_to_cpu(IB_LID_PERMISSIVE));
		IBA_SET(CM_REQ_PRIMARY_REMOTE_PORT_LID, req_msg,
			be16_to_cpu(IB_LID_PERMISSIVE));
	}
	IBA_SET(CM_REQ_PRIMARY_FLOW_LABEL, req_msg,
		be32_to_cpu(pri_path->flow_label));
	IBA_SET(CM_REQ_PRIMARY_PACKET_RATE, req_msg, pri_path->rate);
	IBA_SET(CM_REQ_PRIMARY_TRAFFIC_CLASS, req_msg, pri_path->traffic_class);
	IBA_SET(CM_REQ_PRIMARY_HOP_LIMIT, req_msg, pri_path->hop_limit);
	IBA_SET(CM_REQ_PRIMARY_SL, req_msg, pri_path->sl);
	IBA_SET(CM_REQ_PRIMARY_SUBNET_LOCAL, req_msg,
		(pri_path->hop_limit <= 1));
	IBA_SET(CM_REQ_PRIMARY_LOCAL_ACK_TIMEOUT, req_msg,
		cm_ack_timeout(cm_id_priv->av.port->cm_dev->ack_delay,
			       pri_path->packet_life_time));

	if (alt_path) {
		bool alt_ext = false;

		if (alt_path->rec_type == SA_PATH_REC_TYPE_OPA)
			alt_ext = opa_is_extended_lid(alt_path->opa.dlid,
						      alt_path->opa.slid);

		*IBA_GET_MEM_PTR(CM_REQ_ALTERNATE_LOCAL_PORT_GID, req_msg) =
			alt_path->sgid;
		*IBA_GET_MEM_PTR(CM_REQ_ALTERNATE_REMOTE_PORT_GID, req_msg) =
			alt_path->dgid;
		if (alt_ext) {
			IBA_GET_MEM_PTR(CM_REQ_ALTERNATE_LOCAL_PORT_GID,
					req_msg)
				->global.interface_id =
				OPA_MAKE_ID(be32_to_cpu(alt_path->opa.slid));
			IBA_GET_MEM_PTR(CM_REQ_ALTERNATE_REMOTE_PORT_GID,
					req_msg)
				->global.interface_id =
				OPA_MAKE_ID(be32_to_cpu(alt_path->opa.dlid));
		}
		if (alt_path->hop_limit <= 1) {
			IBA_SET(CM_REQ_ALTERNATE_LOCAL_PORT_LID, req_msg,
				be16_to_cpu(
					alt_ext ? 0 :
						  htons(ntohl(sa_path_get_slid(
							  alt_path)))));
			IBA_SET(CM_REQ_ALTERNATE_REMOTE_PORT_LID, req_msg,
				be16_to_cpu(
					alt_ext ? 0 :
						  htons(ntohl(sa_path_get_dlid(
							  alt_path)))));
		} else {
			IBA_SET(CM_REQ_ALTERNATE_LOCAL_PORT_LID, req_msg,
				be16_to_cpu(IB_LID_PERMISSIVE));
			IBA_SET(CM_REQ_ALTERNATE_REMOTE_PORT_LID, req_msg,
				be16_to_cpu(IB_LID_PERMISSIVE));
		}
		IBA_SET(CM_REQ_ALTERNATE_FLOW_LABEL, req_msg,
			be32_to_cpu(alt_path->flow_label));
		IBA_SET(CM_REQ_ALTERNATE_PACKET_RATE, req_msg, alt_path->rate);
		IBA_SET(CM_REQ_ALTERNATE_TRAFFIC_CLASS, req_msg,
			alt_path->traffic_class);
		IBA_SET(CM_REQ_ALTERNATE_HOP_LIMIT, req_msg,
			alt_path->hop_limit);
		IBA_SET(CM_REQ_ALTERNATE_SL, req_msg, alt_path->sl);
		IBA_SET(CM_REQ_ALTERNATE_SUBNET_LOCAL, req_msg,
			(alt_path->hop_limit <= 1));
		IBA_SET(CM_REQ_ALTERNATE_LOCAL_ACK_TIMEOUT, req_msg,
			cm_ack_timeout(cm_id_priv->av.port->cm_dev->ack_delay,
				       alt_path->packet_life_time));
	}
	IBA_SET(CM_REQ_VENDOR_ID, req_msg, param->ece.vendor_id);

	if (param->private_data && param->private_data_len)
		IBA_SET_MEM(CM_REQ_PRIVATE_DATA, req_msg, param->private_data,
			    param->private_data_len);
}

static int cm_validate_req_param(struct ib_cm_req_param *param)
{
	if (!param->primary_path)
		return -EINVAL;

	if (param->qp_type != IB_QPT_RC && param->qp_type != IB_QPT_UC &&
	    param->qp_type != IB_QPT_XRC_INI)
		return -EINVAL;

	if (param->private_data &&
	    param->private_data_len > IB_CM_REQ_PRIVATE_DATA_SIZE)
		return -EINVAL;

	if (param->alternate_path &&
	    (param->alternate_path->pkey != param->primary_path->pkey ||
	     param->alternate_path->mtu != param->primary_path->mtu))
		return -EINVAL;

	return 0;
}

int ib_send_cm_req(struct ib_cm_id *cm_id,
		   struct ib_cm_req_param *param)
{
	struct cm_id_private *cm_id_priv;
	struct cm_req_msg *req_msg;
	unsigned long flags;
	int ret;

	ret = cm_validate_req_param(param);
	if (ret)
		return ret;

	/* Verify that we're not in timewait. */
	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_IDLE || WARN_ON(cm_id_priv->timewait_info)) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		ret = -EINVAL;
		goto out;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	cm_id_priv->timewait_info = cm_create_timewait_info(cm_id_priv->
							    id.local_id);
	if (IS_ERR(cm_id_priv->timewait_info)) {
		ret = PTR_ERR(cm_id_priv->timewait_info);
		cm_id_priv->timewait_info = NULL;
		goto out;
	}

	ret = cm_init_av_by_path(param->primary_path,
				 param->ppath_sgid_attr, &cm_id_priv->av,
				 cm_id_priv);
	if (ret)
		goto out;
	if (param->alternate_path) {
		ret = cm_init_av_by_path(param->alternate_path, NULL,
					 &cm_id_priv->alt_av, cm_id_priv);
		if (ret)
			goto out;
	}
	cm_id->service_id = param->service_id;
	cm_id->service_mask = ~cpu_to_be64(0);
	cm_id_priv->timeout_ms = cm_convert_to_ms(
				    param->primary_path->packet_life_time) * 2 +
				 cm_convert_to_ms(
				    param->remote_cm_response_timeout);
	cm_id_priv->max_cm_retries = param->max_cm_retries;
	cm_id_priv->initiator_depth = param->initiator_depth;
	cm_id_priv->responder_resources = param->responder_resources;
	cm_id_priv->retry_count = param->retry_count;
	cm_id_priv->path_mtu = param->primary_path->mtu;
	cm_id_priv->pkey = param->primary_path->pkey;
	cm_id_priv->qp_type = param->qp_type;

	ret = cm_alloc_msg(cm_id_priv, &cm_id_priv->msg);
	if (ret)
		goto out;

	req_msg = (struct cm_req_msg *) cm_id_priv->msg->mad;
	cm_format_req(req_msg, cm_id_priv, param);
	cm_id_priv->tid = req_msg->hdr.tid;
	cm_id_priv->msg->timeout_ms = cm_id_priv->timeout_ms;
	cm_id_priv->msg->context[1] = (void *) (unsigned long) IB_CM_REQ_SENT;

	cm_id_priv->local_qpn = cpu_to_be32(IBA_GET(CM_REQ_LOCAL_QPN, req_msg));
	cm_id_priv->rq_psn = cpu_to_be32(IBA_GET(CM_REQ_STARTING_PSN, req_msg));

	trace_icm_send_req(&cm_id_priv->id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	ret = ib_post_send_mad(cm_id_priv->msg, NULL);
	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		goto error2;
	}
	BUG_ON(cm_id->state != IB_CM_IDLE);
	cm_id->state = IB_CM_REQ_SENT;
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return 0;

error2:	cm_free_msg(cm_id_priv->msg);
out:	return ret;
}
EXPORT_SYMBOL(ib_send_cm_req);

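/*
 * Minimal parameter sketch (illustrative only): cm_validate_req_param()
 * above requires a primary path and an RC/UC/XRC_INI QP type. A
 * hypothetical RC caller might fill:
 *
 *	struct ib_cm_req_param param = {
 *		.primary_path		= &path_rec,	// e.g. from an SA query
 *		.service_id		= cpu_to_be64(0x1234),
 *		.qp_num			= qp->qp_num,
 *		.qp_type		= IB_QPT_RC,
 *		.starting_psn		= prandom_u32() & 0xffffff,
 *		.responder_resources	= 4,
 *		.initiator_depth	= 4,
 *		.retry_count		= 7,
 *		.rnr_retry_count	= 7,
 *		.max_cm_retries		= 15,
 *	};
 *
 *	ret = ib_send_cm_req(id, &param);
 */
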
static int cm_issue_rej(struct cm_port *port,
			struct ib_mad_recv_wc *mad_recv_wc,
			enum ib_cm_rej_reason reason,
			enum cm_msg_response msg_rejected,
			void *ari, u8 ari_length)
{
	struct ib_mad_send_buf *msg = NULL;
	struct cm_rej_msg *rej_msg, *rcv_msg;
	int ret;

	ret = cm_alloc_response_msg(port, mad_recv_wc, &msg);
	if (ret)
		return ret;

	/* We just need common CM header information. Cast to any message. */
	rcv_msg = (struct cm_rej_msg *) mad_recv_wc->recv_buf.mad;
	rej_msg = (struct cm_rej_msg *) msg->mad;

	cm_format_mad_hdr(&rej_msg->hdr, CM_REJ_ATTR_ID, rcv_msg->hdr.tid);
	IBA_SET(CM_REJ_REMOTE_COMM_ID, rej_msg,
		IBA_GET(CM_REJ_LOCAL_COMM_ID, rcv_msg));
	IBA_SET(CM_REJ_LOCAL_COMM_ID, rej_msg,
		IBA_GET(CM_REJ_REMOTE_COMM_ID, rcv_msg));
	IBA_SET(CM_REJ_MESSAGE_REJECTED, rej_msg, msg_rejected);
	IBA_SET(CM_REJ_REASON, rej_msg, reason);

	if (ari && ari_length) {
		IBA_SET(CM_REJ_REJECTED_INFO_LENGTH, rej_msg, ari_length);
		IBA_SET_MEM(CM_REJ_ARI, rej_msg, ari, ari_length);
	}

	trace_icm_issue_rej(
		IBA_GET(CM_REJ_LOCAL_COMM_ID, rcv_msg),
		IBA_GET(CM_REJ_REMOTE_COMM_ID, rcv_msg));
	ret = ib_post_send_mad(msg, NULL);
	if (ret)
		cm_free_msg(msg);

	return ret;
}

static bool cm_req_has_alt_path(struct cm_req_msg *req_msg)
{
	return ((cpu_to_be16(
			IBA_GET(CM_REQ_ALTERNATE_LOCAL_PORT_LID, req_msg))) ||
		(ib_is_opa_gid(IBA_GET_MEM_PTR(CM_REQ_ALTERNATE_LOCAL_PORT_GID,
					       req_msg))));
}

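/*
 * Illustrative note: a REQ that carries no alternate path leaves the
 * alternate LID and GID fields zeroed, so either a non-zero alternate local
 * LID or an OPA extended GID is treated as "alternate path present".
 */
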
static void cm_path_set_rec_type(struct ib_device *ib_device, u8 port_num,
				 struct sa_path_rec *path, union ib_gid *gid)
{
	if (ib_is_opa_gid(gid) && rdma_cap_opa_ah(ib_device, port_num))
		path->rec_type = SA_PATH_REC_TYPE_OPA;
	else
		path->rec_type = SA_PATH_REC_TYPE_IB;
}

static void cm_format_path_lid_from_req(struct cm_req_msg *req_msg,
					struct sa_path_rec *primary_path,
					struct sa_path_rec *alt_path,
					struct ib_wc *wc)
{
	u32 lid;

	if (primary_path->rec_type != SA_PATH_REC_TYPE_OPA) {
		sa_path_set_dlid(primary_path, wc->slid);
		sa_path_set_slid(primary_path,
				 IBA_GET(CM_REQ_PRIMARY_REMOTE_PORT_LID,
					 req_msg));
	} else {
		lid = opa_get_lid_from_gid(IBA_GET_MEM_PTR(
			CM_REQ_PRIMARY_LOCAL_PORT_GID, req_msg));
		sa_path_set_dlid(primary_path, lid);

		lid = opa_get_lid_from_gid(IBA_GET_MEM_PTR(
			CM_REQ_PRIMARY_REMOTE_PORT_GID, req_msg));
		sa_path_set_slid(primary_path, lid);
	}

	if (!cm_req_has_alt_path(req_msg))
		return;

	if (alt_path->rec_type != SA_PATH_REC_TYPE_OPA) {
		sa_path_set_dlid(alt_path,
				 IBA_GET(CM_REQ_ALTERNATE_LOCAL_PORT_LID,
					 req_msg));
		sa_path_set_slid(alt_path,
				 IBA_GET(CM_REQ_ALTERNATE_REMOTE_PORT_LID,
					 req_msg));
	} else {
		lid = opa_get_lid_from_gid(IBA_GET_MEM_PTR(
			CM_REQ_ALTERNATE_LOCAL_PORT_GID, req_msg));
		sa_path_set_dlid(alt_path, lid);

		lid = opa_get_lid_from_gid(IBA_GET_MEM_PTR(
			CM_REQ_ALTERNATE_REMOTE_PORT_GID, req_msg));
		sa_path_set_slid(alt_path, lid);
	}
}

static void cm_format_paths_from_req(struct cm_req_msg *req_msg,
				     struct sa_path_rec *primary_path,
				     struct sa_path_rec *alt_path,
				     struct ib_wc *wc)
{
	primary_path->dgid =
		*IBA_GET_MEM_PTR(CM_REQ_PRIMARY_LOCAL_PORT_GID, req_msg);
	primary_path->sgid =
		*IBA_GET_MEM_PTR(CM_REQ_PRIMARY_REMOTE_PORT_GID, req_msg);
	primary_path->flow_label =
		cpu_to_be32(IBA_GET(CM_REQ_PRIMARY_FLOW_LABEL, req_msg));
	primary_path->hop_limit = IBA_GET(CM_REQ_PRIMARY_HOP_LIMIT, req_msg);
	primary_path->traffic_class =
		IBA_GET(CM_REQ_PRIMARY_TRAFFIC_CLASS, req_msg);
	primary_path->reversible = 1;
	primary_path->pkey =
		cpu_to_be16(IBA_GET(CM_REQ_PARTITION_KEY, req_msg));
	primary_path->sl = IBA_GET(CM_REQ_PRIMARY_SL, req_msg);
	primary_path->mtu_selector = IB_SA_EQ;
	primary_path->mtu = IBA_GET(CM_REQ_PATH_PACKET_PAYLOAD_MTU, req_msg);
	primary_path->rate_selector = IB_SA_EQ;
	primary_path->rate = IBA_GET(CM_REQ_PRIMARY_PACKET_RATE, req_msg);
	primary_path->packet_life_time_selector = IB_SA_EQ;
	primary_path->packet_life_time =
		IBA_GET(CM_REQ_PRIMARY_LOCAL_ACK_TIMEOUT, req_msg);
	primary_path->packet_life_time -= (primary_path->packet_life_time > 0);
	primary_path->service_id =
		cpu_to_be64(IBA_GET(CM_REQ_SERVICE_ID, req_msg));
	if (sa_path_is_roce(primary_path))
		primary_path->roce.route_resolved = false;

	if (cm_req_has_alt_path(req_msg)) {
		alt_path->dgid = *IBA_GET_MEM_PTR(
			CM_REQ_ALTERNATE_LOCAL_PORT_GID, req_msg);
		alt_path->sgid = *IBA_GET_MEM_PTR(
			CM_REQ_ALTERNATE_REMOTE_PORT_GID, req_msg);
		alt_path->flow_label = cpu_to_be32(
			IBA_GET(CM_REQ_ALTERNATE_FLOW_LABEL, req_msg));
		alt_path->hop_limit =
			IBA_GET(CM_REQ_ALTERNATE_HOP_LIMIT, req_msg);
		alt_path->traffic_class =
			IBA_GET(CM_REQ_ALTERNATE_TRAFFIC_CLASS, req_msg);
		alt_path->reversible = 1;
		alt_path->pkey =
			cpu_to_be16(IBA_GET(CM_REQ_PARTITION_KEY, req_msg));
		alt_path->sl = IBA_GET(CM_REQ_ALTERNATE_SL, req_msg);
		alt_path->mtu_selector = IB_SA_EQ;
		alt_path->mtu =
			IBA_GET(CM_REQ_PATH_PACKET_PAYLOAD_MTU, req_msg);
		alt_path->rate_selector = IB_SA_EQ;
		alt_path->rate = IBA_GET(CM_REQ_ALTERNATE_PACKET_RATE, req_msg);
		alt_path->packet_life_time_selector = IB_SA_EQ;
		alt_path->packet_life_time =
			IBA_GET(CM_REQ_ALTERNATE_LOCAL_ACK_TIMEOUT, req_msg);
		alt_path->packet_life_time -= (alt_path->packet_life_time > 0);
		alt_path->service_id =
			cpu_to_be64(IBA_GET(CM_REQ_SERVICE_ID, req_msg));

		if (sa_path_is_roce(alt_path))
			alt_path->roce.route_resolved = false;
	}
	cm_format_path_lid_from_req(req_msg, primary_path, alt_path, wc);
}

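/*
 * Illustrative note: the REQ fields are written from the sender's point of
 * view, so the receiver-side path records built above swap roles: the
 * sender's local GID becomes our dgid and the sender's remote GID (i.e. us)
 * becomes our sgid.
 */
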
static u16 cm_get_bth_pkey(struct cm_work *work)
{
	struct ib_device *ib_dev = work->port->cm_dev->ib_device;
	u8 port_num = work->port->port_num;
	u16 pkey_index = work->mad_recv_wc->wc->pkey_index;
	u16 pkey;
	int ret;

	ret = ib_get_cached_pkey(ib_dev, port_num, pkey_index, &pkey);
	if (ret) {
		dev_warn_ratelimited(&ib_dev->dev, "ib_cm: Couldn't retrieve pkey for incoming request (port %d, pkey index %d). %d\n",
				     port_num, pkey_index, ret);
		return 0;
	}

	return pkey;
}

/**
 * Convert OPA SGID to IB SGID
 * ULPs (such as IPoIB) do not understand OPA GIDs and will
 * reject them as the local_gid will not match the sgid. Therefore,
 * change the pathrec's SGID to an IB SGID.
 *
 * @work: Work completion
 * @path: Path record
 */
static void cm_opa_to_ib_sgid(struct cm_work *work,
			      struct sa_path_rec *path)
{
	struct ib_device *dev = work->port->cm_dev->ib_device;
	u8 port_num = work->port->port_num;

	if (rdma_cap_opa_ah(dev, port_num) &&
	    (ib_is_opa_gid(&path->sgid))) {
		union ib_gid sgid;

		if (rdma_query_gid(dev, port_num, 0, &sgid)) {
			dev_warn(&dev->dev,
				 "Error updating sgid in CM request\n");
			return;
		}

		path->sgid = sgid;
	}
}

static void cm_format_req_event(struct cm_work *work,
				struct cm_id_private *cm_id_priv,
				struct ib_cm_id *listen_id)
{
	struct cm_req_msg *req_msg;
	struct ib_cm_req_event_param *param;

	req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;
	param = &work->cm_event.param.req_rcvd;
	param->listen_id = listen_id;
	param->bth_pkey = cm_get_bth_pkey(work);
	param->port = cm_id_priv->av.port->port_num;
	param->primary_path = &work->path[0];
	cm_opa_to_ib_sgid(work, param->primary_path);
	if (cm_req_has_alt_path(req_msg)) {
		param->alternate_path = &work->path[1];
		cm_opa_to_ib_sgid(work, param->alternate_path);
	} else {
		param->alternate_path = NULL;
	}
	param->remote_ca_guid =
		cpu_to_be64(IBA_GET(CM_REQ_LOCAL_CA_GUID, req_msg));
	param->remote_qkey = IBA_GET(CM_REQ_LOCAL_Q_KEY, req_msg);
	param->remote_qpn = IBA_GET(CM_REQ_LOCAL_QPN, req_msg);
	param->qp_type = cm_req_get_qp_type(req_msg);
	param->starting_psn = IBA_GET(CM_REQ_STARTING_PSN, req_msg);
	param->responder_resources = IBA_GET(CM_REQ_INITIATOR_DEPTH, req_msg);
	param->initiator_depth = IBA_GET(CM_REQ_RESPONDER_RESOURCES, req_msg);
	param->local_cm_response_timeout =
		IBA_GET(CM_REQ_REMOTE_CM_RESPONSE_TIMEOUT, req_msg);
	param->flow_control = IBA_GET(CM_REQ_END_TO_END_FLOW_CONTROL, req_msg);
	param->remote_cm_response_timeout =
		IBA_GET(CM_REQ_LOCAL_CM_RESPONSE_TIMEOUT, req_msg);
	param->retry_count = IBA_GET(CM_REQ_RETRY_COUNT, req_msg);
	param->rnr_retry_count = IBA_GET(CM_REQ_RNR_RETRY_COUNT, req_msg);
	param->srq = IBA_GET(CM_REQ_SRQ, req_msg);
	param->ppath_sgid_attr = cm_id_priv->av.ah_attr.grh.sgid_attr;
	param->ece.vendor_id = IBA_GET(CM_REQ_VENDOR_ID, req_msg);
	param->ece.attr_mod = be32_to_cpu(req_msg->hdr.attr_mod);

	work->cm_event.private_data =
		IBA_GET_MEM_PTR(CM_REQ_PRIVATE_DATA, req_msg);
}

static void cm_process_work(struct cm_id_private *cm_id_priv,
			    struct cm_work *work)
{
	int ret;

	/* We will typically only have the current event to report. */
	ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &work->cm_event);
	cm_free_work(work);

	while (!ret && !atomic_add_negative(-1, &cm_id_priv->work_count)) {
		spin_lock_irq(&cm_id_priv->lock);
		work = cm_dequeue_work(cm_id_priv);
		spin_unlock_irq(&cm_id_priv->lock);
		if (!work)
			return;

		ret = cm_id_priv->id.cm_handler(&cm_id_priv->id,
						&work->cm_event);
		cm_free_work(work);
	}
	cm_deref_id(cm_id_priv);
	if (ret)
		cm_destroy_id(&cm_id_priv->id, ret);
}

static void cm_format_mra(struct cm_mra_msg *mra_msg,
			  struct cm_id_private *cm_id_priv,
			  enum cm_msg_response msg_mraed, u8 service_timeout,
			  const void *private_data, u8 private_data_len)
{
	cm_format_mad_hdr(&mra_msg->hdr, CM_MRA_ATTR_ID, cm_id_priv->tid);
	IBA_SET(CM_MRA_MESSAGE_MRAED, mra_msg, msg_mraed);
	IBA_SET(CM_MRA_LOCAL_COMM_ID, mra_msg,
		be32_to_cpu(cm_id_priv->id.local_id));
	IBA_SET(CM_MRA_REMOTE_COMM_ID, mra_msg,
		be32_to_cpu(cm_id_priv->id.remote_id));
	IBA_SET(CM_MRA_SERVICE_TIMEOUT, mra_msg, service_timeout);

	if (private_data && private_data_len)
		IBA_SET_MEM(CM_MRA_PRIVATE_DATA, mra_msg, private_data,
			    private_data_len);
}

static void cm_format_rej(struct cm_rej_msg *rej_msg,
			  struct cm_id_private *cm_id_priv,
			  enum ib_cm_rej_reason reason, void *ari,
			  u8 ari_length, const void *private_data,
			  u8 private_data_len, enum ib_cm_state state)
{
	lockdep_assert_held(&cm_id_priv->lock);

	cm_format_mad_hdr(&rej_msg->hdr, CM_REJ_ATTR_ID, cm_id_priv->tid);
	IBA_SET(CM_REJ_REMOTE_COMM_ID, rej_msg,
		be32_to_cpu(cm_id_priv->id.remote_id));

	switch (state) {
	case IB_CM_REQ_RCVD:
		IBA_SET(CM_REJ_LOCAL_COMM_ID, rej_msg, be32_to_cpu(0));
		IBA_SET(CM_REJ_MESSAGE_REJECTED, rej_msg, CM_MSG_RESPONSE_REQ);
		break;
	case IB_CM_MRA_REQ_SENT:
		IBA_SET(CM_REJ_LOCAL_COMM_ID, rej_msg,
			be32_to_cpu(cm_id_priv->id.local_id));
		IBA_SET(CM_REJ_MESSAGE_REJECTED, rej_msg, CM_MSG_RESPONSE_REQ);
		break;
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
		IBA_SET(CM_REJ_LOCAL_COMM_ID, rej_msg,
			be32_to_cpu(cm_id_priv->id.local_id));
		IBA_SET(CM_REJ_MESSAGE_REJECTED, rej_msg, CM_MSG_RESPONSE_REP);
		break;
	default:
		IBA_SET(CM_REJ_LOCAL_COMM_ID, rej_msg,
			be32_to_cpu(cm_id_priv->id.local_id));
		IBA_SET(CM_REJ_MESSAGE_REJECTED, rej_msg,
			CM_MSG_RESPONSE_OTHER);
		break;
	}

	IBA_SET(CM_REJ_REASON, rej_msg, reason);
	if (ari && ari_length) {
		IBA_SET(CM_REJ_REJECTED_INFO_LENGTH, rej_msg, ari_length);
		IBA_SET_MEM(CM_REJ_ARI, rej_msg, ari, ari_length);
	}

	if (private_data && private_data_len)
		IBA_SET_MEM(CM_REJ_PRIVATE_DATA, rej_msg, private_data,
			    private_data_len);
}

static void cm_dup_req_handler(struct cm_work *work,
			       struct cm_id_private *cm_id_priv)
{
	struct ib_mad_send_buf *msg = NULL;
	int ret;

	atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
			counter[CM_REQ_COUNTER]);

	/* Quick state check to discard duplicate REQs. */
	spin_lock_irq(&cm_id_priv->lock);
	if (cm_id_priv->id.state == IB_CM_REQ_RCVD) {
		spin_unlock_irq(&cm_id_priv->lock);
		return;
	}
	spin_unlock_irq(&cm_id_priv->lock);

	ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
	if (ret)
		return;

	spin_lock_irq(&cm_id_priv->lock);
	switch (cm_id_priv->id.state) {
	case IB_CM_MRA_REQ_SENT:
		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
			      CM_MSG_RESPONSE_REQ, cm_id_priv->service_timeout,
			      cm_id_priv->private_data,
			      cm_id_priv->private_data_len);
		break;
	case IB_CM_TIMEWAIT:
		cm_format_rej((struct cm_rej_msg *)msg->mad, cm_id_priv,
			      IB_CM_REJ_STALE_CONN, NULL, 0, NULL, 0,
			      IB_CM_TIMEWAIT);
		break;
	default:
		goto unlock;
	}
	spin_unlock_irq(&cm_id_priv->lock);

	trace_icm_send_dup_req(&cm_id_priv->id);
	ret = ib_post_send_mad(msg, NULL);
	if (ret)
		goto free;
	return;

unlock:	spin_unlock_irq(&cm_id_priv->lock);
free:	cm_free_msg(msg);
}

1981 static struct cm_id_private * cm_match_req(struct cm_work *work,
1982 struct cm_id_private *cm_id_priv)
1984 struct cm_id_private *listen_cm_id_priv, *cur_cm_id_priv;
1985 struct cm_timewait_info *timewait_info;
1986 struct cm_req_msg *req_msg;
1988 req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;
1990 /* Check for possible duplicate REQ. */
1991 spin_lock_irq(&cm.lock);
1992 timewait_info = cm_insert_remote_id(cm_id_priv->timewait_info);
1993 if (timewait_info) {
1994 cur_cm_id_priv = cm_acquire_id(timewait_info->work.local_id,
1995 timewait_info->work.remote_id);
1996 spin_unlock_irq(&cm.lock);
1997 if (cur_cm_id_priv) {
1998 cm_dup_req_handler(work, cur_cm_id_priv);
1999 cm_deref_id(cur_cm_id_priv);
2004 /* Check for stale connections. */
2005 timewait_info = cm_insert_remote_qpn(cm_id_priv->timewait_info);
2006 if (timewait_info) {
2007 cm_remove_remote(cm_id_priv);
2008 cur_cm_id_priv = cm_acquire_id(timewait_info->work.local_id,
2009 timewait_info->work.remote_id);
2011 spin_unlock_irq(&cm.lock);
2012 cm_issue_rej(work->port, work->mad_recv_wc,
2013 IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REQ,
2014 NULL, 0);
2015 if (cur_cm_id_priv) {
2016 ib_send_cm_dreq(&cur_cm_id_priv->id, NULL, 0);
2017 cm_deref_id(cur_cm_id_priv);
2018 }
2019 return NULL;
2020 }
2022 /* Find matching listen request. */
2023 listen_cm_id_priv = cm_find_listen(
2024 cm_id_priv->id.device,
2025 cpu_to_be64(IBA_GET(CM_REQ_SERVICE_ID, req_msg)));
2026 if (!listen_cm_id_priv) {
2027 cm_remove_remote(cm_id_priv);
2028 spin_unlock_irq(&cm.lock);
2029 cm_issue_rej(work->port, work->mad_recv_wc,
2030 IB_CM_REJ_INVALID_SERVICE_ID, CM_MSG_RESPONSE_REQ,
2031 NULL, 0);
2032 return NULL;
2033 }
2034 spin_unlock_irq(&cm.lock);
2035 return listen_cm_id_priv;
2036 }
2038 /*
2039 * Work-around for inter-subnet connections. If the LIDs are permissive,
2040 * we need to override the LID/SL data in the REQ with the LID information
2041 * in the work completion.
2042 */
2043 static void cm_process_routed_req(struct cm_req_msg *req_msg, struct ib_wc *wc)
2044 {
2045 if (!IBA_GET(CM_REQ_PRIMARY_SUBNET_LOCAL, req_msg)) {
2046 if (cpu_to_be16(IBA_GET(CM_REQ_PRIMARY_LOCAL_PORT_LID,
2047 req_msg)) == IB_LID_PERMISSIVE) {
2048 IBA_SET(CM_REQ_PRIMARY_LOCAL_PORT_LID, req_msg,
2049 be16_to_cpu(ib_lid_be16(wc->slid)));
2050 IBA_SET(CM_REQ_PRIMARY_SL, req_msg, wc->sl);
2051 }
2053 if (cpu_to_be16(IBA_GET(CM_REQ_PRIMARY_REMOTE_PORT_LID,
2054 req_msg)) == IB_LID_PERMISSIVE)
2055 IBA_SET(CM_REQ_PRIMARY_REMOTE_PORT_LID, req_msg,
2056 wc->dlid_path_bits);
2057 }
2059 if (!IBA_GET(CM_REQ_ALTERNATE_SUBNET_LOCAL, req_msg)) {
2060 if (cpu_to_be16(IBA_GET(CM_REQ_ALTERNATE_LOCAL_PORT_LID,
2061 req_msg)) == IB_LID_PERMISSIVE) {
2062 IBA_SET(CM_REQ_ALTERNATE_LOCAL_PORT_LID, req_msg,
2063 be16_to_cpu(ib_lid_be16(wc->slid)));
2064 IBA_SET(CM_REQ_ALTERNATE_SL, req_msg, wc->sl);
2065 }
2067 if (cpu_to_be16(IBA_GET(CM_REQ_ALTERNATE_REMOTE_PORT_LID,
2068 req_msg)) == IB_LID_PERMISSIVE)
2069 IBA_SET(CM_REQ_ALTERNATE_REMOTE_PORT_LID, req_msg,
2070 wc->dlid_path_bits);
2071 }
2072 }
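/*
 * Editor's note on the function below (summary of the visible logic, not
 * upstream wording): cm_req_handler() allocates a passive-side cm_id,
 * copies the wire parameters out of the REQ MAD, resolves the primary and
 * alternate paths into address vectors, matches a listener, and only then
 * makes the ID visible via cm_finalize_id() before queueing
 * IB_CM_REQ_RECEIVED for the consumer.
 */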
2074 static int cm_req_handler(struct cm_work *work)
2075 {
2076 struct cm_id_private *cm_id_priv, *listen_cm_id_priv;
2077 struct cm_req_msg *req_msg;
2078 const struct ib_global_route *grh;
2079 const struct ib_gid_attr *gid_attr;
2080 int ret;
2082 req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;
2084 cm_id_priv =
2085 cm_alloc_id_priv(work->port->cm_dev->ib_device, NULL, NULL);
2086 if (IS_ERR(cm_id_priv))
2087 return PTR_ERR(cm_id_priv);
2089 cm_id_priv->id.remote_id =
2090 cpu_to_be32(IBA_GET(CM_REQ_LOCAL_COMM_ID, req_msg));
2091 cm_id_priv->id.service_id =
2092 cpu_to_be64(IBA_GET(CM_REQ_SERVICE_ID, req_msg));
2093 cm_id_priv->id.service_mask = ~cpu_to_be64(0);
2094 cm_id_priv->tid = req_msg->hdr.tid;
2095 cm_id_priv->timeout_ms = cm_convert_to_ms(
2096 IBA_GET(CM_REQ_LOCAL_CM_RESPONSE_TIMEOUT, req_msg));
2097 cm_id_priv->max_cm_retries = IBA_GET(CM_REQ_MAX_CM_RETRIES, req_msg);
2098 cm_id_priv->remote_qpn =
2099 cpu_to_be32(IBA_GET(CM_REQ_LOCAL_QPN, req_msg));
2100 cm_id_priv->initiator_depth =
2101 IBA_GET(CM_REQ_RESPONDER_RESOURCES, req_msg);
2102 cm_id_priv->responder_resources =
2103 IBA_GET(CM_REQ_INITIATOR_DEPTH, req_msg);
2104 cm_id_priv->path_mtu = IBA_GET(CM_REQ_PATH_PACKET_PAYLOAD_MTU, req_msg);
2105 cm_id_priv->pkey = cpu_to_be16(IBA_GET(CM_REQ_PARTITION_KEY, req_msg));
2106 cm_id_priv->sq_psn = cpu_to_be32(IBA_GET(CM_REQ_STARTING_PSN, req_msg));
2107 cm_id_priv->retry_count = IBA_GET(CM_REQ_RETRY_COUNT, req_msg);
2108 cm_id_priv->rnr_retry_count = IBA_GET(CM_REQ_RNR_RETRY_COUNT, req_msg);
2109 cm_id_priv->qp_type = cm_req_get_qp_type(req_msg);
2111 ret = cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
2112 work->mad_recv_wc->recv_buf.grh,
2113 &cm_id_priv->av);
2114 if (ret)
2115 goto destroy;
2116 cm_id_priv->timewait_info = cm_create_timewait_info(cm_id_priv->
2117 id.local_id);
2118 if (IS_ERR(cm_id_priv->timewait_info)) {
2119 ret = PTR_ERR(cm_id_priv->timewait_info);
2120 cm_id_priv->timewait_info = NULL;
2121 goto destroy;
2122 }
2123 cm_id_priv->timewait_info->work.remote_id = cm_id_priv->id.remote_id;
2124 cm_id_priv->timewait_info->remote_ca_guid =
2125 cpu_to_be64(IBA_GET(CM_REQ_LOCAL_CA_GUID, req_msg));
2126 cm_id_priv->timewait_info->remote_qpn = cm_id_priv->remote_qpn;
2128 /*
2129 * Note that the ID pointer is not in the xarray at this point,
2130 * so this set is only visible to the local thread.
2131 */
2132 cm_id_priv->id.state = IB_CM_REQ_RCVD;
2134 listen_cm_id_priv = cm_match_req(work, cm_id_priv);
2135 if (!listen_cm_id_priv) {
2136 trace_icm_no_listener_err(&cm_id_priv->id);
2137 cm_id_priv->id.state = IB_CM_IDLE;
2138 ret = -EINVAL;
2139 goto destroy;
2140 }
2142 if (cm_id_priv->av.ah_attr.type != RDMA_AH_ATTR_TYPE_ROCE)
2143 cm_process_routed_req(req_msg, work->mad_recv_wc->wc);
2145 memset(&work->path[0], 0, sizeof(work->path[0]));
2146 if (cm_req_has_alt_path(req_msg))
2147 memset(&work->path[1], 0, sizeof(work->path[1]));
2148 grh = rdma_ah_read_grh(&cm_id_priv->av.ah_attr);
2149 gid_attr = grh->sgid_attr;
2151 if (gid_attr &&
2152 rdma_protocol_roce(work->port->cm_dev->ib_device,
2153 work->port->port_num)) {
2154 work->path[0].rec_type =
2155 sa_conv_gid_to_pathrec_type(gid_attr->gid_type);
2156 } else {
2157 cm_path_set_rec_type(
2158 work->port->cm_dev->ib_device, work->port->port_num,
2159 &work->path[0],
2160 IBA_GET_MEM_PTR(CM_REQ_PRIMARY_LOCAL_PORT_GID,
2161 req_msg));
2162 }
2163 if (cm_req_has_alt_path(req_msg))
2164 work->path[1].rec_type = work->path[0].rec_type;
2165 cm_format_paths_from_req(req_msg, &work->path[0],
2166 &work->path[1], work->mad_recv_wc->wc);
2167 if (cm_id_priv->av.ah_attr.type == RDMA_AH_ATTR_TYPE_ROCE)
2168 sa_path_set_dmac(&work->path[0],
2169 cm_id_priv->av.ah_attr.roce.dmac);
2170 work->path[0].hop_limit = grh->hop_limit;
2171 ret = cm_init_av_by_path(&work->path[0], gid_attr, &cm_id_priv->av,
2172 cm_id_priv);
2173 if (ret) {
2174 int err;
2176 err = rdma_query_gid(work->port->cm_dev->ib_device,
2177 work->port->port_num, 0,
2178 &work->path[0].sgid);
2179 if (err)
2180 ib_send_cm_rej(&cm_id_priv->id, IB_CM_REJ_INVALID_GID,
2181 NULL, 0, NULL, 0);
2182 else
2183 ib_send_cm_rej(&cm_id_priv->id, IB_CM_REJ_INVALID_GID,
2184 &work->path[0].sgid,
2185 sizeof(work->path[0].sgid),
2186 NULL, 0);
2187 goto rejected;
2188 }
2189 if (cm_req_has_alt_path(req_msg)) {
2190 ret = cm_init_av_by_path(&work->path[1], NULL,
2191 &cm_id_priv->alt_av, cm_id_priv);
2192 if (ret) {
2193 ib_send_cm_rej(&cm_id_priv->id,
2194 IB_CM_REJ_INVALID_ALT_GID,
2195 &work->path[0].sgid,
2196 sizeof(work->path[0].sgid), NULL, 0);
2197 goto rejected;
2198 }
2199 }
2201 cm_id_priv->id.cm_handler = listen_cm_id_priv->id.cm_handler;
2202 cm_id_priv->id.context = listen_cm_id_priv->id.context;
2203 cm_format_req_event(work, cm_id_priv, &listen_cm_id_priv->id);
2205 /* Now MAD handlers can see the new ID */
2206 spin_lock_irq(&cm_id_priv->lock);
2207 cm_finalize_id(cm_id_priv);
2209 /* Refcount belongs to the event, pairs with cm_process_work() */
2210 refcount_inc(&cm_id_priv->refcount);
2211 cm_queue_work_unlock(cm_id_priv, work);
2212 /*
2213 * Since this ID was just created and was not made visible to other MAD
2214 * handlers until the cm_finalize_id() above we know that the
2215 * cm_process_work() will deliver the event and the listen_cm_id
2216 * embedded in the event can be derefed here.
2217 */
2218 cm_deref_id(listen_cm_id_priv);
2219 return 0;
2221 rejected:
2222 cm_deref_id(listen_cm_id_priv);
2223 destroy:
2224 ib_destroy_cm_id(&cm_id_priv->id);
2225 return ret;
2226 }
2228 static void cm_format_rep(struct cm_rep_msg *rep_msg,
2229 struct cm_id_private *cm_id_priv,
2230 struct ib_cm_rep_param *param)
2231 {
2232 cm_format_mad_ece_hdr(&rep_msg->hdr, CM_REP_ATTR_ID, cm_id_priv->tid,
2233 param->ece.attr_mod);
2234 IBA_SET(CM_REP_LOCAL_COMM_ID, rep_msg,
2235 be32_to_cpu(cm_id_priv->id.local_id));
2236 IBA_SET(CM_REP_REMOTE_COMM_ID, rep_msg,
2237 be32_to_cpu(cm_id_priv->id.remote_id));
2238 IBA_SET(CM_REP_STARTING_PSN, rep_msg, param->starting_psn);
2239 IBA_SET(CM_REP_RESPONDER_RESOURCES, rep_msg,
2240 param->responder_resources);
2241 IBA_SET(CM_REP_TARGET_ACK_DELAY, rep_msg,
2242 cm_id_priv->av.port->cm_dev->ack_delay);
2243 IBA_SET(CM_REP_FAILOVER_ACCEPTED, rep_msg, param->failover_accepted);
2244 IBA_SET(CM_REP_RNR_RETRY_COUNT, rep_msg, param->rnr_retry_count);
2245 IBA_SET(CM_REP_LOCAL_CA_GUID, rep_msg,
2246 be64_to_cpu(cm_id_priv->id.device->node_guid));
2248 if (cm_id_priv->qp_type != IB_QPT_XRC_TGT) {
2249 IBA_SET(CM_REP_INITIATOR_DEPTH, rep_msg,
2250 param->initiator_depth);
2251 IBA_SET(CM_REP_END_TO_END_FLOW_CONTROL, rep_msg,
2252 param->flow_control);
2253 IBA_SET(CM_REP_SRQ, rep_msg, param->srq);
2254 IBA_SET(CM_REP_LOCAL_QPN, rep_msg, param->qp_num);
2255 } else {
2256 IBA_SET(CM_REP_SRQ, rep_msg, 1);
2257 IBA_SET(CM_REP_LOCAL_EE_CONTEXT_NUMBER, rep_msg, param->qp_num);
2258 }
2260 IBA_SET(CM_REP_VENDOR_ID_L, rep_msg, param->ece.vendor_id);
2261 IBA_SET(CM_REP_VENDOR_ID_M, rep_msg, param->ece.vendor_id >> 8);
2262 IBA_SET(CM_REP_VENDOR_ID_H, rep_msg, param->ece.vendor_id >> 16);
2264 if (param->private_data && param->private_data_len)
2265 IBA_SET_MEM(CM_REP_PRIVATE_DATA, rep_msg, param->private_data,
2266 param->private_data_len);
2267 }
2269 int ib_send_cm_rep(struct ib_cm_id *cm_id,
2270 struct ib_cm_rep_param *param)
2271 {
2272 struct cm_id_private *cm_id_priv;
2273 struct ib_mad_send_buf *msg;
2274 struct cm_rep_msg *rep_msg;
2275 unsigned long flags;
2276 int ret;
2278 if (param->private_data &&
2279 param->private_data_len > IB_CM_REP_PRIVATE_DATA_SIZE)
2280 return -EINVAL;
2282 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
2283 spin_lock_irqsave(&cm_id_priv->lock, flags);
2284 if (cm_id->state != IB_CM_REQ_RCVD &&
2285 cm_id->state != IB_CM_MRA_REQ_SENT) {
2286 trace_icm_send_rep_err(cm_id_priv->id.local_id, cm_id->state);
2287 ret = -EINVAL;
2288 goto out;
2289 }
2291 ret = cm_alloc_msg(cm_id_priv, &msg);
2292 if (ret)
2293 goto out;
2295 rep_msg = (struct cm_rep_msg *) msg->mad;
2296 cm_format_rep(rep_msg, cm_id_priv, param);
2297 msg->timeout_ms = cm_id_priv->timeout_ms;
2298 msg->context[1] = (void *) (unsigned long) IB_CM_REP_SENT;
2300 trace_icm_send_rep(cm_id);
2301 ret = ib_post_send_mad(msg, NULL);
2302 if (ret) {
2303 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2304 cm_free_msg(msg);
2305 return ret;
2306 }
2308 cm_id->state = IB_CM_REP_SENT;
2309 cm_id_priv->msg = msg;
2310 cm_id_priv->initiator_depth = param->initiator_depth;
2311 cm_id_priv->responder_resources = param->responder_resources;
2312 cm_id_priv->rq_psn = cpu_to_be32(IBA_GET(CM_REP_STARTING_PSN, rep_msg));
2313 WARN_ONCE(param->qp_num & 0xFF000000,
2314 "IBTA declares QPN to be 24 bits, but it is 0x%X\n",
2316 cm_id_priv->local_qpn = cpu_to_be32(param->qp_num & 0xFFFFFF);
2318 out: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2319 return ret;
2320 }
2321 EXPORT_SYMBOL(ib_send_cm_rep);
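/*
 * Usage sketch (illustrative only, not part of this driver): a passive-side
 * cm_handler answering IB_CM_REQ_RECEIVED once its QP exists. "my_qp" and
 * "my_psn" are hypothetical consumer state; error handling is elided.
 *
 *	struct ib_cm_rep_param rep = {};
 *
 *	rep.qp_num = my_qp->qp_num;
 *	rep.starting_psn = my_psn;
 *	rep.responder_resources = 1;
 *	rep.initiator_depth = 1;
 *	rep.rnr_retry_count = 7;
 *	ret = ib_send_cm_rep(cm_id, &rep);
 */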
2323 static void cm_format_rtu(struct cm_rtu_msg *rtu_msg,
2324 struct cm_id_private *cm_id_priv,
2325 const void *private_data,
2326 u8 private_data_len)
2327 {
2328 cm_format_mad_hdr(&rtu_msg->hdr, CM_RTU_ATTR_ID, cm_id_priv->tid);
2329 IBA_SET(CM_RTU_LOCAL_COMM_ID, rtu_msg,
2330 be32_to_cpu(cm_id_priv->id.local_id));
2331 IBA_SET(CM_RTU_REMOTE_COMM_ID, rtu_msg,
2332 be32_to_cpu(cm_id_priv->id.remote_id));
2334 if (private_data && private_data_len)
2335 IBA_SET_MEM(CM_RTU_PRIVATE_DATA, rtu_msg, private_data,
2336 private_data_len);
2337 }
2339 int ib_send_cm_rtu(struct ib_cm_id *cm_id,
2340 const void *private_data,
2341 u8 private_data_len)
2342 {
2343 struct cm_id_private *cm_id_priv;
2344 struct ib_mad_send_buf *msg;
2345 unsigned long flags;
2346 void *data;
2347 int ret;
2349 if (private_data && private_data_len > IB_CM_RTU_PRIVATE_DATA_SIZE)
2352 data = cm_copy_private_data(private_data, private_data_len);
2353 if (IS_ERR(data))
2354 return PTR_ERR(data);
2356 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
2357 spin_lock_irqsave(&cm_id_priv->lock, flags);
2358 if (cm_id->state != IB_CM_REP_RCVD &&
2359 cm_id->state != IB_CM_MRA_REP_SENT) {
2360 trace_icm_send_cm_rtu_err(cm_id);
2361 ret = -EINVAL;
2362 goto error;
2363 }
2365 ret = cm_alloc_msg(cm_id_priv, &msg);
2366 if (ret)
2367 goto error;
2369 cm_format_rtu((struct cm_rtu_msg *) msg->mad, cm_id_priv,
2370 private_data, private_data_len);
2372 trace_icm_send_rtu(cm_id);
2373 ret = ib_post_send_mad(msg, NULL);
2374 if (ret) {
2375 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2376 cm_free_msg(msg);
2377 kfree(data);
2378 return ret;
2379 }
2381 cm_id->state = IB_CM_ESTABLISHED;
2382 cm_set_private_data(cm_id_priv, data, private_data_len);
2383 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2384 return 0;
2386 error: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2387 kfree(data);
2388 return ret;
2389 }
2390 EXPORT_SYMBOL(ib_send_cm_rtu);
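/*
 * Usage sketch (illustrative only, not part of this driver): the active side
 * completes the handshake after IB_CM_REP_RECEIVED by moving its QP to
 * RTR/RTS (e.g. via ib_cm_init_qp_attr() and ib_modify_qp()) and then
 * confirming with an RTU carrying no private data:
 *
 *	ret = ib_send_cm_rtu(cm_id, NULL, 0);
 */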
2392 static void cm_format_rep_event(struct cm_work *work, enum ib_qp_type qp_type)
2393 {
2394 struct cm_rep_msg *rep_msg;
2395 struct ib_cm_rep_event_param *param;
2397 rep_msg = (struct cm_rep_msg *)work->mad_recv_wc->recv_buf.mad;
2398 param = &work->cm_event.param.rep_rcvd;
2399 param->remote_ca_guid =
2400 cpu_to_be64(IBA_GET(CM_REP_LOCAL_CA_GUID, rep_msg));
2401 param->remote_qkey = IBA_GET(CM_REP_LOCAL_Q_KEY, rep_msg);
2402 param->remote_qpn = be32_to_cpu(cm_rep_get_qpn(rep_msg, qp_type));
2403 param->starting_psn = IBA_GET(CM_REP_STARTING_PSN, rep_msg);
2404 param->responder_resources = IBA_GET(CM_REP_INITIATOR_DEPTH, rep_msg);
2405 param->initiator_depth = IBA_GET(CM_REP_RESPONDER_RESOURCES, rep_msg);
2406 param->target_ack_delay = IBA_GET(CM_REP_TARGET_ACK_DELAY, rep_msg);
2407 param->failover_accepted = IBA_GET(CM_REP_FAILOVER_ACCEPTED, rep_msg);
2408 param->flow_control = IBA_GET(CM_REP_END_TO_END_FLOW_CONTROL, rep_msg);
2409 param->rnr_retry_count = IBA_GET(CM_REP_RNR_RETRY_COUNT, rep_msg);
2410 param->srq = IBA_GET(CM_REP_SRQ, rep_msg);
2411 param->ece.vendor_id = IBA_GET(CM_REP_VENDOR_ID_H, rep_msg) << 16;
2412 param->ece.vendor_id |= IBA_GET(CM_REP_VENDOR_ID_M, rep_msg) << 8;
2413 param->ece.vendor_id |= IBA_GET(CM_REP_VENDOR_ID_L, rep_msg);
2414 param->ece.attr_mod = be32_to_cpu(rep_msg->hdr.attr_mod);
2416 work->cm_event.private_data =
2417 IBA_GET_MEM_PTR(CM_REP_PRIVATE_DATA, rep_msg);
2418 }
2420 static void cm_dup_rep_handler(struct cm_work *work)
2421 {
2422 struct cm_id_private *cm_id_priv;
2423 struct cm_rep_msg *rep_msg;
2424 struct ib_mad_send_buf *msg = NULL;
2425 int ret;
2427 rep_msg = (struct cm_rep_msg *) work->mad_recv_wc->recv_buf.mad;
2428 cm_id_priv = cm_acquire_id(
2429 cpu_to_be32(IBA_GET(CM_REP_REMOTE_COMM_ID, rep_msg)),
2430 cpu_to_be32(IBA_GET(CM_REP_LOCAL_COMM_ID, rep_msg)));
2431 if (!cm_id_priv)
2432 return;
2434 atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
2435 counter[CM_REP_COUNTER]);
2436 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
2437 if (ret)
2438 goto deref;
2440 spin_lock_irq(&cm_id_priv->lock);
2441 if (cm_id_priv->id.state == IB_CM_ESTABLISHED)
2442 cm_format_rtu((struct cm_rtu_msg *) msg->mad, cm_id_priv,
2443 cm_id_priv->private_data,
2444 cm_id_priv->private_data_len);
2445 else if (cm_id_priv->id.state == IB_CM_MRA_REP_SENT)
2446 cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
2447 CM_MSG_RESPONSE_REP, cm_id_priv->service_timeout,
2448 cm_id_priv->private_data,
2449 cm_id_priv->private_data_len);
2450 else
2451 goto unlock;
2452 spin_unlock_irq(&cm_id_priv->lock);
2454 trace_icm_send_dup_rep(&cm_id_priv->id);
2455 ret = ib_post_send_mad(msg, NULL);
2456 if (ret)
2457 goto free;
2458 goto deref;
2460 unlock: spin_unlock_irq(&cm_id_priv->lock);
2461 free: cm_free_msg(msg);
2462 deref: cm_deref_id(cm_id_priv);
2463 }
2465 static int cm_rep_handler(struct cm_work *work)
2466 {
2467 struct cm_id_private *cm_id_priv;
2468 struct cm_rep_msg *rep_msg;
2469 int ret;
2470 struct cm_id_private *cur_cm_id_priv;
2471 struct cm_timewait_info *timewait_info;
2473 rep_msg = (struct cm_rep_msg *)work->mad_recv_wc->recv_buf.mad;
2474 cm_id_priv = cm_acquire_id(
2475 cpu_to_be32(IBA_GET(CM_REP_REMOTE_COMM_ID, rep_msg)), 0);
2476 if (!cm_id_priv) {
2477 cm_dup_rep_handler(work);
2478 trace_icm_remote_no_priv_err(
2479 IBA_GET(CM_REP_REMOTE_COMM_ID, rep_msg));
2480 return -EINVAL;
2481 }
2483 cm_format_rep_event(work, cm_id_priv->qp_type);
2485 spin_lock_irq(&cm_id_priv->lock);
2486 switch (cm_id_priv->id.state) {
2487 case IB_CM_REQ_SENT:
2488 case IB_CM_MRA_REQ_RCVD:
2489 break;
2490 default:
2491 ret = -EINVAL;
2492 trace_icm_rep_unknown_err(
2493 IBA_GET(CM_REP_LOCAL_COMM_ID, rep_msg),
2494 IBA_GET(CM_REP_REMOTE_COMM_ID, rep_msg),
2495 cm_id_priv->id.state);
2496 spin_unlock_irq(&cm_id_priv->lock);
2500 cm_id_priv->timewait_info->work.remote_id =
2501 cpu_to_be32(IBA_GET(CM_REP_LOCAL_COMM_ID, rep_msg));
2502 cm_id_priv->timewait_info->remote_ca_guid =
2503 cpu_to_be64(IBA_GET(CM_REP_LOCAL_CA_GUID, rep_msg));
2504 cm_id_priv->timewait_info->remote_qpn = cm_rep_get_qpn(rep_msg, cm_id_priv->qp_type);
2506 spin_lock(&cm.lock);
2507 /* Check for duplicate REP. */
2508 if (cm_insert_remote_id(cm_id_priv->timewait_info)) {
2509 spin_unlock(&cm.lock);
2510 spin_unlock_irq(&cm_id_priv->lock);
2512 trace_icm_insert_failed_err(
2513 IBA_GET(CM_REP_REMOTE_COMM_ID, rep_msg));
2514 goto error;
2515 }
2516 /* Check for a stale connection. */
2517 timewait_info = cm_insert_remote_qpn(cm_id_priv->timewait_info);
2518 if (timewait_info) {
2519 cm_remove_remote(cm_id_priv);
2520 cur_cm_id_priv = cm_acquire_id(timewait_info->work.local_id,
2521 timewait_info->work.remote_id);
2523 spin_unlock(&cm.lock);
2524 spin_unlock_irq(&cm_id_priv->lock);
2525 cm_issue_rej(work->port, work->mad_recv_wc,
2526 IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REP,
2527 NULL, 0);
2529 trace_icm_staleconn_err(
2530 IBA_GET(CM_REP_LOCAL_COMM_ID, rep_msg),
2531 IBA_GET(CM_REP_REMOTE_COMM_ID, rep_msg));
2533 if (cur_cm_id_priv) {
2534 ib_send_cm_dreq(&cur_cm_id_priv->id, NULL, 0);
2535 cm_deref_id(cur_cm_id_priv);
2536 }
2537 ret = -EINVAL;
2538 goto error;
2539 }
2540 spin_unlock(&cm.lock);
2542 cm_id_priv->id.state = IB_CM_REP_RCVD;
2543 cm_id_priv->id.remote_id =
2544 cpu_to_be32(IBA_GET(CM_REP_LOCAL_COMM_ID, rep_msg));
2545 cm_id_priv->remote_qpn = cm_rep_get_qpn(rep_msg, cm_id_priv->qp_type);
2546 cm_id_priv->initiator_depth =
2547 IBA_GET(CM_REP_RESPONDER_RESOURCES, rep_msg);
2548 cm_id_priv->responder_resources =
2549 IBA_GET(CM_REP_INITIATOR_DEPTH, rep_msg);
2550 cm_id_priv->sq_psn = cpu_to_be32(IBA_GET(CM_REP_STARTING_PSN, rep_msg));
2551 cm_id_priv->rnr_retry_count = IBA_GET(CM_REP_RNR_RETRY_COUNT, rep_msg);
2552 cm_id_priv->target_ack_delay =
2553 IBA_GET(CM_REP_TARGET_ACK_DELAY, rep_msg);
2554 cm_id_priv->av.timeout =
2555 cm_ack_timeout(cm_id_priv->target_ack_delay,
2556 cm_id_priv->av.timeout - 1);
2557 cm_id_priv->alt_av.timeout =
2558 cm_ack_timeout(cm_id_priv->target_ack_delay,
2559 cm_id_priv->alt_av.timeout - 1);
2561 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
2562 cm_queue_work_unlock(cm_id_priv, work);
2563 return 0;
2565 error:
2566 cm_deref_id(cm_id_priv);
2567 return ret;
2568 }
2570 static int cm_establish_handler(struct cm_work *work)
2571 {
2572 struct cm_id_private *cm_id_priv;
2574 /* See comment in cm_establish about lookup. */
2575 cm_id_priv = cm_acquire_id(work->local_id, work->remote_id);
2576 if (!cm_id_priv)
2577 return -EINVAL;
2579 spin_lock_irq(&cm_id_priv->lock);
2580 if (cm_id_priv->id.state != IB_CM_ESTABLISHED) {
2581 spin_unlock_irq(&cm_id_priv->lock);
2585 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
2586 cm_queue_work_unlock(cm_id_priv, work);
2587 return 0;
2588 out:
2589 cm_deref_id(cm_id_priv);
2590 return -EINVAL;
2591 }
2593 static int cm_rtu_handler(struct cm_work *work)
2594 {
2595 struct cm_id_private *cm_id_priv;
2596 struct cm_rtu_msg *rtu_msg;
2598 rtu_msg = (struct cm_rtu_msg *)work->mad_recv_wc->recv_buf.mad;
2599 cm_id_priv = cm_acquire_id(
2600 cpu_to_be32(IBA_GET(CM_RTU_REMOTE_COMM_ID, rtu_msg)),
2601 cpu_to_be32(IBA_GET(CM_RTU_LOCAL_COMM_ID, rtu_msg)));
2602 if (!cm_id_priv)
2603 return -EINVAL;
2605 work->cm_event.private_data =
2606 IBA_GET_MEM_PTR(CM_RTU_PRIVATE_DATA, rtu_msg);
2608 spin_lock_irq(&cm_id_priv->lock);
2609 if (cm_id_priv->id.state != IB_CM_REP_SENT &&
2610 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
2611 spin_unlock_irq(&cm_id_priv->lock);
2612 atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
2613 counter[CM_RTU_COUNTER]);
2614 goto out;
2615 }
2616 cm_id_priv->id.state = IB_CM_ESTABLISHED;
2618 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
2619 cm_queue_work_unlock(cm_id_priv, work);
2620 return 0;
2621 out:
2622 cm_deref_id(cm_id_priv);
2623 return -EINVAL;
2624 }
2626 static void cm_format_dreq(struct cm_dreq_msg *dreq_msg,
2627 struct cm_id_private *cm_id_priv,
2628 const void *private_data,
2629 u8 private_data_len)
2630 {
2631 cm_format_mad_hdr(&dreq_msg->hdr, CM_DREQ_ATTR_ID,
2632 cm_form_tid(cm_id_priv));
2633 IBA_SET(CM_DREQ_LOCAL_COMM_ID, dreq_msg,
2634 be32_to_cpu(cm_id_priv->id.local_id));
2635 IBA_SET(CM_DREQ_REMOTE_COMM_ID, dreq_msg,
2636 be32_to_cpu(cm_id_priv->id.remote_id));
2637 IBA_SET(CM_DREQ_REMOTE_QPN_EECN, dreq_msg,
2638 be32_to_cpu(cm_id_priv->remote_qpn));
2640 if (private_data && private_data_len)
2641 IBA_SET_MEM(CM_DREQ_PRIVATE_DATA, dreq_msg, private_data,
2642 private_data_len);
2643 }
2645 static int cm_send_dreq_locked(struct cm_id_private *cm_id_priv,
2646 const void *private_data, u8 private_data_len)
2647 {
2648 struct ib_mad_send_buf *msg;
2649 int ret;
2651 lockdep_assert_held(&cm_id_priv->lock);
2653 if (private_data && private_data_len > IB_CM_DREQ_PRIVATE_DATA_SIZE)
2656 if (cm_id_priv->id.state != IB_CM_ESTABLISHED) {
2657 trace_icm_dreq_skipped(&cm_id_priv->id);
2661 if (cm_id_priv->id.lap_state == IB_CM_LAP_SENT ||
2662 cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
2663 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
2665 ret = cm_alloc_msg(cm_id_priv, &msg);
2666 if (ret) {
2667 cm_enter_timewait(cm_id_priv);
2668 return ret;
2669 }
2671 cm_format_dreq((struct cm_dreq_msg *) msg->mad, cm_id_priv,
2672 private_data, private_data_len);
2673 msg->timeout_ms = cm_id_priv->timeout_ms;
2674 msg->context[1] = (void *) (unsigned long) IB_CM_DREQ_SENT;
2676 trace_icm_send_dreq(&cm_id_priv->id);
2677 ret = ib_post_send_mad(msg, NULL);
2678 if (ret) {
2679 cm_enter_timewait(cm_id_priv);
2680 cm_free_msg(msg);
2681 return ret;
2682 }
2684 cm_id_priv->id.state = IB_CM_DREQ_SENT;
2685 cm_id_priv->msg = msg;
2686 return 0;
2687 }
2689 int ib_send_cm_dreq(struct ib_cm_id *cm_id, const void *private_data,
2690 u8 private_data_len)
2691 {
2692 struct cm_id_private *cm_id_priv =
2693 container_of(cm_id, struct cm_id_private, id);
2694 unsigned long flags;
2695 int ret;
2697 spin_lock_irqsave(&cm_id_priv->lock, flags);
2698 ret = cm_send_dreq_locked(cm_id_priv, private_data, private_data_len);
2699 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2700 return ret;
2701 }
2702 EXPORT_SYMBOL(ib_send_cm_dreq);
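/*
 * Usage sketch (illustrative only, not part of this driver): either side of
 * an established connection starts a graceful disconnect with a DREQ and
 * then waits for IB_CM_DREP_RECEIVED, or for retry exhaustion to push the
 * ID into timewait:
 *
 *	ret = ib_send_cm_dreq(cm_id, NULL, 0);
 */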
2704 static void cm_format_drep(struct cm_drep_msg *drep_msg,
2705 struct cm_id_private *cm_id_priv,
2706 const void *private_data,
2707 u8 private_data_len)
2708 {
2709 cm_format_mad_hdr(&drep_msg->hdr, CM_DREP_ATTR_ID, cm_id_priv->tid);
2710 IBA_SET(CM_DREP_LOCAL_COMM_ID, drep_msg,
2711 be32_to_cpu(cm_id_priv->id.local_id));
2712 IBA_SET(CM_DREP_REMOTE_COMM_ID, drep_msg,
2713 be32_to_cpu(cm_id_priv->id.remote_id));
2715 if (private_data && private_data_len)
2716 IBA_SET_MEM(CM_DREP_PRIVATE_DATA, drep_msg, private_data,
2717 private_data_len);
2718 }
2720 static int cm_send_drep_locked(struct cm_id_private *cm_id_priv,
2721 void *private_data, u8 private_data_len)
2722 {
2723 struct ib_mad_send_buf *msg;
2724 int ret;
2726 lockdep_assert_held(&cm_id_priv->lock);
2728 if (private_data && private_data_len > IB_CM_DREP_PRIVATE_DATA_SIZE)
2731 if (cm_id_priv->id.state != IB_CM_DREQ_RCVD) {
2732 trace_icm_send_drep_err(&cm_id_priv->id);
2733 kfree(private_data);
2734 return -EINVAL;
2735 }
2737 cm_set_private_data(cm_id_priv, private_data, private_data_len);
2738 cm_enter_timewait(cm_id_priv);
2740 ret = cm_alloc_msg(cm_id_priv, &msg);
2741 if (ret)
2742 return ret;
2744 cm_format_drep((struct cm_drep_msg *) msg->mad, cm_id_priv,
2745 private_data, private_data_len);
2747 trace_icm_send_drep(&cm_id_priv->id);
2748 ret = ib_post_send_mad(msg, NULL);
2749 if (ret) {
2750 cm_free_msg(msg);
2751 return ret;
2752 }
2753 return 0;
2754 }
2756 int ib_send_cm_drep(struct ib_cm_id *cm_id, const void *private_data,
2757 u8 private_data_len)
2758 {
2759 struct cm_id_private *cm_id_priv =
2760 container_of(cm_id, struct cm_id_private, id);
2761 unsigned long flags;
2762 void *data;
2763 int ret;
2765 data = cm_copy_private_data(private_data, private_data_len);
2766 if (IS_ERR(data))
2767 return PTR_ERR(data);
2769 spin_lock_irqsave(&cm_id_priv->lock, flags);
2770 ret = cm_send_drep_locked(cm_id_priv, data, private_data_len);
2771 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2772 return ret;
2773 }
2774 EXPORT_SYMBOL(ib_send_cm_drep);
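/*
 * Usage sketch (illustrative only, not part of this driver): the peer
 * answers IB_CM_DREQ_RECEIVED from its cm_handler; cm_send_drep_locked()
 * above moves the ID into timewait whether or not the send succeeds:
 *
 *	case IB_CM_DREQ_RECEIVED:
 *		ib_send_cm_drep(cm_id, NULL, 0);
 *		break;
 */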
2776 static int cm_issue_drep(struct cm_port *port,
2777 struct ib_mad_recv_wc *mad_recv_wc)
2778 {
2779 struct ib_mad_send_buf *msg = NULL;
2780 struct cm_dreq_msg *dreq_msg;
2781 struct cm_drep_msg *drep_msg;
2782 int ret;
2784 ret = cm_alloc_response_msg(port, mad_recv_wc, &msg);
2785 if (ret)
2786 return ret;
2788 dreq_msg = (struct cm_dreq_msg *) mad_recv_wc->recv_buf.mad;
2789 drep_msg = (struct cm_drep_msg *) msg->mad;
2791 cm_format_mad_hdr(&drep_msg->hdr, CM_DREP_ATTR_ID, dreq_msg->hdr.tid);
2792 IBA_SET(CM_DREP_REMOTE_COMM_ID, drep_msg,
2793 IBA_GET(CM_DREQ_LOCAL_COMM_ID, dreq_msg));
2794 IBA_SET(CM_DREP_LOCAL_COMM_ID, drep_msg,
2795 IBA_GET(CM_DREQ_REMOTE_COMM_ID, dreq_msg));
2797 trace_icm_issue_drep(
2798 IBA_GET(CM_DREQ_LOCAL_COMM_ID, dreq_msg),
2799 IBA_GET(CM_DREQ_REMOTE_COMM_ID, dreq_msg));
2800 ret = ib_post_send_mad(msg, NULL);
2801 if (ret)
2802 cm_free_msg(msg);
2804 return ret;
2805 }
2807 static int cm_dreq_handler(struct cm_work *work)
2808 {
2809 struct cm_id_private *cm_id_priv;
2810 struct cm_dreq_msg *dreq_msg;
2811 struct ib_mad_send_buf *msg = NULL;
2813 dreq_msg = (struct cm_dreq_msg *)work->mad_recv_wc->recv_buf.mad;
2814 cm_id_priv = cm_acquire_id(
2815 cpu_to_be32(IBA_GET(CM_DREQ_REMOTE_COMM_ID, dreq_msg)),
2816 cpu_to_be32(IBA_GET(CM_DREQ_LOCAL_COMM_ID, dreq_msg)));
2817 if (!cm_id_priv) {
2818 atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
2819 counter[CM_DREQ_COUNTER]);
2820 cm_issue_drep(work->port, work->mad_recv_wc);
2821 trace_icm_no_priv_err(
2822 IBA_GET(CM_DREQ_LOCAL_COMM_ID, dreq_msg),
2823 IBA_GET(CM_DREQ_REMOTE_COMM_ID, dreq_msg));
2824 return -EINVAL;
2825 }
2827 work->cm_event.private_data =
2828 IBA_GET_MEM_PTR(CM_DREQ_PRIVATE_DATA, dreq_msg);
2830 spin_lock_irq(&cm_id_priv->lock);
2831 if (cm_id_priv->local_qpn !=
2832 cpu_to_be32(IBA_GET(CM_DREQ_REMOTE_QPN_EECN, dreq_msg)))
2833 goto unlock;
2835 switch (cm_id_priv->id.state) {
2836 case IB_CM_REP_SENT:
2837 case IB_CM_DREQ_SENT:
2838 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
2839 break;
2840 case IB_CM_ESTABLISHED:
2841 if (cm_id_priv->id.lap_state == IB_CM_LAP_SENT ||
2842 cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
2843 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
2844 break;
2845 case IB_CM_MRA_REP_RCVD:
2846 break;
2847 case IB_CM_TIMEWAIT:
2848 atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
2849 counter[CM_DREQ_COUNTER]);
2850 msg = cm_alloc_response_msg_no_ah(work->port, work->mad_recv_wc);
2851 if (IS_ERR(msg))
2852 goto unlock;
2854 cm_format_drep((struct cm_drep_msg *) msg->mad, cm_id_priv,
2855 cm_id_priv->private_data,
2856 cm_id_priv->private_data_len);
2857 spin_unlock_irq(&cm_id_priv->lock);
2859 if (cm_create_response_msg_ah(work->port, work->mad_recv_wc, msg) ||
2860 ib_post_send_mad(msg, NULL))
2861 cm_free_msg(msg);
2862 goto deref;
2863 case IB_CM_DREQ_RCVD:
2864 atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
2865 counter[CM_DREQ_COUNTER]);
2866 goto unlock;
2867 default:
2868 trace_icm_dreq_unknown_err(&cm_id_priv->id);
2869 goto unlock;
2870 }
2871 cm_id_priv->id.state = IB_CM_DREQ_RCVD;
2872 cm_id_priv->tid = dreq_msg->hdr.tid;
2873 cm_queue_work_unlock(cm_id_priv, work);
2874 return 0;
2876 unlock: spin_unlock_irq(&cm_id_priv->lock);
2877 deref: cm_deref_id(cm_id_priv);
2878 return -EINVAL;
2879 }
2881 static int cm_drep_handler(struct cm_work *work)
2882 {
2883 struct cm_id_private *cm_id_priv;
2884 struct cm_drep_msg *drep_msg;
2886 drep_msg = (struct cm_drep_msg *)work->mad_recv_wc->recv_buf.mad;
2887 cm_id_priv = cm_acquire_id(
2888 cpu_to_be32(IBA_GET(CM_DREP_REMOTE_COMM_ID, drep_msg)),
2889 cpu_to_be32(IBA_GET(CM_DREP_LOCAL_COMM_ID, drep_msg)));
2890 if (!cm_id_priv)
2891 return -EINVAL;
2893 work->cm_event.private_data =
2894 IBA_GET_MEM_PTR(CM_DREP_PRIVATE_DATA, drep_msg);
2896 spin_lock_irq(&cm_id_priv->lock);
2897 if (cm_id_priv->id.state != IB_CM_DREQ_SENT &&
2898 cm_id_priv->id.state != IB_CM_DREQ_RCVD) {
2899 spin_unlock_irq(&cm_id_priv->lock);
2902 cm_enter_timewait(cm_id_priv);
2904 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
2905 cm_queue_work_unlock(cm_id_priv, work);
2906 return 0;
2907 out:
2908 cm_deref_id(cm_id_priv);
2909 return -EINVAL;
2910 }
2912 static int cm_send_rej_locked(struct cm_id_private *cm_id_priv,
2913 enum ib_cm_rej_reason reason, void *ari,
2914 u8 ari_length, const void *private_data,
2915 u8 private_data_len)
2916 {
2917 enum ib_cm_state state = cm_id_priv->id.state;
2918 struct ib_mad_send_buf *msg;
2919 int ret;
2921 lockdep_assert_held(&cm_id_priv->lock);
2923 if ((private_data && private_data_len > IB_CM_REJ_PRIVATE_DATA_SIZE) ||
2924 (ari && ari_length > IB_CM_REJ_ARI_LENGTH))
2928 case IB_CM_REQ_SENT:
2929 case IB_CM_MRA_REQ_RCVD:
2930 case IB_CM_REQ_RCVD:
2931 case IB_CM_MRA_REQ_SENT:
2932 case IB_CM_REP_RCVD:
2933 case IB_CM_MRA_REP_SENT:
2934 cm_reset_to_idle(cm_id_priv);
2935 ret = cm_alloc_msg(cm_id_priv, &msg);
2936 if (ret)
2937 return ret;
2938 cm_format_rej((struct cm_rej_msg *)msg->mad, cm_id_priv, reason,
2939 ari, ari_length, private_data, private_data_len,
2940 state);
2941 break;
2942 case IB_CM_REP_SENT:
2943 case IB_CM_MRA_REP_RCVD:
2944 cm_enter_timewait(cm_id_priv);
2945 ret = cm_alloc_msg(cm_id_priv, &msg);
2946 if (ret)
2947 return ret;
2948 cm_format_rej((struct cm_rej_msg *)msg->mad, cm_id_priv, reason,
2949 ari, ari_length, private_data, private_data_len,
2950 state);
2951 break;
2952 default:
2953 trace_icm_send_unknown_rej_err(&cm_id_priv->id);
2954 return -EINVAL;
2955 }
2957 trace_icm_send_rej(&cm_id_priv->id, reason);
2958 ret = ib_post_send_mad(msg, NULL);
2959 if (ret) {
2960 cm_free_msg(msg);
2961 return ret;
2962 }
2964 return 0;
2965 }
2967 int ib_send_cm_rej(struct ib_cm_id *cm_id, enum ib_cm_rej_reason reason,
2968 void *ari, u8 ari_length, const void *private_data,
2969 u8 private_data_len)
2970 {
2971 struct cm_id_private *cm_id_priv =
2972 container_of(cm_id, struct cm_id_private, id);
2973 unsigned long flags;
2974 int ret;
2976 spin_lock_irqsave(&cm_id_priv->lock, flags);
2977 ret = cm_send_rej_locked(cm_id_priv, reason, ari, ari_length,
2978 private_data, private_data_len);
2979 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2980 return ret;
2981 }
2982 EXPORT_SYMBOL(ib_send_cm_rej);
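/*
 * Usage sketch (illustrative only, not part of this driver): a listener
 * declining an incoming REQ with a consumer-defined reason and an optional
 * private payload; both length limits are enforced by
 * cm_send_rej_locked() above. "my_data"/"my_data_len" are hypothetical:
 *
 *	ret = ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED,
 *			     NULL, 0, my_data, my_data_len);
 */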
2984 static void cm_format_rej_event(struct cm_work *work)
2985 {
2986 struct cm_rej_msg *rej_msg;
2987 struct ib_cm_rej_event_param *param;
2989 rej_msg = (struct cm_rej_msg *)work->mad_recv_wc->recv_buf.mad;
2990 param = &work->cm_event.param.rej_rcvd;
2991 param->ari = IBA_GET_MEM_PTR(CM_REJ_ARI, rej_msg);
2992 param->ari_length = IBA_GET(CM_REJ_REJECTED_INFO_LENGTH, rej_msg);
2993 param->reason = IBA_GET(CM_REJ_REASON, rej_msg);
2994 work->cm_event.private_data =
2995 IBA_GET_MEM_PTR(CM_REJ_PRIVATE_DATA, rej_msg);
2996 }
2998 static struct cm_id_private * cm_acquire_rejected_id(struct cm_rej_msg *rej_msg)
2999 {
3000 struct cm_id_private *cm_id_priv;
3001 __be32 remote_id;
3003 remote_id = cpu_to_be32(IBA_GET(CM_REJ_LOCAL_COMM_ID, rej_msg));
3005 if (IBA_GET(CM_REJ_REASON, rej_msg) == IB_CM_REJ_TIMEOUT) {
3006 cm_id_priv = cm_find_remote_id(
3007 *((__be64 *)IBA_GET_MEM_PTR(CM_REJ_ARI, rej_msg)),
3008 remote_id);
3009 } else if (IBA_GET(CM_REJ_MESSAGE_REJECTED, rej_msg) ==
3010 CM_MSG_RESPONSE_REQ)
3011 cm_id_priv = cm_acquire_id(
3012 cpu_to_be32(IBA_GET(CM_REJ_REMOTE_COMM_ID, rej_msg)),
3013 0);
3014 else
3015 cm_id_priv = cm_acquire_id(
3016 cpu_to_be32(IBA_GET(CM_REJ_REMOTE_COMM_ID, rej_msg)),
3017 remote_id);
3019 return cm_id_priv;
3020 }
3022 static int cm_rej_handler(struct cm_work *work)
3023 {
3024 struct cm_id_private *cm_id_priv;
3025 struct cm_rej_msg *rej_msg;
3027 rej_msg = (struct cm_rej_msg *)work->mad_recv_wc->recv_buf.mad;
3028 cm_id_priv = cm_acquire_rejected_id(rej_msg);
3029 if (!cm_id_priv)
3030 return -EINVAL;
3032 cm_format_rej_event(work);
3034 spin_lock_irq(&cm_id_priv->lock);
3035 switch (cm_id_priv->id.state) {
3036 case IB_CM_REQ_SENT:
3037 case IB_CM_MRA_REQ_RCVD:
3038 case IB_CM_REP_SENT:
3039 case IB_CM_MRA_REP_RCVD:
3040 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
3041 fallthrough;
3042 case IB_CM_REQ_RCVD:
3043 case IB_CM_MRA_REQ_SENT:
3044 if (IBA_GET(CM_REJ_REASON, rej_msg) == IB_CM_REJ_STALE_CONN)
3045 cm_enter_timewait(cm_id_priv);
3046 else
3047 cm_reset_to_idle(cm_id_priv);
3048 break;
3049 case IB_CM_DREQ_SENT:
3050 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
3051 fallthrough;
3052 case IB_CM_REP_RCVD:
3053 case IB_CM_MRA_REP_SENT:
3054 cm_enter_timewait(cm_id_priv);
3055 break;
3056 case IB_CM_ESTABLISHED:
3057 if (cm_id_priv->id.lap_state == IB_CM_LAP_UNINIT ||
3058 cm_id_priv->id.lap_state == IB_CM_LAP_SENT) {
3059 if (cm_id_priv->id.lap_state == IB_CM_LAP_SENT)
3060 ib_cancel_mad(cm_id_priv->av.port->mad_agent,
3061 cm_id_priv->msg);
3062 cm_enter_timewait(cm_id_priv);
3063 break;
3064 }
3065 fallthrough;
3066 default:
3067 trace_icm_rej_unknown_err(&cm_id_priv->id);
3068 spin_unlock_irq(&cm_id_priv->lock);
3072 cm_queue_work_unlock(cm_id_priv, work);
3073 return 0;
3074 out:
3075 cm_deref_id(cm_id_priv);
3076 return -EINVAL;
3077 }
3079 int ib_send_cm_mra(struct ib_cm_id *cm_id,
3080 u8 service_timeout,
3081 const void *private_data,
3082 u8 private_data_len)
3083 {
3084 struct cm_id_private *cm_id_priv;
3085 struct ib_mad_send_buf *msg;
3086 enum ib_cm_state cm_state;
3087 enum ib_cm_lap_state lap_state;
3088 enum cm_msg_response msg_response;
3089 void *data;
3090 unsigned long flags;
3091 int ret;
3093 if (private_data && private_data_len > IB_CM_MRA_PRIVATE_DATA_SIZE)
3096 data = cm_copy_private_data(private_data, private_data_len);
3097 if (IS_ERR(data))
3098 return PTR_ERR(data);
3100 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
3102 spin_lock_irqsave(&cm_id_priv->lock, flags);
3103 switch(cm_id_priv->id.state) {
3104 case IB_CM_REQ_RCVD:
3105 cm_state = IB_CM_MRA_REQ_SENT;
3106 lap_state = cm_id->lap_state;
3107 msg_response = CM_MSG_RESPONSE_REQ;
3108 break;
3109 case IB_CM_REP_RCVD:
3110 cm_state = IB_CM_MRA_REP_SENT;
3111 lap_state = cm_id->lap_state;
3112 msg_response = CM_MSG_RESPONSE_REP;
3113 break;
3114 case IB_CM_ESTABLISHED:
3115 if (cm_id->lap_state == IB_CM_LAP_RCVD) {
3116 cm_state = cm_id->state;
3117 lap_state = IB_CM_MRA_LAP_SENT;
3118 msg_response = CM_MSG_RESPONSE_OTHER;
3119 break;
3120 }
3121 fallthrough;
3122 default:
3123 trace_icm_send_mra_unknown_err(&cm_id_priv->id);
3124 ret = -EINVAL;
3125 goto error1;
3126 }
3128 if (!(service_timeout & IB_CM_MRA_FLAG_DELAY)) {
3129 ret = cm_alloc_msg(cm_id_priv, &msg);
3130 if (ret)
3131 goto error1;
3133 cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
3134 msg_response, service_timeout,
3135 private_data, private_data_len);
3136 trace_icm_send_mra(cm_id);
3137 ret = ib_post_send_mad(msg, NULL);
3138 if (ret)
3139 goto error2;
3140 }
3142 cm_id->state = cm_state;
3143 cm_id->lap_state = lap_state;
3144 cm_id_priv->service_timeout = service_timeout;
3145 cm_set_private_data(cm_id_priv, data, private_data_len);
3146 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3147 return 0;
3149 error1: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3150 kfree(data);
3151 return ret;
3153 error2: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3154 kfree(data);
3155 cm_free_msg(msg);
3156 return ret;
3157 }
3158 EXPORT_SYMBOL(ib_send_cm_mra);
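/*
 * Usage sketch (illustrative only, not part of this driver): a receiver that
 * needs more time before replying to a REQ sends an MRA. The service timeout
 * is the IBTA 5-bit 4.096us * 2^n encoding; OR-ing IB_CM_MRA_FLAG_DELAY
 * records the timeout without emitting a MAD now (see the
 * !(service_timeout & IB_CM_MRA_FLAG_DELAY) test above), leaving the MRA to
 * the duplicate-REQ path:
 *
 *	ret = ib_send_cm_mra(cm_id, 24, NULL, 0);
 */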
3160 static struct cm_id_private * cm_acquire_mraed_id(struct cm_mra_msg *mra_msg)
3161 {
3162 switch (IBA_GET(CM_MRA_MESSAGE_MRAED, mra_msg)) {
3163 case CM_MSG_RESPONSE_REQ:
3164 return cm_acquire_id(
3165 cpu_to_be32(IBA_GET(CM_MRA_REMOTE_COMM_ID, mra_msg)),
3166 0);
3167 case CM_MSG_RESPONSE_REP:
3168 case CM_MSG_RESPONSE_OTHER:
3169 return cm_acquire_id(
3170 cpu_to_be32(IBA_GET(CM_MRA_REMOTE_COMM_ID, mra_msg)),
3171 cpu_to_be32(IBA_GET(CM_MRA_LOCAL_COMM_ID, mra_msg)));
3172 default:
3173 return NULL;
3174 }
3175 }
3177 static int cm_mra_handler(struct cm_work *work)
3178 {
3179 struct cm_id_private *cm_id_priv;
3180 struct cm_mra_msg *mra_msg;
3181 int timeout;
3183 mra_msg = (struct cm_mra_msg *)work->mad_recv_wc->recv_buf.mad;
3184 cm_id_priv = cm_acquire_mraed_id(mra_msg);
3185 if (!cm_id_priv)
3186 return -EINVAL;
3188 work->cm_event.private_data =
3189 IBA_GET_MEM_PTR(CM_MRA_PRIVATE_DATA, mra_msg);
3190 work->cm_event.param.mra_rcvd.service_timeout =
3191 IBA_GET(CM_MRA_SERVICE_TIMEOUT, mra_msg);
3192 timeout = cm_convert_to_ms(IBA_GET(CM_MRA_SERVICE_TIMEOUT, mra_msg)) +
3193 cm_convert_to_ms(cm_id_priv->av.timeout);
3195 spin_lock_irq(&cm_id_priv->lock);
3196 switch (cm_id_priv->id.state) {
3197 case IB_CM_REQ_SENT:
3198 if (IBA_GET(CM_MRA_MESSAGE_MRAED, mra_msg) !=
3199 CM_MSG_RESPONSE_REQ ||
3200 ib_modify_mad(cm_id_priv->av.port->mad_agent,
3201 cm_id_priv->msg, timeout))
3202 goto out;
3203 cm_id_priv->id.state = IB_CM_MRA_REQ_RCVD;
3204 break;
3205 case IB_CM_REP_SENT:
3206 if (IBA_GET(CM_MRA_MESSAGE_MRAED, mra_msg) !=
3207 CM_MSG_RESPONSE_REP ||
3208 ib_modify_mad(cm_id_priv->av.port->mad_agent,
3209 cm_id_priv->msg, timeout))
3210 goto out;
3211 cm_id_priv->id.state = IB_CM_MRA_REP_RCVD;
3212 break;
3213 case IB_CM_ESTABLISHED:
3214 if (IBA_GET(CM_MRA_MESSAGE_MRAED, mra_msg) !=
3215 CM_MSG_RESPONSE_OTHER ||
3216 cm_id_priv->id.lap_state != IB_CM_LAP_SENT ||
3217 ib_modify_mad(cm_id_priv->av.port->mad_agent,
3218 cm_id_priv->msg, timeout)) {
3219 if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
3220 atomic_long_inc(&work->port->
3221 counter_group[CM_RECV_DUPLICATES].
3222 counter[CM_MRA_COUNTER]);
3223 goto out;
3224 }
3225 cm_id_priv->id.lap_state = IB_CM_MRA_LAP_RCVD;
3226 break;
3227 case IB_CM_MRA_REQ_RCVD:
3228 case IB_CM_MRA_REP_RCVD:
3229 atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
3230 counter[CM_MRA_COUNTER]);
3231 fallthrough;
3232 default:
3233 trace_icm_mra_unknown_err(&cm_id_priv->id);
3234 goto out;
3235 }
3237 cm_id_priv->msg->context[1] = (void *) (unsigned long)
3238 cm_id_priv->id.state;
3239 cm_queue_work_unlock(cm_id_priv, work);
3240 return 0;
3241 out:
3242 spin_unlock_irq(&cm_id_priv->lock);
3243 cm_deref_id(cm_id_priv);
3244 return -EINVAL;
3245 }
3247 static void cm_format_path_lid_from_lap(struct cm_lap_msg *lap_msg,
3248 struct sa_path_rec *path)
3249 {
3250 u32 lid;
3252 if (path->rec_type != SA_PATH_REC_TYPE_OPA) {
3253 sa_path_set_dlid(path, IBA_GET(CM_LAP_ALTERNATE_LOCAL_PORT_LID,
3254 lap_msg));
3255 sa_path_set_slid(path, IBA_GET(CM_LAP_ALTERNATE_REMOTE_PORT_LID,
3256 lap_msg));
3257 } else {
3258 lid = opa_get_lid_from_gid(IBA_GET_MEM_PTR(
3259 CM_LAP_ALTERNATE_LOCAL_PORT_GID, lap_msg));
3260 sa_path_set_dlid(path, lid);
3262 lid = opa_get_lid_from_gid(IBA_GET_MEM_PTR(
3263 CM_LAP_ALTERNATE_REMOTE_PORT_GID, lap_msg));
3264 sa_path_set_slid(path, lid);
3265 }
3266 }
3268 static void cm_format_path_from_lap(struct cm_id_private *cm_id_priv,
3269 struct sa_path_rec *path,
3270 struct cm_lap_msg *lap_msg)
3271 {
3272 path->dgid = *IBA_GET_MEM_PTR(CM_LAP_ALTERNATE_LOCAL_PORT_GID, lap_msg);
3273 path->sgid =
3274 *IBA_GET_MEM_PTR(CM_LAP_ALTERNATE_REMOTE_PORT_GID, lap_msg);
3275 path->flow_label =
3276 cpu_to_be32(IBA_GET(CM_LAP_ALTERNATE_FLOW_LABEL, lap_msg));
3277 path->hop_limit = IBA_GET(CM_LAP_ALTERNATE_HOP_LIMIT, lap_msg);
3278 path->traffic_class = IBA_GET(CM_LAP_ALTERNATE_TRAFFIC_CLASS, lap_msg);
3279 path->reversible = 1;
3280 path->pkey = cm_id_priv->pkey;
3281 path->sl = IBA_GET(CM_LAP_ALTERNATE_SL, lap_msg);
3282 path->mtu_selector = IB_SA_EQ;
3283 path->mtu = cm_id_priv->path_mtu;
3284 path->rate_selector = IB_SA_EQ;
3285 path->rate = IBA_GET(CM_LAP_ALTERNATE_PACKET_RATE, lap_msg);
3286 path->packet_life_time_selector = IB_SA_EQ;
3287 path->packet_life_time =
3288 IBA_GET(CM_LAP_ALTERNATE_LOCAL_ACK_TIMEOUT, lap_msg);
3289 path->packet_life_time -= (path->packet_life_time > 0);
3290 cm_format_path_lid_from_lap(lap_msg, path);
3291 }
3293 static int cm_lap_handler(struct cm_work *work)
3294 {
3295 struct cm_id_private *cm_id_priv;
3296 struct cm_lap_msg *lap_msg;
3297 struct ib_cm_lap_event_param *param;
3298 struct ib_mad_send_buf *msg = NULL;
3299 int ret;
3301 /* Currently Alternate path messages are not supported for
3302 * RoCE link layer.
3303 */
3304 if (rdma_protocol_roce(work->port->cm_dev->ib_device,
3305 work->port->port_num))
3306 return -EINVAL;
3308 /* todo: verify LAP request and send reject APR if invalid. */
3309 lap_msg = (struct cm_lap_msg *)work->mad_recv_wc->recv_buf.mad;
3310 cm_id_priv = cm_acquire_id(
3311 cpu_to_be32(IBA_GET(CM_LAP_REMOTE_COMM_ID, lap_msg)),
3312 cpu_to_be32(IBA_GET(CM_LAP_LOCAL_COMM_ID, lap_msg)));
3313 if (!cm_id_priv)
3314 return -EINVAL;
3316 param = &work->cm_event.param.lap_rcvd;
3317 memset(&work->path[0], 0, sizeof(work->path[1]));
3318 cm_path_set_rec_type(work->port->cm_dev->ib_device,
3319 work->port->port_num, &work->path[0],
3320 IBA_GET_MEM_PTR(CM_LAP_ALTERNATE_LOCAL_PORT_GID,
3321 lap_msg));
3322 param->alternate_path = &work->path[0];
3323 cm_format_path_from_lap(cm_id_priv, param->alternate_path, lap_msg);
3324 work->cm_event.private_data =
3325 IBA_GET_MEM_PTR(CM_LAP_PRIVATE_DATA, lap_msg);
3327 spin_lock_irq(&cm_id_priv->lock);
3328 if (cm_id_priv->id.state != IB_CM_ESTABLISHED)
3329 goto unlock;
3331 switch (cm_id_priv->id.lap_state) {
3332 case IB_CM_LAP_UNINIT:
3333 case IB_CM_LAP_IDLE:
3334 break;
3335 case IB_CM_MRA_LAP_SENT:
3336 atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
3337 counter[CM_LAP_COUNTER]);
3338 msg = cm_alloc_response_msg_no_ah(work->port, work->mad_recv_wc);
3339 if (IS_ERR(msg))
3340 goto unlock;
3342 cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
3343 CM_MSG_RESPONSE_OTHER,
3344 cm_id_priv->service_timeout,
3345 cm_id_priv->private_data,
3346 cm_id_priv->private_data_len);
3347 spin_unlock_irq(&cm_id_priv->lock);
3349 if (cm_create_response_msg_ah(work->port, work->mad_recv_wc, msg) ||
3350 ib_post_send_mad(msg, NULL))
3351 cm_free_msg(msg);
3352 goto deref;
3353 case IB_CM_LAP_RCVD:
3354 atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
3355 counter[CM_LAP_COUNTER]);
3356 goto unlock;
3357 default:
3358 goto unlock;
3359 }
3361 ret = cm_init_av_for_lap(work->port, work->mad_recv_wc->wc,
3362 work->mad_recv_wc->recv_buf.grh,
3363 &cm_id_priv->av);
3364 if (ret)
3365 goto unlock;
3367 ret = cm_init_av_by_path(param->alternate_path, NULL,
3368 &cm_id_priv->alt_av, cm_id_priv);
3372 cm_id_priv->id.lap_state = IB_CM_LAP_RCVD;
3373 cm_id_priv->tid = lap_msg->hdr.tid;
3374 cm_queue_work_unlock(cm_id_priv, work);
3375 return 0;
3377 unlock: spin_unlock_irq(&cm_id_priv->lock);
3378 deref: cm_deref_id(cm_id_priv);
3379 return -EINVAL;
3380 }
3382 static int cm_apr_handler(struct cm_work *work)
3383 {
3384 struct cm_id_private *cm_id_priv;
3385 struct cm_apr_msg *apr_msg;
3387 /* Currently Alternate path messages are not supported for
3388 * RoCE link layer.
3389 */
3390 if (rdma_protocol_roce(work->port->cm_dev->ib_device,
3391 work->port->port_num))
3392 return -EINVAL;
3394 apr_msg = (struct cm_apr_msg *)work->mad_recv_wc->recv_buf.mad;
3395 cm_id_priv = cm_acquire_id(
3396 cpu_to_be32(IBA_GET(CM_APR_REMOTE_COMM_ID, apr_msg)),
3397 cpu_to_be32(IBA_GET(CM_APR_LOCAL_COMM_ID, apr_msg)));
3398 if (!cm_id_priv)
3399 return -EINVAL; /* Unmatched reply. */
3401 work->cm_event.param.apr_rcvd.ap_status =
3402 IBA_GET(CM_APR_AR_STATUS, apr_msg);
3403 work->cm_event.param.apr_rcvd.apr_info =
3404 IBA_GET_MEM_PTR(CM_APR_ADDITIONAL_INFORMATION, apr_msg);
3405 work->cm_event.param.apr_rcvd.info_len =
3406 IBA_GET(CM_APR_ADDITIONAL_INFORMATION_LENGTH, apr_msg);
3407 work->cm_event.private_data =
3408 IBA_GET_MEM_PTR(CM_APR_PRIVATE_DATA, apr_msg);
3410 spin_lock_irq(&cm_id_priv->lock);
3411 if (cm_id_priv->id.state != IB_CM_ESTABLISHED ||
3412 (cm_id_priv->id.lap_state != IB_CM_LAP_SENT &&
3413 cm_id_priv->id.lap_state != IB_CM_MRA_LAP_RCVD)) {
3414 spin_unlock_irq(&cm_id_priv->lock);
3417 cm_id_priv->id.lap_state = IB_CM_LAP_IDLE;
3418 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
3419 cm_id_priv->msg = NULL;
3420 cm_queue_work_unlock(cm_id_priv, work);
3421 return 0;
3422 out:
3423 cm_deref_id(cm_id_priv);
3424 return -EINVAL;
3425 }
3427 static int cm_timewait_handler(struct cm_work *work)
3428 {
3429 struct cm_timewait_info *timewait_info;
3430 struct cm_id_private *cm_id_priv;
3432 timewait_info = container_of(work, struct cm_timewait_info, work);
3433 spin_lock_irq(&cm.lock);
3434 list_del(&timewait_info->list);
3435 spin_unlock_irq(&cm.lock);
3437 cm_id_priv = cm_acquire_id(timewait_info->work.local_id,
3438 timewait_info->work.remote_id);
3439 if (!cm_id_priv)
3440 return -EINVAL;
3442 spin_lock_irq(&cm_id_priv->lock);
3443 if (cm_id_priv->id.state != IB_CM_TIMEWAIT ||
3444 cm_id_priv->remote_qpn != timewait_info->remote_qpn) {
3445 spin_unlock_irq(&cm_id_priv->lock);
3448 cm_id_priv->id.state = IB_CM_IDLE;
3449 cm_queue_work_unlock(cm_id_priv, work);
3450 return 0;
3451 out:
3452 cm_deref_id(cm_id_priv);
3453 return -EINVAL;
3454 }
3456 static void cm_format_sidr_req(struct cm_sidr_req_msg *sidr_req_msg,
3457 struct cm_id_private *cm_id_priv,
3458 struct ib_cm_sidr_req_param *param)
3459 {
3460 cm_format_mad_hdr(&sidr_req_msg->hdr, CM_SIDR_REQ_ATTR_ID,
3461 cm_form_tid(cm_id_priv));
3462 IBA_SET(CM_SIDR_REQ_REQUESTID, sidr_req_msg,
3463 be32_to_cpu(cm_id_priv->id.local_id));
3464 IBA_SET(CM_SIDR_REQ_PARTITION_KEY, sidr_req_msg,
3465 be16_to_cpu(param->path->pkey));
3466 IBA_SET(CM_SIDR_REQ_SERVICEID, sidr_req_msg,
3467 be64_to_cpu(param->service_id));
3469 if (param->private_data && param->private_data_len)
3470 IBA_SET_MEM(CM_SIDR_REQ_PRIVATE_DATA, sidr_req_msg,
3471 param->private_data, param->private_data_len);
3472 }
3474 int ib_send_cm_sidr_req(struct ib_cm_id *cm_id,
3475 struct ib_cm_sidr_req_param *param)
3476 {
3477 struct cm_id_private *cm_id_priv;
3478 struct ib_mad_send_buf *msg;
3479 unsigned long flags;
3480 int ret;
3482 if (!param->path || (param->private_data &&
3483 param->private_data_len > IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE))
3484 return -EINVAL;
3486 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
3487 ret = cm_init_av_by_path(param->path, param->sgid_attr,
3488 &cm_id_priv->av,
3489 cm_id_priv);
3490 if (ret)
3491 goto out;
3493 cm_id->service_id = param->service_id;
3494 cm_id->service_mask = ~cpu_to_be64(0);
3495 cm_id_priv->timeout_ms = param->timeout_ms;
3496 cm_id_priv->max_cm_retries = param->max_cm_retries;
3497 ret = cm_alloc_msg(cm_id_priv, &msg);
3498 if (ret)
3499 goto out;
3501 cm_format_sidr_req((struct cm_sidr_req_msg *) msg->mad, cm_id_priv,
3502 param);
3503 msg->timeout_ms = cm_id_priv->timeout_ms;
3504 msg->context[1] = (void *) (unsigned long) IB_CM_SIDR_REQ_SENT;
3506 spin_lock_irqsave(&cm_id_priv->lock, flags);
3507 if (cm_id->state == IB_CM_IDLE) {
3508 trace_icm_send_sidr_req(&cm_id_priv->id);
3509 ret = ib_post_send_mad(msg, NULL);
3510 } else {
3511 ret = -EINVAL;
3512 }
3514 if (ret) {
3515 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3516 cm_free_msg(msg);
3517 goto out;
3518 }
3519 cm_id->state = IB_CM_SIDR_REQ_SENT;
3520 cm_id_priv->msg = msg;
3521 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3522 out:
3523 return ret;
3524 }
3525 EXPORT_SYMBOL(ib_send_cm_sidr_req);
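/*
 * Usage sketch (illustrative only, not part of this driver): querying a
 * service ID resolver over a path the caller already resolved. "rec" and
 * MY_SERVICE_ID are hypothetical:
 *
 *	struct ib_cm_sidr_req_param param = {};
 *
 *	param.path = &rec;
 *	param.service_id = cpu_to_be64(MY_SERVICE_ID);
 *	param.timeout_ms = 1000;
 *	param.max_cm_retries = 3;
 *	ret = ib_send_cm_sidr_req(cm_id, &param);
 */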
3527 static void cm_format_sidr_req_event(struct cm_work *work,
3528 const struct cm_id_private *rx_cm_id,
3529 struct ib_cm_id *listen_id)
3530 {
3531 struct cm_sidr_req_msg *sidr_req_msg;
3532 struct ib_cm_sidr_req_event_param *param;
3534 sidr_req_msg = (struct cm_sidr_req_msg *)
3535 work->mad_recv_wc->recv_buf.mad;
3536 param = &work->cm_event.param.sidr_req_rcvd;
3537 param->pkey = IBA_GET(CM_SIDR_REQ_PARTITION_KEY, sidr_req_msg);
3538 param->listen_id = listen_id;
3539 param->service_id =
3540 cpu_to_be64(IBA_GET(CM_SIDR_REQ_SERVICEID, sidr_req_msg));
3541 param->bth_pkey = cm_get_bth_pkey(work);
3542 param->port = work->port->port_num;
3543 param->sgid_attr = rx_cm_id->av.ah_attr.grh.sgid_attr;
3544 work->cm_event.private_data =
3545 IBA_GET_MEM_PTR(CM_SIDR_REQ_PRIVATE_DATA, sidr_req_msg);
3546 }
3548 static int cm_sidr_req_handler(struct cm_work *work)
3549 {
3550 struct cm_id_private *cm_id_priv, *listen_cm_id_priv;
3551 struct cm_sidr_req_msg *sidr_req_msg;
3552 struct ib_wc *wc;
3553 int ret;
3555 cm_id_priv =
3556 cm_alloc_id_priv(work->port->cm_dev->ib_device, NULL, NULL);
3557 if (IS_ERR(cm_id_priv))
3558 return PTR_ERR(cm_id_priv);
3560 /* Record SGID/SLID and request ID for lookup. */
3561 sidr_req_msg = (struct cm_sidr_req_msg *)
3562 work->mad_recv_wc->recv_buf.mad;
3564 cm_id_priv->id.remote_id =
3565 cpu_to_be32(IBA_GET(CM_SIDR_REQ_REQUESTID, sidr_req_msg));
3566 cm_id_priv->id.service_id =
3567 cpu_to_be64(IBA_GET(CM_SIDR_REQ_SERVICEID, sidr_req_msg));
3568 cm_id_priv->id.service_mask = ~cpu_to_be64(0);
3569 cm_id_priv->tid = sidr_req_msg->hdr.tid;
3571 wc = work->mad_recv_wc->wc;
3572 cm_id_priv->av.dgid.global.subnet_prefix = cpu_to_be64(wc->slid);
3573 cm_id_priv->av.dgid.global.interface_id = 0;
3574 ret = cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
3575 work->mad_recv_wc->recv_buf.grh,
3576 &cm_id_priv->av);
3577 if (ret)
3578 goto out;
3580 spin_lock_irq(&cm.lock);
3581 listen_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
3582 if (listen_cm_id_priv) {
3583 spin_unlock_irq(&cm.lock);
3584 atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
3585 counter[CM_SIDR_REQ_COUNTER]);
3586 goto out; /* Duplicate message. */
3587 }
3588 cm_id_priv->id.state = IB_CM_SIDR_REQ_RCVD;
3589 listen_cm_id_priv = cm_find_listen(cm_id_priv->id.device,
3590 cm_id_priv->id.service_id);
3591 if (!listen_cm_id_priv) {
3592 spin_unlock_irq(&cm.lock);
3593 ib_send_cm_sidr_rep(&cm_id_priv->id,
3594 &(struct ib_cm_sidr_rep_param){
3595 .status = IB_SIDR_UNSUPPORTED });
3596 goto out; /* No match. */
3597 }
3598 spin_unlock_irq(&cm.lock);
3600 cm_id_priv->id.cm_handler = listen_cm_id_priv->id.cm_handler;
3601 cm_id_priv->id.context = listen_cm_id_priv->id.context;
3603 /*
3604 * A SIDR ID does not need to be in the xarray since it does not receive
3605 * mads, is not placed in the remote_id or remote_qpn rbtree, and does
3606 * not enter timewait.
3607 */
3609 cm_format_sidr_req_event(work, cm_id_priv, &listen_cm_id_priv->id);
3610 ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &work->cm_event);
3612 /*
3613 * A pointer to the listen_cm_id is held in the event, so this deref
3614 * must be after the event is delivered above.
3615 */
3616 cm_deref_id(listen_cm_id_priv);
3617 if (ret)
3618 cm_destroy_id(&cm_id_priv->id, ret);
3621 ib_destroy_cm_id(&cm_id_priv->id);
3625 static void cm_format_sidr_rep(struct cm_sidr_rep_msg *sidr_rep_msg,
3626 struct cm_id_private *cm_id_priv,
3627 struct ib_cm_sidr_rep_param *param)
3628 {
3629 cm_format_mad_ece_hdr(&sidr_rep_msg->hdr, CM_SIDR_REP_ATTR_ID,
3630 cm_id_priv->tid, param->ece.attr_mod);
3631 IBA_SET(CM_SIDR_REP_REQUESTID, sidr_rep_msg,
3632 be32_to_cpu(cm_id_priv->id.remote_id));
3633 IBA_SET(CM_SIDR_REP_STATUS, sidr_rep_msg, param->status);
3634 IBA_SET(CM_SIDR_REP_QPN, sidr_rep_msg, param->qp_num);
3635 IBA_SET(CM_SIDR_REP_SERVICEID, sidr_rep_msg,
3636 be64_to_cpu(cm_id_priv->id.service_id));
3637 IBA_SET(CM_SIDR_REP_Q_KEY, sidr_rep_msg, param->qkey);
3638 IBA_SET(CM_SIDR_REP_VENDOR_ID_L, sidr_rep_msg,
3639 param->ece.vendor_id & 0xFF);
3640 IBA_SET(CM_SIDR_REP_VENDOR_ID_H, sidr_rep_msg,
3641 (param->ece.vendor_id >> 8) & 0xFF);
3643 if (param->info && param->info_length)
3644 IBA_SET_MEM(CM_SIDR_REP_ADDITIONAL_INFORMATION, sidr_rep_msg,
3645 param->info, param->info_length);
3647 if (param->private_data && param->private_data_len)
3648 IBA_SET_MEM(CM_SIDR_REP_PRIVATE_DATA, sidr_rep_msg,
3649 param->private_data, param->private_data_len);
3650 }
3652 static int cm_send_sidr_rep_locked(struct cm_id_private *cm_id_priv,
3653 struct ib_cm_sidr_rep_param *param)
3654 {
3655 struct ib_mad_send_buf *msg;
3656 unsigned long flags;
3657 int ret;
3659 lockdep_assert_held(&cm_id_priv->lock);
3661 if ((param->info && param->info_length > IB_CM_SIDR_REP_INFO_LENGTH) ||
3662 (param->private_data &&
3663 param->private_data_len > IB_CM_SIDR_REP_PRIVATE_DATA_SIZE))
3664 return -EINVAL;
3666 if (cm_id_priv->id.state != IB_CM_SIDR_REQ_RCVD)
3667 return -EINVAL;
3669 ret = cm_alloc_msg(cm_id_priv, &msg);
3670 if (ret)
3671 return ret;
3673 cm_format_sidr_rep((struct cm_sidr_rep_msg *) msg->mad, cm_id_priv,
3674 param);
3675 trace_icm_send_sidr_rep(&cm_id_priv->id);
3676 ret = ib_post_send_mad(msg, NULL);
3677 if (ret) {
3678 cm_free_msg(msg);
3679 return ret;
3680 }
3681 cm_id_priv->id.state = IB_CM_IDLE;
3682 spin_lock_irqsave(&cm.lock, flags);
3683 if (!RB_EMPTY_NODE(&cm_id_priv->sidr_id_node)) {
3684 rb_erase(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
3685 RB_CLEAR_NODE(&cm_id_priv->sidr_id_node);
3687 spin_unlock_irqrestore(&cm.lock, flags);
3691 int ib_send_cm_sidr_rep(struct ib_cm_id *cm_id,
3692 struct ib_cm_sidr_rep_param *param)
3693 {
3694 struct cm_id_private *cm_id_priv =
3695 container_of(cm_id, struct cm_id_private, id);
3696 unsigned long flags;
3697 int ret;
3699 spin_lock_irqsave(&cm_id_priv->lock, flags);
3700 ret = cm_send_sidr_rep_locked(cm_id_priv, param);
3701 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3702 return ret;
3703 }
3704 EXPORT_SYMBOL(ib_send_cm_sidr_rep);
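/*
 * Usage sketch (illustrative only, not part of this driver): a listener
 * answering IB_CM_SIDR_REQ_RECEIVED with the UD QP that provides the
 * service. "my_ud_qp" and "my_qkey" are hypothetical consumer state:
 *
 *	struct ib_cm_sidr_rep_param rep = {
 *		.qp_num = my_ud_qp->qp_num,
 *		.qkey = my_qkey,
 *		.status = IB_SIDR_SUCCESS,
 *	};
 *
 *	ret = ib_send_cm_sidr_rep(cm_id, &rep);
 */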
3706 static void cm_format_sidr_rep_event(struct cm_work *work,
3707 const struct cm_id_private *cm_id_priv)
3708 {
3709 struct cm_sidr_rep_msg *sidr_rep_msg;
3710 struct ib_cm_sidr_rep_event_param *param;
3712 sidr_rep_msg = (struct cm_sidr_rep_msg *)
3713 work->mad_recv_wc->recv_buf.mad;
3714 param = &work->cm_event.param.sidr_rep_rcvd;
3715 param->status = IBA_GET(CM_SIDR_REP_STATUS, sidr_rep_msg);
3716 param->qkey = IBA_GET(CM_SIDR_REP_Q_KEY, sidr_rep_msg);
3717 param->qpn = IBA_GET(CM_SIDR_REP_QPN, sidr_rep_msg);
3718 param->info = IBA_GET_MEM_PTR(CM_SIDR_REP_ADDITIONAL_INFORMATION,
3719 sidr_rep_msg);
3720 param->info_len = IBA_GET(CM_SIDR_REP_ADDITIONAL_INFORMATION_LENGTH,
3721 sidr_rep_msg);
3722 param->sgid_attr = cm_id_priv->av.ah_attr.grh.sgid_attr;
3723 work->cm_event.private_data =
3724 IBA_GET_MEM_PTR(CM_SIDR_REP_PRIVATE_DATA, sidr_rep_msg);
3725 }
3727 static int cm_sidr_rep_handler(struct cm_work *work)
3728 {
3729 struct cm_sidr_rep_msg *sidr_rep_msg;
3730 struct cm_id_private *cm_id_priv;
3732 sidr_rep_msg = (struct cm_sidr_rep_msg *)
3733 work->mad_recv_wc->recv_buf.mad;
3734 cm_id_priv = cm_acquire_id(
3735 cpu_to_be32(IBA_GET(CM_SIDR_REP_REQUESTID, sidr_rep_msg)), 0);
3736 if (!cm_id_priv)
3737 return -EINVAL; /* Unmatched reply. */
3739 spin_lock_irq(&cm_id_priv->lock);
3740 if (cm_id_priv->id.state != IB_CM_SIDR_REQ_SENT) {
3741 spin_unlock_irq(&cm_id_priv->lock);
3744 cm_id_priv->id.state = IB_CM_IDLE;
3745 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
3746 spin_unlock_irq(&cm_id_priv->lock);
3748 cm_format_sidr_rep_event(work, cm_id_priv);
3749 cm_process_work(cm_id_priv, work);
3750 return 0;
3751 out:
3752 cm_deref_id(cm_id_priv);
3753 return -EINVAL;
3754 }
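/*
 * Editor's note on the function below (summary of the visible logic, not
 * upstream wording): cm_process_send_error() runs on a failed send
 * completion. If the MAD is still the one the ID is waiting on, the stuck
 * state is rolled back (idle or timewait) and reported to the consumer as
 * the matching *_ERROR event; stale sends are simply discarded.
 */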
3756 static void cm_process_send_error(struct ib_mad_send_buf *msg,
3757 enum ib_wc_status wc_status)
3758 {
3759 struct cm_id_private *cm_id_priv;
3760 struct ib_cm_event cm_event;
3761 enum ib_cm_state state;
3762 int ret;
3764 memset(&cm_event, 0, sizeof cm_event);
3765 cm_id_priv = msg->context[0];
3767 /* Discard old sends or ones without a response. */
3768 spin_lock_irq(&cm_id_priv->lock);
3769 state = (enum ib_cm_state) (unsigned long) msg->context[1];
3770 if (msg != cm_id_priv->msg || state != cm_id_priv->id.state)
3771 goto discard;
3773 trace_icm_mad_send_err(state, wc_status);
3774 switch (state) {
3775 case IB_CM_REQ_SENT:
3776 case IB_CM_MRA_REQ_RCVD:
3777 cm_reset_to_idle(cm_id_priv);
3778 cm_event.event = IB_CM_REQ_ERROR;
3779 break;
3780 case IB_CM_REP_SENT:
3781 case IB_CM_MRA_REP_RCVD:
3782 cm_reset_to_idle(cm_id_priv);
3783 cm_event.event = IB_CM_REP_ERROR;
3784 break;
3785 case IB_CM_DREQ_SENT:
3786 cm_enter_timewait(cm_id_priv);
3787 cm_event.event = IB_CM_DREQ_ERROR;
3788 break;
3789 case IB_CM_SIDR_REQ_SENT:
3790 cm_id_priv->id.state = IB_CM_IDLE;
3791 cm_event.event = IB_CM_SIDR_REQ_ERROR;
3792 break;
3793 default:
3794 goto discard;
3795 }
3796 spin_unlock_irq(&cm_id_priv->lock);
3797 cm_event.param.send_status = wc_status;
3799 /* No other events can occur on the cm_id at this point. */
3800 ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &cm_event);
3801 cm_free_msg(msg);
3802 if (ret)
3803 ib_destroy_cm_id(&cm_id_priv->id);
3806 spin_unlock_irq(&cm_id_priv->lock);
3810 static void cm_send_handler(struct ib_mad_agent *mad_agent,
3811 struct ib_mad_send_wc *mad_send_wc)
3812 {
3813 struct ib_mad_send_buf *msg = mad_send_wc->send_buf;
3814 struct cm_port *port;
3815 u16 attr_index;
3817 port = mad_agent->context;
3818 attr_index = be16_to_cpu(((struct ib_mad_hdr *)
3819 msg->mad)->attr_id) - CM_ATTR_ID_OFFSET;
3821 /*
3822 * If the send was in response to a received message (context[0] is not
3823 * set to a cm_id), and is not a REJ, then it is a send that was
3824 * manually retried.
3825 */
3826 if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
3829 atomic_long_add(1 + msg->retries,
3830 &port->counter_group[CM_XMIT].counter[attr_index]);
3832 atomic_long_add(msg->retries,
3833 &port->counter_group[CM_XMIT_RETRIES].
3834 counter[attr_index]);
3836 switch (mad_send_wc->status) {
3837 case IB_WC_SUCCESS:
3838 case IB_WC_WR_FLUSH_ERR:
3839 cm_free_msg(msg);
3840 break;
3841 default:
3842 if (msg->context[0] && msg->context[1])
3843 cm_process_send_error(msg, mad_send_wc->status);
3844 else
3845 cm_free_msg(msg);
3846 break;
3847 }
3848 }
3850 static void cm_work_handler(struct work_struct *_work)
3851 {
3852 struct cm_work *work = container_of(_work, struct cm_work, work.work);
3853 int ret;
3855 switch (work->cm_event.event) {
3856 case IB_CM_REQ_RECEIVED:
3857 ret = cm_req_handler(work);
3858 break;
3859 case IB_CM_MRA_RECEIVED:
3860 ret = cm_mra_handler(work);
3861 break;
3862 case IB_CM_REJ_RECEIVED:
3863 ret = cm_rej_handler(work);
3864 break;
3865 case IB_CM_REP_RECEIVED:
3866 ret = cm_rep_handler(work);
3867 break;
3868 case IB_CM_RTU_RECEIVED:
3869 ret = cm_rtu_handler(work);
3870 break;
3871 case IB_CM_USER_ESTABLISHED:
3872 ret = cm_establish_handler(work);
3873 break;
3874 case IB_CM_DREQ_RECEIVED:
3875 ret = cm_dreq_handler(work);
3876 break;
3877 case IB_CM_DREP_RECEIVED:
3878 ret = cm_drep_handler(work);
3879 break;
3880 case IB_CM_SIDR_REQ_RECEIVED:
3881 ret = cm_sidr_req_handler(work);
3882 break;
3883 case IB_CM_SIDR_REP_RECEIVED:
3884 ret = cm_sidr_rep_handler(work);
3885 break;
3886 case IB_CM_LAP_RECEIVED:
3887 ret = cm_lap_handler(work);
3888 break;
3889 case IB_CM_APR_RECEIVED:
3890 ret = cm_apr_handler(work);
3891 break;
3892 case IB_CM_TIMEWAIT_EXIT:
3893 ret = cm_timewait_handler(work);
3894 break;
3895 default:
3896 trace_icm_handler_err(work->cm_event.event);
3897 ret = -EINVAL;
3898 break;
3899 }
3900 if (ret)
3901 cm_free_work(work);
3902 }
3904 static int cm_establish(struct ib_cm_id *cm_id)
3905 {
3906 struct cm_id_private *cm_id_priv;
3907 struct cm_work *work;
3908 unsigned long flags;
3909 int ret;
3910 struct cm_device *cm_dev;
3912 cm_dev = ib_get_client_data(cm_id->device, &cm_client);
3913 if (!cm_dev)
3914 return -ENODEV;
3916 work = kmalloc(sizeof *work, GFP_ATOMIC);
3917 if (!work)
3918 return -ENOMEM;
3920 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
3921 spin_lock_irqsave(&cm_id_priv->lock, flags);
3922 switch (cm_id->state)
3923 {
3924 case IB_CM_REP_SENT:
3925 case IB_CM_MRA_REP_RCVD:
3926 cm_id->state = IB_CM_ESTABLISHED;
3927 break;
3928 case IB_CM_ESTABLISHED:
3929 ret = -EISCONN;
3930 break;
3931 default:
3932 trace_icm_establish_err(cm_id);
3933 ret = -EINVAL;
3934 break;
3935 }
3936 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3938 if (ret) {
3939 kfree(work);
3940 goto out;
3941 }
3943 /*
3944 * The CM worker thread may try to destroy the cm_id before it
3945 * can execute this work item. To prevent potential deadlock,
3946 * we need to find the cm_id once we're in the context of the
3947 * worker thread, rather than holding a reference on it.
3948 */
3949 INIT_DELAYED_WORK(&work->work, cm_work_handler);
3950 work->local_id = cm_id->local_id;
3951 work->remote_id = cm_id->remote_id;
3952 work->mad_recv_wc = NULL;
3953 work->cm_event.event = IB_CM_USER_ESTABLISHED;
3955 /* Check if the device started its remove_one */
3956 spin_lock_irqsave(&cm.lock, flags);
3957 if (!cm_dev->going_down) {
3958 queue_delayed_work(cm.wq, &work->work, 0);
3963 spin_unlock_irqrestore(&cm.lock, flags);
3969 static int cm_migrate(struct ib_cm_id *cm_id)
3970 {
3971 struct cm_id_private *cm_id_priv;
3972 struct cm_av tmp_av;
3973 unsigned long flags;
3974 int tmp_send_port_not_ready;
3975 int ret = 0;
3977 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
3978 spin_lock_irqsave(&cm_id_priv->lock, flags);
3979 if (cm_id->state == IB_CM_ESTABLISHED &&
3980 (cm_id->lap_state == IB_CM_LAP_UNINIT ||
3981 cm_id->lap_state == IB_CM_LAP_IDLE)) {
3982 cm_id->lap_state = IB_CM_LAP_IDLE;
3983 /* Swap address vector */
3984 tmp_av = cm_id_priv->av;
3985 cm_id_priv->av = cm_id_priv->alt_av;
3986 cm_id_priv->alt_av = tmp_av;
3987 /* Swap port send ready state */
3988 tmp_send_port_not_ready = cm_id_priv->prim_send_port_not_ready;
3989 cm_id_priv->prim_send_port_not_ready = cm_id_priv->altr_send_port_not_ready;
3990 cm_id_priv->altr_send_port_not_ready = tmp_send_port_not_ready;
3991 } else
3992 ret = -EINVAL;
3993 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3995 return ret;
3996 }
3998 int ib_cm_notify(struct ib_cm_id *cm_id, enum ib_event_type event)
3999 {
4000 int ret;
4002 switch (event) {
4003 case IB_EVENT_COMM_EST:
4004 ret = cm_establish(cm_id);
4005 break;
4006 case IB_EVENT_PATH_MIG:
4007 ret = cm_migrate(cm_id);
4008 break;
4009 default:
4010 ret = -EINVAL;
4011 }
4012 return ret;
4013 }
4014 EXPORT_SYMBOL(ib_cm_notify);
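/*
 * Usage sketch (illustrative only, not part of this driver): a consumer
 * forwards QP async events so the CM can settle state, e.g. when the first
 * receive beats the RTU. The qp_context wiring is hypothetical:
 *
 *	static void my_qp_event_handler(struct ib_event *ev, void *ctx)
 *	{
 *		struct ib_cm_id *cm_id = ctx;
 *
 *		if (ev->event == IB_EVENT_COMM_EST)
 *			ib_cm_notify(cm_id, ev->event);
 *	}
 */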
4016 static void cm_recv_handler(struct ib_mad_agent *mad_agent,
4017 struct ib_mad_send_buf *send_buf,
4018 struct ib_mad_recv_wc *mad_recv_wc)
4019 {
4020 struct cm_port *port = mad_agent->context;
4021 struct cm_work *work;
4022 enum ib_cm_event_type event;
4023 bool alt_path = false;
4024 u16 attr_id;
4025 int paths = 0;
4026 int going_down = 0;
4028 switch (mad_recv_wc->recv_buf.mad->mad_hdr.attr_id) {
4029 case CM_REQ_ATTR_ID:
4030 alt_path = cm_req_has_alt_path((struct cm_req_msg *)
4031 mad_recv_wc->recv_buf.mad);
4032 paths = 1 + (alt_path != 0);
4033 event = IB_CM_REQ_RECEIVED;
4034 break;
4035 case CM_MRA_ATTR_ID:
4036 event = IB_CM_MRA_RECEIVED;
4037 break;
4038 case CM_REJ_ATTR_ID:
4039 event = IB_CM_REJ_RECEIVED;
4040 break;
4041 case CM_REP_ATTR_ID:
4042 event = IB_CM_REP_RECEIVED;
4043 break;
4044 case CM_RTU_ATTR_ID:
4045 event = IB_CM_RTU_RECEIVED;
4046 break;
4047 case CM_DREQ_ATTR_ID:
4048 event = IB_CM_DREQ_RECEIVED;
4049 break;
4050 case CM_DREP_ATTR_ID:
4051 event = IB_CM_DREP_RECEIVED;
4052 break;
4053 case CM_SIDR_REQ_ATTR_ID:
4054 event = IB_CM_SIDR_REQ_RECEIVED;
4055 break;
4056 case CM_SIDR_REP_ATTR_ID:
4057 event = IB_CM_SIDR_REP_RECEIVED;
4058 break;
4059 case CM_LAP_ATTR_ID:
4060 paths = 1;
4061 event = IB_CM_LAP_RECEIVED;
4062 break;
4063 case CM_APR_ATTR_ID:
4064 event = IB_CM_APR_RECEIVED;
4065 break;
4066 default:
4067 ib_free_recv_mad(mad_recv_wc);
4068 return;
4069 }
4071 attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
4072 atomic_long_inc(&port->counter_group[CM_RECV].
4073 counter[attr_id - CM_ATTR_ID_OFFSET]);
4075 work = kmalloc(struct_size(work, path, paths), GFP_KERNEL);
4077 ib_free_recv_mad(mad_recv_wc);
4081 INIT_DELAYED_WORK(&work->work, cm_work_handler);
4082 work->cm_event.event = event;
4083 work->mad_recv_wc = mad_recv_wc;
4086 /* Check if the device started its remove_one */
4087 spin_lock_irq(&cm.lock);
4088 if (!port->cm_dev->going_down)
4089 queue_delayed_work(cm.wq, &work->work, 0);
4092 spin_unlock_irq(&cm.lock);
4096 ib_free_recv_mad(mad_recv_wc);
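
/*
 * cm_init_qp_init_attr - fill in the QP attributes needed to move the
 * consumer's QP into the INIT state for the current connection.
 */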
static int cm_init_qp_init_attr(struct cm_id_private *cm_id_priv,
				struct ib_qp_attr *qp_attr,
				int *qp_attr_mask)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->id.state) {
	case IB_CM_REQ_SENT:
	case IB_CM_MRA_REQ_RCVD:
	case IB_CM_REQ_RCVD:
	case IB_CM_MRA_REQ_SENT:
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
	case IB_CM_ESTABLISHED:
		*qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS |
				IB_QP_PKEY_INDEX | IB_QP_PORT;
		qp_attr->qp_access_flags = IB_ACCESS_REMOTE_WRITE;
		if (cm_id_priv->responder_resources)
			qp_attr->qp_access_flags |= IB_ACCESS_REMOTE_READ |
						    IB_ACCESS_REMOTE_ATOMIC;
		qp_attr->pkey_index = cm_id_priv->av.pkey_index;
		qp_attr->port_num = cm_id_priv->av.port->port_num;
		ret = 0;
		break;
	default:
		trace_icm_qp_init_err(&cm_id_priv->id);
		ret = -EINVAL;
		break;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
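
/*
 * cm_init_qp_rtr_attr - fill in the QP attributes needed to move the
 * consumer's QP into the RTR state, including the alternate path when a
 * remote LID has been resolved for it.
 */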
static int cm_init_qp_rtr_attr(struct cm_id_private *cm_id_priv,
			       struct ib_qp_attr *qp_attr,
			       int *qp_attr_mask)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->id.state) {
	case IB_CM_REQ_RCVD:
	case IB_CM_MRA_REQ_SENT:
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
	case IB_CM_ESTABLISHED:
		*qp_attr_mask = IB_QP_STATE | IB_QP_AV | IB_QP_PATH_MTU |
				IB_QP_DEST_QPN | IB_QP_RQ_PSN;
		qp_attr->ah_attr = cm_id_priv->av.ah_attr;
		qp_attr->path_mtu = cm_id_priv->path_mtu;
		qp_attr->dest_qp_num = be32_to_cpu(cm_id_priv->remote_qpn);
		qp_attr->rq_psn = be32_to_cpu(cm_id_priv->rq_psn);
		if (cm_id_priv->qp_type == IB_QPT_RC ||
		    cm_id_priv->qp_type == IB_QPT_XRC_TGT) {
			*qp_attr_mask |= IB_QP_MAX_DEST_RD_ATOMIC |
					 IB_QP_MIN_RNR_TIMER;
			qp_attr->max_dest_rd_atomic =
					cm_id_priv->responder_resources;
			qp_attr->min_rnr_timer = 0;
		}
		if (rdma_ah_get_dlid(&cm_id_priv->alt_av.ah_attr)) {
			*qp_attr_mask |= IB_QP_ALT_PATH;
			qp_attr->alt_port_num = cm_id_priv->alt_av.port->port_num;
			qp_attr->alt_pkey_index = cm_id_priv->alt_av.pkey_index;
			qp_attr->alt_timeout = cm_id_priv->alt_av.timeout;
			qp_attr->alt_ah_attr = cm_id_priv->alt_av.ah_attr;
		}
		ret = 0;
		break;
	default:
		trace_icm_qp_rtr_err(&cm_id_priv->id);
		ret = -EINVAL;
		break;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
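
/*
 * cm_init_qp_rts_attr - fill in the QP attributes needed to move the
 * consumer's QP into the RTS state, or to rearm path migration once a
 * LAP has been handled.
 */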
static int cm_init_qp_rts_attr(struct cm_id_private *cm_id_priv,
			       struct ib_qp_attr *qp_attr,
			       int *qp_attr_mask)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->id.state) {
	/* Allow transition to RTS before sending REP */
	case IB_CM_REQ_RCVD:
	case IB_CM_MRA_REQ_SENT:
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
	case IB_CM_ESTABLISHED:
		if (cm_id_priv->id.lap_state == IB_CM_LAP_UNINIT) {
			*qp_attr_mask = IB_QP_STATE | IB_QP_SQ_PSN;
			qp_attr->sq_psn = be32_to_cpu(cm_id_priv->sq_psn);
			switch (cm_id_priv->qp_type) {
			case IB_QPT_RC:
			case IB_QPT_XRC_INI:
				*qp_attr_mask |= IB_QP_RETRY_CNT | IB_QP_RNR_RETRY |
						 IB_QP_MAX_QP_RD_ATOMIC;
				qp_attr->retry_cnt = cm_id_priv->retry_count;
				qp_attr->rnr_retry = cm_id_priv->rnr_retry_count;
				qp_attr->max_rd_atomic = cm_id_priv->initiator_depth;
				fallthrough;
			case IB_QPT_XRC_TGT:
				*qp_attr_mask |= IB_QP_TIMEOUT;
				qp_attr->timeout = cm_id_priv->av.timeout;
				break;
			default:
				break;
			}
			if (rdma_ah_get_dlid(&cm_id_priv->alt_av.ah_attr)) {
				*qp_attr_mask |= IB_QP_PATH_MIG_STATE;
				qp_attr->path_mig_state = IB_MIG_REARM;
			}
		} else {
			*qp_attr_mask = IB_QP_ALT_PATH | IB_QP_PATH_MIG_STATE;
			qp_attr->alt_port_num = cm_id_priv->alt_av.port->port_num;
			qp_attr->alt_pkey_index = cm_id_priv->alt_av.pkey_index;
			qp_attr->alt_timeout = cm_id_priv->alt_av.timeout;
			qp_attr->alt_ah_attr = cm_id_priv->alt_av.ah_attr;
			qp_attr->path_mig_state = IB_MIG_REARM;
		}
		ret = 0;
		break;
	default:
		trace_icm_qp_rts_err(&cm_id_priv->id);
		ret = -EINVAL;
		break;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
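
/*
 * ib_cm_init_qp_attr - dispatch on the requested QP state to produce the
 * attributes and mask the consumer should pass to ib_modify_qp().
 */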
int ib_cm_init_qp_attr(struct ib_cm_id *cm_id,
		       struct ib_qp_attr *qp_attr,
		       int *qp_attr_mask)
{
	struct cm_id_private *cm_id_priv;
	int ret;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	switch (qp_attr->qp_state) {
	case IB_QPS_INIT:
		ret = cm_init_qp_init_attr(cm_id_priv, qp_attr, qp_attr_mask);
		break;
	case IB_QPS_RTR:
		ret = cm_init_qp_rtr_attr(cm_id_priv, qp_attr, qp_attr_mask);
		break;
	case IB_QPS_RTS:
		ret = cm_init_qp_rts_attr(cm_id_priv, qp_attr, qp_attr_mask);
		break;
	default:
		ret = -EINVAL;
		break;
	}
	return ret;
}
EXPORT_SYMBOL(ib_cm_init_qp_attr);

static ssize_t cm_show_counter(struct kobject *obj, struct attribute *attr,
			       char *buf)
{
	struct cm_counter_group *group;
	struct cm_counter_attribute *cm_attr;

	group = container_of(obj, struct cm_counter_group, obj);
	cm_attr = container_of(attr, struct cm_counter_attribute, attr);

	return sprintf(buf, "%ld\n",
		       atomic_long_read(&group->counter[cm_attr->index]));
}

static const struct sysfs_ops cm_counter_ops = {
	.show = cm_show_counter
};

static struct kobj_type cm_counter_obj_type = {
	.sysfs_ops = &cm_counter_ops,
	.default_attrs = cm_counter_default_attrs
};
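
/*
 * cm_create_port_fs - register the per-port counter groups with the port's
 * sysfs tree; on failure, unwind the groups registered so far.
 */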
static int cm_create_port_fs(struct cm_port *port)
{
	int i, ret;

	for (i = 0; i < CM_COUNTER_GROUPS; i++) {
		ret = ib_port_register_module_stat(port->cm_dev->ib_device,
						   port->port_num,
						   &port->counter_group[i].obj,
						   &cm_counter_obj_type,
						   counter_group_names[i]);
		if (ret)
			goto error;
	}

	return 0;

error:
	while (i--)
		ib_port_unregister_module_stat(&port->counter_group[i].obj);
	return ret;
}

static void cm_remove_port_fs(struct cm_port *port)
{
	int i;

	for (i = 0; i < CM_COUNTER_GROUPS; i++)
		ib_port_unregister_module_stat(&port->counter_group[i].obj);
}
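
/*
 * cm_add_one - ib_client add callback.  Allocates the per-device state,
 * then, for every CM-capable port, registers a GSI MAD agent and counter
 * sysfs entries and advertises IB_PORT_CM_SUP.
 */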
static int cm_add_one(struct ib_device *ib_device)
{
	struct cm_device *cm_dev;
	struct cm_port *port;
	struct ib_mad_reg_req reg_req = {
		.mgmt_class = IB_MGMT_CLASS_CM,
		.mgmt_class_version = IB_CM_CLASS_VERSION,
	};
	struct ib_port_modify port_modify = {
		.set_port_cap_mask = IB_PORT_CM_SUP
	};
	unsigned long flags;
	int ret;
	int count = 0;
	unsigned int i;

	cm_dev = kzalloc(struct_size(cm_dev, port, ib_device->phys_port_cnt),
			 GFP_KERNEL);
	if (!cm_dev)
		return -ENOMEM;

	cm_dev->ib_device = ib_device;
	cm_dev->ack_delay = ib_device->attrs.local_ca_ack_delay;
	cm_dev->going_down = 0;

	set_bit(IB_MGMT_METHOD_SEND, reg_req.method_mask);
	rdma_for_each_port (ib_device, i) {
		if (!rdma_cap_ib_cm(ib_device, i))
			continue;

		port = kzalloc(sizeof *port, GFP_KERNEL);
		if (!port) {
			ret = -ENOMEM;
			goto error1;
		}

		cm_dev->port[i-1] = port;
		port->cm_dev = cm_dev;
		port->port_num = i;

		INIT_LIST_HEAD(&port->cm_priv_prim_list);
		INIT_LIST_HEAD(&port->cm_priv_altr_list);

		ret = cm_create_port_fs(port);
		if (ret)
			goto error1;

		port->mad_agent = ib_register_mad_agent(ib_device, i,
							IB_QPT_GSI,
							&reg_req,
							0,
							cm_send_handler,
							cm_recv_handler,
							port,
							0);
		if (IS_ERR(port->mad_agent)) {
			ret = PTR_ERR(port->mad_agent);
			goto error2;
		}

		ret = ib_modify_port(ib_device, i, 0, &port_modify);
		if (ret)
			goto error3;

		count++;
	}

	if (!count) {
		ret = -EOPNOTSUPP;
		goto free;
	}

	ib_set_client_data(ib_device, &cm_client, cm_dev);

	write_lock_irqsave(&cm.device_lock, flags);
	list_add_tail(&cm_dev->list, &cm.device_list);
	write_unlock_irqrestore(&cm.device_lock, flags);
	return 0;

error3:
	ib_unregister_mad_agent(port->mad_agent);
error2:
	cm_remove_port_fs(port);
error1:
	port_modify.set_port_cap_mask = 0;
	port_modify.clr_port_cap_mask = IB_PORT_CM_SUP;
	kfree(port);
	while (--i) {
		if (!rdma_cap_ib_cm(ib_device, i))
			continue;

		port = cm_dev->port[i-1];
		ib_modify_port(ib_device, port->port_num, 0, &port_modify);
		ib_unregister_mad_agent(port->mad_agent);
		cm_remove_port_fs(port);
	}
free:
	kfree(cm_dev);
	return ret;
}
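
/*
 * cm_remove_one - ib_client remove callback.  Marks the device as going
 * down so no new work is queued, then drains the workqueue and tears down
 * each port's MAD agent and sysfs state.
 */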
static void cm_remove_one(struct ib_device *ib_device, void *client_data)
{
	struct cm_device *cm_dev = client_data;
	struct cm_port *port;
	struct cm_id_private *cm_id_priv;
	struct ib_mad_agent *cur_mad_agent;
	struct ib_port_modify port_modify = {
		.clr_port_cap_mask = IB_PORT_CM_SUP
	};
	unsigned long flags;
	unsigned int i;

	write_lock_irqsave(&cm.device_lock, flags);
	list_del(&cm_dev->list);
	write_unlock_irqrestore(&cm.device_lock, flags);

	spin_lock_irq(&cm.lock);
	cm_dev->going_down = 1;
	spin_unlock_irq(&cm.lock);

	rdma_for_each_port (ib_device, i) {
		if (!rdma_cap_ib_cm(ib_device, i))
			continue;

		port = cm_dev->port[i-1];
		ib_modify_port(ib_device, port->port_num, 0, &port_modify);
		/* Mark all the cm_id's as not valid */
		spin_lock_irq(&cm.lock);
		list_for_each_entry(cm_id_priv, &port->cm_priv_altr_list, altr_list)
			cm_id_priv->altr_send_port_not_ready = 1;
		list_for_each_entry(cm_id_priv, &port->cm_priv_prim_list, prim_list)
			cm_id_priv->prim_send_port_not_ready = 1;
		spin_unlock_irq(&cm.lock);
		/*
		 * Flush the workqueue after setting going_down so that no
		 * new work can be queued by the receive handler; only then
		 * is it safe to unregister the MAD agent.
		 */
		flush_workqueue(cm.wq);
		spin_lock_irq(&cm.state_lock);
		cur_mad_agent = port->mad_agent;
		port->mad_agent = NULL;
		spin_unlock_irq(&cm.state_lock);
		ib_unregister_mad_agent(cur_mad_agent);
		cm_remove_port_fs(port);
	}

	kfree(cm_dev);
}
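
/*
 * ib_cm_init - module init: set up the global CM tables and locks, create
 * the ordered CM workqueue, and register as an ib_client.
 */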
static int __init ib_cm_init(void)
{
	int ret;

	INIT_LIST_HEAD(&cm.device_list);
	rwlock_init(&cm.device_lock);
	spin_lock_init(&cm.lock);
	spin_lock_init(&cm.state_lock);
	cm.listen_service_table = RB_ROOT;
	cm.listen_service_id = be64_to_cpu(IB_CM_ASSIGN_SERVICE_ID);
	cm.remote_id_table = RB_ROOT;
	cm.remote_qp_table = RB_ROOT;
	cm.remote_sidr_table = RB_ROOT;
	xa_init_flags(&cm.local_id_table, XA_FLAGS_ALLOC);
	get_random_bytes(&cm.random_id_operand, sizeof cm.random_id_operand);
	INIT_LIST_HEAD(&cm.timewait_list);

	cm.wq = alloc_workqueue("ib_cm", 0, 1);
	if (!cm.wq)
		return -ENOMEM;

	ret = ib_register_client(&cm_client);
	if (ret)
		goto err_wq;

	return 0;

err_wq:
	destroy_workqueue(cm.wq);
	return ret;
}

static void __exit ib_cm_cleanup(void)
{
	struct cm_timewait_info *timewait_info, *tmp;

	spin_lock_irq(&cm.lock);
	list_for_each_entry(timewait_info, &cm.timewait_list, list)
		cancel_delayed_work(&timewait_info->work.work);
	spin_unlock_irq(&cm.lock);

	ib_unregister_client(&cm_client);
	destroy_workqueue(cm.wq);

	list_for_each_entry_safe(timewait_info, tmp, &cm.timewait_list, list) {
		list_del(&timewait_info->list);
		kfree(timewait_info);
	}

	WARN_ON(!xa_empty(&cm.local_id_table));
}

module_init(ib_cm_init);
module_exit(ib_cm_cleanup);