/*
 * Copyright (c) 2004-2007 Intel Corporation.  All rights reserved.
 * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
 * Copyright (c) 2004, 2005 Voltaire Corporation.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/completion.h>
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/random.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
#include <linux/workqueue.h>
#include <linux/kdev_t.h>
#include <linux/etherdevice.h>

#include <rdma/ib_cache.h>
#include <rdma/ib_cm.h>
#include "cm_msgs.h"
MODULE_AUTHOR("Sean Hefty");
MODULE_DESCRIPTION("InfiniBand CM");
MODULE_LICENSE("Dual BSD/GPL");
static const char * const ibcm_rej_reason_strs[] = {
        [IB_CM_REJ_NO_QP]                       = "no QP",
        [IB_CM_REJ_NO_EEC]                      = "no EEC",
        [IB_CM_REJ_NO_RESOURCES]                = "no resources",
        [IB_CM_REJ_TIMEOUT]                     = "timeout",
        [IB_CM_REJ_UNSUPPORTED]                 = "unsupported",
        [IB_CM_REJ_INVALID_COMM_ID]             = "invalid comm ID",
        [IB_CM_REJ_INVALID_COMM_INSTANCE]       = "invalid comm instance",
        [IB_CM_REJ_INVALID_SERVICE_ID]          = "invalid service ID",
        [IB_CM_REJ_INVALID_TRANSPORT_TYPE]      = "invalid transport type",
        [IB_CM_REJ_STALE_CONN]                  = "stale conn",
        [IB_CM_REJ_RDC_NOT_EXIST]               = "RDC not exist",
        [IB_CM_REJ_INVALID_GID]                 = "invalid GID",
        [IB_CM_REJ_INVALID_LID]                 = "invalid LID",
        [IB_CM_REJ_INVALID_SL]                  = "invalid SL",
        [IB_CM_REJ_INVALID_TRAFFIC_CLASS]       = "invalid traffic class",
        [IB_CM_REJ_INVALID_HOP_LIMIT]           = "invalid hop limit",
        [IB_CM_REJ_INVALID_PACKET_RATE]         = "invalid packet rate",
        [IB_CM_REJ_INVALID_ALT_GID]             = "invalid alt GID",
        [IB_CM_REJ_INVALID_ALT_LID]             = "invalid alt LID",
        [IB_CM_REJ_INVALID_ALT_SL]              = "invalid alt SL",
        [IB_CM_REJ_INVALID_ALT_TRAFFIC_CLASS]   = "invalid alt traffic class",
        [IB_CM_REJ_INVALID_ALT_HOP_LIMIT]       = "invalid alt hop limit",
        [IB_CM_REJ_INVALID_ALT_PACKET_RATE]     = "invalid alt packet rate",
        [IB_CM_REJ_PORT_CM_REDIRECT]            = "port CM redirect",
        [IB_CM_REJ_PORT_REDIRECT]               = "port redirect",
        [IB_CM_REJ_INVALID_MTU]                 = "invalid MTU",
        [IB_CM_REJ_INSUFFICIENT_RESP_RESOURCES] = "insufficient resp resources",
        [IB_CM_REJ_CONSUMER_DEFINED]            = "consumer defined",
        [IB_CM_REJ_INVALID_RNR_RETRY]           = "invalid RNR retry",
        [IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID]     = "duplicate local comm ID",
        [IB_CM_REJ_INVALID_CLASS_VERSION]       = "invalid class version",
        [IB_CM_REJ_INVALID_FLOW_LABEL]          = "invalid flow label",
        [IB_CM_REJ_INVALID_ALT_FLOW_LABEL]      = "invalid alt flow label",
};
const char *__attribute_const__ ibcm_reject_msg(int reason)
{
        size_t index = reason;

        if (index < ARRAY_SIZE(ibcm_rej_reason_strs) &&
            ibcm_rej_reason_strs[index])
                return ibcm_rej_reason_strs[index];

        return "unrecognized reason";
}
EXPORT_SYMBOL(ibcm_reject_msg);
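
/*
 * Usage sketch (added commentary, not from the original source): a consumer
 * handling IB_CM_REJ_RECEIVED might log the decoded reason, e.g.
 *
 *      pr_debug("REJ received: %s\n",
 *               ibcm_reject_msg(rej_event_param->reason));
 *
 * where rej_event_param is assumed to point at the event's param.rej_rcvd
 * data.
 */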
static void cm_add_one(struct ib_device *device);
static void cm_remove_one(struct ib_device *device, void *client_data);

static struct ib_client cm_client = {
        .name   = "cm",
        .add    = cm_add_one,
        .remove = cm_remove_one
};
static struct ib_cm {
        spinlock_t lock;
        struct list_head device_list;
        rwlock_t device_lock;
        struct rb_root listen_service_table;
        u64 listen_service_id;
        /* struct rb_root peer_service_table; todo: fix peer to peer */
        struct rb_root remote_qp_table;
        struct rb_root remote_id_table;
        struct rb_root remote_sidr_table;
        struct idr local_id_table;
        __be32 random_id_operand;
        struct list_head timewait_list;
        struct workqueue_struct *wq;
        /* Sync on cm change port state */
        spinlock_t state_lock;
} cm;
/* Counter indexes ordered by attribute ID */
enum {
        CM_REQ_COUNTER,
        CM_MRA_COUNTER,
        CM_REJ_COUNTER,
        CM_REP_COUNTER,
        CM_RTU_COUNTER,
        CM_DREQ_COUNTER,
        CM_DREP_COUNTER,
        CM_SIDR_REQ_COUNTER,
        CM_SIDR_REP_COUNTER,
        CM_LAP_COUNTER,
        CM_APR_COUNTER,
        CM_ATTR_COUNT,
        CM_ATTR_ID_OFFSET = 0x0010,
};

enum {
        CM_XMIT,
        CM_XMIT_RETRIES,
        CM_RECV,
        CM_RECV_DUPLICATES,
        CM_COUNTER_GROUPS
};

static char const counter_group_names[CM_COUNTER_GROUPS]
                                     [sizeof("cm_rx_duplicates")] = {
        "cm_tx_msgs", "cm_tx_retries",
        "cm_rx_msgs", "cm_rx_duplicates"
};
struct cm_counter_group {
        struct kobject obj;
        atomic_long_t counter[CM_ATTR_COUNT];
};
struct cm_counter_attribute {
        struct attribute attr;
        int index;
};
#define CM_COUNTER_ATTR(_name, _index) \
struct cm_counter_attribute cm_##_name##_counter_attr = { \
        .attr = { .name = __stringify(_name), .mode = 0444 }, \
        .index = _index \
}
static CM_COUNTER_ATTR(req, CM_REQ_COUNTER);
static CM_COUNTER_ATTR(mra, CM_MRA_COUNTER);
static CM_COUNTER_ATTR(rej, CM_REJ_COUNTER);
static CM_COUNTER_ATTR(rep, CM_REP_COUNTER);
static CM_COUNTER_ATTR(rtu, CM_RTU_COUNTER);
static CM_COUNTER_ATTR(dreq, CM_DREQ_COUNTER);
static CM_COUNTER_ATTR(drep, CM_DREP_COUNTER);
static CM_COUNTER_ATTR(sidr_req, CM_SIDR_REQ_COUNTER);
static CM_COUNTER_ATTR(sidr_rep, CM_SIDR_REP_COUNTER);
static CM_COUNTER_ATTR(lap, CM_LAP_COUNTER);
static CM_COUNTER_ATTR(apr, CM_APR_COUNTER);
static struct attribute *cm_counter_default_attrs[] = {
        &cm_req_counter_attr.attr,
        &cm_mra_counter_attr.attr,
        &cm_rej_counter_attr.attr,
        &cm_rep_counter_attr.attr,
        &cm_rtu_counter_attr.attr,
        &cm_dreq_counter_attr.attr,
        &cm_drep_counter_attr.attr,
        &cm_sidr_req_counter_attr.attr,
        &cm_sidr_rep_counter_attr.attr,
        &cm_lap_counter_attr.attr,
        &cm_apr_counter_attr.attr,
        NULL
};
struct cm_port {
        struct cm_device *cm_dev;
        struct ib_mad_agent *mad_agent;
        struct kobject port_obj;
        u8 port_num;
        struct list_head cm_priv_prim_list;
        struct list_head cm_priv_altr_list;
        struct cm_counter_group counter_group[CM_COUNTER_GROUPS];
};
struct cm_device {
        struct list_head list;
        struct ib_device *ib_device;
        struct device *device;
        u8 ack_delay;
        int going_down;
        struct cm_port *port[0];
};
struct cm_av {
        struct cm_port *port;
        union ib_gid dgid;
        struct rdma_ah_attr ah_attr;
        u16 pkey_index;
        u8 timeout;
};
struct cm_work {
        struct delayed_work work;
        struct list_head list;
        struct cm_port *port;
        struct ib_mad_recv_wc *mad_recv_wc;     /* Received MADs */
        __be32 local_id;                        /* Established / timewait */
        __be32 remote_id;
        struct ib_cm_event cm_event;
        struct sa_path_rec path[0];
};
struct cm_timewait_info {
        struct cm_work work;                    /* Must be first. */
        struct list_head list;
        struct rb_node remote_qp_node;
        struct rb_node remote_id_node;
        __be64 remote_ca_guid;
        __be32 remote_qpn;
        u8 inserted_remote_qp;
        u8 inserted_remote_id;
};
struct cm_id_private {
        struct ib_cm_id id;

        struct rb_node service_node;
        struct rb_node sidr_id_node;
        spinlock_t lock;        /* Do not acquire inside cm.lock */
        struct completion comp;
        atomic_t refcount;
        /* Number of clients sharing this ib_cm_id. Only valid for listeners.
         * Protected by the cm.lock spinlock. */
        int listen_sharecount;

        struct ib_mad_send_buf *msg;
        struct cm_timewait_info *timewait_info;
        /* todo: use alternate port on send failure */
        struct cm_av av;
        struct cm_av alt_av;

        void *private_data;
        __be64 tid;
        __be32 local_qpn;
        __be32 remote_qpn;
        enum ib_qp_type qp_type;
        __be32 sq_psn;
        __be32 rq_psn;
        int timeout_ms;
        enum ib_mtu path_mtu;
        __be16 pkey;
        u8 private_data_len;
        u8 max_cm_retries;
        u8 peer_to_peer;
        u8 responder_resources;
        u8 initiator_depth;
        u8 retry_count;
        u8 rnr_retry_count;
        u8 service_timeout;
        u8 target_ack_delay;

        struct list_head prim_list;
        struct list_head altr_list;
        /* Indicates that the send port mad is registered and av is set */
        int prim_send_port_not_ready;
        int altr_send_port_not_ready;

        struct list_head work_list;
        atomic_t work_count;
};
static void cm_work_handler(struct work_struct *work);

static inline void cm_deref_id(struct cm_id_private *cm_id_priv)
{
        if (atomic_dec_and_test(&cm_id_priv->refcount))
                complete(&cm_id_priv->comp);
}
static int cm_alloc_msg(struct cm_id_private *cm_id_priv,
                        struct ib_mad_send_buf **msg)
{
        struct ib_mad_agent *mad_agent;
        struct ib_mad_send_buf *m;
        struct ib_ah *ah;
        struct cm_av *av;
        unsigned long flags, flags2;
        int ret = 0;

        /* Don't let the port be released until the agent is torn down. */
        spin_lock_irqsave(&cm.state_lock, flags2);
        spin_lock_irqsave(&cm.lock, flags);
        if (!cm_id_priv->prim_send_port_not_ready)
                av = &cm_id_priv->av;
        else if (!cm_id_priv->altr_send_port_not_ready &&
                 (cm_id_priv->alt_av.port))
                av = &cm_id_priv->alt_av;
        else {
                pr_info("%s: not valid CM id\n", __func__);
                ret = -ENODEV;
                spin_unlock_irqrestore(&cm.lock, flags);
                goto out;
        }
        spin_unlock_irqrestore(&cm.lock, flags);
        /* Make sure the port hasn't released the mad agent yet. */
        mad_agent = cm_id_priv->av.port->mad_agent;
        if (!mad_agent) {
                pr_info("%s: not a valid MAD agent\n", __func__);
                ret = -ENODEV;
                goto out;
        }
        ah = rdma_create_ah(mad_agent->qp->pd, &av->ah_attr);
        if (IS_ERR(ah)) {
                ret = PTR_ERR(ah);
                goto out;
        }

        m = ib_create_send_mad(mad_agent, cm_id_priv->id.remote_cm_qpn,
                               av->pkey_index,
                               0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
                               GFP_ATOMIC,
                               IB_MGMT_BASE_VERSION);
        if (IS_ERR(m)) {
                rdma_destroy_ah(ah);
                ret = PTR_ERR(m);
                goto out;
        }

        /* Timeout set by caller if response is expected. */
        m->ah = ah;
        m->retries = cm_id_priv->max_cm_retries;

        atomic_inc(&cm_id_priv->refcount);
        m->context[0] = cm_id_priv;
        *msg = m;

out:
        spin_unlock_irqrestore(&cm.state_lock, flags2);
        return ret;
}
static struct ib_mad_send_buf *cm_alloc_response_msg_no_ah(struct cm_port *port,
                                                           struct ib_mad_recv_wc *mad_recv_wc)
{
        return ib_create_send_mad(port->mad_agent, 1, mad_recv_wc->wc->pkey_index,
                                  0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
                                  GFP_ATOMIC,
                                  IB_MGMT_BASE_VERSION);
}
static int cm_create_response_msg_ah(struct cm_port *port,
                                     struct ib_mad_recv_wc *mad_recv_wc,
                                     struct ib_mad_send_buf *msg)
{
        struct ib_ah *ah;

        ah = ib_create_ah_from_wc(port->mad_agent->qp->pd, mad_recv_wc->wc,
                                  mad_recv_wc->recv_buf.grh, port->port_num);
        if (IS_ERR(ah))
                return PTR_ERR(ah);

        msg->ah = ah;
        return 0;
}
static void cm_free_msg(struct ib_mad_send_buf *msg)
{
        if (msg->ah)
                rdma_destroy_ah(msg->ah);
        if (msg->context[0])
                cm_deref_id(msg->context[0]);
        ib_free_send_mad(msg);
}
static int cm_alloc_response_msg(struct cm_port *port,
                                 struct ib_mad_recv_wc *mad_recv_wc,
                                 struct ib_mad_send_buf **msg)
{
        struct ib_mad_send_buf *m;
        int ret;

        m = cm_alloc_response_msg_no_ah(port, mad_recv_wc);
        if (IS_ERR(m))
                return PTR_ERR(m);

        ret = cm_create_response_msg_ah(port, mad_recv_wc, m);
        if (ret) {
                cm_free_msg(m);
                return ret;
        }

        *msg = m;
        return 0;
}
static void * cm_copy_private_data(const void *private_data,
                                   u8 private_data_len)
{
        void *data;

        if (!private_data || !private_data_len)
                return NULL;

        data = kmemdup(private_data, private_data_len, GFP_KERNEL);
        if (!data)
                return ERR_PTR(-ENOMEM);

        return data;
}
static void cm_set_private_data(struct cm_id_private *cm_id_priv,
                                void *private_data, u8 private_data_len)
{
        if (cm_id_priv->private_data && cm_id_priv->private_data_len)
                kfree(cm_id_priv->private_data);

        cm_id_priv->private_data = private_data;
        cm_id_priv->private_data_len = private_data_len;
}
static void cm_init_av_for_response(struct cm_port *port, struct ib_wc *wc,
                                    struct ib_grh *grh, struct cm_av *av)
{
        av->port = port;
        av->pkey_index = wc->pkey_index;
        ib_init_ah_from_wc(port->cm_dev->ib_device, port->port_num, wc,
                           grh, &av->ah_attr);
}
static int cm_init_av_by_path(struct sa_path_rec *path, struct cm_av *av,
                              struct cm_id_private *cm_id_priv)
{
        struct cm_device *cm_dev;
        struct cm_port *port = NULL;
        unsigned long flags;
        int ret;
        u8 p;
        struct net_device *ndev = ib_get_ndev_from_path(path);

        read_lock_irqsave(&cm.device_lock, flags);
        list_for_each_entry(cm_dev, &cm.device_list, list) {
                if (!ib_find_cached_gid(cm_dev->ib_device, &path->sgid,
                                        sa_conv_pathrec_to_gid_type(path),
                                        ndev, &p, NULL)) {
                        port = cm_dev->port[p-1];
                        break;
                }
        }
        read_unlock_irqrestore(&cm.device_lock, flags);

        if (ndev)
                dev_put(ndev);

        if (!port)
                return -EINVAL;

        ret = ib_find_cached_pkey(cm_dev->ib_device, port->port_num,
                                  be16_to_cpu(path->pkey), &av->pkey_index);
        if (ret)
                return ret;

        av->port = port;
        ib_init_ah_from_path(cm_dev->ib_device, port->port_num, path,
                             &av->ah_attr);
        av->timeout = path->packet_life_time + 1;

        spin_lock_irqsave(&cm.lock, flags);
        if (&cm_id_priv->av == av)
                list_add_tail(&cm_id_priv->prim_list, &port->cm_priv_prim_list);
        else if (&cm_id_priv->alt_av == av)
                list_add_tail(&cm_id_priv->altr_list, &port->cm_priv_altr_list);
        else
                ret = -EINVAL;

        spin_unlock_irqrestore(&cm.lock, flags);

        return ret;
}
static int cm_alloc_id(struct cm_id_private *cm_id_priv)
{
        unsigned long flags;
        int id;

        idr_preload(GFP_KERNEL);
        spin_lock_irqsave(&cm.lock, flags);

        id = idr_alloc_cyclic(&cm.local_id_table, cm_id_priv, 0, 0, GFP_NOWAIT);

        spin_unlock_irqrestore(&cm.lock, flags);
        idr_preload_end();

        cm_id_priv->id.local_id = (__force __be32)id ^ cm.random_id_operand;
        return id < 0 ? id : 0;
}
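
/*
 * Added note: the idr index is XORed with cm.random_id_operand so that the
 * local communication IDs handed out on the wire are not trivially
 * sequential; cm_free_id() and cm_get_id() below apply the same XOR to
 * recover the idr index.
 */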
static void cm_free_id(__be32 local_id)
{
        spin_lock_irq(&cm.lock);
        idr_remove(&cm.local_id_table,
                   (__force int) (local_id ^ cm.random_id_operand));
        spin_unlock_irq(&cm.lock);
}
static struct cm_id_private * cm_get_id(__be32 local_id, __be32 remote_id)
{
        struct cm_id_private *cm_id_priv;

        cm_id_priv = idr_find(&cm.local_id_table,
                              (__force int) (local_id ^ cm.random_id_operand));
        if (cm_id_priv) {
                if (cm_id_priv->id.remote_id == remote_id)
                        atomic_inc(&cm_id_priv->refcount);
                else
                        cm_id_priv = NULL;
        }

        return cm_id_priv;
}
static struct cm_id_private * cm_acquire_id(__be32 local_id, __be32 remote_id)
{
        struct cm_id_private *cm_id_priv;

        spin_lock_irq(&cm.lock);
        cm_id_priv = cm_get_id(local_id, remote_id);
        spin_unlock_irq(&cm.lock);

        return cm_id_priv;
}
/*
 * Trivial helpers to strip endian annotation and compare; the
 * endianness doesn't actually matter since we just need a stable
 * order for the RB tree.
 */
static int be32_lt(__be32 a, __be32 b)
{
        return (__force u32) a < (__force u32) b;
}

static int be32_gt(__be32 a, __be32 b)
{
        return (__force u32) a > (__force u32) b;
}

static int be64_lt(__be64 a, __be64 b)
{
        return (__force u64) a < (__force u64) b;
}

static int be64_gt(__be64 a, __be64 b)
{
        return (__force u64) a > (__force u64) b;
}
static struct cm_id_private * cm_insert_listen(struct cm_id_private *cm_id_priv)
{
        struct rb_node **link = &cm.listen_service_table.rb_node;
        struct rb_node *parent = NULL;
        struct cm_id_private *cur_cm_id_priv;
        __be64 service_id = cm_id_priv->id.service_id;
        __be64 service_mask = cm_id_priv->id.service_mask;

        while (*link) {
                parent = *link;
                cur_cm_id_priv = rb_entry(parent, struct cm_id_private,
                                          service_node);
                if ((cur_cm_id_priv->id.service_mask & service_id) ==
                    (service_mask & cur_cm_id_priv->id.service_id) &&
                    (cm_id_priv->id.device == cur_cm_id_priv->id.device))
                        return cur_cm_id_priv;

                if (cm_id_priv->id.device < cur_cm_id_priv->id.device)
                        link = &(*link)->rb_left;
                else if (cm_id_priv->id.device > cur_cm_id_priv->id.device)
                        link = &(*link)->rb_right;
                else if (be64_lt(service_id, cur_cm_id_priv->id.service_id))
                        link = &(*link)->rb_left;
                else if (be64_gt(service_id, cur_cm_id_priv->id.service_id))
                        link = &(*link)->rb_right;
                else
                        link = &(*link)->rb_right;
        }
        rb_link_node(&cm_id_priv->service_node, parent, link);
        rb_insert_color(&cm_id_priv->service_node, &cm.listen_service_table);
        return NULL;
}
static struct cm_id_private * cm_find_listen(struct ib_device *device,
                                             __be64 service_id)
{
        struct rb_node *node = cm.listen_service_table.rb_node;
        struct cm_id_private *cm_id_priv;

        while (node) {
                cm_id_priv = rb_entry(node, struct cm_id_private, service_node);
                if ((cm_id_priv->id.service_mask & service_id) ==
                     cm_id_priv->id.service_id &&
                    (cm_id_priv->id.device == device))
                        return cm_id_priv;

                if (device < cm_id_priv->id.device)
                        node = node->rb_left;
                else if (device > cm_id_priv->id.device)
                        node = node->rb_right;
                else if (be64_lt(service_id, cm_id_priv->id.service_id))
                        node = node->rb_left;
                else if (be64_gt(service_id, cm_id_priv->id.service_id))
                        node = node->rb_right;
                else
                        node = node->rb_right;
        }
        return NULL;
}
static struct cm_timewait_info * cm_insert_remote_id(struct cm_timewait_info
                                                     *timewait_info)
{
        struct rb_node **link = &cm.remote_id_table.rb_node;
        struct rb_node *parent = NULL;
        struct cm_timewait_info *cur_timewait_info;
        __be64 remote_ca_guid = timewait_info->remote_ca_guid;
        __be32 remote_id = timewait_info->work.remote_id;

        while (*link) {
                parent = *link;
                cur_timewait_info = rb_entry(parent, struct cm_timewait_info,
                                             remote_id_node);
                if (be32_lt(remote_id, cur_timewait_info->work.remote_id))
                        link = &(*link)->rb_left;
                else if (be32_gt(remote_id, cur_timewait_info->work.remote_id))
                        link = &(*link)->rb_right;
                else if (be64_lt(remote_ca_guid, cur_timewait_info->remote_ca_guid))
                        link = &(*link)->rb_left;
                else if (be64_gt(remote_ca_guid, cur_timewait_info->remote_ca_guid))
                        link = &(*link)->rb_right;
                else
                        return cur_timewait_info;
        }
        timewait_info->inserted_remote_id = 1;
        rb_link_node(&timewait_info->remote_id_node, parent, link);
        rb_insert_color(&timewait_info->remote_id_node, &cm.remote_id_table);
        return NULL;
}
static struct cm_timewait_info * cm_find_remote_id(__be64 remote_ca_guid,
                                                   __be32 remote_id)
{
        struct rb_node *node = cm.remote_id_table.rb_node;
        struct cm_timewait_info *timewait_info;

        while (node) {
                timewait_info = rb_entry(node, struct cm_timewait_info,
                                         remote_id_node);
                if (be32_lt(remote_id, timewait_info->work.remote_id))
                        node = node->rb_left;
                else if (be32_gt(remote_id, timewait_info->work.remote_id))
                        node = node->rb_right;
                else if (be64_lt(remote_ca_guid, timewait_info->remote_ca_guid))
                        node = node->rb_left;
                else if (be64_gt(remote_ca_guid, timewait_info->remote_ca_guid))
                        node = node->rb_right;
                else
                        return timewait_info;
        }
        return NULL;
}
static struct cm_timewait_info * cm_insert_remote_qpn(struct cm_timewait_info
                                                      *timewait_info)
{
        struct rb_node **link = &cm.remote_qp_table.rb_node;
        struct rb_node *parent = NULL;
        struct cm_timewait_info *cur_timewait_info;
        __be64 remote_ca_guid = timewait_info->remote_ca_guid;
        __be32 remote_qpn = timewait_info->remote_qpn;

        while (*link) {
                parent = *link;
                cur_timewait_info = rb_entry(parent, struct cm_timewait_info,
                                             remote_qp_node);
                if (be32_lt(remote_qpn, cur_timewait_info->remote_qpn))
                        link = &(*link)->rb_left;
                else if (be32_gt(remote_qpn, cur_timewait_info->remote_qpn))
                        link = &(*link)->rb_right;
                else if (be64_lt(remote_ca_guid, cur_timewait_info->remote_ca_guid))
                        link = &(*link)->rb_left;
                else if (be64_gt(remote_ca_guid, cur_timewait_info->remote_ca_guid))
                        link = &(*link)->rb_right;
                else
                        return cur_timewait_info;
        }
        timewait_info->inserted_remote_qp = 1;
        rb_link_node(&timewait_info->remote_qp_node, parent, link);
        rb_insert_color(&timewait_info->remote_qp_node, &cm.remote_qp_table);
        return NULL;
}
static struct cm_id_private * cm_insert_remote_sidr(struct cm_id_private
                                                    *cm_id_priv)
{
        struct rb_node **link = &cm.remote_sidr_table.rb_node;
        struct rb_node *parent = NULL;
        struct cm_id_private *cur_cm_id_priv;
        union ib_gid *port_gid = &cm_id_priv->av.dgid;
        __be32 remote_id = cm_id_priv->id.remote_id;

        while (*link) {
                parent = *link;
                cur_cm_id_priv = rb_entry(parent, struct cm_id_private,
                                          sidr_id_node);
                if (be32_lt(remote_id, cur_cm_id_priv->id.remote_id))
                        link = &(*link)->rb_left;
                else if (be32_gt(remote_id, cur_cm_id_priv->id.remote_id))
                        link = &(*link)->rb_right;
                else {
                        int cmp;
                        cmp = memcmp(port_gid, &cur_cm_id_priv->av.dgid,
                                     sizeof *port_gid);
                        if (cmp < 0)
                                link = &(*link)->rb_left;
                        else if (cmp > 0)
                                link = &(*link)->rb_right;
                        else
                                return cur_cm_id_priv;
                }
        }
        rb_link_node(&cm_id_priv->sidr_id_node, parent, link);
        rb_insert_color(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
        return NULL;
}
static void cm_reject_sidr_req(struct cm_id_private *cm_id_priv,
                               enum ib_cm_sidr_status status)
{
        struct ib_cm_sidr_rep_param param;

        memset(&param, 0, sizeof param);
        param.status = status;
        ib_send_cm_sidr_rep(&cm_id_priv->id, &param);
}
struct ib_cm_id *ib_create_cm_id(struct ib_device *device,
                                 ib_cm_handler cm_handler,
                                 void *context)
{
        struct cm_id_private *cm_id_priv;
        int ret;

        cm_id_priv = kzalloc(sizeof *cm_id_priv, GFP_KERNEL);
        if (!cm_id_priv)
                return ERR_PTR(-ENOMEM);

        cm_id_priv->id.state = IB_CM_IDLE;
        cm_id_priv->id.device = device;
        cm_id_priv->id.cm_handler = cm_handler;
        cm_id_priv->id.context = context;
        cm_id_priv->id.remote_cm_qpn = 1;
        ret = cm_alloc_id(cm_id_priv);
        if (ret)
                goto error;

        spin_lock_init(&cm_id_priv->lock);
        init_completion(&cm_id_priv->comp);
        INIT_LIST_HEAD(&cm_id_priv->work_list);
        INIT_LIST_HEAD(&cm_id_priv->prim_list);
        INIT_LIST_HEAD(&cm_id_priv->altr_list);
        atomic_set(&cm_id_priv->work_count, -1);
        atomic_set(&cm_id_priv->refcount, 1);
        return &cm_id_priv->id;

error:
        kfree(cm_id_priv);
        return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(ib_create_cm_id);
static struct cm_work * cm_dequeue_work(struct cm_id_private *cm_id_priv)
{
        struct cm_work *work;

        if (list_empty(&cm_id_priv->work_list))
                return NULL;

        work = list_entry(cm_id_priv->work_list.next, struct cm_work, list);
        list_del(&work->list);
        return work;
}
static void cm_free_work(struct cm_work *work)
{
        if (work->mad_recv_wc)
                ib_free_recv_mad(work->mad_recv_wc);
        kfree(work);
}
static inline int cm_convert_to_ms(int iba_time)
{
        /* approximate conversion to ms from 4.096us x 2^iba_time */
        return 1 << max(iba_time - 8, 0);
}
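
/*
 * Worked example (added commentary, not from the original source):
 * iba_time = 20 encodes 4.096us * 2^20 ~= 4295 ms; the shift above yields
 * 1 << (20 - 8) = 4096 ms, close enough for scheduling timeouts.
 */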
/*
 * calculate: 4.096x2^ack_timeout = 4.096x2^ack_delay + 2x4.096x2^life_time
 * Because of how ack_timeout is stored, adding one doubles the timeout.
 * To avoid large timeouts, select the max(ack_delay, life_time + 1), and
 * increment it (round up) only if the other is within 50%.
 */
static u8 cm_ack_timeout(u8 ca_ack_delay, u8 packet_life_time)
{
        int ack_timeout = packet_life_time + 1;

        if (ack_timeout >= ca_ack_delay)
                ack_timeout += (ca_ack_delay >= (ack_timeout - 1));
        else
                ack_timeout = ca_ack_delay +
                              (ack_timeout >= (ca_ack_delay - 1));

        return min(31, ack_timeout);
}
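
/*
 * Worked example (added commentary): ca_ack_delay = 10, packet_life_time = 9
 * gives ack_timeout = 10; since 10 >= 10 and 10 >= 9, it is bumped to 11,
 * i.e. the encoded timeout doubles because the two terms are within 50%
 * of each other.
 */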
static void cm_cleanup_timewait(struct cm_timewait_info *timewait_info)
{
        if (timewait_info->inserted_remote_id) {
                rb_erase(&timewait_info->remote_id_node, &cm.remote_id_table);
                timewait_info->inserted_remote_id = 0;
        }

        if (timewait_info->inserted_remote_qp) {
                rb_erase(&timewait_info->remote_qp_node, &cm.remote_qp_table);
                timewait_info->inserted_remote_qp = 0;
        }
}
static struct cm_timewait_info * cm_create_timewait_info(__be32 local_id)
{
        struct cm_timewait_info *timewait_info;

        timewait_info = kzalloc(sizeof *timewait_info, GFP_KERNEL);
        if (!timewait_info)
                return ERR_PTR(-ENOMEM);

        timewait_info->work.local_id = local_id;
        INIT_DELAYED_WORK(&timewait_info->work.work, cm_work_handler);
        timewait_info->work.cm_event.event = IB_CM_TIMEWAIT_EXIT;
        return timewait_info;
}
static void cm_enter_timewait(struct cm_id_private *cm_id_priv)
{
        int wait_time;
        unsigned long flags;
        struct cm_device *cm_dev;

        cm_dev = ib_get_client_data(cm_id_priv->id.device, &cm_client);
        if (!cm_dev)
                return;

        spin_lock_irqsave(&cm.lock, flags);
        cm_cleanup_timewait(cm_id_priv->timewait_info);
        list_add_tail(&cm_id_priv->timewait_info->list, &cm.timewait_list);
        spin_unlock_irqrestore(&cm.lock, flags);

        /*
         * The cm_id could be destroyed by the user before we exit timewait.
         * To protect against this, we search for the cm_id after exiting
         * timewait before notifying the user that we've exited timewait.
         */
        cm_id_priv->id.state = IB_CM_TIMEWAIT;
        wait_time = cm_convert_to_ms(cm_id_priv->av.timeout);

        /* Check if the device started its remove_one */
        spin_lock_irqsave(&cm.lock, flags);
        if (!cm_dev->going_down)
                queue_delayed_work(cm.wq, &cm_id_priv->timewait_info->work.work,
                                   msecs_to_jiffies(wait_time));
        spin_unlock_irqrestore(&cm.lock, flags);

        cm_id_priv->timewait_info = NULL;
}
static void cm_reset_to_idle(struct cm_id_private *cm_id_priv)
{
        unsigned long flags;

        cm_id_priv->id.state = IB_CM_IDLE;
        if (cm_id_priv->timewait_info) {
                spin_lock_irqsave(&cm.lock, flags);
                cm_cleanup_timewait(cm_id_priv->timewait_info);
                spin_unlock_irqrestore(&cm.lock, flags);
                kfree(cm_id_priv->timewait_info);
                cm_id_priv->timewait_info = NULL;
        }
}
static void cm_destroy_id(struct ib_cm_id *cm_id, int err)
{
        struct cm_id_private *cm_id_priv;
        struct cm_work *work;

        cm_id_priv = container_of(cm_id, struct cm_id_private, id);
retest:
        spin_lock_irq(&cm_id_priv->lock);
        switch (cm_id->state) {
        case IB_CM_LISTEN:
                spin_unlock_irq(&cm_id_priv->lock);

                spin_lock_irq(&cm.lock);
                if (--cm_id_priv->listen_sharecount > 0) {
                        /* The id is still shared. */
                        cm_deref_id(cm_id_priv);
                        spin_unlock_irq(&cm.lock);
                        return;
                }
                rb_erase(&cm_id_priv->service_node, &cm.listen_service_table);
                spin_unlock_irq(&cm.lock);
                break;
        case IB_CM_SIDR_REQ_SENT:
                cm_id->state = IB_CM_IDLE;
                ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
                spin_unlock_irq(&cm_id_priv->lock);
                break;
        case IB_CM_SIDR_REQ_RCVD:
                spin_unlock_irq(&cm_id_priv->lock);
                cm_reject_sidr_req(cm_id_priv, IB_SIDR_REJECT);
                spin_lock_irq(&cm.lock);
                if (!RB_EMPTY_NODE(&cm_id_priv->sidr_id_node))
                        rb_erase(&cm_id_priv->sidr_id_node,
                                 &cm.remote_sidr_table);
                spin_unlock_irq(&cm.lock);
                break;
        case IB_CM_REQ_SENT:
        case IB_CM_MRA_REQ_RCVD:
                ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
                spin_unlock_irq(&cm_id_priv->lock);
                ib_send_cm_rej(cm_id, IB_CM_REJ_TIMEOUT,
                               &cm_id_priv->id.device->node_guid,
                               sizeof cm_id_priv->id.device->node_guid,
                               NULL, 0);
                break;
        case IB_CM_REQ_RCVD:
                if (err == -ENOMEM) {
                        /* Do not reject to allow future retries. */
                        cm_reset_to_idle(cm_id_priv);
                        spin_unlock_irq(&cm_id_priv->lock);
                } else {
                        spin_unlock_irq(&cm_id_priv->lock);
                        ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED,
                                       NULL, 0, NULL, 0);
                }
                break;
        case IB_CM_REP_SENT:
        case IB_CM_MRA_REP_RCVD:
                ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
                /* Fall through */
        case IB_CM_MRA_REQ_SENT:
        case IB_CM_REP_RCVD:
        case IB_CM_MRA_REP_SENT:
                spin_unlock_irq(&cm_id_priv->lock);
                ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED,
                               NULL, 0, NULL, 0);
                break;
        case IB_CM_ESTABLISHED:
                spin_unlock_irq(&cm_id_priv->lock);
                if (cm_id_priv->qp_type == IB_QPT_XRC_TGT)
                        break;
                ib_send_cm_dreq(cm_id, NULL, 0);
                goto retest;
        case IB_CM_DREQ_SENT:
                ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
                cm_enter_timewait(cm_id_priv);
                spin_unlock_irq(&cm_id_priv->lock);
                break;
        case IB_CM_DREQ_RCVD:
                spin_unlock_irq(&cm_id_priv->lock);
                ib_send_cm_drep(cm_id, NULL, 0);
                break;
        default:
                spin_unlock_irq(&cm_id_priv->lock);
                break;
        }

        spin_lock_irq(&cm.lock);
        if (!list_empty(&cm_id_priv->altr_list) &&
            (!cm_id_priv->altr_send_port_not_ready))
                list_del(&cm_id_priv->altr_list);
        if (!list_empty(&cm_id_priv->prim_list) &&
            (!cm_id_priv->prim_send_port_not_ready))
                list_del(&cm_id_priv->prim_list);
        spin_unlock_irq(&cm.lock);

        cm_free_id(cm_id->local_id);
        cm_deref_id(cm_id_priv);
        wait_for_completion(&cm_id_priv->comp);
        while ((work = cm_dequeue_work(cm_id_priv)) != NULL)
                cm_free_work(work);
        kfree(cm_id_priv->private_data);
        kfree(cm_id_priv);
}
void ib_destroy_cm_id(struct ib_cm_id *cm_id)
{
        cm_destroy_id(cm_id, 0);
}
EXPORT_SYMBOL(ib_destroy_cm_id);
/**
 * __ib_cm_listen - Initiates listening on the specified service ID for
 *   connection and service ID resolution requests.
 * @cm_id: Connection identifier associated with the listen request.
 * @service_id: Service identifier matched against incoming connection
 *   and service ID resolution requests.  The service ID should be specified
 *   in network-byte order.  If set to IB_CM_ASSIGN_SERVICE_ID, the CM will
 *   assign a service ID to the caller.
 * @service_mask: Mask applied to service ID used to listen across a
 *   range of service IDs.  If set to 0, the service ID is matched
 *   exactly.  This parameter is ignored if %service_id is set to
 *   IB_CM_ASSIGN_SERVICE_ID.
 */
static int __ib_cm_listen(struct ib_cm_id *cm_id, __be64 service_id,
                          __be64 service_mask)
{
        struct cm_id_private *cm_id_priv, *cur_cm_id_priv;
        int ret = 0;

        service_mask = service_mask ? service_mask : ~cpu_to_be64(0);
        service_id &= service_mask;
        if ((service_id & IB_SERVICE_ID_AGN_MASK) == IB_CM_ASSIGN_SERVICE_ID &&
            (service_id != IB_CM_ASSIGN_SERVICE_ID))
                return -EINVAL;

        cm_id_priv = container_of(cm_id, struct cm_id_private, id);
        if (cm_id->state != IB_CM_IDLE)
                return -EINVAL;

        cm_id->state = IB_CM_LISTEN;
        ++cm_id_priv->listen_sharecount;

        if (service_id == IB_CM_ASSIGN_SERVICE_ID) {
                cm_id->service_id = cpu_to_be64(cm.listen_service_id++);
                cm_id->service_mask = ~cpu_to_be64(0);
        } else {
                cm_id->service_id = service_id;
                cm_id->service_mask = service_mask;
        }
        cur_cm_id_priv = cm_insert_listen(cm_id_priv);

        if (cur_cm_id_priv) {
                cm_id->state = IB_CM_IDLE;
                --cm_id_priv->listen_sharecount;
                ret = -EBUSY;
        }
        return ret;
}
int ib_cm_listen(struct ib_cm_id *cm_id, __be64 service_id, __be64 service_mask)
{
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&cm.lock, flags);
        ret = __ib_cm_listen(cm_id, service_id, service_mask);
        spin_unlock_irqrestore(&cm.lock, flags);

        return ret;
}
EXPORT_SYMBOL(ib_cm_listen);
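
/*
 * Usage sketch (added commentary; the handler and service ID value below are
 * illustrative assumptions, not part of this file).  Returning non-zero from
 * the handler causes the CM to destroy the id.
 *
 *      static int my_handler(struct ib_cm_id *id, struct ib_cm_event *event)
 *      {
 *              return 0;
 *      }
 *
 *      id = ib_create_cm_id(device, my_handler, NULL);
 *      if (!IS_ERR(id))
 *              ret = ib_cm_listen(id, cpu_to_be64(0x1000000000000001ULL), 0);
 */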
/**
 * ib_cm_insert_listen - Create a new listening ib_cm_id and listen on
 *   the given service ID.
 *
 * If there's an existing ID listening on that same device and service ID,
 * return it.
 *
 * @device: Device associated with the cm_id.  All related communication will
 *   be associated with the specified device.
 * @cm_handler: Callback invoked to notify the user of CM events.
 * @service_id: Service identifier matched against incoming connection
 *   and service ID resolution requests.  The service ID should be specified
 *   in network-byte order.  If set to IB_CM_ASSIGN_SERVICE_ID, the CM will
 *   assign a service ID to the caller.
 *
 * Callers should call ib_destroy_cm_id when done with the listener ID.
 */
struct ib_cm_id *ib_cm_insert_listen(struct ib_device *device,
                                     ib_cm_handler cm_handler,
                                     __be64 service_id)
{
        struct cm_id_private *cm_id_priv;
        struct ib_cm_id *cm_id;
        unsigned long flags;
        int err = 0;

        /* Create an ID in advance, since the creation may sleep */
        cm_id = ib_create_cm_id(device, cm_handler, NULL);
        if (IS_ERR(cm_id))
                return cm_id;

        spin_lock_irqsave(&cm.lock, flags);

        if (service_id == IB_CM_ASSIGN_SERVICE_ID)
                goto new_id;

        /* Find an existing ID */
        cm_id_priv = cm_find_listen(device, service_id);
        if (cm_id_priv) {
                if (cm_id->cm_handler != cm_handler || cm_id->context) {
                        /* Sharing an ib_cm_id with different handlers is not
                         * supported */
                        spin_unlock_irqrestore(&cm.lock, flags);
                        ib_destroy_cm_id(cm_id);
                        return ERR_PTR(-EINVAL);
                }
                atomic_inc(&cm_id_priv->refcount);
                ++cm_id_priv->listen_sharecount;
                spin_unlock_irqrestore(&cm.lock, flags);

                ib_destroy_cm_id(cm_id);
                cm_id = &cm_id_priv->id;
                return cm_id;
        }

new_id:
        /* Use newly created ID */
        err = __ib_cm_listen(cm_id, service_id, 0);

        spin_unlock_irqrestore(&cm.lock, flags);

        if (err) {
                ib_destroy_cm_id(cm_id);
                return ERR_PTR(err);
        }
        return cm_id;
}
EXPORT_SYMBOL(ib_cm_insert_listen);
static __be64 cm_form_tid(struct cm_id_private *cm_id_priv,
                          enum cm_msg_sequence msg_seq)
{
        u64 hi_tid, low_tid;

        hi_tid  = ((u64) cm_id_priv->av.port->mad_agent->hi_tid) << 32;
        low_tid = (u64) ((__force u32)cm_id_priv->id.local_id |
                         (msg_seq << 30));
        return cpu_to_be64(hi_tid | low_tid);
}
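
/*
 * Added note: the TID packs the MAD agent's hi_tid into bits 63..32 and the
 * local communication ID into the low 32 bits, with the per-message-type
 * sequence (msg_seq) ORed into bits 31..30 so concurrent exchanges of
 * different message types on one cm_id get distinct transaction IDs.
 */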
static void cm_format_mad_hdr(struct ib_mad_hdr *hdr,
                              __be16 attr_id, __be64 tid)
{
        hdr->base_version  = IB_MGMT_BASE_VERSION;
        hdr->mgmt_class    = IB_MGMT_CLASS_CM;
        hdr->class_version = IB_CM_CLASS_VERSION;
        hdr->method        = IB_MGMT_METHOD_SEND;
        hdr->attr_id       = attr_id;
        hdr->tid           = tid;
}
static void cm_format_req(struct cm_req_msg *req_msg,
                          struct cm_id_private *cm_id_priv,
                          struct ib_cm_req_param *param)
{
        struct sa_path_rec *pri_path = param->primary_path;
        struct sa_path_rec *alt_path = param->alternate_path;
        bool pri_ext = false;

        if (pri_path->rec_type == SA_PATH_REC_TYPE_OPA)
                pri_ext = opa_is_extended_lid(pri_path->opa.dlid,
                                              pri_path->opa.slid);

        cm_format_mad_hdr(&req_msg->hdr, CM_REQ_ATTR_ID,
                          cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_REQ));

        req_msg->local_comm_id = cm_id_priv->id.local_id;
        req_msg->service_id = param->service_id;
        req_msg->local_ca_guid = cm_id_priv->id.device->node_guid;
        cm_req_set_local_qpn(req_msg, cpu_to_be32(param->qp_num));
        cm_req_set_init_depth(req_msg, param->initiator_depth);
        cm_req_set_remote_resp_timeout(req_msg,
                                       param->remote_cm_response_timeout);
        cm_req_set_qp_type(req_msg, param->qp_type);
        cm_req_set_flow_ctrl(req_msg, param->flow_control);
        cm_req_set_starting_psn(req_msg, cpu_to_be32(param->starting_psn));
        cm_req_set_local_resp_timeout(req_msg,
                                      param->local_cm_response_timeout);
        req_msg->pkey = param->primary_path->pkey;
        cm_req_set_path_mtu(req_msg, param->primary_path->mtu);
        cm_req_set_max_cm_retries(req_msg, param->max_cm_retries);

        if (param->qp_type != IB_QPT_XRC_INI) {
                cm_req_set_resp_res(req_msg, param->responder_resources);
                cm_req_set_retry_count(req_msg, param->retry_count);
                cm_req_set_rnr_retry_count(req_msg, param->rnr_retry_count);
                cm_req_set_srq(req_msg, param->srq);
        }

        req_msg->primary_local_gid = pri_path->sgid;
        req_msg->primary_remote_gid = pri_path->dgid;
        if (pri_ext) {
                req_msg->primary_local_gid.global.interface_id
                        = OPA_MAKE_ID(be32_to_cpu(pri_path->opa.slid));
                req_msg->primary_remote_gid.global.interface_id
                        = OPA_MAKE_ID(be32_to_cpu(pri_path->opa.dlid));
        }
        if (pri_path->hop_limit <= 1) {
                req_msg->primary_local_lid = pri_ext ? 0 :
                        htons(ntohl(sa_path_get_slid(pri_path)));
                req_msg->primary_remote_lid = pri_ext ? 0 :
                        htons(ntohl(sa_path_get_dlid(pri_path)));
        } else {
                /* Work-around until there's a way to obtain remote LID info */
                req_msg->primary_local_lid = IB_LID_PERMISSIVE;
                req_msg->primary_remote_lid = IB_LID_PERMISSIVE;
        }
        cm_req_set_primary_flow_label(req_msg, pri_path->flow_label);
        cm_req_set_primary_packet_rate(req_msg, pri_path->rate);
        req_msg->primary_traffic_class = pri_path->traffic_class;
        req_msg->primary_hop_limit = pri_path->hop_limit;
        cm_req_set_primary_sl(req_msg, pri_path->sl);
        cm_req_set_primary_subnet_local(req_msg, (pri_path->hop_limit <= 1));
        cm_req_set_primary_local_ack_timeout(req_msg,
                cm_ack_timeout(cm_id_priv->av.port->cm_dev->ack_delay,
                               pri_path->packet_life_time));

        if (alt_path) {
                bool alt_ext = false;

                if (alt_path->rec_type == SA_PATH_REC_TYPE_OPA)
                        alt_ext = opa_is_extended_lid(alt_path->opa.dlid,
                                                      alt_path->opa.slid);

                req_msg->alt_local_gid = alt_path->sgid;
                req_msg->alt_remote_gid = alt_path->dgid;
                if (alt_ext) {
                        req_msg->alt_local_gid.global.interface_id
                                = OPA_MAKE_ID(be32_to_cpu(alt_path->opa.slid));
                        req_msg->alt_remote_gid.global.interface_id
                                = OPA_MAKE_ID(be32_to_cpu(alt_path->opa.dlid));
                }
                if (alt_path->hop_limit <= 1) {
                        req_msg->alt_local_lid = alt_ext ? 0 :
                                htons(ntohl(sa_path_get_slid(alt_path)));
                        req_msg->alt_remote_lid = alt_ext ? 0 :
                                htons(ntohl(sa_path_get_dlid(alt_path)));
                } else {
                        req_msg->alt_local_lid = IB_LID_PERMISSIVE;
                        req_msg->alt_remote_lid = IB_LID_PERMISSIVE;
                }
                cm_req_set_alt_flow_label(req_msg,
                                          alt_path->flow_label);
                cm_req_set_alt_packet_rate(req_msg, alt_path->rate);
                req_msg->alt_traffic_class = alt_path->traffic_class;
                req_msg->alt_hop_limit = alt_path->hop_limit;
                cm_req_set_alt_sl(req_msg, alt_path->sl);
                cm_req_set_alt_subnet_local(req_msg, (alt_path->hop_limit <= 1));
                cm_req_set_alt_local_ack_timeout(req_msg,
                        cm_ack_timeout(cm_id_priv->av.port->cm_dev->ack_delay,
                                       alt_path->packet_life_time));
        }

        if (param->private_data && param->private_data_len)
                memcpy(req_msg->private_data, param->private_data,
                       param->private_data_len);
}
static int cm_validate_req_param(struct ib_cm_req_param *param)
{
        /* peer-to-peer not supported */
        if (param->peer_to_peer)
                return -EINVAL;

        if (!param->primary_path)
                return -EINVAL;

        if (param->qp_type != IB_QPT_RC && param->qp_type != IB_QPT_UC &&
            param->qp_type != IB_QPT_XRC_INI)
                return -EINVAL;

        if (param->private_data &&
            param->private_data_len > IB_CM_REQ_PRIVATE_DATA_SIZE)
                return -EINVAL;

        if (param->alternate_path &&
            (param->alternate_path->pkey != param->primary_path->pkey ||
             param->alternate_path->mtu != param->primary_path->mtu))
                return -EINVAL;

        return 0;
}
int ib_send_cm_req(struct ib_cm_id *cm_id,
                   struct ib_cm_req_param *param)
{
        struct cm_id_private *cm_id_priv;
        struct cm_req_msg *req_msg;
        unsigned long flags;
        int ret;

        ret = cm_validate_req_param(param);
        if (ret)
                return ret;

        /* Verify that we're not in timewait. */
        cm_id_priv = container_of(cm_id, struct cm_id_private, id);
        spin_lock_irqsave(&cm_id_priv->lock, flags);
        if (cm_id->state != IB_CM_IDLE) {
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                ret = -EINVAL;
                goto out;
        }
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);

        cm_id_priv->timewait_info = cm_create_timewait_info(cm_id_priv->
                                                            id.local_id);
        if (IS_ERR(cm_id_priv->timewait_info)) {
                ret = PTR_ERR(cm_id_priv->timewait_info);
                cm_id_priv->timewait_info = NULL;
                goto out;
        }

        ret = cm_init_av_by_path(param->primary_path, &cm_id_priv->av,
                                 cm_id_priv);
        if (ret)
                goto error1;
        if (param->alternate_path) {
                ret = cm_init_av_by_path(param->alternate_path,
                                         &cm_id_priv->alt_av, cm_id_priv);
                if (ret)
                        goto error1;
        }
        cm_id->service_id = param->service_id;
        cm_id->service_mask = ~cpu_to_be64(0);
        cm_id_priv->timeout_ms = cm_convert_to_ms(
                                    param->primary_path->packet_life_time) * 2 +
                                 cm_convert_to_ms(
                                    param->remote_cm_response_timeout);
        cm_id_priv->max_cm_retries = param->max_cm_retries;
        cm_id_priv->initiator_depth = param->initiator_depth;
        cm_id_priv->responder_resources = param->responder_resources;
        cm_id_priv->retry_count = param->retry_count;
        cm_id_priv->path_mtu = param->primary_path->mtu;
        cm_id_priv->pkey = param->primary_path->pkey;
        cm_id_priv->qp_type = param->qp_type;

        ret = cm_alloc_msg(cm_id_priv, &cm_id_priv->msg);
        if (ret)
                goto error1;

        req_msg = (struct cm_req_msg *) cm_id_priv->msg->mad;
        cm_format_req(req_msg, cm_id_priv, param);
        cm_id_priv->tid = req_msg->hdr.tid;
        cm_id_priv->msg->timeout_ms = cm_id_priv->timeout_ms;
        cm_id_priv->msg->context[1] = (void *) (unsigned long) IB_CM_REQ_SENT;

        cm_id_priv->local_qpn = cm_req_get_local_qpn(req_msg);
        cm_id_priv->rq_psn = cm_req_get_starting_psn(req_msg);

        spin_lock_irqsave(&cm_id_priv->lock, flags);
        ret = ib_post_send_mad(cm_id_priv->msg, NULL);
        if (ret) {
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                goto error2;
        }
        BUG_ON(cm_id->state != IB_CM_IDLE);
        cm_id->state = IB_CM_REQ_SENT;
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);
        return 0;

error2: cm_free_msg(cm_id_priv->msg);
error1: kfree(cm_id_priv->timewait_info);
out:    return ret;
}
EXPORT_SYMBOL(ib_send_cm_req);
static int cm_issue_rej(struct cm_port *port,
                        struct ib_mad_recv_wc *mad_recv_wc,
                        enum ib_cm_rej_reason reason,
                        enum cm_msg_response msg_rejected,
                        void *ari, u8 ari_length)
{
        struct ib_mad_send_buf *msg = NULL;
        struct cm_rej_msg *rej_msg, *rcv_msg;
        int ret;

        ret = cm_alloc_response_msg(port, mad_recv_wc, &msg);
        if (ret)
                return ret;

        /* We just need common CM header information.  Cast to any message. */
        rcv_msg = (struct cm_rej_msg *) mad_recv_wc->recv_buf.mad;
        rej_msg = (struct cm_rej_msg *) msg->mad;

        cm_format_mad_hdr(&rej_msg->hdr, CM_REJ_ATTR_ID, rcv_msg->hdr.tid);
        rej_msg->remote_comm_id = rcv_msg->local_comm_id;
        rej_msg->local_comm_id = rcv_msg->remote_comm_id;
        cm_rej_set_msg_rejected(rej_msg, msg_rejected);
        rej_msg->reason = cpu_to_be16(reason);

        if (ari && ari_length) {
                cm_rej_set_reject_info_len(rej_msg, ari_length);
                memcpy(rej_msg->ari, ari, ari_length);
        }

        ret = ib_post_send_mad(msg, NULL);
        if (ret)
                cm_free_msg(msg);

        return ret;
}
static inline int cm_is_active_peer(__be64 local_ca_guid, __be64 remote_ca_guid,
                                    __be32 local_qpn, __be32 remote_qpn)
{
        return (be64_to_cpu(local_ca_guid) > be64_to_cpu(remote_ca_guid) ||
                ((local_ca_guid == remote_ca_guid) &&
                 (be32_to_cpu(local_qpn) > be32_to_cpu(remote_qpn))));
}
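
/*
 * Added example: with equal CA GUIDs, local_qpn = 0x12 and remote_qpn = 0x10
 * makes the local side the "active" peer of the pair; the higher
 * (GUID, QPN) tuple wins the comparison.
 */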
static bool cm_req_has_alt_path(struct cm_req_msg *req_msg)
{
        return ((req_msg->alt_local_lid) ||
                (ib_is_opa_gid(&req_msg->alt_local_gid)));
}
static void cm_path_set_rec_type(struct ib_device *ib_device, u8 port_num,
                                 struct sa_path_rec *path, union ib_gid *gid)
{
        if (ib_is_opa_gid(gid) && rdma_cap_opa_ah(ib_device, port_num))
                path->rec_type = SA_PATH_REC_TYPE_OPA;
        else
                path->rec_type = SA_PATH_REC_TYPE_IB;
}
static void cm_format_path_lid_from_req(struct cm_req_msg *req_msg,
                                        struct sa_path_rec *primary_path,
                                        struct sa_path_rec *alt_path)
{
        u32 lid;

        if (primary_path->rec_type != SA_PATH_REC_TYPE_OPA) {
                sa_path_set_dlid(primary_path,
                                 htonl(ntohs(req_msg->primary_local_lid)));
                sa_path_set_slid(primary_path,
                                 htonl(ntohs(req_msg->primary_remote_lid)));
        } else {
                lid = opa_get_lid_from_gid(&req_msg->primary_local_gid);
                sa_path_set_dlid(primary_path, cpu_to_be32(lid));

                lid = opa_get_lid_from_gid(&req_msg->primary_remote_gid);
                sa_path_set_slid(primary_path, cpu_to_be32(lid));
        }

        if (!cm_req_has_alt_path(req_msg))
                return;

        if (alt_path->rec_type != SA_PATH_REC_TYPE_OPA) {
                sa_path_set_dlid(alt_path,
                                 htonl(ntohs(req_msg->alt_local_lid)));
                sa_path_set_slid(alt_path,
                                 htonl(ntohs(req_msg->alt_remote_lid)));
        } else {
                lid = opa_get_lid_from_gid(&req_msg->alt_local_gid);
                sa_path_set_dlid(alt_path, cpu_to_be32(lid));

                lid = opa_get_lid_from_gid(&req_msg->alt_remote_gid);
                sa_path_set_slid(alt_path, cpu_to_be32(lid));
        }
}
static void cm_format_paths_from_req(struct cm_req_msg *req_msg,
                                     struct sa_path_rec *primary_path,
                                     struct sa_path_rec *alt_path)
{
        primary_path->dgid = req_msg->primary_local_gid;
        primary_path->sgid = req_msg->primary_remote_gid;
        primary_path->flow_label = cm_req_get_primary_flow_label(req_msg);
        primary_path->hop_limit = req_msg->primary_hop_limit;
        primary_path->traffic_class = req_msg->primary_traffic_class;
        primary_path->reversible = 1;
        primary_path->pkey = req_msg->pkey;
        primary_path->sl = cm_req_get_primary_sl(req_msg);
        primary_path->mtu_selector = IB_SA_EQ;
        primary_path->mtu = cm_req_get_path_mtu(req_msg);
        primary_path->rate_selector = IB_SA_EQ;
        primary_path->rate = cm_req_get_primary_packet_rate(req_msg);
        primary_path->packet_life_time_selector = IB_SA_EQ;
        primary_path->packet_life_time =
                cm_req_get_primary_local_ack_timeout(req_msg);
        primary_path->packet_life_time -= (primary_path->packet_life_time > 0);
        primary_path->service_id = req_msg->service_id;

        if (cm_req_has_alt_path(req_msg)) {
                alt_path->dgid = req_msg->alt_local_gid;
                alt_path->sgid = req_msg->alt_remote_gid;
                alt_path->flow_label = cm_req_get_alt_flow_label(req_msg);
                alt_path->hop_limit = req_msg->alt_hop_limit;
                alt_path->traffic_class = req_msg->alt_traffic_class;
                alt_path->reversible = 1;
                alt_path->pkey = req_msg->pkey;
                alt_path->sl = cm_req_get_alt_sl(req_msg);
                alt_path->mtu_selector = IB_SA_EQ;
                alt_path->mtu = cm_req_get_path_mtu(req_msg);
                alt_path->rate_selector = IB_SA_EQ;
                alt_path->rate = cm_req_get_alt_packet_rate(req_msg);
                alt_path->packet_life_time_selector = IB_SA_EQ;
                alt_path->packet_life_time =
                        cm_req_get_alt_local_ack_timeout(req_msg);
                alt_path->packet_life_time -= (alt_path->packet_life_time > 0);
                alt_path->service_id = req_msg->service_id;
        }
        cm_format_path_lid_from_req(req_msg, primary_path, alt_path);
}
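
/*
 * Added note: the GID pairs are deliberately swapped above (e.g.
 * primary_path->dgid = req_msg->primary_local_gid) because the REQ carries
 * the sender's view of the path, while these path records describe the
 * passive side's return path.
 */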
static u16 cm_get_bth_pkey(struct cm_work *work)
{
        struct ib_device *ib_dev = work->port->cm_dev->ib_device;
        u8 port_num = work->port->port_num;
        u16 pkey_index = work->mad_recv_wc->wc->pkey_index;
        u16 pkey;
        int ret;

        ret = ib_get_cached_pkey(ib_dev, port_num, pkey_index, &pkey);
        if (ret) {
                dev_warn_ratelimited(&ib_dev->dev, "ib_cm: Couldn't retrieve pkey for incoming request (port %d, pkey index %d). %d\n",
                                     port_num, pkey_index, ret);
                return 0;
        }

        return pkey;
}
static void cm_format_req_event(struct cm_work *work,
                                struct cm_id_private *cm_id_priv,
                                struct ib_cm_id *listen_id)
{
        struct cm_req_msg *req_msg;
        struct ib_cm_req_event_param *param;

        req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;
        param = &work->cm_event.param.req_rcvd;
        param->listen_id = listen_id;
        param->bth_pkey = cm_get_bth_pkey(work);
        param->port = cm_id_priv->av.port->port_num;
        param->primary_path = &work->path[0];
        if (cm_req_has_alt_path(req_msg))
                param->alternate_path = &work->path[1];
        else
                param->alternate_path = NULL;
        param->remote_ca_guid = req_msg->local_ca_guid;
        param->remote_qkey = be32_to_cpu(req_msg->local_qkey);
        param->remote_qpn = be32_to_cpu(cm_req_get_local_qpn(req_msg));
        param->qp_type = cm_req_get_qp_type(req_msg);
        param->starting_psn = be32_to_cpu(cm_req_get_starting_psn(req_msg));
        param->responder_resources = cm_req_get_init_depth(req_msg);
        param->initiator_depth = cm_req_get_resp_res(req_msg);
        param->local_cm_response_timeout =
                                cm_req_get_remote_resp_timeout(req_msg);
        param->flow_control = cm_req_get_flow_ctrl(req_msg);
        param->remote_cm_response_timeout =
                                cm_req_get_local_resp_timeout(req_msg);
        param->retry_count = cm_req_get_retry_count(req_msg);
        param->rnr_retry_count = cm_req_get_rnr_retry_count(req_msg);
        param->srq = cm_req_get_srq(req_msg);
        work->cm_event.private_data = &req_msg->private_data;
}
static void cm_process_work(struct cm_id_private *cm_id_priv,
                            struct cm_work *work)
{
        int ret;

        /* We will typically only have the current event to report. */
        ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &work->cm_event);
        cm_free_work(work);

        while (!ret && !atomic_add_negative(-1, &cm_id_priv->work_count)) {
                spin_lock_irq(&cm_id_priv->lock);
                work = cm_dequeue_work(cm_id_priv);
                spin_unlock_irq(&cm_id_priv->lock);
                BUG_ON(!work);
                ret = cm_id_priv->id.cm_handler(&cm_id_priv->id,
                                                &work->cm_event);
                cm_free_work(work);
        }
        cm_deref_id(cm_id_priv);
        if (ret)
                cm_destroy_id(&cm_id_priv->id, ret);
}
static void cm_format_mra(struct cm_mra_msg *mra_msg,
                          struct cm_id_private *cm_id_priv,
                          enum cm_msg_response msg_mraed, u8 service_timeout,
                          const void *private_data, u8 private_data_len)
{
        cm_format_mad_hdr(&mra_msg->hdr, CM_MRA_ATTR_ID, cm_id_priv->tid);
        cm_mra_set_msg_mraed(mra_msg, msg_mraed);
        mra_msg->local_comm_id = cm_id_priv->id.local_id;
        mra_msg->remote_comm_id = cm_id_priv->id.remote_id;
        cm_mra_set_service_timeout(mra_msg, service_timeout);

        if (private_data && private_data_len)
                memcpy(mra_msg->private_data, private_data, private_data_len);
}
static void cm_format_rej(struct cm_rej_msg *rej_msg,
                          struct cm_id_private *cm_id_priv,
                          enum ib_cm_rej_reason reason,
                          void *ari,
                          u8 ari_length,
                          const void *private_data,
                          u8 private_data_len)
{
        cm_format_mad_hdr(&rej_msg->hdr, CM_REJ_ATTR_ID, cm_id_priv->tid);
        rej_msg->remote_comm_id = cm_id_priv->id.remote_id;

        switch(cm_id_priv->id.state) {
        case IB_CM_REQ_RCVD:
                rej_msg->local_comm_id = 0;
                cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REQ);
                break;
        case IB_CM_MRA_REQ_SENT:
                rej_msg->local_comm_id = cm_id_priv->id.local_id;
                cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REQ);
                break;
        case IB_CM_REP_RCVD:
        case IB_CM_MRA_REP_SENT:
                rej_msg->local_comm_id = cm_id_priv->id.local_id;
                cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REP);
                break;
        default:
                rej_msg->local_comm_id = cm_id_priv->id.local_id;
                cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_OTHER);
                break;
        }

        rej_msg->reason = cpu_to_be16(reason);
        if (ari && ari_length) {
                cm_rej_set_reject_info_len(rej_msg, ari_length);
                memcpy(rej_msg->ari, ari, ari_length);
        }

        if (private_data && private_data_len)
                memcpy(rej_msg->private_data, private_data, private_data_len);
}
static void cm_dup_req_handler(struct cm_work *work,
                               struct cm_id_private *cm_id_priv)
{
        struct ib_mad_send_buf *msg = NULL;
        int ret;

        atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
                        counter[CM_REQ_COUNTER]);

        /* Quick state check to discard duplicate REQs. */
        if (cm_id_priv->id.state == IB_CM_REQ_RCVD)
                return;

        ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
        if (ret)
                return;

        spin_lock_irq(&cm_id_priv->lock);
        switch (cm_id_priv->id.state) {
        case IB_CM_MRA_REQ_SENT:
                cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
                              CM_MSG_RESPONSE_REQ, cm_id_priv->service_timeout,
                              cm_id_priv->private_data,
                              cm_id_priv->private_data_len);
                break;
        case IB_CM_TIMEWAIT:
                cm_format_rej((struct cm_rej_msg *) msg->mad, cm_id_priv,
                              IB_CM_REJ_STALE_CONN, NULL, 0, NULL, 0);
                break;
        default:
                goto unlock;
        }
        spin_unlock_irq(&cm_id_priv->lock);

        ret = ib_post_send_mad(msg, NULL);
        if (ret)
                goto free;
        return;

unlock: spin_unlock_irq(&cm_id_priv->lock);
free:   cm_free_msg(msg);
}
static struct cm_id_private * cm_match_req(struct cm_work *work,
                                           struct cm_id_private *cm_id_priv)
{
        struct cm_id_private *listen_cm_id_priv, *cur_cm_id_priv;
        struct cm_timewait_info *timewait_info;
        struct cm_req_msg *req_msg;
        struct ib_cm_id *cm_id;

        req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;

        /* Check for possible duplicate REQ. */
        spin_lock_irq(&cm.lock);
        timewait_info = cm_insert_remote_id(cm_id_priv->timewait_info);
        if (timewait_info) {
                cur_cm_id_priv = cm_get_id(timewait_info->work.local_id,
                                           timewait_info->work.remote_id);
                spin_unlock_irq(&cm.lock);
                if (cur_cm_id_priv) {
                        cm_dup_req_handler(work, cur_cm_id_priv);
                        cm_deref_id(cur_cm_id_priv);
                }
                return NULL;
        }

        /* Check for stale connections. */
        timewait_info = cm_insert_remote_qpn(cm_id_priv->timewait_info);
        if (timewait_info) {
                cm_cleanup_timewait(cm_id_priv->timewait_info);
                cur_cm_id_priv = cm_get_id(timewait_info->work.local_id,
                                           timewait_info->work.remote_id);

                spin_unlock_irq(&cm.lock);
                cm_issue_rej(work->port, work->mad_recv_wc,
                             IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REQ,
                             NULL, 0);
                if (cur_cm_id_priv) {
                        cm_id = &cur_cm_id_priv->id;
                        ib_send_cm_dreq(cm_id, NULL, 0);
                        cm_deref_id(cur_cm_id_priv);
                }
                return NULL;
        }

        /* Find matching listen request. */
        listen_cm_id_priv = cm_find_listen(cm_id_priv->id.device,
                                           req_msg->service_id);
        if (!listen_cm_id_priv) {
                cm_cleanup_timewait(cm_id_priv->timewait_info);
                spin_unlock_irq(&cm.lock);
                cm_issue_rej(work->port, work->mad_recv_wc,
                             IB_CM_REJ_INVALID_SERVICE_ID, CM_MSG_RESPONSE_REQ,
                             NULL, 0);
                return NULL;
        }
        atomic_inc(&listen_cm_id_priv->refcount);
        atomic_inc(&cm_id_priv->refcount);
        cm_id_priv->id.state = IB_CM_REQ_RCVD;
        atomic_inc(&cm_id_priv->work_count);
        spin_unlock_irq(&cm.lock);

        return listen_cm_id_priv;
}
/*
 * Work-around for inter-subnet connections.  If the LIDs are permissive,
 * we need to override the LID/SL data in the REQ with the LID information
 * in the work completion.
 */
static void cm_process_routed_req(struct cm_req_msg *req_msg, struct ib_wc *wc)
{
        if (!cm_req_get_primary_subnet_local(req_msg)) {
                if (req_msg->primary_local_lid == IB_LID_PERMISSIVE) {
                        req_msg->primary_local_lid = ib_lid_be16(wc->slid);
                        cm_req_set_primary_sl(req_msg, wc->sl);
                }

                if (req_msg->primary_remote_lid == IB_LID_PERMISSIVE)
                        req_msg->primary_remote_lid = cpu_to_be16(wc->dlid_path_bits);
        }

        if (!cm_req_get_alt_subnet_local(req_msg)) {
                if (req_msg->alt_local_lid == IB_LID_PERMISSIVE) {
                        req_msg->alt_local_lid = ib_lid_be16(wc->slid);
                        cm_req_set_alt_sl(req_msg, wc->sl);
                }

                if (req_msg->alt_remote_lid == IB_LID_PERMISSIVE)
                        req_msg->alt_remote_lid = cpu_to_be16(wc->dlid_path_bits);
        }
}
static int cm_req_handler(struct cm_work *work)
{
        struct ib_cm_id *cm_id;
        struct cm_id_private *cm_id_priv, *listen_cm_id_priv;
        struct cm_req_msg *req_msg;
        union ib_gid gid;
        struct ib_gid_attr gid_attr;
        const struct ib_global_route *grh;
        int ret;

        req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;

        cm_id = ib_create_cm_id(work->port->cm_dev->ib_device, NULL, NULL);
        if (IS_ERR(cm_id))
                return PTR_ERR(cm_id);

        cm_id_priv = container_of(cm_id, struct cm_id_private, id);
        cm_id_priv->id.remote_id = req_msg->local_comm_id;
        cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
                                work->mad_recv_wc->recv_buf.grh,
                                &cm_id_priv->av);
        cm_id_priv->timewait_info = cm_create_timewait_info(cm_id_priv->
                                                            id.local_id);
        if (IS_ERR(cm_id_priv->timewait_info)) {
                ret = PTR_ERR(cm_id_priv->timewait_info);
                cm_id_priv->timewait_info = NULL;
                goto destroy;
        }
        cm_id_priv->timewait_info->work.remote_id = req_msg->local_comm_id;
        cm_id_priv->timewait_info->remote_ca_guid = req_msg->local_ca_guid;
        cm_id_priv->timewait_info->remote_qpn = cm_req_get_local_qpn(req_msg);

        listen_cm_id_priv = cm_match_req(work, cm_id_priv);
        if (!listen_cm_id_priv) {
                ret = -EINVAL;
                kfree(cm_id_priv->timewait_info);
                goto destroy;
        }

        cm_id_priv->id.cm_handler = listen_cm_id_priv->id.cm_handler;
        cm_id_priv->id.context = listen_cm_id_priv->id.context;
        cm_id_priv->id.service_id = req_msg->service_id;
        cm_id_priv->id.service_mask = ~cpu_to_be64(0);

        cm_process_routed_req(req_msg, work->mad_recv_wc->wc);

        memset(&work->path[0], 0, sizeof(work->path[0]));
        if (cm_req_has_alt_path(req_msg))
                memset(&work->path[1], 0, sizeof(work->path[1]));
        grh = rdma_ah_read_grh(&cm_id_priv->av.ah_attr);
        ret = ib_get_cached_gid(work->port->cm_dev->ib_device,
                                work->port->port_num,
                                grh->sgid_index,
                                &gid, &gid_attr);
        if (!ret) {
                if (gid_attr.ndev) {
                        work->path[0].rec_type =
                                sa_conv_gid_to_pathrec_type(gid_attr.gid_type);
                        sa_path_set_ifindex(&work->path[0],
                                            gid_attr.ndev->ifindex);
                        sa_path_set_ndev(&work->path[0],
                                         dev_net(gid_attr.ndev));
                        dev_put(gid_attr.ndev);
                } else {
                        cm_path_set_rec_type(work->port->cm_dev->ib_device,
                                             work->port->port_num,
                                             &work->path[0],
                                             &req_msg->primary_local_gid);
                }
                if (cm_req_has_alt_path(req_msg))
                        work->path[1].rec_type = work->path[0].rec_type;
                cm_format_paths_from_req(req_msg, &work->path[0],
                                         &work->path[1]);
                if (cm_id_priv->av.ah_attr.type == RDMA_AH_ATTR_TYPE_ROCE)
                        sa_path_set_dmac(&work->path[0],
                                         cm_id_priv->av.ah_attr.roce.dmac);
                work->path[0].hop_limit = grh->hop_limit;
                ret = cm_init_av_by_path(&work->path[0], &cm_id_priv->av,
                                         cm_id_priv);
        }
        if (ret) {
                int err = ib_get_cached_gid(work->port->cm_dev->ib_device,
                                            work->port->port_num, 0,
                                            &work->path[0].sgid,
                                            &gid_attr);
                if (!err && gid_attr.ndev) {
                        work->path[0].rec_type =
                                sa_conv_gid_to_pathrec_type(gid_attr.gid_type);
                        sa_path_set_ifindex(&work->path[0],
                                            gid_attr.ndev->ifindex);
                        sa_path_set_ndev(&work->path[0],
                                         dev_net(gid_attr.ndev));
                        dev_put(gid_attr.ndev);
                } else {
                        cm_path_set_rec_type(work->port->cm_dev->ib_device,
                                             work->port->port_num,
                                             &work->path[0],
                                             &req_msg->primary_local_gid);
                }
                if (cm_req_has_alt_path(req_msg))
                        work->path[1].rec_type = work->path[0].rec_type;
                ib_send_cm_rej(cm_id, IB_CM_REJ_INVALID_GID,
                               &work->path[0].sgid, sizeof work->path[0].sgid,
                               NULL, 0);
                goto rejected;
        }
        if (cm_req_has_alt_path(req_msg)) {
                ret = cm_init_av_by_path(&work->path[1], &cm_id_priv->alt_av,
                                         cm_id_priv);
                if (ret) {
                        ib_send_cm_rej(cm_id, IB_CM_REJ_INVALID_ALT_GID,
                                       &work->path[0].sgid,
                                       sizeof work->path[0].sgid, NULL, 0);
                        goto rejected;
                }
        }
        cm_id_priv->tid = req_msg->hdr.tid;
        cm_id_priv->timeout_ms = cm_convert_to_ms(
                                        cm_req_get_local_resp_timeout(req_msg));
        cm_id_priv->max_cm_retries = cm_req_get_max_cm_retries(req_msg);
        cm_id_priv->remote_qpn = cm_req_get_local_qpn(req_msg);
        cm_id_priv->initiator_depth = cm_req_get_resp_res(req_msg);
        cm_id_priv->responder_resources = cm_req_get_init_depth(req_msg);
        cm_id_priv->path_mtu = cm_req_get_path_mtu(req_msg);
        cm_id_priv->pkey = req_msg->pkey;
        cm_id_priv->sq_psn = cm_req_get_starting_psn(req_msg);
        cm_id_priv->retry_count = cm_req_get_retry_count(req_msg);
        cm_id_priv->rnr_retry_count = cm_req_get_rnr_retry_count(req_msg);
        cm_id_priv->qp_type = cm_req_get_qp_type(req_msg);

        cm_format_req_event(work, cm_id_priv, &listen_cm_id_priv->id);
        cm_process_work(cm_id_priv, work);
        cm_deref_id(listen_cm_id_priv);
        return 0;

rejected:
        atomic_dec(&cm_id_priv->refcount);
        cm_deref_id(listen_cm_id_priv);
destroy:
        ib_destroy_cm_id(cm_id);
        return ret;
}
static void cm_format_rep(struct cm_rep_msg *rep_msg,
                          struct cm_id_private *cm_id_priv,
                          struct ib_cm_rep_param *param)
{
        cm_format_mad_hdr(&rep_msg->hdr, CM_REP_ATTR_ID, cm_id_priv->tid);
        rep_msg->local_comm_id = cm_id_priv->id.local_id;
        rep_msg->remote_comm_id = cm_id_priv->id.remote_id;
        cm_rep_set_starting_psn(rep_msg, cpu_to_be32(param->starting_psn));
        rep_msg->resp_resources = param->responder_resources;
        cm_rep_set_target_ack_delay(rep_msg,
                                    cm_id_priv->av.port->cm_dev->ack_delay);
        cm_rep_set_failover(rep_msg, param->failover_accepted);
        cm_rep_set_rnr_retry_count(rep_msg, param->rnr_retry_count);
        rep_msg->local_ca_guid = cm_id_priv->id.device->node_guid;

        if (cm_id_priv->qp_type != IB_QPT_XRC_TGT) {
                rep_msg->initiator_depth = param->initiator_depth;
                cm_rep_set_flow_ctrl(rep_msg, param->flow_control);
                cm_rep_set_srq(rep_msg, param->srq);
                cm_rep_set_local_qpn(rep_msg, cpu_to_be32(param->qp_num));
        } else {
                cm_rep_set_srq(rep_msg, 1);
                cm_rep_set_local_eecn(rep_msg, cpu_to_be32(param->qp_num));
        }

        if (param->private_data && param->private_data_len)
                memcpy(rep_msg->private_data, param->private_data,
                       param->private_data_len);
}
1988 int ib_send_cm_rep(struct ib_cm_id *cm_id,
1989 struct ib_cm_rep_param *param)
1991 struct cm_id_private *cm_id_priv;
1992 struct ib_mad_send_buf *msg;
1993 struct cm_rep_msg *rep_msg;
1994 unsigned long flags;
1995 int ret;
1997 if (param->private_data &&
1998 param->private_data_len > IB_CM_REP_PRIVATE_DATA_SIZE)
1999 return -EINVAL;
2001 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
2002 spin_lock_irqsave(&cm_id_priv->lock, flags);
2003 if (cm_id->state != IB_CM_REQ_RCVD &&
2004 cm_id->state != IB_CM_MRA_REQ_SENT) {
2005 ret = -EINVAL;
2006 goto out;
2007 }
2009 ret = cm_alloc_msg(cm_id_priv, &msg);
2010 if (ret)
2011 goto out;
2013 rep_msg = (struct cm_rep_msg *) msg->mad;
2014 cm_format_rep(rep_msg, cm_id_priv, param);
2015 msg->timeout_ms = cm_id_priv->timeout_ms;
2016 msg->context[1] = (void *) (unsigned long) IB_CM_REP_SENT;
2018 ret = ib_post_send_mad(msg, NULL);
2019 if (ret) {
2020 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2021 cm_free_msg(msg);
2022 return ret;
2023 }
2025 cm_id->state = IB_CM_REP_SENT;
2026 cm_id_priv->msg = msg;
2027 cm_id_priv->initiator_depth = param->initiator_depth;
2028 cm_id_priv->responder_resources = param->responder_resources;
2029 cm_id_priv->rq_psn = cm_rep_get_starting_psn(rep_msg);
2030 cm_id_priv->local_qpn = cpu_to_be32(param->qp_num & 0xFFFFFF);
2032 out: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2033 return ret;
2034 }
2035 EXPORT_SYMBOL(ib_send_cm_rep);
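/*
 * Usage sketch (editorial illustration, not part of the original source):
 * a passive-side ULP typically calls ib_send_cm_rep() from its cm_handler
 * after creating a QP for the new connection.  The my_qp/my_psn names and
 * the parameter values below are hypothetical:
 *
 *	struct ib_cm_rep_param rep = {};
 *
 *	rep.qp_num = my_qp->qp_num;
 *	rep.starting_psn = my_psn;
 *	rep.responder_resources = 4;
 *	rep.initiator_depth = 4;
 *	rep.rnr_retry_count = 7;
 *	ret = ib_send_cm_rep(cm_id, &rep);
 */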
2037 static void cm_format_rtu(struct cm_rtu_msg *rtu_msg,
2038 struct cm_id_private *cm_id_priv,
2039 const void *private_data,
2040 u8 private_data_len)
2042 cm_format_mad_hdr(&rtu_msg->hdr, CM_RTU_ATTR_ID, cm_id_priv->tid);
2043 rtu_msg->local_comm_id = cm_id_priv->id.local_id;
2044 rtu_msg->remote_comm_id = cm_id_priv->id.remote_id;
2046 if (private_data && private_data_len)
2047 memcpy(rtu_msg->private_data, private_data, private_data_len);
2050 int ib_send_cm_rtu(struct ib_cm_id *cm_id,
2051 const void *private_data,
2052 u8 private_data_len)
2054 struct cm_id_private *cm_id_priv;
2055 struct ib_mad_send_buf *msg;
2056 unsigned long flags;
2057 void *data;
2058 int ret;
2060 if (private_data && private_data_len > IB_CM_RTU_PRIVATE_DATA_SIZE)
2061 return -EINVAL;
2063 data = cm_copy_private_data(private_data, private_data_len);
2064 if (IS_ERR(data))
2065 return PTR_ERR(data);
2067 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
2068 spin_lock_irqsave(&cm_id_priv->lock, flags);
2069 if (cm_id->state != IB_CM_REP_RCVD &&
2070 cm_id->state != IB_CM_MRA_REP_SENT) {
2071 ret = -EINVAL;
2072 goto error;
2073 }
2075 ret = cm_alloc_msg(cm_id_priv, &msg);
2076 if (ret)
2077 goto error;
2079 cm_format_rtu((struct cm_rtu_msg *) msg->mad, cm_id_priv,
2080 private_data, private_data_len);
2082 ret = ib_post_send_mad(msg, NULL);
2083 if (ret) {
2084 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2085 cm_free_msg(msg);
2086 kfree(data);
2087 return ret;
2088 }
2090 cm_id->state = IB_CM_ESTABLISHED;
2091 cm_set_private_data(cm_id_priv, data, private_data_len);
2092 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2093 return 0;
2095 error: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2096 kfree(data);
2097 return ret;
2098 }
2099 EXPORT_SYMBOL(ib_send_cm_rtu);
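/*
 * Usage sketch (editorial illustration): the active side completes the
 * three-way handshake from its cm_handler once IB_CM_REP_RECEIVED arrives
 * and its QP has been moved to RTR/RTS:
 *
 *	case IB_CM_REP_RECEIVED:
 *		... transition the QP using ib_cm_init_qp_attr() ...
 *		ret = ib_send_cm_rtu(cm_id, NULL, 0);
 *		break;
 */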
2101 static void cm_format_rep_event(struct cm_work *work, enum ib_qp_type qp_type)
2103 struct cm_rep_msg *rep_msg;
2104 struct ib_cm_rep_event_param *param;
2106 rep_msg = (struct cm_rep_msg *)work->mad_recv_wc->recv_buf.mad;
2107 param = &work->cm_event.param.rep_rcvd;
2108 param->remote_ca_guid = rep_msg->local_ca_guid;
2109 param->remote_qkey = be32_to_cpu(rep_msg->local_qkey);
2110 param->remote_qpn = be32_to_cpu(cm_rep_get_qpn(rep_msg, qp_type));
2111 param->starting_psn = be32_to_cpu(cm_rep_get_starting_psn(rep_msg));
2112 param->responder_resources = rep_msg->initiator_depth;
2113 param->initiator_depth = rep_msg->resp_resources;
2114 param->target_ack_delay = cm_rep_get_target_ack_delay(rep_msg);
2115 param->failover_accepted = cm_rep_get_failover(rep_msg);
2116 param->flow_control = cm_rep_get_flow_ctrl(rep_msg);
2117 param->rnr_retry_count = cm_rep_get_rnr_retry_count(rep_msg);
2118 param->srq = cm_rep_get_srq(rep_msg);
2119 work->cm_event.private_data = &rep_msg->private_data;
2122 static void cm_dup_rep_handler(struct cm_work *work)
2124 struct cm_id_private *cm_id_priv;
2125 struct cm_rep_msg *rep_msg;
2126 struct ib_mad_send_buf *msg = NULL;
2127 int ret;
2129 rep_msg = (struct cm_rep_msg *) work->mad_recv_wc->recv_buf.mad;
2130 cm_id_priv = cm_acquire_id(rep_msg->remote_comm_id,
2131 rep_msg->local_comm_id);
2132 if (!cm_id_priv)
2133 return;
2135 atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
2136 counter[CM_REP_COUNTER]);
2137 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
2138 if (ret)
2139 goto deref;
2141 spin_lock_irq(&cm_id_priv->lock);
2142 if (cm_id_priv->id.state == IB_CM_ESTABLISHED)
2143 cm_format_rtu((struct cm_rtu_msg *) msg->mad, cm_id_priv,
2144 cm_id_priv->private_data,
2145 cm_id_priv->private_data_len);
2146 else if (cm_id_priv->id.state == IB_CM_MRA_REP_SENT)
2147 cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
2148 CM_MSG_RESPONSE_REP, cm_id_priv->service_timeout,
2149 cm_id_priv->private_data,
2150 cm_id_priv->private_data_len);
2151 else
2152 goto unlock;
2153 spin_unlock_irq(&cm_id_priv->lock);
2155 ret = ib_post_send_mad(msg, NULL);
2156 if (ret)
2157 goto free;
2158 goto deref;
2160 unlock: spin_unlock_irq(&cm_id_priv->lock);
2161 free: cm_free_msg(msg);
2162 deref: cm_deref_id(cm_id_priv);
2163 }
2165 static int cm_rep_handler(struct cm_work *work)
2167 struct cm_id_private *cm_id_priv;
2168 struct cm_rep_msg *rep_msg;
2169 int ret;
2170 struct cm_id_private *cur_cm_id_priv;
2171 struct ib_cm_id *cm_id;
2172 struct cm_timewait_info *timewait_info;
2174 rep_msg = (struct cm_rep_msg *)work->mad_recv_wc->recv_buf.mad;
2175 cm_id_priv = cm_acquire_id(rep_msg->remote_comm_id, 0);
2176 if (!cm_id_priv) {
2177 cm_dup_rep_handler(work);
2178 return -EINVAL;
2179 }
2181 cm_format_rep_event(work, cm_id_priv->qp_type);
2183 spin_lock_irq(&cm_id_priv->lock);
2184 switch (cm_id_priv->id.state) {
2185 case IB_CM_REQ_SENT:
2186 case IB_CM_MRA_REQ_RCVD:
2187 break;
2188 default:
2189 spin_unlock_irq(&cm_id_priv->lock);
2190 ret = -EINVAL;
2191 goto error;
2192 }
2194 cm_id_priv->timewait_info->work.remote_id = rep_msg->local_comm_id;
2195 cm_id_priv->timewait_info->remote_ca_guid = rep_msg->local_ca_guid;
2196 cm_id_priv->timewait_info->remote_qpn = cm_rep_get_qpn(rep_msg, cm_id_priv->qp_type);
2198 spin_lock(&cm.lock);
2199 /* Check for duplicate REP. */
2200 if (cm_insert_remote_id(cm_id_priv->timewait_info)) {
2201 spin_unlock(&cm.lock);
2202 spin_unlock_irq(&cm_id_priv->lock);
2203 ret = -EINVAL;
2204 goto error;
2205 }
2206 /* Check for a stale connection. */
2207 timewait_info = cm_insert_remote_qpn(cm_id_priv->timewait_info);
2208 if (timewait_info) {
2209 rb_erase(&cm_id_priv->timewait_info->remote_id_node,
2210 &cm.remote_id_table);
2211 cm_id_priv->timewait_info->inserted_remote_id = 0;
2212 cur_cm_id_priv = cm_get_id(timewait_info->work.local_id,
2213 timewait_info->work.remote_id);
2215 spin_unlock(&cm.lock);
2216 spin_unlock_irq(&cm_id_priv->lock);
2217 cm_issue_rej(work->port, work->mad_recv_wc,
2218 IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REP,
2219 NULL, 0);
2220 ret = -EINVAL;
2221 if (cur_cm_id_priv) {
2222 cm_id = &cur_cm_id_priv->id;
2223 ib_send_cm_dreq(cm_id, NULL, 0);
2224 cm_deref_id(cur_cm_id_priv);
2225 }
2227 goto error;
2228 }
2229 spin_unlock(&cm.lock);
2231 cm_id_priv->id.state = IB_CM_REP_RCVD;
2232 cm_id_priv->id.remote_id = rep_msg->local_comm_id;
2233 cm_id_priv->remote_qpn = cm_rep_get_qpn(rep_msg, cm_id_priv->qp_type);
2234 cm_id_priv->initiator_depth = rep_msg->resp_resources;
2235 cm_id_priv->responder_resources = rep_msg->initiator_depth;
2236 cm_id_priv->sq_psn = cm_rep_get_starting_psn(rep_msg);
2237 cm_id_priv->rnr_retry_count = cm_rep_get_rnr_retry_count(rep_msg);
2238 cm_id_priv->target_ack_delay = cm_rep_get_target_ack_delay(rep_msg);
2239 cm_id_priv->av.timeout =
2240 cm_ack_timeout(cm_id_priv->target_ack_delay,
2241 cm_id_priv->av.timeout - 1);
2242 cm_id_priv->alt_av.timeout =
2243 cm_ack_timeout(cm_id_priv->target_ack_delay,
2244 cm_id_priv->alt_av.timeout - 1);
2246 /* todo: handle peer_to_peer */
2248 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
2249 ret = atomic_inc_and_test(&cm_id_priv->work_count);
2250 if (!ret)
2251 list_add_tail(&work->list, &cm_id_priv->work_list);
2252 spin_unlock_irq(&cm_id_priv->lock);
2254 if (ret)
2255 cm_process_work(cm_id_priv, work);
2256 else
2257 cm_deref_id(cm_id_priv);
2258 return 0;
2260 error:
2261 cm_deref_id(cm_id_priv);
2262 return ret;
2263 }
2265 static int cm_establish_handler(struct cm_work *work)
2267 struct cm_id_private *cm_id_priv;
2268 int ret;
2270 /* See comment in cm_establish about lookup. */
2271 cm_id_priv = cm_acquire_id(work->local_id, work->remote_id);
2272 if (!cm_id_priv)
2273 return -EINVAL;
2275 spin_lock_irq(&cm_id_priv->lock);
2276 if (cm_id_priv->id.state != IB_CM_ESTABLISHED) {
2277 spin_unlock_irq(&cm_id_priv->lock);
2278 goto out;
2279 }
2281 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
2282 ret = atomic_inc_and_test(&cm_id_priv->work_count);
2283 if (!ret)
2284 list_add_tail(&work->list, &cm_id_priv->work_list);
2285 spin_unlock_irq(&cm_id_priv->lock);
2287 if (ret)
2288 cm_process_work(cm_id_priv, work);
2289 else
2290 cm_deref_id(cm_id_priv);
2291 return 0;
2292 out:
2293 cm_deref_id(cm_id_priv);
2294 return -EINVAL;
2295 }
2297 static int cm_rtu_handler(struct cm_work *work)
2299 struct cm_id_private *cm_id_priv;
2300 struct cm_rtu_msg *rtu_msg;
2301 int ret;
2303 rtu_msg = (struct cm_rtu_msg *)work->mad_recv_wc->recv_buf.mad;
2304 cm_id_priv = cm_acquire_id(rtu_msg->remote_comm_id,
2305 rtu_msg->local_comm_id);
2306 if (!cm_id_priv)
2307 return -EINVAL;
2309 work->cm_event.private_data = &rtu_msg->private_data;
2311 spin_lock_irq(&cm_id_priv->lock);
2312 if (cm_id_priv->id.state != IB_CM_REP_SENT &&
2313 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
2314 spin_unlock_irq(&cm_id_priv->lock);
2315 atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
2316 counter[CM_RTU_COUNTER]);
2317 goto deref;
2318 }
2319 cm_id_priv->id.state = IB_CM_ESTABLISHED;
2321 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
2322 ret = atomic_inc_and_test(&cm_id_priv->work_count);
2323 if (!ret)
2324 list_add_tail(&work->list, &cm_id_priv->work_list);
2325 spin_unlock_irq(&cm_id_priv->lock);
2327 if (ret)
2328 cm_process_work(cm_id_priv, work);
2329 else
2330 cm_deref_id(cm_id_priv);
2331 return 0;
2332 deref:
2333 cm_deref_id(cm_id_priv);
2334 return -EINVAL;
2335 }
2337 static void cm_format_dreq(struct cm_dreq_msg *dreq_msg,
2338 struct cm_id_private *cm_id_priv,
2339 const void *private_data,
2340 u8 private_data_len)
2342 cm_format_mad_hdr(&dreq_msg->hdr, CM_DREQ_ATTR_ID,
2343 cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_DREQ));
2344 dreq_msg->local_comm_id = cm_id_priv->id.local_id;
2345 dreq_msg->remote_comm_id = cm_id_priv->id.remote_id;
2346 cm_dreq_set_remote_qpn(dreq_msg, cm_id_priv->remote_qpn);
2348 if (private_data && private_data_len)
2349 memcpy(dreq_msg->private_data, private_data, private_data_len);
2352 int ib_send_cm_dreq(struct ib_cm_id *cm_id,
2353 const void *private_data,
2354 u8 private_data_len)
2356 struct cm_id_private *cm_id_priv;
2357 struct ib_mad_send_buf *msg;
2358 unsigned long flags;
2359 int ret;
2361 if (private_data && private_data_len > IB_CM_DREQ_PRIVATE_DATA_SIZE)
2362 return -EINVAL;
2364 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
2365 spin_lock_irqsave(&cm_id_priv->lock, flags);
2366 if (cm_id->state != IB_CM_ESTABLISHED) {
2367 ret = -EINVAL;
2368 goto out;
2369 }
2371 if (cm_id->lap_state == IB_CM_LAP_SENT ||
2372 cm_id->lap_state == IB_CM_MRA_LAP_RCVD)
2373 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
2375 ret = cm_alloc_msg(cm_id_priv, &msg);
2376 if (ret) {
2377 cm_enter_timewait(cm_id_priv);
2378 goto out;
2379 }
2381 cm_format_dreq((struct cm_dreq_msg *) msg->mad, cm_id_priv,
2382 private_data, private_data_len);
2383 msg->timeout_ms = cm_id_priv->timeout_ms;
2384 msg->context[1] = (void *) (unsigned long) IB_CM_DREQ_SENT;
2386 ret = ib_post_send_mad(msg, NULL);
2387 if (ret) {
2388 cm_enter_timewait(cm_id_priv);
2389 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2390 cm_free_msg(msg);
2391 return ret;
2392 }
2394 cm_id->state = IB_CM_DREQ_SENT;
2395 cm_id_priv->msg = msg;
2396 out: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2397 return ret;
2398 }
2399 EXPORT_SYMBOL(ib_send_cm_dreq);
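/*
 * Usage sketch (editorial illustration): either peer may initiate a
 * graceful disconnect from the ESTABLISHED state; the DREQ is retried
 * until the peer answers with a DREP or the retries are exhausted:
 *
 *	ret = ib_send_cm_dreq(cm_id, NULL, 0);
 */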
2401 static void cm_format_drep(struct cm_drep_msg *drep_msg,
2402 struct cm_id_private *cm_id_priv,
2403 const void *private_data,
2404 u8 private_data_len)
2406 cm_format_mad_hdr(&drep_msg->hdr, CM_DREP_ATTR_ID, cm_id_priv->tid);
2407 drep_msg->local_comm_id = cm_id_priv->id.local_id;
2408 drep_msg->remote_comm_id = cm_id_priv->id.remote_id;
2410 if (private_data && private_data_len)
2411 memcpy(drep_msg->private_data, private_data, private_data_len);
2414 int ib_send_cm_drep(struct ib_cm_id *cm_id,
2415 const void *private_data,
2416 u8 private_data_len)
2418 struct cm_id_private *cm_id_priv;
2419 struct ib_mad_send_buf *msg;
2420 unsigned long flags;
2421 void *data;
2422 int ret;
2424 if (private_data && private_data_len > IB_CM_DREP_PRIVATE_DATA_SIZE)
2425 return -EINVAL;
2427 data = cm_copy_private_data(private_data, private_data_len);
2428 if (IS_ERR(data))
2429 return PTR_ERR(data);
2431 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
2432 spin_lock_irqsave(&cm_id_priv->lock, flags);
2433 if (cm_id->state != IB_CM_DREQ_RCVD) {
2434 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2435 kfree(data);
2436 return -EINVAL;
2437 }
2439 cm_set_private_data(cm_id_priv, data, private_data_len);
2440 cm_enter_timewait(cm_id_priv);
2442 ret = cm_alloc_msg(cm_id_priv, &msg);
2443 if (ret)
2444 goto out;
2446 cm_format_drep((struct cm_drep_msg *) msg->mad, cm_id_priv,
2447 private_data, private_data_len);
2449 ret = ib_post_send_mad(msg, NULL);
2450 if (ret) {
2451 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2452 cm_free_msg(msg);
2453 return ret;
2454 }
2456 out: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2457 return ret;
2458 }
2459 EXPORT_SYMBOL(ib_send_cm_drep);
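/*
 * Usage sketch (editorial illustration): the peer that receives the DREQ
 * replies from its cm_handler, which moves the cm_id into timewait:
 *
 *	case IB_CM_DREQ_RECEIVED:
 *		ret = ib_send_cm_drep(cm_id, NULL, 0);
 *		break;
 */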
2461 static int cm_issue_drep(struct cm_port *port,
2462 struct ib_mad_recv_wc *mad_recv_wc)
2464 struct ib_mad_send_buf *msg = NULL;
2465 struct cm_dreq_msg *dreq_msg;
2466 struct cm_drep_msg *drep_msg;
2467 int ret;
2469 ret = cm_alloc_response_msg(port, mad_recv_wc, &msg);
2470 if (ret)
2471 return ret;
2473 dreq_msg = (struct cm_dreq_msg *) mad_recv_wc->recv_buf.mad;
2474 drep_msg = (struct cm_drep_msg *) msg->mad;
2476 cm_format_mad_hdr(&drep_msg->hdr, CM_DREP_ATTR_ID, dreq_msg->hdr.tid);
2477 drep_msg->remote_comm_id = dreq_msg->local_comm_id;
2478 drep_msg->local_comm_id = dreq_msg->remote_comm_id;
2480 ret = ib_post_send_mad(msg, NULL);
2481 if (ret)
2482 cm_free_msg(msg);
2484 return ret;
2485 }
2487 static int cm_dreq_handler(struct cm_work *work)
2489 struct cm_id_private *cm_id_priv;
2490 struct cm_dreq_msg *dreq_msg;
2491 struct ib_mad_send_buf *msg = NULL;
2492 int ret;
2494 dreq_msg = (struct cm_dreq_msg *)work->mad_recv_wc->recv_buf.mad;
2495 cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
2496 dreq_msg->local_comm_id);
2497 if (!cm_id_priv) {
2498 atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
2499 counter[CM_DREQ_COUNTER]);
2500 cm_issue_drep(work->port, work->mad_recv_wc);
2501 return -EINVAL;
2502 }
2504 work->cm_event.private_data = &dreq_msg->private_data;
2506 spin_lock_irq(&cm_id_priv->lock);
2507 if (cm_id_priv->local_qpn != cm_dreq_get_remote_qpn(dreq_msg))
2508 goto unlock;
2510 switch (cm_id_priv->id.state) {
2511 case IB_CM_REP_SENT:
2512 case IB_CM_DREQ_SENT:
2513 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
2514 break;
2515 case IB_CM_ESTABLISHED:
2516 if (cm_id_priv->id.lap_state == IB_CM_LAP_SENT ||
2517 cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
2518 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
2519 break;
2520 case IB_CM_MRA_REP_RCVD:
2521 break;
2522 case IB_CM_TIMEWAIT:
2523 atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
2524 counter[CM_DREQ_COUNTER]);
2525 msg = cm_alloc_response_msg_no_ah(work->port, work->mad_recv_wc);
2526 if (IS_ERR(msg))
2527 goto unlock;
2529 cm_format_drep((struct cm_drep_msg *) msg->mad, cm_id_priv,
2530 cm_id_priv->private_data,
2531 cm_id_priv->private_data_len);
2532 spin_unlock_irq(&cm_id_priv->lock);
2534 if (cm_create_response_msg_ah(work->port, work->mad_recv_wc, msg) ||
2535 ib_post_send_mad(msg, NULL))
2536 cm_free_msg(msg);
2537 goto deref;
2538 case IB_CM_DREQ_RCVD:
2539 atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
2540 counter[CM_DREQ_COUNTER]);
2541 goto unlock;
2542 default:
2543 goto unlock;
2544 }
2545 cm_id_priv->id.state = IB_CM_DREQ_RCVD;
2546 cm_id_priv->tid = dreq_msg->hdr.tid;
2547 ret = atomic_inc_and_test(&cm_id_priv->work_count);
2548 if (!ret)
2549 list_add_tail(&work->list, &cm_id_priv->work_list);
2550 spin_unlock_irq(&cm_id_priv->lock);
2552 if (ret)
2553 cm_process_work(cm_id_priv, work);
2554 else
2555 cm_deref_id(cm_id_priv);
2556 return 0;
2558 unlock: spin_unlock_irq(&cm_id_priv->lock);
2559 deref: cm_deref_id(cm_id_priv);
2560 return -EINVAL;
2561 }
2563 static int cm_drep_handler(struct cm_work *work)
2565 struct cm_id_private *cm_id_priv;
2566 struct cm_drep_msg *drep_msg;
2567 int ret;
2569 drep_msg = (struct cm_drep_msg *)work->mad_recv_wc->recv_buf.mad;
2570 cm_id_priv = cm_acquire_id(drep_msg->remote_comm_id,
2571 drep_msg->local_comm_id);
2572 if (!cm_id_priv)
2573 return -EINVAL;
2575 work->cm_event.private_data = &drep_msg->private_data;
2577 spin_lock_irq(&cm_id_priv->lock);
2578 if (cm_id_priv->id.state != IB_CM_DREQ_SENT &&
2579 cm_id_priv->id.state != IB_CM_DREQ_RCVD) {
2580 spin_unlock_irq(&cm_id_priv->lock);
2581 goto deref;
2582 }
2583 cm_enter_timewait(cm_id_priv);
2585 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
2586 ret = atomic_inc_and_test(&cm_id_priv->work_count);
2587 if (!ret)
2588 list_add_tail(&work->list, &cm_id_priv->work_list);
2589 spin_unlock_irq(&cm_id_priv->lock);
2591 if (ret)
2592 cm_process_work(cm_id_priv, work);
2593 else
2594 cm_deref_id(cm_id_priv);
2595 return 0;
2596 deref:
2597 cm_deref_id(cm_id_priv);
2598 return -EINVAL;
2599 }
2601 int ib_send_cm_rej(struct ib_cm_id *cm_id,
2602 enum ib_cm_rej_reason reason,
2603 void *ari,
2604 u8 ari_length,
2605 const void *private_data,
2606 u8 private_data_len)
2608 struct cm_id_private *cm_id_priv;
2609 struct ib_mad_send_buf *msg;
2610 unsigned long flags;
2611 int ret;
2613 if ((private_data && private_data_len > IB_CM_REJ_PRIVATE_DATA_SIZE) ||
2614 (ari && ari_length > IB_CM_REJ_ARI_LENGTH))
2615 return -EINVAL;
2617 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
2619 spin_lock_irqsave(&cm_id_priv->lock, flags);
2620 switch (cm_id->state) {
2621 case IB_CM_REQ_SENT:
2622 case IB_CM_MRA_REQ_RCVD:
2623 case IB_CM_REQ_RCVD:
2624 case IB_CM_MRA_REQ_SENT:
2625 case IB_CM_REP_RCVD:
2626 case IB_CM_MRA_REP_SENT:
2627 ret = cm_alloc_msg(cm_id_priv, &msg);
2628 if (!ret)
2629 cm_format_rej((struct cm_rej_msg *) msg->mad,
2630 cm_id_priv, reason, ari, ari_length,
2631 private_data, private_data_len);
2633 cm_reset_to_idle(cm_id_priv);
2634 break;
2635 case IB_CM_REP_SENT:
2636 case IB_CM_MRA_REP_RCVD:
2637 ret = cm_alloc_msg(cm_id_priv, &msg);
2638 if (!ret)
2639 cm_format_rej((struct cm_rej_msg *) msg->mad,
2640 cm_id_priv, reason, ari, ari_length,
2641 private_data, private_data_len);
2643 cm_enter_timewait(cm_id_priv);
2644 break;
2645 default:
2646 ret = -EINVAL;
2647 goto out;
2648 }
2650 if (ret)
2651 goto out;
2653 ret = ib_post_send_mad(msg, NULL);
2654 if (ret)
2655 cm_free_msg(msg);
2657 out: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2658 return ret;
2659 }
2660 EXPORT_SYMBOL(ib_send_cm_rej);
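/*
 * Usage sketch (editorial illustration): a listener that cannot accept a
 * REQ usually rejects it with a consumer-defined reason; the hypothetical
 * rej_pdata buffer can carry a ULP-specific error code back to the
 * initiator:
 *
 *	ret = ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED,
 *			     NULL, 0, rej_pdata, sizeof(rej_pdata));
 */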
2662 static void cm_format_rej_event(struct cm_work *work)
2664 struct cm_rej_msg *rej_msg;
2665 struct ib_cm_rej_event_param *param;
2667 rej_msg = (struct cm_rej_msg *)work->mad_recv_wc->recv_buf.mad;
2668 param = &work->cm_event.param.rej_rcvd;
2669 param->ari = rej_msg->ari;
2670 param->ari_length = cm_rej_get_reject_info_len(rej_msg);
2671 param->reason = __be16_to_cpu(rej_msg->reason);
2672 work->cm_event.private_data = &rej_msg->private_data;
2675 static struct cm_id_private * cm_acquire_rejected_id(struct cm_rej_msg *rej_msg)
2677 struct cm_timewait_info *timewait_info;
2678 struct cm_id_private *cm_id_priv;
2679 __be32 remote_id;
2681 remote_id = rej_msg->local_comm_id;
2683 if (__be16_to_cpu(rej_msg->reason) == IB_CM_REJ_TIMEOUT) {
2684 spin_lock_irq(&cm.lock);
2685 timewait_info = cm_find_remote_id( *((__be64 *) rej_msg->ari),
2686 remote_id);
2687 if (!timewait_info) {
2688 spin_unlock_irq(&cm.lock);
2691 cm_id_priv = idr_find(&cm.local_id_table, (__force int)
2692 (timewait_info->work.local_id ^
2693 cm.random_id_operand));
2694 if (cm_id_priv) {
2695 if (cm_id_priv->id.remote_id == remote_id)
2696 atomic_inc(&cm_id_priv->refcount);
2697 else
2698 cm_id_priv = NULL;
2699 }
2700 spin_unlock_irq(&cm.lock);
2701 } else if (cm_rej_get_msg_rejected(rej_msg) == CM_MSG_RESPONSE_REQ)
2702 cm_id_priv = cm_acquire_id(rej_msg->remote_comm_id, 0);
2703 else
2704 cm_id_priv = cm_acquire_id(rej_msg->remote_comm_id, remote_id);
2706 return cm_id_priv;
2707 }
2709 static int cm_rej_handler(struct cm_work *work)
2711 struct cm_id_private *cm_id_priv;
2712 struct cm_rej_msg *rej_msg;
2713 int ret;
2715 rej_msg = (struct cm_rej_msg *)work->mad_recv_wc->recv_buf.mad;
2716 cm_id_priv = cm_acquire_rejected_id(rej_msg);
2717 if (!cm_id_priv)
2718 return -EINVAL;
2720 cm_format_rej_event(work);
2722 spin_lock_irq(&cm_id_priv->lock);
2723 switch (cm_id_priv->id.state) {
2724 case IB_CM_REQ_SENT:
2725 case IB_CM_MRA_REQ_RCVD:
2726 case IB_CM_REP_SENT:
2727 case IB_CM_MRA_REP_RCVD:
2728 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
2729 /* fall through */
2730 case IB_CM_REQ_RCVD:
2731 case IB_CM_MRA_REQ_SENT:
2732 if (__be16_to_cpu(rej_msg->reason) == IB_CM_REJ_STALE_CONN)
2733 cm_enter_timewait(cm_id_priv);
2734 else
2735 cm_reset_to_idle(cm_id_priv);
2736 break;
2737 case IB_CM_DREQ_SENT:
2738 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
2739 /* fall through */
2740 case IB_CM_REP_RCVD:
2741 case IB_CM_MRA_REP_SENT:
2742 cm_enter_timewait(cm_id_priv);
2743 break;
2744 case IB_CM_ESTABLISHED:
2745 if (cm_id_priv->id.lap_state == IB_CM_LAP_UNINIT ||
2746 cm_id_priv->id.lap_state == IB_CM_LAP_SENT) {
2747 if (cm_id_priv->id.lap_state == IB_CM_LAP_SENT)
2748 ib_cancel_mad(cm_id_priv->av.port->mad_agent,
2749 cm_id_priv->msg);
2750 cm_enter_timewait(cm_id_priv);
2751 break;
2752 }
2753 /* fall through */
2754 default:
2755 spin_unlock_irq(&cm_id_priv->lock);
2756 ret = -EINVAL;
2757 goto out;
2758 }
2760 ret = atomic_inc_and_test(&cm_id_priv->work_count);
2761 if (!ret)
2762 list_add_tail(&work->list, &cm_id_priv->work_list);
2763 spin_unlock_irq(&cm_id_priv->lock);
2765 if (ret)
2766 cm_process_work(cm_id_priv, work);
2767 else
2768 cm_deref_id(cm_id_priv);
2769 return 0;
2770 out:
2771 cm_deref_id(cm_id_priv);
2772 return -EINVAL;
2773 }
2775 int ib_send_cm_mra(struct ib_cm_id *cm_id,
2776 u8 service_timeout,
2777 const void *private_data,
2778 u8 private_data_len)
2780 struct cm_id_private *cm_id_priv;
2781 struct ib_mad_send_buf *msg;
2782 enum ib_cm_state cm_state;
2783 enum ib_cm_lap_state lap_state;
2784 enum cm_msg_response msg_response;
2785 void *data;
2786 unsigned long flags;
2787 int ret;
2789 if (private_data && private_data_len > IB_CM_MRA_PRIVATE_DATA_SIZE)
2790 return -EINVAL;
2792 data = cm_copy_private_data(private_data, private_data_len);
2793 if (IS_ERR(data))
2794 return PTR_ERR(data);
2796 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
2798 spin_lock_irqsave(&cm_id_priv->lock, flags);
2799 switch(cm_id_priv->id.state) {
2800 case IB_CM_REQ_RCVD:
2801 cm_state = IB_CM_MRA_REQ_SENT;
2802 lap_state = cm_id->lap_state;
2803 msg_response = CM_MSG_RESPONSE_REQ;
2804 break;
2805 case IB_CM_REP_RCVD:
2806 cm_state = IB_CM_MRA_REP_SENT;
2807 lap_state = cm_id->lap_state;
2808 msg_response = CM_MSG_RESPONSE_REP;
2809 break;
2810 case IB_CM_ESTABLISHED:
2811 if (cm_id->lap_state == IB_CM_LAP_RCVD) {
2812 cm_state = cm_id->state;
2813 lap_state = IB_CM_MRA_LAP_SENT;
2814 msg_response = CM_MSG_RESPONSE_OTHER;
2815 break;
2816 }
2817 /* fall through */
2818 default:
2819 ret = -EINVAL;
2820 goto error1;
2821 }
2822 if (!(service_timeout & IB_CM_MRA_FLAG_DELAY)) {
2823 ret = cm_alloc_msg(cm_id_priv, &msg);
2824 if (ret)
2825 goto error1;
2827 cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
2828 msg_response, service_timeout,
2829 private_data, private_data_len);
2830 ret = ib_post_send_mad(msg, NULL);
2831 if (ret)
2832 goto error2;
2833 }
2835 cm_id->state = cm_state;
2836 cm_id->lap_state = lap_state;
2837 cm_id_priv->service_timeout = service_timeout;
2838 cm_set_private_data(cm_id_priv, data, private_data_len);
2839 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2840 return 0;
2842 error1: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2843 kfree(data);
2844 return ret;
2846 error2: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2847 kfree(data);
2848 cm_free_msg(msg);
2849 return ret;
2850 }
2851 EXPORT_SYMBOL(ib_send_cm_mra);
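/*
 * Usage sketch (editorial illustration): a receiver that needs more time
 * than the sender's timeout allows sends an MRA first.  service_timeout
 * is the 5-bit 4.096us * 2^n encoding; OR-ing in IB_CM_MRA_FLAG_DELAY
 * records the new timeout but defers the MRA until a duplicate message
 * actually arrives:
 *
 *	ret = ib_send_cm_mra(cm_id, service_timeout, NULL, 0);
 */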
2853 static struct cm_id_private * cm_acquire_mraed_id(struct cm_mra_msg *mra_msg)
2855 switch (cm_mra_get_msg_mraed(mra_msg)) {
2856 case CM_MSG_RESPONSE_REQ:
2857 return cm_acquire_id(mra_msg->remote_comm_id, 0);
2858 case CM_MSG_RESPONSE_REP:
2859 case CM_MSG_RESPONSE_OTHER:
2860 return cm_acquire_id(mra_msg->remote_comm_id,
2861 mra_msg->local_comm_id);
2862 default:
2863 return NULL;
2864 }
2865 }
2867 static int cm_mra_handler(struct cm_work *work)
2869 struct cm_id_private *cm_id_priv;
2870 struct cm_mra_msg *mra_msg;
2871 int timeout, ret;
2873 mra_msg = (struct cm_mra_msg *)work->mad_recv_wc->recv_buf.mad;
2874 cm_id_priv = cm_acquire_mraed_id(mra_msg);
2875 if (!cm_id_priv)
2876 return -EINVAL;
2878 work->cm_event.private_data = &mra_msg->private_data;
2879 work->cm_event.param.mra_rcvd.service_timeout =
2880 cm_mra_get_service_timeout(mra_msg);
2881 timeout = cm_convert_to_ms(cm_mra_get_service_timeout(mra_msg)) +
2882 cm_convert_to_ms(cm_id_priv->av.timeout);
2884 spin_lock_irq(&cm_id_priv->lock);
2885 switch (cm_id_priv->id.state) {
2886 case IB_CM_REQ_SENT:
2887 if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_REQ ||
2888 ib_modify_mad(cm_id_priv->av.port->mad_agent,
2889 cm_id_priv->msg, timeout))
2890 goto out;
2891 cm_id_priv->id.state = IB_CM_MRA_REQ_RCVD;
2892 break;
2893 case IB_CM_REP_SENT:
2894 if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_REP ||
2895 ib_modify_mad(cm_id_priv->av.port->mad_agent,
2896 cm_id_priv->msg, timeout))
2897 goto out;
2898 cm_id_priv->id.state = IB_CM_MRA_REP_RCVD;
2899 break;
2900 case IB_CM_ESTABLISHED:
2901 if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_OTHER ||
2902 cm_id_priv->id.lap_state != IB_CM_LAP_SENT ||
2903 ib_modify_mad(cm_id_priv->av.port->mad_agent,
2904 cm_id_priv->msg, timeout)) {
2905 if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
2906 atomic_long_inc(&work->port->
2907 counter_group[CM_RECV_DUPLICATES].
2908 counter[CM_MRA_COUNTER]);
2909 goto out;
2910 }
2911 cm_id_priv->id.lap_state = IB_CM_MRA_LAP_RCVD;
2912 break;
2913 case IB_CM_MRA_REQ_RCVD:
2914 case IB_CM_MRA_REP_RCVD:
2915 atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
2916 counter[CM_MRA_COUNTER]);
2917 /* fall through */
2918 default:
2919 goto out;
2920 }
2922 cm_id_priv->msg->context[1] = (void *) (unsigned long)
2923 cm_id_priv->id.state;
2924 ret = atomic_inc_and_test(&cm_id_priv->work_count);
2925 if (!ret)
2926 list_add_tail(&work->list, &cm_id_priv->work_list);
2927 spin_unlock_irq(&cm_id_priv->lock);
2929 if (ret)
2930 cm_process_work(cm_id_priv, work);
2931 else
2932 cm_deref_id(cm_id_priv);
2933 return 0;
2934 out:
2935 spin_unlock_irq(&cm_id_priv->lock);
2936 cm_deref_id(cm_id_priv);
2937 return -EINVAL;
2938 }
2940 static void cm_format_lap(struct cm_lap_msg *lap_msg,
2941 struct cm_id_private *cm_id_priv,
2942 struct sa_path_rec *alternate_path,
2943 const void *private_data,
2944 u8 private_data_len)
2946 bool alt_ext = false;
2948 if (alternate_path->rec_type == SA_PATH_REC_TYPE_OPA)
2949 alt_ext = opa_is_extended_lid(alternate_path->opa.dlid,
2950 alternate_path->opa.slid);
2951 cm_format_mad_hdr(&lap_msg->hdr, CM_LAP_ATTR_ID,
2952 cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_LAP));
2953 lap_msg->local_comm_id = cm_id_priv->id.local_id;
2954 lap_msg->remote_comm_id = cm_id_priv->id.remote_id;
2955 cm_lap_set_remote_qpn(lap_msg, cm_id_priv->remote_qpn);
2956 /* todo: need remote CM response timeout */
2957 cm_lap_set_remote_resp_timeout(lap_msg, 0x1F);
2958 lap_msg->alt_local_lid =
2959 htons(ntohl(sa_path_get_slid(alternate_path)));
2960 lap_msg->alt_remote_lid =
2961 htons(ntohl(sa_path_get_dlid(alternate_path)));
2962 lap_msg->alt_local_gid = alternate_path->sgid;
2963 lap_msg->alt_remote_gid = alternate_path->dgid;
2964 if (alt_ext) {
2965 lap_msg->alt_local_gid.global.interface_id
2966 = OPA_MAKE_ID(be32_to_cpu(alternate_path->opa.slid));
2967 lap_msg->alt_remote_gid.global.interface_id
2968 = OPA_MAKE_ID(be32_to_cpu(alternate_path->opa.dlid));
2969 }
2970 cm_lap_set_flow_label(lap_msg, alternate_path->flow_label);
2971 cm_lap_set_traffic_class(lap_msg, alternate_path->traffic_class);
2972 lap_msg->alt_hop_limit = alternate_path->hop_limit;
2973 cm_lap_set_packet_rate(lap_msg, alternate_path->rate);
2974 cm_lap_set_sl(lap_msg, alternate_path->sl);
2975 cm_lap_set_subnet_local(lap_msg, 1); /* local only... */
2976 cm_lap_set_local_ack_timeout(lap_msg,
2977 cm_ack_timeout(cm_id_priv->av.port->cm_dev->ack_delay,
2978 alternate_path->packet_life_time));
2980 if (private_data && private_data_len)
2981 memcpy(lap_msg->private_data, private_data, private_data_len);
2984 int ib_send_cm_lap(struct ib_cm_id *cm_id,
2985 struct sa_path_rec *alternate_path,
2986 const void *private_data,
2987 u8 private_data_len)
2989 struct cm_id_private *cm_id_priv;
2990 struct ib_mad_send_buf *msg;
2991 unsigned long flags;
2992 int ret;
2994 if (private_data && private_data_len > IB_CM_LAP_PRIVATE_DATA_SIZE)
2995 return -EINVAL;
2997 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
2998 spin_lock_irqsave(&cm_id_priv->lock, flags);
2999 if (cm_id->state != IB_CM_ESTABLISHED ||
3000 (cm_id->lap_state != IB_CM_LAP_UNINIT &&
3001 cm_id->lap_state != IB_CM_LAP_IDLE)) {
3002 ret = -EINVAL;
3003 goto out;
3004 }
3006 ret = cm_init_av_by_path(alternate_path, &cm_id_priv->alt_av,
3007 cm_id_priv);
3008 if (ret)
3009 goto out;
3010 cm_id_priv->alt_av.timeout =
3011 cm_ack_timeout(cm_id_priv->target_ack_delay,
3012 cm_id_priv->alt_av.timeout - 1);
3014 ret = cm_alloc_msg(cm_id_priv, &msg);
3015 if (ret)
3016 goto out;
3018 cm_format_lap((struct cm_lap_msg *) msg->mad, cm_id_priv,
3019 alternate_path, private_data, private_data_len);
3020 msg->timeout_ms = cm_id_priv->timeout_ms;
3021 msg->context[1] = (void *) (unsigned long) IB_CM_ESTABLISHED;
3023 ret = ib_post_send_mad(msg, NULL);
3024 if (ret) {
3025 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3026 cm_free_msg(msg);
3027 return ret;
3028 }
3030 cm_id->lap_state = IB_CM_LAP_SENT;
3031 cm_id_priv->msg = msg;
3033 out: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3034 return ret;
3035 }
3036 EXPORT_SYMBOL(ib_send_cm_lap);
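/*
 * Usage sketch (editorial illustration): after obtaining an alternate
 * path record (e.g. from an SA query), the active side arms failover by
 * sending a LAP; alt_rec is a hypothetical sa_path_rec:
 *
 *	ret = ib_send_cm_lap(cm_id, &alt_rec, NULL, 0);
 */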
3038 static void cm_format_path_lid_from_lap(struct cm_lap_msg *lap_msg,
3039 struct sa_path_rec *path)
3040 {
3041 u32 lid;
3043 if (path->rec_type != SA_PATH_REC_TYPE_OPA) {
3044 sa_path_set_dlid(path, htonl(ntohs(lap_msg->alt_local_lid)));
3045 sa_path_set_slid(path, htonl(ntohs(lap_msg->alt_remote_lid)));
3046 } else {
3047 lid = opa_get_lid_from_gid(&lap_msg->alt_local_gid);
3048 sa_path_set_dlid(path, cpu_to_be32(lid));
3050 lid = opa_get_lid_from_gid(&lap_msg->alt_remote_gid);
3051 sa_path_set_slid(path, cpu_to_be32(lid));
3052 }
3053 }
3055 static void cm_format_path_from_lap(struct cm_id_private *cm_id_priv,
3056 struct sa_path_rec *path,
3057 struct cm_lap_msg *lap_msg)
3059 path->dgid = lap_msg->alt_local_gid;
3060 path->sgid = lap_msg->alt_remote_gid;
3061 path->flow_label = cm_lap_get_flow_label(lap_msg);
3062 path->hop_limit = lap_msg->alt_hop_limit;
3063 path->traffic_class = cm_lap_get_traffic_class(lap_msg);
3064 path->reversible = 1;
3065 path->pkey = cm_id_priv->pkey;
3066 path->sl = cm_lap_get_sl(lap_msg);
3067 path->mtu_selector = IB_SA_EQ;
3068 path->mtu = cm_id_priv->path_mtu;
3069 path->rate_selector = IB_SA_EQ;
3070 path->rate = cm_lap_get_packet_rate(lap_msg);
3071 path->packet_life_time_selector = IB_SA_EQ;
3072 path->packet_life_time = cm_lap_get_local_ack_timeout(lap_msg);
3073 path->packet_life_time -= (path->packet_life_time > 0);
3074 cm_format_path_lid_from_lap(lap_msg, path);
3077 static int cm_lap_handler(struct cm_work *work)
3079 struct cm_id_private *cm_id_priv;
3080 struct cm_lap_msg *lap_msg;
3081 struct ib_cm_lap_event_param *param;
3082 struct ib_mad_send_buf *msg = NULL;
3083 int ret;
3085 /* todo: verify LAP request and send reject APR if invalid. */
3086 lap_msg = (struct cm_lap_msg *)work->mad_recv_wc->recv_buf.mad;
3087 cm_id_priv = cm_acquire_id(lap_msg->remote_comm_id,
3088 lap_msg->local_comm_id);
3089 if (!cm_id_priv)
3090 return -EINVAL;
3092 param = &work->cm_event.param.lap_rcvd;
3093 memset(&work->path[0], 0, sizeof(work->path[0]));
3094 cm_path_set_rec_type(work->port->cm_dev->ib_device,
3095 work->port->port_num,
3096 &work->path[0],
3097 &lap_msg->alt_local_gid);
3098 param->alternate_path = &work->path[0];
3099 cm_format_path_from_lap(cm_id_priv, param->alternate_path, lap_msg);
3100 work->cm_event.private_data = &lap_msg->private_data;
3102 spin_lock_irq(&cm_id_priv->lock);
3103 if (cm_id_priv->id.state != IB_CM_ESTABLISHED)
3104 goto unlock;
3106 switch (cm_id_priv->id.lap_state) {
3107 case IB_CM_LAP_UNINIT:
3108 case IB_CM_LAP_IDLE:
3109 break;
3110 case IB_CM_MRA_LAP_SENT:
3111 atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
3112 counter[CM_LAP_COUNTER]);
3113 msg = cm_alloc_response_msg_no_ah(work->port, work->mad_recv_wc);
3114 if (IS_ERR(msg))
3115 goto unlock;
3117 cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
3118 CM_MSG_RESPONSE_OTHER,
3119 cm_id_priv->service_timeout,
3120 cm_id_priv->private_data,
3121 cm_id_priv->private_data_len);
3122 spin_unlock_irq(&cm_id_priv->lock);
3124 if (cm_create_response_msg_ah(work->port, work->mad_recv_wc, msg) ||
3125 ib_post_send_mad(msg, NULL))
3126 cm_free_msg(msg);
3127 goto deref;
3128 case IB_CM_LAP_RCVD:
3129 atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
3130 counter[CM_LAP_COUNTER]);
3131 goto unlock;
3132 default:
3133 goto unlock;
3134 }
3136 cm_id_priv->id.lap_state = IB_CM_LAP_RCVD;
3137 cm_id_priv->tid = lap_msg->hdr.tid;
3138 cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
3139 work->mad_recv_wc->recv_buf.grh,
3140 &cm_id_priv->av);
3141 cm_init_av_by_path(param->alternate_path, &cm_id_priv->alt_av,
3142 cm_id_priv);
3143 ret = atomic_inc_and_test(&cm_id_priv->work_count);
3144 if (!ret)
3145 list_add_tail(&work->list, &cm_id_priv->work_list);
3146 spin_unlock_irq(&cm_id_priv->lock);
3148 if (ret)
3149 cm_process_work(cm_id_priv, work);
3150 else
3151 cm_deref_id(cm_id_priv);
3152 return 0;
3154 unlock: spin_unlock_irq(&cm_id_priv->lock);
3155 deref: cm_deref_id(cm_id_priv);
3156 return -EINVAL;
3157 }
3159 static void cm_format_apr(struct cm_apr_msg *apr_msg,
3160 struct cm_id_private *cm_id_priv,
3161 enum ib_cm_apr_status status,
3162 void *info,
3163 u8 info_length,
3164 const void *private_data,
3165 u8 private_data_len)
3167 cm_format_mad_hdr(&apr_msg->hdr, CM_APR_ATTR_ID, cm_id_priv->tid);
3168 apr_msg->local_comm_id = cm_id_priv->id.local_id;
3169 apr_msg->remote_comm_id = cm_id_priv->id.remote_id;
3170 apr_msg->ap_status = (u8) status;
3172 if (info && info_length) {
3173 apr_msg->info_length = info_length;
3174 memcpy(apr_msg->info, info, info_length);
3175 }
3177 if (private_data && private_data_len)
3178 memcpy(apr_msg->private_data, private_data, private_data_len);
3179 }
3181 int ib_send_cm_apr(struct ib_cm_id *cm_id,
3182 enum ib_cm_apr_status status,
3183 void *info,
3184 u8 info_length,
3185 const void *private_data,
3186 u8 private_data_len)
3188 struct cm_id_private *cm_id_priv;
3189 struct ib_mad_send_buf *msg;
3190 unsigned long flags;
3191 int ret;
3193 if ((private_data && private_data_len > IB_CM_APR_PRIVATE_DATA_SIZE) ||
3194 (info && info_length > IB_CM_APR_INFO_LENGTH))
3195 return -EINVAL;
3197 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
3198 spin_lock_irqsave(&cm_id_priv->lock, flags);
3199 if (cm_id->state != IB_CM_ESTABLISHED ||
3200 (cm_id->lap_state != IB_CM_LAP_RCVD &&
3201 cm_id->lap_state != IB_CM_MRA_LAP_SENT)) {
3202 ret = -EINVAL;
3203 goto out;
3204 }
3206 ret = cm_alloc_msg(cm_id_priv, &msg);
3207 if (ret)
3208 goto out;
3210 cm_format_apr((struct cm_apr_msg *) msg->mad, cm_id_priv, status,
3211 info, info_length, private_data, private_data_len);
3212 ret = ib_post_send_mad(msg, NULL);
3213 if (ret) {
3214 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3215 cm_free_msg(msg);
3216 return ret;
3217 }
3219 cm_id->lap_state = IB_CM_LAP_IDLE;
3220 out: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3221 return ret;
3222 }
3223 EXPORT_SYMBOL(ib_send_cm_apr);
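/*
 * Usage sketch (editorial illustration): the passive side answers a
 * received LAP from its cm_handler with an APR accepting (or rejecting)
 * the proposed alternate path:
 *
 *	case IB_CM_LAP_RECEIVED:
 *		ret = ib_send_cm_apr(cm_id, IB_CM_APR_SUCCESS,
 *				     NULL, 0, NULL, 0);
 *		break;
 */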
3225 static int cm_apr_handler(struct cm_work *work)
3227 struct cm_id_private *cm_id_priv;
3228 struct cm_apr_msg *apr_msg;
3229 int ret;
3231 apr_msg = (struct cm_apr_msg *)work->mad_recv_wc->recv_buf.mad;
3232 cm_id_priv = cm_acquire_id(apr_msg->remote_comm_id,
3233 apr_msg->local_comm_id);
3234 if (!cm_id_priv)
3235 return -EINVAL; /* Unmatched reply. */
3237 work->cm_event.param.apr_rcvd.ap_status = apr_msg->ap_status;
3238 work->cm_event.param.apr_rcvd.apr_info = &apr_msg->info;
3239 work->cm_event.param.apr_rcvd.info_len = apr_msg->info_length;
3240 work->cm_event.private_data = &apr_msg->private_data;
3242 spin_lock_irq(&cm_id_priv->lock);
3243 if (cm_id_priv->id.state != IB_CM_ESTABLISHED ||
3244 (cm_id_priv->id.lap_state != IB_CM_LAP_SENT &&
3245 cm_id_priv->id.lap_state != IB_CM_MRA_LAP_RCVD)) {
3246 spin_unlock_irq(&cm_id_priv->lock);
3247 goto out;
3248 }
3249 cm_id_priv->id.lap_state = IB_CM_LAP_IDLE;
3250 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
3251 cm_id_priv->msg = NULL;
3253 ret = atomic_inc_and_test(&cm_id_priv->work_count);
3254 if (!ret)
3255 list_add_tail(&work->list, &cm_id_priv->work_list);
3256 spin_unlock_irq(&cm_id_priv->lock);
3258 if (ret)
3259 cm_process_work(cm_id_priv, work);
3260 else
3261 cm_deref_id(cm_id_priv);
3262 return 0;
3263 out:
3264 cm_deref_id(cm_id_priv);
3265 return -EINVAL;
3266 }
3268 static int cm_timewait_handler(struct cm_work *work)
3270 struct cm_timewait_info *timewait_info;
3271 struct cm_id_private *cm_id_priv;
3272 int ret;
3274 timewait_info = (struct cm_timewait_info *)work;
3275 spin_lock_irq(&cm.lock);
3276 list_del(&timewait_info->list);
3277 spin_unlock_irq(&cm.lock);
3279 cm_id_priv = cm_acquire_id(timewait_info->work.local_id,
3280 timewait_info->work.remote_id);
3281 if (!cm_id_priv)
3282 return -EINVAL;
3284 spin_lock_irq(&cm_id_priv->lock);
3285 if (cm_id_priv->id.state != IB_CM_TIMEWAIT ||
3286 cm_id_priv->remote_qpn != timewait_info->remote_qpn) {
3287 spin_unlock_irq(&cm_id_priv->lock);
3288 goto out;
3289 }
3290 cm_id_priv->id.state = IB_CM_IDLE;
3291 ret = atomic_inc_and_test(&cm_id_priv->work_count);
3292 if (!ret)
3293 list_add_tail(&work->list, &cm_id_priv->work_list);
3294 spin_unlock_irq(&cm_id_priv->lock);
3296 if (ret)
3297 cm_process_work(cm_id_priv, work);
3298 else
3299 cm_deref_id(cm_id_priv);
3300 return 0;
3301 out:
3302 cm_deref_id(cm_id_priv);
3303 return -EINVAL;
3304 }
3306 static void cm_format_sidr_req(struct cm_sidr_req_msg *sidr_req_msg,
3307 struct cm_id_private *cm_id_priv,
3308 struct ib_cm_sidr_req_param *param)
3310 cm_format_mad_hdr(&sidr_req_msg->hdr, CM_SIDR_REQ_ATTR_ID,
3311 cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_SIDR));
3312 sidr_req_msg->request_id = cm_id_priv->id.local_id;
3313 sidr_req_msg->pkey = param->path->pkey;
3314 sidr_req_msg->service_id = param->service_id;
3316 if (param->private_data && param->private_data_len)
3317 memcpy(sidr_req_msg->private_data, param->private_data,
3318 param->private_data_len);
3321 int ib_send_cm_sidr_req(struct ib_cm_id *cm_id,
3322 struct ib_cm_sidr_req_param *param)
3324 struct cm_id_private *cm_id_priv;
3325 struct ib_mad_send_buf *msg;
3326 unsigned long flags;
3327 int ret;
3329 if (!param->path || (param->private_data &&
3330 param->private_data_len > IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE))
3331 return -EINVAL;
3333 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
3334 ret = cm_init_av_by_path(param->path, &cm_id_priv->av, cm_id_priv);
3335 if (ret)
3336 goto out;
3338 cm_id->service_id = param->service_id;
3339 cm_id->service_mask = ~cpu_to_be64(0);
3340 cm_id_priv->timeout_ms = param->timeout_ms;
3341 cm_id_priv->max_cm_retries = param->max_cm_retries;
3342 ret = cm_alloc_msg(cm_id_priv, &msg);
3343 if (ret)
3344 goto out;
3346 cm_format_sidr_req((struct cm_sidr_req_msg *) msg->mad, cm_id_priv,
3347 param);
3348 msg->timeout_ms = cm_id_priv->timeout_ms;
3349 msg->context[1] = (void *) (unsigned long) IB_CM_SIDR_REQ_SENT;
3351 spin_lock_irqsave(&cm_id_priv->lock, flags);
3352 if (cm_id->state == IB_CM_IDLE)
3353 ret = ib_post_send_mad(msg, NULL);
3354 else
3355 ret = -EINVAL;
3357 if (ret) {
3358 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3359 cm_free_msg(msg);
3360 goto out;
3361 }
3362 cm_id->state = IB_CM_SIDR_REQ_SENT;
3363 cm_id_priv->msg = msg;
3364 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3365 out:
3366 return ret;
3367 }
3368 EXPORT_SYMBOL(ib_send_cm_sidr_req);
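/*
 * Usage sketch (editorial illustration): SIDR resolves a service ID to a
 * remote UD QPN/Q_Key rather than establishing a connection; path and
 * service_id below stand in for caller state:
 *
 *	struct ib_cm_sidr_req_param req = {};
 *
 *	req.path = &path;
 *	req.service_id = service_id;
 *	req.timeout_ms = 1000;
 *	req.max_cm_retries = 3;
 *	ret = ib_send_cm_sidr_req(cm_id, &req);
 */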
3370 static void cm_format_sidr_req_event(struct cm_work *work,
3371 struct ib_cm_id *listen_id)
3373 struct cm_sidr_req_msg *sidr_req_msg;
3374 struct ib_cm_sidr_req_event_param *param;
3376 sidr_req_msg = (struct cm_sidr_req_msg *)
3377 work->mad_recv_wc->recv_buf.mad;
3378 param = &work->cm_event.param.sidr_req_rcvd;
3379 param->pkey = __be16_to_cpu(sidr_req_msg->pkey);
3380 param->listen_id = listen_id;
3381 param->service_id = sidr_req_msg->service_id;
3382 param->bth_pkey = cm_get_bth_pkey(work);
3383 param->port = work->port->port_num;
3384 work->cm_event.private_data = &sidr_req_msg->private_data;
3387 static int cm_sidr_req_handler(struct cm_work *work)
3389 struct ib_cm_id *cm_id;
3390 struct cm_id_private *cm_id_priv, *cur_cm_id_priv;
3391 struct cm_sidr_req_msg *sidr_req_msg;
3392 struct ib_wc *wc;
3394 cm_id = ib_create_cm_id(work->port->cm_dev->ib_device, NULL, NULL);
3395 if (IS_ERR(cm_id))
3396 return PTR_ERR(cm_id);
3397 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
3399 /* Record SGID/SLID and request ID for lookup. */
3400 sidr_req_msg = (struct cm_sidr_req_msg *)
3401 work->mad_recv_wc->recv_buf.mad;
3402 wc = work->mad_recv_wc->wc;
3403 cm_id_priv->av.dgid.global.subnet_prefix = cpu_to_be64(wc->slid);
3404 cm_id_priv->av.dgid.global.interface_id = 0;
3405 cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
3406 work->mad_recv_wc->recv_buf.grh,
3407 &cm_id_priv->av);
3408 cm_id_priv->id.remote_id = sidr_req_msg->request_id;
3409 cm_id_priv->tid = sidr_req_msg->hdr.tid;
3410 atomic_inc(&cm_id_priv->work_count);
3412 spin_lock_irq(&cm.lock);
3413 cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
3414 if (cur_cm_id_priv) {
3415 spin_unlock_irq(&cm.lock);
3416 atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
3417 counter[CM_SIDR_REQ_COUNTER]);
3418 goto out; /* Duplicate message. */
3419 }
3420 cm_id_priv->id.state = IB_CM_SIDR_REQ_RCVD;
3421 cur_cm_id_priv = cm_find_listen(cm_id->device,
3422 sidr_req_msg->service_id);
3423 if (!cur_cm_id_priv) {
3424 spin_unlock_irq(&cm.lock);
3425 cm_reject_sidr_req(cm_id_priv, IB_SIDR_UNSUPPORTED);
3426 goto out; /* No match. */
3427 }
3428 atomic_inc(&cur_cm_id_priv->refcount);
3429 atomic_inc(&cm_id_priv->refcount);
3430 spin_unlock_irq(&cm.lock);
3432 cm_id_priv->id.cm_handler = cur_cm_id_priv->id.cm_handler;
3433 cm_id_priv->id.context = cur_cm_id_priv->id.context;
3434 cm_id_priv->id.service_id = sidr_req_msg->service_id;
3435 cm_id_priv->id.service_mask = ~cpu_to_be64(0);
3437 cm_format_sidr_req_event(work, &cur_cm_id_priv->id);
3438 cm_process_work(cm_id_priv, work);
3439 cm_deref_id(cur_cm_id_priv);
3440 return 0;
3441 out:
3442 ib_destroy_cm_id(&cm_id_priv->id);
3443 return -EINVAL;
3444 }
3446 static void cm_format_sidr_rep(struct cm_sidr_rep_msg *sidr_rep_msg,
3447 struct cm_id_private *cm_id_priv,
3448 struct ib_cm_sidr_rep_param *param)
3450 cm_format_mad_hdr(&sidr_rep_msg->hdr, CM_SIDR_REP_ATTR_ID,
3451 cm_id_priv->tid);
3452 sidr_rep_msg->request_id = cm_id_priv->id.remote_id;
3453 sidr_rep_msg->status = param->status;
3454 cm_sidr_rep_set_qpn(sidr_rep_msg, cpu_to_be32(param->qp_num));
3455 sidr_rep_msg->service_id = cm_id_priv->id.service_id;
3456 sidr_rep_msg->qkey = cpu_to_be32(param->qkey);
3458 if (param->info && param->info_length)
3459 memcpy(sidr_rep_msg->info, param->info, param->info_length);
3461 if (param->private_data && param->private_data_len)
3462 memcpy(sidr_rep_msg->private_data, param->private_data,
3463 param->private_data_len);
3466 int ib_send_cm_sidr_rep(struct ib_cm_id *cm_id,
3467 struct ib_cm_sidr_rep_param *param)
3469 struct cm_id_private *cm_id_priv;
3470 struct ib_mad_send_buf *msg;
3471 unsigned long flags;
3472 int ret;
3474 if ((param->info && param->info_length > IB_CM_SIDR_REP_INFO_LENGTH) ||
3475 (param->private_data &&
3476 param->private_data_len > IB_CM_SIDR_REP_PRIVATE_DATA_SIZE)
3477 return -EINVAL;
3479 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
3480 spin_lock_irqsave(&cm_id_priv->lock, flags);
3481 if (cm_id->state != IB_CM_SIDR_REQ_RCVD) {
3482 ret = -EINVAL;
3483 goto error;
3484 }
3486 ret = cm_alloc_msg(cm_id_priv, &msg);
3487 if (ret)
3488 goto error;
3490 cm_format_sidr_rep((struct cm_sidr_rep_msg *) msg->mad, cm_id_priv,
3491 param);
3492 ret = ib_post_send_mad(msg, NULL);
3493 if (ret) {
3494 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3495 cm_free_msg(msg);
3496 return ret;
3497 }
3498 cm_id->state = IB_CM_IDLE;
3499 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3501 spin_lock_irqsave(&cm.lock, flags);
3502 if (!RB_EMPTY_NODE(&cm_id_priv->sidr_id_node)) {
3503 rb_erase(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
3504 RB_CLEAR_NODE(&cm_id_priv->sidr_id_node);
3506 spin_unlock_irqrestore(&cm.lock, flags);
3507 return 0;
3509 error: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3510 return ret;
3511 }
3512 EXPORT_SYMBOL(ib_send_cm_sidr_rep);
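/*
 * Usage sketch (editorial illustration): a SIDR listener replies from its
 * cm_handler on IB_CM_SIDR_REQ_RECEIVED with the UD QP it exposes for the
 * service; my_qp and my_qkey are hypothetical:
 *
 *	struct ib_cm_sidr_rep_param rep = {};
 *
 *	rep.status = IB_SIDR_SUCCESS;
 *	rep.qp_num = my_qp->qp_num;
 *	rep.qkey = my_qkey;
 *	ret = ib_send_cm_sidr_rep(cm_id, &rep);
 */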
3514 static void cm_format_sidr_rep_event(struct cm_work *work)
3516 struct cm_sidr_rep_msg *sidr_rep_msg;
3517 struct ib_cm_sidr_rep_event_param *param;
3519 sidr_rep_msg = (struct cm_sidr_rep_msg *)
3520 work->mad_recv_wc->recv_buf.mad;
3521 param = &work->cm_event.param.sidr_rep_rcvd;
3522 param->status = sidr_rep_msg->status;
3523 param->qkey = be32_to_cpu(sidr_rep_msg->qkey);
3524 param->qpn = be32_to_cpu(cm_sidr_rep_get_qpn(sidr_rep_msg));
3525 param->info = &sidr_rep_msg->info;
3526 param->info_len = sidr_rep_msg->info_length;
3527 work->cm_event.private_data = &sidr_rep_msg->private_data;
3530 static int cm_sidr_rep_handler(struct cm_work *work)
3532 struct cm_sidr_rep_msg *sidr_rep_msg;
3533 struct cm_id_private *cm_id_priv;
3535 sidr_rep_msg = (struct cm_sidr_rep_msg *)
3536 work->mad_recv_wc->recv_buf.mad;
3537 cm_id_priv = cm_acquire_id(sidr_rep_msg->request_id, 0);
3538 if (!cm_id_priv)
3539 return -EINVAL; /* Unmatched reply. */
3541 spin_lock_irq(&cm_id_priv->lock);
3542 if (cm_id_priv->id.state != IB_CM_SIDR_REQ_SENT) {
3543 spin_unlock_irq(&cm_id_priv->lock);
3544 goto out;
3545 }
3546 cm_id_priv->id.state = IB_CM_IDLE;
3547 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
3548 spin_unlock_irq(&cm_id_priv->lock);
3550 cm_format_sidr_rep_event(work);
3551 cm_process_work(cm_id_priv, work);
3552 return 0;
3553 out:
3554 cm_deref_id(cm_id_priv);
3555 return -EINVAL;
3556 }
3558 static void cm_process_send_error(struct ib_mad_send_buf *msg,
3559 enum ib_wc_status wc_status)
3561 struct cm_id_private *cm_id_priv;
3562 struct ib_cm_event cm_event;
3563 enum ib_cm_state state;
3564 int ret;
3566 memset(&cm_event, 0, sizeof cm_event);
3567 cm_id_priv = msg->context[0];
3569 /* Discard old sends or ones without a response. */
3570 spin_lock_irq(&cm_id_priv->lock);
3571 state = (enum ib_cm_state) (unsigned long) msg->context[1];
3572 if (msg != cm_id_priv->msg || state != cm_id_priv->id.state)
3573 goto discard;
3575 pr_debug_ratelimited("CM: failed sending MAD in state %d. (%s)\n",
3576 state, ib_wc_status_msg(wc_status));
3577 switch (state) {
3578 case IB_CM_REQ_SENT:
3579 case IB_CM_MRA_REQ_RCVD:
3580 cm_reset_to_idle(cm_id_priv);
3581 cm_event.event = IB_CM_REQ_ERROR;
3582 break;
3583 case IB_CM_REP_SENT:
3584 case IB_CM_MRA_REP_RCVD:
3585 cm_reset_to_idle(cm_id_priv);
3586 cm_event.event = IB_CM_REP_ERROR;
3587 break;
3588 case IB_CM_DREQ_SENT:
3589 cm_enter_timewait(cm_id_priv);
3590 cm_event.event = IB_CM_DREQ_ERROR;
3591 break;
3592 case IB_CM_SIDR_REQ_SENT:
3593 cm_id_priv->id.state = IB_CM_IDLE;
3594 cm_event.event = IB_CM_SIDR_REQ_ERROR;
3595 break;
3596 default:
3597 goto discard;
3598 }
3599 spin_unlock_irq(&cm_id_priv->lock);
3600 cm_event.param.send_status = wc_status;
3602 /* No other events can occur on the cm_id at this point. */
3603 ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &cm_event);
3604 cm_free_msg(msg);
3605 if (ret)
3606 ib_destroy_cm_id(&cm_id_priv->id);
3607 return;
3608 discard:
3609 spin_unlock_irq(&cm_id_priv->lock);
3610 cm_free_msg(msg);
3611 }
3613 static void cm_send_handler(struct ib_mad_agent *mad_agent,
3614 struct ib_mad_send_wc *mad_send_wc)
3616 struct ib_mad_send_buf *msg = mad_send_wc->send_buf;
3617 struct cm_port *port;
3618 u16 attr_index;
3620 port = mad_agent->context;
3621 attr_index = be16_to_cpu(((struct ib_mad_hdr *)
3622 msg->mad)->attr_id) - CM_ATTR_ID_OFFSET;
3625 * If the send was in response to a received message (context[0] is not
3626 * set to a cm_id), and is not a REJ, then it is a send that was
3627 * manually retried.
3628 */
3629 if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
3630 msg->retries = 1;
3632 atomic_long_add(1 + msg->retries,
3633 &port->counter_group[CM_XMIT].counter[attr_index]);
3634 if (msg->retries)
3635 atomic_long_add(msg->retries,
3636 &port->counter_group[CM_XMIT_RETRIES].
3637 counter[attr_index]);
3639 switch (mad_send_wc->status) {
3640 case IB_WC_SUCCESS:
3641 case IB_WC_WR_FLUSH_ERR:
3642 cm_free_msg(msg);
3643 break;
3644 default:
3645 if (msg->context[0] && msg->context[1])
3646 cm_process_send_error(msg, mad_send_wc->status);
3647 else
3648 cm_free_msg(msg);
3649 break;
3650 }
3651 }
3653 static void cm_work_handler(struct work_struct *_work)
3655 struct cm_work *work = container_of(_work, struct cm_work, work.work);
3656 int ret;
3658 switch (work->cm_event.event) {
3659 case IB_CM_REQ_RECEIVED:
3660 ret = cm_req_handler(work);
3661 break;
3662 case IB_CM_MRA_RECEIVED:
3663 ret = cm_mra_handler(work);
3664 break;
3665 case IB_CM_REJ_RECEIVED:
3666 ret = cm_rej_handler(work);
3667 break;
3668 case IB_CM_REP_RECEIVED:
3669 ret = cm_rep_handler(work);
3670 break;
3671 case IB_CM_RTU_RECEIVED:
3672 ret = cm_rtu_handler(work);
3673 break;
3674 case IB_CM_USER_ESTABLISHED:
3675 ret = cm_establish_handler(work);
3676 break;
3677 case IB_CM_DREQ_RECEIVED:
3678 ret = cm_dreq_handler(work);
3679 break;
3680 case IB_CM_DREP_RECEIVED:
3681 ret = cm_drep_handler(work);
3682 break;
3683 case IB_CM_SIDR_REQ_RECEIVED:
3684 ret = cm_sidr_req_handler(work);
3685 break;
3686 case IB_CM_SIDR_REP_RECEIVED:
3687 ret = cm_sidr_rep_handler(work);
3688 break;
3689 case IB_CM_LAP_RECEIVED:
3690 ret = cm_lap_handler(work);
3691 break;
3692 case IB_CM_APR_RECEIVED:
3693 ret = cm_apr_handler(work);
3694 break;
3695 case IB_CM_TIMEWAIT_EXIT:
3696 ret = cm_timewait_handler(work);
3697 break;
3698 default:
3699 ret = -EINVAL;
3700 break;
3701 }
3702 if (ret)
3703 cm_free_work(work);
3704 }
3706 static int cm_establish(struct ib_cm_id *cm_id)
3708 struct cm_id_private *cm_id_priv;
3709 struct cm_work *work;
3710 unsigned long flags;
3711 int ret = 0;
3712 struct cm_device *cm_dev;
3714 cm_dev = ib_get_client_data(cm_id->device, &cm_client);
3715 if (!cm_dev)
3716 return -ENODEV;
3718 work = kmalloc(sizeof *work, GFP_ATOMIC);
3719 if (!work)
3720 return -ENOMEM;
3722 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
3723 spin_lock_irqsave(&cm_id_priv->lock, flags);
3724 switch (cm_id->state)
3725 {
3726 case IB_CM_REP_SENT:
3727 case IB_CM_MRA_REP_RCVD:
3728 cm_id->state = IB_CM_ESTABLISHED;
3729 break;
3730 case IB_CM_ESTABLISHED:
3731 ret = -EISCONN;
3732 break;
3733 default:
3734 ret = -EINVAL;
3735 break;
3736 }
3737 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3739 if (ret) {
3740 kfree(work);
3741 goto out;
3742 }
3744 /*
3745 * The CM worker thread may try to destroy the cm_id before it
3746 * can execute this work item. To prevent potential deadlock,
3747 * we need to find the cm_id once we're in the context of the
3748 * worker thread, rather than holding a reference on it.
3749 */
3750 INIT_DELAYED_WORK(&work->work, cm_work_handler);
3751 work->local_id = cm_id->local_id;
3752 work->remote_id = cm_id->remote_id;
3753 work->mad_recv_wc = NULL;
3754 work->cm_event.event = IB_CM_USER_ESTABLISHED;
3756 /* Check if the device started its remove_one */
3757 spin_lock_irqsave(&cm.lock, flags);
3758 if (!cm_dev->going_down) {
3759 queue_delayed_work(cm.wq, &work->work, 0);
3760 } else {
3761 kfree(work);
3762 ret = -ENODEV;
3763 }
3764 spin_unlock_irqrestore(&cm.lock, flags);
3766 out:
3767 return ret;
3768 }
3770 static int cm_migrate(struct ib_cm_id *cm_id)
3772 struct cm_id_private *cm_id_priv;
3773 struct cm_av tmp_av;
3774 unsigned long flags;
3775 int tmp_send_port_not_ready;
3776 int ret = 0;
3778 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
3779 spin_lock_irqsave(&cm_id_priv->lock, flags);
3780 if (cm_id->state == IB_CM_ESTABLISHED &&
3781 (cm_id->lap_state == IB_CM_LAP_UNINIT ||
3782 cm_id->lap_state == IB_CM_LAP_IDLE)) {
3783 cm_id->lap_state = IB_CM_LAP_IDLE;
3784 /* Swap address vector */
3785 tmp_av = cm_id_priv->av;
3786 cm_id_priv->av = cm_id_priv->alt_av;
3787 cm_id_priv->alt_av = tmp_av;
3788 /* Swap port send ready state */
3789 tmp_send_port_not_ready = cm_id_priv->prim_send_port_not_ready;
3790 cm_id_priv->prim_send_port_not_ready = cm_id_priv->altr_send_port_not_ready;
3791 cm_id_priv->altr_send_port_not_ready = tmp_send_port_not_ready;
3792 } else
3793 ret = -EINVAL;
3794 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3796 return ret;
3797 }
3799 int ib_cm_notify(struct ib_cm_id *cm_id, enum ib_event_type event)
3800 {
3801 int ret;
3803 switch (event) {
3804 case IB_EVENT_COMM_EST:
3805 ret = cm_establish(cm_id);
3806 break;
3807 case IB_EVENT_PATH_MIG:
3808 ret = cm_migrate(cm_id);
3809 break;
3810 default:
3811 ret = -EINVAL;
3812 }
3813 return ret;
3814 }
3815 EXPORT_SYMBOL(ib_cm_notify);
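/*
 * Usage sketch (editorial illustration): ULPs forward asynchronous QP
 * events to the CM so it can, e.g., treat an implied RTU (data arriving
 * before the RTU) as connection establishment.  struct my_conn is a
 * hypothetical consumer context:
 *
 *	static void my_qp_event_handler(struct ib_event *event, void *ctx)
 *	{
 *		struct my_conn *conn = ctx;
 *
 *		if (event->event == IB_EVENT_COMM_EST)
 *			ib_cm_notify(conn->cm_id, event->event);
 *	}
 */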
3817 static void cm_recv_handler(struct ib_mad_agent *mad_agent,
3818 struct ib_mad_send_buf *send_buf,
3819 struct ib_mad_recv_wc *mad_recv_wc)
3821 struct cm_port *port = mad_agent->context;
3822 struct cm_work *work;
3823 enum ib_cm_event_type event;
3824 bool alt_path = false;
3825 u16 attr_id;
3826 int paths = 0;
3827 int going_down = 0;
3829 switch (mad_recv_wc->recv_buf.mad->mad_hdr.attr_id) {
3830 case CM_REQ_ATTR_ID:
3831 alt_path = cm_req_has_alt_path((struct cm_req_msg *)
3832 mad_recv_wc->recv_buf.mad);
3833 paths = 1 + (alt_path != 0);
3834 event = IB_CM_REQ_RECEIVED;
3835 break;
3836 case CM_MRA_ATTR_ID:
3837 event = IB_CM_MRA_RECEIVED;
3838 break;
3839 case CM_REJ_ATTR_ID:
3840 event = IB_CM_REJ_RECEIVED;
3841 break;
3842 case CM_REP_ATTR_ID:
3843 event = IB_CM_REP_RECEIVED;
3844 break;
3845 case CM_RTU_ATTR_ID:
3846 event = IB_CM_RTU_RECEIVED;
3847 break;
3848 case CM_DREQ_ATTR_ID:
3849 event = IB_CM_DREQ_RECEIVED;
3850 break;
3851 case CM_DREP_ATTR_ID:
3852 event = IB_CM_DREP_RECEIVED;
3853 break;
3854 case CM_SIDR_REQ_ATTR_ID:
3855 event = IB_CM_SIDR_REQ_RECEIVED;
3856 break;
3857 case CM_SIDR_REP_ATTR_ID:
3858 event = IB_CM_SIDR_REP_RECEIVED;
3859 break;
3860 case CM_LAP_ATTR_ID:
3861 paths = 1;
3862 event = IB_CM_LAP_RECEIVED;
3863 break;
3864 case CM_APR_ATTR_ID:
3865 event = IB_CM_APR_RECEIVED;
3866 break;
3867 default:
3868 ib_free_recv_mad(mad_recv_wc);
3869 return;
3870 }
3872 attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
3873 atomic_long_inc(&port->counter_group[CM_RECV].
3874 counter[attr_id - CM_ATTR_ID_OFFSET]);
3876 work = kmalloc(sizeof(*work) + sizeof(struct sa_path_rec) * paths,
3877 GFP_KERNEL);
3878 if (!work) {
3879 ib_free_recv_mad(mad_recv_wc);
3880 return;
3881 }
3883 INIT_DELAYED_WORK(&work->work, cm_work_handler);
3884 work->cm_event.event = event;
3885 work->mad_recv_wc = mad_recv_wc;
3886 work->port = port;
3888 /* Check if the device started its remove_one */
3889 spin_lock_irq(&cm.lock);
3890 if (!port->cm_dev->going_down)
3891 queue_delayed_work(cm.wq, &work->work, 0);
3892 else
3893 going_down = 1;
3894 spin_unlock_irq(&cm.lock);
3896 if (going_down) {
3897 kfree(work);
3898 ib_free_recv_mad(mad_recv_wc);
3899 }
3900 }
3902 static int cm_init_qp_init_attr(struct cm_id_private *cm_id_priv,
3903 struct ib_qp_attr *qp_attr,
3904 int *qp_attr_mask)
3905 {
3906 unsigned long flags;
3907 int ret;
3909 spin_lock_irqsave(&cm_id_priv->lock, flags);
3910 switch (cm_id_priv->id.state) {
3911 case IB_CM_REQ_SENT:
3912 case IB_CM_MRA_REQ_RCVD:
3913 case IB_CM_REQ_RCVD:
3914 case IB_CM_MRA_REQ_SENT:
3915 case IB_CM_REP_RCVD:
3916 case IB_CM_MRA_REP_SENT:
3917 case IB_CM_REP_SENT:
3918 case IB_CM_MRA_REP_RCVD:
3919 case IB_CM_ESTABLISHED:
3920 *qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS |
3921 IB_QP_PKEY_INDEX | IB_QP_PORT;
3922 qp_attr->qp_access_flags = IB_ACCESS_REMOTE_WRITE;
3923 if (cm_id_priv->responder_resources)
3924 qp_attr->qp_access_flags |= IB_ACCESS_REMOTE_READ |
3925 IB_ACCESS_REMOTE_ATOMIC;
3926 qp_attr->pkey_index = cm_id_priv->av.pkey_index;
3927 qp_attr->port_num = cm_id_priv->av.port->port_num;
3928 ret = 0;
3929 break;
3930 default:
3931 ret = -EINVAL;
3932 break;
3933 }
3934 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3935 return ret;
3936 }
3938 static int cm_init_qp_rtr_attr(struct cm_id_private *cm_id_priv,
3939 struct ib_qp_attr *qp_attr,
3940 int *qp_attr_mask)
3941 {
3942 unsigned long flags;
3943 int ret;
3945 spin_lock_irqsave(&cm_id_priv->lock, flags);
3946 switch (cm_id_priv->id.state) {
3947 case IB_CM_REQ_RCVD:
3948 case IB_CM_MRA_REQ_SENT:
3949 case IB_CM_REP_RCVD:
3950 case IB_CM_MRA_REP_SENT:
3951 case IB_CM_REP_SENT:
3952 case IB_CM_MRA_REP_RCVD:
3953 case IB_CM_ESTABLISHED:
3954 *qp_attr_mask = IB_QP_STATE | IB_QP_AV | IB_QP_PATH_MTU |
3955 IB_QP_DEST_QPN | IB_QP_RQ_PSN;
3956 qp_attr->ah_attr = cm_id_priv->av.ah_attr;
3957 qp_attr->path_mtu = cm_id_priv->path_mtu;
3958 qp_attr->dest_qp_num = be32_to_cpu(cm_id_priv->remote_qpn);
3959 qp_attr->rq_psn = be32_to_cpu(cm_id_priv->rq_psn);
3960 if (cm_id_priv->qp_type == IB_QPT_RC ||
3961 cm_id_priv->qp_type == IB_QPT_XRC_TGT) {
3962 *qp_attr_mask |= IB_QP_MAX_DEST_RD_ATOMIC |
3963 IB_QP_MIN_RNR_TIMER;
3964 qp_attr->max_dest_rd_atomic =
3965 cm_id_priv->responder_resources;
3966 qp_attr->min_rnr_timer = 0;
3967 }
3968 if (rdma_ah_get_dlid(&cm_id_priv->alt_av.ah_attr)) {
3969 *qp_attr_mask |= IB_QP_ALT_PATH;
3970 qp_attr->alt_port_num = cm_id_priv->alt_av.port->port_num;
3971 qp_attr->alt_pkey_index = cm_id_priv->alt_av.pkey_index;
3972 qp_attr->alt_timeout = cm_id_priv->alt_av.timeout;
3973 qp_attr->alt_ah_attr = cm_id_priv->alt_av.ah_attr;
3974 }
3975 ret = 0;
3976 break;
3977 default:
3978 ret = -EINVAL;
3979 break;
3980 }
3981 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3982 return ret;
3983 }
3985 static int cm_init_qp_rts_attr(struct cm_id_private *cm_id_priv,
3986 struct ib_qp_attr *qp_attr,
3987 int *qp_attr_mask)
3988 {
3989 unsigned long flags;
3990 int ret;
3992 spin_lock_irqsave(&cm_id_priv->lock, flags);
3993 switch (cm_id_priv->id.state) {
3994 /* Allow transition to RTS before sending REP */
3995 case IB_CM_REQ_RCVD:
3996 case IB_CM_MRA_REQ_SENT:
3998 case IB_CM_REP_RCVD:
3999 case IB_CM_MRA_REP_SENT:
4000 case IB_CM_REP_SENT:
4001 case IB_CM_MRA_REP_RCVD:
4002 case IB_CM_ESTABLISHED:
4003 if (cm_id_priv->id.lap_state == IB_CM_LAP_UNINIT) {
4004 *qp_attr_mask = IB_QP_STATE | IB_QP_SQ_PSN;
4005 qp_attr->sq_psn = be32_to_cpu(cm_id_priv->sq_psn);
4006 switch (cm_id_priv->qp_type) {
4007 case IB_QPT_RC:
4008 case IB_QPT_XRC_INI:
4009 *qp_attr_mask |= IB_QP_RETRY_CNT | IB_QP_RNR_RETRY |
4010 IB_QP_MAX_QP_RD_ATOMIC;
4011 qp_attr->retry_cnt = cm_id_priv->retry_count;
4012 qp_attr->rnr_retry = cm_id_priv->rnr_retry_count;
4013 qp_attr->max_rd_atomic = cm_id_priv->initiator_depth;
4014 /* fall through */
4015 case IB_QPT_XRC_TGT:
4016 *qp_attr_mask |= IB_QP_TIMEOUT;
4017 qp_attr->timeout = cm_id_priv->av.timeout;
4018 break;
4019 default:
4020 break;
4021 }
4022 if (rdma_ah_get_dlid(&cm_id_priv->alt_av.ah_attr)) {
4023 *qp_attr_mask |= IB_QP_PATH_MIG_STATE;
4024 qp_attr->path_mig_state = IB_MIG_REARM;
4025 }
4026 } else {
4027 *qp_attr_mask = IB_QP_ALT_PATH | IB_QP_PATH_MIG_STATE;
4028 qp_attr->alt_port_num = cm_id_priv->alt_av.port->port_num;
4029 qp_attr->alt_pkey_index = cm_id_priv->alt_av.pkey_index;
4030 qp_attr->alt_timeout = cm_id_priv->alt_av.timeout;
4031 qp_attr->alt_ah_attr = cm_id_priv->alt_av.ah_attr;
4032 qp_attr->path_mig_state = IB_MIG_REARM;
4033 }
4034 ret = 0;
4035 break;
4036 default:
4037 ret = -EINVAL;
4038 break;
4039 }
4040 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
4041 return ret;
4042 }
4044 int ib_cm_init_qp_attr(struct ib_cm_id *cm_id,
4045 struct ib_qp_attr *qp_attr,
4046 int *qp_attr_mask)
4047 {
4048 struct cm_id_private *cm_id_priv;
4049 int ret;
4051 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
4052 switch (qp_attr->qp_state) {
4053 case IB_QPS_INIT:
4054 ret = cm_init_qp_init_attr(cm_id_priv, qp_attr, qp_attr_mask);
4055 break;
4056 case IB_QPS_RTR:
4057 ret = cm_init_qp_rtr_attr(cm_id_priv, qp_attr, qp_attr_mask);
4058 break;
4059 case IB_QPS_RTS:
4060 ret = cm_init_qp_rts_attr(cm_id_priv, qp_attr, qp_attr_mask);
4061 break;
4062 default:
4063 ret = -EINVAL;
4064 break;
4065 }
4066 return ret;
4067 }
4068 EXPORT_SYMBOL(ib_cm_init_qp_attr);
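/*
 * Usage sketch (editorial illustration): consumers drive their QP through
 * INIT/RTR/RTS by asking the CM for the attributes that match the current
 * connection state and handing them to ib_modify_qp():
 *
 *	struct ib_qp_attr attr;
 *	int mask;
 *
 *	attr.qp_state = IB_QPS_RTR;
 *	ret = ib_cm_init_qp_attr(cm_id, &attr, &mask);
 *	if (!ret)
 *		ret = ib_modify_qp(qp, &attr, mask);
 */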
4070 static ssize_t cm_show_counter(struct kobject *obj, struct attribute *attr,
4071 char *buf)
4072 {
4073 struct cm_counter_group *group;
4074 struct cm_counter_attribute *cm_attr;
4076 group = container_of(obj, struct cm_counter_group, obj);
4077 cm_attr = container_of(attr, struct cm_counter_attribute, attr);
4079 return sprintf(buf, "%ld\n",
4080 atomic_long_read(&group->counter[cm_attr->index]));
4081 }
4083 static const struct sysfs_ops cm_counter_ops = {
4084 .show = cm_show_counter
4085 };
4087 static struct kobj_type cm_counter_obj_type = {
4088 .sysfs_ops = &cm_counter_ops,
4089 .default_attrs = cm_counter_default_attrs
4090 };
4092 static void cm_release_port_obj(struct kobject *obj)
4094 struct cm_port *cm_port;
4096 cm_port = container_of(obj, struct cm_port, port_obj);
4097 kfree(cm_port);
4098 }
4100 static struct kobj_type cm_port_obj_type = {
4101 .release = cm_release_port_obj
4102 };
4104 static char *cm_devnode(struct device *dev, umode_t *mode)
4105 {
4106 if (mode)
4107 *mode = 0666;
4108 return kasprintf(GFP_KERNEL, "infiniband/%s", dev_name(dev));
4109 }
4111 struct class cm_class = {
4112 .owner = THIS_MODULE,
4113 .name = "infiniband_cm",
4114 .devnode = cm_devnode,
4115 };
4116 EXPORT_SYMBOL(cm_class);
4118 static int cm_create_port_fs(struct cm_port *port)
4119 {
4120 int i, ret;
4122 ret = kobject_init_and_add(&port->port_obj, &cm_port_obj_type,
4123 &port->cm_dev->device->kobj,
4124 "%d", port->port_num);
4125 if (ret) {
4126 kfree(port);
4127 return ret;
4128 }
4130 for (i = 0; i < CM_COUNTER_GROUPS; i++) {
4131 ret = kobject_init_and_add(&port->counter_group[i].obj,
4132 &cm_counter_obj_type,
4134 "%s", counter_group_names[i]);
4135 if (ret)
4136 goto error;
4137 }
4139 return 0;
4141 error:
4142 while (i--)
4143 kobject_put(&port->counter_group[i].obj);
4144 kobject_put(&port->port_obj);
4145 return ret;
4146 }
4149 static void cm_remove_port_fs(struct cm_port *port)
4150 {
4151 int i;
4153 for (i = 0; i < CM_COUNTER_GROUPS; i++)
4154 kobject_put(&port->counter_group[i].obj);
4156 kobject_put(&port->port_obj);
4157 }
4159 static void cm_add_one(struct ib_device *ib_device)
4161 struct cm_device *cm_dev;
4162 struct cm_port *port;
4163 struct ib_mad_reg_req reg_req = {
4164 .mgmt_class = IB_MGMT_CLASS_CM,
4165 .mgmt_class_version = IB_CM_CLASS_VERSION,
4166 };
4167 struct ib_port_modify port_modify = {
4168 .set_port_cap_mask = IB_PORT_CM_SUP
4169 };
4170 unsigned long flags;
4171 int ret;
4172 int count = 0;
4173 u8 i;
4175 cm_dev = kzalloc(sizeof(*cm_dev) + sizeof(*port) *
4176 ib_device->phys_port_cnt, GFP_KERNEL);
4177 if (!cm_dev)
4178 return;
4180 cm_dev->ib_device = ib_device;
4181 cm_dev->ack_delay = ib_device->attrs.local_ca_ack_delay;
4182 cm_dev->going_down = 0;
4183 cm_dev->device = device_create(&cm_class, &ib_device->dev,
4185 "%s", ib_device->name);
4186 if (IS_ERR(cm_dev->device)) {
4191 set_bit(IB_MGMT_METHOD_SEND, reg_req.method_mask);
4192 for (i = 1; i <= ib_device->phys_port_cnt; i++) {
4193 if (!rdma_cap_ib_cm(ib_device, i))
4196 port = kzalloc(sizeof *port, GFP_KERNEL);
4200 cm_dev->port[i-1] = port;
4201 port->cm_dev = cm_dev;
4204 INIT_LIST_HEAD(&port->cm_priv_prim_list);
4205 INIT_LIST_HEAD(&port->cm_priv_altr_list);
4207 ret = cm_create_port_fs(port);
4211 port->mad_agent = ib_register_mad_agent(ib_device, i,
4219 if (IS_ERR(port->mad_agent))
4222 ret = ib_modify_port(ib_device, i, 0, &port_modify);
4232 ib_set_client_data(ib_device, &cm_client, cm_dev);
4234 write_lock_irqsave(&cm.device_lock, flags);
4235 list_add_tail(&cm_dev->list, &cm.device_list);
4236 write_unlock_irqrestore(&cm.device_lock, flags);
4240 ib_unregister_mad_agent(port->mad_agent);
4242 cm_remove_port_fs(port);
4244 port_modify.set_port_cap_mask = 0;
4245 port_modify.clr_port_cap_mask = IB_PORT_CM_SUP;
4247 if (!rdma_cap_ib_cm(ib_device, i))
4250 port = cm_dev->port[i-1];
4251 ib_modify_port(ib_device, port->port_num, 0, &port_modify);
4252 ib_unregister_mad_agent(port->mad_agent);
4253 cm_remove_port_fs(port);
4256 device_unregister(cm_dev->device);
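
/*
 * cm_remove_one - IB client "remove" callback, undoing cm_add_one().  The
 * device is unlinked from the global list and flagged as going down, each
 * port stops advertising IB_PORT_CM_SUP, outstanding work is flushed, and
 * only then are the MAD agents unregistered and the sysfs objects removed.
 */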
static void cm_remove_one(struct ib_device *ib_device, void *client_data)
{
	struct cm_device *cm_dev = client_data;
	struct cm_port *port;
	struct cm_id_private *cm_id_priv;
	struct ib_mad_agent *cur_mad_agent;
	struct ib_port_modify port_modify = {
		.clr_port_cap_mask = IB_PORT_CM_SUP
	};
	unsigned long flags;
	int i;

	if (!cm_dev)
		return;

	write_lock_irqsave(&cm.device_lock, flags);
	list_del(&cm_dev->list);
	write_unlock_irqrestore(&cm.device_lock, flags);

	spin_lock_irq(&cm.lock);
	cm_dev->going_down = 1;
	spin_unlock_irq(&cm.lock);

	for (i = 1; i <= ib_device->phys_port_cnt; i++) {
		if (!rdma_cap_ib_cm(ib_device, i))
			continue;

		port = cm_dev->port[i-1];
		ib_modify_port(ib_device, port->port_num, 0, &port_modify);
		/* Mark all the cm_id's as not valid */
		spin_lock_irq(&cm.lock);
		list_for_each_entry(cm_id_priv, &port->cm_priv_altr_list, altr_list)
			cm_id_priv->altr_send_port_not_ready = 1;
		list_for_each_entry(cm_id_priv, &port->cm_priv_prim_list, prim_list)
			cm_id_priv->prim_send_port_not_ready = 1;
		spin_unlock_irq(&cm.lock);
		/*
		 * Flush the workqueue after setting going_down; this ensures
		 * that no new work is queued by the receive handler, so it is
		 * then safe to unregister the MAD agent.
		 */
		flush_workqueue(cm.wq);
		spin_lock_irq(&cm.state_lock);
		cur_mad_agent = port->mad_agent;
		port->mad_agent = NULL;
		spin_unlock_irq(&cm.state_lock);
		ib_unregister_mad_agent(cur_mad_agent);
		cm_remove_port_fs(port);
	}

	device_unregister(cm_dev->device);
	kfree(cm_dev);
}
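
/*
 * Module init: zero the global cm state, set up its locks, lookup trees,
 * local ID allocator, and timewait list, then register the infiniband_cm
 * class, create the "ib_cm" workqueue (max_active = 1), and register as an
 * IB client so cm_add_one() runs for existing and future devices.
 */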
static int __init ib_cm_init(void)
{
	int ret;

	memset(&cm, 0, sizeof cm);
	INIT_LIST_HEAD(&cm.device_list);
	rwlock_init(&cm.device_lock);
	spin_lock_init(&cm.lock);
	spin_lock_init(&cm.state_lock);
	cm.listen_service_table = RB_ROOT;
	cm.listen_service_id = be64_to_cpu(IB_CM_ASSIGN_SERVICE_ID);
	cm.remote_id_table = RB_ROOT;
	cm.remote_qp_table = RB_ROOT;
	cm.remote_sidr_table = RB_ROOT;
	idr_init(&cm.local_id_table);
	get_random_bytes(&cm.random_id_operand, sizeof cm.random_id_operand);
	INIT_LIST_HEAD(&cm.timewait_list);

	ret = class_register(&cm_class);
	if (ret) {
		ret = -ENOMEM;
		goto error1;
	}

	cm.wq = alloc_workqueue("ib_cm", 0, 1);
	if (!cm.wq) {
		ret = -ENOMEM;
		goto error2;
	}

	ret = ib_register_client(&cm_client);
	if (ret)
		goto error3;

	return 0;
error3:
	destroy_workqueue(cm.wq);
error2:
	class_unregister(&cm_class);
error1:
	idr_destroy(&cm.local_id_table);
	return ret;
}
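
/*
 * Module exit: cancel pending timewait work under cm.lock, unregister the
 * IB client (which tears down each device via cm_remove_one()), drain the
 * workqueue, free any timewait entries still queued, and release the class
 * and the local ID allocator.
 */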
static void __exit ib_cm_cleanup(void)
{
	struct cm_timewait_info *timewait_info, *tmp;

	spin_lock_irq(&cm.lock);
	list_for_each_entry(timewait_info, &cm.timewait_list, list)
		cancel_delayed_work(&timewait_info->work.work);
	spin_unlock_irq(&cm.lock);

	ib_unregister_client(&cm_client);
	destroy_workqueue(cm.wq);

	list_for_each_entry_safe(timewait_info, tmp, &cm.timewait_list, list) {
		list_del(&timewait_info->list);
		kfree(timewait_info);
	}

	class_unregister(&cm_class);
	idr_destroy(&cm.local_id_table);
}

module_init(ib_cm_init);
module_exit(ib_cm_cleanup);