/*
 * Copyright (c) 2004-2007 Intel Corporation. All rights reserved.
 * Copyright (c) 2004 Topspin Corporation. All rights reserved.
 * Copyright (c) 2004, 2005 Voltaire Corporation. All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/completion.h>
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/random.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
#include <linux/workqueue.h>
#include <linux/kdev_t.h>
#include <linux/etherdevice.h>

#include <rdma/ib_cache.h>
#include <rdma/ib_cm.h>
#include "cm_msgs.h"

MODULE_AUTHOR("Sean Hefty");
MODULE_DESCRIPTION("InfiniBand CM");
MODULE_LICENSE("Dual BSD/GPL");

static void cm_add_one(struct ib_device *device);
static void cm_remove_one(struct ib_device *device, void *client_data);

static struct ib_client cm_client = {
	.name   = "cm",
	.add    = cm_add_one,
	.remove = cm_remove_one
};

static struct ib_cm {
	spinlock_t lock;
	struct list_head device_list;
	rwlock_t device_lock;
	struct rb_root listen_service_table;
	u64 listen_service_id;
	/* struct rb_root peer_service_table; todo: fix peer to peer */
	struct rb_root remote_qp_table;
	struct rb_root remote_id_table;
	struct rb_root remote_sidr_table;
	struct idr local_id_table;
	__be32 random_id_operand;
	struct list_head timewait_list;
	struct workqueue_struct *wq;
	/* Sync on cm change port state */
	spinlock_t state_lock;
} cm;

/* Counter indexes ordered by attribute ID */
enum {
	CM_REQ_COUNTER,
	CM_MRA_COUNTER,
	CM_REJ_COUNTER,
	CM_REP_COUNTER,
	CM_RTU_COUNTER,
	CM_DREQ_COUNTER,
	CM_DREP_COUNTER,
	CM_SIDR_REQ_COUNTER,
	CM_SIDR_REP_COUNTER,
	CM_LAP_COUNTER,
	CM_APR_COUNTER,
	CM_ATTR_COUNT,
	CM_ATTR_ID_OFFSET = 0x0010,
};

enum {
	CM_XMIT,
	CM_XMIT_RETRIES,
	CM_RECV,
	CM_RECV_DUPLICATES,
	CM_COUNTER_GROUPS
};

static char const counter_group_names[CM_COUNTER_GROUPS]
				     [sizeof("cm_rx_duplicates")] = {
	"cm_tx_msgs", "cm_tx_retries",
	"cm_rx_msgs", "cm_rx_duplicates"
};

struct cm_counter_group {
	struct kobject obj;
	atomic_long_t counter[CM_ATTR_COUNT];
};

struct cm_counter_attribute {
	struct attribute attr;
	int index;
};

#define CM_COUNTER_ATTR(_name, _index) \
struct cm_counter_attribute cm_##_name##_counter_attr = { \
	.attr = { .name = __stringify(_name), .mode = 0444 }, \
	.index = _index \
}

static CM_COUNTER_ATTR(req, CM_REQ_COUNTER);
static CM_COUNTER_ATTR(mra, CM_MRA_COUNTER);
static CM_COUNTER_ATTR(rej, CM_REJ_COUNTER);
static CM_COUNTER_ATTR(rep, CM_REP_COUNTER);
static CM_COUNTER_ATTR(rtu, CM_RTU_COUNTER);
static CM_COUNTER_ATTR(dreq, CM_DREQ_COUNTER);
static CM_COUNTER_ATTR(drep, CM_DREP_COUNTER);
static CM_COUNTER_ATTR(sidr_req, CM_SIDR_REQ_COUNTER);
static CM_COUNTER_ATTR(sidr_rep, CM_SIDR_REP_COUNTER);
static CM_COUNTER_ATTR(lap, CM_LAP_COUNTER);
static CM_COUNTER_ATTR(apr, CM_APR_COUNTER);

static struct attribute *cm_counter_default_attrs[] = {
	&cm_req_counter_attr.attr,
	&cm_mra_counter_attr.attr,
	&cm_rej_counter_attr.attr,
	&cm_rep_counter_attr.attr,
	&cm_rtu_counter_attr.attr,
	&cm_dreq_counter_attr.attr,
	&cm_drep_counter_attr.attr,
	&cm_sidr_req_counter_attr.attr,
	&cm_sidr_rep_counter_attr.attr,
	&cm_lap_counter_attr.attr,
	&cm_apr_counter_attr.attr,
	NULL
};

struct cm_port {
	struct cm_device *cm_dev;
	struct ib_mad_agent *mad_agent;
	struct kobject port_obj;
	u8 port_num;
	struct list_head cm_priv_prim_list;
	struct list_head cm_priv_altr_list;
	struct cm_counter_group counter_group[CM_COUNTER_GROUPS];
};

struct cm_device {
	struct list_head list;
	struct ib_device *ib_device;
	struct device *device;
	u8 ack_delay;
	int going_down;
	struct cm_port *port[0];
};

struct cm_av {
	struct cm_port *port;
	union ib_gid dgid;
	struct ib_ah_attr ah_attr;
	u16 pkey_index;
	u8 timeout;
};

struct cm_work {
	struct delayed_work work;
	struct list_head list;
	struct cm_port *port;
	struct ib_mad_recv_wc *mad_recv_wc;	/* Received MADs */
	__be32 local_id;			/* Established / timewait */
	__be32 remote_id;
	struct ib_cm_event cm_event;
	struct ib_sa_path_rec path[0];
};

struct cm_timewait_info {
	struct cm_work work;			/* Must be first. */
	struct list_head list;
	struct rb_node remote_qp_node;
	struct rb_node remote_id_node;
	__be64 remote_ca_guid;
	__be32 remote_qpn;
	u8 inserted_remote_qp;
	u8 inserted_remote_id;
};

struct cm_id_private {
	struct ib_cm_id	id;

	struct rb_node service_node;
	struct rb_node sidr_id_node;
	spinlock_t lock;	/* Do not acquire inside cm.lock */
	struct completion comp;
	atomic_t refcount;
	/* Number of clients sharing this ib_cm_id. Only valid for listeners.
	 * Protected by the cm.lock spinlock. */
	int listen_sharecount;

	struct ib_mad_send_buf *msg;
	struct cm_timewait_info *timewait_info;
	/* todo: use alternate port on send failure */
	struct cm_av av;
	struct cm_av alt_av;

	void *private_data;
	__be64 tid;
	__be32 local_qpn;
	__be32 remote_qpn;
	enum ib_qp_type qp_type;
	__be32 sq_psn;
	__be32 rq_psn;
	int timeout_ms;
	enum ib_mtu path_mtu;
	__be16 pkey;
	u8 private_data_len;
	u8 max_cm_retries;
	u8 peer_to_peer;
	u8 responder_resources;
	u8 initiator_depth;
	u8 retry_count;
	u8 rnr_retry_count;
	u8 service_timeout;
	u8 target_ack_delay;

	struct list_head prim_list;
	struct list_head altr_list;
	/* Indicates that the send port mad is registered and av is set */
	int prim_send_port_not_ready;
	int altr_send_port_not_ready;

	struct list_head work_list;
	atomic_t work_count;
};

static void cm_work_handler(struct work_struct *work);

static inline void cm_deref_id(struct cm_id_private *cm_id_priv)
{
	if (atomic_dec_and_test(&cm_id_priv->refcount))
		complete(&cm_id_priv->comp);
}

static int cm_alloc_msg(struct cm_id_private *cm_id_priv,
			struct ib_mad_send_buf **msg)
{
	struct ib_mad_agent *mad_agent;
	struct ib_mad_send_buf *m;
	struct ib_ah *ah;
	struct cm_av *av;
	unsigned long flags, flags2;
	int ret = 0;

	/* Don't let the port be released until the agent is down. */
	spin_lock_irqsave(&cm.state_lock, flags2);
	spin_lock_irqsave(&cm.lock, flags);
	if (!cm_id_priv->prim_send_port_not_ready)
		av = &cm_id_priv->av;
	else if (!cm_id_priv->altr_send_port_not_ready &&
		 (cm_id_priv->alt_av.port))
		av = &cm_id_priv->alt_av;
	else {
		pr_info("%s: not a valid CM id\n", __func__);
		ret = -ENODEV;
		spin_unlock_irqrestore(&cm.lock, flags);
		goto out;
	}
	spin_unlock_irqrestore(&cm.lock, flags);
	/* Make sure the port hasn't released the MAD yet. */
	mad_agent = cm_id_priv->av.port->mad_agent;
	if (!mad_agent) {
		pr_info("%s: not a valid MAD agent\n", __func__);
		ret = -ENODEV;
		goto out;
	}
	ah = ib_create_ah(mad_agent->qp->pd, &av->ah_attr);
	if (IS_ERR(ah)) {
		ret = PTR_ERR(ah);
		goto out;
	}

	m = ib_create_send_mad(mad_agent, cm_id_priv->id.remote_cm_qpn,
			       av->pkey_index,
			       0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
			       GFP_ATOMIC,
			       IB_MGMT_BASE_VERSION);
	if (IS_ERR(m)) {
		ib_destroy_ah(ah);
		ret = PTR_ERR(m);
		goto out;
	}

	/* Timeout set by caller if response is expected. */
	m->ah = ah;
	m->retries = cm_id_priv->max_cm_retries;

	atomic_inc(&cm_id_priv->refcount);
	m->context[0] = cm_id_priv;
	*msg = m;

out:
	spin_unlock_irqrestore(&cm.state_lock, flags2);
	return ret;
}

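/*
 * Note on the locking in cm_alloc_msg() above (a reading aid): cm.state_lock
 * is held across the whole allocation so a concurrent cm_remove_one() cannot
 * tear down the port's MAD agent between the av/port selection and
 * ib_create_send_mad(); cm.lock is taken only for the brief av selection.
 */
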
static int cm_alloc_response_msg(struct cm_port *port,
				 struct ib_mad_recv_wc *mad_recv_wc,
				 struct ib_mad_send_buf **msg)
{
	struct ib_mad_send_buf *m;
	struct ib_ah *ah;

	ah = ib_create_ah_from_wc(port->mad_agent->qp->pd, mad_recv_wc->wc,
				  mad_recv_wc->recv_buf.grh, port->port_num);
	if (IS_ERR(ah))
		return PTR_ERR(ah);

	m = ib_create_send_mad(port->mad_agent, 1, mad_recv_wc->wc->pkey_index,
			       0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
			       GFP_ATOMIC,
			       IB_MGMT_BASE_VERSION);
	if (IS_ERR(m)) {
		ib_destroy_ah(ah);
		return PTR_ERR(m);
	}
	m->ah = ah;
	*msg = m;
	return 0;
}

static void cm_free_msg(struct ib_mad_send_buf *msg)
{
	ib_destroy_ah(msg->ah);
	if (msg->context[0])
		cm_deref_id(msg->context[0]);
	ib_free_send_mad(msg);
}

static void *cm_copy_private_data(const void *private_data,
				  u8 private_data_len)
{
	void *data;

	if (!private_data || !private_data_len)
		return NULL;

	data = kmemdup(private_data, private_data_len, GFP_KERNEL);
	if (!data)
		return ERR_PTR(-ENOMEM);

	return data;
}

static void cm_set_private_data(struct cm_id_private *cm_id_priv,
				void *private_data, u8 private_data_len)
{
	if (cm_id_priv->private_data && cm_id_priv->private_data_len)
		kfree(cm_id_priv->private_data);

	cm_id_priv->private_data = private_data;
	cm_id_priv->private_data_len = private_data_len;
}

static void cm_init_av_for_response(struct cm_port *port, struct ib_wc *wc,
				    struct ib_grh *grh, struct cm_av *av)
{
	av->port = port;
	av->pkey_index = wc->pkey_index;
	ib_init_ah_from_wc(port->cm_dev->ib_device, port->port_num, wc,
			   grh, &av->ah_attr);
}

static int cm_init_av_by_path(struct ib_sa_path_rec *path, struct cm_av *av,
			      struct cm_id_private *cm_id_priv)
{
	struct cm_device *cm_dev;
	struct cm_port *port = NULL;
	unsigned long flags;
	int ret;
	u8 p;
	struct net_device *ndev = ib_get_ndev_from_path(path);

	read_lock_irqsave(&cm.device_lock, flags);
	list_for_each_entry(cm_dev, &cm.device_list, list) {
		if (!ib_find_cached_gid(cm_dev->ib_device, &path->sgid,
					path->gid_type, ndev, &p, NULL)) {
			port = cm_dev->port[p-1];
			break;
		}
	}
	read_unlock_irqrestore(&cm.device_lock, flags);

	if (ndev)
		dev_put(ndev);

	if (!port)
		return -EINVAL;

	ret = ib_find_cached_pkey(cm_dev->ib_device, port->port_num,
				  be16_to_cpu(path->pkey), &av->pkey_index);
	if (ret)
		return ret;

	av->port = port;
	ib_init_ah_from_path(cm_dev->ib_device, port->port_num, path,
			     &av->ah_attr);
	av->timeout = path->packet_life_time + 1;

	spin_lock_irqsave(&cm.lock, flags);
	if (&cm_id_priv->av == av)
		list_add_tail(&cm_id_priv->prim_list, &port->cm_priv_prim_list);
	else if (&cm_id_priv->alt_av == av)
		list_add_tail(&cm_id_priv->altr_list, &port->cm_priv_altr_list);
	else
		ret = -EINVAL;
	spin_unlock_irqrestore(&cm.lock, flags);

	return ret;
}

static int cm_alloc_id(struct cm_id_private *cm_id_priv)
{
	unsigned long flags;
	int id;

	idr_preload(GFP_KERNEL);
	spin_lock_irqsave(&cm.lock, flags);

	id = idr_alloc_cyclic(&cm.local_id_table, cm_id_priv, 0, 0, GFP_NOWAIT);

	spin_unlock_irqrestore(&cm.lock, flags);
	idr_preload_end();

	cm_id_priv->id.local_id = (__force __be32)id ^ cm.random_id_operand;
	return id < 0 ? id : 0;
}

static void cm_free_id(__be32 local_id)
{
	spin_lock_irq(&cm.lock);
	idr_remove(&cm.local_id_table,
		   (__force int) (local_id ^ cm.random_id_operand));
	spin_unlock_irq(&cm.lock);
}

static struct cm_id_private *cm_get_id(__be32 local_id, __be32 remote_id)
{
	struct cm_id_private *cm_id_priv;

	cm_id_priv = idr_find(&cm.local_id_table,
			      (__force int) (local_id ^ cm.random_id_operand));
	if (cm_id_priv) {
		if (cm_id_priv->id.remote_id == remote_id)
			atomic_inc(&cm_id_priv->refcount);
		else
			cm_id_priv = NULL;
	}

	return cm_id_priv;
}

static struct cm_id_private *cm_acquire_id(__be32 local_id, __be32 remote_id)
{
	struct cm_id_private *cm_id_priv;

	spin_lock_irq(&cm.lock);
	cm_id_priv = cm_get_id(local_id, remote_id);
	spin_unlock_irq(&cm.lock);

	return cm_id_priv;
}

/*
 * Trivial helpers to strip endian annotation and compare; the
 * endianness doesn't actually matter since we just need a stable
 * order for the RB tree.
 */
static int be32_lt(__be32 a, __be32 b)
{
	return (__force u32) a < (__force u32) b;
}

static int be32_gt(__be32 a, __be32 b)
{
	return (__force u32) a > (__force u32) b;
}

static int be64_lt(__be64 a, __be64 b)
{
	return (__force u64) a < (__force u64) b;
}

static int be64_gt(__be64 a, __be64 b)
{
	return (__force u64) a > (__force u64) b;
}

static struct cm_id_private *cm_insert_listen(struct cm_id_private *cm_id_priv)
{
	struct rb_node **link = &cm.listen_service_table.rb_node;
	struct rb_node *parent = NULL;
	struct cm_id_private *cur_cm_id_priv;
	__be64 service_id = cm_id_priv->id.service_id;
	__be64 service_mask = cm_id_priv->id.service_mask;

	while (*link) {
		parent = *link;
		cur_cm_id_priv = rb_entry(parent, struct cm_id_private,
					  service_node);
		if ((cur_cm_id_priv->id.service_mask & service_id) ==
		    (service_mask & cur_cm_id_priv->id.service_id) &&
		    (cm_id_priv->id.device == cur_cm_id_priv->id.device))
			return cur_cm_id_priv;

		if (cm_id_priv->id.device < cur_cm_id_priv->id.device)
			link = &(*link)->rb_left;
		else if (cm_id_priv->id.device > cur_cm_id_priv->id.device)
			link = &(*link)->rb_right;
		else if (be64_lt(service_id, cur_cm_id_priv->id.service_id))
			link = &(*link)->rb_left;
		else if (be64_gt(service_id, cur_cm_id_priv->id.service_id))
			link = &(*link)->rb_right;
		else
			link = &(*link)->rb_right;
	}
	rb_link_node(&cm_id_priv->service_node, parent, link);
	rb_insert_color(&cm_id_priv->service_node, &cm.listen_service_table);
	return NULL;
}

static struct cm_id_private *cm_find_listen(struct ib_device *device,
					    __be64 service_id)
{
	struct rb_node *node = cm.listen_service_table.rb_node;
	struct cm_id_private *cm_id_priv;

	while (node) {
		cm_id_priv = rb_entry(node, struct cm_id_private, service_node);
		if ((cm_id_priv->id.service_mask & service_id) ==
		     cm_id_priv->id.service_id &&
		    (cm_id_priv->id.device == device))
			return cm_id_priv;

		if (device < cm_id_priv->id.device)
			node = node->rb_left;
		else if (device > cm_id_priv->id.device)
			node = node->rb_right;
		else if (be64_lt(service_id, cm_id_priv->id.service_id))
			node = node->rb_left;
		else if (be64_gt(service_id, cm_id_priv->id.service_id))
			node = node->rb_right;
		else
			node = node->rb_right;
	}
	return NULL;
}

static struct cm_timewait_info *cm_insert_remote_id(struct cm_timewait_info
						    *timewait_info)
{
	struct rb_node **link = &cm.remote_id_table.rb_node;
	struct rb_node *parent = NULL;
	struct cm_timewait_info *cur_timewait_info;
	__be64 remote_ca_guid = timewait_info->remote_ca_guid;
	__be32 remote_id = timewait_info->work.remote_id;

	while (*link) {
		parent = *link;
		cur_timewait_info = rb_entry(parent, struct cm_timewait_info,
					     remote_id_node);
		if (be32_lt(remote_id, cur_timewait_info->work.remote_id))
			link = &(*link)->rb_left;
		else if (be32_gt(remote_id, cur_timewait_info->work.remote_id))
			link = &(*link)->rb_right;
		else if (be64_lt(remote_ca_guid, cur_timewait_info->remote_ca_guid))
			link = &(*link)->rb_left;
		else if (be64_gt(remote_ca_guid, cur_timewait_info->remote_ca_guid))
			link = &(*link)->rb_right;
		else
			return cur_timewait_info;
	}
	timewait_info->inserted_remote_id = 1;
	rb_link_node(&timewait_info->remote_id_node, parent, link);
	rb_insert_color(&timewait_info->remote_id_node, &cm.remote_id_table);
	return NULL;
}

static struct cm_timewait_info *cm_find_remote_id(__be64 remote_ca_guid,
						  __be32 remote_id)
{
	struct rb_node *node = cm.remote_id_table.rb_node;
	struct cm_timewait_info *timewait_info;

	while (node) {
		timewait_info = rb_entry(node, struct cm_timewait_info,
					 remote_id_node);
		if (be32_lt(remote_id, timewait_info->work.remote_id))
			node = node->rb_left;
		else if (be32_gt(remote_id, timewait_info->work.remote_id))
			node = node->rb_right;
		else if (be64_lt(remote_ca_guid, timewait_info->remote_ca_guid))
			node = node->rb_left;
		else if (be64_gt(remote_ca_guid, timewait_info->remote_ca_guid))
			node = node->rb_right;
		else
			return timewait_info;
	}
	return NULL;
}

static struct cm_timewait_info *cm_insert_remote_qpn(struct cm_timewait_info
						     *timewait_info)
{
	struct rb_node **link = &cm.remote_qp_table.rb_node;
	struct rb_node *parent = NULL;
	struct cm_timewait_info *cur_timewait_info;
	__be64 remote_ca_guid = timewait_info->remote_ca_guid;
	__be32 remote_qpn = timewait_info->remote_qpn;

	while (*link) {
		parent = *link;
		cur_timewait_info = rb_entry(parent, struct cm_timewait_info,
					     remote_qp_node);
		if (be32_lt(remote_qpn, cur_timewait_info->remote_qpn))
			link = &(*link)->rb_left;
		else if (be32_gt(remote_qpn, cur_timewait_info->remote_qpn))
			link = &(*link)->rb_right;
		else if (be64_lt(remote_ca_guid, cur_timewait_info->remote_ca_guid))
			link = &(*link)->rb_left;
		else if (be64_gt(remote_ca_guid, cur_timewait_info->remote_ca_guid))
			link = &(*link)->rb_right;
		else
			return cur_timewait_info;
	}
	timewait_info->inserted_remote_qp = 1;
	rb_link_node(&timewait_info->remote_qp_node, parent, link);
	rb_insert_color(&timewait_info->remote_qp_node, &cm.remote_qp_table);
	return NULL;
}

static struct cm_id_private *cm_insert_remote_sidr(struct cm_id_private
						   *cm_id_priv)
{
	struct rb_node **link = &cm.remote_sidr_table.rb_node;
	struct rb_node *parent = NULL;
	struct cm_id_private *cur_cm_id_priv;
	union ib_gid *port_gid = &cm_id_priv->av.dgid;
	__be32 remote_id = cm_id_priv->id.remote_id;

	while (*link) {
		parent = *link;
		cur_cm_id_priv = rb_entry(parent, struct cm_id_private,
					  sidr_id_node);
		if (be32_lt(remote_id, cur_cm_id_priv->id.remote_id))
			link = &(*link)->rb_left;
		else if (be32_gt(remote_id, cur_cm_id_priv->id.remote_id))
			link = &(*link)->rb_right;
		else {
			int cmp;
			cmp = memcmp(port_gid, &cur_cm_id_priv->av.dgid,
				     sizeof *port_gid);
			if (cmp < 0)
				link = &(*link)->rb_left;
			else if (cmp > 0)
				link = &(*link)->rb_right;
			else
				return cur_cm_id_priv;
		}
	}
	rb_link_node(&cm_id_priv->sidr_id_node, parent, link);
	rb_insert_color(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
	return NULL;
}

static void cm_reject_sidr_req(struct cm_id_private *cm_id_priv,
			       enum ib_cm_sidr_status status)
{
	struct ib_cm_sidr_rep_param param;

	memset(&param, 0, sizeof param);
	param.status = status;
	ib_send_cm_sidr_rep(&cm_id_priv->id, &param);
}

struct ib_cm_id *ib_create_cm_id(struct ib_device *device,
				 ib_cm_handler cm_handler,
				 void *context)
{
	struct cm_id_private *cm_id_priv;
	int ret;

	cm_id_priv = kzalloc(sizeof *cm_id_priv, GFP_KERNEL);
	if (!cm_id_priv)
		return ERR_PTR(-ENOMEM);

	cm_id_priv->id.state = IB_CM_IDLE;
	cm_id_priv->id.device = device;
	cm_id_priv->id.cm_handler = cm_handler;
	cm_id_priv->id.context = context;
	cm_id_priv->id.remote_cm_qpn = 1;
	ret = cm_alloc_id(cm_id_priv);
	if (ret)
		goto error;

	spin_lock_init(&cm_id_priv->lock);
	init_completion(&cm_id_priv->comp);
	INIT_LIST_HEAD(&cm_id_priv->work_list);
	INIT_LIST_HEAD(&cm_id_priv->prim_list);
	INIT_LIST_HEAD(&cm_id_priv->altr_list);
	atomic_set(&cm_id_priv->work_count, -1);
	atomic_set(&cm_id_priv->refcount, 1);
	return &cm_id_priv->id;

error:
	kfree(cm_id_priv);
	return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(ib_create_cm_id);

static struct cm_work *cm_dequeue_work(struct cm_id_private *cm_id_priv)
{
	struct cm_work *work;

	if (list_empty(&cm_id_priv->work_list))
		return NULL;

	work = list_entry(cm_id_priv->work_list.next, struct cm_work, list);
	list_del(&work->list);
	return work;
}

static void cm_free_work(struct cm_work *work)
{
	if (work->mad_recv_wc)
		ib_free_recv_mad(work->mad_recv_wc);
	kfree(work);
}

static inline int cm_convert_to_ms(int iba_time)
{
	/* approximate conversion to ms from 4.096us x 2^iba_time */
	return 1 << max(iba_time - 8, 0);
}

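/*
 * Example of the approximation above: iba_time = 14 means
 * 4.096us * 2^14 ~= 67ms, while 1 << (14 - 8) = 64ms.
 */
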
/*
 * calculate: 4.096x2^ack_timeout = 4.096x2^ack_delay + 2x4.096x2^life_time
 * Because of how ack_timeout is stored, adding one doubles the timeout.
 * To avoid large timeouts, select the max(ack_delay, life_time + 1), and
 * increment it (round up) only if the other is within 50%.
 */
static u8 cm_ack_timeout(u8 ca_ack_delay, u8 packet_life_time)
{
	int ack_timeout = packet_life_time + 1;

	if (ack_timeout >= ca_ack_delay)
		ack_timeout += (ca_ack_delay >= (ack_timeout - 1));
	else
		ack_timeout = ca_ack_delay +
			      (ack_timeout >= (ca_ack_delay - 1));

	return min(31, ack_timeout);
}

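/*
 * Worked example of the rounding rule above: ca_ack_delay = 15 and
 * packet_life_time = 14 give ack_timeout = 15; since the other term is
 * within 50% (15 >= 14), it is rounded up to 16, matching the exact
 * result 4.096us x 2^15 + 2 x 4.096us x 2^14 = 4.096us x 2^16.
 */
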
static void cm_cleanup_timewait(struct cm_timewait_info *timewait_info)
{
	if (timewait_info->inserted_remote_id) {
		rb_erase(&timewait_info->remote_id_node, &cm.remote_id_table);
		timewait_info->inserted_remote_id = 0;
	}

	if (timewait_info->inserted_remote_qp) {
		rb_erase(&timewait_info->remote_qp_node, &cm.remote_qp_table);
		timewait_info->inserted_remote_qp = 0;
	}
}

static struct cm_timewait_info *cm_create_timewait_info(__be32 local_id)
{
	struct cm_timewait_info *timewait_info;

	timewait_info = kzalloc(sizeof *timewait_info, GFP_KERNEL);
	if (!timewait_info)
		return ERR_PTR(-ENOMEM);

	timewait_info->work.local_id = local_id;
	INIT_DELAYED_WORK(&timewait_info->work.work, cm_work_handler);
	timewait_info->work.cm_event.event = IB_CM_TIMEWAIT_EXIT;
	return timewait_info;
}

static void cm_enter_timewait(struct cm_id_private *cm_id_priv)
{
	int wait_time;
	unsigned long flags;
	struct cm_device *cm_dev;

	cm_dev = ib_get_client_data(cm_id_priv->id.device, &cm_client);
	if (!cm_dev)
		return;

	spin_lock_irqsave(&cm.lock, flags);
	cm_cleanup_timewait(cm_id_priv->timewait_info);
	list_add_tail(&cm_id_priv->timewait_info->list, &cm.timewait_list);
	spin_unlock_irqrestore(&cm.lock, flags);

	/*
	 * The cm_id could be destroyed by the user before we exit timewait.
	 * To protect against this, we search for the cm_id after exiting
	 * timewait before notifying the user that we've exited timewait.
	 */
	cm_id_priv->id.state = IB_CM_TIMEWAIT;
	wait_time = cm_convert_to_ms(cm_id_priv->av.timeout);

	/* Check if the device started its remove_one */
	spin_lock_irqsave(&cm.lock, flags);
	if (!cm_dev->going_down)
		queue_delayed_work(cm.wq, &cm_id_priv->timewait_info->work.work,
				   msecs_to_jiffies(wait_time));
	spin_unlock_irqrestore(&cm.lock, flags);

	cm_id_priv->timewait_info = NULL;
}

static void cm_reset_to_idle(struct cm_id_private *cm_id_priv)
{
	unsigned long flags;

	cm_id_priv->id.state = IB_CM_IDLE;
	if (cm_id_priv->timewait_info) {
		spin_lock_irqsave(&cm.lock, flags);
		cm_cleanup_timewait(cm_id_priv->timewait_info);
		spin_unlock_irqrestore(&cm.lock, flags);
		kfree(cm_id_priv->timewait_info);
		cm_id_priv->timewait_info = NULL;
	}
}

static void cm_destroy_id(struct ib_cm_id *cm_id, int err)
{
	struct cm_id_private *cm_id_priv;
	struct cm_work *work;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
retest:
	spin_lock_irq(&cm_id_priv->lock);
	switch (cm_id->state) {
	case IB_CM_LISTEN:
		spin_unlock_irq(&cm_id_priv->lock);

		spin_lock_irq(&cm.lock);
		if (--cm_id_priv->listen_sharecount > 0) {
			/* The id is still shared. */
			cm_deref_id(cm_id_priv);
			spin_unlock_irq(&cm.lock);
			return;
		}
		rb_erase(&cm_id_priv->service_node, &cm.listen_service_table);
		spin_unlock_irq(&cm.lock);
		break;
	case IB_CM_SIDR_REQ_SENT:
		cm_id->state = IB_CM_IDLE;
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		spin_unlock_irq(&cm_id_priv->lock);
		break;
	case IB_CM_SIDR_REQ_RCVD:
		spin_unlock_irq(&cm_id_priv->lock);
		cm_reject_sidr_req(cm_id_priv, IB_SIDR_REJECT);
		spin_lock_irq(&cm.lock);
		if (!RB_EMPTY_NODE(&cm_id_priv->sidr_id_node))
			rb_erase(&cm_id_priv->sidr_id_node,
				 &cm.remote_sidr_table);
		spin_unlock_irq(&cm.lock);
		break;
	case IB_CM_REQ_SENT:
	case IB_CM_MRA_REQ_RCVD:
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		spin_unlock_irq(&cm_id_priv->lock);
		ib_send_cm_rej(cm_id, IB_CM_REJ_TIMEOUT,
			       &cm_id_priv->id.device->node_guid,
			       sizeof cm_id_priv->id.device->node_guid,
			       NULL, 0);
		break;
	case IB_CM_REQ_RCVD:
		if (err == -ENOMEM) {
			/* Do not reject to allow future retries. */
			cm_reset_to_idle(cm_id_priv);
			spin_unlock_irq(&cm_id_priv->lock);
		} else {
			spin_unlock_irq(&cm_id_priv->lock);
			ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED,
				       NULL, 0, NULL, 0);
		}
		break;
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		/* Fall through */
	case IB_CM_MRA_REQ_SENT:
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
		spin_unlock_irq(&cm_id_priv->lock);
		ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED,
			       NULL, 0, NULL, 0);
		break;
	case IB_CM_ESTABLISHED:
		spin_unlock_irq(&cm_id_priv->lock);
		if (cm_id_priv->qp_type == IB_QPT_XRC_TGT)
			break;
		ib_send_cm_dreq(cm_id, NULL, 0);
		goto retest;
	case IB_CM_DREQ_SENT:
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		cm_enter_timewait(cm_id_priv);
		spin_unlock_irq(&cm_id_priv->lock);
		break;
	case IB_CM_DREQ_RCVD:
		spin_unlock_irq(&cm_id_priv->lock);
		ib_send_cm_drep(cm_id, NULL, 0);
		break;
	default:
		spin_unlock_irq(&cm_id_priv->lock);
		break;
	}

	spin_lock_irq(&cm.lock);
	if (!list_empty(&cm_id_priv->altr_list) &&
	    (!cm_id_priv->altr_send_port_not_ready))
		list_del(&cm_id_priv->altr_list);
	if (!list_empty(&cm_id_priv->prim_list) &&
	    (!cm_id_priv->prim_send_port_not_ready))
		list_del(&cm_id_priv->prim_list);
	spin_unlock_irq(&cm.lock);

	cm_free_id(cm_id->local_id);
	cm_deref_id(cm_id_priv);
	wait_for_completion(&cm_id_priv->comp);
	while ((work = cm_dequeue_work(cm_id_priv)) != NULL)
		cm_free_work(work);
	kfree(cm_id_priv->private_data);
	kfree(cm_id_priv);
}

void ib_destroy_cm_id(struct ib_cm_id *cm_id)
{
	cm_destroy_id(cm_id, 0);
}
EXPORT_SYMBOL(ib_destroy_cm_id);

/**
 * __ib_cm_listen - Initiates listening on the specified service ID for
 *   connection and service ID resolution requests.
 * @cm_id: Connection identifier associated with the listen request.
 * @service_id: Service identifier matched against incoming connection
 *   and service ID resolution requests. The service ID should be specified
 *   in network-byte order. If set to IB_CM_ASSIGN_SERVICE_ID, the CM will
 *   assign a service ID to the caller.
 * @service_mask: Mask applied to service ID used to listen across a
 *   range of service IDs. If set to 0, the service ID is matched
 *   exactly. This parameter is ignored if %service_id is set to
 *   IB_CM_ASSIGN_SERVICE_ID.
 */
static int __ib_cm_listen(struct ib_cm_id *cm_id, __be64 service_id,
			  __be64 service_mask)
{
	struct cm_id_private *cm_id_priv, *cur_cm_id_priv;
	int ret = 0;

	service_mask = service_mask ? service_mask : ~cpu_to_be64(0);
	service_id &= service_mask;
	if ((service_id & IB_SERVICE_ID_AGN_MASK) == IB_CM_ASSIGN_SERVICE_ID &&
	    (service_id != IB_CM_ASSIGN_SERVICE_ID))
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	if (cm_id->state != IB_CM_IDLE)
		return -EINVAL;

	cm_id->state = IB_CM_LISTEN;
	++cm_id_priv->listen_sharecount;

	if (service_id == IB_CM_ASSIGN_SERVICE_ID) {
		cm_id->service_id = cpu_to_be64(cm.listen_service_id++);
		cm_id->service_mask = ~cpu_to_be64(0);
	} else {
		cm_id->service_id = service_id;
		cm_id->service_mask = service_mask;
	}
	cur_cm_id_priv = cm_insert_listen(cm_id_priv);

	if (cur_cm_id_priv) {
		cm_id->state = IB_CM_IDLE;
		--cm_id_priv->listen_sharecount;
		ret = -EBUSY;
	}
	return ret;
}

int ib_cm_listen(struct ib_cm_id *cm_id, __be64 service_id, __be64 service_mask)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cm.lock, flags);
	ret = __ib_cm_listen(cm_id, service_id, service_mask);
	spin_unlock_irqrestore(&cm.lock, flags);

	return ret;
}
EXPORT_SYMBOL(ib_cm_listen);

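/*
 * Minimal listen sketch (my_handler, my_context, and the service ID are
 * placeholders; error handling elided):
 *
 *	struct ib_cm_id *id;
 *
 *	id = ib_create_cm_id(device, my_handler, my_context);
 *	if (IS_ERR(id))
 *		return PTR_ERR(id);
 *	ret = ib_cm_listen(id, cpu_to_be64(0x1000ULL), 0);
 *	if (ret)
 *		ib_destroy_cm_id(id);
 */
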
/**
 * ib_cm_insert_listen - Create a new listening ib_cm_id and listen on
 *   the given service ID.
 *
 * If there's an existing ID listening on that same device and service ID,
 * return it.
 *
 * @device: Device associated with the cm_id. All related communication will
 *   be associated with the specified device.
 * @cm_handler: Callback invoked to notify the user of CM events.
 * @service_id: Service identifier matched against incoming connection
 *   and service ID resolution requests. The service ID should be specified
 *   in network-byte order. If set to IB_CM_ASSIGN_SERVICE_ID, the CM will
 *   assign a service ID to the caller.
 *
 * Callers should call ib_destroy_cm_id when done with the listener ID.
 */
struct ib_cm_id *ib_cm_insert_listen(struct ib_device *device,
				     ib_cm_handler cm_handler,
				     __be64 service_id)
{
	struct cm_id_private *cm_id_priv;
	struct ib_cm_id *cm_id;
	unsigned long flags;
	int err = 0;

	/* Create an ID in advance, since the creation may sleep */
	cm_id = ib_create_cm_id(device, cm_handler, NULL);
	if (IS_ERR(cm_id))
		return cm_id;

	spin_lock_irqsave(&cm.lock, flags);

	if (service_id == IB_CM_ASSIGN_SERVICE_ID)
		goto new_id;

	/* Find an existing ID */
	cm_id_priv = cm_find_listen(device, service_id);
	if (cm_id_priv) {
		if (cm_id->cm_handler != cm_handler || cm_id->context) {
			/* Sharing an ib_cm_id with different handlers is not
			 * supported */
			spin_unlock_irqrestore(&cm.lock, flags);
			ib_destroy_cm_id(cm_id);
			return ERR_PTR(-EINVAL);
		}
		atomic_inc(&cm_id_priv->refcount);
		++cm_id_priv->listen_sharecount;
		spin_unlock_irqrestore(&cm.lock, flags);

		ib_destroy_cm_id(cm_id);
		cm_id = &cm_id_priv->id;
		return cm_id;
	}

new_id:
	/* Use newly created ID */
	err = __ib_cm_listen(cm_id, service_id, 0);

	spin_unlock_irqrestore(&cm.lock, flags);

	if (err) {
		ib_destroy_cm_id(cm_id);
		return ERR_PTR(err);
	}
	return cm_id;
}
EXPORT_SYMBOL(ib_cm_insert_listen);

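/*
 * Sketch of the sharing semantics above (names are placeholders; error
 * handling elided): two calls with the same device, handler, and service
 * ID return the same ib_cm_id, and each call must be balanced by
 * ib_destroy_cm_id(), which only removes the listener once
 * listen_sharecount drops to zero.
 *
 *	id1 = ib_cm_insert_listen(device, my_handler, my_svc_id);
 *	id2 = ib_cm_insert_listen(device, my_handler, my_svc_id);
 *	// id1 == id2 on success
 *	ib_destroy_cm_id(id2);
 *	ib_destroy_cm_id(id1);	// listener actually torn down here
 */
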
static __be64 cm_form_tid(struct cm_id_private *cm_id_priv,
			  enum cm_msg_sequence msg_seq)
{
	u64 hi_tid, low_tid;

	hi_tid  = ((u64) cm_id_priv->av.port->mad_agent->hi_tid) << 32;
	low_tid = (u64) ((__force u32)cm_id_priv->id.local_id |
			 (msg_seq << 30));
	return cpu_to_be64(hi_tid | low_tid);
}

static void cm_format_mad_hdr(struct ib_mad_hdr *hdr,
			      __be16 attr_id, __be64 tid)
{
	hdr->base_version  = IB_MGMT_BASE_VERSION;
	hdr->mgmt_class	   = IB_MGMT_CLASS_CM;
	hdr->class_version = IB_CM_CLASS_VERSION;
	hdr->method	   = IB_MGMT_METHOD_SEND;
	hdr->attr_id	   = attr_id;
	hdr->tid	   = tid;
}

static void cm_format_req(struct cm_req_msg *req_msg,
			  struct cm_id_private *cm_id_priv,
			  struct ib_cm_req_param *param)
{
	struct ib_sa_path_rec *pri_path = param->primary_path;
	struct ib_sa_path_rec *alt_path = param->alternate_path;

	cm_format_mad_hdr(&req_msg->hdr, CM_REQ_ATTR_ID,
			  cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_REQ));

	req_msg->local_comm_id = cm_id_priv->id.local_id;
	req_msg->service_id = param->service_id;
	req_msg->local_ca_guid = cm_id_priv->id.device->node_guid;
	cm_req_set_local_qpn(req_msg, cpu_to_be32(param->qp_num));
	cm_req_set_init_depth(req_msg, param->initiator_depth);
	cm_req_set_remote_resp_timeout(req_msg,
				       param->remote_cm_response_timeout);
	cm_req_set_qp_type(req_msg, param->qp_type);
	cm_req_set_flow_ctrl(req_msg, param->flow_control);
	cm_req_set_starting_psn(req_msg, cpu_to_be32(param->starting_psn));
	cm_req_set_local_resp_timeout(req_msg,
				      param->local_cm_response_timeout);
	req_msg->pkey = param->primary_path->pkey;
	cm_req_set_path_mtu(req_msg, param->primary_path->mtu);
	cm_req_set_max_cm_retries(req_msg, param->max_cm_retries);

	if (param->qp_type != IB_QPT_XRC_INI) {
		cm_req_set_resp_res(req_msg, param->responder_resources);
		cm_req_set_retry_count(req_msg, param->retry_count);
		cm_req_set_rnr_retry_count(req_msg, param->rnr_retry_count);
		cm_req_set_srq(req_msg, param->srq);
	}

	if (pri_path->hop_limit <= 1) {
		req_msg->primary_local_lid = pri_path->slid;
		req_msg->primary_remote_lid = pri_path->dlid;
	} else {
		/* Work-around until there's a way to obtain remote LID info */
		req_msg->primary_local_lid = IB_LID_PERMISSIVE;
		req_msg->primary_remote_lid = IB_LID_PERMISSIVE;
	}
	req_msg->primary_local_gid = pri_path->sgid;
	req_msg->primary_remote_gid = pri_path->dgid;
	cm_req_set_primary_flow_label(req_msg, pri_path->flow_label);
	cm_req_set_primary_packet_rate(req_msg, pri_path->rate);
	req_msg->primary_traffic_class = pri_path->traffic_class;
	req_msg->primary_hop_limit = pri_path->hop_limit;
	cm_req_set_primary_sl(req_msg, pri_path->sl);
	cm_req_set_primary_subnet_local(req_msg, (pri_path->hop_limit <= 1));
	cm_req_set_primary_local_ack_timeout(req_msg,
		cm_ack_timeout(cm_id_priv->av.port->cm_dev->ack_delay,
			       pri_path->packet_life_time));

	if (alt_path) {
		if (alt_path->hop_limit <= 1) {
			req_msg->alt_local_lid = alt_path->slid;
			req_msg->alt_remote_lid = alt_path->dlid;
		} else {
			req_msg->alt_local_lid = IB_LID_PERMISSIVE;
			req_msg->alt_remote_lid = IB_LID_PERMISSIVE;
		}
		req_msg->alt_local_gid = alt_path->sgid;
		req_msg->alt_remote_gid = alt_path->dgid;
		cm_req_set_alt_flow_label(req_msg,
					  alt_path->flow_label);
		cm_req_set_alt_packet_rate(req_msg, alt_path->rate);
		req_msg->alt_traffic_class = alt_path->traffic_class;
		req_msg->alt_hop_limit = alt_path->hop_limit;
		cm_req_set_alt_sl(req_msg, alt_path->sl);
		cm_req_set_alt_subnet_local(req_msg, (alt_path->hop_limit <= 1));
		cm_req_set_alt_local_ack_timeout(req_msg,
			cm_ack_timeout(cm_id_priv->av.port->cm_dev->ack_delay,
				       alt_path->packet_life_time));
	}

	if (param->private_data && param->private_data_len)
		memcpy(req_msg->private_data, param->private_data,
		       param->private_data_len);
}

static int cm_validate_req_param(struct ib_cm_req_param *param)
{
	/* peer-to-peer not supported */
	if (param->peer_to_peer)
		return -EINVAL;

	if (!param->primary_path)
		return -EINVAL;

	if (param->qp_type != IB_QPT_RC && param->qp_type != IB_QPT_UC &&
	    param->qp_type != IB_QPT_XRC_INI)
		return -EINVAL;

	if (param->private_data &&
	    param->private_data_len > IB_CM_REQ_PRIVATE_DATA_SIZE)
		return -EINVAL;

	if (param->alternate_path &&
	    (param->alternate_path->pkey != param->primary_path->pkey ||
	     param->alternate_path->mtu != param->primary_path->mtu))
		return -EINVAL;

	return 0;
}

int ib_send_cm_req(struct ib_cm_id *cm_id,
		   struct ib_cm_req_param *param)
{
	struct cm_id_private *cm_id_priv;
	struct cm_req_msg *req_msg;
	unsigned long flags;
	int ret;

	ret = cm_validate_req_param(param);
	if (ret)
		return ret;

	/* Verify that we're not in timewait. */
	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_IDLE) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		ret = -EINVAL;
		goto out;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	cm_id_priv->timewait_info = cm_create_timewait_info(cm_id_priv->
							    id.local_id);
	if (IS_ERR(cm_id_priv->timewait_info)) {
		ret = PTR_ERR(cm_id_priv->timewait_info);
		cm_id_priv->timewait_info = NULL;
		goto out;
	}

	ret = cm_init_av_by_path(param->primary_path, &cm_id_priv->av,
				 cm_id_priv);
	if (ret)
		goto error1;
	if (param->alternate_path) {
		ret = cm_init_av_by_path(param->alternate_path,
					 &cm_id_priv->alt_av, cm_id_priv);
		if (ret)
			goto error1;
	}
	cm_id->service_id = param->service_id;
	cm_id->service_mask = ~cpu_to_be64(0);
	cm_id_priv->timeout_ms = cm_convert_to_ms(
				    param->primary_path->packet_life_time) * 2 +
				 cm_convert_to_ms(
				    param->remote_cm_response_timeout);
	cm_id_priv->max_cm_retries = param->max_cm_retries;
	cm_id_priv->initiator_depth = param->initiator_depth;
	cm_id_priv->responder_resources = param->responder_resources;
	cm_id_priv->retry_count = param->retry_count;
	cm_id_priv->path_mtu = param->primary_path->mtu;
	cm_id_priv->pkey = param->primary_path->pkey;
	cm_id_priv->qp_type = param->qp_type;

	ret = cm_alloc_msg(cm_id_priv, &cm_id_priv->msg);
	if (ret)
		goto error1;

	req_msg = (struct cm_req_msg *) cm_id_priv->msg->mad;
	cm_format_req(req_msg, cm_id_priv, param);
	cm_id_priv->tid = req_msg->hdr.tid;
	cm_id_priv->msg->timeout_ms = cm_id_priv->timeout_ms;
	cm_id_priv->msg->context[1] = (void *) (unsigned long) IB_CM_REQ_SENT;

	cm_id_priv->local_qpn = cm_req_get_local_qpn(req_msg);
	cm_id_priv->rq_psn = cm_req_get_starting_psn(req_msg);

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	ret = ib_post_send_mad(cm_id_priv->msg, NULL);
	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		goto error2;
	}
	BUG_ON(cm_id->state != IB_CM_IDLE);
	cm_id->state = IB_CM_REQ_SENT;
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return 0;

error2:	cm_free_msg(cm_id_priv->msg);
error1:	kfree(cm_id_priv->timewait_info);
out:	return ret;
}
EXPORT_SYMBOL(ib_send_cm_req);

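/*
 * Minimal REQ sketch (the path, QP, and numeric values below are
 * placeholders; error handling elided). The caller resolves a path
 * record, fills struct ib_cm_req_param, and posts the REQ on an idle
 * cm_id:
 *
 *	struct ib_cm_req_param req = {
 *		.primary_path		= &path_rec,
 *		.service_id		= cpu_to_be64(0x1000ULL),
 *		.qp_num			= qp->qp_num,
 *		.qp_type		= IB_QPT_RC,
 *		.starting_psn		= 0x100,
 *		.responder_resources	= 4,
 *		.initiator_depth	= 4,
 *		.remote_cm_response_timeout = 20,
 *		.local_cm_response_timeout  = 20,
 *		.retry_count		= 7,
 *		.rnr_retry_count	= 7,
 *		.max_cm_retries		= 15,
 *	};
 *
 *	ret = ib_send_cm_req(cm_id, &req);
 */
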
static int cm_issue_rej(struct cm_port *port,
			struct ib_mad_recv_wc *mad_recv_wc,
			enum ib_cm_rej_reason reason,
			enum cm_msg_response msg_rejected,
			void *ari, u8 ari_length)
{
	struct ib_mad_send_buf *msg = NULL;
	struct cm_rej_msg *rej_msg, *rcv_msg;
	int ret;

	ret = cm_alloc_response_msg(port, mad_recv_wc, &msg);
	if (ret)
		return ret;

	/* We just need common CM header information. Cast to any message. */
	rcv_msg = (struct cm_rej_msg *) mad_recv_wc->recv_buf.mad;
	rej_msg = (struct cm_rej_msg *) msg->mad;

	cm_format_mad_hdr(&rej_msg->hdr, CM_REJ_ATTR_ID, rcv_msg->hdr.tid);
	rej_msg->remote_comm_id = rcv_msg->local_comm_id;
	rej_msg->local_comm_id = rcv_msg->remote_comm_id;
	cm_rej_set_msg_rejected(rej_msg, msg_rejected);
	rej_msg->reason = cpu_to_be16(reason);

	if (ari && ari_length) {
		cm_rej_set_reject_info_len(rej_msg, ari_length);
		memcpy(rej_msg->ari, ari, ari_length);
	}

	ret = ib_post_send_mad(msg, NULL);
	if (ret)
		cm_free_msg(msg);

	return ret;
}

static inline int cm_is_active_peer(__be64 local_ca_guid, __be64 remote_ca_guid,
				    __be32 local_qpn, __be32 remote_qpn)
{
	return (be64_to_cpu(local_ca_guid) > be64_to_cpu(remote_ca_guid) ||
		((local_ca_guid == remote_ca_guid) &&
		 (be32_to_cpu(local_qpn) > be32_to_cpu(remote_qpn))));
}

static void cm_format_paths_from_req(struct cm_req_msg *req_msg,
				     struct ib_sa_path_rec *primary_path,
				     struct ib_sa_path_rec *alt_path)
{
	memset(primary_path, 0, sizeof *primary_path);
	primary_path->dgid = req_msg->primary_local_gid;
	primary_path->sgid = req_msg->primary_remote_gid;
	primary_path->dlid = req_msg->primary_local_lid;
	primary_path->slid = req_msg->primary_remote_lid;
	primary_path->flow_label = cm_req_get_primary_flow_label(req_msg);
	primary_path->hop_limit = req_msg->primary_hop_limit;
	primary_path->traffic_class = req_msg->primary_traffic_class;
	primary_path->reversible = 1;
	primary_path->pkey = req_msg->pkey;
	primary_path->sl = cm_req_get_primary_sl(req_msg);
	primary_path->mtu_selector = IB_SA_EQ;
	primary_path->mtu = cm_req_get_path_mtu(req_msg);
	primary_path->rate_selector = IB_SA_EQ;
	primary_path->rate = cm_req_get_primary_packet_rate(req_msg);
	primary_path->packet_life_time_selector = IB_SA_EQ;
	primary_path->packet_life_time =
		cm_req_get_primary_local_ack_timeout(req_msg);
	primary_path->packet_life_time -= (primary_path->packet_life_time > 0);
	primary_path->service_id = req_msg->service_id;

	if (req_msg->alt_local_lid) {
		memset(alt_path, 0, sizeof *alt_path);
		alt_path->dgid = req_msg->alt_local_gid;
		alt_path->sgid = req_msg->alt_remote_gid;
		alt_path->dlid = req_msg->alt_local_lid;
		alt_path->slid = req_msg->alt_remote_lid;
		alt_path->flow_label = cm_req_get_alt_flow_label(req_msg);
		alt_path->hop_limit = req_msg->alt_hop_limit;
		alt_path->traffic_class = req_msg->alt_traffic_class;
		alt_path->reversible = 1;
		alt_path->pkey = req_msg->pkey;
		alt_path->sl = cm_req_get_alt_sl(req_msg);
		alt_path->mtu_selector = IB_SA_EQ;
		alt_path->mtu = cm_req_get_path_mtu(req_msg);
		alt_path->rate_selector = IB_SA_EQ;
		alt_path->rate = cm_req_get_alt_packet_rate(req_msg);
		alt_path->packet_life_time_selector = IB_SA_EQ;
		alt_path->packet_life_time =
			cm_req_get_alt_local_ack_timeout(req_msg);
		alt_path->packet_life_time -= (alt_path->packet_life_time > 0);
		alt_path->service_id = req_msg->service_id;
	}
}

static u16 cm_get_bth_pkey(struct cm_work *work)
{
	struct ib_device *ib_dev = work->port->cm_dev->ib_device;
	u8 port_num = work->port->port_num;
	u16 pkey_index = work->mad_recv_wc->wc->pkey_index;
	u16 pkey;
	int ret;

	ret = ib_get_cached_pkey(ib_dev, port_num, pkey_index, &pkey);
	if (ret) {
		dev_warn_ratelimited(&ib_dev->dev, "ib_cm: Couldn't retrieve pkey for incoming request (port %d, pkey index %d). %d\n",
				     port_num, pkey_index, ret);
		return 0;
	}

	return pkey;
}

static void cm_format_req_event(struct cm_work *work,
				struct cm_id_private *cm_id_priv,
				struct ib_cm_id *listen_id)
{
	struct cm_req_msg *req_msg;
	struct ib_cm_req_event_param *param;

	req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;
	param = &work->cm_event.param.req_rcvd;
	param->listen_id = listen_id;
	param->bth_pkey = cm_get_bth_pkey(work);
	param->port = cm_id_priv->av.port->port_num;
	param->primary_path = &work->path[0];
	if (req_msg->alt_local_lid)
		param->alternate_path = &work->path[1];
	else
		param->alternate_path = NULL;
	param->remote_ca_guid = req_msg->local_ca_guid;
	param->remote_qkey = be32_to_cpu(req_msg->local_qkey);
	param->remote_qpn = be32_to_cpu(cm_req_get_local_qpn(req_msg));
	param->qp_type = cm_req_get_qp_type(req_msg);
	param->starting_psn = be32_to_cpu(cm_req_get_starting_psn(req_msg));
	param->responder_resources = cm_req_get_init_depth(req_msg);
	param->initiator_depth = cm_req_get_resp_res(req_msg);
	param->local_cm_response_timeout =
					cm_req_get_remote_resp_timeout(req_msg);
	param->flow_control = cm_req_get_flow_ctrl(req_msg);
	param->remote_cm_response_timeout =
					cm_req_get_local_resp_timeout(req_msg);
	param->retry_count = cm_req_get_retry_count(req_msg);
	param->rnr_retry_count = cm_req_get_rnr_retry_count(req_msg);
	param->srq = cm_req_get_srq(req_msg);
	work->cm_event.private_data = &req_msg->private_data;
}

static void cm_process_work(struct cm_id_private *cm_id_priv,
			    struct cm_work *work)
{
	int ret;

	/* We will typically only have the current event to report. */
	ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &work->cm_event);
	cm_free_work(work);

	while (!ret && !atomic_add_negative(-1, &cm_id_priv->work_count)) {
		spin_lock_irq(&cm_id_priv->lock);
		work = cm_dequeue_work(cm_id_priv);
		spin_unlock_irq(&cm_id_priv->lock);
		BUG_ON(!work);
		ret = cm_id_priv->id.cm_handler(&cm_id_priv->id,
						&work->cm_event);
		cm_free_work(work);
	}
	cm_deref_id(cm_id_priv);
	if (ret)
		cm_destroy_id(&cm_id_priv->id, ret);
}

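/*
 * Note on the work_count protocol above (a reading aid, not new logic):
 * work_count starts at -1, so the first atomic_inc_and_test() by a
 * message handler reaches 0 and that thread becomes the one draining the
 * work_list through cm_process_work(); events queued while it runs are
 * picked up by the atomic_add_negative(-1, ...) loop.
 */
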
static void cm_format_mra(struct cm_mra_msg *mra_msg,
			  struct cm_id_private *cm_id_priv,
			  enum cm_msg_response msg_mraed, u8 service_timeout,
			  const void *private_data, u8 private_data_len)
{
	cm_format_mad_hdr(&mra_msg->hdr, CM_MRA_ATTR_ID, cm_id_priv->tid);
	cm_mra_set_msg_mraed(mra_msg, msg_mraed);
	mra_msg->local_comm_id = cm_id_priv->id.local_id;
	mra_msg->remote_comm_id = cm_id_priv->id.remote_id;
	cm_mra_set_service_timeout(mra_msg, service_timeout);

	if (private_data && private_data_len)
		memcpy(mra_msg->private_data, private_data, private_data_len);
}

static void cm_format_rej(struct cm_rej_msg *rej_msg,
			  struct cm_id_private *cm_id_priv,
			  enum ib_cm_rej_reason reason,
			  void *ari,
			  u8 ari_length,
			  const void *private_data,
			  u8 private_data_len)
{
	cm_format_mad_hdr(&rej_msg->hdr, CM_REJ_ATTR_ID, cm_id_priv->tid);
	rej_msg->remote_comm_id = cm_id_priv->id.remote_id;

	switch (cm_id_priv->id.state) {
	case IB_CM_REQ_RCVD:
		rej_msg->local_comm_id = 0;
		cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REQ);
		break;
	case IB_CM_MRA_REQ_SENT:
		rej_msg->local_comm_id = cm_id_priv->id.local_id;
		cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REQ);
		break;
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
		rej_msg->local_comm_id = cm_id_priv->id.local_id;
		cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REP);
		break;
	default:
		rej_msg->local_comm_id = cm_id_priv->id.local_id;
		cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_OTHER);
		break;
	}

	rej_msg->reason = cpu_to_be16(reason);
	if (ari && ari_length) {
		cm_rej_set_reject_info_len(rej_msg, ari_length);
		memcpy(rej_msg->ari, ari, ari_length);
	}

	if (private_data && private_data_len)
		memcpy(rej_msg->private_data, private_data, private_data_len);
}

static void cm_dup_req_handler(struct cm_work *work,
			       struct cm_id_private *cm_id_priv)
{
	struct ib_mad_send_buf *msg = NULL;
	int ret;

	atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
			counter[CM_REQ_COUNTER]);

	/* Quick state check to discard duplicate REQs. */
	if (cm_id_priv->id.state == IB_CM_REQ_RCVD)
		return;

	ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
	if (ret)
		return;

	spin_lock_irq(&cm_id_priv->lock);
	switch (cm_id_priv->id.state) {
	case IB_CM_MRA_REQ_SENT:
		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
			      CM_MSG_RESPONSE_REQ, cm_id_priv->service_timeout,
			      cm_id_priv->private_data,
			      cm_id_priv->private_data_len);
		break;
	case IB_CM_TIMEWAIT:
		cm_format_rej((struct cm_rej_msg *) msg->mad, cm_id_priv,
			      IB_CM_REJ_STALE_CONN, NULL, 0, NULL, 0);
		break;
	default:
		goto unlock;
	}
	spin_unlock_irq(&cm_id_priv->lock);

	ret = ib_post_send_mad(msg, NULL);
	if (ret)
		goto free;
	return;

unlock:	spin_unlock_irq(&cm_id_priv->lock);
free:	cm_free_msg(msg);
}

static struct cm_id_private *cm_match_req(struct cm_work *work,
					  struct cm_id_private *cm_id_priv)
{
	struct cm_id_private *listen_cm_id_priv, *cur_cm_id_priv;
	struct cm_timewait_info *timewait_info;
	struct cm_req_msg *req_msg;

	req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;

	/* Check for possible duplicate REQ. */
	spin_lock_irq(&cm.lock);
	timewait_info = cm_insert_remote_id(cm_id_priv->timewait_info);
	if (timewait_info) {
		cur_cm_id_priv = cm_get_id(timewait_info->work.local_id,
					   timewait_info->work.remote_id);
		spin_unlock_irq(&cm.lock);
		if (cur_cm_id_priv) {
			cm_dup_req_handler(work, cur_cm_id_priv);
			cm_deref_id(cur_cm_id_priv);
		}
		return NULL;
	}

	/* Check for stale connections. */
	timewait_info = cm_insert_remote_qpn(cm_id_priv->timewait_info);
	if (timewait_info) {
		cm_cleanup_timewait(cm_id_priv->timewait_info);
		spin_unlock_irq(&cm.lock);
		cm_issue_rej(work->port, work->mad_recv_wc,
			     IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REQ,
			     NULL, 0);
		return NULL;
	}

	/* Find matching listen request. */
	listen_cm_id_priv = cm_find_listen(cm_id_priv->id.device,
					   req_msg->service_id);
	if (!listen_cm_id_priv) {
		cm_cleanup_timewait(cm_id_priv->timewait_info);
		spin_unlock_irq(&cm.lock);
		cm_issue_rej(work->port, work->mad_recv_wc,
			     IB_CM_REJ_INVALID_SERVICE_ID, CM_MSG_RESPONSE_REQ,
			     NULL, 0);
		return NULL;
	}
	atomic_inc(&listen_cm_id_priv->refcount);
	atomic_inc(&cm_id_priv->refcount);
	cm_id_priv->id.state = IB_CM_REQ_RCVD;
	atomic_inc(&cm_id_priv->work_count);
	spin_unlock_irq(&cm.lock);

	return listen_cm_id_priv;
}

/*
 * Work-around for inter-subnet connections. If the LIDs are permissive,
 * we need to override the LID/SL data in the REQ with the LID information
 * in the work completion.
 */
static void cm_process_routed_req(struct cm_req_msg *req_msg, struct ib_wc *wc)
{
	if (!cm_req_get_primary_subnet_local(req_msg)) {
		if (req_msg->primary_local_lid == IB_LID_PERMISSIVE) {
			req_msg->primary_local_lid = cpu_to_be16(wc->slid);
			cm_req_set_primary_sl(req_msg, wc->sl);
		}

		if (req_msg->primary_remote_lid == IB_LID_PERMISSIVE)
			req_msg->primary_remote_lid = cpu_to_be16(wc->dlid_path_bits);
	}

	if (!cm_req_get_alt_subnet_local(req_msg)) {
		if (req_msg->alt_local_lid == IB_LID_PERMISSIVE) {
			req_msg->alt_local_lid = cpu_to_be16(wc->slid);
			cm_req_set_alt_sl(req_msg, wc->sl);
		}

		if (req_msg->alt_remote_lid == IB_LID_PERMISSIVE)
			req_msg->alt_remote_lid = cpu_to_be16(wc->dlid_path_bits);
	}
}

static int cm_req_handler(struct cm_work *work)
{
	struct ib_cm_id *cm_id;
	struct cm_id_private *cm_id_priv, *listen_cm_id_priv;
	struct cm_req_msg *req_msg;
	union ib_gid gid;
	struct ib_gid_attr gid_attr;
	int ret;

	req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;

	cm_id = ib_create_cm_id(work->port->cm_dev->ib_device, NULL, NULL);
	if (IS_ERR(cm_id))
		return PTR_ERR(cm_id);

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	cm_id_priv->id.remote_id = req_msg->local_comm_id;
	cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
				work->mad_recv_wc->recv_buf.grh,
				&cm_id_priv->av);
	cm_id_priv->timewait_info = cm_create_timewait_info(cm_id_priv->
							    id.local_id);
	if (IS_ERR(cm_id_priv->timewait_info)) {
		ret = PTR_ERR(cm_id_priv->timewait_info);
		cm_id_priv->timewait_info = NULL;
		goto destroy;
	}
	cm_id_priv->timewait_info->work.remote_id = req_msg->local_comm_id;
	cm_id_priv->timewait_info->remote_ca_guid = req_msg->local_ca_guid;
	cm_id_priv->timewait_info->remote_qpn = cm_req_get_local_qpn(req_msg);

	listen_cm_id_priv = cm_match_req(work, cm_id_priv);
	if (!listen_cm_id_priv) {
		ret = -EINVAL;
		kfree(cm_id_priv->timewait_info);
		goto destroy;
	}

	cm_id_priv->id.cm_handler = listen_cm_id_priv->id.cm_handler;
	cm_id_priv->id.context = listen_cm_id_priv->id.context;
	cm_id_priv->id.service_id = req_msg->service_id;
	cm_id_priv->id.service_mask = ~cpu_to_be64(0);

	cm_process_routed_req(req_msg, work->mad_recv_wc->wc);
	cm_format_paths_from_req(req_msg, &work->path[0], &work->path[1]);

	memcpy(work->path[0].dmac, cm_id_priv->av.ah_attr.dmac, ETH_ALEN);
	work->path[0].hop_limit = cm_id_priv->av.ah_attr.grh.hop_limit;
	ret = ib_get_cached_gid(work->port->cm_dev->ib_device,
				work->port->port_num,
				cm_id_priv->av.ah_attr.grh.sgid_index,
				&gid, &gid_attr);
	if (!ret) {
		if (gid_attr.ndev) {
			work->path[0].ifindex = gid_attr.ndev->ifindex;
			work->path[0].net = dev_net(gid_attr.ndev);
			dev_put(gid_attr.ndev);
		}
		work->path[0].gid_type = gid_attr.gid_type;
		ret = cm_init_av_by_path(&work->path[0], &cm_id_priv->av,
					 cm_id_priv);
	}
	if (ret) {
		int err = ib_get_cached_gid(work->port->cm_dev->ib_device,
					    work->port->port_num, 0,
					    &work->path[0].sgid,
					    &gid_attr);
		if (!err && gid_attr.ndev) {
			work->path[0].ifindex = gid_attr.ndev->ifindex;
			work->path[0].net = dev_net(gid_attr.ndev);
			dev_put(gid_attr.ndev);
		}
		work->path[0].gid_type = gid_attr.gid_type;
		ib_send_cm_rej(cm_id, IB_CM_REJ_INVALID_GID,
			       &work->path[0].sgid, sizeof work->path[0].sgid,
			       NULL, 0);
		goto rejected;
	}
	if (req_msg->alt_local_lid) {
		ret = cm_init_av_by_path(&work->path[1], &cm_id_priv->alt_av,
					 cm_id_priv);
		if (ret) {
			ib_send_cm_rej(cm_id, IB_CM_REJ_INVALID_ALT_GID,
				       &work->path[0].sgid,
				       sizeof work->path[0].sgid, NULL, 0);
			goto rejected;
		}
	}
	cm_id_priv->tid = req_msg->hdr.tid;
	cm_id_priv->timeout_ms = cm_convert_to_ms(
					cm_req_get_local_resp_timeout(req_msg));
	cm_id_priv->max_cm_retries = cm_req_get_max_cm_retries(req_msg);
	cm_id_priv->remote_qpn = cm_req_get_local_qpn(req_msg);
	cm_id_priv->initiator_depth = cm_req_get_resp_res(req_msg);
	cm_id_priv->responder_resources = cm_req_get_init_depth(req_msg);
	cm_id_priv->path_mtu = cm_req_get_path_mtu(req_msg);
	cm_id_priv->pkey = req_msg->pkey;
	cm_id_priv->sq_psn = cm_req_get_starting_psn(req_msg);
	cm_id_priv->retry_count = cm_req_get_retry_count(req_msg);
	cm_id_priv->rnr_retry_count = cm_req_get_rnr_retry_count(req_msg);
	cm_id_priv->qp_type = cm_req_get_qp_type(req_msg);

	cm_format_req_event(work, cm_id_priv, &listen_cm_id_priv->id);
	cm_process_work(cm_id_priv, work);
	cm_deref_id(listen_cm_id_priv);
	return 0;

rejected:
	atomic_dec(&cm_id_priv->refcount);
	cm_deref_id(listen_cm_id_priv);
destroy:
	ib_destroy_cm_id(cm_id);
	return ret;
}

static void cm_format_rep(struct cm_rep_msg *rep_msg,
			  struct cm_id_private *cm_id_priv,
			  struct ib_cm_rep_param *param)
{
	cm_format_mad_hdr(&rep_msg->hdr, CM_REP_ATTR_ID, cm_id_priv->tid);
	rep_msg->local_comm_id = cm_id_priv->id.local_id;
	rep_msg->remote_comm_id = cm_id_priv->id.remote_id;
	cm_rep_set_starting_psn(rep_msg, cpu_to_be32(param->starting_psn));
	rep_msg->resp_resources = param->responder_resources;
	cm_rep_set_target_ack_delay(rep_msg,
				    cm_id_priv->av.port->cm_dev->ack_delay);
	cm_rep_set_failover(rep_msg, param->failover_accepted);
	cm_rep_set_rnr_retry_count(rep_msg, param->rnr_retry_count);
	rep_msg->local_ca_guid = cm_id_priv->id.device->node_guid;

	if (cm_id_priv->qp_type != IB_QPT_XRC_TGT) {
		rep_msg->initiator_depth = param->initiator_depth;
		cm_rep_set_flow_ctrl(rep_msg, param->flow_control);
		cm_rep_set_srq(rep_msg, param->srq);
		cm_rep_set_local_qpn(rep_msg, cpu_to_be32(param->qp_num));
	} else {
		cm_rep_set_srq(rep_msg, 1);
		cm_rep_set_local_eecn(rep_msg, cpu_to_be32(param->qp_num));
	}

	if (param->private_data && param->private_data_len)
		memcpy(rep_msg->private_data, param->private_data,
		       param->private_data_len);
}

int ib_send_cm_rep(struct ib_cm_id *cm_id,
		   struct ib_cm_rep_param *param)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	struct cm_rep_msg *rep_msg;
	unsigned long flags;
	int ret;

	if (param->private_data &&
	    param->private_data_len > IB_CM_REP_PRIVATE_DATA_SIZE)
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_REQ_RCVD &&
	    cm_id->state != IB_CM_MRA_REQ_SENT) {
		ret = -EINVAL;
		goto out;
	}

	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret)
		goto out;

	rep_msg = (struct cm_rep_msg *) msg->mad;
	cm_format_rep(rep_msg, cm_id_priv, param);
	msg->timeout_ms = cm_id_priv->timeout_ms;
	msg->context[1] = (void *) (unsigned long) IB_CM_REP_SENT;

	ret = ib_post_send_mad(msg, NULL);
	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		return ret;
	}

	cm_id->state = IB_CM_REP_SENT;
	cm_id_priv->msg = msg;
	cm_id_priv->initiator_depth = param->initiator_depth;
	cm_id_priv->responder_resources = param->responder_resources;
	cm_id_priv->rq_psn = cm_rep_get_starting_psn(rep_msg);
	cm_id_priv->local_qpn = cpu_to_be32(param->qp_num & 0xFFFFFF);

out:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_rep);

static void cm_format_rtu(struct cm_rtu_msg *rtu_msg,
			  struct cm_id_private *cm_id_priv,
			  const void *private_data,
			  u8 private_data_len)
{
	cm_format_mad_hdr(&rtu_msg->hdr, CM_RTU_ATTR_ID, cm_id_priv->tid);
	rtu_msg->local_comm_id = cm_id_priv->id.local_id;
	rtu_msg->remote_comm_id = cm_id_priv->id.remote_id;

	if (private_data && private_data_len)
		memcpy(rtu_msg->private_data, private_data, private_data_len);
}

int ib_send_cm_rtu(struct ib_cm_id *cm_id,
		   const void *private_data,
		   u8 private_data_len)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	unsigned long flags;
	void *data;
	int ret;

	if (private_data && private_data_len > IB_CM_RTU_PRIVATE_DATA_SIZE)
		return -EINVAL;

	data = cm_copy_private_data(private_data, private_data_len);
	if (IS_ERR(data))
		return PTR_ERR(data);

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_REP_RCVD &&
	    cm_id->state != IB_CM_MRA_REP_SENT) {
		ret = -EINVAL;
		goto error;
	}

	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret)
		goto error;

	cm_format_rtu((struct cm_rtu_msg *) msg->mad, cm_id_priv,
		      private_data, private_data_len);

	ret = ib_post_send_mad(msg, NULL);
	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		kfree(data);
		return ret;
	}

	cm_id->state = IB_CM_ESTABLISHED;
	cm_set_private_data(cm_id_priv, data, private_data_len);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return 0;

error:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	kfree(data);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_rtu);

static void cm_format_rep_event(struct cm_work *work, enum ib_qp_type qp_type)
{
	struct cm_rep_msg *rep_msg;
	struct ib_cm_rep_event_param *param;

	rep_msg = (struct cm_rep_msg *)work->mad_recv_wc->recv_buf.mad;
	param = &work->cm_event.param.rep_rcvd;
	param->remote_ca_guid = rep_msg->local_ca_guid;
	param->remote_qkey = be32_to_cpu(rep_msg->local_qkey);
	param->remote_qpn = be32_to_cpu(cm_rep_get_qpn(rep_msg, qp_type));
	param->starting_psn = be32_to_cpu(cm_rep_get_starting_psn(rep_msg));
	param->responder_resources = rep_msg->initiator_depth;
	param->initiator_depth = rep_msg->resp_resources;
	param->target_ack_delay = cm_rep_get_target_ack_delay(rep_msg);
	param->failover_accepted = cm_rep_get_failover(rep_msg);
	param->flow_control = cm_rep_get_flow_ctrl(rep_msg);
	param->rnr_retry_count = cm_rep_get_rnr_retry_count(rep_msg);
	param->srq = cm_rep_get_srq(rep_msg);
	work->cm_event.private_data = &rep_msg->private_data;
}

static void cm_dup_rep_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_rep_msg *rep_msg;
	struct ib_mad_send_buf *msg = NULL;
	int ret;

	rep_msg = (struct cm_rep_msg *) work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(rep_msg->remote_comm_id,
				   rep_msg->local_comm_id);
	if (!cm_id_priv)
		return;

	atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
			counter[CM_REP_COUNTER]);
	ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
	if (ret)
		goto deref;

	spin_lock_irq(&cm_id_priv->lock);
	if (cm_id_priv->id.state == IB_CM_ESTABLISHED)
		cm_format_rtu((struct cm_rtu_msg *) msg->mad, cm_id_priv,
			      cm_id_priv->private_data,
			      cm_id_priv->private_data_len);
	else if (cm_id_priv->id.state == IB_CM_MRA_REP_SENT)
		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
			      CM_MSG_RESPONSE_REP, cm_id_priv->service_timeout,
			      cm_id_priv->private_data,
			      cm_id_priv->private_data_len);
	else
		goto unlock;
	spin_unlock_irq(&cm_id_priv->lock);

	ret = ib_post_send_mad(msg, NULL);
	if (ret)
		goto free;
	return;

unlock:	spin_unlock_irq(&cm_id_priv->lock);
free:	cm_free_msg(msg);
deref:	cm_deref_id(cm_id_priv);
}

static int cm_rep_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_rep_msg *rep_msg;
	int ret;

	rep_msg = (struct cm_rep_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(rep_msg->remote_comm_id, 0);
	if (!cm_id_priv) {
		cm_dup_rep_handler(work);
		return -EINVAL;
	}

	cm_format_rep_event(work, cm_id_priv->qp_type);

	spin_lock_irq(&cm_id_priv->lock);
	switch (cm_id_priv->id.state) {
	case IB_CM_REQ_SENT:
	case IB_CM_MRA_REQ_RCVD:
		break;
	default:
		spin_unlock_irq(&cm_id_priv->lock);
		ret = -EINVAL;
		goto error;
	}

	cm_id_priv->timewait_info->work.remote_id = rep_msg->local_comm_id;
	cm_id_priv->timewait_info->remote_ca_guid = rep_msg->local_ca_guid;
	cm_id_priv->timewait_info->remote_qpn = cm_rep_get_qpn(rep_msg, cm_id_priv->qp_type);

	spin_lock(&cm.lock);
	/* Check for duplicate REP. */
	if (cm_insert_remote_id(cm_id_priv->timewait_info)) {
		spin_unlock(&cm.lock);
		spin_unlock_irq(&cm_id_priv->lock);
		ret = -EINVAL;
		goto error;
	}
	/* Check for a stale connection. */
	if (cm_insert_remote_qpn(cm_id_priv->timewait_info)) {
		rb_erase(&cm_id_priv->timewait_info->remote_id_node,
			 &cm.remote_id_table);
		cm_id_priv->timewait_info->inserted_remote_id = 0;
		spin_unlock(&cm.lock);
		spin_unlock_irq(&cm_id_priv->lock);
		cm_issue_rej(work->port, work->mad_recv_wc,
			     IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REP,
			     NULL, 0);
		ret = -EINVAL;
		goto error;
	}
	spin_unlock(&cm.lock);

	cm_id_priv->id.state = IB_CM_REP_RCVD;
	cm_id_priv->id.remote_id = rep_msg->local_comm_id;
	cm_id_priv->remote_qpn = cm_rep_get_qpn(rep_msg, cm_id_priv->qp_type);
	cm_id_priv->initiator_depth = rep_msg->resp_resources;
	cm_id_priv->responder_resources = rep_msg->initiator_depth;
	cm_id_priv->sq_psn = cm_rep_get_starting_psn(rep_msg);
	cm_id_priv->rnr_retry_count = cm_rep_get_rnr_retry_count(rep_msg);
	cm_id_priv->target_ack_delay = cm_rep_get_target_ack_delay(rep_msg);
	cm_id_priv->av.timeout =
			cm_ack_timeout(cm_id_priv->target_ack_delay,
				       cm_id_priv->av.timeout - 1);
	cm_id_priv->alt_av.timeout =
			cm_ack_timeout(cm_id_priv->target_ack_delay,
				       cm_id_priv->alt_av.timeout - 1);

	/* todo: handle peer_to_peer */

	ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irq(&cm_id_priv->lock);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;

error:
	cm_deref_id(cm_id_priv);
	return ret;
}

static int cm_establish_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	int ret;

	/* See comment in cm_establish about lookup. */
	cm_id_priv = cm_acquire_id(work->local_id, work->remote_id);
	if (!cm_id_priv)
		return -EINVAL;

	spin_lock_irq(&cm_id_priv->lock);
	if (cm_id_priv->id.state != IB_CM_ESTABLISHED) {
		spin_unlock_irq(&cm_id_priv->lock);
		goto out;
	}

	ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irq(&cm_id_priv->lock);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;
out:
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}

static int cm_rtu_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_rtu_msg *rtu_msg;
	int ret;

	rtu_msg = (struct cm_rtu_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(rtu_msg->remote_comm_id,
				   rtu_msg->local_comm_id);
	if (!cm_id_priv)
		return -EINVAL;

	work->cm_event.private_data = &rtu_msg->private_data;

	spin_lock_irq(&cm_id_priv->lock);
	if (cm_id_priv->id.state != IB_CM_REP_SENT &&
	    cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
		spin_unlock_irq(&cm_id_priv->lock);
		atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
				counter[CM_RTU_COUNTER]);
		goto out;
	}
	cm_id_priv->id.state = IB_CM_ESTABLISHED;

	ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irq(&cm_id_priv->lock);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;
out:
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}
static void cm_format_dreq(struct cm_dreq_msg *dreq_msg,
			   struct cm_id_private *cm_id_priv,
			   const void *private_data,
			   u8 private_data_len)
{
	cm_format_mad_hdr(&dreq_msg->hdr, CM_DREQ_ATTR_ID,
			  cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_DREQ));
	dreq_msg->local_comm_id = cm_id_priv->id.local_id;
	dreq_msg->remote_comm_id = cm_id_priv->id.remote_id;
	cm_dreq_set_remote_qpn(dreq_msg, cm_id_priv->remote_qpn);

	if (private_data && private_data_len)
		memcpy(dreq_msg->private_data, private_data, private_data_len);
}
int ib_send_cm_dreq(struct ib_cm_id *cm_id,
		    const void *private_data,
		    u8 private_data_len)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	unsigned long flags;
	int ret;

	if (private_data && private_data_len > IB_CM_DREQ_PRIVATE_DATA_SIZE)
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_ESTABLISHED) {
		ret = -EINVAL;
		goto out;
	}

	if (cm_id->lap_state == IB_CM_LAP_SENT ||
	    cm_id->lap_state == IB_CM_MRA_LAP_RCVD)
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);

	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret) {
		cm_enter_timewait(cm_id_priv);
		goto out;
	}

	cm_format_dreq((struct cm_dreq_msg *) msg->mad, cm_id_priv,
		       private_data, private_data_len);
	msg->timeout_ms = cm_id_priv->timeout_ms;
	msg->context[1] = (void *) (unsigned long) IB_CM_DREQ_SENT;

	ret = ib_post_send_mad(msg, NULL);
	if (ret) {
		cm_enter_timewait(cm_id_priv);
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		return ret;
	}

	cm_id->state = IB_CM_DREQ_SENT;
	cm_id_priv->msg = msg;
out:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_dreq);
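
/*
 * Illustrative teardown sketch (not part of this file): a connection is
 * normally closed by sending a DREQ and letting the DREP (or retry
 * exhaustion) move the cm_id into timewait.  "example_close" is a
 * hypothetical helper.
 *
 *	static void example_close(struct ib_cm_id *cm_id)
 *	{
 *		// -EINVAL here means the id is not in IB_CM_ESTABLISHED,
 *		// i.e. teardown has already started; that is benign.
 *		(void) ib_send_cm_dreq(cm_id, NULL, 0);
 *		// IB_CM_DREP_RECEIVED / IB_CM_TIMEWAIT_EXIT then arrive
 *		// through the id's cm_handler.
 *	}
 */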
static void cm_format_drep(struct cm_drep_msg *drep_msg,
			   struct cm_id_private *cm_id_priv,
			   const void *private_data,
			   u8 private_data_len)
{
	cm_format_mad_hdr(&drep_msg->hdr, CM_DREP_ATTR_ID, cm_id_priv->tid);
	drep_msg->local_comm_id = cm_id_priv->id.local_id;
	drep_msg->remote_comm_id = cm_id_priv->id.remote_id;

	if (private_data && private_data_len)
		memcpy(drep_msg->private_data, private_data, private_data_len);
}
int ib_send_cm_drep(struct ib_cm_id *cm_id,
		    const void *private_data,
		    u8 private_data_len)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	unsigned long flags;
	void *data;
	int ret;

	if (private_data && private_data_len > IB_CM_DREP_PRIVATE_DATA_SIZE)
		return -EINVAL;

	data = cm_copy_private_data(private_data, private_data_len);
	if (IS_ERR(data))
		return PTR_ERR(data);

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_DREQ_RCVD) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		kfree(data);
		return -EINVAL;
	}

	cm_set_private_data(cm_id_priv, data, private_data_len);
	cm_enter_timewait(cm_id_priv);

	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret)
		goto out;

	cm_format_drep((struct cm_drep_msg *) msg->mad, cm_id_priv,
		       private_data, private_data_len);

	ret = ib_post_send_mad(msg, NULL);
	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		return ret;
	}

out:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_drep);

static int cm_issue_drep(struct cm_port *port,
			 struct ib_mad_recv_wc *mad_recv_wc)
{
	struct ib_mad_send_buf *msg = NULL;
	struct cm_dreq_msg *dreq_msg;
	struct cm_drep_msg *drep_msg;
	int ret;

	ret = cm_alloc_response_msg(port, mad_recv_wc, &msg);
	if (ret)
		return ret;

	dreq_msg = (struct cm_dreq_msg *) mad_recv_wc->recv_buf.mad;
	drep_msg = (struct cm_drep_msg *) msg->mad;

	cm_format_mad_hdr(&drep_msg->hdr, CM_DREP_ATTR_ID, dreq_msg->hdr.tid);
	drep_msg->remote_comm_id = dreq_msg->local_comm_id;
	drep_msg->local_comm_id = dreq_msg->remote_comm_id;

	ret = ib_post_send_mad(msg, NULL);
	if (ret)
		cm_free_msg(msg);

	return ret;
}
static int cm_dreq_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_dreq_msg *dreq_msg;
	struct ib_mad_send_buf *msg = NULL;
	int ret;

	dreq_msg = (struct cm_dreq_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
				   dreq_msg->local_comm_id);
	if (!cm_id_priv) {
		atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
				counter[CM_DREQ_COUNTER]);
		cm_issue_drep(work->port, work->mad_recv_wc);
		return -EINVAL;
	}

	work->cm_event.private_data = &dreq_msg->private_data;

	spin_lock_irq(&cm_id_priv->lock);
	if (cm_id_priv->local_qpn != cm_dreq_get_remote_qpn(dreq_msg))
		goto unlock;

	switch (cm_id_priv->id.state) {
	case IB_CM_REP_SENT:
	case IB_CM_DREQ_SENT:
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		break;
	case IB_CM_ESTABLISHED:
		if (cm_id_priv->id.lap_state == IB_CM_LAP_SENT ||
		    cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
			ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		break;
	case IB_CM_MRA_REP_RCVD:
		break;
	case IB_CM_TIMEWAIT:
		atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
				counter[CM_DREQ_COUNTER]);
		if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
			goto unlock;

		cm_format_drep((struct cm_drep_msg *) msg->mad, cm_id_priv,
			       cm_id_priv->private_data,
			       cm_id_priv->private_data_len);
		spin_unlock_irq(&cm_id_priv->lock);

		if (ib_post_send_mad(msg, NULL))
			cm_free_msg(msg);
		goto deref;
	case IB_CM_DREQ_RCVD:
		atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
				counter[CM_DREQ_COUNTER]);
		goto unlock;
	default:
		goto unlock;
	}
	cm_id_priv->id.state = IB_CM_DREQ_RCVD;
	cm_id_priv->tid = dreq_msg->hdr.tid;
	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irq(&cm_id_priv->lock);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;

unlock:	spin_unlock_irq(&cm_id_priv->lock);
deref:	cm_deref_id(cm_id_priv);
	return -EINVAL;
}

static int cm_drep_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_drep_msg *drep_msg;
	int ret;

	drep_msg = (struct cm_drep_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(drep_msg->remote_comm_id,
				   drep_msg->local_comm_id);
	if (!cm_id_priv)
		return -EINVAL;

	work->cm_event.private_data = &drep_msg->private_data;

	spin_lock_irq(&cm_id_priv->lock);
	if (cm_id_priv->id.state != IB_CM_DREQ_SENT &&
	    cm_id_priv->id.state != IB_CM_DREQ_RCVD) {
		spin_unlock_irq(&cm_id_priv->lock);
		goto out;
	}
	cm_enter_timewait(cm_id_priv);

	ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irq(&cm_id_priv->lock);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;
out:
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}
int ib_send_cm_rej(struct ib_cm_id *cm_id,
		   enum ib_cm_rej_reason reason,
		   void *ari,
		   u8 ari_length,
		   const void *private_data,
		   u8 private_data_len)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	unsigned long flags;
	int ret;

	if ((private_data && private_data_len > IB_CM_REJ_PRIVATE_DATA_SIZE) ||
	    (ari && ari_length > IB_CM_REJ_ARI_LENGTH))
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id->state) {
	case IB_CM_REQ_SENT:
	case IB_CM_MRA_REQ_RCVD:
	case IB_CM_REQ_RCVD:
	case IB_CM_MRA_REQ_SENT:
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
		ret = cm_alloc_msg(cm_id_priv, &msg);
		if (!ret)
			cm_format_rej((struct cm_rej_msg *) msg->mad,
				      cm_id_priv, reason, ari, ari_length,
				      private_data, private_data_len);

		cm_reset_to_idle(cm_id_priv);
		break;
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
		ret = cm_alloc_msg(cm_id_priv, &msg);
		if (!ret)
			cm_format_rej((struct cm_rej_msg *) msg->mad,
				      cm_id_priv, reason, ari, ari_length,
				      private_data, private_data_len);

		cm_enter_timewait(cm_id_priv);
		break;
	default:
		ret = -EINVAL;
		goto out;
	}

	if (ret)
		goto out;

	ret = ib_post_send_mad(msg, NULL);
	if (ret)
		cm_free_msg(msg);

out:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_rej);
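
/*
 * Illustrative sketch (not part of this file): a listening ULP that cannot
 * accept an incoming REQ rejects it from its cm_handler.  The ARI buffer is
 * optional; none is supplied here.  "example_listen_handler" and
 * "example_have_room" are hypothetical consumer code.
 *
 *	static int example_listen_handler(struct ib_cm_id *cm_id,
 *					  struct ib_cm_event *event)
 *	{
 *		if (event->event == IB_CM_REQ_RECEIVED &&
 *		    !example_have_room())
 *			return ib_send_cm_rej(cm_id,
 *					      IB_CM_REJ_CONSUMER_DEFINED,
 *					      NULL, 0, NULL, 0);
 *		return 0;
 *	}
 */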
static void cm_format_rej_event(struct cm_work *work)
{
	struct cm_rej_msg *rej_msg;
	struct ib_cm_rej_event_param *param;

	rej_msg = (struct cm_rej_msg *)work->mad_recv_wc->recv_buf.mad;
	param = &work->cm_event.param.rej_rcvd;
	param->ari = rej_msg->ari;
	param->ari_length = cm_rej_get_reject_info_len(rej_msg);
	param->reason = __be16_to_cpu(rej_msg->reason);
	work->cm_event.private_data = &rej_msg->private_data;
}
static struct cm_id_private * cm_acquire_rejected_id(struct cm_rej_msg *rej_msg)
{
	struct cm_timewait_info *timewait_info;
	struct cm_id_private *cm_id_priv;
	__be32 remote_id;

	remote_id = rej_msg->local_comm_id;

	if (__be16_to_cpu(rej_msg->reason) == IB_CM_REJ_TIMEOUT) {
		spin_lock_irq(&cm.lock);
		timewait_info = cm_find_remote_id( *((__be64 *) rej_msg->ari),
						   remote_id);
		if (!timewait_info) {
			spin_unlock_irq(&cm.lock);
			return NULL;
		}
		cm_id_priv = idr_find(&cm.local_id_table, (__force int)
				      (timewait_info->work.local_id ^
				       cm.random_id_operand));
		if (cm_id_priv) {
			if (cm_id_priv->id.remote_id == remote_id)
				atomic_inc(&cm_id_priv->refcount);
			else
				cm_id_priv = NULL;
		}
		spin_unlock_irq(&cm.lock);
	} else if (cm_rej_get_msg_rejected(rej_msg) == CM_MSG_RESPONSE_REQ)
		cm_id_priv = cm_acquire_id(rej_msg->remote_comm_id, 0);
	else
		cm_id_priv = cm_acquire_id(rej_msg->remote_comm_id, remote_id);

	return cm_id_priv;
}

static int cm_rej_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_rej_msg *rej_msg;
	int ret;

	rej_msg = (struct cm_rej_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_rejected_id(rej_msg);
	if (!cm_id_priv)
		return -EINVAL;

	cm_format_rej_event(work);

	spin_lock_irq(&cm_id_priv->lock);
	switch (cm_id_priv->id.state) {
	case IB_CM_REQ_SENT:
	case IB_CM_MRA_REQ_RCVD:
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		/* fall through */
	case IB_CM_REQ_RCVD:
	case IB_CM_MRA_REQ_SENT:
		if (__be16_to_cpu(rej_msg->reason) == IB_CM_REJ_STALE_CONN)
			cm_enter_timewait(cm_id_priv);
		else
			cm_reset_to_idle(cm_id_priv);
		break;
	case IB_CM_DREQ_SENT:
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		/* fall through */
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
		cm_enter_timewait(cm_id_priv);
		break;
	case IB_CM_ESTABLISHED:
		if (cm_id_priv->id.lap_state == IB_CM_LAP_UNINIT ||
		    cm_id_priv->id.lap_state == IB_CM_LAP_SENT) {
			if (cm_id_priv->id.lap_state == IB_CM_LAP_SENT)
				ib_cancel_mad(cm_id_priv->av.port->mad_agent,
					      cm_id_priv->msg);
			cm_enter_timewait(cm_id_priv);
			break;
		}
		/* fall through */
	default:
		spin_unlock_irq(&cm_id_priv->lock);
		ret = -EINVAL;
		goto out;
	}

	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irq(&cm_id_priv->lock);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;
out:
	cm_deref_id(cm_id_priv);
	return ret;
}
int ib_send_cm_mra(struct ib_cm_id *cm_id,
		   u8 service_timeout,
		   const void *private_data,
		   u8 private_data_len)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	enum ib_cm_state cm_state;
	enum ib_cm_lap_state lap_state;
	enum cm_msg_response msg_response;
	void *data;
	unsigned long flags;
	int ret = 0;

	if (private_data && private_data_len > IB_CM_MRA_PRIVATE_DATA_SIZE)
		return -EINVAL;

	data = cm_copy_private_data(private_data, private_data_len);
	if (IS_ERR(data))
		return PTR_ERR(data);

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch(cm_id_priv->id.state) {
	case IB_CM_REQ_RCVD:
		cm_state = IB_CM_MRA_REQ_SENT;
		lap_state = cm_id->lap_state;
		msg_response = CM_MSG_RESPONSE_REQ;
		break;
	case IB_CM_REP_RCVD:
		cm_state = IB_CM_MRA_REP_SENT;
		lap_state = cm_id->lap_state;
		msg_response = CM_MSG_RESPONSE_REP;
		break;
	case IB_CM_ESTABLISHED:
		if (cm_id->lap_state == IB_CM_LAP_RCVD) {
			cm_state = cm_id->state;
			lap_state = IB_CM_MRA_LAP_SENT;
			msg_response = CM_MSG_RESPONSE_OTHER;
			break;
		}
		/* fall through */
	default:
		ret = -EINVAL;
		goto error1;
	}

	if (!(service_timeout & IB_CM_MRA_FLAG_DELAY)) {
		ret = cm_alloc_msg(cm_id_priv, &msg);
		if (ret)
			goto error1;

		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
			      msg_response, service_timeout,
			      private_data, private_data_len);
		ret = ib_post_send_mad(msg, NULL);
		if (ret)
			goto error2;
	}

	cm_id->state = cm_state;
	cm_id->lap_state = lap_state;
	cm_id_priv->service_timeout = service_timeout;
	cm_set_private_data(cm_id_priv, data, private_data_len);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return 0;

error1:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	kfree(data);
	return ret;

error2:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	kfree(data);
	cm_free_msg(msg);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_mra);
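
/*
 * Illustrative sketch (not part of this file): a passive side that needs
 * more time to service a REQ sends an MRA so the active side stretches its
 * retry timeout.  OR-ing IB_CM_MRA_FLAG_DELAY into service_timeout records
 * the timeout but defers sending the MRA until a duplicate message arrives.
 * "timeout" is a placeholder 5-bit service timeout value chosen by the ULP.
 *
 *	if (event->event == IB_CM_REQ_RECEIVED)
 *		ib_send_cm_mra(cm_id, timeout | IB_CM_MRA_FLAG_DELAY,
 *			       NULL, 0);
 */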
static struct cm_id_private * cm_acquire_mraed_id(struct cm_mra_msg *mra_msg)
{
	switch (cm_mra_get_msg_mraed(mra_msg)) {
	case CM_MSG_RESPONSE_REQ:
		return cm_acquire_id(mra_msg->remote_comm_id, 0);
	case CM_MSG_RESPONSE_REP:
	case CM_MSG_RESPONSE_OTHER:
		return cm_acquire_id(mra_msg->remote_comm_id,
				     mra_msg->local_comm_id);
	default:
		return NULL;
	}
}
static int cm_mra_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_mra_msg *mra_msg;
	int timeout, ret;

	mra_msg = (struct cm_mra_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_mraed_id(mra_msg);
	if (!cm_id_priv)
		return -EINVAL;

	work->cm_event.private_data = &mra_msg->private_data;
	work->cm_event.param.mra_rcvd.service_timeout =
					cm_mra_get_service_timeout(mra_msg);
	timeout = cm_convert_to_ms(cm_mra_get_service_timeout(mra_msg)) +
		  cm_convert_to_ms(cm_id_priv->av.timeout);

	spin_lock_irq(&cm_id_priv->lock);
	switch (cm_id_priv->id.state) {
	case IB_CM_REQ_SENT:
		if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_REQ ||
		    ib_modify_mad(cm_id_priv->av.port->mad_agent,
				  cm_id_priv->msg, timeout))
			goto out;
		cm_id_priv->id.state = IB_CM_MRA_REQ_RCVD;
		break;
	case IB_CM_REP_SENT:
		if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_REP ||
		    ib_modify_mad(cm_id_priv->av.port->mad_agent,
				  cm_id_priv->msg, timeout))
			goto out;
		cm_id_priv->id.state = IB_CM_MRA_REP_RCVD;
		break;
	case IB_CM_ESTABLISHED:
		if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_OTHER ||
		    cm_id_priv->id.lap_state != IB_CM_LAP_SENT ||
		    ib_modify_mad(cm_id_priv->av.port->mad_agent,
				  cm_id_priv->msg, timeout)) {
			if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
				atomic_long_inc(&work->port->
						counter_group[CM_RECV_DUPLICATES].
						counter[CM_MRA_COUNTER]);
			goto out;
		}
		cm_id_priv->id.lap_state = IB_CM_MRA_LAP_RCVD;
		break;
	case IB_CM_MRA_REQ_RCVD:
	case IB_CM_MRA_REP_RCVD:
		atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
				counter[CM_MRA_COUNTER]);
		/* fall through */
	default:
		goto out;
	}

	cm_id_priv->msg->context[1] = (void *) (unsigned long)
				      cm_id_priv->id.state;
	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irq(&cm_id_priv->lock);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;
out:
	spin_unlock_irq(&cm_id_priv->lock);
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}
static void cm_format_lap(struct cm_lap_msg *lap_msg,
			  struct cm_id_private *cm_id_priv,
			  struct ib_sa_path_rec *alternate_path,
			  const void *private_data,
			  u8 private_data_len)
{
	cm_format_mad_hdr(&lap_msg->hdr, CM_LAP_ATTR_ID,
			  cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_LAP));
	lap_msg->local_comm_id = cm_id_priv->id.local_id;
	lap_msg->remote_comm_id = cm_id_priv->id.remote_id;
	cm_lap_set_remote_qpn(lap_msg, cm_id_priv->remote_qpn);
	/* todo: need remote CM response timeout */
	cm_lap_set_remote_resp_timeout(lap_msg, 0x1F);
	lap_msg->alt_local_lid = alternate_path->slid;
	lap_msg->alt_remote_lid = alternate_path->dlid;
	lap_msg->alt_local_gid = alternate_path->sgid;
	lap_msg->alt_remote_gid = alternate_path->dgid;
	cm_lap_set_flow_label(lap_msg, alternate_path->flow_label);
	cm_lap_set_traffic_class(lap_msg, alternate_path->traffic_class);
	lap_msg->alt_hop_limit = alternate_path->hop_limit;
	cm_lap_set_packet_rate(lap_msg, alternate_path->rate);
	cm_lap_set_sl(lap_msg, alternate_path->sl);
	cm_lap_set_subnet_local(lap_msg, 1); /* local only... */
	cm_lap_set_local_ack_timeout(lap_msg,
		cm_ack_timeout(cm_id_priv->av.port->cm_dev->ack_delay,
			       alternate_path->packet_life_time));

	if (private_data && private_data_len)
		memcpy(lap_msg->private_data, private_data, private_data_len);
}
int ib_send_cm_lap(struct ib_cm_id *cm_id,
		   struct ib_sa_path_rec *alternate_path,
		   const void *private_data,
		   u8 private_data_len)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	unsigned long flags;
	int ret;

	if (private_data && private_data_len > IB_CM_LAP_PRIVATE_DATA_SIZE)
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_ESTABLISHED ||
	    (cm_id->lap_state != IB_CM_LAP_UNINIT &&
	     cm_id->lap_state != IB_CM_LAP_IDLE)) {
		ret = -EINVAL;
		goto out;
	}

	ret = cm_init_av_by_path(alternate_path, &cm_id_priv->alt_av,
				 cm_id_priv);
	if (ret)
		goto out;
	cm_id_priv->alt_av.timeout =
			cm_ack_timeout(cm_id_priv->target_ack_delay,
				       cm_id_priv->alt_av.timeout - 1);

	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret)
		goto out;

	cm_format_lap((struct cm_lap_msg *) msg->mad, cm_id_priv,
		      alternate_path, private_data, private_data_len);
	msg->timeout_ms = cm_id_priv->timeout_ms;
	msg->context[1] = (void *) (unsigned long) IB_CM_ESTABLISHED;

	ret = ib_post_send_mad(msg, NULL);
	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		return ret;
	}

	cm_id->lap_state = IB_CM_LAP_SENT;
	cm_id_priv->msg = msg;

out:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_lap);
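
/*
 * Illustrative APM sketch (not part of this file): once a connection is
 * established, the active side may load an alternate path with a LAP; the
 * peer answers with an APR, and a later IB_EVENT_PATH_MIG from the hardware
 * reports an actual migration (see ib_cm_notify() below).  "alt_path" is a
 * previously resolved ib_sa_path_rec supplied by the caller.
 *
 *	ret = ib_send_cm_lap(cm_id, &alt_path, NULL, 0);
 *	if (ret)
 *		pr_debug("LAP not sent: %d\n", ret);
 */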
static void cm_format_path_from_lap(struct cm_id_private *cm_id_priv,
				    struct ib_sa_path_rec *path,
				    struct cm_lap_msg *lap_msg)
{
	memset(path, 0, sizeof *path);
	path->dgid = lap_msg->alt_local_gid;
	path->sgid = lap_msg->alt_remote_gid;
	path->dlid = lap_msg->alt_local_lid;
	path->slid = lap_msg->alt_remote_lid;
	path->flow_label = cm_lap_get_flow_label(lap_msg);
	path->hop_limit = lap_msg->alt_hop_limit;
	path->traffic_class = cm_lap_get_traffic_class(lap_msg);
	path->reversible = 1;
	path->pkey = cm_id_priv->pkey;
	path->sl = cm_lap_get_sl(lap_msg);
	path->mtu_selector = IB_SA_EQ;
	path->mtu = cm_id_priv->path_mtu;
	path->rate_selector = IB_SA_EQ;
	path->rate = cm_lap_get_packet_rate(lap_msg);
	path->packet_life_time_selector = IB_SA_EQ;
	path->packet_life_time = cm_lap_get_local_ack_timeout(lap_msg);
	path->packet_life_time -= (path->packet_life_time > 0);
}
static int cm_lap_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_lap_msg *lap_msg;
	struct ib_cm_lap_event_param *param;
	struct ib_mad_send_buf *msg = NULL;
	int ret;

	/* todo: verify LAP request and send reject APR if invalid. */
	lap_msg = (struct cm_lap_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(lap_msg->remote_comm_id,
				   lap_msg->local_comm_id);
	if (!cm_id_priv)
		return -EINVAL;

	param = &work->cm_event.param.lap_rcvd;
	param->alternate_path = &work->path[0];
	cm_format_path_from_lap(cm_id_priv, param->alternate_path, lap_msg);
	work->cm_event.private_data = &lap_msg->private_data;

	spin_lock_irq(&cm_id_priv->lock);
	if (cm_id_priv->id.state != IB_CM_ESTABLISHED)
		goto unlock;

	switch (cm_id_priv->id.lap_state) {
	case IB_CM_LAP_UNINIT:
	case IB_CM_LAP_IDLE:
		break;
	case IB_CM_MRA_LAP_SENT:
		atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
				counter[CM_LAP_COUNTER]);
		if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
			goto unlock;

		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
			      CM_MSG_RESPONSE_OTHER,
			      cm_id_priv->service_timeout,
			      cm_id_priv->private_data,
			      cm_id_priv->private_data_len);
		spin_unlock_irq(&cm_id_priv->lock);

		if (ib_post_send_mad(msg, NULL))
			cm_free_msg(msg);
		goto deref;
	case IB_CM_LAP_RCVD:
		atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
				counter[CM_LAP_COUNTER]);
		/* fall through */
	default:
		goto unlock;
	}

	cm_id_priv->id.lap_state = IB_CM_LAP_RCVD;
	cm_id_priv->tid = lap_msg->hdr.tid;
	cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
				work->mad_recv_wc->recv_buf.grh,
				&cm_id_priv->av);
	cm_init_av_by_path(param->alternate_path, &cm_id_priv->alt_av,
			   cm_id_priv);
	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irq(&cm_id_priv->lock);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;

unlock:	spin_unlock_irq(&cm_id_priv->lock);
deref:	cm_deref_id(cm_id_priv);
	return -EINVAL;
}
static void cm_format_apr(struct cm_apr_msg *apr_msg,
			  struct cm_id_private *cm_id_priv,
			  enum ib_cm_apr_status status,
			  void *info,
			  u8 info_length,
			  const void *private_data,
			  u8 private_data_len)
{
	cm_format_mad_hdr(&apr_msg->hdr, CM_APR_ATTR_ID, cm_id_priv->tid);
	apr_msg->local_comm_id = cm_id_priv->id.local_id;
	apr_msg->remote_comm_id = cm_id_priv->id.remote_id;
	apr_msg->ap_status = (u8) status;

	if (info && info_length) {
		apr_msg->info_length = info_length;
		memcpy(apr_msg->info, info, info_length);
	}

	if (private_data && private_data_len)
		memcpy(apr_msg->private_data, private_data, private_data_len);
}

int ib_send_cm_apr(struct ib_cm_id *cm_id,
		   enum ib_cm_apr_status status,
		   void *info,
		   u8 info_length,
		   const void *private_data,
		   u8 private_data_len)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	unsigned long flags;
	int ret;

	if ((private_data && private_data_len > IB_CM_APR_PRIVATE_DATA_SIZE) ||
	    (info && info_length > IB_CM_APR_INFO_LENGTH))
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_ESTABLISHED ||
	    (cm_id->lap_state != IB_CM_LAP_RCVD &&
	     cm_id->lap_state != IB_CM_MRA_LAP_SENT)) {
		ret = -EINVAL;
		goto out;
	}

	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret)
		goto out;

	cm_format_apr((struct cm_apr_msg *) msg->mad, cm_id_priv, status,
		      info, info_length, private_data, private_data_len);
	ret = ib_post_send_mad(msg, NULL);
	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		return ret;
	}

	cm_id->lap_state = IB_CM_LAP_IDLE;
out:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_apr);

static int cm_apr_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_apr_msg *apr_msg;
	int ret;

	apr_msg = (struct cm_apr_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(apr_msg->remote_comm_id,
				   apr_msg->local_comm_id);
	if (!cm_id_priv)
		return -EINVAL; /* Unmatched reply. */

	work->cm_event.param.apr_rcvd.ap_status = apr_msg->ap_status;
	work->cm_event.param.apr_rcvd.apr_info = &apr_msg->info;
	work->cm_event.param.apr_rcvd.info_len = apr_msg->info_length;
	work->cm_event.private_data = &apr_msg->private_data;

	spin_lock_irq(&cm_id_priv->lock);
	if (cm_id_priv->id.state != IB_CM_ESTABLISHED ||
	    (cm_id_priv->id.lap_state != IB_CM_LAP_SENT &&
	     cm_id_priv->id.lap_state != IB_CM_MRA_LAP_RCVD)) {
		spin_unlock_irq(&cm_id_priv->lock);
		goto out;
	}
	cm_id_priv->id.lap_state = IB_CM_LAP_IDLE;
	ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
	cm_id_priv->msg = NULL;

	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irq(&cm_id_priv->lock);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;
out:
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}
static int cm_timewait_handler(struct cm_work *work)
{
	struct cm_timewait_info *timewait_info;
	struct cm_id_private *cm_id_priv;
	int ret;

	timewait_info = (struct cm_timewait_info *)work;
	spin_lock_irq(&cm.lock);
	list_del(&timewait_info->list);
	spin_unlock_irq(&cm.lock);

	cm_id_priv = cm_acquire_id(timewait_info->work.local_id,
				   timewait_info->work.remote_id);
	if (!cm_id_priv)
		return -EINVAL;

	spin_lock_irq(&cm_id_priv->lock);
	if (cm_id_priv->id.state != IB_CM_TIMEWAIT ||
	    cm_id_priv->remote_qpn != timewait_info->remote_qpn) {
		spin_unlock_irq(&cm_id_priv->lock);
		goto out;
	}
	cm_id_priv->id.state = IB_CM_IDLE;
	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irq(&cm_id_priv->lock);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;
out:
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}
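
/*
 * Note on timewait: cm_enter_timewait() queues the cm_timewait_info work to
 * fire once the connection's timewait period expires; the handler above then
 * moves a still-waiting cm_id from IB_CM_TIMEWAIT to IB_CM_IDLE and reports
 * IB_CM_TIMEWAIT_EXIT to the consumer.  The remote_qpn comparison guards
 * against the timewait info no longer matching the id when the work runs.
 */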
static void cm_format_sidr_req(struct cm_sidr_req_msg *sidr_req_msg,
			       struct cm_id_private *cm_id_priv,
			       struct ib_cm_sidr_req_param *param)
{
	cm_format_mad_hdr(&sidr_req_msg->hdr, CM_SIDR_REQ_ATTR_ID,
			  cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_SIDR));
	sidr_req_msg->request_id = cm_id_priv->id.local_id;
	sidr_req_msg->pkey = param->path->pkey;
	sidr_req_msg->service_id = param->service_id;

	if (param->private_data && param->private_data_len)
		memcpy(sidr_req_msg->private_data, param->private_data,
		       param->private_data_len);
}

int ib_send_cm_sidr_req(struct ib_cm_id *cm_id,
			struct ib_cm_sidr_req_param *param)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	unsigned long flags;
	int ret;

	if (!param->path || (param->private_data &&
	     param->private_data_len > IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE))
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	ret = cm_init_av_by_path(param->path, &cm_id_priv->av, cm_id_priv);
	if (ret)
		goto out;

	cm_id->service_id = param->service_id;
	cm_id->service_mask = ~cpu_to_be64(0);
	cm_id_priv->timeout_ms = param->timeout_ms;
	cm_id_priv->max_cm_retries = param->max_cm_retries;
	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret)
		goto out;

	cm_format_sidr_req((struct cm_sidr_req_msg *) msg->mad, cm_id_priv,
			   param);
	msg->timeout_ms = cm_id_priv->timeout_ms;
	msg->context[1] = (void *) (unsigned long) IB_CM_SIDR_REQ_SENT;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state == IB_CM_IDLE)
		ret = ib_post_send_mad(msg, NULL);
	else
		ret = -EINVAL;

	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		goto out;
	}
	cm_id->state = IB_CM_SIDR_REQ_SENT;
	cm_id_priv->msg = msg;
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
out:
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_sidr_req);
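
/*
 * Illustrative SIDR sketch (not part of this file): service ID resolution
 * maps a service ID to a UD QPN/Q_Key without creating a connection.  The
 * identifiers below ("path", "EXAMPLE_SERVICE_ID") are placeholders chosen
 * for the example.
 *
 *	struct ib_cm_sidr_req_param param = {
 *		.path		= &path,
 *		.service_id	= EXAMPLE_SERVICE_ID,
 *		.timeout_ms	= 2000,
 *		.max_cm_retries	= 3,
 *	};
 *
 *	ret = ib_send_cm_sidr_req(cm_id, &param);
 *	// The answer is delivered as an IB_CM_SIDR_REP_RECEIVED event.
 */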
static void cm_format_sidr_req_event(struct cm_work *work,
				     struct ib_cm_id *listen_id)
{
	struct cm_sidr_req_msg *sidr_req_msg;
	struct ib_cm_sidr_req_event_param *param;

	sidr_req_msg = (struct cm_sidr_req_msg *)
				work->mad_recv_wc->recv_buf.mad;
	param = &work->cm_event.param.sidr_req_rcvd;
	param->pkey = __be16_to_cpu(sidr_req_msg->pkey);
	param->listen_id = listen_id;
	param->service_id = sidr_req_msg->service_id;
	param->bth_pkey = cm_get_bth_pkey(work);
	param->port = work->port->port_num;
	work->cm_event.private_data = &sidr_req_msg->private_data;
}
static int cm_sidr_req_handler(struct cm_work *work)
{
	struct ib_cm_id *cm_id;
	struct cm_id_private *cm_id_priv, *cur_cm_id_priv;
	struct cm_sidr_req_msg *sidr_req_msg;
	struct ib_wc *wc;

	cm_id = ib_create_cm_id(work->port->cm_dev->ib_device, NULL, NULL);
	if (IS_ERR(cm_id))
		return PTR_ERR(cm_id);
	cm_id_priv = container_of(cm_id, struct cm_id_private, id);

	/* Record SGID/SLID and request ID for lookup. */
	sidr_req_msg = (struct cm_sidr_req_msg *)
				work->mad_recv_wc->recv_buf.mad;
	wc = work->mad_recv_wc->wc;
	cm_id_priv->av.dgid.global.subnet_prefix = cpu_to_be64(wc->slid);
	cm_id_priv->av.dgid.global.interface_id = 0;
	cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
				work->mad_recv_wc->recv_buf.grh,
				&cm_id_priv->av);
	cm_id_priv->id.remote_id = sidr_req_msg->request_id;
	cm_id_priv->tid = sidr_req_msg->hdr.tid;
	atomic_inc(&cm_id_priv->work_count);

	spin_lock_irq(&cm.lock);
	cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
	if (cur_cm_id_priv) {
		spin_unlock_irq(&cm.lock);
		atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
				counter[CM_SIDR_REQ_COUNTER]);
		goto out; /* Duplicate message. */
	}
	cm_id_priv->id.state = IB_CM_SIDR_REQ_RCVD;
	cur_cm_id_priv = cm_find_listen(cm_id->device,
					sidr_req_msg->service_id);
	if (!cur_cm_id_priv) {
		spin_unlock_irq(&cm.lock);
		cm_reject_sidr_req(cm_id_priv, IB_SIDR_UNSUPPORTED);
		goto out; /* No match. */
	}
	atomic_inc(&cur_cm_id_priv->refcount);
	atomic_inc(&cm_id_priv->refcount);
	spin_unlock_irq(&cm.lock);

	cm_id_priv->id.cm_handler = cur_cm_id_priv->id.cm_handler;
	cm_id_priv->id.context = cur_cm_id_priv->id.context;
	cm_id_priv->id.service_id = sidr_req_msg->service_id;
	cm_id_priv->id.service_mask = ~cpu_to_be64(0);

	cm_format_sidr_req_event(work, &cur_cm_id_priv->id);
	cm_process_work(cm_id_priv, work);
	cm_deref_id(cur_cm_id_priv);
	return 0;
out:
	ib_destroy_cm_id(&cm_id_priv->id);
	return -EINVAL;
}
static void cm_format_sidr_rep(struct cm_sidr_rep_msg *sidr_rep_msg,
			       struct cm_id_private *cm_id_priv,
			       struct ib_cm_sidr_rep_param *param)
{
	cm_format_mad_hdr(&sidr_rep_msg->hdr, CM_SIDR_REP_ATTR_ID,
			  cm_id_priv->tid);
	sidr_rep_msg->request_id = cm_id_priv->id.remote_id;
	sidr_rep_msg->status = param->status;
	cm_sidr_rep_set_qpn(sidr_rep_msg, cpu_to_be32(param->qp_num));
	sidr_rep_msg->service_id = cm_id_priv->id.service_id;
	sidr_rep_msg->qkey = cpu_to_be32(param->qkey);

	if (param->info && param->info_length)
		memcpy(sidr_rep_msg->info, param->info, param->info_length);

	if (param->private_data && param->private_data_len)
		memcpy(sidr_rep_msg->private_data, param->private_data,
		       param->private_data_len);
}
int ib_send_cm_sidr_rep(struct ib_cm_id *cm_id,
			struct ib_cm_sidr_rep_param *param)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	unsigned long flags;
	int ret;

	if ((param->info && param->info_length > IB_CM_SIDR_REP_INFO_LENGTH) ||
	    (param->private_data &&
	     param->private_data_len > IB_CM_SIDR_REP_PRIVATE_DATA_SIZE))
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_SIDR_REQ_RCVD) {
		ret = -EINVAL;
		goto error;
	}

	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret)
		goto error;

	cm_format_sidr_rep((struct cm_sidr_rep_msg *) msg->mad, cm_id_priv,
			   param);
	ret = ib_post_send_mad(msg, NULL);
	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		return ret;
	}
	cm_id->state = IB_CM_IDLE;
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	spin_lock_irqsave(&cm.lock, flags);
	if (!RB_EMPTY_NODE(&cm_id_priv->sidr_id_node)) {
		rb_erase(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
		RB_CLEAR_NODE(&cm_id_priv->sidr_id_node);
	}
	spin_unlock_irqrestore(&cm.lock, flags);
	return 0;

error:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_sidr_rep);
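
/*
 * Illustrative sketch (not part of this file): the listening side answers a
 * SIDR REQ from its cm_handler with its UD QP's number and Q_Key.  "qp" and
 * "EXAMPLE_QKEY" are hypothetical: a UD QP owned by the service and the
 * Q_Key it advertises.
 *
 *	struct ib_cm_sidr_rep_param rep = {
 *		.qp_num	= qp->qp_num,
 *		.qkey	= EXAMPLE_QKEY,
 *		.status	= IB_SIDR_SUCCESS,
 *	};
 *
 *	ret = ib_send_cm_sidr_rep(cm_id, &rep);
 */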
static void cm_format_sidr_rep_event(struct cm_work *work)
{
	struct cm_sidr_rep_msg *sidr_rep_msg;
	struct ib_cm_sidr_rep_event_param *param;

	sidr_rep_msg = (struct cm_sidr_rep_msg *)
				work->mad_recv_wc->recv_buf.mad;
	param = &work->cm_event.param.sidr_rep_rcvd;
	param->status = sidr_rep_msg->status;
	param->qkey = be32_to_cpu(sidr_rep_msg->qkey);
	param->qpn = be32_to_cpu(cm_sidr_rep_get_qpn(sidr_rep_msg));
	param->info = &sidr_rep_msg->info;
	param->info_len = sidr_rep_msg->info_length;
	work->cm_event.private_data = &sidr_rep_msg->private_data;
}
static int cm_sidr_rep_handler(struct cm_work *work)
{
	struct cm_sidr_rep_msg *sidr_rep_msg;
	struct cm_id_private *cm_id_priv;

	sidr_rep_msg = (struct cm_sidr_rep_msg *)
				work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(sidr_rep_msg->request_id, 0);
	if (!cm_id_priv)
		return -EINVAL; /* Unmatched reply. */

	spin_lock_irq(&cm_id_priv->lock);
	if (cm_id_priv->id.state != IB_CM_SIDR_REQ_SENT) {
		spin_unlock_irq(&cm_id_priv->lock);
		goto out;
	}
	cm_id_priv->id.state = IB_CM_IDLE;
	ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
	spin_unlock_irq(&cm_id_priv->lock);

	cm_format_sidr_rep_event(work);
	cm_process_work(cm_id_priv, work);
	return 0;
out:
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}
static void cm_process_send_error(struct ib_mad_send_buf *msg,
				  enum ib_wc_status wc_status)
{
	struct cm_id_private *cm_id_priv;
	struct ib_cm_event cm_event;
	enum ib_cm_state state;
	int ret;

	memset(&cm_event, 0, sizeof cm_event);
	cm_id_priv = msg->context[0];

	/* Discard old sends or ones without a response. */
	spin_lock_irq(&cm_id_priv->lock);
	state = (enum ib_cm_state) (unsigned long) msg->context[1];
	if (msg != cm_id_priv->msg || state != cm_id_priv->id.state)
		goto discard;

	switch (state) {
	case IB_CM_REQ_SENT:
	case IB_CM_MRA_REQ_RCVD:
		cm_reset_to_idle(cm_id_priv);
		cm_event.event = IB_CM_REQ_ERROR;
		break;
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
		cm_reset_to_idle(cm_id_priv);
		cm_event.event = IB_CM_REP_ERROR;
		break;
	case IB_CM_DREQ_SENT:
		cm_enter_timewait(cm_id_priv);
		cm_event.event = IB_CM_DREQ_ERROR;
		break;
	case IB_CM_SIDR_REQ_SENT:
		cm_id_priv->id.state = IB_CM_IDLE;
		cm_event.event = IB_CM_SIDR_REQ_ERROR;
		break;
	default:
		goto discard;
	}
	spin_unlock_irq(&cm_id_priv->lock);
	cm_event.param.send_status = wc_status;

	/* No other events can occur on the cm_id at this point. */
	ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &cm_event);
	cm_free_msg(msg);
	if (ret)
		ib_destroy_cm_id(&cm_id_priv->id);
	return;
discard:
	spin_unlock_irq(&cm_id_priv->lock);
	cm_free_msg(msg);
}

static void cm_send_handler(struct ib_mad_agent *mad_agent,
			    struct ib_mad_send_wc *mad_send_wc)
{
	struct ib_mad_send_buf *msg = mad_send_wc->send_buf;
	struct cm_port *port;
	u16 attr_index;

	port = mad_agent->context;
	attr_index = be16_to_cpu(((struct ib_mad_hdr *)
				  msg->mad)->attr_id) - CM_ATTR_ID_OFFSET;

	/*
	 * If the send was in response to a received message (context[0] is not
	 * set to a cm_id), and is not a REJ, then it is a send that was
	 * manually retried.
	 */
	if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
		msg->retries = 1;

	atomic_long_add(1 + msg->retries,
			&port->counter_group[CM_XMIT].counter[attr_index]);
	if (msg->retries)
		atomic_long_add(msg->retries,
				&port->counter_group[CM_XMIT_RETRIES].
				counter[attr_index]);

	switch (mad_send_wc->status) {
	case IB_WC_SUCCESS:
	case IB_WC_WR_FLUSH_ERR:
		cm_free_msg(msg);
		break;
	default:
		if (msg->context[0] && msg->context[1])
			cm_process_send_error(msg, mad_send_wc->status);
		else
			cm_free_msg(msg);
		break;
	}
}
static void cm_work_handler(struct work_struct *_work)
{
	struct cm_work *work = container_of(_work, struct cm_work, work.work);
	int ret;

	switch (work->cm_event.event) {
	case IB_CM_REQ_RECEIVED:
		ret = cm_req_handler(work);
		break;
	case IB_CM_MRA_RECEIVED:
		ret = cm_mra_handler(work);
		break;
	case IB_CM_REJ_RECEIVED:
		ret = cm_rej_handler(work);
		break;
	case IB_CM_REP_RECEIVED:
		ret = cm_rep_handler(work);
		break;
	case IB_CM_RTU_RECEIVED:
		ret = cm_rtu_handler(work);
		break;
	case IB_CM_USER_ESTABLISHED:
		ret = cm_establish_handler(work);
		break;
	case IB_CM_DREQ_RECEIVED:
		ret = cm_dreq_handler(work);
		break;
	case IB_CM_DREP_RECEIVED:
		ret = cm_drep_handler(work);
		break;
	case IB_CM_SIDR_REQ_RECEIVED:
		ret = cm_sidr_req_handler(work);
		break;
	case IB_CM_SIDR_REP_RECEIVED:
		ret = cm_sidr_rep_handler(work);
		break;
	case IB_CM_LAP_RECEIVED:
		ret = cm_lap_handler(work);
		break;
	case IB_CM_APR_RECEIVED:
		ret = cm_apr_handler(work);
		break;
	case IB_CM_TIMEWAIT_EXIT:
		ret = cm_timewait_handler(work);
		break;
	default:
		ret = -EINVAL;
		break;
	}
	if (ret)
		cm_free_work(work);
}
static int cm_establish(struct ib_cm_id *cm_id)
{
	struct cm_id_private *cm_id_priv;
	struct cm_work *work;
	unsigned long flags;
	int ret = 0;
	struct cm_device *cm_dev;

	cm_dev = ib_get_client_data(cm_id->device, &cm_client);
	if (!cm_dev)
		return -ENODEV;

	work = kmalloc(sizeof *work, GFP_ATOMIC);
	if (!work)
		return -ENOMEM;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id->state)
	{
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
		cm_id->state = IB_CM_ESTABLISHED;
		break;
	case IB_CM_ESTABLISHED:
		ret = -EISCONN;
		break;
	default:
		ret = -EINVAL;
		break;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	if (ret) {
		kfree(work);
		goto out;
	}

	/*
	 * The CM worker thread may try to destroy the cm_id before it
	 * can execute this work item.  To prevent potential deadlock,
	 * we need to find the cm_id once we're in the context of the
	 * worker thread, rather than holding a reference on it.
	 */
	INIT_DELAYED_WORK(&work->work, cm_work_handler);
	work->local_id = cm_id->local_id;
	work->remote_id = cm_id->remote_id;
	work->mad_recv_wc = NULL;
	work->cm_event.event = IB_CM_USER_ESTABLISHED;

	/* Check if the device started its remove_one */
	spin_lock_irqsave(&cm.lock, flags);
	if (!cm_dev->going_down) {
		queue_delayed_work(cm.wq, &work->work, 0);
	} else {
		kfree(work);
		ret = -ENODEV;
	}
	spin_unlock_irqrestore(&cm.lock, flags);

out:
	return ret;
}

static int cm_migrate(struct ib_cm_id *cm_id)
{
	struct cm_id_private *cm_id_priv;
	struct cm_av tmp_av;
	unsigned long flags;
	int tmp_send_port_not_ready;
	int ret = 0;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state == IB_CM_ESTABLISHED &&
	    (cm_id->lap_state == IB_CM_LAP_UNINIT ||
	     cm_id->lap_state == IB_CM_LAP_IDLE)) {
		cm_id->lap_state = IB_CM_LAP_IDLE;
		/* Swap address vector */
		tmp_av = cm_id_priv->av;
		cm_id_priv->av = cm_id_priv->alt_av;
		cm_id_priv->alt_av = tmp_av;
		/* Swap port send ready state */
		tmp_send_port_not_ready = cm_id_priv->prim_send_port_not_ready;
		cm_id_priv->prim_send_port_not_ready = cm_id_priv->altr_send_port_not_ready;
		cm_id_priv->altr_send_port_not_ready = tmp_send_port_not_ready;
	} else
		ret = -EINVAL;
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	return ret;
}
int ib_cm_notify(struct ib_cm_id *cm_id, enum ib_event_type event)
{
	int ret;

	switch (event) {
	case IB_EVENT_COMM_EST:
		ret = cm_establish(cm_id);
		break;
	case IB_EVENT_PATH_MIG:
		ret = cm_migrate(cm_id);
		break;
	default:
		ret = -EINVAL;
	}
	return ret;
}
EXPORT_SYMBOL(ib_cm_notify);
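
/*
 * Illustrative sketch (not part of this file): ib_cm_notify() is normally
 * wired into the QP's async event handler, so that a communication-
 * established or path-migrated event seen by the hardware is folded back
 * into the CM state machine.  "struct example_ctx" and "ctx->cm_id" are a
 * hypothetical per-QP context.
 *
 *	static void example_qp_event(struct ib_event *event, void *context)
 *	{
 *		struct example_ctx *ctx = context;
 *
 *		if (event->event == IB_EVENT_COMM_EST ||
 *		    event->event == IB_EVENT_PATH_MIG)
 *			ib_cm_notify(ctx->cm_id, event->event);
 *	}
 */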
static void cm_recv_handler(struct ib_mad_agent *mad_agent,
			    struct ib_mad_send_buf *send_buf,
			    struct ib_mad_recv_wc *mad_recv_wc)
{
	struct cm_port *port = mad_agent->context;
	struct cm_work *work;
	enum ib_cm_event_type event;
	u16 attr_id;
	int paths = 0;
	int going_down = 0;

	switch (mad_recv_wc->recv_buf.mad->mad_hdr.attr_id) {
	case CM_REQ_ATTR_ID:
		paths = 1 + (((struct cm_req_msg *) mad_recv_wc->recv_buf.mad)->
						    alt_local_lid != 0);
		event = IB_CM_REQ_RECEIVED;
		break;
	case CM_MRA_ATTR_ID:
		event = IB_CM_MRA_RECEIVED;
		break;
	case CM_REJ_ATTR_ID:
		event = IB_CM_REJ_RECEIVED;
		break;
	case CM_REP_ATTR_ID:
		event = IB_CM_REP_RECEIVED;
		break;
	case CM_RTU_ATTR_ID:
		event = IB_CM_RTU_RECEIVED;
		break;
	case CM_DREQ_ATTR_ID:
		event = IB_CM_DREQ_RECEIVED;
		break;
	case CM_DREP_ATTR_ID:
		event = IB_CM_DREP_RECEIVED;
		break;
	case CM_SIDR_REQ_ATTR_ID:
		event = IB_CM_SIDR_REQ_RECEIVED;
		break;
	case CM_SIDR_REP_ATTR_ID:
		event = IB_CM_SIDR_REP_RECEIVED;
		break;
	case CM_LAP_ATTR_ID:
		paths = 1;
		event = IB_CM_LAP_RECEIVED;
		break;
	case CM_APR_ATTR_ID:
		event = IB_CM_APR_RECEIVED;
		break;
	default:
		ib_free_recv_mad(mad_recv_wc);
		return;
	}

	attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
	atomic_long_inc(&port->counter_group[CM_RECV].
			counter[attr_id - CM_ATTR_ID_OFFSET]);

	work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
		       GFP_KERNEL);
	if (!work) {
		ib_free_recv_mad(mad_recv_wc);
		return;
	}

	INIT_DELAYED_WORK(&work->work, cm_work_handler);
	work->cm_event.event = event;
	work->mad_recv_wc = mad_recv_wc;
	work->port = port;

	/* Check if the device started its remove_one */
	spin_lock_irq(&cm.lock);
	if (!port->cm_dev->going_down)
		queue_delayed_work(cm.wq, &work->work, 0);
	else
		going_down = 1;
	spin_unlock_irq(&cm.lock);

	if (going_down) {
		kfree(work);
		ib_free_recv_mad(mad_recv_wc);
	}
}
static int cm_init_qp_init_attr(struct cm_id_private *cm_id_priv,
				struct ib_qp_attr *qp_attr,
				int *qp_attr_mask)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->id.state) {
	case IB_CM_REQ_SENT:
	case IB_CM_MRA_REQ_RCVD:
	case IB_CM_REQ_RCVD:
	case IB_CM_MRA_REQ_SENT:
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
	case IB_CM_ESTABLISHED:
		*qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS |
				IB_QP_PKEY_INDEX | IB_QP_PORT;
		qp_attr->qp_access_flags = IB_ACCESS_REMOTE_WRITE;
		if (cm_id_priv->responder_resources)
			qp_attr->qp_access_flags |= IB_ACCESS_REMOTE_READ |
						    IB_ACCESS_REMOTE_ATOMIC;
		qp_attr->pkey_index = cm_id_priv->av.pkey_index;
		qp_attr->port_num = cm_id_priv->av.port->port_num;
		ret = 0;
		break;
	default:
		ret = -EINVAL;
		break;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}

static int cm_init_qp_rtr_attr(struct cm_id_private *cm_id_priv,
			       struct ib_qp_attr *qp_attr,
			       int *qp_attr_mask)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->id.state) {
	case IB_CM_REQ_RCVD:
	case IB_CM_MRA_REQ_SENT:
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
	case IB_CM_ESTABLISHED:
		*qp_attr_mask = IB_QP_STATE | IB_QP_AV | IB_QP_PATH_MTU |
				IB_QP_DEST_QPN | IB_QP_RQ_PSN;
		qp_attr->ah_attr = cm_id_priv->av.ah_attr;
		qp_attr->path_mtu = cm_id_priv->path_mtu;
		qp_attr->dest_qp_num = be32_to_cpu(cm_id_priv->remote_qpn);
		qp_attr->rq_psn = be32_to_cpu(cm_id_priv->rq_psn);
		if (cm_id_priv->qp_type == IB_QPT_RC ||
		    cm_id_priv->qp_type == IB_QPT_XRC_TGT) {
			*qp_attr_mask |= IB_QP_MAX_DEST_RD_ATOMIC |
					 IB_QP_MIN_RNR_TIMER;
			qp_attr->max_dest_rd_atomic =
					cm_id_priv->responder_resources;
			qp_attr->min_rnr_timer = 0;
		}
		if (cm_id_priv->alt_av.ah_attr.dlid) {
			*qp_attr_mask |= IB_QP_ALT_PATH;
			qp_attr->alt_port_num = cm_id_priv->alt_av.port->port_num;
			qp_attr->alt_pkey_index = cm_id_priv->alt_av.pkey_index;
			qp_attr->alt_timeout = cm_id_priv->alt_av.timeout;
			qp_attr->alt_ah_attr = cm_id_priv->alt_av.ah_attr;
		}
		ret = 0;
		break;
	default:
		ret = -EINVAL;
		break;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
static int cm_init_qp_rts_attr(struct cm_id_private *cm_id_priv,
			       struct ib_qp_attr *qp_attr,
			       int *qp_attr_mask)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->id.state) {
	/* Allow transition to RTS before sending REP */
	case IB_CM_REQ_RCVD:
	case IB_CM_MRA_REQ_SENT:
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
	case IB_CM_ESTABLISHED:
		if (cm_id_priv->id.lap_state == IB_CM_LAP_UNINIT) {
			*qp_attr_mask = IB_QP_STATE | IB_QP_SQ_PSN;
			qp_attr->sq_psn = be32_to_cpu(cm_id_priv->sq_psn);
			switch (cm_id_priv->qp_type) {
			case IB_QPT_RC:
			case IB_QPT_XRC_INI:
				*qp_attr_mask |= IB_QP_RETRY_CNT | IB_QP_RNR_RETRY |
						 IB_QP_MAX_QP_RD_ATOMIC;
				qp_attr->retry_cnt = cm_id_priv->retry_count;
				qp_attr->rnr_retry = cm_id_priv->rnr_retry_count;
				qp_attr->max_rd_atomic = cm_id_priv->initiator_depth;
				/* fall through */
			case IB_QPT_XRC_TGT:
				*qp_attr_mask |= IB_QP_TIMEOUT;
				qp_attr->timeout = cm_id_priv->av.timeout;
				break;
			default:
				break;
			}
			if (cm_id_priv->alt_av.ah_attr.dlid) {
				*qp_attr_mask |= IB_QP_PATH_MIG_STATE;
				qp_attr->path_mig_state = IB_MIG_REARM;
			}
		} else {
			*qp_attr_mask = IB_QP_ALT_PATH | IB_QP_PATH_MIG_STATE;
			qp_attr->alt_port_num = cm_id_priv->alt_av.port->port_num;
			qp_attr->alt_pkey_index = cm_id_priv->alt_av.pkey_index;
			qp_attr->alt_timeout = cm_id_priv->alt_av.timeout;
			qp_attr->alt_ah_attr = cm_id_priv->alt_av.ah_attr;
			qp_attr->path_mig_state = IB_MIG_REARM;
		}
		ret = 0;
		break;
	default:
		ret = -EINVAL;
		break;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
int ib_cm_init_qp_attr(struct ib_cm_id *cm_id,
		       struct ib_qp_attr *qp_attr,
		       int *qp_attr_mask)
{
	struct cm_id_private *cm_id_priv;
	int ret;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	switch (qp_attr->qp_state) {
	case IB_QPS_INIT:
		ret = cm_init_qp_init_attr(cm_id_priv, qp_attr, qp_attr_mask);
		break;
	case IB_QPS_RTR:
		ret = cm_init_qp_rtr_attr(cm_id_priv, qp_attr, qp_attr_mask);
		break;
	case IB_QPS_RTS:
		ret = cm_init_qp_rts_attr(cm_id_priv, qp_attr, qp_attr_mask);
		break;
	default:
		ret = -EINVAL;
		break;
	}
	return ret;
}
EXPORT_SYMBOL(ib_cm_init_qp_attr);
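
/*
 * Illustrative sketch (not part of this file): a ULP drives its QP through
 * INIT -> RTR -> RTS by asking the CM for the attributes appropriate to the
 * current connection state at each step, then applying them with
 * ib_modify_qp().  "example_modify_qp_state" is a hypothetical helper.
 *
 *	static int example_modify_qp_state(struct ib_cm_id *cm_id,
 *					   struct ib_qp *qp,
 *					   enum ib_qp_state state)
 *	{
 *		struct ib_qp_attr qp_attr;
 *		int qp_attr_mask, ret;
 *
 *		qp_attr.qp_state = state;
 *		ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
 *		if (ret)
 *			return ret;
 *		return ib_modify_qp(qp, &qp_attr, qp_attr_mask);
 *	}
 */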
static ssize_t cm_show_counter(struct kobject *obj, struct attribute *attr,
			       char *buf)
{
	struct cm_counter_group *group;
	struct cm_counter_attribute *cm_attr;

	group = container_of(obj, struct cm_counter_group, obj);
	cm_attr = container_of(attr, struct cm_counter_attribute, attr);

	return sprintf(buf, "%ld\n",
		       atomic_long_read(&group->counter[cm_attr->index]));
}

static const struct sysfs_ops cm_counter_ops = {
	.show = cm_show_counter
};

static struct kobj_type cm_counter_obj_type = {
	.sysfs_ops = &cm_counter_ops,
	.default_attrs = cm_counter_default_attrs
};

static void cm_release_port_obj(struct kobject *obj)
{
	struct cm_port *cm_port;

	cm_port = container_of(obj, struct cm_port, port_obj);
	kfree(cm_port);
}

static struct kobj_type cm_port_obj_type = {
	.release = cm_release_port_obj
};

static char *cm_devnode(struct device *dev, umode_t *mode)
{
	if (mode)
		*mode = 0666;
	return kasprintf(GFP_KERNEL, "infiniband/%s", dev_name(dev));
}

struct class cm_class = {
	.owner   = THIS_MODULE,
	.name    = "infiniband_cm",
	.devnode = cm_devnode,
};
EXPORT_SYMBOL(cm_class);

static int cm_create_port_fs(struct cm_port *port)
{
	int i, ret;

	ret = kobject_init_and_add(&port->port_obj, &cm_port_obj_type,
				   &port->cm_dev->device->kobj,
				   "%d", port->port_num);
	if (ret) {
		kfree(port);
		return ret;
	}

	for (i = 0; i < CM_COUNTER_GROUPS; i++) {
		ret = kobject_init_and_add(&port->counter_group[i].obj,
					   &cm_counter_obj_type,
					   &port->port_obj,
					   "%s", counter_group_names[i]);
		if (ret)
			goto error;
	}

	return 0;

error:
	while (i--)
		kobject_put(&port->counter_group[i].obj);
	kobject_put(&port->port_obj);
	return ret;
}

static void cm_remove_port_fs(struct cm_port *port)
{
	int i;

	for (i = 0; i < CM_COUNTER_GROUPS; i++)
		kobject_put(&port->counter_group[i].obj);

	kobject_put(&port->port_obj);
}
static void cm_add_one(struct ib_device *ib_device)
{
	struct cm_device *cm_dev;
	struct cm_port *port;
	struct ib_mad_reg_req reg_req = {
		.mgmt_class = IB_MGMT_CLASS_CM,
		.mgmt_class_version = IB_CM_CLASS_VERSION,
	};
	struct ib_port_modify port_modify = {
		.set_port_cap_mask = IB_PORT_CM_SUP
	};
	unsigned long flags;
	int ret;
	int count = 0;
	u8 i;

	cm_dev = kzalloc(sizeof(*cm_dev) + sizeof(*port) *
			 ib_device->phys_port_cnt, GFP_KERNEL);
	if (!cm_dev)
		return;

	cm_dev->ib_device = ib_device;
	cm_dev->ack_delay = ib_device->attrs.local_ca_ack_delay;
	cm_dev->going_down = 0;
	cm_dev->device = device_create(&cm_class, &ib_device->dev,
				       MKDEV(0, 0), NULL,
				       "%s", ib_device->name);
	if (IS_ERR(cm_dev->device)) {
		kfree(cm_dev);
		return;
	}

	set_bit(IB_MGMT_METHOD_SEND, reg_req.method_mask);
	for (i = 1; i <= ib_device->phys_port_cnt; i++) {
		if (!rdma_cap_ib_cm(ib_device, i))
			continue;

		port = kzalloc(sizeof *port, GFP_KERNEL);
		if (!port)
			goto error1;

		cm_dev->port[i-1] = port;
		port->cm_dev = cm_dev;
		port->port_num = i;

		INIT_LIST_HEAD(&port->cm_priv_prim_list);
		INIT_LIST_HEAD(&port->cm_priv_altr_list);

		ret = cm_create_port_fs(port);
		if (ret)
			goto error1;

		port->mad_agent = ib_register_mad_agent(ib_device, i,
							IB_QPT_GSI,
							&reg_req,
							0,
							cm_send_handler,
							cm_recv_handler,
							port,
							0);
		if (IS_ERR(port->mad_agent))
			goto error2;

		ret = ib_modify_port(ib_device, i, 0, &port_modify);
		if (ret)
			goto error3;

		count++;
	}

	if (!count)
		goto free;

	ib_set_client_data(ib_device, &cm_client, cm_dev);

	write_lock_irqsave(&cm.device_lock, flags);
	list_add_tail(&cm_dev->list, &cm.device_list);
	write_unlock_irqrestore(&cm.device_lock, flags);
	return;

error3:
	ib_unregister_mad_agent(port->mad_agent);
error2:
	cm_remove_port_fs(port);
error1:
	port_modify.set_port_cap_mask = 0;
	port_modify.clr_port_cap_mask = IB_PORT_CM_SUP;
	while (--i) {
		if (!rdma_cap_ib_cm(ib_device, i))
			continue;

		port = cm_dev->port[i-1];
		ib_modify_port(ib_device, port->port_num, 0, &port_modify);
		ib_unregister_mad_agent(port->mad_agent);
		cm_remove_port_fs(port);
	}
free:
	device_unregister(cm_dev->device);
	kfree(cm_dev);
}
static void cm_remove_one(struct ib_device *ib_device, void *client_data)
{
	struct cm_device *cm_dev = client_data;
	struct cm_port *port;
	struct cm_id_private *cm_id_priv;
	struct ib_mad_agent *cur_mad_agent;
	struct ib_port_modify port_modify = {
		.clr_port_cap_mask = IB_PORT_CM_SUP
	};
	unsigned long flags;
	int i;

	if (!cm_dev)
		return;

	write_lock_irqsave(&cm.device_lock, flags);
	list_del(&cm_dev->list);
	write_unlock_irqrestore(&cm.device_lock, flags);

	spin_lock_irq(&cm.lock);
	cm_dev->going_down = 1;
	spin_unlock_irq(&cm.lock);

	for (i = 1; i <= ib_device->phys_port_cnt; i++) {
		if (!rdma_cap_ib_cm(ib_device, i))
			continue;

		port = cm_dev->port[i-1];
		ib_modify_port(ib_device, port->port_num, 0, &port_modify);
		/* Mark all the cm_id's as not valid */
		spin_lock_irq(&cm.lock);
		list_for_each_entry(cm_id_priv, &port->cm_priv_altr_list, altr_list)
			cm_id_priv->altr_send_port_not_ready = 1;
		list_for_each_entry(cm_id_priv, &port->cm_priv_prim_list, prim_list)
			cm_id_priv->prim_send_port_not_ready = 1;
		spin_unlock_irq(&cm.lock);
		/*
		 * Flush the workqueue only after going_down is set; this
		 * guarantees that the receive handler queues no new work,
		 * after which it is safe to unregister the MAD agent.
		 */
		flush_workqueue(cm.wq);
		spin_lock_irq(&cm.state_lock);
		cur_mad_agent = port->mad_agent;
		port->mad_agent = NULL;
		spin_unlock_irq(&cm.state_lock);
		ib_unregister_mad_agent(cur_mad_agent);
		cm_remove_port_fs(port);
	}

	device_unregister(cm_dev->device);
	kfree(cm_dev);
}
static int __init ib_cm_init(void)
{
	int ret;

	memset(&cm, 0, sizeof cm);
	INIT_LIST_HEAD(&cm.device_list);
	rwlock_init(&cm.device_lock);
	spin_lock_init(&cm.lock);
	spin_lock_init(&cm.state_lock);
	cm.listen_service_table = RB_ROOT;
	cm.listen_service_id = be64_to_cpu(IB_CM_ASSIGN_SERVICE_ID);
	cm.remote_id_table = RB_ROOT;
	cm.remote_qp_table = RB_ROOT;
	cm.remote_sidr_table = RB_ROOT;
	idr_init(&cm.local_id_table);
	get_random_bytes(&cm.random_id_operand, sizeof cm.random_id_operand);
	INIT_LIST_HEAD(&cm.timewait_list);

	ret = class_register(&cm_class);
	if (ret) {
		ret = -ENOMEM;
		goto error1;
	}

	cm.wq = create_workqueue("ib_cm");
	if (!cm.wq) {
		ret = -ENOMEM;
		goto error2;
	}

	ret = ib_register_client(&cm_client);
	if (ret)
		goto error3;

	return 0;
error3:
	destroy_workqueue(cm.wq);
error2:
	class_unregister(&cm_class);
error1:
	idr_destroy(&cm.local_id_table);
	return ret;
}

static void __exit ib_cm_cleanup(void)
{
	struct cm_timewait_info *timewait_info, *tmp;

	spin_lock_irq(&cm.lock);
	list_for_each_entry(timewait_info, &cm.timewait_list, list)
		cancel_delayed_work(&timewait_info->work.work);
	spin_unlock_irq(&cm.lock);

	ib_unregister_client(&cm_client);
	destroy_workqueue(cm.wq);

	list_for_each_entry_safe(timewait_info, tmp, &cm.timewait_list, list) {
		list_del(&timewait_info->list);
		kfree(timewait_info);
	}

	class_unregister(&cm_class);
	idr_destroy(&cm.local_id_table);
}

module_init(ib_cm_init);
module_exit(ib_cm_cleanup);