/*
 * Copyright (c) 2012 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <rdma/ib_mad.h>

#include <linux/mlx4/cmd.h>
#include <linux/rbtree.h>
#include <linux/idr.h>
#include <rdma/ib_cm.h>

#include "mlx4_ib.h"

#define CM_CLEANUP_CACHE_TIMEOUT  (30 * HZ)

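/*
 * An id_map_entry maps a slave's local cm_id (sl_cm_id) for a given
 * slave (slave_id) to a host-wide paravirtualized cm_id (pv_cm_id).
 * Entries are kept both in an rb-tree keyed by (sl_cm_id, slave_id)
 * and in an xarray keyed by pv_cm_id, so the mapping can be resolved
 * in either direction.
 */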
struct id_map_entry {
	struct rb_node node;

	u32 sl_cm_id;
	u32 pv_cm_id;
	int slave_id;
	int scheduled_delete;
	struct mlx4_ib_dev *dev;

	struct list_head list;
	struct delayed_work timeout;
};

struct rej_tmout_entry {
	int slave;
	u32 rem_pv_cm_id;
	struct delayed_work timeout;
	struct xarray *xa_rej_tmout;
};

struct cm_generic_msg {
	struct ib_mad_hdr hdr;

	__be32 local_comm_id;
	__be32 remote_comm_id;
	unsigned char unused[2];
	__be16 rej_reason;
};

struct cm_sidr_generic_msg {
	struct ib_mad_hdr hdr;
	__be32 request_id;
};

struct cm_req_msg {
	unsigned char unused[0x60];
	union ib_gid primary_path_sgid;
};

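/*
 * Most CM MADs carry the local and remote comm_id at fixed offsets
 * after the MAD header; SIDR REQ/REP carry a single request_id
 * instead.  The helpers below read and patch those fields in place.
 */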
static void set_local_comm_id(struct ib_mad *mad, u32 cm_id)
{
	if (mad->mad_hdr.attr_id == CM_SIDR_REQ_ATTR_ID) {
		struct cm_sidr_generic_msg *msg =
			(struct cm_sidr_generic_msg *)mad;
		msg->request_id = cpu_to_be32(cm_id);
	} else if (mad->mad_hdr.attr_id == CM_SIDR_REP_ATTR_ID) {
		pr_err("trying to set local_comm_id in SIDR_REP\n");
		return;
	} else {
		struct cm_generic_msg *msg = (struct cm_generic_msg *)mad;
		msg->local_comm_id = cpu_to_be32(cm_id);
	}
}

static u32 get_local_comm_id(struct ib_mad *mad)
{
	if (mad->mad_hdr.attr_id == CM_SIDR_REQ_ATTR_ID) {
		struct cm_sidr_generic_msg *msg =
			(struct cm_sidr_generic_msg *)mad;
		return be32_to_cpu(msg->request_id);
	} else if (mad->mad_hdr.attr_id == CM_SIDR_REP_ATTR_ID) {
		pr_err("trying to get local_comm_id in SIDR_REP\n");
		return -1;
	} else {
		struct cm_generic_msg *msg = (struct cm_generic_msg *)mad;
		return be32_to_cpu(msg->local_comm_id);
	}
}

static void set_remote_comm_id(struct ib_mad *mad, u32 cm_id)
{
	if (mad->mad_hdr.attr_id == CM_SIDR_REP_ATTR_ID) {
		struct cm_sidr_generic_msg *msg =
			(struct cm_sidr_generic_msg *)mad;
		msg->request_id = cpu_to_be32(cm_id);
	} else if (mad->mad_hdr.attr_id == CM_SIDR_REQ_ATTR_ID) {
		pr_err("trying to set remote_comm_id in SIDR_REQ\n");
		return;
	} else {
		struct cm_generic_msg *msg = (struct cm_generic_msg *)mad;
		msg->remote_comm_id = cpu_to_be32(cm_id);
	}
}

static u32 get_remote_comm_id(struct ib_mad *mad)
{
	if (mad->mad_hdr.attr_id == CM_SIDR_REP_ATTR_ID) {
		struct cm_sidr_generic_msg *msg =
			(struct cm_sidr_generic_msg *)mad;
		return be32_to_cpu(msg->request_id);
	} else if (mad->mad_hdr.attr_id == CM_SIDR_REQ_ATTR_ID) {
		pr_err("trying to get remote_comm_id in SIDR_REQ\n");
		return -1;
	} else {
		struct cm_generic_msg *msg = (struct cm_generic_msg *)mad;
		return be32_to_cpu(msg->remote_comm_id);
	}
}

static union ib_gid gid_from_req_msg(struct ib_device *ibdev, struct ib_mad *mad)
{
	struct cm_req_msg *msg = (struct cm_req_msg *)mad;

	return msg->primary_path_sgid;
}

/* Caller must hold sriov->id_map_lock */
static struct id_map_entry *
id_map_find_by_sl_id(struct ib_device *ibdev, u32 slave_id, u32 sl_cm_id)
{
	struct rb_root *sl_id_map = &to_mdev(ibdev)->sriov.sl_id_map;
	struct rb_node *node = sl_id_map->rb_node;

	while (node) {
		struct id_map_entry *id_map_entry =
			rb_entry(node, struct id_map_entry, node);

		if (id_map_entry->sl_cm_id > sl_cm_id)
			node = node->rb_left;
		else if (id_map_entry->sl_cm_id < sl_cm_id)
			node = node->rb_right;
		else if (id_map_entry->slave_id > slave_id)
			node = node->rb_left;
		else if (id_map_entry->slave_id < slave_id)
			node = node->rb_right;
		else
			return id_map_entry;
	}

	return NULL;
}

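/*
 * Delayed-work handler: once an entry's cleanup timeout fires, drop
 * it from both the pv_cm_id xarray and the sl_id_map rb-tree and
 * free it.
 */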
static void id_map_ent_timeout(struct work_struct *work)
{
	struct delayed_work *delay = to_delayed_work(work);
	struct id_map_entry *ent = container_of(delay, struct id_map_entry, timeout);
	struct id_map_entry *found_ent;
	struct mlx4_ib_dev *dev = ent->dev;
	struct mlx4_ib_sriov *sriov = &dev->sriov;
	struct rb_root *sl_id_map = &sriov->sl_id_map;

	spin_lock(&sriov->id_map_lock);
	if (!xa_erase(&sriov->pv_id_table, ent->pv_cm_id))
		goto out;
	found_ent = id_map_find_by_sl_id(&dev->ib_dev, ent->slave_id, ent->sl_cm_id);
	if (found_ent && found_ent == ent)
		rb_erase(&found_ent->node, sl_id_map);

out:
	list_del(&ent->list);
	spin_unlock(&sriov->id_map_lock);
	kfree(ent);
}

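/*
 * Insert an entry into the sl_id_map rb-tree, ordered by sl_cm_id
 * first and slave_id second.  An existing entry for the same pair is
 * replaced in place.
 */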
static void sl_id_map_add(struct ib_device *ibdev, struct id_map_entry *new)
{
	struct rb_root *sl_id_map = &to_mdev(ibdev)->sriov.sl_id_map;
	struct rb_node **link = &sl_id_map->rb_node, *parent = NULL;
	struct id_map_entry *ent;
	int slave_id = new->slave_id;
	int sl_cm_id = new->sl_cm_id;

	ent = id_map_find_by_sl_id(ibdev, slave_id, sl_cm_id);
	if (ent) {
		pr_debug("overriding existing sl_id_map entry (cm_id = %x)\n",
			 sl_cm_id);

		rb_replace_node(&ent->node, &new->node, sl_id_map);
		return;
	}

	/* Go to the bottom of the tree */
	while (*link) {
		parent = *link;
		ent = rb_entry(parent, struct id_map_entry, node);

		if (ent->sl_cm_id > sl_cm_id ||
		    (ent->sl_cm_id == sl_cm_id && ent->slave_id > slave_id))
			link = &(*link)->rb_left;
		else
			link = &(*link)->rb_right;
	}

	rb_link_node(&new->node, parent, link);
	rb_insert_color(&new->node, sl_id_map);
}

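/*
 * Allocate a new mapping for (slave_id, sl_cm_id): a cyclically
 * allocated pv_cm_id in the xarray plus an rb-tree node for the
 * reverse lookup.
 */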
static struct id_map_entry *
id_map_alloc(struct ib_device *ibdev, int slave_id, u32 sl_cm_id)
{
	struct id_map_entry *ent;
	struct mlx4_ib_sriov *sriov = &to_mdev(ibdev)->sriov;
	int ret;

	ent = kmalloc(sizeof(struct id_map_entry), GFP_KERNEL);
	if (!ent)
		return ERR_PTR(-ENOMEM);

	ent->sl_cm_id = sl_cm_id;
	ent->slave_id = slave_id;
	ent->scheduled_delete = 0;
	ent->dev = to_mdev(ibdev);
	INIT_DELAYED_WORK(&ent->timeout, id_map_ent_timeout);

	ret = xa_alloc_cyclic(&sriov->pv_id_table, &ent->pv_cm_id, ent,
			      xa_limit_32b, &sriov->pv_id_next, GFP_KERNEL);
	if (ret >= 0) {
		spin_lock(&sriov->id_map_lock);
		sl_id_map_add(ibdev, ent);
		list_add_tail(&ent->list, &sriov->cm_list);
		spin_unlock(&sriov->id_map_lock);
		return ent;
	}

	/* error flow */
	kfree(ent);
	mlx4_ib_warn(ibdev, "Allocation failed (err:0x%x)\n", ret);
	return ERR_PTR(ret);
}

static struct id_map_entry *
id_map_get(struct ib_device *ibdev, int *pv_cm_id, int slave_id, int sl_cm_id)
{
	struct id_map_entry *ent;
	struct mlx4_ib_sriov *sriov = &to_mdev(ibdev)->sriov;

	spin_lock(&sriov->id_map_lock);
	if (*pv_cm_id == -1) {
		ent = id_map_find_by_sl_id(ibdev, slave_id, sl_cm_id);
		if (ent)
			*pv_cm_id = (int)ent->pv_cm_id;
	} else {
		ent = xa_load(&sriov->pv_id_table, *pv_cm_id);
	}
	spin_unlock(&sriov->id_map_lock);

	return ent;
}

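/*
 * Arm the cleanup timeout for a mapping, or push it out again if one
 * is already pending; skipped entirely while the device is going down.
 */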
static void schedule_delayed(struct ib_device *ibdev, struct id_map_entry *id)
{
	struct mlx4_ib_sriov *sriov = &to_mdev(ibdev)->sriov;
	unsigned long flags;

	spin_lock(&sriov->id_map_lock);
	spin_lock_irqsave(&sriov->going_down_lock, flags);
	/* make sure that there is no schedule inside the scheduled work. */
	if (!sriov->is_going_down && !id->scheduled_delete) {
		id->scheduled_delete = 1;
		schedule_delayed_work(&id->timeout, CM_CLEANUP_CACHE_TIMEOUT);
	} else if (id->scheduled_delete) {
		/* Adjust timeout if already scheduled */
		mod_delayed_work(system_wq, &id->timeout, CM_CLEANUP_CACHE_TIMEOUT);
	}
	spin_unlock_irqrestore(&sriov->going_down_lock, flags);
	spin_unlock(&sriov->id_map_lock);
}

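/*
 * Multiplex path (slave -> wire): replace the slave's local comm_id
 * in an outgoing CM MAD with its paravirtualized pv_cm_id, creating
 * the mapping on connection-establishing MADs and scheduling cleanup
 * when a DREQ goes out.
 */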
#define REJ_REASON(m) be16_to_cpu(((struct cm_generic_msg *)(m))->rej_reason)
int mlx4_ib_multiplex_cm_handler(struct ib_device *ibdev, int port, int slave_id,
				 struct ib_mad *mad)
{
	struct id_map_entry *id;
	u32 sl_cm_id;
	int pv_cm_id = -1;

	if (mad->mad_hdr.attr_id == CM_REQ_ATTR_ID ||
	    mad->mad_hdr.attr_id == CM_REP_ATTR_ID ||
	    mad->mad_hdr.attr_id == CM_MRA_ATTR_ID ||
	    mad->mad_hdr.attr_id == CM_SIDR_REQ_ATTR_ID ||
	    (mad->mad_hdr.attr_id == CM_REJ_ATTR_ID && REJ_REASON(mad) == IB_CM_REJ_TIMEOUT)) {
		sl_cm_id = get_local_comm_id(mad);
		id = id_map_get(ibdev, &pv_cm_id, slave_id, sl_cm_id);
		if (id)
			goto cont;
		id = id_map_alloc(ibdev, slave_id, sl_cm_id);
		if (IS_ERR(id)) {
			mlx4_ib_warn(ibdev, "%s: id{slave: %d, sl_cm_id: 0x%x} Failed to id_map_alloc\n",
				     __func__, slave_id, sl_cm_id);
			return PTR_ERR(id);
		}
	} else if (mad->mad_hdr.attr_id == CM_REJ_ATTR_ID ||
		   mad->mad_hdr.attr_id == CM_SIDR_REP_ATTR_ID) {
		return 0;
	} else {
		sl_cm_id = get_local_comm_id(mad);
		id = id_map_get(ibdev, &pv_cm_id, slave_id, sl_cm_id);
	}

	if (!id) {
		pr_debug("id{slave: %d, sl_cm_id: 0x%x} is NULL! attr_id: 0x%x\n",
			 slave_id, sl_cm_id, be16_to_cpu(mad->mad_hdr.attr_id));
		return -EINVAL;
	}

cont:
	set_local_comm_id(mad, id->pv_cm_id);

	if (mad->mad_hdr.attr_id == CM_DREQ_ATTR_ID)
		schedule_delayed(ibdev, id);
	return 0;
}

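/*
 * REJ-timeout tracking: remember which slave an incoming REQ was
 * forwarded to, keyed by the remote (pv) comm_id, so that a later
 * REJ with reason "timeout" can still be demultiplexed after the
 * id_map entry is gone.  Entries age out via delayed work.
 */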
static void rej_tmout_timeout(struct work_struct *work)
{
	struct delayed_work *delay = to_delayed_work(work);
	struct rej_tmout_entry *item = container_of(delay, struct rej_tmout_entry, timeout);
	struct rej_tmout_entry *deleted;

	deleted = xa_cmpxchg(item->xa_rej_tmout, item->rem_pv_cm_id, item, NULL, 0);

	if (deleted != item)
		pr_debug("deleted(%p) != item(%p)\n", deleted, item);

	kfree(item);
}

static int alloc_rej_tmout(struct mlx4_ib_sriov *sriov, u32 rem_pv_cm_id, int slave)
{
	struct rej_tmout_entry *item;
	struct rej_tmout_entry *old;
	int ret = 0;

	xa_lock(&sriov->xa_rej_tmout);
	item = xa_load(&sriov->xa_rej_tmout, (unsigned long)rem_pv_cm_id);

	if (item) {
		if (xa_err(item))
			ret = xa_err(item);
		else
			/* If a retry, adjust delayed work */
			mod_delayed_work(system_wq, &item->timeout, CM_CLEANUP_CACHE_TIMEOUT);
		goto err_or_exists;
	}
	xa_unlock(&sriov->xa_rej_tmout);

	item = kmalloc(sizeof(*item), GFP_KERNEL);
	if (!item)
		return -ENOMEM;

	INIT_DELAYED_WORK(&item->timeout, rej_tmout_timeout);
	item->slave = slave;
	item->rem_pv_cm_id = rem_pv_cm_id;
	item->xa_rej_tmout = &sriov->xa_rej_tmout;

	old = xa_cmpxchg(&sriov->xa_rej_tmout, (unsigned long)rem_pv_cm_id, NULL, item, GFP_KERNEL);
	if (old) {
		pr_debug("Non-null old entry (%p) or error (%d) when inserting\n",
			 old, xa_err(old));
		kfree(item);
		return xa_err(old);
	}

	schedule_delayed_work(&item->timeout, CM_CLEANUP_CACHE_TIMEOUT);

	return 0;

err_or_exists:
	xa_unlock(&sriov->xa_rej_tmout);
	return ret;
}

static int lookup_rej_tmout_slave(struct mlx4_ib_sriov *sriov, u32 rem_pv_cm_id)
{
	struct rej_tmout_entry *item;
	int slave;

	xa_lock(&sriov->xa_rej_tmout);
	item = xa_load(&sriov->xa_rej_tmout, (unsigned long)rem_pv_cm_id);

	if (!item || xa_err(item)) {
		pr_debug("Could not find slave. rem_pv_cm_id 0x%x error: %d\n",
			 rem_pv_cm_id, xa_err(item));
		slave = !item ? -ENOENT : xa_err(item);
	} else {
		slave = item->slave;
	}
	xa_unlock(&sriov->xa_rej_tmout);

	return slave;
}

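/*
 * Demux path (wire -> slave): route an incoming CM MAD to the right
 * slave, by GID for REQ/SIDR_REQ and by the pv_cm_id mapping
 * otherwise, restoring the slave's original comm_id in the MAD.
 */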
int mlx4_ib_demux_cm_handler(struct ib_device *ibdev, int port, int *slave,
			     struct ib_mad *mad)
{
	struct mlx4_ib_sriov *sriov = &to_mdev(ibdev)->sriov;
	u32 rem_pv_cm_id = get_local_comm_id(mad);
	u32 pv_cm_id;
	struct id_map_entry *id;
	int sts;

	if (mad->mad_hdr.attr_id == CM_REQ_ATTR_ID ||
	    mad->mad_hdr.attr_id == CM_SIDR_REQ_ATTR_ID) {
		union ib_gid gid;

		if (!slave)
			return 0;

		gid = gid_from_req_msg(ibdev, mad);
		*slave = mlx4_ib_find_real_gid(ibdev, port, gid.global.interface_id);
		if (*slave < 0) {
			mlx4_ib_warn(ibdev, "failed matching slave_id by gid (0x%llx)\n",
				     be64_to_cpu(gid.global.interface_id));
			return -ENOENT;
		}

		sts = alloc_rej_tmout(sriov, rem_pv_cm_id, *slave);
		if (sts)
			/* Even if this fails, we pass on the REQ to the slave */
			pr_debug("Could not allocate rej_tmout entry. rem_pv_cm_id 0x%x slave %d status %d\n",
				 rem_pv_cm_id, *slave, sts);

		return 0;
	}

	pv_cm_id = get_remote_comm_id(mad);
	id = id_map_get(ibdev, (int *)&pv_cm_id, -1, -1);

	if (!id) {
		if (mad->mad_hdr.attr_id == CM_REJ_ATTR_ID &&
		    REJ_REASON(mad) == IB_CM_REJ_TIMEOUT && slave) {
			*slave = lookup_rej_tmout_slave(sriov, rem_pv_cm_id);

			return (*slave < 0) ? *slave : 0;
		}
		pr_debug("Couldn't find an entry for pv_cm_id 0x%x, attr_id 0x%x\n",
			 pv_cm_id, be16_to_cpu(mad->mad_hdr.attr_id));
		return -ENOENT;
	}

	if (slave)
		*slave = id->slave_id;
	set_remote_comm_id(mad, id->sl_cm_id);

	if (mad->mad_hdr.attr_id == CM_DREQ_ATTR_ID ||
	    mad->mad_hdr.attr_id == CM_REJ_ATTR_ID)
		schedule_delayed(ibdev, id);

	return 0;
}

void mlx4_ib_cm_paravirt_init(struct mlx4_ib_dev *dev)
{
	spin_lock_init(&dev->sriov.id_map_lock);
	INIT_LIST_HEAD(&dev->sriov.cm_list);
	dev->sriov.sl_id_map = RB_ROOT;
	xa_init_flags(&dev->sriov.pv_id_table, XA_FLAGS_ALLOC);
	xa_init(&dev->sriov.xa_rej_tmout);
}

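/*
 * Expire REJ-timeout entries immediately for one slave, or for all
 * slaves when slave < 0.
 */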
static void rej_tmout_xa_cleanup(struct mlx4_ib_sriov *sriov, int slave)
{
	struct rej_tmout_entry *item;
	bool flush_needed = false;
	unsigned long id;
	int cnt = 0;

	xa_lock(&sriov->xa_rej_tmout);
	xa_for_each(&sriov->xa_rej_tmout, id, item) {
		if (slave < 0 || slave == item->slave) {
			mod_delayed_work(system_wq, &item->timeout, 0);
			flush_needed = true;
			++cnt;
		}
	}
	xa_unlock(&sriov->xa_rej_tmout);

	if (flush_needed) {
		flush_scheduled_work();
		pr_debug("Deleted %d entries in xarray for slave %d during cleanup\n",
			 cnt, slave);
	}

	if (slave < 0)
		WARN_ON(!xa_empty(&sriov->xa_rej_tmout));
}

/* slave = -1 ==> all slaves */
/* TBD -- call paravirt clean for single slave. Need for slave RESET event */
void mlx4_ib_cm_paravirt_clean(struct mlx4_ib_dev *dev, int slave)
{
	struct mlx4_ib_sriov *sriov = &dev->sriov;
	struct rb_root *sl_id_map = &sriov->sl_id_map;
	struct list_head lh;
	struct rb_node *nd;
	int need_flush = 0;
	struct id_map_entry *map, *tmp_map;

	/* cancel all delayed work queue entries */
	INIT_LIST_HEAD(&lh);
	spin_lock(&sriov->id_map_lock);
	list_for_each_entry_safe(map, tmp_map, &dev->sriov.cm_list, list) {
		if (slave < 0 || slave == map->slave_id) {
			if (map->scheduled_delete)
				need_flush |= !cancel_delayed_work(&map->timeout);
		}
	}

	spin_unlock(&sriov->id_map_lock);

	if (need_flush)
		flush_scheduled_work(); /* make sure all timers were flushed */

	/* now, remove all leftover entries from databases */
	spin_lock(&sriov->id_map_lock);
	if (slave < 0) {
		while (rb_first(sl_id_map)) {
			struct id_map_entry *ent =
				rb_entry(rb_first(sl_id_map),
					 struct id_map_entry, node);

			rb_erase(&ent->node, sl_id_map);
			xa_erase(&sriov->pv_id_table, ent->pv_cm_id);
		}
		list_splice_init(&dev->sriov.cm_list, &lh);
	} else {
		/* first, move nodes belonging to slave to db remove list */
		nd = rb_first(sl_id_map);
		while (nd) {
			struct id_map_entry *ent =
				rb_entry(nd, struct id_map_entry, node);
			nd = rb_next(nd);
			if (ent->slave_id == slave)
				list_move_tail(&ent->list, &lh);
		}

		/* remove those nodes from databases */
		list_for_each_entry_safe(map, tmp_map, &lh, list) {
			rb_erase(&map->node, sl_id_map);
			xa_erase(&sriov->pv_id_table, map->pv_cm_id);
		}

		/* add remaining nodes from cm_list */
		list_for_each_entry_safe(map, tmp_map, &dev->sriov.cm_list, list) {
			if (slave == map->slave_id)
				list_move_tail(&map->list, &lh);
		}
	}

	spin_unlock(&sriov->id_map_lock);

	/* free any map entries left behind due to cancel_delayed_work above */
	list_for_each_entry_safe(map, tmp_map, &lh, list) {
		list_del(&map->list);
		kfree(map);
	}

	rej_tmout_xa_cleanup(sriov, slave);
}