/*
 * Copyright (c) 2012 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <rdma/ib_mad.h>

#include <linux/mlx4/cmd.h>
#include <linux/rbtree.h>
#include <linux/idr.h>
#include <rdma/ib_cm.h>

#include "mlx4_ib.h"
#define CM_CLEANUP_CACHE_TIMEOUT  (30 * HZ)
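/*
 * Note (inferred from the code in this file): with SR-IOV, comm_ids chosen
 * by different slaves may collide on the wire, so this layer rewrites each
 * slave-local cm_id (sl_cm_id) to a unique paravirtual one (pv_cm_id).
 * Every mapping is indexed twice: by (sl_cm_id, slave_id) in the sl_id_map
 * rb-tree for the multiplex (slave -> wire) direction, and by pv_cm_id in
 * the pv_id_table idr for the demultiplex (wire -> slave) direction.
 */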
struct id_map_entry {
        struct rb_node node;    /* keyed by (sl_cm_id, slave_id) */

        u32 sl_cm_id;           /* comm_id chosen by the slave */
        u32 pv_cm_id;           /* unique paravirtual comm_id */
        u32 slave_id;
        int scheduled_delete;
        struct mlx4_ib_dev *dev;

        struct list_head list;
        struct delayed_work timeout;
};
struct cm_generic_msg {
        struct ib_mad_hdr hdr;

        __be32 local_comm_id;
        __be32 remote_comm_id;
};
struct cm_sidr_generic_msg {
        struct ib_mad_hdr hdr;
        __be32 request_id;
};
struct cm_req_msg {
        unsigned char unused[0x60];
        union ib_gid primary_path_sgid;
};
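/*
 * Accessors for the comm_id fields of a CM MAD (summary inferred from the
 * code below). SIDR messages carry a single request_id rather than a
 * local/remote comm_id pair, so they are special-cased; all other CM
 * attributes use the generic layout above.
 */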
static void set_local_comm_id(struct ib_mad *mad, u32 cm_id)
{
        if (mad->mad_hdr.attr_id == CM_SIDR_REQ_ATTR_ID) {
                struct cm_sidr_generic_msg *msg =
                        (struct cm_sidr_generic_msg *)mad;
                msg->request_id = cpu_to_be32(cm_id);
        } else if (mad->mad_hdr.attr_id == CM_SIDR_REP_ATTR_ID) {
                pr_err("trying to set local_comm_id in SIDR_REP\n");
                return;
        } else {
                struct cm_generic_msg *msg = (struct cm_generic_msg *)mad;
                msg->local_comm_id = cpu_to_be32(cm_id);
        }
}
static u32 get_local_comm_id(struct ib_mad *mad)
{
        if (mad->mad_hdr.attr_id == CM_SIDR_REQ_ATTR_ID) {
                struct cm_sidr_generic_msg *msg =
                        (struct cm_sidr_generic_msg *)mad;
                return be32_to_cpu(msg->request_id);
        } else if (mad->mad_hdr.attr_id == CM_SIDR_REP_ATTR_ID) {
                pr_err("trying to get local_comm_id in SIDR_REP\n");
                return -1;
        } else {
                struct cm_generic_msg *msg = (struct cm_generic_msg *)mad;
                return be32_to_cpu(msg->local_comm_id);
        }
}
static void set_remote_comm_id(struct ib_mad *mad, u32 cm_id)
{
        if (mad->mad_hdr.attr_id == CM_SIDR_REP_ATTR_ID) {
                struct cm_sidr_generic_msg *msg =
                        (struct cm_sidr_generic_msg *)mad;
                msg->request_id = cpu_to_be32(cm_id);
        } else if (mad->mad_hdr.attr_id == CM_SIDR_REQ_ATTR_ID) {
                pr_err("trying to set remote_comm_id in SIDR_REQ\n");
                return;
        } else {
                struct cm_generic_msg *msg = (struct cm_generic_msg *)mad;
                msg->remote_comm_id = cpu_to_be32(cm_id);
        }
}
static u32 get_remote_comm_id(struct ib_mad *mad)
{
        if (mad->mad_hdr.attr_id == CM_SIDR_REP_ATTR_ID) {
                struct cm_sidr_generic_msg *msg =
                        (struct cm_sidr_generic_msg *)mad;
                return be32_to_cpu(msg->request_id);
        } else if (mad->mad_hdr.attr_id == CM_SIDR_REQ_ATTR_ID) {
                pr_err("trying to get remote_comm_id in SIDR_REQ\n");
                return -1;
        } else {
                struct cm_generic_msg *msg = (struct cm_generic_msg *)mad;
                return be32_to_cpu(msg->remote_comm_id);
        }
}
static union ib_gid gid_from_req_msg(struct ib_device *ibdev, struct ib_mad *mad)
{
        struct cm_req_msg *msg = (struct cm_req_msg *)mad;

        return msg->primary_path_sgid;
}
/* Caller must already hold sriov->id_map_lock */
static struct id_map_entry *
id_map_find_by_sl_id(struct ib_device *ibdev, u32 slave_id, u32 sl_cm_id)
{
        struct rb_root *sl_id_map = &to_mdev(ibdev)->sriov.sl_id_map;
        struct rb_node *node = sl_id_map->rb_node;

        while (node) {
                struct id_map_entry *id_map_entry =
                        rb_entry(node, struct id_map_entry, node);

                if (id_map_entry->sl_cm_id > sl_cm_id)
                        node = node->rb_left;
                else if (id_map_entry->sl_cm_id < sl_cm_id)
                        node = node->rb_right;
                else if (id_map_entry->slave_id > slave_id)
                        node = node->rb_left;
                else if (id_map_entry->slave_id < slave_id)
                        node = node->rb_right;
                else
                        return id_map_entry;
        }
        return NULL;
}
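/*
 * Delayed-work callback: once CM_CLEANUP_CACHE_TIMEOUT has elapsed after a
 * DREQ (see schedule_delayed() below), drop the cached mapping from both
 * indexes under id_map_lock and free it.
 */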
static void id_map_ent_timeout(struct work_struct *work)
{
        struct delayed_work *delay = to_delayed_work(work);
        struct id_map_entry *ent = container_of(delay, struct id_map_entry, timeout);
        struct id_map_entry *db_ent, *found_ent;
        struct mlx4_ib_dev *dev = ent->dev;
        struct mlx4_ib_sriov *sriov = &dev->sriov;
        struct rb_root *sl_id_map = &sriov->sl_id_map;
        int pv_id = (int) ent->pv_cm_id;

        spin_lock(&sriov->id_map_lock);
        db_ent = (struct id_map_entry *)idr_find(&sriov->pv_id_table, pv_id);
        if (!db_ent)
                goto out;
        found_ent = id_map_find_by_sl_id(&dev->ib_dev, ent->slave_id, ent->sl_cm_id);
        if (found_ent && found_ent == ent)
                rb_erase(&found_ent->node, sl_id_map);
        idr_remove(&sriov->pv_id_table, pv_id);

out:
        list_del(&ent->list);
        spin_unlock(&sriov->id_map_lock);
        kfree(ent);
}
static void id_map_find_del(struct ib_device *ibdev, int pv_cm_id)
{
        struct mlx4_ib_sriov *sriov = &to_mdev(ibdev)->sriov;
        struct rb_root *sl_id_map = &sriov->sl_id_map;
        struct id_map_entry *ent, *found_ent;

        spin_lock(&sriov->id_map_lock);
        ent = (struct id_map_entry *)idr_find(&sriov->pv_id_table, pv_cm_id);
        if (!ent)
                goto out;
        found_ent = id_map_find_by_sl_id(ibdev, ent->slave_id, ent->sl_cm_id);
        if (found_ent && found_ent == ent)
                rb_erase(&found_ent->node, sl_id_map);
        idr_remove(&sriov->pv_id_table, pv_cm_id);
out:
        spin_unlock(&sriov->id_map_lock);
}
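/*
 * Insert @new into the sl_id_map rb-tree, ordered by sl_cm_id and then by
 * slave_id, mirroring the comparisons in id_map_find_by_sl_id(). An
 * existing entry for the same key is replaced in place.
 */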
static void sl_id_map_add(struct ib_device *ibdev, struct id_map_entry *new)
{
        struct rb_root *sl_id_map = &to_mdev(ibdev)->sriov.sl_id_map;
        struct rb_node **link = &sl_id_map->rb_node, *parent = NULL;
        struct id_map_entry *ent;
        int slave_id = new->slave_id;
        int sl_cm_id = new->sl_cm_id;

        ent = id_map_find_by_sl_id(ibdev, slave_id, sl_cm_id);
        if (ent) {
                pr_debug("overriding existing sl_id_map entry (cm_id = %x)\n",
                         sl_cm_id);
                rb_replace_node(&ent->node, &new->node, sl_id_map);
                return;
        }

        /* Go to the bottom of the tree */
        while (*link) {
                parent = *link;
                ent = rb_entry(parent, struct id_map_entry, node);

                if (ent->sl_cm_id > sl_cm_id || (ent->sl_cm_id == sl_cm_id && ent->slave_id > slave_id))
                        link = &(*link)->rb_left;
                else
                        link = &(*link)->rb_right;
        }

        rb_link_node(&new->node, parent, link);
        rb_insert_color(&new->node, sl_id_map);
}
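/*
 * Create and index a mapping for (slave_id, sl_cm_id). The pv_cm_id is
 * allocated cyclically from the idr, presumably so that recently freed IDs
 * are not handed out again right away; idr_preload() plus GFP_NOWAIT keeps
 * the allocation itself safe under the spinlock.
 */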
static struct id_map_entry *
id_map_alloc(struct ib_device *ibdev, int slave_id, u32 sl_cm_id)
{
        int ret;
        struct id_map_entry *ent;
        struct mlx4_ib_sriov *sriov = &to_mdev(ibdev)->sriov;

        ent = kmalloc(sizeof(struct id_map_entry), GFP_KERNEL);
        if (!ent) {
                mlx4_ib_warn(ibdev, "Couldn't allocate id cache entry - out of memory\n");
                return ERR_PTR(-ENOMEM);
        }

        ent->sl_cm_id = sl_cm_id;
        ent->slave_id = slave_id;
        ent->scheduled_delete = 0;
        ent->dev = to_mdev(ibdev);
        INIT_DELAYED_WORK(&ent->timeout, id_map_ent_timeout);

        idr_preload(GFP_KERNEL);
        spin_lock(&to_mdev(ibdev)->sriov.id_map_lock);

        ret = idr_alloc_cyclic(&sriov->pv_id_table, ent, 0, 0, GFP_NOWAIT);
        if (ret >= 0) {
                ent->pv_cm_id = (u32)ret;
                sl_id_map_add(ibdev, ent);
                list_add_tail(&ent->list, &sriov->cm_list);
        }

        spin_unlock(&sriov->id_map_lock);
        idr_preload_end();

        if (ret >= 0)
                return ent;

        /* error flow */
        kfree(ent);
        mlx4_ib_warn(ibdev, "No more space in the idr (err:0x%x)\n", ret);
        return ERR_PTR(-ENOMEM);
}
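/*
 * Look up a mapping either by pv_cm_id (when *pv_cm_id != -1) or by the
 * (slave_id, sl_cm_id) pair, in which case *pv_cm_id is filled in on
 * success. Returns NULL if no mapping exists.
 */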
static struct id_map_entry *
id_map_get(struct ib_device *ibdev, int *pv_cm_id, int slave_id, int sl_cm_id)
{
        struct id_map_entry *ent;
        struct mlx4_ib_sriov *sriov = &to_mdev(ibdev)->sriov;

        spin_lock(&sriov->id_map_lock);
        if (*pv_cm_id == -1) {
                ent = id_map_find_by_sl_id(ibdev, slave_id, sl_cm_id);
                if (ent)
                        *pv_cm_id = (int) ent->pv_cm_id;
        } else
                ent = (struct id_map_entry *)idr_find(&sriov->pv_id_table, *pv_cm_id);
        spin_unlock(&sriov->id_map_lock);

        return ent;
}
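/*
 * Arm (or, if already pending, re-arm) delayed deletion of a mapping.
 * Nothing is scheduled while the device is going down; teardown is then
 * presumably left to mlx4_ib_cm_paravirt_clean().
 */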
static void schedule_delayed(struct ib_device *ibdev, struct id_map_entry *id)
{
        struct mlx4_ib_sriov *sriov = &to_mdev(ibdev)->sriov;
        unsigned long flags;

        spin_lock(&sriov->id_map_lock);
        spin_lock_irqsave(&sriov->going_down_lock, flags);
        /* make sure that there is no schedule inside the scheduled work. */
        if (!sriov->is_going_down && !id->scheduled_delete) {
                id->scheduled_delete = 1;
                schedule_delayed_work(&id->timeout, CM_CLEANUP_CACHE_TIMEOUT);
        } else if (id->scheduled_delete) {
                /* Adjust timeout if already scheduled */
                mod_delayed_work(system_wq, &id->timeout, CM_CLEANUP_CACHE_TIMEOUT);
        }
        spin_unlock_irqrestore(&sriov->going_down_lock, flags);
        spin_unlock(&sriov->id_map_lock);
}
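/*
 * Multiplex direction (slave towards the wire). Connection-initiating
 * attributes (REQ, REP, SIDR REQ) allocate a fresh mapping; REJ and SIDR
 * REP pass through untouched; anything else must match an existing
 * mapping. The slave's comm_id in the MAD is then rewritten to the
 * paravirtual one.
 */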
int mlx4_ib_multiplex_cm_handler(struct ib_device *ibdev, int port, int slave_id,
                                 struct ib_mad *mad)
{
        struct id_map_entry *id;
        u32 sl_cm_id;
        int pv_cm_id = -1;

        if (mad->mad_hdr.attr_id == CM_REQ_ATTR_ID ||
            mad->mad_hdr.attr_id == CM_REP_ATTR_ID ||
            mad->mad_hdr.attr_id == CM_SIDR_REQ_ATTR_ID) {
                sl_cm_id = get_local_comm_id(mad);
                id = id_map_alloc(ibdev, slave_id, sl_cm_id);
                if (IS_ERR(id)) {
                        mlx4_ib_warn(ibdev, "%s: id{slave: %d, sl_cm_id: 0x%x} Failed to id_map_alloc\n",
                                     __func__, slave_id, sl_cm_id);
                        return PTR_ERR(id);
                }
        } else if (mad->mad_hdr.attr_id == CM_REJ_ATTR_ID ||
                   mad->mad_hdr.attr_id == CM_SIDR_REP_ATTR_ID) {
                return 0;
        } else {
                sl_cm_id = get_local_comm_id(mad);
                id = id_map_get(ibdev, &pv_cm_id, slave_id, sl_cm_id);
        }

        if (!id) {
                pr_debug("id{slave: %d, sl_cm_id: 0x%x} is NULL!\n",
                         slave_id, sl_cm_id);
                return -EINVAL;
        }

        set_local_comm_id(mad, id->pv_cm_id);
        if (mad->mad_hdr.attr_id == CM_DREQ_ATTR_ID)
                schedule_delayed(ibdev, id);
        else if (mad->mad_hdr.attr_id == CM_DREP_ATTR_ID)
                id_map_find_del(ibdev, pv_cm_id);
        return 0;
}
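/*
 * Demultiplex direction (wire towards a slave). For incoming REQ/SIDR REQ
 * the destination slave is resolved from the primary path SGID; for other
 * attributes the mapping is found by the paravirtual comm_id in the MAD
 * and the slave's original comm_id is restored. DREQ arms delayed cleanup,
 * while REJ and DREP delete the mapping immediately.
 */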
int mlx4_ib_demux_cm_handler(struct ib_device *ibdev, int port, int *slave,
                             struct ib_mad *mad)
{
        u32 pv_cm_id;
        struct id_map_entry *id;

        if (mad->mad_hdr.attr_id == CM_REQ_ATTR_ID ||
            mad->mad_hdr.attr_id == CM_SIDR_REQ_ATTR_ID) {
                union ib_gid gid;

                if (!slave)
                        return 0;
                gid = gid_from_req_msg(ibdev, mad);
                *slave = mlx4_ib_find_real_gid(ibdev, port, gid.global.interface_id);
                if (*slave < 0) {
                        mlx4_ib_warn(ibdev, "failed matching slave_id by gid (0x%llx)\n",
                                     be64_to_cpu(gid.global.interface_id));
                        return -ENOENT;
                }
                return 0;
        }

        pv_cm_id = get_remote_comm_id(mad);
        id = id_map_get(ibdev, (int *)&pv_cm_id, -1, -1);
        if (!id) {
                pr_debug("Couldn't find an entry for pv_cm_id 0x%x\n", pv_cm_id);
                return -ENOENT;
        }

        if (slave)
                *slave = id->slave_id;
        set_remote_comm_id(mad, id->sl_cm_id);
        if (mad->mad_hdr.attr_id == CM_DREQ_ATTR_ID)
                schedule_delayed(ibdev, id);
        else if (mad->mad_hdr.attr_id == CM_REJ_ATTR_ID ||
                 mad->mad_hdr.attr_id == CM_DREP_ATTR_ID)
                id_map_find_del(ibdev, (int) pv_cm_id);
        return 0;
}
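/*
 * Per-device setup/teardown of the paravirt CM state. On cleanup, pending
 * delayed work is cancelled first (flushing the workqueue if a cancel
 * appears to have raced with a running timeout), after which the rb-tree,
 * idr and cm_list entries are purged for one slave or for all of them.
 */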
void mlx4_ib_cm_paravirt_init(struct mlx4_ib_dev *dev)
{
        spin_lock_init(&dev->sriov.id_map_lock);
        INIT_LIST_HEAD(&dev->sriov.cm_list);
        dev->sriov.sl_id_map = RB_ROOT;
        idr_init(&dev->sriov.pv_id_table);
}
/* slave = -1 ==> all slaves */
/* TBD -- call paravirt clean for single slave. Need for slave RESET event */
void mlx4_ib_cm_paravirt_clean(struct mlx4_ib_dev *dev, int slave)
{
        struct mlx4_ib_sriov *sriov = &dev->sriov;
        struct rb_root *sl_id_map = &sriov->sl_id_map;
        struct list_head lh;
        struct rb_node *nd;
        int need_flush = 1;
        struct id_map_entry *map, *tmp_map;

        INIT_LIST_HEAD(&lh);

        /* cancel all delayed work queue entries */
        spin_lock(&sriov->id_map_lock);
        list_for_each_entry_safe(map, tmp_map, &dev->sriov.cm_list, list) {
                if (slave < 0 || slave == map->slave_id) {
                        if (map->scheduled_delete)
                                need_flush &= !!cancel_delayed_work(&map->timeout);
                }
        }
        spin_unlock(&sriov->id_map_lock);

        if (!need_flush)
                flush_scheduled_work(); /* make sure all timers were flushed */

        /* now, remove all leftover entries from databases */
        spin_lock(&sriov->id_map_lock);
        if (slave < 0) {
                while (rb_first(sl_id_map)) {
                        struct id_map_entry *ent =
                                rb_entry(rb_first(sl_id_map),
                                         struct id_map_entry, node);

                        rb_erase(&ent->node, sl_id_map);
                        idr_remove(&sriov->pv_id_table, (int) ent->pv_cm_id);
                }
                list_splice_init(&dev->sriov.cm_list, &lh);
        } else {
                /* first, move rb-tree nodes belonging to slave to the remove list */
                nd = rb_first(sl_id_map);
                while (nd) {
                        struct id_map_entry *ent =
                                rb_entry(nd, struct id_map_entry, node);
                        nd = rb_next(nd);
                        if (ent->slave_id == slave)
                                list_move_tail(&ent->list, &lh);
                }
                /* remove those nodes from databases */
                list_for_each_entry_safe(map, tmp_map, &lh, list) {
                        rb_erase(&map->node, sl_id_map);
                        idr_remove(&sriov->pv_id_table, (int) map->pv_cm_id);
                }
                /* move the slave's remaining cm_list entries to the remove list */
                list_for_each_entry_safe(map, tmp_map, &dev->sriov.cm_list, list) {
                        if (slave == map->slave_id)
                                list_move_tail(&map->list, &lh);
                }
        }
        spin_unlock(&sriov->id_map_lock);

        /* free any map entries left behind due to cancel_delayed_work above */
        list_for_each_entry_safe(map, tmp_map, &lh, list) {
                list_del(&map->list);
                kfree(map);
        }
}