2 * drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
3 * Copyright (c) 2016 Mellanox Technologies. All rights reserved.
4 * Copyright (c) 2016 Jiri Pirko <jiri@mellanox.com>
5 * Copyright (c) 2016 Ido Schimmel <idosch@mellanox.com>
6 * Copyright (c) 2016 Yotam Gigi <yotamg@mellanox.com>
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
37 #include <linux/kernel.h>
38 #include <linux/types.h>
39 #include <linux/rhashtable.h>
40 #include <linux/bitops.h>
41 #include <linux/in6.h>
42 #include <linux/notifier.h>
43 #include <net/netevent.h>
44 #include <net/neighbour.h>
46 #include <net/ip_fib.h>
52 #define mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) \
53 for_each_set_bit(prefix, (prefix_usage)->b, MLXSW_SP_PREFIX_COUNT)
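/* A prefix-usage bitmap records which prefix lengths are currently in
 * use, one bit per possible prefix length. Virtual routers use these
 * bitmaps to find or build an LPM tree whose structure covers the
 * prefix lengths they need, and to share such a tree when a matching
 * one already exists.
 */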
56 mlxsw_sp_prefix_usage_subset(struct mlxsw_sp_prefix_usage *prefix_usage1,
57 struct mlxsw_sp_prefix_usage *prefix_usage2)
61 mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage1) {
62 if (!test_bit(prefix, prefix_usage2->b))
69 mlxsw_sp_prefix_usage_eq(struct mlxsw_sp_prefix_usage *prefix_usage1,
70 struct mlxsw_sp_prefix_usage *prefix_usage2)
72 return !memcmp(prefix_usage1, prefix_usage2, sizeof(*prefix_usage1));
76 mlxsw_sp_prefix_usage_none(struct mlxsw_sp_prefix_usage *prefix_usage)
78 struct mlxsw_sp_prefix_usage prefix_usage_none = {{ 0 }};
80 return mlxsw_sp_prefix_usage_eq(prefix_usage, &prefix_usage_none);
84 mlxsw_sp_prefix_usage_cpy(struct mlxsw_sp_prefix_usage *prefix_usage1,
85 struct mlxsw_sp_prefix_usage *prefix_usage2)
87 memcpy(prefix_usage1, prefix_usage2, sizeof(*prefix_usage1));
91 mlxsw_sp_prefix_usage_zero(struct mlxsw_sp_prefix_usage *prefix_usage)
93 memset(prefix_usage, 0, sizeof(*prefix_usage));
97 mlxsw_sp_prefix_usage_set(struct mlxsw_sp_prefix_usage *prefix_usage,
98 unsigned char prefix_len)
100 set_bit(prefix_len, prefix_usage->b);
104 mlxsw_sp_prefix_usage_clear(struct mlxsw_sp_prefix_usage *prefix_usage,
105 unsigned char prefix_len)
107 clear_bit(prefix_len, prefix_usage->b);
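/* A FIB entry is keyed by the route's netdev, the destination address
 * (the buffer is sized for IPv6, so IPv4 keys fit as well) and the
 * prefix length.
 */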
110 struct mlxsw_sp_fib_key {
111 struct net_device *dev;
112 unsigned char addr[sizeof(struct in6_addr)];
113 unsigned char prefix_len;
116 enum mlxsw_sp_fib_entry_type {
117 MLXSW_SP_FIB_ENTRY_TYPE_REMOTE,
118 MLXSW_SP_FIB_ENTRY_TYPE_LOCAL,
119 MLXSW_SP_FIB_ENTRY_TYPE_TRAP,
122 struct mlxsw_sp_nexthop_group;
124 struct mlxsw_sp_fib_entry {
125 struct rhash_head ht_node;
126 struct list_head list;
127 struct mlxsw_sp_fib_key key;
128 enum mlxsw_sp_fib_entry_type type;
129 unsigned int ref_count;
130 u16 rif; /* used for action local */
131 struct mlxsw_sp_vr *vr;
133 struct list_head nexthop_group_node;
134 struct mlxsw_sp_nexthop_group *nh_group;
137 struct mlxsw_sp_fib {
138 struct rhashtable ht;
139 struct list_head entry_list;
140 unsigned long prefix_ref_count[MLXSW_SP_PREFIX_COUNT];
141 struct mlxsw_sp_prefix_usage prefix_usage;
144 static const struct rhashtable_params mlxsw_sp_fib_ht_params = {
145 .key_offset = offsetof(struct mlxsw_sp_fib_entry, key),
146 .head_offset = offsetof(struct mlxsw_sp_fib_entry, ht_node),
147 .key_len = sizeof(struct mlxsw_sp_fib_key),
148 .automatic_shrinking = true,
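/* Insertion and removal keep a per-prefix-length reference count: the
 * first entry of a given length sets the corresponding bit in the FIB's
 * prefix usage and the last one clears it. These bits are what drive
 * LPM tree (re)selection for the virtual router.
 */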
151 static int mlxsw_sp_fib_entry_insert(struct mlxsw_sp_fib *fib,
152 struct mlxsw_sp_fib_entry *fib_entry)
154 unsigned char prefix_len = fib_entry->key.prefix_len;
157 err = rhashtable_insert_fast(&fib->ht, &fib_entry->ht_node,
158 mlxsw_sp_fib_ht_params);
161 list_add_tail(&fib_entry->list, &fib->entry_list);
162 if (fib->prefix_ref_count[prefix_len]++ == 0)
163 mlxsw_sp_prefix_usage_set(&fib->prefix_usage, prefix_len);
167 static void mlxsw_sp_fib_entry_remove(struct mlxsw_sp_fib *fib,
168 struct mlxsw_sp_fib_entry *fib_entry)
170 unsigned char prefix_len = fib_entry->key.prefix_len;
172 if (--fib->prefix_ref_count[prefix_len] == 0)
173 mlxsw_sp_prefix_usage_clear(&fib->prefix_usage, prefix_len);
174 list_del(&fib_entry->list);
175 rhashtable_remove_fast(&fib->ht, &fib_entry->ht_node,
176 mlxsw_sp_fib_ht_params);
179 static struct mlxsw_sp_fib_entry *
180 mlxsw_sp_fib_entry_create(struct mlxsw_sp_fib *fib, const void *addr,
181 size_t addr_len, unsigned char prefix_len,
182 struct net_device *dev)
184 struct mlxsw_sp_fib_entry *fib_entry;
186 fib_entry = kzalloc(sizeof(*fib_entry), GFP_KERNEL);
189 fib_entry->key.dev = dev;
190 memcpy(fib_entry->key.addr, addr, addr_len);
191 fib_entry->key.prefix_len = prefix_len;
195 static void mlxsw_sp_fib_entry_destroy(struct mlxsw_sp_fib_entry *fib_entry)
200 static struct mlxsw_sp_fib_entry *
201 mlxsw_sp_fib_entry_lookup(struct mlxsw_sp_fib *fib, const void *addr,
202 size_t addr_len, unsigned char prefix_len,
203 struct net_device *dev)
205 struct mlxsw_sp_fib_key key;
207 memset(&key, 0, sizeof(key));
209 memcpy(key.addr, addr, addr_len);
210 key.prefix_len = prefix_len;
211 return rhashtable_lookup_fast(&fib->ht, &key, mlxsw_sp_fib_ht_params);
214 static struct mlxsw_sp_fib *mlxsw_sp_fib_create(void)
216 struct mlxsw_sp_fib *fib;
219 fib = kzalloc(sizeof(*fib), GFP_KERNEL);
221 return ERR_PTR(-ENOMEM);
222 err = rhashtable_init(&fib->ht, &mlxsw_sp_fib_ht_params);
224 goto err_rhashtable_init;
225 INIT_LIST_HEAD(&fib->entry_list);
233 static void mlxsw_sp_fib_destroy(struct mlxsw_sp_fib *fib)
235 rhashtable_destroy(&fib->ht);
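/* The device provides a fixed pool of MLXSW_SP_LPM_TREE_COUNT LPM
 * trees. A slot is unused when its reference count is zero; when
 * 'one_reserved' is set, one free slot is passed over so that a tree
 * always remains in reserve.
 */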
239 static struct mlxsw_sp_lpm_tree *
240 mlxsw_sp_lpm_tree_find_unused(struct mlxsw_sp *mlxsw_sp, bool one_reserved)
242 struct mlxsw_sp_lpm_tree *lpm_tree;
245 for (i = 0; i < MLXSW_SP_LPM_TREE_COUNT; i++) {
246 lpm_tree = &mlxsw_sp->router.lpm_trees[i];
247 if (lpm_tree->ref_count == 0) {
249 one_reserved = false;
257 static int mlxsw_sp_lpm_tree_alloc(struct mlxsw_sp *mlxsw_sp,
258 struct mlxsw_sp_lpm_tree *lpm_tree)
260 char ralta_pl[MLXSW_REG_RALTA_LEN];
262 mlxsw_reg_ralta_pack(ralta_pl, true,
263 (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
265 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
268 static int mlxsw_sp_lpm_tree_free(struct mlxsw_sp *mlxsw_sp,
269 struct mlxsw_sp_lpm_tree *lpm_tree)
271 char ralta_pl[MLXSW_REG_RALTA_LEN];
273 mlxsw_reg_ralta_pack(ralta_pl, false,
274 (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
276 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
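/* RALST describes the tree layout to the device. The loop below walks
 * the used prefix lengths in ascending order and links each bin to the
 * previously packed (shorter) one as its left child, leaving the
 * longest used prefix as the root bin.
 */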
280 mlxsw_sp_lpm_tree_left_struct_set(struct mlxsw_sp *mlxsw_sp,
281 struct mlxsw_sp_prefix_usage *prefix_usage,
282 struct mlxsw_sp_lpm_tree *lpm_tree)
284 char ralst_pl[MLXSW_REG_RALST_LEN];
287 u8 last_prefix = MLXSW_REG_RALST_BIN_NO_CHILD;
289 mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage)
292 mlxsw_reg_ralst_pack(ralst_pl, root_bin, lpm_tree->id);
293 mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) {
296 mlxsw_reg_ralst_bin_pack(ralst_pl, prefix, last_prefix,
297 MLXSW_REG_RALST_BIN_NO_CHILD);
298 last_prefix = prefix;
300 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralst), ralst_pl);
303 static struct mlxsw_sp_lpm_tree *
304 mlxsw_sp_lpm_tree_create(struct mlxsw_sp *mlxsw_sp,
305 struct mlxsw_sp_prefix_usage *prefix_usage,
306 enum mlxsw_sp_l3proto proto, bool one_reserved)
308 struct mlxsw_sp_lpm_tree *lpm_tree;
311 lpm_tree = mlxsw_sp_lpm_tree_find_unused(mlxsw_sp, one_reserved);
313 return ERR_PTR(-EBUSY);
314 lpm_tree->proto = proto;
315 err = mlxsw_sp_lpm_tree_alloc(mlxsw_sp, lpm_tree);
319 err = mlxsw_sp_lpm_tree_left_struct_set(mlxsw_sp, prefix_usage,
322 goto err_left_struct_set;
323 memcpy(&lpm_tree->prefix_usage, prefix_usage,
324 sizeof(lpm_tree->prefix_usage));
328 mlxsw_sp_lpm_tree_free(mlxsw_sp, lpm_tree);
332 static int mlxsw_sp_lpm_tree_destroy(struct mlxsw_sp *mlxsw_sp,
333 struct mlxsw_sp_lpm_tree *lpm_tree)
335 return mlxsw_sp_lpm_tree_free(mlxsw_sp, lpm_tree);
338 static struct mlxsw_sp_lpm_tree *
339 mlxsw_sp_lpm_tree_get(struct mlxsw_sp *mlxsw_sp,
340 struct mlxsw_sp_prefix_usage *prefix_usage,
341 enum mlxsw_sp_l3proto proto, bool one_reserved)
343 struct mlxsw_sp_lpm_tree *lpm_tree;
346 for (i = 0; i < MLXSW_SP_LPM_TREE_COUNT; i++) {
347 lpm_tree = &mlxsw_sp->router.lpm_trees[i];
348 if (lpm_tree->ref_count != 0 &&
349 lpm_tree->proto == proto &&
350 mlxsw_sp_prefix_usage_eq(&lpm_tree->prefix_usage,
354 lpm_tree = mlxsw_sp_lpm_tree_create(mlxsw_sp, prefix_usage,
355 proto, one_reserved);
356 if (IS_ERR(lpm_tree))
360 lpm_tree->ref_count++;
364 static int mlxsw_sp_lpm_tree_put(struct mlxsw_sp *mlxsw_sp,
365 struct mlxsw_sp_lpm_tree *lpm_tree)
367 if (--lpm_tree->ref_count == 0)
368 return mlxsw_sp_lpm_tree_destroy(mlxsw_sp, lpm_tree);
372 static void mlxsw_sp_lpm_init(struct mlxsw_sp *mlxsw_sp)
374 struct mlxsw_sp_lpm_tree *lpm_tree;
377 for (i = 0; i < MLXSW_SP_LPM_TREE_COUNT; i++) {
378 lpm_tree = &mlxsw_sp->router.lpm_trees[i];
379 lpm_tree->id = i + MLXSW_SP_LPM_TREE_MIN;
383 static struct mlxsw_sp_vr *mlxsw_sp_vr_find_unused(struct mlxsw_sp *mlxsw_sp)
385 struct mlxsw_resources *resources;
386 struct mlxsw_sp_vr *vr;
389 resources = mlxsw_core_resources_get(mlxsw_sp->core);
390 for (i = 0; i < resources->max_virtual_routers; i++) {
391 vr = &mlxsw_sp->router.vrs[i];
398 static int mlxsw_sp_vr_lpm_tree_bind(struct mlxsw_sp *mlxsw_sp,
399 struct mlxsw_sp_vr *vr)
401 char raltb_pl[MLXSW_REG_RALTB_LEN];
403 mlxsw_reg_raltb_pack(raltb_pl, vr->id,
404 (enum mlxsw_reg_ralxx_protocol) vr->proto,
406 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
409 static int mlxsw_sp_vr_lpm_tree_unbind(struct mlxsw_sp *mlxsw_sp,
410 struct mlxsw_sp_vr *vr)
412 char raltb_pl[MLXSW_REG_RALTB_LEN];
414 /* Bind to tree 0, which is the default */
415 mlxsw_reg_raltb_pack(raltb_pl, vr->id,
416 (enum mlxsw_reg_ralxx_protocol) vr->proto, 0);
417 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
420 static u32 mlxsw_sp_fix_tb_id(u32 tb_id)
422 /* For our purpose, squash main and local table into one */
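/* e.g. a route from RT_TABLE_LOCAL (255) lands in the same virtual
 * router as one from RT_TABLE_MAIN (254).
 */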
423 if (tb_id == RT_TABLE_LOCAL)
424 tb_id = RT_TABLE_MAIN;
428 static struct mlxsw_sp_vr *mlxsw_sp_vr_find(struct mlxsw_sp *mlxsw_sp,
430 enum mlxsw_sp_l3proto proto)
432 struct mlxsw_resources *resources;
433 struct mlxsw_sp_vr *vr;
436 tb_id = mlxsw_sp_fix_tb_id(tb_id);
438 resources = mlxsw_core_resources_get(mlxsw_sp->core);
439 for (i = 0; i < resources->max_virtual_routers; i++) {
440 vr = &mlxsw_sp->router.vrs[i];
441 if (vr->used && vr->proto == proto && vr->tb_id == tb_id)
447 static struct mlxsw_sp_vr *mlxsw_sp_vr_create(struct mlxsw_sp *mlxsw_sp,
448 unsigned char prefix_len,
450 enum mlxsw_sp_l3proto proto)
452 struct mlxsw_sp_prefix_usage req_prefix_usage;
453 struct mlxsw_sp_lpm_tree *lpm_tree;
454 struct mlxsw_sp_vr *vr;
457 vr = mlxsw_sp_vr_find_unused(mlxsw_sp);
459 return ERR_PTR(-EBUSY);
460 vr->fib = mlxsw_sp_fib_create();
462 return ERR_CAST(vr->fib);
466 mlxsw_sp_prefix_usage_zero(&req_prefix_usage);
467 mlxsw_sp_prefix_usage_set(&req_prefix_usage, prefix_len);
468 lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
470 if (IS_ERR(lpm_tree)) {
471 err = PTR_ERR(lpm_tree);
474 vr->lpm_tree = lpm_tree;
475 err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, vr);
483 mlxsw_sp_lpm_tree_put(mlxsw_sp, vr->lpm_tree);
485 mlxsw_sp_fib_destroy(vr->fib);
490 static void mlxsw_sp_vr_destroy(struct mlxsw_sp *mlxsw_sp,
491 struct mlxsw_sp_vr *vr)
493 mlxsw_sp_vr_lpm_tree_unbind(mlxsw_sp, vr);
494 mlxsw_sp_lpm_tree_put(mlxsw_sp, vr->lpm_tree);
495 mlxsw_sp_fib_destroy(vr->fib);
500 mlxsw_sp_vr_lpm_tree_check(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_vr *vr,
501 struct mlxsw_sp_prefix_usage *req_prefix_usage)
503 struct mlxsw_sp_lpm_tree *lpm_tree = vr->lpm_tree;
504 struct mlxsw_sp_lpm_tree *new_tree;
507 if (mlxsw_sp_prefix_usage_eq(req_prefix_usage, &lpm_tree->prefix_usage))
510 new_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, req_prefix_usage,
512 if (IS_ERR(new_tree)) {
513 /* We failed to get a tree according to the required
514 * prefix usage. However, the current tree might still be good
515 * for us if our requirement is a subset of the prefixes used
518 if (mlxsw_sp_prefix_usage_subset(req_prefix_usage,
519 &lpm_tree->prefix_usage))
521 return PTR_ERR(new_tree);
524 /* Prevent packet loss by overwriting existing binding */
525 vr->lpm_tree = new_tree;
526 err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, vr);
529 mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
534 vr->lpm_tree = lpm_tree;
535 mlxsw_sp_lpm_tree_put(mlxsw_sp, new_tree);
539 static struct mlxsw_sp_vr *mlxsw_sp_vr_get(struct mlxsw_sp *mlxsw_sp,
540 unsigned char prefix_len,
542 enum mlxsw_sp_l3proto proto)
544 struct mlxsw_sp_vr *vr;
547 tb_id = mlxsw_sp_fix_tb_id(tb_id);
548 vr = mlxsw_sp_vr_find(mlxsw_sp, tb_id, proto);
550 vr = mlxsw_sp_vr_create(mlxsw_sp, prefix_len, tb_id, proto);
554 struct mlxsw_sp_prefix_usage req_prefix_usage;
556 mlxsw_sp_prefix_usage_cpy(&req_prefix_usage,
557 &vr->fib->prefix_usage);
558 mlxsw_sp_prefix_usage_set(&req_prefix_usage, prefix_len);
559 /* Need to replace the LPM tree in case a new prefix is required. */
560 err = mlxsw_sp_vr_lpm_tree_check(mlxsw_sp, vr,
568 static void mlxsw_sp_vr_put(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_vr *vr)
570 /* Destroy the virtual router entity in case the associated FIB is
571 * empty and allow it to be used for other tables in future. Otherwise,
572 * check whether some prefix usage disappeared and change the tree if
573 * that is the case. Note that if a new, smaller tree cannot be
574 * allocated, the original one is kept in use.
576 if (mlxsw_sp_prefix_usage_none(&vr->fib->prefix_usage))
577 mlxsw_sp_vr_destroy(mlxsw_sp, vr);
579 mlxsw_sp_vr_lpm_tree_check(mlxsw_sp, vr,
580 &vr->fib->prefix_usage);
583 static int mlxsw_sp_vrs_init(struct mlxsw_sp *mlxsw_sp)
585 struct mlxsw_resources *resources;
586 struct mlxsw_sp_vr *vr;
589 resources = mlxsw_core_resources_get(mlxsw_sp->core);
590 if (!resources->max_virtual_routers_valid)
593 mlxsw_sp->router.vrs = kcalloc(resources->max_virtual_routers,
594 sizeof(struct mlxsw_sp_vr),
596 if (!mlxsw_sp->router.vrs)
599 for (i = 0; i < resources->max_virtual_routers; i++) {
600 vr = &mlxsw_sp->router.vrs[i];
607 static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp);
609 static void mlxsw_sp_vrs_fini(struct mlxsw_sp *mlxsw_sp)
611 mlxsw_sp_router_fib_flush(mlxsw_sp);
612 kfree(mlxsw_sp->router.vrs);
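/* Neighbour offload: each entry mirrors a kernel ARP (arp_tbl)
 * neighbour and is keyed by the struct neighbour pointer itself. The
 * delayed work 'dw' pushes the entry's state to the device's host
 * table (RAUHT), while nexthop_neighs_list_node links the entries that
 * are used by nexthops so the periodic workers below can keep them
 * resolved.
 */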
615 struct mlxsw_sp_neigh_key {
619 struct mlxsw_sp_neigh_entry {
620 struct rhash_head ht_node;
621 struct mlxsw_sp_neigh_key key;
624 struct delayed_work dw;
625 struct mlxsw_sp_port *mlxsw_sp_port;
626 unsigned char ha[ETH_ALEN];
627 struct list_head nexthop_list; /* list of nexthops using
630 struct list_head nexthop_neighs_list_node;
633 static const struct rhashtable_params mlxsw_sp_neigh_ht_params = {
634 .key_offset = offsetof(struct mlxsw_sp_neigh_entry, key),
635 .head_offset = offsetof(struct mlxsw_sp_neigh_entry, ht_node),
636 .key_len = sizeof(struct mlxsw_sp_neigh_key),
640 mlxsw_sp_neigh_entry_insert(struct mlxsw_sp *mlxsw_sp,
641 struct mlxsw_sp_neigh_entry *neigh_entry)
643 return rhashtable_insert_fast(&mlxsw_sp->router.neigh_ht,
644 &neigh_entry->ht_node,
645 mlxsw_sp_neigh_ht_params);
649 mlxsw_sp_neigh_entry_remove(struct mlxsw_sp *mlxsw_sp,
650 struct mlxsw_sp_neigh_entry *neigh_entry)
652 rhashtable_remove_fast(&mlxsw_sp->router.neigh_ht,
653 &neigh_entry->ht_node,
654 mlxsw_sp_neigh_ht_params);
657 static void mlxsw_sp_router_neigh_update_hw(struct work_struct *work);
659 static struct mlxsw_sp_neigh_entry *
660 mlxsw_sp_neigh_entry_create(struct neighbour *n, u16 rif)
662 struct mlxsw_sp_neigh_entry *neigh_entry;
664 neigh_entry = kzalloc(sizeof(*neigh_entry), GFP_ATOMIC);
667 neigh_entry->key.n = n;
668 neigh_entry->rif = rif;
669 INIT_DELAYED_WORK(&neigh_entry->dw, mlxsw_sp_router_neigh_update_hw);
670 INIT_LIST_HEAD(&neigh_entry->nexthop_list);
675 mlxsw_sp_neigh_entry_destroy(struct mlxsw_sp_neigh_entry *neigh_entry)
680 static struct mlxsw_sp_neigh_entry *
681 mlxsw_sp_neigh_entry_lookup(struct mlxsw_sp *mlxsw_sp, struct neighbour *n)
683 struct mlxsw_sp_neigh_key key;
686 return rhashtable_lookup_fast(&mlxsw_sp->router.neigh_ht,
687 &key, mlxsw_sp_neigh_ht_params);
690 int mlxsw_sp_router_neigh_construct(struct net_device *dev,
693 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
694 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
695 struct mlxsw_sp_neigh_entry *neigh_entry;
696 struct mlxsw_sp_rif *r;
699 if (n->tbl != &arp_tbl)
702 neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n);
706 r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, n->dev);
710 neigh_entry = mlxsw_sp_neigh_entry_create(n, r->rif);
713 err = mlxsw_sp_neigh_entry_insert(mlxsw_sp, neigh_entry);
715 goto err_neigh_entry_insert;
718 err_neigh_entry_insert:
719 mlxsw_sp_neigh_entry_destroy(neigh_entry);
723 void mlxsw_sp_router_neigh_destroy(struct net_device *dev,
726 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
727 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
728 struct mlxsw_sp_neigh_entry *neigh_entry;
730 if (n->tbl != &arp_tbl)
733 neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n);
736 mlxsw_sp_neigh_entry_remove(mlxsw_sp, neigh_entry);
737 mlxsw_sp_neigh_entry_destroy(neigh_entry);
741 mlxsw_sp_router_neighs_update_interval_init(struct mlxsw_sp *mlxsw_sp)
743 unsigned long interval = NEIGH_VAR(&arp_tbl.parms, DELAY_PROBE_TIME);
745 mlxsw_sp->router.neighs_update.interval = jiffies_to_msecs(interval);
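/* Neighbour activity dump: a RAUHTD query returns records for host
 * table entries the device saw activity on. For each IPv4 entry we
 * recover (rif, dip), look the neighbour up on the RIF's netdev and
 * poke it with neigh_event_send() so the kernel keeps entries alive
 * that only see hardware-forwarded traffic.
 */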
748 static void mlxsw_sp_router_neigh_ent_ipv4_process(struct mlxsw_sp *mlxsw_sp,
752 struct net_device *dev;
758 mlxsw_reg_rauhtd_ent_ipv4_unpack(rauhtd_pl, ent_index, &rif, &dip);
760 if (!mlxsw_sp->rifs[rif]) {
761 dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect RIF in neighbour entry\n");
766 dev = mlxsw_sp->rifs[rif]->dev;
767 n = neigh_lookup(&arp_tbl, &dipn, dev);
771 netdev_dbg(dev, "Updating neighbour with IP=%pI4h\n", &dip);
772 neigh_event_send(n, NULL);
776 static void mlxsw_sp_router_neigh_rec_ipv4_process(struct mlxsw_sp *mlxsw_sp,
783 num_entries = mlxsw_reg_rauhtd_ipv4_rec_num_entries_get(rauhtd_pl,
785 /* Hardware starts counting at 0, so add 1. */
788 /* Each record consists of several neighbour entries. */
789 for (i = 0; i < num_entries; i++) {
792 ent_index = rec_index * MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC + i;
793 mlxsw_sp_router_neigh_ent_ipv4_process(mlxsw_sp, rauhtd_pl,
799 static void mlxsw_sp_router_neigh_rec_process(struct mlxsw_sp *mlxsw_sp,
800 char *rauhtd_pl, int rec_index)
802 switch (mlxsw_reg_rauhtd_rec_type_get(rauhtd_pl, rec_index)) {
803 case MLXSW_REG_RAUHTD_TYPE_IPV4:
804 mlxsw_sp_router_neigh_rec_ipv4_process(mlxsw_sp, rauhtd_pl,
807 case MLXSW_REG_RAUHTD_TYPE_IPV6:
813 static bool mlxsw_sp_router_rauhtd_is_full(char *rauhtd_pl)
815 u8 num_rec, last_rec_index, num_entries;
817 num_rec = mlxsw_reg_rauhtd_num_rec_get(rauhtd_pl);
818 last_rec_index = num_rec - 1;
820 if (num_rec < MLXSW_REG_RAUHTD_REC_MAX_NUM)
822 if (mlxsw_reg_rauhtd_rec_type_get(rauhtd_pl, last_rec_index) ==
823 MLXSW_REG_RAUHTD_TYPE_IPV6)
826 num_entries = mlxsw_reg_rauhtd_ipv4_rec_num_entries_get(rauhtd_pl,
828 if (++num_entries == MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC)
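/* The dump below is repeated for as long as the response comes back
 * full, since a full response suggests that more entries are still
 * pending in the device.
 */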
833 static int mlxsw_sp_router_neighs_update_rauhtd(struct mlxsw_sp *mlxsw_sp)
839 rauhtd_pl = kmalloc(MLXSW_REG_RAUHTD_LEN, GFP_KERNEL);
843 /* Make sure the neighbour's netdev isn't removed in the
848 mlxsw_reg_rauhtd_pack(rauhtd_pl, MLXSW_REG_RAUHTD_TYPE_IPV4);
849 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(rauhtd),
852 dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to dump neighbour table\n");
855 num_rec = mlxsw_reg_rauhtd_num_rec_get(rauhtd_pl);
856 for (i = 0; i < num_rec; i++)
857 mlxsw_sp_router_neigh_rec_process(mlxsw_sp, rauhtd_pl,
859 } while (mlxsw_sp_router_rauhtd_is_full(rauhtd_pl));
866 static void mlxsw_sp_router_neighs_update_nh(struct mlxsw_sp *mlxsw_sp)
868 struct mlxsw_sp_neigh_entry *neigh_entry;
870 /* Take RTNL mutex here to prevent the lists from changing */
872 list_for_each_entry(neigh_entry, &mlxsw_sp->router.nexthop_neighs_list,
873 nexthop_neighs_list_node) {
874 /* If this neigh has nexthops, make the kernel think it is
875 * active regardless of the traffic.
877 if (!list_empty(&neigh_entry->nexthop_list))
878 neigh_event_send(neigh_entry->key.n, NULL);
884 mlxsw_sp_router_neighs_update_work_schedule(struct mlxsw_sp *mlxsw_sp)
886 unsigned long interval = mlxsw_sp->router.neighs_update.interval;
888 mlxsw_core_schedule_dw(&mlxsw_sp->router.neighs_update.dw,
889 msecs_to_jiffies(interval));
892 static void mlxsw_sp_router_neighs_update_work(struct work_struct *work)
894 struct mlxsw_sp *mlxsw_sp = container_of(work, struct mlxsw_sp,
895 router.neighs_update.dw.work);
898 err = mlxsw_sp_router_neighs_update_rauhtd(mlxsw_sp);
900 dev_err(mlxsw_sp->bus_info->dev, "Could not update kernel for neigh activity\n");
902 mlxsw_sp_router_neighs_update_nh(mlxsw_sp);
904 mlxsw_sp_router_neighs_update_work_schedule(mlxsw_sp);
907 static void mlxsw_sp_router_probe_unresolved_nexthops(struct work_struct *work)
909 struct mlxsw_sp_neigh_entry *neigh_entry;
910 struct mlxsw_sp *mlxsw_sp = container_of(work, struct mlxsw_sp,
911 router.nexthop_probe_dw.work);
913 /* Iterate over nexthop neighbours, find the unresolved ones and
914 * send ARP on them. This solves the chicken-and-egg problem where
915 * a nexthop would not get offloaded until its neighbour is resolved,
916 * but the neighbour would never get resolved as long as traffic is
917 * flowing in HW via a different nexthop.
919 * Take RTNL mutex here to prevent the lists from changing.
922 list_for_each_entry(neigh_entry, &mlxsw_sp->router.nexthop_neighs_list,
923 nexthop_neighs_list_node) {
924 if (!(neigh_entry->key.n->nud_state & NUD_VALID) &&
925 !list_empty(&neigh_entry->nexthop_list))
926 neigh_event_send(neigh_entry->key.n, NULL);
930 mlxsw_core_schedule_dw(&mlxsw_sp->router.nexthop_probe_dw,
931 MLXSW_SP_UNRESOLVED_NH_PROBE_INTERVAL);
935 mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp *mlxsw_sp,
936 struct mlxsw_sp_neigh_entry *neigh_entry,
939 static void mlxsw_sp_router_neigh_update_hw(struct work_struct *work)
941 struct mlxsw_sp_neigh_entry *neigh_entry =
942 container_of(work, struct mlxsw_sp_neigh_entry, dw.work);
943 struct neighbour *n = neigh_entry->key.n;
944 struct mlxsw_sp_port *mlxsw_sp_port = neigh_entry->mlxsw_sp_port;
945 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
946 char rauht_pl[MLXSW_REG_RAUHT_LEN];
947 struct net_device *dev;
948 bool entry_connected;
956 read_lock_bh(&n->lock);
957 dip = ntohl(*((__be32 *) n->primary_key));
958 memcpy(neigh_entry->ha, n->ha, sizeof(neigh_entry->ha));
959 nud_state = n->nud_state;
961 read_unlock_bh(&n->lock);
963 entry_connected = nud_state & NUD_VALID;
964 adding = (!neigh_entry->offloaded) && entry_connected;
965 updating = neigh_entry->offloaded && entry_connected;
966 removing = neigh_entry->offloaded && !entry_connected;
968 if (adding || updating) {
969 mlxsw_reg_rauht_pack4(rauht_pl, MLXSW_REG_RAUHT_OP_WRITE_ADD,
971 neigh_entry->ha, dip);
972 err = mlxsw_reg_write(mlxsw_sp->core,
973 MLXSW_REG(rauht), rauht_pl);
975 netdev_err(dev, "Could not add neigh %pI4h\n", &dip);
976 neigh_entry->offloaded = false;
978 neigh_entry->offloaded = true;
980 mlxsw_sp_nexthop_neigh_update(mlxsw_sp, neigh_entry, false);
981 } else if (removing) {
982 mlxsw_reg_rauht_pack4(rauht_pl, MLXSW_REG_RAUHT_OP_WRITE_DELETE,
984 neigh_entry->ha, dip);
985 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht),
988 netdev_err(dev, "Could not delete neigh %pI4h\n", &dip);
989 neigh_entry->offloaded = true;
991 neigh_entry->offloaded = false;
993 mlxsw_sp_nexthop_neigh_update(mlxsw_sp, neigh_entry, true);
997 mlxsw_sp_port_dev_put(mlxsw_sp_port);
1000 int mlxsw_sp_router_netevent_event(struct notifier_block *unused,
1001 unsigned long event, void *ptr)
1003 struct mlxsw_sp_neigh_entry *neigh_entry;
1004 struct mlxsw_sp_port *mlxsw_sp_port;
1005 struct mlxsw_sp *mlxsw_sp;
1006 unsigned long interval;
1007 struct net_device *dev;
1008 struct neigh_parms *p;
1009 struct neighbour *n;
1013 case NETEVENT_DELAY_PROBE_TIME_UPDATE:
1016 /* We don't care about changes in the default table. */
1017 if (!p->dev || p->tbl != &arp_tbl)
1020 /* We are in atomic context and can't take RTNL mutex,
1021 * so use RCU variant to walk the device chain.
1023 mlxsw_sp_port = mlxsw_sp_port_lower_dev_hold(p->dev);
1027 mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1028 interval = jiffies_to_msecs(NEIGH_VAR(p, DELAY_PROBE_TIME));
1029 mlxsw_sp->router.neighs_update.interval = interval;
1031 mlxsw_sp_port_dev_put(mlxsw_sp_port);
1033 case NETEVENT_NEIGH_UPDATE:
1037 if (n->tbl != &arp_tbl)
1040 mlxsw_sp_port = mlxsw_sp_port_lower_dev_hold(dev);
1044 mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1045 dip = ntohl(*((__be32 *) n->primary_key));
1046 neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n);
1047 if (WARN_ON(!neigh_entry)) {
1048 mlxsw_sp_port_dev_put(mlxsw_sp_port);
1051 neigh_entry->mlxsw_sp_port = mlxsw_sp_port;
1053 /* Take a reference to ensure the neighbour won't be
1054 * destroyed until we drop the reference in the delayed
1058 if (!mlxsw_core_schedule_dw(&neigh_entry->dw, 0)) {
1060 mlxsw_sp_port_dev_put(mlxsw_sp_port);
1068 static int mlxsw_sp_neigh_init(struct mlxsw_sp *mlxsw_sp)
1072 err = rhashtable_init(&mlxsw_sp->router.neigh_ht,
1073 &mlxsw_sp_neigh_ht_params);
1077 /* Initialize the polling interval according to the default
1080 mlxsw_sp_router_neighs_update_interval_init(mlxsw_sp);
1082 /* Create the delayed works for the activity_update */
1083 INIT_DELAYED_WORK(&mlxsw_sp->router.neighs_update.dw,
1084 mlxsw_sp_router_neighs_update_work);
1085 INIT_DELAYED_WORK(&mlxsw_sp->router.nexthop_probe_dw,
1086 mlxsw_sp_router_probe_unresolved_nexthops);
1087 mlxsw_core_schedule_dw(&mlxsw_sp->router.neighs_update.dw, 0);
1088 mlxsw_core_schedule_dw(&mlxsw_sp->router.nexthop_probe_dw, 0);
1092 static void mlxsw_sp_neigh_fini(struct mlxsw_sp *mlxsw_sp)
1094 cancel_delayed_work_sync(&mlxsw_sp->router.neighs_update.dw);
1095 cancel_delayed_work_sync(&mlxsw_sp->router.nexthop_probe_dw);
1096 rhashtable_destroy(&mlxsw_sp->router.neigh_ht);
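/* Nexthop offload: a nexthop group owns a contiguous block of
 * adjacency entries in the KVD linear area, one entry per offloaded
 * nexthop, holding the neighbour's MAC and RIF. Remote FIB entries
 * reference the whole block via (adj_index, ecmp_size) for ECMP.
 */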
1099 struct mlxsw_sp_nexthop {
1100 struct list_head neigh_list_node; /* member of neigh entry list */
1101 struct mlxsw_sp_nexthop_group *nh_grp; /* pointer back to the group
1104 u8 should_offload:1, /* set indicates this neigh is connected and
1105 * should be put into the KVD linear area of this group.
1107 offloaded:1, /* set in case the neigh is actually put into
1108 * the KVD linear area of this group.
1110 update:1; /* set indicates that the MAC of this neigh should be
1113 struct mlxsw_sp_neigh_entry *neigh_entry;
1116 struct mlxsw_sp_nexthop_group {
1117 struct list_head list; /* node in mlxsw_sp->router.nexthop_group_list */
1118 struct list_head fib_list; /* list of fib entries that use this group */
1119 u8 adj_index_valid:1;
1123 struct mlxsw_sp_nexthop nexthops[0];
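/* When a group's adjacency block moves, RALEU rewrites all routes of a
 * virtual router that still point at the old (adj_index, ecmp_size)
 * pair to the new one in a single register operation, instead of
 * updating the routes one by one.
 */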
1126 static int mlxsw_sp_adj_index_mass_update_vr(struct mlxsw_sp *mlxsw_sp,
1127 struct mlxsw_sp_vr *vr,
1128 u32 adj_index, u16 ecmp_size,
1132 char raleu_pl[MLXSW_REG_RALEU_LEN];
1134 mlxsw_reg_raleu_pack(raleu_pl,
1135 (enum mlxsw_reg_ralxx_protocol) vr->proto, vr->id,
1136 adj_index, ecmp_size, new_adj_index,
1138 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raleu), raleu_pl);
1141 static int mlxsw_sp_adj_index_mass_update(struct mlxsw_sp *mlxsw_sp,
1142 struct mlxsw_sp_nexthop_group *nh_grp,
1143 u32 old_adj_index, u16 old_ecmp_size)
1145 struct mlxsw_sp_fib_entry *fib_entry;
1146 struct mlxsw_sp_vr *vr = NULL;
1149 list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) {
1150 if (vr == fib_entry->vr)
1153 err = mlxsw_sp_adj_index_mass_update_vr(mlxsw_sp, vr,
1164 static int mlxsw_sp_nexthop_mac_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
1165 struct mlxsw_sp_nexthop *nh)
1167 struct mlxsw_sp_neigh_entry *neigh_entry = nh->neigh_entry;
1168 char ratr_pl[MLXSW_REG_RATR_LEN];
1170 mlxsw_reg_ratr_pack(ratr_pl, MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY,
1171 true, adj_index, neigh_entry->rif);
1172 mlxsw_reg_ratr_eth_entry_pack(ratr_pl, neigh_entry->ha);
1173 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ratr), ratr_pl);
1177 mlxsw_sp_nexthop_group_mac_update(struct mlxsw_sp *mlxsw_sp,
1178 struct mlxsw_sp_nexthop_group *nh_grp,
1181 u32 adj_index = nh_grp->adj_index; /* base */
1182 struct mlxsw_sp_nexthop *nh;
1186 for (i = 0; i < nh_grp->count; i++) {
1187 nh = &nh_grp->nexthops[i];
1189 if (!nh->should_offload) {
1194 if (nh->update || reallocate) {
1195 err = mlxsw_sp_nexthop_mac_update(mlxsw_sp,
1207 static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
1208 struct mlxsw_sp_fib_entry *fib_entry);
1211 mlxsw_sp_nexthop_fib_entries_update(struct mlxsw_sp *mlxsw_sp,
1212 struct mlxsw_sp_nexthop_group *nh_grp)
1214 struct mlxsw_sp_fib_entry *fib_entry;
1217 list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) {
1218 err = mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
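/* Group refresh: recount which nexthops should be offloaded. If the
 * set changed, allocate a fresh KVD linear block of the new ECMP size,
 * write the nexthop MACs into it, then either update the FIB entries
 * (when the group previously trapped) or mass-update the adjacency
 * pointers and free the old block. On failure, fall back to trapping
 * packets to the CPU.
 */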
1226 mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp,
1227 struct mlxsw_sp_nexthop_group *nh_grp)
1229 struct mlxsw_sp_nexthop *nh;
1230 bool offload_change = false;
1233 bool old_adj_index_valid;
1240 for (i = 0; i < nh_grp->count; i++) {
1241 nh = &nh_grp->nexthops[i];
1243 if (nh->should_offload ^ nh->offloaded) {
1244 offload_change = true;
1245 if (nh->should_offload)
1248 if (nh->should_offload)
1251 if (!offload_change) {
1252 /* Nothing was added or removed, so no need to reallocate. Just
1253 * update MAC on existing adjacency indexes.
1255 err = mlxsw_sp_nexthop_group_mac_update(mlxsw_sp, nh_grp,
1258 dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n");
1264 /* No neigh of this group is connected so we just set
1265 * the trap and let everything flow through the kernel.
1269 ret = mlxsw_sp_kvdl_alloc(mlxsw_sp, ecmp_size);
1271 /* We ran out of KVD linear space, just set the
1272 * trap and let everything flow through the kernel.
1274 dev_warn(mlxsw_sp->bus_info->dev, "Failed to allocate KVD linear area for nexthop group.\n");
1278 old_adj_index_valid = nh_grp->adj_index_valid;
1279 old_adj_index = nh_grp->adj_index;
1280 old_ecmp_size = nh_grp->ecmp_size;
1281 nh_grp->adj_index_valid = 1;
1282 nh_grp->adj_index = adj_index;
1283 nh_grp->ecmp_size = ecmp_size;
1284 err = mlxsw_sp_nexthop_group_mac_update(mlxsw_sp, nh_grp, true);
1286 dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n");
1290 if (!old_adj_index_valid) {
1291 /* The trap was set for fib entries, so we have to call
1292 * fib entry update to unset it and use adjacency index.
1294 err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
1296 dev_warn(mlxsw_sp->bus_info->dev, "Failed to add adjacency index to fib entries.\n");
1302 err = mlxsw_sp_adj_index_mass_update(mlxsw_sp, nh_grp,
1303 old_adj_index, old_ecmp_size);
1304 mlxsw_sp_kvdl_free(mlxsw_sp, old_adj_index);
1306 dev_warn(mlxsw_sp->bus_info->dev, "Failed to mass-update adjacency index for nexthop group.\n");
1312 old_adj_index_valid = nh_grp->adj_index_valid;
1313 nh_grp->adj_index_valid = 0;
1314 for (i = 0; i < nh_grp->count; i++) {
1315 nh = &nh_grp->nexthops[i];
1318 err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
1320 dev_warn(mlxsw_sp->bus_info->dev, "Failed to set traps for fib entries.\n");
1321 if (old_adj_index_valid)
1322 mlxsw_sp_kvdl_free(mlxsw_sp, nh_grp->adj_index);
1325 static void __mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp_nexthop *nh,
1329 nh->should_offload = 1;
1331 nh->should_offload = 0;
1336 mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp *mlxsw_sp,
1337 struct mlxsw_sp_neigh_entry *neigh_entry,
1340 struct mlxsw_sp_nexthop *nh;
1342 /* Take RTNL mutex here to prevent lists from changes */
1344 list_for_each_entry(nh, &neigh_entry->nexthop_list,
1346 __mlxsw_sp_nexthop_neigh_update(nh, removing);
1347 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
1352 static int mlxsw_sp_nexthop_init(struct mlxsw_sp *mlxsw_sp,
1353 struct mlxsw_sp_nexthop_group *nh_grp,
1354 struct mlxsw_sp_nexthop *nh,
1355 struct fib_nh *fib_nh)
1357 struct mlxsw_sp_neigh_entry *neigh_entry;
1358 struct net_device *dev = fib_nh->nh_dev;
1359 struct neighbour *n;
1362 /* Take a reference on the neigh here to ensure that it would
1363 * not be destroyed before the nexthop entry is finished.
1364 * The reference is taken either in neigh_lookup() or
1365 * in neigh_create() in case n is not found.
1367 n = neigh_lookup(&arp_tbl, &fib_nh->nh_gw, dev);
1369 n = neigh_create(&arp_tbl, &fib_nh->nh_gw, dev);
1372 neigh_event_send(n, NULL);
1374 neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n);
1380 /* If that is the first nexthop connected to that neigh, add to
1381 * nexthop_neighs_list
1383 if (list_empty(&neigh_entry->nexthop_list))
1384 list_add_tail(&neigh_entry->nexthop_neighs_list_node,
1385 &mlxsw_sp->router.nexthop_neighs_list);
1387 nh->nh_grp = nh_grp;
1388 nh->neigh_entry = neigh_entry;
1389 list_add_tail(&nh->neigh_list_node, &neigh_entry->nexthop_list);
1390 read_lock_bh(&n->lock);
1391 nud_state = n->nud_state;
1392 read_unlock_bh(&n->lock);
1393 __mlxsw_sp_nexthop_neigh_update(nh, !(nud_state & NUD_VALID));
1398 static void mlxsw_sp_nexthop_fini(struct mlxsw_sp *mlxsw_sp,
1399 struct mlxsw_sp_nexthop *nh)
1401 struct mlxsw_sp_neigh_entry *neigh_entry = nh->neigh_entry;
1403 list_del(&nh->neigh_list_node);
1405 /* If that is the last nexthop connected to that neigh, remove from
1406 * nexthop_neighs_list
1408 if (list_empty(&nh->neigh_entry->nexthop_list))
1409 list_del(&nh->neigh_entry->nexthop_neighs_list_node);
1411 neigh_release(neigh_entry->key.n);
1414 static struct mlxsw_sp_nexthop_group *
1415 mlxsw_sp_nexthop_group_create(struct mlxsw_sp *mlxsw_sp, struct fib_info *fi)
1417 struct mlxsw_sp_nexthop_group *nh_grp;
1418 struct mlxsw_sp_nexthop *nh;
1419 struct fib_nh *fib_nh;
1424 alloc_size = sizeof(*nh_grp) +
1425 fi->fib_nhs * sizeof(struct mlxsw_sp_nexthop);
1426 nh_grp = kzalloc(alloc_size, GFP_KERNEL);
1428 return ERR_PTR(-ENOMEM);
1429 INIT_LIST_HEAD(&nh_grp->fib_list);
1430 nh_grp->count = fi->fib_nhs;
1431 for (i = 0; i < nh_grp->count; i++) {
1432 nh = &nh_grp->nexthops[i];
1433 fib_nh = &fi->fib_nh[i];
1434 err = mlxsw_sp_nexthop_init(mlxsw_sp, nh_grp, nh, fib_nh);
1436 goto err_nexthop_init;
1438 list_add_tail(&nh_grp->list, &mlxsw_sp->router.nexthop_group_list);
1439 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
1443 for (i--; i >= 0; i--)
1444 mlxsw_sp_nexthop_fini(mlxsw_sp, nh);
1446 return ERR_PTR(err);
1450 mlxsw_sp_nexthop_group_destroy(struct mlxsw_sp *mlxsw_sp,
1451 struct mlxsw_sp_nexthop_group *nh_grp)
1453 struct mlxsw_sp_nexthop *nh;
1456 list_del(&nh_grp->list);
1457 for (i = 0; i < nh_grp->count; i++) {
1458 nh = &nh_grp->nexthops[i];
1459 mlxsw_sp_nexthop_fini(mlxsw_sp, nh);
1464 static bool mlxsw_sp_nexthop_match(struct mlxsw_sp_nexthop *nh,
1465 struct fib_info *fi)
1469 for (i = 0; i < fi->fib_nhs; i++) {
1470 struct fib_nh *fib_nh = &fi->fib_nh[i];
1471 struct neighbour *n = nh->neigh_entry->key.n;
1473 if (memcmp(n->primary_key, &fib_nh->nh_gw,
1474 sizeof(fib_nh->nh_gw)) == 0 &&
1475 n->dev == fib_nh->nh_dev)
1481 static bool mlxsw_sp_nexthop_group_match(struct mlxsw_sp_nexthop_group *nh_grp,
1482 struct fib_info *fi)
1486 if (nh_grp->count != fi->fib_nhs)
1488 for (i = 0; i < nh_grp->count; i++) {
1489 struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i];
1491 if (!mlxsw_sp_nexthop_match(nh, fi))
1497 static struct mlxsw_sp_nexthop_group *
1498 mlxsw_sp_nexthop_group_find(struct mlxsw_sp *mlxsw_sp, struct fib_info *fi)
1500 struct mlxsw_sp_nexthop_group *nh_grp;
1502 list_for_each_entry(nh_grp, &mlxsw_sp->router.nexthop_group_list,
1504 if (mlxsw_sp_nexthop_group_match(nh_grp, fi))
1510 static int mlxsw_sp_nexthop_group_get(struct mlxsw_sp *mlxsw_sp,
1511 struct mlxsw_sp_fib_entry *fib_entry,
1512 struct fib_info *fi)
1514 struct mlxsw_sp_nexthop_group *nh_grp;
1516 nh_grp = mlxsw_sp_nexthop_group_find(mlxsw_sp, fi);
1518 nh_grp = mlxsw_sp_nexthop_group_create(mlxsw_sp, fi);
1520 return PTR_ERR(nh_grp);
1522 list_add_tail(&fib_entry->nexthop_group_node, &nh_grp->fib_list);
1523 fib_entry->nh_group = nh_grp;
1527 static void mlxsw_sp_nexthop_group_put(struct mlxsw_sp *mlxsw_sp,
1528 struct mlxsw_sp_fib_entry *fib_entry)
1530 struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
1532 list_del(&fib_entry->nexthop_group_node);
1533 if (!list_empty(&nh_grp->fib_list))
1535 mlxsw_sp_nexthop_group_destroy(mlxsw_sp, nh_grp);
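/* RALUE programs a single route. Three flavours follow: remote routes
 * point at a nexthop group's adjacency block (or trap while the group
 * has no valid block), local routes forward to a router interface, and
 * trap routes hand the packets to the CPU (ip2me).
 */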
1538 static int mlxsw_sp_fib_entry_op4_remote(struct mlxsw_sp *mlxsw_sp,
1539 struct mlxsw_sp_fib_entry *fib_entry,
1540 enum mlxsw_reg_ralue_op op)
1542 char ralue_pl[MLXSW_REG_RALUE_LEN];
1543 u32 *p_dip = (u32 *) fib_entry->key.addr;
1544 struct mlxsw_sp_vr *vr = fib_entry->vr;
1545 enum mlxsw_reg_ralue_trap_action trap_action;
1547 u32 adjacency_index = 0;
1550 /* In case the nexthop group adjacency index is valid, use it
1551 * with the provided ECMP size. Otherwise, set up a trap and pass
1552 * traffic to the kernel.
1554 if (fib_entry->nh_group->adj_index_valid) {
1555 trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
1556 adjacency_index = fib_entry->nh_group->adj_index;
1557 ecmp_size = fib_entry->nh_group->ecmp_size;
1559 trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
1560 trap_id = MLXSW_TRAP_ID_RTR_INGRESS0;
1563 mlxsw_reg_ralue_pack4(ralue_pl,
1564 (enum mlxsw_reg_ralxx_protocol) vr->proto, op,
1565 vr->id, fib_entry->key.prefix_len, *p_dip);
1566 mlxsw_reg_ralue_act_remote_pack(ralue_pl, trap_action, trap_id,
1567 adjacency_index, ecmp_size);
1568 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
1571 static int mlxsw_sp_fib_entry_op4_local(struct mlxsw_sp *mlxsw_sp,
1572 struct mlxsw_sp_fib_entry *fib_entry,
1573 enum mlxsw_reg_ralue_op op)
1575 char ralue_pl[MLXSW_REG_RALUE_LEN];
1576 u32 *p_dip = (u32 *) fib_entry->key.addr;
1577 struct mlxsw_sp_vr *vr = fib_entry->vr;
1579 mlxsw_reg_ralue_pack4(ralue_pl,
1580 (enum mlxsw_reg_ralxx_protocol) vr->proto, op,
1581 vr->id, fib_entry->key.prefix_len, *p_dip);
1582 mlxsw_reg_ralue_act_local_pack(ralue_pl,
1583 MLXSW_REG_RALUE_TRAP_ACTION_NOP, 0,
1585 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
1588 static int mlxsw_sp_fib_entry_op4_trap(struct mlxsw_sp *mlxsw_sp,
1589 struct mlxsw_sp_fib_entry *fib_entry,
1590 enum mlxsw_reg_ralue_op op)
1592 char ralue_pl[MLXSW_REG_RALUE_LEN];
1593 u32 *p_dip = (u32 *) fib_entry->key.addr;
1594 struct mlxsw_sp_vr *vr = fib_entry->vr;
1596 mlxsw_reg_ralue_pack4(ralue_pl,
1597 (enum mlxsw_reg_ralxx_protocol) vr->proto, op,
1598 vr->id, fib_entry->key.prefix_len, *p_dip);
1599 mlxsw_reg_ralue_act_ip2me_pack(ralue_pl);
1600 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
1603 static int mlxsw_sp_fib_entry_op4(struct mlxsw_sp *mlxsw_sp,
1604 struct mlxsw_sp_fib_entry *fib_entry,
1605 enum mlxsw_reg_ralue_op op)
1607 switch (fib_entry->type) {
1608 case MLXSW_SP_FIB_ENTRY_TYPE_REMOTE:
1609 return mlxsw_sp_fib_entry_op4_remote(mlxsw_sp, fib_entry, op);
1610 case MLXSW_SP_FIB_ENTRY_TYPE_LOCAL:
1611 return mlxsw_sp_fib_entry_op4_local(mlxsw_sp, fib_entry, op);
1612 case MLXSW_SP_FIB_ENTRY_TYPE_TRAP:
1613 return mlxsw_sp_fib_entry_op4_trap(mlxsw_sp, fib_entry, op);
1618 static int mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp,
1619 struct mlxsw_sp_fib_entry *fib_entry,
1620 enum mlxsw_reg_ralue_op op)
1622 switch (fib_entry->vr->proto) {
1623 case MLXSW_SP_L3_PROTO_IPV4:
1624 return mlxsw_sp_fib_entry_op4(mlxsw_sp, fib_entry, op);
1625 case MLXSW_SP_L3_PROTO_IPV6:
1631 static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
1632 struct mlxsw_sp_fib_entry *fib_entry)
1634 return mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry,
1635 MLXSW_REG_RALUE_OP_WRITE_WRITE);
1638 static int mlxsw_sp_fib_entry_del(struct mlxsw_sp *mlxsw_sp,
1639 struct mlxsw_sp_fib_entry *fib_entry)
1641 return mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry,
1642 MLXSW_REG_RALUE_OP_WRITE_DELETE);
1646 mlxsw_sp_router_fib4_entry_init(struct mlxsw_sp *mlxsw_sp,
1647 const struct fib_entry_notifier_info *fen_info,
1648 struct mlxsw_sp_fib_entry *fib_entry)
1650 struct fib_info *fi = fen_info->fi;
1651 struct mlxsw_sp_rif *r = NULL;
1655 if (fen_info->type == RTN_LOCAL || fen_info->type == RTN_BROADCAST) {
1656 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
1659 if (fen_info->type != RTN_UNICAST)
1662 for (nhsel = 0; nhsel < fi->fib_nhs; nhsel++) {
1663 const struct fib_nh *nh = &fi->fib_nh[nhsel];
1667 r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, nh->nh_dev);
1669 /* In case a router interface is not found for
1670 * at least one of the nexthops, that means
1671 * the nexthop points to some device unrelated
1672 * to us. Set the trap and pass the packets for
1673 * this prefix to the kernel.
1680 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
1684 if (fi->fib_scope != RT_SCOPE_UNIVERSE) {
1685 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
1686 fib_entry->rif = r->rif;
1688 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE;
1689 err = mlxsw_sp_nexthop_group_get(mlxsw_sp, fib_entry, fi);
1693 fib_info_offload_inc(fen_info->fi);
1698 mlxsw_sp_router_fib4_entry_fini(struct mlxsw_sp *mlxsw_sp,
1699 struct mlxsw_sp_fib_entry *fib_entry)
1701 if (fib_entry->type != MLXSW_SP_FIB_ENTRY_TYPE_TRAP)
1702 fib_info_offload_dec(fib_entry->fi);
1703 if (fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_REMOTE)
1704 mlxsw_sp_nexthop_group_put(mlxsw_sp, fib_entry);
1707 static struct mlxsw_sp_fib_entry *
1708 mlxsw_sp_fib_entry_get(struct mlxsw_sp *mlxsw_sp,
1709 const struct fib_entry_notifier_info *fen_info)
1711 struct mlxsw_sp_fib_entry *fib_entry;
1712 struct fib_info *fi = fen_info->fi;
1713 struct mlxsw_sp_vr *vr;
1716 vr = mlxsw_sp_vr_get(mlxsw_sp, fen_info->dst_len, fen_info->tb_id,
1717 MLXSW_SP_L3_PROTO_IPV4);
1719 return ERR_CAST(vr);
1721 fib_entry = mlxsw_sp_fib_entry_lookup(vr->fib, &fen_info->dst,
1722 sizeof(fen_info->dst),
1723 fen_info->dst_len, fi->fib_dev);
1725 /* Already exists, just take a reference */
1726 fib_entry->ref_count++;
1729 fib_entry = mlxsw_sp_fib_entry_create(vr->fib, &fen_info->dst,
1730 sizeof(fen_info->dst),
1731 fen_info->dst_len, fi->fib_dev);
1734 goto err_fib_entry_create;
1738 fib_entry->ref_count = 1;
1740 err = mlxsw_sp_router_fib4_entry_init(mlxsw_sp, fen_info, fib_entry);
1742 goto err_fib4_entry_init;
1746 err_fib4_entry_init:
1747 mlxsw_sp_fib_entry_destroy(fib_entry);
1748 err_fib_entry_create:
1749 mlxsw_sp_vr_put(mlxsw_sp, vr);
1751 return ERR_PTR(err);
1754 static struct mlxsw_sp_fib_entry *
1755 mlxsw_sp_fib_entry_find(struct mlxsw_sp *mlxsw_sp,
1756 const struct fib_entry_notifier_info *fen_info)
1758 struct mlxsw_sp_vr *vr;
1760 vr = mlxsw_sp_vr_find(mlxsw_sp, fen_info->tb_id,
1761 MLXSW_SP_L3_PROTO_IPV4);
1765 return mlxsw_sp_fib_entry_lookup(vr->fib, &fen_info->dst,
1766 sizeof(fen_info->dst),
1768 fen_info->fi->fib_dev);
1771 static void mlxsw_sp_fib_entry_put(struct mlxsw_sp *mlxsw_sp,
1772 struct mlxsw_sp_fib_entry *fib_entry)
1774 struct mlxsw_sp_vr *vr = fib_entry->vr;
1776 if (--fib_entry->ref_count == 0) {
1777 mlxsw_sp_router_fib4_entry_fini(mlxsw_sp, fib_entry);
1778 mlxsw_sp_fib_entry_destroy(fib_entry);
1780 mlxsw_sp_vr_put(mlxsw_sp, vr);
1783 static void mlxsw_sp_fib_entry_put_all(struct mlxsw_sp *mlxsw_sp,
1784 struct mlxsw_sp_fib_entry *fib_entry)
1786 unsigned int last_ref_count;
1789 last_ref_count = fib_entry->ref_count;
1790 mlxsw_sp_fib_entry_put(mlxsw_sp, fib_entry);
1791 } while (last_ref_count != 1);
1794 static int mlxsw_sp_router_fib4_add(struct mlxsw_sp *mlxsw_sp,
1795 struct fib_entry_notifier_info *fen_info)
1797 struct mlxsw_sp_fib_entry *fib_entry;
1798 struct mlxsw_sp_vr *vr;
1801 if (mlxsw_sp->router.aborted)
1804 fib_entry = mlxsw_sp_fib_entry_get(mlxsw_sp, fen_info);
1805 if (IS_ERR(fib_entry)) {
1806 dev_warn(mlxsw_sp->bus_info->dev, "Failed to get FIB4 entry being added.\n");
1807 return PTR_ERR(fib_entry);
1810 if (fib_entry->ref_count != 1)
1814 err = mlxsw_sp_fib_entry_insert(vr->fib, fib_entry);
1816 dev_warn(mlxsw_sp->bus_info->dev, "Failed to insert FIB4 entry being added.\n");
1817 goto err_fib_entry_insert;
1819 err = mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
1821 goto err_fib_entry_add;
1825 mlxsw_sp_fib_entry_remove(vr->fib, fib_entry);
1826 err_fib_entry_insert:
1827 mlxsw_sp_fib_entry_put(mlxsw_sp, fib_entry);
1831 static void mlxsw_sp_router_fib4_del(struct mlxsw_sp *mlxsw_sp,
1832 struct fib_entry_notifier_info *fen_info)
1834 struct mlxsw_sp_fib_entry *fib_entry;
1836 if (mlxsw_sp->router.aborted)
1839 fib_entry = mlxsw_sp_fib_entry_find(mlxsw_sp, fen_info);
1843 if (fib_entry->ref_count == 1) {
1844 mlxsw_sp_fib_entry_del(mlxsw_sp, fib_entry);
1845 mlxsw_sp_fib_entry_remove(fib_entry->vr->fib, fib_entry);
1848 mlxsw_sp_fib_entry_put(mlxsw_sp, fib_entry);
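/* Abort path: when hardware and kernel FIBs can no longer be kept in
 * sync, flush everything we offloaded and install a catch-all default
 * route that traps to the CPU - a minimal LPM tree bound to virtual
 * router 0 plus a /0 ip2me entry - so forwarding degrades to the
 * kernel slow path instead of black-holing traffic.
 */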
1851 static int mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp)
1853 char ralta_pl[MLXSW_REG_RALTA_LEN];
1854 char ralst_pl[MLXSW_REG_RALST_LEN];
1855 char raltb_pl[MLXSW_REG_RALTB_LEN];
1856 char ralue_pl[MLXSW_REG_RALUE_LEN];
1859 mlxsw_reg_ralta_pack(ralta_pl, true, MLXSW_REG_RALXX_PROTOCOL_IPV4,
1860 MLXSW_SP_LPM_TREE_MIN);
1861 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
1865 mlxsw_reg_ralst_pack(ralst_pl, 0xff, MLXSW_SP_LPM_TREE_MIN);
1866 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralst), ralst_pl);
1870 mlxsw_reg_raltb_pack(raltb_pl, 0, MLXSW_REG_RALXX_PROTOCOL_IPV4,
1871 MLXSW_SP_LPM_TREE_MIN);
1872 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
1876 mlxsw_reg_ralue_pack4(ralue_pl, MLXSW_SP_L3_PROTO_IPV4,
1877 MLXSW_REG_RALUE_OP_WRITE_WRITE, 0, 0, 0);
1878 mlxsw_reg_ralue_act_ip2me_pack(ralue_pl);
1879 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
1882 static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp)
1884 struct mlxsw_resources *resources;
1885 struct mlxsw_sp_fib_entry *fib_entry;
1886 struct mlxsw_sp_fib_entry *tmp;
1887 struct mlxsw_sp_vr *vr;
1890 resources = mlxsw_core_resources_get(mlxsw_sp->core);
1891 for (i = 0; i < resources->max_virtual_routers; i++) {
1892 vr = &mlxsw_sp->router.vrs[i];
1897 list_for_each_entry_safe(fib_entry, tmp,
1898 &vr->fib->entry_list, list) {
1899 bool do_break = &tmp->list == &vr->fib->entry_list;
1901 mlxsw_sp_fib_entry_del(mlxsw_sp, fib_entry);
1902 mlxsw_sp_fib_entry_remove(fib_entry->vr->fib,
1904 mlxsw_sp_fib_entry_put_all(mlxsw_sp, fib_entry);
1911 static void mlxsw_sp_router_fib4_abort(struct mlxsw_sp *mlxsw_sp)
1915 mlxsw_sp_router_fib_flush(mlxsw_sp);
1916 mlxsw_sp->router.aborted = true;
1917 err = mlxsw_sp_router_set_abort_trap(mlxsw_sp);
1919 dev_warn(mlxsw_sp->bus_info->dev, "Failed to set abort trap.\n");
1922 static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
1924 struct mlxsw_resources *resources;
1925 char rgcr_pl[MLXSW_REG_RGCR_LEN];
1928 resources = mlxsw_core_resources_get(mlxsw_sp->core);
1929 if (!resources->max_rif_valid)
1932 mlxsw_sp->rifs = kcalloc(resources->max_rif,
1933 sizeof(struct mlxsw_sp_rif *), GFP_KERNEL);
1934 if (!mlxsw_sp->rifs)
1937 mlxsw_reg_rgcr_pack(rgcr_pl, true);
1938 mlxsw_reg_rgcr_max_router_interfaces_set(rgcr_pl, resources->max_rif);
1939 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
1946 kfree(mlxsw_sp->rifs);
1950 static void __mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
1952 struct mlxsw_resources *resources;
1953 char rgcr_pl[MLXSW_REG_RGCR_LEN];
1956 mlxsw_reg_rgcr_pack(rgcr_pl, false);
1957 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
1959 resources = mlxsw_core_resources_get(mlxsw_sp->core);
1960 for (i = 0; i < resources->max_rif; i++)
1961 WARN_ON_ONCE(mlxsw_sp->rifs[i]);
1963 kfree(mlxsw_sp->rifs);
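/* FIB notifier: mirrors kernel route add/del events into the device.
 * FIB rule changes cannot be offloaded, so any rule add or delete
 * aborts offloading altogether.
 */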
1966 static int mlxsw_sp_router_fib_event(struct notifier_block *nb,
1967 unsigned long event, void *ptr)
1969 struct mlxsw_sp *mlxsw_sp = container_of(nb, struct mlxsw_sp, fib_nb);
1970 struct fib_entry_notifier_info *fen_info = ptr;
1973 if (!net_eq(fen_info->info.net, &init_net))
1977 case FIB_EVENT_ENTRY_ADD:
1978 err = mlxsw_sp_router_fib4_add(mlxsw_sp, fen_info);
1980 mlxsw_sp_router_fib4_abort(mlxsw_sp);
1982 case FIB_EVENT_ENTRY_DEL:
1983 mlxsw_sp_router_fib4_del(mlxsw_sp, fen_info);
1985 case FIB_EVENT_RULE_ADD: /* fall through */
1986 case FIB_EVENT_RULE_DEL:
1987 mlxsw_sp_router_fib4_abort(mlxsw_sp);
1993 int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
1997 INIT_LIST_HEAD(&mlxsw_sp->router.nexthop_neighs_list);
1998 INIT_LIST_HEAD(&mlxsw_sp->router.nexthop_group_list);
1999 err = __mlxsw_sp_router_init(mlxsw_sp);
2003 mlxsw_sp_lpm_init(mlxsw_sp);
2004 err = mlxsw_sp_vrs_init(mlxsw_sp);
2008 err = mlxsw_sp_neigh_init(mlxsw_sp);
2010 goto err_neigh_init;
2012 mlxsw_sp->fib_nb.notifier_call = mlxsw_sp_router_fib_event;
2013 register_fib_notifier(&mlxsw_sp->fib_nb);
2017 mlxsw_sp_vrs_fini(mlxsw_sp);
2019 __mlxsw_sp_router_fini(mlxsw_sp);
2023 void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
2025 unregister_fib_notifier(&mlxsw_sp->fib_nb);
2026 mlxsw_sp_neigh_fini(mlxsw_sp);
2027 mlxsw_sp_vrs_fini(mlxsw_sp);
2028 __mlxsw_sp_router_fini(mlxsw_sp);