1 // SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
2 /* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */
4 #include <linux/rhashtable.h>
7 #include "spectrum_mr.h"
8 #include "spectrum_router.h"
/* Trailing fields of the core multicast-router state (struct header not
 * visible in this chunk): protocol ops, storage for the catch-all route,
 * the periodic stats delayed work, and the list of per-VR tables.
 */
11 const struct mlxsw_sp_mr_ops *mr_ops;
12 void *catchall_route_priv;
13 struct delayed_work stats_update_dw;
/* All mlxsw_sp_mr_table instances created via mlxsw_sp_mr_table_create() */
14 struct list_head table_list;
/* Period of the route-counter sync work, in milliseconds */
15 #define MLXSW_SP_MR_ROUTES_COUNTER_UPDATE_INTERVAL 5000 /* ms */
/* Flexible storage for the protocol implementation (sized by
 * mr_ops->priv_size at allocation in mlxsw_sp_mr_init()).
 */
16 unsigned long priv[0];
17 /* priv has to be always the last item */
20 struct mlxsw_sp_mr_vif;
/* Per-protocol (IPv4/IPv6) predicate ops for a VIF */
21 struct mlxsw_sp_mr_vif_ops {
/* True when the VIF is a plain netdev (not a tunnel/pimreg device) */
22 bool (*is_regular)(const struct mlxsw_sp_mr_vif *vif);
/* A virtual multicast interface as seen by this driver; mirrors a kernel
 * VIF and tracks which offloaded routes use it as ingress or egress.
 */
25 struct mlxsw_sp_mr_vif {
26 struct net_device *dev;
/* Backing router interface; NULL while the VIF is unresolved */
27 const struct mlxsw_sp_rif *rif;
28 unsigned long vif_flags;
30 /* A list of route_vif_entry structs that point to routes that the VIF
31 * instance is used as one of the egress VIFs
33 struct list_head route_evif_list;
35 /* A list of route_vif_entry structs that point to routes that the VIF
36 * instance is used as an ingress VIF
38 struct list_head route_ivif_list;
40 /* Protocol specific operations for a VIF */
41 const struct mlxsw_sp_mr_vif_ops *ops;
/* Many-to-many link between a route and a VIF: lives on both the VIF's
 * route list (vif_node) and the route's eVIF list (route_node).
 */
44 struct mlxsw_sp_mr_route_vif_entry {
45 struct list_head vif_node;
46 struct list_head route_node;
47 struct mlxsw_sp_mr_vif *mr_vif;
48 struct mlxsw_sp_mr_route *mr_route;
51 struct mlxsw_sp_mr_table;
/* Per-protocol table ops: route validation, key creation and (*,G) test */
52 struct mlxsw_sp_mr_table_ops {
53 bool (*is_route_valid)(const struct mlxsw_sp_mr_table *mr_table,
54 const struct mr_mfc *mfc);
55 void (*key_create)(struct mlxsw_sp_mr_table *mr_table,
56 struct mlxsw_sp_mr_route_key *key,
58 bool (*is_route_starg)(const struct mlxsw_sp_mr_table *mr_table,
59 const struct mlxsw_sp_mr_route *mr_route);
/* One multicast routing table per virtual router and protocol */
62 struct mlxsw_sp_mr_table {
/* Node in mlxsw_sp_mr->table_list */
63 struct list_head node;
64 enum mlxsw_sp_l3proto proto;
65 struct mlxsw_sp *mlxsw_sp;
67 struct mlxsw_sp_mr_vif vifs[MAXVIFS];
68 struct list_head route_list;
/* Routes hashed by mlxsw_sp_mr_route_key, see mlxsw_sp_mr_route_ht_params */
69 struct rhashtable route_ht;
70 const struct mlxsw_sp_mr_table_ops *ops;
/* Protocol-private storage for the table's low-priority catch-all route
 * (sized by mr_ops->route_priv_size at allocation).
 */
71 char catchall_route_priv[0];
72 /* catchall_route_priv has to be always the last item */
/* An offloaded multicast route: hardware state plus links to its
 * ingress VIF and egress VIFs.
 */
75 struct mlxsw_sp_mr_route {
76 struct list_head node;
77 struct rhash_head ht_node;
78 struct mlxsw_sp_mr_route_key key;
79 enum mlxsw_sp_mr_route_action route_action;
83 const struct mlxsw_sp_mr_table *mr_table;
84 /* A list of route_vif_entry structs that point to the egress VIFs */
85 struct list_head evif_list;
86 /* A route_vif_entry struct that point to the ingress VIF */
87 struct mlxsw_sp_mr_route_vif_entry ivif;
/* rhashtable parameters: routes keyed by their full mlxsw_sp_mr_route_key */
90 static const struct rhashtable_params mlxsw_sp_mr_route_ht_params = {
91 .key_len = sizeof(struct mlxsw_sp_mr_route_key),
92 .key_offset = offsetof(struct mlxsw_sp_mr_route, key),
93 .head_offset = offsetof(struct mlxsw_sp_mr_route, ht_node),
94 .automatic_shrinking = true,
/* A VIF is valid (usable for hardware forwarding) when it is a regular
 * netdev, has a device attached and is backed by a resolved RIF.
 */
97 static bool mlxsw_sp_mr_vif_valid(const struct mlxsw_sp_mr_vif *vif)
99 return vif->ops->is_regular(vif) && vif->dev && vif->rif;
/* Weaker test than _valid: the VIF merely exists (body elided here) */
102 static bool mlxsw_sp_mr_vif_exists(const struct mlxsw_sp_mr_vif *vif)
/* True when the route's ingress VIF also appears in its egress TTL set;
 * a TTL of 255 marks an unused eVIF slot in the kernel MFC cache.
 */
108 mlxsw_sp_mr_route_ivif_in_evifs(const struct mlxsw_sp_mr_route *mr_route)
110 vifi_t ivif = mr_route->mfc->mfc_parent;
112 return mr_route->mfc->mfc_un.res.ttls[ivif] != 255;
/* Count the route's eVIFs that pass mlxsw_sp_mr_vif_valid() */
116 mlxsw_sp_mr_route_valid_evifs_num(const struct mlxsw_sp_mr_route *mr_route)
118 struct mlxsw_sp_mr_route_vif_entry *rve;
122 list_for_each_entry(rve, &mr_route->evif_list, route_node)
123 if (mlxsw_sp_mr_vif_valid(rve->mr_vif))
/* Decide the hardware action for a route:
 *  - TRAP when the iVIF is invalid, when a (*,G) route's ingress is not
 *    among its egress VIFs, or when no eVIF is valid;
 *  - TRAP_AND_FORWARD when some eVIF exists but lacks a RIF (software
 *    still has routing to do);
 *  - FORWARD otherwise.
 */
128 static enum mlxsw_sp_mr_route_action
129 mlxsw_sp_mr_route_action(const struct mlxsw_sp_mr_route *mr_route)
131 struct mlxsw_sp_mr_route_vif_entry *rve;
133 /* If the ingress port is not regular and resolved, trap the route */
134 if (!mlxsw_sp_mr_vif_valid(mr_route->ivif.mr_vif))
135 return MLXSW_SP_MR_ROUTE_ACTION_TRAP;
137 /* The kernel does not match a (*,G) route that the ingress interface is
138 * not one of the egress interfaces, so trap these kind of routes.
140 if (mr_route->mr_table->ops->is_route_starg(mr_route->mr_table,
142 !mlxsw_sp_mr_route_ivif_in_evifs(mr_route))
143 return MLXSW_SP_MR_ROUTE_ACTION_TRAP;
145 /* If the route has no valid eVIFs, trap it. */
146 if (!mlxsw_sp_mr_route_valid_evifs_num(mr_route))
147 return MLXSW_SP_MR_ROUTE_ACTION_TRAP;
149 /* If one of the eVIFs has no RIF, trap-and-forward the route as there
150 * is some more routing to do in software too.
152 list_for_each_entry(rve, &mr_route->evif_list, route_node)
153 if (mlxsw_sp_mr_vif_exists(rve->mr_vif) && !rve->mr_vif->rif)
154 return MLXSW_SP_MR_ROUTE_ACTION_TRAP_AND_FORWARD;
156 return MLXSW_SP_MR_ROUTE_ACTION_FORWARD;
/* (*,G) routes get a lower lookup priority than (S,G) so that a more
 * specific source match always wins in hardware.
 */
159 static enum mlxsw_sp_mr_route_prio
160 mlxsw_sp_mr_route_prio(const struct mlxsw_sp_mr_route *mr_route)
162 return mr_route->mr_table->ops->is_route_starg(mr_route->mr_table,
164 MLXSW_SP_MR_ROUTE_PRIO_STARG : MLXSW_SP_MR_ROUTE_PRIO_SG;
/* Allocate a route<->VIF entry and append it to both the route's eVIF
 * list and the VIF's egress-route list. Returns 0 or -ENOMEM.
 */
167 static int mlxsw_sp_mr_route_evif_link(struct mlxsw_sp_mr_route *mr_route,
168 struct mlxsw_sp_mr_vif *mr_vif)
170 struct mlxsw_sp_mr_route_vif_entry *rve;
172 rve = kzalloc(sizeof(*rve), GFP_KERNEL);
175 rve->mr_route = mr_route;
176 rve->mr_vif = mr_vif;
177 list_add_tail(&rve->route_node, &mr_route->evif_list);
178 list_add_tail(&rve->vif_node, &mr_vif->route_evif_list);
/* Detach an eVIF entry from both lists (freeing elided from this view) */
183 mlxsw_sp_mr_route_evif_unlink(struct mlxsw_sp_mr_route_vif_entry *rve)
185 list_del(&rve->route_node);
186 list_del(&rve->vif_node);
/* Bind the route's embedded iVIF entry to a VIF; unlike eVIFs, the iVIF
 * entry is part of struct mlxsw_sp_mr_route, so no allocation is needed.
 */
190 static void mlxsw_sp_mr_route_ivif_link(struct mlxsw_sp_mr_route *mr_route,
191 struct mlxsw_sp_mr_vif *mr_vif)
193 mr_route->ivif.mr_route = mr_route;
194 mr_route->ivif.mr_vif = mr_vif;
195 list_add_tail(&mr_route->ivif.vif_node, &mr_vif->route_ivif_list);
/* Remove the route's iVIF entry from the VIF's ingress-route list */
198 static void mlxsw_sp_mr_route_ivif_unlink(struct mlxsw_sp_mr_route *mr_route)
200 list_del(&mr_route->ivif.vif_node);
/* Build the hardware-facing description of a route: the ingress RIF
 * index and an allocated array of valid egress RIF indices. The caller
 * must release it with mlxsw_sp_mr_route_info_destroy().
 */
204 mlxsw_sp_mr_route_info_create(struct mlxsw_sp_mr_table *mr_table,
205 struct mlxsw_sp_mr_route *mr_route,
206 struct mlxsw_sp_mr_route_info *route_info)
208 struct mlxsw_sp_mr_route_vif_entry *rve;
213 erif_indices = kmalloc_array(MAXVIFS, sizeof(*erif_indices),
/* Collect RIF indices of all valid eVIFs */
218 list_for_each_entry(rve, &mr_route->evif_list, route_node) {
219 if (mlxsw_sp_mr_vif_valid(rve->mr_vif)) {
220 u16 rifi = mlxsw_sp_rif_index(rve->mr_vif->rif);
222 erif_indices[erif++] = rifi;
/* iRIF index is only meaningful when the iVIF is valid */
226 if (mlxsw_sp_mr_vif_valid(mr_route->ivif.mr_vif))
227 irif_index = mlxsw_sp_rif_index(mr_route->ivif.mr_vif->rif);
231 route_info->irif_index = irif_index;
232 route_info->erif_indices = erif_indices;
233 route_info->min_mtu = mr_route->min_mtu;
234 route_info->route_action = mr_route->route_action;
235 route_info->erif_num = erif;
/* Free the eRIF index array allocated by ..._info_create() */
240 mlxsw_sp_mr_route_info_destroy(struct mlxsw_sp_mr_route_info *route_info)
242 kfree(route_info->erif_indices);
/* Program a route into hardware. On create (non-replace) path, allocates
 * route_priv and calls the protocol's route_create(); on replace, reuses
 * the existing route_priv and calls route_update(). The freshly built
 * route_info is destroyed on all paths.
 */
245 static int mlxsw_sp_mr_route_write(struct mlxsw_sp_mr_table *mr_table,
246 struct mlxsw_sp_mr_route *mr_route,
249 struct mlxsw_sp *mlxsw_sp = mr_table->mlxsw_sp;
250 struct mlxsw_sp_mr_route_info route_info;
251 struct mlxsw_sp_mr *mr = mlxsw_sp->mr;
254 err = mlxsw_sp_mr_route_info_create(mr_table, mr_route, &route_info);
259 struct mlxsw_sp_mr_route_params route_params;
261 mr_route->route_priv = kzalloc(mr->mr_ops->route_priv_size,
263 if (!mr_route->route_priv) {
268 route_params.key = mr_route->key;
269 route_params.value = route_info;
270 route_params.prio = mlxsw_sp_mr_route_prio(mr_route);
271 err = mr->mr_ops->route_create(mlxsw_sp, mr->priv,
272 mr_route->route_priv,
/* route_create() failed: release the private storage */
275 kfree(mr_route->route_priv);
277 err = mr->mr_ops->route_update(mlxsw_sp, mr_route->route_priv,
281 mlxsw_sp_mr_route_info_destroy(&route_info);
/* Remove a route from hardware and free its protocol-private storage */
285 static void mlxsw_sp_mr_route_erase(struct mlxsw_sp_mr_table *mr_table,
286 struct mlxsw_sp_mr_route *mr_route)
288 struct mlxsw_sp *mlxsw_sp = mr_table->mlxsw_sp;
289 struct mlxsw_sp_mr *mr = mlxsw_sp->mr;
291 mr->mr_ops->route_destroy(mlxsw_sp, mr->priv, mr_route->route_priv);
292 kfree(mr_route->route_priv);
/* Build a driver route from a kernel MFC entry: create the key, link the
 * iVIF and every eVIF (TTL != 255), compute min_mtu across eVIF devices
 * and derive the initial hardware action. On eVIF link failure the
 * already-linked eVIFs are unwound. Returns the route or ERR_PTR.
 */
295 static struct mlxsw_sp_mr_route *
296 mlxsw_sp_mr_route_create(struct mlxsw_sp_mr_table *mr_table,
299 struct mlxsw_sp_mr_route_vif_entry *rve, *tmp;
300 struct mlxsw_sp_mr_route *mr_route;
304 /* Allocate and init a new route and fill it with parameters */
305 mr_route = kzalloc(sizeof(*mr_route), GFP_KERNEL);
307 return ERR_PTR(-ENOMEM);
308 INIT_LIST_HEAD(&mr_route->evif_list);
310 /* Find min_mtu and link iVIF and eVIFs */
311 mr_route->min_mtu = ETH_MAX_MTU;
314 mr_table->ops->key_create(mr_table, &mr_route->key, mr_route->mfc);
316 mr_route->mr_table = mr_table;
317 for (i = 0; i < MAXVIFS; i++) {
/* TTL 255 marks an unused eVIF slot in the kernel MFC cache */
318 if (mfc->mfc_un.res.ttls[i] != 255) {
319 err = mlxsw_sp_mr_route_evif_link(mr_route,
/* Track the smallest MTU among all egress devices */
323 if (mr_table->vifs[i].dev &&
324 mr_table->vifs[i].dev->mtu < mr_route->min_mtu)
325 mr_route->min_mtu = mr_table->vifs[i].dev->mtu;
328 mlxsw_sp_mr_route_ivif_link(mr_route,
329 &mr_table->vifs[mfc->mfc_parent]);
331 mr_route->route_action = mlxsw_sp_mr_route_action(mr_route);
/* Error path: unwind any eVIF links made so far */
335 list_for_each_entry_safe(rve, tmp, &mr_route->evif_list, route_node)
336 mlxsw_sp_mr_route_evif_unlink(rve);
/* Tear down a driver route: unlink the iVIF, drop the MFC cache
 * reference taken at creation, and unlink all eVIF entries.
 */
341 static void mlxsw_sp_mr_route_destroy(struct mlxsw_sp_mr_table *mr_table,
342 struct mlxsw_sp_mr_route *mr_route)
344 struct mlxsw_sp_mr_route_vif_entry *rve, *tmp;
346 mlxsw_sp_mr_route_ivif_unlink(mr_route);
347 mr_cache_put(mr_route->mfc);
348 list_for_each_entry_safe(rve, tmp, &mr_route->evif_list, route_node)
349 mlxsw_sp_mr_route_evif_unlink(rve);
/* Set or clear the kernel's MFC_OFFLOAD flag on the backing MFC entry */
353 static void mlxsw_sp_mr_mfc_offload_set(struct mlxsw_sp_mr_route *mr_route,
357 mr_route->mfc->mfc_flags |= MFC_OFFLOAD;
359 mr_route->mfc->mfc_flags &= ~MFC_OFFLOAD;
/* A route counts as offloaded unless its hardware action is pure TRAP
 * (i.e. FORWARD and TRAP_AND_FORWARD both mark it offloaded).
 */
362 static void mlxsw_sp_mr_mfc_offload_update(struct mlxsw_sp_mr_route *mr_route)
366 offload = mr_route->route_action != MLXSW_SP_MR_ROUTE_ACTION_TRAP;
367 mlxsw_sp_mr_mfc_offload_set(mr_route, offload);
/* Fully remove a route: clear the offload flag, erase it from hardware,
 * drop it from the hash table and route list, then destroy it.
 */
370 static void __mlxsw_sp_mr_route_del(struct mlxsw_sp_mr_table *mr_table,
371 struct mlxsw_sp_mr_route *mr_route)
373 mlxsw_sp_mr_mfc_offload_set(mr_route, false);
374 mlxsw_sp_mr_route_erase(mr_table, mr_route);
375 rhashtable_remove_fast(&mr_table->route_ht, &mr_route->ht_node,
376 mlxsw_sp_mr_route_ht_params);
377 list_del(&mr_route->node);
378 mlxsw_sp_mr_route_destroy(mr_table, mr_route);
/* Add (or replace) an offloaded route for a kernel MFC entry.
 * Flow: validate -> create driver route -> look up a route with the same
 * key. On replace, the original must exist and its route_priv is reused;
 * on non-replace, a key collision means an unsupported proxy route and
 * the add is aborted. The new route is then inserted into the table
 * structures and written to hardware; on replace the original route is
 * destroyed afterwards. Errors unwind in reverse order.
 */
381 int mlxsw_sp_mr_route_add(struct mlxsw_sp_mr_table *mr_table,
382 struct mr_mfc *mfc, bool replace)
384 struct mlxsw_sp_mr_route *mr_orig_route = NULL;
385 struct mlxsw_sp_mr_route *mr_route;
388 if (!mr_table->ops->is_route_valid(mr_table, mfc))
391 /* Create a new route */
392 mr_route = mlxsw_sp_mr_route_create(mr_table, mfc);
393 if (IS_ERR(mr_route))
394 return PTR_ERR(mr_route);
396 /* Find any route with a matching key */
397 mr_orig_route = rhashtable_lookup_fast(&mr_table->route_ht,
399 mlxsw_sp_mr_route_ht_params);
401 /* On replace case, make the route point to the new route_priv.
/* Replace without an existing route is a kernel/driver inconsistency */
403 if (WARN_ON(!mr_orig_route)) {
405 goto err_no_orig_route;
407 mr_route->route_priv = mr_orig_route->route_priv;
408 } else if (mr_orig_route) {
409 /* On non replace case, if another route with the same key was
410 * found, abort, as duplicate routes are used for proxy routes.
412 dev_warn(mr_table->mlxsw_sp->bus_info->dev,
413 "Offloading proxy routes is not supported.\n");
415 goto err_duplicate_route;
418 /* Put it in the table data-structures */
419 list_add_tail(&mr_route->node, &mr_table->route_list);
420 err = rhashtable_insert_fast(&mr_table->route_ht,
422 mlxsw_sp_mr_route_ht_params);
424 goto err_rhashtable_insert;
426 /* Write the route to the hardware */
427 err = mlxsw_sp_mr_route_write(mr_table, mr_route, replace);
429 goto err_mr_route_write;
431 /* Destroy the original route */
433 rhashtable_remove_fast(&mr_table->route_ht,
434 &mr_orig_route->ht_node,
435 mlxsw_sp_mr_route_ht_params);
436 list_del(&mr_orig_route->node);
437 mlxsw_sp_mr_route_destroy(mr_table, mr_orig_route);
440 mlxsw_sp_mr_mfc_offload_update(mr_route);
/* Error unwinding, in reverse order of the steps above */
444 rhashtable_remove_fast(&mr_table->route_ht, &mr_route->ht_node,
445 mlxsw_sp_mr_route_ht_params);
446 err_rhashtable_insert:
447 list_del(&mr_route->node);
450 mlxsw_sp_mr_route_destroy(mr_table, mr_route);
/* Delete the offloaded route matching a kernel MFC entry, if present:
 * rebuild the key, look it up in the hash table and remove it.
 */
454 void mlxsw_sp_mr_route_del(struct mlxsw_sp_mr_table *mr_table,
457 struct mlxsw_sp_mr_route *mr_route;
458 struct mlxsw_sp_mr_route_key key;
460 mr_table->ops->key_create(mr_table, &key, mfc);
461 mr_route = rhashtable_lookup_fast(&mr_table->route_ht, &key,
462 mlxsw_sp_mr_route_ht_params);
464 __mlxsw_sp_mr_route_del(mr_table, mr_route);
467 /* Should be called after the VIF struct is updated */
/* A route's ingress VIF became resolvable: recompute the action, and if
 * it is no longer TRAP, push the new iRIF and then the new action to
 * hardware (action last, so the iRIF change takes effect atomically).
 */
469 mlxsw_sp_mr_route_ivif_resolve(struct mlxsw_sp_mr_table *mr_table,
470 struct mlxsw_sp_mr_route_vif_entry *rve)
472 struct mlxsw_sp *mlxsw_sp = mr_table->mlxsw_sp;
473 enum mlxsw_sp_mr_route_action route_action;
474 struct mlxsw_sp_mr *mr = mlxsw_sp->mr;
478 route_action = mlxsw_sp_mr_route_action(rve->mr_route);
479 if (route_action == MLXSW_SP_MR_ROUTE_ACTION_TRAP)
482 /* rve->mr_vif->rif is guaranteed to be valid at this stage */
483 irif_index = mlxsw_sp_rif_index(rve->mr_vif->rif);
484 err = mr->mr_ops->route_irif_update(mlxsw_sp, rve->mr_route->route_priv,
489 err = mr->mr_ops->route_action_update(mlxsw_sp,
490 rve->mr_route->route_priv,
493 /* No need to rollback here because the iRIF change only takes
494 * place after the action has been updated.
498 rve->mr_route->route_action = route_action;
499 mlxsw_sp_mr_mfc_offload_update(rve->mr_route);
/* A route's ingress VIF became unresolved: demote the route to TRAP in
 * hardware and clear its offload status accordingly.
 */
504 mlxsw_sp_mr_route_ivif_unresolve(struct mlxsw_sp_mr_table *mr_table,
505 struct mlxsw_sp_mr_route_vif_entry *rve)
507 struct mlxsw_sp *mlxsw_sp = mr_table->mlxsw_sp;
508 struct mlxsw_sp_mr *mr = mlxsw_sp->mr;
510 mr->mr_ops->route_action_update(mlxsw_sp, rve->mr_route->route_priv,
511 MLXSW_SP_MR_ROUTE_ACTION_TRAP);
512 rve->mr_route->route_action = MLXSW_SP_MR_ROUTE_ACTION_TRAP;
513 mlxsw_sp_mr_mfc_offload_update(rve->mr_route);
516 /* Should be called after the RIF struct is updated */
/* An egress VIF of a route became resolvable: add its eRIF to the
 * hardware route, recompute and update the route action if changed, and
 * shrink the route's min MTU if the new device's MTU is smaller. Errors
 * unwind via the labels at the bottom (action restore, then eRIF del).
 */
518 mlxsw_sp_mr_route_evif_resolve(struct mlxsw_sp_mr_table *mr_table,
519 struct mlxsw_sp_mr_route_vif_entry *rve)
521 struct mlxsw_sp *mlxsw_sp = mr_table->mlxsw_sp;
522 enum mlxsw_sp_mr_route_action route_action;
523 struct mlxsw_sp_mr *mr = mlxsw_sp->mr;
528 if (mlxsw_sp_mr_vif_valid(rve->mr_vif)) {
529 erif_index = mlxsw_sp_rif_index(rve->mr_vif->rif);
530 err = mr->mr_ops->route_erif_add(mlxsw_sp,
531 rve->mr_route->route_priv,
537 /* Update the route action, as the new eVIF can be a tunnel or a pimreg
538 * device which will require updating the action.
540 route_action = mlxsw_sp_mr_route_action(rve->mr_route);
541 if (route_action != rve->mr_route->route_action) {
542 err = mr->mr_ops->route_action_update(mlxsw_sp,
543 rve->mr_route->route_priv,
546 goto err_route_action_update;
549 /* Update the minimum MTU */
550 if (rve->mr_vif->dev->mtu < rve->mr_route->min_mtu) {
551 rve->mr_route->min_mtu = rve->mr_vif->dev->mtu;
552 err = mr->mr_ops->route_min_mtu_update(mlxsw_sp,
553 rve->mr_route->route_priv,
554 rve->mr_route->min_mtu);
556 goto err_route_min_mtu_update;
559 rve->mr_route->route_action = route_action;
560 mlxsw_sp_mr_mfc_offload_update(rve->mr_route);
/* Error unwinding: restore the previous action, then remove the eRIF */
563 err_route_min_mtu_update:
564 if (route_action != rve->mr_route->route_action)
565 mr->mr_ops->route_action_update(mlxsw_sp,
566 rve->mr_route->route_priv,
567 rve->mr_route->route_action);
568 err_route_action_update:
569 if (mlxsw_sp_mr_vif_valid(rve->mr_vif))
570 mr->mr_ops->route_erif_del(mlxsw_sp, rve->mr_route->route_priv,
575 /* Should be called before the RIF struct is updated */
/* An egress VIF of a route is becoming unresolved: if it was the last
 * valid eVIF, switch the route to TRAP; otherwise recompute the action.
 * Then remove its eRIF from the hardware route.
 */
577 mlxsw_sp_mr_route_evif_unresolve(struct mlxsw_sp_mr_table *mr_table,
578 struct mlxsw_sp_mr_route_vif_entry *rve)
580 struct mlxsw_sp *mlxsw_sp = mr_table->mlxsw_sp;
581 enum mlxsw_sp_mr_route_action route_action;
582 struct mlxsw_sp_mr *mr = mlxsw_sp->mr;
585 /* If the unresolved RIF was not valid, no need to delete it */
586 if (!mlxsw_sp_mr_vif_valid(rve->mr_vif))
589 /* Update the route action: if there is only one valid eVIF in the
590 * route, set the action to trap as the VIF deletion will lead to zero
591 * valid eVIFs. On any other case, use the mlxsw_sp_mr_route_action to
592 * determine the route action.
594 if (mlxsw_sp_mr_route_valid_evifs_num(rve->mr_route) == 1)
595 route_action = MLXSW_SP_MR_ROUTE_ACTION_TRAP;
597 route_action = mlxsw_sp_mr_route_action(rve->mr_route);
598 if (route_action != rve->mr_route->route_action)
599 mr->mr_ops->route_action_update(mlxsw_sp,
600 rve->mr_route->route_priv,
603 /* Delete the erif from the route */
604 rifi = mlxsw_sp_rif_index(rve->mr_vif->rif);
605 mr->mr_ops->route_erif_del(mlxsw_sp, rve->mr_route->route_priv, rifi);
606 rve->mr_route->route_action = route_action;
607 mlxsw_sp_mr_mfc_offload_update(rve->mr_route);
/* Mark a VIF resolved (dev/flags/RIF assigned) and update every route
 * using it as an iVIF, then every route using it as an eVIF. On failure
 * the already-updated routes are unresolved again in reverse order.
 */
610 static int mlxsw_sp_mr_vif_resolve(struct mlxsw_sp_mr_table *mr_table,
611 struct net_device *dev,
612 struct mlxsw_sp_mr_vif *mr_vif,
613 unsigned long vif_flags,
614 const struct mlxsw_sp_rif *rif)
616 struct mlxsw_sp_mr_route_vif_entry *irve, *erve;
622 mr_vif->vif_flags = vif_flags;
624 /* Update all routes where this VIF is used as an unresolved iRIF */
625 list_for_each_entry(irve, &mr_vif->route_ivif_list, vif_node) {
626 err = mlxsw_sp_mr_route_ivif_resolve(mr_table, irve);
628 goto err_irif_unresolve;
631 /* Update all routes where this VIF is used as an unresolved eRIF */
632 list_for_each_entry(erve, &mr_vif->route_evif_list, vif_node) {
633 err = mlxsw_sp_mr_route_evif_resolve(mr_table, erve);
635 goto err_erif_unresolve;
/* Error path: roll back the entries updated before the failure */
640 list_for_each_entry_continue_reverse(erve, &mr_vif->route_evif_list,
642 mlxsw_sp_mr_route_evif_unresolve(mr_table, erve);
644 list_for_each_entry_continue_reverse(irve, &mr_vif->route_ivif_list,
646 mlxsw_sp_mr_route_ivif_unresolve(mr_table, irve);
/* Mark a VIF unresolved: first unresolve every route that uses it as an
 * eVIF, then every route that uses it as an iVIF.
 */
651 static void mlxsw_sp_mr_vif_unresolve(struct mlxsw_sp_mr_table *mr_table,
652 struct net_device *dev,
653 struct mlxsw_sp_mr_vif *mr_vif)
655 struct mlxsw_sp_mr_route_vif_entry *rve;
657 /* Update all routes where this VIF is used as an unresolved eRIF */
658 list_for_each_entry(rve, &mr_vif->route_evif_list, vif_node)
659 mlxsw_sp_mr_route_evif_unresolve(mr_table, rve);
661 /* Update all routes where this VIF is used as an unresolved iRIF */
662 list_for_each_entry(rve, &mr_vif->route_ivif_list, vif_node)
663 mlxsw_sp_mr_route_ivif_unresolve(mr_table, rve);
/* Kernel notified a VIF was added: validate the index and resolve the
 * corresponding table slot with the given device, flags and RIF.
 * NOTE(review): mr_vif is computed before the vif_index bounds WARN_ON;
 * the address is not dereferenced until after the check, per the
 * upstream pattern.
 */
670 int mlxsw_sp_mr_vif_add(struct mlxsw_sp_mr_table *mr_table,
671 struct net_device *dev, vifi_t vif_index,
672 unsigned long vif_flags, const struct mlxsw_sp_rif *rif)
674 struct mlxsw_sp_mr_vif *mr_vif = &mr_table->vifs[vif_index];
676 if (WARN_ON(vif_index >= MAXVIFS))
680 return mlxsw_sp_mr_vif_resolve(mr_table, dev, mr_vif, vif_flags, rif);
/* Kernel notified a VIF was deleted: unresolve the table slot. The slot
 * must currently have a device attached (WARN otherwise).
 */
683 void mlxsw_sp_mr_vif_del(struct mlxsw_sp_mr_table *mr_table, vifi_t vif_index)
685 struct mlxsw_sp_mr_vif *mr_vif = &mr_table->vifs[vif_index];
687 if (WARN_ON(vif_index >= MAXVIFS))
689 if (WARN_ON(!mr_vif->dev))
691 mlxsw_sp_mr_vif_unresolve(mr_table, NULL, mr_vif);
/* Linear scan of the table's VIF array for the slot bound to a given
 * netdev; MAXVIFS is small, so O(n) is fine here.
 */
694 static struct mlxsw_sp_mr_vif *
695 mlxsw_sp_mr_dev_vif_lookup(struct mlxsw_sp_mr_table *mr_table,
696 const struct net_device *dev)
700 for (vif_index = 0; vif_index < MAXVIFS; vif_index++)
701 if (mr_table->vifs[vif_index].dev == dev)
702 return &mr_table->vifs[vif_index];
/* A RIF came up: if some VIF is bound to the RIF's netdev, re-resolve
 * that VIF with the new RIF so its routes can be offloaded.
 */
706 int mlxsw_sp_mr_rif_add(struct mlxsw_sp_mr_table *mr_table,
707 const struct mlxsw_sp_rif *rif)
709 const struct net_device *rif_dev = mlxsw_sp_rif_dev(rif);
710 struct mlxsw_sp_mr_vif *mr_vif;
715 mr_vif = mlxsw_sp_mr_dev_vif_lookup(mr_table, rif_dev);
718 return mlxsw_sp_mr_vif_resolve(mr_table, mr_vif->dev, mr_vif,
719 mr_vif->vif_flags, rif);
/* A RIF is going away: unresolve the VIF bound to its netdev, if any */
722 void mlxsw_sp_mr_rif_del(struct mlxsw_sp_mr_table *mr_table,
723 const struct mlxsw_sp_rif *rif)
725 const struct net_device *rif_dev = mlxsw_sp_rif_dev(rif);
726 struct mlxsw_sp_mr_vif *mr_vif;
731 mr_vif = mlxsw_sp_mr_dev_vif_lookup(mr_table, rif_dev);
734 mlxsw_sp_mr_vif_unresolve(mr_table, mr_vif->dev, mr_vif);
/* A RIF's MTU changed: for every route using the matching VIF as an
 * eVIF, shrink the route's min MTU in hardware if the new MTU is lower.
 * NOTE(review): only decreases are propagated here — a raised MTU does
 * not recompute min_mtu across the other eVIFs in this function.
 */
737 void mlxsw_sp_mr_rif_mtu_update(struct mlxsw_sp_mr_table *mr_table,
738 const struct mlxsw_sp_rif *rif, int mtu)
740 const struct net_device *rif_dev = mlxsw_sp_rif_dev(rif);
741 struct mlxsw_sp *mlxsw_sp = mr_table->mlxsw_sp;
742 struct mlxsw_sp_mr_route_vif_entry *rve;
743 struct mlxsw_sp_mr *mr = mlxsw_sp->mr;
744 struct mlxsw_sp_mr_vif *mr_vif;
749 /* Search for a VIF that use that RIF */
750 mr_vif = mlxsw_sp_mr_dev_vif_lookup(mr_table, rif_dev);
754 /* Update all the routes that uses that VIF as eVIF */
755 list_for_each_entry(rve, &mr_vif->route_evif_list, vif_node) {
756 if (mtu < rve->mr_route->min_mtu) {
757 rve->mr_route->min_mtu = mtu;
758 mr->mr_ops->route_min_mtu_update(mlxsw_sp,
759 rve->mr_route->route_priv,
765 /* Protocol specific functions */
/* IPv4: reject (*,*) routes — those are kernel proxy routes and cannot
 * be offloaded.
 */
767 mlxsw_sp_mr_route4_validate(const struct mlxsw_sp_mr_table *mr_table,
768 const struct mr_mfc *c)
770 struct mfc_cache *mfc = (struct mfc_cache *) c;
772 /* If the route is a (*,*) route, abort, as these kind of routes are
773 * used for proxy routes.
775 if (mfc->mfc_origin == htonl(INADDR_ANY) &&
776 mfc->mfc_mcastgrp == htonl(INADDR_ANY)) {
777 dev_warn(mr_table->mlxsw_sp->bus_info->dev,
778 "Offloading proxy routes is not supported.\n");
/* IPv4 key: full group mask always; source mask is /0 for (*,G) routes
 * and /32 for (S,G) routes.
 */
784 static void mlxsw_sp_mr_route4_key(struct mlxsw_sp_mr_table *mr_table,
785 struct mlxsw_sp_mr_route_key *key,
788 const struct mfc_cache *mfc = (struct mfc_cache *) c;
791 starg = (mfc->mfc_origin == htonl(INADDR_ANY));
793 memset(key, 0, sizeof(*key));
794 key->vrid = mr_table->vr_id;
795 key->proto = MLXSW_SP_L3_PROTO_IPV4;
796 key->group.addr4 = mfc->mfc_mcastgrp;
797 key->group_mask.addr4 = htonl(0xffffffff);
798 key->source.addr4 = mfc->mfc_origin;
799 key->source_mask.addr4 = htonl(starg ? 0 : 0xffffffff);
/* A zero source mask identifies a (*,G) route */
802 static bool mlxsw_sp_mr_route4_starg(const struct mlxsw_sp_mr_table *mr_table,
803 const struct mlxsw_sp_mr_route *mr_route)
805 return mr_route->key.source_mask.addr4 == htonl(INADDR_ANY);
/* IPv4 VIF is "regular" unless it is a tunnel or PIM register device */
808 static bool mlxsw_sp_mr_vif4_is_regular(const struct mlxsw_sp_mr_vif *vif)
810 return !(vif->vif_flags & (VIFF_TUNNEL | VIFF_REGISTER));
/* IPv6: reject (*,*) routes — those are kernel proxy routes and cannot
 * be offloaded.
 */
814 mlxsw_sp_mr_route6_validate(const struct mlxsw_sp_mr_table *mr_table,
815 const struct mr_mfc *c)
817 struct mfc6_cache *mfc = (struct mfc6_cache *) c;
819 /* If the route is a (*,*) route, abort, as these kind of routes are
820 * used for proxy routes.
822 if (ipv6_addr_any(&mfc->mf6c_origin) &&
823 ipv6_addr_any(&mfc->mf6c_mcastgrp)) {
824 dev_warn(mr_table->mlxsw_sp->bus_info->dev,
825 "Offloading proxy routes is not supported.\n");
/* IPv6 key: full group mask always; source mask stays all-zero
 * (from the memset) for (*,G) routes and is /128 for (S,G) routes.
 */
831 static void mlxsw_sp_mr_route6_key(struct mlxsw_sp_mr_table *mr_table,
832 struct mlxsw_sp_mr_route_key *key,
835 const struct mfc6_cache *mfc = (struct mfc6_cache *) c;
837 memset(key, 0, sizeof(*key));
838 key->vrid = mr_table->vr_id;
839 key->proto = MLXSW_SP_L3_PROTO_IPV6;
840 key->group.addr6 = mfc->mf6c_mcastgrp;
841 memset(&key->group_mask.addr6, 0xff, sizeof(key->group_mask.addr6));
842 key->source.addr6 = mfc->mf6c_origin;
843 if (!ipv6_addr_any(&mfc->mf6c_origin))
844 memset(&key->source_mask.addr6, 0xff,
845 sizeof(key->source_mask.addr6));
/* An all-zero source mask identifies a (*,G) route */
848 static bool mlxsw_sp_mr_route6_starg(const struct mlxsw_sp_mr_table *mr_table,
849 const struct mlxsw_sp_mr_route *mr_route)
851 return ipv6_addr_any(&mr_route->key.source_mask.addr6);
/* IPv6 MIF is "regular" unless it is a PIM register device */
854 static bool mlxsw_sp_mr_vif6_is_regular(const struct mlxsw_sp_mr_vif *vif)
856 return !(vif->vif_flags & MIFF_REGISTER);
860 mlxsw_sp_mr_vif_ops mlxsw_sp_mr_vif_ops_arr[] = {
862 .is_regular = mlxsw_sp_mr_vif4_is_regular,
865 .is_regular = mlxsw_sp_mr_vif6_is_regular,
870 mlxsw_sp_mr_table_ops mlxsw_sp_mr_table_ops_arr[] = {
872 .is_route_valid = mlxsw_sp_mr_route4_validate,
873 .key_create = mlxsw_sp_mr_route4_key,
874 .is_route_starg = mlxsw_sp_mr_route4_starg,
877 .is_route_valid = mlxsw_sp_mr_route6_validate,
878 .key_create = mlxsw_sp_mr_route6_key,
879 .is_route_starg = mlxsw_sp_mr_route6_starg,
/* Create a multicast routing table for one virtual router and protocol:
 * allocate the table (with trailing catchall_route_priv storage), init
 * the route hash table and per-VIF lists, install the lowest-priority
 * catch-all TRAP route in hardware, and add the table to mr->table_list.
 * Returns the table or ERR_PTR; errors unwind via the labels below.
 */
884 struct mlxsw_sp_mr_table *mlxsw_sp_mr_table_create(struct mlxsw_sp *mlxsw_sp,
886 enum mlxsw_sp_l3proto proto)
888 struct mlxsw_sp_mr_route_params catchall_route_params = {
889 .prio = MLXSW_SP_MR_ROUTE_PRIO_CATCHALL,
895 .route_action = MLXSW_SP_MR_ROUTE_ACTION_TRAP,
898 struct mlxsw_sp_mr *mr = mlxsw_sp->mr;
899 struct mlxsw_sp_mr_table *mr_table;
903 mr_table = kzalloc(sizeof(*mr_table) + mr->mr_ops->route_priv_size,
906 return ERR_PTR(-ENOMEM);
908 mr_table->vr_id = vr_id;
909 mr_table->mlxsw_sp = mlxsw_sp;
910 mr_table->proto = proto;
911 mr_table->ops = &mlxsw_sp_mr_table_ops_arr[proto];
912 INIT_LIST_HEAD(&mr_table->route_list);
914 err = rhashtable_init(&mr_table->route_ht,
915 &mlxsw_sp_mr_route_ht_params);
917 goto err_route_rhashtable_init;
919 for (i = 0; i < MAXVIFS; i++) {
920 INIT_LIST_HEAD(&mr_table->vifs[i].route_evif_list);
921 INIT_LIST_HEAD(&mr_table->vifs[i].route_ivif_list);
922 mr_table->vifs[i].ops = &mlxsw_sp_mr_vif_ops_arr[proto];
/* Install the table's catch-all TRAP route in hardware */
925 err = mr->mr_ops->route_create(mlxsw_sp, mr->priv,
926 mr_table->catchall_route_priv,
927 &catchall_route_params);
929 goto err_ops_route_create;
930 list_add_tail(&mr_table->node, &mr->table_list);
/* Error unwinding */
933 err_ops_route_create:
934 rhashtable_destroy(&mr_table->route_ht);
935 err_route_rhashtable_init:
/* Destroy a (must-be-empty) table: unlink it, remove the catch-all
 * route from hardware and free the route hash table.
 */
940 void mlxsw_sp_mr_table_destroy(struct mlxsw_sp_mr_table *mr_table)
942 struct mlxsw_sp *mlxsw_sp = mr_table->mlxsw_sp;
943 struct mlxsw_sp_mr *mr = mlxsw_sp->mr;
945 WARN_ON(!mlxsw_sp_mr_table_empty(mr_table));
946 list_del(&mr_table->node);
947 mr->mr_ops->route_destroy(mlxsw_sp, mr->priv,
948 &mr_table->catchall_route_priv);
949 rhashtable_destroy(&mr_table->route_ht);
/* Flush a table: delete every offloaded route and detach all VIFs from
 * their devices and RIFs.
 */
953 void mlxsw_sp_mr_table_flush(struct mlxsw_sp_mr_table *mr_table)
955 struct mlxsw_sp_mr_route *mr_route, *tmp;
958 list_for_each_entry_safe(mr_route, tmp, &mr_table->route_list, node)
959 __mlxsw_sp_mr_route_del(mr_table, mr_route);
961 for (i = 0; i < MAXVIFS; i++) {
962 mr_table->vifs[i].dev = NULL;
963 mr_table->vifs[i].rif = NULL;
/* A table is empty when no VIF has a device attached and the route list
 * is empty.
 */
967 bool mlxsw_sp_mr_table_empty(const struct mlxsw_sp_mr_table *mr_table)
971 for (i = 0; i < MAXVIFS; i++)
972 if (mr_table->vifs[i].dev)
974 return list_empty(&mr_table->route_list);
/* Pull packet/byte counters for one route from hardware into the kernel
 * MFC entry; lastuse is refreshed only when the packet count changed.
 * Trapped routes are counted by the kernel itself, so they are skipped.
 */
977 static void mlxsw_sp_mr_route_stats_update(struct mlxsw_sp *mlxsw_sp,
978 struct mlxsw_sp_mr_route *mr_route)
980 struct mlxsw_sp_mr *mr = mlxsw_sp->mr;
983 if (mr_route->route_action == MLXSW_SP_MR_ROUTE_ACTION_TRAP)
986 mr->mr_ops->route_stats(mlxsw_sp, mr_route->route_priv, &packets,
989 if (mr_route->mfc->mfc_un.res.pkt != packets)
990 mr_route->mfc->mfc_un.res.lastuse = jiffies;
991 mr_route->mfc->mfc_un.res.pkt = packets;
992 mr_route->mfc->mfc_un.res.bytes = bytes;
/* Delayed-work handler: sync counters for every route in every table,
 * then re-arm itself for the next interval.
 */
995 static void mlxsw_sp_mr_stats_update(struct work_struct *work)
997 struct mlxsw_sp_mr *mr = container_of(work, struct mlxsw_sp_mr,
998 stats_update_dw.work);
999 struct mlxsw_sp_mr_table *mr_table;
1000 struct mlxsw_sp_mr_route *mr_route;
1001 unsigned long interval;
1004 list_for_each_entry(mr_table, &mr->table_list, node)
1005 list_for_each_entry(mr_route, &mr_table->route_list, node)
1006 mlxsw_sp_mr_route_stats_update(mr_table->mlxsw_sp,
/* Re-schedule the periodic work */
1010 interval = msecs_to_jiffies(MLXSW_SP_MR_ROUTES_COUNTER_UPDATE_INTERVAL);
1011 mlxsw_core_schedule_dw(&mr->stats_update_dw, interval);
/* Initialize multicast-router offload: allocate struct mlxsw_sp_mr with
 * trailing protocol-private storage (mr_ops->priv_size), call the
 * protocol init, and kick off the periodic stats work.
 */
1014 int mlxsw_sp_mr_init(struct mlxsw_sp *mlxsw_sp,
1015 const struct mlxsw_sp_mr_ops *mr_ops)
1017 struct mlxsw_sp_mr *mr;
1018 unsigned long interval;
1021 mr = kzalloc(sizeof(*mr) + mr_ops->priv_size, GFP_KERNEL);
1024 mr->mr_ops = mr_ops;
1026 INIT_LIST_HEAD(&mr->table_list);
1028 err = mr_ops->init(mlxsw_sp, mr->priv);
1032 /* Create the delayed work for counter updates */
1033 INIT_DELAYED_WORK(&mr->stats_update_dw, mlxsw_sp_mr_stats_update);
1034 interval = msecs_to_jiffies(MLXSW_SP_MR_ROUTES_COUNTER_UPDATE_INTERVAL);
1035 mlxsw_core_schedule_dw(&mr->stats_update_dw, interval);
/* Tear down multicast-router offload: stop the stats work (synchronously,
 * so it cannot run after this point) and let the protocol clean up.
 */
1042 void mlxsw_sp_mr_fini(struct mlxsw_sp *mlxsw_sp)
1044 struct mlxsw_sp_mr *mr = mlxsw_sp->mr;
1046 cancel_delayed_work_sync(&mr->stats_update_dw);
1047 mr->mr_ops->fini(mlxsw_sp, mr->priv);