2 * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
33 #include <linux/list.h>
35 #include <linux/ipv6.h>
36 #include <linux/tcp.h>
37 #include <linux/mlx5/flow_table.h>
54 MLX5E_ACTION_NONE = 0,
/* Hash-bucket entry pairing one MAC address (embedded in @ai) with a
 * pending rx-mode action (add/del/none).  The 'action' member referenced
 * by the helpers below is on a line not visible in this extract.
 */
59 struct mlx5e_eth_addr_hash_node {
60 struct hlist_node hlist;
62 struct mlx5e_eth_addr_info ai;
65 static inline int mlx5e_hash_eth_addr(u8 *addr)
/* Insert @addr into @hash.  If the address is already present, only its
 * pending action is reset to NONE (so a later sweep does not delete it).
 * GFP_ATOMIC because the caller holds the netdev address lock (see
 * mlx5e_sync_netdev_addr below).
 */
70 static void mlx5e_add_eth_addr_to_hash(struct hlist_head *hash, u8 *addr)
72 struct mlx5e_eth_addr_hash_node *hn;
73 int ix = mlx5e_hash_eth_addr(addr);
76 hlist_for_each_entry(hn, &hash[ix], hlist)
77 if (ether_addr_equal_64bits(hn->ai.addr, addr)) {
/* already tracked -- keep it alive for this sync cycle */
83 hn->action = MLX5E_ACTION_NONE;
87 hn = kzalloc(sizeof(*hn), GFP_ATOMIC);
/* NOTE(review): the !hn allocation-failure check is on lines not
 * visible in this extract -- TODO confirm.
 */
91 ether_addr_copy(hn->ai.addr, addr);
92 hn->action = MLX5E_ACTION_ADD;
94 hlist_add_head(&hn->hlist, &hash[ix]);
/* Unlink @hn from its hash bucket; the matching kfree() presumably
 * follows on a line not visible in this extract -- TODO confirm.
 */
97 static void mlx5e_del_eth_addr_from_hash(struct mlx5e_eth_addr_hash_node *hn)
99 hlist_del(&hn->hlist);
/* Tear down every hardware flow-table entry previously installed for
 * @ai: each traffic-type bit set in ai->tt_vec has a matching entry
 * index stored in ai->ft_ix[tt], which is removed from the main flow
 * table.  Deletion order is the reverse of installation order in
 * __mlx5e_add_eth_addr_rule() (ESP/AH/TCP/UDP before the generic
 * IPv4/IPv6/ANY entries).
 */
103 static void mlx5e_del_eth_addr_from_flow_table(struct mlx5e_priv *priv,
104 struct mlx5e_eth_addr_info *ai)
106 void *ft = priv->ft.main;
108 if (ai->tt_vec & BIT(MLX5E_TT_IPV6_IPSEC_ESP))
109 mlx5_del_flow_table_entry(ft,
110 ai->ft_ix[MLX5E_TT_IPV6_IPSEC_ESP]);
112 if (ai->tt_vec & BIT(MLX5E_TT_IPV4_IPSEC_ESP))
113 mlx5_del_flow_table_entry(ft,
114 ai->ft_ix[MLX5E_TT_IPV4_IPSEC_ESP]);
116 if (ai->tt_vec & BIT(MLX5E_TT_IPV6_IPSEC_AH))
117 mlx5_del_flow_table_entry(ft,
118 ai->ft_ix[MLX5E_TT_IPV6_IPSEC_AH]);
120 if (ai->tt_vec & BIT(MLX5E_TT_IPV4_IPSEC_AH))
121 mlx5_del_flow_table_entry(ft,
122 ai->ft_ix[MLX5E_TT_IPV4_IPSEC_AH]);
124 if (ai->tt_vec & BIT(MLX5E_TT_IPV6_TCP))
125 mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV6_TCP]);
127 if (ai->tt_vec & BIT(MLX5E_TT_IPV4_TCP))
128 mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV4_TCP]);
130 if (ai->tt_vec & BIT(MLX5E_TT_IPV6_UDP))
131 mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV6_UDP]);
133 if (ai->tt_vec & BIT(MLX5E_TT_IPV4_UDP))
134 mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV4_UDP]);
136 if (ai->tt_vec & BIT(MLX5E_TT_IPV6))
137 mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV6]);
139 if (ai->tt_vec & BIT(MLX5E_TT_IPV4))
140 mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV4]);
142 if (ai->tt_vec & BIT(MLX5E_TT_ANY))
143 mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_ANY]);
/* Classify @addr into unicast, IPv4 multicast (leading byte 0x01),
 * IPv6 multicast (leading byte 0x33) or other multicast.  The full
 * multi-byte prefix comparisons are on lines not visible in this
 * extract -- presumably 01:00:5e and 33:33; TODO confirm.
 */
146 static int mlx5e_get_eth_addr_type(u8 *addr)
148 if (is_unicast_ether_addr(addr))
151 if ((addr[0] == 0x01) &&
155 return MLX5E_MC_IPV4;
157 if ((addr[0] == 0x33) &&
159 return MLX5E_MC_IPV6;
161 return MLX5E_MC_OTHER;
/* Build the bitmask of traffic types (MLX5E_TT_*) for which flow rules
 * must be installed for @ai under match @type.  For MLX5E_FULLMATCH the
 * set further depends on the address class (unicast gets every type;
 * IPv4/IPv6 multicast get their family's subset).  Several case labels,
 * BIT() terms and the return are on lines not visible in this extract.
 */
164 static u32 mlx5e_get_tt_vec(struct mlx5e_eth_addr_info *ai, int type)
170 case MLX5E_FULLMATCH:
171 eth_addr_type = mlx5e_get_eth_addr_type(ai->addr);
172 switch (eth_addr_type) {
/* unicast: install a rule for every traffic type */
175 BIT(MLX5E_TT_IPV4_TCP) |
176 BIT(MLX5E_TT_IPV6_TCP) |
177 BIT(MLX5E_TT_IPV4_UDP) |
178 BIT(MLX5E_TT_IPV6_UDP) |
179 BIT(MLX5E_TT_IPV4_IPSEC_AH) |
180 BIT(MLX5E_TT_IPV6_IPSEC_AH) |
181 BIT(MLX5E_TT_IPV4_IPSEC_ESP) |
182 BIT(MLX5E_TT_IPV6_IPSEC_ESP) |
/* IPv4 multicast subset */
191 BIT(MLX5E_TT_IPV4_UDP) |
/* IPv6 multicast subset */
198 BIT(MLX5E_TT_IPV6_UDP) |
/* MLX5E_ALLMULTI (presumably -- case label not visible): UDP only */
214 BIT(MLX5E_TT_IPV4_UDP) |
215 BIT(MLX5E_TT_IPV6_UDP) |
222 default: /* MLX5E_PROMISC */
224 BIT(MLX5E_TT_IPV4_TCP) |
225 BIT(MLX5E_TT_IPV6_TCP) |
226 BIT(MLX5E_TT_IPV4_UDP) |
227 BIT(MLX5E_TT_IPV6_UDP) |
228 BIT(MLX5E_TT_IPV4_IPSEC_AH) |
229 BIT(MLX5E_TT_IPV6_IPSEC_AH) |
230 BIT(MLX5E_TT_IPV4_IPSEC_ESP) |
231 BIT(MLX5E_TT_IPV6_IPSEC_ESP) |
/* Install one main-flow-table entry per traffic type selected by
 * mlx5e_get_tt_vec(), each forwarding to the matching TIR (tirn[tt]).
 * The returned entry index is saved in ai->ft_ix[tt] and the bit is
 * recorded in ai->tt_vec so mlx5e_del_eth_addr_from_flow_table() can
 * undo it.  @flow_context and @match_criteria are caller-allocated
 * scratch buffers (see mlx5e_add_eth_addr_rule); on a failed add the
 * error-unwind at the bottom removes everything installed so far.
 * Many error-check/goto lines are not visible in this extract.
 */
242 static int __mlx5e_add_eth_addr_rule(struct mlx5e_priv *priv,
243 struct mlx5e_eth_addr_info *ai, int type,
244 void *flow_context, void *match_criteria)
246 u8 match_criteria_enable = 0;
250 u8 *match_criteria_dmac;
251 void *ft = priv->ft.main;
252 u32 *tirn = priv->tirn;
/* common setup: locate match value / dmac / destination fields and
 * program a single forward-to-TIR destination.
 */
257 match_value = MLX5_ADDR_OF(flow_context, flow_context, match_value);
258 dmac = MLX5_ADDR_OF(fte_match_param, match_value,
259 outer_headers.dmac_47_16);
260 match_criteria_dmac = MLX5_ADDR_OF(fte_match_param, match_criteria,
261 outer_headers.dmac_47_16);
262 dest = MLX5_ADDR_OF(flow_context, flow_context, destination);
264 MLX5_SET(flow_context, flow_context, action,
265 MLX5_FLOW_CONTEXT_ACTION_FWD_DEST);
266 MLX5_SET(flow_context, flow_context, destination_list_size, 1);
267 MLX5_SET(dest_format_struct, dest, destination_type,
268 MLX5_FLOW_CONTEXT_DEST_TYPE_TIR);
/* L2 match: exact DMAC for FULLMATCH; multicast bit only for the
 * allmulti case (case label not visible here).
 */
271 case MLX5E_FULLMATCH:
272 match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
273 memset(match_criteria_dmac, 0xff, ETH_ALEN);
274 ether_addr_copy(dmac, ai->addr);
278 match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
279 match_criteria_dmac[0] = 0x01;
287 tt_vec = mlx5e_get_tt_vec(ai, type);
/* catch-all entry (no ethertype match) */
289 ft_ix = &ai->ft_ix[MLX5E_TT_ANY];
290 if (tt_vec & BIT(MLX5E_TT_ANY)) {
291 MLX5_SET(dest_format_struct, dest, destination_id,
293 err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
294 match_criteria, flow_context,
299 ai->tt_vec |= BIT(MLX5E_TT_ANY);
/* from here on, also match on ethertype */
302 match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
303 MLX5_SET_TO_ONES(fte_match_param, match_criteria,
304 outer_headers.ethertype);
/* per-family "any protocol" entries */
306 ft_ix = &ai->ft_ix[MLX5E_TT_IPV4];
307 if (tt_vec & BIT(MLX5E_TT_IPV4)) {
308 MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
310 MLX5_SET(dest_format_struct, dest, destination_id,
311 tirn[MLX5E_TT_IPV4]);
312 err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
313 match_criteria, flow_context,
318 ai->tt_vec |= BIT(MLX5E_TT_IPV4);
321 ft_ix = &ai->ft_ix[MLX5E_TT_IPV6];
322 if (tt_vec & BIT(MLX5E_TT_IPV6)) {
323 MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
325 MLX5_SET(dest_format_struct, dest, destination_id,
326 tirn[MLX5E_TT_IPV6]);
327 err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
328 match_criteria, flow_context,
333 ai->tt_vec |= BIT(MLX5E_TT_IPV6);
/* now also match on ip_protocol; UDP entries first */
336 MLX5_SET_TO_ONES(fte_match_param, match_criteria,
337 outer_headers.ip_protocol);
338 MLX5_SET(fte_match_param, match_value, outer_headers.ip_protocol,
341 ft_ix = &ai->ft_ix[MLX5E_TT_IPV4_UDP];
342 if (tt_vec & BIT(MLX5E_TT_IPV4_UDP)) {
343 MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
345 MLX5_SET(dest_format_struct, dest, destination_id,
346 tirn[MLX5E_TT_IPV4_UDP]);
347 err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
348 match_criteria, flow_context,
353 ai->tt_vec |= BIT(MLX5E_TT_IPV4_UDP);
356 ft_ix = &ai->ft_ix[MLX5E_TT_IPV6_UDP];
357 if (tt_vec & BIT(MLX5E_TT_IPV6_UDP)) {
358 MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
360 MLX5_SET(dest_format_struct, dest, destination_id,
361 tirn[MLX5E_TT_IPV6_UDP]);
362 err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
363 match_criteria, flow_context,
368 ai->tt_vec |= BIT(MLX5E_TT_IPV6_UDP);
/* TCP entries (protocol value on the truncated line below) */
371 MLX5_SET(fte_match_param, match_value, outer_headers.ip_protocol,
374 ft_ix = &ai->ft_ix[MLX5E_TT_IPV4_TCP];
375 if (tt_vec & BIT(MLX5E_TT_IPV4_TCP)) {
376 MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
378 MLX5_SET(dest_format_struct, dest, destination_id,
379 tirn[MLX5E_TT_IPV4_TCP]);
380 err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
381 match_criteria, flow_context,
386 ai->tt_vec |= BIT(MLX5E_TT_IPV4_TCP);
389 ft_ix = &ai->ft_ix[MLX5E_TT_IPV6_TCP];
390 if (tt_vec & BIT(MLX5E_TT_IPV6_TCP)) {
391 MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
393 MLX5_SET(dest_format_struct, dest, destination_id,
394 tirn[MLX5E_TT_IPV6_TCP]);
395 err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
396 match_criteria, flow_context,
401 ai->tt_vec |= BIT(MLX5E_TT_IPV6_TCP);
/* IPsec AH entries */
404 MLX5_SET(fte_match_param, match_value, outer_headers.ip_protocol,
407 ft_ix = &ai->ft_ix[MLX5E_TT_IPV4_IPSEC_AH];
408 if (tt_vec & BIT(MLX5E_TT_IPV4_IPSEC_AH)) {
409 MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
411 MLX5_SET(dest_format_struct, dest, destination_id,
412 tirn[MLX5E_TT_IPV4_IPSEC_AH]);
413 err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
414 match_criteria, flow_context,
419 ai->tt_vec |= BIT(MLX5E_TT_IPV4_IPSEC_AH);
422 ft_ix = &ai->ft_ix[MLX5E_TT_IPV6_IPSEC_AH];
423 if (tt_vec & BIT(MLX5E_TT_IPV6_IPSEC_AH)) {
424 MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
426 MLX5_SET(dest_format_struct, dest, destination_id,
427 tirn[MLX5E_TT_IPV6_IPSEC_AH]);
428 err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
429 match_criteria, flow_context,
434 ai->tt_vec |= BIT(MLX5E_TT_IPV6_IPSEC_AH);
/* IPsec ESP entries */
437 MLX5_SET(fte_match_param, match_value, outer_headers.ip_protocol,
440 ft_ix = &ai->ft_ix[MLX5E_TT_IPV4_IPSEC_ESP];
441 if (tt_vec & BIT(MLX5E_TT_IPV4_IPSEC_ESP)) {
442 MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
444 MLX5_SET(dest_format_struct, dest, destination_id,
445 tirn[MLX5E_TT_IPV4_IPSEC_ESP]);
446 err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
447 match_criteria, flow_context,
452 ai->tt_vec |= BIT(MLX5E_TT_IPV4_IPSEC_ESP);
455 ft_ix = &ai->ft_ix[MLX5E_TT_IPV6_IPSEC_ESP];
456 if (tt_vec & BIT(MLX5E_TT_IPV6_IPSEC_ESP)) {
457 MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
459 MLX5_SET(dest_format_struct, dest, destination_id,
460 tirn[MLX5E_TT_IPV6_IPSEC_ESP]);
461 err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
462 match_criteria, flow_context,
467 ai->tt_vec |= BIT(MLX5E_TT_IPV6_IPSEC_ESP);
/* error unwind: remove every entry installed above */
473 mlx5e_del_eth_addr_from_flow_table(priv, ai);
/* Allocate the flow-context (plus one destination struct) and
 * match-criteria scratch buffers, delegate rule programming to
 * __mlx5e_add_eth_addr_rule(), and free the buffers on every path.
 * mlx5_vzalloc() is used because fte_match_param is large.
 */
478 static int mlx5e_add_eth_addr_rule(struct mlx5e_priv *priv,
479 struct mlx5e_eth_addr_info *ai, int type)
485 flow_context = mlx5_vzalloc(MLX5_ST_SZ_BYTES(flow_context) +
486 MLX5_ST_SZ_BYTES(dest_format_struct));
487 match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
488 if (!flow_context || !match_criteria) {
489 netdev_err(priv->netdev, "%s: alloc failed\n", __func__);
491 goto add_eth_addr_rule_out;
494 err = __mlx5e_add_eth_addr_rule(priv, ai, type, flow_context,
497 netdev_err(priv->netdev, "%s: failed\n", __func__);
499 add_eth_addr_rule_out:
/* kvfree() handles the vmalloc-or-kmalloc result of mlx5_vzalloc() */
500 kvfree(match_criteria);
501 kvfree(flow_context);
/* Kinds of VLAN flow rules: match untagged frames, any tagged VID,
 * or one specific VID.
 */
505 enum mlx5e_vlan_rule_type {
506 MLX5E_VLAN_RULE_TYPE_UNTAGGED,
507 MLX5E_VLAN_RULE_TYPE_ANY_VID,
508 MLX5E_VLAN_RULE_TYPE_MATCH_VID,
/* Install one entry in the VLAN flow table that forwards matching
 * traffic to the main flow table.  @rule_type selects untagged / any
 * VID / a specific @vid; the resulting entry index is stored in the
 * corresponding priv->vlan.*_ft_ix slot so it can be deleted later.
 */
511 static int mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
512 enum mlx5e_vlan_rule_type rule_type, u16 vid)
514 u8 match_criteria_enable = 0;
522 flow_context = mlx5_vzalloc(MLX5_ST_SZ_BYTES(flow_context) +
523 MLX5_ST_SZ_BYTES(dest_format_struct));
524 match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
525 if (!flow_context || !match_criteria) {
526 netdev_err(priv->netdev, "%s: alloc failed\n", __func__);
528 goto add_vlan_rule_out;
530 match_value = MLX5_ADDR_OF(flow_context, flow_context, match_value);
531 dest = MLX5_ADDR_OF(flow_context, flow_context, destination);
/* destination: forward to the main flow table (not a TIR) */
533 MLX5_SET(flow_context, flow_context, action,
534 MLX5_FLOW_CONTEXT_ACTION_FWD_DEST);
535 MLX5_SET(flow_context, flow_context, destination_list_size, 1);
536 MLX5_SET(dest_format_struct, dest, destination_type,
537 MLX5_FLOW_CONTEXT_DEST_TYPE_FLOW_TABLE);
538 MLX5_SET(dest_format_struct, dest, destination_id,
539 mlx5_get_flow_table_id(priv->ft.main));
/* all rule types match on the vlan_tag presence bit */
541 match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
542 MLX5_SET_TO_ONES(fte_match_param, match_criteria,
543 outer_headers.vlan_tag);
546 case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
547 ft_ix = &priv->vlan.untagged_rule_ft_ix;
549 case MLX5E_VLAN_RULE_TYPE_ANY_VID:
550 ft_ix = &priv->vlan.any_vlan_rule_ft_ix;
551 MLX5_SET(fte_match_param, match_value, outer_headers.vlan_tag,
554 default: /* MLX5E_VLAN_RULE_TYPE_MATCH_VID */
555 ft_ix = &priv->vlan.active_vlans_ft_ix[vid];
556 MLX5_SET(fte_match_param, match_value, outer_headers.vlan_tag,
/* additionally match the exact first VID */
558 MLX5_SET_TO_ONES(fte_match_param, match_criteria,
559 outer_headers.first_vid);
560 MLX5_SET(fte_match_param, match_value, outer_headers.first_vid,
565 err = mlx5_add_flow_table_entry(priv->ft.vlan, match_criteria_enable,
566 match_criteria, flow_context, ft_ix);
568 netdev_err(priv->netdev, "%s: failed\n", __func__);
571 kvfree(match_criteria);
572 kvfree(flow_context);
/* Remove the VLAN flow-table entry installed by mlx5e_add_vlan_rule()
 * for the given @rule_type (and @vid for MATCH_VID), using the entry
 * index previously stored in priv->vlan.
 */
576 static void mlx5e_del_vlan_rule(struct mlx5e_priv *priv,
577 enum mlx5e_vlan_rule_type rule_type, u16 vid)
580 case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
581 mlx5_del_flow_table_entry(priv->ft.vlan,
582 priv->vlan.untagged_rule_ft_ix);
584 case MLX5E_VLAN_RULE_TYPE_ANY_VID:
585 mlx5_del_flow_table_entry(priv->ft.vlan,
586 priv->vlan.any_vlan_rule_ft_ix);
588 case MLX5E_VLAN_RULE_TYPE_MATCH_VID:
589 mlx5_del_flow_table_entry(priv->ft.vlan,
590 priv->vlan.active_vlans_ft_ix[vid]);
/* Re-enable VLAN filtering: no-op if the filter is already enabled;
 * otherwise drop the catch-all any-VID rule.  When the device is in
 * promiscuous mode the rule is kept (the branch body after the
 * IFF_PROMISC test -- presumably a return -- is not visible here).
 */
595 void mlx5e_enable_vlan_filter(struct mlx5e_priv *priv)
597 if (!priv->vlan.filter_disabled)
600 priv->vlan.filter_disabled = false;
601 if (priv->netdev->flags & IFF_PROMISC)
603 mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, 0);
/* Disable VLAN filtering: no-op if already disabled; otherwise add the
 * catch-all any-VID rule.  In promiscuous mode the rule already exists
 * (the branch body after the IFF_PROMISC test -- presumably a return --
 * is not visible here).
 */
606 void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv)
608 if (priv->vlan.filter_disabled)
611 priv->vlan.filter_disabled = true;
612 if (priv->netdev->flags & IFF_PROMISC)
614 mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, 0);
/* ndo_vlan_rx_add_vid callback: install a MATCH_VID rule for @vid. */
617 int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto,
620 struct mlx5e_priv *priv = netdev_priv(dev);
622 return mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid);
/* ndo_vlan_rx_kill_vid callback: remove the MATCH_VID rule for @vid. */
625 int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __always_unused __be16 proto,
628 struct mlx5e_priv *priv = netdev_priv(dev);
630 mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid);
/* Walk every entry in every bucket of an eth-addr hash table; the
 * _safe hlist variant (with @tmp) permits deleting the current entry
 * while iterating.
 */
635 #define mlx5e_for_each_hash_node(hn, tmp, hash, i) \
636 for (i = 0; i < MLX5E_ETH_ADDR_HASH_SIZE; i++) \
637 hlist_for_each_entry_safe(hn, tmp, &hash[i], hlist)
/* Apply the pending action recorded on a hash node: ADD installs the
 * full-match flow rules for the address and clears the action; DEL
 * removes the flow rules and drops the node from the hash.
 */
639 static void mlx5e_execute_action(struct mlx5e_priv *priv,
640 struct mlx5e_eth_addr_hash_node *hn)
642 switch (hn->action) {
643 case MLX5E_ACTION_ADD:
644 mlx5e_add_eth_addr_rule(priv, &hn->ai, MLX5E_FULLMATCH);
645 hn->action = MLX5E_ACTION_NONE;
648 case MLX5E_ACTION_DEL:
649 mlx5e_del_eth_addr_from_flow_table(priv, &hn->ai);
650 mlx5e_del_eth_addr_from_hash(hn);
/* Snapshot the netdev's address lists into the driver's UC/MC hash
 * tables: the device's own MAC plus every UC address goes into
 * netdev_uc, every MC address into netdev_mc.  Holds the netdev
 * address lock (BH-safe) while iterating, which is why
 * mlx5e_add_eth_addr_to_hash() must allocate with GFP_ATOMIC.
 */
655 static void mlx5e_sync_netdev_addr(struct mlx5e_priv *priv)
657 struct net_device *netdev = priv->netdev;
658 struct netdev_hw_addr *ha;
660 netif_addr_lock_bh(netdev);
662 mlx5e_add_eth_addr_to_hash(priv->eth_addr.netdev_uc,
663 priv->netdev->dev_addr);
665 netdev_for_each_uc_addr(ha, netdev)
666 mlx5e_add_eth_addr_to_hash(priv->eth_addr.netdev_uc, ha->addr);
668 netdev_for_each_mc_addr(ha, netdev)
669 mlx5e_add_eth_addr_to_hash(priv->eth_addr.netdev_mc, ha->addr);
671 netif_addr_unlock_bh(netdev);
/* Execute the pending add/del action of every node in both the UC and
 * MC hash tables (safe iteration: DEL removes nodes in place).
 */
674 static void mlx5e_apply_netdev_addr(struct mlx5e_priv *priv)
676 struct mlx5e_eth_addr_hash_node *hn;
677 struct hlist_node *tmp;
680 mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.netdev_uc, i)
681 mlx5e_execute_action(priv, hn);
683 mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.netdev_mc, i)
684 mlx5e_execute_action(priv, hn);
/* Mark-and-sweep reconciliation of the hardware address filters with
 * the netdev lists: mark every tracked address for deletion, re-sync
 * from the netdev (which flips still-present addresses back to
 * NONE/ADD), then apply the resulting actions.  When the interface is
 * being destroyed the sync step is skipped, so every address is
 * deleted.
 */
687 static void mlx5e_handle_netdev_addr(struct mlx5e_priv *priv)
689 struct mlx5e_eth_addr_hash_node *hn;
690 struct hlist_node *tmp;
693 mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.netdev_uc, i)
694 hn->action = MLX5E_ACTION_DEL;
695 mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.netdev_mc, i)
696 hn->action = MLX5E_ACTION_DEL;
698 if (!test_bit(MLX5E_STATE_DESTROYING, &priv->state))
699 mlx5e_sync_netdev_addr(priv);
701 mlx5e_apply_netdev_addr(priv);
/* Deferred rx-mode worker: compute the desired promisc / allmulti /
 * broadcast state from the netdev flags (all forced off while
 * destroying), diff it against the cached state in priv->eth_addr,
 * enable new modes before syncing the address lists and disable old
 * modes after, then record the new state.  Promiscuous mode also
 * toggles the any-VID VLAN rule when VLAN filtering is enabled.
 */
704 void mlx5e_set_rx_mode_work(struct work_struct *work)
706 struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
709 struct mlx5e_eth_addr_db *ea = &priv->eth_addr;
710 struct net_device *ndev = priv->netdev;
712 bool rx_mode_enable = !test_bit(MLX5E_STATE_DESTROYING, &priv->state);
713 bool promisc_enabled = rx_mode_enable && (ndev->flags & IFF_PROMISC);
714 bool allmulti_enabled = rx_mode_enable && (ndev->flags & IFF_ALLMULTI);
715 bool broadcast_enabled = rx_mode_enable;
717 bool enable_promisc = !ea->promisc_enabled && promisc_enabled;
718 bool disable_promisc = ea->promisc_enabled && !promisc_enabled;
719 bool enable_allmulti = !ea->allmulti_enabled && allmulti_enabled;
720 bool disable_allmulti = ea->allmulti_enabled && !allmulti_enabled;
721 bool enable_broadcast = !ea->broadcast_enabled && broadcast_enabled;
722 bool disable_broadcast = ea->broadcast_enabled && !broadcast_enabled;
724 if (enable_promisc) {
725 mlx5e_add_eth_addr_rule(priv, &ea->promisc, MLX5E_PROMISC);
726 if (!priv->vlan.filter_disabled)
727 mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID,
/* enable_allmulti check presumably on the line not visible here */
731 mlx5e_add_eth_addr_rule(priv, &ea->allmulti, MLX5E_ALLMULTI);
732 if (enable_broadcast)
733 mlx5e_add_eth_addr_rule(priv, &ea->broadcast, MLX5E_FULLMATCH);
735 mlx5e_handle_netdev_addr(priv);
737 if (disable_broadcast)
738 mlx5e_del_eth_addr_from_flow_table(priv, &ea->broadcast);
739 if (disable_allmulti)
740 mlx5e_del_eth_addr_from_flow_table(priv, &ea->allmulti);
741 if (disable_promisc) {
742 if (!priv->vlan.filter_disabled)
743 mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID,
745 mlx5e_del_eth_addr_from_flow_table(priv, &ea->promisc);
/* remember the applied state for the next diff */
748 ea->promisc_enabled = promisc_enabled;
749 ea->allmulti_enabled = allmulti_enabled;
750 ea->broadcast_enabled = broadcast_enabled;
/* Seed the cached broadcast address info with the netdev's broadcast
 * MAC so the broadcast flow rule can be installed later.
 */
753 void mlx5e_init_eth_addr(struct mlx5e_priv *priv)
755 ether_addr_copy(priv->eth_addr.broadcast.addr, priv->netdev->broadcast);
/* Create the main receive flow table with 9 match-criteria groups of
 * decreasing specificity: groups 0-2 match on outer headers without a
 * DMAC (ethertype+ip_protocol / ethertype / catch-all -- g[2] setup is
 * not visible here), groups 3-5 add a full DMAC match, and groups 6-8
 * match only the multicast DMAC bit (the partial-dmac memset lines for
 * g[6..8] are not visible in this extract).  The group array is a
 * template consumed by mlx5_create_flow_table(); freeing it and the
 * kcalloc-failure check are on lines not visible here.
 */
758 static int mlx5e_create_main_flow_table(struct mlx5e_priv *priv)
760 struct mlx5_flow_table_group *g;
763 g = kcalloc(9, sizeof(*g), GFP_KERNEL);
768 g[0].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
769 MLX5_SET_TO_ONES(fte_match_param, g[0].match_criteria,
770 outer_headers.ethertype);
771 MLX5_SET_TO_ONES(fte_match_param, g[0].match_criteria,
772 outer_headers.ip_protocol);
775 g[1].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
776 MLX5_SET_TO_ONES(fte_match_param, g[1].match_criteria,
777 outer_headers.ethertype);
782 g[3].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
783 dmac = MLX5_ADDR_OF(fte_match_param, g[3].match_criteria,
784 outer_headers.dmac_47_16);
785 memset(dmac, 0xff, ETH_ALEN);
786 MLX5_SET_TO_ONES(fte_match_param, g[3].match_criteria,
787 outer_headers.ethertype);
788 MLX5_SET_TO_ONES(fte_match_param, g[3].match_criteria,
789 outer_headers.ip_protocol);
792 g[4].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
793 dmac = MLX5_ADDR_OF(fte_match_param, g[4].match_criteria,
794 outer_headers.dmac_47_16);
795 memset(dmac, 0xff, ETH_ALEN);
796 MLX5_SET_TO_ONES(fte_match_param, g[4].match_criteria,
797 outer_headers.ethertype);
800 g[5].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
801 dmac = MLX5_ADDR_OF(fte_match_param, g[5].match_criteria,
802 outer_headers.dmac_47_16);
803 memset(dmac, 0xff, ETH_ALEN);
806 g[6].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
807 dmac = MLX5_ADDR_OF(fte_match_param, g[6].match_criteria,
808 outer_headers.dmac_47_16);
810 MLX5_SET_TO_ONES(fte_match_param, g[6].match_criteria,
811 outer_headers.ethertype);
812 MLX5_SET_TO_ONES(fte_match_param, g[6].match_criteria,
813 outer_headers.ip_protocol);
816 g[7].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
817 dmac = MLX5_ADDR_OF(fte_match_param, g[7].match_criteria,
818 outer_headers.dmac_47_16);
820 MLX5_SET_TO_ONES(fte_match_param, g[7].match_criteria,
821 outer_headers.ethertype);
824 g[8].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
825 dmac = MLX5_ADDR_OF(fte_match_param, g[8].match_criteria,
826 outer_headers.dmac_47_16);
828 priv->ft.main = mlx5_create_flow_table(priv->mdev, 1,
829 MLX5_FLOW_TABLE_TYPE_NIC_RCV,
833 return priv->ft.main ? 0 : -ENOMEM;
/* Destroy the main receive flow table created above. */
836 static void mlx5e_destroy_main_flow_table(struct mlx5e_priv *priv)
838 mlx5_destroy_flow_table(priv->ft.main);
/* Create the VLAN flow table (level 0, ahead of the main table) with
 * two groups: exact vlan_tag+first_vid matches, and vlan_tag-only for
 * the untagged / any-VID rules.  The kcalloc-failure check and the
 * kfree of @g are on lines not visible in this extract.
 */
841 static int mlx5e_create_vlan_flow_table(struct mlx5e_priv *priv)
843 struct mlx5_flow_table_group *g;
845 g = kcalloc(2, sizeof(*g), GFP_KERNEL);
850 g[0].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
851 MLX5_SET_TO_ONES(fte_match_param, g[0].match_criteria,
852 outer_headers.vlan_tag);
853 MLX5_SET_TO_ONES(fte_match_param, g[0].match_criteria,
854 outer_headers.first_vid);
856 /* untagged + any vlan id */
858 g[1].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
859 MLX5_SET_TO_ONES(fte_match_param, g[1].match_criteria,
860 outer_headers.vlan_tag);
862 priv->ft.vlan = mlx5_create_flow_table(priv->mdev, 0,
863 MLX5_FLOW_TABLE_TYPE_NIC_RCV,
867 return priv->ft.vlan ? 0 : -ENOMEM;
/* Destroy the VLAN flow table created above. */
870 static void mlx5e_destroy_vlan_flow_table(struct mlx5e_priv *priv)
872 mlx5_destroy_flow_table(priv->ft.vlan)&#59;
/* Bring up the full flow-table hierarchy: main table first, then the
 * VLAN table that forwards into it, then the default untagged rule.
 * Uses goto-based unwind so each failure tears down what was built.
 */
875 int mlx5e_create_flow_tables(struct mlx5e_priv *priv)
879 err = mlx5e_create_main_flow_table(priv);
883 err = mlx5e_create_vlan_flow_table(priv);
885 goto err_destroy_main_flow_table;
887 err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
889 goto err_destroy_vlan_flow_table;
893 err_destroy_vlan_flow_table:
894 mlx5e_destroy_vlan_flow_table(priv);
896 err_destroy_main_flow_table:
897 mlx5e_destroy_main_flow_table(priv);
/* Tear down in reverse creation order: untagged rule, VLAN table,
 * main table.
 */
902 void mlx5e_destroy_flow_tables(struct mlx5e_priv *priv)
904 mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
905 mlx5e_destroy_vlan_flow_table(priv);
906 mlx5e_destroy_main_flow_table(priv);