2 * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
33 #include <linux/etherdevice.h>
34 #include <linux/mlx5/driver.h>
35 #include <linux/mlx5/mlx5_ifc.h>
36 #include <linux/mlx5/vport.h>
37 #include <linux/mlx5/fs.h>
38 #include "mlx5_core.h"
41 #define UPLINK_VPORT 0xFFFF
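/* 0xFFFF is the special vport number used to address the uplink (physical
 * port) when installing FDB forwarding rules, e.g. for multicast traffic
 * forwarded out of the NIC and for the promiscuous source_port match below.
 */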
49 /* E-Switch UC L2 table hash node */
51 struct l2addr_node node;
56 /* E-Switch MC FDB table hash node */
57 struct esw_mc_addr { /* SRIOV only */
58 struct l2addr_node node;
59 struct mlx5_flow_rule *uplink_rule; /* Forward to uplink rule */
63 /* Vport UC/MC hash node */
65 struct l2addr_node node;
68 struct mlx5_flow_rule *flow_rule; /* SRIOV only */
69 /* A flag indicating that the mac was added due to an mc-promiscuous vport */
74 UC_ADDR_CHANGE = BIT(0),
75 MC_ADDR_CHANGE = BIT(1),
76 PROMISC_CHANGE = BIT(3),
79 /* Vport context events */
80 #define SRIOV_VPORT_EVENTS (UC_ADDR_CHANGE | MC_ADDR_CHANGE | PROMISC_CHANGE)
84 static int arm_vport_context_events_cmd(struct mlx5_core_dev *dev, u16 vport,
87 int in[MLX5_ST_SZ_DW(modify_nic_vport_context_in)] = {0};
88 int out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)] = {0};
91 MLX5_SET(modify_nic_vport_context_in, in,
92 opcode, MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
93 MLX5_SET(modify_nic_vport_context_in, in, field_select.change_event, 1);
94 MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
95 MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);
96 nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in,
97 in, nic_vport_context);
99 MLX5_SET(nic_vport_context, nic_vport_ctx, arm_change_event, 1);
101 if (events_mask & UC_ADDR_CHANGE)
102 MLX5_SET(nic_vport_context, nic_vport_ctx,
103 event_on_uc_address_change, 1);
104 if (events_mask & MC_ADDR_CHANGE)
105 MLX5_SET(nic_vport_context, nic_vport_ctx,
106 event_on_mc_address_change, 1);
107 if (events_mask & PROMISC_CHANGE)
108 MLX5_SET(nic_vport_context, nic_vport_ctx,
109 event_on_promisc_change, 1);
111 return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
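/* The change event is re-armed after every handled notification (see the end
 * of esw_vport_change_handle_locked()), so the driver keeps receiving vport
 * context events for UC/MC address and promiscuity changes it asked for.
 */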
114 /* E-Switch vport context HW commands */
115 static int modify_esw_vport_context_cmd(struct mlx5_core_dev *dev, u16 vport,
118 u32 out[MLX5_ST_SZ_DW(modify_esw_vport_context_out)] = {0};
120 MLX5_SET(modify_esw_vport_context_in, in, opcode,
121 MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT);
122 MLX5_SET(modify_esw_vport_context_in, in, vport_number, vport);
123 MLX5_SET(modify_esw_vport_context_in, in, other_vport, 1);
124 return mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
127 static int modify_esw_vport_cvlan(struct mlx5_core_dev *dev, u32 vport,
128 u16 vlan, u8 qos, u8 set_flags)
130 u32 in[MLX5_ST_SZ_DW(modify_esw_vport_context_in)] = {0};
132 if (!MLX5_CAP_ESW(dev, vport_cvlan_strip) ||
133 !MLX5_CAP_ESW(dev, vport_cvlan_insert_if_not_exist))
136 esw_debug(dev, "Set Vport[%d] VLAN %d qos %d set=%x\n",
137 vport, vlan, qos, set_flags);
139 if (set_flags & SET_VLAN_STRIP)
140 MLX5_SET(modify_esw_vport_context_in, in,
141 esw_vport_context.vport_cvlan_strip, 1);
143 if (set_flags & SET_VLAN_INSERT) {
144 /* insert only if no vlan in packet */
145 MLX5_SET(modify_esw_vport_context_in, in,
146 esw_vport_context.vport_cvlan_insert, 1);
148 MLX5_SET(modify_esw_vport_context_in, in,
149 esw_vport_context.cvlan_pcp, qos);
150 MLX5_SET(modify_esw_vport_context_in, in,
151 esw_vport_context.cvlan_id, vlan);
154 MLX5_SET(modify_esw_vport_context_in, in,
155 field_select.vport_cvlan_strip, 1);
156 MLX5_SET(modify_esw_vport_context_in, in,
157 field_select.vport_cvlan_insert, 1);
159 return modify_esw_vport_context_cmd(dev, vport, in, sizeof(in));
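/* Helper used for VST (see __mlx5_eswitch_set_vport_vlan()): SET_VLAN_INSERT
 * programs cvlan_id/cvlan_pcp to be inserted when a packet carries no VLAN,
 * and SET_VLAN_STRIP enables cvlan stripping on the vport.
 */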
162 /* HW L2 Table (MPFS) management */
163 static int set_l2_table_entry_cmd(struct mlx5_core_dev *dev, u32 index,
164 u8 *mac, u8 vlan_valid, u16 vlan)
166 u32 in[MLX5_ST_SZ_DW(set_l2_table_entry_in)] = {0};
167 u32 out[MLX5_ST_SZ_DW(set_l2_table_entry_out)] = {0};
170 MLX5_SET(set_l2_table_entry_in, in, opcode,
171 MLX5_CMD_OP_SET_L2_TABLE_ENTRY);
172 MLX5_SET(set_l2_table_entry_in, in, table_index, index);
173 MLX5_SET(set_l2_table_entry_in, in, vlan_valid, vlan_valid);
174 MLX5_SET(set_l2_table_entry_in, in, vlan, vlan);
176 in_mac_addr = MLX5_ADDR_OF(set_l2_table_entry_in, in, mac_address);
177 ether_addr_copy(&in_mac_addr[2], mac);
179 return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
182 static int del_l2_table_entry_cmd(struct mlx5_core_dev *dev, u32 index)
184 u32 in[MLX5_ST_SZ_DW(delete_l2_table_entry_in)] = {0};
185 u32 out[MLX5_ST_SZ_DW(delete_l2_table_entry_out)] = {0};
187 MLX5_SET(delete_l2_table_entry_in, in, opcode,
188 MLX5_CMD_OP_DELETE_L2_TABLE_ENTRY);
189 MLX5_SET(delete_l2_table_entry_in, in, table_index, index);
190 return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
193 static int alloc_l2_table_index(struct mlx5_l2_table *l2_table, u32 *ix)
197 *ix = find_first_zero_bit(l2_table->bitmap, l2_table->size);
198 if (*ix >= l2_table->size)
201 __set_bit(*ix, l2_table->bitmap);
206 static void free_l2_table_index(struct mlx5_l2_table *l2_table, u32 ix)
208 __clear_bit(ix, l2_table->bitmap);
211 static int set_l2_table_entry(struct mlx5_core_dev *dev, u8 *mac,
212 u8 vlan_valid, u16 vlan,
215 struct mlx5_l2_table *l2_table = &dev->priv.eswitch->l2_table;
218 err = alloc_l2_table_index(l2_table, index);
222 err = set_l2_table_entry_cmd(dev, *index, mac, vlan_valid, vlan);
224 free_l2_table_index(l2_table, *index);
229 static void del_l2_table_entry(struct mlx5_core_dev *dev, u32 index)
231 struct mlx5_l2_table *l2_table = &dev->priv.eswitch->l2_table;
233 del_l2_table_entry_cmd(dev, index);
234 free_l2_table_index(l2_table, index);
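/* Legacy FDB rule helpers, all built on __esw_fdb_set_vport_rule() and all
 * forwarding to a MLX5_FLOW_DESTINATION_TYPE_VPORT destination:
 *  - esw_fdb_set_vport_rule():          exact DMAC match -> vport
 *  - esw_fdb_set_vport_allmulti_rule(): forwards multicast traffic -> vport
 *  - esw_fdb_set_vport_promisc_rule():  rx rule matching traffic whose
 *                                       source_port is UPLINK_VPORT
 */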
238 static struct mlx5_flow_rule *
239 __esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u32 vport, bool rx_rule,
240 u8 mac_c[ETH_ALEN], u8 mac_v[ETH_ALEN])
242 int match_header = (is_zero_ether_addr(mac_c) ? 0 :
243 MLX5_MATCH_OUTER_HEADERS);
244 struct mlx5_flow_rule *flow_rule = NULL;
245 struct mlx5_flow_destination dest;
246 struct mlx5_flow_spec *spec;
247 void *mv_misc = NULL;
248 void *mc_misc = NULL;
253 match_header |= MLX5_MATCH_MISC_PARAMETERS;
255 spec = mlx5_vzalloc(sizeof(*spec));
257 esw_warn(esw->dev, "FDB: Failed to alloc match parameters\n");
260 dmac_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
261 outer_headers.dmac_47_16);
262 dmac_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
263 outer_headers.dmac_47_16);
265 if (match_header & MLX5_MATCH_OUTER_HEADERS) {
266 ether_addr_copy(dmac_v, mac_v);
267 ether_addr_copy(dmac_c, mac_c);
270 if (match_header & MLX5_MATCH_MISC_PARAMETERS) {
271 mv_misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
273 mc_misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
275 MLX5_SET(fte_match_set_misc, mv_misc, source_port, UPLINK_VPORT);
276 MLX5_SET_TO_ONES(fte_match_set_misc, mc_misc, source_port);
279 dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
280 dest.vport_num = vport;
283 "\tFDB add rule dmac_v(%pM) dmac_c(%pM) -> vport(%d)\n",
284 dmac_v, dmac_c, vport);
285 spec->match_criteria_enable = match_header;
287 mlx5_add_flow_rule(esw->fdb_table.fdb, spec,
288 MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
290 if (IS_ERR(flow_rule)) {
292 "FDB: Failed to add flow rule: dmac_v(%pM) dmac_c(%pM) -> vport(%d), err(%ld)\n",
293 dmac_v, dmac_c, vport, PTR_ERR(flow_rule));
301 static struct mlx5_flow_rule *
302 esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u8 mac[ETH_ALEN], u32 vport)
306 eth_broadcast_addr(mac_c);
307 return __esw_fdb_set_vport_rule(esw, vport, false, mac_c, mac);
310 static struct mlx5_flow_rule *
311 esw_fdb_set_vport_allmulti_rule(struct mlx5_eswitch *esw, u32 vport)
316 eth_zero_addr(mac_c);
317 eth_zero_addr(mac_v);
320 return __esw_fdb_set_vport_rule(esw, vport, false, mac_c, mac_v);
323 static struct mlx5_flow_rule *
324 esw_fdb_set_vport_promisc_rule(struct mlx5_eswitch *esw, u32 vport)
329 eth_zero_addr(mac_c);
330 eth_zero_addr(mac_v);
331 return __esw_fdb_set_vport_rule(esw, vport, true, mac_c, mac_v);
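/* Legacy FDB layout (matches the group creation below): one large group for
 * exact UC/MC DMAC rules at indices [0, table_size - 3], a single-entry
 * allmulti group at index table_size - 2, and a single-entry promiscuous
 * group (misc source_port match) at index table_size - 1.
 */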
334 static int esw_create_legacy_fdb_table(struct mlx5_eswitch *esw, int nvports)
336 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
337 struct mlx5_core_dev *dev = esw->dev;
338 struct mlx5_flow_namespace *root_ns;
339 struct mlx5_flow_table *fdb;
340 struct mlx5_flow_group *g;
341 void *match_criteria;
347 esw_debug(dev, "Create FDB log_max_size(%d)\n",
348 MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size));
350 root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
352 esw_warn(dev, "Failed to get FDB flow namespace\n");
356 flow_group_in = mlx5_vzalloc(inlen);
359 memset(flow_group_in, 0, inlen);
361 table_size = BIT(MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size));
362 fdb = mlx5_create_flow_table(root_ns, 0, table_size, 0);
365 esw_warn(dev, "Failed to create FDB Table err %d\n", err);
368 esw->fdb_table.fdb = fdb;
370 /* Addresses group : Full match unicast/multicast addresses */
371 MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
372 MLX5_MATCH_OUTER_HEADERS);
373 match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
374 dmac = MLX5_ADDR_OF(fte_match_param, match_criteria, outer_headers.dmac_47_16);
375 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
376 /* Preserve 2 entries for allmulti and promisc rules */
377 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 3);
378 eth_broadcast_addr(dmac);
379 g = mlx5_create_flow_group(fdb, flow_group_in);
382 esw_warn(dev, "Failed to create flow group err(%d)\n", err);
385 esw->fdb_table.legacy.addr_grp = g;
387 /* Allmulti group : One rule that forwards any mcast traffic */
388 MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
389 MLX5_MATCH_OUTER_HEADERS);
390 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, table_size - 2);
391 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 2);
394 g = mlx5_create_flow_group(fdb, flow_group_in);
397 esw_warn(dev, "Failed to create allmulti flow group err(%d)\n", err);
400 esw->fdb_table.legacy.allmulti_grp = g;
402 /* Promiscuous group :
403 * One rule that forwards all unmatched traffic from previous groups
406 MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
407 MLX5_MATCH_MISC_PARAMETERS);
408 MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_port);
409 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, table_size - 1);
410 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 1);
411 g = mlx5_create_flow_group(fdb, flow_group_in);
414 esw_warn(dev, "Failed to create promisc flow group err(%d)\n", err);
417 esw->fdb_table.legacy.promisc_grp = g;
421 if (!IS_ERR_OR_NULL(esw->fdb_table.legacy.allmulti_grp)) {
422 mlx5_destroy_flow_group(esw->fdb_table.legacy.allmulti_grp);
423 esw->fdb_table.legacy.allmulti_grp = NULL;
425 if (!IS_ERR_OR_NULL(esw->fdb_table.legacy.addr_grp)) {
426 mlx5_destroy_flow_group(esw->fdb_table.legacy.addr_grp);
427 esw->fdb_table.legacy.addr_grp = NULL;
429 if (!IS_ERR_OR_NULL(esw->fdb_table.fdb)) {
430 mlx5_destroy_flow_table(esw->fdb_table.fdb);
431 esw->fdb_table.fdb = NULL;
435 kvfree(flow_group_in);
439 static void esw_destroy_legacy_fdb_table(struct mlx5_eswitch *esw)
441 if (!esw->fdb_table.fdb)
444 esw_debug(esw->dev, "Destroy FDB Table\n");
445 mlx5_destroy_flow_group(esw->fdb_table.legacy.promisc_grp);
446 mlx5_destroy_flow_group(esw->fdb_table.legacy.allmulti_grp);
447 mlx5_destroy_flow_group(esw->fdb_table.legacy.addr_grp);
448 mlx5_destroy_flow_table(esw->fdb_table.fdb);
449 esw->fdb_table.fdb = NULL;
450 esw->fdb_table.legacy.addr_grp = NULL;
451 esw->fdb_table.legacy.allmulti_grp = NULL;
452 esw->fdb_table.legacy.promisc_grp = NULL;
455 /* E-Switch vport UC/MC lists management */
456 typedef int (*vport_addr_action)(struct mlx5_eswitch *esw,
457 struct vport_addr *vaddr);
459 static int esw_add_uc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
461 struct hlist_head *hash = esw->l2_table.l2_hash;
462 struct esw_uc_addr *esw_uc;
463 u8 *mac = vaddr->node.addr;
464 u32 vport = vaddr->vport;
467 esw_uc = l2addr_hash_find(hash, mac, struct esw_uc_addr);
470 "Failed to set L2 mac(%pM) for vport(%d), mac is already in use by vport(%d)\n",
471 mac, vport, esw_uc->vport);
475 esw_uc = l2addr_hash_add(hash, mac, struct esw_uc_addr, GFP_KERNEL);
478 esw_uc->vport = vport;
480 err = set_l2_table_entry(esw->dev, mac, 0, 0, &esw_uc->table_index);
484 /* SRIOV is enabled: Forward UC MAC to vport */
485 if (esw->fdb_table.fdb && esw->mode == SRIOV_LEGACY)
486 vaddr->flow_rule = esw_fdb_set_vport_rule(esw, mac, vport);
488 esw_debug(esw->dev, "\tADDED UC MAC: vport[%d] %pM index:%d fr(%p)\n",
489 vport, mac, esw_uc->table_index, vaddr->flow_rule);
492 l2addr_hash_del(esw_uc);
496 static int esw_del_uc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
498 struct hlist_head *hash = esw->l2_table.l2_hash;
499 struct esw_uc_addr *esw_uc;
500 u8 *mac = vaddr->node.addr;
501 u32 vport = vaddr->vport;
503 esw_uc = l2addr_hash_find(hash, mac, struct esw_uc_addr);
504 if (!esw_uc || esw_uc->vport != vport) {
506 "MAC(%pM) doesn't belong to vport (%d)\n",
510 esw_debug(esw->dev, "\tDELETE UC MAC: vport[%d] %pM index:%d fr(%p)\n",
511 vport, mac, esw_uc->table_index, vaddr->flow_rule);
513 del_l2_table_entry(esw->dev, esw_uc->table_index);
515 if (vaddr->flow_rule)
516 mlx5_del_flow_rule(vaddr->flow_rule);
517 vaddr->flow_rule = NULL;
519 l2addr_hash_del(esw_uc);
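/* Propagate an MC address change to every vport that currently has an
 * allmulti rule installed: on ADD, an FDB forwarding rule is created for that
 * vport and the entry is flagged mc_promisc so it does not contribute to the
 * multicast refcount; on DEL, the rule and the hash entry are removed.
 */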
523 static void update_allmulti_vports(struct mlx5_eswitch *esw,
524 struct vport_addr *vaddr,
525 struct esw_mc_addr *esw_mc)
527 u8 *mac = vaddr->node.addr;
530 for (vport_idx = 0; vport_idx < esw->total_vports; vport_idx++) {
531 struct mlx5_vport *vport = &esw->vports[vport_idx];
532 struct hlist_head *vport_hash = vport->mc_list;
533 struct vport_addr *iter_vaddr =
534 l2addr_hash_find(vport_hash,
537 if (IS_ERR_OR_NULL(vport->allmulti_rule) ||
538 vaddr->vport == vport_idx)
540 switch (vaddr->action) {
541 case MLX5_ACTION_ADD:
544 iter_vaddr = l2addr_hash_add(vport_hash, mac,
549 "ALL-MULTI: Failed to add MAC(%pM) to vport[%d] DB\n",
553 iter_vaddr->vport = vport_idx;
554 iter_vaddr->flow_rule =
555 esw_fdb_set_vport_rule(esw,
558 iter_vaddr->mc_promisc = true;
560 case MLX5_ACTION_DEL:
563 mlx5_del_flow_rule(iter_vaddr->flow_rule);
564 l2addr_hash_del(iter_vaddr);
570 static int esw_add_mc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
572 struct hlist_head *hash = esw->mc_table;
573 struct esw_mc_addr *esw_mc;
574 u8 *mac = vaddr->node.addr;
575 u32 vport = vaddr->vport;
577 if (!esw->fdb_table.fdb)
580 esw_mc = l2addr_hash_find(hash, mac, struct esw_mc_addr);
584 esw_mc = l2addr_hash_add(hash, mac, struct esw_mc_addr, GFP_KERNEL);
588 esw_mc->uplink_rule = /* Forward MC MAC to Uplink */
589 esw_fdb_set_vport_rule(esw, mac, UPLINK_VPORT);
591 /* Add this multicast mac to all the mc promiscuous vports */
592 update_allmulti_vports(esw, vaddr, esw_mc);
595 /* If the multicast mac is added as a result of an mc-promiscuous vport,
596 * don't increment the multicast ref count
598 if (!vaddr->mc_promisc)
601 /* Forward MC MAC to vport */
602 vaddr->flow_rule = esw_fdb_set_vport_rule(esw, mac, vport);
604 "\tADDED MC MAC: vport[%d] %pM fr(%p) refcnt(%d) uplinkfr(%p)\n",
605 vport, mac, vaddr->flow_rule,
606 esw_mc->refcnt, esw_mc->uplink_rule);
610 static int esw_del_mc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
612 struct hlist_head *hash = esw->mc_table;
613 struct esw_mc_addr *esw_mc;
614 u8 *mac = vaddr->node.addr;
615 u32 vport = vaddr->vport;
617 if (!esw->fdb_table.fdb)
620 esw_mc = l2addr_hash_find(hash, mac, struct esw_mc_addr);
623 "Failed to find eswitch MC addr for MAC(%pM) vport(%d)",
628 "\tDELETE MC MAC: vport[%d] %pM fr(%p) refcnt(%d) uplinkfr(%p)\n",
629 vport, mac, vaddr->flow_rule, esw_mc->refcnt,
630 esw_mc->uplink_rule);
632 if (vaddr->flow_rule)
633 mlx5_del_flow_rule(vaddr->flow_rule);
634 vaddr->flow_rule = NULL;
636 /* If the multicast mac is added as a result of an mc-promiscuous vport,
637 * don't decrement the multicast ref count.
639 if (vaddr->mc_promisc || (--esw_mc->refcnt > 0))
642 /* Remove this multicast mac from all the mc promiscuous vports */
643 update_allmulti_vports(esw, vaddr, esw_mc);
645 if (esw_mc->uplink_rule)
646 mlx5_del_flow_rule(esw_mc->uplink_rule);
648 l2addr_hash_del(esw_mc);
652 /* Apply vport UC/MC list to HW l2 table and FDB table */
653 static void esw_apply_vport_addr_list(struct mlx5_eswitch *esw,
654 u32 vport_num, int list_type)
656 struct mlx5_vport *vport = &esw->vports[vport_num];
657 bool is_uc = list_type == MLX5_NVPRT_LIST_TYPE_UC;
658 vport_addr_action vport_addr_add;
659 vport_addr_action vport_addr_del;
660 struct vport_addr *addr;
661 struct l2addr_node *node;
662 struct hlist_head *hash;
663 struct hlist_node *tmp;
666 vport_addr_add = is_uc ? esw_add_uc_addr :
668 vport_addr_del = is_uc ? esw_del_uc_addr :
671 hash = is_uc ? vport->uc_list : vport->mc_list;
672 for_each_l2hash_node(node, tmp, hash, hi) {
673 addr = container_of(node, struct vport_addr, node);
674 switch (addr->action) {
675 case MLX5_ACTION_ADD:
676 vport_addr_add(esw, addr);
677 addr->action = MLX5_ACTION_NONE;
679 case MLX5_ACTION_DEL:
680 vport_addr_del(esw, addr);
681 l2addr_hash_del(addr);
687 /* Sync vport UC/MC list from vport context */
688 static void esw_update_vport_addr_list(struct mlx5_eswitch *esw,
689 u32 vport_num, int list_type)
691 struct mlx5_vport *vport = &esw->vports[vport_num];
692 bool is_uc = list_type == MLX5_NVPRT_LIST_TYPE_UC;
693 u8 (*mac_list)[ETH_ALEN];
694 struct l2addr_node *node;
695 struct vport_addr *addr;
696 struct hlist_head *hash;
697 struct hlist_node *tmp;
703 size = is_uc ? MLX5_MAX_UC_PER_VPORT(esw->dev) :
704 MLX5_MAX_MC_PER_VPORT(esw->dev);
706 mac_list = kcalloc(size, ETH_ALEN, GFP_KERNEL);
710 hash = is_uc ? vport->uc_list : vport->mc_list;
712 for_each_l2hash_node(node, tmp, hash, hi) {
713 addr = container_of(node, struct vport_addr, node);
714 addr->action = MLX5_ACTION_DEL;
720 err = mlx5_query_nic_vport_mac_list(esw->dev, vport_num, list_type,
724 esw_debug(esw->dev, "vport[%d] context update %s list size (%d)\n",
725 vport_num, is_uc ? "UC" : "MC", size);
727 for (i = 0; i < size; i++) {
728 if (is_uc && !is_valid_ether_addr(mac_list[i]))
731 if (!is_uc && !is_multicast_ether_addr(mac_list[i]))
734 addr = l2addr_hash_find(hash, mac_list[i], struct vport_addr);
736 addr->action = MLX5_ACTION_NONE;
737 /* If this mac was previously added because of allmulti
738 * promiscuous rx mode, it's now converted to be the original
741 if (addr->mc_promisc) {
742 struct esw_mc_addr *esw_mc =
743 l2addr_hash_find(esw->mc_table,
748 "Failed to MAC(%pM) in mcast DB\n",
753 addr->mc_promisc = false;
758 addr = l2addr_hash_add(hash, mac_list[i], struct vport_addr,
762 "Failed to add MAC(%pM) to vport[%d] DB\n",
763 mac_list[i], vport_num);
766 addr->vport = vport_num;
767 addr->action = MLX5_ACTION_ADD;
773 /* Sync the vport's MC list with all e-switch multicast addresses (allmulti).
774 * Must be called after esw_update_vport_addr_list
776 static void esw_update_vport_mc_promisc(struct mlx5_eswitch *esw, u32 vport_num)
778 struct mlx5_vport *vport = &esw->vports[vport_num];
779 struct l2addr_node *node;
780 struct vport_addr *addr;
781 struct hlist_head *hash;
782 struct hlist_node *tmp;
785 hash = vport->mc_list;
787 for_each_l2hash_node(node, tmp, esw->mc_table, hi) {
788 u8 *mac = node->addr;
790 addr = l2addr_hash_find(hash, mac, struct vport_addr);
792 if (addr->action == MLX5_ACTION_DEL)
793 addr->action = MLX5_ACTION_NONE;
796 addr = l2addr_hash_add(hash, mac, struct vport_addr,
800 "Failed to add allmulti MAC(%pM) to vport[%d] DB\n",
804 addr->vport = vport_num;
805 addr->action = MLX5_ACTION_ADD;
806 addr->mc_promisc = true;
810 /* Apply vport rx mode to HW FDB table */
811 static void esw_apply_vport_rx_mode(struct mlx5_eswitch *esw, u32 vport_num,
812 bool promisc, bool mc_promisc)
814 struct esw_mc_addr *allmulti_addr = esw->mc_promisc;
815 struct mlx5_vport *vport = &esw->vports[vport_num];
817 if (IS_ERR_OR_NULL(vport->allmulti_rule) != mc_promisc)
821 vport->allmulti_rule =
822 esw_fdb_set_vport_allmulti_rule(esw, vport_num);
823 if (!allmulti_addr->uplink_rule)
824 allmulti_addr->uplink_rule =
825 esw_fdb_set_vport_allmulti_rule(esw,
827 allmulti_addr->refcnt++;
828 } else if (vport->allmulti_rule) {
829 mlx5_del_flow_rule(vport->allmulti_rule);
830 vport->allmulti_rule = NULL;
832 if (--allmulti_addr->refcnt > 0)
835 if (allmulti_addr->uplink_rule)
836 mlx5_del_flow_rule(allmulti_addr->uplink_rule);
837 allmulti_addr->uplink_rule = NULL;
841 if (IS_ERR_OR_NULL(vport->promisc_rule) != promisc)
845 vport->promisc_rule = esw_fdb_set_vport_promisc_rule(esw,
847 } else if (vport->promisc_rule) {
848 mlx5_del_flow_rule(vport->promisc_rule);
849 vport->promisc_rule = NULL;
853 /* Sync vport rx mode from vport context */
854 static void esw_update_vport_rx_mode(struct mlx5_eswitch *esw, u32 vport_num)
856 struct mlx5_vport *vport = &esw->vports[vport_num];
862 err = mlx5_query_nic_vport_promisc(esw->dev,
869 esw_debug(esw->dev, "vport[%d] context update rx mode promisc_all=%d, all_multi=%d\n",
870 vport_num, promisc_all, promisc_mc);
872 if (!vport->info.trusted || !vport->enabled) {
878 esw_apply_vport_rx_mode(esw, vport_num, promisc_all,
879 (promisc_all || promisc_mc));
882 static void esw_vport_change_handle_locked(struct mlx5_vport *vport)
884 struct mlx5_core_dev *dev = vport->dev;
885 struct mlx5_eswitch *esw = dev->priv.eswitch;
888 mlx5_query_nic_vport_mac_address(dev, vport->vport, mac);
889 esw_debug(dev, "vport[%d] Context Changed: perm mac: %pM\n",
892 if (vport->enabled_events & UC_ADDR_CHANGE) {
893 esw_update_vport_addr_list(esw, vport->vport,
894 MLX5_NVPRT_LIST_TYPE_UC);
895 esw_apply_vport_addr_list(esw, vport->vport,
896 MLX5_NVPRT_LIST_TYPE_UC);
899 if (vport->enabled_events & MC_ADDR_CHANGE) {
900 esw_update_vport_addr_list(esw, vport->vport,
901 MLX5_NVPRT_LIST_TYPE_MC);
904 if (vport->enabled_events & PROMISC_CHANGE) {
905 esw_update_vport_rx_mode(esw, vport->vport);
906 if (!IS_ERR_OR_NULL(vport->allmulti_rule))
907 esw_update_vport_mc_promisc(esw, vport->vport);
910 if (vport->enabled_events & (PROMISC_CHANGE | MC_ADDR_CHANGE)) {
911 esw_apply_vport_addr_list(esw, vport->vport,
912 MLX5_NVPRT_LIST_TYPE_MC);
915 esw_debug(esw->dev, "vport[%d] Context Changed: Done\n", vport->vport);
917 arm_vport_context_events_cmd(dev, vport->vport,
918 vport->enabled_events);
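/* Work item queued from mlx5_eswitch_vport_event() when a vport context
 * change EQE arrives; it takes esw->state_lock and runs the locked handler
 * above.
 */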
921 static void esw_vport_change_handler(struct work_struct *work)
923 struct mlx5_vport *vport =
924 container_of(work, struct mlx5_vport, vport_change_handler);
925 struct mlx5_eswitch *esw = vport->dev->priv.eswitch;
927 mutex_lock(&esw->state_lock);
928 esw_vport_change_handle_locked(vport);
929 mutex_unlock(&esw->state_lock);
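/* Per-vport ACLs (legacy mode). Note on direction naming: "ingress" here
 * refers to traffic entering the e-switch from the vport (VF transmit), which
 * is why source-MAC spoof checking lives in the ingress ACL, while "egress"
 * covers traffic leaving the e-switch towards the vport.
 */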
932 static int esw_vport_enable_egress_acl(struct mlx5_eswitch *esw,
933 struct mlx5_vport *vport)
935 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
936 struct mlx5_flow_group *vlan_grp = NULL;
937 struct mlx5_flow_group *drop_grp = NULL;
938 struct mlx5_core_dev *dev = esw->dev;
939 struct mlx5_flow_namespace *root_ns;
940 struct mlx5_flow_table *acl;
941 void *match_criteria;
943 /* The egress acl table contains 2 rules:
944 * 1)Allow traffic with vlan_tag=vst_vlan_id
945 * 2)Drop all other traffic.
950 if (!MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support))
953 if (!IS_ERR_OR_NULL(vport->egress.acl))
956 esw_debug(dev, "Create vport[%d] egress ACL log_max_size(%d)\n",
957 vport->vport, MLX5_CAP_ESW_EGRESS_ACL(dev, log_max_ft_size));
959 root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_EGRESS);
961 esw_warn(dev, "Failed to get E-Switch egress flow namespace\n");
965 flow_group_in = mlx5_vzalloc(inlen);
969 acl = mlx5_create_vport_flow_table(root_ns, 0, table_size, 0, vport->vport);
972 esw_warn(dev, "Failed to create E-Switch vport[%d] egress flow Table, err(%d)\n",
977 MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
978 match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
979 MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.vlan_tag);
980 MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.first_vid);
981 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
982 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 0);
984 vlan_grp = mlx5_create_flow_group(acl, flow_group_in);
985 if (IS_ERR(vlan_grp)) {
986 err = PTR_ERR(vlan_grp);
987 esw_warn(dev, "Failed to create E-Switch vport[%d] egress allowed vlans flow group, err(%d)\n",
992 memset(flow_group_in, 0, inlen);
993 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 1);
994 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 1);
995 drop_grp = mlx5_create_flow_group(acl, flow_group_in);
996 if (IS_ERR(drop_grp)) {
997 err = PTR_ERR(drop_grp);
998 esw_warn(dev, "Failed to create E-Switch vport[%d] egress drop flow group, err(%d)\n",
1003 vport->egress.acl = acl;
1004 vport->egress.drop_grp = drop_grp;
1005 vport->egress.allowed_vlans_grp = vlan_grp;
1007 kvfree(flow_group_in);
1008 if (err && !IS_ERR_OR_NULL(vlan_grp))
1009 mlx5_destroy_flow_group(vlan_grp);
1010 if (err && !IS_ERR_OR_NULL(acl))
1011 mlx5_destroy_flow_table(acl);
1015 static void esw_vport_cleanup_egress_rules(struct mlx5_eswitch *esw,
1016 struct mlx5_vport *vport)
1018 if (!IS_ERR_OR_NULL(vport->egress.allowed_vlan))
1019 mlx5_del_flow_rule(vport->egress.allowed_vlan);
1021 if (!IS_ERR_OR_NULL(vport->egress.drop_rule))
1022 mlx5_del_flow_rule(vport->egress.drop_rule);
1024 vport->egress.allowed_vlan = NULL;
1025 vport->egress.drop_rule = NULL;
1028 static void esw_vport_disable_egress_acl(struct mlx5_eswitch *esw,
1029 struct mlx5_vport *vport)
1031 if (IS_ERR_OR_NULL(vport->egress.acl))
1034 esw_debug(esw->dev, "Destroy vport[%d] E-Switch egress ACL\n", vport->vport);
1036 esw_vport_cleanup_egress_rules(esw, vport);
1037 mlx5_destroy_flow_group(vport->egress.allowed_vlans_grp);
1038 mlx5_destroy_flow_group(vport->egress.drop_grp);
1039 mlx5_destroy_flow_table(vport->egress.acl);
1040 vport->egress.allowed_vlans_grp = NULL;
1041 vport->egress.drop_grp = NULL;
1042 vport->egress.acl = NULL;
1045 static int esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
1046 struct mlx5_vport *vport)
1048 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
1049 struct mlx5_core_dev *dev = esw->dev;
1050 struct mlx5_flow_namespace *root_ns;
1051 struct mlx5_flow_table *acl;
1052 struct mlx5_flow_group *g;
1053 void *match_criteria;
1055 /* The ingress acl table contains 4 groups
1056 * (2 active rules at the same time -
1057 * 1 allow rule from one of the first 3 groups.
1058 * 1 drop rule from the last group):
1059 * 1)Allow untagged traffic with smac=original mac.
1060 * 2)Allow untagged traffic.
1061 * 3)Allow traffic with smac=original mac.
1062 * 4)Drop all other traffic.
1067 if (!MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support))
1070 if (!IS_ERR_OR_NULL(vport->ingress.acl))
1073 esw_debug(dev, "Create vport[%d] ingress ACL log_max_size(%d)\n",
1074 vport->vport, MLX5_CAP_ESW_INGRESS_ACL(dev, log_max_ft_size));
1076 root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_INGRESS);
1078 esw_warn(dev, "Failed to get E-Switch ingress flow namespace\n");
1082 flow_group_in = mlx5_vzalloc(inlen);
1086 acl = mlx5_create_vport_flow_table(root_ns, 0, table_size, 0, vport->vport);
1089 esw_warn(dev, "Failed to create E-Switch vport[%d] ingress flow Table, err(%d)\n",
1093 vport->ingress.acl = acl;
1095 match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
1097 MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
1098 MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.vlan_tag);
1099 MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.smac_47_16);
1100 MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.smac_15_0);
1101 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
1102 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 0);
1104 g = mlx5_create_flow_group(acl, flow_group_in);
1107 esw_warn(dev, "Failed to create E-Switch vport[%d] ingress untagged spoofchk flow group, err(%d)\n",
1111 vport->ingress.allow_untagged_spoofchk_grp = g;
1113 memset(flow_group_in, 0, inlen);
1114 MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
1115 MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.vlan_tag);
1116 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 1);
1117 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 1);
1119 g = mlx5_create_flow_group(acl, flow_group_in);
1122 esw_warn(dev, "Failed to create E-Switch vport[%d] ingress untagged flow group, err(%d)\n",
1126 vport->ingress.allow_untagged_only_grp = g;
1128 memset(flow_group_in, 0, inlen);
1129 MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
1130 MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.smac_47_16);
1131 MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.smac_15_0);
1132 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 2);
1133 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 2);
1135 g = mlx5_create_flow_group(acl, flow_group_in);
1138 esw_warn(dev, "Failed to create E-Switch vport[%d] ingress spoofchk flow group, err(%d)\n",
1142 vport->ingress.allow_spoofchk_only_grp = g;
1144 memset(flow_group_in, 0, inlen);
1145 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 3);
1146 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 3);
1148 g = mlx5_create_flow_group(acl, flow_group_in);
1151 esw_warn(dev, "Failed to create E-Switch vport[%d] ingress drop flow group, err(%d)\n",
1155 vport->ingress.drop_grp = g;
1159 if (!IS_ERR_OR_NULL(vport->ingress.allow_spoofchk_only_grp))
1160 mlx5_destroy_flow_group(
1161 vport->ingress.allow_spoofchk_only_grp);
1162 if (!IS_ERR_OR_NULL(vport->ingress.allow_untagged_only_grp))
1163 mlx5_destroy_flow_group(
1164 vport->ingress.allow_untagged_only_grp);
1165 if (!IS_ERR_OR_NULL(vport->ingress.allow_untagged_spoofchk_grp))
1166 mlx5_destroy_flow_group(
1167 vport->ingress.allow_untagged_spoofchk_grp);
1168 if (!IS_ERR_OR_NULL(vport->ingress.acl))
1169 mlx5_destroy_flow_table(vport->ingress.acl);
1172 kvfree(flow_group_in);
1176 static void esw_vport_cleanup_ingress_rules(struct mlx5_eswitch *esw,
1177 struct mlx5_vport *vport)
1179 if (!IS_ERR_OR_NULL(vport->ingress.drop_rule))
1180 mlx5_del_flow_rule(vport->ingress.drop_rule);
1182 if (!IS_ERR_OR_NULL(vport->ingress.allow_rule))
1183 mlx5_del_flow_rule(vport->ingress.allow_rule);
1185 vport->ingress.drop_rule = NULL;
1186 vport->ingress.allow_rule = NULL;
1189 static void esw_vport_disable_ingress_acl(struct mlx5_eswitch *esw,
1190 struct mlx5_vport *vport)
1192 if (IS_ERR_OR_NULL(vport->ingress.acl))
1195 esw_debug(esw->dev, "Destroy vport[%d] E-Switch ingress ACL\n", vport->vport);
1197 esw_vport_cleanup_ingress_rules(esw, vport);
1198 mlx5_destroy_flow_group(vport->ingress.allow_spoofchk_only_grp);
1199 mlx5_destroy_flow_group(vport->ingress.allow_untagged_only_grp);
1200 mlx5_destroy_flow_group(vport->ingress.allow_untagged_spoofchk_grp);
1201 mlx5_destroy_flow_group(vport->ingress.drop_grp);
1202 mlx5_destroy_flow_table(vport->ingress.acl);
1203 vport->ingress.acl = NULL;
1204 vport->ingress.drop_grp = NULL;
1205 vport->ingress.allow_spoofchk_only_grp = NULL;
1206 vport->ingress.allow_untagged_only_grp = NULL;
1207 vport->ingress.allow_untagged_spoofchk_grp = NULL;
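/* Ingress ACL programming for a vport: when VST (vlan/qos) or spoofchk is
 * set, an allow rule is installed that matches untagged packets (VST) and/or
 * the vport's programmed source MAC (spoofchk), followed by a catch-all drop
 * rule. With none of these set, the ingress ACL table is torn down entirely.
 */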
1210 static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
1211 struct mlx5_vport *vport)
1213 struct mlx5_flow_spec *spec;
1217 esw_vport_cleanup_ingress_rules(esw, vport);
1219 if (!vport->info.vlan && !vport->info.qos && !vport->info.spoofchk) {
1220 esw_vport_disable_ingress_acl(esw, vport);
1224 err = esw_vport_enable_ingress_acl(esw, vport);
1226 mlx5_core_warn(esw->dev,
1227 "failed to enable ingress acl (%d) on vport[%d]\n",
1233 "vport[%d] configure ingress rules, vlan(%d) qos(%d)\n",
1234 vport->vport, vport->info.vlan, vport->info.qos);
1236 spec = mlx5_vzalloc(sizeof(*spec));
1239 esw_warn(esw->dev, "vport[%d] configure ingress rules failed, err(%d)\n",
1244 if (vport->info.vlan || vport->info.qos)
1245 MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.vlan_tag);
1247 if (vport->info.spoofchk) {
1248 MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.smac_47_16);
1249 MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.smac_15_0);
1250 smac_v = MLX5_ADDR_OF(fte_match_param,
1252 outer_headers.smac_47_16);
1253 ether_addr_copy(smac_v, vport->info.mac);
1256 spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
1257 vport->ingress.allow_rule =
1258 mlx5_add_flow_rule(vport->ingress.acl, spec,
1259 MLX5_FLOW_CONTEXT_ACTION_ALLOW,
1261 if (IS_ERR(vport->ingress.allow_rule)) {
1262 err = PTR_ERR(vport->ingress.allow_rule);
1264 "vport[%d] configure ingress allow rule, err(%d)\n",
1266 vport->ingress.allow_rule = NULL;
1270 memset(spec, 0, sizeof(*spec));
1271 vport->ingress.drop_rule =
1272 mlx5_add_flow_rule(vport->ingress.acl, spec,
1273 MLX5_FLOW_CONTEXT_ACTION_DROP,
1275 if (IS_ERR(vport->ingress.drop_rule)) {
1276 err = PTR_ERR(vport->ingress.drop_rule);
1278 "vport[%d] configure ingress drop rule, err(%d)\n",
1280 vport->ingress.drop_rule = NULL;
1286 esw_vport_cleanup_ingress_rules(esw, vport);
1291 static int esw_vport_egress_config(struct mlx5_eswitch *esw,
1292 struct mlx5_vport *vport)
1294 struct mlx5_flow_spec *spec;
1297 esw_vport_cleanup_egress_rules(esw, vport);
1299 if (!vport->info.vlan && !vport->info.qos) {
1300 esw_vport_disable_egress_acl(esw, vport);
1304 err = esw_vport_enable_egress_acl(esw, vport);
1306 mlx5_core_warn(esw->dev,
1307 "failed to enable egress acl (%d) on vport[%d]\n",
1313 "vport[%d] configure egress rules, vlan(%d) qos(%d)\n",
1314 vport->vport, vport->info.vlan, vport->info.qos);
1316 spec = mlx5_vzalloc(sizeof(*spec));
1319 esw_warn(esw->dev, "vport[%d] configure egress rules failed, err(%d)\n",
1324 /* Allowed vlan rule */
1325 MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.vlan_tag);
1326 MLX5_SET_TO_ONES(fte_match_param, spec->match_value, outer_headers.vlan_tag);
1327 MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.first_vid);
1328 MLX5_SET(fte_match_param, spec->match_value, outer_headers.first_vid, vport->info.vlan);
1330 spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
1331 vport->egress.allowed_vlan =
1332 mlx5_add_flow_rule(vport->egress.acl, spec,
1333 MLX5_FLOW_CONTEXT_ACTION_ALLOW,
1335 if (IS_ERR(vport->egress.allowed_vlan)) {
1336 err = PTR_ERR(vport->egress.allowed_vlan);
1338 "vport[%d] configure egress allowed vlan rule failed, err(%d)\n",
1340 vport->egress.allowed_vlan = NULL;
1344 /* Drop others rule (star rule) */
1345 memset(spec, 0, sizeof(*spec));
1346 vport->egress.drop_rule =
1347 mlx5_add_flow_rule(vport->egress.acl, spec,
1348 MLX5_FLOW_CONTEXT_ACTION_DROP,
1350 if (IS_ERR(vport->egress.drop_rule)) {
1351 err = PTR_ERR(vport->egress.drop_rule);
1353 "vport[%d] configure egress drop rule failed, err(%d)\n",
1355 vport->egress.drop_rule = NULL;
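/* Derive an IB node GUID from the MAC using the EUI-64 style mapping: 0xff,
 * 0xfe are inserted between the upper and lower halves of the MAC, e.g.
 * 00:11:22:33:44:55 yields GUID 00:11:22:ff:fe:33:44:55 (bytes are written in
 * reverse index order to build the u64 in host order).
 */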
1362 static void node_guid_gen_from_mac(u64 *node_guid, u8 mac[ETH_ALEN])
1364 ((u8 *)node_guid)[7] = mac[0];
1365 ((u8 *)node_guid)[6] = mac[1];
1366 ((u8 *)node_guid)[5] = mac[2];
1367 ((u8 *)node_guid)[4] = 0xff;
1368 ((u8 *)node_guid)[3] = 0xfe;
1369 ((u8 *)node_guid)[2] = mac[3];
1370 ((u8 *)node_guid)[1] = mac[4];
1371 ((u8 *)node_guid)[0] = mac[5];
1374 static void esw_apply_vport_conf(struct mlx5_eswitch *esw,
1375 struct mlx5_vport *vport)
1377 int vport_num = vport->vport;
1382 mlx5_modify_vport_admin_state(esw->dev,
1383 MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT,
1385 vport->info.link_state);
1386 mlx5_modify_nic_vport_mac_address(esw->dev, vport_num, vport->info.mac);
1387 mlx5_modify_nic_vport_node_guid(esw->dev, vport_num, vport->info.node_guid);
1388 modify_esw_vport_cvlan(esw->dev, vport_num, vport->info.vlan, vport->info.qos,
1389 (vport->info.vlan || vport->info.qos));
1391 /* Only legacy mode needs ACLs */
1392 if (esw->mode == SRIOV_LEGACY) {
1393 esw_vport_ingress_config(esw, vport);
1394 esw_vport_egress_config(esw, vport);
1397 static void esw_enable_vport(struct mlx5_eswitch *esw, int vport_num,
1400 struct mlx5_vport *vport = &esw->vports[vport_num];
1402 mutex_lock(&esw->state_lock);
1403 WARN_ON(vport->enabled);
1405 esw_debug(esw->dev, "Enabling VPORT(%d)\n", vport_num);
1407 /* Restore old vport configuration */
1408 esw_apply_vport_conf(esw, vport);
1410 /* Sync with current vport context */
1411 vport->enabled_events = enable_events;
1412 vport->enabled = true;
1414 /* only PF is trusted by default */
1416 vport->info.trusted = true;
1418 esw_vport_change_handle_locked(vport);
1420 esw->enabled_vports++;
1421 esw_debug(esw->dev, "Enabled VPORT(%d)\n", vport_num);
1422 mutex_unlock(&esw->state_lock);
1425 static void esw_disable_vport(struct mlx5_eswitch *esw, int vport_num)
1427 struct mlx5_vport *vport = &esw->vports[vport_num];
1429 if (!vport->enabled)
1432 esw_debug(esw->dev, "Disabling vport(%d)\n", vport_num);
1433 /* Mark this vport as disabled to discard new events */
1434 vport->enabled = false;
1436 synchronize_irq(mlx5_get_msix_vec(esw->dev, MLX5_EQ_VEC_ASYNC));
1437 /* Wait for current already scheduled events to complete */
1438 flush_workqueue(esw->work_queue);
1439 /* Disable events from this vport */
1440 arm_vport_context_events_cmd(esw->dev, vport->vport, 0);
1441 mutex_lock(&esw->state_lock);
1442 /* We don't assume VFs will clean up after themselves.
1443 * Calling vport change handler while vport is disabled will clean up
1444 * the vport resources.
1446 esw_vport_change_handle_locked(vport);
1447 vport->enabled_events = 0;
1449 if (vport_num && esw->mode == SRIOV_LEGACY) {
1450 mlx5_modify_vport_admin_state(esw->dev,
1451 MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT,
1453 MLX5_ESW_VPORT_ADMIN_STATE_DOWN);
1454 esw_vport_disable_egress_acl(esw, vport);
1455 esw_vport_disable_ingress_acl(esw, vport);
1457 esw->enabled_vports--;
1458 mutex_unlock(&esw->state_lock);
1461 /* Public E-Switch API */
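/* Enabling SRIOV first disables the PF vport (0), creates either the legacy
 * FDB or the offloads tables, then re-enables vports 0..nvfs. In legacy mode
 * the vports are armed for UC/MC/promisc events; in offloads mode only UC
 * address events are tracked.
 */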
1462 int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode)
1465 int i, enabled_events;
1467 if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager) ||
1468 MLX5_CAP_GEN(esw->dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
1471 if (!MLX5_CAP_GEN(esw->dev, eswitch_flow_table) ||
1472 !MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ft_support)) {
1473 esw_warn(esw->dev, "E-Switch FDB is not supported, aborting ...\n");
1477 if (!MLX5_CAP_ESW_INGRESS_ACL(esw->dev, ft_support))
1478 esw_warn(esw->dev, "E-Switch ingress ACL is not supported by FW\n");
1480 if (!MLX5_CAP_ESW_EGRESS_ACL(esw->dev, ft_support))
1481 esw_warn(esw->dev, "E-Switch egress ACL is not supported by FW\n");
1483 esw_info(esw->dev, "E-Switch enable SRIOV: nvfs(%d) mode (%d)\n", nvfs, mode);
1485 esw_disable_vport(esw, 0);
1487 if (mode == SRIOV_LEGACY)
1488 err = esw_create_legacy_fdb_table(esw, nvfs + 1);
1490 err = esw_offloads_init(esw, nvfs + 1);
1494 enabled_events = (mode == SRIOV_LEGACY) ? SRIOV_VPORT_EVENTS : UC_ADDR_CHANGE;
1495 for (i = 0; i <= nvfs; i++)
1496 esw_enable_vport(esw, i, enabled_events);
1498 esw_info(esw->dev, "SRIOV enabled: active vports(%d)\n",
1499 esw->enabled_vports);
1503 esw_enable_vport(esw, 0, UC_ADDR_CHANGE);
1504 esw->mode = SRIOV_NONE;
1508 void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw)
1510 struct esw_mc_addr *mc_promisc;
1514 if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager) ||
1515 MLX5_CAP_GEN(esw->dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
1518 esw_info(esw->dev, "disable SRIOV: active vports(%d) mode(%d)\n",
1519 esw->enabled_vports, esw->mode);
1521 mc_promisc = esw->mc_promisc;
1522 nvports = esw->enabled_vports;
1524 for (i = 0; i < esw->total_vports; i++)
1525 esw_disable_vport(esw, i);
1527 if (mc_promisc && mc_promisc->uplink_rule)
1528 mlx5_del_flow_rule(mc_promisc->uplink_rule);
1530 if (esw->mode == SRIOV_LEGACY)
1531 esw_destroy_legacy_fdb_table(esw);
1532 else if (esw->mode == SRIOV_OFFLOADS)
1533 esw_offloads_cleanup(esw, nvports);
1535 esw->mode = SRIOV_NONE;
1536 /* VPORT 0 (PF) must be enabled back with non-sriov configuration */
1537 esw_enable_vport(esw, 0, UC_ADDR_CHANGE);
1540 void mlx5_eswitch_attach(struct mlx5_eswitch *esw)
1542 if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager) ||
1543 MLX5_CAP_GEN(esw->dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
1546 esw_enable_vport(esw, 0, UC_ADDR_CHANGE);
1547 /* VF Vports will be enabled when SRIOV is enabled */
1550 void mlx5_eswitch_detach(struct mlx5_eswitch *esw)
1552 if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager) ||
1553 MLX5_CAP_GEN(esw->dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
1556 esw_disable_vport(esw, 0);
1559 int mlx5_eswitch_init(struct mlx5_core_dev *dev)
1561 int l2_table_size = 1 << MLX5_CAP_GEN(dev, log_max_l2_table);
1562 int total_vports = MLX5_TOTAL_VPORTS(dev);
1563 struct esw_mc_addr *mc_promisc;
1564 struct mlx5_eswitch *esw;
1568 if (!MLX5_CAP_GEN(dev, vport_group_manager) ||
1569 MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
1573 "Total vports %d, l2 table size(%d), per vport: max uc(%d) max mc(%d)\n",
1574 total_vports, l2_table_size,
1575 MLX5_MAX_UC_PER_VPORT(dev),
1576 MLX5_MAX_MC_PER_VPORT(dev));
1578 esw = kzalloc(sizeof(*esw), GFP_KERNEL);
1584 esw->l2_table.bitmap = kcalloc(BITS_TO_LONGS(l2_table_size),
1585 sizeof(uintptr_t), GFP_KERNEL);
1586 if (!esw->l2_table.bitmap) {
1590 esw->l2_table.size = l2_table_size;
1592 mc_promisc = kzalloc(sizeof(*mc_promisc), GFP_KERNEL);
1597 esw->mc_promisc = mc_promisc;
1599 esw->work_queue = create_singlethread_workqueue("mlx5_esw_wq");
1600 if (!esw->work_queue) {
1605 esw->vports = kcalloc(total_vports, sizeof(struct mlx5_vport),
1612 esw->offloads.vport_reps =
1613 kzalloc(total_vports * sizeof(struct mlx5_eswitch_rep),
1615 if (!esw->offloads.vport_reps) {
1620 mutex_init(&esw->state_lock);
1622 for (vport_num = 0; vport_num < total_vports; vport_num++) {
1623 struct mlx5_vport *vport = &esw->vports[vport_num];
1625 vport->vport = vport_num;
1626 vport->info.link_state = MLX5_ESW_VPORT_ADMIN_STATE_AUTO;
1628 INIT_WORK(&vport->vport_change_handler,
1629 esw_vport_change_handler);
1632 esw->total_vports = total_vports;
1633 esw->enabled_vports = 0;
1634 esw->mode = SRIOV_NONE;
1636 dev->priv.eswitch = esw;
1639 if (esw->work_queue)
1640 destroy_workqueue(esw->work_queue);
1641 kfree(esw->l2_table.bitmap);
1643 kfree(esw->offloads.vport_reps);
1648 void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw)
1650 if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager) ||
1651 MLX5_CAP_GEN(esw->dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
1654 esw_info(esw->dev, "cleanup\n");
1656 esw->dev->priv.eswitch = NULL;
1657 destroy_workqueue(esw->work_queue);
1658 kfree(esw->l2_table.bitmap);
1659 kfree(esw->mc_promisc);
1660 kfree(esw->offloads.vport_reps);
1665 void mlx5_eswitch_vport_event(struct mlx5_eswitch *esw, struct mlx5_eqe *eqe)
1667 struct mlx5_eqe_vport_change *vc_eqe = &eqe->data.vport_change;
1668 u16 vport_num = be16_to_cpu(vc_eqe->vport_num);
1669 struct mlx5_vport *vport;
1672 pr_warn("MLX5 E-Switch: vport %d got an event while eswitch is not initialized\n",
1677 vport = &esw->vports[vport_num];
1679 queue_work(esw->work_queue, &vport->vport_change_handler);
1682 /* Vport Administration */
1683 #define ESW_ALLOWED(esw) \
1684 (esw && MLX5_CAP_GEN(esw->dev, vport_group_manager) && mlx5_core_is_pf(esw->dev))
1685 #define LEGAL_VPORT(esw, vport) (vport >= 0 && vport < esw->total_vports)
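/* Vport administration entry points, typically reached from the netdev
 * ndo_set_vf_* callbacks. Vport 0 is the PF; VF n is addressed as vport
 * n + 1, hence ivi->vf = vport - 1 in mlx5_eswitch_get_vport_config().
 */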
1687 int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
1688 int vport, u8 mac[ETH_ALEN])
1690 struct mlx5_vport *evport;
1694 if (!ESW_ALLOWED(esw))
1696 if (!LEGAL_VPORT(esw, vport) || is_multicast_ether_addr(mac))
1699 mutex_lock(&esw->state_lock);
1700 evport = &esw->vports[vport];
1702 if (evport->info.spoofchk && !is_valid_ether_addr(mac))
1703 mlx5_core_warn(esw->dev,
1704 "Set invalid MAC while spoofchk is on, vport(%d)\n",
1707 err = mlx5_modify_nic_vport_mac_address(esw->dev, vport, mac);
1709 mlx5_core_warn(esw->dev,
1710 "Failed to mlx5_modify_nic_vport_mac vport(%d) err=(%d)\n",
1715 node_guid_gen_from_mac(&node_guid, mac);
1716 err = mlx5_modify_nic_vport_node_guid(esw->dev, vport, node_guid);
1718 mlx5_core_warn(esw->dev,
1719 "Failed to set vport %d node guid, err = %d. RDMA_CM will not function properly for this VF.\n",
1722 ether_addr_copy(evport->info.mac, mac);
1723 evport->info.node_guid = node_guid;
1724 if (evport->enabled && esw->mode == SRIOV_LEGACY)
1725 err = esw_vport_ingress_config(esw, evport);
1728 mutex_unlock(&esw->state_lock);
1732 int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw,
1733 int vport, int link_state)
1735 struct mlx5_vport *evport;
1738 if (!ESW_ALLOWED(esw))
1740 if (!LEGAL_VPORT(esw, vport))
1743 mutex_lock(&esw->state_lock);
1744 evport = &esw->vports[vport];
1746 err = mlx5_modify_vport_admin_state(esw->dev,
1747 MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT,
1750 mlx5_core_warn(esw->dev,
1751 "Failed to set vport %d link state, err = %d",
1756 evport->info.link_state = link_state;
1759 mutex_unlock(&esw->state_lock);
1763 int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw,
1764 int vport, struct ifla_vf_info *ivi)
1766 struct mlx5_vport *evport;
1768 if (!ESW_ALLOWED(esw))
1770 if (!LEGAL_VPORT(esw, vport))
1773 evport = &esw->vports[vport];
1775 memset(ivi, 0, sizeof(*ivi));
1776 ivi->vf = vport - 1;
1778 mutex_lock(&esw->state_lock);
1779 ether_addr_copy(ivi->mac, evport->info.mac);
1780 ivi->linkstate = evport->info.link_state;
1781 ivi->vlan = evport->info.vlan;
1782 ivi->qos = evport->info.qos;
1783 ivi->spoofchk = evport->info.spoofchk;
1784 ivi->trusted = evport->info.trusted;
1785 mutex_unlock(&esw->state_lock);
1790 int __mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
1791 int vport, u16 vlan, u8 qos, u8 set_flags)
1793 struct mlx5_vport *evport;
1796 if (!ESW_ALLOWED(esw))
1798 if (!LEGAL_VPORT(esw, vport) || (vlan > 4095) || (qos > 7))
1801 mutex_lock(&esw->state_lock);
1802 evport = &esw->vports[vport];
1804 err = modify_esw_vport_cvlan(esw->dev, vport, vlan, qos, set_flags);
1808 evport->info.vlan = vlan;
1809 evport->info.qos = qos;
1810 if (evport->enabled && esw->mode == SRIOV_LEGACY) {
1811 err = esw_vport_ingress_config(esw, evport);
1814 err = esw_vport_egress_config(esw, evport);
1818 mutex_unlock(&esw->state_lock);
1822 int mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
1823 int vport, u16 vlan, u8 qos)
1828 set_flags = SET_VLAN_STRIP | SET_VLAN_INSERT;
1830 return __mlx5_eswitch_set_vport_vlan(esw, vport, vlan, qos, set_flags);
1833 int mlx5_eswitch_set_vport_spoofchk(struct mlx5_eswitch *esw,
1834 int vport, bool spoofchk)
1836 struct mlx5_vport *evport;
1840 if (!ESW_ALLOWED(esw))
1842 if (!LEGAL_VPORT(esw, vport))
1845 mutex_lock(&esw->state_lock);
1846 evport = &esw->vports[vport];
1847 pschk = evport->info.spoofchk;
1848 evport->info.spoofchk = spoofchk;
1849 if (pschk && !is_valid_ether_addr(evport->info.mac))
1850 mlx5_core_warn(esw->dev,
1851 "Spoofchk in set while MAC is invalid, vport(%d)\n",
1853 if (evport->enabled && esw->mode == SRIOV_LEGACY)
1854 err = esw_vport_ingress_config(esw, evport);
1856 evport->info.spoofchk = pschk;
1857 mutex_unlock(&esw->state_lock);
1862 int mlx5_eswitch_set_vport_trust(struct mlx5_eswitch *esw,
1863 int vport, bool setting)
1865 struct mlx5_vport *evport;
1867 if (!ESW_ALLOWED(esw))
1869 if (!LEGAL_VPORT(esw, vport))
1872 mutex_lock(&esw->state_lock);
1873 evport = &esw->vports[vport];
1874 evport->info.trusted = setting;
1875 if (evport->enabled)
1876 esw_vport_change_handle_locked(evport);
1877 mutex_unlock(&esw->state_lock);
1882 int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
1884 struct ifla_vf_stats *vf_stats)
1886 int outlen = MLX5_ST_SZ_BYTES(query_vport_counter_out);
1887 u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)] = {0};
1891 if (!ESW_ALLOWED(esw))
1893 if (!LEGAL_VPORT(esw, vport))
1896 out = mlx5_vzalloc(outlen);
1900 MLX5_SET(query_vport_counter_in, in, opcode,
1901 MLX5_CMD_OP_QUERY_VPORT_COUNTER);
1902 MLX5_SET(query_vport_counter_in, in, op_mod, 0);
1903 MLX5_SET(query_vport_counter_in, in, vport_number, vport);
1905 MLX5_SET(query_vport_counter_in, in, other_vport, 1);
1907 memset(out, 0, outlen);
1908 err = mlx5_cmd_exec(esw->dev, in, sizeof(in), out, outlen);
1912 #define MLX5_GET_CTR(p, x) \
1913 MLX5_GET64(query_vport_counter_out, p, x)
1915 memset(vf_stats, 0, sizeof(*vf_stats));
1916 vf_stats->rx_packets =
1917 MLX5_GET_CTR(out, received_eth_unicast.packets) +
1918 MLX5_GET_CTR(out, received_ib_unicast.packets) +
1919 MLX5_GET_CTR(out, received_eth_multicast.packets) +
1920 MLX5_GET_CTR(out, received_ib_multicast.packets) +
1921 MLX5_GET_CTR(out, received_eth_broadcast.packets);
1923 vf_stats->rx_bytes =
1924 MLX5_GET_CTR(out, received_eth_unicast.octets) +
1925 MLX5_GET_CTR(out, received_ib_unicast.octets) +
1926 MLX5_GET_CTR(out, received_eth_multicast.octets) +
1927 MLX5_GET_CTR(out, received_ib_multicast.octets) +
1928 MLX5_GET_CTR(out, received_eth_broadcast.octets);
1930 vf_stats->tx_packets =
1931 MLX5_GET_CTR(out, transmitted_eth_unicast.packets) +
1932 MLX5_GET_CTR(out, transmitted_ib_unicast.packets) +
1933 MLX5_GET_CTR(out, transmitted_eth_multicast.packets) +
1934 MLX5_GET_CTR(out, transmitted_ib_multicast.packets) +
1935 MLX5_GET_CTR(out, transmitted_eth_broadcast.packets);
1937 vf_stats->tx_bytes =
1938 MLX5_GET_CTR(out, transmitted_eth_unicast.octets) +
1939 MLX5_GET_CTR(out, transmitted_ib_unicast.octets) +
1940 MLX5_GET_CTR(out, transmitted_eth_multicast.octets) +
1941 MLX5_GET_CTR(out, transmitted_ib_multicast.octets) +
1942 MLX5_GET_CTR(out, transmitted_eth_broadcast.octets);
1944 vf_stats->multicast =
1945 MLX5_GET_CTR(out, received_eth_multicast.packets) +
1946 MLX5_GET_CTR(out, received_ib_multicast.packets);
1948 vf_stats->broadcast =
1949 MLX5_GET_CTR(out, received_eth_broadcast.packets);