/*
 * drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
 * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
 * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
 * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/if_bridge.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/rtnetlink.h>
#include <net/switchdev.h>

#include "spectrum.h"
#include "core.h"
#include "reg.h"
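/* Resolve the FID used for FDB operations on this port: a vPort uses the
 * FID of the vFID it is mapped to, otherwise the VID itself (or the PVID,
 * when no VID is given) acts as the FID.
 */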
static u16 mlxsw_sp_port_vid_to_fid_get(struct mlxsw_sp_port *mlxsw_sp_port,
					u16 vid)
{
	struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_port);
	u16 fid = vid;

	fid = f ? f->fid : fid;

	if (!fid)
		fid = mlxsw_sp_port->pvid;

	return fid;
}
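/* switchdev calls may originally be targeted at a VLAN device on top of
 * this port; in that case, resolve the call to the vPort backing that
 * VLAN device.
 */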
static struct mlxsw_sp_port *
mlxsw_sp_port_orig_get(struct net_device *dev,
		       struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp_port *mlxsw_sp_vport;
	u16 vid;

	if (!is_vlan_dev(dev))
		return mlxsw_sp_port;

	vid = vlan_dev_vlan_id(dev);
	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
	WARN_ON(!mlxsw_sp_vport);

	return mlxsw_sp_vport;
}

static int mlxsw_sp_port_attr_get(struct net_device *dev,
				  struct switchdev_attr *attr)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	mlxsw_sp_port = mlxsw_sp_port_orig_get(attr->orig_dev, mlxsw_sp_port);
	if (!mlxsw_sp_port)
		return -EINVAL;

	switch (attr->id) {
	case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
		attr->u.ppid.id_len = sizeof(mlxsw_sp->base_mac);
		memcpy(&attr->u.ppid.id, &mlxsw_sp->base_mac,
		       attr->u.ppid.id_len);
		break;
	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
		attr->u.brport_flags =
			(mlxsw_sp_port->learning ? BR_LEARNING : 0) |
			(mlxsw_sp_port->learning_sync ? BR_LEARNING_SYNC : 0) |
			(mlxsw_sp_port->uc_flood ? BR_FLOOD : 0);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}
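/* Program the port's STP state into the per-VLAN SPMS register. A vPort
 * only updates its own VID, while a bridged port updates all of its
 * active VLANs.
 */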
static int mlxsw_sp_port_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
				       u8 state)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	enum mlxsw_reg_spms_state spms_state;
	char *spms_pl;
	u16 vid;
	int err;

	switch (state) {
	case BR_STATE_FORWARDING:
		spms_state = MLXSW_REG_SPMS_STATE_FORWARDING;
		break;
	case BR_STATE_LEARNING:
		spms_state = MLXSW_REG_SPMS_STATE_LEARNING;
		break;
	case BR_STATE_LISTENING: /* fall-through */
	case BR_STATE_DISABLED: /* fall-through */
	case BR_STATE_BLOCKING:
		spms_state = MLXSW_REG_SPMS_STATE_DISCARDING;
		break;
	default:
		BUG();
	}

	spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
	if (!spms_pl)
		return -ENOMEM;
	mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);

	if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
		vid = mlxsw_sp_vport_vid_get(mlxsw_sp_port);
		mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);
	} else {
		for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID)
			mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);
	}

	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
	kfree(spms_pl);
	return err;
}

static int mlxsw_sp_port_attr_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
					    struct switchdev_trans *trans,
					    u8 state)
{
	if (switchdev_trans_ph_prepare(trans))
		return 0;

	mlxsw_sp_port->stp_state = state;
	return mlxsw_sp_port_stp_state_set(mlxsw_sp_port, state);
}
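/* Set the unicast (UC) and broadcast (BM) flood tables for a range of
 * indexes. If the BM update fails, the UC update is rolled back so that
 * both tables stay consistent.
 */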
static int __mlxsw_sp_port_flood_set(struct mlxsw_sp_port *mlxsw_sp_port,
				     u16 idx_begin, u16 idx_end, bool uc_set,
				     bool bm_set)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u16 local_port = mlxsw_sp_port->local_port;
	enum mlxsw_flood_table_type table_type;
	u16 range = idx_end - idx_begin + 1;
	char *sftr_pl;
	int err;

	if (mlxsw_sp_port_is_vport(mlxsw_sp_port))
		table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID;
	else
		table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST;

	sftr_pl = kmalloc(MLXSW_REG_SFTR_LEN, GFP_KERNEL);
	if (!sftr_pl)
		return -ENOMEM;

	mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_UC, idx_begin,
			    table_type, range, local_port, uc_set);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);
	if (err)
		goto buffer_out;

	mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_BM, idx_begin,
			    table_type, range, local_port, bm_set);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);
	if (err)
		goto err_flood_bm_set;

	goto buffer_out;

err_flood_bm_set:
	mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_UC, idx_begin,
			    table_type, range, local_port, !uc_set);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);
buffer_out:
	kfree(sftr_pl);
	return err;
}

static int mlxsw_sp_port_uc_flood_set(struct mlxsw_sp_port *mlxsw_sp_port,
				      bool set)
{
	struct net_device *dev = mlxsw_sp_port->dev;
	u16 vid, last_visited_vid;
	int err;

	if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
		u16 fid = mlxsw_sp_vport_fid_get(mlxsw_sp_port)->fid;
		u16 vfid = mlxsw_sp_fid_to_vfid(fid);

		return __mlxsw_sp_port_flood_set(mlxsw_sp_port, vfid, vfid,
						 set, true);
	}

	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
		err = __mlxsw_sp_port_flood_set(mlxsw_sp_port, vid, vid, set,
						true);
		if (err) {
			last_visited_vid = vid;
			goto err_port_flood_set;
		}
	}

	return 0;

err_port_flood_set:
	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, last_visited_vid)
		__mlxsw_sp_port_flood_set(mlxsw_sp_port, vid, vid, !set, true);
	netdev_err(dev, "Failed to configure unicast flooding\n");
	return err;
}

int mlxsw_sp_vport_flood_set(struct mlxsw_sp_port *mlxsw_sp_vport, u16 fid,
			     bool set)
{
	u16 vfid;

	/* In case of vFIDs, index into the flooding table is relative to
	 * the start of the vFIDs range.
	 */
	vfid = mlxsw_sp_fid_to_vfid(fid);
	return __mlxsw_sp_port_flood_set(mlxsw_sp_vport, vfid, vfid, set, set);
}
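/* Enable or disable FDB learning. As with flooding, a vPort is keyed by
 * its single VID, while a bridged port walks all active VLANs and rolls
 * back on failure.
 */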
static int mlxsw_sp_port_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
				      bool set)
{
	u16 vid;
	int err;

	if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
		vid = mlxsw_sp_vport_vid_get(mlxsw_sp_port);

		return __mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, vid,
							set);
	}

	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
		err = __mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, vid,
						       set);
		if (err)
			goto err_port_vid_learning_set;
	}

	return 0;

err_port_vid_learning_set:
	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID)
		__mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, vid, !set);
	return err;
}

static int mlxsw_sp_port_attr_br_flags_set(struct mlxsw_sp_port *mlxsw_sp_port,
					   struct switchdev_trans *trans,
					   unsigned long brport_flags)
{
	unsigned long learning = mlxsw_sp_port->learning ? BR_LEARNING : 0;
	unsigned long uc_flood = mlxsw_sp_port->uc_flood ? BR_FLOOD : 0;
	int err;

	if (!mlxsw_sp_port->bridged)
		return -EINVAL;

	if (switchdev_trans_ph_prepare(trans))
		return 0;

	if ((uc_flood ^ brport_flags) & BR_FLOOD) {
		err = mlxsw_sp_port_uc_flood_set(mlxsw_sp_port,
						 !mlxsw_sp_port->uc_flood);
		if (err)
			return err;
	}

	if ((learning ^ brport_flags) & BR_LEARNING) {
		err = mlxsw_sp_port_learning_set(mlxsw_sp_port,
						 !mlxsw_sp_port->learning);
		if (err)
			goto err_port_learning_set;
	}

	mlxsw_sp_port->uc_flood = brport_flags & BR_FLOOD ? 1 : 0;
	mlxsw_sp_port->learning = brport_flags & BR_LEARNING ? 1 : 0;
	mlxsw_sp_port->learning_sync = brport_flags & BR_LEARNING_SYNC ? 1 : 0;

	return 0;

err_port_learning_set:
	if ((uc_flood ^ brport_flags) & BR_FLOOD)
		mlxsw_sp_port_uc_flood_set(mlxsw_sp_port,
					   mlxsw_sp_port->uc_flood);
	return err;
}
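/* The SFDAT register holds the global FDB ageing time, in seconds. */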
static int mlxsw_sp_ageing_set(struct mlxsw_sp *mlxsw_sp, u32 ageing_time)
{
	char sfdat_pl[MLXSW_REG_SFDAT_LEN];
	int err;

	mlxsw_reg_sfdat_pack(sfdat_pl, ageing_time);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdat), sfdat_pl);
	if (err)
		return err;
	mlxsw_sp->ageing_time = ageing_time;
	return 0;
}

static int mlxsw_sp_port_attr_br_ageing_set(struct mlxsw_sp_port *mlxsw_sp_port,
					    struct switchdev_trans *trans,
					    unsigned long ageing_clock_t)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock_t);
	u32 ageing_time = jiffies_to_msecs(ageing_jiffies) / 1000;

	if (switchdev_trans_ph_prepare(trans)) {
		if (ageing_time < MLXSW_SP_MIN_AGEING_TIME ||
		    ageing_time > MLXSW_SP_MAX_AGEING_TIME)
			return -ERANGE;
		else
			return 0;
	}

	return mlxsw_sp_ageing_set(mlxsw_sp, ageing_time);
}

static int mlxsw_sp_port_attr_br_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port,
					  struct switchdev_trans *trans,
					  struct net_device *orig_dev,
					  bool vlan_enabled)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	/* SWITCHDEV_TRANS_PREPARE phase */
	if ((!vlan_enabled) && (mlxsw_sp->master_bridge.dev == orig_dev)) {
		netdev_err(mlxsw_sp_port->dev, "Bridge must be vlan-aware\n");
		return -EINVAL;
	}

	return 0;
}
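/* switchdev attr set handler: dispatches STP state, bridge flags, ageing
 * time and VLAN filtering changes to the helpers above.
 */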
static int mlxsw_sp_port_attr_set(struct net_device *dev,
				  const struct switchdev_attr *attr,
				  struct switchdev_trans *trans)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err = 0;

	mlxsw_sp_port = mlxsw_sp_port_orig_get(attr->orig_dev, mlxsw_sp_port);
	if (!mlxsw_sp_port)
		return -EINVAL;

	switch (attr->id) {
	case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
		err = mlxsw_sp_port_attr_stp_state_set(mlxsw_sp_port, trans,
						       attr->u.stp_state);
		break;
	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
		err = mlxsw_sp_port_attr_br_flags_set(mlxsw_sp_port, trans,
						      attr->u.brport_flags);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
		err = mlxsw_sp_port_attr_br_ageing_set(mlxsw_sp_port, trans,
						       attr->u.ageing_time);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
		err = mlxsw_sp_port_attr_br_vlan_set(mlxsw_sp_port, trans,
						     attr->orig_dev,
						     attr->u.vlan_filtering);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}
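/* FIDs are created and destroyed through the SFMR register; SVFA then
 * maps VIDs to FIDs, either globally or per {Port, VID}.
 */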
static int mlxsw_sp_fid_op(struct mlxsw_sp *mlxsw_sp, u16 fid, bool create)
{
	char sfmr_pl[MLXSW_REG_SFMR_LEN];

	mlxsw_reg_sfmr_pack(sfmr_pl, !create, fid, fid);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
}

static int mlxsw_sp_fid_map(struct mlxsw_sp *mlxsw_sp, u16 fid, bool valid)
{
	enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_VID_TO_FID;
	char svfa_pl[MLXSW_REG_SVFA_LEN];

	mlxsw_reg_svfa_pack(svfa_pl, 0, mt, valid, fid, fid);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svfa), svfa_pl);
}

static struct mlxsw_sp_fid *mlxsw_sp_fid_alloc(u16 fid)
{
	struct mlxsw_sp_fid *f;

	f = kzalloc(sizeof(*f), GFP_KERNEL);
	if (!f)
		return NULL;

	f->fid = fid;

	return f;
}

struct mlxsw_sp_fid *mlxsw_sp_fid_create(struct mlxsw_sp *mlxsw_sp, u16 fid)
{
	struct mlxsw_sp_fid *f;
	int err;

	err = mlxsw_sp_fid_op(mlxsw_sp, fid, true);
	if (err)
		return ERR_PTR(err);

	/* Although all the ports that are members of the FID might be
	 * using a {Port, VID} to FID mapping, we create a global
	 * VID-to-FID mapping. This allows a port to transition to VLAN
	 * mode, knowing the global mapping exists.
	 */
	err = mlxsw_sp_fid_map(mlxsw_sp, fid, true);
	if (err)
		goto err_fid_map;

	f = mlxsw_sp_fid_alloc(fid);
	if (!f) {
		err = -ENOMEM;
		goto err_allocate_fid;
	}

	list_add(&f->list, &mlxsw_sp->fids);

	return f;

err_allocate_fid:
	mlxsw_sp_fid_map(mlxsw_sp, fid, false);
err_fid_map:
	mlxsw_sp_fid_op(mlxsw_sp, fid, false);
	return ERR_PTR(err);
}

void mlxsw_sp_fid_destroy(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fid *f)
{
	u16 fid = f->fid;

	list_del(&f->list);

	if (f->r)
		mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->r);

	kfree(f);

	mlxsw_sp_fid_map(mlxsw_sp, fid, false);

	mlxsw_sp_fid_op(mlxsw_sp, fid, false);
}

static int __mlxsw_sp_port_fid_join(struct mlxsw_sp_port *mlxsw_sp_port,
				    u16 fid)
{
	struct mlxsw_sp_fid *f;

	if (test_bit(fid, mlxsw_sp_port->active_vlans))
		return 0;

	f = mlxsw_sp_fid_find(mlxsw_sp_port->mlxsw_sp, fid);
	if (!f) {
		f = mlxsw_sp_fid_create(mlxsw_sp_port->mlxsw_sp, fid);
		if (IS_ERR(f))
			return PTR_ERR(f);
	}

	f->ref_count++;

	netdev_dbg(mlxsw_sp_port->dev, "Joined FID=%d\n", fid);

	return 0;
}

static void __mlxsw_sp_port_fid_leave(struct mlxsw_sp_port *mlxsw_sp_port,
				      u16 fid)
{
	struct mlxsw_sp_fid *f;

	f = mlxsw_sp_fid_find(mlxsw_sp_port->mlxsw_sp, fid);
	if (WARN_ON(!f))
		return;

	netdev_dbg(mlxsw_sp_port->dev, "Left FID=%d\n", fid);

	mlxsw_sp_port_fdb_flush(mlxsw_sp_port, fid);

	if (--f->ref_count == 0)
		mlxsw_sp_fid_destroy(mlxsw_sp_port->mlxsw_sp, f);
}

static int mlxsw_sp_port_fid_map(struct mlxsw_sp_port *mlxsw_sp_port, u16 fid,
				 bool valid)
{
	enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;

	/* If the port doesn't have vPorts, then it can use the global
	 * VID-to-FID mapping.
	 */
	if (list_empty(&mlxsw_sp_port->vports_list))
		return 0;

	return mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, valid, fid, fid);
}
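/* Join a range of FIDs: take a reference on each FID, configure flooding
 * for the whole range and set up the {Port, VID} to FID mapping,
 * unwinding everything on failure.
 */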
static int mlxsw_sp_port_fid_join(struct mlxsw_sp_port *mlxsw_sp_port,
				  u16 fid_begin, u16 fid_end)
{
	int fid, err;

	for (fid = fid_begin; fid <= fid_end; fid++) {
		err = __mlxsw_sp_port_fid_join(mlxsw_sp_port, fid);
		if (err)
			goto err_port_fid_join;
	}

	err = __mlxsw_sp_port_flood_set(mlxsw_sp_port, fid_begin, fid_end,
					mlxsw_sp_port->uc_flood, true);
	if (err)
		goto err_port_flood_set;

	for (fid = fid_begin; fid <= fid_end; fid++) {
		err = mlxsw_sp_port_fid_map(mlxsw_sp_port, fid, true);
		if (err)
			goto err_port_fid_map;
	}

	return 0;

err_port_fid_map:
	for (fid--; fid >= fid_begin; fid--)
		mlxsw_sp_port_fid_map(mlxsw_sp_port, fid, false);
	__mlxsw_sp_port_flood_set(mlxsw_sp_port, fid_begin, fid_end, false,
				  false);
err_port_flood_set:
	fid = fid_end;
err_port_fid_join:
	for (fid--; fid >= fid_begin; fid--)
		__mlxsw_sp_port_fid_leave(mlxsw_sp_port, fid);
	return err;
}

static void mlxsw_sp_port_fid_leave(struct mlxsw_sp_port *mlxsw_sp_port,
				    u16 fid_begin, u16 fid_end)
{
	int fid;

	for (fid = fid_begin; fid <= fid_end; fid++)
		mlxsw_sp_port_fid_map(mlxsw_sp_port, fid, false);

	__mlxsw_sp_port_flood_set(mlxsw_sp_port, fid_begin, fid_end, false,
				  false);

	for (fid = fid_begin; fid <= fid_end; fid++)
		__mlxsw_sp_port_fid_leave(mlxsw_sp_port, fid);
}

static int __mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port,
				    u16 vid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char spvid_pl[MLXSW_REG_SPVID_LEN];

	mlxsw_reg_spvid_pack(spvid_pl, mlxsw_sp_port->local_port, vid);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvid), spvid_pl);
}

static int mlxsw_sp_port_allow_untagged_set(struct mlxsw_sp_port *mlxsw_sp_port,
					    bool allow)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char spaft_pl[MLXSW_REG_SPAFT_LEN];

	mlxsw_reg_spaft_pack(spaft_pl, mlxsw_sp_port->local_port, allow);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spaft), spaft_pl);
}

int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
{
	struct net_device *dev = mlxsw_sp_port->dev;
	int err;

	if (!vid) {
		err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, false);
		if (err) {
			netdev_err(dev, "Failed to disallow untagged traffic\n");
			return err;
		}
	} else {
		err = __mlxsw_sp_port_pvid_set(mlxsw_sp_port, vid);
		if (err) {
			netdev_err(dev, "Failed to set PVID\n");
			return err;
		}

		/* Only allow if not already allowed. */
		if (!mlxsw_sp_port->pvid) {
			err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port,
							       true);
			if (err) {
				netdev_err(dev, "Failed to allow untagged traffic\n");
				goto err_port_allow_untagged_set;
			}
		}
	}

	mlxsw_sp_port->pvid = vid;
	return 0;

err_port_allow_untagged_set:
	__mlxsw_sp_port_pvid_set(mlxsw_sp_port, mlxsw_sp_port->pvid);
	return err;
}
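/* VLAN membership is programmed through the SPVM register in batches of
 * up to MLXSW_REG_SPVM_REC_MAX_COUNT records per write.
 */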
static int __mlxsw_sp_port_vlans_set(struct mlxsw_sp_port *mlxsw_sp_port,
				     u16 vid_begin, u16 vid_end, bool is_member,
				     bool untagged)
{
	u16 vid, vid_e;
	int err;

	for (vid = vid_begin; vid <= vid_end;
	     vid += MLXSW_REG_SPVM_REC_MAX_COUNT) {
		vid_e = min((u16) (vid + MLXSW_REG_SPVM_REC_MAX_COUNT - 1),
			    vid_end);

		err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid_e,
					     is_member, untagged);
		if (err)
			return err;
	}

	return 0;
}

static int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
					  u16 vid_begin, u16 vid_end,
					  bool learn_enable)
{
	u16 vid, vid_e;
	int err;

	for (vid = vid_begin; vid <= vid_end;
	     vid += MLXSW_REG_SPVMLR_REC_MAX_COUNT) {
		vid_e = min((u16) (vid + MLXSW_REG_SPVMLR_REC_MAX_COUNT - 1),
			    vid_end);

		err = __mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid,
						       vid_e, learn_enable);
		if (err)
			return err;
	}

	return 0;
}

static int __mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
				     u16 vid_begin, u16 vid_end,
				     bool flag_untagged, bool flag_pvid)
{
	struct net_device *dev = mlxsw_sp_port->dev;
	u16 vid, old_pvid;
	int err;

	if (!mlxsw_sp_port->bridged)
		return -EINVAL;

	err = mlxsw_sp_port_fid_join(mlxsw_sp_port, vid_begin, vid_end);
	if (err) {
		netdev_err(dev, "Failed to join FIDs\n");
		return err;
	}

	err = __mlxsw_sp_port_vlans_set(mlxsw_sp_port, vid_begin, vid_end,
					true, flag_untagged);
	if (err) {
		netdev_err(dev, "Unable to add VIDs %d-%d\n", vid_begin,
			   vid_end);
		goto err_port_vlans_set;
	}

	old_pvid = mlxsw_sp_port->pvid;
	if (flag_pvid && old_pvid != vid_begin) {
		err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, vid_begin);
		if (err) {
			netdev_err(dev, "Unable to add PVID %d\n", vid_begin);
			goto err_port_pvid_set;
		}
	} else if (!flag_pvid && old_pvid >= vid_begin && old_pvid <= vid_end) {
		err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, 0);
		if (err) {
			netdev_err(dev, "Unable to del PVID\n");
			goto err_port_pvid_set;
		}
	}

	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid_begin, vid_end,
					     mlxsw_sp_port->learning);
	if (err) {
		netdev_err(dev, "Failed to set learning for VIDs %d-%d\n",
			   vid_begin, vid_end);
		goto err_port_vid_learning_set;
	}

	/* Changing activity bits only if HW operation succeeded */
	for (vid = vid_begin; vid <= vid_end; vid++) {
		set_bit(vid, mlxsw_sp_port->active_vlans);
		if (flag_untagged)
			set_bit(vid, mlxsw_sp_port->untagged_vlans);
		else
			clear_bit(vid, mlxsw_sp_port->untagged_vlans);
	}

	/* STP state change must be done after we set active VLANs */
	err = mlxsw_sp_port_stp_state_set(mlxsw_sp_port,
					  mlxsw_sp_port->stp_state);
	if (err) {
		netdev_err(dev, "Failed to set STP state\n");
		goto err_port_stp_state_set;
	}

	return 0;

err_port_stp_state_set:
	for (vid = vid_begin; vid <= vid_end; vid++)
		clear_bit(vid, mlxsw_sp_port->active_vlans);
	mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid_begin, vid_end,
				       false);
err_port_vid_learning_set:
	if (old_pvid != mlxsw_sp_port->pvid)
		mlxsw_sp_port_pvid_set(mlxsw_sp_port, old_pvid);
err_port_pvid_set:
	__mlxsw_sp_port_vlans_set(mlxsw_sp_port, vid_begin, vid_end, false,
				  false);
err_port_vlans_set:
	mlxsw_sp_port_fid_leave(mlxsw_sp_port, vid_begin, vid_end);
	return err;
}

static int mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
				   const struct switchdev_obj_port_vlan *vlan,
				   struct switchdev_trans *trans)
{
	bool flag_untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
	bool flag_pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;

	if (switchdev_trans_ph_prepare(trans))
		return 0;

	return __mlxsw_sp_port_vlans_add(mlxsw_sp_port,
					 vlan->vid_begin, vlan->vid_end,
					 flag_untagged, flag_pvid);
}
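/* FDB records are written through the SFD register. Dynamic entries are
 * subject to ageing, while static entries (e.g. those added by user
 * space) are not.
 */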
static enum mlxsw_reg_sfd_rec_policy mlxsw_sp_sfd_rec_policy(bool dynamic)
{
	return dynamic ? MLXSW_REG_SFD_REC_POLICY_DYNAMIC_ENTRY_INGRESS :
			 MLXSW_REG_SFD_REC_POLICY_STATIC_ENTRY;
}

static enum mlxsw_reg_sfd_op mlxsw_sp_sfd_op(bool adding)
{
	return adding ? MLXSW_REG_SFD_OP_WRITE_EDIT :
			MLXSW_REG_SFD_OP_WRITE_REMOVE;
}

static int __mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				     const char *mac, u16 fid, bool adding,
				     enum mlxsw_reg_sfd_rec_action action,
				     enum mlxsw_reg_sfd_rec_policy policy)
{
	char *sfd_pl;
	u8 num_rec;
	int err;

	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
	if (!sfd_pl)
		return -ENOMEM;

	mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
	mlxsw_reg_sfd_uc_pack(sfd_pl, 0, policy, mac, fid, action, local_port);
	num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
	if (err)
		goto out;

	if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl))
		err = -EBUSY;

out:
	kfree(sfd_pl);
	return err;
}

static int mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				   const char *mac, u16 fid, bool adding,
				   bool dynamic)
{
	return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid, adding,
					 MLXSW_REG_SFD_REC_ACTION_NOP,
					 mlxsw_sp_sfd_rec_policy(dynamic));
}

int mlxsw_sp_rif_fdb_op(struct mlxsw_sp *mlxsw_sp, const char *mac, u16 fid,
			bool adding)
{
	return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, 0, mac, fid, adding,
					 MLXSW_REG_SFD_REC_ACTION_FORWARD_IP_ROUTER,
					 MLXSW_REG_SFD_REC_POLICY_STATIC_ENTRY);
}

static int mlxsw_sp_port_fdb_uc_lag_op(struct mlxsw_sp *mlxsw_sp, u16 lag_id,
				       const char *mac, u16 fid, u16 lag_vid,
				       bool adding, bool dynamic)
{
	char *sfd_pl;
	u8 num_rec;
	int err;

	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
	if (!sfd_pl)
		return -ENOMEM;

	mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
	mlxsw_reg_sfd_uc_lag_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic),
				  mac, fid, MLXSW_REG_SFD_REC_ACTION_NOP,
				  lag_vid, lag_id);
	num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
	if (err)
		goto out;

	if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl))
		err = -EBUSY;

out:
	kfree(sfd_pl);
	return err;
}
static int
mlxsw_sp_port_fdb_static_add(struct mlxsw_sp_port *mlxsw_sp_port,
			     const struct switchdev_obj_port_fdb *fdb,
			     struct switchdev_trans *trans)
{
	u16 fid = mlxsw_sp_port_vid_to_fid_get(mlxsw_sp_port, fdb->vid);
	u16 lag_vid = 0;

	if (switchdev_trans_ph_prepare(trans))
		return 0;

	if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
		lag_vid = mlxsw_sp_vport_vid_get(mlxsw_sp_port);
	}

	if (!mlxsw_sp_port->lagged)
		return mlxsw_sp_port_fdb_uc_op(mlxsw_sp_port->mlxsw_sp,
					       mlxsw_sp_port->local_port,
					       fdb->addr, fid, true, false);
	else
		return mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp_port->mlxsw_sp,
						   mlxsw_sp_port->lag_id,
						   fdb->addr, fid, lag_vid,
						   true, false);
}

static int mlxsw_sp_port_mdb_op(struct mlxsw_sp *mlxsw_sp, const char *addr,
				u16 fid, u16 mid, bool adding)
{
	char *sfd_pl;
	u8 num_rec;
	int err;

	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
	if (!sfd_pl)
		return -ENOMEM;

	mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
	mlxsw_reg_sfd_mc_pack(sfd_pl, 0, addr, fid,
			      MLXSW_REG_SFD_REC_ACTION_NOP, mid);
	num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
	if (err)
		goto out;

	if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl))
		err = -EBUSY;

out:
	kfree(sfd_pl);
	return err;
}

static int mlxsw_sp_port_smid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mid,
				  bool add, bool clear_all_ports)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *smid_pl;
	int err, i;

	smid_pl = kmalloc(MLXSW_REG_SMID_LEN, GFP_KERNEL);
	if (!smid_pl)
		return -ENOMEM;

	mlxsw_reg_smid_pack(smid_pl, mid, mlxsw_sp_port->local_port, add);
	if (clear_all_ports) {
		for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++)
			if (mlxsw_sp->ports[i])
				mlxsw_reg_smid_port_mask_set(smid_pl, i, 1);
	}
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(smid), smid_pl);
	kfree(smid_pl);
	return err;
}
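/* Multicast groups (MIDs) are reference counted and tracked in
 * br_mids.list, so multiple bridge ports can share a single SMID entry.
 */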
static struct mlxsw_sp_mid *__mlxsw_sp_mc_get(struct mlxsw_sp *mlxsw_sp,
					      const unsigned char *addr,
					      u16 fid)
{
	struct mlxsw_sp_mid *mid;

	list_for_each_entry(mid, &mlxsw_sp->br_mids.list, list) {
		if (ether_addr_equal(mid->addr, addr) && mid->fid == fid)
			return mid;
	}
	return NULL;
}

static struct mlxsw_sp_mid *__mlxsw_sp_mc_alloc(struct mlxsw_sp *mlxsw_sp,
						const unsigned char *addr,
						u16 fid)
{
	struct mlxsw_sp_mid *mid;
	u16 mid_idx;

	mid_idx = find_first_zero_bit(mlxsw_sp->br_mids.mapped,
				      MLXSW_SP_MID_MAX);
	if (mid_idx == MLXSW_SP_MID_MAX)
		return NULL;

	mid = kzalloc(sizeof(*mid), GFP_KERNEL);
	if (!mid)
		return NULL;

	set_bit(mid_idx, mlxsw_sp->br_mids.mapped);
	ether_addr_copy(mid->addr, addr);
	mid->fid = fid;
	mid->mid = mid_idx;
	mid->ref_count = 0;
	list_add_tail(&mid->list, &mlxsw_sp->br_mids.list);

	return mid;
}

static int __mlxsw_sp_mc_dec_ref(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_mid *mid)
{
	if (--mid->ref_count == 0) {
		list_del(&mid->list);
		clear_bit(mid->mid, mlxsw_sp->br_mids.mapped);
		kfree(mid);
		return 1;
	}
	return 0;
}

static int mlxsw_sp_port_mdb_add(struct mlxsw_sp_port *mlxsw_sp_port,
				 const struct switchdev_obj_port_mdb *mdb,
				 struct switchdev_trans *trans)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct net_device *dev = mlxsw_sp_port->dev;
	struct mlxsw_sp_mid *mid;
	u16 fid = mlxsw_sp_port_vid_to_fid_get(mlxsw_sp_port, mdb->vid);
	int err = 0;

	if (switchdev_trans_ph_prepare(trans))
		return 0;

	mid = __mlxsw_sp_mc_get(mlxsw_sp, mdb->addr, fid);
	if (!mid) {
		mid = __mlxsw_sp_mc_alloc(mlxsw_sp, mdb->addr, fid);
		if (!mid) {
			netdev_err(dev, "Unable to allocate MC group\n");
			return -ENOMEM;
		}
	}
	mid->ref_count++;

	err = mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, true,
				     mid->ref_count == 1);
	if (err) {
		netdev_err(dev, "Unable to set SMID\n");
		goto err_out;
	}

	if (mid->ref_count == 1) {
		err = mlxsw_sp_port_mdb_op(mlxsw_sp, mdb->addr, fid, mid->mid,
					   true);
		if (err) {
			netdev_err(dev, "Unable to set MC SFD\n");
			goto err_out;
		}
	}

	return 0;

err_out:
	__mlxsw_sp_mc_dec_ref(mlxsw_sp, mid);
	return err;
}
static int mlxsw_sp_port_obj_add(struct net_device *dev,
				 const struct switchdev_obj *obj,
				 struct switchdev_trans *trans)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err = 0;

	mlxsw_sp_port = mlxsw_sp_port_orig_get(obj->orig_dev, mlxsw_sp_port);
	if (!mlxsw_sp_port)
		return -EINVAL;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		if (mlxsw_sp_port_is_vport(mlxsw_sp_port))
			return 0;

		err = mlxsw_sp_port_vlans_add(mlxsw_sp_port,
					      SWITCHDEV_OBJ_PORT_VLAN(obj),
					      trans);
		break;
	case SWITCHDEV_OBJ_ID_PORT_FDB:
		err = mlxsw_sp_port_fdb_static_add(mlxsw_sp_port,
						   SWITCHDEV_OBJ_PORT_FDB(obj),
						   trans);
		break;
	case SWITCHDEV_OBJ_ID_PORT_MDB:
		err = mlxsw_sp_port_mdb_add(mlxsw_sp_port,
					    SWITCHDEV_OBJ_PORT_MDB(obj),
					    trans);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

static int __mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
				     u16 vid_begin, u16 vid_end)
{
	u16 vid, pvid;

	if (!mlxsw_sp_port->bridged)
		return -EINVAL;

	mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid_begin, vid_end,
				       false);

	pvid = mlxsw_sp_port->pvid;
	if (pvid >= vid_begin && pvid <= vid_end)
		mlxsw_sp_port_pvid_set(mlxsw_sp_port, 0);

	__mlxsw_sp_port_vlans_set(mlxsw_sp_port, vid_begin, vid_end, false,
				  false);

	mlxsw_sp_port_fid_leave(mlxsw_sp_port, vid_begin, vid_end);

	/* Changing activity bits only if HW operation succeeded */
	for (vid = vid_begin; vid <= vid_end; vid++)
		clear_bit(vid, mlxsw_sp_port->active_vlans);

	return 0;
}

static int mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
				   const struct switchdev_obj_port_vlan *vlan)
{
	return __mlxsw_sp_port_vlans_del(mlxsw_sp_port, vlan->vid_begin,
					 vlan->vid_end);
}

void mlxsw_sp_port_active_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port)
{
	u16 vid;

	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID)
		__mlxsw_sp_port_vlans_del(mlxsw_sp_port, vid, vid);
}
static int
mlxsw_sp_port_fdb_static_del(struct mlxsw_sp_port *mlxsw_sp_port,
			     const struct switchdev_obj_port_fdb *fdb)
{
	u16 fid = mlxsw_sp_port_vid_to_fid_get(mlxsw_sp_port, fdb->vid);
	u16 lag_vid = 0;

	if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
		lag_vid = mlxsw_sp_vport_vid_get(mlxsw_sp_port);
	}

	if (!mlxsw_sp_port->lagged)
		return mlxsw_sp_port_fdb_uc_op(mlxsw_sp_port->mlxsw_sp,
					       mlxsw_sp_port->local_port,
					       fdb->addr, fid,
					       false, false);
	else
		return mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp_port->mlxsw_sp,
						   mlxsw_sp_port->lag_id,
						   fdb->addr, fid, lag_vid,
						   false, false);
}

static int mlxsw_sp_port_mdb_del(struct mlxsw_sp_port *mlxsw_sp_port,
				 const struct switchdev_obj_port_mdb *mdb)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct net_device *dev = mlxsw_sp_port->dev;
	struct mlxsw_sp_mid *mid;
	u16 fid = mlxsw_sp_port_vid_to_fid_get(mlxsw_sp_port, mdb->vid);
	u16 mid_idx;
	int err = 0;

	mid = __mlxsw_sp_mc_get(mlxsw_sp, mdb->addr, fid);
	if (!mid) {
		netdev_err(dev, "Unable to remove port from MC DB\n");
		return -EINVAL;
	}

	err = mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, false, false);
	if (err)
		netdev_err(dev, "Unable to remove port from SMID\n");

	mid_idx = mid->mid;
	if (__mlxsw_sp_mc_dec_ref(mlxsw_sp, mid)) {
		err = mlxsw_sp_port_mdb_op(mlxsw_sp, mdb->addr, fid, mid_idx,
					   false);
		if (err)
			netdev_err(dev, "Unable to remove MC SFD\n");
	}

	return err;
}

static int mlxsw_sp_port_obj_del(struct net_device *dev,
				 const struct switchdev_obj *obj)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err = 0;

	mlxsw_sp_port = mlxsw_sp_port_orig_get(obj->orig_dev, mlxsw_sp_port);
	if (!mlxsw_sp_port)
		return -EINVAL;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		if (mlxsw_sp_port_is_vport(mlxsw_sp_port))
			return 0;

		err = mlxsw_sp_port_vlans_del(mlxsw_sp_port,
					      SWITCHDEV_OBJ_PORT_VLAN(obj));
		break;
	case SWITCHDEV_OBJ_ID_PORT_FDB:
		err = mlxsw_sp_port_fdb_static_del(mlxsw_sp_port,
						   SWITCHDEV_OBJ_PORT_FDB(obj));
		break;
	case SWITCHDEV_OBJ_ID_PORT_MDB:
		err = mlxsw_sp_port_mdb_del(mlxsw_sp_port,
					    SWITCHDEV_OBJ_PORT_MDB(obj));
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

static struct mlxsw_sp_port *mlxsw_sp_lag_rep_port(struct mlxsw_sp *mlxsw_sp,
						   u16 lag_id)
{
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct mlxsw_resources *resources;
	int i;

	resources = mlxsw_core_resources_get(mlxsw_sp->core);
	for (i = 0; i < resources->max_ports_in_lag; i++) {
		mlxsw_sp_port = mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i);
		if (mlxsw_sp_port)
			return mlxsw_sp_port;
	}
	return NULL;
}
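/* Dump the FDB by querying the SFD register page by page; the query is
 * re-issued until fewer than MLXSW_REG_SFD_REC_MAX_COUNT records are
 * returned.
 */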
static int mlxsw_sp_port_fdb_dump(struct mlxsw_sp_port *mlxsw_sp_port,
				  struct switchdev_obj_port_fdb *fdb,
				  switchdev_obj_dump_cb_t *cb,
				  struct net_device *orig_dev)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_port *tmp;
	struct mlxsw_sp_fid *f;
	u16 vport_fid;
	char *sfd_pl;
	char mac[ETH_ALEN];
	u16 fid;
	u8 local_port;
	u16 lag_id;
	u8 num_rec;
	int stored_err = 0;
	int i;
	int err;

	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
	if (!sfd_pl)
		return -ENOMEM;

	f = mlxsw_sp_vport_fid_get(mlxsw_sp_port);
	vport_fid = f ? f->fid : 0;

	mlxsw_reg_sfd_pack(sfd_pl, MLXSW_REG_SFD_OP_QUERY_DUMP, 0);
	do {
		mlxsw_reg_sfd_num_rec_set(sfd_pl, MLXSW_REG_SFD_REC_MAX_COUNT);
		err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
		if (err)
			goto out;

		num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);

		/* Even in case of error, we have to run the dump to the end
		 * so the session in firmware is finished.
		 */
		if (stored_err)
			continue;

		for (i = 0; i < num_rec; i++) {
			switch (mlxsw_reg_sfd_rec_type_get(sfd_pl, i)) {
			case MLXSW_REG_SFD_REC_TYPE_UNICAST:
				mlxsw_reg_sfd_uc_unpack(sfd_pl, i, mac, &fid,
							&local_port);
				if (local_port == mlxsw_sp_port->local_port) {
					if (vport_fid && vport_fid == fid)
						fdb->vid = 0;
					else if (!vport_fid &&
						 !mlxsw_sp_fid_is_vfid(fid))
						fdb->vid = fid;
					else
						continue;
					ether_addr_copy(fdb->addr, mac);
					fdb->ndm_state = NUD_REACHABLE;
					err = cb(&fdb->obj);
					if (err)
						stored_err = err;
				}
				break;
			case MLXSW_REG_SFD_REC_TYPE_UNICAST_LAG:
				mlxsw_reg_sfd_uc_lag_unpack(sfd_pl, i,
							    mac, &fid, &lag_id);
				tmp = mlxsw_sp_lag_rep_port(mlxsw_sp, lag_id);
				if (tmp && tmp->local_port ==
				    mlxsw_sp_port->local_port) {
					/* LAG records can only point to LAG
					 * devices or VLAN devices on top.
					 */
					if (!netif_is_lag_master(orig_dev) &&
					    !is_vlan_dev(orig_dev))
						continue;
					if (vport_fid && vport_fid == fid)
						fdb->vid = 0;
					else if (!vport_fid &&
						 !mlxsw_sp_fid_is_vfid(fid))
						fdb->vid = fid;
					else
						continue;
					ether_addr_copy(fdb->addr, mac);
					fdb->ndm_state = NUD_REACHABLE;
					err = cb(&fdb->obj);
					if (err)
						stored_err = err;
				}
				break;
			}
		}
	} while (num_rec == MLXSW_REG_SFD_REC_MAX_COUNT);

out:
	kfree(sfd_pl);
	return stored_err ? stored_err : err;
}
static int mlxsw_sp_port_vlan_dump(struct mlxsw_sp_port *mlxsw_sp_port,
				   struct switchdev_obj_port_vlan *vlan,
				   switchdev_obj_dump_cb_t *cb)
{
	u16 vid;
	int err = 0;

	if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
		vlan->flags = 0;
		vlan->vid_begin = mlxsw_sp_vport_vid_get(mlxsw_sp_port);
		vlan->vid_end = mlxsw_sp_vport_vid_get(mlxsw_sp_port);
		return cb(&vlan->obj);
	}

	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
		vlan->flags = 0;
		if (vid == mlxsw_sp_port->pvid)
			vlan->flags |= BRIDGE_VLAN_INFO_PVID;
		if (test_bit(vid, mlxsw_sp_port->untagged_vlans))
			vlan->flags |= BRIDGE_VLAN_INFO_UNTAGGED;
		vlan->vid_begin = vid;
		vlan->vid_end = vid;
		err = cb(&vlan->obj);
		if (err)
			break;
	}

	return err;
}

static int mlxsw_sp_port_obj_dump(struct net_device *dev,
				  struct switchdev_obj *obj,
				  switchdev_obj_dump_cb_t *cb)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err = 0;

	mlxsw_sp_port = mlxsw_sp_port_orig_get(obj->orig_dev, mlxsw_sp_port);
	if (!mlxsw_sp_port)
		return -EINVAL;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		err = mlxsw_sp_port_vlan_dump(mlxsw_sp_port,
					      SWITCHDEV_OBJ_PORT_VLAN(obj), cb);
		break;
	case SWITCHDEV_OBJ_ID_PORT_FDB:
		err = mlxsw_sp_port_fdb_dump(mlxsw_sp_port,
					     SWITCHDEV_OBJ_PORT_FDB(obj), cb,
					     obj->orig_dev);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

static const struct switchdev_ops mlxsw_sp_port_switchdev_ops = {
	.switchdev_port_attr_get	= mlxsw_sp_port_attr_get,
	.switchdev_port_attr_set	= mlxsw_sp_port_attr_set,
	.switchdev_port_obj_add		= mlxsw_sp_port_obj_add,
	.switchdev_port_obj_del		= mlxsw_sp_port_obj_del,
	.switchdev_port_obj_dump	= mlxsw_sp_port_obj_dump,
};
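/* Learning notifications (SFN) are processed in delayed work: learned and
 * aged-out MACs are programmed back into the FDB and, when learning_sync
 * is on, reflected to the bridge via switchdev notifiers.
 */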
static void mlxsw_sp_fdb_call_notifiers(bool learning_sync, bool adding,
					char *mac, u16 vid,
					struct net_device *dev)
{
	struct switchdev_notifier_fdb_info info;
	unsigned long notifier_type;

	if (learning_sync) {
		info.addr = mac;
		info.vid = vid;
		notifier_type = adding ? SWITCHDEV_FDB_ADD : SWITCHDEV_FDB_DEL;
		call_switchdev_notifiers(notifier_type, dev, &info.info);
	}
}

static void mlxsw_sp_fdb_notify_mac_process(struct mlxsw_sp *mlxsw_sp,
					    char *sfn_pl, int rec_index,
					    bool adding)
{
	struct mlxsw_sp_port *mlxsw_sp_port;
	char mac[ETH_ALEN];
	u8 local_port;
	u16 vid, fid;
	bool do_notification = true;
	int err;

	mlxsw_reg_sfn_mac_unpack(sfn_pl, rec_index, mac, &fid, &local_port);
	mlxsw_sp_port = mlxsw_sp->ports[local_port];
	if (!mlxsw_sp_port) {
		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect local port in FDB notification\n");
		goto just_remove;
	}

	if (mlxsw_sp_fid_is_vfid(fid)) {
		struct mlxsw_sp_port *mlxsw_sp_vport;

		mlxsw_sp_vport = mlxsw_sp_port_vport_find_by_fid(mlxsw_sp_port,
								 fid);
		if (!mlxsw_sp_vport) {
			netdev_err(mlxsw_sp_port->dev, "Failed to find a matching vPort following FDB notification\n");
			goto just_remove;
		}
		vid = 0;
		/* Override the physical port with the vPort. */
		mlxsw_sp_port = mlxsw_sp_vport;
	} else {
		vid = fid;
	}

do_fdb_op:
	err = mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid,
				      adding, true);
	if (err) {
		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to set FDB entry\n");
		return;
	}

	if (!do_notification)
		return;
	mlxsw_sp_fdb_call_notifiers(mlxsw_sp_port->learning_sync,
				    adding, mac, vid, mlxsw_sp_port->dev);
	return;

just_remove:
	adding = false;
	do_notification = false;
	goto do_fdb_op;
}

static void mlxsw_sp_fdb_notify_mac_lag_process(struct mlxsw_sp *mlxsw_sp,
						char *sfn_pl, int rec_index,
						bool adding)
{
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct net_device *dev;
	char mac[ETH_ALEN];
	u16 lag_vid = 0;
	u16 lag_id;
	u16 vid, fid;
	bool do_notification = true;
	int err;

	mlxsw_reg_sfn_mac_lag_unpack(sfn_pl, rec_index, mac, &fid, &lag_id);
	mlxsw_sp_port = mlxsw_sp_lag_rep_port(mlxsw_sp, lag_id);
	if (!mlxsw_sp_port) {
		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Cannot find port representor for LAG\n");
		goto just_remove;
	}

	if (mlxsw_sp_fid_is_vfid(fid)) {
		struct mlxsw_sp_port *mlxsw_sp_vport;

		mlxsw_sp_vport = mlxsw_sp_port_vport_find_by_fid(mlxsw_sp_port,
								 fid);
		if (!mlxsw_sp_vport) {
			netdev_err(mlxsw_sp_port->dev, "Failed to find a matching vPort following FDB notification\n");
			goto just_remove;
		}

		lag_vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
		dev = mlxsw_sp_vport->dev;
		vid = 0;
		/* Override the physical port with the vPort. */
		mlxsw_sp_port = mlxsw_sp_vport;
	} else {
		dev = mlxsw_sp_lag_get(mlxsw_sp, lag_id)->dev;
		vid = fid;
	}

do_fdb_op:
	err = mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp, lag_id, mac, fid, lag_vid,
					  adding, true);
	if (err) {
		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to set FDB entry\n");
		return;
	}

	if (!do_notification)
		return;
	mlxsw_sp_fdb_call_notifiers(mlxsw_sp_port->learning_sync, adding, mac,
				    vid, dev);
	return;

just_remove:
	adding = false;
	do_notification = false;
	goto do_fdb_op;
}
static void mlxsw_sp_fdb_notify_rec_process(struct mlxsw_sp *mlxsw_sp,
					    char *sfn_pl, int rec_index)
{
	switch (mlxsw_reg_sfn_rec_type_get(sfn_pl, rec_index)) {
	case MLXSW_REG_SFN_REC_TYPE_LEARNED_MAC:
		mlxsw_sp_fdb_notify_mac_process(mlxsw_sp, sfn_pl,
						rec_index, true);
		break;
	case MLXSW_REG_SFN_REC_TYPE_AGED_OUT_MAC:
		mlxsw_sp_fdb_notify_mac_process(mlxsw_sp, sfn_pl,
						rec_index, false);
		break;
	case MLXSW_REG_SFN_REC_TYPE_LEARNED_MAC_LAG:
		mlxsw_sp_fdb_notify_mac_lag_process(mlxsw_sp, sfn_pl,
						    rec_index, true);
		break;
	case MLXSW_REG_SFN_REC_TYPE_AGED_OUT_MAC_LAG:
		mlxsw_sp_fdb_notify_mac_lag_process(mlxsw_sp, sfn_pl,
						    rec_index, false);
		break;
	}
}

static void mlxsw_sp_fdb_notify_work_schedule(struct mlxsw_sp *mlxsw_sp)
{
	mlxsw_core_schedule_dw(&mlxsw_sp->fdb_notify.dw,
			       msecs_to_jiffies(mlxsw_sp->fdb_notify.interval));
}

static void mlxsw_sp_fdb_notify_work(struct work_struct *work)
{
	struct mlxsw_sp *mlxsw_sp;
	char *sfn_pl;
	u8 num_rec;
	int i;
	int err;

	sfn_pl = kmalloc(MLXSW_REG_SFN_LEN, GFP_KERNEL);
	if (!sfn_pl)
		return;

	mlxsw_sp = container_of(work, struct mlxsw_sp, fdb_notify.dw.work);

	rtnl_lock();
	mlxsw_reg_sfn_pack(sfn_pl);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(sfn), sfn_pl);
	if (err) {
		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to get FDB notifications\n");
		goto out;
	}
	num_rec = mlxsw_reg_sfn_num_rec_get(sfn_pl);
	for (i = 0; i < num_rec; i++)
		mlxsw_sp_fdb_notify_rec_process(mlxsw_sp, sfn_pl, i);

out:
	rtnl_unlock();
	kfree(sfn_pl);
	mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp);
}

static int mlxsw_sp_fdb_init(struct mlxsw_sp *mlxsw_sp)
{
	int err;

	err = mlxsw_sp_ageing_set(mlxsw_sp, MLXSW_SP_DEFAULT_AGEING_TIME);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to set default ageing time\n");
		return err;
	}
	INIT_DELAYED_WORK(&mlxsw_sp->fdb_notify.dw, mlxsw_sp_fdb_notify_work);
	mlxsw_sp->fdb_notify.interval = MLXSW_SP_DEFAULT_LEARNING_INTERVAL;
	mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp);
	return 0;
}

static void mlxsw_sp_fdb_fini(struct mlxsw_sp *mlxsw_sp)
{
	cancel_delayed_work_sync(&mlxsw_sp->fdb_notify.dw);
}

int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp)
{
	return mlxsw_sp_fdb_init(mlxsw_sp);
}

void mlxsw_sp_switchdev_fini(struct mlxsw_sp *mlxsw_sp)
{
	mlxsw_sp_fdb_fini(mlxsw_sp);
}

void mlxsw_sp_port_switchdev_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	mlxsw_sp_port->dev->switchdev_ops = &mlxsw_sp_port_switchdev_ops;
}

void mlxsw_sp_port_switchdev_fini(struct mlxsw_sp_port *mlxsw_sp_port)
{
}