// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/dsa/dsa2.c - Hardware switch handling, binding version 2
 * Copyright (c) 2008-2009 Marvell Semiconductor
 * Copyright (c) 2013 Florian Fainelli <florian@openwrt.org>
 * Copyright (c) 2016 Andrew Lunn <andrew@lunn.ch>
 */

#include <linux/device.h>
#include <linux/err.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/slab.h>
#include <linux/rtnetlink.h>
#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <net/devlink.h>
#include <net/sch_generic.h>

#include "dsa_priv.h"

static DEFINE_MUTEX(dsa2_mutex);
LIST_HEAD(dsa_tree_list);

/* Track the bridges with forwarding offload enabled */
static unsigned long dsa_fwd_offloading_bridges;

/**
 * dsa_tree_notify - Execute code for all switches in a DSA switch tree.
 * @dst: collection of struct dsa_switch devices to notify.
 * @e: event, must be of type DSA_NOTIFIER_*
 * @v: event-specific value.
 *
 * Given a struct dsa_switch_tree, this can be used to run a function once for
 * each member DSA switch. The other alternative of traversing the tree is only
 * through its ports list, which does not uniquely list the switches.
 */
int dsa_tree_notify(struct dsa_switch_tree *dst, unsigned long e, void *v)
{
        struct raw_notifier_head *nh = &dst->nh;
        int err;

        err = raw_notifier_call_chain(nh, e, v);

        return notifier_to_errno(err);
}

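/* Example (illustrative sketch, not part of this file): callers emit an event
 * by filling the matching dsa_notifier_*_info structure and passing it
 * through this function, e.g. simplified from net/dsa/port.c:
 *
 *	struct dsa_notifier_bridge_info info = {
 *		.dp = dp,
 *	};
 *
 *	return dsa_tree_notify(dp->ds->dst, DSA_NOTIFIER_BRIDGE_JOIN, &info);
 *
 * Every switch in the tree then handles the event in its notifier callback.
 */
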
/**
 * dsa_broadcast - Notify all DSA trees in the system.
 * @e: event, must be of type DSA_NOTIFIER_*
 * @v: event-specific value.
 *
 * Can be used to notify the switching fabric of events such as cross-chip
 * bridging between disjoint trees (such as islands of tagger-compatible
 * switches bridged by an incompatible middle switch).
 *
 * WARNING: this function is not reliable during probe time, because probing
 * between trees is asynchronous and not all DSA trees might have probed.
 */
int dsa_broadcast(unsigned long e, void *v)
{
        struct dsa_switch_tree *dst;
        int err = 0;

        list_for_each_entry(dst, &dsa_tree_list, list) {
                err = dsa_tree_notify(dst, e, v);
                if (err)
                        break;
        }

        return err;
}

/**
 * dsa_lag_map() - Map LAG structure to a linear LAG array
 * @dst: Tree in which to record the mapping.
 * @lag: LAG structure that is to be mapped to the tree's array.
 *
 * dsa_lag_id/dsa_lag_by_id can then be used to translate between the
 * two spaces. The size of the mapping space is determined by the
 * driver by setting ds->num_lag_ids. It is perfectly legal to leave
 * it unset if it is not needed, in which case these functions become
 * no-ops.
 */
void dsa_lag_map(struct dsa_switch_tree *dst, struct dsa_lag *lag)
{
        unsigned int id;

        for (id = 1; id <= dst->lags_len; id++) {
                if (!dsa_lag_by_id(dst, id)) {
                        dst->lags[id - 1] = lag;
                        lag->id = id;
                        return;
                }
        }

        /* No IDs left, which is OK. Some drivers do not need it. The
         * ones that do, e.g. mv88e6xxx, will discover that dsa_lag_id
         * returns an error for this device when joining the LAG. The
         * driver can then return -EOPNOTSUPP back to DSA, which will
         * fall back to a software LAG.
         */
}

/**
 * dsa_lag_unmap() - Remove a LAG ID mapping
 * @dst: Tree in which the mapping is recorded.
 * @lag: LAG structure that was mapped.
 *
 * As there may be multiple users of the mapping, it is only removed
 * if there are no other references to it.
 */
void dsa_lag_unmap(struct dsa_switch_tree *dst, struct dsa_lag *lag)
{
        unsigned int id;

        dsa_lags_foreach_id(id, dst) {
                if (dsa_lag_by_id(dst, id) == lag) {
                        dst->lags[id - 1] = NULL;
                        lag->id = 0;
                        break;
                }
        }
}

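/* Example (hedged sketch): a driver with a finite hardware LAG table checks
 * the mapping at join time and bails out to the software fallback described
 * in dsa_lag_map(), along these lines:
 *
 *	id = dsa_lag_id(ds->dst, lag_dev);	// helper named in the kernel-doc above
 *	if (!id)
 *		return -EOPNOTSUPP;	// no ID mapped; DSA uses a software LAG
 *
 * The exact helper signature varies across kernel versions; treat this as
 * pseudocode.
 */
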
struct dsa_lag *dsa_tree_lag_find(struct dsa_switch_tree *dst,
                                  const struct net_device *lag_dev)
{
        struct dsa_port *dp;

        list_for_each_entry(dp, &dst->ports, list)
                if (dsa_port_lag_dev_get(dp) == lag_dev)
                        return dp->lag;

        return NULL;
}

struct dsa_bridge *dsa_tree_bridge_find(struct dsa_switch_tree *dst,
                                        const struct net_device *br)
{
        struct dsa_port *dp;

        list_for_each_entry(dp, &dst->ports, list)
                if (dsa_port_bridge_dev_get(dp) == br)
                        return dp->bridge;

        return NULL;
}

static int dsa_bridge_num_find(const struct net_device *bridge_dev)
{
        struct dsa_switch_tree *dst;

        list_for_each_entry(dst, &dsa_tree_list, list) {
                struct dsa_bridge *bridge;

                bridge = dsa_tree_bridge_find(dst, bridge_dev);
                if (bridge)
                        return bridge->num;
        }

        return 0;
}

unsigned int dsa_bridge_num_get(const struct net_device *bridge_dev, int max)
{
        unsigned int bridge_num = dsa_bridge_num_find(bridge_dev);

        /* Switches without FDB isolation support don't get unique
         * bridge numbering
         */
        if (!max)
                return 0;

        if (!bridge_num) {
                /* First port that requests FDB isolation or TX forwarding
                 * offload for this bridge
                 */
                bridge_num = find_next_zero_bit(&dsa_fwd_offloading_bridges,
                                                DSA_MAX_NUM_OFFLOADING_BRIDGES,
                                                1);
                if (bridge_num >= max)
                        return 0;

                set_bit(bridge_num, &dsa_fwd_offloading_bridges);
        }

        return bridge_num;
}

void dsa_bridge_num_put(const struct net_device *bridge_dev,
                        unsigned int bridge_num)
{
        /* Since we refcount bridges, we know that when we call this function
         * it is no longer in use, so we can just go ahead and remove it from
         * the bit mask.
         */
        clear_bit(bridge_num, &dsa_fwd_offloading_bridges);
}

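/* Example (sketch): these two helpers are paired around the lifetime of a
 * bridge that gets a dedicated number, roughly as the bridge join/leave paths
 * in net/dsa/port.c use them:
 *
 *	bridge_num = dsa_bridge_num_get(bridge_dev, ds->max_num_bridges);
 *	...
 *	if (bridge_num)
 *		dsa_bridge_num_put(bridge_dev, bridge_num);
 *
 * ds->max_num_bridges is the driver-declared limit; passing 0 disables unique
 * numbering entirely, as handled in dsa_bridge_num_get() above.
 */
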
struct dsa_switch *dsa_switch_find(int tree_index, int sw_index)
{
        struct dsa_switch_tree *dst;
        struct dsa_port *dp;

        list_for_each_entry(dst, &dsa_tree_list, list) {
                if (dst->index != tree_index)
                        continue;

                list_for_each_entry(dp, &dst->ports, list) {
                        if (dp->ds->index != sw_index)
                                continue;

                        return dp->ds;
                }
        }

        return NULL;
}
EXPORT_SYMBOL_GPL(dsa_switch_find);

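/* Example (sketch): drivers resolve the source switch of a cross-chip event
 * with this lookup, e.g. mv88e6xxx does roughly:
 *
 *	ds = dsa_switch_find(tree_index, sw_index);
 *	if (!ds)
 *		return 0;	// event came from a switch we cannot see
 *
 * where tree_index/sw_index arrive via the cross-chip notifier arguments.
 */
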
static struct dsa_switch_tree *dsa_tree_find(int index)
{
        struct dsa_switch_tree *dst;

        list_for_each_entry(dst, &dsa_tree_list, list)
                if (dst->index == index)
                        return dst;

        return NULL;
}

static struct dsa_switch_tree *dsa_tree_alloc(int index)
{
        struct dsa_switch_tree *dst;

        dst = kzalloc(sizeof(*dst), GFP_KERNEL);
        if (!dst)
                return NULL;

        dst->index = index;

        INIT_LIST_HEAD(&dst->rtable);

        INIT_LIST_HEAD(&dst->ports);

        INIT_LIST_HEAD(&dst->list);
        list_add_tail(&dst->list, &dsa_tree_list);

        kref_init(&dst->refcount);

        return dst;
}

static void dsa_tree_free(struct dsa_switch_tree *dst)
{
        if (dst->tag_ops)
                dsa_tag_driver_put(dst->tag_ops);
        list_del(&dst->list);
        kfree(dst);
}

static struct dsa_switch_tree *dsa_tree_get(struct dsa_switch_tree *dst)
{
        if (dst)
                kref_get(&dst->refcount);

        return dst;
}

static struct dsa_switch_tree *dsa_tree_touch(int index)
{
        struct dsa_switch_tree *dst;

        dst = dsa_tree_find(index);
        if (dst)
                return dsa_tree_get(dst);
        else
                return dsa_tree_alloc(index);
}

static void dsa_tree_release(struct kref *ref)
{
        struct dsa_switch_tree *dst;

        dst = container_of(ref, struct dsa_switch_tree, refcount);

        dsa_tree_free(dst);
}

static void dsa_tree_put(struct dsa_switch_tree *dst)
{
        if (dst)
                kref_put(&dst->refcount, dsa_tree_release);
}

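/* Example (sketch): the find/alloc/get/put helpers above implement a
 * find-or-create pattern keyed by tree index. A switch joining tree 0 does,
 * in effect:
 *
 *	ds->dst = dsa_tree_touch(0);	// existing tree: +1 ref; else alloc
 *	...
 *	dsa_tree_put(ds->dst);		// last put -> dsa_tree_release() -> kfree
 *
 * which is exactly how dsa_switch_parse_member_of() and
 * dsa_register_switch() below use them.
 */
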
static struct dsa_port *dsa_tree_find_port_by_node(struct dsa_switch_tree *dst,
                                                   struct device_node *dn)
{
        struct dsa_port *dp;

        list_for_each_entry(dp, &dst->ports, list)
                if (dp->dn == dn)
                        return dp;

        return NULL;
}

static struct dsa_link *dsa_link_touch(struct dsa_port *dp,
                                       struct dsa_port *link_dp)
{
        struct dsa_switch *ds = dp->ds;
        struct dsa_switch_tree *dst;
        struct dsa_link *dl;

        dst = ds->dst;

        list_for_each_entry(dl, &dst->rtable, list)
                if (dl->dp == dp && dl->link_dp == link_dp)
                        return dl;

        dl = kzalloc(sizeof(*dl), GFP_KERNEL);
        if (!dl)
                return NULL;

        dl->dp = dp;
        dl->link_dp = link_dp;

        INIT_LIST_HEAD(&dl->list);
        list_add_tail(&dl->list, &dst->rtable);

        return dl;
}

static bool dsa_port_setup_routing_table(struct dsa_port *dp)
{
        struct dsa_switch *ds = dp->ds;
        struct dsa_switch_tree *dst = ds->dst;
        struct device_node *dn = dp->dn;
        struct of_phandle_iterator it;
        struct dsa_port *link_dp;
        struct dsa_link *dl;
        int err;

        of_for_each_phandle(&it, err, dn, "link", NULL, 0) {
                link_dp = dsa_tree_find_port_by_node(dst, it.node);
                if (!link_dp) {
                        of_node_put(it.node);
                        return false;
                }

                dl = dsa_link_touch(dp, link_dp);
                if (!dl) {
                        of_node_put(it.node);
                        return false;
                }
        }

        return true;
}

static bool dsa_tree_setup_routing_table(struct dsa_switch_tree *dst)
{
        bool complete = true;
        struct dsa_port *dp;

        list_for_each_entry(dp, &dst->ports, list) {
                if (dsa_port_is_dsa(dp)) {
                        complete = dsa_port_setup_routing_table(dp);
                        if (!complete)
                                break;
                }
        }

        return complete;
}

static struct dsa_port *dsa_tree_find_first_cpu(struct dsa_switch_tree *dst)
{
        struct dsa_port *dp;

        list_for_each_entry(dp, &dst->ports, list)
                if (dsa_port_is_cpu(dp))
                        return dp;

        return NULL;
}

struct net_device *dsa_tree_find_first_master(struct dsa_switch_tree *dst)
{
        struct device_node *ethernet;
        struct net_device *master;
        struct dsa_port *cpu_dp;

        cpu_dp = dsa_tree_find_first_cpu(dst);
        ethernet = of_parse_phandle(cpu_dp->dn, "ethernet", 0);
        master = of_find_net_device_by_node(ethernet);
        of_node_put(ethernet);

        return master;
}

/* Assign the default CPU port (the first one in the tree) to all ports of the
 * fabric which don't already have one as part of their own switch.
 */
static int dsa_tree_setup_default_cpu(struct dsa_switch_tree *dst)
{
        struct dsa_port *cpu_dp, *dp;

        cpu_dp = dsa_tree_find_first_cpu(dst);
        if (!cpu_dp) {
                pr_err("DSA: tree %d has no CPU port\n", dst->index);
                return -EINVAL;
        }

        list_for_each_entry(dp, &dst->ports, list) {
                if (dp->cpu_dp)
                        continue;

                if (dsa_port_is_user(dp) || dsa_port_is_dsa(dp))
                        dp->cpu_dp = cpu_dp;
        }

        return 0;
}

/* Perform initial assignment of CPU ports to user ports and DSA links in the
 * fabric, giving preference to CPU ports local to each switch. Default to
 * using the first CPU port in the switch tree if the port does not have a CPU
 * port local to this switch.
 */
static int dsa_tree_setup_cpu_ports(struct dsa_switch_tree *dst)
{
        struct dsa_port *cpu_dp, *dp;

        list_for_each_entry(cpu_dp, &dst->ports, list) {
                if (!dsa_port_is_cpu(cpu_dp))
                        continue;

                /* Prefer a local CPU port */
                dsa_switch_for_each_port(dp, cpu_dp->ds) {
                        /* Prefer the first local CPU port found */
                        if (dp->cpu_dp)
                                continue;

                        if (dsa_port_is_user(dp) || dsa_port_is_dsa(dp))
                                dp->cpu_dp = cpu_dp;
                }
        }

        return dsa_tree_setup_default_cpu(dst);
}

static void dsa_tree_teardown_cpu_ports(struct dsa_switch_tree *dst)
{
        struct dsa_port *dp;

        list_for_each_entry(dp, &dst->ports, list)
                if (dsa_port_is_user(dp) || dsa_port_is_dsa(dp))
                        dp->cpu_dp = NULL;
}

static int dsa_port_devlink_setup(struct dsa_port *dp)
{
        struct devlink_port *dlp = &dp->devlink_port;
        struct dsa_switch_tree *dst = dp->ds->dst;
        struct devlink_port_attrs attrs = {};
        struct devlink *dl = dp->ds->devlink;
        struct dsa_switch *ds = dp->ds;
        const unsigned char *id;
        unsigned char len;
        int err;

        memset(dlp, 0, sizeof(*dlp));
        devlink_port_init(dl, dlp);

        if (ds->ops->port_setup) {
                err = ds->ops->port_setup(ds, dp->index);
                if (err)
                        return err;
        }

        id = (const unsigned char *)&dst->index;
        len = sizeof(dst->index);

        attrs.phys.port_number = dp->index;
        memcpy(attrs.switch_id.id, id, len);
        attrs.switch_id.id_len = len;

        switch (dp->type) {
        case DSA_PORT_TYPE_UNUSED:
                attrs.flavour = DEVLINK_PORT_FLAVOUR_UNUSED;
                break;
        case DSA_PORT_TYPE_CPU:
                attrs.flavour = DEVLINK_PORT_FLAVOUR_CPU;
                break;
        case DSA_PORT_TYPE_DSA:
                attrs.flavour = DEVLINK_PORT_FLAVOUR_DSA;
                break;
        case DSA_PORT_TYPE_USER:
                attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
                break;
        }

        devlink_port_attrs_set(dlp, &attrs);
        err = devlink_port_register(dl, dlp, dp->index);
        if (err) {
                if (ds->ops->port_teardown)
                        ds->ops->port_teardown(ds, dp->index);
                return err;
        }

        return 0;
}

static void dsa_port_devlink_teardown(struct dsa_port *dp)
{
        struct devlink_port *dlp = &dp->devlink_port;
        struct dsa_switch *ds = dp->ds;

        devlink_port_unregister(dlp);

        if (ds->ops->port_teardown)
                ds->ops->port_teardown(ds, dp->index);

        devlink_port_fini(dlp);
}

static int dsa_port_setup(struct dsa_port *dp)
{
        struct devlink_port *dlp = &dp->devlink_port;
        bool dsa_port_link_registered = false;
        struct dsa_switch *ds = dp->ds;
        bool dsa_port_enabled = false;
        int err = 0;

        if (dp->setup)
                return 0;

        err = dsa_port_devlink_setup(dp);
        if (err)
                return err;

        switch (dp->type) {
        case DSA_PORT_TYPE_UNUSED:
                dsa_port_disable(dp);
                break;
        case DSA_PORT_TYPE_CPU:
                if (dp->dn) {
                        err = dsa_shared_port_link_register_of(dp);
                        if (err)
                                break;
                        dsa_port_link_registered = true;
                } else {
                        dev_warn(ds->dev,
                                 "skipping link registration for CPU port %d\n",
                                 dp->index);
                }

                err = dsa_port_enable(dp, NULL);
                if (err)
                        break;
                dsa_port_enabled = true;

                break;
        case DSA_PORT_TYPE_DSA:
                if (dp->dn) {
                        err = dsa_shared_port_link_register_of(dp);
                        if (err)
                                break;
                        dsa_port_link_registered = true;
                } else {
                        dev_warn(ds->dev,
                                 "skipping link registration for DSA port %d\n",
                                 dp->index);
                }

                err = dsa_port_enable(dp, NULL);
                if (err)
                        break;
                dsa_port_enabled = true;

                break;
        case DSA_PORT_TYPE_USER:
                of_get_mac_address(dp->dn, dp->mac);
                err = dsa_slave_create(dp);
                if (err)
                        break;

                devlink_port_type_eth_set(dlp, dp->slave);
                break;
        }

        if (err && dsa_port_enabled)
                dsa_port_disable(dp);
        if (err && dsa_port_link_registered)
                dsa_shared_port_link_unregister_of(dp);
        if (err) {
                dsa_port_devlink_teardown(dp);
                return err;
        }

        dp->setup = true;

        return 0;
}

static void dsa_port_teardown(struct dsa_port *dp)
{
        struct devlink_port *dlp = &dp->devlink_port;

        if (!dp->setup)
                return;

        devlink_port_type_clear(dlp);

        switch (dp->type) {
        case DSA_PORT_TYPE_UNUSED:
                break;
        case DSA_PORT_TYPE_CPU:
                dsa_port_disable(dp);
                if (dp->dn)
                        dsa_shared_port_link_unregister_of(dp);
                break;
        case DSA_PORT_TYPE_DSA:
                dsa_port_disable(dp);
                if (dp->dn)
                        dsa_shared_port_link_unregister_of(dp);
                break;
        case DSA_PORT_TYPE_USER:
                if (dp->slave) {
                        dsa_slave_destroy(dp->slave);
                        dp->slave = NULL;
                }
                break;
        }

        dsa_port_devlink_teardown(dp);

        dp->setup = false;
}

static int dsa_port_setup_as_unused(struct dsa_port *dp)
{
        dp->type = DSA_PORT_TYPE_UNUSED;
        return dsa_port_setup(dp);
}

static int dsa_devlink_info_get(struct devlink *dl,
                                struct devlink_info_req *req,
                                struct netlink_ext_ack *extack)
{
        struct dsa_switch *ds = dsa_devlink_to_ds(dl);

        if (ds->ops->devlink_info_get)
                return ds->ops->devlink_info_get(ds, req, extack);

        return -EOPNOTSUPP;
}

static int dsa_devlink_sb_pool_get(struct devlink *dl,
                                   unsigned int sb_index, u16 pool_index,
                                   struct devlink_sb_pool_info *pool_info)
{
        struct dsa_switch *ds = dsa_devlink_to_ds(dl);

        if (!ds->ops->devlink_sb_pool_get)
                return -EOPNOTSUPP;

        return ds->ops->devlink_sb_pool_get(ds, sb_index, pool_index,
                                            pool_info);
}

static int dsa_devlink_sb_pool_set(struct devlink *dl, unsigned int sb_index,
                                   u16 pool_index, u32 size,
                                   enum devlink_sb_threshold_type threshold_type,
                                   struct netlink_ext_ack *extack)
{
        struct dsa_switch *ds = dsa_devlink_to_ds(dl);

        if (!ds->ops->devlink_sb_pool_set)
                return -EOPNOTSUPP;

        return ds->ops->devlink_sb_pool_set(ds, sb_index, pool_index, size,
                                            threshold_type, extack);
}

static int dsa_devlink_sb_port_pool_get(struct devlink_port *dlp,
                                        unsigned int sb_index, u16 pool_index,
                                        u32 *p_threshold)
{
        struct dsa_switch *ds = dsa_devlink_port_to_ds(dlp);
        int port = dsa_devlink_port_to_port(dlp);

        if (!ds->ops->devlink_sb_port_pool_get)
                return -EOPNOTSUPP;

        return ds->ops->devlink_sb_port_pool_get(ds, port, sb_index,
                                                 pool_index, p_threshold);
}

static int dsa_devlink_sb_port_pool_set(struct devlink_port *dlp,
                                        unsigned int sb_index, u16 pool_index,
                                        u32 threshold,
                                        struct netlink_ext_ack *extack)
{
        struct dsa_switch *ds = dsa_devlink_port_to_ds(dlp);
        int port = dsa_devlink_port_to_port(dlp);

        if (!ds->ops->devlink_sb_port_pool_set)
                return -EOPNOTSUPP;

        return ds->ops->devlink_sb_port_pool_set(ds, port, sb_index,
                                                 pool_index, threshold, extack);
}

static int
dsa_devlink_sb_tc_pool_bind_get(struct devlink_port *dlp,
                                unsigned int sb_index, u16 tc_index,
                                enum devlink_sb_pool_type pool_type,
                                u16 *p_pool_index, u32 *p_threshold)
{
        struct dsa_switch *ds = dsa_devlink_port_to_ds(dlp);
        int port = dsa_devlink_port_to_port(dlp);

        if (!ds->ops->devlink_sb_tc_pool_bind_get)
                return -EOPNOTSUPP;

        return ds->ops->devlink_sb_tc_pool_bind_get(ds, port, sb_index,
                                                    tc_index, pool_type,
                                                    p_pool_index, p_threshold);
}

static int
dsa_devlink_sb_tc_pool_bind_set(struct devlink_port *dlp,
                                unsigned int sb_index, u16 tc_index,
                                enum devlink_sb_pool_type pool_type,
                                u16 pool_index, u32 threshold,
                                struct netlink_ext_ack *extack)
{
        struct dsa_switch *ds = dsa_devlink_port_to_ds(dlp);
        int port = dsa_devlink_port_to_port(dlp);

        if (!ds->ops->devlink_sb_tc_pool_bind_set)
                return -EOPNOTSUPP;

        return ds->ops->devlink_sb_tc_pool_bind_set(ds, port, sb_index,
                                                    tc_index, pool_type,
                                                    pool_index, threshold,
                                                    extack);
}

static int dsa_devlink_sb_occ_snapshot(struct devlink *dl,
                                       unsigned int sb_index)
{
        struct dsa_switch *ds = dsa_devlink_to_ds(dl);

        if (!ds->ops->devlink_sb_occ_snapshot)
                return -EOPNOTSUPP;

        return ds->ops->devlink_sb_occ_snapshot(ds, sb_index);
}

static int dsa_devlink_sb_occ_max_clear(struct devlink *dl,
                                        unsigned int sb_index)
{
        struct dsa_switch *ds = dsa_devlink_to_ds(dl);

        if (!ds->ops->devlink_sb_occ_max_clear)
                return -EOPNOTSUPP;

        return ds->ops->devlink_sb_occ_max_clear(ds, sb_index);
}

static int dsa_devlink_sb_occ_port_pool_get(struct devlink_port *dlp,
                                            unsigned int sb_index,
                                            u16 pool_index, u32 *p_cur,
                                            u32 *p_max)
{
        struct dsa_switch *ds = dsa_devlink_port_to_ds(dlp);
        int port = dsa_devlink_port_to_port(dlp);

        if (!ds->ops->devlink_sb_occ_port_pool_get)
                return -EOPNOTSUPP;

        return ds->ops->devlink_sb_occ_port_pool_get(ds, port, sb_index,
                                                     pool_index, p_cur, p_max);
}

static int
dsa_devlink_sb_occ_tc_port_bind_get(struct devlink_port *dlp,
                                    unsigned int sb_index, u16 tc_index,
                                    enum devlink_sb_pool_type pool_type,
                                    u32 *p_cur, u32 *p_max)
{
        struct dsa_switch *ds = dsa_devlink_port_to_ds(dlp);
        int port = dsa_devlink_port_to_port(dlp);

        if (!ds->ops->devlink_sb_occ_tc_port_bind_get)
                return -EOPNOTSUPP;

        return ds->ops->devlink_sb_occ_tc_port_bind_get(ds, port,
                                                        sb_index, tc_index,
                                                        pool_type, p_cur,
                                                        p_max);
}

static const struct devlink_ops dsa_devlink_ops = {
        .info_get			= dsa_devlink_info_get,
        .sb_pool_get			= dsa_devlink_sb_pool_get,
        .sb_pool_set			= dsa_devlink_sb_pool_set,
        .sb_port_pool_get		= dsa_devlink_sb_port_pool_get,
        .sb_port_pool_set		= dsa_devlink_sb_port_pool_set,
        .sb_tc_pool_bind_get		= dsa_devlink_sb_tc_pool_bind_get,
        .sb_tc_pool_bind_set		= dsa_devlink_sb_tc_pool_bind_set,
        .sb_occ_snapshot		= dsa_devlink_sb_occ_snapshot,
        .sb_occ_max_clear		= dsa_devlink_sb_occ_max_clear,
        .sb_occ_port_pool_get		= dsa_devlink_sb_occ_port_pool_get,
        .sb_occ_tc_port_bind_get	= dsa_devlink_sb_occ_tc_port_bind_get,
};

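/* Example (hypothetical driver code): each devlink op above is a thin
 * trampoline into dsa_switch_ops, so a driver opts in by implementing the
 * matching callback; anything left NULL surfaces to devlink as -EOPNOTSUPP:
 *
 *	static int foo_devlink_info_get(struct dsa_switch *ds,
 *					struct devlink_info_req *req,
 *					struct netlink_ext_ack *extack)
 *	{
 *		return devlink_info_version_fixed_put(req, "asic.id", "foo");
 *	}
 *
 *	static const struct dsa_switch_ops foo_ops = {
 *		.devlink_info_get = foo_devlink_info_get,
 *	};
 *
 * The "foo" names are made up for illustration.
 */
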
static int dsa_switch_setup_tag_protocol(struct dsa_switch *ds)
{
        const struct dsa_device_ops *tag_ops = ds->dst->tag_ops;
        struct dsa_switch_tree *dst = ds->dst;
        int err;

        if (tag_ops->proto == dst->default_proto)
                goto connect;

        rtnl_lock();
        err = ds->ops->change_tag_protocol(ds, tag_ops->proto);
        rtnl_unlock();
        if (err) {
                dev_err(ds->dev, "Unable to use tag protocol \"%s\": %pe\n",
                        tag_ops->name, ERR_PTR(err));
                return err;
        }

connect:
        if (tag_ops->connect) {
                err = tag_ops->connect(ds);
                if (err)
                        return err;
        }

        if (ds->ops->connect_tag_protocol) {
                err = ds->ops->connect_tag_protocol(ds, tag_ops->proto);
                if (err) {
                        dev_err(ds->dev,
                                "Unable to connect to tag protocol \"%s\": %pe\n",
                                tag_ops->name, ERR_PTR(err));
                        goto disconnect;
                }
        }

        return 0;

disconnect:
        if (tag_ops->disconnect)
                tag_ops->disconnect(ds);

        return err;
}

static void dsa_switch_teardown_tag_protocol(struct dsa_switch *ds)
{
        const struct dsa_device_ops *tag_ops = ds->dst->tag_ops;

        if (tag_ops->disconnect)
                tag_ops->disconnect(ds);
}

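/* Example (hypothetical driver code): the sync in
 * dsa_switch_setup_tag_protocol() only succeeds with a non-default tagger if
 * the driver implements .change_tag_protocol. A minimal sketch, matching the
 * two-argument form this file calls:
 *
 *	static int foo_change_tag_protocol(struct dsa_switch *ds,
 *					   enum dsa_tag_protocol proto)
 *	{
 *		switch (proto) {
 *		case DSA_TAG_PROTO_EDSA:
 *			return foo_enable_edsa(ds);	// hypothetical helper
 *		default:
 *			return -EPROTONOSUPPORT;
 *		}
 *	}
 */
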
static int dsa_switch_setup(struct dsa_switch *ds)
{
        struct dsa_devlink_priv *dl_priv;
        struct device_node *dn;
        int err;

        if (ds->setup)
                return 0;

        /* Initialize ds->phys_mii_mask before registering the slave MDIO bus
         * driver and before ops->setup() has run, since the switch drivers and
         * the slave MDIO bus driver rely on these values for probing PHY
         * devices or not
         */
        ds->phys_mii_mask |= dsa_user_ports(ds);

        /* Add the switch to devlink before calling setup, so that setup can
         * add dpipe tables
         */
        ds->devlink =
                devlink_alloc(&dsa_devlink_ops, sizeof(*dl_priv), ds->dev);
        if (!ds->devlink)
                return -ENOMEM;
        dl_priv = devlink_priv(ds->devlink);
        dl_priv->ds = ds;

        err = dsa_switch_register_notifier(ds);
        if (err)
                goto devlink_free;

        ds->configure_vlan_while_not_filtering = true;

        err = ds->ops->setup(ds);
        if (err < 0)
                goto unregister_notifier;

        err = dsa_switch_setup_tag_protocol(ds);
        if (err)
                goto teardown;

        if (!ds->slave_mii_bus && ds->ops->phy_read) {
                ds->slave_mii_bus = mdiobus_alloc();
                if (!ds->slave_mii_bus) {
                        err = -ENOMEM;
                        goto teardown;
                }

                dsa_slave_mii_bus_init(ds);

                dn = of_get_child_by_name(ds->dev->of_node, "mdio");

                err = of_mdiobus_register(ds->slave_mii_bus, dn);
                of_node_put(dn);
                if (err < 0)
                        goto free_slave_mii_bus;
        }

        ds->setup = true;
        devlink_register(ds->devlink);
        return 0;

free_slave_mii_bus:
        if (ds->slave_mii_bus && ds->ops->phy_read)
                mdiobus_free(ds->slave_mii_bus);
teardown:
        if (ds->ops->teardown)
                ds->ops->teardown(ds);
unregister_notifier:
        dsa_switch_unregister_notifier(ds);
devlink_free:
        devlink_free(ds->devlink);
        ds->devlink = NULL;
        return err;
}

static void dsa_switch_teardown(struct dsa_switch *ds)
{
        if (!ds->setup)
                return;

        if (ds->devlink)
                devlink_unregister(ds->devlink);

        if (ds->slave_mii_bus && ds->ops->phy_read) {
                mdiobus_unregister(ds->slave_mii_bus);
                mdiobus_free(ds->slave_mii_bus);
                ds->slave_mii_bus = NULL;
        }

        dsa_switch_teardown_tag_protocol(ds);

        if (ds->ops->teardown)
                ds->ops->teardown(ds);

        dsa_switch_unregister_notifier(ds);

        if (ds->devlink) {
                devlink_free(ds->devlink);
                ds->devlink = NULL;
        }

        ds->setup = false;
}

/* First tear down the non-shared, then the shared ports. This ensures that
 * all work items scheduled by our switchdev handlers for user ports have
 * completed before we destroy the refcounting kept on the shared ports.
 */
static void dsa_tree_teardown_ports(struct dsa_switch_tree *dst)
{
        struct dsa_port *dp;

        list_for_each_entry(dp, &dst->ports, list)
                if (dsa_port_is_user(dp) || dsa_port_is_unused(dp))
                        dsa_port_teardown(dp);

        dsa_flush_workqueue();

        list_for_each_entry(dp, &dst->ports, list)
                if (dsa_port_is_dsa(dp) || dsa_port_is_cpu(dp))
                        dsa_port_teardown(dp);
}

static void dsa_tree_teardown_switches(struct dsa_switch_tree *dst)
{
        struct dsa_port *dp;

        list_for_each_entry(dp, &dst->ports, list)
                dsa_switch_teardown(dp->ds);
}

/* Bring shared ports up first, then non-shared ports */
static int dsa_tree_setup_ports(struct dsa_switch_tree *dst)
{
        struct dsa_port *dp;
        int err = 0;

        list_for_each_entry(dp, &dst->ports, list) {
                if (dsa_port_is_dsa(dp) || dsa_port_is_cpu(dp)) {
                        err = dsa_port_setup(dp);
                        if (err)
                                goto teardown;
                }
        }

        list_for_each_entry(dp, &dst->ports, list) {
                if (dsa_port_is_user(dp) || dsa_port_is_unused(dp)) {
                        err = dsa_port_setup(dp);
                        if (err) {
                                err = dsa_port_setup_as_unused(dp);
                                if (err)
                                        goto teardown;
                        }
                }
        }

        return 0;

teardown:
        dsa_tree_teardown_ports(dst);

        return err;
}

static int dsa_tree_setup_switches(struct dsa_switch_tree *dst)
{
        struct dsa_port *dp;
        int err = 0;

        list_for_each_entry(dp, &dst->ports, list) {
                err = dsa_switch_setup(dp->ds);
                if (err) {
                        dsa_tree_teardown_switches(dst);
                        break;
                }
        }

        return err;
}

static int dsa_tree_setup_master(struct dsa_switch_tree *dst)
{
        struct dsa_port *cpu_dp;
        int err = 0;

        rtnl_lock();

        dsa_tree_for_each_cpu_port(cpu_dp, dst) {
                struct net_device *master = cpu_dp->master;
                bool admin_up = (master->flags & IFF_UP) &&
                                !qdisc_tx_is_noop(master);

                err = dsa_master_setup(master, cpu_dp);
                if (err)
                        break;

                /* Replay master state event */
                dsa_tree_master_admin_state_change(dst, master, admin_up);
                dsa_tree_master_oper_state_change(dst, master,
                                                  netif_oper_up(master));
        }

        rtnl_unlock();

        return err;
}

static void dsa_tree_teardown_master(struct dsa_switch_tree *dst)
{
        struct dsa_port *cpu_dp;

        rtnl_lock();

        dsa_tree_for_each_cpu_port(cpu_dp, dst) {
                struct net_device *master = cpu_dp->master;

                /* Synthesizing an "admin down" state is sufficient for
                 * the switches to get a notification if the master is
                 * currently up and running.
                 */
                dsa_tree_master_admin_state_change(dst, master, false);

                dsa_master_teardown(master);
        }

        rtnl_unlock();
}

static int dsa_tree_setup_lags(struct dsa_switch_tree *dst)
{
        unsigned int len = 0;
        struct dsa_port *dp;

        list_for_each_entry(dp, &dst->ports, list) {
                if (dp->ds->num_lag_ids > len)
                        len = dp->ds->num_lag_ids;
        }

        if (!len)
                return 0;

        dst->lags = kcalloc(len, sizeof(*dst->lags), GFP_KERNEL);
        if (!dst->lags)
                return -ENOMEM;

        dst->lags_len = len;
        return 0;
}

static void dsa_tree_teardown_lags(struct dsa_switch_tree *dst)
{
        kfree(dst->lags);
}

static int dsa_tree_setup(struct dsa_switch_tree *dst)
{
        bool complete;
        int err;

        if (dst->setup) {
                pr_err("DSA: tree %d already setup! Disjoint trees?\n",
                       dst->index);
                return -EEXIST;
        }

        complete = dsa_tree_setup_routing_table(dst);
        if (!complete)
                return 0;

        err = dsa_tree_setup_cpu_ports(dst);
        if (err)
                return err;

        err = dsa_tree_setup_switches(dst);
        if (err)
                goto teardown_cpu_ports;

        err = dsa_tree_setup_ports(dst);
        if (err)
                goto teardown_switches;

        err = dsa_tree_setup_master(dst);
        if (err)
                goto teardown_ports;

        err = dsa_tree_setup_lags(dst);
        if (err)
                goto teardown_master;

        dst->setup = true;

        pr_info("DSA: tree %d setup\n", dst->index);

        return 0;

teardown_master:
        dsa_tree_teardown_master(dst);
teardown_ports:
        dsa_tree_teardown_ports(dst);
teardown_switches:
        dsa_tree_teardown_switches(dst);
teardown_cpu_ports:
        dsa_tree_teardown_cpu_ports(dst);

        return err;
}

static void dsa_tree_teardown(struct dsa_switch_tree *dst)
{
        struct dsa_link *dl, *next;

        if (!dst->setup)
                return;

        dsa_tree_teardown_lags(dst);

        dsa_tree_teardown_master(dst);

        dsa_tree_teardown_ports(dst);

        dsa_tree_teardown_switches(dst);

        dsa_tree_teardown_cpu_ports(dst);

        list_for_each_entry_safe(dl, next, &dst->rtable, list) {
                list_del(&dl->list);
                kfree(dl);
        }

        pr_info("DSA: tree %d torn down\n", dst->index);

        dst->setup = false;
}

static int dsa_tree_bind_tag_proto(struct dsa_switch_tree *dst,
                                   const struct dsa_device_ops *tag_ops)
{
        const struct dsa_device_ops *old_tag_ops = dst->tag_ops;
        struct dsa_notifier_tag_proto_info info;
        int err;

        dst->tag_ops = tag_ops;

        /* Notify the switches from this tree about the connection
         * to the new tagger
         */
        info.tag_ops = tag_ops;
        err = dsa_tree_notify(dst, DSA_NOTIFIER_TAG_PROTO_CONNECT, &info);
        if (err && err != -EOPNOTSUPP)
                goto out_disconnect;

        /* Notify the old tagger about the disconnection from this tree */
        info.tag_ops = old_tag_ops;
        dsa_tree_notify(dst, DSA_NOTIFIER_TAG_PROTO_DISCONNECT, &info);

        return 0;

out_disconnect:
        info.tag_ops = tag_ops;
        dsa_tree_notify(dst, DSA_NOTIFIER_TAG_PROTO_DISCONNECT, &info);
        dst->tag_ops = old_tag_ops;

        return err;
}

/* Since the dsa/tagging sysfs device attribute is per master, the assumption
 * is that all DSA switches within a tree share the same tagger, otherwise
 * they would have formed disjoint trees (different "dsa,member" values).
 */
int dsa_tree_change_tag_proto(struct dsa_switch_tree *dst,
                              const struct dsa_device_ops *tag_ops,
                              const struct dsa_device_ops *old_tag_ops)
{
        struct dsa_notifier_tag_proto_info info;
        struct dsa_port *dp;
        int err = -EBUSY;

        if (!rtnl_trylock())
                return restart_syscall();

        /* At the moment we don't allow changing the tag protocol under
         * traffic. The rtnl_mutex also happens to serialize concurrent
         * attempts to change the tagging protocol. If we ever lift the IFF_UP
         * restriction, there needs to be another mutex which serializes this.
         */
        dsa_tree_for_each_user_port(dp, dst) {
                if (dsa_port_to_master(dp)->flags & IFF_UP)
                        goto out_unlock;

                if (dp->slave->flags & IFF_UP)
                        goto out_unlock;
        }

        /* Notify the tag protocol change */
        info.tag_ops = tag_ops;
        err = dsa_tree_notify(dst, DSA_NOTIFIER_TAG_PROTO, &info);
        if (err)
                goto out_unwind_tagger;

        err = dsa_tree_bind_tag_proto(dst, tag_ops);
        if (err)
                goto out_unwind_tagger;

        rtnl_unlock();

        return 0;

out_unwind_tagger:
        info.tag_ops = old_tag_ops;
        dsa_tree_notify(dst, DSA_NOTIFIER_TAG_PROTO, &info);
out_unlock:
        rtnl_unlock();
        return err;
}

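/* Example (user-facing usage): this function backs the per-master
 * "dsa/tagging" sysfs attribute; changing the tagger requires the master and
 * all user ports to be down first, e.g.:
 *
 *	ip link set eth0 down && ip link set lan1 down
 *	echo edsa > /sys/class/net/eth0/dsa/tagging
 *
 * Interface and tagger names here are illustrative.
 */
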
static void dsa_tree_master_state_change(struct dsa_switch_tree *dst,
                                         struct net_device *master)
{
        struct dsa_notifier_master_state_info info;
        struct dsa_port *cpu_dp = master->dsa_ptr;

        info.master = master;
        info.operational = dsa_port_master_is_operational(cpu_dp);

        dsa_tree_notify(dst, DSA_NOTIFIER_MASTER_STATE_CHANGE, &info);
}

void dsa_tree_master_admin_state_change(struct dsa_switch_tree *dst,
                                        struct net_device *master,
                                        bool up)
{
        struct dsa_port *cpu_dp = master->dsa_ptr;
        bool notify = false;

        /* Don't keep track of admin state on LAG DSA masters,
         * but rather just of physical DSA masters
         */
        if (netif_is_lag_master(master))
                return;

        if ((dsa_port_master_is_operational(cpu_dp)) !=
            (up && cpu_dp->master_oper_up))
                notify = true;

        cpu_dp->master_admin_up = up;

        if (notify)
                dsa_tree_master_state_change(dst, master);
}

void dsa_tree_master_oper_state_change(struct dsa_switch_tree *dst,
                                       struct net_device *master,
                                       bool up)
{
        struct dsa_port *cpu_dp = master->dsa_ptr;
        bool notify = false;

        /* Don't keep track of oper state on LAG DSA masters,
         * but rather just of physical DSA masters
         */
        if (netif_is_lag_master(master))
                return;

        if ((dsa_port_master_is_operational(cpu_dp)) !=
            (cpu_dp->master_admin_up && up))
                notify = true;

        cpu_dp->master_oper_up = up;

        if (notify)
                dsa_tree_master_state_change(dst, master);
}

static struct dsa_port *dsa_port_touch(struct dsa_switch *ds, int index)
{
        struct dsa_switch_tree *dst = ds->dst;
        struct dsa_port *dp;

        dsa_switch_for_each_port(dp, ds)
                if (dp->index == index)
                        return dp;

        dp = kzalloc(sizeof(*dp), GFP_KERNEL);
        if (!dp)
                return NULL;

        dp->ds = ds;
        dp->index = index;

        mutex_init(&dp->addr_lists_lock);
        mutex_init(&dp->vlans_lock);
        INIT_LIST_HEAD(&dp->fdbs);
        INIT_LIST_HEAD(&dp->mdbs);
        INIT_LIST_HEAD(&dp->vlans);
        INIT_LIST_HEAD(&dp->list);
        list_add_tail(&dp->list, &dst->ports);

        return dp;
}

static int dsa_port_parse_user(struct dsa_port *dp, const char *name)
{
        if (!name)
                name = "eth%d";

        dp->type = DSA_PORT_TYPE_USER;
        dp->name = name;

        return 0;
}

static int dsa_port_parse_dsa(struct dsa_port *dp)
{
        dp->type = DSA_PORT_TYPE_DSA;

        return 0;
}

static enum dsa_tag_protocol dsa_get_tag_protocol(struct dsa_port *dp,
                                                  struct net_device *master)
{
        enum dsa_tag_protocol tag_protocol = DSA_TAG_PROTO_NONE;
        struct dsa_switch *mds, *ds = dp->ds;
        unsigned int mdp_upstream;
        struct dsa_port *mdp;

        /* It is possible to stack DSA switches onto one another when that
         * happens the switch driver may want to know if its tagging protocol
         * is going to work in such a configuration.
         */
        if (dsa_slave_dev_check(master)) {
                mdp = dsa_slave_to_port(master);
                mds = mdp->ds;
                mdp_upstream = dsa_upstream_port(mds, mdp->index);
                tag_protocol = mds->ops->get_tag_protocol(mds, mdp_upstream,
                                                          DSA_TAG_PROTO_NONE);
        }

        /* If the master device is not itself a DSA slave in a disjoint DSA
         * tree, then return immediately.
         */
        return ds->ops->get_tag_protocol(ds, dp->index, tag_protocol);
}

static int dsa_port_parse_cpu(struct dsa_port *dp, struct net_device *master,
                              const char *user_protocol)
{
        const struct dsa_device_ops *tag_ops = NULL;
        struct dsa_switch *ds = dp->ds;
        struct dsa_switch_tree *dst = ds->dst;
        enum dsa_tag_protocol default_proto;

        /* Find out which protocol the switch would prefer. */
        default_proto = dsa_get_tag_protocol(dp, master);
        if (dst->default_proto) {
                if (dst->default_proto != default_proto) {
                        dev_err(ds->dev,
                                "A DSA switch tree can have only one tagging protocol\n");
                        return -EINVAL;
                }
        } else {
                dst->default_proto = default_proto;
        }

        /* See if the user wants to override that preference. */
        if (user_protocol) {
                if (!ds->ops->change_tag_protocol) {
                        dev_err(ds->dev, "Tag protocol cannot be modified\n");
                        return -EINVAL;
                }

                tag_ops = dsa_find_tagger_by_name(user_protocol);
                if (IS_ERR(tag_ops)) {
                        dev_warn(ds->dev,
                                 "Failed to find a tagging driver for protocol %s, using default\n",
                                 user_protocol);
                        tag_ops = NULL;
                }
        }

        if (!tag_ops)
                tag_ops = dsa_tag_driver_get(default_proto);

        if (IS_ERR(tag_ops)) {
                if (PTR_ERR(tag_ops) == -ENOPROTOOPT)
                        return -EPROBE_DEFER;

                dev_warn(ds->dev, "No tagger for this switch\n");
                return PTR_ERR(tag_ops);
        }

        if (dst->tag_ops) {
                if (dst->tag_ops != tag_ops) {
                        dev_err(ds->dev,
                                "A DSA switch tree can have only one tagging protocol\n");

                        dsa_tag_driver_put(tag_ops);
                        return -EINVAL;
                }

                /* In the case of multiple CPU ports per switch, the tagging
                 * protocol is still reference-counted only per switch tree.
                 */
                dsa_tag_driver_put(tag_ops);
        } else {
                dst->tag_ops = tag_ops;
        }

        dp->master = master;
        dp->type = DSA_PORT_TYPE_CPU;
        dsa_port_set_tag_protocol(dp, dst->tag_ops);
        dp->dst = dst;

        /* At this point, the tree may be configured to use a different
         * tagger than the one chosen by the switch driver during
         * .setup, in the case when a user selects a custom protocol
         * through the DT.
         *
         * This is resolved by syncing the driver with the tree in
         * dsa_switch_setup_tag_protocol once .setup has run and the
         * driver is ready to accept calls to .change_tag_protocol. If
         * the driver does not support the custom protocol at that
         * point, the tree is wholly rejected, thereby ensuring that the
         * tree and driver are always in agreement on the protocol to
         * use.
         */
        return 0;
}

static int dsa_port_parse_of(struct dsa_port *dp, struct device_node *dn)
{
        struct device_node *ethernet = of_parse_phandle(dn, "ethernet", 0);
        const char *name = of_get_property(dn, "label", NULL);
        bool link = of_property_read_bool(dn, "link");

        dp->dn = dn;

        if (ethernet) {
                struct net_device *master;
                const char *user_protocol;

                master = of_find_net_device_by_node(ethernet);
                of_node_put(ethernet);
                if (!master)
                        return -EPROBE_DEFER;

                user_protocol = of_get_property(dn, "dsa-tag-protocol", NULL);
                return dsa_port_parse_cpu(dp, master, user_protocol);
        }

        if (link)
                return dsa_port_parse_dsa(dp);

        return dsa_port_parse_user(dp, name);
}

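/* Example (illustrative device tree fragment): the three port flavours parsed
 * above are distinguished only by their properties:
 *
 *	port@0 {			// user port: gets a "label"
 *		reg = <0>;
 *		label = "lan1";
 *	};
 *	port@5 {			// DSA link: has a "link" phandle
 *		reg = <5>;
 *		link = <&switch1port5>;
 *	};
 *	port@6 {			// CPU port: has an "ethernet" phandle
 *		reg = <6>;
 *		ethernet = <&gmac0>;
 *	};
 *
 * Node names and phandles are made up; see the dsa.yaml DT binding for the
 * authoritative schema.
 */
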
static int dsa_switch_parse_ports_of(struct dsa_switch *ds,
                                     struct device_node *dn)
{
        struct device_node *ports, *port;
        struct dsa_port *dp;
        int err = 0;
        u32 reg;

        ports = of_get_child_by_name(dn, "ports");
        if (!ports) {
                /* The second possibility is "ethernet-ports" */
                ports = of_get_child_by_name(dn, "ethernet-ports");
                if (!ports) {
                        dev_err(ds->dev, "no ports child node found\n");
                        return -EINVAL;
                }
        }

        for_each_available_child_of_node(ports, port) {
                err = of_property_read_u32(port, "reg", &reg);
                if (err) {
                        of_node_put(port);
                        goto out_put_node;
                }

                if (reg >= ds->num_ports) {
                        dev_err(ds->dev, "port %pOF index %u exceeds num_ports (%u)\n",
                                port, reg, ds->num_ports);
                        of_node_put(port);
                        err = -EINVAL;
                        goto out_put_node;
                }

                dp = dsa_to_port(ds, reg);

                err = dsa_port_parse_of(dp, port);
                if (err) {
                        of_node_put(port);
                        goto out_put_node;
                }
        }

out_put_node:
        of_node_put(ports);
        return err;
}

static int dsa_switch_parse_member_of(struct dsa_switch *ds,
                                      struct device_node *dn)
{
        u32 m[2] = { 0, 0 };
        int sz;

        /* Don't error out if this optional property isn't found */
        sz = of_property_read_variable_u32_array(dn, "dsa,member", m, 2, 2);
        if (sz < 0 && sz != -EINVAL)
                return sz;

        ds->index = m[1];

        ds->dst = dsa_tree_touch(m[0]);
        if (!ds->dst)
                return -ENOMEM;

        if (dsa_switch_find(ds->dst->index, ds->index)) {
                dev_err(ds->dev,
                        "A DSA switch with index %d already exists in tree %d\n",
                        ds->index, ds->dst->index);
                return -EEXIST;
        }

        if (ds->dst->last_switch < ds->index)
                ds->dst->last_switch = ds->index;

        return 0;
}

static int dsa_switch_touch_ports(struct dsa_switch *ds)
{
        struct dsa_port *dp;
        int port;

        for (port = 0; port < ds->num_ports; port++) {
                dp = dsa_port_touch(ds, port);
                if (!dp)
                        return -ENOMEM;
        }

        return 0;
}

static int dsa_switch_parse_of(struct dsa_switch *ds, struct device_node *dn)
{
        int err;

        err = dsa_switch_parse_member_of(ds, dn);
        if (err)
                return err;

        err = dsa_switch_touch_ports(ds);
        if (err)
                return err;

        return dsa_switch_parse_ports_of(ds, dn);
}

static int dsa_port_parse(struct dsa_port *dp, const char *name,
                          struct device *dev)
{
        if (!strcmp(name, "cpu")) {
                struct net_device *master;

                master = dsa_dev_to_net_device(dev);
                if (!master)
                        return -EPROBE_DEFER;

                dev_put(master);

                return dsa_port_parse_cpu(dp, master, NULL);
        }

        if (!strcmp(name, "dsa"))
                return dsa_port_parse_dsa(dp);

        return dsa_port_parse_user(dp, name);
}

static int dsa_switch_parse_ports(struct dsa_switch *ds,
                                  struct dsa_chip_data *cd)
{
        bool valid_name_found = false;
        struct dsa_port *dp;
        struct device *dev;
        const char *name;
        unsigned int i;
        int err;

        for (i = 0; i < DSA_MAX_PORTS; i++) {
                name = cd->port_names[i];
                dev = cd->netdev[i];
                dp = dsa_to_port(ds, i);

                if (!name)
                        continue;

                err = dsa_port_parse(dp, name, dev);
                if (err)
                        return err;

                valid_name_found = true;
        }

        if (!valid_name_found && i == DSA_MAX_PORTS)
                return -EINVAL;

        return 0;
}

static int dsa_switch_parse(struct dsa_switch *ds, struct dsa_chip_data *cd)
{
        int err;

        ds->cd = cd;

        /* We don't support interconnected switches nor multiple trees via
         * platform data, so this is the unique switch of the tree.
         */
        ds->index = 0;
        ds->dst = dsa_tree_touch(0);
        if (!ds->dst)
                return -ENOMEM;

        err = dsa_switch_touch_ports(ds);
        if (err)
                return err;

        return dsa_switch_parse_ports(ds, cd);
}

static void dsa_switch_release_ports(struct dsa_switch *ds)
{
        struct dsa_port *dp, *next;

        dsa_switch_for_each_port_safe(dp, next, ds) {
                WARN_ON(!list_empty(&dp->fdbs));
                WARN_ON(!list_empty(&dp->mdbs));
                WARN_ON(!list_empty(&dp->vlans));
                list_del(&dp->list);
                kfree(dp);
        }
}

static int dsa_switch_probe(struct dsa_switch *ds)
{
        struct dsa_switch_tree *dst;
        struct dsa_chip_data *pdata;
        struct device_node *np;
        int err;

        if (!ds->dev)
                return -ENODEV;

        pdata = ds->dev->platform_data;
        np = ds->dev->of_node;

        if (!ds->num_ports)
                return -EINVAL;

        if (np) {
                err = dsa_switch_parse_of(ds, np);
                if (err)
                        dsa_switch_release_ports(ds);
        } else if (pdata) {
                err = dsa_switch_parse(ds, pdata);
                if (err)
                        dsa_switch_release_ports(ds);
        } else {
                err = -ENODEV;
        }

        if (err)
                return err;

        dst = ds->dst;
        dsa_tree_get(dst);
        err = dsa_tree_setup(dst);
        if (err) {
                dsa_switch_release_ports(ds);
                dsa_tree_put(dst);
        }

        return err;
}

int dsa_register_switch(struct dsa_switch *ds)
{
        int err;

        mutex_lock(&dsa2_mutex);
        err = dsa_switch_probe(ds);
        dsa_tree_put(ds->dst);
        mutex_unlock(&dsa2_mutex);

        return err;
}
EXPORT_SYMBOL_GPL(dsa_register_switch);

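/* Example (hypothetical driver code): a typical probe path fills in a
 * struct dsa_switch and hands it over:
 *
 *	ds = devm_kzalloc(dev, sizeof(*ds), GFP_KERNEL);
 *	if (!ds)
 *		return -ENOMEM;
 *
 *	ds->dev = dev;
 *	ds->num_ports = 7;		// hardware-specific
 *	ds->ops = &foo_switch_ops;	// hypothetical dsa_switch_ops
 *
 *	return dsa_register_switch(ds);
 *
 * paired with dsa_unregister_switch() on remove and dsa_switch_shutdown()
 * on shutdown, as the helpers below require.
 */
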
static void dsa_switch_remove(struct dsa_switch *ds)
{
        struct dsa_switch_tree *dst = ds->dst;

        dsa_tree_teardown(dst);
        dsa_switch_release_ports(ds);
        dsa_tree_put(dst);
}

void dsa_unregister_switch(struct dsa_switch *ds)
{
        mutex_lock(&dsa2_mutex);
        dsa_switch_remove(ds);
        mutex_unlock(&dsa2_mutex);
}
EXPORT_SYMBOL_GPL(dsa_unregister_switch);

/* If the DSA master chooses to unregister its net_device on .shutdown, DSA is
 * blocking that operation from completion, due to the dev_hold taken inside
 * netdev_upper_dev_link. Unlink the DSA slave interfaces from being uppers of
 * the DSA master, so that the system can reboot successfully.
 */
void dsa_switch_shutdown(struct dsa_switch *ds)
{
        struct net_device *master, *slave_dev;
        struct dsa_port *dp;

        mutex_lock(&dsa2_mutex);

        if (!ds->setup)
                goto out;

        rtnl_lock();

        dsa_switch_for_each_user_port(dp, ds) {
                master = dsa_port_to_master(dp);
                slave_dev = dp->slave;

                netdev_upper_dev_unlink(master, slave_dev);
        }

        /* Disconnect from further netdevice notifiers on the master,
         * since netdev_uses_dsa() will now return false.
         */
        dsa_switch_for_each_cpu_port(dp, ds)
                dp->master->dsa_ptr = NULL;

        rtnl_unlock();
out:
        mutex_unlock(&dsa2_mutex);
}
EXPORT_SYMBOL_GPL(dsa_switch_shutdown);