/* GNU Linux-libre 6.1.90-gnu — releases.git: net/dsa/dsa2.c */
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * net/dsa/dsa2.c - Hardware switch handling, binding version 2
4  * Copyright (c) 2008-2009 Marvell Semiconductor
5  * Copyright (c) 2013 Florian Fainelli <florian@openwrt.org>
6  * Copyright (c) 2016 Andrew Lunn <andrew@lunn.ch>
7  */
8
9 #include <linux/device.h>
10 #include <linux/err.h>
11 #include <linux/list.h>
12 #include <linux/netdevice.h>
13 #include <linux/slab.h>
14 #include <linux/rtnetlink.h>
15 #include <linux/of.h>
16 #include <linux/of_mdio.h>
17 #include <linux/of_net.h>
18 #include <net/devlink.h>
19 #include <net/sch_generic.h>
20
21 #include "dsa_priv.h"
22
/* NOTE(review): serialization lock for DSA probe/remove paths — its users
 * are not visible in this excerpt; confirm against the rest of the file.
 */
static DEFINE_MUTEX(dsa2_mutex);
/* All switch trees currently known to the system */
LIST_HEAD(dsa_tree_list);

/* Track the bridges with forwarding offload enabled */
static unsigned long dsa_fwd_offloading_bridges;
28
29 /**
30  * dsa_tree_notify - Execute code for all switches in a DSA switch tree.
31  * @dst: collection of struct dsa_switch devices to notify.
32  * @e: event, must be of type DSA_NOTIFIER_*
33  * @v: event-specific value.
34  *
35  * Given a struct dsa_switch_tree, this can be used to run a function once for
36  * each member DSA switch. The other alternative of traversing the tree is only
37  * through its ports list, which does not uniquely list the switches.
38  */
39 int dsa_tree_notify(struct dsa_switch_tree *dst, unsigned long e, void *v)
40 {
41         struct raw_notifier_head *nh = &dst->nh;
42         int err;
43
44         err = raw_notifier_call_chain(nh, e, v);
45
46         return notifier_to_errno(err);
47 }
48
49 /**
50  * dsa_broadcast - Notify all DSA trees in the system.
51  * @e: event, must be of type DSA_NOTIFIER_*
52  * @v: event-specific value.
53  *
54  * Can be used to notify the switching fabric of events such as cross-chip
55  * bridging between disjoint trees (such as islands of tagger-compatible
56  * switches bridged by an incompatible middle switch).
57  *
58  * WARNING: this function is not reliable during probe time, because probing
59  * between trees is asynchronous and not all DSA trees might have probed.
60  */
61 int dsa_broadcast(unsigned long e, void *v)
62 {
63         struct dsa_switch_tree *dst;
64         int err = 0;
65
66         list_for_each_entry(dst, &dsa_tree_list, list) {
67                 err = dsa_tree_notify(dst, e, v);
68                 if (err)
69                         break;
70         }
71
72         return err;
73 }
74
75 /**
76  * dsa_lag_map() - Map LAG structure to a linear LAG array
77  * @dst: Tree in which to record the mapping.
78  * @lag: LAG structure that is to be mapped to the tree's array.
79  *
80  * dsa_lag_id/dsa_lag_by_id can then be used to translate between the
81  * two spaces. The size of the mapping space is determined by the
82  * driver by setting ds->num_lag_ids. It is perfectly legal to leave
83  * it unset if it is not needed, in which case these functions become
84  * no-ops.
85  */
86 void dsa_lag_map(struct dsa_switch_tree *dst, struct dsa_lag *lag)
87 {
88         unsigned int id;
89
90         for (id = 1; id <= dst->lags_len; id++) {
91                 if (!dsa_lag_by_id(dst, id)) {
92                         dst->lags[id - 1] = lag;
93                         lag->id = id;
94                         return;
95                 }
96         }
97
98         /* No IDs left, which is OK. Some drivers do not need it. The
99          * ones that do, e.g. mv88e6xxx, will discover that dsa_lag_id
100          * returns an error for this device when joining the LAG. The
101          * driver can then return -EOPNOTSUPP back to DSA, which will
102          * fall back to a software LAG.
103          */
104 }
105
106 /**
107  * dsa_lag_unmap() - Remove a LAG ID mapping
108  * @dst: Tree in which the mapping is recorded.
109  * @lag: LAG structure that was mapped.
110  *
111  * As there may be multiple users of the mapping, it is only removed
112  * if there are no other references to it.
113  */
114 void dsa_lag_unmap(struct dsa_switch_tree *dst, struct dsa_lag *lag)
115 {
116         unsigned int id;
117
118         dsa_lags_foreach_id(id, dst) {
119                 if (dsa_lag_by_id(dst, id) == lag) {
120                         dst->lags[id - 1] = NULL;
121                         lag->id = 0;
122                         break;
123                 }
124         }
125 }
126
127 struct dsa_lag *dsa_tree_lag_find(struct dsa_switch_tree *dst,
128                                   const struct net_device *lag_dev)
129 {
130         struct dsa_port *dp;
131
132         list_for_each_entry(dp, &dst->ports, list)
133                 if (dsa_port_lag_dev_get(dp) == lag_dev)
134                         return dp->lag;
135
136         return NULL;
137 }
138
139 struct dsa_bridge *dsa_tree_bridge_find(struct dsa_switch_tree *dst,
140                                         const struct net_device *br)
141 {
142         struct dsa_port *dp;
143
144         list_for_each_entry(dp, &dst->ports, list)
145                 if (dsa_port_bridge_dev_get(dp) == br)
146                         return dp->bridge;
147
148         return NULL;
149 }
150
151 static int dsa_bridge_num_find(const struct net_device *bridge_dev)
152 {
153         struct dsa_switch_tree *dst;
154
155         list_for_each_entry(dst, &dsa_tree_list, list) {
156                 struct dsa_bridge *bridge;
157
158                 bridge = dsa_tree_bridge_find(dst, bridge_dev);
159                 if (bridge)
160                         return bridge->num;
161         }
162
163         return 0;
164 }
165
166 unsigned int dsa_bridge_num_get(const struct net_device *bridge_dev, int max)
167 {
168         unsigned int bridge_num = dsa_bridge_num_find(bridge_dev);
169
170         /* Switches without FDB isolation support don't get unique
171          * bridge numbering
172          */
173         if (!max)
174                 return 0;
175
176         if (!bridge_num) {
177                 /* First port that requests FDB isolation or TX forwarding
178                  * offload for this bridge
179                  */
180                 bridge_num = find_next_zero_bit(&dsa_fwd_offloading_bridges,
181                                                 DSA_MAX_NUM_OFFLOADING_BRIDGES,
182                                                 1);
183                 if (bridge_num >= max)
184                         return 0;
185
186                 set_bit(bridge_num, &dsa_fwd_offloading_bridges);
187         }
188
189         return bridge_num;
190 }
191
/* Release a bridge number previously handed out by dsa_bridge_num_get() */
void dsa_bridge_num_put(const struct net_device *bridge_dev,
			unsigned int bridge_num)
{
	/* Since we refcount bridges, we know that when we call this function
	 * it is no longer in use, so we can just go ahead and remove it from
	 * the bit mask.
	 */
	clear_bit(bridge_num, &dsa_fwd_offloading_bridges);
}
201
202 struct dsa_switch *dsa_switch_find(int tree_index, int sw_index)
203 {
204         struct dsa_switch_tree *dst;
205         struct dsa_port *dp;
206
207         list_for_each_entry(dst, &dsa_tree_list, list) {
208                 if (dst->index != tree_index)
209                         continue;
210
211                 list_for_each_entry(dp, &dst->ports, list) {
212                         if (dp->ds->index != sw_index)
213                                 continue;
214
215                         return dp->ds;
216                 }
217         }
218
219         return NULL;
220 }
221 EXPORT_SYMBOL_GPL(dsa_switch_find);
222
223 static struct dsa_switch_tree *dsa_tree_find(int index)
224 {
225         struct dsa_switch_tree *dst;
226
227         list_for_each_entry(dst, &dsa_tree_list, list)
228                 if (dst->index == index)
229                         return dst;
230
231         return NULL;
232 }
233
234 static struct dsa_switch_tree *dsa_tree_alloc(int index)
235 {
236         struct dsa_switch_tree *dst;
237
238         dst = kzalloc(sizeof(*dst), GFP_KERNEL);
239         if (!dst)
240                 return NULL;
241
242         dst->index = index;
243
244         INIT_LIST_HEAD(&dst->rtable);
245
246         INIT_LIST_HEAD(&dst->ports);
247
248         INIT_LIST_HEAD(&dst->list);
249         list_add_tail(&dst->list, &dsa_tree_list);
250
251         kref_init(&dst->refcount);
252
253         return dst;
254 }
255
/* Final destructor for a tree: drop the tagger reference (if one was
 * taken), unlink the tree from dsa_tree_list, and free it.
 */
static void dsa_tree_free(struct dsa_switch_tree *dst)
{
	if (dst->tag_ops)
		dsa_tag_driver_put(dst->tag_ops);
	list_del(&dst->list);
	kfree(dst);
}
263
264 static struct dsa_switch_tree *dsa_tree_get(struct dsa_switch_tree *dst)
265 {
266         if (dst)
267                 kref_get(&dst->refcount);
268
269         return dst;
270 }
271
static struct dsa_switch_tree *dsa_tree_touch(int index)
{
	struct dsa_switch_tree *dst = dsa_tree_find(index);

	/* Reuse an existing tree (taking a reference) or create one */
	return dst ? dsa_tree_get(dst) : dsa_tree_alloc(index);
}
282
283 static void dsa_tree_release(struct kref *ref)
284 {
285         struct dsa_switch_tree *dst;
286
287         dst = container_of(ref, struct dsa_switch_tree, refcount);
288
289         dsa_tree_free(dst);
290 }
291
/* Drop a reference; the last put frees the tree via dsa_tree_release() */
static void dsa_tree_put(struct dsa_switch_tree *dst)
{
	if (dst)
		kref_put(&dst->refcount, dsa_tree_release);
}
297
298 static struct dsa_port *dsa_tree_find_port_by_node(struct dsa_switch_tree *dst,
299                                                    struct device_node *dn)
300 {
301         struct dsa_port *dp;
302
303         list_for_each_entry(dp, &dst->ports, list)
304                 if (dp->dn == dn)
305                         return dp;
306
307         return NULL;
308 }
309
310 static struct dsa_link *dsa_link_touch(struct dsa_port *dp,
311                                        struct dsa_port *link_dp)
312 {
313         struct dsa_switch *ds = dp->ds;
314         struct dsa_switch_tree *dst;
315         struct dsa_link *dl;
316
317         dst = ds->dst;
318
319         list_for_each_entry(dl, &dst->rtable, list)
320                 if (dl->dp == dp && dl->link_dp == link_dp)
321                         return dl;
322
323         dl = kzalloc(sizeof(*dl), GFP_KERNEL);
324         if (!dl)
325                 return NULL;
326
327         dl->dp = dp;
328         dl->link_dp = link_dp;
329
330         INIT_LIST_HEAD(&dl->list);
331         list_add_tail(&dl->list, &dst->rtable);
332
333         return dl;
334 }
335
/* Resolve this DSA port's "link" phandles to ports already present in the
 * tree and record a routing table entry for each. Returns false if a linked
 * port has not been discovered yet (routing table still incomplete) or if
 * allocating a link entry fails; returns true otherwise.
 */
static bool dsa_port_setup_routing_table(struct dsa_port *dp)
{
	struct dsa_switch *ds = dp->ds;
	struct dsa_switch_tree *dst = ds->dst;
	struct device_node *dn = dp->dn;
	struct of_phandle_iterator it;
	struct dsa_port *link_dp;
	struct dsa_link *dl;
	int err;

	of_for_each_phandle(&it, err, dn, "link", NULL, 0) {
		link_dp = dsa_tree_find_port_by_node(dst, it.node);
		if (!link_dp) {
			/* Drop the iterator's node reference on early exit */
			of_node_put(it.node);
			return false;
		}

		dl = dsa_link_touch(dp, link_dp);
		if (!dl) {
			of_node_put(it.node);
			return false;
		}
	}

	return true;
}
362
363 static bool dsa_tree_setup_routing_table(struct dsa_switch_tree *dst)
364 {
365         bool complete = true;
366         struct dsa_port *dp;
367
368         list_for_each_entry(dp, &dst->ports, list) {
369                 if (dsa_port_is_dsa(dp)) {
370                         complete = dsa_port_setup_routing_table(dp);
371                         if (!complete)
372                                 break;
373                 }
374         }
375
376         return complete;
377 }
378
379 static struct dsa_port *dsa_tree_find_first_cpu(struct dsa_switch_tree *dst)
380 {
381         struct dsa_port *dp;
382
383         list_for_each_entry(dp, &dst->ports, list)
384                 if (dsa_port_is_cpu(dp))
385                         return dp;
386
387         return NULL;
388 }
389
/* Resolve the master (host Ethernet) device of the tree's first CPU port by
 * following the "ethernet" OF phandle of that port's node.
 *
 * NOTE(review): dsa_tree_find_first_cpu() can return NULL; this function
 * dereferences cpu_dp->dn unconditionally, so it presumably runs only once
 * a CPU port is known to exist — confirm with callers before reusing.
 */
struct net_device *dsa_tree_find_first_master(struct dsa_switch_tree *dst)
{
	struct device_node *ethernet;
	struct net_device *master;
	struct dsa_port *cpu_dp;

	cpu_dp = dsa_tree_find_first_cpu(dst);
	ethernet = of_parse_phandle(cpu_dp->dn, "ethernet", 0);
	master = of_find_net_device_by_node(ethernet);
	of_node_put(ethernet);

	return master;
}
403
404 /* Assign the default CPU port (the first one in the tree) to all ports of the
405  * fabric which don't already have one as part of their own switch.
406  */
407 static int dsa_tree_setup_default_cpu(struct dsa_switch_tree *dst)
408 {
409         struct dsa_port *cpu_dp, *dp;
410
411         cpu_dp = dsa_tree_find_first_cpu(dst);
412         if (!cpu_dp) {
413                 pr_err("DSA: tree %d has no CPU port\n", dst->index);
414                 return -EINVAL;
415         }
416
417         list_for_each_entry(dp, &dst->ports, list) {
418                 if (dp->cpu_dp)
419                         continue;
420
421                 if (dsa_port_is_user(dp) || dsa_port_is_dsa(dp))
422                         dp->cpu_dp = cpu_dp;
423         }
424
425         return 0;
426 }
427
428 static struct dsa_port *
429 dsa_switch_preferred_default_local_cpu_port(struct dsa_switch *ds)
430 {
431         struct dsa_port *cpu_dp;
432
433         if (!ds->ops->preferred_default_local_cpu_port)
434                 return NULL;
435
436         cpu_dp = ds->ops->preferred_default_local_cpu_port(ds);
437         if (!cpu_dp)
438                 return NULL;
439
440         if (WARN_ON(!dsa_port_is_cpu(cpu_dp) || cpu_dp->ds != ds))
441                 return NULL;
442
443         return cpu_dp;
444 }
445
446 /* Perform initial assignment of CPU ports to user ports and DSA links in the
447  * fabric, giving preference to CPU ports local to each switch. Default to
448  * using the first CPU port in the switch tree if the port does not have a CPU
449  * port local to this switch.
450  */
static int dsa_tree_setup_cpu_ports(struct dsa_switch_tree *dst)
{
	struct dsa_port *preferred_cpu_dp, *cpu_dp, *dp;

	list_for_each_entry(cpu_dp, &dst->ports, list) {
		if (!dsa_port_is_cpu(cpu_dp))
			continue;

		/* If the driver nominates a preferred local CPU port, skip
		 * every other CPU port of that switch.
		 */
		preferred_cpu_dp = dsa_switch_preferred_default_local_cpu_port(cpu_dp->ds);
		if (preferred_cpu_dp && preferred_cpu_dp != cpu_dp)
			continue;

		/* Prefer a local CPU port */
		dsa_switch_for_each_port(dp, cpu_dp->ds) {
			/* Prefer the first local CPU port found */
			if (dp->cpu_dp)
				continue;

			if (dsa_port_is_user(dp) || dsa_port_is_dsa(dp))
				dp->cpu_dp = cpu_dp;
		}
	}

	/* Ports still unassigned fall back to the tree's first CPU port */
	return dsa_tree_setup_default_cpu(dst);
}
476
477 static void dsa_tree_teardown_cpu_ports(struct dsa_switch_tree *dst)
478 {
479         struct dsa_port *dp;
480
481         list_for_each_entry(dp, &dst->ports, list)
482                 if (dsa_port_is_user(dp) || dsa_port_is_dsa(dp))
483                         dp->cpu_dp = NULL;
484 }
485
/* Register @dp with devlink and run the driver's optional per-port setup.
 * The tree index is used as the devlink switch ID, grouping all ports of
 * one tree together.
 */
static int dsa_port_devlink_setup(struct dsa_port *dp)
{
	struct devlink_port *dlp = &dp->devlink_port;
	struct dsa_switch_tree *dst = dp->ds->dst;
	struct devlink_port_attrs attrs = {};
	struct devlink *dl = dp->ds->devlink;
	struct dsa_switch *ds = dp->ds;
	const unsigned char *id;
	unsigned char len;
	int err;

	memset(dlp, 0, sizeof(*dlp));
	devlink_port_init(dl, dlp);

	/* Driver hook runs before devlink registration */
	if (ds->ops->port_setup) {
		err = ds->ops->port_setup(ds, dp->index);
		if (err)
			return err;
	}

	/* Use the raw bytes of the tree index as the devlink switch ID */
	id = (const unsigned char *)&dst->index;
	len = sizeof(dst->index);

	attrs.phys.port_number = dp->index;
	memcpy(attrs.switch_id.id, id, len);
	attrs.switch_id.id_len = len;

	/* Map the DSA port type onto the matching devlink flavour */
	switch (dp->type) {
	case DSA_PORT_TYPE_UNUSED:
		attrs.flavour = DEVLINK_PORT_FLAVOUR_UNUSED;
		break;
	case DSA_PORT_TYPE_CPU:
		attrs.flavour = DEVLINK_PORT_FLAVOUR_CPU;
		break;
	case DSA_PORT_TYPE_DSA:
		attrs.flavour = DEVLINK_PORT_FLAVOUR_DSA;
		break;
	case DSA_PORT_TYPE_USER:
		attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
		break;
	}

	devlink_port_attrs_set(dlp, &attrs);
	err = devlink_port_register(dl, dlp, dp->index);
	if (err) {
		/* Unwind the driver's port_setup on registration failure */
		if (ds->ops->port_teardown)
			ds->ops->port_teardown(ds, dp->index);
		return err;
	}

	return 0;
}
538
/* Mirror of dsa_port_devlink_setup(): unregister from devlink first, then
 * run the driver's teardown hook, then finalize the devlink port struct.
 */
static void dsa_port_devlink_teardown(struct dsa_port *dp)
{
	struct devlink_port *dlp = &dp->devlink_port;
	struct dsa_switch *ds = dp->ds;

	devlink_port_unregister(dlp);

	if (ds->ops->port_teardown)
		ds->ops->port_teardown(ds, dp->index);

	devlink_port_fini(dlp);
}
551
/* Bring up a single port according to its type. Idempotent: a port that is
 * already set up returns 0 immediately. On failure, everything done so far
 * (port enable, OF link registration, devlink setup) is unwound in reverse
 * order before returning the error.
 */
static int dsa_port_setup(struct dsa_port *dp)
{
	struct devlink_port *dlp = &dp->devlink_port;
	bool dsa_port_link_registered = false;
	struct dsa_switch *ds = dp->ds;
	bool dsa_port_enabled = false;
	int err = 0;

	if (dp->setup)
		return 0;

	err = dsa_port_devlink_setup(dp);
	if (err)
		return err;

	switch (dp->type) {
	case DSA_PORT_TYPE_UNUSED:
		dsa_port_disable(dp);
		break;
	case DSA_PORT_TYPE_CPU:
		/* Register the port's link via OF only when a device tree
		 * node is available, then enable the port.
		 */
		if (dp->dn) {
			err = dsa_shared_port_link_register_of(dp);
			if (err)
				break;
			dsa_port_link_registered = true;
		} else {
			dev_warn(ds->dev,
				 "skipping link registration for CPU port %d\n",
				 dp->index);
		}

		err = dsa_port_enable(dp, NULL);
		if (err)
			break;
		dsa_port_enabled = true;

		break;
	case DSA_PORT_TYPE_DSA:
		/* Same sequence as for CPU ports */
		if (dp->dn) {
			err = dsa_shared_port_link_register_of(dp);
			if (err)
				break;
			dsa_port_link_registered = true;
		} else {
			dev_warn(ds->dev,
				 "skipping link registration for DSA port %d\n",
				 dp->index);
		}

		err = dsa_port_enable(dp, NULL);
		if (err)
			break;
		dsa_port_enabled = true;

		break;
	case DSA_PORT_TYPE_USER:
		of_get_mac_address(dp->dn, dp->mac);
		err = dsa_slave_create(dp);
		if (err)
			break;

		devlink_port_type_eth_set(dlp, dp->slave);
		break;
	}

	/* Error unwinding, in reverse order of the steps above */
	if (err && dsa_port_enabled)
		dsa_port_disable(dp);
	if (err && dsa_port_link_registered)
		dsa_shared_port_link_unregister_of(dp);
	if (err) {
		dsa_port_devlink_teardown(dp);
		return err;
	}

	dp->setup = true;

	return 0;
}
630
/* Reverse of dsa_port_setup(); a no-op on ports that were never set up */
static void dsa_port_teardown(struct dsa_port *dp)
{
	struct devlink_port *dlp = &dp->devlink_port;

	if (!dp->setup)
		return;

	devlink_port_type_clear(dlp);

	switch (dp->type) {
	case DSA_PORT_TYPE_UNUSED:
		break;
	case DSA_PORT_TYPE_CPU:
		dsa_port_disable(dp);
		if (dp->dn)
			dsa_shared_port_link_unregister_of(dp);
		break;
	case DSA_PORT_TYPE_DSA:
		dsa_port_disable(dp);
		if (dp->dn)
			dsa_shared_port_link_unregister_of(dp);
		break;
	case DSA_PORT_TYPE_USER:
		/* dp->slave may be NULL if dsa_slave_create() failed */
		if (dp->slave) {
			dsa_slave_destroy(dp->slave);
			dp->slave = NULL;
		}
		break;
	}

	dsa_port_devlink_teardown(dp);

	dp->setup = false;
}
665
/* Force a port into the UNUSED type, then run the common port setup */
static int dsa_port_setup_as_unused(struct dsa_port *dp)
{
	dp->type = DSA_PORT_TYPE_UNUSED;
	return dsa_port_setup(dp);
}
671
672 static int dsa_devlink_info_get(struct devlink *dl,
673                                 struct devlink_info_req *req,
674                                 struct netlink_ext_ack *extack)
675 {
676         struct dsa_switch *ds = dsa_devlink_to_ds(dl);
677
678         if (ds->ops->devlink_info_get)
679                 return ds->ops->devlink_info_get(ds, req, extack);
680
681         return -EOPNOTSUPP;
682 }
683
/* devlink shared-buffer pool get: forward to the driver or -EOPNOTSUPP */
static int dsa_devlink_sb_pool_get(struct devlink *dl,
				   unsigned int sb_index, u16 pool_index,
				   struct devlink_sb_pool_info *pool_info)
{
	struct dsa_switch *ds = dsa_devlink_to_ds(dl);

	if (!ds->ops->devlink_sb_pool_get)
		return -EOPNOTSUPP;

	return ds->ops->devlink_sb_pool_get(ds, sb_index, pool_index,
					    pool_info);
}
696
/* devlink shared-buffer pool set: forward to the driver or -EOPNOTSUPP */
static int dsa_devlink_sb_pool_set(struct devlink *dl, unsigned int sb_index,
				   u16 pool_index, u32 size,
				   enum devlink_sb_threshold_type threshold_type,
				   struct netlink_ext_ack *extack)
{
	struct dsa_switch *ds = dsa_devlink_to_ds(dl);

	if (!ds->ops->devlink_sb_pool_set)
		return -EOPNOTSUPP;

	return ds->ops->devlink_sb_pool_set(ds, sb_index, pool_index, size,
					    threshold_type, extack);
}
710
/* Per-port shared-buffer pool get: forward to the driver or -EOPNOTSUPP */
static int dsa_devlink_sb_port_pool_get(struct devlink_port *dlp,
					unsigned int sb_index, u16 pool_index,
					u32 *p_threshold)
{
	struct dsa_switch *ds = dsa_devlink_port_to_ds(dlp);
	int port = dsa_devlink_port_to_port(dlp);

	if (!ds->ops->devlink_sb_port_pool_get)
		return -EOPNOTSUPP;

	return ds->ops->devlink_sb_port_pool_get(ds, port, sb_index,
						 pool_index, p_threshold);
}
724
/* Per-port shared-buffer pool set: forward to the driver or -EOPNOTSUPP */
static int dsa_devlink_sb_port_pool_set(struct devlink_port *dlp,
					unsigned int sb_index, u16 pool_index,
					u32 threshold,
					struct netlink_ext_ack *extack)
{
	struct dsa_switch *ds = dsa_devlink_port_to_ds(dlp);
	int port = dsa_devlink_port_to_port(dlp);

	if (!ds->ops->devlink_sb_port_pool_set)
		return -EOPNOTSUPP;

	return ds->ops->devlink_sb_port_pool_set(ds, port, sb_index,
						 pool_index, threshold, extack);
}
739
/* TC-to-pool binding get: forward to the driver or -EOPNOTSUPP */
static int
dsa_devlink_sb_tc_pool_bind_get(struct devlink_port *dlp,
				unsigned int sb_index, u16 tc_index,
				enum devlink_sb_pool_type pool_type,
				u16 *p_pool_index, u32 *p_threshold)
{
	struct dsa_switch *ds = dsa_devlink_port_to_ds(dlp);
	int port = dsa_devlink_port_to_port(dlp);

	if (!ds->ops->devlink_sb_tc_pool_bind_get)
		return -EOPNOTSUPP;

	return ds->ops->devlink_sb_tc_pool_bind_get(ds, port, sb_index,
						    tc_index, pool_type,
						    p_pool_index, p_threshold);
}
756
/* TC-to-pool binding set: forward to the driver or -EOPNOTSUPP */
static int
dsa_devlink_sb_tc_pool_bind_set(struct devlink_port *dlp,
				unsigned int sb_index, u16 tc_index,
				enum devlink_sb_pool_type pool_type,
				u16 pool_index, u32 threshold,
				struct netlink_ext_ack *extack)
{
	struct dsa_switch *ds = dsa_devlink_port_to_ds(dlp);
	int port = dsa_devlink_port_to_port(dlp);

	if (!ds->ops->devlink_sb_tc_pool_bind_set)
		return -EOPNOTSUPP;

	return ds->ops->devlink_sb_tc_pool_bind_set(ds, port, sb_index,
						    tc_index, pool_type,
						    pool_index, threshold,
						    extack);
}
775
/* Occupancy snapshot: forward to the driver or -EOPNOTSUPP */
static int dsa_devlink_sb_occ_snapshot(struct devlink *dl,
				       unsigned int sb_index)
{
	struct dsa_switch *ds = dsa_devlink_to_ds(dl);

	if (!ds->ops->devlink_sb_occ_snapshot)
		return -EOPNOTSUPP;

	return ds->ops->devlink_sb_occ_snapshot(ds, sb_index);
}
786
/* Occupancy max clear: forward to the driver or -EOPNOTSUPP */
static int dsa_devlink_sb_occ_max_clear(struct devlink *dl,
					unsigned int sb_index)
{
	struct dsa_switch *ds = dsa_devlink_to_ds(dl);

	if (!ds->ops->devlink_sb_occ_max_clear)
		return -EOPNOTSUPP;

	return ds->ops->devlink_sb_occ_max_clear(ds, sb_index);
}
797
/* Per-port pool occupancy get: forward to the driver or -EOPNOTSUPP */
static int dsa_devlink_sb_occ_port_pool_get(struct devlink_port *dlp,
					    unsigned int sb_index,
					    u16 pool_index, u32 *p_cur,
					    u32 *p_max)
{
	struct dsa_switch *ds = dsa_devlink_port_to_ds(dlp);
	int port = dsa_devlink_port_to_port(dlp);

	if (!ds->ops->devlink_sb_occ_port_pool_get)
		return -EOPNOTSUPP;

	return ds->ops->devlink_sb_occ_port_pool_get(ds, port, sb_index,
						     pool_index, p_cur, p_max);
}
812
/* Per-port TC binding occupancy get: forward to the driver or -EOPNOTSUPP */
static int
dsa_devlink_sb_occ_tc_port_bind_get(struct devlink_port *dlp,
				    unsigned int sb_index, u16 tc_index,
				    enum devlink_sb_pool_type pool_type,
				    u32 *p_cur, u32 *p_max)
{
	struct dsa_switch *ds = dsa_devlink_port_to_ds(dlp);
	int port = dsa_devlink_port_to_port(dlp);

	if (!ds->ops->devlink_sb_occ_tc_port_bind_get)
		return -EOPNOTSUPP;

	return ds->ops->devlink_sb_occ_tc_port_bind_get(ds, port,
							sb_index, tc_index,
							pool_type, p_cur,
							p_max);
}
830
/* devlink operations shared by all DSA switches; each entry trampolines
 * into the corresponding dsa_switch_ops callback defined above.
 */
static const struct devlink_ops dsa_devlink_ops = {
	.info_get			= dsa_devlink_info_get,
	.sb_pool_get			= dsa_devlink_sb_pool_get,
	.sb_pool_set			= dsa_devlink_sb_pool_set,
	.sb_port_pool_get		= dsa_devlink_sb_port_pool_get,
	.sb_port_pool_set		= dsa_devlink_sb_port_pool_set,
	.sb_tc_pool_bind_get		= dsa_devlink_sb_tc_pool_bind_get,
	.sb_tc_pool_bind_set		= dsa_devlink_sb_tc_pool_bind_set,
	.sb_occ_snapshot		= dsa_devlink_sb_occ_snapshot,
	.sb_occ_max_clear		= dsa_devlink_sb_occ_max_clear,
	.sb_occ_port_pool_get		= dsa_devlink_sb_occ_port_pool_get,
	.sb_occ_tc_port_bind_get	= dsa_devlink_sb_occ_tc_port_bind_get,
};
844
/* Switch the hardware over to the tree's tagging protocol (unless it is
 * already the tree's default one) and wire up the tagger/driver connection.
 * On driver-side connect failure, the tagger-side connection is undone.
 */
static int dsa_switch_setup_tag_protocol(struct dsa_switch *ds)
{
	const struct dsa_device_ops *tag_ops = ds->dst->tag_ops;
	struct dsa_switch_tree *dst = ds->dst;
	int err;

	if (tag_ops->proto == dst->default_proto)
		goto connect;

	/* change_tag_protocol is invoked under RTNL */
	rtnl_lock();
	err = ds->ops->change_tag_protocol(ds, tag_ops->proto);
	rtnl_unlock();
	if (err) {
		dev_err(ds->dev, "Unable to use tag protocol \"%s\": %pe\n",
			tag_ops->name, ERR_PTR(err));
		return err;
	}

connect:
	/* Optional tagger-side connect hook */
	if (tag_ops->connect) {
		err = tag_ops->connect(ds);
		if (err)
			return err;
	}

	/* Optional driver-side connect hook */
	if (ds->ops->connect_tag_protocol) {
		err = ds->ops->connect_tag_protocol(ds, tag_ops->proto);
		if (err) {
			dev_err(ds->dev,
				"Unable to connect to tag protocol \"%s\": %pe\n",
				tag_ops->name, ERR_PTR(err));
			goto disconnect;
		}
	}

	return 0;

disconnect:
	if (tag_ops->disconnect)
		tag_ops->disconnect(ds);

	return err;
}
888
889 static void dsa_switch_teardown_tag_protocol(struct dsa_switch *ds)
890 {
891         const struct dsa_device_ops *tag_ops = ds->dst->tag_ops;
892
893         if (tag_ops->disconnect)
894                 tag_ops->disconnect(ds);
895 }
896
/* One-time setup of a switch: devlink registration, DSA notifier, driver
 * ->setup(), tag protocol, and (optionally) the slave MDIO bus. Idempotent.
 * On failure, the goto ladder unwinds each step in reverse order.
 */
static int dsa_switch_setup(struct dsa_switch *ds)
{
	struct dsa_devlink_priv *dl_priv;
	struct device_node *dn;
	int err;

	if (ds->setup)
		return 0;

	/* Initialize ds->phys_mii_mask before registering the slave MDIO bus
	 * driver and before ops->setup() has run, since the switch drivers and
	 * the slave MDIO bus driver rely on these values for probing PHY
	 * devices or not
	 */
	ds->phys_mii_mask |= dsa_user_ports(ds);

	/* Add the switch to devlink before calling setup, so that setup can
	 * add dpipe tables
	 */
	ds->devlink =
		devlink_alloc(&dsa_devlink_ops, sizeof(*dl_priv), ds->dev);
	if (!ds->devlink)
		return -ENOMEM;
	dl_priv = devlink_priv(ds->devlink);
	dl_priv->ds = ds;

	err = dsa_switch_register_notifier(ds);
	if (err)
		goto devlink_free;

	ds->configure_vlan_while_not_filtering = true;

	err = ds->ops->setup(ds);
	if (err < 0)
		goto unregister_notifier;

	err = dsa_switch_setup_tag_protocol(ds);
	if (err)
		goto teardown;

	/* Allocate and register a slave MDIO bus only for drivers that
	 * implement ->phy_read and did not provide their own bus.
	 */
	if (!ds->slave_mii_bus && ds->ops->phy_read) {
		ds->slave_mii_bus = mdiobus_alloc();
		if (!ds->slave_mii_bus) {
			err = -ENOMEM;
			goto teardown;
		}

		dsa_slave_mii_bus_init(ds);

		/* An optional "mdio" child node configures the bus via OF */
		dn = of_get_child_by_name(ds->dev->of_node, "mdio");

		err = of_mdiobus_register(ds->slave_mii_bus, dn);
		of_node_put(dn);
		if (err < 0)
			goto free_slave_mii_bus;
	}

	ds->setup = true;
	/* Register with devlink last, once the switch is fully set up */
	devlink_register(ds->devlink);
	return 0;

free_slave_mii_bus:
	if (ds->slave_mii_bus && ds->ops->phy_read)
		mdiobus_free(ds->slave_mii_bus);
teardown:
	if (ds->ops->teardown)
		ds->ops->teardown(ds);
unregister_notifier:
	dsa_switch_unregister_notifier(ds);
devlink_free:
	devlink_free(ds->devlink);
	ds->devlink = NULL;
	return err;
}
971
/* Undo dsa_switch_setup() in reverse order: unpublish devlink, remove the
 * slave MDIO bus (if we allocated one), disconnect the tagger, run the
 * driver's ->teardown() and unregister the notifier. No-op if the switch
 * was never set up.
 */
static void dsa_switch_teardown(struct dsa_switch *ds)
{
	if (!ds->setup)
		return;

	if (ds->devlink)
		devlink_unregister(ds->devlink);

	/* ds->ops->phy_read set implies the bus was allocated by us in
	 * dsa_switch_setup(), so it is ours to unregister and free
	 */
	if (ds->slave_mii_bus && ds->ops->phy_read) {
		mdiobus_unregister(ds->slave_mii_bus);
		mdiobus_free(ds->slave_mii_bus);
		ds->slave_mii_bus = NULL;
	}

	dsa_switch_teardown_tag_protocol(ds);

	if (ds->ops->teardown)
		ds->ops->teardown(ds);

	dsa_switch_unregister_notifier(ds);

	if (ds->devlink) {
		devlink_free(ds->devlink);
		ds->devlink = NULL;
	}

	ds->setup = false;
}
1000
/* First tear down the non-shared, then the shared ports. This ensures that
 * all work items scheduled by our switchdev handlers for user ports have
 * completed before we destroy the refcounting kept on the shared ports.
 */
static void dsa_tree_teardown_ports(struct dsa_switch_tree *dst)
{
	struct dsa_port *dp;

	/* Pass 1: user and unused ports */
	list_for_each_entry(dp, &dst->ports, list)
		if (dsa_port_is_user(dp) || dsa_port_is_unused(dp))
			dsa_port_teardown(dp);

	/* Drain deferred switchdev work before touching shared ports */
	dsa_flush_workqueue();

	/* Pass 2: shared (DSA and CPU) ports */
	list_for_each_entry(dp, &dst->ports, list)
		if (dsa_port_is_dsa(dp) || dsa_port_is_cpu(dp))
			dsa_port_teardown(dp);
}
1019
/* Tear down every switch in the tree. dsa_switch_teardown() is idempotent
 * per switch, so visiting a switch once per port is harmless.
 */
static void dsa_tree_teardown_switches(struct dsa_switch_tree *dst)
{
	struct dsa_port *dp;

	list_for_each_entry(dp, &dst->ports, list)
		dsa_switch_teardown(dp->ds);
}
1027
/* Bring shared ports up first, then non-shared ports */
static int dsa_tree_setup_ports(struct dsa_switch_tree *dst)
{
	struct dsa_port *dp;
	int err = 0;

	/* Shared (DSA and CPU) ports: any failure aborts the whole tree */
	list_for_each_entry(dp, &dst->ports, list) {
		if (dsa_port_is_dsa(dp) || dsa_port_is_cpu(dp)) {
			err = dsa_port_setup(dp);
			if (err)
				goto teardown;
		}
	}

	/* User ports: on failure, degrade the port to unused rather than
	 * failing the tree; only abort if even that fallback fails
	 */
	list_for_each_entry(dp, &dst->ports, list) {
		if (dsa_port_is_user(dp) || dsa_port_is_unused(dp)) {
			err = dsa_port_setup(dp);
			if (err) {
				err = dsa_port_setup_as_unused(dp);
				if (err)
					goto teardown;
			}
		}
	}

	return 0;

teardown:
	dsa_tree_teardown_ports(dst);

	return err;
}
1060
1061 static int dsa_tree_setup_switches(struct dsa_switch_tree *dst)
1062 {
1063         struct dsa_port *dp;
1064         int err = 0;
1065
1066         list_for_each_entry(dp, &dst->ports, list) {
1067                 err = dsa_switch_setup(dp->ds);
1068                 if (err) {
1069                         dsa_tree_teardown_switches(dst);
1070                         break;
1071                 }
1072         }
1073
1074         return err;
1075 }
1076
/* Attach every CPU port to its DSA master netdevice, under rtnl. The
 * master's current admin/oper state is sampled before dsa_master_setup()
 * and replayed afterwards so switches start with an accurate view of it.
 */
static int dsa_tree_setup_master(struct dsa_switch_tree *dst)
{
	struct dsa_port *cpu_dp;
	int err = 0;

	rtnl_lock();

	dsa_tree_for_each_cpu_port(cpu_dp, dst) {
		struct net_device *master = cpu_dp->master;
		/* "admin up" also requires a real qdisc, not the noop one */
		bool admin_up = (master->flags & IFF_UP) &&
				!qdisc_tx_is_noop(master);

		err = dsa_master_setup(master, cpu_dp);
		if (err)
			break;

		/* Replay master state event */
		dsa_tree_master_admin_state_change(dst, master, admin_up);
		dsa_tree_master_oper_state_change(dst, master,
						  netif_oper_up(master));
	}

	rtnl_unlock();

	return err;
}
1103
/* Detach every CPU port from its DSA master, under rtnl. A synthetic
 * "admin down" is broadcast first so switches see the master go away.
 */
static void dsa_tree_teardown_master(struct dsa_switch_tree *dst)
{
	struct dsa_port *cpu_dp;

	rtnl_lock();

	dsa_tree_for_each_cpu_port(cpu_dp, dst) {
		struct net_device *master = cpu_dp->master;

		/* Synthesizing an "admin down" state is sufficient for
		 * the switches to get a notification if the master is
		 * currently up and running.
		 */
		dsa_tree_master_admin_state_change(dst, master, false);

		dsa_master_teardown(master);
	}

	rtnl_unlock();
}
1124
1125 static int dsa_tree_setup_lags(struct dsa_switch_tree *dst)
1126 {
1127         unsigned int len = 0;
1128         struct dsa_port *dp;
1129
1130         list_for_each_entry(dp, &dst->ports, list) {
1131                 if (dp->ds->num_lag_ids > len)
1132                         len = dp->ds->num_lag_ids;
1133         }
1134
1135         if (!len)
1136                 return 0;
1137
1138         dst->lags = kcalloc(len, sizeof(*dst->lags), GFP_KERNEL);
1139         if (!dst->lags)
1140                 return -ENOMEM;
1141
1142         dst->lags_len = len;
1143         return 0;
1144 }
1145
/* Free the tree-wide LAG array; kfree(NULL) is a no-op if none was set up */
static void dsa_tree_teardown_lags(struct dsa_switch_tree *dst)
{
	kfree(dst->lags);
}
1150
/* Bring up a whole switch tree: routing table, CPU ports, switches, ports,
 * masters and LAGs, in that order. Returns 0 and leaves the tree untouched
 * if the routing table is still incomplete (more member switches expected).
 * On error, the stages already completed are unwound in reverse order.
 */
static int dsa_tree_setup(struct dsa_switch_tree *dst)
{
	bool complete;
	int err;

	if (dst->setup) {
		pr_err("DSA: tree %d already setup! Disjoint trees?\n",
		       dst->index);
		return -EEXIST;
	}

	/* Wait until every member switch of the tree has probed */
	complete = dsa_tree_setup_routing_table(dst);
	if (!complete)
		return 0;

	err = dsa_tree_setup_cpu_ports(dst);
	if (err)
		return err;

	err = dsa_tree_setup_switches(dst);
	if (err)
		goto teardown_cpu_ports;

	err = dsa_tree_setup_ports(dst);
	if (err)
		goto teardown_switches;

	err = dsa_tree_setup_master(dst);
	if (err)
		goto teardown_ports;

	err = dsa_tree_setup_lags(dst);
	if (err)
		goto teardown_master;

	dst->setup = true;

	pr_info("DSA: tree %d setup\n", dst->index);

	return 0;

teardown_master:
	dsa_tree_teardown_master(dst);
teardown_ports:
	dsa_tree_teardown_ports(dst);
teardown_switches:
	dsa_tree_teardown_switches(dst);
teardown_cpu_ports:
	dsa_tree_teardown_cpu_ports(dst);

	return err;
}
1203
/* Tear down a fully set-up tree: LAGs, masters, ports, switches, CPU ports
 * (reverse of dsa_tree_setup()), then free the routing table links.
 * No-op if the tree was never completely set up.
 */
static void dsa_tree_teardown(struct dsa_switch_tree *dst)
{
	struct dsa_link *dl, *next;

	if (!dst->setup)
		return;

	dsa_tree_teardown_lags(dst);

	dsa_tree_teardown_master(dst);

	dsa_tree_teardown_ports(dst);

	dsa_tree_teardown_switches(dst);

	dsa_tree_teardown_cpu_ports(dst);

	/* Release the cross-chip routing table entries */
	list_for_each_entry_safe(dl, next, &dst->rtable, list) {
		list_del(&dl->list);
		kfree(dl);
	}

	pr_info("DSA: tree %d torn down\n", dst->index);

	dst->setup = false;
}
1230
/* Switch the tree over to a new tagger: connect all member switches to
 * @tag_ops, then disconnect them from the previous tagger. -EOPNOTSUPP
 * from the connect notifier is tolerated (taggers without a ->connect op).
 * On real failure, the new tagger is disconnected and dst->tag_ops is
 * restored to the old value.
 */
static int dsa_tree_bind_tag_proto(struct dsa_switch_tree *dst,
				   const struct dsa_device_ops *tag_ops)
{
	const struct dsa_device_ops *old_tag_ops = dst->tag_ops;
	struct dsa_notifier_tag_proto_info info;
	int err;

	dst->tag_ops = tag_ops;

	/* Notify the switches from this tree about the connection
	 * to the new tagger
	 */
	info.tag_ops = tag_ops;
	err = dsa_tree_notify(dst, DSA_NOTIFIER_TAG_PROTO_CONNECT, &info);
	if (err && err != -EOPNOTSUPP)
		goto out_disconnect;

	/* Notify the old tagger about the disconnection from this tree */
	info.tag_ops = old_tag_ops;
	dsa_tree_notify(dst, DSA_NOTIFIER_TAG_PROTO_DISCONNECT, &info);

	return 0;

out_disconnect:
	info.tag_ops = tag_ops;
	dsa_tree_notify(dst, DSA_NOTIFIER_TAG_PROTO_DISCONNECT, &info);
	dst->tag_ops = old_tag_ops;

	return err;
}
1261
/* Since the dsa/tagging sysfs device attribute is per master, the assumption
 * is that all DSA switches within a tree share the same tagger, otherwise
 * they would have formed disjoint trees (different "dsa,member" values).
 *
 * Refuses with -EBUSY while any master or slave interface is up; uses
 * rtnl_trylock()/restart_syscall() to avoid an ABBA deadlock against
 * sysfs attribute removal.
 */
int dsa_tree_change_tag_proto(struct dsa_switch_tree *dst,
			      const struct dsa_device_ops *tag_ops,
			      const struct dsa_device_ops *old_tag_ops)
{
	struct dsa_notifier_tag_proto_info info;
	struct dsa_port *dp;
	int err = -EBUSY;

	if (!rtnl_trylock())
		return restart_syscall();

	/* At the moment we don't allow changing the tag protocol under
	 * traffic. The rtnl_mutex also happens to serialize concurrent
	 * attempts to change the tagging protocol. If we ever lift the IFF_UP
	 * restriction, there needs to be another mutex which serializes this.
	 */
	dsa_tree_for_each_user_port(dp, dst) {
		if (dsa_port_to_master(dp)->flags & IFF_UP)
			goto out_unlock;

		if (dp->slave->flags & IFF_UP)
			goto out_unlock;
	}

	/* Notify the tag protocol change */
	info.tag_ops = tag_ops;
	err = dsa_tree_notify(dst, DSA_NOTIFIER_TAG_PROTO, &info);
	if (err)
		goto out_unwind_tagger;

	err = dsa_tree_bind_tag_proto(dst, tag_ops);
	if (err)
		goto out_unwind_tagger;

	rtnl_unlock();

	return 0;

out_unwind_tagger:
	/* Roll every switch back to the previous protocol */
	info.tag_ops = old_tag_ops;
	dsa_tree_notify(dst, DSA_NOTIFIER_TAG_PROTO, &info);
out_unlock:
	rtnl_unlock();
	return err;
}
1311
/* Broadcast the master's current operational state (admin up && oper up)
 * to all switches in the tree.
 */
static void dsa_tree_master_state_change(struct dsa_switch_tree *dst,
					 struct net_device *master)
{
	struct dsa_notifier_master_state_info info;
	struct dsa_port *cpu_dp = master->dsa_ptr;

	info.master = master;
	info.operational = dsa_port_master_is_operational(cpu_dp);

	dsa_tree_notify(dst, DSA_NOTIFIER_MASTER_STATE_CHANGE, &info);
}
1323
1324 void dsa_tree_master_admin_state_change(struct dsa_switch_tree *dst,
1325                                         struct net_device *master,
1326                                         bool up)
1327 {
1328         struct dsa_port *cpu_dp = master->dsa_ptr;
1329         bool notify = false;
1330
1331         /* Don't keep track of admin state on LAG DSA masters,
1332          * but rather just of physical DSA masters
1333          */
1334         if (netif_is_lag_master(master))
1335                 return;
1336
1337         if ((dsa_port_master_is_operational(cpu_dp)) !=
1338             (up && cpu_dp->master_oper_up))
1339                 notify = true;
1340
1341         cpu_dp->master_admin_up = up;
1342
1343         if (notify)
1344                 dsa_tree_master_state_change(dst, master);
1345 }
1346
1347 void dsa_tree_master_oper_state_change(struct dsa_switch_tree *dst,
1348                                        struct net_device *master,
1349                                        bool up)
1350 {
1351         struct dsa_port *cpu_dp = master->dsa_ptr;
1352         bool notify = false;
1353
1354         /* Don't keep track of oper state on LAG DSA masters,
1355          * but rather just of physical DSA masters
1356          */
1357         if (netif_is_lag_master(master))
1358                 return;
1359
1360         if ((dsa_port_master_is_operational(cpu_dp)) !=
1361             (cpu_dp->master_admin_up && up))
1362                 notify = true;
1363
1364         cpu_dp->master_oper_up = up;
1365
1366         if (notify)
1367                 dsa_tree_master_state_change(dst, master);
1368 }
1369
/* Return the dsa_port of @ds at @index, allocating and linking a new one
 * into the tree's port list if it does not exist yet. Returns NULL on
 * allocation failure.
 */
static struct dsa_port *dsa_port_touch(struct dsa_switch *ds, int index)
{
	struct dsa_switch_tree *dst = ds->dst;
	struct dsa_port *dp;

	/* Reuse an existing port if this index was already touched */
	dsa_switch_for_each_port(dp, ds)
		if (dp->index == index)
			return dp;

	dp = kzalloc(sizeof(*dp), GFP_KERNEL);
	if (!dp)
		return NULL;

	dp->ds = ds;
	dp->index = index;

	mutex_init(&dp->addr_lists_lock);
	mutex_init(&dp->vlans_lock);
	INIT_LIST_HEAD(&dp->fdbs);
	INIT_LIST_HEAD(&dp->mdbs);
	INIT_LIST_HEAD(&dp->vlans);
	INIT_LIST_HEAD(&dp->list);
	list_add_tail(&dp->list, &dst->ports);

	return dp;
}
1396
1397 static int dsa_port_parse_user(struct dsa_port *dp, const char *name)
1398 {
1399         if (!name)
1400                 name = "eth%d";
1401
1402         dp->type = DSA_PORT_TYPE_USER;
1403         dp->name = name;
1404
1405         return 0;
1406 }
1407
/* Mark @dp as a DSA link port (inter-switch link in a multi-chip tree) */
static int dsa_port_parse_dsa(struct dsa_port *dp)
{
	dp->type = DSA_PORT_TYPE_DSA;

	return 0;
}
1414
/* Ask the switch driver which tag protocol it prefers for CPU port @dp.
 * When the master is itself a DSA slave (stacked/disjoint trees), the
 * upstream switch's protocol is queried first and passed down as a hint.
 */
static enum dsa_tag_protocol dsa_get_tag_protocol(struct dsa_port *dp,
						  struct net_device *master)
{
	enum dsa_tag_protocol tag_protocol = DSA_TAG_PROTO_NONE;
	struct dsa_switch *mds, *ds = dp->ds;
	unsigned int mdp_upstream;
	struct dsa_port *mdp;

	/* It is possible to stack DSA switches onto one another when that
	 * happens the switch driver may want to know if its tagging protocol
	 * is going to work in such a configuration.
	 */
	if (dsa_slave_dev_check(master)) {
		mdp = dsa_slave_to_port(master);
		mds = mdp->ds;
		mdp_upstream = dsa_upstream_port(mds, mdp->index);
		tag_protocol = mds->ops->get_tag_protocol(mds, mdp_upstream,
							  DSA_TAG_PROTO_NONE);
	}

	/* If the master device is not itself a DSA slave in a disjoint DSA
	 * tree, then return immediately.
	 */
	return ds->ops->get_tag_protocol(ds, dp->index, tag_protocol);
}
1440
/* Configure @dp as a CPU port attached to @master. Resolves the tagging
 * protocol for the whole tree: the driver's preference, possibly overridden
 * by a DT-provided @user_protocol. Enforces a single protocol per tree and
 * keeps the tagger driver reference counted once per tree, not per CPU port.
 * Returns 0, -EINVAL on protocol conflicts, or -EPROBE_DEFER if the tagger
 * module is not available yet.
 */
static int dsa_port_parse_cpu(struct dsa_port *dp, struct net_device *master,
			      const char *user_protocol)
{
	const struct dsa_device_ops *tag_ops = NULL;
	struct dsa_switch *ds = dp->ds;
	struct dsa_switch_tree *dst = ds->dst;
	enum dsa_tag_protocol default_proto;

	/* Find out which protocol the switch would prefer. */
	default_proto = dsa_get_tag_protocol(dp, master);
	if (dst->default_proto) {
		if (dst->default_proto != default_proto) {
			dev_err(ds->dev,
				"A DSA switch tree can have only one tagging protocol\n");
			return -EINVAL;
		}
	} else {
		dst->default_proto = default_proto;
	}

	/* See if the user wants to override that preference. */
	if (user_protocol) {
		if (!ds->ops->change_tag_protocol) {
			dev_err(ds->dev, "Tag protocol cannot be modified\n");
			return -EINVAL;
		}

		tag_ops = dsa_find_tagger_by_name(user_protocol);
		if (IS_ERR(tag_ops)) {
			/* Unknown user protocol: warn and fall back to the
			 * driver default rather than failing the probe
			 */
			dev_warn(ds->dev,
				 "Failed to find a tagging driver for protocol %s, using default\n",
				 user_protocol);
			tag_ops = NULL;
		}
	}

	if (!tag_ops)
		tag_ops = dsa_tag_driver_get(default_proto);

	if (IS_ERR(tag_ops)) {
		/* -ENOPROTOOPT means the tagger module may still load later */
		if (PTR_ERR(tag_ops) == -ENOPROTOOPT)
			return -EPROBE_DEFER;

		dev_warn(ds->dev, "No tagger for this switch\n");
		return PTR_ERR(tag_ops);
	}

	if (dst->tag_ops) {
		if (dst->tag_ops != tag_ops) {
			dev_err(ds->dev,
				"A DSA switch tree can have only one tagging protocol\n");

			dsa_tag_driver_put(tag_ops);
			return -EINVAL;
		}

		/* In the case of multiple CPU ports per switch, the tagging
		 * protocol is still reference-counted only per switch tree.
		 */
		dsa_tag_driver_put(tag_ops);
	} else {
		dst->tag_ops = tag_ops;
	}

	dp->master = master;
	dp->type = DSA_PORT_TYPE_CPU;
	dsa_port_set_tag_protocol(dp, dst->tag_ops);
	dp->dst = dst;

	/* At this point, the tree may be configured to use a different
	 * tagger than the one chosen by the switch driver during
	 * .setup, in the case when a user selects a custom protocol
	 * through the DT.
	 *
	 * This is resolved by syncing the driver with the tree in
	 * dsa_switch_setup_tag_protocol once .setup has run and the
	 * driver is ready to accept calls to .change_tag_protocol. If
	 * the driver does not support the custom protocol at that
	 * point, the tree is wholly rejected, thereby ensuring that the
	 * tree and driver are always in agreement on the protocol to
	 * use.
	 */
	return 0;
}
1525
/* Classify a port from its device-tree node: an "ethernet" phandle makes it
 * a CPU port, a "link" property a DSA link, anything else a user port
 * (optionally labelled). Returns -EPROBE_DEFER if the master netdev behind
 * the "ethernet" phandle has not appeared yet.
 */
static int dsa_port_parse_of(struct dsa_port *dp, struct device_node *dn)
{
	struct device_node *ethernet = of_parse_phandle(dn, "ethernet", 0);
	const char *name = of_get_property(dn, "label", NULL);
	bool link = of_property_read_bool(dn, "link");

	dp->dn = dn;

	if (ethernet) {
		struct net_device *master;
		const char *user_protocol;

		master = of_find_net_device_by_node(ethernet);
		/* Drop the phandle reference now that the lookup is done */
		of_node_put(ethernet);
		if (!master)
			return -EPROBE_DEFER;

		user_protocol = of_get_property(dn, "dsa-tag-protocol", NULL);
		return dsa_port_parse_cpu(dp, master, user_protocol);
	}

	if (link)
		return dsa_port_parse_dsa(dp);

	return dsa_port_parse_user(dp, name);
}
1552
/* Parse all available port child nodes under @dn ("ports" or, failing
 * that, "ethernet-ports"). Each child must have a "reg" within
 * ds->num_ports. Returns 0 or a negative errno; OF node references are
 * dropped on every exit path.
 */
static int dsa_switch_parse_ports_of(struct dsa_switch *ds,
				     struct device_node *dn)
{
	struct device_node *ports, *port;
	struct dsa_port *dp;
	int err = 0;
	u32 reg;

	ports = of_get_child_by_name(dn, "ports");
	if (!ports) {
		/* The second possibility is "ethernet-ports" */
		ports = of_get_child_by_name(dn, "ethernet-ports");
		if (!ports) {
			dev_err(ds->dev, "no ports child node found\n");
			return -EINVAL;
		}
	}

	for_each_available_child_of_node(ports, port) {
		err = of_property_read_u32(port, "reg", &reg);
		if (err) {
			/* Early loop exit: release the child node reference
			 * that for_each_available_child_of_node() holds
			 */
			of_node_put(port);
			goto out_put_node;
		}

		if (reg >= ds->num_ports) {
			dev_err(ds->dev, "port %pOF index %u exceeds num_ports (%u)\n",
				port, reg, ds->num_ports);
			of_node_put(port);
			err = -EINVAL;
			goto out_put_node;
		}

		dp = dsa_to_port(ds, reg);

		err = dsa_port_parse_of(dp, port);
		if (err) {
			of_node_put(port);
			goto out_put_node;
		}
	}

out_put_node:
	of_node_put(ports);
	return err;
}
1599
/* Read the optional "dsa,member" = <tree index, switch index> property and
 * attach @ds to the corresponding tree (creating it if needed). Without the
 * property, the switch joins tree 0 as switch 0. Fails with -EEXIST if the
 * (tree, switch) pair is already taken.
 */
static int dsa_switch_parse_member_of(struct dsa_switch *ds,
				      struct device_node *dn)
{
	u32 m[2] = { 0, 0 };
	int sz;

	/* Don't error out if this optional property isn't found */
	sz = of_property_read_variable_u32_array(dn, "dsa,member", m, 2, 2);
	if (sz < 0 && sz != -EINVAL)
		return sz;

	ds->index = m[1];

	ds->dst = dsa_tree_touch(m[0]);
	if (!ds->dst)
		return -ENOMEM;

	if (dsa_switch_find(ds->dst->index, ds->index)) {
		dev_err(ds->dev,
			"A DSA switch with index %d already exists in tree %d\n",
			ds->index, ds->dst->index);
		return -EEXIST;
	}

	/* Track the highest switch index seen in this tree */
	if (ds->dst->last_switch < ds->index)
		ds->dst->last_switch = ds->index;

	return 0;
}
1629
1630 static int dsa_switch_touch_ports(struct dsa_switch *ds)
1631 {
1632         struct dsa_port *dp;
1633         int port;
1634
1635         for (port = 0; port < ds->num_ports; port++) {
1636                 dp = dsa_port_touch(ds, port);
1637                 if (!dp)
1638                         return -ENOMEM;
1639         }
1640
1641         return 0;
1642 }
1643
/* Full device-tree parse for one switch: tree membership, port structure
 * allocation, then per-port classification.
 */
static int dsa_switch_parse_of(struct dsa_switch *ds, struct device_node *dn)
{
	int err;

	err = dsa_switch_parse_member_of(ds, dn);
	if (err)
		return err;

	err = dsa_switch_touch_ports(ds);
	if (err)
		return err;

	return dsa_switch_parse_ports_of(ds, dn);
}
1658
/* Platform-data variant of port classification: the magic names "cpu" and
 * "dsa" select the port type; any other name labels a user port.
 */
static int dsa_port_parse(struct dsa_port *dp, const char *name,
			  struct device *dev)
{
	if (!strcmp(name, "cpu")) {
		struct net_device *master;

		master = dsa_dev_to_net_device(dev);
		if (!master)
			return -EPROBE_DEFER;

		/* NOTE(review): the reference taken by dsa_dev_to_net_device()
		 * is dropped before master is used below — this relies on the
		 * platform-data provider keeping the netdev alive; confirm.
		 */
		dev_put(master);

		return dsa_port_parse_cpu(dp, master, NULL);
	}

	if (!strcmp(name, "dsa"))
		return dsa_port_parse_dsa(dp);

	return dsa_port_parse_user(dp, name);
}
1679
1680 static int dsa_switch_parse_ports(struct dsa_switch *ds,
1681                                   struct dsa_chip_data *cd)
1682 {
1683         bool valid_name_found = false;
1684         struct dsa_port *dp;
1685         struct device *dev;
1686         const char *name;
1687         unsigned int i;
1688         int err;
1689
1690         for (i = 0; i < DSA_MAX_PORTS; i++) {
1691                 name = cd->port_names[i];
1692                 dev = cd->netdev[i];
1693                 dp = dsa_to_port(ds, i);
1694
1695                 if (!name)
1696                         continue;
1697
1698                 err = dsa_port_parse(dp, name, dev);
1699                 if (err)
1700                         return err;
1701
1702                 valid_name_found = true;
1703         }
1704
1705         if (!valid_name_found && i == DSA_MAX_PORTS)
1706                 return -EINVAL;
1707
1708         return 0;
1709 }
1710
/* Platform-data variant of dsa_switch_parse_of(): single-switch trees only,
 * so the switch is always member 0 of tree 0.
 */
static int dsa_switch_parse(struct dsa_switch *ds, struct dsa_chip_data *cd)
{
	int err;

	ds->cd = cd;

	/* We don't support interconnected switches nor multiple trees via
	 * platform data, so this is the unique switch of the tree.
	 */
	ds->index = 0;
	ds->dst = dsa_tree_touch(0);
	if (!ds->dst)
		return -ENOMEM;

	err = dsa_switch_touch_ports(ds);
	if (err)
		return err;

	return dsa_switch_parse_ports(ds, cd);
}
1731
/* Free every dsa_port of @ds, unlinking it from the tree's port list.
 * The FDB/MDB/VLAN lists are expected to be empty by now; warn otherwise.
 */
static void dsa_switch_release_ports(struct dsa_switch *ds)
{
	struct dsa_port *dp, *next;

	dsa_switch_for_each_port_safe(dp, next, ds) {
		WARN_ON(!list_empty(&dp->fdbs));
		WARN_ON(!list_empty(&dp->mdbs));
		WARN_ON(!list_empty(&dp->vlans));
		list_del(&dp->list);
		kfree(dp);
	}
}
1744
/* Probe one switch: parse its configuration (device tree preferred,
 * platform data as fallback), take a reference on its tree and attempt to
 * set the tree up. The tree only comes up once all of its member switches
 * have probed. Releases the ports and the tree reference on failure.
 */
static int dsa_switch_probe(struct dsa_switch *ds)
{
	struct dsa_switch_tree *dst;
	struct dsa_chip_data *pdata;
	struct device_node *np;
	int err;

	if (!ds->dev)
		return -ENODEV;

	pdata = ds->dev->platform_data;
	np = ds->dev->of_node;

	if (!ds->num_ports)
		return -EINVAL;

	if (np) {
		err = dsa_switch_parse_of(ds, np);
		if (err)
			dsa_switch_release_ports(ds);
	} else if (pdata) {
		err = dsa_switch_parse(ds, pdata);
		if (err)
			dsa_switch_release_ports(ds);
	} else {
		err = -ENODEV;
	}

	if (err)
		return err;

	dst = ds->dst;
	dsa_tree_get(dst);
	err = dsa_tree_setup(dst);
	if (err) {
		dsa_switch_release_ports(ds);
		dsa_tree_put(dst);
	}

	return err;
}
1786
/* Public entry point for switch drivers: probe @ds under the global DSA
 * mutex. The dsa_tree_put() drops the reference obtained during parsing;
 * dsa_switch_probe() holds its own reference while the tree is set up.
 */
int dsa_register_switch(struct dsa_switch *ds)
{
	int err;

	mutex_lock(&dsa2_mutex);
	err = dsa_switch_probe(ds);
	dsa_tree_put(ds->dst);
	mutex_unlock(&dsa2_mutex);

	return err;
}
EXPORT_SYMBOL_GPL(dsa_register_switch);
1799
/* Remove one switch: tear the whole tree down, free this switch's ports
 * and drop the tree reference taken at probe time.
 */
static void dsa_switch_remove(struct dsa_switch *ds)
{
	struct dsa_switch_tree *dst = ds->dst;

	dsa_tree_teardown(dst);
	dsa_switch_release_ports(ds);
	dsa_tree_put(dst);
}
1808
/* Public entry point for switch drivers: remove @ds under the global DSA
 * mutex, serializing against concurrent register/shutdown.
 */
void dsa_unregister_switch(struct dsa_switch *ds)
{
	mutex_lock(&dsa2_mutex);
	dsa_switch_remove(ds);
	mutex_unlock(&dsa2_mutex);
}
EXPORT_SYMBOL_GPL(dsa_unregister_switch);
1816
/* If the DSA master chooses to unregister its net_device on .shutdown, DSA is
 * blocking that operation from completion, due to the dev_hold taken inside
 * netdev_upper_dev_link. Unlink the DSA slave interfaces from being uppers of
 * the DSA master, so that the system can reboot successfully.
 */
void dsa_switch_shutdown(struct dsa_switch *ds)
{
	struct net_device *master, *slave_dev;
	struct dsa_port *dp;

	mutex_lock(&dsa2_mutex);

	/* Nothing to unlink if the switch never finished setup */
	if (!ds->setup)
		goto out;

	rtnl_lock();

	dsa_switch_for_each_user_port(dp, ds) {
		master = dsa_port_to_master(dp);
		slave_dev = dp->slave;

		netdev_upper_dev_unlink(master, slave_dev);
	}

	/* Disconnect from further netdevice notifiers on the master,
	 * since netdev_uses_dsa() will now return false.
	 */
	dsa_switch_for_each_cpu_port(dp, ds)
		dp->master->dsa_ptr = NULL;

	rtnl_unlock();
out:
	mutex_unlock(&dsa2_mutex);
}
EXPORT_SYMBOL_GPL(dsa_switch_shutdown);