// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * drivers/net/team/team.c - Network team device driver
 * Copyright (c) 2011 Jiri Pirko <jpirko@redhat.com>
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/rcupdate.h>
#include <linux/errno.h>
#include <linux/ctype.h>
#include <linux/notifier.h>
#include <linux/netdevice.h>
#include <linux/netpoll.h>
#include <linux/if_vlan.h>
#include <linux/if_arp.h>
#include <linux/socket.h>
#include <linux/etherdevice.h>
#include <linux/rtnetlink.h>
#include <net/rtnetlink.h>
#include <net/genetlink.h>
#include <net/netlink.h>
#include <net/sch_generic.h>
#include <generated/utsrelease.h>
#include <linux/if_team.h>

#define DRV_NAME "team"


/**********
 * Helpers
 **********/

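/* Caller must hold RTNL. The port device's rx_handler_data points back at
 * its struct team_port while the device is enslaved (see
 * netdev_rx_handler_register() in team_port_add); the netif_is_team_port()
 * check guards against reading it from a device this driver does not own.
 */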
static struct team_port *team_port_get_rtnl(const struct net_device *dev)
{
        struct team_port *port = rtnl_dereference(dev->rx_handler_data);

        return netif_is_team_port(dev) ? port : NULL;
}

/*
 * Since the ability to change the device address of an open port device is
 * checked in team_port_add, this function can be called without checking the
 * return value.
 */
static int __set_port_dev_addr(struct net_device *port_dev,
                               const unsigned char *dev_addr)
{
        struct sockaddr_storage addr;

        memcpy(addr.__data, dev_addr, port_dev->addr_len);
        addr.ss_family = port_dev->type;
        return dev_set_mac_address(port_dev, (struct sockaddr *)&addr, NULL);
}

static int team_port_set_orig_dev_addr(struct team_port *port)
{
        return __set_port_dev_addr(port->dev, port->orig.dev_addr);
}

static int team_port_set_team_dev_addr(struct team *team,
                                       struct team_port *port)
{
        return __set_port_dev_addr(port->dev, team->dev->dev_addr);
}

int team_modeop_port_enter(struct team *team, struct team_port *port)
{
        return team_port_set_team_dev_addr(team, port);
}
EXPORT_SYMBOL(team_modeop_port_enter);

void team_modeop_port_change_dev_addr(struct team *team,
                                      struct team_port *port)
{
        team_port_set_team_dev_addr(team, port);
}
EXPORT_SYMBOL(team_modeop_port_change_dev_addr);

static void team_lower_state_changed(struct team_port *port)
{
        struct netdev_lag_lower_state_info info;

        info.link_up = port->linkup;
        info.tx_enabled = team_port_enabled(port);
        netdev_lower_state_changed(port->dev, &info);
}

static void team_refresh_port_linkup(struct team_port *port)
{
        bool new_linkup = port->user.linkup_enabled ? port->user.linkup :
                                                      port->state.linkup;

        if (port->linkup != new_linkup) {
                port->linkup = new_linkup;
                team_lower_state_changed(port);
        }
}


/*******************
 * Options handling
 *******************/

struct team_option_inst { /* One for each option instance */
        struct list_head list;
        struct list_head tmp_list;
        struct team_option *option;
        struct team_option_inst_info info;
        bool changed;
        bool removed;
};

static struct team_option *__team_find_option(struct team *team,
                                              const char *opt_name)
{
        struct team_option *option;

        list_for_each_entry(option, &team->option_list, list) {
                if (strcmp(option->name, opt_name) == 0)
                        return option;
        }
        return NULL;
}

static void __team_option_inst_del(struct team_option_inst *opt_inst)
{
        list_del(&opt_inst->list);
        kfree(opt_inst);
}

static void __team_option_inst_del_option(struct team *team,
                                          struct team_option *option)
{
        struct team_option_inst *opt_inst, *tmp;

        list_for_each_entry_safe(opt_inst, tmp, &team->option_inst_list, list) {
                if (opt_inst->option == option)
                        __team_option_inst_del(opt_inst);
        }
}

static int __team_option_inst_add(struct team *team, struct team_option *option,
                                  struct team_port *port)
{
        struct team_option_inst *opt_inst;
        unsigned int array_size;
        unsigned int i;
        int err;

        array_size = option->array_size;
        if (!array_size)
                array_size = 1; /* No array but still need one instance */

        for (i = 0; i < array_size; i++) {
                opt_inst = kmalloc(sizeof(*opt_inst), GFP_KERNEL);
                if (!opt_inst)
                        return -ENOMEM;
                opt_inst->option = option;
                opt_inst->info.port = port;
                opt_inst->info.array_index = i;
                opt_inst->changed = true;
                opt_inst->removed = false;
                list_add_tail(&opt_inst->list, &team->option_inst_list);
                if (option->init) {
                        err = option->init(team, &opt_inst->info);
                        if (err)
                                return err;
                }
        }
        return 0;
}

static int __team_option_inst_add_option(struct team *team,
                                         struct team_option *option)
{
        int err;

        if (!option->per_port) {
                err = __team_option_inst_add(team, option, NULL);
                if (err)
                        goto inst_del_option;
        }
        return 0;

inst_del_option:
        __team_option_inst_del_option(team, option);
        return err;
}

static void __team_option_inst_mark_removed_option(struct team *team,
                                                   struct team_option *option)
{
        struct team_option_inst *opt_inst;

        list_for_each_entry(opt_inst, &team->option_inst_list, list) {
                if (opt_inst->option == option) {
                        opt_inst->changed = true;
                        opt_inst->removed = true;
                }
        }
}

static void __team_option_inst_del_port(struct team *team,
                                        struct team_port *port)
{
        struct team_option_inst *opt_inst, *tmp;

        list_for_each_entry_safe(opt_inst, tmp, &team->option_inst_list, list) {
                if (opt_inst->option->per_port &&
                    opt_inst->info.port == port)
                        __team_option_inst_del(opt_inst);
        }
}

static int __team_option_inst_add_port(struct team *team,
                                       struct team_port *port)
{
        struct team_option *option;
        int err;

        list_for_each_entry(option, &team->option_list, list) {
                if (!option->per_port)
                        continue;
                err = __team_option_inst_add(team, option, port);
                if (err)
                        goto inst_del_port;
        }
        return 0;

inst_del_port:
        __team_option_inst_del_port(team, port);
        return err;
}

static void __team_option_inst_mark_removed_port(struct team *team,
                                                 struct team_port *port)
{
        struct team_option_inst *opt_inst;

        list_for_each_entry(opt_inst, &team->option_inst_list, list) {
                if (opt_inst->info.port == port) {
                        opt_inst->changed = true;
                        opt_inst->removed = true;
                }
        }
}

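/* Register options in two phases: first duplicate all option structs (so a
 * name clash or allocation failure can be unwound without touching the team),
 * then create the per-team option instances. The rollback labels below undo
 * each phase in reverse order.
 */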
static int __team_options_register(struct team *team,
                                   const struct team_option *option,
                                   size_t option_count)
{
        int i;
        struct team_option **dst_opts;
        int err;

        dst_opts = kcalloc(option_count, sizeof(struct team_option *),
                           GFP_KERNEL);
        if (!dst_opts)
                return -ENOMEM;
        for (i = 0; i < option_count; i++, option++) {
                if (__team_find_option(team, option->name)) {
                        err = -EEXIST;
                        goto alloc_rollback;
                }
                dst_opts[i] = kmemdup(option, sizeof(*option), GFP_KERNEL);
                if (!dst_opts[i]) {
                        err = -ENOMEM;
                        goto alloc_rollback;
                }
        }

        for (i = 0; i < option_count; i++) {
                err = __team_option_inst_add_option(team, dst_opts[i]);
                if (err)
                        goto inst_rollback;
                list_add_tail(&dst_opts[i]->list, &team->option_list);
        }

        kfree(dst_opts);
        return 0;

inst_rollback:
        for (i--; i >= 0; i--) {
                __team_option_inst_del_option(team, dst_opts[i]);
                list_del(&dst_opts[i]->list);
        }

        i = option_count;
alloc_rollback:
        for (i--; i >= 0; i--)
                kfree(dst_opts[i]);

        kfree(dst_opts);
        return err;
}

static void __team_options_mark_removed(struct team *team,
                                        const struct team_option *option,
                                        size_t option_count)
{
        int i;

        for (i = 0; i < option_count; i++, option++) {
                struct team_option *del_opt;

                del_opt = __team_find_option(team, option->name);
                if (del_opt)
                        __team_option_inst_mark_removed_option(team, del_opt);
        }
}

static void __team_options_unregister(struct team *team,
                                      const struct team_option *option,
                                      size_t option_count)
{
        int i;

        for (i = 0; i < option_count; i++, option++) {
                struct team_option *del_opt;

                del_opt = __team_find_option(team, option->name);
                if (del_opt) {
                        __team_option_inst_del_option(team, del_opt);
                        list_del(&del_opt->list);
                        kfree(del_opt);
                }
        }
}

static void __team_options_change_check(struct team *team);

int team_options_register(struct team *team,
                          const struct team_option *option,
                          size_t option_count)
{
        int err;

        err = __team_options_register(team, option, option_count);
        if (err)
                return err;
        __team_options_change_check(team);
        return 0;
}
EXPORT_SYMBOL(team_options_register);
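
/* Example (illustrative sketch, not part of this driver): a mode module
 * would typically register its own options from its init callback. The
 * option name and getter/setter below are hypothetical; the field layout
 * follows the team_options[] table later in this file:
 *
 *	static const struct team_option my_mode_options[] = {
 *		{
 *			.name	= "my_option",
 *			.type	= TEAM_OPTION_TYPE_U32,
 *			.getter	= my_option_get,
 *			.setter	= my_option_set,
 *		},
 *	};
 *
 *	err = team_options_register(team, my_mode_options,
 *				    ARRAY_SIZE(my_mode_options));
 */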

void team_options_unregister(struct team *team,
                             const struct team_option *option,
                             size_t option_count)
{
        __team_options_mark_removed(team, option, option_count);
        __team_options_change_check(team);
        __team_options_unregister(team, option, option_count);
}
EXPORT_SYMBOL(team_options_unregister);

static int team_option_get(struct team *team,
                           struct team_option_inst *opt_inst,
                           struct team_gsetter_ctx *ctx)
{
        if (!opt_inst->option->getter)
                return -EOPNOTSUPP;
        return opt_inst->option->getter(team, ctx);
}

static int team_option_set(struct team *team,
                           struct team_option_inst *opt_inst,
                           struct team_gsetter_ctx *ctx)
{
        if (!opt_inst->option->setter)
                return -EOPNOTSUPP;
        return opt_inst->option->setter(team, ctx);
}

void team_option_inst_set_change(struct team_option_inst_info *opt_inst_info)
{
        struct team_option_inst *opt_inst;

        opt_inst = container_of(opt_inst_info, struct team_option_inst, info);
        opt_inst->changed = true;
}
EXPORT_SYMBOL(team_option_inst_set_change);

void team_options_change_check(struct team *team)
{
        __team_options_change_check(team);
}
EXPORT_SYMBOL(team_options_change_check);


/****************
 * Mode handling
 ****************/

static LIST_HEAD(mode_list);
static DEFINE_SPINLOCK(mode_list_lock);

struct team_mode_item {
        struct list_head list;
        const struct team_mode *mode;
};

static struct team_mode_item *__find_mode(const char *kind)
{
        struct team_mode_item *mitem;

        list_for_each_entry(mitem, &mode_list, list) {
                if (strcmp(mitem->mode->kind, kind) == 0)
                        return mitem;
        }
        return NULL;
}

static bool is_good_mode_name(const char *name)
{
        while (*name != '\0') {
                if (!isalpha(*name) && !isdigit(*name) && *name != '_')
                        return false;
                name++;
        }
        return true;
}

int team_mode_register(const struct team_mode *mode)
{
        int err = 0;
        struct team_mode_item *mitem;

        if (!is_good_mode_name(mode->kind) ||
            mode->priv_size > TEAM_MODE_PRIV_SIZE)
                return -EINVAL;

        mitem = kmalloc(sizeof(*mitem), GFP_KERNEL);
        if (!mitem)
                return -ENOMEM;

        spin_lock(&mode_list_lock);
        if (__find_mode(mode->kind)) {
                err = -EEXIST;
                kfree(mitem);
                goto unlock;
        }
        mitem->mode = mode;
        list_add_tail(&mitem->list, &mode_list);
unlock:
        spin_unlock(&mode_list_lock);
        return err;
}
EXPORT_SYMBOL(team_mode_register);
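
/* Example (illustrative sketch, not from this file): a mode module built
 * around these hooks would define a struct team_mode and register it from
 * module init; the names below are hypothetical:
 *
 *	static const struct team_mode my_mode = {
 *		.kind	= "my_mode",
 *		.owner	= THIS_MODULE,
 *		.ops	= &my_mode_ops,
 *	};
 *
 *	static int __init my_mode_init(void)
 *	{
 *		return team_mode_register(&my_mode);
 *	}
 */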

void team_mode_unregister(const struct team_mode *mode)
{
        struct team_mode_item *mitem;

        spin_lock(&mode_list_lock);
        mitem = __find_mode(mode->kind);
        if (mitem) {
                list_del_init(&mitem->list);
                kfree(mitem);
        }
        spin_unlock(&mode_list_lock);
}
EXPORT_SYMBOL(team_mode_unregister);

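/* Look up a mode by kind, auto-loading the backing module via the
 * "team-mode-<kind>" alias if it is not registered yet. On success a
 * reference on the mode's owner module is held and must be dropped with
 * team_mode_put().
 */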
static const struct team_mode *team_mode_get(const char *kind)
{
        struct team_mode_item *mitem;
        const struct team_mode *mode = NULL;

        if (!try_module_get(THIS_MODULE))
                return NULL;

        spin_lock(&mode_list_lock);
        mitem = __find_mode(kind);
        if (!mitem) {
                spin_unlock(&mode_list_lock);
                request_module("team-mode-%s", kind);
                spin_lock(&mode_list_lock);
                mitem = __find_mode(kind);
        }
        if (mitem) {
                mode = mitem->mode;
                if (!try_module_get(mode->owner))
                        mode = NULL;
        }

        spin_unlock(&mode_list_lock);
        module_put(THIS_MODULE);
        return mode;
}

static void team_mode_put(const struct team_mode *mode)
{
        module_put(mode->owner);
}

static bool team_dummy_transmit(struct team *team, struct sk_buff *skb)
{
        dev_kfree_skb_any(skb);
        return false;
}

static rx_handler_result_t team_dummy_receive(struct team *team,
                                              struct team_port *port,
                                              struct sk_buff *skb)
{
        return RX_HANDLER_ANOTHER;
}

static const struct team_mode __team_no_mode = {
        .kind           = "*NOMODE*",
};

static bool team_is_mode_set(struct team *team)
{
        return team->mode != &__team_no_mode;
}

static void team_set_no_mode(struct team *team)
{
        team->user_carrier_enabled = false;
        team->mode = &__team_no_mode;
}

static void team_adjust_ops(struct team *team)
{
        /*
         * To avoid checks in rx/tx skb paths, ensure here that non-null and
         * correct ops are always set.
         */

        if (!team->en_port_count || !team_is_mode_set(team) ||
            !team->mode->ops->transmit)
                team->ops.transmit = team_dummy_transmit;
        else
                team->ops.transmit = team->mode->ops->transmit;

        if (!team->en_port_count || !team_is_mode_set(team) ||
            !team->mode->ops->receive)
                team->ops.receive = team_dummy_receive;
        else
                team->ops.receive = team->mode->ops->receive;
}

/*
 * We can rely on the fact that no port is present at the time of a mode
 * change, so no packets are in flight and the mode operations need no
 * special synchronization while being swapped.
 */
static int __team_change_mode(struct team *team,
                              const struct team_mode *new_mode)
{
        /* Check if mode was previously set and do cleanup if so */
        if (team_is_mode_set(team)) {
                void (*exit_op)(struct team *team) = team->ops.exit;

                /* Clear ops area so no callback is called any longer */
                memset(&team->ops, 0, sizeof(struct team_mode_ops));
                team_adjust_ops(team);

                if (exit_op)
                        exit_op(team);
                team_mode_put(team->mode);
                team_set_no_mode(team);
                /* zero private data area */
                memset(&team->mode_priv, 0,
                       sizeof(struct team) - offsetof(struct team, mode_priv));
        }

        if (!new_mode)
                return 0;

        if (new_mode->ops->init) {
                int err;

                err = new_mode->ops->init(team);
                if (err)
                        return err;
        }

        team->mode = new_mode;
        memcpy(&team->ops, new_mode->ops, sizeof(struct team_mode_ops));
        team_adjust_ops(team);

        return 0;
}

static int team_change_mode(struct team *team, const char *kind)
{
        const struct team_mode *new_mode;
        struct net_device *dev = team->dev;
        int err;

        if (!list_empty(&team->port_list)) {
                netdev_err(dev, "No ports can be present during mode change\n");
                return -EBUSY;
        }

        if (team_is_mode_set(team) && strcmp(team->mode->kind, kind) == 0) {
                netdev_err(dev, "Unable to change to the same mode the team is in\n");
                return -EINVAL;
        }

        new_mode = team_mode_get(kind);
        if (!new_mode) {
                netdev_err(dev, "Mode \"%s\" not found\n", kind);
                return -EINVAL;
        }

        err = __team_change_mode(team, new_mode);
        if (err) {
                netdev_err(dev, "Failed to change to mode \"%s\"\n", kind);
                team_mode_put(new_mode);
                return err;
        }

        netdev_info(dev, "Mode changed to \"%s\"\n", kind);
        return 0;
}


/*********************
 * Peers notification
 *********************/

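/* Runs from delayed work. RTNL is taken with trylock: if it is contended,
 * the work reschedules itself instead of blocking, and count_pending tracks
 * how many NETDEV_NOTIFY_PEERS events are still owed.
 */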
static void team_notify_peers_work(struct work_struct *work)
{
        struct team *team;
        int val;

        team = container_of(work, struct team, notify_peers.dw.work);

        if (!rtnl_trylock()) {
                schedule_delayed_work(&team->notify_peers.dw, 0);
                return;
        }
        val = atomic_dec_if_positive(&team->notify_peers.count_pending);
        if (val < 0) {
                rtnl_unlock();
                return;
        }
        call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, team->dev);
        rtnl_unlock();
        if (val)
                schedule_delayed_work(&team->notify_peers.dw,
                                      msecs_to_jiffies(team->notify_peers.interval));
}

static void team_notify_peers(struct team *team)
{
        if (!team->notify_peers.count || !netif_running(team->dev))
                return;
        atomic_add(team->notify_peers.count, &team->notify_peers.count_pending);
        schedule_delayed_work(&team->notify_peers.dw, 0);
}

static void team_notify_peers_init(struct team *team)
{
        INIT_DELAYED_WORK(&team->notify_peers.dw, team_notify_peers_work);
}

static void team_notify_peers_fini(struct team *team)
{
        cancel_delayed_work_sync(&team->notify_peers.dw);
}


/*******************************
 * Send multicast group rejoins
 *******************************/

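/* Same machinery as the peer notifications above, but emitting
 * NETDEV_RESEND_IGMP so multicast group memberships get re-announced.
 */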
static void team_mcast_rejoin_work(struct work_struct *work)
{
        struct team *team;
        int val;

        team = container_of(work, struct team, mcast_rejoin.dw.work);

        if (!rtnl_trylock()) {
                schedule_delayed_work(&team->mcast_rejoin.dw, 0);
                return;
        }
        val = atomic_dec_if_positive(&team->mcast_rejoin.count_pending);
        if (val < 0) {
                rtnl_unlock();
                return;
        }
        call_netdevice_notifiers(NETDEV_RESEND_IGMP, team->dev);
        rtnl_unlock();
        if (val)
                schedule_delayed_work(&team->mcast_rejoin.dw,
                                      msecs_to_jiffies(team->mcast_rejoin.interval));
}

static void team_mcast_rejoin(struct team *team)
{
        if (!team->mcast_rejoin.count || !netif_running(team->dev))
                return;
        atomic_add(team->mcast_rejoin.count, &team->mcast_rejoin.count_pending);
        schedule_delayed_work(&team->mcast_rejoin.dw, 0);
}

static void team_mcast_rejoin_init(struct team *team)
{
        INIT_DELAYED_WORK(&team->mcast_rejoin.dw, team_mcast_rejoin_work);
}

static void team_mcast_rejoin_fini(struct team *team)
{
        cancel_delayed_work_sync(&team->mcast_rejoin.dw);
}


/************************
 * Rx path frame handler
 ************************/

/* note: already called with rcu_read_lock */
static rx_handler_result_t team_handle_frame(struct sk_buff **pskb)
{
        struct sk_buff *skb = *pskb;
        struct team_port *port;
        struct team *team;
        rx_handler_result_t res;

        skb = skb_share_check(skb, GFP_ATOMIC);
        if (!skb)
                return RX_HANDLER_CONSUMED;

        *pskb = skb;

        port = team_port_get_rcu(skb->dev);
        team = port->team;
        if (!team_port_enabled(port)) {
                /* allow exact match delivery for disabled ports */
                res = RX_HANDLER_EXACT;
        } else {
                res = team->ops.receive(team, port, skb);
        }
        if (res == RX_HANDLER_ANOTHER) {
                struct team_pcpu_stats *pcpu_stats;

                pcpu_stats = this_cpu_ptr(team->pcpu_stats);
                u64_stats_update_begin(&pcpu_stats->syncp);
                pcpu_stats->rx_packets++;
                pcpu_stats->rx_bytes += skb->len;
                if (skb->pkt_type == PACKET_MULTICAST)
                        pcpu_stats->rx_multicast++;
                u64_stats_update_end(&pcpu_stats->syncp);

                skb->dev = team->dev;
        } else if (res == RX_HANDLER_EXACT) {
                this_cpu_inc(team->pcpu_stats->rx_nohandler);
        } else {
                this_cpu_inc(team->pcpu_stats->rx_dropped);
        }

        return res;
}


/*************************************
 * Multiqueue Tx port select override
 *************************************/

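/* Queue override keeps one list of ports per tx queue, except queue 0:
 * queue_id 0 means "no override", so qom_lists[] has num_tx_queues - 1
 * entries and is indexed by queue_id - 1 (see __team_get_qom_list()).
 */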
static int team_queue_override_init(struct team *team)
{
        struct list_head *listarr;
        unsigned int queue_cnt = team->dev->num_tx_queues - 1;
        unsigned int i;

        if (!queue_cnt)
                return 0;
        listarr = kmalloc_array(queue_cnt, sizeof(struct list_head),
                                GFP_KERNEL);
        if (!listarr)
                return -ENOMEM;
        team->qom_lists = listarr;
        for (i = 0; i < queue_cnt; i++)
                INIT_LIST_HEAD(listarr++);
        return 0;
}

static void team_queue_override_fini(struct team *team)
{
        kfree(team->qom_lists);
}

static struct list_head *__team_get_qom_list(struct team *team, u16 queue_id)
{
        return &team->qom_lists[queue_id - 1];
}

/*
 * note: already called with rcu_read_lock
 */
static bool team_queue_override_transmit(struct team *team, struct sk_buff *skb)
{
        struct list_head *qom_list;
        struct team_port *port;

        if (!team->queue_override_enabled || !skb->queue_mapping)
                return false;
        qom_list = __team_get_qom_list(team, skb->queue_mapping);
        list_for_each_entry_rcu(port, qom_list, qom_list) {
                if (!team_dev_queue_xmit(team, port, skb))
                        return true;
        }
        return false;
}

static void __team_queue_override_port_del(struct team *team,
                                           struct team_port *port)
{
        if (!port->queue_id)
                return;
        list_del_rcu(&port->qom_list);
}

static bool team_queue_override_port_has_gt_prio_than(struct team_port *port,
                                                      struct team_port *cur)
{
        if (port->priority < cur->priority)
                return true;
        if (port->priority > cur->priority)
                return false;
        if (port->index < cur->index)
                return true;
        return false;
}

static void __team_queue_override_port_add(struct team *team,
                                           struct team_port *port)
{
        struct team_port *cur;
        struct list_head *qom_list;
        struct list_head *node;

        if (!port->queue_id)
                return;
        qom_list = __team_get_qom_list(team, port->queue_id);
        node = qom_list;
        list_for_each_entry(cur, qom_list, qom_list) {
                if (team_queue_override_port_has_gt_prio_than(port, cur))
                        break;
                node = &cur->qom_list;
        }
        list_add_tail_rcu(&port->qom_list, node);
}

static void __team_queue_override_enabled_check(struct team *team)
{
        struct team_port *port;
        bool enabled = false;

        list_for_each_entry(port, &team->port_list, list) {
                if (port->queue_id) {
                        enabled = true;
                        break;
                }
        }
        if (enabled == team->queue_override_enabled)
                return;
        netdev_dbg(team->dev, "%s queue override\n",
                   enabled ? "Enabling" : "Disabling");
        team->queue_override_enabled = enabled;
}

static void team_queue_override_port_prio_changed(struct team *team,
                                                  struct team_port *port)
{
        if (!port->queue_id || team_port_enabled(port))
                return;
        __team_queue_override_port_del(team, port);
        __team_queue_override_port_add(team, port);
        __team_queue_override_enabled_check(team);
}

static void team_queue_override_port_change_queue_id(struct team *team,
                                                     struct team_port *port,
                                                     u16 new_queue_id)
{
        if (team_port_enabled(port)) {
                __team_queue_override_port_del(team, port);
                port->queue_id = new_queue_id;
                __team_queue_override_port_add(team, port);
                __team_queue_override_enabled_check(team);
        } else {
                port->queue_id = new_queue_id;
        }
}

static void team_queue_override_port_add(struct team *team,
                                         struct team_port *port)
{
        __team_queue_override_port_add(team, port);
        __team_queue_override_enabled_check(team);
}

static void team_queue_override_port_del(struct team *team,
                                         struct team_port *port)
{
        __team_queue_override_port_del(team, port);
        __team_queue_override_enabled_check(team);
}


/****************
 * Port handling
 ****************/

static bool team_port_find(const struct team *team,
                           const struct team_port *port)
{
        struct team_port *cur;

        list_for_each_entry(cur, &team->port_list, list)
                if (cur == port)
                        return true;
        return false;
}

/*
 * Enable/disable a port by adding it to the enabled-port hashlist and setting
 * port->index (this might race with readers, so one could see a stale index
 * while processing an in-flight packet, but that is not a problem). Writes
 * are guarded by team->lock.
 */
static void team_port_enable(struct team *team,
                             struct team_port *port)
{
        if (team_port_enabled(port))
                return;
        port->index = team->en_port_count++;
        hlist_add_head_rcu(&port->hlist,
                           team_port_index_hash(team, port->index));
        team_adjust_ops(team);
        team_queue_override_port_add(team, port);
        if (team->ops.port_enabled)
                team->ops.port_enabled(team, port);
        team_notify_peers(team);
        team_mcast_rejoin(team);
        team_lower_state_changed(port);
}

static void __reconstruct_port_hlist(struct team *team, int rm_index)
{
        int i;
        struct team_port *port;

        for (i = rm_index + 1; i < team->en_port_count; i++) {
                port = team_get_port_by_index(team, i);
                hlist_del_rcu(&port->hlist);
                port->index--;
                hlist_add_head_rcu(&port->hlist,
                                   team_port_index_hash(team, port->index));
        }
}

static void team_port_disable(struct team *team,
                              struct team_port *port)
{
        if (!team_port_enabled(port))
                return;
        if (team->ops.port_disabled)
                team->ops.port_disabled(team, port);
        hlist_del_rcu(&port->hlist);
        __reconstruct_port_hlist(team, port->index);
        port->index = -1;
        team->en_port_count--;
        team_queue_override_port_del(team, port);
        team_adjust_ops(team);
        team_lower_state_changed(port);
}

#define TEAM_VLAN_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | \
                            NETIF_F_FRAGLIST | NETIF_F_ALL_TSO | \
                            NETIF_F_HIGHDMA | NETIF_F_LRO)

#define TEAM_ENC_FEATURES       (NETIF_F_HW_CSUM | NETIF_F_SG | \
                                 NETIF_F_RXCSUM | NETIF_F_ALL_TSO)

static void __team_compute_features(struct team *team)
{
        struct team_port *port;
        netdev_features_t vlan_features = TEAM_VLAN_FEATURES &
                                          NETIF_F_ALL_FOR_ALL;
        netdev_features_t enc_features  = TEAM_ENC_FEATURES;
        unsigned short max_hard_header_len = ETH_HLEN;
        unsigned int dst_release_flag = IFF_XMIT_DST_RELEASE |
                                        IFF_XMIT_DST_RELEASE_PERM;

        rcu_read_lock();
        list_for_each_entry_rcu(port, &team->port_list, list) {
                vlan_features = netdev_increment_features(vlan_features,
                                        port->dev->vlan_features,
                                        TEAM_VLAN_FEATURES);
                enc_features =
                        netdev_increment_features(enc_features,
                                                  port->dev->hw_enc_features,
                                                  TEAM_ENC_FEATURES);

                dst_release_flag &= port->dev->priv_flags;
                if (port->dev->hard_header_len > max_hard_header_len)
                        max_hard_header_len = port->dev->hard_header_len;
        }
        rcu_read_unlock();

        team->dev->vlan_features = vlan_features;
        team->dev->hw_enc_features = enc_features | NETIF_F_GSO_ENCAP_ALL |
                                     NETIF_F_HW_VLAN_CTAG_TX |
                                     NETIF_F_HW_VLAN_STAG_TX |
                                     NETIF_F_GSO_UDP_L4;
        team->dev->hard_header_len = max_hard_header_len;

        team->dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
        if (dst_release_flag == (IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM))
                team->dev->priv_flags |= IFF_XMIT_DST_RELEASE;
}

static void team_compute_features(struct team *team)
{
        __team_compute_features(team);
        netdev_change_features(team->dev);
}

static int team_port_enter(struct team *team, struct team_port *port)
{
        int err = 0;

        dev_hold(team->dev);
        if (team->ops.port_enter) {
                err = team->ops.port_enter(team, port);
                if (err) {
                        netdev_err(team->dev, "Device %s failed to enter team mode\n",
                                   port->dev->name);
                        goto err_port_enter;
                }
        }

        return 0;

err_port_enter:
        dev_put(team->dev);

        return err;
}

static void team_port_leave(struct team *team, struct team_port *port)
{
        if (team->ops.port_leave)
                team->ops.port_leave(team, port);
        dev_put(team->dev);
}

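/* Netpoll handling is compiled in only with CONFIG_NET_POLL_CONTROLLER;
 * the stubs in the #else branch let callers stay unconditional.
 */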
#ifdef CONFIG_NET_POLL_CONTROLLER
static int __team_port_enable_netpoll(struct team_port *port)
{
        struct netpoll *np;
        int err;

        np = kzalloc(sizeof(*np), GFP_KERNEL);
        if (!np)
                return -ENOMEM;

        err = __netpoll_setup(np, port->dev);
        if (err) {
                kfree(np);
                return err;
        }
        port->np = np;
        return err;
}

static int team_port_enable_netpoll(struct team_port *port)
{
        if (!port->team->dev->npinfo)
                return 0;

        return __team_port_enable_netpoll(port);
}

static void team_port_disable_netpoll(struct team_port *port)
{
        struct netpoll *np = port->np;

        if (!np)
                return;
        port->np = NULL;

        __netpoll_free(np);
}
#else
static int team_port_enable_netpoll(struct team_port *port)
{
        return 0;
}
static void team_port_disable_netpoll(struct team_port *port)
{
}
#endif

static int team_upper_dev_link(struct team *team, struct team_port *port,
                               struct netlink_ext_ack *extack)
{
        struct netdev_lag_upper_info lag_upper_info;
        int err;

        lag_upper_info.tx_type = team->mode->lag_tx_type;
        lag_upper_info.hash_type = NETDEV_LAG_HASH_UNKNOWN;
        err = netdev_master_upper_dev_link(port->dev, team->dev, NULL,
                                           &lag_upper_info, extack);
        if (err)
                return err;
        port->dev->priv_flags |= IFF_TEAM_PORT;
        return 0;
}

static void team_upper_dev_unlink(struct team *team, struct team_port *port)
{
        netdev_upper_dev_unlink(port->dev, team->dev);
        port->dev->priv_flags &= ~IFF_TEAM_PORT;
}

static void __team_port_change_port_added(struct team_port *port, bool linkup);
static int team_dev_type_check_change(struct net_device *dev,
                                      struct net_device *port_dev);

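/* Enslave port_dev to the team device. The setup sequence below (MTU, mode
 * enter, open, VLANs, netpoll, rx_handler, upper link, per-port options,
 * promisc/allmulti, address sync) is mirrored by the error labels at the
 * bottom, which unwind the completed steps in reverse order.
 */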
static int team_port_add(struct team *team, struct net_device *port_dev,
                         struct netlink_ext_ack *extack)
{
        struct net_device *dev = team->dev;
        struct team_port *port;
        char *portname = port_dev->name;
        int err;

        if (port_dev->flags & IFF_LOOPBACK) {
                NL_SET_ERR_MSG(extack, "Loopback device can't be added as a team port");
                netdev_err(dev, "Device %s is loopback device. Loopback devices can't be added as a team port\n",
                           portname);
                return -EINVAL;
        }

        if (netif_is_team_port(port_dev)) {
                NL_SET_ERR_MSG(extack, "Device is already a port of a team device");
                netdev_err(dev, "Device %s is already a port of a team device\n",
                           portname);
                return -EBUSY;
        }

        if (dev == port_dev) {
                NL_SET_ERR_MSG(extack, "Cannot enslave team device to itself");
                netdev_err(dev, "Cannot enslave team device to itself\n");
                return -EINVAL;
        }

        if (netdev_has_upper_dev(dev, port_dev)) {
                NL_SET_ERR_MSG(extack, "Device is already an upper device of the team interface");
                netdev_err(dev, "Device %s is already an upper device of the team interface\n",
                           portname);
                return -EBUSY;
        }

        if (port_dev->features & NETIF_F_VLAN_CHALLENGED &&
            vlan_uses_dev(dev)) {
                NL_SET_ERR_MSG(extack, "Device is VLAN challenged and team device has VLAN set up");
                netdev_err(dev, "Device %s is VLAN challenged and team device has VLAN set up\n",
                           portname);
                return -EPERM;
        }

        err = team_dev_type_check_change(dev, port_dev);
        if (err)
                return err;

        if (port_dev->flags & IFF_UP) {
                NL_SET_ERR_MSG(extack, "Device is up. Set it down before adding it as a team port");
                netdev_err(dev, "Device %s is up. Set it down before adding it as a team port\n",
                           portname);
                return -EBUSY;
        }

        port = kzalloc(sizeof(struct team_port) + team->mode->port_priv_size,
                       GFP_KERNEL);
        if (!port)
                return -ENOMEM;

        port->dev = port_dev;
        port->team = team;
        INIT_LIST_HEAD(&port->qom_list);

        port->orig.mtu = port_dev->mtu;
        err = dev_set_mtu(port_dev, dev->mtu);
        if (err) {
                netdev_dbg(dev, "Error %d calling dev_set_mtu\n", err);
                goto err_set_mtu;
        }

        memcpy(port->orig.dev_addr, port_dev->dev_addr, port_dev->addr_len);

        err = team_port_enter(team, port);
        if (err) {
                netdev_err(dev, "Device %s failed to enter team mode\n",
                           portname);
                goto err_port_enter;
        }

        err = dev_open(port_dev, extack);
        if (err) {
                netdev_dbg(dev, "Device %s opening failed\n",
                           portname);
                goto err_dev_open;
        }

        err = vlan_vids_add_by_dev(port_dev, dev);
        if (err) {
                netdev_err(dev, "Failed to add vlan ids to device %s\n",
                           portname);
                goto err_vids_add;
        }

        err = team_port_enable_netpoll(port);
        if (err) {
                netdev_err(dev, "Failed to enable netpoll on device %s\n",
                           portname);
                goto err_enable_netpoll;
        }

        if (!(dev->features & NETIF_F_LRO))
                dev_disable_lro(port_dev);

        err = netdev_rx_handler_register(port_dev, team_handle_frame,
                                         port);
        if (err) {
                netdev_err(dev, "Device %s failed to register rx_handler\n",
                           portname);
                goto err_handler_register;
        }

        err = team_upper_dev_link(team, port, extack);
        if (err) {
                netdev_err(dev, "Device %s failed to set upper link\n",
                           portname);
                goto err_set_upper_link;
        }

        err = __team_option_inst_add_port(team, port);
        if (err) {
                netdev_err(dev, "Device %s failed to add per-port options\n",
                           portname);
                goto err_option_port_add;
        }

        /* set promiscuity level to new slave */
        if (dev->flags & IFF_PROMISC) {
                err = dev_set_promiscuity(port_dev, 1);
                if (err)
                        goto err_set_slave_promisc;
        }

        /* set allmulti level to new slave */
        if (dev->flags & IFF_ALLMULTI) {
                err = dev_set_allmulti(port_dev, 1);
                if (err) {
                        if (dev->flags & IFF_PROMISC)
                                dev_set_promiscuity(port_dev, -1);
                        goto err_set_slave_promisc;
                }
        }

        if (dev->flags & IFF_UP) {
                netif_addr_lock_bh(dev);
                dev_uc_sync_multiple(port_dev, dev);
                dev_mc_sync_multiple(port_dev, dev);
                netif_addr_unlock_bh(dev);
        }

        port->index = -1;
        list_add_tail_rcu(&port->list, &team->port_list);
        team_port_enable(team, port);
        __team_compute_features(team);
        __team_port_change_port_added(port, !!netif_oper_up(port_dev));
        __team_options_change_check(team);

        netdev_info(dev, "Port device %s added\n", portname);

        return 0;

err_set_slave_promisc:
        __team_option_inst_del_port(team, port);

err_option_port_add:
        team_upper_dev_unlink(team, port);

err_set_upper_link:
        netdev_rx_handler_unregister(port_dev);

err_handler_register:
        team_port_disable_netpoll(port);

err_enable_netpoll:
        vlan_vids_del_by_dev(port_dev, dev);

err_vids_add:
        dev_close(port_dev);

err_dev_open:
        team_port_leave(team, port);
        team_port_set_orig_dev_addr(port);

err_port_enter:
        dev_set_mtu(port_dev, port->orig.mtu);

err_set_mtu:
        kfree(port);

        return err;
}

static void __team_port_change_port_removed(struct team_port *port);

static int team_port_del(struct team *team, struct net_device *port_dev)
{
        struct net_device *dev = team->dev;
        struct team_port *port;
        char *portname = port_dev->name;

        port = team_port_get_rtnl(port_dev);
        if (!port || !team_port_find(team, port)) {
                netdev_err(dev, "Device %s does not act as a port of this team\n",
                           portname);
                return -ENOENT;
        }

        team_port_disable(team, port);
        list_del_rcu(&port->list);

        if (dev->flags & IFF_PROMISC)
                dev_set_promiscuity(port_dev, -1);
        if (dev->flags & IFF_ALLMULTI)
                dev_set_allmulti(port_dev, -1);

        team_upper_dev_unlink(team, port);
        netdev_rx_handler_unregister(port_dev);
        team_port_disable_netpoll(port);
        vlan_vids_del_by_dev(port_dev, dev);
        if (dev->flags & IFF_UP) {
                dev_uc_unsync(port_dev, dev);
                dev_mc_unsync(port_dev, dev);
        }
        dev_close(port_dev);
        team_port_leave(team, port);

        __team_option_inst_mark_removed_port(team, port);
        __team_options_change_check(team);
        __team_option_inst_del_port(team, port);
        __team_port_change_port_removed(port);

        team_port_set_orig_dev_addr(port);
        dev_set_mtu(port_dev, port->orig.mtu);
        kfree_rcu(port, rcu);
        netdev_info(dev, "Port device %s removed\n", portname);
        __team_compute_features(team);

        return 0;
}


/*****************
 * Net device ops
 *****************/

static int team_mode_option_get(struct team *team, struct team_gsetter_ctx *ctx)
{
        ctx->data.str_val = team->mode->kind;
        return 0;
}

static int team_mode_option_set(struct team *team, struct team_gsetter_ctx *ctx)
{
        return team_change_mode(team, ctx->data.str_val);
}

static int team_notify_peers_count_get(struct team *team,
                                       struct team_gsetter_ctx *ctx)
{
        ctx->data.u32_val = team->notify_peers.count;
        return 0;
}

static int team_notify_peers_count_set(struct team *team,
                                       struct team_gsetter_ctx *ctx)
{
        team->notify_peers.count = ctx->data.u32_val;
        return 0;
}

static int team_notify_peers_interval_get(struct team *team,
                                          struct team_gsetter_ctx *ctx)
{
        ctx->data.u32_val = team->notify_peers.interval;
        return 0;
}

static int team_notify_peers_interval_set(struct team *team,
                                          struct team_gsetter_ctx *ctx)
{
        team->notify_peers.interval = ctx->data.u32_val;
        return 0;
}

static int team_mcast_rejoin_count_get(struct team *team,
                                       struct team_gsetter_ctx *ctx)
{
        ctx->data.u32_val = team->mcast_rejoin.count;
        return 0;
}

static int team_mcast_rejoin_count_set(struct team *team,
                                       struct team_gsetter_ctx *ctx)
{
        team->mcast_rejoin.count = ctx->data.u32_val;
        return 0;
}

static int team_mcast_rejoin_interval_get(struct team *team,
                                          struct team_gsetter_ctx *ctx)
{
        ctx->data.u32_val = team->mcast_rejoin.interval;
        return 0;
}

static int team_mcast_rejoin_interval_set(struct team *team,
                                          struct team_gsetter_ctx *ctx)
{
        team->mcast_rejoin.interval = ctx->data.u32_val;
        return 0;
}

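/* The handlers below back the per-port entries of team_options[]; each
 * resolves its target port from ctx->info->port.
 */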
static int team_port_en_option_get(struct team *team,
                                   struct team_gsetter_ctx *ctx)
{
        struct team_port *port = ctx->info->port;

        ctx->data.bool_val = team_port_enabled(port);
        return 0;
}

static int team_port_en_option_set(struct team *team,
                                   struct team_gsetter_ctx *ctx)
{
        struct team_port *port = ctx->info->port;

        if (ctx->data.bool_val)
                team_port_enable(team, port);
        else
                team_port_disable(team, port);
        return 0;
}

static int team_user_linkup_option_get(struct team *team,
                                       struct team_gsetter_ctx *ctx)
{
        struct team_port *port = ctx->info->port;

        ctx->data.bool_val = port->user.linkup;
        return 0;
}

static void __team_carrier_check(struct team *team);

static int team_user_linkup_option_set(struct team *team,
                                       struct team_gsetter_ctx *ctx)
{
        struct team_port *port = ctx->info->port;

        port->user.linkup = ctx->data.bool_val;
        team_refresh_port_linkup(port);
        __team_carrier_check(port->team);
        return 0;
}

static int team_user_linkup_en_option_get(struct team *team,
                                          struct team_gsetter_ctx *ctx)
{
        struct team_port *port = ctx->info->port;

        ctx->data.bool_val = port->user.linkup_enabled;
        return 0;
}

static int team_user_linkup_en_option_set(struct team *team,
                                          struct team_gsetter_ctx *ctx)
{
        struct team_port *port = ctx->info->port;

        port->user.linkup_enabled = ctx->data.bool_val;
        team_refresh_port_linkup(port);
        __team_carrier_check(port->team);
        return 0;
}

static int team_priority_option_get(struct team *team,
                                    struct team_gsetter_ctx *ctx)
{
        struct team_port *port = ctx->info->port;

        ctx->data.s32_val = port->priority;
        return 0;
}

static int team_priority_option_set(struct team *team,
                                    struct team_gsetter_ctx *ctx)
{
        struct team_port *port = ctx->info->port;
        s32 priority = ctx->data.s32_val;

        if (port->priority == priority)
                return 0;
        port->priority = priority;
        team_queue_override_port_prio_changed(team, port);
        return 0;
}

static int team_queue_id_option_get(struct team *team,
                                    struct team_gsetter_ctx *ctx)
{
        struct team_port *port = ctx->info->port;

        ctx->data.u32_val = port->queue_id;
        return 0;
}

static int team_queue_id_option_set(struct team *team,
                                    struct team_gsetter_ctx *ctx)
{
        struct team_port *port = ctx->info->port;
        u16 new_queue_id = ctx->data.u32_val;

        if (port->queue_id == new_queue_id)
                return 0;
        if (new_queue_id >= team->dev->real_num_tx_queues)
                return -EINVAL;
        team_queue_override_port_change_queue_id(team, port, new_queue_id);
        return 0;
}

static const struct team_option team_options[] = {
        {
                .name = "mode",
                .type = TEAM_OPTION_TYPE_STRING,
                .getter = team_mode_option_get,
                .setter = team_mode_option_set,
        },
        {
                .name = "notify_peers_count",
                .type = TEAM_OPTION_TYPE_U32,
                .getter = team_notify_peers_count_get,
                .setter = team_notify_peers_count_set,
        },
        {
                .name = "notify_peers_interval",
                .type = TEAM_OPTION_TYPE_U32,
                .getter = team_notify_peers_interval_get,
                .setter = team_notify_peers_interval_set,
        },
        {
                .name = "mcast_rejoin_count",
                .type = TEAM_OPTION_TYPE_U32,
                .getter = team_mcast_rejoin_count_get,
                .setter = team_mcast_rejoin_count_set,
        },
        {
                .name = "mcast_rejoin_interval",
                .type = TEAM_OPTION_TYPE_U32,
                .getter = team_mcast_rejoin_interval_get,
                .setter = team_mcast_rejoin_interval_set,
        },
        {
                .name = "enabled",
                .type = TEAM_OPTION_TYPE_BOOL,
                .per_port = true,
                .getter = team_port_en_option_get,
                .setter = team_port_en_option_set,
        },
        {
                .name = "user_linkup",
                .type = TEAM_OPTION_TYPE_BOOL,
                .per_port = true,
                .getter = team_user_linkup_option_get,
                .setter = team_user_linkup_option_set,
        },
        {
                .name = "user_linkup_enabled",
                .type = TEAM_OPTION_TYPE_BOOL,
                .per_port = true,
                .getter = team_user_linkup_en_option_get,
                .setter = team_user_linkup_en_option_set,
        },
        {
                .name = "priority",
                .type = TEAM_OPTION_TYPE_S32,
                .per_port = true,
                .getter = team_priority_option_get,
                .setter = team_priority_option_set,
        },
        {
                .name = "queue_id",
                .type = TEAM_OPTION_TYPE_U32,
                .per_port = true,
                .getter = team_queue_id_option_get,
                .setter = team_queue_id_option_set,
        },
};
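
/* Example (illustrative, not part of this file): userspace drives these
 * options over generic netlink, typically via the libteam tools. The exact
 * invocation below is an assumption about the libteam CLI; the option names
 * themselves come from the table above:
 *
 *	teamnl team0 setoption mode activebackup
 *	teamnl team0 getoption mode
 */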


static int team_init(struct net_device *dev)
{
        struct team *team = netdev_priv(dev);
        int i;
        int err;

        team->dev = dev;
        team_set_no_mode(team);
        team->notifier_ctx = false;

        team->pcpu_stats = netdev_alloc_pcpu_stats(struct team_pcpu_stats);
        if (!team->pcpu_stats)
                return -ENOMEM;

        for (i = 0; i < TEAM_PORT_HASHENTRIES; i++)
                INIT_HLIST_HEAD(&team->en_port_hlist[i]);
        INIT_LIST_HEAD(&team->port_list);
        err = team_queue_override_init(team);
        if (err)
                goto err_team_queue_override_init;

        team_adjust_ops(team);

        INIT_LIST_HEAD(&team->option_list);
        INIT_LIST_HEAD(&team->option_inst_list);

        team_notify_peers_init(team);
        team_mcast_rejoin_init(team);

        err = team_options_register(team, team_options, ARRAY_SIZE(team_options));
        if (err)
                goto err_options_register;
        netif_carrier_off(dev);

        lockdep_register_key(&team->team_lock_key);
        __mutex_init(&team->lock, "team->team_lock_key", &team->team_lock_key);
        netdev_lockdep_set_classes(dev);

        return 0;

err_options_register:
        team_mcast_rejoin_fini(team);
        team_notify_peers_fini(team);
        team_queue_override_fini(team);
err_team_queue_override_init:
        free_percpu(team->pcpu_stats);

        return err;
}

static void team_uninit(struct net_device *dev)
{
        struct team *team = netdev_priv(dev);
        struct team_port *port;
        struct team_port *tmp;

        mutex_lock(&team->lock);
        list_for_each_entry_safe(port, tmp, &team->port_list, list)
                team_port_del(team, port->dev);

        __team_change_mode(team, NULL); /* cleanup */
        __team_options_unregister(team, team_options, ARRAY_SIZE(team_options));
        team_mcast_rejoin_fini(team);
        team_notify_peers_fini(team);
        team_queue_override_fini(team);
        mutex_unlock(&team->lock);
        netdev_change_features(dev);
        lockdep_unregister_key(&team->team_lock_key);
}

static void team_destructor(struct net_device *dev)
{
        struct team *team = netdev_priv(dev);

        free_percpu(team->pcpu_stats);
}

static int team_open(struct net_device *dev)
{
        return 0;
}

static int team_close(struct net_device *dev)
{
        struct team *team = netdev_priv(dev);
        struct team_port *port;

        list_for_each_entry(port, &team->port_list, list) {
                dev_uc_unsync(port->dev, dev);
                dev_mc_unsync(port->dev, dev);
        }

        return 0;
}

/*
 * note: already called with rcu_read_lock
 */
static netdev_tx_t team_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct team *team = netdev_priv(dev);
        bool tx_success;
        unsigned int len = skb->len;

        tx_success = team_queue_override_transmit(team, skb);
        if (!tx_success)
                tx_success = team->ops.transmit(team, skb);
        if (tx_success) {
                struct team_pcpu_stats *pcpu_stats;

                pcpu_stats = this_cpu_ptr(team->pcpu_stats);
                u64_stats_update_begin(&pcpu_stats->syncp);
1733                 pcpu_stats->tx_packets++;
1734                 pcpu_stats->tx_bytes += len;
1735                 u64_stats_update_end(&pcpu_stats->syncp);
1736         } else {
1737                 this_cpu_inc(team->pcpu_stats->tx_dropped);
1738         }
1739
1740         return NETDEV_TX_OK;
1741 }
1742
1743 static u16 team_select_queue(struct net_device *dev, struct sk_buff *skb,
1744                              struct net_device *sb_dev)
1745 {
1746         /*
1747          * This helper function exists to help netdev_pick_tx get the correct
1748          * destination queue.  Using a helper function skips a call to
1749          * skb_tx_hash and will put the skbs in the queue we expect on their
1750          * way down to the team driver.
1751          */
1752         u16 txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) : 0;
1753
1754         /*
1755          * Save the original txq to restore before passing to the driver
1756          */
1757         qdisc_skb_cb(skb)->slave_dev_queue_mapping = skb->queue_mapping;
1758
1759         if (unlikely(txq >= dev->real_num_tx_queues)) {
1760                 do {
1761                         txq -= dev->real_num_tx_queues;
1762                 } while (txq >= dev->real_num_tx_queues);
1763         }
1764         return txq;
1765 }
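/*
 * Worked example (illustrative): with dev->real_num_tx_queues == 4 and a
 * recorded rx queue of 9, the loop above reduces txq 9 -> 5 -> 1, so the
 * skb is mapped to tx queue 1; queue 0 is used when no rx queue was
 * recorded on the skb.
 */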
1766
1767 static void team_change_rx_flags(struct net_device *dev, int change)
1768 {
1769         struct team *team = netdev_priv(dev);
1770         struct team_port *port;
1771         int inc;
1772
1773         rcu_read_lock();
1774         list_for_each_entry_rcu(port, &team->port_list, list) {
1775                 if (change & IFF_PROMISC) {
1776                         inc = dev->flags & IFF_PROMISC ? 1 : -1;
1777                         dev_set_promiscuity(port->dev, inc);
1778                 }
1779                 if (change & IFF_ALLMULTI) {
1780                         inc = dev->flags & IFF_ALLMULTI ? 1 : -1;
1781                         dev_set_allmulti(port->dev, inc);
1782                 }
1783         }
1784         rcu_read_unlock();
1785 }
1786
1787 static void team_set_rx_mode(struct net_device *dev)
1788 {
1789         struct team *team = netdev_priv(dev);
1790         struct team_port *port;
1791
1792         rcu_read_lock();
1793         list_for_each_entry_rcu(port, &team->port_list, list) {
1794                 dev_uc_sync_multiple(port->dev, dev);
1795                 dev_mc_sync_multiple(port->dev, dev);
1796         }
1797         rcu_read_unlock();
1798 }
1799
1800 static int team_set_mac_address(struct net_device *dev, void *p)
1801 {
1802         struct sockaddr *addr = p;
1803         struct team *team = netdev_priv(dev);
1804         struct team_port *port;
1805
1806         if (dev->type == ARPHRD_ETHER && !is_valid_ether_addr(addr->sa_data))
1807                 return -EADDRNOTAVAIL;
1808         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
1809         mutex_lock(&team->lock);
1810         list_for_each_entry(port, &team->port_list, list)
1811                 if (team->ops.port_change_dev_addr)
1812                         team->ops.port_change_dev_addr(team, port);
1813         mutex_unlock(&team->lock);
1814         return 0;
1815 }
1816
1817 static int team_change_mtu(struct net_device *dev, int new_mtu)
1818 {
1819         struct team *team = netdev_priv(dev);
1820         struct team_port *port;
1821         int err;
1822
1823         /*
1824          * Although this is a reader, it's guarded by the team lock. It's not
1825          * possible to traverse the list in reverse under rcu_read_lock.
1826          */
1827         mutex_lock(&team->lock);
1828         team->port_mtu_change_allowed = true;
1829         list_for_each_entry(port, &team->port_list, list) {
1830                 err = dev_set_mtu(port->dev, new_mtu);
1831                 if (err) {
1832                         netdev_err(dev, "Device %s failed to change mtu\n",
1833                                    port->dev->name);
1834                         goto unwind;
1835                 }
1836         }
1837         team->port_mtu_change_allowed = false;
1838         mutex_unlock(&team->lock);
1839
1840         dev->mtu = new_mtu;
1841
1842         return 0;
1843
1844 unwind:
1845         list_for_each_entry_continue_reverse(port, &team->port_list, list)
1846                 dev_set_mtu(port->dev, dev->mtu);
1847         team->port_mtu_change_allowed = false;
1848         mutex_unlock(&team->lock);
1849
1850         return err;
1851 }
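/*
 * Note on the unwind label above: list_for_each_entry_continue_reverse()
 * walks back from the port whose dev_set_mtu() failed and restores dev->mtu
 * on every port that was already changed, so a partial failure leaves all
 * ports at the team device's original MTU.
 */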
1852
1853 static void
1854 team_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
1855 {
1856         struct team *team = netdev_priv(dev);
1857         struct team_pcpu_stats *p;
1858         u64 rx_packets, rx_bytes, rx_multicast, tx_packets, tx_bytes;
1859         u32 rx_dropped = 0, tx_dropped = 0, rx_nohandler = 0;
1860         unsigned int start;
1861         int i;
1862
1863         for_each_possible_cpu(i) {
1864                 p = per_cpu_ptr(team->pcpu_stats, i);
1865                 do {
1866                         start = u64_stats_fetch_begin_irq(&p->syncp);
1867                         rx_packets      = p->rx_packets;
1868                         rx_bytes        = p->rx_bytes;
1869                         rx_multicast    = p->rx_multicast;
1870                         tx_packets      = p->tx_packets;
1871                         tx_bytes        = p->tx_bytes;
1872                 } while (u64_stats_fetch_retry_irq(&p->syncp, start));
1873
1874                 stats->rx_packets       += rx_packets;
1875                 stats->rx_bytes         += rx_bytes;
1876                 stats->multicast        += rx_multicast;
1877                 stats->tx_packets       += tx_packets;
1878                 stats->tx_bytes         += tx_bytes;
1879                 /*
1880                  * rx_dropped, tx_dropped & rx_nohandler are u32,
1881                  * updated without syncp protection.
1882                  */
1883                 rx_dropped      += p->rx_dropped;
1884                 tx_dropped      += p->tx_dropped;
1885                 rx_nohandler    += p->rx_nohandler;
1886         }
1887         stats->rx_dropped       = rx_dropped;
1888         stats->tx_dropped       = tx_dropped;
1889         stats->rx_nohandler     = rx_nohandler;
1890 }
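/*
 * Note: the u64_stats_fetch_begin_irq()/u64_stats_fetch_retry_irq() pair
 * above re-reads a CPU's counters whenever a writer (the update_begin/end
 * section in team_xmit()) raced with the read, keeping the 64-bit counters
 * consistent on 32-bit hosts without taking a lock.
 */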
1891
1892 static int team_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
1893 {
1894         struct team *team = netdev_priv(dev);
1895         struct team_port *port;
1896         int err;
1897
1898         /*
1899          * Although this is a reader, it's guarded by the team lock. It's not
1900          * possible to traverse the list in reverse under rcu_read_lock.
1901          */
1902         mutex_lock(&team->lock);
1903         list_for_each_entry(port, &team->port_list, list) {
1904                 err = vlan_vid_add(port->dev, proto, vid);
1905                 if (err)
1906                         goto unwind;
1907         }
1908         mutex_unlock(&team->lock);
1909
1910         return 0;
1911
1912 unwind:
1913         list_for_each_entry_continue_reverse(port, &team->port_list, list)
1914                 vlan_vid_del(port->dev, proto, vid);
1915         mutex_unlock(&team->lock);
1916
1917         return err;
1918 }
1919
1920 static int team_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
1921 {
1922         struct team *team = netdev_priv(dev);
1923         struct team_port *port;
1924
1925         mutex_lock(&team->lock);
1926         list_for_each_entry(port, &team->port_list, list)
1927                 vlan_vid_del(port->dev, proto, vid);
1928         mutex_unlock(&team->lock);
1929
1930         return 0;
1931 }
1932
1933 #ifdef CONFIG_NET_POLL_CONTROLLER
1934 static void team_poll_controller(struct net_device *dev)
1935 {
1936 }
1937
1938 static void __team_netpoll_cleanup(struct team *team)
1939 {
1940         struct team_port *port;
1941
1942         list_for_each_entry(port, &team->port_list, list)
1943                 team_port_disable_netpoll(port);
1944 }
1945
1946 static void team_netpoll_cleanup(struct net_device *dev)
1947 {
1948         struct team *team = netdev_priv(dev);
1949
1950         mutex_lock(&team->lock);
1951         __team_netpoll_cleanup(team);
1952         mutex_unlock(&team->lock);
1953 }
1954
1955 static int team_netpoll_setup(struct net_device *dev,
1956                               struct netpoll_info *npinfo)
1957 {
1958         struct team *team = netdev_priv(dev);
1959         struct team_port *port;
1960         int err = 0;
1961
1962         mutex_lock(&team->lock);
1963         list_for_each_entry(port, &team->port_list, list) {
1964                 err = __team_port_enable_netpoll(port);
1965                 if (err) {
1966                         __team_netpoll_cleanup(team);
1967                         break;
1968                 }
1969         }
1970         mutex_unlock(&team->lock);
1971         return err;
1972 }
1973 #endif
1974
1975 static int team_add_slave(struct net_device *dev, struct net_device *port_dev,
1976                           struct netlink_ext_ack *extack)
1977 {
1978         struct team *team = netdev_priv(dev);
1979         int err;
1980
1981         mutex_lock(&team->lock);
1982         err = team_port_add(team, port_dev, extack);
1983         mutex_unlock(&team->lock);
1984
1985         if (!err)
1986                 netdev_change_features(dev);
1987
1988         return err;
1989 }
1990
1991 static int team_del_slave(struct net_device *dev, struct net_device *port_dev)
1992 {
1993         struct team *team = netdev_priv(dev);
1994         int err;
1995
1996         mutex_lock(&team->lock);
1997         err = team_port_del(team, port_dev);
1998         mutex_unlock(&team->lock);
1999
2000         if (err)
2001                 return err;
2002
2003         if (netif_is_team_master(port_dev)) {
2004                 lockdep_unregister_key(&team->team_lock_key);
2005                 lockdep_register_key(&team->team_lock_key);
2006                 lockdep_set_class(&team->lock, &team->team_lock_key);
2007         }
2008         netdev_change_features(dev);
2009
2010         return err;
2011 }
2012
2013 static netdev_features_t team_fix_features(struct net_device *dev,
2014                                            netdev_features_t features)
2015 {
2016         struct team_port *port;
2017         struct team *team = netdev_priv(dev);
2018         netdev_features_t mask;
2019
2020         mask = features;
2021         features &= ~NETIF_F_ONE_FOR_ALL;
2022         features |= NETIF_F_ALL_FOR_ALL;
2023
2024         rcu_read_lock();
2025         list_for_each_entry_rcu(port, &team->port_list, list) {
2026                 features = netdev_increment_features(features,
2027                                                      port->dev->features,
2028                                                      mask);
2029         }
2030         rcu_read_unlock();
2031
2032         features = netdev_add_tso_features(features, mask);
2033
2034         return features;
2035 }
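/*
 * Note: the two mask operations above seed the merge so that
 * netdev_increment_features() applies the usual master/slave rules: a
 * NETIF_F_ONE_FOR_ALL feature ends up set if at least one port advertises
 * it, while a NETIF_F_ALL_FOR_ALL feature survives only if every port
 * advertises it.
 */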
2036
2037 static int team_change_carrier(struct net_device *dev, bool new_carrier)
2038 {
2039         struct team *team = netdev_priv(dev);
2040
2041         team->user_carrier_enabled = true;
2042
2043         if (new_carrier)
2044                 netif_carrier_on(dev);
2045         else
2046                 netif_carrier_off(dev);
2047         return 0;
2048 }
2049
2050 static const struct net_device_ops team_netdev_ops = {
2051         .ndo_init               = team_init,
2052         .ndo_uninit             = team_uninit,
2053         .ndo_open               = team_open,
2054         .ndo_stop               = team_close,
2055         .ndo_start_xmit         = team_xmit,
2056         .ndo_select_queue       = team_select_queue,
2057         .ndo_change_rx_flags    = team_change_rx_flags,
2058         .ndo_set_rx_mode        = team_set_rx_mode,
2059         .ndo_set_mac_address    = team_set_mac_address,
2060         .ndo_change_mtu         = team_change_mtu,
2061         .ndo_get_stats64        = team_get_stats64,
2062         .ndo_vlan_rx_add_vid    = team_vlan_rx_add_vid,
2063         .ndo_vlan_rx_kill_vid   = team_vlan_rx_kill_vid,
2064 #ifdef CONFIG_NET_POLL_CONTROLLER
2065         .ndo_poll_controller    = team_poll_controller,
2066         .ndo_netpoll_setup      = team_netpoll_setup,
2067         .ndo_netpoll_cleanup    = team_netpoll_cleanup,
2068 #endif
2069         .ndo_add_slave          = team_add_slave,
2070         .ndo_del_slave          = team_del_slave,
2071         .ndo_fix_features       = team_fix_features,
2072         .ndo_change_carrier     = team_change_carrier,
2073         .ndo_features_check     = passthru_features_check,
2074 };
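/*
 * Example (illustrative) of how these hooks are reached from userspace
 * with iproute2:
 *
 *	# ip link add name team0 type team	-> ndo_init
 *	# ip link set eth0 down
 *	# ip link set eth0 master team0		-> ndo_add_slave
 *	# ip link set team0 mtu 1400		-> ndo_change_mtu
 *	# ip link set eth0 nomaster		-> ndo_del_slave
 */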
2075
2076 /***********************
2077  * ethtool interface
2078  ***********************/
2079
2080 static void team_ethtool_get_drvinfo(struct net_device *dev,
2081                                      struct ethtool_drvinfo *drvinfo)
2082 {
2083         strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
2084         strlcpy(drvinfo->version, UTS_RELEASE, sizeof(drvinfo->version));
2085 }
2086
2087 static int team_ethtool_get_link_ksettings(struct net_device *dev,
2088                                            struct ethtool_link_ksettings *cmd)
2089 {
2090         struct team *team = netdev_priv(dev);
2091         unsigned long speed = 0;
2092         struct team_port *port;
2093
2094         cmd->base.duplex = DUPLEX_UNKNOWN;
2095         cmd->base.port = PORT_OTHER;
2096
2097         rcu_read_lock();
2098         list_for_each_entry_rcu(port, &team->port_list, list) {
2099                 if (team_port_txable(port)) {
2100                         if (port->state.speed != SPEED_UNKNOWN)
2101                                 speed += port->state.speed;
2102                         if (cmd->base.duplex == DUPLEX_UNKNOWN &&
2103                             port->state.duplex != DUPLEX_UNKNOWN)
2104                                 cmd->base.duplex = port->state.duplex;
2105                 }
2106         }
2107         rcu_read_unlock();
2108
2109         cmd->base.speed = speed ? : SPEED_UNKNOWN;
2110
2111         return 0;
2112 }
2113
2114 static const struct ethtool_ops team_ethtool_ops = {
2115         .get_drvinfo            = team_ethtool_get_drvinfo,
2116         .get_link               = ethtool_op_get_link,
2117         .get_link_ksettings     = team_ethtool_get_link_ksettings,
2118 };
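/*
 * Example (illustrative): with two txable ports both linked at 1000 Mb/s,
 * "ethtool team0" reports a speed of 2000 Mb/s from the summing loop in
 * team_ethtool_get_link_ksettings(); duplex is taken from the first txable
 * port that reports a known value.
 */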
2119
2120 /***********************
2121  * rt netlink interface
2122  ***********************/
2123
2124 static void team_setup_by_port(struct net_device *dev,
2125                                struct net_device *port_dev)
2126 {
2127         struct team *team = netdev_priv(dev);
2128
2129         if (port_dev->type == ARPHRD_ETHER)
2130                 dev->header_ops = team->header_ops_cache;
2131         else
2132                 dev->header_ops = port_dev->header_ops;
2133         dev->type = port_dev->type;
2134         dev->hard_header_len = port_dev->hard_header_len;
2135         dev->needed_headroom = port_dev->needed_headroom;
2136         dev->addr_len = port_dev->addr_len;
2137         dev->mtu = port_dev->mtu;
2138         memcpy(dev->broadcast, port_dev->broadcast, port_dev->addr_len);
2139         eth_hw_addr_inherit(dev, port_dev);
2140
2141         if (port_dev->flags & IFF_POINTOPOINT) {
2142                 dev->flags &= ~(IFF_BROADCAST | IFF_MULTICAST);
2143                 dev->flags |= (IFF_POINTOPOINT | IFF_NOARP);
2144         } else if ((port_dev->flags & (IFF_BROADCAST | IFF_MULTICAST)) ==
2145                     (IFF_BROADCAST | IFF_MULTICAST)) {
2146                 dev->flags |= (IFF_BROADCAST | IFF_MULTICAST);
2147                 dev->flags &= ~(IFF_POINTOPOINT | IFF_NOARP);
2148         }
2149 }
2150
2151 static int team_dev_type_check_change(struct net_device *dev,
2152                                       struct net_device *port_dev)
2153 {
2154         struct team *team = netdev_priv(dev);
2155         char *portname = port_dev->name;
2156         int err;
2157
2158         if (dev->type == port_dev->type)
2159                 return 0;
2160         if (!list_empty(&team->port_list)) {
2161                 netdev_err(dev, "Device %s is of different type\n", portname);
2162                 return -EBUSY;
2163         }
2164         err = call_netdevice_notifiers(NETDEV_PRE_TYPE_CHANGE, dev);
2165         err = notifier_to_errno(err);
2166         if (err) {
2167                 netdev_err(dev, "Refused to change device type\n");
2168                 return err;
2169         }
2170         dev_uc_flush(dev);
2171         dev_mc_flush(dev);
2172         team_setup_by_port(dev, port_dev);
2173         call_netdevice_notifiers(NETDEV_POST_TYPE_CHANGE, dev);
2174         return 0;
2175 }
2176
2177 static void team_setup(struct net_device *dev)
2178 {
2179         struct team *team = netdev_priv(dev);
2180
2181         ether_setup(dev);
2182         dev->max_mtu = ETH_MAX_MTU;
2183         team->header_ops_cache = dev->header_ops;
2184
2185         dev->netdev_ops = &team_netdev_ops;
2186         dev->ethtool_ops = &team_ethtool_ops;
2187         dev->needs_free_netdev = true;
2188         dev->priv_destructor = team_destructor;
2189         dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING);
2190         dev->priv_flags |= IFF_NO_QUEUE;
2191         dev->priv_flags |= IFF_TEAM;
2192
2193         /*
2194          * Indicate we support unicast address filtering. That way the core
2195          * won't force us into promiscuous mode when a unicast address is
2196          * added; that is left up to the underlying drivers.
2197          */
2198         dev->priv_flags |= IFF_UNICAST_FLT | IFF_LIVE_ADDR_CHANGE;
2199
2200         dev->features |= NETIF_F_LLTX;
2201         dev->features |= NETIF_F_GRO;
2202
2203         /* Don't allow team devices to change network namespaces. */
2204         dev->features |= NETIF_F_NETNS_LOCAL;
2205
2206         dev->hw_features = TEAM_VLAN_FEATURES |
2207                            NETIF_F_HW_VLAN_CTAG_RX |
2208                            NETIF_F_HW_VLAN_CTAG_FILTER |
2209                            NETIF_F_HW_VLAN_STAG_RX |
2210                            NETIF_F_HW_VLAN_STAG_FILTER;
2211
2212         dev->hw_features |= NETIF_F_GSO_ENCAP_ALL | NETIF_F_GSO_UDP_L4;
2213         dev->features |= dev->hw_features;
2214         dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX;
2215 }
2216
2217 static int team_newlink(struct net *src_net, struct net_device *dev,
2218                         struct nlattr *tb[], struct nlattr *data[],
2219                         struct netlink_ext_ack *extack)
2220 {
2221         if (tb[IFLA_ADDRESS] == NULL)
2222                 eth_hw_addr_random(dev);
2223
2224         return register_netdevice(dev);
2225 }
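/*
 * Example (illustrative): "ip link add name team0 type team" lands here
 * through the rtnl_link_ops below; with no IFLA_ADDRESS attribute supplied,
 * a random MAC address is generated before the device is registered. A
 * locally administered address can also be given explicitly, e.g.
 * "ip link add name team0 address 02:00:00:00:00:01 type team".
 */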
2226
2227 static int team_validate(struct nlattr *tb[], struct nlattr *data[],
2228                          struct netlink_ext_ack *extack)
2229 {
2230         if (tb[IFLA_ADDRESS]) {
2231                 if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
2232                         return -EINVAL;
2233                 if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
2234                         return -EADDRNOTAVAIL;
2235         }
2236         return 0;
2237 }
2238
2239 static unsigned int team_get_num_tx_queues(void)
2240 {
2241         return TEAM_DEFAULT_NUM_TX_QUEUES;
2242 }
2243
2244 static unsigned int team_get_num_rx_queues(void)
2245 {
2246         return TEAM_DEFAULT_NUM_RX_QUEUES;
2247 }
2248
2249 static struct rtnl_link_ops team_link_ops __read_mostly = {
2250         .kind                   = DRV_NAME,
2251         .priv_size              = sizeof(struct team),
2252         .setup                  = team_setup,
2253         .newlink                = team_newlink,
2254         .validate               = team_validate,
2255         .get_num_tx_queues      = team_get_num_tx_queues,
2256         .get_num_rx_queues      = team_get_num_rx_queues,
2257 };
2258
2259
2260 /***********************************
2261  * Generic netlink custom interface
2262  ***********************************/
2263
2264 static struct genl_family team_nl_family;
2265
2266 static const struct nla_policy team_nl_policy[TEAM_ATTR_MAX + 1] = {
2267         [TEAM_ATTR_UNSPEC]                      = { .type = NLA_UNSPEC, },
2268         [TEAM_ATTR_TEAM_IFINDEX]                = { .type = NLA_U32 },
2269         [TEAM_ATTR_LIST_OPTION]                 = { .type = NLA_NESTED },
2270         [TEAM_ATTR_LIST_PORT]                   = { .type = NLA_NESTED },
2271 };
2272
2273 static const struct nla_policy
2274 team_nl_option_policy[TEAM_ATTR_OPTION_MAX + 1] = {
2275         [TEAM_ATTR_OPTION_UNSPEC]               = { .type = NLA_UNSPEC, },
2276         [TEAM_ATTR_OPTION_NAME] = {
2277                 .type = NLA_STRING,
2278                 .len = TEAM_STRING_MAX_LEN,
2279         },
2280         [TEAM_ATTR_OPTION_CHANGED]              = { .type = NLA_FLAG },
2281         [TEAM_ATTR_OPTION_TYPE]                 = { .type = NLA_U8 },
2282         [TEAM_ATTR_OPTION_DATA]                 = { .type = NLA_BINARY },
2283         [TEAM_ATTR_OPTION_PORT_IFINDEX]         = { .type = NLA_U32 },
2284         [TEAM_ATTR_OPTION_ARRAY_INDEX]          = { .type = NLA_U32 },
2285 };
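/*
 * Illustrative attribute layout of a TEAM_CMD_OPTIONS_SET request accepted
 * by the policies above (nesting only, not byte-exact):
 *
 *	TEAM_ATTR_TEAM_IFINDEX = <ifindex of the team device>
 *	TEAM_ATTR_LIST_OPTION
 *		TEAM_ATTR_ITEM_OPTION
 *			TEAM_ATTR_OPTION_NAME = "mode"
 *			TEAM_ATTR_OPTION_TYPE = NLA_STRING
 *			TEAM_ATTR_OPTION_DATA = "activebackup"
 */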
2286
2287 static int team_nl_cmd_noop(struct sk_buff *skb, struct genl_info *info)
2288 {
2289         struct sk_buff *msg;
2290         void *hdr;
2291         int err;
2292
2293         msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
2294         if (!msg)
2295                 return -ENOMEM;
2296
2297         hdr = genlmsg_put(msg, info->snd_portid, info->snd_seq,
2298                           &team_nl_family, 0, TEAM_CMD_NOOP);
2299         if (!hdr) {
2300                 err = -EMSGSIZE;
2301                 goto err_msg_put;
2302         }
2303
2304         genlmsg_end(msg, hdr);
2305
2306         return genlmsg_unicast(genl_info_net(info), msg, info->snd_portid);
2307
2308 err_msg_put:
2309         nlmsg_free(msg);
2310
2311         return err;
2312 }
2313
2314 /*
2315  * Netlink cmd functions should be guarded by the following two functions.
2316  * Since dev gets held here, it is ensured that dev won't disappear in between.
2317  */
2318 static struct team *team_nl_team_get(struct genl_info *info)
2319 {
2320         struct net *net = genl_info_net(info);
2321         int ifindex;
2322         struct net_device *dev;
2323         struct team *team;
2324
2325         if (!info->attrs[TEAM_ATTR_TEAM_IFINDEX])
2326                 return NULL;
2327
2328         ifindex = nla_get_u32(info->attrs[TEAM_ATTR_TEAM_IFINDEX]);
2329         dev = dev_get_by_index(net, ifindex);
2330         if (!dev || dev->netdev_ops != &team_netdev_ops) {
2331                 if (dev)
2332                         dev_put(dev);
2333                 return NULL;
2334         }
2335
2336         team = netdev_priv(dev);
2337         mutex_lock(&team->lock);
2338         return team;
2339 }
2340
2341 static void team_nl_team_put(struct team *team)
2342 {
2343         mutex_unlock(&team->lock);
2344         dev_put(team->dev);
2345 }
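/*
 * Typical usage of the pair above (sketch, mirroring the cmd handlers
 * below):
 *
 *	team = team_nl_team_get(info);
 *	if (!team)
 *		return -EINVAL;
 *	... work with team->lock held ...
 *	team_nl_team_put(team);
 */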
2346
2347 typedef int team_nl_send_func_t(struct sk_buff *skb,
2348                                 struct team *team, u32 portid);
2349
2350 static int team_nl_send_unicast(struct sk_buff *skb, struct team *team, u32 portid)
2351 {
2352         return genlmsg_unicast(dev_net(team->dev), skb, portid);
2353 }
2354
2355 static int team_nl_fill_one_option_get(struct sk_buff *skb, struct team *team,
2356                                        struct team_option_inst *opt_inst)
2357 {
2358         struct nlattr *option_item;
2359         struct team_option *option = opt_inst->option;
2360         struct team_option_inst_info *opt_inst_info = &opt_inst->info;
2361         struct team_gsetter_ctx ctx;
2362         int err;
2363
2364         ctx.info = opt_inst_info;
2365         err = team_option_get(team, opt_inst, &ctx);
2366         if (err)
2367                 return err;
2368
2369         option_item = nla_nest_start_noflag(skb, TEAM_ATTR_ITEM_OPTION);
2370         if (!option_item)
2371                 return -EMSGSIZE;
2372
2373         if (nla_put_string(skb, TEAM_ATTR_OPTION_NAME, option->name))
2374                 goto nest_cancel;
2375         if (opt_inst_info->port &&
2376             nla_put_u32(skb, TEAM_ATTR_OPTION_PORT_IFINDEX,
2377                         opt_inst_info->port->dev->ifindex))
2378                 goto nest_cancel;
2379         if (opt_inst->option->array_size &&
2380             nla_put_u32(skb, TEAM_ATTR_OPTION_ARRAY_INDEX,
2381                         opt_inst_info->array_index))
2382                 goto nest_cancel;
2383
2384         switch (option->type) {
2385         case TEAM_OPTION_TYPE_U32:
2386                 if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_U32))
2387                         goto nest_cancel;
2388                 if (nla_put_u32(skb, TEAM_ATTR_OPTION_DATA, ctx.data.u32_val))
2389                         goto nest_cancel;
2390                 break;
2391         case TEAM_OPTION_TYPE_STRING:
2392                 if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_STRING))
2393                         goto nest_cancel;
2394                 if (nla_put_string(skb, TEAM_ATTR_OPTION_DATA,
2395                                    ctx.data.str_val))
2396                         goto nest_cancel;
2397                 break;
2398         case TEAM_OPTION_TYPE_BINARY:
2399                 if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_BINARY))
2400                         goto nest_cancel;
2401                 if (nla_put(skb, TEAM_ATTR_OPTION_DATA, ctx.data.bin_val.len,
2402                             ctx.data.bin_val.ptr))
2403                         goto nest_cancel;
2404                 break;
2405         case TEAM_OPTION_TYPE_BOOL:
2406                 if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_FLAG))
2407                         goto nest_cancel;
2408                 if (ctx.data.bool_val &&
2409                     nla_put_flag(skb, TEAM_ATTR_OPTION_DATA))
2410                         goto nest_cancel;
2411                 break;
2412         case TEAM_OPTION_TYPE_S32:
2413                 if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_S32))
2414                         goto nest_cancel;
2415                 if (nla_put_s32(skb, TEAM_ATTR_OPTION_DATA, ctx.data.s32_val))
2416                         goto nest_cancel;
2417                 break;
2418         default:
2419                 BUG();
2420         }
2421         if (opt_inst->removed && nla_put_flag(skb, TEAM_ATTR_OPTION_REMOVED))
2422                 goto nest_cancel;
2423         if (opt_inst->changed) {
2424                 if (nla_put_flag(skb, TEAM_ATTR_OPTION_CHANGED))
2425                         goto nest_cancel;
2426                 opt_inst->changed = false;
2427         }
2428         nla_nest_end(skb, option_item);
2429         return 0;
2430
2431 nest_cancel:
2432         nla_nest_cancel(skb, option_item);
2433         return -EMSGSIZE;
2434 }
2435
2436 static int __send_and_alloc_skb(struct sk_buff **pskb,
2437                                 struct team *team, u32 portid,
2438                                 team_nl_send_func_t *send_func)
2439 {
2440         int err;
2441
2442         if (*pskb) {
2443                 err = send_func(*pskb, team, portid);
2444                 if (err)
2445                         return err;
2446         }
2447         *pskb = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
2448         if (!*pskb)
2449                 return -ENOMEM;
2450         return 0;
2451 }
2452
2453 static int team_nl_send_options_get(struct team *team, u32 portid, u32 seq,
2454                                     int flags, team_nl_send_func_t *send_func,
2455                                     struct list_head *sel_opt_inst_list)
2456 {
2457         struct nlattr *option_list;
2458         struct nlmsghdr *nlh;
2459         void *hdr;
2460         struct team_option_inst *opt_inst;
2461         int err;
2462         struct sk_buff *skb = NULL;
2463         bool incomplete;
2464         int i;
2465
2466         opt_inst = list_first_entry(sel_opt_inst_list,
2467                                     struct team_option_inst, tmp_list);
2468
2469 start_again:
2470         err = __send_and_alloc_skb(&skb, team, portid, send_func);
2471         if (err)
2472                 return err;
2473
2474         hdr = genlmsg_put(skb, portid, seq, &team_nl_family, flags | NLM_F_MULTI,
2475                           TEAM_CMD_OPTIONS_GET);
2476         if (!hdr) {
2477                 nlmsg_free(skb);
2478                 return -EMSGSIZE;
2479         }
2480
2481         if (nla_put_u32(skb, TEAM_ATTR_TEAM_IFINDEX, team->dev->ifindex))
2482                 goto nla_put_failure;
2483         option_list = nla_nest_start_noflag(skb, TEAM_ATTR_LIST_OPTION);
2484         if (!option_list)
2485                 goto nla_put_failure;
2486
2487         i = 0;
2488         incomplete = false;
2489         list_for_each_entry_from(opt_inst, sel_opt_inst_list, tmp_list) {
2490                 err = team_nl_fill_one_option_get(skb, team, opt_inst);
2491                 if (err) {
2492                         if (err == -EMSGSIZE) {
2493                                 if (!i)
2494                                         goto errout;
2495                                 incomplete = true;
2496                                 break;
2497                         }
2498                         goto errout;
2499                 }
2500                 i++;
2501         }
2502
2503         nla_nest_end(skb, option_list);
2504         genlmsg_end(skb, hdr);
2505         if (incomplete)
2506                 goto start_again;
2507
2508 send_done:
2509         nlh = nlmsg_put(skb, portid, seq, NLMSG_DONE, 0, flags | NLM_F_MULTI);
2510         if (!nlh) {
2511                 err = __send_and_alloc_skb(&skb, team, portid, send_func);
2512                 if (err)
2513                         return err;
2514                 goto send_done;
2515         }
2516
2517         return send_func(skb, team, portid);
2518
2519 nla_put_failure:
2520         err = -EMSGSIZE;
2521 errout:
2522         nlmsg_free(skb);
2523         return err;
2524 }
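/*
 * Note on the start_again loop above: if team_nl_fill_one_option_get()
 * returns -EMSGSIZE after at least one option was emitted, the full skb is
 * flushed through send_func() inside __send_and_alloc_skb() and the dump
 * resumes from the same opt_inst in a fresh NLM_F_MULTI message; the
 * sequence is terminated with NLMSG_DONE at send_done.
 */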
2525
2526 static int team_nl_cmd_options_get(struct sk_buff *skb, struct genl_info *info)
2527 {
2528         struct team *team;
2529         struct team_option_inst *opt_inst;
2530         int err;
2531         LIST_HEAD(sel_opt_inst_list);
2532
2533         team = team_nl_team_get(info);
2534         if (!team)
2535                 return -EINVAL;
2536
2537         list_for_each_entry(opt_inst, &team->option_inst_list, list)
2538                 list_add_tail(&opt_inst->tmp_list, &sel_opt_inst_list);
2539         err = team_nl_send_options_get(team, info->snd_portid, info->snd_seq,
2540                                        NLM_F_ACK, team_nl_send_unicast,
2541                                        &sel_opt_inst_list);
2542
2543         team_nl_team_put(team);
2544
2545         return err;
2546 }
2547
2548 static int team_nl_send_event_options_get(struct team *team,
2549                                           struct list_head *sel_opt_inst_list);
2550
2551 static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info)
2552 {
2553         struct team *team;
2554         int err = 0;
2555         int i;
2556         struct nlattr *nl_option;
2557
2558         rtnl_lock();
2559
2560         team = team_nl_team_get(info);
2561         if (!team) {
2562                 err = -EINVAL;
2563                 goto rtnl_unlock;
2564         }
2565
2566         err = -EINVAL;
2567         if (!info->attrs[TEAM_ATTR_LIST_OPTION])
2568                 goto team_put;
2571
2572         nla_for_each_nested(nl_option, info->attrs[TEAM_ATTR_LIST_OPTION], i) {
2573                 struct nlattr *opt_attrs[TEAM_ATTR_OPTION_MAX + 1];
2574                 struct nlattr *attr;
2575                 struct nlattr *attr_data;
2576                 LIST_HEAD(opt_inst_list);
2577                 enum team_option_type opt_type;
2578                 int opt_port_ifindex = 0; /* != 0 for per-port options */
2579                 u32 opt_array_index = 0;
2580                 bool opt_is_array = false;
2581                 struct team_option_inst *opt_inst;
2582                 char *opt_name;
2583                 bool opt_found = false;
2584
2585                 if (nla_type(nl_option) != TEAM_ATTR_ITEM_OPTION) {
2586                         err = -EINVAL;
2587                         goto team_put;
2588                 }
2589                 err = nla_parse_nested_deprecated(opt_attrs,
2590                                                   TEAM_ATTR_OPTION_MAX,
2591                                                   nl_option,
2592                                                   team_nl_option_policy,
2593                                                   info->extack);
2594                 if (err)
2595                         goto team_put;
2596                 if (!opt_attrs[TEAM_ATTR_OPTION_NAME] ||
2597                     !opt_attrs[TEAM_ATTR_OPTION_TYPE]) {
2598                         err = -EINVAL;
2599                         goto team_put;
2600                 }
2601                 switch (nla_get_u8(opt_attrs[TEAM_ATTR_OPTION_TYPE])) {
2602                 case NLA_U32:
2603                         opt_type = TEAM_OPTION_TYPE_U32;
2604                         break;
2605                 case NLA_STRING:
2606                         opt_type = TEAM_OPTION_TYPE_STRING;
2607                         break;
2608                 case NLA_BINARY:
2609                         opt_type = TEAM_OPTION_TYPE_BINARY;
2610                         break;
2611                 case NLA_FLAG:
2612                         opt_type = TEAM_OPTION_TYPE_BOOL;
2613                         break;
2614                 case NLA_S32:
2615                         opt_type = TEAM_OPTION_TYPE_S32;
2616                         break;
2617                 default:
                             err = -EINVAL;
2618                         goto team_put;
2619                 }
2620
2621                 attr_data = opt_attrs[TEAM_ATTR_OPTION_DATA];
2622                 if (opt_type != TEAM_OPTION_TYPE_BOOL && !attr_data) {
2623                         err = -EINVAL;
2624                         goto team_put;
2625                 }
2626
2627                 opt_name = nla_data(opt_attrs[TEAM_ATTR_OPTION_NAME]);
2628                 attr = opt_attrs[TEAM_ATTR_OPTION_PORT_IFINDEX];
2629                 if (attr)
2630                         opt_port_ifindex = nla_get_u32(attr);
2631
2632                 attr = opt_attrs[TEAM_ATTR_OPTION_ARRAY_INDEX];
2633                 if (attr) {
2634                         opt_is_array = true;
2635                         opt_array_index = nla_get_u32(attr);
2636                 }
2637
2638                 list_for_each_entry(opt_inst, &team->option_inst_list, list) {
2639                         struct team_option *option = opt_inst->option;
2640                         struct team_gsetter_ctx ctx;
2641                         struct team_option_inst_info *opt_inst_info;
2642                         int tmp_ifindex;
2643
2644                         opt_inst_info = &opt_inst->info;
2645                         tmp_ifindex = opt_inst_info->port ?
2646                                       opt_inst_info->port->dev->ifindex : 0;
2647                         if (option->type != opt_type ||
2648                             strcmp(option->name, opt_name) ||
2649                             tmp_ifindex != opt_port_ifindex ||
2650                             (option->array_size && !opt_is_array) ||
2651                             opt_inst_info->array_index != opt_array_index)
2652                                 continue;
2653                         opt_found = true;
2654                         ctx.info = opt_inst_info;
2655                         switch (opt_type) {
2656                         case TEAM_OPTION_TYPE_U32:
2657                                 ctx.data.u32_val = nla_get_u32(attr_data);
2658                                 break;
2659                         case TEAM_OPTION_TYPE_STRING:
2660                                 if (nla_len(attr_data) > TEAM_STRING_MAX_LEN) {
2661                                         err = -EINVAL;
2662                                         goto team_put;
2663                                 }
2664                                 ctx.data.str_val = nla_data(attr_data);
2665                                 break;
2666                         case TEAM_OPTION_TYPE_BINARY:
2667                                 ctx.data.bin_val.len = nla_len(attr_data);
2668                                 ctx.data.bin_val.ptr = nla_data(attr_data);
2669                                 break;
2670                         case TEAM_OPTION_TYPE_BOOL:
2671                                 ctx.data.bool_val = attr_data ? true : false;
2672                                 break;
2673                         case TEAM_OPTION_TYPE_S32:
2674                                 ctx.data.s32_val = nla_get_s32(attr_data);
2675                                 break;
2676                         default:
2677                                 BUG();
2678                         }
2679                         err = team_option_set(team, opt_inst, &ctx);
2680                         if (err)
2681                                 goto team_put;
2682                         opt_inst->changed = true;
2683                         list_add(&opt_inst->tmp_list, &opt_inst_list);
2684                 }
2685                 if (!opt_found) {
2686                         err = -ENOENT;
2687                         goto team_put;
2688                 }
2689
2690                 err = team_nl_send_event_options_get(team, &opt_inst_list);
2691                 if (err)
2692                         break;
2693         }
2694
2695 team_put:
2696         team_nl_team_put(team);
2697 rtnl_unlock:
2698         rtnl_unlock();
2699         return err;
2700 }
2701
2702 static int team_nl_fill_one_port_get(struct sk_buff *skb,
2703                                      struct team_port *port)
2704 {
2705         struct nlattr *port_item;
2706
2707         port_item = nla_nest_start_noflag(skb, TEAM_ATTR_ITEM_PORT);
2708         if (!port_item)
2709                 goto nest_cancel;
2710         if (nla_put_u32(skb, TEAM_ATTR_PORT_IFINDEX, port->dev->ifindex))
2711                 goto nest_cancel;
2712         if (port->changed) {
2713                 if (nla_put_flag(skb, TEAM_ATTR_PORT_CHANGED))
2714                         goto nest_cancel;
2715                 port->changed = false;
2716         }
2717         if ((port->removed &&
2718              nla_put_flag(skb, TEAM_ATTR_PORT_REMOVED)) ||
2719             (port->state.linkup &&
2720              nla_put_flag(skb, TEAM_ATTR_PORT_LINKUP)) ||
2721             nla_put_u32(skb, TEAM_ATTR_PORT_SPEED, port->state.speed) ||
2722             nla_put_u8(skb, TEAM_ATTR_PORT_DUPLEX, port->state.duplex))
2723                 goto nest_cancel;
2724         nla_nest_end(skb, port_item);
2725         return 0;
2726
2727 nest_cancel:
2728         nla_nest_cancel(skb, port_item);
2729         return -EMSGSIZE;
2730 }
2731
2732 static int team_nl_send_port_list_get(struct team *team, u32 portid, u32 seq,
2733                                       int flags, team_nl_send_func_t *send_func,
2734                                       struct team_port *one_port)
2735 {
2736         struct nlattr *port_list;
2737         struct nlmsghdr *nlh;
2738         void *hdr;
2739         struct team_port *port;
2740         int err;
2741         struct sk_buff *skb = NULL;
2742         bool incomplete;
2743         int i;
2744
2745         port = list_first_entry_or_null(&team->port_list,
2746                                         struct team_port, list);
2747
2748 start_again:
2749         err = __send_and_alloc_skb(&skb, team, portid, send_func);
2750         if (err)
2751                 return err;
2752
2753         hdr = genlmsg_put(skb, portid, seq, &team_nl_family, flags | NLM_F_MULTI,
2754                           TEAM_CMD_PORT_LIST_GET);
2755         if (!hdr) {
2756                 nlmsg_free(skb);
2757                 return -EMSGSIZE;
2758         }
2759
2760         if (nla_put_u32(skb, TEAM_ATTR_TEAM_IFINDEX, team->dev->ifindex))
2761                 goto nla_put_failure;
2762         port_list = nla_nest_start_noflag(skb, TEAM_ATTR_LIST_PORT);
2763         if (!port_list)
2764                 goto nla_put_failure;
2765
2766         i = 0;
2767         incomplete = false;
2768
2769         /* If one port is selected, the caller wants to send a port list
2770          * containing only this port; otherwise all listed ports are sent.
2771          */
2772         if (one_port) {
2773                 err = team_nl_fill_one_port_get(skb, one_port);
2774                 if (err)
2775                         goto errout;
2776         } else if (port) {
2777                 list_for_each_entry_from(port, &team->port_list, list) {
2778                         err = team_nl_fill_one_port_get(skb, port);
2779                         if (err) {
2780                                 if (err == -EMSGSIZE) {
2781                                         if (!i)
2782                                                 goto errout;
2783                                         incomplete = true;
2784                                         break;
2785                                 }
2786                                 goto errout;
2787                         }
2788                         i++;
2789                 }
2790         }
2791
2792         nla_nest_end(skb, port_list);
2793         genlmsg_end(skb, hdr);
2794         if (incomplete)
2795                 goto start_again;
2796
2797 send_done:
2798         nlh = nlmsg_put(skb, portid, seq, NLMSG_DONE, 0, flags | NLM_F_MULTI);
2799         if (!nlh) {
2800                 err = __send_and_alloc_skb(&skb, team, portid, send_func);
2801                 if (err)
2802                         return err;
2803                 goto send_done;
2804         }
2805
2806         return send_func(skb, team, portid);
2807
2808 nla_put_failure:
2809         err = -EMSGSIZE;
2810 errout:
2811         nlmsg_free(skb);
2812         return err;
2813 }
2814
2815 static int team_nl_cmd_port_list_get(struct sk_buff *skb,
2816                                      struct genl_info *info)
2817 {
2818         struct team *team;
2819         int err;
2820
2821         team = team_nl_team_get(info);
2822         if (!team)
2823                 return -EINVAL;
2824
2825         err = team_nl_send_port_list_get(team, info->snd_portid, info->snd_seq,
2826                                          NLM_F_ACK, team_nl_send_unicast, NULL);
2827
2828         team_nl_team_put(team);
2829
2830         return err;
2831 }
2832
2833 static const struct genl_small_ops team_nl_ops[] = {
2834         {
2835                 .cmd = TEAM_CMD_NOOP,
2836                 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
2837                 .doit = team_nl_cmd_noop,
2838         },
2839         {
2840                 .cmd = TEAM_CMD_OPTIONS_SET,
2841                 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
2842                 .doit = team_nl_cmd_options_set,
2843                 .flags = GENL_ADMIN_PERM,
2844         },
2845         {
2846                 .cmd = TEAM_CMD_OPTIONS_GET,
2847                 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
2848                 .doit = team_nl_cmd_options_get,
2849                 .flags = GENL_ADMIN_PERM,
2850         },
2851         {
2852                 .cmd = TEAM_CMD_PORT_LIST_GET,
2853                 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
2854                 .doit = team_nl_cmd_port_list_get,
2855                 .flags = GENL_ADMIN_PERM,
2856         },
2857 };
2858
2859 static const struct genl_multicast_group team_nl_mcgrps[] = {
2860         { .name = TEAM_GENL_CHANGE_EVENT_MC_GRP_NAME, },
2861 };
2862
2863 static struct genl_family team_nl_family __ro_after_init = {
2864         .name           = TEAM_GENL_NAME,
2865         .version        = TEAM_GENL_VERSION,
2866         .maxattr        = TEAM_ATTR_MAX,
2867         .policy         = team_nl_policy,
2868         .netnsok        = true,
2869         .module         = THIS_MODULE,
2870         .small_ops      = team_nl_ops,
2871         .n_small_ops    = ARRAY_SIZE(team_nl_ops),
2872         .mcgrps         = team_nl_mcgrps,
2873         .n_mcgrps       = ARRAY_SIZE(team_nl_mcgrps),
2874 };
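/*
 * Example (illustrative): the libteam "teamnl" utility speaks this generic
 * netlink family:
 *
 *	# teamnl team0 options			-> TEAM_CMD_OPTIONS_GET
 *	# teamnl team0 setoption mode loadbalance
 *						-> TEAM_CMD_OPTIONS_SET
 *	# teamnl team0 ports			-> TEAM_CMD_PORT_LIST_GET
 */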
2875
2876 static int team_nl_send_multicast(struct sk_buff *skb,
2877                                   struct team *team, u32 portid)
2878 {
2879         return genlmsg_multicast_netns(&team_nl_family, dev_net(team->dev),
2880                                        skb, 0, 0, GFP_KERNEL);
2881 }
2882
2883 static int team_nl_send_event_options_get(struct team *team,
2884                                           struct list_head *sel_opt_inst_list)
2885 {
2886         return team_nl_send_options_get(team, 0, 0, 0, team_nl_send_multicast,
2887                                         sel_opt_inst_list);
2888 }
2889
2890 static int team_nl_send_event_port_get(struct team *team,
2891                                        struct team_port *port)
2892 {
2893         return team_nl_send_port_list_get(team, 0, 0, 0, team_nl_send_multicast,
2894                                           port);
2895 }
2896
2897 static int __init team_nl_init(void)
2898 {
2899         return genl_register_family(&team_nl_family);
2900 }
2901
2902 static void team_nl_fini(void)
2903 {
2904         genl_unregister_family(&team_nl_family);
2905 }
2906
2907
2908 /******************
2909  * Change checkers
2910  ******************/
2911
2912 static void __team_options_change_check(struct team *team)
2913 {
2914         int err;
2915         struct team_option_inst *opt_inst;
2916         LIST_HEAD(sel_opt_inst_list);
2917
2918         list_for_each_entry(opt_inst, &team->option_inst_list, list) {
2919                 if (opt_inst->changed)
2920                         list_add_tail(&opt_inst->tmp_list, &sel_opt_inst_list);
2921         }
2922         err = team_nl_send_event_options_get(team, &sel_opt_inst_list);
2923         if (err && err != -ESRCH)
2924                 netdev_warn(team->dev, "Failed to send options change via netlink (err %d)\n",
2925                             err);
2926 }
2927
2928 /* rtnl lock is held */
2930 static void __team_port_change_send(struct team_port *port, bool linkup)
2931 {
2932         int err;
2933
2934         port->changed = true;
2935         port->state.linkup = linkup;
2936         team_refresh_port_linkup(port);
2937         if (linkup) {
2938                 struct ethtool_link_ksettings ecmd;
2939
2940                 err = __ethtool_get_link_ksettings(port->dev, &ecmd);
2941                 if (!err) {
2942                         port->state.speed = ecmd.base.speed;
2943                         port->state.duplex = ecmd.base.duplex;
2944                         goto send_event;
2945                 }
2946         }
2947         port->state.speed = 0;
2948         port->state.duplex = 0;
2949
2950 send_event:
2951         err = team_nl_send_event_port_get(port->team, port);
2952         if (err && err != -ESRCH)
2953                 netdev_warn(port->team->dev, "Failed to send port change of device %s via netlink (err %d)\n",
2954                             port->dev->name, err);
2956 }
2957
2958 static void __team_carrier_check(struct team *team)
2959 {
2960         struct team_port *port;
2961         bool team_linkup;
2962
2963         if (team->user_carrier_enabled)
2964                 return;
2965
2966         team_linkup = false;
2967         list_for_each_entry(port, &team->port_list, list) {
2968                 if (port->linkup) {
2969                         team_linkup = true;
2970                         break;
2971                 }
2972         }
2973
2974         if (team_linkup)
2975                 netif_carrier_on(team->dev);
2976         else
2977                 netif_carrier_off(team->dev);
2978 }
2979
2980 static void __team_port_change_check(struct team_port *port, bool linkup)
2981 {
2982         if (port->state.linkup != linkup)
2983                 __team_port_change_send(port, linkup);
2984         __team_carrier_check(port->team);
2985 }
2986
2987 static void __team_port_change_port_added(struct team_port *port, bool linkup)
2988 {
2989         __team_port_change_send(port, linkup);
2990         __team_carrier_check(port->team);
2991 }
2992
2993 static void __team_port_change_port_removed(struct team_port *port)
2994 {
2995         port->removed = true;
2996         __team_port_change_send(port, false);
2997         __team_carrier_check(port->team);
2998 }
2999
3000 static void team_port_change_check(struct team_port *port, bool linkup)
3001 {
3002         struct team *team = port->team;
3003
3004         mutex_lock(&team->lock);
3005         __team_port_change_check(port, linkup);
3006         mutex_unlock(&team->lock);
3007 }
3008
3009
3010 /************************************
3011  * Net device notifier event handler
3012  ************************************/
3013
3014 static int team_device_event(struct notifier_block *unused,
3015                              unsigned long event, void *ptr)
3016 {
3017         struct net_device *dev = netdev_notifier_info_to_dev(ptr);
3018         struct team_port *port;
3019
3020         port = team_port_get_rtnl(dev);
3021         if (!port)
3022                 return NOTIFY_DONE;
3023
3024         switch (event) {
3025         case NETDEV_UP:
3026                 if (netif_oper_up(dev))
3027                         team_port_change_check(port, true);
3028                 break;
3029         case NETDEV_DOWN:
3030                 team_port_change_check(port, false);
3031                 break;
3032         case NETDEV_CHANGE:
3033                 if (netif_running(port->dev))
3034                         team_port_change_check(port,
3035                                                !!netif_oper_up(port->dev));
3036                 break;
3037         case NETDEV_UNREGISTER:
3038                 team_del_slave(port->team->dev, dev);
3039                 break;
3040         case NETDEV_FEAT_CHANGE:
3041                 if (!port->team->notifier_ctx) {
3042                         port->team->notifier_ctx = true;
3043                         team_compute_features(port->team);
3044                         port->team->notifier_ctx = false;
3045                 }
3046                 break;
3047         case NETDEV_PRECHANGEMTU:
3048                 /* Forbid changing the MTU of an underlying device */
3049                 if (!port->team->port_mtu_change_allowed)
3050                         return NOTIFY_BAD;
3051                 break;
3052         case NETDEV_PRE_TYPE_CHANGE:
3053                 /* Forbid changing the type of an underlying device */
3054                 return NOTIFY_BAD;
3055         case NETDEV_RESEND_IGMP:
3056                 /* Propagate to master device */
3057                 call_netdevice_notifiers(event, port->team->dev);
3058                 break;
3059         }
3060         return NOTIFY_DONE;
3061 }
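/*
 * Example (illustrative) event flow: when a port NIC loses carrier, the
 * core emits NETDEV_CHANGE for the port device; the handler above maps
 * that to team_port_change_check(port, false), which updates the port
 * state under team->lock and may, via __team_carrier_check(), turn the
 * team master's carrier off as well.
 */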
3062
3063 static struct notifier_block team_notifier_block __read_mostly = {
3064         .notifier_call = team_device_event,
3065 };
3066
3067
3068 /***********************
3069  * Module init and exit
3070  ***********************/
3071
3072 static int __init team_module_init(void)
3073 {
3074         int err;
3075
3076         register_netdevice_notifier(&team_notifier_block);
3077
3078         err = rtnl_link_register(&team_link_ops);
3079         if (err)
3080                 goto err_rtnl_reg;
3081
3082         err = team_nl_init();
3083         if (err)
3084                 goto err_nl_init;
3085
3086         return 0;
3087
3088 err_nl_init:
3089         rtnl_link_unregister(&team_link_ops);
3090
3091 err_rtnl_reg:
3092         unregister_netdevice_notifier(&team_notifier_block);
3093
3094         return err;
3095 }
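/*
 * Example (illustrative): "modprobe team" runs team_module_init() above;
 * creating a link of kind "team" (e.g. "ip link add name team0 type team")
 * can also auto-load the module through the MODULE_ALIAS_RTNL_LINK(DRV_NAME)
 * alias declared at the end of this file.
 */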
3096
3097 static void __exit team_module_exit(void)
3098 {
3099         team_nl_fini();
3100         rtnl_link_unregister(&team_link_ops);
3101         unregister_netdevice_notifier(&team_notifier_block);
3102 }
3103
3104 module_init(team_module_init);
3105 module_exit(team_module_exit);
3106
3107 MODULE_LICENSE("GPL v2");
3108 MODULE_AUTHOR("Jiri Pirko <jpirko@redhat.com>");
3109 MODULE_DESCRIPTION("Ethernet team device driver");
3110 MODULE_ALIAS_RTNL_LINK(DRV_NAME);