/*
 * drivers/net/team/team.c - Network team device driver
 * Copyright (c) 2011 Jiri Pirko <jpirko@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/rcupdate.h>
#include <linux/errno.h>
#include <linux/ctype.h>
#include <linux/notifier.h>
#include <linux/netdevice.h>
#include <linux/netpoll.h>
#include <linux/if_vlan.h>
#include <linux/if_arp.h>
#include <linux/socket.h>
#include <linux/etherdevice.h>
#include <linux/rtnetlink.h>
#include <net/rtnetlink.h>
#include <net/genetlink.h>
#include <net/netlink.h>
#include <net/sch_generic.h>
#include <net/switchdev.h>
#include <generated/utsrelease.h>
#include <linux/if_team.h>

#define DRV_NAME "team"


/**********
 * Helpers
 **********/

#define team_port_exists(dev) (dev->priv_flags & IFF_TEAM_PORT)

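/*
 * The team_port for an enslaved device is stored as its rx_handler_data.
 * team_port_get_rcu() below is meant for fast-path callers already inside
 * an RCU read section; team_port_get_rtnl() is for control-path callers
 * holding RTNL.
 */
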
static struct team_port *team_port_get_rcu(const struct net_device *dev)
{
	return rcu_dereference(dev->rx_handler_data);
}

static struct team_port *team_port_get_rtnl(const struct net_device *dev)
{
	struct team_port *port = rtnl_dereference(dev->rx_handler_data);

	return team_port_exists(dev) ? port : NULL;
}

/*
 * Since the ability to change device address for open port device is tested in
 * team_port_add, this function can be called without control of return value
 */
static int __set_port_dev_addr(struct net_device *port_dev,
			       const unsigned char *dev_addr)
{
	struct sockaddr addr;

	memcpy(addr.sa_data, dev_addr, port_dev->addr_len);
	addr.sa_family = port_dev->type;
	return dev_set_mac_address(port_dev, &addr);
}

static int team_port_set_orig_dev_addr(struct team_port *port)
{
	return __set_port_dev_addr(port->dev, port->orig.dev_addr);
}

static int team_port_set_team_dev_addr(struct team *team,
				       struct team_port *port)
{
	return __set_port_dev_addr(port->dev, team->dev->dev_addr);
}

int team_modeop_port_enter(struct team *team, struct team_port *port)
{
	return team_port_set_team_dev_addr(team, port);
}
EXPORT_SYMBOL(team_modeop_port_enter);

void team_modeop_port_change_dev_addr(struct team *team,
				      struct team_port *port)
{
	team_port_set_team_dev_addr(team, port);
}
EXPORT_SYMBOL(team_modeop_port_change_dev_addr);

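/*
 * Modes use the exported helpers above to keep a port's MAC either in sync
 * with the team device (team->dev->dev_addr) or, on removal, restored to
 * the address it had before it was enslaved (port->orig.dev_addr).
 */
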
static void team_lower_state_changed(struct team_port *port)
{
	struct netdev_lag_lower_state_info info;

	info.link_up = port->linkup;
	info.tx_enabled = team_port_enabled(port);
	netdev_lower_state_changed(port->dev, &info);
}

static void team_refresh_port_linkup(struct team_port *port)
{
	bool new_linkup = port->user.linkup_enabled ? port->user.linkup :
						      port->state.linkup;

	if (port->linkup != new_linkup) {
		port->linkup = new_linkup;
		team_lower_state_changed(port);
	}
}

/***********************
 * Options handling
 ***********************/

struct team_option_inst { /* One for each option instance */
	struct list_head list;
	struct list_head tmp_list;
	struct team_option *option;
	struct team_option_inst_info info;
	bool changed;
	bool removed;
};

static struct team_option *__team_find_option(struct team *team,
					      const char *opt_name)
{
	struct team_option *option;

	list_for_each_entry(option, &team->option_list, list) {
		if (strcmp(option->name, opt_name) == 0)
			return option;
	}
	return NULL;
}

static void __team_option_inst_del(struct team_option_inst *opt_inst)
{
	list_del(&opt_inst->list);
	kfree(opt_inst);
}

static void __team_option_inst_del_option(struct team *team,
					  struct team_option *option)
{
	struct team_option_inst *opt_inst, *tmp;

	list_for_each_entry_safe(opt_inst, tmp, &team->option_inst_list, list) {
		if (opt_inst->option == option)
			__team_option_inst_del(opt_inst);
	}
}

static int __team_option_inst_add(struct team *team, struct team_option *option,
				  struct team_port *port)
{
	struct team_option_inst *opt_inst;
	unsigned int array_size;
	unsigned int i;
	int err;

	array_size = option->array_size;
	if (!array_size)
		array_size = 1; /* No array but still need one instance */

	for (i = 0; i < array_size; i++) {
		opt_inst = kmalloc(sizeof(*opt_inst), GFP_KERNEL);
		if (!opt_inst)
			return -ENOMEM;
		opt_inst->option = option;
		opt_inst->info.port = port;
		opt_inst->info.array_index = i;
		opt_inst->changed = true;
		opt_inst->removed = false;
		list_add_tail(&opt_inst->list, &team->option_inst_list);
		if (option->init) {
			err = option->init(team, &opt_inst->info);
			if (err)
				return err;
		}
	}
	return 0;
}

static int __team_option_inst_add_option(struct team *team,
					 struct team_option *option)
{
	int err;

	if (!option->per_port) {
		err = __team_option_inst_add(team, option, NULL);
		if (err)
			goto inst_del_option;
	}
	return 0;

inst_del_option:
	__team_option_inst_del_option(team, option);
	return err;
}

static void __team_option_inst_mark_removed_option(struct team *team,
						   struct team_option *option)
{
	struct team_option_inst *opt_inst;

	list_for_each_entry(opt_inst, &team->option_inst_list, list) {
		if (opt_inst->option == option) {
			opt_inst->changed = true;
			opt_inst->removed = true;
		}
	}
}

static void __team_option_inst_del_port(struct team *team,
					struct team_port *port)
{
	struct team_option_inst *opt_inst, *tmp;

	list_for_each_entry_safe(opt_inst, tmp, &team->option_inst_list, list) {
		if (opt_inst->option->per_port &&
		    opt_inst->info.port == port)
			__team_option_inst_del(opt_inst);
	}
}

static int __team_option_inst_add_port(struct team *team,
				       struct team_port *port)
{
	struct team_option *option;
	int err;

	list_for_each_entry(option, &team->option_list, list) {
		if (!option->per_port)
			continue;
		err = __team_option_inst_add(team, option, port);
		if (err)
			goto inst_del_port;
	}
	return 0;

inst_del_port:
	__team_option_inst_del_port(team, port);
	return err;
}

static void __team_option_inst_mark_removed_port(struct team *team,
						 struct team_port *port)
{
	struct team_option_inst *opt_inst;

	list_for_each_entry(opt_inst, &team->option_inst_list, list) {
		if (opt_inst->info.port == port) {
			opt_inst->changed = true;
			opt_inst->removed = true;
		}
	}
}

static int __team_options_register(struct team *team,
				   const struct team_option *option,
				   size_t option_count)
{
	int i;
	struct team_option **dst_opts;
	int err;

	dst_opts = kzalloc(sizeof(struct team_option *) * option_count,
			   GFP_KERNEL);
	if (!dst_opts)
		return -ENOMEM;
	for (i = 0; i < option_count; i++, option++) {
		if (__team_find_option(team, option->name)) {
			err = -EEXIST;
			goto alloc_rollback;
		}
		dst_opts[i] = kmemdup(option, sizeof(*option), GFP_KERNEL);
		if (!dst_opts[i]) {
			err = -ENOMEM;
			goto alloc_rollback;
		}
	}

	for (i = 0; i < option_count; i++) {
		err = __team_option_inst_add_option(team, dst_opts[i]);
		if (err)
			goto inst_rollback;
		list_add_tail(&dst_opts[i]->list, &team->option_list);
	}

	kfree(dst_opts);
	return 0;

inst_rollback:
	for (i--; i >= 0; i--)
		__team_option_inst_del_option(team, dst_opts[i]);

	i = option_count;
alloc_rollback:
	for (i--; i >= 0; i--)
		kfree(dst_opts[i]);

	kfree(dst_opts);
	return err;
}

static void __team_options_mark_removed(struct team *team,
					const struct team_option *option,
					size_t option_count)
{
	int i;

	for (i = 0; i < option_count; i++, option++) {
		struct team_option *del_opt;

		del_opt = __team_find_option(team, option->name);
		if (del_opt)
			__team_option_inst_mark_removed_option(team, del_opt);
	}
}

static void __team_options_unregister(struct team *team,
				      const struct team_option *option,
				      size_t option_count)
{
	int i;

	for (i = 0; i < option_count; i++, option++) {
		struct team_option *del_opt;

		del_opt = __team_find_option(team, option->name);
		if (del_opt) {
			__team_option_inst_del_option(team, del_opt);
			list_del(&del_opt->list);
			kfree(del_opt);
		}
	}
}

static void __team_options_change_check(struct team *team);

int team_options_register(struct team *team,
			  const struct team_option *option,
			  size_t option_count)
{
	int err;

	err = __team_options_register(team, option, option_count);
	if (err)
		return err;
	__team_options_change_check(team);
	return 0;
}
EXPORT_SYMBOL(team_options_register);

void team_options_unregister(struct team *team,
			     const struct team_option *option,
			     size_t option_count)
{
	__team_options_mark_removed(team, option, option_count);
	__team_options_change_check(team);
	__team_options_unregister(team, option, option_count);
}
EXPORT_SYMBOL(team_options_unregister);

static int team_option_get(struct team *team,
			   struct team_option_inst *opt_inst,
			   struct team_gsetter_ctx *ctx)
{
	if (!opt_inst->option->getter)
		return -EOPNOTSUPP;
	return opt_inst->option->getter(team, ctx);
}

static int team_option_set(struct team *team,
			   struct team_option_inst *opt_inst,
			   struct team_gsetter_ctx *ctx)
{
	if (!opt_inst->option->setter)
		return -EOPNOTSUPP;
	return opt_inst->option->setter(team, ctx);
}

void team_option_inst_set_change(struct team_option_inst_info *opt_inst_info)
{
	struct team_option_inst *opt_inst;

	opt_inst = container_of(opt_inst_info, struct team_option_inst, info);
	opt_inst->changed = true;
}
EXPORT_SYMBOL(team_option_inst_set_change);

void team_options_change_check(struct team *team)
{
	__team_options_change_check(team);
}
EXPORT_SYMBOL(team_options_change_check);

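/*
 * Example (illustrative only): a mode typically registers its own options
 * from its init callback, e.g. the activebackup mode registers something
 * along these lines in team_mode_activebackup.c:
 *
 *	static const struct team_option ab_options[] = {
 *		{
 *			.name	= "activeport",
 *			.type	= TEAM_OPTION_TYPE_U32,
 *			.getter	= ab_active_port_get,
 *			.setter	= ab_active_port_set,
 *		},
 *	};
 *
 *	err = team_options_register(team, ab_options, ARRAY_SIZE(ab_options));
 *
 * This is a sketch of the registration flow, not code from this file.
 */
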
/****************
 * Mode handling
 ****************/

static LIST_HEAD(mode_list);
static DEFINE_SPINLOCK(mode_list_lock);

struct team_mode_item {
	struct list_head list;
	const struct team_mode *mode;
};

static struct team_mode_item *__find_mode(const char *kind)
{
	struct team_mode_item *mitem;

	list_for_each_entry(mitem, &mode_list, list) {
		if (strcmp(mitem->mode->kind, kind) == 0)
			return mitem;
	}
	return NULL;
}

static bool is_good_mode_name(const char *name)
{
	while (*name != '\0') {
		if (!isalpha(*name) && !isdigit(*name) && *name != '_')
			return false;
		name++;
	}
	return true;
}

int team_mode_register(const struct team_mode *mode)
{
	int err = 0;
	struct team_mode_item *mitem;

	if (!is_good_mode_name(mode->kind) ||
	    mode->priv_size > TEAM_MODE_PRIV_SIZE)
		return -EINVAL;

	mitem = kmalloc(sizeof(*mitem), GFP_KERNEL);
	if (!mitem)
		return -ENOMEM;

	spin_lock(&mode_list_lock);
	if (__find_mode(mode->kind)) {
		err = -EEXIST;
		kfree(mitem);
		goto unlock;
	}
	mitem->mode = mode;
	list_add_tail(&mitem->list, &mode_list);
unlock:
	spin_unlock(&mode_list_lock);
	return err;
}
EXPORT_SYMBOL(team_mode_register);

void team_mode_unregister(const struct team_mode *mode)
{
	struct team_mode_item *mitem;

	spin_lock(&mode_list_lock);
	mitem = __find_mode(mode->kind);
	if (mitem) {
		list_del_init(&mitem->list);
		kfree(mitem);
	}
	spin_unlock(&mode_list_lock);
}
EXPORT_SYMBOL(team_mode_unregister);

static const struct team_mode *team_mode_get(const char *kind)
{
	struct team_mode_item *mitem;
	const struct team_mode *mode = NULL;

	if (!try_module_get(THIS_MODULE))
		return NULL;

	spin_lock(&mode_list_lock);
	mitem = __find_mode(kind);
	if (!mitem) {
		spin_unlock(&mode_list_lock);
		request_module("team-mode-%s", kind);
		spin_lock(&mode_list_lock);
		mitem = __find_mode(kind);
	}
	if (mitem) {
		mode = mitem->mode;
		if (!try_module_get(mode->owner))
			mode = NULL;
	}

	spin_unlock(&mode_list_lock);
	module_put(THIS_MODULE);
	return mode;
}

static void team_mode_put(const struct team_mode *mode)
{
	module_put(mode->owner);
}

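/*
 * Example (illustrative only): a mode module registers itself on load, e.g.
 * the round-robin mode in team_mode_roundrobin.c does roughly:
 *
 *	static const struct team_mode rr_mode = {
 *		.kind	= "roundrobin",
 *		.owner	= THIS_MODULE,
 *		.ops	= &rr_mode_ops,
 *	};
 *
 *	static int __init rr_init_module(void)
 *	{
 *		return team_mode_register(&rr_mode);
 *	}
 *
 * The request_module("team-mode-%s", ...) call above relies on such modules
 * declaring a matching MODULE_ALIAS_TEAM_MODE() alias.
 */
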
static bool team_dummy_transmit(struct team *team, struct sk_buff *skb)
{
	dev_kfree_skb_any(skb);
	return false;
}

static rx_handler_result_t team_dummy_receive(struct team *team,
					      struct team_port *port,
					      struct sk_buff *skb)
{
	return RX_HANDLER_ANOTHER;
}

static const struct team_mode __team_no_mode = {
	.kind		= "*NOMODE*",
};

static bool team_is_mode_set(struct team *team)
{
	return team->mode != &__team_no_mode;
}

static void team_set_no_mode(struct team *team)
{
	team->user_carrier_enabled = false;
	team->mode = &__team_no_mode;
}

static void team_adjust_ops(struct team *team)
{
	/*
	 * To avoid checks in rx/tx skb paths, ensure here that non-null and
	 * correct ops are always set.
	 */

	if (!team->en_port_count || !team_is_mode_set(team) ||
	    !team->mode->ops->transmit)
		team->ops.transmit = team_dummy_transmit;
	else
		team->ops.transmit = team->mode->ops->transmit;

	if (!team->en_port_count || !team_is_mode_set(team) ||
	    !team->mode->ops->receive)
		team->ops.receive = team_dummy_receive;
	else
		team->ops.receive = team->mode->ops->receive;
}

/*
 * We can benefit from the fact that it's ensured no port is present
 * at the time of mode change. Therefore no packets are in flight so there's
 * no need to set mode operations in any special way.
 */
static int __team_change_mode(struct team *team,
			      const struct team_mode *new_mode)
{
	/* Check if mode was previously set and do cleanup if so */
	if (team_is_mode_set(team)) {
		void (*exit_op)(struct team *team) = team->ops.exit;

		/* Clear ops area so no callback is called any longer */
		memset(&team->ops, 0, sizeof(struct team_mode_ops));
		team_adjust_ops(team);

		if (exit_op)
			exit_op(team);
		team_mode_put(team->mode);
		team_set_no_mode(team);
		/* zero private data area */
		memset(&team->mode_priv, 0,
		       sizeof(struct team) - offsetof(struct team, mode_priv));
	}

	if (!new_mode)
		return 0;

	if (new_mode->ops->init) {
		int err;

		err = new_mode->ops->init(team);
		if (err)
			return err;
	}

	team->mode = new_mode;
	memcpy(&team->ops, new_mode->ops, sizeof(struct team_mode_ops));
	team_adjust_ops(team);

	return 0;
}

static int team_change_mode(struct team *team, const char *kind)
{
	const struct team_mode *new_mode;
	struct net_device *dev = team->dev;
	int err;

	if (!list_empty(&team->port_list)) {
		netdev_err(dev, "No ports can be present during mode change\n");
		return -EBUSY;
	}

	if (team_is_mode_set(team) && strcmp(team->mode->kind, kind) == 0) {
		netdev_err(dev, "Unable to change to the same mode the team is in\n");
		return -EINVAL;
	}

	new_mode = team_mode_get(kind);
	if (!new_mode) {
		netdev_err(dev, "Mode \"%s\" not found\n", kind);
		return -EINVAL;
	}

	err = __team_change_mode(team, new_mode);
	if (err) {
		netdev_err(dev, "Failed to change to mode \"%s\"\n", kind);
		team_mode_put(new_mode);
		return err;
	}

	netdev_info(dev, "Mode changed to \"%s\"\n", kind);
	return 0;
}

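/*
 * Note: team_change_mode() is reached from userspace through the "mode"
 * string option defined below (team_mode_option_set), so the no-ports rule
 * above applies to e.g. teamd configuring a freshly created device before
 * enslaving any ports.
 */
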
/*********************
 * Peers notification
 *********************/

static void team_notify_peers_work(struct work_struct *work)
{
	struct team *team;
	int val;

	team = container_of(work, struct team, notify_peers.dw.work);

	if (!rtnl_trylock()) {
		schedule_delayed_work(&team->notify_peers.dw, 0);
		return;
	}
	val = atomic_dec_if_positive(&team->notify_peers.count_pending);
	if (val < 0) {
		rtnl_unlock();
		return;
	}
	call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, team->dev);
	rtnl_unlock();
	if (val)
		schedule_delayed_work(&team->notify_peers.dw,
				      msecs_to_jiffies(team->notify_peers.interval));
}

static void team_notify_peers(struct team *team)
{
	if (!team->notify_peers.count || !netif_running(team->dev))
		return;
	atomic_add(team->notify_peers.count, &team->notify_peers.count_pending);
	schedule_delayed_work(&team->notify_peers.dw, 0);
}

static void team_notify_peers_init(struct team *team)
{
	INIT_DELAYED_WORK(&team->notify_peers.dw, team_notify_peers_work);
}

static void team_notify_peers_fini(struct team *team)
{
	cancel_delayed_work_sync(&team->notify_peers.dw);
}

/*******************************
 * Send multicast group rejoins
 *******************************/

static void team_mcast_rejoin_work(struct work_struct *work)
{
	struct team *team;
	int val;

	team = container_of(work, struct team, mcast_rejoin.dw.work);

	if (!rtnl_trylock()) {
		schedule_delayed_work(&team->mcast_rejoin.dw, 0);
		return;
	}
	val = atomic_dec_if_positive(&team->mcast_rejoin.count_pending);
	if (val < 0) {
		rtnl_unlock();
		return;
	}
	call_netdevice_notifiers(NETDEV_RESEND_IGMP, team->dev);
	rtnl_unlock();
	if (val)
		schedule_delayed_work(&team->mcast_rejoin.dw,
				      msecs_to_jiffies(team->mcast_rejoin.interval));
}

static void team_mcast_rejoin(struct team *team)
{
	if (!team->mcast_rejoin.count || !netif_running(team->dev))
		return;
	atomic_add(team->mcast_rejoin.count, &team->mcast_rejoin.count_pending);
	schedule_delayed_work(&team->mcast_rejoin.dw, 0);
}

static void team_mcast_rejoin_init(struct team *team)
{
	INIT_DELAYED_WORK(&team->mcast_rejoin.dw, team_mcast_rejoin_work);
}

static void team_mcast_rejoin_fini(struct team *team)
{
	cancel_delayed_work_sync(&team->mcast_rejoin.dw);
}

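/*
 * The NETDEV_NOTIFY_PEERS and NETDEV_RESEND_IGMP emitters above share one
 * pattern: the trigger functions add "count" events to count_pending and
 * kick the delayed work, which re-arms itself every "interval" milliseconds
 * until the pending counter drains, using rtnl_trylock() so it never blocks
 * the workqueue waiting for RTNL.
 */
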
/************************
 * Rx path frame handler
 ************************/

/* note: already called with rcu_read_lock */
static rx_handler_result_t team_handle_frame(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	struct team_port *port;
	struct team *team;
	rx_handler_result_t res;

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (!skb)
		return RX_HANDLER_CONSUMED;

	*pskb = skb;

	port = team_port_get_rcu(skb->dev);
	team = port->team;
	if (!team_port_enabled(port)) {
		/* allow exact match delivery for disabled ports */
		res = RX_HANDLER_EXACT;
	} else {
		res = team->ops.receive(team, port, skb);
	}
	if (res == RX_HANDLER_ANOTHER) {
		struct team_pcpu_stats *pcpu_stats;

		pcpu_stats = this_cpu_ptr(team->pcpu_stats);
		u64_stats_update_begin(&pcpu_stats->syncp);
		pcpu_stats->rx_packets++;
		pcpu_stats->rx_bytes += skb->len;
		if (skb->pkt_type == PACKET_MULTICAST)
			pcpu_stats->rx_multicast++;
		u64_stats_update_end(&pcpu_stats->syncp);

		skb->dev = team->dev;
	} else if (res == RX_HANDLER_EXACT) {
		this_cpu_inc(team->pcpu_stats->rx_nohandler);
	} else {
		this_cpu_inc(team->pcpu_stats->rx_dropped);
	}

	return res;
}

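/*
 * Return-value contract with the mode receive op, as handled above:
 * RX_HANDLER_ANOTHER means the skb was retargeted to the team device and is
 * accounted as team rx; RX_HANDLER_EXACT leaves delivery to exact-match
 * handlers only (counted as rx_nohandler); anything else counts as
 * rx_dropped.
 */
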
/*************************************
 * Multiqueue Tx port select override
 *************************************/

static int team_queue_override_init(struct team *team)
{
	struct list_head *listarr;
	unsigned int queue_cnt = team->dev->num_tx_queues - 1;
	unsigned int i;

	if (!queue_cnt)
		return 0;
	listarr = kmalloc(sizeof(struct list_head) * queue_cnt, GFP_KERNEL);
	if (!listarr)
		return -ENOMEM;
	team->qom_lists = listarr;
	for (i = 0; i < queue_cnt; i++)
		INIT_LIST_HEAD(listarr++);
	return 0;
}

static void team_queue_override_fini(struct team *team)
{
	kfree(team->qom_lists);
}

static struct list_head *__team_get_qom_list(struct team *team, u16 queue_id)
{
	return &team->qom_lists[queue_id - 1];
}

/*
 * note: already called with rcu_read_lock
 */
static bool team_queue_override_transmit(struct team *team, struct sk_buff *skb)
{
	struct list_head *qom_list;
	struct team_port *port;

	if (!team->queue_override_enabled || !skb->queue_mapping)
		return false;
	qom_list = __team_get_qom_list(team, skb->queue_mapping);
	list_for_each_entry_rcu(port, qom_list, qom_list) {
		if (!team_dev_queue_xmit(team, port, skb))
			return true;
	}
	return false;
}

static void __team_queue_override_port_del(struct team *team,
					   struct team_port *port)
{
	if (!port->queue_id)
		return;
	list_del_rcu(&port->qom_list);
}

static bool team_queue_override_port_has_gt_prio_than(struct team_port *port,
						      struct team_port *cur)
{
	if (port->priority < cur->priority)
		return false;
	if (port->priority > cur->priority)
		return true;
	if (port->index < cur->index)
		return true;
	return false;
}

static void __team_queue_override_port_add(struct team *team,
					   struct team_port *port)
{
	struct team_port *cur;
	struct list_head *qom_list;
	struct list_head *node;

	if (!port->queue_id)
		return;
	qom_list = __team_get_qom_list(team, port->queue_id);
	node = qom_list;
	list_for_each_entry(cur, qom_list, qom_list) {
		if (team_queue_override_port_has_gt_prio_than(port, cur))
			break;
		node = &cur->qom_list;
	}
	list_add_tail_rcu(&port->qom_list, node);
}

static void __team_queue_override_enabled_check(struct team *team)
{
	struct team_port *port;
	bool enabled = false;

	list_for_each_entry(port, &team->port_list, list) {
		if (port->queue_id) {
			enabled = true;
			break;
		}
	}
	if (enabled == team->queue_override_enabled)
		return;
	netdev_dbg(team->dev, "%s queue override\n",
		   enabled ? "Enabling" : "Disabling");
	team->queue_override_enabled = enabled;
}

static void team_queue_override_port_prio_changed(struct team *team,
						  struct team_port *port)
{
	if (!port->queue_id || team_port_enabled(port))
		return;
	__team_queue_override_port_del(team, port);
	__team_queue_override_port_add(team, port);
	__team_queue_override_enabled_check(team);
}

static void team_queue_override_port_change_queue_id(struct team *team,
						     struct team_port *port,
						     u16 new_queue_id)
{
	if (team_port_enabled(port)) {
		__team_queue_override_port_del(team, port);
		port->queue_id = new_queue_id;
		__team_queue_override_port_add(team, port);
		__team_queue_override_enabled_check(team);
	} else {
		port->queue_id = new_queue_id;
	}
}

static void team_queue_override_port_add(struct team *team,
					 struct team_port *port)
{
	__team_queue_override_port_add(team, port);
	__team_queue_override_enabled_check(team);
}

static void team_queue_override_port_del(struct team *team,
					 struct team_port *port)
{
	__team_queue_override_port_del(team, port);
	__team_queue_override_enabled_check(team);
}

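/*
 * Summary of the override machinery above: a port sits on the qom_list for
 * its non-zero queue_id, ordered by the per-port "priority" option with
 * port->index as the tie-breaker, and team_queue_override_transmit() walks
 * that list for skbs whose queue_mapping matches. Membership is maintained
 * from the port enable/disable paths via the two wrappers above.
 */
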
/****************
 * Port handling
 ****************/

static bool team_port_find(const struct team *team,
			   const struct team_port *port)
{
	struct team_port *cur;

	list_for_each_entry(cur, &team->port_list, list)
		if (cur == port)
			return true;
	return false;
}

/*
 * Enable/disable port by adding to enabled port hashlist and setting
 * port->index (Might be racy so reader could see incorrect ifindex when
 * processing a flying packet, but that is not a problem). Write guarded
 * by team->lock.
 */
static void team_port_enable(struct team *team,
			     struct team_port *port)
{
	if (team_port_enabled(port))
		return;
	port->index = team->en_port_count++;
	hlist_add_head_rcu(&port->hlist,
			   team_port_index_hash(team, port->index));
	team_adjust_ops(team);
	team_queue_override_port_add(team, port);
	if (team->ops.port_enabled)
		team->ops.port_enabled(team, port);
	team_notify_peers(team);
	team_mcast_rejoin(team);
	team_lower_state_changed(port);
}

static void __reconstruct_port_hlist(struct team *team, int rm_index)
{
	int i;
	struct team_port *port;

	for (i = rm_index + 1; i < team->en_port_count; i++) {
		port = team_get_port_by_index(team, i);
		hlist_del_rcu(&port->hlist);
		port->index--;
		hlist_add_head_rcu(&port->hlist,
				   team_port_index_hash(team, port->index));
	}
}

static void team_port_disable(struct team *team,
			      struct team_port *port)
{
	if (!team_port_enabled(port))
		return;
	if (team->ops.port_disabled)
		team->ops.port_disabled(team, port);
	hlist_del_rcu(&port->hlist);
	__reconstruct_port_hlist(team, port->index);
	port->index = -1;
	team->en_port_count--;
	team_queue_override_port_del(team, port);
	team_adjust_ops(team);
	team_lower_state_changed(port);
}

#define TEAM_VLAN_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | \
			    NETIF_F_FRAGLIST | NETIF_F_ALL_TSO | \
			    NETIF_F_HIGHDMA | NETIF_F_LRO)

#define TEAM_ENC_FEATURES  (NETIF_F_HW_CSUM | NETIF_F_SG | \
			    NETIF_F_RXCSUM | NETIF_F_ALL_TSO)

static void ___team_compute_features(struct team *team)
{
	struct team_port *port;
	netdev_features_t vlan_features = TEAM_VLAN_FEATURES &
					  NETIF_F_ALL_FOR_ALL;
	netdev_features_t enc_features = TEAM_ENC_FEATURES;
	unsigned short max_hard_header_len = ETH_HLEN;
	unsigned int dst_release_flag = IFF_XMIT_DST_RELEASE |
					IFF_XMIT_DST_RELEASE_PERM;

	list_for_each_entry(port, &team->port_list, list) {
		vlan_features = netdev_increment_features(vlan_features,
					port->dev->vlan_features,
					TEAM_VLAN_FEATURES);
		enc_features =
			netdev_increment_features(enc_features,
						  port->dev->hw_enc_features,
						  TEAM_ENC_FEATURES);

		dst_release_flag &= port->dev->priv_flags;
		if (port->dev->hard_header_len > max_hard_header_len)
			max_hard_header_len = port->dev->hard_header_len;
	}

	team->dev->vlan_features = vlan_features;
	team->dev->hw_enc_features = enc_features | NETIF_F_GSO_ENCAP_ALL |
				     NETIF_F_HW_VLAN_CTAG_TX |
				     NETIF_F_HW_VLAN_STAG_TX;
	team->dev->hard_header_len = max_hard_header_len;

	team->dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
	if (dst_release_flag == (IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM))
		team->dev->priv_flags |= IFF_XMIT_DST_RELEASE;
}

static void __team_compute_features(struct team *team)
{
	___team_compute_features(team);
	netdev_change_features(team->dev);
}

static void team_compute_features(struct team *team)
{
	mutex_lock(&team->lock);
	___team_compute_features(team);
	mutex_unlock(&team->lock);
	netdev_change_features(team->dev);
}

static int team_port_enter(struct team *team, struct team_port *port)
{
	int err = 0;

	dev_hold(team->dev);
	if (team->ops.port_enter) {
		err = team->ops.port_enter(team, port);
		if (err) {
			netdev_err(team->dev, "Device %s failed to enter team mode\n",
				   port->dev->name);
			goto err_port_enter;
		}
	}

	return 0;

err_port_enter:
	dev_put(team->dev);

	return err;
}

static void team_port_leave(struct team *team, struct team_port *port)
{
	if (team->ops.port_leave)
		team->ops.port_leave(team, port);
	dev_put(team->dev);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static int __team_port_enable_netpoll(struct team_port *port)
{
	struct netpoll *np;
	int err;

	np = kzalloc(sizeof(*np), GFP_KERNEL);
	if (!np)
		return -ENOMEM;

	err = __netpoll_setup(np, port->dev);
	if (err) {
		kfree(np);
		return err;
	}
	port->np = np;
	return err;
}

static int team_port_enable_netpoll(struct team_port *port)
{
	if (!port->team->dev->npinfo)
		return 0;

	return __team_port_enable_netpoll(port);
}

static void team_port_disable_netpoll(struct team_port *port)
{
	struct netpoll *np = port->np;

	if (!np)
		return;
	port->np = NULL;

	/* Wait for transmitting packets to finish before freeing. */
	synchronize_rcu_bh();
	__netpoll_cleanup(np);
	kfree(np);
}
#else
static int team_port_enable_netpoll(struct team_port *port)
{
	return 0;
}
static void team_port_disable_netpoll(struct team_port *port)
{
}
#endif

static int team_upper_dev_link(struct team *team, struct team_port *port)
{
	struct netdev_lag_upper_info lag_upper_info;
	int err;

	lag_upper_info.tx_type = team->mode->lag_tx_type;
	err = netdev_master_upper_dev_link(port->dev, team->dev, NULL,
					   &lag_upper_info);
	if (err)
		return err;
	port->dev->priv_flags |= IFF_TEAM_PORT;
	return 0;
}

static void team_upper_dev_unlink(struct team *team, struct team_port *port)
{
	netdev_upper_dev_unlink(port->dev, team->dev);
	port->dev->priv_flags &= ~IFF_TEAM_PORT;
}

static void __team_port_change_port_added(struct team_port *port, bool linkup);
static int team_dev_type_check_change(struct net_device *dev,
				      struct net_device *port_dev);

static int team_port_add(struct team *team, struct net_device *port_dev)
{
	struct net_device *dev = team->dev;
	struct team_port *port;
	char *portname = port_dev->name;
	int err;

	if (port_dev->flags & IFF_LOOPBACK) {
		netdev_err(dev, "Device %s is loopback device. Loopback devices can't be added as a team port\n",
			   portname);
		return -EINVAL;
	}

	if (team_port_exists(port_dev)) {
		netdev_err(dev, "Device %s is already a port of a team device\n",
			   portname);
		return -EBUSY;
	}

	if (dev == port_dev) {
		netdev_err(dev, "Cannot enslave team device to itself\n");
		return -EINVAL;
	}

	if (netdev_has_upper_dev(dev, port_dev)) {
		netdev_err(dev, "Device %s is already an upper device of the team interface\n",
			   portname);
		return -EBUSY;
	}

	if (port_dev->features & NETIF_F_VLAN_CHALLENGED &&
	    vlan_uses_dev(dev)) {
		netdev_err(dev, "Device %s is VLAN challenged and team device has VLAN set up\n",
			   portname);
		return -EPERM;
	}

	err = team_dev_type_check_change(dev, port_dev);
	if (err)
		return err;

	if (port_dev->flags & IFF_UP) {
		netdev_err(dev, "Device %s is up. Set it down before adding it as a team port\n",
			   portname);
		return -EBUSY;
	}

	port = kzalloc(sizeof(struct team_port) + team->mode->port_priv_size,
		       GFP_KERNEL);
	if (!port)
		return -ENOMEM;

	port->dev = port_dev;
	port->team = team;
	INIT_LIST_HEAD(&port->qom_list);

	port->orig.mtu = port_dev->mtu;
	err = dev_set_mtu(port_dev, dev->mtu);
	if (err) {
		netdev_dbg(dev, "Error %d calling dev_set_mtu\n", err);
		goto err_set_mtu;
	}

	memcpy(port->orig.dev_addr, port_dev->dev_addr, port_dev->addr_len);

	err = team_port_enter(team, port);
	if (err) {
		netdev_err(dev, "Device %s failed to enter team mode\n",
			   portname);
		goto err_port_enter;
	}

	err = dev_open(port_dev);
	if (err) {
		netdev_dbg(dev, "Device %s opening failed\n",
			   portname);
		goto err_dev_open;
	}

	err = vlan_vids_add_by_dev(port_dev, dev);
	if (err) {
		netdev_err(dev, "Failed to add vlan ids to device %s\n",
			   portname);
		goto err_vids_add;
	}

	err = team_port_enable_netpoll(port);
	if (err) {
		netdev_err(dev, "Failed to enable netpoll on device %s\n",
			   portname);
		goto err_enable_netpoll;
	}

	if (!(dev->features & NETIF_F_LRO))
		dev_disable_lro(port_dev);

	err = netdev_rx_handler_register(port_dev, team_handle_frame,
					 port);
	if (err) {
		netdev_err(dev, "Device %s failed to register rx_handler\n",
			   portname);
		goto err_handler_register;
	}

	err = team_upper_dev_link(team, port);
	if (err) {
		netdev_err(dev, "Device %s failed to set upper link\n",
			   portname);
		goto err_set_upper_link;
	}

	err = __team_option_inst_add_port(team, port);
	if (err) {
		netdev_err(dev, "Device %s failed to add per-port options\n",
			   portname);
		goto err_option_port_add;
	}

	/* set promiscuity level to new slave */
	if (dev->flags & IFF_PROMISC) {
		err = dev_set_promiscuity(port_dev, 1);
		if (err)
			goto err_set_slave_promisc;
	}

	/* set allmulti level to new slave */
	if (dev->flags & IFF_ALLMULTI) {
		err = dev_set_allmulti(port_dev, 1);
		if (err) {
			if (dev->flags & IFF_PROMISC)
				dev_set_promiscuity(port_dev, -1);
			goto err_set_slave_promisc;
		}
	}

	if (dev->flags & IFF_UP) {
		netif_addr_lock_bh(dev);
		dev_uc_sync_multiple(port_dev, dev);
		dev_mc_sync_multiple(port_dev, dev);
		netif_addr_unlock_bh(dev);
	}

	port->index = -1;
	list_add_tail_rcu(&port->list, &team->port_list);
	team_port_enable(team, port);
	__team_compute_features(team);
	__team_port_change_port_added(port, !!netif_carrier_ok(port_dev));
	__team_options_change_check(team);

	netdev_info(dev, "Port device %s added\n", portname);

	return 0;

err_set_slave_promisc:
	__team_option_inst_del_port(team, port);

err_option_port_add:
	team_upper_dev_unlink(team, port);

err_set_upper_link:
	netdev_rx_handler_unregister(port_dev);

err_handler_register:
	team_port_disable_netpoll(port);

err_enable_netpoll:
	vlan_vids_del_by_dev(port_dev, dev);

err_vids_add:
	dev_close(port_dev);

err_dev_open:
	team_port_leave(team, port);
	team_port_set_orig_dev_addr(port);

err_port_enter:
	dev_set_mtu(port_dev, port->orig.mtu);

err_set_mtu:
	kfree(port);

	return err;
}

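/*
 * team_port_add() above follows the usual kernel unwind-ladder style: each
 * failure point jumps to the label that undoes everything set up before it,
 * in exact reverse order of the setup steps, so a partially enslaved port
 * is always fully rolled back.
 */
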
static void __team_port_change_port_removed(struct team_port *port);

static int team_port_del(struct team *team, struct net_device *port_dev)
{
	struct net_device *dev = team->dev;
	struct team_port *port;
	char *portname = port_dev->name;

	port = team_port_get_rtnl(port_dev);
	if (!port || !team_port_find(team, port)) {
		netdev_err(dev, "Device %s does not act as a port of this team\n",
			   portname);
		return -ENOENT;
	}

	team_port_disable(team, port);
	list_del_rcu(&port->list);

	if (dev->flags & IFF_PROMISC)
		dev_set_promiscuity(port_dev, -1);
	if (dev->flags & IFF_ALLMULTI)
		dev_set_allmulti(port_dev, -1);

	team_upper_dev_unlink(team, port);
	netdev_rx_handler_unregister(port_dev);
	team_port_disable_netpoll(port);
	vlan_vids_del_by_dev(port_dev, dev);
	if (dev->flags & IFF_UP) {
		dev_uc_unsync(port_dev, dev);
		dev_mc_unsync(port_dev, dev);
	}
	dev_close(port_dev);
	team_port_leave(team, port);

	__team_option_inst_mark_removed_port(team, port);
	__team_options_change_check(team);
	__team_option_inst_del_port(team, port);
	__team_port_change_port_removed(port);

	team_port_set_orig_dev_addr(port);
	dev_set_mtu(port_dev, port->orig.mtu);
	kfree_rcu(port, rcu);
	netdev_info(dev, "Port device %s removed\n", portname);
	__team_compute_features(team);

	return 0;
}

static int team_mode_option_get(struct team *team, struct team_gsetter_ctx *ctx)
{
	ctx->data.str_val = team->mode->kind;
	return 0;
}

static int team_mode_option_set(struct team *team, struct team_gsetter_ctx *ctx)
{
	return team_change_mode(team, ctx->data.str_val);
}

static int team_notify_peers_count_get(struct team *team,
				       struct team_gsetter_ctx *ctx)
{
	ctx->data.u32_val = team->notify_peers.count;
	return 0;
}

static int team_notify_peers_count_set(struct team *team,
				       struct team_gsetter_ctx *ctx)
{
	team->notify_peers.count = ctx->data.u32_val;
	return 0;
}

static int team_notify_peers_interval_get(struct team *team,
					  struct team_gsetter_ctx *ctx)
{
	ctx->data.u32_val = team->notify_peers.interval;
	return 0;
}

static int team_notify_peers_interval_set(struct team *team,
					  struct team_gsetter_ctx *ctx)
{
	team->notify_peers.interval = ctx->data.u32_val;
	return 0;
}

static int team_mcast_rejoin_count_get(struct team *team,
				       struct team_gsetter_ctx *ctx)
{
	ctx->data.u32_val = team->mcast_rejoin.count;
	return 0;
}

static int team_mcast_rejoin_count_set(struct team *team,
				       struct team_gsetter_ctx *ctx)
{
	team->mcast_rejoin.count = ctx->data.u32_val;
	return 0;
}

static int team_mcast_rejoin_interval_get(struct team *team,
					  struct team_gsetter_ctx *ctx)
{
	ctx->data.u32_val = team->mcast_rejoin.interval;
	return 0;
}

static int team_mcast_rejoin_interval_set(struct team *team,
					  struct team_gsetter_ctx *ctx)
{
	team->mcast_rejoin.interval = ctx->data.u32_val;
	return 0;
}

static int team_port_en_option_get(struct team *team,
				   struct team_gsetter_ctx *ctx)
{
	struct team_port *port = ctx->info->port;

	ctx->data.bool_val = team_port_enabled(port);
	return 0;
}

static int team_port_en_option_set(struct team *team,
				   struct team_gsetter_ctx *ctx)
{
	struct team_port *port = ctx->info->port;

	if (ctx->data.bool_val)
		team_port_enable(team, port);
	else
		team_port_disable(team, port);
	return 0;
}

static int team_user_linkup_option_get(struct team *team,
				       struct team_gsetter_ctx *ctx)
{
	struct team_port *port = ctx->info->port;

	ctx->data.bool_val = port->user.linkup;
	return 0;
}

static void __team_carrier_check(struct team *team);

static int team_user_linkup_option_set(struct team *team,
				       struct team_gsetter_ctx *ctx)
{
	struct team_port *port = ctx->info->port;

	port->user.linkup = ctx->data.bool_val;
	team_refresh_port_linkup(port);
	__team_carrier_check(port->team);
	return 0;
}

static int team_user_linkup_en_option_get(struct team *team,
					  struct team_gsetter_ctx *ctx)
{
	struct team_port *port = ctx->info->port;

	ctx->data.bool_val = port->user.linkup_enabled;
	return 0;
}

static int team_user_linkup_en_option_set(struct team *team,
					  struct team_gsetter_ctx *ctx)
{
	struct team_port *port = ctx->info->port;

	port->user.linkup_enabled = ctx->data.bool_val;
	team_refresh_port_linkup(port);
	__team_carrier_check(port->team);
	return 0;
}

static int team_priority_option_get(struct team *team,
				    struct team_gsetter_ctx *ctx)
{
	struct team_port *port = ctx->info->port;

	ctx->data.s32_val = port->priority;
	return 0;
}

static int team_priority_option_set(struct team *team,
				    struct team_gsetter_ctx *ctx)
{
	struct team_port *port = ctx->info->port;
	s32 priority = ctx->data.s32_val;

	if (port->priority == priority)
		return 0;
	port->priority = priority;
	team_queue_override_port_prio_changed(team, port);
	return 0;
}

static int team_queue_id_option_get(struct team *team,
				    struct team_gsetter_ctx *ctx)
{
	struct team_port *port = ctx->info->port;

	ctx->data.u32_val = port->queue_id;
	return 0;
}

static int team_queue_id_option_set(struct team *team,
				    struct team_gsetter_ctx *ctx)
{
	struct team_port *port = ctx->info->port;
	u16 new_queue_id = ctx->data.u32_val;

	if (port->queue_id == new_queue_id)
		return 0;
	if (new_queue_id >= team->dev->real_num_tx_queues)
		return -EINVAL;
	team_queue_override_port_change_queue_id(team, port, new_queue_id);
	return 0;
}

static const struct team_option team_options[] = {
	{
		.name = "mode",
		.type = TEAM_OPTION_TYPE_STRING,
		.getter = team_mode_option_get,
		.setter = team_mode_option_set,
	},
	{
		.name = "notify_peers_count",
		.type = TEAM_OPTION_TYPE_U32,
		.getter = team_notify_peers_count_get,
		.setter = team_notify_peers_count_set,
	},
	{
		.name = "notify_peers_interval",
		.type = TEAM_OPTION_TYPE_U32,
		.getter = team_notify_peers_interval_get,
		.setter = team_notify_peers_interval_set,
	},
	{
		.name = "mcast_rejoin_count",
		.type = TEAM_OPTION_TYPE_U32,
		.getter = team_mcast_rejoin_count_get,
		.setter = team_mcast_rejoin_count_set,
	},
	{
		.name = "mcast_rejoin_interval",
		.type = TEAM_OPTION_TYPE_U32,
		.getter = team_mcast_rejoin_interval_get,
		.setter = team_mcast_rejoin_interval_set,
	},
	{
		.name = "enabled",
		.type = TEAM_OPTION_TYPE_BOOL,
		.per_port = true,
		.getter = team_port_en_option_get,
		.setter = team_port_en_option_set,
	},
	{
		.name = "user_linkup",
		.type = TEAM_OPTION_TYPE_BOOL,
		.per_port = true,
		.getter = team_user_linkup_option_get,
		.setter = team_user_linkup_option_set,
	},
	{
		.name = "user_linkup_enabled",
		.type = TEAM_OPTION_TYPE_BOOL,
		.per_port = true,
		.getter = team_user_linkup_en_option_get,
		.setter = team_user_linkup_en_option_set,
	},
	{
		.name = "priority",
		.type = TEAM_OPTION_TYPE_S32,
		.per_port = true,
		.getter = team_priority_option_get,
		.setter = team_priority_option_set,
	},
	{
		.name = "queue_id",
		.type = TEAM_OPTION_TYPE_U32,
		.per_port = true,
		.getter = team_queue_id_option_get,
		.setter = team_queue_id_option_set,
	},
};

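/*
 * From userspace these options are driven over the custom generic netlink
 * family defined below (TEAM_CMD_OPTIONS_GET/TEAM_CMD_OPTIONS_SET), which
 * is what libteam wraps; for instance, teamd sets "mode" once at startup
 * and toggles the per-port "enabled" option as link state changes.
 */
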
static int team_init(struct net_device *dev)
{
	struct team *team = netdev_priv(dev);
	int i;
	int err;

	team->dev = dev;
	mutex_init(&team->lock);
	team_set_no_mode(team);

	team->pcpu_stats = netdev_alloc_pcpu_stats(struct team_pcpu_stats);
	if (!team->pcpu_stats)
		return -ENOMEM;

	for (i = 0; i < TEAM_PORT_HASHENTRIES; i++)
		INIT_HLIST_HEAD(&team->en_port_hlist[i]);
	INIT_LIST_HEAD(&team->port_list);
	err = team_queue_override_init(team);
	if (err)
		goto err_team_queue_override_init;

	team_adjust_ops(team);

	INIT_LIST_HEAD(&team->option_list);
	INIT_LIST_HEAD(&team->option_inst_list);

	team_notify_peers_init(team);
	team_mcast_rejoin_init(team);

	err = team_options_register(team, team_options, ARRAY_SIZE(team_options));
	if (err)
		goto err_options_register;
	netif_carrier_off(dev);

	netdev_lockdep_set_classes(dev);

	return 0;

err_options_register:
	team_mcast_rejoin_fini(team);
	team_notify_peers_fini(team);
	team_queue_override_fini(team);
err_team_queue_override_init:
	free_percpu(team->pcpu_stats);

	return err;
}

static void team_uninit(struct net_device *dev)
{
	struct team *team = netdev_priv(dev);
	struct team_port *port;
	struct team_port *tmp;

	mutex_lock(&team->lock);
	list_for_each_entry_safe(port, tmp, &team->port_list, list)
		team_port_del(team, port->dev);

	__team_change_mode(team, NULL); /* cleanup */
	__team_options_unregister(team, team_options, ARRAY_SIZE(team_options));
	team_mcast_rejoin_fini(team);
	team_notify_peers_fini(team);
	team_queue_override_fini(team);
	mutex_unlock(&team->lock);
}

static void team_destructor(struct net_device *dev)
{
	struct team *team = netdev_priv(dev);

	free_percpu(team->pcpu_stats);
	free_netdev(dev);
}

static int team_open(struct net_device *dev)
{
	return 0;
}

static int team_close(struct net_device *dev)
{
	struct team *team = netdev_priv(dev);
	struct team_port *port;

	list_for_each_entry(port, &team->port_list, list) {
		dev_uc_unsync(port->dev, dev);
		dev_mc_unsync(port->dev, dev);
	}

	return 0;
}

/*
 * note: already called with rcu_read_lock
 */
static netdev_tx_t team_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct team *team = netdev_priv(dev);
	bool tx_success;
	unsigned int len = skb->len;

	tx_success = team_queue_override_transmit(team, skb);
	if (!tx_success)
		tx_success = team->ops.transmit(team, skb);
	if (tx_success) {
		struct team_pcpu_stats *pcpu_stats;

		pcpu_stats = this_cpu_ptr(team->pcpu_stats);
		u64_stats_update_begin(&pcpu_stats->syncp);
		pcpu_stats->tx_packets++;
		pcpu_stats->tx_bytes += len;
		u64_stats_update_end(&pcpu_stats->syncp);
	} else {
		this_cpu_inc(team->pcpu_stats->tx_dropped);
	}

	return NETDEV_TX_OK;
}

static u16 team_select_queue(struct net_device *dev, struct sk_buff *skb,
			     void *accel_priv, select_queue_fallback_t fallback)
{
	/*
	 * This helper function exists to help dev_pick_tx get the correct
	 * destination queue. Using a helper function skips a call to
	 * skb_tx_hash and will put the skbs in the queue we expect on their
	 * way down to the team driver.
	 */
	u16 txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) : 0;

	/*
	 * Save the original txq to restore before passing to the driver
	 */
	qdisc_skb_cb(skb)->slave_dev_queue_mapping = skb->queue_mapping;

	if (unlikely(txq >= dev->real_num_tx_queues)) {
		do {
			txq -= dev->real_num_tx_queues;
		} while (txq >= dev->real_num_tx_queues);
	}
	return txq;
}

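/*
 * The queue_mapping saved above is restored by team_dev_queue_xmit() (see
 * include/linux/if_team.h) just before the skb is handed to the port
 * device, so the override logic sees the team-level queue while the port
 * device still picks its own tx queue normally.
 */
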
static void team_change_rx_flags(struct net_device *dev, int change)
{
	struct team *team = netdev_priv(dev);
	struct team_port *port;
	int inc;

	rcu_read_lock();
	list_for_each_entry_rcu(port, &team->port_list, list) {
		if (change & IFF_PROMISC) {
			inc = dev->flags & IFF_PROMISC ? 1 : -1;
			dev_set_promiscuity(port->dev, inc);
		}
		if (change & IFF_ALLMULTI) {
			inc = dev->flags & IFF_ALLMULTI ? 1 : -1;
			dev_set_allmulti(port->dev, inc);
		}
	}
	rcu_read_unlock();
}

static void team_set_rx_mode(struct net_device *dev)
{
	struct team *team = netdev_priv(dev);
	struct team_port *port;

	rcu_read_lock();
	list_for_each_entry_rcu(port, &team->port_list, list) {
		dev_uc_sync_multiple(port->dev, dev);
		dev_mc_sync_multiple(port->dev, dev);
	}
	rcu_read_unlock();
}

static int team_set_mac_address(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;
	struct team *team = netdev_priv(dev);
	struct team_port *port;

	if (dev->type == ARPHRD_ETHER && !is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;
	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	mutex_lock(&team->lock);
	list_for_each_entry(port, &team->port_list, list)
		if (team->ops.port_change_dev_addr)
			team->ops.port_change_dev_addr(team, port);
	mutex_unlock(&team->lock);
	return 0;
}

static int team_change_mtu(struct net_device *dev, int new_mtu)
{
	struct team *team = netdev_priv(dev);
	struct team_port *port;
	int err;

	/*
	 * Although this is reader, it's guarded by team lock. It's not possible
	 * to traverse list in reverse under rcu_read_lock
	 */
	mutex_lock(&team->lock);
	team->port_mtu_change_allowed = true;
	list_for_each_entry(port, &team->port_list, list) {
		err = dev_set_mtu(port->dev, new_mtu);
		if (err) {
			netdev_err(dev, "Device %s failed to change mtu",
				   port->dev->name);
			goto unwind;
		}
	}
	team->port_mtu_change_allowed = false;
	mutex_unlock(&team->lock);

	dev->mtu = new_mtu;

	return 0;

unwind:
	list_for_each_entry_continue_reverse(port, &team->port_list, list)
		dev_set_mtu(port->dev, dev->mtu);
	team->port_mtu_change_allowed = false;
	mutex_unlock(&team->lock);

	return err;
}

static struct rtnl_link_stats64 *
team_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	struct team *team = netdev_priv(dev);
	struct team_pcpu_stats *p;
	u64 rx_packets, rx_bytes, rx_multicast, tx_packets, tx_bytes;
	u32 rx_dropped = 0, tx_dropped = 0, rx_nohandler = 0;
	unsigned int start;
	int i;

	for_each_possible_cpu(i) {
		p = per_cpu_ptr(team->pcpu_stats, i);
		do {
			start = u64_stats_fetch_begin_irq(&p->syncp);
			rx_packets	= p->rx_packets;
			rx_bytes	= p->rx_bytes;
			rx_multicast	= p->rx_multicast;
			tx_packets	= p->tx_packets;
			tx_bytes	= p->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));

		stats->rx_packets	+= rx_packets;
		stats->rx_bytes		+= rx_bytes;
		stats->multicast	+= rx_multicast;
		stats->tx_packets	+= tx_packets;
		stats->tx_bytes		+= tx_bytes;
		/*
		 * rx_dropped, tx_dropped & rx_nohandler are u32,
		 * updated without syncp protection.
		 */
		rx_dropped	+= p->rx_dropped;
		tx_dropped	+= p->tx_dropped;
		rx_nohandler	+= p->rx_nohandler;
	}
	stats->rx_dropped	= rx_dropped;
	stats->tx_dropped	= tx_dropped;
	stats->rx_nohandler	= rx_nohandler;
	return stats;
}

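/*
 * The u64 counters are read under the per-cpu u64_stats seqcount so that
 * 32-bit hosts see consistent 64-bit values; the u32 drop counters skip
 * that protection deliberately, as the comment above notes, since a torn
 * 32-bit read cannot happen.
 */
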
static int team_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
{
	struct team *team = netdev_priv(dev);
	struct team_port *port;
	int err;

	/*
	 * Although this is reader, it's guarded by team lock. It's not possible
	 * to traverse list in reverse under rcu_read_lock
	 */
	mutex_lock(&team->lock);
	list_for_each_entry(port, &team->port_list, list) {
		err = vlan_vid_add(port->dev, proto, vid);
		if (err)
			goto unwind;
	}
	mutex_unlock(&team->lock);

	return 0;

unwind:
	list_for_each_entry_continue_reverse(port, &team->port_list, list)
		vlan_vid_del(port->dev, proto, vid);
	mutex_unlock(&team->lock);

	return err;
}

static int team_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
{
	struct team *team = netdev_priv(dev);
	struct team_port *port;

	mutex_lock(&team->lock);
	list_for_each_entry(port, &team->port_list, list)
		vlan_vid_del(port->dev, proto, vid);
	mutex_unlock(&team->lock);

	return 0;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void team_poll_controller(struct net_device *dev)
{
}

static void __team_netpoll_cleanup(struct team *team)
{
	struct team_port *port;

	list_for_each_entry(port, &team->port_list, list)
		team_port_disable_netpoll(port);
}

static void team_netpoll_cleanup(struct net_device *dev)
{
	struct team *team = netdev_priv(dev);

	mutex_lock(&team->lock);
	__team_netpoll_cleanup(team);
	mutex_unlock(&team->lock);
}

static int team_netpoll_setup(struct net_device *dev,
			      struct netpoll_info *npinfo)
{
	struct team *team = netdev_priv(dev);
	struct team_port *port;
	int err = 0;

	mutex_lock(&team->lock);
	list_for_each_entry(port, &team->port_list, list) {
		err = __team_port_enable_netpoll(port);
		if (err) {
			__team_netpoll_cleanup(team);
			break;
		}
	}
	mutex_unlock(&team->lock);
	return err;
}
#endif

static int team_add_slave(struct net_device *dev, struct net_device *port_dev)
{
	struct team *team = netdev_priv(dev);
	int err;

	mutex_lock(&team->lock);
	err = team_port_add(team, port_dev);
	mutex_unlock(&team->lock);
	return err;
}

static int team_del_slave(struct net_device *dev, struct net_device *port_dev)
{
	struct team *team = netdev_priv(dev);
	int err;

	mutex_lock(&team->lock);
	err = team_port_del(team, port_dev);
	mutex_unlock(&team->lock);
	return err;
}

static netdev_features_t team_fix_features(struct net_device *dev,
					   netdev_features_t features)
{
	struct team_port *port;
	struct team *team = netdev_priv(dev);
	netdev_features_t mask;

	mask = features;
	features &= ~NETIF_F_ONE_FOR_ALL;
	features |= NETIF_F_ALL_FOR_ALL;

	rcu_read_lock();
	list_for_each_entry_rcu(port, &team->port_list, list) {
		features = netdev_increment_features(features,
						     port->dev->features,
						     mask);
	}
	rcu_read_unlock();

	features = netdev_add_tso_features(features, mask);

	return features;
}

static int team_change_carrier(struct net_device *dev, bool new_carrier)
{
	struct team *team = netdev_priv(dev);

	team->user_carrier_enabled = true;

	if (new_carrier)
		netif_carrier_on(dev);
	else
		netif_carrier_off(dev);
	return 0;
}

static const struct net_device_ops team_netdev_ops = {
	.ndo_init		= team_init,
	.ndo_uninit		= team_uninit,
	.ndo_open		= team_open,
	.ndo_stop		= team_close,
	.ndo_start_xmit		= team_xmit,
	.ndo_select_queue	= team_select_queue,
	.ndo_change_rx_flags	= team_change_rx_flags,
	.ndo_set_rx_mode	= team_set_rx_mode,
	.ndo_set_mac_address	= team_set_mac_address,
	.ndo_change_mtu		= team_change_mtu,
	.ndo_get_stats64	= team_get_stats64,
	.ndo_vlan_rx_add_vid	= team_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= team_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= team_poll_controller,
	.ndo_netpoll_setup	= team_netpoll_setup,
	.ndo_netpoll_cleanup	= team_netpoll_cleanup,
#endif
	.ndo_add_slave		= team_add_slave,
	.ndo_del_slave		= team_del_slave,
	.ndo_fix_features	= team_fix_features,
	.ndo_neigh_construct	= netdev_default_l2upper_neigh_construct,
	.ndo_neigh_destroy	= netdev_default_l2upper_neigh_destroy,
	.ndo_change_carrier	= team_change_carrier,
	.ndo_bridge_setlink	= switchdev_port_bridge_setlink,
	.ndo_bridge_getlink	= switchdev_port_bridge_getlink,
	.ndo_bridge_dellink	= switchdev_port_bridge_dellink,
	.ndo_fdb_add		= switchdev_port_fdb_add,
	.ndo_fdb_del		= switchdev_port_fdb_del,
	.ndo_fdb_dump		= switchdev_port_fdb_dump,
	.ndo_features_check	= passthru_features_check,
};

/***********************
 * ethtool interface
 ***********************/

static void team_ethtool_get_drvinfo(struct net_device *dev,
				     struct ethtool_drvinfo *drvinfo)
{
	strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, UTS_RELEASE, sizeof(drvinfo->version));
}

static const struct ethtool_ops team_ethtool_ops = {
	.get_drvinfo		= team_ethtool_get_drvinfo,
	.get_link		= ethtool_op_get_link,
};

/***********************
 * rt netlink interface
 ***********************/

static void team_setup_by_port(struct net_device *dev,
			       struct net_device *port_dev)
{
	dev->header_ops	= port_dev->header_ops;
	dev->type = port_dev->type;
	dev->hard_header_len = port_dev->hard_header_len;
	dev->needed_headroom = port_dev->needed_headroom;
	dev->addr_len = port_dev->addr_len;
	dev->mtu = port_dev->mtu;
	memcpy(dev->broadcast, port_dev->broadcast, port_dev->addr_len);
	eth_hw_addr_inherit(dev, port_dev);
}

static int team_dev_type_check_change(struct net_device *dev,
				      struct net_device *port_dev)
{
	struct team *team = netdev_priv(dev);
	char *portname = port_dev->name;
	int err;

	if (dev->type == port_dev->type)
		return 0;
	if (!list_empty(&team->port_list)) {
		netdev_err(dev, "Device %s is of different type\n", portname);
		return -EBUSY;
	}
	err = call_netdevice_notifiers(NETDEV_PRE_TYPE_CHANGE, dev);
	err = notifier_to_errno(err);
	if (err) {
		netdev_err(dev, "Refused to change device type\n");
		return err;
	}
	dev_uc_flush(dev);
	dev_mc_flush(dev);
	team_setup_by_port(dev, port_dev);
	call_netdevice_notifiers(NETDEV_POST_TYPE_CHANGE, dev);
	return 0;
}

static void team_setup(struct net_device *dev)
{
	ether_setup(dev);

	dev->netdev_ops = &team_netdev_ops;
	dev->ethtool_ops = &team_ethtool_ops;
	dev->destructor	= team_destructor;
	dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING);
	dev->priv_flags |= IFF_NO_QUEUE;
	dev->priv_flags |= IFF_TEAM;

	/*
	 * Indicate we support unicast address filtering. That way core won't
	 * bring us to promisc mode in case a unicast addr is added.
	 * Let this up to underlay drivers.
	 */
	dev->priv_flags |= IFF_UNICAST_FLT | IFF_LIVE_ADDR_CHANGE;

	dev->features |= NETIF_F_LLTX;
	dev->features |= NETIF_F_GRO;

	/* Don't allow team devices to change network namespaces. */
	dev->features |= NETIF_F_NETNS_LOCAL;

	dev->hw_features = TEAM_VLAN_FEATURES |
			   NETIF_F_HW_VLAN_CTAG_RX |
			   NETIF_F_HW_VLAN_CTAG_FILTER;

	dev->hw_features |= NETIF_F_GSO_ENCAP_ALL;
	dev->features |= dev->hw_features;
	dev->features |= NETIF_F_HW_VLAN_CTAG_TX;
}

static int team_newlink(struct net *src_net, struct net_device *dev,
			struct nlattr *tb[], struct nlattr *data[])
{
	if (tb[IFLA_ADDRESS] == NULL)
		eth_hw_addr_random(dev);

	return register_netdevice(dev);
}

static int team_validate(struct nlattr *tb[], struct nlattr *data[])
{
	if (tb[IFLA_ADDRESS]) {
		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
			return -EINVAL;
		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
			return -EADDRNOTAVAIL;
	}
	return 0;
}

static unsigned int team_get_num_tx_queues(void)
{
	return TEAM_DEFAULT_NUM_TX_QUEUES;
}

static unsigned int team_get_num_rx_queues(void)
{
	return TEAM_DEFAULT_NUM_RX_QUEUES;
}

static struct rtnl_link_ops team_link_ops __read_mostly = {
	.kind			= DRV_NAME,
	.priv_size		= sizeof(struct team),
	.setup			= team_setup,
	.newlink		= team_newlink,
	.validate		= team_validate,
	.get_num_tx_queues	= team_get_num_tx_queues,
	.get_num_rx_queues	= team_get_num_rx_queues,
};

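/*
 * With this rtnl_link_ops registered, a device is created with plain
 * iproute2, e.g.:
 *
 *	ip link add name team0 type team
 *	ip link set dev eth0 master team0
 *
 * where the second command enslaves a port through ndo_add_slave above;
 * teamd/libteam drive the same paths programmatically.
 */
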
/***********************************
 * Generic netlink custom interface
 ***********************************/

static struct genl_family team_nl_family = {
	.id		= GENL_ID_GENERATE,
	.name		= TEAM_GENL_NAME,
	.version	= TEAM_GENL_VERSION,
	.maxattr	= TEAM_ATTR_MAX,
	.netnsok	= true,
};

static const struct nla_policy team_nl_policy[TEAM_ATTR_MAX + 1] = {
	[TEAM_ATTR_UNSPEC]		= { .type = NLA_UNSPEC, },
	[TEAM_ATTR_TEAM_IFINDEX]	= { .type = NLA_U32 },
	[TEAM_ATTR_LIST_OPTION]		= { .type = NLA_NESTED },
	[TEAM_ATTR_LIST_PORT]		= { .type = NLA_NESTED },
};

static const struct nla_policy
team_nl_option_policy[TEAM_ATTR_OPTION_MAX + 1] = {
	[TEAM_ATTR_OPTION_UNSPEC]	= { .type = NLA_UNSPEC, },
	[TEAM_ATTR_OPTION_NAME] = {
		.type = NLA_STRING,
		.len = TEAM_STRING_MAX_LEN,
	},
	[TEAM_ATTR_OPTION_CHANGED]	= { .type = NLA_FLAG },
	[TEAM_ATTR_OPTION_TYPE]		= { .type = NLA_U8 },
	[TEAM_ATTR_OPTION_DATA]		= { .type = NLA_BINARY },
	[TEAM_ATTR_OPTION_PORT_IFINDEX]	= { .type = NLA_U32 },
	[TEAM_ATTR_OPTION_ARRAY_INDEX]	= { .type = NLA_U32 },
};

static int team_nl_cmd_noop(struct sk_buff *skb, struct genl_info *info)
{
	struct sk_buff *msg;
	void *hdr;
	int err;

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	hdr = genlmsg_put(msg, info->snd_portid, info->snd_seq,
			  &team_nl_family, 0, TEAM_CMD_NOOP);
	if (!hdr) {
		err = -EMSGSIZE;
		goto err_msg_put;
	}

	genlmsg_end(msg, hdr);

	return genlmsg_unicast(genl_info_net(info), msg, info->snd_portid);

err_msg_put:
	nlmsg_free(msg);

	return err;
}

/*
 * Netlink cmd functions should be locked by the following two functions.
 * Since dev gets held here, that ensures dev won't disappear in between.
 */
static struct team *team_nl_team_get(struct genl_info *info)
{
	struct net *net = genl_info_net(info);
	int ifindex;
	struct net_device *dev;
	struct team *team;

	if (!info->attrs[TEAM_ATTR_TEAM_IFINDEX])
		return NULL;

	ifindex = nla_get_u32(info->attrs[TEAM_ATTR_TEAM_IFINDEX]);
	dev = dev_get_by_index(net, ifindex);
	if (!dev || dev->netdev_ops != &team_netdev_ops) {
		if (dev)
			dev_put(dev);
		return NULL;
	}

	team = netdev_priv(dev);
	mutex_lock(&team->lock);
	return team;
}

static void team_nl_team_put(struct team *team)
{
	mutex_unlock(&team->lock);
	dev_put(team->dev);
}

typedef int team_nl_send_func_t(struct sk_buff *skb,
				struct team *team, u32 portid);

static int team_nl_send_unicast(struct sk_buff *skb, struct team *team, u32 portid)
{
	return genlmsg_unicast(dev_net(team->dev), skb, portid);
}

static int team_nl_fill_one_option_get(struct sk_buff *skb, struct team *team,
				       struct team_option_inst *opt_inst)
{
	struct nlattr *option_item;
	struct team_option *option = opt_inst->option;
	struct team_option_inst_info *opt_inst_info = &opt_inst->info;
	struct team_gsetter_ctx ctx;
	int err;

	ctx.info = opt_inst_info;
	err = team_option_get(team, opt_inst, &ctx);
	if (err)
		return err;

	option_item = nla_nest_start(skb, TEAM_ATTR_ITEM_OPTION);
	if (!option_item)
		return -EMSGSIZE;

	if (nla_put_string(skb, TEAM_ATTR_OPTION_NAME, option->name))
		goto nest_cancel;
	if (opt_inst_info->port &&
	    nla_put_u32(skb, TEAM_ATTR_OPTION_PORT_IFINDEX,
			opt_inst_info->port->dev->ifindex))
		goto nest_cancel;
	if (opt_inst->option->array_size &&
	    nla_put_u32(skb, TEAM_ATTR_OPTION_ARRAY_INDEX,
			opt_inst_info->array_index))
		goto nest_cancel;

	switch (option->type) {
	case TEAM_OPTION_TYPE_U32:
		if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_U32))
			goto nest_cancel;
		if (nla_put_u32(skb, TEAM_ATTR_OPTION_DATA, ctx.data.u32_val))
			goto nest_cancel;
		break;
	case TEAM_OPTION_TYPE_STRING:
		if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_STRING))
			goto nest_cancel;
		if (nla_put_string(skb, TEAM_ATTR_OPTION_DATA,
				   ctx.data.str_val))
			goto nest_cancel;
		break;
	case TEAM_OPTION_TYPE_BINARY:
		if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_BINARY))
			goto nest_cancel;
		if (nla_put(skb, TEAM_ATTR_OPTION_DATA, ctx.data.bin_val.len,
			    ctx.data.bin_val.ptr))
			goto nest_cancel;
		break;
	case TEAM_OPTION_TYPE_BOOL:
		if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_FLAG))
			goto nest_cancel;
		if (ctx.data.bool_val &&
		    nla_put_flag(skb, TEAM_ATTR_OPTION_DATA))
			goto nest_cancel;
		break;
	case TEAM_OPTION_TYPE_S32:
		if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_S32))
			goto nest_cancel;
		if (nla_put_s32(skb, TEAM_ATTR_OPTION_DATA, ctx.data.s32_val))
			goto nest_cancel;
		break;
	default:
		BUG();
	}
	if (opt_inst->removed && nla_put_flag(skb, TEAM_ATTR_OPTION_REMOVED))
		goto nest_cancel;
	if (opt_inst->changed) {
		if (nla_put_flag(skb, TEAM_ATTR_OPTION_CHANGED))
			goto nest_cancel;
		opt_inst->changed = false;
	}
	nla_nest_end(skb, option_item);
	return 0;

nest_cancel:
	nla_nest_cancel(skb, option_item);
	return -EMSGSIZE;
}

static int __send_and_alloc_skb(struct sk_buff **pskb,
				struct team *team, u32 portid,
				team_nl_send_func_t *send_func)
{
	int err;

	if (*pskb) {
		err = send_func(*pskb, team, portid);
		if (err)
			return err;
	}
	*pskb = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!*pskb)
		return -ENOMEM;
	return 0;
}

static int team_nl_send_options_get(struct team *team, u32 portid, u32 seq,
				    int flags, team_nl_send_func_t *send_func,
				    struct list_head *sel_opt_inst_list)
{
	struct nlattr *option_list;
	struct nlmsghdr *nlh;
	void *hdr;
	struct team_option_inst *opt_inst;
	int err;
	struct sk_buff *skb = NULL;
	bool incomplete;
	int i;

	opt_inst = list_first_entry(sel_opt_inst_list,
				    struct team_option_inst, tmp_list);

start_again:
	err = __send_and_alloc_skb(&skb, team, portid, send_func);
	if (err)
		return err;

	hdr = genlmsg_put(skb, portid, seq, &team_nl_family, flags | NLM_F_MULTI,
			  TEAM_CMD_OPTIONS_GET);
	if (!hdr) {
		nlmsg_free(skb);
		return -EMSGSIZE;
	}

	if (nla_put_u32(skb, TEAM_ATTR_TEAM_IFINDEX, team->dev->ifindex))
		goto nla_put_failure;
	option_list = nla_nest_start(skb, TEAM_ATTR_LIST_OPTION);
	if (!option_list)
		goto nla_put_failure;

	i = 0;
	incomplete = false;
	list_for_each_entry_from(opt_inst, sel_opt_inst_list, tmp_list) {
		err = team_nl_fill_one_option_get(skb, team, opt_inst);
		if (err) {
			if (err == -EMSGSIZE) {
				if (!i)
					goto errout;
				incomplete = true;
				break;
			}
			goto errout;
		}
		i++;
	}

	nla_nest_end(skb, option_list);
	genlmsg_end(skb, hdr);
	if (incomplete)
		goto start_again;

send_done:
	nlh = nlmsg_put(skb, portid, seq, NLMSG_DONE, 0, flags | NLM_F_MULTI);
	if (!nlh) {
		err = __send_and_alloc_skb(&skb, team, portid, send_func);
		if (err)
			return err;
		goto send_done;
	}

	return send_func(skb, team, portid);

nla_put_failure:
	err = -EMSGSIZE;
errout:
	genlmsg_cancel(skb, hdr);
	nlmsg_free(skb);
	return err;
}

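/*
 * The start_again/send_done dance above implements multi-part dumps by
 * hand: when an option does not fit (-EMSGSIZE), the current skb is closed
 * and flushed via send_func and a fresh one is allocated, and the final
 * skb carries the NLMSG_DONE trailer.
 */
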
static int team_nl_cmd_options_get(struct sk_buff *skb, struct genl_info *info)
{
	struct team *team;
	struct team_option_inst *opt_inst;
	int err;
	LIST_HEAD(sel_opt_inst_list);

	team = team_nl_team_get(info);
	if (!team)
		return -EINVAL;

	list_for_each_entry(opt_inst, &team->option_inst_list, list)
		list_add_tail(&opt_inst->tmp_list, &sel_opt_inst_list);
	err = team_nl_send_options_get(team, info->snd_portid, info->snd_seq,
				       NLM_F_ACK, team_nl_send_unicast,
				       &sel_opt_inst_list);

	team_nl_team_put(team);

	return err;
}

2502 static int team_nl_send_event_options_get(struct team *team,
2503 struct list_head *sel_opt_inst_list);
static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info)
{
	struct team *team;
	int err = 0;
	int i;
	struct nlattr *nl_option;

	rtnl_lock();

	team = team_nl_team_get(info);
	if (!team) {
		err = -EINVAL;
		goto rtnl_unlock;
	}

	if (!info->attrs[TEAM_ATTR_LIST_OPTION]) {
		err = -EINVAL;
		goto team_put;
	}

	nla_for_each_nested(nl_option, info->attrs[TEAM_ATTR_LIST_OPTION], i) {
		struct nlattr *opt_attrs[TEAM_ATTR_OPTION_MAX + 1];
		struct nlattr *attr;
		struct nlattr *attr_data;
		LIST_HEAD(opt_inst_list);
		enum team_option_type opt_type;
		int opt_port_ifindex = 0; /* != 0 for per-port options */
		u32 opt_array_index = 0;
		bool opt_is_array = false;
		struct team_option_inst *opt_inst;
		char *opt_name;
		bool opt_found = false;

		if (nla_type(nl_option) != TEAM_ATTR_ITEM_OPTION) {
			err = -EINVAL;
			goto team_put;
		}
		err = nla_parse_nested(opt_attrs, TEAM_ATTR_OPTION_MAX,
				       nl_option, team_nl_option_policy);
		if (err)
			goto team_put;
		if (!opt_attrs[TEAM_ATTR_OPTION_NAME] ||
		    !opt_attrs[TEAM_ATTR_OPTION_TYPE]) {
			err = -EINVAL;
			goto team_put;
		}
		switch (nla_get_u8(opt_attrs[TEAM_ATTR_OPTION_TYPE])) {
		case NLA_U32:
			opt_type = TEAM_OPTION_TYPE_U32;
			break;
		case NLA_STRING:
			opt_type = TEAM_OPTION_TYPE_STRING;
			break;
		case NLA_BINARY:
			opt_type = TEAM_OPTION_TYPE_BINARY;
			break;
		case NLA_FLAG:
			opt_type = TEAM_OPTION_TYPE_BOOL;
			break;
		case NLA_S32:
			opt_type = TEAM_OPTION_TYPE_S32;
			break;
		default:
			goto team_put;
		}

		attr_data = opt_attrs[TEAM_ATTR_OPTION_DATA];
		if (opt_type != TEAM_OPTION_TYPE_BOOL && !attr_data) {
			err = -EINVAL;
			goto team_put;
		}

		opt_name = nla_data(opt_attrs[TEAM_ATTR_OPTION_NAME]);
		attr = opt_attrs[TEAM_ATTR_OPTION_PORT_IFINDEX];
		if (attr)
			opt_port_ifindex = nla_get_u32(attr);

		attr = opt_attrs[TEAM_ATTR_OPTION_ARRAY_INDEX];
		if (attr) {
			opt_is_array = true;
			opt_array_index = nla_get_u32(attr);
		}

		list_for_each_entry(opt_inst, &team->option_inst_list, list) {
			struct team_option *option = opt_inst->option;
			struct team_gsetter_ctx ctx;
			struct team_option_inst_info *opt_inst_info;
			int tmp_ifindex;

			opt_inst_info = &opt_inst->info;
			tmp_ifindex = opt_inst_info->port ?
				      opt_inst_info->port->dev->ifindex : 0;
			if (option->type != opt_type ||
			    strcmp(option->name, opt_name) ||
			    tmp_ifindex != opt_port_ifindex ||
			    (option->array_size && !opt_is_array) ||
			    opt_inst_info->array_index != opt_array_index)
				continue;
			opt_found = true;
			ctx.info = opt_inst_info;
			switch (opt_type) {
			case TEAM_OPTION_TYPE_U32:
				ctx.data.u32_val = nla_get_u32(attr_data);
				break;
			case TEAM_OPTION_TYPE_STRING:
				if (nla_len(attr_data) > TEAM_STRING_MAX_LEN) {
					err = -EINVAL;
					goto team_put;
				}
				ctx.data.str_val = nla_data(attr_data);
				break;
			case TEAM_OPTION_TYPE_BINARY:
				ctx.data.bin_val.len = nla_len(attr_data);
				ctx.data.bin_val.ptr = nla_data(attr_data);
				break;
			case TEAM_OPTION_TYPE_BOOL:
				ctx.data.bool_val = attr_data ? true : false;
				break;
			case TEAM_OPTION_TYPE_S32:
				ctx.data.s32_val = nla_get_s32(attr_data);
				break;
			default:
				BUG();
			}
			err = team_option_set(team, opt_inst, &ctx);
			if (err)
				goto team_put;
			opt_inst->changed = true;
			list_add(&opt_inst->tmp_list, &opt_inst_list);
		}
		if (!opt_found) {
			err = -ENOENT;
			goto team_put;
		}

		err = team_nl_send_event_options_get(team, &opt_inst_list);
		if (err)
			break;
	}

team_put:
	team_nl_team_put(team);
rtnl_unlock:
	rtnl_unlock();
	return err;
}
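/* Emit a single TEAM_ATTR_ITEM_PORT nest describing one port: its ifindex,
 * the changed/removed/linkup flags and the cached speed/duplex state.
 */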
static int team_nl_fill_one_port_get(struct sk_buff *skb,
				     struct team_port *port)
{
	struct nlattr *port_item;

	port_item = nla_nest_start(skb, TEAM_ATTR_ITEM_PORT);
	if (!port_item)
		goto nest_cancel;
	if (nla_put_u32(skb, TEAM_ATTR_PORT_IFINDEX, port->dev->ifindex))
		goto nest_cancel;
	if (port->changed) {
		if (nla_put_flag(skb, TEAM_ATTR_PORT_CHANGED))
			goto nest_cancel;
		port->changed = false;
	}
	if ((port->removed &&
	     nla_put_flag(skb, TEAM_ATTR_PORT_REMOVED)) ||
	    (port->state.linkup &&
	     nla_put_flag(skb, TEAM_ATTR_PORT_LINKUP)) ||
	    nla_put_u32(skb, TEAM_ATTR_PORT_SPEED, port->state.speed) ||
	    nla_put_u8(skb, TEAM_ATTR_PORT_DUPLEX, port->state.duplex))
		goto nest_cancel;
	nla_nest_end(skb, port_item);
	return 0;

nest_cancel:
	nla_nest_cancel(skb, port_item);
	return -EMSGSIZE;
}
static int team_nl_send_port_list_get(struct team *team, u32 portid, u32 seq,
				      int flags, team_nl_send_func_t *send_func,
				      struct team_port *one_port)
{
	struct nlattr *port_list;
	struct nlmsghdr *nlh;
	void *hdr;
	struct team_port *port;
	int err;
	struct sk_buff *skb = NULL;
	bool incomplete;
	int i;

	port = list_first_entry_or_null(&team->port_list,
					struct team_port, list);

start_again:
	err = __send_and_alloc_skb(&skb, team, portid, send_func);
	if (err)
		return err;

	hdr = genlmsg_put(skb, portid, seq, &team_nl_family, flags | NLM_F_MULTI,
			  TEAM_CMD_PORT_LIST_GET);
	if (!hdr) {
		nlmsg_free(skb);
		return -EMSGSIZE;
	}

	if (nla_put_u32(skb, TEAM_ATTR_TEAM_IFINDEX, team->dev->ifindex))
		goto nla_put_failure;
	port_list = nla_nest_start(skb, TEAM_ATTR_LIST_PORT);
	if (!port_list)
		goto nla_put_failure;

	i = 0;
	incomplete = false;

	/* If one port is selected, caller wants to send port list containing
	 * only this port. Otherwise go through all listed ports and send all.
	 */
	if (one_port) {
		err = team_nl_fill_one_port_get(skb, one_port);
		if (err)
			goto errout;
	} else if (port) {
		list_for_each_entry_from(port, &team->port_list, list) {
			err = team_nl_fill_one_port_get(skb, port);
			if (err) {
				if (err == -EMSGSIZE) {
					if (!i)
						goto errout;
					incomplete = true;
					break;
				}
				goto errout;
			}
			i++;
		}
	}

	nla_nest_end(skb, port_list);
	genlmsg_end(skb, hdr);
	if (incomplete)
		goto start_again;

send_done:
	nlh = nlmsg_put(skb, portid, seq, NLMSG_DONE, 0, flags | NLM_F_MULTI);
	if (!nlh) {
		err = __send_and_alloc_skb(&skb, team, portid, send_func);
		if (err)
			return err;
		goto send_done;
	}

	return send_func(skb, team, portid);

nla_put_failure:
	err = -EMSGSIZE;
errout:
	genlmsg_cancel(skb, hdr);
	nlmsg_free(skb);
	return err;
}
static int team_nl_cmd_port_list_get(struct sk_buff *skb,
				     struct genl_info *info)
{
	struct team *team;
	int err;

	team = team_nl_team_get(info);
	if (!team)
		return -EINVAL;

	err = team_nl_send_port_list_get(team, info->snd_portid, info->snd_seq,
					 NLM_F_ACK, team_nl_send_unicast, NULL);

	team_nl_team_put(team);

	return err;
}
static const struct genl_ops team_nl_ops[] = {
	{
		.cmd = TEAM_CMD_NOOP,
		.doit = team_nl_cmd_noop,
		.policy = team_nl_policy,
	},
	{
		.cmd = TEAM_CMD_OPTIONS_SET,
		.doit = team_nl_cmd_options_set,
		.policy = team_nl_policy,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = TEAM_CMD_OPTIONS_GET,
		.doit = team_nl_cmd_options_get,
		.policy = team_nl_policy,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = TEAM_CMD_PORT_LIST_GET,
		.doit = team_nl_cmd_port_list_get,
		.policy = team_nl_policy,
		.flags = GENL_ADMIN_PERM,
	},
};
static const struct genl_multicast_group team_nl_mcgrps[] = {
	{ .name = TEAM_GENL_CHANGE_EVENT_MC_GRP_NAME, },
};
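/* Unsolicited change events are delivered to the change-event multicast
 * group (TEAM_GENL_CHANGE_EVENT_MC_GRP_NAME); replies to explicit get/set
 * requests go out via team_nl_send_unicast instead.
 */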
static int team_nl_send_multicast(struct sk_buff *skb,
				  struct team *team, u32 portid)
{
	return genlmsg_multicast_netns(&team_nl_family, dev_net(team->dev),
				       skb, 0, 0, GFP_KERNEL);
}
static int team_nl_send_event_options_get(struct team *team,
					  struct list_head *sel_opt_inst_list)
{
	return team_nl_send_options_get(team, 0, 0, 0, team_nl_send_multicast,
					sel_opt_inst_list);
}
static int team_nl_send_event_port_get(struct team *team,
				       struct team_port *port)
{
	return team_nl_send_port_list_get(team, 0, 0, 0, team_nl_send_multicast,
					  port);
}
static int team_nl_init(void)
{
	return genl_register_family_with_ops_groups(&team_nl_family, team_nl_ops,
						    team_nl_mcgrps);
}

static void team_nl_fini(void)
{
	genl_unregister_family(&team_nl_family);
}
/******************
 * Change checkers
 ******************/

static void __team_options_change_check(struct team *team)
{
	int err;
	struct team_option_inst *opt_inst;
	LIST_HEAD(sel_opt_inst_list);

	list_for_each_entry(opt_inst, &team->option_inst_list, list) {
		if (opt_inst->changed)
			list_add_tail(&opt_inst->tmp_list, &sel_opt_inst_list);
	}
	err = team_nl_send_event_options_get(team, &sel_opt_inst_list);
	if (err && err != -ESRCH)
		netdev_warn(team->dev, "Failed to send options change via netlink (err %d)\n",
			    err);
}
/* rtnl lock is held */
static void __team_port_change_send(struct team_port *port, bool linkup)
{
	int err;

	port->changed = true;
	port->state.linkup = linkup;
	team_refresh_port_linkup(port);
	if (linkup) {
		struct ethtool_link_ksettings ecmd;

		err = __ethtool_get_link_ksettings(port->dev, &ecmd);
		if (!err) {
			port->state.speed = ecmd.base.speed;
			port->state.duplex = ecmd.base.duplex;
			goto send_event;
		}
	}
	port->state.speed = 0;
	port->state.duplex = 0;

send_event:
	err = team_nl_send_event_port_get(port->team, port);
	if (err && err != -ESRCH)
		netdev_warn(port->team->dev, "Failed to send port change of device %s via netlink (err %d)\n",
			    port->dev->name, err);
}
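/* Recompute the team device's carrier from per-port link state: the master
 * reports carrier on as long as at least one port has linkup, unless
 * userspace took over carrier control (user_carrier_enabled).
 */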
static void __team_carrier_check(struct team *team)
{
	struct team_port *port;
	bool team_linkup;

	if (team->user_carrier_enabled)
		return;

	team_linkup = false;
	list_for_each_entry(port, &team->port_list, list) {
		if (port->linkup) {
			team_linkup = true;
			break;
		}
	}

	if (team_linkup)
		netif_carrier_on(team->dev);
	else
		netif_carrier_off(team->dev);
}
static void __team_port_change_check(struct team_port *port, bool linkup)
{
	if (port->state.linkup != linkup)
		__team_port_change_send(port, linkup);
	__team_carrier_check(port->team);
}

static void __team_port_change_port_added(struct team_port *port, bool linkup)
{
	__team_port_change_send(port, linkup);
	__team_carrier_check(port->team);
}

static void __team_port_change_port_removed(struct team_port *port)
{
	port->removed = true;
	__team_port_change_send(port, false);
	__team_carrier_check(port->team);
}

static void team_port_change_check(struct team_port *port, bool linkup)
{
	struct team *team = port->team;

	mutex_lock(&team->lock);
	__team_port_change_check(port, linkup);
	mutex_unlock(&team->lock);
}
/************************************
 * Net device notifier event handler
 ************************************/
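/* Called for every netdevice event in the system; events are ignored
 * unless the device is enslaved to a team (team_port_get_rtnl() returns
 * a port), in which case link changes, unregister, feature/MTU/type
 * changes and IGMP resends are handled or propagated to the master.
 */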
static int team_device_event(struct notifier_block *unused,
			     unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct team_port *port;

	port = team_port_get_rtnl(dev);
	if (!port)
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_UP:
		if (netif_carrier_ok(dev))
			team_port_change_check(port, true);
		break;
	case NETDEV_DOWN:
		team_port_change_check(port, false);
		break;
	case NETDEV_CHANGE:
		if (netif_running(port->dev))
			team_port_change_check(port,
					       !!netif_carrier_ok(port->dev));
		break;
	case NETDEV_UNREGISTER:
		team_del_slave(port->team->dev, dev);
		break;
	case NETDEV_FEAT_CHANGE:
		team_compute_features(port->team);
		break;
	case NETDEV_PRECHANGEMTU:
		/* Forbid to change mtu of underlying device */
		if (!port->team->port_mtu_change_allowed)
			return NOTIFY_BAD;
		break;
	case NETDEV_PRE_TYPE_CHANGE:
		/* Forbid to change type of underlying device */
		return NOTIFY_BAD;
	case NETDEV_RESEND_IGMP:
		/* Propagate to master device */
		call_netdevice_notifiers(event, port->team->dev);
		break;
	}
	return NOTIFY_DONE;
}
static struct notifier_block team_notifier_block __read_mostly = {
	.notifier_call = team_device_event,
};
/***********************
 * Module init and exit
 ***********************/
static int __init team_module_init(void)
{
	int err;

	register_netdevice_notifier(&team_notifier_block);

	err = rtnl_link_register(&team_link_ops);
	if (err)
		goto err_rtnl_reg;

	err = team_nl_init();
	if (err)
		goto err_nl_init;

	return 0;

err_nl_init:
	rtnl_link_unregister(&team_link_ops);

err_rtnl_reg:
	unregister_netdevice_notifier(&team_notifier_block);

	return err;
}
static void __exit team_module_exit(void)
{
	team_nl_fini();
	rtnl_link_unregister(&team_link_ops);
	unregister_netdevice_notifier(&team_notifier_block);
}

module_init(team_module_init);
module_exit(team_module_exit);
3043 MODULE_LICENSE("GPL v2");
3044 MODULE_AUTHOR("Jiri Pirko <jpirko@redhat.com>");
3045 MODULE_DESCRIPTION("Ethernet team device driver");
3046 MODULE_ALIAS_RTNL_LINK(DRV_NAME);
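/* Usage note: a team device is typically created from userspace with
 * iproute2, e.g. "ip link add name team0 type team", or managed by teamd
 * from the libteam project, which drives the generic netlink interface
 * above to get/set options and monitor port and option change events.
 */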