/*
 * originally based on the dummy device.
 *
 * Copyright 1999, Thomas Davis, tadavis@lbl.gov.
 * Licensed under the GPL. Based on dummy.c, and eql.c devices.
 *
 * bonding.c: an Ethernet Bonding driver
 *
 * This is useful to talk to Cisco EtherChannel compatible equipment:
 *	Sun Trunking (Solaris)
 *	Alteon AceDirector Trunks
 *	and probably many L2 switches ...
 *
 * How it works:
 *    ifconfig bond0 ipaddress netmask up
 *	will set up a network device with an IP address. No MAC address
 *	will be assigned at this time; the hw MAC address will come from
 *	the first slave bonded to the channel. All slaves will then use
 *	this hw MAC address.
 *
 *    ifconfig bond0 down
 *	will release all slaves, marking them as down.
 *
 *    ifenslave bond0 eth0
 *	will attach eth0 to bond0 as a slave. eth0's hw MAC address will either
 *	a: be used as the initial MAC address, or
 *	b: if a hw MAC address is already there, eth0's hw MAC address
 *	   will then be set from bond0.
 */
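/* For example, a minimal two-slave setup using the commands documented
 * above (addresses and interface names are illustrative only):
 *
 *    ifconfig bond0 192.168.0.1 netmask 255.255.255.0 up
 *    ifenslave bond0 eth0
 *    ifenslave bond0 eth1
 *
 * bond0 takes eth0's hw MAC address when eth0 is enslaved first; eth1's
 * MAC is then set from bond0, per rule (b) above.
 */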
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/interrupt.h>
#include <linux/ptrace.h>
#include <linux/ioport.h>
#include <linux/icmp.h>
#include <linux/icmpv6.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/timer.h>
#include <linux/socket.h>
#include <linux/ctype.h>
#include <linux/inet.h>
#include <linux/bitops.h>
#include <linux/uaccess.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/smp.h>
#include <linux/if_ether.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/if_bonding.h>
#include <linux/jiffies.h>
#include <linux/preempt.h>
#include <net/route.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/pkt_sched.h>
#include <linux/rculist.h>
#include <net/flow_dissector.h>
#include <net/bonding.h>
#include <net/bond_3ad.h>
#include <net/bond_alb.h>
#if IS_ENABLED(CONFIG_TLS_DEVICE)
#include <net/tls.h>
#endif

#include "bonding_priv.h"
/*---------------------------- Module parameters ----------------------------*/

/* monitor all links that often (in milliseconds). <=0 disables monitoring */

static int max_bonds	= BOND_DEFAULT_MAX_BONDS;
static int tx_queues	= BOND_DEFAULT_TX_QUEUES;
static int num_peer_notif = 1;
static int miimon;
static int updelay;
static int downdelay;
static int use_carrier	= 1;
static char *mode;
static char *primary;
static char *primary_reselect;
static char *lacp_rate;
static int min_links;
static char *ad_select;
static char *xmit_hash_policy;
static int arp_interval;
static char *arp_ip_target[BOND_MAX_ARP_TARGETS];
static char *arp_validate;
static char *arp_all_targets;
static char *fail_over_mac;
static int all_slaves_active;
static struct bond_params bonding_defaults;
static int resend_igmp = BOND_DEFAULT_RESEND_IGMP;
static int packets_per_slave = 1;
static int lp_interval = BOND_ALB_DEFAULT_LP_INTERVAL;
module_param(max_bonds, int, 0);
MODULE_PARM_DESC(max_bonds, "Max number of bonded devices");
module_param(tx_queues, int, 0);
MODULE_PARM_DESC(tx_queues, "Max number of transmit queues (default = 16)");
module_param_named(num_grat_arp, num_peer_notif, int, 0644);
MODULE_PARM_DESC(num_grat_arp, "Number of peer notifications to send on "
			       "failover event (alias of num_unsol_na)");
module_param_named(num_unsol_na, num_peer_notif, int, 0644);
MODULE_PARM_DESC(num_unsol_na, "Number of peer notifications to send on "
			       "failover event (alias of num_grat_arp)");
module_param(miimon, int, 0);
MODULE_PARM_DESC(miimon, "Link check interval in milliseconds");
module_param(updelay, int, 0);
MODULE_PARM_DESC(updelay, "Delay before considering link up, in milliseconds");
module_param(downdelay, int, 0);
MODULE_PARM_DESC(downdelay, "Delay before considering link down, "
			    "in milliseconds");
module_param(use_carrier, int, 0);
MODULE_PARM_DESC(use_carrier, "Use netif_carrier_ok (vs MII ioctls) in miimon; "
			      "0 for off, 1 for on (default)");
module_param(mode, charp, 0);
MODULE_PARM_DESC(mode, "Mode of operation; 0 for balance-rr, "
		       "1 for active-backup, 2 for balance-xor, "
		       "3 for broadcast, 4 for 802.3ad, 5 for balance-tlb, "
		       "6 for balance-alb");
module_param(primary, charp, 0);
MODULE_PARM_DESC(primary, "Primary network device to use");
module_param(primary_reselect, charp, 0);
MODULE_PARM_DESC(primary_reselect, "Reselect primary slave "
				   "once it comes up; "
				   "0 for always (default), "
				   "1 for only if speed of primary is "
				   "better, "
				   "2 for only on active slave "
				   "failure");
module_param(lacp_rate, charp, 0);
MODULE_PARM_DESC(lacp_rate, "LACPDU tx rate to request from 802.3ad partner; "
			    "0 for slow, 1 for fast");
module_param(ad_select, charp, 0);
MODULE_PARM_DESC(ad_select, "802.3ad aggregation selection logic; "
			    "0 for stable (default), 1 for bandwidth, "
			    "2 for count");
module_param(min_links, int, 0);
MODULE_PARM_DESC(min_links, "Minimum number of available links before turning on carrier");

module_param(xmit_hash_policy, charp, 0);
MODULE_PARM_DESC(xmit_hash_policy, "balance-alb, balance-tlb, balance-xor, 802.3ad hashing method; "
				   "0 for layer 2 (default), 1 for layer 3+4, "
				   "2 for layer 2+3, 3 for encap layer 2+3, "
				   "4 for encap layer 3+4, 5 for vlan+srcmac");
module_param(arp_interval, int, 0);
MODULE_PARM_DESC(arp_interval, "arp interval in milliseconds");
module_param_array(arp_ip_target, charp, NULL, 0);
MODULE_PARM_DESC(arp_ip_target, "arp targets in n.n.n.n form");
module_param(arp_validate, charp, 0);
MODULE_PARM_DESC(arp_validate, "validate src/dst of ARP probes; "
			       "0 for none (default), 1 for active, "
			       "2 for backup, 3 for all");
module_param(arp_all_targets, charp, 0);
MODULE_PARM_DESC(arp_all_targets, "fail on any/all arp targets timeout; 0 for any (default), 1 for all");
module_param(fail_over_mac, charp, 0);
MODULE_PARM_DESC(fail_over_mac, "For active-backup, do not set all slaves to "
				"the same MAC; 0 for none (default), "
				"1 for active, 2 for follow");
module_param(all_slaves_active, int, 0);
MODULE_PARM_DESC(all_slaves_active, "Keep all frames received on an interface "
				    "by setting active flag for all slaves; "
				    "0 for never (default), 1 for always.");
module_param(resend_igmp, int, 0);
MODULE_PARM_DESC(resend_igmp, "Number of IGMP membership reports to send on "
			      "link failure event (default: 1)");
module_param(packets_per_slave, int, 0);
MODULE_PARM_DESC(packets_per_slave, "Packets to send per slave in balance-rr "
				    "mode; 0 for a random slave, 1 packet per "
				    "slave (default), >1 packets per slave.");
module_param(lp_interval, uint, 0);
MODULE_PARM_DESC(lp_interval, "The number of seconds between instances where "
			      "the bonding driver sends learning packets to "
			      "each slave's peer switch. The default is 1.");
/*----------------------------- Global variables ----------------------------*/

#ifdef CONFIG_NET_POLL_CONTROLLER
atomic_t netpoll_block_tx = ATOMIC_INIT(0);
#endif

unsigned int bond_net_id __read_mostly;

static const struct flow_dissector_key flow_keys_bonding_keys[] = {
	{
		.key_id = FLOW_DISSECTOR_KEY_CONTROL,
		.offset = offsetof(struct flow_keys, control),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_BASIC,
		.offset = offsetof(struct flow_keys, basic),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_IPV4_ADDRS,
		.offset = offsetof(struct flow_keys, addrs.v4addrs),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_IPV6_ADDRS,
		.offset = offsetof(struct flow_keys, addrs.v6addrs),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_TIPC,
		.offset = offsetof(struct flow_keys, addrs.tipckey),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_PORTS,
		.offset = offsetof(struct flow_keys, ports),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_ICMP,
		.offset = offsetof(struct flow_keys, icmp),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_VLAN,
		.offset = offsetof(struct flow_keys, vlan),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_FLOW_LABEL,
		.offset = offsetof(struct flow_keys, tags),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_GRE_KEYID,
		.offset = offsetof(struct flow_keys, keyid),
	},
};

static struct flow_dissector flow_keys_bonding __read_mostly;
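/* The dissector above feeds the transmit hash. As a rough sketch (not the
 * exact in-tree computation), a layer 3+4 policy folds the dissected
 * addresses and ports into a single u32, along these lines:
 *
 *	u32 hash = (__force u32)flow.ports.ports;
 *	hash ^= (__force u32)flow_get_u32_dst(&flow) ^
 *		(__force u32)flow_get_u32_src(&flow);
 *	hash ^= (hash >> 16);
 *	hash ^= (hash >> 8);
 *
 * and the transmit slave is then chosen from that hash (e.g. modulo the
 * number of slaves).
 */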
/*-------------------------- Forward declarations ---------------------------*/

static int bond_init(struct net_device *bond_dev);
static void bond_uninit(struct net_device *bond_dev);
static void bond_get_stats(struct net_device *bond_dev,
			   struct rtnl_link_stats64 *stats);
static void bond_slave_arr_handler(struct work_struct *work);
static bool bond_time_in_interval(struct bonding *bond, unsigned long last_act,
				  int mod);
static void bond_netdev_notify_work(struct work_struct *work);
/*---------------------------- General routines -----------------------------*/

const char *bond_mode_name(int mode)
{
	static const char *names[] = {
		[BOND_MODE_ROUNDROBIN] = "load balancing (round-robin)",
		[BOND_MODE_ACTIVEBACKUP] = "fault-tolerance (active-backup)",
		[BOND_MODE_XOR] = "load balancing (xor)",
		[BOND_MODE_BROADCAST] = "fault-tolerance (broadcast)",
		[BOND_MODE_8023AD] = "IEEE 802.3ad Dynamic link aggregation",
		[BOND_MODE_TLB] = "transmit load balancing",
		[BOND_MODE_ALB] = "adaptive load balancing",
	};

	if (mode < BOND_MODE_ROUNDROBIN || mode > BOND_MODE_ALB)
		return "unknown";

	return names[mode];
}

/**
 * bond_dev_queue_xmit - Prepare skb for xmit.
 *
 * @bond: bond device that got this skb for tx.
 * @skb: hw accel VLAN tagged skb to transmit
 * @slave_dev: slave that is supposed to xmit this skbuff
 */
netdev_tx_t bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb,
				struct net_device *slave_dev)
{
	skb->dev = slave_dev;

	BUILD_BUG_ON(sizeof(skb->queue_mapping) !=
		     sizeof(qdisc_skb_cb(skb)->slave_dev_queue_mapping));
	skb_set_queue_mapping(skb, qdisc_skb_cb(skb)->slave_dev_queue_mapping);

	if (unlikely(netpoll_tx_running(bond->dev)))
		return bond_netpoll_send_skb(bond_get_slave_by_dev(bond, slave_dev), skb);

	return dev_queue_xmit(skb);
}
bool bond_sk_check(struct bonding *bond)
{
	switch (BOND_MODE(bond)) {
	case BOND_MODE_8023AD:
	case BOND_MODE_XOR:
		if (bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER34)
			return true;
		fallthrough;
	default:
		return false;
	}
}

static bool bond_xdp_check(struct bonding *bond)
{
	switch (BOND_MODE(bond)) {
	case BOND_MODE_ROUNDROBIN:
	case BOND_MODE_ACTIVEBACKUP:
		return true;
	case BOND_MODE_8023AD:
	case BOND_MODE_XOR:
		/* vlan+srcmac is not supported with XDP as in most cases the 802.1q
		 * payload is not in the packet due to hardware offload.
		 */
		if (bond->params.xmit_policy != BOND_XMIT_POLICY_VLAN_SRCMAC)
			return true;
		fallthrough;
	default:
		return false;
	}
}
/*---------------------------------- VLAN -----------------------------------*/

/* In the following 2 functions, bond_vlan_rx_add_vid and bond_vlan_rx_kill_vid,
 * we don't protect the slave list iteration with a lock because:
 * a. This operation is performed in IOCTL context,
 * b. The operation is protected by the RTNL semaphore in the 8021q code,
 * c. Holding a lock with BH disabled while directly calling a base driver
 *    entry point is generally a BAD idea.
 *
 * The design of synchronization/protection for this operation in the 8021q
 * module is good for one or more VLAN devices over a single physical device
 * and cannot be extended for a teaming solution like bonding, so there is a
 * potential race condition here where a net device from the vlan group might
 * be referenced (either by a base driver or the 8021q code) while it is being
 * removed from the system. However, it turns out we're not making matters
 * worse, and if it works for regular VLAN usage it will work here too.
 */
/**
 * bond_vlan_rx_add_vid - Propagates adding an id to slaves
 * @bond_dev: bonding net device that got called
 * @proto: network protocol ID
 * @vid: vlan id being added
 */
static int bond_vlan_rx_add_vid(struct net_device *bond_dev,
				__be16 proto, u16 vid)
{
	struct bonding *bond = netdev_priv(bond_dev);
	struct slave *slave, *rollback_slave;
	struct list_head *iter;
	int res;

	bond_for_each_slave(bond, slave, iter) {
		res = vlan_vid_add(slave->dev, proto, vid);
		if (res)
			goto unwind;
	}

	return 0;

unwind:
	/* unwind to the slave that failed */
	bond_for_each_slave(bond, rollback_slave, iter) {
		if (rollback_slave == slave)
			break;

		vlan_vid_del(rollback_slave->dev, proto, vid);
	}

	return res;
}

/**
 * bond_vlan_rx_kill_vid - Propagates deleting an id to slaves
 * @bond_dev: bonding net device that got called
 * @proto: network protocol ID
 * @vid: vlan id being removed
 */
static int bond_vlan_rx_kill_vid(struct net_device *bond_dev,
				 __be16 proto, u16 vid)
{
	struct bonding *bond = netdev_priv(bond_dev);
	struct list_head *iter;
	struct slave *slave;

	bond_for_each_slave(bond, slave, iter)
		vlan_vid_del(slave->dev, proto, vid);

	if (bond_is_lb(bond))
		bond_alb_clear_vlan(bond, vid);

	return 0;
}
/*---------------------------------- XFRM -----------------------------------*/

#ifdef CONFIG_XFRM_OFFLOAD
/**
 * bond_ipsec_add_sa - program device with a security association
 * @xs: pointer to transformer state struct
 */
static int bond_ipsec_add_sa(struct xfrm_state *xs)
{
	struct net_device *bond_dev = xs->xso.dev;
	struct bond_ipsec *ipsec;
	struct bonding *bond;
	struct slave *slave;
	int err;

	if (!bond_dev)
		return -EINVAL;

	rcu_read_lock();
	bond = netdev_priv(bond_dev);
	slave = rcu_dereference(bond->curr_active_slave);
	if (!slave) {
		rcu_read_unlock();
		return -ENODEV;
	}

	if (!slave->dev->xfrmdev_ops ||
	    !slave->dev->xfrmdev_ops->xdo_dev_state_add ||
	    netif_is_bond_master(slave->dev)) {
		slave_warn(bond_dev, slave->dev, "Slave does not support ipsec offload\n");
		rcu_read_unlock();
		return -EINVAL;
	}

	ipsec = kmalloc(sizeof(*ipsec), GFP_ATOMIC);
	if (!ipsec) {
		rcu_read_unlock();
		return -ENOMEM;
	}
	xs->xso.real_dev = slave->dev;

	err = slave->dev->xfrmdev_ops->xdo_dev_state_add(xs);
	if (!err) {
		ipsec->xs = xs;
		INIT_LIST_HEAD(&ipsec->list);
		spin_lock_bh(&bond->ipsec_lock);
		list_add(&ipsec->list, &bond->ipsec_list);
		spin_unlock_bh(&bond->ipsec_lock);
	} else {
		kfree(ipsec);
	}
	rcu_read_unlock();
	return err;
}

static void bond_ipsec_add_sa_all(struct bonding *bond)
{
	struct net_device *bond_dev = bond->dev;
	struct bond_ipsec *ipsec;
	struct slave *slave;

	rcu_read_lock();
	slave = rcu_dereference(bond->curr_active_slave);
	if (!slave)
		goto out;

	if (!slave->dev->xfrmdev_ops ||
	    !slave->dev->xfrmdev_ops->xdo_dev_state_add ||
	    netif_is_bond_master(slave->dev)) {
		spin_lock_bh(&bond->ipsec_lock);
		if (!list_empty(&bond->ipsec_list))
			slave_warn(bond_dev, slave->dev,
				   "%s: no slave xdo_dev_state_add\n",
				   __func__);
		spin_unlock_bh(&bond->ipsec_lock);
		goto out;
	}

	spin_lock_bh(&bond->ipsec_lock);
	list_for_each_entry(ipsec, &bond->ipsec_list, list) {
		ipsec->xs->xso.real_dev = slave->dev;
		if (slave->dev->xfrmdev_ops->xdo_dev_state_add(ipsec->xs)) {
			slave_warn(bond_dev, slave->dev, "%s: failed to add SA\n", __func__);
			ipsec->xs->xso.real_dev = NULL;
		}
	}
	spin_unlock_bh(&bond->ipsec_lock);
out:
	rcu_read_unlock();
}
/**
 * bond_ipsec_del_sa - clear out this specific SA
 * @xs: pointer to transformer state struct
 */
static void bond_ipsec_del_sa(struct xfrm_state *xs)
{
	struct net_device *bond_dev = xs->xso.dev;
	struct bond_ipsec *ipsec;
	struct bonding *bond;
	struct slave *slave;

	if (!bond_dev)
		return;

	rcu_read_lock();
	bond = netdev_priv(bond_dev);
	slave = rcu_dereference(bond->curr_active_slave);

	if (!slave)
		goto out;

	if (!xs->xso.real_dev)
		goto out;

	WARN_ON(xs->xso.real_dev != slave->dev);

	if (!slave->dev->xfrmdev_ops ||
	    !slave->dev->xfrmdev_ops->xdo_dev_state_delete ||
	    netif_is_bond_master(slave->dev)) {
		slave_warn(bond_dev, slave->dev, "%s: no slave xdo_dev_state_delete\n", __func__);
		goto out;
	}

	slave->dev->xfrmdev_ops->xdo_dev_state_delete(xs);
out:
	spin_lock_bh(&bond->ipsec_lock);
	list_for_each_entry(ipsec, &bond->ipsec_list, list) {
		if (ipsec->xs == xs) {
			list_del(&ipsec->list);
			kfree(ipsec);
			break;
		}
	}
	spin_unlock_bh(&bond->ipsec_lock);
	rcu_read_unlock();
}

static void bond_ipsec_del_sa_all(struct bonding *bond)
{
	struct net_device *bond_dev = bond->dev;
	struct bond_ipsec *ipsec;
	struct slave *slave;

	rcu_read_lock();
	slave = rcu_dereference(bond->curr_active_slave);
	if (!slave) {
		rcu_read_unlock();
		return;
	}

	spin_lock_bh(&bond->ipsec_lock);
	list_for_each_entry(ipsec, &bond->ipsec_list, list) {
		if (!ipsec->xs->xso.real_dev)
			continue;

		if (!slave->dev->xfrmdev_ops ||
		    !slave->dev->xfrmdev_ops->xdo_dev_state_delete ||
		    netif_is_bond_master(slave->dev)) {
			slave_warn(bond_dev, slave->dev,
				   "%s: no slave xdo_dev_state_delete\n",
				   __func__);
		} else {
			slave->dev->xfrmdev_ops->xdo_dev_state_delete(ipsec->xs);
		}
		ipsec->xs->xso.real_dev = NULL;
	}
	spin_unlock_bh(&bond->ipsec_lock);
	rcu_read_unlock();
}
/**
 * bond_ipsec_offload_ok - can this packet use the xfrm hw offload
 * @skb: current data packet
 * @xs: pointer to transformer state struct
 */
static bool bond_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *xs)
{
	struct net_device *bond_dev = xs->xso.dev;
	struct net_device *real_dev;
	struct slave *curr_active;
	struct bonding *bond;
	int err;

	bond = netdev_priv(bond_dev);
	rcu_read_lock();
	curr_active = rcu_dereference(bond->curr_active_slave);
	real_dev = curr_active->dev;

	if (BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) {
		err = false;
		goto out;
	}

	if (!xs->xso.real_dev) {
		err = false;
		goto out;
	}

	if (!real_dev->xfrmdev_ops ||
	    !real_dev->xfrmdev_ops->xdo_dev_offload_ok ||
	    netif_is_bond_master(real_dev)) {
		err = false;
		goto out;
	}

	err = real_dev->xfrmdev_ops->xdo_dev_offload_ok(skb, xs);
out:
	rcu_read_unlock();
	return err;
}

static const struct xfrmdev_ops bond_xfrmdev_ops = {
	.xdo_dev_state_add = bond_ipsec_add_sa,
	.xdo_dev_state_delete = bond_ipsec_del_sa,
	.xdo_dev_offload_ok = bond_ipsec_offload_ok,
};
#endif /* CONFIG_XFRM_OFFLOAD */
/*------------------------------- Link status -------------------------------*/

/* Set the carrier state for the master according to the state of its
 * slaves. If any slaves are up, the master is up. In 802.3ad mode,
 * do special 802.3ad magic.
 *
 * Returns zero if carrier state does not change, nonzero if it does.
 */
int bond_set_carrier(struct bonding *bond)
{
	struct list_head *iter;
	struct slave *slave;

	if (!bond_has_slaves(bond))
		goto down;

	if (BOND_MODE(bond) == BOND_MODE_8023AD)
		return bond_3ad_set_carrier(bond);

	bond_for_each_slave(bond, slave, iter) {
		if (slave->link == BOND_LINK_UP) {
			if (!netif_carrier_ok(bond->dev)) {
				netif_carrier_on(bond->dev);
				return 1;
			}
			return 0;
		}
	}

down:
	if (netif_carrier_ok(bond->dev)) {
		netif_carrier_off(bond->dev);
		return 1;
	}
	return 0;
}

/* Get link speed and duplex from the slave's base driver
 * using ethtool. If for some reason the call fails or the
 * values are invalid, set speed and duplex to -1,
 * and return. Return 1 if speed or duplex settings are
 * UNKNOWN; 0 otherwise.
 */
static int bond_update_speed_duplex(struct slave *slave)
{
	struct net_device *slave_dev = slave->dev;
	struct ethtool_link_ksettings ecmd;
	int res;

	slave->speed = SPEED_UNKNOWN;
	slave->duplex = DUPLEX_UNKNOWN;

	res = __ethtool_get_link_ksettings(slave_dev, &ecmd);
	if (res < 0)
		return 1;
	if (ecmd.base.speed == 0 || ecmd.base.speed == ((__u32)-1))
		return 1;
	switch (ecmd.base.duplex) {
	case DUPLEX_FULL:
	case DUPLEX_HALF:
		break;
	default:
		return 1;
	}

	slave->speed = ecmd.base.speed;
	slave->duplex = ecmd.base.duplex;

	return 0;
}
const char *bond_slave_link_status(s8 link)
{
	switch (link) {
	case BOND_LINK_UP:
		return "up";
	case BOND_LINK_FAIL:
		return "going down";
	case BOND_LINK_DOWN:
		return "down";
	case BOND_LINK_BACK:
		return "going back";
	default:
		return "unknown";
	}
}
/* if <dev> supports MII link status reporting, check its link status.
 *
 * We either do MII/ETHTOOL ioctls, or check netif_carrier_ok(),
 * depending upon the setting of the use_carrier parameter.
 *
 * Return either BMSR_LSTATUS, meaning that the link is up (or we
 * can't tell and just pretend it is), or 0, meaning that the link is
 * down.
 *
 * If reporting is non-zero, instead of faking link up, return -1 if
 * both ETHTOOL and MII ioctls fail (meaning the device does not
 * support them). If use_carrier is set, return whatever it says.
 * It'd be nice if there was a good way to tell if a driver supports
 * netif_carrier, but there really isn't.
 */
static int bond_check_dev_link(struct bonding *bond,
			       struct net_device *slave_dev, int reporting)
{
	const struct net_device_ops *slave_ops = slave_dev->netdev_ops;
	int (*ioctl)(struct net_device *, struct ifreq *, int);
	struct ifreq ifr;
	struct mii_ioctl_data *mii;

	if (!reporting && !netif_running(slave_dev))
		return 0;

	if (bond->params.use_carrier)
		return netif_carrier_ok(slave_dev) ? BMSR_LSTATUS : 0;

	/* Try to get link status using Ethtool first. */
	if (slave_dev->ethtool_ops->get_link)
		return slave_dev->ethtool_ops->get_link(slave_dev) ?
			BMSR_LSTATUS : 0;

	/* Ethtool can't be used, fallback to MII ioctls. */
	ioctl = slave_ops->ndo_eth_ioctl;
	if (ioctl) {
		/* TODO: set pointer to correct ioctl on a per team member
		 *       bases to make this more efficient. that is, once
		 *       we determine the correct ioctl, we will always
		 *       call it and not the others for that team
		 *       member.
		 */

		/* We cannot assume that SIOCGMIIPHY will also read a
		 * register; not all network drivers (e.g., e100)
		 * support that.
		 */

		/* Yes, the mii is overlaid on the ifreq.ifr_ifru */
		strscpy_pad(ifr.ifr_name, slave_dev->name, IFNAMSIZ);
		mii = if_mii(&ifr);
		if (ioctl(slave_dev, &ifr, SIOCGMIIPHY) == 0) {
			mii->reg_num = MII_BMSR;
			if (ioctl(slave_dev, &ifr, SIOCGMIIREG) == 0)
				return mii->val_out & BMSR_LSTATUS;
		}
	}

	/* If reporting, report that either there's no ndo_eth_ioctl,
	 * or both SIOCGMIIREG and get_link failed (meaning that we
	 * cannot report link status). If not reporting, pretend
	 * we're ok.
	 */
	return reporting ? -1 : BMSR_LSTATUS;
}
/*----------------------------- Multicast list ------------------------------*/

/* Push the promiscuity flag down to appropriate slaves */
static int bond_set_promiscuity(struct bonding *bond, int inc)
{
	struct list_head *iter;
	int err = 0;

	if (bond_uses_primary(bond)) {
		struct slave *curr_active = rtnl_dereference(bond->curr_active_slave);

		if (curr_active)
			err = dev_set_promiscuity(curr_active->dev, inc);
	} else {
		struct slave *slave;

		bond_for_each_slave(bond, slave, iter) {
			err = dev_set_promiscuity(slave->dev, inc);
			if (err)
				return err;
		}
	}
	return err;
}

/* Push the allmulti flag down to all slaves */
static int bond_set_allmulti(struct bonding *bond, int inc)
{
	struct list_head *iter;
	int err = 0;

	if (bond_uses_primary(bond)) {
		struct slave *curr_active = rtnl_dereference(bond->curr_active_slave);

		if (curr_active)
			err = dev_set_allmulti(curr_active->dev, inc);
	} else {
		struct slave *slave;

		bond_for_each_slave(bond, slave, iter) {
			err = dev_set_allmulti(slave->dev, inc);
			if (err)
				return err;
		}
	}
	return err;
}

/* Retrieve the list of registered multicast addresses for the bonding
 * device and retransmit an IGMP JOIN request to the current active
 * slave.
 */
static void bond_resend_igmp_join_requests_delayed(struct work_struct *work)
{
	struct bonding *bond = container_of(work, struct bonding,
					    mcast_work.work);

	if (!rtnl_trylock()) {
		queue_delayed_work(bond->wq, &bond->mcast_work, 1);
		return;
	}
	call_netdevice_notifiers(NETDEV_RESEND_IGMP, bond->dev);

	if (bond->igmp_retrans > 1) {
		bond->igmp_retrans--;
		queue_delayed_work(bond->wq, &bond->mcast_work, HZ/5);
	}
	rtnl_unlock();
}

/* Flush bond's hardware addresses from slave */
static void bond_hw_addr_flush(struct net_device *bond_dev,
			       struct net_device *slave_dev)
{
	struct bonding *bond = netdev_priv(bond_dev);

	dev_uc_unsync(slave_dev, bond_dev);
	dev_mc_unsync(slave_dev, bond_dev);

	if (BOND_MODE(bond) == BOND_MODE_8023AD) {
		/* del lacpdu mc addr from mc list */
		u8 lacpdu_multicast[ETH_ALEN] = MULTICAST_LACPDU_ADDR;

		dev_mc_del(slave_dev, lacpdu_multicast);
	}
}
/*--------------------------- Active slave change ---------------------------*/

/* Update the hardware address list and promisc/allmulti for the new and
 * old active slaves (if any). Modes that are not using primary keep all
 * slaves up to date at all times; only the modes that use primary need to
 * call this function to swap these settings during a failover.
 */
static void bond_hw_addr_swap(struct bonding *bond, struct slave *new_active,
			      struct slave *old_active)
{
	if (old_active) {
		if (bond->dev->flags & IFF_PROMISC)
			dev_set_promiscuity(old_active->dev, -1);

		if (bond->dev->flags & IFF_ALLMULTI)
			dev_set_allmulti(old_active->dev, -1);

		bond_hw_addr_flush(bond->dev, old_active->dev);
	}

	if (new_active) {
		/* FIXME: Signal errors upstream. */
		if (bond->dev->flags & IFF_PROMISC)
			dev_set_promiscuity(new_active->dev, 1);

		if (bond->dev->flags & IFF_ALLMULTI)
			dev_set_allmulti(new_active->dev, 1);

		netif_addr_lock_bh(bond->dev);
		dev_uc_sync(new_active->dev, bond->dev);
		dev_mc_sync(new_active->dev, bond->dev);
		netif_addr_unlock_bh(bond->dev);
	}
}

/**
 * bond_set_dev_addr - clone slave's address to bond
 * @bond_dev: bond net device
 * @slave_dev: slave net device
 *
 * Should be called with RTNL held.
 */
static int bond_set_dev_addr(struct net_device *bond_dev,
			     struct net_device *slave_dev)
{
	int err;

	slave_dbg(bond_dev, slave_dev, "bond_dev=%p slave_dev=%p slave_dev->addr_len=%d\n",
		  bond_dev, slave_dev, slave_dev->addr_len);
	err = dev_pre_changeaddr_notify(bond_dev, slave_dev->dev_addr, NULL);
	if (err)
		return err;

	memcpy(bond_dev->dev_addr, slave_dev->dev_addr, slave_dev->addr_len);
	bond_dev->addr_assign_type = NET_ADDR_STOLEN;
	call_netdevice_notifiers(NETDEV_CHANGEADDR, bond_dev);
	return 0;
}

static struct slave *bond_get_old_active(struct bonding *bond,
					 struct slave *new_active)
{
	struct slave *slave;
	struct list_head *iter;

	bond_for_each_slave(bond, slave, iter) {
		if (slave == new_active)
			continue;

		if (ether_addr_equal(bond->dev->dev_addr, slave->dev->dev_addr))
			return slave;
	}

	return NULL;
}
/* bond_do_fail_over_mac
 *
 * Perform special MAC address swapping for fail_over_mac settings
 *
 * Called with RTNL
 */
static void bond_do_fail_over_mac(struct bonding *bond,
				  struct slave *new_active,
				  struct slave *old_active)
{
	u8 tmp_mac[MAX_ADDR_LEN];
	struct sockaddr_storage ss;
	int rv;

	switch (bond->params.fail_over_mac) {
	case BOND_FOM_ACTIVE:
		if (new_active) {
			rv = bond_set_dev_addr(bond->dev, new_active->dev);
			if (rv)
				slave_err(bond->dev, new_active->dev, "Error %d setting bond MAC from slave\n",
					  -rv);
		}
		break;
	case BOND_FOM_FOLLOW:
		/* if new_active && old_active, swap them
		 * if just old_active, do nothing (going to no active slave)
		 * if just new_active, set new_active to bond's MAC
		 */
		if (!new_active)
			return;

		if (!old_active)
			old_active = bond_get_old_active(bond, new_active);

		if (old_active) {
			bond_hw_addr_copy(tmp_mac, new_active->dev->dev_addr,
					  new_active->dev->addr_len);
			bond_hw_addr_copy(ss.__data,
					  old_active->dev->dev_addr,
					  old_active->dev->addr_len);
			ss.ss_family = new_active->dev->type;
		} else {
			bond_hw_addr_copy(ss.__data, bond->dev->dev_addr,
					  bond->dev->addr_len);
			ss.ss_family = bond->dev->type;
		}

		rv = dev_set_mac_address(new_active->dev,
					 (struct sockaddr *)&ss, NULL);
		if (rv) {
			slave_err(bond->dev, new_active->dev, "Error %d setting MAC of new active slave\n",
				  -rv);
			goto out;
		}

		if (!old_active)
			goto out;

		bond_hw_addr_copy(ss.__data, tmp_mac,
				  new_active->dev->addr_len);
		ss.ss_family = old_active->dev->type;

		rv = dev_set_mac_address(old_active->dev,
					 (struct sockaddr *)&ss, NULL);
		if (rv)
			slave_err(bond->dev, old_active->dev, "Error %d setting MAC of old active slave\n",
				  -rv);
out:
		break;
	default:
		netdev_err(bond->dev, "bond_do_fail_over_mac impossible: bad policy %d\n",
			   bond->params.fail_over_mac);
		break;
	}
}
static struct slave *bond_choose_primary_or_current(struct bonding *bond)
{
	struct slave *prim = rtnl_dereference(bond->primary_slave);
	struct slave *curr = rtnl_dereference(bond->curr_active_slave);

	if (!prim || prim->link != BOND_LINK_UP) {
		if (!curr || curr->link != BOND_LINK_UP)
			return NULL;
		return curr;
	}

	if (bond->force_primary) {
		bond->force_primary = false;
		return prim;
	}

	if (!curr || curr->link != BOND_LINK_UP)
		return prim;

	/* At this point, prim and curr are both up */
	switch (bond->params.primary_reselect) {
	case BOND_PRI_RESELECT_ALWAYS:
		return prim;
	case BOND_PRI_RESELECT_BETTER:
		if (prim->speed < curr->speed)
			return curr;
		if (prim->speed == curr->speed && prim->duplex <= curr->duplex)
			return curr;
		return prim;
	case BOND_PRI_RESELECT_FAILURE:
		return curr;
	default:
		netdev_err(bond->dev, "impossible primary_reselect %d\n",
			   bond->params.primary_reselect);
		return curr;
	}
}
/**
 * bond_find_best_slave - select the best available slave to be the active one
 * @bond: our bonding struct
 */
static struct slave *bond_find_best_slave(struct bonding *bond)
{
	struct slave *slave, *bestslave = NULL;
	struct list_head *iter;
	int mintime = bond->params.updelay;

	slave = bond_choose_primary_or_current(bond);
	if (slave)
		return slave;

	bond_for_each_slave(bond, slave, iter) {
		if (slave->link == BOND_LINK_UP)
			return slave;
		if (slave->link == BOND_LINK_BACK && bond_slave_is_up(slave) &&
		    slave->delay < mintime) {
			mintime = slave->delay;
			bestslave = slave;
		}
	}

	return bestslave;
}

static bool bond_should_notify_peers(struct bonding *bond)
{
	struct slave *slave;

	rcu_read_lock();
	slave = rcu_dereference(bond->curr_active_slave);
	rcu_read_unlock();

	if (!slave || !bond->send_peer_notif ||
	    bond->send_peer_notif %
	    max(1, bond->params.peer_notif_delay) != 0 ||
	    !netif_carrier_ok(bond->dev) ||
	    test_bit(__LINK_STATE_LINKWATCH_PENDING, &slave->dev->state))
		return false;

	netdev_dbg(bond->dev, "bond_should_notify_peers: slave %s\n",
		   slave ? slave->dev->name : "NULL");

	return true;
}
/**
 * bond_change_active_slave - change the active slave into the specified one
 * @bond: our bonding struct
 * @new_active: the new slave to make the active one
 *
 * Set the new slave to the bond's settings and unset them on the old
 * curr_active_slave.
 * Settings include flags, mc-list, promiscuity, allmulti, etc.
 *
 * If @new's link state is %BOND_LINK_BACK we'll set it to %BOND_LINK_UP,
 * because it is apparently the best available slave we have, even though its
 * updelay hasn't timed out yet.
 *
 * Caller must hold RTNL.
 */
void bond_change_active_slave(struct bonding *bond, struct slave *new_active)
{
	struct slave *old_active;

	ASSERT_RTNL();

	old_active = rtnl_dereference(bond->curr_active_slave);

	if (old_active == new_active)
		return;

#ifdef CONFIG_XFRM_OFFLOAD
	bond_ipsec_del_sa_all(bond);
#endif /* CONFIG_XFRM_OFFLOAD */

	if (new_active) {
		new_active->last_link_up = jiffies;

		if (new_active->link == BOND_LINK_BACK) {
			if (bond_uses_primary(bond)) {
				slave_info(bond->dev, new_active->dev, "making interface the new active one %d ms earlier\n",
					   (bond->params.updelay - new_active->delay) * bond->params.miimon);
			}

			new_active->delay = 0;
			bond_set_slave_link_state(new_active, BOND_LINK_UP,
						  BOND_SLAVE_NOTIFY_NOW);

			if (BOND_MODE(bond) == BOND_MODE_8023AD)
				bond_3ad_handle_link_change(new_active, BOND_LINK_UP);

			if (bond_is_lb(bond))
				bond_alb_handle_link_change(bond, new_active, BOND_LINK_UP);
		} else {
			if (bond_uses_primary(bond))
				slave_info(bond->dev, new_active->dev, "making interface the new active one\n");
		}
	}

	if (bond_uses_primary(bond))
		bond_hw_addr_swap(bond, new_active, old_active);

	if (bond_is_lb(bond)) {
		bond_alb_handle_active_change(bond, new_active);
		if (old_active)
			bond_set_slave_inactive_flags(old_active,
						      BOND_SLAVE_NOTIFY_NOW);
		if (new_active)
			bond_set_slave_active_flags(new_active,
						    BOND_SLAVE_NOTIFY_NOW);
	} else {
		rcu_assign_pointer(bond->curr_active_slave, new_active);
	}

	if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP) {
		if (old_active)
			bond_set_slave_inactive_flags(old_active,
						      BOND_SLAVE_NOTIFY_NOW);

		if (new_active) {
			bool should_notify_peers = false;

			bond_set_slave_active_flags(new_active,
						    BOND_SLAVE_NOTIFY_NOW);

			if (bond->params.fail_over_mac)
				bond_do_fail_over_mac(bond, new_active,
						      old_active);

			if (netif_running(bond->dev)) {
				bond->send_peer_notif =
					bond->params.num_peer_notif *
					max(1, bond->params.peer_notif_delay);
				should_notify_peers =
					bond_should_notify_peers(bond);
			}

			call_netdevice_notifiers(NETDEV_BONDING_FAILOVER, bond->dev);
			if (should_notify_peers) {
				bond->send_peer_notif--;
				call_netdevice_notifiers(NETDEV_NOTIFY_PEERS,
							 bond->dev);
			}
		}
	}

#ifdef CONFIG_XFRM_OFFLOAD
	bond_ipsec_add_sa_all(bond);
#endif /* CONFIG_XFRM_OFFLOAD */

	/* resend IGMP joins since active slave has changed or
	 * all were sent on curr_active_slave.
	 * resend only if bond is brought up with the affected
	 * bonding modes and the retransmission is enabled
	 */
	if (netif_running(bond->dev) && (bond->params.resend_igmp > 0) &&
	    ((bond_uses_primary(bond) && new_active) ||
	     BOND_MODE(bond) == BOND_MODE_ROUNDROBIN)) {
		bond->igmp_retrans = bond->params.resend_igmp;
		queue_delayed_work(bond->wq, &bond->mcast_work, 1);
	}
}
/**
 * bond_select_active_slave - select a new active slave, if needed
 * @bond: our bonding struct
 *
 * This function should be called when one of the following occurs:
 * - The old curr_active_slave has been released or lost its link.
 * - The primary_slave has got its link back.
 * - A slave has got its link back and there's no old curr_active_slave.
 *
 * Caller must hold RTNL.
 */
void bond_select_active_slave(struct bonding *bond)
{
	struct slave *best_slave;
	int rv;

	ASSERT_RTNL();

	best_slave = bond_find_best_slave(bond);
	if (best_slave != rtnl_dereference(bond->curr_active_slave)) {
		bond_change_active_slave(bond, best_slave);
		rv = bond_set_carrier(bond);
		if (!rv)
			return;

		if (netif_carrier_ok(bond->dev))
			netdev_info(bond->dev, "active interface up!\n");
		else
			netdev_info(bond->dev, "now running without any active interface!\n");
	}
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static inline int slave_enable_netpoll(struct slave *slave)
{
	struct netpoll *np;
	int err = 0;

	np = kzalloc(sizeof(*np), GFP_KERNEL);
	err = -ENOMEM;
	if (!np)
		goto out;

	err = __netpoll_setup(np, slave->dev);
	if (err) {
		kfree(np);
		goto out;
	}
	slave->np = np;
out:
	return err;
}
static inline void slave_disable_netpoll(struct slave *slave)
{
	struct netpoll *np = slave->np;

	if (!np)
		return;

	slave->np = NULL;

	__netpoll_free(np);
}

static void bond_poll_controller(struct net_device *bond_dev)
{
	struct bonding *bond = netdev_priv(bond_dev);
	struct slave *slave = NULL;
	struct list_head *iter;
	struct ad_info ad_info;

	if (BOND_MODE(bond) == BOND_MODE_8023AD)
		if (bond_3ad_get_active_agg_info(bond, &ad_info))
			return;

	bond_for_each_slave_rcu(bond, slave, iter) {
		if (!bond_slave_is_up(slave))
			continue;

		if (BOND_MODE(bond) == BOND_MODE_8023AD) {
			struct aggregator *agg =
			    SLAVE_AD_INFO(slave)->port.aggregator;

			if (agg &&
			    agg->aggregator_identifier != ad_info.aggregator_id)
				continue;
		}

		netpoll_poll_dev(slave->dev);
	}
}

static void bond_netpoll_cleanup(struct net_device *bond_dev)
{
	struct bonding *bond = netdev_priv(bond_dev);
	struct list_head *iter;
	struct slave *slave;

	bond_for_each_slave(bond, slave, iter)
		if (bond_slave_is_up(slave))
			slave_disable_netpoll(slave);
}

static int bond_netpoll_setup(struct net_device *dev, struct netpoll_info *ni)
{
	struct bonding *bond = netdev_priv(dev);
	struct list_head *iter;
	struct slave *slave;
	int err = 0;

	bond_for_each_slave(bond, slave, iter) {
		err = slave_enable_netpoll(slave);
		if (err) {
			bond_netpoll_cleanup(dev);
			break;
		}
	}
	return err;
}
#else
static inline int slave_enable_netpoll(struct slave *slave)
{
	return 0;
}
static inline void slave_disable_netpoll(struct slave *slave)
{
}
static void bond_netpoll_cleanup(struct net_device *bond_dev)
{
}
#endif
/*---------------------------------- IOCTL ----------------------------------*/

static netdev_features_t bond_fix_features(struct net_device *dev,
					   netdev_features_t features)
{
	struct bonding *bond = netdev_priv(dev);
	struct list_head *iter;
	netdev_features_t mask;
	struct slave *slave;

#if IS_ENABLED(CONFIG_TLS_DEVICE)
	if (bond_sk_check(bond))
		features |= BOND_TLS_FEATURES;
	else
		features &= ~BOND_TLS_FEATURES;
#endif

	mask = features;

	features &= ~NETIF_F_ONE_FOR_ALL;
	features |= NETIF_F_ALL_FOR_ALL;

	bond_for_each_slave(bond, slave, iter) {
		features = netdev_increment_features(features,
						     slave->dev->features,
						     mask);
	}
	features = netdev_add_tso_features(features, mask);

	return features;
}

#define BOND_VLAN_FEATURES	(NETIF_F_HW_CSUM | NETIF_F_SG | \
				 NETIF_F_FRAGLIST | NETIF_F_GSO_SOFTWARE | \
				 NETIF_F_HIGHDMA | NETIF_F_LRO)

#define BOND_ENC_FEATURES	(NETIF_F_HW_CSUM | NETIF_F_SG | \
				 NETIF_F_RXCSUM | NETIF_F_GSO_SOFTWARE)

#define BOND_MPLS_FEATURES	(NETIF_F_HW_CSUM | NETIF_F_SG | \
				 NETIF_F_GSO_SOFTWARE)
static void bond_compute_features(struct bonding *bond)
{
	unsigned int dst_release_flag = IFF_XMIT_DST_RELEASE |
					IFF_XMIT_DST_RELEASE_PERM;
	netdev_features_t vlan_features = BOND_VLAN_FEATURES;
	netdev_features_t enc_features  = BOND_ENC_FEATURES;
#ifdef CONFIG_XFRM_OFFLOAD
	netdev_features_t xfrm_features  = BOND_XFRM_FEATURES;
#endif /* CONFIG_XFRM_OFFLOAD */
	netdev_features_t mpls_features  = BOND_MPLS_FEATURES;
	struct net_device *bond_dev = bond->dev;
	struct list_head *iter;
	struct slave *slave;
	unsigned short max_hard_header_len = ETH_HLEN;
	unsigned int gso_max_size = GSO_MAX_SIZE;
	u16 gso_max_segs = GSO_MAX_SEGS;

	if (!bond_has_slaves(bond))
		goto done;
	vlan_features &= NETIF_F_ALL_FOR_ALL;
	mpls_features &= NETIF_F_ALL_FOR_ALL;

	bond_for_each_slave(bond, slave, iter) {
		vlan_features = netdev_increment_features(vlan_features,
			slave->dev->vlan_features, BOND_VLAN_FEATURES);

		enc_features = netdev_increment_features(enc_features,
							 slave->dev->hw_enc_features,
							 BOND_ENC_FEATURES);

#ifdef CONFIG_XFRM_OFFLOAD
		xfrm_features = netdev_increment_features(xfrm_features,
							  slave->dev->hw_enc_features,
							  BOND_XFRM_FEATURES);
#endif /* CONFIG_XFRM_OFFLOAD */

		mpls_features = netdev_increment_features(mpls_features,
							  slave->dev->mpls_features,
							  BOND_MPLS_FEATURES);

		dst_release_flag &= slave->dev->priv_flags;
		if (slave->dev->hard_header_len > max_hard_header_len)
			max_hard_header_len = slave->dev->hard_header_len;

		gso_max_size = min(gso_max_size, slave->dev->gso_max_size);
		gso_max_segs = min(gso_max_segs, slave->dev->gso_max_segs);
	}
	bond_dev->hard_header_len = max_hard_header_len;

done:
	bond_dev->vlan_features = vlan_features;
	bond_dev->hw_enc_features = enc_features | NETIF_F_GSO_ENCAP_ALL |
				    NETIF_F_HW_VLAN_CTAG_TX |
				    NETIF_F_HW_VLAN_STAG_TX;
#ifdef CONFIG_XFRM_OFFLOAD
	bond_dev->hw_enc_features |= xfrm_features;
#endif /* CONFIG_XFRM_OFFLOAD */
	bond_dev->mpls_features = mpls_features;
	bond_dev->gso_max_segs = gso_max_segs;
	netif_set_gso_max_size(bond_dev, gso_max_size);

	bond_dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
	if ((bond_dev->priv_flags & IFF_XMIT_DST_RELEASE_PERM) &&
	    dst_release_flag == (IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM))
		bond_dev->priv_flags |= IFF_XMIT_DST_RELEASE;

	netdev_change_features(bond_dev);
}
static void bond_setup_by_slave(struct net_device *bond_dev,
				struct net_device *slave_dev)
{
	bond_dev->header_ops	    = slave_dev->header_ops;

	bond_dev->type		    = slave_dev->type;
	bond_dev->hard_header_len   = slave_dev->hard_header_len;
	bond_dev->needed_headroom   = slave_dev->needed_headroom;
	bond_dev->addr_len	    = slave_dev->addr_len;

	memcpy(bond_dev->broadcast, slave_dev->broadcast,
		slave_dev->addr_len);
}

/* On bonding slaves other than the currently active slave, suppress
 * duplicates except for alb non-mcast/bcast.
 */
static bool bond_should_deliver_exact_match(struct sk_buff *skb,
					    struct slave *slave,
					    struct bonding *bond)
{
	if (bond_is_slave_inactive(slave)) {
		if (BOND_MODE(bond) == BOND_MODE_ALB &&
		    skb->pkt_type != PACKET_BROADCAST &&
		    skb->pkt_type != PACKET_MULTICAST)
			return false;
		return true;
	}
	return false;
}
static rx_handler_result_t bond_handle_frame(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	struct slave *slave;
	struct bonding *bond;
	int (*recv_probe)(const struct sk_buff *, struct bonding *,
			  struct slave *);
	int ret = RX_HANDLER_ANOTHER;

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return RX_HANDLER_CONSUMED;

	*pskb = skb;

	slave = bond_slave_get_rcu(skb->dev);
	bond = slave->bond;

	recv_probe = READ_ONCE(bond->recv_probe);
	if (recv_probe) {
		ret = recv_probe(skb, bond, slave);
		if (ret == RX_HANDLER_CONSUMED) {
			consume_skb(skb);
			return ret;
		}
	}

	/*
	 * For packets determined by bond_should_deliver_exact_match() call to
	 * be suppressed we want to make an exception for link-local packets.
	 * This is necessary for e.g. LLDP daemons to be able to monitor
	 * inactive slave links without being forced to bind to them
	 * explicitly.
	 *
	 * At the same time, packets that are passed to the bonding master
	 * (including link-local ones) can have their originating interface
	 * determined via PACKET_ORIGDEV socket option.
	 */
	if (bond_should_deliver_exact_match(skb, slave, bond)) {
		if (is_link_local_ether_addr(eth_hdr(skb)->h_dest))
			return RX_HANDLER_PASS;
		return RX_HANDLER_EXACT;
	}

	skb->dev = bond->dev;

	if (BOND_MODE(bond) == BOND_MODE_ALB &&
	    netif_is_bridge_port(bond->dev) &&
	    skb->pkt_type == PACKET_HOST) {

		if (unlikely(skb_cow_head(skb,
					  skb->data - skb_mac_header(skb)))) {
			kfree_skb(skb);
			return RX_HANDLER_CONSUMED;
		}
		bond_hw_addr_copy(eth_hdr(skb)->h_dest, bond->dev->dev_addr,
				  bond->dev->addr_len);
	}

	return ret;
}
static enum netdev_lag_tx_type bond_lag_tx_type(struct bonding *bond)
{
	switch (BOND_MODE(bond)) {
	case BOND_MODE_ROUNDROBIN:
		return NETDEV_LAG_TX_TYPE_ROUNDROBIN;
	case BOND_MODE_ACTIVEBACKUP:
		return NETDEV_LAG_TX_TYPE_ACTIVEBACKUP;
	case BOND_MODE_BROADCAST:
		return NETDEV_LAG_TX_TYPE_BROADCAST;
	case BOND_MODE_XOR:
	case BOND_MODE_8023AD:
		return NETDEV_LAG_TX_TYPE_HASH;
	default:
		return NETDEV_LAG_TX_TYPE_UNKNOWN;
	}
}

static enum netdev_lag_hash bond_lag_hash_type(struct bonding *bond,
					       enum netdev_lag_tx_type type)
{
	if (type != NETDEV_LAG_TX_TYPE_HASH)
		return NETDEV_LAG_HASH_NONE;

	switch (bond->params.xmit_policy) {
	case BOND_XMIT_POLICY_LAYER2:
		return NETDEV_LAG_HASH_L2;
	case BOND_XMIT_POLICY_LAYER34:
		return NETDEV_LAG_HASH_L34;
	case BOND_XMIT_POLICY_LAYER23:
		return NETDEV_LAG_HASH_L23;
	case BOND_XMIT_POLICY_ENCAP23:
		return NETDEV_LAG_HASH_E23;
	case BOND_XMIT_POLICY_ENCAP34:
		return NETDEV_LAG_HASH_E34;
	case BOND_XMIT_POLICY_VLAN_SRCMAC:
		return NETDEV_LAG_HASH_VLAN_SRCMAC;
	default:
		return NETDEV_LAG_HASH_UNKNOWN;
	}
}
static int bond_master_upper_dev_link(struct bonding *bond, struct slave *slave,
				      struct netlink_ext_ack *extack)
{
	struct netdev_lag_upper_info lag_upper_info;
	enum netdev_lag_tx_type type;

	type = bond_lag_tx_type(bond);
	lag_upper_info.tx_type = type;
	lag_upper_info.hash_type = bond_lag_hash_type(bond, type);

	return netdev_master_upper_dev_link(slave->dev, bond->dev, slave,
					    &lag_upper_info, extack);
}

static void bond_upper_dev_unlink(struct bonding *bond, struct slave *slave)
{
	netdev_upper_dev_unlink(slave->dev, bond->dev);
	slave->dev->flags &= ~IFF_SLAVE;
}

static void slave_kobj_release(struct kobject *kobj)
{
	struct slave *slave = to_slave(kobj);
	struct bonding *bond = bond_get_bond_by_slave(slave);

	cancel_delayed_work_sync(&slave->notify_work);
	if (BOND_MODE(bond) == BOND_MODE_8023AD)
		kfree(SLAVE_AD_INFO(slave));

	kfree(slave);
}

static struct kobj_type slave_ktype = {
	.release = slave_kobj_release,
#ifdef CONFIG_SYSFS
	.sysfs_ops = &slave_sysfs_ops,
#endif
};

static int bond_kobj_init(struct slave *slave)
{
	int err;

	err = kobject_init_and_add(&slave->kobj, &slave_ktype,
				   &(slave->dev->dev.kobj), "bonding_slave");
	if (err)
		kobject_put(&slave->kobj);

	return err;
}
static struct slave *bond_alloc_slave(struct bonding *bond,
				      struct net_device *slave_dev)
{
	struct slave *slave = NULL;

	slave = kzalloc(sizeof(*slave), GFP_KERNEL);
	if (!slave)
		return NULL;

	slave->bond = bond;
	slave->dev = slave_dev;
	INIT_DELAYED_WORK(&slave->notify_work, bond_netdev_notify_work);

	if (bond_kobj_init(slave))
		return NULL;

	if (BOND_MODE(bond) == BOND_MODE_8023AD) {
		SLAVE_AD_INFO(slave) = kzalloc(sizeof(struct ad_slave_info),
					       GFP_KERNEL);
		if (!SLAVE_AD_INFO(slave)) {
			kobject_put(&slave->kobj);
			return NULL;
		}
	}

	return slave;
}

static void bond_fill_ifbond(struct bonding *bond, struct ifbond *info)
{
	info->bond_mode = BOND_MODE(bond);
	info->miimon = bond->params.miimon;
	info->num_slaves = bond->slave_cnt;
}

static void bond_fill_ifslave(struct slave *slave, struct ifslave *info)
{
	strcpy(info->slave_name, slave->dev->name);
	info->link = slave->link;
	info->state = bond_slave_state(slave);
	info->link_failure_count = slave->link_failure_count;
}

static void bond_netdev_notify_work(struct work_struct *_work)
{
	struct slave *slave = container_of(_work, struct slave,
					   notify_work.work);

	if (rtnl_trylock()) {
		struct netdev_bonding_info binfo;

		bond_fill_ifslave(slave, &binfo.slave);
		bond_fill_ifbond(slave->bond, &binfo.master);
		netdev_bonding_info_change(slave->dev, &binfo);
		rtnl_unlock();
	} else {
		queue_delayed_work(slave->bond->wq, &slave->notify_work, 1);
	}
}

void bond_queue_slave_event(struct slave *slave)
{
	queue_delayed_work(slave->bond->wq, &slave->notify_work, 0);
}

void bond_lower_state_changed(struct slave *slave)
{
	struct netdev_lag_lower_state_info info;

	info.link_up = slave->link == BOND_LINK_UP ||
		       slave->link == BOND_LINK_FAIL;
	info.tx_enabled = bond_is_active_slave(slave);
	netdev_lower_state_changed(slave->dev, &info);
}
#define BOND_NL_ERR(bond_dev, extack, errmsg) do {		\
	if (extack)						\
		NL_SET_ERR_MSG(extack, errmsg);			\
	else							\
		netdev_err(bond_dev, "Error: %s\n", errmsg);	\
} while (0)

#define SLAVE_NL_ERR(bond_dev, slave_dev, extack, errmsg) do {		\
	if (extack)							\
		NL_SET_ERR_MSG(extack, errmsg);				\
	else								\
		slave_err(bond_dev, slave_dev, "Error: %s\n", errmsg);	\
} while (0)
/* enslave device <slave> to bond device <master> */
int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
		 struct netlink_ext_ack *extack)
{
	struct bonding *bond = netdev_priv(bond_dev);
	const struct net_device_ops *slave_ops = slave_dev->netdev_ops;
	struct slave *new_slave = NULL, *prev_slave;
	struct sockaddr_storage ss;
	int link_reporting;
	int res = 0, i;

	if (slave_dev->flags & IFF_MASTER &&
	    !netif_is_bond_master(slave_dev)) {
		BOND_NL_ERR(bond_dev, extack,
			    "Device type (master device) cannot be enslaved");
		return -EPERM;
	}

	if (!bond->params.use_carrier &&
	    slave_dev->ethtool_ops->get_link == NULL &&
	    slave_ops->ndo_eth_ioctl == NULL) {
		slave_warn(bond_dev, slave_dev, "no link monitoring support\n");
	}

	/* already in-use? */
	if (netdev_is_rx_handler_busy(slave_dev)) {
		SLAVE_NL_ERR(bond_dev, slave_dev, extack,
			     "Device is in use and cannot be enslaved");
		return -EBUSY;
	}

	if (bond_dev == slave_dev) {
		BOND_NL_ERR(bond_dev, extack, "Cannot enslave bond to itself.");
		return -EPERM;
	}

	/* vlan challenged mutual exclusion */
	/* no need to lock since we're protected by rtnl_lock */
	if (slave_dev->features & NETIF_F_VLAN_CHALLENGED) {
		slave_dbg(bond_dev, slave_dev, "is NETIF_F_VLAN_CHALLENGED\n");
		if (vlan_uses_dev(bond_dev)) {
			SLAVE_NL_ERR(bond_dev, slave_dev, extack,
				     "Can not enslave VLAN challenged device to VLAN enabled bond");
			return -EPERM;
		} else {
			slave_warn(bond_dev, slave_dev, "enslaved VLAN challenged slave. Adding VLANs will be blocked as long as it is part of bond.\n");
		}
	} else {
		slave_dbg(bond_dev, slave_dev, "is !NETIF_F_VLAN_CHALLENGED\n");
	}

	if (slave_dev->features & NETIF_F_HW_ESP)
		slave_dbg(bond_dev, slave_dev, "is esp-hw-offload capable\n");

	/* Old ifenslave binaries are no longer supported. These can
	 * be identified with moderate accuracy by the state of the slave:
	 * the current ifenslave will set the interface down prior to
	 * enslaving it; the old ifenslave will not.
	 */
	if (slave_dev->flags & IFF_UP) {
		SLAVE_NL_ERR(bond_dev, slave_dev, extack,
			     "Device can not be enslaved while up");
		return -EPERM;
	}

	/* set bonding device ether type by slave - bonding netdevices are
	 * created with ether_setup, so when the slave type is not ARPHRD_ETHER
	 * there is a need to override some of the type dependent attribs/funcs.
	 *
	 * bond ether type mutual exclusion - don't allow slaves of dissimilar
	 * ether type (eg ARPHRD_ETHER and ARPHRD_INFINIBAND) share the same bond
	 */
	if (!bond_has_slaves(bond)) {
		if (bond_dev->type != slave_dev->type) {
			slave_dbg(bond_dev, slave_dev, "change device type from %d to %d\n",
				  bond_dev->type, slave_dev->type);

			res = call_netdevice_notifiers(NETDEV_PRE_TYPE_CHANGE,
						       bond_dev);
			res = notifier_to_errno(res);
			if (res) {
				slave_err(bond_dev, slave_dev, "refused to change device type\n");
				return -EBUSY;
			}

			/* Flush unicast and multicast addresses */
			dev_uc_flush(bond_dev);
			dev_mc_flush(bond_dev);

			if (slave_dev->type != ARPHRD_ETHER)
				bond_setup_by_slave(bond_dev, slave_dev);
			else {
				ether_setup(bond_dev);
				bond_dev->priv_flags &= ~IFF_TX_SKB_SHARING;
			}

			call_netdevice_notifiers(NETDEV_POST_TYPE_CHANGE,
						 bond_dev);
		}
	} else if (bond_dev->type != slave_dev->type) {
		SLAVE_NL_ERR(bond_dev, slave_dev, extack,
			     "Device type is different from other slaves");
		return -EINVAL;
	}

	if (slave_dev->type == ARPHRD_INFINIBAND &&
	    BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) {
		SLAVE_NL_ERR(bond_dev, slave_dev, extack,
			     "Only active-backup mode is supported for infiniband slaves");
		res = -EOPNOTSUPP;
		goto err_undo_flags;
	}

	if (!slave_ops->ndo_set_mac_address ||
	    slave_dev->type == ARPHRD_INFINIBAND) {
		slave_warn(bond_dev, slave_dev, "The slave device specified does not support setting the MAC address\n");
		if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP &&
		    bond->params.fail_over_mac != BOND_FOM_ACTIVE) {
			if (!bond_has_slaves(bond)) {
				bond->params.fail_over_mac = BOND_FOM_ACTIVE;
				slave_warn(bond_dev, slave_dev, "Setting fail_over_mac to active for active-backup mode\n");
			} else {
				SLAVE_NL_ERR(bond_dev, slave_dev, extack,
					     "Slave device does not support setting the MAC address, but fail_over_mac is not set to active");
				res = -EOPNOTSUPP;
				goto err_undo_flags;
			}
		}
	}
	call_netdevice_notifiers(NETDEV_JOIN, slave_dev);

	/* If this is the first slave, then we need to set the master's hardware
	 * address to be the same as the slave's.
	 */
	if (!bond_has_slaves(bond) &&
	    bond->dev->addr_assign_type == NET_ADDR_RANDOM) {
		res = bond_set_dev_addr(bond->dev, slave_dev);
		if (res)
			goto err_undo_flags;
	}

	new_slave = bond_alloc_slave(bond, slave_dev);
	if (!new_slave) {
		res = -ENOMEM;
		goto err_undo_flags;
	}

	/* Set the new_slave's queue_id to be zero. Queue ID mapping
	 * is set via sysfs or module option if desired.
	 */
	new_slave->queue_id = 0;

	/* Save slave's original mtu and then set it to match the bond */
	new_slave->original_mtu = slave_dev->mtu;
	res = dev_set_mtu(slave_dev, bond->dev->mtu);
	if (res) {
		slave_err(bond_dev, slave_dev, "Error %d calling dev_set_mtu\n", res);
		goto err_free;
	}

	/* Save slave's original ("permanent") mac address for modes
	 * that need it, and for restoring it upon release, and then
	 * set it to the master's address
	 */
	bond_hw_addr_copy(new_slave->perm_hwaddr, slave_dev->dev_addr,
			  slave_dev->addr_len);

	if (!bond->params.fail_over_mac ||
	    BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) {
		/* Set slave to master's mac address. The application already
		 * set the master's mac address to that of the first slave
		 */
		memcpy(ss.__data, bond_dev->dev_addr, bond_dev->addr_len);
		ss.ss_family = slave_dev->type;
		res = dev_set_mac_address(slave_dev, (struct sockaddr *)&ss,
					  extack);
		if (res) {
			slave_err(bond_dev, slave_dev, "Error %d calling set_mac_address\n", res);
			goto err_restore_mtu;
		}
	}

	/* set slave flag before open to prevent IPv6 addrconf */
	slave_dev->flags |= IFF_SLAVE;

	/* open the slave since the application closed it */
	res = dev_open(slave_dev, extack);
	if (res) {
		slave_err(bond_dev, slave_dev, "Opening slave failed\n");
		goto err_restore_mac;
	}

	slave_dev->priv_flags |= IFF_BONDING;
	/* initialize slave stats */
	dev_get_stats(new_slave->dev, &new_slave->slave_stats);
	if (bond_is_lb(bond)) {
		/* bond_alb_init_slave() must be called before all other stages since
		 * it might fail and we do not want to have to undo everything
		 */
		res = bond_alb_init_slave(bond, new_slave);
		if (res)
			goto err_close;
	}

	res = vlan_vids_add_by_dev(slave_dev, bond_dev);
	if (res) {
		slave_err(bond_dev, slave_dev, "Couldn't add bond vlan ids\n");
		goto err_close;
	}

	prev_slave = bond_last_slave(bond);

	new_slave->delay = 0;
	new_slave->link_failure_count = 0;

	if (bond_update_speed_duplex(new_slave) &&
	    bond_needs_speed_duplex(bond))
		new_slave->link = BOND_LINK_DOWN;

	new_slave->last_rx = jiffies -
		(msecs_to_jiffies(bond->params.arp_interval) + 1);
	for (i = 0; i < BOND_MAX_ARP_TARGETS; i++)
		new_slave->target_last_arp_rx[i] = new_slave->last_rx;

	if (bond->params.miimon && !bond->params.use_carrier) {
		link_reporting = bond_check_dev_link(bond, slave_dev, 1);

		if ((link_reporting == -1) && !bond->params.arp_interval) {
			/* miimon is set but a bonded network driver
			 * does not support ETHTOOL/MII and
			 * arp_interval is not set. Note: if
			 * use_carrier is enabled, we will never go
			 * here (because netif_carrier is always
			 * supported); thus, we don't need to change
			 * the messages for netif_carrier.
			 */
			slave_warn(bond_dev, slave_dev, "MII and ETHTOOL support not available for slave, and arp_interval/arp_ip_target module parameters not specified, thus bonding will not detect link failures! see bonding.txt for details\n");
		} else if (link_reporting == -1) {
			/* unable to get link status using mii/ethtool */
			slave_warn(bond_dev, slave_dev, "can't get link status from slave; the network driver associated with this interface does not support MII or ETHTOOL link status reporting, thus miimon has no effect on this interface\n");
		}
	}

	/* check for initial state */
	new_slave->link = BOND_LINK_NOCHANGE;
	if (bond->params.miimon) {
		if (bond_check_dev_link(bond, slave_dev, 0) == BMSR_LSTATUS) {
			if (bond->params.updelay) {
				bond_set_slave_link_state(new_slave,
							  BOND_LINK_BACK,
							  BOND_SLAVE_NOTIFY_NOW);
				new_slave->delay = bond->params.updelay;
			} else {
				bond_set_slave_link_state(new_slave,
							  BOND_LINK_UP,
							  BOND_SLAVE_NOTIFY_NOW);
			}
		} else {
			bond_set_slave_link_state(new_slave, BOND_LINK_DOWN,
						  BOND_SLAVE_NOTIFY_NOW);
		}
	} else if (bond->params.arp_interval) {
		bond_set_slave_link_state(new_slave,
					  (netif_carrier_ok(slave_dev) ?
					  BOND_LINK_UP : BOND_LINK_DOWN),
					  BOND_SLAVE_NOTIFY_NOW);
	} else {
		bond_set_slave_link_state(new_slave, BOND_LINK_UP,
					  BOND_SLAVE_NOTIFY_NOW);
	}

	if (new_slave->link != BOND_LINK_DOWN)
		new_slave->last_link_up = jiffies;
	slave_dbg(bond_dev, slave_dev, "Initial state of slave is BOND_LINK_%s\n",
		  new_slave->link == BOND_LINK_DOWN ? "DOWN" :
		  (new_slave->link == BOND_LINK_UP ? "UP" : "BACK"));
	if (bond_uses_primary(bond) && bond->params.primary[0]) {
		/* if there is a primary slave, remember it */
		if (strcmp(bond->params.primary, new_slave->dev->name) == 0) {
			rcu_assign_pointer(bond->primary_slave, new_slave);
			bond->force_primary = true;
		}
	}

	switch (BOND_MODE(bond)) {
	case BOND_MODE_ACTIVEBACKUP:
		bond_set_slave_inactive_flags(new_slave,
					      BOND_SLAVE_NOTIFY_NOW);
		break;
	case BOND_MODE_8023AD:
		/* in 802.3ad mode, the internal mechanism
		 * will activate the slaves in the selected
		 * aggregator
		 */
		bond_set_slave_inactive_flags(new_slave, BOND_SLAVE_NOTIFY_NOW);
		/* if this is the first slave */
		if (!prev_slave) {
			SLAVE_AD_INFO(new_slave)->id = 1;
			/* Initialize AD with the number of times that the AD timer is called in 1 second;
			 * can be called only after the mac address of the bond is set
			 */
			bond_3ad_initialize(bond, 1000/AD_TIMER_INTERVAL);
		} else {
			SLAVE_AD_INFO(new_slave)->id =
				SLAVE_AD_INFO(prev_slave)->id + 1;
		}

		bond_3ad_bind_slave(new_slave);
		break;
	case BOND_MODE_TLB:
	case BOND_MODE_ALB:
		bond_set_active_slave(new_slave);
		bond_set_slave_inactive_flags(new_slave, BOND_SLAVE_NOTIFY_NOW);
		break;
	default:
		slave_dbg(bond_dev, slave_dev, "This slave is always active in trunk mode\n");

		/* always active in trunk mode */
		bond_set_active_slave(new_slave);

		/* In trunking mode there is little meaning to curr_active_slave
		 * anyway (it holds no special properties of the bond device),
		 * so we can change it without calling change_active_interface()
		 */
		if (!rcu_access_pointer(bond->curr_active_slave) &&
		    new_slave->link == BOND_LINK_UP)
			rcu_assign_pointer(bond->curr_active_slave, new_slave);

		break;
	} /* switch(bond_mode) */

#ifdef CONFIG_NET_POLL_CONTROLLER
	if (bond->dev->npinfo) {
		if (slave_enable_netpoll(new_slave)) {
			slave_info(bond_dev, slave_dev, "master_dev is using netpoll, but new slave device does not support netpoll\n");
			res = -EBUSY;
			goto err_detach;
		}
	}
#endif

	if (!(bond_dev->features & NETIF_F_LRO))
		dev_disable_lro(slave_dev);
	res = netdev_rx_handler_register(slave_dev, bond_handle_frame,
					 new_slave);
	if (res) {
		slave_dbg(bond_dev, slave_dev, "Error %d calling netdev_rx_handler_register\n", res);
		goto err_detach;
	}

	res = bond_master_upper_dev_link(bond, new_slave, extack);
	if (res) {
		slave_dbg(bond_dev, slave_dev, "Error %d calling bond_master_upper_dev_link\n", res);
		goto err_unregister;
	}

	bond_lower_state_changed(new_slave);

	res = bond_sysfs_slave_add(new_slave);
	if (res) {
		slave_dbg(bond_dev, slave_dev, "Error %d calling bond_sysfs_slave_add\n", res);
		goto err_upper_unlink;
	}

	/* If the mode uses primary, then the following is handled by
	 * bond_change_active_slave().
	 */
	if (!bond_uses_primary(bond)) {
		/* set promiscuity level to new slave */
		if (bond_dev->flags & IFF_PROMISC) {
			res = dev_set_promiscuity(slave_dev, 1);
			if (res)
				goto err_sysfs_del;
		}

		/* set allmulti level to new slave */
		if (bond_dev->flags & IFF_ALLMULTI) {
			res = dev_set_allmulti(slave_dev, 1);
			if (res) {
				if (bond_dev->flags & IFF_PROMISC)
					dev_set_promiscuity(slave_dev, -1);
				goto err_sysfs_del;
			}
		}

		netif_addr_lock_bh(bond_dev);
		dev_mc_sync_multiple(slave_dev, bond_dev);
		dev_uc_sync_multiple(slave_dev, bond_dev);
		netif_addr_unlock_bh(bond_dev);

		if (BOND_MODE(bond) == BOND_MODE_8023AD) {
			/* add lacpdu mc addr to mc list */
			u8 lacpdu_multicast[ETH_ALEN] = MULTICAST_LACPDU_ADDR;

			dev_mc_add(slave_dev, lacpdu_multicast);
		}
	}
2151 bond_compute_features(bond);
2152 bond_set_carrier(bond);
2154 if (bond_uses_primary(bond)) {
2156 bond_select_active_slave(bond);
2157 unblock_netpoll_tx();
2160 if (bond_mode_can_use_xmit_hash(bond))
2161 bond_update_slave_arr(bond, NULL);
2164 if (!slave_dev->netdev_ops->ndo_bpf ||
2165 !slave_dev->netdev_ops->ndo_xdp_xmit) {
2166 if (bond->xdp_prog) {
2167 SLAVE_NL_ERR(bond_dev, slave_dev, extack,
2168 "Slave does not support XDP");
2172 } else if (bond->xdp_prog) {
2173 struct netdev_bpf xdp = {
2174 .command = XDP_SETUP_PROG,
2176 .prog = bond->xdp_prog,
2180 if (dev_xdp_prog_count(slave_dev) > 0) {
2181 SLAVE_NL_ERR(bond_dev, slave_dev, extack,
2182 "Slave has XDP program loaded, please unload before enslaving");
2187 res = slave_dev->netdev_ops->ndo_bpf(slave_dev, &xdp);
2189 /* ndo_bpf() sets extack error message */
2190 slave_dbg(bond_dev, slave_dev, "Error %d calling ndo_bpf\n", res);
2194 bpf_prog_inc(bond->xdp_prog);
2197 slave_info(bond_dev, slave_dev, "Enslaving as %s interface with %s link\n",
2198 bond_is_active_slave(new_slave) ? "an active" : "a backup",
2199 new_slave->link != BOND_LINK_DOWN ? "an up" : "a down");
2201 /* enslave is successful */
2202 bond_queue_slave_event(new_slave);
2205 /* Undo stages on error */
2207 bond_sysfs_slave_del(new_slave);
2210 bond_upper_dev_unlink(bond, new_slave);
2213 netdev_rx_handler_unregister(slave_dev);
2216 vlan_vids_del_by_dev(slave_dev, bond_dev);
2217 if (rcu_access_pointer(bond->primary_slave) == new_slave)
2218 RCU_INIT_POINTER(bond->primary_slave, NULL);
2219 if (rcu_access_pointer(bond->curr_active_slave) == new_slave) {
2221 bond_change_active_slave(bond, NULL);
2222 bond_select_active_slave(bond);
2223 unblock_netpoll_tx();
2225 /* either primary_slave or curr_active_slave might've changed */
2227 slave_disable_netpoll(new_slave);
2230 if (!netif_is_bond_master(slave_dev))
2231 slave_dev->priv_flags &= ~IFF_BONDING;
2232 dev_close(slave_dev);
2235 slave_dev->flags &= ~IFF_SLAVE;
2236 if (!bond->params.fail_over_mac ||
2237 BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) {
2238 /* XXX TODO - fom follow mode needs to change master's
2239 * MAC if this slave's MAC is in use by the bond, or at
2240 * least print a warning.
2242 bond_hw_addr_copy(ss.__data, new_slave->perm_hwaddr,
2243 new_slave->dev->addr_len);
2244 ss.ss_family = slave_dev->type;
2245 dev_set_mac_address(slave_dev, (struct sockaddr *)&ss, NULL);
2249 dev_set_mtu(slave_dev, new_slave->original_mtu);
2252 kobject_put(&new_slave->kobj);
2255 /* Enslaving the first slave has failed and we need to fix the master's mac */
2256 if (!bond_has_slaves(bond)) {
2257 if (ether_addr_equal_64bits(bond_dev->dev_addr,
2258 slave_dev->dev_addr))
2259 eth_hw_addr_random(bond_dev);
2260 if (bond_dev->type != ARPHRD_ETHER) {
2261 dev_close(bond_dev);
2262 ether_setup(bond_dev);
2263 bond_dev->flags |= IFF_MASTER;
2264 bond_dev->priv_flags &= ~IFF_TX_SKB_SHARING;
2271 /* Try to release the slave device <slave> from the bond device <master>
2272 * It is legal to access curr_active_slave without a lock because the whole function
2273 * runs under RTNL. If "all" is true it means that the function is being called
2274 * while destroying a bond interface and all slaves are being released.
2276 * The rules for slave state should be:
2277 * for Active/Backup:
2278 * Active stays on, all backups go down.
2279 * for Bonded connections:
2280 * The first up interface should be left on and all others downed. */
2282 static int __bond_release_one(struct net_device *bond_dev,
2283 struct net_device *slave_dev,
2284 bool all, bool unregister)
2286 struct bonding *bond = netdev_priv(bond_dev);
2287 struct slave *slave, *oldcurrent;
2288 struct sockaddr_storage ss;
2289 int old_flags = bond_dev->flags;
2290 netdev_features_t old_features = bond_dev->features;
2292 /* slave is not a slave or master is not master of this slave */
2293 if (!(slave_dev->flags & IFF_SLAVE) ||
2294 !netdev_has_upper_dev(slave_dev, bond_dev)) {
2295 slave_dbg(bond_dev, slave_dev, "cannot release slave\n");
2301 slave = bond_get_slave_by_dev(bond, slave_dev);
2303 /* not a slave of this bond */
2304 slave_info(bond_dev, slave_dev, "interface not enslaved\n");
2305 unblock_netpoll_tx();
2309 bond_set_slave_inactive_flags(slave, BOND_SLAVE_NOTIFY_NOW);
2311 bond_sysfs_slave_del(slave);
2313 /* recompute stats just before removing the slave */
2314 bond_get_stats(bond->dev, &bond->bond_stats);
2316 if (bond->xdp_prog) {
2317 struct netdev_bpf xdp = {
2318 .command = XDP_SETUP_PROG,
2323 if (slave_dev->netdev_ops->ndo_bpf(slave_dev, &xdp))
2324 slave_warn(bond_dev, slave_dev, "failed to unload XDP program\n");
2327 /* unregister rx_handler early so bond_handle_frame wouldn't be called
2328 * for this slave anymore.
2330 netdev_rx_handler_unregister(slave_dev);
2332 if (BOND_MODE(bond) == BOND_MODE_8023AD)
2333 bond_3ad_unbind_slave(slave);
2335 bond_upper_dev_unlink(bond, slave);
2337 if (bond_mode_can_use_xmit_hash(bond))
2338 bond_update_slave_arr(bond, slave);
2340 slave_info(bond_dev, slave_dev, "Releasing %s interface\n",
2341 bond_is_active_slave(slave) ? "active" : "backup");
2343 oldcurrent = rcu_access_pointer(bond->curr_active_slave);
2345 RCU_INIT_POINTER(bond->current_arp_slave, NULL);
2347 if (!all && (!bond->params.fail_over_mac ||
2348 BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP)) {
2349 if (ether_addr_equal_64bits(bond_dev->dev_addr, slave->perm_hwaddr) &&
2350 bond_has_slaves(bond))
2351 slave_warn(bond_dev, slave_dev, "the permanent HWaddr of slave - %pM - is still in use by bond - set the HWaddr of slave to a different address to avoid conflicts\n",
2352 slave->perm_hwaddr);
2355 if (rtnl_dereference(bond->primary_slave) == slave)
2356 RCU_INIT_POINTER(bond->primary_slave, NULL);
2358 if (oldcurrent == slave)
2359 bond_change_active_slave(bond, NULL);
2361 if (bond_is_lb(bond)) {
2362 /* Must be called only after the slave has been
2363 * detached from the list and the curr_active_slave
2364 * has been cleared (if our_slave == old_current),
2365 * but before a new active slave is selected.
2367 bond_alb_deinit_slave(bond, slave);
2371 RCU_INIT_POINTER(bond->curr_active_slave, NULL);
2372 } else if (oldcurrent == slave) {
2373 /* Note that we hold RTNL over this sequence, so there
2374 * is no concern that another slave add/remove event will interfere. */
2377 bond_select_active_slave(bond);
2380 bond_set_carrier(bond);
2381 if (!bond_has_slaves(bond))
2382 eth_hw_addr_random(bond_dev);
2384 unblock_netpoll_tx();
2388 if (!bond_has_slaves(bond)) {
2389 call_netdevice_notifiers(NETDEV_CHANGEADDR, bond->dev);
2390 call_netdevice_notifiers(NETDEV_RELEASE, bond->dev);
2393 bond_compute_features(bond);
2394 if (!(bond_dev->features & NETIF_F_VLAN_CHALLENGED) &&
2395 (old_features & NETIF_F_VLAN_CHALLENGED))
2396 slave_info(bond_dev, slave_dev, "last VLAN challenged slave left bond - VLAN blocking is removed\n");
2398 vlan_vids_del_by_dev(slave_dev, bond_dev);
2400 /* If the mode uses primary, then this case was handled above by
2401 * bond_change_active_slave(..., NULL)
2403 if (!bond_uses_primary(bond)) {
2404 /* unset promiscuity level from slave
2405 * NOTE: The NETDEV_CHANGEADDR call above may change the value
2406 * of the IFF_PROMISC flag in the bond_dev, but we need the
2407 * value of that flag before that change, as that was the value
2408 * when this slave was attached, so we cache at the start of the
2409 * function and use it here. Same goes for ALLMULTI below
2411 if (old_flags & IFF_PROMISC)
2412 dev_set_promiscuity(slave_dev, -1);
2414 /* unset allmulti level from slave */
2415 if (old_flags & IFF_ALLMULTI)
2416 dev_set_allmulti(slave_dev, -1);
2418 bond_hw_addr_flush(bond_dev, slave_dev);
2421 slave_disable_netpoll(slave);
2423 /* close slave before restoring its mac address */
2424 dev_close(slave_dev);
2426 if (bond->params.fail_over_mac != BOND_FOM_ACTIVE ||
2427 BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) {
2428 /* restore original ("permanent") mac address */
2429 bond_hw_addr_copy(ss.__data, slave->perm_hwaddr,
2430 slave->dev->addr_len);
2431 ss.ss_family = slave_dev->type;
2432 dev_set_mac_address(slave_dev, (struct sockaddr *)&ss, NULL);
2436 __dev_set_mtu(slave_dev, slave->original_mtu);
2438 dev_set_mtu(slave_dev, slave->original_mtu);
2440 if (!netif_is_bond_master(slave_dev))
2441 slave_dev->priv_flags &= ~IFF_BONDING;
2443 kobject_put(&slave->kobj);
2448 /* A wrapper used because of ndo_del_link */
2449 int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
2451 return __bond_release_one(bond_dev, slave_dev, false, false);
2454 /* First release a slave and then destroy the bond if no more slaves are left.
2455 * Must be under rtnl_lock when this function is called.
2457 static int bond_release_and_destroy(struct net_device *bond_dev,
2458 struct net_device *slave_dev)
2460 struct bonding *bond = netdev_priv(bond_dev);
2463 ret = __bond_release_one(bond_dev, slave_dev, false, true);
2464 if (ret == 0 && !bond_has_slaves(bond) &&
2465 bond_dev->reg_state != NETREG_UNREGISTERING) {
2466 bond_dev->priv_flags |= IFF_DISABLE_NETPOLL;
2467 netdev_info(bond_dev, "Destroying bond\n");
2468 bond_remove_proc_entry(bond);
2469 unregister_netdevice(bond_dev);
2474 static void bond_info_query(struct net_device *bond_dev, struct ifbond *info)
2476 struct bonding *bond = netdev_priv(bond_dev);
2478 bond_fill_ifbond(bond, info);
2481 static int bond_slave_info_query(struct net_device *bond_dev, struct ifslave *info)
2483 struct bonding *bond = netdev_priv(bond_dev);
2484 struct list_head *iter;
2485 int i = 0, res = -ENODEV;
2486 struct slave *slave;
2488 bond_for_each_slave(bond, slave, iter) {
2489 if (i++ == (int)info->slave_id) {
2491 bond_fill_ifslave(slave, info);
2499 /*-------------------------------- Monitoring -------------------------------*/
2501 /* called with rcu_read_lock() */
2502 static int bond_miimon_inspect(struct bonding *bond)
2504 int link_state, commit = 0;
2505 struct list_head *iter;
2506 struct slave *slave;
2507 bool ignore_updelay;
2509 ignore_updelay = !rcu_dereference(bond->curr_active_slave);
2511 bond_for_each_slave_rcu(bond, slave, iter) {
2512 bond_propose_link_state(slave, BOND_LINK_NOCHANGE);
2514 link_state = bond_check_dev_link(bond, slave->dev, 0);
2516 switch (slave->link) {
2521 bond_propose_link_state(slave, BOND_LINK_FAIL);
2523 slave->delay = bond->params.downdelay;
2525 slave_info(bond->dev, slave->dev, "link status down for %sinterface, disabling it in %d ms\n",
2527 BOND_MODE_ACTIVEBACKUP) ?
2528 (bond_is_active_slave(slave) ?
2529 "active " : "backup ") : "",
2530 bond->params.downdelay * bond->params.miimon);
2533 case BOND_LINK_FAIL:
2535 /* recovered before downdelay expired */
2536 bond_propose_link_state(slave, BOND_LINK_UP);
2537 slave->last_link_up = jiffies;
2538 slave_info(bond->dev, slave->dev, "link status up again after %d ms\n",
2539 (bond->params.downdelay - slave->delay) *
2540 bond->params.miimon);
2545 if (slave->delay <= 0) {
2546 bond_propose_link_state(slave, BOND_LINK_DOWN);
2554 case BOND_LINK_DOWN:
2558 bond_propose_link_state(slave, BOND_LINK_BACK);
2560 slave->delay = bond->params.updelay;
2563 slave_info(bond->dev, slave->dev, "link status up, enabling it in %d ms\n",
2564 ignore_updelay ? 0 :
2565 bond->params.updelay *
2566 bond->params.miimon);
2569 case BOND_LINK_BACK:
2571 bond_propose_link_state(slave, BOND_LINK_DOWN);
2572 slave_info(bond->dev, slave->dev, "link status down again after %d ms\n",
2573 (bond->params.updelay - slave->delay) *
2574 bond->params.miimon);
2582 if (slave->delay <= 0) {
2583 bond_propose_link_state(slave, BOND_LINK_UP);
2585 ignore_updelay = false;
2597 static void bond_miimon_link_change(struct bonding *bond,
2598 struct slave *slave,
2601 switch (BOND_MODE(bond)) {
2602 case BOND_MODE_8023AD:
2603 bond_3ad_handle_link_change(slave, link);
2607 bond_alb_handle_link_change(bond, slave, link);
2610 bond_update_slave_arr(bond, NULL);
2615 static void bond_miimon_commit(struct bonding *bond)
2617 struct list_head *iter;
2618 struct slave *slave, *primary;
2620 bond_for_each_slave(bond, slave, iter) {
2621 switch (slave->link_new_state) {
2622 case BOND_LINK_NOCHANGE:
2623 /* For 802.3ad mode, check current slave speed and
2624 * duplex again in case its port was disabled after
2625 * invalid speed/duplex reporting but recovered before
2626 * link monitoring could make a decision on the actual link status. */
2629 if (BOND_MODE(bond) == BOND_MODE_8023AD &&
2630 slave->link == BOND_LINK_UP)
2631 bond_3ad_adapter_speed_duplex_changed(slave);
2635 if (bond_update_speed_duplex(slave) &&
2636 bond_needs_speed_duplex(bond)) {
2637 slave->link = BOND_LINK_DOWN;
2638 if (net_ratelimit())
2639 slave_warn(bond->dev, slave->dev,
2640 "failed to get link speed/duplex\n");
2643 bond_set_slave_link_state(slave, BOND_LINK_UP,
2644 BOND_SLAVE_NOTIFY_NOW);
2645 slave->last_link_up = jiffies;
2647 primary = rtnl_dereference(bond->primary_slave);
2648 if (BOND_MODE(bond) == BOND_MODE_8023AD) {
2649 /* prevent it from being the active one */
2650 bond_set_backup_slave(slave);
2651 } else if (BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) {
2652 /* make it immediately active */
2653 bond_set_active_slave(slave);
2656 slave_info(bond->dev, slave->dev, "link status definitely up, %u Mbps %s duplex\n",
2657 slave->speed == SPEED_UNKNOWN ? 0 : slave->speed,
2658 slave->duplex ? "full" : "half");
2660 bond_miimon_link_change(bond, slave, BOND_LINK_UP);
2662 if (!bond->curr_active_slave || slave == primary)
2667 case BOND_LINK_DOWN:
2668 if (slave->link_failure_count < UINT_MAX)
2669 slave->link_failure_count++;
2671 bond_set_slave_link_state(slave, BOND_LINK_DOWN,
2672 BOND_SLAVE_NOTIFY_NOW);
2674 if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP ||
2675 BOND_MODE(bond) == BOND_MODE_8023AD)
2676 bond_set_slave_inactive_flags(slave,
2677 BOND_SLAVE_NOTIFY_NOW);
2679 slave_info(bond->dev, slave->dev, "link status definitely down, disabling slave\n");
2681 bond_miimon_link_change(bond, slave, BOND_LINK_DOWN);
2683 if (slave == rcu_access_pointer(bond->curr_active_slave))
2689 slave_err(bond->dev, slave->dev, "invalid new link %d on slave\n",
2690 slave->link_new_state);
2691 bond_propose_link_state(slave, BOND_LINK_NOCHANGE);
2698 bond_select_active_slave(bond);
2699 unblock_netpoll_tx();
2702 bond_set_carrier(bond);
2707 * Really a wrapper that splits the mii monitor into two phases: an
2708 * inspection, then (if inspection indicates something needs to be done)
2709 * an acquisition of appropriate locks followed by a commit phase to
2710 * implement whatever link state changes are indicated.
2712 static void bond_mii_monitor(struct work_struct *work)
2714 struct bonding *bond = container_of(work, struct bonding,
2716 bool should_notify_peers = false;
2718 unsigned long delay;
2719 struct slave *slave;
2720 struct list_head *iter;
2722 delay = msecs_to_jiffies(bond->params.miimon);
2724 if (!bond_has_slaves(bond))
2728 should_notify_peers = bond_should_notify_peers(bond);
2729 commit = !!bond_miimon_inspect(bond);
2730 if (bond->send_peer_notif) {
2732 if (rtnl_trylock()) {
2733 bond->send_peer_notif--;
2741 /* Race avoidance with bond_close cancel of workqueue */
2742 if (!rtnl_trylock()) {
2744 should_notify_peers = false;
2748 bond_for_each_slave(bond, slave, iter) {
2749 bond_commit_link_state(slave, BOND_SLAVE_NOTIFY_LATER);
2751 bond_miimon_commit(bond);
2753 rtnl_unlock(); /* might sleep, hold no other locks */
2757 if (bond->params.miimon)
2758 queue_delayed_work(bond->wq, &bond->mii_work, delay);
2760 if (should_notify_peers) {
2761 if (!rtnl_trylock())
2763 call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, bond->dev);
2768 static int bond_upper_dev_walk(struct net_device *upper,
2769 struct netdev_nested_priv *priv)
2771 __be32 ip = *(__be32 *)priv->data;
2773 return ip == bond_confirm_addr(upper, 0, ip);
2776 static bool bond_has_this_ip(struct bonding *bond, __be32 ip)
2778 struct netdev_nested_priv priv = {
2779 .data = (void *)&ip,
2783 if (ip == bond_confirm_addr(bond->dev, 0, ip))
2787 if (netdev_walk_all_upper_dev_rcu(bond->dev, bond_upper_dev_walk, &priv))
2794 /* We go to the (large) trouble of VLAN tagging ARP frames because
2795 * switches in VLAN mode (especially if ports are configured as
2796 * "native" to a VLAN) might not pass non-tagged frames.
2798 static void bond_arp_send(struct slave *slave, int arp_op, __be32 dest_ip,
2799 __be32 src_ip, struct bond_vlan_tag *tags)
2801 struct sk_buff *skb;
2802 struct bond_vlan_tag *outer_tag = tags;
2803 struct net_device *slave_dev = slave->dev;
2804 struct net_device *bond_dev = slave->bond->dev;
2806 slave_dbg(bond_dev, slave_dev, "arp %d on slave: dst %pI4 src %pI4\n",
2807 arp_op, &dest_ip, &src_ip);
2809 skb = arp_create(arp_op, ETH_P_ARP, dest_ip, slave_dev, src_ip,
2810 NULL, slave_dev->dev_addr, NULL);
2813 net_err_ratelimited("ARP packet allocation failed\n");
2817 if (!tags || tags->vlan_proto == VLAN_N_VID)
2822 /* Go through all the tags backwards and add them to the packet */
2823 while (tags->vlan_proto != VLAN_N_VID) {
2824 if (!tags->vlan_id) {
2829 slave_dbg(bond_dev, slave_dev, "inner tag: proto %X vid %X\n",
2830 ntohs(outer_tag->vlan_proto), tags->vlan_id);
2831 skb = vlan_insert_tag_set_proto(skb, tags->vlan_proto,
2834 net_err_ratelimited("failed to insert inner VLAN tag\n");
2840 /* Set the outer tag */
2841 if (outer_tag->vlan_id) {
2842 slave_dbg(bond_dev, slave_dev, "outer tag: proto %X vid %X\n",
2843 ntohs(outer_tag->vlan_proto), outer_tag->vlan_id);
2844 __vlan_hwaccel_put_tag(skb, outer_tag->vlan_proto,
2845 outer_tag->vlan_id);
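/* Worked example (hypothetical topology, not from the source): if the
 * arp_ip_target is reached via bond0 <- vlan100 <- vlan200, the loop above
 * pushes the inner tag (vlan200, furthest from the bond) into the payload
 * with vlan_insert_tag_set_proto(), and the outermost tag (vlan100,
 * nearest the bond, tags[0]) is handed to the NIC here, so the frame
 * leaves the slave as:
 *
 *   [ eth | 802.1Q vid 100 | 802.1Q vid 200 | ARP ]
 */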
2852 /* Validate the device path between the @start_dev and the @end_dev.
2853 * The path is valid if the @end_dev is reachable through device stacking.
2855 * When the path is validated, collect any vlan information in the path. */
2858 struct bond_vlan_tag *bond_verify_device_path(struct net_device *start_dev,
2859 struct net_device *end_dev,
2862 struct bond_vlan_tag *tags;
2863 struct net_device *upper;
2864 struct list_head *iter;
2866 if (start_dev == end_dev) {
2867 tags = kcalloc(level + 1, sizeof(*tags), GFP_ATOMIC);
2869 return ERR_PTR(-ENOMEM);
2870 tags[level].vlan_proto = VLAN_N_VID;
2874 netdev_for_each_upper_dev_rcu(start_dev, upper, iter) {
2875 tags = bond_verify_device_path(upper, end_dev, level + 1);
2876 if (IS_ERR_OR_NULL(tags)) {
2881 if (is_vlan_dev(upper)) {
2882 tags[level].vlan_proto = vlan_dev_vlan_proto(upper);
2883 tags[level].vlan_id = vlan_dev_vlan_id(upper);
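/* Worked example (hypothetical stack, not from the source): for
 * bond0 <- vlan100 <- vlan200 with end_dev == vlan200, the recursion
 * bottoms out at level 2 and unwinds into:
 *
 *   tags[0] = { 802.1Q, 100 }   nearest the bond (used as the outer tag)
 *   tags[1] = { 802.1Q, 200 }   next device up
 *   tags[2] = { VLAN_N_VID, 0 } terminator written at the bottom
 */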
2892 static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
2895 struct bond_vlan_tag *tags;
2896 __be32 *targets = bond->params.arp_targets, addr;
2899 for (i = 0; i < BOND_MAX_ARP_TARGETS && targets[i]; i++) {
2900 slave_dbg(bond->dev, slave->dev, "%s: target %pI4\n",
2901 __func__, &targets[i]);
2904 /* Find out through which dev the packet should go */
2905 rt = ip_route_output(dev_net(bond->dev), targets[i], 0,
2908 /* there's no route to target - try to send arp
2909 * probe to generate any traffic (arp_validate=0)
2911 if (bond->params.arp_validate)
2912 pr_warn_once("%s: no route to arp_ip_target %pI4 and arp_validate is set\n",
2915 bond_arp_send(slave, ARPOP_REQUEST, targets[i],
2920 /* bond device itself */
2921 if (rt->dst.dev == bond->dev)
2925 tags = bond_verify_device_path(bond->dev, rt->dst.dev, 0);
2928 if (!IS_ERR_OR_NULL(tags))
2931 /* Not our device - skip */
2932 slave_dbg(bond->dev, slave->dev, "no path to arp_ip_target %pI4 via rt.dev %s\n",
2933 &targets[i], rt->dst.dev ? rt->dst.dev->name : "NULL");
2939 addr = bond_confirm_addr(rt->dst.dev, targets[i], 0);
2941 bond_arp_send(slave, ARPOP_REQUEST, targets[i], addr, tags);
2946 static void bond_validate_arp(struct bonding *bond, struct slave *slave, __be32 sip, __be32 tip)
2950 if (!sip || !bond_has_this_ip(bond, tip)) {
2951 slave_dbg(bond->dev, slave->dev, "%s: sip %pI4 tip %pI4 not found\n",
2952 __func__, &sip, &tip);
2956 i = bond_get_targets_ip(bond->params.arp_targets, sip);
2958 slave_dbg(bond->dev, slave->dev, "%s: sip %pI4 not found in targets\n",
2962 slave->last_rx = jiffies;
2963 slave->target_last_arp_rx[i] = jiffies;
2966 int bond_arp_rcv(const struct sk_buff *skb, struct bonding *bond,
2967 struct slave *slave)
2969 struct arphdr *arp = (struct arphdr *)skb->data;
2970 struct slave *curr_active_slave, *curr_arp_slave;
2971 unsigned char *arp_ptr;
2973 int is_arp = skb->protocol == __cpu_to_be16(ETH_P_ARP);
2976 if (!slave_do_arp_validate(bond, slave)) {
2977 if ((slave_do_arp_validate_only(bond) && is_arp) ||
2978 !slave_do_arp_validate_only(bond))
2979 slave->last_rx = jiffies;
2980 return RX_HANDLER_ANOTHER;
2981 } else if (!is_arp) {
2982 return RX_HANDLER_ANOTHER;
2985 alen = arp_hdr_len(bond->dev);
2987 slave_dbg(bond->dev, slave->dev, "%s: skb->dev %s\n",
2988 __func__, skb->dev->name);
2990 if (alen > skb_headlen(skb)) {
2991 arp = kmalloc(alen, GFP_ATOMIC);
2994 if (skb_copy_bits(skb, 0, arp, alen) < 0)
2998 if (arp->ar_hln != bond->dev->addr_len ||
2999 skb->pkt_type == PACKET_OTHERHOST ||
3000 skb->pkt_type == PACKET_LOOPBACK ||
3001 arp->ar_hrd != htons(ARPHRD_ETHER) ||
3002 arp->ar_pro != htons(ETH_P_IP) ||
3006 arp_ptr = (unsigned char *)(arp + 1);
3007 arp_ptr += bond->dev->addr_len;
3008 memcpy(&sip, arp_ptr, 4);
3009 arp_ptr += 4 + bond->dev->addr_len;
3010 memcpy(&tip, arp_ptr, 4);
3012 slave_dbg(bond->dev, slave->dev, "%s: %s/%d av %d sv %d sip %pI4 tip %pI4\n",
3013 __func__, slave->dev->name, bond_slave_state(slave),
3014 bond->params.arp_validate, slave_do_arp_validate(bond, slave),
3017 curr_active_slave = rcu_dereference(bond->curr_active_slave);
3018 curr_arp_slave = rcu_dereference(bond->current_arp_slave);
3020 /* We 'trust' the received ARP enough to validate it if:
3022 * (a) the slave receiving the ARP is active (which includes the
3023 * current ARP slave, if any), or
3025 * (b) the receiving slave isn't active, but there is a currently
3026 * active slave and it received valid arp reply(s) after it became
3027 * the currently active slave, or
3029 * (c) there is an ARP slave that sent an ARP during the prior ARP
3030 * interval, and we receive an ARP reply on any slave. We accept
3031 * these because switch FDB update delays may deliver the ARP
3032 * reply to a slave other than the sender of the ARP request.
3034 * Note: for (b), backup slaves are receiving the broadcast ARP
3035 * request, not a reply. This request passes from the sending
3036 * slave through the L2 switch(es) to the receiving slave. Since
3037 * this is checking the request, sip/tip are swapped for validation.
3040 * This is done to avoid endless looping when we can't reach the
3041 * arp_ip_target and fool ourselves with our own arp requests. */
3043 if (bond_is_active_slave(slave))
3044 bond_validate_arp(bond, slave, sip, tip);
3045 else if (curr_active_slave &&
3046 time_after(slave_last_rx(bond, curr_active_slave),
3047 curr_active_slave->last_link_up))
3048 bond_validate_arp(bond, slave, tip, sip);
3049 else if (curr_arp_slave && (arp->ar_op == htons(ARPOP_REPLY)) &&
3050 bond_time_in_interval(bond,
3051 dev_trans_start(curr_arp_slave->dev), 1))
3052 bond_validate_arp(bond, slave, sip, tip);
3055 if (arp != (struct arphdr *)skb->data)
3057 return RX_HANDLER_ANOTHER;
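/* Worked example for case (b) above (hypothetical addresses): the active
 * slave broadcasts a request with sip = 10.0.0.1 (the bond's address) and
 * tip = 10.0.0.254 (an arp_ip_target). A backup slave that sees the same
 * broadcast validates it with the arguments swapped, so the checks still
 * line up: 10.0.0.1 must be one of our addresses and 10.0.0.254 must be a
 * configured target.
 */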
3060 /* function to verify that we are in the arp_interval timeslice; returns true if
3061 * (last_act - arp_interval) <= jiffies <= (last_act + mod * arp_interval +
3062 * arp_interval/2). The arp_interval/2 slack is needed for really fast networks. */
3064 static bool bond_time_in_interval(struct bonding *bond, unsigned long last_act,
3067 int delta_in_ticks = msecs_to_jiffies(bond->params.arp_interval);
3069 return time_in_range(jiffies,
3070 last_act - delta_in_ticks,
3071 last_act + mod * delta_in_ticks + delta_in_ticks/2);
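#if 0
/* Illustrative sketch, not part of the driver: the same window spelled
 * out for hypothetical values arp_interval = 1000 ms, HZ = 1000, mod = 2.
 */
static bool example_time_in_interval(unsigned long last_act, unsigned long now)
{
	int delta_in_ticks = 1000;	/* msecs_to_jiffies(1000) at HZ=1000 */

	/* accepts any jiffies value in [last_act - 1000, last_act + 2500] */
	return time_in_range(now, last_act - delta_in_ticks,
			     last_act + 2 * delta_in_ticks + delta_in_ticks / 2);
}
#endif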
3074 /* This function is called regularly to monitor each slave's link,
3075 * ensuring that traffic is being sent and received when ARP monitoring
3076 * is used in load-balancing mode. If the adapter has been dormant, an
3077 * ARP is transmitted to generate traffic. See bond_activebackup_arp_mon for
3078 * ARP monitoring in active-backup mode. */
3080 static void bond_loadbalance_arp_mon(struct bonding *bond)
3082 struct slave *slave, *oldcurrent;
3083 struct list_head *iter;
3084 int do_failover = 0, slave_state_changed = 0;
3086 if (!bond_has_slaves(bond))
3091 oldcurrent = rcu_dereference(bond->curr_active_slave);
3092 /* See if any of the previous devices are up now (i.e. they have
3093 * xmt and rcv traffic). The curr_active_slave does not come into
3094 * the picture unless it is null. Also, slave->last_link_up is not
3095 * needed here because we send an ARP on each slave and give a slave
3096 * as long as it needs to get the tx/rx within the delta.
3097 * TODO: what about up/down delay in ARP mode? It wasn't here before. */
3100 bond_for_each_slave_rcu(bond, slave, iter) {
3101 unsigned long trans_start = dev_trans_start(slave->dev);
3103 bond_propose_link_state(slave, BOND_LINK_NOCHANGE);
3105 if (slave->link != BOND_LINK_UP) {
3106 if (bond_time_in_interval(bond, trans_start, 1) &&
3107 bond_time_in_interval(bond, slave->last_rx, 1)) {
3109 bond_propose_link_state(slave, BOND_LINK_UP);
3110 slave_state_changed = 1;
3112 /* primary_slave has no meaning in round-robin
3113 * mode. the window of a slave being up and
3114 * curr_active_slave being null after enslaving
3118 slave_info(bond->dev, slave->dev, "link status definitely up\n");
3121 slave_info(bond->dev, slave->dev, "interface is now up\n");
3125 /* slave->link == BOND_LINK_UP */
3127 /* not all switches will respond to an arp request
3128 * when the source ip is 0, so don't take the link down
3129 * if we don't know our ip yet
3131 if (!bond_time_in_interval(bond, trans_start, 2) ||
3132 !bond_time_in_interval(bond, slave->last_rx, 2)) {
3134 bond_propose_link_state(slave, BOND_LINK_DOWN);
3135 slave_state_changed = 1;
3137 if (slave->link_failure_count < UINT_MAX)
3138 slave->link_failure_count++;
3140 slave_info(bond->dev, slave->dev, "interface is now down\n");
3142 if (slave == oldcurrent)
3147 /* note: if switch is in round-robin mode, all links
3148 * must tx arp to ensure all links rx an arp - otherwise
3149 * links may oscillate or not come up at all; if switch is
3150 * in something like xor mode, there is nothing we can
3151 * do - all replies will be rx'ed on same link causing slaves
3152 * to be unstable during low/no traffic periods
3154 if (bond_slave_is_up(slave))
3155 bond_arp_send_all(bond, slave);
3160 if (do_failover || slave_state_changed) {
3161 if (!rtnl_trylock())
3164 bond_for_each_slave(bond, slave, iter) {
3165 if (slave->link_new_state != BOND_LINK_NOCHANGE)
3166 slave->link = slave->link_new_state;
3169 if (slave_state_changed) {
3170 bond_slave_state_change(bond);
3171 if (BOND_MODE(bond) == BOND_MODE_XOR)
3172 bond_update_slave_arr(bond, NULL);
3176 bond_select_active_slave(bond);
3177 unblock_netpoll_tx();
3183 if (bond->params.arp_interval)
3184 queue_delayed_work(bond->wq, &bond->arp_work,
3185 msecs_to_jiffies(bond->params.arp_interval));
3188 /* Called to inspect slaves for active-backup mode ARP monitor link state
3189 * changes. Sets proposed link state in slaves to specify what action
3190 * should take place for the slave. Returns 0 if no changes are found, >0
3191 * if changes to link states must be committed.
3193 * Called with rcu_read_lock held.
3195 static int bond_ab_arp_inspect(struct bonding *bond)
3197 unsigned long trans_start, last_rx;
3198 struct list_head *iter;
3199 struct slave *slave;
3202 bond_for_each_slave_rcu(bond, slave, iter) {
3203 bond_propose_link_state(slave, BOND_LINK_NOCHANGE);
3204 last_rx = slave_last_rx(bond, slave);
3206 if (slave->link != BOND_LINK_UP) {
3207 if (bond_time_in_interval(bond, last_rx, 1)) {
3208 bond_propose_link_state(slave, BOND_LINK_UP);
3210 } else if (slave->link == BOND_LINK_BACK) {
3211 bond_propose_link_state(slave, BOND_LINK_FAIL);
3217 /* Give slaves 2*delta after being enslaved or made
3218 * active. This avoids bouncing, as the last receive
3219 * times need a full ARP monitor cycle to be updated.
3221 if (bond_time_in_interval(bond, slave->last_link_up, 2))
3224 /* Backup slave is down if:
3225 * - No current_arp_slave AND
3226 * - more than 3*delta since last receive AND
3227 * - the bond has an IP address
3229 * Note: a non-null current_arp_slave indicates
3230 * the curr_active_slave went down and we are
3231 * searching for a new one; under this condition
3232 * we only take the curr_active_slave down - this
3233 * gives each slave a chance to tx/rx traffic
3234 * before being taken out
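 *
 * Illustrative numbers (assumption, not from the source): with
 * arp_interval = 1000 the "3*delta" rule proposes BOND_LINK_DOWN for a
 * backup slave that has received nothing for roughly 3.5 seconds (three
 * intervals plus the half-interval slack in bond_time_in_interval).
 */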
3236 if (!bond_is_active_slave(slave) &&
3237 !rcu_access_pointer(bond->current_arp_slave) &&
3238 !bond_time_in_interval(bond, last_rx, 3)) {
3239 bond_propose_link_state(slave, BOND_LINK_DOWN);
3243 /* Active slave is down if:
3244 * - more than 2*delta since transmitting OR
3245 * - (more than 2*delta since receive AND
3246 * the bond has an IP address)
3248 trans_start = dev_trans_start(slave->dev);
3249 if (bond_is_active_slave(slave) &&
3250 (!bond_time_in_interval(bond, trans_start, 2) ||
3251 !bond_time_in_interval(bond, last_rx, 2))) {
3252 bond_propose_link_state(slave, BOND_LINK_DOWN);
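/* Illustrative numbers (assumption): with arp_interval = 1000 the active
 * slave is proposed down once it has either not transmitted or not
 * received within the mod = 2 window, i.e. for roughly 2.5 seconds.
 */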
3260 /* Called to commit link state changes noted by inspection step of
3261 * active-backup mode ARP monitor.
3263 * Called with RTNL held. */
3265 static void bond_ab_arp_commit(struct bonding *bond)
3267 unsigned long trans_start;
3268 struct list_head *iter;
3269 struct slave *slave;
3271 bond_for_each_slave(bond, slave, iter) {
3272 switch (slave->link_new_state) {
3273 case BOND_LINK_NOCHANGE:
3277 trans_start = dev_trans_start(slave->dev);
3278 if (rtnl_dereference(bond->curr_active_slave) != slave ||
3279 (!rtnl_dereference(bond->curr_active_slave) &&
3280 bond_time_in_interval(bond, trans_start, 1))) {
3281 struct slave *current_arp_slave;
3283 current_arp_slave = rtnl_dereference(bond->current_arp_slave);
3284 bond_set_slave_link_state(slave, BOND_LINK_UP,
3285 BOND_SLAVE_NOTIFY_NOW);
3286 if (current_arp_slave) {
3287 bond_set_slave_inactive_flags(
3289 BOND_SLAVE_NOTIFY_NOW);
3290 RCU_INIT_POINTER(bond->current_arp_slave, NULL);
3293 slave_info(bond->dev, slave->dev, "link status definitely up\n");
3295 if (!rtnl_dereference(bond->curr_active_slave) ||
3296 slave == rtnl_dereference(bond->primary_slave))
3303 case BOND_LINK_DOWN:
3304 if (slave->link_failure_count < UINT_MAX)
3305 slave->link_failure_count++;
3307 bond_set_slave_link_state(slave, BOND_LINK_DOWN,
3308 BOND_SLAVE_NOTIFY_NOW);
3309 bond_set_slave_inactive_flags(slave,
3310 BOND_SLAVE_NOTIFY_NOW);
3312 slave_info(bond->dev, slave->dev, "link status definitely down, disabling slave\n");
3314 if (slave == rtnl_dereference(bond->curr_active_slave)) {
3315 RCU_INIT_POINTER(bond->current_arp_slave, NULL);
3321 case BOND_LINK_FAIL:
3322 bond_set_slave_link_state(slave, BOND_LINK_FAIL,
3323 BOND_SLAVE_NOTIFY_NOW);
3324 bond_set_slave_inactive_flags(slave,
3325 BOND_SLAVE_NOTIFY_NOW);
3327 /* A slave has just been enslaved and has become
3328 * the current active slave.
3330 if (rtnl_dereference(bond->curr_active_slave))
3331 RCU_INIT_POINTER(bond->current_arp_slave, NULL);
3335 slave_err(bond->dev, slave->dev,
3336 "impossible: link_new_state %d on slave\n",
3337 slave->link_new_state);
3343 bond_select_active_slave(bond);
3344 unblock_netpoll_tx();
3347 bond_set_carrier(bond);
3350 /* Send ARP probes for active-backup mode ARP monitor.
3352 * Called with rcu_read_lock held.
3354 static bool bond_ab_arp_probe(struct bonding *bond)
3356 struct slave *slave, *before = NULL, *new_slave = NULL,
3357 *curr_arp_slave = rcu_dereference(bond->current_arp_slave),
3358 *curr_active_slave = rcu_dereference(bond->curr_active_slave);
3359 struct list_head *iter;
3361 bool should_notify_rtnl = BOND_SLAVE_NOTIFY_LATER;
3363 if (curr_arp_slave && curr_active_slave)
3364 netdev_info(bond->dev, "PROBE: c_arp %s && cas %s BAD\n",
3365 curr_arp_slave->dev->name,
3366 curr_active_slave->dev->name);
3368 if (curr_active_slave) {
3369 bond_arp_send_all(bond, curr_active_slave);
3370 return should_notify_rtnl;
3373 /* if we don't have a curr_active_slave, search for the next available
3374 * backup slave from the current_arp_slave and make it the candidate
3375 * for becoming the curr_active_slave
3378 if (!curr_arp_slave) {
3379 curr_arp_slave = bond_first_slave_rcu(bond);
3380 if (!curr_arp_slave)
3381 return should_notify_rtnl;
3384 bond_for_each_slave_rcu(bond, slave, iter) {
3385 if (!found && !before && bond_slave_is_up(slave))
3388 if (found && !new_slave && bond_slave_is_up(slave))
3390 /* if the link state is up at this point, we
3391 * mark it down - this can happen if we have
3392 * simultaneous link failures and
3393 * reselect_active_interface doesn't make this
3394 * one the current slave, so it is still marked
3395 * up when it is actually down. */
3397 if (!bond_slave_is_up(slave) && slave->link == BOND_LINK_UP) {
3398 bond_set_slave_link_state(slave, BOND_LINK_DOWN,
3399 BOND_SLAVE_NOTIFY_LATER);
3400 if (slave->link_failure_count < UINT_MAX)
3401 slave->link_failure_count++;
3403 bond_set_slave_inactive_flags(slave,
3404 BOND_SLAVE_NOTIFY_LATER);
3406 slave_info(bond->dev, slave->dev, "backup interface is now down\n");
3408 if (slave == curr_arp_slave)
3412 if (!new_slave && before)
3418 bond_set_slave_link_state(new_slave, BOND_LINK_BACK,
3419 BOND_SLAVE_NOTIFY_LATER);
3420 bond_set_slave_active_flags(new_slave, BOND_SLAVE_NOTIFY_LATER);
3421 bond_arp_send_all(bond, new_slave);
3422 new_slave->last_link_up = jiffies;
3423 rcu_assign_pointer(bond->current_arp_slave, new_slave);
3426 bond_for_each_slave_rcu(bond, slave, iter) {
3427 if (slave->should_notify || slave->should_notify_link) {
3428 should_notify_rtnl = BOND_SLAVE_NOTIFY_NOW;
3432 return should_notify_rtnl;
3435 static void bond_activebackup_arp_mon(struct bonding *bond)
3437 bool should_notify_peers = false;
3438 bool should_notify_rtnl = false;
3441 delta_in_ticks = msecs_to_jiffies(bond->params.arp_interval);
3443 if (!bond_has_slaves(bond))
3448 should_notify_peers = bond_should_notify_peers(bond);
3450 if (bond_ab_arp_inspect(bond)) {
3453 /* Race avoidance with bond_close flush of workqueue */
3454 if (!rtnl_trylock()) {
3456 should_notify_peers = false;
3460 bond_ab_arp_commit(bond);
3466 should_notify_rtnl = bond_ab_arp_probe(bond);
3470 if (bond->params.arp_interval)
3471 queue_delayed_work(bond->wq, &bond->arp_work, delta_in_ticks);
3473 if (should_notify_peers || should_notify_rtnl) {
3474 if (!rtnl_trylock())
3477 if (should_notify_peers) {
3478 bond->send_peer_notif--;
3479 call_netdevice_notifiers(NETDEV_NOTIFY_PEERS,
3482 if (should_notify_rtnl) {
3483 bond_slave_state_notify(bond);
3484 bond_slave_link_notify(bond);
3491 static void bond_arp_monitor(struct work_struct *work)
3493 struct bonding *bond = container_of(work, struct bonding,
3496 if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP)
3497 bond_activebackup_arp_mon(bond);
3499 bond_loadbalance_arp_mon(bond);
3502 /*-------------------------- netdev event handling --------------------------*/
3504 /* Change device name */
3505 static int bond_event_changename(struct bonding *bond)
3507 bond_remove_proc_entry(bond);
3508 bond_create_proc_entry(bond);
3510 bond_debug_reregister(bond);
3515 static int bond_master_netdev_event(unsigned long event,
3516 struct net_device *bond_dev)
3518 struct bonding *event_bond = netdev_priv(bond_dev);
3520 netdev_dbg(bond_dev, "%s called\n", __func__);
3523 case NETDEV_CHANGENAME:
3524 return bond_event_changename(event_bond);
3525 case NETDEV_UNREGISTER:
3526 bond_remove_proc_entry(event_bond);
3527 #ifdef CONFIG_XFRM_OFFLOAD
3528 xfrm_dev_state_flush(dev_net(bond_dev), bond_dev, true);
3529 #endif /* CONFIG_XFRM_OFFLOAD */
3531 case NETDEV_REGISTER:
3532 bond_create_proc_entry(event_bond);
3541 static int bond_slave_netdev_event(unsigned long event,
3542 struct net_device *slave_dev)
3544 struct slave *slave = bond_slave_get_rtnl(slave_dev), *primary;
3545 struct bonding *bond;
3546 struct net_device *bond_dev;
3548 /* A netdev event can be generated while enslaving a device
3549 * before netdev_rx_handler_register is called, in which case
3550 * slave will be NULL. */
3553 netdev_dbg(slave_dev, "%s called on NULL slave\n", __func__);
3557 bond_dev = slave->bond->dev;
3559 primary = rtnl_dereference(bond->primary_slave);
3561 slave_dbg(bond_dev, slave_dev, "%s called\n", __func__);
3564 case NETDEV_UNREGISTER:
3565 if (bond_dev->type != ARPHRD_ETHER)
3566 bond_release_and_destroy(bond_dev, slave_dev);
3568 __bond_release_one(bond_dev, slave_dev, false, true);
3572 /* For 802.3ad mode only:
3573 * Getting invalid Speed/Duplex values here will put the slave
3574 * in a weird state. Mark it as link-fail if the link was
3575 * previously up or link-down if it hasn't yet come up, and
3576 * let link-monitoring (miimon) set it right when correct
3577 * speeds/duplex are available. */
3579 if (bond_update_speed_duplex(slave) &&
3580 BOND_MODE(bond) == BOND_MODE_8023AD) {
3581 if (slave->last_link_up)
3582 slave->link = BOND_LINK_FAIL;
3584 slave->link = BOND_LINK_DOWN;
3587 if (BOND_MODE(bond) == BOND_MODE_8023AD)
3588 bond_3ad_adapter_speed_duplex_changed(slave);
3591 /* Refresh slave-array if applicable!
3592 * If the setup does not use miimon or arpmon (mode-specific!),
3593 * then these events will not cause the slave-array to be
3594 * refreshed. This will cause xmit to use a slave that is not
3595 * usable. Avoid such a situation by refreshing the array at these
3596 * events. If these (miimon/arpmon) parameters are configured
3597 * then the array gets refreshed twice and that should be fine! */
3599 if (bond_mode_can_use_xmit_hash(bond))
3600 bond_update_slave_arr(bond, NULL);
3602 case NETDEV_CHANGEMTU:
3603 /* TODO: Should slaves be allowed to
3604 * independently alter their MTU? For
3605 * an active-backup bond, slaves need
3606 * not be the same type of device, so
3607 * MTUs may vary. For other modes,
3608 * slaves arguably should have the
3609 * same MTUs. To do this, we'd need to
3610 * take over the slave's change_mtu
3611 * function for the duration of their attachment. */
3615 case NETDEV_CHANGENAME:
3616 /* we don't care if we don't have primary set */
3617 if (!bond_uses_primary(bond) ||
3618 !bond->params.primary[0])
3621 if (slave == primary) {
3622 /* slave's name changed - it's no longer the primary */
3623 RCU_INIT_POINTER(bond->primary_slave, NULL);
3624 } else if (!strcmp(slave_dev->name, bond->params.primary)) {
3625 /* we have a new primary slave */
3626 rcu_assign_pointer(bond->primary_slave, slave);
3627 } else { /* we didn't change primary - exit */
3631 netdev_info(bond->dev, "Primary slave changed to %s, reselecting active slave\n",
3632 primary ? slave_dev->name : "none");
3635 bond_select_active_slave(bond);
3636 unblock_netpoll_tx();
3638 case NETDEV_FEAT_CHANGE:
3639 bond_compute_features(bond);
3641 case NETDEV_RESEND_IGMP:
3642 /* Propagate to master device */
3643 call_netdevice_notifiers(event, slave->bond->dev);
3652 /* bond_netdev_event: handle netdev notifier chain events.
3654 * This function receives events for the netdev chain. The caller (an
3655 * ioctl handler calling blocking_notifier_call_chain) holds the necessary
3656 * locks for us to safely manipulate the slave devices (RTNL lock, dev_probe_lock). */
3659 static int bond_netdev_event(struct notifier_block *this,
3660 unsigned long event, void *ptr)
3662 struct net_device *event_dev = netdev_notifier_info_to_dev(ptr);
3664 netdev_dbg(event_dev, "%s received %s\n",
3665 __func__, netdev_cmd_to_name(event));
3667 if (!(event_dev->priv_flags & IFF_BONDING))
3670 if (event_dev->flags & IFF_MASTER) {
3673 ret = bond_master_netdev_event(event, event_dev);
3674 if (ret != NOTIFY_DONE)
3678 if (event_dev->flags & IFF_SLAVE)
3679 return bond_slave_netdev_event(event, event_dev);
3684 static struct notifier_block bond_netdev_notifier = {
3685 .notifier_call = bond_netdev_event,
3688 /*---------------------------- Hashing Policies -----------------------------*/
3690 /* Helper to access data in a packet, with or without a backing skb.
3691 * If skb is given the data is linearized if necessary via pskb_may_pull.
3693 static inline const void *bond_pull_data(struct sk_buff *skb,
3694 const void *data, int hlen, int n)
3696 if (likely(n <= hlen))
3698 else if (skb && likely(pskb_may_pull(skb, n)))
3704 /* L2 hash helper */
3705 static inline u32 bond_eth_hash(struct sk_buff *skb, const void *data, int mhoff, int hlen)
3709 data = bond_pull_data(skb, data, hlen, mhoff + sizeof(struct ethhdr));
3713 ep = (struct ethhdr *)(data + mhoff);
3714 return ep->h_dest[5] ^ ep->h_source[5] ^ be16_to_cpu(ep->h_proto);
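#if 0
/* Illustrative sketch, not part of the driver: the layer-2 hash above for
 * one hypothetical frame (dst MAC ending 0x11, src MAC ending 0x22, IPv4).
 */
static u32 example_bond_eth_hash(void)
{
	u8 dst5 = 0x11, src5 = 0x22;	/* last octet of each MAC */
	u16 proto = 0x0800;		/* ETH_P_IP in host byte order */

	return dst5 ^ src5 ^ proto;	/* 0x11 ^ 0x22 ^ 0x0800 = 0x0833 */
}
#endif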
3717 static bool bond_flow_ip(struct sk_buff *skb, struct flow_keys *fk, const void *data,
3718 int hlen, __be16 l2_proto, int *nhoff, int *ip_proto, bool l34)
3720 const struct ipv6hdr *iph6;
3721 const struct iphdr *iph;
3723 if (l2_proto == htons(ETH_P_IP)) {
3724 data = bond_pull_data(skb, data, hlen, *nhoff + sizeof(*iph));
3728 iph = (const struct iphdr *)(data + *nhoff);
3729 iph_to_flow_copy_v4addrs(fk, iph);
3730 *nhoff += iph->ihl << 2;
3731 if (!ip_is_fragment(iph))
3732 *ip_proto = iph->protocol;
3733 } else if (l2_proto == htons(ETH_P_IPV6)) {
3734 data = bond_pull_data(skb, data, hlen, *nhoff + sizeof(*iph6));
3738 iph6 = (const struct ipv6hdr *)(data + *nhoff);
3739 iph_to_flow_copy_v6addrs(fk, iph6);
3740 *nhoff += sizeof(*iph6);
3741 *ip_proto = iph6->nexthdr;
3746 if (l34 && *ip_proto >= 0)
3747 fk->ports.ports = __skb_flow_get_ports(skb, *nhoff, *ip_proto, data, hlen);
3752 static u32 bond_vlan_srcmac_hash(struct sk_buff *skb, const void *data, int mhoff, int hlen)
3754 u32 srcmac_vendor = 0, srcmac_dev = 0;
3755 struct ethhdr *mac_hdr;
3759 data = bond_pull_data(skb, data, hlen, mhoff + sizeof(struct ethhdr));
3762 mac_hdr = (struct ethhdr *)(data + mhoff);
3764 for (i = 0; i < 3; i++)
3765 srcmac_vendor = (srcmac_vendor << 8) | mac_hdr->h_source[i];
3767 for (i = 3; i < ETH_ALEN; i++)
3768 srcmac_dev = (srcmac_dev << 8) | mac_hdr->h_source[i];
3770 if (skb && skb_vlan_tag_present(skb))
3771 vlan = skb_vlan_tag_get(skb);
3773 return vlan ^ srcmac_vendor ^ srcmac_dev;
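/* Worked example (hypothetical values): src MAC 00:11:22:33:44:55 with
 * VLAN tag 100 gives srcmac_vendor = 0x001122 and srcmac_dev = 0x334455,
 * so the hash is 100 ^ 0x001122 ^ 0x334455 = 0x335513.
 */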
3776 /* Extract the appropriate headers based on bond's xmit policy */
3777 static bool bond_flow_dissect(struct bonding *bond, struct sk_buff *skb, const void *data,
3778 __be16 l2_proto, int nhoff, int hlen, struct flow_keys *fk)
3780 bool l34 = bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER34;
3783 switch (bond->params.xmit_policy) {
3784 case BOND_XMIT_POLICY_ENCAP23:
3785 case BOND_XMIT_POLICY_ENCAP34:
3786 memset(fk, 0, sizeof(*fk));
3787 return __skb_flow_dissect(NULL, skb, &flow_keys_bonding,
3788 fk, data, l2_proto, nhoff, hlen, 0);
3793 fk->ports.ports = 0;
3794 memset(&fk->icmp, 0, sizeof(fk->icmp));
3795 if (!bond_flow_ip(skb, fk, data, hlen, l2_proto, &nhoff, &ip_proto, l34))
3798 /* ICMP error packets contain at least 8 bytes of the header
3799 * of the packet which generated the error. Use this information
3800 * to correlate ICMP error packets within the same flow which
3801 * generated the error.
3803 if (ip_proto == IPPROTO_ICMP || ip_proto == IPPROTO_ICMPV6) {
3804 skb_flow_get_icmp_tci(skb, &fk->icmp, data, nhoff, hlen);
3805 if (ip_proto == IPPROTO_ICMP) {
3806 if (!icmp_is_err(fk->icmp.type))
3809 nhoff += sizeof(struct icmphdr);
3810 } else if (ip_proto == IPPROTO_ICMPV6) {
3811 if (!icmpv6_is_err(fk->icmp.type))
3814 nhoff += sizeof(struct icmp6hdr);
3816 return bond_flow_ip(skb, fk, data, hlen, l2_proto, &nhoff, &ip_proto, l34);
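/* Worked example (hypothetical flow): when the bond transmits an ICMP
 * port-unreachable generated by a UDP flow, the dissection above skips the
 * ICMP header and re-parses the embedded original header, so the error is
 * hashed - and therefore transmitted - on the same slave as the UDP flow
 * that triggered it.
 */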
3822 static u32 bond_ip_hash(u32 hash, struct flow_keys *flow, int xmit_policy)
3824 hash ^= (__force u32)flow_get_u32_dst(flow) ^
3825 (__force u32)flow_get_u32_src(flow);
3826 hash ^= (hash >> 16);
3827 hash ^= (hash >> 8);
3829 /* discard lowest hash bit to deal with the common even ports pattern */
3830 if (xmit_policy == BOND_XMIT_POLICY_LAYER34 ||
3831 xmit_policy == BOND_XMIT_POLICY_ENCAP34)
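/* Worked example (illustrative): flows between even ports tend to produce
 * a biased low bit, so with two slaves hash % 2 could starve one of them;
 * discarding the lowest bit for the L3+4 policies, per the comment above,
 * keeps the distribution even, e.g. 0x2c6a -> 0x1635.
 */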
3837 /* Generate hash based on xmit policy. If @skb is given it is used to linearize
3838 * the data as required, but this function can be used without it if the data is
3839 * known to be linear (e.g. with xdp_buff).
3841 static u32 __bond_xmit_hash(struct bonding *bond, struct sk_buff *skb, const void *data,
3842 __be16 l2_proto, int mhoff, int nhoff, int hlen)
3844 struct flow_keys flow;
3847 if (bond->params.xmit_policy == BOND_XMIT_POLICY_VLAN_SRCMAC)
3848 return bond_vlan_srcmac_hash(skb, data, mhoff, hlen);
3850 if (bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER2 ||
3851 !bond_flow_dissect(bond, skb, data, l2_proto, nhoff, hlen, &flow))
3852 return bond_eth_hash(skb, data, mhoff, hlen);
3854 if (bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER23 ||
3855 bond->params.xmit_policy == BOND_XMIT_POLICY_ENCAP23) {
3856 hash = bond_eth_hash(skb, data, mhoff, hlen);
3859 memcpy(&hash, &flow.icmp, sizeof(hash));
3861 memcpy(&hash, &flow.ports.ports, sizeof(hash));
3864 return bond_ip_hash(hash, &flow, bond->params.xmit_policy);
3868 * bond_xmit_hash - generate a hash value based on the xmit policy
3869 * @bond: bonding device
3870 * @skb: buffer to use for headers
3872 * This function will extract the necessary headers from the skb buffer and use
3873 * them to generate a hash based on the xmit_policy set in the bonding device
3875 u32 bond_xmit_hash(struct bonding *bond, struct sk_buff *skb)
3877 if (bond->params.xmit_policy == BOND_XMIT_POLICY_ENCAP34 &&
3881 return __bond_xmit_hash(bond, skb, skb->data, skb->protocol,
3882 skb_mac_offset(skb), skb_network_offset(skb),
3887 * bond_xmit_hash_xdp - generate a hash value based on the xmit policy
3888 * @bond: bonding device
3889 * @xdp: buffer to use for headers
3891 * The XDP variant of bond_xmit_hash.
3893 static u32 bond_xmit_hash_xdp(struct bonding *bond, struct xdp_buff *xdp)
3897 if (xdp->data + sizeof(struct ethhdr) > xdp->data_end)
3900 eth = (struct ethhdr *)xdp->data;
3902 return __bond_xmit_hash(bond, NULL, xdp->data, eth->h_proto, 0,
3903 sizeof(struct ethhdr), xdp->data_end - xdp->data);
3906 /*-------------------------- Device entry points ----------------------------*/
3908 void bond_work_init_all(struct bonding *bond)
3910 INIT_DELAYED_WORK(&bond->mcast_work,
3911 bond_resend_igmp_join_requests_delayed);
3912 INIT_DELAYED_WORK(&bond->alb_work, bond_alb_monitor);
3913 INIT_DELAYED_WORK(&bond->mii_work, bond_mii_monitor);
3914 INIT_DELAYED_WORK(&bond->arp_work, bond_arp_monitor);
3915 INIT_DELAYED_WORK(&bond->ad_work, bond_3ad_state_machine_handler);
3916 INIT_DELAYED_WORK(&bond->slave_arr_work, bond_slave_arr_handler);
3919 static void bond_work_cancel_all(struct bonding *bond)
3921 cancel_delayed_work_sync(&bond->mii_work);
3922 cancel_delayed_work_sync(&bond->arp_work);
3923 cancel_delayed_work_sync(&bond->alb_work);
3924 cancel_delayed_work_sync(&bond->ad_work);
3925 cancel_delayed_work_sync(&bond->mcast_work);
3926 cancel_delayed_work_sync(&bond->slave_arr_work);
3929 static int bond_open(struct net_device *bond_dev)
3931 struct bonding *bond = netdev_priv(bond_dev);
3932 struct list_head *iter;
3933 struct slave *slave;
3935 /* reset slave->backup and slave->inactive */
3936 if (bond_has_slaves(bond)) {
3937 bond_for_each_slave(bond, slave, iter) {
3938 if (bond_uses_primary(bond) &&
3939 slave != rcu_access_pointer(bond->curr_active_slave)) {
3940 bond_set_slave_inactive_flags(slave,
3941 BOND_SLAVE_NOTIFY_NOW);
3942 } else if (BOND_MODE(bond) != BOND_MODE_8023AD) {
3943 bond_set_slave_active_flags(slave,
3944 BOND_SLAVE_NOTIFY_NOW);
3949 if (bond_is_lb(bond)) {
3950 /* bond_alb_initialize must be called before the timer is started. */
3953 if (bond_alb_initialize(bond, (BOND_MODE(bond) == BOND_MODE_ALB)))
3955 if (bond->params.tlb_dynamic_lb || BOND_MODE(bond) == BOND_MODE_ALB)
3956 queue_delayed_work(bond->wq, &bond->alb_work, 0);
3959 if (bond->params.miimon) /* link check interval, in milliseconds. */
3960 queue_delayed_work(bond->wq, &bond->mii_work, 0);
3962 if (bond->params.arp_interval) { /* arp interval, in milliseconds. */
3963 queue_delayed_work(bond->wq, &bond->arp_work, 0);
3964 bond->recv_probe = bond_arp_rcv;
3967 if (BOND_MODE(bond) == BOND_MODE_8023AD) {
3968 queue_delayed_work(bond->wq, &bond->ad_work, 0);
3969 /* register to receive LACPDUs */
3970 bond->recv_probe = bond_3ad_lacpdu_recv;
3971 bond_3ad_initiate_agg_selection(bond, 1);
3974 if (bond_mode_can_use_xmit_hash(bond))
3975 bond_update_slave_arr(bond, NULL);
3980 static int bond_close(struct net_device *bond_dev)
3982 struct bonding *bond = netdev_priv(bond_dev);
3984 bond_work_cancel_all(bond);
3985 bond->send_peer_notif = 0;
3986 if (bond_is_lb(bond))
3987 bond_alb_deinitialize(bond);
3988 bond->recv_probe = NULL;
3993 /* fold stats, assuming all rtnl_link_stats64 fields are u64, while
3994 * allowing that some drivers can provide 32bit values only. */
3996 static void bond_fold_stats(struct rtnl_link_stats64 *_res,
3997 const struct rtnl_link_stats64 *_new,
3998 const struct rtnl_link_stats64 *_old)
4000 const u64 *new = (const u64 *)_new;
4001 const u64 *old = (const u64 *)_old;
4002 u64 *res = (u64 *)_res;
4005 for (i = 0; i < sizeof(*_res) / sizeof(u64); i++) {
4008 s64 delta = nv - ov;
4010 /* detects if this particular field is 32bit only */
4011 if (((nv | ov) >> 32) == 0)
4012 delta = (s64)(s32)((u32)nv - (u32)ov);
4014 /* filter anomalies, some drivers reset their stats
4015 * at down/up events.
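 *
 * Worked example (illustrative): a 32bit counter that wrapped, with
 * ov = 0xfffffff0 and nv = 0x10, would naively give a huge negative 64bit
 * delta; the cast above instead computes
 * (s64)(s32)((u32)0x10 - (u32)0xfffffff0) = 0x20, i.e. +32.
 */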
4022 #ifdef CONFIG_LOCKDEP
4023 static int bond_get_lowest_level_rcu(struct net_device *dev)
4025 struct net_device *ldev, *next, *now, *dev_stack[MAX_NEST_DEV + 1];
4026 struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1];
4027 int cur = 0, max = 0;
4030 iter = &dev->adj_list.lower;
4035 ldev = netdev_next_lower_dev_rcu(now, &iter);
4040 niter = &ldev->adj_list.lower;
4041 dev_stack[cur] = now;
4042 iter_stack[cur++] = iter;
4051 next = dev_stack[--cur];
4052 niter = iter_stack[cur];
4063 static void bond_get_stats(struct net_device *bond_dev,
4064 struct rtnl_link_stats64 *stats)
4066 struct bonding *bond = netdev_priv(bond_dev);
4067 struct rtnl_link_stats64 temp;
4068 struct list_head *iter;
4069 struct slave *slave;
4074 #ifdef CONFIG_LOCKDEP
4075 nest_level = bond_get_lowest_level_rcu(bond_dev);
4078 spin_lock_nested(&bond->stats_lock, nest_level);
4079 memcpy(stats, &bond->bond_stats, sizeof(*stats));
4081 bond_for_each_slave_rcu(bond, slave, iter) {
4082 const struct rtnl_link_stats64 *new =
4083 dev_get_stats(slave->dev, &temp);
4085 bond_fold_stats(stats, new, &slave->slave_stats);
4087 /* save off the slave stats for the next run */
4088 memcpy(&slave->slave_stats, new, sizeof(*new));
4091 memcpy(&bond->bond_stats, stats, sizeof(*stats));
4092 spin_unlock(&bond->stats_lock);
4096 static int bond_eth_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd)
4098 struct bonding *bond = netdev_priv(bond_dev);
4099 struct mii_ioctl_data *mii = NULL;
4102 netdev_dbg(bond_dev, "bond_eth_ioctl: cmd=%d\n", cmd);
4113 /* We do this again just in case we were called by SIOCGMIIREG
4114 * instead of SIOCGMIIPHY.
4120 if (mii->reg_num == 1) {
4122 if (netif_carrier_ok(bond->dev))
4123 mii->val_out = BMSR_LSTATUS;
4134 static int bond_do_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd)
4136 struct bonding *bond = netdev_priv(bond_dev);
4137 struct net_device *slave_dev = NULL;
4138 struct ifbond k_binfo;
4139 struct ifbond __user *u_binfo = NULL;
4140 struct ifslave k_sinfo;
4141 struct ifslave __user *u_sinfo = NULL;
4142 struct bond_opt_value newval;
4146 netdev_dbg(bond_dev, "bond_ioctl: cmd=%d\n", cmd);
4149 case SIOCBONDINFOQUERY:
4150 u_binfo = (struct ifbond __user *)ifr->ifr_data;
4152 if (copy_from_user(&k_binfo, u_binfo, sizeof(ifbond)))
4155 bond_info_query(bond_dev, &k_binfo);
4156 if (copy_to_user(u_binfo, &k_binfo, sizeof(ifbond)))
4160 case SIOCBONDSLAVEINFOQUERY:
4161 u_sinfo = (struct ifslave __user *)ifr->ifr_data;
4163 if (copy_from_user(&k_sinfo, u_sinfo, sizeof(ifslave)))
4166 res = bond_slave_info_query(bond_dev, &k_sinfo);
4168 copy_to_user(u_sinfo, &k_sinfo, sizeof(ifslave)))
4176 net = dev_net(bond_dev);
4178 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
4181 slave_dev = __dev_get_by_name(net, ifr->ifr_slave);
4183 slave_dbg(bond_dev, slave_dev, "slave_dev=%p:\n", slave_dev);
4189 case SIOCBONDENSLAVE:
4190 res = bond_enslave(bond_dev, slave_dev, NULL);
4192 case SIOCBONDRELEASE:
4193 res = bond_release(bond_dev, slave_dev);
4195 case SIOCBONDSETHWADDR:
4196 res = bond_set_dev_addr(bond_dev, slave_dev);
4198 case SIOCBONDCHANGEACTIVE:
4199 bond_opt_initstr(&newval, slave_dev->name);
4200 res = __bond_opt_set_notify(bond, BOND_OPT_ACTIVE_SLAVE,
4210 static int bond_siocdevprivate(struct net_device *bond_dev, struct ifreq *ifr,
4211 void __user *data, int cmd)
4213 struct ifreq ifrdata = { .ifr_data = data };
4216 case BOND_INFO_QUERY_OLD:
4217 return bond_do_ioctl(bond_dev, &ifrdata, SIOCBONDINFOQUERY);
4218 case BOND_SLAVE_INFO_QUERY_OLD:
4219 return bond_do_ioctl(bond_dev, &ifrdata, SIOCBONDSLAVEINFOQUERY);
4220 case BOND_ENSLAVE_OLD:
4221 return bond_do_ioctl(bond_dev, ifr, SIOCBONDENSLAVE);
4222 case BOND_RELEASE_OLD:
4223 return bond_do_ioctl(bond_dev, ifr, SIOCBONDRELEASE);
4224 case BOND_SETHWADDR_OLD:
4225 return bond_do_ioctl(bond_dev, ifr, SIOCBONDSETHWADDR);
4226 case BOND_CHANGE_ACTIVE_OLD:
4227 return bond_do_ioctl(bond_dev, ifr, SIOCBONDCHANGEACTIVE);
4233 static void bond_change_rx_flags(struct net_device *bond_dev, int change)
4235 struct bonding *bond = netdev_priv(bond_dev);
4237 if (change & IFF_PROMISC)
4238 bond_set_promiscuity(bond,
4239 bond_dev->flags & IFF_PROMISC ? 1 : -1);
4241 if (change & IFF_ALLMULTI)
4242 bond_set_allmulti(bond,
4243 bond_dev->flags & IFF_ALLMULTI ? 1 : -1);
4246 static void bond_set_rx_mode(struct net_device *bond_dev)
4248 struct bonding *bond = netdev_priv(bond_dev);
4249 struct list_head *iter;
4250 struct slave *slave;
4253 if (bond_uses_primary(bond)) {
4254 slave = rcu_dereference(bond->curr_active_slave);
4256 dev_uc_sync(slave->dev, bond_dev);
4257 dev_mc_sync(slave->dev, bond_dev);
4260 bond_for_each_slave_rcu(bond, slave, iter) {
4261 dev_uc_sync_multiple(slave->dev, bond_dev);
4262 dev_mc_sync_multiple(slave->dev, bond_dev);
4268 static int bond_neigh_init(struct neighbour *n)
4270 struct bonding *bond = netdev_priv(n->dev);
4271 const struct net_device_ops *slave_ops;
4272 struct neigh_parms parms;
4273 struct slave *slave;
4277 slave = bond_first_slave_rcu(bond);
4280 slave_ops = slave->dev->netdev_ops;
4281 if (!slave_ops->ndo_neigh_setup)
4284 /* TODO: find another way [1] to implement this.
4285 * Passing a zeroed structure is fragile,
4286 * but at least we do not pass garbage.
4288 * [1] One way would be that ndo_neigh_setup() never touch
4289 * struct neigh_parms, but propagate the new neigh_setup()
4290 * back to ___neigh_create() / neigh_parms_alloc()
4292 memset(&parms, 0, sizeof(parms));
4293 ret = slave_ops->ndo_neigh_setup(slave->dev, &parms);
4298 if (parms.neigh_setup)
4299 ret = parms.neigh_setup(n);
4305 /* The bonding ndo_neigh_setup is called at init time before any
4306 * slave exists. So we must declare proxy setup function which will
4307 * be used at run time to resolve the actual slave neigh param setup.
4309 * It's also called by master devices (such as vlans) to setup their
4310 * underlying devices. In that case - do nothing, we're already set up from our own master. */
4313 static int bond_neigh_setup(struct net_device *dev,
4314 struct neigh_parms *parms)
4316 /* modify only our neigh_parms */
4317 if (parms->dev == dev)
4318 parms->neigh_setup = bond_neigh_init;
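/* Illustrative scenario (not from the source): a VLAN device stacked on
 * top of the bond also calls this ndo with its own parms, but there
 * parms->dev is the VLAN device rather than the bond, so the check above
 * leaves those parms untouched.
 */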
/* Change the MTU of all of a master's slaves to match the master */
static int bond_change_mtu(struct net_device *bond_dev, int new_mtu)
{
	struct bonding *bond = netdev_priv(bond_dev);
	struct slave *slave, *rollback_slave;
	struct list_head *iter;
	int res = 0;

	netdev_dbg(bond_dev, "bond=%p, new_mtu=%d\n", bond, new_mtu);

	bond_for_each_slave(bond, slave, iter) {
		slave_dbg(bond_dev, slave->dev, "s %p c_m %p\n",
			  slave, slave->dev->netdev_ops->ndo_change_mtu);

		res = dev_set_mtu(slave->dev, new_mtu);
		if (res) {
			/* If we failed to set the slave's mtu to the new value
			 * we must abort the operation even in ACTIVE_BACKUP
			 * mode, because if we allow the backup slaves to have
			 * different mtu values than the active slave we'll
			 * need to change their mtu when doing a failover. That
			 * means changing their mtu from timer context, which
			 * is probably not a good idea.
			 */
			slave_dbg(bond_dev, slave->dev, "err %d setting mtu to %d\n",
				  res, new_mtu);
			goto unwind;
		}
	}

	bond_dev->mtu = new_mtu;

	return 0;

unwind:
	/* unwind from head to the slave that failed */
	bond_for_each_slave(bond, rollback_slave, iter) {
		int tmp_res;

		if (rollback_slave == slave)
			break;

		tmp_res = dev_set_mtu(rollback_slave->dev, bond_dev->mtu);
		if (tmp_res)
			slave_dbg(bond_dev, rollback_slave->dev, "unwind err %d\n",
				  tmp_res);
	}

	return res;
}

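/* The unwind above is a common rollback idiom: apply a change to each
 * slave in order and, on the first failure, walk the list again from
 * the head and revert everything already changed, stopping at the
 * slave that failed.  A minimal standalone sketch of the shape
 * (struct item, apply() and revert() are hypothetical, not part of
 * this driver):
 */
#if 0
static int apply_all(struct list_head *items, int new_val)
{
	struct item *it, *rb;
	int res = 0;

	list_for_each_entry(it, items, node) {
		res = apply(it, new_val);	/* may fail partway through */
		if (res)
			goto unwind;
	}
	return 0;

unwind:
	list_for_each_entry(rb, items, node) {
		if (rb == it)		/* reached the one that failed */
			break;
		revert(rb);		/* best effort; errors only logged */
	}
	return res;
}
#endif
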
/* Change HW address
 *
 * Note that many devices must be down to change the HW address, and
 * downing the master releases all slaves.  We can make bonds full of
 * bonding devices to test this, however.
 */
static int bond_set_mac_address(struct net_device *bond_dev, void *addr)
{
	struct bonding *bond = netdev_priv(bond_dev);
	struct slave *slave, *rollback_slave;
	struct sockaddr_storage *ss = addr, tmp_ss;
	struct list_head *iter;
	int res = 0;

	if (BOND_MODE(bond) == BOND_MODE_ALB)
		return bond_alb_set_mac_address(bond_dev, addr);

	netdev_dbg(bond_dev, "%s: bond=%p\n", __func__, bond);

	/* If fail_over_mac is enabled, do nothing and return success.
	 * Returning an error causes ifenslave to fail.
	 */
	if (bond->params.fail_over_mac &&
	    BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP)
		return 0;

	if (!is_valid_ether_addr(ss->__data))
		return -EADDRNOTAVAIL;

	bond_for_each_slave(bond, slave, iter) {
		slave_dbg(bond_dev, slave->dev, "%s: slave=%p\n",
			  __func__, slave);
		res = dev_set_mac_address(slave->dev, addr, NULL);
		if (res) {
			/* TODO: consider downing the slave
			 * and retry ?
			 * User should expect communications
			 * breakage anyway until ARP finishes
			 * updating, so...
			 */
			slave_dbg(bond_dev, slave->dev, "%s: err %d\n",
				  __func__, res);
			goto unwind;
		}
	}

	/* success */
	memcpy(bond_dev->dev_addr, ss->__data, bond_dev->addr_len);
	return 0;

unwind:
	memcpy(tmp_ss.__data, bond_dev->dev_addr, bond_dev->addr_len);
	tmp_ss.ss_family = bond_dev->type;

	/* unwind from head to the slave that failed */
	bond_for_each_slave(bond, rollback_slave, iter) {
		int tmp_res;

		if (rollback_slave == slave)
			break;

		tmp_res = dev_set_mac_address(rollback_slave->dev,
					      (struct sockaddr *)&tmp_ss, NULL);
		if (tmp_res)
			slave_dbg(bond_dev, rollback_slave->dev, "%s: unwind err %d\n",
				  __func__, tmp_res);
	}

	return res;
}

/**
 * bond_get_slave_by_id - get xmit slave with slave_id
 * @bond: bonding device that is transmitting
 * @slave_id: slave id up to slave_cnt-1 through which to transmit
 *
 * This function tries to get the slave with slave_id but in case
 * it fails, it tries to find the first available slave for transmission.
 */
static struct slave *bond_get_slave_by_id(struct bonding *bond,
					  int slave_id)
{
	struct list_head *iter;
	struct slave *slave;
	int i = slave_id;

	/* Here we start from the slave with slave_id */
	bond_for_each_slave_rcu(bond, slave, iter) {
		if (--i < 0) {
			if (bond_slave_can_tx(slave))
				return slave;
		}
	}

	/* Here we start from the first slave up to slave_id */
	i = slave_id;
	bond_for_each_slave_rcu(bond, slave, iter) {
		if (--i < 0)
			break;
		if (bond_slave_can_tx(slave))
			return slave;
	}
	/* no slave that can tx has been found */
	return NULL;
}

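/* Over a plain array the same selection would be a wrap-around linear
 * search: with four usable slots and slave_id == 2 the probe order is
 * 2, 3, 0, 1.  Illustrative sketch only (can_tx[] is a hypothetical
 * stand-in for bond_slave_can_tx() on a slave list):
 */
#if 0
static int pick_id(const bool can_tx[], int count, int slave_id)
{
	int i;

	for (i = 0; i < count; i++) {
		int idx = (slave_id + i) % count;

		if (can_tx[idx])
			return idx;
	}
	return -1;	/* no slave that can tx */
}
#endif
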
/**
 * bond_rr_gen_slave_id - generate slave id based on packets_per_slave
 * @bond: bonding device to use
 *
 * Based on the value of the bonding device's packets_per_slave parameter
 * this function generates a slave id, which is usually used as the next
 * slave to transmit through.
 */
static u32 bond_rr_gen_slave_id(struct bonding *bond)
{
	u32 slave_id;
	struct reciprocal_value reciprocal_packets_per_slave;
	int packets_per_slave = bond->params.packets_per_slave;

	switch (packets_per_slave) {
	case 0:
		slave_id = prandom_u32();
		break;
	case 1:
		slave_id = this_cpu_inc_return(*bond->rr_tx_counter);
		break;
	default:
		reciprocal_packets_per_slave =
			bond->params.reciprocal_packets_per_slave;
		slave_id = this_cpu_inc_return(*bond->rr_tx_counter);
		slave_id = reciprocal_divide(slave_id,
					     reciprocal_packets_per_slave);
		break;
	}

	return slave_id;
}

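/* Worked example for the default case above (illustrative only): with
 * packets_per_slave == 3, reciprocal_divide() maps counters 0..2 to
 * id 0, 3..5 to id 1, and so on - the id advances once every three
 * packets, so each slave transmits three consecutive packets before
 * round-robin moves on.  The caller then reduces the id modulo the
 * slave count.
 */
#if 0
static u32 rr_id_for(u32 counter, struct reciprocal_value r_pps)
{
	/* same result as counter / packets_per_slave, without a division */
	return reciprocal_divide(counter, r_pps);
}
#endif
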
static struct slave *bond_xmit_roundrobin_slave_get(struct bonding *bond,
						    struct sk_buff *skb)
{
	struct slave *slave;
	int slave_cnt;
	u32 slave_id;

	/* Start with the curr_active_slave that joined the bond as the
	 * default for sending IGMP traffic.  For failover purposes one
	 * needs to maintain some consistency for the interface that will
	 * send the join/membership reports.  The curr_active_slave found
	 * will send all of this type of traffic.
	 */
	if (skb->protocol == htons(ETH_P_IP)) {
		int noff = skb_network_offset(skb);
		struct iphdr *iph;

		if (unlikely(!pskb_may_pull(skb, noff + sizeof(*iph))))
			goto non_igmp;

		iph = ip_hdr(skb);
		if (iph->protocol == IPPROTO_IGMP) {
			slave = rcu_dereference(bond->curr_active_slave);
			if (slave)
				return slave;
			return bond_get_slave_by_id(bond, 0);
		}
	}

non_igmp:
	slave_cnt = READ_ONCE(bond->slave_cnt);
	if (likely(slave_cnt)) {
		slave_id = bond_rr_gen_slave_id(bond) % slave_cnt;
		return bond_get_slave_by_id(bond, slave_id);
	}
	return NULL;
}

static struct slave *bond_xdp_xmit_roundrobin_slave_get(struct bonding *bond,
							struct xdp_buff *xdp)
{
	struct slave *slave;
	int slave_cnt;
	u32 slave_id;
	const struct ethhdr *eth;
	void *data = xdp->data;

	if (data + sizeof(struct ethhdr) > xdp->data_end)
		goto non_igmp;

	eth = (struct ethhdr *)data;
	data += sizeof(struct ethhdr);

	/* See comment on IGMP in bond_xmit_roundrobin_slave_get() */
	if (eth->h_proto == htons(ETH_P_IP)) {
		const struct iphdr *iph;

		if (data + sizeof(struct iphdr) > xdp->data_end)
			goto non_igmp;

		iph = (struct iphdr *)data;

		if (iph->protocol == IPPROTO_IGMP) {
			slave = rcu_dereference(bond->curr_active_slave);
			if (slave)
				return slave;
			return bond_get_slave_by_id(bond, 0);
		}
	}

non_igmp:
	slave_cnt = READ_ONCE(bond->slave_cnt);
	if (likely(slave_cnt)) {
		slave_id = bond_rr_gen_slave_id(bond) % slave_cnt;
		return bond_get_slave_by_id(bond, slave_id);
	}
	return NULL;
}

static netdev_tx_t bond_xmit_roundrobin(struct sk_buff *skb,
					struct net_device *bond_dev)
{
	struct bonding *bond = netdev_priv(bond_dev);
	struct slave *slave;

	slave = bond_xmit_roundrobin_slave_get(bond, skb);
	if (likely(slave))
		return bond_dev_queue_xmit(bond, skb, slave->dev);

	return bond_tx_drop(bond_dev, skb);
}

static struct slave *bond_xmit_activebackup_slave_get(struct bonding *bond)
{
	return rcu_dereference(bond->curr_active_slave);
}

/* In active-backup mode, we know that bond->curr_active_slave is always valid if
 * the bond has a usable interface.
 */
static netdev_tx_t bond_xmit_activebackup(struct sk_buff *skb,
					  struct net_device *bond_dev)
{
	struct bonding *bond = netdev_priv(bond_dev);
	struct slave *slave;

	slave = bond_xmit_activebackup_slave_get(bond);
	if (slave)
		return bond_dev_queue_xmit(bond, skb, slave->dev);

	return bond_tx_drop(bond_dev, skb);
}

/* Use this to update slave_array when (a) it's not appropriate to update
 * slave_array right away (note that update_slave_array() may sleep)
 * and / or (b) RTNL is not held.
 */
void bond_slave_arr_work_rearm(struct bonding *bond, unsigned long delay)
{
	queue_delayed_work(bond->wq, &bond->slave_arr_work, delay);
}

/* Slave array work handler. Holds only RTNL */
static void bond_slave_arr_handler(struct work_struct *work)
{
	struct bonding *bond = container_of(work, struct bonding,
					    slave_arr_work.work);
	int ret;

	if (!rtnl_trylock())
		goto err;

	ret = bond_update_slave_arr(bond, NULL);
	rtnl_unlock();
	if (ret) {
		pr_warn_ratelimited("Failed to update slave array from WT\n");
		goto err;
	}
	return;

err:
	bond_slave_arr_work_rearm(bond, 1);
}

static void bond_skip_slave(struct bond_up_slave *slaves,
			    struct slave *skipslave)
{
	int idx;

	/* Rare situation where caller has asked to skip a specific
	 * slave but allocation failed (most likely!). BTW this is
	 * only possible when the call is initiated from
	 * __bond_release_one(). In this situation; overwrite the
	 * skipslave entry in the array with the last entry from the
	 * array to avoid a situation where the xmit path may choose
	 * this to-be-skipped slave to send a packet out.
	 */
	for (idx = 0; slaves && idx < slaves->count; idx++) {
		if (skipslave == slaves->arr[idx]) {
			slaves->arr[idx] =
				slaves->arr[slaves->count - 1];
			slaves->count--;
			break;
		}
	}
}

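/* The overwrite above is the classic O(1) "swap with last" removal for
 * an unordered array: copy the final element over the victim and
 * shrink the count, instead of shifting the whole tail down.  Sketch
 * in isolation (plain int array, hypothetical):
 */
#if 0
static void remove_unordered(int *arr, int *count, int idx)
{
	arr[idx] = arr[*count - 1];	/* element order is not preserved */
	(*count)--;
}
#endif
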
static void bond_set_slave_arr(struct bonding *bond,
			       struct bond_up_slave *usable_slaves,
			       struct bond_up_slave *all_slaves)
{
	struct bond_up_slave *usable, *all;

	usable = rtnl_dereference(bond->usable_slaves);
	rcu_assign_pointer(bond->usable_slaves, usable_slaves);
	kfree_rcu(usable, rcu);

	all = rtnl_dereference(bond->all_slaves);
	rcu_assign_pointer(bond->all_slaves, all_slaves);
	kfree_rcu(all, rcu);
}

static void bond_reset_slave_arr(struct bonding *bond)
{
	struct bond_up_slave *usable, *all;

	usable = rtnl_dereference(bond->usable_slaves);
	if (usable) {
		RCU_INIT_POINTER(bond->usable_slaves, NULL);
		kfree_rcu(usable, rcu);
	}

	all = rtnl_dereference(bond->all_slaves);
	if (all) {
		RCU_INIT_POINTER(bond->all_slaves, NULL);
		kfree_rcu(all, rcu);
	}
}

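/* Both helpers follow the usual RCU publish/retire pattern: the writer
 * (under RTNL) swaps the pointer, and kfree_rcu() frees the old array
 * only after all readers that might still hold it have left their RCU
 * read-side critical sections.  Minimal sketch, with a hypothetical
 * struct cfg carrying its own rcu_head:
 */
#if 0
struct cfg {
	struct rcu_head rcu;
	/* ... payload ... */
};

static void publish_cfg(struct cfg __rcu **slot, struct cfg *newc)
{
	struct cfg *old;

	old = rtnl_dereference(*slot);		/* writer side, RTNL held */
	rcu_assign_pointer(*slot, newc);	/* readers now see newc */
	kfree_rcu(old, rcu);			/* freed after a grace period */
}
#endif
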
/* Build the usable slaves array in control path for modes that use xmit-hash
 * to determine the slave interface -
 * (a) BOND_MODE_8023AD
 * (b) BOND_MODE_XOR
 * (c) (BOND_MODE_TLB || BOND_MODE_ALB) && tlb_dynamic_lb == 0
 *
 * The caller is expected to hold RTNL only and NO other lock!
 */
int bond_update_slave_arr(struct bonding *bond, struct slave *skipslave)
{
	struct bond_up_slave *usable_slaves = NULL, *all_slaves = NULL;
	struct slave *slave;
	struct list_head *iter;
	int agg_id = 0;
	int ret = 0;

	might_sleep();

	usable_slaves = kzalloc(struct_size(usable_slaves, arr,
					    bond->slave_cnt), GFP_KERNEL);
	all_slaves = kzalloc(struct_size(all_slaves, arr,
					 bond->slave_cnt), GFP_KERNEL);
	if (!usable_slaves || !all_slaves) {
		ret = -ENOMEM;
		goto out;
	}
	if (BOND_MODE(bond) == BOND_MODE_8023AD) {
		struct ad_info ad_info;

		spin_lock_bh(&bond->mode_lock);
		if (bond_3ad_get_active_agg_info(bond, &ad_info)) {
			spin_unlock_bh(&bond->mode_lock);
			pr_debug("bond_3ad_get_active_agg_info failed\n");
			/* No active aggregator means it's not safe to use
			 * the previous array.
			 */
			bond_reset_slave_arr(bond);
			goto out;
		}
		spin_unlock_bh(&bond->mode_lock);
		agg_id = ad_info.aggregator_id;
	}
	bond_for_each_slave(bond, slave, iter) {
		if (skipslave == slave)
			continue;

		all_slaves->arr[all_slaves->count++] = slave;
		if (BOND_MODE(bond) == BOND_MODE_8023AD) {
			struct aggregator *agg;

			agg = SLAVE_AD_INFO(slave)->port.aggregator;
			if (!agg || agg->aggregator_identifier != agg_id)
				continue;
		}
		if (!bond_slave_can_tx(slave))
			continue;

		slave_dbg(bond->dev, slave->dev, "Adding slave to tx hash array[%d]\n",
			  usable_slaves->count);

		usable_slaves->arr[usable_slaves->count++] = slave;
	}

	bond_set_slave_arr(bond, usable_slaves, all_slaves);
	return ret;
out:
	if (ret != 0 && skipslave) {
		bond_skip_slave(rtnl_dereference(bond->all_slaves),
				skipslave);
		bond_skip_slave(rtnl_dereference(bond->usable_slaves),
				skipslave);
	}
	kfree_rcu(all_slaves, rcu);
	kfree_rcu(usable_slaves, rcu);

	return ret;
}

static struct slave *bond_xmit_3ad_xor_slave_get(struct bonding *bond,
						 struct sk_buff *skb,
						 struct bond_up_slave *slaves)
{
	struct slave *slave;
	unsigned int count;
	u32 hash;

	hash = bond_xmit_hash(bond, skb);
	count = slaves ? READ_ONCE(slaves->count) : 0;
	if (unlikely(!count))
		return NULL;

	slave = slaves->arr[hash % count];
	return slave;
}

static struct slave *bond_xdp_xmit_3ad_xor_slave_get(struct bonding *bond,
						     struct xdp_buff *xdp)
{
	struct bond_up_slave *slaves;
	unsigned int count;
	u32 hash;

	hash = bond_xmit_hash_xdp(bond, xdp);
	slaves = rcu_dereference(bond->usable_slaves);
	count = slaves ? READ_ONCE(slaves->count) : 0;
	if (unlikely(!count))
		return NULL;

	return slaves->arr[hash % count];
}

/* Use this Xmit function for 3AD as well as XOR modes. The current
 * usable slave array is formed in the control path. The xmit function
 * just calculates hash and sends the packet out.
 */
static netdev_tx_t bond_3ad_xor_xmit(struct sk_buff *skb,
				     struct net_device *dev)
{
	struct bonding *bond = netdev_priv(dev);
	struct bond_up_slave *slaves;
	struct slave *slave;

	slaves = rcu_dereference(bond->usable_slaves);
	slave = bond_xmit_3ad_xor_slave_get(bond, skb, slaves);
	if (likely(slave))
		return bond_dev_queue_xmit(bond, skb, slave->dev);

	return bond_tx_drop(dev, skb);
}

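/* All the policy lives in bond_update_slave_arr(); the hot path just
 * indexes a prebuilt array with hash % count, so a flow with a stable
 * xmit hash keeps hitting the same slave until the array is rebuilt.
 * The selection step in isolation (hypothetical name):
 */
#if 0
static struct slave *pick_by_hash(struct bond_up_slave *slaves, u32 hash)
{
	unsigned int count = slaves ? READ_ONCE(slaves->count) : 0;

	return count ? slaves->arr[hash % count] : NULL;
}
#endif
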
/* in broadcast mode, we send everything to all usable interfaces. */
static netdev_tx_t bond_xmit_broadcast(struct sk_buff *skb,
				       struct net_device *bond_dev)
{
	struct bonding *bond = netdev_priv(bond_dev);
	struct slave *slave = NULL;
	struct list_head *iter;
	bool xmit_suc = false;
	bool skb_used = false;

	bond_for_each_slave_rcu(bond, slave, iter) {
		struct sk_buff *skb2;

		if (!(bond_slave_is_up(slave) && slave->link == BOND_LINK_UP))
			continue;

		if (bond_is_last_slave(bond, slave)) {
			skb2 = skb;
			skb_used = true;
		} else {
			skb2 = skb_clone(skb, GFP_ATOMIC);
			if (!skb2) {
				net_err_ratelimited("%s: Error: %s: skb_clone() failed\n",
						    bond_dev->name, __func__);
				continue;
			}
		}

		if (bond_dev_queue_xmit(bond, skb2, slave->dev) == NETDEV_TX_OK)
			xmit_suc = true;
	}

	if (!skb_used)
		dev_kfree_skb_any(skb);

	if (xmit_suc)
		return NETDEV_TX_OK;

	atomic_long_inc(&bond_dev->tx_dropped);
	return NET_XMIT_DROP;
}

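/* Note the small optimisation above: every slave except the last gets
 * a clone, while the last transmitting slave consumes the original
 * skb, saving one skb_clone() per broadcast.  Shape of the loop in
 * isolation (hypothetical xmit_one(); error handling elided):
 */
#if 0
static void xmit_to_all(struct sk_buff *skb, struct net_device **devs, int n)
{
	int i;

	for (i = 0; i < n; i++) {
		struct sk_buff *skb2 = (i == n - 1) ?
			skb : skb_clone(skb, GFP_ATOMIC);

		if (skb2)
			xmit_one(skb2, devs[i]);	/* consumes skb2 */
	}
}
#endif
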
/*------------------------- Device initialization ---------------------------*/

/* Lookup the slave that corresponds to a qid */
static inline int bond_slave_override(struct bonding *bond,
				      struct sk_buff *skb)
{
	struct slave *slave = NULL;
	struct list_head *iter;

	if (!skb_rx_queue_recorded(skb))
		return 1;

	/* Find out if any slaves have the same mapping as this skb. */
	bond_for_each_slave_rcu(bond, slave, iter) {
		if (slave->queue_id == skb_get_queue_mapping(skb)) {
			if (bond_slave_is_up(slave) &&
			    slave->link == BOND_LINK_UP) {
				bond_dev_queue_xmit(bond, skb, slave->dev);
				return 0;
			}
			/* If the slave isn't UP, use default transmit policy. */
			break;
		}
	}

	return 1;
}

static u16 bond_select_queue(struct net_device *dev, struct sk_buff *skb,
			     struct net_device *sb_dev)
{
	/* This helper function exists to help dev_pick_tx get the correct
	 * destination queue.  Using a helper function skips a call to
	 * skb_tx_hash and will put the skbs in the queue we expect on their
	 * way down to the bonding driver.
	 */
	u16 txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) : 0;

	/* Save the original txq to restore before passing to the driver */
	qdisc_skb_cb(skb)->slave_dev_queue_mapping = skb_get_queue_mapping(skb);

	if (unlikely(txq >= dev->real_num_tx_queues)) {
		do {
			txq -= dev->real_num_tx_queues;
		} while (txq >= dev->real_num_tx_queues);
	}
	return txq;
}

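/* The subtraction loop above is just a cheap modulo for the rare case
 * where the recorded rx queue is out of range for this device: txq 10
 * with 4 real tx queues folds to 10 - 4 - 4 = 2, the same as 10 % 4.
 * Standalone equivalent:
 */
#if 0
static u16 fold_txq(u16 txq, u16 real_num_tx_queues)
{
	while (txq >= real_num_tx_queues)
		txq -= real_num_tx_queues;
	return txq;		/* == txq % real_num_tx_queues */
}
#endif
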
static struct net_device *bond_xmit_get_slave(struct net_device *master_dev,
					      struct sk_buff *skb,
					      bool all_slaves)
{
	struct bonding *bond = netdev_priv(master_dev);
	struct bond_up_slave *slaves;
	struct slave *slave = NULL;

	switch (BOND_MODE(bond)) {
	case BOND_MODE_ROUNDROBIN:
		slave = bond_xmit_roundrobin_slave_get(bond, skb);
		break;
	case BOND_MODE_ACTIVEBACKUP:
		slave = bond_xmit_activebackup_slave_get(bond);
		break;
	case BOND_MODE_8023AD:
	case BOND_MODE_XOR:
		if (all_slaves)
			slaves = rcu_dereference(bond->all_slaves);
		else
			slaves = rcu_dereference(bond->usable_slaves);
		slave = bond_xmit_3ad_xor_slave_get(bond, skb, slaves);
		break;
	case BOND_MODE_BROADCAST:
		break;
	case BOND_MODE_ALB:
		slave = bond_xmit_alb_slave_get(bond, skb);
		break;
	case BOND_MODE_TLB:
		slave = bond_xmit_tlb_slave_get(bond, skb);
		break;
	default:
		/* Should never happen, mode already checked */
		WARN_ONCE(true, "Unknown bonding mode");
		break;
	}

	if (slave)
		return slave->dev;
	return NULL;
}

static void bond_sk_to_flow(struct sock *sk, struct flow_keys *flow)
{
	switch (sk->sk_family) {
#if IS_ENABLED(CONFIG_IPV6)
	case AF_INET6:
		if (sk->sk_ipv6only ||
		    ipv6_addr_type(&sk->sk_v6_daddr) != IPV6_ADDR_MAPPED) {
			flow->control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
			flow->addrs.v6addrs.src = inet6_sk(sk)->saddr;
			flow->addrs.v6addrs.dst = sk->sk_v6_daddr;
			break;
		}
		fallthrough;
#endif
	default: /* AF_INET */
		flow->control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
		flow->addrs.v4addrs.src = inet_sk(sk)->inet_rcv_saddr;
		flow->addrs.v4addrs.dst = inet_sk(sk)->inet_daddr;
		break;
	}

	flow->ports.src = inet_sk(sk)->inet_sport;
	flow->ports.dst = inet_sk(sk)->inet_dport;
}

/**
 * bond_sk_hash_l34 - generate a hash value based on the socket's L3 and L4 fields
 * @sk: socket to use for headers
 *
 * This function will extract the necessary fields from the socket and use
 * them to generate a hash based on the LAYER34 xmit_policy.
 * Assumes that sk is a TCP or UDP socket.
 */
static u32 bond_sk_hash_l34(struct sock *sk)
{
	struct flow_keys flow;
	u32 hash;

	bond_sk_to_flow(sk, &flow);

	/* L4 */
	memcpy(&hash, &flow.ports.ports, sizeof(hash));
	/* L3 */
	return bond_ip_hash(hash, &flow, BOND_XMIT_POLICY_LAYER34);
}

static struct net_device *__bond_sk_get_lower_dev(struct bonding *bond,
						  struct sock *sk)
{
	struct bond_up_slave *slaves;
	struct slave *slave;
	unsigned int count;
	u32 hash;

	slaves = rcu_dereference(bond->usable_slaves);
	count = slaves ? READ_ONCE(slaves->count) : 0;
	if (unlikely(!count))
		return NULL;

	hash = bond_sk_hash_l34(sk);
	slave = slaves->arr[hash % count];

	return slave->dev;
}

static struct net_device *bond_sk_get_lower_dev(struct net_device *dev,
						struct sock *sk)
{
	struct bonding *bond = netdev_priv(dev);
	struct net_device *lower = NULL;

	rcu_read_lock();
	if (bond_sk_check(bond))
		lower = __bond_sk_get_lower_dev(bond, sk);
	rcu_read_unlock();

	return lower;
}

#if IS_ENABLED(CONFIG_TLS_DEVICE)
static netdev_tx_t bond_tls_device_xmit(struct bonding *bond, struct sk_buff *skb,
					struct net_device *dev)
{
	if (likely(bond_get_slave_by_dev(bond, tls_get_ctx(skb->sk)->netdev)))
		return bond_dev_queue_xmit(bond, skb, tls_get_ctx(skb->sk)->netdev);
	return bond_tx_drop(dev, skb);
}
#endif

static netdev_tx_t __bond_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bonding *bond = netdev_priv(dev);

	if (bond_should_override_tx_queue(bond) &&
	    !bond_slave_override(bond, skb))
		return NETDEV_TX_OK;

#if IS_ENABLED(CONFIG_TLS_DEVICE)
	if (skb->sk && tls_is_sk_tx_device_offloaded(skb->sk))
		return bond_tls_device_xmit(bond, skb, dev);
#endif

	switch (BOND_MODE(bond)) {
	case BOND_MODE_ROUNDROBIN:
		return bond_xmit_roundrobin(skb, dev);
	case BOND_MODE_ACTIVEBACKUP:
		return bond_xmit_activebackup(skb, dev);
	case BOND_MODE_8023AD:
	case BOND_MODE_XOR:
		return bond_3ad_xor_xmit(skb, dev);
	case BOND_MODE_BROADCAST:
		return bond_xmit_broadcast(skb, dev);
	case BOND_MODE_ALB:
		return bond_alb_xmit(skb, dev);
	case BOND_MODE_TLB:
		return bond_tlb_xmit(skb, dev);
	default:
		/* Should never happen, mode already checked */
		netdev_err(dev, "Unknown bonding mode %d\n", BOND_MODE(bond));
		WARN_ON_ONCE(1);
		return bond_tx_drop(dev, skb);
	}
}

static netdev_tx_t bond_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bonding *bond = netdev_priv(dev);
	netdev_tx_t ret = NETDEV_TX_OK;

	/* If we risk deadlock from transmitting this in the
	 * netpoll path, tell netpoll to queue the frame for later tx
	 */
	if (unlikely(is_netpoll_tx_blocked(dev)))
		return NETDEV_TX_BUSY;

	rcu_read_lock();
	if (bond_has_slaves(bond))
		ret = __bond_start_xmit(skb, dev);
	else
		ret = bond_tx_drop(dev, skb);
	rcu_read_unlock();

	return ret;
}

static struct net_device *
bond_xdp_get_xmit_slave(struct net_device *bond_dev, struct xdp_buff *xdp)
{
	struct bonding *bond = netdev_priv(bond_dev);
	struct slave *slave;

	/* Caller needs to hold rcu_read_lock() */

	switch (BOND_MODE(bond)) {
	case BOND_MODE_ROUNDROBIN:
		slave = bond_xdp_xmit_roundrobin_slave_get(bond, xdp);
		break;

	case BOND_MODE_ACTIVEBACKUP:
		slave = bond_xmit_activebackup_slave_get(bond);
		break;

	case BOND_MODE_8023AD:
	case BOND_MODE_XOR:
		slave = bond_xdp_xmit_3ad_xor_slave_get(bond, xdp);
		break;

	default:
		/* Should never happen. Mode guarded by bond_xdp_check() */
		netdev_err(bond_dev, "Unknown bonding mode %d for xdp xmit\n", BOND_MODE(bond));
		WARN_ON_ONCE(1);
		return NULL;
	}

	if (slave)
		return slave->dev;

	return NULL;
}

static int bond_xdp_xmit(struct net_device *bond_dev,
			 int n, struct xdp_frame **frames, u32 flags)
{
	int nxmit, err = -ENXIO;

	rcu_read_lock();

	for (nxmit = 0; nxmit < n; nxmit++) {
		struct xdp_frame *frame = frames[nxmit];
		struct xdp_frame *frames1[] = {frame};
		struct net_device *slave_dev;
		struct xdp_buff xdp;

		xdp_convert_frame_to_buff(frame, &xdp);

		slave_dev = bond_xdp_get_xmit_slave(bond_dev, &xdp);
		if (!slave_dev) {
			err = -ENXIO;
			break;
		}

		err = slave_dev->netdev_ops->ndo_xdp_xmit(slave_dev, 1, frames1, flags);
		if (err < 1)
			break;
	}

	rcu_read_unlock();

	/* If error happened on the first frame then we can pass the error up, otherwise
	 * report the number of frames that were xmitted.
	 */
	if (err < 0)
		return (nxmit == 0 ? err : nxmit);

	return nxmit;
}

static int bond_xdp_set(struct net_device *dev, struct bpf_prog *prog,
			struct netlink_ext_ack *extack)
{
	struct bonding *bond = netdev_priv(dev);
	struct list_head *iter;
	struct slave *slave, *rollback_slave;
	struct bpf_prog *old_prog;
	struct netdev_bpf xdp = {
		.command = XDP_SETUP_PROG,
		.flags   = 0,
		.prog    = prog,
		.extack  = extack,
	};
	int err;

	ASSERT_RTNL();

	if (!bond_xdp_check(bond))
		return -EOPNOTSUPP;

	old_prog = bond->xdp_prog;
	bond->xdp_prog = prog;

	bond_for_each_slave(bond, slave, iter) {
		struct net_device *slave_dev = slave->dev;

		if (!slave_dev->netdev_ops->ndo_bpf ||
		    !slave_dev->netdev_ops->ndo_xdp_xmit) {
			SLAVE_NL_ERR(dev, slave_dev, extack,
				     "Slave device does not support XDP");
			err = -EOPNOTSUPP;
			goto err;
		}

		if (dev_xdp_prog_count(slave_dev) > 0) {
			SLAVE_NL_ERR(dev, slave_dev, extack,
				     "Slave has XDP program loaded, please unload before enslaving");
			err = -EOPNOTSUPP;
			goto err;
		}

		err = slave_dev->netdev_ops->ndo_bpf(slave_dev, &xdp);
		if (err < 0) {
			/* ndo_bpf() sets extack error message */
			slave_err(dev, slave_dev, "Error %d calling ndo_bpf\n", err);
			goto err;
		}
		if (prog)
			bpf_prog_inc(prog);
	}

	if (prog) {
		static_branch_inc(&bpf_master_redirect_enabled_key);
	} else if (old_prog) {
		bpf_prog_put(old_prog);
		static_branch_dec(&bpf_master_redirect_enabled_key);
	}

	return 0;

err:
	/* unwind the program changes */
	bond->xdp_prog = old_prog;
	xdp.prog = old_prog;
	xdp.extack = NULL; /* do not overwrite original error */

	bond_for_each_slave(bond, rollback_slave, iter) {
		struct net_device *slave_dev = rollback_slave->dev;
		int err_unwind;

		if (slave == rollback_slave)
			break;

		err_unwind = slave_dev->netdev_ops->ndo_bpf(slave_dev, &xdp);
		if (err_unwind < 0)
			slave_err(dev, slave_dev,
				  "Error %d when unwinding XDP program change\n", err_unwind);
		else if (xdp.prog)
			bpf_prog_inc(xdp.prog);
	}
	return err;
}

static int bond_xdp(struct net_device *dev, struct netdev_bpf *xdp)
{
	switch (xdp->command) {
	case XDP_SETUP_PROG:
		return bond_xdp_set(dev, xdp->prog, xdp->extack);
	default:
		return -EINVAL;
	}
}

static u32 bond_mode_bcast_speed(struct slave *slave, u32 speed)
{
	if (speed == 0 || speed == SPEED_UNKNOWN)
		speed = slave->speed;
	else
		speed = min(speed, slave->speed);

	return speed;
}

static int bond_ethtool_get_link_ksettings(struct net_device *bond_dev,
					   struct ethtool_link_ksettings *cmd)
{
	struct bonding *bond = netdev_priv(bond_dev);
	struct list_head *iter;
	struct slave *slave;
	u32 speed = 0;

	cmd->base.duplex = DUPLEX_UNKNOWN;
	cmd->base.port = PORT_OTHER;

	/* Since bond_slave_can_tx returns false for all inactive or down slaves, we
	 * do not need to check mode.  Though link speed might not represent
	 * the true receive or transmit bandwidth (not all modes are symmetric)
	 * this is an accurate maximum.
	 */
	bond_for_each_slave(bond, slave, iter) {
		if (bond_slave_can_tx(slave)) {
			if (slave->speed != SPEED_UNKNOWN) {
				if (BOND_MODE(bond) == BOND_MODE_BROADCAST)
					speed = bond_mode_bcast_speed(slave,
								      speed);
				else
					speed += slave->speed;
			}
			if (cmd->base.duplex == DUPLEX_UNKNOWN &&
			    slave->duplex != DUPLEX_UNKNOWN)
				cmd->base.duplex = slave->duplex;
		}
	}
	cmd->base.speed = speed ? : SPEED_UNKNOWN;

	return 0;
}

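/* The aggregation rule is mode dependent: broadcast reports the speed
 * of the slowest transmitting slave (every packet goes to all of
 * them), all other modes report the sum.  Two 1000 Mb/s slaves thus
 * show 1000 in broadcast mode and 2000 elsewhere.  The rule in
 * isolation (illustrative; SPEED_UNKNOWN handling elided):
 */
#if 0
static u32 aggregate_speed(bool bcast, const u32 *speeds, int n)
{
	u32 speed = 0;
	int i;

	for (i = 0; i < n; i++)
		speed = bcast ? (speed ? min(speed, speeds[i]) : speeds[i])
			      : speed + speeds[i];
	return speed;
}
#endif
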
static void bond_ethtool_get_drvinfo(struct net_device *bond_dev,
				     struct ethtool_drvinfo *drvinfo)
{
	strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), "%d",
		 BOND_ABI_VERSION);
}

static const struct ethtool_ops bond_ethtool_ops = {
	.get_drvinfo		= bond_ethtool_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_link_ksettings	= bond_ethtool_get_link_ksettings,
};

static const struct net_device_ops bond_netdev_ops = {
	.ndo_init		= bond_init,
	.ndo_uninit		= bond_uninit,
	.ndo_open		= bond_open,
	.ndo_stop		= bond_close,
	.ndo_start_xmit		= bond_start_xmit,
	.ndo_select_queue	= bond_select_queue,
	.ndo_get_stats64	= bond_get_stats,
	.ndo_eth_ioctl		= bond_eth_ioctl,
	.ndo_siocbond		= bond_do_ioctl,
	.ndo_siocdevprivate	= bond_siocdevprivate,
	.ndo_change_rx_flags	= bond_change_rx_flags,
	.ndo_set_rx_mode	= bond_set_rx_mode,
	.ndo_change_mtu		= bond_change_mtu,
	.ndo_set_mac_address	= bond_set_mac_address,
	.ndo_neigh_setup	= bond_neigh_setup,
	.ndo_vlan_rx_add_vid	= bond_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= bond_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_netpoll_setup	= bond_netpoll_setup,
	.ndo_netpoll_cleanup	= bond_netpoll_cleanup,
	.ndo_poll_controller	= bond_poll_controller,
#endif
	.ndo_add_slave		= bond_enslave,
	.ndo_del_slave		= bond_release,
	.ndo_fix_features	= bond_fix_features,
	.ndo_features_check	= passthru_features_check,
	.ndo_get_xmit_slave	= bond_xmit_get_slave,
	.ndo_sk_get_lower_dev	= bond_sk_get_lower_dev,
	.ndo_bpf		= bond_xdp,
	.ndo_xdp_xmit		= bond_xdp_xmit,
	.ndo_xdp_get_xmit_slave = bond_xdp_get_xmit_slave,
};

static const struct device_type bond_type = {
	.name = "bond",
};

static void bond_destructor(struct net_device *bond_dev)
{
	struct bonding *bond = netdev_priv(bond_dev);

	if (bond->wq)
		destroy_workqueue(bond->wq);

	if (bond->rr_tx_counter)
		free_percpu(bond->rr_tx_counter);
}

void bond_setup(struct net_device *bond_dev)
{
	struct bonding *bond = netdev_priv(bond_dev);

	spin_lock_init(&bond->mode_lock);
	bond->params = bonding_defaults;

	/* Initialize pointers */
	bond->dev = bond_dev;

	/* Initialize the device entry points */
	ether_setup(bond_dev);
	bond_dev->max_mtu = ETH_MAX_MTU;
	bond_dev->netdev_ops = &bond_netdev_ops;
	bond_dev->ethtool_ops = &bond_ethtool_ops;

	bond_dev->needs_free_netdev = true;
	bond_dev->priv_destructor = bond_destructor;

	SET_NETDEV_DEVTYPE(bond_dev, &bond_type);

	/* Initialize the device options */
	bond_dev->flags |= IFF_MASTER;
	bond_dev->priv_flags |= IFF_BONDING | IFF_UNICAST_FLT | IFF_NO_QUEUE;
	bond_dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING);

#ifdef CONFIG_XFRM_OFFLOAD
	/* set up xfrm device ops (only supported in active-backup right now) */
	bond_dev->xfrmdev_ops = &bond_xfrmdev_ops;
	INIT_LIST_HEAD(&bond->ipsec_list);
	spin_lock_init(&bond->ipsec_lock);
#endif /* CONFIG_XFRM_OFFLOAD */

	/* don't acquire bond device's netif_tx_lock when transmitting */
	bond_dev->features |= NETIF_F_LLTX;

	/* By default, we declare the bond to be fully
	 * VLAN hardware accelerated capable. Special
	 * care is taken in the various xmit functions
	 * when there are slaves that are not hw accel
	 * capable
	 */

	/* Don't allow bond devices to change network namespaces. */
	bond_dev->features |= NETIF_F_NETNS_LOCAL;

	bond_dev->hw_features = BOND_VLAN_FEATURES |
				NETIF_F_HW_VLAN_CTAG_RX |
				NETIF_F_HW_VLAN_CTAG_FILTER;

	bond_dev->hw_features |= NETIF_F_GSO_ENCAP_ALL;
	bond_dev->features |= bond_dev->hw_features;
	bond_dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX;
#ifdef CONFIG_XFRM_OFFLOAD
	bond_dev->hw_features |= BOND_XFRM_FEATURES;
	/* Only enable XFRM features if this is an active-backup config */
	if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP)
		bond_dev->features |= BOND_XFRM_FEATURES;
#endif /* CONFIG_XFRM_OFFLOAD */
#if IS_ENABLED(CONFIG_TLS_DEVICE)
	if (bond_sk_check(bond))
		bond_dev->features |= BOND_TLS_FEATURES;
#endif
}

/* Destroy a bonding device.
 * Must be under rtnl_lock when this function is called.
 */
static void bond_uninit(struct net_device *bond_dev)
{
	struct bonding *bond = netdev_priv(bond_dev);
	struct bond_up_slave *usable, *all;
	struct list_head *iter;
	struct slave *slave;

	bond_netpoll_cleanup(bond_dev);

	/* Release the bonded slaves */
	bond_for_each_slave(bond, slave, iter)
		__bond_release_one(bond_dev, slave->dev, true, true);
	netdev_info(bond_dev, "Released all slaves\n");

	usable = rtnl_dereference(bond->usable_slaves);
	if (usable) {
		RCU_INIT_POINTER(bond->usable_slaves, NULL);
		kfree_rcu(usable, rcu);
	}

	all = rtnl_dereference(bond->all_slaves);
	if (all) {
		RCU_INIT_POINTER(bond->all_slaves, NULL);
		kfree_rcu(all, rcu);
	}

	list_del(&bond->bond_list);

	bond_debug_unregister(bond);
}

/*------------------------- Module initialization ---------------------------*/

static int bond_check_params(struct bond_params *params)
{
	int arp_validate_value, fail_over_mac_value, primary_reselect_value, i;
	struct bond_opt_value newval;
	const struct bond_opt_value *valptr;
	int arp_all_targets_value = 0;
	u16 ad_actor_sys_prio = 0;
	u16 ad_user_port_key = 0;
	__be32 arp_target[BOND_MAX_ARP_TARGETS] = { 0 };
	int arp_ip_count;
	int bond_mode = BOND_MODE_ROUNDROBIN;
	int xmit_hashtype = BOND_XMIT_POLICY_LAYER2;
	int lacp_fast = 0;
	int tlb_dynamic_lb;

	/* Convert string parameters. */
	if (mode) {
		bond_opt_initstr(&newval, mode);
		valptr = bond_opt_parse(bond_opt_get(BOND_OPT_MODE), &newval);
		if (!valptr) {
			pr_err("Error: Invalid bonding mode \"%s\"\n", mode);
			return -EINVAL;
		}
		bond_mode = valptr->value;
	}

	if (xmit_hash_policy) {
		if (bond_mode == BOND_MODE_ROUNDROBIN ||
		    bond_mode == BOND_MODE_ACTIVEBACKUP ||
		    bond_mode == BOND_MODE_BROADCAST) {
			pr_info("xmit_hash_policy param is irrelevant in mode %s\n",
				bond_mode_name(bond_mode));
		} else {
			bond_opt_initstr(&newval, xmit_hash_policy);
			valptr = bond_opt_parse(bond_opt_get(BOND_OPT_XMIT_HASH),
						&newval);
			if (!valptr) {
				pr_err("Error: Invalid xmit_hash_policy \"%s\"\n",
				       xmit_hash_policy);
				return -EINVAL;
			}
			xmit_hashtype = valptr->value;
		}
	}

	if (lacp_rate) {
		if (bond_mode != BOND_MODE_8023AD) {
			pr_info("lacp_rate param is irrelevant in mode %s\n",
				bond_mode_name(bond_mode));
		} else {
			bond_opt_initstr(&newval, lacp_rate);
			valptr = bond_opt_parse(bond_opt_get(BOND_OPT_LACP_RATE),
						&newval);
			if (!valptr) {
				pr_err("Error: Invalid lacp rate \"%s\"\n",
				       lacp_rate);
				return -EINVAL;
			}
			lacp_fast = valptr->value;
		}
	}

	if (ad_select) {
		bond_opt_initstr(&newval, ad_select);
		valptr = bond_opt_parse(bond_opt_get(BOND_OPT_AD_SELECT),
					&newval);
		if (!valptr) {
			pr_err("Error: Invalid ad_select \"%s\"\n", ad_select);
			return -EINVAL;
		}
		params->ad_select = valptr->value;
		if (bond_mode != BOND_MODE_8023AD)
			pr_warn("ad_select param only affects 802.3ad mode\n");
	} else {
		params->ad_select = BOND_AD_STABLE;
	}
	if (max_bonds < 0) {
		pr_warn("Warning: max_bonds (%d) not in range %d-%d, so it was reset to BOND_DEFAULT_MAX_BONDS (%d)\n",
			max_bonds, 0, INT_MAX, BOND_DEFAULT_MAX_BONDS);
		max_bonds = BOND_DEFAULT_MAX_BONDS;
	}

	if (miimon < 0) {
		pr_warn("Warning: miimon module parameter (%d), not in range 0-%d, so it was reset to 0\n",
			miimon, INT_MAX);
		miimon = 0;
	}

	if (updelay < 0) {
		pr_warn("Warning: updelay module parameter (%d), not in range 0-%d, so it was reset to 0\n",
			updelay, INT_MAX);
		updelay = 0;
	}

	if (downdelay < 0) {
		pr_warn("Warning: downdelay module parameter (%d), not in range 0-%d, so it was reset to 0\n",
			downdelay, INT_MAX);
		downdelay = 0;
	}

	if ((use_carrier != 0) && (use_carrier != 1)) {
		pr_warn("Warning: use_carrier module parameter (%d), not of valid value (0/1), so it was set to 1\n",
			use_carrier);
		use_carrier = 1;
	}

	if (num_peer_notif < 0 || num_peer_notif > 255) {
		pr_warn("Warning: num_grat_arp/num_unsol_na (%d) not in range 0-255 so it was reset to 1\n",
			num_peer_notif);
		num_peer_notif = 1;
	}

	/* reset values for 802.3ad/TLB/ALB */
	if (!bond_mode_uses_arp(bond_mode)) {
		if (!miimon) {
			pr_warn("Warning: miimon must be specified, otherwise bonding will not detect link failure, speed and duplex which are essential for 802.3ad operation\n");
			pr_warn("Forcing miimon to 100msec\n");
			miimon = BOND_DEFAULT_MIIMON;
		}
	}

	if (tx_queues < 1 || tx_queues > 255) {
		pr_warn("Warning: tx_queues (%d) should be between 1 and 255, resetting to %d\n",
			tx_queues, BOND_DEFAULT_TX_QUEUES);
		tx_queues = BOND_DEFAULT_TX_QUEUES;
	}

	if ((all_slaves_active != 0) && (all_slaves_active != 1)) {
		pr_warn("Warning: all_slaves_active module parameter (%d), not of valid value (0/1), so it was set to 0\n",
			all_slaves_active);
		all_slaves_active = 0;
	}

	if (resend_igmp < 0 || resend_igmp > 255) {
		pr_warn("Warning: resend_igmp (%d) should be between 0 and 255, resetting to %d\n",
			resend_igmp, BOND_DEFAULT_RESEND_IGMP);
		resend_igmp = BOND_DEFAULT_RESEND_IGMP;
	}

	bond_opt_initval(&newval, packets_per_slave);
	if (!bond_opt_parse(bond_opt_get(BOND_OPT_PACKETS_PER_SLAVE), &newval)) {
		pr_warn("Warning: packets_per_slave (%d) should be between 0 and %u resetting to 1\n",
			packets_per_slave, USHRT_MAX);
		packets_per_slave = 1;
	}
	if (bond_mode == BOND_MODE_ALB) {
		pr_notice("In ALB mode you might experience client disconnections upon reconnection of a link if the bonding module updelay parameter (%d msec) is incompatible with the forwarding delay time of the switch\n",
			  updelay);
	}

	if (!miimon) {
		if (updelay || downdelay) {
			/* just warn the user the up/down delay will have
			 * no effect since miimon is zero...
			 */
			pr_warn("Warning: miimon module parameter not set and updelay (%d) or downdelay (%d) module parameter is set; updelay and downdelay have no effect unless miimon is set\n",
				updelay, downdelay);
		}
	} else {
		/* don't allow arp monitoring */
		if (arp_interval) {
			pr_warn("Warning: miimon (%d) and arp_interval (%d) can't be used simultaneously, disabling ARP monitoring\n",
				miimon, arp_interval);
			arp_interval = 0;
		}

		if ((updelay % miimon) != 0) {
			pr_warn("Warning: updelay (%d) is not a multiple of miimon (%d), updelay rounded to %d ms\n",
				updelay, miimon, (updelay / miimon) * miimon);
		}

		updelay /= miimon;

		if ((downdelay % miimon) != 0) {
			pr_warn("Warning: downdelay (%d) is not a multiple of miimon (%d), downdelay rounded to %d ms\n",
				downdelay, miimon,
				(downdelay / miimon) * miimon);
		}

		downdelay /= miimon;
	}

	if (arp_interval < 0) {
		pr_warn("Warning: arp_interval module parameter (%d), not in range 0-%d, so it was reset to 0\n",
			arp_interval, INT_MAX);
		arp_interval = 0;
	}
	for (arp_ip_count = 0, i = 0;
	     (arp_ip_count < BOND_MAX_ARP_TARGETS) && arp_ip_target[i]; i++) {
		__be32 ip;

		/* not a complete check, but good enough to catch mistakes */
		if (!in4_pton(arp_ip_target[i], -1, (u8 *)&ip, -1, NULL) ||
		    !bond_is_ip_target_ok(ip)) {
			pr_warn("Warning: bad arp_ip_target module parameter (%s), ARP monitoring will not be performed\n",
				arp_ip_target[i]);
			arp_interval = 0;
		} else {
			if (bond_get_targets_ip(arp_target, ip) == -1)
				arp_target[arp_ip_count++] = ip;
			else
				pr_warn("Warning: duplicate address %pI4 in arp_ip_target, skipping\n",
					&ip);
		}
	}

	if (arp_interval && !arp_ip_count) {
		/* don't allow arping if no arp_ip_target given... */
		pr_warn("Warning: arp_interval module parameter (%d) specified without providing an arp_ip_target parameter, arp_interval was reset to 0\n",
			arp_interval);
		arp_interval = 0;
	}

	if (arp_validate) {
		if (!arp_interval) {
			pr_err("arp_validate requires arp_interval\n");
			return -EINVAL;
		}

		bond_opt_initstr(&newval, arp_validate);
		valptr = bond_opt_parse(bond_opt_get(BOND_OPT_ARP_VALIDATE),
					&newval);
		if (!valptr) {
			pr_err("Error: invalid arp_validate \"%s\"\n",
			       arp_validate);
			return -EINVAL;
		}
		arp_validate_value = valptr->value;
	} else {
		arp_validate_value = 0;
	}

	if (arp_all_targets) {
		bond_opt_initstr(&newval, arp_all_targets);
		valptr = bond_opt_parse(bond_opt_get(BOND_OPT_ARP_ALL_TARGETS),
					&newval);
		if (!valptr) {
			pr_err("Error: invalid arp_all_targets_value \"%s\"\n",
			       arp_all_targets);
			arp_all_targets_value = 0;
		} else {
			arp_all_targets_value = valptr->value;
		}
	}
	if (miimon) {
		pr_info("MII link monitoring set to %d ms\n", miimon);
	} else if (arp_interval) {
		valptr = bond_opt_get_val(BOND_OPT_ARP_VALIDATE,
					  arp_validate_value);
		pr_info("ARP monitoring set to %d ms, validate %s, with %d target(s):",
			arp_interval, valptr->string, arp_ip_count);

		for (i = 0; i < arp_ip_count; i++)
			pr_cont(" %s", arp_ip_target[i]);

		pr_cont("\n");

	} else if (max_bonds) {
		/* miimon and arp_interval not set, we need one so things
		 * work as expected, see bonding.txt for details
		 */
		pr_debug("Warning: either miimon or arp_interval and arp_ip_target module parameters must be specified, otherwise bonding will not detect link failures! see bonding.txt for details\n");
	}
	if (primary && !bond_mode_uses_primary(bond_mode)) {
		/* currently, using a primary only makes sense
		 * in active backup, TLB or ALB modes
		 */
		pr_warn("Warning: %s primary device specified but has no effect in %s mode\n",
			primary, bond_mode_name(bond_mode));
		primary = NULL;
	}

	if (primary && primary_reselect) {
		bond_opt_initstr(&newval, primary_reselect);
		valptr = bond_opt_parse(bond_opt_get(BOND_OPT_PRIMARY_RESELECT),
					&newval);
		if (!valptr) {
			pr_err("Error: Invalid primary_reselect \"%s\"\n",
			       primary_reselect);
			return -EINVAL;
		}
		primary_reselect_value = valptr->value;
	} else {
		primary_reselect_value = BOND_PRI_RESELECT_ALWAYS;
	}

	if (fail_over_mac) {
		bond_opt_initstr(&newval, fail_over_mac);
		valptr = bond_opt_parse(bond_opt_get(BOND_OPT_FAIL_OVER_MAC),
					&newval);
		if (!valptr) {
			pr_err("Error: invalid fail_over_mac \"%s\"\n",
			       fail_over_mac);
			return -EINVAL;
		}
		fail_over_mac_value = valptr->value;
		if (bond_mode != BOND_MODE_ACTIVEBACKUP)
			pr_warn("Warning: fail_over_mac only affects active-backup mode\n");
	} else {
		fail_over_mac_value = BOND_FOM_NONE;
	}
	bond_opt_initstr(&newval, "default");
	valptr = bond_opt_parse(bond_opt_get(BOND_OPT_AD_ACTOR_SYS_PRIO),
				&newval);
	if (!valptr) {
		pr_err("Error: No ad_actor_sys_prio default value");
		return -EINVAL;
	}
	ad_actor_sys_prio = valptr->value;

	valptr = bond_opt_parse(bond_opt_get(BOND_OPT_AD_USER_PORT_KEY),
				&newval);
	if (!valptr) {
		pr_err("Error: No ad_user_port_key default value");
		return -EINVAL;
	}
	ad_user_port_key = valptr->value;

	bond_opt_initstr(&newval, "default");
	valptr = bond_opt_parse(bond_opt_get(BOND_OPT_TLB_DYNAMIC_LB), &newval);
	if (!valptr) {
		pr_err("Error: No tlb_dynamic_lb default value");
		return -EINVAL;
	}
	tlb_dynamic_lb = valptr->value;

	if (lp_interval == 0) {
		pr_warn("Warning: lp_interval must be between 1 and %d, so it was reset to %d\n",
			INT_MAX, BOND_ALB_DEFAULT_LP_INTERVAL);
		lp_interval = BOND_ALB_DEFAULT_LP_INTERVAL;
	}
	/* fill params struct with the proper values */
	params->mode = bond_mode;
	params->xmit_policy = xmit_hashtype;
	params->miimon = miimon;
	params->num_peer_notif = num_peer_notif;
	params->arp_interval = arp_interval;
	params->arp_validate = arp_validate_value;
	params->arp_all_targets = arp_all_targets_value;
	params->updelay = updelay;
	params->downdelay = downdelay;
	params->peer_notif_delay = 0;
	params->use_carrier = use_carrier;
	params->lacp_active = 1;
	params->lacp_fast = lacp_fast;
	params->primary[0] = 0;
	params->primary_reselect = primary_reselect_value;
	params->fail_over_mac = fail_over_mac_value;
	params->tx_queues = tx_queues;
	params->all_slaves_active = all_slaves_active;
	params->resend_igmp = resend_igmp;
	params->min_links = min_links;
	params->lp_interval = lp_interval;
	params->packets_per_slave = packets_per_slave;
	params->tlb_dynamic_lb = tlb_dynamic_lb;
	params->ad_actor_sys_prio = ad_actor_sys_prio;
	eth_zero_addr(params->ad_actor_system);
	params->ad_user_port_key = ad_user_port_key;
	if (packets_per_slave > 0) {
		params->reciprocal_packets_per_slave =
			reciprocal_value(packets_per_slave);
	} else {
		/* reciprocal_packets_per_slave is unused if
		 * packets_per_slave is 0 or 1, just initialize it
		 */
		params->reciprocal_packets_per_slave =
			(struct reciprocal_value) { 0 };
	}

	if (primary)
		strscpy_pad(params->primary, primary, sizeof(params->primary));

	memcpy(params->arp_targets, arp_target, sizeof(arp_target));

	return 0;
}

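/* Note that updelay and downdelay leave bond_check_params() expressed
 * in units of miimon intervals, not milliseconds: the checks above
 * round down to a multiple of miimon and then divide.  E.g. miimon=100
 * and updelay=250 yields 250 / 100 = 2 intervals, i.e. an effective
 * updelay of 200 ms (after the rounding warning).  The arithmetic:
 */
#if 0
static int delay_in_intervals(int delay_ms, int miimon_ms)
{
	/* 250 ms / 100 ms -> 2 intervals -> 200 ms effective */
	return delay_ms / miimon_ms;
}
#endif
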
/* Called from registration process */
static int bond_init(struct net_device *bond_dev)
{
	struct bonding *bond = netdev_priv(bond_dev);
	struct bond_net *bn = net_generic(dev_net(bond_dev), bond_net_id);

	netdev_dbg(bond_dev, "Begin bond_init\n");

	bond->wq = alloc_ordered_workqueue(bond_dev->name, WQ_MEM_RECLAIM);
	if (!bond->wq)
		return -ENOMEM;

	if (BOND_MODE(bond) == BOND_MODE_ROUNDROBIN) {
		bond->rr_tx_counter = alloc_percpu(u32);
		if (!bond->rr_tx_counter) {
			destroy_workqueue(bond->wq);
			bond->wq = NULL;
			return -ENOMEM;
		}
	}

	spin_lock_init(&bond->stats_lock);
	netdev_lockdep_set_classes(bond_dev);

	list_add_tail(&bond->bond_list, &bn->dev_list);

	bond_prepare_sysfs_group(bond);

	bond_debug_register(bond);

	/* Ensure valid dev_addr */
	if (is_zero_ether_addr(bond_dev->dev_addr) &&
	    bond_dev->addr_assign_type == NET_ADDR_PERM)
		eth_hw_addr_random(bond_dev);

	return 0;
}

unsigned int bond_get_num_tx_queues(void)
{
	return tx_queues;
}

/* Create a new bond based on the specified name and bonding parameters.
 * If name is NULL, obtain a suitable "bond%d" name for us.
 * Caller must NOT hold rtnl_lock; we need to release it here before we
 * set up our sysfs entries.
 */
int bond_create(struct net *net, const char *name)
{
	struct net_device *bond_dev;
	struct bonding *bond;
	struct alb_bond_info *bond_info;
	int res;

	rtnl_lock();

	bond_dev = alloc_netdev_mq(sizeof(struct bonding),
				   name ? name : "bond%d", NET_NAME_UNKNOWN,
				   bond_setup, tx_queues);
	if (!bond_dev) {
		pr_err("%s: eek! can't alloc netdev!\n", name);
		rtnl_unlock();
		return -ENOMEM;
	}

	/*
	 * Initialize rx_hashtbl_used_head to RLB_NULL_INDEX.
	 * It is set to 0 by default which is wrong.
	 */
	bond = netdev_priv(bond_dev);
	bond_info = &(BOND_ALB_INFO(bond));
	bond_info->rx_hashtbl_used_head = RLB_NULL_INDEX;

	dev_net_set(bond_dev, net);
	bond_dev->rtnl_link_ops = &bond_link_ops;

	res = register_netdevice(bond_dev);
	if (res < 0) {
		free_netdev(bond_dev);
		rtnl_unlock();

		return res;
	}

	netif_carrier_off(bond_dev);

	bond_work_init_all(bond);

	rtnl_unlock();
	return 0;
}

static int __net_init bond_net_init(struct net *net)
{
	struct bond_net *bn = net_generic(net, bond_net_id);

	bn->net = net;
	INIT_LIST_HEAD(&bn->dev_list);

	bond_create_proc_dir(bn);
	bond_create_sysfs(bn);

	return 0;
}

static void __net_exit bond_net_exit(struct net *net)
{
	struct bond_net *bn = net_generic(net, bond_net_id);
	struct bonding *bond, *tmp_bond;
	LIST_HEAD(list);

	bond_destroy_sysfs(bn);

	/* Kill off any bonds created after unregistering bond rtnl ops */
	rtnl_lock();
	list_for_each_entry_safe(bond, tmp_bond, &bn->dev_list, bond_list)
		unregister_netdevice_queue(bond->dev, &list);
	unregister_netdevice_many(&list);
	rtnl_unlock();

	bond_destroy_proc_dir(bn);
}

static struct pernet_operations bond_net_ops = {
	.init = bond_net_init,
	.exit = bond_net_exit,
	.id   = &bond_net_id,
	.size = sizeof(struct bond_net),
};

static int __init bonding_init(void)
{
	int i;
	int res;

	res = bond_check_params(&bonding_defaults);
	if (res)
		goto out;

	res = register_pernet_subsys(&bond_net_ops);
	if (res)
		goto out;

	res = bond_netlink_init();
	if (res)
		goto err_link;

	bond_create_debugfs();

	for (i = 0; i < max_bonds; i++) {
		res = bond_create(&init_net, NULL);
		if (res)
			goto err;
	}

	skb_flow_dissector_init(&flow_keys_bonding,
				flow_keys_bonding_keys,
				ARRAY_SIZE(flow_keys_bonding_keys));

	register_netdevice_notifier(&bond_netdev_notifier);
out:
	return res;
err:
	bond_destroy_debugfs();
	bond_netlink_fini();
err_link:
	unregister_pernet_subsys(&bond_net_ops);
	goto out;
}

static void __exit bonding_exit(void)
{
	unregister_netdevice_notifier(&bond_netdev_notifier);

	bond_destroy_debugfs();

	bond_netlink_fini();
	unregister_pernet_subsys(&bond_net_ops);

#ifdef CONFIG_NET_POLL_CONTROLLER
	/* Make sure we don't have an imbalance on our netpoll blocking */
	WARN_ON(atomic_read(&netpoll_block_tx));
#endif
}

module_init(bonding_init);
module_exit(bonding_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_AUTHOR("Thomas Davis, tadavis@lbl.gov and many others");