/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system. INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Routing netlink socket interface: protocol independent part.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Fixes:
 *	Vitaly E. Lavrov		RTA_OK arithmetic was wrong.
 */
#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/fcntl.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/capability.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/security.h>
#include <linux/mutex.h>
#include <linux/if_addr.h>
#include <linux/if_bridge.h>
#include <linux/if_vlan.h>
#include <linux/pci.h>
#include <linux/etherdevice.h>
#include <linux/bpf.h>

#include <linux/uaccess.h>

#include <linux/inet.h>
#include <linux/netdevice.h>
#include <net/switchdev.h>
#include <net/protocol.h>
#include <net/route.h>
#include <net/pkt_sched.h>
#include <net/fib_rules.h>
#include <net/rtnetlink.h>
#include <net/net_namespace.h>
64 rtnl_dumpit_func dumpit;
68 static DEFINE_MUTEX(rtnl_mutex);
72 mutex_lock(&rtnl_mutex);
74 EXPORT_SYMBOL(rtnl_lock);
76 static struct sk_buff *defer_kfree_skb_list;
77 void rtnl_kfree_skbs(struct sk_buff *head, struct sk_buff *tail)
80 tail->next = defer_kfree_skb_list;
81 defer_kfree_skb_list = head;
84 EXPORT_SYMBOL(rtnl_kfree_skbs);
86 void __rtnl_unlock(void)
88 struct sk_buff *head = defer_kfree_skb_list;
90 defer_kfree_skb_list = NULL;
92 mutex_unlock(&rtnl_mutex);
95 struct sk_buff *next = head->next;
103 void rtnl_unlock(void)
105 /* This fellow will unlock it for us. */
108 EXPORT_SYMBOL(rtnl_unlock);
110 int rtnl_trylock(void)
112 return mutex_trylock(&rtnl_mutex);
114 EXPORT_SYMBOL(rtnl_trylock);
116 int rtnl_is_locked(void)
118 return mutex_is_locked(&rtnl_mutex);
120 EXPORT_SYMBOL(rtnl_is_locked);
122 #ifdef CONFIG_PROVE_LOCKING
123 bool lockdep_rtnl_is_held(void)
125 return lockdep_is_held(&rtnl_mutex);
127 EXPORT_SYMBOL(lockdep_rtnl_is_held);
128 #endif /* #ifdef CONFIG_PROVE_LOCKING */
130 static struct rtnl_link __rcu *rtnl_msg_handlers[RTNL_FAMILY_MAX + 1];
131 static refcount_t rtnl_msg_handlers_ref[RTNL_FAMILY_MAX + 1];
133 static inline int rtm_msgindex(int msgtype)
135 int msgindex = msgtype - RTM_BASE;
/*
 * msgindex < 0 implies someone tried to register a netlink
 * control code. msgindex >= RTM_NR_MSGTYPES may indicate that
 * the message type has not been added to linux/rtnetlink.h
 */
142 BUG_ON(msgindex < 0 || msgindex >= RTM_NR_MSGTYPES);
/**
 * __rtnl_register - Register an rtnetlink message type
 * @protocol: Protocol family or PF_UNSPEC
 * @msgtype: rtnetlink message type
 * @doit: Function pointer called for each request message
 * @dumpit: Function pointer called for each dump request (NLM_F_DUMP) message
 * @flags: rtnl_link_flags to modify behaviour of doit/dumpit functions
 *
 * Registers the specified function pointers (at least one of them has
 * to be non-NULL) to be called whenever a request message for the
 * specified protocol family and message type is received.
 *
 * The special protocol family PF_UNSPEC may be used to define fallback
 * function pointers for the case when no entry for the specific protocol
 * family exists.
 *
 * Returns 0 on success or a negative error code.
 */
165 int __rtnl_register(int protocol, int msgtype,
166 rtnl_doit_func doit, rtnl_dumpit_func dumpit,
169 struct rtnl_link *tab;
172 BUG_ON(protocol < 0 || protocol > RTNL_FAMILY_MAX);
173 msgindex = rtm_msgindex(msgtype);
175 tab = rcu_dereference_raw(rtnl_msg_handlers[protocol]);
177 tab = kcalloc(RTM_NR_MSGTYPES, sizeof(*tab), GFP_KERNEL);
181 rcu_assign_pointer(rtnl_msg_handlers[protocol], tab);
185 tab[msgindex].doit = doit;
187 tab[msgindex].dumpit = dumpit;
188 tab[msgindex].flags |= flags;
192 EXPORT_SYMBOL_GPL(__rtnl_register);
/**
 * rtnl_register - Register an rtnetlink message type
 *
 * Identical to __rtnl_register() but panics on failure. This is useful
 * because failure of this function is very unlikely; it can only happen
 * due to lack of memory when allocating the chain to store all message
 * handlers for a protocol. Meant for use in init functions where lack
 * of memory implies there is no sense in continuing.
 */
203 void rtnl_register(int protocol, int msgtype,
204 rtnl_doit_func doit, rtnl_dumpit_func dumpit,
207 if (__rtnl_register(protocol, msgtype, doit, dumpit, flags) < 0)
208 panic("Unable to register rtnetlink message handler, "
209 "protocol = %d, message type = %d\n",
212 EXPORT_SYMBOL_GPL(rtnl_register);
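/*
 * Usage sketch (illustrative, not part of this file): a protocol such as
 * IPv4 routing registers its handlers from an __init function. The handler
 * names below are placeholders for whatever doit/dumpit functions the
 * caller actually implements.
 *
 *	static int __init my_proto_rtnl_init(void)
 *	{
 *		rtnl_register(PF_INET, RTM_NEWROUTE, my_rtm_newroute, NULL, 0);
 *		rtnl_register(PF_INET, RTM_GETROUTE, NULL, my_rtm_dumproute, 0);
 *		return 0;
 *	}
 *
 * rtnl_register() panics on allocation failure; callers that need to handle
 * the error themselves should use __rtnl_register() instead.
 */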
/**
 * rtnl_unregister - Unregister an rtnetlink message type
 * @protocol: Protocol family or PF_UNSPEC
 * @msgtype: rtnetlink message type
 *
 * Returns 0 on success or a negative error code.
 */
221 int rtnl_unregister(int protocol, int msgtype)
223 struct rtnl_link *handlers;
226 BUG_ON(protocol < 0 || protocol > RTNL_FAMILY_MAX);
227 msgindex = rtm_msgindex(msgtype);
230 handlers = rtnl_dereference(rtnl_msg_handlers[protocol]);
236 handlers[msgindex].doit = NULL;
237 handlers[msgindex].dumpit = NULL;
238 handlers[msgindex].flags = 0;
243 EXPORT_SYMBOL_GPL(rtnl_unregister);
/**
 * rtnl_unregister_all - Unregister all rtnetlink message types of a protocol
 * @protocol: Protocol family or PF_UNSPEC
 *
 * Identical to calling rtnl_unregister() for all registered message types
 * of a certain protocol family.
 */
252 void rtnl_unregister_all(int protocol)
254 struct rtnl_link *handlers;
256 BUG_ON(protocol < 0 || protocol > RTNL_FAMILY_MAX);
259 handlers = rtnl_dereference(rtnl_msg_handlers[protocol]);
260 RCU_INIT_POINTER(rtnl_msg_handlers[protocol], NULL);
265 while (refcount_read(&rtnl_msg_handlers_ref[protocol]) > 1)
269 EXPORT_SYMBOL_GPL(rtnl_unregister_all);
271 static LIST_HEAD(link_ops);
273 static const struct rtnl_link_ops *rtnl_link_ops_get(const char *kind)
275 const struct rtnl_link_ops *ops;
277 list_for_each_entry(ops, &link_ops, list) {
278 if (!strcmp(ops->kind, kind))
/**
 * __rtnl_link_register - Register rtnl_link_ops with rtnetlink.
 * @ops: struct rtnl_link_ops * to register
 *
 * The caller must hold the rtnl_mutex. This function should be used
 * by drivers that create devices during module initialization. It
 * must be called before registering the devices.
 *
 * Returns 0 on success or a negative error code.
 */
294 int __rtnl_link_register(struct rtnl_link_ops *ops)
296 if (rtnl_link_ops_get(ops->kind))
/* The check for setup is here because if ops
 * does not have it filled in, it is not possible
 * to use the ops for creating a device. So do not
 * fill in dellink either; that disables rtnl_dellink.
 */
304 if (ops->setup && !ops->dellink)
305 ops->dellink = unregister_netdevice_queue;
307 list_add_tail(&ops->list, &link_ops);
310 EXPORT_SYMBOL_GPL(__rtnl_link_register);
/**
 * rtnl_link_register - Register rtnl_link_ops with rtnetlink.
 * @ops: struct rtnl_link_ops * to register
 *
 * Returns 0 on success or a negative error code.
 */
318 int rtnl_link_register(struct rtnl_link_ops *ops)
323 err = __rtnl_link_register(ops);
327 EXPORT_SYMBOL_GPL(rtnl_link_register);
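/*
 * Usage sketch (illustrative): a virtual-device driver typically provides a
 * static rtnl_link_ops with at least .kind and .setup filled in and registers
 * it from its module init. All identifiers below except the rtnl_link_ops
 * fields are hypothetical.
 *
 *	static struct rtnl_link_ops my_link_ops __read_mostly = {
 *		.kind		= "mydev",
 *		.priv_size	= sizeof(struct my_priv),
 *		.setup		= my_setup,
 *		.newlink	= my_newlink,
 *	};
 *
 *	static int __init my_module_init(void)
 *	{
 *		return rtnl_link_register(&my_link_ops);
 *	}
 *
 * Note that __rtnl_link_register() fills in .dellink with
 * unregister_netdevice_queue when .setup is set, so RTM_DELLINK keeps
 * working even if the driver does not provide its own dellink.
 */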
329 static void __rtnl_kill_links(struct net *net, struct rtnl_link_ops *ops)
331 struct net_device *dev;
332 LIST_HEAD(list_kill);
334 for_each_netdev(net, dev) {
335 if (dev->rtnl_link_ops == ops)
336 ops->dellink(dev, &list_kill);
338 unregister_netdevice_many(&list_kill);
/**
 * __rtnl_link_unregister - Unregister rtnl_link_ops from rtnetlink.
 * @ops: struct rtnl_link_ops * to unregister
 *
 * The caller must hold the rtnl_mutex.
 */
347 void __rtnl_link_unregister(struct rtnl_link_ops *ops)
352 __rtnl_kill_links(net, ops);
354 list_del(&ops->list);
356 EXPORT_SYMBOL_GPL(__rtnl_link_unregister);
/* Return with the rtnl_lock held when there are no network
 * devices unregistering in any network namespace.
 */
361 static void rtnl_lock_unregistering_all(void)
365 DEFINE_WAIT_FUNC(wait, woken_wake_function);
367 add_wait_queue(&netdev_unregistering_wq, &wait);
369 unregistering = false;
372 if (net->dev_unreg_count > 0) {
373 unregistering = true;
381 wait_woken(&wait, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
383 remove_wait_queue(&netdev_unregistering_wq, &wait);
/**
 * rtnl_link_unregister - Unregister rtnl_link_ops from rtnetlink.
 * @ops: struct rtnl_link_ops * to unregister
 */
390 void rtnl_link_unregister(struct rtnl_link_ops *ops)
392 /* Close the race with cleanup_net() */
393 mutex_lock(&net_mutex);
394 rtnl_lock_unregistering_all();
395 __rtnl_link_unregister(ops);
397 mutex_unlock(&net_mutex);
399 EXPORT_SYMBOL_GPL(rtnl_link_unregister);
401 static size_t rtnl_link_get_slave_info_data_size(const struct net_device *dev)
403 struct net_device *master_dev;
404 const struct rtnl_link_ops *ops;
409 master_dev = netdev_master_upper_dev_get_rcu((struct net_device *)dev);
413 ops = master_dev->rtnl_link_ops;
414 if (!ops || !ops->get_slave_size)
416 /* IFLA_INFO_SLAVE_DATA + nested data */
417 size = nla_total_size(sizeof(struct nlattr)) +
418 ops->get_slave_size(master_dev, dev);
425 static size_t rtnl_link_get_size(const struct net_device *dev)
427 const struct rtnl_link_ops *ops = dev->rtnl_link_ops;
433 size = nla_total_size(sizeof(struct nlattr)) + /* IFLA_LINKINFO */
434 nla_total_size(strlen(ops->kind) + 1); /* IFLA_INFO_KIND */
437 /* IFLA_INFO_DATA + nested data */
438 size += nla_total_size(sizeof(struct nlattr)) +
441 if (ops->get_xstats_size)
442 /* IFLA_INFO_XSTATS */
443 size += nla_total_size(ops->get_xstats_size(dev));
445 size += rtnl_link_get_slave_info_data_size(dev);
450 static LIST_HEAD(rtnl_af_ops);
452 static const struct rtnl_af_ops *rtnl_af_lookup(const int family)
454 const struct rtnl_af_ops *ops;
456 list_for_each_entry(ops, &rtnl_af_ops, list) {
457 if (ops->family == family)
/**
 * rtnl_af_register - Register rtnl_af_ops with rtnetlink.
 * @ops: struct rtnl_af_ops * to register
 */
470 void rtnl_af_register(struct rtnl_af_ops *ops)
473 list_add_tail(&ops->list, &rtnl_af_ops);
476 EXPORT_SYMBOL_GPL(rtnl_af_register);
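/*
 * Usage sketch (illustrative): an address family (e.g. the IPv4 or IPv6
 * code) hooks its per-family IFLA_AF_SPEC handling by registering an
 * rtnl_af_ops. The callback names shown are placeholders.
 *
 *	static struct rtnl_af_ops my_af_ops __read_mostly = {
 *		.family			= AF_INET,
 *		.fill_link_af		= my_fill_link_af,
 *		.get_link_af_size	= my_get_link_af_size,
 *		.validate_link_af	= my_validate_link_af,
 *		.set_link_af		= my_set_link_af,
 *	};
 *
 *	rtnl_af_register(&my_af_ops);
 */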
/**
 * __rtnl_af_unregister - Unregister rtnl_af_ops from rtnetlink.
 * @ops: struct rtnl_af_ops * to unregister
 *
 * The caller must hold the rtnl_mutex.
 */
484 void __rtnl_af_unregister(struct rtnl_af_ops *ops)
486 list_del(&ops->list);
488 EXPORT_SYMBOL_GPL(__rtnl_af_unregister);
/**
 * rtnl_af_unregister - Unregister rtnl_af_ops from rtnetlink.
 * @ops: struct rtnl_af_ops * to unregister
 */
494 void rtnl_af_unregister(struct rtnl_af_ops *ops)
497 __rtnl_af_unregister(ops);
500 EXPORT_SYMBOL_GPL(rtnl_af_unregister);
502 static size_t rtnl_link_get_af_size(const struct net_device *dev,
505 struct rtnl_af_ops *af_ops;
509 size = nla_total_size(sizeof(struct nlattr));
511 list_for_each_entry(af_ops, &rtnl_af_ops, list) {
512 if (af_ops->get_link_af_size) {
513 /* AF_* + nested data */
514 size += nla_total_size(sizeof(struct nlattr)) +
515 af_ops->get_link_af_size(dev, ext_filter_mask);
522 static bool rtnl_have_link_slave_info(const struct net_device *dev)
524 struct net_device *master_dev;
526 master_dev = netdev_master_upper_dev_get((struct net_device *) dev);
527 if (master_dev && master_dev->rtnl_link_ops)
532 static int rtnl_link_slave_info_fill(struct sk_buff *skb,
533 const struct net_device *dev)
535 struct net_device *master_dev;
536 const struct rtnl_link_ops *ops;
537 struct nlattr *slave_data;
540 master_dev = netdev_master_upper_dev_get((struct net_device *) dev);
543 ops = master_dev->rtnl_link_ops;
546 if (nla_put_string(skb, IFLA_INFO_SLAVE_KIND, ops->kind) < 0)
548 if (ops->fill_slave_info) {
549 slave_data = nla_nest_start(skb, IFLA_INFO_SLAVE_DATA);
552 err = ops->fill_slave_info(skb, master_dev, dev);
554 goto err_cancel_slave_data;
555 nla_nest_end(skb, slave_data);
559 err_cancel_slave_data:
560 nla_nest_cancel(skb, slave_data);
564 static int rtnl_link_info_fill(struct sk_buff *skb,
565 const struct net_device *dev)
567 const struct rtnl_link_ops *ops = dev->rtnl_link_ops;
573 if (nla_put_string(skb, IFLA_INFO_KIND, ops->kind) < 0)
575 if (ops->fill_xstats) {
576 err = ops->fill_xstats(skb, dev);
580 if (ops->fill_info) {
581 data = nla_nest_start(skb, IFLA_INFO_DATA);
584 err = ops->fill_info(skb, dev);
586 goto err_cancel_data;
587 nla_nest_end(skb, data);
592 nla_nest_cancel(skb, data);
596 static int rtnl_link_fill(struct sk_buff *skb, const struct net_device *dev)
598 struct nlattr *linkinfo;
601 linkinfo = nla_nest_start(skb, IFLA_LINKINFO);
602 if (linkinfo == NULL)
605 err = rtnl_link_info_fill(skb, dev);
607 goto err_cancel_link;
609 err = rtnl_link_slave_info_fill(skb, dev);
611 goto err_cancel_link;
613 nla_nest_end(skb, linkinfo);
617 nla_nest_cancel(skb, linkinfo);
622 int rtnetlink_send(struct sk_buff *skb, struct net *net, u32 pid, unsigned int group, int echo)
624 struct sock *rtnl = net->rtnl;
627 NETLINK_CB(skb).dst_group = group;
629 refcount_inc(&skb->users);
630 netlink_broadcast(rtnl, skb, pid, group, GFP_KERNEL);
632 err = netlink_unicast(rtnl, skb, pid, MSG_DONTWAIT);
636 int rtnl_unicast(struct sk_buff *skb, struct net *net, u32 pid)
638 struct sock *rtnl = net->rtnl;
640 return nlmsg_unicast(rtnl, skb, pid);
642 EXPORT_SYMBOL(rtnl_unicast);
644 void rtnl_notify(struct sk_buff *skb, struct net *net, u32 pid, u32 group,
645 struct nlmsghdr *nlh, gfp_t flags)
647 struct sock *rtnl = net->rtnl;
651 report = nlmsg_report(nlh);
653 nlmsg_notify(rtnl, skb, pid, group, report, flags);
655 EXPORT_SYMBOL(rtnl_notify);
657 void rtnl_set_sk_err(struct net *net, u32 group, int error)
659 struct sock *rtnl = net->rtnl;
661 netlink_set_err(rtnl, 0, group, error);
663 EXPORT_SYMBOL(rtnl_set_sk_err);
665 int rtnetlink_put_metrics(struct sk_buff *skb, u32 *metrics)
670 mx = nla_nest_start(skb, RTA_METRICS);
674 for (i = 0; i < RTAX_MAX; i++) {
676 if (i == RTAX_CC_ALGO - 1) {
677 char tmp[TCP_CA_NAME_MAX], *name;
679 name = tcp_ca_get_name_by_key(metrics[i], tmp);
682 if (nla_put_string(skb, i + 1, name))
683 goto nla_put_failure;
684 } else if (i == RTAX_FEATURES - 1) {
685 u32 user_features = metrics[i] & RTAX_FEATURE_MASK;
689 BUILD_BUG_ON(RTAX_FEATURE_MASK & DST_FEATURE_MASK);
690 if (nla_put_u32(skb, i + 1, user_features))
691 goto nla_put_failure;
693 if (nla_put_u32(skb, i + 1, metrics[i]))
694 goto nla_put_failure;
701 nla_nest_cancel(skb, mx);
705 return nla_nest_end(skb, mx);
708 nla_nest_cancel(skb, mx);
711 EXPORT_SYMBOL(rtnetlink_put_metrics);
713 int rtnl_put_cacheinfo(struct sk_buff *skb, struct dst_entry *dst, u32 id,
714 long expires, u32 error)
716 struct rta_cacheinfo ci = {
717 .rta_lastuse = jiffies_delta_to_clock_t(jiffies - dst->lastuse),
718 .rta_used = dst->__use,
719 .rta_clntref = atomic_read(&(dst->__refcnt)),
727 clock = jiffies_to_clock_t(abs(expires));
728 clock = min_t(unsigned long, clock, INT_MAX);
729 ci.rta_expires = (expires > 0) ? clock : -clock;
731 return nla_put(skb, RTA_CACHEINFO, sizeof(ci), &ci);
733 EXPORT_SYMBOL_GPL(rtnl_put_cacheinfo);
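/*
 * Usage sketch (illustrative): route dump code typically calls
 * rtnl_put_cacheinfo() while filling a reply so that an RTA_CACHEINFO
 * attribute describing the dst entry is attached. "expires" is relative,
 * in jiffies. The rt/expires/err variables are assumed to exist in the
 * caller.
 *
 *	if (rtnl_put_cacheinfo(skb, &rt->dst, 0, expires, err) < 0)
 *		goto nla_put_failure;
 */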
735 static void set_operstate(struct net_device *dev, unsigned char transition)
737 unsigned char operstate = dev->operstate;
739 switch (transition) {
741 if ((operstate == IF_OPER_DORMANT ||
742 operstate == IF_OPER_UNKNOWN) &&
744 operstate = IF_OPER_UP;
747 case IF_OPER_DORMANT:
748 if (operstate == IF_OPER_UP ||
749 operstate == IF_OPER_UNKNOWN)
750 operstate = IF_OPER_DORMANT;
754 if (dev->operstate != operstate) {
755 write_lock_bh(&dev_base_lock);
756 dev->operstate = operstate;
757 write_unlock_bh(&dev_base_lock);
758 netdev_state_change(dev);
762 static unsigned int rtnl_dev_get_flags(const struct net_device *dev)
764 return (dev->flags & ~(IFF_PROMISC | IFF_ALLMULTI)) |
765 (dev->gflags & (IFF_PROMISC | IFF_ALLMULTI));
768 static unsigned int rtnl_dev_combine_flags(const struct net_device *dev,
769 const struct ifinfomsg *ifm)
771 unsigned int flags = ifm->ifi_flags;
773 /* bugwards compatibility: ifi_change == 0 is treated as ~0 */
775 flags = (flags & ifm->ifi_change) |
776 (rtnl_dev_get_flags(dev) & ~ifm->ifi_change);
781 static void copy_rtnl_link_stats(struct rtnl_link_stats *a,
782 const struct rtnl_link_stats64 *b)
784 a->rx_packets = b->rx_packets;
785 a->tx_packets = b->tx_packets;
786 a->rx_bytes = b->rx_bytes;
787 a->tx_bytes = b->tx_bytes;
788 a->rx_errors = b->rx_errors;
789 a->tx_errors = b->tx_errors;
790 a->rx_dropped = b->rx_dropped;
791 a->tx_dropped = b->tx_dropped;
793 a->multicast = b->multicast;
794 a->collisions = b->collisions;
796 a->rx_length_errors = b->rx_length_errors;
797 a->rx_over_errors = b->rx_over_errors;
798 a->rx_crc_errors = b->rx_crc_errors;
799 a->rx_frame_errors = b->rx_frame_errors;
800 a->rx_fifo_errors = b->rx_fifo_errors;
801 a->rx_missed_errors = b->rx_missed_errors;
803 a->tx_aborted_errors = b->tx_aborted_errors;
804 a->tx_carrier_errors = b->tx_carrier_errors;
805 a->tx_fifo_errors = b->tx_fifo_errors;
806 a->tx_heartbeat_errors = b->tx_heartbeat_errors;
807 a->tx_window_errors = b->tx_window_errors;
809 a->rx_compressed = b->rx_compressed;
810 a->tx_compressed = b->tx_compressed;
812 a->rx_nohandler = b->rx_nohandler;
816 static inline int rtnl_vfinfo_size(const struct net_device *dev,
819 if (dev->dev.parent && (ext_filter_mask & RTEXT_FILTER_VF)) {
820 int num_vfs = dev_num_vf(dev->dev.parent);
821 size_t size = nla_total_size(0);
824 nla_total_size(sizeof(struct ifla_vf_mac)) +
825 nla_total_size(sizeof(struct ifla_vf_vlan)) +
826 nla_total_size(0) + /* nest IFLA_VF_VLAN_LIST */
827 nla_total_size(MAX_VLAN_LIST_LEN *
828 sizeof(struct ifla_vf_vlan_info)) +
829 nla_total_size(sizeof(struct ifla_vf_spoofchk)) +
830 nla_total_size(sizeof(struct ifla_vf_tx_rate)) +
831 nla_total_size(sizeof(struct ifla_vf_rate)) +
832 nla_total_size(sizeof(struct ifla_vf_link_state)) +
833 nla_total_size(sizeof(struct ifla_vf_rss_query_en)) +
834 nla_total_size(0) + /* nest IFLA_VF_STATS */
835 /* IFLA_VF_STATS_RX_PACKETS */
836 nla_total_size_64bit(sizeof(__u64)) +
837 /* IFLA_VF_STATS_TX_PACKETS */
838 nla_total_size_64bit(sizeof(__u64)) +
839 /* IFLA_VF_STATS_RX_BYTES */
840 nla_total_size_64bit(sizeof(__u64)) +
841 /* IFLA_VF_STATS_TX_BYTES */
842 nla_total_size_64bit(sizeof(__u64)) +
843 /* IFLA_VF_STATS_BROADCAST */
844 nla_total_size_64bit(sizeof(__u64)) +
845 /* IFLA_VF_STATS_MULTICAST */
846 nla_total_size_64bit(sizeof(__u64)) +
847 nla_total_size(sizeof(struct ifla_vf_trust)));
static size_t rtnl_port_size(const struct net_device *dev,
size_t port_size = nla_total_size(4) /* PORT_VF */
+ nla_total_size(PORT_PROFILE_MAX) /* PORT_PROFILE */
+ nla_total_size(PORT_UUID_MAX) /* PORT_INSTANCE_UUID */
+ nla_total_size(PORT_UUID_MAX) /* PORT_HOST_UUID */
+ nla_total_size(1) /* PORT_VDP_REQUEST */
+ nla_total_size(2); /* PORT_VDP_RESPONSE */
862 size_t vf_ports_size = nla_total_size(sizeof(struct nlattr));
863 size_t vf_port_size = nla_total_size(sizeof(struct nlattr))
865 size_t port_self_size = nla_total_size(sizeof(struct nlattr))
868 if (!dev->netdev_ops->ndo_get_vf_port || !dev->dev.parent ||
869 !(ext_filter_mask & RTEXT_FILTER_VF))
871 if (dev_num_vf(dev->dev.parent))
872 return port_self_size + vf_ports_size +
873 vf_port_size * dev_num_vf(dev->dev.parent);
875 return port_self_size;
878 static size_t rtnl_xdp_size(void)
880 size_t xdp_size = nla_total_size(0) + /* nest IFLA_XDP */
881 nla_total_size(1) + /* XDP_ATTACHED */
882 nla_total_size(4); /* XDP_PROG_ID */
887 static noinline size_t if_nlmsg_size(const struct net_device *dev,
890 return NLMSG_ALIGN(sizeof(struct ifinfomsg))
891 + nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */
892 + nla_total_size(IFALIASZ) /* IFLA_IFALIAS */
893 + nla_total_size(IFNAMSIZ) /* IFLA_QDISC */
894 + nla_total_size_64bit(sizeof(struct rtnl_link_ifmap))
895 + nla_total_size(sizeof(struct rtnl_link_stats))
896 + nla_total_size_64bit(sizeof(struct rtnl_link_stats64))
897 + nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */
898 + nla_total_size(MAX_ADDR_LEN) /* IFLA_BROADCAST */
899 + nla_total_size(4) /* IFLA_TXQLEN */
900 + nla_total_size(4) /* IFLA_WEIGHT */
901 + nla_total_size(4) /* IFLA_MTU */
902 + nla_total_size(4) /* IFLA_LINK */
903 + nla_total_size(4) /* IFLA_MASTER */
904 + nla_total_size(1) /* IFLA_CARRIER */
905 + nla_total_size(4) /* IFLA_PROMISCUITY */
906 + nla_total_size(4) /* IFLA_NUM_TX_QUEUES */
907 + nla_total_size(4) /* IFLA_NUM_RX_QUEUES */
908 + nla_total_size(4) /* IFLA_GSO_MAX_SEGS */
909 + nla_total_size(4) /* IFLA_GSO_MAX_SIZE */
910 + nla_total_size(1) /* IFLA_OPERSTATE */
911 + nla_total_size(1) /* IFLA_LINKMODE */
912 + nla_total_size(4) /* IFLA_CARRIER_CHANGES */
913 + nla_total_size(4) /* IFLA_LINK_NETNSID */
914 + nla_total_size(4) /* IFLA_GROUP */
915 + nla_total_size(ext_filter_mask
916 & RTEXT_FILTER_VF ? 4 : 0) /* IFLA_NUM_VF */
917 + rtnl_vfinfo_size(dev, ext_filter_mask) /* IFLA_VFINFO_LIST */
918 + rtnl_port_size(dev, ext_filter_mask) /* IFLA_VF_PORTS + IFLA_PORT_SELF */
919 + rtnl_link_get_size(dev) /* IFLA_LINKINFO */
920 + rtnl_link_get_af_size(dev, ext_filter_mask) /* IFLA_AF_SPEC */
921 + nla_total_size(MAX_PHYS_ITEM_ID_LEN) /* IFLA_PHYS_PORT_ID */
922 + nla_total_size(MAX_PHYS_ITEM_ID_LEN) /* IFLA_PHYS_SWITCH_ID */
923 + nla_total_size(IFNAMSIZ) /* IFLA_PHYS_PORT_NAME */
924 + rtnl_xdp_size() /* IFLA_XDP */
925 + nla_total_size(4) /* IFLA_EVENT */
926 + nla_total_size(1); /* IFLA_PROTO_DOWN */
930 static int rtnl_vf_ports_fill(struct sk_buff *skb, struct net_device *dev)
932 struct nlattr *vf_ports;
933 struct nlattr *vf_port;
937 vf_ports = nla_nest_start(skb, IFLA_VF_PORTS);
941 for (vf = 0; vf < dev_num_vf(dev->dev.parent); vf++) {
942 vf_port = nla_nest_start(skb, IFLA_VF_PORT);
944 goto nla_put_failure;
945 if (nla_put_u32(skb, IFLA_PORT_VF, vf))
946 goto nla_put_failure;
947 err = dev->netdev_ops->ndo_get_vf_port(dev, vf, skb);
948 if (err == -EMSGSIZE)
949 goto nla_put_failure;
951 nla_nest_cancel(skb, vf_port);
954 nla_nest_end(skb, vf_port);
957 nla_nest_end(skb, vf_ports);
962 nla_nest_cancel(skb, vf_ports);
966 static int rtnl_port_self_fill(struct sk_buff *skb, struct net_device *dev)
968 struct nlattr *port_self;
971 port_self = nla_nest_start(skb, IFLA_PORT_SELF);
975 err = dev->netdev_ops->ndo_get_vf_port(dev, PORT_SELF_VF, skb);
977 nla_nest_cancel(skb, port_self);
978 return (err == -EMSGSIZE) ? err : 0;
981 nla_nest_end(skb, port_self);
986 static int rtnl_port_fill(struct sk_buff *skb, struct net_device *dev,
991 if (!dev->netdev_ops->ndo_get_vf_port || !dev->dev.parent ||
992 !(ext_filter_mask & RTEXT_FILTER_VF))
995 err = rtnl_port_self_fill(skb, dev);
999 if (dev_num_vf(dev->dev.parent)) {
1000 err = rtnl_vf_ports_fill(skb, dev);
1008 static int rtnl_phys_port_id_fill(struct sk_buff *skb, struct net_device *dev)
1011 struct netdev_phys_item_id ppid;
1013 err = dev_get_phys_port_id(dev, &ppid);
1015 if (err == -EOPNOTSUPP)
1020 if (nla_put(skb, IFLA_PHYS_PORT_ID, ppid.id_len, ppid.id))
1026 static int rtnl_phys_port_name_fill(struct sk_buff *skb, struct net_device *dev)
1028 char name[IFNAMSIZ];
1031 err = dev_get_phys_port_name(dev, name, sizeof(name));
1033 if (err == -EOPNOTSUPP)
1038 if (nla_put_string(skb, IFLA_PHYS_PORT_NAME, name))
1044 static int rtnl_phys_switch_id_fill(struct sk_buff *skb, struct net_device *dev)
1047 struct switchdev_attr attr = {
1049 .id = SWITCHDEV_ATTR_ID_PORT_PARENT_ID,
1050 .flags = SWITCHDEV_F_NO_RECURSE,
1053 err = switchdev_port_attr_get(dev, &attr);
1055 if (err == -EOPNOTSUPP)
1060 if (nla_put(skb, IFLA_PHYS_SWITCH_ID, attr.u.ppid.id_len,
1067 static noinline_for_stack int rtnl_fill_stats(struct sk_buff *skb,
1068 struct net_device *dev)
1070 struct rtnl_link_stats64 *sp;
1071 struct nlattr *attr;
1073 attr = nla_reserve_64bit(skb, IFLA_STATS64,
1074 sizeof(struct rtnl_link_stats64), IFLA_PAD);
1078 sp = nla_data(attr);
1079 dev_get_stats(dev, sp);
1081 attr = nla_reserve(skb, IFLA_STATS,
1082 sizeof(struct rtnl_link_stats));
1086 copy_rtnl_link_stats(nla_data(attr), sp);
1091 static noinline_for_stack int rtnl_fill_vfinfo(struct sk_buff *skb,
1092 struct net_device *dev,
1094 struct nlattr *vfinfo)
1096 struct ifla_vf_rss_query_en vf_rss_query_en;
1097 struct nlattr *vf, *vfstats, *vfvlanlist;
1098 struct ifla_vf_link_state vf_linkstate;
1099 struct ifla_vf_vlan_info vf_vlan_info;
1100 struct ifla_vf_spoofchk vf_spoofchk;
1101 struct ifla_vf_tx_rate vf_tx_rate;
1102 struct ifla_vf_stats vf_stats;
1103 struct ifla_vf_trust vf_trust;
1104 struct ifla_vf_vlan vf_vlan;
1105 struct ifla_vf_rate vf_rate;
1106 struct ifla_vf_mac vf_mac;
1107 struct ifla_vf_info ivi;
1109 memset(&ivi, 0, sizeof(ivi));
/* Not all SR-IOV capable drivers support the
 * spoofcheck and "RSS query enable" query. Preset to
 * -1 so the user space tool can detect that the driver
 * didn't report anything.
 */
1117 ivi.rss_query_en = -1;
/* The default value for VF link state is "auto",
 * IFLA_VF_LINK_STATE_AUTO, which equals zero.
 */
1123 /* VLAN Protocol by default is 802.1Q */
1124 ivi.vlan_proto = htons(ETH_P_8021Q);
1125 if (dev->netdev_ops->ndo_get_vf_config(dev, vfs_num, &ivi))
1128 memset(&vf_vlan_info, 0, sizeof(vf_vlan_info));
1137 vf_rss_query_en.vf =
1138 vf_trust.vf = ivi.vf;
1140 memcpy(vf_mac.mac, ivi.mac, sizeof(ivi.mac));
1141 vf_vlan.vlan = ivi.vlan;
1142 vf_vlan.qos = ivi.qos;
1143 vf_vlan_info.vlan = ivi.vlan;
1144 vf_vlan_info.qos = ivi.qos;
1145 vf_vlan_info.vlan_proto = ivi.vlan_proto;
1146 vf_tx_rate.rate = ivi.max_tx_rate;
1147 vf_rate.min_tx_rate = ivi.min_tx_rate;
1148 vf_rate.max_tx_rate = ivi.max_tx_rate;
1149 vf_spoofchk.setting = ivi.spoofchk;
1150 vf_linkstate.link_state = ivi.linkstate;
1151 vf_rss_query_en.setting = ivi.rss_query_en;
1152 vf_trust.setting = ivi.trusted;
1153 vf = nla_nest_start(skb, IFLA_VF_INFO);
1155 goto nla_put_vfinfo_failure;
1156 if (nla_put(skb, IFLA_VF_MAC, sizeof(vf_mac), &vf_mac) ||
1157 nla_put(skb, IFLA_VF_VLAN, sizeof(vf_vlan), &vf_vlan) ||
1158 nla_put(skb, IFLA_VF_RATE, sizeof(vf_rate),
1160 nla_put(skb, IFLA_VF_TX_RATE, sizeof(vf_tx_rate),
1162 nla_put(skb, IFLA_VF_SPOOFCHK, sizeof(vf_spoofchk),
1164 nla_put(skb, IFLA_VF_LINK_STATE, sizeof(vf_linkstate),
1166 nla_put(skb, IFLA_VF_RSS_QUERY_EN,
1167 sizeof(vf_rss_query_en),
1168 &vf_rss_query_en) ||
1169 nla_put(skb, IFLA_VF_TRUST,
1170 sizeof(vf_trust), &vf_trust))
1171 goto nla_put_vf_failure;
1172 vfvlanlist = nla_nest_start(skb, IFLA_VF_VLAN_LIST);
1174 goto nla_put_vf_failure;
1175 if (nla_put(skb, IFLA_VF_VLAN_INFO, sizeof(vf_vlan_info),
1177 nla_nest_cancel(skb, vfvlanlist);
1178 goto nla_put_vf_failure;
1180 nla_nest_end(skb, vfvlanlist);
1181 memset(&vf_stats, 0, sizeof(vf_stats));
1182 if (dev->netdev_ops->ndo_get_vf_stats)
1183 dev->netdev_ops->ndo_get_vf_stats(dev, vfs_num,
1185 vfstats = nla_nest_start(skb, IFLA_VF_STATS);
1187 goto nla_put_vf_failure;
1188 if (nla_put_u64_64bit(skb, IFLA_VF_STATS_RX_PACKETS,
1189 vf_stats.rx_packets, IFLA_VF_STATS_PAD) ||
1190 nla_put_u64_64bit(skb, IFLA_VF_STATS_TX_PACKETS,
1191 vf_stats.tx_packets, IFLA_VF_STATS_PAD) ||
1192 nla_put_u64_64bit(skb, IFLA_VF_STATS_RX_BYTES,
1193 vf_stats.rx_bytes, IFLA_VF_STATS_PAD) ||
1194 nla_put_u64_64bit(skb, IFLA_VF_STATS_TX_BYTES,
1195 vf_stats.tx_bytes, IFLA_VF_STATS_PAD) ||
1196 nla_put_u64_64bit(skb, IFLA_VF_STATS_BROADCAST,
1197 vf_stats.broadcast, IFLA_VF_STATS_PAD) ||
1198 nla_put_u64_64bit(skb, IFLA_VF_STATS_MULTICAST,
1199 vf_stats.multicast, IFLA_VF_STATS_PAD)) {
1200 nla_nest_cancel(skb, vfstats);
1201 goto nla_put_vf_failure;
1203 nla_nest_end(skb, vfstats);
1204 nla_nest_end(skb, vf);
1208 nla_nest_cancel(skb, vf);
1209 nla_put_vfinfo_failure:
1210 nla_nest_cancel(skb, vfinfo);
1214 static int rtnl_fill_link_ifmap(struct sk_buff *skb, struct net_device *dev)
1216 struct rtnl_link_ifmap map;
1218 memset(&map, 0, sizeof(map));
1219 map.mem_start = dev->mem_start;
1220 map.mem_end = dev->mem_end;
1221 map.base_addr = dev->base_addr;
1224 map.port = dev->if_port;
1226 if (nla_put_64bit(skb, IFLA_MAP, sizeof(map), &map, IFLA_PAD))
1232 static u8 rtnl_xdp_attached_mode(struct net_device *dev, u32 *prog_id)
1234 const struct net_device_ops *ops = dev->netdev_ops;
1235 const struct bpf_prog *generic_xdp_prog;
1240 generic_xdp_prog = rtnl_dereference(dev->xdp_prog);
1241 if (generic_xdp_prog) {
1242 *prog_id = generic_xdp_prog->aux->id;
1243 return XDP_ATTACHED_SKB;
1246 return XDP_ATTACHED_NONE;
1248 return __dev_xdp_attached(dev, ops->ndo_xdp, prog_id);
1251 static int rtnl_xdp_fill(struct sk_buff *skb, struct net_device *dev)
1257 xdp = nla_nest_start(skb, IFLA_XDP);
1261 err = nla_put_u8(skb, IFLA_XDP_ATTACHED,
1262 rtnl_xdp_attached_mode(dev, &prog_id));
1267 err = nla_put_u32(skb, IFLA_XDP_PROG_ID, prog_id);
1272 nla_nest_end(skb, xdp);
1276 nla_nest_cancel(skb, xdp);
1280 static u32 rtnl_get_event(unsigned long event)
1282 u32 rtnl_event_type = IFLA_EVENT_NONE;
1286 rtnl_event_type = IFLA_EVENT_REBOOT;
1288 case NETDEV_FEAT_CHANGE:
1289 rtnl_event_type = IFLA_EVENT_FEATURES;
1291 case NETDEV_BONDING_FAILOVER:
1292 rtnl_event_type = IFLA_EVENT_BONDING_FAILOVER;
1294 case NETDEV_NOTIFY_PEERS:
1295 rtnl_event_type = IFLA_EVENT_NOTIFY_PEERS;
1297 case NETDEV_RESEND_IGMP:
1298 rtnl_event_type = IFLA_EVENT_IGMP_RESEND;
1300 case NETDEV_CHANGEINFODATA:
1301 rtnl_event_type = IFLA_EVENT_BONDING_OPTIONS;
1307 return rtnl_event_type;
1310 static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
1311 int type, u32 pid, u32 seq, u32 change,
1312 unsigned int flags, u32 ext_filter_mask,
1315 struct ifinfomsg *ifm;
1316 struct nlmsghdr *nlh;
1317 struct nlattr *af_spec;
1318 struct rtnl_af_ops *af_ops;
1319 struct net_device *upper_dev = netdev_master_upper_dev_get(dev);
1322 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ifm), flags);
1326 ifm = nlmsg_data(nlh);
1327 ifm->ifi_family = AF_UNSPEC;
1329 ifm->ifi_type = dev->type;
1330 ifm->ifi_index = dev->ifindex;
1331 ifm->ifi_flags = dev_get_flags(dev);
1332 ifm->ifi_change = change;
1334 if (nla_put_string(skb, IFLA_IFNAME, dev->name) ||
1335 nla_put_u32(skb, IFLA_TXQLEN, dev->tx_queue_len) ||
1336 nla_put_u8(skb, IFLA_OPERSTATE,
1337 netif_running(dev) ? dev->operstate : IF_OPER_DOWN) ||
1338 nla_put_u8(skb, IFLA_LINKMODE, dev->link_mode) ||
1339 nla_put_u32(skb, IFLA_MTU, dev->mtu) ||
1340 nla_put_u32(skb, IFLA_GROUP, dev->group) ||
1341 nla_put_u32(skb, IFLA_PROMISCUITY, dev->promiscuity) ||
1342 nla_put_u32(skb, IFLA_NUM_TX_QUEUES, dev->num_tx_queues) ||
1343 nla_put_u32(skb, IFLA_GSO_MAX_SEGS, dev->gso_max_segs) ||
1344 nla_put_u32(skb, IFLA_GSO_MAX_SIZE, dev->gso_max_size) ||
1346 nla_put_u32(skb, IFLA_NUM_RX_QUEUES, dev->num_rx_queues) ||
1348 (dev->ifindex != dev_get_iflink(dev) &&
1349 nla_put_u32(skb, IFLA_LINK, dev_get_iflink(dev))) ||
1351 nla_put_u32(skb, IFLA_MASTER, upper_dev->ifindex)) ||
1352 nla_put_u8(skb, IFLA_CARRIER, netif_carrier_ok(dev)) ||
1354 nla_put_string(skb, IFLA_QDISC, dev->qdisc->ops->id)) ||
1356 nla_put_string(skb, IFLA_IFALIAS, dev->ifalias)) ||
1357 nla_put_u32(skb, IFLA_CARRIER_CHANGES,
1358 atomic_read(&dev->carrier_changes)) ||
1359 nla_put_u8(skb, IFLA_PROTO_DOWN, dev->proto_down))
1360 goto nla_put_failure;
1362 if (event != IFLA_EVENT_NONE) {
1363 if (nla_put_u32(skb, IFLA_EVENT, event))
1364 goto nla_put_failure;
1367 if (rtnl_fill_link_ifmap(skb, dev))
1368 goto nla_put_failure;
1370 if (dev->addr_len) {
1371 if (nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr) ||
1372 nla_put(skb, IFLA_BROADCAST, dev->addr_len, dev->broadcast))
1373 goto nla_put_failure;
1376 if (rtnl_phys_port_id_fill(skb, dev))
1377 goto nla_put_failure;
1379 if (rtnl_phys_port_name_fill(skb, dev))
1380 goto nla_put_failure;
1382 if (rtnl_phys_switch_id_fill(skb, dev))
1383 goto nla_put_failure;
1385 if (rtnl_fill_stats(skb, dev))
1386 goto nla_put_failure;
1388 if (dev->dev.parent && (ext_filter_mask & RTEXT_FILTER_VF) &&
1389 nla_put_u32(skb, IFLA_NUM_VF, dev_num_vf(dev->dev.parent)))
1390 goto nla_put_failure;
1392 if (dev->netdev_ops->ndo_get_vf_config && dev->dev.parent &&
1393 ext_filter_mask & RTEXT_FILTER_VF) {
1395 struct nlattr *vfinfo;
1396 int num_vfs = dev_num_vf(dev->dev.parent);
1398 vfinfo = nla_nest_start(skb, IFLA_VFINFO_LIST);
1400 goto nla_put_failure;
1401 for (i = 0; i < num_vfs; i++) {
1402 if (rtnl_fill_vfinfo(skb, dev, i, vfinfo))
1403 goto nla_put_failure;
1406 nla_nest_end(skb, vfinfo);
1409 if (rtnl_port_fill(skb, dev, ext_filter_mask))
1410 goto nla_put_failure;
1412 if (rtnl_xdp_fill(skb, dev))
1413 goto nla_put_failure;
1415 if (dev->rtnl_link_ops || rtnl_have_link_slave_info(dev)) {
1416 if (rtnl_link_fill(skb, dev) < 0)
1417 goto nla_put_failure;
1420 if (dev->rtnl_link_ops &&
1421 dev->rtnl_link_ops->get_link_net) {
1422 struct net *link_net = dev->rtnl_link_ops->get_link_net(dev);
1424 if (!net_eq(dev_net(dev), link_net)) {
1425 int id = peernet2id_alloc(dev_net(dev), link_net);
1427 if (nla_put_s32(skb, IFLA_LINK_NETNSID, id))
1428 goto nla_put_failure;
1432 if (!(af_spec = nla_nest_start(skb, IFLA_AF_SPEC)))
1433 goto nla_put_failure;
1435 list_for_each_entry(af_ops, &rtnl_af_ops, list) {
1436 if (af_ops->fill_link_af) {
1440 if (!(af = nla_nest_start(skb, af_ops->family)))
1441 goto nla_put_failure;
1443 err = af_ops->fill_link_af(skb, dev, ext_filter_mask);
/*
 * Caller may return ENODATA to indicate that there
 * was no data to be dumped. This is not an error, it
 * means we should trim the attribute header and
 * continue.
 */
1451 if (err == -ENODATA)
1452 nla_nest_cancel(skb, af);
1454 goto nla_put_failure;
1456 nla_nest_end(skb, af);
1460 nla_nest_end(skb, af_spec);
1462 nlmsg_end(skb, nlh);
1466 nlmsg_cancel(skb, nlh);
1470 static const struct nla_policy ifla_policy[IFLA_MAX+1] = {
1471 [IFLA_IFNAME] = { .type = NLA_STRING, .len = IFNAMSIZ-1 },
1472 [IFLA_ADDRESS] = { .type = NLA_BINARY, .len = MAX_ADDR_LEN },
1473 [IFLA_BROADCAST] = { .type = NLA_BINARY, .len = MAX_ADDR_LEN },
1474 [IFLA_MAP] = { .len = sizeof(struct rtnl_link_ifmap) },
1475 [IFLA_MTU] = { .type = NLA_U32 },
1476 [IFLA_LINK] = { .type = NLA_U32 },
1477 [IFLA_MASTER] = { .type = NLA_U32 },
1478 [IFLA_CARRIER] = { .type = NLA_U8 },
1479 [IFLA_TXQLEN] = { .type = NLA_U32 },
1480 [IFLA_WEIGHT] = { .type = NLA_U32 },
1481 [IFLA_OPERSTATE] = { .type = NLA_U8 },
1482 [IFLA_LINKMODE] = { .type = NLA_U8 },
1483 [IFLA_LINKINFO] = { .type = NLA_NESTED },
1484 [IFLA_NET_NS_PID] = { .type = NLA_U32 },
1485 [IFLA_NET_NS_FD] = { .type = NLA_U32 },
/* IFLA_IFALIAS is a string, but policy is set to NLA_BINARY to
 * allow 0-length string (needed to remove an alias).
 */
1489 [IFLA_IFALIAS] = { .type = NLA_BINARY, .len = IFALIASZ - 1 },
1490 [IFLA_VFINFO_LIST] = {. type = NLA_NESTED },
1491 [IFLA_VF_PORTS] = { .type = NLA_NESTED },
1492 [IFLA_PORT_SELF] = { .type = NLA_NESTED },
1493 [IFLA_AF_SPEC] = { .type = NLA_NESTED },
1494 [IFLA_EXT_MASK] = { .type = NLA_U32 },
1495 [IFLA_PROMISCUITY] = { .type = NLA_U32 },
1496 [IFLA_NUM_TX_QUEUES] = { .type = NLA_U32 },
1497 [IFLA_NUM_RX_QUEUES] = { .type = NLA_U32 },
1498 [IFLA_PHYS_PORT_ID] = { .type = NLA_BINARY, .len = MAX_PHYS_ITEM_ID_LEN },
1499 [IFLA_CARRIER_CHANGES] = { .type = NLA_U32 }, /* ignored */
1500 [IFLA_PHYS_SWITCH_ID] = { .type = NLA_BINARY, .len = MAX_PHYS_ITEM_ID_LEN },
1501 [IFLA_LINK_NETNSID] = { .type = NLA_S32 },
1502 [IFLA_PROTO_DOWN] = { .type = NLA_U8 },
1503 [IFLA_XDP] = { .type = NLA_NESTED },
1504 [IFLA_EVENT] = { .type = NLA_U32 },
1505 [IFLA_GROUP] = { .type = NLA_U32 },
1508 static const struct nla_policy ifla_info_policy[IFLA_INFO_MAX+1] = {
1509 [IFLA_INFO_KIND] = { .type = NLA_STRING },
1510 [IFLA_INFO_DATA] = { .type = NLA_NESTED },
1511 [IFLA_INFO_SLAVE_KIND] = { .type = NLA_STRING },
1512 [IFLA_INFO_SLAVE_DATA] = { .type = NLA_NESTED },
1515 static const struct nla_policy ifla_vf_policy[IFLA_VF_MAX+1] = {
1516 [IFLA_VF_MAC] = { .len = sizeof(struct ifla_vf_mac) },
1517 [IFLA_VF_VLAN] = { .len = sizeof(struct ifla_vf_vlan) },
1518 [IFLA_VF_VLAN_LIST] = { .type = NLA_NESTED },
1519 [IFLA_VF_TX_RATE] = { .len = sizeof(struct ifla_vf_tx_rate) },
1520 [IFLA_VF_SPOOFCHK] = { .len = sizeof(struct ifla_vf_spoofchk) },
1521 [IFLA_VF_RATE] = { .len = sizeof(struct ifla_vf_rate) },
1522 [IFLA_VF_LINK_STATE] = { .len = sizeof(struct ifla_vf_link_state) },
1523 [IFLA_VF_RSS_QUERY_EN] = { .len = sizeof(struct ifla_vf_rss_query_en) },
1524 [IFLA_VF_STATS] = { .type = NLA_NESTED },
1525 [IFLA_VF_TRUST] = { .len = sizeof(struct ifla_vf_trust) },
1526 [IFLA_VF_IB_NODE_GUID] = { .len = sizeof(struct ifla_vf_guid) },
1527 [IFLA_VF_IB_PORT_GUID] = { .len = sizeof(struct ifla_vf_guid) },
1530 static const struct nla_policy ifla_port_policy[IFLA_PORT_MAX+1] = {
1531 [IFLA_PORT_VF] = { .type = NLA_U32 },
1532 [IFLA_PORT_PROFILE] = { .type = NLA_STRING,
1533 .len = PORT_PROFILE_MAX },
1534 [IFLA_PORT_INSTANCE_UUID] = { .type = NLA_BINARY,
1535 .len = PORT_UUID_MAX },
1536 [IFLA_PORT_HOST_UUID] = { .type = NLA_STRING,
1537 .len = PORT_UUID_MAX },
1538 [IFLA_PORT_REQUEST] = { .type = NLA_U8, },
1539 [IFLA_PORT_RESPONSE] = { .type = NLA_U16, },
/* Unused, but we need to keep it here since user space could
 * fill it. It's also broken with regard to NLA_BINARY use in
 * combination with structs.
 */
1545 [IFLA_PORT_VSI_TYPE] = { .type = NLA_BINARY,
1546 .len = sizeof(struct ifla_port_vsi) },
1549 static const struct nla_policy ifla_xdp_policy[IFLA_XDP_MAX + 1] = {
1550 [IFLA_XDP_FD] = { .type = NLA_S32 },
1551 [IFLA_XDP_ATTACHED] = { .type = NLA_U8 },
1552 [IFLA_XDP_FLAGS] = { .type = NLA_U32 },
1553 [IFLA_XDP_PROG_ID] = { .type = NLA_U32 },
1556 static const struct rtnl_link_ops *linkinfo_to_kind_ops(const struct nlattr *nla)
1558 const struct rtnl_link_ops *ops = NULL;
1559 struct nlattr *linfo[IFLA_INFO_MAX + 1];
1561 if (nla_parse_nested(linfo, IFLA_INFO_MAX, nla,
1562 ifla_info_policy, NULL) < 0)
1565 if (linfo[IFLA_INFO_KIND]) {
1566 char kind[MODULE_NAME_LEN];
1568 nla_strlcpy(kind, linfo[IFLA_INFO_KIND], sizeof(kind));
1569 ops = rtnl_link_ops_get(kind);
1575 static bool link_master_filtered(struct net_device *dev, int master_idx)
1577 struct net_device *master;
1582 master = netdev_master_upper_dev_get(dev);
1583 if (!master || master->ifindex != master_idx)
1589 static bool link_kind_filtered(const struct net_device *dev,
1590 const struct rtnl_link_ops *kind_ops)
1592 if (kind_ops && dev->rtnl_link_ops != kind_ops)
1598 static bool link_dump_filtered(struct net_device *dev,
1600 const struct rtnl_link_ops *kind_ops)
1602 if (link_master_filtered(dev, master_idx) ||
1603 link_kind_filtered(dev, kind_ops))
1609 static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
1611 struct net *net = sock_net(skb->sk);
1614 struct net_device *dev;
1615 struct hlist_head *head;
1616 struct nlattr *tb[IFLA_MAX+1];
1617 u32 ext_filter_mask = 0;
1618 const struct rtnl_link_ops *kind_ops = NULL;
1619 unsigned int flags = NLM_F_MULTI;
1625 s_idx = cb->args[1];
/* A hack to preserve kernel<->userspace interface.
 * The correct header is ifinfomsg. It is consistent with rtnl_getlink.
 * However, before Linux v3.9 the code here assumed rtgenmsg and that's
 * what iproute2 < v3.9.0 used.
 * We can detect the old iproute2. Even including the IFLA_EXT_MASK
 * attribute, its netlink message is shorter than struct ifinfomsg.
 */
1634 hdrlen = nlmsg_len(cb->nlh) < sizeof(struct ifinfomsg) ?
1635 sizeof(struct rtgenmsg) : sizeof(struct ifinfomsg);
1637 if (nlmsg_parse(cb->nlh, hdrlen, tb, IFLA_MAX,
1638 ifla_policy, NULL) >= 0) {
1639 if (tb[IFLA_EXT_MASK])
1640 ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]);
1642 if (tb[IFLA_MASTER])
1643 master_idx = nla_get_u32(tb[IFLA_MASTER]);
1645 if (tb[IFLA_LINKINFO])
1646 kind_ops = linkinfo_to_kind_ops(tb[IFLA_LINKINFO]);
1648 if (master_idx || kind_ops)
1649 flags |= NLM_F_DUMP_FILTERED;
1652 for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
1654 head = &net->dev_index_head[h];
1655 hlist_for_each_entry(dev, head, index_hlist) {
1656 if (link_dump_filtered(dev, master_idx, kind_ops))
1660 err = rtnl_fill_ifinfo(skb, dev, RTM_NEWLINK,
1661 NETLINK_CB(cb->skb).portid,
1662 cb->nlh->nlmsg_seq, 0,
1664 ext_filter_mask, 0);
1667 if (likely(skb->len))
1681 cb->seq = net->dev_base_seq;
1682 nl_dump_check_consistent(cb, nlmsg_hdr(skb));
1687 int rtnl_nla_parse_ifla(struct nlattr **tb, const struct nlattr *head, int len,
1688 struct netlink_ext_ack *exterr)
1690 return nla_parse(tb, IFLA_MAX, head, len, ifla_policy, exterr);
1692 EXPORT_SYMBOL(rtnl_nla_parse_ifla);
1694 struct net *rtnl_link_get_net(struct net *src_net, struct nlattr *tb[])
/* Examine the link attributes and figure out which
 * network namespace we are talking about.
 */
1700 if (tb[IFLA_NET_NS_PID])
1701 net = get_net_ns_by_pid(nla_get_u32(tb[IFLA_NET_NS_PID]));
1702 else if (tb[IFLA_NET_NS_FD])
1703 net = get_net_ns_by_fd(nla_get_u32(tb[IFLA_NET_NS_FD]));
1705 net = get_net(src_net);
1708 EXPORT_SYMBOL(rtnl_link_get_net);
1710 static int validate_linkmsg(struct net_device *dev, struct nlattr *tb[])
1713 if (tb[IFLA_ADDRESS] &&
1714 nla_len(tb[IFLA_ADDRESS]) < dev->addr_len)
1717 if (tb[IFLA_BROADCAST] &&
1718 nla_len(tb[IFLA_BROADCAST]) < dev->addr_len)
1722 if (tb[IFLA_AF_SPEC]) {
1726 nla_for_each_nested(af, tb[IFLA_AF_SPEC], rem) {
1727 const struct rtnl_af_ops *af_ops;
1729 if (!(af_ops = rtnl_af_lookup(nla_type(af))))
1730 return -EAFNOSUPPORT;
1732 if (!af_ops->set_link_af)
1735 if (af_ops->validate_link_af) {
1736 err = af_ops->validate_link_af(dev, af);
1746 static int handle_infiniband_guid(struct net_device *dev, struct ifla_vf_guid *ivt,
1749 const struct net_device_ops *ops = dev->netdev_ops;
1751 return ops->ndo_set_vf_guid(dev, ivt->vf, ivt->guid, guid_type);
1754 static int handle_vf_guid(struct net_device *dev, struct ifla_vf_guid *ivt, int guid_type)
1756 if (dev->type != ARPHRD_INFINIBAND)
1759 return handle_infiniband_guid(dev, ivt, guid_type);
1762 static int do_setvfinfo(struct net_device *dev, struct nlattr **tb)
1764 const struct net_device_ops *ops = dev->netdev_ops;
1767 if (tb[IFLA_VF_MAC]) {
1768 struct ifla_vf_mac *ivm = nla_data(tb[IFLA_VF_MAC]);
1770 if (ivm->vf >= INT_MAX)
1773 if (ops->ndo_set_vf_mac)
1774 err = ops->ndo_set_vf_mac(dev, ivm->vf,
1780 if (tb[IFLA_VF_VLAN]) {
1781 struct ifla_vf_vlan *ivv = nla_data(tb[IFLA_VF_VLAN]);
1783 if (ivv->vf >= INT_MAX)
1786 if (ops->ndo_set_vf_vlan)
1787 err = ops->ndo_set_vf_vlan(dev, ivv->vf, ivv->vlan,
1789 htons(ETH_P_8021Q));
1794 if (tb[IFLA_VF_VLAN_LIST]) {
1795 struct ifla_vf_vlan_info *ivvl[MAX_VLAN_LIST_LEN];
1796 struct nlattr *attr;
1800 if (!ops->ndo_set_vf_vlan)
1803 nla_for_each_nested(attr, tb[IFLA_VF_VLAN_LIST], rem) {
1804 if (nla_type(attr) != IFLA_VF_VLAN_INFO ||
1805 nla_len(attr) < NLA_HDRLEN) {
1808 if (len >= MAX_VLAN_LIST_LEN)
1810 ivvl[len] = nla_data(attr);
1817 if (ivvl[0]->vf >= INT_MAX)
1819 err = ops->ndo_set_vf_vlan(dev, ivvl[0]->vf, ivvl[0]->vlan,
1820 ivvl[0]->qos, ivvl[0]->vlan_proto);
1825 if (tb[IFLA_VF_TX_RATE]) {
1826 struct ifla_vf_tx_rate *ivt = nla_data(tb[IFLA_VF_TX_RATE]);
1827 struct ifla_vf_info ivf;
1829 if (ivt->vf >= INT_MAX)
1832 if (ops->ndo_get_vf_config)
1833 err = ops->ndo_get_vf_config(dev, ivt->vf, &ivf);
1838 if (ops->ndo_set_vf_rate)
1839 err = ops->ndo_set_vf_rate(dev, ivt->vf,
1846 if (tb[IFLA_VF_RATE]) {
1847 struct ifla_vf_rate *ivt = nla_data(tb[IFLA_VF_RATE]);
1849 if (ivt->vf >= INT_MAX)
1852 if (ops->ndo_set_vf_rate)
1853 err = ops->ndo_set_vf_rate(dev, ivt->vf,
1860 if (tb[IFLA_VF_SPOOFCHK]) {
1861 struct ifla_vf_spoofchk *ivs = nla_data(tb[IFLA_VF_SPOOFCHK]);
1863 if (ivs->vf >= INT_MAX)
1866 if (ops->ndo_set_vf_spoofchk)
1867 err = ops->ndo_set_vf_spoofchk(dev, ivs->vf,
1873 if (tb[IFLA_VF_LINK_STATE]) {
1874 struct ifla_vf_link_state *ivl = nla_data(tb[IFLA_VF_LINK_STATE]);
1876 if (ivl->vf >= INT_MAX)
1879 if (ops->ndo_set_vf_link_state)
1880 err = ops->ndo_set_vf_link_state(dev, ivl->vf,
1886 if (tb[IFLA_VF_RSS_QUERY_EN]) {
1887 struct ifla_vf_rss_query_en *ivrssq_en;
1890 ivrssq_en = nla_data(tb[IFLA_VF_RSS_QUERY_EN]);
1891 if (ivrssq_en->vf >= INT_MAX)
1893 if (ops->ndo_set_vf_rss_query_en)
1894 err = ops->ndo_set_vf_rss_query_en(dev, ivrssq_en->vf,
1895 ivrssq_en->setting);
1900 if (tb[IFLA_VF_TRUST]) {
1901 struct ifla_vf_trust *ivt = nla_data(tb[IFLA_VF_TRUST]);
1903 if (ivt->vf >= INT_MAX)
1906 if (ops->ndo_set_vf_trust)
1907 err = ops->ndo_set_vf_trust(dev, ivt->vf, ivt->setting);
1912 if (tb[IFLA_VF_IB_NODE_GUID]) {
1913 struct ifla_vf_guid *ivt = nla_data(tb[IFLA_VF_IB_NODE_GUID]);
1915 if (ivt->vf >= INT_MAX)
1917 if (!ops->ndo_set_vf_guid)
1919 return handle_vf_guid(dev, ivt, IFLA_VF_IB_NODE_GUID);
1922 if (tb[IFLA_VF_IB_PORT_GUID]) {
1923 struct ifla_vf_guid *ivt = nla_data(tb[IFLA_VF_IB_PORT_GUID]);
1925 if (ivt->vf >= INT_MAX)
1927 if (!ops->ndo_set_vf_guid)
1930 return handle_vf_guid(dev, ivt, IFLA_VF_IB_PORT_GUID);
1936 static int do_set_master(struct net_device *dev, int ifindex)
1938 struct net_device *upper_dev = netdev_master_upper_dev_get(dev);
1939 const struct net_device_ops *ops;
1943 if (upper_dev->ifindex == ifindex)
1945 ops = upper_dev->netdev_ops;
1946 if (ops->ndo_del_slave) {
1947 err = ops->ndo_del_slave(upper_dev, dev);
1956 upper_dev = __dev_get_by_index(dev_net(dev), ifindex);
1959 ops = upper_dev->netdev_ops;
1960 if (ops->ndo_add_slave) {
1961 err = ops->ndo_add_slave(upper_dev, dev);
1971 #define DO_SETLINK_MODIFIED 0x01
1972 /* notify flag means notify + modified. */
1973 #define DO_SETLINK_NOTIFY 0x03
1974 static int do_setlink(const struct sk_buff *skb,
1975 struct net_device *dev, struct ifinfomsg *ifm,
1976 struct netlink_ext_ack *extack,
1977 struct nlattr **tb, char *ifname, int status)
1979 const struct net_device_ops *ops = dev->netdev_ops;
1982 err = validate_linkmsg(dev, tb);
1986 if (tb[IFLA_NET_NS_PID] || tb[IFLA_NET_NS_FD]) {
1987 struct net *net = rtnl_link_get_net(dev_net(dev), tb);
1992 if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN)) {
1997 err = dev_change_net_namespace(dev, net, ifname);
2001 status |= DO_SETLINK_MODIFIED;
2005 struct rtnl_link_ifmap *u_map;
2008 if (!ops->ndo_set_config) {
2013 if (!netif_device_present(dev)) {
2018 u_map = nla_data(tb[IFLA_MAP]);
2019 k_map.mem_start = (unsigned long) u_map->mem_start;
2020 k_map.mem_end = (unsigned long) u_map->mem_end;
2021 k_map.base_addr = (unsigned short) u_map->base_addr;
2022 k_map.irq = (unsigned char) u_map->irq;
2023 k_map.dma = (unsigned char) u_map->dma;
2024 k_map.port = (unsigned char) u_map->port;
2026 err = ops->ndo_set_config(dev, &k_map);
2030 status |= DO_SETLINK_NOTIFY;
2033 if (tb[IFLA_ADDRESS]) {
2034 struct sockaddr *sa;
2037 len = sizeof(sa_family_t) + max_t(size_t, dev->addr_len,
2039 sa = kmalloc(len, GFP_KERNEL);
2044 sa->sa_family = dev->type;
2045 memcpy(sa->sa_data, nla_data(tb[IFLA_ADDRESS]),
2047 err = dev_set_mac_address(dev, sa);
2051 status |= DO_SETLINK_MODIFIED;
2055 err = dev_set_mtu(dev, nla_get_u32(tb[IFLA_MTU]));
2058 status |= DO_SETLINK_MODIFIED;
2061 if (tb[IFLA_GROUP]) {
2062 dev_set_group(dev, nla_get_u32(tb[IFLA_GROUP]));
2063 status |= DO_SETLINK_NOTIFY;
/*
 * Interface selected by interface index but interface
 * name provided implies that a name change has been
 * requested.
 */
2071 if (ifm->ifi_index > 0 && ifname[0]) {
2072 err = dev_change_name(dev, ifname);
2075 status |= DO_SETLINK_MODIFIED;
2078 if (tb[IFLA_IFALIAS]) {
2079 err = dev_set_alias(dev, nla_data(tb[IFLA_IFALIAS]),
2080 nla_len(tb[IFLA_IFALIAS]));
2083 status |= DO_SETLINK_NOTIFY;
2086 if (tb[IFLA_BROADCAST]) {
2087 nla_memcpy(dev->broadcast, tb[IFLA_BROADCAST], dev->addr_len);
2088 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
2091 if (ifm->ifi_flags || ifm->ifi_change) {
2092 err = dev_change_flags(dev, rtnl_dev_combine_flags(dev, ifm));
2097 if (tb[IFLA_MASTER]) {
2098 err = do_set_master(dev, nla_get_u32(tb[IFLA_MASTER]));
2101 status |= DO_SETLINK_MODIFIED;
2104 if (tb[IFLA_CARRIER]) {
2105 err = dev_change_carrier(dev, nla_get_u8(tb[IFLA_CARRIER]));
2108 status |= DO_SETLINK_MODIFIED;
2111 if (tb[IFLA_TXQLEN]) {
2112 unsigned int value = nla_get_u32(tb[IFLA_TXQLEN]);
2113 unsigned int orig_len = dev->tx_queue_len;
2115 if (dev->tx_queue_len ^ value) {
2116 dev->tx_queue_len = value;
2117 err = call_netdevice_notifiers(
2118 NETDEV_CHANGE_TX_QUEUE_LEN, dev);
2119 err = notifier_to_errno(err);
2121 dev->tx_queue_len = orig_len;
2124 status |= DO_SETLINK_MODIFIED;
2128 if (tb[IFLA_OPERSTATE])
2129 set_operstate(dev, nla_get_u8(tb[IFLA_OPERSTATE]));
2131 if (tb[IFLA_LINKMODE]) {
2132 unsigned char value = nla_get_u8(tb[IFLA_LINKMODE]);
2134 write_lock_bh(&dev_base_lock);
2135 if (dev->link_mode ^ value)
2136 status |= DO_SETLINK_NOTIFY;
2137 dev->link_mode = value;
2138 write_unlock_bh(&dev_base_lock);
2141 if (tb[IFLA_VFINFO_LIST]) {
2142 struct nlattr *vfinfo[IFLA_VF_MAX + 1];
2143 struct nlattr *attr;
2146 nla_for_each_nested(attr, tb[IFLA_VFINFO_LIST], rem) {
2147 if (nla_type(attr) != IFLA_VF_INFO ||
2148 nla_len(attr) < NLA_HDRLEN) {
2152 err = nla_parse_nested(vfinfo, IFLA_VF_MAX, attr,
2153 ifla_vf_policy, NULL);
2156 err = do_setvfinfo(dev, vfinfo);
2159 status |= DO_SETLINK_NOTIFY;
2164 if (tb[IFLA_VF_PORTS]) {
2165 struct nlattr *port[IFLA_PORT_MAX+1];
2166 struct nlattr *attr;
2171 if (!ops->ndo_set_vf_port)
2174 nla_for_each_nested(attr, tb[IFLA_VF_PORTS], rem) {
2175 if (nla_type(attr) != IFLA_VF_PORT ||
2176 nla_len(attr) < NLA_HDRLEN) {
2180 err = nla_parse_nested(port, IFLA_PORT_MAX, attr,
2181 ifla_port_policy, NULL);
2184 if (!port[IFLA_PORT_VF]) {
2188 vf = nla_get_u32(port[IFLA_PORT_VF]);
2189 err = ops->ndo_set_vf_port(dev, vf, port);
2192 status |= DO_SETLINK_NOTIFY;
2197 if (tb[IFLA_PORT_SELF]) {
2198 struct nlattr *port[IFLA_PORT_MAX+1];
2200 err = nla_parse_nested(port, IFLA_PORT_MAX,
2201 tb[IFLA_PORT_SELF], ifla_port_policy,
2207 if (ops->ndo_set_vf_port)
2208 err = ops->ndo_set_vf_port(dev, PORT_SELF_VF, port);
2211 status |= DO_SETLINK_NOTIFY;
2214 if (tb[IFLA_AF_SPEC]) {
2218 nla_for_each_nested(af, tb[IFLA_AF_SPEC], rem) {
2219 const struct rtnl_af_ops *af_ops;
2221 if (!(af_ops = rtnl_af_lookup(nla_type(af))))
2224 err = af_ops->set_link_af(dev, af);
2228 status |= DO_SETLINK_NOTIFY;
2233 if (tb[IFLA_PROTO_DOWN]) {
2234 err = dev_change_proto_down(dev,
2235 nla_get_u8(tb[IFLA_PROTO_DOWN]));
2238 status |= DO_SETLINK_NOTIFY;
2242 struct nlattr *xdp[IFLA_XDP_MAX + 1];
2245 err = nla_parse_nested(xdp, IFLA_XDP_MAX, tb[IFLA_XDP],
2246 ifla_xdp_policy, NULL);
2250 if (xdp[IFLA_XDP_ATTACHED] || xdp[IFLA_XDP_PROG_ID]) {
2255 if (xdp[IFLA_XDP_FLAGS]) {
2256 xdp_flags = nla_get_u32(xdp[IFLA_XDP_FLAGS]);
2257 if (xdp_flags & ~XDP_FLAGS_MASK) {
2261 if (hweight32(xdp_flags & XDP_FLAGS_MODES) > 1) {
2267 if (xdp[IFLA_XDP_FD]) {
2268 err = dev_change_xdp_fd(dev, extack,
2269 nla_get_s32(xdp[IFLA_XDP_FD]),
2273 status |= DO_SETLINK_NOTIFY;
2278 if (status & DO_SETLINK_MODIFIED) {
2279 if ((status & DO_SETLINK_NOTIFY) == DO_SETLINK_NOTIFY)
2280 netdev_state_change(dev);
2283 net_warn_ratelimited("A link change request failed with some changes committed already. Interface %s may have been left with an inconsistent configuration, please check.\n",
2290 static int rtnl_setlink(struct sk_buff *skb, struct nlmsghdr *nlh,
2291 struct netlink_ext_ack *extack)
2293 struct net *net = sock_net(skb->sk);
2294 struct ifinfomsg *ifm;
2295 struct net_device *dev;
2297 struct nlattr *tb[IFLA_MAX+1];
2298 char ifname[IFNAMSIZ];
2300 err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy,
2305 if (tb[IFLA_IFNAME])
2306 nla_strlcpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ);
2311 ifm = nlmsg_data(nlh);
2312 if (ifm->ifi_index > 0)
2313 dev = __dev_get_by_index(net, ifm->ifi_index);
2314 else if (tb[IFLA_IFNAME])
2315 dev = __dev_get_by_name(net, ifname);
2324 err = do_setlink(skb, dev, ifm, extack, tb, ifname, 0);
2329 static int rtnl_group_dellink(const struct net *net, int group)
2331 struct net_device *dev, *aux;
2332 LIST_HEAD(list_kill);
2338 for_each_netdev(net, dev) {
2339 if (dev->group == group) {
2340 const struct rtnl_link_ops *ops;
2343 ops = dev->rtnl_link_ops;
2344 if (!ops || !ops->dellink)
2352 for_each_netdev_safe(net, dev, aux) {
2353 if (dev->group == group) {
2354 const struct rtnl_link_ops *ops;
2356 ops = dev->rtnl_link_ops;
2357 ops->dellink(dev, &list_kill);
2360 unregister_netdevice_many(&list_kill);
2365 int rtnl_delete_link(struct net_device *dev)
2367 const struct rtnl_link_ops *ops;
2368 LIST_HEAD(list_kill);
2370 ops = dev->rtnl_link_ops;
2371 if (!ops || !ops->dellink)
2374 ops->dellink(dev, &list_kill);
2375 unregister_netdevice_many(&list_kill);
2379 EXPORT_SYMBOL_GPL(rtnl_delete_link);
2381 static int rtnl_dellink(struct sk_buff *skb, struct nlmsghdr *nlh,
2382 struct netlink_ext_ack *extack)
2384 struct net *net = sock_net(skb->sk);
2385 struct net_device *dev;
2386 struct ifinfomsg *ifm;
2387 char ifname[IFNAMSIZ];
2388 struct nlattr *tb[IFLA_MAX+1];
2391 err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy, extack);
2395 if (tb[IFLA_IFNAME])
2396 nla_strlcpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ);
2398 ifm = nlmsg_data(nlh);
2399 if (ifm->ifi_index > 0)
2400 dev = __dev_get_by_index(net, ifm->ifi_index);
2401 else if (tb[IFLA_IFNAME])
2402 dev = __dev_get_by_name(net, ifname);
2403 else if (tb[IFLA_GROUP])
2404 return rtnl_group_dellink(net, nla_get_u32(tb[IFLA_GROUP]));
2411 return rtnl_delete_link(dev);
2414 int rtnl_configure_link(struct net_device *dev, const struct ifinfomsg *ifm)
2416 unsigned int old_flags;
2419 old_flags = dev->flags;
2420 if (ifm && (ifm->ifi_flags || ifm->ifi_change)) {
2421 err = __dev_change_flags(dev, rtnl_dev_combine_flags(dev, ifm));
2426 if (dev->rtnl_link_state == RTNL_LINK_INITIALIZED) {
2427 __dev_notify_flags(dev, old_flags, (old_flags ^ dev->flags));
2429 dev->rtnl_link_state = RTNL_LINK_INITIALIZED;
2430 __dev_notify_flags(dev, old_flags, ~0U);
2434 EXPORT_SYMBOL(rtnl_configure_link);
2436 struct net_device *rtnl_create_link(struct net *net,
2437 const char *ifname, unsigned char name_assign_type,
2438 const struct rtnl_link_ops *ops, struct nlattr *tb[])
2440 struct net_device *dev;
2441 unsigned int num_tx_queues = 1;
2442 unsigned int num_rx_queues = 1;
2444 if (tb[IFLA_NUM_TX_QUEUES])
2445 num_tx_queues = nla_get_u32(tb[IFLA_NUM_TX_QUEUES]);
2446 else if (ops->get_num_tx_queues)
2447 num_tx_queues = ops->get_num_tx_queues();
2449 if (tb[IFLA_NUM_RX_QUEUES])
2450 num_rx_queues = nla_get_u32(tb[IFLA_NUM_RX_QUEUES]);
2451 else if (ops->get_num_rx_queues)
2452 num_rx_queues = ops->get_num_rx_queues();
2454 if (num_tx_queues < 1 || num_tx_queues > 4096)
2455 return ERR_PTR(-EINVAL);
2457 if (num_rx_queues < 1 || num_rx_queues > 4096)
2458 return ERR_PTR(-EINVAL);
2460 dev = alloc_netdev_mqs(ops->priv_size, ifname, name_assign_type,
2461 ops->setup, num_tx_queues, num_rx_queues);
2463 return ERR_PTR(-ENOMEM);
2465 dev_net_set(dev, net);
2466 dev->rtnl_link_ops = ops;
2467 dev->rtnl_link_state = RTNL_LINK_INITIALIZING;
2470 u32 mtu = nla_get_u32(tb[IFLA_MTU]);
2473 err = dev_validate_mtu(dev, mtu);
2476 return ERR_PTR(err);
2480 if (tb[IFLA_ADDRESS]) {
2481 memcpy(dev->dev_addr, nla_data(tb[IFLA_ADDRESS]),
2482 nla_len(tb[IFLA_ADDRESS]));
2483 dev->addr_assign_type = NET_ADDR_SET;
2485 if (tb[IFLA_BROADCAST])
2486 memcpy(dev->broadcast, nla_data(tb[IFLA_BROADCAST]),
2487 nla_len(tb[IFLA_BROADCAST]));
2488 if (tb[IFLA_TXQLEN])
2489 dev->tx_queue_len = nla_get_u32(tb[IFLA_TXQLEN]);
2490 if (tb[IFLA_OPERSTATE])
2491 set_operstate(dev, nla_get_u8(tb[IFLA_OPERSTATE]));
2492 if (tb[IFLA_LINKMODE])
2493 dev->link_mode = nla_get_u8(tb[IFLA_LINKMODE]);
2495 dev_set_group(dev, nla_get_u32(tb[IFLA_GROUP]));
2499 EXPORT_SYMBOL(rtnl_create_link);
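/*
 * Usage sketch (illustrative): rtnl_newlink() below uses rtnl_create_link()
 * to allocate the netdev before calling ops->newlink(). A link type that
 * needs to create a companion device from its own ->newlink() can do the
 * same; everything except rtnl_create_link() and register_netdevice() here
 * is hypothetical.
 *
 *	peer = rtnl_create_link(net, "peer%d", NET_NAME_ENUM, &my_link_ops, tb);
 *	if (IS_ERR(peer))
 *		return PTR_ERR(peer);
 *	err = register_netdevice(peer);
 */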
2501 static int rtnl_group_changelink(const struct sk_buff *skb,
2502 struct net *net, int group,
2503 struct ifinfomsg *ifm,
2504 struct netlink_ext_ack *extack,
2507 struct net_device *dev, *aux;
2510 for_each_netdev_safe(net, dev, aux) {
2511 if (dev->group == group) {
2512 err = do_setlink(skb, dev, ifm, extack, tb, NULL, 0);
2521 static int rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh,
2522 struct netlink_ext_ack *extack)
2524 struct net *net = sock_net(skb->sk);
2525 const struct rtnl_link_ops *ops;
2526 const struct rtnl_link_ops *m_ops;
2527 struct net_device *dev;
2528 struct net_device *master_dev;
2529 struct ifinfomsg *ifm;
2530 char kind[MODULE_NAME_LEN];
2531 char ifname[IFNAMSIZ];
2532 struct nlattr *tb[IFLA_MAX+1];
2533 struct nlattr *linkinfo[IFLA_INFO_MAX+1];
2534 unsigned char name_assign_type = NET_NAME_USER;
2537 #ifdef CONFIG_MODULES
2540 err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy, extack);
2544 if (tb[IFLA_IFNAME])
2545 nla_strlcpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ);
2549 ifm = nlmsg_data(nlh);
2550 if (ifm->ifi_index > 0)
2551 dev = __dev_get_by_index(net, ifm->ifi_index);
2554 dev = __dev_get_by_name(net, ifname);
2562 master_dev = netdev_master_upper_dev_get(dev);
2564 m_ops = master_dev->rtnl_link_ops;
2567 err = validate_linkmsg(dev, tb);
2571 if (tb[IFLA_LINKINFO]) {
2572 err = nla_parse_nested(linkinfo, IFLA_INFO_MAX,
2573 tb[IFLA_LINKINFO], ifla_info_policy,
2578 memset(linkinfo, 0, sizeof(linkinfo));
2580 if (linkinfo[IFLA_INFO_KIND]) {
2581 nla_strlcpy(kind, linkinfo[IFLA_INFO_KIND], sizeof(kind));
2582 ops = rtnl_link_ops_get(kind);
2589 struct nlattr *attr[ops ? ops->maxtype + 1 : 1];
2590 struct nlattr *slave_attr[m_ops ? m_ops->slave_maxtype + 1 : 1];
2591 struct nlattr **data = NULL;
2592 struct nlattr **slave_data = NULL;
2593 struct net *dest_net, *link_net = NULL;
2596 if (ops->maxtype && linkinfo[IFLA_INFO_DATA]) {
2597 err = nla_parse_nested(attr, ops->maxtype,
2598 linkinfo[IFLA_INFO_DATA],
2604 if (ops->validate) {
2605 err = ops->validate(tb, data, extack);
2612 if (m_ops->slave_maxtype &&
2613 linkinfo[IFLA_INFO_SLAVE_DATA]) {
2614 err = nla_parse_nested(slave_attr,
2615 m_ops->slave_maxtype,
2616 linkinfo[IFLA_INFO_SLAVE_DATA],
2617 m_ops->slave_policy,
2621 slave_data = slave_attr;
2623 if (m_ops->slave_validate) {
2624 err = m_ops->slave_validate(tb, slave_data,
2634 if (nlh->nlmsg_flags & NLM_F_EXCL)
2636 if (nlh->nlmsg_flags & NLM_F_REPLACE)
2639 if (linkinfo[IFLA_INFO_DATA]) {
2640 if (!ops || ops != dev->rtnl_link_ops ||
2644 err = ops->changelink(dev, tb, data, extack);
2647 status |= DO_SETLINK_NOTIFY;
2650 if (linkinfo[IFLA_INFO_SLAVE_DATA]) {
2651 if (!m_ops || !m_ops->slave_changelink)
2654 err = m_ops->slave_changelink(master_dev, dev,
2659 status |= DO_SETLINK_NOTIFY;
2662 return do_setlink(skb, dev, ifm, extack, tb, ifname,
2666 if (!(nlh->nlmsg_flags & NLM_F_CREATE)) {
2667 if (ifm->ifi_index == 0 && tb[IFLA_GROUP])
2668 return rtnl_group_changelink(skb, net,
2669 nla_get_u32(tb[IFLA_GROUP]),
2674 if (tb[IFLA_MAP] || tb[IFLA_PROTINFO])
2678 #ifdef CONFIG_MODULES
2681 request_module("rtnl-link-%s", kind);
2683 ops = rtnl_link_ops_get(kind);
2695 snprintf(ifname, IFNAMSIZ, "%s%%d", ops->kind);
2696 name_assign_type = NET_NAME_ENUM;
2699 dest_net = rtnl_link_get_net(net, tb);
2700 if (IS_ERR(dest_net))
2701 return PTR_ERR(dest_net);
2704 if (!netlink_ns_capable(skb, dest_net->user_ns, CAP_NET_ADMIN))
2707 if (tb[IFLA_LINK_NETNSID]) {
2708 int id = nla_get_s32(tb[IFLA_LINK_NETNSID]);
2710 link_net = get_net_ns_by_id(dest_net, id);
2716 if (!netlink_ns_capable(skb, link_net->user_ns, CAP_NET_ADMIN))
2720 dev = rtnl_create_link(link_net ? : dest_net, ifname,
2721 name_assign_type, ops, tb);
2727 dev->ifindex = ifm->ifi_index;
2730 err = ops->newlink(link_net ? : net, dev, tb, data,
2732 /* Drivers should call free_netdev() in ->destructor
2733 * and unregister it on failure after registration,
2734 * so that the device can finally be freed in rtnl_unlock().
2737 /* If device is not registered at all, free it now */
2738 if (dev->reg_state == NETREG_UNINITIALIZED ||
2739 dev->reg_state == NETREG_UNREGISTERED)
2744 err = register_netdevice(dev);
2750 err = rtnl_configure_link(dev, ifm);
2752 goto out_unregister;
2754 err = dev_change_net_namespace(dev, dest_net, ifname);
2756 goto out_unregister;
2758 if (tb[IFLA_MASTER]) {
2759 err = do_set_master(dev, nla_get_u32(tb[IFLA_MASTER]));
2761 goto out_unregister;
2770 LIST_HEAD(list_kill);
2772 ops->dellink(dev, &list_kill);
2773 unregister_netdevice_many(&list_kill);
2775 unregister_netdevice(dev);
2781 static int rtnl_getlink(struct sk_buff *skb, struct nlmsghdr *nlh,
2782 struct netlink_ext_ack *extack)
2784 struct net *net = sock_net(skb->sk);
2785 struct ifinfomsg *ifm;
2786 char ifname[IFNAMSIZ];
2787 struct nlattr *tb[IFLA_MAX+1];
2788 struct net_device *dev = NULL;
2789 struct sk_buff *nskb;
2791 u32 ext_filter_mask = 0;
2793 err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy, extack);
2797 if (tb[IFLA_IFNAME])
2798 nla_strlcpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ);
2800 if (tb[IFLA_EXT_MASK])
2801 ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]);
2803 ifm = nlmsg_data(nlh);
2804 if (ifm->ifi_index > 0)
2805 dev = __dev_get_by_index(net, ifm->ifi_index);
2806 else if (tb[IFLA_IFNAME])
2807 dev = __dev_get_by_name(net, ifname);
2814 nskb = nlmsg_new(if_nlmsg_size(dev, ext_filter_mask), GFP_KERNEL);
2818 err = rtnl_fill_ifinfo(nskb, dev, RTM_NEWLINK, NETLINK_CB(skb).portid,
2819 nlh->nlmsg_seq, 0, 0, ext_filter_mask, 0);
2821 /* -EMSGSIZE implies BUG in if_nlmsg_size */
2822 WARN_ON(err == -EMSGSIZE);
2825 err = rtnl_unicast(nskb, net, NETLINK_CB(skb).portid);
2830 static u16 rtnl_calcit(struct sk_buff *skb, struct nlmsghdr *nlh)
2832 struct net *net = sock_net(skb->sk);
2833 struct net_device *dev;
2834 struct nlattr *tb[IFLA_MAX+1];
2835 u32 ext_filter_mask = 0;
2836 u16 min_ifinfo_dump_size = 0;
2839 /* Same kernel<->userspace interface hack as in rtnl_dump_ifinfo. */
2840 hdrlen = nlmsg_len(nlh) < sizeof(struct ifinfomsg) ?
2841 sizeof(struct rtgenmsg) : sizeof(struct ifinfomsg);
2843 if (nlmsg_parse(nlh, hdrlen, tb, IFLA_MAX, ifla_policy, NULL) >= 0) {
2844 if (tb[IFLA_EXT_MASK])
2845 ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]);
2848 if (!ext_filter_mask)
2849 return NLMSG_GOODSIZE;
2851 * traverse the list of net devices and compute the minimum
2852 * buffer size based upon the filter mask.
2855 for_each_netdev_rcu(net, dev) {
2856 min_ifinfo_dump_size = max_t(u16, min_ifinfo_dump_size,
2862 return nlmsg_total_size(min_ifinfo_dump_size);
2865 static int rtnl_dump_all(struct sk_buff *skb, struct netlink_callback *cb)
2868 int s_idx = cb->family;
2873 for (idx = 1; idx <= RTNL_FAMILY_MAX; idx++) {
2874 int type = cb->nlh->nlmsg_type-RTM_BASE;
2875 struct rtnl_link *handlers;
2876 rtnl_dumpit_func dumpit;
2878 if (idx < s_idx || idx == PF_PACKET)
2881 handlers = rtnl_dereference(rtnl_msg_handlers[idx]);
2885 dumpit = READ_ONCE(handlers[type].dumpit);
2890 memset(&cb->args[0], 0, sizeof(cb->args));
2894 if (dumpit(skb, cb))
2902 struct sk_buff *rtmsg_ifinfo_build_skb(int type, struct net_device *dev,
2903 unsigned int change,
2904 u32 event, gfp_t flags)
2906 struct net *net = dev_net(dev);
2907 struct sk_buff *skb;
2909 size_t if_info_size;
2911 skb = nlmsg_new((if_info_size = if_nlmsg_size(dev, 0)), flags);
2915 err = rtnl_fill_ifinfo(skb, dev, type, 0, 0, change, 0, 0, event);
2917 /* -EMSGSIZE implies BUG in if_nlmsg_size() */
2918 WARN_ON(err == -EMSGSIZE);
2925 rtnl_set_sk_err(net, RTNLGRP_LINK, err);
2929 void rtmsg_ifinfo_send(struct sk_buff *skb, struct net_device *dev, gfp_t flags)
2931 struct net *net = dev_net(dev);
2933 rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, flags);
2936 static void rtmsg_ifinfo_event(int type, struct net_device *dev,
2937 unsigned int change, u32 event,
2940 struct sk_buff *skb;
2942 if (dev->reg_state != NETREG_REGISTERED)
2945 skb = rtmsg_ifinfo_build_skb(type, dev, change, event, flags);
2947 rtmsg_ifinfo_send(skb, dev, flags);
2950 void rtmsg_ifinfo(int type, struct net_device *dev, unsigned int change,
2953 rtmsg_ifinfo_event(type, dev, change, rtnl_get_event(0), flags);
2955 EXPORT_SYMBOL(rtmsg_ifinfo);
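/*
 * Illustrative sketch (not part of the original file): other parts of the
 * stack announce link changes to RTNLGRP_LINK listeners through this helper,
 * passing the mask of flags that changed (or 0 if unspecified), e.g.:
 *
 *	rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP | IFF_RUNNING, GFP_KERNEL);
 */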
2957 static int nlmsg_populate_fdb_fill(struct sk_buff *skb,
2958 struct net_device *dev,
2959 u8 *addr, u16 vid, u32 pid, u32 seq,
2960 int type, unsigned int flags,
2961 int nlflags, u16 ndm_state)
2963 struct nlmsghdr *nlh;
2966 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), nlflags);
2970 ndm = nlmsg_data(nlh);
2971 ndm->ndm_family = AF_BRIDGE;
2974 ndm->ndm_flags = flags;
2976 ndm->ndm_ifindex = dev->ifindex;
2977 ndm->ndm_state = ndm_state;
2979 if (nla_put(skb, NDA_LLADDR, ETH_ALEN, addr))
2980 goto nla_put_failure;
2982 if (nla_put(skb, NDA_VLAN, sizeof(u16), &vid))
2983 goto nla_put_failure;
2985 nlmsg_end(skb, nlh);
2989 nlmsg_cancel(skb, nlh);
2993 static inline size_t rtnl_fdb_nlmsg_size(void)
2995 return NLMSG_ALIGN(sizeof(struct ndmsg)) +
2996 nla_total_size(ETH_ALEN) + /* NDA_LLADDR */
2997 nla_total_size(sizeof(u16)) + /* NDA_VLAN */
3001 static void rtnl_fdb_notify(struct net_device *dev, u8 *addr, u16 vid, int type,
3004 struct net *net = dev_net(dev);
3005 struct sk_buff *skb;
3008 skb = nlmsg_new(rtnl_fdb_nlmsg_size(), GFP_ATOMIC);
3012 err = nlmsg_populate_fdb_fill(skb, dev, addr, vid,
3013 0, 0, type, NTF_SELF, 0, ndm_state);
3019 rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
3022 rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
3026 * ndo_dflt_fdb_add - default netdevice operation to add an FDB entry
3028 int ndo_dflt_fdb_add(struct ndmsg *ndm,
3029 struct nlattr *tb[],
3030 struct net_device *dev,
3031 const unsigned char *addr, u16 vid,
3036 /* If aging addresses are supported, the device will need to
3037 * implement its own handler for this.
3039 if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
3040 pr_info("%s: FDB only supports static addresses\n", dev->name);
3045 pr_info("%s: vlans aren't supported yet for dev_uc|mc_add()\n", dev->name);
3049 if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
3050 err = dev_uc_add_excl(dev, addr);
3051 else if (is_multicast_ether_addr(addr))
3052 err = dev_mc_add_excl(dev, addr);
3054 /* Only return duplicate errors if NLM_F_EXCL is set */
3055 if (err == -EEXIST && !(flags & NLM_F_EXCL))
3060 EXPORT_SYMBOL(ndo_dflt_fdb_add);
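/*
 * Illustrative sketch (hypothetical driver code, not part of the original
 * file): a driver that only needs the default behaviour can leave
 * ->ndo_fdb_add unset and let rtnl_fdb_add() below fall through to this
 * helper, or forward to it explicitly from its own handler:
 *
 *	static int my_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
 *			      struct net_device *dev,
 *			      const unsigned char *addr, u16 vid, u16 flags)
 *	{
 *		return ndo_dflt_fdb_add(ndm, tb, dev, addr, vid, flags);
 *	}
 */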
3062 static int fdb_vid_parse(struct nlattr *vlan_attr, u16 *p_vid)
3067 if (nla_len(vlan_attr) != sizeof(u16)) {
3068 pr_info("PF_BRIDGE: RTM_NEWNEIGH with invalid vlan\n");
3072 vid = nla_get_u16(vlan_attr);
3074 if (!vid || vid >= VLAN_VID_MASK) {
3075 pr_info("PF_BRIDGE: RTM_NEWNEIGH with invalid vlan id %d\n",
3084 static int rtnl_fdb_add(struct sk_buff *skb, struct nlmsghdr *nlh,
3085 struct netlink_ext_ack *extack)
3087 struct net *net = sock_net(skb->sk);
3089 struct nlattr *tb[NDA_MAX+1];
3090 struct net_device *dev;
3095 err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX, NULL, extack);
3099 ndm = nlmsg_data(nlh);
3100 if (ndm->ndm_ifindex == 0) {
3101 pr_info("PF_BRIDGE: RTM_NEWNEIGH with invalid ifindex\n");
3105 dev = __dev_get_by_index(net, ndm->ndm_ifindex);
3107 pr_info("PF_BRIDGE: RTM_NEWNEIGH with unknown ifindex\n");
3111 if (!tb[NDA_LLADDR] || nla_len(tb[NDA_LLADDR]) != ETH_ALEN) {
3112 pr_info("PF_BRIDGE: RTM_NEWNEIGH with invalid address\n");
3116 if (dev->type != ARPHRD_ETHER) {
3117 NL_SET_ERR_MSG(extack, "FDB add only supported for Ethernet devices");
3121 addr = nla_data(tb[NDA_LLADDR]);
3123 err = fdb_vid_parse(tb[NDA_VLAN], &vid);
3129 /* Support fdb on the master device, the net/bridge default case */
3130 if ((!ndm->ndm_flags || ndm->ndm_flags & NTF_MASTER) &&
3131 (dev->priv_flags & IFF_BRIDGE_PORT)) {
3132 struct net_device *br_dev = netdev_master_upper_dev_get(dev);
3133 const struct net_device_ops *ops = br_dev->netdev_ops;
3135 err = ops->ndo_fdb_add(ndm, tb, dev, addr, vid,
3140 ndm->ndm_flags &= ~NTF_MASTER;
3143 /* Embedded bridge, macvlan, and any other device support */
3144 if ((ndm->ndm_flags & NTF_SELF)) {
3145 if (dev->netdev_ops->ndo_fdb_add)
3146 err = dev->netdev_ops->ndo_fdb_add(ndm, tb, dev, addr,
3150 err = ndo_dflt_fdb_add(ndm, tb, dev, addr, vid,
3154 rtnl_fdb_notify(dev, addr, vid, RTM_NEWNEIGH,
3156 ndm->ndm_flags &= ~NTF_SELF;
3164 * ndo_dflt_fdb_del - default netdevice operation to delete an FDB entry
3166 int ndo_dflt_fdb_del(struct ndmsg *ndm,
3167 struct nlattr *tb[],
3168 struct net_device *dev,
3169 const unsigned char *addr, u16 vid)
3173 /* If aging addresses are supported, the device will need to
3174 * implement its own handler for this.
3176 if (!(ndm->ndm_state & NUD_PERMANENT)) {
3177 pr_info("%s: FDB only supports static addresses\n", dev->name);
3181 if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
3182 err = dev_uc_del(dev, addr);
3183 else if (is_multicast_ether_addr(addr))
3184 err = dev_mc_del(dev, addr);
3188 EXPORT_SYMBOL(ndo_dflt_fdb_del);
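/*
 * Illustrative sketch (hypothetical driver code, not part of the original
 * file): the delete path mirrors the add path above, so a driver may simply
 * delegate, e.g.:
 *
 *	static int my_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
 *			      struct net_device *dev,
 *			      const unsigned char *addr, u16 vid)
 *	{
 *		return ndo_dflt_fdb_del(ndm, tb, dev, addr, vid);
 *	}
 */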
3190 static int rtnl_fdb_del(struct sk_buff *skb, struct nlmsghdr *nlh,
3191 struct netlink_ext_ack *extack)
3193 struct net *net = sock_net(skb->sk);
3195 struct nlattr *tb[NDA_MAX+1];
3196 struct net_device *dev;
3201 if (!netlink_capable(skb, CAP_NET_ADMIN))
3204 err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX, NULL, extack);
3208 ndm = nlmsg_data(nlh);
3209 if (ndm->ndm_ifindex == 0) {
3210 pr_info("PF_BRIDGE: RTM_DELNEIGH with invalid ifindex\n");
3214 dev = __dev_get_by_index(net, ndm->ndm_ifindex);
3216 pr_info("PF_BRIDGE: RTM_DELNEIGH with unknown ifindex\n");
3220 if (!tb[NDA_LLADDR] || nla_len(tb[NDA_LLADDR]) != ETH_ALEN) {
3221 pr_info("PF_BRIDGE: RTM_DELNEIGH with invalid address\n");
3225 if (dev->type != ARPHRD_ETHER) {
3226 NL_SET_ERR_MSG(extack, "FDB delete only supported for Ethernet devices");
3230 addr = nla_data(tb[NDA_LLADDR]);
3232 err = fdb_vid_parse(tb[NDA_VLAN], &vid);
3238 /* Support fdb on the master device, the net/bridge default case */
3239 if ((!ndm->ndm_flags || ndm->ndm_flags & NTF_MASTER) &&
3240 (dev->priv_flags & IFF_BRIDGE_PORT)) {
3241 struct net_device *br_dev = netdev_master_upper_dev_get(dev);
3242 const struct net_device_ops *ops = br_dev->netdev_ops;
3244 if (ops->ndo_fdb_del)
3245 err = ops->ndo_fdb_del(ndm, tb, dev, addr, vid);
3250 ndm->ndm_flags &= ~NTF_MASTER;
3253 /* Embedded bridge, macvlan, and any other device support */
3254 if (ndm->ndm_flags & NTF_SELF) {
3255 if (dev->netdev_ops->ndo_fdb_del)
3256 err = dev->netdev_ops->ndo_fdb_del(ndm, tb, dev, addr,
3259 err = ndo_dflt_fdb_del(ndm, tb, dev, addr, vid);
3262 rtnl_fdb_notify(dev, addr, vid, RTM_DELNEIGH,
3264 ndm->ndm_flags &= ~NTF_SELF;
3271 static int nlmsg_populate_fdb(struct sk_buff *skb,
3272 struct netlink_callback *cb,
3273 struct net_device *dev,
3275 struct netdev_hw_addr_list *list)
3277 struct netdev_hw_addr *ha;
3281 portid = NETLINK_CB(cb->skb).portid;
3282 seq = cb->nlh->nlmsg_seq;
3284 list_for_each_entry(ha, &list->list, list) {
3285 if (*idx < cb->args[2])
3288 err = nlmsg_populate_fdb_fill(skb, dev, ha->addr, 0,
3290 RTM_NEWNEIGH, NTF_SELF,
3291 NLM_F_MULTI, NUD_PERMANENT);
3301 * ndo_dflt_fdb_dump - default netdevice operation to dump an FDB table.
3302 * @skb: socket buffer in which to store the dump
3305 * Default netdevice operation to dump the existing unicast and multicast address lists.
3306 * Returns 0 on success or a negative error; *@idx is advanced past the addresses added to @skb.
3308 int ndo_dflt_fdb_dump(struct sk_buff *skb,
3309 struct netlink_callback *cb,
3310 struct net_device *dev,
3311 struct net_device *filter_dev,
3316 if (dev->type != ARPHRD_ETHER)
3319 netif_addr_lock_bh(dev);
3320 err = nlmsg_populate_fdb(skb, cb, dev, idx, &dev->uc);
3323 err = nlmsg_populate_fdb(skb, cb, dev, idx, &dev->mc);
3325 netif_addr_unlock_bh(dev);
3328 EXPORT_SYMBOL(ndo_dflt_fdb_dump);
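/*
 * Illustrative sketch (hypothetical, not part of the original file): drivers
 * without special FDB handling commonly point their dump hook straight at
 * this helper:
 *
 *	static const struct net_device_ops my_netdev_ops = {
 *		.ndo_fdb_dump	= ndo_dflt_fdb_dump,
 *	};
 */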
3330 static int rtnl_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb)
3332 struct net_device *dev;
3333 struct nlattr *tb[IFLA_MAX+1];
3334 struct net_device *br_dev = NULL;
3335 const struct net_device_ops *ops = NULL;
3336 const struct net_device_ops *cops = NULL;
3337 struct ifinfomsg *ifm = nlmsg_data(cb->nlh);
3338 struct net *net = sock_net(skb->sk);
3339 struct hlist_head *head;
3347 /* A hack to preserve kernel<->userspace interface.
3348 * Before Linux v4.12 this code accepted ndmsg since iproute2 v3.3.0.
3349 * However, ndmsg is shorter than ifinfomsg thus nlmsg_parse() bails.
3350 * So, check for ndmsg with an optional u32 attribute (not used here).
3351 * Fortunately these sizes don't conflict with the size of ifinfomsg
3352 * with an optional attribute.
3354 if (nlmsg_len(cb->nlh) != sizeof(struct ndmsg) &&
3355 (nlmsg_len(cb->nlh) != sizeof(struct ndmsg) +
3356 nla_attr_size(sizeof(u32)))) {
3357 err = nlmsg_parse(cb->nlh, sizeof(struct ifinfomsg), tb,
3358 IFLA_MAX, ifla_policy, NULL);
3361 } else if (err == 0) {
3362 if (tb[IFLA_MASTER])
3363 br_idx = nla_get_u32(tb[IFLA_MASTER]);
3366 brport_idx = ifm->ifi_index;
3370 br_dev = __dev_get_by_index(net, br_idx);
3374 ops = br_dev->netdev_ops;
3378 s_idx = cb->args[1];
3380 for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
3382 head = &net->dev_index_head[h];
3383 hlist_for_each_entry(dev, head, index_hlist) {
3385 if (brport_idx && (dev->ifindex != brport_idx))
3388 if (!br_idx) { /* user did not specify a specific bridge */
3389 if (dev->priv_flags & IFF_BRIDGE_PORT) {
3390 br_dev = netdev_master_upper_dev_get(dev);
3391 cops = br_dev->netdev_ops;
3394 if (dev != br_dev &&
3395 !(dev->priv_flags & IFF_BRIDGE_PORT))
3398 if (br_dev != netdev_master_upper_dev_get(dev) &&
3399 !(dev->priv_flags & IFF_EBRIDGE))
3407 if (dev->priv_flags & IFF_BRIDGE_PORT) {
3408 if (cops && cops->ndo_fdb_dump) {
3409 err = cops->ndo_fdb_dump(skb, cb,
3412 if (err == -EMSGSIZE)
3417 if (dev->netdev_ops->ndo_fdb_dump)
3418 err = dev->netdev_ops->ndo_fdb_dump(skb, cb,
3422 err = ndo_dflt_fdb_dump(skb, cb, dev, NULL,
3424 if (err == -EMSGSIZE)
3429 /* reset fdb offset to 0 for the rest of the interfaces */
3445 static int brport_nla_put_flag(struct sk_buff *skb, u32 flags, u32 mask,
3446 unsigned int attrnum, unsigned int flag)
3449 return nla_put_u8(skb, attrnum, !!(flags & flag));
3453 int ndo_dflt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
3454 struct net_device *dev, u16 mode,
3455 u32 flags, u32 mask, int nlflags,
3457 int (*vlan_fill)(struct sk_buff *skb,
3458 struct net_device *dev,
3461 struct nlmsghdr *nlh;
3462 struct ifinfomsg *ifm;
3463 struct nlattr *br_afspec;
3464 struct nlattr *protinfo;
3465 u8 operstate = netif_running(dev) ? dev->operstate : IF_OPER_DOWN;
3466 struct net_device *br_dev = netdev_master_upper_dev_get(dev);
3469 nlh = nlmsg_put(skb, pid, seq, RTM_NEWLINK, sizeof(*ifm), nlflags);
3473 ifm = nlmsg_data(nlh);
3474 ifm->ifi_family = AF_BRIDGE;
3476 ifm->ifi_type = dev->type;
3477 ifm->ifi_index = dev->ifindex;
3478 ifm->ifi_flags = dev_get_flags(dev);
3479 ifm->ifi_change = 0;
3482 if (nla_put_string(skb, IFLA_IFNAME, dev->name) ||
3483 nla_put_u32(skb, IFLA_MTU, dev->mtu) ||
3484 nla_put_u8(skb, IFLA_OPERSTATE, operstate) ||
3486 nla_put_u32(skb, IFLA_MASTER, br_dev->ifindex)) ||
3488 nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr)) ||
3489 (dev->ifindex != dev_get_iflink(dev) &&
3490 nla_put_u32(skb, IFLA_LINK, dev_get_iflink(dev))))
3491 goto nla_put_failure;
3493 br_afspec = nla_nest_start(skb, IFLA_AF_SPEC);
3495 goto nla_put_failure;
3497 if (nla_put_u16(skb, IFLA_BRIDGE_FLAGS, BRIDGE_FLAGS_SELF)) {
3498 nla_nest_cancel(skb, br_afspec);
3499 goto nla_put_failure;
3502 if (mode != BRIDGE_MODE_UNDEF) {
3503 if (nla_put_u16(skb, IFLA_BRIDGE_MODE, mode)) {
3504 nla_nest_cancel(skb, br_afspec);
3505 goto nla_put_failure;
3509 err = vlan_fill(skb, dev, filter_mask);
3511 nla_nest_cancel(skb, br_afspec);
3512 goto nla_put_failure;
3515 nla_nest_end(skb, br_afspec);
3517 protinfo = nla_nest_start(skb, IFLA_PROTINFO | NLA_F_NESTED);
3519 goto nla_put_failure;
3521 if (brport_nla_put_flag(skb, flags, mask,
3522 IFLA_BRPORT_MODE, BR_HAIRPIN_MODE) ||
3523 brport_nla_put_flag(skb, flags, mask,
3524 IFLA_BRPORT_GUARD, BR_BPDU_GUARD) ||
3525 brport_nla_put_flag(skb, flags, mask,
3526 IFLA_BRPORT_FAST_LEAVE,
3527 BR_MULTICAST_FAST_LEAVE) ||
3528 brport_nla_put_flag(skb, flags, mask,
3529 IFLA_BRPORT_PROTECT, BR_ROOT_BLOCK) ||
3530 brport_nla_put_flag(skb, flags, mask,
3531 IFLA_BRPORT_LEARNING, BR_LEARNING) ||
3532 brport_nla_put_flag(skb, flags, mask,
3533 IFLA_BRPORT_LEARNING_SYNC, BR_LEARNING_SYNC) ||
3534 brport_nla_put_flag(skb, flags, mask,
3535 IFLA_BRPORT_UNICAST_FLOOD, BR_FLOOD) ||
3536 brport_nla_put_flag(skb, flags, mask,
3537 IFLA_BRPORT_PROXYARP, BR_PROXYARP)) {
3538 nla_nest_cancel(skb, protinfo);
3539 goto nla_put_failure;
3542 nla_nest_end(skb, protinfo);
3544 nlmsg_end(skb, nlh);
3547 nlmsg_cancel(skb, nlh);
3548 return err ? err : -EMSGSIZE;
3550 EXPORT_SYMBOL_GPL(ndo_dflt_bridge_getlink);
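/*
 * Illustrative sketch (hypothetical driver code, not part of the original
 * file): an SR-IOV or switch driver implementing ->ndo_bridge_getlink()
 * typically builds its reply with this helper, e.g.:
 *
 *	static int my_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
 *				     struct net_device *dev,
 *				     u32 filter_mask, int nlflags)
 *	{
 *		return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
 *					       BRIDGE_MODE_VEB, 0, 0, nlflags,
 *					       filter_mask, NULL);
 *	}
 */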
3552 static int rtnl_bridge_getlink(struct sk_buff *skb, struct netlink_callback *cb)
3554 struct net *net = sock_net(skb->sk);
3555 struct net_device *dev;
3557 u32 portid = NETLINK_CB(cb->skb).portid;
3558 u32 seq = cb->nlh->nlmsg_seq;
3559 u32 filter_mask = 0;
3562 if (nlmsg_len(cb->nlh) > sizeof(struct ifinfomsg)) {
3563 struct nlattr *extfilt;
3565 extfilt = nlmsg_find_attr(cb->nlh, sizeof(struct ifinfomsg),
3568 if (nla_len(extfilt) < sizeof(filter_mask))
3571 filter_mask = nla_get_u32(extfilt);
3576 for_each_netdev_rcu(net, dev) {
3577 const struct net_device_ops *ops = dev->netdev_ops;
3578 struct net_device *br_dev = netdev_master_upper_dev_get(dev);
3580 if (br_dev && br_dev->netdev_ops->ndo_bridge_getlink) {
3581 if (idx >= cb->args[0]) {
3582 err = br_dev->netdev_ops->ndo_bridge_getlink(
3583 skb, portid, seq, dev,
3584 filter_mask, NLM_F_MULTI);
3585 if (err < 0 && err != -EOPNOTSUPP) {
3586 if (likely(skb->len))
3595 if (ops->ndo_bridge_getlink) {
3596 if (idx >= cb->args[0]) {
3597 err = ops->ndo_bridge_getlink(skb, portid,
3601 if (err < 0 && err != -EOPNOTSUPP) {
3602 if (likely(skb->len))
3619 static inline size_t bridge_nlmsg_size(void)
3621 return NLMSG_ALIGN(sizeof(struct ifinfomsg))
3622 + nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */
3623 + nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */
3624 + nla_total_size(sizeof(u32)) /* IFLA_MASTER */
3625 + nla_total_size(sizeof(u32)) /* IFLA_MTU */
3626 + nla_total_size(sizeof(u32)) /* IFLA_LINK */
3627 + nla_total_size(sizeof(u32)) /* IFLA_OPERSTATE */
3628 + nla_total_size(sizeof(u8)) /* IFLA_PROTINFO */
3629 + nla_total_size(sizeof(struct nlattr)) /* IFLA_AF_SPEC */
3630 + nla_total_size(sizeof(u16)) /* IFLA_BRIDGE_FLAGS */
3631 + nla_total_size(sizeof(u16)); /* IFLA_BRIDGE_MODE */
3634 static int rtnl_bridge_notify(struct net_device *dev)
3636 struct net *net = dev_net(dev);
3637 struct sk_buff *skb;
3638 int err = -EOPNOTSUPP;
3640 if (!dev->netdev_ops->ndo_bridge_getlink)
3643 skb = nlmsg_new(bridge_nlmsg_size(), GFP_ATOMIC);
3649 err = dev->netdev_ops->ndo_bridge_getlink(skb, 0, 0, dev, 0, 0);
3653 /* Notification info is only filled for bridge ports, not the bridge
3654 * device itself. Therefore, a zero notification length is valid and
3655 * should not result in an error.
3660 rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, GFP_ATOMIC);
3663 WARN_ON(err == -EMSGSIZE);
3666 rtnl_set_sk_err(net, RTNLGRP_LINK, err);
3670 static int rtnl_bridge_setlink(struct sk_buff *skb, struct nlmsghdr *nlh,
3671 struct netlink_ext_ack *extack)
3673 struct net *net = sock_net(skb->sk);
3674 struct ifinfomsg *ifm;
3675 struct net_device *dev;
3676 struct nlattr *br_spec, *attr = NULL;
3677 int rem, err = -EOPNOTSUPP;
3679 bool have_flags = false;
3681 if (nlmsg_len(nlh) < sizeof(*ifm))
3684 ifm = nlmsg_data(nlh);
3685 if (ifm->ifi_family != AF_BRIDGE)
3686 return -EPFNOSUPPORT;
3688 dev = __dev_get_by_index(net, ifm->ifi_index);
3690 pr_info("PF_BRIDGE: RTM_SETLINK with unknown ifindex\n");
3694 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
3696 nla_for_each_nested(attr, br_spec, rem) {
3697 if (nla_type(attr) == IFLA_BRIDGE_FLAGS) {
3698 if (nla_len(attr) < sizeof(flags))
3702 flags = nla_get_u16(attr);
3708 if (!flags || (flags & BRIDGE_FLAGS_MASTER)) {
3709 struct net_device *br_dev = netdev_master_upper_dev_get(dev);
3711 if (!br_dev || !br_dev->netdev_ops->ndo_bridge_setlink) {
3716 err = br_dev->netdev_ops->ndo_bridge_setlink(dev, nlh, flags);
3720 flags &= ~BRIDGE_FLAGS_MASTER;
3723 if ((flags & BRIDGE_FLAGS_SELF)) {
3724 if (!dev->netdev_ops->ndo_bridge_setlink)
3727 err = dev->netdev_ops->ndo_bridge_setlink(dev, nlh,
3730 flags &= ~BRIDGE_FLAGS_SELF;
3732 /* Generate event to notify the upper layer of the bridge change. */
3735 err = rtnl_bridge_notify(dev);
3740 memcpy(nla_data(attr), &flags, sizeof(flags));
3745 static int rtnl_bridge_dellink(struct sk_buff *skb, struct nlmsghdr *nlh,
3746 struct netlink_ext_ack *extack)
3748 struct net *net = sock_net(skb->sk);
3749 struct ifinfomsg *ifm;
3750 struct net_device *dev;
3751 struct nlattr *br_spec, *attr = NULL;
3752 int rem, err = -EOPNOTSUPP;
3754 bool have_flags = false;
3756 if (nlmsg_len(nlh) < sizeof(*ifm))
3759 ifm = nlmsg_data(nlh);
3760 if (ifm->ifi_family != AF_BRIDGE)
3761 return -EPFNOSUPPORT;
3763 dev = __dev_get_by_index(net, ifm->ifi_index);
3765 pr_info("PF_BRIDGE: RTM_SETLINK with unknown ifindex\n");
3769 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
3771 nla_for_each_nested(attr, br_spec, rem) {
3772 if (nla_type(attr) == IFLA_BRIDGE_FLAGS) {
3773 if (nla_len(attr) < sizeof(flags))
3777 flags = nla_get_u16(attr);
3783 if (!flags || (flags & BRIDGE_FLAGS_MASTER)) {
3784 struct net_device *br_dev = netdev_master_upper_dev_get(dev);
3786 if (!br_dev || !br_dev->netdev_ops->ndo_bridge_dellink) {
3791 err = br_dev->netdev_ops->ndo_bridge_dellink(dev, nlh, flags);
3795 flags &= ~BRIDGE_FLAGS_MASTER;
3798 if ((flags & BRIDGE_FLAGS_SELF)) {
3799 if (!dev->netdev_ops->ndo_bridge_dellink)
3802 err = dev->netdev_ops->ndo_bridge_dellink(dev, nlh,
3806 flags &= ~BRIDGE_FLAGS_SELF;
3808 /* Generate event to notify the upper layer of the bridge change. */
3811 err = rtnl_bridge_notify(dev);
3816 memcpy(nla_data(attr), &flags, sizeof(flags));
3821 static bool stats_attr_valid(unsigned int mask, int attrid, int idxattr)
3823 return (mask & IFLA_STATS_FILTER_BIT(attrid)) &&
3824 (!idxattr || idxattr == attrid);
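/*
 * Illustrative note (not part of the original file): userspace selects which
 * statistics attributes it wants by setting bits in the request, e.g.
 *
 *	struct if_stats_msg req = {
 *		.family		= AF_UNSPEC,
 *		.ifindex	= ifindex,
 *		.filter_mask	= IFLA_STATS_FILTER_BIT(IFLA_STATS_LINK_64),
 *	};
 *
 * where "ifindex" stands for the interface of interest; stats_attr_valid()
 * then gates every candidate attribute against that mask while the reply is
 * filled.
 */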
3827 #define IFLA_OFFLOAD_XSTATS_FIRST (IFLA_OFFLOAD_XSTATS_UNSPEC + 1)
3828 static int rtnl_get_offload_stats_attr_size(int attr_id)
3831 case IFLA_OFFLOAD_XSTATS_CPU_HIT:
3832 return sizeof(struct rtnl_link_stats64);
3838 static int rtnl_get_offload_stats(struct sk_buff *skb, struct net_device *dev,
3841 struct nlattr *attr = NULL;
3846 if (!(dev->netdev_ops && dev->netdev_ops->ndo_has_offload_stats &&
3847 dev->netdev_ops->ndo_get_offload_stats))
3850 for (attr_id = IFLA_OFFLOAD_XSTATS_FIRST;
3851 attr_id <= IFLA_OFFLOAD_XSTATS_MAX; attr_id++) {
3852 if (attr_id < *prividx)
3855 size = rtnl_get_offload_stats_attr_size(attr_id);
3859 if (!dev->netdev_ops->ndo_has_offload_stats(dev, attr_id))
3862 attr = nla_reserve_64bit(skb, attr_id, size,
3863 IFLA_OFFLOAD_XSTATS_UNSPEC);
3865 goto nla_put_failure;
3867 attr_data = nla_data(attr);
3868 memset(attr_data, 0, size);
3869 err = dev->netdev_ops->ndo_get_offload_stats(attr_id, dev,
3872 goto get_offload_stats_failure;
3883 get_offload_stats_failure:
3888 static int rtnl_get_offload_stats_size(const struct net_device *dev)
3894 if (!(dev->netdev_ops && dev->netdev_ops->ndo_has_offload_stats &&
3895 dev->netdev_ops->ndo_get_offload_stats))
3898 for (attr_id = IFLA_OFFLOAD_XSTATS_FIRST;
3899 attr_id <= IFLA_OFFLOAD_XSTATS_MAX; attr_id++) {
3900 if (!dev->netdev_ops->ndo_has_offload_stats(dev, attr_id))
3902 size = rtnl_get_offload_stats_attr_size(attr_id);
3903 nla_size += nla_total_size_64bit(size);
3907 nla_size += nla_total_size(0);
3912 static int rtnl_fill_statsinfo(struct sk_buff *skb, struct net_device *dev,
3913 int type, u32 pid, u32 seq, u32 change,
3914 unsigned int flags, unsigned int filter_mask,
3915 int *idxattr, int *prividx)
3917 struct if_stats_msg *ifsm;
3918 struct nlmsghdr *nlh;
3919 struct nlattr *attr;
3920 int s_prividx = *prividx;
3925 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ifsm), flags);
3929 ifsm = nlmsg_data(nlh);
3930 ifsm->family = PF_UNSPEC;
3933 ifsm->ifindex = dev->ifindex;
3934 ifsm->filter_mask = filter_mask;
3936 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_64, *idxattr)) {
3937 struct rtnl_link_stats64 *sp;
3939 attr = nla_reserve_64bit(skb, IFLA_STATS_LINK_64,
3940 sizeof(struct rtnl_link_stats64),
3943 goto nla_put_failure;
3945 sp = nla_data(attr);
3946 dev_get_stats(dev, sp);
3949 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_XSTATS, *idxattr)) {
3950 const struct rtnl_link_ops *ops = dev->rtnl_link_ops;
3952 if (ops && ops->fill_linkxstats) {
3953 *idxattr = IFLA_STATS_LINK_XSTATS;
3954 attr = nla_nest_start(skb,
3955 IFLA_STATS_LINK_XSTATS);
3957 goto nla_put_failure;
3959 err = ops->fill_linkxstats(skb, dev, prividx, *idxattr);
3960 nla_nest_end(skb, attr);
3962 goto nla_put_failure;
3967 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_XSTATS_SLAVE,
3969 const struct rtnl_link_ops *ops = NULL;
3970 const struct net_device *master;
3972 master = netdev_master_upper_dev_get(dev);
3974 ops = master->rtnl_link_ops;
3975 if (ops && ops->fill_linkxstats) {
3976 *idxattr = IFLA_STATS_LINK_XSTATS_SLAVE;
3977 attr = nla_nest_start(skb,
3978 IFLA_STATS_LINK_XSTATS_SLAVE);
3980 goto nla_put_failure;
3982 err = ops->fill_linkxstats(skb, dev, prividx, *idxattr);
3983 nla_nest_end(skb, attr);
3985 goto nla_put_failure;
3990 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_OFFLOAD_XSTATS,
3992 *idxattr = IFLA_STATS_LINK_OFFLOAD_XSTATS;
3993 attr = nla_nest_start(skb, IFLA_STATS_LINK_OFFLOAD_XSTATS);
3995 goto nla_put_failure;
3997 err = rtnl_get_offload_stats(skb, dev, prividx);
3998 if (err == -ENODATA)
3999 nla_nest_cancel(skb, attr);
4001 nla_nest_end(skb, attr);
4003 if (err && err != -ENODATA)
4004 goto nla_put_failure;
4008 if (stats_attr_valid(filter_mask, IFLA_STATS_AF_SPEC, *idxattr)) {
4009 struct rtnl_af_ops *af_ops;
4011 *idxattr = IFLA_STATS_AF_SPEC;
4012 attr = nla_nest_start(skb, IFLA_STATS_AF_SPEC);
4014 goto nla_put_failure;
4016 list_for_each_entry(af_ops, &rtnl_af_ops, list) {
4017 if (af_ops->fill_stats_af) {
4021 af = nla_nest_start(skb, af_ops->family);
4023 goto nla_put_failure;
4025 err = af_ops->fill_stats_af(skb, dev);
4027 if (err == -ENODATA)
4028 nla_nest_cancel(skb, af);
4030 goto nla_put_failure;
4032 nla_nest_end(skb, af);
4036 nla_nest_end(skb, attr);
4041 nlmsg_end(skb, nlh);
4046 /* Not a multipart message, or no progress made, means a real error. */
4047 if (!(flags & NLM_F_MULTI) || s_prividx == *prividx)
4048 nlmsg_cancel(skb, nlh);
4050 nlmsg_end(skb, nlh);
4055 static size_t if_nlmsg_stats_size(const struct net_device *dev,
4058 size_t size = NLMSG_ALIGN(sizeof(struct if_stats_msg));
4060 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_64, 0))
4061 size += nla_total_size_64bit(sizeof(struct rtnl_link_stats64));
4063 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_XSTATS, 0)) {
4064 const struct rtnl_link_ops *ops = dev->rtnl_link_ops;
4065 int attr = IFLA_STATS_LINK_XSTATS;
4067 if (ops && ops->get_linkxstats_size) {
4068 size += nla_total_size(ops->get_linkxstats_size(dev,
4070 /* for IFLA_STATS_LINK_XSTATS */
4071 size += nla_total_size(0);
4075 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_XSTATS_SLAVE, 0)) {
4076 struct net_device *_dev = (struct net_device *)dev;
4077 const struct rtnl_link_ops *ops = NULL;
4078 const struct net_device *master;
4080 /* netdev_master_upper_dev_get can't take const */
4081 master = netdev_master_upper_dev_get(_dev);
4083 ops = master->rtnl_link_ops;
4084 if (ops && ops->get_linkxstats_size) {
4085 int attr = IFLA_STATS_LINK_XSTATS_SLAVE;
4087 size += nla_total_size(ops->get_linkxstats_size(dev,
4089 /* for IFLA_STATS_LINK_XSTATS_SLAVE */
4090 size += nla_total_size(0);
4094 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_OFFLOAD_XSTATS, 0))
4095 size += rtnl_get_offload_stats_size(dev);
4097 if (stats_attr_valid(filter_mask, IFLA_STATS_AF_SPEC, 0)) {
4098 struct rtnl_af_ops *af_ops;
4100 /* for IFLA_STATS_AF_SPEC */
4101 size += nla_total_size(0);
4103 list_for_each_entry(af_ops, &rtnl_af_ops, list) {
4104 if (af_ops->get_stats_af_size) {
4105 size += nla_total_size(
4106 af_ops->get_stats_af_size(dev));
4109 size += nla_total_size(0);
4117 static int rtnl_stats_get(struct sk_buff *skb, struct nlmsghdr *nlh,
4118 struct netlink_ext_ack *extack)
4120 struct net *net = sock_net(skb->sk);
4121 struct net_device *dev = NULL;
4122 int idxattr = 0, prividx = 0;
4123 struct if_stats_msg *ifsm;
4124 struct sk_buff *nskb;
4128 if (nlmsg_len(nlh) < sizeof(*ifsm))
4131 ifsm = nlmsg_data(nlh);
4132 if (ifsm->ifindex > 0)
4133 dev = __dev_get_by_index(net, ifsm->ifindex);
4140 filter_mask = ifsm->filter_mask;
4144 nskb = nlmsg_new(if_nlmsg_stats_size(dev, filter_mask), GFP_KERNEL);
4148 err = rtnl_fill_statsinfo(nskb, dev, RTM_NEWSTATS,
4149 NETLINK_CB(skb).portid, nlh->nlmsg_seq, 0,
4150 0, filter_mask, &idxattr, &prividx);
4152 /* -EMSGSIZE implies BUG in if_nlmsg_stats_size */
4153 WARN_ON(err == -EMSGSIZE);
4156 err = rtnl_unicast(nskb, net, NETLINK_CB(skb).portid);
4162 static int rtnl_stats_dump(struct sk_buff *skb, struct netlink_callback *cb)
4164 int h, s_h, err, s_idx, s_idxattr, s_prividx;
4165 struct net *net = sock_net(skb->sk);
4166 unsigned int flags = NLM_F_MULTI;
4167 struct if_stats_msg *ifsm;
4168 struct hlist_head *head;
4169 struct net_device *dev;
4170 u32 filter_mask = 0;
4174 s_idx = cb->args[1];
4175 s_idxattr = cb->args[2];
4176 s_prividx = cb->args[3];
4178 cb->seq = net->dev_base_seq;
4180 if (nlmsg_len(cb->nlh) < sizeof(*ifsm))
4183 ifsm = nlmsg_data(cb->nlh);
4184 filter_mask = ifsm->filter_mask;
4188 for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
4190 head = &net->dev_index_head[h];
4191 hlist_for_each_entry(dev, head, index_hlist) {
4194 err = rtnl_fill_statsinfo(skb, dev, RTM_NEWSTATS,
4195 NETLINK_CB(cb->skb).portid,
4196 cb->nlh->nlmsg_seq, 0,
4198 &s_idxattr, &s_prividx);
4199 /* If we ran out of room on the first message, we're in trouble. */
4202 WARN_ON((err == -EMSGSIZE) && (skb->len == 0));
4208 nl_dump_check_consistent(cb, nlmsg_hdr(skb));
4214 cb->args[3] = s_prividx;
4215 cb->args[2] = s_idxattr;
4222 /* Process one rtnetlink message. */
4224 static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
4225 struct netlink_ext_ack *extack)
4227 struct net *net = sock_net(skb->sk);
4228 struct rtnl_link *handlers;
4229 int err = -EOPNOTSUPP;
4230 rtnl_doit_func doit;
4236 type = nlh->nlmsg_type;
4242 /* All the messages must have at least 1 byte length */
4243 if (nlmsg_len(nlh) < sizeof(struct rtgenmsg))
4246 family = ((struct rtgenmsg *)nlmsg_data(nlh))->rtgen_family;
4249 if (kind != 2 && !netlink_net_capable(skb, CAP_NET_ADMIN))
4252 if (family >= ARRAY_SIZE(rtnl_msg_handlers))
4256 handlers = rcu_dereference(rtnl_msg_handlers[family]);
4259 handlers = rcu_dereference(rtnl_msg_handlers[family]);
4262 if (kind == 2 && nlh->nlmsg_flags&NLM_F_DUMP) {
4264 rtnl_dumpit_func dumpit;
4265 u16 min_dump_alloc = 0;
4267 dumpit = READ_ONCE(handlers[type].dumpit);
4270 handlers = rcu_dereference(rtnl_msg_handlers[PF_UNSPEC]);
4274 dumpit = READ_ONCE(handlers[type].dumpit);
4279 refcount_inc(&rtnl_msg_handlers_ref[family]);
4281 if (type == RTM_GETLINK - RTM_BASE)
4282 min_dump_alloc = rtnl_calcit(skb, nlh);
4288 struct netlink_dump_control c = {
4290 .min_dump_alloc = min_dump_alloc,
4292 err = netlink_dump_start(rtnl, skb, nlh, &c);
4294 refcount_dec(&rtnl_msg_handlers_ref[family]);
4298 doit = READ_ONCE(handlers[type].doit);
4301 handlers = rcu_dereference(rtnl_msg_handlers[family]);
4304 flags = READ_ONCE(handlers[type].flags);
4305 if (flags & RTNL_FLAG_DOIT_UNLOCKED) {
4306 refcount_inc(&rtnl_msg_handlers_ref[family]);
4307 doit = READ_ONCE(handlers[type].doit);
4310 err = doit(skb, nlh, extack);
4311 refcount_dec(&rtnl_msg_handlers_ref[family]);
4318 handlers = rtnl_dereference(rtnl_msg_handlers[family]);
4320 doit = READ_ONCE(handlers[type].doit);
4322 err = doit(skb, nlh, extack);
4332 static void rtnetlink_rcv(struct sk_buff *skb)
4334 netlink_rcv_skb(skb, &rtnetlink_rcv_msg);
4337 static int rtnetlink_bind(struct net *net, int group)
4340 case RTNLGRP_IPV4_MROUTE_R:
4341 case RTNLGRP_IPV6_MROUTE_R:
4342 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
4349 static int rtnetlink_event(struct notifier_block *this, unsigned long event, void *ptr)
4351 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
4355 case NETDEV_CHANGEMTU:
4356 case NETDEV_CHANGEADDR:
4357 case NETDEV_CHANGENAME:
4358 case NETDEV_FEAT_CHANGE:
4359 case NETDEV_BONDING_FAILOVER:
4360 case NETDEV_POST_TYPE_CHANGE:
4361 case NETDEV_NOTIFY_PEERS:
4362 case NETDEV_CHANGEUPPER:
4363 case NETDEV_RESEND_IGMP:
4364 case NETDEV_CHANGEINFODATA:
4365 case NETDEV_CHANGE_TX_QUEUE_LEN:
4366 rtmsg_ifinfo_event(RTM_NEWLINK, dev, 0, rtnl_get_event(event),
4375 static struct notifier_block rtnetlink_dev_notifier = {
4376 .notifier_call = rtnetlink_event,
4380 static int __net_init rtnetlink_net_init(struct net *net)
4383 struct netlink_kernel_cfg cfg = {
4384 .groups = RTNLGRP_MAX,
4385 .input = rtnetlink_rcv,
4386 .cb_mutex = &rtnl_mutex,
4387 .flags = NL_CFG_F_NONROOT_RECV,
4388 .bind = rtnetlink_bind,
4391 sk = netlink_kernel_create(net, NETLINK_ROUTE, &cfg);
4398 static void __net_exit rtnetlink_net_exit(struct net *net)
4400 netlink_kernel_release(net->rtnl);
4404 static struct pernet_operations rtnetlink_net_ops = {
4405 .init = rtnetlink_net_init,
4406 .exit = rtnetlink_net_exit,
4409 void __init rtnetlink_init(void)
4413 for (i = 0; i < ARRAY_SIZE(rtnl_msg_handlers_ref); i++)
4414 refcount_set(&rtnl_msg_handlers_ref[i], 1);
4416 if (register_pernet_subsys(&rtnetlink_net_ops))
4417 panic("rtnetlink_init: cannot initialize rtnetlink\n");
4419 register_netdevice_notifier(&rtnetlink_dev_notifier);
4421 rtnl_register(PF_UNSPEC, RTM_GETLINK, rtnl_getlink,
4422 rtnl_dump_ifinfo, 0);
4423 rtnl_register(PF_UNSPEC, RTM_SETLINK, rtnl_setlink, NULL, 0);
4424 rtnl_register(PF_UNSPEC, RTM_NEWLINK, rtnl_newlink, NULL, 0);
4425 rtnl_register(PF_UNSPEC, RTM_DELLINK, rtnl_dellink, NULL, 0);
4427 rtnl_register(PF_UNSPEC, RTM_GETADDR, NULL, rtnl_dump_all, 0);
4428 rtnl_register(PF_UNSPEC, RTM_GETROUTE, NULL, rtnl_dump_all, 0);
4429 rtnl_register(PF_UNSPEC, RTM_GETNETCONF, NULL, rtnl_dump_all, 0);
4431 rtnl_register(PF_BRIDGE, RTM_NEWNEIGH, rtnl_fdb_add, NULL, 0);
4432 rtnl_register(PF_BRIDGE, RTM_DELNEIGH, rtnl_fdb_del, NULL, 0);
4433 rtnl_register(PF_BRIDGE, RTM_GETNEIGH, NULL, rtnl_fdb_dump, 0);
4435 rtnl_register(PF_BRIDGE, RTM_GETLINK, NULL, rtnl_bridge_getlink, 0);
4436 rtnl_register(PF_BRIDGE, RTM_DELLINK, rtnl_bridge_dellink, NULL, 0);
4437 rtnl_register(PF_BRIDGE, RTM_SETLINK, rtnl_bridge_setlink, NULL, 0);
4439 rtnl_register(PF_UNSPEC, RTM_GETSTATS, rtnl_stats_get, rtnl_stats_dump,