// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system. INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Routing netlink socket interface: protocol independent part.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Fixes:
 *	Vitaly E. Lavrov		RTA_OK arithmetic was wrong.
 */

#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/fcntl.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/capability.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/security.h>
#include <linux/mutex.h>
#include <linux/if_addr.h>
#include <linux/if_bridge.h>
#include <linux/if_vlan.h>
#include <linux/pci.h>
#include <linux/etherdevice.h>
#include <linux/bpf.h>

#include <linux/uaccess.h>

#include <linux/inet.h>
#include <linux/netdevice.h>

#include <net/protocol.h>
#include <net/route.h>
#include <net/pkt_sched.h>
#include <net/fib_rules.h>
#include <net/rtnetlink.h>
#include <net/net_namespace.h>

#define RTNL_MAX_TYPE		50
#define RTNL_SLAVE_MAX_TYPE	36

struct rtnl_link {
    rtnl_doit_func doit;
    rtnl_dumpit_func dumpit;
    struct module *owner;
    unsigned int flags;
    struct rcu_head rcu;
};

static DEFINE_MUTEX(rtnl_mutex);

void rtnl_lock(void)
{
    mutex_lock(&rtnl_mutex);
}
EXPORT_SYMBOL(rtnl_lock);

int rtnl_lock_killable(void)
{
    return mutex_lock_killable(&rtnl_mutex);
}
EXPORT_SYMBOL(rtnl_lock_killable);
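
/*
 * Usage sketch (illustrative only, not part of this file): most writers of
 * net_device configuration take the RTNL lock around the whole update, e.g.
 *
 *	rtnl_lock();
 *	err = dev_change_flags(dev, flags | IFF_UP, NULL);
 *	rtnl_unlock();
 *
 * dev_change_flags() is just one example of a helper that expects RTNL to
 * be held; any similar RTNL-protected helper could stand in its place.
 */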

static struct sk_buff *defer_kfree_skb_list;
void rtnl_kfree_skbs(struct sk_buff *head, struct sk_buff *tail)
{
    if (head && tail) {
        tail->next = defer_kfree_skb_list;
        defer_kfree_skb_list = head;
    }
}
EXPORT_SYMBOL(rtnl_kfree_skbs);

void __rtnl_unlock(void)
{
    struct sk_buff *head = defer_kfree_skb_list;

    defer_kfree_skb_list = NULL;

    mutex_unlock(&rtnl_mutex);

    while (head) {
        struct sk_buff *next = head->next;

        kfree_skb(head);
        cond_resched();
        head = next;
    }
}

void rtnl_unlock(void)
{
    /* This fellow will unlock it for us. */
    netdev_run_todo();
}
EXPORT_SYMBOL(rtnl_unlock);

int rtnl_trylock(void)
{
    return mutex_trylock(&rtnl_mutex);
}
EXPORT_SYMBOL(rtnl_trylock);

int rtnl_is_locked(void)
{
    return mutex_is_locked(&rtnl_mutex);
}
EXPORT_SYMBOL(rtnl_is_locked);

bool refcount_dec_and_rtnl_lock(refcount_t *r)
{
    return refcount_dec_and_mutex_lock(r, &rtnl_mutex);
}
EXPORT_SYMBOL(refcount_dec_and_rtnl_lock);

#ifdef CONFIG_PROVE_LOCKING
bool lockdep_rtnl_is_held(void)
{
    return lockdep_is_held(&rtnl_mutex);
}
EXPORT_SYMBOL(lockdep_rtnl_is_held);
#endif /* #ifdef CONFIG_PROVE_LOCKING */

static struct rtnl_link *__rcu *rtnl_msg_handlers[RTNL_FAMILY_MAX + 1];

static inline int rtm_msgindex(int msgtype)
{
    int msgindex = msgtype - RTM_BASE;

    /*
     * msgindex < 0 implies someone tried to register a netlink
     * control code. msgindex >= RTM_NR_MSGTYPES may indicate that
     * the message type has not been added to linux/rtnetlink.h
     */
    BUG_ON(msgindex < 0 || msgindex >= RTM_NR_MSGTYPES);

    return msgindex;
}

static struct rtnl_link *rtnl_get_link(int protocol, int msgtype)
{
    struct rtnl_link **tab;

    if (protocol >= ARRAY_SIZE(rtnl_msg_handlers))
        protocol = PF_UNSPEC;

    tab = rcu_dereference_rtnl(rtnl_msg_handlers[protocol]);
    if (!tab)
        tab = rcu_dereference_rtnl(rtnl_msg_handlers[PF_UNSPEC]);

    return tab[msgtype];
}

static int rtnl_register_internal(struct module *owner,
                                  int protocol, int msgtype,
                                  rtnl_doit_func doit, rtnl_dumpit_func dumpit,
                                  unsigned int flags)
{
    struct rtnl_link *link, *old;
    struct rtnl_link __rcu **tab;
    int msgindex;
    int ret = -ENOBUFS;

    BUG_ON(protocol < 0 || protocol > RTNL_FAMILY_MAX);
    msgindex = rtm_msgindex(msgtype);

    rtnl_lock();
    tab = rtnl_msg_handlers[protocol];
    if (tab == NULL) {
        tab = kcalloc(RTM_NR_MSGTYPES, sizeof(void *), GFP_KERNEL);
        if (!tab)
            goto unlock;

        /* ensures we see the 0 stores */
        rcu_assign_pointer(rtnl_msg_handlers[protocol], tab);
    }

    old = rtnl_dereference(tab[msgindex]);
    if (old) {
        link = kmemdup(old, sizeof(*old), GFP_KERNEL);
        if (!link)
            goto unlock;
    } else {
        link = kzalloc(sizeof(*link), GFP_KERNEL);
        if (!link)
            goto unlock;
    }

    WARN_ON(link->owner && link->owner != owner);
    link->owner = owner;

    WARN_ON(doit && link->doit && link->doit != doit);
    if (doit)
        link->doit = doit;
    WARN_ON(dumpit && link->dumpit && link->dumpit != dumpit);
    if (dumpit)
        link->dumpit = dumpit;

    link->flags |= flags;

    /* publish protocol:msgtype */
    rcu_assign_pointer(tab[msgindex], link);

    ret = 0;
    if (old)
        kfree_rcu(old, rcu);
unlock:
    rtnl_unlock();
    return ret;
}

/**
 * rtnl_register_module - Register a rtnetlink message type
 *
 * @owner: module registering the hook (THIS_MODULE)
 * @protocol: Protocol family or PF_UNSPEC
 * @msgtype: rtnetlink message type
 * @doit: Function pointer called for each request message
 * @dumpit: Function pointer called for each dump request (NLM_F_DUMP) message
 * @flags: rtnl_link_flags to modify behaviour of doit/dumpit functions
 *
 * Like rtnl_register, but for use by removable modules.
 */
int rtnl_register_module(struct module *owner,
                         int protocol, int msgtype,
                         rtnl_doit_func doit, rtnl_dumpit_func dumpit,
                         unsigned int flags)
{
    return rtnl_register_internal(owner, protocol, msgtype,
                                  doit, dumpit, flags);
}
EXPORT_SYMBOL_GPL(rtnl_register_module);
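
/*
 * Usage sketch (illustrative, not part of this file): a module registering
 * handlers for a hypothetical RTM_GETFOO message and tearing them down on
 * exit. RTM_GETFOO, foo_doit() and foo_dumpit() are made-up names.
 *
 *	err = rtnl_register_module(THIS_MODULE, PF_UNSPEC, RTM_GETFOO,
 *				   foo_doit, foo_dumpit, 0);
 *	if (err < 0)
 *		return err;
 *	...
 *	rtnl_unregister(PF_UNSPEC, RTM_GETFOO);	 // on module exit
 */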

/**
 * rtnl_register - Register a rtnetlink message type
 * @protocol: Protocol family or PF_UNSPEC
 * @msgtype: rtnetlink message type
 * @doit: Function pointer called for each request message
 * @dumpit: Function pointer called for each dump request (NLM_F_DUMP) message
 * @flags: rtnl_link_flags to modify behaviour of doit/dumpit functions
 *
 * Registers the specified function pointers (at least one of them has
 * to be non-NULL) to be called whenever a request message for the
 * specified protocol family and message type is received.
 *
 * The special protocol family PF_UNSPEC may be used to define fallback
 * function pointers for the case when no entry for the specific protocol
 * family exists.
 */
void rtnl_register(int protocol, int msgtype,
                   rtnl_doit_func doit, rtnl_dumpit_func dumpit,
                   unsigned int flags)
{
    int err;

    err = rtnl_register_internal(NULL, protocol, msgtype, doit, dumpit,
                                 flags);
    if (err)
        pr_err("Unable to register rtnetlink message handler, "
               "protocol = %d, message type = %d\n", protocol, msgtype);
}

/**
 * rtnl_unregister - Unregister a rtnetlink message type
 * @protocol: Protocol family or PF_UNSPEC
 * @msgtype: rtnetlink message type
 *
 * Returns 0 on success or a negative error code.
 */
int rtnl_unregister(int protocol, int msgtype)
{
    struct rtnl_link **tab, *link;
    int msgindex;

    BUG_ON(protocol < 0 || protocol > RTNL_FAMILY_MAX);
    msgindex = rtm_msgindex(msgtype);

    rtnl_lock();
    tab = rtnl_dereference(rtnl_msg_handlers[protocol]);
    if (!tab) {
        rtnl_unlock();
        return -ENOENT;
    }

    link = tab[msgindex];
    rcu_assign_pointer(tab[msgindex], NULL);
    rtnl_unlock();

    kfree_rcu(link, rcu);

    return 0;
}
EXPORT_SYMBOL_GPL(rtnl_unregister);

/**
 * rtnl_unregister_all - Unregister all rtnetlink message types of a protocol
 * @protocol : Protocol family or PF_UNSPEC
 *
 * Identical to calling rtnl_unregister() for all registered message types
 * of a certain protocol family.
 */
void rtnl_unregister_all(int protocol)
{
    struct rtnl_link **tab, *link;
    int msgindex;

    BUG_ON(protocol < 0 || protocol > RTNL_FAMILY_MAX);

    rtnl_lock();
    tab = rtnl_msg_handlers[protocol];
    if (!tab) {
        rtnl_unlock();
        return;
    }
    RCU_INIT_POINTER(rtnl_msg_handlers[protocol], NULL);
    for (msgindex = 0; msgindex < RTM_NR_MSGTYPES; msgindex++) {
        link = tab[msgindex];
        if (!link)
            continue;

        rcu_assign_pointer(tab[msgindex], NULL);
        kfree_rcu(link, rcu);
    }
    rtnl_unlock();

    synchronize_net();

    kfree(tab);
}
EXPORT_SYMBOL_GPL(rtnl_unregister_all);

static LIST_HEAD(link_ops);

static const struct rtnl_link_ops *rtnl_link_ops_get(const char *kind)
{
    const struct rtnl_link_ops *ops;

    list_for_each_entry(ops, &link_ops, list) {
        if (!strcmp(ops->kind, kind))
            return ops;
    }
    return NULL;
}

/**
 * __rtnl_link_register - Register rtnl_link_ops with rtnetlink.
 * @ops: struct rtnl_link_ops * to register
 *
 * The caller must hold the rtnl_mutex. This function should be used
 * by drivers that create devices during module initialization. It
 * must be called before registering the devices.
 *
 * Returns 0 on success or a negative error code.
 */
int __rtnl_link_register(struct rtnl_link_ops *ops)
{
    if (rtnl_link_ops_get(ops->kind))
        return -EEXIST;

    /* The check for setup is here because if ops
     * does not have that filled up, it is not possible
     * to use the ops for creating a device. So do not
     * fill up dellink as well. That disables rtnl_dellink.
     */
    if (ops->setup && !ops->dellink)
        ops->dellink = unregister_netdevice_queue;

    list_add_tail(&ops->list, &link_ops);
    return 0;
}
EXPORT_SYMBOL_GPL(__rtnl_link_register);

/**
 * rtnl_link_register - Register rtnl_link_ops with rtnetlink.
 * @ops: struct rtnl_link_ops * to register
 *
 * Returns 0 on success or a negative error code.
 */
int rtnl_link_register(struct rtnl_link_ops *ops)
{
    int err;

    /* Sanity-check max sizes to avoid stack buffer overflow. */
    if (WARN_ON(ops->maxtype > RTNL_MAX_TYPE ||
                ops->slave_maxtype > RTNL_SLAVE_MAX_TYPE))
        return -EINVAL;

    rtnl_lock();
    err = __rtnl_link_register(ops);
    rtnl_unlock();
    return err;
}
EXPORT_SYMBOL_GPL(rtnl_link_register);
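
/*
 * Usage sketch (illustrative, not part of this file): the minimal shape of a
 * link type that can be created with "ip link add ... type foo". The "foo"
 * kind, struct foo_priv, foo_setup() and foo_newlink() are made-up names.
 *
 *	static struct rtnl_link_ops foo_link_ops __read_mostly = {
 *		.kind		= "foo",
 *		.priv_size	= sizeof(struct foo_priv),
 *		.setup		= foo_setup,
 *		.newlink	= foo_newlink,
 *	};
 *
 *	err = rtnl_link_register(&foo_link_ops);	// module init
 *	...
 *	rtnl_link_unregister(&foo_link_ops);		// module exit
 *
 * Because .setup is set and .dellink is not, __rtnl_link_register() wires
 * .dellink to unregister_netdevice_queue(), so "ip link del" works, too.
 */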

static void __rtnl_kill_links(struct net *net, struct rtnl_link_ops *ops)
{
    struct net_device *dev;
    LIST_HEAD(list_kill);

    for_each_netdev(net, dev) {
        if (dev->rtnl_link_ops == ops)
            ops->dellink(dev, &list_kill);
    }
    unregister_netdevice_many(&list_kill);
}

/**
 * __rtnl_link_unregister - Unregister rtnl_link_ops from rtnetlink.
 * @ops: struct rtnl_link_ops * to unregister
 *
 * The caller must hold the rtnl_mutex and guarantee net_namespace_list
 * integrity (hold pernet_ops_rwsem for writing to close the race
 * with setup_net() and cleanup_net()).
 */
void __rtnl_link_unregister(struct rtnl_link_ops *ops)
{
    struct net *net;

    for_each_net(net) {
        __rtnl_kill_links(net, ops);
    }
    list_del(&ops->list);
}
EXPORT_SYMBOL_GPL(__rtnl_link_unregister);

/* Return with the rtnl_lock held when there are no network
 * devices unregistering in any network namespace.
 */
static void rtnl_lock_unregistering_all(void)
{
    struct net *net;
    bool unregistering;
    DEFINE_WAIT_FUNC(wait, woken_wake_function);

    add_wait_queue(&netdev_unregistering_wq, &wait);
    for (;;) {
        unregistering = false;
        rtnl_lock();
        /* We held write locked pernet_ops_rwsem, and parallel
         * setup_net() and cleanup_net() are not possible.
         */
        for_each_net(net) {
            if (net->dev_unreg_count > 0) {
                unregistering = true;
                break;
            }
        }
        if (!unregistering)
            break;
        __rtnl_unlock();

        wait_woken(&wait, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
    }
    remove_wait_queue(&netdev_unregistering_wq, &wait);
}

/**
 * rtnl_link_unregister - Unregister rtnl_link_ops from rtnetlink.
 * @ops: struct rtnl_link_ops * to unregister
 */
void rtnl_link_unregister(struct rtnl_link_ops *ops)
{
    /* Close the race with setup_net() and cleanup_net() */
    down_write(&pernet_ops_rwsem);
    rtnl_lock_unregistering_all();
    __rtnl_link_unregister(ops);
    rtnl_unlock();
    up_write(&pernet_ops_rwsem);
}
EXPORT_SYMBOL_GPL(rtnl_link_unregister);

static size_t rtnl_link_get_slave_info_data_size(const struct net_device *dev)
{
    struct net_device *master_dev;
    const struct rtnl_link_ops *ops;
    size_t size = 0;

    rcu_read_lock();

    master_dev = netdev_master_upper_dev_get_rcu((struct net_device *)dev);
    if (!master_dev)
        goto out;

    ops = master_dev->rtnl_link_ops;
    if (!ops || !ops->get_slave_size)
        goto out;
    /* IFLA_INFO_SLAVE_DATA + nested data */
    size = nla_total_size(sizeof(struct nlattr)) +
           ops->get_slave_size(master_dev, dev);

out:
    rcu_read_unlock();
    return size;
}

static size_t rtnl_link_get_size(const struct net_device *dev)
{
    const struct rtnl_link_ops *ops = dev->rtnl_link_ops;
    size_t size;

    if (!ops)
        return 0;

    size = nla_total_size(sizeof(struct nlattr)) + /* IFLA_LINKINFO */
           nla_total_size(strlen(ops->kind) + 1);  /* IFLA_INFO_KIND */

    if (ops->get_size)
        /* IFLA_INFO_DATA + nested data */
        size += nla_total_size(sizeof(struct nlattr)) +
                ops->get_size(dev);

    if (ops->get_xstats_size)
        /* IFLA_INFO_XSTATS */
        size += nla_total_size(ops->get_xstats_size(dev));

    size += rtnl_link_get_slave_info_data_size(dev);

    return size;
}

static LIST_HEAD(rtnl_af_ops);

static const struct rtnl_af_ops *rtnl_af_lookup(const int family)
{
    const struct rtnl_af_ops *ops;

    list_for_each_entry_rcu(ops, &rtnl_af_ops, list) {
        if (ops->family == family)
            return ops;
    }

    return NULL;
}

/**
 * rtnl_af_register - Register rtnl_af_ops with rtnetlink.
 * @ops: struct rtnl_af_ops * to register
 */
void rtnl_af_register(struct rtnl_af_ops *ops)
{
    rtnl_lock();
    list_add_tail_rcu(&ops->list, &rtnl_af_ops);
    rtnl_unlock();
}
EXPORT_SYMBOL_GPL(rtnl_af_register);

/**
 * rtnl_af_unregister - Unregister rtnl_af_ops from rtnetlink.
 * @ops: struct rtnl_af_ops * to unregister
 */
void rtnl_af_unregister(struct rtnl_af_ops *ops)
{
    rtnl_lock();
    list_del_rcu(&ops->list);
    rtnl_unlock();

    synchronize_rcu();
}
EXPORT_SYMBOL_GPL(rtnl_af_unregister);
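
/*
 * Usage sketch (illustrative, not part of this file): an address family
 * announcing per-link attributes under IFLA_AF_SPEC. The inet af_ops in
 * net/ipv4/devinet.c follow this pattern; the names below are abbreviated.
 *
 *	static struct rtnl_af_ops inet_af_ops __read_mostly = {
 *		.family		  = AF_INET,
 *		.fill_link_af	  = inet_fill_link_af,
 *		.get_link_af_size = inet_get_link_af_size,
 *		.validate_link_af = inet_validate_link_af,
 *		.set_link_af	  = inet_set_link_af,
 *	};
 *
 *	rtnl_af_register(&inet_af_ops);
 */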

static size_t rtnl_link_get_af_size(const struct net_device *dev,
                                    u32 ext_filter_mask)
{
    struct rtnl_af_ops *af_ops;
    size_t size;

    /* IFLA_AF_SPEC */
    size = nla_total_size(sizeof(struct nlattr));

    rcu_read_lock();
    list_for_each_entry_rcu(af_ops, &rtnl_af_ops, list) {
        if (af_ops->get_link_af_size) {
            /* AF_* + nested data */
            size += nla_total_size(sizeof(struct nlattr)) +
                    af_ops->get_link_af_size(dev, ext_filter_mask);
        }
    }
    rcu_read_unlock();

    return size;
}

static bool rtnl_have_link_slave_info(const struct net_device *dev)
{
    struct net_device *master_dev;
    bool ret = false;

    rcu_read_lock();

    master_dev = netdev_master_upper_dev_get_rcu((struct net_device *)dev);
    if (master_dev && master_dev->rtnl_link_ops)
        ret = true;
    rcu_read_unlock();
    return ret;
}

static int rtnl_link_slave_info_fill(struct sk_buff *skb,
                                     const struct net_device *dev)
{
    struct net_device *master_dev;
    const struct rtnl_link_ops *ops;
    struct nlattr *slave_data;
    int err;

    master_dev = netdev_master_upper_dev_get((struct net_device *)dev);
    if (!master_dev)
        return 0;
    ops = master_dev->rtnl_link_ops;
    if (!ops)
        return 0;
    if (nla_put_string(skb, IFLA_INFO_SLAVE_KIND, ops->kind) < 0)
        return -EMSGSIZE;
    if (ops->fill_slave_info) {
        slave_data = nla_nest_start_noflag(skb, IFLA_INFO_SLAVE_DATA);
        if (!slave_data)
            return -EMSGSIZE;
        err = ops->fill_slave_info(skb, master_dev, dev);
        if (err < 0)
            goto err_cancel_slave_data;
        nla_nest_end(skb, slave_data);
    }
    return 0;

err_cancel_slave_data:
    nla_nest_cancel(skb, slave_data);
    return err;
}

static int rtnl_link_info_fill(struct sk_buff *skb,
                               const struct net_device *dev)
{
    const struct rtnl_link_ops *ops = dev->rtnl_link_ops;
    struct nlattr *data;
    int err;

    if (!ops)
        return 0;
    if (nla_put_string(skb, IFLA_INFO_KIND, ops->kind) < 0)
        return -EMSGSIZE;
    if (ops->fill_xstats) {
        err = ops->fill_xstats(skb, dev);
        if (err < 0)
            return err;
    }
    if (ops->fill_info) {
        data = nla_nest_start_noflag(skb, IFLA_INFO_DATA);
        if (data == NULL)
            return -EMSGSIZE;
        err = ops->fill_info(skb, dev);
        if (err < 0)
            goto err_cancel_data;
        nla_nest_end(skb, data);
    }
    return 0;

err_cancel_data:
    nla_nest_cancel(skb, data);
    return err;
}

static int rtnl_link_fill(struct sk_buff *skb, const struct net_device *dev)
{
    struct nlattr *linkinfo;
    int err = -EMSGSIZE;

    linkinfo = nla_nest_start_noflag(skb, IFLA_LINKINFO);
    if (linkinfo == NULL)
        goto out;

    err = rtnl_link_info_fill(skb, dev);
    if (err < 0)
        goto err_cancel_link;

    err = rtnl_link_slave_info_fill(skb, dev);
    if (err < 0)
        goto err_cancel_link;

    nla_nest_end(skb, linkinfo);
    return 0;

err_cancel_link:
    nla_nest_cancel(skb, linkinfo);
out:
    return err;
}

int rtnetlink_send(struct sk_buff *skb, struct net *net, u32 pid, unsigned int group, int echo)
{
    struct sock *rtnl = net->rtnl;
    int err = 0;

    NETLINK_CB(skb).dst_group = group;
    if (echo)
        refcount_inc(&skb->users);
    netlink_broadcast(rtnl, skb, pid, group, GFP_KERNEL);
    if (echo)
        err = netlink_unicast(rtnl, skb, pid, MSG_DONTWAIT);
    return err;
}

int rtnl_unicast(struct sk_buff *skb, struct net *net, u32 pid)
{
    struct sock *rtnl = net->rtnl;

    return nlmsg_unicast(rtnl, skb, pid);
}
EXPORT_SYMBOL(rtnl_unicast);

void rtnl_notify(struct sk_buff *skb, struct net *net, u32 pid, u32 group,
                 struct nlmsghdr *nlh, gfp_t flags)
{
    struct sock *rtnl = net->rtnl;
    int report = 0;

    if (nlh)
        report = nlmsg_report(nlh);

    nlmsg_notify(rtnl, skb, pid, group, report, flags);
}
EXPORT_SYMBOL(rtnl_notify);

void rtnl_set_sk_err(struct net *net, u32 group, int error)
{
    struct sock *rtnl = net->rtnl;

    netlink_set_err(rtnl, 0, group, error);
}
EXPORT_SYMBOL(rtnl_set_sk_err);
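
/*
 * Usage sketch (illustrative, not part of this file): how a subsystem
 * typically emits an RTNL notification; the link-notification code later in
 * this file follows the same pattern. fill_my_msg() is a made-up helper.
 *
 *	skb = nlmsg_new(if_nlmsg_size(dev, 0), GFP_KERNEL);
 *	if (!skb)
 *		goto errout;
 *	err = fill_my_msg(skb, dev, ...);	// hypothetical fill helper
 *	if (err < 0) {
 *		kfree_skb(skb);
 *		goto errout;
 *	}
 *	rtnl_notify(skb, net, 0, RTNLGRP_LINK, nlh, GFP_KERNEL);
 *	return;
 * errout:
 *	rtnl_set_sk_err(net, RTNLGRP_LINK, err);
 */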

int rtnetlink_put_metrics(struct sk_buff *skb, u32 *metrics)
{
    struct nlattr *mx;
    int i, valid = 0;

    /* nothing is dumped for dst_default_metrics, so just skip the loop */
    if (metrics == dst_default_metrics.metrics)
        return 0;

    mx = nla_nest_start_noflag(skb, RTA_METRICS);
    if (mx == NULL)
        return -ENOBUFS;

    for (i = 0; i < RTAX_MAX; i++) {
        if (metrics[i]) {
            if (i == RTAX_CC_ALGO - 1) {
                char tmp[TCP_CA_NAME_MAX], *name;

                name = tcp_ca_get_name_by_key(metrics[i], tmp);
                if (!name)
                    continue;
                if (nla_put_string(skb, i + 1, name))
                    goto nla_put_failure;
            } else if (i == RTAX_FEATURES - 1) {
                u32 user_features = metrics[i] & RTAX_FEATURE_MASK;

                if (!user_features)
                    continue;
                BUILD_BUG_ON(RTAX_FEATURE_MASK & DST_FEATURE_MASK);
                if (nla_put_u32(skb, i + 1, user_features))
                    goto nla_put_failure;
            } else {
                if (nla_put_u32(skb, i + 1, metrics[i]))
                    goto nla_put_failure;
            }
            valid++;
        }
    }

    if (!valid) {
        nla_nest_cancel(skb, mx);
        return 0;
    }

    return nla_nest_end(skb, mx);

nla_put_failure:
    nla_nest_cancel(skb, mx);
    return -EMSGSIZE;
}
EXPORT_SYMBOL(rtnetlink_put_metrics);

int rtnl_put_cacheinfo(struct sk_buff *skb, struct dst_entry *dst, u32 id,
                       long expires, u32 error)
{
    struct rta_cacheinfo ci = {
        .rta_error = error,
        .rta_id = id,
    };

    if (dst) {
        ci.rta_lastuse = jiffies_delta_to_clock_t(jiffies - dst->lastuse);
        ci.rta_used = dst->__use;
        ci.rta_clntref = atomic_read(&dst->__refcnt);
    }
    if (expires) {
        unsigned long clock;

        clock = jiffies_to_clock_t(abs(expires));
        clock = min_t(unsigned long, clock, INT_MAX);
        ci.rta_expires = (expires > 0) ? clock : -clock;
    }
    return nla_put(skb, RTA_CACHEINFO, sizeof(ci), &ci);
}
EXPORT_SYMBOL_GPL(rtnl_put_cacheinfo);
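
/*
 * Worked example (illustrative): with HZ == 1000 and USER_HZ == 100,
 * jiffies_to_clock_t() divides by 10, so a route expiring in 30000 jiffies
 * (30 s) is reported as rta_expires == 3000 USER_HZ ticks. A negative
 * "expires" means "expired that long ago", hence the abs()/negation above.
 */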

static void set_operstate(struct net_device *dev, unsigned char transition)
{
    unsigned char operstate = dev->operstate;

    switch (transition) {
    case IF_OPER_UP:
        if ((operstate == IF_OPER_DORMANT ||
             operstate == IF_OPER_TESTING ||
             operstate == IF_OPER_UNKNOWN) &&
            !netif_dormant(dev) && !netif_testing(dev))
            operstate = IF_OPER_UP;
        break;

    case IF_OPER_TESTING:
        if (operstate == IF_OPER_UP ||
            operstate == IF_OPER_UNKNOWN)
            operstate = IF_OPER_TESTING;
        break;

    case IF_OPER_DORMANT:
        if (operstate == IF_OPER_UP ||
            operstate == IF_OPER_UNKNOWN)
            operstate = IF_OPER_DORMANT;
        break;
    }

    if (dev->operstate != operstate) {
        write_lock_bh(&dev_base_lock);
        dev->operstate = operstate;
        write_unlock_bh(&dev_base_lock);
        netdev_state_change(dev);
    }
}

static unsigned int rtnl_dev_get_flags(const struct net_device *dev)
{
    return (dev->flags & ~(IFF_PROMISC | IFF_ALLMULTI)) |
           (dev->gflags & (IFF_PROMISC | IFF_ALLMULTI));
}

static unsigned int rtnl_dev_combine_flags(const struct net_device *dev,
                                           const struct ifinfomsg *ifm)
{
    unsigned int flags = ifm->ifi_flags;

    /* bugwards compatibility: ifi_change == 0 is treated as ~0 */
    if (ifm->ifi_change)
        flags = (flags & ifm->ifi_change) |
                (rtnl_dev_get_flags(dev) & ~ifm->ifi_change);

    return flags;
}

static void copy_rtnl_link_stats(struct rtnl_link_stats *a,
                                 const struct rtnl_link_stats64 *b)
{
    a->rx_packets = b->rx_packets;
    a->tx_packets = b->tx_packets;
    a->rx_bytes = b->rx_bytes;
    a->tx_bytes = b->tx_bytes;
    a->rx_errors = b->rx_errors;
    a->tx_errors = b->tx_errors;
    a->rx_dropped = b->rx_dropped;
    a->tx_dropped = b->tx_dropped;

    a->multicast = b->multicast;
    a->collisions = b->collisions;

    a->rx_length_errors = b->rx_length_errors;
    a->rx_over_errors = b->rx_over_errors;
    a->rx_crc_errors = b->rx_crc_errors;
    a->rx_frame_errors = b->rx_frame_errors;
    a->rx_fifo_errors = b->rx_fifo_errors;
    a->rx_missed_errors = b->rx_missed_errors;

    a->tx_aborted_errors = b->tx_aborted_errors;
    a->tx_carrier_errors = b->tx_carrier_errors;
    a->tx_fifo_errors = b->tx_fifo_errors;
    a->tx_heartbeat_errors = b->tx_heartbeat_errors;
    a->tx_window_errors = b->tx_window_errors;

    a->rx_compressed = b->rx_compressed;
    a->tx_compressed = b->tx_compressed;

    a->rx_nohandler = b->rx_nohandler;
}

/* All VF info */
static inline int rtnl_vfinfo_size(const struct net_device *dev,
                                   u32 ext_filter_mask)
{
    if (dev->dev.parent && (ext_filter_mask & RTEXT_FILTER_VF)) {
        int num_vfs = dev_num_vf(dev->dev.parent);
        size_t size = nla_total_size(0);
        size += num_vfs *
            (nla_total_size(0) +
             nla_total_size(sizeof(struct ifla_vf_mac)) +
             nla_total_size(sizeof(struct ifla_vf_broadcast)) +
             nla_total_size(sizeof(struct ifla_vf_vlan)) +
             nla_total_size(0) + /* nest IFLA_VF_VLAN_LIST */
             nla_total_size(MAX_VLAN_LIST_LEN *
                            sizeof(struct ifla_vf_vlan_info)) +
             nla_total_size(sizeof(struct ifla_vf_spoofchk)) +
             nla_total_size(sizeof(struct ifla_vf_tx_rate)) +
             nla_total_size(sizeof(struct ifla_vf_rate)) +
             nla_total_size(sizeof(struct ifla_vf_link_state)) +
             nla_total_size(sizeof(struct ifla_vf_rss_query_en)) +
             nla_total_size(0) + /* nest IFLA_VF_STATS */
             /* IFLA_VF_STATS_RX_PACKETS */
             nla_total_size_64bit(sizeof(__u64)) +
             /* IFLA_VF_STATS_TX_PACKETS */
             nla_total_size_64bit(sizeof(__u64)) +
             /* IFLA_VF_STATS_RX_BYTES */
             nla_total_size_64bit(sizeof(__u64)) +
             /* IFLA_VF_STATS_TX_BYTES */
             nla_total_size_64bit(sizeof(__u64)) +
             /* IFLA_VF_STATS_BROADCAST */
             nla_total_size_64bit(sizeof(__u64)) +
             /* IFLA_VF_STATS_MULTICAST */
             nla_total_size_64bit(sizeof(__u64)) +
             /* IFLA_VF_STATS_RX_DROPPED */
             nla_total_size_64bit(sizeof(__u64)) +
             /* IFLA_VF_STATS_TX_DROPPED */
             nla_total_size_64bit(sizeof(__u64)) +
             nla_total_size(sizeof(struct ifla_vf_trust)));
        return size;
    } else
        return 0;
}

static size_t rtnl_port_size(const struct net_device *dev,
                             u32 ext_filter_mask)
{
    size_t port_size = nla_total_size(4)          /* PORT_VF */
        + nla_total_size(PORT_PROFILE_MAX)        /* PORT_PROFILE */
        + nla_total_size(PORT_UUID_MAX)           /* PORT_INSTANCE_UUID */
        + nla_total_size(PORT_UUID_MAX)           /* PORT_HOST_UUID */
        + nla_total_size(1)                       /* PORT_VDP_REQUEST */
        + nla_total_size(2);                      /* PORT_VDP_RESPONSE */
    size_t vf_ports_size = nla_total_size(sizeof(struct nlattr));
    size_t vf_port_size = nla_total_size(sizeof(struct nlattr))
        + port_size;
    size_t port_self_size = nla_total_size(sizeof(struct nlattr))
        + port_size;

    if (!dev->netdev_ops->ndo_get_vf_port || !dev->dev.parent ||
        !(ext_filter_mask & RTEXT_FILTER_VF))
        return 0;
    if (dev_num_vf(dev->dev.parent))
        return port_self_size + vf_ports_size +
            vf_port_size * dev_num_vf(dev->dev.parent);
    else
        return port_self_size;
}

static size_t rtnl_xdp_size(void)
{
    size_t xdp_size = nla_total_size(0) +  /* nest IFLA_XDP */
                      nla_total_size(1) +  /* XDP_ATTACHED */
                      nla_total_size(4) +  /* XDP_PROG_ID (or 1st mode) */
                      nla_total_size(4);   /* XDP_<mode>_PROG_ID */

    return xdp_size;
}

static size_t rtnl_prop_list_size(const struct net_device *dev)
{
    struct netdev_name_node *name_node;
    size_t size;

    if (list_empty(&dev->name_node->list))
        return 0;
    size = nla_total_size(0);
    list_for_each_entry(name_node, &dev->name_node->list, list)
        size += nla_total_size(ALTIFNAMSIZ);
    return size;
}

static size_t rtnl_proto_down_size(const struct net_device *dev)
{
    size_t size = nla_total_size(1);

    if (dev->proto_down_reason)
        size += nla_total_size(0) + nla_total_size(4);

    return size;
}

static noinline size_t if_nlmsg_size(const struct net_device *dev,
                                     u32 ext_filter_mask)
{
    return NLMSG_ALIGN(sizeof(struct ifinfomsg))
        + nla_total_size(IFNAMSIZ)  /* IFLA_IFNAME */
        + nla_total_size(IFALIASZ)  /* IFLA_IFALIAS */
        + nla_total_size(IFNAMSIZ)  /* IFLA_QDISC */
        + nla_total_size_64bit(sizeof(struct rtnl_link_ifmap))
        + nla_total_size(sizeof(struct rtnl_link_stats))
        + nla_total_size_64bit(sizeof(struct rtnl_link_stats64))
        + nla_total_size(MAX_ADDR_LEN)  /* IFLA_ADDRESS */
        + nla_total_size(MAX_ADDR_LEN)  /* IFLA_BROADCAST */
        + nla_total_size(4)  /* IFLA_TXQLEN */
        + nla_total_size(4)  /* IFLA_WEIGHT */
        + nla_total_size(4)  /* IFLA_MTU */
        + nla_total_size(4)  /* IFLA_LINK */
        + nla_total_size(4)  /* IFLA_MASTER */
        + nla_total_size(1)  /* IFLA_CARRIER */
        + nla_total_size(4)  /* IFLA_PROMISCUITY */
        + nla_total_size(4)  /* IFLA_NUM_TX_QUEUES */
        + nla_total_size(4)  /* IFLA_NUM_RX_QUEUES */
        + nla_total_size(4)  /* IFLA_GSO_MAX_SEGS */
        + nla_total_size(4)  /* IFLA_GSO_MAX_SIZE */
        + nla_total_size(1)  /* IFLA_OPERSTATE */
        + nla_total_size(1)  /* IFLA_LINKMODE */
        + nla_total_size(4)  /* IFLA_CARRIER_CHANGES */
        + nla_total_size(4)  /* IFLA_LINK_NETNSID */
        + nla_total_size(4)  /* IFLA_GROUP */
        + nla_total_size(ext_filter_mask
                         & RTEXT_FILTER_VF ? 4 : 0)  /* IFLA_NUM_VF */
        + rtnl_vfinfo_size(dev, ext_filter_mask)  /* IFLA_VFINFO_LIST */
        + rtnl_port_size(dev, ext_filter_mask)  /* IFLA_VF_PORTS + IFLA_PORT_SELF */
        + rtnl_link_get_size(dev)  /* IFLA_LINKINFO */
        + rtnl_link_get_af_size(dev, ext_filter_mask)  /* IFLA_AF_SPEC */
        + nla_total_size(MAX_PHYS_ITEM_ID_LEN)  /* IFLA_PHYS_PORT_ID */
        + nla_total_size(MAX_PHYS_ITEM_ID_LEN)  /* IFLA_PHYS_SWITCH_ID */
        + nla_total_size(IFNAMSIZ)  /* IFLA_PHYS_PORT_NAME */
        + rtnl_xdp_size()  /* IFLA_XDP */
        + nla_total_size(4)  /* IFLA_EVENT */
        + nla_total_size(4)  /* IFLA_NEW_NETNSID */
        + nla_total_size(4)  /* IFLA_NEW_IFINDEX */
        + rtnl_proto_down_size(dev)  /* proto down */
        + nla_total_size(4)  /* IFLA_TARGET_NETNSID */
        + nla_total_size(4)  /* IFLA_CARRIER_UP_COUNT */
        + nla_total_size(4)  /* IFLA_CARRIER_DOWN_COUNT */
        + nla_total_size(4)  /* IFLA_MIN_MTU */
        + nla_total_size(4)  /* IFLA_MAX_MTU */
        + rtnl_prop_list_size(dev)
        + nla_total_size(MAX_ADDR_LEN)  /* IFLA_PERM_ADDRESS */
        + 0;
}

static int rtnl_vf_ports_fill(struct sk_buff *skb, struct net_device *dev)
{
    struct nlattr *vf_ports;
    struct nlattr *vf_port;
    int vf;
    int err;

    vf_ports = nla_nest_start_noflag(skb, IFLA_VF_PORTS);
    if (!vf_ports)
        return -EMSGSIZE;

    for (vf = 0; vf < dev_num_vf(dev->dev.parent); vf++) {
        vf_port = nla_nest_start_noflag(skb, IFLA_VF_PORT);
        if (!vf_port)
            goto nla_put_failure;
        if (nla_put_u32(skb, IFLA_PORT_VF, vf))
            goto nla_put_failure;
        err = dev->netdev_ops->ndo_get_vf_port(dev, vf, skb);
        if (err == -EMSGSIZE)
            goto nla_put_failure;
        if (err) {
            nla_nest_cancel(skb, vf_port);
            continue;
        }
        nla_nest_end(skb, vf_port);
    }

    nla_nest_end(skb, vf_ports);

    return 0;

nla_put_failure:
    nla_nest_cancel(skb, vf_ports);
    return -EMSGSIZE;
}

static int rtnl_port_self_fill(struct sk_buff *skb, struct net_device *dev)
{
    struct nlattr *port_self;
    int err;

    port_self = nla_nest_start_noflag(skb, IFLA_PORT_SELF);
    if (!port_self)
        return -EMSGSIZE;

    err = dev->netdev_ops->ndo_get_vf_port(dev, PORT_SELF_VF, skb);
    if (err) {
        nla_nest_cancel(skb, port_self);
        return (err == -EMSGSIZE) ? err : 0;
    }

    nla_nest_end(skb, port_self);

    return 0;
}

static int rtnl_port_fill(struct sk_buff *skb, struct net_device *dev,
                          u32 ext_filter_mask)
{
    int err;

    if (!dev->netdev_ops->ndo_get_vf_port || !dev->dev.parent ||
        !(ext_filter_mask & RTEXT_FILTER_VF))
        return 0;

    err = rtnl_port_self_fill(skb, dev);
    if (err)
        return err;

    if (dev_num_vf(dev->dev.parent)) {
        err = rtnl_vf_ports_fill(skb, dev);
        if (err)
            return err;
    }

    return 0;
}

static int rtnl_phys_port_id_fill(struct sk_buff *skb, struct net_device *dev)
{
    int err;
    struct netdev_phys_item_id ppid;

    err = dev_get_phys_port_id(dev, &ppid);
    if (err) {
        if (err == -EOPNOTSUPP)
            return 0;
        return err;
    }

    if (nla_put(skb, IFLA_PHYS_PORT_ID, ppid.id_len, ppid.id))
        return -EMSGSIZE;

    return 0;
}

static int rtnl_phys_port_name_fill(struct sk_buff *skb, struct net_device *dev)
{
    char name[IFNAMSIZ];
    int err;

    err = dev_get_phys_port_name(dev, name, sizeof(name));
    if (err) {
        if (err == -EOPNOTSUPP)
            return 0;
        return err;
    }

    if (nla_put_string(skb, IFLA_PHYS_PORT_NAME, name))
        return -EMSGSIZE;

    return 0;
}

static int rtnl_phys_switch_id_fill(struct sk_buff *skb, struct net_device *dev)
{
    struct netdev_phys_item_id ppid = { };
    int err;

    err = dev_get_port_parent_id(dev, &ppid, false);
    if (err) {
        if (err == -EOPNOTSUPP)
            return 0;
        return err;
    }

    if (nla_put(skb, IFLA_PHYS_SWITCH_ID, ppid.id_len, ppid.id))
        return -EMSGSIZE;

    return 0;
}

static noinline_for_stack int rtnl_fill_stats(struct sk_buff *skb,
                                              struct net_device *dev)
{
    struct rtnl_link_stats64 *sp;
    struct nlattr *attr;

    attr = nla_reserve_64bit(skb, IFLA_STATS64,
                             sizeof(struct rtnl_link_stats64), IFLA_PAD);
    if (!attr)
        return -EMSGSIZE;

    sp = nla_data(attr);
    dev_get_stats(dev, sp);

    attr = nla_reserve(skb, IFLA_STATS,
                       sizeof(struct rtnl_link_stats));
    if (!attr)
        return -EMSGSIZE;

    copy_rtnl_link_stats(nla_data(attr), sp);

    return 0;
}

static noinline_for_stack int rtnl_fill_vfinfo(struct sk_buff *skb,
                                               struct net_device *dev,
                                               int vfs_num,
                                               struct nlattr *vfinfo)
{
    struct ifla_vf_rss_query_en vf_rss_query_en;
    struct nlattr *vf, *vfstats, *vfvlanlist;
    struct ifla_vf_link_state vf_linkstate;
    struct ifla_vf_vlan_info vf_vlan_info;
    struct ifla_vf_spoofchk vf_spoofchk;
    struct ifla_vf_tx_rate vf_tx_rate;
    struct ifla_vf_stats vf_stats;
    struct ifla_vf_trust vf_trust;
    struct ifla_vf_vlan vf_vlan;
    struct ifla_vf_rate vf_rate;
    struct ifla_vf_mac vf_mac;
    struct ifla_vf_broadcast vf_broadcast;
    struct ifla_vf_info ivi;
    struct ifla_vf_guid node_guid;
    struct ifla_vf_guid port_guid;

    memset(&ivi, 0, sizeof(ivi));

    /* Not all SR-IOV capable drivers support the
     * spoofcheck and "RSS query enable" query. Preset to
     * -1 so the user space tool can detect that the driver
     * didn't report anything.
     */
    ivi.spoofchk = -1;
    ivi.rss_query_en = -1;
    ivi.trusted = -1;
    /* The default value for VF link state is "auto"
     * IFLA_VF_LINK_STATE_AUTO which equals zero
     */
    ivi.linkstate = 0;
    /* VLAN Protocol by default is 802.1Q */
    ivi.vlan_proto = htons(ETH_P_8021Q);
    if (dev->netdev_ops->ndo_get_vf_config(dev, vfs_num, &ivi))
        return 0;

    memset(&vf_vlan_info, 0, sizeof(vf_vlan_info));
    memset(&node_guid, 0, sizeof(node_guid));
    memset(&port_guid, 0, sizeof(port_guid));

    vf_mac.vf =
        vf_vlan.vf =
        vf_vlan_info.vf =
        vf_rate.vf =
        vf_tx_rate.vf =
        vf_spoofchk.vf =
        vf_linkstate.vf =
        vf_rss_query_en.vf =
        vf_trust.vf =
        node_guid.vf =
        port_guid.vf = ivi.vf;

    memcpy(vf_mac.mac, ivi.mac, sizeof(ivi.mac));
    memcpy(vf_broadcast.broadcast, dev->broadcast, dev->addr_len);
    vf_vlan.vlan = ivi.vlan;
    vf_vlan.qos = ivi.qos;
    vf_vlan_info.vlan = ivi.vlan;
    vf_vlan_info.qos = ivi.qos;
    vf_vlan_info.vlan_proto = ivi.vlan_proto;
    vf_tx_rate.rate = ivi.max_tx_rate;
    vf_rate.min_tx_rate = ivi.min_tx_rate;
    vf_rate.max_tx_rate = ivi.max_tx_rate;
    vf_spoofchk.setting = ivi.spoofchk;
    vf_linkstate.link_state = ivi.linkstate;
    vf_rss_query_en.setting = ivi.rss_query_en;
    vf_trust.setting = ivi.trusted;
    vf = nla_nest_start_noflag(skb, IFLA_VF_INFO);
    if (!vf)
        goto nla_put_vfinfo_failure;
    if (nla_put(skb, IFLA_VF_MAC, sizeof(vf_mac), &vf_mac) ||
        nla_put(skb, IFLA_VF_BROADCAST, sizeof(vf_broadcast), &vf_broadcast) ||
        nla_put(skb, IFLA_VF_VLAN, sizeof(vf_vlan), &vf_vlan) ||
        nla_put(skb, IFLA_VF_RATE, sizeof(vf_rate),
                &vf_rate) ||
        nla_put(skb, IFLA_VF_TX_RATE, sizeof(vf_tx_rate),
                &vf_tx_rate) ||
        nla_put(skb, IFLA_VF_SPOOFCHK, sizeof(vf_spoofchk),
                &vf_spoofchk) ||
        nla_put(skb, IFLA_VF_LINK_STATE, sizeof(vf_linkstate),
                &vf_linkstate) ||
        nla_put(skb, IFLA_VF_RSS_QUERY_EN,
                sizeof(vf_rss_query_en),
                &vf_rss_query_en) ||
        nla_put(skb, IFLA_VF_TRUST,
                sizeof(vf_trust), &vf_trust))
        goto nla_put_vf_failure;

    if (dev->netdev_ops->ndo_get_vf_guid &&
        !dev->netdev_ops->ndo_get_vf_guid(dev, vfs_num, &node_guid,
                                          &port_guid)) {
        if (nla_put(skb, IFLA_VF_IB_NODE_GUID, sizeof(node_guid),
                    &node_guid) ||
            nla_put(skb, IFLA_VF_IB_PORT_GUID, sizeof(port_guid),
                    &port_guid))
            goto nla_put_vf_failure;
    }
    vfvlanlist = nla_nest_start_noflag(skb, IFLA_VF_VLAN_LIST);
    if (!vfvlanlist)
        goto nla_put_vf_failure;
    if (nla_put(skb, IFLA_VF_VLAN_INFO, sizeof(vf_vlan_info),
                &vf_vlan_info)) {
        nla_nest_cancel(skb, vfvlanlist);
        goto nla_put_vf_failure;
    }
    nla_nest_end(skb, vfvlanlist);
    memset(&vf_stats, 0, sizeof(vf_stats));
    if (dev->netdev_ops->ndo_get_vf_stats)
        dev->netdev_ops->ndo_get_vf_stats(dev, vfs_num,
                                          &vf_stats);
    vfstats = nla_nest_start_noflag(skb, IFLA_VF_STATS);
    if (!vfstats)
        goto nla_put_vf_failure;
    if (nla_put_u64_64bit(skb, IFLA_VF_STATS_RX_PACKETS,
                          vf_stats.rx_packets, IFLA_VF_STATS_PAD) ||
        nla_put_u64_64bit(skb, IFLA_VF_STATS_TX_PACKETS,
                          vf_stats.tx_packets, IFLA_VF_STATS_PAD) ||
        nla_put_u64_64bit(skb, IFLA_VF_STATS_RX_BYTES,
                          vf_stats.rx_bytes, IFLA_VF_STATS_PAD) ||
        nla_put_u64_64bit(skb, IFLA_VF_STATS_TX_BYTES,
                          vf_stats.tx_bytes, IFLA_VF_STATS_PAD) ||
        nla_put_u64_64bit(skb, IFLA_VF_STATS_BROADCAST,
                          vf_stats.broadcast, IFLA_VF_STATS_PAD) ||
        nla_put_u64_64bit(skb, IFLA_VF_STATS_MULTICAST,
                          vf_stats.multicast, IFLA_VF_STATS_PAD) ||
        nla_put_u64_64bit(skb, IFLA_VF_STATS_RX_DROPPED,
                          vf_stats.rx_dropped, IFLA_VF_STATS_PAD) ||
        nla_put_u64_64bit(skb, IFLA_VF_STATS_TX_DROPPED,
                          vf_stats.tx_dropped, IFLA_VF_STATS_PAD)) {
        nla_nest_cancel(skb, vfstats);
        goto nla_put_vf_failure;
    }
    nla_nest_end(skb, vfstats);
    nla_nest_end(skb, vf);
    return 0;

nla_put_vf_failure:
    nla_nest_cancel(skb, vf);
nla_put_vfinfo_failure:
    nla_nest_cancel(skb, vfinfo);
    return -EMSGSIZE;
}

static noinline_for_stack int rtnl_fill_vf(struct sk_buff *skb,
                                           struct net_device *dev,
                                           u32 ext_filter_mask)
{
    struct nlattr *vfinfo;
    int i, num_vfs;

    if (!dev->dev.parent || ((ext_filter_mask & RTEXT_FILTER_VF) == 0))
        return 0;

    num_vfs = dev_num_vf(dev->dev.parent);
    if (nla_put_u32(skb, IFLA_NUM_VF, num_vfs))
        return -EMSGSIZE;

    if (!dev->netdev_ops->ndo_get_vf_config)
        return 0;

    vfinfo = nla_nest_start_noflag(skb, IFLA_VFINFO_LIST);
    if (!vfinfo)
        return -EMSGSIZE;

    for (i = 0; i < num_vfs; i++) {
        if (rtnl_fill_vfinfo(skb, dev, i, vfinfo))
            return -EMSGSIZE;
    }

    nla_nest_end(skb, vfinfo);
    return 0;
}

static int rtnl_fill_link_ifmap(struct sk_buff *skb, struct net_device *dev)
{
    struct rtnl_link_ifmap map;

    memset(&map, 0, sizeof(map));
    map.mem_start = dev->mem_start;
    map.mem_end = dev->mem_end;
    map.base_addr = dev->base_addr;
    map.irq = dev->irq;
    map.dma = dev->dma;
    map.port = dev->if_port;

    if (nla_put_64bit(skb, IFLA_MAP, sizeof(map), &map, IFLA_PAD))
        return -EMSGSIZE;

    return 0;
}

static u32 rtnl_xdp_prog_skb(struct net_device *dev)
{
    const struct bpf_prog *generic_xdp_prog;

    ASSERT_RTNL();

    generic_xdp_prog = rtnl_dereference(dev->xdp_prog);
    if (!generic_xdp_prog)
        return 0;
    return generic_xdp_prog->aux->id;
}

static u32 rtnl_xdp_prog_drv(struct net_device *dev)
{
    return dev_xdp_prog_id(dev, XDP_MODE_DRV);
}

static u32 rtnl_xdp_prog_hw(struct net_device *dev)
{
    return dev_xdp_prog_id(dev, XDP_MODE_HW);
}

static int rtnl_xdp_report_one(struct sk_buff *skb, struct net_device *dev,
                               u32 *prog_id, u8 *mode, u8 tgt_mode, u32 attr,
                               u32 (*get_prog_id)(struct net_device *dev))
{
    u32 curr_id;
    int err;

    curr_id = get_prog_id(dev);
    if (!curr_id)
        return 0;

    *prog_id = curr_id;
    err = nla_put_u32(skb, attr, curr_id);
    if (err)
        return err;

    if (*mode != XDP_ATTACHED_NONE)
        *mode = XDP_ATTACHED_MULTI;
    else
        *mode = tgt_mode;

    return 0;
}

static int rtnl_xdp_fill(struct sk_buff *skb, struct net_device *dev)
{
    struct nlattr *xdp;
    u32 prog_id;
    int err;
    u8 mode;

    xdp = nla_nest_start_noflag(skb, IFLA_XDP);
    if (!xdp)
        return -EMSGSIZE;

    prog_id = 0;
    mode = XDP_ATTACHED_NONE;
    err = rtnl_xdp_report_one(skb, dev, &prog_id, &mode, XDP_ATTACHED_SKB,
                              IFLA_XDP_SKB_PROG_ID, rtnl_xdp_prog_skb);
    if (err)
        goto err_cancel;
    err = rtnl_xdp_report_one(skb, dev, &prog_id, &mode, XDP_ATTACHED_DRV,
                              IFLA_XDP_DRV_PROG_ID, rtnl_xdp_prog_drv);
    if (err)
        goto err_cancel;
    err = rtnl_xdp_report_one(skb, dev, &prog_id, &mode, XDP_ATTACHED_HW,
                              IFLA_XDP_HW_PROG_ID, rtnl_xdp_prog_hw);
    if (err)
        goto err_cancel;

    err = nla_put_u8(skb, IFLA_XDP_ATTACHED, mode);
    if (err)
        goto err_cancel;

    if (prog_id && mode != XDP_ATTACHED_MULTI) {
        err = nla_put_u32(skb, IFLA_XDP_PROG_ID, prog_id);
        if (err)
            goto err_cancel;
    }

    nla_nest_end(skb, xdp);
    return 0;

err_cancel:
    nla_nest_cancel(skb, xdp);
    return err;
}

static u32 rtnl_get_event(unsigned long event)
{
    u32 rtnl_event_type = IFLA_EVENT_NONE;

    switch (event) {
    case NETDEV_REBOOT:
        rtnl_event_type = IFLA_EVENT_REBOOT;
        break;
    case NETDEV_FEAT_CHANGE:
        rtnl_event_type = IFLA_EVENT_FEATURES;
        break;
    case NETDEV_BONDING_FAILOVER:
        rtnl_event_type = IFLA_EVENT_BONDING_FAILOVER;
        break;
    case NETDEV_NOTIFY_PEERS:
        rtnl_event_type = IFLA_EVENT_NOTIFY_PEERS;
        break;
    case NETDEV_RESEND_IGMP:
        rtnl_event_type = IFLA_EVENT_IGMP_RESEND;
        break;
    case NETDEV_CHANGEINFODATA:
        rtnl_event_type = IFLA_EVENT_BONDING_OPTIONS;
        break;
    default:
        break;
    }

    return rtnl_event_type;
}

static int put_master_ifindex(struct sk_buff *skb, struct net_device *dev)
{
    const struct net_device *upper_dev;
    int ret = 0;

    rcu_read_lock();

    upper_dev = netdev_master_upper_dev_get_rcu(dev);
    if (upper_dev)
        ret = nla_put_u32(skb, IFLA_MASTER, upper_dev->ifindex);

    rcu_read_unlock();
    return ret;
}

static int nla_put_iflink(struct sk_buff *skb, const struct net_device *dev,
                          bool force)
{
    int ifindex = dev_get_iflink(dev);

    if (force || dev->ifindex != ifindex)
        return nla_put_u32(skb, IFLA_LINK, ifindex);

    return 0;
}

static noinline_for_stack int nla_put_ifalias(struct sk_buff *skb,
                                              struct net_device *dev)
{
    char buf[IFALIASZ];
    int ret;

    ret = dev_get_alias(dev, buf, sizeof(buf));
    return ret > 0 ? nla_put_string(skb, IFLA_IFALIAS, buf) : 0;
}

static int rtnl_fill_link_netnsid(struct sk_buff *skb,
                                  const struct net_device *dev,
                                  struct net *src_net, gfp_t gfp)
{
    bool put_iflink = false;

    if (dev->rtnl_link_ops && dev->rtnl_link_ops->get_link_net) {
        struct net *link_net = dev->rtnl_link_ops->get_link_net(dev);

        if (!net_eq(dev_net(dev), link_net)) {
            int id = peernet2id_alloc(src_net, link_net, gfp);

            if (nla_put_s32(skb, IFLA_LINK_NETNSID, id))
                return -EMSGSIZE;

            put_iflink = true;
        }
    }

    return nla_put_iflink(skb, dev, put_iflink);
}

static int rtnl_fill_link_af(struct sk_buff *skb,
                             const struct net_device *dev,
                             u32 ext_filter_mask)
{
    const struct rtnl_af_ops *af_ops;
    struct nlattr *af_spec;

    af_spec = nla_nest_start_noflag(skb, IFLA_AF_SPEC);
    if (!af_spec)
        return -EMSGSIZE;

    list_for_each_entry_rcu(af_ops, &rtnl_af_ops, list) {
        struct nlattr *af;
        int err;

        if (!af_ops->fill_link_af)
            continue;

        af = nla_nest_start_noflag(skb, af_ops->family);
        if (!af)
            return -EMSGSIZE;

        err = af_ops->fill_link_af(skb, dev, ext_filter_mask);
        /*
         * Caller may return ENODATA to indicate that there
         * was no data to be dumped. This is not an error, it
         * means we should trim the attribute header and
         * continue.
         */
        if (err == -ENODATA)
            nla_nest_cancel(skb, af);
        else if (err < 0)
            return -EMSGSIZE;

        nla_nest_end(skb, af);
    }

    nla_nest_end(skb, af_spec);
    return 0;
}

static int rtnl_fill_alt_ifnames(struct sk_buff *skb,
                                 const struct net_device *dev)
{
    struct netdev_name_node *name_node;
    int count = 0;

    list_for_each_entry(name_node, &dev->name_node->list, list) {
        if (nla_put_string(skb, IFLA_ALT_IFNAME, name_node->name))
            return -EMSGSIZE;
        count++;
    }
    return count;
}

static int rtnl_fill_prop_list(struct sk_buff *skb,
                               const struct net_device *dev)
{
    struct nlattr *prop_list;
    int ret;

    prop_list = nla_nest_start(skb, IFLA_PROP_LIST);
    if (!prop_list)
        return -EMSGSIZE;

    ret = rtnl_fill_alt_ifnames(skb, dev);
    if (ret <= 0)
        goto nest_cancel;

    nla_nest_end(skb, prop_list);
    return 0;

nest_cancel:
    nla_nest_cancel(skb, prop_list);
    return ret;
}

static int rtnl_fill_proto_down(struct sk_buff *skb,
                                const struct net_device *dev)
{
    struct nlattr *pr;
    u32 preason;

    if (nla_put_u8(skb, IFLA_PROTO_DOWN, dev->proto_down))
        goto nla_put_failure;

    preason = dev->proto_down_reason;
    if (!preason)
        return 0;

    pr = nla_nest_start(skb, IFLA_PROTO_DOWN_REASON);
    if (!pr)
        return -EMSGSIZE;

    if (nla_put_u32(skb, IFLA_PROTO_DOWN_REASON_VALUE, preason)) {
        nla_nest_cancel(skb, pr);
        goto nla_put_failure;
    }

    nla_nest_end(skb, pr);
    return 0;

nla_put_failure:
    return -EMSGSIZE;
}

static int rtnl_fill_ifinfo(struct sk_buff *skb,
                            struct net_device *dev, struct net *src_net,
                            int type, u32 pid, u32 seq, u32 change,
                            unsigned int flags, u32 ext_filter_mask,
                            u32 event, int *new_nsid, int new_ifindex,
                            int tgt_netnsid, gfp_t gfp)
{
    struct ifinfomsg *ifm;
    struct nlmsghdr *nlh;
    struct Qdisc *qdisc;

    ASSERT_RTNL();
    nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ifm), flags);
    if (nlh == NULL)
        return -EMSGSIZE;

    ifm = nlmsg_data(nlh);
    ifm->ifi_family = AF_UNSPEC;
    ifm->__ifi_pad = 0;
    ifm->ifi_type = dev->type;
    ifm->ifi_index = dev->ifindex;
    ifm->ifi_flags = dev_get_flags(dev);
    ifm->ifi_change = change;

    if (tgt_netnsid >= 0 && nla_put_s32(skb, IFLA_TARGET_NETNSID, tgt_netnsid))
        goto nla_put_failure;

    qdisc = rtnl_dereference(dev->qdisc);
    if (nla_put_string(skb, IFLA_IFNAME, dev->name) ||
        nla_put_u32(skb, IFLA_TXQLEN, dev->tx_queue_len) ||
        nla_put_u8(skb, IFLA_OPERSTATE,
                   netif_running(dev) ? dev->operstate : IF_OPER_DOWN) ||
        nla_put_u8(skb, IFLA_LINKMODE, dev->link_mode) ||
        nla_put_u32(skb, IFLA_MTU, dev->mtu) ||
        nla_put_u32(skb, IFLA_MIN_MTU, dev->min_mtu) ||
        nla_put_u32(skb, IFLA_MAX_MTU, dev->max_mtu) ||
        nla_put_u32(skb, IFLA_GROUP, dev->group) ||
        nla_put_u32(skb, IFLA_PROMISCUITY, dev->promiscuity) ||
        nla_put_u32(skb, IFLA_NUM_TX_QUEUES, dev->num_tx_queues) ||
        nla_put_u32(skb, IFLA_GSO_MAX_SEGS, dev->gso_max_segs) ||
        nla_put_u32(skb, IFLA_GSO_MAX_SIZE, dev->gso_max_size) ||
#ifdef CONFIG_RPS
        nla_put_u32(skb, IFLA_NUM_RX_QUEUES, dev->num_rx_queues) ||
#endif
        put_master_ifindex(skb, dev) ||
        nla_put_u8(skb, IFLA_CARRIER, netif_carrier_ok(dev)) ||
        (qdisc &&
         nla_put_string(skb, IFLA_QDISC, qdisc->ops->id)) ||
        nla_put_ifalias(skb, dev) ||
        nla_put_u32(skb, IFLA_CARRIER_CHANGES,
                    atomic_read(&dev->carrier_up_count) +
                    atomic_read(&dev->carrier_down_count)) ||
        nla_put_u32(skb, IFLA_CARRIER_UP_COUNT,
                    atomic_read(&dev->carrier_up_count)) ||
        nla_put_u32(skb, IFLA_CARRIER_DOWN_COUNT,
                    atomic_read(&dev->carrier_down_count)))
        goto nla_put_failure;

    if (rtnl_fill_proto_down(skb, dev))
        goto nla_put_failure;

    if (event != IFLA_EVENT_NONE) {
        if (nla_put_u32(skb, IFLA_EVENT, event))
            goto nla_put_failure;
    }

    if (rtnl_fill_link_ifmap(skb, dev))
        goto nla_put_failure;

    if (dev->addr_len) {
        if (nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr) ||
            nla_put(skb, IFLA_BROADCAST, dev->addr_len, dev->broadcast))
            goto nla_put_failure;
    }

    if (rtnl_phys_port_id_fill(skb, dev))
        goto nla_put_failure;

    if (rtnl_phys_port_name_fill(skb, dev))
        goto nla_put_failure;

    if (rtnl_phys_switch_id_fill(skb, dev))
        goto nla_put_failure;

    if (rtnl_fill_stats(skb, dev))
        goto nla_put_failure;

    if (rtnl_fill_vf(skb, dev, ext_filter_mask))
        goto nla_put_failure;

    if (rtnl_port_fill(skb, dev, ext_filter_mask))
        goto nla_put_failure;

    if (rtnl_xdp_fill(skb, dev))
        goto nla_put_failure;

    if (dev->rtnl_link_ops || rtnl_have_link_slave_info(dev)) {
        if (rtnl_link_fill(skb, dev) < 0)
            goto nla_put_failure;
    }

    if (rtnl_fill_link_netnsid(skb, dev, src_net, gfp))
        goto nla_put_failure;

    if (new_nsid &&
        nla_put_s32(skb, IFLA_NEW_NETNSID, *new_nsid) < 0)
        goto nla_put_failure;
    if (new_ifindex &&
        nla_put_s32(skb, IFLA_NEW_IFINDEX, new_ifindex) < 0)
        goto nla_put_failure;

    if (memchr_inv(dev->perm_addr, '\0', dev->addr_len) &&
        nla_put(skb, IFLA_PERM_ADDRESS, dev->addr_len, dev->perm_addr))
        goto nla_put_failure;

    rcu_read_lock();
    if (rtnl_fill_link_af(skb, dev, ext_filter_mask))
        goto nla_put_failure_rcu;
    rcu_read_unlock();

    if (rtnl_fill_prop_list(skb, dev))
        goto nla_put_failure;

    nlmsg_end(skb, nlh);
    return 0;

nla_put_failure_rcu:
    rcu_read_unlock();
nla_put_failure:
    nlmsg_cancel(skb, nlh);
    return -EMSGSIZE;
}

static const struct nla_policy ifla_policy[IFLA_MAX+1] = {
    [IFLA_IFNAME]            = { .type = NLA_STRING, .len = IFNAMSIZ-1 },
    [IFLA_ADDRESS]           = { .type = NLA_BINARY, .len = MAX_ADDR_LEN },
    [IFLA_BROADCAST]         = { .type = NLA_BINARY, .len = MAX_ADDR_LEN },
    [IFLA_MAP]               = { .len = sizeof(struct rtnl_link_ifmap) },
    [IFLA_MTU]               = { .type = NLA_U32 },
    [IFLA_LINK]              = { .type = NLA_U32 },
    [IFLA_MASTER]            = { .type = NLA_U32 },
    [IFLA_CARRIER]           = { .type = NLA_U8 },
    [IFLA_TXQLEN]            = { .type = NLA_U32 },
    [IFLA_WEIGHT]            = { .type = NLA_U32 },
    [IFLA_OPERSTATE]         = { .type = NLA_U8 },
    [IFLA_LINKMODE]          = { .type = NLA_U8 },
    [IFLA_LINKINFO]          = { .type = NLA_NESTED },
    [IFLA_NET_NS_PID]        = { .type = NLA_U32 },
    [IFLA_NET_NS_FD]         = { .type = NLA_U32 },
    /* IFLA_IFALIAS is a string, but policy is set to NLA_BINARY to
     * allow 0-length string (needed to remove an alias).
     */
    [IFLA_IFALIAS]           = { .type = NLA_BINARY, .len = IFALIASZ - 1 },
    [IFLA_VFINFO_LIST]       = { .type = NLA_NESTED },
    [IFLA_VF_PORTS]          = { .type = NLA_NESTED },
    [IFLA_PORT_SELF]         = { .type = NLA_NESTED },
    [IFLA_AF_SPEC]           = { .type = NLA_NESTED },
    [IFLA_EXT_MASK]          = { .type = NLA_U32 },
    [IFLA_PROMISCUITY]       = { .type = NLA_U32 },
    [IFLA_NUM_TX_QUEUES]     = { .type = NLA_U32 },
    [IFLA_NUM_RX_QUEUES]     = { .type = NLA_U32 },
    [IFLA_GSO_MAX_SEGS]      = { .type = NLA_U32 },
    [IFLA_GSO_MAX_SIZE]      = { .type = NLA_U32 },
    [IFLA_PHYS_PORT_ID]      = { .type = NLA_BINARY, .len = MAX_PHYS_ITEM_ID_LEN },
    [IFLA_CARRIER_CHANGES]   = { .type = NLA_U32 },  /* ignored */
    [IFLA_PHYS_SWITCH_ID]    = { .type = NLA_BINARY, .len = MAX_PHYS_ITEM_ID_LEN },
    [IFLA_LINK_NETNSID]      = { .type = NLA_S32 },
    [IFLA_PROTO_DOWN]        = { .type = NLA_U8 },
    [IFLA_XDP]               = { .type = NLA_NESTED },
    [IFLA_EVENT]             = { .type = NLA_U32 },
    [IFLA_GROUP]             = { .type = NLA_U32 },
    [IFLA_TARGET_NETNSID]    = { .type = NLA_S32 },
    [IFLA_CARRIER_UP_COUNT]  = { .type = NLA_U32 },
    [IFLA_CARRIER_DOWN_COUNT] = { .type = NLA_U32 },
    [IFLA_MIN_MTU]           = { .type = NLA_U32 },
    [IFLA_MAX_MTU]           = { .type = NLA_U32 },
    [IFLA_PROP_LIST]         = { .type = NLA_NESTED },
    [IFLA_ALT_IFNAME]        = { .type = NLA_STRING,
                                 .len = ALTIFNAMSIZ - 1 },
    [IFLA_PERM_ADDRESS]      = { .type = NLA_REJECT },
    [IFLA_PROTO_DOWN_REASON] = { .type = NLA_NESTED },
};

static const struct nla_policy ifla_info_policy[IFLA_INFO_MAX+1] = {
    [IFLA_INFO_KIND]       = { .type = NLA_STRING },
    [IFLA_INFO_DATA]       = { .type = NLA_NESTED },
    [IFLA_INFO_SLAVE_KIND] = { .type = NLA_STRING },
    [IFLA_INFO_SLAVE_DATA] = { .type = NLA_NESTED },
};

static const struct nla_policy ifla_vf_policy[IFLA_VF_MAX+1] = {
    [IFLA_VF_MAC]          = { .len = sizeof(struct ifla_vf_mac) },
    [IFLA_VF_BROADCAST]    = { .type = NLA_REJECT },
    [IFLA_VF_VLAN]         = { .len = sizeof(struct ifla_vf_vlan) },
    [IFLA_VF_VLAN_LIST]    = { .type = NLA_NESTED },
    [IFLA_VF_TX_RATE]      = { .len = sizeof(struct ifla_vf_tx_rate) },
    [IFLA_VF_SPOOFCHK]     = { .len = sizeof(struct ifla_vf_spoofchk) },
    [IFLA_VF_RATE]         = { .len = sizeof(struct ifla_vf_rate) },
    [IFLA_VF_LINK_STATE]   = { .len = sizeof(struct ifla_vf_link_state) },
    [IFLA_VF_RSS_QUERY_EN] = { .len = sizeof(struct ifla_vf_rss_query_en) },
    [IFLA_VF_STATS]        = { .type = NLA_NESTED },
    [IFLA_VF_TRUST]        = { .len = sizeof(struct ifla_vf_trust) },
    [IFLA_VF_IB_NODE_GUID] = { .len = sizeof(struct ifla_vf_guid) },
    [IFLA_VF_IB_PORT_GUID] = { .len = sizeof(struct ifla_vf_guid) },
};

static const struct nla_policy ifla_port_policy[IFLA_PORT_MAX+1] = {
    [IFLA_PORT_VF]            = { .type = NLA_U32 },
    [IFLA_PORT_PROFILE]       = { .type = NLA_STRING,
                                  .len = PORT_PROFILE_MAX },
    [IFLA_PORT_INSTANCE_UUID] = { .type = NLA_BINARY,
                                  .len = PORT_UUID_MAX },
    [IFLA_PORT_HOST_UUID]     = { .type = NLA_STRING,
                                  .len = PORT_UUID_MAX },
    [IFLA_PORT_REQUEST]       = { .type = NLA_U8, },
    [IFLA_PORT_RESPONSE]      = { .type = NLA_U16, },

    /* Unused, but we need to keep it here since user space could
     * fill it. It's also broken with regard to NLA_BINARY use in
     * combination with structs.
     */
    [IFLA_PORT_VSI_TYPE]      = { .type = NLA_BINARY,
                                  .len = sizeof(struct ifla_port_vsi) },
};

static const struct nla_policy ifla_xdp_policy[IFLA_XDP_MAX + 1] = {
    [IFLA_XDP_UNSPEC]      = { .strict_start_type = IFLA_XDP_EXPECTED_FD },
    [IFLA_XDP_FD]          = { .type = NLA_S32 },
    [IFLA_XDP_EXPECTED_FD] = { .type = NLA_S32 },
    [IFLA_XDP_ATTACHED]    = { .type = NLA_U8 },
    [IFLA_XDP_FLAGS]       = { .type = NLA_U32 },
    [IFLA_XDP_PROG_ID]     = { .type = NLA_U32 },
};
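
/*
 * Usage sketch (illustrative): how a setlink-style handler parses the
 * IFLA_XDP nest with the policy above; do_setlink() later in this file does
 * effectively this.
 *
 *	struct nlattr *xdp[IFLA_XDP_MAX + 1];
 *
 *	err = nla_parse_nested_deprecated(xdp, IFLA_XDP_MAX, tb[IFLA_XDP],
 *					  ifla_xdp_policy, NULL);
 *	if (err < 0)
 *		return err;
 *	if (xdp[IFLA_XDP_FD])
 *		fd = nla_get_s32(xdp[IFLA_XDP_FD]);
 */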

static const struct rtnl_link_ops *linkinfo_to_kind_ops(const struct nlattr *nla)
{
    const struct rtnl_link_ops *ops = NULL;
    struct nlattr *linfo[IFLA_INFO_MAX + 1];

    if (nla_parse_nested_deprecated(linfo, IFLA_INFO_MAX, nla, ifla_info_policy, NULL) < 0)
        return NULL;

    if (linfo[IFLA_INFO_KIND]) {
        char kind[MODULE_NAME_LEN];

        nla_strlcpy(kind, linfo[IFLA_INFO_KIND], sizeof(kind));
        ops = rtnl_link_ops_get(kind);
    }

    return ops;
}

static bool link_master_filtered(struct net_device *dev, int master_idx)
{
    struct net_device *master;

    if (!master_idx)
        return false;

    master = netdev_master_upper_dev_get(dev);
    if (!master || master->ifindex != master_idx)
        return true;

    return false;
}

static bool link_kind_filtered(const struct net_device *dev,
                               const struct rtnl_link_ops *kind_ops)
{
    if (kind_ops && dev->rtnl_link_ops != kind_ops)
        return true;

    return false;
}

static bool link_dump_filtered(struct net_device *dev,
                               int master_idx,
                               const struct rtnl_link_ops *kind_ops)
{
    if (link_master_filtered(dev, master_idx) ||
        link_kind_filtered(dev, kind_ops))
        return true;

    return false;
}

/**
 * rtnl_get_net_ns_capable - Get netns if sufficiently privileged.
 * @sk: netlink socket
 * @netnsid: network namespace identifier
 *
 * Returns the network namespace identified by netnsid on success or an error
 * pointer on failure.
 */
struct net *rtnl_get_net_ns_capable(struct sock *sk, int netnsid)
{
    struct net *net;

    net = get_net_ns_by_id(sock_net(sk), netnsid);
    if (!net)
        return ERR_PTR(-EINVAL);

    /* For now, the caller is required to have CAP_NET_ADMIN in
     * the user namespace owning the target net ns.
     */
    if (!sk_ns_capable(sk, net->user_ns, CAP_NET_ADMIN)) {
        put_net(net);
        return ERR_PTR(-EACCES);
    }
    return net;
}
EXPORT_SYMBOL_GPL(rtnl_get_net_ns_capable);
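
/*
 * Usage sketch (illustrative): a dump handler resolving IFLA_TARGET_NETNSID,
 * as rtnl_dump_ifinfo() does below.
 *
 *	tgt_net = rtnl_get_net_ns_capable(skb->sk, netnsid);
 *	if (IS_ERR(tgt_net))
 *		return PTR_ERR(tgt_net);
 *	...
 *	put_net(tgt_net);	// drop the reference when done
 */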

static int rtnl_valid_dump_ifinfo_req(const struct nlmsghdr *nlh,
                                      bool strict_check, struct nlattr **tb,
                                      struct netlink_ext_ack *extack)
{
    int hdrlen;

    if (strict_check) {
        struct ifinfomsg *ifm;

        if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifm))) {
            NL_SET_ERR_MSG(extack, "Invalid header for link dump");
            return -EINVAL;
        }

        ifm = nlmsg_data(nlh);
        if (ifm->__ifi_pad || ifm->ifi_type || ifm->ifi_flags ||
            ifm->ifi_change) {
            NL_SET_ERR_MSG(extack, "Invalid values in header for link dump request");
            return -EINVAL;
        }
        if (ifm->ifi_index) {
            NL_SET_ERR_MSG(extack, "Filter by device index not supported for link dumps");
            return -EINVAL;
        }

        return nlmsg_parse_deprecated_strict(nlh, sizeof(*ifm), tb,
                                             IFLA_MAX, ifla_policy,
                                             extack);
    }

    /* A hack to preserve kernel<->userspace interface.
     * The correct header is ifinfomsg. It is consistent with rtnl_getlink.
     * However, before Linux v3.9 the code here assumed rtgenmsg and that's
     * what iproute2 < v3.9.0 used.
     * We can detect the old iproute2. Even including the IFLA_EXT_MASK
     * attribute, its netlink message is shorter than struct ifinfomsg.
     */
    hdrlen = nlmsg_len(nlh) < sizeof(struct ifinfomsg) ?
             sizeof(struct rtgenmsg) : sizeof(struct ifinfomsg);

    return nlmsg_parse_deprecated(nlh, hdrlen, tb, IFLA_MAX, ifla_policy,
                                  extack);
}

static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
{
    struct netlink_ext_ack *extack = cb->extack;
    const struct nlmsghdr *nlh = cb->nlh;
    struct net *net = sock_net(skb->sk);
    struct net *tgt_net = net;
    int h, s_h;
    int idx = 0, s_idx;
    struct net_device *dev;
    struct hlist_head *head;
    struct nlattr *tb[IFLA_MAX+1];
    u32 ext_filter_mask = 0;
    const struct rtnl_link_ops *kind_ops = NULL;
    unsigned int flags = NLM_F_MULTI;
    int master_idx = 0;
    int netnsid = -1;
    int err, i;

    s_h = cb->args[0];
    s_idx = cb->args[1];

    err = rtnl_valid_dump_ifinfo_req(nlh, cb->strict_check, tb, extack);
    if (err < 0) {
        if (cb->strict_check)
            return err;

        goto walk_entries;
    }

    for (i = 0; i <= IFLA_MAX; ++i) {
        if (!tb[i])
            continue;

        /* new attributes should only be added with strict checking */
        switch (i) {
        case IFLA_TARGET_NETNSID:
            netnsid = nla_get_s32(tb[i]);
            tgt_net = rtnl_get_net_ns_capable(skb->sk, netnsid);
            if (IS_ERR(tgt_net)) {
                NL_SET_ERR_MSG(extack, "Invalid target network namespace id");
                return PTR_ERR(tgt_net);
            }
            break;
        case IFLA_EXT_MASK:
            ext_filter_mask = nla_get_u32(tb[i]);
            break;
        case IFLA_MASTER:
            master_idx = nla_get_u32(tb[i]);
            break;
        case IFLA_LINKINFO:
            kind_ops = linkinfo_to_kind_ops(tb[i]);
            break;
        default:
            if (cb->strict_check) {
                NL_SET_ERR_MSG(extack, "Unsupported attribute in link dump request");
                return -EINVAL;
            }
        }
    }

    if (master_idx || kind_ops)
        flags |= NLM_F_DUMP_FILTERED;

walk_entries:
    for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
        idx = 0;
        head = &tgt_net->dev_index_head[h];
        hlist_for_each_entry(dev, head, index_hlist) {
            if (link_dump_filtered(dev, master_idx, kind_ops))
                goto cont;
            if (idx < s_idx)
                goto cont;
            err = rtnl_fill_ifinfo(skb, dev, net,
                                   RTM_NEWLINK,
                                   NETLINK_CB(cb->skb).portid,
                                   nlh->nlmsg_seq, 0, flags,
                                   ext_filter_mask, 0, NULL, 0,
                                   netnsid, GFP_KERNEL);

            if (err < 0) {
                if (likely(skb->len))
                    goto out;

                goto out_err;
            }
cont:
            idx++;
        }
    }
out:
    err = skb->len;
out_err:
    cb->args[1] = idx;
    cb->args[0] = h;
    cb->seq = net->dev_base_seq;
    nl_dump_check_consistent(cb, nlmsg_hdr(skb));
    if (netnsid >= 0)
        put_net(tgt_net);

    return err;
}

int rtnl_nla_parse_ifla(struct nlattr **tb, const struct nlattr *head, int len,
                        struct netlink_ext_ack *exterr)
{
    return nla_parse_deprecated(tb, IFLA_MAX, head, len, ifla_policy,
                                exterr);
}
EXPORT_SYMBOL(rtnl_nla_parse_ifla);
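
/*
 * Usage sketch (illustrative): stacked or out-of-tree drivers use this
 * export to parse IFLA_* attributes embedded in their own messages, e.g.
 *
 *	struct nlattr *tb[IFLA_MAX + 1];
 *
 *	err = rtnl_nla_parse_ifla(tb, nla_data(attr), nla_len(attr), extack);
 *	if (err < 0)
 *		return err;
 *	if (tb[IFLA_MTU])
 *		mtu = nla_get_u32(tb[IFLA_MTU]);
 */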

struct net *rtnl_link_get_net(struct net *src_net, struct nlattr *tb[])
{
    struct net *net;
    /* Examine the link attributes and figure out which
     * network namespace we are talking about.
     */
    if (tb[IFLA_NET_NS_PID])
        net = get_net_ns_by_pid(nla_get_u32(tb[IFLA_NET_NS_PID]));
    else if (tb[IFLA_NET_NS_FD])
        net = get_net_ns_by_fd(nla_get_u32(tb[IFLA_NET_NS_FD]));
    else
        net = get_net(src_net);
    return net;
}
EXPORT_SYMBOL(rtnl_link_get_net);

/* Figure out which network namespace we are talking about by
 * examining the link attributes in the following order:
 *
 * 1. IFLA_NET_NS_PID
 * 2. IFLA_NET_NS_FD
 * 3. IFLA_TARGET_NETNSID
 */
static struct net *rtnl_link_get_net_by_nlattr(struct net *src_net,
                                               struct nlattr *tb[])
{
    struct net *net;

    if (tb[IFLA_NET_NS_PID] || tb[IFLA_NET_NS_FD])
        return rtnl_link_get_net(src_net, tb);

    if (!tb[IFLA_TARGET_NETNSID])
        return get_net(src_net);

    net = get_net_ns_by_id(src_net, nla_get_u32(tb[IFLA_TARGET_NETNSID]));
    if (!net)
        return ERR_PTR(-EINVAL);

    return net;
}

static struct net *rtnl_link_get_net_capable(const struct sk_buff *skb,
                                             struct net *src_net,
                                             struct nlattr *tb[], int cap)
{
    struct net *net;

    net = rtnl_link_get_net_by_nlattr(src_net, tb);
    if (IS_ERR(net))
        return net;

    if (!netlink_ns_capable(skb, net->user_ns, cap)) {
        put_net(net);
        return ERR_PTR(-EPERM);
    }

    return net;
}

/* Verify that rtnetlink requests do not pass additional properties
 * potentially referring to different network namespaces.
 */
static int rtnl_ensure_unique_netns(struct nlattr *tb[],
                                    struct netlink_ext_ack *extack,
                                    bool netns_id_only)
{
    if (netns_id_only) {
        if (!tb[IFLA_NET_NS_PID] && !tb[IFLA_NET_NS_FD])
            return 0;

        NL_SET_ERR_MSG(extack, "specified netns attribute not supported");
        return -EOPNOTSUPP;
    }

    if (tb[IFLA_TARGET_NETNSID] && (tb[IFLA_NET_NS_PID] || tb[IFLA_NET_NS_FD]))
        goto invalid_attr;

    if (tb[IFLA_NET_NS_PID] && (tb[IFLA_TARGET_NETNSID] || tb[IFLA_NET_NS_FD]))
        goto invalid_attr;

    if (tb[IFLA_NET_NS_FD] && (tb[IFLA_TARGET_NETNSID] || tb[IFLA_NET_NS_PID]))
        goto invalid_attr;

    return 0;

invalid_attr:
    NL_SET_ERR_MSG(extack, "multiple netns identifying attributes specified");
    return -EINVAL;
}

static int validate_linkmsg(struct net_device *dev, struct nlattr *tb[])
{
    if (dev) {
        if (tb[IFLA_ADDRESS] &&
            nla_len(tb[IFLA_ADDRESS]) < dev->addr_len)
            return -EINVAL;

        if (tb[IFLA_BROADCAST] &&
            nla_len(tb[IFLA_BROADCAST]) < dev->addr_len)
            return -EINVAL;
    }

    if (tb[IFLA_AF_SPEC]) {
        struct nlattr *af;
        int rem, err;

        nla_for_each_nested(af, tb[IFLA_AF_SPEC], rem) {
            const struct rtnl_af_ops *af_ops;

            rcu_read_lock();
            af_ops = rtnl_af_lookup(nla_type(af));
            if (!af_ops) {
                rcu_read_unlock();
                return -EAFNOSUPPORT;
            }

            if (!af_ops->set_link_af) {
                rcu_read_unlock();
                return -EOPNOTSUPP;
            }

            if (af_ops->validate_link_af) {
                err = af_ops->validate_link_af(dev, af);
                if (err < 0) {
                    rcu_read_unlock();
                    return err;
                }
            }

            rcu_read_unlock();
        }
    }

    return 0;
}

static int handle_infiniband_guid(struct net_device *dev, struct ifla_vf_guid *ivt,
                                  int guid_type)
{
    const struct net_device_ops *ops = dev->netdev_ops;

    return ops->ndo_set_vf_guid(dev, ivt->vf, ivt->guid, guid_type);
}

static int handle_vf_guid(struct net_device *dev, struct ifla_vf_guid *ivt, int guid_type)
{
    if (dev->type != ARPHRD_INFINIBAND)
        return -EOPNOTSUPP;

    return handle_infiniband_guid(dev, ivt, guid_type);
}

static int do_setvfinfo(struct net_device *dev, struct nlattr **tb)
{
    const struct net_device_ops *ops = dev->netdev_ops;
    int err = -EINVAL;

    if (tb[IFLA_VF_MAC]) {
        struct ifla_vf_mac *ivm = nla_data(tb[IFLA_VF_MAC]);

        if (ivm->vf >= INT_MAX)
            return -EINVAL;
        err = -EOPNOTSUPP;
        if (ops->ndo_set_vf_mac)
            err = ops->ndo_set_vf_mac(dev, ivm->vf,
                                      ivm->mac);
        if (err < 0)
            return err;
    }

    if (tb[IFLA_VF_VLAN]) {
        struct ifla_vf_vlan *ivv = nla_data(tb[IFLA_VF_VLAN]);

        if (ivv->vf >= INT_MAX)
            return -EINVAL;
        err = -EOPNOTSUPP;
        if (ops->ndo_set_vf_vlan)
            err = ops->ndo_set_vf_vlan(dev, ivv->vf, ivv->vlan,
                                       ivv->qos,
                                       htons(ETH_P_8021Q));
        if (err < 0)
            return err;
    }

    if (tb[IFLA_VF_VLAN_LIST]) {
        struct ifla_vf_vlan_info *ivvl[MAX_VLAN_LIST_LEN];
        struct nlattr *attr;
        int rem, len = 0;

        err = -EOPNOTSUPP;
        if (!ops->ndo_set_vf_vlan)
            return err;

        nla_for_each_nested(attr, tb[IFLA_VF_VLAN_LIST], rem) {
            if (nla_type(attr) != IFLA_VF_VLAN_INFO ||
                nla_len(attr) < NLA_HDRLEN) {
                return -EINVAL;
            }
            if (len >= MAX_VLAN_LIST_LEN)
                return -EOPNOTSUPP;
            ivvl[len] = nla_data(attr);

            len++;
        }
        if (len == 0)
            return -EINVAL;

        if (ivvl[0]->vf >= INT_MAX)
            return -EINVAL;
        err = ops->ndo_set_vf_vlan(dev, ivvl[0]->vf, ivvl[0]->vlan,
                                   ivvl[0]->qos, ivvl[0]->vlan_proto);
        if (err < 0)
            return err;
    }

    if (tb[IFLA_VF_TX_RATE]) {
        struct ifla_vf_tx_rate *ivt = nla_data(tb[IFLA_VF_TX_RATE]);
        struct ifla_vf_info ivf;

        if (ivt->vf >= INT_MAX)
            return -EINVAL;
        err = -EOPNOTSUPP;
        if (ops->ndo_get_vf_config)
            err = ops->ndo_get_vf_config(dev, ivt->vf, &ivf);
        if (err < 0)
            return err;

        err = -EOPNOTSUPP;
        if (ops->ndo_set_vf_rate)
            err = ops->ndo_set_vf_rate(dev, ivt->vf,
                                       ivf.min_tx_rate,
                                       ivt->rate);
        if (err < 0)
            return err;
    }

    if (tb[IFLA_VF_RATE]) {
        struct ifla_vf_rate *ivt = nla_data(tb[IFLA_VF_RATE]);

        if (ivt->vf >= INT_MAX)
            return -EINVAL;
        err = -EOPNOTSUPP;
        if (ops->ndo_set_vf_rate)
            err = ops->ndo_set_vf_rate(dev, ivt->vf,
                                       ivt->min_tx_rate,
                                       ivt->max_tx_rate);
        if (err < 0)
            return err;
    }

    if (tb[IFLA_VF_SPOOFCHK]) {
        struct ifla_vf_spoofchk *ivs = nla_data(tb[IFLA_VF_SPOOFCHK]);

        if (ivs->vf >= INT_MAX)
            return -EINVAL;
        err = -EOPNOTSUPP;
        if (ops->ndo_set_vf_spoofchk)
            err = ops->ndo_set_vf_spoofchk(dev, ivs->vf,
                                           ivs->setting);
        if (err < 0)
            return err;
    }

    if (tb[IFLA_VF_LINK_STATE]) {
        struct ifla_vf_link_state *ivl = nla_data(tb[IFLA_VF_LINK_STATE]);

        if (ivl->vf >= INT_MAX)
            return -EINVAL;
        err = -EOPNOTSUPP;
        if (ops->ndo_set_vf_link_state)
            err = ops->ndo_set_vf_link_state(dev, ivl->vf,
                                             ivl->link_state);
        if (err < 0)
            return err;
    }

    if (tb[IFLA_VF_RSS_QUERY_EN]) {
        struct ifla_vf_rss_query_en *ivrssq_en;

        err = -EOPNOTSUPP;
        ivrssq_en = nla_data(tb[IFLA_VF_RSS_QUERY_EN]);
        if (ivrssq_en->vf >= INT_MAX)
            return -EINVAL;
        if (ops->ndo_set_vf_rss_query_en)
            err = ops->ndo_set_vf_rss_query_en(dev, ivrssq_en->vf,
                                               ivrssq_en->setting);
        if (err < 0)
            return err;
    }

    if (tb[IFLA_VF_TRUST]) {
        struct ifla_vf_trust *ivt = nla_data(tb[IFLA_VF_TRUST]);

        if (ivt->vf >= INT_MAX)
            return -EINVAL;
        err = -EOPNOTSUPP;
        if (ops->ndo_set_vf_trust)
            err = ops->ndo_set_vf_trust(dev, ivt->vf, ivt->setting);
        if (err < 0)
            return err;
    }

    if (tb[IFLA_VF_IB_NODE_GUID]) {
        struct ifla_vf_guid *ivt = nla_data(tb[IFLA_VF_IB_NODE_GUID]);

        if (ivt->vf >= INT_MAX)
            return -EINVAL;
        if (!ops->ndo_set_vf_guid)
            return -EOPNOTSUPP;
        return handle_vf_guid(dev, ivt, IFLA_VF_IB_NODE_GUID);
    }

    if (tb[IFLA_VF_IB_PORT_GUID]) {
        struct ifla_vf_guid *ivt = nla_data(tb[IFLA_VF_IB_PORT_GUID]);

        if (ivt->vf >= INT_MAX)
            return -EINVAL;
        if (!ops->ndo_set_vf_guid)
            return -EOPNOTSUPP;

        return handle_vf_guid(dev, ivt, IFLA_VF_IB_PORT_GUID);
    }

    return err;
}
2493 static int do_set_master(struct net_device *dev, int ifindex,
2494 struct netlink_ext_ack *extack)
2496 struct net_device *upper_dev = netdev_master_upper_dev_get(dev);
2497 const struct net_device_ops *ops;
2501 if (upper_dev->ifindex == ifindex)
2503 ops = upper_dev->netdev_ops;
2504 if (ops->ndo_del_slave) {
2505 err = ops->ndo_del_slave(upper_dev, dev);
2514 upper_dev = __dev_get_by_index(dev_net(dev), ifindex);
2517 ops = upper_dev->netdev_ops;
2518 if (ops->ndo_add_slave) {
2519 err = ops->ndo_add_slave(upper_dev, dev, extack);
2529 static const struct nla_policy ifla_proto_down_reason_policy[IFLA_PROTO_DOWN_REASON_VALUE + 1] = {
2530 [IFLA_PROTO_DOWN_REASON_MASK] = { .type = NLA_U32 },
2531 [IFLA_PROTO_DOWN_REASON_VALUE] = { .type = NLA_U32 },
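/* Added annotation, an illustrative sketch of the nested attribute layout
 * this policy validates (not part of the original source):
 *
 *	IFLA_PROTO_DOWN_REASON (nested)
 *	  IFLA_PROTO_DOWN_REASON_MASK  (NLA_U32, optional)
 *	  IFLA_PROTO_DOWN_REASON_VALUE (NLA_U32, required)
 *
 * When the mask attribute is absent, do_set_proto_down() below passes
 * mask == 0 to dev_change_proto_down_reason(), which is understood as
 * "replace the whole reason value" rather than updating individual bits.
 */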
2534 static int do_set_proto_down(struct net_device *dev,
2535 struct nlattr *nl_proto_down,
2536 struct nlattr *nl_proto_down_reason,
2537 struct netlink_ext_ack *extack)
2539 struct nlattr *pdreason[IFLA_PROTO_DOWN_REASON_MAX + 1];
2540 const struct net_device_ops *ops = dev->netdev_ops;
2541 unsigned long mask = 0;
2546 if (!ops->ndo_change_proto_down) {
2547 NL_SET_ERR_MSG(extack, "Protodown not supported by device");
2551 if (nl_proto_down_reason) {
2552 err = nla_parse_nested_deprecated(pdreason,
2553 IFLA_PROTO_DOWN_REASON_MAX,
2554 nl_proto_down_reason,
2555 ifla_proto_down_reason_policy,
2560 if (!pdreason[IFLA_PROTO_DOWN_REASON_VALUE]) {
2561 NL_SET_ERR_MSG(extack, "Invalid protodown reason value");
2565 value = nla_get_u32(pdreason[IFLA_PROTO_DOWN_REASON_VALUE]);
2567 if (pdreason[IFLA_PROTO_DOWN_REASON_MASK])
2568 mask = nla_get_u32(pdreason[IFLA_PROTO_DOWN_REASON_MASK]);
2570 dev_change_proto_down_reason(dev, mask, value);
2573 if (nl_proto_down) {
2574 proto_down = nla_get_u8(nl_proto_down);
2576 /* Don't turn off protodown if there are active reasons */
2577 if (!proto_down && dev->proto_down_reason) {
2578 NL_SET_ERR_MSG(extack, "Cannot clear protodown, active reasons");
2581 err = dev_change_proto_down(dev,
2590 #define DO_SETLINK_MODIFIED 0x01
2591 /* notify flag means notify + modified. */
2592 #define DO_SETLINK_NOTIFY 0x03
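/* Added annotation: because DO_SETLINK_NOTIFY (0x03) contains the
 * DO_SETLINK_MODIFIED (0x01) bit, "status & DO_SETLINK_MODIFIED" is true
 * for either flag, while the stricter test at the end of do_setlink(),
 * "(status & DO_SETLINK_NOTIFY) == DO_SETLINK_NOTIFY", matches only the
 * changes that also want a netdev_state_change() event.
 */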
2593 static int do_setlink(const struct sk_buff *skb,
2594 struct net_device *dev, struct ifinfomsg *ifm,
2595 struct netlink_ext_ack *extack,
2596 struct nlattr **tb, char *ifname, int status)
2598 const struct net_device_ops *ops = dev->netdev_ops;
2601 err = validate_linkmsg(dev, tb);
2605 if (tb[IFLA_NET_NS_PID] || tb[IFLA_NET_NS_FD] || tb[IFLA_TARGET_NETNSID]) {
2606 const char *pat = ifname && ifname[0] ? ifname : NULL;
2607 struct net *net = rtnl_link_get_net_capable(skb, dev_net(dev),
2614 err = dev_change_net_namespace(dev, net, pat);
2618 status |= DO_SETLINK_MODIFIED;
2622 struct rtnl_link_ifmap *u_map;
2625 if (!ops->ndo_set_config) {
2630 if (!netif_device_present(dev)) {
2635 u_map = nla_data(tb[IFLA_MAP]);
2636 k_map.mem_start = (unsigned long) u_map->mem_start;
2637 k_map.mem_end = (unsigned long) u_map->mem_end;
2638 k_map.base_addr = (unsigned short) u_map->base_addr;
2639 k_map.irq = (unsigned char) u_map->irq;
2640 k_map.dma = (unsigned char) u_map->dma;
2641 k_map.port = (unsigned char) u_map->port;
2643 err = ops->ndo_set_config(dev, &k_map);
2647 status |= DO_SETLINK_NOTIFY;
2650 if (tb[IFLA_ADDRESS]) {
2651 struct sockaddr *sa;
2654 len = sizeof(sa_family_t) + max_t(size_t, dev->addr_len,
2656 sa = kmalloc(len, GFP_KERNEL);
2661 sa->sa_family = dev->type;
2662 memcpy(sa->sa_data, nla_data(tb[IFLA_ADDRESS]),
2664 err = dev_set_mac_address_user(dev, sa, extack);
2668 status |= DO_SETLINK_MODIFIED;
2672 err = dev_set_mtu_ext(dev, nla_get_u32(tb[IFLA_MTU]), extack);
2675 status |= DO_SETLINK_MODIFIED;
2678 if (tb[IFLA_GROUP]) {
2679 dev_set_group(dev, nla_get_u32(tb[IFLA_GROUP]));
2680 status |= DO_SETLINK_NOTIFY;
2684 * Interface selected by interface index but interface
2685 * name provided implies that a name change has been requested.
2688 if (ifm->ifi_index > 0 && ifname[0]) {
2689 err = dev_change_name(dev, ifname);
2692 status |= DO_SETLINK_MODIFIED;
2695 if (tb[IFLA_IFALIAS]) {
2696 err = dev_set_alias(dev, nla_data(tb[IFLA_IFALIAS]),
2697 nla_len(tb[IFLA_IFALIAS]));
2700 status |= DO_SETLINK_NOTIFY;
2703 if (tb[IFLA_BROADCAST]) {
2704 nla_memcpy(dev->broadcast, tb[IFLA_BROADCAST], dev->addr_len);
2705 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
2708 if (ifm->ifi_flags || ifm->ifi_change) {
2709 err = dev_change_flags(dev, rtnl_dev_combine_flags(dev, ifm),
2715 if (tb[IFLA_MASTER]) {
2716 err = do_set_master(dev, nla_get_u32(tb[IFLA_MASTER]), extack);
2719 status |= DO_SETLINK_MODIFIED;
2722 if (tb[IFLA_CARRIER]) {
2723 err = dev_change_carrier(dev, nla_get_u8(tb[IFLA_CARRIER]));
2726 status |= DO_SETLINK_MODIFIED;
2729 if (tb[IFLA_TXQLEN]) {
2730 unsigned int value = nla_get_u32(tb[IFLA_TXQLEN]);
2732 err = dev_change_tx_queue_len(dev, value);
2735 status |= DO_SETLINK_MODIFIED;
2738 if (tb[IFLA_GSO_MAX_SIZE]) {
2739 u32 max_size = nla_get_u32(tb[IFLA_GSO_MAX_SIZE]);
2741 if (max_size > GSO_MAX_SIZE) {
2746 if (dev->gso_max_size ^ max_size) {
2747 netif_set_gso_max_size(dev, max_size);
2748 status |= DO_SETLINK_MODIFIED;
2752 if (tb[IFLA_GSO_MAX_SEGS]) {
2753 u32 max_segs = nla_get_u32(tb[IFLA_GSO_MAX_SEGS]);
2755 if (max_segs > GSO_MAX_SEGS) {
2760 if (dev->gso_max_segs ^ max_segs) {
2761 dev->gso_max_segs = max_segs;
2762 status |= DO_SETLINK_MODIFIED;
2766 if (tb[IFLA_OPERSTATE])
2767 set_operstate(dev, nla_get_u8(tb[IFLA_OPERSTATE]));
2769 if (tb[IFLA_LINKMODE]) {
2770 unsigned char value = nla_get_u8(tb[IFLA_LINKMODE]);
2772 write_lock_bh(&dev_base_lock);
2773 if (dev->link_mode ^ value)
2774 status |= DO_SETLINK_NOTIFY;
2775 dev->link_mode = value;
2776 write_unlock_bh(&dev_base_lock);
2779 if (tb[IFLA_VFINFO_LIST]) {
2780 struct nlattr *vfinfo[IFLA_VF_MAX + 1];
2781 struct nlattr *attr;
2784 nla_for_each_nested(attr, tb[IFLA_VFINFO_LIST], rem) {
2785 if (nla_type(attr) != IFLA_VF_INFO ||
2786 nla_len(attr) < NLA_HDRLEN) {
2790 err = nla_parse_nested_deprecated(vfinfo, IFLA_VF_MAX,
2796 err = do_setvfinfo(dev, vfinfo);
2799 status |= DO_SETLINK_NOTIFY;
2804 if (tb[IFLA_VF_PORTS]) {
2805 struct nlattr *port[IFLA_PORT_MAX+1];
2806 struct nlattr *attr;
2811 if (!ops->ndo_set_vf_port)
2814 nla_for_each_nested(attr, tb[IFLA_VF_PORTS], rem) {
2815 if (nla_type(attr) != IFLA_VF_PORT ||
2816 nla_len(attr) < NLA_HDRLEN) {
2820 err = nla_parse_nested_deprecated(port, IFLA_PORT_MAX,
2826 if (!port[IFLA_PORT_VF]) {
2830 vf = nla_get_u32(port[IFLA_PORT_VF]);
2831 err = ops->ndo_set_vf_port(dev, vf, port);
2834 status |= DO_SETLINK_NOTIFY;
2839 if (tb[IFLA_PORT_SELF]) {
2840 struct nlattr *port[IFLA_PORT_MAX+1];
2842 err = nla_parse_nested_deprecated(port, IFLA_PORT_MAX,
2844 ifla_port_policy, NULL);
2849 if (ops->ndo_set_vf_port)
2850 err = ops->ndo_set_vf_port(dev, PORT_SELF_VF, port);
2853 status |= DO_SETLINK_NOTIFY;
2856 if (tb[IFLA_AF_SPEC]) {
2860 nla_for_each_nested(af, tb[IFLA_AF_SPEC], rem) {
2861 const struct rtnl_af_ops *af_ops;
2865 BUG_ON(!(af_ops = rtnl_af_lookup(nla_type(af))));
2867 err = af_ops->set_link_af(dev, af);
2874 status |= DO_SETLINK_NOTIFY;
2879 if (tb[IFLA_PROTO_DOWN] || tb[IFLA_PROTO_DOWN_REASON]) {
2880 err = do_set_proto_down(dev, tb[IFLA_PROTO_DOWN],
2881 tb[IFLA_PROTO_DOWN_REASON], extack);
2884 status |= DO_SETLINK_NOTIFY;
2888 struct nlattr *xdp[IFLA_XDP_MAX + 1];
2891 err = nla_parse_nested_deprecated(xdp, IFLA_XDP_MAX,
2893 ifla_xdp_policy, NULL);
2897 if (xdp[IFLA_XDP_ATTACHED] || xdp[IFLA_XDP_PROG_ID]) {
2902 if (xdp[IFLA_XDP_FLAGS]) {
2903 xdp_flags = nla_get_u32(xdp[IFLA_XDP_FLAGS]);
2904 if (xdp_flags & ~XDP_FLAGS_MASK) {
2908 if (hweight32(xdp_flags & XDP_FLAGS_MODES) > 1) {
2914 if (xdp[IFLA_XDP_FD]) {
2915 int expected_fd = -1;
2917 if (xdp_flags & XDP_FLAGS_REPLACE) {
2918 if (!xdp[IFLA_XDP_EXPECTED_FD]) {
2923 nla_get_s32(xdp[IFLA_XDP_EXPECTED_FD]);
2926 err = dev_change_xdp_fd(dev, extack,
2927 nla_get_s32(xdp[IFLA_XDP_FD]),
2932 status |= DO_SETLINK_NOTIFY;
2937 if (status & DO_SETLINK_MODIFIED) {
2938 if ((status & DO_SETLINK_NOTIFY) == DO_SETLINK_NOTIFY)
2939 netdev_state_change(dev);
2942 net_warn_ratelimited("A link change request failed with some changes committed already. Interface %s may have been left with an inconsistent configuration, please check.\n",
2949 static struct net_device *rtnl_dev_get(struct net *net,
2950 struct nlattr *ifname_attr,
2951 struct nlattr *altifname_attr,
2954 char buffer[ALTIFNAMSIZ];
2959 nla_strlcpy(ifname, ifname_attr, IFNAMSIZ);
2960 else if (altifname_attr)
2961 nla_strlcpy(ifname, altifname_attr, ALTIFNAMSIZ);
2966 return __dev_get_by_name(net, ifname);
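/* Added annotation: rtnl_dev_get() resolves a device by name, preferring
 * IFLA_IFNAME and falling back to IFLA_ALT_IFNAME. Callers that also
 * accept a numeric index (rtnl_setlink(), rtnl_dellink(), rtnl_getlink())
 * try ifm->ifi_index first and only use this helper when no index was
 * supplied.
 */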
2969 static int rtnl_setlink(struct sk_buff *skb, struct nlmsghdr *nlh,
2970 struct netlink_ext_ack *extack)
2972 struct net *net = sock_net(skb->sk);
2973 struct ifinfomsg *ifm;
2974 struct net_device *dev;
2976 struct nlattr *tb[IFLA_MAX+1];
2977 char ifname[IFNAMSIZ];
2979 err = nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFLA_MAX,
2980 ifla_policy, extack);
2984 err = rtnl_ensure_unique_netns(tb, extack, false);
2988 if (tb[IFLA_IFNAME])
2989 nla_strlcpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ);
2994 ifm = nlmsg_data(nlh);
2995 if (ifm->ifi_index > 0)
2996 dev = __dev_get_by_index(net, ifm->ifi_index);
2997 else if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME])
2998 dev = rtnl_dev_get(net, NULL, tb[IFLA_ALT_IFNAME], ifname);
3007 err = do_setlink(skb, dev, ifm, extack, tb, ifname, 0);
3012 static int rtnl_group_dellink(const struct net *net, int group)
3014 struct net_device *dev, *aux;
3015 LIST_HEAD(list_kill);
3021 for_each_netdev(net, dev) {
3022 if (dev->group == group) {
3023 const struct rtnl_link_ops *ops;
3026 ops = dev->rtnl_link_ops;
3027 if (!ops || !ops->dellink)
3035 for_each_netdev_safe(net, dev, aux) {
3036 if (dev->group == group) {
3037 const struct rtnl_link_ops *ops;
3039 ops = dev->rtnl_link_ops;
3040 ops->dellink(dev, &list_kill);
3043 unregister_netdevice_many(&list_kill);
3048 int rtnl_delete_link(struct net_device *dev)
3050 const struct rtnl_link_ops *ops;
3051 LIST_HEAD(list_kill);
3053 ops = dev->rtnl_link_ops;
3054 if (!ops || !ops->dellink)
3057 ops->dellink(dev, &list_kill);
3058 unregister_netdevice_many(&list_kill);
3062 EXPORT_SYMBOL_GPL(rtnl_delete_link);
3064 static int rtnl_dellink(struct sk_buff *skb, struct nlmsghdr *nlh,
3065 struct netlink_ext_ack *extack)
3067 struct net *net = sock_net(skb->sk);
3068 struct net *tgt_net = net;
3069 struct net_device *dev = NULL;
3070 struct ifinfomsg *ifm;
3071 struct nlattr *tb[IFLA_MAX+1];
3075 err = nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFLA_MAX,
3076 ifla_policy, extack);
3080 err = rtnl_ensure_unique_netns(tb, extack, true);
3084 if (tb[IFLA_TARGET_NETNSID]) {
3085 netnsid = nla_get_s32(tb[IFLA_TARGET_NETNSID]);
3086 tgt_net = rtnl_get_net_ns_capable(NETLINK_CB(skb).sk, netnsid);
3087 if (IS_ERR(tgt_net))
3088 return PTR_ERR(tgt_net);
3092 ifm = nlmsg_data(nlh);
3093 if (ifm->ifi_index > 0)
3094 dev = __dev_get_by_index(tgt_net, ifm->ifi_index);
3095 else if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME])
3096 dev = rtnl_dev_get(net, tb[IFLA_IFNAME],
3097 tb[IFLA_ALT_IFNAME], NULL);
3098 else if (tb[IFLA_GROUP])
3099 err = rtnl_group_dellink(tgt_net, nla_get_u32(tb[IFLA_GROUP]));
3104 if (tb[IFLA_IFNAME] || ifm->ifi_index > 0)
3110 err = rtnl_delete_link(dev);
3119 int rtnl_configure_link(struct net_device *dev, const struct ifinfomsg *ifm)
3121 unsigned int old_flags;
3124 old_flags = dev->flags;
3125 if (ifm && (ifm->ifi_flags || ifm->ifi_change)) {
3126 err = __dev_change_flags(dev, rtnl_dev_combine_flags(dev, ifm),
3132 if (dev->rtnl_link_state == RTNL_LINK_INITIALIZED) {
3133 __dev_notify_flags(dev, old_flags, (old_flags ^ dev->flags));
3135 dev->rtnl_link_state = RTNL_LINK_INITIALIZED;
3136 __dev_notify_flags(dev, old_flags, ~0U);
3140 EXPORT_SYMBOL(rtnl_configure_link);
3142 struct net_device *rtnl_create_link(struct net *net, const char *ifname,
3143 unsigned char name_assign_type,
3144 const struct rtnl_link_ops *ops,
3145 struct nlattr *tb[],
3146 struct netlink_ext_ack *extack)
3148 struct net_device *dev;
3149 unsigned int num_tx_queues = 1;
3150 unsigned int num_rx_queues = 1;
3152 if (tb[IFLA_NUM_TX_QUEUES])
3153 num_tx_queues = nla_get_u32(tb[IFLA_NUM_TX_QUEUES]);
3154 else if (ops->get_num_tx_queues)
3155 num_tx_queues = ops->get_num_tx_queues();
3157 if (tb[IFLA_NUM_RX_QUEUES])
3158 num_rx_queues = nla_get_u32(tb[IFLA_NUM_RX_QUEUES]);
3159 else if (ops->get_num_rx_queues)
3160 num_rx_queues = ops->get_num_rx_queues();
3162 if (num_tx_queues < 1 || num_tx_queues > 4096) {
3163 NL_SET_ERR_MSG(extack, "Invalid number of transmit queues");
3164 return ERR_PTR(-EINVAL);
3167 if (num_rx_queues < 1 || num_rx_queues > 4096) {
3168 NL_SET_ERR_MSG(extack, "Invalid number of receive queues");
3169 return ERR_PTR(-EINVAL);
3172 dev = alloc_netdev_mqs(ops->priv_size, ifname, name_assign_type,
3173 ops->setup, num_tx_queues, num_rx_queues);
3175 return ERR_PTR(-ENOMEM);
3177 dev_net_set(dev, net);
3178 dev->rtnl_link_ops = ops;
3179 dev->rtnl_link_state = RTNL_LINK_INITIALIZING;
3182 u32 mtu = nla_get_u32(tb[IFLA_MTU]);
3185 err = dev_validate_mtu(dev, mtu, extack);
3188 return ERR_PTR(err);
3192 if (tb[IFLA_ADDRESS]) {
3193 memcpy(dev->dev_addr, nla_data(tb[IFLA_ADDRESS]),
3194 nla_len(tb[IFLA_ADDRESS]));
3195 dev->addr_assign_type = NET_ADDR_SET;
3197 if (tb[IFLA_BROADCAST])
3198 memcpy(dev->broadcast, nla_data(tb[IFLA_BROADCAST]),
3199 nla_len(tb[IFLA_BROADCAST]));
3200 if (tb[IFLA_TXQLEN])
3201 dev->tx_queue_len = nla_get_u32(tb[IFLA_TXQLEN]);
3202 if (tb[IFLA_OPERSTATE])
3203 set_operstate(dev, nla_get_u8(tb[IFLA_OPERSTATE]));
3204 if (tb[IFLA_LINKMODE])
3205 dev->link_mode = nla_get_u8(tb[IFLA_LINKMODE]);
3207 dev_set_group(dev, nla_get_u32(tb[IFLA_GROUP]));
3208 if (tb[IFLA_GSO_MAX_SIZE])
3209 netif_set_gso_max_size(dev, nla_get_u32(tb[IFLA_GSO_MAX_SIZE]));
3210 if (tb[IFLA_GSO_MAX_SEGS])
3211 dev->gso_max_segs = nla_get_u32(tb[IFLA_GSO_MAX_SEGS]);
3215 EXPORT_SYMBOL(rtnl_create_link);
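/* Added annotation, a minimal usage sketch (hypothetical caller, error
 * handling elided): the device returned by rtnl_create_link() is
 * allocated but not yet registered, so a caller is expected to follow up
 * roughly the way __rtnl_newlink() does, registering the device (via
 * ops->newlink() or register_netdevice()) and then configuring it:
 *
 *	dev = rtnl_create_link(net, ifname, NET_NAME_USER, ops, tb, extack);
 *	if (IS_ERR(dev))
 *		return PTR_ERR(dev);
 *	err = register_netdevice(dev);
 *	err = rtnl_configure_link(dev, ifm);
 */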
3217 static int rtnl_group_changelink(const struct sk_buff *skb,
3218 struct net *net, int group,
3219 struct ifinfomsg *ifm,
3220 struct netlink_ext_ack *extack,
3223 struct net_device *dev, *aux;
3226 for_each_netdev_safe(net, dev, aux) {
3227 if (dev->group == group) {
3228 err = do_setlink(skb, dev, ifm, extack, tb, NULL, 0);
3237 static int __rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh,
3238 struct nlattr **attr, struct netlink_ext_ack *extack)
3240 struct nlattr *slave_attr[RTNL_SLAVE_MAX_TYPE + 1];
3241 unsigned char name_assign_type = NET_NAME_USER;
3242 struct nlattr *linkinfo[IFLA_INFO_MAX + 1];
3243 const struct rtnl_link_ops *m_ops;
3244 struct net_device *master_dev;
3245 struct net *net = sock_net(skb->sk);
3246 const struct rtnl_link_ops *ops;
3247 struct nlattr *tb[IFLA_MAX + 1];
3248 struct net *dest_net, *link_net;
3249 struct nlattr **slave_data;
3250 char kind[MODULE_NAME_LEN];
3251 struct net_device *dev;
3252 struct ifinfomsg *ifm;
3253 char ifname[IFNAMSIZ];
3254 struct nlattr **data;
3257 #ifdef CONFIG_MODULES
3260 err = nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFLA_MAX,
3261 ifla_policy, extack);
3265 err = rtnl_ensure_unique_netns(tb, extack, false);
3269 if (tb[IFLA_IFNAME])
3270 nla_strlcpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ);
3274 ifm = nlmsg_data(nlh);
3275 if (ifm->ifi_index > 0)
3276 dev = __dev_get_by_index(net, ifm->ifi_index);
3277 else if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME])
3278 dev = rtnl_dev_get(net, NULL, tb[IFLA_ALT_IFNAME], ifname);
3285 master_dev = netdev_master_upper_dev_get(dev);
3287 m_ops = master_dev->rtnl_link_ops;
3290 err = validate_linkmsg(dev, tb);
3294 if (tb[IFLA_LINKINFO]) {
3295 err = nla_parse_nested_deprecated(linkinfo, IFLA_INFO_MAX,
3297 ifla_info_policy, NULL);
3301 memset(linkinfo, 0, sizeof(linkinfo));
3303 if (linkinfo[IFLA_INFO_KIND]) {
3304 nla_strlcpy(kind, linkinfo[IFLA_INFO_KIND], sizeof(kind));
3305 ops = rtnl_link_ops_get(kind);
3313 if (ops->maxtype > RTNL_MAX_TYPE)
3316 if (ops->maxtype && linkinfo[IFLA_INFO_DATA]) {
3317 err = nla_parse_nested_deprecated(attr, ops->maxtype,
3318 linkinfo[IFLA_INFO_DATA],
3319 ops->policy, extack);
3324 if (ops->validate) {
3325 err = ops->validate(tb, data, extack);
3333 if (m_ops->slave_maxtype > RTNL_SLAVE_MAX_TYPE)
3336 if (m_ops->slave_maxtype &&
3337 linkinfo[IFLA_INFO_SLAVE_DATA]) {
3338 err = nla_parse_nested_deprecated(slave_attr,
3339 m_ops->slave_maxtype,
3340 linkinfo[IFLA_INFO_SLAVE_DATA],
3341 m_ops->slave_policy,
3345 slave_data = slave_attr;
3352 if (nlh->nlmsg_flags & NLM_F_EXCL)
3354 if (nlh->nlmsg_flags & NLM_F_REPLACE)
3357 if (linkinfo[IFLA_INFO_DATA]) {
3358 if (!ops || ops != dev->rtnl_link_ops ||
3362 err = ops->changelink(dev, tb, data, extack);
3365 status |= DO_SETLINK_NOTIFY;
3368 if (linkinfo[IFLA_INFO_SLAVE_DATA]) {
3369 if (!m_ops || !m_ops->slave_changelink)
3372 err = m_ops->slave_changelink(master_dev, dev, tb,
3373 slave_data, extack);
3376 status |= DO_SETLINK_NOTIFY;
3379 return do_setlink(skb, dev, ifm, extack, tb, ifname, status);
3382 if (!(nlh->nlmsg_flags & NLM_F_CREATE)) {
3383 if (ifm->ifi_index == 0 && tb[IFLA_GROUP])
3384 return rtnl_group_changelink(skb, net,
3385 nla_get_u32(tb[IFLA_GROUP]),
3390 if (tb[IFLA_MAP] || tb[IFLA_PROTINFO])
3394 #ifdef CONFIG_MODULES
3397 request_module("rtnl-link-%s", kind);
3399 ops = rtnl_link_ops_get(kind);
3404 NL_SET_ERR_MSG(extack, "Unknown device type");
3412 snprintf(ifname, IFNAMSIZ, "%s%%d", ops->kind);
3413 name_assign_type = NET_NAME_ENUM;
3416 dest_net = rtnl_link_get_net_capable(skb, net, tb, CAP_NET_ADMIN);
3417 if (IS_ERR(dest_net))
3418 return PTR_ERR(dest_net);
3420 if (tb[IFLA_LINK_NETNSID]) {
3421 int id = nla_get_s32(tb[IFLA_LINK_NETNSID]);
3423 link_net = get_net_ns_by_id(dest_net, id);
3425 NL_SET_ERR_MSG(extack, "Unknown network namespace id");
3430 if (!netlink_ns_capable(skb, link_net->user_ns, CAP_NET_ADMIN))
3436 dev = rtnl_create_link(link_net ? : dest_net, ifname,
3437 name_assign_type, ops, tb, extack);
3443 dev->ifindex = ifm->ifi_index;
3446 err = ops->newlink(link_net ? : net, dev, tb, data, extack);
3448 err = register_netdevice(dev);
3454 err = rtnl_configure_link(dev, ifm);
3456 goto out_unregister;
3458 err = dev_change_net_namespace(dev, dest_net, ifname);
3460 goto out_unregister;
3462 if (tb[IFLA_MASTER]) {
3463 err = do_set_master(dev, nla_get_u32(tb[IFLA_MASTER]), extack);
3465 goto out_unregister;
3474 LIST_HEAD(list_kill);
3476 ops->dellink(dev, &list_kill);
3477 unregister_netdevice_many(&list_kill);
3479 unregister_netdevice(dev);
3484 static int rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh,
3485 struct netlink_ext_ack *extack)
3487 struct nlattr **attr;
3490 attr = kmalloc_array(RTNL_MAX_TYPE + 1, sizeof(*attr), GFP_KERNEL);
3494 ret = __rtnl_newlink(skb, nlh, attr, extack);
3499 static int rtnl_valid_getlink_req(struct sk_buff *skb,
3500 const struct nlmsghdr *nlh,
3502 struct netlink_ext_ack *extack)
3504 struct ifinfomsg *ifm;
3507 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifm))) {
3508 NL_SET_ERR_MSG(extack, "Invalid header for get link");
3512 if (!netlink_strict_get_check(skb))
3513 return nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFLA_MAX,
3514 ifla_policy, extack);
3516 ifm = nlmsg_data(nlh);
3517 if (ifm->__ifi_pad || ifm->ifi_type || ifm->ifi_flags ||
3519 NL_SET_ERR_MSG(extack, "Invalid values in header for get link request");
3523 err = nlmsg_parse_deprecated_strict(nlh, sizeof(*ifm), tb, IFLA_MAX,
3524 ifla_policy, extack);
3528 for (i = 0; i <= IFLA_MAX; i++) {
3534 case IFLA_ALT_IFNAME:
3536 case IFLA_TARGET_NETNSID:
3539 NL_SET_ERR_MSG(extack, "Unsupported attribute in get link request");
3547 static int rtnl_getlink(struct sk_buff *skb, struct nlmsghdr *nlh,
3548 struct netlink_ext_ack *extack)
3550 struct net *net = sock_net(skb->sk);
3551 struct net *tgt_net = net;
3552 struct ifinfomsg *ifm;
3553 struct nlattr *tb[IFLA_MAX+1];
3554 struct net_device *dev = NULL;
3555 struct sk_buff *nskb;
3558 u32 ext_filter_mask = 0;
3560 err = rtnl_valid_getlink_req(skb, nlh, tb, extack);
3564 err = rtnl_ensure_unique_netns(tb, extack, true);
3568 if (tb[IFLA_TARGET_NETNSID]) {
3569 netnsid = nla_get_s32(tb[IFLA_TARGET_NETNSID]);
3570 tgt_net = rtnl_get_net_ns_capable(NETLINK_CB(skb).sk, netnsid);
3571 if (IS_ERR(tgt_net))
3572 return PTR_ERR(tgt_net);
3575 if (tb[IFLA_EXT_MASK])
3576 ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]);
3579 ifm = nlmsg_data(nlh);
3580 if (ifm->ifi_index > 0)
3581 dev = __dev_get_by_index(tgt_net, ifm->ifi_index);
3582 else if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME])
3583 dev = rtnl_dev_get(tgt_net, tb[IFLA_IFNAME],
3584 tb[IFLA_ALT_IFNAME], NULL);
3593 nskb = nlmsg_new(if_nlmsg_size(dev, ext_filter_mask), GFP_KERNEL);
3597 err = rtnl_fill_ifinfo(nskb, dev, net,
3598 RTM_NEWLINK, NETLINK_CB(skb).portid,
3599 nlh->nlmsg_seq, 0, 0, ext_filter_mask,
3600 0, NULL, 0, netnsid, GFP_KERNEL);
3602 /* -EMSGSIZE implies BUG in if_nlmsg_size() */
3603 WARN_ON(err == -EMSGSIZE);
3606 err = rtnl_unicast(nskb, net, NETLINK_CB(skb).portid);
3614 static int rtnl_alt_ifname(int cmd, struct net_device *dev, struct nlattr *attr,
3615 bool *changed, struct netlink_ext_ack *extack)
3621 err = nla_validate(attr, attr->nla_len, IFLA_MAX, ifla_policy, extack);
3625 if (cmd == RTM_NEWLINKPROP) {
3626 size = rtnl_prop_list_size(dev);
3627 size += nla_total_size(ALTIFNAMSIZ);
3628 if (size >= U16_MAX) {
3629 NL_SET_ERR_MSG(extack,
3630 "effective property list too long");
3635 alt_ifname = nla_strdup(attr, GFP_KERNEL_ACCOUNT);
3639 if (cmd == RTM_NEWLINKPROP) {
3640 err = netdev_name_node_alt_create(dev, alt_ifname);
3643 } else if (cmd == RTM_DELLINKPROP) {
3644 err = netdev_name_node_alt_destroy(dev, alt_ifname);
3656 static int rtnl_linkprop(int cmd, struct sk_buff *skb, struct nlmsghdr *nlh,
3657 struct netlink_ext_ack *extack)
3659 struct net *net = sock_net(skb->sk);
3660 struct nlattr *tb[IFLA_MAX + 1];
3661 struct net_device *dev;
3662 struct ifinfomsg *ifm;
3663 bool changed = false;
3664 struct nlattr *attr;
3667 err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy, extack);
3671 err = rtnl_ensure_unique_netns(tb, extack, true);
3675 ifm = nlmsg_data(nlh);
3676 if (ifm->ifi_index > 0)
3677 dev = __dev_get_by_index(net, ifm->ifi_index);
3678 else if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME])
3679 dev = rtnl_dev_get(net, tb[IFLA_IFNAME],
3680 tb[IFLA_ALT_IFNAME], NULL);
3687 if (!tb[IFLA_PROP_LIST])
3690 nla_for_each_nested(attr, tb[IFLA_PROP_LIST], rem) {
3691 switch (nla_type(attr)) {
3692 case IFLA_ALT_IFNAME:
3693 err = rtnl_alt_ifname(cmd, dev, attr, &changed, extack);
3701 netdev_state_change(dev);
3705 static int rtnl_newlinkprop(struct sk_buff *skb, struct nlmsghdr *nlh,
3706 struct netlink_ext_ack *extack)
3708 return rtnl_linkprop(RTM_NEWLINKPROP, skb, nlh, extack);
3711 static int rtnl_dellinkprop(struct sk_buff *skb, struct nlmsghdr *nlh,
3712 struct netlink_ext_ack *extack)
3714 return rtnl_linkprop(RTM_DELLINKPROP, skb, nlh, extack);
3717 static u32 rtnl_calcit(struct sk_buff *skb, struct nlmsghdr *nlh)
3719 struct net *net = sock_net(skb->sk);
3720 size_t min_ifinfo_dump_size = 0;
3721 struct nlattr *tb[IFLA_MAX+1];
3722 u32 ext_filter_mask = 0;
3723 struct net_device *dev;
3726 /* Same kernel<->userspace interface hack as in rtnl_dump_ifinfo. */
3727 hdrlen = nlmsg_len(nlh) < sizeof(struct ifinfomsg) ?
3728 sizeof(struct rtgenmsg) : sizeof(struct ifinfomsg);
3730 if (nlmsg_parse_deprecated(nlh, hdrlen, tb, IFLA_MAX, ifla_policy, NULL) >= 0) {
3731 if (tb[IFLA_EXT_MASK])
3732 ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]);
3735 if (!ext_filter_mask)
3736 return NLMSG_GOODSIZE;
3738 * traverse the list of net devices and compute the minimum
3739 * buffer size based upon the filter mask.
3742 for_each_netdev_rcu(net, dev) {
3743 min_ifinfo_dump_size = max(min_ifinfo_dump_size,
3744 if_nlmsg_size(dev, ext_filter_mask));
3748 return nlmsg_total_size(min_ifinfo_dump_size);
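/* Added annotation: rtnl_calcit() exists because a GETLINK dump with an
 * IFLA_EXT_MASK filter can produce per-device messages larger than the
 * default NLMSG_GOODSIZE skb. rtnetlink_rcv_msg() passes the result to
 * netlink_dump_start() as min_dump_alloc, so every dump skb has room for
 * at least one full message.
 */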
3751 static int rtnl_dump_all(struct sk_buff *skb, struct netlink_callback *cb)
3754 int s_idx = cb->family;
3755 int type = cb->nlh->nlmsg_type - RTM_BASE;
3761 for (idx = 1; idx <= RTNL_FAMILY_MAX; idx++) {
3762 struct rtnl_link **tab;
3763 struct rtnl_link *link;
3764 rtnl_dumpit_func dumpit;
3766 if (idx < s_idx || idx == PF_PACKET)
3769 if (type < 0 || type >= RTM_NR_MSGTYPES)
3772 tab = rcu_dereference_rtnl(rtnl_msg_handlers[idx]);
3780 dumpit = link->dumpit;
3785 memset(&cb->args[0], 0, sizeof(cb->args));
3789 ret = dumpit(skb, cb);
3795 return skb->len ? : ret;
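/* Added annotation: "skb->len ? : ret" uses the GNU ?: extension; a
 * partially filled skb reports its length so netlink keeps the dump
 * going, while an empty skb propagates the dumpit return code instead.
 */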
3798 struct sk_buff *rtmsg_ifinfo_build_skb(int type, struct net_device *dev,
3799 unsigned int change,
3800 u32 event, gfp_t flags, int *new_nsid,
3803 struct net *net = dev_net(dev);
3804 struct sk_buff *skb;
3806 size_t if_info_size;
3808 skb = nlmsg_new((if_info_size = if_nlmsg_size(dev, 0)), flags);
3812 err = rtnl_fill_ifinfo(skb, dev, dev_net(dev),
3813 type, 0, 0, change, 0, 0, event,
3814 new_nsid, new_ifindex, -1, flags);
3816 /* -EMSGSIZE implies BUG in if_nlmsg_size() */
3817 WARN_ON(err == -EMSGSIZE);
3824 rtnl_set_sk_err(net, RTNLGRP_LINK, err);
3828 void rtmsg_ifinfo_send(struct sk_buff *skb, struct net_device *dev, gfp_t flags)
3830 struct net *net = dev_net(dev);
3832 rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, flags);
3835 static void rtmsg_ifinfo_event(int type, struct net_device *dev,
3836 unsigned int change, u32 event,
3837 gfp_t flags, int *new_nsid, int new_ifindex)
3839 struct sk_buff *skb;
3841 if (dev->reg_state != NETREG_REGISTERED)
3844 skb = rtmsg_ifinfo_build_skb(type, dev, change, event, flags, new_nsid,
3847 rtmsg_ifinfo_send(skb, dev, flags);
3850 void rtmsg_ifinfo(int type, struct net_device *dev, unsigned int change,
3853 rtmsg_ifinfo_event(type, dev, change, rtnl_get_event(0), flags,
3857 void rtmsg_ifinfo_newnet(int type, struct net_device *dev, unsigned int change,
3858 gfp_t flags, int *new_nsid, int new_ifindex)
3860 rtmsg_ifinfo_event(type, dev, change, rtnl_get_event(0), flags,
3861 new_nsid, new_ifindex);
3864 static int nlmsg_populate_fdb_fill(struct sk_buff *skb,
3865 struct net_device *dev,
3866 u8 *addr, u16 vid, u32 pid, u32 seq,
3867 int type, unsigned int flags,
3868 int nlflags, u16 ndm_state)
3870 struct nlmsghdr *nlh;
3873 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), nlflags);
3877 ndm = nlmsg_data(nlh);
3878 ndm->ndm_family = AF_BRIDGE;
3881 ndm->ndm_flags = flags;
3883 ndm->ndm_ifindex = dev->ifindex;
3884 ndm->ndm_state = ndm_state;
3886 if (nla_put(skb, NDA_LLADDR, ETH_ALEN, addr))
3887 goto nla_put_failure;
3889 if (nla_put(skb, NDA_VLAN, sizeof(u16), &vid))
3890 goto nla_put_failure;
3892 nlmsg_end(skb, nlh);
3896 nlmsg_cancel(skb, nlh);
3900 static inline size_t rtnl_fdb_nlmsg_size(void)
3902 return NLMSG_ALIGN(sizeof(struct ndmsg)) +
3903 nla_total_size(ETH_ALEN) + /* NDA_LLADDR */
3904 nla_total_size(sizeof(u16)) + /* NDA_VLAN */
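/* Added annotation, back-of-the-envelope arithmetic (assuming the usual
 * 4-byte netlink alignment): NLMSG_ALIGN(sizeof(struct ndmsg)) is 12,
 * nla_total_size(ETH_ALEN) is 12 and nla_total_size(sizeof(u16)) is 8,
 * so each FDB notification payload comes to roughly 32 bytes.
 */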
3908 static void rtnl_fdb_notify(struct net_device *dev, u8 *addr, u16 vid, int type,
3911 struct net *net = dev_net(dev);
3912 struct sk_buff *skb;
3915 skb = nlmsg_new(rtnl_fdb_nlmsg_size(), GFP_ATOMIC);
3919 err = nlmsg_populate_fdb_fill(skb, dev, addr, vid,
3920 0, 0, type, NTF_SELF, 0, ndm_state);
3926 rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
3929 rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
3933 * ndo_dflt_fdb_add - default netdevice operation to add an FDB entry
3935 int ndo_dflt_fdb_add(struct ndmsg *ndm,
3936 struct nlattr *tb[],
3937 struct net_device *dev,
3938 const unsigned char *addr, u16 vid,
3943 /* If aging addresses are supported, the device will need to
3944 * implement its own handler for this.
3946 if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
3947 pr_info("%s: FDB only supports static addresses\n", dev->name);
3952 pr_info("%s: vlans aren't supported yet for dev_uc|mc_add()\n", dev->name);
3956 if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
3957 err = dev_uc_add_excl(dev, addr);
3958 else if (is_multicast_ether_addr(addr))
3959 err = dev_mc_add_excl(dev, addr);
3961 /* Only return duplicate errors if NLM_F_EXCL is set */
3962 if (err == -EEXIST && !(flags & NLM_F_EXCL))
3967 EXPORT_SYMBOL(ndo_dflt_fdb_add);
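/* Illustrative sketch (hypothetical driver, not from this file): a device
 * without FDB offload of its own can fall back to the default helper from
 * its ndo_fdb_add implementation:
 *
 *	static int foo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
 *			       struct net_device *dev,
 *			       const unsigned char *addr, u16 vid,
 *			       u16 flags, struct netlink_ext_ack *extack)
 *	{
 *		return ndo_dflt_fdb_add(ndm, tb, dev, addr, vid, flags);
 *	}
 */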
3969 static int fdb_vid_parse(struct nlattr *vlan_attr, u16 *p_vid,
3970 struct netlink_ext_ack *extack)
3975 if (nla_len(vlan_attr) != sizeof(u16)) {
3976 NL_SET_ERR_MSG(extack, "invalid vlan attribute size");
3980 vid = nla_get_u16(vlan_attr);
3982 if (!vid || vid >= VLAN_VID_MASK) {
3983 NL_SET_ERR_MSG(extack, "invalid vlan id");
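/* Added annotation: with VLAN_VID_MASK == 0xfff, the checks above accept
 * only VIDs 1..4094; VID 0 ("no VLAN") and 4095 (reserved) are rejected.
 */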
3991 static int rtnl_fdb_add(struct sk_buff *skb, struct nlmsghdr *nlh,
3992 struct netlink_ext_ack *extack)
3994 struct net *net = sock_net(skb->sk);
3996 struct nlattr *tb[NDA_MAX+1];
3997 struct net_device *dev;
4002 err = nlmsg_parse_deprecated(nlh, sizeof(*ndm), tb, NDA_MAX, NULL,
4007 ndm = nlmsg_data(nlh);
4008 if (ndm->ndm_ifindex == 0) {
4009 NL_SET_ERR_MSG(extack, "invalid ifindex");
4013 dev = __dev_get_by_index(net, ndm->ndm_ifindex);
4015 NL_SET_ERR_MSG(extack, "unknown ifindex");
4019 if (!tb[NDA_LLADDR] || nla_len(tb[NDA_LLADDR]) != ETH_ALEN) {
4020 NL_SET_ERR_MSG(extack, "invalid address");
4024 if (dev->type != ARPHRD_ETHER) {
4025 NL_SET_ERR_MSG(extack, "FDB add only supported for Ethernet devices");
4029 addr = nla_data(tb[NDA_LLADDR]);
4031 err = fdb_vid_parse(tb[NDA_VLAN], &vid, extack);
4037 /* Support fdb on master device, the net/bridge default case */
4038 if ((!ndm->ndm_flags || ndm->ndm_flags & NTF_MASTER) &&
4039 netif_is_bridge_port(dev)) {
4040 struct net_device *br_dev = netdev_master_upper_dev_get(dev);
4041 const struct net_device_ops *ops = br_dev->netdev_ops;
4043 err = ops->ndo_fdb_add(ndm, tb, dev, addr, vid,
4044 nlh->nlmsg_flags, extack);
4048 ndm->ndm_flags &= ~NTF_MASTER;
4051 /* Embedded bridge, macvlan, and any other device support */
4052 if ((ndm->ndm_flags & NTF_SELF)) {
4053 if (dev->netdev_ops->ndo_fdb_add)
4054 err = dev->netdev_ops->ndo_fdb_add(ndm, tb, dev, addr,
4059 err = ndo_dflt_fdb_add(ndm, tb, dev, addr, vid,
4063 rtnl_fdb_notify(dev, addr, vid, RTM_NEWNEIGH,
4065 ndm->ndm_flags &= ~NTF_SELF;
4073 * ndo_dflt_fdb_del - default netdevice operation to delete an FDB entry
4075 int ndo_dflt_fdb_del(struct ndmsg *ndm,
4076 struct nlattr *tb[],
4077 struct net_device *dev,
4078 const unsigned char *addr, u16 vid)
4082 /* If aging addresses are supported, the device will need to
4083 * implement its own handler for this.
4085 if (!(ndm->ndm_state & NUD_PERMANENT)) {
4086 pr_info("%s: FDB only supports static addresses\n", dev->name);
4090 if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
4091 err = dev_uc_del(dev, addr);
4092 else if (is_multicast_ether_addr(addr))
4093 err = dev_mc_del(dev, addr);
4097 EXPORT_SYMBOL(ndo_dflt_fdb_del);
4099 static int rtnl_fdb_del(struct sk_buff *skb, struct nlmsghdr *nlh,
4100 struct netlink_ext_ack *extack)
4102 struct net *net = sock_net(skb->sk);
4104 struct nlattr *tb[NDA_MAX+1];
4105 struct net_device *dev;
4110 if (!netlink_capable(skb, CAP_NET_ADMIN))
4113 err = nlmsg_parse_deprecated(nlh, sizeof(*ndm), tb, NDA_MAX, NULL,
4118 ndm = nlmsg_data(nlh);
4119 if (ndm->ndm_ifindex == 0) {
4120 NL_SET_ERR_MSG(extack, "invalid ifindex");
4124 dev = __dev_get_by_index(net, ndm->ndm_ifindex);
4126 NL_SET_ERR_MSG(extack, "unknown ifindex");
4130 if (!tb[NDA_LLADDR] || nla_len(tb[NDA_LLADDR]) != ETH_ALEN) {
4131 NL_SET_ERR_MSG(extack, "invalid address");
4135 if (dev->type != ARPHRD_ETHER) {
4136 NL_SET_ERR_MSG(extack, "FDB delete only supported for Ethernet devices");
4140 addr = nla_data(tb[NDA_LLADDR]);
4142 err = fdb_vid_parse(tb[NDA_VLAN], &vid, extack);
4148 /* Support fdb on master device, the net/bridge default case */
4149 if ((!ndm->ndm_flags || ndm->ndm_flags & NTF_MASTER) &&
4150 netif_is_bridge_port(dev)) {
4151 struct net_device *br_dev = netdev_master_upper_dev_get(dev);
4152 const struct net_device_ops *ops = br_dev->netdev_ops;
4154 if (ops->ndo_fdb_del)
4155 err = ops->ndo_fdb_del(ndm, tb, dev, addr, vid);
4160 ndm->ndm_flags &= ~NTF_MASTER;
4163 /* Embedded bridge, macvlan, and any other device support */
4164 if (ndm->ndm_flags & NTF_SELF) {
4165 if (dev->netdev_ops->ndo_fdb_del)
4166 err = dev->netdev_ops->ndo_fdb_del(ndm, tb, dev, addr,
4169 err = ndo_dflt_fdb_del(ndm, tb, dev, addr, vid);
4172 rtnl_fdb_notify(dev, addr, vid, RTM_DELNEIGH,
4174 ndm->ndm_flags &= ~NTF_SELF;
4181 static int nlmsg_populate_fdb(struct sk_buff *skb,
4182 struct netlink_callback *cb,
4183 struct net_device *dev,
4185 struct netdev_hw_addr_list *list)
4187 struct netdev_hw_addr *ha;
4191 portid = NETLINK_CB(cb->skb).portid;
4192 seq = cb->nlh->nlmsg_seq;
4194 list_for_each_entry(ha, &list->list, list) {
4195 if (*idx < cb->args[2])
4198 err = nlmsg_populate_fdb_fill(skb, dev, ha->addr, 0,
4200 RTM_NEWNEIGH, NTF_SELF,
4201 NLM_F_MULTI, NUD_PERMANENT);
4211 * ndo_dflt_fdb_dump - default netdevice operation to dump an FDB table.
4212 * @skb: socket buffer to store message in
4213 * @cb: netlink callback
4214 * @dev: netdevice
4215 * @filter_dev: ignored
4216 * @idx: the number of FDB table entries dumped is added to *@idx
4218 * Default netdevice operation to dump the existing unicast address list.
4219 * Returns the number of addresses from the list that were put in @skb.
4221 int ndo_dflt_fdb_dump(struct sk_buff *skb,
4222 struct netlink_callback *cb,
4223 struct net_device *dev,
4224 struct net_device *filter_dev,
4229 if (dev->type != ARPHRD_ETHER)
4232 netif_addr_lock_bh(dev);
4233 err = nlmsg_populate_fdb(skb, cb, dev, idx, &dev->uc);
4236 err = nlmsg_populate_fdb(skb, cb, dev, idx, &dev->mc);
4238 netif_addr_unlock_bh(dev);
4241 EXPORT_SYMBOL(ndo_dflt_fdb_dump);
4243 static int valid_fdb_dump_strict(const struct nlmsghdr *nlh,
4244 int *br_idx, int *brport_idx,
4245 struct netlink_ext_ack *extack)
4247 struct nlattr *tb[NDA_MAX + 1];
4251 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ndm))) {
4252 NL_SET_ERR_MSG(extack, "Invalid header for fdb dump request");
4256 ndm = nlmsg_data(nlh);
4257 if (ndm->ndm_pad1 || ndm->ndm_pad2 || ndm->ndm_state ||
4258 ndm->ndm_flags || ndm->ndm_type) {
4259 NL_SET_ERR_MSG(extack, "Invalid values in header for fdb dump request");
4263 err = nlmsg_parse_deprecated_strict(nlh, sizeof(struct ndmsg), tb,
4264 NDA_MAX, NULL, extack);
4268 *brport_idx = ndm->ndm_ifindex;
4269 for (i = 0; i <= NDA_MAX; ++i) {
4275 if (nla_len(tb[i]) != sizeof(u32)) {
4276 NL_SET_ERR_MSG(extack, "Invalid IFINDEX attribute in fdb dump request");
4279 *brport_idx = nla_get_u32(tb[NDA_IFINDEX]);
4282 if (nla_len(tb[i]) != sizeof(u32)) {
4283 NL_SET_ERR_MSG(extack, "Invalid MASTER attribute in fdb dump request");
4286 *br_idx = nla_get_u32(tb[NDA_MASTER]);
4289 NL_SET_ERR_MSG(extack, "Unsupported attribute in fdb dump request");
4297 static int valid_fdb_dump_legacy(const struct nlmsghdr *nlh,
4298 int *br_idx, int *brport_idx,
4299 struct netlink_ext_ack *extack)
4301 struct nlattr *tb[IFLA_MAX+1];
4304 /* A hack to preserve kernel<->userspace interface.
4305 * Before Linux v4.12 this code accepted ndmsg since iproute2 v3.3.0.
4306 * However, ndmsg is shorter than ifinfomsg, so nlmsg_parse() bails.
4307 * So, check for ndmsg with an optional u32 attribute (not used here).
4308 * Fortunately these sizes don't conflict with the size of ifinfomsg
4309 * with an optional attribute.
4311 if (nlmsg_len(nlh) != sizeof(struct ndmsg) &&
4312 (nlmsg_len(nlh) != sizeof(struct ndmsg) +
4313 nla_attr_size(sizeof(u32)))) {
4314 struct ifinfomsg *ifm;
4316 err = nlmsg_parse_deprecated(nlh, sizeof(struct ifinfomsg),
4317 tb, IFLA_MAX, ifla_policy,
4321 } else if (err == 0) {
4322 if (tb[IFLA_MASTER])
4323 *br_idx = nla_get_u32(tb[IFLA_MASTER]);
4326 ifm = nlmsg_data(nlh);
4327 *brport_idx = ifm->ifi_index;
4332 static int rtnl_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb)
4334 struct net_device *dev;
4335 struct net_device *br_dev = NULL;
4336 const struct net_device_ops *ops = NULL;
4337 const struct net_device_ops *cops = NULL;
4338 struct net *net = sock_net(skb->sk);
4339 struct hlist_head *head;
4347 if (cb->strict_check)
4348 err = valid_fdb_dump_strict(cb->nlh, &br_idx, &brport_idx,
4351 err = valid_fdb_dump_legacy(cb->nlh, &br_idx, &brport_idx,
4357 br_dev = __dev_get_by_index(net, br_idx);
4361 ops = br_dev->netdev_ops;
4365 s_idx = cb->args[1];
4367 for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
4369 head = &net->dev_index_head[h];
4370 hlist_for_each_entry(dev, head, index_hlist) {
4372 if (brport_idx && (dev->ifindex != brport_idx))
4375 if (!br_idx) { /* user did not specify a specific bridge */
4376 if (netif_is_bridge_port(dev)) {
4377 br_dev = netdev_master_upper_dev_get(dev);
4378 cops = br_dev->netdev_ops;
4381 if (dev != br_dev &&
4382 !netif_is_bridge_port(dev))
4385 if (br_dev != netdev_master_upper_dev_get(dev) &&
4386 !(dev->priv_flags & IFF_EBRIDGE))
4394 if (netif_is_bridge_port(dev)) {
4395 if (cops && cops->ndo_fdb_dump) {
4396 err = cops->ndo_fdb_dump(skb, cb,
4399 if (err == -EMSGSIZE)
4404 if (dev->netdev_ops->ndo_fdb_dump)
4405 err = dev->netdev_ops->ndo_fdb_dump(skb, cb,
4409 err = ndo_dflt_fdb_dump(skb, cb, dev, NULL,
4411 if (err == -EMSGSIZE)
4416 /* reset fdb offset to 0 for the rest of the interfaces */
4432 static int valid_fdb_get_strict(const struct nlmsghdr *nlh,
4433 struct nlattr **tb, u8 *ndm_flags,
4434 int *br_idx, int *brport_idx, u8 **addr,
4435 u16 *vid, struct netlink_ext_ack *extack)
4440 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ndm))) {
4441 NL_SET_ERR_MSG(extack, "Invalid header for fdb get request");
4445 ndm = nlmsg_data(nlh);
4446 if (ndm->ndm_pad1 || ndm->ndm_pad2 || ndm->ndm_state ||
4448 NL_SET_ERR_MSG(extack, "Invalid values in header for fdb get request");
4452 if (ndm->ndm_flags & ~(NTF_MASTER | NTF_SELF)) {
4453 NL_SET_ERR_MSG(extack, "Invalid flags in header for fdb get request");
4457 err = nlmsg_parse_deprecated_strict(nlh, sizeof(struct ndmsg), tb,
4458 NDA_MAX, nda_policy, extack);
4462 *ndm_flags = ndm->ndm_flags;
4463 *brport_idx = ndm->ndm_ifindex;
4464 for (i = 0; i <= NDA_MAX; ++i) {
4470 *br_idx = nla_get_u32(tb[i]);
4473 if (nla_len(tb[i]) != ETH_ALEN) {
4474 NL_SET_ERR_MSG(extack, "Invalid address in fdb get request");
4477 *addr = nla_data(tb[i]);
4480 err = fdb_vid_parse(tb[i], vid, extack);
4487 NL_SET_ERR_MSG(extack, "Unsupported attribute in fdb get request");
4495 static int rtnl_fdb_get(struct sk_buff *in_skb, struct nlmsghdr *nlh,
4496 struct netlink_ext_ack *extack)
4498 struct net_device *dev = NULL, *br_dev = NULL;
4499 const struct net_device_ops *ops = NULL;
4500 struct net *net = sock_net(in_skb->sk);
4501 struct nlattr *tb[NDA_MAX + 1];
4502 struct sk_buff *skb;
4510 err = valid_fdb_get_strict(nlh, tb, &ndm_flags, &br_idx,
4511 &brport_idx, &addr, &vid, extack);
4516 NL_SET_ERR_MSG(extack, "Missing lookup address for fdb get request");
4521 dev = __dev_get_by_index(net, brport_idx);
4523 NL_SET_ERR_MSG(extack, "Unknown device ifindex");
4530 NL_SET_ERR_MSG(extack, "Master and device are mutually exclusive");
4534 br_dev = __dev_get_by_index(net, br_idx);
4536 NL_SET_ERR_MSG(extack, "Invalid master ifindex");
4539 ops = br_dev->netdev_ops;
4543 if (!ndm_flags || (ndm_flags & NTF_MASTER)) {
4544 if (!netif_is_bridge_port(dev)) {
4545 NL_SET_ERR_MSG(extack, "Device is not a bridge port");
4548 br_dev = netdev_master_upper_dev_get(dev);
4550 NL_SET_ERR_MSG(extack, "Master of device not found");
4553 ops = br_dev->netdev_ops;
4555 if (!(ndm_flags & NTF_SELF)) {
4556 NL_SET_ERR_MSG(extack, "Missing NTF_SELF");
4559 ops = dev->netdev_ops;
4563 if (!br_dev && !dev) {
4564 NL_SET_ERR_MSG(extack, "No device specified");
4568 if (!ops || !ops->ndo_fdb_get) {
4569 NL_SET_ERR_MSG(extack, "Fdb get operation not supported by device");
4573 skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
4579 err = ops->ndo_fdb_get(skb, tb, dev, addr, vid,
4580 NETLINK_CB(in_skb).portid,
4581 nlh->nlmsg_seq, extack);
4585 return rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
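/* Added annotation: the helper below emits an IFLA_BRPORT_* attribute as
 * a u8 boolean only when the corresponding flag bit is set in the
 * caller-supplied mask, which keeps ndo_dflt_bridge_getlink() dumps
 * minimal.
 */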
4591 static int brport_nla_put_flag(struct sk_buff *skb, u32 flags, u32 mask,
4592 unsigned int attrnum, unsigned int flag)
4595 return nla_put_u8(skb, attrnum, !!(flags & flag));
4599 int ndo_dflt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
4600 struct net_device *dev, u16 mode,
4601 u32 flags, u32 mask, int nlflags,
4603 int (*vlan_fill)(struct sk_buff *skb,
4604 struct net_device *dev,
4607 struct nlmsghdr *nlh;
4608 struct ifinfomsg *ifm;
4609 struct nlattr *br_afspec;
4610 struct nlattr *protinfo;
4611 u8 operstate = netif_running(dev) ? dev->operstate : IF_OPER_DOWN;
4612 struct net_device *br_dev = netdev_master_upper_dev_get(dev);
4615 nlh = nlmsg_put(skb, pid, seq, RTM_NEWLINK, sizeof(*ifm), nlflags);
4619 ifm = nlmsg_data(nlh);
4620 ifm->ifi_family = AF_BRIDGE;
4622 ifm->ifi_type = dev->type;
4623 ifm->ifi_index = dev->ifindex;
4624 ifm->ifi_flags = dev_get_flags(dev);
4625 ifm->ifi_change = 0;
4628 if (nla_put_string(skb, IFLA_IFNAME, dev->name) ||
4629 nla_put_u32(skb, IFLA_MTU, dev->mtu) ||
4630 nla_put_u8(skb, IFLA_OPERSTATE, operstate) ||
4632 nla_put_u32(skb, IFLA_MASTER, br_dev->ifindex)) ||
4634 nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr)) ||
4635 (dev->ifindex != dev_get_iflink(dev) &&
4636 nla_put_u32(skb, IFLA_LINK, dev_get_iflink(dev))))
4637 goto nla_put_failure;
4639 br_afspec = nla_nest_start_noflag(skb, IFLA_AF_SPEC);
4641 goto nla_put_failure;
4643 if (nla_put_u16(skb, IFLA_BRIDGE_FLAGS, BRIDGE_FLAGS_SELF)) {
4644 nla_nest_cancel(skb, br_afspec);
4645 goto nla_put_failure;
4648 if (mode != BRIDGE_MODE_UNDEF) {
4649 if (nla_put_u16(skb, IFLA_BRIDGE_MODE, mode)) {
4650 nla_nest_cancel(skb, br_afspec);
4651 goto nla_put_failure;
4655 err = vlan_fill(skb, dev, filter_mask);
4657 nla_nest_cancel(skb, br_afspec);
4658 goto nla_put_failure;
4661 nla_nest_end(skb, br_afspec);
4663 protinfo = nla_nest_start(skb, IFLA_PROTINFO);
4665 goto nla_put_failure;
4667 if (brport_nla_put_flag(skb, flags, mask,
4668 IFLA_BRPORT_MODE, BR_HAIRPIN_MODE) ||
4669 brport_nla_put_flag(skb, flags, mask,
4670 IFLA_BRPORT_GUARD, BR_BPDU_GUARD) ||
4671 brport_nla_put_flag(skb, flags, mask,
4672 IFLA_BRPORT_FAST_LEAVE,
4673 BR_MULTICAST_FAST_LEAVE) ||
4674 brport_nla_put_flag(skb, flags, mask,
4675 IFLA_BRPORT_PROTECT, BR_ROOT_BLOCK) ||
4676 brport_nla_put_flag(skb, flags, mask,
4677 IFLA_BRPORT_LEARNING, BR_LEARNING) ||
4678 brport_nla_put_flag(skb, flags, mask,
4679 IFLA_BRPORT_LEARNING_SYNC, BR_LEARNING_SYNC) ||
4680 brport_nla_put_flag(skb, flags, mask,
4681 IFLA_BRPORT_UNICAST_FLOOD, BR_FLOOD) ||
4682 brport_nla_put_flag(skb, flags, mask,
4683 IFLA_BRPORT_PROXYARP, BR_PROXYARP) ||
4684 brport_nla_put_flag(skb, flags, mask,
4685 IFLA_BRPORT_MCAST_FLOOD, BR_MCAST_FLOOD) ||
4686 brport_nla_put_flag(skb, flags, mask,
4687 IFLA_BRPORT_BCAST_FLOOD, BR_BCAST_FLOOD)) {
4688 nla_nest_cancel(skb, protinfo);
4689 goto nla_put_failure;
4692 nla_nest_end(skb, protinfo);
4694 nlmsg_end(skb, nlh);
4697 nlmsg_cancel(skb, nlh);
4698 return err ? err : -EMSGSIZE;
4700 EXPORT_SYMBOL_GPL(ndo_dflt_bridge_getlink);
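/* Added annotation: drivers implementing ndo_bridge_getlink can delegate
 * to ndo_dflt_bridge_getlink(); the optional vlan_fill callback, when
 * non-NULL, is invoked inside the IFLA_AF_SPEC nest so the driver can
 * append its own VLAN attributes.
 */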
4702 static int valid_bridge_getlink_req(const struct nlmsghdr *nlh,
4703 bool strict_check, u32 *filter_mask,
4704 struct netlink_ext_ack *extack)
4706 struct nlattr *tb[IFLA_MAX+1];
4710 struct ifinfomsg *ifm;
4712 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifm))) {
4713 NL_SET_ERR_MSG(extack, "Invalid header for bridge link dump");
4717 ifm = nlmsg_data(nlh);
4718 if (ifm->__ifi_pad || ifm->ifi_type || ifm->ifi_flags ||
4719 ifm->ifi_change || ifm->ifi_index) {
4720 NL_SET_ERR_MSG(extack, "Invalid values in header for bridge link dump request");
4724 err = nlmsg_parse_deprecated_strict(nlh,
4725 sizeof(struct ifinfomsg),
4726 tb, IFLA_MAX, ifla_policy,
4729 err = nlmsg_parse_deprecated(nlh, sizeof(struct ifinfomsg),
4730 tb, IFLA_MAX, ifla_policy,
4736 /* new attributes should only be added with strict checking */
4737 for (i = 0; i <= IFLA_MAX; ++i) {
4743 *filter_mask = nla_get_u32(tb[i]);
4747 NL_SET_ERR_MSG(extack, "Unsupported attribute in bridge link dump request");
4756 static int rtnl_bridge_getlink(struct sk_buff *skb, struct netlink_callback *cb)
4758 const struct nlmsghdr *nlh = cb->nlh;
4759 struct net *net = sock_net(skb->sk);
4760 struct net_device *dev;
4762 u32 portid = NETLINK_CB(cb->skb).portid;
4763 u32 seq = nlh->nlmsg_seq;
4764 u32 filter_mask = 0;
4767 err = valid_bridge_getlink_req(nlh, cb->strict_check, &filter_mask,
4769 if (err < 0 && cb->strict_check)
4773 for_each_netdev_rcu(net, dev) {
4774 const struct net_device_ops *ops = dev->netdev_ops;
4775 struct net_device *br_dev = netdev_master_upper_dev_get(dev);
4777 if (br_dev && br_dev->netdev_ops->ndo_bridge_getlink) {
4778 if (idx >= cb->args[0]) {
4779 err = br_dev->netdev_ops->ndo_bridge_getlink(
4780 skb, portid, seq, dev,
4781 filter_mask, NLM_F_MULTI);
4782 if (err < 0 && err != -EOPNOTSUPP) {
4783 if (likely(skb->len))
4792 if (ops->ndo_bridge_getlink) {
4793 if (idx >= cb->args[0]) {
4794 err = ops->ndo_bridge_getlink(skb, portid,
4798 if (err < 0 && err != -EOPNOTSUPP) {
4799 if (likely(skb->len))
4816 static inline size_t bridge_nlmsg_size(void)
4818 return NLMSG_ALIGN(sizeof(struct ifinfomsg))
4819 + nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */
4820 + nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */
4821 + nla_total_size(sizeof(u32)) /* IFLA_MASTER */
4822 + nla_total_size(sizeof(u32)) /* IFLA_MTU */
4823 + nla_total_size(sizeof(u32)) /* IFLA_LINK */
4824 + nla_total_size(sizeof(u32)) /* IFLA_OPERSTATE */
4825 + nla_total_size(sizeof(u8)) /* IFLA_PROTINFO */
4826 + nla_total_size(sizeof(struct nlattr)) /* IFLA_AF_SPEC */
4827 + nla_total_size(sizeof(u16)) /* IFLA_BRIDGE_FLAGS */
4828 + nla_total_size(sizeof(u16)); /* IFLA_BRIDGE_MODE */
4831 static int rtnl_bridge_notify(struct net_device *dev)
4833 struct net *net = dev_net(dev);
4834 struct sk_buff *skb;
4835 int err = -EOPNOTSUPP;
4837 if (!dev->netdev_ops->ndo_bridge_getlink)
4840 skb = nlmsg_new(bridge_nlmsg_size(), GFP_ATOMIC);
4846 err = dev->netdev_ops->ndo_bridge_getlink(skb, 0, 0, dev, 0, 0);
4850 /* Notification info is only filled for bridge ports, not the bridge
4851 * device itself. Therefore, a zero notification length is valid and
4852 * should not result in an error.
4857 rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, GFP_ATOMIC);
4860 WARN_ON(err == -EMSGSIZE);
4863 rtnl_set_sk_err(net, RTNLGRP_LINK, err);
4867 static int rtnl_bridge_setlink(struct sk_buff *skb, struct nlmsghdr *nlh,
4868 struct netlink_ext_ack *extack)
4870 struct net *net = sock_net(skb->sk);
4871 struct ifinfomsg *ifm;
4872 struct net_device *dev;
4873 struct nlattr *br_spec, *attr = NULL;
4874 int rem, err = -EOPNOTSUPP;
4876 bool have_flags = false;
4878 if (nlmsg_len(nlh) < sizeof(*ifm))
4881 ifm = nlmsg_data(nlh);
4882 if (ifm->ifi_family != AF_BRIDGE)
4883 return -EPFNOSUPPORT;
4885 dev = __dev_get_by_index(net, ifm->ifi_index);
4887 NL_SET_ERR_MSG(extack, "unknown ifindex");
4891 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
4893 nla_for_each_nested(attr, br_spec, rem) {
4894 if (nla_type(attr) == IFLA_BRIDGE_FLAGS) {
4895 if (nla_len(attr) < sizeof(flags))
4899 flags = nla_get_u16(attr);
4905 if (!flags || (flags & BRIDGE_FLAGS_MASTER)) {
4906 struct net_device *br_dev = netdev_master_upper_dev_get(dev);
4908 if (!br_dev || !br_dev->netdev_ops->ndo_bridge_setlink) {
4913 err = br_dev->netdev_ops->ndo_bridge_setlink(dev, nlh, flags,
4918 flags &= ~BRIDGE_FLAGS_MASTER;
4921 if ((flags & BRIDGE_FLAGS_SELF)) {
4922 if (!dev->netdev_ops->ndo_bridge_setlink)
4925 err = dev->netdev_ops->ndo_bridge_setlink(dev, nlh,
4929 flags &= ~BRIDGE_FLAGS_SELF;
4931 /* Generate event to notify upper layer of bridge change. */
4934 err = rtnl_bridge_notify(dev);
4939 memcpy(nla_data(attr), &flags, sizeof(flags));
4944 static int rtnl_bridge_dellink(struct sk_buff *skb, struct nlmsghdr *nlh,
4945 struct netlink_ext_ack *extack)
4947 struct net *net = sock_net(skb->sk);
4948 struct ifinfomsg *ifm;
4949 struct net_device *dev;
4950 struct nlattr *br_spec, *attr = NULL;
4951 int rem, err = -EOPNOTSUPP;
4953 bool have_flags = false;
4955 if (nlmsg_len(nlh) < sizeof(*ifm))
4958 ifm = nlmsg_data(nlh);
4959 if (ifm->ifi_family != AF_BRIDGE)
4960 return -EPFNOSUPPORT;
4962 dev = __dev_get_by_index(net, ifm->ifi_index);
4964 NL_SET_ERR_MSG(extack, "unknown ifindex");
4968 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
4970 nla_for_each_nested(attr, br_spec, rem) {
4971 if (nla_type(attr) == IFLA_BRIDGE_FLAGS) {
4972 if (nla_len(attr) < sizeof(flags))
4976 flags = nla_get_u16(attr);
4982 if (!flags || (flags & BRIDGE_FLAGS_MASTER)) {
4983 struct net_device *br_dev = netdev_master_upper_dev_get(dev);
4985 if (!br_dev || !br_dev->netdev_ops->ndo_bridge_dellink) {
4990 err = br_dev->netdev_ops->ndo_bridge_dellink(dev, nlh, flags);
4994 flags &= ~BRIDGE_FLAGS_MASTER;
4997 if ((flags & BRIDGE_FLAGS_SELF)) {
4998 if (!dev->netdev_ops->ndo_bridge_dellink)
5001 err = dev->netdev_ops->ndo_bridge_dellink(dev, nlh,
5005 flags &= ~BRIDGE_FLAGS_SELF;
5007 /* Generate event to notify upper layer of bridge change. */
5010 err = rtnl_bridge_notify(dev);
5015 memcpy(nla_data(attr), &flags, sizeof(flags));
5020 static bool stats_attr_valid(unsigned int mask, int attrid, int idxattr)
5022 return (mask & IFLA_STATS_FILTER_BIT(attrid)) &&
5023 (!idxattr || idxattr == attrid);
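/* Illustrative example (added annotation): IFLA_STATS_FILTER_BIT(attr)
 * expands to (1 << (attr - 1)) in the uapi header, so a request for only
 * the 64-bit link counters would set
 *
 *	filter_mask = IFLA_STATS_FILTER_BIT(IFLA_STATS_LINK_64);
 *
 * and stats_attr_valid() then accepts just that attribute (assuming
 * idxattr is 0, i.e. no dump resume point is pending).
 */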
5026 #define IFLA_OFFLOAD_XSTATS_FIRST (IFLA_OFFLOAD_XSTATS_UNSPEC + 1)
5027 static int rtnl_get_offload_stats_attr_size(int attr_id)
5030 case IFLA_OFFLOAD_XSTATS_CPU_HIT:
5031 return sizeof(struct rtnl_link_stats64);
5037 static int rtnl_get_offload_stats(struct sk_buff *skb, struct net_device *dev,
5040 struct nlattr *attr = NULL;
5045 if (!(dev->netdev_ops && dev->netdev_ops->ndo_has_offload_stats &&
5046 dev->netdev_ops->ndo_get_offload_stats))
5049 for (attr_id = IFLA_OFFLOAD_XSTATS_FIRST;
5050 attr_id <= IFLA_OFFLOAD_XSTATS_MAX; attr_id++) {
5051 if (attr_id < *prividx)
5054 size = rtnl_get_offload_stats_attr_size(attr_id);
5058 if (!dev->netdev_ops->ndo_has_offload_stats(dev, attr_id))
5061 attr = nla_reserve_64bit(skb, attr_id, size,
5062 IFLA_OFFLOAD_XSTATS_UNSPEC);
5064 goto nla_put_failure;
5066 attr_data = nla_data(attr);
5067 memset(attr_data, 0, size);
5068 err = dev->netdev_ops->ndo_get_offload_stats(attr_id, dev,
5071 goto get_offload_stats_failure;
5082 get_offload_stats_failure:
5087 static int rtnl_get_offload_stats_size(const struct net_device *dev)
5093 if (!(dev->netdev_ops && dev->netdev_ops->ndo_has_offload_stats &&
5094 dev->netdev_ops->ndo_get_offload_stats))
5097 for (attr_id = IFLA_OFFLOAD_XSTATS_FIRST;
5098 attr_id <= IFLA_OFFLOAD_XSTATS_MAX; attr_id++) {
5099 if (!dev->netdev_ops->ndo_has_offload_stats(dev, attr_id))
5101 size = rtnl_get_offload_stats_attr_size(attr_id);
5102 nla_size += nla_total_size_64bit(size);
5106 nla_size += nla_total_size(0);
5111 static int rtnl_fill_statsinfo(struct sk_buff *skb, struct net_device *dev,
5112 int type, u32 pid, u32 seq, u32 change,
5113 unsigned int flags, unsigned int filter_mask,
5114 int *idxattr, int *prividx)
5116 struct if_stats_msg *ifsm;
5117 struct nlmsghdr *nlh;
5118 struct nlattr *attr;
5119 int s_prividx = *prividx;
5124 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ifsm), flags);
5128 ifsm = nlmsg_data(nlh);
5129 ifsm->family = PF_UNSPEC;
5132 ifsm->ifindex = dev->ifindex;
5133 ifsm->filter_mask = filter_mask;
5135 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_64, *idxattr)) {
5136 struct rtnl_link_stats64 *sp;
5138 attr = nla_reserve_64bit(skb, IFLA_STATS_LINK_64,
5139 sizeof(struct rtnl_link_stats64),
5142 goto nla_put_failure;
5144 sp = nla_data(attr);
5145 dev_get_stats(dev, sp);
5148 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_XSTATS, *idxattr)) {
5149 const struct rtnl_link_ops *ops = dev->rtnl_link_ops;
5151 if (ops && ops->fill_linkxstats) {
5152 *idxattr = IFLA_STATS_LINK_XSTATS;
5153 attr = nla_nest_start_noflag(skb,
5154 IFLA_STATS_LINK_XSTATS);
5156 goto nla_put_failure;
5158 err = ops->fill_linkxstats(skb, dev, prividx, *idxattr);
5159 nla_nest_end(skb, attr);
5161 goto nla_put_failure;
5166 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_XSTATS_SLAVE,
5168 const struct rtnl_link_ops *ops = NULL;
5169 const struct net_device *master;
5171 master = netdev_master_upper_dev_get(dev);
5173 ops = master->rtnl_link_ops;
5174 if (ops && ops->fill_linkxstats) {
5175 *idxattr = IFLA_STATS_LINK_XSTATS_SLAVE;
5176 attr = nla_nest_start_noflag(skb,
5177 IFLA_STATS_LINK_XSTATS_SLAVE);
5179 goto nla_put_failure;
5181 err = ops->fill_linkxstats(skb, dev, prividx, *idxattr);
5182 nla_nest_end(skb, attr);
5184 goto nla_put_failure;
5189 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_OFFLOAD_XSTATS,
5191 *idxattr = IFLA_STATS_LINK_OFFLOAD_XSTATS;
5192 attr = nla_nest_start_noflag(skb,
5193 IFLA_STATS_LINK_OFFLOAD_XSTATS);
5195 goto nla_put_failure;
5197 err = rtnl_get_offload_stats(skb, dev, prividx);
5198 if (err == -ENODATA)
5199 nla_nest_cancel(skb, attr);
5201 nla_nest_end(skb, attr);
5203 if (err && err != -ENODATA)
5204 goto nla_put_failure;
5208 if (stats_attr_valid(filter_mask, IFLA_STATS_AF_SPEC, *idxattr)) {
5209 struct rtnl_af_ops *af_ops;
5211 *idxattr = IFLA_STATS_AF_SPEC;
5212 attr = nla_nest_start_noflag(skb, IFLA_STATS_AF_SPEC);
5214 goto nla_put_failure;
5217 list_for_each_entry_rcu(af_ops, &rtnl_af_ops, list) {
5218 if (af_ops->fill_stats_af) {
5222 af = nla_nest_start_noflag(skb,
5226 goto nla_put_failure;
5228 err = af_ops->fill_stats_af(skb, dev);
5230 if (err == -ENODATA) {
5231 nla_nest_cancel(skb, af);
5232 } else if (err < 0) {
5234 goto nla_put_failure;
5237 nla_nest_end(skb, af);
5242 nla_nest_end(skb, attr);
5247 nlmsg_end(skb, nlh);
5252 /* not a multipart message or no progress means a real error */
5253 if (!(flags & NLM_F_MULTI) || s_prividx == *prividx)
5254 nlmsg_cancel(skb, nlh);
5256 nlmsg_end(skb, nlh);
5261 static size_t if_nlmsg_stats_size(const struct net_device *dev,
5264 size_t size = NLMSG_ALIGN(sizeof(struct if_stats_msg));
5266 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_64, 0))
5267 size += nla_total_size_64bit(sizeof(struct rtnl_link_stats64));
5269 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_XSTATS, 0)) {
5270 const struct rtnl_link_ops *ops = dev->rtnl_link_ops;
5271 int attr = IFLA_STATS_LINK_XSTATS;
5273 if (ops && ops->get_linkxstats_size) {
5274 size += nla_total_size(ops->get_linkxstats_size(dev,
5276 /* for IFLA_STATS_LINK_XSTATS */
5277 size += nla_total_size(0);
5281 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_XSTATS_SLAVE, 0)) {
5282 struct net_device *_dev = (struct net_device *)dev;
5283 const struct rtnl_link_ops *ops = NULL;
5284 const struct net_device *master;
5286 /* netdev_master_upper_dev_get can't take const */
5287 master = netdev_master_upper_dev_get(_dev);
5289 ops = master->rtnl_link_ops;
5290 if (ops && ops->get_linkxstats_size) {
5291 int attr = IFLA_STATS_LINK_XSTATS_SLAVE;
5293 size += nla_total_size(ops->get_linkxstats_size(dev,
5295 /* for IFLA_STATS_LINK_XSTATS_SLAVE */
5296 size += nla_total_size(0);
5300 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_OFFLOAD_XSTATS, 0))
5301 size += rtnl_get_offload_stats_size(dev);
5303 if (stats_attr_valid(filter_mask, IFLA_STATS_AF_SPEC, 0)) {
5304 struct rtnl_af_ops *af_ops;
5306 /* for IFLA_STATS_AF_SPEC */
5307 size += nla_total_size(0);
5310 list_for_each_entry_rcu(af_ops, &rtnl_af_ops, list) {
5311 if (af_ops->get_stats_af_size) {
5312 size += nla_total_size(
5313 af_ops->get_stats_af_size(dev));
5316 size += nla_total_size(0);
5325 static int rtnl_valid_stats_req(const struct nlmsghdr *nlh, bool strict_check,
5326 bool is_dump, struct netlink_ext_ack *extack)
5328 struct if_stats_msg *ifsm;
5330 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifsm))) {
5331 NL_SET_ERR_MSG(extack, "Invalid header for stats dump");
5338 ifsm = nlmsg_data(nlh);
5340 /* only requests using strict checks can pass data to influence
5341 * the dump. The legacy exception is filter_mask.
5343 if (ifsm->pad1 || ifsm->pad2 || (is_dump && ifsm->ifindex)) {
5344 NL_SET_ERR_MSG(extack, "Invalid values in header for stats dump request");
5347 if (nlmsg_attrlen(nlh, sizeof(*ifsm))) {
5348 NL_SET_ERR_MSG(extack, "Invalid attributes after stats header");
5351 if (ifsm->filter_mask >= IFLA_STATS_FILTER_BIT(IFLA_STATS_MAX + 1)) {
5352 NL_SET_ERR_MSG(extack, "Invalid stats requested through filter mask");
static int rtnl_stats_get(struct sk_buff *skb, struct nlmsghdr *nlh,
			  struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct net_device *dev = NULL;
	int idxattr = 0, prividx = 0;
	struct if_stats_msg *ifsm;
	struct sk_buff *nskb;
	u32 filter_mask;
	int err;

	err = rtnl_valid_stats_req(nlh, netlink_strict_get_check(skb),
				   false, extack);
	if (err)
		return err;

	ifsm = nlmsg_data(nlh);
	if (ifsm->ifindex > 0)
		dev = __dev_get_by_index(net, ifsm->ifindex);
	else
		return -EINVAL;

	if (!dev)
		return -ENODEV;

	filter_mask = ifsm->filter_mask;
	if (!filter_mask)
		return -EINVAL;

	nskb = nlmsg_new(if_nlmsg_stats_size(dev, filter_mask), GFP_KERNEL);
	if (!nskb)
		return -ENOBUFS;

	err = rtnl_fill_statsinfo(nskb, dev, RTM_NEWSTATS,
				  NETLINK_CB(skb).portid, nlh->nlmsg_seq, 0,
				  0, filter_mask, &idxattr, &prividx);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in if_nlmsg_stats_size */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(nskb);
	} else {
		err = rtnl_unicast(nskb, net, NETLINK_CB(skb).portid);
	}

	return err;
}
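
/* RTM_GETSTATS dumpit handler: walk the per-netns device hash table,
 * resuming from the hash bucket, device, attribute and private indices
 * saved in cb->args[] whenever an earlier skb filled up mid-device.
 */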
static int rtnl_stats_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct netlink_ext_ack *extack = cb->extack;
	int h, s_h, err, s_idx, s_idxattr, s_prividx;
	struct net *net = sock_net(skb->sk);
	unsigned int flags = NLM_F_MULTI;
	struct if_stats_msg *ifsm;
	struct hlist_head *head;
	struct net_device *dev;
	u32 filter_mask = 0;
	int idx = 0;

	s_h = cb->args[0];
	s_idx = cb->args[1];
	s_idxattr = cb->args[2];
	s_prividx = cb->args[3];

	cb->seq = net->dev_base_seq;

	err = rtnl_valid_stats_req(cb->nlh, cb->strict_check, true, extack);
	if (err)
		return err;

	ifsm = nlmsg_data(cb->nlh);
	filter_mask = ifsm->filter_mask;
	if (!filter_mask) {
		NL_SET_ERR_MSG(extack, "Filter mask must be set for stats dump");
		return -EINVAL;
	}

	for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
		idx = 0;
		head = &net->dev_index_head[h];
		hlist_for_each_entry(dev, head, index_hlist) {
			if (idx < s_idx)
				goto cont;
			err = rtnl_fill_statsinfo(skb, dev, RTM_NEWSTATS,
						  NETLINK_CB(cb->skb).portid,
						  cb->nlh->nlmsg_seq, 0,
						  flags, filter_mask,
						  &s_idxattr, &s_prividx);
			/* If we ran out of room on the first message,
			 * we're in trouble.
			 */
			WARN_ON((err == -EMSGSIZE) && (skb->len == 0));

			if (err < 0)
				goto out;
			s_prividx = 0;
			s_idxattr = 0;
			nl_dump_check_consistent(cb, nlmsg_hdr(skb));
cont:
			idx++;
		}
	}
out:
	cb->args[3] = s_prividx;
	cb->args[2] = s_idxattr;
	cb->args[1] = idx;
	cb->args[0] = h;

	return skb->len;
}
/* Process one rtnetlink message. */
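/* Handlers are looked up in rtnl_msg_handlers by (family, message type):
 * dump requests are handed off to netlink_dump_start(), while doit
 * handlers run under the RTNL mutex unless they were registered with
 * RTNL_FLAG_DOIT_UNLOCKED.
 */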
static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
			     struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct rtnl_link *link;
	struct module *owner;
	int err = -EOPNOTSUPP;
	rtnl_doit_func doit;
	unsigned int flags;
	int kind;
	int family;
	int type;

	type = nlh->nlmsg_type;
	if (type > RTM_MAX)
		return -EOPNOTSUPP;

	type -= RTM_BASE;

	/* All the messages must have at least 1 byte length */
	if (nlmsg_len(nlh) < sizeof(struct rtgenmsg))
		return 0;

	family = ((struct rtgenmsg *)nlmsg_data(nlh))->rtgen_family;
	kind = type&3;

	if (kind != 2 && !netlink_net_capable(skb, CAP_NET_ADMIN))
		return -EPERM;

	rcu_read_lock();
	if (kind == 2 && nlh->nlmsg_flags&NLM_F_DUMP) {
		struct sock *rtnl;
		rtnl_dumpit_func dumpit;
		u32 min_dump_alloc = 0;

		link = rtnl_get_link(family, type);
		if (!link || !link->dumpit) {
			family = PF_UNSPEC;
			link = rtnl_get_link(family, type);
			if (!link || !link->dumpit)
				goto err_unlock;
		}
		owner = link->owner;
		dumpit = link->dumpit;

		if (type == RTM_GETLINK - RTM_BASE)
			min_dump_alloc = rtnl_calcit(skb, nlh);

		err = 0;
		/* need to do this before rcu_read_unlock() */
		if (!try_module_get(owner))
			err = -EPROTONOSUPPORT;
		rcu_read_unlock();

		rtnl = net->rtnl;
		if (err == 0) {
			struct netlink_dump_control c = {
				.dump		= dumpit,
				.min_dump_alloc	= min_dump_alloc,
				.module		= owner,
			};
			err = netlink_dump_start(rtnl, skb, nlh, &c);
			/* netlink_dump_start() will keep a reference on
			 * module if dump is still in progress.
			 */
			module_put(owner);
		}
		return err;
	}

	link = rtnl_get_link(family, type);
	if (!link || !link->doit) {
		family = PF_UNSPEC;
		link = rtnl_get_link(PF_UNSPEC, type);
		if (!link || !link->doit)
			goto out_unlock;
	}

	owner = link->owner;
	if (!try_module_get(owner)) {
		err = -EPROTONOSUPPORT;
		goto out_unlock;
	}

	flags = link->flags;
	if (flags & RTNL_FLAG_DOIT_UNLOCKED) {
		doit = link->doit;
		rcu_read_unlock();
		if (doit)
			err = doit(skb, nlh, extack);
		module_put(owner);
		return err;
	}
	rcu_read_unlock();

	rtnl_lock();
	link = rtnl_get_link(family, type);
	if (link && link->doit)
		err = link->doit(skb, nlh, extack);
	rtnl_unlock();

	module_put(owner);
	return err;

out_unlock:
	rcu_read_unlock();
	return err;

err_unlock:
	rcu_read_unlock();
	return -EOPNOTSUPP;
}
static void rtnetlink_rcv(struct sk_buff *skb)
{
	netlink_rcv_skb(skb, &rtnetlink_rcv_msg);
}
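
/* The multicast-routing report groups carry copies of forwarded
 * packets, so joining them requires CAP_NET_ADMIN in the owning
 * user namespace.
 */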
static int rtnetlink_bind(struct net *net, int group)
{
	switch (group) {
	case RTNLGRP_IPV4_MROUTE_R:
	case RTNLGRP_IPV6_MROUTE_R:
		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
			return -EPERM;
		break;
	}
	return 0;
}
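
/* Translate netdevice notifier events that userspace cares about into
 * RTM_NEWLINK notifications carrying the matching IFLA_EVENT_* value.
 */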
static int rtnetlink_event(struct notifier_block *this, unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	switch (event) {
	case NETDEV_REBOOT:
	case NETDEV_CHANGEMTU:
	case NETDEV_CHANGEADDR:
	case NETDEV_CHANGENAME:
	case NETDEV_FEAT_CHANGE:
	case NETDEV_BONDING_FAILOVER:
	case NETDEV_POST_TYPE_CHANGE:
	case NETDEV_NOTIFY_PEERS:
	case NETDEV_CHANGEUPPER:
	case NETDEV_RESEND_IGMP:
	case NETDEV_CHANGEINFODATA:
	case NETDEV_CHANGELOWERSTATE:
	case NETDEV_CHANGE_TX_QUEUE_LEN:
		rtmsg_ifinfo_event(RTM_NEWLINK, dev, 0, rtnl_get_event(event),
				   GFP_KERNEL, NULL, 0);
		break;
	default:
		break;
	}
	return NOTIFY_DONE;
}
static struct notifier_block rtnetlink_dev_notifier = {
	.notifier_call	= rtnetlink_event,
};
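
/* Create the kernel-side NETLINK_ROUTE socket for each network
 * namespace; NL_CFG_F_NONROOT_RECV lets unprivileged sockets listen
 * to multicast groups, subject to rtnetlink_bind() above.
 */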
static int __net_init rtnetlink_net_init(struct net *net)
{
	struct sock *sk;
	struct netlink_kernel_cfg cfg = {
		.groups		= RTNLGRP_MAX,
		.input		= rtnetlink_rcv,
		.cb_mutex	= &rtnl_mutex,
		.flags		= NL_CFG_F_NONROOT_RECV,
		.bind		= rtnetlink_bind,
	};

	sk = netlink_kernel_create(net, NETLINK_ROUTE, &cfg);
	if (!sk)
		return -ENOMEM;
	net->rtnl = sk;
	return 0;
}

static void __net_exit rtnetlink_net_exit(struct net *net)
{
	netlink_kernel_release(net->rtnl);
	net->rtnl = NULL;
}

static struct pernet_operations rtnetlink_net_ops = {
	.init = rtnetlink_net_init,
	.exit = rtnetlink_net_exit,
};
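
/* Wire up rtnetlink at boot: per-netns sockets, the netdevice notifier,
 * and the protocol-independent doit/dumpit handlers registered below.
 */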
void __init rtnetlink_init(void)
{
	if (register_pernet_subsys(&rtnetlink_net_ops))
		panic("rtnetlink_init: cannot initialize rtnetlink\n");

	register_netdevice_notifier(&rtnetlink_dev_notifier);

	rtnl_register(PF_UNSPEC, RTM_GETLINK, rtnl_getlink,
		      rtnl_dump_ifinfo, 0);
	rtnl_register(PF_UNSPEC, RTM_SETLINK, rtnl_setlink, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_NEWLINK, rtnl_newlink, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_DELLINK, rtnl_dellink, NULL, 0);

	rtnl_register(PF_UNSPEC, RTM_GETADDR, NULL, rtnl_dump_all, 0);
	rtnl_register(PF_UNSPEC, RTM_GETROUTE, NULL, rtnl_dump_all, 0);
	rtnl_register(PF_UNSPEC, RTM_GETNETCONF, NULL, rtnl_dump_all, 0);

	rtnl_register(PF_UNSPEC, RTM_NEWLINKPROP, rtnl_newlinkprop, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_DELLINKPROP, rtnl_dellinkprop, NULL, 0);

	rtnl_register(PF_BRIDGE, RTM_NEWNEIGH, rtnl_fdb_add, NULL, 0);
	rtnl_register(PF_BRIDGE, RTM_DELNEIGH, rtnl_fdb_del, NULL, 0);
	rtnl_register(PF_BRIDGE, RTM_GETNEIGH, rtnl_fdb_get, rtnl_fdb_dump, 0);

	rtnl_register(PF_BRIDGE, RTM_GETLINK, NULL, rtnl_bridge_getlink, 0);
	rtnl_register(PF_BRIDGE, RTM_DELLINK, rtnl_bridge_dellink, NULL, 0);
	rtnl_register(PF_BRIDGE, RTM_SETLINK, rtnl_bridge_setlink, NULL, 0);

	rtnl_register(PF_UNSPEC, RTM_GETSTATS, rtnl_stats_get, rtnl_stats_dump,
		      0);
}
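
/* Illustrative sketch only (not part of this file): a protocol module
 * would hook into the same dispatch table that rtnetlink_rcv_msg() uses
 * by calling rtnl_register_module(). The foo_* names and the choice of
 * family/msgtype below are hypothetical; a real caller must pick a
 * (family, msgtype) pair that is not already claimed.
 */
#if 0
static int foo_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
		    struct netlink_ext_ack *extack)
{
	/* Parse nlh and act; runs under rtnl_lock() unless the handler
	 * was registered with RTNL_FLAG_DOIT_UNLOCKED.
	 */
	return 0;
}

static int __init foo_init(void)
{
	return rtnl_register_module(THIS_MODULE, PF_BRIDGE, RTM_GETSTATS,
				    foo_doit, NULL, 0);
}
#endif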