/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Routing netlink socket interface: protocol independent part.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 *	Fixes:
 *	Vitaly E. Lavrov		RTA_OK arithmetic was wrong.
 */
#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/fcntl.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/capability.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/security.h>
#include <linux/mutex.h>
#include <linux/if_addr.h>
#include <linux/if_bridge.h>
#include <linux/if_vlan.h>
#include <linux/pci.h>
#include <linux/etherdevice.h>
#include <linux/bpf.h>

#include <linux/uaccess.h>

#include <linux/inet.h>
#include <linux/netdevice.h>
#include <net/switchdev.h>
#include <net/protocol.h>
#include <net/route.h>
#include <net/pkt_sched.h>
#include <net/fib_rules.h>
#include <net/rtnetlink.h>
#include <net/net_namespace.h>
#define RTNL_MAX_TYPE		48
#define RTNL_SLAVE_MAX_TYPE	36
struct rtnl_link {
	rtnl_doit_func		doit;
	rtnl_dumpit_func	dumpit;
	struct module		*owner;
	unsigned int		flags;
	struct rcu_head		rcu;
};
static DEFINE_MUTEX(rtnl_mutex);

void rtnl_lock(void)
{
	mutex_lock(&rtnl_mutex);
}
EXPORT_SYMBOL(rtnl_lock);

int rtnl_lock_killable(void)
{
	return mutex_lock_killable(&rtnl_mutex);
}
EXPORT_SYMBOL(rtnl_lock_killable);

static struct sk_buff *defer_kfree_skb_list;
void rtnl_kfree_skbs(struct sk_buff *head, struct sk_buff *tail)
{
	if (head && tail) {
		tail->next = defer_kfree_skb_list;
		defer_kfree_skb_list = head;
	}
}
EXPORT_SYMBOL(rtnl_kfree_skbs);
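
/*
 * Usage sketch (illustrative, not taken from this file): a caller that
 * already holds the RTNL mutex can hand a whole skb chain to
 * rtnl_kfree_skbs() instead of freeing each skb under the lock; the
 * chain is then released once the mutex is dropped in __rtnl_unlock().
 * "head" and "tail" below are assumed caller state:
 *
 *	ASSERT_RTNL();
 *	rtnl_kfree_skbs(head, tail);	// actually freed after unlock
 */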
void __rtnl_unlock(void)
{
	struct sk_buff *head = defer_kfree_skb_list;

	defer_kfree_skb_list = NULL;

	mutex_unlock(&rtnl_mutex);

	while (head) {
		struct sk_buff *next = head->next;

		kfree_skb(head);
		cond_resched();
		head = next;
	}
}

void rtnl_unlock(void)
{
	/* This fellow will unlock it for us. */
	netdev_run_todo();
}
EXPORT_SYMBOL(rtnl_unlock);
int rtnl_trylock(void)
{
	return mutex_trylock(&rtnl_mutex);
}
EXPORT_SYMBOL(rtnl_trylock);

int rtnl_is_locked(void)
{
	return mutex_is_locked(&rtnl_mutex);
}
EXPORT_SYMBOL(rtnl_is_locked);

bool refcount_dec_and_rtnl_lock(refcount_t *r)
{
	return refcount_dec_and_mutex_lock(r, &rtnl_mutex);
}
EXPORT_SYMBOL(refcount_dec_and_rtnl_lock);

#ifdef CONFIG_PROVE_LOCKING
bool lockdep_rtnl_is_held(void)
{
	return lockdep_is_held(&rtnl_mutex);
}
EXPORT_SYMBOL(lockdep_rtnl_is_held);
#endif /* #ifdef CONFIG_PROVE_LOCKING */
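
/*
 * A minimal locking sketch (assumed caller, not part of this file):
 * writers take the mutex around device mutations; readers of
 * RTNL-protected pointers use rtnl_dereference() while it is held.
 *
 *	rtnl_lock();
 *	// ... modify RTNL-protected state ...
 *	rtnl_unlock();
 *
 * Process context that must stay interruptible can use the killable
 * variant instead:
 *
 *	err = rtnl_lock_killable();
 *	if (err)
 *		return err;
 */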
static struct rtnl_link *__rcu *rtnl_msg_handlers[RTNL_FAMILY_MAX + 1];

static inline int rtm_msgindex(int msgtype)
{
	int msgindex = msgtype - RTM_BASE;

	/*
	 * msgindex < 0 implies someone tried to register a netlink
	 * control code. msgindex >= RTM_NR_MSGTYPES may indicate that
	 * the message type has not been added to linux/rtnetlink.h
	 */
	BUG_ON(msgindex < 0 || msgindex >= RTM_NR_MSGTYPES);

	return msgindex;
}
static struct rtnl_link *rtnl_get_link(int protocol, int msgtype)
{
	struct rtnl_link **tab;

	if (protocol >= ARRAY_SIZE(rtnl_msg_handlers))
		protocol = PF_UNSPEC;

	tab = rcu_dereference_rtnl(rtnl_msg_handlers[protocol]);
	if (!tab)
		tab = rcu_dereference_rtnl(rtnl_msg_handlers[PF_UNSPEC]);

	return tab[msgtype];
}
static int rtnl_register_internal(struct module *owner,
				  int protocol, int msgtype,
				  rtnl_doit_func doit, rtnl_dumpit_func dumpit,
				  unsigned int flags)
{
	struct rtnl_link *link, *old;
	struct rtnl_link __rcu **tab;
	int msgindex;
	int ret = -ENOBUFS;

	BUG_ON(protocol < 0 || protocol > RTNL_FAMILY_MAX);
	msgindex = rtm_msgindex(msgtype);

	rtnl_lock();
	tab = rtnl_msg_handlers[protocol];
	if (tab == NULL) {
		tab = kcalloc(RTM_NR_MSGTYPES, sizeof(void *), GFP_KERNEL);
		if (!tab)
			goto unlock;

		/* ensures we see the 0 stores */
		rcu_assign_pointer(rtnl_msg_handlers[protocol], tab);
	}

	old = rtnl_dereference(tab[msgindex]);
	if (old) {
		link = kmemdup(old, sizeof(*old), GFP_KERNEL);
		if (!link)
			goto unlock;
	} else {
		link = kzalloc(sizeof(*link), GFP_KERNEL);
		if (!link)
			goto unlock;
	}

	WARN_ON(link->owner && link->owner != owner);
	link->owner = owner;

	WARN_ON(doit && link->doit && link->doit != doit);
	if (doit)
		link->doit = doit;
	WARN_ON(dumpit && link->dumpit && link->dumpit != dumpit);
	if (dumpit)
		link->dumpit = dumpit;

	link->flags |= flags;

	/* publish protocol:msgtype */
	rcu_assign_pointer(tab[msgindex], link);
	ret = 0;
	if (old)
		kfree_rcu(old, rcu);
unlock:
	rtnl_unlock();
	return ret;
}
/**
 * rtnl_register_module - Register a rtnetlink message type
 *
 * @owner: module registering the hook (THIS_MODULE)
 * @protocol: Protocol family or PF_UNSPEC
 * @msgtype: rtnetlink message type
 * @doit: Function pointer called for each request message
 * @dumpit: Function pointer called for each dump request (NLM_F_DUMP) message
 * @flags: rtnl_link_flags to modify behaviour of doit/dumpit functions
 *
 * Like rtnl_register, but for use by removable modules.
 */
int rtnl_register_module(struct module *owner,
			 int protocol, int msgtype,
			 rtnl_doit_func doit, rtnl_dumpit_func dumpit,
			 unsigned int flags)
{
	return rtnl_register_internal(owner, protocol, msgtype,
				      doit, dumpit, flags);
}
EXPORT_SYMBOL_GPL(rtnl_register_module);
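
/*
 * Registration sketch for a removable module.  The "foo" names and the
 * protocol/msgtype pair are illustrative only, not a real user of this
 * API:
 *
 *	static int __init foo_init(void)
 *	{
 *		return rtnl_register_module(THIS_MODULE, PF_BRIDGE,
 *					    RTM_GETLINK, foo_doit,
 *					    foo_dumpit, 0);
 *	}
 */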
/**
 * rtnl_register - Register a rtnetlink message type
 * @protocol: Protocol family or PF_UNSPEC
 * @msgtype: rtnetlink message type
 * @doit: Function pointer called for each request message
 * @dumpit: Function pointer called for each dump request (NLM_F_DUMP) message
 * @flags: rtnl_link_flags to modify behaviour of doit/dumpit functions
 *
 * Registers the specified function pointers (at least one of them has
 * to be non-NULL) to be called whenever a request message for the
 * specified protocol family and message type is received.
 *
 * The special protocol family PF_UNSPEC may be used to define fallback
 * function pointers for the case when no entry for the specific protocol
 * family exists.
 */
void rtnl_register(int protocol, int msgtype,
		   rtnl_doit_func doit, rtnl_dumpit_func dumpit,
		   unsigned int flags)
{
	int err;

	err = rtnl_register_internal(NULL, protocol, msgtype, doit, dumpit,
				     flags);
	if (err)
		pr_err("Unable to register rtnetlink message handler, "
		       "protocol = %d, message type = %d\n", protocol, msgtype);
}
/**
 * rtnl_unregister - Unregister a rtnetlink message type
 * @protocol: Protocol family or PF_UNSPEC
 * @msgtype: rtnetlink message type
 *
 * Returns 0 on success or a negative error code.
 */
int rtnl_unregister(int protocol, int msgtype)
{
	struct rtnl_link **tab, *link;
	int msgindex;

	BUG_ON(protocol < 0 || protocol > RTNL_FAMILY_MAX);
	msgindex = rtm_msgindex(msgtype);

	rtnl_lock();
	tab = rtnl_dereference(rtnl_msg_handlers[protocol]);
	if (!tab) {
		rtnl_unlock();
		return -ENOENT;
	}

	link = tab[msgindex];
	rcu_assign_pointer(tab[msgindex], NULL);
	rtnl_unlock();

	kfree_rcu(link, rcu);

	return 0;
}
EXPORT_SYMBOL_GPL(rtnl_unregister);
/**
 * rtnl_unregister_all - Unregister all rtnetlink message type of a protocol
 * @protocol : Protocol family or PF_UNSPEC
 *
 * Identical to calling rtnl_unregister() for all registered message types
 * of a certain protocol family.
 */
void rtnl_unregister_all(int protocol)
{
	struct rtnl_link **tab, *link;
	int msgindex;

	BUG_ON(protocol < 0 || protocol > RTNL_FAMILY_MAX);

	rtnl_lock();
	tab = rtnl_msg_handlers[protocol];
	if (!tab) {
		rtnl_unlock();
		return;
	}
	RCU_INIT_POINTER(rtnl_msg_handlers[protocol], NULL);
	for (msgindex = 0; msgindex < RTM_NR_MSGTYPES; msgindex++) {
		link = tab[msgindex];
		if (!link)
			continue;

		rcu_assign_pointer(tab[msgindex], NULL);
		kfree_rcu(link, rcu);
	}
	rtnl_unlock();

	synchronize_net();

	kfree(tab);
}
EXPORT_SYMBOL_GPL(rtnl_unregister_all);
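
/*
 * Teardown sketch pairing with the rtnl_register_module() example
 * above.  FOO_PF_FAMILY is a hypothetical stand-in for whatever family
 * the module registered its handlers under:
 *
 *	static void __exit foo_exit(void)
 *	{
 *		rtnl_unregister_all(FOO_PF_FAMILY);
 *	}
 */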
static LIST_HEAD(link_ops);

static const struct rtnl_link_ops *rtnl_link_ops_get(const char *kind)
{
	const struct rtnl_link_ops *ops;

	list_for_each_entry(ops, &link_ops, list) {
		if (!strcmp(ops->kind, kind))
			return ops;
	}

	return NULL;
}
/**
 * __rtnl_link_register - Register rtnl_link_ops with rtnetlink.
 * @ops: struct rtnl_link_ops * to register
 *
 * The caller must hold the rtnl_mutex. This function should be used
 * by drivers that create devices during module initialization. It
 * must be called before registering the devices.
 *
 * Returns 0 on success or a negative error code.
 */
int __rtnl_link_register(struct rtnl_link_ops *ops)
{
	if (rtnl_link_ops_get(ops->kind))
		return -EEXIST;

	/* The check for setup is here because if ops
	 * does not have that filled up, it is not possible
	 * to use the ops for creating a device. So do not
	 * fill up dellink as well. That disables rtnl_dellink.
	 */
	if (ops->setup && !ops->dellink)
		ops->dellink = unregister_netdevice_queue;

	list_add_tail(&ops->list, &link_ops);
	return 0;
}
EXPORT_SYMBOL_GPL(__rtnl_link_register);
/**
 * rtnl_link_register - Register rtnl_link_ops with rtnetlink.
 * @ops: struct rtnl_link_ops * to register
 *
 * Returns 0 on success or a negative error code.
 */
int rtnl_link_register(struct rtnl_link_ops *ops)
{
	int err;

	/* Sanity-check max sizes to avoid stack buffer overflow. */
	if (WARN_ON(ops->maxtype > RTNL_MAX_TYPE ||
		    ops->slave_maxtype > RTNL_SLAVE_MAX_TYPE))
		return -EINVAL;

	rtnl_lock();
	err = __rtnl_link_register(ops);
	rtnl_unlock();
	return err;
}
EXPORT_SYMBOL_GPL(rtnl_link_register);
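
/*
 * Sketch of a typical rtnl_link_ops user (a virtual device driver; all
 * "foo" names are hypothetical).  ->kind is what "ip link add ... type
 * foo" matches on, and a non-NULL ->setup is what makes the kind
 * creatable via rtnetlink:
 *
 *	static struct rtnl_link_ops foo_link_ops __read_mostly = {
 *		.kind		= "foo",
 *		.priv_size	= sizeof(struct foo_priv),
 *		.setup		= foo_setup,
 *	};
 *
 *	err = rtnl_link_register(&foo_link_ops);
 */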
static void __rtnl_kill_links(struct net *net, struct rtnl_link_ops *ops)
{
	struct net_device *dev;
	LIST_HEAD(list_kill);

	for_each_netdev(net, dev) {
		if (dev->rtnl_link_ops == ops)
			ops->dellink(dev, &list_kill);
	}
	unregister_netdevice_many(&list_kill);
}
/**
 * __rtnl_link_unregister - Unregister rtnl_link_ops from rtnetlink.
 * @ops: struct rtnl_link_ops * to unregister
 *
 * The caller must hold the rtnl_mutex and guarantee net_namespace_list
 * integrity (hold pernet_ops_rwsem for writing to close the race
 * with setup_net() and cleanup_net()).
 */
void __rtnl_link_unregister(struct rtnl_link_ops *ops)
{
	struct net *net;

	for_each_net(net) {
		__rtnl_kill_links(net, ops);
	}
	list_del(&ops->list);
}
EXPORT_SYMBOL_GPL(__rtnl_link_unregister);
/* Return with the rtnl_lock held when there are no network
 * devices unregistering in any network namespace.
 */
static void rtnl_lock_unregistering_all(void)
{
	struct net *net;
	bool unregistering;
	DEFINE_WAIT_FUNC(wait, woken_wake_function);

	add_wait_queue(&netdev_unregistering_wq, &wait);
	for (;;) {
		unregistering = false;
		rtnl_lock();
		/* We held write locked pernet_ops_rwsem, and parallel
		 * setup_net() and cleanup_net() are not possible.
		 */
		for_each_net(net) {
			if (net->dev_unreg_count > 0) {
				unregistering = true;
				break;
			}
		}
		if (!unregistering)
			break;
		__rtnl_unlock();

		wait_woken(&wait, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
	}
	remove_wait_queue(&netdev_unregistering_wq, &wait);
}
/**
 * rtnl_link_unregister - Unregister rtnl_link_ops from rtnetlink.
 * @ops: struct rtnl_link_ops * to unregister
 */
void rtnl_link_unregister(struct rtnl_link_ops *ops)
{
	/* Close the race with setup_net() and cleanup_net() */
	down_write(&pernet_ops_rwsem);
	rtnl_lock_unregistering_all();
	__rtnl_link_unregister(ops);
	rtnl_unlock();
	up_write(&pernet_ops_rwsem);
}
EXPORT_SYMBOL_GPL(rtnl_link_unregister);
static size_t rtnl_link_get_slave_info_data_size(const struct net_device *dev)
{
	struct net_device *master_dev;
	const struct rtnl_link_ops *ops;
	size_t size = 0;

	rcu_read_lock();

	master_dev = netdev_master_upper_dev_get_rcu((struct net_device *)dev);
	if (!master_dev)
		goto out;

	ops = master_dev->rtnl_link_ops;
	if (!ops || !ops->get_slave_size)
		goto out;
	/* IFLA_INFO_SLAVE_DATA + nested data */
	size = nla_total_size(sizeof(struct nlattr)) +
	       ops->get_slave_size(master_dev, dev);

out:
	rcu_read_unlock();
	return size;
}
static size_t rtnl_link_get_size(const struct net_device *dev)
{
	const struct rtnl_link_ops *ops = dev->rtnl_link_ops;
	size_t size;

	if (!ops)
		return 0;

	size = nla_total_size(sizeof(struct nlattr)) + /* IFLA_LINKINFO */
	       nla_total_size(strlen(ops->kind) + 1);  /* IFLA_INFO_KIND */

	if (ops->get_size)
		/* IFLA_INFO_DATA + nested data */
		size += nla_total_size(sizeof(struct nlattr)) +
			ops->get_size(dev);

	if (ops->get_xstats_size)
		/* IFLA_INFO_XSTATS */
		size += nla_total_size(ops->get_xstats_size(dev));

	size += rtnl_link_get_slave_info_data_size(dev);

	return size;
}
static LIST_HEAD(rtnl_af_ops);

static const struct rtnl_af_ops *rtnl_af_lookup(const int family)
{
	const struct rtnl_af_ops *ops;

	list_for_each_entry_rcu(ops, &rtnl_af_ops, list) {
		if (ops->family == family)
			return ops;
	}

	return NULL;
}
/**
 * rtnl_af_register - Register rtnl_af_ops with rtnetlink.
 * @ops: struct rtnl_af_ops * to register
 */
void rtnl_af_register(struct rtnl_af_ops *ops)
{
	rtnl_lock();
	list_add_tail_rcu(&ops->list, &rtnl_af_ops);
	rtnl_unlock();
}
EXPORT_SYMBOL_GPL(rtnl_af_register);
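
/*
 * Sketch of an address-family hook, of the kind the IPv4/IPv6 code
 * installs from its own files (the callback names here are
 * hypothetical):
 *
 *	static struct rtnl_af_ops foo_af_ops __read_mostly = {
 *		.family		  = AF_INET,
 *		.fill_link_af	  = foo_fill_link_af,
 *		.get_link_af_size = foo_get_link_af_size,
 *	};
 *
 *	rtnl_af_register(&foo_af_ops);
 */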
/**
 * rtnl_af_unregister - Unregister rtnl_af_ops from rtnetlink.
 * @ops: struct rtnl_af_ops * to unregister
 */
void rtnl_af_unregister(struct rtnl_af_ops *ops)
{
	rtnl_lock();
	list_del_rcu(&ops->list);
	rtnl_unlock();

	synchronize_rcu();
}
EXPORT_SYMBOL_GPL(rtnl_af_unregister);
static size_t rtnl_link_get_af_size(const struct net_device *dev,
				    u32 ext_filter_mask)
{
	struct rtnl_af_ops *af_ops;
	size_t size;

	/* IFLA_AF_SPEC */
	size = nla_total_size(sizeof(struct nlattr));

	rcu_read_lock();
	list_for_each_entry_rcu(af_ops, &rtnl_af_ops, list) {
		if (af_ops->get_link_af_size) {
			/* AF_* + nested data */
			size += nla_total_size(sizeof(struct nlattr)) +
				af_ops->get_link_af_size(dev, ext_filter_mask);
		}
	}
	rcu_read_unlock();

	return size;
}
static bool rtnl_have_link_slave_info(const struct net_device *dev)
{
	struct net_device *master_dev;
	bool ret = false;

	rcu_read_lock();

	master_dev = netdev_master_upper_dev_get_rcu((struct net_device *)dev);
	if (master_dev && master_dev->rtnl_link_ops)
		ret = true;
	rcu_read_unlock();
	return ret;
}
static int rtnl_link_slave_info_fill(struct sk_buff *skb,
				     const struct net_device *dev)
{
	struct net_device *master_dev;
	const struct rtnl_link_ops *ops;
	struct nlattr *slave_data;
	int err;

	master_dev = netdev_master_upper_dev_get((struct net_device *) dev);
	if (!master_dev)
		return 0;
	ops = master_dev->rtnl_link_ops;
	if (!ops)
		return 0;
	if (nla_put_string(skb, IFLA_INFO_SLAVE_KIND, ops->kind) < 0)
		return -EMSGSIZE;
	if (ops->fill_slave_info) {
		slave_data = nla_nest_start(skb, IFLA_INFO_SLAVE_DATA);
		if (!slave_data)
			return -EMSGSIZE;
		err = ops->fill_slave_info(skb, master_dev, dev);
		if (err < 0)
			goto err_cancel_slave_data;
		nla_nest_end(skb, slave_data);
	}
	return 0;

err_cancel_slave_data:
	nla_nest_cancel(skb, slave_data);
	return err;
}
static int rtnl_link_info_fill(struct sk_buff *skb,
			       const struct net_device *dev)
{
	const struct rtnl_link_ops *ops = dev->rtnl_link_ops;
	struct nlattr *data;
	int err;

	if (!ops)
		return 0;
	if (nla_put_string(skb, IFLA_INFO_KIND, ops->kind) < 0)
		return -EMSGSIZE;
	if (ops->fill_xstats) {
		err = ops->fill_xstats(skb, dev);
		if (err < 0)
			return err;
	}
	if (ops->fill_info) {
		data = nla_nest_start(skb, IFLA_INFO_DATA);
		if (data == NULL)
			return -EMSGSIZE;
		err = ops->fill_info(skb, dev);
		if (err < 0)
			goto err_cancel_data;
		nla_nest_end(skb, data);
	}
	return 0;

err_cancel_data:
	nla_nest_cancel(skb, data);
	return err;
}
static int rtnl_link_fill(struct sk_buff *skb, const struct net_device *dev)
{
	struct nlattr *linkinfo;
	int err = -EMSGSIZE;

	linkinfo = nla_nest_start(skb, IFLA_LINKINFO);
	if (linkinfo == NULL)
		goto out;

	err = rtnl_link_info_fill(skb, dev);
	if (err < 0)
		goto err_cancel_link;

	err = rtnl_link_slave_info_fill(skb, dev);
	if (err < 0)
		goto err_cancel_link;

	nla_nest_end(skb, linkinfo);
	return 0;

err_cancel_link:
	nla_nest_cancel(skb, linkinfo);
out:
	return err;
}
int rtnetlink_send(struct sk_buff *skb, struct net *net, u32 pid, unsigned int group, int echo)
{
	struct sock *rtnl = net->rtnl;
	int err = 0;

	NETLINK_CB(skb).dst_group = group;
	if (echo)
		refcount_inc(&skb->users);
	netlink_broadcast(rtnl, skb, pid, group, GFP_KERNEL);
	if (echo)
		err = netlink_unicast(rtnl, skb, pid, MSG_DONTWAIT);
	return err;
}

int rtnl_unicast(struct sk_buff *skb, struct net *net, u32 pid)
{
	struct sock *rtnl = net->rtnl;

	return nlmsg_unicast(rtnl, skb, pid);
}
EXPORT_SYMBOL(rtnl_unicast);
void rtnl_notify(struct sk_buff *skb, struct net *net, u32 pid, u32 group,
		 struct nlmsghdr *nlh, gfp_t flags)
{
	struct sock *rtnl = net->rtnl;
	int report = 0;

	if (nlh)
		report = nlmsg_report(nlh);

	nlmsg_notify(rtnl, skb, pid, group, report, flags);
}
EXPORT_SYMBOL(rtnl_notify);

void rtnl_set_sk_err(struct net *net, u32 group, int error)
{
	struct sock *rtnl = net->rtnl;

	netlink_set_err(rtnl, 0, group, error);
}
EXPORT_SYMBOL(rtnl_set_sk_err);
int rtnetlink_put_metrics(struct sk_buff *skb, u32 *metrics)
{
	struct nlattr *mx;
	int i, valid = 0;

	mx = nla_nest_start(skb, RTA_METRICS);
	if (mx == NULL)
		return -ENOBUFS;

	for (i = 0; i < RTAX_MAX; i++) {
		if (metrics[i]) {
			if (i == RTAX_CC_ALGO - 1) {
				char tmp[TCP_CA_NAME_MAX], *name;

				name = tcp_ca_get_name_by_key(metrics[i], tmp);
				if (!name)
					continue;
				if (nla_put_string(skb, i + 1, name))
					goto nla_put_failure;
			} else if (i == RTAX_FEATURES - 1) {
				u32 user_features = metrics[i] & RTAX_FEATURE_MASK;

				if (!user_features)
					continue;
				BUILD_BUG_ON(RTAX_FEATURE_MASK & DST_FEATURE_MASK);
				if (nla_put_u32(skb, i + 1, user_features))
					goto nla_put_failure;
			} else {
				if (nla_put_u32(skb, i + 1, metrics[i]))
					goto nla_put_failure;
			}
			valid++;
		}
	}

	if (!valid) {
		nla_nest_cancel(skb, mx);
		return 0;
	}

	return nla_nest_end(skb, mx);

nla_put_failure:
	nla_nest_cancel(skb, mx);
	return -EMSGSIZE;
}
EXPORT_SYMBOL(rtnetlink_put_metrics);
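
/*
 * Call-site sketch: a route dumper appends the RTA_METRICS nest while
 * filling an RTM_NEWROUTE message.  The "metrics" array is assumed
 * caller state (an RTAX_MAX-sized u32 array):
 *
 *	if (rtnetlink_put_metrics(skb, metrics) < 0)
 *		goto nla_put_failure;
 */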
int rtnl_put_cacheinfo(struct sk_buff *skb, struct dst_entry *dst, u32 id,
		       long expires, u32 error)
{
	struct rta_cacheinfo ci = {
		.rta_error = error,
		.rta_id =  id,
	};

	if (dst) {
		ci.rta_lastuse = jiffies_delta_to_clock_t(jiffies - dst->lastuse);
		ci.rta_used = dst->__use;
		ci.rta_clntref = atomic_read(&dst->__refcnt);
	}
	if (expires) {
		unsigned long clock;

		clock = jiffies_to_clock_t(abs(expires));
		clock = min_t(unsigned long, clock, INT_MAX);
		ci.rta_expires = (expires > 0) ? clock : -clock;
	}
	return nla_put(skb, RTA_CACHEINFO, sizeof(ci), &ci);
}
EXPORT_SYMBOL_GPL(rtnl_put_cacheinfo);
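
/*
 * Call-site sketch: the same route dumpers typically finish a message
 * with the cache info attribute.  "rt", "expires" and "err" below are
 * assumed caller state:
 *
 *	if (rtnl_put_cacheinfo(skb, &rt->dst, 0, expires, err) < 0)
 *		goto nla_put_failure;
 */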
static void set_operstate(struct net_device *dev, unsigned char transition)
{
	unsigned char operstate = dev->operstate;

	switch (transition) {
	case IF_OPER_UP:
		if ((operstate == IF_OPER_DORMANT ||
		     operstate == IF_OPER_UNKNOWN) &&
		    !netif_dormant(dev))
			operstate = IF_OPER_UP;
		break;

	case IF_OPER_DORMANT:
		if (operstate == IF_OPER_UP ||
		    operstate == IF_OPER_UNKNOWN)
			operstate = IF_OPER_DORMANT;
		break;
	}

	if (dev->operstate != operstate) {
		write_lock_bh(&dev_base_lock);
		dev->operstate = operstate;
		write_unlock_bh(&dev_base_lock);
		netdev_state_change(dev);
	}
}
static unsigned int rtnl_dev_get_flags(const struct net_device *dev)
{
	return (dev->flags & ~(IFF_PROMISC | IFF_ALLMULTI)) |
	       (dev->gflags & (IFF_PROMISC | IFF_ALLMULTI));
}

static unsigned int rtnl_dev_combine_flags(const struct net_device *dev,
					   const struct ifinfomsg *ifm)
{
	unsigned int flags = ifm->ifi_flags;

	/* bugwards compatibility: ifi_change == 0 is treated as ~0 */
	if (ifm->ifi_change)
		flags = (flags & ifm->ifi_change) |
			(rtnl_dev_get_flags(dev) & ~ifm->ifi_change);

	return flags;
}
static void copy_rtnl_link_stats(struct rtnl_link_stats *a,
				 const struct rtnl_link_stats64 *b)
{
	a->rx_packets = b->rx_packets;
	a->tx_packets = b->tx_packets;
	a->rx_bytes = b->rx_bytes;
	a->tx_bytes = b->tx_bytes;
	a->rx_errors = b->rx_errors;
	a->tx_errors = b->tx_errors;
	a->rx_dropped = b->rx_dropped;
	a->tx_dropped = b->tx_dropped;

	a->multicast = b->multicast;
	a->collisions = b->collisions;

	a->rx_length_errors = b->rx_length_errors;
	a->rx_over_errors = b->rx_over_errors;
	a->rx_crc_errors = b->rx_crc_errors;
	a->rx_frame_errors = b->rx_frame_errors;
	a->rx_fifo_errors = b->rx_fifo_errors;
	a->rx_missed_errors = b->rx_missed_errors;

	a->tx_aborted_errors = b->tx_aborted_errors;
	a->tx_carrier_errors = b->tx_carrier_errors;
	a->tx_fifo_errors = b->tx_fifo_errors;
	a->tx_heartbeat_errors = b->tx_heartbeat_errors;
	a->tx_window_errors = b->tx_window_errors;

	a->rx_compressed = b->rx_compressed;
	a->tx_compressed = b->tx_compressed;

	a->rx_nohandler = b->rx_nohandler;
}
static inline int rtnl_vfinfo_size(const struct net_device *dev,
				   u32 ext_filter_mask)
{
	if (dev->dev.parent && (ext_filter_mask & RTEXT_FILTER_VF)) {
		int num_vfs = dev_num_vf(dev->dev.parent);
		size_t size = nla_total_size(0);
		size += num_vfs *
			(nla_total_size(0) +
			 nla_total_size(sizeof(struct ifla_vf_mac)) +
			 nla_total_size(sizeof(struct ifla_vf_vlan)) +
			 nla_total_size(0) + /* nest IFLA_VF_VLAN_LIST */
			 nla_total_size(MAX_VLAN_LIST_LEN *
					sizeof(struct ifla_vf_vlan_info)) +
			 nla_total_size(sizeof(struct ifla_vf_spoofchk)) +
			 nla_total_size(sizeof(struct ifla_vf_tx_rate)) +
			 nla_total_size(sizeof(struct ifla_vf_rate)) +
			 nla_total_size(sizeof(struct ifla_vf_link_state)) +
			 nla_total_size(sizeof(struct ifla_vf_rss_query_en)) +
			 nla_total_size(0) + /* nest IFLA_VF_STATS */
			 /* IFLA_VF_STATS_RX_PACKETS */
			 nla_total_size_64bit(sizeof(__u64)) +
			 /* IFLA_VF_STATS_TX_PACKETS */
			 nla_total_size_64bit(sizeof(__u64)) +
			 /* IFLA_VF_STATS_RX_BYTES */
			 nla_total_size_64bit(sizeof(__u64)) +
			 /* IFLA_VF_STATS_TX_BYTES */
			 nla_total_size_64bit(sizeof(__u64)) +
			 /* IFLA_VF_STATS_BROADCAST */
			 nla_total_size_64bit(sizeof(__u64)) +
			 /* IFLA_VF_STATS_MULTICAST */
			 nla_total_size_64bit(sizeof(__u64)) +
			 /* IFLA_VF_STATS_RX_DROPPED */
			 nla_total_size_64bit(sizeof(__u64)) +
			 /* IFLA_VF_STATS_TX_DROPPED */
			 nla_total_size_64bit(sizeof(__u64)) +
			 nla_total_size(sizeof(struct ifla_vf_trust)));
		return size;
	} else
		return 0;
}
static size_t rtnl_port_size(const struct net_device *dev,
			     u32 ext_filter_mask)
{
	size_t port_size = nla_total_size(4)		/* PORT_VF */
		+ nla_total_size(PORT_PROFILE_MAX)	/* PORT_PROFILE */
		+ nla_total_size(PORT_UUID_MAX)		/* PORT_INSTANCE_UUID */
		+ nla_total_size(PORT_UUID_MAX)		/* PORT_HOST_UUID */
		+ nla_total_size(1)			/* PORT_VDP_REQUEST */
		+ nla_total_size(2);			/* PORT_VDP_RESPONSE */
	size_t vf_ports_size = nla_total_size(sizeof(struct nlattr));
	size_t vf_port_size = nla_total_size(sizeof(struct nlattr))
		+ port_size;
	size_t port_self_size = nla_total_size(sizeof(struct nlattr))
		+ port_size;

	if (!dev->netdev_ops->ndo_get_vf_port || !dev->dev.parent ||
	    !(ext_filter_mask & RTEXT_FILTER_VF))
		return 0;
	if (dev_num_vf(dev->dev.parent))
		return port_self_size + vf_ports_size +
			vf_port_size * dev_num_vf(dev->dev.parent);
	else
		return port_self_size;
}
static size_t rtnl_xdp_size(void)
{
	size_t xdp_size = nla_total_size(0) +	/* nest IFLA_XDP */
			  nla_total_size(1) +	/* XDP_ATTACHED */
			  nla_total_size(4) +	/* XDP_PROG_ID (or 1st mode) */
			  nla_total_size(4);	/* XDP_<mode>_PROG_ID */

	return xdp_size;
}
static noinline size_t if_nlmsg_size(const struct net_device *dev,
				     u32 ext_filter_mask)
{
	return NLMSG_ALIGN(sizeof(struct ifinfomsg))
	       + nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */
	       + nla_total_size(IFALIASZ) /* IFLA_IFALIAS */
	       + nla_total_size(IFNAMSIZ) /* IFLA_QDISC */
	       + nla_total_size_64bit(sizeof(struct rtnl_link_ifmap))
	       + nla_total_size(sizeof(struct rtnl_link_stats))
	       + nla_total_size_64bit(sizeof(struct rtnl_link_stats64))
	       + nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */
	       + nla_total_size(MAX_ADDR_LEN) /* IFLA_BROADCAST */
	       + nla_total_size(4) /* IFLA_TXQLEN */
	       + nla_total_size(4) /* IFLA_WEIGHT */
	       + nla_total_size(4) /* IFLA_MTU */
	       + nla_total_size(4) /* IFLA_LINK */
	       + nla_total_size(4) /* IFLA_MASTER */
	       + nla_total_size(1) /* IFLA_CARRIER */
	       + nla_total_size(4) /* IFLA_PROMISCUITY */
	       + nla_total_size(4) /* IFLA_NUM_TX_QUEUES */
	       + nla_total_size(4) /* IFLA_NUM_RX_QUEUES */
	       + nla_total_size(4) /* IFLA_GSO_MAX_SEGS */
	       + nla_total_size(4) /* IFLA_GSO_MAX_SIZE */
	       + nla_total_size(1) /* IFLA_OPERSTATE */
	       + nla_total_size(1) /* IFLA_LINKMODE */
	       + nla_total_size(4) /* IFLA_CARRIER_CHANGES */
	       + nla_total_size(4) /* IFLA_LINK_NETNSID */
	       + nla_total_size(4) /* IFLA_GROUP */
	       + nla_total_size(ext_filter_mask
			        & RTEXT_FILTER_VF ? 4 : 0) /* IFLA_NUM_VF */
	       + rtnl_vfinfo_size(dev, ext_filter_mask) /* IFLA_VFINFO_LIST */
	       + rtnl_port_size(dev, ext_filter_mask) /* IFLA_VF_PORTS + IFLA_PORT_SELF */
	       + rtnl_link_get_size(dev) /* IFLA_LINKINFO */
	       + rtnl_link_get_af_size(dev, ext_filter_mask) /* IFLA_AF_SPEC */
	       + nla_total_size(MAX_PHYS_ITEM_ID_LEN) /* IFLA_PHYS_PORT_ID */
	       + nla_total_size(MAX_PHYS_ITEM_ID_LEN) /* IFLA_PHYS_SWITCH_ID */
	       + nla_total_size(IFNAMSIZ) /* IFLA_PHYS_PORT_NAME */
	       + rtnl_xdp_size() /* IFLA_XDP */
	       + nla_total_size(4)  /* IFLA_EVENT */
	       + nla_total_size(4)  /* IFLA_NEW_NETNSID */
	       + nla_total_size(4)  /* IFLA_NEW_IFINDEX */
	       + nla_total_size(1)  /* IFLA_PROTO_DOWN */
	       + nla_total_size(4)  /* IFLA_IF_NETNSID */
	       + nla_total_size(4)  /* IFLA_CARRIER_UP_COUNT */
	       + nla_total_size(4)  /* IFLA_CARRIER_DOWN_COUNT */
	       + nla_total_size(4)  /* IFLA_MIN_MTU */
	       + nla_total_size(4)  /* IFLA_MAX_MTU */
	       + 0;
}
static int rtnl_vf_ports_fill(struct sk_buff *skb, struct net_device *dev)
{
	struct nlattr *vf_ports;
	struct nlattr *vf_port;
	int vf;
	int err;

	vf_ports = nla_nest_start(skb, IFLA_VF_PORTS);
	if (!vf_ports)
		return -EMSGSIZE;

	for (vf = 0; vf < dev_num_vf(dev->dev.parent); vf++) {
		vf_port = nla_nest_start(skb, IFLA_VF_PORT);
		if (!vf_port)
			goto nla_put_failure;
		if (nla_put_u32(skb, IFLA_PORT_VF, vf))
			goto nla_put_failure;
		err = dev->netdev_ops->ndo_get_vf_port(dev, vf, skb);
		if (err == -EMSGSIZE)
			goto nla_put_failure;
		if (err) {
			nla_nest_cancel(skb, vf_port);
			continue;
		}
		nla_nest_end(skb, vf_port);
	}

	nla_nest_end(skb, vf_ports);

	return 0;

nla_put_failure:
	nla_nest_cancel(skb, vf_ports);
	return -EMSGSIZE;
}
static int rtnl_port_self_fill(struct sk_buff *skb, struct net_device *dev)
{
	struct nlattr *port_self;
	int err;

	port_self = nla_nest_start(skb, IFLA_PORT_SELF);
	if (!port_self)
		return -EMSGSIZE;

	err = dev->netdev_ops->ndo_get_vf_port(dev, PORT_SELF_VF, skb);
	if (err) {
		nla_nest_cancel(skb, port_self);
		return (err == -EMSGSIZE) ? err : 0;
	}

	nla_nest_end(skb, port_self);

	return 0;
}
static int rtnl_port_fill(struct sk_buff *skb, struct net_device *dev,
			  u32 ext_filter_mask)
{
	int err;

	if (!dev->netdev_ops->ndo_get_vf_port || !dev->dev.parent ||
	    !(ext_filter_mask & RTEXT_FILTER_VF))
		return 0;

	err = rtnl_port_self_fill(skb, dev);
	if (err)
		return err;

	if (dev_num_vf(dev->dev.parent)) {
		err = rtnl_vf_ports_fill(skb, dev);
		if (err)
			return err;
	}

	return 0;
}
static int rtnl_phys_port_id_fill(struct sk_buff *skb, struct net_device *dev)
{
	int err;
	struct netdev_phys_item_id ppid;

	err = dev_get_phys_port_id(dev, &ppid);
	if (err) {
		if (err == -EOPNOTSUPP)
			return 0;
		return err;
	}

	if (nla_put(skb, IFLA_PHYS_PORT_ID, ppid.id_len, ppid.id))
		return -EMSGSIZE;

	return 0;
}
static int rtnl_phys_port_name_fill(struct sk_buff *skb, struct net_device *dev)
{
	char name[IFNAMSIZ];
	int err;

	err = dev_get_phys_port_name(dev, name, sizeof(name));
	if (err) {
		if (err == -EOPNOTSUPP)
			return 0;
		return err;
	}

	if (nla_put_string(skb, IFLA_PHYS_PORT_NAME, name))
		return -EMSGSIZE;

	return 0;
}
static int rtnl_phys_switch_id_fill(struct sk_buff *skb, struct net_device *dev)
{
	int err;
	struct switchdev_attr attr = {
		.orig_dev = dev,
		.id = SWITCHDEV_ATTR_ID_PORT_PARENT_ID,
		.flags = SWITCHDEV_F_NO_RECURSE,
	};

	err = switchdev_port_attr_get(dev, &attr);
	if (err) {
		if (err == -EOPNOTSUPP)
			return 0;
		return err;
	}

	if (nla_put(skb, IFLA_PHYS_SWITCH_ID, attr.u.ppid.id_len,
		    attr.u.ppid.id))
		return -EMSGSIZE;

	return 0;
}
static noinline_for_stack int rtnl_fill_stats(struct sk_buff *skb,
					      struct net_device *dev)
{
	struct rtnl_link_stats64 *sp;
	struct nlattr *attr;

	attr = nla_reserve_64bit(skb, IFLA_STATS64,
				 sizeof(struct rtnl_link_stats64), IFLA_PAD);
	if (!attr)
		return -EMSGSIZE;

	sp = nla_data(attr);
	dev_get_stats(dev, sp);

	attr = nla_reserve(skb, IFLA_STATS,
			   sizeof(struct rtnl_link_stats));
	if (!attr)
		return -EMSGSIZE;

	copy_rtnl_link_stats(nla_data(attr), sp);

	return 0;
}
static noinline_for_stack int rtnl_fill_vfinfo(struct sk_buff *skb,
					       struct net_device *dev,
					       int vfs_num,
					       struct nlattr *vfinfo)
{
	struct ifla_vf_rss_query_en vf_rss_query_en;
	struct nlattr *vf, *vfstats, *vfvlanlist;
	struct ifla_vf_link_state vf_linkstate;
	struct ifla_vf_vlan_info vf_vlan_info;
	struct ifla_vf_spoofchk vf_spoofchk;
	struct ifla_vf_tx_rate vf_tx_rate;
	struct ifla_vf_stats vf_stats;
	struct ifla_vf_trust vf_trust;
	struct ifla_vf_vlan vf_vlan;
	struct ifla_vf_rate vf_rate;
	struct ifla_vf_mac vf_mac;
	struct ifla_vf_info ivi;

	memset(&ivi, 0, sizeof(ivi));

	/* Not all SR-IOV capable drivers support the
	 * spoofcheck and "RSS query enable" query.  Preset to
	 * -1 so the user space tool can detect that the driver
	 * didn't report anything.
	 */
	ivi.spoofchk = -1;
	ivi.rss_query_en = -1;
	ivi.trusted = -1;
	/* The default value for VF link state is "auto"
	 * IFLA_VF_LINK_STATE_AUTO which equals zero
	 */
	ivi.linkstate = 0;
	/* VLAN Protocol by default is 802.1Q */
	ivi.vlan_proto = htons(ETH_P_8021Q);
	if (dev->netdev_ops->ndo_get_vf_config(dev, vfs_num, &ivi))
		return 0;

	memset(&vf_vlan_info, 0, sizeof(vf_vlan_info));

	vf_mac.vf =
		vf_vlan.vf =
		vf_vlan_info.vf =
		vf_rate.vf =
		vf_tx_rate.vf =
		vf_spoofchk.vf =
		vf_linkstate.vf =
		vf_rss_query_en.vf =
		vf_trust.vf = ivi.vf;

	memcpy(vf_mac.mac, ivi.mac, sizeof(ivi.mac));
	vf_vlan.vlan = ivi.vlan;
	vf_vlan.qos = ivi.qos;
	vf_vlan_info.vlan = ivi.vlan;
	vf_vlan_info.qos = ivi.qos;
	vf_vlan_info.vlan_proto = ivi.vlan_proto;
	vf_tx_rate.rate = ivi.max_tx_rate;
	vf_rate.min_tx_rate = ivi.min_tx_rate;
	vf_rate.max_tx_rate = ivi.max_tx_rate;
	vf_spoofchk.setting = ivi.spoofchk;
	vf_linkstate.link_state = ivi.linkstate;
	vf_rss_query_en.setting = ivi.rss_query_en;
	vf_trust.setting = ivi.trusted;
	vf = nla_nest_start(skb, IFLA_VF_INFO);
	if (!vf)
		goto nla_put_vfinfo_failure;
	if (nla_put(skb, IFLA_VF_MAC, sizeof(vf_mac), &vf_mac) ||
	    nla_put(skb, IFLA_VF_VLAN, sizeof(vf_vlan), &vf_vlan) ||
	    nla_put(skb, IFLA_VF_RATE, sizeof(vf_rate),
		    &vf_rate) ||
	    nla_put(skb, IFLA_VF_TX_RATE, sizeof(vf_tx_rate),
		    &vf_tx_rate) ||
	    nla_put(skb, IFLA_VF_SPOOFCHK, sizeof(vf_spoofchk),
		    &vf_spoofchk) ||
	    nla_put(skb, IFLA_VF_LINK_STATE, sizeof(vf_linkstate),
		    &vf_linkstate) ||
	    nla_put(skb, IFLA_VF_RSS_QUERY_EN,
		    sizeof(vf_rss_query_en),
		    &vf_rss_query_en) ||
	    nla_put(skb, IFLA_VF_TRUST,
		    sizeof(vf_trust), &vf_trust))
		goto nla_put_vf_failure;
	vfvlanlist = nla_nest_start(skb, IFLA_VF_VLAN_LIST);
	if (!vfvlanlist)
		goto nla_put_vf_failure;
	if (nla_put(skb, IFLA_VF_VLAN_INFO, sizeof(vf_vlan_info),
		    &vf_vlan_info)) {
		nla_nest_cancel(skb, vfvlanlist);
		goto nla_put_vf_failure;
	}
	nla_nest_end(skb, vfvlanlist);
	memset(&vf_stats, 0, sizeof(vf_stats));
	if (dev->netdev_ops->ndo_get_vf_stats)
		dev->netdev_ops->ndo_get_vf_stats(dev, vfs_num,
						  &vf_stats);
	vfstats = nla_nest_start(skb, IFLA_VF_STATS);
	if (!vfstats)
		goto nla_put_vf_failure;
	if (nla_put_u64_64bit(skb, IFLA_VF_STATS_RX_PACKETS,
			      vf_stats.rx_packets, IFLA_VF_STATS_PAD) ||
	    nla_put_u64_64bit(skb, IFLA_VF_STATS_TX_PACKETS,
			      vf_stats.tx_packets, IFLA_VF_STATS_PAD) ||
	    nla_put_u64_64bit(skb, IFLA_VF_STATS_RX_BYTES,
			      vf_stats.rx_bytes, IFLA_VF_STATS_PAD) ||
	    nla_put_u64_64bit(skb, IFLA_VF_STATS_TX_BYTES,
			      vf_stats.tx_bytes, IFLA_VF_STATS_PAD) ||
	    nla_put_u64_64bit(skb, IFLA_VF_STATS_BROADCAST,
			      vf_stats.broadcast, IFLA_VF_STATS_PAD) ||
	    nla_put_u64_64bit(skb, IFLA_VF_STATS_MULTICAST,
			      vf_stats.multicast, IFLA_VF_STATS_PAD) ||
	    nla_put_u64_64bit(skb, IFLA_VF_STATS_RX_DROPPED,
			      vf_stats.rx_dropped, IFLA_VF_STATS_PAD) ||
	    nla_put_u64_64bit(skb, IFLA_VF_STATS_TX_DROPPED,
			      vf_stats.tx_dropped, IFLA_VF_STATS_PAD)) {
		nla_nest_cancel(skb, vfstats);
		goto nla_put_vf_failure;
	}
	nla_nest_end(skb, vfstats);
	nla_nest_end(skb, vf);
	return 0;

nla_put_vf_failure:
	nla_nest_cancel(skb, vf);
nla_put_vfinfo_failure:
	nla_nest_cancel(skb, vfinfo);
	return -EMSGSIZE;
}
static noinline_for_stack int rtnl_fill_vf(struct sk_buff *skb,
					   struct net_device *dev,
					   u32 ext_filter_mask)
{
	struct nlattr *vfinfo;
	int i, num_vfs;

	if (!dev->dev.parent || ((ext_filter_mask & RTEXT_FILTER_VF) == 0))
		return 0;

	num_vfs = dev_num_vf(dev->dev.parent);
	if (nla_put_u32(skb, IFLA_NUM_VF, num_vfs))
		return -EMSGSIZE;

	if (!dev->netdev_ops->ndo_get_vf_config)
		return 0;

	vfinfo = nla_nest_start(skb, IFLA_VFINFO_LIST);
	if (!vfinfo)
		return -EMSGSIZE;

	for (i = 0; i < num_vfs; i++) {
		if (rtnl_fill_vfinfo(skb, dev, i, vfinfo))
			return -EMSGSIZE;
	}

	nla_nest_end(skb, vfinfo);
	return 0;
}
static int rtnl_fill_link_ifmap(struct sk_buff *skb, struct net_device *dev)
{
	struct rtnl_link_ifmap map;

	memset(&map, 0, sizeof(map));
	map.mem_start   = dev->mem_start;
	map.mem_end     = dev->mem_end;
	map.base_addr   = dev->base_addr;
	map.irq         = dev->irq;
	map.dma         = dev->dma;
	map.port        = dev->if_port;

	if (nla_put_64bit(skb, IFLA_MAP, sizeof(map), &map, IFLA_PAD))
		return -EMSGSIZE;

	return 0;
}
static u32 rtnl_xdp_prog_skb(struct net_device *dev)
{
	const struct bpf_prog *generic_xdp_prog;

	ASSERT_RTNL();

	generic_xdp_prog = rtnl_dereference(dev->xdp_prog);
	if (!generic_xdp_prog)
		return 0;
	return generic_xdp_prog->aux->id;
}

static u32 rtnl_xdp_prog_drv(struct net_device *dev)
{
	return __dev_xdp_query(dev, dev->netdev_ops->ndo_bpf, XDP_QUERY_PROG);
}

static u32 rtnl_xdp_prog_hw(struct net_device *dev)
{
	return __dev_xdp_query(dev, dev->netdev_ops->ndo_bpf,
			       XDP_QUERY_PROG_HW);
}
static int rtnl_xdp_report_one(struct sk_buff *skb, struct net_device *dev,
			       u32 *prog_id, u8 *mode, u8 tgt_mode, u32 attr,
			       u32 (*get_prog_id)(struct net_device *dev))
{
	u32 curr_id;
	int err;

	curr_id = get_prog_id(dev);
	if (!curr_id)
		return 0;

	*prog_id = curr_id;
	err = nla_put_u32(skb, attr, curr_id);
	if (err)
		return err;

	if (*mode != XDP_ATTACHED_NONE)
		*mode = XDP_ATTACHED_MULTI;
	else
		*mode = tgt_mode;

	return 0;
}
static int rtnl_xdp_fill(struct sk_buff *skb, struct net_device *dev)
{
	u32 prog_id;
	u8 mode;
	struct nlattr *xdp;
	int err;

	xdp = nla_nest_start(skb, IFLA_XDP);
	if (!xdp)
		return -EMSGSIZE;

	prog_id = 0;
	mode = XDP_ATTACHED_NONE;
	err = rtnl_xdp_report_one(skb, dev, &prog_id, &mode, XDP_ATTACHED_SKB,
				  IFLA_XDP_SKB_PROG_ID, rtnl_xdp_prog_skb);
	if (err)
		goto err_cancel;
	err = rtnl_xdp_report_one(skb, dev, &prog_id, &mode, XDP_ATTACHED_DRV,
				  IFLA_XDP_DRV_PROG_ID, rtnl_xdp_prog_drv);
	if (err)
		goto err_cancel;
	err = rtnl_xdp_report_one(skb, dev, &prog_id, &mode, XDP_ATTACHED_HW,
				  IFLA_XDP_HW_PROG_ID, rtnl_xdp_prog_hw);
	if (err)
		goto err_cancel;

	err = nla_put_u8(skb, IFLA_XDP_ATTACHED, mode);
	if (err)
		goto err_cancel;

	if (prog_id && mode != XDP_ATTACHED_MULTI) {
		err = nla_put_u32(skb, IFLA_XDP_PROG_ID, prog_id);
		if (err)
			goto err_cancel;
	}

	nla_nest_end(skb, xdp);
	return 0;

err_cancel:
	nla_nest_cancel(skb, xdp);
	return err;
}
static u32 rtnl_get_event(unsigned long event)
{
	u32 rtnl_event_type = IFLA_EVENT_NONE;

	switch (event) {
	case NETDEV_REBOOT:
		rtnl_event_type = IFLA_EVENT_REBOOT;
		break;
	case NETDEV_FEAT_CHANGE:
		rtnl_event_type = IFLA_EVENT_FEATURES;
		break;
	case NETDEV_BONDING_FAILOVER:
		rtnl_event_type = IFLA_EVENT_BONDING_FAILOVER;
		break;
	case NETDEV_NOTIFY_PEERS:
		rtnl_event_type = IFLA_EVENT_NOTIFY_PEERS;
		break;
	case NETDEV_RESEND_IGMP:
		rtnl_event_type = IFLA_EVENT_IGMP_RESEND;
		break;
	case NETDEV_CHANGEINFODATA:
		rtnl_event_type = IFLA_EVENT_BONDING_OPTIONS;
		break;
	default:
		break;
	}

	return rtnl_event_type;
}
static int put_master_ifindex(struct sk_buff *skb, struct net_device *dev)
{
	const struct net_device *upper_dev;
	int ret = 0;

	rcu_read_lock();

	upper_dev = netdev_master_upper_dev_get_rcu(dev);
	if (upper_dev)
		ret = nla_put_u32(skb, IFLA_MASTER, upper_dev->ifindex);

	rcu_read_unlock();
	return ret;
}
static int nla_put_iflink(struct sk_buff *skb, const struct net_device *dev,
			  bool force)
{
	int ifindex = dev_get_iflink(dev);

	if (force || dev->ifindex != ifindex)
		return nla_put_u32(skb, IFLA_LINK, ifindex);

	return 0;
}
static noinline_for_stack int nla_put_ifalias(struct sk_buff *skb,
					      struct net_device *dev)
{
	char buf[IFALIASZ];
	int ret;

	ret = dev_get_alias(dev, buf, sizeof(buf));
	return ret > 0 ? nla_put_string(skb, IFLA_IFALIAS, buf) : 0;
}
static int rtnl_fill_link_netnsid(struct sk_buff *skb,
				  const struct net_device *dev,
				  struct net *src_net, gfp_t gfp)
{
	bool put_iflink = false;

	if (dev->rtnl_link_ops && dev->rtnl_link_ops->get_link_net) {
		struct net *link_net = dev->rtnl_link_ops->get_link_net(dev);

		if (!net_eq(dev_net(dev), link_net)) {
			int id = peernet2id_alloc(src_net, link_net, gfp);

			if (nla_put_s32(skb, IFLA_LINK_NETNSID, id))
				return -EMSGSIZE;

			put_iflink = true;
		}
	}

	return nla_put_iflink(skb, dev, put_iflink);
}
static int rtnl_fill_link_af(struct sk_buff *skb,
			     const struct net_device *dev,
			     u32 ext_filter_mask)
{
	const struct rtnl_af_ops *af_ops;
	struct nlattr *af_spec;

	af_spec = nla_nest_start(skb, IFLA_AF_SPEC);
	if (!af_spec)
		return -EMSGSIZE;

	list_for_each_entry_rcu(af_ops, &rtnl_af_ops, list) {
		struct nlattr *af;
		int err;

		if (!af_ops->fill_link_af)
			continue;

		af = nla_nest_start(skb, af_ops->family);
		if (!af)
			return -EMSGSIZE;

		err = af_ops->fill_link_af(skb, dev, ext_filter_mask);
		/*
		 * Caller may return ENODATA to indicate that there
		 * was no data to be dumped. This is not an error, it
		 * means we should trim the attribute header and
		 * continue.
		 */
		if (err == -ENODATA)
			nla_nest_cancel(skb, af);
		else if (err < 0)
			return -EMSGSIZE;

		nla_nest_end(skb, af);
	}

	nla_nest_end(skb, af_spec);
	return 0;
}
static int rtnl_fill_ifinfo(struct sk_buff *skb,
			    struct net_device *dev, struct net *src_net,
			    int type, u32 pid, u32 seq, u32 change,
			    unsigned int flags, u32 ext_filter_mask,
			    u32 event, int *new_nsid, int new_ifindex,
			    int tgt_netnsid, gfp_t gfp)
{
	struct ifinfomsg *ifm;
	struct nlmsghdr *nlh;

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ifm), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	ifm = nlmsg_data(nlh);
	ifm->ifi_family = AF_UNSPEC;
	ifm->__ifi_pad = 0;
	ifm->ifi_type = dev->type;
	ifm->ifi_index = dev->ifindex;
	ifm->ifi_flags = dev_get_flags(dev);
	ifm->ifi_change = change;

	if (tgt_netnsid >= 0 && nla_put_s32(skb, IFLA_IF_NETNSID, tgt_netnsid))
		goto nla_put_failure;

	if (nla_put_string(skb, IFLA_IFNAME, dev->name) ||
	    nla_put_u32(skb, IFLA_TXQLEN, dev->tx_queue_len) ||
	    nla_put_u8(skb, IFLA_OPERSTATE,
		       netif_running(dev) ? dev->operstate : IF_OPER_DOWN) ||
	    nla_put_u8(skb, IFLA_LINKMODE, dev->link_mode) ||
	    nla_put_u32(skb, IFLA_MTU, dev->mtu) ||
	    nla_put_u32(skb, IFLA_MIN_MTU, dev->min_mtu) ||
	    nla_put_u32(skb, IFLA_MAX_MTU, dev->max_mtu) ||
	    nla_put_u32(skb, IFLA_GROUP, dev->group) ||
	    nla_put_u32(skb, IFLA_PROMISCUITY, dev->promiscuity) ||
	    nla_put_u32(skb, IFLA_NUM_TX_QUEUES, dev->num_tx_queues) ||
	    nla_put_u32(skb, IFLA_GSO_MAX_SEGS, dev->gso_max_segs) ||
	    nla_put_u32(skb, IFLA_GSO_MAX_SIZE, dev->gso_max_size) ||
#ifdef CONFIG_RPS
	    nla_put_u32(skb, IFLA_NUM_RX_QUEUES, dev->num_rx_queues) ||
#endif
	    put_master_ifindex(skb, dev) ||
	    nla_put_u8(skb, IFLA_CARRIER, netif_carrier_ok(dev)) ||
	    (dev->qdisc &&
	     nla_put_string(skb, IFLA_QDISC, dev->qdisc->ops->id)) ||
	    nla_put_ifalias(skb, dev) ||
	    nla_put_u32(skb, IFLA_CARRIER_CHANGES,
			atomic_read(&dev->carrier_up_count) +
			atomic_read(&dev->carrier_down_count)) ||
	    nla_put_u8(skb, IFLA_PROTO_DOWN, dev->proto_down) ||
	    nla_put_u32(skb, IFLA_CARRIER_UP_COUNT,
			atomic_read(&dev->carrier_up_count)) ||
	    nla_put_u32(skb, IFLA_CARRIER_DOWN_COUNT,
			atomic_read(&dev->carrier_down_count)))
		goto nla_put_failure;

	if (event != IFLA_EVENT_NONE) {
		if (nla_put_u32(skb, IFLA_EVENT, event))
			goto nla_put_failure;
	}

	if (rtnl_fill_link_ifmap(skb, dev))
		goto nla_put_failure;

	if (dev->addr_len) {
		if (nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr) ||
		    nla_put(skb, IFLA_BROADCAST, dev->addr_len, dev->broadcast))
			goto nla_put_failure;
	}

	if (rtnl_phys_port_id_fill(skb, dev))
		goto nla_put_failure;

	if (rtnl_phys_port_name_fill(skb, dev))
		goto nla_put_failure;

	if (rtnl_phys_switch_id_fill(skb, dev))
		goto nla_put_failure;

	if (rtnl_fill_stats(skb, dev))
		goto nla_put_failure;

	if (rtnl_fill_vf(skb, dev, ext_filter_mask))
		goto nla_put_failure;

	if (rtnl_port_fill(skb, dev, ext_filter_mask))
		goto nla_put_failure;

	if (rtnl_xdp_fill(skb, dev))
		goto nla_put_failure;

	if (dev->rtnl_link_ops || rtnl_have_link_slave_info(dev)) {
		if (rtnl_link_fill(skb, dev) < 0)
			goto nla_put_failure;
	}

	if (rtnl_fill_link_netnsid(skb, dev, src_net, gfp))
		goto nla_put_failure;

	if (new_nsid &&
	    nla_put_s32(skb, IFLA_NEW_NETNSID, *new_nsid) < 0)
		goto nla_put_failure;
	if (new_ifindex &&
	    nla_put_s32(skb, IFLA_NEW_IFINDEX, new_ifindex) < 0)
		goto nla_put_failure;

	rcu_read_lock();
	if (rtnl_fill_link_af(skb, dev, ext_filter_mask))
		goto nla_put_failure_rcu;
	rcu_read_unlock();

	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure_rcu:
	rcu_read_unlock();
nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
static const struct nla_policy ifla_policy[IFLA_MAX+1] = {
	[IFLA_IFNAME]		= { .type = NLA_STRING, .len = IFNAMSIZ-1 },
	[IFLA_ADDRESS]		= { .type = NLA_BINARY, .len = MAX_ADDR_LEN },
	[IFLA_BROADCAST]	= { .type = NLA_BINARY, .len = MAX_ADDR_LEN },
	[IFLA_MAP]		= { .len = sizeof(struct rtnl_link_ifmap) },
	[IFLA_MTU]		= { .type = NLA_U32 },
	[IFLA_LINK]		= { .type = NLA_U32 },
	[IFLA_MASTER]		= { .type = NLA_U32 },
	[IFLA_CARRIER]		= { .type = NLA_U8 },
	[IFLA_TXQLEN]		= { .type = NLA_U32 },
	[IFLA_WEIGHT]		= { .type = NLA_U32 },
	[IFLA_OPERSTATE]	= { .type = NLA_U8 },
	[IFLA_LINKMODE]		= { .type = NLA_U8 },
	[IFLA_LINKINFO]		= { .type = NLA_NESTED },
	[IFLA_NET_NS_PID]	= { .type = NLA_U32 },
	[IFLA_NET_NS_FD]	= { .type = NLA_U32 },
	/* IFLA_IFALIAS is a string, but policy is set to NLA_BINARY to
	 * allow 0-length string (needed to remove an alias).
	 */
	[IFLA_IFALIAS]	        = { .type = NLA_BINARY, .len = IFALIASZ - 1 },
	[IFLA_VFINFO_LIST]	= { .type = NLA_NESTED },
	[IFLA_VF_PORTS]		= { .type = NLA_NESTED },
	[IFLA_PORT_SELF]	= { .type = NLA_NESTED },
	[IFLA_AF_SPEC]		= { .type = NLA_NESTED },
	[IFLA_EXT_MASK]		= { .type = NLA_U32 },
	[IFLA_PROMISCUITY]	= { .type = NLA_U32 },
	[IFLA_NUM_TX_QUEUES]	= { .type = NLA_U32 },
	[IFLA_NUM_RX_QUEUES]	= { .type = NLA_U32 },
	[IFLA_GSO_MAX_SEGS]	= { .type = NLA_U32 },
	[IFLA_GSO_MAX_SIZE]	= { .type = NLA_U32 },
	[IFLA_PHYS_PORT_ID]	= { .type = NLA_BINARY, .len = MAX_PHYS_ITEM_ID_LEN },
	[IFLA_CARRIER_CHANGES]	= { .type = NLA_U32 },  /* ignored */
	[IFLA_PHYS_SWITCH_ID]	= { .type = NLA_BINARY, .len = MAX_PHYS_ITEM_ID_LEN },
	[IFLA_LINK_NETNSID]	= { .type = NLA_S32 },
	[IFLA_PROTO_DOWN]	= { .type = NLA_U8 },
	[IFLA_XDP]		= { .type = NLA_NESTED },
	[IFLA_EVENT]		= { .type = NLA_U32 },
	[IFLA_GROUP]		= { .type = NLA_U32 },
	[IFLA_IF_NETNSID]	= { .type = NLA_S32 },
	[IFLA_CARRIER_UP_COUNT]	= { .type = NLA_U32 },
	[IFLA_CARRIER_DOWN_COUNT] = { .type = NLA_U32 },
	[IFLA_MIN_MTU]		= { .type = NLA_U32 },
	[IFLA_MAX_MTU]		= { .type = NLA_U32 },
};
static const struct nla_policy ifla_info_policy[IFLA_INFO_MAX+1] = {
	[IFLA_INFO_KIND]	= { .type = NLA_STRING },
	[IFLA_INFO_DATA]	= { .type = NLA_NESTED },
	[IFLA_INFO_SLAVE_KIND]	= { .type = NLA_STRING },
	[IFLA_INFO_SLAVE_DATA]	= { .type = NLA_NESTED },
};
static const struct nla_policy ifla_vf_policy[IFLA_VF_MAX+1] = {
	[IFLA_VF_MAC]		= { .len = sizeof(struct ifla_vf_mac) },
	[IFLA_VF_VLAN]		= { .len = sizeof(struct ifla_vf_vlan) },
	[IFLA_VF_VLAN_LIST]     = { .type = NLA_NESTED },
	[IFLA_VF_TX_RATE]	= { .len = sizeof(struct ifla_vf_tx_rate) },
	[IFLA_VF_SPOOFCHK]	= { .len = sizeof(struct ifla_vf_spoofchk) },
	[IFLA_VF_RATE]		= { .len = sizeof(struct ifla_vf_rate) },
	[IFLA_VF_LINK_STATE]	= { .len = sizeof(struct ifla_vf_link_state) },
	[IFLA_VF_RSS_QUERY_EN]	= { .len = sizeof(struct ifla_vf_rss_query_en) },
	[IFLA_VF_STATS]		= { .type = NLA_NESTED },
	[IFLA_VF_TRUST]		= { .len = sizeof(struct ifla_vf_trust) },
	[IFLA_VF_IB_NODE_GUID]	= { .len = sizeof(struct ifla_vf_guid) },
	[IFLA_VF_IB_PORT_GUID]	= { .len = sizeof(struct ifla_vf_guid) },
};
static const struct nla_policy ifla_port_policy[IFLA_PORT_MAX+1] = {
	[IFLA_PORT_VF]		= { .type = NLA_U32 },
	[IFLA_PORT_PROFILE]	= { .type = NLA_STRING,
				    .len = PORT_PROFILE_MAX },
	[IFLA_PORT_INSTANCE_UUID] = { .type = NLA_BINARY,
				      .len = PORT_UUID_MAX },
	[IFLA_PORT_HOST_UUID]	= { .type = NLA_STRING,
				    .len = PORT_UUID_MAX },
	[IFLA_PORT_REQUEST]	= { .type = NLA_U8, },
	[IFLA_PORT_RESPONSE]	= { .type = NLA_U16, },

	/* Unused, but we need to keep it here since user space could
	 * fill it. It's also broken with regard to NLA_BINARY use in
	 * combination with structs.
	 */
	[IFLA_PORT_VSI_TYPE]	= { .type = NLA_BINARY,
				    .len = sizeof(struct ifla_port_vsi) },
};
static const struct nla_policy ifla_xdp_policy[IFLA_XDP_MAX + 1] = {
	[IFLA_XDP_FD]		= { .type = NLA_S32 },
	[IFLA_XDP_ATTACHED]	= { .type = NLA_U8 },
	[IFLA_XDP_FLAGS]	= { .type = NLA_U32 },
	[IFLA_XDP_PROG_ID]	= { .type = NLA_U32 },
};
static const struct rtnl_link_ops *linkinfo_to_kind_ops(const struct nlattr *nla)
{
	const struct rtnl_link_ops *ops = NULL;
	struct nlattr *linfo[IFLA_INFO_MAX + 1];

	if (nla_parse_nested(linfo, IFLA_INFO_MAX, nla,
			     ifla_info_policy, NULL) < 0)
		return NULL;

	if (linfo[IFLA_INFO_KIND]) {
		char kind[MODULE_NAME_LEN];

		nla_strlcpy(kind, linfo[IFLA_INFO_KIND], sizeof(kind));
		ops = rtnl_link_ops_get(kind);
	}

	return ops;
}
static bool link_master_filtered(struct net_device *dev, int master_idx)
{
	struct net_device *master;

	if (!master_idx)
		return false;

	master = netdev_master_upper_dev_get(dev);
	if (!master || master->ifindex != master_idx)
		return true;

	return false;
}

static bool link_kind_filtered(const struct net_device *dev,
			       const struct rtnl_link_ops *kind_ops)
{
	if (kind_ops && dev->rtnl_link_ops != kind_ops)
		return true;

	return false;
}

static bool link_dump_filtered(struct net_device *dev,
			       int master_idx,
			       const struct rtnl_link_ops *kind_ops)
{
	if (link_master_filtered(dev, master_idx) ||
	    link_kind_filtered(dev, kind_ops))
		return true;

	return false;
}
static struct net *get_target_net(struct sock *sk, int netnsid)
{
	struct net *net;

	net = get_net_ns_by_id(sock_net(sk), netnsid);
	if (!net)
		return ERR_PTR(-EINVAL);

	/* For now, the caller is required to have CAP_NET_ADMIN in
	 * the user namespace owning the target net ns.
	 */
	if (!sk_ns_capable(sk, net->user_ns, CAP_NET_ADMIN)) {
		put_net(net);
		return ERR_PTR(-EACCES);
	}
	return net;
}
static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct net *tgt_net = net;
	int h, s_h;
	int idx = 0, s_idx;
	struct net_device *dev;
	struct hlist_head *head;
	struct nlattr *tb[IFLA_MAX+1];
	u32 ext_filter_mask = 0;
	const struct rtnl_link_ops *kind_ops = NULL;
	unsigned int flags = NLM_F_MULTI;
	int master_idx = 0;
	int netnsid = -1;
	int err;
	int hdrlen;

	s_h = cb->args[0];
	s_idx = cb->args[1];

	/* A hack to preserve kernel<->userspace interface.
	 * The correct header is ifinfomsg. It is consistent with rtnl_getlink.
	 * However, before Linux v3.9 the code here assumed rtgenmsg and that's
	 * what iproute2 < v3.9.0 used.
	 * We can detect the old iproute2. Even including the IFLA_EXT_MASK
	 * attribute, its netlink message is shorter than struct ifinfomsg.
	 */
	hdrlen = nlmsg_len(cb->nlh) < sizeof(struct ifinfomsg) ?
		 sizeof(struct rtgenmsg) : sizeof(struct ifinfomsg);

	if (nlmsg_parse(cb->nlh, hdrlen, tb, IFLA_MAX,
			ifla_policy, NULL) >= 0) {
		if (tb[IFLA_IF_NETNSID]) {
			netnsid = nla_get_s32(tb[IFLA_IF_NETNSID]);
			tgt_net = get_target_net(skb->sk, netnsid);
			if (IS_ERR(tgt_net))
				return PTR_ERR(tgt_net);
		}

		if (tb[IFLA_EXT_MASK])
			ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]);

		if (tb[IFLA_MASTER])
			master_idx = nla_get_u32(tb[IFLA_MASTER]);

		if (tb[IFLA_LINKINFO])
			kind_ops = linkinfo_to_kind_ops(tb[IFLA_LINKINFO]);

		if (master_idx || kind_ops)
			flags |= NLM_F_DUMP_FILTERED;
	}

	for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
		idx = 0;
		head = &tgt_net->dev_index_head[h];
		hlist_for_each_entry(dev, head, index_hlist) {
			if (link_dump_filtered(dev, master_idx, kind_ops))
				goto cont;
			if (idx < s_idx)
				goto cont;
			err = rtnl_fill_ifinfo(skb, dev, net,
					       RTM_NEWLINK,
					       NETLINK_CB(cb->skb).portid,
					       cb->nlh->nlmsg_seq, 0,
					       flags,
					       ext_filter_mask, 0, NULL, 0,
					       netnsid, GFP_KERNEL);

			if (err < 0) {
				if (likely(skb->len))
					goto out;

				goto out_err;
			}
cont:
			idx++;
		}
	}
out:
	err = skb->len;
out_err:
	cb->args[1] = idx;
	cb->args[0] = h;
	cb->seq = net->dev_base_seq;
	nl_dump_check_consistent(cb, nlmsg_hdr(skb));
	if (netnsid >= 0)
		put_net(tgt_net);

	return err;
}
int rtnl_nla_parse_ifla(struct nlattr **tb, const struct nlattr *head, int len,
			struct netlink_ext_ack *exterr)
{
	return nla_parse(tb, IFLA_MAX, head, len, ifla_policy, exterr);
}
EXPORT_SYMBOL(rtnl_nla_parse_ifla);
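
/*
 * Sketch of an external caller validating IFLA_* attributes embedded
 * in its own message ("attr", "extack" and "mtu" are hypothetical
 * caller state):
 *
 *	struct nlattr *tb[IFLA_MAX + 1];
 *
 *	err = rtnl_nla_parse_ifla(tb, nla_data(attr), nla_len(attr),
 *				  extack);
 *	if (err < 0)
 *		return err;
 *	if (tb[IFLA_MTU])
 *		mtu = nla_get_u32(tb[IFLA_MTU]);
 */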
struct net *rtnl_link_get_net(struct net *src_net, struct nlattr *tb[])
{
	struct net *net;
	/* Examine the link attributes and figure out which
	 * network namespace we are talking about.
	 */
	if (tb[IFLA_NET_NS_PID])
		net = get_net_ns_by_pid(nla_get_u32(tb[IFLA_NET_NS_PID]));
	else if (tb[IFLA_NET_NS_FD])
		net = get_net_ns_by_fd(nla_get_u32(tb[IFLA_NET_NS_FD]));
	else
		net = get_net(src_net);
	return net;
}
EXPORT_SYMBOL(rtnl_link_get_net);
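
/*
 * Sketch of a ->newlink() style caller resolving the target namespace
 * before creating its device (error handling trimmed; "src_net" and
 * "tb" follow the newlink calling convention, the rest is assumed):
 *
 *	struct net *net = rtnl_link_get_net(src_net, tb);
 *
 *	if (IS_ERR(net))
 *		return PTR_ERR(net);
 *	// ... register the device in "net" ...
 *	put_net(net);
 */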
/* Figure out which network namespace we are talking about by
 * examining the link attributes in the following order:
 *
 * 1. IFLA_NET_NS_PID
 * 2. IFLA_NET_NS_FD
 * 3. IFLA_IF_NETNSID
 */
static struct net *rtnl_link_get_net_by_nlattr(struct net *src_net,
					       struct nlattr *tb[])
{
	struct net *net;

	if (tb[IFLA_NET_NS_PID] || tb[IFLA_NET_NS_FD])
		return rtnl_link_get_net(src_net, tb);

	if (!tb[IFLA_IF_NETNSID])
		return get_net(src_net);

	net = get_net_ns_by_id(src_net, nla_get_u32(tb[IFLA_IF_NETNSID]));
	if (!net)
		return ERR_PTR(-EINVAL);

	return net;
}
static struct net *rtnl_link_get_net_capable(const struct sk_buff *skb,
					     struct net *src_net,
					     struct nlattr *tb[], int cap)
{
	struct net *net;

	net = rtnl_link_get_net_by_nlattr(src_net, tb);
	if (IS_ERR(net))
		return net;

	if (!netlink_ns_capable(skb, net->user_ns, cap)) {
		put_net(net);
		return ERR_PTR(-EPERM);
	}

	return net;
}
/* Verify that rtnetlink requests do not pass additional properties
 * potentially referring to different network namespaces.
 */
static int rtnl_ensure_unique_netns(struct nlattr *tb[],
				    struct netlink_ext_ack *extack,
				    bool netns_id_only)
{
	if (netns_id_only) {
		if (!tb[IFLA_NET_NS_PID] && !tb[IFLA_NET_NS_FD])
			return 0;

		NL_SET_ERR_MSG(extack, "specified netns attribute not supported");
		return -EOPNOTSUPP;
	}

	if (tb[IFLA_IF_NETNSID] && (tb[IFLA_NET_NS_PID] || tb[IFLA_NET_NS_FD]))
		goto invalid_attr;

	if (tb[IFLA_NET_NS_PID] && (tb[IFLA_IF_NETNSID] || tb[IFLA_NET_NS_FD]))
		goto invalid_attr;

	if (tb[IFLA_NET_NS_FD] && (tb[IFLA_IF_NETNSID] || tb[IFLA_NET_NS_PID]))
		goto invalid_attr;

	return 0;

invalid_attr:
	NL_SET_ERR_MSG(extack, "multiple netns identifying attributes specified");
	return -EINVAL;
}
static int validate_linkmsg(struct net_device *dev, struct nlattr *tb[])
{
	if (dev) {
		if (tb[IFLA_ADDRESS] &&
		    nla_len(tb[IFLA_ADDRESS]) < dev->addr_len)
			return -EINVAL;

		if (tb[IFLA_BROADCAST] &&
		    nla_len(tb[IFLA_BROADCAST]) < dev->addr_len)
			return -EINVAL;
	}

	if (tb[IFLA_AF_SPEC]) {
		struct nlattr *af;
		int rem, err;

		nla_for_each_nested(af, tb[IFLA_AF_SPEC], rem) {
			const struct rtnl_af_ops *af_ops;

			rcu_read_lock();
			af_ops = rtnl_af_lookup(nla_type(af));
			if (!af_ops) {
				rcu_read_unlock();
				return -EAFNOSUPPORT;
			}

			if (!af_ops->set_link_af) {
				rcu_read_unlock();
				return -EOPNOTSUPP;
			}

			if (af_ops->validate_link_af) {
				err = af_ops->validate_link_af(dev, af);
				if (err < 0) {
					rcu_read_unlock();
					return err;
				}
			}

			rcu_read_unlock();
		}
	}

	return 0;
}
static int handle_infiniband_guid(struct net_device *dev, struct ifla_vf_guid *ivt,
				  int guid_type)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	return ops->ndo_set_vf_guid(dev, ivt->vf, ivt->guid, guid_type);
}

static int handle_vf_guid(struct net_device *dev, struct ifla_vf_guid *ivt, int guid_type)
{
	if (dev->type != ARPHRD_INFINIBAND)
		return -EOPNOTSUPP;

	return handle_infiniband_guid(dev, ivt, guid_type);
}
static int do_setvfinfo(struct net_device *dev, struct nlattr **tb)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	int err = -EINVAL;

	if (tb[IFLA_VF_MAC]) {
		struct ifla_vf_mac *ivm = nla_data(tb[IFLA_VF_MAC]);

		if (ivm->vf >= INT_MAX)
			return -EINVAL;
		err = -EOPNOTSUPP;
		if (ops->ndo_set_vf_mac)
			err = ops->ndo_set_vf_mac(dev, ivm->vf,
						  ivm->mac);
		if (err < 0)
			return err;
	}

	if (tb[IFLA_VF_VLAN]) {
		struct ifla_vf_vlan *ivv = nla_data(tb[IFLA_VF_VLAN]);

		if (ivv->vf >= INT_MAX)
			return -EINVAL;
		err = -EOPNOTSUPP;
		if (ops->ndo_set_vf_vlan)
			err = ops->ndo_set_vf_vlan(dev, ivv->vf, ivv->vlan,
						   ivv->qos,
						   htons(ETH_P_8021Q));
		if (err < 0)
			return err;
	}

	if (tb[IFLA_VF_VLAN_LIST]) {
		struct ifla_vf_vlan_info *ivvl[MAX_VLAN_LIST_LEN];
		struct nlattr *attr;
		int rem, len = 0;

		err = -EOPNOTSUPP;
		if (!ops->ndo_set_vf_vlan)
			return err;

		nla_for_each_nested(attr, tb[IFLA_VF_VLAN_LIST], rem) {
			if (nla_type(attr) != IFLA_VF_VLAN_INFO ||
			    nla_len(attr) < NLA_HDRLEN) {
				return -EINVAL;
			}
			if (len >= MAX_VLAN_LIST_LEN)
				return -EOPNOTSUPP;
			ivvl[len] = nla_data(attr);

			len++;
		}
		if (len == 0)
			return -EINVAL;

		if (ivvl[0]->vf >= INT_MAX)
			return -EINVAL;
		err = ops->ndo_set_vf_vlan(dev, ivvl[0]->vf, ivvl[0]->vlan,
					   ivvl[0]->qos, ivvl[0]->vlan_proto);
		if (err < 0)
			return err;
	}

	if (tb[IFLA_VF_TX_RATE]) {
		struct ifla_vf_tx_rate *ivt = nla_data(tb[IFLA_VF_TX_RATE]);
		struct ifla_vf_info ivf;

		if (ivt->vf >= INT_MAX)
			return -EINVAL;
		err = -EOPNOTSUPP;
		if (ops->ndo_get_vf_config)
			err = ops->ndo_get_vf_config(dev, ivt->vf, &ivf);
		if (err < 0)
			return err;

		err = -EOPNOTSUPP;
		if (ops->ndo_set_vf_rate)
			err = ops->ndo_set_vf_rate(dev, ivt->vf,
						   ivf.min_tx_rate,
						   ivt->rate);
		if (err < 0)
			return err;
	}

	if (tb[IFLA_VF_RATE]) {
		struct ifla_vf_rate *ivt = nla_data(tb[IFLA_VF_RATE]);

		if (ivt->vf >= INT_MAX)
			return -EINVAL;
		err = -EOPNOTSUPP;
		if (ops->ndo_set_vf_rate)
			err = ops->ndo_set_vf_rate(dev, ivt->vf,
						   ivt->min_tx_rate,
						   ivt->max_tx_rate);
		if (err < 0)
			return err;
	}

	if (tb[IFLA_VF_SPOOFCHK]) {
		struct ifla_vf_spoofchk *ivs = nla_data(tb[IFLA_VF_SPOOFCHK]);

		if (ivs->vf >= INT_MAX)
			return -EINVAL;
		err = -EOPNOTSUPP;
		if (ops->ndo_set_vf_spoofchk)
			err = ops->ndo_set_vf_spoofchk(dev, ivs->vf,
						       ivs->setting);
		if (err < 0)
			return err;
	}

	if (tb[IFLA_VF_LINK_STATE]) {
		struct ifla_vf_link_state *ivl = nla_data(tb[IFLA_VF_LINK_STATE]);

		if (ivl->vf >= INT_MAX)
			return -EINVAL;
		err = -EOPNOTSUPP;
		if (ops->ndo_set_vf_link_state)
			err = ops->ndo_set_vf_link_state(dev, ivl->vf,
							 ivl->link_state);
		if (err < 0)
			return err;
	}

	if (tb[IFLA_VF_RSS_QUERY_EN]) {
		struct ifla_vf_rss_query_en *ivrssq_en;

		err = -EOPNOTSUPP;
		ivrssq_en = nla_data(tb[IFLA_VF_RSS_QUERY_EN]);
		if (ivrssq_en->vf >= INT_MAX)
			return -EINVAL;
		if (ops->ndo_set_vf_rss_query_en)
			err = ops->ndo_set_vf_rss_query_en(dev, ivrssq_en->vf,
							   ivrssq_en->setting);
		if (err < 0)
			return err;
	}

	if (tb[IFLA_VF_TRUST]) {
		struct ifla_vf_trust *ivt = nla_data(tb[IFLA_VF_TRUST]);

		if (ivt->vf >= INT_MAX)
			return -EINVAL;
		err = -EOPNOTSUPP;
		if (ops->ndo_set_vf_trust)
			err = ops->ndo_set_vf_trust(dev, ivt->vf, ivt->setting);
		if (err < 0)
			return err;
	}

	if (tb[IFLA_VF_IB_NODE_GUID]) {
		struct ifla_vf_guid *ivt = nla_data(tb[IFLA_VF_IB_NODE_GUID]);

		if (ivt->vf >= INT_MAX)
			return -EINVAL;
		if (!ops->ndo_set_vf_guid)
			return -EOPNOTSUPP;
		return handle_vf_guid(dev, ivt, IFLA_VF_IB_NODE_GUID);
	}

	if (tb[IFLA_VF_IB_PORT_GUID]) {
		struct ifla_vf_guid *ivt = nla_data(tb[IFLA_VF_IB_PORT_GUID]);

		if (ivt->vf >= INT_MAX)
			return -EINVAL;
		if (!ops->ndo_set_vf_guid)
			return -EOPNOTSUPP;

		return handle_vf_guid(dev, ivt, IFLA_VF_IB_PORT_GUID);
	}

	return err;
}
static int do_set_master(struct net_device *dev, int ifindex,
			 struct netlink_ext_ack *extack)
{
	struct net_device *upper_dev = netdev_master_upper_dev_get(dev);
	const struct net_device_ops *ops;
	int err;

	if (upper_dev) {
		if (upper_dev->ifindex == ifindex)
			return 0;
		ops = upper_dev->netdev_ops;
		if (ops->ndo_del_slave) {
			err = ops->ndo_del_slave(upper_dev, dev);
			if (err)
				return err;
		} else {
			return -EOPNOTSUPP;
		}
	}

	if (ifindex) {
		upper_dev = __dev_get_by_index(dev_net(dev), ifindex);
		if (!upper_dev)
			return -EINVAL;
		ops = upper_dev->netdev_ops;
		if (ops->ndo_add_slave) {
			err = ops->ndo_add_slave(upper_dev, dev, extack);
			if (err)
				return err;
		} else {
			return -EOPNOTSUPP;
		}
	}
	return 0;
}
2337 #define DO_SETLINK_MODIFIED 0x01
2338 /* notify flag means notify + modified. */
2339 #define DO_SETLINK_NOTIFY 0x03
2340 static int do_setlink(const struct sk_buff *skb,
2341 struct net_device *dev, struct ifinfomsg *ifm,
2342 struct netlink_ext_ack *extack,
2343 struct nlattr **tb, char *ifname, int status)
2345 const struct net_device_ops *ops = dev->netdev_ops;
2348 err = validate_linkmsg(dev, tb);
2352 if (tb[IFLA_NET_NS_PID] || tb[IFLA_NET_NS_FD] || tb[IFLA_IF_NETNSID]) {
2353 struct net *net = rtnl_link_get_net_capable(skb, dev_net(dev),
2360 err = dev_change_net_namespace(dev, net, ifname);
2364 status |= DO_SETLINK_MODIFIED;
2368 struct rtnl_link_ifmap *u_map;
2371 if (!ops->ndo_set_config) {
2376 if (!netif_device_present(dev)) {
2381 u_map = nla_data(tb[IFLA_MAP]);
2382 k_map.mem_start = (unsigned long) u_map->mem_start;
2383 k_map.mem_end = (unsigned long) u_map->mem_end;
2384 k_map.base_addr = (unsigned short) u_map->base_addr;
2385 k_map.irq = (unsigned char) u_map->irq;
2386 k_map.dma = (unsigned char) u_map->dma;
2387 k_map.port = (unsigned char) u_map->port;
2389 err = ops->ndo_set_config(dev, &k_map);
2393 status |= DO_SETLINK_NOTIFY;
2396 if (tb[IFLA_ADDRESS]) {
2397 struct sockaddr *sa;
2400 len = sizeof(sa_family_t) + max_t(size_t, dev->addr_len,
2402 sa = kmalloc(len, GFP_KERNEL);
2407 sa->sa_family = dev->type;
2408 memcpy(sa->sa_data, nla_data(tb[IFLA_ADDRESS]),
2410 err = dev_set_mac_address(dev, sa);
2414 status |= DO_SETLINK_MODIFIED;
2418 err = dev_set_mtu_ext(dev, nla_get_u32(tb[IFLA_MTU]), extack);
2421 status |= DO_SETLINK_MODIFIED;
2424 if (tb[IFLA_GROUP]) {
2425 dev_set_group(dev, nla_get_u32(tb[IFLA_GROUP]));
2426 status |= DO_SETLINK_NOTIFY;
2430 * Interface selected by interface index but interface
2431 * name provided implies that a name change has been
2434 if (ifm->ifi_index > 0 && ifname[0]) {
2435 err = dev_change_name(dev, ifname);
2438 status |= DO_SETLINK_MODIFIED;
2441 if (tb[IFLA_IFALIAS]) {
2442 err = dev_set_alias(dev, nla_data(tb[IFLA_IFALIAS]),
2443 nla_len(tb[IFLA_IFALIAS]));
2446 status |= DO_SETLINK_NOTIFY;
2449 if (tb[IFLA_BROADCAST]) {
2450 nla_memcpy(dev->broadcast, tb[IFLA_BROADCAST], dev->addr_len);
2451 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
2454 if (ifm->ifi_flags || ifm->ifi_change) {
2455 err = dev_change_flags(dev, rtnl_dev_combine_flags(dev, ifm));
2460 if (tb[IFLA_MASTER]) {
2461 err = do_set_master(dev, nla_get_u32(tb[IFLA_MASTER]), extack);
2464 status |= DO_SETLINK_MODIFIED;
2467 if (tb[IFLA_CARRIER]) {
2468 err = dev_change_carrier(dev, nla_get_u8(tb[IFLA_CARRIER]));
2471 status |= DO_SETLINK_MODIFIED;
2474 if (tb[IFLA_TXQLEN]) {
2475 unsigned int value = nla_get_u32(tb[IFLA_TXQLEN]);
2477 err = dev_change_tx_queue_len(dev, value);
2480 status |= DO_SETLINK_MODIFIED;
2483 if (tb[IFLA_GSO_MAX_SIZE]) {
2484 u32 max_size = nla_get_u32(tb[IFLA_GSO_MAX_SIZE]);
2486 if (max_size > GSO_MAX_SIZE) {
2491 if (dev->gso_max_size ^ max_size) {
2492 netif_set_gso_max_size(dev, max_size);
2493 status |= DO_SETLINK_MODIFIED;
2497 if (tb[IFLA_GSO_MAX_SEGS]) {
2498 u32 max_segs = nla_get_u32(tb[IFLA_GSO_MAX_SEGS]);
2500 if (max_segs > GSO_MAX_SEGS) {
2505 if (dev->gso_max_segs ^ max_segs) {
2506 dev->gso_max_segs = max_segs;
2507 status |= DO_SETLINK_MODIFIED;
2511 if (tb[IFLA_OPERSTATE])
2512 set_operstate(dev, nla_get_u8(tb[IFLA_OPERSTATE]));
2514 if (tb[IFLA_LINKMODE]) {
2515 unsigned char value = nla_get_u8(tb[IFLA_LINKMODE]);
2517 write_lock_bh(&dev_base_lock);
2518 if (dev->link_mode ^ value)
2519 status |= DO_SETLINK_NOTIFY;
2520 dev->link_mode = value;
2521 write_unlock_bh(&dev_base_lock);
2524 if (tb[IFLA_VFINFO_LIST]) {
2525 struct nlattr *vfinfo[IFLA_VF_MAX + 1];
2526 struct nlattr *attr;
2529 nla_for_each_nested(attr, tb[IFLA_VFINFO_LIST], rem) {
2530 if (nla_type(attr) != IFLA_VF_INFO ||
2531 nla_len(attr) < NLA_HDRLEN) {
2535 err = nla_parse_nested(vfinfo, IFLA_VF_MAX, attr,
2536 ifla_vf_policy, NULL);
2539 err = do_setvfinfo(dev, vfinfo);
2542 status |= DO_SETLINK_NOTIFY;
2547 if (tb[IFLA_VF_PORTS]) {
2548 struct nlattr *port[IFLA_PORT_MAX+1];
2549 struct nlattr *attr;
2554 if (!ops->ndo_set_vf_port)
2557 nla_for_each_nested(attr, tb[IFLA_VF_PORTS], rem) {
2558 if (nla_type(attr) != IFLA_VF_PORT ||
2559 nla_len(attr) < NLA_HDRLEN) {
2563 err = nla_parse_nested(port, IFLA_PORT_MAX, attr,
2564 ifla_port_policy, NULL);
2567 if (!port[IFLA_PORT_VF]) {
2571 vf = nla_get_u32(port[IFLA_PORT_VF]);
2572 err = ops->ndo_set_vf_port(dev, vf, port);
2575 status |= DO_SETLINK_NOTIFY;
2580 if (tb[IFLA_PORT_SELF]) {
2581 struct nlattr *port[IFLA_PORT_MAX+1];
2583 err = nla_parse_nested(port, IFLA_PORT_MAX,
2584 tb[IFLA_PORT_SELF], ifla_port_policy,
2590 if (ops->ndo_set_vf_port)
2591 err = ops->ndo_set_vf_port(dev, PORT_SELF_VF, port);
2594 status |= DO_SETLINK_NOTIFY;
2597 if (tb[IFLA_AF_SPEC]) {
2601 nla_for_each_nested(af, tb[IFLA_AF_SPEC], rem) {
2602 const struct rtnl_af_ops *af_ops;
2606 BUG_ON(!(af_ops = rtnl_af_lookup(nla_type(af))));
2608 err = af_ops->set_link_af(dev, af);
2615 status |= DO_SETLINK_NOTIFY;
2620 if (tb[IFLA_PROTO_DOWN]) {
2621 err = dev_change_proto_down(dev,
2622 nla_get_u8(tb[IFLA_PROTO_DOWN]));
2625 status |= DO_SETLINK_NOTIFY;
2629 struct nlattr *xdp[IFLA_XDP_MAX + 1];
2632 err = nla_parse_nested(xdp, IFLA_XDP_MAX, tb[IFLA_XDP],
2633 ifla_xdp_policy, NULL);
2637 if (xdp[IFLA_XDP_ATTACHED] || xdp[IFLA_XDP_PROG_ID]) {
2642 if (xdp[IFLA_XDP_FLAGS]) {
2643 xdp_flags = nla_get_u32(xdp[IFLA_XDP_FLAGS]);
2644 if (xdp_flags & ~XDP_FLAGS_MASK) {
2648 if (hweight32(xdp_flags & XDP_FLAGS_MODES) > 1) {
2654 if (xdp[IFLA_XDP_FD]) {
2655 err = dev_change_xdp_fd(dev, extack,
2656 nla_get_s32(xdp[IFLA_XDP_FD]),
2660 status |= DO_SETLINK_NOTIFY;
2665 if (status & DO_SETLINK_MODIFIED) {
2666 if ((status & DO_SETLINK_NOTIFY) == DO_SETLINK_NOTIFY)
2667 netdev_state_change(dev);
2670 net_warn_ratelimited("A link change request failed with some changes committed already. Interface %s may have been left with an inconsistent configuration, please check.\n",
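/* Note on the status flags above (summary, not in the original source
 * comments): DO_SETLINK_MODIFIED only records that some change was
 * already committed, so the ratelimited warning can fire if a later
 * step fails; DO_SETLINK_NOTIFY implies MODIFIED and additionally
 * forces a netdev_state_change() notification for changes whose
 * helpers do not emit one themselves.
 */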
2677 static int rtnl_setlink(struct sk_buff *skb, struct nlmsghdr *nlh,
2678 struct netlink_ext_ack *extack)
2680 struct net *net = sock_net(skb->sk);
2681 struct ifinfomsg *ifm;
2682 struct net_device *dev;
2684 struct nlattr *tb[IFLA_MAX+1];
2685 char ifname[IFNAMSIZ];
2687 err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy,
2692 err = rtnl_ensure_unique_netns(tb, extack, false);
2696 if (tb[IFLA_IFNAME])
2697 nla_strlcpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ);
2702 ifm = nlmsg_data(nlh);
2703 if (ifm->ifi_index > 0)
2704 dev = __dev_get_by_index(net, ifm->ifi_index);
2705 else if (tb[IFLA_IFNAME])
2706 dev = __dev_get_by_name(net, ifname);
2715 err = do_setlink(skb, dev, ifm, extack, tb, ifname, 0);
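/* Illustrative userspace sketch (not part of this file): the kind of
 * RTM_SETLINK request that rtnl_setlink() handles. It sets the MTU of
 * interface index 2 over a raw NETLINK_ROUTE socket; the hard-coded
 * ifindex and the function name are assumptions for the example, and
 * error handling is elided.
 *
 *	#include <string.h>
 *	#include <sys/socket.h>
 *	#include <linux/netlink.h>
 *	#include <linux/rtnetlink.h>
 *
 *	int set_mtu_sketch(void)
 *	{
 *		struct {
 *			struct nlmsghdr  nlh;
 *			struct ifinfomsg ifm;
 *			struct rtattr    rta;	// IFLA_MTU attribute header
 *			__u32            mtu;	// IFLA_MTU payload
 *		} req;
 *		int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
 *
 *		memset(&req, 0, sizeof(req));
 *		req.nlh.nlmsg_len   = sizeof(req);
 *		req.nlh.nlmsg_type  = RTM_SETLINK;
 *		req.nlh.nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
 *		req.ifm.ifi_family  = AF_UNSPEC;
 *		req.ifm.ifi_index   = 2;	// device to modify (assumed)
 *		req.rta.rta_type    = IFLA_MTU;
 *		req.rta.rta_len     = RTA_LENGTH(sizeof(__u32));
 *		req.mtu             = 1400;
 *		return send(fd, &req, sizeof(req), 0) < 0 ? -1 : 0;
 *	}
 */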
2720 static int rtnl_group_dellink(const struct net *net, int group)
2722 struct net_device *dev, *aux;
2723 LIST_HEAD(list_kill);
2729 for_each_netdev(net, dev) {
2730 if (dev->group == group) {
2731 const struct rtnl_link_ops *ops;
2734 ops = dev->rtnl_link_ops;
2735 if (!ops || !ops->dellink)
2743 for_each_netdev_safe(net, dev, aux) {
2744 if (dev->group == group) {
2745 const struct rtnl_link_ops *ops;
2747 ops = dev->rtnl_link_ops;
2748 ops->dellink(dev, &list_kill);
2751 unregister_netdevice_many(&list_kill);
2756 int rtnl_delete_link(struct net_device *dev)
2758 const struct rtnl_link_ops *ops;
2759 LIST_HEAD(list_kill);
2761 ops = dev->rtnl_link_ops;
2762 if (!ops || !ops->dellink)
2765 ops->dellink(dev, &list_kill);
2766 unregister_netdevice_many(&list_kill);
2770 EXPORT_SYMBOL_GPL(rtnl_delete_link);
2772 static int rtnl_dellink(struct sk_buff *skb, struct nlmsghdr *nlh,
2773 struct netlink_ext_ack *extack)
2775 struct net *net = sock_net(skb->sk);
2776 struct net *tgt_net = net;
2777 struct net_device *dev = NULL;
2778 struct ifinfomsg *ifm;
2779 char ifname[IFNAMSIZ];
2780 struct nlattr *tb[IFLA_MAX+1];
2784 err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy, extack);
2788 err = rtnl_ensure_unique_netns(tb, extack, true);
2792 if (tb[IFLA_IFNAME])
2793 nla_strlcpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ);
2795 if (tb[IFLA_IF_NETNSID]) {
2796 netnsid = nla_get_s32(tb[IFLA_IF_NETNSID]);
2797 tgt_net = get_target_net(NETLINK_CB(skb).sk, netnsid);
2798 if (IS_ERR(tgt_net))
2799 return PTR_ERR(tgt_net);
2803 ifm = nlmsg_data(nlh);
2804 if (ifm->ifi_index > 0)
2805 dev = __dev_get_by_index(tgt_net, ifm->ifi_index);
2806 else if (tb[IFLA_IFNAME])
2807 dev = __dev_get_by_name(tgt_net, ifname);
2808 else if (tb[IFLA_GROUP])
2809 err = rtnl_group_dellink(tgt_net, nla_get_u32(tb[IFLA_GROUP]));
2814 if (tb[IFLA_IFNAME] || ifm->ifi_index > 0)
2820 err = rtnl_delete_link(dev);
2829 int rtnl_configure_link(struct net_device *dev, const struct ifinfomsg *ifm)
2831 unsigned int old_flags;
2834 old_flags = dev->flags;
2835 if (ifm && (ifm->ifi_flags || ifm->ifi_change)) {
2836 err = __dev_change_flags(dev, rtnl_dev_combine_flags(dev, ifm));
2841 if (dev->rtnl_link_state == RTNL_LINK_INITIALIZED) {
2842 __dev_notify_flags(dev, old_flags, (old_flags ^ dev->flags));
2844 dev->rtnl_link_state = RTNL_LINK_INITIALIZED;
2845 __dev_notify_flags(dev, old_flags, ~0U);
2849 EXPORT_SYMBOL(rtnl_configure_link);
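/* Note (summary): for a link still in RTNL_LINK_INITIALIZING state the
 * ~0U change mask above makes the very first notification advertise
 * every flag, so listeners get a complete picture of the new device.
 */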
2851 struct net_device *rtnl_create_link(struct net *net,
2852 const char *ifname, unsigned char name_assign_type,
2853 const struct rtnl_link_ops *ops, struct nlattr *tb[])
2855 struct net_device *dev;
2856 unsigned int num_tx_queues = 1;
2857 unsigned int num_rx_queues = 1;
2859 if (tb[IFLA_NUM_TX_QUEUES])
2860 num_tx_queues = nla_get_u32(tb[IFLA_NUM_TX_QUEUES]);
2861 else if (ops->get_num_tx_queues)
2862 num_tx_queues = ops->get_num_tx_queues();
2864 if (tb[IFLA_NUM_RX_QUEUES])
2865 num_rx_queues = nla_get_u32(tb[IFLA_NUM_RX_QUEUES]);
2866 else if (ops->get_num_rx_queues)
2867 num_rx_queues = ops->get_num_rx_queues();
2869 if (num_tx_queues < 1 || num_tx_queues > 4096)
2870 return ERR_PTR(-EINVAL);
2872 if (num_rx_queues < 1 || num_rx_queues > 4096)
2873 return ERR_PTR(-EINVAL);
2875 dev = alloc_netdev_mqs(ops->priv_size, ifname, name_assign_type,
2876 ops->setup, num_tx_queues, num_rx_queues);
2878 return ERR_PTR(-ENOMEM);
2880 dev_net_set(dev, net);
2881 dev->rtnl_link_ops = ops;
2882 dev->rtnl_link_state = RTNL_LINK_INITIALIZING;
2885 u32 mtu = nla_get_u32(tb[IFLA_MTU]);
2888 err = dev_validate_mtu(dev, mtu, NULL);
2891 return ERR_PTR(err);
2895 if (tb[IFLA_ADDRESS]) {
2896 memcpy(dev->dev_addr, nla_data(tb[IFLA_ADDRESS]),
2897 nla_len(tb[IFLA_ADDRESS]));
2898 dev->addr_assign_type = NET_ADDR_SET;
2900 if (tb[IFLA_BROADCAST])
2901 memcpy(dev->broadcast, nla_data(tb[IFLA_BROADCAST]),
2902 nla_len(tb[IFLA_BROADCAST]));
2903 if (tb[IFLA_TXQLEN])
2904 dev->tx_queue_len = nla_get_u32(tb[IFLA_TXQLEN]);
2905 if (tb[IFLA_OPERSTATE])
2906 set_operstate(dev, nla_get_u8(tb[IFLA_OPERSTATE]));
2907 if (tb[IFLA_LINKMODE])
2908 dev->link_mode = nla_get_u8(tb[IFLA_LINKMODE]);
2910 dev_set_group(dev, nla_get_u32(tb[IFLA_GROUP]));
2911 if (tb[IFLA_GSO_MAX_SIZE])
2912 netif_set_gso_max_size(dev, nla_get_u32(tb[IFLA_GSO_MAX_SIZE]));
2913 if (tb[IFLA_GSO_MAX_SEGS])
2914 dev->gso_max_segs = nla_get_u32(tb[IFLA_GSO_MAX_SEGS]);
2918 EXPORT_SYMBOL(rtnl_create_link);
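/* Minimal sketch of the driver side of this contract (hypothetical
 * "foo" link type, not a real driver): rtnl_newlink() below resolves
 * IFLA_INFO_KIND to a struct rtnl_link_ops, calls rtnl_create_link()
 * with ->setup and ->priv_size, and then hands the device to ->newlink.
 *
 *	static void foo_setup(struct net_device *dev)
 *	{
 *		ether_setup(dev);	// assume an Ethernet-like device
 *	}
 *
 *	static int foo_newlink(struct net *src_net, struct net_device *dev,
 *			       struct nlattr *tb[], struct nlattr *data[],
 *			       struct netlink_ext_ack *extack)
 *	{
 *		return register_netdevice(dev);
 *	}
 *
 *	static struct rtnl_link_ops foo_link_ops __read_mostly = {
 *		.kind	 = "foo",
 *		.setup	 = foo_setup,
 *		.newlink = foo_newlink,
 *	};
 *
 *	// module init would call rtnl_link_register(&foo_link_ops)
 */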
2920 static int rtnl_group_changelink(const struct sk_buff *skb,
2921 struct net *net, int group,
2922 struct ifinfomsg *ifm,
2923 struct netlink_ext_ack *extack,
2926 struct net_device *dev, *aux;
2929 for_each_netdev_safe(net, dev, aux) {
2930 if (dev->group == group) {
2931 err = do_setlink(skb, dev, ifm, extack, tb, NULL, 0);
2940 static int rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh,
2941 struct netlink_ext_ack *extack)
2943 struct net *net = sock_net(skb->sk);
2944 const struct rtnl_link_ops *ops;
2945 const struct rtnl_link_ops *m_ops;
2946 struct net_device *dev;
2947 struct net_device *master_dev;
2948 struct ifinfomsg *ifm;
2949 char kind[MODULE_NAME_LEN];
2950 char ifname[IFNAMSIZ];
2951 struct nlattr *tb[IFLA_MAX+1];
2952 struct nlattr *linkinfo[IFLA_INFO_MAX+1];
2953 unsigned char name_assign_type = NET_NAME_USER;
2956 #ifdef CONFIG_MODULES
2959 err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy, extack);
2963 err = rtnl_ensure_unique_netns(tb, extack, false);
2967 if (tb[IFLA_IFNAME])
2968 nla_strlcpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ);
2972 ifm = nlmsg_data(nlh);
2973 if (ifm->ifi_index > 0)
2974 dev = __dev_get_by_index(net, ifm->ifi_index);
2977 dev = __dev_get_by_name(net, ifname);
2985 master_dev = netdev_master_upper_dev_get(dev);
2987 m_ops = master_dev->rtnl_link_ops;
2990 err = validate_linkmsg(dev, tb);
2994 if (tb[IFLA_LINKINFO]) {
2995 err = nla_parse_nested(linkinfo, IFLA_INFO_MAX,
2996 tb[IFLA_LINKINFO], ifla_info_policy,
3001 memset(linkinfo, 0, sizeof(linkinfo));
3003 if (linkinfo[IFLA_INFO_KIND]) {
3004 nla_strlcpy(kind, linkinfo[IFLA_INFO_KIND], sizeof(kind));
3005 ops = rtnl_link_ops_get(kind);
3012 struct nlattr *attr[RTNL_MAX_TYPE + 1];
3013 struct nlattr *slave_attr[RTNL_SLAVE_MAX_TYPE + 1];
3014 struct nlattr **data = NULL;
3015 struct nlattr **slave_data = NULL;
3016 struct net *dest_net, *link_net = NULL;
3019 if (ops->maxtype > RTNL_MAX_TYPE)
3022 if (ops->maxtype && linkinfo[IFLA_INFO_DATA]) {
3023 err = nla_parse_nested(attr, ops->maxtype,
3024 linkinfo[IFLA_INFO_DATA],
3030 if (ops->validate) {
3031 err = ops->validate(tb, data, extack);
3038 if (m_ops->slave_maxtype > RTNL_SLAVE_MAX_TYPE)
3041 if (m_ops->slave_maxtype &&
3042 linkinfo[IFLA_INFO_SLAVE_DATA]) {
3043 err = nla_parse_nested(slave_attr,
3044 m_ops->slave_maxtype,
3045 linkinfo[IFLA_INFO_SLAVE_DATA],
3046 m_ops->slave_policy,
3050 slave_data = slave_attr;
3057 if (nlh->nlmsg_flags & NLM_F_EXCL)
3059 if (nlh->nlmsg_flags & NLM_F_REPLACE)
3062 if (linkinfo[IFLA_INFO_DATA]) {
3063 if (!ops || ops != dev->rtnl_link_ops ||
3067 err = ops->changelink(dev, tb, data, extack);
3070 status |= DO_SETLINK_NOTIFY;
3073 if (linkinfo[IFLA_INFO_SLAVE_DATA]) {
3074 if (!m_ops || !m_ops->slave_changelink)
3077 err = m_ops->slave_changelink(master_dev, dev,
3082 status |= DO_SETLINK_NOTIFY;
3085 return do_setlink(skb, dev, ifm, extack, tb, ifname,
3089 if (!(nlh->nlmsg_flags & NLM_F_CREATE)) {
3090 if (ifm->ifi_index == 0 && tb[IFLA_GROUP])
3091 return rtnl_group_changelink(skb, net,
3092 nla_get_u32(tb[IFLA_GROUP]),
3097 if (tb[IFLA_MAP] || tb[IFLA_PROTINFO])
3101 #ifdef CONFIG_MODULES
3104 request_module("rtnl-link-%s", kind);
3106 ops = rtnl_link_ops_get(kind);
3118 snprintf(ifname, IFNAMSIZ, "%s%%d", ops->kind);
3119 name_assign_type = NET_NAME_ENUM;
3122 dest_net = rtnl_link_get_net_capable(skb, net, tb, CAP_NET_ADMIN);
3123 if (IS_ERR(dest_net))
3124 return PTR_ERR(dest_net);
3126 if (tb[IFLA_LINK_NETNSID]) {
3127 int id = nla_get_s32(tb[IFLA_LINK_NETNSID]);
3129 link_net = get_net_ns_by_id(dest_net, id);
3135 if (!netlink_ns_capable(skb, link_net->user_ns, CAP_NET_ADMIN))
3139 dev = rtnl_create_link(link_net ? : dest_net, ifname,
3140 name_assign_type, ops, tb);
3146 dev->ifindex = ifm->ifi_index;
3149 err = ops->newlink(link_net ? : net, dev, tb, data,
3151 /* Drivers should call free_netdev() in ->destructor
3152 * and unregister it on failure after registration
3153 * so that the device can finally be freed in rtnl_unlock. */
3156 /* If device is not registered at all, free it now */
3157 if (dev->reg_state == NETREG_UNINITIALIZED ||
3158 dev->reg_state == NETREG_UNREGISTERED)
3163 err = register_netdevice(dev);
3169 err = rtnl_configure_link(dev, ifm);
3171 goto out_unregister;
3173 err = dev_change_net_namespace(dev, dest_net, ifname);
3175 goto out_unregister;
3177 if (tb[IFLA_MASTER]) {
3178 err = do_set_master(dev, nla_get_u32(tb[IFLA_MASTER]),
3181 goto out_unregister;
3190 LIST_HEAD(list_kill);
3192 ops->dellink(dev, &list_kill);
3193 unregister_netdevice_many(&list_kill);
3195 unregister_netdevice(dev);
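/* For reference (assumption about typical iproute2 usage): "ip link add
 * name foo0 type foo" reaches rtnl_newlink() as an RTM_NEWLINK message
 * carrying NLM_F_CREATE | NLM_F_EXCL and a nested IFLA_LINKINFO
 * attribute whose IFLA_INFO_KIND string selects the link ops.
 */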
3201 static int rtnl_getlink(struct sk_buff *skb, struct nlmsghdr *nlh,
3202 struct netlink_ext_ack *extack)
3204 struct net *net = sock_net(skb->sk);
3205 struct net *tgt_net = net;
3206 struct ifinfomsg *ifm;
3207 char ifname[IFNAMSIZ];
3208 struct nlattr *tb[IFLA_MAX+1];
3209 struct net_device *dev = NULL;
3210 struct sk_buff *nskb;
3213 u32 ext_filter_mask = 0;
3215 err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy, extack);
3219 err = rtnl_ensure_unique_netns(tb, extack, true);
3223 if (tb[IFLA_IF_NETNSID]) {
3224 netnsid = nla_get_s32(tb[IFLA_IF_NETNSID]);
3225 tgt_net = get_target_net(NETLINK_CB(skb).sk, netnsid);
3226 if (IS_ERR(tgt_net))
3227 return PTR_ERR(tgt_net);
3230 if (tb[IFLA_IFNAME])
3231 nla_strlcpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ);
3233 if (tb[IFLA_EXT_MASK])
3234 ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]);
3237 ifm = nlmsg_data(nlh);
3238 if (ifm->ifi_index > 0)
3239 dev = __dev_get_by_index(tgt_net, ifm->ifi_index);
3240 else if (tb[IFLA_IFNAME])
3241 dev = __dev_get_by_name(tgt_net, ifname);
3250 nskb = nlmsg_new(if_nlmsg_size(dev, ext_filter_mask), GFP_KERNEL);
3254 err = rtnl_fill_ifinfo(nskb, dev, net,
3255 RTM_NEWLINK, NETLINK_CB(skb).portid,
3256 nlh->nlmsg_seq, 0, 0, ext_filter_mask,
3257 0, NULL, 0, netnsid, GFP_KERNEL);
3259 /* -EMSGSIZE implies BUG in if_nlmsg_size */
3260 WARN_ON(err == -EMSGSIZE);
3263 err = rtnl_unicast(nskb, net, NETLINK_CB(skb).portid);
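/* Note (summary): unlike the dump path, this doit handler answers a
 * single RTM_GETLINK request with exactly one RTM_NEWLINK reply sent
 * back to the requesting portid via rtnl_unicast().
 */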
3271 static u16 rtnl_calcit(struct sk_buff *skb, struct nlmsghdr *nlh)
3273 struct net *net = sock_net(skb->sk);
3274 struct net_device *dev;
3275 struct nlattr *tb[IFLA_MAX+1];
3276 u32 ext_filter_mask = 0;
3277 u16 min_ifinfo_dump_size = 0;
3280 /* Same kernel<->userspace interface hack as in rtnl_dump_ifinfo. */
3281 hdrlen = nlmsg_len(nlh) < sizeof(struct ifinfomsg) ?
3282 sizeof(struct rtgenmsg) : sizeof(struct ifinfomsg);
3284 if (nlmsg_parse(nlh, hdrlen, tb, IFLA_MAX, ifla_policy, NULL) >= 0) {
3285 if (tb[IFLA_EXT_MASK])
3286 ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]);
3289 if (!ext_filter_mask)
3290 return NLMSG_GOODSIZE;
3292 /* traverse the list of net devices and compute the minimum
3293 * buffer size based upon the filter mask. */
3296 for_each_netdev_rcu(net, dev) {
3297 min_ifinfo_dump_size = max_t(u16, min_ifinfo_dump_size,
3303 return nlmsg_total_size(min_ifinfo_dump_size);
3306 static int rtnl_dump_all(struct sk_buff *skb, struct netlink_callback *cb)
3309 int s_idx = cb->family;
3310 int type = cb->nlh->nlmsg_type - RTM_BASE;
3315 for (idx = 1; idx <= RTNL_FAMILY_MAX; idx++) {
3316 struct rtnl_link **tab;
3317 struct rtnl_link *link;
3318 rtnl_dumpit_func dumpit;
3320 if (idx < s_idx || idx == PF_PACKET)
3323 if (type < 0 || type >= RTM_NR_MSGTYPES)
3326 tab = rcu_dereference_rtnl(rtnl_msg_handlers[idx]);
3334 dumpit = link->dumpit;
3339 memset(&cb->args[0], 0, sizeof(cb->args));
3343 if (dumpit(skb, cb))
3351 struct sk_buff *rtmsg_ifinfo_build_skb(int type, struct net_device *dev,
3352 unsigned int change,
3353 u32 event, gfp_t flags, int *new_nsid,
3356 struct net *net = dev_net(dev);
3357 struct sk_buff *skb;
3359 size_t if_info_size;
3361 skb = nlmsg_new((if_info_size = if_nlmsg_size(dev, 0)), flags);
3365 err = rtnl_fill_ifinfo(skb, dev, dev_net(dev),
3366 type, 0, 0, change, 0, 0, event,
3367 new_nsid, new_ifindex, -1, flags);
3369 /* -EMSGSIZE implies BUG in if_nlmsg_size() */
3370 WARN_ON(err == -EMSGSIZE);
3377 rtnl_set_sk_err(net, RTNLGRP_LINK, err);
3381 void rtmsg_ifinfo_send(struct sk_buff *skb, struct net_device *dev, gfp_t flags)
3383 struct net *net = dev_net(dev);
3385 rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, flags);
3388 static void rtmsg_ifinfo_event(int type, struct net_device *dev,
3389 unsigned int change, u32 event,
3390 gfp_t flags, int *new_nsid, int new_ifindex)
3392 struct sk_buff *skb;
3394 if (dev->reg_state != NETREG_REGISTERED)
3397 skb = rtmsg_ifinfo_build_skb(type, dev, change, event, flags, new_nsid,
3400 rtmsg_ifinfo_send(skb, dev, flags);
3403 void rtmsg_ifinfo(int type, struct net_device *dev, unsigned int change,
3406 rtmsg_ifinfo_event(type, dev, change, rtnl_get_event(0), flags,
3410 void rtmsg_ifinfo_newnet(int type, struct net_device *dev, unsigned int change,
3411 gfp_t flags, int *new_nsid, int new_ifindex)
3413 rtmsg_ifinfo_event(type, dev, change, rtnl_get_event(0), flags,
3414 new_nsid, new_ifindex);
3417 static int nlmsg_populate_fdb_fill(struct sk_buff *skb,
3418 struct net_device *dev,
3419 u8 *addr, u16 vid, u32 pid, u32 seq,
3420 int type, unsigned int flags,
3421 int nlflags, u16 ndm_state)
3423 struct nlmsghdr *nlh;
3426 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), nlflags);
3430 ndm = nlmsg_data(nlh);
3431 ndm->ndm_family = AF_BRIDGE;
3434 ndm->ndm_flags = flags;
3436 ndm->ndm_ifindex = dev->ifindex;
3437 ndm->ndm_state = ndm_state;
3439 if (nla_put(skb, NDA_LLADDR, ETH_ALEN, addr))
3440 goto nla_put_failure;
3442 if (nla_put(skb, NDA_VLAN, sizeof(u16), &vid))
3443 goto nla_put_failure;
3445 nlmsg_end(skb, nlh);
3449 nlmsg_cancel(skb, nlh);
3453 static inline size_t rtnl_fdb_nlmsg_size(void)
3455 return NLMSG_ALIGN(sizeof(struct ndmsg)) +
3456 nla_total_size(ETH_ALEN) + /* NDA_LLADDR */
3457 nla_total_size(sizeof(u16)); /* NDA_VLAN */
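/* Worked size (informational): with sizeof(struct ndmsg) == 12,
 * ETH_ALEN == 6 and NLA_HDRLEN == 4, this evaluates to
 * NLMSG_ALIGN(12) + NLA_ALIGN(4 + 6) + NLA_ALIGN(4 + 2)
 *	= 12 + 12 + 8 = 32 bytes of tail room per FDB notification.
 */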
3461 static void rtnl_fdb_notify(struct net_device *dev, u8 *addr, u16 vid, int type,
3464 struct net *net = dev_net(dev);
3465 struct sk_buff *skb;
3468 skb = nlmsg_new(rtnl_fdb_nlmsg_size(), GFP_ATOMIC);
3472 err = nlmsg_populate_fdb_fill(skb, dev, addr, vid,
3473 0, 0, type, NTF_SELF, 0, ndm_state);
3479 rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
3482 rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
3486 /* ndo_dflt_fdb_add - default netdevice operation to add an FDB entry */
3488 int ndo_dflt_fdb_add(struct ndmsg *ndm,
3489 struct nlattr *tb[],
3490 struct net_device *dev,
3491 const unsigned char *addr, u16 vid,
3496 /* If aging addresses are supported, the device will need to
3497 * implement its own handler for this. */
3499 if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
3500 pr_info("%s: FDB only supports static addresses\n", dev->name);
3505 pr_info("%s: vlans aren't supported yet for dev_uc|mc_add()\n", dev->name);
3509 if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
3510 err = dev_uc_add_excl(dev, addr);
3511 else if (is_multicast_ether_addr(addr))
3512 err = dev_mc_add_excl(dev, addr);
3514 /* Only return duplicate errors if NLM_F_EXCL is set */
3515 if (err == -EEXIST && !(flags & NLM_F_EXCL))
3520 EXPORT_SYMBOL(ndo_dflt_fdb_add);
3522 static int fdb_vid_parse(struct nlattr *vlan_attr, u16 *p_vid,
3523 struct netlink_ext_ack *extack)
3528 if (nla_len(vlan_attr) != sizeof(u16)) {
3529 NL_SET_ERR_MSG(extack, "invalid vlan attribute size");
3533 vid = nla_get_u16(vlan_attr);
3535 if (!vid || vid >= VLAN_VID_MASK) {
3536 NL_SET_ERR_MSG(extack, "invalid vlan id");
3544 static int rtnl_fdb_add(struct sk_buff *skb, struct nlmsghdr *nlh,
3545 struct netlink_ext_ack *extack)
3547 struct net *net = sock_net(skb->sk);
3549 struct nlattr *tb[NDA_MAX+1];
3550 struct net_device *dev;
3555 err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX, NULL, extack);
3559 ndm = nlmsg_data(nlh);
3560 if (ndm->ndm_ifindex == 0) {
3561 NL_SET_ERR_MSG(extack, "invalid ifindex");
3565 dev = __dev_get_by_index(net, ndm->ndm_ifindex);
3567 NL_SET_ERR_MSG(extack, "unknown ifindex");
3571 if (!tb[NDA_LLADDR] || nla_len(tb[NDA_LLADDR]) != ETH_ALEN) {
3572 NL_SET_ERR_MSG(extack, "invalid address");
3576 if (dev->type != ARPHRD_ETHER) {
3577 NL_SET_ERR_MSG(extack, "FDB add only supported for Ethernet devices");
3581 addr = nla_data(tb[NDA_LLADDR]);
3583 err = fdb_vid_parse(tb[NDA_VLAN], &vid, extack);
3589 /* Support fdb on the master device; the net/bridge default case */
3590 if ((!ndm->ndm_flags || ndm->ndm_flags & NTF_MASTER) &&
3591 (dev->priv_flags & IFF_BRIDGE_PORT)) {
3592 struct net_device *br_dev = netdev_master_upper_dev_get(dev);
3593 const struct net_device_ops *ops = br_dev->netdev_ops;
3595 err = ops->ndo_fdb_add(ndm, tb, dev, addr, vid,
3600 ndm->ndm_flags &= ~NTF_MASTER;
3603 /* Embedded bridge, macvlan, and any other device support */
3604 if ((ndm->ndm_flags & NTF_SELF)) {
3605 if (dev->netdev_ops->ndo_fdb_add)
3606 err = dev->netdev_ops->ndo_fdb_add(ndm, tb, dev, addr,
3610 err = ndo_dflt_fdb_add(ndm, tb, dev, addr, vid,
3614 rtnl_fdb_notify(dev, addr, vid, RTM_NEWNEIGH,
3616 ndm->ndm_flags &= ~NTF_SELF;
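/* For reference (typical usage): "bridge fdb add <mac> dev eth0 master"
 * takes the NTF_MASTER branch above and goes through the bridge's
 * ndo_fdb_add, while "... self" takes the NTF_SELF branch and programs
 * the port device itself (or falls back to ndo_dflt_fdb_add()).
 */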
3624 /* ndo_dflt_fdb_del - default netdevice operation to delete an FDB entry */
3626 int ndo_dflt_fdb_del(struct ndmsg *ndm,
3627 struct nlattr *tb[],
3628 struct net_device *dev,
3629 const unsigned char *addr, u16 vid)
3633 /* If aging addresses are supported, the device will need to
3634 * implement its own handler for this. */
3636 if (!(ndm->ndm_state & NUD_PERMANENT)) {
3637 pr_info("%s: FDB only supports static addresses\n", dev->name);
3641 if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
3642 err = dev_uc_del(dev, addr);
3643 else if (is_multicast_ether_addr(addr))
3644 err = dev_mc_del(dev, addr);
3648 EXPORT_SYMBOL(ndo_dflt_fdb_del);
3650 static int rtnl_fdb_del(struct sk_buff *skb, struct nlmsghdr *nlh,
3651 struct netlink_ext_ack *extack)
3653 struct net *net = sock_net(skb->sk);
3655 struct nlattr *tb[NDA_MAX+1];
3656 struct net_device *dev;
3661 if (!netlink_capable(skb, CAP_NET_ADMIN))
3664 err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX, NULL, extack);
3668 ndm = nlmsg_data(nlh);
3669 if (ndm->ndm_ifindex == 0) {
3670 NL_SET_ERR_MSG(extack, "invalid ifindex");
3674 dev = __dev_get_by_index(net, ndm->ndm_ifindex);
3676 NL_SET_ERR_MSG(extack, "unknown ifindex");
3680 if (!tb[NDA_LLADDR] || nla_len(tb[NDA_LLADDR]) != ETH_ALEN) {
3681 NL_SET_ERR_MSG(extack, "invalid address");
3685 if (dev->type != ARPHRD_ETHER) {
3686 NL_SET_ERR_MSG(extack, "FDB delete only supported for Ethernet devices");
3690 addr = nla_data(tb[NDA_LLADDR]);
3692 err = fdb_vid_parse(tb[NDA_VLAN], &vid, extack);
3699 /* Support fdb on the master device; the net/bridge default case */
3699 if ((!ndm->ndm_flags || ndm->ndm_flags & NTF_MASTER) &&
3700 (dev->priv_flags & IFF_BRIDGE_PORT)) {
3701 struct net_device *br_dev = netdev_master_upper_dev_get(dev);
3702 const struct net_device_ops *ops = br_dev->netdev_ops;
3704 if (ops->ndo_fdb_del)
3705 err = ops->ndo_fdb_del(ndm, tb, dev, addr, vid);
3710 ndm->ndm_flags &= ~NTF_MASTER;
3713 /* Embedded bridge, macvlan, and any other device support */
3714 if (ndm->ndm_flags & NTF_SELF) {
3715 if (dev->netdev_ops->ndo_fdb_del)
3716 err = dev->netdev_ops->ndo_fdb_del(ndm, tb, dev, addr,
3719 err = ndo_dflt_fdb_del(ndm, tb, dev, addr, vid);
3722 rtnl_fdb_notify(dev, addr, vid, RTM_DELNEIGH,
3724 ndm->ndm_flags &= ~NTF_SELF;
3731 static int nlmsg_populate_fdb(struct sk_buff *skb,
3732 struct netlink_callback *cb,
3733 struct net_device *dev,
3735 int *idx, struct netdev_hw_addr_list *list)
3737 struct netdev_hw_addr *ha;
3741 portid = NETLINK_CB(cb->skb).portid;
3742 seq = cb->nlh->nlmsg_seq;
3744 list_for_each_entry(ha, &list->list, list) {
3745 if (*idx < cb->args[2])
3748 err = nlmsg_populate_fdb_fill(skb, dev, ha->addr, 0,
3750 RTM_NEWNEIGH, NTF_SELF,
3751 NLM_F_MULTI, NUD_PERMANENT);
3761 /* ndo_dflt_fdb_dump - default netdevice operation to dump an FDB table.
3762 * @nlh: netlink message header
3765 * Default netdevice operation to dump the existing unicast address list.
3766 * Returns number of addresses from list put in skb. */
3768 int ndo_dflt_fdb_dump(struct sk_buff *skb,
3769 struct netlink_callback *cb,
3770 struct net_device *dev,
3771 struct net_device *filter_dev, int *idx)
3776 if (dev->type != ARPHRD_ETHER)
3779 netif_addr_lock_bh(dev);
3780 err = nlmsg_populate_fdb(skb, cb, dev, idx, &dev->uc);
3783 err = nlmsg_populate_fdb(skb, cb, dev, idx, &dev->mc);
3785 netif_addr_unlock_bh(dev);
3788 EXPORT_SYMBOL(ndo_dflt_fdb_dump);
3790 static int rtnl_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb)
3792 struct net_device *dev;
3793 struct nlattr *tb[IFLA_MAX+1];
3794 struct net_device *br_dev = NULL;
3795 const struct net_device_ops *ops = NULL;
3796 const struct net_device_ops *cops = NULL;
3797 struct ifinfomsg *ifm = nlmsg_data(cb->nlh);
3798 struct net *net = sock_net(skb->sk);
3799 struct hlist_head *head;
3807 /* A hack to preserve kernel<->userspace interface.
3808 * Before Linux v4.12 this code accepted ndmsg since iproute2 v3.3.0.
3809 * However, ndmsg is shorter than ifinfomsg thus nlmsg_parse() bails.
3810 * So, check for ndmsg with an optional u32 attribute (not used here).
3811 * Fortunately these sizes don't conflict with the size of ifinfomsg
3812 * with an optional attribute. */
3814 if (nlmsg_len(cb->nlh) != sizeof(struct ndmsg) &&
3815 (nlmsg_len(cb->nlh) != sizeof(struct ndmsg) +
3816 nla_attr_size(sizeof(u32)))) {
3817 err = nlmsg_parse(cb->nlh, sizeof(struct ifinfomsg), tb,
3818 IFLA_MAX, ifla_policy, NULL);
3821 } else if (err == 0) {
3822 if (tb[IFLA_MASTER])
3823 br_idx = nla_get_u32(tb[IFLA_MASTER]);
3826 brport_idx = ifm->ifi_index;
3830 br_dev = __dev_get_by_index(net, br_idx);
3834 ops = br_dev->netdev_ops;
3838 s_idx = cb->args[1];
3840 for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
3842 head = &net->dev_index_head[h];
3843 hlist_for_each_entry(dev, head, index_hlist) {
3845 if (brport_idx && (dev->ifindex != brport_idx))
3848 if (!br_idx) { /* user did not specify a specific bridge */
3849 if (dev->priv_flags & IFF_BRIDGE_PORT) {
3850 br_dev = netdev_master_upper_dev_get(dev);
3851 cops = br_dev->netdev_ops;
3854 if (dev != br_dev &&
3855 !(dev->priv_flags & IFF_BRIDGE_PORT))
3858 if (br_dev != netdev_master_upper_dev_get(dev) &&
3859 !(dev->priv_flags & IFF_EBRIDGE))
3867 if (dev->priv_flags & IFF_BRIDGE_PORT) {
3868 if (cops && cops->ndo_fdb_dump) {
3869 err = cops->ndo_fdb_dump(skb, cb,
3872 if (err == -EMSGSIZE)
3877 if (dev->netdev_ops->ndo_fdb_dump)
3878 err = dev->netdev_ops->ndo_fdb_dump(skb, cb,
3882 err = ndo_dflt_fdb_dump(skb, cb, dev, NULL,
3884 if (err == -EMSGSIZE)
3889 /* reset fdb offset to 0 for rest of the interfaces */
3905 static int brport_nla_put_flag(struct sk_buff *skb, u32 flags, u32 mask,
3906 unsigned int attrnum, unsigned int flag)
3909 return nla_put_u8(skb, attrnum, !!(flags & flag));
3913 int ndo_dflt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
3914 struct net_device *dev, u16 mode,
3915 u32 flags, u32 mask, int nlflags,
3917 int (*vlan_fill)(struct sk_buff *skb,
3918 struct net_device *dev,
3921 struct nlmsghdr *nlh;
3922 struct ifinfomsg *ifm;
3923 struct nlattr *br_afspec;
3924 struct nlattr *protinfo;
3925 u8 operstate = netif_running(dev) ? dev->operstate : IF_OPER_DOWN;
3926 struct net_device *br_dev = netdev_master_upper_dev_get(dev);
3929 nlh = nlmsg_put(skb, pid, seq, RTM_NEWLINK, sizeof(*ifm), nlflags);
3933 ifm = nlmsg_data(nlh);
3934 ifm->ifi_family = AF_BRIDGE;
3936 ifm->ifi_type = dev->type;
3937 ifm->ifi_index = dev->ifindex;
3938 ifm->ifi_flags = dev_get_flags(dev);
3939 ifm->ifi_change = 0;
3942 if (nla_put_string(skb, IFLA_IFNAME, dev->name) ||
3943 nla_put_u32(skb, IFLA_MTU, dev->mtu) ||
3944 nla_put_u8(skb, IFLA_OPERSTATE, operstate) ||
3946 nla_put_u32(skb, IFLA_MASTER, br_dev->ifindex)) ||
3948 nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr)) ||
3949 (dev->ifindex != dev_get_iflink(dev) &&
3950 nla_put_u32(skb, IFLA_LINK, dev_get_iflink(dev))))
3951 goto nla_put_failure;
3953 br_afspec = nla_nest_start(skb, IFLA_AF_SPEC);
3955 goto nla_put_failure;
3957 if (nla_put_u16(skb, IFLA_BRIDGE_FLAGS, BRIDGE_FLAGS_SELF)) {
3958 nla_nest_cancel(skb, br_afspec);
3959 goto nla_put_failure;
3962 if (mode != BRIDGE_MODE_UNDEF) {
3963 if (nla_put_u16(skb, IFLA_BRIDGE_MODE, mode)) {
3964 nla_nest_cancel(skb, br_afspec);
3965 goto nla_put_failure;
3969 err = vlan_fill(skb, dev, filter_mask);
3971 nla_nest_cancel(skb, br_afspec);
3972 goto nla_put_failure;
3975 nla_nest_end(skb, br_afspec);
3977 protinfo = nla_nest_start(skb, IFLA_PROTINFO | NLA_F_NESTED);
3979 goto nla_put_failure;
3981 if (brport_nla_put_flag(skb, flags, mask,
3982 IFLA_BRPORT_MODE, BR_HAIRPIN_MODE) ||
3983 brport_nla_put_flag(skb, flags, mask,
3984 IFLA_BRPORT_GUARD, BR_BPDU_GUARD) ||
3985 brport_nla_put_flag(skb, flags, mask,
3986 IFLA_BRPORT_FAST_LEAVE,
3987 BR_MULTICAST_FAST_LEAVE) ||
3988 brport_nla_put_flag(skb, flags, mask,
3989 IFLA_BRPORT_PROTECT, BR_ROOT_BLOCK) ||
3990 brport_nla_put_flag(skb, flags, mask,
3991 IFLA_BRPORT_LEARNING, BR_LEARNING) ||
3992 brport_nla_put_flag(skb, flags, mask,
3993 IFLA_BRPORT_LEARNING_SYNC, BR_LEARNING_SYNC) ||
3994 brport_nla_put_flag(skb, flags, mask,
3995 IFLA_BRPORT_UNICAST_FLOOD, BR_FLOOD) ||
3996 brport_nla_put_flag(skb, flags, mask,
3997 IFLA_BRPORT_PROXYARP, BR_PROXYARP)) {
3998 nla_nest_cancel(skb, protinfo);
3999 goto nla_put_failure;
4002 nla_nest_end(skb, protinfo);
4004 nlmsg_end(skb, nlh);
4007 nlmsg_cancel(skb, nlh);
4008 return err ? err : -EMSGSIZE;
4010 EXPORT_SYMBOL_GPL(ndo_dflt_bridge_getlink);
4012 static int rtnl_bridge_getlink(struct sk_buff *skb, struct netlink_callback *cb)
4014 struct net *net = sock_net(skb->sk);
4015 struct net_device *dev;
4017 u32 portid = NETLINK_CB(cb->skb).portid;
4018 u32 seq = cb->nlh->nlmsg_seq;
4019 u32 filter_mask = 0;
4022 if (nlmsg_len(cb->nlh) > sizeof(struct ifinfomsg)) {
4023 struct nlattr *extfilt;
4025 extfilt = nlmsg_find_attr(cb->nlh, sizeof(struct ifinfomsg),
4028 if (nla_len(extfilt) < sizeof(filter_mask))
4031 filter_mask = nla_get_u32(extfilt);
4036 for_each_netdev_rcu(net, dev) {
4037 const struct net_device_ops *ops = dev->netdev_ops;
4038 struct net_device *br_dev = netdev_master_upper_dev_get(dev);
4040 if (br_dev && br_dev->netdev_ops->ndo_bridge_getlink) {
4041 if (idx >= cb->args[0]) {
4042 err = br_dev->netdev_ops->ndo_bridge_getlink(
4043 skb, portid, seq, dev,
4044 filter_mask, NLM_F_MULTI);
4045 if (err < 0 && err != -EOPNOTSUPP) {
4046 if (likely(skb->len))
4055 if (ops->ndo_bridge_getlink) {
4056 if (idx >= cb->args[0]) {
4057 err = ops->ndo_bridge_getlink(skb, portid,
4061 if (err < 0 && err != -EOPNOTSUPP) {
4062 if (likely(skb->len))
4079 static inline size_t bridge_nlmsg_size(void)
4081 return NLMSG_ALIGN(sizeof(struct ifinfomsg))
4082 + nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */
4083 + nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */
4084 + nla_total_size(sizeof(u32)) /* IFLA_MASTER */
4085 + nla_total_size(sizeof(u32)) /* IFLA_MTU */
4086 + nla_total_size(sizeof(u32)) /* IFLA_LINK */
4087 + nla_total_size(sizeof(u32)) /* IFLA_OPERSTATE */
4088 + nla_total_size(sizeof(u8)) /* IFLA_PROTINFO */
4089 + nla_total_size(sizeof(struct nlattr)) /* IFLA_AF_SPEC */
4090 + nla_total_size(sizeof(u16)) /* IFLA_BRIDGE_FLAGS */
4091 + nla_total_size(sizeof(u16)); /* IFLA_BRIDGE_MODE */
4094 static int rtnl_bridge_notify(struct net_device *dev)
4096 struct net *net = dev_net(dev);
4097 struct sk_buff *skb;
4098 int err = -EOPNOTSUPP;
4100 if (!dev->netdev_ops->ndo_bridge_getlink)
4103 skb = nlmsg_new(bridge_nlmsg_size(), GFP_ATOMIC);
4109 err = dev->netdev_ops->ndo_bridge_getlink(skb, 0, 0, dev, 0, 0);
4113 /* Notification info is only filled for bridge ports, not the bridge
4114 * device itself. Therefore, a zero notification length is valid and
4115 * should not result in an error. */
4120 rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, GFP_ATOMIC);
4123 WARN_ON(err == -EMSGSIZE);
4126 rtnl_set_sk_err(net, RTNLGRP_LINK, err);
4130 static int rtnl_bridge_setlink(struct sk_buff *skb, struct nlmsghdr *nlh,
4131 struct netlink_ext_ack *extack)
4133 struct net *net = sock_net(skb->sk);
4134 struct ifinfomsg *ifm;
4135 struct net_device *dev;
4136 struct nlattr *br_spec, *attr = NULL;
4137 int rem, err = -EOPNOTSUPP;
4139 bool have_flags = false;
4141 if (nlmsg_len(nlh) < sizeof(*ifm))
4144 ifm = nlmsg_data(nlh);
4145 if (ifm->ifi_family != AF_BRIDGE)
4146 return -EPFNOSUPPORT;
4148 dev = __dev_get_by_index(net, ifm->ifi_index);
4150 NL_SET_ERR_MSG(extack, "unknown ifindex");
4154 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
4156 nla_for_each_nested(attr, br_spec, rem) {
4157 if (nla_type(attr) == IFLA_BRIDGE_FLAGS) {
4158 if (nla_len(attr) < sizeof(flags))
4162 flags = nla_get_u16(attr);
4168 if (!flags || (flags & BRIDGE_FLAGS_MASTER)) {
4169 struct net_device *br_dev = netdev_master_upper_dev_get(dev);
4171 if (!br_dev || !br_dev->netdev_ops->ndo_bridge_setlink) {
4176 err = br_dev->netdev_ops->ndo_bridge_setlink(dev, nlh, flags);
4180 flags &= ~BRIDGE_FLAGS_MASTER;
4183 if ((flags & BRIDGE_FLAGS_SELF)) {
4184 if (!dev->netdev_ops->ndo_bridge_setlink)
4187 err = dev->netdev_ops->ndo_bridge_setlink(dev, nlh,
4190 flags &= ~BRIDGE_FLAGS_SELF;
4192 /* Generate event to notify upper layer of bridge change. */
4195 err = rtnl_bridge_notify(dev);
4200 memcpy(nla_data(attr), &flags, sizeof(flags));
4205 static int rtnl_bridge_dellink(struct sk_buff *skb, struct nlmsghdr *nlh,
4206 struct netlink_ext_ack *extack)
4208 struct net *net = sock_net(skb->sk);
4209 struct ifinfomsg *ifm;
4210 struct net_device *dev;
4211 struct nlattr *br_spec, *attr = NULL;
4212 int rem, err = -EOPNOTSUPP;
4214 bool have_flags = false;
4216 if (nlmsg_len(nlh) < sizeof(*ifm))
4219 ifm = nlmsg_data(nlh);
4220 if (ifm->ifi_family != AF_BRIDGE)
4221 return -EPFNOSUPPORT;
4223 dev = __dev_get_by_index(net, ifm->ifi_index);
4225 NL_SET_ERR_MSG(extack, "unknown ifindex");
4229 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
4231 nla_for_each_nested(attr, br_spec, rem) {
4232 if (nla_type(attr) == IFLA_BRIDGE_FLAGS) {
4233 if (nla_len(attr) < sizeof(flags))
4237 flags = nla_get_u16(attr);
4243 if (!flags || (flags & BRIDGE_FLAGS_MASTER)) {
4244 struct net_device *br_dev = netdev_master_upper_dev_get(dev);
4246 if (!br_dev || !br_dev->netdev_ops->ndo_bridge_dellink) {
4251 err = br_dev->netdev_ops->ndo_bridge_dellink(dev, nlh, flags);
4255 flags &= ~BRIDGE_FLAGS_MASTER;
4258 if ((flags & BRIDGE_FLAGS_SELF)) {
4259 if (!dev->netdev_ops->ndo_bridge_dellink)
4262 err = dev->netdev_ops->ndo_bridge_dellink(dev, nlh,
4266 flags &= ~BRIDGE_FLAGS_SELF;
4268 /* Generate event to notify upper layer of bridge change. */
4271 err = rtnl_bridge_notify(dev);
4276 memcpy(nla_data(attr), &flags, sizeof(flags));
4281 static bool stats_attr_valid(unsigned int mask, int attrid, int idxattr)
4283 return (mask & IFLA_STATS_FILTER_BIT(attrid)) &&
4284 (!idxattr || idxattr == attrid);
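/* Example (informational): when a stats dump is resumed, *idxattr pins
 * the attribute that was being filled, so stats_attr_valid() accepts
 * only that attribute again (any attribute qualifies while idxattr is
 * 0), letting rtnl_fill_statsinfo() continue exactly where it stopped.
 */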
4287 #define IFLA_OFFLOAD_XSTATS_FIRST (IFLA_OFFLOAD_XSTATS_UNSPEC + 1)
4288 static int rtnl_get_offload_stats_attr_size(int attr_id)
4291 case IFLA_OFFLOAD_XSTATS_CPU_HIT:
4292 return sizeof(struct rtnl_link_stats64);
4298 static int rtnl_get_offload_stats(struct sk_buff *skb, struct net_device *dev,
4301 struct nlattr *attr = NULL;
4306 if (!(dev->netdev_ops && dev->netdev_ops->ndo_has_offload_stats &&
4307 dev->netdev_ops->ndo_get_offload_stats))
4310 for (attr_id = IFLA_OFFLOAD_XSTATS_FIRST;
4311 attr_id <= IFLA_OFFLOAD_XSTATS_MAX; attr_id++) {
4312 if (attr_id < *prividx)
4315 size = rtnl_get_offload_stats_attr_size(attr_id);
4319 if (!dev->netdev_ops->ndo_has_offload_stats(dev, attr_id))
4322 attr = nla_reserve_64bit(skb, attr_id, size,
4323 IFLA_OFFLOAD_XSTATS_UNSPEC);
4325 goto nla_put_failure;
4327 attr_data = nla_data(attr);
4328 memset(attr_data, 0, size);
4329 err = dev->netdev_ops->ndo_get_offload_stats(attr_id, dev,
4332 goto get_offload_stats_failure;
4343 get_offload_stats_failure:
4348 static int rtnl_get_offload_stats_size(const struct net_device *dev)
4354 if (!(dev->netdev_ops && dev->netdev_ops->ndo_has_offload_stats &&
4355 dev->netdev_ops->ndo_get_offload_stats))
4358 for (attr_id = IFLA_OFFLOAD_XSTATS_FIRST;
4359 attr_id <= IFLA_OFFLOAD_XSTATS_MAX; attr_id++) {
4360 if (!dev->netdev_ops->ndo_has_offload_stats(dev, attr_id))
4362 size = rtnl_get_offload_stats_attr_size(attr_id);
4363 nla_size += nla_total_size_64bit(size);
4367 nla_size += nla_total_size(0);
4372 static int rtnl_fill_statsinfo(struct sk_buff *skb, struct net_device *dev,
4373 int type, u32 pid, u32 seq, u32 change,
4374 unsigned int flags, unsigned int filter_mask,
4375 int *idxattr, int *prividx)
4377 struct if_stats_msg *ifsm;
4378 struct nlmsghdr *nlh;
4379 struct nlattr *attr;
4380 int s_prividx = *prividx;
4385 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ifsm), flags);
4389 ifsm = nlmsg_data(nlh);
4390 ifsm->family = PF_UNSPEC;
4393 ifsm->ifindex = dev->ifindex;
4394 ifsm->filter_mask = filter_mask;
4396 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_64, *idxattr)) {
4397 struct rtnl_link_stats64 *sp;
4399 attr = nla_reserve_64bit(skb, IFLA_STATS_LINK_64,
4400 sizeof(struct rtnl_link_stats64),
4403 goto nla_put_failure;
4405 sp = nla_data(attr);
4406 dev_get_stats(dev, sp);
4409 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_XSTATS, *idxattr)) {
4410 const struct rtnl_link_ops *ops = dev->rtnl_link_ops;
4412 if (ops && ops->fill_linkxstats) {
4413 *idxattr = IFLA_STATS_LINK_XSTATS;
4414 attr = nla_nest_start(skb,
4415 IFLA_STATS_LINK_XSTATS);
4417 goto nla_put_failure;
4419 err = ops->fill_linkxstats(skb, dev, prividx, *idxattr);
4420 nla_nest_end(skb, attr);
4422 goto nla_put_failure;
4427 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_XSTATS_SLAVE,
4429 const struct rtnl_link_ops *ops = NULL;
4430 const struct net_device *master;
4432 master = netdev_master_upper_dev_get(dev);
4434 ops = master->rtnl_link_ops;
4435 if (ops && ops->fill_linkxstats) {
4436 *idxattr = IFLA_STATS_LINK_XSTATS_SLAVE;
4437 attr = nla_nest_start(skb,
4438 IFLA_STATS_LINK_XSTATS_SLAVE);
4440 goto nla_put_failure;
4442 err = ops->fill_linkxstats(skb, dev, prividx, *idxattr);
4443 nla_nest_end(skb, attr);
4445 goto nla_put_failure;
4450 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_OFFLOAD_XSTATS,
4452 *idxattr = IFLA_STATS_LINK_OFFLOAD_XSTATS;
4453 attr = nla_nest_start(skb, IFLA_STATS_LINK_OFFLOAD_XSTATS);
4455 goto nla_put_failure;
4457 err = rtnl_get_offload_stats(skb, dev, prividx);
4458 if (err == -ENODATA)
4459 nla_nest_cancel(skb, attr);
4461 nla_nest_end(skb, attr);
4463 if (err && err != -ENODATA)
4464 goto nla_put_failure;
4468 if (stats_attr_valid(filter_mask, IFLA_STATS_AF_SPEC, *idxattr)) {
4469 struct rtnl_af_ops *af_ops;
4471 *idxattr = IFLA_STATS_AF_SPEC;
4472 attr = nla_nest_start(skb, IFLA_STATS_AF_SPEC);
4474 goto nla_put_failure;
4477 list_for_each_entry_rcu(af_ops, &rtnl_af_ops, list) {
4478 if (af_ops->fill_stats_af) {
4482 af = nla_nest_start(skb, af_ops->family);
4485 goto nla_put_failure;
4487 err = af_ops->fill_stats_af(skb, dev);
4489 if (err == -ENODATA) {
4490 nla_nest_cancel(skb, af);
4491 } else if (err < 0) {
4493 goto nla_put_failure;
4496 nla_nest_end(skb, af);
4501 nla_nest_end(skb, attr);
4506 nlmsg_end(skb, nlh);
4511 /* not a multi message or no progress mean a real error */
4512 if (!(flags & NLM_F_MULTI) || s_prividx == *prividx)
4513 nlmsg_cancel(skb, nlh);
4515 nlmsg_end(skb, nlh);
4520 static size_t if_nlmsg_stats_size(const struct net_device *dev,
4523 size_t size = NLMSG_ALIGN(sizeof(struct if_stats_msg));
4525 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_64, 0))
4526 size += nla_total_size_64bit(sizeof(struct rtnl_link_stats64));
4528 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_XSTATS, 0)) {
4529 const struct rtnl_link_ops *ops = dev->rtnl_link_ops;
4530 int attr = IFLA_STATS_LINK_XSTATS;
4532 if (ops && ops->get_linkxstats_size) {
4533 size += nla_total_size(ops->get_linkxstats_size(dev,
4535 /* for IFLA_STATS_LINK_XSTATS */
4536 size += nla_total_size(0);
4540 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_XSTATS_SLAVE, 0)) {
4541 struct net_device *_dev = (struct net_device *)dev;
4542 const struct rtnl_link_ops *ops = NULL;
4543 const struct net_device *master;
4545 /* netdev_master_upper_dev_get can't take const */
4546 master = netdev_master_upper_dev_get(_dev);
4548 ops = master->rtnl_link_ops;
4549 if (ops && ops->get_linkxstats_size) {
4550 int attr = IFLA_STATS_LINK_XSTATS_SLAVE;
4552 size += nla_total_size(ops->get_linkxstats_size(dev,
4554 /* for IFLA_STATS_LINK_XSTATS_SLAVE */
4555 size += nla_total_size(0);
4559 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_OFFLOAD_XSTATS, 0))
4560 size += rtnl_get_offload_stats_size(dev);
4562 if (stats_attr_valid(filter_mask, IFLA_STATS_AF_SPEC, 0)) {
4563 struct rtnl_af_ops *af_ops;
4565 /* for IFLA_STATS_AF_SPEC */
4566 size += nla_total_size(0);
4569 list_for_each_entry_rcu(af_ops, &rtnl_af_ops, list) {
4570 if (af_ops->get_stats_af_size) {
4571 size += nla_total_size(
4572 af_ops->get_stats_af_size(dev));
4575 size += nla_total_size(0);
4584 static int rtnl_stats_get(struct sk_buff *skb, struct nlmsghdr *nlh,
4585 struct netlink_ext_ack *extack)
4587 struct net *net = sock_net(skb->sk);
4588 struct net_device *dev = NULL;
4589 int idxattr = 0, prividx = 0;
4590 struct if_stats_msg *ifsm;
4591 struct sk_buff *nskb;
4595 if (nlmsg_len(nlh) < sizeof(*ifsm))
4598 ifsm = nlmsg_data(nlh);
4599 if (ifsm->ifindex > 0)
4600 dev = __dev_get_by_index(net, ifsm->ifindex);
4607 filter_mask = ifsm->filter_mask;
4611 nskb = nlmsg_new(if_nlmsg_stats_size(dev, filter_mask), GFP_KERNEL);
4615 err = rtnl_fill_statsinfo(nskb, dev, RTM_NEWSTATS,
4616 NETLINK_CB(skb).portid, nlh->nlmsg_seq, 0,
4617 0, filter_mask, &idxattr, &prividx);
4619 /* -EMSGSIZE implies BUG in if_nlmsg_stats_size */
4620 WARN_ON(err == -EMSGSIZE);
4623 err = rtnl_unicast(nskb, net, NETLINK_CB(skb).portid);
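/* Illustrative userspace sketch (assumptions: raw NETLINK_ROUTE socket,
 * hard-coded ifindex, error handling elided): the RTM_GETSTATS query
 * that rtnl_stats_get() above answers, requesting only the 64-bit link
 * stats via the filter mask.
 *
 *	#include <string.h>
 *	#include <sys/socket.h>
 *	#include <linux/netlink.h>
 *	#include <linux/rtnetlink.h>
 *	#include <linux/if_link.h>
 *
 *	int get_stats_sketch(void)
 *	{
 *		struct {
 *			struct nlmsghdr     nlh;
 *			struct if_stats_msg ifsm;
 *		} req;
 *		int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
 *
 *		memset(&req, 0, sizeof(req));
 *		req.nlh.nlmsg_len    = sizeof(req);
 *		req.nlh.nlmsg_type   = RTM_GETSTATS;
 *		req.nlh.nlmsg_flags  = NLM_F_REQUEST;
 *		req.ifsm.ifindex     = 2;	// device to query (assumed)
 *		req.ifsm.filter_mask = IFLA_STATS_FILTER_BIT(IFLA_STATS_LINK_64);
 *		return send(fd, &req, sizeof(req), 0) < 0 ? -1 : 0;
 *	}
 */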
4629 static int rtnl_stats_dump(struct sk_buff *skb, struct netlink_callback *cb)
4631 int h, s_h, err, s_idx, s_idxattr, s_prividx;
4632 struct net *net = sock_net(skb->sk);
4633 unsigned int flags = NLM_F_MULTI;
4634 struct if_stats_msg *ifsm;
4635 struct hlist_head *head;
4636 struct net_device *dev;
4637 u32 filter_mask = 0;
4641 s_idx = cb->args[1];
4642 s_idxattr = cb->args[2];
4643 s_prividx = cb->args[3];
4645 cb->seq = net->dev_base_seq;
4647 if (nlmsg_len(cb->nlh) < sizeof(*ifsm))
4650 ifsm = nlmsg_data(cb->nlh);
4651 filter_mask = ifsm->filter_mask;
4655 for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
4657 head = &net->dev_index_head[h];
4658 hlist_for_each_entry(dev, head, index_hlist) {
4661 err = rtnl_fill_statsinfo(skb, dev, RTM_NEWSTATS,
4662 NETLINK_CB(cb->skb).portid,
4663 cb->nlh->nlmsg_seq, 0,
4665 &s_idxattr, &s_prividx);
4666 /* If we ran out of room on the first message, we're in trouble. */
4669 WARN_ON((err == -EMSGSIZE) && (skb->len == 0));
4675 nl_dump_check_consistent(cb, nlmsg_hdr(skb));
4681 cb->args[3] = s_prividx;
4682 cb->args[2] = s_idxattr;
4689 /* Process one rtnetlink message. */
4691 static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
4692 struct netlink_ext_ack *extack)
4694 struct net *net = sock_net(skb->sk);
4695 struct rtnl_link *link;
4696 struct module *owner;
4697 int err = -EOPNOTSUPP;
4698 rtnl_doit_func doit;
4704 type = nlh->nlmsg_type;
4710 /* All the messages must have at least 1 byte length */
4711 if (nlmsg_len(nlh) < sizeof(struct rtgenmsg))
4714 family = ((struct rtgenmsg *)nlmsg_data(nlh))->rtgen_family;
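/* Reminder (summary, not an original comment): rtnetlink message types
 * come in groups of four (NEW, DEL, GET, SET), so kind == (type & 3)
 * and kind 2 denotes a GET; only GET requests are allowed below
 * without CAP_NET_ADMIN.
 */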
4717 if (kind != 2 && !netlink_net_capable(skb, CAP_NET_ADMIN))
4721 if (kind == 2 && nlh->nlmsg_flags&NLM_F_DUMP) {
4723 rtnl_dumpit_func dumpit;
4724 u16 min_dump_alloc = 0;
4726 link = rtnl_get_link(family, type);
4727 if (!link || !link->dumpit) {
4729 link = rtnl_get_link(family, type);
4730 if (!link || !link->dumpit)
4733 owner = link->owner;
4734 dumpit = link->dumpit;
4736 if (type == RTM_GETLINK - RTM_BASE)
4737 min_dump_alloc = rtnl_calcit(skb, nlh);
4740 /* need to do this before rcu_read_unlock() */
4741 if (!try_module_get(owner))
4742 err = -EPROTONOSUPPORT;
4748 struct netlink_dump_control c = {
4750 .min_dump_alloc = min_dump_alloc,
4753 err = netlink_dump_start(rtnl, skb, nlh, &c);
4754 /* netlink_dump_start() will keep a reference on
4755 * module if dump is still in progress. */
4762 link = rtnl_get_link(family, type);
4763 if (!link || !link->doit) {
4765 link = rtnl_get_link(PF_UNSPEC, type);
4766 if (!link || !link->doit)
4770 owner = link->owner;
4771 if (!try_module_get(owner)) {
4772 err = -EPROTONOSUPPORT;
4776 flags = link->flags;
4777 if (flags & RTNL_FLAG_DOIT_UNLOCKED) {
4781 err = doit(skb, nlh, extack);
4788 link = rtnl_get_link(family, type);
4789 if (link && link->doit)
4790 err = link->doit(skb, nlh, extack);
4806 static void rtnetlink_rcv(struct sk_buff *skb)
4808 netlink_rcv_skb(skb, &rtnetlink_rcv_msg);
4811 static int rtnetlink_bind(struct net *net, int group)
4814 case RTNLGRP_IPV4_MROUTE_R:
4815 case RTNLGRP_IPV6_MROUTE_R:
4816 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
4823 static int rtnetlink_event(struct notifier_block *this, unsigned long event, void *ptr)
4825 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
4829 case NETDEV_CHANGEMTU:
4830 case NETDEV_CHANGEADDR:
4831 case NETDEV_CHANGENAME:
4832 case NETDEV_FEAT_CHANGE:
4833 case NETDEV_BONDING_FAILOVER:
4834 case NETDEV_POST_TYPE_CHANGE:
4835 case NETDEV_NOTIFY_PEERS:
4836 case NETDEV_CHANGEUPPER:
4837 case NETDEV_RESEND_IGMP:
4838 case NETDEV_CHANGEINFODATA:
4839 case NETDEV_CHANGELOWERSTATE:
4840 case NETDEV_CHANGE_TX_QUEUE_LEN:
4841 rtmsg_ifinfo_event(RTM_NEWLINK, dev, 0, rtnl_get_event(event),
4842 GFP_KERNEL, NULL, 0);
4850 static struct notifier_block rtnetlink_dev_notifier = {
4851 .notifier_call = rtnetlink_event,
4855 static int __net_init rtnetlink_net_init(struct net *net)
4858 struct netlink_kernel_cfg cfg = {
4859 .groups = RTNLGRP_MAX,
4860 .input = rtnetlink_rcv,
4861 .cb_mutex = &rtnl_mutex,
4862 .flags = NL_CFG_F_NONROOT_RECV,
4863 .bind = rtnetlink_bind,
4866 sk = netlink_kernel_create(net, NETLINK_ROUTE, &cfg);
4873 static void __net_exit rtnetlink_net_exit(struct net *net)
4875 netlink_kernel_release(net->rtnl);
4879 static struct pernet_operations rtnetlink_net_ops = {
4880 .init = rtnetlink_net_init,
4881 .exit = rtnetlink_net_exit,
4884 void __init rtnetlink_init(void)
4886 if (register_pernet_subsys(&rtnetlink_net_ops))
4887 panic("rtnetlink_init: cannot initialize rtnetlink\n");
4889 register_netdevice_notifier(&rtnetlink_dev_notifier);
4891 rtnl_register(PF_UNSPEC, RTM_GETLINK, rtnl_getlink,
4892 rtnl_dump_ifinfo, 0);
4893 rtnl_register(PF_UNSPEC, RTM_SETLINK, rtnl_setlink, NULL, 0);
4894 rtnl_register(PF_UNSPEC, RTM_NEWLINK, rtnl_newlink, NULL, 0);
4895 rtnl_register(PF_UNSPEC, RTM_DELLINK, rtnl_dellink, NULL, 0);
4897 rtnl_register(PF_UNSPEC, RTM_GETADDR, NULL, rtnl_dump_all, 0);
4898 rtnl_register(PF_UNSPEC, RTM_GETROUTE, NULL, rtnl_dump_all, 0);
4899 rtnl_register(PF_UNSPEC, RTM_GETNETCONF, NULL, rtnl_dump_all, 0);
4901 rtnl_register(PF_BRIDGE, RTM_NEWNEIGH, rtnl_fdb_add, NULL, 0);
4902 rtnl_register(PF_BRIDGE, RTM_DELNEIGH, rtnl_fdb_del, NULL, 0);
4903 rtnl_register(PF_BRIDGE, RTM_GETNEIGH, NULL, rtnl_fdb_dump, 0);
4905 rtnl_register(PF_BRIDGE, RTM_GETLINK, NULL, rtnl_bridge_getlink, 0);
4906 rtnl_register(PF_BRIDGE, RTM_DELLINK, rtnl_bridge_dellink, NULL, 0);
4907 rtnl_register(PF_BRIDGE, RTM_SETLINK, rtnl_bridge_setlink, NULL, 0);
4909 rtnl_register(PF_UNSPEC, RTM_GETSTATS, rtnl_stats_get, rtnl_stats_dump, 0);
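/* Summary (informational): everything registered above is dispatched by
 * rtnetlink_rcv_msg(): doit handlers answer single requests, while
 * dumpit handlers serve NLM_F_DUMP GET requests through
 * netlink_dump_start().
 */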