1 // SPDX-License-Identifier: GPL-2.0-or-later
3 * INET An implementation of the TCP/IP protocol suite for the LINUX
4 * operating system. INET is implemented using the BSD Socket
5 * interface as the means of communication with the user level.
7 * Routing netlink socket interface: protocol independent part.
9 * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
12 * Vitaly E. Lavrov RTA_OK arithmetic was wrong.
15 #include <linux/bitops.h>
16 #include <linux/errno.h>
17 #include <linux/module.h>
18 #include <linux/types.h>
19 #include <linux/socket.h>
20 #include <linux/kernel.h>
21 #include <linux/timer.h>
22 #include <linux/string.h>
23 #include <linux/sockios.h>
24 #include <linux/net.h>
25 #include <linux/fcntl.h>
27 #include <linux/slab.h>
28 #include <linux/interrupt.h>
29 #include <linux/capability.h>
30 #include <linux/skbuff.h>
31 #include <linux/init.h>
32 #include <linux/security.h>
33 #include <linux/mutex.h>
34 #include <linux/if_addr.h>
35 #include <linux/if_bridge.h>
36 #include <linux/if_vlan.h>
37 #include <linux/pci.h>
38 #include <linux/etherdevice.h>
39 #include <linux/bpf.h>
41 #include <linux/uaccess.h>
43 #include <linux/inet.h>
44 #include <linux/netdevice.h>
46 #include <net/protocol.h>
48 #include <net/route.h>
52 #include <net/pkt_sched.h>
53 #include <net/fib_rules.h>
54 #include <net/rtnetlink.h>
55 #include <net/net_namespace.h>
59 #define RTNL_MAX_TYPE 50
60 #define RTNL_SLAVE_MAX_TYPE 40
64 rtnl_dumpit_func dumpit;
70 static DEFINE_MUTEX(rtnl_mutex);
74 mutex_lock(&rtnl_mutex);
76 EXPORT_SYMBOL(rtnl_lock);
78 int rtnl_lock_killable(void)
80 return mutex_lock_killable(&rtnl_mutex);
82 EXPORT_SYMBOL(rtnl_lock_killable);
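/* Illustrative sketch (not part of this file): a caller that wants the RTNL
 * but should stay killable while waiting might do roughly the following;
 * the work done under the lock is assumed.
 *
 *	int err = rtnl_lock_killable();
 *
 *	if (err)
 *		return err;	// interrupted by a fatal signal
 *	// ... modify RTNL-protected state ...
 *	rtnl_unlock();
 *	return 0;
 */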
84 static struct sk_buff *defer_kfree_skb_list;
85 void rtnl_kfree_skbs(struct sk_buff *head, struct sk_buff *tail)
88 tail->next = defer_kfree_skb_list;
89 defer_kfree_skb_list = head;
92 EXPORT_SYMBOL(rtnl_kfree_skbs);
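/* Illustrative sketch (not part of this file): rtnl_kfree_skbs() queues skbs
 * on defer_kfree_skb_list so they are freed by __rtnl_unlock() instead of
 * under the mutex. A caller already holding the RTNL could use it like:
 *
 *	// instead of kfree_skb(skb) while rtnl_lock() is held:
 *	rtnl_kfree_skbs(skb, skb);	// single skb: head == tail
 *	// the skb is actually freed once rtnl_unlock() runs
 */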
94 void __rtnl_unlock(void)
96 struct sk_buff *head = defer_kfree_skb_list;
98 defer_kfree_skb_list = NULL;
100 /* Ensure that we didn't actually add any TODO item when __rtnl_unlock()
101 * is used. In some places, e.g. in cfg80211, we have code that will do
108 * and because netdev_run_todo() acquires the RTNL for items on the list
109 * we could cause a situation such as this:
112 * unregister_netdevice()
120 * // list not empty now
121 * // because of thread 2
123 * while (!list_empty(...))
128 * However, usage of __rtnl_unlock() is rare, and so we can ensure that
129 * it's not used in cases where something is added to the todo list.
131 WARN_ON(!list_empty(&net_todo_list));
133 mutex_unlock(&rtnl_mutex);
136 struct sk_buff *next = head->next;
144 void rtnl_unlock(void)
146 /* This fellow will unlock it for us. */
149 EXPORT_SYMBOL(rtnl_unlock);
151 int rtnl_trylock(void)
153 return mutex_trylock(&rtnl_mutex);
155 EXPORT_SYMBOL(rtnl_trylock);
157 int rtnl_is_locked(void)
159 return mutex_is_locked(&rtnl_mutex);
161 EXPORT_SYMBOL(rtnl_is_locked);
163 bool refcount_dec_and_rtnl_lock(refcount_t *r)
165 return refcount_dec_and_mutex_lock(r, &rtnl_mutex);
167 EXPORT_SYMBOL(refcount_dec_and_rtnl_lock);
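/* Illustrative sketch (not part of this file): the helper returns true only
 * when the refcount dropped to zero, in which case the RTNL is held on
 * return. A hypothetical object "obj" with a refcount_t member could be
 * torn down like:
 *
 *	if (refcount_dec_and_rtnl_lock(&obj->refcnt)) {
 *		// last reference gone, RTNL held
 *		obj_destroy(obj);
 *		rtnl_unlock();
 *	}
 */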
169 #ifdef CONFIG_PROVE_LOCKING
170 bool lockdep_rtnl_is_held(void)
172 return lockdep_is_held(&rtnl_mutex);
174 EXPORT_SYMBOL(lockdep_rtnl_is_held);
175 #endif /* #ifdef CONFIG_PROVE_LOCKING */
177 static struct rtnl_link __rcu *__rcu *rtnl_msg_handlers[RTNL_FAMILY_MAX + 1];
179 static inline int rtm_msgindex(int msgtype)
181 int msgindex = msgtype - RTM_BASE;
184 * msgindex < 0 implies someone tried to register a netlink
185 * control code. msgindex >= RTM_NR_MSGTYPES may indicate that
186 * the message type has not been added to linux/rtnetlink.h
188 BUG_ON(msgindex < 0 || msgindex >= RTM_NR_MSGTYPES);
193 static struct rtnl_link *rtnl_get_link(int protocol, int msgtype)
195 struct rtnl_link __rcu **tab;
197 if (protocol >= ARRAY_SIZE(rtnl_msg_handlers))
198 protocol = PF_UNSPEC;
200 tab = rcu_dereference_rtnl(rtnl_msg_handlers[protocol]);
202 tab = rcu_dereference_rtnl(rtnl_msg_handlers[PF_UNSPEC]);
204 return rcu_dereference_rtnl(tab[msgtype]);
207 static int rtnl_register_internal(struct module *owner,
208 int protocol, int msgtype,
209 rtnl_doit_func doit, rtnl_dumpit_func dumpit,
212 struct rtnl_link *link, *old;
213 struct rtnl_link __rcu **tab;
217 BUG_ON(protocol < 0 || protocol > RTNL_FAMILY_MAX);
218 msgindex = rtm_msgindex(msgtype);
221 tab = rtnl_dereference(rtnl_msg_handlers[protocol]);
223 tab = kcalloc(RTM_NR_MSGTYPES, sizeof(void *), GFP_KERNEL);
227 /* ensures we see the 0 stores */
228 rcu_assign_pointer(rtnl_msg_handlers[protocol], tab);
231 old = rtnl_dereference(tab[msgindex]);
233 link = kmemdup(old, sizeof(*old), GFP_KERNEL);
237 link = kzalloc(sizeof(*link), GFP_KERNEL);
242 WARN_ON(link->owner && link->owner != owner);
245 WARN_ON(doit && link->doit && link->doit != doit);
248 WARN_ON(dumpit && link->dumpit && link->dumpit != dumpit);
250 link->dumpit = dumpit;
252 WARN_ON(rtnl_msgtype_kind(msgtype) != RTNL_KIND_DEL &&
253 (flags & RTNL_FLAG_BULK_DEL_SUPPORTED));
254 link->flags |= flags;
256 /* publish protocol:msgtype */
257 rcu_assign_pointer(tab[msgindex], link);
267 * rtnl_register_module - Register a rtnetlink message type
269 * @owner: module registering the hook (THIS_MODULE)
270 * @protocol: Protocol family or PF_UNSPEC
271 * @msgtype: rtnetlink message type
272 * @doit: Function pointer called for each request message
273 * @dumpit: Function pointer called for each dump request (NLM_F_DUMP) message
274 * @flags: rtnl_link_flags to modify behaviour of doit/dumpit functions
276 * Like rtnl_register, but for use by removable modules.
278 int rtnl_register_module(struct module *owner,
279 int protocol, int msgtype,
280 rtnl_doit_func doit, rtnl_dumpit_func dumpit,
283 return rtnl_register_internal(owner, protocol, msgtype,
284 doit, dumpit, flags);
286 EXPORT_SYMBOL_GPL(rtnl_register_module);
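/* Illustrative sketch (not part of this file): a removable module pairs
 * rtnl_register_module() in its init path with rtnl_unregister() (or
 * rtnl_unregister_all()) on exit. The family/msgtype and the foo_* names
 * below are placeholders.
 *
 *	static int foo_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
 *			    struct netlink_ext_ack *extack)
 *	{
 *		// handle one request message
 *		return 0;
 *	}
 *
 *	static int __init foo_init(void)
 *	{
 *		return rtnl_register_module(THIS_MODULE, PF_BRIDGE,
 *					    RTM_GETLINK, foo_doit, NULL, 0);
 *	}
 *
 *	static void __exit foo_exit(void)
 *	{
 *		rtnl_unregister(PF_BRIDGE, RTM_GETLINK);
 *	}
 */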
289 * rtnl_register - Register a rtnetlink message type
290 * @protocol: Protocol family or PF_UNSPEC
291 * @msgtype: rtnetlink message type
292 * @doit: Function pointer called for each request message
293 * @dumpit: Function pointer called for each dump request (NLM_F_DUMP) message
294 * @flags: rtnl_link_flags to modify behaviour of doit/dumpit functions
296 * Registers the specified function pointers (at least one of them has
297 * to be non-NULL) to be called whenever a request message for the
298 * specified protocol family and message type is received.
300 * The special protocol family PF_UNSPEC may be used to define fallback
301 * function pointers for the case when no entry for the specific protocol
304 void rtnl_register(int protocol, int msgtype,
305 rtnl_doit_func doit, rtnl_dumpit_func dumpit,
310 err = rtnl_register_internal(NULL, protocol, msgtype, doit, dumpit,
313 pr_err("Unable to register rtnetlink message handler, "
314 "protocol = %d, message type = %d\n", protocol, msgtype);
318 * rtnl_unregister - Unregister a rtnetlink message type
319 * @protocol: Protocol family or PF_UNSPEC
320 * @msgtype: rtnetlink message type
322 * Returns 0 on success or a negative error code.
324 int rtnl_unregister(int protocol, int msgtype)
326 struct rtnl_link __rcu **tab;
327 struct rtnl_link *link;
330 BUG_ON(protocol < 0 || protocol > RTNL_FAMILY_MAX);
331 msgindex = rtm_msgindex(msgtype);
334 tab = rtnl_dereference(rtnl_msg_handlers[protocol]);
340 link = rtnl_dereference(tab[msgindex]);
341 RCU_INIT_POINTER(tab[msgindex], NULL);
344 kfree_rcu(link, rcu);
348 EXPORT_SYMBOL_GPL(rtnl_unregister);
351 * rtnl_unregister_all - Unregister all rtnetlink message types of a protocol
352 * @protocol: Protocol family or PF_UNSPEC
354 * Identical to calling rtnl_unregister() for all registered message types
355 * of a certain protocol family.
357 void rtnl_unregister_all(int protocol)
359 struct rtnl_link __rcu **tab;
360 struct rtnl_link *link;
363 BUG_ON(protocol < 0 || protocol > RTNL_FAMILY_MAX);
366 tab = rtnl_dereference(rtnl_msg_handlers[protocol]);
371 RCU_INIT_POINTER(rtnl_msg_handlers[protocol], NULL);
372 for (msgindex = 0; msgindex < RTM_NR_MSGTYPES; msgindex++) {
373 link = rtnl_dereference(tab[msgindex]);
377 RCU_INIT_POINTER(tab[msgindex], NULL);
378 kfree_rcu(link, rcu);
386 EXPORT_SYMBOL_GPL(rtnl_unregister_all);
388 static LIST_HEAD(link_ops);
390 static const struct rtnl_link_ops *rtnl_link_ops_get(const char *kind)
392 const struct rtnl_link_ops *ops;
394 list_for_each_entry(ops, &link_ops, list) {
395 if (!strcmp(ops->kind, kind))
402 * __rtnl_link_register - Register rtnl_link_ops with rtnetlink.
403 * @ops: struct rtnl_link_ops * to register
405 * The caller must hold the rtnl_mutex. This function should be used
406 * by drivers that create devices during module initialization. It
407 * must be called before registering the devices.
409 * Returns 0 on success or a negative error code.
411 int __rtnl_link_register(struct rtnl_link_ops *ops)
413 if (rtnl_link_ops_get(ops->kind))
416 /* The check for alloc/setup is here because if ops
417 * does not have them filled in, it is not possible
418 * to use the ops for creating a device. So do not
419 * fill in dellink either; that disables rtnl_dellink.
421 if ((ops->alloc || ops->setup) && !ops->dellink)
422 ops->dellink = unregister_netdevice_queue;
424 list_add_tail(&ops->list, &link_ops);
427 EXPORT_SYMBOL_GPL(__rtnl_link_register);
430 * rtnl_link_register - Register rtnl_link_ops with rtnetlink.
431 * @ops: struct rtnl_link_ops * to register
433 * Returns 0 on success or a negative error code.
435 int rtnl_link_register(struct rtnl_link_ops *ops)
439 /* Sanity-check max sizes to avoid stack buffer overflow. */
440 if (WARN_ON(ops->maxtype > RTNL_MAX_TYPE ||
441 ops->slave_maxtype > RTNL_SLAVE_MAX_TYPE))
445 err = __rtnl_link_register(ops);
449 EXPORT_SYMBOL_GPL(rtnl_link_register);
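/* Illustrative sketch (not part of this file): a virtual device driver
 * usually declares a static rtnl_link_ops and registers it at module init;
 * "foo" and foo_setup() are placeholders.
 *
 *	static struct rtnl_link_ops foo_link_ops __read_mostly = {
 *		.kind	= "foo",
 *		.setup	= foo_setup,	// initializes the net_device
 *	};
 *
 *	static int __init foo_init(void)
 *	{
 *		return rtnl_link_register(&foo_link_ops);
 *	}
 *
 *	static void __exit foo_exit(void)
 *	{
 *		rtnl_link_unregister(&foo_link_ops);
 *	}
 */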
451 static void __rtnl_kill_links(struct net *net, struct rtnl_link_ops *ops)
453 struct net_device *dev;
454 LIST_HEAD(list_kill);
456 for_each_netdev(net, dev) {
457 if (dev->rtnl_link_ops == ops)
458 ops->dellink(dev, &list_kill);
460 unregister_netdevice_many(&list_kill);
464 * __rtnl_link_unregister - Unregister rtnl_link_ops from rtnetlink.
465 * @ops: struct rtnl_link_ops * to unregister
467 * The caller must hold the rtnl_mutex and guarantee net_namespace_list
468 * integrity (hold pernet_ops_rwsem for writing to close the race
469 * with setup_net() and cleanup_net()).
471 void __rtnl_link_unregister(struct rtnl_link_ops *ops)
476 __rtnl_kill_links(net, ops);
478 list_del(&ops->list);
480 EXPORT_SYMBOL_GPL(__rtnl_link_unregister);
482 /* Return with the rtnl_lock held when there are no network
483 * devices unregistering in any network namespace.
485 static void rtnl_lock_unregistering_all(void)
489 DEFINE_WAIT_FUNC(wait, woken_wake_function);
491 add_wait_queue(&netdev_unregistering_wq, &wait);
493 unregistering = false;
495 /* We held write locked pernet_ops_rwsem, and parallel
496 * setup_net() and cleanup_net() are not possible.
499 if (atomic_read(&net->dev_unreg_count) > 0) {
500 unregistering = true;
508 wait_woken(&wait, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
510 remove_wait_queue(&netdev_unregistering_wq, &wait);
514 * rtnl_link_unregister - Unregister rtnl_link_ops from rtnetlink.
515 * @ops: struct rtnl_link_ops * to unregister
517 void rtnl_link_unregister(struct rtnl_link_ops *ops)
519 /* Close the race with setup_net() and cleanup_net() */
520 down_write(&pernet_ops_rwsem);
521 rtnl_lock_unregistering_all();
522 __rtnl_link_unregister(ops);
524 up_write(&pernet_ops_rwsem);
526 EXPORT_SYMBOL_GPL(rtnl_link_unregister);
528 static size_t rtnl_link_get_slave_info_data_size(const struct net_device *dev)
530 struct net_device *master_dev;
531 const struct rtnl_link_ops *ops;
536 master_dev = netdev_master_upper_dev_get_rcu((struct net_device *)dev);
540 ops = master_dev->rtnl_link_ops;
541 if (!ops || !ops->get_slave_size)
543 /* IFLA_INFO_SLAVE_DATA + nested data */
544 size = nla_total_size(sizeof(struct nlattr)) +
545 ops->get_slave_size(master_dev, dev);
552 static size_t rtnl_link_get_size(const struct net_device *dev)
554 const struct rtnl_link_ops *ops = dev->rtnl_link_ops;
560 size = nla_total_size(sizeof(struct nlattr)) + /* IFLA_LINKINFO */
561 nla_total_size(strlen(ops->kind) + 1); /* IFLA_INFO_KIND */
564 /* IFLA_INFO_DATA + nested data */
565 size += nla_total_size(sizeof(struct nlattr)) +
568 if (ops->get_xstats_size)
569 /* IFLA_INFO_XSTATS */
570 size += nla_total_size(ops->get_xstats_size(dev));
572 size += rtnl_link_get_slave_info_data_size(dev);
577 static LIST_HEAD(rtnl_af_ops);
579 static const struct rtnl_af_ops *rtnl_af_lookup(const int family)
581 const struct rtnl_af_ops *ops;
585 list_for_each_entry(ops, &rtnl_af_ops, list) {
586 if (ops->family == family)
594 * rtnl_af_register - Register rtnl_af_ops with rtnetlink.
595 * @ops: struct rtnl_af_ops * to register
597 * Returns 0 on success or a negative error code.
599 void rtnl_af_register(struct rtnl_af_ops *ops)
602 list_add_tail_rcu(&ops->list, &rtnl_af_ops);
605 EXPORT_SYMBOL_GPL(rtnl_af_register);
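/* Illustrative sketch (not part of this file): an address family provides a
 * static rtnl_af_ops and registers it once; the foo_* callbacks shown are
 * placeholders for the family's IFLA_AF_SPEC fill/parse helpers.
 *
 *	static struct rtnl_af_ops foo_af_ops __read_mostly = {
 *		.family		  = AF_INET6,
 *		.fill_link_af	  = foo_fill_link_af,
 *		.get_link_af_size = foo_get_link_af_size,
 *		.set_link_af	  = foo_set_link_af,
 *	};
 *
 *	rtnl_af_register(&foo_af_ops);
 *	...
 *	rtnl_af_unregister(&foo_af_ops);
 */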
608 * rtnl_af_unregister - Unregister rtnl_af_ops from rtnetlink.
609 * @ops: struct rtnl_af_ops * to unregister
611 void rtnl_af_unregister(struct rtnl_af_ops *ops)
614 list_del_rcu(&ops->list);
619 EXPORT_SYMBOL_GPL(rtnl_af_unregister);
621 static size_t rtnl_link_get_af_size(const struct net_device *dev,
624 struct rtnl_af_ops *af_ops;
628 size = nla_total_size(sizeof(struct nlattr));
631 list_for_each_entry_rcu(af_ops, &rtnl_af_ops, list) {
632 if (af_ops->get_link_af_size) {
633 /* AF_* + nested data */
634 size += nla_total_size(sizeof(struct nlattr)) +
635 af_ops->get_link_af_size(dev, ext_filter_mask);
643 static bool rtnl_have_link_slave_info(const struct net_device *dev)
645 struct net_device *master_dev;
650 master_dev = netdev_master_upper_dev_get_rcu((struct net_device *)dev);
651 if (master_dev && master_dev->rtnl_link_ops)
657 static int rtnl_link_slave_info_fill(struct sk_buff *skb,
658 const struct net_device *dev)
660 struct net_device *master_dev;
661 const struct rtnl_link_ops *ops;
662 struct nlattr *slave_data;
665 master_dev = netdev_master_upper_dev_get((struct net_device *) dev);
668 ops = master_dev->rtnl_link_ops;
671 if (nla_put_string(skb, IFLA_INFO_SLAVE_KIND, ops->kind) < 0)
673 if (ops->fill_slave_info) {
674 slave_data = nla_nest_start_noflag(skb, IFLA_INFO_SLAVE_DATA);
677 err = ops->fill_slave_info(skb, master_dev, dev);
679 goto err_cancel_slave_data;
680 nla_nest_end(skb, slave_data);
684 err_cancel_slave_data:
685 nla_nest_cancel(skb, slave_data);
689 static int rtnl_link_info_fill(struct sk_buff *skb,
690 const struct net_device *dev)
692 const struct rtnl_link_ops *ops = dev->rtnl_link_ops;
698 if (nla_put_string(skb, IFLA_INFO_KIND, ops->kind) < 0)
700 if (ops->fill_xstats) {
701 err = ops->fill_xstats(skb, dev);
705 if (ops->fill_info) {
706 data = nla_nest_start_noflag(skb, IFLA_INFO_DATA);
709 err = ops->fill_info(skb, dev);
711 goto err_cancel_data;
712 nla_nest_end(skb, data);
717 nla_nest_cancel(skb, data);
721 static int rtnl_link_fill(struct sk_buff *skb, const struct net_device *dev)
723 struct nlattr *linkinfo;
726 linkinfo = nla_nest_start_noflag(skb, IFLA_LINKINFO);
727 if (linkinfo == NULL)
730 err = rtnl_link_info_fill(skb, dev);
732 goto err_cancel_link;
734 err = rtnl_link_slave_info_fill(skb, dev);
736 goto err_cancel_link;
738 nla_nest_end(skb, linkinfo);
742 nla_nest_cancel(skb, linkinfo);
747 int rtnetlink_send(struct sk_buff *skb, struct net *net, u32 pid, unsigned int group, int echo)
749 struct sock *rtnl = net->rtnl;
751 return nlmsg_notify(rtnl, skb, pid, group, echo, GFP_KERNEL);
754 int rtnl_unicast(struct sk_buff *skb, struct net *net, u32 pid)
756 struct sock *rtnl = net->rtnl;
758 return nlmsg_unicast(rtnl, skb, pid);
760 EXPORT_SYMBOL(rtnl_unicast);
762 void rtnl_notify(struct sk_buff *skb, struct net *net, u32 pid, u32 group,
763 struct nlmsghdr *nlh, gfp_t flags)
765 struct sock *rtnl = net->rtnl;
767 nlmsg_notify(rtnl, skb, pid, group, nlmsg_report(nlh), flags);
769 EXPORT_SYMBOL(rtnl_notify);
771 void rtnl_set_sk_err(struct net *net, u32 group, int error)
773 struct sock *rtnl = net->rtnl;
775 netlink_set_err(rtnl, 0, group, error);
777 EXPORT_SYMBOL(rtnl_set_sk_err);
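/* Illustrative sketch (not part of this file): the usual notification
 * pattern builds an skb, fills it, and either multicasts it with
 * rtnl_notify() or reports the failure via rtnl_set_sk_err(); the payload
 * size and fill function are placeholders.
 *
 *	skb = nlmsg_new(payload_size, GFP_KERNEL);
 *	if (!skb)
 *		goto errout;
 *	err = fill_msg(skb, ...);	// e.g. an rtnl_fill_*() helper
 *	if (err < 0) {
 *		kfree_skb(skb);
 *		goto errout;
 *	}
 *	rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, GFP_KERNEL);
 *	return;
 * errout:
 *	rtnl_set_sk_err(net, RTNLGRP_LINK, err);
 */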
779 int rtnetlink_put_metrics(struct sk_buff *skb, u32 *metrics)
784 /* nothing is dumped for dst_default_metrics, so just skip the loop */
785 if (metrics == dst_default_metrics.metrics)
788 mx = nla_nest_start_noflag(skb, RTA_METRICS);
792 for (i = 0; i < RTAX_MAX; i++) {
794 if (i == RTAX_CC_ALGO - 1) {
795 char tmp[TCP_CA_NAME_MAX], *name;
797 name = tcp_ca_get_name_by_key(metrics[i], tmp);
800 if (nla_put_string(skb, i + 1, name))
801 goto nla_put_failure;
802 } else if (i == RTAX_FEATURES - 1) {
803 u32 user_features = metrics[i] & RTAX_FEATURE_MASK;
807 BUILD_BUG_ON(RTAX_FEATURE_MASK & DST_FEATURE_MASK);
808 if (nla_put_u32(skb, i + 1, user_features))
809 goto nla_put_failure;
811 if (nla_put_u32(skb, i + 1, metrics[i]))
812 goto nla_put_failure;
819 nla_nest_cancel(skb, mx);
823 return nla_nest_end(skb, mx);
826 nla_nest_cancel(skb, mx);
829 EXPORT_SYMBOL(rtnetlink_put_metrics);
831 int rtnl_put_cacheinfo(struct sk_buff *skb, struct dst_entry *dst, u32 id,
832 long expires, u32 error)
834 struct rta_cacheinfo ci = {
840 ci.rta_lastuse = jiffies_delta_to_clock_t(jiffies - dst->lastuse);
841 ci.rta_used = dst->__use;
842 ci.rta_clntref = atomic_read(&dst->__refcnt);
847 clock = jiffies_to_clock_t(abs(expires));
848 clock = min_t(unsigned long, clock, INT_MAX);
849 ci.rta_expires = (expires > 0) ? clock : -clock;
851 return nla_put(skb, RTA_CACHEINFO, sizeof(ci), &ci);
853 EXPORT_SYMBOL_GPL(rtnl_put_cacheinfo);
855 static void set_operstate(struct net_device *dev, unsigned char transition)
857 unsigned char operstate = dev->operstate;
859 switch (transition) {
861 if ((operstate == IF_OPER_DORMANT ||
862 operstate == IF_OPER_TESTING ||
863 operstate == IF_OPER_UNKNOWN) &&
864 !netif_dormant(dev) && !netif_testing(dev))
865 operstate = IF_OPER_UP;
868 case IF_OPER_TESTING:
869 if (netif_oper_up(dev))
870 operstate = IF_OPER_TESTING;
873 case IF_OPER_DORMANT:
874 if (netif_oper_up(dev))
875 operstate = IF_OPER_DORMANT;
879 if (dev->operstate != operstate) {
880 write_lock(&dev_base_lock);
881 dev->operstate = operstate;
882 write_unlock(&dev_base_lock);
883 netdev_state_change(dev);
887 static unsigned int rtnl_dev_get_flags(const struct net_device *dev)
889 return (dev->flags & ~(IFF_PROMISC | IFF_ALLMULTI)) |
890 (dev->gflags & (IFF_PROMISC | IFF_ALLMULTI));
893 static unsigned int rtnl_dev_combine_flags(const struct net_device *dev,
894 const struct ifinfomsg *ifm)
896 unsigned int flags = ifm->ifi_flags;
898 /* bugwards compatibility: ifi_change == 0 is treated as ~0 */
900 flags = (flags & ifm->ifi_change) |
901 (rtnl_dev_get_flags(dev) & ~ifm->ifi_change);
906 static void copy_rtnl_link_stats(struct rtnl_link_stats *a,
907 const struct rtnl_link_stats64 *b)
909 a->rx_packets = b->rx_packets;
910 a->tx_packets = b->tx_packets;
911 a->rx_bytes = b->rx_bytes;
912 a->tx_bytes = b->tx_bytes;
913 a->rx_errors = b->rx_errors;
914 a->tx_errors = b->tx_errors;
915 a->rx_dropped = b->rx_dropped;
916 a->tx_dropped = b->tx_dropped;
918 a->multicast = b->multicast;
919 a->collisions = b->collisions;
921 a->rx_length_errors = b->rx_length_errors;
922 a->rx_over_errors = b->rx_over_errors;
923 a->rx_crc_errors = b->rx_crc_errors;
924 a->rx_frame_errors = b->rx_frame_errors;
925 a->rx_fifo_errors = b->rx_fifo_errors;
926 a->rx_missed_errors = b->rx_missed_errors;
928 a->tx_aborted_errors = b->tx_aborted_errors;
929 a->tx_carrier_errors = b->tx_carrier_errors;
930 a->tx_fifo_errors = b->tx_fifo_errors;
931 a->tx_heartbeat_errors = b->tx_heartbeat_errors;
932 a->tx_window_errors = b->tx_window_errors;
934 a->rx_compressed = b->rx_compressed;
935 a->tx_compressed = b->tx_compressed;
937 a->rx_nohandler = b->rx_nohandler;
941 static inline int rtnl_vfinfo_size(const struct net_device *dev,
944 if (dev->dev.parent && (ext_filter_mask & RTEXT_FILTER_VF)) {
945 int num_vfs = dev_num_vf(dev->dev.parent);
946 size_t size = nla_total_size(0);
949 nla_total_size(sizeof(struct ifla_vf_mac)) +
950 nla_total_size(sizeof(struct ifla_vf_broadcast)) +
951 nla_total_size(sizeof(struct ifla_vf_vlan)) +
952 nla_total_size(0) + /* nest IFLA_VF_VLAN_LIST */
953 nla_total_size(MAX_VLAN_LIST_LEN *
954 sizeof(struct ifla_vf_vlan_info)) +
955 nla_total_size(sizeof(struct ifla_vf_spoofchk)) +
956 nla_total_size(sizeof(struct ifla_vf_tx_rate)) +
957 nla_total_size(sizeof(struct ifla_vf_rate)) +
958 nla_total_size(sizeof(struct ifla_vf_link_state)) +
959 nla_total_size(sizeof(struct ifla_vf_rss_query_en)) +
960 nla_total_size(sizeof(struct ifla_vf_trust)));
961 if (~ext_filter_mask & RTEXT_FILTER_SKIP_STATS) {
963 (nla_total_size(0) + /* nest IFLA_VF_STATS */
964 /* IFLA_VF_STATS_RX_PACKETS */
965 nla_total_size_64bit(sizeof(__u64)) +
966 /* IFLA_VF_STATS_TX_PACKETS */
967 nla_total_size_64bit(sizeof(__u64)) +
968 /* IFLA_VF_STATS_RX_BYTES */
969 nla_total_size_64bit(sizeof(__u64)) +
970 /* IFLA_VF_STATS_TX_BYTES */
971 nla_total_size_64bit(sizeof(__u64)) +
972 /* IFLA_VF_STATS_BROADCAST */
973 nla_total_size_64bit(sizeof(__u64)) +
974 /* IFLA_VF_STATS_MULTICAST */
975 nla_total_size_64bit(sizeof(__u64)) +
976 /* IFLA_VF_STATS_RX_DROPPED */
977 nla_total_size_64bit(sizeof(__u64)) +
978 /* IFLA_VF_STATS_TX_DROPPED */
979 nla_total_size_64bit(sizeof(__u64)));
986 static size_t rtnl_port_size(const struct net_device *dev,
989 size_t port_size = nla_total_size(4) /* PORT_VF */
990 + nla_total_size(PORT_PROFILE_MAX) /* PORT_PROFILE */
991 + nla_total_size(PORT_UUID_MAX) /* PORT_INSTANCE_UUID */
992 + nla_total_size(PORT_UUID_MAX) /* PORT_HOST_UUID */
993 + nla_total_size(1) /* PORT_VDP_REQUEST */
994 + nla_total_size(2); /* PORT_VDP_RESPONSE */
995 size_t vf_ports_size = nla_total_size(sizeof(struct nlattr));
996 size_t vf_port_size = nla_total_size(sizeof(struct nlattr))
998 size_t port_self_size = nla_total_size(sizeof(struct nlattr))
1001 if (!dev->netdev_ops->ndo_get_vf_port || !dev->dev.parent ||
1002 !(ext_filter_mask & RTEXT_FILTER_VF))
1004 if (dev_num_vf(dev->dev.parent))
1005 return port_self_size + vf_ports_size +
1006 vf_port_size * dev_num_vf(dev->dev.parent);
1008 return port_self_size;
1011 static size_t rtnl_xdp_size(void)
1013 size_t xdp_size = nla_total_size(0) + /* nest IFLA_XDP */
1014 nla_total_size(1) + /* XDP_ATTACHED */
1015 nla_total_size(4) + /* XDP_PROG_ID (or 1st mode) */
1016 nla_total_size(4); /* XDP_<mode>_PROG_ID */
1021 static size_t rtnl_prop_list_size(const struct net_device *dev)
1023 struct netdev_name_node *name_node;
1026 if (list_empty(&dev->name_node->list))
1028 size = nla_total_size(0);
1029 list_for_each_entry(name_node, &dev->name_node->list, list)
1030 size += nla_total_size(ALTIFNAMSIZ);
1034 static size_t rtnl_proto_down_size(const struct net_device *dev)
1036 size_t size = nla_total_size(1);
1038 if (dev->proto_down_reason)
1039 size += nla_total_size(0) + nla_total_size(4);
1044 static noinline size_t if_nlmsg_size(const struct net_device *dev,
1045 u32 ext_filter_mask)
1047 return NLMSG_ALIGN(sizeof(struct ifinfomsg))
1048 + nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */
1049 + nla_total_size(IFALIASZ) /* IFLA_IFALIAS */
1050 + nla_total_size(IFNAMSIZ) /* IFLA_QDISC */
1051 + nla_total_size_64bit(sizeof(struct rtnl_link_ifmap))
1052 + nla_total_size(sizeof(struct rtnl_link_stats))
1053 + nla_total_size_64bit(sizeof(struct rtnl_link_stats64))
1054 + nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */
1055 + nla_total_size(MAX_ADDR_LEN) /* IFLA_BROADCAST */
1056 + nla_total_size(4) /* IFLA_TXQLEN */
1057 + nla_total_size(4) /* IFLA_WEIGHT */
1058 + nla_total_size(4) /* IFLA_MTU */
1059 + nla_total_size(4) /* IFLA_LINK */
1060 + nla_total_size(4) /* IFLA_MASTER */
1061 + nla_total_size(1) /* IFLA_CARRIER */
1062 + nla_total_size(4) /* IFLA_PROMISCUITY */
1063 + nla_total_size(4) /* IFLA_ALLMULTI */
1064 + nla_total_size(4) /* IFLA_NUM_TX_QUEUES */
1065 + nla_total_size(4) /* IFLA_NUM_RX_QUEUES */
1066 + nla_total_size(4) /* IFLA_GSO_MAX_SEGS */
1067 + nla_total_size(4) /* IFLA_GSO_MAX_SIZE */
1068 + nla_total_size(4) /* IFLA_GRO_MAX_SIZE */
1069 + nla_total_size(4) /* IFLA_TSO_MAX_SIZE */
1070 + nla_total_size(4) /* IFLA_TSO_MAX_SEGS */
1071 + nla_total_size(1) /* IFLA_OPERSTATE */
1072 + nla_total_size(1) /* IFLA_LINKMODE */
1073 + nla_total_size(4) /* IFLA_CARRIER_CHANGES */
1074 + nla_total_size(4) /* IFLA_LINK_NETNSID */
1075 + nla_total_size(4) /* IFLA_GROUP */
1076 + nla_total_size(ext_filter_mask
1077 & RTEXT_FILTER_VF ? 4 : 0) /* IFLA_NUM_VF */
1078 + rtnl_vfinfo_size(dev, ext_filter_mask) /* IFLA_VFINFO_LIST */
1079 + rtnl_port_size(dev, ext_filter_mask) /* IFLA_VF_PORTS + IFLA_PORT_SELF */
1080 + rtnl_link_get_size(dev) /* IFLA_LINKINFO */
1081 + rtnl_link_get_af_size(dev, ext_filter_mask) /* IFLA_AF_SPEC */
1082 + nla_total_size(MAX_PHYS_ITEM_ID_LEN) /* IFLA_PHYS_PORT_ID */
1083 + nla_total_size(MAX_PHYS_ITEM_ID_LEN) /* IFLA_PHYS_SWITCH_ID */
1084 + nla_total_size(IFNAMSIZ) /* IFLA_PHYS_PORT_NAME */
1085 + rtnl_xdp_size() /* IFLA_XDP */
1086 + nla_total_size(4) /* IFLA_EVENT */
1087 + nla_total_size(4) /* IFLA_NEW_NETNSID */
1088 + nla_total_size(4) /* IFLA_NEW_IFINDEX */
1089 + rtnl_proto_down_size(dev) /* proto down */
1090 + nla_total_size(4) /* IFLA_TARGET_NETNSID */
1091 + nla_total_size(4) /* IFLA_CARRIER_UP_COUNT */
1092 + nla_total_size(4) /* IFLA_CARRIER_DOWN_COUNT */
1093 + nla_total_size(4) /* IFLA_MIN_MTU */
1094 + nla_total_size(4) /* IFLA_MAX_MTU */
1095 + rtnl_prop_list_size(dev)
1096 + nla_total_size(MAX_ADDR_LEN) /* IFLA_PERM_ADDRESS */
1100 static int rtnl_vf_ports_fill(struct sk_buff *skb, struct net_device *dev)
1102 struct nlattr *vf_ports;
1103 struct nlattr *vf_port;
1107 vf_ports = nla_nest_start_noflag(skb, IFLA_VF_PORTS);
1111 for (vf = 0; vf < dev_num_vf(dev->dev.parent); vf++) {
1112 vf_port = nla_nest_start_noflag(skb, IFLA_VF_PORT);
1114 goto nla_put_failure;
1115 if (nla_put_u32(skb, IFLA_PORT_VF, vf))
1116 goto nla_put_failure;
1117 err = dev->netdev_ops->ndo_get_vf_port(dev, vf, skb);
1118 if (err == -EMSGSIZE)
1119 goto nla_put_failure;
1121 nla_nest_cancel(skb, vf_port);
1124 nla_nest_end(skb, vf_port);
1127 nla_nest_end(skb, vf_ports);
1132 nla_nest_cancel(skb, vf_ports);
1136 static int rtnl_port_self_fill(struct sk_buff *skb, struct net_device *dev)
1138 struct nlattr *port_self;
1141 port_self = nla_nest_start_noflag(skb, IFLA_PORT_SELF);
1145 err = dev->netdev_ops->ndo_get_vf_port(dev, PORT_SELF_VF, skb);
1147 nla_nest_cancel(skb, port_self);
1148 return (err == -EMSGSIZE) ? err : 0;
1151 nla_nest_end(skb, port_self);
1156 static int rtnl_port_fill(struct sk_buff *skb, struct net_device *dev,
1157 u32 ext_filter_mask)
1161 if (!dev->netdev_ops->ndo_get_vf_port || !dev->dev.parent ||
1162 !(ext_filter_mask & RTEXT_FILTER_VF))
1165 err = rtnl_port_self_fill(skb, dev);
1169 if (dev_num_vf(dev->dev.parent)) {
1170 err = rtnl_vf_ports_fill(skb, dev);
1178 static int rtnl_phys_port_id_fill(struct sk_buff *skb, struct net_device *dev)
1181 struct netdev_phys_item_id ppid;
1183 err = dev_get_phys_port_id(dev, &ppid);
1185 if (err == -EOPNOTSUPP)
1190 if (nla_put(skb, IFLA_PHYS_PORT_ID, ppid.id_len, ppid.id))
1196 static int rtnl_phys_port_name_fill(struct sk_buff *skb, struct net_device *dev)
1198 char name[IFNAMSIZ];
1201 err = dev_get_phys_port_name(dev, name, sizeof(name));
1203 if (err == -EOPNOTSUPP)
1208 if (nla_put_string(skb, IFLA_PHYS_PORT_NAME, name))
1214 static int rtnl_phys_switch_id_fill(struct sk_buff *skb, struct net_device *dev)
1216 struct netdev_phys_item_id ppid = { };
1219 err = dev_get_port_parent_id(dev, &ppid, false);
1221 if (err == -EOPNOTSUPP)
1226 if (nla_put(skb, IFLA_PHYS_SWITCH_ID, ppid.id_len, ppid.id))
1232 static noinline_for_stack int rtnl_fill_stats(struct sk_buff *skb,
1233 struct net_device *dev)
1235 struct rtnl_link_stats64 *sp;
1236 struct nlattr *attr;
1238 attr = nla_reserve_64bit(skb, IFLA_STATS64,
1239 sizeof(struct rtnl_link_stats64), IFLA_PAD);
1243 sp = nla_data(attr);
1244 dev_get_stats(dev, sp);
1246 attr = nla_reserve(skb, IFLA_STATS,
1247 sizeof(struct rtnl_link_stats));
1251 copy_rtnl_link_stats(nla_data(attr), sp);
1256 static noinline_for_stack int rtnl_fill_vfinfo(struct sk_buff *skb,
1257 struct net_device *dev,
1259 struct nlattr *vfinfo,
1260 u32 ext_filter_mask)
1262 struct ifla_vf_rss_query_en vf_rss_query_en;
1263 struct nlattr *vf, *vfstats, *vfvlanlist;
1264 struct ifla_vf_link_state vf_linkstate;
1265 struct ifla_vf_vlan_info vf_vlan_info;
1266 struct ifla_vf_spoofchk vf_spoofchk;
1267 struct ifla_vf_tx_rate vf_tx_rate;
1268 struct ifla_vf_stats vf_stats;
1269 struct ifla_vf_trust vf_trust;
1270 struct ifla_vf_vlan vf_vlan;
1271 struct ifla_vf_rate vf_rate;
1272 struct ifla_vf_mac vf_mac;
1273 struct ifla_vf_broadcast vf_broadcast;
1274 struct ifla_vf_info ivi;
1275 struct ifla_vf_guid node_guid;
1276 struct ifla_vf_guid port_guid;
1278 memset(&ivi, 0, sizeof(ivi));
1280 /* Not all SR-IOV capable drivers support the
1281 * spoofcheck and "RSS query enable" query. Preset to
1282 * -1 so the user space tool can detect that the driver
1283 * didn't report anything.
1286 ivi.rss_query_en = -1;
1288 /* The default value for VF link state is "auto"
1289 * IFLA_VF_LINK_STATE_AUTO which equals zero
1292 /* VLAN Protocol by default is 802.1Q */
1293 ivi.vlan_proto = htons(ETH_P_8021Q);
1294 if (dev->netdev_ops->ndo_get_vf_config(dev, vfs_num, &ivi))
1297 memset(&vf_vlan_info, 0, sizeof(vf_vlan_info));
1298 memset(&node_guid, 0, sizeof(node_guid));
1299 memset(&port_guid, 0, sizeof(port_guid));
1308 vf_rss_query_en.vf =
1311 port_guid.vf = ivi.vf;
1313 memcpy(vf_mac.mac, ivi.mac, sizeof(ivi.mac));
1314 memcpy(vf_broadcast.broadcast, dev->broadcast, dev->addr_len);
1315 vf_vlan.vlan = ivi.vlan;
1316 vf_vlan.qos = ivi.qos;
1317 vf_vlan_info.vlan = ivi.vlan;
1318 vf_vlan_info.qos = ivi.qos;
1319 vf_vlan_info.vlan_proto = ivi.vlan_proto;
1320 vf_tx_rate.rate = ivi.max_tx_rate;
1321 vf_rate.min_tx_rate = ivi.min_tx_rate;
1322 vf_rate.max_tx_rate = ivi.max_tx_rate;
1323 vf_spoofchk.setting = ivi.spoofchk;
1324 vf_linkstate.link_state = ivi.linkstate;
1325 vf_rss_query_en.setting = ivi.rss_query_en;
1326 vf_trust.setting = ivi.trusted;
1327 vf = nla_nest_start_noflag(skb, IFLA_VF_INFO);
1329 goto nla_put_vfinfo_failure;
1330 if (nla_put(skb, IFLA_VF_MAC, sizeof(vf_mac), &vf_mac) ||
1331 nla_put(skb, IFLA_VF_BROADCAST, sizeof(vf_broadcast), &vf_broadcast) ||
1332 nla_put(skb, IFLA_VF_VLAN, sizeof(vf_vlan), &vf_vlan) ||
1333 nla_put(skb, IFLA_VF_RATE, sizeof(vf_rate),
1335 nla_put(skb, IFLA_VF_TX_RATE, sizeof(vf_tx_rate),
1337 nla_put(skb, IFLA_VF_SPOOFCHK, sizeof(vf_spoofchk),
1339 nla_put(skb, IFLA_VF_LINK_STATE, sizeof(vf_linkstate),
1341 nla_put(skb, IFLA_VF_RSS_QUERY_EN,
1342 sizeof(vf_rss_query_en),
1343 &vf_rss_query_en) ||
1344 nla_put(skb, IFLA_VF_TRUST,
1345 sizeof(vf_trust), &vf_trust))
1346 goto nla_put_vf_failure;
1348 if (dev->netdev_ops->ndo_get_vf_guid &&
1349 !dev->netdev_ops->ndo_get_vf_guid(dev, vfs_num, &node_guid,
1351 if (nla_put(skb, IFLA_VF_IB_NODE_GUID, sizeof(node_guid),
1353 nla_put(skb, IFLA_VF_IB_PORT_GUID, sizeof(port_guid),
1355 goto nla_put_vf_failure;
1357 vfvlanlist = nla_nest_start_noflag(skb, IFLA_VF_VLAN_LIST);
1359 goto nla_put_vf_failure;
1360 if (nla_put(skb, IFLA_VF_VLAN_INFO, sizeof(vf_vlan_info),
1362 nla_nest_cancel(skb, vfvlanlist);
1363 goto nla_put_vf_failure;
1365 nla_nest_end(skb, vfvlanlist);
1366 if (~ext_filter_mask & RTEXT_FILTER_SKIP_STATS) {
1367 memset(&vf_stats, 0, sizeof(vf_stats));
1368 if (dev->netdev_ops->ndo_get_vf_stats)
1369 dev->netdev_ops->ndo_get_vf_stats(dev, vfs_num,
1371 vfstats = nla_nest_start_noflag(skb, IFLA_VF_STATS);
1373 goto nla_put_vf_failure;
1374 if (nla_put_u64_64bit(skb, IFLA_VF_STATS_RX_PACKETS,
1375 vf_stats.rx_packets, IFLA_VF_STATS_PAD) ||
1376 nla_put_u64_64bit(skb, IFLA_VF_STATS_TX_PACKETS,
1377 vf_stats.tx_packets, IFLA_VF_STATS_PAD) ||
1378 nla_put_u64_64bit(skb, IFLA_VF_STATS_RX_BYTES,
1379 vf_stats.rx_bytes, IFLA_VF_STATS_PAD) ||
1380 nla_put_u64_64bit(skb, IFLA_VF_STATS_TX_BYTES,
1381 vf_stats.tx_bytes, IFLA_VF_STATS_PAD) ||
1382 nla_put_u64_64bit(skb, IFLA_VF_STATS_BROADCAST,
1383 vf_stats.broadcast, IFLA_VF_STATS_PAD) ||
1384 nla_put_u64_64bit(skb, IFLA_VF_STATS_MULTICAST,
1385 vf_stats.multicast, IFLA_VF_STATS_PAD) ||
1386 nla_put_u64_64bit(skb, IFLA_VF_STATS_RX_DROPPED,
1387 vf_stats.rx_dropped, IFLA_VF_STATS_PAD) ||
1388 nla_put_u64_64bit(skb, IFLA_VF_STATS_TX_DROPPED,
1389 vf_stats.tx_dropped, IFLA_VF_STATS_PAD)) {
1390 nla_nest_cancel(skb, vfstats);
1391 goto nla_put_vf_failure;
1393 nla_nest_end(skb, vfstats);
1395 nla_nest_end(skb, vf);
1399 nla_nest_cancel(skb, vf);
1400 nla_put_vfinfo_failure:
1401 nla_nest_cancel(skb, vfinfo);
1405 static noinline_for_stack int rtnl_fill_vf(struct sk_buff *skb,
1406 struct net_device *dev,
1407 u32 ext_filter_mask)
1409 struct nlattr *vfinfo;
1412 if (!dev->dev.parent || ((ext_filter_mask & RTEXT_FILTER_VF) == 0))
1415 num_vfs = dev_num_vf(dev->dev.parent);
1416 if (nla_put_u32(skb, IFLA_NUM_VF, num_vfs))
1419 if (!dev->netdev_ops->ndo_get_vf_config)
1422 vfinfo = nla_nest_start_noflag(skb, IFLA_VFINFO_LIST);
1426 for (i = 0; i < num_vfs; i++) {
1427 if (rtnl_fill_vfinfo(skb, dev, i, vfinfo, ext_filter_mask))
1431 nla_nest_end(skb, vfinfo);
1435 static int rtnl_fill_link_ifmap(struct sk_buff *skb, struct net_device *dev)
1437 struct rtnl_link_ifmap map;
1439 memset(&map, 0, sizeof(map));
1440 map.mem_start = dev->mem_start;
1441 map.mem_end = dev->mem_end;
1442 map.base_addr = dev->base_addr;
1445 map.port = dev->if_port;
1447 if (nla_put_64bit(skb, IFLA_MAP, sizeof(map), &map, IFLA_PAD))
1453 static u32 rtnl_xdp_prog_skb(struct net_device *dev)
1455 const struct bpf_prog *generic_xdp_prog;
1459 generic_xdp_prog = rtnl_dereference(dev->xdp_prog);
1460 if (!generic_xdp_prog)
1462 return generic_xdp_prog->aux->id;
1465 static u32 rtnl_xdp_prog_drv(struct net_device *dev)
1467 return dev_xdp_prog_id(dev, XDP_MODE_DRV);
1470 static u32 rtnl_xdp_prog_hw(struct net_device *dev)
1472 return dev_xdp_prog_id(dev, XDP_MODE_HW);
1475 static int rtnl_xdp_report_one(struct sk_buff *skb, struct net_device *dev,
1476 u32 *prog_id, u8 *mode, u8 tgt_mode, u32 attr,
1477 u32 (*get_prog_id)(struct net_device *dev))
1482 curr_id = get_prog_id(dev);
1487 err = nla_put_u32(skb, attr, curr_id);
1491 if (*mode != XDP_ATTACHED_NONE)
1492 *mode = XDP_ATTACHED_MULTI;
1499 static int rtnl_xdp_fill(struct sk_buff *skb, struct net_device *dev)
1506 xdp = nla_nest_start_noflag(skb, IFLA_XDP);
1511 mode = XDP_ATTACHED_NONE;
1512 err = rtnl_xdp_report_one(skb, dev, &prog_id, &mode, XDP_ATTACHED_SKB,
1513 IFLA_XDP_SKB_PROG_ID, rtnl_xdp_prog_skb);
1516 err = rtnl_xdp_report_one(skb, dev, &prog_id, &mode, XDP_ATTACHED_DRV,
1517 IFLA_XDP_DRV_PROG_ID, rtnl_xdp_prog_drv);
1520 err = rtnl_xdp_report_one(skb, dev, &prog_id, &mode, XDP_ATTACHED_HW,
1521 IFLA_XDP_HW_PROG_ID, rtnl_xdp_prog_hw);
1525 err = nla_put_u8(skb, IFLA_XDP_ATTACHED, mode);
1529 if (prog_id && mode != XDP_ATTACHED_MULTI) {
1530 err = nla_put_u32(skb, IFLA_XDP_PROG_ID, prog_id);
1535 nla_nest_end(skb, xdp);
1539 nla_nest_cancel(skb, xdp);
1543 static u32 rtnl_get_event(unsigned long event)
1545 u32 rtnl_event_type = IFLA_EVENT_NONE;
1549 rtnl_event_type = IFLA_EVENT_REBOOT;
1551 case NETDEV_FEAT_CHANGE:
1552 rtnl_event_type = IFLA_EVENT_FEATURES;
1554 case NETDEV_BONDING_FAILOVER:
1555 rtnl_event_type = IFLA_EVENT_BONDING_FAILOVER;
1557 case NETDEV_NOTIFY_PEERS:
1558 rtnl_event_type = IFLA_EVENT_NOTIFY_PEERS;
1560 case NETDEV_RESEND_IGMP:
1561 rtnl_event_type = IFLA_EVENT_IGMP_RESEND;
1563 case NETDEV_CHANGEINFODATA:
1564 rtnl_event_type = IFLA_EVENT_BONDING_OPTIONS;
1570 return rtnl_event_type;
1573 static int put_master_ifindex(struct sk_buff *skb, struct net_device *dev)
1575 const struct net_device *upper_dev;
1580 upper_dev = netdev_master_upper_dev_get_rcu(dev);
1582 ret = nla_put_u32(skb, IFLA_MASTER, upper_dev->ifindex);
1588 static int nla_put_iflink(struct sk_buff *skb, const struct net_device *dev,
1591 int ifindex = dev_get_iflink(dev);
1593 if (force || dev->ifindex != ifindex)
1594 return nla_put_u32(skb, IFLA_LINK, ifindex);
1599 static noinline_for_stack int nla_put_ifalias(struct sk_buff *skb,
1600 struct net_device *dev)
1605 ret = dev_get_alias(dev, buf, sizeof(buf));
1606 return ret > 0 ? nla_put_string(skb, IFLA_IFALIAS, buf) : 0;
1609 static int rtnl_fill_link_netnsid(struct sk_buff *skb,
1610 const struct net_device *dev,
1611 struct net *src_net, gfp_t gfp)
1613 bool put_iflink = false;
1615 if (dev->rtnl_link_ops && dev->rtnl_link_ops->get_link_net) {
1616 struct net *link_net = dev->rtnl_link_ops->get_link_net(dev);
1618 if (!net_eq(dev_net(dev), link_net)) {
1619 int id = peernet2id_alloc(src_net, link_net, gfp);
1621 if (nla_put_s32(skb, IFLA_LINK_NETNSID, id))
1628 return nla_put_iflink(skb, dev, put_iflink);
1631 static int rtnl_fill_link_af(struct sk_buff *skb,
1632 const struct net_device *dev,
1633 u32 ext_filter_mask)
1635 const struct rtnl_af_ops *af_ops;
1636 struct nlattr *af_spec;
1638 af_spec = nla_nest_start_noflag(skb, IFLA_AF_SPEC);
1642 list_for_each_entry_rcu(af_ops, &rtnl_af_ops, list) {
1646 if (!af_ops->fill_link_af)
1649 af = nla_nest_start_noflag(skb, af_ops->family);
1653 err = af_ops->fill_link_af(skb, dev, ext_filter_mask);
1655 * Caller may return ENODATA to indicate that there
1656 * was no data to be dumped. This is not an error, it
1657 * means we should trim the attribute header and
1660 if (err == -ENODATA)
1661 nla_nest_cancel(skb, af);
1665 nla_nest_end(skb, af);
1668 nla_nest_end(skb, af_spec);
1672 static int rtnl_fill_alt_ifnames(struct sk_buff *skb,
1673 const struct net_device *dev)
1675 struct netdev_name_node *name_node;
1678 list_for_each_entry(name_node, &dev->name_node->list, list) {
1679 if (nla_put_string(skb, IFLA_ALT_IFNAME, name_node->name))
1686 static int rtnl_fill_prop_list(struct sk_buff *skb,
1687 const struct net_device *dev)
1689 struct nlattr *prop_list;
1692 prop_list = nla_nest_start(skb, IFLA_PROP_LIST);
1696 ret = rtnl_fill_alt_ifnames(skb, dev);
1700 nla_nest_end(skb, prop_list);
1704 nla_nest_cancel(skb, prop_list);
1708 static int rtnl_fill_proto_down(struct sk_buff *skb,
1709 const struct net_device *dev)
1714 if (nla_put_u8(skb, IFLA_PROTO_DOWN, dev->proto_down))
1715 goto nla_put_failure;
1717 preason = dev->proto_down_reason;
1721 pr = nla_nest_start(skb, IFLA_PROTO_DOWN_REASON);
1725 if (nla_put_u32(skb, IFLA_PROTO_DOWN_REASON_VALUE, preason)) {
1726 nla_nest_cancel(skb, pr);
1727 goto nla_put_failure;
1730 nla_nest_end(skb, pr);
1737 static int rtnl_fill_ifinfo(struct sk_buff *skb,
1738 struct net_device *dev, struct net *src_net,
1739 int type, u32 pid, u32 seq, u32 change,
1740 unsigned int flags, u32 ext_filter_mask,
1741 u32 event, int *new_nsid, int new_ifindex,
1742 int tgt_netnsid, gfp_t gfp)
1744 struct ifinfomsg *ifm;
1745 struct nlmsghdr *nlh;
1746 struct Qdisc *qdisc;
1749 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ifm), flags);
1753 ifm = nlmsg_data(nlh);
1754 ifm->ifi_family = AF_UNSPEC;
1756 ifm->ifi_type = dev->type;
1757 ifm->ifi_index = dev->ifindex;
1758 ifm->ifi_flags = dev_get_flags(dev);
1759 ifm->ifi_change = change;
1761 if (tgt_netnsid >= 0 && nla_put_s32(skb, IFLA_TARGET_NETNSID, tgt_netnsid))
1762 goto nla_put_failure;
1764 qdisc = rtnl_dereference(dev->qdisc);
1765 if (nla_put_string(skb, IFLA_IFNAME, dev->name) ||
1766 nla_put_u32(skb, IFLA_TXQLEN, dev->tx_queue_len) ||
1767 nla_put_u8(skb, IFLA_OPERSTATE,
1768 netif_running(dev) ? dev->operstate : IF_OPER_DOWN) ||
1769 nla_put_u8(skb, IFLA_LINKMODE, dev->link_mode) ||
1770 nla_put_u32(skb, IFLA_MTU, dev->mtu) ||
1771 nla_put_u32(skb, IFLA_MIN_MTU, dev->min_mtu) ||
1772 nla_put_u32(skb, IFLA_MAX_MTU, dev->max_mtu) ||
1773 nla_put_u32(skb, IFLA_GROUP, dev->group) ||
1774 nla_put_u32(skb, IFLA_PROMISCUITY, dev->promiscuity) ||
1775 nla_put_u32(skb, IFLA_ALLMULTI, dev->allmulti) ||
1776 nla_put_u32(skb, IFLA_NUM_TX_QUEUES, dev->num_tx_queues) ||
1777 nla_put_u32(skb, IFLA_GSO_MAX_SEGS, dev->gso_max_segs) ||
1778 nla_put_u32(skb, IFLA_GSO_MAX_SIZE, dev->gso_max_size) ||
1779 nla_put_u32(skb, IFLA_GRO_MAX_SIZE, dev->gro_max_size) ||
1780 nla_put_u32(skb, IFLA_TSO_MAX_SIZE, dev->tso_max_size) ||
1781 nla_put_u32(skb, IFLA_TSO_MAX_SEGS, dev->tso_max_segs) ||
1783 nla_put_u32(skb, IFLA_NUM_RX_QUEUES, dev->num_rx_queues) ||
1785 put_master_ifindex(skb, dev) ||
1786 nla_put_u8(skb, IFLA_CARRIER, netif_carrier_ok(dev)) ||
1788 nla_put_string(skb, IFLA_QDISC, qdisc->ops->id)) ||
1789 nla_put_ifalias(skb, dev) ||
1790 nla_put_u32(skb, IFLA_CARRIER_CHANGES,
1791 atomic_read(&dev->carrier_up_count) +
1792 atomic_read(&dev->carrier_down_count)) ||
1793 nla_put_u32(skb, IFLA_CARRIER_UP_COUNT,
1794 atomic_read(&dev->carrier_up_count)) ||
1795 nla_put_u32(skb, IFLA_CARRIER_DOWN_COUNT,
1796 atomic_read(&dev->carrier_down_count)))
1797 goto nla_put_failure;
1799 if (rtnl_fill_proto_down(skb, dev))
1800 goto nla_put_failure;
1802 if (event != IFLA_EVENT_NONE) {
1803 if (nla_put_u32(skb, IFLA_EVENT, event))
1804 goto nla_put_failure;
1807 if (rtnl_fill_link_ifmap(skb, dev))
1808 goto nla_put_failure;
1810 if (dev->addr_len) {
1811 if (nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr) ||
1812 nla_put(skb, IFLA_BROADCAST, dev->addr_len, dev->broadcast))
1813 goto nla_put_failure;
1816 if (rtnl_phys_port_id_fill(skb, dev))
1817 goto nla_put_failure;
1819 if (rtnl_phys_port_name_fill(skb, dev))
1820 goto nla_put_failure;
1822 if (rtnl_phys_switch_id_fill(skb, dev))
1823 goto nla_put_failure;
1825 if (rtnl_fill_stats(skb, dev))
1826 goto nla_put_failure;
1828 if (rtnl_fill_vf(skb, dev, ext_filter_mask))
1829 goto nla_put_failure;
1831 if (rtnl_port_fill(skb, dev, ext_filter_mask))
1832 goto nla_put_failure;
1834 if (rtnl_xdp_fill(skb, dev))
1835 goto nla_put_failure;
1837 if (dev->rtnl_link_ops || rtnl_have_link_slave_info(dev)) {
1838 if (rtnl_link_fill(skb, dev) < 0)
1839 goto nla_put_failure;
1842 if (rtnl_fill_link_netnsid(skb, dev, src_net, gfp))
1843 goto nla_put_failure;
1846 nla_put_s32(skb, IFLA_NEW_NETNSID, *new_nsid) < 0)
1847 goto nla_put_failure;
1849 nla_put_s32(skb, IFLA_NEW_IFINDEX, new_ifindex) < 0)
1850 goto nla_put_failure;
1852 if (memchr_inv(dev->perm_addr, '\0', dev->addr_len) &&
1853 nla_put(skb, IFLA_PERM_ADDRESS, dev->addr_len, dev->perm_addr))
1854 goto nla_put_failure;
1857 if (rtnl_fill_link_af(skb, dev, ext_filter_mask))
1858 goto nla_put_failure_rcu;
1861 if (rtnl_fill_prop_list(skb, dev))
1862 goto nla_put_failure;
1864 if (dev->dev.parent &&
1865 nla_put_string(skb, IFLA_PARENT_DEV_NAME,
1866 dev_name(dev->dev.parent)))
1867 goto nla_put_failure;
1869 if (dev->dev.parent && dev->dev.parent->bus &&
1870 nla_put_string(skb, IFLA_PARENT_DEV_BUS_NAME,
1871 dev->dev.parent->bus->name))
1872 goto nla_put_failure;
1874 nlmsg_end(skb, nlh);
1877 nla_put_failure_rcu:
1880 nlmsg_cancel(skb, nlh);
1884 static const struct nla_policy ifla_policy[IFLA_MAX+1] = {
1885 [IFLA_IFNAME] = { .type = NLA_STRING, .len = IFNAMSIZ-1 },
1886 [IFLA_ADDRESS] = { .type = NLA_BINARY, .len = MAX_ADDR_LEN },
1887 [IFLA_BROADCAST] = { .type = NLA_BINARY, .len = MAX_ADDR_LEN },
1888 [IFLA_MAP] = { .len = sizeof(struct rtnl_link_ifmap) },
1889 [IFLA_MTU] = { .type = NLA_U32 },
1890 [IFLA_LINK] = { .type = NLA_U32 },
1891 [IFLA_MASTER] = { .type = NLA_U32 },
1892 [IFLA_CARRIER] = { .type = NLA_U8 },
1893 [IFLA_TXQLEN] = { .type = NLA_U32 },
1894 [IFLA_WEIGHT] = { .type = NLA_U32 },
1895 [IFLA_OPERSTATE] = { .type = NLA_U8 },
1896 [IFLA_LINKMODE] = { .type = NLA_U8 },
1897 [IFLA_LINKINFO] = { .type = NLA_NESTED },
1898 [IFLA_NET_NS_PID] = { .type = NLA_U32 },
1899 [IFLA_NET_NS_FD] = { .type = NLA_U32 },
1900 /* IFLA_IFALIAS is a string, but policy is set to NLA_BINARY to
1901 * allow 0-length string (needed to remove an alias).
1903 [IFLA_IFALIAS] = { .type = NLA_BINARY, .len = IFALIASZ - 1 },
1904 [IFLA_VFINFO_LIST] = {. type = NLA_NESTED },
1905 [IFLA_VF_PORTS] = { .type = NLA_NESTED },
1906 [IFLA_PORT_SELF] = { .type = NLA_NESTED },
1907 [IFLA_AF_SPEC] = { .type = NLA_NESTED },
1908 [IFLA_EXT_MASK] = { .type = NLA_U32 },
1909 [IFLA_PROMISCUITY] = { .type = NLA_U32 },
1910 [IFLA_NUM_TX_QUEUES] = { .type = NLA_U32 },
1911 [IFLA_NUM_RX_QUEUES] = { .type = NLA_U32 },
1912 [IFLA_GSO_MAX_SEGS] = { .type = NLA_U32 },
1913 [IFLA_GSO_MAX_SIZE] = { .type = NLA_U32 },
1914 [IFLA_PHYS_PORT_ID] = { .type = NLA_BINARY, .len = MAX_PHYS_ITEM_ID_LEN },
1915 [IFLA_CARRIER_CHANGES] = { .type = NLA_U32 }, /* ignored */
1916 [IFLA_PHYS_SWITCH_ID] = { .type = NLA_BINARY, .len = MAX_PHYS_ITEM_ID_LEN },
1917 [IFLA_LINK_NETNSID] = { .type = NLA_S32 },
1918 [IFLA_PROTO_DOWN] = { .type = NLA_U8 },
1919 [IFLA_XDP] = { .type = NLA_NESTED },
1920 [IFLA_EVENT] = { .type = NLA_U32 },
1921 [IFLA_GROUP] = { .type = NLA_U32 },
1922 [IFLA_TARGET_NETNSID] = { .type = NLA_S32 },
1923 [IFLA_CARRIER_UP_COUNT] = { .type = NLA_U32 },
1924 [IFLA_CARRIER_DOWN_COUNT] = { .type = NLA_U32 },
1925 [IFLA_MIN_MTU] = { .type = NLA_U32 },
1926 [IFLA_MAX_MTU] = { .type = NLA_U32 },
1927 [IFLA_PROP_LIST] = { .type = NLA_NESTED },
1928 [IFLA_ALT_IFNAME] = { .type = NLA_STRING,
1929 .len = ALTIFNAMSIZ - 1 },
1930 [IFLA_PERM_ADDRESS] = { .type = NLA_REJECT },
1931 [IFLA_PROTO_DOWN_REASON] = { .type = NLA_NESTED },
1932 [IFLA_NEW_IFINDEX] = NLA_POLICY_MIN(NLA_S32, 1),
1933 [IFLA_PARENT_DEV_NAME] = { .type = NLA_NUL_STRING },
1934 [IFLA_GRO_MAX_SIZE] = { .type = NLA_U32 },
1935 [IFLA_TSO_MAX_SIZE] = { .type = NLA_REJECT },
1936 [IFLA_TSO_MAX_SEGS] = { .type = NLA_REJECT },
1937 [IFLA_ALLMULTI] = { .type = NLA_REJECT },
1940 static const struct nla_policy ifla_info_policy[IFLA_INFO_MAX+1] = {
1941 [IFLA_INFO_KIND] = { .type = NLA_STRING },
1942 [IFLA_INFO_DATA] = { .type = NLA_NESTED },
1943 [IFLA_INFO_SLAVE_KIND] = { .type = NLA_STRING },
1944 [IFLA_INFO_SLAVE_DATA] = { .type = NLA_NESTED },
1947 static const struct nla_policy ifla_vf_policy[IFLA_VF_MAX+1] = {
1948 [IFLA_VF_MAC] = { .len = sizeof(struct ifla_vf_mac) },
1949 [IFLA_VF_BROADCAST] = { .type = NLA_REJECT },
1950 [IFLA_VF_VLAN] = { .len = sizeof(struct ifla_vf_vlan) },
1951 [IFLA_VF_VLAN_LIST] = { .type = NLA_NESTED },
1952 [IFLA_VF_TX_RATE] = { .len = sizeof(struct ifla_vf_tx_rate) },
1953 [IFLA_VF_SPOOFCHK] = { .len = sizeof(struct ifla_vf_spoofchk) },
1954 [IFLA_VF_RATE] = { .len = sizeof(struct ifla_vf_rate) },
1955 [IFLA_VF_LINK_STATE] = { .len = sizeof(struct ifla_vf_link_state) },
1956 [IFLA_VF_RSS_QUERY_EN] = { .len = sizeof(struct ifla_vf_rss_query_en) },
1957 [IFLA_VF_STATS] = { .type = NLA_NESTED },
1958 [IFLA_VF_TRUST] = { .len = sizeof(struct ifla_vf_trust) },
1959 [IFLA_VF_IB_NODE_GUID] = { .len = sizeof(struct ifla_vf_guid) },
1960 [IFLA_VF_IB_PORT_GUID] = { .len = sizeof(struct ifla_vf_guid) },
1963 static const struct nla_policy ifla_port_policy[IFLA_PORT_MAX+1] = {
1964 [IFLA_PORT_VF] = { .type = NLA_U32 },
1965 [IFLA_PORT_PROFILE] = { .type = NLA_STRING,
1966 .len = PORT_PROFILE_MAX },
1967 [IFLA_PORT_INSTANCE_UUID] = { .type = NLA_BINARY,
1968 .len = PORT_UUID_MAX },
1969 [IFLA_PORT_HOST_UUID] = { .type = NLA_STRING,
1970 .len = PORT_UUID_MAX },
1971 [IFLA_PORT_REQUEST] = { .type = NLA_U8, },
1972 [IFLA_PORT_RESPONSE] = { .type = NLA_U16, },
1974 /* Unused, but we need to keep it here since user space could
1975 * fill it. It's also broken with regard to NLA_BINARY use in
1976 * combination with structs.
1978 [IFLA_PORT_VSI_TYPE] = { .type = NLA_BINARY,
1979 .len = sizeof(struct ifla_port_vsi) },
1982 static const struct nla_policy ifla_xdp_policy[IFLA_XDP_MAX + 1] = {
1983 [IFLA_XDP_UNSPEC] = { .strict_start_type = IFLA_XDP_EXPECTED_FD },
1984 [IFLA_XDP_FD] = { .type = NLA_S32 },
1985 [IFLA_XDP_EXPECTED_FD] = { .type = NLA_S32 },
1986 [IFLA_XDP_ATTACHED] = { .type = NLA_U8 },
1987 [IFLA_XDP_FLAGS] = { .type = NLA_U32 },
1988 [IFLA_XDP_PROG_ID] = { .type = NLA_U32 },
1991 static const struct rtnl_link_ops *linkinfo_to_kind_ops(const struct nlattr *nla)
1993 const struct rtnl_link_ops *ops = NULL;
1994 struct nlattr *linfo[IFLA_INFO_MAX + 1];
1996 if (nla_parse_nested_deprecated(linfo, IFLA_INFO_MAX, nla, ifla_info_policy, NULL) < 0)
1999 if (linfo[IFLA_INFO_KIND]) {
2000 char kind[MODULE_NAME_LEN];
2002 nla_strscpy(kind, linfo[IFLA_INFO_KIND], sizeof(kind));
2003 ops = rtnl_link_ops_get(kind);
2009 static bool link_master_filtered(struct net_device *dev, int master_idx)
2011 struct net_device *master;
2016 master = netdev_master_upper_dev_get(dev);
2018 /* 0 is already used to denote IFLA_MASTER wasn't passed, therefore need
2019 * another invalid value for ifindex to denote "no master".
2021 if (master_idx == -1)
2024 if (!master || master->ifindex != master_idx)
2030 static bool link_kind_filtered(const struct net_device *dev,
2031 const struct rtnl_link_ops *kind_ops)
2033 if (kind_ops && dev->rtnl_link_ops != kind_ops)
2039 static bool link_dump_filtered(struct net_device *dev,
2041 const struct rtnl_link_ops *kind_ops)
2043 if (link_master_filtered(dev, master_idx) ||
2044 link_kind_filtered(dev, kind_ops))
2051 * rtnl_get_net_ns_capable - Get netns if sufficiently privileged.
2052 * @sk: netlink socket
2053 * @netnsid: network namespace identifier
2055 * Returns the network namespace identified by netnsid on success or an error
2056 * pointer on failure.
2058 struct net *rtnl_get_net_ns_capable(struct sock *sk, int netnsid)
2062 net = get_net_ns_by_id(sock_net(sk), netnsid);
2064 return ERR_PTR(-EINVAL);
2066 /* For now, the caller is required to have CAP_NET_ADMIN in
2067 * the user namespace owning the target net ns.
2069 if (!sk_ns_capable(sk, net->user_ns, CAP_NET_ADMIN)) {
2071 return ERR_PTR(-EACCES);
2075 EXPORT_SYMBOL_GPL(rtnl_get_net_ns_capable);
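/* Illustrative sketch (not part of this file): callers must check for an
 * error pointer and drop the reference with put_net() when done, e.g.:
 *
 *	tgt_net = rtnl_get_net_ns_capable(skb->sk, netnsid);
 *	if (IS_ERR(tgt_net))
 *		return PTR_ERR(tgt_net);
 *	// ... operate on tgt_net ...
 *	put_net(tgt_net);
 */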
2077 static int rtnl_valid_dump_ifinfo_req(const struct nlmsghdr *nlh,
2078 bool strict_check, struct nlattr **tb,
2079 struct netlink_ext_ack *extack)
2084 struct ifinfomsg *ifm;
2086 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifm))) {
2087 NL_SET_ERR_MSG(extack, "Invalid header for link dump");
2091 ifm = nlmsg_data(nlh);
2092 if (ifm->__ifi_pad || ifm->ifi_type || ifm->ifi_flags ||
2094 NL_SET_ERR_MSG(extack, "Invalid values in header for link dump request");
2097 if (ifm->ifi_index) {
2098 NL_SET_ERR_MSG(extack, "Filter by device index not supported for link dumps");
2102 return nlmsg_parse_deprecated_strict(nlh, sizeof(*ifm), tb,
2103 IFLA_MAX, ifla_policy,
2107 /* A hack to preserve kernel<->userspace interface.
2108 * The correct header is ifinfomsg. It is consistent with rtnl_getlink.
2109 * However, before Linux v3.9 the code here assumed rtgenmsg and that's
2110 * what iproute2 < v3.9.0 used.
2111 * We can detect the old iproute2. Even including the IFLA_EXT_MASK
2112 * attribute, its netlink message is shorter than struct ifinfomsg.
2114 hdrlen = nlmsg_len(nlh) < sizeof(struct ifinfomsg) ?
2115 sizeof(struct rtgenmsg) : sizeof(struct ifinfomsg);
2117 return nlmsg_parse_deprecated(nlh, hdrlen, tb, IFLA_MAX, ifla_policy,
2121 static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
2123 struct netlink_ext_ack *extack = cb->extack;
2124 const struct nlmsghdr *nlh = cb->nlh;
2125 struct net *net = sock_net(skb->sk);
2126 struct net *tgt_net = net;
2129 struct net_device *dev;
2130 struct hlist_head *head;
2131 struct nlattr *tb[IFLA_MAX+1];
2132 u32 ext_filter_mask = 0;
2133 const struct rtnl_link_ops *kind_ops = NULL;
2134 unsigned int flags = NLM_F_MULTI;
2140 s_idx = cb->args[1];
2142 err = rtnl_valid_dump_ifinfo_req(nlh, cb->strict_check, tb, extack);
2144 if (cb->strict_check)
2150 for (i = 0; i <= IFLA_MAX; ++i) {
2154 /* new attributes should only be added with strict checking */
2156 case IFLA_TARGET_NETNSID:
2157 netnsid = nla_get_s32(tb[i]);
2158 tgt_net = rtnl_get_net_ns_capable(skb->sk, netnsid);
2159 if (IS_ERR(tgt_net)) {
2160 NL_SET_ERR_MSG(extack, "Invalid target network namespace id");
2161 return PTR_ERR(tgt_net);
2165 ext_filter_mask = nla_get_u32(tb[i]);
2168 master_idx = nla_get_u32(tb[i]);
2171 kind_ops = linkinfo_to_kind_ops(tb[i]);
2174 if (cb->strict_check) {
2175 NL_SET_ERR_MSG(extack, "Unsupported attribute in link dump request");
2181 if (master_idx || kind_ops)
2182 flags |= NLM_F_DUMP_FILTERED;
2185 for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
2187 head = &tgt_net->dev_index_head[h];
2188 hlist_for_each_entry(dev, head, index_hlist) {
2189 if (link_dump_filtered(dev, master_idx, kind_ops))
2193 err = rtnl_fill_ifinfo(skb, dev, net,
2195 NETLINK_CB(cb->skb).portid,
2196 nlh->nlmsg_seq, 0, flags,
2197 ext_filter_mask, 0, NULL, 0,
2198 netnsid, GFP_KERNEL);
2201 if (likely(skb->len))
2215 cb->seq = tgt_net->dev_base_seq;
2216 nl_dump_check_consistent(cb, nlmsg_hdr(skb));
2223 int rtnl_nla_parse_ifinfomsg(struct nlattr **tb, const struct nlattr *nla_peer,
2224 struct netlink_ext_ack *exterr)
2226 const struct ifinfomsg *ifmp;
2227 const struct nlattr *attrs;
2230 ifmp = nla_data(nla_peer);
2231 attrs = nla_data(nla_peer) + sizeof(struct ifinfomsg);
2232 len = nla_len(nla_peer) - sizeof(struct ifinfomsg);
2234 if (ifmp->ifi_index < 0) {
2235 NL_SET_ERR_MSG_ATTR(exterr, nla_peer,
2236 "ifindex can't be negative");
2240 return nla_parse_deprecated(tb, IFLA_MAX, attrs, len, ifla_policy,
2243 EXPORT_SYMBOL(rtnl_nla_parse_ifinfomsg);
2245 struct net *rtnl_link_get_net(struct net *src_net, struct nlattr *tb[])
2248 /* Examine the link attributes and figure out which
2249 * network namespace we are talking about.
2251 if (tb[IFLA_NET_NS_PID])
2252 net = get_net_ns_by_pid(nla_get_u32(tb[IFLA_NET_NS_PID]));
2253 else if (tb[IFLA_NET_NS_FD])
2254 net = get_net_ns_by_fd(nla_get_u32(tb[IFLA_NET_NS_FD]));
2256 net = get_net(src_net);
2259 EXPORT_SYMBOL(rtnl_link_get_net);
2261 /* Figure out which network namespace we are talking about by
2262 * examining the link attributes in the following order:
2264 * 1. IFLA_NET_NS_PID
2265 * 2. IFLA_NET_NS_FD
2266 * 3. IFLA_TARGET_NETNSID
2268 static struct net *rtnl_link_get_net_by_nlattr(struct net *src_net,
2269 struct nlattr *tb[])
2273 if (tb[IFLA_NET_NS_PID] || tb[IFLA_NET_NS_FD])
2274 return rtnl_link_get_net(src_net, tb);
2276 if (!tb[IFLA_TARGET_NETNSID])
2277 return get_net(src_net);
2279 net = get_net_ns_by_id(src_net, nla_get_u32(tb[IFLA_TARGET_NETNSID]));
2281 return ERR_PTR(-EINVAL);
2286 static struct net *rtnl_link_get_net_capable(const struct sk_buff *skb,
2287 struct net *src_net,
2288 struct nlattr *tb[], int cap)
2292 net = rtnl_link_get_net_by_nlattr(src_net, tb);
2296 if (!netlink_ns_capable(skb, net->user_ns, cap)) {
2298 return ERR_PTR(-EPERM);
2304 /* Verify that rtnetlink requests do not pass additional properties
2305 * potentially referring to different network namespaces.
2307 static int rtnl_ensure_unique_netns(struct nlattr *tb[],
2308 struct netlink_ext_ack *extack,
2312 if (netns_id_only) {
2313 if (!tb[IFLA_NET_NS_PID] && !tb[IFLA_NET_NS_FD])
2316 NL_SET_ERR_MSG(extack, "specified netns attribute not supported");
2320 if (tb[IFLA_TARGET_NETNSID] && (tb[IFLA_NET_NS_PID] || tb[IFLA_NET_NS_FD]))
2323 if (tb[IFLA_NET_NS_PID] && (tb[IFLA_TARGET_NETNSID] || tb[IFLA_NET_NS_FD]))
2326 if (tb[IFLA_NET_NS_FD] && (tb[IFLA_TARGET_NETNSID] || tb[IFLA_NET_NS_PID]))
2332 NL_SET_ERR_MSG(extack, "multiple netns identifying attributes specified");
2336 static int rtnl_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate,
2339 const struct net_device_ops *ops = dev->netdev_ops;
2341 if (!ops->ndo_set_vf_rate)
2343 if (max_tx_rate && max_tx_rate < min_tx_rate)
2346 return ops->ndo_set_vf_rate(dev, vf, min_tx_rate, max_tx_rate);
2349 static int validate_linkmsg(struct net_device *dev, struct nlattr *tb[],
2350 struct netlink_ext_ack *extack)
2353 if (tb[IFLA_ADDRESS] &&
2354 nla_len(tb[IFLA_ADDRESS]) < dev->addr_len)
2357 if (tb[IFLA_BROADCAST] &&
2358 nla_len(tb[IFLA_BROADCAST]) < dev->addr_len)
2362 if (tb[IFLA_AF_SPEC]) {
2366 nla_for_each_nested(af, tb[IFLA_AF_SPEC], rem) {
2367 const struct rtnl_af_ops *af_ops;
2369 af_ops = rtnl_af_lookup(nla_type(af));
2371 return -EAFNOSUPPORT;
2373 if (!af_ops->set_link_af)
2376 if (af_ops->validate_link_af) {
2377 err = af_ops->validate_link_af(dev, af, extack);
2387 static int handle_infiniband_guid(struct net_device *dev, struct ifla_vf_guid *ivt,
2390 const struct net_device_ops *ops = dev->netdev_ops;
2392 return ops->ndo_set_vf_guid(dev, ivt->vf, ivt->guid, guid_type);
2395 static int handle_vf_guid(struct net_device *dev, struct ifla_vf_guid *ivt, int guid_type)
2397 if (dev->type != ARPHRD_INFINIBAND)
2400 return handle_infiniband_guid(dev, ivt, guid_type);
2403 static int do_setvfinfo(struct net_device *dev, struct nlattr **tb)
2405 const struct net_device_ops *ops = dev->netdev_ops;
2408 if (tb[IFLA_VF_MAC]) {
2409 struct ifla_vf_mac *ivm = nla_data(tb[IFLA_VF_MAC]);
2411 if (ivm->vf >= INT_MAX)
2414 if (ops->ndo_set_vf_mac)
2415 err = ops->ndo_set_vf_mac(dev, ivm->vf,
2421 if (tb[IFLA_VF_VLAN]) {
2422 struct ifla_vf_vlan *ivv = nla_data(tb[IFLA_VF_VLAN]);
2424 if (ivv->vf >= INT_MAX)
2427 if (ops->ndo_set_vf_vlan)
2428 err = ops->ndo_set_vf_vlan(dev, ivv->vf, ivv->vlan,
2430 htons(ETH_P_8021Q));
2435 if (tb[IFLA_VF_VLAN_LIST]) {
2436 struct ifla_vf_vlan_info *ivvl[MAX_VLAN_LIST_LEN];
2437 struct nlattr *attr;
2441 if (!ops->ndo_set_vf_vlan)
2444 nla_for_each_nested(attr, tb[IFLA_VF_VLAN_LIST], rem) {
2445 if (nla_type(attr) != IFLA_VF_VLAN_INFO ||
2446 nla_len(attr) < NLA_HDRLEN) {
2449 if (len >= MAX_VLAN_LIST_LEN)
2451 ivvl[len] = nla_data(attr);
2458 if (ivvl[0]->vf >= INT_MAX)
2460 err = ops->ndo_set_vf_vlan(dev, ivvl[0]->vf, ivvl[0]->vlan,
2461 ivvl[0]->qos, ivvl[0]->vlan_proto);
2466 if (tb[IFLA_VF_TX_RATE]) {
2467 struct ifla_vf_tx_rate *ivt = nla_data(tb[IFLA_VF_TX_RATE]);
2468 struct ifla_vf_info ivf;
2470 if (ivt->vf >= INT_MAX)
2473 if (ops->ndo_get_vf_config)
2474 err = ops->ndo_get_vf_config(dev, ivt->vf, &ivf);
2478 err = rtnl_set_vf_rate(dev, ivt->vf,
2479 ivf.min_tx_rate, ivt->rate);
2484 if (tb[IFLA_VF_RATE]) {
2485 struct ifla_vf_rate *ivt = nla_data(tb[IFLA_VF_RATE]);
2487 if (ivt->vf >= INT_MAX)
2490 err = rtnl_set_vf_rate(dev, ivt->vf,
2491 ivt->min_tx_rate, ivt->max_tx_rate);
2496 if (tb[IFLA_VF_SPOOFCHK]) {
2497 struct ifla_vf_spoofchk *ivs = nla_data(tb[IFLA_VF_SPOOFCHK]);
2499 if (ivs->vf >= INT_MAX)
2502 if (ops->ndo_set_vf_spoofchk)
2503 err = ops->ndo_set_vf_spoofchk(dev, ivs->vf,
2509 if (tb[IFLA_VF_LINK_STATE]) {
2510 struct ifla_vf_link_state *ivl = nla_data(tb[IFLA_VF_LINK_STATE]);
2512 if (ivl->vf >= INT_MAX)
2515 if (ops->ndo_set_vf_link_state)
2516 err = ops->ndo_set_vf_link_state(dev, ivl->vf,
2522 if (tb[IFLA_VF_RSS_QUERY_EN]) {
2523 struct ifla_vf_rss_query_en *ivrssq_en;
2526 ivrssq_en = nla_data(tb[IFLA_VF_RSS_QUERY_EN]);
2527 if (ivrssq_en->vf >= INT_MAX)
2529 if (ops->ndo_set_vf_rss_query_en)
2530 err = ops->ndo_set_vf_rss_query_en(dev, ivrssq_en->vf,
2531 ivrssq_en->setting);
2536 if (tb[IFLA_VF_TRUST]) {
2537 struct ifla_vf_trust *ivt = nla_data(tb[IFLA_VF_TRUST]);
2539 if (ivt->vf >= INT_MAX)
2542 if (ops->ndo_set_vf_trust)
2543 err = ops->ndo_set_vf_trust(dev, ivt->vf, ivt->setting);
2548 if (tb[IFLA_VF_IB_NODE_GUID]) {
2549 struct ifla_vf_guid *ivt = nla_data(tb[IFLA_VF_IB_NODE_GUID]);
2551 if (ivt->vf >= INT_MAX)
2553 if (!ops->ndo_set_vf_guid)
2555 return handle_vf_guid(dev, ivt, IFLA_VF_IB_NODE_GUID);
2558 if (tb[IFLA_VF_IB_PORT_GUID]) {
2559 struct ifla_vf_guid *ivt = nla_data(tb[IFLA_VF_IB_PORT_GUID]);
2561 if (ivt->vf >= INT_MAX)
2563 if (!ops->ndo_set_vf_guid)
2566 return handle_vf_guid(dev, ivt, IFLA_VF_IB_PORT_GUID);
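/*
 * do_set_master() handles IFLA_MASTER: if the device already has the
 * requested master nothing is done; otherwise it is detached from its
 * current upper device via ndo_del_slave and, for a non-zero ifindex,
 * enslaved to the new master via ndo_add_slave.  An ifindex of zero
 * therefore just removes the existing master.
 */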
2572 static int do_set_master(struct net_device *dev, int ifindex,
2573 struct netlink_ext_ack *extack)
2575 struct net_device *upper_dev = netdev_master_upper_dev_get(dev);
2576 const struct net_device_ops *ops;
2580 if (upper_dev->ifindex == ifindex)
2582 ops = upper_dev->netdev_ops;
2583 if (ops->ndo_del_slave) {
2584 err = ops->ndo_del_slave(upper_dev, dev);
2593 upper_dev = __dev_get_by_index(dev_net(dev), ifindex);
2596 ops = upper_dev->netdev_ops;
2597 if (ops->ndo_add_slave) {
2598 err = ops->ndo_add_slave(upper_dev, dev, extack);
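/*
 * Protodown handling: IFLA_PROTO_DOWN is a simple u8 on/off switch,
 * optionally accompanied by an IFLA_PROTO_DOWN_REASON nest carrying a
 * u32 VALUE and an optional u32 MASK of reason bits.  Clearing protodown
 * is refused while any reason bit is still set, as do_set_proto_down()
 * below checks.
 */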
2608 static const struct nla_policy ifla_proto_down_reason_policy[IFLA_PROTO_DOWN_REASON_VALUE + 1] = {
2609 [IFLA_PROTO_DOWN_REASON_MASK] = { .type = NLA_U32 },
2610 [IFLA_PROTO_DOWN_REASON_VALUE] = { .type = NLA_U32 },
2613 static int do_set_proto_down(struct net_device *dev,
2614 struct nlattr *nl_proto_down,
2615 struct nlattr *nl_proto_down_reason,
2616 struct netlink_ext_ack *extack)
2618 struct nlattr *pdreason[IFLA_PROTO_DOWN_REASON_MAX + 1];
2619 unsigned long mask = 0;
2624 if (!(dev->priv_flags & IFF_CHANGE_PROTO_DOWN)) {
2625 NL_SET_ERR_MSG(extack, "Protodown not supported by device");
2629 if (nl_proto_down_reason) {
2630 err = nla_parse_nested_deprecated(pdreason,
2631 IFLA_PROTO_DOWN_REASON_MAX,
2632 nl_proto_down_reason,
2633 ifla_proto_down_reason_policy,
2638 if (!pdreason[IFLA_PROTO_DOWN_REASON_VALUE]) {
2639 NL_SET_ERR_MSG(extack, "Invalid protodown reason value");
2643 value = nla_get_u32(pdreason[IFLA_PROTO_DOWN_REASON_VALUE]);
2645 if (pdreason[IFLA_PROTO_DOWN_REASON_MASK])
2646 mask = nla_get_u32(pdreason[IFLA_PROTO_DOWN_REASON_MASK]);
2648 dev_change_proto_down_reason(dev, mask, value);
2651 if (nl_proto_down) {
2652 proto_down = nla_get_u8(nl_proto_down);
2654 /* Don't turn off protodown if there are active reasons */
2655 if (!proto_down && dev->proto_down_reason) {
2656 NL_SET_ERR_MSG(extack, "Cannot clear protodown, active reasons");
2659 err = dev_change_proto_down(dev,
2668 #define DO_SETLINK_MODIFIED 0x01
2669 /* notify flag means notify + modified. */
2670 #define DO_SETLINK_NOTIFY 0x03
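/*
 * do_setlink() accumulates the DO_SETLINK_* bits in "status":
 * DO_SETLINK_MODIFIED records that at least one change has already been
 * committed, and DO_SETLINK_NOTIFY (which includes MODIFIED) requests a
 * netdev_state_change() notification on success.  On failure after a
 * partial change the ratelimited warning at the end of the function is
 * emitted instead.
 */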
2671 static int do_setlink(const struct sk_buff *skb,
2672 struct net_device *dev, struct ifinfomsg *ifm,
2673 struct netlink_ext_ack *extack,
2674 struct nlattr **tb, int status)
2676 const struct net_device_ops *ops = dev->netdev_ops;
2677 char ifname[IFNAMSIZ];
2680 err = validate_linkmsg(dev, tb, extack);
2684 if (tb[IFLA_IFNAME])
2685 nla_strscpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ);
2689 if (tb[IFLA_NET_NS_PID] || tb[IFLA_NET_NS_FD] || tb[IFLA_TARGET_NETNSID]) {
2690 const char *pat = ifname[0] ? ifname : NULL;
2694 net = rtnl_link_get_net_capable(skb, dev_net(dev),
2701 if (tb[IFLA_NEW_IFINDEX])
2702 new_ifindex = nla_get_s32(tb[IFLA_NEW_IFINDEX]);
2706 err = __dev_change_net_namespace(dev, net, pat, new_ifindex);
2710 status |= DO_SETLINK_MODIFIED;
2714 struct rtnl_link_ifmap *u_map;
2717 if (!ops->ndo_set_config) {
2722 if (!netif_device_present(dev)) {
2727 u_map = nla_data(tb[IFLA_MAP]);
2728 k_map.mem_start = (unsigned long) u_map->mem_start;
2729 k_map.mem_end = (unsigned long) u_map->mem_end;
2730 k_map.base_addr = (unsigned short) u_map->base_addr;
2731 k_map.irq = (unsigned char) u_map->irq;
2732 k_map.dma = (unsigned char) u_map->dma;
2733 k_map.port = (unsigned char) u_map->port;
2735 err = ops->ndo_set_config(dev, &k_map);
2739 status |= DO_SETLINK_NOTIFY;
2742 if (tb[IFLA_ADDRESS]) {
2743 struct sockaddr *sa;
2746 len = sizeof(sa_family_t) + max_t(size_t, dev->addr_len,
2748 sa = kmalloc(len, GFP_KERNEL);
2753 sa->sa_family = dev->type;
2754 memcpy(sa->sa_data, nla_data(tb[IFLA_ADDRESS]),
2756 err = dev_set_mac_address_user(dev, sa, extack);
2760 status |= DO_SETLINK_MODIFIED;
2764 err = dev_set_mtu_ext(dev, nla_get_u32(tb[IFLA_MTU]), extack);
2767 status |= DO_SETLINK_MODIFIED;
2770 if (tb[IFLA_GROUP]) {
2771 dev_set_group(dev, nla_get_u32(tb[IFLA_GROUP]));
2772 status |= DO_SETLINK_NOTIFY;
2776 * Interface selected by interface index but interface
2777 * name provided implies that a name change has been
2778 * requested.
2780 if (ifm->ifi_index > 0 && ifname[0]) {
2781 err = dev_change_name(dev, ifname);
2784 status |= DO_SETLINK_MODIFIED;
2787 if (tb[IFLA_IFALIAS]) {
2788 err = dev_set_alias(dev, nla_data(tb[IFLA_IFALIAS]),
2789 nla_len(tb[IFLA_IFALIAS]));
2792 status |= DO_SETLINK_NOTIFY;
2795 if (tb[IFLA_BROADCAST]) {
2796 nla_memcpy(dev->broadcast, tb[IFLA_BROADCAST], dev->addr_len);
2797 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
2800 if (ifm->ifi_flags || ifm->ifi_change) {
2801 err = dev_change_flags(dev, rtnl_dev_combine_flags(dev, ifm),
2807 if (tb[IFLA_MASTER]) {
2808 err = do_set_master(dev, nla_get_u32(tb[IFLA_MASTER]), extack);
2811 status |= DO_SETLINK_MODIFIED;
2814 if (tb[IFLA_CARRIER]) {
2815 err = dev_change_carrier(dev, nla_get_u8(tb[IFLA_CARRIER]));
2818 status |= DO_SETLINK_MODIFIED;
2821 if (tb[IFLA_TXQLEN]) {
2822 unsigned int value = nla_get_u32(tb[IFLA_TXQLEN]);
2824 err = dev_change_tx_queue_len(dev, value);
2827 status |= DO_SETLINK_MODIFIED;
2830 if (tb[IFLA_GSO_MAX_SIZE]) {
2831 u32 max_size = nla_get_u32(tb[IFLA_GSO_MAX_SIZE]);
2833 if (max_size > dev->tso_max_size) {
2838 if (dev->gso_max_size ^ max_size) {
2839 netif_set_gso_max_size(dev, max_size);
2840 status |= DO_SETLINK_MODIFIED;
2844 if (tb[IFLA_GSO_MAX_SEGS]) {
2845 u32 max_segs = nla_get_u32(tb[IFLA_GSO_MAX_SEGS]);
2847 if (max_segs > GSO_MAX_SEGS || max_segs > dev->tso_max_segs) {
2852 if (dev->gso_max_segs ^ max_segs) {
2853 netif_set_gso_max_segs(dev, max_segs);
2854 status |= DO_SETLINK_MODIFIED;
2858 if (tb[IFLA_GRO_MAX_SIZE]) {
2859 u32 gro_max_size = nla_get_u32(tb[IFLA_GRO_MAX_SIZE]);
2861 if (dev->gro_max_size ^ gro_max_size) {
2862 netif_set_gro_max_size(dev, gro_max_size);
2863 status |= DO_SETLINK_MODIFIED;
2867 if (tb[IFLA_OPERSTATE])
2868 set_operstate(dev, nla_get_u8(tb[IFLA_OPERSTATE]));
2870 if (tb[IFLA_LINKMODE]) {
2871 unsigned char value = nla_get_u8(tb[IFLA_LINKMODE]);
2873 write_lock(&dev_base_lock);
2874 if (dev->link_mode ^ value)
2875 status |= DO_SETLINK_NOTIFY;
2876 dev->link_mode = value;
2877 write_unlock(&dev_base_lock);
2880 if (tb[IFLA_VFINFO_LIST]) {
2881 struct nlattr *vfinfo[IFLA_VF_MAX + 1];
2882 struct nlattr *attr;
2885 nla_for_each_nested(attr, tb[IFLA_VFINFO_LIST], rem) {
2886 if (nla_type(attr) != IFLA_VF_INFO ||
2887 nla_len(attr) < NLA_HDRLEN) {
2891 err = nla_parse_nested_deprecated(vfinfo, IFLA_VF_MAX,
2897 err = do_setvfinfo(dev, vfinfo);
2900 status |= DO_SETLINK_NOTIFY;
2905 if (tb[IFLA_VF_PORTS]) {
2906 struct nlattr *port[IFLA_PORT_MAX+1];
2907 struct nlattr *attr;
2912 if (!ops->ndo_set_vf_port)
2915 nla_for_each_nested(attr, tb[IFLA_VF_PORTS], rem) {
2916 if (nla_type(attr) != IFLA_VF_PORT ||
2917 nla_len(attr) < NLA_HDRLEN) {
2921 err = nla_parse_nested_deprecated(port, IFLA_PORT_MAX,
2927 if (!port[IFLA_PORT_VF]) {
2931 vf = nla_get_u32(port[IFLA_PORT_VF]);
2932 err = ops->ndo_set_vf_port(dev, vf, port);
2935 status |= DO_SETLINK_NOTIFY;
2940 if (tb[IFLA_PORT_SELF]) {
2941 struct nlattr *port[IFLA_PORT_MAX+1];
2943 err = nla_parse_nested_deprecated(port, IFLA_PORT_MAX,
2945 ifla_port_policy, NULL);
2950 if (ops->ndo_set_vf_port)
2951 err = ops->ndo_set_vf_port(dev, PORT_SELF_VF, port);
2954 status |= DO_SETLINK_NOTIFY;
2957 if (tb[IFLA_AF_SPEC]) {
2961 nla_for_each_nested(af, tb[IFLA_AF_SPEC], rem) {
2962 const struct rtnl_af_ops *af_ops;
2964 BUG_ON(!(af_ops = rtnl_af_lookup(nla_type(af))));
2966 err = af_ops->set_link_af(dev, af, extack);
2970 status |= DO_SETLINK_NOTIFY;
2975 if (tb[IFLA_PROTO_DOWN] || tb[IFLA_PROTO_DOWN_REASON]) {
2976 err = do_set_proto_down(dev, tb[IFLA_PROTO_DOWN],
2977 tb[IFLA_PROTO_DOWN_REASON], extack);
2980 status |= DO_SETLINK_NOTIFY;
2984 struct nlattr *xdp[IFLA_XDP_MAX + 1];
2987 err = nla_parse_nested_deprecated(xdp, IFLA_XDP_MAX,
2989 ifla_xdp_policy, NULL);
2993 if (xdp[IFLA_XDP_ATTACHED] || xdp[IFLA_XDP_PROG_ID]) {
2998 if (xdp[IFLA_XDP_FLAGS]) {
2999 xdp_flags = nla_get_u32(xdp[IFLA_XDP_FLAGS]);
3000 if (xdp_flags & ~XDP_FLAGS_MASK) {
3004 if (hweight32(xdp_flags & XDP_FLAGS_MODES) > 1) {
3010 if (xdp[IFLA_XDP_FD]) {
3011 int expected_fd = -1;
3013 if (xdp_flags & XDP_FLAGS_REPLACE) {
3014 if (!xdp[IFLA_XDP_EXPECTED_FD]) {
3019 nla_get_s32(xdp[IFLA_XDP_EXPECTED_FD]);
3022 err = dev_change_xdp_fd(dev, extack,
3023 nla_get_s32(xdp[IFLA_XDP_FD]),
3028 status |= DO_SETLINK_NOTIFY;
3033 if (status & DO_SETLINK_MODIFIED) {
3034 if ((status & DO_SETLINK_NOTIFY) == DO_SETLINK_NOTIFY)
3035 netdev_state_change(dev);
3038 net_warn_ratelimited("A link change request failed with some changes committed already. Interface %s may have been left with an inconsistent configuration, please check.\n",
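/*
 * rtnl_dev_get() looks a device up by IFLA_IFNAME or, failing that, by
 * IFLA_ALT_IFNAME in the given namespace; it returns NULL when neither
 * attribute is present or no device matches.
 */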
3045 static struct net_device *rtnl_dev_get(struct net *net,
3046 struct nlattr *tb[])
3048 char ifname[ALTIFNAMSIZ];
3050 if (tb[IFLA_IFNAME])
3051 nla_strscpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ);
3052 else if (tb[IFLA_ALT_IFNAME])
3053 nla_strscpy(ifname, tb[IFLA_ALT_IFNAME], ALTIFNAMSIZ);
3057 return __dev_get_by_name(net, ifname);
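/*
 * RTM_SETLINK: parse the ifinfomsg and IFLA_* attributes, make sure only
 * one netns selector is present, locate the device by ifindex or by
 * (alt)name and hand everything to do_setlink().  A command such as
 * "ip link set dev eth0 mtu 1500" (illustrative example; recent iproute2
 * may use RTM_NEWLINK instead) ends up here as an ifinfomsg plus an
 * IFLA_MTU attribute.
 */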
3060 static int rtnl_setlink(struct sk_buff *skb, struct nlmsghdr *nlh,
3061 struct netlink_ext_ack *extack)
3063 struct net *net = sock_net(skb->sk);
3064 struct ifinfomsg *ifm;
3065 struct net_device *dev;
3067 struct nlattr *tb[IFLA_MAX+1];
3069 err = nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFLA_MAX,
3070 ifla_policy, extack);
3074 err = rtnl_ensure_unique_netns(tb, extack, false);
3079 ifm = nlmsg_data(nlh);
3080 if (ifm->ifi_index > 0)
3081 dev = __dev_get_by_index(net, ifm->ifi_index);
3082 else if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME])
3083 dev = rtnl_dev_get(net, tb);
3092 err = do_setlink(skb, dev, ifm, extack, tb, 0);
3097 static int rtnl_group_dellink(const struct net *net, int group)
3099 struct net_device *dev, *aux;
3100 LIST_HEAD(list_kill);
3106 for_each_netdev(net, dev) {
3107 if (dev->group == group) {
3108 const struct rtnl_link_ops *ops;
3111 ops = dev->rtnl_link_ops;
3112 if (!ops || !ops->dellink)
3120 for_each_netdev_safe(net, dev, aux) {
3121 if (dev->group == group) {
3122 const struct rtnl_link_ops *ops;
3124 ops = dev->rtnl_link_ops;
3125 ops->dellink(dev, &list_kill);
3128 unregister_netdevice_many(&list_kill);
3133 int rtnl_delete_link(struct net_device *dev)
3135 const struct rtnl_link_ops *ops;
3136 LIST_HEAD(list_kill);
3138 ops = dev->rtnl_link_ops;
3139 if (!ops || !ops->dellink)
3142 ops->dellink(dev, &list_kill);
3143 unregister_netdevice_many(&list_kill);
3147 EXPORT_SYMBOL_GPL(rtnl_delete_link);
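/*
 * RTM_DELLINK: the target may be named by ifindex, by IFLA_IFNAME /
 * IFLA_ALT_IFNAME, or as a whole device group via IFLA_GROUP, optionally
 * in another namespace selected by IFLA_TARGET_NETNSID.  Only devices
 * whose rtnl_link_ops provide ->dellink can be removed this way.
 */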
3149 static int rtnl_dellink(struct sk_buff *skb, struct nlmsghdr *nlh,
3150 struct netlink_ext_ack *extack)
3152 struct net *net = sock_net(skb->sk);
3153 struct net *tgt_net = net;
3154 struct net_device *dev = NULL;
3155 struct ifinfomsg *ifm;
3156 struct nlattr *tb[IFLA_MAX+1];
3160 err = nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFLA_MAX,
3161 ifla_policy, extack);
3165 err = rtnl_ensure_unique_netns(tb, extack, true);
3169 if (tb[IFLA_TARGET_NETNSID]) {
3170 netnsid = nla_get_s32(tb[IFLA_TARGET_NETNSID]);
3171 tgt_net = rtnl_get_net_ns_capable(NETLINK_CB(skb).sk, netnsid);
3172 if (IS_ERR(tgt_net))
3173 return PTR_ERR(tgt_net);
3177 ifm = nlmsg_data(nlh);
3178 if (ifm->ifi_index > 0)
3179 dev = __dev_get_by_index(tgt_net, ifm->ifi_index);
3180 else if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME])
3181 dev = rtnl_dev_get(net, tb);
3182 else if (tb[IFLA_GROUP])
3183 err = rtnl_group_dellink(tgt_net, nla_get_u32(tb[IFLA_GROUP]));
3188 if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME] || ifm->ifi_index > 0)
3194 err = rtnl_delete_link(dev);
3203 int rtnl_configure_link(struct net_device *dev, const struct ifinfomsg *ifm)
3205 unsigned int old_flags;
3208 old_flags = dev->flags;
3209 if (ifm && (ifm->ifi_flags || ifm->ifi_change)) {
3210 err = __dev_change_flags(dev, rtnl_dev_combine_flags(dev, ifm),
3216 if (dev->rtnl_link_state == RTNL_LINK_INITIALIZED) {
3217 __dev_notify_flags(dev, old_flags, (old_flags ^ dev->flags));
3219 dev->rtnl_link_state = RTNL_LINK_INITIALIZED;
3220 __dev_notify_flags(dev, old_flags, ~0U);
3224 EXPORT_SYMBOL(rtnl_configure_link);
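/*
 * rtnl_create_link() allocates the net_device for a new rtnl link: queue
 * counts come from IFLA_NUM_TX_QUEUES / IFLA_NUM_RX_QUEUES (or the ops
 * callbacks) and are limited to the 1..4096 range, the device is
 * allocated through ops->alloc() or alloc_netdev_mqs(), and a few IFLA_*
 * attributes (address, MTU, txqlen, operstate, ...) are applied before
 * the caller registers it.
 */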
3226 struct net_device *rtnl_create_link(struct net *net, const char *ifname,
3227 unsigned char name_assign_type,
3228 const struct rtnl_link_ops *ops,
3229 struct nlattr *tb[],
3230 struct netlink_ext_ack *extack)
3232 struct net_device *dev;
3233 unsigned int num_tx_queues = 1;
3234 unsigned int num_rx_queues = 1;
3237 if (tb[IFLA_NUM_TX_QUEUES])
3238 num_tx_queues = nla_get_u32(tb[IFLA_NUM_TX_QUEUES]);
3239 else if (ops->get_num_tx_queues)
3240 num_tx_queues = ops->get_num_tx_queues();
3242 if (tb[IFLA_NUM_RX_QUEUES])
3243 num_rx_queues = nla_get_u32(tb[IFLA_NUM_RX_QUEUES]);
3244 else if (ops->get_num_rx_queues)
3245 num_rx_queues = ops->get_num_rx_queues();
3247 if (num_tx_queues < 1 || num_tx_queues > 4096) {
3248 NL_SET_ERR_MSG(extack, "Invalid number of transmit queues");
3249 return ERR_PTR(-EINVAL);
3252 if (num_rx_queues < 1 || num_rx_queues > 4096) {
3253 NL_SET_ERR_MSG(extack, "Invalid number of receive queues");
3254 return ERR_PTR(-EINVAL);
3258 dev = ops->alloc(tb, ifname, name_assign_type,
3259 num_tx_queues, num_rx_queues);
3263 dev = alloc_netdev_mqs(ops->priv_size, ifname,
3264 name_assign_type, ops->setup,
3265 num_tx_queues, num_rx_queues);
3269 return ERR_PTR(-ENOMEM);
3271 err = validate_linkmsg(dev, tb, extack);
3274 return ERR_PTR(err);
3277 dev_net_set(dev, net);
3278 dev->rtnl_link_ops = ops;
3279 dev->rtnl_link_state = RTNL_LINK_INITIALIZING;
3282 u32 mtu = nla_get_u32(tb[IFLA_MTU]);
3284 err = dev_validate_mtu(dev, mtu, extack);
3287 return ERR_PTR(err);
3291 if (tb[IFLA_ADDRESS]) {
3292 __dev_addr_set(dev, nla_data(tb[IFLA_ADDRESS]),
3293 nla_len(tb[IFLA_ADDRESS]));
3294 dev->addr_assign_type = NET_ADDR_SET;
3296 if (tb[IFLA_BROADCAST])
3297 memcpy(dev->broadcast, nla_data(tb[IFLA_BROADCAST]),
3298 nla_len(tb[IFLA_BROADCAST]));
3299 if (tb[IFLA_TXQLEN])
3300 dev->tx_queue_len = nla_get_u32(tb[IFLA_TXQLEN]);
3301 if (tb[IFLA_OPERSTATE])
3302 set_operstate(dev, nla_get_u8(tb[IFLA_OPERSTATE]));
3303 if (tb[IFLA_LINKMODE])
3304 dev->link_mode = nla_get_u8(tb[IFLA_LINKMODE]);
3306 dev_set_group(dev, nla_get_u32(tb[IFLA_GROUP]));
3307 if (tb[IFLA_GSO_MAX_SIZE])
3308 netif_set_gso_max_size(dev, nla_get_u32(tb[IFLA_GSO_MAX_SIZE]));
3309 if (tb[IFLA_GSO_MAX_SEGS])
3310 netif_set_gso_max_segs(dev, nla_get_u32(tb[IFLA_GSO_MAX_SEGS]));
3311 if (tb[IFLA_GRO_MAX_SIZE])
3312 netif_set_gro_max_size(dev, nla_get_u32(tb[IFLA_GRO_MAX_SIZE]));
3316 EXPORT_SYMBOL(rtnl_create_link);
3318 static int rtnl_group_changelink(const struct sk_buff *skb,
3319 struct net *net, int group,
3320 struct ifinfomsg *ifm,
3321 struct netlink_ext_ack *extack,
3324 struct net_device *dev, *aux;
3327 for_each_netdev_safe(net, dev, aux) {
3328 if (dev->group == group) {
3329 err = do_setlink(skb, dev, ifm, extack, tb, 0);
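/*
 * rtnl_newlink_create() implements the "create" half of RTM_NEWLINK:
 * pick the name (NET_NAME_ENUM with a "<kind>%d" template when no
 * IFLA_IFNAME was given), resolve the destination namespace and the
 * optional IFLA_LINK_NETNSID, allocate and register the device, then run
 * rtnl_configure_link() and any IFLA_MASTER enslavement.  A typical
 * request, e.g. for "ip link add veth0 type veth" (illustrative layout
 * only), looks like:
 *
 *	RTM_NEWLINK, NLM_F_REQUEST | NLM_F_CREATE | NLM_F_EXCL
 *	    IFLA_IFNAME "veth0"
 *	    IFLA_LINKINFO
 *	        IFLA_INFO_KIND "veth"
 *	        IFLA_INFO_DATA (kind specific attributes)
 */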
3338 static int rtnl_newlink_create(struct sk_buff *skb, struct ifinfomsg *ifm,
3339 const struct rtnl_link_ops *ops,
3340 struct nlattr **tb, struct nlattr **data,
3341 struct netlink_ext_ack *extack)
3343 unsigned char name_assign_type = NET_NAME_USER;
3344 struct net *net = sock_net(skb->sk);
3345 struct net *dest_net, *link_net;
3346 struct net_device *dev;
3347 char ifname[IFNAMSIZ];
3350 if (!ops->alloc && !ops->setup)
3353 if (tb[IFLA_IFNAME]) {
3354 nla_strscpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ);
3356 snprintf(ifname, IFNAMSIZ, "%s%%d", ops->kind);
3357 name_assign_type = NET_NAME_ENUM;
3360 dest_net = rtnl_link_get_net_capable(skb, net, tb, CAP_NET_ADMIN);
3361 if (IS_ERR(dest_net))
3362 return PTR_ERR(dest_net);
3364 if (tb[IFLA_LINK_NETNSID]) {
3365 int id = nla_get_s32(tb[IFLA_LINK_NETNSID]);
3367 link_net = get_net_ns_by_id(dest_net, id);
3369 NL_SET_ERR_MSG(extack, "Unknown network namespace id");
3374 if (!netlink_ns_capable(skb, link_net->user_ns, CAP_NET_ADMIN))
3380 dev = rtnl_create_link(link_net ? : dest_net, ifname,
3381 name_assign_type, ops, tb, extack);
3387 dev->ifindex = ifm->ifi_index;
3390 err = ops->newlink(link_net ? : net, dev, tb, data, extack);
3392 err = register_netdevice(dev);
3398 err = rtnl_configure_link(dev, ifm);
3400 goto out_unregister;
3402 err = dev_change_net_namespace(dev, dest_net, ifname);
3404 goto out_unregister;
3406 if (tb[IFLA_MASTER]) {
3407 err = do_set_master(dev, nla_get_u32(tb[IFLA_MASTER]), extack);
3409 goto out_unregister;
3418 LIST_HEAD(list_kill);
3420 ops->dellink(dev, &list_kill);
3421 unregister_netdevice_many(&list_kill);
3423 unregister_netdevice(dev);
3428 struct rtnl_newlink_tbs {
3429 struct nlattr *tb[IFLA_MAX + 1];
3430 struct nlattr *attr[RTNL_MAX_TYPE + 1];
3431 struct nlattr *slave_attr[RTNL_SLAVE_MAX_TYPE + 1];
3434 static int __rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh,
3435 struct rtnl_newlink_tbs *tbs,
3436 struct netlink_ext_ack *extack)
3438 struct nlattr *linkinfo[IFLA_INFO_MAX + 1];
3439 struct nlattr ** const tb = tbs->tb;
3440 const struct rtnl_link_ops *m_ops;
3441 struct net_device *master_dev;
3442 struct net *net = sock_net(skb->sk);
3443 const struct rtnl_link_ops *ops;
3444 struct nlattr **slave_data;
3445 char kind[MODULE_NAME_LEN];
3446 struct net_device *dev;
3447 struct ifinfomsg *ifm;
3448 struct nlattr **data;
3449 bool link_specified;
3452 #ifdef CONFIG_MODULES
3455 err = nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFLA_MAX,
3456 ifla_policy, extack);
3460 err = rtnl_ensure_unique_netns(tb, extack, false);
3464 ifm = nlmsg_data(nlh);
3465 if (ifm->ifi_index > 0) {
3466 link_specified = true;
3467 dev = __dev_get_by_index(net, ifm->ifi_index);
3468 } else if (ifm->ifi_index < 0) {
3469 NL_SET_ERR_MSG(extack, "ifindex can't be negative");
3471 } else if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME]) {
3472 link_specified = true;
3473 dev = rtnl_dev_get(net, tb);
3475 link_specified = false;
3482 master_dev = netdev_master_upper_dev_get(dev);
3484 m_ops = master_dev->rtnl_link_ops;
3487 err = validate_linkmsg(dev, tb, extack);
3491 if (tb[IFLA_LINKINFO]) {
3492 err = nla_parse_nested_deprecated(linkinfo, IFLA_INFO_MAX,
3494 ifla_info_policy, NULL);
3498 memset(linkinfo, 0, sizeof(linkinfo));
3500 if (linkinfo[IFLA_INFO_KIND]) {
3501 nla_strscpy(kind, linkinfo[IFLA_INFO_KIND], sizeof(kind));
3502 ops = rtnl_link_ops_get(kind);
3510 if (ops->maxtype > RTNL_MAX_TYPE)
3513 if (ops->maxtype && linkinfo[IFLA_INFO_DATA]) {
3514 err = nla_parse_nested_deprecated(tbs->attr, ops->maxtype,
3515 linkinfo[IFLA_INFO_DATA],
3516 ops->policy, extack);
3521 if (ops->validate) {
3522 err = ops->validate(tb, data, extack);
3530 if (m_ops->slave_maxtype > RTNL_SLAVE_MAX_TYPE)
3533 if (m_ops->slave_maxtype &&
3534 linkinfo[IFLA_INFO_SLAVE_DATA]) {
3535 err = nla_parse_nested_deprecated(tbs->slave_attr,
3536 m_ops->slave_maxtype,
3537 linkinfo[IFLA_INFO_SLAVE_DATA],
3538 m_ops->slave_policy,
3542 slave_data = tbs->slave_attr;
3549 if (nlh->nlmsg_flags & NLM_F_EXCL)
3551 if (nlh->nlmsg_flags & NLM_F_REPLACE)
3554 if (linkinfo[IFLA_INFO_DATA]) {
3555 if (!ops || ops != dev->rtnl_link_ops ||
3559 err = ops->changelink(dev, tb, data, extack);
3562 status |= DO_SETLINK_NOTIFY;
3565 if (linkinfo[IFLA_INFO_SLAVE_DATA]) {
3566 if (!m_ops || !m_ops->slave_changelink)
3569 err = m_ops->slave_changelink(master_dev, dev, tb,
3570 slave_data, extack);
3573 status |= DO_SETLINK_NOTIFY;
3576 return do_setlink(skb, dev, ifm, extack, tb, status);
3579 if (!(nlh->nlmsg_flags & NLM_F_CREATE)) {
3580 /* No dev found and NLM_F_CREATE not set. Requested dev does not exist,
3581 * or it's for a group
3586 return rtnl_group_changelink(skb, net,
3587 nla_get_u32(tb[IFLA_GROUP]),
3592 if (tb[IFLA_MAP] || tb[IFLA_PROTINFO])
3596 #ifdef CONFIG_MODULES
3599 request_module("rtnl-link-%s", kind);
3601 ops = rtnl_link_ops_get(kind);
3606 NL_SET_ERR_MSG(extack, "Unknown device type");
3610 return rtnl_newlink_create(skb, ifm, ops, tb, data, extack);
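/*
 * rtnl_newlink() is a thin wrapper around __rtnl_newlink(); the
 * rtnl_newlink_tbs attribute tables are allocated from the heap,
 * presumably because they are too large to live on the kernel stack.
 */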
3613 static int rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh,
3614 struct netlink_ext_ack *extack)
3616 struct rtnl_newlink_tbs *tbs;
3619 tbs = kmalloc(sizeof(*tbs), GFP_KERNEL);
3623 ret = __rtnl_newlink(skb, nlh, tbs, extack);
3628 static int rtnl_valid_getlink_req(struct sk_buff *skb,
3629 const struct nlmsghdr *nlh,
3631 struct netlink_ext_ack *extack)
3633 struct ifinfomsg *ifm;
3636 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifm))) {
3637 NL_SET_ERR_MSG(extack, "Invalid header for get link");
3641 if (!netlink_strict_get_check(skb))
3642 return nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFLA_MAX,
3643 ifla_policy, extack);
3645 ifm = nlmsg_data(nlh);
3646 if (ifm->__ifi_pad || ifm->ifi_type || ifm->ifi_flags ||
3648 NL_SET_ERR_MSG(extack, "Invalid values in header for get link request");
3652 err = nlmsg_parse_deprecated_strict(nlh, sizeof(*ifm), tb, IFLA_MAX,
3653 ifla_policy, extack);
3657 for (i = 0; i <= IFLA_MAX; i++) {
3663 case IFLA_ALT_IFNAME:
3665 case IFLA_TARGET_NETNSID:
3668 NL_SET_ERR_MSG(extack, "Unsupported attribute in get link request");
3676 static int rtnl_getlink(struct sk_buff *skb, struct nlmsghdr *nlh,
3677 struct netlink_ext_ack *extack)
3679 struct net *net = sock_net(skb->sk);
3680 struct net *tgt_net = net;
3681 struct ifinfomsg *ifm;
3682 struct nlattr *tb[IFLA_MAX+1];
3683 struct net_device *dev = NULL;
3684 struct sk_buff *nskb;
3687 u32 ext_filter_mask = 0;
3689 err = rtnl_valid_getlink_req(skb, nlh, tb, extack);
3693 err = rtnl_ensure_unique_netns(tb, extack, true);
3697 if (tb[IFLA_TARGET_NETNSID]) {
3698 netnsid = nla_get_s32(tb[IFLA_TARGET_NETNSID]);
3699 tgt_net = rtnl_get_net_ns_capable(NETLINK_CB(skb).sk, netnsid);
3700 if (IS_ERR(tgt_net))
3701 return PTR_ERR(tgt_net);
3704 if (tb[IFLA_EXT_MASK])
3705 ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]);
3708 ifm = nlmsg_data(nlh);
3709 if (ifm->ifi_index > 0)
3710 dev = __dev_get_by_index(tgt_net, ifm->ifi_index);
3711 else if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME])
3712 dev = rtnl_dev_get(tgt_net, tb);
3721 nskb = nlmsg_new(if_nlmsg_size(dev, ext_filter_mask), GFP_KERNEL);
3725 err = rtnl_fill_ifinfo(nskb, dev, net,
3726 RTM_NEWLINK, NETLINK_CB(skb).portid,
3727 nlh->nlmsg_seq, 0, 0, ext_filter_mask,
3728 0, NULL, 0, netnsid, GFP_KERNEL);
3730 /* -EMSGSIZE implies BUG in if_nlmsg_size */
3731 WARN_ON(err == -EMSGSIZE);
3734 err = rtnl_unicast(nskb, net, NETLINK_CB(skb).portid);
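/*
 * Alternative interface names: RTM_NEWLINKPROP and RTM_DELLINKPROP carry
 * an IFLA_PROP_LIST nest of IFLA_ALT_IFNAME strings, as used by
 * "ip link property add dev eth0 altname foo" (illustrative command).
 * rtnl_alt_ifname() below adds or removes one such name, and the size
 * check apparently keeps the resulting property list small enough to fit
 * in a single netlink attribute on dumps.
 */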
3742 static int rtnl_alt_ifname(int cmd, struct net_device *dev, struct nlattr *attr,
3743 bool *changed, struct netlink_ext_ack *extack)
3749 err = nla_validate(attr, attr->nla_len, IFLA_MAX, ifla_policy, extack);
3753 if (cmd == RTM_NEWLINKPROP) {
3754 size = rtnl_prop_list_size(dev);
3755 size += nla_total_size(ALTIFNAMSIZ);
3756 if (size >= U16_MAX) {
3757 NL_SET_ERR_MSG(extack,
3758 "effective property list too long");
3763 alt_ifname = nla_strdup(attr, GFP_KERNEL_ACCOUNT);
3767 if (cmd == RTM_NEWLINKPROP) {
3768 err = netdev_name_node_alt_create(dev, alt_ifname);
3771 } else if (cmd == RTM_DELLINKPROP) {
3772 err = netdev_name_node_alt_destroy(dev, alt_ifname);
3784 static int rtnl_linkprop(int cmd, struct sk_buff *skb, struct nlmsghdr *nlh,
3785 struct netlink_ext_ack *extack)
3787 struct net *net = sock_net(skb->sk);
3788 struct nlattr *tb[IFLA_MAX + 1];
3789 struct net_device *dev;
3790 struct ifinfomsg *ifm;
3791 bool changed = false;
3792 struct nlattr *attr;
3795 err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy, extack);
3799 err = rtnl_ensure_unique_netns(tb, extack, true);
3803 ifm = nlmsg_data(nlh);
3804 if (ifm->ifi_index > 0)
3805 dev = __dev_get_by_index(net, ifm->ifi_index);
3806 else if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME])
3807 dev = rtnl_dev_get(net, tb);
3814 if (!tb[IFLA_PROP_LIST])
3817 nla_for_each_nested(attr, tb[IFLA_PROP_LIST], rem) {
3818 switch (nla_type(attr)) {
3819 case IFLA_ALT_IFNAME:
3820 err = rtnl_alt_ifname(cmd, dev, attr, &changed, extack);
3828 netdev_state_change(dev);
3832 static int rtnl_newlinkprop(struct sk_buff *skb, struct nlmsghdr *nlh,
3833 struct netlink_ext_ack *extack)
3835 return rtnl_linkprop(RTM_NEWLINKPROP, skb, nlh, extack);
3838 static int rtnl_dellinkprop(struct sk_buff *skb, struct nlmsghdr *nlh,
3839 struct netlink_ext_ack *extack)
3841 return rtnl_linkprop(RTM_DELLINKPROP, skb, nlh, extack);
3844 static u32 rtnl_calcit(struct sk_buff *skb, struct nlmsghdr *nlh)
3846 struct net *net = sock_net(skb->sk);
3847 size_t min_ifinfo_dump_size = 0;
3848 struct nlattr *tb[IFLA_MAX+1];
3849 u32 ext_filter_mask = 0;
3850 struct net_device *dev;
3853 /* Same kernel<->userspace interface hack as in rtnl_dump_ifinfo. */
3854 hdrlen = nlmsg_len(nlh) < sizeof(struct ifinfomsg) ?
3855 sizeof(struct rtgenmsg) : sizeof(struct ifinfomsg);
3857 if (nlmsg_parse_deprecated(nlh, hdrlen, tb, IFLA_MAX, ifla_policy, NULL) >= 0) {
3858 if (tb[IFLA_EXT_MASK])
3859 ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]);
3862 if (!ext_filter_mask)
3863 return NLMSG_GOODSIZE;
3864 /*
3865 * traverse the list of net devices and compute the minimum
3866 * buffer size based upon the filter mask.
3869 for_each_netdev_rcu(net, dev) {
3870 min_ifinfo_dump_size = max(min_ifinfo_dump_size,
3871 if_nlmsg_size(dev, ext_filter_mask));
3875 return nlmsg_total_size(min_ifinfo_dump_size);
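/*
 * rtnl_dump_all() is the fallback dump handler registered for PF_UNSPEC:
 * it walks every address family that registered a dumpit for this
 * message type and invokes them one after another, resetting cb->args
 * between families.
 */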
3878 static int rtnl_dump_all(struct sk_buff *skb, struct netlink_callback *cb)
3881 int s_idx = cb->family;
3882 int type = cb->nlh->nlmsg_type - RTM_BASE;
3888 for (idx = 1; idx <= RTNL_FAMILY_MAX; idx++) {
3889 struct rtnl_link __rcu **tab;
3890 struct rtnl_link *link;
3891 rtnl_dumpit_func dumpit;
3893 if (idx < s_idx || idx == PF_PACKET)
3896 if (type < 0 || type >= RTM_NR_MSGTYPES)
3899 tab = rcu_dereference_rtnl(rtnl_msg_handlers[idx]);
3903 link = rcu_dereference_rtnl(tab[type]);
3907 dumpit = link->dumpit;
3912 memset(&cb->args[0], 0, sizeof(cb->args));
3916 ret = dumpit(skb, cb);
3922 return skb->len ? : ret;
3925 struct sk_buff *rtmsg_ifinfo_build_skb(int type, struct net_device *dev,
3926 unsigned int change,
3927 u32 event, gfp_t flags, int *new_nsid,
3930 struct net *net = dev_net(dev);
3931 struct sk_buff *skb;
3934 skb = nlmsg_new(if_nlmsg_size(dev, 0), flags);
3938 err = rtnl_fill_ifinfo(skb, dev, dev_net(dev),
3939 type, 0, 0, change, 0, 0, event,
3940 new_nsid, new_ifindex, -1, flags);
3942 /* -EMSGSIZE implies BUG in if_nlmsg_size() */
3943 WARN_ON(err == -EMSGSIZE);
3950 rtnl_set_sk_err(net, RTNLGRP_LINK, err);
3954 void rtmsg_ifinfo_send(struct sk_buff *skb, struct net_device *dev, gfp_t flags)
3956 struct net *net = dev_net(dev);
3958 rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, flags);
3961 static void rtmsg_ifinfo_event(int type, struct net_device *dev,
3962 unsigned int change, u32 event,
3963 gfp_t flags, int *new_nsid, int new_ifindex)
3965 struct sk_buff *skb;
3967 if (dev->reg_state != NETREG_REGISTERED)
3970 skb = rtmsg_ifinfo_build_skb(type, dev, change, event, flags, new_nsid,
3973 rtmsg_ifinfo_send(skb, dev, flags);
3976 void rtmsg_ifinfo(int type, struct net_device *dev, unsigned int change,
3979 rtmsg_ifinfo_event(type, dev, change, rtnl_get_event(0), flags,
3983 void rtmsg_ifinfo_newnet(int type, struct net_device *dev, unsigned int change,
3984 gfp_t flags, int *new_nsid, int new_ifindex)
3986 rtmsg_ifinfo_event(type, dev, change, rtnl_get_event(0), flags,
3987 new_nsid, new_ifindex);
3990 static int nlmsg_populate_fdb_fill(struct sk_buff *skb,
3991 struct net_device *dev,
3992 u8 *addr, u16 vid, u32 pid, u32 seq,
3993 int type, unsigned int flags,
3994 int nlflags, u16 ndm_state)
3996 struct nlmsghdr *nlh;
3999 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), nlflags);
4003 ndm = nlmsg_data(nlh);
4004 ndm->ndm_family = AF_BRIDGE;
4007 ndm->ndm_flags = flags;
4009 ndm->ndm_ifindex = dev->ifindex;
4010 ndm->ndm_state = ndm_state;
4012 if (nla_put(skb, NDA_LLADDR, dev->addr_len, addr))
4013 goto nla_put_failure;
4015 if (nla_put(skb, NDA_VLAN, sizeof(u16), &vid))
4016 goto nla_put_failure;
4018 nlmsg_end(skb, nlh);
4022 nlmsg_cancel(skb, nlh);
4026 static inline size_t rtnl_fdb_nlmsg_size(const struct net_device *dev)
4028 return NLMSG_ALIGN(sizeof(struct ndmsg)) +
4029 nla_total_size(dev->addr_len) + /* NDA_LLADDR */
4030 nla_total_size(sizeof(u16)) + /* NDA_VLAN */
4034 static void rtnl_fdb_notify(struct net_device *dev, u8 *addr, u16 vid, int type,
4037 struct net *net = dev_net(dev);
4038 struct sk_buff *skb;
4041 skb = nlmsg_new(rtnl_fdb_nlmsg_size(dev), GFP_ATOMIC);
4045 err = nlmsg_populate_fdb_fill(skb, dev, addr, vid,
4046 0, 0, type, NTF_SELF, 0, ndm_state);
4052 rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
4055 rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
4059 * ndo_dflt_fdb_add - default netdevice operation to add an FDB entry
4061 int ndo_dflt_fdb_add(struct ndmsg *ndm,
4062 struct nlattr *tb[],
4063 struct net_device *dev,
4064 const unsigned char *addr, u16 vid,
4069 /* If aging addresses are supported, the device will need to
4070 * implement its own handler for this.
4072 if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
4073 netdev_info(dev, "default FDB implementation only supports local addresses\n");
4078 netdev_info(dev, "vlans aren't supported yet for dev_uc|mc_add()\n");
4082 if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
4083 err = dev_uc_add_excl(dev, addr);
4084 else if (is_multicast_ether_addr(addr))
4085 err = dev_mc_add_excl(dev, addr);
4087 /* Only return duplicate errors if NLM_F_EXCL is set */
4088 if (err == -EEXIST && !(flags & NLM_F_EXCL))
4093 EXPORT_SYMBOL(ndo_dflt_fdb_add);
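/*
 * fdb_vid_parse() accepts an optional NDA_VLAN attribute; when present
 * it must be exactly a u16 and hold a VLAN id in the 1..4094 range
 * (0 and values >= VLAN_VID_MASK are rejected).
 */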
4095 static int fdb_vid_parse(struct nlattr *vlan_attr, u16 *p_vid,
4096 struct netlink_ext_ack *extack)
4101 if (nla_len(vlan_attr) != sizeof(u16)) {
4102 NL_SET_ERR_MSG(extack, "invalid vlan attribute size");
4106 vid = nla_get_u16(vlan_attr);
4108 if (!vid || vid >= VLAN_VID_MASK) {
4109 NL_SET_ERR_MSG(extack, "invalid vlan id");
4117 static int rtnl_fdb_add(struct sk_buff *skb, struct nlmsghdr *nlh,
4118 struct netlink_ext_ack *extack)
4120 struct net *net = sock_net(skb->sk);
4122 struct nlattr *tb[NDA_MAX+1];
4123 struct net_device *dev;
4128 err = nlmsg_parse_deprecated(nlh, sizeof(*ndm), tb, NDA_MAX, NULL,
4133 ndm = nlmsg_data(nlh);
4134 if (ndm->ndm_ifindex == 0) {
4135 NL_SET_ERR_MSG(extack, "invalid ifindex");
4139 dev = __dev_get_by_index(net, ndm->ndm_ifindex);
4141 NL_SET_ERR_MSG(extack, "unknown ifindex");
4145 if (!tb[NDA_LLADDR] || nla_len(tb[NDA_LLADDR]) != ETH_ALEN) {
4146 NL_SET_ERR_MSG(extack, "invalid address");
4150 if (dev->type != ARPHRD_ETHER) {
4151 NL_SET_ERR_MSG(extack, "FDB add only supported for Ethernet devices");
4155 addr = nla_data(tb[NDA_LLADDR]);
4157 err = fdb_vid_parse(tb[NDA_VLAN], &vid, extack);
4163 /* Support fdb on the master device; the net/bridge default case */
4164 if ((!ndm->ndm_flags || ndm->ndm_flags & NTF_MASTER) &&
4165 netif_is_bridge_port(dev)) {
4166 struct net_device *br_dev = netdev_master_upper_dev_get(dev);
4167 const struct net_device_ops *ops = br_dev->netdev_ops;
4169 err = ops->ndo_fdb_add(ndm, tb, dev, addr, vid,
4170 nlh->nlmsg_flags, extack);
4174 ndm->ndm_flags &= ~NTF_MASTER;
4177 /* Embedded bridge, macvlan, and any other device support */
4178 if ((ndm->ndm_flags & NTF_SELF)) {
4179 if (dev->netdev_ops->ndo_fdb_add)
4180 err = dev->netdev_ops->ndo_fdb_add(ndm, tb, dev, addr,
4185 err = ndo_dflt_fdb_add(ndm, tb, dev, addr, vid,
4189 rtnl_fdb_notify(dev, addr, vid, RTM_NEWNEIGH,
4191 ndm->ndm_flags &= ~NTF_SELF;
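/*
 * FDB requests are dispatched on NTF_MASTER / NTF_SELF, for both add and
 * delete: with no flag or NTF_MASTER the entry is offered to the bridge
 * master's ndo_fdb_add/del, with NTF_SELF to the device itself (or to
 * the ndo_dflt_fdb_* helpers).  With bridge(8) this maps to e.g.
 * "bridge fdb add <mac> dev eth0 master" versus "... dev eth0 self"
 * (illustrative commands).
 */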
4199 * ndo_dflt_fdb_del - default netdevice operation to delete an FDB entry
4201 int ndo_dflt_fdb_del(struct ndmsg *ndm,
4202 struct nlattr *tb[],
4203 struct net_device *dev,
4204 const unsigned char *addr, u16 vid)
4208 /* If aging addresses are supported, the device will need to
4209 * implement its own handler for this.
4211 if (!(ndm->ndm_state & NUD_PERMANENT)) {
4212 netdev_info(dev, "default FDB implementation only supports local addresses\n");
4216 if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
4217 err = dev_uc_del(dev, addr);
4218 else if (is_multicast_ether_addr(addr))
4219 err = dev_mc_del(dev, addr);
4223 EXPORT_SYMBOL(ndo_dflt_fdb_del);
4225 static const struct nla_policy fdb_del_bulk_policy[NDA_MAX + 1] = {
4226 [NDA_VLAN] = { .type = NLA_U16 },
4227 [NDA_IFINDEX] = NLA_POLICY_MIN(NLA_S32, 1),
4228 [NDA_NDM_STATE_MASK] = { .type = NLA_U16 },
4229 [NDA_NDM_FLAGS_MASK] = { .type = NLA_U8 },
4232 static int rtnl_fdb_del(struct sk_buff *skb, struct nlmsghdr *nlh,
4233 struct netlink_ext_ack *extack)
4235 bool del_bulk = !!(nlh->nlmsg_flags & NLM_F_BULK);
4236 struct net *net = sock_net(skb->sk);
4237 const struct net_device_ops *ops;
4239 struct nlattr *tb[NDA_MAX+1];
4240 struct net_device *dev;
4245 if (!netlink_capable(skb, CAP_NET_ADMIN))
4249 err = nlmsg_parse_deprecated(nlh, sizeof(*ndm), tb, NDA_MAX,
4252 err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX,
4253 fdb_del_bulk_policy, extack);
4258 ndm = nlmsg_data(nlh);
4259 if (ndm->ndm_ifindex == 0) {
4260 NL_SET_ERR_MSG(extack, "invalid ifindex");
4264 dev = __dev_get_by_index(net, ndm->ndm_ifindex);
4266 NL_SET_ERR_MSG(extack, "unknown ifindex");
4271 if (!tb[NDA_LLADDR] || nla_len(tb[NDA_LLADDR]) != ETH_ALEN) {
4272 NL_SET_ERR_MSG(extack, "invalid address");
4275 addr = nla_data(tb[NDA_LLADDR]);
4278 if (dev->type != ARPHRD_ETHER) {
4279 NL_SET_ERR_MSG(extack, "FDB delete only supported for Ethernet devices");
4283 err = fdb_vid_parse(tb[NDA_VLAN], &vid, extack);
4289 /* Support fdb on the master device; the net/bridge default case */
4290 if ((!ndm->ndm_flags || ndm->ndm_flags & NTF_MASTER) &&
4291 netif_is_bridge_port(dev)) {
4292 struct net_device *br_dev = netdev_master_upper_dev_get(dev);
4294 ops = br_dev->netdev_ops;
4296 if (ops->ndo_fdb_del)
4297 err = ops->ndo_fdb_del(ndm, tb, dev, addr, vid, extack);
4299 if (ops->ndo_fdb_del_bulk)
4300 err = ops->ndo_fdb_del_bulk(ndm, tb, dev, vid,
4307 ndm->ndm_flags &= ~NTF_MASTER;
4310 /* Embedded bridge, macvlan, and any other device support */
4311 if (ndm->ndm_flags & NTF_SELF) {
4312 ops = dev->netdev_ops;
4314 if (ops->ndo_fdb_del)
4315 err = ops->ndo_fdb_del(ndm, tb, dev, addr, vid, extack);
4317 err = ndo_dflt_fdb_del(ndm, tb, dev, addr, vid);
4319 /* in case err was cleared by NTF_MASTER call */
4321 if (ops->ndo_fdb_del_bulk)
4322 err = ops->ndo_fdb_del_bulk(ndm, tb, dev, vid,
4328 rtnl_fdb_notify(dev, addr, vid, RTM_DELNEIGH,
4330 ndm->ndm_flags &= ~NTF_SELF;
4337 static int nlmsg_populate_fdb(struct sk_buff *skb,
4338 struct netlink_callback *cb,
4339 struct net_device *dev,
4341 struct netdev_hw_addr_list *list)
4343 struct netdev_hw_addr *ha;
4347 portid = NETLINK_CB(cb->skb).portid;
4348 seq = cb->nlh->nlmsg_seq;
4350 list_for_each_entry(ha, &list->list, list) {
4351 if (*idx < cb->args[2])
4354 err = nlmsg_populate_fdb_fill(skb, dev, ha->addr, 0,
4356 RTM_NEWNEIGH, NTF_SELF,
4357 NLM_F_MULTI, NUD_PERMANENT);
4367 * ndo_dflt_fdb_dump - default netdevice operation to dump an FDB table.
4368 * @skb: socket buffer to store message in
4369 * @cb: netlink callback
4370 * @dev: netdevice
4371 * @filter_dev: ignored
4372 * @idx: the number of FDB table entries dumped is added to *@idx
4374 * Default netdevice operation to dump the existing unicast address list.
4375 * Returns number of addresses from list put in skb.
4377 int ndo_dflt_fdb_dump(struct sk_buff *skb,
4378 struct netlink_callback *cb,
4379 struct net_device *dev,
4380 struct net_device *filter_dev,
4385 if (dev->type != ARPHRD_ETHER)
4388 netif_addr_lock_bh(dev);
4389 err = nlmsg_populate_fdb(skb, cb, dev, idx, &dev->uc);
4392 err = nlmsg_populate_fdb(skb, cb, dev, idx, &dev->mc);
4394 netif_addr_unlock_bh(dev);
4397 EXPORT_SYMBOL(ndo_dflt_fdb_dump);
4399 static int valid_fdb_dump_strict(const struct nlmsghdr *nlh,
4400 int *br_idx, int *brport_idx,
4401 struct netlink_ext_ack *extack)
4403 struct nlattr *tb[NDA_MAX + 1];
4407 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ndm))) {
4408 NL_SET_ERR_MSG(extack, "Invalid header for fdb dump request");
4412 ndm = nlmsg_data(nlh);
4413 if (ndm->ndm_pad1 || ndm->ndm_pad2 || ndm->ndm_state ||
4414 ndm->ndm_flags || ndm->ndm_type) {
4415 NL_SET_ERR_MSG(extack, "Invalid values in header for fdb dump request");
4419 err = nlmsg_parse_deprecated_strict(nlh, sizeof(struct ndmsg), tb,
4420 NDA_MAX, NULL, extack);
4424 *brport_idx = ndm->ndm_ifindex;
4425 for (i = 0; i <= NDA_MAX; ++i) {
4431 if (nla_len(tb[i]) != sizeof(u32)) {
4432 NL_SET_ERR_MSG(extack, "Invalid IFINDEX attribute in fdb dump request");
4435 *brport_idx = nla_get_u32(tb[NDA_IFINDEX]);
4438 if (nla_len(tb[i]) != sizeof(u32)) {
4439 NL_SET_ERR_MSG(extack, "Invalid MASTER attribute in fdb dump request");
4442 *br_idx = nla_get_u32(tb[NDA_MASTER]);
4445 NL_SET_ERR_MSG(extack, "Unsupported attribute in fdb dump request");
4453 static int valid_fdb_dump_legacy(const struct nlmsghdr *nlh,
4454 int *br_idx, int *brport_idx,
4455 struct netlink_ext_ack *extack)
4457 struct nlattr *tb[IFLA_MAX+1];
4460 /* A hack to preserve kernel<->userspace interface.
4461 * Before Linux v4.12 this code accepted ndmsg since iproute2 v3.3.0.
4462 * However, ndmsg is shorter than ifinfomsg thus nlmsg_parse() bails.
4463 * So, check for ndmsg with an optional u32 attribute (not used here).
4464 * Fortunately these sizes don't conflict with the size of ifinfomsg
4465 * with an optional attribute.
4467 if (nlmsg_len(nlh) != sizeof(struct ndmsg) &&
4468 (nlmsg_len(nlh) != sizeof(struct ndmsg) +
4469 nla_attr_size(sizeof(u32)))) {
4470 struct ifinfomsg *ifm;
4472 err = nlmsg_parse_deprecated(nlh, sizeof(struct ifinfomsg),
4473 tb, IFLA_MAX, ifla_policy,
4477 } else if (err == 0) {
4478 if (tb[IFLA_MASTER])
4479 *br_idx = nla_get_u32(tb[IFLA_MASTER]);
4482 ifm = nlmsg_data(nlh);
4483 *brport_idx = ifm->ifi_index;
4488 static int rtnl_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb)
4490 struct net_device *dev;
4491 struct net_device *br_dev = NULL;
4492 const struct net_device_ops *ops = NULL;
4493 const struct net_device_ops *cops = NULL;
4494 struct net *net = sock_net(skb->sk);
4495 struct hlist_head *head;
4503 if (cb->strict_check)
4504 err = valid_fdb_dump_strict(cb->nlh, &br_idx, &brport_idx,
4507 err = valid_fdb_dump_legacy(cb->nlh, &br_idx, &brport_idx,
4513 br_dev = __dev_get_by_index(net, br_idx);
4517 ops = br_dev->netdev_ops;
4521 s_idx = cb->args[1];
4523 for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
4525 head = &net->dev_index_head[h];
4526 hlist_for_each_entry(dev, head, index_hlist) {
4528 if (brport_idx && (dev->ifindex != brport_idx))
4531 if (!br_idx) { /* user did not specify a specific bridge */
4532 if (netif_is_bridge_port(dev)) {
4533 br_dev = netdev_master_upper_dev_get(dev);
4534 cops = br_dev->netdev_ops;
4537 if (dev != br_dev &&
4538 !netif_is_bridge_port(dev))
4541 if (br_dev != netdev_master_upper_dev_get(dev) &&
4542 !netif_is_bridge_master(dev))
4550 if (netif_is_bridge_port(dev)) {
4551 if (cops && cops->ndo_fdb_dump) {
4552 err = cops->ndo_fdb_dump(skb, cb,
4555 if (err == -EMSGSIZE)
4560 if (dev->netdev_ops->ndo_fdb_dump)
4561 err = dev->netdev_ops->ndo_fdb_dump(skb, cb,
4565 err = ndo_dflt_fdb_dump(skb, cb, dev, NULL,
4567 if (err == -EMSGSIZE)
4572 /* reset fdb offset to 0 for rest of the interfaces */
4588 static int valid_fdb_get_strict(const struct nlmsghdr *nlh,
4589 struct nlattr **tb, u8 *ndm_flags,
4590 int *br_idx, int *brport_idx, u8 **addr,
4591 u16 *vid, struct netlink_ext_ack *extack)
4596 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ndm))) {
4597 NL_SET_ERR_MSG(extack, "Invalid header for fdb get request");
4601 ndm = nlmsg_data(nlh);
4602 if (ndm->ndm_pad1 || ndm->ndm_pad2 || ndm->ndm_state ||
4604 NL_SET_ERR_MSG(extack, "Invalid values in header for fdb get request");
4608 if (ndm->ndm_flags & ~(NTF_MASTER | NTF_SELF)) {
4609 NL_SET_ERR_MSG(extack, "Invalid flags in header for fdb get request");
4613 err = nlmsg_parse_deprecated_strict(nlh, sizeof(struct ndmsg), tb,
4614 NDA_MAX, nda_policy, extack);
4618 *ndm_flags = ndm->ndm_flags;
4619 *brport_idx = ndm->ndm_ifindex;
4620 for (i = 0; i <= NDA_MAX; ++i) {
4626 *br_idx = nla_get_u32(tb[i]);
4629 if (nla_len(tb[i]) != ETH_ALEN) {
4630 NL_SET_ERR_MSG(extack, "Invalid address in fdb get request");
4633 *addr = nla_data(tb[i]);
4636 err = fdb_vid_parse(tb[i], vid, extack);
4643 NL_SET_ERR_MSG(extack, "Unsupported attribute in fdb get request");
4651 static int rtnl_fdb_get(struct sk_buff *in_skb, struct nlmsghdr *nlh,
4652 struct netlink_ext_ack *extack)
4654 struct net_device *dev = NULL, *br_dev = NULL;
4655 const struct net_device_ops *ops = NULL;
4656 struct net *net = sock_net(in_skb->sk);
4657 struct nlattr *tb[NDA_MAX + 1];
4658 struct sk_buff *skb;
4666 err = valid_fdb_get_strict(nlh, tb, &ndm_flags, &br_idx,
4667 &brport_idx, &addr, &vid, extack);
4672 NL_SET_ERR_MSG(extack, "Missing lookup address for fdb get request");
4677 dev = __dev_get_by_index(net, brport_idx);
4679 NL_SET_ERR_MSG(extack, "Unknown device ifindex");
4686 NL_SET_ERR_MSG(extack, "Master and device are mutually exclusive");
4690 br_dev = __dev_get_by_index(net, br_idx);
4692 NL_SET_ERR_MSG(extack, "Invalid master ifindex");
4695 ops = br_dev->netdev_ops;
4699 if (!ndm_flags || (ndm_flags & NTF_MASTER)) {
4700 if (!netif_is_bridge_port(dev)) {
4701 NL_SET_ERR_MSG(extack, "Device is not a bridge port");
4704 br_dev = netdev_master_upper_dev_get(dev);
4706 NL_SET_ERR_MSG(extack, "Master of device not found");
4709 ops = br_dev->netdev_ops;
4711 if (!(ndm_flags & NTF_SELF)) {
4712 NL_SET_ERR_MSG(extack, "Missing NTF_SELF");
4715 ops = dev->netdev_ops;
4719 if (!br_dev && !dev) {
4720 NL_SET_ERR_MSG(extack, "No device specified");
4724 if (!ops || !ops->ndo_fdb_get) {
4725 NL_SET_ERR_MSG(extack, "Fdb get operation not supported by device");
4729 skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
4735 err = ops->ndo_fdb_get(skb, tb, dev, addr, vid,
4736 NETLINK_CB(in_skb).portid,
4737 nlh->nlmsg_seq, extack);
4741 return rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
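/*
 * ndo_dflt_bridge_getlink() builds the AF_BRIDGE RTM_NEWLINK reply used
 * by bridge port drivers: basic ifinfomsg fields, an IFLA_AF_SPEC nest
 * with BRIDGE_FLAGS_SELF, the optional bridge mode and VLAN info from
 * the vlan_fill callback, and an IFLA_PROTINFO nest exposing the per
 * port flags via brport_nla_put_flag().
 */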
4747 static int brport_nla_put_flag(struct sk_buff *skb, u32 flags, u32 mask,
4748 unsigned int attrnum, unsigned int flag)
4751 return nla_put_u8(skb, attrnum, !!(flags & flag));
4755 int ndo_dflt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
4756 struct net_device *dev, u16 mode,
4757 u32 flags, u32 mask, int nlflags,
4759 int (*vlan_fill)(struct sk_buff *skb,
4760 struct net_device *dev,
4763 struct nlmsghdr *nlh;
4764 struct ifinfomsg *ifm;
4765 struct nlattr *br_afspec;
4766 struct nlattr *protinfo;
4767 u8 operstate = netif_running(dev) ? dev->operstate : IF_OPER_DOWN;
4768 struct net_device *br_dev = netdev_master_upper_dev_get(dev);
4771 nlh = nlmsg_put(skb, pid, seq, RTM_NEWLINK, sizeof(*ifm), nlflags);
4775 ifm = nlmsg_data(nlh);
4776 ifm->ifi_family = AF_BRIDGE;
4778 ifm->ifi_type = dev->type;
4779 ifm->ifi_index = dev->ifindex;
4780 ifm->ifi_flags = dev_get_flags(dev);
4781 ifm->ifi_change = 0;
4784 if (nla_put_string(skb, IFLA_IFNAME, dev->name) ||
4785 nla_put_u32(skb, IFLA_MTU, dev->mtu) ||
4786 nla_put_u8(skb, IFLA_OPERSTATE, operstate) ||
4788 nla_put_u32(skb, IFLA_MASTER, br_dev->ifindex)) ||
4790 nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr)) ||
4791 (dev->ifindex != dev_get_iflink(dev) &&
4792 nla_put_u32(skb, IFLA_LINK, dev_get_iflink(dev))))
4793 goto nla_put_failure;
4795 br_afspec = nla_nest_start_noflag(skb, IFLA_AF_SPEC);
4797 goto nla_put_failure;
4799 if (nla_put_u16(skb, IFLA_BRIDGE_FLAGS, BRIDGE_FLAGS_SELF)) {
4800 nla_nest_cancel(skb, br_afspec);
4801 goto nla_put_failure;
4804 if (mode != BRIDGE_MODE_UNDEF) {
4805 if (nla_put_u16(skb, IFLA_BRIDGE_MODE, mode)) {
4806 nla_nest_cancel(skb, br_afspec);
4807 goto nla_put_failure;
4811 err = vlan_fill(skb, dev, filter_mask);
4813 nla_nest_cancel(skb, br_afspec);
4814 goto nla_put_failure;
4817 nla_nest_end(skb, br_afspec);
4819 protinfo = nla_nest_start(skb, IFLA_PROTINFO);
4821 goto nla_put_failure;
4823 if (brport_nla_put_flag(skb, flags, mask,
4824 IFLA_BRPORT_MODE, BR_HAIRPIN_MODE) ||
4825 brport_nla_put_flag(skb, flags, mask,
4826 IFLA_BRPORT_GUARD, BR_BPDU_GUARD) ||
4827 brport_nla_put_flag(skb, flags, mask,
4828 IFLA_BRPORT_FAST_LEAVE,
4829 BR_MULTICAST_FAST_LEAVE) ||
4830 brport_nla_put_flag(skb, flags, mask,
4831 IFLA_BRPORT_PROTECT, BR_ROOT_BLOCK) ||
4832 brport_nla_put_flag(skb, flags, mask,
4833 IFLA_BRPORT_LEARNING, BR_LEARNING) ||
4834 brport_nla_put_flag(skb, flags, mask,
4835 IFLA_BRPORT_LEARNING_SYNC, BR_LEARNING_SYNC) ||
4836 brport_nla_put_flag(skb, flags, mask,
4837 IFLA_BRPORT_UNICAST_FLOOD, BR_FLOOD) ||
4838 brport_nla_put_flag(skb, flags, mask,
4839 IFLA_BRPORT_PROXYARP, BR_PROXYARP) ||
4840 brport_nla_put_flag(skb, flags, mask,
4841 IFLA_BRPORT_MCAST_FLOOD, BR_MCAST_FLOOD) ||
4842 brport_nla_put_flag(skb, flags, mask,
4843 IFLA_BRPORT_BCAST_FLOOD, BR_BCAST_FLOOD)) {
4844 nla_nest_cancel(skb, protinfo);
4845 goto nla_put_failure;
4848 nla_nest_end(skb, protinfo);
4850 nlmsg_end(skb, nlh);
4853 nlmsg_cancel(skb, nlh);
4854 return err ? err : -EMSGSIZE;
4856 EXPORT_SYMBOL_GPL(ndo_dflt_bridge_getlink);
4858 static int valid_bridge_getlink_req(const struct nlmsghdr *nlh,
4859 bool strict_check, u32 *filter_mask,
4860 struct netlink_ext_ack *extack)
4862 struct nlattr *tb[IFLA_MAX+1];
4866 struct ifinfomsg *ifm;
4868 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifm))) {
4869 NL_SET_ERR_MSG(extack, "Invalid header for bridge link dump");
4873 ifm = nlmsg_data(nlh);
4874 if (ifm->__ifi_pad || ifm->ifi_type || ifm->ifi_flags ||
4875 ifm->ifi_change || ifm->ifi_index) {
4876 NL_SET_ERR_MSG(extack, "Invalid values in header for bridge link dump request");
4880 err = nlmsg_parse_deprecated_strict(nlh,
4881 sizeof(struct ifinfomsg),
4882 tb, IFLA_MAX, ifla_policy,
4885 err = nlmsg_parse_deprecated(nlh, sizeof(struct ifinfomsg),
4886 tb, IFLA_MAX, ifla_policy,
4892 /* new attributes should only be added with strict checking */
4893 for (i = 0; i <= IFLA_MAX; ++i) {
4899 *filter_mask = nla_get_u32(tb[i]);
4903 NL_SET_ERR_MSG(extack, "Unsupported attribute in bridge link dump request");
4912 static int rtnl_bridge_getlink(struct sk_buff *skb, struct netlink_callback *cb)
4914 const struct nlmsghdr *nlh = cb->nlh;
4915 struct net *net = sock_net(skb->sk);
4916 struct net_device *dev;
4918 u32 portid = NETLINK_CB(cb->skb).portid;
4919 u32 seq = nlh->nlmsg_seq;
4920 u32 filter_mask = 0;
4923 err = valid_bridge_getlink_req(nlh, cb->strict_check, &filter_mask,
4925 if (err < 0 && cb->strict_check)
4929 for_each_netdev_rcu(net, dev) {
4930 const struct net_device_ops *ops = dev->netdev_ops;
4931 struct net_device *br_dev = netdev_master_upper_dev_get(dev);
4933 if (br_dev && br_dev->netdev_ops->ndo_bridge_getlink) {
4934 if (idx >= cb->args[0]) {
4935 err = br_dev->netdev_ops->ndo_bridge_getlink(
4936 skb, portid, seq, dev,
4937 filter_mask, NLM_F_MULTI);
4938 if (err < 0 && err != -EOPNOTSUPP) {
4939 if (likely(skb->len))
4948 if (ops->ndo_bridge_getlink) {
4949 if (idx >= cb->args[0]) {
4950 err = ops->ndo_bridge_getlink(skb, portid,
4954 if (err < 0 && err != -EOPNOTSUPP) {
4955 if (likely(skb->len))
4972 static inline size_t bridge_nlmsg_size(void)
4974 return NLMSG_ALIGN(sizeof(struct ifinfomsg))
4975 + nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */
4976 + nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */
4977 + nla_total_size(sizeof(u32)) /* IFLA_MASTER */
4978 + nla_total_size(sizeof(u32)) /* IFLA_MTU */
4979 + nla_total_size(sizeof(u32)) /* IFLA_LINK */
4980 + nla_total_size(sizeof(u32)) /* IFLA_OPERSTATE */
4981 + nla_total_size(sizeof(u8)) /* IFLA_PROTINFO */
4982 + nla_total_size(sizeof(struct nlattr)) /* IFLA_AF_SPEC */
4983 + nla_total_size(sizeof(u16)) /* IFLA_BRIDGE_FLAGS */
4984 + nla_total_size(sizeof(u16)); /* IFLA_BRIDGE_MODE */
4987 static int rtnl_bridge_notify(struct net_device *dev)
4989 struct net *net = dev_net(dev);
4990 struct sk_buff *skb;
4991 int err = -EOPNOTSUPP;
4993 if (!dev->netdev_ops->ndo_bridge_getlink)
4996 skb = nlmsg_new(bridge_nlmsg_size(), GFP_ATOMIC);
5002 err = dev->netdev_ops->ndo_bridge_getlink(skb, 0, 0, dev, 0, 0);
5006 /* Notification info is only filled for bridge ports, not the bridge
5007 * device itself. Therefore, a zero notification length is valid and
5008 * should not result in an error.
5013 rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, GFP_ATOMIC);
5016 WARN_ON(err == -EMSGSIZE);
5019 rtnl_set_sk_err(net, RTNLGRP_LINK, err);
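/*
 * For RTM_SETLINK/RTM_DELLINK with family AF_BRIDGE the
 * IFLA_BRIDGE_FLAGS attribute steers the request: BRIDGE_FLAGS_MASTER
 * (or no flags) sends it to the bridge master's
 * ndo_bridge_setlink/dellink, BRIDGE_FLAGS_SELF to the port device
 * itself, and a successful self operation triggers rtnl_bridge_notify()
 * so listeners see the updated port state.
 */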
5023 static int rtnl_bridge_setlink(struct sk_buff *skb, struct nlmsghdr *nlh,
5024 struct netlink_ext_ack *extack)
5026 struct net *net = sock_net(skb->sk);
5027 struct ifinfomsg *ifm;
5028 struct net_device *dev;
5029 struct nlattr *br_spec, *attr, *br_flags_attr = NULL;
5030 int rem, err = -EOPNOTSUPP;
5033 if (nlmsg_len(nlh) < sizeof(*ifm))
5036 ifm = nlmsg_data(nlh);
5037 if (ifm->ifi_family != AF_BRIDGE)
5038 return -EPFNOSUPPORT;
5040 dev = __dev_get_by_index(net, ifm->ifi_index);
5042 NL_SET_ERR_MSG(extack, "unknown ifindex");
5046 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
5048 nla_for_each_nested(attr, br_spec, rem) {
5049 if (nla_type(attr) == IFLA_BRIDGE_FLAGS && !br_flags_attr) {
5050 if (nla_len(attr) < sizeof(flags))
5053 br_flags_attr = attr;
5054 flags = nla_get_u16(attr);
5057 if (nla_type(attr) == IFLA_BRIDGE_MODE) {
5058 if (nla_len(attr) < sizeof(u16))
5064 if (!flags || (flags & BRIDGE_FLAGS_MASTER)) {
5065 struct net_device *br_dev = netdev_master_upper_dev_get(dev);
5067 if (!br_dev || !br_dev->netdev_ops->ndo_bridge_setlink) {
5072 err = br_dev->netdev_ops->ndo_bridge_setlink(dev, nlh, flags,
5077 flags &= ~BRIDGE_FLAGS_MASTER;
5080 if ((flags & BRIDGE_FLAGS_SELF)) {
5081 if (!dev->netdev_ops->ndo_bridge_setlink)
5084 err = dev->netdev_ops->ndo_bridge_setlink(dev, nlh,
5088 flags &= ~BRIDGE_FLAGS_SELF;
5090 /* Generate event to notify upper layer of bridge
5091 * change
5092 */
5093 err = rtnl_bridge_notify(dev);
5098 memcpy(nla_data(br_flags_attr), &flags, sizeof(flags));
5103 static int rtnl_bridge_dellink(struct sk_buff *skb, struct nlmsghdr *nlh,
5104 struct netlink_ext_ack *extack)
5106 struct net *net = sock_net(skb->sk);
5107 struct ifinfomsg *ifm;
5108 struct net_device *dev;
5109 struct nlattr *br_spec, *attr = NULL;
5110 int rem, err = -EOPNOTSUPP;
5112 bool have_flags = false;
5114 if (nlmsg_len(nlh) < sizeof(*ifm))
5117 ifm = nlmsg_data(nlh);
5118 if (ifm->ifi_family != AF_BRIDGE)
5119 return -EPFNOSUPPORT;
5121 dev = __dev_get_by_index(net, ifm->ifi_index);
5123 NL_SET_ERR_MSG(extack, "unknown ifindex");
5127 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
5129 nla_for_each_nested(attr, br_spec, rem) {
5130 if (nla_type(attr) == IFLA_BRIDGE_FLAGS) {
5131 if (nla_len(attr) < sizeof(flags))
5135 flags = nla_get_u16(attr);
5141 if (!flags || (flags & BRIDGE_FLAGS_MASTER)) {
5142 struct net_device *br_dev = netdev_master_upper_dev_get(dev);
5144 if (!br_dev || !br_dev->netdev_ops->ndo_bridge_dellink) {
5149 err = br_dev->netdev_ops->ndo_bridge_dellink(dev, nlh, flags);
5153 flags &= ~BRIDGE_FLAGS_MASTER;
5156 if ((flags & BRIDGE_FLAGS_SELF)) {
5157 if (!dev->netdev_ops->ndo_bridge_dellink)
5160 err = dev->netdev_ops->ndo_bridge_dellink(dev, nlh,
5164 flags &= ~BRIDGE_FLAGS_SELF;
5166 /* Generate event to notify upper layer of bridge
5167 * change
5168 */
5169 err = rtnl_bridge_notify(dev);
5174 memcpy(nla_data(attr), &flags, sizeof(flags));
5179 static bool stats_attr_valid(unsigned int mask, int attrid, int idxattr)
5181 return (mask & IFLA_STATS_FILTER_BIT(attrid)) &&
5182 (!idxattr || idxattr == attrid);
5186 rtnl_offload_xstats_have_ndo(const struct net_device *dev, int attr_id)
5188 return dev->netdev_ops &&
5189 dev->netdev_ops->ndo_has_offload_stats &&
5190 dev->netdev_ops->ndo_get_offload_stats &&
5191 dev->netdev_ops->ndo_has_offload_stats(dev, attr_id);
5195 rtnl_offload_xstats_get_size_ndo(const struct net_device *dev, int attr_id)
5197 return rtnl_offload_xstats_have_ndo(dev, attr_id) ?
5198 sizeof(struct rtnl_link_stats64) : 0;
5202 rtnl_offload_xstats_fill_ndo(struct net_device *dev, int attr_id,
5203 struct sk_buff *skb)
5205 unsigned int size = rtnl_offload_xstats_get_size_ndo(dev, attr_id);
5206 struct nlattr *attr = NULL;
5213 attr = nla_reserve_64bit(skb, attr_id, size,
5214 IFLA_OFFLOAD_XSTATS_UNSPEC);
5218 attr_data = nla_data(attr);
5219 memset(attr_data, 0, size);
5221 err = dev->netdev_ops->ndo_get_offload_stats(attr_id, dev, attr_data);
5229 rtnl_offload_xstats_get_size_stats(const struct net_device *dev,
5230 enum netdev_offload_xstats_type type)
5232 bool enabled = netdev_offload_xstats_enabled(dev, type);
5234 return enabled ? sizeof(struct rtnl_hw_stats64) : 0;
5237 struct rtnl_offload_xstats_request_used {
5243 rtnl_offload_xstats_get_stats(struct net_device *dev,
5244 enum netdev_offload_xstats_type type,
5245 struct rtnl_offload_xstats_request_used *ru,
5246 struct rtnl_hw_stats64 *stats,
5247 struct netlink_ext_ack *extack)
5253 request = netdev_offload_xstats_enabled(dev, type);
5259 err = netdev_offload_xstats_get(dev, type, stats, &used, extack);
5265 ru->request = request;
5272 rtnl_offload_xstats_fill_hw_s_info_one(struct sk_buff *skb, int attr_id,
5273 struct rtnl_offload_xstats_request_used *ru)
5275 struct nlattr *nest;
5277 nest = nla_nest_start(skb, attr_id);
5281 if (nla_put_u8(skb, IFLA_OFFLOAD_XSTATS_HW_S_INFO_REQUEST, ru->request))
5282 goto nla_put_failure;
5284 if (nla_put_u8(skb, IFLA_OFFLOAD_XSTATS_HW_S_INFO_USED, ru->used))
5285 goto nla_put_failure;
5287 nla_nest_end(skb, nest);
5291 nla_nest_cancel(skb, nest);
5296 rtnl_offload_xstats_fill_hw_s_info(struct sk_buff *skb, struct net_device *dev,
5297 struct netlink_ext_ack *extack)
5299 enum netdev_offload_xstats_type t_l3 = NETDEV_OFFLOAD_XSTATS_TYPE_L3;
5300 struct rtnl_offload_xstats_request_used ru_l3;
5301 struct nlattr *nest;
5304 err = rtnl_offload_xstats_get_stats(dev, t_l3, &ru_l3, NULL, extack);
5308 nest = nla_nest_start(skb, IFLA_OFFLOAD_XSTATS_HW_S_INFO);
5312 if (rtnl_offload_xstats_fill_hw_s_info_one(skb,
5313 IFLA_OFFLOAD_XSTATS_L3_STATS,
5315 goto nla_put_failure;
5317 nla_nest_end(skb, nest);
5321 nla_nest_cancel(skb, nest);

static int rtnl_offload_xstats_fill(struct sk_buff *skb, struct net_device *dev,
				    int *prividx, u32 off_filter_mask,
				    struct netlink_ext_ack *extack)
{
	enum netdev_offload_xstats_type t_l3 = NETDEV_OFFLOAD_XSTATS_TYPE_L3;
	int attr_id_hw_s_info = IFLA_OFFLOAD_XSTATS_HW_S_INFO;
	int attr_id_l3_stats = IFLA_OFFLOAD_XSTATS_L3_STATS;
	int attr_id_cpu_hit = IFLA_OFFLOAD_XSTATS_CPU_HIT;
	bool have_data = false;
	int err;

	if (*prividx <= attr_id_cpu_hit &&
	    (off_filter_mask &
	     IFLA_STATS_FILTER_BIT(attr_id_cpu_hit))) {
		err = rtnl_offload_xstats_fill_ndo(dev, attr_id_cpu_hit, skb);
		if (!err) {
			have_data = true;
		} else if (err != -ENODATA) {
			*prividx = attr_id_cpu_hit;
			return err;
		}
	}

	if (*prividx <= attr_id_hw_s_info &&
	    (off_filter_mask & IFLA_STATS_FILTER_BIT(attr_id_hw_s_info))) {
		*prividx = attr_id_hw_s_info;

		err = rtnl_offload_xstats_fill_hw_s_info(skb, dev, extack);
		if (err)
			return err;

		have_data = true;
		*prividx = 0;
	}

	if (*prividx <= attr_id_l3_stats &&
	    (off_filter_mask & IFLA_STATS_FILTER_BIT(attr_id_l3_stats))) {
		unsigned int size_l3;
		struct nlattr *attr;

		*prividx = attr_id_l3_stats;

		size_l3 = rtnl_offload_xstats_get_size_stats(dev, t_l3);
		if (!size_l3)
			goto skip_l3_stats;
		attr = nla_reserve_64bit(skb, attr_id_l3_stats, size_l3,
					 IFLA_OFFLOAD_XSTATS_UNSPEC);
		if (!attr)
			return -EMSGSIZE;

		err = rtnl_offload_xstats_get_stats(dev, t_l3, NULL,
						    nla_data(attr), extack);
		if (err)
			return err;

		have_data = true;
skip_l3_stats:
		*prividx = 0;
	}

	if (!have_data)
		return -ENODATA;

	*prividx = 0;
	return 0;
}

static unsigned int
rtnl_offload_xstats_get_size_hw_s_info_one(const struct net_device *dev,
					   enum netdev_offload_xstats_type type)
{
	return nla_total_size(0) +
		/* IFLA_OFFLOAD_XSTATS_HW_S_INFO_REQUEST */
		nla_total_size(sizeof(u8)) +
		/* IFLA_OFFLOAD_XSTATS_HW_S_INFO_USED */
		nla_total_size(sizeof(u8)) +
		0;
}

static unsigned int
rtnl_offload_xstats_get_size_hw_s_info(const struct net_device *dev)
{
	enum netdev_offload_xstats_type t_l3 = NETDEV_OFFLOAD_XSTATS_TYPE_L3;

	return nla_total_size(0) +
		/* IFLA_OFFLOAD_XSTATS_L3_STATS */
		rtnl_offload_xstats_get_size_hw_s_info_one(dev, t_l3) +
		0;
}

static int rtnl_offload_xstats_get_size(const struct net_device *dev,
					u32 off_filter_mask)
{
	enum netdev_offload_xstats_type t_l3 = NETDEV_OFFLOAD_XSTATS_TYPE_L3;
	int attr_id_cpu_hit = IFLA_OFFLOAD_XSTATS_CPU_HIT;
	int nla_size = 0;
	int size;

	if (off_filter_mask &
	    IFLA_STATS_FILTER_BIT(attr_id_cpu_hit)) {
		size = rtnl_offload_xstats_get_size_ndo(dev, attr_id_cpu_hit);
		nla_size += nla_total_size_64bit(size);
	}

	if (off_filter_mask &
	    IFLA_STATS_FILTER_BIT(IFLA_OFFLOAD_XSTATS_HW_S_INFO))
		nla_size += rtnl_offload_xstats_get_size_hw_s_info(dev);

	if (off_filter_mask &
	    IFLA_STATS_FILTER_BIT(IFLA_OFFLOAD_XSTATS_L3_STATS)) {
		size = rtnl_offload_xstats_get_size_stats(dev, t_l3);
		nla_size += nla_total_size_64bit(size);
	}

	if (nla_size != 0)
		nla_size += nla_total_size(0);

	return nla_size;
}

struct rtnl_stats_dump_filters {
	/* mask[0] filters outer attributes. Then individual nests have their
	 * filtering mask at the index of the nested attribute.
	 */
	u32 mask[IFLA_STATS_MAX + 1];
};
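
/* Example: a request for hardware-offload info only would populate the
 * filters as
 *
 *	filters.mask[0] =
 *		IFLA_STATS_FILTER_BIT(IFLA_STATS_LINK_OFFLOAD_XSTATS);
 *	filters.mask[IFLA_STATS_LINK_OFFLOAD_XSTATS] =
 *		IFLA_STATS_FILTER_BIT(IFLA_OFFLOAD_XSTATS_HW_S_INFO);
 *
 * i.e. the outer mask selects which top-level nests are dumped and the
 * per-nest mask selects attributes inside them (compare
 * rtnl_offload_xstats_notify() below, which builds exactly this request).
 */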

static int rtnl_fill_statsinfo(struct sk_buff *skb, struct net_device *dev,
			       int type, u32 pid, u32 seq, u32 change,
			       unsigned int flags,
			       const struct rtnl_stats_dump_filters *filters,
			       int *idxattr, int *prividx,
			       struct netlink_ext_ack *extack)
{
	unsigned int filter_mask = filters->mask[0];
	struct if_stats_msg *ifsm;
	struct nlmsghdr *nlh;
	struct nlattr *attr;
	int s_prividx = *prividx;
	int err;

	ASSERT_RTNL();

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ifsm), flags);
	if (!nlh)
		return -EMSGSIZE;

	ifsm = nlmsg_data(nlh);
	ifsm->family = PF_UNSPEC;
	ifsm->pad1 = 0;
	ifsm->pad2 = 0;
	ifsm->ifindex = dev->ifindex;
	ifsm->filter_mask = filter_mask;

	if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_64, *idxattr)) {
		struct rtnl_link_stats64 *sp;

		attr = nla_reserve_64bit(skb, IFLA_STATS_LINK_64,
					 sizeof(struct rtnl_link_stats64),
					 IFLA_STATS_UNSPEC);
		if (!attr) {
			err = -EMSGSIZE;
			goto nla_put_failure;
		}

		sp = nla_data(attr);
		dev_get_stats(dev, sp);
	}

	if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_XSTATS, *idxattr)) {
		const struct rtnl_link_ops *ops = dev->rtnl_link_ops;

		if (ops && ops->fill_linkxstats) {
			*idxattr = IFLA_STATS_LINK_XSTATS;
			attr = nla_nest_start_noflag(skb,
						     IFLA_STATS_LINK_XSTATS);
			if (!attr) {
				err = -EMSGSIZE;
				goto nla_put_failure;
			}

			err = ops->fill_linkxstats(skb, dev, prividx, *idxattr);
			nla_nest_end(skb, attr);
			if (err)
				goto nla_put_failure;

			*idxattr = 0;
		}
	}

	if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_XSTATS_SLAVE,
			     *idxattr)) {
		const struct rtnl_link_ops *ops = NULL;
		const struct net_device *master;

		master = netdev_master_upper_dev_get(dev);
		if (master)
			ops = master->rtnl_link_ops;
		if (ops && ops->fill_linkxstats) {
			*idxattr = IFLA_STATS_LINK_XSTATS_SLAVE;
			attr = nla_nest_start_noflag(skb,
						     IFLA_STATS_LINK_XSTATS_SLAVE);
			if (!attr) {
				err = -EMSGSIZE;
				goto nla_put_failure;
			}

			err = ops->fill_linkxstats(skb, dev, prividx, *idxattr);
			nla_nest_end(skb, attr);
			if (err)
				goto nla_put_failure;

			*idxattr = 0;
		}
	}

	if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_OFFLOAD_XSTATS,
			     *idxattr)) {
		u32 off_filter_mask;

		off_filter_mask = filters->mask[IFLA_STATS_LINK_OFFLOAD_XSTATS];
		*idxattr = IFLA_STATS_LINK_OFFLOAD_XSTATS;
		attr = nla_nest_start_noflag(skb,
					     IFLA_STATS_LINK_OFFLOAD_XSTATS);
		if (!attr) {
			err = -EMSGSIZE;
			goto nla_put_failure;
		}

		err = rtnl_offload_xstats_fill(skb, dev, prividx,
					       off_filter_mask, extack);
		if (err == -ENODATA)
			nla_nest_cancel(skb, attr);
		else
			nla_nest_end(skb, attr);

		if (err && err != -ENODATA)
			goto nla_put_failure;
		*idxattr = 0;
	}

	if (stats_attr_valid(filter_mask, IFLA_STATS_AF_SPEC, *idxattr)) {
		struct rtnl_af_ops *af_ops;

		*idxattr = IFLA_STATS_AF_SPEC;
		attr = nla_nest_start_noflag(skb, IFLA_STATS_AF_SPEC);
		if (!attr) {
			err = -EMSGSIZE;
			goto nla_put_failure;
		}

		rcu_read_lock();
		list_for_each_entry_rcu(af_ops, &rtnl_af_ops, list) {
			if (af_ops->fill_stats_af) {
				struct nlattr *af;

				af = nla_nest_start_noflag(skb,
							   af_ops->family);
				if (!af) {
					rcu_read_unlock();
					err = -EMSGSIZE;
					goto nla_put_failure;
				}
				err = af_ops->fill_stats_af(skb, dev);

				if (err == -ENODATA) {
					nla_nest_cancel(skb, af);
				} else if (err < 0) {
					rcu_read_unlock();
					goto nla_put_failure;
				}

				nla_nest_end(skb, af);
			}
		}
		rcu_read_unlock();

		nla_nest_end(skb, attr);

		*idxattr = 0;
	}

	nlmsg_end(skb, nlh);

	return 0;

nla_put_failure:
	/* not a multi message or no progress mean a real error */
	if (!(flags & NLM_F_MULTI) || s_prividx == *prividx)
		nlmsg_cancel(skb, nlh);
	else
		nlmsg_end(skb, nlh);

	return err;
}
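
/* When rtnl_fill_statsinfo() hits -EMSGSIZE part way through, *idxattr and
 * *prividx record which top-level attribute and which nested position to
 * resume from.  The nla_put_failure path only cancels the message when this
 * is not an NLM_F_MULTI dump or when no progress was made; otherwise the
 * partially filled message is kept and the walk continues in the next dump
 * callback with the saved indices.
 */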

static size_t if_nlmsg_stats_size(const struct net_device *dev,
				  const struct rtnl_stats_dump_filters *filters)
{
	size_t size = NLMSG_ALIGN(sizeof(struct if_stats_msg));
	unsigned int filter_mask = filters->mask[0];

	if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_64, 0))
		size += nla_total_size_64bit(sizeof(struct rtnl_link_stats64));

	if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_XSTATS, 0)) {
		const struct rtnl_link_ops *ops = dev->rtnl_link_ops;
		int attr = IFLA_STATS_LINK_XSTATS;

		if (ops && ops->get_linkxstats_size) {
			size += nla_total_size(ops->get_linkxstats_size(dev,
									attr));
			/* for IFLA_STATS_LINK_XSTATS */
			size += nla_total_size(0);
		}
	}

	if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_XSTATS_SLAVE, 0)) {
		struct net_device *_dev = (struct net_device *)dev;
		const struct rtnl_link_ops *ops = NULL;
		const struct net_device *master;

		/* netdev_master_upper_dev_get can't take const */
		master = netdev_master_upper_dev_get(_dev);
		if (master)
			ops = master->rtnl_link_ops;
		if (ops && ops->get_linkxstats_size) {
			int attr = IFLA_STATS_LINK_XSTATS_SLAVE;

			size += nla_total_size(ops->get_linkxstats_size(dev,
									attr));
			/* for IFLA_STATS_LINK_XSTATS_SLAVE */
			size += nla_total_size(0);
		}
	}

	if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_OFFLOAD_XSTATS, 0)) {
		u32 off_filter_mask;

		off_filter_mask = filters->mask[IFLA_STATS_LINK_OFFLOAD_XSTATS];
		size += rtnl_offload_xstats_get_size(dev, off_filter_mask);
	}

	if (stats_attr_valid(filter_mask, IFLA_STATS_AF_SPEC, 0)) {
		struct rtnl_af_ops *af_ops;

		/* for IFLA_STATS_AF_SPEC */
		size += nla_total_size(0);

		rcu_read_lock();
		list_for_each_entry_rcu(af_ops, &rtnl_af_ops, list) {
			if (af_ops->get_stats_af_size) {
				size += nla_total_size(
					af_ops->get_stats_af_size(dev));

				/* for AF_* */
				size += nla_total_size(0);
			}
		}
		rcu_read_unlock();
	}

	return size;
}

#define RTNL_STATS_OFFLOAD_XSTATS_VALID ((1 << __IFLA_OFFLOAD_XSTATS_MAX) - 1)

static const struct nla_policy
rtnl_stats_get_policy_filters[IFLA_STATS_MAX + 1] = {
	[IFLA_STATS_LINK_OFFLOAD_XSTATS] =
		    NLA_POLICY_MASK(NLA_U32, RTNL_STATS_OFFLOAD_XSTATS_VALID),
};

static const struct nla_policy
rtnl_stats_get_policy[IFLA_STATS_GETSET_MAX + 1] = {
	[IFLA_STATS_GET_FILTERS] =
		    NLA_POLICY_NESTED(rtnl_stats_get_policy_filters),
};

static const struct nla_policy
ifla_stats_set_policy[IFLA_STATS_GETSET_MAX + 1] = {
	[IFLA_STATS_SET_OFFLOAD_XSTATS_L3_STATS] = NLA_POLICY_MAX(NLA_U8, 1),
};

static int rtnl_stats_get_parse_filters(struct nlattr *ifla_filters,
					struct rtnl_stats_dump_filters *filters,
					struct netlink_ext_ack *extack)
{
	struct nlattr *tb[IFLA_STATS_MAX + 1];
	int err;
	int at;

	err = nla_parse_nested(tb, IFLA_STATS_MAX, ifla_filters,
			       rtnl_stats_get_policy_filters, extack);
	if (err < 0)
		return err;

	for (at = 1; at <= IFLA_STATS_MAX; at++) {
		if (tb[at]) {
			if (!(filters->mask[0] & IFLA_STATS_FILTER_BIT(at))) {
				NL_SET_ERR_MSG(extack, "Filtered attribute not enabled in filter_mask");
				return -EINVAL;
			}
			filters->mask[at] = nla_get_u32(tb[at]);
		}
	}

	return 0;
}

static int rtnl_stats_get_parse(const struct nlmsghdr *nlh,
				u32 filter_mask,
				struct rtnl_stats_dump_filters *filters,
				struct netlink_ext_ack *extack)
{
	struct nlattr *tb[IFLA_STATS_GETSET_MAX + 1];
	int err;
	int i;

	filters->mask[0] = filter_mask;
	for (i = 1; i < ARRAY_SIZE(filters->mask); i++)
		filters->mask[i] = -1U;

	err = nlmsg_parse(nlh, sizeof(struct if_stats_msg), tb,
			  IFLA_STATS_GETSET_MAX, rtnl_stats_get_policy, extack);
	if (err < 0)
		return err;

	if (tb[IFLA_STATS_GET_FILTERS]) {
		err = rtnl_stats_get_parse_filters(tb[IFLA_STATS_GET_FILTERS],
						   filters, extack);
		if (err)
			return err;
	}

	return 0;
}

static int rtnl_valid_stats_req(const struct nlmsghdr *nlh, bool strict_check,
				bool is_dump, struct netlink_ext_ack *extack)
{
	struct if_stats_msg *ifsm;

	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifsm))) {
		NL_SET_ERR_MSG(extack, "Invalid header for stats dump");
		return -EINVAL;
	}

	if (!strict_check)
		return 0;

	ifsm = nlmsg_data(nlh);

	/* only requests using strict checks can pass data to influence
	 * the dump. The legacy exception is filter_mask.
	 */
	if (ifsm->pad1 || ifsm->pad2 || (is_dump && ifsm->ifindex)) {
		NL_SET_ERR_MSG(extack, "Invalid values in header for stats dump request");
		return -EINVAL;
	}
	if (ifsm->filter_mask >= IFLA_STATS_FILTER_BIT(IFLA_STATS_MAX + 1)) {
		NL_SET_ERR_MSG(extack, "Invalid stats requested through filter mask");
		return -EINVAL;
	}

	return 0;
}

static int rtnl_stats_get(struct sk_buff *skb, struct nlmsghdr *nlh,
			  struct netlink_ext_ack *extack)
{
	struct rtnl_stats_dump_filters filters;
	struct net *net = sock_net(skb->sk);
	struct net_device *dev = NULL;
	int idxattr = 0, prividx = 0;
	struct if_stats_msg *ifsm;
	struct sk_buff *nskb;
	int err;

	err = rtnl_valid_stats_req(nlh, netlink_strict_get_check(skb),
				   false, extack);
	if (err)
		return err;

	ifsm = nlmsg_data(nlh);
	if (ifsm->ifindex > 0)
		dev = __dev_get_by_index(net, ifsm->ifindex);
	else
		return -EINVAL;

	if (!dev)
		return -ENODEV;

	if (!ifsm->filter_mask) {
		NL_SET_ERR_MSG(extack, "Filter mask must be set for stats get");
		return -EINVAL;
	}

	err = rtnl_stats_get_parse(nlh, ifsm->filter_mask, &filters, extack);
	if (err)
		return err;

	nskb = nlmsg_new(if_nlmsg_stats_size(dev, &filters), GFP_KERNEL);
	if (!nskb)
		return -ENOBUFS;

	err = rtnl_fill_statsinfo(nskb, dev, RTM_NEWSTATS,
				  NETLINK_CB(skb).portid, nlh->nlmsg_seq, 0,
				  0, &filters, &idxattr, &prividx, extack);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in if_nlmsg_stats_size */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(nskb);
	} else {
		err = rtnl_unicast(nskb, net, NETLINK_CB(skb).portid);
	}

	return err;
}

static int rtnl_stats_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct netlink_ext_ack *extack = cb->extack;
	int h, s_h, err, s_idx, s_idxattr, s_prividx;
	struct rtnl_stats_dump_filters filters;
	struct net *net = sock_net(skb->sk);
	unsigned int flags = NLM_F_MULTI;
	struct if_stats_msg *ifsm;
	struct hlist_head *head;
	struct net_device *dev;
	int idx = 0;

	s_h = cb->args[0];
	s_idx = cb->args[1];
	s_idxattr = cb->args[2];
	s_prividx = cb->args[3];

	cb->seq = net->dev_base_seq;

	err = rtnl_valid_stats_req(cb->nlh, cb->strict_check, true, extack);
	if (err)
		return err;

	ifsm = nlmsg_data(cb->nlh);
	if (!ifsm->filter_mask) {
		NL_SET_ERR_MSG(extack, "Filter mask must be set for stats dump");
		return -EINVAL;
	}

	err = rtnl_stats_get_parse(cb->nlh, ifsm->filter_mask, &filters,
				   extack);
	if (err)
		return err;

	for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
		idx = 0;
		head = &net->dev_index_head[h];
		hlist_for_each_entry(dev, head, index_hlist) {
			if (idx < s_idx)
				goto cont;
			err = rtnl_fill_statsinfo(skb, dev, RTM_NEWSTATS,
						  NETLINK_CB(cb->skb).portid,
						  cb->nlh->nlmsg_seq, 0,
						  flags, &filters,
						  &s_idxattr, &s_prividx,
						  extack);
			/* If we ran out of room on the first message,
			 * we're in trouble.
			 */
			WARN_ON((err == -EMSGSIZE) && (skb->len == 0));

			if (err < 0)
				goto out;
			s_prividx = 0;
			s_idxattr = 0;
			nl_dump_check_consistent(cb, nlmsg_hdr(skb));
cont:
			idx++;
		}
	}
out:
	cb->args[3] = s_prividx;
	cb->args[2] = s_idxattr;
	cb->args[1] = idx;
	cb->args[0] = h;

	return skb->len;
}

void rtnl_offload_xstats_notify(struct net_device *dev)
{
	struct rtnl_stats_dump_filters response_filters = {};
	struct net *net = dev_net(dev);
	int idxattr = 0, prividx = 0;
	struct sk_buff *skb;
	int err = -ENOBUFS;

	ASSERT_RTNL();

	response_filters.mask[0] |=
		IFLA_STATS_FILTER_BIT(IFLA_STATS_LINK_OFFLOAD_XSTATS);
	response_filters.mask[IFLA_STATS_LINK_OFFLOAD_XSTATS] |=
		IFLA_STATS_FILTER_BIT(IFLA_OFFLOAD_XSTATS_HW_S_INFO);

	skb = nlmsg_new(if_nlmsg_stats_size(dev, &response_filters),
			GFP_KERNEL);
	if (!skb)
		goto errout;

	err = rtnl_fill_statsinfo(skb, dev, RTM_NEWSTATS, 0, 0, 0, 0,
				  &response_filters, &idxattr, &prividx, NULL);
	if (err < 0) {
		kfree_skb(skb);
		goto errout;
	}

	rtnl_notify(skb, net, 0, RTNLGRP_STATS, NULL, GFP_KERNEL);
	return;

errout:
	rtnl_set_sk_err(net, RTNLGRP_STATS, err);
}
EXPORT_SYMBOL(rtnl_offload_xstats_notify);
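
/* Exported so that, beyond the RTM_SETSTATS path below, other callers (for
 * instance drivers whose ability to collect offloaded L3 stats changes at
 * runtime) can emit an RTM_NEWSTATS notification carrying the refreshed
 * IFLA_OFFLOAD_XSTATS_HW_S_INFO request/used state to RTNLGRP_STATS
 * listeners.  Must be called with the RTNL lock held.
 */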

static int rtnl_stats_set(struct sk_buff *skb, struct nlmsghdr *nlh,
			  struct netlink_ext_ack *extack)
{
	enum netdev_offload_xstats_type t_l3 = NETDEV_OFFLOAD_XSTATS_TYPE_L3;
	struct rtnl_stats_dump_filters response_filters = {};
	struct nlattr *tb[IFLA_STATS_GETSET_MAX + 1];
	struct net *net = sock_net(skb->sk);
	struct net_device *dev = NULL;
	struct if_stats_msg *ifsm;
	bool notify = false;
	int err;

	err = rtnl_valid_stats_req(nlh, netlink_strict_get_check(skb),
				   false, extack);
	if (err)
		return err;

	ifsm = nlmsg_data(nlh);
	if (ifsm->family != AF_UNSPEC) {
		NL_SET_ERR_MSG(extack, "Address family should be AF_UNSPEC");
		return -EINVAL;
	}

	if (ifsm->ifindex > 0)
		dev = __dev_get_by_index(net, ifsm->ifindex);
	else
		return -EINVAL;

	if (!dev)
		return -ENODEV;

	if (ifsm->filter_mask) {
		NL_SET_ERR_MSG(extack, "Filter mask must be 0 for stats set");
		return -EINVAL;
	}

	err = nlmsg_parse(nlh, sizeof(*ifsm), tb, IFLA_STATS_GETSET_MAX,
			  ifla_stats_set_policy, extack);
	if (err < 0)
		return err;

	if (tb[IFLA_STATS_SET_OFFLOAD_XSTATS_L3_STATS]) {
		u8 req = nla_get_u8(tb[IFLA_STATS_SET_OFFLOAD_XSTATS_L3_STATS]);

		if (req)
			err = netdev_offload_xstats_enable(dev, t_l3, extack);
		else
			err = netdev_offload_xstats_disable(dev, t_l3);

		if (!err)
			notify = true;
		else if (err != -EALREADY)
			return err;

		response_filters.mask[0] |=
			IFLA_STATS_FILTER_BIT(IFLA_STATS_LINK_OFFLOAD_XSTATS);
		response_filters.mask[IFLA_STATS_LINK_OFFLOAD_XSTATS] |=
			IFLA_STATS_FILTER_BIT(IFLA_OFFLOAD_XSTATS_HW_S_INFO);
	}

	if (notify)
		rtnl_offload_xstats_notify(dev);

	return 0;
}

/* Process one rtnetlink message. */

static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
			     struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct rtnl_link *link;
	enum rtnl_kinds kind;
	struct module *owner;
	int err = -EOPNOTSUPP;
	rtnl_doit_func doit;
	unsigned int flags;
	int family;
	int type;

	type = nlh->nlmsg_type;
	if (type > RTM_MAX)
		return -EOPNOTSUPP;

	type -= RTM_BASE;

	/* All the messages must have at least 1 byte length */
	if (nlmsg_len(nlh) < sizeof(struct rtgenmsg))
		return 0;

	family = ((struct rtgenmsg *)nlmsg_data(nlh))->rtgen_family;
	kind = rtnl_msgtype_kind(type);

	if (kind != RTNL_KIND_GET && !netlink_net_capable(skb, CAP_NET_ADMIN))
		return -EPERM;

	rcu_read_lock();
	if (kind == RTNL_KIND_GET && (nlh->nlmsg_flags & NLM_F_DUMP)) {
		struct sock *rtnl;
		rtnl_dumpit_func dumpit;
		u32 min_dump_alloc = 0;

		link = rtnl_get_link(family, type);
		if (!link || !link->dumpit) {
			family = PF_UNSPEC;
			link = rtnl_get_link(family, type);
			if (!link || !link->dumpit)
				goto err_unlock;
		}
		owner = link->owner;
		dumpit = link->dumpit;

		if (type == RTM_GETLINK - RTM_BASE)
			min_dump_alloc = rtnl_calcit(skb, nlh);

		err = 0;
		/* need to do this before rcu_read_unlock() */
		if (!try_module_get(owner))
			err = -EPROTONOSUPPORT;

		rcu_read_unlock();

		rtnl = net->rtnl;
		if (err == 0) {
			struct netlink_dump_control c = {
				.dump		= dumpit,
				.min_dump_alloc	= min_dump_alloc,
				.module		= owner,
			};
			err = netlink_dump_start(rtnl, skb, nlh, &c);
			/* netlink_dump_start() will keep a reference on
			 * module if dump is still in progress.
			 */
			module_put(owner);
		}
		return err;
	}

	link = rtnl_get_link(family, type);
	if (!link || !link->doit) {
		family = PF_UNSPEC;
		link = rtnl_get_link(PF_UNSPEC, type);
		if (!link || !link->doit)
			goto out_unlock;
	}

	owner = link->owner;
	if (!try_module_get(owner)) {
		err = -EPROTONOSUPPORT;
		goto out_unlock;
	}

	flags = link->flags;
	if (kind == RTNL_KIND_DEL && (nlh->nlmsg_flags & NLM_F_BULK) &&
	    !(flags & RTNL_FLAG_BULK_DEL_SUPPORTED)) {
		NL_SET_ERR_MSG(extack, "Bulk delete is not supported");
		module_put(owner);
		goto err_unlock;
	}

	if (flags & RTNL_FLAG_DOIT_UNLOCKED) {
		doit = link->doit;
		rcu_read_unlock();
		if (doit)
			err = doit(skb, nlh, extack);
		module_put(owner);
		return err;
	}
	rcu_read_unlock();

	rtnl_lock();
	link = rtnl_get_link(family, type);
	if (link && link->doit)
		err = link->doit(skb, nlh, extack);
	rtnl_unlock();

	module_put(owner);

	return err;

out_unlock:
	rcu_read_unlock();
	return err;

err_unlock:
	rcu_read_unlock();
	return -EOPNOTSUPP;
}
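
/* Dispatch summary for rtnetlink_rcv_msg(): GET requests carrying NLM_F_DUMP
 * are handed to netlink_dump_start() with the registered dumpit callback;
 * everything else resolves to a doit handler, falling back from the message's
 * address family to PF_UNSPEC.  Non-GET message kinds require CAP_NET_ADMIN
 * in the socket's network namespace, and doit handlers run under the RTNL
 * mutex unless they were registered with RTNL_FLAG_DOIT_UNLOCKED.
 */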

static void rtnetlink_rcv(struct sk_buff *skb)
{
	netlink_rcv_skb(skb, &rtnetlink_rcv_msg);
}

static int rtnetlink_bind(struct net *net, int group)
{
	switch (group) {
	case RTNLGRP_IPV4_MROUTE_R:
	case RTNLGRP_IPV6_MROUTE_R:
		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
			return -EPERM;
		break;
	}
	return 0;
}
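
/* rtnetlink_bind() runs when a socket subscribes to a multicast group.  The
 * IPv4/IPv6 multicast-routing report groups carry routing cache reports
 * (which include packet data), so joining them is restricted to CAP_NET_ADMIN
 * in the owning user namespace; all other groups remain unrestricted here.
 */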

static int rtnetlink_event(struct notifier_block *this, unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	switch (event) {
	case NETDEV_REBOOT:
	case NETDEV_CHANGEMTU:
	case NETDEV_CHANGEADDR:
	case NETDEV_CHANGENAME:
	case NETDEV_FEAT_CHANGE:
	case NETDEV_BONDING_FAILOVER:
	case NETDEV_POST_TYPE_CHANGE:
	case NETDEV_NOTIFY_PEERS:
	case NETDEV_CHANGEUPPER:
	case NETDEV_RESEND_IGMP:
	case NETDEV_CHANGEINFODATA:
	case NETDEV_CHANGELOWERSTATE:
	case NETDEV_CHANGE_TX_QUEUE_LEN:
		rtmsg_ifinfo_event(RTM_NEWLINK, dev, 0, rtnl_get_event(event),
				   GFP_KERNEL, NULL, 0);
		break;
	default:
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block rtnetlink_dev_notifier = {
	.notifier_call = rtnetlink_event,
};

static int __net_init rtnetlink_net_init(struct net *net)
{
	struct sock *sk;
	struct netlink_kernel_cfg cfg = {
		.groups		= RTNLGRP_MAX,
		.input		= rtnetlink_rcv,
		.cb_mutex	= &rtnl_mutex,
		.flags		= NL_CFG_F_NONROOT_RECV,
		.bind		= rtnetlink_bind,
	};

	sk = netlink_kernel_create(net, NETLINK_ROUTE, &cfg);
	if (!sk)
		return -ENOMEM;
	net->rtnl = sk;
	return 0;
}

static void __net_exit rtnetlink_net_exit(struct net *net)
{
	netlink_kernel_release(net->rtnl);
	net->rtnl = NULL;
}

static struct pernet_operations rtnetlink_net_ops = {
	.init = rtnetlink_net_init,
	.exit = rtnetlink_net_exit,
};

void __init rtnetlink_init(void)
{
	if (register_pernet_subsys(&rtnetlink_net_ops))
		panic("rtnetlink_init: cannot initialize rtnetlink\n");

	register_netdevice_notifier(&rtnetlink_dev_notifier);

	rtnl_register(PF_UNSPEC, RTM_GETLINK, rtnl_getlink,
		      rtnl_dump_ifinfo, 0);
	rtnl_register(PF_UNSPEC, RTM_SETLINK, rtnl_setlink, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_NEWLINK, rtnl_newlink, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_DELLINK, rtnl_dellink, NULL, 0);

	rtnl_register(PF_UNSPEC, RTM_GETADDR, NULL, rtnl_dump_all, 0);
	rtnl_register(PF_UNSPEC, RTM_GETROUTE, NULL, rtnl_dump_all, 0);
	rtnl_register(PF_UNSPEC, RTM_GETNETCONF, NULL, rtnl_dump_all, 0);

	rtnl_register(PF_UNSPEC, RTM_NEWLINKPROP, rtnl_newlinkprop, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_DELLINKPROP, rtnl_dellinkprop, NULL, 0);

	rtnl_register(PF_BRIDGE, RTM_NEWNEIGH, rtnl_fdb_add, NULL, 0);
	rtnl_register(PF_BRIDGE, RTM_DELNEIGH, rtnl_fdb_del, NULL,
		      RTNL_FLAG_BULK_DEL_SUPPORTED);
	rtnl_register(PF_BRIDGE, RTM_GETNEIGH, rtnl_fdb_get, rtnl_fdb_dump, 0);

	rtnl_register(PF_BRIDGE, RTM_GETLINK, NULL, rtnl_bridge_getlink, 0);
	rtnl_register(PF_BRIDGE, RTM_DELLINK, rtnl_bridge_dellink, NULL, 0);
	rtnl_register(PF_BRIDGE, RTM_SETLINK, rtnl_bridge_setlink, NULL, 0);

	rtnl_register(PF_UNSPEC, RTM_GETSTATS, rtnl_stats_get, rtnl_stats_dump,
		      0);
	rtnl_register(PF_UNSPEC, RTM_SETSTATS, rtnl_stats_set, NULL, 0);
}
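
/* Userspace view of the RTM_*STATS handlers registered above (illustrative
 * only; the exact iproute2 syntax depends on the installed version):
 *
 *	ip stats show dev eth0 group offload subgroup l3_stats   # RTM_GETSTATS
 *	ip stats set dev eth0 l3_stats on                        # RTM_SETSTATS
 *
 * The first maps to an RTM_GETSTATS request whose IFLA_STATS_GET_FILTERS nest
 * narrows the dump to IFLA_STATS_LINK_OFFLOAD_XSTATS; the second to an
 * RTM_SETSTATS request carrying IFLA_STATS_SET_OFFLOAD_XSTATS_L3_STATS.
 */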