2 * NET3 IP device support routines.
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
9 * Derived from the IP parts of dev.c 1.0.19
11 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
12 * Mark Evans, <evansmp@uhura.aston.ac.uk>
15 * Alan Cox, <gw4pts@gw4pts.ampr.org>
16 * Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
19 * Alexey Kuznetsov: pa_* fields are replaced with ifaddr
21 * Cyrus Durgin: updated for kmod
22 * Matthias Andree: in devinet_ioctl, compare label and
23 * address (4.4BSD alias style support),
24 * fall back to comparing just the label
29 #include <asm/uaccess.h>
30 #include <linux/bitops.h>
31 #include <linux/capability.h>
32 #include <linux/module.h>
33 #include <linux/types.h>
34 #include <linux/kernel.h>
35 #include <linux/string.h>
37 #include <linux/socket.h>
38 #include <linux/sockios.h>
40 #include <linux/errno.h>
41 #include <linux/interrupt.h>
42 #include <linux/if_addr.h>
43 #include <linux/if_ether.h>
44 #include <linux/inet.h>
45 #include <linux/netdevice.h>
46 #include <linux/etherdevice.h>
47 #include <linux/skbuff.h>
48 #include <linux/init.h>
49 #include <linux/notifier.h>
50 #include <linux/inetdevice.h>
51 #include <linux/igmp.h>
52 #include <linux/slab.h>
53 #include <linux/hash.h>
55 #include <linux/sysctl.h>
57 #include <linux/kmod.h>
58 #include <linux/netconf.h>
62 #include <net/route.h>
63 #include <net/ip_fib.h>
64 #include <net/rtnetlink.h>
65 #include <net/net_namespace.h>
66 #include <net/addrconf.h>
68 #include "fib_lookup.h"
/* Address flags that only make sense for IPv6 (DAD, temp addresses, etc.);
 * they are masked off userspace-supplied ifa_flags before an IPv4 address
 * is inserted (see __inet_insert_ifa). */
70 #define IPV6ONLY_FLAGS \
71 (IFA_F_NODAD | IFA_F_OPTIMISTIC | IFA_F_DADFAILED | \
72 IFA_F_HOMEADDRESS | IFA_F_TENTATIVE | \
73 IFA_F_MANAGETEMPADDR | IFA_F_STABLE_PRIVACY)
/* Per-device IPv4 configuration template: redirects on, shared media on,
 * IGMP unsolicited report intervals in milliseconds.
 * NOTE(review): initializer appears gap-sampled here; the designated-index
 * form suggests these populate a .data[] array member — confirm upstream. */
75 static struct ipv4_devconf ipv4_devconf = {
77 [IPV4_DEVCONF_ACCEPT_REDIRECTS - 1] = 1,
78 [IPV4_DEVCONF_SEND_REDIRECTS - 1] = 1,
79 [IPV4_DEVCONF_SECURE_REDIRECTS - 1] = 1,
80 [IPV4_DEVCONF_SHARED_MEDIA - 1] = 1,
81 [IPV4_DEVCONF_IGMPV2_UNSOLICITED_REPORT_INTERVAL - 1] = 10000 /*ms*/,
82 [IPV4_DEVCONF_IGMPV3_UNSOLICITED_REPORT_INTERVAL - 1] = 1000 /*ms*/,
/* Default template copied into newly created in_devices (see inetdev_init).
 * Identical to ipv4_devconf above except it additionally enables
 * ACCEPT_SOURCE_ROUTE. */
86 static struct ipv4_devconf ipv4_devconf_dflt = {
88 [IPV4_DEVCONF_ACCEPT_REDIRECTS - 1] = 1,
89 [IPV4_DEVCONF_SEND_REDIRECTS - 1] = 1,
90 [IPV4_DEVCONF_SECURE_REDIRECTS - 1] = 1,
91 [IPV4_DEVCONF_SHARED_MEDIA - 1] = 1,
92 [IPV4_DEVCONF_ACCEPT_SOURCE_ROUTE - 1] = 1,
93 [IPV4_DEVCONF_IGMPV2_UNSOLICITED_REPORT_INTERVAL - 1] = 10000 /*ms*/,
94 [IPV4_DEVCONF_IGMPV3_UNSOLICITED_REPORT_INTERVAL - 1] = 1000 /*ms*/,
/* Access a devconf attribute on the per-netns default config. */
98 #define IPV4_DEVCONF_DFLT(net, attr) \
99 IPV4_DEVCONF((*net->ipv4.devconf_dflt), attr)
/* Netlink attribute validation policy for RTM_NEWADDR/RTM_DELADDR:
 * addresses are 32-bit, label is a bounded string, cacheinfo carries
 * the valid/preferred lifetimes. */
101 static const struct nla_policy ifa_ipv4_policy[IFA_MAX+1] = {
102 [IFA_LOCAL] = { .type = NLA_U32 },
103 [IFA_ADDRESS] = { .type = NLA_U32 },
104 [IFA_BROADCAST] = { .type = NLA_U32 },
105 [IFA_LABEL] = { .type = NLA_STRING, .len = IFNAMSIZ - 1 },
106 [IFA_CACHEINFO] = { .len = sizeof(struct ifa_cacheinfo) },
107 [IFA_FLAGS] = { .type = NLA_U32 },
/* 256-bucket hash table of all IPv4 addresses in the system, keyed by
 * ifa_local; used by __ip_dev_find() and check_lifetime(). */
110 #define IN4_ADDR_HSIZE_SHIFT 8
111 #define IN4_ADDR_HSIZE (1U << IN4_ADDR_HSIZE_SHIFT)
113 static struct hlist_head inet_addr_lst[IN4_ADDR_HSIZE];
/* Hash an address into inet_addr_lst[]; mixing in net_hash_mix() salts the
 * hash per network namespace so buckets differ across netns. */
115 static u32 inet_addr_hash(const struct net *net, __be32 addr)
117 u32 val = (__force u32) addr ^ net_hash_mix(net);
119 return hash_32(val, IN4_ADDR_HSIZE_SHIFT);
/* Link an ifaddr into the global hash, RCU-safe for lockless readers.
 * NOTE(review): presumably called under RTNL — confirm in callers. */
122 static void inet_hash_insert(struct net *net, struct in_ifaddr *ifa)
124 u32 hash = inet_addr_hash(net, ifa->ifa_local);
127 hlist_add_head_rcu(&ifa->hash, &inet_addr_lst[hash]);
/* Unlink an ifaddr from the hash; hlist_del_init_rcu keeps the node
 * reinsertable and safe against concurrent RCU traversal. */
130 static void inet_hash_remove(struct in_ifaddr *ifa)
133 hlist_del_init_rcu(&ifa->hash);
137 * __ip_dev_find - find the first device with a given source address.
138 * @net: the net namespace
139 * @addr: the source address
140 * @devref: if true, take a reference on the found device
142 * If a caller uses devref=false, it should be protected by RCU, or RTNL
/* Look up the device owning source address @addr: first via the address
 * hash, then (on miss) via the FIB local table so loopback-subnet traffic
 * still resolves. Namespace-filtered; optionally takes a dev reference. */
144 struct net_device *__ip_dev_find(struct net *net, __be32 addr, bool devref)
146 u32 hash = inet_addr_hash(net, addr);
147 struct net_device *result = NULL;
148 struct in_ifaddr *ifa;
151 hlist_for_each_entry_rcu(ifa, &inet_addr_lst[hash], hash) {
152 if (ifa->ifa_local == addr) {
153 struct net_device *dev = ifa->ifa_dev->dev;
/* Skip entries belonging to other network namespaces. */
155 if (!net_eq(dev_net(dev), net))
162 struct flowi4 fl4 = { .daddr = addr };
163 struct fib_result res = { 0 };
164 struct fib_table *local;
166 /* Fallback to FIB local table so that communication
167 * over loopback subnets work.
169 local = fib_get_table(net, RT_TABLE_LOCAL);
171 !fib_table_lookup(local, &fl4, &res, FIB_LOOKUP_NOREF) &&
172 res.type == RTN_LOCAL)
173 result = FIB_RES_DEV(res);
/* Pin the device for the caller when devref was requested. */
175 if (result && devref)
/* Forward declarations and the inetaddr notifier chain. The second pair of
 * devinet_sysctl_register/unregister lines look like the no-op stubs of a
 * !CONFIG_SYSCTL branch — the #ifdef arms are not visible here; confirm. */
182 static void rtmsg_ifa(int event, struct in_ifaddr *, struct nlmsghdr *, u32);
184 static BLOCKING_NOTIFIER_HEAD(inetaddr_chain);
185 static void inet_del_ifa(struct in_device *in_dev, struct in_ifaddr **ifap,
188 static int devinet_sysctl_register(struct in_device *idev);
189 static void devinet_sysctl_unregister(struct in_device *idev);
191 static int devinet_sysctl_register(struct in_device *idev)
195 static void devinet_sysctl_unregister(struct in_device *idev)
200 /* Locks all the inet devices. */
/* Allocate a zeroed in_ifaddr; may sleep (GFP_KERNEL). Caller owns it until
 * it is inserted or freed with inet_free_ifa(). */
202 static struct in_ifaddr *inet_alloc_ifa(void)
204 return kzalloc(sizeof(struct in_ifaddr), GFP_KERNEL);
/* RCU callback: drop the in_device reference the ifaddr held, then free.
 * (The kfree is presumably in the elided tail of this function.) */
207 static void inet_rcu_free_ifa(struct rcu_head *head)
209 struct in_ifaddr *ifa = container_of(head, struct in_ifaddr, rcu_head);
211 in_dev_put(ifa->ifa_dev);
/* Defer freeing until after a grace period, since readers traverse the
 * address hash and ifa_list under RCU. */
215 static void inet_free_ifa(struct in_ifaddr *ifa)
217 call_rcu(&ifa->rcu_head, inet_rcu_free_ifa);
/* Final teardown of an in_device once its refcount hits zero: the address
 * and multicast lists must already be empty (WARN otherwise). */
220 void in_dev_finish_destroy(struct in_device *idev)
222 struct net_device *dev = idev->dev;
224 WARN_ON(idev->ifa_list);
225 WARN_ON(idev->mc_list);
/* Last reference: safe to free mc_hash without rcu_dereference checks. */
226 kfree(rcu_dereference_protected(idev->mc_hash, 1));
227 #ifdef NET_REFCNT_DEBUG
228 pr_debug("%s: %p=%s\n", __func__, idev, dev ? dev->name : "NIL");
/* Reaching here with a live device is a refcounting bug. */
232 pr_err("Freeing alive in_device %p\n", idev);
236 EXPORT_SYMBOL(in_dev_finish_destroy);
/* Create and attach the IPv4 in_device for @dev: copy the per-netns default
 * devconf, allocate ARP parms, register sysctls, init multicast state, and
 * finally publish via dev->ip_ptr (publication last: RX starts then). */
238 static struct in_device *inetdev_init(struct net_device *dev)
240 struct in_device *in_dev;
245 in_dev = kzalloc(sizeof(*in_dev), GFP_KERNEL);
248 memcpy(&in_dev->cnf, dev_net(dev)->ipv4.devconf_dflt,
249 sizeof(in_dev->cnf));
250 in_dev->cnf.sysctl = NULL;
252 in_dev->arp_parms = neigh_parms_alloc(dev, &arp_tbl);
253 if (!in_dev->arp_parms)
/* LRO is incompatible with forwarding; disable it if forwarding is on. */
255 if (IPV4_DEVCONF(in_dev->cnf, FORWARDING))
256 dev_disable_lro(dev);
257 /* Reference in_dev->dev */
259 /* Account for reference dev->ip_ptr (below) */
262 err = devinet_sysctl_register(in_dev);
/* Error unwind: release ARP parms allocated above. */
265 neigh_parms_release(&arp_tbl, in_dev->arp_parms);
270 ip_mc_init_dev(in_dev);
271 if (dev->flags & IFF_UP)
274 /* we can receive as soon as ip_ptr is set -- do this last */
275 rcu_assign_pointer(dev->ip_ptr, in_dev);
/* Returns the in_device on success, ERR_PTR(err) on failure. */
277 return in_dev ?: ERR_PTR(err);
/* RCU callback used by inetdev_destroy(): drops the in_device reference
 * after all RCU readers of dev->ip_ptr are done. */
284 static void in_dev_rcu_put(struct rcu_head *head)
286 struct in_device *idev = container_of(head, struct in_device, rcu_head);
/* Detach and destroy the in_device: tear down multicast, delete every
 * remaining address, clear dev->ip_ptr, then unregister sysctls/ARP parms
 * and release via RCU. */
290 static void inetdev_destroy(struct in_device *in_dev)
292 struct in_ifaddr *ifa;
293 struct net_device *dev;
301 ip_mc_destroy_dev(in_dev);
/* Delete addresses one by one from the head of the list. */
303 while ((ifa = in_dev->ifa_list) != NULL) {
304 inet_del_ifa(in_dev, &in_dev->ifa_list, 0);
/* Unpublish before freeing so new readers cannot find the in_device. */
308 RCU_INIT_POINTER(dev->ip_ptr, NULL);
310 devinet_sysctl_unregister(in_dev);
311 neigh_parms_release(&arp_tbl, in_dev->arp_parms);
314 call_rcu(&in_dev->rcu_head, in_dev_rcu_put);
/* Return whether @a (and, if nonzero, @b) fall within the subnet of any
 * primary address configured on @in_dev. */
317 int inet_addr_onlink(struct in_device *in_dev, __be32 a, __be32 b)
320 for_primary_ifa(in_dev) {
321 if (inet_ifa_match(a, ifa)) {
322 if (!b || inet_ifa_match(b, ifa)) {
327 } endfor_ifa(in_dev);
/* Core address deletion. Removing a primary address also removes (or, with
 * promote_secondaries, promotes) its secondary aliases; netlink RTM_DELADDR
 * is sent before the notifier chain runs (ordering rationale below). */
332 static void __inet_del_ifa(struct in_device *in_dev, struct in_ifaddr **ifap,
333 int destroy, struct nlmsghdr *nlh, u32 portid)
335 struct in_ifaddr *promote = NULL;
336 struct in_ifaddr *ifa, *ifa1 = *ifap;
337 struct in_ifaddr *last_prim = in_dev->ifa_list;
338 struct in_ifaddr *prev_prom = NULL;
339 int do_promote = IN_DEV_PROMOTE_SECONDARIES(in_dev);
346 /* 1. Deleting primary ifaddr forces deletion all secondaries
347 * unless alias promotion is set
350 if (!(ifa1->ifa_flags & IFA_F_SECONDARY)) {
351 struct in_ifaddr **ifap1 = &ifa1->ifa_next;
353 while ((ifa = *ifap1) != NULL) {
/* Track the last primary with scope <= ours; promotion splices there. */
354 if (!(ifa->ifa_flags & IFA_F_SECONDARY) &&
355 ifa1->ifa_scope <= ifa->ifa_scope)
/* Skip entries that are not secondaries of the deleted subnet. */
358 if (!(ifa->ifa_flags & IFA_F_SECONDARY) ||
359 ifa1->ifa_mask != ifa->ifa_mask ||
360 !inet_ifa_match(ifa1->ifa_address, ifa)) {
361 ifap1 = &ifa->ifa_next;
/* No promotion: delete the secondary alias outright. */
367 inet_hash_remove(ifa);
368 *ifap1 = ifa->ifa_next;
370 rtmsg_ifa(RTM_DELADDR, ifa, nlh, portid);
371 blocking_notifier_call_chain(&inetaddr_chain,
381 /* On promotion all secondaries from subnet are changing
382 * the primary IP, we must remove all their routes silently
383 * and later to add them back with new prefsrc. Do this
384 * while all addresses are on the device list.
386 for (ifa = promote; ifa; ifa = ifa->ifa_next) {
387 if (ifa1->ifa_mask == ifa->ifa_mask &&
388 inet_ifa_match(ifa1->ifa_address, ifa))
389 fib_del_ifaddr(ifa, ifa1);
/* 2. Unlink ifa1 itself from the device list and the hash. */
395 *ifap = ifa1->ifa_next;
396 inet_hash_remove(ifa1);
398 /* 3. Announce address deletion */
400 /* Send message first, then call notifier.
401 At first sight, FIB update triggered by notifier
402 will refer to already deleted ifaddr, that could confuse
403 netlink listeners. It is not true: look, gated sees
404 that route deleted and if it still thinks that ifaddr
405 is valid, it will try to restore deleted routes... Grr.
406 So that, this order is correct.
408 rtmsg_ifa(RTM_DELADDR, ifa1, nlh, portid);
409 blocking_notifier_call_chain(&inetaddr_chain, NETDEV_DOWN, ifa1);
/* 4. Promote the first surviving secondary to primary: splice it after
 * the last primary, clear its SECONDARY flag, announce, then re-add
 * routes for the remaining secondaries of that subnet. */
412 struct in_ifaddr *next_sec = promote->ifa_next;
415 prev_prom->ifa_next = promote->ifa_next;
416 promote->ifa_next = last_prim->ifa_next;
417 last_prim->ifa_next = promote;
420 promote->ifa_flags &= ~IFA_F_SECONDARY;
421 rtmsg_ifa(RTM_NEWADDR, promote, nlh, portid);
422 blocking_notifier_call_chain(&inetaddr_chain,
424 for (ifa = next_sec; ifa; ifa = ifa->ifa_next) {
425 if (ifa1->ifa_mask != ifa->ifa_mask ||
426 !inet_ifa_match(ifa1->ifa_address, ifa))
/* Convenience wrapper: delete without a triggering netlink request. */
436 static void inet_del_ifa(struct in_device *in_dev, struct in_ifaddr **ifap,
439 __inet_del_ifa(in_dev, ifap, destroy, NULL, 0);
/* Deferred work that expires/deprecates addresses by lifetime. */
442 static void check_lifetime(struct work_struct *work);
444 static DECLARE_DELAYED_WORK(check_lifetime_work, check_lifetime);
/* Insert @ifa into its in_device's address list, classifying it as primary
 * or secondary (same subnet as an existing primary => SECONDARY), hash it,
 * kick the lifetime checker, and announce via netlink + notifier chain. */
446 static int __inet_insert_ifa(struct in_ifaddr *ifa, struct nlmsghdr *nlh,
449 struct in_device *in_dev = ifa->ifa_dev;
450 struct in_ifaddr *ifa1, **ifap, **last_primary;
/* An address with no local part is rejected (error path elided here). */
454 if (!ifa->ifa_local) {
459 ifa->ifa_flags &= ~IFA_F_SECONDARY;
460 last_primary = &in_dev->ifa_list;
462 /* Don't set IPv6 only flags to IPv4 addresses */
463 ifa->ifa_flags &= ~IPV6ONLY_FLAGS;
465 for (ifap = &in_dev->ifa_list; (ifa1 = *ifap) != NULL;
466 ifap = &ifa1->ifa_next) {
467 if (!(ifa1->ifa_flags & IFA_F_SECONDARY) &&
468 ifa->ifa_scope <= ifa1->ifa_scope)
469 last_primary = &ifa1->ifa_next;
470 if (ifa1->ifa_mask == ifa->ifa_mask &&
471 inet_ifa_match(ifa1->ifa_address, ifa)) {
/* Exact duplicate local address: error path (elided). */
472 if (ifa1->ifa_local == ifa->ifa_local) {
476 if (ifa1->ifa_scope != ifa->ifa_scope) {
/* Same subnet as an existing primary: this one is secondary. */
480 ifa->ifa_flags |= IFA_F_SECONDARY;
/* New primary address: feed its value into the PRNG pool. */
484 if (!(ifa->ifa_flags & IFA_F_SECONDARY)) {
485 prandom_seed((__force u32) ifa->ifa_local);
489 ifa->ifa_next = *ifap;
492 inet_hash_insert(dev_net(in_dev->dev), ifa);
/* Re-run lifetime expiry immediately so the new address is accounted. */
494 cancel_delayed_work(&check_lifetime_work);
495 queue_delayed_work(system_power_efficient_wq, &check_lifetime_work, 0);
497 /* Send message first, then call notifier.
498 Notifier will trigger FIB update, so that
499 listeners of netlink will know about new ifaddr */
500 rtmsg_ifa(RTM_NEWADDR, ifa, nlh, portid);
501 blocking_notifier_call_chain(&inetaddr_chain, NETDEV_UP, ifa);
/* Insert without a triggering netlink request (ioctl/boot paths). */
506 static int inet_insert_ifa(struct in_ifaddr *ifa)
508 return __inet_insert_ifa(ifa, NULL, 0);
/* Bind @ifa to @dev's in_device and insert it; loopback addresses are
 * forced to host scope. Called from the SIOCSIFADDR ioctl path. */
511 static int inet_set_ifa(struct net_device *dev, struct in_ifaddr *ifa)
513 struct in_device *in_dev = __in_dev_get_rtnl(dev);
521 ipv4_devconf_setall(in_dev);
522 neigh_parms_data_state_setall(in_dev->arp_parms);
/* Attach the ifa to this in_device; it must not already belong to one. */
523 if (ifa->ifa_dev != in_dev) {
524 WARN_ON(ifa->ifa_dev);
526 ifa->ifa_dev = in_dev;
528 if (ipv4_is_loopback(ifa->ifa_local))
529 ifa->ifa_scope = RT_SCOPE_HOST;
530 return inet_insert_ifa(ifa);
533 /* Caller must hold RCU or RTNL :
534 * We dont take a reference on found in_device
/* Map an ifindex to its in_device, or NULL. Caller must hold RCU or RTNL;
 * no reference is taken on the result. */
536 struct in_device *inetdev_by_index(struct net *net, int ifindex)
538 struct net_device *dev;
539 struct in_device *in_dev = NULL;
542 dev = dev_get_by_index_rcu(net, ifindex);
544 in_dev = rcu_dereference_rtnl(dev->ip_ptr);
548 EXPORT_SYMBOL(inetdev_by_index);
550 /* Called only from RTNL semaphored context. No locks. */
/* Find the primary address on @in_dev matching @prefix/@mask, or NULL. */
552 struct in_ifaddr *inet_ifa_byprefix(struct in_device *in_dev, __be32 prefix,
557 for_primary_ifa(in_dev) {
558 if (ifa->ifa_mask == mask && inet_ifa_match(prefix, ifa))
560 } endfor_ifa(in_dev);
/* Join/leave the multicast group of an IFA_F_MCAUTOJOIN address using the
 * per-netns kernel socket. Compiled out without CONFIG_IP_MULTICAST. */
564 static int ip_mc_autojoin_config(struct net *net, bool join,
565 const struct in_ifaddr *ifa)
567 #if defined(CONFIG_IP_MULTICAST)
568 struct ip_mreqn mreq = {
569 .imr_multiaddr.s_addr = ifa->ifa_address,
570 .imr_ifindex = ifa->ifa_dev->dev->ifindex,
572 struct sock *sk = net->ipv4.mc_autojoin_sk;
579 ret = ip_mc_join_group(sk, &mreq);
581 ret = ip_mc_leave_group(sk, &mreq);
/* RTM_DELADDR handler: parse attributes, find the matching address on the
 * indicated device (match on local addr, label, and prefix when supplied),
 * leave its autojoined multicast group if needed, and delete it. */
590 static int inet_rtm_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh)
592 struct net *net = sock_net(skb->sk);
593 struct nlattr *tb[IFA_MAX+1];
594 struct in_device *in_dev;
595 struct ifaddrmsg *ifm;
596 struct in_ifaddr *ifa, **ifap;
601 err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, ifa_ipv4_policy);
605 ifm = nlmsg_data(nlh);
606 in_dev = inetdev_by_index(net, ifm->ifa_index);
612 for (ifap = &in_dev->ifa_list; (ifa = *ifap) != NULL;
613 ifap = &ifa->ifa_next) {
615 ifa->ifa_local != nla_get_in_addr(tb[IFA_LOCAL]))
618 if (tb[IFA_LABEL] && nla_strcmp(tb[IFA_LABEL], ifa->ifa_label))
621 if (tb[IFA_ADDRESS] &&
622 (ifm->ifa_prefixlen != ifa->ifa_prefixlen ||
623 !inet_ifa_match(nla_get_in_addr(tb[IFA_ADDRESS]), ifa)))
/* Autojoined multicast address: leave the group before deleting. */
626 if (ipv4_is_multicast(ifa->ifa_address))
627 ip_mc_autojoin_config(net, false, ifa);
628 __inet_del_ifa(in_dev, ifap, 1, nlh, NETLINK_CB(skb).portid);
/* No address matched the request. */
632 err = -EADDRNOTAVAIL;
637 #define INFINITY_LIFE_TIME 0xFFFFFFFF
/* Periodic worker: walk every hash bucket, delete addresses whose valid
 * lifetime expired, mark IFA_F_DEPRECATED when the preferred lifetime
 * expired, and compute the next wakeup. Two passes per bucket: an RCU scan
 * to see if changes are needed, then a locked/safe pass to apply them
 * (locking in the elided lines — confirm upstream). */
639 static void check_lifetime(struct work_struct *work)
641 unsigned long now, next, next_sec, next_sched;
642 struct in_ifaddr *ifa;
643 struct hlist_node *n;
647 next = round_jiffies_up(now + ADDR_CHECK_FREQUENCY);
649 for (i = 0; i < IN4_ADDR_HSIZE; i++) {
650 bool change_needed = false;
/* Pass 1 (RCU, read-only): decide whether this bucket needs changes. */
653 hlist_for_each_entry_rcu(ifa, &inet_addr_lst[i], hash) {
656 if (ifa->ifa_flags & IFA_F_PERMANENT)
659 /* We try to batch several events at once. */
660 age = (now - ifa->ifa_tstamp +
661 ADDRCONF_TIMER_FUZZ_MINUS) / HZ;
663 if (ifa->ifa_valid_lft != INFINITY_LIFE_TIME &&
664 age >= ifa->ifa_valid_lft) {
665 change_needed = true;
666 } else if (ifa->ifa_preferred_lft ==
667 INFINITY_LIFE_TIME) {
669 } else if (age >= ifa->ifa_preferred_lft) {
670 if (time_before(ifa->ifa_tstamp +
671 ifa->ifa_valid_lft * HZ, next))
672 next = ifa->ifa_tstamp +
673 ifa->ifa_valid_lft * HZ;
675 if (!(ifa->ifa_flags & IFA_F_DEPRECATED))
676 change_needed = true;
677 } else if (time_before(ifa->ifa_tstamp +
678 ifa->ifa_preferred_lft * HZ,
680 next = ifa->ifa_tstamp +
681 ifa->ifa_preferred_lft * HZ;
/* Pass 2 (safe iteration): actually delete/deprecate entries. */
688 hlist_for_each_entry_safe(ifa, n, &inet_addr_lst[i], hash) {
691 if (ifa->ifa_flags & IFA_F_PERMANENT)
694 /* We try to batch several events at once. */
695 age = (now - ifa->ifa_tstamp +
696 ADDRCONF_TIMER_FUZZ_MINUS) / HZ;
698 if (ifa->ifa_valid_lft != INFINITY_LIFE_TIME &&
699 age >= ifa->ifa_valid_lft) {
700 struct in_ifaddr **ifap;
/* Locate the ifa on its device list so inet_del_ifa can unlink it. */
702 for (ifap = &ifa->ifa_dev->ifa_list;
703 *ifap != NULL; ifap = &(*ifap)->ifa_next) {
705 inet_del_ifa(ifa->ifa_dev,
710 } else if (ifa->ifa_preferred_lft !=
711 INFINITY_LIFE_TIME &&
712 age >= ifa->ifa_preferred_lft &&
713 !(ifa->ifa_flags & IFA_F_DEPRECATED)) {
714 ifa->ifa_flags |= IFA_F_DEPRECATED;
715 rtmsg_ifa(RTM_NEWADDR, ifa, NULL, 0);
/* Round the next wakeup to a second boundary when close enough, but
 * never schedule sooner than ADDRCONF_TIMER_FUZZ_MAX from now. */
721 next_sec = round_jiffies_up(next);
724 /* If rounded timeout is accurate enough, accept it. */
725 if (time_before(next_sec, next + ADDRCONF_TIMER_FUZZ))
726 next_sched = next_sec;
729 /* And minimum interval is ADDRCONF_TIMER_FUZZ_MAX. */
730 if (time_before(next_sched, now + ADDRCONF_TIMER_FUZZ_MAX))
731 next_sched = now + ADDRCONF_TIMER_FUZZ_MAX;
733 queue_delayed_work(system_power_efficient_wq, &check_lifetime_work,
/* Apply valid/preferred lifetimes to @ifa: infinite valid lifetime maps to
 * IFA_F_PERMANENT; a zero preferred lifetime marks the address deprecated.
 * Timestamps are refreshed; cstamp is set only on first use. */
737 static void set_ifa_lifetime(struct in_ifaddr *ifa, __u32 valid_lft,
740 unsigned long timeout;
742 ifa->ifa_flags &= ~(IFA_F_PERMANENT | IFA_F_DEPRECATED);
744 timeout = addrconf_timeout_fixup(valid_lft, HZ);
745 if (addrconf_finite_timeout(timeout))
746 ifa->ifa_valid_lft = timeout;
/* Infinite valid lifetime: the address never expires. */
748 ifa->ifa_flags |= IFA_F_PERMANENT;
750 timeout = addrconf_timeout_fixup(prefered_lft, HZ);
751 if (addrconf_finite_timeout(timeout)) {
753 ifa->ifa_flags |= IFA_F_DEPRECATED;
754 ifa->ifa_preferred_lft = timeout;
756 ifa->ifa_tstamp = jiffies;
757 if (!ifa->ifa_cstamp)
758 ifa->ifa_cstamp = ifa->ifa_tstamp;
/* Build an in_ifaddr from an RTM_NEWADDR message: validate prefix length
 * and required IFA_LOCAL, resolve the device, and populate address, mask,
 * flags, label and (optionally) lifetimes from the attributes. Returns the
 * new ifa or an ERR_PTR (error paths elided in this view). */
761 static struct in_ifaddr *rtm_to_ifaddr(struct net *net, struct nlmsghdr *nlh,
762 __u32 *pvalid_lft, __u32 *pprefered_lft)
764 struct nlattr *tb[IFA_MAX+1];
765 struct in_ifaddr *ifa;
766 struct ifaddrmsg *ifm;
767 struct net_device *dev;
768 struct in_device *in_dev;
771 err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, ifa_ipv4_policy);
775 ifm = nlmsg_data(nlh);
/* IPv4 prefix cannot exceed 32 bits and a local address is mandatory. */
777 if (ifm->ifa_prefixlen > 32 || !tb[IFA_LOCAL])
780 dev = __dev_get_by_index(net, ifm->ifa_index);
785 in_dev = __in_dev_get_rtnl(dev);
790 ifa = inet_alloc_ifa();
793 * A potential indev allocation can be left alive, it stays
794 * assigned to its device and is destroy with it.
798 ipv4_devconf_setall(in_dev);
799 neigh_parms_data_state_setall(in_dev->arp_parms);
/* IFA_ADDRESS defaults to IFA_LOCAL (non-pointopoint convention). */
802 if (!tb[IFA_ADDRESS])
803 tb[IFA_ADDRESS] = tb[IFA_LOCAL];
805 INIT_HLIST_NODE(&ifa->hash);
806 ifa->ifa_prefixlen = ifm->ifa_prefixlen;
807 ifa->ifa_mask = inet_make_mask(ifm->ifa_prefixlen);
808 ifa->ifa_flags = tb[IFA_FLAGS] ? nla_get_u32(tb[IFA_FLAGS]) :
810 ifa->ifa_scope = ifm->ifa_scope;
811 ifa->ifa_dev = in_dev;
813 ifa->ifa_local = nla_get_in_addr(tb[IFA_LOCAL]);
814 ifa->ifa_address = nla_get_in_addr(tb[IFA_ADDRESS]);
816 if (tb[IFA_BROADCAST])
817 ifa->ifa_broadcast = nla_get_in_addr(tb[IFA_BROADCAST]);
/* Label falls back to the device name when not supplied. */
820 nla_strlcpy(ifa->ifa_label, tb[IFA_LABEL], IFNAMSIZ);
822 memcpy(ifa->ifa_label, dev->name, IFNAMSIZ);
824 if (tb[IFA_CACHEINFO]) {
825 struct ifa_cacheinfo *ci;
827 ci = nla_data(tb[IFA_CACHEINFO]);
/* Reject zero valid lifetime and preferred > valid. */
828 if (!ci->ifa_valid || ci->ifa_prefered > ci->ifa_valid) {
832 *pvalid_lft = ci->ifa_valid;
833 *pprefered_lft = ci->ifa_prefered;
/* Find an existing address on the same device matching @ifa's mask, subnet
 * and local address (used by RTM_NEWADDR to detect replace-vs-create). */
844 static struct in_ifaddr *find_matching_ifa(struct in_ifaddr *ifa)
846 struct in_device *in_dev = ifa->ifa_dev;
847 struct in_ifaddr *ifa1, **ifap;
852 for (ifap = &in_dev->ifa_list; (ifa1 = *ifap) != NULL;
853 ifap = &ifa1->ifa_next) {
854 if (ifa1->ifa_mask == ifa->ifa_mask &&
855 inet_ifa_match(ifa1->ifa_address, ifa) &&
856 ifa1->ifa_local == ifa->ifa_local)
/* RTM_NEWADDR handler: build the candidate ifa, then either insert it as a
 * new address (honoring IFA_F_MCAUTOJOIN) or, if it already exists, treat
 * the request as a lifetime update subject to NLM_F_EXCL/NLM_F_REPLACE. */
862 static int inet_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh)
864 struct net *net = sock_net(skb->sk);
865 struct in_ifaddr *ifa;
866 struct in_ifaddr *ifa_existing;
867 __u32 valid_lft = INFINITY_LIFE_TIME;
868 __u32 prefered_lft = INFINITY_LIFE_TIME;
872 ifa = rtm_to_ifaddr(net, nlh, &valid_lft, &prefered_lft);
876 ifa_existing = find_matching_ifa(ifa);
878 /* It would be best to check for !NLM_F_CREATE here but
879 * userspace already relies on not having to provide this.
881 set_ifa_lifetime(ifa, valid_lft, prefered_lft);
/* Autojoin the multicast group before inserting, when requested. */
882 if (ifa->ifa_flags & IFA_F_MCAUTOJOIN) {
883 int ret = ip_mc_autojoin_config(net, true, ifa);
890 return __inet_insert_ifa(ifa, nlh, NETLINK_CB(skb).portid);
/* Existing address: only a lifetime update is allowed, and only with
 * NLM_F_REPLACE and without NLM_F_EXCL. */
894 if (nlh->nlmsg_flags & NLM_F_EXCL ||
895 !(nlh->nlmsg_flags & NLM_F_REPLACE))
898 set_ifa_lifetime(ifa, valid_lft, prefered_lft)
899 cancel_delayed_work(&check_lifetime_work);
900 queue_delayed_work(system_power_efficient_wq,
901 &check_lifetime_work, 0);
902 rtmsg_ifa(RTM_NEWADDR, ifa, nlh, NETLINK_CB(skb).portid);
908 * Determine a default network mask, based on the IP address.
/* Classful default prefix length for @addr: 0 for zeronet, the class A/B/C
 * length otherwise, -1 for anything else (e.g. multicast). */
911 static int inet_abc_len(__be32 addr)
913 int rc = -1; /* Something else, probably a multicast. */
915 if (ipv4_is_zeronet(addr))
918 __u32 haddr = ntohl(addr);
920 if (IN_CLASSA(haddr))
922 else if (IN_CLASSB(haddr))
924 else if (IN_CLASSC(haddr))
/* Legacy SIOCGIF*/SIOCSIF* ioctl entry point for IPv4 addresses. Copies the
 * ifreq in, locates the device and (label- or label+address-matched) ifa,
 * performs the get/set, and copies the result back for the get variants.
 * Set operations require CAP_NET_ADMIN in the target netns. */
932 int devinet_ioctl(struct net *net, unsigned int cmd, void __user *arg)
935 struct sockaddr_in sin_orig;
936 struct sockaddr_in *sin = (struct sockaddr_in *)&ifr.ifr_addr;
937 struct in_device *in_dev;
938 struct in_ifaddr **ifap = NULL;
939 struct in_ifaddr *ifa = NULL;
940 struct net_device *dev;
943 int tryaddrmatch = 0;
946 * Fetch the caller's info block into kernel space
949 if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
/* Defensive NUL-termination of the user-supplied name. */
951 ifr.ifr_name[IFNAMSIZ - 1] = 0;
953 /* save original address for comparison */
954 memcpy(&sin_orig, sin, sizeof(*sin));
/* A ':' in the name denotes a 4.4BSD-style alias label. */
956 colon = strchr(ifr.ifr_name, ':');
960 dev_load(net, ifr.ifr_name);
963 case SIOCGIFADDR: /* Get interface address */
964 case SIOCGIFBRDADDR: /* Get the broadcast address */
965 case SIOCGIFDSTADDR: /* Get the destination address */
966 case SIOCGIFNETMASK: /* Get the netmask for the interface */
967 /* Note that these ioctls will not sleep,
968 so that we do not impose a lock.
969 One day we will be forced to put shlock here (I mean SMP)
971 tryaddrmatch = (sin_orig.sin_family == AF_INET);
972 memset(sin, 0, sizeof(*sin));
973 sin->sin_family = AF_INET;
978 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
981 case SIOCSIFADDR: /* Set interface address (and family) */
982 case SIOCSIFBRDADDR: /* Set the broadcast address */
983 case SIOCSIFDSTADDR: /* Set the destination address */
984 case SIOCSIFNETMASK: /* Set the netmask for the interface */
986 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
/* Set variants only accept AF_INET sockaddrs. */
989 if (sin->sin_family != AF_INET)
1000 dev = __dev_get_by_name(net, ifr.ifr_name)
1007 in_dev = __in_dev_get_rtnl(dev);
1010 /* Matthias Andree */
1011 /* compare label and address (4.4BSD style) */
1012 /* note: we only do this for a limited set of ioctls
1013 and only if the original address family was AF_INET.
1014 This is checked above. */
1015 for (ifap = &in_dev->ifa_list; (ifa = *ifap) != NULL;
1016 ifap = &ifa->ifa_next) {
1017 if (!strcmp(ifr.ifr_name, ifa->ifa_label) &&
1018 sin_orig.sin_addr.s_addr ==
1024 /* we didn't get a match, maybe the application is
1025 4.3BSD-style and passed in junk so we fall back to
1026 comparing just the label */
1028 for (ifap = &in_dev->ifa_list; (ifa = *ifap) != NULL;
1029 ifap = &ifa->ifa_next)
1030 if (!strcmp(ifr.ifr_name, ifa->ifa_label))
/* No matching address: fail unless the cmd can create one. */
1035 ret = -EADDRNOTAVAIL;
1036 if (!ifa && cmd != SIOCSIFADDR && cmd != SIOCSIFFLAGS)
1040 case SIOCGIFADDR: /* Get interface address */
1041 sin->sin_addr.s_addr = ifa->ifa_local;
1044 case SIOCGIFBRDADDR: /* Get the broadcast address */
1045 sin->sin_addr.s_addr = ifa->ifa_broadcast;
1048 case SIOCGIFDSTADDR: /* Get the destination address */
1049 sin->sin_addr.s_addr = ifa->ifa_address;
1052 case SIOCGIFNETMASK: /* Get the netmask for the interface */
1053 sin->sin_addr.s_addr = ifa->ifa_mask;
/* SIOCSIFFLAGS on an alias: bringing it down deletes the alias. */
1058 ret = -EADDRNOTAVAIL;
1062 if (!(ifr.ifr_flags & IFF_UP))
1063 inet_del_ifa(in_dev, ifap, 1);
1066 ret = dev_change_flags(dev, ifr.ifr_flags);
1069 case SIOCSIFADDR: /* Set interface address (and family) */
1071 if (inet_abc_len(sin->sin_addr.s_addr) < 0)
/* No existing ifa: allocate one and label it after the request. */
1076 ifa = inet_alloc_ifa();
1079 INIT_HLIST_NODE(&ifa->hash);
1081 memcpy(ifa->ifa_label, ifr.ifr_name, IFNAMSIZ);
1083 memcpy(ifa->ifa_label, dev->name, IFNAMSIZ);
/* Re-setting the same address is a no-op. */
1086 if (ifa->ifa_local == sin->sin_addr.s_addr)
1088 inet_del_ifa(in_dev, ifap, 0);
1089 ifa->ifa_broadcast = 0;
1093 ifa->ifa_address = ifa->ifa_local = sin->sin_addr.s_addr;
/* Derive classful mask/broadcast for non-pointopoint links. */
1095 if (!(dev->flags & IFF_POINTOPOINT)) {
1096 ifa->ifa_prefixlen = inet_abc_len(ifa->ifa_address);
1097 ifa->ifa_mask = inet_make_mask(ifa->ifa_prefixlen);
1098 if ((dev->flags & IFF_BROADCAST) &&
1099 ifa->ifa_prefixlen < 31)
1100 ifa->ifa_broadcast = ifa->ifa_address |
1103 ifa->ifa_prefixlen = 32;
1104 ifa->ifa_mask = inet_make_mask(32);
1106 set_ifa_lifetime(ifa, INFINITY_LIFE_TIME, INFINITY_LIFE_TIME);
1107 ret = inet_set_ifa(dev, ifa);
1110 case SIOCSIFBRDADDR: /* Set the broadcast address */
/* Change requires delete + reinsert so FIB/notifiers stay coherent. */
1112 if (ifa->ifa_broadcast != sin->sin_addr.s_addr) {
1113 inet_del_ifa(in_dev, ifap, 0);
1114 ifa->ifa_broadcast = sin->sin_addr.s_addr;
1115 inet_insert_ifa(ifa);
1119 case SIOCSIFDSTADDR: /* Set the destination address */
1121 if (ifa->ifa_address == sin->sin_addr.s_addr)
1124 if (inet_abc_len(sin->sin_addr.s_addr) < 0)
1127 inet_del_ifa(in_dev, ifap, 0);
1128 ifa->ifa_address = sin->sin_addr.s_addr;
1129 inet_insert_ifa(ifa);
1132 case SIOCSIFNETMASK: /* Set the netmask for the interface */
1135 * The mask we set must be legal.
1138 if (bad_mask(sin->sin_addr.s_addr, 0))
1141 if (ifa->ifa_mask != sin->sin_addr.s_addr) {
1142 __be32 old_mask = ifa->ifa_mask;
1143 inet_del_ifa(in_dev, ifap, 0);
1144 ifa->ifa_mask = sin->sin_addr.s_addr;
1145 ifa->ifa_prefixlen = inet_mask_len(ifa->ifa_mask);
1147 /* See if current broadcast address matches
1148 * with current netmask, then recalculate
1149 * the broadcast address. Otherwise it's a
1150 * funny address, so don't touch it since
1151 * the user seems to know what (s)he's doing...
1153 if ((dev->flags & IFF_BROADCAST) &&
1154 (ifa->ifa_prefixlen < 31) &&
1155 (ifa->ifa_broadcast ==
1156 (ifa->ifa_local|~old_mask))) {
1157 ifa->ifa_broadcast = (ifa->ifa_local |
1158 ~sin->sin_addr.s_addr);
1160 inet_insert_ifa(ifa);
/* Get variants: copy the (possibly updated) ifreq back to userspace. */
1170 ret = copy_to_user(arg, &ifr, sizeof(struct ifreq)) ? -EFAULT : 0;
/* SIOCGIFCONF helper: serialize each address on @dev as a struct ifreq into
 * the user buffer @buf (length @len); returns bytes consumed/needed. */
1174 static int inet_gifconf(struct net_device *dev, char __user *buf, int len)
1176 struct in_device *in_dev = __in_dev_get_rtnl(dev);
1177 struct in_ifaddr *ifa;
1184 for (ifa = in_dev->ifa_list; ifa; ifa = ifa->ifa_next) {
/* NULL buf: only count the space required. */
1186 done += sizeof(ifr);
1189 if (len < (int) sizeof(ifr))
1191 memset(&ifr, 0, sizeof(struct ifreq));
1192 strcpy(ifr.ifr_name, ifa->ifa_label);
1194 (*(struct sockaddr_in *)&ifr.ifr_addr).sin_family = AF_INET;
1195 (*(struct sockaddr_in *)&ifr.ifr_addr).sin_addr.s_addr =
1198 if (copy_to_user(buf, &ifr, sizeof(struct ifreq))) {
1202 buf += sizeof(struct ifreq);
1203 len -= sizeof(struct ifreq);
1204 done += sizeof(struct ifreq);
/* Select a source address for talking to @dst within @scope: first a
 * primary address on @dev matching dst's subnet, then (for VRF slaves) an
 * address on the L3 master device, then any suitable address on a device
 * in the same L3 domain. Returns 0 when nothing fits. */
1210 __be32 inet_select_addr(const struct net_device *dev, __be32 dst, int scope)
1213 struct in_device *in_dev;
1214 struct net *net = dev_net(dev);
1218 in_dev = __in_dev_get_rcu(dev);
1222 for_primary_ifa(in_dev) {
1223 if (ifa->ifa_scope > scope)
1225 if (!dst || inet_ifa_match(dst, ifa)) {
1226 addr = ifa->ifa_local;
/* Remember a fallback candidate that passed the scope check. */
1230 addr = ifa->ifa_local;
1231 } endfor_ifa(in_dev);
1236 master_idx = l3mdev_master_ifindex_rcu(dev);
1238 /* For VRFs, the VRF device takes the place of the loopback device,
1239 * with addresses on it being preferred. Note in such cases the
1240 * loopback device will be among the devices that fail the master_idx
1241 * equality check in the loop below.
1244 (dev = dev_get_by_index_rcu(net, master_idx)) &&
1245 (in_dev = __in_dev_get_rcu(dev))) {
1246 for_primary_ifa(in_dev) {
1247 if (ifa->ifa_scope != RT_SCOPE_LINK &&
1248 ifa->ifa_scope <= scope) {
1249 addr = ifa->ifa_local;
1252 } endfor_ifa(in_dev);
1255 /* Not loopback addresses on loopback should be preferred
1256 in this case. It is important that lo is the first interface
1259 for_each_netdev_rcu(net, dev) {
/* Restrict the walk to devices in the same L3 master domain. */
1260 if (l3mdev_master_ifindex_rcu(dev) != master_idx)
1263 in_dev = __in_dev_get_rcu(dev);
1267 for_primary_ifa(in_dev) {
1268 if (ifa->ifa_scope != RT_SCOPE_LINK &&
1269 ifa->ifa_scope <= scope) {
1270 addr = ifa->ifa_local;
1273 } endfor_ifa(in_dev);
1279 EXPORT_SYMBOL(inet_select_addr);
/* Check one in_device for a local address satisfying the (dst, local,
 * scope) wildcards; returns the confirmed address or 0. */
1281 static __be32 confirm_addr_indev(struct in_device *in_dev, __be32 dst,
1282 __be32 local, int scope)
1289 (local == ifa->ifa_local || !local) &&
1290 ifa->ifa_scope <= scope) {
1291 addr = ifa->ifa_local;
/* Track whether a same-subnet match for both local and dst exists. */
1296 same = (!local || inet_ifa_match(local, ifa)) &&
1297 (!dst || inet_ifa_match(dst, ifa));
1301 /* Is the selected addr into dst subnet? */
1302 if (inet_ifa_match(addr, ifa))
1304 /* No, then can we use new local src? */
1305 if (ifa->ifa_scope <= scope) {
1306 addr = ifa->ifa_local;
1309 /* search for large dst subnet for addr */
1313 } endfor_ifa(in_dev);
1315 return same ? addr : 0;
1319 * Confirm that local IP address exists using wildcards:
1320 * - net: netns to check, cannot be NULL
1321 * - in_dev: only on this interface, NULL=any interface
1322 * - dst: only in the same subnet as dst, 0=any dst
1323 * - local: address, 0=autoselect the local address
1324 * - scope: maximum allowed scope value for the local address
/* Confirm a local address exists (see kernel-doc above): check just
 * @in_dev when given, otherwise scan every device in @net under RCU. */
1326 __be32 inet_confirm_addr(struct net *net, struct in_device *in_dev,
1327 __be32 dst, __be32 local, int scope)
1330 struct net_device *dev;
1333 return confirm_addr_indev(in_dev, dst, local, scope);
1336 for_each_netdev_rcu(net, dev) {
1337 in_dev = __in_dev_get_rcu(dev);
1339 addr = confirm_addr_indev(in_dev, dst, local, scope);
1348 EXPORT_SYMBOL(inet_confirm_addr);
/* Subscribe to IPv4 address add/remove events (NETDEV_UP/NETDEV_DOWN on
 * the inetaddr_chain). */
1354 int register_inetaddr_notifier(struct notifier_block *nb)
1356 return blocking_notifier_chain_register(&inetaddr_chain, nb);
1358 EXPORT_SYMBOL(register_inetaddr_notifier);
/* Unsubscribe from IPv4 address events. */
1360 int unregister_inetaddr_notifier(struct notifier_block *nb)
1362 return blocking_notifier_chain_unregister(&inetaddr_chain, nb);
1364 EXPORT_SYMBOL(unregister_inetaddr_notifier);
1366 /* Rename ifa_labels for a device name change. Make some effort to preserve
1367 * existing alias numbering and to create unique labels if possible.
/* Rewrite every ifa_label after a device rename, preserving any ":N" alias
 * suffix; truncate the suffix placement if name+suffix exceed IFNAMSIZ. */
1369 static void inetdev_changename(struct net_device *dev, struct in_device *in_dev)
1371 struct in_ifaddr *ifa;
1374 for (ifa = in_dev->ifa_list; ifa; ifa = ifa->ifa_next) {
1375 char old[IFNAMSIZ], *dot;
1377 memcpy(old, ifa->ifa_label, IFNAMSIZ);
1378 memcpy(ifa->ifa_label, dev->name, IFNAMSIZ);
1381 dot = strchr(old, ':');
/* No ':' in the old label: synthesize a numbered alias suffix. */
1383 sprintf(old, ":%d", named);
1386 if (strlen(dot) + strlen(dev->name) < IFNAMSIZ)
1387 strcat(ifa->ifa_label, dot);
/* Suffix would overflow: overwrite the label tail instead. */
1389 strcpy(ifa->ifa_label + (IFNAMSIZ - strlen(dot) - 1), dot);
1391 rtmsg_ifa(RTM_NEWADDR, ifa, NULL, 0);
/* Emit a gratuitous ARP (sender == target == ifa_local) for every address
 * on the device, so peers refresh their ARP caches after a link change. */
1395 static void inetdev_send_gratuitous_arp(struct net_device *dev,
1396 struct in_device *in_dev)
1399 struct in_ifaddr *ifa;
1401 for (ifa = in_dev->ifa_list; ifa;
1402 ifa = ifa->ifa_next) {
1403 arp_send(ARPOP_REQUEST, ETH_P_ARP,
1404 ifa->ifa_local, dev,
1405 ifa->ifa_local, NULL,
1406 dev->dev_addr, NULL);
1410 /* Called only under RTNL semaphore */
/* netdev notifier: creates/destroys the in_device as devices register,
 * change MTU, or unregister; auto-configures 127.0.0.1/8 on loopback UP;
 * sends gratuitous ARP on address/link changes; handles renames and
 * (un)mapping of multicast across type changes. Runs under RTNL. */
1412 static int inetdev_event(struct notifier_block *this, unsigned long event,
1415 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
1416 struct in_device *in_dev = __in_dev_get_rtnl(dev);
/* No in_device yet: only REGISTER (or MTU change re-enabling IP)
 * creates one; everything else is ignored. */
1421 if (event == NETDEV_REGISTER) {
1422 in_dev = inetdev_init(dev);
1424 return notifier_from_errno(PTR_ERR(in_dev));
1425 if (dev->flags & IFF_LOOPBACK) {
1426 IN_DEV_CONF_SET(in_dev, NOXFRM, 1);
1427 IN_DEV_CONF_SET(in_dev, NOPOLICY, 1);
1429 } else if (event == NETDEV_CHANGEMTU) {
1430 /* Re-enabling IP */
1431 if (inetdev_valid_mtu(dev->mtu))
1432 in_dev = inetdev_init(dev);
1438 case NETDEV_REGISTER:
/* REGISTER with an in_device already present is a bug. */
1439 pr_debug("%s: bug\n", __func__);
1440 RCU_INIT_POINTER(dev->ip_ptr, NULL);
1443 if (!inetdev_valid_mtu(dev->mtu))
/* Loopback coming up: auto-assign 127.0.0.1/8, host scope. */
1445 if (dev->flags & IFF_LOOPBACK) {
1446 struct in_ifaddr *ifa = inet_alloc_ifa();
1449 INIT_HLIST_NODE(&ifa->hash);
1451 ifa->ifa_address = htonl(INADDR_LOOPBACK);
1452 ifa->ifa_prefixlen = 8;
1453 ifa->ifa_mask = inet_make_mask(8);
1454 in_dev_hold(in_dev);
1455 ifa->ifa_dev = in_dev;
1456 ifa->ifa_scope = RT_SCOPE_HOST;
1457 memcpy(ifa->ifa_label, dev->name, IFNAMSIZ);
1458 set_ifa_lifetime(ifa, INFINITY_LIFE_TIME,
1459 INFINITY_LIFE_TIME);
1460 ipv4_devconf_setall(in_dev);
1461 neigh_parms_data_state_setall(in_dev->arp_parms);
1462 inet_insert_ifa(ifa);
1467 case NETDEV_CHANGEADDR:
1468 if (!IN_DEV_ARP_NOTIFY(in_dev))
1471 case NETDEV_NOTIFY_PEERS:
1472 /* Send gratuitous ARP to notify of link change */
1473 inetdev_send_gratuitous_arp(dev, in_dev);
1478 case NETDEV_PRE_TYPE_CHANGE:
1479 ip_mc_unmap(in_dev);
1481 case NETDEV_POST_TYPE_CHANGE:
1482 ip_mc_remap(in_dev);
1484 case NETDEV_CHANGEMTU:
1485 if (inetdev_valid_mtu(dev->mtu))
1487 /* disable IP when MTU is not enough */
1488 case NETDEV_UNREGISTER:
1489 inetdev_destroy(in_dev);
1491 case NETDEV_CHANGENAME:
1492 /* Do not notify about label change, this event is
1493 * not interesting to applications using netlink.
1495 inetdev_changename(dev, in_dev);
/* Re-register sysctls so their paths reflect the new name. */
1497 devinet_sysctl_unregister(in_dev);
1498 devinet_sysctl_register(in_dev);
/* Hooked into the netdevice notifier chain in devinet_init(). */
1505 static struct notifier_block ip_netdev_notifier = {
1506 .notifier_call = inetdev_event,
/*
 * Worst-case payload size of an RTM_NEWADDR/RTM_DELADDR message;
 * used to size the skb allocated in rtmsg_ifa().
 */
1509 static size_t inet_nlmsg_size(void)
1511 return NLMSG_ALIGN(sizeof(struct ifaddrmsg))
1512 + nla_total_size(4) /* IFA_ADDRESS */
1513 + nla_total_size(4) /* IFA_LOCAL */
1514 + nla_total_size(4) /* IFA_BROADCAST */
1515 + nla_total_size(IFNAMSIZ) /* IFA_LABEL */
1516 + nla_total_size(4) /* IFA_FLAGS */
1517 + nla_total_size(sizeof(struct ifa_cacheinfo)); /* IFA_CACHEINFO */
/* Convert a jiffies timestamp to hundredths of a second since boot. */
1520 static inline u32 cstamp_delta(unsigned long cstamp)
1522 return (cstamp - INITIAL_JIFFIES) * 100UL / HZ;
/*
 * Append an IFA_CACHEINFO attribute (create/update stamps plus
 * preferred/valid lifetimes) to @skb; returns nla_put()'s result.
 */
1525 static int put_cacheinfo(struct sk_buff *skb, unsigned long cstamp,
1526 unsigned long tstamp, u32 preferred, u32 valid)
1528 struct ifa_cacheinfo ci;
1530 ci.cstamp = cstamp_delta(cstamp);
1531 ci.tstamp = cstamp_delta(tstamp);
1532 ci.ifa_prefered = preferred;
1533 ci.ifa_valid = valid;
1535 return nla_put(skb, IFA_CACHEINFO, sizeof(ci), &ci);
/*
 * Serialize one in_ifaddr into a netlink message (ifaddrmsg header +
 * IFA_* attributes). Returns 0 on success; on failure the partially
 * built message is cancelled.
 * NOTE(review): the lifetime-clamping arithmetic and several error
 * paths appear elided by extraction — confirm against upstream.
 */
1538 static int inet_fill_ifaddr(struct sk_buff *skb, struct in_ifaddr *ifa,
1539 u32 portid, u32 seq, int event, unsigned int flags)
1541 struct ifaddrmsg *ifm;
1542 struct nlmsghdr *nlh;
1543 u32 preferred, valid;
1545 nlh = nlmsg_put(skb, portid, seq, event, sizeof(*ifm), flags);
1549 ifm = nlmsg_data(nlh);
1550 ifm->ifa_family = AF_INET;
1551 ifm->ifa_prefixlen = ifa->ifa_prefixlen;
1552 ifm->ifa_flags = ifa->ifa_flags;
1553 ifm->ifa_scope = ifa->ifa_scope;
1554 ifm->ifa_index = ifa->ifa_dev->dev->ifindex;
/* Non-permanent addresses report remaining lifetimes, aged by tstamp. */
1556 if (!(ifm->ifa_flags & IFA_F_PERMANENT)) {
1557 preferred = ifa->ifa_preferred_lft;
1558 valid = ifa->ifa_valid_lft;
1559 if (preferred != INFINITY_LIFE_TIME) {
1560 long tval = (jiffies - ifa->ifa_tstamp) / HZ;
1562 if (preferred > tval)
1566 if (valid != INFINITY_LIFE_TIME) {
/* Permanent addresses always report infinite lifetimes. */
1574 preferred = INFINITY_LIFE_TIME;
1575 valid = INFINITY_LIFE_TIME;
/* Optional attributes are emitted only when non-zero / non-empty. */
1577 if ((ifa->ifa_address &&
1578 nla_put_in_addr(skb, IFA_ADDRESS, ifa->ifa_address)) ||
1580 nla_put_in_addr(skb, IFA_LOCAL, ifa->ifa_local)) ||
1581 (ifa->ifa_broadcast &&
1582 nla_put_in_addr(skb, IFA_BROADCAST, ifa->ifa_broadcast)) ||
1583 (ifa->ifa_label[0] &&
1584 nla_put_string(skb, IFA_LABEL, ifa->ifa_label)) ||
1585 nla_put_u32(skb, IFA_FLAGS, ifa->ifa_flags) ||
1586 put_cacheinfo(skb, ifa->ifa_cstamp, ifa->ifa_tstamp,
1588 goto nla_put_failure;
1590 nlmsg_end(skb, nlh);
/* Roll back the partially built message on attribute overflow. */
1594 nlmsg_cancel(skb, nlh);
/*
 * RTM_GETADDR dump handler: walks every device hash bucket and every
 * address list, resuming from cb->args[] (hash slot, device index,
 * address index) across multiple dump callbacks.
 * NOTE(review): RCU lock/unlock, continue/goto lines and the final
 * cb->args[] save-outs look elided by extraction — confirm upstream.
 */
1598 static int inet_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
1600 struct net *net = sock_net(skb->sk);
1603 int ip_idx, s_ip_idx;
1604 struct net_device *dev;
1605 struct in_device *in_dev;
1606 struct in_ifaddr *ifa;
1607 struct hlist_head *head;
/* Resume point from the previous dump invocation. */
1610 s_idx = idx = cb->args[1];
1611 s_ip_idx = ip_idx = cb->args[2];
1613 for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
1615 head = &net->dev_index_head[h];
/* Sequence number lets userspace detect mid-dump address changes. */
1617 cb->seq = atomic_read(&net->ipv4.dev_addr_genid) ^
1619 hlist_for_each_entry_rcu(dev, head, index_hlist) {
1622 if (h > s_h || idx > s_idx)
1624 in_dev = __in_dev_get_rcu(dev);
1628 for (ifa = in_dev->ifa_list, ip_idx = 0; ifa;
1629 ifa = ifa->ifa_next, ip_idx++) {
/* Skip addresses already delivered in a previous callback. */
1630 if (ip_idx < s_ip_idx)
1632 if (inet_fill_ifaddr(skb, ifa,
1633 NETLINK_CB(cb->skb).portid,
1635 RTM_NEWADDR, NLM_F_MULTI) < 0) {
1639 nl_dump_check_consistent(cb, nlmsg_hdr(skb));
/* Save the resume cursor for the next callback. */
1650 cb->args[2] = ip_idx;
/*
 * Broadcast an address add/delete event to RTNLGRP_IPV4_IFADDR
 * listeners; on failure, record the error on the group instead.
 * NOTE(review): allocation-failure branch and errout label look
 * elided by extraction.
 */
1655 static void rtmsg_ifa(int event, struct in_ifaddr *ifa, struct nlmsghdr *nlh,
1658 struct sk_buff *skb;
/* Echo the requester's sequence number when triggered by a request. */
1659 u32 seq = nlh ? nlh->nlmsg_seq : 0;
1663 net = dev_net(ifa->ifa_dev->dev);
1664 skb = nlmsg_new(inet_nlmsg_size(), GFP_KERNEL);
1668 err = inet_fill_ifaddr(skb, ifa, portid, seq, event, 0);
1670 /* -EMSGSIZE implies BUG in inet_nlmsg_size() */
1671 WARN_ON(err == -EMSGSIZE);
1675 rtnl_notify(skb, net, portid, RTNLGRP_IPV4_IFADDR, nlh, GFP_KERNEL);
1679 rtnl_set_sk_err(net, RTNLGRP_IPV4_IFADDR, err);
/*
 * rtnl_af_ops hook: size needed for the IFLA_INET_CONF attribute
 * (one u32 per devconf entry) in an IFLA_AF_SPEC nest.
 */
1682 static size_t inet_get_link_af_size(const struct net_device *dev,
1683 u32 ext_filter_mask)
1685 struct in_device *in_dev = rcu_dereference_rtnl(dev->ip_ptr);
1690 return nla_total_size(IPV4_DEVCONF_MAX * 4); /* IFLA_INET_CONF */
/*
 * rtnl_af_ops hook: dump the whole per-device devconf array into an
 * IFLA_INET_CONF attribute.
 * NOTE(review): the no-in_dev and reserve-failure early returns look
 * elided by extraction.
 */
1693 static int inet_fill_link_af(struct sk_buff *skb, const struct net_device *dev,
1694 u32 ext_filter_mask)
1696 struct in_device *in_dev = rcu_dereference_rtnl(dev->ip_ptr);
1703 nla = nla_reserve(skb, IFLA_INET_CONF, IPV4_DEVCONF_MAX * 4);
/* Copy the raw devconf values directly into the reserved attribute. */
1707 for (i = 0; i < IPV4_DEVCONF_MAX; i++)
1708 ((u32 *) nla_data(nla))[i] = in_dev->cnf.data[i];
/* Netlink policy for IFLA_AF_SPEC/AF_INET attributes. */
1713 static const struct nla_policy inet_af_policy[IFLA_INET_MAX+1] = {
1714 [IFLA_INET_CONF] = { .type = NLA_NESTED },
/*
 * rtnl_af_ops hook: validate an IFLA_INET_CONF request before it is
 * applied — device must have IPv4 enabled and every nested config id
 * must be in range.
 * NOTE(review): per-attribute length check and return statements look
 * elided by extraction.
 */
1717 static int inet_validate_link_af(const struct net_device *dev,
1718 const struct nlattr *nla)
1720 struct nlattr *a, *tb[IFLA_INET_MAX+1];
/* Reject devices without an in_device (IPv4 disabled). */
1723 if (dev && !__in_dev_get_rtnl(dev))
1724 return -EAFNOSUPPORT;
1726 err = nla_parse_nested(tb, IFLA_INET_MAX, nla, inet_af_policy);
1730 if (tb[IFLA_INET_CONF]) {
1731 nla_for_each_nested(a, tb[IFLA_INET_CONF], rem) {
1732 int cfgid = nla_type(a);
/* Config ids are 1-based indexes into the devconf array. */
1737 if (cfgid <= 0 || cfgid > IPV4_DEVCONF_MAX)
/*
 * rtnl_af_ops hook: apply IFLA_INET_CONF values to the device's
 * devconf. Validation already happened in inet_validate_link_af(),
 * hence the NULL policy on re-parse.
 */
1745 static int inet_set_link_af(struct net_device *dev, const struct nlattr *nla)
1747 struct in_device *in_dev = __in_dev_get_rtnl(dev);
1748 struct nlattr *a, *tb[IFLA_INET_MAX+1];
1752 return -EAFNOSUPPORT;
1754 if (nla_parse_nested(tb, IFLA_INET_MAX, nla, NULL) < 0)
1757 if (tb[IFLA_INET_CONF]) {
1758 nla_for_each_nested(a, tb[IFLA_INET_CONF], rem)
1759 ipv4_devconf_set(in_dev, nla_type(a), nla_get_u32(a));
/*
 * Size of an RTM_NEWNETCONF message for @type; NETCONFA_ALL accounts
 * for every attribute. Must stay in sync with
 * inet_netconf_fill_devconf() below.
 */
1765 static int inet_netconf_msgsize_devconf(int type)
1767 int size = NLMSG_ALIGN(sizeof(struct netconfmsg))
1768 + nla_total_size(4); /* NETCONFA_IFINDEX */
1771 if (type == NETCONFA_ALL)
1774 if (all || type == NETCONFA_FORWARDING)
1775 size += nla_total_size(4);
1776 if (all || type == NETCONFA_RP_FILTER)
1777 size += nla_total_size(4);
1778 if (all || type == NETCONFA_MC_FORWARDING)
1779 size += nla_total_size(4);
1780 if (all || type == NETCONFA_PROXY_NEIGH)
1781 size += nla_total_size(4);
1782 if (all || type == NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN)
1783 size += nla_total_size(4);
/*
 * Build an RTM_NEWNETCONF message for @devconf. @type selects one
 * attribute, or NETCONFA_ALL to emit every known one. On overflow the
 * partial message is cancelled.
 * NOTE(review): the nlmsg_put failure branch and the "all = true"
 * assignment look elided by extraction.
 */
1788 static int inet_netconf_fill_devconf(struct sk_buff *skb, int ifindex,
1789 struct ipv4_devconf *devconf, u32 portid,
1790 u32 seq, int event, unsigned int flags,
1793 struct nlmsghdr *nlh;
1794 struct netconfmsg *ncm;
1797 nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct netconfmsg),
1802 if (type == NETCONFA_ALL)
1805 ncm = nlmsg_data(nlh);
1806 ncm->ncm_family = AF_INET;
1808 if (nla_put_s32(skb, NETCONFA_IFINDEX, ifindex) < 0)
1809 goto nla_put_failure;
1811 if ((all || type == NETCONFA_FORWARDING) &&
1812 nla_put_s32(skb, NETCONFA_FORWARDING,
1813 IPV4_DEVCONF(*devconf, FORWARDING)) < 0)
1814 goto nla_put_failure;
1815 if ((all || type == NETCONFA_RP_FILTER) &&
1816 nla_put_s32(skb, NETCONFA_RP_FILTER,
1817 IPV4_DEVCONF(*devconf, RP_FILTER)) < 0)
1818 goto nla_put_failure;
1819 if ((all || type == NETCONFA_MC_FORWARDING) &&
1820 nla_put_s32(skb, NETCONFA_MC_FORWARDING,
1821 IPV4_DEVCONF(*devconf, MC_FORWARDING)) < 0)
1822 goto nla_put_failure;
/* NETCONFA_PROXY_NEIGH maps to the PROXY_ARP devconf entry. */
1823 if ((all || type == NETCONFA_PROXY_NEIGH) &&
1824 nla_put_s32(skb, NETCONFA_PROXY_NEIGH,
1825 IPV4_DEVCONF(*devconf, PROXY_ARP)) < 0)
1826 goto nla_put_failure;
1827 if ((all || type == NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN) &&
1828 nla_put_s32(skb, NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN,
1829 IPV4_DEVCONF(*devconf, IGNORE_ROUTES_WITH_LINKDOWN)) < 0)
1830 goto nla_put_failure;
1832 nlmsg_end(skb, nlh);
1836 nlmsg_cancel(skb, nlh);
/*
 * Broadcast a netconf change for (@type, @ifindex) to the
 * RTNLGRP_IPV4_NETCONF multicast group; sets the group error on
 * failure.
 * NOTE(review): allocation-failure and errout paths look elided by
 * extraction.
 */
1840 void inet_netconf_notify_devconf(struct net *net, int type, int ifindex,
1841 struct ipv4_devconf *devconf)
1843 struct sk_buff *skb;
1846 skb = nlmsg_new(inet_netconf_msgsize_devconf(type), GFP_KERNEL);
1850 err = inet_netconf_fill_devconf(skb, ifindex, devconf, 0, 0,
1851 RTM_NEWNETCONF, 0, type);
1853 /* -EMSGSIZE implies BUG in inet_netconf_msgsize_devconf() */
1854 WARN_ON(err == -EMSGSIZE);
1858 rtnl_notify(skb, net, 0, RTNLGRP_IPV4_NETCONF, NULL, GFP_KERNEL);
1862 rtnl_set_sk_err(net, RTNLGRP_IPV4_NETCONF, err);
/* Netlink policy for RTM_GETNETCONF request attributes. */
1865 static const struct nla_policy devconf_ipv4_policy[NETCONFA_MAX+1] = {
1866 [NETCONFA_IFINDEX] = { .len = sizeof(int) },
1867 [NETCONFA_FORWARDING] = { .len = sizeof(int) },
1868 [NETCONFA_RP_FILTER] = { .len = sizeof(int) },
1869 [NETCONFA_PROXY_NEIGH] = { .len = sizeof(int) },
1870 [NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN] = { .len = sizeof(int) },
/*
 * RTM_GETNETCONF doit handler: look up the devconf selected by
 * NETCONFA_IFINDEX (a real ifindex, NETCONFA_IFINDEX_ALL or
 * NETCONFA_IFINDEX_DEFAULT) and unicast a full netconf reply.
 * NOTE(review): switch head, error labels and several early-return
 * lines look elided by extraction.
 */
1873 static int inet_netconf_get_devconf(struct sk_buff *in_skb,
1874 struct nlmsghdr *nlh)
1876 struct net *net = sock_net(in_skb->sk);
1877 struct nlattr *tb[NETCONFA_MAX+1];
1878 struct netconfmsg *ncm;
1879 struct sk_buff *skb;
1880 struct ipv4_devconf *devconf;
1881 struct in_device *in_dev;
1882 struct net_device *dev;
1886 err = nlmsg_parse(nlh, sizeof(*ncm), tb, NETCONFA_MAX,
1887 devconf_ipv4_policy);
1892 if (!tb[NETCONFA_IFINDEX])
1895 ifindex = nla_get_s32(tb[NETCONFA_IFINDEX]);
1897 case NETCONFA_IFINDEX_ALL:
1898 devconf = net->ipv4.devconf_all;
1900 case NETCONFA_IFINDEX_DEFAULT:
1901 devconf = net->ipv4.devconf_dflt;
/* default: a concrete ifindex — resolve its per-device devconf. */
1904 dev = __dev_get_by_index(net, ifindex);
1907 in_dev = __in_dev_get_rtnl(dev);
1910 devconf = &in_dev->cnf;
/* Reply always carries every attribute, hence NETCONFA_ALL sizing. */
1915 skb = nlmsg_new(inet_netconf_msgsize_devconf(NETCONFA_ALL), GFP_KERNEL);
1919 err = inet_netconf_fill_devconf(skb, ifindex, devconf,
1920 NETLINK_CB(in_skb).portid,
1921 nlh->nlmsg_seq, RTM_NEWNETCONF, 0,
1924 /* -EMSGSIZE implies BUG in inet_netconf_msgsize_devconf() */
1925 WARN_ON(err == -EMSGSIZE);
1929 err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
/*
 * RTM_GETNETCONF dump handler: emits netconf for every device
 * (resumable via cb->args[]), then one pseudo-entry each for "all"
 * (h == NETDEV_HASHENTRIES) and "default" (h == NETDEV_HASHENTRIES+1).
 * NOTE(review): RCU locking, skip/goto lines and cb->args[] save-outs
 * look elided by extraction.
 */
1934 static int inet_netconf_dump_devconf(struct sk_buff *skb,
1935 struct netlink_callback *cb)
1937 struct net *net = sock_net(skb->sk);
1940 struct net_device *dev;
1941 struct in_device *in_dev;
1942 struct hlist_head *head;
1945 s_idx = idx = cb->args[1];
1947 for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
1949 head = &net->dev_index_head[h];
/* Sequence number lets userspace detect mid-dump config changes. */
1951 cb->seq = atomic_read(&net->ipv4.dev_addr_genid) ^
1953 hlist_for_each_entry_rcu(dev, head, index_hlist) {
1956 in_dev = __in_dev_get_rcu(dev);
1960 if (inet_netconf_fill_devconf(skb, dev->ifindex,
1962 NETLINK_CB(cb->skb).portid,
1966 NETCONFA_ALL) < 0) {
1970 nl_dump_check_consistent(cb, nlmsg_hdr(skb));
/* Two extra virtual slots after the device hash: all, then default. */
1976 if (h == NETDEV_HASHENTRIES) {
1977 if (inet_netconf_fill_devconf(skb, NETCONFA_IFINDEX_ALL,
1978 net->ipv4.devconf_all,
1979 NETLINK_CB(cb->skb).portid,
1981 RTM_NEWNETCONF, NLM_F_MULTI,
1987 if (h == NETDEV_HASHENTRIES + 1) {
1988 if (inet_netconf_fill_devconf(skb, NETCONFA_IFINDEX_DEFAULT,
1989 net->ipv4.devconf_dflt,
1990 NETLINK_CB(cb->skb).portid,
1992 RTM_NEWNETCONF, NLM_F_MULTI,
2005 #ifdef CONFIG_SYSCTL
/*
 * Propagate devconf entry @i from the per-netns default template to
 * every device that has not explicitly overridden it (its state bit
 * is clear).
 */
2007 static void devinet_copy_dflt_conf(struct net *net, int i)
2009 struct net_device *dev;
2012 for_each_netdev_rcu(net, dev) {
2013 struct in_device *in_dev;
2015 in_dev = __in_dev_get_rcu(dev);
/* A set state bit means the value was set per-device; keep it. */
2016 if (in_dev && !test_bit(i, in_dev->cnf.state))
2017 in_dev->cnf.data[i] = net->ipv4.devconf_dflt->data[i];
2022 /* called with RTNL locked */
/*
 * Apply a change of the "all" forwarding toggle: mirror it into the
 * default and every device's devconf, disable LRO (incompatible with
 * forwarding), and emit netconf notifications for each scope.
 */
2023 static void inet_forward_change(struct net *net)
2025 struct net_device *dev;
2026 int on = IPV4_DEVCONF_ALL(net, FORWARDING);
/* Routers must not honour redirects; hosts should. */
2028 IPV4_DEVCONF_ALL(net, ACCEPT_REDIRECTS) = !on;
2029 IPV4_DEVCONF_DFLT(net, FORWARDING) = on;
2030 inet_netconf_notify_devconf(net, NETCONFA_FORWARDING,
2031 NETCONFA_IFINDEX_ALL,
2032 net->ipv4.devconf_all);
2033 inet_netconf_notify_devconf(net, NETCONFA_FORWARDING,
2034 NETCONFA_IFINDEX_DEFAULT,
2035 net->ipv4.devconf_dflt);
2037 for_each_netdev(net, dev) {
2038 struct in_device *in_dev;
2041 dev_disable_lro(dev);
2043 in_dev = __in_dev_get_rtnl(dev);
2045 IN_DEV_CONF_SET(in_dev, FORWARDING, on);
2046 inet_netconf_notify_devconf(net, NETCONFA_FORWARDING,
2047 dev->ifindex, &in_dev->cnf);
/*
 * Map a devconf pointer back to the ifindex to report in netconf
 * notifications: the "default"/"all" templates map to their pseudo
 * indexes, otherwise the devconf is embedded in an in_device and we
 * recover the owning device's ifindex via container_of().
 */
2052 static int devinet_conf_ifindex(struct net *net, struct ipv4_devconf *cnf)
2054 if (cnf == net->ipv4.devconf_dflt)
2055 return NETCONFA_IFINDEX_DEFAULT;
2056 else if (cnf == net->ipv4.devconf_all)
2057 return NETCONFA_IFINDEX_ALL;
2059 struct in_device *idev
2060 = container_of(cnf, struct in_device, cnf);
2061 return idev->dev->ifindex;
/*
 * Generic sysctl handler for devconf entries: records the value was
 * explicitly set, propagates default-template changes, flushes the
 * route cache where the entry affects input validation, and emits
 * netconf notifications for the entries userspace tracks.
 * NOTE(review): the "if (write && ...)" guard and notify argument
 * lines look elided by extraction.
 */
2065 static int devinet_conf_proc(struct ctl_table *ctl, int write,
2066 void __user *buffer,
2067 size_t *lenp, loff_t *ppos)
2069 int old_value = *(int *)ctl->data;
2070 int ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
2071 int new_value = *(int *)ctl->data;
2074 struct ipv4_devconf *cnf = ctl->extra1;
2075 struct net *net = ctl->extra2;
/* Index of this entry within the devconf data array. */
2076 int i = (int *)ctl->data - cnf->data;
/* Mark as explicitly set so default propagation skips it. */
2079 set_bit(i, cnf->state);
2081 if (cnf == net->ipv4.devconf_dflt)
2082 devinet_copy_dflt_conf(net, i);
/* Turning these off invalidates cached input routes. */
2083 if (i == IPV4_DEVCONF_ACCEPT_LOCAL - 1 ||
2084 i == IPV4_DEVCONF_ROUTE_LOCALNET - 1)
2085 if ((new_value == 0) && (old_value != 0))
2086 rt_cache_flush(net);
2088 if (i == IPV4_DEVCONF_RP_FILTER - 1 &&
2089 new_value != old_value) {
2090 ifindex = devinet_conf_ifindex(net, cnf);
2091 inet_netconf_notify_devconf(net, NETCONFA_RP_FILTER,
2094 if (i == IPV4_DEVCONF_PROXY_ARP - 1 &&
2095 new_value != old_value) {
2096 ifindex = devinet_conf_ifindex(net, cnf);
2097 inet_netconf_notify_devconf(net, NETCONFA_PROXY_NEIGH,
2100 if (i == IPV4_DEVCONF_IGNORE_ROUTES_WITH_LINKDOWN - 1 &&
2101 new_value != old_value) {
2102 ifindex = devinet_conf_ifindex(net, cnf);
2103 inet_netconf_notify_devconf(net, NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN,
/*
 * Sysctl handler for the forwarding toggles. Writes must run under
 * RTNL; if the lock cannot be taken the old value is restored and the
 * syscall restarted. "all" fans out via inet_forward_change(); a
 * per-device change disables LRO and notifies just that device.
 * NOTE(review): the pre-read of *valp into val, rtnl_unlock and
 * return lines look elided by extraction.
 */
2111 static int devinet_sysctl_forward(struct ctl_table *ctl, int write,
2112 void __user *buffer,
2113 size_t *lenp, loff_t *ppos)
2115 int *valp = ctl->data;
2118 int ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
2120 if (write && *valp != val) {
2121 struct net *net = ctl->extra2;
2123 if (valp != &IPV4_DEVCONF_DFLT(net, FORWARDING)) {
2124 if (!rtnl_trylock()) {
2125 /* Restore the original values before restarting */
2128 return restart_syscall();
2130 if (valp == &IPV4_DEVCONF_ALL(net, FORWARDING)) {
2131 inet_forward_change(net);
2133 struct ipv4_devconf *cnf = ctl->extra1;
2134 struct in_device *idev =
2135 container_of(cnf, struct in_device, cnf);
/* LRO-coalesced frames must not be forwarded. */
2137 dev_disable_lro(idev->dev);
2138 inet_netconf_notify_devconf(net,
2139 NETCONFA_FORWARDING,
2144 rt_cache_flush(net);
/* Default-template change: notify only, no device fan-out. */
2146 inet_netconf_notify_devconf(net, NETCONFA_FORWARDING,
2147 NETCONFA_IFINDEX_DEFAULT,
2148 net->ipv4.devconf_dflt);
/*
 * Sysctl handler for entries whose change invalidates cached routes:
 * plain integer write plus a route cache flush when the value changed.
 * NOTE(review): the pre-read of *valp into val looks elided by
 * extraction.
 */
2154 static int ipv4_doint_and_flush(struct ctl_table *ctl, int write,
2155 void __user *buffer,
2156 size_t *lenp, loff_t *ppos)
2158 int *valp = ctl->data;
2160 int ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
2161 struct net *net = ctl->extra2;
2163 if (write && *valp != val)
2164 rt_cache_flush(net);
/*
 * Helpers for building the devinet_vars[] ctl_table below. Each entry
 * points into the template ipv4_devconf; __devinet_sysctl_register()
 * rebases data/extra1/extra2 for each cloned per-device table.
 */
2169 #define DEVINET_SYSCTL_ENTRY(attr, name, mval, proc) \
2172 .data = ipv4_devconf.data + \
2173 IPV4_DEVCONF_ ## attr - 1, \
2174 .maxlen = sizeof(int), \
2176 .proc_handler = proc, \
2177 .extra1 = &ipv4_devconf, \
/* Read-write entry with the generic devconf handler. */
2180 #define DEVINET_SYSCTL_RW_ENTRY(attr, name) \
2181 DEVINET_SYSCTL_ENTRY(attr, name, 0644, devinet_conf_proc)
/* Read-only entry (mode 0444). */
2183 #define DEVINET_SYSCTL_RO_ENTRY(attr, name) \
2184 DEVINET_SYSCTL_ENTRY(attr, name, 0444, devinet_conf_proc)
/* Entry with a custom handler (e.g. forwarding). */
2186 #define DEVINET_SYSCTL_COMPLEX_ENTRY(attr, name, proc) \
2187 DEVINET_SYSCTL_ENTRY(attr, name, 0644, proc)
/* Entry whose change flushes the route cache. */
2189 #define DEVINET_SYSCTL_FLUSHING_ENTRY(attr, name) \
2190 DEVINET_SYSCTL_COMPLEX_ENTRY(attr, name, ipv4_doint_and_flush)
/*
 * Template sysctl table for net/ipv4/conf/<dev>/*; kmemdup'ed and
 * rebased per device/pseudo-device in __devinet_sysctl_register().
 */
2192 static struct devinet_sysctl_table {
2193 struct ctl_table_header *sysctl_header;
2194 struct ctl_table devinet_vars[__IPV4_DEVCONF_MAX];
2195 } devinet_sysctl = {
2197 DEVINET_SYSCTL_COMPLEX_ENTRY(FORWARDING, "forwarding",
2198 devinet_sysctl_forward),
2199 DEVINET_SYSCTL_RO_ENTRY(MC_FORWARDING, "mc_forwarding"),
2201 DEVINET_SYSCTL_RW_ENTRY(ACCEPT_REDIRECTS, "accept_redirects"),
2202 DEVINET_SYSCTL_RW_ENTRY(SECURE_REDIRECTS, "secure_redirects"),
2203 DEVINET_SYSCTL_RW_ENTRY(SHARED_MEDIA, "shared_media"),
2204 DEVINET_SYSCTL_RW_ENTRY(RP_FILTER, "rp_filter"),
2205 DEVINET_SYSCTL_RW_ENTRY(SEND_REDIRECTS, "send_redirects"),
2206 DEVINET_SYSCTL_RW_ENTRY(ACCEPT_SOURCE_ROUTE,
2207 "accept_source_route"),
2208 DEVINET_SYSCTL_RW_ENTRY(ACCEPT_LOCAL, "accept_local"),
2209 DEVINET_SYSCTL_RW_ENTRY(SRC_VMARK, "src_valid_mark"),
2210 DEVINET_SYSCTL_RW_ENTRY(PROXY_ARP, "proxy_arp"),
2211 DEVINET_SYSCTL_RW_ENTRY(MEDIUM_ID, "medium_id"),
2212 DEVINET_SYSCTL_RW_ENTRY(BOOTP_RELAY, "bootp_relay"),
2213 DEVINET_SYSCTL_RW_ENTRY(LOG_MARTIANS, "log_martians"),
2214 DEVINET_SYSCTL_RW_ENTRY(TAG, "tag"),
2215 DEVINET_SYSCTL_RW_ENTRY(ARPFILTER, "arp_filter"),
2216 DEVINET_SYSCTL_RW_ENTRY(ARP_ANNOUNCE, "arp_announce"),
2217 DEVINET_SYSCTL_RW_ENTRY(ARP_IGNORE, "arp_ignore"),
2218 DEVINET_SYSCTL_RW_ENTRY(ARP_ACCEPT, "arp_accept"),
2219 DEVINET_SYSCTL_RW_ENTRY(ARP_NOTIFY, "arp_notify"),
2220 DEVINET_SYSCTL_RW_ENTRY(PROXY_ARP_PVLAN, "proxy_arp_pvlan"),
2221 DEVINET_SYSCTL_RW_ENTRY(FORCE_IGMP_VERSION,
2222 "force_igmp_version"),
2223 DEVINET_SYSCTL_RW_ENTRY(IGMPV2_UNSOLICITED_REPORT_INTERVAL,
2224 "igmpv2_unsolicited_report_interval"),
2225 DEVINET_SYSCTL_RW_ENTRY(IGMPV3_UNSOLICITED_REPORT_INTERVAL,
2226 "igmpv3_unsolicited_report_interval"),
2227 DEVINET_SYSCTL_RW_ENTRY(IGNORE_ROUTES_WITH_LINKDOWN,
2228 "ignore_routes_with_linkdown"),
2229 DEVINET_SYSCTL_RW_ENTRY(DROP_GRATUITOUS_ARP,
2230 "drop_gratuitous_arp"),
/* Entries below flush the route cache on change. */
2232 DEVINET_SYSCTL_FLUSHING_ENTRY(NOXFRM, "disable_xfrm"),
2233 DEVINET_SYSCTL_FLUSHING_ENTRY(NOPOLICY, "disable_policy"),
2234 DEVINET_SYSCTL_FLUSHING_ENTRY(PROMOTE_SECONDARIES,
2235 "promote_secondaries"),
2236 DEVINET_SYSCTL_FLUSHING_ENTRY(ROUTE_LOCALNET,
2238 DEVINET_SYSCTL_FLUSHING_ENTRY(DROP_UNICAST_IN_L2_MULTICAST,
2239 "drop_unicast_in_l2_multicast"),
/*
 * Clone the devinet_sysctl template, rebase its data/extra pointers
 * onto @p and @net, and register it at net/ipv4/conf/<dev_name>.
 * Announces the full config via a NETCONFA_ALL notification.
 * NOTE(review): kmemdup-failure and unregister/free error labels look
 * elided by extraction.
 */
2243 static int __devinet_sysctl_register(struct net *net, char *dev_name,
2244 int ifindex, struct ipv4_devconf *p)
2247 struct devinet_sysctl_table *t;
2248 char path[sizeof("net/ipv4/conf/") + IFNAMSIZ];
2250 t = kmemdup(&devinet_sysctl, sizeof(*t), GFP_KERNEL);
/* Rebase every entry from the template devconf onto @p (skip the
 * terminating sentinel, hence ARRAY_SIZE - 1). */
2254 for (i = 0; i < ARRAY_SIZE(t->devinet_vars) - 1; i++) {
2255 t->devinet_vars[i].data += (char *)p - (char *)&ipv4_devconf;
2256 t->devinet_vars[i].extra1 = p;
2257 t->devinet_vars[i].extra2 = net;
2260 snprintf(path, sizeof(path), "net/ipv4/conf/%s", dev_name);
2262 t->sysctl_header = register_net_sysctl(net, path, t->devinet_vars);
2263 if (!t->sysctl_header)
2268 inet_netconf_notify_devconf(net, NETCONFA_ALL, ifindex, p);
/*
 * Tear down the sysctl table registered for @cnf.
 * NOTE(review): the NULL check and kfree(t) look elided by extraction.
 */
2277 static void __devinet_sysctl_unregister(struct ipv4_devconf *cnf)
2279 struct devinet_sysctl_table *t = cnf->sysctl;
2285 unregister_net_sysctl_table(t->sysctl_header);
/*
 * Register both the neighbour (ARP) and devinet sysctl trees for a
 * device; unwinds the neigh registration if the devinet one fails.
 */
2289 static int devinet_sysctl_register(struct in_device *idev)
/* Reject device names that would collide with sysctl path syntax. */
2293 if (!sysctl_dev_name_is_allowed(idev->dev->name))
2296 err = neigh_sysctl_register(idev->dev, idev->arp_parms, NULL);
2299 err = __devinet_sysctl_register(dev_net(idev->dev), idev->dev->name,
2300 idev->dev->ifindex, &idev->cnf);
2302 neigh_sysctl_unregister(idev->arp_parms);
/* Mirror of devinet_sysctl_register(): drop both sysctl trees. */
2306 static void devinet_sysctl_unregister(struct in_device *idev)
2308 __devinet_sysctl_unregister(&idev->cnf);
2309 neigh_sysctl_unregister(idev->arp_parms);
/*
 * Template for the legacy net/ipv4/ip_forward sysctl; data/extra
 * pointers are rebased per-netns in devinet_init_net().
 */
2312 static struct ctl_table ctl_forward_entry[] = {
2314 .procname = "ip_forward",
2315 .data = &ipv4_devconf.data[
2316 IPV4_DEVCONF_FORWARDING - 1],
2317 .maxlen = sizeof(int),
2319 .proc_handler = devinet_sysctl_forward,
2320 .extra1 = &ipv4_devconf,
2321 .extra2 = &init_net,
/*
 * Per-netns init: init_net uses the static templates directly; other
 * namespaces get kmemdup'ed copies of "all"/"default" devconf and the
 * ip_forward table. Registers the all/default sysctl trees and
 * net/ipv4/ip_forward, then publishes the pointers in net->ipv4.
 * Error labels unwind in reverse registration order.
 * NOTE(review): several NULL checks, goto statements and error labels
 * look elided by extraction — confirm the unwind chain upstream.
 */
2327 static __net_init int devinet_init_net(struct net *net)
2330 struct ipv4_devconf *all, *dflt;
2331 #ifdef CONFIG_SYSCTL
2332 struct ctl_table *tbl = ctl_forward_entry;
2333 struct ctl_table_header *forw_hdr;
2337 all = &ipv4_devconf;
2338 dflt = &ipv4_devconf_dflt;
2340 if (!net_eq(net, &init_net)) {
/* Non-initial netns: private copies so sysctl writes stay isolated. */
2341 all = kmemdup(all, sizeof(ipv4_devconf), GFP_KERNEL);
2345 dflt = kmemdup(dflt, sizeof(ipv4_devconf_dflt), GFP_KERNEL);
2347 goto err_alloc_dflt;
2349 #ifdef CONFIG_SYSCTL
2350 tbl = kmemdup(tbl, sizeof(ctl_forward_entry), GFP_KERNEL);
/* Point the cloned ip_forward entry at this netns's "all" devconf. */
2354 tbl[0].data = &all->data[IPV4_DEVCONF_FORWARDING - 1];
2355 tbl[0].extra1 = all;
2356 tbl[0].extra2 = net;
2360 #ifdef CONFIG_SYSCTL
2361 err = __devinet_sysctl_register(net, "all", NETCONFA_IFINDEX_ALL, all);
2365 err = __devinet_sysctl_register(net, "default",
2366 NETCONFA_IFINDEX_DEFAULT, dflt);
2371 forw_hdr = register_net_sysctl(net, "net/ipv4", tbl);
2374 net->ipv4.forw_hdr = forw_hdr;
2377 net->ipv4.devconf_all = all;
2378 net->ipv4.devconf_dflt = dflt;
/* Error unwind: reverse order of the registrations above. */
2381 #ifdef CONFIG_SYSCTL
2383 __devinet_sysctl_unregister(dflt);
2385 __devinet_sysctl_unregister(all);
2387 if (tbl != ctl_forward_entry)
2391 if (dflt != &ipv4_devconf_dflt)
2394 if (all != &ipv4_devconf)
/*
 * Per-netns teardown: unregister ip_forward and the all/default
 * sysctl trees, free the cloned ctl_table, then free the devconf
 * copies (kfree is a no-op for init_net's static templates only if
 * these were duplicated — NOTE(review): the kfree(tbl) line looks
 * elided by extraction; confirm upstream).
 */
2400 static __net_exit void devinet_exit_net(struct net *net)
2402 #ifdef CONFIG_SYSCTL
2403 struct ctl_table *tbl;
2405 tbl = net->ipv4.forw_hdr->ctl_table_arg;
2406 unregister_net_sysctl_table(net->ipv4.forw_hdr);
2407 __devinet_sysctl_unregister(net->ipv4.devconf_dflt);
2408 __devinet_sysctl_unregister(net->ipv4.devconf_all);
2411 kfree(net->ipv4.devconf_dflt);
2412 kfree(net->ipv4.devconf_all);
/* Pernet hooks registered from devinet_init(). */
2415 static __net_initdata struct pernet_operations devinet_ops = {
2416 .init = devinet_init_net,
2417 .exit = devinet_exit_net,
/* AF_INET hooks for rtnetlink IFLA_AF_SPEC handling. */
2420 static struct rtnl_af_ops inet_af_ops __read_mostly = {
2422 .fill_link_af = inet_fill_link_af,
2423 .get_link_af_size = inet_get_link_af_size,
2424 .validate_link_af = inet_validate_link_af,
2425 .set_link_af = inet_set_link_af,
/*
 * Boot-time init: set up the address hash table, pernet ops, the
 * netdevice notifier, the address-lifetime worker, the AF_INET
 * rtnl_af_ops and the RTM_*ADDR / RTM_GETNETCONF rtnetlink handlers.
 */
2428 void __init devinet_init(void)
2432 for (i = 0; i < IN4_ADDR_HSIZE; i++)
2433 INIT_HLIST_HEAD(&inet_addr_lst[i]);
2435 register_pernet_subsys(&devinet_ops);
2437 register_gifconf(PF_INET, inet_gifconf);
2438 register_netdevice_notifier(&ip_netdev_notifier);
/* Kick off periodic expiry of address preferred/valid lifetimes. */
2440 queue_delayed_work(system_power_efficient_wq, &check_lifetime_work, 0);
2442 rtnl_af_register(&inet_af_ops);
2444 rtnl_register(PF_INET, RTM_NEWADDR, inet_rtm_newaddr, NULL, NULL);
2445 rtnl_register(PF_INET, RTM_DELADDR, inet_rtm_deladdr, NULL, NULL);
2446 rtnl_register(PF_INET, RTM_GETADDR, NULL, inet_dump_ifaddr, NULL);
2447 rtnl_register(PF_INET, RTM_GETNETCONF, inet_netconf_get_devconf,
2448 inet_netconf_dump_devconf, NULL);