2 * NET3 IP device support routines.
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
9 * Derived from the IP parts of dev.c 1.0.19
11 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
12 * Mark Evans, <evansmp@uhura.aston.ac.uk>
15 * Alan Cox, <gw4pts@gw4pts.ampr.org>
16 * Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
19 * Alexey Kuznetsov: pa_* fields are replaced with ifaddr
21 * Cyrus Durgin: updated for kmod
22 * Matthias Andree: in devinet_ioctl, compare label and
23 * address (4.4BSD alias style support),
24 * fall back to comparing just the label
29 #include <asm/uaccess.h>
30 #include <linux/bitops.h>
31 #include <linux/capability.h>
32 #include <linux/module.h>
33 #include <linux/types.h>
34 #include <linux/kernel.h>
35 #include <linux/string.h>
37 #include <linux/socket.h>
38 #include <linux/sockios.h>
40 #include <linux/errno.h>
41 #include <linux/interrupt.h>
42 #include <linux/if_addr.h>
43 #include <linux/if_ether.h>
44 #include <linux/inet.h>
45 #include <linux/netdevice.h>
46 #include <linux/etherdevice.h>
47 #include <linux/skbuff.h>
48 #include <linux/init.h>
49 #include <linux/notifier.h>
50 #include <linux/inetdevice.h>
51 #include <linux/igmp.h>
52 #include <linux/slab.h>
53 #include <linux/hash.h>
55 #include <linux/sysctl.h>
57 #include <linux/kmod.h>
58 #include <linux/netconf.h>
62 #include <net/route.h>
63 #include <net/ip_fib.h>
64 #include <net/rtnetlink.h>
65 #include <net/net_namespace.h>
66 #include <net/addrconf.h>
68 #include "fib_lookup.h"
/* Address flags that only make sense for IPv6 (DAD, temp-addr management,
 * etc.).  This mask is stripped from userspace-supplied ifa_flags before an
 * IPv4 address is inserted — see __inet_insert_ifa().
 */
70 #define IPV6ONLY_FLAGS \
71 (IFA_F_NODAD | IFA_F_OPTIMISTIC | IFA_F_DADFAILED | \
72 IFA_F_HOMEADDRESS | IFA_F_TENTATIVE | \
73 IFA_F_MANAGETEMPADDR | IFA_F_STABLE_PRIVACY)
/* Compile-time defaults for per-interface IPv4 configuration ("all"
 * template copied into each new in_device via inetdev_init()).
 * NOTE(review): the extract is missing lines between/after these entries
 * (e.g. the ".data = {" wrapper and closing braces) — verify against
 * upstream net/ipv4/devinet.c before relying on exact structure.
 */
75 static struct ipv4_devconf ipv4_devconf = {
77 [IPV4_DEVCONF_ACCEPT_REDIRECTS - 1] = 1,
78 [IPV4_DEVCONF_SEND_REDIRECTS - 1] = 1,
79 [IPV4_DEVCONF_SECURE_REDIRECTS - 1] = 1,
80 [IPV4_DEVCONF_SHARED_MEDIA - 1] = 1,
81 [IPV4_DEVCONF_IGMPV2_UNSOLICITED_REPORT_INTERVAL - 1] = 10000 /*ms*/,
82 [IPV4_DEVCONF_IGMPV3_UNSOLICITED_REPORT_INTERVAL - 1] = 1000 /*ms*/,
/* Defaults used for newly-created devices (net->ipv4.devconf_dflt).
 * Differs from ipv4_devconf above only in ACCEPT_SOURCE_ROUTE = 1.
 * NOTE(review): wrapper/closing initializer lines are missing from this
 * extract — verify against upstream.
 */
86 static struct ipv4_devconf ipv4_devconf_dflt = {
88 [IPV4_DEVCONF_ACCEPT_REDIRECTS - 1] = 1,
89 [IPV4_DEVCONF_SEND_REDIRECTS - 1] = 1,
90 [IPV4_DEVCONF_SECURE_REDIRECTS - 1] = 1,
91 [IPV4_DEVCONF_SHARED_MEDIA - 1] = 1,
92 [IPV4_DEVCONF_ACCEPT_SOURCE_ROUTE - 1] = 1,
93 [IPV4_DEVCONF_IGMPV2_UNSOLICITED_REPORT_INTERVAL - 1] = 10000 /*ms*/,
94 [IPV4_DEVCONF_IGMPV3_UNSOLICITED_REPORT_INTERVAL - 1] = 1000 /*ms*/,
/* Accessor for the per-netns default devconf value of @attr. */
98 #define IPV4_DEVCONF_DFLT(net, attr) \
99 IPV4_DEVCONF((*net->ipv4.devconf_dflt), attr)
/* Netlink attribute validation policy for RTM_NEWADDR/RTM_DELADDR.
 * All addresses are 32-bit; labels are bounded by IFNAMSIZ.
 * NOTE(review): closing "};" is missing from this extract.
 */
101 static const struct nla_policy ifa_ipv4_policy[IFA_MAX+1] = {
102 [IFA_LOCAL] = { .type = NLA_U32 },
103 [IFA_ADDRESS] = { .type = NLA_U32 },
104 [IFA_BROADCAST] = { .type = NLA_U32 },
105 [IFA_LABEL] = { .type = NLA_STRING, .len = IFNAMSIZ - 1 },
106 [IFA_CACHEINFO] = { .len = sizeof(struct ifa_cacheinfo) },
107 [IFA_FLAGS] = { .type = NLA_U32 },
/* Global hash of all configured IPv4 addresses (in_ifaddr), keyed by
 * local address mixed with a per-netns salt; 256 buckets.
 * NOTE(review): function braces are missing from this extract.
 */
110 #define IN4_ADDR_HSIZE_SHIFT 8
111 #define IN4_ADDR_HSIZE (1U << IN4_ADDR_HSIZE_SHIFT)
113 static struct hlist_head inet_addr_lst[IN4_ADDR_HSIZE];
/* Fold addr ^ netns-salt down to a bucket index. */
115 static u32 inet_addr_hash(const struct net *net, __be32 addr)
117 u32 val = (__force u32) addr ^ net_hash_mix(net);
119 return hash_32(val, IN4_ADDR_HSIZE_SHIFT);
/* Insert ifa into the bucket for its local address (RCU-safe add). */
122 static void inet_hash_insert(struct net *net, struct in_ifaddr *ifa)
124 u32 hash = inet_addr_hash(net, ifa->ifa_local);
127 hlist_add_head_rcu(&ifa->hash, &inet_addr_lst[hash]);
/* Unhash ifa; hlist_del_init_rcu keeps concurrent RCU readers safe. */
130 static void inet_hash_remove(struct in_ifaddr *ifa)
133 hlist_del_init_rcu(&ifa->hash);
137 * __ip_dev_find - find the first device with a given source address.
138 * @net: the net namespace
139 * @addr: the source address
140 * @devref: if true, take a reference on the found device
142 * If a caller uses devref=false, it should be protected by RCU, or RTNL
144 struct net_device *__ip_dev_find(struct net *net, __be32 addr, bool devref)
146 u32 hash = inet_addr_hash(net, addr);
147 struct net_device *result = NULL;
148 struct in_ifaddr *ifa;
/* Fast path: walk the address hash bucket under RCU. */
151 hlist_for_each_entry_rcu(ifa, &inet_addr_lst[hash], hash) {
152 if (ifa->ifa_local == addr) {
153 struct net_device *dev = ifa->ifa_dev->dev;
/* Hash is global across namespaces; skip entries from other netns. */
155 if (!net_eq(dev_net(dev), net))
/* NOTE(review): several lines (continue/result assignment/goto) are
 * missing from this extract between here and the FIB fallback below.
 */
162 struct flowi4 fl4 = { .daddr = addr };
163 struct fib_result res = { 0 };
164 struct fib_table *local;
166 /* Fallback to FIB local table so that communication
167 * over loopback subnets work.
169 local = fib_get_table(net, RT_TABLE_LOCAL);
171 !fib_table_lookup(local, &fl4, &res, FIB_LOOKUP_NOREF) &&
172 res.type == RTN_LOCAL)
173 result = FIB_RES_DEV(res);
175 if (result && devref)
180 EXPORT_SYMBOL(__ip_dev_find);
/* Forward declarations and ifa allocation/teardown helpers.
 * NOTE(review): function braces and some stub bodies are missing from
 * this extract.
 */
182 static void rtmsg_ifa(int event, struct in_ifaddr *, struct nlmsghdr *, u32);
184 static BLOCKING_NOTIFIER_HEAD(inetaddr_chain);
185 static void inet_del_ifa(struct in_device *in_dev, struct in_ifaddr **ifap,
188 static int devinet_sysctl_register(struct in_device *idev);
189 static void devinet_sysctl_unregister(struct in_device *idev);
/* !CONFIG_SYSCTL stubs (the #ifdef lines are missing from this extract). */
191 static int devinet_sysctl_register(struct in_device *idev)
195 static void devinet_sysctl_unregister(struct in_device *idev)
200 /* Locks all the inet devices. */
/* Allocate a zeroed in_ifaddr; caller fills fields and inserts it. */
202 static struct in_ifaddr *inet_alloc_ifa(void)
204 return kzalloc(sizeof(struct in_ifaddr), GFP_KERNEL);
/* RCU callback: drop the in_device ref, then free the ifa. */
207 static void inet_rcu_free_ifa(struct rcu_head *head)
209 struct in_ifaddr *ifa = container_of(head, struct in_ifaddr, rcu_head);
211 in_dev_put(ifa->ifa_dev);
/* Free after a grace period — readers may still hold RCU references. */
215 static void inet_free_ifa(struct in_ifaddr *ifa)
217 call_rcu(&ifa->rcu_head, inet_rcu_free_ifa);
/* Final teardown once the in_device refcount hits zero: by now the
 * address and multicast lists must already be empty (WARN otherwise).
 * NOTE(review): braces/#endif/kfree lines are missing from this extract.
 */
220 void in_dev_finish_destroy(struct in_device *idev)
222 struct net_device *dev = idev->dev;
224 WARN_ON(idev->ifa_list);
225 WARN_ON(idev->mc_list);
226 kfree(rcu_dereference_protected(idev->mc_hash, 1));
227 #ifdef NET_REFCNT_DEBUG
228 pr_debug("%s: %p=%s\n", __func__, idev, dev ? dev->name : "NIL");
/* presumably an "if (!idev->dead)" guard precedes this — TODO confirm */
232 pr_err("Freeing alive in_device %p\n", idev);
236 EXPORT_SYMBOL(in_dev_finish_destroy);
/* Create and attach the per-device IPv4 state (in_device) for @dev:
 * copy netns default devconf, allocate ARP neigh parms, register
 * sysctls, init multicast state, then publish via dev->ip_ptr.
 * NOTE(review): error labels (out_kfree etc.), ASSERT_RTNL, refcount
 * init and several goto lines are missing from this extract.
 */
238 static struct in_device *inetdev_init(struct net_device *dev)
240 struct in_device *in_dev;
245 in_dev = kzalloc(sizeof(*in_dev), GFP_KERNEL);
248 memcpy(&in_dev->cnf, dev_net(dev)->ipv4.devconf_dflt,
249 sizeof(in_dev->cnf));
250 in_dev->cnf.sysctl = NULL;
252 in_dev->arp_parms = neigh_parms_alloc(dev, &arp_tbl);
253 if (!in_dev->arp_parms)
/* LRO is incompatible with forwarding; disable it up front. */
255 if (IPV4_DEVCONF(in_dev->cnf, FORWARDING))
256 dev_disable_lro(dev);
257 /* Reference in_dev->dev */
259 /* Account for reference dev->ip_ptr (below) */
262 err = devinet_sysctl_register(in_dev);
265 neigh_parms_release(&arp_tbl, in_dev->arp_parms);
270 ip_mc_init_dev(in_dev);
271 if (dev->flags & IFF_UP)
274 /* we can receive as soon as ip_ptr is set -- do this last */
275 rcu_assign_pointer(dev->ip_ptr, in_dev);
277 return in_dev ?: ERR_PTR(err);
/* RCU callback: release the in_device after the grace period. */
284 static void in_dev_rcu_put(struct rcu_head *head)
286 struct in_device *idev = container_of(head, struct in_device, rcu_head);
/* Tear down an in_device: destroy multicast state, delete every
 * remaining address, unpublish dev->ip_ptr, then free via RCU.
 */
290 static void inetdev_destroy(struct in_device *in_dev)
292 struct in_ifaddr *ifa;
293 struct net_device *dev;
301 ip_mc_destroy_dev(in_dev);
303 while ((ifa = in_dev->ifa_list) != NULL) {
304 inet_del_ifa(in_dev, &in_dev->ifa_list, 0);
308 RCU_INIT_POINTER(dev->ip_ptr, NULL);
310 devinet_sysctl_unregister(in_dev);
311 neigh_parms_release(&arp_tbl, in_dev->arp_parms);
314 call_rcu(&in_dev->rcu_head, in_dev_rcu_put);
/* Return whether @a (and @b, if non-zero) fall inside the subnet of any
 * primary address on @in_dev.
 * NOTE(review): braces and return statements are missing from this extract.
 */
317 int inet_addr_onlink(struct in_device *in_dev, __be32 a, __be32 b)
320 for_primary_ifa(in_dev) {
321 if (inet_ifa_match(a, ifa)) {
322 if (!b || inet_ifa_match(b, ifa)) {
327 } endfor_ifa(in_dev);
/* Delete the address *ifap from in_dev.  Deleting a primary address also
 * deletes (or, if promote_secondaries is set, promotes) its secondaries,
 * and announces each removal via rtnetlink + the inetaddr notifier chain.
 * NOTE(review): many lines are missing from this extract (promote
 * selection, notifier event arguments, fib_add_ifaddr on re-add, closing
 * braces) — consult upstream before reasoning about exact control flow.
 */
332 static void __inet_del_ifa(struct in_device *in_dev, struct in_ifaddr **ifap,
333 int destroy, struct nlmsghdr *nlh, u32 portid)
335 struct in_ifaddr *promote = NULL;
336 struct in_ifaddr *ifa, *ifa1 = *ifap;
337 struct in_ifaddr *last_prim = in_dev->ifa_list;
338 struct in_ifaddr *prev_prom = NULL;
339 int do_promote = IN_DEV_PROMOTE_SECONDARIES(in_dev);
346 /* 1. Deleting primary ifaddr forces deletion all secondaries
347 * unless alias promotion is set
350 if (!(ifa1->ifa_flags & IFA_F_SECONDARY)) {
351 struct in_ifaddr **ifap1 = &ifa1->ifa_next;
353 while ((ifa = *ifap1) != NULL) {
354 if (!(ifa->ifa_flags & IFA_F_SECONDARY) &&
355 ifa1->ifa_scope <= ifa->ifa_scope)
/* Secondaries on a different subnet are unaffected; skip them. */
358 if (!(ifa->ifa_flags & IFA_F_SECONDARY) ||
359 ifa1->ifa_mask != ifa->ifa_mask ||
360 !inet_ifa_match(ifa1->ifa_address, ifa)) {
361 ifap1 = &ifa->ifa_next;
367 inet_hash_remove(ifa);
368 *ifap1 = ifa->ifa_next;
370 rtmsg_ifa(RTM_DELADDR, ifa, nlh, portid);
371 blocking_notifier_call_chain(&inetaddr_chain,
381 /* On promotion all secondaries from subnet are changing
382 * the primary IP, we must remove all their routes silently
383 * and later to add them back with new prefsrc. Do this
384 * while all addresses are on the device list.
386 for (ifa = promote; ifa; ifa = ifa->ifa_next) {
387 if (ifa1->ifa_mask == ifa->ifa_mask &&
388 inet_ifa_match(ifa1->ifa_address, ifa))
389 fib_del_ifaddr(ifa, ifa1);
/* 2. Unlink and unhash the target address itself. */
395 *ifap = ifa1->ifa_next;
396 inet_hash_remove(ifa1);
398 /* 3. Announce address deletion */
400 /* Send message first, then call notifier.
401 At first sight, FIB update triggered by notifier
402 will refer to already deleted ifaddr, that could confuse
403 netlink listeners. It is not true: look, gated sees
404 that route deleted and if it still thinks that ifaddr
405 is valid, it will try to restore deleted routes... Grr.
406 So that, this order is correct.
408 rtmsg_ifa(RTM_DELADDR, ifa1, nlh, portid);
409 blocking_notifier_call_chain(&inetaddr_chain, NETDEV_DOWN, ifa1);
/* 4. Promote the chosen secondary: splice it after the last primary
 * and clear its SECONDARY flag, then re-announce it and its peers.
 */
412 struct in_ifaddr *next_sec = promote->ifa_next;
415 prev_prom->ifa_next = promote->ifa_next;
416 promote->ifa_next = last_prim->ifa_next;
417 last_prim->ifa_next = promote;
420 promote->ifa_flags &= ~IFA_F_SECONDARY;
421 rtmsg_ifa(RTM_NEWADDR, promote, nlh, portid);
422 blocking_notifier_call_chain(&inetaddr_chain,
424 for (ifa = next_sec; ifa; ifa = ifa->ifa_next) {
425 if (ifa1->ifa_mask != ifa->ifa_mask ||
426 !inet_ifa_match(ifa1->ifa_address, ifa))
/* Non-netlink wrapper: delete with no nlmsghdr/portid attribution. */
436 static void inet_del_ifa(struct in_device *in_dev, struct in_ifaddr **ifap,
439 __inet_del_ifa(in_dev, ifap, destroy, NULL, 0);
442 static void check_lifetime(struct work_struct *work);
/* Deferred work that expires/deprecates addresses by lifetime. */
444 static DECLARE_DELAYED_WORK(check_lifetime_work, check_lifetime);
/* Insert @ifa into its device's address list, deciding primary vs
 * secondary by whether another address already covers the same subnet,
 * then hash it, kick the lifetime worker, and announce RTM_NEWADDR.
 * NOTE(review): several lines are missing from this extract (duplicate
 * -EEXIST / -EINVAL returns, list insertion point logic, closing braces).
 */
446 static int __inet_insert_ifa(struct in_ifaddr *ifa, struct nlmsghdr *nlh,
449 struct in_device *in_dev = ifa->ifa_dev;
450 struct in_ifaddr *ifa1, **ifap, **last_primary;
454 if (!ifa->ifa_local) {
459 ifa->ifa_flags &= ~IFA_F_SECONDARY;
460 last_primary = &in_dev->ifa_list;
462 /* Don't set IPv6 only flags to IPv4 addresses */
463 ifa->ifa_flags &= ~IPV6ONLY_FLAGS;
465 for (ifap = &in_dev->ifa_list; (ifa1 = *ifap) != NULL;
466 ifap = &ifa1->ifa_next) {
467 if (!(ifa1->ifa_flags & IFA_F_SECONDARY) &&
468 ifa->ifa_scope <= ifa1->ifa_scope)
469 last_primary = &ifa1->ifa_next;
470 if (ifa1->ifa_mask == ifa->ifa_mask &&
471 inet_ifa_match(ifa1->ifa_address, ifa)) {
472 if (ifa1->ifa_local == ifa->ifa_local) {
476 if (ifa1->ifa_scope != ifa->ifa_scope) {
480 ifa->ifa_flags |= IFA_F_SECONDARY;
/* Seed PRNG with the new primary address for net-randomness mixing. */
484 if (!(ifa->ifa_flags & IFA_F_SECONDARY)) {
485 prandom_seed((__force u32) ifa->ifa_local);
489 ifa->ifa_next = *ifap;
492 inet_hash_insert(dev_net(in_dev->dev), ifa);
494 cancel_delayed_work(&check_lifetime_work);
495 queue_delayed_work(system_power_efficient_wq, &check_lifetime_work, 0);
497 /* Send message first, then call notifier.
498 Notifier will trigger FIB update, so that
499 listeners of netlink will know about new ifaddr */
500 rtmsg_ifa(RTM_NEWADDR, ifa, nlh, portid);
501 blocking_notifier_call_chain(&inetaddr_chain, NETDEV_UP, ifa);
/* Non-netlink wrapper used by ioctl and loopback setup paths. */
506 static int inet_insert_ifa(struct in_ifaddr *ifa)
508 return __inet_insert_ifa(ifa, NULL, 0);
/* Bind @ifa to @dev's in_device (taking a hold) and insert it.
 * Loopback addresses are forced to host scope.
 */
511 static int inet_set_ifa(struct net_device *dev, struct in_ifaddr *ifa)
513 struct in_device *in_dev = __in_dev_get_rtnl(dev);
521 ipv4_devconf_setall(in_dev);
522 neigh_parms_data_state_setall(in_dev->arp_parms);
523 if (ifa->ifa_dev != in_dev) {
524 WARN_ON(ifa->ifa_dev);
526 ifa->ifa_dev = in_dev;
528 if (ipv4_is_loopback(ifa->ifa_local))
529 ifa->ifa_scope = RT_SCOPE_HOST;
530 return inet_insert_ifa(ifa);
533 /* Caller must hold RCU or RTNL :
534 * We dont take a reference on found in_device
/* Look up a device by ifindex and return its in_device (or NULL). */
536 struct in_device *inetdev_by_index(struct net *net, int ifindex)
538 struct net_device *dev;
539 struct in_device *in_dev = NULL;
542 dev = dev_get_by_index_rcu(net, ifindex);
544 in_dev = rcu_dereference_rtnl(dev->ip_ptr);
548 EXPORT_SYMBOL(inetdev_by_index);
550 /* Called only from RTNL semaphored context. No locks. */
/* Find the primary address on in_dev matching @prefix/@mask exactly. */
552 struct in_ifaddr *inet_ifa_byprefix(struct in_device *in_dev, __be32 prefix,
557 for_primary_ifa(in_dev) {
558 if (ifa->ifa_mask == mask && inet_ifa_match(prefix, ifa))
560 } endfor_ifa(in_dev);
/* Join/leave the multicast group for an IFA_F_MCAUTOJOIN address via the
 * per-netns autojoin socket.  Compiled out without CONFIG_IP_MULTICAST.
 * NOTE(review): the lock_sock/release_sock and #else branch lines are
 * missing from this extract.
 */
564 static int ip_mc_autojoin_config(struct net *net, bool join,
565 const struct in_ifaddr *ifa)
567 #if defined(CONFIG_IP_MULTICAST)
568 struct ip_mreqn mreq = {
569 .imr_multiaddr.s_addr = ifa->ifa_address,
570 .imr_ifindex = ifa->ifa_dev->dev->ifindex,
572 struct sock *sk = net->ipv4.mc_autojoin_sk;
579 ret = ip_mc_join_group(sk, &mreq);
581 ret = ip_mc_leave_group(sk, &mreq);
/* RTM_DELADDR handler: parse attributes, find the matching address on
 * the target device (by local addr, label, and prefix as supplied), drop
 * any autojoined multicast membership, and delete it.  Returns
 * -EADDRNOTAVAIL when nothing matched.
 * NOTE(review): error-path returns and closing braces are missing from
 * this extract.
 */
590 static int inet_rtm_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh)
592 struct net *net = sock_net(skb->sk);
593 struct nlattr *tb[IFA_MAX+1];
594 struct in_device *in_dev;
595 struct ifaddrmsg *ifm;
596 struct in_ifaddr *ifa, **ifap;
601 err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, ifa_ipv4_policy);
605 ifm = nlmsg_data(nlh);
606 in_dev = inetdev_by_index(net, ifm->ifa_index);
612 for (ifap = &in_dev->ifa_list; (ifa = *ifap) != NULL;
613 ifap = &ifa->ifa_next) {
615 ifa->ifa_local != nla_get_in_addr(tb[IFA_LOCAL]))
618 if (tb[IFA_LABEL] && nla_strcmp(tb[IFA_LABEL], ifa->ifa_label))
621 if (tb[IFA_ADDRESS] &&
622 (ifm->ifa_prefixlen != ifa->ifa_prefixlen ||
623 !inet_ifa_match(nla_get_in_addr(tb[IFA_ADDRESS]), ifa)))
626 if (ipv4_is_multicast(ifa->ifa_address))
627 ip_mc_autojoin_config(net, false, ifa);
628 __inet_del_ifa(in_dev, ifap, 1, nlh, NETLINK_CB(skb).portid);
632 err = -EADDRNOTAVAIL;
637 #define INFINITY_LIFE_TIME 0xFFFFFFFF
/* Periodic worker: walk the address hash, expire addresses whose valid
 * lifetime has elapsed (delete) and mark past-preferred ones DEPRECATED,
 * then reschedule itself for the earliest upcoming deadline.
 * Pass 1 (RCU) only detects work; pass 2 (under RTNL, presumably — the
 * locking lines are missing from this extract) applies the changes.
 * NOTE(review): rcu_read_lock/unlock, rtnl_lock, continue statements and
 * closing braces are missing here — verify against upstream.
 */
639 static void check_lifetime(struct work_struct *work)
641 unsigned long now, next, next_sec, next_sched;
642 struct in_ifaddr *ifa;
643 struct hlist_node *n;
647 next = round_jiffies_up(now + ADDR_CHECK_FREQUENCY);
649 for (i = 0; i < IN4_ADDR_HSIZE; i++) {
650 bool change_needed = false;
653 hlist_for_each_entry_rcu(ifa, &inet_addr_lst[i], hash) {
656 if (ifa->ifa_flags & IFA_F_PERMANENT)
659 /* We try to batch several events at once. */
660 age = (now - ifa->ifa_tstamp +
661 ADDRCONF_TIMER_FUZZ_MINUS) / HZ;
663 if (ifa->ifa_valid_lft != INFINITY_LIFE_TIME &&
664 age >= ifa->ifa_valid_lft) {
665 change_needed = true;
666 } else if (ifa->ifa_preferred_lft ==
667 INFINITY_LIFE_TIME) {
669 } else if (age >= ifa->ifa_preferred_lft) {
670 if (time_before(ifa->ifa_tstamp +
671 ifa->ifa_valid_lft * HZ, next))
672 next = ifa->ifa_tstamp +
673 ifa->ifa_valid_lft * HZ;
675 if (!(ifa->ifa_flags & IFA_F_DEPRECATED))
676 change_needed = true;
677 } else if (time_before(ifa->ifa_tstamp +
678 ifa->ifa_preferred_lft * HZ,
680 next = ifa->ifa_tstamp +
681 ifa->ifa_preferred_lft * HZ;
/* Second pass: mutate the list (safe iterator allows deletion). */
688 hlist_for_each_entry_safe(ifa, n, &inet_addr_lst[i], hash) {
691 if (ifa->ifa_flags & IFA_F_PERMANENT)
694 /* We try to batch several events at once. */
695 age = (now - ifa->ifa_tstamp +
696 ADDRCONF_TIMER_FUZZ_MINUS) / HZ;
698 if (ifa->ifa_valid_lft != INFINITY_LIFE_TIME &&
699 age >= ifa->ifa_valid_lft) {
700 struct in_ifaddr **ifap;
702 for (ifap = &ifa->ifa_dev->ifa_list;
703 *ifap != NULL; ifap = &(*ifap)->ifa_next) {
705 inet_del_ifa(ifa->ifa_dev,
710 } else if (ifa->ifa_preferred_lft !=
711 INFINITY_LIFE_TIME &&
712 age >= ifa->ifa_preferred_lft &&
713 !(ifa->ifa_flags & IFA_F_DEPRECATED)) {
714 ifa->ifa_flags |= IFA_F_DEPRECATED;
715 rtmsg_ifa(RTM_NEWADDR, ifa, NULL, 0);
/* Round the deadline to a jiffy-second boundary when close enough,
 * but never sooner than ADDRCONF_TIMER_FUZZ_MAX from now.
 */
721 next_sec = round_jiffies_up(next);
724 /* If rounded timeout is accurate enough, accept it. */
725 if (time_before(next_sec, next + ADDRCONF_TIMER_FUZZ))
726 next_sched = next_sec;
729 /* And minimum interval is ADDRCONF_TIMER_FUZZ_MAX. */
730 if (time_before(next_sched, now + ADDRCONF_TIMER_FUZZ_MAX))
731 next_sched = now + ADDRCONF_TIMER_FUZZ_MAX;
733 queue_delayed_work(system_power_efficient_wq, &check_lifetime_work,
/* Translate user-supplied valid/preferred lifetimes into ifa state:
 * finite valid_lft -> store it; infinite -> IFA_F_PERMANENT.  A zero
 * preferred timeout marks the address DEPRECATED immediately.  Also
 * stamps tstamp (and cstamp on first use).
 * NOTE(review): braces/else lines are missing from this extract.
 */
737 static void set_ifa_lifetime(struct in_ifaddr *ifa, __u32 valid_lft,
740 unsigned long timeout;
742 ifa->ifa_flags &= ~(IFA_F_PERMANENT | IFA_F_DEPRECATED);
744 timeout = addrconf_timeout_fixup(valid_lft, HZ);
745 if (addrconf_finite_timeout(timeout))
746 ifa->ifa_valid_lft = timeout;
748 ifa->ifa_flags |= IFA_F_PERMANENT;
750 timeout = addrconf_timeout_fixup(prefered_lft, HZ);
751 if (addrconf_finite_timeout(timeout)) {
753 ifa->ifa_flags |= IFA_F_DEPRECATED;
754 ifa->ifa_preferred_lft = timeout;
756 ifa->ifa_tstamp = jiffies;
757 if (!ifa->ifa_cstamp)
758 ifa->ifa_cstamp = ifa->ifa_tstamp;
/* Build an in_ifaddr from an RTM_NEWADDR message: validate prefix and
 * required IFA_LOCAL, resolve the device, allocate the ifa and populate
 * it from the attributes (IFA_ADDRESS defaults to IFA_LOCAL; label
 * defaults to the device name).  Lifetimes from IFA_CACHEINFO are
 * returned via *pvalid_lft / *pprefered_lft.
 * NOTE(review): error-path returns (ERR_PTR), IFA_F_PERMANENT default
 * and closing braces are missing from this extract.
 */
761 static struct in_ifaddr *rtm_to_ifaddr(struct net *net, struct nlmsghdr *nlh,
762 __u32 *pvalid_lft, __u32 *pprefered_lft)
764 struct nlattr *tb[IFA_MAX+1];
765 struct in_ifaddr *ifa;
766 struct ifaddrmsg *ifm;
767 struct net_device *dev;
768 struct in_device *in_dev;
771 err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, ifa_ipv4_policy);
775 ifm = nlmsg_data(nlh);
777 if (ifm->ifa_prefixlen > 32 || !tb[IFA_LOCAL])
780 dev = __dev_get_by_index(net, ifm->ifa_index);
785 in_dev = __in_dev_get_rtnl(dev);
790 ifa = inet_alloc_ifa();
793 * A potential indev allocation can be left alive, it stays
794 * assigned to its device and is destroy with it.
798 ipv4_devconf_setall(in_dev);
799 neigh_parms_data_state_setall(in_dev->arp_parms);
802 if (!tb[IFA_ADDRESS])
803 tb[IFA_ADDRESS] = tb[IFA_LOCAL];
805 INIT_HLIST_NODE(&ifa->hash);
806 ifa->ifa_prefixlen = ifm->ifa_prefixlen;
807 ifa->ifa_mask = inet_make_mask(ifm->ifa_prefixlen);
808 ifa->ifa_flags = tb[IFA_FLAGS] ? nla_get_u32(tb[IFA_FLAGS]) :
810 ifa->ifa_scope = ifm->ifa_scope;
811 ifa->ifa_dev = in_dev;
813 ifa->ifa_local = nla_get_in_addr(tb[IFA_LOCAL]);
814 ifa->ifa_address = nla_get_in_addr(tb[IFA_ADDRESS]);
816 if (tb[IFA_BROADCAST])
817 ifa->ifa_broadcast = nla_get_in_addr(tb[IFA_BROADCAST]);
820 nla_strlcpy(ifa->ifa_label, tb[IFA_LABEL], IFNAMSIZ);
822 memcpy(ifa->ifa_label, dev->name, IFNAMSIZ);
824 if (tb[IFA_CACHEINFO]) {
825 struct ifa_cacheinfo *ci;
827 ci = nla_data(tb[IFA_CACHEINFO]);
/* Reject zero valid lifetime and preferred > valid. */
828 if (!ci->ifa_valid || ci->ifa_prefered > ci->ifa_valid) {
832 *pvalid_lft = ci->ifa_valid;
833 *pprefered_lft = ci->ifa_prefered;
/* Find an existing address on the same device matching mask, subnet and
 * local address — used by inet_rtm_newaddr() to detect replace vs create.
 */
844 static struct in_ifaddr *find_matching_ifa(struct in_ifaddr *ifa)
846 struct in_device *in_dev = ifa->ifa_dev;
847 struct in_ifaddr *ifa1, **ifap;
852 for (ifap = &in_dev->ifa_list; (ifa1 = *ifap) != NULL;
853 ifap = &ifa1->ifa_next) {
854 if (ifa1->ifa_mask == ifa->ifa_mask &&
855 inet_ifa_match(ifa1->ifa_address, ifa) &&
856 ifa1->ifa_local == ifa->ifa_local)
/* RTM_NEWADDR handler.  New address: set lifetimes, optionally autojoin
 * multicast, and insert.  Existing address: honour NLM_F_EXCL/REPLACE
 * flags, refresh lifetimes, and re-announce.
 * NOTE(review): error returns, inet_free_ifa calls and closing braces
 * are missing from this extract.
 */
862 static int inet_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh)
864 struct net *net = sock_net(skb->sk);
865 struct in_ifaddr *ifa;
866 struct in_ifaddr *ifa_existing;
867 __u32 valid_lft = INFINITY_LIFE_TIME;
868 __u32 prefered_lft = INFINITY_LIFE_TIME;
872 ifa = rtm_to_ifaddr(net, nlh, &valid_lft, &prefered_lft);
876 ifa_existing = find_matching_ifa(ifa);
878 /* It would be best to check for !NLM_F_CREATE here but
879 * userspace already relies on not having to provide this.
881 set_ifa_lifetime(ifa, valid_lft, prefered_lft);
882 if (ifa->ifa_flags & IFA_F_MCAUTOJOIN) {
883 int ret = ip_mc_autojoin_config(net, true, ifa);
890 return __inet_insert_ifa(ifa, nlh, NETLINK_CB(skb).portid);
/* Duplicate exists: only allowed when REPLACE set and EXCL clear. */
894 if (nlh->nlmsg_flags & NLM_F_EXCL ||
895 !(nlh->nlmsg_flags & NLM_F_REPLACE))
898 set_ifa_lifetime(ifa, valid_lft, prefered_lft);
899 cancel_delayed_work(&check_lifetime_work);
900 queue_delayed_work(system_power_efficient_wq,
901 &check_lifetime_work, 0);
902 rtmsg_ifa(RTM_NEWADDR, ifa, nlh, NETLINK_CB(skb).portid);
908 * Determine a default network mask, based on the IP address.
/* Classful default prefix length for @addr: A=8, B=16, C=24; 0 for the
 * zero network; -1 for anything else (e.g. multicast).
 * NOTE(review): the per-class rc assignments and return are missing
 * from this extract.
 */
911 static int inet_abc_len(__be32 addr)
913 int rc = -1; /* Something else, probably a multicast. */
915 if (ipv4_is_zeronet(addr))
918 __u32 haddr = ntohl(addr);
920 if (IN_CLASSA(haddr))
922 else if (IN_CLASSB(haddr))
924 else if (IN_CLASSC(haddr))
/* Legacy SIOC[GS]IF* ioctl entry point for IPv4 addresses.  Copies the
 * ifreq in, locates the device and (by label, optionally label+address
 * in 4.4BSD style) the matching ifa, then dispatches per command.
 * Get-commands fill sin and copy the ifreq back to userspace; set-
 * commands require CAP_NET_ADMIN and rebuild/replace the address.
 * NOTE(review): this extract is missing many lines (ifreq declaration,
 * rtnl_lock/unlock, several error returns, break statements and braces)
 * — treat the visible flow as indicative only and verify upstream.
 */
932 int devinet_ioctl(struct net *net, unsigned int cmd, void __user *arg)
935 struct sockaddr_in sin_orig;
936 struct sockaddr_in *sin = (struct sockaddr_in *)&ifr.ifr_addr;
937 struct in_device *in_dev;
938 struct in_ifaddr **ifap = NULL;
939 struct in_ifaddr *ifa = NULL;
940 struct net_device *dev;
943 int tryaddrmatch = 0;
946 * Fetch the caller's info block into kernel space
949 if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
951 ifr.ifr_name[IFNAMSIZ - 1] = 0;
953 /* save original address for comparison */
954 memcpy(&sin_orig, sin, sizeof(*sin));
956 colon = strchr(ifr.ifr_name, ':');
960 dev_load(net, ifr.ifr_name);
963 case SIOCGIFADDR: /* Get interface address */
964 case SIOCGIFBRDADDR: /* Get the broadcast address */
965 case SIOCGIFDSTADDR: /* Get the destination address */
966 case SIOCGIFNETMASK: /* Get the netmask for the interface */
967 /* Note that these ioctls will not sleep,
968 so that we do not impose a lock.
969 One day we will be forced to put shlock here (I mean SMP)
971 tryaddrmatch = (sin_orig.sin_family == AF_INET);
972 memset(sin, 0, sizeof(*sin));
973 sin->sin_family = AF_INET;
978 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
981 case SIOCSIFADDR: /* Set interface address (and family) */
982 case SIOCSIFBRDADDR: /* Set the broadcast address */
983 case SIOCSIFDSTADDR: /* Set the destination address */
984 case SIOCSIFNETMASK: /* Set the netmask for the interface */
986 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
989 if (sin->sin_family != AF_INET)
1000 dev = __dev_get_by_name(net, ifr.ifr_name);
1007 in_dev = __in_dev_get_rtnl(dev);
1010 /* Matthias Andree */
1011 /* compare label and address (4.4BSD style) */
1012 /* note: we only do this for a limited set of ioctls
1013 and only if the original address family was AF_INET.
1014 This is checked above. */
1015 for (ifap = &in_dev->ifa_list; (ifa = *ifap) != NULL;
1016 ifap = &ifa->ifa_next) {
1017 if (!strcmp(ifr.ifr_name, ifa->ifa_label) &&
1018 sin_orig.sin_addr.s_addr ==
1024 /* we didn't get a match, maybe the application is
1025 4.3BSD-style and passed in junk so we fall back to
1026 comparing just the label */
1028 for (ifap = &in_dev->ifa_list; (ifa = *ifap) != NULL;
1029 ifap = &ifa->ifa_next)
1030 if (!strcmp(ifr.ifr_name, ifa->ifa_label))
1035 ret = -EADDRNOTAVAIL;
1036 if (!ifa && cmd != SIOCSIFADDR && cmd != SIOCSIFFLAGS)
1040 case SIOCGIFADDR: /* Get interface address */
1041 sin->sin_addr.s_addr = ifa->ifa_local;
1044 case SIOCGIFBRDADDR: /* Get the broadcast address */
1045 sin->sin_addr.s_addr = ifa->ifa_broadcast;
1048 case SIOCGIFDSTADDR: /* Get the destination address */
1049 sin->sin_addr.s_addr = ifa->ifa_address;
1052 case SIOCGIFNETMASK: /* Get the netmask for the interface */
1053 sin->sin_addr.s_addr = ifa->ifa_mask;
/* SIOCSIFFLAGS: taking an alias (colon label) down deletes it. */
1058 ret = -EADDRNOTAVAIL;
1062 if (!(ifr.ifr_flags & IFF_UP))
1063 inet_del_ifa(in_dev, ifap, 1);
1066 ret = dev_change_flags(dev, ifr.ifr_flags);
1069 case SIOCSIFADDR: /* Set interface address (and family) */
1071 if (inet_abc_len(sin->sin_addr.s_addr) < 0)
1076 ifa = inet_alloc_ifa();
1079 INIT_HLIST_NODE(&ifa->hash);
1081 memcpy(ifa->ifa_label, ifr.ifr_name, IFNAMSIZ);
1083 memcpy(ifa->ifa_label, dev->name, IFNAMSIZ);
1086 if (ifa->ifa_local == sin->sin_addr.s_addr)
1088 inet_del_ifa(in_dev, ifap, 0);
1089 ifa->ifa_broadcast = 0;
1093 ifa->ifa_address = ifa->ifa_local = sin->sin_addr.s_addr;
/* Non-P2P links get a classful mask + broadcast; P2P gets /32. */
1095 if (!(dev->flags & IFF_POINTOPOINT)) {
1096 ifa->ifa_prefixlen = inet_abc_len(ifa->ifa_address);
1097 ifa->ifa_mask = inet_make_mask(ifa->ifa_prefixlen);
1098 if ((dev->flags & IFF_BROADCAST) &&
1099 ifa->ifa_prefixlen < 31)
1100 ifa->ifa_broadcast = ifa->ifa_address |
1103 ifa->ifa_prefixlen = 32;
1104 ifa->ifa_mask = inet_make_mask(32);
1106 set_ifa_lifetime(ifa, INFINITY_LIFE_TIME, INFINITY_LIFE_TIME);
1107 ret = inet_set_ifa(dev, ifa);
1110 case SIOCSIFBRDADDR: /* Set the broadcast address */
1112 if (ifa->ifa_broadcast != sin->sin_addr.s_addr) {
1113 inet_del_ifa(in_dev, ifap, 0);
1114 ifa->ifa_broadcast = sin->sin_addr.s_addr;
1115 inet_insert_ifa(ifa);
1119 case SIOCSIFDSTADDR: /* Set the destination address */
1121 if (ifa->ifa_address == sin->sin_addr.s_addr)
1124 if (inet_abc_len(sin->sin_addr.s_addr) < 0)
1127 inet_del_ifa(in_dev, ifap, 0);
1128 ifa->ifa_address = sin->sin_addr.s_addr;
1129 inet_insert_ifa(ifa);
1132 case SIOCSIFNETMASK: /* Set the netmask for the interface */
1135 * The mask we set must be legal.
1138 if (bad_mask(sin->sin_addr.s_addr, 0))
1141 if (ifa->ifa_mask != sin->sin_addr.s_addr) {
1142 __be32 old_mask = ifa->ifa_mask;
1143 inet_del_ifa(in_dev, ifap, 0);
1144 ifa->ifa_mask = sin->sin_addr.s_addr;
1145 ifa->ifa_prefixlen = inet_mask_len(ifa->ifa_mask);
1147 /* See if current broadcast address matches
1148 * with current netmask, then recalculate
1149 * the broadcast address. Otherwise it's a
1150 * funny address, so don't touch it since
1151 * the user seems to know what (s)he's doing...
1153 if ((dev->flags & IFF_BROADCAST) &&
1154 (ifa->ifa_prefixlen < 31) &&
1155 (ifa->ifa_broadcast ==
1156 (ifa->ifa_local|~old_mask))) {
1157 ifa->ifa_broadcast = (ifa->ifa_local |
1158 ~sin->sin_addr.s_addr);
1160 inet_insert_ifa(ifa);
1170 ret = copy_to_user(arg, &ifr, sizeof(struct ifreq)) ? -EFAULT : 0;
/* SIOCGIFCONF helper: copy one ifreq per address on @dev into the user
 * buffer @buf (or just count bytes when buf is NULL — presumably; the
 * NULL-buf branch lines are missing from this extract).  Returns bytes
 * produced ("done").
 */
1174 static int inet_gifconf(struct net_device *dev, char __user *buf, int len)
1176 struct in_device *in_dev = __in_dev_get_rtnl(dev);
1177 struct in_ifaddr *ifa;
1184 for (ifa = in_dev->ifa_list; ifa; ifa = ifa->ifa_next) {
1186 done += sizeof(ifr);
1189 if (len < (int) sizeof(ifr))
1191 memset(&ifr, 0, sizeof(struct ifreq));
1192 strcpy(ifr.ifr_name, ifa->ifa_label);
1194 (*(struct sockaddr_in *)&ifr.ifr_addr).sin_family = AF_INET;
1195 (*(struct sockaddr_in *)&ifr.ifr_addr).sin_addr.s_addr =
1198 if (copy_to_user(buf, &ifr, sizeof(struct ifreq))) {
1202 buf += sizeof(struct ifreq);
1203 len -= sizeof(struct ifreq);
1204 done += sizeof(struct ifreq);
/* Pick a source address on @dev suitable for talking to @dst within
 * @scope: prefer a primary address whose subnet contains dst, else any
 * primary within scope; if the device has none, fall back to scanning
 * all devices in the netns.
 * NOTE(review): rcu_read_lock/unlock, goto/out labels and returns are
 * missing from this extract.
 */
1211 __be32 inet_select_addr(const struct net_device *dev, __be32 dst, int scope)
1213 struct in_device *in_dev;
1214 struct net *net = dev_net(dev);
1217 in_dev = __in_dev_get_rcu(dev);
1221 for_primary_ifa(in_dev) {
1222 if (ifa->ifa_scope > scope)
1224 if (!dst || inet_ifa_match(dst, ifa)) {
1225 addr = ifa->ifa_local;
/* Remember a scope-acceptable fallback in case no subnet match. */
1229 addr = ifa->ifa_local;
1230 } endfor_ifa(in_dev);
1236 /* Not loopback addresses on loopback should be preferred
1237 in this case. It is important that lo is the first interface
1240 for_each_netdev_rcu(net, dev) {
1241 in_dev = __in_dev_get_rcu(dev);
1245 for_primary_ifa(in_dev) {
1246 if (ifa->ifa_scope != RT_SCOPE_LINK &&
1247 ifa->ifa_scope <= scope) {
1248 addr = ifa->ifa_local;
1251 } endfor_ifa(in_dev);
1257 EXPORT_SYMBOL(inet_select_addr);
/* Verify @local exists on @in_dev within @scope (or pick one when
 * local==0), preferring an address in @dst's subnet.  Returns the
 * confirmed address or 0.
 */
1259 static __be32 confirm_addr_indev(struct in_device *in_dev, __be32 dst,
1260 __be32 local, int scope)
1267 (local == ifa->ifa_local || !local) &&
1268 ifa->ifa_scope <= scope) {
1269 addr = ifa->ifa_local;
1274 same = (!local || inet_ifa_match(local, ifa)) &&
1275 (!dst || inet_ifa_match(dst, ifa));
1279 /* Is the selected addr into dst subnet? */
1280 if (inet_ifa_match(addr, ifa))
1282 /* No, then can we use new local src? */
1283 if (ifa->ifa_scope <= scope) {
1284 addr = ifa->ifa_local;
1287 /* search for large dst subnet for addr */
1291 } endfor_ifa(in_dev);
1293 return same ? addr : 0;
1297 * Confirm that local IP address exists using wildcards:
1298 * - net: netns to check, cannot be NULL
1299 * - in_dev: only on this interface, NULL=any interface
1300 * - dst: only in the same subnet as dst, 0=any dst
1301 * - local: address, 0=autoselect the local address
1302 * - scope: maximum allowed scope value for the local address
1304 __be32 inet_confirm_addr(struct net *net, struct in_device *in_dev,
1305 __be32 dst, __be32 local, int scope)
1308 struct net_device *dev;
/* Single-interface fast path. */
1311 return confirm_addr_indev(in_dev, dst, local, scope);
/* Otherwise scan every device in the netns for a match. */
1314 for_each_netdev_rcu(net, dev) {
1315 in_dev = __in_dev_get_rcu(dev);
1317 addr = confirm_addr_indev(in_dev, dst, local, scope);
1326 EXPORT_SYMBOL(inet_confirm_addr);
/* Subscribe to IPv4 address add/remove events (blocking chain). */
1332 int register_inetaddr_notifier(struct notifier_block *nb)
1334 return blocking_notifier_chain_register(&inetaddr_chain, nb);
1336 EXPORT_SYMBOL(register_inetaddr_notifier);
1338 int unregister_inetaddr_notifier(struct notifier_block *nb)
1340 return blocking_notifier_chain_unregister(&inetaddr_chain, nb);
1342 EXPORT_SYMBOL(unregister_inetaddr_notifier);
1344 /* Rename ifa_labels for a device name change. Make some effort to preserve
1345 * existing alias numbering and to create unique labels if possible.
1347 static void inetdev_changename(struct net_device *dev, struct in_device *in_dev)
1349 struct in_ifaddr *ifa;
1352 for (ifa = in_dev->ifa_list; ifa; ifa = ifa->ifa_next) {
1353 char old[IFNAMSIZ], *dot;
1355 memcpy(old, ifa->ifa_label, IFNAMSIZ);
1356 memcpy(ifa->ifa_label, dev->name, IFNAMSIZ);
/* Re-append the old ":N" alias suffix when it fits, truncating the
 * new name if necessary to keep the suffix intact.
 */
1359 dot = strchr(old, ':');
1361 sprintf(old, ":%d", named);
1364 if (strlen(dot) + strlen(dev->name) < IFNAMSIZ)
1365 strcat(ifa->ifa_label, dot);
1367 strcpy(ifa->ifa_label + (IFNAMSIZ - strlen(dot) - 1), dot);
1369 rtmsg_ifa(RTM_NEWADDR, ifa, NULL, 0);
/* Send one gratuitous ARP per configured address so peers update their
 * neighbour caches after a link/address change.
 */
1373 static void inetdev_send_gratuitous_arp(struct net_device *dev,
1374 struct in_device *in_dev)
1377 struct in_ifaddr *ifa;
1379 for (ifa = in_dev->ifa_list; ifa;
1380 ifa = ifa->ifa_next) {
1381 arp_send(ARPOP_REQUEST, ETH_P_ARP,
1382 ifa->ifa_local, dev,
1383 ifa->ifa_local, NULL,
1384 dev->dev_addr, NULL);
1388 /* Called only under RTNL semaphore */
/* netdev notifier: creates/destroys the per-device IPv4 state in
 * response to device lifecycle events (REGISTER, UP, DOWN, UNREGISTER,
 * MTU/name/address changes).  Loopback gets 127.0.0.1/8 auto-assigned
 * on UP.
 * NOTE(review): this extract is missing many lines (NULL in_dev guard,
 * break statements, NETDEV_UP/DOWN case labels, goto out) — the switch
 * structure shown here is only partial.
 */
1390 static int inetdev_event(struct notifier_block *this, unsigned long event,
1393 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
1394 struct in_device *in_dev = __in_dev_get_rtnl(dev);
1399 if (event == NETDEV_REGISTER) {
1400 in_dev = inetdev_init(dev);
1402 return notifier_from_errno(PTR_ERR(in_dev));
/* Loopback traffic never needs xfrm policy lookups. */
1403 if (dev->flags & IFF_LOOPBACK) {
1404 IN_DEV_CONF_SET(in_dev, NOXFRM, 1);
1405 IN_DEV_CONF_SET(in_dev, NOPOLICY, 1);
1407 } else if (event == NETDEV_CHANGEMTU) {
1408 /* Re-enabling IP */
1409 if (inetdev_valid_mtu(dev->mtu))
1410 in_dev = inetdev_init(dev);
1416 case NETDEV_REGISTER:
1417 pr_debug("%s: bug\n", __func__);
1418 RCU_INIT_POINTER(dev->ip_ptr, NULL);
1421 if (!inetdev_valid_mtu(dev->mtu))
1423 if (dev->flags & IFF_LOOPBACK) {
1424 struct in_ifaddr *ifa = inet_alloc_ifa();
1427 INIT_HLIST_NODE(&ifa->hash);
1429 ifa->ifa_address = htonl(INADDR_LOOPBACK);
1430 ifa->ifa_prefixlen = 8;
1431 ifa->ifa_mask = inet_make_mask(8);
1432 in_dev_hold(in_dev);
1433 ifa->ifa_dev = in_dev;
1434 ifa->ifa_scope = RT_SCOPE_HOST;
1435 memcpy(ifa->ifa_label, dev->name, IFNAMSIZ);
1436 set_ifa_lifetime(ifa, INFINITY_LIFE_TIME,
1437 INFINITY_LIFE_TIME);
1438 ipv4_devconf_setall(in_dev);
1439 neigh_parms_data_state_setall(in_dev->arp_parms);
1440 inet_insert_ifa(ifa);
1445 case NETDEV_CHANGEADDR:
1446 if (!IN_DEV_ARP_NOTIFY(in_dev))
/* fallthrough when arp_notify is enabled */
1449 case NETDEV_NOTIFY_PEERS:
1450 /* Send gratuitous ARP to notify of link change */
1451 inetdev_send_gratuitous_arp(dev, in_dev);
1456 case NETDEV_PRE_TYPE_CHANGE:
1457 ip_mc_unmap(in_dev);
1459 case NETDEV_POST_TYPE_CHANGE:
1460 ip_mc_remap(in_dev);
1462 case NETDEV_CHANGEMTU:
1463 if (inetdev_valid_mtu(dev->mtu))
1465 /* disable IP when MTU is not enough */
1466 case NETDEV_UNREGISTER:
1467 inetdev_destroy(in_dev);
1469 case NETDEV_CHANGENAME:
1470 /* Do not notify about label change, this event is
1471 * not interesting to applications using netlink.
1473 inetdev_changename(dev, in_dev);
/* Re-register sysctls under the new device name. */
1475 devinet_sysctl_unregister(in_dev);
1476 devinet_sysctl_register(in_dev);
/* Registration struct hooking inetdev_event into netdev notifications. */
1483 static struct notifier_block ip_netdev_notifier = {
1484 .notifier_call = inetdev_event,
/* Worst-case netlink payload size for one IPv4 address message.
 * Must stay in sync with the attributes emitted by inet_fill_ifaddr();
 * a mismatch shows up as the -EMSGSIZE WARN_ON in rtmsg_ifa().
 */
1487 static size_t inet_nlmsg_size(void)
1489 return NLMSG_ALIGN(sizeof(struct ifaddrmsg))
1490 + nla_total_size(4) /* IFA_ADDRESS */
1491 + nla_total_size(4) /* IFA_LOCAL */
1492 + nla_total_size(4) /* IFA_BROADCAST */
1493 + nla_total_size(IFNAMSIZ) /* IFA_LABEL */
1494 + nla_total_size(4) /* IFA_FLAGS */
1495 + nla_total_size(sizeof(struct ifa_cacheinfo)); /* IFA_CACHEINFO */
/* Convert a jiffies stamp into hundredths of a second since boot
 * (rebased off INITIAL_JIFFIES), the unit used by struct ifa_cacheinfo.
 */
1498 static inline u32 cstamp_delta(unsigned long cstamp)
1500 return (cstamp - INITIAL_JIFFIES) * 100UL / HZ;
/* Append an IFA_CACHEINFO attribute to @skb: creation/update stamps
 * (converted by cstamp_delta()) plus the preferred/valid lifetimes.
 * Returns nla_put()'s result (0 or -EMSGSIZE).
 */
1503 static int put_cacheinfo(struct sk_buff *skb, unsigned long cstamp,
1504 unsigned long tstamp, u32 preferred, u32 valid)
1506 struct ifa_cacheinfo ci;
1508 ci.cstamp = cstamp_delta(cstamp);
1509 ci.tstamp = cstamp_delta(tstamp);
1510 ci.ifa_prefered = preferred;
1511 ci.ifa_valid = valid;
1513 return nla_put(skb, IFA_CACHEINFO, sizeof(ci), &ci);
/* Build one RTM_NEWADDR/RTM_DELADDR message describing @ifa into @skb.
 * On attribute overflow the partially-built message is cancelled via
 * nla_put_failure. NOTE(review): this listing is elided (gaps in the
 * embedded line numbers); the nlmsg_put() NULL check and some lifetime
 * clamping lines are not visible here.
 */
1516 static int inet_fill_ifaddr(struct sk_buff *skb, struct in_ifaddr *ifa,
1517 u32 portid, u32 seq, int event, unsigned int flags)
1519 struct ifaddrmsg *ifm;
1520 struct nlmsghdr *nlh;
1521 u32 preferred, valid;
1523 nlh = nlmsg_put(skb, portid, seq, event, sizeof(*ifm), flags);
1527 ifm = nlmsg_data(nlh);
1528 ifm->ifa_family = AF_INET;
1529 ifm->ifa_prefixlen = ifa->ifa_prefixlen;
1530 ifm->ifa_flags = ifa->ifa_flags;
1531 ifm->ifa_scope = ifa->ifa_scope;
1532 ifm->ifa_index = ifa->ifa_dev->dev->ifindex;
/* Non-permanent addresses report lifetimes aged by the time elapsed
 * since the last update stamp; permanent ones report infinity below.
 */
1534 if (!(ifm->ifa_flags & IFA_F_PERMANENT)) {
1535 preferred = ifa->ifa_preferred_lft;
1536 valid = ifa->ifa_valid_lft;
1537 if (preferred != INFINITY_LIFE_TIME) {
1538 long tval = (jiffies - ifa->ifa_tstamp) / HZ;
1540 if (preferred > tval)
1544 if (valid != INFINITY_LIFE_TIME) {
1552 preferred = INFINITY_LIFE_TIME;
1553 valid = INFINITY_LIFE_TIME;
/* Optional attributes: all-zero addresses and an empty label are
 * simply omitted from the message.
 */
1555 if ((ifa->ifa_address &&
1556 nla_put_in_addr(skb, IFA_ADDRESS, ifa->ifa_address)) ||
1558 nla_put_in_addr(skb, IFA_LOCAL, ifa->ifa_local)) ||
1559 (ifa->ifa_broadcast &&
1560 nla_put_in_addr(skb, IFA_BROADCAST, ifa->ifa_broadcast)) ||
1561 (ifa->ifa_label[0] &&
1562 nla_put_string(skb, IFA_LABEL, ifa->ifa_label)) ||
1563 nla_put_u32(skb, IFA_FLAGS, ifa->ifa_flags) ||
1564 put_cacheinfo(skb, ifa->ifa_cstamp, ifa->ifa_tstamp,
1566 goto nla_put_failure;
1568 nlmsg_end(skb, nlh);
1572 nlmsg_cancel(skb, nlh);
/* RTM_GETADDR dump callback: walk every device in the per-netns ifindex
 * hash and emit one RTM_NEWADDR per configured address. Resume state
 * (hash bucket, device index, address index) lives in cb->args[1..2]
 * so an interrupted dump restarts where it left off.
 */
1576 static int inet_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
1578 struct net *net = sock_net(skb->sk);
1581 int ip_idx, s_ip_idx;
1582 struct net_device *dev;
1583 struct in_device *in_dev;
1584 struct in_ifaddr *ifa;
1585 struct hlist_head *head;
1588 s_idx = idx = cb->args[1];
1589 s_ip_idx = ip_idx = cb->args[2];
1591 for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
1593 head = &net->dev_index_head[h];
/* Seed cb->seq from the address generation id so the core can flag an
 * inconsistent (concurrently modified) dump with NLM_F_DUMP_INTR.
 */
1595 cb->seq = atomic_read(&net->ipv4.dev_addr_genid) ^
1597 hlist_for_each_entry_rcu(dev, head, index_hlist) {
1600 if (h > s_h || idx > s_idx)
1602 in_dev = __in_dev_get_rcu(dev);
1606 for (ifa = in_dev->ifa_list, ip_idx = 0; ifa;
1607 ifa = ifa->ifa_next, ip_idx++) {
1608 if (ip_idx < s_ip_idx)
1610 if (inet_fill_ifaddr(skb, ifa,
1611 NETLINK_CB(cb->skb).portid,
1613 RTM_NEWADDR, NLM_F_MULTI) < 0) {
1617 nl_dump_check_consistent(cb, nlmsg_hdr(skb));
1628 cb->args[2] = ip_idx;
/* Broadcast an address add/delete @event for @ifa to RTNLGRP_IPV4_IFADDR
 * listeners. On fill failure the error is reported to the group via
 * rtnl_set_sk_err() (allocation/error paths are elided in this listing).
 */
1633 static void rtmsg_ifa(int event, struct in_ifaddr *ifa, struct nlmsghdr *nlh,
1636 struct sk_buff *skb;
1637 u32 seq = nlh ? nlh->nlmsg_seq : 0;
1641 net = dev_net(ifa->ifa_dev->dev);
1642 skb = nlmsg_new(inet_nlmsg_size(), GFP_KERNEL);
1646 err = inet_fill_ifaddr(skb, ifa, portid, seq, event, 0);
1648 /* -EMSGSIZE implies BUG in inet_nlmsg_size() */
1649 WARN_ON(err == -EMSGSIZE);
1653 rtnl_notify(skb, net, portid, RTNLGRP_IPV4_IFADDR, nlh, GFP_KERNEL);
1657 rtnl_set_sk_err(net, RTNLGRP_IPV4_IFADDR, err);
/* rtnl_af_ops hook: size of the IFLA_INET_CONF blob (one 4-byte slot per
 * devconf entry) advertised for RTM_NEWLINK messages.
 * NOTE(review): the elided lines presumably return 0 when the device has
 * no in_device - confirm against the full source.
 */
1660 static size_t inet_get_link_af_size(const struct net_device *dev,
1661 u32 ext_filter_mask)
1663 struct in_device *in_dev = rcu_dereference_rtnl(dev->ip_ptr);
1668 return nla_total_size(IPV4_DEVCONF_MAX * 4); /* IFLA_INET_CONF */
/* rtnl_af_ops hook: reserve an IFLA_INET_CONF attribute and copy the
 * device's current ipv4_devconf values into it, one u32 per entry.
 */
1671 static int inet_fill_link_af(struct sk_buff *skb, const struct net_device *dev,
1672 u32 ext_filter_mask)
1674 struct in_device *in_dev = rcu_dereference_rtnl(dev->ip_ptr);
1681 nla = nla_reserve(skb, IFLA_INET_CONF, IPV4_DEVCONF_MAX * 4);
1685 for (i = 0; i < IPV4_DEVCONF_MAX; i++)
1686 ((u32 *) nla_data(nla))[i] = in_dev->cnf.data[i];
/* Netlink policy for the per-link AF_INET attribute block. */
1691 static const struct nla_policy inet_af_policy[IFLA_INET_MAX+1] = {
1692 [IFLA_INET_CONF] = { .type = NLA_NESTED },
/* rtnl_af_ops hook: validate an IFLA_INET attribute block before it is
 * applied by inet_set_link_af(). Rejects devices without an in_device
 * and any nested config id outside [1, IPV4_DEVCONF_MAX].
 */
1695 static int inet_validate_link_af(const struct net_device *dev,
1696 const struct nlattr *nla)
1698 struct nlattr *a, *tb[IFLA_INET_MAX+1];
1701 if (dev && !__in_dev_get_rtnl(dev))
1702 return -EAFNOSUPPORT;
1704 err = nla_parse_nested(tb, IFLA_INET_MAX, nla, inet_af_policy);
1708 if (tb[IFLA_INET_CONF]) {
1709 nla_for_each_nested(a, tb[IFLA_INET_CONF], rem) {
1710 int cfgid = nla_type(a);
/* Config ids are 1-based on the wire (array index + 1). */
1715 if (cfgid <= 0 || cfgid > IPV4_DEVCONF_MAX)
/* rtnl_af_ops hook: apply a previously validated IFLA_INET_CONF block,
 * writing each nested u32 into the device's devconf via ipv4_devconf_set().
 * Re-parses without a policy since inet_validate_link_af() already ran.
 */
1723 static int inet_set_link_af(struct net_device *dev, const struct nlattr *nla)
1725 struct in_device *in_dev = __in_dev_get_rtnl(dev);
1726 struct nlattr *a, *tb[IFLA_INET_MAX+1];
1730 return -EAFNOSUPPORT;
1732 if (nla_parse_nested(tb, IFLA_INET_MAX, nla, NULL) < 0)
1735 if (tb[IFLA_INET_CONF]) {
1736 nla_for_each_nested(a, tb[IFLA_INET_CONF], rem)
1737 ipv4_devconf_set(in_dev, nla_type(a), nla_get_u32(a));
/* Netlink message size needed for a RTM_NEWNETCONF notification carrying
 * attribute @type, or every attribute when @type is -1. Must stay in
 * sync with inet_netconf_fill_devconf().
 */
1743 static int inet_netconf_msgsize_devconf(int type)
1745 int size = NLMSG_ALIGN(sizeof(struct netconfmsg))
1746 + nla_total_size(4); /* NETCONFA_IFINDEX */
1748 /* type -1 is used for ALL */
1749 if (type == -1 || type == NETCONFA_FORWARDING)
1750 size += nla_total_size(4);
1751 if (type == -1 || type == NETCONFA_RP_FILTER)
1752 size += nla_total_size(4);
1753 if (type == -1 || type == NETCONFA_MC_FORWARDING)
1754 size += nla_total_size(4);
1755 if (type == -1 || type == NETCONFA_PROXY_NEIGH)
1756 size += nla_total_size(4);
1757 if (type == -1 || type == NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN)
1758 size += nla_total_size(4);
/* Fill one RTM_NEWNETCONF message for @devconf into @skb, emitting the
 * attribute selected by @type (or all of them when @type is -1).
 * @ifindex may also be NETCONFA_IFINDEX_ALL / NETCONFA_IFINDEX_DEFAULT.
 * Overflow unwinds through nla_put_failure via nlmsg_cancel().
 */
1763 static int inet_netconf_fill_devconf(struct sk_buff *skb, int ifindex,
1764 struct ipv4_devconf *devconf, u32 portid,
1765 u32 seq, int event, unsigned int flags,
1768 struct nlmsghdr *nlh;
1769 struct netconfmsg *ncm;
1771 nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct netconfmsg),
1776 ncm = nlmsg_data(nlh);
1777 ncm->ncm_family = AF_INET;
1779 if (nla_put_s32(skb, NETCONFA_IFINDEX, ifindex) < 0)
1780 goto nla_put_failure;
1782 /* type -1 is used for ALL */
1783 if ((type == -1 || type == NETCONFA_FORWARDING) &&
1784 nla_put_s32(skb, NETCONFA_FORWARDING,
1785 IPV4_DEVCONF(*devconf, FORWARDING)) < 0)
1786 goto nla_put_failure;
1787 if ((type == -1 || type == NETCONFA_RP_FILTER) &&
1788 nla_put_s32(skb, NETCONFA_RP_FILTER,
1789 IPV4_DEVCONF(*devconf, RP_FILTER)) < 0)
1790 goto nla_put_failure;
1791 if ((type == -1 || type == NETCONFA_MC_FORWARDING) &&
1792 nla_put_s32(skb, NETCONFA_MC_FORWARDING,
1793 IPV4_DEVCONF(*devconf, MC_FORWARDING)) < 0)
1794 goto nla_put_failure;
/* Note: the netconf PROXY_NEIGH attribute is backed by the PROXY_ARP
 * devconf entry for IPv4.
 */
1795 if ((type == -1 || type == NETCONFA_PROXY_NEIGH) &&
1796 nla_put_s32(skb, NETCONFA_PROXY_NEIGH,
1797 IPV4_DEVCONF(*devconf, PROXY_ARP)) < 0)
1798 goto nla_put_failure;
1799 if ((type == -1 || type == NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN) &&
1800 nla_put_s32(skb, NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN,
1801 IPV4_DEVCONF(*devconf, IGNORE_ROUTES_WITH_LINKDOWN)) < 0)
1802 goto nla_put_failure;
1804 nlmsg_end(skb, nlh);
1808 nlmsg_cancel(skb, nlh);
/* Notify RTNLGRP_IPV4_NETCONF listeners that devconf attribute @type
 * changed for @ifindex. Allocates a message sized for just that type,
 * fills it, and broadcasts it; errors are reported via rtnl_set_sk_err()
 * (allocation/error paths are elided in this listing).
 */
1812 void inet_netconf_notify_devconf(struct net *net, int type, int ifindex,
1813 struct ipv4_devconf *devconf)
1815 struct sk_buff *skb;
1818 skb = nlmsg_new(inet_netconf_msgsize_devconf(type), GFP_KERNEL);
1822 err = inet_netconf_fill_devconf(skb, ifindex, devconf, 0, 0,
1823 RTM_NEWNETCONF, 0, type);
1825 /* -EMSGSIZE implies BUG in inet_netconf_msgsize_devconf() */
1826 WARN_ON(err == -EMSGSIZE);
1830 rtnl_notify(skb, net, 0, RTNLGRP_IPV4_NETCONF, NULL, GFP_KERNEL);
1834 rtnl_set_sk_err(net, RTNLGRP_IPV4_NETCONF, err);
/* Netlink policy for RTM_GETNETCONF requests: every attribute is a
 * plain int-sized value.
 */
1837 static const struct nla_policy devconf_ipv4_policy[NETCONFA_MAX+1] = {
1838 [NETCONFA_IFINDEX] = { .len = sizeof(int) },
1839 [NETCONFA_FORWARDING] = { .len = sizeof(int) },
1840 [NETCONFA_RP_FILTER] = { .len = sizeof(int) },
1841 [NETCONFA_PROXY_NEIGH] = { .len = sizeof(int) },
1842 [NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN] = { .len = sizeof(int) },
/* RTM_GETNETCONF doit handler: look up the requested devconf set -
 * "all", "default", or a specific device by ifindex - fill a full
 * (type -1) RTM_NEWNETCONF reply and unicast it to the requester.
 * (Error-handling/default branches are elided in this listing.)
 */
1845 static int inet_netconf_get_devconf(struct sk_buff *in_skb,
1846 struct nlmsghdr *nlh)
1848 struct net *net = sock_net(in_skb->sk);
1849 struct nlattr *tb[NETCONFA_MAX+1];
1850 struct netconfmsg *ncm;
1851 struct sk_buff *skb;
1852 struct ipv4_devconf *devconf;
1853 struct in_device *in_dev;
1854 struct net_device *dev;
1858 err = nlmsg_parse(nlh, sizeof(*ncm), tb, NETCONFA_MAX,
1859 devconf_ipv4_policy);
1864 if (!tb[NETCONFA_IFINDEX])
1867 ifindex = nla_get_s32(tb[NETCONFA_IFINDEX]);
1869 case NETCONFA_IFINDEX_ALL:
1870 devconf = net->ipv4.devconf_all;
1872 case NETCONFA_IFINDEX_DEFAULT:
1873 devconf = net->ipv4.devconf_dflt;
1876 dev = __dev_get_by_index(net, ifindex);
1879 in_dev = __in_dev_get_rtnl(dev);
1882 devconf = &in_dev->cnf;
1887 skb = nlmsg_new(inet_netconf_msgsize_devconf(-1), GFP_KERNEL);
1891 err = inet_netconf_fill_devconf(skb, ifindex, devconf,
1892 NETLINK_CB(in_skb).portid,
1893 nlh->nlmsg_seq, RTM_NEWNETCONF, 0,
1896 /* -EMSGSIZE implies BUG in inet_netconf_msgsize_devconf() */
1897 WARN_ON(err == -EMSGSIZE);
1901 err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
/* RTM_GETNETCONF dump callback: emit one full netconf message per device
 * (walking the ifindex hash, resumable via cb->args), then two synthetic
 * trailing entries for the "all" and "default" devconf sets, keyed off
 * bucket positions NETDEV_HASHENTRIES and NETDEV_HASHENTRIES + 1.
 */
1906 static int inet_netconf_dump_devconf(struct sk_buff *skb,
1907 struct netlink_callback *cb)
1909 struct net *net = sock_net(skb->sk);
1912 struct net_device *dev;
1913 struct in_device *in_dev;
1914 struct hlist_head *head;
1917 s_idx = idx = cb->args[1];
1919 for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
1921 head = &net->dev_index_head[h];
1923 cb->seq = atomic_read(&net->ipv4.dev_addr_genid) ^
1925 hlist_for_each_entry_rcu(dev, head, index_hlist) {
1928 in_dev = __in_dev_get_rcu(dev);
1932 if (inet_netconf_fill_devconf(skb, dev->ifindex,
1934 NETLINK_CB(cb->skb).portid,
1942 nl_dump_check_consistent(cb, nlmsg_hdr(skb));
1948 if (h == NETDEV_HASHENTRIES) {
1949 if (inet_netconf_fill_devconf(skb, NETCONFA_IFINDEX_ALL,
1950 net->ipv4.devconf_all,
1951 NETLINK_CB(cb->skb).portid,
1953 RTM_NEWNETCONF, NLM_F_MULTI,
1959 if (h == NETDEV_HASHENTRIES + 1) {
1960 if (inet_netconf_fill_devconf(skb, NETCONFA_IFINDEX_DEFAULT,
1961 net->ipv4.devconf_dflt,
1962 NETLINK_CB(cb->skb).portid,
1964 RTM_NEWNETCONF, NLM_F_MULTI,
1977 #ifdef CONFIG_SYSCTL
/* Propagate entry @i of the per-netns "default" devconf to every device
 * that has not explicitly overridden it (tracked by the cnf.state bitmap
 * set in devinet_conf_proc()).
 */
1979 static void devinet_copy_dflt_conf(struct net *net, int i)
1981 struct net_device *dev;
1984 for_each_netdev_rcu(net, dev) {
1985 struct in_device *in_dev;
1987 in_dev = __in_dev_get_rcu(dev);
1988 if (in_dev && !test_bit(i, in_dev->cnf.state))
1989 in_dev->cnf.data[i] = net->ipv4.devconf_dflt->data[i];
1994 /* called with RTNL locked */
/* Apply the "all" FORWARDING value to the whole netns: mirror it into the
 * default devconf, flip ACCEPT_REDIRECTS to the opposite sense, notify
 * netconf listeners, then update every device (disabling LRO, which is
 * incompatible with forwarding).
 */
1995 static void inet_forward_change(struct net *net)
1997 struct net_device *dev;
1998 int on = IPV4_DEVCONF_ALL(net, FORWARDING);
/* Routers must not accept redirects; hosts should. */
2000 IPV4_DEVCONF_ALL(net, ACCEPT_REDIRECTS) = !on;
2001 IPV4_DEVCONF_DFLT(net, FORWARDING) = on;
2002 inet_netconf_notify_devconf(net, NETCONFA_FORWARDING,
2003 NETCONFA_IFINDEX_ALL,
2004 net->ipv4.devconf_all);
2005 inet_netconf_notify_devconf(net, NETCONFA_FORWARDING,
2006 NETCONFA_IFINDEX_DEFAULT,
2007 net->ipv4.devconf_dflt);
2009 for_each_netdev(net, dev) {
2010 struct in_device *in_dev;
2013 dev_disable_lro(dev);
2015 in_dev = __in_dev_get_rtnl(dev);
2017 IN_DEV_CONF_SET(in_dev, FORWARDING, on);
2018 inet_netconf_notify_devconf(net, NETCONFA_FORWARDING,
2019 dev->ifindex, &in_dev->cnf);
/* Map a devconf pointer back to the netconf ifindex it represents:
 * the special ALL/DEFAULT sentinels for the shared tables, otherwise
 * the owning device's ifindex (recovered via container_of, since a
 * per-device cnf is embedded in its struct in_device).
 */
2024 static int devinet_conf_ifindex(struct net *net, struct ipv4_devconf *cnf)
2026 if (cnf == net->ipv4.devconf_dflt)
2027 return NETCONFA_IFINDEX_DEFAULT;
2028 else if (cnf == net->ipv4.devconf_all)
2029 return NETCONFA_IFINDEX_ALL;
2031 struct in_device *idev
2032 = container_of(cnf, struct in_device, cnf);
2033 return idev->dev->ifindex;
/* Generic sysctl handler for devconf entries. After proc_dointvec(),
 * a successful write: marks the entry as locally overridden (cnf.state
 * bit), re-propagates defaults when the "default" table changed, flushes
 * the route cache for accept_local/route_localnet being turned off, and
 * sends netconf notifications for rp_filter / proxy_arp /
 * ignore_routes_with_linkdown changes. (The "if (write && ...)" guard
 * around this tail is elided in this listing.)
 */
2037 static int devinet_conf_proc(struct ctl_table *ctl, int write,
2038 void __user *buffer,
2039 size_t *lenp, loff_t *ppos)
2041 int old_value = *(int *)ctl->data;
2042 int ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
2043 int new_value = *(int *)ctl->data;
2046 struct ipv4_devconf *cnf = ctl->extra1;
2047 struct net *net = ctl->extra2;
/* Recover the devconf entry index from the data pointer offset. */
2048 int i = (int *)ctl->data - cnf->data;
2051 set_bit(i, cnf->state);
2053 if (cnf == net->ipv4.devconf_dflt)
2054 devinet_copy_dflt_conf(net, i);
2055 if (i == IPV4_DEVCONF_ACCEPT_LOCAL - 1 ||
2056 i == IPV4_DEVCONF_ROUTE_LOCALNET - 1)
2057 if ((new_value == 0) && (old_value != 0))
2058 rt_cache_flush(net);
2060 if (i == IPV4_DEVCONF_RP_FILTER - 1 &&
2061 new_value != old_value) {
2062 ifindex = devinet_conf_ifindex(net, cnf);
2063 inet_netconf_notify_devconf(net, NETCONFA_RP_FILTER,
2066 if (i == IPV4_DEVCONF_PROXY_ARP - 1 &&
2067 new_value != old_value) {
2068 ifindex = devinet_conf_ifindex(net, cnf);
2069 inet_netconf_notify_devconf(net, NETCONFA_PROXY_NEIGH,
2072 if (i == IPV4_DEVCONF_IGNORE_ROUTES_WITH_LINKDOWN - 1 &&
2073 new_value != old_value) {
2074 ifindex = devinet_conf_ifindex(net, cnf);
2075 inet_netconf_notify_devconf(net, NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN,
/* Dedicated sysctl handler for the FORWARDING knobs (per-device, "all",
 * "default", and /proc/sys/net/ipv4/ip_forward). When the value actually
 * changes it takes RTNL - restarting the syscall if the lock is
 * contended - and either rewrites the whole netns via
 * inet_forward_change() ("all") or updates one device and notifies
 * netconf listeners, flushing the route cache afterwards.
 */
2083 static int devinet_sysctl_forward(struct ctl_table *ctl, int write,
2084 void __user *buffer,
2085 size_t *lenp, loff_t *ppos)
2087 int *valp = ctl->data;
2090 int ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
2092 if (write && *valp != val) {
2093 struct net *net = ctl->extra2;
2095 if (valp != &IPV4_DEVCONF_DFLT(net, FORWARDING)) {
2096 if (!rtnl_trylock()) {
2097 /* Restore the original values before restarting */
2100 return restart_syscall();
2102 if (valp == &IPV4_DEVCONF_ALL(net, FORWARDING)) {
2103 inet_forward_change(net);
2105 struct ipv4_devconf *cnf = ctl->extra1;
2106 struct in_device *idev =
2107 container_of(cnf, struct in_device, cnf);
/* Forwarding and LRO do not mix; turn LRO off on this device. */
2109 dev_disable_lro(idev->dev);
2110 inet_netconf_notify_devconf(net,
2111 NETCONFA_FORWARDING,
2116 rt_cache_flush(net);
/* Changing the "default" value needs no RTNL work - just notify. */
2118 inet_netconf_notify_devconf(net, NETCONFA_FORWARDING,
2119 NETCONFA_IFINDEX_DEFAULT,
2120 net->ipv4.devconf_dflt);
/* Sysctl handler for knobs whose change invalidates cached routes:
 * plain proc_dointvec(), then a route-cache flush if the value changed
 * on a write.
 */
2126 static int ipv4_doint_and_flush(struct ctl_table *ctl, int write,
2127 void __user *buffer,
2128 size_t *lenp, loff_t *ppos)
2130 int *valp = ctl->data;
2132 int ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
2133 struct net *net = ctl->extra2;
2135 if (write && *valp != val)
2136 rt_cache_flush(net);
/* Build one ctl_table entry for devconf attribute @attr. The .data and
 * .extra1 fields point into the static ipv4_devconf template; they are
 * rebased onto the real per-netns/per-device devconf by
 * __devinet_sysctl_register().
 */
2141 #define DEVINET_SYSCTL_ENTRY(attr, name, mval, proc) \
2144 .data = ipv4_devconf.data + \
2145 IPV4_DEVCONF_ ## attr - 1, \
2146 .maxlen = sizeof(int), \
2148 .proc_handler = proc, \
2149 .extra1 = &ipv4_devconf, \
/* Read-write entry using the generic devinet_conf_proc handler. */
2152 #define DEVINET_SYSCTL_RW_ENTRY(attr, name) \
2153 DEVINET_SYSCTL_ENTRY(attr, name, 0644, devinet_conf_proc)
/* Read-only entry (e.g. mc_forwarding, which the kernel manages). */
2155 #define DEVINET_SYSCTL_RO_ENTRY(attr, name) \
2156 DEVINET_SYSCTL_ENTRY(attr, name, 0444, devinet_conf_proc)
/* Read-write entry with a custom proc handler. */
2158 #define DEVINET_SYSCTL_COMPLEX_ENTRY(attr, name, proc) \
2159 DEVINET_SYSCTL_ENTRY(attr, name, 0644, proc)
/* Read-write entry that flushes the route cache on change. */
2161 #define DEVINET_SYSCTL_FLUSHING_ENTRY(attr, name) \
2162 DEVINET_SYSCTL_COMPLEX_ENTRY(attr, name, ipv4_doint_and_flush)
/* Template sysctl table for net/ipv4/conf/<dev>/*. A copy is made per
 * registration by __devinet_sysctl_register(), with the entry pointers
 * rebased onto the target devconf.
 */
2164 static struct devinet_sysctl_table {
2165 struct ctl_table_header *sysctl_header;
2166 struct ctl_table devinet_vars[__IPV4_DEVCONF_MAX];
2167 } devinet_sysctl = {
2169 DEVINET_SYSCTL_COMPLEX_ENTRY(FORWARDING, "forwarding",
2170 devinet_sysctl_forward),
2171 DEVINET_SYSCTL_RO_ENTRY(MC_FORWARDING, "mc_forwarding"),
2173 DEVINET_SYSCTL_RW_ENTRY(ACCEPT_REDIRECTS, "accept_redirects"),
2174 DEVINET_SYSCTL_RW_ENTRY(SECURE_REDIRECTS, "secure_redirects"),
2175 DEVINET_SYSCTL_RW_ENTRY(SHARED_MEDIA, "shared_media"),
2176 DEVINET_SYSCTL_RW_ENTRY(RP_FILTER, "rp_filter"),
2177 DEVINET_SYSCTL_RW_ENTRY(SEND_REDIRECTS, "send_redirects"),
2178 DEVINET_SYSCTL_RW_ENTRY(ACCEPT_SOURCE_ROUTE,
2179 "accept_source_route"),
2180 DEVINET_SYSCTL_RW_ENTRY(ACCEPT_LOCAL, "accept_local"),
2181 DEVINET_SYSCTL_RW_ENTRY(SRC_VMARK, "src_valid_mark"),
2182 DEVINET_SYSCTL_RW_ENTRY(PROXY_ARP, "proxy_arp"),
2183 DEVINET_SYSCTL_RW_ENTRY(MEDIUM_ID, "medium_id"),
2184 DEVINET_SYSCTL_RW_ENTRY(BOOTP_RELAY, "bootp_relay"),
2185 DEVINET_SYSCTL_RW_ENTRY(LOG_MARTIANS, "log_martians"),
2186 DEVINET_SYSCTL_RW_ENTRY(TAG, "tag"),
2187 DEVINET_SYSCTL_RW_ENTRY(ARPFILTER, "arp_filter"),
2188 DEVINET_SYSCTL_RW_ENTRY(ARP_ANNOUNCE, "arp_announce"),
2189 DEVINET_SYSCTL_RW_ENTRY(ARP_IGNORE, "arp_ignore"),
2190 DEVINET_SYSCTL_RW_ENTRY(ARP_ACCEPT, "arp_accept"),
2191 DEVINET_SYSCTL_RW_ENTRY(ARP_NOTIFY, "arp_notify"),
2192 DEVINET_SYSCTL_RW_ENTRY(PROXY_ARP_PVLAN, "proxy_arp_pvlan"),
2193 DEVINET_SYSCTL_RW_ENTRY(FORCE_IGMP_VERSION,
2194 "force_igmp_version"),
2195 DEVINET_SYSCTL_RW_ENTRY(IGMPV2_UNSOLICITED_REPORT_INTERVAL,
2196 "igmpv2_unsolicited_report_interval"),
2197 DEVINET_SYSCTL_RW_ENTRY(IGMPV3_UNSOLICITED_REPORT_INTERVAL,
2198 "igmpv3_unsolicited_report_interval"),
2199 DEVINET_SYSCTL_RW_ENTRY(IGNORE_ROUTES_WITH_LINKDOWN,
2200 "ignore_routes_with_linkdown"),
2202 DEVINET_SYSCTL_FLUSHING_ENTRY(NOXFRM, "disable_xfrm"),
2203 DEVINET_SYSCTL_FLUSHING_ENTRY(NOPOLICY, "disable_policy"),
2204 DEVINET_SYSCTL_FLUSHING_ENTRY(PROMOTE_SECONDARIES,
2205 "promote_secondaries"),
2206 DEVINET_SYSCTL_FLUSHING_ENTRY(ROUTE_LOCALNET,
/* Register a sysctl directory net/ipv4/conf/@dev_name for devconf @p:
 * duplicate the devinet_sysctl template, rebase each entry's .data from
 * the static ipv4_devconf onto @p (pointer arithmetic on the copy), and
 * register it in @net. (Error unwinding is elided in this listing.)
 */
2211 static int __devinet_sysctl_register(struct net *net, char *dev_name,
2212 struct ipv4_devconf *p)
2215 struct devinet_sysctl_table *t;
2216 char path[sizeof("net/ipv4/conf/") + IFNAMSIZ];
2218 t = kmemdup(&devinet_sysctl, sizeof(*t), GFP_KERNEL);
/* "- 1" skips the table's NULL terminator entry. */
2222 for (i = 0; i < ARRAY_SIZE(t->devinet_vars) - 1; i++) {
2223 t->devinet_vars[i].data += (char *)p - (char *)&ipv4_devconf;
2224 t->devinet_vars[i].extra1 = p;
2225 t->devinet_vars[i].extra2 = net;
2228 snprintf(path, sizeof(path), "net/ipv4/conf/%s", dev_name);
2230 t->sysctl_header = register_net_sysctl(net, path, t->devinet_vars);
2231 if (!t->sysctl_header)
/* Tear down the sysctl table registered for @cnf by
 * __devinet_sysctl_register() (the NULL check and kfree of the table
 * copy are elided in this listing).
 */
2243 static void __devinet_sysctl_unregister(struct ipv4_devconf *cnf)
2245 struct devinet_sysctl_table *t = cnf->sysctl;
2251 unregister_net_sysctl_table(t->sysctl_header);
/* Register both sysctl trees for a device: the ARP neighbour parameters
 * first, then the devconf tree; on devconf failure the neigh sysctls are
 * unwound. Device names that would break sysctl paths are rejected.
 */
2255 static int devinet_sysctl_register(struct in_device *idev)
2259 if (!sysctl_dev_name_is_allowed(idev->dev->name))
2262 err = neigh_sysctl_register(idev->dev, idev->arp_parms, NULL);
2265 err = __devinet_sysctl_register(dev_net(idev->dev), idev->dev->name,
2268 neigh_sysctl_unregister(idev->arp_parms);
/* Mirror of devinet_sysctl_register(): drop the devconf tree, then the
 * ARP neighbour sysctls.
 */
2272 static void devinet_sysctl_unregister(struct in_device *idev)
2274 __devinet_sysctl_unregister(&idev->cnf);
2275 neigh_sysctl_unregister(idev->arp_parms);
/* Template for /proc/sys/net/ipv4/ip_forward. Points at the init_net
 * "all" FORWARDING slot; devinet_init_net() duplicates it and retargets
 * .data/.extra1/.extra2 for other network namespaces.
 */
2278 static struct ctl_table ctl_forward_entry[] = {
2280 .procname = "ip_forward",
2281 .data = &ipv4_devconf.data[
2282 IPV4_DEVCONF_FORWARDING - 1],
2283 .maxlen = sizeof(int),
2285 .proc_handler = devinet_sysctl_forward,
2286 .extra1 = &ipv4_devconf,
2287 .extra2 = &init_net,
/* Per-netns init: init_net uses the static devconf templates directly;
 * every other namespace gets kmemdup'd copies of "all"/"default" (and of
 * the ip_forward ctl_table under CONFIG_SYSCTL, retargeted at its own
 * copies). Then the all/default sysctl trees and net/ipv4/ip_forward
 * are registered. Failures unwind through the elided error labels,
 * freeing only what was duplicated.
 */
2293 static __net_init int devinet_init_net(struct net *net)
2296 struct ipv4_devconf *all, *dflt;
2297 #ifdef CONFIG_SYSCTL
2298 struct ctl_table *tbl = ctl_forward_entry;
2299 struct ctl_table_header *forw_hdr;
2303 all = &ipv4_devconf;
2304 dflt = &ipv4_devconf_dflt;
2306 if (!net_eq(net, &init_net)) {
2307 all = kmemdup(all, sizeof(ipv4_devconf), GFP_KERNEL);
2311 dflt = kmemdup(dflt, sizeof(ipv4_devconf_dflt), GFP_KERNEL);
2313 goto err_alloc_dflt;
2315 #ifdef CONFIG_SYSCTL
2316 tbl = kmemdup(tbl, sizeof(ctl_forward_entry), GFP_KERNEL);
2320 tbl[0].data = &all->data[IPV4_DEVCONF_FORWARDING - 1];
2321 tbl[0].extra1 = all;
2322 tbl[0].extra2 = net;
2326 #ifdef CONFIG_SYSCTL
2327 err = __devinet_sysctl_register(net, "all", all);
2331 err = __devinet_sysctl_register(net, "default", dflt);
2336 forw_hdr = register_net_sysctl(net, "net/ipv4", tbl);
2339 net->ipv4.forw_hdr = forw_hdr;
2342 net->ipv4.devconf_all = all;
2343 net->ipv4.devconf_dflt = dflt;
2346 #ifdef CONFIG_SYSCTL
2348 __devinet_sysctl_unregister(dflt);
2350 __devinet_sysctl_unregister(all);
/* Free duplicates only; the init_net statics must never be freed. */
2352 if (tbl != ctl_forward_entry)
2356 if (dflt != &ipv4_devconf_dflt)
2359 if (all != &ipv4_devconf)
/* Per-netns teardown, mirroring devinet_init_net(): unregister the
 * ip_forward table and the all/default sysctl trees, then free the
 * per-namespace devconf copies. NOTE(review): the kfree calls here run
 * unconditionally - presumably this path is never reached for init_net,
 * whose devconf objects are static; confirm against the full source.
 */
2365 static __net_exit void devinet_exit_net(struct net *net)
2367 #ifdef CONFIG_SYSCTL
2368 struct ctl_table *tbl;
2370 tbl = net->ipv4.forw_hdr->ctl_table_arg;
2371 unregister_net_sysctl_table(net->ipv4.forw_hdr);
2372 __devinet_sysctl_unregister(net->ipv4.devconf_dflt);
2373 __devinet_sysctl_unregister(net->ipv4.devconf_all);
2376 kfree(net->ipv4.devconf_dflt);
2377 kfree(net->ipv4.devconf_all);
/* Pernet operations registered in devinet_init(); run for every network
 * namespace creation/destruction.
 */
2380 static __net_initdata struct pernet_operations devinet_ops = {
2381 .init = devinet_init_net,
2382 .exit = devinet_exit_net,
/* AF_INET hooks into rtnetlink link messages (IFLA_AF_SPEC handling):
 * fill/size for dumps, validate/set for configuration writes.
 */
2385 static struct rtnl_af_ops inet_af_ops __read_mostly = {
2387 .fill_link_af = inet_fill_link_af,
2388 .get_link_af_size = inet_get_link_af_size,
2389 .validate_link_af = inet_validate_link_af,
2390 .set_link_af = inet_set_link_af,
2393 void __init devinet_init(void)
2397 for (i = 0; i < IN4_ADDR_HSIZE; i++)
2398 INIT_HLIST_HEAD(&inet_addr_lst[i]);
2400 register_pernet_subsys(&devinet_ops);
2402 register_gifconf(PF_INET, inet_gifconf);
2403 register_netdevice_notifier(&ip_netdev_notifier);
2405 queue_delayed_work(system_power_efficient_wq, &check_lifetime_work, 0);
2407 rtnl_af_register(&inet_af_ops);
2409 rtnl_register(PF_INET, RTM_NEWADDR, inet_rtm_newaddr, NULL, NULL);
2410 rtnl_register(PF_INET, RTM_DELADDR, inet_rtm_deladdr, NULL, NULL);
2411 rtnl_register(PF_INET, RTM_GETADDR, NULL, inet_dump_ifaddr, NULL);
2412 rtnl_register(PF_INET, RTM_GETNETCONF, inet_netconf_get_devconf,
2413 inet_netconf_dump_devconf, NULL);