1 // SPDX-License-Identifier: GPL-2.0-or-later
3 * Generic address resolution entity
6 * Pedro Roque <roque@di.fc.ul.pt>
7 * Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
10 * Vitaly E. Lavrov releasing NULL neighbor in neigh_add.
11 * Harald Welte Add neighbour cache statistics like rtstat
14 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
16 #include <linux/slab.h>
17 #include <linux/kmemleak.h>
18 #include <linux/types.h>
19 #include <linux/kernel.h>
20 #include <linux/module.h>
21 #include <linux/socket.h>
22 #include <linux/netdevice.h>
23 #include <linux/proc_fs.h>
25 #include <linux/sysctl.h>
27 #include <linux/times.h>
28 #include <net/net_namespace.h>
29 #include <net/neighbour.h>
33 #include <net/netevent.h>
34 #include <net/netlink.h>
35 #include <linux/rtnetlink.h>
36 #include <linux/random.h>
37 #include <linux/string.h>
38 #include <linux/log2.h>
39 #include <linux/inetdevice.h>
40 #include <net/addrconf.h>
42 #include <trace/events/neigh.h>
46 #define neigh_dbg(level, fmt, ...) \
47 do { \
48 if (level <= NEIGH_DEBUG) \
49 pr_debug(fmt, ##__VA_ARGS__); \
50 } while (0)
52 #define PNEIGH_HASHMASK 0xF
54 static void neigh_timer_handler(struct timer_list *t);
55 static void __neigh_notify(struct neighbour *n, int type, int flags,
57 static void neigh_update_notify(struct neighbour *neigh, u32 nlmsg_pid);
58 static int pneigh_ifdown_and_unlock(struct neigh_table *tbl,
59 struct net_device *dev);
62 static const struct seq_operations neigh_stat_seq_ops;
66 Neighbour hash table buckets are protected with rwlock tbl->lock.
68 - All the scans/updates to hash buckets MUST be made under this lock.
69 - NOTHING clever should be done under this lock: no callbacks
70 into protocol backends, no attempts to send anything to the network.
71 Doing so will deadlock if the backend/driver wants to use the neighbour cache.
73 - If the entry requires some non-trivial actions, increase
74 its reference count and release the table lock.
76 Neighbour entries are protected:
77 - with reference count.
78 - with rwlock neigh->lock
80 Reference count prevents destruction.
82 neigh->lock mainly serializes the link-layer address data and its validity state.
83 However, the same lock is also used to protect other entry fields:
87 Again, nothing clever should be done under neigh->lock;
88 the most complicated operation we allow is dev->hard_header.
89 dev->hard_header is assumed to be simple and to make
90 no callbacks into the neighbour tables.
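/* Illustrative sketch (not part of the original file): the usual way a caller
 * follows the rules above is to take a reference under tbl->lock and do any
 * heavyweight work only after the lock has been dropped. The helper names
 * follow the neighbour API (neigh_hold()/neigh_release() from
 * <net/neighbour.h>); treat this as a usage outline, not a definitive recipe.
 *
 *	write_lock_bh(&tbl->lock);
 *	n = <find the entry in its hash bucket>;
 *	if (n)
 *		neigh_hold(n);		// destruction is refcount-driven
 *	write_unlock_bh(&tbl->lock);
 *	if (n) {
 *		<talk to backends, send packets, etc.>;
 *		neigh_release(n);	// drop the pin when done
 *	}
 */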
93 static int neigh_blackhole(struct neighbour *neigh, struct sk_buff *skb)
99 static void neigh_cleanup_and_release(struct neighbour *neigh)
101 trace_neigh_cleanup_and_release(neigh, 0);
102 __neigh_notify(neigh, RTM_DELNEIGH, 0, 0);
103 call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, neigh);
104 neigh_release(neigh);
108 * Returns a value uniformly distributed over the interval (1/2)*base...(3/2)*base.
109 * This matches the default IPv6 behaviour and is not overridable,
110 * because it is a really reasonable choice.
113 unsigned long neigh_rand_reach_time(unsigned long base)
115 return base ? (prandom_u32() % base) + (base >> 1) : 0;
117 EXPORT_SYMBOL(neigh_rand_reach_time);
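/* Worked example (illustrative): with base = 30 * HZ the expression above
 * evaluates to base/2 + (prandom_u32() % base), i.e. a value spread uniformly
 * over [15 * HZ, 45 * HZ), which is the "(1/2)*base ... (3/2)*base" interval
 * described in the comment.
 */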
119 static void neigh_mark_dead(struct neighbour *n)
122 if (!list_empty(&n->gc_list)) {
123 list_del_init(&n->gc_list);
124 atomic_dec(&n->tbl->gc_entries);
128 static void neigh_update_gc_list(struct neighbour *n)
130 bool on_gc_list, exempt_from_gc;
132 write_lock_bh(&n->tbl->lock);
133 write_lock(&n->lock);
138 /* remove from the gc list if new state is permanent or if neighbor
139 * is externally learned; otherwise the entry should be on the gc list
141 exempt_from_gc = n->nud_state & NUD_PERMANENT ||
142 n->flags & NTF_EXT_LEARNED;
143 on_gc_list = !list_empty(&n->gc_list);
145 if (exempt_from_gc && on_gc_list) {
146 list_del_init(&n->gc_list);
147 atomic_dec(&n->tbl->gc_entries);
148 } else if (!exempt_from_gc && !on_gc_list) {
149 /* add entries to the tail; cleaning removes from the front */
150 list_add_tail(&n->gc_list, &n->tbl->gc_list);
151 atomic_inc(&n->tbl->gc_entries);
155 write_unlock(&n->lock);
156 write_unlock_bh(&n->tbl->lock);
159 static bool neigh_update_ext_learned(struct neighbour *neigh, u32 flags,
165 if (!(flags & NEIGH_UPDATE_F_ADMIN))
168 ndm_flags = (flags & NEIGH_UPDATE_F_EXT_LEARNED) ? NTF_EXT_LEARNED : 0;
169 if ((neigh->flags ^ ndm_flags) & NTF_EXT_LEARNED) {
170 if (ndm_flags & NTF_EXT_LEARNED)
171 neigh->flags |= NTF_EXT_LEARNED;
173 neigh->flags &= ~NTF_EXT_LEARNED;
181 static bool neigh_del(struct neighbour *n, struct neighbour __rcu **np,
182 struct neigh_table *tbl)
186 write_lock(&n->lock);
187 if (refcount_read(&n->refcnt) == 1) {
188 struct neighbour *neigh;
190 neigh = rcu_dereference_protected(n->next,
191 lockdep_is_held(&tbl->lock));
192 rcu_assign_pointer(*np, neigh);
196 write_unlock(&n->lock);
198 neigh_cleanup_and_release(n);
202 bool neigh_remove_one(struct neighbour *ndel, struct neigh_table *tbl)
204 struct neigh_hash_table *nht;
205 void *pkey = ndel->primary_key;
208 struct neighbour __rcu **np;
210 nht = rcu_dereference_protected(tbl->nht,
211 lockdep_is_held(&tbl->lock));
212 hash_val = tbl->hash(pkey, ndel->dev, nht->hash_rnd);
213 hash_val = hash_val >> (32 - nht->hash_shift);
215 np = &nht->hash_buckets[hash_val];
216 while ((n = rcu_dereference_protected(*np,
217 lockdep_is_held(&tbl->lock)))) {
219 return neigh_del(n, np, tbl);
225 static int neigh_forced_gc(struct neigh_table *tbl)
227 int max_clean = atomic_read(&tbl->gc_entries) -
228 READ_ONCE(tbl->gc_thresh2);
229 u64 tmax = ktime_get_ns() + NSEC_PER_MSEC;
230 unsigned long tref = jiffies - 5 * HZ;
231 struct neighbour *n, *tmp;
235 NEIGH_CACHE_STAT_INC(tbl, forced_gc_runs);
237 write_lock_bh(&tbl->lock);
239 list_for_each_entry_safe(n, tmp, &tbl->gc_list, gc_list) {
240 if (refcount_read(&n->refcnt) == 1) {
243 write_lock(&n->lock);
244 if ((n->nud_state == NUD_FAILED) ||
245 (n->nud_state == NUD_NOARP) ||
246 (tbl->is_multicast &&
247 tbl->is_multicast(n->primary_key)) ||
248 !time_in_range(n->updated, tref, jiffies))
250 write_unlock(&n->lock);
252 if (remove && neigh_remove_one(n, tbl))
254 if (shrunk >= max_clean)
257 if (ktime_get_ns() > tmax)
264 WRITE_ONCE(tbl->last_flush, jiffies);
266 write_unlock_bh(&tbl->lock);
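/* Informal note (not from the original source): the reclaim loop above is
 * bounded twice. It stops once it has shrunk the table by max_clean entries
 * (the excess over gc_thresh2) and it also bails out after roughly one
 * millisecond of wall-clock time (tmax), so a very long gc_list cannot stall
 * the allocation path that called us.
 */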
271 static void neigh_add_timer(struct neighbour *n, unsigned long when)
273 /* Keep a safe distance from the jiffies - LONG_MAX wrap point while the
274 * timer is running in DELAY/PROBE state, but still report large times in
275 * the past to user space.
277 unsigned long mint = jiffies - (LONG_MAX - 86400 * HZ);
280 if (!time_in_range(n->confirmed, mint, jiffies))
282 if (time_before(n->used, n->confirmed))
283 n->used = n->confirmed;
284 if (unlikely(mod_timer(&n->timer, when))) {
285 printk("NEIGH: BUG, double timer add, state is %x\n",
291 static int neigh_del_timer(struct neighbour *n)
293 if ((n->nud_state & NUD_IN_TIMER) &&
294 del_timer(&n->timer)) {
301 static void pneigh_queue_purge(struct sk_buff_head *list, struct net *net)
303 struct sk_buff_head tmp;
307 skb_queue_head_init(&tmp);
308 spin_lock_irqsave(&list->lock, flags);
309 skb = skb_peek(list);
310 while (skb != NULL) {
311 struct sk_buff *skb_next = skb_peek_next(skb, list);
312 if (net == NULL || net_eq(dev_net(skb->dev), net)) {
313 __skb_unlink(skb, list);
314 __skb_queue_tail(&tmp, skb);
318 spin_unlock_irqrestore(&list->lock, flags);
320 while ((skb = __skb_dequeue(&tmp))) {
326 static void neigh_flush_dev(struct neigh_table *tbl, struct net_device *dev,
330 struct neigh_hash_table *nht;
332 nht = rcu_dereference_protected(tbl->nht,
333 lockdep_is_held(&tbl->lock));
335 for (i = 0; i < (1 << nht->hash_shift); i++) {
337 struct neighbour __rcu **np = &nht->hash_buckets[i];
339 while ((n = rcu_dereference_protected(*np,
340 lockdep_is_held(&tbl->lock))) != NULL) {
341 if (dev && n->dev != dev) {
345 if (skip_perm && n->nud_state & NUD_PERMANENT) {
349 rcu_assign_pointer(*np,
350 rcu_dereference_protected(n->next,
351 lockdep_is_held(&tbl->lock)));
352 write_lock(&n->lock);
355 if (refcount_read(&n->refcnt) != 1) {
356 /* The most unpleasant situation:
357 we must destroy the neighbour entry,
358 but someone still uses it.
360 The destroy will be delayed until
361 the last user releases us, but
362 we must kill timers etc. and move it to a safe state.
365 __skb_queue_purge(&n->arp_queue);
366 n->arp_queue_len_bytes = 0;
367 n->output = neigh_blackhole;
368 if (n->nud_state & NUD_VALID)
369 n->nud_state = NUD_NOARP;
371 n->nud_state = NUD_NONE;
372 neigh_dbg(2, "neigh %p is stray\n", n);
374 write_unlock(&n->lock);
375 neigh_cleanup_and_release(n);
380 void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev)
382 write_lock_bh(&tbl->lock);
383 neigh_flush_dev(tbl, dev, false);
384 write_unlock_bh(&tbl->lock);
386 EXPORT_SYMBOL(neigh_changeaddr);
388 static int __neigh_ifdown(struct neigh_table *tbl, struct net_device *dev,
391 write_lock_bh(&tbl->lock);
392 neigh_flush_dev(tbl, dev, skip_perm);
393 pneigh_ifdown_and_unlock(tbl, dev);
394 pneigh_queue_purge(&tbl->proxy_queue, dev ? dev_net(dev) : NULL);
395 if (skb_queue_empty_lockless(&tbl->proxy_queue))
396 del_timer_sync(&tbl->proxy_timer);
400 int neigh_carrier_down(struct neigh_table *tbl, struct net_device *dev)
402 __neigh_ifdown(tbl, dev, true);
405 EXPORT_SYMBOL(neigh_carrier_down);
407 int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
409 __neigh_ifdown(tbl, dev, false);
412 EXPORT_SYMBOL(neigh_ifdown);
414 static struct neighbour *neigh_alloc(struct neigh_table *tbl,
415 struct net_device *dev,
416 u8 flags, bool exempt_from_gc)
418 struct neighbour *n = NULL;
419 unsigned long now = jiffies;
420 int entries, gc_thresh3;
425 entries = atomic_inc_return(&tbl->gc_entries) - 1;
426 gc_thresh3 = READ_ONCE(tbl->gc_thresh3);
427 if (entries >= gc_thresh3 ||
428 (entries >= READ_ONCE(tbl->gc_thresh2) &&
429 time_after(now, READ_ONCE(tbl->last_flush) + 5 * HZ))) {
430 if (!neigh_forced_gc(tbl) && entries >= gc_thresh3) {
431 net_info_ratelimited("%s: neighbor table overflow!\n",
433 NEIGH_CACHE_STAT_INC(tbl, table_fulls);
439 n = kzalloc(tbl->entry_size + dev->neigh_priv_len, GFP_ATOMIC);
443 __skb_queue_head_init(&n->arp_queue);
444 rwlock_init(&n->lock);
445 seqlock_init(&n->ha_lock);
446 n->updated = n->used = now;
447 n->nud_state = NUD_NONE;
448 n->output = neigh_blackhole;
450 seqlock_init(&n->hh.hh_lock);
451 n->parms = neigh_parms_clone(&tbl->parms);
452 timer_setup(&n->timer, neigh_timer_handler, 0);
454 NEIGH_CACHE_STAT_INC(tbl, allocs);
456 refcount_set(&n->refcnt, 1);
458 INIT_LIST_HEAD(&n->gc_list);
460 atomic_inc(&tbl->entries);
466 atomic_dec(&tbl->gc_entries);
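/* Illustrative note (not from the original source): the three thresholds
 * consulted above behave roughly as follows, using the defaults commonly
 * shipped for ARP (gc_thresh1 = 128, gc_thresh2 = 512, gc_thresh3 = 1024;
 * all of them are per-table and tunable via sysctl):
 *
 *	entries <  gc_thresh1   periodic GC leaves the table alone
 *	entries >= gc_thresh2   an allocation runs neigh_forced_gc() first
 *	                        if the last flush was more than 5 s ago
 *	entries >= gc_thresh3   the allocation fails with "neighbor table
 *	                        overflow!" unless forced GC freed something
 */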
470 static void neigh_get_hash_rnd(u32 *x)
472 *x = get_random_u32() | 1;
475 static struct neigh_hash_table *neigh_hash_alloc(unsigned int shift)
477 size_t size = (1 << shift) * sizeof(struct neighbour *);
478 struct neigh_hash_table *ret;
479 struct neighbour __rcu **buckets;
482 ret = kmalloc(sizeof(*ret), GFP_ATOMIC);
485 if (size <= PAGE_SIZE) {
486 buckets = kzalloc(size, GFP_ATOMIC);
488 buckets = (struct neighbour __rcu **)
489 __get_free_pages(GFP_ATOMIC | __GFP_ZERO,
491 kmemleak_alloc(buckets, size, 1, GFP_ATOMIC);
497 ret->hash_buckets = buckets;
498 ret->hash_shift = shift;
499 for (i = 0; i < NEIGH_NUM_HASH_RND; i++)
500 neigh_get_hash_rnd(&ret->hash_rnd[i]);
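/* Worked example (illustrative): with shift = 3 the table has 1 << 3 = 8
 * buckets and size = 8 * sizeof(struct neighbour *) = 64 bytes on a 64-bit
 * build, so the kzalloc() path is taken; neigh_table_init() below starts
 * every table at exactly this size. Only when size exceeds PAGE_SIZE
 * (shift >= 10 with 4 KiB pages and 8-byte pointers) does the allocation
 * fall back to __get_free_pages().
 */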
504 static void neigh_hash_free_rcu(struct rcu_head *head)
506 struct neigh_hash_table *nht = container_of(head,
507 struct neigh_hash_table,
509 size_t size = (1 << nht->hash_shift) * sizeof(struct neighbour *);
510 struct neighbour __rcu **buckets = nht->hash_buckets;
512 if (size <= PAGE_SIZE) {
515 kmemleak_free(buckets);
516 free_pages((unsigned long)buckets, get_order(size));
521 static struct neigh_hash_table *neigh_hash_grow(struct neigh_table *tbl,
522 unsigned long new_shift)
524 unsigned int i, hash;
525 struct neigh_hash_table *new_nht, *old_nht;
527 NEIGH_CACHE_STAT_INC(tbl, hash_grows);
529 old_nht = rcu_dereference_protected(tbl->nht,
530 lockdep_is_held(&tbl->lock));
531 new_nht = neigh_hash_alloc(new_shift);
535 for (i = 0; i < (1 << old_nht->hash_shift); i++) {
536 struct neighbour *n, *next;
538 for (n = rcu_dereference_protected(old_nht->hash_buckets[i],
539 lockdep_is_held(&tbl->lock));
542 hash = tbl->hash(n->primary_key, n->dev,
545 hash >>= (32 - new_nht->hash_shift);
546 next = rcu_dereference_protected(n->next,
547 lockdep_is_held(&tbl->lock));
549 rcu_assign_pointer(n->next,
550 rcu_dereference_protected(
551 new_nht->hash_buckets[hash],
552 lockdep_is_held(&tbl->lock)));
553 rcu_assign_pointer(new_nht->hash_buckets[hash], n);
557 rcu_assign_pointer(tbl->nht, new_nht);
558 call_rcu(&old_nht->rcu, neigh_hash_free_rcu);
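/* Informal note (not from the original source): readers walk the buckets
 * under rcu_read_lock_bh() without taking tbl->lock, so the grow path relies
 * on RCU rather than mutual exclusion. Entries are relinked into the new
 * table with rcu_assign_pointer(), tbl->nht is switched last, and the old
 * bucket array is freed only after a grace period via call_rcu(). A
 * concurrent lookup may transiently miss an entry while the rehash is in
 * flight -- that is harmless, a miss simply falls back to creation -- but it
 * can never dereference freed memory.
 */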
562 struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey,
563 struct net_device *dev)
567 NEIGH_CACHE_STAT_INC(tbl, lookups);
570 n = __neigh_lookup_noref(tbl, pkey, dev);
572 if (!refcount_inc_not_zero(&n->refcnt))
574 NEIGH_CACHE_STAT_INC(tbl, hits);
577 rcu_read_unlock_bh();
580 EXPORT_SYMBOL(neigh_lookup);
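/* Minimal usage sketch (illustrative, not taken from this file): a protocol
 * typically pairs the lookup with __neigh_create() and drops its reference
 * when done. The table and key below (arp_tbl, an IPv4 address) are just an
 * example choice.
 *
 *	struct neighbour *n;
 *
 *	n = neigh_lookup(&arp_tbl, &ip_addr, dev);
 *	if (!n) {
 *		n = __neigh_create(&arp_tbl, &ip_addr, dev, true);
 *		if (IS_ERR(n))
 *			return PTR_ERR(n);
 *	}
 *	... use n, e.g. n->output(n, skb) ...
 *	neigh_release(n);
 */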
582 static struct neighbour *
583 ___neigh_create(struct neigh_table *tbl, const void *pkey,
584 struct net_device *dev, u8 flags,
585 bool exempt_from_gc, bool want_ref)
587 u32 hash_val, key_len = tbl->key_len;
588 struct neighbour *n1, *rc, *n;
589 struct neigh_hash_table *nht;
592 n = neigh_alloc(tbl, dev, flags, exempt_from_gc);
593 trace_neigh_create(tbl, dev, pkey, n, exempt_from_gc);
595 rc = ERR_PTR(-ENOBUFS);
599 memcpy(n->primary_key, pkey, key_len);
603 /* Protocol specific setup. */
604 if (tbl->constructor && (error = tbl->constructor(n)) < 0) {
606 goto out_neigh_release;
609 if (dev->netdev_ops->ndo_neigh_construct) {
610 error = dev->netdev_ops->ndo_neigh_construct(dev, n);
613 goto out_neigh_release;
617 /* Device specific setup. */
618 if (n->parms->neigh_setup &&
619 (error = n->parms->neigh_setup(n)) < 0) {
621 goto out_neigh_release;
624 n->confirmed = jiffies - (NEIGH_VAR(n->parms, BASE_REACHABLE_TIME) << 1);
626 write_lock_bh(&tbl->lock);
627 nht = rcu_dereference_protected(tbl->nht,
628 lockdep_is_held(&tbl->lock));
630 if (atomic_read(&tbl->entries) > (1 << nht->hash_shift))
631 nht = neigh_hash_grow(tbl, nht->hash_shift + 1);
633 hash_val = tbl->hash(n->primary_key, dev, nht->hash_rnd) >> (32 - nht->hash_shift);
635 if (n->parms->dead) {
636 rc = ERR_PTR(-EINVAL);
640 for (n1 = rcu_dereference_protected(nht->hash_buckets[hash_val],
641 lockdep_is_held(&tbl->lock));
643 n1 = rcu_dereference_protected(n1->next,
644 lockdep_is_held(&tbl->lock))) {
645 if (dev == n1->dev && !memcmp(n1->primary_key, n->primary_key, key_len)) {
655 list_add_tail(&n->gc_list, &n->tbl->gc_list);
659 rcu_assign_pointer(n->next,
660 rcu_dereference_protected(nht->hash_buckets[hash_val],
661 lockdep_is_held(&tbl->lock)));
662 rcu_assign_pointer(nht->hash_buckets[hash_val], n);
663 write_unlock_bh(&tbl->lock);
664 neigh_dbg(2, "neigh %p is created\n", n);
669 write_unlock_bh(&tbl->lock);
672 atomic_dec(&tbl->gc_entries);
677 struct neighbour *__neigh_create(struct neigh_table *tbl, const void *pkey,
678 struct net_device *dev, bool want_ref)
680 return ___neigh_create(tbl, pkey, dev, 0, false, want_ref);
682 EXPORT_SYMBOL(__neigh_create);
684 static u32 pneigh_hash(const void *pkey, unsigned int key_len)
686 u32 hash_val = *(u32 *)(pkey + key_len - 4);
687 hash_val ^= (hash_val >> 16);
688 hash_val ^= hash_val >> 8;
689 hash_val ^= hash_val >> 4;
690 hash_val &= PNEIGH_HASHMASK;
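/* Worked example (illustrative): for an IPv4 key the last four key bytes are
 * read as a u32 and folded down by successive xor-shifts of 16, 8 and 4 bits
 * before being masked with PNEIGH_HASHMASK (0xF), so the proxy hash table
 * always has exactly 16 buckets regardless of address family or key length.
 */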
694 static struct pneigh_entry *__pneigh_lookup_1(struct pneigh_entry *n,
697 unsigned int key_len,
698 struct net_device *dev)
701 if (!memcmp(n->key, pkey, key_len) &&
702 net_eq(pneigh_net(n), net) &&
703 (n->dev == dev || !n->dev))
710 struct pneigh_entry *__pneigh_lookup(struct neigh_table *tbl,
711 struct net *net, const void *pkey, struct net_device *dev)
713 unsigned int key_len = tbl->key_len;
714 u32 hash_val = pneigh_hash(pkey, key_len);
716 return __pneigh_lookup_1(tbl->phash_buckets[hash_val],
717 net, pkey, key_len, dev);
719 EXPORT_SYMBOL_GPL(__pneigh_lookup);
721 struct pneigh_entry * pneigh_lookup(struct neigh_table *tbl,
722 struct net *net, const void *pkey,
723 struct net_device *dev, int creat)
725 struct pneigh_entry *n;
726 unsigned int key_len = tbl->key_len;
727 u32 hash_val = pneigh_hash(pkey, key_len);
729 read_lock_bh(&tbl->lock);
730 n = __pneigh_lookup_1(tbl->phash_buckets[hash_val],
731 net, pkey, key_len, dev);
732 read_unlock_bh(&tbl->lock);
739 n = kzalloc(sizeof(*n) + key_len, GFP_KERNEL);
743 write_pnet(&n->net, net);
744 memcpy(n->key, pkey, key_len);
749 if (tbl->pconstructor && tbl->pconstructor(n)) {
757 write_lock_bh(&tbl->lock);
758 n->next = tbl->phash_buckets[hash_val];
759 tbl->phash_buckets[hash_val] = n;
760 write_unlock_bh(&tbl->lock);
764 EXPORT_SYMBOL(pneigh_lookup);
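/* Usage sketch (illustrative): proxy entries are what "ip neigh add proxy
 * <addr> dev <if>" ends up creating. A caller that wants create-if-missing
 * semantics passes creat = 1:
 *
 *	struct pneigh_entry *pn;
 *
 *	pn = pneigh_lookup(&arp_tbl, net, &ip_addr, dev, 1);
 *	if (!pn)
 *		return -ENOBUFS;
 *
 * With creat = 0 the call is a pure lookup and never allocates.
 */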
767 int pneigh_delete(struct neigh_table *tbl, struct net *net, const void *pkey,
768 struct net_device *dev)
770 struct pneigh_entry *n, **np;
771 unsigned int key_len = tbl->key_len;
772 u32 hash_val = pneigh_hash(pkey, key_len);
774 write_lock_bh(&tbl->lock);
775 for (np = &tbl->phash_buckets[hash_val]; (n = *np) != NULL;
777 if (!memcmp(n->key, pkey, key_len) && n->dev == dev &&
778 net_eq(pneigh_net(n), net)) {
780 write_unlock_bh(&tbl->lock);
781 if (tbl->pdestructor)
789 write_unlock_bh(&tbl->lock);
793 static int pneigh_ifdown_and_unlock(struct neigh_table *tbl,
794 struct net_device *dev)
796 struct pneigh_entry *n, **np, *freelist = NULL;
799 for (h = 0; h <= PNEIGH_HASHMASK; h++) {
800 np = &tbl->phash_buckets[h];
801 while ((n = *np) != NULL) {
802 if (!dev || n->dev == dev) {
811 write_unlock_bh(&tbl->lock);
812 while ((n = freelist)) {
815 if (tbl->pdestructor)
824 static void neigh_parms_destroy(struct neigh_parms *parms);
826 static inline void neigh_parms_put(struct neigh_parms *parms)
828 if (refcount_dec_and_test(&parms->refcnt))
829 neigh_parms_destroy(parms);
833 * neighbour must already be out of the table;
836 void neigh_destroy(struct neighbour *neigh)
838 struct net_device *dev = neigh->dev;
840 NEIGH_CACHE_STAT_INC(neigh->tbl, destroys);
843 pr_warn("Destroying alive neighbour %p\n", neigh);
848 if (neigh_del_timer(neigh))
849 pr_warn("Impossible event\n");
851 write_lock_bh(&neigh->lock);
852 __skb_queue_purge(&neigh->arp_queue);
853 write_unlock_bh(&neigh->lock);
854 neigh->arp_queue_len_bytes = 0;
856 if (dev->netdev_ops->ndo_neigh_destroy)
857 dev->netdev_ops->ndo_neigh_destroy(dev, neigh);
860 neigh_parms_put(neigh->parms);
862 neigh_dbg(2, "neigh %p is destroyed\n", neigh);
864 atomic_dec(&neigh->tbl->entries);
865 kfree_rcu(neigh, rcu);
867 EXPORT_SYMBOL(neigh_destroy);
869 /* Neighbour state is suspicious; disable the fast path.
872 Called with neigh write-locked.
874 static void neigh_suspect(struct neighbour *neigh)
876 neigh_dbg(2, "neigh %p is suspected\n", neigh);
878 neigh->output = neigh->ops->output;
881 /* Neighbour state is OK; enable the fast path.
884 Called with neigh write-locked.
886 static void neigh_connect(struct neighbour *neigh)
888 neigh_dbg(2, "neigh %p is connected\n", neigh);
890 neigh->output = neigh->ops->connected_output;
893 static void neigh_periodic_work(struct work_struct *work)
895 struct neigh_table *tbl = container_of(work, struct neigh_table, gc_work.work);
897 struct neighbour __rcu **np;
899 struct neigh_hash_table *nht;
901 NEIGH_CACHE_STAT_INC(tbl, periodic_gc_runs);
903 write_lock_bh(&tbl->lock);
904 nht = rcu_dereference_protected(tbl->nht,
905 lockdep_is_held(&tbl->lock));
908 * periodically recompute ReachableTime from random function
911 if (time_after(jiffies, tbl->last_rand + 300 * HZ)) {
912 struct neigh_parms *p;
914 WRITE_ONCE(tbl->last_rand, jiffies);
915 list_for_each_entry(p, &tbl->parms_list, list)
917 neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
920 if (atomic_read(&tbl->entries) < READ_ONCE(tbl->gc_thresh1))
923 for (i = 0 ; i < (1 << nht->hash_shift); i++) {
924 np = &nht->hash_buckets[i];
926 while ((n = rcu_dereference_protected(*np,
927 lockdep_is_held(&tbl->lock))) != NULL) {
930 write_lock(&n->lock);
932 state = n->nud_state;
933 if ((state & (NUD_PERMANENT | NUD_IN_TIMER)) ||
934 (n->flags & NTF_EXT_LEARNED)) {
935 write_unlock(&n->lock);
939 if (time_before(n->used, n->confirmed) &&
940 time_is_before_eq_jiffies(n->confirmed))
941 n->used = n->confirmed;
943 if (refcount_read(&n->refcnt) == 1 &&
944 (state == NUD_FAILED ||
945 !time_in_range_open(jiffies, n->used,
946 n->used + NEIGH_VAR(n->parms, GC_STALETIME)))) {
947 rcu_assign_pointer(*np,
948 rcu_dereference_protected(n->next,
949 lockdep_is_held(&tbl->lock)));
951 write_unlock(&n->lock);
952 neigh_cleanup_and_release(n);
955 write_unlock(&n->lock);
961 * It's fine to release lock here, even if hash table
962 * grows while we are preempted.
964 write_unlock_bh(&tbl->lock);
966 write_lock_bh(&tbl->lock);
967 nht = rcu_dereference_protected(tbl->nht,
968 lockdep_is_held(&tbl->lock));
971 /* Cycle through all hash buckets every BASE_REACHABLE_TIME/2 ticks.
972 * ARP entry timeouts range from 1/2 BASE_REACHABLE_TIME to 3/2
973 * BASE_REACHABLE_TIME.
975 queue_delayed_work(system_power_efficient_wq, &tbl->gc_work,
976 NEIGH_VAR(&tbl->parms, BASE_REACHABLE_TIME) >> 1);
977 write_unlock_bh(&tbl->lock);
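/* Concrete example (illustrative): with the common BASE_REACHABLE_TIME of
 * 30 seconds this work item reschedules itself every 15 seconds, and an
 * unreferenced, non-permanent entry is dropped once it has been idle for
 * longer than GC_STALETIME (60 seconds by default).
 */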
980 static __inline__ int neigh_max_probes(struct neighbour *n)
982 struct neigh_parms *p = n->parms;
983 return NEIGH_VAR(p, UCAST_PROBES) + NEIGH_VAR(p, APP_PROBES) +
984 (n->nud_state & NUD_PROBE ? NEIGH_VAR(p, MCAST_REPROBES) :
985 NEIGH_VAR(p, MCAST_PROBES));
988 static void neigh_invalidate(struct neighbour *neigh)
989 __releases(neigh->lock)
990 __acquires(neigh->lock)
994 NEIGH_CACHE_STAT_INC(neigh->tbl, res_failed);
995 neigh_dbg(2, "neigh %p is failed\n", neigh);
996 neigh->updated = jiffies;
998 /* This is a delicate spot: report_unreachable is a very complicated
999 routine. In particular, it can end up hitting this same neighbour entry!
1001 So we try to be careful and avoid an endless loop. --ANK
1003 while (neigh->nud_state == NUD_FAILED &&
1004 (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
1005 write_unlock(&neigh->lock);
1006 neigh->ops->error_report(neigh, skb);
1007 write_lock(&neigh->lock);
1009 __skb_queue_purge(&neigh->arp_queue);
1010 neigh->arp_queue_len_bytes = 0;
1013 static void neigh_probe(struct neighbour *neigh)
1014 __releases(neigh->lock)
1016 struct sk_buff *skb = skb_peek_tail(&neigh->arp_queue);
1017 /* keep skb alive even if arp_queue overflows */
1019 skb = skb_clone(skb, GFP_ATOMIC);
1020 write_unlock(&neigh->lock);
1021 if (neigh->ops->solicit)
1022 neigh->ops->solicit(neigh, skb);
1023 atomic_inc(&neigh->probes);
1027 /* Called when a timer expires for a neighbour entry. */
1029 static void neigh_timer_handler(struct timer_list *t)
1031 unsigned long now, next;
1032 struct neighbour *neigh = from_timer(neigh, t, timer);
1036 write_lock(&neigh->lock);
1038 state = neigh->nud_state;
1042 if (!(state & NUD_IN_TIMER))
1045 if (state & NUD_REACHABLE) {
1046 if (time_before_eq(now,
1047 neigh->confirmed + neigh->parms->reachable_time)) {
1048 neigh_dbg(2, "neigh %p is still alive\n", neigh);
1049 next = neigh->confirmed + neigh->parms->reachable_time;
1050 } else if (time_before_eq(now,
1052 NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME))) {
1053 neigh_dbg(2, "neigh %p is delayed\n", neigh);
1054 neigh->nud_state = NUD_DELAY;
1055 neigh->updated = jiffies;
1056 neigh_suspect(neigh);
1057 next = now + NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME);
1059 neigh_dbg(2, "neigh %p is suspected\n", neigh);
1060 neigh->nud_state = NUD_STALE;
1061 neigh->updated = jiffies;
1062 neigh_suspect(neigh);
1065 } else if (state & NUD_DELAY) {
1066 if (time_before_eq(now,
1068 NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME))) {
1069 neigh_dbg(2, "neigh %p is now reachable\n", neigh);
1070 neigh->nud_state = NUD_REACHABLE;
1071 neigh->updated = jiffies;
1072 neigh_connect(neigh);
1074 next = neigh->confirmed + neigh->parms->reachable_time;
1076 neigh_dbg(2, "neigh %p is probed\n", neigh);
1077 neigh->nud_state = NUD_PROBE;
1078 neigh->updated = jiffies;
1079 atomic_set(&neigh->probes, 0);
1081 next = now + max(NEIGH_VAR(neigh->parms, RETRANS_TIME),
1085 /* NUD_PROBE|NUD_INCOMPLETE */
1086 next = now + max(NEIGH_VAR(neigh->parms, RETRANS_TIME), HZ/100);
1089 if ((neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) &&
1090 atomic_read(&neigh->probes) >= neigh_max_probes(neigh)) {
1091 neigh->nud_state = NUD_FAILED;
1093 neigh_invalidate(neigh);
1097 if (neigh->nud_state & NUD_IN_TIMER) {
1098 if (time_before(next, jiffies + HZ/100))
1099 next = jiffies + HZ/100;
1100 if (!mod_timer(&neigh->timer, next))
1103 if (neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) {
1107 write_unlock(&neigh->lock);
1111 neigh_update_notify(neigh, 0);
1113 trace_neigh_timer_handler(neigh, 0);
1115 neigh_release(neigh);
1118 int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
1121 bool immediate_probe = false;
1123 write_lock_bh(&neigh->lock);
1126 if (neigh->nud_state & (NUD_CONNECTED | NUD_DELAY | NUD_PROBE))
1131 if (!(neigh->nud_state & (NUD_STALE | NUD_INCOMPLETE))) {
1132 if (NEIGH_VAR(neigh->parms, MCAST_PROBES) +
1133 NEIGH_VAR(neigh->parms, APP_PROBES)) {
1134 unsigned long next, now = jiffies;
1136 atomic_set(&neigh->probes,
1137 NEIGH_VAR(neigh->parms, UCAST_PROBES));
1138 neigh_del_timer(neigh);
1139 neigh->nud_state = NUD_INCOMPLETE;
1140 neigh->updated = now;
1141 next = now + max(NEIGH_VAR(neigh->parms, RETRANS_TIME),
1143 neigh_add_timer(neigh, next);
1144 immediate_probe = true;
1146 neigh->nud_state = NUD_FAILED;
1147 neigh->updated = jiffies;
1148 write_unlock_bh(&neigh->lock);
1153 } else if (neigh->nud_state & NUD_STALE) {
1154 neigh_dbg(2, "neigh %p is delayed\n", neigh);
1155 neigh_del_timer(neigh);
1156 neigh->nud_state = NUD_DELAY;
1157 neigh->updated = jiffies;
1158 neigh_add_timer(neigh, jiffies +
1159 NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME));
1162 if (neigh->nud_state == NUD_INCOMPLETE) {
1164 while (neigh->arp_queue_len_bytes + skb->truesize >
1165 NEIGH_VAR(neigh->parms, QUEUE_LEN_BYTES)) {
1166 struct sk_buff *buff;
1168 buff = __skb_dequeue(&neigh->arp_queue);
1171 neigh->arp_queue_len_bytes -= buff->truesize;
1173 NEIGH_CACHE_STAT_INC(neigh->tbl, unres_discards);
1176 __skb_queue_tail(&neigh->arp_queue, skb);
1177 neigh->arp_queue_len_bytes += skb->truesize;
1182 if (immediate_probe)
1185 write_unlock(&neigh->lock);
1187 trace_neigh_event_send_done(neigh, rc);
1191 if (neigh->nud_state & NUD_STALE)
1193 write_unlock_bh(&neigh->lock);
1195 trace_neigh_event_send_dead(neigh, 1);
1198 EXPORT_SYMBOL(__neigh_event_send);
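/* Informal summary (not part of the original file) of the NUD state machine
 * driven by neigh_timer_handler() and __neigh_event_send() above; see
 * RFC 4861 for the authoritative definitions:
 *
 *	NONE/FAILED  --__neigh_event_send()-->  INCOMPLETE  (probing)
 *	INCOMPLETE   --confirmation---------->  REACHABLE
 *	REACHABLE    --reachable_time-------->  STALE       (still usable)
 *	STALE        --outgoing traffic------>  DELAY
 *	DELAY        --delay_probe_time------>  PROBE       (unicast re-probe)
 *	PROBE/INCOMPLETE --too many probes--->  FAILED
 *
 * NUD_PERMANENT and NUD_NOARP entries sit outside the machine and are never
 * timed out.
 */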
1200 static void neigh_update_hhs(struct neighbour *neigh)
1202 struct hh_cache *hh;
1203 void (*update)(struct hh_cache*, const struct net_device*, const unsigned char *)
1206 if (neigh->dev->header_ops)
1207 update = neigh->dev->header_ops->cache_update;
1211 if (READ_ONCE(hh->hh_len)) {
1212 write_seqlock_bh(&hh->hh_lock);
1213 update(hh, neigh->dev, neigh->ha);
1214 write_sequnlock_bh(&hh->hh_lock);
1221 /* Generic update routine.
1222 -- lladdr is the new link-layer address, or NULL if none is supplied.
1223 -- new is the new NUD state.
1225 NEIGH_UPDATE_F_OVERRIDE allows overriding an existing lladdr if it differs.
1227 NEIGH_UPDATE_F_WEAK_OVERRIDE marks an existing "connected"
1228 lladdr as suspect instead of overriding it.
1230 NEIGH_UPDATE_F_ADMIN means that the change is administrative.
1231 NEIGH_UPDATE_F_USE means that the entry is user triggered.
1232 NEIGH_UPDATE_F_OVERRIDE_ISROUTER allows overriding the existing NTF_ROUTER flag.
1234 NEIGH_UPDATE_F_ISROUTER indicates whether the neighbour is known to be a router.
1237 The caller MUST hold a reference count on the entry.
1240 static int __neigh_update(struct neighbour *neigh, const u8 *lladdr,
1241 u8 new, u32 flags, u32 nlmsg_pid,
1242 struct netlink_ext_ack *extack)
1244 bool ext_learn_change = false;
1248 struct net_device *dev;
1249 int update_isrouter = 0;
1251 trace_neigh_update(neigh, lladdr, new, flags, nlmsg_pid);
1253 write_lock_bh(&neigh->lock);
1256 old = neigh->nud_state;
1260 NL_SET_ERR_MSG(extack, "Neighbor entry is now dead");
1264 if (!(flags & NEIGH_UPDATE_F_ADMIN) &&
1265 (old & (NUD_NOARP | NUD_PERMANENT)))
1268 ext_learn_change = neigh_update_ext_learned(neigh, flags, ¬ify);
1269 if (flags & NEIGH_UPDATE_F_USE) {
1270 new = old & ~NUD_PERMANENT;
1271 neigh->nud_state = new;
1276 if (!(new & NUD_VALID)) {
1277 neigh_del_timer(neigh);
1278 if (old & NUD_CONNECTED)
1279 neigh_suspect(neigh);
1280 neigh->nud_state = new;
1282 notify = old & NUD_VALID;
1283 if ((old & (NUD_INCOMPLETE | NUD_PROBE)) &&
1284 (new & NUD_FAILED)) {
1285 neigh_invalidate(neigh);
1291 /* Compare new lladdr with cached one */
1292 if (!dev->addr_len) {
1293 /* First case: device needs no address. */
1295 } else if (lladdr) {
1296 /* The second case: if something is already cached
1297 and a new address is proposed:
1299 - compare new and old; if they are different, check the override flag
1301 if ((old & NUD_VALID) &&
1302 !memcmp(lladdr, neigh->ha, dev->addr_len))
1305 /* No address is supplied; if we know something,
1306 use it, otherwise discard the request.
1309 if (!(old & NUD_VALID)) {
1310 NL_SET_ERR_MSG(extack, "No link layer address given");
1316 /* Update confirmed timestamp for neighbour entry after we
1317 * received ARP packet even if it doesn't change IP to MAC binding.
1319 if (new & NUD_CONNECTED)
1320 neigh->confirmed = jiffies;
1322 /* If entry was valid and address is not changed,
1323 do not change entry state, if new one is STALE.
1326 update_isrouter = flags & NEIGH_UPDATE_F_OVERRIDE_ISROUTER;
1327 if (old & NUD_VALID) {
1328 if (lladdr != neigh->ha && !(flags & NEIGH_UPDATE_F_OVERRIDE)) {
1329 update_isrouter = 0;
1330 if ((flags & NEIGH_UPDATE_F_WEAK_OVERRIDE) &&
1331 (old & NUD_CONNECTED)) {
1337 if (lladdr == neigh->ha && new == NUD_STALE &&
1338 !(flags & NEIGH_UPDATE_F_ADMIN))
1343 /* Update timestamp only once we know we will make a change to the
1344 * neighbour entry. Otherwise we risk moving the locktime window with
1345 * no-op updates and ignoring relevant ARP updates.
1347 if (new != old || lladdr != neigh->ha)
1348 neigh->updated = jiffies;
1351 neigh_del_timer(neigh);
1352 if (new & NUD_PROBE)
1353 atomic_set(&neigh->probes, 0);
1354 if (new & NUD_IN_TIMER)
1355 neigh_add_timer(neigh, (jiffies +
1356 ((new & NUD_REACHABLE) ?
1357 neigh->parms->reachable_time :
1359 neigh->nud_state = new;
1363 if (lladdr != neigh->ha) {
1364 write_seqlock(&neigh->ha_lock);
1365 memcpy(&neigh->ha, lladdr, dev->addr_len);
1366 write_sequnlock(&neigh->ha_lock);
1367 neigh_update_hhs(neigh);
1368 if (!(new & NUD_CONNECTED))
1369 neigh->confirmed = jiffies -
1370 (NEIGH_VAR(neigh->parms, BASE_REACHABLE_TIME) << 1);
1375 if (new & NUD_CONNECTED)
1376 neigh_connect(neigh);
1378 neigh_suspect(neigh);
1379 if (!(old & NUD_VALID)) {
1380 struct sk_buff *skb;
1382 /* Again: avoid dead loop if something went wrong */
1384 while (neigh->nud_state & NUD_VALID &&
1385 (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
1386 struct dst_entry *dst = skb_dst(skb);
1387 struct neighbour *n2, *n1 = neigh;
1388 write_unlock_bh(&neigh->lock);
1392 /* Why not just use 'neigh' as-is? The problem is that
1393 * things such as shaper, eql, and sch_teql can end up
1394 * using alternative, different, neigh objects to output
1395 * the packet in the output path. So what we need to do
1396 * here is re-lookup the top-level neigh in the path so
1397 * we can reinject the packet there.
1400 if (dst && dst->obsolete != DST_OBSOLETE_DEAD) {
1401 n2 = dst_neigh_lookup_skb(dst, skb);
1405 n1->output(n1, skb);
1410 write_lock_bh(&neigh->lock);
1412 __skb_queue_purge(&neigh->arp_queue);
1413 neigh->arp_queue_len_bytes = 0;
1416 if (update_isrouter)
1417 neigh_update_is_router(neigh, flags, ¬ify);
1418 write_unlock_bh(&neigh->lock);
1420 if (((new ^ old) & NUD_PERMANENT) || ext_learn_change)
1421 neigh_update_gc_list(neigh);
1424 neigh_update_notify(neigh, nlmsg_pid);
1426 trace_neigh_update_done(neigh, err);
1431 int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
1432 u32 flags, u32 nlmsg_pid)
1434 return __neigh_update(neigh, lladdr, new, flags, nlmsg_pid, NULL);
1436 EXPORT_SYMBOL(neigh_update);
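/* Usage sketch (illustrative): a protocol that has just learned a peer's
 * link-layer address typically confirms the entry like this; the flag names
 * are the ones documented in the comment block above.
 *
 *	neigh_update(n, new_mac, NUD_REACHABLE,
 *		     NEIGH_UPDATE_F_OVERRIDE | NEIGH_UPDATE_F_WEAK_OVERRIDE,
 *		     0);
 *
 * An nlmsg_pid of 0 just means the change did not originate from a netlink
 * request, so there is no particular requester to attribute it to.
 */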
1438 /* Update the neigh to listen temporarily for probe responses, even if it is
1439 * in a NUD_FAILED state. The caller has to hold neigh->lock for writing.
1441 void __neigh_set_probe_once(struct neighbour *neigh)
1445 neigh->updated = jiffies;
1446 if (!(neigh->nud_state & NUD_FAILED))
1448 neigh->nud_state = NUD_INCOMPLETE;
1449 atomic_set(&neigh->probes, neigh_max_probes(neigh));
1450 neigh_add_timer(neigh,
1451 jiffies + max(NEIGH_VAR(neigh->parms, RETRANS_TIME),
1454 EXPORT_SYMBOL(__neigh_set_probe_once);
1456 struct neighbour *neigh_event_ns(struct neigh_table *tbl,
1457 u8 *lladdr, void *saddr,
1458 struct net_device *dev)
1460 struct neighbour *neigh = __neigh_lookup(tbl, saddr, dev,
1461 lladdr || !dev->addr_len);
1463 neigh_update(neigh, lladdr, NUD_STALE,
1464 NEIGH_UPDATE_F_OVERRIDE, 0);
1467 EXPORT_SYMBOL(neigh_event_ns);
1469 /* called with read_lock_bh(&n->lock); */
1470 static void neigh_hh_init(struct neighbour *n)
1472 struct net_device *dev = n->dev;
1473 __be16 prot = n->tbl->protocol;
1474 struct hh_cache *hh = &n->hh;
1476 write_lock_bh(&n->lock);
1478 /* Only one thread can come in here and initialize the hh_cache entry. */
1482 dev->header_ops->cache(n, hh, prot);
1484 write_unlock_bh(&n->lock);
1487 /* Slow and careful. */
1489 int neigh_resolve_output(struct neighbour *neigh, struct sk_buff *skb)
1493 if (!neigh_event_send(neigh, skb)) {
1495 struct net_device *dev = neigh->dev;
1498 if (dev->header_ops->cache && !READ_ONCE(neigh->hh.hh_len))
1499 neigh_hh_init(neigh);
1502 __skb_pull(skb, skb_network_offset(skb));
1503 seq = read_seqbegin(&neigh->ha_lock);
1504 err = dev_hard_header(skb, dev, ntohs(skb->protocol),
1505 neigh->ha, NULL, skb->len);
1506 } while (read_seqretry(&neigh->ha_lock, seq));
1509 rc = dev_queue_xmit(skb);
1520 EXPORT_SYMBOL(neigh_resolve_output);
1522 /* As fast as possible without hh cache */
1524 int neigh_connected_output(struct neighbour *neigh, struct sk_buff *skb)
1526 struct net_device *dev = neigh->dev;
1531 __skb_pull(skb, skb_network_offset(skb));
1532 seq = read_seqbegin(&neigh->ha_lock);
1533 err = dev_hard_header(skb, dev, ntohs(skb->protocol),
1534 neigh->ha, NULL, skb->len);
1535 } while (read_seqretry(&neigh->ha_lock, seq));
1538 err = dev_queue_xmit(skb);
1545 EXPORT_SYMBOL(neigh_connected_output);
1547 int neigh_direct_output(struct neighbour *neigh, struct sk_buff *skb)
1549 return dev_queue_xmit(skb);
1551 EXPORT_SYMBOL(neigh_direct_output);
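/* Informal comparison of the three output helpers above (not from the
 * original source):
 *
 *	neigh_resolve_output()    slow path; may queue the skb and drive the
 *	                          state machine until the address resolves
 *	neigh_connected_output()  fills in the header from neigh->ha under the
 *	                          ha_lock seqlock; no resolution is attempted
 *	neigh_direct_output()     devices that need no link-layer header;
 *	                          straight to dev_queue_xmit()
 *
 * neigh->output is switched between the first two by neigh_connect() and
 * neigh_suspect() as the entry moves through the NUD states.
 */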
1553 static void neigh_proxy_process(struct timer_list *t)
1555 struct neigh_table *tbl = from_timer(tbl, t, proxy_timer);
1556 long sched_next = 0;
1557 unsigned long now = jiffies;
1558 struct sk_buff *skb, *n;
1560 spin_lock(&tbl->proxy_queue.lock);
1562 skb_queue_walk_safe(&tbl->proxy_queue, skb, n) {
1563 long tdif = NEIGH_CB(skb)->sched_next - now;
1566 struct net_device *dev = skb->dev;
1568 __skb_unlink(skb, &tbl->proxy_queue);
1569 if (tbl->proxy_redo && netif_running(dev)) {
1571 tbl->proxy_redo(skb);
1578 } else if (!sched_next || tdif < sched_next)
1581 del_timer(&tbl->proxy_timer);
1583 mod_timer(&tbl->proxy_timer, jiffies + sched_next);
1584 spin_unlock(&tbl->proxy_queue.lock);
1587 void pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p,
1588 struct sk_buff *skb)
1590 unsigned long now = jiffies;
1592 unsigned long sched_next = now + (prandom_u32() %
1593 NEIGH_VAR(p, PROXY_DELAY));
1595 if (tbl->proxy_queue.qlen > NEIGH_VAR(p, PROXY_QLEN)) {
1600 NEIGH_CB(skb)->sched_next = sched_next;
1601 NEIGH_CB(skb)->flags |= LOCALLY_ENQUEUED;
1603 spin_lock(&tbl->proxy_queue.lock);
1604 if (del_timer(&tbl->proxy_timer)) {
1605 if (time_before(tbl->proxy_timer.expires, sched_next))
1606 sched_next = tbl->proxy_timer.expires;
1610 __skb_queue_tail(&tbl->proxy_queue, skb);
1611 mod_timer(&tbl->proxy_timer, sched_next);
1612 spin_unlock(&tbl->proxy_queue.lock);
1614 EXPORT_SYMBOL(pneigh_enqueue);
1616 static inline struct neigh_parms *lookup_neigh_parms(struct neigh_table *tbl,
1617 struct net *net, int ifindex)
1619 struct neigh_parms *p;
1621 list_for_each_entry(p, &tbl->parms_list, list) {
1622 if ((p->dev && p->dev->ifindex == ifindex && net_eq(neigh_parms_net(p), net)) ||
1623 (!p->dev && !ifindex && net_eq(net, &init_net)))
1630 struct neigh_parms *neigh_parms_alloc(struct net_device *dev,
1631 struct neigh_table *tbl)
1633 struct neigh_parms *p;
1634 struct net *net = dev_net(dev);
1635 const struct net_device_ops *ops = dev->netdev_ops;
1637 p = kmemdup(&tbl->parms, sizeof(*p), GFP_KERNEL);
1640 refcount_set(&p->refcnt, 1);
1642 neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
1645 write_pnet(&p->net, net);
1646 p->sysctl_table = NULL;
1648 if (ops->ndo_neigh_setup && ops->ndo_neigh_setup(dev, p)) {
1654 write_lock_bh(&tbl->lock);
1655 list_add(&p->list, &tbl->parms.list);
1656 write_unlock_bh(&tbl->lock);
1658 neigh_parms_data_state_cleanall(p);
1662 EXPORT_SYMBOL(neigh_parms_alloc);
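/* Usage sketch (illustrative): a per-device parameter block is normally set
 * up when a protocol attaches to a device and released symmetrically on
 * teardown, roughly what IPv4 does when it creates an in_device:
 *
 *	struct neigh_parms *p = neigh_parms_alloc(dev, &arp_tbl);
 *
 *	if (!p)
 *		return -ENOBUFS;
 *	...
 *	neigh_parms_release(&arp_tbl, p);
 */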
1664 static void neigh_rcu_free_parms(struct rcu_head *head)
1666 struct neigh_parms *parms =
1667 container_of(head, struct neigh_parms, rcu_head);
1669 neigh_parms_put(parms);
1672 void neigh_parms_release(struct neigh_table *tbl, struct neigh_parms *parms)
1674 if (!parms || parms == &tbl->parms)
1676 write_lock_bh(&tbl->lock);
1677 list_del(&parms->list);
1679 write_unlock_bh(&tbl->lock);
1681 dev_put(parms->dev);
1682 call_rcu(&parms->rcu_head, neigh_rcu_free_parms);
1684 EXPORT_SYMBOL(neigh_parms_release);
1686 static void neigh_parms_destroy(struct neigh_parms *parms)
1691 static struct lock_class_key neigh_table_proxy_queue_class;
1693 static struct neigh_table *neigh_tables[NEIGH_NR_TABLES] __read_mostly;
1695 void neigh_table_init(int index, struct neigh_table *tbl)
1697 unsigned long now = jiffies;
1698 unsigned long phsize;
1700 INIT_LIST_HEAD(&tbl->parms_list);
1701 INIT_LIST_HEAD(&tbl->gc_list);
1702 list_add(&tbl->parms.list, &tbl->parms_list);
1703 write_pnet(&tbl->parms.net, &init_net);
1704 refcount_set(&tbl->parms.refcnt, 1);
1705 tbl->parms.reachable_time =
1706 neigh_rand_reach_time(NEIGH_VAR(&tbl->parms, BASE_REACHABLE_TIME));
1708 tbl->stats = alloc_percpu(struct neigh_statistics);
1710 panic("cannot create neighbour cache statistics");
1712 #ifdef CONFIG_PROC_FS
1713 if (!proc_create_seq_data(tbl->id, 0, init_net.proc_net_stat,
1714 &neigh_stat_seq_ops, tbl))
1715 panic("cannot create neighbour proc dir entry");
1718 RCU_INIT_POINTER(tbl->nht, neigh_hash_alloc(3));
1720 phsize = (PNEIGH_HASHMASK + 1) * sizeof(struct pneigh_entry *);
1721 tbl->phash_buckets = kzalloc(phsize, GFP_KERNEL);
1723 if (!tbl->nht || !tbl->phash_buckets)
1724 panic("cannot allocate neighbour cache hashes");
1726 if (!tbl->entry_size)
1727 tbl->entry_size = ALIGN(offsetof(struct neighbour, primary_key) +
1728 tbl->key_len, NEIGH_PRIV_ALIGN);
1730 WARN_ON(tbl->entry_size % NEIGH_PRIV_ALIGN);
1732 rwlock_init(&tbl->lock);
1733 INIT_DEFERRABLE_WORK(&tbl->gc_work, neigh_periodic_work);
1734 queue_delayed_work(system_power_efficient_wq, &tbl->gc_work,
1735 tbl->parms.reachable_time);
1736 timer_setup(&tbl->proxy_timer, neigh_proxy_process, 0);
1737 skb_queue_head_init_class(&tbl->proxy_queue,
1738 &neigh_table_proxy_queue_class);
1740 tbl->last_flush = now;
1741 tbl->last_rand = now + tbl->parms.reachable_time * 20;
1743 neigh_tables[index] = tbl;
1745 EXPORT_SYMBOL(neigh_table_init);
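/* Usage sketch (illustrative): a protocol registers its statically defined
 * table once at init time, roughly the way ARP and NDISC do:
 *
 *	neigh_table_init(NEIGH_ARP_TABLE, &arp_tbl);
 *
 * A protocol that can be unloaded (e.g. IPv6's ndisc) undoes this with
 * neigh_table_clear() on its cleanup path.
 */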
1747 int neigh_table_clear(int index, struct neigh_table *tbl)
1749 neigh_tables[index] = NULL;
1750 /* This is not clean... fix it so that the IPv6 module can be unloaded safely */
1751 cancel_delayed_work_sync(&tbl->gc_work);
1752 del_timer_sync(&tbl->proxy_timer);
1753 pneigh_queue_purge(&tbl->proxy_queue, NULL);
1754 neigh_ifdown(tbl, NULL);
1755 if (atomic_read(&tbl->entries))
1756 pr_crit("neighbour leakage\n");
1758 call_rcu(&rcu_dereference_protected(tbl->nht, 1)->rcu,
1759 neigh_hash_free_rcu);
1762 kfree(tbl->phash_buckets);
1763 tbl->phash_buckets = NULL;
1765 remove_proc_entry(tbl->id, init_net.proc_net_stat);
1767 free_percpu(tbl->stats);
1772 EXPORT_SYMBOL(neigh_table_clear);
1774 static struct neigh_table *neigh_find_table(int family)
1776 struct neigh_table *tbl = NULL;
1780 tbl = neigh_tables[NEIGH_ARP_TABLE];
1783 tbl = neigh_tables[NEIGH_ND_TABLE];
1790 const struct nla_policy nda_policy[NDA_MAX+1] = {
1791 [NDA_UNSPEC] = { .strict_start_type = NDA_NH_ID },
1792 [NDA_DST] = { .type = NLA_BINARY, .len = MAX_ADDR_LEN },
1793 [NDA_LLADDR] = { .type = NLA_BINARY, .len = MAX_ADDR_LEN },
1794 [NDA_CACHEINFO] = { .len = sizeof(struct nda_cacheinfo) },
1795 [NDA_PROBES] = { .type = NLA_U32 },
1796 [NDA_VLAN] = { .type = NLA_U16 },
1797 [NDA_PORT] = { .type = NLA_U16 },
1798 [NDA_VNI] = { .type = NLA_U32 },
1799 [NDA_IFINDEX] = { .type = NLA_U32 },
1800 [NDA_MASTER] = { .type = NLA_U32 },
1801 [NDA_PROTOCOL] = { .type = NLA_U8 },
1802 [NDA_NH_ID] = { .type = NLA_U32 },
1803 [NDA_FDB_EXT_ATTRS] = { .type = NLA_NESTED },
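/* Illustrative mapping (not part of the original source): the userspace
 * command
 *
 *	ip neigh replace 192.0.2.1 lladdr 00:11:22:33:44:55 dev eth0 nud permanent
 *
 * arrives here as an RTM_NEWNEIGH message whose ndmsg carries the family,
 * ifindex, NUD state and flags, with NDA_DST holding the IPv4 address and
 * NDA_LLADDR the MAC. The attributes are validated against nda_policy above
 * and handled by neigh_add() below.
 */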
1806 static int neigh_delete(struct sk_buff *skb, struct nlmsghdr *nlh,
1807 struct netlink_ext_ack *extack)
1809 struct net *net = sock_net(skb->sk);
1811 struct nlattr *dst_attr;
1812 struct neigh_table *tbl;
1813 struct neighbour *neigh;
1814 struct net_device *dev = NULL;
1818 if (nlmsg_len(nlh) < sizeof(*ndm))
1821 dst_attr = nlmsg_find_attr(nlh, sizeof(*ndm), NDA_DST);
1823 NL_SET_ERR_MSG(extack, "Network address not specified");
1827 ndm = nlmsg_data(nlh);
1828 if (ndm->ndm_ifindex) {
1829 dev = __dev_get_by_index(net, ndm->ndm_ifindex);
1836 tbl = neigh_find_table(ndm->ndm_family);
1838 return -EAFNOSUPPORT;
1840 if (nla_len(dst_attr) < (int)tbl->key_len) {
1841 NL_SET_ERR_MSG(extack, "Invalid network address");
1845 if (ndm->ndm_flags & NTF_PROXY) {
1846 err = pneigh_delete(tbl, net, nla_data(dst_attr), dev);
1853 neigh = neigh_lookup(tbl, nla_data(dst_attr), dev);
1854 if (neigh == NULL) {
1859 err = __neigh_update(neigh, NULL, NUD_FAILED,
1860 NEIGH_UPDATE_F_OVERRIDE | NEIGH_UPDATE_F_ADMIN,
1861 NETLINK_CB(skb).portid, extack);
1862 write_lock_bh(&tbl->lock);
1863 neigh_release(neigh);
1864 neigh_remove_one(neigh, tbl);
1865 write_unlock_bh(&tbl->lock);
1871 static int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh,
1872 struct netlink_ext_ack *extack)
1874 int flags = NEIGH_UPDATE_F_ADMIN | NEIGH_UPDATE_F_OVERRIDE |
1875 NEIGH_UPDATE_F_OVERRIDE_ISROUTER;
1876 struct net *net = sock_net(skb->sk);
1878 struct nlattr *tb[NDA_MAX+1];
1879 struct neigh_table *tbl;
1880 struct net_device *dev = NULL;
1881 struct neighbour *neigh;
1887 err = nlmsg_parse_deprecated(nlh, sizeof(*ndm), tb, NDA_MAX,
1888 nda_policy, extack);
1894 NL_SET_ERR_MSG(extack, "Network address not specified");
1898 ndm = nlmsg_data(nlh);
1899 if (ndm->ndm_ifindex) {
1900 dev = __dev_get_by_index(net, ndm->ndm_ifindex);
1906 if (tb[NDA_LLADDR] && nla_len(tb[NDA_LLADDR]) < dev->addr_len) {
1907 NL_SET_ERR_MSG(extack, "Invalid link address");
1912 tbl = neigh_find_table(ndm->ndm_family);
1914 return -EAFNOSUPPORT;
1916 if (nla_len(tb[NDA_DST]) < (int)tbl->key_len) {
1917 NL_SET_ERR_MSG(extack, "Invalid network address");
1921 dst = nla_data(tb[NDA_DST]);
1922 lladdr = tb[NDA_LLADDR] ? nla_data(tb[NDA_LLADDR]) : NULL;
1924 if (tb[NDA_PROTOCOL])
1925 protocol = nla_get_u8(tb[NDA_PROTOCOL]);
1927 if (ndm->ndm_flags & NTF_PROXY) {
1928 struct pneigh_entry *pn;
1931 pn = pneigh_lookup(tbl, net, dst, dev, 1);
1933 pn->flags = ndm->ndm_flags;
1935 pn->protocol = protocol;
1942 NL_SET_ERR_MSG(extack, "Device not specified");
1946 if (tbl->allow_add && !tbl->allow_add(dev, extack)) {
1951 neigh = neigh_lookup(tbl, dst, dev);
1952 if (neigh == NULL) {
1953 bool exempt_from_gc;
1955 if (!(nlh->nlmsg_flags & NLM_F_CREATE)) {
1960 exempt_from_gc = ndm->ndm_state & NUD_PERMANENT ||
1961 ndm->ndm_flags & NTF_EXT_LEARNED;
1962 neigh = ___neigh_create(tbl, dst, dev,
1963 ndm->ndm_flags & NTF_EXT_LEARNED,
1964 exempt_from_gc, true);
1965 if (IS_ERR(neigh)) {
1966 err = PTR_ERR(neigh);
1970 if (nlh->nlmsg_flags & NLM_F_EXCL) {
1972 neigh_release(neigh);
1976 if (!(nlh->nlmsg_flags & NLM_F_REPLACE))
1977 flags &= ~(NEIGH_UPDATE_F_OVERRIDE |
1978 NEIGH_UPDATE_F_OVERRIDE_ISROUTER);
1982 neigh->protocol = protocol;
1983 if (ndm->ndm_flags & NTF_EXT_LEARNED)
1984 flags |= NEIGH_UPDATE_F_EXT_LEARNED;
1985 if (ndm->ndm_flags & NTF_ROUTER)
1986 flags |= NEIGH_UPDATE_F_ISROUTER;
1987 if (ndm->ndm_flags & NTF_USE)
1988 flags |= NEIGH_UPDATE_F_USE;
1990 err = __neigh_update(neigh, lladdr, ndm->ndm_state, flags,
1991 NETLINK_CB(skb).portid, extack);
1992 if (!err && ndm->ndm_flags & NTF_USE) {
1993 neigh_event_send(neigh, NULL);
1996 neigh_release(neigh);
2001 static int neightbl_fill_parms(struct sk_buff *skb, struct neigh_parms *parms)
2003 struct nlattr *nest;
2005 nest = nla_nest_start_noflag(skb, NDTA_PARMS);
2010 nla_put_u32(skb, NDTPA_IFINDEX, parms->dev->ifindex)) ||
2011 nla_put_u32(skb, NDTPA_REFCNT, refcount_read(&parms->refcnt)) ||
2012 nla_put_u32(skb, NDTPA_QUEUE_LENBYTES,
2013 NEIGH_VAR(parms, QUEUE_LEN_BYTES)) ||
2014 /* approximate value for the deprecated QUEUE_LEN (in packets) */
2015 nla_put_u32(skb, NDTPA_QUEUE_LEN,
2016 NEIGH_VAR(parms, QUEUE_LEN_BYTES) / SKB_TRUESIZE(ETH_FRAME_LEN)) ||
2017 nla_put_u32(skb, NDTPA_PROXY_QLEN, NEIGH_VAR(parms, PROXY_QLEN)) ||
2018 nla_put_u32(skb, NDTPA_APP_PROBES, NEIGH_VAR(parms, APP_PROBES)) ||
2019 nla_put_u32(skb, NDTPA_UCAST_PROBES,
2020 NEIGH_VAR(parms, UCAST_PROBES)) ||
2021 nla_put_u32(skb, NDTPA_MCAST_PROBES,
2022 NEIGH_VAR(parms, MCAST_PROBES)) ||
2023 nla_put_u32(skb, NDTPA_MCAST_REPROBES,
2024 NEIGH_VAR(parms, MCAST_REPROBES)) ||
2025 nla_put_msecs(skb, NDTPA_REACHABLE_TIME, parms->reachable_time,
2027 nla_put_msecs(skb, NDTPA_BASE_REACHABLE_TIME,
2028 NEIGH_VAR(parms, BASE_REACHABLE_TIME), NDTPA_PAD) ||
2029 nla_put_msecs(skb, NDTPA_GC_STALETIME,
2030 NEIGH_VAR(parms, GC_STALETIME), NDTPA_PAD) ||
2031 nla_put_msecs(skb, NDTPA_DELAY_PROBE_TIME,
2032 NEIGH_VAR(parms, DELAY_PROBE_TIME), NDTPA_PAD) ||
2033 nla_put_msecs(skb, NDTPA_RETRANS_TIME,
2034 NEIGH_VAR(parms, RETRANS_TIME), NDTPA_PAD) ||
2035 nla_put_msecs(skb, NDTPA_ANYCAST_DELAY,
2036 NEIGH_VAR(parms, ANYCAST_DELAY), NDTPA_PAD) ||
2037 nla_put_msecs(skb, NDTPA_PROXY_DELAY,
2038 NEIGH_VAR(parms, PROXY_DELAY), NDTPA_PAD) ||
2039 nla_put_msecs(skb, NDTPA_LOCKTIME,
2040 NEIGH_VAR(parms, LOCKTIME), NDTPA_PAD))
2041 goto nla_put_failure;
2042 return nla_nest_end(skb, nest);
2045 nla_nest_cancel(skb, nest);
2049 static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl,
2050 u32 pid, u32 seq, int type, int flags)
2052 struct nlmsghdr *nlh;
2053 struct ndtmsg *ndtmsg;
2055 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags);
2059 ndtmsg = nlmsg_data(nlh);
2061 read_lock_bh(&tbl->lock);
2062 ndtmsg->ndtm_family = tbl->family;
2063 ndtmsg->ndtm_pad1 = 0;
2064 ndtmsg->ndtm_pad2 = 0;
2066 if (nla_put_string(skb, NDTA_NAME, tbl->id) ||
2067 nla_put_msecs(skb, NDTA_GC_INTERVAL, READ_ONCE(tbl->gc_interval),
2069 nla_put_u32(skb, NDTA_THRESH1, READ_ONCE(tbl->gc_thresh1)) ||
2070 nla_put_u32(skb, NDTA_THRESH2, READ_ONCE(tbl->gc_thresh2)) ||
2071 nla_put_u32(skb, NDTA_THRESH3, READ_ONCE(tbl->gc_thresh3)))
2072 goto nla_put_failure;
2074 unsigned long now = jiffies;
2075 long flush_delta = now - READ_ONCE(tbl->last_flush);
2076 long rand_delta = now - READ_ONCE(tbl->last_rand);
2077 struct neigh_hash_table *nht;
2078 struct ndt_config ndc = {
2079 .ndtc_key_len = tbl->key_len,
2080 .ndtc_entry_size = tbl->entry_size,
2081 .ndtc_entries = atomic_read(&tbl->entries),
2082 .ndtc_last_flush = jiffies_to_msecs(flush_delta),
2083 .ndtc_last_rand = jiffies_to_msecs(rand_delta),
2084 .ndtc_proxy_qlen = READ_ONCE(tbl->proxy_queue.qlen),
2088 nht = rcu_dereference_bh(tbl->nht);
2089 ndc.ndtc_hash_rnd = nht->hash_rnd[0];
2090 ndc.ndtc_hash_mask = ((1 << nht->hash_shift) - 1);
2091 rcu_read_unlock_bh();
2093 if (nla_put(skb, NDTA_CONFIG, sizeof(ndc), &ndc))
2094 goto nla_put_failure;
2099 struct ndt_stats ndst;
2101 memset(&ndst, 0, sizeof(ndst));
2103 for_each_possible_cpu(cpu) {
2104 struct neigh_statistics *st;
2106 st = per_cpu_ptr(tbl->stats, cpu);
2107 ndst.ndts_allocs += READ_ONCE(st->allocs);
2108 ndst.ndts_destroys += READ_ONCE(st->destroys);
2109 ndst.ndts_hash_grows += READ_ONCE(st->hash_grows);
2110 ndst.ndts_res_failed += READ_ONCE(st->res_failed);
2111 ndst.ndts_lookups += READ_ONCE(st->lookups);
2112 ndst.ndts_hits += READ_ONCE(st->hits);
2113 ndst.ndts_rcv_probes_mcast += READ_ONCE(st->rcv_probes_mcast);
2114 ndst.ndts_rcv_probes_ucast += READ_ONCE(st->rcv_probes_ucast);
2115 ndst.ndts_periodic_gc_runs += READ_ONCE(st->periodic_gc_runs);
2116 ndst.ndts_forced_gc_runs += READ_ONCE(st->forced_gc_runs);
2117 ndst.ndts_table_fulls += READ_ONCE(st->table_fulls);
2120 if (nla_put_64bit(skb, NDTA_STATS, sizeof(ndst), &ndst,
2122 goto nla_put_failure;
2125 BUG_ON(tbl->parms.dev);
2126 if (neightbl_fill_parms(skb, &tbl->parms) < 0)
2127 goto nla_put_failure;
2129 read_unlock_bh(&tbl->lock);
2130 nlmsg_end(skb, nlh);
2134 read_unlock_bh(&tbl->lock);
2135 nlmsg_cancel(skb, nlh);
2139 static int neightbl_fill_param_info(struct sk_buff *skb,
2140 struct neigh_table *tbl,
2141 struct neigh_parms *parms,
2142 u32 pid, u32 seq, int type,
2145 struct ndtmsg *ndtmsg;
2146 struct nlmsghdr *nlh;
2148 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags);
2152 ndtmsg = nlmsg_data(nlh);
2154 read_lock_bh(&tbl->lock);
2155 ndtmsg->ndtm_family = tbl->family;
2156 ndtmsg->ndtm_pad1 = 0;
2157 ndtmsg->ndtm_pad2 = 0;
2159 if (nla_put_string(skb, NDTA_NAME, tbl->id) < 0 ||
2160 neightbl_fill_parms(skb, parms) < 0)
2163 read_unlock_bh(&tbl->lock);
2164 nlmsg_end(skb, nlh);
2167 read_unlock_bh(&tbl->lock);
2168 nlmsg_cancel(skb, nlh);
2172 static const struct nla_policy nl_neightbl_policy[NDTA_MAX+1] = {
2173 [NDTA_NAME] = { .type = NLA_STRING },
2174 [NDTA_THRESH1] = { .type = NLA_U32 },
2175 [NDTA_THRESH2] = { .type = NLA_U32 },
2176 [NDTA_THRESH3] = { .type = NLA_U32 },
2177 [NDTA_GC_INTERVAL] = { .type = NLA_U64 },
2178 [NDTA_PARMS] = { .type = NLA_NESTED },
2181 static const struct nla_policy nl_ntbl_parm_policy[NDTPA_MAX+1] = {
2182 [NDTPA_IFINDEX] = { .type = NLA_U32 },
2183 [NDTPA_QUEUE_LEN] = { .type = NLA_U32 },
2184 [NDTPA_PROXY_QLEN] = { .type = NLA_U32 },
2185 [NDTPA_APP_PROBES] = { .type = NLA_U32 },
2186 [NDTPA_UCAST_PROBES] = { .type = NLA_U32 },
2187 [NDTPA_MCAST_PROBES] = { .type = NLA_U32 },
2188 [NDTPA_MCAST_REPROBES] = { .type = NLA_U32 },
2189 [NDTPA_BASE_REACHABLE_TIME] = { .type = NLA_U64 },
2190 [NDTPA_GC_STALETIME] = { .type = NLA_U64 },
2191 [NDTPA_DELAY_PROBE_TIME] = { .type = NLA_U64 },
2192 [NDTPA_RETRANS_TIME] = { .type = NLA_U64 },
2193 [NDTPA_ANYCAST_DELAY] = { .type = NLA_U64 },
2194 [NDTPA_PROXY_DELAY] = { .type = NLA_U64 },
2195 [NDTPA_LOCKTIME] = { .type = NLA_U64 },
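/* Illustrative note (not from the original source): these per-parameter
 * attributes are what "ip ntable change name arp_cache dev eth0 ..." sends.
 * neightbl_set() below finds the table by NDTA_NAME, then the per-device
 * parms via NDTPA_IFINDEX, and applies each attribute with NEIGH_VAR_SET().
 */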
2198 static int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh,
2199 struct netlink_ext_ack *extack)
2201 struct net *net = sock_net(skb->sk);
2202 struct neigh_table *tbl;
2203 struct ndtmsg *ndtmsg;
2204 struct nlattr *tb[NDTA_MAX+1];
2208 err = nlmsg_parse_deprecated(nlh, sizeof(*ndtmsg), tb, NDTA_MAX,
2209 nl_neightbl_policy, extack);
2213 if (tb[NDTA_NAME] == NULL) {
2218 ndtmsg = nlmsg_data(nlh);
2220 for (tidx = 0; tidx < NEIGH_NR_TABLES; tidx++) {
2221 tbl = neigh_tables[tidx];
2224 if (ndtmsg->ndtm_family && tbl->family != ndtmsg->ndtm_family)
2226 if (nla_strcmp(tb[NDTA_NAME], tbl->id) == 0) {
2236 * We acquire tbl->lock to be nice to the periodic timers and
2237 * make sure they always see a consistent set of values.
2239 write_lock_bh(&tbl->lock);
2241 if (tb[NDTA_PARMS]) {
2242 struct nlattr *tbp[NDTPA_MAX+1];
2243 struct neigh_parms *p;
2246 err = nla_parse_nested_deprecated(tbp, NDTPA_MAX,
2248 nl_ntbl_parm_policy, extack);
2250 goto errout_tbl_lock;
2252 if (tbp[NDTPA_IFINDEX])
2253 ifindex = nla_get_u32(tbp[NDTPA_IFINDEX]);
2255 p = lookup_neigh_parms(tbl, net, ifindex);
2258 goto errout_tbl_lock;
2261 for (i = 1; i <= NDTPA_MAX; i++) {
2266 case NDTPA_QUEUE_LEN:
2267 NEIGH_VAR_SET(p, QUEUE_LEN_BYTES,
2268 nla_get_u32(tbp[i]) *
2269 SKB_TRUESIZE(ETH_FRAME_LEN));
2271 case NDTPA_QUEUE_LENBYTES:
2272 NEIGH_VAR_SET(p, QUEUE_LEN_BYTES,
2273 nla_get_u32(tbp[i]));
2275 case NDTPA_PROXY_QLEN:
2276 NEIGH_VAR_SET(p, PROXY_QLEN,
2277 nla_get_u32(tbp[i]));
2279 case NDTPA_APP_PROBES:
2280 NEIGH_VAR_SET(p, APP_PROBES,
2281 nla_get_u32(tbp[i]));
2283 case NDTPA_UCAST_PROBES:
2284 NEIGH_VAR_SET(p, UCAST_PROBES,
2285 nla_get_u32(tbp[i]));
2287 case NDTPA_MCAST_PROBES:
2288 NEIGH_VAR_SET(p, MCAST_PROBES,
2289 nla_get_u32(tbp[i]));
2291 case NDTPA_MCAST_REPROBES:
2292 NEIGH_VAR_SET(p, MCAST_REPROBES,
2293 nla_get_u32(tbp[i]));
2295 case NDTPA_BASE_REACHABLE_TIME:
2296 NEIGH_VAR_SET(p, BASE_REACHABLE_TIME,
2297 nla_get_msecs(tbp[i]));
2298 /* update reachable_time as well, otherwise, the change will
2299 * only be effective after the next time neigh_periodic_work
2300 * decides to recompute it (can be multiple minutes)
2303 neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
2305 case NDTPA_GC_STALETIME:
2306 NEIGH_VAR_SET(p, GC_STALETIME,
2307 nla_get_msecs(tbp[i]));
2309 case NDTPA_DELAY_PROBE_TIME:
2310 NEIGH_VAR_SET(p, DELAY_PROBE_TIME,
2311 nla_get_msecs(tbp[i]));
2312 call_netevent_notifiers(NETEVENT_DELAY_PROBE_TIME_UPDATE, p);
2314 case NDTPA_RETRANS_TIME:
2315 NEIGH_VAR_SET(p, RETRANS_TIME,
2316 nla_get_msecs(tbp[i]));
2318 case NDTPA_ANYCAST_DELAY:
2319 NEIGH_VAR_SET(p, ANYCAST_DELAY,
2320 nla_get_msecs(tbp[i]));
2322 case NDTPA_PROXY_DELAY:
2323 NEIGH_VAR_SET(p, PROXY_DELAY,
2324 nla_get_msecs(tbp[i]));
2326 case NDTPA_LOCKTIME:
2327 NEIGH_VAR_SET(p, LOCKTIME,
2328 nla_get_msecs(tbp[i]));
2335 if ((tb[NDTA_THRESH1] || tb[NDTA_THRESH2] ||
2336 tb[NDTA_THRESH3] || tb[NDTA_GC_INTERVAL]) &&
2337 !net_eq(net, &init_net))
2338 goto errout_tbl_lock;
2340 if (tb[NDTA_THRESH1])
2341 WRITE_ONCE(tbl->gc_thresh1, nla_get_u32(tb[NDTA_THRESH1]));
2343 if (tb[NDTA_THRESH2])
2344 WRITE_ONCE(tbl->gc_thresh2, nla_get_u32(tb[NDTA_THRESH2]));
2346 if (tb[NDTA_THRESH3])
2347 WRITE_ONCE(tbl->gc_thresh3, nla_get_u32(tb[NDTA_THRESH3]));
2349 if (tb[NDTA_GC_INTERVAL])
2350 WRITE_ONCE(tbl->gc_interval, nla_get_msecs(tb[NDTA_GC_INTERVAL]));
2355 write_unlock_bh(&tbl->lock);
2360 static int neightbl_valid_dump_info(const struct nlmsghdr *nlh,
2361 struct netlink_ext_ack *extack)
2363 struct ndtmsg *ndtm;
2365 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ndtm))) {
2366 NL_SET_ERR_MSG(extack, "Invalid header for neighbor table dump request");
2370 ndtm = nlmsg_data(nlh);
2371 if (ndtm->ndtm_pad1 || ndtm->ndtm_pad2) {
2372 NL_SET_ERR_MSG(extack, "Invalid values in header for neighbor table dump request");
2376 if (nlmsg_attrlen(nlh, sizeof(*ndtm))) {
2377 NL_SET_ERR_MSG(extack, "Invalid data after header in neighbor table dump request");
2384 static int neightbl_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
2386 const struct nlmsghdr *nlh = cb->nlh;
2387 struct net *net = sock_net(skb->sk);
2388 int family, tidx, nidx = 0;
2389 int tbl_skip = cb->args[0];
2390 int neigh_skip = cb->args[1];
2391 struct neigh_table *tbl;
2393 if (cb->strict_check) {
2394 int err = neightbl_valid_dump_info(nlh, cb->extack);
2400 family = ((struct rtgenmsg *)nlmsg_data(nlh))->rtgen_family;
2402 for (tidx = 0; tidx < NEIGH_NR_TABLES; tidx++) {
2403 struct neigh_parms *p;
2405 tbl = neigh_tables[tidx];
2409 if (tidx < tbl_skip || (family && tbl->family != family))
2412 if (neightbl_fill_info(skb, tbl, NETLINK_CB(cb->skb).portid,
2413 nlh->nlmsg_seq, RTM_NEWNEIGHTBL,
2418 p = list_next_entry(&tbl->parms, list);
2419 list_for_each_entry_from(p, &tbl->parms_list, list) {
2420 if (!net_eq(neigh_parms_net(p), net))
2423 if (nidx < neigh_skip)
2426 if (neightbl_fill_param_info(skb, tbl, p,
2427 NETLINK_CB(cb->skb).portid,
2445 static int neigh_fill_info(struct sk_buff *skb, struct neighbour *neigh,
2446 u32 pid, u32 seq, int type, unsigned int flags)
2448 unsigned long now = jiffies;
2449 struct nda_cacheinfo ci;
2450 struct nlmsghdr *nlh;
2453 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), flags);
2457 ndm = nlmsg_data(nlh);
2458 ndm->ndm_family = neigh->ops->family;
2461 ndm->ndm_flags = neigh->flags;
2462 ndm->ndm_type = neigh->type;
2463 ndm->ndm_ifindex = neigh->dev->ifindex;
2465 if (nla_put(skb, NDA_DST, neigh->tbl->key_len, neigh->primary_key))
2466 goto nla_put_failure;
2468 read_lock_bh(&neigh->lock);
2469 ndm->ndm_state = neigh->nud_state;
2470 if (neigh->nud_state & NUD_VALID) {
2471 char haddr[MAX_ADDR_LEN];
2473 neigh_ha_snapshot(haddr, neigh, neigh->dev);
2474 if (nla_put(skb, NDA_LLADDR, neigh->dev->addr_len, haddr) < 0) {
2475 read_unlock_bh(&neigh->lock);
2476 goto nla_put_failure;
2480 ci.ndm_used = jiffies_to_clock_t(now - neigh->used);
2481 ci.ndm_confirmed = jiffies_to_clock_t(now - neigh->confirmed);
2482 ci.ndm_updated = jiffies_to_clock_t(now - neigh->updated);
2483 ci.ndm_refcnt = refcount_read(&neigh->refcnt) - 1;
2484 read_unlock_bh(&neigh->lock);
2486 if (nla_put_u32(skb, NDA_PROBES, atomic_read(&neigh->probes)) ||
2487 nla_put(skb, NDA_CACHEINFO, sizeof(ci), &ci))
2488 goto nla_put_failure;
2490 if (neigh->protocol && nla_put_u8(skb, NDA_PROTOCOL, neigh->protocol))
2491 goto nla_put_failure;
2493 nlmsg_end(skb, nlh);
2497 nlmsg_cancel(skb, nlh);
2501 static int pneigh_fill_info(struct sk_buff *skb, struct pneigh_entry *pn,
2502 u32 pid, u32 seq, int type, unsigned int flags,
2503 struct neigh_table *tbl)
2505 struct nlmsghdr *nlh;
2508 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), flags);
2512 ndm = nlmsg_data(nlh);
2513 ndm->ndm_family = tbl->family;
2516 ndm->ndm_flags = pn->flags | NTF_PROXY;
2517 ndm->ndm_type = RTN_UNICAST;
2518 ndm->ndm_ifindex = pn->dev ? pn->dev->ifindex : 0;
2519 ndm->ndm_state = NUD_NONE;
2521 if (nla_put(skb, NDA_DST, tbl->key_len, pn->key))
2522 goto nla_put_failure;
2524 if (pn->protocol && nla_put_u8(skb, NDA_PROTOCOL, pn->protocol))
2525 goto nla_put_failure;
2527 nlmsg_end(skb, nlh);
2531 nlmsg_cancel(skb, nlh);
2535 static void neigh_update_notify(struct neighbour *neigh, u32 nlmsg_pid)
2537 call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, neigh);
2538 __neigh_notify(neigh, RTM_NEWNEIGH, 0, nlmsg_pid);
2541 static bool neigh_master_filtered(struct net_device *dev, int master_idx)
2543 struct net_device *master;
2548 master = dev ? netdev_master_upper_dev_get(dev) : NULL;
2549 if (!master || master->ifindex != master_idx)
2555 static bool neigh_ifindex_filtered(struct net_device *dev, int filter_idx)
2557 if (filter_idx && (!dev || dev->ifindex != filter_idx))
2563 struct neigh_dump_filter {
2568 static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
2569 struct netlink_callback *cb,
2570 struct neigh_dump_filter *filter)
2572 struct net *net = sock_net(skb->sk);
2573 struct neighbour *n;
2574 int rc, h, s_h = cb->args[1];
2575 int idx, s_idx = idx = cb->args[2];
2576 struct neigh_hash_table *nht;
2577 unsigned int flags = NLM_F_MULTI;
2579 if (filter->dev_idx || filter->master_idx)
2580 flags |= NLM_F_DUMP_FILTERED;
2583 nht = rcu_dereference_bh(tbl->nht);
2585 for (h = s_h; h < (1 << nht->hash_shift); h++) {
2588 for (n = rcu_dereference_bh(nht->hash_buckets[h]), idx = 0;
2590 n = rcu_dereference_bh(n->next)) {
2591 if (idx < s_idx || !net_eq(dev_net(n->dev), net))
2593 if (neigh_ifindex_filtered(n->dev, filter->dev_idx) ||
2594 neigh_master_filtered(n->dev, filter->master_idx))
2596 if (neigh_fill_info(skb, n, NETLINK_CB(cb->skb).portid,
2609 rcu_read_unlock_bh();
2615 static int pneigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
2616 struct netlink_callback *cb,
2617 struct neigh_dump_filter *filter)
2619 struct pneigh_entry *n;
2620 struct net *net = sock_net(skb->sk);
2621 int rc, h, s_h = cb->args[3];
2622 int idx, s_idx = idx = cb->args[4];
2623 unsigned int flags = NLM_F_MULTI;
2625 if (filter->dev_idx || filter->master_idx)
2626 flags |= NLM_F_DUMP_FILTERED;
2628 read_lock_bh(&tbl->lock);
2630 for (h = s_h; h <= PNEIGH_HASHMASK; h++) {
2633 for (n = tbl->phash_buckets[h], idx = 0; n; n = n->next) {
2634 if (idx < s_idx || pneigh_net(n) != net)
2636 if (neigh_ifindex_filtered(n->dev, filter->dev_idx) ||
2637 neigh_master_filtered(n->dev, filter->master_idx))
2639 if (pneigh_fill_info(skb, n, NETLINK_CB(cb->skb).portid,
2641 RTM_NEWNEIGH, flags, tbl) < 0) {
2642 read_unlock_bh(&tbl->lock);
2651 read_unlock_bh(&tbl->lock);
2660 static int neigh_valid_dump_req(const struct nlmsghdr *nlh,
2662 struct neigh_dump_filter *filter,
2663 struct netlink_ext_ack *extack)
2665 struct nlattr *tb[NDA_MAX + 1];
2671 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ndm))) {
2672 NL_SET_ERR_MSG(extack, "Invalid header for neighbor dump request");
2676 ndm = nlmsg_data(nlh);
2677 if (ndm->ndm_pad1 || ndm->ndm_pad2 || ndm->ndm_ifindex ||
2678 ndm->ndm_state || ndm->ndm_type) {
2679 NL_SET_ERR_MSG(extack, "Invalid values in header for neighbor dump request");
2683 if (ndm->ndm_flags & ~NTF_PROXY) {
2684 NL_SET_ERR_MSG(extack, "Invalid flags in header for neighbor dump request");
2688 err = nlmsg_parse_deprecated_strict(nlh, sizeof(struct ndmsg),
2689 tb, NDA_MAX, nda_policy,
2692 err = nlmsg_parse_deprecated(nlh, sizeof(struct ndmsg), tb,
2693 NDA_MAX, nda_policy, extack);
2698 for (i = 0; i <= NDA_MAX; ++i) {
2702 /* all new attributes should require strict_check */
2705 filter->dev_idx = nla_get_u32(tb[i]);
2708 filter->master_idx = nla_get_u32(tb[i]);
2712 NL_SET_ERR_MSG(extack, "Unsupported attribute in neighbor dump request");
2721 static int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
2723 const struct nlmsghdr *nlh = cb->nlh;
2724 struct neigh_dump_filter filter = {};
2725 struct neigh_table *tbl;
2730 family = ((struct rtgenmsg *)nlmsg_data(nlh))->rtgen_family;
2732 /* check for full ndmsg structure presence; the family member is
2733 * the same in both structures
2735 if (nlmsg_len(nlh) >= sizeof(struct ndmsg) &&
2736 ((struct ndmsg *)nlmsg_data(nlh))->ndm_flags == NTF_PROXY)
2739 err = neigh_valid_dump_req(nlh, cb->strict_check, &filter, cb->extack);
2740 if (err < 0 && cb->strict_check)
2745 for (t = 0; t < NEIGH_NR_TABLES; t++) {
2746 tbl = neigh_tables[t];
2750 if (t < s_t || (family && tbl->family != family))
2753 memset(&cb->args[1], 0, sizeof(cb->args) -
2754 sizeof(cb->args[0]));
2756 err = pneigh_dump_table(tbl, skb, cb, &filter);
2758 err = neigh_dump_table(tbl, skb, cb, &filter);
2767 static int neigh_valid_get_req(const struct nlmsghdr *nlh,
2768 struct neigh_table **tbl,
2769 void **dst, int *dev_idx, u8 *ndm_flags,
2770 struct netlink_ext_ack *extack)
2772 struct nlattr *tb[NDA_MAX + 1];
2776 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ndm))) {
2777 NL_SET_ERR_MSG(extack, "Invalid header for neighbor get request");
2781 ndm = nlmsg_data(nlh);
2782 if (ndm->ndm_pad1 || ndm->ndm_pad2 || ndm->ndm_state ||
2784 NL_SET_ERR_MSG(extack, "Invalid values in header for neighbor get request");
2788 if (ndm->ndm_flags & ~NTF_PROXY) {
2789 NL_SET_ERR_MSG(extack, "Invalid flags in header for neighbor get request");
2793 err = nlmsg_parse_deprecated_strict(nlh, sizeof(struct ndmsg), tb,
2794 NDA_MAX, nda_policy, extack);
2798 *ndm_flags = ndm->ndm_flags;
2799 *dev_idx = ndm->ndm_ifindex;
2800 *tbl = neigh_find_table(ndm->ndm_family);
2802 NL_SET_ERR_MSG(extack, "Unsupported family in header for neighbor get request");
2803 return -EAFNOSUPPORT;
2806 for (i = 0; i <= NDA_MAX; ++i) {
2812 if (nla_len(tb[i]) != (int)(*tbl)->key_len) {
2813 NL_SET_ERR_MSG(extack, "Invalid network address in neighbor get request");
2816 *dst = nla_data(tb[i]);
2819 NL_SET_ERR_MSG(extack, "Unsupported attribute in neighbor get request");
2827 static inline size_t neigh_nlmsg_size(void)
2829 return NLMSG_ALIGN(sizeof(struct ndmsg))
2830 + nla_total_size(MAX_ADDR_LEN) /* NDA_DST */
2831 + nla_total_size(MAX_ADDR_LEN) /* NDA_LLADDR */
2832 + nla_total_size(sizeof(struct nda_cacheinfo))
2833 + nla_total_size(4) /* NDA_PROBES */
2834 + nla_total_size(1); /* NDA_PROTOCOL */
2837 static int neigh_get_reply(struct net *net, struct neighbour *neigh,
2840 struct sk_buff *skb;
2843 skb = nlmsg_new(neigh_nlmsg_size(), GFP_KERNEL);
2847 err = neigh_fill_info(skb, neigh, pid, seq, RTM_NEWNEIGH, 0);
2853 err = rtnl_unicast(skb, net, pid);
2858 static inline size_t pneigh_nlmsg_size(void)
2860 return NLMSG_ALIGN(sizeof(struct ndmsg))
2861 + nla_total_size(MAX_ADDR_LEN) /* NDA_DST */
2862 + nla_total_size(1); /* NDA_PROTOCOL */
2865 static int pneigh_get_reply(struct net *net, struct pneigh_entry *neigh,
2866 u32 pid, u32 seq, struct neigh_table *tbl)
2868 struct sk_buff *skb;
2871 skb = nlmsg_new(pneigh_nlmsg_size(), GFP_KERNEL);
2875 err = pneigh_fill_info(skb, neigh, pid, seq, RTM_NEWNEIGH, 0, tbl);
2881 err = rtnl_unicast(skb, net, pid);
2886 static int neigh_get(struct sk_buff *in_skb, struct nlmsghdr *nlh,
2887 struct netlink_ext_ack *extack)
2889 struct net *net = sock_net(in_skb->sk);
2890 struct net_device *dev = NULL;
2891 struct neigh_table *tbl = NULL;
2892 struct neighbour *neigh;
2898 err = neigh_valid_get_req(nlh, &tbl, &dst, &dev_idx, &ndm_flags,
2904 dev = __dev_get_by_index(net, dev_idx);
2906 NL_SET_ERR_MSG(extack, "Unknown device ifindex");
2912 NL_SET_ERR_MSG(extack, "Network address not specified");
2916 if (ndm_flags & NTF_PROXY) {
2917 struct pneigh_entry *pn;
2919 pn = pneigh_lookup(tbl, net, dst, dev, 0);
2921 NL_SET_ERR_MSG(extack, "Proxy neighbour entry not found");
2924 return pneigh_get_reply(net, pn, NETLINK_CB(in_skb).portid,
2925 nlh->nlmsg_seq, tbl);
2929 NL_SET_ERR_MSG(extack, "No device specified");
2933 neigh = neigh_lookup(tbl, dst, dev);
2935 NL_SET_ERR_MSG(extack, "Neighbour entry not found");
2939 err = neigh_get_reply(net, neigh, NETLINK_CB(in_skb).portid,
2942 neigh_release(neigh);
2947 void neigh_for_each(struct neigh_table *tbl, void (*cb)(struct neighbour *, void *), void *cookie)
2950 struct neigh_hash_table *nht;
2953 nht = rcu_dereference_bh(tbl->nht);
2955 read_lock(&tbl->lock); /* avoid resizes */
2956 for (chain = 0; chain < (1 << nht->hash_shift); chain++) {
2957 struct neighbour *n;
2959 for (n = rcu_dereference_bh(nht->hash_buckets[chain]);
2961 n = rcu_dereference_bh(n->next))
2964 read_unlock(&tbl->lock);
2965 rcu_read_unlock_bh();
2967 EXPORT_SYMBOL(neigh_for_each);
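/* Illustrative sketch (not part of the original file): a typical caller
 * passes a small callback plus a cookie. The hypothetical helper below
 * counts entries that are currently REACHABLE. The callback runs with
 * tbl->lock held for reading and under RCU-BH, so it must not sleep and
 * must not call back into the neighbour table.
 */
#if 0
static void neigh_count_reachable_cb(struct neighbour *n, void *cookie)
{
	unsigned int *count = cookie;

	if (n->nud_state & NUD_REACHABLE)
		(*count)++;
}

static unsigned int neigh_count_reachable(struct neigh_table *tbl)
{
	unsigned int count = 0;

	neigh_for_each(tbl, neigh_count_reachable_cb, &count);
	return count;
}
#endif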
2969 /* The tbl->lock must be held as a writer and BH disabled. */
2970 void __neigh_for_each_release(struct neigh_table *tbl,
2971 int (*cb)(struct neighbour *))
2974 struct neigh_hash_table *nht;
2976 nht = rcu_dereference_protected(tbl->nht,
2977 lockdep_is_held(&tbl->lock));
2978 for (chain = 0; chain < (1 << nht->hash_shift); chain++) {
2979 struct neighbour *n;
2980 struct neighbour __rcu **np;
2982 np = &nht->hash_buckets[chain];
2983 while ((n = rcu_dereference_protected(*np,
2984 lockdep_is_held(&tbl->lock))) != NULL) {
2987 write_lock(&n->lock);
2990 rcu_assign_pointer(*np,
2991 rcu_dereference_protected(n->next,
2992 lockdep_is_held(&tbl->lock)));
2996 write_unlock(&n->lock);
2998 neigh_cleanup_and_release(n);
3002 EXPORT_SYMBOL(__neigh_for_each_release);
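/* Illustrative sketch (not part of the original file): a caller must take
 * tbl->lock as a writer with BH disabled, exactly as the comment above
 * requires. The callback returns nonzero for entries that should be
 * unlinked and released; here a hypothetical helper drops every FAILED
 * entry.
 */
#if 0
static int neigh_release_if_failed(struct neighbour *n)
{
	return n->nud_state & NUD_FAILED;
}

static void neigh_flush_failed(struct neigh_table *tbl)
{
	write_lock_bh(&tbl->lock);
	__neigh_for_each_release(tbl, neigh_release_if_failed);
	write_unlock_bh(&tbl->lock);
}
#endif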
3004 int neigh_xmit(int index, struct net_device *dev,
3005 const void *addr, struct sk_buff *skb)
3007 int err = -EAFNOSUPPORT;
3008 if (likely(index < NEIGH_NR_TABLES)) {
3009 struct neigh_table *tbl;
3010 struct neighbour *neigh;
3012 tbl = neigh_tables[index];
3016 if (index == NEIGH_ARP_TABLE) {
3017 u32 key = *((u32 *)addr);
3019 neigh = __ipv4_neigh_lookup_noref(dev, key);
3021 neigh = __neigh_lookup_noref(tbl, addr, dev);
3024 neigh = __neigh_create(tbl, addr, dev, false);
3025 err = PTR_ERR(neigh);
3026 if (IS_ERR(neigh)) {
3027 rcu_read_unlock_bh();
3030 err = neigh->output(neigh, skb);
3031 rcu_read_unlock_bh();
3033 else if (index == NEIGH_LINK_TABLE) {
3034 err = dev_hard_header(skb, dev, ntohs(skb->protocol),
3035 addr, NULL, skb->len);
3038 err = dev_queue_xmit(skb);
3046 EXPORT_SYMBOL(neigh_xmit);
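/* Illustrative sketch (not part of the original file): callers such as
 * tunnel or MPLS code use neigh_xmit() to resolve an L3 next hop straight
 * to an output path. The index selects the resolution table, and the
 * address length must match that table's key (4 bytes for
 * NEIGH_ARP_TABLE). The hypothetical helper below transmits one skb
 * towards an IPv4 next hop.
 */
#if 0
static int example_xmit_ipv4(struct sk_buff *skb, struct net_device *dev,
			     __be32 next_hop)
{
	skb->dev = dev;
	return neigh_xmit(NEIGH_ARP_TABLE, dev, &next_hop, skb);
}
#endif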
3048 #ifdef CONFIG_PROC_FS
3050 static struct neighbour *neigh_get_first(struct seq_file *seq)
3052 struct neigh_seq_state *state = seq->private;
3053 struct net *net = seq_file_net(seq);
3054 struct neigh_hash_table *nht = state->nht;
3055 struct neighbour *n = NULL;
3058 state->flags &= ~NEIGH_SEQ_IS_PNEIGH;
3059 for (bucket = 0; bucket < (1 << nht->hash_shift); bucket++) {
3060 n = rcu_dereference_bh(nht->hash_buckets[bucket]);
3063 if (!net_eq(dev_net(n->dev), net))
3065 if (state->neigh_sub_iter) {
3069 v = state->neigh_sub_iter(state, n, &fakep);
3073 if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
3075 if (n->nud_state & ~NUD_NOARP)
3078 n = rcu_dereference_bh(n->next);
3084 state->bucket = bucket;
3089 static struct neighbour *neigh_get_next(struct seq_file *seq,
3090 struct neighbour *n,
3093 struct neigh_seq_state *state = seq->private;
3094 struct net *net = seq_file_net(seq);
3095 struct neigh_hash_table *nht = state->nht;
3097 if (state->neigh_sub_iter) {
3098 void *v = state->neigh_sub_iter(state, n, pos);
3102 n = rcu_dereference_bh(n->next);
3106 if (!net_eq(dev_net(n->dev), net))
3108 if (state->neigh_sub_iter) {
3109 void *v = state->neigh_sub_iter(state, n, pos);
3114 if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
3117 if (n->nud_state & ~NUD_NOARP)
3120 n = rcu_dereference_bh(n->next);
3126 if (++state->bucket >= (1 << nht->hash_shift))
3129 n = rcu_dereference_bh(nht->hash_buckets[state->bucket]);
3137 static struct neighbour *neigh_get_idx(struct seq_file *seq, loff_t *pos)
3139 struct neighbour *n = neigh_get_first(seq);
3144 n = neigh_get_next(seq, n, pos);
3149 return *pos ? NULL : n;
3152 static struct pneigh_entry *pneigh_get_first(struct seq_file *seq)
3154 struct neigh_seq_state *state = seq->private;
3155 struct net *net = seq_file_net(seq);
3156 struct neigh_table *tbl = state->tbl;
3157 struct pneigh_entry *pn = NULL;
3158 int bucket = state->bucket;
3160 state->flags |= NEIGH_SEQ_IS_PNEIGH;
3161 for (bucket = 0; bucket <= PNEIGH_HASHMASK; bucket++) {
3162 pn = tbl->phash_buckets[bucket];
3163 while (pn && !net_eq(pneigh_net(pn), net))
3168 state->bucket = bucket;
3173 static struct pneigh_entry *pneigh_get_next(struct seq_file *seq,
3174 struct pneigh_entry *pn,
3177 struct neigh_seq_state *state = seq->private;
3178 struct net *net = seq_file_net(seq);
3179 struct neigh_table *tbl = state->tbl;
3183 } while (pn && !net_eq(pneigh_net(pn), net));
3186 if (++state->bucket > PNEIGH_HASHMASK)
3188 pn = tbl->phash_buckets[state->bucket];
3189 while (pn && !net_eq(pneigh_net(pn), net))
3201 static struct pneigh_entry *pneigh_get_idx(struct seq_file *seq, loff_t *pos)
3203 struct pneigh_entry *pn = pneigh_get_first(seq);
3208 pn = pneigh_get_next(seq, pn, pos);
3213 return *pos ? NULL : pn;
3216 static void *neigh_get_idx_any(struct seq_file *seq, loff_t *pos)
3218 struct neigh_seq_state *state = seq->private;
3220 loff_t idxpos = *pos;
3222 rc = neigh_get_idx(seq, &idxpos);
3223 if (!rc && !(state->flags & NEIGH_SEQ_NEIGH_ONLY))
3224 rc = pneigh_get_idx(seq, &idxpos);
3229 void *neigh_seq_start(struct seq_file *seq, loff_t *pos, struct neigh_table *tbl, unsigned int neigh_seq_flags)
3230 __acquires(tbl->lock)
3233 struct neigh_seq_state *state = seq->private;
3237 state->flags = (neigh_seq_flags & ~NEIGH_SEQ_IS_PNEIGH);
3240 state->nht = rcu_dereference_bh(tbl->nht);
3241 read_lock(&tbl->lock);
3243 return *pos ? neigh_get_idx_any(seq, pos) : SEQ_START_TOKEN;
3245 EXPORT_SYMBOL(neigh_seq_start);
3247 void *neigh_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3249 struct neigh_seq_state *state;
3252 if (v == SEQ_START_TOKEN) {
3253 rc = neigh_get_first(seq);
3257 state = seq->private;
3258 if (!(state->flags & NEIGH_SEQ_IS_PNEIGH)) {
3259 rc = neigh_get_next(seq, v, NULL);
3262 if (!(state->flags & NEIGH_SEQ_NEIGH_ONLY))
3263 rc = pneigh_get_first(seq);
3265 BUG_ON(state->flags & NEIGH_SEQ_NEIGH_ONLY);
3266 rc = pneigh_get_next(seq, v, NULL);
3272 EXPORT_SYMBOL(neigh_seq_next);
3274 void neigh_seq_stop(struct seq_file *seq, void *v)
3275 __releases(tbl->lock)
3278 struct neigh_seq_state *state = seq->private;
3279 struct neigh_table *tbl = state->tbl;
3281 read_unlock(&tbl->lock);
3282 rcu_read_unlock_bh();
3284 EXPORT_SYMBOL(neigh_seq_stop);
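/* Illustrative sketch (not part of the original file): a protocol exposes
 * its neighbour cache through the iterators above by supplying its own
 * table and ->show handler; the seq_file private area must be a
 * struct neigh_seq_state. The names below are hypothetical, arp_tbl comes
 * from <net/arp.h>, and NEIGH_SEQ_NEIGH_ONLY is used so ->show only ever
 * sees struct neighbour entries (never proxy entries).
 */
#if 0
static void *example_neigh_seq_start(struct seq_file *seq, loff_t *pos)
{
	return neigh_seq_start(seq, pos, &arp_tbl,
			       NEIGH_SEQ_NEIGH_ONLY | NEIGH_SEQ_SKIP_NOARP);
}

static int example_neigh_seq_show(struct seq_file *seq, void *v)
{
	/* Assumes an Ethernet-sized link-layer address for %pM. */
	if (v != SEQ_START_TOKEN)
		seq_printf(seq, "%pM\n", ((struct neighbour *)v)->ha);
	return 0;
}

static const struct seq_operations example_neigh_seq_ops = {
	.start = example_neigh_seq_start,
	.next  = neigh_seq_next,
	.stop  = neigh_seq_stop,
	.show  = example_neigh_seq_show,
};
#endif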
3286 /* statistics via seq_file */
3288 static void *neigh_stat_seq_start(struct seq_file *seq, loff_t *pos)
3290 struct neigh_table *tbl = PDE_DATA(file_inode(seq->file));
3294 return SEQ_START_TOKEN;
3296 for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {
3297 if (!cpu_possible(cpu))
3300 return per_cpu_ptr(tbl->stats, cpu);
3305 static void *neigh_stat_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3307 struct neigh_table *tbl = PDE_DATA(file_inode(seq->file));
3310 for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
3311 if (!cpu_possible(cpu))
3314 return per_cpu_ptr(tbl->stats, cpu);
3320 static void neigh_stat_seq_stop(struct seq_file *seq, void *v)
3325 static int neigh_stat_seq_show(struct seq_file *seq, void *v)
3327 struct neigh_table *tbl = PDE_DATA(file_inode(seq->file));
3328 struct neigh_statistics *st = v;
3330 if (v == SEQ_START_TOKEN) {
3331 seq_printf(seq, "entries allocs destroys hash_grows lookups hits res_failed rcv_probes_mcast rcv_probes_ucast periodic_gc_runs forced_gc_runs unresolved_discards table_fulls\n");
3335 seq_printf(seq, "%08x %08lx %08lx %08lx %08lx %08lx %08lx "
3336 "%08lx %08lx %08lx %08lx %08lx %08lx\n",
3337 atomic_read(&tbl->entries),
3348 st->rcv_probes_mcast,
3349 st->rcv_probes_ucast,
3351 st->periodic_gc_runs,
3360 static const struct seq_operations neigh_stat_seq_ops = {
3361 .start = neigh_stat_seq_start,
3362 .next = neigh_stat_seq_next,
3363 .stop = neigh_stat_seq_stop,
3364 .show = neigh_stat_seq_show,
3366 #endif /* CONFIG_PROC_FS */
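/* Note (added summary): when CONFIG_PROC_FS is set, the per-CPU counters
 * printed by neigh_stat_seq_show() are exported under
 * /proc/net/stat/<table id> (e.g. /proc/net/stat/arp_cache) as a header
 * line followed by one line per possible CPU.
 */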
3368 static void __neigh_notify(struct neighbour *n, int type, int flags,
3371 struct net *net = dev_net(n->dev);
3372 struct sk_buff *skb;
3375 skb = nlmsg_new(neigh_nlmsg_size(), GFP_ATOMIC);
3379 err = neigh_fill_info(skb, n, pid, 0, type, flags);
3381 /* -EMSGSIZE implies BUG in neigh_nlmsg_size() */
3382 WARN_ON(err == -EMSGSIZE);
3386 rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
3390 rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
3393 void neigh_app_ns(struct neighbour *n)
3395 __neigh_notify(n, RTM_GETNEIGH, NLM_F_REQUEST, 0);
3397 EXPORT_SYMBOL(neigh_app_ns);
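/* Note (added summary): neigh_app_ns() is the "application probe" path:
 * instead of the kernel sending solicitations itself, it multicasts an
 * RTM_GETNEIGH request to RTNLGRP_NEIGH listeners so that a userspace
 * helper can resolve the address; protocols call it when the
 * app_solicit/APP_PROBES parameter is configured.
 */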
3399 #ifdef CONFIG_SYSCTL
3400 static int unres_qlen_max = INT_MAX / SKB_TRUESIZE(ETH_FRAME_LEN);
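/* Note (added summary): unres_qlen is presented to userspace in whole
 * ETH_FRAME_LEN-sized packets, while the underlying QUEUE_LEN_BYTES
 * variable is kept in bytes. proc_unres_qlen() below converts on the fly:
 * a read divides the byte count by SKB_TRUESIZE(ETH_FRAME_LEN), and a
 * write of N packets stores N * SKB_TRUESIZE(ETH_FRAME_LEN) bytes (the
 * exact truesize depends on the architecture and configuration).
 */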
3402 static int proc_unres_qlen(struct ctl_table *ctl, int write,
3403 void *buffer, size_t *lenp, loff_t *ppos)
3406 struct ctl_table tmp = *ctl;
3408 tmp.extra1 = SYSCTL_ZERO;
3409 tmp.extra2 = &unres_qlen_max;
3412 size = *(int *)ctl->data / SKB_TRUESIZE(ETH_FRAME_LEN);
3413 ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
3416 *(int *)ctl->data = size * SKB_TRUESIZE(ETH_FRAME_LEN);
3420 static struct neigh_parms *neigh_get_dev_parms_rcu(struct net_device *dev,
3425 return __in_dev_arp_parms_get_rcu(dev);
3427 return __in6_dev_nd_parms_get_rcu(dev);
3432 static void neigh_copy_dflt_parms(struct net *net, struct neigh_parms *p,
3435 struct net_device *dev;
3436 int family = neigh_parms_family(p);
3439 for_each_netdev_rcu(net, dev) {
3440 struct neigh_parms *dst_p =
3441 neigh_get_dev_parms_rcu(dev, family);
3443 if (dst_p && !test_bit(index, dst_p->data_state))
3444 dst_p->data[index] = p->data[index];
3449 static void neigh_proc_update(struct ctl_table *ctl, int write)
3451 struct net_device *dev = ctl->extra1;
3452 struct neigh_parms *p = ctl->extra2;
3453 struct net *net = neigh_parms_net(p);
3454 int index = (int *) ctl->data - p->data;
3459 set_bit(index, p->data_state);
3460 if (index == NEIGH_VAR_DELAY_PROBE_TIME)
3461 call_netevent_notifiers(NETEVENT_DELAY_PROBE_TIME_UPDATE, p);
3462 if (!dev) /* NULL dev means this is default value */
3463 neigh_copy_dflt_parms(net, p, index);
3466 static int neigh_proc_dointvec_zero_intmax(struct ctl_table *ctl, int write,
3467 void *buffer, size_t *lenp,
3470 struct ctl_table tmp = *ctl;
3473 tmp.extra1 = SYSCTL_ZERO;
3474 tmp.extra2 = SYSCTL_INT_MAX;
3476 ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
3477 neigh_proc_update(ctl, write);
3481 int neigh_proc_dointvec(struct ctl_table *ctl, int write, void *buffer,
3482 size_t *lenp, loff_t *ppos)
3484 int ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
3486 neigh_proc_update(ctl, write);
3489 EXPORT_SYMBOL(neigh_proc_dointvec);
3491 int neigh_proc_dointvec_jiffies(struct ctl_table *ctl, int write, void *buffer,
3492 size_t *lenp, loff_t *ppos)
3494 int ret = proc_dointvec_jiffies(ctl, write, buffer, lenp, ppos);
3496 neigh_proc_update(ctl, write);
3499 EXPORT_SYMBOL(neigh_proc_dointvec_jiffies);
3501 static int neigh_proc_dointvec_userhz_jiffies(struct ctl_table *ctl, int write,
3502 void *buffer, size_t *lenp,
3505 int ret = proc_dointvec_userhz_jiffies(ctl, write, buffer, lenp, ppos);
3507 neigh_proc_update(ctl, write);
3511 int neigh_proc_dointvec_ms_jiffies(struct ctl_table *ctl, int write,
3512 void *buffer, size_t *lenp, loff_t *ppos)
3514 int ret = proc_dointvec_ms_jiffies(ctl, write, buffer, lenp, ppos);
3516 neigh_proc_update(ctl, write);
3519 EXPORT_SYMBOL(neigh_proc_dointvec_ms_jiffies);
3521 static int neigh_proc_dointvec_unres_qlen(struct ctl_table *ctl, int write,
3522 void *buffer, size_t *lenp,
3525 int ret = proc_unres_qlen(ctl, write, buffer, lenp, ppos);
3527 neigh_proc_update(ctl, write);
3531 static int neigh_proc_base_reachable_time(struct ctl_table *ctl, int write,
3532 void *buffer, size_t *lenp,
3535 struct neigh_parms *p = ctl->extra2;
3538 if (strcmp(ctl->procname, "base_reachable_time") == 0)
3539 ret = neigh_proc_dointvec_jiffies(ctl, write, buffer, lenp, ppos);
3540 else if (strcmp(ctl->procname, "base_reachable_time_ms") == 0)
3541 ret = neigh_proc_dointvec_ms_jiffies(ctl, write, buffer, lenp, ppos);
3545 if (write && ret == 0) {
3546 /* update reachable_time as well; otherwise the change will
3547 * only take effect after the next time neigh_periodic_work
3548 * decides to recompute it
3551 neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
3556 #define NEIGH_PARMS_DATA_OFFSET(index) \
3557 (&((struct neigh_parms *) 0)->data[index])
3559 #define NEIGH_SYSCTL_ENTRY(attr, data_attr, name, mval, proc) \
3560 [NEIGH_VAR_ ## attr] = { \
3562 .data = NEIGH_PARMS_DATA_OFFSET(NEIGH_VAR_ ## data_attr), \
3563 .maxlen = sizeof(int), \
3565 .proc_handler = proc, \
3568 #define NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(attr, name) \
3569 NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_zero_intmax)
3571 #define NEIGH_SYSCTL_JIFFIES_ENTRY(attr, name) \
3572 NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_jiffies)
3574 #define NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(attr, name) \
3575 NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_userhz_jiffies)
3577 #define NEIGH_SYSCTL_MS_JIFFIES_REUSED_ENTRY(attr, data_attr, name) \
3578 NEIGH_SYSCTL_ENTRY(attr, data_attr, name, 0644, neigh_proc_dointvec_ms_jiffies)
3580 #define NEIGH_SYSCTL_UNRES_QLEN_REUSED_ENTRY(attr, data_attr, name) \
3581 NEIGH_SYSCTL_ENTRY(attr, data_attr, name, 0644, neigh_proc_dointvec_unres_qlen)
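/* Illustrative expansion (not from the original source): with the macros
 * above, an entry such as
 *
 *	NEIGH_SYSCTL_JIFFIES_ENTRY(GC_STALETIME, "gc_stale_time")
 *
 * roughly becomes the following ctl_table slot, where .data initially
 * holds only the offset into neigh_parms::data and is rebased per parms
 * in neigh_sysctl_register():
 *
 *	[NEIGH_VAR_GC_STALETIME] = {
 *		.procname	= "gc_stale_time",
 *		.data		= NEIGH_PARMS_DATA_OFFSET(NEIGH_VAR_GC_STALETIME),
 *		.maxlen		= sizeof(int),
 *		.mode		= 0644,
 *		.proc_handler	= neigh_proc_dointvec_jiffies,
 *	},
 */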
3583 static struct neigh_sysctl_table {
3584 struct ctl_table_header *sysctl_header;
3585 struct ctl_table neigh_vars[NEIGH_VAR_MAX + 1];
3586 } neigh_sysctl_template __read_mostly = {
3588 NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(MCAST_PROBES, "mcast_solicit"),
3589 NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(UCAST_PROBES, "ucast_solicit"),
3590 NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(APP_PROBES, "app_solicit"),
3591 NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(MCAST_REPROBES, "mcast_resolicit"),
3592 NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(RETRANS_TIME, "retrans_time"),
3593 NEIGH_SYSCTL_JIFFIES_ENTRY(BASE_REACHABLE_TIME, "base_reachable_time"),
3594 NEIGH_SYSCTL_JIFFIES_ENTRY(DELAY_PROBE_TIME, "delay_first_probe_time"),
3595 NEIGH_SYSCTL_JIFFIES_ENTRY(GC_STALETIME, "gc_stale_time"),
3596 NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(QUEUE_LEN_BYTES, "unres_qlen_bytes"),
3597 NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(PROXY_QLEN, "proxy_qlen"),
3598 NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(ANYCAST_DELAY, "anycast_delay"),
3599 NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(PROXY_DELAY, "proxy_delay"),
3600 NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(LOCKTIME, "locktime"),
3601 NEIGH_SYSCTL_UNRES_QLEN_REUSED_ENTRY(QUEUE_LEN, QUEUE_LEN_BYTES, "unres_qlen"),
3602 NEIGH_SYSCTL_MS_JIFFIES_REUSED_ENTRY(RETRANS_TIME_MS, RETRANS_TIME, "retrans_time_ms"),
3603 NEIGH_SYSCTL_MS_JIFFIES_REUSED_ENTRY(BASE_REACHABLE_TIME_MS, BASE_REACHABLE_TIME, "base_reachable_time_ms"),
3604 [NEIGH_VAR_GC_INTERVAL] = {
3605 .procname = "gc_interval",
3606 .maxlen = sizeof(int),
3608 .proc_handler = proc_dointvec_jiffies,
3610 [NEIGH_VAR_GC_THRESH1] = {
3611 .procname = "gc_thresh1",
3612 .maxlen = sizeof(int),
3614 .extra1 = SYSCTL_ZERO,
3615 .extra2 = SYSCTL_INT_MAX,
3616 .proc_handler = proc_dointvec_minmax,
3618 [NEIGH_VAR_GC_THRESH2] = {
3619 .procname = "gc_thresh2",
3620 .maxlen = sizeof(int),
3622 .extra1 = SYSCTL_ZERO,
3623 .extra2 = SYSCTL_INT_MAX,
3624 .proc_handler = proc_dointvec_minmax,
3626 [NEIGH_VAR_GC_THRESH3] = {
3627 .procname = "gc_thresh3",
3628 .maxlen = sizeof(int),
3630 .extra1 = SYSCTL_ZERO,
3631 .extra2 = SYSCTL_INT_MAX,
3632 .proc_handler = proc_dointvec_minmax,
3638 int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
3639 proc_handler *handler)
3642 struct neigh_sysctl_table *t;
3643 const char *dev_name_source;
3644 char neigh_path[ sizeof("net//neigh/") + IFNAMSIZ + IFNAMSIZ ];
3647 t = kmemdup(&neigh_sysctl_template, sizeof(*t), GFP_KERNEL);
3651 for (i = 0; i < NEIGH_VAR_GC_INTERVAL; i++) {
3652 t->neigh_vars[i].data += (long) p;
3653 t->neigh_vars[i].extra1 = dev;
3654 t->neigh_vars[i].extra2 = p;
3658 dev_name_source = dev->name;
3659 /* Terminate the table early */
3660 memset(&t->neigh_vars[NEIGH_VAR_GC_INTERVAL], 0,
3661 sizeof(t->neigh_vars[NEIGH_VAR_GC_INTERVAL]));
3663 struct neigh_table *tbl = p->tbl;
3664 dev_name_source = "default";
3665 t->neigh_vars[NEIGH_VAR_GC_INTERVAL].data = &tbl->gc_interval;
3666 t->neigh_vars[NEIGH_VAR_GC_THRESH1].data = &tbl->gc_thresh1;
3667 t->neigh_vars[NEIGH_VAR_GC_THRESH2].data = &tbl->gc_thresh2;
3668 t->neigh_vars[NEIGH_VAR_GC_THRESH3].data = &tbl->gc_thresh3;
3673 t->neigh_vars[NEIGH_VAR_RETRANS_TIME].proc_handler = handler;
3675 t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME].proc_handler = handler;
3676 /* RetransTime (in milliseconds) */
3677 t->neigh_vars[NEIGH_VAR_RETRANS_TIME_MS].proc_handler = handler;
3678 /* ReachableTime (in milliseconds) */
3679 t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].proc_handler = handler;
3681 /* These handlers update p->reachable_time after
3682 * base_reachable_time(_ms) is set, so the new timer takes effect
3683 * after the next neighbour update instead of waiting for
3684 * neigh_periodic_work to recompute it (which can take several
3685 * minutes). Any handler that replaces them should do the same.
3688 t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME].proc_handler =
3689 neigh_proc_base_reachable_time;
3690 /* ReachableTime (in milliseconds) */
3691 t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].proc_handler =
3692 neigh_proc_base_reachable_time;
3695 /* Don't export sysctls to unprivileged users */
3696 if (neigh_parms_net(p)->user_ns != &init_user_ns)
3697 t->neigh_vars[0].procname = NULL;
3699 switch (neigh_parms_family(p)) {
3710 snprintf(neigh_path, sizeof(neigh_path), "net/%s/neigh/%s",
3711 p_name, dev_name_source);
3713 register_net_sysctl(neigh_parms_net(p), neigh_path, t->neigh_vars);
3714 if (!t->sysctl_header)
3717 p->sysctl_table = t;
3725 EXPORT_SYMBOL(neigh_sysctl_register);
3727 void neigh_sysctl_unregister(struct neigh_parms *p)
3729 if (p->sysctl_table) {
3730 struct neigh_sysctl_table *t = p->sysctl_table;
3731 p->sysctl_table = NULL;
3732 unregister_net_sysctl_table(t->sysctl_header);
3736 EXPORT_SYMBOL(neigh_sysctl_unregister);
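/* Illustrative sketch (not part of the original file): an address family
 * typically registers per-device knobs when it attaches its neigh_parms
 * to a device and unregisters them on teardown. The function and
 * parameter names below are hypothetical.
 */
#if 0
static int example_register_neigh_sysctls(struct net_device *dev,
					  struct neigh_parms *parms)
{
	/* A NULL handler keeps the default proc handlers installed above. */
	return neigh_sysctl_register(dev, parms, NULL);
}

static void example_unregister_neigh_sysctls(struct neigh_parms *parms)
{
	neigh_sysctl_unregister(parms);
}
#endif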
3738 #endif /* CONFIG_SYSCTL */
3740 static int __init neigh_init(void)
3742 rtnl_register(PF_UNSPEC, RTM_NEWNEIGH, neigh_add, NULL, 0);
3743 rtnl_register(PF_UNSPEC, RTM_DELNEIGH, neigh_delete, NULL, 0);
3744 rtnl_register(PF_UNSPEC, RTM_GETNEIGH, neigh_get, neigh_dump_info, 0);
3746 rtnl_register(PF_UNSPEC, RTM_GETNEIGHTBL, NULL, neightbl_dump_info,
3748 rtnl_register(PF_UNSPEC, RTM_SETNEIGHTBL, neightbl_set, NULL, 0);
3753 subsys_initcall(neigh_init);