/*
 *		INETPEER - A storage for permanent information about peers
 *
 *  This source is covered by the GNU GPL, the same as all kernel sources.
 *
 *  Authors:	Andrey V. Savochkin <saw@msu.ru>
 */

#include <linux/cache.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/random.h>
#include <linux/timer.h>
#include <linux/time.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/net.h>
#include <linux/workqueue.h>
#include <net/ip.h>
#include <net/inetpeer.h>
#include <net/secure_seq.h>

/*
 *  Theory of operations.
 *  We keep one entry for each peer IP address.  Each node contains long-living
 *  information about the peer which doesn't depend on routes.
 *
 *  Nodes are removed only when their reference counter goes to 0.
 *  Once that has happened, the node may be removed after a sufficient amount
 *  of time has passed since its last use.  The least-recently-used entry can
 *  also be removed if the pool is overloaded, i.e. if the total number of
 *  entries is greater than or equal to the threshold.
 *
 *  The node pool is organised as an RB tree.
 *  Such an implementation has been chosen not just for fun.  It's a way to
 *  prevent easy and efficient DoS attacks by creating hash collisions.  A huge
 *  amount of long-living nodes in a single hash slot would significantly delay
 *  lookups performed with disabled BHs.
 *
 *  Serialisation issues.
 *  1.  Nodes may appear in the tree only with the pool lock held.
 *  2.  Nodes may disappear from the tree only with the pool lock held
 *      AND the reference count being 0.
 *  3.  Global variable peer_total is modified under the pool lock.
 *  4.  struct inet_peer fields modification:
 *		rb_node: pool lock
 *		refcnt: atomically against modifications on other CPUs;
 *		   usually under some other lock to prevent node disappearing
 *		daddr: unchangeable
 */

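/* Illustrative sketch: the lifecycle the serialisation rules above imply
 * for a caller of this API.  A node comes back from a lookup with its
 * reference count already elevated and must be released with
 * inet_putpeer(); only an unreferenced node that has also idled long
 * enough becomes a GC candidate:
 *
 *	struct inet_peer *p = inet_getpeer(base, &daddr, 1);
 *	if (p) {
 *		... read or update the long-living peer state ...
 *		inet_putpeer(p);	// drop the ref taken by the lookup
 *	}
 */
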
static struct kmem_cache *peer_cachep __ro_after_init;

void inet_peer_base_init(struct inet_peer_base *bp)
{
	bp->rb_root = RB_ROOT;
	seqlock_init(&bp->lock);
	bp->total = 0;
}
EXPORT_SYMBOL_GPL(inet_peer_base_init);

#define PEER_MAX_GC 32

/* Exported for sysctl_net_ipv4.  */
int inet_peer_threshold __read_mostly = 65536 + 128;	/* start to throw entries more
							 * aggressively at this stage */
int inet_peer_minttl __read_mostly = 120 * HZ;		/* TTL under high load: 120 sec */
int inet_peer_maxttl __read_mostly = 10 * 60 * HZ;	/* usual time to live: 10 min */

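/* Illustrative note: these variables back the net.ipv4 sysctls of the same
 * names.  Assuming the usual jiffies-based proc handlers, the two TTLs are
 * read and written in seconds from userspace, e.g.:
 *
 *	# sysctl -w net.ipv4.inet_peer_threshold=65664
 *	# sysctl -w net.ipv4.inet_peer_minttl=120
 *	# sysctl -w net.ipv4.inet_peer_maxttl=600
 */
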
/* Called from ip_output.c:ip_init  */
void __init inet_initpeers(void)
{
	struct sysinfo si;

	/* Use the straight interface to information about memory. */
	si_meminfo(&si);
	/* The values below were suggested by Alexey Kuznetsov
	 * <kuznet@ms2.inr.ac.ru>.  I don't have any opinion about the values
	 * myself.  --SAW
	 */
	if (si.totalram <= (32768*1024)/PAGE_SIZE)
		inet_peer_threshold >>= 1; /* max pool size about 1MB on IA32 */
	if (si.totalram <= (16384*1024)/PAGE_SIZE)
		inet_peer_threshold >>= 1; /* about 512KB */
	if (si.totalram <= (8192*1024)/PAGE_SIZE)
		inet_peer_threshold >>= 2; /* about 128KB */

	peer_cachep = kmem_cache_create("inet_peer_cache",
			sizeof(struct inet_peer),
			0, SLAB_HWCACHE_ALIGN | SLAB_PANIC,
			NULL);
}

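/* Worked example (derived from the checks above): the comparisons cascade,
 * so every clause whose limit covers the machine fires.  With at most 8MB
 * of RAM all three tests are true and the threshold drops from 65664 to
 * 65664 >> (1 + 1 + 2) = 4104 entries; with 16MB only the first two apply,
 * giving 16416.
 */
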
/* Called with rcu_read_lock() or base->lock held */
static struct inet_peer *lookup(const struct inetpeer_addr *daddr,
				struct inet_peer_base *base,
				unsigned int seq,
				struct inet_peer *gc_stack[],
				unsigned int *gc_cnt,
				struct rb_node **parent_p,
				struct rb_node ***pp_p)
{
	struct rb_node **pp, *parent, *next;
	struct inet_peer *p;

	pp = &base->rb_root.rb_node;
	parent = NULL;
	while (1) {
		int cmp;

		next = rcu_dereference_raw(*pp);
		if (!next)
			break;
		parent = next;
		p = rb_entry(parent, struct inet_peer, rb_node);
		cmp = inetpeer_addr_cmp(daddr, &p->daddr);
		if (cmp == 0) {
			if (!refcount_inc_not_zero(&p->refcnt))
				break;
			return p;
		}
		if (gc_stack) {
			if (*gc_cnt < PEER_MAX_GC)
				gc_stack[(*gc_cnt)++] = p;
		} else if (unlikely(read_seqretry(&base->lock, seq))) {
			break;
		}
		if (cmp == -1)
			pp = &next->rb_left;
		else
			pp = &next->rb_right;
	}
	*parent_p = parent;
	*pp_p = pp;
	return NULL;
}

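/* Note on the seq argument: in the lockless case (gc_stack == NULL) a
 * concurrent writer may rotate the RB tree under us, so a failed walk can
 * be trusted only if read_seqretry() reports that no writer was active;
 * otherwise the caller falls back to a locked lookup, as inet_getpeer()
 * does below.
 */
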
static void inetpeer_free_rcu(struct rcu_head *head)
{
	kmem_cache_free(peer_cachep, container_of(head, struct inet_peer, rcu));
}

/* Perform garbage collection on all items stacked during a lookup. */
static void inet_peer_gc(struct inet_peer_base *base,
			 struct inet_peer *gc_stack[],
			 unsigned int gc_cnt)
{
	struct inet_peer *p;
	__u32 delta, ttl;
	int i;

	if (base->total >= inet_peer_threshold)
		ttl = 0; /* be aggressive */
	else
		ttl = inet_peer_maxttl
				- (inet_peer_maxttl - inet_peer_minttl) / HZ *
					base->total / inet_peer_threshold * HZ;
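	/* Worked example (from the formula above): with the defaults,
	 * (inet_peer_maxttl - inet_peer_minttl) / HZ is 480 seconds.  For a
	 * pool filled to half of inet_peer_threshold, 480 * total / threshold
	 * is 240, so ttl = 600*HZ - 240*HZ = 360*HZ: entries idle for less
	 * than six minutes survive this pass.  The divisions are integer
	 * divisions, evaluated left to right.
	 */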
	for (i = 0; i < gc_cnt; i++) {
		p = gc_stack[i];

		/* The READ_ONCE() pairs with the WRITE_ONCE()
		 * in inet_putpeer()
		 */
		delta = (__u32)jiffies - READ_ONCE(p->dtime);

		if (delta < ttl || !refcount_dec_if_one(&p->refcnt))
			gc_stack[i] = NULL;
	}
	for (i = 0; i < gc_cnt; i++) {
		p = gc_stack[i];
		if (p) {
			rb_erase(&p->rb_node, &base->rb_root);
			base->total--;
			call_rcu(&p->rcu, inetpeer_free_rcu);
		}
	}
}

struct inet_peer *inet_getpeer(struct inet_peer_base *base,
			       const struct inetpeer_addr *daddr,
			       int create)
{
	struct inet_peer *p, *gc_stack[PEER_MAX_GC];
	struct rb_node **pp, *parent;
	unsigned int gc_cnt, seq;
	int invalidated;

	/* Attempt a lockless lookup first.
	 * Because of a concurrent writer, we might not find an existing entry.
	 */
	rcu_read_lock();
	seq = read_seqbegin(&base->lock);
	p = lookup(daddr, base, seq, NULL, &gc_cnt, &parent, &pp);
	invalidated = read_seqretry(&base->lock, seq);
	rcu_read_unlock();

	if (p)
		return p;

	/* If no writer did a change during our lookup, we can return early. */
	if (!create && !invalidated)
		return NULL;

	/* Retry an exact lookup, taking the lock before.
	 * At least, nodes should be hot in our cache.
	 */
	parent = NULL;
	write_seqlock_bh(&base->lock);

	gc_cnt = 0;
	p = lookup(daddr, base, seq, gc_stack, &gc_cnt, &parent, &pp);
	if (!p && create) {
		p = kmem_cache_alloc(peer_cachep, GFP_ATOMIC);
		if (p) {
			p->daddr = *daddr;
			p->dtime = (__u32)jiffies;
			refcount_set(&p->refcnt, 2);
			atomic_set(&p->rid, 0);
			p->metrics[RTAX_LOCK-1] = INETPEER_METRICS_NEW;
			p->rate_tokens = 0;
			p->n_redirects = 0;
			/* 60*HZ is arbitrary, but chosen high enough so that
			 * the first calculation of tokens is at its maximum.
			 */
			p->rate_last = jiffies - 60*HZ;

			rb_link_node(&p->rb_node, parent, pp);
			rb_insert_color(&p->rb_node, &base->rb_root);
			base->total++;
		}
	}
	if (gc_cnt)
		inet_peer_gc(base, gc_stack, gc_cnt);
	write_sequnlock_bh(&base->lock);

	return p;
}
EXPORT_SYMBOL_GPL(inet_getpeer);

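/* Illustrative usage sketch (the helper names are the inlines from
 * include/net/inetpeer.h; the surrounding setup is assumed): most callers
 * go through inet_getpeer_v4()/inet_getpeer_v6(), which build the
 * inetpeer_addr key before calling inet_getpeer():
 *
 *	struct inet_peer *peer;
 *
 *	peer = inet_getpeer_v4(net->ipv4.peers, ip_hdr(skb)->daddr,
 *			       l3mdev, 1);
 *	if (peer) {
 *		... consult or update the peer's state ...
 *		inet_putpeer(peer);
 *	}
 */
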
void inet_putpeer(struct inet_peer *p)
{
	/* The WRITE_ONCE() pairs with itself (we run lockless)
	 * and the READ_ONCE() in inet_peer_gc()
	 */
	WRITE_ONCE(p->dtime, (__u32)jiffies);

	if (refcount_dec_and_test(&p->refcnt))
		call_rcu(&p->rcu, inetpeer_free_rcu);
}
EXPORT_SYMBOL_GPL(inet_putpeer);

/*
 *	Check transmit rate limitation for a given message.
 *	The rate information is held in the inet_peer entries now.
 *	This function is generic and could be used for other purposes
 *	too.  It uses a Token bucket filter as suggested by Alexey Kuznetsov.
 *
 *	Note that the same inet_peer fields are modified by functions in
 *	route.c too, but these work for packet destinations while xrlim_allow
 *	works for icmp destinations.  This means the rate limiting information
 *	for one "ip object" is shared - and these ICMPs are twice limited:
 *	by source and by destination.
 *
 *	RFC 1812: 4.3.2.8 SHOULD be able to limit error message rate
 *			  SHOULD allow setting of rate limits
 *
 *	Shared between ICMPv4 and ICMPv6.
 */
#define XRLIM_BURST_FACTOR 6
bool inet_peer_xrlim_allow(struct inet_peer *peer, int timeout)
{
	unsigned long now, token;
	bool rc = false;

	if (!peer)
		return true;

	token = peer->rate_tokens;
	now = jiffies;
	token += now - peer->rate_last;
	peer->rate_last = now;
	if (token > XRLIM_BURST_FACTOR * timeout)
		token = XRLIM_BURST_FACTOR * timeout;
	if (token >= timeout) {
		token -= timeout;
		rc = true;
	}
	peer->rate_tokens = token;
	return rc;
}
EXPORT_SYMBOL(inet_peer_xrlim_allow);

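/* Worked example (derived from the code above): with timeout = HZ (one
 * second), tokens accrue one per jiffy of idle time, capped at 6*HZ by
 * XRLIM_BURST_FACTOR.  A peer that has been idle for six seconds or more
 * may therefore send a burst of six messages back to back, after which it
 * is limited to one message per second.
 */
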
void inetpeer_invalidate_tree(struct inet_peer_base *base)
{
	struct rb_node *p = rb_first(&base->rb_root);

	while (p) {
		struct inet_peer *peer = rb_entry(p, struct inet_peer, rb_node);

		p = rb_next(p);
		rb_erase(&peer->rb_node, &base->rb_root);
		inet_putpeer(peer);
		cond_resched();
	}

	base->total = 0;
}
EXPORT_SYMBOL(inetpeer_invalidate_tree);

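/* Usage note (an assumption about the callers, which live outside this
 * file): this walk tears down a whole per-base pool, e.g. when the network
 * namespace holding the base is dismantled.  Peers still referenced
 * elsewhere merely lose the tree's reference here and are freed later via
 * their final inet_putpeer().
 */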