/*
 * netfilter module to limit the number of parallel tcp
 * connections per IP address.
 *   (c) 2000 Gerd Knorr <kraxel@bytesex.org>
 *   Nov 2002: Martin Bene <martin.bene@icomedias.com>:
 *              only ignore TIME_WAIT or gone connections
 *   (C) CC Computer Consultants GmbH, 2007
 *
 * based on ...
 *
 * Kernel module to match connection tracking information.
 * GPL (C) 1999  Rusty Russell (rusty@rustcorp.com.au).
 */
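
/*
 * Example rule (illustrative only, based on typical connlimit usage; not
 * part of this file): limit each /24 to 16 parallel HTTP connections:
 *
 *   iptables -A INPUT -p tcp --syn --dport 80 \
 *            -m connlimit --connlimit-above 16 --connlimit-mask 24 \
 *            -j REJECT
 */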
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/jhash.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/rbtree.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/netfilter/nf_conntrack_tcp.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter/xt_connlimit.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_tuple.h>
#include <net/netfilter/nf_conntrack_zones.h>

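/*
 * The per-rule trees are spread over CONNLIMIT_SLOTS hash buckets, each
 * bucket protected by one of CONNLIMIT_LOCK_SLOTS striped spinlocks.
 * Under lockdep the number of lock slots is reduced, presumably to keep
 * lockdep's bookkeeping overhead manageable.  A lookup garbage-collects
 * at most CONNLIMIT_GC_MAX_NODES empty tree nodes in one pass.
 */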
#define CONNLIMIT_SLOTS         256U

#ifdef CONFIG_LOCKDEP
#define CONNLIMIT_LOCK_SLOTS    8U
#else
#define CONNLIMIT_LOCK_SLOTS    256U
#endif

#define CONNLIMIT_GC_MAX_NODES  8

/* we will save the tuples of all connections we care about */
struct xt_connlimit_conn {
        struct hlist_node               node;
        struct nf_conntrack_tuple       tuple;
        struct nf_conntrack_zone        zone;
        int                             cpu;       /* cpu that added this entry */
        u32                             jiffies32; /* lower 32 bits of jiffies at add time */
};

struct xt_connlimit_rb {
        struct rb_node node;
        struct hlist_head hhead; /* connections/hosts in same subnet */
        union nf_inet_addr addr; /* search key */
};

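/* One lock covers CONNLIMIT_SLOTS/CONNLIMIT_LOCK_SLOTS tree slots; a tree
 * slot's lock is xt_connlimit_locks[hash % CONNLIMIT_LOCK_SLOTS].
 */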
static spinlock_t xt_connlimit_locks[CONNLIMIT_LOCK_SLOTS] __cacheline_aligned_in_smp;

struct xt_connlimit_data {
        struct rb_root climit_root[CONNLIMIT_SLOTS];
};

static u_int32_t connlimit_rnd __read_mostly;
static struct kmem_cache *connlimit_rb_cachep __read_mostly;
static struct kmem_cache *connlimit_conn_cachep __read_mostly;

static inline unsigned int connlimit_iphash(__be32 addr)
{
        return jhash_1word((__force __u32)addr,
                            connlimit_rnd) % CONNLIMIT_SLOTS;
}

static inline unsigned int
connlimit_iphash6(const union nf_inet_addr *addr,
                  const union nf_inet_addr *mask)
{
        union nf_inet_addr res;
        unsigned int i;

        for (i = 0; i < ARRAY_SIZE(addr->ip6); ++i)
                res.ip6[i] = addr->ip6[i] & mask->ip6[i];

        return jhash2((u32 *)res.ip6, ARRAY_SIZE(res.ip6),
                       connlimit_rnd) % CONNLIMIT_SLOTS;
}

static inline bool already_closed(const struct nf_conn *conn)
{
        if (nf_ct_protonum(conn) == IPPROTO_TCP)
                return conn->proto.tcp.state == TCP_CONNTRACK_TIME_WAIT ||
                       conn->proto.tcp.state == TCP_CONNTRACK_CLOSE;
        else
                return false;
}

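/* memcmp-style comparison of the masked addresses: returns < 0, 0 or > 0,
 * so the result can drive the rbtree descent in count_tree().
 */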
static int
same_source_net(const union nf_inet_addr *addr,
                const union nf_inet_addr *mask,
                const union nf_inet_addr *u3, u_int8_t family)
{
        if (family == NFPROTO_IPV4) {
                return ntohl(addr->ip & mask->ip) -
                       ntohl(u3->ip & mask->ip);
        } else {
                union nf_inet_addr lh, rh;
                unsigned int i;

                for (i = 0; i < ARRAY_SIZE(addr->ip6); ++i) {
                        lh.ip6[i] = addr->ip6[i] & mask->ip6[i];
                        rh.ip6[i] = u3->ip6[i] & mask->ip6[i];
                }

                return memcmp(&lh.ip6, &rh.ip6, sizeof(lh.ip6));
        }
}

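/* Record a new connection in @head.  GFP_ATOMIC because we are called
 * from the packet path with the slot's spinlock held.
 */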
bool nf_conncount_add(struct hlist_head *head,
                      const struct nf_conntrack_tuple *tuple,
                      const struct nf_conntrack_zone *zone)
{
        struct xt_connlimit_conn *conn;

        conn = kmem_cache_alloc(connlimit_conn_cachep, GFP_ATOMIC);
        if (conn == NULL)
                return false;
        conn->tuple = *tuple;
        conn->zone = *zone;
        conn->cpu = raw_smp_processor_id();
        conn->jiffies32 = (u32)jiffies;
        hlist_add_head(&conn->node, head);
        return true;
}
EXPORT_SYMBOL_GPL(nf_conncount_add);

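/* Look up @conn's conntrack entry.  Returns the tuple hash if it is still
 * alive, ERR_PTR(-ENOENT) if the stale list entry was evicted, or
 * ERR_PTR(-EAGAIN) if the entry may still be unconfirmed and must be kept
 * for now.
 */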
static const struct nf_conntrack_tuple_hash *
find_or_evict(struct net *net, struct xt_connlimit_conn *conn)
{
        const struct nf_conntrack_tuple_hash *found;
        unsigned long a, b;
        int cpu = raw_smp_processor_id();
        u32 age;

        found = nf_conntrack_find_get(net, &conn->zone, &conn->tuple);
        if (found)
                return found;
        b = conn->jiffies32;
        a = (u32)jiffies;

        /* conn might have been added just before by another cpu and
         * might still be unconfirmed.  In this case, nf_conntrack_find_get()
         * returns no result.  Thus only evict if this cpu added the
         * stale entry or if the entry is older than two jiffies.
         */
        age = a - b;
        if (conn->cpu == cpu || age >= 2) {
                hlist_del(&conn->node);
                kmem_cache_free(connlimit_conn_cachep, conn);
                return ERR_PTR(-ENOENT);
        }

        return ERR_PTR(-EAGAIN);
}

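/* Count the tracked connections in @head that are still alive, evicting
 * dead and closed ones along the way.  *@addit is cleared when @tuple is
 * already on the list, so the caller does not add it twice.
 */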
unsigned int nf_conncount_lookup(struct net *net, struct hlist_head *head,
                                 const struct nf_conntrack_tuple *tuple,
                                 const struct nf_conntrack_zone *zone,
                                 bool *addit)
{
        const struct nf_conntrack_tuple_hash *found;
        struct xt_connlimit_conn *conn;
        struct nf_conn *found_ct;
        struct hlist_node *n;
        unsigned int length = 0;

        *addit = true;

        /* check the saved connections */
        hlist_for_each_entry_safe(conn, n, head, node) {
                found = find_or_evict(net, conn);
                if (IS_ERR(found)) {
                        /* Not found, but might be about to be confirmed */
                        if (PTR_ERR(found) == -EAGAIN) {
                                length++;
                                if (!tuple)
                                        continue;

                                if (nf_ct_tuple_equal(&conn->tuple, tuple) &&
                                    nf_ct_zone_id(&conn->zone, conn->zone.dir) ==
                                    nf_ct_zone_id(zone, zone->dir))
                                        *addit = false;
                        }
                        continue;
                }

                found_ct = nf_ct_tuplehash_to_ctrack(found);

                if (nf_ct_tuple_equal(&conn->tuple, tuple) &&
                    nf_ct_zone_equal(found_ct, zone, zone->dir)) {
                        /*
                         * Just to be sure we have it only once in the list.
                         * We should not see tuples twice unless someone hooks
                         * this into a table without "-p tcp --syn".
                         */
                        *addit = false;
                } else if (already_closed(found_ct)) {
                        /*
                         * we do not care about connections which are
                         * closed already -> ditch it
                         */
                        nf_ct_put(found_ct);
                        hlist_del(&conn->node);
                        kmem_cache_free(connlimit_conn_cachep, conn);
                        continue;
                }

                nf_ct_put(found_ct);
                length++;
        }

        return length;
}
EXPORT_SYMBOL_GPL(nf_conncount_lookup);

static void tree_nodes_free(struct rb_root *root,
                            struct xt_connlimit_rb *gc_nodes[],
                            unsigned int gc_count)
{
        struct xt_connlimit_rb *rbconn;

        while (gc_count) {
                rbconn = gc_nodes[--gc_count];
                rb_erase(&rbconn->node, root);
                kmem_cache_free(connlimit_rb_cachep, rbconn);
        }
}

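/*
 * Walk the rbtree keyed by masked source address.  On a match, count the
 * node's connection list and append @tuple unless it is already there.
 * While descending, opportunistically collect up to CONNLIMIT_GC_MAX_NODES
 * nodes whose lists have become empty; if any were found, free them and
 * restart the walk once before inserting a new node.
 */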
static unsigned int
count_tree(struct net *net, struct rb_root *root,
           const struct nf_conntrack_tuple *tuple,
           const union nf_inet_addr *addr, const union nf_inet_addr *mask,
           u8 family, const struct nf_conntrack_zone *zone)
{
        struct xt_connlimit_rb *gc_nodes[CONNLIMIT_GC_MAX_NODES];
        struct rb_node **rbnode, *parent;
        struct xt_connlimit_rb *rbconn;
        struct xt_connlimit_conn *conn;
        unsigned int gc_count;
        bool no_gc = false;

 restart:
        gc_count = 0;
        parent = NULL;
        rbnode = &(root->rb_node);
        while (*rbnode) {
                int diff;
                bool addit;

                rbconn = rb_entry(*rbnode, struct xt_connlimit_rb, node);

                parent = *rbnode;
                diff = same_source_net(addr, mask, &rbconn->addr, family);
                if (diff < 0) {
                        rbnode = &((*rbnode)->rb_left);
                } else if (diff > 0) {
                        rbnode = &((*rbnode)->rb_right);
                } else {
                        /* same source network -> be counted! */
                        unsigned int count;

                        count = nf_conncount_lookup(net, &rbconn->hhead, tuple,
                                                    zone, &addit);

                        tree_nodes_free(root, gc_nodes, gc_count);
                        if (!addit)
                                return count;

                        if (!nf_conncount_add(&rbconn->hhead, tuple, zone))
                                return 0; /* hotdrop */

                        return count + 1;
                }

                if (no_gc || gc_count >= ARRAY_SIZE(gc_nodes))
                        continue;

                /* only used for GC on hhead, retval and 'addit' ignored */
                nf_conncount_lookup(net, &rbconn->hhead, tuple, zone, &addit);
                if (hlist_empty(&rbconn->hhead))
                        gc_nodes[gc_count++] = rbconn;
        }

        if (gc_count) {
                no_gc = true;
                tree_nodes_free(root, gc_nodes, gc_count);
                /* Freeing the nodes before the new allocation permits the
                 * allocator to re-use the newly freed objects.
                 *
                 * This is a rare event; in most cases we will find an
                 * existing node to re-use (or gc_count is 0).
                 */
                goto restart;
        }

        /* no match, need to insert new node */
        rbconn = kmem_cache_alloc(connlimit_rb_cachep, GFP_ATOMIC);
        if (rbconn == NULL)
                return 0;

        conn = kmem_cache_alloc(connlimit_conn_cachep, GFP_ATOMIC);
        if (conn == NULL) {
                kmem_cache_free(connlimit_rb_cachep, rbconn);
                return 0;
        }

        conn->tuple = *tuple;
        conn->zone = *zone;
        /* initialise the eviction bookkeeping, as nf_conncount_add() does;
         * find_or_evict() reads these fields.
         */
        conn->cpu = raw_smp_processor_id();
        conn->jiffies32 = (u32)jiffies;
        rbconn->addr = *addr;

        INIT_HLIST_HEAD(&rbconn->hhead);
        hlist_add_head(&conn->node, &rbconn->hhead);

        rb_link_node(&rbconn->node, parent, rbnode);
        rb_insert_color(&rbconn->node, root);
        return 1;
}

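/* Hash the masked address to pick a tree slot, then count under the
 * corresponding striped lock.
 */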
static int count_them(struct net *net,
                      struct xt_connlimit_data *data,
                      const struct nf_conntrack_tuple *tuple,
                      const union nf_inet_addr *addr,
                      const union nf_inet_addr *mask,
                      u_int8_t family,
                      const struct nf_conntrack_zone *zone)
{
        struct rb_root *root;
        int count;
        u32 hash;

        if (family == NFPROTO_IPV6)
                hash = connlimit_iphash6(addr, mask);
        else
                hash = connlimit_iphash(addr->ip & mask->ip);
        root = &data->climit_root[hash];

        spin_lock_bh(&xt_connlimit_locks[hash % CONNLIMIT_LOCK_SLOTS]);

        count = count_tree(net, root, tuple, addr, mask, family, zone);

        spin_unlock_bh(&xt_connlimit_locks[hash % CONNLIMIT_LOCK_SLOTS]);

        return count;
}

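/*
 * Match entry point: take the original-direction tuple from conntrack
 * (or re-parse it from the packet), pick the source or destination
 * address depending on XT_CONNLIMIT_DADDR, and compare the connection
 * count against the limit.  A count of 0 means an allocation failed, so
 * the packet is dropped via hotdrop.
 */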
static bool
connlimit_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
        struct net *net = xt_net(par);
        const struct xt_connlimit_info *info = par->matchinfo;
        union nf_inet_addr addr;
        struct nf_conntrack_tuple tuple;
        const struct nf_conntrack_tuple *tuple_ptr = &tuple;
        const struct nf_conntrack_zone *zone = &nf_ct_zone_dflt;
        enum ip_conntrack_info ctinfo;
        const struct nf_conn *ct;
        unsigned int connections;

        ct = nf_ct_get(skb, &ctinfo);
        if (ct != NULL) {
                tuple_ptr = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple;
                zone = nf_ct_zone(ct);
        } else if (!nf_ct_get_tuplepr(skb, skb_network_offset(skb),
                                      xt_family(par), net, &tuple)) {
                goto hotdrop;
        }

        if (xt_family(par) == NFPROTO_IPV6) {
                const struct ipv6hdr *iph = ipv6_hdr(skb);

                memcpy(&addr.ip6, (info->flags & XT_CONNLIMIT_DADDR) ?
                       &iph->daddr : &iph->saddr, sizeof(addr.ip6));
        } else {
                const struct iphdr *iph = ip_hdr(skb);

                addr.ip = (info->flags & XT_CONNLIMIT_DADDR) ?
                          iph->daddr : iph->saddr;
        }

        connections = count_them(net, info->data, tuple_ptr, &addr,
                                 &info->mask, xt_family(par), zone);
        if (connections == 0)
                /* kmem_cache_alloc failed, drop it entirely */
                goto hotdrop;

        return (connections > info->limit) ^
               !!(info->flags & XT_CONNLIMIT_INVERT);

 hotdrop:
        par->hotdrop = true;
        return false;
}

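/* Rule instantiation: grab a conntrack reference for this netns and
 * allocate the per-rule slot array of empty rbtrees.
 */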
static int connlimit_mt_check(const struct xt_mtchk_param *par)
{
        struct xt_connlimit_info *info = par->matchinfo;
        unsigned int i;
        int ret;

        net_get_random_once(&connlimit_rnd, sizeof(connlimit_rnd));

        ret = nf_ct_netns_get(par->net, par->family);
        if (ret < 0) {
                pr_info("cannot load conntrack support for address family %u\n",
                        par->family);
                return ret;
        }

        /* init private data */
        info->data = kmalloc(sizeof(struct xt_connlimit_data), GFP_KERNEL);
        if (info->data == NULL) {
                nf_ct_netns_put(par->net, par->family);
                return -ENOMEM;
        }

        for (i = 0; i < ARRAY_SIZE(info->data->climit_root); ++i)
                info->data->climit_root[i] = RB_ROOT;

        return 0;
}

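/* Free a node's connection list.  The entries hold copies of the tuples,
 * not conntrack references, so there is nothing to put here.
 */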
void nf_conncount_cache_free(struct hlist_head *hhead)
{
        struct xt_connlimit_conn *conn;
        struct hlist_node *n;

        hlist_for_each_entry_safe(conn, n, hhead, node)
                kmem_cache_free(connlimit_conn_cachep, conn);
}
EXPORT_SYMBOL_GPL(nf_conncount_cache_free);

static void destroy_tree(struct rb_root *r)
{
        struct xt_connlimit_rb *rbconn;
        struct rb_node *node;

        while ((node = rb_first(r)) != NULL) {
                rbconn = rb_entry(node, struct xt_connlimit_rb, node);

                rb_erase(node, r);

                nf_conncount_cache_free(&rbconn->hhead);

                kmem_cache_free(connlimit_rb_cachep, rbconn);
        }
}

static void connlimit_mt_destroy(const struct xt_mtdtor_param *par)
{
        const struct xt_connlimit_info *info = par->matchinfo;
        unsigned int i;

        nf_ct_netns_put(par->net, par->family);

        for (i = 0; i < ARRAY_SIZE(info->data->climit_root); ++i)
                destroy_tree(&info->data->climit_root[i]);

        kfree(info->data);
}

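/* .usersize stops the kernel-private "data" pointer at the end of
 * struct xt_connlimit_info from being copied to or compared against
 * userspace.
 */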
static struct xt_match connlimit_mt_reg __read_mostly = {
        .name       = "connlimit",
        .revision   = 1,
        .family     = NFPROTO_UNSPEC,
        .checkentry = connlimit_mt_check,
        .match      = connlimit_mt,
        .matchsize  = sizeof(struct xt_connlimit_info),
        .usersize   = offsetof(struct xt_connlimit_info, data),
        .destroy    = connlimit_mt_destroy,
        .me         = THIS_MODULE,
};

static int __init connlimit_mt_init(void)
{
        int ret, i;

        BUILD_BUG_ON(CONNLIMIT_LOCK_SLOTS > CONNLIMIT_SLOTS);
        BUILD_BUG_ON((CONNLIMIT_SLOTS % CONNLIMIT_LOCK_SLOTS) != 0);

        for (i = 0; i < CONNLIMIT_LOCK_SLOTS; ++i)
                spin_lock_init(&xt_connlimit_locks[i]);

        connlimit_conn_cachep = kmem_cache_create("xt_connlimit_conn",
                                           sizeof(struct xt_connlimit_conn),
                                           0, 0, NULL);
        if (!connlimit_conn_cachep)
                return -ENOMEM;

        connlimit_rb_cachep = kmem_cache_create("xt_connlimit_rb",
                                           sizeof(struct xt_connlimit_rb),
                                           0, 0, NULL);
        if (!connlimit_rb_cachep) {
                kmem_cache_destroy(connlimit_conn_cachep);
                return -ENOMEM;
        }

        ret = xt_register_match(&connlimit_mt_reg);
        if (ret != 0) {
                kmem_cache_destroy(connlimit_conn_cachep);
                kmem_cache_destroy(connlimit_rb_cachep);
        }
        return ret;
}

static void __exit connlimit_mt_exit(void)
{
        xt_unregister_match(&connlimit_mt_reg);
        kmem_cache_destroy(connlimit_conn_cachep);
        kmem_cache_destroy(connlimit_rb_cachep);
}

module_init(connlimit_mt_init);
module_exit(connlimit_mt_exit);
MODULE_AUTHOR("Jan Engelhardt <jengelh@medozas.de>");
MODULE_DESCRIPTION("Xtables: Number of connections matching");
MODULE_LICENSE("GPL");
MODULE_ALIAS("ipt_connlimit");
MODULE_ALIAS("ip6t_connlimit");