GNU Linux-libre 5.10.153-gnu1
[releases.git] / net / netfilter / nft_set_rbtree.c
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (c) 2008-2009 Patrick McHardy <kaber@trash.net>
4  *
5  * Development of this code funded by Astaro AG (http://www.astaro.com/)
6  */
7
8 #include <linux/kernel.h>
9 #include <linux/init.h>
10 #include <linux/module.h>
11 #include <linux/list.h>
12 #include <linux/rbtree.h>
13 #include <linux/netlink.h>
14 #include <linux/netfilter.h>
15 #include <linux/netfilter/nf_tables.h>
16 #include <net/netfilter/nf_tables_core.h>
17
/* Per-set private data: an rb-tree of interval elements protected by a
 * rwlock/seqcount pair, plus a deferred garbage-collection worker.
 */
struct nft_rbtree {
	struct rb_root		root;	/* tree of nft_rbtree_elem nodes */
	rwlock_t		lock;	/* serializes writers and locked readers */
	seqcount_rwlock_t	count;	/* lets lockless lookups detect writers */
	struct delayed_work	gc_work; /* periodic expiry collection */
};
24
/* One set element: an rb-tree node plus the generic element extensions
 * (key, flags, timeout, ...) accessed via the nft_set_ext_* helpers.
 */
struct nft_rbtree_elem {
	struct rb_node		node;	/* linkage into nft_rbtree.root */
	struct nft_set_ext	ext;	/* must be last: variable-size extensions */
};
29
30 static bool nft_rbtree_interval_end(const struct nft_rbtree_elem *rbe)
31 {
32         return nft_set_ext_exists(&rbe->ext, NFT_SET_EXT_FLAGS) &&
33                (*nft_set_ext_flags(&rbe->ext) & NFT_SET_ELEM_INTERVAL_END);
34 }
35
/* True if @rbe is a start element, i.e. anything that is not an
 * interval end (elements without a flags extension count as starts).
 */
static bool nft_rbtree_interval_start(const struct nft_rbtree_elem *rbe)
{
	return !nft_rbtree_interval_end(rbe);
}
40
41 static bool nft_rbtree_equal(const struct nft_set *set, const void *this,
42                              const struct nft_rbtree_elem *interval)
43 {
44         return memcmp(this, nft_set_ext_key(&interval->ext), set->klen) == 0;
45 }
46
/* Lockless rb-tree descent for a datapath lookup, validated by @seq.
 *
 * The tree is kept in reverse order: nodes with greater keys sit in the
 * left subtree (see __nft_rbtree_insert()).  While descending, remember
 * the closest element with a key smaller than @key as @interval: if no
 * exact match exists, a start element there means @key falls inside
 * that interval.
 *
 * Returns false without touching *ext if a concurrent writer is
 * detected via the seqcount, so the caller can retry under the lock.
 */
static bool __nft_rbtree_lookup(const struct net *net, const struct nft_set *set,
				const u32 *key, const struct nft_set_ext **ext,
				unsigned int seq)
{
	struct nft_rbtree *priv = nft_set_priv(set);
	const struct nft_rbtree_elem *rbe, *interval = NULL;
	u8 genmask = nft_genmask_cur(net);
	const struct rb_node *parent;
	const void *this;
	int d;

	parent = rcu_dereference_raw(priv->root.rb_node);
	while (parent != NULL) {
		/* Bail out if a writer modified the tree under us. */
		if (read_seqcount_retry(&priv->count, seq))
			return false;

		rbe = rb_entry(parent, struct nft_rbtree_elem, node);

		this = nft_set_ext_key(&rbe->ext);
		d = memcmp(this, key, set->klen);
		if (d < 0) {
			/* Node key smaller than @key: descend towards
			 * greater keys, tracking this node as the closest
			 * candidate interval below @key.
			 */
			parent = rcu_dereference_raw(parent->rb_left);
			if (interval &&
			    nft_rbtree_equal(set, this, interval) &&
			    nft_rbtree_interval_end(rbe) &&
			    nft_rbtree_interval_start(interval))
				/* Keep the recorded start element over an
				 * end element with the same key.
				 */
				continue;
			interval = rbe;
		} else if (d > 0)
			/* Node key greater than @key: descend towards
			 * smaller keys.
			 */
			parent = rcu_dereference_raw(parent->rb_right);
		else {
			/* Exact key match; skip elements not active in the
			 * current generation.
			 */
			if (!nft_set_elem_active(&rbe->ext, genmask)) {
				parent = rcu_dereference_raw(parent->rb_left);
				continue;
			}

			if (nft_set_elem_expired(&rbe->ext))
				return false;

			if (nft_rbtree_interval_end(rbe)) {
				/* Exact match on an end element: a miss for
				 * anonymous sets; otherwise keep searching
				 * for a matching start element to the left.
				 */
				if (nft_set_is_anonymous(set))
					return false;
				parent = rcu_dereference_raw(parent->rb_left);
				interval = NULL;
				continue;
			}

			*ext = &rbe->ext;
			return true;
		}
	}

	/* No exact match: report a hit if @key falls after the closest
	 * active, unexpired interval start found during the descent.
	 */
	if (set->flags & NFT_SET_INTERVAL && interval != NULL &&
	    nft_set_elem_active(&interval->ext, genmask) &&
	    !nft_set_elem_expired(&interval->ext) &&
	    nft_rbtree_interval_start(interval)) {
		*ext = &interval->ext;
		return true;
	}

	return false;
}
109
/* .lookup callback: datapath lookup of @key.
 *
 * First attempt runs locklessly, validated against the seqcount; if it
 * misses while a writer was active (read_seqcount_retry()), repeat the
 * descent under the read lock, which excludes writers.
 */
static bool nft_rbtree_lookup(const struct net *net, const struct nft_set *set,
			      const u32 *key, const struct nft_set_ext **ext)
{
	struct nft_rbtree *priv = nft_set_priv(set);
	unsigned int seq = read_seqcount_begin(&priv->count);
	bool ret;

	ret = __nft_rbtree_lookup(net, set, key, ext, seq);
	/* A hit, or a miss with no concurrent writer, is authoritative. */
	if (ret || !read_seqcount_retry(&priv->count, seq))
		return ret;

	/* Raced with a writer: retry with writers locked out. */
	read_lock_bh(&priv->lock);
	seq = read_seqcount_begin(&priv->count);
	ret = __nft_rbtree_lookup(net, set, key, ext, seq);
	read_unlock_bh(&priv->lock);

	return ret;
}
128
/* Lockless descent fetching the element matching @key, validated by
 * @seq.
 *
 * @flags selects which interval flavour is wanted: with
 * NFT_SET_ELEM_INTERVAL_END set, an end element is requested, otherwise
 * a start element.  As in __nft_rbtree_lookup(), greater keys sit in
 * the left subtree and @interval tracks the closest candidate of the
 * requested flavour seen while descending.
 *
 * Returns false if the seqcount shows a concurrent writer, so the
 * caller must retry under the read lock.
 */
static bool __nft_rbtree_get(const struct net *net, const struct nft_set *set,
			     const u32 *key, struct nft_rbtree_elem **elem,
			     unsigned int seq, unsigned int flags, u8 genmask)
{
	struct nft_rbtree_elem *rbe, *interval = NULL;
	struct nft_rbtree *priv = nft_set_priv(set);
	const struct rb_node *parent;
	const void *this;
	int d;

	parent = rcu_dereference_raw(priv->root.rb_node);
	while (parent != NULL) {
		/* Abort on concurrent tree modification. */
		if (read_seqcount_retry(&priv->count, seq))
			return false;

		rbe = rb_entry(parent, struct nft_rbtree_elem, node);

		this = nft_set_ext_key(&rbe->ext);
		d = memcmp(this, key, set->klen);
		if (d < 0) {
			parent = rcu_dereference_raw(parent->rb_left);
			/* Smaller key: candidate only when a start element
			 * is wanted.
			 */
			if (!(flags & NFT_SET_ELEM_INTERVAL_END))
				interval = rbe;
		} else if (d > 0) {
			parent = rcu_dereference_raw(parent->rb_right);
			/* Greater key: candidate only when an end element
			 * is wanted.
			 */
			if (flags & NFT_SET_ELEM_INTERVAL_END)
				interval = rbe;
		} else {
			/* Exact key match; skip elements not active in the
			 * given generation.
			 */
			if (!nft_set_elem_active(&rbe->ext, genmask)) {
				parent = rcu_dereference_raw(parent->rb_left);
				continue;
			}

			if (nft_set_elem_expired(&rbe->ext))
				return false;

			/* Take the element if its start/end flavour matches
			 * the request (an element without a flags extension
			 * matches either request).
			 */
			if (!nft_set_ext_exists(&rbe->ext, NFT_SET_EXT_FLAGS) ||
			    (*nft_set_ext_flags(&rbe->ext) & NFT_SET_ELEM_INTERVAL_END) ==
			    (flags & NFT_SET_ELEM_INTERVAL_END)) {
				*elem = rbe;
				return true;
			}

			if (nft_rbtree_interval_end(rbe))
				interval = NULL;

			parent = rcu_dereference_raw(parent->rb_left);
		}
	}

	/* No exact match: fall back to the closest active, unexpired
	 * interval element of the requested flavour, if any.
	 */
	if (set->flags & NFT_SET_INTERVAL && interval != NULL &&
	    nft_set_elem_active(&interval->ext, genmask) &&
	    !nft_set_elem_expired(&interval->ext) &&
	    ((!nft_rbtree_interval_end(interval) &&
	      !(flags & NFT_SET_ELEM_INTERVAL_END)) ||
	     (nft_rbtree_interval_end(interval) &&
	      (flags & NFT_SET_ELEM_INTERVAL_END)))) {
		*elem = interval;
		return true;
	}

	return false;
}
192
/* .get callback: return the stored element matching @elem's key and
 * @flags, or ERR_PTR(-ENOENT).
 *
 * Mirrors nft_rbtree_lookup(): lockless attempt first, then a retry
 * under the read lock if a concurrent writer was detected.
 */
static void *nft_rbtree_get(const struct net *net, const struct nft_set *set,
			    const struct nft_set_elem *elem, unsigned int flags)
{
	struct nft_rbtree *priv = nft_set_priv(set);
	unsigned int seq = read_seqcount_begin(&priv->count);
	struct nft_rbtree_elem *rbe = ERR_PTR(-ENOENT);
	const u32 *key = (const u32 *)&elem->key.val;
	u8 genmask = nft_genmask_cur(net);
	bool ret;

	ret = __nft_rbtree_get(net, set, key, &rbe, seq, flags, genmask);
	/* A hit, or a miss with no concurrent writer, is authoritative;
	 * rbe still holds ERR_PTR(-ENOENT) on a miss.
	 */
	if (ret || !read_seqcount_retry(&priv->count, seq))
		return rbe;

	read_lock_bh(&priv->lock);
	seq = read_seqcount_begin(&priv->count);
	ret = __nft_rbtree_get(net, set, key, &rbe, seq, flags, genmask);
	if (!ret)
		rbe = ERR_PTR(-ENOENT);
	read_unlock_bh(&priv->lock);

	return rbe;
}
216
/* Insert @new into the tree for the next generation.
 *
 * Returns -ENOTEMPTY on a (partial) overlap with an existing interval,
 * or -EEXIST when a matching element of the same flavour already exists
 * (*ext then points at the clashing element, so the caller can ignore
 * the error without NLM_F_EXCL).  The caller holds the write lock and
 * the seqcount write section (see nft_rbtree_insert()).
 *
 * Note the reverse ordering established here: an existing key smaller
 * than the new one sends the descent to the LEFT, so greater keys live
 * in the left subtree.  The lookup helpers rely on this.
 */
static int __nft_rbtree_insert(const struct net *net, const struct nft_set *set,
			       struct nft_rbtree_elem *new,
			       struct nft_set_ext **ext)
{
	bool overlap = false, dup_end_left = false, dup_end_right = false;
	struct nft_rbtree *priv = nft_set_priv(set);
	u8 genmask = nft_genmask_next(net);
	struct nft_rbtree_elem *rbe;
	struct rb_node *parent, **p;
	int d;

	/* Detect overlaps as we descend the tree. Set the flag in these cases:
	 *
	 * a1. _ _ __>|  ?_ _ __|  (insert end before existing end)
	 * a2. _ _ ___|  ?_ _ _>|  (insert end after existing end)
	 * a3. _ _ ___? >|_ _ __|  (insert start before existing end)
	 *
	 * and clear it later on, as we eventually reach the points indicated by
	 * '?' above, in the cases described below. We'll always meet these
	 * later, locally, due to tree ordering, and overlaps for the intervals
	 * that are the closest together are always evaluated last.
	 *
	 * b1. _ _ __>|  !_ _ __|  (insert end before existing start)
	 * b2. _ _ ___|  !_ _ _>|  (insert end after existing start)
	 * b3. _ _ ___! >|_ _ __|  (insert start after existing end, as a leaf)
	 *            '--' no nodes falling in this range
	 * b4.          >|_ _   !  (insert start before existing start)
	 *
	 * Case a3. resolves to b3.:
	 * - if the inserted start element is the leftmost, because the '0'
	 *   element in the tree serves as end element
	 * - otherwise, if an existing end is found immediately to the left. If
	 *   there are existing nodes in between, we need to further descend the
	 *   tree before we can conclude the new start isn't causing an overlap
	 *
	 * or to b4., which, preceded by a3., means we already traversed one or
	 * more existing intervals entirely, from the right.
	 *
	 * For a new, rightmost pair of elements, we'll hit cases b3. and b2.,
	 * in that order.
	 *
	 * The flag is also cleared in two special cases:
	 *
	 * b5. |__ _ _!|<_ _ _   (insert start right before existing end)
	 * b6. |__ _ >|!__ _ _   (insert end right after existing start)
	 *
	 * which always happen as last step and imply that no further
	 * overlapping is possible.
	 *
	 * Another special case comes from the fact that start elements matching
	 * an already existing start element are allowed: insertion is not
	 * performed but we return -EEXIST in that case, and the error will be
	 * cleared by the caller if NLM_F_EXCL is not present in the request.
	 * This way, request for insertion of an exact overlap isn't reported as
	 * error to userspace if not desired.
	 *
	 * However, if the existing start matches a pre-existing start, but the
	 * end element doesn't match the corresponding pre-existing end element,
	 * we need to report a partial overlap. This is a local condition that
	 * can be noticed without need for a tracking flag, by checking for a
	 * local duplicated end for a corresponding start, from left and right,
	 * separately.
	 */

	parent = NULL;
	p = &priv->root.rb_node;
	while (*p != NULL) {
		parent = *p;
		rbe = rb_entry(parent, struct nft_rbtree_elem, node);
		d = memcmp(nft_set_ext_key(&rbe->ext),
			   nft_set_ext_key(&new->ext),
			   set->klen);
		if (d < 0) {
			/* Existing key smaller than new one: go left. */
			p = &parent->rb_left;

			if (nft_rbtree_interval_start(new)) {
				/* Case b3.: new start becomes a leaf right
				 * after an active existing end.
				 */
				if (nft_rbtree_interval_end(rbe) &&
				    nft_set_elem_active(&rbe->ext, genmask) &&
				    !nft_set_elem_expired(&rbe->ext) && !*p)
					overlap = false;
			} else {
				/* Duplicate end seen on the other side for
				 * the same start: partial overlap.
				 */
				if (dup_end_left && !*p)
					return -ENOTEMPTY;

				/* Cases a1./a2.: new end around existing
				 * end.
				 */
				overlap = nft_rbtree_interval_end(rbe) &&
					  nft_set_elem_active(&rbe->ext,
							      genmask) &&
					  !nft_set_elem_expired(&rbe->ext);

				if (overlap) {
					dup_end_right = true;
					continue;
				}
			}
		} else if (d > 0) {
			/* Existing key greater than new one: go right. */
			p = &parent->rb_right;

			if (nft_rbtree_interval_end(new)) {
				if (dup_end_right && !*p)
					return -ENOTEMPTY;

				overlap = nft_rbtree_interval_end(rbe) &&
					  nft_set_elem_active(&rbe->ext,
							      genmask) &&
					  !nft_set_elem_expired(&rbe->ext);

				if (overlap) {
					dup_end_left = true;
					continue;
				}
			} else if (nft_set_elem_active(&rbe->ext, genmask) &&
				   !nft_set_elem_expired(&rbe->ext)) {
				/* Case a3.: new start before an existing
				 * end; needs further descent to resolve.
				 */
				overlap = nft_rbtree_interval_end(rbe);
			}
		} else {
			/* Same key: order end-before-start (cases b5./b6.),
			 * report exact duplicates, or descend past inactive
			 * elements.
			 */
			if (nft_rbtree_interval_end(rbe) &&
			    nft_rbtree_interval_start(new)) {
				p = &parent->rb_left;

				if (nft_set_elem_active(&rbe->ext, genmask) &&
				    !nft_set_elem_expired(&rbe->ext))
					overlap = false;
			} else if (nft_rbtree_interval_start(rbe) &&
				   nft_rbtree_interval_end(new)) {
				p = &parent->rb_right;

				if (nft_set_elem_active(&rbe->ext, genmask) &&
				    !nft_set_elem_expired(&rbe->ext))
					overlap = false;
			} else if (nft_set_elem_active(&rbe->ext, genmask) &&
				   !nft_set_elem_expired(&rbe->ext)) {
				*ext = &rbe->ext;
				return -EEXIST;
			} else {
				overlap = false;
				if (nft_rbtree_interval_end(rbe))
					p = &parent->rb_left;
				else
					p = &parent->rb_right;
			}
		}

		/* Duplicated-end tracking is a strictly local condition. */
		dup_end_left = dup_end_right = false;
	}

	if (overlap)
		return -ENOTEMPTY;

	rb_link_node_rcu(&new->node, parent, p);
	rb_insert_color(&new->node, &priv->root);
	return 0;
}
369
370 static int nft_rbtree_insert(const struct net *net, const struct nft_set *set,
371                              const struct nft_set_elem *elem,
372                              struct nft_set_ext **ext)
373 {
374         struct nft_rbtree *priv = nft_set_priv(set);
375         struct nft_rbtree_elem *rbe = elem->priv;
376         int err;
377
378         write_lock_bh(&priv->lock);
379         write_seqcount_begin(&priv->count);
380         err = __nft_rbtree_insert(net, set, rbe, ext);
381         write_seqcount_end(&priv->count);
382         write_unlock_bh(&priv->lock);
383
384         return err;
385 }
386
387 static void nft_rbtree_remove(const struct net *net,
388                               const struct nft_set *set,
389                               const struct nft_set_elem *elem)
390 {
391         struct nft_rbtree *priv = nft_set_priv(set);
392         struct nft_rbtree_elem *rbe = elem->priv;
393
394         write_lock_bh(&priv->lock);
395         write_seqcount_begin(&priv->count);
396         rb_erase(&rbe->node, &priv->root);
397         write_seqcount_end(&priv->count);
398         write_unlock_bh(&priv->lock);
399 }
400
401 static void nft_rbtree_activate(const struct net *net,
402                                 const struct nft_set *set,
403                                 const struct nft_set_elem *elem)
404 {
405         struct nft_rbtree_elem *rbe = elem->priv;
406
407         nft_set_elem_change_active(net, set, &rbe->ext);
408         nft_set_elem_clear_busy(&rbe->ext);
409 }
410
411 static bool nft_rbtree_flush(const struct net *net,
412                              const struct nft_set *set, void *priv)
413 {
414         struct nft_rbtree_elem *rbe = priv;
415
416         if (!nft_set_elem_mark_busy(&rbe->ext) ||
417             !nft_is_active(net, &rbe->ext)) {
418                 nft_set_elem_change_active(net, set, &rbe->ext);
419                 return true;
420         }
421         return false;
422 }
423
/* .deactivate callback: find the stored element matching @elem (same
 * key AND same start/end flavour) and mark it inactive for the next
 * generation via nft_rbtree_flush().
 *
 * Returns the matching element, or NULL if none was found.
 *
 * NOTE(review): walks the tree without the seqcount/read-lock
 * protection used by the datapath lookups — presumably called with the
 * transaction path serialized against writers; confirm with callers.
 */
static void *nft_rbtree_deactivate(const struct net *net,
				   const struct nft_set *set,
				   const struct nft_set_elem *elem)
{
	const struct nft_rbtree *priv = nft_set_priv(set);
	const struct rb_node *parent = priv->root.rb_node;
	struct nft_rbtree_elem *rbe, *this = elem->priv;
	u8 genmask = nft_genmask_next(net);
	int d;

	while (parent != NULL) {
		rbe = rb_entry(parent, struct nft_rbtree_elem, node);

		d = memcmp(nft_set_ext_key(&rbe->ext), &elem->key.val,
					   set->klen);
		if (d < 0)
			parent = parent->rb_left;
		else if (d > 0)
			parent = parent->rb_right;
		else {
			/* Same key but opposite flavour: the wanted start
			 * element lies left of an end element, and the
			 * wanted end element right of a start element.
			 */
			if (nft_rbtree_interval_end(rbe) &&
			    nft_rbtree_interval_start(this)) {
				parent = parent->rb_left;
				continue;
			} else if (nft_rbtree_interval_start(rbe) &&
				   nft_rbtree_interval_end(this)) {
				parent = parent->rb_right;
				continue;
			} else if (!nft_set_elem_active(&rbe->ext, genmask)) {
				/* Skip elements already inactive in the
				 * next generation.
				 */
				parent = parent->rb_left;
				continue;
			}
			nft_rbtree_flush(net, set, rbe);
			return rbe;
		}
	}
	return NULL;
}
462
/* .walk callback: iterate elements in tree order under the read lock,
 * invoking iter->fn on each active, unexpired element once iter->skip
 * entries have been passed.  A negative return from the callback is
 * kept in iter->err and aborts the walk early.
 */
static void nft_rbtree_walk(const struct nft_ctx *ctx,
			    struct nft_set *set,
			    struct nft_set_iter *iter)
{
	struct nft_rbtree *priv = nft_set_priv(set);
	struct nft_rbtree_elem *rbe;
	struct nft_set_elem elem;
	struct rb_node *node;

	read_lock_bh(&priv->lock);
	for (node = rb_first(&priv->root); node != NULL; node = rb_next(node)) {
		rbe = rb_entry(node, struct nft_rbtree_elem, node);

		if (iter->count < iter->skip)
			goto cont;
		if (nft_set_elem_expired(&rbe->ext))
			goto cont;
		if (!nft_set_elem_active(&rbe->ext, iter->genmask))
			goto cont;

		elem.priv = rbe;

		iter->err = iter->fn(ctx, set, iter, &elem);
		if (iter->err < 0) {
			/* Drop the lock before bailing out early. */
			read_unlock_bh(&priv->lock);
			return;
		}
cont:
		/* Skipped entries still advance the cursor. */
		iter->count++;
	}
	read_unlock_bh(&priv->lock);
}
495
/* Delayed-work garbage collector: scan the tree for expired, non-busy
 * start elements, queue them (plus the end element seen just before
 * them in tree order) on a gc batch, erase them from the tree, and
 * re-arm the work afterwards.
 *
 * Erasing @rbe itself is deferred via @rbe_prev: the node is only
 * removed once the iteration has moved past it, so rb_next() never runs
 * on a node that has already been taken out of the tree.
 *
 * NOTE(review): after queueing a pair, the explicit node = rb_next()
 * plus the for-loop increment advance the cursor twice, so the element
 * right after an expired start is not examined in this pass —
 * presumably picked up on the next scheduled run; confirm against
 * upstream fixes in this area.
 */
static void nft_rbtree_gc(struct work_struct *work)
{
	struct nft_rbtree_elem *rbe, *rbe_end = NULL, *rbe_prev = NULL;
	struct nft_set_gc_batch *gcb = NULL;
	struct nft_rbtree *priv;
	struct rb_node *node;
	struct nft_set *set;

	priv = container_of(work, struct nft_rbtree, gc_work.work);
	set  = nft_set_container_of(priv);

	write_lock_bh(&priv->lock);
	write_seqcount_begin(&priv->count);
	for (node = rb_first(&priv->root); node != NULL; node = rb_next(node)) {
		rbe = rb_entry(node, struct nft_rbtree_elem, node);

		/* Remember the last end element: it pairs with the next
		 * expired start element found.
		 */
		if (nft_rbtree_interval_end(rbe)) {
			rbe_end = rbe;
			continue;
		}
		if (!nft_set_elem_expired(&rbe->ext))
			continue;
		if (nft_set_elem_mark_busy(&rbe->ext))
			continue;

		/* Safe to erase the previously queued start element now
		 * that iteration has moved past it.
		 */
		if (rbe_prev) {
			rb_erase(&rbe_prev->node, &priv->root);
			rbe_prev = NULL;
		}
		gcb = nft_set_gc_batch_check(set, gcb, GFP_ATOMIC);
		if (!gcb)
			break;

		atomic_dec(&set->nelems);
		nft_set_gc_batch_add(gcb, rbe);
		rbe_prev = rbe;

		if (rbe_end) {
			atomic_dec(&set->nelems);
			nft_set_gc_batch_add(gcb, rbe_end);
			rb_erase(&rbe_end->node, &priv->root);
			rbe_end = NULL;
		}
		node = rb_next(node);
		if (!node)
			break;
	}
	/* Erase the last deferred start element, if any. */
	if (rbe_prev)
		rb_erase(&rbe_prev->node, &priv->root);
	write_seqcount_end(&priv->count);
	write_unlock_bh(&priv->lock);

	nft_set_gc_batch_complete(gcb);

	queue_delayed_work(system_power_efficient_wq, &priv->gc_work,
			   nft_set_gc_interval(set));
}
553
/* .privsize callback: size of the per-set private data; fixed,
 * independent of the netlink attributes and descriptor.
 */
static u64 nft_rbtree_privsize(const struct nlattr * const nla[],
			       const struct nft_set_desc *desc)
{
	return sizeof(struct nft_rbtree);
}
559
/* .init callback: set up the locks, the empty tree and the gc worker.
 *
 * The seqcount is associated with the rwlock (seqcount_rwlock_t), so
 * the rwlock is initialized first.  Periodic gc is only scheduled for
 * sets with element timeouts.  Always returns 0.
 */
static int nft_rbtree_init(const struct nft_set *set,
			   const struct nft_set_desc *desc,
			   const struct nlattr * const nla[])
{
	struct nft_rbtree *priv = nft_set_priv(set);

	rwlock_init(&priv->lock);
	seqcount_rwlock_init(&priv->count, &priv->lock);
	priv->root = RB_ROOT;

	INIT_DEFERRABLE_WORK(&priv->gc_work, nft_rbtree_gc);
	if (set->flags & NFT_SET_TIMEOUT)
		queue_delayed_work(system_power_efficient_wq, &priv->gc_work,
				   nft_set_gc_interval(set));

	return 0;
}
577
/* .destroy callback: stop the gc worker, wait out pending RCU callbacks
 * (rcu_barrier), then tear down every remaining node.
 *
 * Each node is erased before being destroyed; rb_entry() is just
 * pointer arithmetic, so reading it after rb_erase() is fine.
 */
static void nft_rbtree_destroy(const struct nft_set *set)
{
	struct nft_rbtree *priv = nft_set_priv(set);
	struct nft_rbtree_elem *rbe;
	struct rb_node *node;

	cancel_delayed_work_sync(&priv->gc_work);
	rcu_barrier();
	while ((node = priv->root.rb_node) != NULL) {
		rb_erase(node, &priv->root);
		rbe = rb_entry(node, struct nft_rbtree_elem, node);
		nft_set_elem_destroy(set, rbe, true);
	}
}
592
593 static bool nft_rbtree_estimate(const struct nft_set_desc *desc, u32 features,
594                                 struct nft_set_estimate *est)
595 {
596         if (desc->field_count > 1)
597                 return false;
598
599         if (desc->size)
600                 est->size = sizeof(struct nft_rbtree) +
601                             desc->size * sizeof(struct nft_rbtree_elem);
602         else
603                 est->size = ~0;
604
605         est->lookup = NFT_SET_CLASS_O_LOG_N;
606         est->space  = NFT_SET_CLASS_O_N;
607
608         return true;
609 }
610
/* rb-tree backed set type: supports intervals, maps, objects and
 * timeouts, with O(log n) lookups and O(n) space (see
 * nft_rbtree_estimate()).
 */
const struct nft_set_type nft_set_rbtree_type = {
	.features	= NFT_SET_INTERVAL | NFT_SET_MAP | NFT_SET_OBJECT | NFT_SET_TIMEOUT,
	.ops		= {
		.privsize	= nft_rbtree_privsize,
		.elemsize	= offsetof(struct nft_rbtree_elem, ext),
		.estimate	= nft_rbtree_estimate,
		.init		= nft_rbtree_init,
		.destroy	= nft_rbtree_destroy,
		.insert		= nft_rbtree_insert,
		.remove		= nft_rbtree_remove,
		.deactivate	= nft_rbtree_deactivate,
		.flush		= nft_rbtree_flush,
		.activate	= nft_rbtree_activate,
		.lookup		= nft_rbtree_lookup,
		.walk		= nft_rbtree_walk,
		.get		= nft_rbtree_get,
	},
};