/*
 * net/sched/cls_route.c	ROUTE4 classifier.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <net/dst.h>
#include <net/route.h>
#include <net/netlink.h>
#include <net/act_api.h>
#include <net/pkt_cls.h>

/*
 * 1. For now we assume that route tags < 256.
 *    This allows direct table lookups instead of hash tables.
 * 2. For now we assume that "from TAG" and "fromdev DEV" statements
 *    are mutually exclusive.
 * 3. "to TAG from ANY" has higher priority than "to ANY from XXX".
 */
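
/*
 * Handle layout, as inferred from to_hash() and from_hash() below:
 *
 *   bits  0..15: "to" (destination) realm, or 0x8000 when no TO
 *                was specified
 *   bits 16..31: "from" (source) realm, (0x8000 | ifindex) for a
 *                "fromdev" match, or 0xFFFF for "from ANY"
 */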
struct route4_fastmap {
	struct route4_filter		*filter;
	u32				id;
	int				iif;
};

struct route4_head {
	struct route4_fastmap		fastmap[16];
	struct route4_bucket __rcu	*table[256 + 1];
	struct rcu_head			rcu;
};

struct route4_bucket {
	/* 16 FROM buckets + 16 IIF buckets + 1 wildcard bucket */
	struct route4_filter __rcu	*ht[16 + 16 + 1];
	struct rcu_head			rcu;
};

struct route4_filter {
	struct route4_filter __rcu	*next;
	u32			id;
	int			iif;

	struct tcf_result	res;
	struct tcf_exts		exts;
	u32			handle;
	struct route4_bucket	*bkt;
	struct tcf_proto	*tp;
	struct rcu_work		rwork;
};

#define ROUTE4_FAILURE ((struct route4_filter *)(-1L))

static inline int route4_fastmap_hash(u32 id, int iif)
{
	return id & 0xF;
}
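
/*
 * The fastmap is a 16-entry, direct-mapped cache of recent lookup
 * results, indexed by the low four bits of the route tag. Note that
 * @iif does not enter the hash; it is only compared on lookup.
 */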

static DEFINE_SPINLOCK(fastmap_lock);
static void
route4_reset_fastmap(struct route4_head *head)
{
	spin_lock_bh(&fastmap_lock);
	memset(head->fastmap, 0, sizeof(head->fastmap));
	spin_unlock_bh(&fastmap_lock);
}

static void
route4_set_fastmap(struct route4_head *head, u32 id, int iif,
		   struct route4_filter *f)
{
	int h = route4_fastmap_hash(id, iif);

	/* fastmap updates must look atomic to align id, iif, filter */
	spin_lock_bh(&fastmap_lock);
	head->fastmap[h].id = id;
	head->fastmap[h].iif = iif;
	head->fastmap[h].filter = f;
	spin_unlock_bh(&fastmap_lock);
}

static inline int route4_hash_to(u32 id)
{
	return id & 0xFF;
}

static inline int route4_hash_from(u32 id)
{
	return (id >> 16) & 0xF;
}

static inline int route4_hash_iif(int iif)
{
	return 16 + ((iif >> 16) & 0xF);
}

static inline int route4_hash_wild(void)
{
	return 32;
}
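
/*
 * Illustrative example: a tclassid of 0x00030005 (source realm 3,
 * destination realm 5) selects table[5] via route4_hash_to() and,
 * within that bucket, the FROM chain ht[3] via route4_hash_from().
 */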

#define ROUTE4_APPLY_RESULT()					\
{								\
	*res = f->res;						\
	if (tcf_exts_has_actions(&f->exts)) {			\
		int r = tcf_exts_exec(skb, &f->exts, res);	\
		if (r < 0) {					\
			dont_cache = 1;				\
			continue;				\
		}						\
		return r;					\
	} else if (!dont_cache)					\
		route4_set_fastmap(head, id, iif, f);		\
	return 0;						\
}
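
/*
 * ROUTE4_APPLY_RESULT() is a macro rather than a function so that its
 * "continue" advances the caller's scan loop and its "return" leaves
 * route4_classify() directly. dont_cache suppresses fastmap caching
 * once an action has failed for this packet.
 */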

static int route4_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			   struct tcf_result *res)
{
	struct route4_head *head = rcu_dereference_bh(tp->root);
	struct dst_entry *dst;
	struct route4_bucket *b;
	struct route4_filter *f;
	u32 id, h;
	int iif, dont_cache = 0;

	dst = skb_dst(skb);
	if (!dst)
		goto failure;

	id = dst->tclassid;

	iif = inet_iif(skb);

	h = route4_fastmap_hash(id, iif);

	spin_lock(&fastmap_lock);
	if (id == head->fastmap[h].id &&
	    iif == head->fastmap[h].iif &&
	    (f = head->fastmap[h].filter) != NULL) {
		if (f == ROUTE4_FAILURE) {
			spin_unlock(&fastmap_lock);
			goto failure;
		}

		*res = f->res;
		spin_unlock(&fastmap_lock);
		return 0;
	}
	spin_unlock(&fastmap_lock);

	h = route4_hash_to(id);

restart:
	b = rcu_dereference_bh(head->table[h]);
	if (b) {
		for (f = rcu_dereference_bh(b->ht[route4_hash_from(id)]);
		     f;
		     f = rcu_dereference_bh(f->next))
			if (f->id == id)
				ROUTE4_APPLY_RESULT();

		for (f = rcu_dereference_bh(b->ht[route4_hash_iif(iif)]);
		     f;
		     f = rcu_dereference_bh(f->next))
			if (f->iif == iif)
				ROUTE4_APPLY_RESULT();

		for (f = rcu_dereference_bh(b->ht[route4_hash_wild()]);
		     f;
		     f = rcu_dereference_bh(f->next))
			ROUTE4_APPLY_RESULT();
	}
	if (h < 256) {
		h = 256;
		id &= ~0xFFFF;
		goto restart;
	}

	if (!dont_cache)
		route4_set_fastmap(head, id, iif, ROUTE4_FAILURE);
failure:
	return -1;
}
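
/*
 * Lookup order, as implemented above: (1) the fastmap cache; (2) the
 * bucket for the specific "to" realm, scanning its FROM, IIF and
 * wildcard chains in that order; (3) the wildcard bucket table[256],
 * with the "to" bits of the id masked off. A complete miss is cached
 * negatively as ROUTE4_FAILURE.
 */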

static inline u32 to_hash(u32 id)
{
	u32 h = id & 0xFF;

	if (id & 0x8000)
		h += 256;
	return h;
}

static inline u32 from_hash(u32 id)
{
	id &= 0xFFFF;
	if (id == 0xFFFF)
		return 32;
	if (!(id & 0x8000)) {
		if (id > 255)
			return 256;
		return id & 0xF;
	}
	return 16 + (id & 0xF);
}
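
/*
 * Worked example (illustrative): handle 0x00030005 maps to bucket 5
 * via to_hash() and to FROM chain 3 via from_hash(0x0003); an upper
 * word of 0x8002 would instead select IIF chain 16 + 2, and an upper
 * word of 0xFFFF selects the wildcard chain ht[32].
 */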

static void *route4_get(struct tcf_proto *tp, u32 handle)
{
	struct route4_head *head = rtnl_dereference(tp->root);
	struct route4_bucket *b;
	struct route4_filter *f;
	unsigned int h1, h2;

	h1 = to_hash(handle);
	if (h1 > 256)
		return NULL;

	h2 = from_hash(handle >> 16);
	if (h2 > 32)
		return NULL;

	b = rtnl_dereference(head->table[h1]);
	if (b) {
		for (f = rtnl_dereference(b->ht[h2]);
		     f;
		     f = rtnl_dereference(f->next))
			if (f->handle == handle)
				return f;
	}
	return NULL;
}

static int route4_init(struct tcf_proto *tp)
{
	struct route4_head *head;

	head = kzalloc(sizeof(struct route4_head), GFP_KERNEL);
	if (head == NULL)
		return -ENOBUFS;

	rcu_assign_pointer(tp->root, head);
	return 0;
}

static void __route4_delete_filter(struct route4_filter *f)
{
	tcf_exts_destroy(&f->exts);
	tcf_exts_put_net(&f->exts);
	kfree(f);
}

static void route4_delete_filter_work(struct work_struct *work)
{
	struct route4_filter *f = container_of(to_rcu_work(work),
					       struct route4_filter,
					       rwork);
	rtnl_lock();
	__route4_delete_filter(f);
	rtnl_unlock();
}

static void route4_queue_work(struct route4_filter *f)
{
	tcf_queue_work(&f->rwork, route4_delete_filter_work);
}

static void route4_destroy(struct tcf_proto *tp, struct netlink_ext_ack *extack)
{
	struct route4_head *head = rtnl_dereference(tp->root);
	int h1, h2;

	if (head == NULL)
		return;

	for (h1 = 0; h1 <= 256; h1++) {
		struct route4_bucket *b;

		b = rtnl_dereference(head->table[h1]);
		if (b) {
			for (h2 = 0; h2 <= 32; h2++) {
				struct route4_filter *f;

				while ((f = rtnl_dereference(b->ht[h2])) != NULL) {
					struct route4_filter *next;

					next = rtnl_dereference(f->next);
					RCU_INIT_POINTER(b->ht[h2], next);
					tcf_unbind_filter(tp, &f->res);
					if (tcf_exts_get_net(&f->exts))
						route4_queue_work(f);
					else
						__route4_delete_filter(f);
				}
			}
			RCU_INIT_POINTER(head->table[h1], NULL);
			kfree_rcu(b, rcu);
		}
	}
	kfree_rcu(head, rcu);
}
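
/*
 * Teardown walks all 257 buckets and their 33 chains under RTNL.
 * Filters whose extensions still hold a network-namespace reference
 * are freed via the workqueue (route4_queue_work()), the rest
 * synchronously; buckets and the head go through kfree_rcu() so that
 * concurrent readers stay safe.
 */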

static int route4_delete(struct tcf_proto *tp, void *arg, bool *last,
			 struct netlink_ext_ack *extack)
{
	struct route4_head *head = rtnl_dereference(tp->root);
	struct route4_filter *f = arg;
	struct route4_filter __rcu **fp;
	struct route4_filter *nf;
	struct route4_bucket *b;
	unsigned int h = 0;
	int i, h1;

	if (!head || !f)
		return -EINVAL;

	h = f->handle;
	b = f->bkt;

	fp = &b->ht[from_hash(h >> 16)];
	for (nf = rtnl_dereference(*fp); nf;
	     fp = &nf->next, nf = rtnl_dereference(*fp)) {
		if (nf == f) {
			/* unlink it */
			RCU_INIT_POINTER(*fp, rtnl_dereference(f->next));

			/* Remove any fastmap entries that might reference
			 * this filter; since it was unlinked above, it
			 * cannot find its way back into the fastmap.
			 */
			route4_reset_fastmap(head);

			/* Delete it */
			tcf_unbind_filter(tp, &f->res);
			tcf_exts_get_net(&f->exts);
			tcf_queue_work(&f->rwork, route4_delete_filter_work);

			/* Strip RTNL protected tree */
			for (i = 0; i <= 32; i++) {
				struct route4_filter *rt;

				rt = rtnl_dereference(b->ht[i]);
				if (rt)
					goto out;
			}

			/* OK, session has no flows */
			RCU_INIT_POINTER(head->table[to_hash(h)], NULL);
			kfree_rcu(b, rcu);
			break;
		}
	}

out:
	*last = true;
	for (h1 = 0; h1 <= 256; h1++) {
		if (rcu_access_pointer(head->table[h1])) {
			*last = false;
			break;
		}
	}

	return 0;
}

static const struct nla_policy route4_policy[TCA_ROUTE4_MAX + 1] = {
	[TCA_ROUTE4_CLASSID]	= { .type = NLA_U32 },
	[TCA_ROUTE4_TO]		= { .type = NLA_U32 },
	[TCA_ROUTE4_FROM]	= { .type = NLA_U32 },
	[TCA_ROUTE4_IIF]	= { .type = NLA_U32 },
};
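
/*
 * These attributes carry the tc(8) route filter options; an
 * illustrative command line (see tc-route(8)) would be:
 *
 *   tc filter add dev eth0 parent 1:0 protocol ip prio 100 \
 *           route from 3 to 5 classid 1:5
 *
 * which sets TCA_ROUTE4_FROM = 3, TCA_ROUTE4_TO = 5 and
 * TCA_ROUTE4_CLASSID.
 */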

static int route4_set_parms(struct net *net, struct tcf_proto *tp,
			    unsigned long base, struct route4_filter *f,
			    u32 handle, struct route4_head *head,
			    struct nlattr **tb, struct nlattr *est, int new,
			    bool ovr, struct netlink_ext_ack *extack)
{
	u32 id = 0, to = 0, nhandle = 0x8000;
	struct route4_filter *fp;
	unsigned int h1;
	struct route4_bucket *b;
	int err;

	err = tcf_exts_validate(net, tp, tb, est, &f->exts, ovr, extack);
	if (err < 0)
		return err;

	if (tb[TCA_ROUTE4_TO]) {
		if (new && handle & 0x8000)
			return -EINVAL;
		to = nla_get_u32(tb[TCA_ROUTE4_TO]);
		if (to > 0xFF)
			return -EINVAL;
		nhandle = to;
	}

	if (tb[TCA_ROUTE4_FROM]) {
		if (tb[TCA_ROUTE4_IIF])
			return -EINVAL;
		id = nla_get_u32(tb[TCA_ROUTE4_FROM]);
		if (id > 0xFF)
			return -EINVAL;
		nhandle |= id << 16;
	} else if (tb[TCA_ROUTE4_IIF]) {
		id = nla_get_u32(tb[TCA_ROUTE4_IIF]);
		if (id > 0x7FFF)
			return -EINVAL;
		nhandle |= (id | 0x8000) << 16;
	} else
		nhandle |= 0xFFFF << 16;

	if (handle && new) {
		nhandle |= handle & 0x7F00;
		if (nhandle != handle)
			return -EINVAL;
	}

	h1 = to_hash(nhandle);
	b = rtnl_dereference(head->table[h1]);
	if (!b) {
		b = kzalloc(sizeof(struct route4_bucket), GFP_KERNEL);
		if (b == NULL)
			return -ENOBUFS;

		rcu_assign_pointer(head->table[h1], b);
	} else {
		unsigned int h2 = from_hash(nhandle >> 16);

		for (fp = rtnl_dereference(b->ht[h2]);
		     fp;
		     fp = rtnl_dereference(fp->next))
			if (fp->handle == f->handle)
				return -EEXIST;
	}

	if (tb[TCA_ROUTE4_TO])
		f->id = to;

	if (tb[TCA_ROUTE4_FROM])
		f->id = to | id<<16;
	else if (tb[TCA_ROUTE4_IIF])
		f->iif = id;

	f->handle = nhandle;
	f->bkt = b;
	f->tp = tp;

	if (tb[TCA_ROUTE4_CLASSID]) {
		f->res.classid = nla_get_u32(tb[TCA_ROUTE4_CLASSID]);
		tcf_bind_filter(tp, &f->res, base);
	}

	return 0;
}
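
/*
 * TCA_ROUTE4_FROM and TCA_ROUTE4_IIF are mutually exclusive above
 * because both are encoded into the same upper 16 bits of the handle:
 * a FROM realm is stored as-is, while an interface match is stored as
 * (ifindex | 0x8000).
 */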

static int route4_change(struct net *net, struct sk_buff *in_skb,
			 struct tcf_proto *tp, unsigned long base, u32 handle,
			 struct nlattr **tca, void **arg, bool ovr,
			 struct netlink_ext_ack *extack)
{
	struct route4_head *head = rtnl_dereference(tp->root);
	struct route4_filter __rcu **fp;
	struct route4_filter *fold, *f1, *pfp, *f = NULL;
	struct route4_bucket *b;
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_ROUTE4_MAX + 1];
	unsigned int h, th;
	int err;
	bool new = true;

	if (opt == NULL)
		return handle ? -EINVAL : 0;

	err = nla_parse_nested(tb, TCA_ROUTE4_MAX, opt, route4_policy, NULL);
	if (err < 0)
		return err;

	fold = *arg;
	if (fold && handle && fold->handle != handle)
		return -EINVAL;

	err = -ENOBUFS;
	f = kzalloc(sizeof(struct route4_filter), GFP_KERNEL);
	if (!f)
		goto errout;

	err = tcf_exts_init(&f->exts, TCA_ROUTE4_ACT, TCA_ROUTE4_POLICE);
	if (err < 0)
		goto errout;

	if (fold) {
		f->id = fold->id;
		f->iif = fold->iif;
		f->res = fold->res;
		f->handle = fold->handle;

		f->tp = fold->tp;
		f->bkt = fold->bkt;
		new = false;
	}

	err = route4_set_parms(net, tp, base, f, handle, head, tb,
			       tca[TCA_RATE], new, ovr, extack);
	if (err < 0)
		goto errout;

	h = from_hash(f->handle >> 16);
	fp = &f->bkt->ht[h];
	for (pfp = rtnl_dereference(*fp);
	     (f1 = rtnl_dereference(*fp)) != NULL;
	     fp = &f1->next)
		if (f->handle < f1->handle)
			break;

	tcf_block_netif_keep_dst(tp->chain->block);
	rcu_assign_pointer(f->next, f1);
	rcu_assign_pointer(*fp, f);

	/* Unlink the old filter unconditionally: entries created with
	 * handle 0 must be removed here too, or they would stay linked
	 * while being freed below (use-after-free, CVE-2022-2588).
	 */
	if (fold) {
		th = to_hash(fold->handle);
		h = from_hash(fold->handle >> 16);
		b = rtnl_dereference(head->table[th]);
		if (b) {
			fp = &b->ht[h];
			for (pfp = rtnl_dereference(*fp); pfp;
			     fp = &pfp->next, pfp = rtnl_dereference(*fp)) {
				if (pfp == fold) {
					rcu_assign_pointer(*fp, fold->next);
					break;
				}
			}
		}
	}

	route4_reset_fastmap(head);
	*arg = f;
	if (fold) {
		tcf_unbind_filter(tp, &fold->res);
		tcf_exts_get_net(&fold->exts);
		tcf_queue_work(&fold->rwork, route4_delete_filter_work);
	}
	return 0;

errout:
	if (f)
		tcf_exts_destroy(&f->exts);
	kfree(f);
	return err;
}
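
/*
 * Ordering in change() matters for RCU readers: the new filter is
 * linked first, the old one unlinked afterwards, and the fastmap is
 * reset last, so a concurrent classify() never observes a chain with
 * the filter missing entirely.
 */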

static void route4_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
	struct route4_head *head = rtnl_dereference(tp->root);
	unsigned int h, h1;

	if (head == NULL)
		arg->stop = 1;

	if (arg->stop)
		return;

	for (h = 0; h <= 256; h++) {
		struct route4_bucket *b = rtnl_dereference(head->table[h]);

		if (b) {
			for (h1 = 0; h1 <= 32; h1++) {
				struct route4_filter *f;

				for (f = rtnl_dereference(b->ht[h1]);
				     f;
				     f = rtnl_dereference(f->next)) {
					if (arg->count < arg->skip) {
						arg->count++;
						continue;
					}
					if (arg->fn(tp, f, arg) < 0) {
						arg->stop = 1;
						return;
					}
					arg->count++;
				}
			}
		}
	}
}

static int route4_dump(struct net *net, struct tcf_proto *tp, void *fh,
		       struct sk_buff *skb, struct tcmsg *t)
{
	struct route4_filter *f = fh;
	struct nlattr *nest;
	u32 id;

	if (f == NULL)
		return skb->len;

	t->tcm_handle = f->handle;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	if (!(f->handle & 0x8000)) {
		id = f->id & 0xFF;
		if (nla_put_u32(skb, TCA_ROUTE4_TO, id))
			goto nla_put_failure;
	}
	if (f->handle & 0x80000000) {
		if ((f->handle >> 16) != 0xFFFF &&
		    nla_put_u32(skb, TCA_ROUTE4_IIF, f->iif))
			goto nla_put_failure;
	} else {
		id = f->id >> 16;
		if (nla_put_u32(skb, TCA_ROUTE4_FROM, id))
			goto nla_put_failure;
	}
	if (f->res.classid &&
	    nla_put_u32(skb, TCA_ROUTE4_CLASSID, f->res.classid))
		goto nla_put_failure;

	if (tcf_exts_dump(skb, &f->exts) < 0)
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	if (tcf_exts_dump_stats(skb, &f->exts) < 0)
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static void route4_bind_class(void *fh, u32 classid, unsigned long cl, void *q,
			      unsigned long base)
{
	struct route4_filter *f = fh;

	if (f && f->res.classid == classid) {
		if (cl)
			__tcf_bind_filter(q, &f->res, base);
		else
			__tcf_unbind_filter(q, &f->res);
	}
}

static struct tcf_proto_ops cls_route4_ops __read_mostly = {
	.kind		=	"route",
	.classify	=	route4_classify,
	.init		=	route4_init,
	.destroy	=	route4_destroy,
	.get		=	route4_get,
	.change		=	route4_change,
	.delete		=	route4_delete,
	.walk		=	route4_walk,
	.dump		=	route4_dump,
	.bind_class	=	route4_bind_class,
	.owner		=	THIS_MODULE,
};

static int __init init_route4(void)
{
	return register_tcf_proto_ops(&cls_route4_ops);
}

static void __exit exit_route4(void)
{
	unregister_tcf_proto_ops(&cls_route4_ops);
}

module_init(init_route4)
module_exit(exit_route4)
MODULE_LICENSE("GPL");