/*
 * net/sched/cls_u32.c	Ugly (or Universal) 32bit key Packet Classifier.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 *	The filters are packed to hash tables of key nodes
 *	with a set of 32bit key/mask pairs at every node.
 *	Nodes reference next level hash tables etc.
 *
 *	This scheme is the best universal classifier I managed to
 *	invent; it is not super-fast, but it is not slow (provided you
 *	program it correctly), and general enough.  And its relative
 *	speed grows as the number of rules becomes larger.
 *
 *	It seems that it represents the best middle point between
 *	speed and manageability both by human and by machine.
 *
 *	It is especially useful for link sharing combined with QoS;
 *	pure RSVP doesn't need such a general approach and can use
 *	much simpler (and faster) schemes, sort of cls_rsvp.c.
 *
 *	JHS: We should remove the CONFIG_NET_CLS_IND from here
 *	eventually when the meta match extension is made available
 *
 *	nfmark match added by Catalin(ux aka Dino) BOIE <catab at umbrella.ro>
 */
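/* Editor's note (illustrative, not part of the original file): a u32
 * filter is normally installed from user space with tc(8).  A minimal
 * sketch, assuming eth0 carries an HTB qdisc "1:" with a class 1:10:
 *
 *	tc filter add dev eth0 parent 1: prio 1 protocol ip u32 \
 *		match ip dst 192.168.1.0/24 flowid 1:10
 *
 * This becomes a single tc_u_knode with one 32bit key (value
 * 192.168.1.0, mask 0xffffff00, offset 16 into the IP header) hanging
 * off the root tc_u_hnode that u32_init() below creates.
 */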
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/percpu.h>
#include <linux/rtnetlink.h>
#include <linux/skbuff.h>
#include <linux/bitmap.h>
#include <linux/netdevice.h>
#include <linux/hash.h>
#include <net/netlink.h>
#include <net/act_api.h>
#include <net/pkt_cls.h>
#include <linux/idr.h>
struct tc_u_knode {
	struct tc_u_knode __rcu	*next;
	u32			handle;
	struct tc_u_hnode __rcu	*ht_up;
	struct tcf_exts		exts;
#ifdef CONFIG_NET_CLS_IND
	int			ifindex;
#endif
	u8			fshift;
	struct tcf_result	res;
	struct tc_u_hnode __rcu	*ht_down;
#ifdef CONFIG_CLS_U32_PERF
	struct tc_u32_pcnt __percpu *pf;
#endif
	u32			flags;
	unsigned int		in_hw_count;
#ifdef CONFIG_CLS_U32_MARK
	u32			val;
	u32			mask;
	u32 __percpu		*pcpu_success;
#endif
	struct tcf_proto	*tp;
	struct rcu_work		rwork;
	/* The 'sel' field MUST be the last field in structure to allow for
	 * tc_u32_keys allocated at end of structure.
	 */
	struct tc_u32_sel	sel;
};
struct tc_u_hnode {
	struct tc_u_hnode __rcu	*next;
	u32			handle;
	u32			prio;
	struct tc_u_common	*tp_c;
	int			refcnt;
	unsigned int		divisor;
	struct idr		handle_idr;
	struct rcu_head		rcu;
	u32			flags;
	/* The 'ht' field MUST be the last field in structure to allow for
	 * more entries allocated at end of structure.
	 */
	struct tc_u_knode __rcu	*ht[1];
};
struct tc_u_common {
	struct tc_u_hnode __rcu	*hlist;
	void			*ptr;
	int			refcnt;
	struct idr		handle_idr;
	struct hlist_node	hnode;
};
static inline unsigned int u32_hash_fold(__be32 key,
					 const struct tc_u32_sel *sel,
					 u8 fshift)
{
	unsigned int h = ntohl(key & sel->hmask) >> fshift;

	return h;
}
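/* Editor's worked example (not in the original): u32_change() computes
 * fshift as ffs(ntohl(hmask)) - 1, so hmask 0x0000ff00 gives fshift 8.
 * A key whose masked host-order value is 0x00003400 then folds to 0x34,
 * and the caller ANDs that with the table's divisor to pick a bucket.
 */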
static int u32_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			struct tcf_result *res)
{
	struct {
		struct tc_u_knode *knode;
		unsigned int	  off;
	} stack[TC_U32_MAXDEPTH];

	struct tc_u_hnode *ht = rcu_dereference_bh(tp->root);
	unsigned int off = skb_network_offset(skb);
	struct tc_u_knode *n;
	int sdepth = 0;
	int off2 = 0;
	int sel = 0;
#ifdef CONFIG_CLS_U32_PERF
	int j;
#endif
	int i, r;

next_ht:
	n = rcu_dereference_bh(ht->ht[sel]);

next_knode:
	if (n) {
		struct tc_u32_key *key = n->sel.keys;

#ifdef CONFIG_CLS_U32_PERF
		__this_cpu_inc(n->pf->rcnt);
		j = 0;
#endif

		if (tc_skip_sw(n->flags)) {
			n = rcu_dereference_bh(n->next);
			goto next_knode;
		}

#ifdef CONFIG_CLS_U32_MARK
		if ((skb->mark & n->mask) != n->val) {
			n = rcu_dereference_bh(n->next);
			goto next_knode;
		} else {
			__this_cpu_inc(*n->pcpu_success);
		}
#endif

		for (i = n->sel.nkeys; i > 0; i--, key++) {
			int toff = off + key->off + (off2 & key->offmask);
			__be32 *data, hdata;

			if (skb_headroom(skb) + toff > INT_MAX)
				goto out;

			data = skb_header_pointer(skb, toff, 4, &hdata);
			if (!data)
				goto out;
			if ((*data ^ key->val) & key->mask) {
				n = rcu_dereference_bh(n->next);
				goto next_knode;
			}
#ifdef CONFIG_CLS_U32_PERF
			__this_cpu_inc(n->pf->kcnts[j]);
			j++;
#endif
		}

		ht = rcu_dereference_bh(n->ht_down);
		if (!ht) {
check_terminal:
			if (n->sel.flags & TC_U32_TERMINAL) {

				*res = n->res;
#ifdef CONFIG_NET_CLS_IND
				if (!tcf_match_indev(skb, n->ifindex)) {
					n = rcu_dereference_bh(n->next);
					goto next_knode;
				}
#endif
#ifdef CONFIG_CLS_U32_PERF
				__this_cpu_inc(n->pf->rhit);
#endif
				r = tcf_exts_exec(skb, &n->exts, res);
				if (r < 0) {
					n = rcu_dereference_bh(n->next);
					goto next_knode;
				}

				return r;
			}
			n = rcu_dereference_bh(n->next);
			goto next_knode;
		}

		/* PUSH */
		if (sdepth >= TC_U32_MAXDEPTH)
			goto deadloop;
		stack[sdepth].knode = n;
		stack[sdepth].off = off;
		sdepth++;

		ht = rcu_dereference_bh(n->ht_down);
		sel = 0;
		if (ht->divisor) {
			__be32 *data, hdata;

			data = skb_header_pointer(skb, off + n->sel.hoff, 4,
						  &hdata);
			if (!data)
				goto out;
			sel = ht->divisor & u32_hash_fold(*data, &n->sel,
							  n->fshift);
		}
		if (!(n->sel.flags & (TC_U32_VAROFFSET | TC_U32_OFFSET | TC_U32_EAT)))
			goto next_ht;

		if (n->sel.flags & (TC_U32_OFFSET | TC_U32_VAROFFSET)) {
			off2 = n->sel.off;
			if (n->sel.flags & TC_U32_VAROFFSET) {
				__be16 *data, hdata;

				data = skb_header_pointer(skb,
							  off + n->sel.offoff,
							  2, &hdata);
				if (!data)
					goto out;
				off2 += ntohs(n->sel.offmask & *data) >>
					n->sel.offshift;
			}
			off2 &= ~3;
		}
		if (n->sel.flags & TC_U32_EAT) {
			off += off2;
			off2 = 0;
		}

		if (off < skb->len)
			goto next_ht;
	}

	/* POP */
	if (sdepth--) {
		n = stack[sdepth].knode;
		ht = rcu_dereference_bh(n->ht_up);
		off = stack[sdepth].off;
		goto check_terminal;
	}
out:
	return -1;

deadloop:
	net_warn_ratelimited("cls_u32: dead loop\n");
	return -1;
}
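/* Editor's note (not in the original): the loop above is an explicit
 * depth-first walk.  Following a link to a lower hash table pushes the
 * current (knode, offset) pair onto stack[]; exhausting a bucket pops
 * one level and resumes at check_terminal with the parent knode.
 * TC_U32_MAXDEPTH bounds the descent, so the deadloop path only fires
 * on misprogrammed filter graphs.
 */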
static struct tc_u_hnode *u32_lookup_ht(struct tc_u_common *tp_c, u32 handle)
{
	struct tc_u_hnode *ht;

	for (ht = rtnl_dereference(tp_c->hlist);
	     ht;
	     ht = rtnl_dereference(ht->next))
		if (ht->handle == handle)
			break;

	return ht;
}
static struct tc_u_knode *u32_lookup_key(struct tc_u_hnode *ht, u32 handle)
{
	unsigned int sel;
	struct tc_u_knode *n = NULL;

	sel = TC_U32_HASH(handle);
	if (sel > ht->divisor)
		goto out;

	for (n = rtnl_dereference(ht->ht[sel]);
	     n;
	     n = rtnl_dereference(n->next))
		if (n->handle == handle)
			break;
out:
	return n;
}
static void *u32_get(struct tcf_proto *tp, u32 handle)
{
	struct tc_u_hnode *ht;
	struct tc_u_common *tp_c = tp->data;

	if (TC_U32_HTID(handle) == TC_U32_ROOT)
		ht = rtnl_dereference(tp->root);
	else
		ht = u32_lookup_ht(tp_c, TC_U32_HTID(handle));

	if (!ht)
		return NULL;

	if (TC_U32_KEY(handle) == 0)
		return ht;

	return u32_lookup_key(ht, handle);
}
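/* Editor's sketch of the handle layout decoded above (macros from
 * include/uapi/linux/pkt_cls.h): bits 31..20 are the hash table id
 * (TC_U32_HTID), bits 19..12 the bucket (TC_U32_HASH) and bits 11..0
 * the node id (TC_U32_NODE); TC_U32_KEY() is the low 20 bits.  For
 * example, 0x80110800 names node 0x800 in bucket 0x10 of table 0x801.
 */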
/* Protected by rtnl lock */
static u32 gen_new_htid(struct tc_u_common *tp_c, struct tc_u_hnode *ptr)
{
	int id = idr_alloc_cyclic(&tp_c->handle_idr, ptr, 1, 0x7FF, GFP_KERNEL);

	if (id < 0)
		return 0;
	return (id | 0x800U) << 20;
}
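/* Editor's example (not in the original): ids are allocated cyclically
 * in [1, 0x7FF), so the first table gets id 1 and thus handle
 * (1 | 0x800) << 20 = 0x80100000, i.e. user-visible htid 801:.  The
 * root table instead keeps the fixed handle 0x80000000 set in
 * u32_init().
 */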
static struct hlist_head *tc_u_common_hash;

#define U32_HASH_SHIFT 10
#define U32_HASH_SIZE (1 << U32_HASH_SHIFT)

static void *tc_u_common_ptr(const struct tcf_proto *tp)
{
	struct tcf_block *block = tp->chain->block;

	/* The block sharing is currently supported only
	 * for classless qdiscs. In that case we use block
	 * for tc_u_common identification. In case the
	 * block is not shared, block->q is a valid pointer
	 * and we can use that. That works for classful qdiscs.
	 */
	if (tcf_block_shared(block))
		return block;
	else
		return block->q;
}

static unsigned int tc_u_hash(const struct tcf_proto *tp)
{
	return hash_ptr(tc_u_common_ptr(tp), U32_HASH_SHIFT);
}

static struct tc_u_common *tc_u_common_find(const struct tcf_proto *tp)
{
	struct tc_u_common *tc;
	unsigned int h = tc_u_hash(tp);

	hlist_for_each_entry(tc, &tc_u_common_hash[h], hnode) {
		if (tc->ptr == tc_u_common_ptr(tp))
			return tc;
	}
	return NULL;
}
static int u32_init(struct tcf_proto *tp)
{
	struct tc_u_hnode *root_ht;
	struct tc_u_common *tp_c;
	unsigned int h;

	tp_c = tc_u_common_find(tp);

	root_ht = kzalloc(sizeof(*root_ht), GFP_KERNEL);
	if (root_ht == NULL)
		return -ENOBUFS;

	root_ht->refcnt++;
	root_ht->handle = tp_c ? gen_new_htid(tp_c, root_ht) : 0x80000000;
	root_ht->prio = tp->prio;
	idr_init(&root_ht->handle_idr);

	if (tp_c == NULL) {
		tp_c = kzalloc(sizeof(*tp_c), GFP_KERNEL);
		if (tp_c == NULL) {
			kfree(root_ht);
			return -ENOBUFS;
		}
		tp_c->ptr = tc_u_common_ptr(tp);
		INIT_HLIST_NODE(&tp_c->hnode);
		idr_init(&tp_c->handle_idr);

		h = tc_u_hash(tp);
		hlist_add_head(&tp_c->hnode, &tc_u_common_hash[h]);
	}

	tp_c->refcnt++;
	RCU_INIT_POINTER(root_ht->next, tp_c->hlist);
	rcu_assign_pointer(tp_c->hlist, root_ht);
	root_ht->tp_c = tp_c;

	rcu_assign_pointer(tp->root, root_ht);
	tp->data = tp_c;
	return 0;
}
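/* Editor's note (not in the original): tc_u_common_find() makes every
 * u32 tcf_proto instance on the same qdisc or shared block resolve to
 * one tc_u_common, so hash tables created at one priority can be
 * referenced (TCA_U32_LINK) by filters installed at another.
 */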
static void __u32_destroy_key(struct tc_u_knode *n)
{
	struct tc_u_hnode *ht = rtnl_dereference(n->ht_down);

	tcf_exts_destroy(&n->exts);
	if (ht && --ht->refcnt == 0)
		kfree(ht);
	kfree(n);
}

static void u32_destroy_key(struct tcf_proto *tp, struct tc_u_knode *n,
			    bool free_pf)
{
	tcf_exts_put_net(&n->exts);
#ifdef CONFIG_CLS_U32_PERF
	if (free_pf)
		free_percpu(n->pf);
#endif
#ifdef CONFIG_CLS_U32_MARK
	if (free_pf)
		free_percpu(n->pcpu_success);
#endif
	__u32_destroy_key(n);
}
/* u32_delete_key_rcu should be called when free'ing a copied
 * version of a tc_u_knode obtained from u32_init_knode(). When
 * copies are obtained from u32_init_knode() the statistics are
 * shared between the old and new copies to allow readers to
 * continue to update the statistics during the copy. To support
 * this the u32_delete_key_rcu variant does not free the percpu
 * statistics.
 */
static void u32_delete_key_work(struct work_struct *work)
{
	struct tc_u_knode *key = container_of(to_rcu_work(work),
					      struct tc_u_knode,
					      rwork);
	rtnl_lock();
	u32_destroy_key(key->tp, key, false);
	rtnl_unlock();
}
/* u32_delete_key_freepf_rcu is the rcu callback variant
 * that free's the entire structure including the statistics
 * percpu variables. Only use this if the key is not a copy
 * returned by u32_init_knode(). See u32_delete_key_rcu()
 * for the variant that should be used with keys return from
 * u32_init_knode()
 */
static void u32_delete_key_freepf_work(struct work_struct *work)
{
	struct tc_u_knode *key = container_of(to_rcu_work(work),
					      struct tc_u_knode,
					      rwork);
	rtnl_lock();
	u32_destroy_key(key->tp, key, true);
	rtnl_unlock();
}
static int u32_delete_key(struct tcf_proto *tp, struct tc_u_knode *key)
{
	struct tc_u_knode __rcu **kp;
	struct tc_u_knode *pkp;
	struct tc_u_hnode *ht = rtnl_dereference(key->ht_up);

	if (ht) {
		kp = &ht->ht[TC_U32_HASH(key->handle)];
		for (pkp = rtnl_dereference(*kp); pkp;
		     kp = &pkp->next, pkp = rtnl_dereference(*kp)) {
			if (pkp == key) {
				RCU_INIT_POINTER(*kp, key->next);

				tcf_unbind_filter(tp, &key->res);
				idr_remove(&ht->handle_idr, key->handle);
				tcf_exts_get_net(&key->exts);
				tcf_queue_work(&key->rwork, u32_delete_key_freepf_work);
				return 0;
			}
		}
	}
	WARN_ON(1);
	return 0;
}
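/* Editor's note (not in the original): u32_delete_key() only unlinks
 * the knode from its bucket chain with RCU_INIT_POINTER(); the actual
 * free is deferred through tcf_queue_work() so that u32_classify()
 * readers still traversing the old chain finish before the memory goes
 * away.
 */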
static void u32_clear_hw_hnode(struct tcf_proto *tp, struct tc_u_hnode *h,
			       struct netlink_ext_ack *extack)
{
	struct tcf_block *block = tp->chain->block;
	struct tc_cls_u32_offload cls_u32 = {};

	tc_cls_common_offload_init(&cls_u32.common, tp, h->flags, extack);
	cls_u32.command = TC_CLSU32_DELETE_HNODE;
	cls_u32.hnode.divisor = h->divisor;
	cls_u32.hnode.handle = h->handle;
	cls_u32.hnode.prio = h->prio;

	tc_setup_cb_call(block, NULL, TC_SETUP_CLSU32, &cls_u32, false);
}
static int u32_replace_hw_hnode(struct tcf_proto *tp, struct tc_u_hnode *h,
				u32 flags, struct netlink_ext_ack *extack)
{
	struct tcf_block *block = tp->chain->block;
	struct tc_cls_u32_offload cls_u32 = {};
	bool skip_sw = tc_skip_sw(flags);
	bool offloaded = false;
	int err;

	tc_cls_common_offload_init(&cls_u32.common, tp, flags, extack);
	cls_u32.command = TC_CLSU32_NEW_HNODE;
	cls_u32.hnode.divisor = h->divisor;
	cls_u32.hnode.handle = h->handle;
	cls_u32.hnode.prio = h->prio;

	err = tc_setup_cb_call(block, NULL, TC_SETUP_CLSU32, &cls_u32, skip_sw);
	if (err < 0) {
		u32_clear_hw_hnode(tp, h, NULL);
		return err;
	} else if (err > 0) {
		offloaded = true;
	}

	if (skip_sw && !offloaded)
		return -EINVAL;

	return 0;
}
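/* Editor's note (illustrative, not in the original): the skip_sw
 * handling above is driven by TCA_U32_FLAGS from user space, e.g.
 *
 *	tc filter add dev eth0 ingress u32 skip_sw \
 *		match ip protocol 6 0xff action drop
 *
 * insists on a hardware offload and fails with -EINVAL when no block
 * callback accepts the hnode, while skip_hw keeps the filter purely in
 * software.
 */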
static void u32_remove_hw_knode(struct tcf_proto *tp, struct tc_u_knode *n,
				struct netlink_ext_ack *extack)
{
	struct tcf_block *block = tp->chain->block;
	struct tc_cls_u32_offload cls_u32 = {};

	tc_cls_common_offload_init(&cls_u32.common, tp, n->flags, extack);
	cls_u32.command = TC_CLSU32_DELETE_KNODE;
	cls_u32.knode.handle = n->handle;

	tc_setup_cb_call(block, NULL, TC_SETUP_CLSU32, &cls_u32, false);
	tcf_block_offload_dec(block, &n->flags);
}
static int u32_replace_hw_knode(struct tcf_proto *tp, struct tc_u_knode *n,
				u32 flags, struct netlink_ext_ack *extack)
{
	struct tc_u_hnode *ht = rtnl_dereference(n->ht_down);
	struct tcf_block *block = tp->chain->block;
	struct tc_cls_u32_offload cls_u32 = {};
	bool skip_sw = tc_skip_sw(flags);
	int err;

	tc_cls_common_offload_init(&cls_u32.common, tp, flags, extack);
	cls_u32.command = TC_CLSU32_REPLACE_KNODE;
	cls_u32.knode.handle = n->handle;
	cls_u32.knode.fshift = n->fshift;
#ifdef CONFIG_CLS_U32_MARK
	cls_u32.knode.val = n->val;
	cls_u32.knode.mask = n->mask;
#else
	cls_u32.knode.val = 0;
	cls_u32.knode.mask = 0;
#endif
	cls_u32.knode.sel = &n->sel;
	cls_u32.knode.exts = &n->exts;
	if (n->ht_down)
		cls_u32.knode.link_handle = ht->handle;

	err = tc_setup_cb_call(block, NULL, TC_SETUP_CLSU32, &cls_u32, skip_sw);
	if (err < 0) {
		u32_remove_hw_knode(tp, n, NULL);
		return err;
	} else if (err > 0) {
		n->in_hw_count = err;
		tcf_block_offload_inc(block, &n->flags);
	}

	if (skip_sw && !(n->flags & TCA_CLS_FLAGS_IN_HW))
		return -EINVAL;

	return 0;
}
static void u32_clear_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht,
			    struct netlink_ext_ack *extack)
{
	struct tc_u_knode *n;
	unsigned int h;

	for (h = 0; h <= ht->divisor; h++) {
		while ((n = rtnl_dereference(ht->ht[h])) != NULL) {
			RCU_INIT_POINTER(ht->ht[h],
					 rtnl_dereference(n->next));
			tcf_unbind_filter(tp, &n->res);
			u32_remove_hw_knode(tp, n, extack);
			idr_remove(&ht->handle_idr, n->handle);
			if (tcf_exts_get_net(&n->exts))
				tcf_queue_work(&n->rwork, u32_delete_key_freepf_work);
			else
				u32_destroy_key(n->tp, n, true);
		}
	}
}
static int u32_destroy_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht,
			     struct netlink_ext_ack *extack)
{
	struct tc_u_common *tp_c = tp->data;
	struct tc_u_hnode __rcu **hn;
	struct tc_u_hnode *phn;

	WARN_ON(--ht->refcnt);

	u32_clear_hnode(tp, ht, extack);

	hn = &tp_c->hlist;
	for (phn = rtnl_dereference(*hn);
	     phn;
	     hn = &phn->next, phn = rtnl_dereference(*hn)) {
		if (phn == ht) {
			u32_clear_hw_hnode(tp, ht, extack);
			idr_destroy(&ht->handle_idr);
			idr_remove(&tp_c->handle_idr, ht->handle);
			RCU_INIT_POINTER(*hn, ht->next);
			kfree_rcu(ht, rcu);
			return 0;
		}
	}

	return -ENOENT;
}
static bool ht_empty(struct tc_u_hnode *ht)
{
	unsigned int h;

	for (h = 0; h <= ht->divisor; h++)
		if (rcu_access_pointer(ht->ht[h]))
			return false;

	return true;
}
static void u32_destroy(struct tcf_proto *tp, struct netlink_ext_ack *extack)
{
	struct tc_u_common *tp_c = tp->data;
	struct tc_u_hnode *root_ht = rtnl_dereference(tp->root);

	WARN_ON(root_ht == NULL);

	if (root_ht && --root_ht->refcnt == 1)
		u32_destroy_hnode(tp, root_ht, extack);

	if (--tp_c->refcnt == 0) {
		struct tc_u_hnode *ht;

		hlist_del(&tp_c->hnode);

		while ((ht = rtnl_dereference(tp_c->hlist)) != NULL) {
			u32_clear_hnode(tp, ht, extack);
			RCU_INIT_POINTER(tp_c->hlist, ht->next);

			/* u32_destroy_key() will later free ht for us, if it's
			 * still referenced by some knode
			 */
			if (--ht->refcnt == 0)
				kfree_rcu(ht, rcu);
		}

		idr_destroy(&tp_c->handle_idr);
		kfree(tp_c);
	}

	tp->data = NULL;
}
static int u32_delete(struct tcf_proto *tp, void *arg, bool *last,
		      struct netlink_ext_ack *extack)
{
	struct tc_u_hnode *ht = arg;
	struct tc_u_hnode *root_ht = rtnl_dereference(tp->root);
	struct tc_u_common *tp_c = tp->data;
	int ret = 0;

	if (ht == NULL)
		goto out;

	if (TC_U32_KEY(ht->handle)) {
		u32_remove_hw_knode(tp, (struct tc_u_knode *)ht, extack);
		ret = u32_delete_key(tp, (struct tc_u_knode *)ht);
		goto out;
	}

	if (root_ht == ht) {
		NL_SET_ERR_MSG_MOD(extack, "Not allowed to delete root node");
		return -EINVAL;
	}

	if (ht->refcnt == 1) {
		u32_destroy_hnode(tp, ht, extack);
	} else {
		NL_SET_ERR_MSG_MOD(extack, "Can not delete in-use filter");
		return -EBUSY;
	}

out:
	*last = true;
	if (root_ht->refcnt > 2) {
		*last = false;
		goto ret;
	}
	if (root_ht->refcnt == 2) {
		if (!ht_empty(root_ht)) {
			*last = false;
			goto ret;
		}
	}

	if (tp_c->refcnt > 1) {
		*last = false;
		goto ret;
	}

	if (tp_c->refcnt == 1) {
		struct tc_u_hnode *ht;

		for (ht = rtnl_dereference(tp_c->hlist);
		     ht;
		     ht = rtnl_dereference(ht->next))
			if (!ht_empty(ht)) {
				*last = false;
				break;
			}
	}

ret:
	return ret;
}
static u32 gen_new_kid(struct tc_u_hnode *ht, u32 htid)
{
	u32 index = htid | 0x800;
	u32 max = htid | 0xFFF;

	if (idr_alloc_u32(&ht->handle_idr, NULL, &index, max, GFP_KERNEL)) {
		index = htid + 1;
		if (idr_alloc_u32(&ht->handle_idr, NULL, &index, max,
				  GFP_KERNEL))
			index = max;
	}

	return index;
}
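/* Editor's example (not in the original): for table htid 0x80100000
 * the first automatic node id is 0x80100800; once 0x800..0xFFF are
 * taken the allocator retries from htid + 1, and only returns "max"
 * (0x80100FFF) when the table is completely full.
 */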
static const struct nla_policy u32_policy[TCA_U32_MAX + 1] = {
	[TCA_U32_CLASSID]	= { .type = NLA_U32 },
	[TCA_U32_HASH]		= { .type = NLA_U32 },
	[TCA_U32_LINK]		= { .type = NLA_U32 },
	[TCA_U32_DIVISOR]	= { .type = NLA_U32 },
	[TCA_U32_SEL]		= { .len = sizeof(struct tc_u32_sel) },
	[TCA_U32_INDEV]		= { .type = NLA_STRING, .len = IFNAMSIZ },
	[TCA_U32_MARK]		= { .len = sizeof(struct tc_u32_mark) },
	[TCA_U32_FLAGS]		= { .type = NLA_U32 },
};
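/* Editor's note (not in the original): the TCA_U32_SEL policy above
 * only checks the fixed-size header of struct tc_u32_sel; the variable
 * tail of sel.nkeys tc_u32_key entries is validated in u32_change()
 * against struct_size(s, keys, s->nkeys) before anything is copied.
 */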
static int u32_set_parms(struct net *net, struct tcf_proto *tp,
			 unsigned long base, struct tc_u_hnode *ht,
			 struct tc_u_knode *n, struct nlattr **tb,
			 struct nlattr *est, bool ovr,
			 struct netlink_ext_ack *extack)
{
	int err;

	err = tcf_exts_validate(net, tp, tb, est, &n->exts, ovr, extack);
	if (err < 0)
		return err;

	if (tb[TCA_U32_LINK]) {
		u32 handle = nla_get_u32(tb[TCA_U32_LINK]);
		struct tc_u_hnode *ht_down = NULL, *ht_old;

		if (TC_U32_KEY(handle)) {
			NL_SET_ERR_MSG_MOD(extack, "u32 Link handle must be a hash table");
			return -EINVAL;
		}

		if (handle) {
			ht_down = u32_lookup_ht(ht->tp_c, handle);

			if (!ht_down) {
				NL_SET_ERR_MSG_MOD(extack, "Link hash table not found");
				return -EINVAL;
			}
			ht_down->refcnt++;
		}

		ht_old = rtnl_dereference(n->ht_down);
		rcu_assign_pointer(n->ht_down, ht_down);

		if (ht_old)
			ht_old->refcnt--;
	}
	if (tb[TCA_U32_CLASSID]) {
		n->res.classid = nla_get_u32(tb[TCA_U32_CLASSID]);
		tcf_bind_filter(tp, &n->res, base);
	}

#ifdef CONFIG_NET_CLS_IND
	if (tb[TCA_U32_INDEV]) {
		int ret;
		ret = tcf_change_indev(net, tb[TCA_U32_INDEV], extack);
		if (ret < 0)
			return -EINVAL;
		n->ifindex = ret;
	}
#endif
	return 0;
}
static void u32_replace_knode(struct tcf_proto *tp, struct tc_u_common *tp_c,
			      struct tc_u_knode *n)
{
	struct tc_u_knode __rcu **ins;
	struct tc_u_knode *pins;
	struct tc_u_hnode *ht;

	if (TC_U32_HTID(n->handle) == TC_U32_ROOT)
		ht = rtnl_dereference(tp->root);
	else
		ht = u32_lookup_ht(tp_c, TC_U32_HTID(n->handle));

	ins = &ht->ht[TC_U32_HASH(n->handle)];

	/* The node must always exist for it to be replaced if this is not the
	 * case then something went very wrong elsewhere.
	 */
	for (pins = rtnl_dereference(*ins); ;
	     ins = &pins->next, pins = rtnl_dereference(*ins))
		if (pins->handle == n->handle)
			break;

	idr_replace(&ht->handle_idr, n, n->handle);
	RCU_INIT_POINTER(n->next, pins->next);
	rcu_assign_pointer(*ins, n);
}
static struct tc_u_knode *u32_init_knode(struct tcf_proto *tp,
					 struct tc_u_knode *n)
{
	struct tc_u_hnode *ht = rtnl_dereference(n->ht_down);
	struct tc_u32_sel *s = &n->sel;
	struct tc_u_knode *new;

	new = kzalloc(sizeof(*n) + s->nkeys*sizeof(struct tc_u32_key),
		      GFP_KERNEL);
	if (!new)
		return NULL;

	RCU_INIT_POINTER(new->next, n->next);
	new->handle = n->handle;
	RCU_INIT_POINTER(new->ht_up, n->ht_up);

#ifdef CONFIG_NET_CLS_IND
	new->ifindex = n->ifindex;
#endif
	new->fshift = n->fshift;
	new->res = n->res;
	new->flags = n->flags;
	RCU_INIT_POINTER(new->ht_down, ht);

#ifdef CONFIG_CLS_U32_PERF
	/* Statistics may be incremented by readers during update
	 * so we must keep them in tact. When the node is later destroyed
	 * a special destroy call must be made to not free the pf memory.
	 */
	new->pf = n->pf;
#endif

#ifdef CONFIG_CLS_U32_MARK
	new->val = n->val;
	new->mask = n->mask;
	/* Similarly success statistics must be moved as pointers */
	new->pcpu_success = n->pcpu_success;
#endif
	new->tp = tp;
	memcpy(&new->sel, s, sizeof(*s) + s->nkeys*sizeof(struct tc_u32_key));

	if (tcf_exts_init(&new->exts, TCA_U32_ACT, TCA_U32_POLICE)) {
		kfree(new);
		return NULL;
	}

	/* bump reference count as long as we hold pointer to structure */
	if (ht)
		ht->refcnt++;

	return new;
}
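/* Editor's note (not in the original): u32_init_knode() is the copy
 * step of an RCU copy/replace in u32_change(): the clone shares the
 * percpu pf/pcpu_success counters with the old node so readers keep
 * counting during the switch, which is why the displaced node must be
 * freed via u32_delete_key_work() (free_pf == false) rather than the
 * _freepf variant.
 */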
static int u32_change(struct net *net, struct sk_buff *in_skb,
		      struct tcf_proto *tp, unsigned long base, u32 handle,
		      struct nlattr **tca, void **arg, bool ovr,
		      struct netlink_ext_ack *extack)
{
	struct tc_u_common *tp_c = tp->data;
	struct tc_u_hnode *ht;
	struct tc_u_knode *n;
	struct tc_u32_sel *s;
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_U32_MAX + 1];
	u32 htid, flags = 0;
	size_t sel_size;
	int err;
#ifdef CONFIG_CLS_U32_PERF
	size_t size;
#endif

	if (!opt) {
		if (handle) {
			NL_SET_ERR_MSG_MOD(extack, "Filter handle requires options");
			return -EINVAL;
		} else {
			return 0;
		}
	}

	err = nla_parse_nested(tb, TCA_U32_MAX, opt, u32_policy, extack);
	if (err < 0)
		return err;

	if (tb[TCA_U32_FLAGS]) {
		flags = nla_get_u32(tb[TCA_U32_FLAGS]);
		if (!tc_flags_valid(flags)) {
			NL_SET_ERR_MSG_MOD(extack, "Invalid filter flags");
			return -EINVAL;
		}
	}

	n = *arg;
	if (n) {
		struct tc_u_knode *new;

		if (TC_U32_KEY(n->handle) == 0) {
			NL_SET_ERR_MSG_MOD(extack, "Key node id cannot be zero");
			return -EINVAL;
		}

		if ((n->flags ^ flags) &
		    ~(TCA_CLS_FLAGS_IN_HW | TCA_CLS_FLAGS_NOT_IN_HW)) {
			NL_SET_ERR_MSG_MOD(extack, "Key node flags do not match passed flags");
			return -EINVAL;
		}

		new = u32_init_knode(tp, n);
		if (!new)
			return -ENOMEM;

		err = u32_set_parms(net, tp, base,
				    rtnl_dereference(n->ht_up), new, tb,
				    tca[TCA_RATE], ovr, extack);
		if (err) {
			__u32_destroy_key(new);
			return err;
		}

		err = u32_replace_hw_knode(tp, new, flags, extack);
		if (err) {
			__u32_destroy_key(new);
			return err;
		}

		if (!tc_in_hw(new->flags))
			new->flags |= TCA_CLS_FLAGS_NOT_IN_HW;

		u32_replace_knode(tp, tp_c, new);
		tcf_unbind_filter(tp, &n->res);
		tcf_exts_get_net(&n->exts);
		tcf_queue_work(&n->rwork, u32_delete_key_work);
		return 0;
	}
	if (tb[TCA_U32_DIVISOR]) {
		unsigned int divisor = nla_get_u32(tb[TCA_U32_DIVISOR]);

		if (--divisor > 0x100) {
			NL_SET_ERR_MSG_MOD(extack, "Exceeded maximum 256 hash buckets");
			return -EINVAL;
		}
		if (TC_U32_KEY(handle)) {
			NL_SET_ERR_MSG_MOD(extack, "Divisor can only be used on a hash table");
			return -EINVAL;
		}
		ht = kzalloc(sizeof(*ht) + divisor*sizeof(void *), GFP_KERNEL);
		if (ht == NULL)
			return -ENOBUFS;
		if (handle == 0) {
			handle = gen_new_htid(tp->data, ht);
			if (handle == 0) {
				kfree(ht);
				return -ENOMEM;
			}
		} else {
			err = idr_alloc_u32(&tp_c->handle_idr, ht, &handle,
					    handle, GFP_KERNEL);
			if (err) {
				kfree(ht);
				return err;
			}
		}
		ht->tp_c = tp_c;
		ht->refcnt = 1;
		ht->divisor = divisor;
		ht->handle = handle;
		ht->prio = tp->prio;
		idr_init(&ht->handle_idr);
		ht->flags = flags;

		err = u32_replace_hw_hnode(tp, ht, flags, extack);
		if (err) {
			idr_remove(&tp_c->handle_idr, handle);
			kfree(ht);
			return err;
		}

		RCU_INIT_POINTER(ht->next, tp_c->hlist);
		rcu_assign_pointer(tp_c->hlist, ht);
		*arg = ht;

		return 0;
	}
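	/* Editor's note (illustrative, not in the original): the branch
	 * above services commands like
	 *
	 *	tc filter add dev eth0 parent 1: prio 1 \
	 *		handle 2: u32 divisor 256
	 *
	 * which allocates a 256-bucket tc_u_hnode holding no keys; later
	 * filters select it with "ht 2:" (TCA_U32_HASH) or jump to it
	 * with "link 2:" (TCA_U32_LINK).
	 */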
	if (tb[TCA_U32_HASH]) {
		htid = nla_get_u32(tb[TCA_U32_HASH]);
		if (TC_U32_HTID(htid) == TC_U32_ROOT) {
			ht = rtnl_dereference(tp->root);
			htid = ht->handle;
		} else {
			ht = u32_lookup_ht(tp->data, TC_U32_HTID(htid));
			if (!ht) {
				NL_SET_ERR_MSG_MOD(extack, "Specified hash table not found");
				return -EINVAL;
			}
		}
	} else {
		ht = rtnl_dereference(tp->root);
		htid = ht->handle;
	}

	if (ht->divisor < TC_U32_HASH(htid)) {
		NL_SET_ERR_MSG_MOD(extack, "Specified hash table buckets exceed configured value");
		return -EINVAL;
	}

	if (handle) {
		if (TC_U32_HTID(handle) && TC_U32_HTID(handle ^ htid)) {
			NL_SET_ERR_MSG_MOD(extack, "Handle specified hash table address mismatch");
			return -EINVAL;
		}
		handle = htid | TC_U32_NODE(handle);
		err = idr_alloc_u32(&ht->handle_idr, NULL, &handle, handle,
				    GFP_KERNEL);
		if (err)
			return err;
	} else
		handle = gen_new_kid(ht, htid);

	if (tb[TCA_U32_SEL] == NULL) {
		NL_SET_ERR_MSG_MOD(extack, "Selector not specified");
		err = -EINVAL;
		goto erridr;
	}

	s = nla_data(tb[TCA_U32_SEL]);
	sel_size = struct_size(s, keys, s->nkeys);
	if (nla_len(tb[TCA_U32_SEL]) < sel_size) {
		err = -EINVAL;
		goto erridr;
	}

	n = kzalloc(offsetof(typeof(*n), sel) + sel_size, GFP_KERNEL);
	if (n == NULL) {
		err = -ENOBUFS;
		goto erridr;
	}

#ifdef CONFIG_CLS_U32_PERF
	size = sizeof(struct tc_u32_pcnt) + s->nkeys * sizeof(u64);
	n->pf = __alloc_percpu(size, __alignof__(struct tc_u32_pcnt));
	if (!n->pf) {
		err = -ENOBUFS;
		goto errfree;
	}
#endif

	memcpy(&n->sel, s, sel_size);
	RCU_INIT_POINTER(n->ht_up, ht);
	n->handle = handle;
	n->fshift = s->hmask ? ffs(ntohl(s->hmask)) - 1 : 0;
	n->flags = flags;
	n->tp = tp;

	err = tcf_exts_init(&n->exts, TCA_U32_ACT, TCA_U32_POLICE);
	if (err < 0)
		goto errout;

#ifdef CONFIG_CLS_U32_MARK
	n->pcpu_success = alloc_percpu(u32);
	if (!n->pcpu_success) {
		err = -ENOMEM;
		goto errout;
	}

	if (tb[TCA_U32_MARK]) {
		struct tc_u32_mark *mark;

		mark = nla_data(tb[TCA_U32_MARK]);
		n->val = mark->val;
		n->mask = mark->mask;
	}
#endif

	err = u32_set_parms(net, tp, base, ht, n, tb, tca[TCA_RATE], ovr,
			    extack);
	if (err == 0) {
		struct tc_u_knode __rcu **ins;
		struct tc_u_knode *pins;

		err = u32_replace_hw_knode(tp, n, flags, extack);
		if (err)
			goto errhw;

		if (!tc_in_hw(n->flags))
			n->flags |= TCA_CLS_FLAGS_NOT_IN_HW;

		ins = &ht->ht[TC_U32_HASH(handle)];
		for (pins = rtnl_dereference(*ins); pins;
		     ins = &pins->next, pins = rtnl_dereference(*ins))
			if (TC_U32_NODE(handle) < TC_U32_NODE(pins->handle))
				break;

		RCU_INIT_POINTER(n->next, pins);
		rcu_assign_pointer(*ins, n);
		*arg = n;
		return 0;
	}

errhw:
#ifdef CONFIG_CLS_U32_MARK
	free_percpu(n->pcpu_success);
#endif

errout:
	tcf_exts_destroy(&n->exts);
#ifdef CONFIG_CLS_U32_PERF
errfree:
	free_percpu(n->pf);
#endif
	kfree(n);

erridr:
	idr_remove(&ht->handle_idr, handle);
	return err;
}
static void u32_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
	struct tc_u_common *tp_c = tp->data;
	struct tc_u_hnode *ht;
	struct tc_u_knode *n;
	unsigned int h;

	if (arg->stop)
		return;

	for (ht = rtnl_dereference(tp_c->hlist);
	     ht;
	     ht = rtnl_dereference(ht->next)) {
		if (ht->prio != tp->prio)
			continue;
		if (arg->count >= arg->skip) {
			if (arg->fn(tp, ht, arg) < 0) {
				arg->stop = 1;
				return;
			}
		}
		arg->count++;
		for (h = 0; h <= ht->divisor; h++) {
			for (n = rtnl_dereference(ht->ht[h]);
			     n;
			     n = rtnl_dereference(n->next)) {
				if (arg->count < arg->skip) {
					arg->count++;
					continue;
				}
				if (arg->fn(tp, n, arg) < 0) {
					arg->stop = 1;
					return;
				}
				arg->count++;
			}
		}
	}
}
static int u32_reoffload_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht,
			       bool add, tc_setup_cb_t *cb, void *cb_priv,
			       struct netlink_ext_ack *extack)
{
	struct tc_cls_u32_offload cls_u32 = {};
	int err;

	tc_cls_common_offload_init(&cls_u32.common, tp, ht->flags, extack);
	cls_u32.command = add ? TC_CLSU32_NEW_HNODE : TC_CLSU32_DELETE_HNODE;
	cls_u32.hnode.divisor = ht->divisor;
	cls_u32.hnode.handle = ht->handle;
	cls_u32.hnode.prio = ht->prio;

	err = cb(TC_SETUP_CLSU32, &cls_u32, cb_priv);
	if (err && add && tc_skip_sw(ht->flags))
		return err;

	return 0;
}
static int u32_reoffload_knode(struct tcf_proto *tp, struct tc_u_knode *n,
			       bool add, tc_setup_cb_t *cb, void *cb_priv,
			       struct netlink_ext_ack *extack)
{
	struct tc_u_hnode *ht = rtnl_dereference(n->ht_down);
	struct tcf_block *block = tp->chain->block;
	struct tc_cls_u32_offload cls_u32 = {};
	int err;

	tc_cls_common_offload_init(&cls_u32.common, tp, n->flags, extack);
	cls_u32.command = add ?
		TC_CLSU32_REPLACE_KNODE : TC_CLSU32_DELETE_KNODE;
	cls_u32.knode.handle = n->handle;

	if (add) {
		cls_u32.knode.fshift = n->fshift;
#ifdef CONFIG_CLS_U32_MARK
		cls_u32.knode.val = n->val;
		cls_u32.knode.mask = n->mask;
#else
		cls_u32.knode.val = 0;
		cls_u32.knode.mask = 0;
#endif
		cls_u32.knode.sel = &n->sel;
		cls_u32.knode.exts = &n->exts;
		if (n->ht_down)
			cls_u32.knode.link_handle = ht->handle;
	}

	err = cb(TC_SETUP_CLSU32, &cls_u32, cb_priv);
	if (err) {
		if (add && tc_skip_sw(n->flags))
			return err;
		return 0;
	}

	tc_cls_offload_cnt_update(block, &n->in_hw_count, &n->flags, add);

	return 0;
}
static int u32_reoffload(struct tcf_proto *tp, bool add, tc_setup_cb_t *cb,
			 void *cb_priv, struct netlink_ext_ack *extack)
{
	struct tc_u_common *tp_c = tp->data;
	struct tc_u_hnode *ht;
	struct tc_u_knode *n;
	unsigned int h;
	int err;

	for (ht = rtnl_dereference(tp_c->hlist);
	     ht;
	     ht = rtnl_dereference(ht->next)) {
		if (ht->prio != tp->prio)
			continue;

		/* When adding filters to a new dev, try to offload the
		 * hashtable first. When removing, do the filters before the
		 * hashtable.
		 */
		if (add && !tc_skip_hw(ht->flags)) {
			err = u32_reoffload_hnode(tp, ht, add, cb, cb_priv,
						  extack);
			if (err)
				return err;
		}

		for (h = 0; h <= ht->divisor; h++) {
			for (n = rtnl_dereference(ht->ht[h]);
			     n;
			     n = rtnl_dereference(n->next)) {
				if (tc_skip_hw(n->flags))
					continue;

				err = u32_reoffload_knode(tp, n, add, cb,
							  cb_priv, extack);
				if (err)
					return err;
			}
		}

		if (!add && !tc_skip_hw(ht->flags))
			u32_reoffload_hnode(tp, ht, add, cb, cb_priv, extack);
	}

	return 0;
}
static void u32_bind_class(void *fh, u32 classid, unsigned long cl, void *q,
			   unsigned long base)
{
	struct tc_u_knode *n = fh;

	if (n && n->res.classid == classid) {
		if (cl)
			__tcf_bind_filter(q, &n->res, base);
		else
			__tcf_unbind_filter(q, &n->res);
	}
}
static int u32_dump(struct net *net, struct tcf_proto *tp, void *fh,
		    struct sk_buff *skb, struct tcmsg *t)
{
	struct tc_u_knode *n = fh;
	struct tc_u_hnode *ht_up, *ht_down;
	struct nlattr *nest;

	if (n == NULL)
		return skb->len;

	t->tcm_handle = n->handle;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	if (TC_U32_KEY(n->handle) == 0) {
		struct tc_u_hnode *ht = fh;
		u32 divisor = ht->divisor + 1;

		if (nla_put_u32(skb, TCA_U32_DIVISOR, divisor))
			goto nla_put_failure;
	} else {
#ifdef CONFIG_CLS_U32_PERF
		struct tc_u32_pcnt *gpf;
		int cpu;
#endif

		if (nla_put(skb, TCA_U32_SEL,
			    sizeof(n->sel) + n->sel.nkeys*sizeof(struct tc_u32_key),
			    &n->sel))
			goto nla_put_failure;

		ht_up = rtnl_dereference(n->ht_up);
		if (ht_up) {
			u32 htid = n->handle & 0xFFFFF000;
			if (nla_put_u32(skb, TCA_U32_HASH, htid))
				goto nla_put_failure;
		}
		if (n->res.classid &&
		    nla_put_u32(skb, TCA_U32_CLASSID, n->res.classid))
			goto nla_put_failure;

		ht_down = rtnl_dereference(n->ht_down);
		if (ht_down &&
		    nla_put_u32(skb, TCA_U32_LINK, ht_down->handle))
			goto nla_put_failure;

		if (n->flags && nla_put_u32(skb, TCA_U32_FLAGS, n->flags))
			goto nla_put_failure;

#ifdef CONFIG_CLS_U32_MARK
		if ((n->val || n->mask)) {
			struct tc_u32_mark mark = {.val = n->val,
						   .mask = n->mask,
						   .success = 0};
			int cpum;

			for_each_possible_cpu(cpum) {
				__u32 cnt = *per_cpu_ptr(n->pcpu_success, cpum);

				mark.success += cnt;
			}

			if (nla_put(skb, TCA_U32_MARK, sizeof(mark), &mark))
				goto nla_put_failure;
		}
#endif

		if (tcf_exts_dump(skb, &n->exts) < 0)
			goto nla_put_failure;

#ifdef CONFIG_NET_CLS_IND
		if (n->ifindex) {
			struct net_device *dev;
			dev = __dev_get_by_index(net, n->ifindex);
			if (dev && nla_put_string(skb, TCA_U32_INDEV, dev->name))
				goto nla_put_failure;
		}
#endif
#ifdef CONFIG_CLS_U32_PERF
		gpf = kzalloc(sizeof(struct tc_u32_pcnt) +
			      n->sel.nkeys * sizeof(u64),
			      GFP_KERNEL);
		if (!gpf)
			goto nla_put_failure;

		for_each_possible_cpu(cpu) {
			int i;
			struct tc_u32_pcnt *pf = per_cpu_ptr(n->pf, cpu);

			gpf->rcnt += pf->rcnt;
			gpf->rhit += pf->rhit;
			for (i = 0; i < n->sel.nkeys; i++)
				gpf->kcnts[i] += pf->kcnts[i];
		}

		if (nla_put_64bit(skb, TCA_U32_PCNT,
				  sizeof(struct tc_u32_pcnt) +
				  n->sel.nkeys * sizeof(u64),
				  gpf, TCA_U32_PAD)) {
			kfree(gpf);
			goto nla_put_failure;
		}
		kfree(gpf);
#endif
	}

	nla_nest_end(skb, nest);

	if (TC_U32_KEY(n->handle))
		if (tcf_exts_dump_stats(skb, &n->exts) < 0)
			goto nla_put_failure;
	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}
static struct tcf_proto_ops cls_u32_ops __read_mostly = {
	.kind		=	"u32",
	.classify	=	u32_classify,
	.init		=	u32_init,
	.destroy	=	u32_destroy,
	.get		=	u32_get,
	.change		=	u32_change,
	.delete		=	u32_delete,
	.walk		=	u32_walk,
	.reoffload	=	u32_reoffload,
	.dump		=	u32_dump,
	.bind_class	=	u32_bind_class,
	.owner		=	THIS_MODULE,
};
static int __init init_u32(void)
{
	int i, ret;

	pr_info("u32 classifier\n");
#ifdef CONFIG_CLS_U32_PERF
	pr_info("    Performance counters on\n");
#endif
#ifdef CONFIG_NET_CLS_IND
	pr_info("    input device check on\n");
#endif
#ifdef CONFIG_NET_CLS_ACT
	pr_info("    Actions configured\n");
#endif
	tc_u_common_hash = kvmalloc_array(U32_HASH_SIZE,
					  sizeof(struct hlist_head),
					  GFP_KERNEL);
	if (!tc_u_common_hash)
		return -ENOMEM;

	for (i = 0; i < U32_HASH_SIZE; i++)
		INIT_HLIST_HEAD(&tc_u_common_hash[i]);

	ret = register_tcf_proto_ops(&cls_u32_ops);
	if (ret)
		kvfree(tc_u_common_hash);
	return ret;
}

static void __exit exit_u32(void)
{
	unregister_tcf_proto_ops(&cls_u32_ops);
	kvfree(tc_u_common_hash);
}

module_init(init_u32)
module_exit(exit_u32)
MODULE_LICENSE("GPL");