// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/cls_u32.c  Ugly (or Universal) 32bit key Packet Classifier.
 *
 * Authors:     Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 *      The filters are packed to hash tables of key nodes
 *      with a set of 32bit key/mask pairs at every node.
 *      Nodes reference next level hash tables etc.
 *
 *      This scheme is the best universal classifier I managed to
 *      invent; it is not super-fast, but it is not slow (provided you
 *      program it correctly), and general enough.  And its relative
 *      speed grows as the number of rules becomes larger.
 *
 *      It seems that it represents the best middle point between
 *      speed and manageability both by human and by machine.
 *
 *      It is especially useful for link sharing combined with QoS;
 *      pure RSVP doesn't need such a general approach and can use
 *      much simpler (and faster) schemes, such as cls_rsvp.c.
 *
 *      nfmark match added by Catalin(ux aka Dino) BOIE <catab at umbrella.ro>
 */
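
/* For orientation, a typical way this classifier is driven from userspace
 * (illustrative iproute2 invocation, not part of this file):
 *
 *   tc filter add dev eth0 parent 1: protocol ip prio 10 \
 *      u32 match ip dst 192.168.0.0/24 flowid 1:1
 *
 * which installs a single key node in the root hash table, matching the
 * IPv4 destination address against a 32-bit value/mask pair and
 * classifying matching packets to class 1:1.
 */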

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/percpu.h>
#include <linux/rtnetlink.h>
#include <linux/skbuff.h>
#include <linux/bitmap.h>
#include <linux/netdevice.h>
#include <linux/hash.h>
#include <net/netlink.h>
#include <net/act_api.h>
#include <net/pkt_cls.h>
#include <linux/idr.h>

struct tc_u_knode {
        struct tc_u_knode __rcu *next;
        u32                     handle;
        struct tc_u_hnode __rcu *ht_up;
        struct tcf_exts         exts;
        int                     ifindex;
        u8                      fshift;
        struct tcf_result       res;
        struct tc_u_hnode __rcu *ht_down;
#ifdef CONFIG_CLS_U32_PERF
        struct tc_u32_pcnt __percpu *pf;
#endif
        u32                     flags;
        unsigned int            in_hw_count;
#ifdef CONFIG_CLS_U32_MARK
        u32                     val;
        u32                     mask;
        u32 __percpu            *pcpu_success;
#endif
        struct rcu_work         rwork;
        /* The 'sel' field MUST be the last field in structure to allow for
         * tc_u32_keys allocated at end of structure.
         */
        struct tc_u32_sel       sel;
};

struct tc_u_hnode {
        struct tc_u_hnode __rcu *next;
        u32                     handle;
        u32                     prio;
        int                     refcnt;
        unsigned int            divisor;
        struct idr              handle_idr;
        bool                    is_root;
        struct rcu_head         rcu;
        u32                     flags;
        /* The 'ht' field MUST be the last field in structure to allow for
         * more entries allocated at end of structure.
         */
        struct tc_u_knode __rcu *ht[];
};

struct tc_u_common {
        struct tc_u_hnode __rcu *hlist;
        void                    *ptr;
        int                     refcnt;
        struct idr              handle_idr;
        struct hlist_node       hnode;
        long                    knodes;
};

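/* Fold a 32-bit key into a hash bucket index: mask the key with the
 * selector's hmask and shift it down by fshift, the bit position of the
 * lowest set bit of the host-order mask (see u32_change()). E.g. an
 * hmask of 0x0000ff00 with fshift 8 selects the byte at bits 8..15.
 * The caller ANDs the result with the table's divisor.
 */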
static inline unsigned int u32_hash_fold(__be32 key,
                                         const struct tc_u32_sel *sel,
                                         u8 fshift)
{
        unsigned int h = ntohl(key & sel->hmask) >> fshift;

        return h;
}

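/* Walk the hash table hierarchy. For each knode in the selected bucket,
 * every selector key (a 32-bit val/mask compared at a packet offset) is
 * tested; on a full match we either terminate with the node's result or
 * descend into its link table (ht_down), pushing the node on a fixed
 * stack so the walk can resume at the next knode when a subtree fails
 * to match. Nesting depth is bounded by TC_U32_MAXDEPTH.
 * TC_U32_OFFSET/TC_U32_VAROFFSET compute a secondary offset for keys
 * that use offmask, and TC_U32_EAT folds it into the base offset, which
 * is how variable-length headers are handled.
 */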
static int u32_classify(struct sk_buff *skb, const struct tcf_proto *tp,
                        struct tcf_result *res)
{
        struct {
                struct tc_u_knode *knode;
                unsigned int      off;
        } stack[TC_U32_MAXDEPTH];

        struct tc_u_hnode *ht = rcu_dereference_bh(tp->root);
        unsigned int off = skb_network_offset(skb);
        struct tc_u_knode *n;
        int sdepth = 0;
        int off2 = 0;
        int sel = 0;
#ifdef CONFIG_CLS_U32_PERF
        int j;
#endif
        int i, r;

next_ht:
        n = rcu_dereference_bh(ht->ht[sel]);

next_knode:
        if (n) {
                struct tc_u32_key *key = n->sel.keys;

#ifdef CONFIG_CLS_U32_PERF
                __this_cpu_inc(n->pf->rcnt);
                j = 0;
#endif

                if (tc_skip_sw(n->flags)) {
                        n = rcu_dereference_bh(n->next);
                        goto next_knode;
                }

#ifdef CONFIG_CLS_U32_MARK
                if ((skb->mark & n->mask) != n->val) {
                        n = rcu_dereference_bh(n->next);
                        goto next_knode;
                } else {
                        __this_cpu_inc(*n->pcpu_success);
                }
#endif

                for (i = n->sel.nkeys; i > 0; i--, key++) {
                        int toff = off + key->off + (off2 & key->offmask);
                        __be32 *data, hdata;

                        if (skb_headroom(skb) + toff > INT_MAX)
                                goto out;

                        data = skb_header_pointer(skb, toff, 4, &hdata);
                        if (!data)
                                goto out;
                        if ((*data ^ key->val) & key->mask) {
                                n = rcu_dereference_bh(n->next);
                                goto next_knode;
                        }
#ifdef CONFIG_CLS_U32_PERF
                        __this_cpu_inc(n->pf->kcnts[j]);
                        j++;
#endif
                }

                ht = rcu_dereference_bh(n->ht_down);
                if (!ht) {
check_terminal:
                        if (n->sel.flags & TC_U32_TERMINAL) {

                                *res = n->res;
                                if (!tcf_match_indev(skb, n->ifindex)) {
                                        n = rcu_dereference_bh(n->next);
                                        goto next_knode;
                                }
#ifdef CONFIG_CLS_U32_PERF
                                __this_cpu_inc(n->pf->rhit);
#endif
                                r = tcf_exts_exec(skb, &n->exts, res);
                                if (r < 0) {
                                        n = rcu_dereference_bh(n->next);
                                        goto next_knode;
                                }

                                return r;
                        }
                        n = rcu_dereference_bh(n->next);
                        goto next_knode;
                }

                /* PUSH */
                if (sdepth >= TC_U32_MAXDEPTH)
                        goto deadloop;
                stack[sdepth].knode = n;
                stack[sdepth].off = off;
                sdepth++;

                ht = rcu_dereference_bh(n->ht_down);
                sel = 0;
                if (ht->divisor) {
                        __be32 *data, hdata;

                        data = skb_header_pointer(skb, off + n->sel.hoff, 4,
                                                  &hdata);
                        if (!data)
                                goto out;
                        sel = ht->divisor & u32_hash_fold(*data, &n->sel,
                                                          n->fshift);
                }
                if (!(n->sel.flags & (TC_U32_VAROFFSET | TC_U32_OFFSET | TC_U32_EAT)))
                        goto next_ht;

                if (n->sel.flags & (TC_U32_OFFSET | TC_U32_VAROFFSET)) {
                        off2 = n->sel.off + 3;
                        if (n->sel.flags & TC_U32_VAROFFSET) {
                                __be16 *data, hdata;

                                data = skb_header_pointer(skb,
                                                          off + n->sel.offoff,
                                                          2, &hdata);
                                if (!data)
                                        goto out;
                                off2 += ntohs(n->sel.offmask & *data) >>
                                        n->sel.offshift;
                        }
                        off2 &= ~3;
                }
                if (n->sel.flags & TC_U32_EAT) {
                        off += off2;
                        off2 = 0;
                }

                if (off < skb->len)
                        goto next_ht;
        }

        /* POP */
        if (sdepth--) {
                n = stack[sdepth].knode;
                ht = rcu_dereference_bh(n->ht_up);
                off = stack[sdepth].off;
                goto check_terminal;
        }
out:
        return -1;

deadloop:
        net_warn_ratelimited("cls_u32: dead loop\n");
        return -1;
}

static struct tc_u_hnode *u32_lookup_ht(struct tc_u_common *tp_c, u32 handle)
{
        struct tc_u_hnode *ht;

        for (ht = rtnl_dereference(tp_c->hlist);
             ht;
             ht = rtnl_dereference(ht->next))
                if (ht->handle == handle)
                        break;

        return ht;
}

static struct tc_u_knode *u32_lookup_key(struct tc_u_hnode *ht, u32 handle)
{
        unsigned int sel;
        struct tc_u_knode *n = NULL;

        sel = TC_U32_HASH(handle);
        if (sel > ht->divisor)
                goto out;

        for (n = rtnl_dereference(ht->ht[sel]);
             n;
             n = rtnl_dereference(n->next))
                if (n->handle == handle)
                        break;
out:
        return n;
}

static void *u32_get(struct tcf_proto *tp, u32 handle)
{
        struct tc_u_hnode *ht;
        struct tc_u_common *tp_c = tp->data;

        if (TC_U32_HTID(handle) == TC_U32_ROOT)
                ht = rtnl_dereference(tp->root);
        else
                ht = u32_lookup_ht(tp_c, TC_U32_HTID(handle));

        if (!ht)
                return NULL;

        if (TC_U32_KEY(handle) == 0)
                return ht;

        return u32_lookup_key(ht, handle);
}

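/* Handles are 32 bits laid out as htid(12b):bucketid(8b):node/entryid(12b);
 * see the comment in u32_change(). gen_new_htid() allocates the 12-bit
 * table id from the IDR and places it in the top bits with the high bit
 * set (id | 0x800), so generated htids land in the 0x801..0xFFE range
 * and do not collide with the first root table's default 0x80000000.
 */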
/* Protected by rtnl lock */
static u32 gen_new_htid(struct tc_u_common *tp_c, struct tc_u_hnode *ptr)
{
        int id = idr_alloc_cyclic(&tp_c->handle_idr, ptr, 1, 0x7FF, GFP_KERNEL);
        if (id < 0)
                return 0;
        return (id | 0x800U) << 20;
}

static struct hlist_head *tc_u_common_hash;

#define U32_HASH_SHIFT 10
#define U32_HASH_SIZE (1 << U32_HASH_SHIFT)

static void *tc_u_common_ptr(const struct tcf_proto *tp)
{
        struct tcf_block *block = tp->chain->block;

        /* Block sharing is currently supported only for classless
         * qdiscs, and in that case we use the block pointer itself for
         * tc_u_common identification. When the block is not shared,
         * block->q is a valid pointer and we can use that instead;
         * that works for classful qdiscs.
         */
        if (tcf_block_shared(block))
                return block;
        else
                return block->q;
}

static struct hlist_head *tc_u_hash(void *key)
{
        return tc_u_common_hash + hash_ptr(key, U32_HASH_SHIFT);
}

static struct tc_u_common *tc_u_common_find(void *key)
{
        struct tc_u_common *tc;
        hlist_for_each_entry(tc, tc_u_hash(key), hnode) {
                if (tc->ptr == key)
                        return tc;
        }
        return NULL;
}

static int u32_init(struct tcf_proto *tp)
{
        struct tc_u_hnode *root_ht;
        void *key = tc_u_common_ptr(tp);
        struct tc_u_common *tp_c = tc_u_common_find(key);

        root_ht = kzalloc(struct_size(root_ht, ht, 1), GFP_KERNEL);
        if (root_ht == NULL)
                return -ENOBUFS;

        root_ht->refcnt++;
        root_ht->handle = tp_c ? gen_new_htid(tp_c, root_ht) : 0x80000000;
        root_ht->prio = tp->prio;
        root_ht->is_root = true;
        idr_init(&root_ht->handle_idr);

        if (tp_c == NULL) {
                tp_c = kzalloc(sizeof(*tp_c), GFP_KERNEL);
                if (tp_c == NULL) {
                        kfree(root_ht);
                        return -ENOBUFS;
                }
                tp_c->ptr = key;
                INIT_HLIST_NODE(&tp_c->hnode);
                idr_init(&tp_c->handle_idr);

                hlist_add_head(&tp_c->hnode, tc_u_hash(key));
        }

        tp_c->refcnt++;
        RCU_INIT_POINTER(root_ht->next, tp_c->hlist);
        rcu_assign_pointer(tp_c->hlist, root_ht);

        root_ht->refcnt++;
        rcu_assign_pointer(tp->root, root_ht);
        tp->data = tp_c;
        return 0;
}

static void __u32_destroy_key(struct tc_u_knode *n)
{
        struct tc_u_hnode *ht = rtnl_dereference(n->ht_down);

        tcf_exts_destroy(&n->exts);
        if (ht && --ht->refcnt == 0)
                kfree(ht);
        kfree(n);
}

static void u32_destroy_key(struct tc_u_knode *n, bool free_pf)
{
        tcf_exts_put_net(&n->exts);
#ifdef CONFIG_CLS_U32_PERF
        if (free_pf)
                free_percpu(n->pf);
#endif
#ifdef CONFIG_CLS_U32_MARK
        if (free_pf)
                free_percpu(n->pcpu_success);
#endif
        __u32_destroy_key(n);
}

/* u32_delete_key_work() should be called when freeing a copied
 * version of a tc_u_knode obtained from u32_init_knode(). When
 * copies are obtained from u32_init_knode() the statistics are
 * shared between the old and new copies to allow readers to
 * continue to update the statistics during the copy. To support
 * this the u32_delete_key_work() variant does not free the percpu
 * statistics.
 */
static void u32_delete_key_work(struct work_struct *work)
{
        struct tc_u_knode *key = container_of(to_rcu_work(work),
                                              struct tc_u_knode,
                                              rwork);
        rtnl_lock();
        u32_destroy_key(key, false);
        rtnl_unlock();
}

/* u32_delete_key_freepf_work() is the work-queue variant that frees
 * the entire structure including the statistics percpu variables. Only
 * use this if the key is not a copy returned by u32_init_knode(). See
 * u32_delete_key_work() for the variant that should be used with keys
 * returned from u32_init_knode().
 */
static void u32_delete_key_freepf_work(struct work_struct *work)
{
        struct tc_u_knode *key = container_of(to_rcu_work(work),
                                              struct tc_u_knode,
                                              rwork);
        rtnl_lock();
        u32_destroy_key(key, true);
        rtnl_unlock();
}

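/* Unlink a knode from its hash bucket chain under the rtnl lock and defer
 * the actual free to a work item, so that RCU readers still walking the
 * chain in u32_classify() are not disturbed.
 */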
static int u32_delete_key(struct tcf_proto *tp, struct tc_u_knode *key)
{
        struct tc_u_common *tp_c = tp->data;
        struct tc_u_knode __rcu **kp;
        struct tc_u_knode *pkp;
        struct tc_u_hnode *ht = rtnl_dereference(key->ht_up);

        if (ht) {
                kp = &ht->ht[TC_U32_HASH(key->handle)];
                for (pkp = rtnl_dereference(*kp); pkp;
                     kp = &pkp->next, pkp = rtnl_dereference(*kp)) {
                        if (pkp == key) {
                                RCU_INIT_POINTER(*kp, key->next);
                                tp_c->knodes--;

                                tcf_unbind_filter(tp, &key->res);
                                idr_remove(&ht->handle_idr, key->handle);
                                tcf_exts_get_net(&key->exts);
                                tcf_queue_work(&key->rwork, u32_delete_key_freepf_work);
                                return 0;
                        }
                }
        }
        WARN_ON(1);
        return 0;
}

static void u32_clear_hw_hnode(struct tcf_proto *tp, struct tc_u_hnode *h,
                               struct netlink_ext_ack *extack)
{
        struct tcf_block *block = tp->chain->block;
        struct tc_cls_u32_offload cls_u32 = {};

        tc_cls_common_offload_init(&cls_u32.common, tp, h->flags, extack);
        cls_u32.command = TC_CLSU32_DELETE_HNODE;
        cls_u32.hnode.divisor = h->divisor;
        cls_u32.hnode.handle = h->handle;
        cls_u32.hnode.prio = h->prio;

        tc_setup_cb_call(block, TC_SETUP_CLSU32, &cls_u32, false, true);
}

static int u32_replace_hw_hnode(struct tcf_proto *tp, struct tc_u_hnode *h,
                                u32 flags, struct netlink_ext_ack *extack)
{
        struct tcf_block *block = tp->chain->block;
        struct tc_cls_u32_offload cls_u32 = {};
        bool skip_sw = tc_skip_sw(flags);
        bool offloaded = false;
        int err;

        tc_cls_common_offload_init(&cls_u32.common, tp, flags, extack);
        cls_u32.command = TC_CLSU32_NEW_HNODE;
        cls_u32.hnode.divisor = h->divisor;
        cls_u32.hnode.handle = h->handle;
        cls_u32.hnode.prio = h->prio;

        err = tc_setup_cb_call(block, TC_SETUP_CLSU32, &cls_u32, skip_sw, true);
        if (err < 0) {
                u32_clear_hw_hnode(tp, h, NULL);
                return err;
        } else if (err > 0) {
                offloaded = true;
        }

        if (skip_sw && !offloaded)
                return -EINVAL;

        return 0;
}

static void u32_remove_hw_knode(struct tcf_proto *tp, struct tc_u_knode *n,
                                struct netlink_ext_ack *extack)
{
        struct tcf_block *block = tp->chain->block;
        struct tc_cls_u32_offload cls_u32 = {};

        tc_cls_common_offload_init(&cls_u32.common, tp, n->flags, extack);
        cls_u32.command = TC_CLSU32_DELETE_KNODE;
        cls_u32.knode.handle = n->handle;

        tc_setup_cb_destroy(block, tp, TC_SETUP_CLSU32, &cls_u32, false,
                            &n->flags, &n->in_hw_count, true);
}

static int u32_replace_hw_knode(struct tcf_proto *tp, struct tc_u_knode *n,
                                u32 flags, struct netlink_ext_ack *extack)
{
        struct tc_u_hnode *ht = rtnl_dereference(n->ht_down);
        struct tcf_block *block = tp->chain->block;
        struct tc_cls_u32_offload cls_u32 = {};
        bool skip_sw = tc_skip_sw(flags);
        int err;

        tc_cls_common_offload_init(&cls_u32.common, tp, flags, extack);
        cls_u32.command = TC_CLSU32_REPLACE_KNODE;
        cls_u32.knode.handle = n->handle;
        cls_u32.knode.fshift = n->fshift;
#ifdef CONFIG_CLS_U32_MARK
        cls_u32.knode.val = n->val;
        cls_u32.knode.mask = n->mask;
#else
        cls_u32.knode.val = 0;
        cls_u32.knode.mask = 0;
#endif
        cls_u32.knode.sel = &n->sel;
        cls_u32.knode.res = &n->res;
        cls_u32.knode.exts = &n->exts;
        if (n->ht_down)
                cls_u32.knode.link_handle = ht->handle;

        err = tc_setup_cb_add(block, tp, TC_SETUP_CLSU32, &cls_u32, skip_sw,
                              &n->flags, &n->in_hw_count, true);
        if (err) {
                u32_remove_hw_knode(tp, n, NULL);
                return err;
        }

        if (skip_sw && !(n->flags & TCA_CLS_FLAGS_IN_HW))
                return -EINVAL;

        return 0;
}

static void u32_clear_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht,
                            struct netlink_ext_ack *extack)
{
        struct tc_u_common *tp_c = tp->data;
        struct tc_u_knode *n;
        unsigned int h;

        for (h = 0; h <= ht->divisor; h++) {
                while ((n = rtnl_dereference(ht->ht[h])) != NULL) {
                        RCU_INIT_POINTER(ht->ht[h],
                                         rtnl_dereference(n->next));
                        tp_c->knodes--;
                        tcf_unbind_filter(tp, &n->res);
                        u32_remove_hw_knode(tp, n, extack);
                        idr_remove(&ht->handle_idr, n->handle);
                        if (tcf_exts_get_net(&n->exts))
                                tcf_queue_work(&n->rwork, u32_delete_key_freepf_work);
                        else
                                u32_destroy_key(n, true);
                }
        }
}

static int u32_destroy_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht,
                             struct netlink_ext_ack *extack)
{
        struct tc_u_common *tp_c = tp->data;
        struct tc_u_hnode __rcu **hn;
        struct tc_u_hnode *phn;

        WARN_ON(--ht->refcnt);

        u32_clear_hnode(tp, ht, extack);

        hn = &tp_c->hlist;
        for (phn = rtnl_dereference(*hn);
             phn;
             hn = &phn->next, phn = rtnl_dereference(*hn)) {
                if (phn == ht) {
                        u32_clear_hw_hnode(tp, ht, extack);
                        idr_destroy(&ht->handle_idr);
                        idr_remove(&tp_c->handle_idr, ht->handle);
                        RCU_INIT_POINTER(*hn, ht->next);
                        kfree_rcu(ht, rcu);
                        return 0;
                }
        }

        return -ENOENT;
}

static void u32_destroy(struct tcf_proto *tp, bool rtnl_held,
                        struct netlink_ext_ack *extack)
{
        struct tc_u_common *tp_c = tp->data;
        struct tc_u_hnode *root_ht = rtnl_dereference(tp->root);

        WARN_ON(root_ht == NULL);

        if (root_ht && --root_ht->refcnt == 1)
                u32_destroy_hnode(tp, root_ht, extack);

        if (--tp_c->refcnt == 0) {
                struct tc_u_hnode *ht;

                hlist_del(&tp_c->hnode);

                while ((ht = rtnl_dereference(tp_c->hlist)) != NULL) {
                        u32_clear_hnode(tp, ht, extack);
                        RCU_INIT_POINTER(tp_c->hlist, ht->next);

                        /* u32_destroy_key() will later free ht for us, if it's
                         * still referenced by some knode
                         */
                        if (--ht->refcnt == 0)
                                kfree_rcu(ht, rcu);
                }

                idr_destroy(&tp_c->handle_idr);
                kfree(tp_c);
        }

        tp->data = NULL;
}

static int u32_delete(struct tcf_proto *tp, void *arg, bool *last,
                      bool rtnl_held, struct netlink_ext_ack *extack)
{
        struct tc_u_hnode *ht = arg;
        struct tc_u_common *tp_c = tp->data;
        int ret = 0;

        if (TC_U32_KEY(ht->handle)) {
                u32_remove_hw_knode(tp, (struct tc_u_knode *)ht, extack);
                ret = u32_delete_key(tp, (struct tc_u_knode *)ht);
                goto out;
        }

        if (ht->is_root) {
                NL_SET_ERR_MSG_MOD(extack, "Not allowed to delete root node");
                return -EINVAL;
        }

        if (ht->refcnt == 1) {
                u32_destroy_hnode(tp, ht, extack);
        } else {
                NL_SET_ERR_MSG_MOD(extack, "Can not delete in-use filter");
                return -EBUSY;
        }

out:
        *last = tp_c->refcnt == 1 && tp_c->knodes == 0;
        return ret;
}

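/* Pick a node id for a new knode in table htid: try the 0x800..0xFFF
 * range first, then fall back to anything free from htid + 1 up, and
 * failing both simply return the maximum id.
 */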
static u32 gen_new_kid(struct tc_u_hnode *ht, u32 htid)
{
        u32 index = htid | 0x800;
        u32 max = htid | 0xFFF;

        if (idr_alloc_u32(&ht->handle_idr, NULL, &index, max, GFP_KERNEL)) {
                index = htid + 1;
                if (idr_alloc_u32(&ht->handle_idr, NULL, &index, max,
                                  GFP_KERNEL))
                        index = max;
        }

        return index;
}

static const struct nla_policy u32_policy[TCA_U32_MAX + 1] = {
        [TCA_U32_CLASSID]       = { .type = NLA_U32 },
        [TCA_U32_HASH]          = { .type = NLA_U32 },
        [TCA_U32_LINK]          = { .type = NLA_U32 },
        [TCA_U32_DIVISOR]       = { .type = NLA_U32 },
        [TCA_U32_SEL]           = { .len = sizeof(struct tc_u32_sel) },
        [TCA_U32_INDEV]         = { .type = NLA_STRING, .len = IFNAMSIZ },
        [TCA_U32_MARK]          = { .len = sizeof(struct tc_u32_mark) },
        [TCA_U32_FLAGS]         = { .type = NLA_U32 },
};

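/* Apply the common knode attributes: validate and attach actions via
 * tcf_exts_validate(), resolve TCA_U32_INDEV, link the node to a
 * downstream hash table (TCA_U32_LINK) and bind TCA_U32_CLASSID.
 */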
static int u32_set_parms(struct net *net, struct tcf_proto *tp,
                         unsigned long base,
                         struct tc_u_knode *n, struct nlattr **tb,
                         struct nlattr *est, bool ovr,
                         struct netlink_ext_ack *extack)
{
        int err, ifindex = -1;

        err = tcf_exts_validate(net, tp, tb, est, &n->exts, ovr, true, extack);
        if (err < 0)
                return err;

        if (tb[TCA_U32_INDEV]) {
                ifindex = tcf_change_indev(net, tb[TCA_U32_INDEV], extack);
                if (ifindex < 0)
                        return -EINVAL;
        }

        if (tb[TCA_U32_LINK]) {
                u32 handle = nla_get_u32(tb[TCA_U32_LINK]);
                struct tc_u_hnode *ht_down = NULL, *ht_old;

                if (TC_U32_KEY(handle)) {
                        NL_SET_ERR_MSG_MOD(extack, "u32 Link handle must be a hash table");
                        return -EINVAL;
                }

                if (handle) {
                        ht_down = u32_lookup_ht(tp->data, handle);

                        if (!ht_down) {
                                NL_SET_ERR_MSG_MOD(extack, "Link hash table not found");
                                return -EINVAL;
                        }
                        if (ht_down->is_root) {
                                NL_SET_ERR_MSG_MOD(extack, "Not linking to root node");
                                return -EINVAL;
                        }
                        ht_down->refcnt++;
                }

                ht_old = rtnl_dereference(n->ht_down);
                rcu_assign_pointer(n->ht_down, ht_down);

                if (ht_old)
                        ht_old->refcnt--;
        }
        if (tb[TCA_U32_CLASSID]) {
                n->res.classid = nla_get_u32(tb[TCA_U32_CLASSID]);
                tcf_bind_filter(tp, &n->res, base);
        }

        if (ifindex >= 0)
                n->ifindex = ifindex;

        return 0;
}

static void u32_replace_knode(struct tcf_proto *tp, struct tc_u_common *tp_c,
                              struct tc_u_knode *n)
{
        struct tc_u_knode __rcu **ins;
        struct tc_u_knode *pins;
        struct tc_u_hnode *ht;

        if (TC_U32_HTID(n->handle) == TC_U32_ROOT)
                ht = rtnl_dereference(tp->root);
        else
                ht = u32_lookup_ht(tp_c, TC_U32_HTID(n->handle));

        ins = &ht->ht[TC_U32_HASH(n->handle)];

        /* The node must always exist for it to be replaced; if this is not
         * the case then something went very wrong elsewhere.
         */
        for (pins = rtnl_dereference(*ins); ;
             ins = &pins->next, pins = rtnl_dereference(*ins))
                if (pins->handle == n->handle)
                        break;

        idr_replace(&ht->handle_idr, n, n->handle);
        RCU_INIT_POINTER(n->next, pins->next);
        rcu_assign_pointer(*ins, n);
}

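/* Allocate a replacement knode for an in-place update. The copy shares
 * the percpu statistics of the original so that concurrent readers keep
 * updating them; the original must then be freed via u32_delete_key_work()
 * (i.e. with free_pf false) rather than the freepf variant.
 */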
static struct tc_u_knode *u32_init_knode(struct net *net, struct tcf_proto *tp,
                                         struct tc_u_knode *n)
{
        struct tc_u_hnode *ht = rtnl_dereference(n->ht_down);
        struct tc_u32_sel *s = &n->sel;
        struct tc_u_knode *new;

        new = kzalloc(struct_size(new, sel.keys, s->nkeys), GFP_KERNEL);
        if (!new)
                return NULL;

        RCU_INIT_POINTER(new->next, n->next);
        new->handle = n->handle;
        RCU_INIT_POINTER(new->ht_up, n->ht_up);

        new->ifindex = n->ifindex;
        new->fshift = n->fshift;
        new->flags = n->flags;
        RCU_INIT_POINTER(new->ht_down, ht);

#ifdef CONFIG_CLS_U32_PERF
        /* Statistics may be incremented by readers during update
         * so we must keep them intact. When the node is later destroyed
         * a special destroy call must be made so the pf memory is not
         * freed.
         */
        new->pf = n->pf;
#endif

#ifdef CONFIG_CLS_U32_MARK
        new->val = n->val;
        new->mask = n->mask;
        /* Similarly success statistics must be moved as pointers */
        new->pcpu_success = n->pcpu_success;
#endif
        memcpy(&new->sel, s, struct_size(s, keys, s->nkeys));

        if (tcf_exts_init(&new->exts, net, TCA_U32_ACT, TCA_U32_POLICE)) {
                kfree(new);
                return NULL;
        }

        /* bump reference count as long as we hold pointer to structure */
        if (ht)
                ht->refcnt++;

        return new;
}

static int u32_change(struct net *net, struct sk_buff *in_skb,
                      struct tcf_proto *tp, unsigned long base, u32 handle,
                      struct nlattr **tca, void **arg, bool ovr, bool rtnl_held,
                      struct netlink_ext_ack *extack)
{
        struct tc_u_common *tp_c = tp->data;
        struct tc_u_hnode *ht;
        struct tc_u_knode *n;
        struct tc_u32_sel *s;
        struct nlattr *opt = tca[TCA_OPTIONS];
        struct nlattr *tb[TCA_U32_MAX + 1];
        u32 htid, flags = 0;
        size_t sel_size;
        int err;

        if (!opt) {
                if (handle) {
                        NL_SET_ERR_MSG_MOD(extack, "Filter handle requires options");
                        return -EINVAL;
                } else {
                        return 0;
                }
        }

        err = nla_parse_nested_deprecated(tb, TCA_U32_MAX, opt, u32_policy,
                                          extack);
        if (err < 0)
                return err;

        if (tb[TCA_U32_FLAGS]) {
                flags = nla_get_u32(tb[TCA_U32_FLAGS]);
                if (!tc_flags_valid(flags)) {
                        NL_SET_ERR_MSG_MOD(extack, "Invalid filter flags");
                        return -EINVAL;
                }
        }

        n = *arg;
        if (n) {
                struct tc_u_knode *new;

                if (TC_U32_KEY(n->handle) == 0) {
                        NL_SET_ERR_MSG_MOD(extack, "Key node id cannot be zero");
                        return -EINVAL;
                }

                if ((n->flags ^ flags) &
                    ~(TCA_CLS_FLAGS_IN_HW | TCA_CLS_FLAGS_NOT_IN_HW)) {
                        NL_SET_ERR_MSG_MOD(extack, "Key node flags do not match passed flags");
                        return -EINVAL;
                }

                new = u32_init_knode(net, tp, n);
                if (!new)
                        return -ENOMEM;

                err = u32_set_parms(net, tp, base, new, tb,
                                    tca[TCA_RATE], ovr, extack);

                if (err) {
                        __u32_destroy_key(new);
                        return err;
                }

                err = u32_replace_hw_knode(tp, new, flags, extack);
                if (err) {
                        __u32_destroy_key(new);
                        return err;
                }

                if (!tc_in_hw(new->flags))
                        new->flags |= TCA_CLS_FLAGS_NOT_IN_HW;

                u32_replace_knode(tp, tp_c, new);
                tcf_unbind_filter(tp, &n->res);
                tcf_exts_get_net(&n->exts);
                tcf_queue_work(&n->rwork, u32_delete_key_work);
                return 0;
        }

        if (tb[TCA_U32_DIVISOR]) {
                unsigned int divisor = nla_get_u32(tb[TCA_U32_DIVISOR]);

                if (!is_power_of_2(divisor)) {
                        NL_SET_ERR_MSG_MOD(extack, "Divisor is not a power of 2");
                        return -EINVAL;
                }
                if (divisor-- > 0x100) {
                        NL_SET_ERR_MSG_MOD(extack, "Exceeded maximum 256 hash buckets");
                        return -EINVAL;
                }
                if (TC_U32_KEY(handle)) {
                        NL_SET_ERR_MSG_MOD(extack, "Divisor can only be used on a hash table");
                        return -EINVAL;
                }
                ht = kzalloc(struct_size(ht, ht, divisor + 1), GFP_KERNEL);
                if (ht == NULL)
                        return -ENOBUFS;
                if (handle == 0) {
                        handle = gen_new_htid(tp->data, ht);
                        if (handle == 0) {
                                kfree(ht);
                                return -ENOMEM;
                        }
                } else {
                        err = idr_alloc_u32(&tp_c->handle_idr, ht, &handle,
                                            handle, GFP_KERNEL);
                        if (err) {
                                kfree(ht);
                                return err;
                        }
                }
                ht->refcnt = 1;
                ht->divisor = divisor;
                ht->handle = handle;
                ht->prio = tp->prio;
                idr_init(&ht->handle_idr);
                ht->flags = flags;

                err = u32_replace_hw_hnode(tp, ht, flags, extack);
                if (err) {
                        idr_remove(&tp_c->handle_idr, handle);
                        kfree(ht);
                        return err;
                }

                RCU_INIT_POINTER(ht->next, tp_c->hlist);
                rcu_assign_pointer(tp_c->hlist, ht);
                *arg = ht;

                return 0;
        }

        if (tb[TCA_U32_HASH]) {
                htid = nla_get_u32(tb[TCA_U32_HASH]);
                if (TC_U32_HTID(htid) == TC_U32_ROOT) {
                        ht = rtnl_dereference(tp->root);
                        htid = ht->handle;
                } else {
                        ht = u32_lookup_ht(tp->data, TC_U32_HTID(htid));
                        if (!ht) {
                                NL_SET_ERR_MSG_MOD(extack, "Specified hash table not found");
                                return -EINVAL;
                        }
                }
        } else {
                ht = rtnl_dereference(tp->root);
                htid = ht->handle;
        }

        if (ht->divisor < TC_U32_HASH(htid)) {
                NL_SET_ERR_MSG_MOD(extack, "Specified hash table buckets exceed configured value");
                return -EINVAL;
        }

        /* At this point we need to derive the new handle that will be used
         * to uniquely identify this table match entry. The identity of the
         * entry is 32 bits made of:
         *     htid(12b):bucketid(8b):node/entryid(12b)
         *
         * We already have the table (ht) in which we will insert this
         * entry, and we carry the table's id in the variable "htid".
         * Note that the earlier code picked the ht either a) from the htid
         * the user provided via the TCA_U32_HASH attribute, or b) when no
         * such attribute was passed, by defaulting to the root ht at ID
         * 0x[800][00][000]. Rule: the root table has a single bucket with
         * ID 0. If OTOH the user passed us the htid, they may also pass a
         * bucketid of choice; 0 is fine. For example, a user htid of
         * 0x[600][01][000] indicates hash bucketid 1. Rule: the entry/node
         * ID _cannot_ be passed via the htid, so even if it was non-zero
         * it will be ignored.
         *
         * We may also have a handle, if the user passed one. The handle
         * carries the same htid(12b):bucketid(8b):node/entryid(12b)
         * addressing. Rule: the bucketid on the handle is ignored even if
         * one was passed; the value in "htid" is always assumed to be the
         * bucketid.
         */
        if (handle) {
                /* Rule: The htid from handle and tableid from htid must match */
                if (TC_U32_HTID(handle) && TC_U32_HTID(handle ^ htid)) {
                        NL_SET_ERR_MSG_MOD(extack, "Handle specified hash table address mismatch");
                        return -EINVAL;
                }
                /* Ok, so far we have a valid htid(12b):bucketid(8b), but we
                 * need to finalize the table entry identification with the
                 * last part, the node/entryid(12b). Rule: the nodeid _cannot
                 * be 0_ for entries; a nodeid of 0 is reserved for tables
                 * (see the earlier code which processes the TCA_U32_DIVISOR
                 * attribute). Rule: the nodeid can only be derived from the
                 * handle (and not from the htid).
                 * Rule: if the handle specifies zero for the node id (for
                 * example 0x60000000), then pick a new nodeid from the pool
                 * of IDs this hash table has been allocating from.
                 * If OTOH it is specified (for example the user passed a
                 * handle such as 0x60000123), then we use it to generate our
                 * final handle, which uniquely identifies the match entry.
                 */
                if (!TC_U32_NODE(handle)) {
                        handle = gen_new_kid(ht, htid);
                } else {
                        handle = htid | TC_U32_NODE(handle);
                        err = idr_alloc_u32(&ht->handle_idr, NULL, &handle,
                                            handle, GFP_KERNEL);
                        if (err)
                                return err;
                }
        } else {
                /* The user did not give us a handle; let's just generate
                 * one from the table's pool of nodeids.
                 */
                handle = gen_new_kid(ht, htid);
        }

        if (tb[TCA_U32_SEL] == NULL) {
                NL_SET_ERR_MSG_MOD(extack, "Selector not specified");
                err = -EINVAL;
                goto erridr;
        }

        s = nla_data(tb[TCA_U32_SEL]);
        sel_size = struct_size(s, keys, s->nkeys);
        if (nla_len(tb[TCA_U32_SEL]) < sel_size) {
                err = -EINVAL;
                goto erridr;
        }

        n = kzalloc(struct_size(n, sel.keys, s->nkeys), GFP_KERNEL);
        if (n == NULL) {
                err = -ENOBUFS;
                goto erridr;
        }

#ifdef CONFIG_CLS_U32_PERF
        n->pf = __alloc_percpu(struct_size(n->pf, kcnts, s->nkeys),
                               __alignof__(struct tc_u32_pcnt));
        if (!n->pf) {
                err = -ENOBUFS;
                goto errfree;
        }
#endif

        memcpy(&n->sel, s, sel_size);
        RCU_INIT_POINTER(n->ht_up, ht);
        n->handle = handle;
        n->fshift = s->hmask ? ffs(ntohl(s->hmask)) - 1 : 0;
        n->flags = flags;

        err = tcf_exts_init(&n->exts, net, TCA_U32_ACT, TCA_U32_POLICE);
        if (err < 0)
                goto errout;

#ifdef CONFIG_CLS_U32_MARK
        n->pcpu_success = alloc_percpu(u32);
        if (!n->pcpu_success) {
                err = -ENOMEM;
                goto errout;
        }

        if (tb[TCA_U32_MARK]) {
                struct tc_u32_mark *mark;

                mark = nla_data(tb[TCA_U32_MARK]);
                n->val = mark->val;
                n->mask = mark->mask;
        }
#endif

        err = u32_set_parms(net, tp, base, n, tb, tca[TCA_RATE], ovr,
                            extack);
        if (err == 0) {
                struct tc_u_knode __rcu **ins;
                struct tc_u_knode *pins;

                err = u32_replace_hw_knode(tp, n, flags, extack);
                if (err)
                        goto errhw;

                if (!tc_in_hw(n->flags))
                        n->flags |= TCA_CLS_FLAGS_NOT_IN_HW;

                ins = &ht->ht[TC_U32_HASH(handle)];
                for (pins = rtnl_dereference(*ins); pins;
                     ins = &pins->next, pins = rtnl_dereference(*ins))
                        if (TC_U32_NODE(handle) < TC_U32_NODE(pins->handle))
                                break;

                RCU_INIT_POINTER(n->next, pins);
                rcu_assign_pointer(*ins, n);
                tp_c->knodes++;
                *arg = n;
                return 0;
        }

errhw:
#ifdef CONFIG_CLS_U32_MARK
        free_percpu(n->pcpu_success);
#endif

errout:
        tcf_exts_destroy(&n->exts);
#ifdef CONFIG_CLS_U32_PERF
errfree:
        free_percpu(n->pf);
#endif
        kfree(n);
erridr:
        idr_remove(&ht->handle_idr, handle);
        return err;
}

static void u32_walk(struct tcf_proto *tp, struct tcf_walker *arg,
                     bool rtnl_held)
{
        struct tc_u_common *tp_c = tp->data;
        struct tc_u_hnode *ht;
        struct tc_u_knode *n;
        unsigned int h;

        if (arg->stop)
                return;

        for (ht = rtnl_dereference(tp_c->hlist);
             ht;
             ht = rtnl_dereference(ht->next)) {
                if (ht->prio != tp->prio)
                        continue;
                if (arg->count >= arg->skip) {
                        if (arg->fn(tp, ht, arg) < 0) {
                                arg->stop = 1;
                                return;
                        }
                }
                arg->count++;
                for (h = 0; h <= ht->divisor; h++) {
                        for (n = rtnl_dereference(ht->ht[h]);
                             n;
                             n = rtnl_dereference(n->next)) {
                                if (arg->count < arg->skip) {
                                        arg->count++;
                                        continue;
                                }
                                if (arg->fn(tp, n, arg) < 0) {
                                        arg->stop = 1;
                                        return;
                                }
                                arg->count++;
                        }
                }
        }
}

static int u32_reoffload_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht,
                               bool add, flow_setup_cb_t *cb, void *cb_priv,
                               struct netlink_ext_ack *extack)
{
        struct tc_cls_u32_offload cls_u32 = {};
        int err;

        tc_cls_common_offload_init(&cls_u32.common, tp, ht->flags, extack);
        cls_u32.command = add ? TC_CLSU32_NEW_HNODE : TC_CLSU32_DELETE_HNODE;
        cls_u32.hnode.divisor = ht->divisor;
        cls_u32.hnode.handle = ht->handle;
        cls_u32.hnode.prio = ht->prio;

        err = cb(TC_SETUP_CLSU32, &cls_u32, cb_priv);
        if (err && add && tc_skip_sw(ht->flags))
                return err;

        return 0;
}

static int u32_reoffload_knode(struct tcf_proto *tp, struct tc_u_knode *n,
                               bool add, flow_setup_cb_t *cb, void *cb_priv,
                               struct netlink_ext_ack *extack)
{
        struct tc_u_hnode *ht = rtnl_dereference(n->ht_down);
        struct tcf_block *block = tp->chain->block;
        struct tc_cls_u32_offload cls_u32 = {};
        int err;

        tc_cls_common_offload_init(&cls_u32.common, tp, n->flags, extack);
        cls_u32.command = add ?
                TC_CLSU32_REPLACE_KNODE : TC_CLSU32_DELETE_KNODE;
        cls_u32.knode.handle = n->handle;

        if (add) {
                cls_u32.knode.fshift = n->fshift;
#ifdef CONFIG_CLS_U32_MARK
                cls_u32.knode.val = n->val;
                cls_u32.knode.mask = n->mask;
#else
                cls_u32.knode.val = 0;
                cls_u32.knode.mask = 0;
#endif
                cls_u32.knode.sel = &n->sel;
                cls_u32.knode.res = &n->res;
                cls_u32.knode.exts = &n->exts;
                if (n->ht_down)
                        cls_u32.knode.link_handle = ht->handle;
        }

        err = tc_setup_cb_reoffload(block, tp, add, cb, TC_SETUP_CLSU32,
                                    &cls_u32, cb_priv, &n->flags,
                                    &n->in_hw_count);
        if (err)
                return err;

        return 0;
}

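/* Replay every hash table and knode of this classifier to a single
 * offload callback; this runs, e.g., when an offload callback is
 * registered on or unregistered from the block.
 */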
static int u32_reoffload(struct tcf_proto *tp, bool add, flow_setup_cb_t *cb,
                         void *cb_priv, struct netlink_ext_ack *extack)
{
        struct tc_u_common *tp_c = tp->data;
        struct tc_u_hnode *ht;
        struct tc_u_knode *n;
        unsigned int h;
        int err;

        for (ht = rtnl_dereference(tp_c->hlist);
             ht;
             ht = rtnl_dereference(ht->next)) {
                if (ht->prio != tp->prio)
                        continue;

                /* When adding filters to a new dev, try to offload the
                 * hashtable first. When removing, do the filters before the
                 * hashtable.
                 */
                if (add && !tc_skip_hw(ht->flags)) {
                        err = u32_reoffload_hnode(tp, ht, add, cb, cb_priv,
                                                  extack);
                        if (err)
                                return err;
                }

                for (h = 0; h <= ht->divisor; h++) {
                        for (n = rtnl_dereference(ht->ht[h]);
                             n;
                             n = rtnl_dereference(n->next)) {
                                if (tc_skip_hw(n->flags))
                                        continue;

                                err = u32_reoffload_knode(tp, n, add, cb,
                                                          cb_priv, extack);
                                if (err)
                                        return err;
                        }
                }

                if (!add && !tc_skip_hw(ht->flags))
                        u32_reoffload_hnode(tp, ht, add, cb, cb_priv, extack);
        }

        return 0;
}

static void u32_bind_class(void *fh, u32 classid, unsigned long cl, void *q,
                           unsigned long base)
{
        struct tc_u_knode *n = fh;

        if (n && n->res.classid == classid) {
                if (cl)
                        __tcf_bind_filter(q, &n->res, base);
                else
                        __tcf_unbind_filter(q, &n->res);
        }
}

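/* Dump one knode (or, when TC_U32_KEY(handle) == 0, one hash table) as
 * netlink attributes. Percpu match/hit counters are summed over all
 * possible CPUs before being reported to userspace.
 */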
static int u32_dump(struct net *net, struct tcf_proto *tp, void *fh,
                    struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
{
        struct tc_u_knode *n = fh;
        struct tc_u_hnode *ht_up, *ht_down;
        struct nlattr *nest;

        if (n == NULL)
                return skb->len;

        t->tcm_handle = n->handle;

        nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
        if (nest == NULL)
                goto nla_put_failure;

        if (TC_U32_KEY(n->handle) == 0) {
                struct tc_u_hnode *ht = fh;
                u32 divisor = ht->divisor + 1;

                if (nla_put_u32(skb, TCA_U32_DIVISOR, divisor))
                        goto nla_put_failure;
        } else {
#ifdef CONFIG_CLS_U32_PERF
                struct tc_u32_pcnt *gpf;
                int cpu;
#endif

                if (nla_put(skb, TCA_U32_SEL, struct_size(&n->sel, keys, n->sel.nkeys),
                            &n->sel))
                        goto nla_put_failure;

                ht_up = rtnl_dereference(n->ht_up);
                if (ht_up) {
                        u32 htid = n->handle & 0xFFFFF000;
                        if (nla_put_u32(skb, TCA_U32_HASH, htid))
                                goto nla_put_failure;
                }
                if (n->res.classid &&
                    nla_put_u32(skb, TCA_U32_CLASSID, n->res.classid))
                        goto nla_put_failure;

                ht_down = rtnl_dereference(n->ht_down);
                if (ht_down &&
                    nla_put_u32(skb, TCA_U32_LINK, ht_down->handle))
                        goto nla_put_failure;

                if (n->flags && nla_put_u32(skb, TCA_U32_FLAGS, n->flags))
                        goto nla_put_failure;

#ifdef CONFIG_CLS_U32_MARK
                if ((n->val || n->mask)) {
                        struct tc_u32_mark mark = {.val = n->val,
                                                   .mask = n->mask,
                                                   .success = 0};
                        int cpum;

                        for_each_possible_cpu(cpum) {
                                __u32 cnt = *per_cpu_ptr(n->pcpu_success, cpum);

                                mark.success += cnt;
                        }

                        if (nla_put(skb, TCA_U32_MARK, sizeof(mark), &mark))
                                goto nla_put_failure;
                }
#endif

                if (tcf_exts_dump(skb, &n->exts) < 0)
                        goto nla_put_failure;

                if (n->ifindex) {
                        struct net_device *dev;
                        dev = __dev_get_by_index(net, n->ifindex);
                        if (dev && nla_put_string(skb, TCA_U32_INDEV, dev->name))
                                goto nla_put_failure;
                }
#ifdef CONFIG_CLS_U32_PERF
                gpf = kzalloc(struct_size(gpf, kcnts, n->sel.nkeys), GFP_KERNEL);
                if (!gpf)
                        goto nla_put_failure;

                for_each_possible_cpu(cpu) {
                        int i;
                        struct tc_u32_pcnt *pf = per_cpu_ptr(n->pf, cpu);

                        gpf->rcnt += pf->rcnt;
                        gpf->rhit += pf->rhit;
                        for (i = 0; i < n->sel.nkeys; i++)
                                gpf->kcnts[i] += pf->kcnts[i];
                }

                if (nla_put_64bit(skb, TCA_U32_PCNT, struct_size(gpf, kcnts, n->sel.nkeys),
                                  gpf, TCA_U32_PAD)) {
                        kfree(gpf);
                        goto nla_put_failure;
                }
                kfree(gpf);
#endif
        }

        nla_nest_end(skb, nest);

        if (TC_U32_KEY(n->handle))
                if (tcf_exts_dump_stats(skb, &n->exts) < 0)
                        goto nla_put_failure;
        return skb->len;

nla_put_failure:
        nla_nest_cancel(skb, nest);
        return -1;
}

static struct tcf_proto_ops cls_u32_ops __read_mostly = {
        .kind           =       "u32",
        .classify       =       u32_classify,
        .init           =       u32_init,
        .destroy        =       u32_destroy,
        .get            =       u32_get,
        .change         =       u32_change,
        .delete         =       u32_delete,
        .walk           =       u32_walk,
        .reoffload      =       u32_reoffload,
        .dump           =       u32_dump,
        .bind_class     =       u32_bind_class,
        .owner          =       THIS_MODULE,
};

static int __init init_u32(void)
{
        int i, ret;

        pr_info("u32 classifier\n");
#ifdef CONFIG_CLS_U32_PERF
        pr_info("    Performance counters on\n");
#endif
        pr_info("    input device check on\n");
#ifdef CONFIG_NET_CLS_ACT
        pr_info("    Actions configured\n");
#endif
        tc_u_common_hash = kvmalloc_array(U32_HASH_SIZE,
                                          sizeof(struct hlist_head),
                                          GFP_KERNEL);
        if (!tc_u_common_hash)
                return -ENOMEM;

        for (i = 0; i < U32_HASH_SIZE; i++)
                INIT_HLIST_HEAD(&tc_u_common_hash[i]);

        ret = register_tcf_proto_ops(&cls_u32_ops);
        if (ret)
                kvfree(tc_u_common_hash);
        return ret;
}

static void __exit exit_u32(void)
{
        unregister_tcf_proto_ops(&cls_u32_ops);
        kvfree(tc_u_common_hash);
}

module_init(init_u32)
module_exit(exit_u32)
MODULE_LICENSE("GPL");