GNU Linux-libre 4.4.299-gnu1
[releases.git] / net / sched / cls_flower.c
1 /*
2  * net/sched/cls_flower.c               Flower classifier
3  *
4  * Copyright (c) 2015 Jiri Pirko <jiri@resnulli.us>
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License as published by
8  * the Free Software Foundation; either version 2 of the License, or
9  * (at your option) any later version.
10  */
11
12 #include <linux/kernel.h>
13 #include <linux/init.h>
14 #include <linux/module.h>
15 #include <linux/rhashtable.h>
16 #include <linux/workqueue.h>
17
18 #include <linux/if_ether.h>
19 #include <linux/in6.h>
20 #include <linux/ip.h>
21
22 #include <net/sch_generic.h>
23 #include <net/pkt_cls.h>
24 #include <net/ip.h>
25 #include <net/flow_dissector.h>
26
/* Lookup key for a flower filter.  This struct doubles as the rhashtable
 * key: only the byte window covered by the mask is hashed and compared,
 * so members must stay contiguous, and the whole struct is long-aligned
 * so fl_set_masked_key() can mask it word by word.
 */
struct fl_flow_key {
	int	indev_ifindex;	/* ingress device ifindex (0 = not matched) */
	struct flow_dissector_key_control control;
	struct flow_dissector_key_basic basic;
	struct flow_dissector_key_eth_addrs eth;
	struct flow_dissector_key_addrs ipaddrs;
	union {
		/* control.addr_type selects which member is valid */
		struct flow_dissector_key_ipv4_addrs ipv4;
		struct flow_dissector_key_ipv6_addrs ipv6;
	};
	struct flow_dissector_key_ports tp;
} __aligned(BITS_PER_LONG / 8); /* Ensure that we can do comparisons as longs. */
39
/* Byte window [start, end) of fl_flow_key that matching operates on;
 * both bounds are long-aligned (see fl_mask_update_range()).
 */
struct fl_flow_mask_range {
	unsigned short int start;
	unsigned short int end;
};
44
/* A match mask together with its populated byte window. */
struct fl_flow_mask {
	struct fl_flow_key key;		/* set bits select matched key bits */
	struct fl_flow_mask_range range; /* window of non-zero mask bytes */
	struct rcu_head rcu;
};
50
/* Per-tcf_proto instance state for the flower classifier. */
struct cls_fl_head {
	struct rhashtable ht;		/* masked key -> filter lookup */
	struct fl_flow_mask mask;	/* single mask shared by all filters */
	struct flow_dissector dissector; /* built from the mask's members */
	u32 hgen;			/* last auto-allocated handle */
	bool mask_assigned;		/* ht/dissector initialized for mask */
	struct list_head filters;	/* all filters (RCU list) */
	struct rhashtable_params ht_params; /* key window tuned per mask */
	union {
		/* teardown: rcu callback re-purposes the storage as a
		 * work item (see fl_destroy_rcu()/fl_destroy_sleepable())
		 */
		struct work_struct work;
		struct rcu_head rcu;
	};
};
64
/* One flower filter instance. */
struct cls_fl_filter {
	struct rhash_head ht_node;	/* hashtable linkage */
	struct fl_flow_key mkey;	/* key & mask: the actual hash key */
	struct tcf_exts exts;		/* attached actions */
	struct tcf_result res;		/* classification result (classid) */
	struct fl_flow_key key;		/* unmasked key, kept for dumping */
	struct list_head list;		/* linkage on head->filters */
	u32 handle;			/* user-visible filter handle */
	struct rcu_head rcu;
};
75
76 static unsigned short int fl_mask_range(const struct fl_flow_mask *mask)
77 {
78         return mask->range.end - mask->range.start;
79 }
80
/* Compute the smallest long-aligned byte window [start, end) of the mask
 * key that covers every non-zero mask byte.  Only this window is hashed
 * and compared at lookup time.  An all-zero mask yields the full key
 * (last keeps its size - 1 initializer).
 */
static void fl_mask_update_range(struct fl_flow_mask *mask)
{
	const u8 *bytes = (const u8 *) &mask->key;
	size_t size = sizeof(mask->key);
	size_t i, first = 0, last = size - 1;

	for (i = 0; i < sizeof(mask->key); i++) {
		if (bytes[i]) {
			/* record only the first non-zero offset; the "i"
			 * term merely skips the no-op assignment at i == 0
			 */
			if (!first && i)
				first = i;
			last = i;
		}
	}
	mask->range.start = rounddown(first, sizeof(long));
	mask->range.end = roundup(last + 1, sizeof(long));
}
97
98 static void *fl_key_get_start(struct fl_flow_key *key,
99                               const struct fl_flow_mask *mask)
100 {
101         return (u8 *) key + mask->range.start;
102 }
103
/* mkey = key & mask, computed long-by-long over the masked window only.
 * Relies on fl_flow_key being __aligned(BITS_PER_LONG / 8) and on the
 * window bounds being long-aligned (fl_mask_update_range()), so the
 * long-typed accesses are in-bounds and aligned.
 */
static void fl_set_masked_key(struct fl_flow_key *mkey, struct fl_flow_key *key,
			      struct fl_flow_mask *mask)
{
	const long *lkey = fl_key_get_start(key, mask);
	const long *lmask = fl_key_get_start(&mask->key, mask);
	long *lmkey = fl_key_get_start(mkey, mask);
	int i;

	for (i = 0; i < fl_mask_range(mask); i += sizeof(long))
		*lmkey++ = *lkey++ & *lmask++;
}
115
/* Zero the bytes of @key that lie inside the mask's populated window. */
static void fl_clear_masked_range(struct fl_flow_key *key,
				  struct fl_flow_mask *mask)
{
	void *start = fl_key_get_start(key, mask);
	unsigned short int len = fl_mask_range(mask);

	memset(start, 0, len);
}
121
/* ->classify: RCU-protected fast path.  Dissect the skb into an on-stack
 * flow key, mask it with the single per-head mask and look the masked
 * key up in the hashtable.  Returns the action verdict on a hit, -1 on
 * no match.
 */
static int fl_classify(struct sk_buff *skb, const struct tcf_proto *tp,
		       struct tcf_result *res)
{
	struct cls_fl_head *head = rcu_dereference_bh(tp->root);
	struct cls_fl_filter *f;
	struct fl_flow_key skb_key;
	struct fl_flow_key skb_mkey;

	/* Zero only the parts of the stack key that matching can see;
	 * bytes outside the mask window are never hashed or compared.
	 */
	flow_dissector_init_keys(&skb_key.control, &skb_key.basic);
	fl_clear_masked_range(&skb_key, &head->mask);
	skb_key.indev_ifindex = skb->skb_iif;
	/* skb_flow_dissect() does not set n_proto in case an unknown protocol,
	 * so do it rather here.
	 */
	skb_key.basic.n_proto = skb->protocol;
	skb_flow_dissect(skb, &head->dissector, &skb_key, 0);

	fl_set_masked_key(&skb_mkey, &skb_key, &head->mask);

	/* ht_params restricts the hash key to the masked window (see
	 * fl_init_hashtable()), so pass the window start, not &skb_mkey.
	 */
	f = rhashtable_lookup_fast(&head->ht,
				   fl_key_get_start(&skb_mkey, &head->mask),
				   head->ht_params);
	if (f) {
		*res = f->res;
		return tcf_exts_exec(skb, &f->exts, res);
	}
	return -1;
}
150
151 static int fl_init(struct tcf_proto *tp)
152 {
153         struct cls_fl_head *head;
154
155         head = kzalloc(sizeof(*head), GFP_KERNEL);
156         if (!head)
157                 return -ENOBUFS;
158
159         INIT_LIST_HEAD_RCU(&head->filters);
160         rcu_assign_pointer(tp->root, head);
161
162         return 0;
163 }
164
165 static void fl_destroy_filter(struct rcu_head *head)
166 {
167         struct cls_fl_filter *f = container_of(head, struct cls_fl_filter, rcu);
168
169         tcf_exts_destroy(&f->exts);
170         kfree(f);
171 }
172
/* Process-context half of head destruction.  rhashtable_destroy() may
 * sleep, so it cannot run in the RCU callback itself; fl_destroy_rcu()
 * bounces here via a workqueue.  The module_put() pairs with the
 * __module_get() taken in fl_destroy(), keeping the module pinned until
 * this deferred work has run.
 */
static void fl_destroy_sleepable(struct work_struct *work)
{
	struct cls_fl_head *head = container_of(work, struct cls_fl_head,
						work);
	/* the hashtable exists only once a mask was assigned */
	if (head->mask_assigned)
		rhashtable_destroy(&head->ht);
	kfree(head);
	module_put(THIS_MODULE);
}
182
/* RCU callback for head teardown: defer the sleeping cleanup to a
 * workqueue.  head->rcu and head->work share a union, so the rcu_head
 * storage can be re-initialized as the work item here.
 */
static void fl_destroy_rcu(struct rcu_head *rcu)
{
	struct cls_fl_head *head = container_of(rcu, struct cls_fl_head, rcu);

	INIT_WORK(&head->work, fl_destroy_sleepable);
	schedule_work(&head->work);
}
190
/* ->destroy (RTNL held).  With !force, refuse while filters remain.
 * Otherwise unlink every filter and free each after a grace period,
 * then tear the head down via call_rcu -> workqueue.  __module_get()
 * keeps the module alive until fl_destroy_sleepable() completes.
 */
static bool fl_destroy(struct tcf_proto *tp, bool force)
{
	struct cls_fl_head *head = rtnl_dereference(tp->root);
	struct cls_fl_filter *f, *next;

	if (!force && !list_empty(&head->filters))
		return false;

	/* No need to remove from the hashtable: it dies with the head. */
	list_for_each_entry_safe(f, next, &head->filters, list) {
		list_del_rcu(&f->list);
		call_rcu(&f->rcu, fl_destroy_filter);
	}

	__module_get(THIS_MODULE);
	call_rcu(&head->rcu, fl_destroy_rcu);
	return true;
}
208
209 static unsigned long fl_get(struct tcf_proto *tp, u32 handle)
210 {
211         struct cls_fl_head *head = rtnl_dereference(tp->root);
212         struct cls_fl_filter *f;
213
214         list_for_each_entry(f, &head->filters, list)
215                 if (f->handle == handle)
216                         return (unsigned long) f;
217         return 0;
218 }
219
/* Netlink attribute policy for TCA_FLOWER_* options.  Fields with no
 * *_MASK counterpart (ethertype, ip_proto, ports) are exact-match only.
 */
static const struct nla_policy fl_policy[TCA_FLOWER_MAX + 1] = {
	[TCA_FLOWER_UNSPEC]		= { .type = NLA_UNSPEC },
	[TCA_FLOWER_CLASSID]		= { .type = NLA_U32 },
	[TCA_FLOWER_INDEV]		= { .type = NLA_STRING,
					    .len = IFNAMSIZ },
	[TCA_FLOWER_KEY_ETH_DST]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ETH_DST_MASK]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ETH_SRC]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ETH_SRC_MASK]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ETH_TYPE]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_IP_PROTO]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_IPV4_SRC]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_IPV4_SRC_MASK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_IPV4_DST]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_IPV4_DST_MASK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_IPV6_SRC]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_IPV6_SRC_MASK]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_IPV6_DST]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_IPV6_DST_MASK]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_TCP_SRC]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_TCP_DST]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_UDP_SRC]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_UDP_DST]	= { .type = NLA_U16 },
};
244
245 static void fl_set_key_val(struct nlattr **tb,
246                            void *val, int val_type,
247                            void *mask, int mask_type, int len)
248 {
249         if (!tb[val_type])
250                 return;
251         memcpy(val, nla_data(tb[val_type]), len);
252         if (mask_type == TCA_FLOWER_UNSPEC || !tb[mask_type])
253                 memset(mask, 0xff, len);
254         else
255                 memcpy(mask, nla_data(tb[mask_type]), len);
256 }
257
/* Parse the TCA_FLOWER_KEY_* attributes into @key and build the
 * corresponding @mask.  Address and port attributes are only consumed
 * when the already-parsed ethertype / ip_proto make them meaningful.
 * Returns 0, or a negative errno from tcf_change_indev().
 */
static int fl_set_key(struct net *net, struct nlattr **tb,
		      struct fl_flow_key *key, struct fl_flow_key *mask)
{
#ifdef CONFIG_NET_CLS_IND
	if (tb[TCA_FLOWER_INDEV]) {
		int err = tcf_change_indev(net, tb[TCA_FLOWER_INDEV]);
		if (err < 0)
			return err;
		/* ingress device is always matched exactly */
		key->indev_ifindex = err;
		mask->indev_ifindex = 0xffffffff;
	}
#endif

	fl_set_key_val(tb, key->eth.dst, TCA_FLOWER_KEY_ETH_DST,
		       mask->eth.dst, TCA_FLOWER_KEY_ETH_DST_MASK,
		       sizeof(key->eth.dst));
	fl_set_key_val(tb, key->eth.src, TCA_FLOWER_KEY_ETH_SRC,
		       mask->eth.src, TCA_FLOWER_KEY_ETH_SRC_MASK,
		       sizeof(key->eth.src));

	/* TCA_FLOWER_UNSPEC mask_type: ethertype is exact-match only */
	fl_set_key_val(tb, &key->basic.n_proto, TCA_FLOWER_KEY_ETH_TYPE,
		       &mask->basic.n_proto, TCA_FLOWER_UNSPEC,
		       sizeof(key->basic.n_proto));

	if (key->basic.n_proto == htons(ETH_P_IP) ||
	    key->basic.n_proto == htons(ETH_P_IPV6)) {
		fl_set_key_val(tb, &key->basic.ip_proto, TCA_FLOWER_KEY_IP_PROTO,
			       &mask->basic.ip_proto, TCA_FLOWER_UNSPEC,
			       sizeof(key->basic.ip_proto));
	}

	/* v4 and v6 addresses live in a union: set addr_type to record
	 * which member is valid
	 */
	if (tb[TCA_FLOWER_KEY_IPV4_SRC] || tb[TCA_FLOWER_KEY_IPV4_DST]) {
		key->control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
		fl_set_key_val(tb, &key->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC,
			       &mask->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC_MASK,
			       sizeof(key->ipv4.src));
		fl_set_key_val(tb, &key->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST,
			       &mask->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST_MASK,
			       sizeof(key->ipv4.dst));
	} else if (tb[TCA_FLOWER_KEY_IPV6_SRC] || tb[TCA_FLOWER_KEY_IPV6_DST]) {
		key->control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
		fl_set_key_val(tb, &key->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC,
			       &mask->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC_MASK,
			       sizeof(key->ipv6.src));
		fl_set_key_val(tb, &key->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST,
			       &mask->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST_MASK,
			       sizeof(key->ipv6.dst));
	}

	if (key->basic.ip_proto == IPPROTO_TCP) {
		fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_TCP_SRC,
			       &mask->tp.src, TCA_FLOWER_UNSPEC,
			       sizeof(key->tp.src));
		fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_TCP_DST,
			       &mask->tp.dst, TCA_FLOWER_UNSPEC,
			       sizeof(key->tp.dst));
	} else if (key->basic.ip_proto == IPPROTO_UDP) {
		fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_UDP_SRC,
			       &mask->tp.src, TCA_FLOWER_UNSPEC,
			       sizeof(key->tp.src));
		fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_UDP_DST,
			       &mask->tp.dst, TCA_FLOWER_UNSPEC,
			       sizeof(key->tp.dst));
	}

	return 0;
}
325
/* Two masks are equal iff their populated windows coincide and the mask
 * bytes inside that window match.  The range comparison must come
 * first: the memcmp length (fl_mask_range(mask1)) is only valid once
 * both ranges are known to agree.
 */
static bool fl_mask_eq(struct fl_flow_mask *mask1,
		       struct fl_flow_mask *mask2)
{
	const long *lmask1 = fl_key_get_start(&mask1->key, mask1);
	const long *lmask2 = fl_key_get_start(&mask2->key, mask2);

	return !memcmp(&mask1->range, &mask2->range, sizeof(mask1->range)) &&
	       !memcmp(lmask1, lmask2, fl_mask_range(mask1));
}
335
/* Template hashtable parameters; key_offset and key_len are finalized
 * per mask by fl_init_hashtable().
 */
static const struct rhashtable_params fl_ht_params = {
	.key_offset = offsetof(struct cls_fl_filter, mkey), /* base offset */
	.head_offset = offsetof(struct cls_fl_filter, ht_node),
	.automatic_shrinking = true,
};
341
342 static int fl_init_hashtable(struct cls_fl_head *head,
343                              struct fl_flow_mask *mask)
344 {
345         head->ht_params = fl_ht_params;
346         head->ht_params.key_len = fl_mask_range(mask);
347         head->ht_params.key_offset += mask->range.start;
348
349         return rhashtable_init(&head->ht, &head->ht_params);
350 }
351
#define FL_KEY_MEMBER_OFFSET(member) offsetof(struct fl_flow_key, member)
#define FL_KEY_MEMBER_SIZE(member) (sizeof(((struct fl_flow_key *) 0)->member))

/* True if any byte of @member in @mask is non-zero, i.e. the member
 * participates in matching.
 */
#define FL_KEY_IS_MASKED(mask, member)						\
	memchr_inv(((char *)mask) + FL_KEY_MEMBER_OFFSET(member),		\
		   0, FL_KEY_MEMBER_SIZE(member))

/* Append dissector key @id for @member to keys[], bumping cnt.
 * No semicolon after while (0): the stray one in the original expansion
 * defeated the do/while(0) idiom and would break these macros inside an
 * unbraced if/else body.
 */
#define FL_KEY_SET(keys, cnt, id, member)					\
	do {									\
		keys[cnt].key_id = id;						\
		keys[cnt].offset = FL_KEY_MEMBER_OFFSET(member);		\
		cnt++;								\
	} while (0)

/* As FL_KEY_SET, but only when @member is actually masked. */
#define FL_KEY_SET_IF_MASKED(mask, keys, cnt, id, member)			\
	do {									\
		if (FL_KEY_IS_MASKED(mask, member))				\
			FL_KEY_SET(keys, cnt, id, member);			\
	} while (0)
371
/* Build the head's flow dissector key list from the assigned mask:
 * control and basic are always dissected; eth/ipv4/ipv6/ports only when
 * some byte of the corresponding mask member is set.  keys[] is sized
 * for the worst case of one entry per dissector key id.
 */
static void fl_init_dissector(struct cls_fl_head *head,
			      struct fl_flow_mask *mask)
{
	struct flow_dissector_key keys[FLOW_DISSECTOR_KEY_MAX];
	size_t cnt = 0;

	FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_CONTROL, control);
	FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_BASIC, basic);
	FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
			     FLOW_DISSECTOR_KEY_ETH_ADDRS, eth);
	FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
			     FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4);
	FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
			     FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6);
	FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
			     FLOW_DISSECTOR_KEY_PORTS, tp);

	skb_flow_dissector_init(&head->dissector, keys, cnt);
}
391
392 static int fl_check_assign_mask(struct cls_fl_head *head,
393                                 struct fl_flow_mask *mask)
394 {
395         int err;
396
397         if (head->mask_assigned) {
398                 if (!fl_mask_eq(&head->mask, mask))
399                         return -EINVAL;
400                 else
401                         return 0;
402         }
403
404         /* Mask is not assigned yet. So assign it and init hashtable
405          * according to that.
406          */
407         err = fl_init_hashtable(head, mask);
408         if (err)
409                 return err;
410         memcpy(&head->mask, mask, sizeof(head->mask));
411         head->mask_assigned = true;
412
413         fl_init_dissector(head, mask);
414
415         return 0;
416 }
417
/* Validate actions and parse key/mask for one filter.  On success the
 * validated extensions in the local @e are committed into f->exts via
 * tcf_exts_change(); on a fl_set_key() failure @e is destroyed instead.
 * A tcf_exts_validate() failure returns directly: @e holds nothing that
 * needs destroying at that point.
 */
static int fl_set_parms(struct net *net, struct tcf_proto *tp,
			struct cls_fl_filter *f, struct fl_flow_mask *mask,
			unsigned long base, struct nlattr **tb,
			struct nlattr *est, bool ovr)
{
	struct tcf_exts e;
	int err;

	tcf_exts_init(&e, TCA_FLOWER_ACT, 0);
	err = tcf_exts_validate(net, tp, tb, est, &e, ovr);
	if (err < 0)
		return err;

	if (tb[TCA_FLOWER_CLASSID]) {
		f->res.classid = nla_get_u32(tb[TCA_FLOWER_CLASSID]);
		tcf_bind_filter(tp, &f->res, base);
	}

	err = fl_set_key(net, tb, &f->key, &mask->key);
	if (err)
		goto errout;

	/* window must be current before the masked key is derived */
	fl_mask_update_range(mask);
	fl_set_masked_key(&f->mkey, &f->key, mask);

	tcf_exts_change(tp, &f->exts, &e);

	return 0;
errout:
	tcf_exts_destroy(&e);
	return err;
}
450
/* Allocate an unused filter handle by probing from head->hgen (RTNL
 * held).  Handles cycle within [1, 0x7FFFFFFE]; the value 0x7FFFFFFF
 * only triggers the wrap and is never handed out.  Returns 0 if all
 * 0x80000000 probes collide with existing filters.
 */
static u32 fl_grab_new_handle(struct tcf_proto *tp,
			      struct cls_fl_head *head)
{
	unsigned int i = 0x80000000;
	u32 handle;

	do {
		if (++head->hgen == 0x7FFFFFFF)
			head->hgen = 1;
	} while (--i > 0 && fl_get(tp, head->hgen));

	if (unlikely(i == 0)) {
		pr_err("Insufficient number of handles\n");
		handle = 0;
	} else {
		handle = head->hgen;
	}

	return handle;
}
471
/* ->change (RTNL held): create a new filter or replace an existing one
 * (@*arg/fold non-NULL).  A replacement is built as a fresh object and
 * swapped in RCU-safely: the new entry is inserted into the hashtable
 * before the old one is removed, and list linkage is switched with
 * list_replace_rcu(), so concurrent classify never misses.
 */
static int fl_change(struct net *net, struct sk_buff *in_skb,
		     struct tcf_proto *tp, unsigned long base,
		     u32 handle, struct nlattr **tca,
		     unsigned long *arg, bool ovr)
{
	struct cls_fl_head *head = rtnl_dereference(tp->root);
	struct cls_fl_filter *fold = (struct cls_fl_filter *) *arg;
	struct cls_fl_filter *fnew;
	struct nlattr *tb[TCA_FLOWER_MAX + 1];
	struct fl_flow_mask mask = {};
	int err;

	if (!tca[TCA_OPTIONS])
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_FLOWER_MAX, tca[TCA_OPTIONS], fl_policy);
	if (err < 0)
		return err;

	/* replacing under an explicit handle must target that handle */
	if (fold && handle && fold->handle != handle)
		return -EINVAL;

	fnew = kzalloc(sizeof(*fnew), GFP_KERNEL);
	if (!fnew)
		return -ENOBUFS;

	tcf_exts_init(&fnew->exts, TCA_FLOWER_ACT, 0);

	if (!handle) {
		handle = fl_grab_new_handle(tp, head);
		if (!handle) {
			err = -EINVAL;
			goto errout;
		}
	}
	fnew->handle = handle;

	err = fl_set_parms(net, tp, fnew, &mask, base, tb, tca[TCA_RATE], ovr);
	if (err)
		goto errout;

	/* adopts this mask (first filter) or verifies it matches the
	 * already-assigned one
	 */
	err = fl_check_assign_mask(head, &mask);
	if (err)
		goto errout;

	/* insert-new-then-remove-old keeps lookups hitting throughout */
	err = rhashtable_insert_fast(&head->ht, &fnew->ht_node,
				     head->ht_params);
	if (err)
		goto errout;
	if (fold)
		rhashtable_remove_fast(&head->ht, &fold->ht_node,
				       head->ht_params);

	*arg = (unsigned long) fnew;

	if (fold) {
		list_replace_rcu(&fold->list, &fnew->list);
		tcf_unbind_filter(tp, &fold->res);
		call_rcu(&fold->rcu, fl_destroy_filter);
	} else {
		list_add_tail_rcu(&fnew->list, &head->filters);
	}

	return 0;

errout:
	kfree(fnew);
	return err;
}
541
/* ->delete (RTNL held): unlink @arg from the hashtable and the RCU
 * list, drop its class binding and free it after a grace period.
 */
static int fl_delete(struct tcf_proto *tp, unsigned long arg)
{
	struct cls_fl_head *head = rtnl_dereference(tp->root);
	struct cls_fl_filter *f = (struct cls_fl_filter *) arg;

	rhashtable_remove_fast(&head->ht, &f->ht_node,
			       head->ht_params);
	list_del_rcu(&f->list);
	tcf_unbind_filter(tp, &f->res);
	call_rcu(&f->rcu, fl_destroy_filter);
	return 0;
}
554
/* ->walk (RTNL held): invoke arg->fn on each filter, honouring the
 * skip/count resume protocol; a negative fn result stops the walk with
 * arg->stop set.  Note count is incremented for skipped entries too, so
 * resumption offsets stay consistent.
 */
static void fl_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
	struct cls_fl_head *head = rtnl_dereference(tp->root);
	struct cls_fl_filter *f;

	list_for_each_entry_rcu(f, &head->filters, list) {
		if (arg->count < arg->skip)
			goto skip;
		if (arg->fn(tp, (unsigned long) f, arg) < 0) {
			arg->stop = 1;
			break;
		}
skip:
		arg->count++;
	}
}
571
572 static int fl_dump_key_val(struct sk_buff *skb,
573                            void *val, int val_type,
574                            void *mask, int mask_type, int len)
575 {
576         int err;
577
578         if (!memchr_inv(mask, 0, len))
579                 return 0;
580         err = nla_put(skb, val_type, len, val);
581         if (err)
582                 return err;
583         if (mask_type != TCA_FLOWER_UNSPEC) {
584                 err = nla_put(skb, mask_type, len, mask);
585                 if (err)
586                         return err;
587         }
588         return 0;
589 }
590
/* ->dump: emit one filter as nested TCA_OPTIONS attributes.  Fields
 * whose mask is all-zero are omitted by fl_dump_key_val().  Returns
 * skb->len on success, -1 after cancelling the nest on overflow.
 */
static int fl_dump(struct net *net, struct tcf_proto *tp, unsigned long fh,
		   struct sk_buff *skb, struct tcmsg *t)
{
	struct cls_fl_head *head = rtnl_dereference(tp->root);
	struct cls_fl_filter *f = (struct cls_fl_filter *) fh;
	struct nlattr *nest;
	struct fl_flow_key *key, *mask;

	if (!f)
		return skb->len;

	t->tcm_handle = f->handle;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (!nest)
		goto nla_put_failure;

	if (f->res.classid &&
	    nla_put_u32(skb, TCA_FLOWER_CLASSID, f->res.classid))
		goto nla_put_failure;

	/* dump the unmasked key against the single shared mask */
	key = &f->key;
	mask = &head->mask.key;

	if (mask->indev_ifindex) {
		struct net_device *dev;

		dev = __dev_get_by_index(net, key->indev_ifindex);
		if (dev && nla_put_string(skb, TCA_FLOWER_INDEV, dev->name))
			goto nla_put_failure;
	}

	if (fl_dump_key_val(skb, key->eth.dst, TCA_FLOWER_KEY_ETH_DST,
			    mask->eth.dst, TCA_FLOWER_KEY_ETH_DST_MASK,
			    sizeof(key->eth.dst)) ||
	    fl_dump_key_val(skb, key->eth.src, TCA_FLOWER_KEY_ETH_SRC,
			    mask->eth.src, TCA_FLOWER_KEY_ETH_SRC_MASK,
			    sizeof(key->eth.src)) ||
	    fl_dump_key_val(skb, &key->basic.n_proto, TCA_FLOWER_KEY_ETH_TYPE,
			    &mask->basic.n_proto, TCA_FLOWER_UNSPEC,
			    sizeof(key->basic.n_proto)))
		goto nla_put_failure;
	if ((key->basic.n_proto == htons(ETH_P_IP) ||
	     key->basic.n_proto == htons(ETH_P_IPV6)) &&
	    fl_dump_key_val(skb, &key->basic.ip_proto, TCA_FLOWER_KEY_IP_PROTO,
			    &mask->basic.ip_proto, TCA_FLOWER_UNSPEC,
			    sizeof(key->basic.ip_proto)))
		goto nla_put_failure;

	if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS &&
	    (fl_dump_key_val(skb, &key->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC,
			     &mask->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC_MASK,
			     sizeof(key->ipv4.src)) ||
	     fl_dump_key_val(skb, &key->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST,
			     &mask->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST_MASK,
			     sizeof(key->ipv4.dst))))
		goto nla_put_failure;
	else if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS &&
		 (fl_dump_key_val(skb, &key->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC,
				  &mask->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC_MASK,
				  sizeof(key->ipv6.src)) ||
		  fl_dump_key_val(skb, &key->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST,
				  &mask->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST_MASK,
				  sizeof(key->ipv6.dst))))
		goto nla_put_failure;

	if (key->basic.ip_proto == IPPROTO_TCP &&
	    (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_TCP_SRC,
			     &mask->tp.src, TCA_FLOWER_UNSPEC,
			     sizeof(key->tp.src)) ||
	     fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_TCP_DST,
			     &mask->tp.dst, TCA_FLOWER_UNSPEC,
			     sizeof(key->tp.dst))))
		goto nla_put_failure;
	else if (key->basic.ip_proto == IPPROTO_UDP &&
		 (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_UDP_SRC,
				  &mask->tp.src, TCA_FLOWER_UNSPEC,
				  sizeof(key->tp.src)) ||
		  fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_UDP_DST,
				  &mask->tp.dst, TCA_FLOWER_UNSPEC,
				  sizeof(key->tp.dst))))
		goto nla_put_failure;

	if (tcf_exts_dump(skb, &f->exts))
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	/* stats go outside the options nest */
	if (tcf_exts_dump_stats(skb, &f->exts) < 0)
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}
688
/* tc classifier ops table for the "flower" flow-based classifier. */
static struct tcf_proto_ops cls_fl_ops __read_mostly = {
	.kind		= "flower",
	.classify	= fl_classify,
	.init		= fl_init,
	.destroy	= fl_destroy,
	.get		= fl_get,
	.change		= fl_change,
	.delete		= fl_delete,
	.walk		= fl_walk,
	.dump		= fl_dump,
	.owner		= THIS_MODULE,
};
701
/* Module entry point: register the "flower" ops with the tc core. */
static int __init cls_fl_init(void)
{
	return register_tcf_proto_ops(&cls_fl_ops);
}
706
/* Module exit point: unregister the "flower" ops. */
static void __exit cls_fl_exit(void)
{
	unregister_tcf_proto_ops(&cls_fl_ops);
}
711
module_init(cls_fl_init);
module_exit(cls_fl_exit);

MODULE_AUTHOR("Jiri Pirko <jiri@resnulli.us>");
MODULE_DESCRIPTION("Flower classifier");
MODULE_LICENSE("GPL v2");