// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/cls_api.c  Packet classifier API.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Changes:
 *
 * Eduardo J. Blanco <ejbs@netlabs.com.uy> :990222: kmod support
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/jhash.h>
#include <linux/rculist.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_pedit.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_vlan.h>
#include <net/tc_act/tc_tunnel_key.h>
#include <net/tc_act/tc_csum.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_police.h>
#include <net/tc_act/tc_sample.h>
#include <net/tc_act/tc_skbedit.h>
#include <net/tc_act/tc_ct.h>
#include <net/tc_act/tc_mpls.h>
#include <net/tc_act/tc_gate.h>
#include <net/flow_offload.h>

/* The list of all installed classifier types */
static LIST_HEAD(tcf_proto_base);

/* Protects the list of registered TC modules. It is a pure SMP lock. */
static DEFINE_RWLOCK(cls_mod_lock);

#ifdef CONFIG_NET_CLS_ACT
DEFINE_STATIC_KEY_FALSE(tc_skb_ext_tc);
EXPORT_SYMBOL(tc_skb_ext_tc);

void tc_skb_ext_tc_enable(void)
{
	static_branch_inc(&tc_skb_ext_tc);
}
EXPORT_SYMBOL(tc_skb_ext_tc_enable);

void tc_skb_ext_tc_disable(void)
{
	static_branch_dec(&tc_skb_ext_tc);
}
EXPORT_SYMBOL(tc_skb_ext_tc_disable);
#endif
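
/* Illustrative sketch (assumed usage, not part of this file): users
 * that need tcf_classify() to record the last executed chain in an skb
 * extension pair these helpers around the lifetime of that need, so
 * the static branch stays patched out when nobody needs the extension:
 *
 *	tc_skb_ext_tc_enable();		on setup
 *	...
 *	tc_skb_ext_tc_disable();	on teardown
 */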

static u32 destroy_obj_hashfn(const struct tcf_proto *tp)
{
	return jhash_3words(tp->chain->index, tp->prio,
			    (__force __u32)tp->protocol, 0);
}

static void tcf_proto_signal_destroying(struct tcf_chain *chain,
					struct tcf_proto *tp)
{
	struct tcf_block *block = chain->block;

	mutex_lock(&block->proto_destroy_lock);
	hash_add_rcu(block->proto_destroy_ht, &tp->destroy_ht_node,
		     destroy_obj_hashfn(tp));
	mutex_unlock(&block->proto_destroy_lock);
}

static bool tcf_proto_cmp(const struct tcf_proto *tp1,
			  const struct tcf_proto *tp2)
{
	return tp1->chain->index == tp2->chain->index &&
	       tp1->prio == tp2->prio &&
	       tp1->protocol == tp2->protocol;
}

static bool tcf_proto_exists_destroying(struct tcf_chain *chain,
					struct tcf_proto *tp)
{
	u32 hash = destroy_obj_hashfn(tp);
	struct tcf_proto *iter;
	bool found = false;

	rcu_read_lock();
	hash_for_each_possible_rcu(chain->block->proto_destroy_ht, iter,
				   destroy_ht_node, hash) {
		if (tcf_proto_cmp(tp, iter)) {
			found = true;
			break;
		}
	}
	rcu_read_unlock();

	return found;
}

static void
tcf_proto_signal_destroyed(struct tcf_chain *chain, struct tcf_proto *tp)
{
	struct tcf_block *block = chain->block;

	mutex_lock(&block->proto_destroy_lock);
	if (hash_hashed(&tp->destroy_ht_node))
		hash_del_rcu(&tp->destroy_ht_node);
	mutex_unlock(&block->proto_destroy_lock);
}

/* Find classifier type by string name */

static const struct tcf_proto_ops *__tcf_proto_lookup_ops(const char *kind)
{
	const struct tcf_proto_ops *t, *res = NULL;

	if (kind) {
		read_lock(&cls_mod_lock);
		list_for_each_entry(t, &tcf_proto_base, head) {
			if (strcmp(kind, t->kind) == 0) {
				if (try_module_get(t->owner))
					res = t;
				break;
			}
		}
		read_unlock(&cls_mod_lock);
	}
	return res;
}

static const struct tcf_proto_ops *
tcf_proto_lookup_ops(const char *kind, bool rtnl_held,
		     struct netlink_ext_ack *extack)
{
	const struct tcf_proto_ops *ops;

	ops = __tcf_proto_lookup_ops(kind);
	if (ops)
		return ops;
#ifdef CONFIG_MODULES
	if (rtnl_held)
		rtnl_unlock();
	request_module("cls_%s", kind);
	if (rtnl_held)
		rtnl_lock();
	ops = __tcf_proto_lookup_ops(kind);
	/* We dropped the RTNL semaphore in order to perform
	 * the module load. So, even if we succeeded in loading
	 * the module we have to replay the request. We indicate
	 * this using -EAGAIN.
	 */
	if (ops) {
		module_put(ops->owner);
		return ERR_PTR(-EAGAIN);
	}
#endif
	NL_SET_ERR_MSG(extack, "TC classifier not found");
	return ERR_PTR(-ENOENT);
}
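
/* Illustrative caller-side sketch (assumed usage, not from this file):
 * because tcf_proto_lookup_ops() may drop RTNL around the module load
 * and then return -EAGAIN, callers are expected to replay the whole
 * request:
 *
 *	replay:
 *		tp = tcf_proto_create(name, protocol, prio, chain, true, extack);
 *		if (IS_ERR(tp)) {
 *			err = PTR_ERR(tp);
 *			if (err == -EAGAIN)
 *				goto replay;
 *		}
 */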

/* Register/unregister a new classifier type */

int register_tcf_proto_ops(struct tcf_proto_ops *ops)
{
	struct tcf_proto_ops *t;
	int rc = -EEXIST;

	write_lock(&cls_mod_lock);
	list_for_each_entry(t, &tcf_proto_base, head)
		if (!strcmp(ops->kind, t->kind))
			goto out;

	list_add_tail(&ops->head, &tcf_proto_base);
	rc = 0;
out:
	write_unlock(&cls_mod_lock);
	return rc;
}
EXPORT_SYMBOL(register_tcf_proto_ops);

static struct workqueue_struct *tc_filter_wq;

void unregister_tcf_proto_ops(struct tcf_proto_ops *ops)
{
	struct tcf_proto_ops *t;
	int rc = -ENOENT;

	/* Wait for outstanding call_rcu()s, if any, from a
	 * tcf_proto_ops's destroy() handler.
	 */
	rcu_barrier();
	flush_workqueue(tc_filter_wq);

	write_lock(&cls_mod_lock);
	list_for_each_entry(t, &tcf_proto_base, head) {
		if (t == ops) {
			list_del(&t->head);
			rc = 0;
			break;
		}
	}
	write_unlock(&cls_mod_lock);

	WARN(rc, "unregister tc filter kind(%s) failed %d\n", ops->kind, rc);
}
EXPORT_SYMBOL(unregister_tcf_proto_ops);
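
/* Illustrative sketch (hypothetical "example" classifier; all names
 * below are assumptions for demonstration): a classifier module
 * registers its ops on init and unregisters on exit. The module should
 * be named cls_example so that request_module("cls_%s", kind) above
 * can auto-load it:
 *
 *	static struct tcf_proto_ops cls_example_ops = {
 *		.kind		= "example",
 *		.owner		= THIS_MODULE,
 *		.classify	= example_classify,
 *		.init		= example_init,
 *		.destroy	= example_destroy,
 *	};
 *
 *	static int __init cls_example_init_module(void)
 *	{
 *		return register_tcf_proto_ops(&cls_example_ops);
 *	}
 *
 *	static void __exit cls_example_exit_module(void)
 *	{
 *		unregister_tcf_proto_ops(&cls_example_ops);
 *	}
 */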

bool tcf_queue_work(struct rcu_work *rwork, work_func_t func)
{
	INIT_RCU_WORK(rwork, func);
	return queue_rcu_work(tc_filter_wq, rwork);
}
EXPORT_SYMBOL(tcf_queue_work);
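
/* Illustrative sketch (assumed usage; "example_filter" is hypothetical):
 * classifiers embed a struct rcu_work in their filters and use
 * tcf_queue_work() to defer freeing until an RCU grace period has
 * elapsed:
 *
 *	static void example_delete_filter_work(struct work_struct *work)
 *	{
 *		struct example_filter *f = container_of(to_rcu_work(work),
 *							struct example_filter,
 *							rwork);
 *		kfree(f);
 *	}
 *
 *	and on filter removal:
 *
 *	tcf_queue_work(&f->rwork, example_delete_filter_work);
 */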

/* Select a new prio value from the kernel-managed range. */

static inline u32 tcf_auto_prio(struct tcf_proto *tp)
{
	u32 first = TC_H_MAKE(0xC0000000U, 0U);

	if (tp)
		first = tp->prio - 1;

	return TC_H_MAJ(first);
}
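
/* Worked example (illustrative): with no filters installed, the first
 * auto-selected prio is TC_H_MAJ(0xC0000000), i.e. 0xC000 as the
 * user-visible 16-bit priority. If the lowest existing prio is p << 16,
 * the next auto-selected one is (p - 1) << 16, one below it.
 */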

static bool tcf_proto_check_kind(struct nlattr *kind, char *name)
{
	if (kind)
		return nla_strscpy(name, kind, IFNAMSIZ) < 0;
	memset(name, 0, IFNAMSIZ);
	return false;
}

static bool tcf_proto_is_unlocked(const char *kind)
{
	const struct tcf_proto_ops *ops;
	bool ret;

	if (strlen(kind) == 0)
		return false;

	ops = tcf_proto_lookup_ops(kind, false, NULL);
	/* On error return false to take the rtnl lock. Proto lookup/create
	 * functions will perform the lookup again and properly handle errors.
	 */
	if (IS_ERR(ops))
		return false;

	ret = !!(ops->flags & TCF_PROTO_OPS_DOIT_UNLOCKED);
	module_put(ops->owner);
	return ret;
}

static struct tcf_proto *tcf_proto_create(const char *kind, u32 protocol,
					  u32 prio, struct tcf_chain *chain,
					  bool rtnl_held,
					  struct netlink_ext_ack *extack)
{
	struct tcf_proto *tp;
	int err;

	tp = kzalloc(sizeof(*tp), GFP_KERNEL);
	if (!tp)
		return ERR_PTR(-ENOBUFS);

	tp->ops = tcf_proto_lookup_ops(kind, rtnl_held, extack);
	if (IS_ERR(tp->ops)) {
		err = PTR_ERR(tp->ops);
		goto errout;
	}
	tp->classify = tp->ops->classify;
	tp->protocol = protocol;
	tp->prio = prio;
	tp->chain = chain;
	spin_lock_init(&tp->lock);
	refcount_set(&tp->refcnt, 1);

	err = tp->ops->init(tp);
	if (err) {
		module_put(tp->ops->owner);
		goto errout;
	}
	return tp;

errout:
	kfree(tp);
	return ERR_PTR(err);
}

static void tcf_proto_get(struct tcf_proto *tp)
{
	refcount_inc(&tp->refcnt);
}

static void tcf_chain_put(struct tcf_chain *chain);

static void tcf_proto_destroy(struct tcf_proto *tp, bool rtnl_held,
			      bool sig_destroy, struct netlink_ext_ack *extack)
{
	tp->ops->destroy(tp, rtnl_held, extack);
	if (sig_destroy)
		tcf_proto_signal_destroyed(tp->chain, tp);
	tcf_chain_put(tp->chain);
	module_put(tp->ops->owner);
	kfree_rcu(tp, rcu);
}

static void tcf_proto_put(struct tcf_proto *tp, bool rtnl_held,
			  struct netlink_ext_ack *extack)
{
	if (refcount_dec_and_test(&tp->refcnt))
		tcf_proto_destroy(tp, rtnl_held, true, extack);
}

static bool tcf_proto_check_delete(struct tcf_proto *tp)
{
	if (tp->ops->delete_empty)
		return tp->ops->delete_empty(tp);

	tp->deleting = true;
	return tp->deleting;
}

static void tcf_proto_mark_delete(struct tcf_proto *tp)
{
	spin_lock(&tp->lock);
	tp->deleting = true;
	spin_unlock(&tp->lock);
}

static bool tcf_proto_is_deleting(struct tcf_proto *tp)
{
	bool deleting;

	spin_lock(&tp->lock);
	deleting = tp->deleting;
	spin_unlock(&tp->lock);

	return deleting;
}

#define ASSERT_BLOCK_LOCKED(block)					\
	lockdep_assert_held(&(block)->lock)

struct tcf_filter_chain_list_item {
	struct list_head list;
	tcf_chain_head_change_t *chain_head_change;
	void *chain_head_change_priv;
};

static struct tcf_chain *tcf_chain_create(struct tcf_block *block,
					  u32 chain_index)
{
	struct tcf_chain *chain;

	ASSERT_BLOCK_LOCKED(block);

	chain = kzalloc(sizeof(*chain), GFP_KERNEL);
	if (!chain)
		return NULL;
	list_add_tail_rcu(&chain->list, &block->chain_list);
	mutex_init(&chain->filter_chain_lock);
	chain->block = block;
	chain->index = chain_index;
	chain->refcnt = 1;
	if (!chain->index)
		block->chain0.chain = chain;
	return chain;
}

static void tcf_chain_head_change_item(struct tcf_filter_chain_list_item *item,
				       struct tcf_proto *tp_head)
{
	if (item->chain_head_change)
		item->chain_head_change(tp_head, item->chain_head_change_priv);
}

static void tcf_chain0_head_change(struct tcf_chain *chain,
				   struct tcf_proto *tp_head)
{
	struct tcf_filter_chain_list_item *item;
	struct tcf_block *block = chain->block;

	if (chain->index)
		return;

	mutex_lock(&block->lock);
	list_for_each_entry(item, &block->chain0.filter_chain_list, list)
		tcf_chain_head_change_item(item, tp_head);
	mutex_unlock(&block->lock);
}

/* Returns true if block can be safely freed. */

static bool tcf_chain_detach(struct tcf_chain *chain)
{
	struct tcf_block *block = chain->block;

	ASSERT_BLOCK_LOCKED(block);

	list_del_rcu(&chain->list);
	if (!chain->index)
		block->chain0.chain = NULL;

	if (list_empty(&block->chain_list) &&
	    refcount_read(&block->refcnt) == 0)
		return true;

	return false;
}

static void tcf_block_destroy(struct tcf_block *block)
{
	mutex_destroy(&block->lock);
	mutex_destroy(&block->proto_destroy_lock);
	kfree_rcu(block, rcu);
}

static void tcf_chain_destroy(struct tcf_chain *chain, bool free_block)
{
	struct tcf_block *block = chain->block;

	mutex_destroy(&chain->filter_chain_lock);
	kfree_rcu(chain, rcu);
	if (free_block)
		tcf_block_destroy(block);
}

static void tcf_chain_hold(struct tcf_chain *chain)
{
	ASSERT_BLOCK_LOCKED(chain->block);

	++chain->refcnt;
}

static bool tcf_chain_held_by_acts_only(struct tcf_chain *chain)
{
	ASSERT_BLOCK_LOCKED(chain->block);

	/* In case all the references are action references, this
	 * chain should not be shown to the user.
	 */
	return chain->refcnt == chain->action_refcnt;
}

static struct tcf_chain *tcf_chain_lookup(struct tcf_block *block,
					  u32 chain_index)
{
	struct tcf_chain *chain;

	ASSERT_BLOCK_LOCKED(block);

	list_for_each_entry(chain, &block->chain_list, list) {
		if (chain->index == chain_index)
			return chain;
	}
	return NULL;
}

#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
static struct tcf_chain *tcf_chain_lookup_rcu(const struct tcf_block *block,
					      u32 chain_index)
{
	struct tcf_chain *chain;

	list_for_each_entry_rcu(chain, &block->chain_list, list) {
		if (chain->index == chain_index)
			return chain;
	}
	return NULL;
}
#endif

static int tc_chain_notify(struct tcf_chain *chain, struct sk_buff *oskb,
			   u32 seq, u16 flags, int event, bool unicast,
			   struct netlink_ext_ack *extack);

static struct tcf_chain *__tcf_chain_get(struct tcf_block *block,
					 u32 chain_index, bool create,
					 bool by_act)
{
	struct tcf_chain *chain = NULL;
	bool is_first_reference;

	mutex_lock(&block->lock);
	chain = tcf_chain_lookup(block, chain_index);
	if (chain) {
		tcf_chain_hold(chain);
	} else {
		if (!create)
			goto errout;
		chain = tcf_chain_create(block, chain_index);
		if (!chain)
			goto errout;
	}

	if (by_act)
		++chain->action_refcnt;
	is_first_reference = chain->refcnt - chain->action_refcnt == 1;
	mutex_unlock(&block->lock);

	/* Send a notification only in case we got the first
	 * non-action reference. Until then, the chain acts only as
	 * a placeholder for actions pointing to it, and the user ought
	 * not to know about it.
	 */
	if (is_first_reference && !by_act)
		tc_chain_notify(chain, NULL, 0, NLM_F_CREATE | NLM_F_EXCL,
				RTM_NEWCHAIN, false, NULL);

	return chain;

errout:
	mutex_unlock(&block->lock);
	return chain;
}

static struct tcf_chain *tcf_chain_get(struct tcf_block *block, u32 chain_index,
				       bool create)
{
	return __tcf_chain_get(block, chain_index, create, false);
}

struct tcf_chain *tcf_chain_get_by_act(struct tcf_block *block, u32 chain_index)
{
	return __tcf_chain_get(block, chain_index, true, true);
}
EXPORT_SYMBOL(tcf_chain_get_by_act);

static void tc_chain_tmplt_del(const struct tcf_proto_ops *tmplt_ops,
			       void *tmplt_priv);
static int tc_chain_notify_delete(const struct tcf_proto_ops *tmplt_ops,
				  void *tmplt_priv, u32 chain_index,
				  struct tcf_block *block, struct sk_buff *oskb,
				  u32 seq, u16 flags, bool unicast);

static void __tcf_chain_put(struct tcf_chain *chain, bool by_act,
			    bool explicitly_created)
{
	struct tcf_block *block = chain->block;
	const struct tcf_proto_ops *tmplt_ops;
	unsigned int refcnt, non_act_refcnt;
	bool free_block = false;
	void *tmplt_priv;

	mutex_lock(&block->lock);
	if (explicitly_created) {
		if (!chain->explicitly_created) {
			mutex_unlock(&block->lock);
			return;
		}
		chain->explicitly_created = false;
	}

	if (by_act)
		chain->action_refcnt--;

	/* tc_chain_notify_delete() can't be called while holding the block
	 * lock. However, once the block is unlocked the chain can change
	 * concurrently, so save these values to temporaries.
	 */
	refcnt = --chain->refcnt;
	non_act_refcnt = refcnt - chain->action_refcnt;
	tmplt_ops = chain->tmplt_ops;
	tmplt_priv = chain->tmplt_priv;

	if (non_act_refcnt == chain->explicitly_created && !by_act) {
		if (non_act_refcnt == 0)
			tc_chain_notify_delete(tmplt_ops, tmplt_priv,
					       chain->index, block, NULL, 0, 0,
					       false);
		/* Last reference to chain, no need to lock. */
		chain->flushing = false;
	}

	if (refcnt == 0)
		free_block = tcf_chain_detach(chain);
	mutex_unlock(&block->lock);

	if (refcnt == 0) {
		tc_chain_tmplt_del(tmplt_ops, tmplt_priv);
		tcf_chain_destroy(chain, free_block);
	}
}

static void tcf_chain_put(struct tcf_chain *chain)
{
	__tcf_chain_put(chain, false, false);
}

void tcf_chain_put_by_act(struct tcf_chain *chain)
{
	__tcf_chain_put(chain, true, false);
}
EXPORT_SYMBOL(tcf_chain_put_by_act);
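
/* Illustrative sketch (assumed usage): an action that jumps to a chain
 * (e.g. a goto-chain control) pins the target chain with an action
 * reference, which keeps the chain alive without making it visible to
 * the user, and drops it when the action is destroyed:
 *
 *	chain = tcf_chain_get_by_act(block, chain_index);
 *	if (!chain)
 *		return -ENOMEM;
 *	...
 *	tcf_chain_put_by_act(chain);
 */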

static void tcf_chain_put_explicitly_created(struct tcf_chain *chain)
{
	__tcf_chain_put(chain, false, true);
}

static void tcf_chain_flush(struct tcf_chain *chain, bool rtnl_held)
{
	struct tcf_proto *tp, *tp_next;

	mutex_lock(&chain->filter_chain_lock);
	tp = tcf_chain_dereference(chain->filter_chain, chain);
	while (tp) {
		tp_next = rcu_dereference_protected(tp->next, 1);
		tcf_proto_signal_destroying(chain, tp);
		tp = tp_next;
	}
	tp = tcf_chain_dereference(chain->filter_chain, chain);
	RCU_INIT_POINTER(chain->filter_chain, NULL);
	tcf_chain0_head_change(chain, NULL);
	chain->flushing = true;
	mutex_unlock(&chain->filter_chain_lock);

	while (tp) {
		tp_next = rcu_dereference_protected(tp->next, 1);
		tcf_proto_put(tp, rtnl_held, NULL);
		tp = tp_next;
	}
}

static int tcf_block_setup(struct tcf_block *block,
			   struct flow_block_offload *bo);

static void tcf_block_offload_init(struct flow_block_offload *bo,
				   struct net_device *dev, struct Qdisc *sch,
				   enum flow_block_command command,
				   enum flow_block_binder_type binder_type,
				   struct flow_block *flow_block,
				   bool shared, struct netlink_ext_ack *extack)
{
	bo->net = dev_net(dev);
	bo->command = command;
	bo->binder_type = binder_type;
	bo->block = flow_block;
	bo->block_shared = shared;
	bo->extack = extack;
	bo->sch = sch;
	bo->cb_list_head = &flow_block->cb_list;
	INIT_LIST_HEAD(&bo->cb_list);
}

static void tcf_block_unbind(struct tcf_block *block,
			     struct flow_block_offload *bo);

static void tc_block_indr_cleanup(struct flow_block_cb *block_cb)
{
	struct tcf_block *block = block_cb->indr.data;
	struct net_device *dev = block_cb->indr.dev;
	struct Qdisc *sch = block_cb->indr.sch;
	struct netlink_ext_ack extack = {};
	struct flow_block_offload bo = {};

	tcf_block_offload_init(&bo, dev, sch, FLOW_BLOCK_UNBIND,
			       block_cb->indr.binder_type,
			       &block->flow_block, tcf_block_shared(block),
			       &extack);
	rtnl_lock();
	down_write(&block->cb_lock);
	list_del(&block_cb->driver_list);
	list_move(&block_cb->list, &bo.cb_list);
	tcf_block_unbind(block, &bo);
	up_write(&block->cb_lock);
	rtnl_unlock();
}

static bool tcf_block_offload_in_use(struct tcf_block *block)
{
	return atomic_read(&block->offloadcnt);
}

static int tcf_block_offload_cmd(struct tcf_block *block,
				 struct net_device *dev, struct Qdisc *sch,
				 struct tcf_block_ext_info *ei,
				 enum flow_block_command command,
				 struct netlink_ext_ack *extack)
{
	struct flow_block_offload bo = {};

	tcf_block_offload_init(&bo, dev, sch, command, ei->binder_type,
			       &block->flow_block, tcf_block_shared(block),
			       extack);

	if (dev->netdev_ops->ndo_setup_tc) {
		int err;

		err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_BLOCK, &bo);
		if (err < 0) {
			if (err != -EOPNOTSUPP)
				NL_SET_ERR_MSG(extack, "Driver ndo_setup_tc failed");
			return err;
		}

		return tcf_block_setup(block, &bo);
	}

	flow_indr_dev_setup_offload(dev, sch, TC_SETUP_BLOCK, block, &bo,
				    tc_block_indr_cleanup);
	tcf_block_setup(block, &bo);

	return -EOPNOTSUPP;
}

static int tcf_block_offload_bind(struct tcf_block *block, struct Qdisc *q,
				  struct tcf_block_ext_info *ei,
				  struct netlink_ext_ack *extack)
{
	struct net_device *dev = q->dev_queue->dev;
	int err;

	down_write(&block->cb_lock);

	/* If the tc offload feature is disabled and the block we try to bind
	 * to already has some offloaded filters, refuse the bind.
	 */
	if (dev->netdev_ops->ndo_setup_tc &&
	    !tc_can_offload(dev) &&
	    tcf_block_offload_in_use(block)) {
		NL_SET_ERR_MSG(extack, "Bind to offloaded block failed as dev has offload disabled");
		err = -EOPNOTSUPP;
		goto err_unlock;
	}

	err = tcf_block_offload_cmd(block, dev, q, ei, FLOW_BLOCK_BIND, extack);
	if (err == -EOPNOTSUPP)
		goto no_offload_dev_inc;
	if (err)
		goto err_unlock;

	up_write(&block->cb_lock);
	return 0;

no_offload_dev_inc:
	if (tcf_block_offload_in_use(block))
		goto err_unlock;

	err = 0;
	block->nooffloaddevcnt++;
err_unlock:
	up_write(&block->cb_lock);
	return err;
}

static void tcf_block_offload_unbind(struct tcf_block *block, struct Qdisc *q,
				     struct tcf_block_ext_info *ei)
{
	struct net_device *dev = q->dev_queue->dev;
	int err;

	down_write(&block->cb_lock);
	err = tcf_block_offload_cmd(block, dev, q, ei, FLOW_BLOCK_UNBIND, NULL);
	if (err == -EOPNOTSUPP)
		goto no_offload_dev_dec;
	up_write(&block->cb_lock);
	return;

no_offload_dev_dec:
	WARN_ON(block->nooffloaddevcnt-- == 0);
	up_write(&block->cb_lock);
}

static int
tcf_chain0_head_change_cb_add(struct tcf_block *block,
			      struct tcf_block_ext_info *ei,
			      struct netlink_ext_ack *extack)
{
	struct tcf_filter_chain_list_item *item;
	struct tcf_chain *chain0;

	item = kmalloc(sizeof(*item), GFP_KERNEL);
	if (!item) {
		NL_SET_ERR_MSG(extack, "Memory allocation for head change callback item failed");
		return -ENOMEM;
	}
	item->chain_head_change = ei->chain_head_change;
	item->chain_head_change_priv = ei->chain_head_change_priv;

	mutex_lock(&block->lock);
	chain0 = block->chain0.chain;
	if (chain0)
		tcf_chain_hold(chain0);
	else
		list_add(&item->list, &block->chain0.filter_chain_list);
	mutex_unlock(&block->lock);

	if (chain0) {
		struct tcf_proto *tp_head;

		mutex_lock(&chain0->filter_chain_lock);

		tp_head = tcf_chain_dereference(chain0->filter_chain, chain0);
		if (tp_head)
			tcf_chain_head_change_item(item, tp_head);

		mutex_lock(&block->lock);
		list_add(&item->list, &block->chain0.filter_chain_list);
		mutex_unlock(&block->lock);

		mutex_unlock(&chain0->filter_chain_lock);
		tcf_chain_put(chain0);
	}

	return 0;
}

static void
tcf_chain0_head_change_cb_del(struct tcf_block *block,
			      struct tcf_block_ext_info *ei)
{
	struct tcf_filter_chain_list_item *item;

	mutex_lock(&block->lock);
	list_for_each_entry(item, &block->chain0.filter_chain_list, list) {
		if ((!ei->chain_head_change && !ei->chain_head_change_priv) ||
		    (item->chain_head_change == ei->chain_head_change &&
		     item->chain_head_change_priv == ei->chain_head_change_priv)) {
			if (block->chain0.chain)
				tcf_chain_head_change_item(item, NULL);
			list_del(&item->list);
			mutex_unlock(&block->lock);

			kfree(item);
			return;
		}
	}
	mutex_unlock(&block->lock);
	WARN_ON(1);
}

struct tcf_net {
	spinlock_t idr_lock; /* Protects idr */
	struct idr idr;
};

static unsigned int tcf_net_id;

static int tcf_block_insert(struct tcf_block *block, struct net *net,
			    struct netlink_ext_ack *extack)
{
	struct tcf_net *tn = net_generic(net, tcf_net_id);
	int err;

	idr_preload(GFP_KERNEL);
	spin_lock(&tn->idr_lock);
	err = idr_alloc_u32(&tn->idr, block, &block->index, block->index,
			    GFP_NOWAIT);
	spin_unlock(&tn->idr_lock);
	idr_preload_end();

	return err;
}

static void tcf_block_remove(struct tcf_block *block, struct net *net)
{
	struct tcf_net *tn = net_generic(net, tcf_net_id);

	spin_lock(&tn->idr_lock);
	idr_remove(&tn->idr, block->index);
	spin_unlock(&tn->idr_lock);
}

static struct tcf_block *tcf_block_create(struct net *net, struct Qdisc *q,
					  u32 block_index,
					  struct netlink_ext_ack *extack)
{
	struct tcf_block *block;

	block = kzalloc(sizeof(*block), GFP_KERNEL);
	if (!block) {
		NL_SET_ERR_MSG(extack, "Memory allocation for block failed");
		return ERR_PTR(-ENOMEM);
	}
	mutex_init(&block->lock);
	mutex_init(&block->proto_destroy_lock);
	init_rwsem(&block->cb_lock);
	flow_block_init(&block->flow_block);
	INIT_LIST_HEAD(&block->chain_list);
	INIT_LIST_HEAD(&block->owner_list);
	INIT_LIST_HEAD(&block->chain0.filter_chain_list);

	refcount_set(&block->refcnt, 1);
	block->net = net;
	block->index = block_index;

	/* Don't store q pointer for blocks which are shared */
	if (!tcf_block_shared(block))
		block->q = q;
	return block;
}

static struct tcf_block *tcf_block_lookup(struct net *net, u32 block_index)
{
	struct tcf_net *tn = net_generic(net, tcf_net_id);

	return idr_find(&tn->idr, block_index);
}

static struct tcf_block *tcf_block_refcnt_get(struct net *net, u32 block_index)
{
	struct tcf_block *block;

	rcu_read_lock();
	block = tcf_block_lookup(net, block_index);
	if (block && !refcount_inc_not_zero(&block->refcnt))
		block = NULL;
	rcu_read_unlock();

	return block;
}

static struct tcf_chain *
__tcf_get_next_chain(struct tcf_block *block, struct tcf_chain *chain)
{
	mutex_lock(&block->lock);
	if (chain)
		chain = list_is_last(&chain->list, &block->chain_list) ?
			NULL : list_next_entry(chain, list);
	else
		chain = list_first_entry_or_null(&block->chain_list,
						 struct tcf_chain, list);

	/* skip all action-only chains */
	while (chain && tcf_chain_held_by_acts_only(chain))
		chain = list_is_last(&chain->list, &block->chain_list) ?
			NULL : list_next_entry(chain, list);

	if (chain)
		tcf_chain_hold(chain);
	mutex_unlock(&block->lock);

	return chain;
}

/* Function to be used by all clients that want to iterate over all chains on
 * a block. It properly obtains block->lock and takes a reference to the chain
 * before returning it. Users of this function must be tolerant to concurrent
 * chain insertion/deletion or ensure that no concurrent chain modification is
 * possible. Note that netlink dump callbacks cannot guarantee a consistent
 * dump because the rtnl lock is released each time the skb is filled with
 * data and sent to user space.
 */

struct tcf_chain *
tcf_get_next_chain(struct tcf_block *block, struct tcf_chain *chain)
{
	struct tcf_chain *chain_next = __tcf_get_next_chain(block, chain);

	if (chain)
		tcf_chain_put(chain);

	return chain_next;
}
EXPORT_SYMBOL(tcf_get_next_chain);
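
/* Illustrative sketch (assumed usage): iterating all user-visible
 * chains on a block; the iterator takes a reference to the returned
 * chain and releases the one passed in, so a plain loop is safe:
 *
 *	struct tcf_chain *chain;
 *
 *	for (chain = tcf_get_next_chain(block, NULL);
 *	     chain;
 *	     chain = tcf_get_next_chain(block, chain)) {
 *		... use chain ...
 *	}
 */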

static struct tcf_proto *
__tcf_get_next_proto(struct tcf_chain *chain, struct tcf_proto *tp)
{
	u32 prio = 0;

	ASSERT_RTNL();
	mutex_lock(&chain->filter_chain_lock);

	if (!tp) {
		tp = tcf_chain_dereference(chain->filter_chain, chain);
	} else if (tcf_proto_is_deleting(tp)) {
		/* 'deleting' flag is set and chain->filter_chain_lock was
		 * unlocked, which means the next pointer could be invalid.
		 * Restart the search.
		 */
		prio = tp->prio + 1;
		tp = tcf_chain_dereference(chain->filter_chain, chain);

		for (; tp; tp = tcf_chain_dereference(tp->next, chain))
			if (!tp->deleting && tp->prio >= prio)
				break;
	} else {
		tp = tcf_chain_dereference(tp->next, chain);
	}

	if (tp)
		tcf_proto_get(tp);

	mutex_unlock(&chain->filter_chain_lock);

	return tp;
}

/* Function to be used by all clients that want to iterate over all tp's on
 * a chain. Users of this function must be tolerant to concurrent tp
 * insertion/deletion or ensure that no concurrent chain modification is
 * possible. Note that netlink dump callbacks cannot guarantee a consistent
 * dump because the rtnl lock is released each time the skb is filled with
 * data and sent to user space.
 */

struct tcf_proto *
tcf_get_next_proto(struct tcf_chain *chain, struct tcf_proto *tp)
{
	struct tcf_proto *tp_next = __tcf_get_next_proto(chain, tp);

	if (tp)
		tcf_proto_put(tp, true, NULL);

	return tp_next;
}
EXPORT_SYMBOL(tcf_get_next_proto);
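
/* Illustrative sketch (assumed usage): walking every classifier on a
 * chain; as above, each step releases the reference on the previous tp:
 *
 *	struct tcf_proto *tp;
 *
 *	for (tp = tcf_get_next_proto(chain, NULL);
 *	     tp;
 *	     tp = tcf_get_next_proto(chain, tp)) {
 *		... use tp ...
 *	}
 */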

static void tcf_block_flush_all_chains(struct tcf_block *block, bool rtnl_held)
{
	struct tcf_chain *chain;

	/* Last reference to block. At this point chains cannot be added or
	 * removed concurrently.
	 */
	for (chain = tcf_get_next_chain(block, NULL);
	     chain;
	     chain = tcf_get_next_chain(block, chain)) {
		tcf_chain_put_explicitly_created(chain);
		tcf_chain_flush(chain, rtnl_held);
	}
}

/* Look up the Qdisc and increment its reference counter.
 * Set parent, if necessary.
 */

static int __tcf_qdisc_find(struct net *net, struct Qdisc **q,
			    u32 *parent, int ifindex, bool rtnl_held,
			    struct netlink_ext_ack *extack)
{
	const struct Qdisc_class_ops *cops;
	struct net_device *dev;
	int err = 0;

	if (ifindex == TCM_IFINDEX_MAGIC_BLOCK)
		return 0;

	rcu_read_lock();

	/* Find link */
	dev = dev_get_by_index_rcu(net, ifindex);
	if (!dev) {
		rcu_read_unlock();
		return -ENODEV;
	}

	/* Find qdisc */
	if (!*parent) {
		*q = rcu_dereference(dev->qdisc);
		*parent = (*q)->handle;
	} else {
		*q = qdisc_lookup_rcu(dev, TC_H_MAJ(*parent));
		if (!*q) {
			NL_SET_ERR_MSG(extack, "Parent Qdisc doesn't exist");
			err = -EINVAL;
			goto errout_rcu;
		}
	}

	*q = qdisc_refcount_inc_nz(*q);
	if (!*q) {
		NL_SET_ERR_MSG(extack, "Parent Qdisc doesn't exist");
		err = -EINVAL;
		goto errout_rcu;
	}

	/* Is it classful? */
	cops = (*q)->ops->cl_ops;
	if (!cops) {
		NL_SET_ERR_MSG(extack, "Qdisc not classful");
		err = -EINVAL;
		goto errout_qdisc;
	}

	if (!cops->tcf_block) {
		NL_SET_ERR_MSG(extack, "Class doesn't support blocks");
		err = -EOPNOTSUPP;
		goto errout_qdisc;
	}

errout_rcu:
	/* At this point we know that qdisc is not noop_qdisc,
	 * which means that qdisc holds a reference to net_device
	 * and we hold a reference to qdisc, so it is safe to release
	 * rcu read lock.
	 */
	rcu_read_unlock();
	return err;

errout_qdisc:
	rcu_read_unlock();

	if (rtnl_held)
		qdisc_put(*q);
	else
		qdisc_put_unlocked(*q);
	*q = NULL;

	return err;
}

static int __tcf_qdisc_cl_find(struct Qdisc *q, u32 parent, unsigned long *cl,
			       int ifindex, struct netlink_ext_ack *extack)
{
	if (ifindex == TCM_IFINDEX_MAGIC_BLOCK)
		return 0;

	/* Do we search for a filter attached to a class? */
	if (TC_H_MIN(parent)) {
		const struct Qdisc_class_ops *cops = q->ops->cl_ops;

		*cl = cops->find(q, parent);
		if (*cl == 0) {
			NL_SET_ERR_MSG(extack, "Specified class doesn't exist");
			return -ENOENT;
		}
	}

	return 0;
}

static struct tcf_block *__tcf_block_find(struct net *net, struct Qdisc *q,
					  unsigned long cl, int ifindex,
					  u32 block_index,
					  struct netlink_ext_ack *extack)
{
	struct tcf_block *block;

	if (ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
		block = tcf_block_refcnt_get(net, block_index);
		if (!block) {
			NL_SET_ERR_MSG(extack, "Block of given index was not found");
			return ERR_PTR(-EINVAL);
		}
	} else {
		const struct Qdisc_class_ops *cops = q->ops->cl_ops;

		block = cops->tcf_block(q, cl, extack);
		if (!block)
			return ERR_PTR(-EINVAL);

		if (tcf_block_shared(block)) {
			NL_SET_ERR_MSG(extack, "This filter block is shared. Please use the block index to manipulate the filters");
			return ERR_PTR(-EOPNOTSUPP);
		}

		/* Always take a reference to the block in order to support
		 * execution of the rules-update path of the cls API without
		 * the rtnl lock. The caller must release the block when it
		 * is finished using it. The 'if' branch of this conditional
		 * obtains its reference by calling tcf_block_refcnt_get().
		 */
		refcount_inc(&block->refcnt);
	}

	return block;
}

static void __tcf_block_put(struct tcf_block *block, struct Qdisc *q,
			    struct tcf_block_ext_info *ei, bool rtnl_held)
{
	if (refcount_dec_and_mutex_lock(&block->refcnt, &block->lock)) {
		/* Flushing/putting all chains will cause the block to be
		 * deallocated when the last chain is freed. However, if
		 * chain_list is empty, the block has to be manually
		 * deallocated. After the block reference counter reaches 0,
		 * it is no longer possible to increment it or add new chains
		 * to the block.
		 */
		bool free_block = list_empty(&block->chain_list);

		mutex_unlock(&block->lock);
		if (tcf_block_shared(block))
			tcf_block_remove(block, block->net);

		if (q)
			tcf_block_offload_unbind(block, q, ei);

		if (free_block)
			tcf_block_destroy(block);
		else
			tcf_block_flush_all_chains(block, rtnl_held);
	} else if (q) {
		tcf_block_offload_unbind(block, q, ei);
	}
}

static void tcf_block_refcnt_put(struct tcf_block *block, bool rtnl_held)
{
	__tcf_block_put(block, NULL, NULL, rtnl_held);
}

/* Find tcf block.
 * Set q, parent, cl when appropriate.
 */

static struct tcf_block *tcf_block_find(struct net *net, struct Qdisc **q,
					u32 *parent, unsigned long *cl,
					int ifindex, u32 block_index,
					struct netlink_ext_ack *extack)
{
	struct tcf_block *block;
	int err = 0;

	ASSERT_RTNL();

	err = __tcf_qdisc_find(net, q, parent, ifindex, true, extack);
	if (err)
		goto errout;

	err = __tcf_qdisc_cl_find(*q, *parent, cl, ifindex, extack);
	if (err)
		goto errout_qdisc;

	block = __tcf_block_find(net, *q, *cl, ifindex, block_index, extack);
	if (IS_ERR(block)) {
		err = PTR_ERR(block);
		goto errout_qdisc;
	}

	return block;

errout_qdisc:
	if (*q)
		qdisc_put(*q);
errout:
	*q = NULL;
	return ERR_PTR(err);
}

static void tcf_block_release(struct Qdisc *q, struct tcf_block *block,
			      bool rtnl_held)
{
	if (!IS_ERR_OR_NULL(block))
		tcf_block_refcnt_put(block, rtnl_held);

	if (q) {
		if (rtnl_held)
			qdisc_put(q);
		else
			qdisc_put_unlocked(q);
	}
}

struct tcf_block_owner_item {
	struct list_head list;
	struct Qdisc *q;
	enum flow_block_binder_type binder_type;
};

static void
tcf_block_owner_netif_keep_dst(struct tcf_block *block,
			       struct Qdisc *q,
			       enum flow_block_binder_type binder_type)
{
	if (block->keep_dst &&
	    binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS &&
	    binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS)
		netif_keep_dst(qdisc_dev(q));
}

void tcf_block_netif_keep_dst(struct tcf_block *block)
{
	struct tcf_block_owner_item *item;

	block->keep_dst = true;
	list_for_each_entry(item, &block->owner_list, list)
		tcf_block_owner_netif_keep_dst(block, item->q,
					       item->binder_type);
}
EXPORT_SYMBOL(tcf_block_netif_keep_dst);

static int tcf_block_owner_add(struct tcf_block *block,
			       struct Qdisc *q,
			       enum flow_block_binder_type binder_type)
{
	struct tcf_block_owner_item *item;

	item = kmalloc(sizeof(*item), GFP_KERNEL);
	if (!item)
		return -ENOMEM;
	item->q = q;
	item->binder_type = binder_type;
	list_add(&item->list, &block->owner_list);
	return 0;
}

static void tcf_block_owner_del(struct tcf_block *block,
				struct Qdisc *q,
				enum flow_block_binder_type binder_type)
{
	struct tcf_block_owner_item *item;

	list_for_each_entry(item, &block->owner_list, list) {
		if (item->q == q && item->binder_type == binder_type) {
			list_del(&item->list);
			kfree(item);
			return;
		}
	}
	WARN_ON(1);
}

int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
		      struct tcf_block_ext_info *ei,
		      struct netlink_ext_ack *extack)
{
	struct net *net = qdisc_net(q);
	struct tcf_block *block = NULL;
	int err;

	if (ei->block_index)
		/* A non-zero block_index means a shared block is requested */
		block = tcf_block_refcnt_get(net, ei->block_index);

	if (!block) {
		block = tcf_block_create(net, q, ei->block_index, extack);
		if (IS_ERR(block))
			return PTR_ERR(block);
		if (tcf_block_shared(block)) {
			err = tcf_block_insert(block, net, extack);
			if (err)
				goto err_block_insert;
		}
	}

	err = tcf_block_owner_add(block, q, ei->binder_type);
	if (err)
		goto err_block_owner_add;

	tcf_block_owner_netif_keep_dst(block, q, ei->binder_type);

	err = tcf_chain0_head_change_cb_add(block, ei, extack);
	if (err)
		goto err_chain0_head_change_cb_add;

	err = tcf_block_offload_bind(block, q, ei, extack);
	if (err)
		goto err_block_offload_bind;

	*p_block = block;
	return 0;

err_block_offload_bind:
	tcf_chain0_head_change_cb_del(block, ei);
err_chain0_head_change_cb_add:
	tcf_block_owner_del(block, q, ei->binder_type);
err_block_owner_add:
err_block_insert:
	tcf_block_refcnt_put(block, true);
	return err;
}
EXPORT_SYMBOL(tcf_block_get_ext);

static void tcf_chain_head_change_dflt(struct tcf_proto *tp_head, void *priv)
{
	struct tcf_proto __rcu **p_filter_chain = priv;

	rcu_assign_pointer(*p_filter_chain, tp_head);
}

int tcf_block_get(struct tcf_block **p_block,
		  struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
		  struct netlink_ext_ack *extack)
{
	struct tcf_block_ext_info ei = {
		.chain_head_change = tcf_chain_head_change_dflt,
		.chain_head_change_priv = p_filter_chain,
	};

	WARN_ON(!p_filter_chain);
	return tcf_block_get_ext(p_block, q, &ei, extack);
}
EXPORT_SYMBOL(tcf_block_get);
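
/* Illustrative sketch (hypothetical qdisc; names are assumptions): a
 * classful qdisc typically obtains its block in ->init() and releases
 * it in ->destroy(), letting chain0 head changes update its cached
 * filter list pointer via tcf_chain_head_change_dflt() above:
 *
 *	static int example_qdisc_init(struct Qdisc *sch, struct nlattr *opt,
 *				      struct netlink_ext_ack *extack)
 *	{
 *		struct example_sched_data *q = qdisc_priv(sch);
 *
 *		return tcf_block_get(&q->block, &q->filter_list, sch, extack);
 *	}
 *
 *	static void example_qdisc_destroy(struct Qdisc *sch)
 *	{
 *		struct example_sched_data *q = qdisc_priv(sch);
 *
 *		tcf_block_put(q->block);
 *	}
 */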
1391
1392 /* XXX: Standalone actions are not allowed to jump to any chain, and bound
1393  * actions should be all removed after flushing.
1394  */
1395 void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
1396                        struct tcf_block_ext_info *ei)
1397 {
1398         if (!block)
1399                 return;
1400         tcf_chain0_head_change_cb_del(block, ei);
1401         tcf_block_owner_del(block, q, ei->binder_type);
1402
1403         __tcf_block_put(block, q, ei, true);
1404 }
1405 EXPORT_SYMBOL(tcf_block_put_ext);
1406
1407 void tcf_block_put(struct tcf_block *block)
1408 {
1409         struct tcf_block_ext_info ei = {0, };
1410
1411         if (!block)
1412                 return;
1413         tcf_block_put_ext(block, block->q, &ei);
1414 }
1415
1416 EXPORT_SYMBOL(tcf_block_put);
1417
1418 static int
1419 tcf_block_playback_offloads(struct tcf_block *block, flow_setup_cb_t *cb,
1420                             void *cb_priv, bool add, bool offload_in_use,
1421                             struct netlink_ext_ack *extack)
1422 {
1423         struct tcf_chain *chain, *chain_prev;
1424         struct tcf_proto *tp, *tp_prev;
1425         int err;
1426
1427         lockdep_assert_held(&block->cb_lock);
1428
1429         for (chain = __tcf_get_next_chain(block, NULL);
1430              chain;
1431              chain_prev = chain,
1432                      chain = __tcf_get_next_chain(block, chain),
1433                      tcf_chain_put(chain_prev)) {
1434                 for (tp = __tcf_get_next_proto(chain, NULL); tp;
1435                      tp_prev = tp,
1436                              tp = __tcf_get_next_proto(chain, tp),
1437                              tcf_proto_put(tp_prev, true, NULL)) {
1438                         if (tp->ops->reoffload) {
1439                                 err = tp->ops->reoffload(tp, add, cb, cb_priv,
1440                                                          extack);
1441                                 if (err && add)
1442                                         goto err_playback_remove;
1443                         } else if (add && offload_in_use) {
1444                                 err = -EOPNOTSUPP;
1445                                 NL_SET_ERR_MSG(extack, "Filter HW offload failed - classifier without re-offloading support");
1446                                 goto err_playback_remove;
1447                         }
1448                 }
1449         }
1450
1451         return 0;
1452
1453 err_playback_remove:
1454         tcf_proto_put(tp, true, NULL);
1455         tcf_chain_put(chain);
1456         tcf_block_playback_offloads(block, cb, cb_priv, false, offload_in_use,
1457                                     extack);
1458         return err;
1459 }
1460
1461 static int tcf_block_bind(struct tcf_block *block,
1462                           struct flow_block_offload *bo)
1463 {
1464         struct flow_block_cb *block_cb, *next;
1465         int err, i = 0;
1466
1467         lockdep_assert_held(&block->cb_lock);
1468
1469         list_for_each_entry(block_cb, &bo->cb_list, list) {
1470                 err = tcf_block_playback_offloads(block, block_cb->cb,
1471                                                   block_cb->cb_priv, true,
1472                                                   tcf_block_offload_in_use(block),
1473                                                   bo->extack);
1474                 if (err)
1475                         goto err_unroll;
1476                 if (!bo->unlocked_driver_cb)
1477                         block->lockeddevcnt++;
1478
1479                 i++;
1480         }
1481         list_splice(&bo->cb_list, &block->flow_block.cb_list);
1482
1483         return 0;
1484
1485 err_unroll:
1486         list_for_each_entry_safe(block_cb, next, &bo->cb_list, list) {
1487                 list_del(&block_cb->driver_list);
1488                 if (i-- > 0) {
1489                         list_del(&block_cb->list);
1490                         tcf_block_playback_offloads(block, block_cb->cb,
1491                                                     block_cb->cb_priv, false,
1492                                                     tcf_block_offload_in_use(block),
1493                                                     NULL);
1494                         if (!bo->unlocked_driver_cb)
1495                                 block->lockeddevcnt--;
1496                 }
1497                 flow_block_cb_free(block_cb);
1498         }
1499
1500         return err;
1501 }
1502
1503 static void tcf_block_unbind(struct tcf_block *block,
1504                              struct flow_block_offload *bo)
1505 {
1506         struct flow_block_cb *block_cb, *next;
1507
1508         lockdep_assert_held(&block->cb_lock);
1509
1510         list_for_each_entry_safe(block_cb, next, &bo->cb_list, list) {
1511                 tcf_block_playback_offloads(block, block_cb->cb,
1512                                             block_cb->cb_priv, false,
1513                                             tcf_block_offload_in_use(block),
1514                                             NULL);
1515                 list_del(&block_cb->list);
1516                 flow_block_cb_free(block_cb);
1517                 if (!bo->unlocked_driver_cb)
1518                         block->lockeddevcnt--;
1519         }
1520 }
1521
1522 static int tcf_block_setup(struct tcf_block *block,
1523                            struct flow_block_offload *bo)
1524 {
1525         int err;
1526
1527         switch (bo->command) {
1528         case FLOW_BLOCK_BIND:
1529                 err = tcf_block_bind(block, bo);
1530                 break;
1531         case FLOW_BLOCK_UNBIND:
1532                 err = 0;
1533                 tcf_block_unbind(block, bo);
1534                 break;
1535         default:
1536                 WARN_ON_ONCE(1);
1537                 err = -EOPNOTSUPP;
1538         }
1539
1540         return err;
1541 }
1542
1543 /* Main classifier routine: scans classifier chain attached
1544  * to this qdisc, (optionally) tests for protocol and asks
1545  * specific classifiers.
1546  */
1547 static inline int __tcf_classify(struct sk_buff *skb,
1548                                  const struct tcf_proto *tp,
1549                                  const struct tcf_proto *orig_tp,
1550                                  struct tcf_result *res,
1551                                  bool compat_mode,
1552                                  u32 *last_executed_chain)
1553 {
1554 #ifdef CONFIG_NET_CLS_ACT
1555         const int max_reclassify_loop = 16;
1556         const struct tcf_proto *first_tp;
1557         int limit = 0;
1558
1559 reclassify:
1560 #endif
1561         for (; tp; tp = rcu_dereference_bh(tp->next)) {
1562                 __be16 protocol = skb_protocol(skb, false);
1563                 int err;
1564
1565                 if (tp->protocol != protocol &&
1566                     tp->protocol != htons(ETH_P_ALL))
1567                         continue;
1568
1569                 err = tp->classify(skb, tp, res);
1570 #ifdef CONFIG_NET_CLS_ACT
1571                 if (unlikely(err == TC_ACT_RECLASSIFY && !compat_mode)) {
1572                         first_tp = orig_tp;
1573                         *last_executed_chain = first_tp->chain->index;
1574                         goto reset;
1575                 } else if (unlikely(TC_ACT_EXT_CMP(err, TC_ACT_GOTO_CHAIN))) {
1576                         first_tp = res->goto_tp;
1577                         *last_executed_chain = err & TC_ACT_EXT_VAL_MASK;
1578                         goto reset;
1579                 }
1580 #endif
1581                 if (err >= 0)
1582                         return err;
1583         }
1584
1585         return TC_ACT_UNSPEC; /* signal: continue lookup */
1586 #ifdef CONFIG_NET_CLS_ACT
1587 reset:
1588         if (unlikely(limit++ >= max_reclassify_loop)) {
1589                 net_notice_ratelimited("%u: reclassify loop, rule prio %u, protocol %02x\n",
1590                                        tp->chain->block->index,
1591                                        tp->prio & 0xffff,
1592                                        ntohs(tp->protocol));
1593                 return TC_ACT_SHOT;
1594         }
1595
1596         tp = first_tp;
1597         goto reclassify;
1598 #endif
1599 }
1600
1601 int tcf_classify(struct sk_buff *skb,
1602                  const struct tcf_block *block,
1603                  const struct tcf_proto *tp,
1604                  struct tcf_result *res, bool compat_mode)
1605 {
1606 #if !IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
1607         u32 last_executed_chain = 0;
1608
1609         return __tcf_classify(skb, tp, tp, res, compat_mode,
1610                               &last_executed_chain);
1611 #else
1612         u32 last_executed_chain = tp ? tp->chain->index : 0;
1613         const struct tcf_proto *orig_tp = tp;
1614         struct tc_skb_ext *ext;
1615         int ret;
1616
1617         if (block) {
1618                 ext = skb_ext_find(skb, TC_SKB_EXT);
1619
1620                 if (ext && ext->chain) {
1621                         struct tcf_chain *fchain;
1622
1623                         fchain = tcf_chain_lookup_rcu(block, ext->chain);
1624                         if (!fchain)
1625                                 return TC_ACT_SHOT;
1626
1627                         /* Consume, so cloned/redirect skbs won't inherit ext */
1628                         skb_ext_del(skb, TC_SKB_EXT);
1629
1630                         tp = rcu_dereference_bh(fchain->filter_chain);
1631                         last_executed_chain = fchain->index;
1632                 }
1633         }
1634
1635         ret = __tcf_classify(skb, tp, orig_tp, res, compat_mode,
1636                              &last_executed_chain);
1637
1638         if (tc_skb_ext_tc_enabled()) {
1639                 /* If we missed on some chain, record where to resume */
1640                 if (ret == TC_ACT_UNSPEC && last_executed_chain) {
1641                         struct tc_skb_cb *cb = tc_skb_cb(skb);
1642
1643                         ext = tc_skb_ext_alloc(skb);
1644                         if (WARN_ON_ONCE(!ext))
1645                                 return TC_ACT_SHOT;
1646                         ext->chain = last_executed_chain;
1647                         ext->mru = cb->mru;
1648                         ext->post_ct = cb->post_ct;
1649                         ext->post_ct_snat = cb->post_ct_snat;
1650                         ext->post_ct_dnat = cb->post_ct_dnat;
1651                         ext->zone = cb->zone;
1652                 }
1653         }
1654
1655         return ret;
1656 #endif
1657 }
1658 EXPORT_SYMBOL(tcf_classify);
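/* A minimal sketch of the qdisc-side caller, following the pattern used by
 * the classful qdiscs (example_* names are illustrative, not from this file):
 *
 *	static u32 example_classify(struct sk_buff *skb, struct Qdisc *sch)
 *	{
 *		struct example_sched_data *q = qdisc_priv(sch);
 *		struct tcf_proto *fl = rcu_dereference_bh(q->filter_list);
 *		struct tcf_result res;
 *		int result;
 *
 *		result = tcf_classify(skb, NULL, fl, &res, false);
 *		if (result < 0)
 *			return 0;
 *		switch (result) {
 *		case TC_ACT_SHOT:
 *		case TC_ACT_STOLEN:
 *		case TC_ACT_QUEUED:
 *		case TC_ACT_TRAP:
 *			return 0;
 *		}
 *		return TC_H_MIN(res.classid);
 *	}
 */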
1659
1660 struct tcf_chain_info {
1661         struct tcf_proto __rcu **pprev;
1662         struct tcf_proto __rcu *next;
1663 };
1664
1665 static struct tcf_proto *tcf_chain_tp_prev(struct tcf_chain *chain,
1666                                            struct tcf_chain_info *chain_info)
1667 {
1668         return tcf_chain_dereference(*chain_info->pprev, chain);
1669 }
1670
1671 static int tcf_chain_tp_insert(struct tcf_chain *chain,
1672                                struct tcf_chain_info *chain_info,
1673                                struct tcf_proto *tp)
1674 {
1675         if (chain->flushing)
1676                 return -EAGAIN;
1677
1678         RCU_INIT_POINTER(tp->next, tcf_chain_tp_prev(chain, chain_info));
1679         if (*chain_info->pprev == chain->filter_chain)
1680                 tcf_chain0_head_change(chain, tp);
1681         tcf_proto_get(tp);
1682         rcu_assign_pointer(*chain_info->pprev, tp);
1683
1684         return 0;
1685 }
1686
1687 static void tcf_chain_tp_remove(struct tcf_chain *chain,
1688                                 struct tcf_chain_info *chain_info,
1689                                 struct tcf_proto *tp)
1690 {
1691         struct tcf_proto *next = tcf_chain_dereference(chain_info->next, chain);
1692
1693         tcf_proto_mark_delete(tp);
1694         if (tp == chain->filter_chain)
1695                 tcf_chain0_head_change(chain, next);
1696         RCU_INIT_POINTER(*chain_info->pprev, next);
1697 }
1698
1699 static struct tcf_proto *tcf_chain_tp_find(struct tcf_chain *chain,
1700                                            struct tcf_chain_info *chain_info,
1701                                            u32 protocol, u32 prio,
1702                                            bool prio_allocate);
1703
1704 /* Try to insert new proto.
1705  * If proto with specified priority already exists, free new proto
1706  * and return existing one.
1707  */
1708
1709 static struct tcf_proto *tcf_chain_tp_insert_unique(struct tcf_chain *chain,
1710                                                     struct tcf_proto *tp_new,
1711                                                     u32 protocol, u32 prio,
1712                                                     bool rtnl_held)
1713 {
1714         struct tcf_chain_info chain_info;
1715         struct tcf_proto *tp;
1716         int err = 0;
1717
1718         mutex_lock(&chain->filter_chain_lock);
1719
1720         if (tcf_proto_exists_destroying(chain, tp_new)) {
1721                 mutex_unlock(&chain->filter_chain_lock);
1722                 tcf_proto_destroy(tp_new, rtnl_held, false, NULL);
1723                 return ERR_PTR(-EAGAIN);
1724         }
1725
1726         tp = tcf_chain_tp_find(chain, &chain_info,
1727                                protocol, prio, false);
1728         if (!tp)
1729                 err = tcf_chain_tp_insert(chain, &chain_info, tp_new);
1730         mutex_unlock(&chain->filter_chain_lock);
1731
1732         if (tp) {
1733                 tcf_proto_destroy(tp_new, rtnl_held, false, NULL);
1734                 tp_new = tp;
1735         } else if (err) {
1736                 tcf_proto_destroy(tp_new, rtnl_held, false, NULL);
1737                 tp_new = ERR_PTR(err);
1738         }
1739
1740         return tp_new;
1741 }
1742
1743 static void tcf_chain_tp_delete_empty(struct tcf_chain *chain,
1744                                       struct tcf_proto *tp, bool rtnl_held,
1745                                       struct netlink_ext_ack *extack)
1746 {
1747         struct tcf_chain_info chain_info;
1748         struct tcf_proto *tp_iter;
1749         struct tcf_proto **pprev;
1750         struct tcf_proto *next;
1751
1752         mutex_lock(&chain->filter_chain_lock);
1753
1754         /* Atomically find and remove tp from chain. */
1755         for (pprev = &chain->filter_chain;
1756              (tp_iter = tcf_chain_dereference(*pprev, chain));
1757              pprev = &tp_iter->next) {
1758                 if (tp_iter == tp) {
1759                         chain_info.pprev = pprev;
1760                         chain_info.next = tp_iter->next;
1761                         WARN_ON(tp_iter->deleting);
1762                         break;
1763                 }
1764         }
1765         /* Verify that tp still exists and no new filters were inserted
1766          * concurrently.
1767          * Mark tp for deletion if it is empty.
1768          */
1769         if (!tp_iter || !tcf_proto_check_delete(tp)) {
1770                 mutex_unlock(&chain->filter_chain_lock);
1771                 return;
1772         }
1773
1774         tcf_proto_signal_destroying(chain, tp);
1775         next = tcf_chain_dereference(chain_info.next, chain);
1776         if (tp == chain->filter_chain)
1777                 tcf_chain0_head_change(chain, next);
1778         RCU_INIT_POINTER(*chain_info.pprev, next);
1779         mutex_unlock(&chain->filter_chain_lock);
1780
1781         tcf_proto_put(tp, rtnl_held, extack);
1782 }
1783
1784 static struct tcf_proto *tcf_chain_tp_find(struct tcf_chain *chain,
1785                                            struct tcf_chain_info *chain_info,
1786                                            u32 protocol, u32 prio,
1787                                            bool prio_allocate)
1788 {
1789         struct tcf_proto **pprev;
1790         struct tcf_proto *tp;
1791
1792         /* Check the chain for existence of proto-tcf with this priority */
1793         for (pprev = &chain->filter_chain;
1794              (tp = tcf_chain_dereference(*pprev, chain));
1795              pprev = &tp->next) {
1796                 if (tp->prio >= prio) {
1797                         if (tp->prio == prio) {
1798                                 if (prio_allocate ||
1799                                     (tp->protocol != protocol && protocol))
1800                                         return ERR_PTR(-EINVAL);
1801                         } else {
1802                                 tp = NULL;
1803                         }
1804                         break;
1805                 }
1806         }
1807         chain_info->pprev = pprev;
1808         if (tp) {
1809                 chain_info->next = tp->next;
1810                 tcf_proto_get(tp);
1811         } else {
1812                 chain_info->next = NULL;
1813         }
1814         return tp;
1815 }
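/* Note on the invariant used above: protos on a chain are kept sorted by
 * ascending prio, and at most one proto instance exists per priority, so
 * requesting an existing prio with a different (non-zero) protocol fails
 * with -EINVAL rather than creating a duplicate.
 */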
1816
1817 static int tcf_fill_node(struct net *net, struct sk_buff *skb,
1818                          struct tcf_proto *tp, struct tcf_block *block,
1819                          struct Qdisc *q, u32 parent, void *fh,
1820                          u32 portid, u32 seq, u16 flags, int event,
1821                          bool terse_dump, bool rtnl_held,
1822                          struct netlink_ext_ack *extack)
1823 {
1824         struct tcmsg *tcm;
1825         struct nlmsghdr  *nlh;
1826         unsigned char *b = skb_tail_pointer(skb);
1827
1828         nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
1829         if (!nlh)
1830                 goto out_nlmsg_trim;
1831         tcm = nlmsg_data(nlh);
1832         tcm->tcm_family = AF_UNSPEC;
1833         tcm->tcm__pad1 = 0;
1834         tcm->tcm__pad2 = 0;
1835         if (q) {
1836                 tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
1837                 tcm->tcm_parent = parent;
1838         } else {
1839                 tcm->tcm_ifindex = TCM_IFINDEX_MAGIC_BLOCK;
1840                 tcm->tcm_block_index = block->index;
1841         }
1842         tcm->tcm_info = TC_H_MAKE(tp->prio, tp->protocol);
1843         if (nla_put_string(skb, TCA_KIND, tp->ops->kind))
1844                 goto nla_put_failure;
1845         if (nla_put_u32(skb, TCA_CHAIN, tp->chain->index))
1846                 goto nla_put_failure;
1847         if (!fh) {
1848                 tcm->tcm_handle = 0;
1849         } else if (terse_dump) {
1850                 if (tp->ops->terse_dump) {
1851                         if (tp->ops->terse_dump(net, tp, fh, skb, tcm,
1852                                                 rtnl_held) < 0)
1853                                 goto nla_put_failure;
1854                 } else {
1855                         goto cls_op_not_supp;
1856                 }
1857         } else {
1858                 if (tp->ops->dump &&
1859                     tp->ops->dump(net, tp, fh, skb, tcm, rtnl_held) < 0)
1860                         goto nla_put_failure;
1861         }
1862
1863         if (extack && extack->_msg &&
1864             nla_put_string(skb, TCA_EXT_WARN_MSG, extack->_msg))
1865                 goto nla_put_failure;
1866
1867         nlh->nlmsg_len = skb_tail_pointer(skb) - b;
1868
1869         return skb->len;
1870
1871 out_nlmsg_trim:
1872 nla_put_failure:
1873 cls_op_not_supp:
1874         nlmsg_trim(skb, b);
1875         return -1;
1876 }
1877
1878 static int tfilter_notify(struct net *net, struct sk_buff *oskb,
1879                           struct nlmsghdr *n, struct tcf_proto *tp,
1880                           struct tcf_block *block, struct Qdisc *q,
1881                           u32 parent, void *fh, int event, bool unicast,
1882                           bool rtnl_held, struct netlink_ext_ack *extack)
1883 {
1884         struct sk_buff *skb;
1885         u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
1886         int err = 0;
1887
1888         skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
1889         if (!skb)
1890                 return -ENOBUFS;
1891
1892         if (tcf_fill_node(net, skb, tp, block, q, parent, fh, portid,
1893                           n->nlmsg_seq, n->nlmsg_flags, event,
1894                           false, rtnl_held, extack) <= 0) {
1895                 kfree_skb(skb);
1896                 return -EINVAL;
1897         }
1898
1899         if (unicast)
1900                 err = rtnl_unicast(skb, net, portid);
1901         else
1902                 err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
1903                                      n->nlmsg_flags & NLM_F_ECHO);
1904         return err;
1905 }
1906
1907 static int tfilter_del_notify(struct net *net, struct sk_buff *oskb,
1908                               struct nlmsghdr *n, struct tcf_proto *tp,
1909                               struct tcf_block *block, struct Qdisc *q,
1910                               u32 parent, void *fh, bool unicast, bool *last,
1911                               bool rtnl_held, struct netlink_ext_ack *extack)
1912 {
1913         struct sk_buff *skb;
1914         u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
1915         int err;
1916
1917         skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
1918         if (!skb)
1919                 return -ENOBUFS;
1920
1921         if (tcf_fill_node(net, skb, tp, block, q, parent, fh, portid,
1922                           n->nlmsg_seq, n->nlmsg_flags, RTM_DELTFILTER,
1923                           false, rtnl_held, extack) <= 0) {
1924                 NL_SET_ERR_MSG(extack, "Failed to build del event notification");
1925                 kfree_skb(skb);
1926                 return -EINVAL;
1927         }
1928
1929         err = tp->ops->delete(tp, fh, last, rtnl_held, extack);
1930         if (err) {
1931                 kfree_skb(skb);
1932                 return err;
1933         }
1934
1935         if (unicast)
1936                 err = rtnl_unicast(skb, net, portid);
1937         else
1938                 err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
1939                                      n->nlmsg_flags & NLM_F_ECHO);
1940         if (err < 0)
1941                 NL_SET_ERR_MSG(extack, "Failed to send filter delete notification");
1942
1943         return err;
1944 }
1945
1946 static void tfilter_notify_chain(struct net *net, struct sk_buff *oskb,
1947                                  struct tcf_block *block, struct Qdisc *q,
1948                                  u32 parent, struct nlmsghdr *n,
1949                                  struct tcf_chain *chain, int event,
1950                                  struct netlink_ext_ack *extack)
1951 {
1952         struct tcf_proto *tp;
1953
1954         for (tp = tcf_get_next_proto(chain, NULL);
1955              tp; tp = tcf_get_next_proto(chain, tp))
1956                 tfilter_notify(net, oskb, n, tp, block, q, parent, NULL,
1957                                event, false, true, extack);
1958 }
1959
1960 static void tfilter_put(struct tcf_proto *tp, void *fh)
1961 {
1962         if (tp->ops->put && fh)
1963                 tp->ops->put(tp, fh);
1964 }
1965
1966 static int tc_new_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
1967                           struct netlink_ext_ack *extack)
1968 {
1969         struct net *net = sock_net(skb->sk);
1970         struct nlattr *tca[TCA_MAX + 1];
1971         char name[IFNAMSIZ];
1972         struct tcmsg *t;
1973         u32 protocol;
1974         u32 prio;
1975         bool prio_allocate;
1976         u32 parent;
1977         u32 chain_index;
1978         struct Qdisc *q;
1979         struct tcf_chain_info chain_info;
1980         struct tcf_chain *chain;
1981         struct tcf_block *block;
1982         struct tcf_proto *tp;
1983         unsigned long cl;
1984         void *fh;
1985         int err;
1986         int tp_created;
1987         bool rtnl_held = false;
1988         u32 flags;
1989
1990 replay:
1991         tp_created = 0;
1992
1993         err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
1994                                      rtm_tca_policy, extack);
1995         if (err < 0)
1996                 return err;
1997
1998         t = nlmsg_data(n);
1999         protocol = TC_H_MIN(t->tcm_info);
2000         prio = TC_H_MAJ(t->tcm_info);
2001         prio_allocate = false;
2002         parent = t->tcm_parent;
2003         tp = NULL;
2004         cl = 0;
2005         block = NULL;
2006         q = NULL;
2007         chain = NULL;
2008         flags = 0;
2009
2010         if (prio == 0) {
2011                 /* If no priority is provided by the user,
2012                  * we allocate one.
2013                  */
2014                 if (n->nlmsg_flags & NLM_F_CREATE) {
2015                         prio = TC_H_MAKE(0x80000000U, 0U);
2016                         prio_allocate = true;
2017                 } else {
2018                         NL_SET_ERR_MSG(extack, "Invalid filter command with priority of zero");
2019                         return -ENOENT;
2020                 }
2021         }
2022
2023         /* Find head of filter chain. */
2024
2025         err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack);
2026         if (err)
2027                 return err;
2028
2029         if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
2030                 NL_SET_ERR_MSG(extack, "Specified TC filter name too long");
2031                 err = -EINVAL;
2032                 goto errout;
2033         }
2034
2035         /* Take the rtnl mutex if rtnl_held was set on a previous iteration,
2036          * if the block is shared (no qdisc found), if the qdisc is not
2037          * unlocked, or if the classifier type is unspecified or not unlocked.
2038          */
2039         if (rtnl_held ||
2040             (q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) ||
2041             !tcf_proto_is_unlocked(name)) {
2042                 rtnl_held = true;
2043                 rtnl_lock();
2044         }
2045
2046         err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack);
2047         if (err)
2048                 goto errout;
2049
2050         block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index,
2051                                  extack);
2052         if (IS_ERR(block)) {
2053                 err = PTR_ERR(block);
2054                 goto errout;
2055         }
2056         block->classid = parent;
2057
2058         chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
2059         if (chain_index > TC_ACT_EXT_VAL_MASK) {
2060                 NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
2061                 err = -EINVAL;
2062                 goto errout;
2063         }
2064         chain = tcf_chain_get(block, chain_index, true);
2065         if (!chain) {
2066                 NL_SET_ERR_MSG(extack, "Cannot create specified filter chain");
2067                 err = -ENOMEM;
2068                 goto errout;
2069         }
2070
2071         mutex_lock(&chain->filter_chain_lock);
2072         tp = tcf_chain_tp_find(chain, &chain_info, protocol,
2073                                prio, prio_allocate);
2074         if (IS_ERR(tp)) {
2075                 NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found");
2076                 err = PTR_ERR(tp);
2077                 goto errout_locked;
2078         }
2079
2080         if (tp == NULL) {
2081                 struct tcf_proto *tp_new = NULL;
2082
2083                 if (chain->flushing) {
2084                         err = -EAGAIN;
2085                         goto errout_locked;
2086                 }
2087
2088                 /* Proto-tcf does not exist, create new one */
2089
2090                 if (tca[TCA_KIND] == NULL || !protocol) {
2091                         NL_SET_ERR_MSG(extack, "Filter kind and protocol must be specified");
2092                         err = -EINVAL;
2093                         goto errout_locked;
2094                 }
2095
2096                 if (!(n->nlmsg_flags & NLM_F_CREATE)) {
2097                         NL_SET_ERR_MSG(extack, "Need both RTM_NEWTFILTER and NLM_F_CREATE to create a new filter");
2098                         err = -ENOENT;
2099                         goto errout_locked;
2100                 }
2101
2102                 if (prio_allocate)
2103                         prio = tcf_auto_prio(tcf_chain_tp_prev(chain,
2104                                                                &chain_info));
2105
2106                 mutex_unlock(&chain->filter_chain_lock);
2107                 tp_new = tcf_proto_create(name, protocol, prio, chain,
2108                                           rtnl_held, extack);
2109                 if (IS_ERR(tp_new)) {
2110                         err = PTR_ERR(tp_new);
2111                         goto errout_tp;
2112                 }
2113
2114                 tp_created = 1;
2115                 tp = tcf_chain_tp_insert_unique(chain, tp_new, protocol, prio,
2116                                                 rtnl_held);
2117                 if (IS_ERR(tp)) {
2118                         err = PTR_ERR(tp);
2119                         goto errout_tp;
2120                 }
2121         } else {
2122                 mutex_unlock(&chain->filter_chain_lock);
2123         }
2124
2125         if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
2126                 NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one");
2127                 err = -EINVAL;
2128                 goto errout;
2129         }
2130
2131         fh = tp->ops->get(tp, t->tcm_handle);
2132
2133         if (!fh) {
2134                 if (!(n->nlmsg_flags & NLM_F_CREATE)) {
2135                         NL_SET_ERR_MSG(extack, "Need both RTM_NEWTFILTER and NLM_F_CREATE to create a new filter");
2136                         err = -ENOENT;
2137                         goto errout;
2138                 }
2139         } else if (n->nlmsg_flags & NLM_F_EXCL) {
2140                 tfilter_put(tp, fh);
2141                 NL_SET_ERR_MSG(extack, "Filter already exists");
2142                 err = -EEXIST;
2143                 goto errout;
2144         }
2145
2146         if (chain->tmplt_ops && chain->tmplt_ops != tp->ops) {
2147                 tfilter_put(tp, fh);
2148                 NL_SET_ERR_MSG(extack, "Chain template is set to a different filter kind");
2149                 err = -EINVAL;
2150                 goto errout;
2151         }
2152
2153         if (!(n->nlmsg_flags & NLM_F_CREATE))
2154                 flags |= TCA_ACT_FLAGS_REPLACE;
2155         if (!rtnl_held)
2156                 flags |= TCA_ACT_FLAGS_NO_RTNL;
2157         err = tp->ops->change(net, skb, tp, cl, t->tcm_handle, tca, &fh,
2158                               flags, extack);
2159         if (err == 0) {
2160                 tfilter_notify(net, skb, n, tp, block, q, parent, fh,
2161                                RTM_NEWTFILTER, false, rtnl_held, extack);
2162                 tfilter_put(tp, fh);
2163                 /* q pointer is NULL for shared blocks */
2164                 if (q)
2165                         q->flags &= ~TCQ_F_CAN_BYPASS;
2166         }
2167
2168 errout:
2169         if (err && tp_created)
2170                 tcf_chain_tp_delete_empty(chain, tp, rtnl_held, NULL);
2171 errout_tp:
2172         if (chain) {
2173                 if (tp && !IS_ERR(tp))
2174                         tcf_proto_put(tp, rtnl_held, NULL);
2175                 if (!tp_created)
2176                         tcf_chain_put(chain);
2177         }
2178         tcf_block_release(q, block, rtnl_held);
2179
2180         if (rtnl_held)
2181                 rtnl_unlock();
2182
2183         if (err == -EAGAIN) {
2184                 /* Take rtnl lock in case EAGAIN is caused by concurrent flush
2185                  * of target chain.
2186                  */
2187                 rtnl_held = true;
2188                 /* Replay the request. */
2189                 goto replay;
2190         }
2191         return err;
2192
2193 errout_locked:
2194         mutex_unlock(&chain->filter_chain_lock);
2195         goto errout;
2196 }
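/* Userspace reaches tc_new_tfilter() via RTM_NEWTFILTER, e.g. with an
 * illustrative command line such as:
 *
 *	tc filter add dev eth0 ingress protocol ip prio 10 \
 *		flower ip_proto tcp dst_port 80 action drop
 *
 * Omitting "prio 10" takes the prio_allocate path above: with NLM_F_CREATE
 * set, a free priority is picked via tcf_auto_prio().
 */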
2197
2198 static int tc_del_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
2199                           struct netlink_ext_ack *extack)
2200 {
2201         struct net *net = sock_net(skb->sk);
2202         struct nlattr *tca[TCA_MAX + 1];
2203         char name[IFNAMSIZ];
2204         struct tcmsg *t;
2205         u32 protocol;
2206         u32 prio;
2207         u32 parent;
2208         u32 chain_index;
2209         struct Qdisc *q = NULL;
2210         struct tcf_chain_info chain_info;
2211         struct tcf_chain *chain = NULL;
2212         struct tcf_block *block = NULL;
2213         struct tcf_proto *tp = NULL;
2214         unsigned long cl = 0;
2215         void *fh = NULL;
2216         int err;
2217         bool rtnl_held = false;
2218
2219         err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
2220                                      rtm_tca_policy, extack);
2221         if (err < 0)
2222                 return err;
2223
2224         t = nlmsg_data(n);
2225         protocol = TC_H_MIN(t->tcm_info);
2226         prio = TC_H_MAJ(t->tcm_info);
2227         parent = t->tcm_parent;
2228
2229         if (prio == 0 && (protocol || t->tcm_handle || tca[TCA_KIND])) {
2230                 NL_SET_ERR_MSG(extack, "Cannot flush filters with protocol, handle or kind set");
2231                 return -ENOENT;
2232         }
2233
2234         /* Find head of filter chain. */
2235
2236         err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack);
2237         if (err)
2238                 return err;
2239
2240         if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
2241                 NL_SET_ERR_MSG(extack, "Specified TC filter name too long");
2242                 err = -EINVAL;
2243                 goto errout;
2244         }
2245         /* Take the rtnl mutex if flushing the whole chain, if the block is
2246          * shared (no qdisc found), if the qdisc is not unlocked, or if the
2247          * classifier type is unspecified or not unlocked.
2248          */
2249         if (!prio ||
2250             (q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) ||
2251             !tcf_proto_is_unlocked(name)) {
2252                 rtnl_held = true;
2253                 rtnl_lock();
2254         }
2255
2256         err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack);
2257         if (err)
2258                 goto errout;
2259
2260         block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index,
2261                                  extack);
2262         if (IS_ERR(block)) {
2263                 err = PTR_ERR(block);
2264                 goto errout;
2265         }
2266
2267         chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
2268         if (chain_index > TC_ACT_EXT_VAL_MASK) {
2269                 NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
2270                 err = -EINVAL;
2271                 goto errout;
2272         }
2273         chain = tcf_chain_get(block, chain_index, false);
2274         if (!chain) {
2275                 /* User requested flush on non-existent chain. Nothing to do,
2276                  * so just return success.
2277                  */
2278                 if (prio == 0) {
2279                         err = 0;
2280                         goto errout;
2281                 }
2282                 NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
2283                 err = -ENOENT;
2284                 goto errout;
2285         }
2286
2287         if (prio == 0) {
2288                 tfilter_notify_chain(net, skb, block, q, parent, n,
2289                                      chain, RTM_DELTFILTER, extack);
2290                 tcf_chain_flush(chain, rtnl_held);
2291                 err = 0;
2292                 goto errout;
2293         }
2294
2295         mutex_lock(&chain->filter_chain_lock);
2296         tp = tcf_chain_tp_find(chain, &chain_info, protocol,
2297                                prio, false);
2298         if (!tp || IS_ERR(tp)) {
2299                 NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found");
2300                 err = tp ? PTR_ERR(tp) : -ENOENT;
2301                 goto errout_locked;
2302         } else if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
2303                 NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one");
2304                 err = -EINVAL;
2305                 goto errout_locked;
2306         } else if (t->tcm_handle == 0) {
2307                 tcf_proto_signal_destroying(chain, tp);
2308                 tcf_chain_tp_remove(chain, &chain_info, tp);
2309                 mutex_unlock(&chain->filter_chain_lock);
2310
2311                 tcf_proto_put(tp, rtnl_held, NULL);
2312                 tfilter_notify(net, skb, n, tp, block, q, parent, fh,
2313                                RTM_DELTFILTER, false, rtnl_held, extack);
2314                 err = 0;
2315                 goto errout;
2316         }
2317         mutex_unlock(&chain->filter_chain_lock);
2318
2319         fh = tp->ops->get(tp, t->tcm_handle);
2320
2321         if (!fh) {
2322                 NL_SET_ERR_MSG(extack, "Specified filter handle not found");
2323                 err = -ENOENT;
2324         } else {
2325                 bool last;
2326
2327                 err = tfilter_del_notify(net, skb, n, tp, block,
2328                                          q, parent, fh, false, &last,
2329                                          rtnl_held, extack);
2330
2331                 if (err)
2332                         goto errout;
2333                 if (last)
2334                         tcf_chain_tp_delete_empty(chain, tp, rtnl_held, extack);
2335         }
2336
2337 errout:
2338         if (chain) {
2339                 if (tp && !IS_ERR(tp))
2340                         tcf_proto_put(tp, rtnl_held, NULL);
2341                 tcf_chain_put(chain);
2342         }
2343         tcf_block_release(q, block, rtnl_held);
2344
2345         if (rtnl_held)
2346                 rtnl_unlock();
2347
2348         return err;
2349
2350 errout_locked:
2351         mutex_unlock(&chain->filter_chain_lock);
2352         goto errout;
2353 }
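/* Illustrative command lines for the two deletion modes handled above:
 *
 *	tc filter del dev eth0 ingress protocol ip prio 10	(single proto)
 *	tc filter del dev eth0 ingress				(prio == 0: flush)
 *
 * The flush form removes every filter on the chain, which is why it rejects
 * protocol, handle and kind qualifiers.
 */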
2354
2355 static int tc_get_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
2356                           struct netlink_ext_ack *extack)
2357 {
2358         struct net *net = sock_net(skb->sk);
2359         struct nlattr *tca[TCA_MAX + 1];
2360         char name[IFNAMSIZ];
2361         struct tcmsg *t;
2362         u32 protocol;
2363         u32 prio;
2364         u32 parent;
2365         u32 chain_index;
2366         struct Qdisc *q = NULL;
2367         struct tcf_chain_info chain_info;
2368         struct tcf_chain *chain = NULL;
2369         struct tcf_block *block = NULL;
2370         struct tcf_proto *tp = NULL;
2371         unsigned long cl = 0;
2372         void *fh = NULL;
2373         int err;
2374         bool rtnl_held = false;
2375
2376         err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
2377                                      rtm_tca_policy, extack);
2378         if (err < 0)
2379                 return err;
2380
2381         t = nlmsg_data(n);
2382         protocol = TC_H_MIN(t->tcm_info);
2383         prio = TC_H_MAJ(t->tcm_info);
2384         parent = t->tcm_parent;
2385
2386         if (prio == 0) {
2387                 NL_SET_ERR_MSG(extack, "Invalid filter command with priority of zero");
2388                 return -ENOENT;
2389         }
2390
2391         /* Find head of filter chain. */
2392
2393         err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack);
2394         if (err)
2395                 return err;
2396
2397         if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
2398                 NL_SET_ERR_MSG(extack, "Specified TC filter name too long");
2399                 err = -EINVAL;
2400                 goto errout;
2401         }
2402         /* Take the rtnl mutex if the block is shared (no qdisc found), if
2403          * the qdisc is not unlocked, or if the classifier type is
2404          * unspecified or not unlocked.
2405          */
2406         if ((q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) ||
2407             !tcf_proto_is_unlocked(name)) {
2408                 rtnl_held = true;
2409                 rtnl_lock();
2410         }
2411
2412         err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack);
2413         if (err)
2414                 goto errout;
2415
2416         block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index,
2417                                  extack);
2418         if (IS_ERR(block)) {
2419                 err = PTR_ERR(block);
2420                 goto errout;
2421         }
2422
2423         chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
2424         if (chain_index > TC_ACT_EXT_VAL_MASK) {
2425                 NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
2426                 err = -EINVAL;
2427                 goto errout;
2428         }
2429         chain = tcf_chain_get(block, chain_index, false);
2430         if (!chain) {
2431                 NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
2432                 err = -EINVAL;
2433                 goto errout;
2434         }
2435
2436         mutex_lock(&chain->filter_chain_lock);
2437         tp = tcf_chain_tp_find(chain, &chain_info, protocol,
2438                                prio, false);
2439         mutex_unlock(&chain->filter_chain_lock);
2440         if (!tp || IS_ERR(tp)) {
2441                 NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found");
2442                 err = tp ? PTR_ERR(tp) : -ENOENT;
2443                 goto errout;
2444         } else if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
2445                 NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one");
2446                 err = -EINVAL;
2447                 goto errout;
2448         }
2449
2450         fh = tp->ops->get(tp, t->tcm_handle);
2451
2452         if (!fh) {
2453                 NL_SET_ERR_MSG(extack, "Specified filter handle not found");
2454                 err = -ENOENT;
2455         } else {
2456                 err = tfilter_notify(net, skb, n, tp, block, q, parent,
2457                                      fh, RTM_NEWTFILTER, true, rtnl_held, NULL);
2458                 if (err < 0)
2459                         NL_SET_ERR_MSG(extack, "Failed to send filter notify message");
2460         }
2461
2462         tfilter_put(tp, fh);
2463 errout:
2464         if (chain) {
2465                 if (tp && !IS_ERR(tp))
2466                         tcf_proto_put(tp, rtnl_held, NULL);
2467                 tcf_chain_put(chain);
2468         }
2469         tcf_block_release(q, block, rtnl_held);
2470
2471         if (rtnl_held)
2472                 rtnl_unlock();
2473
2474         return err;
2475 }
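/* RTM_GETTFILTER without NLM_F_DUMP is handled above; an illustrative
 * command line:
 *
 *	tc filter get dev eth0 ingress protocol ip prio 10 handle 0x1 flower
 *
 * The reply is unicast back to the requester (unicast == true in the
 * tfilter_notify() call).
 */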
2476
2477 struct tcf_dump_args {
2478         struct tcf_walker w;
2479         struct sk_buff *skb;
2480         struct netlink_callback *cb;
2481         struct tcf_block *block;
2482         struct Qdisc *q;
2483         u32 parent;
2484         bool terse_dump;
2485 };
2486
2487 static int tcf_node_dump(struct tcf_proto *tp, void *n, struct tcf_walker *arg)
2488 {
2489         struct tcf_dump_args *a = (void *)arg;
2490         struct net *net = sock_net(a->skb->sk);
2491
2492         return tcf_fill_node(net, a->skb, tp, a->block, a->q, a->parent,
2493                              n, NETLINK_CB(a->cb->skb).portid,
2494                              a->cb->nlh->nlmsg_seq, NLM_F_MULTI,
2495                              RTM_NEWTFILTER, a->terse_dump, true, NULL);
2496 }
2497
2498 static bool tcf_chain_dump(struct tcf_chain *chain, struct Qdisc *q, u32 parent,
2499                            struct sk_buff *skb, struct netlink_callback *cb,
2500                            long index_start, long *p_index, bool terse)
2501 {
2502         struct net *net = sock_net(skb->sk);
2503         struct tcf_block *block = chain->block;
2504         struct tcmsg *tcm = nlmsg_data(cb->nlh);
2505         struct tcf_proto *tp, *tp_prev;
2506         struct tcf_dump_args arg;
2507
2508         for (tp = __tcf_get_next_proto(chain, NULL);
2509              tp;
2510              tp_prev = tp,
2511                      tp = __tcf_get_next_proto(chain, tp),
2512                      tcf_proto_put(tp_prev, true, NULL),
2513                      (*p_index)++) {
2514                 if (*p_index < index_start)
2515                         continue;
2516                 if (TC_H_MAJ(tcm->tcm_info) &&
2517                     TC_H_MAJ(tcm->tcm_info) != tp->prio)
2518                         continue;
2519                 if (TC_H_MIN(tcm->tcm_info) &&
2520                     TC_H_MIN(tcm->tcm_info) != tp->protocol)
2521                         continue;
2522                 if (*p_index > index_start)
2523                         memset(&cb->args[1], 0,
2524                                sizeof(cb->args) - sizeof(cb->args[0]));
2525                 if (cb->args[1] == 0) {
2526                         if (tcf_fill_node(net, skb, tp, block, q, parent, NULL,
2527                                           NETLINK_CB(cb->skb).portid,
2528                                           cb->nlh->nlmsg_seq, NLM_F_MULTI,
2529                                           RTM_NEWTFILTER, false, true, NULL) <= 0)
2530                                 goto errout;
2531                         cb->args[1] = 1;
2532                 }
2533                 if (!tp->ops->walk)
2534                         continue;
2535                 arg.w.fn = tcf_node_dump;
2536                 arg.skb = skb;
2537                 arg.cb = cb;
2538                 arg.block = block;
2539                 arg.q = q;
2540                 arg.parent = parent;
2541                 arg.w.stop = 0;
2542                 arg.w.skip = cb->args[1] - 1;
2543                 arg.w.count = 0;
2544                 arg.w.cookie = cb->args[2];
2545                 arg.terse_dump = terse;
2546                 tp->ops->walk(tp, &arg.w, true);
2547                 cb->args[2] = arg.w.cookie;
2548                 cb->args[1] = arg.w.count + 1;
2549                 if (arg.w.stop)
2550                         goto errout;
2551         }
2552         return true;
2553
2554 errout:
2555         tcf_proto_put(tp, true, NULL);
2556         return false;
2557 }
2558
2559 static const struct nla_policy tcf_tfilter_dump_policy[TCA_MAX + 1] = {
2560         [TCA_DUMP_FLAGS] = NLA_POLICY_BITFIELD32(TCA_DUMP_FLAGS_TERSE),
2561 };
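/* TCA_DUMP_FLAGS_TERSE asks classifiers to dump only what is needed to
 * identify a filter and its actions (see the terse_dump handling in
 * tcf_fill_node()); iproute2 requests it for brief output, e.g.
 * "tc -br filter show dev eth0 ingress" (illustrative).
 */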
2562
2563 /* called with RTNL */
2564 static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
2565 {
2566         struct tcf_chain *chain, *chain_prev;
2567         struct net *net = sock_net(skb->sk);
2568         struct nlattr *tca[TCA_MAX + 1];
2569         struct Qdisc *q = NULL;
2570         struct tcf_block *block;
2571         struct tcmsg *tcm = nlmsg_data(cb->nlh);
2572         bool terse_dump = false;
2573         long index_start;
2574         long index;
2575         u32 parent;
2576         int err;
2577
2578         if (nlmsg_len(cb->nlh) < sizeof(*tcm))
2579                 return skb->len;
2580
2581         err = nlmsg_parse_deprecated(cb->nlh, sizeof(*tcm), tca, TCA_MAX,
2582                                      tcf_tfilter_dump_policy, cb->extack);
2583         if (err)
2584                 return err;
2585
2586         if (tca[TCA_DUMP_FLAGS]) {
2587                 struct nla_bitfield32 flags =
2588                         nla_get_bitfield32(tca[TCA_DUMP_FLAGS]);
2589
2590                 terse_dump = flags.value & TCA_DUMP_FLAGS_TERSE;
2591         }
2592
2593         if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
2594                 block = tcf_block_refcnt_get(net, tcm->tcm_block_index);
2595                 if (!block)
2596                         goto out;
2597                 /* If we work with a block index, q is NULL and the parent
2598                  * value is never used in the code that follows; the check
2599                  * in tcf_fill_node prevents it. However, the compiler does
2600                  * not see that far, so set parent to zero to silence the
2601                  * warning about parent being uninitialized.
2602                  */
2603                 parent = 0;
2604         } else {
2605                 const struct Qdisc_class_ops *cops;
2606                 struct net_device *dev;
2607                 unsigned long cl = 0;
2608
2609                 dev = __dev_get_by_index(net, tcm->tcm_ifindex);
2610                 if (!dev)
2611                         return skb->len;
2612
2613                 parent = tcm->tcm_parent;
2614                 if (!parent)
2615                         q = rtnl_dereference(dev->qdisc);
2616                 else
2617                         q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent));
2618                 if (!q)
2619                         goto out;
2620                 cops = q->ops->cl_ops;
2621                 if (!cops)
2622                         goto out;
2623                 if (!cops->tcf_block)
2624                         goto out;
2625                 if (TC_H_MIN(tcm->tcm_parent)) {
2626                         cl = cops->find(q, tcm->tcm_parent);
2627                         if (cl == 0)
2628                                 goto out;
2629                 }
2630                 block = cops->tcf_block(q, cl, NULL);
2631                 if (!block)
2632                         goto out;
2633                 parent = block->classid;
2634                 if (tcf_block_shared(block))
2635                         q = NULL;
2636         }
2637
2638         index_start = cb->args[0];
2639         index = 0;
2640
2641         for (chain = __tcf_get_next_chain(block, NULL);
2642              chain;
2643              chain_prev = chain,
2644                      chain = __tcf_get_next_chain(block, chain),
2645                      tcf_chain_put(chain_prev)) {
2646                 if (tca[TCA_CHAIN] &&
2647                     nla_get_u32(tca[TCA_CHAIN]) != chain->index)
2648                         continue;
2649                 if (!tcf_chain_dump(chain, q, parent, skb, cb,
2650                                     index_start, &index, terse_dump)) {
2651                         tcf_chain_put(chain);
2652                         err = -EMSGSIZE;
2653                         break;
2654                 }
2655         }
2656
2657         if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK)
2658                 tcf_block_refcnt_put(block, true);
2659         cb->args[0] = index;
2660
2661 out:
2662         /* If we made no progress, the error (EMSGSIZE) is real */
2663         if (skb->len == 0 && err)
2664                 return err;
2665         return skb->len;
2666 }
2667
2668 static int tc_chain_fill_node(const struct tcf_proto_ops *tmplt_ops,
2669                               void *tmplt_priv, u32 chain_index,
2670                               struct net *net, struct sk_buff *skb,
2671                               struct tcf_block *block,
2672                               u32 portid, u32 seq, u16 flags, int event,
2673                               struct netlink_ext_ack *extack)
2674 {
2675         unsigned char *b = skb_tail_pointer(skb);
2676         const struct tcf_proto_ops *ops;
2677         struct nlmsghdr *nlh;
2678         struct tcmsg *tcm;
2679         void *priv;
2680
2681         ops = tmplt_ops;
2682         priv = tmplt_priv;
2683
2684         nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
2685         if (!nlh)
2686                 goto out_nlmsg_trim;
2687         tcm = nlmsg_data(nlh);
2688         tcm->tcm_family = AF_UNSPEC;
2689         tcm->tcm__pad1 = 0;
2690         tcm->tcm__pad2 = 0;
2691         tcm->tcm_handle = 0;
2692         if (block->q) {
2693                 tcm->tcm_ifindex = qdisc_dev(block->q)->ifindex;
2694                 tcm->tcm_parent = block->q->handle;
2695         } else {
2696                 tcm->tcm_ifindex = TCM_IFINDEX_MAGIC_BLOCK;
2697                 tcm->tcm_block_index = block->index;
2698         }
2699
2700         if (nla_put_u32(skb, TCA_CHAIN, chain_index))
2701                 goto nla_put_failure;
2702
2703         if (ops) {
2704                 if (nla_put_string(skb, TCA_KIND, ops->kind))
2705                         goto nla_put_failure;
2706                 if (ops->tmplt_dump(skb, net, priv) < 0)
2707                         goto nla_put_failure;
2708         }
2709
2710         if (extack && extack->_msg &&
2711             nla_put_string(skb, TCA_EXT_WARN_MSG, extack->_msg))
2712                 goto out_nlmsg_trim;
2713
2714         nlh->nlmsg_len = skb_tail_pointer(skb) - b;
2715
2716         return skb->len;
2717
2718 out_nlmsg_trim:
2719 nla_put_failure:
2720         nlmsg_trim(skb, b);
2721         return -EMSGSIZE;
2722 }
2723
2724 static int tc_chain_notify(struct tcf_chain *chain, struct sk_buff *oskb,
2725                            u32 seq, u16 flags, int event, bool unicast,
2726                            struct netlink_ext_ack *extack)
2727 {
2728         u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
2729         struct tcf_block *block = chain->block;
2730         struct net *net = block->net;
2731         struct sk_buff *skb;
2732         int err = 0;
2733
2734         skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
2735         if (!skb)
2736                 return -ENOBUFS;
2737
2738         if (tc_chain_fill_node(chain->tmplt_ops, chain->tmplt_priv,
2739                                chain->index, net, skb, block, portid,
2740                                seq, flags, event, extack) <= 0) {
2741                 kfree_skb(skb);
2742                 return -EINVAL;
2743         }
2744
2745         if (unicast)
2746                 err = rtnl_unicast(skb, net, portid);
2747         else
2748                 err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
2749                                      flags & NLM_F_ECHO);
2750
2751         return err;
2752 }
2753
2754 static int tc_chain_notify_delete(const struct tcf_proto_ops *tmplt_ops,
2755                                   void *tmplt_priv, u32 chain_index,
2756                                   struct tcf_block *block, struct sk_buff *oskb,
2757                                   u32 seq, u16 flags, bool unicast)
2758 {
2759         u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
2760         struct net *net = block->net;
2761         struct sk_buff *skb;
2762
2763         skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
2764         if (!skb)
2765                 return -ENOBUFS;
2766
2767         if (tc_chain_fill_node(tmplt_ops, tmplt_priv, chain_index, net, skb,
2768                                block, portid, seq, flags, RTM_DELCHAIN, NULL) <= 0) {
2769                 kfree_skb(skb);
2770                 return -EINVAL;
2771         }
2772
2773         if (unicast)
2774                 return rtnl_unicast(skb, net, portid);
2775
2776         return rtnetlink_send(skb, net, portid, RTNLGRP_TC, flags & NLM_F_ECHO);
2777 }
2778
2779 static int tc_chain_tmplt_add(struct tcf_chain *chain, struct net *net,
2780                               struct nlattr **tca,
2781                               struct netlink_ext_ack *extack)
2782 {
2783         const struct tcf_proto_ops *ops;
2784         char name[IFNAMSIZ];
2785         void *tmplt_priv;
2786
2787         /* If kind is not set, user did not specify template. */
2788         if (!tca[TCA_KIND])
2789                 return 0;
2790
2791         if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
2792                 NL_SET_ERR_MSG(extack, "Specified TC chain template name too long");
2793                 return -EINVAL;
2794         }
2795
2796         ops = tcf_proto_lookup_ops(name, true, extack);
2797         if (IS_ERR(ops))
2798                 return PTR_ERR(ops);
2799         if (!ops->tmplt_create || !ops->tmplt_destroy || !ops->tmplt_dump) {
2800                 NL_SET_ERR_MSG(extack, "Chain templates are not supported with specified classifier");
2801                 module_put(ops->owner);
2802                 return -EOPNOTSUPP;
2803         }
2804
2805         tmplt_priv = ops->tmplt_create(net, chain, tca, extack);
2806         if (IS_ERR(tmplt_priv)) {
2807                 module_put(ops->owner);
2808                 return PTR_ERR(tmplt_priv);
2809         }
2810         chain->tmplt_ops = ops;
2811         chain->tmplt_priv = tmplt_priv;
2812         return 0;
2813 }
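/* A chain template pins every future filter on the chain to one kind (and,
 * for kinds that support it, one mask shape); illustrative command line:
 *
 *	tc chain add dev eth0 ingress chain 1 protocol ip \
 *		flower dst_mac 00:00:00:00:00:00/ff:ff:ff:ff:ff:00
 *
 * tc_new_tfilter() then rejects filters on chain 1 whose kind differs from
 * the template (the chain->tmplt_ops check there).
 */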
2814
2815 static void tc_chain_tmplt_del(const struct tcf_proto_ops *tmplt_ops,
2816                                void *tmplt_priv)
2817 {
2818         /* If template ops are not set, there is no work to do. */
2819         if (!tmplt_ops)
2820                 return;
2821
2822         tmplt_ops->tmplt_destroy(tmplt_priv);
2823         module_put(tmplt_ops->owner);
2824 }
2825
2826 /* Add/delete/get a chain */
2827
2828 static int tc_ctl_chain(struct sk_buff *skb, struct nlmsghdr *n,
2829                         struct netlink_ext_ack *extack)
2830 {
2831         struct net *net = sock_net(skb->sk);
2832         struct nlattr *tca[TCA_MAX + 1];
2833         struct tcmsg *t;
2834         u32 parent;
2835         u32 chain_index;
2836         struct Qdisc *q;
2837         struct tcf_chain *chain;
2838         struct tcf_block *block;
2839         unsigned long cl;
2840         int err;
2841
2842 replay:
2843         q = NULL;
2844         err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
2845                                      rtm_tca_policy, extack);
2846         if (err < 0)
2847                 return err;
2848
2849         t = nlmsg_data(n);
2850         parent = t->tcm_parent;
2851         cl = 0;
2852
2853         block = tcf_block_find(net, &q, &parent, &cl,
2854                                t->tcm_ifindex, t->tcm_block_index, extack);
2855         if (IS_ERR(block))
2856                 return PTR_ERR(block);
2857
2858         chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
2859         if (chain_index > TC_ACT_EXT_VAL_MASK) {
2860                 NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
2861                 err = -EINVAL;
2862                 goto errout_block;
2863         }
2864
2865         mutex_lock(&block->lock);
2866         chain = tcf_chain_lookup(block, chain_index);
2867         if (n->nlmsg_type == RTM_NEWCHAIN) {
2868                 if (chain) {
2869                         if (tcf_chain_held_by_acts_only(chain)) {
2870                                 /* The chain exists only because there is
2871                                  * some action referencing it.
2872                                  */
2873                                 tcf_chain_hold(chain);
2874                         } else {
2875                                 NL_SET_ERR_MSG(extack, "Filter chain already exists");
2876                                 err = -EEXIST;
2877                                 goto errout_block_locked;
2878                         }
2879                 } else {
2880                         if (!(n->nlmsg_flags & NLM_F_CREATE)) {
2881                                 NL_SET_ERR_MSG(extack, "Need both RTM_NEWCHAIN and NLM_F_CREATE to create a new chain");
2882                                 err = -ENOENT;
2883                                 goto errout_block_locked;
2884                         }
2885                         chain = tcf_chain_create(block, chain_index);
2886                         if (!chain) {
2887                                 NL_SET_ERR_MSG(extack, "Failed to create filter chain");
2888                                 err = -ENOMEM;
2889                                 goto errout_block_locked;
2890                         }
2891                 }
2892         } else {
2893                 if (!chain || tcf_chain_held_by_acts_only(chain)) {
2894                         NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
2895                         err = -EINVAL;
2896                         goto errout_block_locked;
2897                 }
2898                 tcf_chain_hold(chain);
2899         }
2900
2901         if (n->nlmsg_type == RTM_NEWCHAIN) {
2902                 /* Modifying chain requires holding parent block lock. In case
2903                  * the chain was successfully added, take a reference to the
2904                  * chain. This ensures that an empty chain does not disappear at
2905                  * the end of this function.
2906                  */
2907                 tcf_chain_hold(chain);
2908                 chain->explicitly_created = true;
2909         }
2910         mutex_unlock(&block->lock);
2911
2912         switch (n->nlmsg_type) {
2913         case RTM_NEWCHAIN:
2914                 err = tc_chain_tmplt_add(chain, net, tca, extack);
2915                 if (err) {
2916                         tcf_chain_put_explicitly_created(chain);
2917                         goto errout;
2918                 }
2919
2920                 tc_chain_notify(chain, NULL, 0, NLM_F_CREATE | NLM_F_EXCL,
2921                                 RTM_NEWCHAIN, false, extack);
2922                 break;
2923         case RTM_DELCHAIN:
2924                 tfilter_notify_chain(net, skb, block, q, parent, n,
2925                                      chain, RTM_DELTFILTER, extack);
2926                 /* Flush the chain first as the user requested chain removal. */
2927                 tcf_chain_flush(chain, true);
2928                 /* In case the chain was successfully deleted, put a reference
2929                  * to the chain previously taken during addition.
2930                  */
2931                 tcf_chain_put_explicitly_created(chain);
2932                 break;
2933         case RTM_GETCHAIN:
2934                 err = tc_chain_notify(chain, skb, n->nlmsg_seq,
2935                                       n->nlmsg_flags, n->nlmsg_type, true, extack);
2936                 if (err < 0)
2937                         NL_SET_ERR_MSG(extack, "Failed to send chain notify message");
2938                 break;
2939         default:
2940                 err = -EOPNOTSUPP;
2941                 NL_SET_ERR_MSG(extack, "Unsupported message type");
2942                 goto errout;
2943         }
2944
2945 errout:
2946         tcf_chain_put(chain);
2947 errout_block:
2948         tcf_block_release(q, block, true);
2949         if (err == -EAGAIN)
2950                 /* Replay the request. */
2951                 goto replay;
2952         return err;
2953
2954 errout_block_locked:
2955         mutex_unlock(&block->lock);
2956         goto errout_block;
2957 }
2958
2959 /* called with RTNL */
2960 static int tc_dump_chain(struct sk_buff *skb, struct netlink_callback *cb)
2961 {
2962         struct net *net = sock_net(skb->sk);
2963         struct nlattr *tca[TCA_MAX + 1];
2964         struct Qdisc *q = NULL;
2965         struct tcf_block *block;
2966         struct tcmsg *tcm = nlmsg_data(cb->nlh);
2967         struct tcf_chain *chain;
2968         long index_start;
2969         long index;
2970         int err;
2971
2972         if (nlmsg_len(cb->nlh) < sizeof(*tcm))
2973                 return skb->len;
2974
2975         err = nlmsg_parse_deprecated(cb->nlh, sizeof(*tcm), tca, TCA_MAX,
2976                                      rtm_tca_policy, cb->extack);
2977         if (err)
2978                 return err;
2979
2980         if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
2981                 block = tcf_block_refcnt_get(net, tcm->tcm_block_index);
2982                 if (!block)
2983                         goto out;
2984         } else {
2985                 const struct Qdisc_class_ops *cops;
2986                 struct net_device *dev;
2987                 unsigned long cl = 0;
2988
2989                 dev = __dev_get_by_index(net, tcm->tcm_ifindex);
2990                 if (!dev)
2991                         return skb->len;
2992
2993                 if (!tcm->tcm_parent)
2994                         q = rtnl_dereference(dev->qdisc);
2995                 else
2996                         q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent));
2997
2998                 if (!q)
2999                         goto out;
3000                 cops = q->ops->cl_ops;
3001                 if (!cops)
3002                         goto out;
3003                 if (!cops->tcf_block)
3004                         goto out;
3005                 if (TC_H_MIN(tcm->tcm_parent)) {
3006                         cl = cops->find(q, tcm->tcm_parent);
3007                         if (cl == 0)
3008                                 goto out;
3009                 }
3010                 block = cops->tcf_block(q, cl, NULL);
3011                 if (!block)
3012                         goto out;
3013                 if (tcf_block_shared(block))
3014                         q = NULL;
3015         }
3016
3017         index_start = cb->args[0];
3018         index = 0;
3019
3020         mutex_lock(&block->lock);
3021         list_for_each_entry(chain, &block->chain_list, list) {
3022                 if (tca[TCA_CHAIN] &&
3023                     nla_get_u32(tca[TCA_CHAIN]) != chain->index)
3024                         continue;
3025                 if (index < index_start) {
3026                         index++;
3027                         continue;
3028                 }
3029                 if (tcf_chain_held_by_acts_only(chain))
3030                         continue;
3031                 err = tc_chain_fill_node(chain->tmplt_ops, chain->tmplt_priv,
3032                                          chain->index, net, skb, block,
3033                                          NETLINK_CB(cb->skb).portid,
3034                                          cb->nlh->nlmsg_seq, NLM_F_MULTI,
3035                                          RTM_NEWCHAIN, NULL);
3036                 if (err <= 0)
3037                         break;
3038                 index++;
3039         }
3040         mutex_unlock(&block->lock);
3041
3042         if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK)
3043                 tcf_block_refcnt_put(block, true);
3044         cb->args[0] = index;
3045
3046 out:
3047         /* If we made no progress, the error (EMSGSIZE) is real */
3048         if (skb->len == 0 && err)
3049                 return err;
3050         return skb->len;
3051 }
3052
3053 void tcf_exts_destroy(struct tcf_exts *exts)
3054 {
3055 #ifdef CONFIG_NET_CLS_ACT
3056         if (exts->actions) {
3057                 tcf_action_destroy(exts->actions, TCA_ACT_UNBIND);
3058                 kfree(exts->actions);
3059         }
3060         exts->nr_actions = 0;
3061 #endif
3062 }
3063 EXPORT_SYMBOL(tcf_exts_destroy);
3064
3065 int tcf_exts_validate_ex(struct net *net, struct tcf_proto *tp, struct nlattr **tb,
3066                          struct nlattr *rate_tlv, struct tcf_exts *exts,
3067                          u32 flags, u32 fl_flags, struct netlink_ext_ack *extack)
3068 {
3069 #ifdef CONFIG_NET_CLS_ACT
3070         {
3071                 int init_res[TCA_ACT_MAX_PRIO] = {};
3072                 struct tc_action *act;
3073                 size_t attr_size = 0;
3074
3075                 if (exts->police && tb[exts->police]) {
3076                         struct tc_action_ops *a_o;
3077
3078                         a_o = tc_action_load_ops(tb[exts->police], true,
3079                                                  !(flags & TCA_ACT_FLAGS_NO_RTNL),
3080                                                  extack);
3081                         if (IS_ERR(a_o))
3082                                 return PTR_ERR(a_o);
3083                         flags |= TCA_ACT_FLAGS_POLICE | TCA_ACT_FLAGS_BIND;
3084                         act = tcf_action_init_1(net, tp, tb[exts->police],
3085                                                 rate_tlv, a_o, init_res, flags,
3086                                                 extack);
3087                         module_put(a_o->owner);
3088                         if (IS_ERR(act))
3089                                 return PTR_ERR(act);
3090
3091                         act->type = exts->type = TCA_OLD_COMPAT;
3092                         exts->actions[0] = act;
3093                         exts->nr_actions = 1;
3094                         tcf_idr_insert_many(exts->actions);
3095                 } else if (exts->action && tb[exts->action]) {
3096                         int err;
3097
3098                         flags |= TCA_ACT_FLAGS_BIND;
3099                         err = tcf_action_init(net, tp, tb[exts->action],
3100                                               rate_tlv, exts->actions, init_res,
3101                                               &attr_size, flags, fl_flags,
3102                                               extack);
3103                         if (err < 0)
3104                                 return err;
3105                         exts->nr_actions = err;
3106                 }
3107         }
3108 #else
3109         if ((exts->action && tb[exts->action]) ||
3110             (exts->police && tb[exts->police])) {
3111                 NL_SET_ERR_MSG(extack, "Classifier actions are not supported per compile options (CONFIG_NET_CLS_ACT)");
3112                 return -EOPNOTSUPP;
3113         }
3114 #endif
3115
3116         return 0;
3117 }
3118 EXPORT_SYMBOL(tcf_exts_validate_ex);
3119
3120 int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb,
3121                       struct nlattr *rate_tlv, struct tcf_exts *exts,
3122                       u32 flags, struct netlink_ext_ack *extack)
3123 {
3124         return tcf_exts_validate_ex(net, tp, tb, rate_tlv, exts,
3125                                     flags, 0, extack);
3126 }
3127 EXPORT_SYMBOL(tcf_exts_validate);
3128
3129 void tcf_exts_change(struct tcf_exts *dst, struct tcf_exts *src)
3130 {
3131 #ifdef CONFIG_NET_CLS_ACT
3132         struct tcf_exts old = *dst;
3133
3134         *dst = *src;
3135         tcf_exts_destroy(&old);
3136 #endif
3137 }
3138 EXPORT_SYMBOL(tcf_exts_change);
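/* A minimal sketch of the usual pattern in a classifier's ->change()
 * callback (an assumption for illustration; "n", "est" and the TCA_FOO_*
 * attributes are hypothetical stand-ins): validate the new actions into a
 * temporary tcf_exts, then swap them in with tcf_exts_change(), which
 * frees the old actions via tcf_exts_destroy().
 *
 *	struct tcf_exts e;
 *	int err;
 *
 *	err = tcf_exts_init(&e, net, TCA_FOO_ACT, TCA_FOO_POLICE);
 *	if (err < 0)
 *		return err;
 *	err = tcf_exts_validate(net, tp, tb, est, &e, flags, extack);
 *	if (err < 0) {
 *		tcf_exts_destroy(&e);
 *		return err;
 *	}
 *	tcf_exts_change(&n->exts, &e);
 */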
3139
3140 #ifdef CONFIG_NET_CLS_ACT
3141 static struct tc_action *tcf_exts_first_act(struct tcf_exts *exts)
3142 {
3143         if (exts->nr_actions == 0)
3144                 return NULL;
3145         else
3146                 return exts->actions[0];
3147 }
3148 #endif
3149
3150 int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts)
3151 {
3152 #ifdef CONFIG_NET_CLS_ACT
3153         struct nlattr *nest;
3154
3155         if (exts->action && tcf_exts_has_actions(exts)) {
3156                 /*
3157                  * Again, for backward-compatible mode: we want to
3158                  * work with both old and new modes of entering
3159                  * tc data even if iproute2 is newer - jhs
3160                  */
3161                 if (exts->type != TCA_OLD_COMPAT) {
3162                         nest = nla_nest_start_noflag(skb, exts->action);
3163                         if (nest == NULL)
3164                                 goto nla_put_failure;
3165
3166                         if (tcf_action_dump(skb, exts->actions, 0, 0,
3167                                             false) < 0)
3168                                 goto nla_put_failure;
3169                         nla_nest_end(skb, nest);
3170                 } else if (exts->police) {
3171                         struct tc_action *act = tcf_exts_first_act(exts);
3172                         nest = nla_nest_start_noflag(skb, exts->police);
3173                         if (nest == NULL || !act)
3174                                 goto nla_put_failure;
3175                         if (tcf_action_dump_old(skb, act, 0, 0) < 0)
3176                                 goto nla_put_failure;
3177                         nla_nest_end(skb, nest);
3178                 }
3179         }
3180         return 0;
3181
3182 nla_put_failure:
3183         nla_nest_cancel(skb, nest);
3184         return -1;
3185 #else
3186         return 0;
3187 #endif
3188 }
3189 EXPORT_SYMBOL(tcf_exts_dump);
3190
3191 int tcf_exts_terse_dump(struct sk_buff *skb, struct tcf_exts *exts)
3192 {
3193 #ifdef CONFIG_NET_CLS_ACT
3194         struct nlattr *nest;
3195
3196         if (!exts->action || !tcf_exts_has_actions(exts))
3197                 return 0;
3198
3199         nest = nla_nest_start_noflag(skb, exts->action);
3200         if (!nest)
3201                 goto nla_put_failure;
3202
3203         if (tcf_action_dump(skb, exts->actions, 0, 0, true) < 0)
3204                 goto nla_put_failure;
3205         nla_nest_end(skb, nest);
3206         return 0;
3207
3208 nla_put_failure:
3209         nla_nest_cancel(skb, nest);
3210         return -1;
3211 #else
3212         return 0;
3213 #endif
3214 }
3215 EXPORT_SYMBOL(tcf_exts_terse_dump);
3216
3217 int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts)
3218 {
3219 #ifdef CONFIG_NET_CLS_ACT
3220         struct tc_action *a = tcf_exts_first_act(exts);
3221         if (a != NULL && tcf_action_copy_stats(skb, a, 1) < 0)
3222                 return -1;
3223 #endif
3224         return 0;
3225 }
3226 EXPORT_SYMBOL(tcf_exts_dump_stats);
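/* Dump-side sketch (an assumption, following the pattern most classifiers
 * use in their ->dump() callback): the extension dump goes inside the
 * options nest, while the stats dump comes after the nest is closed.
 *
 *	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
 *	if (!nest)
 *		goto nla_put_failure;
 *	if (tcf_exts_dump(skb, &f->exts) < 0)
 *		goto nla_put_failure;
 *	nla_nest_end(skb, nest);
 *	if (tcf_exts_dump_stats(skb, &f->exts) < 0)
 *		goto nla_put_failure;
 */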
3227
3228 static void tcf_block_offload_inc(struct tcf_block *block, u32 *flags)
3229 {
3230         if (*flags & TCA_CLS_FLAGS_IN_HW)
3231                 return;
3232         *flags |= TCA_CLS_FLAGS_IN_HW;
3233         atomic_inc(&block->offloadcnt);
3234 }
3235
3236 static void tcf_block_offload_dec(struct tcf_block *block, u32 *flags)
3237 {
3238         if (!(*flags & TCA_CLS_FLAGS_IN_HW))
3239                 return;
3240         *flags &= ~TCA_CLS_FLAGS_IN_HW;
3241         atomic_dec(&block->offloadcnt);
3242 }
3243
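/* Each filter carries a per-filter count of how many hardware callbacks
 * accepted it (*cnt, typically the classifier's in_hw_count). The
 * TCA_CLS_FLAGS_IN_HW flag mirrors "*cnt != 0", while block->offloadcnt
 * above counts how many filters of the block are in hardware at all.
 */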
3244 static void tc_cls_offload_cnt_update(struct tcf_block *block,
3245                                       struct tcf_proto *tp, u32 *cnt,
3246                                       u32 *flags, u32 diff, bool add)
3247 {
3248         lockdep_assert_held(&block->cb_lock);
3249
3250         spin_lock(&tp->lock);
3251         if (add) {
3252                 if (!*cnt)
3253                         tcf_block_offload_inc(block, flags);
3254                 *cnt += diff;
3255         } else {
3256                 *cnt -= diff;
3257                 if (!*cnt)
3258                         tcf_block_offload_dec(block, flags);
3259         }
3260         spin_unlock(&tp->lock);
3261 }
3262
3263 static void
3264 tc_cls_offload_cnt_reset(struct tcf_block *block, struct tcf_proto *tp,
3265                          u32 *cnt, u32 *flags)
3266 {
3267         lockdep_assert_held(&block->cb_lock);
3268
3269         spin_lock(&tp->lock);
3270         tcf_block_offload_dec(block, flags);
3271         *cnt = 0;
3272         spin_unlock(&tp->lock);
3273 }
3274
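/* Invoke all callbacks registered on the block. Returns the number of
 * callbacks that returned success; if err_stop is set, the first error
 * is returned immediately instead.
 */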
3275 static int
3276 __tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type,
3277                    void *type_data, bool err_stop)
3278 {
3279         struct flow_block_cb *block_cb;
3280         int ok_count = 0;
3281         int err;
3282
3283         list_for_each_entry(block_cb, &block->flow_block.cb_list, list) {
3284                 err = block_cb->cb(type, type_data, block_cb->cb_priv);
3285                 if (err) {
3286                         if (err_stop)
3287                                 return err;
3288                 } else {
3289                         ok_count++;
3290                 }
3291         }
3292         return ok_count;
3293 }
3294
3295 int tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type,
3296                      void *type_data, bool err_stop, bool rtnl_held)
3297 {
3298         bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
3299         int ok_count;
3300
3301 retry:
3302         if (take_rtnl)
3303                 rtnl_lock();
3304         down_read(&block->cb_lock);
3305         /* Need to obtain rtnl lock if block is bound to devs that require it.
3306          * In block bind code cb_lock is obtained while holding rtnl, so we must
3307          * obtain the locks in the same order here.
3308          */
3309         if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
3310                 up_read(&block->cb_lock);
3311                 take_rtnl = true;
3312                 goto retry;
3313         }
3314
3315         ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);
3316
3317         up_read(&block->cb_lock);
3318         if (take_rtnl)
3319                 rtnl_unlock();
3320         return ok_count;
3321 }
3322 EXPORT_SYMBOL(tc_setup_cb_call);
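/* A note on the retry dance above: lockeddevcnt is sampled without
 * cb_lock, so a locked device may be bound to the block between the
 * sample and down_read(). The recheck under cb_lock catches that case;
 * we then back off and retake both locks in the bind code's order
 * (rtnl first, then cb_lock).
 */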
3323
3324 /* Non-destructive filter add. If a filter that wasn't already in hardware is
3325  * successfully offloaded, increment the block offloads counter. On failure,
3326  * a previously offloaded filter is considered to be intact and the offloads
3327  * counter is not decremented.
3328  */
3329
3330 int tc_setup_cb_add(struct tcf_block *block, struct tcf_proto *tp,
3331                     enum tc_setup_type type, void *type_data, bool err_stop,
3332                     u32 *flags, unsigned int *in_hw_count, bool rtnl_held)
3333 {
3334         bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
3335         int ok_count;
3336
3337 retry:
3338         if (take_rtnl)
3339                 rtnl_lock();
3340         down_read(&block->cb_lock);
3341         /* Need to obtain rtnl lock if block is bound to devs that require it.
3342          * In block bind code cb_lock is obtained while holding rtnl, so we must
3343          * obtain the locks in the same order here.
3344          */
3345         if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
3346                 up_read(&block->cb_lock);
3347                 take_rtnl = true;
3348                 goto retry;
3349         }
3350
3351         /* Make sure all netdevs sharing this block are offload-capable. */
3352         if (block->nooffloaddevcnt && err_stop) {
3353                 ok_count = -EOPNOTSUPP;
3354                 goto err_unlock;
3355         }
3356
3357         ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);
3358         if (ok_count < 0)
3359                 goto err_unlock;
3360
3361         if (tp->ops->hw_add)
3362                 tp->ops->hw_add(tp, type_data);
3363         if (ok_count > 0)
3364                 tc_cls_offload_cnt_update(block, tp, in_hw_count, flags,
3365                                           ok_count, true);
3366 err_unlock:
3367         up_read(&block->cb_lock);
3368         if (take_rtnl)
3369                 rtnl_unlock();
3370         return min(ok_count, 0);
3371 }
3372 EXPORT_SYMBOL(tc_setup_cb_add);
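/* Usage sketch (an assumption, modeled on cls_flower's hardware add path;
 * "cls_foo" and "f" are hypothetical stand-ins):
 *
 *	err = tc_setup_cb_add(block, tp, TC_SETUP_CLSFLOWER, &cls_foo,
 *			      skip_sw, &f->flags, &f->in_hw_count, true);
 *	if (err)
 *		goto errout;
 *	if (skip_sw && !tc_in_hw(f->flags))
 *		return -EINVAL;	// skip_sw filter reached no hardware
 */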
3373
3374 /* Destructive filter replace. If a filter that wasn't already in hardware is
3375  * successfully offloaded, increment the block offload counter. On failure,
3376  * the previously offloaded filter is considered to be destroyed and the
3377  * offload counter is decremented.
3378  */
3379
3380 int tc_setup_cb_replace(struct tcf_block *block, struct tcf_proto *tp,
3381                         enum tc_setup_type type, void *type_data, bool err_stop,
3382                         u32 *old_flags, unsigned int *old_in_hw_count,
3383                         u32 *new_flags, unsigned int *new_in_hw_count,
3384                         bool rtnl_held)
3385 {
3386         bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
3387         int ok_count;
3388
3389 retry:
3390         if (take_rtnl)
3391                 rtnl_lock();
3392         down_read(&block->cb_lock);
3393         /* Need to obtain rtnl lock if block is bound to devs that require it.
3394          * In block bind code cb_lock is obtained while holding rtnl, so we must
3395          * obtain the locks in the same order here.
3396          */
3397         if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
3398                 up_read(&block->cb_lock);
3399                 take_rtnl = true;
3400                 goto retry;
3401         }
3402
3403         /* Make sure all netdevs sharing this block are offload-capable. */
3404         if (block->nooffloaddevcnt && err_stop) {
3405                 ok_count = -EOPNOTSUPP;
3406                 goto err_unlock;
3407         }
3408
3409         tc_cls_offload_cnt_reset(block, tp, old_in_hw_count, old_flags);
3410         if (tp->ops->hw_del)
3411                 tp->ops->hw_del(tp, type_data);
3412
3413         ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);
3414         if (ok_count < 0)
3415                 goto err_unlock;
3416
3417         if (tp->ops->hw_add)
3418                 tp->ops->hw_add(tp, type_data);
3419         if (ok_count > 0)
3420                 tc_cls_offload_cnt_update(block, tp, new_in_hw_count,
3421                                           new_flags, ok_count, true);
3422 err_unlock:
3423         up_read(&block->cb_lock);
3424         if (take_rtnl)
3425                 rtnl_unlock();
3426         return min(ok_count, 0);
3427 }
3428 EXPORT_SYMBOL(tc_setup_cb_replace);
3429
3430 /* Destroy the filter and decrement the block offload counter if the filter
3431  * was previously offloaded.
3432  */
3433
3434 int tc_setup_cb_destroy(struct tcf_block *block, struct tcf_proto *tp,
3435                         enum tc_setup_type type, void *type_data, bool err_stop,
3436                         u32 *flags, unsigned int *in_hw_count, bool rtnl_held)
3437 {
3438         bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
3439         int ok_count;
3440
3441 retry:
3442         if (take_rtnl)
3443                 rtnl_lock();
3444         down_read(&block->cb_lock);
3445         /* Need to obtain rtnl lock if block is bound to devs that require it.
3446          * In block bind code cb_lock is obtained while holding rtnl, so we must
3447          * obtain the locks in the same order here.
3448          */
3449         if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
3450                 up_read(&block->cb_lock);
3451                 take_rtnl = true;
3452                 goto retry;
3453         }
3454
3455         ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);
3456
3457         tc_cls_offload_cnt_reset(block, tp, in_hw_count, flags);
3458         if (tp->ops->hw_del)
3459                 tp->ops->hw_del(tp, type_data);
3460
3461         up_read(&block->cb_lock);
3462         if (take_rtnl)
3463                 rtnl_unlock();
3464         return min(ok_count, 0);
3465 }
3466 EXPORT_SYMBOL(tc_setup_cb_destroy);
3467
3468 int tc_setup_cb_reoffload(struct tcf_block *block, struct tcf_proto *tp,
3469                           bool add, flow_setup_cb_t *cb,
3470                           enum tc_setup_type type, void *type_data,
3471                           void *cb_priv, u32 *flags, unsigned int *in_hw_count)
3472 {
3473         int err = cb(type, type_data, cb_priv);
3474
3475         if (err) {
3476                 if (add && tc_skip_sw(*flags))
3477                         return err;
3478         } else {
3479                 tc_cls_offload_cnt_update(block, tp, in_hw_count, flags, 1,
3480                                           add);
3481         }
3482
3483         return 0;
3484 }
3485 EXPORT_SYMBOL(tc_setup_cb_reoffload);
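/* Sketch of a classifier's ->reoffload() built on this helper (an
 * assumption, modeled on cls_flower): every existing filter is replayed
 * to the callback being (un)registered so its counters stay in sync.
 *
 *	list_for_each_entry(f, &head->filters, list) {
 *		cls_foo.command = add ? FOO_REPLACE : FOO_DESTROY;
 *		err = tc_setup_cb_reoffload(block, tp, add, cb,
 *					    TC_SETUP_CLSFLOWER, &cls_foo,
 *					    cb_priv, &f->flags,
 *					    &f->in_hw_count);
 *		if (err)
 *			return err;
 *	}
 */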
3486
3487 static int tcf_act_get_cookie(struct flow_action_entry *entry,
3488                               const struct tc_action *act)
3489 {
3490         struct tc_cookie *cookie;
3491         int err = 0;
3492
3493         rcu_read_lock();
3494         cookie = rcu_dereference(act->act_cookie);
3495         if (cookie) {
3496                 entry->cookie = flow_action_cookie_create(cookie->data,
3497                                                           cookie->len,
3498                                                           GFP_ATOMIC);
3499                 if (!entry->cookie)
3500                         err = -ENOMEM;
3501         }
3502         rcu_read_unlock();
3503         return err;
3504 }
3505
3506 static void tcf_act_put_cookie(struct flow_action_entry *entry)
3507 {
3508         flow_action_cookie_destroy(entry->cookie);
3509 }
3510
3511 void tc_cleanup_offload_action(struct flow_action *flow_action)
3512 {
3513         struct flow_action_entry *entry;
3514         int i;
3515
3516         flow_action_for_each(i, entry, flow_action) {
3517                 tcf_act_put_cookie(entry);
3518                 if (entry->destructor)
3519                         entry->destructor(entry->destructor_priv);
3520         }
3521 }
3522 EXPORT_SYMBOL(tc_cleanup_offload_action);
3523
3524 static int tc_setup_offload_act(struct tc_action *act,
3525                                 struct flow_action_entry *entry,
3526                                 u32 *index_inc,
3527                                 struct netlink_ext_ack *extack)
3528 {
3529 #ifdef CONFIG_NET_CLS_ACT
3530         if (act->ops->offload_act_setup) {
3531                 return act->ops->offload_act_setup(act, entry, index_inc, true,
3532                                                    extack);
3533         } else {
3534                 NL_SET_ERR_MSG(extack, "Action does not support offload");
3535                 return -EOPNOTSUPP;
3536         }
3537 #else
3538         return 0;
3539 #endif
3540 }
3541
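/* Translate an array of tc_actions into flow_action entries for offload.
 * A single action may expand into several consecutive entries (index_inc
 * reports how many), e.g. one pedit action becomes one entry per key.
 */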
3542 int tc_setup_action(struct flow_action *flow_action,
3543                     struct tc_action *actions[],
3544                     struct netlink_ext_ack *extack)
3545 {
3546         int i, j, k, index, err = 0;
3547         struct tc_action *act;
3548
3549         BUILD_BUG_ON(TCA_ACT_HW_STATS_ANY != FLOW_ACTION_HW_STATS_ANY);
3550         BUILD_BUG_ON(TCA_ACT_HW_STATS_IMMEDIATE != FLOW_ACTION_HW_STATS_IMMEDIATE);
3551         BUILD_BUG_ON(TCA_ACT_HW_STATS_DELAYED != FLOW_ACTION_HW_STATS_DELAYED);
3552
3553         if (!actions)
3554                 return 0;
3555
3556         j = 0;
3557         tcf_act_for_each_action(i, act, actions) {
3558                 struct flow_action_entry *entry;
3559
3560                 entry = &flow_action->entries[j];
3561                 spin_lock_bh(&act->tcfa_lock);
3562                 err = tcf_act_get_cookie(entry, act);
3563                 if (err)
3564                         goto err_out_locked;
3565
3566                 index = 0;
3567                 err = tc_setup_offload_act(act, entry, &index, extack);
3568                 if (err)
3569                         goto err_out_locked;
3570
3571                 for (k = 0; k < index; k++) {
3572                         entry[k].hw_stats = tc_act_hw_stats(act->hw_stats);
3573                         entry[k].hw_index = act->tcfa_index;
3574                 }
3575
3576                 j += index;
3577
3578                 spin_unlock_bh(&act->tcfa_lock);
3579         }
3580
3581 err_out:
3582         if (err)
3583                 tc_cleanup_offload_action(flow_action);
3584
3585         return err;
3586 err_out_locked:
3587         spin_unlock_bh(&act->tcfa_lock);
3588         goto err_out;
3589 }
3590
3591 int tc_setup_offload_action(struct flow_action *flow_action,
3592                             const struct tcf_exts *exts,
3593                             struct netlink_ext_ack *extack)
3594 {
3595 #ifdef CONFIG_NET_CLS_ACT
3596         if (!exts)
3597                 return 0;
3598
3599         return tc_setup_action(flow_action, exts->actions, extack);
3600 #else
3601         return 0;
3602 #endif
3603 }
3604 EXPORT_SYMBOL(tc_setup_offload_action);
3605
3606 unsigned int tcf_exts_num_actions(struct tcf_exts *exts)
3607 {
3608         unsigned int num_acts = 0;
3609         struct tc_action *act;
3610         int i;
3611
3612         tcf_exts_for_each_action(i, act, exts) {
3613                 if (is_tcf_pedit(act))
3614                         num_acts += tcf_pedit_nkeys(act);
3615                 else
3616                         num_acts++;
3617         }
3618         return num_acts;
3619 }
3620 EXPORT_SYMBOL(tcf_exts_num_actions);
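/* Usage sketch (an assumption, modeled on cls_matchall's hardware path):
 * size the flow rule by the number of expanded actions, fill it in, and
 * release the entries when done.
 *
 *	cls_mall.rule = flow_rule_alloc(tcf_exts_num_actions(&head->exts));
 *	if (!cls_mall.rule)
 *		return -ENOMEM;
 *	err = tc_setup_offload_action(&cls_mall.rule->action, &head->exts,
 *				      extack);
 *	...
 *	tc_cleanup_offload_action(&cls_mall.rule->action);
 *	kfree(cls_mall.rule);
 */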
3621
3622 #ifdef CONFIG_NET_CLS_ACT
3623 static int tcf_qevent_parse_block_index(struct nlattr *block_index_attr,
3624                                         u32 *p_block_index,
3625                                         struct netlink_ext_ack *extack)
3626 {
3627         *p_block_index = nla_get_u32(block_index_attr);
3628         if (!*p_block_index) {
3629                 NL_SET_ERR_MSG(extack, "Block number may not be zero");
3630                 return -EINVAL;
3631         }
3632
3633         return 0;
3634 }
3635
3636 int tcf_qevent_init(struct tcf_qevent *qe, struct Qdisc *sch,
3637                     enum flow_block_binder_type binder_type,
3638                     struct nlattr *block_index_attr,
3639                     struct netlink_ext_ack *extack)
3640 {
3641         u32 block_index;
3642         int err;
3643
3644         if (!block_index_attr)
3645                 return 0;
3646
3647         err = tcf_qevent_parse_block_index(block_index_attr, &block_index, extack);
3648         if (err)
3649                 return err;
3650
3651         qe->info.binder_type = binder_type;
3652         qe->info.chain_head_change = tcf_chain_head_change_dflt;
3653         qe->info.chain_head_change_priv = &qe->filter_chain;
3654         qe->info.block_index = block_index;
3655
3656         return tcf_block_get_ext(&qe->block, sch, &qe->info, extack);
3657 }
3658 EXPORT_SYMBOL(tcf_qevent_init);
3659
3660 void tcf_qevent_destroy(struct tcf_qevent *qe, struct Qdisc *sch)
3661 {
3662         if (qe->info.block_index)
3663                 tcf_block_put_ext(qe->block, sch, &qe->info);
3664 }
3665 EXPORT_SYMBOL(tcf_qevent_destroy);
3666
3667 int tcf_qevent_validate_change(struct tcf_qevent *qe, struct nlattr *block_index_attr,
3668                                struct netlink_ext_ack *extack)
3669 {
3670         u32 block_index;
3671         int err;
3672
3673         if (!block_index_attr)
3674                 return 0;
3675
3676         err = tcf_qevent_parse_block_index(block_index_attr, &block_index, extack);
3677         if (err)
3678                 return err;
3679
3680         /* Reject both a newly configured block and a change of block. */
3681         if (block_index != qe->info.block_index) {
3682                 NL_SET_ERR_MSG(extack, "Change of blocks is not supported");
3683                 return -EINVAL;
3684         }
3685
3686         return 0;
3687 }
3688 EXPORT_SYMBOL(tcf_qevent_validate_change);
3689
3690 struct sk_buff *tcf_qevent_handle(struct tcf_qevent *qe, struct Qdisc *sch, struct sk_buff *skb,
3691                                   struct sk_buff **to_free, int *ret)
3692 {
3693         struct tcf_result cl_res;
3694         struct tcf_proto *fl;
3695
3696         if (!qe->info.block_index)
3697                 return skb;
3698
3699         fl = rcu_dereference_bh(qe->filter_chain);
3700
3701         switch (tcf_classify(skb, NULL, fl, &cl_res, false)) {
3702         case TC_ACT_SHOT:
3703                 qdisc_qstats_drop(sch);
3704                 __qdisc_drop(skb, to_free);
3705                 *ret = __NET_XMIT_BYPASS;
3706                 return NULL;
3707         case TC_ACT_STOLEN:
3708         case TC_ACT_QUEUED:
3709         case TC_ACT_TRAP:
3710                 __qdisc_drop(skb, to_free);
3711                 *ret = __NET_XMIT_STOLEN;
3712                 return NULL;
3713         case TC_ACT_REDIRECT:
3714                 skb_do_redirect(skb);
3715                 *ret = __NET_XMIT_STOLEN;
3716                 return NULL;
3717         }
3718
3719         return skb;
3720 }
3721 EXPORT_SYMBOL(tcf_qevent_handle);
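/* Qevent usage sketch (an assumption, modeled on sch_red's early_drop
 * qevent; "q" is the qdisc's private data):
 *
 * setup:
 *	err = tcf_qevent_init(&q->qe_early_drop, sch,
 *			      FLOW_BLOCK_BINDER_TYPE_RED_EARLY_DROP,
 *			      tb[TCA_RED_EARLY_DROP_BLOCK], extack);
 *
 * datapath, at the point the event fires:
 *	skb = tcf_qevent_handle(&q->qe_early_drop, sch, skb, to_free, &ret);
 *	if (!skb)
 *		return NET_XMIT_CN | ret;
 *
 * teardown:
 *	tcf_qevent_destroy(&q->qe_early_drop, sch);
 */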
3722
3723 int tcf_qevent_dump(struct sk_buff *skb, int attr_name, struct tcf_qevent *qe)
3724 {
3725         if (!qe->info.block_index)
3726                 return 0;
3727         return nla_put_u32(skb, attr_name, qe->info.block_index);
3728 }
3729 EXPORT_SYMBOL(tcf_qevent_dump);
3730 #endif
3731
3732 static __net_init int tcf_net_init(struct net *net)
3733 {
3734         struct tcf_net *tn = net_generic(net, tcf_net_id);
3735
3736         spin_lock_init(&tn->idr_lock);
3737         idr_init(&tn->idr);
3738         return 0;
3739 }
3740
3741 static void __net_exit tcf_net_exit(struct net *net)
3742 {
3743         struct tcf_net *tn = net_generic(net, tcf_net_id);
3744
3745         idr_destroy(&tn->idr);
3746 }
3747
3748 static struct pernet_operations tcf_net_ops = {
3749         .init = tcf_net_init,
3750         .exit = tcf_net_exit,
3751         .id   = &tcf_net_id,
3752         .size = sizeof(struct tcf_net),
3753 };
3754
3755 static int __init tc_filter_init(void)
3756 {
3757         int err;
3758
3759         tc_filter_wq = alloc_ordered_workqueue("tc_filter_workqueue", 0);
3760         if (!tc_filter_wq)
3761                 return -ENOMEM;
3762
3763         err = register_pernet_subsys(&tcf_net_ops);
3764         if (err)
3765                 goto err_register_pernet_subsys;
3766
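	/* The tfilter handlers below are registered RTNL_FLAG_DOIT_UNLOCKED
	 * and take rtnl themselves only when needed; the chain handlers
	 * still run under rtnl.
	 */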
3767         rtnl_register(PF_UNSPEC, RTM_NEWTFILTER, tc_new_tfilter, NULL,
3768                       RTNL_FLAG_DOIT_UNLOCKED);
3769         rtnl_register(PF_UNSPEC, RTM_DELTFILTER, tc_del_tfilter, NULL,
3770                       RTNL_FLAG_DOIT_UNLOCKED);
3771         rtnl_register(PF_UNSPEC, RTM_GETTFILTER, tc_get_tfilter,
3772                       tc_dump_tfilter, RTNL_FLAG_DOIT_UNLOCKED);
3773         rtnl_register(PF_UNSPEC, RTM_NEWCHAIN, tc_ctl_chain, NULL, 0);
3774         rtnl_register(PF_UNSPEC, RTM_DELCHAIN, tc_ctl_chain, NULL, 0);
3775         rtnl_register(PF_UNSPEC, RTM_GETCHAIN, tc_ctl_chain,
3776                       tc_dump_chain, 0);
3777
3778         return 0;
3779
3780 err_register_pernet_subsys:
3781         destroy_workqueue(tc_filter_wq);
3782         return err;
3783 }
3784
3785 subsys_initcall(tc_filter_init);