net/sched/act_skbedit.c (GNU Linux-libre 5.4.207-gnu1)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2008, Intel Corporation.
 *
 * Author: Alexander Duyck <alexander.h.duyck@intel.com>
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/dsfield.h>
#include <net/pkt_cls.h>

#include <linux/tc_act/tc_skbedit.h>
#include <net/tc_act/tc_skbedit.h>

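/*
 * Example usage (illustrative only; the device, match and values below are
 * placeholders, see the tc-skbedit(8) man page for the authoritative syntax):
 *
 *   tc filter add dev eth0 parent 1: protocol ip prio 1 u32 \
 *           match ip dst 192.168.0.3 \
 *           action skbedit queue_mapping 3
 *
 * The action can also set skb->priority, skb->mark (optionally masked),
 * the packet type, or derive the priority from the IP DS field
 * ("inheritdsfield").
 */
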
static unsigned int skbedit_net_id;
static struct tc_action_ops act_skbedit_ops;

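/*
 * Datapath handler: runs for every packet that hits the action. It reads
 * the action parameters under an RCU (BH) read-side critical section,
 * rewrites the requested skb fields and returns the configured control
 * verdict, or TC_ACT_SHOT if a required header cannot be pulled.
 */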
static int tcf_skbedit_act(struct sk_buff *skb, const struct tc_action *a,
                           struct tcf_result *res)
{
        struct tcf_skbedit *d = to_skbedit(a);
        struct tcf_skbedit_params *params;
        int action;

        tcf_lastuse_update(&d->tcf_tm);
        bstats_cpu_update(this_cpu_ptr(d->common.cpu_bstats), skb);

        params = rcu_dereference_bh(d->params);
        action = READ_ONCE(d->tcf_action);

        if (params->flags & SKBEDIT_F_PRIORITY)
                skb->priority = params->priority;
        if (params->flags & SKBEDIT_F_INHERITDSFIELD) {
                int wlen = skb_network_offset(skb);

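                /* Inherit skb->priority from the IP header: read the DS
                 * field and shift out the two ECN bits, leaving the 6-bit
                 * DSCP value.
                 */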
                switch (skb_protocol(skb, true)) {
                case htons(ETH_P_IP):
                        wlen += sizeof(struct iphdr);
                        if (!pskb_may_pull(skb, wlen))
                                goto err;
                        skb->priority = ipv4_get_dsfield(ip_hdr(skb)) >> 2;
                        break;

                case htons(ETH_P_IPV6):
                        wlen += sizeof(struct ipv6hdr);
                        if (!pskb_may_pull(skb, wlen))
                                goto err;
                        skb->priority = ipv6_get_dsfield(ipv6_hdr(skb)) >> 2;
                        break;
                }
        }
        if (params->flags & SKBEDIT_F_QUEUE_MAPPING &&
            skb->dev->real_num_tx_queues > params->queue_mapping)
                skb_set_queue_mapping(skb, params->queue_mapping);
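        /* Only the bits covered by the configured mask are rewritten;
         * bits outside the mask keep their existing value in skb->mark.
         */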
        if (params->flags & SKBEDIT_F_MARK) {
                skb->mark &= ~params->mask;
                skb->mark |= params->mark & params->mask;
        }
        if (params->flags & SKBEDIT_F_PTYPE)
                skb->pkt_type = params->ptype;
        return action;

err:
        qstats_drop_inc(this_cpu_ptr(d->common.cpu_qstats));
        return TC_ACT_SHOT;
}

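/* Netlink attribute policy: fixed-size payloads for every attribute the
 * action accepts from userspace.
 */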
static const struct nla_policy skbedit_policy[TCA_SKBEDIT_MAX + 1] = {
        [TCA_SKBEDIT_PARMS]             = { .len = sizeof(struct tc_skbedit) },
        [TCA_SKBEDIT_PRIORITY]          = { .len = sizeof(u32) },
        [TCA_SKBEDIT_QUEUE_MAPPING]     = { .len = sizeof(u16) },
        [TCA_SKBEDIT_MARK]              = { .len = sizeof(u32) },
        [TCA_SKBEDIT_PTYPE]             = { .len = sizeof(u16) },
        [TCA_SKBEDIT_MASK]              = { .len = sizeof(u32) },
        [TCA_SKBEDIT_FLAGS]             = { .len = sizeof(u64) },
};

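/*
 * Control-path setup: parse the netlink attributes, create or look up the
 * action instance in the per-netns IDR, build a new parameter block and
 * publish it to the datapath with an RCU swap.
 */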
static int tcf_skbedit_init(struct net *net, struct nlattr *nla,
                            struct nlattr *est, struct tc_action **a,
                            int ovr, int bind, bool rtnl_held,
                            struct tcf_proto *tp,
                            struct netlink_ext_ack *extack)
{
        struct tc_action_net *tn = net_generic(net, skbedit_net_id);
        struct tcf_skbedit_params *params_new;
        struct nlattr *tb[TCA_SKBEDIT_MAX + 1];
        struct tcf_chain *goto_ch = NULL;
        struct tc_skbedit *parm;
        struct tcf_skbedit *d;
        u32 flags = 0, *priority = NULL, *mark = NULL, *mask = NULL;
        u16 *queue_mapping = NULL, *ptype = NULL;
        bool exists = false;
        int ret = 0, err;
        u32 index;

        if (nla == NULL)
                return -EINVAL;

        err = nla_parse_nested_deprecated(tb, TCA_SKBEDIT_MAX, nla,
                                          skbedit_policy, NULL);
        if (err < 0)
                return err;

        if (tb[TCA_SKBEDIT_PARMS] == NULL)
                return -EINVAL;

        if (tb[TCA_SKBEDIT_PRIORITY] != NULL) {
                flags |= SKBEDIT_F_PRIORITY;
                priority = nla_data(tb[TCA_SKBEDIT_PRIORITY]);
        }

        if (tb[TCA_SKBEDIT_QUEUE_MAPPING] != NULL) {
                flags |= SKBEDIT_F_QUEUE_MAPPING;
                queue_mapping = nla_data(tb[TCA_SKBEDIT_QUEUE_MAPPING]);
        }

        if (tb[TCA_SKBEDIT_PTYPE] != NULL) {
                ptype = nla_data(tb[TCA_SKBEDIT_PTYPE]);
                if (!skb_pkt_type_ok(*ptype))
                        return -EINVAL;
                flags |= SKBEDIT_F_PTYPE;
        }

        if (tb[TCA_SKBEDIT_MARK] != NULL) {
                flags |= SKBEDIT_F_MARK;
                mark = nla_data(tb[TCA_SKBEDIT_MARK]);
        }

        if (tb[TCA_SKBEDIT_MASK] != NULL) {
                flags |= SKBEDIT_F_MASK;
                mask = nla_data(tb[TCA_SKBEDIT_MASK]);
        }

        if (tb[TCA_SKBEDIT_FLAGS] != NULL) {
                u64 *pure_flags = nla_data(tb[TCA_SKBEDIT_FLAGS]);

                if (*pure_flags & SKBEDIT_F_INHERITDSFIELD)
                        flags |= SKBEDIT_F_INHERITDSFIELD;
        }

        parm = nla_data(tb[TCA_SKBEDIT_PARMS]);
        index = parm->index;
        err = tcf_idr_check_alloc(tn, &index, a, bind);
        if (err < 0)
                return err;
        exists = err;
        if (exists && bind)
                return 0;

        if (!flags) {
                if (exists)
                        tcf_idr_release(*a, bind);
                else
                        tcf_idr_cleanup(tn, index);
                return -EINVAL;
        }

        if (!exists) {
                ret = tcf_idr_create(tn, index, est, a,
                                     &act_skbedit_ops, bind, true);
                if (ret) {
                        tcf_idr_cleanup(tn, index);
                        return ret;
                }

                d = to_skbedit(*a);
                ret = ACT_P_CREATED;
        } else {
                d = to_skbedit(*a);
                if (!ovr) {
                        tcf_idr_release(*a, bind);
                        return -EEXIST;
                }
        }
        err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
        if (err < 0)
                goto release_idr;

        params_new = kzalloc(sizeof(*params_new), GFP_KERNEL);
        if (unlikely(!params_new)) {
                err = -ENOMEM;
                goto put_chain;
        }

        params_new->flags = flags;
        if (flags & SKBEDIT_F_PRIORITY)
                params_new->priority = *priority;
        if (flags & SKBEDIT_F_QUEUE_MAPPING)
                params_new->queue_mapping = *queue_mapping;
        if (flags & SKBEDIT_F_MARK)
                params_new->mark = *mark;
        if (flags & SKBEDIT_F_PTYPE)
                params_new->ptype = *ptype;
        /* default behaviour is to use all the bits */
        params_new->mask = 0xffffffff;
        if (flags & SKBEDIT_F_MASK)
                params_new->mask = *mask;

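        /* Publish the new parameter block: swap it in under tcf_lock so
         * readers always see a consistent set, then free the old block
         * after an RCU grace period.
         */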
        spin_lock_bh(&d->tcf_lock);
        goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
        rcu_swap_protected(d->params, params_new,
                           lockdep_is_held(&d->tcf_lock));
        spin_unlock_bh(&d->tcf_lock);
        if (params_new)
                kfree_rcu(params_new, rcu);
        if (goto_ch)
                tcf_chain_put_by_act(goto_ch);

        return ret;
put_chain:
        if (goto_ch)
                tcf_chain_put_by_act(goto_ch);
release_idr:
        tcf_idr_release(*a, bind);
        return err;
}

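/*
 * Dump the current configuration back to userspace as netlink attributes,
 * holding tcf_lock so the parameters cannot change mid-dump.
 */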
static int tcf_skbedit_dump(struct sk_buff *skb, struct tc_action *a,
                            int bind, int ref)
{
        unsigned char *b = skb_tail_pointer(skb);
        struct tcf_skbedit *d = to_skbedit(a);
        struct tcf_skbedit_params *params;
        struct tc_skbedit opt = {
                .index   = d->tcf_index,
                .refcnt  = refcount_read(&d->tcf_refcnt) - ref,
                .bindcnt = atomic_read(&d->tcf_bindcnt) - bind,
        };
        u64 pure_flags = 0;
        struct tcf_t t;

        spin_lock_bh(&d->tcf_lock);
        params = rcu_dereference_protected(d->params,
                                           lockdep_is_held(&d->tcf_lock));
        opt.action = d->tcf_action;

        if (nla_put(skb, TCA_SKBEDIT_PARMS, sizeof(opt), &opt))
                goto nla_put_failure;
        if ((params->flags & SKBEDIT_F_PRIORITY) &&
            nla_put_u32(skb, TCA_SKBEDIT_PRIORITY, params->priority))
                goto nla_put_failure;
        if ((params->flags & SKBEDIT_F_QUEUE_MAPPING) &&
            nla_put_u16(skb, TCA_SKBEDIT_QUEUE_MAPPING, params->queue_mapping))
                goto nla_put_failure;
        if ((params->flags & SKBEDIT_F_MARK) &&
            nla_put_u32(skb, TCA_SKBEDIT_MARK, params->mark))
                goto nla_put_failure;
        if ((params->flags & SKBEDIT_F_PTYPE) &&
            nla_put_u16(skb, TCA_SKBEDIT_PTYPE, params->ptype))
                goto nla_put_failure;
        if ((params->flags & SKBEDIT_F_MASK) &&
            nla_put_u32(skb, TCA_SKBEDIT_MASK, params->mask))
                goto nla_put_failure;
        if (params->flags & SKBEDIT_F_INHERITDSFIELD)
                pure_flags |= SKBEDIT_F_INHERITDSFIELD;
        if (pure_flags != 0 &&
            nla_put(skb, TCA_SKBEDIT_FLAGS, sizeof(pure_flags), &pure_flags))
                goto nla_put_failure;

        tcf_tm_dump(&t, &d->tcf_tm);
        if (nla_put_64bit(skb, TCA_SKBEDIT_TM, sizeof(t), &t, TCA_SKBEDIT_PAD))
                goto nla_put_failure;
        spin_unlock_bh(&d->tcf_lock);

        return skb->len;

nla_put_failure:
        spin_unlock_bh(&d->tcf_lock);
        nlmsg_trim(skb, b);
        return -1;
}

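/* Release the parameter block when the action instance is destroyed. */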
static void tcf_skbedit_cleanup(struct tc_action *a)
{
        struct tcf_skbedit *d = to_skbedit(a);
        struct tcf_skbedit_params *params;

        params = rcu_dereference_protected(d->params, 1);
        if (params)
                kfree_rcu(params, rcu);
}

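/* Hooks used by the generic act API to list and look up skbedit instances
 * in the per-netns action table, and to size dump messages.
 */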
static int tcf_skbedit_walker(struct net *net, struct sk_buff *skb,
                              struct netlink_callback *cb, int type,
                              const struct tc_action_ops *ops,
                              struct netlink_ext_ack *extack)
{
        struct tc_action_net *tn = net_generic(net, skbedit_net_id);

        return tcf_generic_walker(tn, skb, cb, type, ops, extack);
}

static int tcf_skbedit_search(struct net *net, struct tc_action **a, u32 index)
{
        struct tc_action_net *tn = net_generic(net, skbedit_net_id);

        return tcf_idr_search(tn, a, index);
}

static size_t tcf_skbedit_get_fill_size(const struct tc_action *act)
{
        return nla_total_size(sizeof(struct tc_skbedit))
                + nla_total_size(sizeof(u32)) /* TCA_SKBEDIT_PRIORITY */
                + nla_total_size(sizeof(u16)) /* TCA_SKBEDIT_QUEUE_MAPPING */
                + nla_total_size(sizeof(u32)) /* TCA_SKBEDIT_MARK */
                + nla_total_size(sizeof(u16)) /* TCA_SKBEDIT_PTYPE */
                + nla_total_size(sizeof(u32)) /* TCA_SKBEDIT_MASK */
                + nla_total_size_64bit(sizeof(u64)); /* TCA_SKBEDIT_FLAGS */
}

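/* Registration glue: ties the callbacks above to the "skbedit" action kind. */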
static struct tc_action_ops act_skbedit_ops = {
        .kind           =       "skbedit",
        .id             =       TCA_ID_SKBEDIT,
        .owner          =       THIS_MODULE,
        .act            =       tcf_skbedit_act,
        .dump           =       tcf_skbedit_dump,
        .init           =       tcf_skbedit_init,
        .cleanup        =       tcf_skbedit_cleanup,
        .walk           =       tcf_skbedit_walker,
        .get_fill_size  =       tcf_skbedit_get_fill_size,
        .lookup         =       tcf_skbedit_search,
        .size           =       sizeof(struct tcf_skbedit),
};

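/* Each network namespace gets its own action table, set up and torn down
 * through these pernet operations.
 */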
static __net_init int skbedit_init_net(struct net *net)
{
        struct tc_action_net *tn = net_generic(net, skbedit_net_id);

        return tc_action_net_init(net, tn, &act_skbedit_ops);
}

static void __net_exit skbedit_exit_net(struct list_head *net_list)
{
        tc_action_net_exit(net_list, skbedit_net_id);
}

static struct pernet_operations skbedit_net_ops = {
        .init = skbedit_init_net,
        .exit_batch = skbedit_exit_net,
        .id   = &skbedit_net_id,
        .size = sizeof(struct tc_action_net),
};

MODULE_AUTHOR("Alexander Duyck, <alexander.h.duyck@intel.com>");
MODULE_DESCRIPTION("SKB Editing");
MODULE_LICENSE("GPL");

static int __init skbedit_init_module(void)
{
        return tcf_register_action(&act_skbedit_ops, &skbedit_net_ops);
}

static void __exit skbedit_cleanup_module(void)
{
        tcf_unregister_action(&act_skbedit_ops, &skbedit_net_ops);
}

module_init(skbedit_init_module);
module_exit(skbedit_cleanup_module);