/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <net/flow_offload.h>
#include <linux/rtnetlink.h>
#include <linux/mutex.h>
#include <linux/rhashtable.h>
struct flow_rule *flow_rule_alloc(unsigned int num_actions)
{
	struct flow_rule *rule;
	int i;

	rule = kzalloc(struct_size(rule, action.entries, num_actions),
		       GFP_KERNEL);
	if (!rule)
		return NULL;

	rule->action.num_entries = num_actions;
	/* Pre-fill each action hw_stats with DONT_CARE.
	 * Caller can override this if it wants stats for a given action.
	 */
	for (i = 0; i < num_actions; i++)
		rule->action.entries[i].hw_stats = FLOW_ACTION_HW_STATS_DONT_CARE;

	return rule;
}
EXPORT_SYMBOL(flow_rule_alloc);
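
/* Usage sketch (hypothetical caller, not part of this file): allocate a
 * one-action rule and override the DONT_CARE default for its action.
 * "example_build_drop_rule" is an illustrative name, not a kernel API.
 */
static struct flow_rule * __maybe_unused example_build_drop_rule(void)
{
	struct flow_rule *rule;

	rule = flow_rule_alloc(1);	/* room for a single action entry */
	if (!rule)
		return NULL;

	rule->action.entries[0].id = FLOW_ACTION_DROP;
	/* Ask for delayed (polled) hardware stats for this action. */
	rule->action.entries[0].hw_stats = FLOW_ACTION_HW_STATS_DELAYED;
	return rule;
}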

#define FLOW_DISSECTOR_MATCH(__rule, __type, __out)				\
	const struct flow_match *__m = &(__rule)->match;			\
	struct flow_dissector *__d = (__m)->dissector;				\
										\
	(__out)->key = skb_flow_dissector_target(__d, __type, (__m)->key);	\
	(__out)->mask = skb_flow_dissector_target(__d, __type, (__m)->mask)

void flow_rule_match_meta(const struct flow_rule *rule,
			  struct flow_match_meta *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_META, out);
}
EXPORT_SYMBOL(flow_rule_match_meta);

void flow_rule_match_basic(const struct flow_rule *rule,
			   struct flow_match_basic *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_BASIC, out);
}
EXPORT_SYMBOL(flow_rule_match_basic);
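
/* Usage sketch (hypothetical driver code, not part of this file): check
 * that a basic match is present, extract its key/mask pair, and reject
 * anything the hardware cannot match on. ETH_P_IP comes from
 * <linux/if_ether.h>.
 */
static int __maybe_unused example_parse_basic(const struct flow_rule *rule)
{
	struct flow_match_basic match;

	if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC))
		return 0;	/* nothing to validate */

	flow_rule_match_basic(rule, &match);
	/* The mask says which fields the filter actually cares about;
	 * here we only offload rules matching on IPv4.
	 */
	if (match.mask->n_proto && match.key->n_proto != htons(ETH_P_IP))
		return -EOPNOTSUPP;

	return 0;
}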

void flow_rule_match_control(const struct flow_rule *rule,
			     struct flow_match_control *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_CONTROL, out);
}
EXPORT_SYMBOL(flow_rule_match_control);

void flow_rule_match_eth_addrs(const struct flow_rule *rule,
			       struct flow_match_eth_addrs *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS, out);
}
EXPORT_SYMBOL(flow_rule_match_eth_addrs);

void flow_rule_match_vlan(const struct flow_rule *rule,
			  struct flow_match_vlan *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_VLAN, out);
}
EXPORT_SYMBOL(flow_rule_match_vlan);

void flow_rule_match_cvlan(const struct flow_rule *rule,
			   struct flow_match_vlan *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_CVLAN, out);
}
EXPORT_SYMBOL(flow_rule_match_cvlan);

void flow_rule_match_ipv4_addrs(const struct flow_rule *rule,
				struct flow_match_ipv4_addrs *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS, out);
}
EXPORT_SYMBOL(flow_rule_match_ipv4_addrs);

void flow_rule_match_ipv6_addrs(const struct flow_rule *rule,
				struct flow_match_ipv6_addrs *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS, out);
}
EXPORT_SYMBOL(flow_rule_match_ipv6_addrs);

void flow_rule_match_ip(const struct flow_rule *rule,
			struct flow_match_ip *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_IP, out);
}
EXPORT_SYMBOL(flow_rule_match_ip);

void flow_rule_match_ports(const struct flow_rule *rule,
			   struct flow_match_ports *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_PORTS, out);
}
EXPORT_SYMBOL(flow_rule_match_ports);

void flow_rule_match_tcp(const struct flow_rule *rule,
			 struct flow_match_tcp *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_TCP, out);
}
EXPORT_SYMBOL(flow_rule_match_tcp);

void flow_rule_match_icmp(const struct flow_rule *rule,
			  struct flow_match_icmp *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ICMP, out);
}
EXPORT_SYMBOL(flow_rule_match_icmp);

void flow_rule_match_mpls(const struct flow_rule *rule,
			  struct flow_match_mpls *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_MPLS, out);
}
EXPORT_SYMBOL(flow_rule_match_mpls);

void flow_rule_match_enc_control(const struct flow_rule *rule,
				 struct flow_match_control *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_CONTROL, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_control);

void flow_rule_match_enc_ipv4_addrs(const struct flow_rule *rule,
				    struct flow_match_ipv4_addrs *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_ipv4_addrs);

void flow_rule_match_enc_ipv6_addrs(const struct flow_rule *rule,
				    struct flow_match_ipv6_addrs *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_ipv6_addrs);

void flow_rule_match_enc_ip(const struct flow_rule *rule,
			    struct flow_match_ip *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_IP, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_ip);

void flow_rule_match_enc_ports(const struct flow_rule *rule,
			       struct flow_match_ports *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_PORTS, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_ports);

void flow_rule_match_enc_keyid(const struct flow_rule *rule,
			       struct flow_match_enc_keyid *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_KEYID, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_keyid);

void flow_rule_match_enc_opts(const struct flow_rule *rule,
			      struct flow_match_enc_opts *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_OPTS, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_opts);
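
/* Usage sketch (hypothetical, not part of this file): read the outer
 * tunnel key ID (e.g. a VXLAN VNI) from a decap rule using the enc_keyid
 * helper above.
 */
static u32 __maybe_unused example_tunnel_key_id(const struct flow_rule *rule)
{
	struct flow_match_enc_keyid match;

	if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID))
		return 0;

	flow_rule_match_enc_keyid(rule, &match);
	return be32_to_cpu(match.key->keyid);
}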

struct flow_action_cookie *flow_action_cookie_create(void *data,
						     unsigned int len,
						     gfp_t gfp)
{
	struct flow_action_cookie *cookie;

	cookie = kmalloc(sizeof(*cookie) + len, gfp);
	if (!cookie)
		return NULL;
	cookie->cookie_len = len;
	memcpy(cookie->cookie, data, len);
	return cookie;
}
EXPORT_SYMBOL(flow_action_cookie_create);

void flow_action_cookie_destroy(struct flow_action_cookie *cookie)
{
	kfree(cookie);
}
EXPORT_SYMBOL(flow_action_cookie_destroy);
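
/* Usage sketch (hypothetical, not part of this file): duplicate a user
 * supplied cookie so it can outlive the request that carried it.
 */
static struct flow_action_cookie * __maybe_unused
example_copy_cookie(struct flow_action_cookie *src)
{
	return flow_action_cookie_create(src->cookie, src->cookie_len,
					 GFP_KERNEL);
}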

void flow_rule_match_ct(const struct flow_rule *rule,
			struct flow_match_ct *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_CT, out);
}
EXPORT_SYMBOL(flow_rule_match_ct);

struct flow_block_cb *flow_block_cb_alloc(flow_setup_cb_t *cb,
					  void *cb_ident, void *cb_priv,
					  void (*release)(void *cb_priv))
{
	struct flow_block_cb *block_cb;

	block_cb = kzalloc(sizeof(*block_cb), GFP_KERNEL);
	if (!block_cb)
		return ERR_PTR(-ENOMEM);

	block_cb->cb = cb;
	block_cb->cb_ident = cb_ident;
	block_cb->cb_priv = cb_priv;
	block_cb->release = release;

	return block_cb;
}
EXPORT_SYMBOL(flow_block_cb_alloc);

void flow_block_cb_free(struct flow_block_cb *block_cb)
{
	if (block_cb->release)
		block_cb->release(block_cb->cb_priv);

	kfree(block_cb);
}
EXPORT_SYMBOL(flow_block_cb_free);

struct flow_block_cb *flow_block_cb_lookup(struct flow_block *block,
					   flow_setup_cb_t *cb, void *cb_ident)
{
	struct flow_block_cb *block_cb;

	list_for_each_entry(block_cb, &block->cb_list, list) {
		if (block_cb->cb == cb &&
		    block_cb->cb_ident == cb_ident)
			return block_cb;
	}

	return NULL;
}
EXPORT_SYMBOL(flow_block_cb_lookup);

void *flow_block_cb_priv(struct flow_block_cb *block_cb)
{
	return block_cb->cb_priv;
}
EXPORT_SYMBOL(flow_block_cb_priv);

void flow_block_cb_incref(struct flow_block_cb *block_cb)
{
	block_cb->refcnt++;
}
EXPORT_SYMBOL(flow_block_cb_incref);

unsigned int flow_block_cb_decref(struct flow_block_cb *block_cb)
{
	return --block_cb->refcnt;
}
EXPORT_SYMBOL(flow_block_cb_decref);
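
/* Usage sketch (hypothetical driver, not part of this file): a block_cb
 * shared across several binds is refcounted rather than reallocated, and
 * only the final unbind frees it. flow_block_cb_add()/_remove() are the
 * real helpers from <net/flow_offload.h>.
 */
static void __maybe_unused example_shared_bind(struct flow_block_cb *block_cb,
					       struct flow_block_offload *f)
{
	flow_block_cb_incref(block_cb);
	flow_block_cb_add(block_cb, f);
}

static void __maybe_unused example_shared_unbind(struct flow_block_cb *block_cb,
						 struct flow_block_offload *f)
{
	flow_block_cb_remove(block_cb, f);
	if (!flow_block_cb_decref(block_cb))
		flow_block_cb_free(block_cb);
}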

bool flow_block_cb_is_busy(flow_setup_cb_t *cb, void *cb_ident,
			   struct list_head *driver_block_list)
{
	struct flow_block_cb *block_cb;

	list_for_each_entry(block_cb, driver_block_list, driver_list) {
		if (block_cb->cb == cb &&
		    block_cb->cb_ident == cb_ident)
			return true;
	}

	return false;
}
EXPORT_SYMBOL(flow_block_cb_is_busy);

int flow_block_cb_setup_simple(struct flow_block_offload *f,
			       struct list_head *driver_block_list,
			       flow_setup_cb_t *cb,
			       void *cb_ident, void *cb_priv,
			       bool ingress_only)
{
	struct flow_block_cb *block_cb;

	if (ingress_only &&
	    f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
		return -EOPNOTSUPP;

	f->driver_block_list = driver_block_list;

	switch (f->command) {
	case FLOW_BLOCK_BIND:
		if (flow_block_cb_is_busy(cb, cb_ident, driver_block_list))
			return -EBUSY;

		block_cb = flow_block_cb_alloc(cb, cb_ident, cb_priv, NULL);
		if (IS_ERR(block_cb))
			return PTR_ERR(block_cb);

		flow_block_cb_add(block_cb, f);
		list_add_tail(&block_cb->driver_list, driver_block_list);
		return 0;
	case FLOW_BLOCK_UNBIND:
		block_cb = flow_block_cb_lookup(f->block, cb, cb_ident);
		if (!block_cb)
			return -ENOENT;

		flow_block_cb_remove(block_cb, f);
		list_del(&block_cb->driver_list);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}
EXPORT_SYMBOL(flow_block_cb_setup_simple);
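
/* Usage sketch (hypothetical driver, not part of this file): wiring
 * flow_block_cb_setup_simple() into an ndo_setup_tc() TC_SETUP_BLOCK
 * branch, with the netdev doubling as cb_ident and cb_priv. The list
 * name and function names are illustrative.
 */
static LIST_HEAD(example_driver_block_list);

static int __maybe_unused example_setup_tc_block(struct net_device *dev,
						 struct flow_block_offload *f,
						 flow_setup_cb_t *setup_cb)
{
	/* true: only clsact ingress bindings are accepted */
	return flow_block_cb_setup_simple(f, &example_driver_block_list,
					  setup_cb, dev, dev, true);
}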

static DEFINE_MUTEX(flow_indr_block_lock);
static LIST_HEAD(flow_block_indr_list);
static LIST_HEAD(flow_block_indr_dev_list);
static LIST_HEAD(flow_indir_dev_list);

struct flow_indr_dev {
	struct list_head		list;
	flow_indr_block_bind_cb_t	*cb;
	void				*cb_priv;
	refcount_t			refcnt;
};

static struct flow_indr_dev *flow_indr_dev_alloc(flow_indr_block_bind_cb_t *cb,
						 void *cb_priv)
{
	struct flow_indr_dev *indr_dev;

	indr_dev = kmalloc(sizeof(*indr_dev), GFP_KERNEL);
	if (!indr_dev)
		return NULL;

	indr_dev->cb = cb;
	indr_dev->cb_priv = cb_priv;
	refcount_set(&indr_dev->refcnt, 1);

	return indr_dev;
}

struct flow_indir_dev_info {
	void *data;
	struct net_device *dev;
	struct Qdisc *sch;
	enum tc_setup_type type;
	void (*cleanup)(struct flow_block_cb *block_cb);
	struct list_head list;
	enum flow_block_command command;
	enum flow_block_binder_type binder_type;
	struct list_head *cb_list;
};

static void existing_qdiscs_register(flow_indr_block_bind_cb_t *cb, void *cb_priv)
{
	struct flow_block_offload bo;
	struct flow_indir_dev_info *cur;

	list_for_each_entry(cur, &flow_indir_dev_list, list) {
		memset(&bo, 0, sizeof(bo));
		bo.command = cur->command;
		bo.binder_type = cur->binder_type;
		INIT_LIST_HEAD(&bo.cb_list);
		cb(cur->dev, cur->sch, cb_priv, cur->type, &bo, cur->data, cur->cleanup);
		list_splice(&bo.cb_list, cur->cb_list);
	}
}

int flow_indr_dev_register(flow_indr_block_bind_cb_t *cb, void *cb_priv)
{
	struct flow_indr_dev *indr_dev;

	mutex_lock(&flow_indr_block_lock);
	list_for_each_entry(indr_dev, &flow_block_indr_dev_list, list) {
		if (indr_dev->cb == cb &&
		    indr_dev->cb_priv == cb_priv) {
			refcount_inc(&indr_dev->refcnt);
			mutex_unlock(&flow_indr_block_lock);
			return 0;
		}
	}

	indr_dev = flow_indr_dev_alloc(cb, cb_priv);
	if (!indr_dev) {
		mutex_unlock(&flow_indr_block_lock);
		return -ENOMEM;
	}

	list_add(&indr_dev->list, &flow_block_indr_dev_list);
	existing_qdiscs_register(cb, cb_priv);
	mutex_unlock(&flow_indr_block_lock);

	return 0;
}
EXPORT_SYMBOL(flow_indr_dev_register);
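
/* Usage sketch (hypothetical driver, not part of this file): register an
 * indirect block callback at probe time; bindings for qdiscs that already
 * exist are replayed before the call returns. The matching unregister
 * (defined below, declared in <net/flow_offload.h>) runs release() for
 * each block_cb still installed through this callback.
 */
static int __maybe_unused example_register_indr(flow_indr_block_bind_cb_t *indr_cb,
						void *priv)
{
	return flow_indr_dev_register(indr_cb, priv);
}

static void __maybe_unused example_unregister_indr(flow_indr_block_bind_cb_t *indr_cb,
						   void *priv,
						   void (*release)(void *cb_priv))
{
	flow_indr_dev_unregister(indr_cb, priv, release);
}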

static void __flow_block_indr_cleanup(void (*release)(void *cb_priv),
				      void *cb_priv,
				      struct list_head *cleanup_list)
{
	struct flow_block_cb *this, *next;

	list_for_each_entry_safe(this, next, &flow_block_indr_list, indr.list) {
		if (this->release == release &&
		    this->indr.cb_priv == cb_priv)
			list_move(&this->indr.list, cleanup_list);
	}
}

static void flow_block_indr_notify(struct list_head *cleanup_list)
{
	struct flow_block_cb *this, *next;

	list_for_each_entry_safe(this, next, cleanup_list, indr.list) {
		list_del(&this->indr.list);
		this->indr.cleanup(this);
	}
}

void flow_indr_dev_unregister(flow_indr_block_bind_cb_t *cb, void *cb_priv,
			      void (*release)(void *cb_priv))
{
	struct flow_indr_dev *this, *next, *indr_dev = NULL;
	LIST_HEAD(cleanup_list);

	mutex_lock(&flow_indr_block_lock);
	list_for_each_entry_safe(this, next, &flow_block_indr_dev_list, list) {
		if (this->cb == cb &&
		    this->cb_priv == cb_priv &&
		    refcount_dec_and_test(&this->refcnt)) {
			indr_dev = this;
			list_del(&indr_dev->list);
			break;
		}
	}

	if (!indr_dev) {
		mutex_unlock(&flow_indr_block_lock);
		return;
	}

	__flow_block_indr_cleanup(release, cb_priv, &cleanup_list);
	mutex_unlock(&flow_indr_block_lock);

	flow_block_indr_notify(&cleanup_list);
	kfree(indr_dev);
}
EXPORT_SYMBOL(flow_indr_dev_unregister);

static void flow_block_indr_init(struct flow_block_cb *flow_block,
				 struct flow_block_offload *bo,
				 struct net_device *dev, struct Qdisc *sch, void *data,
				 void *cb_priv,
				 void (*cleanup)(struct flow_block_cb *block_cb))
{
	flow_block->indr.binder_type = bo->binder_type;
	flow_block->indr.data = data;
	flow_block->indr.cb_priv = cb_priv;
	flow_block->indr.dev = dev;
	flow_block->indr.sch = sch;
	flow_block->indr.cleanup = cleanup;
}

struct flow_block_cb *flow_indr_block_cb_alloc(flow_setup_cb_t *cb,
					       void *cb_ident, void *cb_priv,
					       void (*release)(void *cb_priv),
					       struct flow_block_offload *bo,
					       struct net_device *dev,
					       struct Qdisc *sch, void *data,
					       void *indr_cb_priv,
					       void (*cleanup)(struct flow_block_cb *block_cb))
{
	struct flow_block_cb *block_cb;

	block_cb = flow_block_cb_alloc(cb, cb_ident, cb_priv, release);
	if (IS_ERR(block_cb))
		goto out;

	flow_block_indr_init(block_cb, bo, dev, sch, data, indr_cb_priv, cleanup);
	list_add(&block_cb->indr.list, &flow_block_indr_list);

out:
	return block_cb;
}
EXPORT_SYMBOL(flow_indr_block_cb_alloc);
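
/* Usage sketch (hypothetical driver, not part of this file): a driver's
 * flow_indr_block_bind_cb_t handler allocates an indirect block_cb on
 * BIND. example_setup_cb() and example_release() are stand-in driver
 * hooks; a real driver would program hardware in the former.
 */
static int example_setup_cb(enum tc_setup_type type, void *type_data,
			    void *cb_priv)
{
	return -EOPNOTSUPP;	/* stub */
}

static void example_release(void *cb_priv)
{
}

static int __maybe_unused example_indr_cb(struct net_device *dev,
					  struct Qdisc *sch, void *cb_priv,
					  enum tc_setup_type type,
					  struct flow_block_offload *f,
					  void *data,
					  void (*cleanup)(struct flow_block_cb *block_cb))
{
	struct flow_block_cb *block_cb;

	if (type != TC_SETUP_BLOCK || f->command != FLOW_BLOCK_BIND)
		return -EOPNOTSUPP;

	block_cb = flow_indr_block_cb_alloc(example_setup_cb, cb_priv, cb_priv,
					    example_release, f, dev, sch, data,
					    cb_priv, cleanup);
	if (IS_ERR(block_cb))
		return PTR_ERR(block_cb);

	flow_block_cb_add(block_cb, f);
	return 0;
}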

static struct flow_indir_dev_info *find_indir_dev(void *data)
{
	struct flow_indir_dev_info *cur;

	list_for_each_entry(cur, &flow_indir_dev_list, list) {
		if (cur->data == data)
			return cur;
	}
	return NULL;
}

static int indir_dev_add(void *data, struct net_device *dev, struct Qdisc *sch,
			 enum tc_setup_type type, void (*cleanup)(struct flow_block_cb *block_cb),
			 struct flow_block_offload *bo)
{
	struct flow_indir_dev_info *info;

	info = find_indir_dev(data);
	if (info)
		return -EEXIST;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	info->data = data;
	info->dev = dev;
	info->sch = sch;
	info->type = type;
	info->cleanup = cleanup;
	info->command = bo->command;
	info->binder_type = bo->binder_type;
	info->cb_list = bo->cb_list_head;

	list_add(&info->list, &flow_indir_dev_list);
	return 0;
}

static int indir_dev_remove(void *data)
{
	struct flow_indir_dev_info *info;

	info = find_indir_dev(data);
	if (!info)
		return -ENOENT;

	list_del(&info->list);

	kfree(info);
	return 0;
}

int flow_indr_dev_setup_offload(struct net_device *dev, struct Qdisc *sch,
				enum tc_setup_type type, void *data,
				struct flow_block_offload *bo,
				void (*cleanup)(struct flow_block_cb *block_cb))
{
	struct flow_indr_dev *this;

	mutex_lock(&flow_indr_block_lock);

	if (bo->command == FLOW_BLOCK_BIND)
		indir_dev_add(data, dev, sch, type, cleanup, bo);
	else if (bo->command == FLOW_BLOCK_UNBIND)
		indir_dev_remove(data);

	list_for_each_entry(this, &flow_block_indr_dev_list, list)
		this->cb(dev, sch, this->cb_priv, type, bo, data, cleanup);

	mutex_unlock(&flow_indr_block_lock);

	return list_empty(&bo->cb_list) ? -EOPNOTSUPP : 0;
}
EXPORT_SYMBOL(flow_indr_dev_setup_offload);
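
/* Caller-side sketch (hypothetical, not part of this file): a subsystem
 * offers a block for a device it does not drive itself; "subsys_obj" is
 * a placeholder for the subsystem object passed back as "data", and
 * -EOPNOTSUPP from the call means no registered driver claimed the block.
 */
static int __maybe_unused example_offer_indr_block(struct net_device *dev,
						   void *subsys_obj,
						   struct flow_block_offload *bo,
						   void (*cleanup)(struct flow_block_cb *block_cb))
{
	return flow_indr_dev_setup_offload(dev, NULL, TC_SETUP_BLOCK,
					   subsys_obj, bo, cleanup);
}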

bool flow_indr_dev_exists(void)
{
	return !list_empty(&flow_block_indr_dev_list);
}
EXPORT_SYMBOL(flow_indr_dev_exists);