1 #ifndef _NF_FLOW_TABLE_H
2 #define _NF_FLOW_TABLE_H
6 #include <linux/netdevice.h>
7 #include <linux/rhashtable-types.h>
8 #include <linux/rcupdate.h>
9 #include <linux/netfilter.h>
10 #include <linux/netfilter/nf_conntrack_tuple_common.h>
11 #include <net/flow_offload.h>
13 #include <linux/if_pppox.h>
14 #include <linux/ppp_defs.h>
/* Forward declaration; the enum itself is defined further down in this header. */
19 enum flow_offload_tuple_dir;
/*
 * nf_flow_key: flow-dissector key layout used to describe a flow when
 * building a hardware offload match (see nf_flow_match below).
 * NOTE(review): the opening "struct nf_flow_key {" line is elided from this
 * chunk; the members below belong to it — confirm against the full header.
 */
22 struct flow_dissector_key_meta meta;
23 struct flow_dissector_key_control control;
24 struct flow_dissector_key_control enc_control;
25 struct flow_dissector_key_basic basic;
/* Outer and inner (customer) VLAN tags. */
26 struct flow_dissector_key_vlan vlan;
27 struct flow_dissector_key_vlan cvlan;
28 struct flow_dissector_key_ipv4_addrs ipv4;
30 struct flow_dissector_key_ipv6_addrs ipv6;
/* Tunnel encapsulation keys (enc_*): tunnel key id and outer addresses. */
32 struct flow_dissector_key_keyid enc_key_id;
34 struct flow_dissector_key_ipv4_addrs enc_ipv4;
35 struct flow_dissector_key_ipv6_addrs enc_ipv6;
37 struct flow_dissector_key_tcp tcp;
38 struct flow_dissector_key_ports tp;
39 } __aligned(BITS_PER_LONG / 8); /* Ensure that we can do comparisons as longs. */
/*
 * nf_flow_match: dissector plus key/mask pair handed to drivers when a
 * flow rule is translated for hardware offload.
 */
41 struct nf_flow_match {
42 struct flow_dissector dissector;
43 struct nf_flow_key key;
44 struct nf_flow_key mask;
/*
 * NOTE(review): the closing brace of nf_flow_match and the opening of
 * "struct nf_flow_rule {" are elided here; the two members below belong
 * to nf_flow_rule (the match plus the flow_rule action list).
 */
48 struct nf_flow_match match;
49 struct flow_rule *rule;
/*
 * nf_flowtable_type: per-family flowtable backend operations.  Instances
 * are linked on a global list via ->list.
 */
52 struct nf_flowtable_type {
53 struct list_head list;
/* Optional backend initialisation hook, run on flowtable setup. */
55 int (*init)(struct nf_flowtable *ft);
/* Bind or unbind a flow block for @dev, per @cmd (enum flow_block_command). */
56 int (*setup)(struct nf_flowtable *ft,
57 struct net_device *dev,
58 enum flow_block_command cmd);
/* Translate @flow in direction @dir into an offload rule in @flow_rule. */
59 int (*action)(struct net *net,
60 const struct flow_offload *flow,
61 enum flow_offload_tuple_dir dir,
62 struct nf_flow_rule *flow_rule);
/* Backend teardown hook, counterpart of ->init. */
63 void (*free)(struct nf_flowtable *ft);
/* NOTE(review): remaining members and closing brace are elided from this chunk. */
/*
 * Flags stored in nf_flowtable->flags; bit values mirror the nftables
 * NFT_FLOWTABLE_* UAPI flags named in the trailing comments.
 */
68 enum nf_flowtable_flags {
69 NF_FLOWTABLE_HW_OFFLOAD = 0x1, /* NFT_FLOWTABLE_HW_OFFLOAD */
70 NF_FLOWTABLE_COUNTER = 0x2, /* NFT_FLOWTABLE_COUNTER */
/*
 * NOTE(review): the "struct nf_flowtable {" opening line is elided from
 * this chunk; the members below belong to it.
 */
74 struct list_head list;
/* Hash table of offloaded flows, keyed by flow_offload_tuple. */
75 struct rhashtable rhashtable;
76 const struct nf_flowtable_type *type;
/* Periodic garbage collection of expired/teardown flows. */
78 struct delayed_work gc_work;
/* Driver callbacks for hardware offload. */
80 struct flow_block flow_block;
81 struct rw_semaphore flow_block_lock; /* Guards flow_block */
/* True when hardware offload is enabled on @flowtable (NF_FLOWTABLE_HW_OFFLOAD set). */
85 static inline bool nf_flowtable_hw_offload(struct nf_flowtable *flowtable)
/* NOTE(review): the function's surrounding braces are elided from this chunk. */
87 return flowtable->flags & NF_FLOWTABLE_HW_OFFLOAD;
/*
 * Per-flow direction, deliberately aliased to the conntrack direction
 * values so the two can be used interchangeably.
 */
90 enum flow_offload_tuple_dir {
91 FLOW_OFFLOAD_DIR_ORIGINAL = IP_CT_DIR_ORIGINAL,
92 FLOW_OFFLOAD_DIR_REPLY = IP_CT_DIR_REPLY,
94 #define FLOW_OFFLOAD_DIR_MAX IP_CT_DIR_MAX
/* How packets matching a flow are transmitted on the fast path. */
96 enum flow_offload_xmit_type {
97 FLOW_OFFLOAD_XMIT_UNSPEC = 0,
/* Resolve the neighbour entry at transmit time. */
98 FLOW_OFFLOAD_XMIT_NEIGH,
/* Hand off to the xfrm (IPsec) stack. */
99 FLOW_OFFLOAD_XMIT_XFRM,
/* Transmit directly using cached device/MAC information. */
100 FLOW_OFFLOAD_XMIT_DIRECT,
101 FLOW_OFFLOAD_XMIT_TC,
/* Maximum number of stacked encapsulations (e.g. VLAN tags) tracked per tuple. */
104 #define NF_FLOW_TABLE_ENCAP_MAX 2
/*
 * flow_offload_tuple: lookup key (plus cached transmit state) for one
 * direction of an offloaded flow.
 * NOTE(review): many members are elided from this chunk (unions, ports,
 * ifindex, protocol fields, encap entries); confirm against the full header.
 */
106 struct flow_offload_tuple {
/* Source address — presumably members of an address union; elided lines hide it. */
108 struct in_addr src_v4;
109 struct in6_addr src_v6;
112 struct in_addr dst_v4;
113 struct in6_addr dst_v6;
/* Per-level encapsulation info; array bound shared with nf_flow_route. */
127 } encap[NF_FLOW_TABLE_ENCAP_MAX];
129 /* All members above are keys for lookups, see flow_offload_hash(). */
/* Cached route for the transmit side. */
139 struct dst_entry *dst_cache;
/* Cached source MAC address for direct transmission. */
145 u8 h_source[ETH_ALEN];
/* Hash-table node embedding one direction's tuple; lives in nf_flowtable.rhashtable. */
154 struct flow_offload_tuple_rhash {
155 struct rhash_head node;
156 struct flow_offload_tuple tuple;
/* Kind of flow entry; ROUTE entries carry cached routing information. */
169 enum flow_offload_type {
170 NF_FLOW_OFFLOAD_UNSPEC = 0,
171 NF_FLOW_OFFLOAD_ROUTE,
/*
 * flow_offload: one offloaded connection, hashed once per direction.
 * NOTE(review): members between tuplehash and rcu_head (flags, conntrack
 * backpointer, timeout) are elided from this chunk.
 */
174 struct flow_offload {
175 struct flow_offload_tuple_rhash tuplehash[FLOW_OFFLOAD_DIR_MAX];
/* Entries are freed after an RCU grace period. */
180 struct rcu_head rcu_head;
/* Default flow timeout: 30 seconds expressed in jiffies. */
183 #define NF_FLOW_TIMEOUT (30 * HZ)
/* Current time stamp, truncated to 32 bits for compact per-flow storage. */
184 #define nf_flowtable_time_stamp (u32)jiffies
186 unsigned long flow_offload_get_timeout(struct flow_offload *flow);
/*
 * Signed distance from now to @timeout; negative once the deadline has
 * passed.  The signed cast makes the comparison wrap-safe.
 * NOTE(review): the function's surrounding braces are elided from this chunk.
 */
188 static inline __s32 nf_flow_timeout_delta(unsigned int timeout)
190 return (__s32)(timeout - nf_flowtable_time_stamp);
/*
 * nf_flow_route: per-direction routing/transmit description supplied by
 * the caller when a flow is offloaded (see flow_offload_route_init()).
 * NOTE(review): several members and nested struct openings are elided
 * from this chunk; confirm the layout against the full header.
 */
193 struct nf_flow_route {
/* Cached route for this direction. */
195 struct dst_entry *dst;
/* Encapsulation descriptors; bound shared with flow_offload_tuple. */
201 } encap[NF_FLOW_TABLE_ENCAP_MAX];
/* Source MAC for direct transmission. */
208 u8 h_source[ETH_ALEN];
211 enum flow_offload_xmit_type xmit_type;
/* One entry per direction (original and reply). */
212 } tuple[FLOW_OFFLOAD_DIR_MAX];
/* Allocate a flow entry for conntrack entry @ct; release it with flow_offload_free(). */
215 struct flow_offload *flow_offload_alloc(struct nf_conn *ct);
216 void flow_offload_free(struct flow_offload *flow);
/*
 * Register a driver flow-block callback on @flow_table, serialised by
 * flow_block_lock.
 * NOTE(review): the return-type line, braces, the duplicate-registration
 * check after the lookup, and the error/unlock paths are elided from this
 * chunk; the visible lines show lock, lookup, alloc, and list insertion.
 */
219 nf_flow_table_offload_add_cb(struct nf_flowtable *flow_table,
220 flow_setup_cb_t *cb, void *cb_priv)
222 struct flow_block *block = &flow_table->flow_block;
223 struct flow_block_cb *block_cb;
226 down_write(&flow_table->flow_block_lock);
/* Look for an existing registration of (cb, cb_priv) first. */
227 block_cb = flow_block_cb_lookup(block, cb, cb_priv);
233 block_cb = flow_block_cb_alloc(cb, cb_priv, cb_priv, NULL);
234 if (IS_ERR(block_cb)) {
235 err = PTR_ERR(block_cb);
239 list_add_tail(&block_cb->list, &block->cb_list);
242 up_write(&flow_table->flow_block_lock);
/*
 * Unregister a previously added flow-block callback, serialised by
 * flow_block_lock.
 * NOTE(review): the return-type line, braces, and the not-found branch
 * after the lookup are elided from this chunk; the visible lines show
 * lock, lookup, unlink, free, and unlock.
 */
247 nf_flow_table_offload_del_cb(struct nf_flowtable *flow_table,
248 flow_setup_cb_t *cb, void *cb_priv)
250 struct flow_block *block = &flow_table->flow_block;
251 struct flow_block_cb *block_cb;
253 down_write(&flow_table->flow_block_lock);
254 block_cb = flow_block_cb_lookup(block, cb, cb_priv);
256 list_del(&block_cb->list);
257 flow_block_cb_free(block_cb);
261 up_write(&flow_table->flow_block_lock);
/* Fill @flow's per-direction transmit state from @route. */
264 int flow_offload_route_init(struct flow_offload *flow,
265 const struct nf_flow_route *route);
/* Insert @flow into @flow_table; pairs with flow_offload_teardown()/free(). */
267 int flow_offload_add(struct nf_flowtable *flow_table, struct flow_offload *flow);
/* Extend the timeout of an active flow. */
268 void flow_offload_refresh(struct nf_flowtable *flow_table,
269 struct flow_offload *flow);
/* Look up the flow entry matching @tuple, or NULL-equivalent on miss. */
271 struct flow_offload_tuple_rhash *flow_offload_lookup(struct nf_flowtable *flow_table,
272 struct flow_offload_tuple *tuple);
/* Run one garbage-collection pass over @flow_table. */
273 void nf_flow_table_gc_run(struct nf_flowtable *flow_table);
/* Remove flows associated with @dev (one table / all tables respectively). */
274 void nf_flow_table_gc_cleanup(struct nf_flowtable *flowtable,
275 struct net_device *dev);
276 void nf_flow_table_cleanup(struct net_device *dev);
/* Table lifetime: init before use, free on destruction. */
278 int nf_flow_table_init(struct nf_flowtable *flow_table);
279 void nf_flow_table_free(struct nf_flowtable *flow_table);
/* Mark @flow for removal; it is reaped by garbage collection. */
281 void flow_offload_teardown(struct flow_offload *flow);
/* Rewrite the L4 source/destination port in @skb for NAT, per @dir. */
283 void nf_flow_snat_port(const struct flow_offload *flow,
284 struct sk_buff *skb, unsigned int thoff,
285 u8 protocol, enum flow_offload_tuple_dir dir);
286 void nf_flow_dnat_port(const struct flow_offload *flow,
287 struct sk_buff *skb, unsigned int thoff,
288 u8 protocol, enum flow_offload_tuple_dir dir);
/* Netfilter hook entry points implementing the IPv4/IPv6 software fast path. */
294 unsigned int nf_flow_offload_ip_hook(void *priv, struct sk_buff *skb,
295 const struct nf_hook_state *state);
296 unsigned int nf_flow_offload_ipv6_hook(void *priv, struct sk_buff *skb,
297 const struct nf_hook_state *state);
/* Module autoload alias for per-family flowtable modules. */
299 #define MODULE_ALIAS_NF_FLOWTABLE(family) \
300 MODULE_ALIAS("nf-flowtable-" __stringify(family))
/* Queue hardware offload work: program, remove, or query stats for @flow. */
302 void nf_flow_offload_add(struct nf_flowtable *flowtable,
303 struct flow_offload *flow);
304 void nf_flow_offload_del(struct nf_flowtable *flowtable,
305 struct flow_offload *flow);
306 void nf_flow_offload_stats(struct nf_flowtable *flowtable,
307 struct flow_offload *flow);
/* Flush pending offload work; the _cleanup variant also releases entries. */
309 void nf_flow_table_offload_flush(struct nf_flowtable *flowtable);
310 void nf_flow_table_offload_flush_cleanup(struct nf_flowtable *flowtable);
/* Bind/unbind the hardware flow block on @dev per @cmd. */
312 int nf_flow_table_offload_setup(struct nf_flowtable *flowtable,
313 struct net_device *dev,
314 enum flow_block_command cmd);
/* Default ->action implementations building IPv4/IPv6 offload rules. */
315 int nf_flow_rule_route_ipv4(struct net *net, const struct flow_offload *flow,
316 enum flow_offload_tuple_dir dir,
317 struct nf_flow_rule *flow_rule);
318 int nf_flow_rule_route_ipv6(struct net *net, const struct flow_offload *flow,
319 enum flow_offload_tuple_dir dir,
320 struct nf_flow_rule *flow_rule);
/* One-time init/exit of the offload workqueue infrastructure. */
322 int nf_flow_table_offload_init(void);
323 void nf_flow_table_offload_exit(void);
/*
 * Read the PPP protocol field that immediately follows the PPPoE header
 * in @skb's MAC header and map it to the corresponding Ethernet protocol.
 * NOTE(review): the braces, the local declaration, the switch header, the
 * PPP_IP case label, and the fallthrough return are elided from this
 * chunk; only the load and the IPv4/IPv6 mappings are visible.
 */
325 static inline __be16 nf_flow_pppoe_proto(const struct sk_buff *skb)
329 proto = *((__be16 *)(skb_mac_header(skb) + ETH_HLEN +
330 sizeof(struct pppoe_hdr)));
333 return htons(ETH_P_IP);
334 case htons(PPP_IPV6):
335 return htons(ETH_P_IPV6);
/*
 * Per-cpu flowtable statistics helpers operating on net->ft.stat.
 * The plain variants use __this_cpu_* (caller must have preemption
 * disabled); the _ATOMIC variants use this_cpu_* and are safe anywhere.
 */
341 #define NF_FLOW_TABLE_STAT_INC(net, count) __this_cpu_inc((net)->ft.stat->count)
342 #define NF_FLOW_TABLE_STAT_DEC(net, count) __this_cpu_dec((net)->ft.stat->count)
343 #define NF_FLOW_TABLE_STAT_INC_ATOMIC(net, count) \
344 this_cpu_inc((net)->ft.stat->count)
345 #define NF_FLOW_TABLE_STAT_DEC_ATOMIC(net, count) \
346 this_cpu_dec((net)->ft.stat->count)
/*
 * Per-netns procfs entries for flowtable statistics; compiled out to
 * inline stubs when CONFIG_NF_FLOW_TABLE_PROCFS is unset.
 * NOTE(review): the #else line and the stub bodies are elided from this
 * chunk.
 */
348 #ifdef CONFIG_NF_FLOW_TABLE_PROCFS
349 int nf_flow_table_init_proc(struct net *net);
350 void nf_flow_table_fini_proc(struct net *net);
352 static inline int nf_flow_table_init_proc(struct net *net)
357 static inline void nf_flow_table_fini_proc(struct net *net)
360 #endif /* CONFIG_NF_FLOW_TABLE_PROCFS */
362 #endif /* _NF_FLOW_TABLE_H */