GNU Linux-libre 4.14.295-gnu1
[releases.git] / net / netfilter / nf_queue.c
1 /*
2  * Rusty Russell (C)2000 -- This code is GPL.
3  * Patrick McHardy (c) 2006-2012
4  */
5
6 #include <linux/kernel.h>
7 #include <linux/slab.h>
8 #include <linux/init.h>
9 #include <linux/module.h>
10 #include <linux/proc_fs.h>
11 #include <linux/skbuff.h>
12 #include <linux/netfilter.h>
13 #include <linux/netfilter_bridge.h>
14 #include <linux/seq_file.h>
15 #include <linux/rcupdate.h>
16 #include <net/protocol.h>
17 #include <net/netfilter/nf_queue.h>
18 #include <net/dst.h>
19
20 #include "nf_internals.h"
21
22 /*
23  * Hook for nfnetlink_queue to register its queue handler.
24  * We do this so that most of the NFQUEUE code can be modular.
25  *
26  * Once the queue is registered it must reinject all packets it
27  * receives, no matter what.
28  */
29
/* Only one queue handler (nfnetlink_queue) may be registered per netns at
 * a time.  Registration does not return an error: attempting to register
 * while a handler is already present triggers a WARN instead. */
/* Register @qh as the queue handler for @net.
 * rcu_assign_pointer() publishes the handler with the required memory
 * barrier so concurrent readers in __nf_queue() never observe a
 * half-initialised handler.
 */
void nf_register_queue_handler(struct net *net, const struct nf_queue_handler *qh)
{
	/* should never happen, we only have one queueing backend in kernel */
	WARN_ON(rcu_access_pointer(net->nf.queue_handler));
	rcu_assign_pointer(net->nf.queue_handler, qh);
}
EXPORT_SYMBOL(nf_register_queue_handler);
39
/* The caller must flush their queue before this */
void nf_unregister_queue_handler(struct net *net)
{
	/* RCU_INIT_POINTER() (no barrier) is sufficient when storing NULL:
	 * there is no pointed-to data a reader could see half-initialised.
	 */
	RCU_INIT_POINTER(net->nf.queue_handler, NULL);
}
EXPORT_SYMBOL(nf_unregister_queue_handler);
46
/* Drop the socket reference taken when the packet was queued. */
static void nf_queue_sock_put(struct sock *sk)
{
#ifndef CONFIG_INET
	sock_put(sk);
#else
	/* sock_gen_put() also handles request and timewait mini-sockets */
	sock_gen_put(sk);
#endif
}
55
/* Drop the references (in/out devices, socket, bridge physdevs) that
 * nf_queue_entry_get_refs() took when the packet was handed to userspace.
 * Must mirror that function exactly: one put per hold.
 */
void nf_queue_entry_release_refs(struct nf_queue_entry *entry)
{
	struct nf_hook_state *state = &entry->state;

	/* Release those devices we held, or Alexey will kill me. */
	if (state->in)
		dev_put(state->in);
	if (state->out)
		dev_put(state->out);
	if (state->sk)
		nf_queue_sock_put(state->sk);
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
	if (entry->skb->nf_bridge) {
		struct net_device *physdev;

		/* bridge ports were held independently of state->in/out */
		physdev = nf_bridge_get_physindev(entry->skb);
		if (physdev)
			dev_put(physdev);
		physdev = nf_bridge_get_physoutdev(entry->skb);
		if (physdev)
			dev_put(physdev);
	}
#endif
}
EXPORT_SYMBOL_GPL(nf_queue_entry_release_refs);
81
82 /* Bump dev refs so they don't vanish while packet is out */
83 bool nf_queue_entry_get_refs(struct nf_queue_entry *entry)
84 {
85         struct nf_hook_state *state = &entry->state;
86
87         if (state->sk && !refcount_inc_not_zero(&state->sk->sk_refcnt))
88                 return false;
89
90         if (state->in)
91                 dev_hold(state->in);
92         if (state->out)
93                 dev_hold(state->out);
94         if (state->sk)
95                 sock_hold(state->sk);
96 #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
97         if (entry->skb->nf_bridge) {
98                 struct net_device *physdev;
99
100                 physdev = nf_bridge_get_physindev(entry->skb);
101                 if (physdev)
102                         dev_hold(physdev);
103                 physdev = nf_bridge_get_physoutdev(entry->skb);
104                 if (physdev)
105                         dev_hold(physdev);
106         }
107 #endif
108         return true;
109 }
110 EXPORT_SYMBOL_GPL(nf_queue_entry_get_refs);
111
112 unsigned int nf_queue_nf_hook_drop(struct net *net)
113 {
114         const struct nf_queue_handler *qh;
115         unsigned int count = 0;
116
117         rcu_read_lock();
118         qh = rcu_dereference(net->nf.queue_handler);
119         if (qh)
120                 count = qh->nf_hook_drop(net);
121         rcu_read_unlock();
122
123         return count;
124 }
125 EXPORT_SYMBOL_GPL(nf_queue_nf_hook_drop);
126
/* Hand one skb to the userspace queue handler.
 *
 * Returns 0 on success (the handler now owns the entry).  On failure the
 * caller still owns the skb and gets a negative errno:
 *   -ESRCH    no queue handler registered (caller may apply queue-bypass)
 *   -ENOENT   no afinfo for this protocol family
 *   -ENOMEM   entry allocation failed
 *   -ENETDOWN the skb's dst is already marked dead
 *   -ENOTCONN the socket in the hook state is being destroyed
 */
static int __nf_queue(struct sk_buff *skb, const struct nf_hook_state *state,
		      const struct nf_hook_entries *entries,
		      unsigned int index, unsigned int queuenum)
{
	int status = -ENOENT;
	struct nf_queue_entry *entry = NULL;
	const struct nf_afinfo *afinfo;
	const struct nf_queue_handler *qh;
	struct net *net = state->net;

	/* QUEUE == DROP if no one is waiting, to be safe. */
	qh = rcu_dereference(net->nf.queue_handler);
	if (!qh) {
		status = -ESRCH;
		goto err;
	}

	afinfo = nf_get_afinfo(state->pf);
	if (!afinfo)
		goto err;

	/* route_key_size extra bytes hold the per-family route key */
	entry = kmalloc(sizeof(*entry) + afinfo->route_key_size, GFP_ATOMIC);
	if (!entry) {
		status = -ENOMEM;
		goto err;
	}

	/* take a dst reference; fails if the dst is already dead */
	if (skb_dst(skb) && !skb_dst_force(skb)) {
		status = -ENETDOWN;
		goto err;
	}

	*entry = (struct nf_queue_entry) {
		.skb	= skb,
		.state	= *state,
		.hook_index = index,
		.size	= sizeof(*entry) + afinfo->route_key_size,
	};

	/* pin devices/socket so they cannot vanish while in userspace */
	if (!nf_queue_entry_get_refs(entry)) {
		kfree(entry);
		return -ENOTCONN;
	}

	afinfo->saveroute(skb, entry);
	status = qh->outfn(entry, queuenum);

	if (status < 0) {
		nf_queue_entry_release_refs(entry);
		goto err;
	}

	return 0;

err:
	kfree(entry);	/* kfree(NULL) is a no-op on the early error paths */
	return status;
}
185
186 /* Packets leaving via this function must come back through nf_reinject(). */
187 int nf_queue(struct sk_buff *skb, struct nf_hook_state *state,
188              const struct nf_hook_entries *entries, unsigned int index,
189              unsigned int verdict)
190 {
191         int ret;
192
193         ret = __nf_queue(skb, state, entries, index, verdict >> NF_VERDICT_QBITS);
194         if (ret < 0) {
195                 if (ret == -ESRCH &&
196                     (verdict & NF_VERDICT_FLAG_QUEUE_BYPASS))
197                         return 1;
198                 kfree_skb(skb);
199         }
200
201         return 0;
202 }
203
/* Run hooks in @hooks starting at *index until one returns a verdict
 * other than NF_ACCEPT.  NF_REPEAT re-invokes the same hook.  On return,
 * *index is the position of the hook that issued the final verdict, or
 * num_hook_entries when every remaining hook accepted.
 */
static unsigned int nf_iterate(struct sk_buff *skb,
			       struct nf_hook_state *state,
			       const struct nf_hook_entries *hooks,
			       unsigned int *index)
{
	const struct nf_hook_entry *hook;
	unsigned int verdict, i = *index;

	while (i < hooks->num_hook_entries) {
		hook = &hooks->hooks[i];
repeat:
		verdict = nf_hook_entry_hookfn(hook, skb, state);
		if (verdict != NF_ACCEPT) {
			/* record position before any non-ACCEPT outcome so a
			 * later requeue resumes at the right hook */
			*index = i;
			if (verdict != NF_REPEAT)
				return verdict;
			goto repeat;
		}
		i++;
	}

	*index = i;
	return NF_ACCEPT;
}
228
/* Caller must hold rcu read-side lock.
 *
 * Resume hook traversal for a packet coming back from the userspace queue
 * handler, applying @verdict as the outcome of the hook that queued it:
 * NF_REPEAT re-runs that hook first; NF_ACCEPT revalidates the route and
 * then continues with the following hooks.  The final verdict delivers the
 * skb (okfn), requeues it, leaves it stolen, or drops it.  @entry is
 * always freed before returning.
 */
void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict)
{
	const struct nf_hook_entry *hook_entry;
	const struct nf_hook_entries *hooks;
	struct sk_buff *skb = entry->skb;
	const struct nf_afinfo *afinfo;
	const struct net *net;
	unsigned int i;
	int err;
	u8 pf;

	net = entry->state.net;
	pf = entry->state.pf;

	hooks = rcu_dereference(net->nf.hooks[pf][entry->state.hook]);

	nf_queue_entry_release_refs(entry);

	/* the hook list may have shrunk while the packet was queued */
	i = entry->hook_index;
	if (WARN_ON_ONCE(i >= hooks->num_hook_entries)) {
		kfree_skb(skb);
		kfree(entry);
		return;
	}

	hook_entry = &hooks->hooks[i];

	/* Continue traversal iff userspace said ok... */
	if (verdict == NF_REPEAT)
		verdict = nf_hook_entry_hookfn(hook_entry, skb, &entry->state);

	if (verdict == NF_ACCEPT) {
		/* route may have changed while queued; reroute or drop */
		afinfo = nf_get_afinfo(entry->state.pf);
		if (!afinfo || afinfo->reroute(entry->state.net, skb, entry) < 0)
			verdict = NF_DROP;
	}

	if (verdict == NF_ACCEPT) {
next_hook:
		++i;
		verdict = nf_iterate(skb, &entry->state, hooks, &i);
	}

	switch (verdict & NF_VERDICT_MASK) {
	case NF_ACCEPT:
	case NF_STOP:
		local_bh_disable();
		entry->state.okfn(entry->state.net, entry->state.sk, skb);
		local_bh_enable();
		break;
	case NF_QUEUE:
		/* err == 1 means queue-bypass: keep walking the hooks */
		err = nf_queue(skb, &entry->state, hooks, i, verdict);
		if (err == 1)
			goto next_hook;
		break;
	case NF_STOLEN:
		/* hook took ownership of the skb; nothing to free here */
		break;
	default:
		kfree_skb(skb);
	}

	kfree(entry);
}
EXPORT_SYMBOL(nf_reinject);