// SPDX-License-Identifier: GPL-2.0
/*
 * To speed up listener socket lookup, create an array to store all sockets
 * listening on the same port. This allows a decision to be made after finding
 * the first socket. An optional BPF program can also be configured for
 * selecting the socket index from the array of available sockets.
 */
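
/*
 * For orientation, a minimal sketch of the userspace side this file serves
 * (illustrative only, not part of this source): each listener enables
 * SO_REUSEPORT before bind() so that all of them can share the port and
 * end up in the same reuseport group (addr below is the shared local
 * address/port):
 *
 *        int fd = socket(AF_INET, SOCK_STREAM, 0);
 *        int one = 1;
 *
 *        setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &one, sizeof(one));
 *        bind(fd, (struct sockaddr *)&addr, sizeof(addr));
 *        listen(fd, SOMAXCONN);
 */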

#include <net/sock_reuseport.h>
#include <linux/bpf.h>
#include <linux/idr.h>
#include <linux/filter.h>
#include <linux/rcupdate.h>

#define INIT_SOCKS 128

DEFINE_SPINLOCK(reuseport_lock);

#define REUSEPORT_MIN_ID 1
static DEFINE_IDA(reuseport_ida);
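
/*
 * Lazily assign an IDA-backed identifier to a reuseport group. The ID is
 * how the BPF side (e.g. a reuseport socket-array map) refers to the group;
 * see the bpf_sk_reuseport_detach() notification in reuseport_detach_sock()
 * below. Expected to run with reuseport_lock held.
 */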
int reuseport_get_id(struct sock_reuseport *reuse)
{
        int id;

        if (reuse->reuseport_id)
                return reuse->reuseport_id;

        id = ida_simple_get(&reuseport_ida, REUSEPORT_MIN_ID, 0,
                            /* Called under reuseport_lock */
                            GFP_ATOMIC);
        if (id < 0)
                return id;

        reuse->reuseport_id = id;
        return reuse->reuseport_id;
}

static struct sock_reuseport *__reuseport_alloc(unsigned int max_socks)
{
        unsigned int size = sizeof(struct sock_reuseport) +
                            sizeof(struct sock *) * max_socks;
        struct sock_reuseport *reuse = kzalloc(size, GFP_ATOMIC);

        if (!reuse)
                return NULL;

        reuse->max_socks = max_socks;
        RCU_INIT_POINTER(reuse->prog, NULL);
        return reuse;
}
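
/*
 * Attach a freshly allocated group to @sk, making it the first member.
 * Reached from both the bind/hash path and the setsockopt path (see the
 * comments below), so losing the allocation race is not an error.
 */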
int reuseport_alloc(struct sock *sk, bool bind_inany)
{
        struct sock_reuseport *reuse;

        /* bh lock used since this function call may precede hlist lock in
         * soft irq of receive path or setsockopt from process context
         */
        spin_lock_bh(&reuseport_lock);

        /* Allocation attempts can occur concurrently via the setsockopt path
         * and the bind/hash path. Nothing to do when we lose the race.
         */
        reuse = rcu_dereference_protected(sk->sk_reuseport_cb,
                                          lockdep_is_held(&reuseport_lock));
        if (reuse) {
                /* Only set reuse->bind_inany if the bind_inany is true.
                 * Otherwise, it will overwrite the reuse->bind_inany
                 * which was set by the bind/hash path.
                 */
                if (bind_inany)
                        reuse->bind_inany = bind_inany;
                goto out;
        }

        reuse = __reuseport_alloc(INIT_SOCKS);
        if (!reuse) {
                spin_unlock_bh(&reuseport_lock);
                return -ENOMEM;
        }

        reuse->socks[0] = sk;
        reuse->num_socks = 1;
        reuse->bind_inany = bind_inany;
        rcu_assign_pointer(sk->sk_reuseport_cb, reuse);

out:
        spin_unlock_bh(&reuseport_lock);
        return 0;
}
EXPORT_SYMBOL(reuseport_alloc);
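
/*
 * Double the capacity of the socket array. Runs with reuseport_lock held;
 * every member socket is switched over to the new sock_reuseport before
 * the old one is released via RCU.
 */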
static struct sock_reuseport *reuseport_grow(struct sock_reuseport *reuse)
{
        struct sock_reuseport *more_reuse;
        u32 more_socks_size, i;

        more_socks_size = reuse->max_socks * 2U;
        if (more_socks_size > U16_MAX)
                return NULL;

        more_reuse = __reuseport_alloc(more_socks_size);
        if (!more_reuse)
                return NULL;

        more_reuse->max_socks = more_socks_size;
        more_reuse->num_socks = reuse->num_socks;
        more_reuse->prog = reuse->prog;
        more_reuse->reuseport_id = reuse->reuseport_id;
        more_reuse->bind_inany = reuse->bind_inany;
        more_reuse->has_conns = reuse->has_conns;

        memcpy(more_reuse->socks, reuse->socks,
               reuse->num_socks * sizeof(struct sock *));
        more_reuse->synq_overflow_ts = READ_ONCE(reuse->synq_overflow_ts);

        for (i = 0; i < reuse->num_socks; ++i)
                rcu_assign_pointer(reuse->socks[i]->sk_reuseport_cb,
                                   more_reuse);

        /* Note: we use kfree_rcu here instead of reuseport_free_rcu so
         * that reuse and more_reuse can temporarily share a reference
         * to prog.
         */
        kfree_rcu(reuse, rcu);
        return more_reuse;
}

static void reuseport_free_rcu(struct rcu_head *head)
{
        struct sock_reuseport *reuse;

        reuse = container_of(head, struct sock_reuseport, rcu);
        sk_reuseport_prog_free(rcu_dereference_protected(reuse->prog, 1));
        if (reuse->reuseport_id)
                ida_simple_remove(&reuseport_ida, reuse->reuseport_id);
        kfree(reuse);
}

/**
 *  reuseport_add_sock - Add a socket to the reuseport group of another.
 *  @sk:  New socket to add to the group.
 *  @sk2: Socket belonging to the existing reuseport group.
 *  May return ENOMEM and not add socket to group under memory pressure.
 */
int reuseport_add_sock(struct sock *sk, struct sock *sk2, bool bind_inany)
{
        struct sock_reuseport *old_reuse, *reuse;

        if (!rcu_access_pointer(sk2->sk_reuseport_cb)) {
                int err = reuseport_alloc(sk2, bind_inany);

                if (err)
                        return err;
        }

        spin_lock_bh(&reuseport_lock);
        reuse = rcu_dereference_protected(sk2->sk_reuseport_cb,
                                          lockdep_is_held(&reuseport_lock));
        old_reuse = rcu_dereference_protected(sk->sk_reuseport_cb,
                                              lockdep_is_held(&reuseport_lock));
        if (old_reuse && old_reuse->num_socks != 1) {
                spin_unlock_bh(&reuseport_lock);
                return -EBUSY;
        }

        if (reuse->num_socks == reuse->max_socks) {
                reuse = reuseport_grow(reuse);
                if (!reuse) {
                        spin_unlock_bh(&reuseport_lock);
                        return -ENOMEM;
                }
        }

        reuse->socks[reuse->num_socks] = sk;
        /* paired with smp_rmb() in reuseport_select_sock() */
        smp_wmb();
        reuse->num_socks++;
        rcu_assign_pointer(sk->sk_reuseport_cb, reuse);

        spin_unlock_bh(&reuseport_lock);

        if (old_reuse)
                call_rcu(&old_reuse->rcu, reuseport_free_rcu);
        return 0;
}
EXPORT_SYMBOL(reuseport_add_sock);
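
/*
 * Remove @sk from its reuseport group (e.g. when the socket is unhashed or
 * closed). The last socket to leave frees the group via RCU.
 */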
void reuseport_detach_sock(struct sock *sk)
{
        struct sock_reuseport *reuse;
        int i;

        spin_lock_bh(&reuseport_lock);
        reuse = rcu_dereference_protected(sk->sk_reuseport_cb,
                                          lockdep_is_held(&reuseport_lock));

        /* At least one of the sk in this reuseport group is added to
         * a bpf map. Notify the bpf side. The bpf map logic will
         * remove the sk if it is indeed added to a bpf map.
         */
        if (reuse->reuseport_id)
                bpf_sk_reuseport_detach(sk);

        rcu_assign_pointer(sk->sk_reuseport_cb, NULL);

        for (i = 0; i < reuse->num_socks; i++) {
                if (reuse->socks[i] == sk) {
                        reuse->socks[i] = reuse->socks[reuse->num_socks - 1];
                        reuse->num_socks--;
                        if (reuse->num_socks == 0)
                                call_rcu(&reuse->rcu, reuseport_free_rcu);
                        break;
                }
        }
        spin_unlock_bh(&reuseport_lock);
}
EXPORT_SYMBOL(reuseport_detach_sock);
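
/*
 * Run a (c/e)BPF socket-filter style program (typically attached with the
 * SO_ATTACH_REUSEPORT_CBPF/EBPF socket options). Its return value is used
 * as an index into the socket array; an out-of-range index makes the
 * caller fall back to hash-based selection.
 */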
static struct sock *run_bpf_filter(struct sock_reuseport *reuse, u16 socks,
                                   struct bpf_prog *prog, struct sk_buff *skb,
                                   int hdr_len)
{
        struct sk_buff *nskb = NULL;
        u32 index;

        if (skb_shared(skb)) {
                nskb = skb_clone(skb, GFP_ATOMIC);
                if (!nskb)
                        return NULL;
                skb = nskb;
        }

        /* temporarily advance data past protocol header */
        if (!pskb_pull(skb, hdr_len)) {
                kfree_skb(nskb);
                return NULL;
        }
        index = bpf_prog_run_save_cb(prog, skb);
        __skb_push(skb, hdr_len);
        consume_skb(nskb);

        if (index >= socks)
                return NULL;
        return reuse->socks[index];
}

/**
 *  reuseport_select_sock - Select a socket from an SO_REUSEPORT group.
 *  @sk: First socket in the group.
 *  @hash: When no BPF filter is available, use this hash to select.
 *  @skb: skb to run through BPF filter.
 *  @hdr_len: BPF filter expects skb data pointer at payload data. If
 *    the skb does not yet point at the payload, this parameter represents
 *    how far the pointer needs to advance to reach the payload.
 *  Returns a socket that should receive the packet (or NULL on error).
 */
struct sock *reuseport_select_sock(struct sock *sk,
                                   u32 hash,
                                   struct sk_buff *skb,
                                   int hdr_len)
{
        struct sock_reuseport *reuse;
        struct bpf_prog *prog;
        struct sock *sk2 = NULL;
        u16 socks;

        rcu_read_lock();
        reuse = rcu_dereference(sk->sk_reuseport_cb);

        /* if memory allocation failed or add call is not yet complete */
        if (!reuse)
                goto out;

        prog = rcu_dereference(reuse->prog);
        socks = READ_ONCE(reuse->num_socks);
        if (likely(socks)) {
                /* paired with smp_wmb() in reuseport_add_sock() */
                smp_rmb();

                if (!prog || !skb)
                        goto select_by_hash;

                if (prog->type == BPF_PROG_TYPE_SK_REUSEPORT)
                        sk2 = bpf_run_sk_reuseport(reuse, sk, prog, skb, hash);
                else
                        sk2 = run_bpf_filter(reuse, socks, prog, skb, hdr_len);

select_by_hash:
                /* no bpf or invalid bpf result: fall back to hash usage */
                if (!sk2) {
                        int i, j;

                        i = j = reciprocal_scale(hash, socks);
                        while (reuse->socks[i]->sk_state == TCP_ESTABLISHED) {
                                i++;
                                if (i >= reuse->num_socks)
                                        i = 0;
                                if (i == j)
                                        goto out;
                        }
                        sk2 = reuse->socks[i];
                }
        }

out:
        rcu_read_unlock();
        return sk2;
}
EXPORT_SYMBOL(reuseport_select_sock);
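
/*
 * Publish @prog as the group's selection program, replacing any previous
 * one (typically reached via the SO_ATTACH_REUSEPORT_CBPF/EBPF socket
 * options). The old program is freed only after it has been unpublished.
 */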
int reuseport_attach_prog(struct sock *sk, struct bpf_prog *prog)
{
        struct sock_reuseport *reuse;
        struct bpf_prog *old_prog;

        if (sk_unhashed(sk) && sk->sk_reuseport) {
                int err = reuseport_alloc(sk, false);

                if (err)
                        return err;
        } else if (!rcu_access_pointer(sk->sk_reuseport_cb)) {
                /* The socket wasn't bound with SO_REUSEPORT */
                return -EINVAL;
        }

        spin_lock_bh(&reuseport_lock);
        reuse = rcu_dereference_protected(sk->sk_reuseport_cb,
                                          lockdep_is_held(&reuseport_lock));
        old_prog = rcu_dereference_protected(reuse->prog,
                                             lockdep_is_held(&reuseport_lock));
        rcu_assign_pointer(reuse->prog, prog);
        spin_unlock_bh(&reuseport_lock);

        sk_reuseport_prog_free(old_prog);
        return 0;
}
EXPORT_SYMBOL(reuseport_attach_prog);