Mention branches and keyring.
[releases.git] / ipv6 / inet6_hashtables.c
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * INET         An implementation of the TCP/IP protocol suite for the LINUX
4  *              operating system.  INET is implemented using the BSD Socket
5  *              interface as the means of communication with the user level.
6  *
7  *              Generic INET6 transport hashtables
8  *
9  * Authors:     Lotsa people, from code originally in tcp, generalised here
10  *              by Arnaldo Carvalho de Melo <acme@mandriva.com>
11  */
12
13 #include <linux/module.h>
14 #include <linux/random.h>
15
16 #include <net/addrconf.h>
17 #include <net/inet_connection_sock.h>
18 #include <net/inet_hashtables.h>
19 #include <net/inet6_hashtables.h>
20 #include <net/secure_seq.h>
21 #include <net/ip.h>
22 #include <net/sock_reuseport.h>
23
24 u32 inet6_ehashfn(const struct net *net,
25                   const struct in6_addr *laddr, const u16 lport,
26                   const struct in6_addr *faddr, const __be16 fport)
27 {
28         static u32 inet6_ehash_secret __read_mostly;
29         static u32 ipv6_hash_secret __read_mostly;
30
31         u32 lhash, fhash;
32
33         net_get_random_once(&inet6_ehash_secret, sizeof(inet6_ehash_secret));
34         net_get_random_once(&ipv6_hash_secret, sizeof(ipv6_hash_secret));
35
36         lhash = (__force u32)laddr->s6_addr32[3];
37         fhash = __ipv6_addr_jhash(faddr, ipv6_hash_secret);
38
39         return __inet6_ehashfn(lhash, lport, fhash, fport,
40                                inet6_ehash_secret + net_hash_mix(net));
41 }
42
43 /*
44  * Sockets in TCP_CLOSE state are _always_ taken out of the hash, so
45  * we need not check it for TCP lookups anymore, thanks Alexey. -DaveM
46  *
47  * The sockhash lock must be held as a reader here.
48  */
/* Lockless (RCU) lookup of an established (non-listening) IPv6 socket by
 * exact 4-tuple.  saddr/sport are the remote end, daddr/hnum the local end
 * (hnum in host byte order).
 *
 * Returns the socket with its refcount taken, or NULL.  Must be called
 * from an RCU read-side critical section.
 */
struct sock *__inet6_lookup_established(struct net *net,
                                        struct inet_hashinfo *hashinfo,
                                           const struct in6_addr *saddr,
                                           const __be16 sport,
                                           const struct in6_addr *daddr,
                                           const u16 hnum,
                                           const int dif, const int sdif)
{
        struct sock *sk;
        const struct hlist_nulls_node *node;
        const __portpair ports = INET_COMBINED_PORTS(sport, hnum);
        /* Optimize here for direct hit, only listening connections can
         * have wildcards anyways.
         */
        unsigned int hash = inet6_ehashfn(net, daddr, hnum, saddr, sport);
        unsigned int slot = hash & hashinfo->ehash_mask;
        struct inet_ehash_bucket *head = &hashinfo->ehash[slot];


begin:
        sk_nulls_for_each_rcu(sk, node, &head->chain) {
                /* Cheap sk_hash compare first; full tuple match only on
                 * hash hits.
                 */
                if (sk->sk_hash != hash)
                        continue;
                if (!inet6_match(net, sk, saddr, daddr, ports, dif, sdif))
                        continue;
                /* Socket may be freed/reused concurrently; only proceed if
                 * we can take a reference on a still-live refcount.
                 */
                if (unlikely(!refcount_inc_not_zero(&sk->sk_refcnt)))
                        goto out;

                /* Re-check the tuple after taking the reference: the slot
                 * may have been recycled for a different connection between
                 * the first match and the refcount increment.
                 */
                if (unlikely(!inet6_match(net, sk, saddr, daddr, ports, dif, sdif))) {
                        sock_gen_put(sk);
                        goto begin;
                }
                goto found;
        }
        /* On a nulls list, ending on a foreign slot's nulls value means we
         * were moved to another chain mid-walk; restart the scan.
         */
        if (get_nulls_value(node) != slot)
                goto begin;
out:
        sk = NULL;
found:
        return sk;
}
EXPORT_SYMBOL(__inet6_lookup_established);
91
92 static inline int compute_score(struct sock *sk, struct net *net,
93                                 const unsigned short hnum,
94                                 const struct in6_addr *daddr,
95                                 const int dif, const int sdif)
96 {
97         int score = -1;
98
99         if (net_eq(sock_net(sk), net) && inet_sk(sk)->inet_num == hnum &&
100             sk->sk_family == PF_INET6) {
101                 if (!ipv6_addr_equal(&sk->sk_v6_rcv_saddr, daddr))
102                         return -1;
103
104                 if (!inet_sk_bound_dev_eq(net, sk->sk_bound_dev_if, dif, sdif))
105                         return -1;
106
107                 score =  sk->sk_bound_dev_if ? 2 : 1;
108                 if (READ_ONCE(sk->sk_incoming_cpu) == raw_smp_processor_id())
109                         score++;
110         }
111         return score;
112 }
113
114 static inline struct sock *lookup_reuseport(struct net *net, struct sock *sk,
115                                             struct sk_buff *skb, int doff,
116                                             const struct in6_addr *saddr,
117                                             __be16 sport,
118                                             const struct in6_addr *daddr,
119                                             unsigned short hnum)
120 {
121         struct sock *reuse_sk = NULL;
122         u32 phash;
123
124         if (sk->sk_reuseport) {
125                 phash = inet6_ehashfn(net, daddr, hnum, saddr, sport);
126                 reuse_sk = reuseport_select_sock(sk, phash, skb, doff);
127         }
128         return reuse_sk;
129 }
130
131 /* called with rcu_read_lock() */
/* called with rcu_read_lock() */
/* Scan one lhash2 bucket for the best-scoring listener for
 * (daddr, hnum).  If the best candidate is in a SO_REUSEPORT group, the
 * group's selection wins immediately; otherwise the highest compute_score()
 * result is returned (NULL if nothing scores above 0).
 */
static struct sock *inet6_lhash2_lookup(struct net *net,
                struct inet_listen_hashbucket *ilb2,
                struct sk_buff *skb, int doff,
                const struct in6_addr *saddr,
                const __be16 sport, const struct in6_addr *daddr,
                const unsigned short hnum, const int dif, const int sdif)
{
        struct sock *sk, *result = NULL;
        struct hlist_nulls_node *node;
        int score, hiscore = 0;

        sk_nulls_for_each_rcu(sk, node, &ilb2->nulls_head) {
                score = compute_score(sk, net, hnum, daddr, dif, sdif);
                if (score > hiscore) {
                        /* A reuseport group member short-circuits the scan. */
                        result = lookup_reuseport(net, sk, skb, doff,
                                                  saddr, sport, daddr, hnum);
                        if (result)
                                return result;

                        result = sk;
                        hiscore = score;
                }
        }

        return result;
}
158
159 static inline struct sock *inet6_lookup_run_bpf(struct net *net,
160                                                 struct inet_hashinfo *hashinfo,
161                                                 struct sk_buff *skb, int doff,
162                                                 const struct in6_addr *saddr,
163                                                 const __be16 sport,
164                                                 const struct in6_addr *daddr,
165                                                 const u16 hnum, const int dif)
166 {
167         struct sock *sk, *reuse_sk;
168         bool no_reuseport;
169
170         if (hashinfo != net->ipv4.tcp_death_row.hashinfo)
171                 return NULL; /* only TCP is supported */
172
173         no_reuseport = bpf_sk_lookup_run_v6(net, IPPROTO_TCP, saddr, sport,
174                                             daddr, hnum, dif, &sk);
175         if (no_reuseport || IS_ERR_OR_NULL(sk))
176                 return sk;
177
178         reuse_sk = lookup_reuseport(net, sk, skb, doff, saddr, sport, daddr, hnum);
179         if (reuse_sk)
180                 sk = reuse_sk;
181         return sk;
182 }
183
/* Find the listening socket for an incoming IPv6 TCP segment.
 *
 * Order matters: a BPF sk_lookup redirect wins first; then the lhash2
 * bucket for the exact destination address; finally the in6addr_any
 * (wildcard-bind) bucket.  An ERR_PTR from BPF means "drop" and is
 * reported as NULL.  Caller must hold rcu_read_lock().
 */
struct sock *inet6_lookup_listener(struct net *net,
                struct inet_hashinfo *hashinfo,
                struct sk_buff *skb, int doff,
                const struct in6_addr *saddr,
                const __be16 sport, const struct in6_addr *daddr,
                const unsigned short hnum, const int dif, const int sdif)
{
        struct inet_listen_hashbucket *ilb2;
        struct sock *result = NULL;
        unsigned int hash2;

        /* Lookup redirect from BPF */
        if (static_branch_unlikely(&bpf_sk_lookup_enabled)) {
                result = inet6_lookup_run_bpf(net, hashinfo, skb, doff,
                                              saddr, sport, daddr, hnum, dif);
                if (result)
                        goto done;
        }

        /* Specific destination address first... */
        hash2 = ipv6_portaddr_hash(net, daddr, hnum);
        ilb2 = inet_lhash2_bucket(hashinfo, hash2);

        result = inet6_lhash2_lookup(net, ilb2, skb, doff,
                                     saddr, sport, daddr, hnum,
                                     dif, sdif);
        if (result)
                goto done;

        /* Lookup lhash2 with in6addr_any */
        hash2 = ipv6_portaddr_hash(net, &in6addr_any, hnum);
        ilb2 = inet_lhash2_bucket(hashinfo, hash2);

        result = inet6_lhash2_lookup(net, ilb2, skb, doff,
                                     saddr, sport, &in6addr_any, hnum,
                                     dif, sdif);
done:
        /* ERR_PTR from the BPF path means drop the packet. */
        if (IS_ERR(result))
                return NULL;
        return result;
}
EXPORT_SYMBOL_GPL(inet6_lookup_listener);
225
226 struct sock *inet6_lookup(struct net *net, struct inet_hashinfo *hashinfo,
227                           struct sk_buff *skb, int doff,
228                           const struct in6_addr *saddr, const __be16 sport,
229                           const struct in6_addr *daddr, const __be16 dport,
230                           const int dif)
231 {
232         struct sock *sk;
233         bool refcounted;
234
235         sk = __inet6_lookup(net, hashinfo, skb, doff, saddr, sport, daddr,
236                             ntohs(dport), dif, 0, &refcounted);
237         if (sk && !refcounted && !refcount_inc_not_zero(&sk->sk_refcnt))
238                 sk = NULL;
239         return sk;
240 }
241 EXPORT_SYMBOL_GPL(inet6_lookup);
242
/* Check that the 4-tuple (sk's addresses plus local port @lport) is not
 * already in use in the established hash and, if unique, insert @sk there.
 * Used as the uniqueness callback of __inet_hash_connect() during
 * ephemeral port selection.
 *
 * Naming is from the wire's point of view: "daddr" is our local bound
 * address (sk_v6_rcv_saddr) and "saddr" the remote peer (sk_v6_daddr),
 * matching how inet6_match() compares incoming packets.
 *
 * A conflicting TIME_WAIT socket may be recycled if twsk_unique() allows
 * it; the displaced timewait sock is either handed to the caller via
 * @twp (caller owns it) or descheduled and released here.
 *
 * Returns 0 on success, -EADDRNOTAVAIL if the tuple is taken.
 */
static int __inet6_check_established(struct inet_timewait_death_row *death_row,
                                     struct sock *sk, const __u16 lport,
                                     struct inet_timewait_sock **twp)
{
        struct inet_hashinfo *hinfo = death_row->hashinfo;
        struct inet_sock *inet = inet_sk(sk);
        const struct in6_addr *daddr = &sk->sk_v6_rcv_saddr;
        const struct in6_addr *saddr = &sk->sk_v6_daddr;
        const int dif = sk->sk_bound_dev_if;
        struct net *net = sock_net(sk);
        const int sdif = l3mdev_master_ifindex_by_index(net, dif);
        const __portpair ports = INET_COMBINED_PORTS(inet->inet_dport, lport);
        const unsigned int hash = inet6_ehashfn(net, daddr, lport, saddr,
                                                inet->inet_dport);
        struct inet_ehash_bucket *head = inet_ehash_bucket(hinfo, hash);
        spinlock_t *lock = inet_ehash_lockp(hinfo, hash);
        struct sock *sk2;
        const struct hlist_nulls_node *node;
        struct inet_timewait_sock *tw = NULL;

        /* Bucket lock serializes against concurrent inserts/lookups. */
        spin_lock(lock);

        sk_nulls_for_each(sk2, node, &head->chain) {
                if (sk2->sk_hash != hash)
                        continue;

                if (likely(inet6_match(net, sk2, saddr, daddr, ports,
                                       dif, sdif))) {
                        if (sk2->sk_state == TCP_TIME_WAIT) {
                                tw = inet_twsk(sk2);
                                /* Recyclable TIME_WAIT: tuple is ours. */
                                if (twsk_unique(sk, sk2, twp))
                                        break;
                        }
                        goto not_unique;
                }
        }

        /* Must record num and sport now. Otherwise we will see
         * in hash table socket with a funny identity.
         */
        inet->inet_num = lport;
        inet->inet_sport = htons(lport);
        sk->sk_hash = hash;
        WARN_ON(!sk_unhashed(sk));
        __sk_nulls_add_node_rcu(sk, &head->chain);
        if (tw) {
                /* Unlink the recycled timewait sock under the same lock. */
                sk_nulls_del_node_init_rcu((struct sock *)tw);
                __NET_INC_STATS(net, LINUX_MIB_TIMEWAITRECYCLED);
        }
        spin_unlock(lock);
        sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);

        if (twp) {
                *twp = tw;
        } else if (tw) {
                /* Silly. Should hash-dance instead... */
                inet_twsk_deschedule_put(tw);
        }
        return 0;

not_unique:
        spin_unlock(lock);
        return -EADDRNOTAVAIL;
}
307
308 static u64 inet6_sk_port_offset(const struct sock *sk)
309 {
310         const struct inet_sock *inet = inet_sk(sk);
311
312         return secure_ipv6_port_ephemeral(sk->sk_v6_rcv_saddr.s6_addr32,
313                                           sk->sk_v6_daddr.s6_addr32,
314                                           inet->inet_dport);
315 }
316
317 int inet6_hash_connect(struct inet_timewait_death_row *death_row,
318                        struct sock *sk)
319 {
320         u64 port_offset = 0;
321
322         if (!inet_sk(sk)->inet_num)
323                 port_offset = inet6_sk_port_offset(sk);
324         return __inet_hash_connect(death_row, sk, port_offset,
325                                    __inet6_check_established);
326 }
327 EXPORT_SYMBOL_GPL(inet6_hash_connect);
328
329 int inet6_hash(struct sock *sk)
330 {
331         int err = 0;
332
333         if (sk->sk_state != TCP_CLOSE)
334                 err = __inet_hash(sk, NULL);
335
336         return err;
337 }
338 EXPORT_SYMBOL_GPL(inet6_hash);