// SPDX-License-Identifier: GPL-2.0-only
/*
 * INET         An implementation of the TCP/IP protocol suite for the LINUX
 *              operating system.  INET is implemented using the  BSD Socket
 *              interface as the means of communication with the user level.
 *
 *              Generic TIME_WAIT sockets functions
 *
 *              From code originally in TCP
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <net/inet_hashtables.h>
#include <net/inet_timewait_sock.h>
#include <net/ip.h>


/**
 *      inet_twsk_bind_unhash - unhash a timewait socket from bind hash
 *      @tw: timewait socket
 *      @hashinfo: hashinfo pointer
 *
 *      Unhash a timewait socket from the bind hash, if hashed.
 *      The bind hash lock must be held by the caller.
 *      Drops the bind-hash reference on @tw via __sock_put().
 */
void inet_twsk_bind_unhash(struct inet_timewait_sock *tw,
                          struct inet_hashinfo *hashinfo)
{
        struct inet_bind2_bucket *tb2 = tw->tw_tb2;
        struct inet_bind_bucket *tb = tw->tw_tb;

        if (!tb)
                return;

        __hlist_del(&tw->tw_bind_node);
        tw->tw_tb = NULL;
        inet_bind_bucket_destroy(hashinfo->bind_bucket_cachep, tb);

        __hlist_del(&tw->tw_bind2_node);
        tw->tw_tb2 = NULL;
        inet_bind2_bucket_destroy(hashinfo->bind2_bucket_cachep, tb2);

        __sock_put((struct sock *)tw);
}

/* Remove @tw from the ehash and bind tables and drop the references they
 * held.  Must be called with locally disabled BHs.
 */
static void inet_twsk_kill(struct inet_timewait_sock *tw)
{
        struct inet_hashinfo *hashinfo = tw->tw_dr->hashinfo;
        spinlock_t *lock = inet_ehash_lockp(hashinfo, tw->tw_hash);
        struct inet_bind_hashbucket *bhead, *bhead2;

        spin_lock(lock);
        sk_nulls_del_node_init_rcu((struct sock *)tw);
        spin_unlock(lock);

        /* Disassociate with bind bucket. */
        bhead = &hashinfo->bhash[inet_bhashfn(twsk_net(tw), tw->tw_num,
                        hashinfo->bhash_size)];
        bhead2 = inet_bhashfn_portaddr(hashinfo, (struct sock *)tw,
                                       twsk_net(tw), tw->tw_num);

        spin_lock(&bhead->lock);
        spin_lock(&bhead2->lock);
        inet_twsk_bind_unhash(tw, hashinfo);
        spin_unlock(&bhead2->lock);
        spin_unlock(&bhead->lock);

        refcount_dec(&tw->tw_dr->tw_refcount);
        inet_twsk_put(tw);
}

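/* Final destruction: run the protocol's timewait destructor, return the
 * object to its slab cache and drop the owning module's reference.
 */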
void inet_twsk_free(struct inet_timewait_sock *tw)
{
        struct module *owner = tw->tw_prot->owner;
        twsk_destructor((struct sock *)tw);
#ifdef SOCK_REFCNT_DEBUG
        pr_debug("%s timewait_sock %p released\n", tw->tw_prot->name, tw);
#endif
        kmem_cache_free(tw->tw_prot->twsk_prot->twsk_slab, tw);
        module_put(owner);
}

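/* Drop one reference; the last put frees the timewait socket. */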
void inet_twsk_put(struct inet_timewait_sock *tw)
{
        if (refcount_dec_and_test(&tw->tw_refcnt))
                inet_twsk_free(tw);
}
EXPORT_SYMBOL_GPL(inet_twsk_put);

static void inet_twsk_add_node_rcu(struct inet_timewait_sock *tw,
                                   struct hlist_nulls_head *list)
{
        hlist_nulls_add_head_rcu(&tw->tw_node, list);
}

static void inet_twsk_add_bind_node(struct inet_timewait_sock *tw,
                                    struct hlist_head *list)
{
        hlist_add_head(&tw->tw_bind_node, list);
}

static void inet_twsk_add_bind2_node(struct inet_timewait_sock *tw,
                                     struct hlist_head *list)
{
        hlist_add_head(&tw->tw_bind2_node, list);
}

/*
 * Enter the time wait state. This is called with locally disabled BH.
 * Essentially we whip up a timewait bucket, copy the relevant info into it
 * from the SK, and mess with hash chains and list linkage.
 */
void inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
                           struct inet_hashinfo *hashinfo)
{
        const struct inet_sock *inet = inet_sk(sk);
        const struct inet_connection_sock *icsk = inet_csk(sk);
        struct inet_ehash_bucket *ehead = inet_ehash_bucket(hashinfo, sk->sk_hash);
        spinlock_t *lock = inet_ehash_lockp(hashinfo, sk->sk_hash);
        struct inet_bind_hashbucket *bhead, *bhead2;

        /* Step 1: Put TW into bind hash. Original socket stays there too.
         * Note that any socket with inet->num != 0 MUST be bound in the
         * binding cache, even if it is closed.
         */
        bhead = &hashinfo->bhash[inet_bhashfn(twsk_net(tw), inet->inet_num,
                        hashinfo->bhash_size)];
        bhead2 = inet_bhashfn_portaddr(hashinfo, sk, twsk_net(tw), inet->inet_num);

        spin_lock(&bhead->lock);
        spin_lock(&bhead2->lock);

        tw->tw_tb = icsk->icsk_bind_hash;
        WARN_ON(!icsk->icsk_bind_hash);
        inet_twsk_add_bind_node(tw, &tw->tw_tb->owners);

        tw->tw_tb2 = icsk->icsk_bind2_hash;
        WARN_ON(!icsk->icsk_bind2_hash);
        inet_twsk_add_bind2_node(tw, &tw->tw_tb2->deathrow);

        spin_unlock(&bhead2->lock);
        spin_unlock(&bhead->lock);

        spin_lock(lock);

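        /* Step 2: Hash TW into the ehash chain */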
        inet_twsk_add_node_rcu(tw, &ehead->chain);

        /* Step 3: Remove SK from hash chain */
        if (__sk_nulls_del_node_init_rcu(sk))
                sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);

        spin_unlock(lock);

        /* tw_refcnt is set to 3 because we have:
         * - one reference for bhash chain.
         * - one reference for ehash chain.
         * - one reference for timer.
         * We can use refcount_set() because prior spin_lock()/spin_unlock()
         * committed into memory all tw fields.
         * Also note that after this point, we lost our implicit reference
         * so we are not allowed to use tw anymore.
         */
        refcount_set(&tw->tw_refcnt, 3);
}
EXPORT_SYMBOL_GPL(inet_twsk_hashdance);

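/* The TIME_WAIT period expired: tear the socket down from timer (BH)
 * context.
 */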
static void tw_timer_handler(struct timer_list *t)
{
        struct inet_timewait_sock *tw = from_timer(tw, t, tw_timer);

        inet_twsk_kill(tw);
}

struct inet_timewait_sock *inet_twsk_alloc(const struct sock *sk,
                                           struct inet_timewait_death_row *dr,
                                           const int state)
{
        struct inet_timewait_sock *tw;

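        /* Enforce the max_tw_buckets sysctl limit: refuse a new timewait
         * socket once the death row is full.
         */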
        if (refcount_read(&dr->tw_refcount) - 1 >=
            READ_ONCE(dr->sysctl_max_tw_buckets))
                return NULL;

        tw = kmem_cache_alloc(sk->sk_prot_creator->twsk_prot->twsk_slab,
                              GFP_ATOMIC);
        if (tw) {
                const struct inet_sock *inet = inet_sk(sk);

                tw->tw_dr           = dr;
                /* Give us an identity. */
                tw->tw_daddr        = inet->inet_daddr;
                tw->tw_rcv_saddr    = inet->inet_rcv_saddr;
                tw->tw_bound_dev_if = sk->sk_bound_dev_if;
                tw->tw_tos          = inet->tos;
                tw->tw_num          = inet->inet_num;
                tw->tw_state        = TCP_TIME_WAIT;
                tw->tw_substate     = state;
                tw->tw_sport        = inet->inet_sport;
                tw->tw_dport        = inet->inet_dport;
                tw->tw_family       = sk->sk_family;
                tw->tw_reuse        = sk->sk_reuse;
                tw->tw_reuseport    = sk->sk_reuseport;
                tw->tw_hash         = sk->sk_hash;
                tw->tw_ipv6only     = 0;
                tw->tw_transparent  = inet->transparent;
                tw->tw_prot         = sk->sk_prot_creator;
                atomic64_set(&tw->tw_cookie, atomic64_read(&sk->sk_cookie));
                twsk_net_set(tw, sock_net(sk));
                timer_setup(&tw->tw_timer, tw_timer_handler, TIMER_PINNED);
                /*
                 * Because we use RCU lookups, we should not set tw_refcnt
                 * to a non-null value before everything is set up for this
                 * timewait socket.
                 */
                refcount_set(&tw->tw_refcnt, 0);

                __module_get(tw->tw_prot->owner);
        }

        return tw;
}
EXPORT_SYMBOL_GPL(inet_twsk_alloc);

/* These are always called from BH context.  See callers in
 * tcp_input.c to verify this.
 */

/* This is for handling early-kills of TIME_WAIT sockets.
 * Warning: this consumes a reference.
 * Caller should not access tw anymore.
 */
void inet_twsk_deschedule_put(struct inet_timewait_sock *tw)
{
        /* If we canceled a pending timer, do its teardown work ourselves. */
        if (del_timer_sync(&tw->tw_timer))
                inet_twsk_kill(tw);
        inet_twsk_put(tw);
}
EXPORT_SYMBOL(inet_twsk_deschedule_put);

void __inet_twsk_schedule(struct inet_timewait_sock *tw, int timeo, bool rearm)
{
        /* timeout := RTO * 3.5
         *
         * 3.5 = 1+2+0.5 to wait for two retransmits.
         *
         * RATIONALE: if FIN arrived and we entered TIME-WAIT state,
         * our ACK acking that FIN can be lost. If N subsequent retransmitted
         * FINs (or previous segments) are lost (probability of such event
         * is p^(N+1), where p is probability to lose single packet and
         * time to detect the loss is about RTO*(2^N - 1) with exponential
         * backoff). Normal timewait length is calculated so that we
         * wait at least for one retransmitted FIN (maximal RTO is 120sec).
         * [ BTW Linux, following BSD, violates this requirement by waiting
         *   only for 60sec; we should wait at least for 240 secs.
         *   Well, 240 consumes too much of resources 8)
         * ]
         * This interval is not reduced to catch old duplicates and
         * responses to our wandering segments living for two MSLs.
         * However, if we use PAWS to detect
         * old duplicates, we can reduce the interval to bounds required
         * by RTO, rather than MSL. So, if peer understands PAWS, we
         * kill tw bucket after 3.5*RTO (it is important that this number
         * is greater than TS tick!) and detect old duplicates with help
         * of PAWS.
         */

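        /* The first schedule arms the timer and takes a death-row
         * reference; a rearm only extends a timer that is still pending.
         */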
        if (!rearm) {
                bool kill = timeo <= 4*HZ;

                __NET_INC_STATS(twsk_net(tw), kill ? LINUX_MIB_TIMEWAITKILLED :
                                                     LINUX_MIB_TIMEWAITED);
                BUG_ON(mod_timer(&tw->tw_timer, jiffies + timeo));
                refcount_inc(&tw->tw_dr->tw_refcount);
        } else {
                mod_timer_pending(&tw->tw_timer, jiffies + timeo);
        }
}
EXPORT_SYMBOL_GPL(__inet_twsk_schedule);

/* Remove all non-full sockets (TIME_WAIT and NEW_SYN_RECV) for a dead netns */
void inet_twsk_purge(struct inet_hashinfo *hashinfo, int family)
{
        struct hlist_nulls_node *node;
        unsigned int slot;
        struct sock *sk;

        for (slot = 0; slot <= hashinfo->ehash_mask; slot++) {
                struct inet_ehash_bucket *head = &hashinfo->ehash[slot];
restart_rcu:
                cond_resched();
                rcu_read_lock();
restart:
                sk_nulls_for_each_rcu(sk, node, &head->chain) {
                        int state = inet_sk_state_load(sk);

                        if ((1 << state) & ~(TCPF_TIME_WAIT |
                                             TCPF_NEW_SYN_RECV))
                                continue;

                        if (sk->sk_family != family ||
                            refcount_read(&sock_net(sk)->ns.count))
                                continue;

                        if (unlikely(!refcount_inc_not_zero(&sk->sk_refcnt)))
                                continue;

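                        /* Re-check under the reference we now hold: the
                         * socket may have been reused concurrently for
                         * another family or a still-live netns.
                         */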
                        if (unlikely(sk->sk_family != family ||
                                     refcount_read(&sock_net(sk)->ns.count))) {
                                sock_gen_put(sk);
                                goto restart;
                        }

                        rcu_read_unlock();
                        local_bh_disable();
                        if (state == TCP_TIME_WAIT) {
                                inet_twsk_deschedule_put(inet_twsk(sk));
                        } else {
                                struct request_sock *req = inet_reqsk(sk);

                                inet_csk_reqsk_queue_drop_and_put(req->rsk_listener,
                                                                  req);
                        }
                        local_bh_enable();
                        goto restart_rcu;
                }
                /* If the nulls value we got at the end of this lookup is
                 * not the expected one, we must restart lookup.
                 * We probably met an item that was moved to another chain.
                 */
                if (get_nulls_value(node) != slot)
                        goto restart;
                rcu_read_unlock();
        }
}
EXPORT_SYMBOL_GPL(inet_twsk_purge);