GNU Linux-libre 5.4.257-gnu1
net/ipv4/tcp_metrics.c
// SPDX-License-Identifier: GPL-2.0
#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/cache.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/tcp.h>
#include <linux/hash.h>
#include <linux/tcp_metrics.h>
#include <linux/vmalloc.h>

#include <net/inet_connection_sock.h>
#include <net/net_namespace.h>
#include <net/request_sock.h>
#include <net/inetpeer.h>
#include <net/sock.h>
#include <net/ipv6.h>
#include <net/dst.h>
#include <net/tcp.h>
#include <net/genetlink.h>

static struct tcp_metrics_block *__tcp_get_metrics(const struct inetpeer_addr *saddr,
						   const struct inetpeer_addr *daddr,
						   struct net *net, unsigned int hash);

struct tcp_fastopen_metrics {
	u16	mss;
	u16	syn_loss:10,		/* Recurring Fast Open SYN losses */
		try_exp:2;		/* Request w/ exp. option (once) */
	unsigned long	last_syn_loss;	/* Last Fast Open SYN loss */
	struct	tcp_fastopen_cookie	cookie;
};

/* TCP_METRIC_MAX includes 2 extra fields for userspace compatibility
 * Kernel only stores RTT and RTTVAR in usec resolution
 */
#define TCP_METRIC_MAX_KERNEL (TCP_METRIC_MAX - 2)

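/* One cache entry per (saddr, daddr, netns) tuple. tcpm_stamp records the
 * last refresh time in jiffies, tcpm_lock is a bitmask of metrics locked
 * by the route (and thus never updated here). Entries are chained per
 * hash bucket and freed via RCU.
 */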
struct tcp_metrics_block {
	struct tcp_metrics_block __rcu	*tcpm_next;
	struct net			*tcpm_net;
	struct inetpeer_addr		tcpm_saddr;
	struct inetpeer_addr		tcpm_daddr;
	unsigned long			tcpm_stamp;
	u32				tcpm_lock;
	u32				tcpm_vals[TCP_METRIC_MAX_KERNEL + 1];
	struct tcp_fastopen_metrics	tcpm_fastopen;

	struct rcu_head			rcu_head;
};

static inline struct net *tm_net(const struct tcp_metrics_block *tm)
{
	/* Paired with the WRITE_ONCE() in tcpm_new() */
	return READ_ONCE(tm->tcpm_net);
}

static bool tcp_metric_locked(struct tcp_metrics_block *tm,
			      enum tcp_metric_index idx)
{
	/* Paired with WRITE_ONCE() in tcpm_suck_dst() */
	return READ_ONCE(tm->tcpm_lock) & (1 << idx);
}

static u32 tcp_metric_get(const struct tcp_metrics_block *tm,
			  enum tcp_metric_index idx)
{
	/* Paired with WRITE_ONCE() in tcp_metric_set() */
	return READ_ONCE(tm->tcpm_vals[idx]);
}

static void tcp_metric_set(struct tcp_metrics_block *tm,
			   enum tcp_metric_index idx,
			   u32 val)
{
	/* Paired with READ_ONCE() in tcp_metric_get() */
	WRITE_ONCE(tm->tcpm_vals[idx], val);
}

static bool addr_same(const struct inetpeer_addr *a,
		      const struct inetpeer_addr *b)
{
	return (a->family == b->family) && !inetpeer_addr_cmp(a, b);
}

struct tcpm_hash_bucket {
	struct tcp_metrics_block __rcu	*chain;
};

static struct tcpm_hash_bucket	*tcp_metrics_hash __read_mostly;
static unsigned int		tcp_metrics_hash_log __read_mostly;

static DEFINE_SPINLOCK(tcp_metrics_lock);
static DEFINE_SEQLOCK(fastopen_seqlock);

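/* (Re)seed a cache entry from the route: copy the locked-metrics bitmask
 * and the current RTAX_* values out of @dst, converting RTT/RTTVAR from
 * the route's msec resolution to the usec resolution stored here.
 */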
static void tcpm_suck_dst(struct tcp_metrics_block *tm,
			  const struct dst_entry *dst,
			  bool fastopen_clear)
{
	u32 msval;
	u32 val;

	WRITE_ONCE(tm->tcpm_stamp, jiffies);

	val = 0;
	if (dst_metric_locked(dst, RTAX_RTT))
		val |= 1 << TCP_METRIC_RTT;
	if (dst_metric_locked(dst, RTAX_RTTVAR))
		val |= 1 << TCP_METRIC_RTTVAR;
	if (dst_metric_locked(dst, RTAX_SSTHRESH))
		val |= 1 << TCP_METRIC_SSTHRESH;
	if (dst_metric_locked(dst, RTAX_CWND))
		val |= 1 << TCP_METRIC_CWND;
	if (dst_metric_locked(dst, RTAX_REORDERING))
		val |= 1 << TCP_METRIC_REORDERING;
	/* Paired with READ_ONCE() in tcp_metric_locked() */
	WRITE_ONCE(tm->tcpm_lock, val);

	msval = dst_metric_raw(dst, RTAX_RTT);
	tcp_metric_set(tm, TCP_METRIC_RTT, msval * USEC_PER_MSEC);

	msval = dst_metric_raw(dst, RTAX_RTTVAR);
	tcp_metric_set(tm, TCP_METRIC_RTTVAR, msval * USEC_PER_MSEC);
	tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
		       dst_metric_raw(dst, RTAX_SSTHRESH));
	tcp_metric_set(tm, TCP_METRIC_CWND,
		       dst_metric_raw(dst, RTAX_CWND));
	tcp_metric_set(tm, TCP_METRIC_REORDERING,
		       dst_metric_raw(dst, RTAX_REORDERING));
	if (fastopen_clear) {
		write_seqlock(&fastopen_seqlock);
		tm->tcpm_fastopen.mss = 0;
		tm->tcpm_fastopen.syn_loss = 0;
		tm->tcpm_fastopen.try_exp = 0;
		tm->tcpm_fastopen.cookie.exp = false;
		tm->tcpm_fastopen.cookie.len = 0;
		write_sequnlock(&fastopen_seqlock);
	}
}

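/* Entries not refreshed for an hour are reseeded from the route on next use. */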
#define TCP_METRICS_TIMEOUT		(60 * 60 * HZ)

static void tcpm_check_stamp(struct tcp_metrics_block *tm,
			     const struct dst_entry *dst)
{
	unsigned long limit;

	if (!tm)
		return;
	limit = READ_ONCE(tm->tcpm_stamp) + TCP_METRICS_TIMEOUT;
	if (unlikely(time_after(jiffies, limit)))
		tcpm_suck_dst(tm, dst, false);
}

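/* A lookup that misses on a chain longer than TCP_METRICS_RECLAIM_DEPTH
 * returns the TCP_METRICS_RECLAIM_PTR sentinel (never a valid pointer),
 * telling the caller to recycle an existing entry instead of growing the
 * chain.
 */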
#define TCP_METRICS_RECLAIM_DEPTH	5
#define TCP_METRICS_RECLAIM_PTR		(struct tcp_metrics_block *) 0x1UL

#define deref_locked(p)	\
	rcu_dereference_protected(p, lockdep_is_held(&tcp_metrics_lock))

static struct tcp_metrics_block *tcpm_new(struct dst_entry *dst,
					  struct inetpeer_addr *saddr,
					  struct inetpeer_addr *daddr,
					  unsigned int hash)
{
	struct tcp_metrics_block *tm;
	struct net *net;
	bool reclaim = false;

	spin_lock_bh(&tcp_metrics_lock);
	net = dev_net(dst->dev);

	/* While waiting for the spin-lock the cache might have been populated
	 * with this entry and so we have to check again.
	 */
	tm = __tcp_get_metrics(saddr, daddr, net, hash);
	if (tm == TCP_METRICS_RECLAIM_PTR) {
		reclaim = true;
		tm = NULL;
	}
	if (tm) {
		tcpm_check_stamp(tm, dst);
		goto out_unlock;
	}

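	/* Chain is full: recycle the least recently refreshed entry in
	 * place rather than allocating a new one.
	 */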
	if (unlikely(reclaim)) {
		struct tcp_metrics_block *oldest;

		oldest = deref_locked(tcp_metrics_hash[hash].chain);
		for (tm = deref_locked(oldest->tcpm_next); tm;
		     tm = deref_locked(tm->tcpm_next)) {
			if (time_before(READ_ONCE(tm->tcpm_stamp),
					READ_ONCE(oldest->tcpm_stamp)))
				oldest = tm;
		}
		tm = oldest;
	} else {
		tm = kzalloc(sizeof(*tm), GFP_ATOMIC);
		if (!tm)
			goto out_unlock;
	}
	/* Paired with the READ_ONCE() in tm_net() */
	WRITE_ONCE(tm->tcpm_net, net);

	tm->tcpm_saddr = *saddr;
	tm->tcpm_daddr = *daddr;

	tcpm_suck_dst(tm, dst, reclaim);

	if (likely(!reclaim)) {
		tm->tcpm_next = tcp_metrics_hash[hash].chain;
		rcu_assign_pointer(tcp_metrics_hash[hash].chain, tm);
	}

out_unlock:
	spin_unlock_bh(&tcp_metrics_lock);
	return tm;
}

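/* Encode a lookup miss: NULL while the chain is still short, or the
 * reclaim sentinel once @depth shows the chain has grown past the limit.
 */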
static struct tcp_metrics_block *tcp_get_encode(struct tcp_metrics_block *tm, int depth)
{
	if (tm)
		return tm;
	if (depth > TCP_METRICS_RECLAIM_DEPTH)
		return TCP_METRICS_RECLAIM_PTR;
	return NULL;
}

static struct tcp_metrics_block *__tcp_get_metrics(const struct inetpeer_addr *saddr,
						   const struct inetpeer_addr *daddr,
						   struct net *net, unsigned int hash)
{
	struct tcp_metrics_block *tm;
	int depth = 0;

	for (tm = rcu_dereference(tcp_metrics_hash[hash].chain); tm;
	     tm = rcu_dereference(tm->tcpm_next)) {
		if (addr_same(&tm->tcpm_saddr, saddr) &&
		    addr_same(&tm->tcpm_daddr, daddr) &&
		    net_eq(tm_net(tm), net))
			break;
		depth++;
	}
	return tcp_get_encode(tm, depth);
}

static struct tcp_metrics_block *__tcp_get_metrics_req(struct request_sock *req,
						       struct dst_entry *dst)
{
	struct tcp_metrics_block *tm;
	struct inetpeer_addr saddr, daddr;
	unsigned int hash;
	struct net *net;

	saddr.family = req->rsk_ops->family;
	daddr.family = req->rsk_ops->family;
	switch (daddr.family) {
	case AF_INET:
		inetpeer_set_addr_v4(&saddr, inet_rsk(req)->ir_loc_addr);
		inetpeer_set_addr_v4(&daddr, inet_rsk(req)->ir_rmt_addr);
		hash = ipv4_addr_hash(inet_rsk(req)->ir_rmt_addr);
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case AF_INET6:
		inetpeer_set_addr_v6(&saddr, &inet_rsk(req)->ir_v6_loc_addr);
		inetpeer_set_addr_v6(&daddr, &inet_rsk(req)->ir_v6_rmt_addr);
		hash = ipv6_addr_hash(&inet_rsk(req)->ir_v6_rmt_addr);
		break;
#endif
	default:
		return NULL;
	}

	net = dev_net(dst->dev);
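	/* Mix the netns into the address hash, then fold it down to the
	 * table's bucket range.
	 */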
	hash ^= net_hash_mix(net);
	hash = hash_32(hash, tcp_metrics_hash_log);

	for (tm = rcu_dereference(tcp_metrics_hash[hash].chain); tm;
	     tm = rcu_dereference(tm->tcpm_next)) {
		if (addr_same(&tm->tcpm_saddr, &saddr) &&
		    addr_same(&tm->tcpm_daddr, &daddr) &&
		    net_eq(tm_net(tm), net))
			break;
	}
	tcpm_check_stamp(tm, dst);
	return tm;
}

static struct tcp_metrics_block *tcp_get_metrics(struct sock *sk,
						 struct dst_entry *dst,
						 bool create)
{
	struct tcp_metrics_block *tm;
	struct inetpeer_addr saddr, daddr;
	unsigned int hash;
	struct net *net;

	if (sk->sk_family == AF_INET) {
		inetpeer_set_addr_v4(&saddr, inet_sk(sk)->inet_saddr);
		inetpeer_set_addr_v4(&daddr, inet_sk(sk)->inet_daddr);
		hash = ipv4_addr_hash(inet_sk(sk)->inet_daddr);
	}
#if IS_ENABLED(CONFIG_IPV6)
	else if (sk->sk_family == AF_INET6) {
		if (ipv6_addr_v4mapped(&sk->sk_v6_daddr)) {
			inetpeer_set_addr_v4(&saddr, inet_sk(sk)->inet_saddr);
			inetpeer_set_addr_v4(&daddr, inet_sk(sk)->inet_daddr);
			hash = ipv4_addr_hash(inet_sk(sk)->inet_daddr);
		} else {
			inetpeer_set_addr_v6(&saddr, &sk->sk_v6_rcv_saddr);
			inetpeer_set_addr_v6(&daddr, &sk->sk_v6_daddr);
			hash = ipv6_addr_hash(&sk->sk_v6_daddr);
		}
	}
#endif
	else
		return NULL;

	net = dev_net(dst->dev);
	hash ^= net_hash_mix(net);
	hash = hash_32(hash, tcp_metrics_hash_log);

	tm = __tcp_get_metrics(&saddr, &daddr, net, hash);
	if (tm == TCP_METRICS_RECLAIM_PTR)
		tm = NULL;
	if (!tm && create)
		tm = tcpm_new(dst, &saddr, &daddr, hash);
	else
		tcpm_check_stamp(tm, dst);

	return tm;
}

/* Save metrics learned by this TCP session.  This function is called
 * only when TCP finishes successfully, i.e. when it enters TIME-WAIT
 * or goes from LAST-ACK to CLOSE.
 */
void tcp_update_metrics(struct sock *sk)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	struct dst_entry *dst = __sk_dst_get(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct net *net = sock_net(sk);
	struct tcp_metrics_block *tm;
	unsigned long rtt;
	u32 val;
	int m;

	sk_dst_confirm(sk);
	if (READ_ONCE(net->ipv4.sysctl_tcp_nometrics_save) || !dst)
		return;

	rcu_read_lock();
	if (icsk->icsk_backoff || !tp->srtt_us) {
		/* This session failed to estimate rtt. Why?
		 * Probably, no packets returned in time.  Reset our
		 * results.
		 */
		tm = tcp_get_metrics(sk, dst, false);
		if (tm && !tcp_metric_locked(tm, TCP_METRIC_RTT))
			tcp_metric_set(tm, TCP_METRIC_RTT, 0);
		goto out_unlock;
	} else
		tm = tcp_get_metrics(sk, dst, true);

	if (!tm)
		goto out_unlock;

	rtt = tcp_metric_get(tm, TCP_METRIC_RTT);
	m = rtt - tp->srtt_us;

	/* If newly calculated rtt larger than stored one, store new
	 * one. Otherwise, use EWMA. Remember, rtt overestimation is
	 * always better than underestimation.
	 */
	if (!tcp_metric_locked(tm, TCP_METRIC_RTT)) {
		if (m <= 0)
			rtt = tp->srtt_us;
		else
			rtt -= (m >> 3);
		tcp_metric_set(tm, TCP_METRIC_RTT, rtt);
	}

	if (!tcp_metric_locked(tm, TCP_METRIC_RTTVAR)) {
		unsigned long var;

		if (m < 0)
			m = -m;

		/* Scale deviation to rttvar fixed point */
		m >>= 1;
		if (m < tp->mdev_us)
			m = tp->mdev_us;

		var = tcp_metric_get(tm, TCP_METRIC_RTTVAR);
		if (m >= var)
			var = m;
		else
			var -= (var - m) >> 2;

		tcp_metric_set(tm, TCP_METRIC_RTTVAR, var);
	}

	if (tcp_in_initial_slowstart(tp)) {
		/* Slow start still did not finish. */
		if (!tcp_metric_locked(tm, TCP_METRIC_SSTHRESH)) {
			val = tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
			if (val && (tp->snd_cwnd >> 1) > val)
				tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
					       tp->snd_cwnd >> 1);
		}
		if (!tcp_metric_locked(tm, TCP_METRIC_CWND)) {
			val = tcp_metric_get(tm, TCP_METRIC_CWND);
			if (tp->snd_cwnd > val)
				tcp_metric_set(tm, TCP_METRIC_CWND,
					       tp->snd_cwnd);
		}
	} else if (!tcp_in_slow_start(tp) &&
		   icsk->icsk_ca_state == TCP_CA_Open) {
		/* Cong. avoidance phase, cwnd is reliable. */
		if (!tcp_metric_locked(tm, TCP_METRIC_SSTHRESH))
			tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
				       max(tp->snd_cwnd >> 1, tp->snd_ssthresh));
		if (!tcp_metric_locked(tm, TCP_METRIC_CWND)) {
			val = tcp_metric_get(tm, TCP_METRIC_CWND);
			tcp_metric_set(tm, TCP_METRIC_CWND, (val + tp->snd_cwnd) >> 1);
		}
	} else {
		/* Else slow start did not finish: cwnd is nonsense and
		 * ssthresh may also be invalid.
		 */
		if (!tcp_metric_locked(tm, TCP_METRIC_CWND)) {
			val = tcp_metric_get(tm, TCP_METRIC_CWND);
			tcp_metric_set(tm, TCP_METRIC_CWND,
				       (val + tp->snd_ssthresh) >> 1);
		}
		if (!tcp_metric_locked(tm, TCP_METRIC_SSTHRESH)) {
			val = tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
			if (val && tp->snd_ssthresh > val)
				tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
					       tp->snd_ssthresh);
		}
		if (!tcp_metric_locked(tm, TCP_METRIC_REORDERING)) {
			val = tcp_metric_get(tm, TCP_METRIC_REORDERING);
			if (val < tp->reordering &&
			    tp->reordering !=
			    READ_ONCE(net->ipv4.sysctl_tcp_reordering))
				tcp_metric_set(tm, TCP_METRIC_REORDERING,
					       tp->reordering);
		}
	}
	WRITE_ONCE(tm->tcpm_stamp, jiffies);
out_unlock:
	rcu_read_unlock();
}

/* Initialize metrics on socket. */

void tcp_init_metrics(struct sock *sk)
{
	struct dst_entry *dst = __sk_dst_get(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_metrics_block *tm;
	u32 val, crtt = 0; /* cached RTT scaled by 8 */

	sk_dst_confirm(sk);
	if (!dst)
		goto reset;

	rcu_read_lock();
	tm = tcp_get_metrics(sk, dst, true);
	if (!tm) {
		rcu_read_unlock();
		goto reset;
	}

	if (tcp_metric_locked(tm, TCP_METRIC_CWND))
		tp->snd_cwnd_clamp = tcp_metric_get(tm, TCP_METRIC_CWND);

	val = tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
	if (val) {
		tp->snd_ssthresh = val;
		if (tp->snd_ssthresh > tp->snd_cwnd_clamp)
			tp->snd_ssthresh = tp->snd_cwnd_clamp;
	} else {
		/* ssthresh may have been reduced unnecessarily during
		 * 3WHS. Restore it back to its initial default.
		 */
		tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
	}
	val = tcp_metric_get(tm, TCP_METRIC_REORDERING);
	if (val && tp->reordering != val)
		tp->reordering = val;

	crtt = tcp_metric_get(tm, TCP_METRIC_RTT);
	rcu_read_unlock();
reset:
	/* The initial RTT measurement from the SYN/SYN-ACK is not ideal
	 * to seed the RTO for later data packets because SYN packets are
	 * small. Use the per-dst cached values to seed the RTO but keep
	 * the RTT estimator variables intact (e.g., srtt, mdev, rttvar).
	 * Later the RTO will be updated immediately upon obtaining the first
	 * data RTT sample (tcp_rtt_estimator()). Hence the cached RTT only
	 * influences the first RTO but not later RTT estimation.
	 *
	 * But if RTT is not available from the SYN (due to retransmits or
	 * syn cookies) or the cache, force a conservative 3secs timeout.
	 *
	 * A bit of theory. RTT is the time that passes after a "normal"
	 * sized packet is sent until it is ACKed. In normal circumstances
	 * sending small packets forces the peer to delay ACKs, so the
	 * calculation remains correct. The algorithm is adaptive and,
	 * provided we follow specs, it NEVER underestimates RTT. BUT! If
	 * the peer plays clever tricks, sort of "quick acks", for long
	 * enough to drive the measured RTT down to a low value, and then
	 * abruptly stops doing so and starts delaying ACKs, expect trouble.
	 */
	if (crtt > tp->srtt_us) {
		/* Set RTO like tcp_rtt_estimator(), but from cached RTT. */
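		/* crtt is in usec and scaled by 8 (see above): dividing by
		 * 8 * USEC_PER_SEC / HZ both unscales it and converts
		 * usec to jiffies.
		 */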
		crtt /= 8 * USEC_PER_SEC / HZ;
		inet_csk(sk)->icsk_rto = crtt + max(2 * crtt, tcp_rto_min(sk));
	} else if (tp->srtt_us == 0) {
		/* RFC6298: 5.7 We've failed to get a valid RTT sample from
		 * 3WHS. This is most likely due to retransmission,
		 * including spurious one. Reset the RTO back to 3secs
		 * from the more aggressive 1sec to avoid more spurious
		 * retransmission.
		 */
		tp->rttvar_us = jiffies_to_usecs(TCP_TIMEOUT_FALLBACK);
		tp->mdev_us = tp->mdev_max_us = tp->rttvar_us;

		inet_csk(sk)->icsk_rto = TCP_TIMEOUT_FALLBACK;
	}
}

bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst)
{
	struct tcp_metrics_block *tm;
	bool ret;

	if (!dst)
		return false;

	rcu_read_lock();
	tm = __tcp_get_metrics_req(req, dst);
	if (tm && tcp_metric_get(tm, TCP_METRIC_RTT))
		ret = true;
	else
		ret = false;
	rcu_read_unlock();

	return ret;
}

void tcp_fastopen_cache_get(struct sock *sk, u16 *mss,
			    struct tcp_fastopen_cookie *cookie)
{
	struct tcp_metrics_block *tm;

	rcu_read_lock();
	tm = tcp_get_metrics(sk, __sk_dst_get(sk), false);
	if (tm) {
		struct tcp_fastopen_metrics *tfom = &tm->tcpm_fastopen;
		unsigned int seq;

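		/* Snapshot the fastopen state under the seqlock; retry the
		 * read if a writer raced with us.
		 */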
		do {
			seq = read_seqbegin(&fastopen_seqlock);
			if (tfom->mss)
				*mss = tfom->mss;
			*cookie = tfom->cookie;
			if (cookie->len <= 0 && tfom->try_exp == 1)
				cookie->exp = true;
		} while (read_seqretry(&fastopen_seqlock, seq));
	}
	rcu_read_unlock();
}

void tcp_fastopen_cache_set(struct sock *sk, u16 mss,
			    struct tcp_fastopen_cookie *cookie, bool syn_lost,
			    u16 try_exp)
{
	struct dst_entry *dst = __sk_dst_get(sk);
	struct tcp_metrics_block *tm;

	if (!dst)
		return;
	rcu_read_lock();
	tm = tcp_get_metrics(sk, dst, true);
	if (tm) {
		struct tcp_fastopen_metrics *tfom = &tm->tcpm_fastopen;

		write_seqlock_bh(&fastopen_seqlock);
		if (mss)
			tfom->mss = mss;
		if (cookie && cookie->len > 0)
			tfom->cookie = *cookie;
		else if (try_exp > tfom->try_exp &&
			 tfom->cookie.len <= 0 && !tfom->cookie.exp)
			tfom->try_exp = try_exp;
		if (syn_lost) {
			++tfom->syn_loss;
			tfom->last_syn_loss = jiffies;
		} else
			tfom->syn_loss = 0;
		write_sequnlock_bh(&fastopen_seqlock);
	}
	rcu_read_unlock();
}

static struct genl_family tcp_metrics_nl_family;

static const struct nla_policy tcp_metrics_nl_policy[TCP_METRICS_ATTR_MAX + 1] = {
	[TCP_METRICS_ATTR_ADDR_IPV4]	= { .type = NLA_U32, },
	[TCP_METRICS_ATTR_ADDR_IPV6]	= { .type = NLA_BINARY,
					    .len = sizeof(struct in6_addr), },
	/* The following attributes are not received for GET/DEL;
	 * we keep them here for reference.
	 */
#if 0
	[TCP_METRICS_ATTR_AGE]		= { .type = NLA_MSECS, },
	[TCP_METRICS_ATTR_TW_TSVAL]	= { .type = NLA_U32, },
	[TCP_METRICS_ATTR_TW_TS_STAMP]	= { .type = NLA_S32, },
	[TCP_METRICS_ATTR_VALS]		= { .type = NLA_NESTED, },
	[TCP_METRICS_ATTR_FOPEN_MSS]	= { .type = NLA_U16, },
	[TCP_METRICS_ATTR_FOPEN_SYN_DROPS]	= { .type = NLA_U16, },
	[TCP_METRICS_ATTR_FOPEN_SYN_DROP_TS]	= { .type = NLA_MSECS, },
	[TCP_METRICS_ATTR_FOPEN_COOKIE]	= { .type = NLA_BINARY,
					    .len = TCP_FASTOPEN_COOKIE_MAX, },
#endif
};

/* Add attributes, caller cancels its header on failure */
static int tcp_metrics_fill_info(struct sk_buff *msg,
				 struct tcp_metrics_block *tm)
{
	struct nlattr *nest;
	int i;

	switch (tm->tcpm_daddr.family) {
	case AF_INET:
		if (nla_put_in_addr(msg, TCP_METRICS_ATTR_ADDR_IPV4,
				    inetpeer_get_addr_v4(&tm->tcpm_daddr)) < 0)
			goto nla_put_failure;
		if (nla_put_in_addr(msg, TCP_METRICS_ATTR_SADDR_IPV4,
				    inetpeer_get_addr_v4(&tm->tcpm_saddr)) < 0)
			goto nla_put_failure;
		break;
	case AF_INET6:
		if (nla_put_in6_addr(msg, TCP_METRICS_ATTR_ADDR_IPV6,
				     inetpeer_get_addr_v6(&tm->tcpm_daddr)) < 0)
			goto nla_put_failure;
		if (nla_put_in6_addr(msg, TCP_METRICS_ATTR_SADDR_IPV6,
				     inetpeer_get_addr_v6(&tm->tcpm_saddr)) < 0)
			goto nla_put_failure;
		break;
	default:
		return -EAFNOSUPPORT;
	}

	if (nla_put_msecs(msg, TCP_METRICS_ATTR_AGE,
			  jiffies - READ_ONCE(tm->tcpm_stamp),
			  TCP_METRICS_ATTR_PAD) < 0)
		goto nla_put_failure;

	{
		int n = 0;

		nest = nla_nest_start_noflag(msg, TCP_METRICS_ATTR_VALS);
		if (!nest)
			goto nla_put_failure;
		for (i = 0; i < TCP_METRIC_MAX_KERNEL + 1; i++) {
			u32 val = tcp_metric_get(tm, i);

			if (!val)
				continue;
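			/* RTT and RTTVAR are stored in usec: report the raw
			 * usec value in the *_US attribute, then fall through
			 * and report a msec value (at least 1) in the legacy
			 * attribute for old userspace.
			 */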
			if (i == TCP_METRIC_RTT) {
				if (nla_put_u32(msg, TCP_METRIC_RTT_US + 1,
						val) < 0)
					goto nla_put_failure;
				n++;
				val = max(val / 1000, 1U);
			}
			if (i == TCP_METRIC_RTTVAR) {
				if (nla_put_u32(msg, TCP_METRIC_RTTVAR_US + 1,
						val) < 0)
					goto nla_put_failure;
				n++;
				val = max(val / 1000, 1U);
			}
			if (nla_put_u32(msg, i + 1, val) < 0)
				goto nla_put_failure;
			n++;
		}
		if (n)
			nla_nest_end(msg, nest);
		else
			nla_nest_cancel(msg, nest);
	}

	{
		struct tcp_fastopen_metrics tfom_copy[1], *tfom;
		unsigned int seq;

		do {
			seq = read_seqbegin(&fastopen_seqlock);
			tfom_copy[0] = tm->tcpm_fastopen;
		} while (read_seqretry(&fastopen_seqlock, seq));

		tfom = tfom_copy;
		if (tfom->mss &&
		    nla_put_u16(msg, TCP_METRICS_ATTR_FOPEN_MSS,
				tfom->mss) < 0)
			goto nla_put_failure;
		if (tfom->syn_loss &&
		    (nla_put_u16(msg, TCP_METRICS_ATTR_FOPEN_SYN_DROPS,
				tfom->syn_loss) < 0 ||
		     nla_put_msecs(msg, TCP_METRICS_ATTR_FOPEN_SYN_DROP_TS,
				jiffies - tfom->last_syn_loss,
				TCP_METRICS_ATTR_PAD) < 0))
			goto nla_put_failure;
		if (tfom->cookie.len > 0 &&
		    nla_put(msg, TCP_METRICS_ATTR_FOPEN_COOKIE,
			    tfom->cookie.len, tfom->cookie.val) < 0)
			goto nla_put_failure;
	}

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

static int tcp_metrics_dump_info(struct sk_buff *skb,
				 struct netlink_callback *cb,
				 struct tcp_metrics_block *tm)
{
	void *hdr;

	hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			  &tcp_metrics_nl_family, NLM_F_MULTI,
			  TCP_METRICS_CMD_GET);
	if (!hdr)
		return -EMSGSIZE;

	if (tcp_metrics_fill_info(skb, tm) < 0)
		goto nla_put_failure;

	genlmsg_end(skb, hdr);
	return 0;

nla_put_failure:
	genlmsg_cancel(skb, hdr);
	return -EMSGSIZE;
}

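/* Dump the whole cache. cb->args[0]/[1] record the (row, column) reached
 * so an interrupted dump resumes where the previous pass left off.
 */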
static int tcp_metrics_nl_dump(struct sk_buff *skb,
			       struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	unsigned int max_rows = 1U << tcp_metrics_hash_log;
	unsigned int row, s_row = cb->args[0];
	int s_col = cb->args[1], col = s_col;

	for (row = s_row; row < max_rows; row++, s_col = 0) {
		struct tcp_metrics_block *tm;
		struct tcpm_hash_bucket *hb = tcp_metrics_hash + row;

		rcu_read_lock();
		for (col = 0, tm = rcu_dereference(hb->chain); tm;
		     tm = rcu_dereference(tm->tcpm_next), col++) {
			if (!net_eq(tm_net(tm), net))
				continue;
			if (col < s_col)
				continue;
			if (tcp_metrics_dump_info(skb, cb, tm) < 0) {
				rcu_read_unlock();
				goto done;
			}
		}
		rcu_read_unlock();
	}

done:
	cb->args[0] = row;
	cb->args[1] = col;
	return skb->len;
}

static int __parse_nl_addr(struct genl_info *info, struct inetpeer_addr *addr,
			   unsigned int *hash, int optional, int v4, int v6)
{
	struct nlattr *a;

	a = info->attrs[v4];
	if (a) {
		inetpeer_set_addr_v4(addr, nla_get_in_addr(a));
		if (hash)
			*hash = ipv4_addr_hash(inetpeer_get_addr_v4(addr));
		return 0;
	}
	a = info->attrs[v6];
	if (a) {
		struct in6_addr in6;

		if (nla_len(a) != sizeof(struct in6_addr))
			return -EINVAL;
		in6 = nla_get_in6_addr(a);
		inetpeer_set_addr_v6(addr, &in6);
		if (hash)
			*hash = ipv6_addr_hash(inetpeer_get_addr_v6(addr));
		return 0;
	}
	return optional ? 1 : -EAFNOSUPPORT;
}

static int parse_nl_addr(struct genl_info *info, struct inetpeer_addr *addr,
			 unsigned int *hash, int optional)
{
	return __parse_nl_addr(info, addr, hash, optional,
			       TCP_METRICS_ATTR_ADDR_IPV4,
			       TCP_METRICS_ATTR_ADDR_IPV6);
}

static int parse_nl_saddr(struct genl_info *info, struct inetpeer_addr *addr)
{
	return __parse_nl_addr(info, addr, NULL, 0,
			       TCP_METRICS_ATTR_SADDR_IPV4,
			       TCP_METRICS_ATTR_SADDR_IPV6);
}

static int tcp_metrics_nl_cmd_get(struct sk_buff *skb, struct genl_info *info)
{
	struct tcp_metrics_block *tm;
	struct inetpeer_addr saddr, daddr;
	unsigned int hash;
	struct sk_buff *msg;
	struct net *net = genl_info_net(info);
	void *reply;
	int ret;
	bool src = true;

	ret = parse_nl_addr(info, &daddr, &hash, 0);
	if (ret < 0)
		return ret;

	ret = parse_nl_saddr(info, &saddr);
	if (ret < 0)
		src = false;

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	reply = genlmsg_put_reply(msg, info, &tcp_metrics_nl_family, 0,
				  info->genlhdr->cmd);
	if (!reply)
		goto nla_put_failure;

	hash ^= net_hash_mix(net);
	hash = hash_32(hash, tcp_metrics_hash_log);
	ret = -ESRCH;
	rcu_read_lock();
	for (tm = rcu_dereference(tcp_metrics_hash[hash].chain); tm;
	     tm = rcu_dereference(tm->tcpm_next)) {
		if (addr_same(&tm->tcpm_daddr, &daddr) &&
		    (!src || addr_same(&tm->tcpm_saddr, &saddr)) &&
		    net_eq(tm_net(tm), net)) {
			ret = tcp_metrics_fill_info(msg, tm);
			break;
		}
	}
	rcu_read_unlock();
	if (ret < 0)
		goto out_free;

	genlmsg_end(msg, reply);
	return genlmsg_reply(msg, info);

nla_put_failure:
	ret = -EMSGSIZE;

out_free:
	nlmsg_free(msg);
	return ret;
}

static void tcp_metrics_flush_all(struct net *net)
{
	unsigned int max_rows = 1U << tcp_metrics_hash_log;
	struct tcpm_hash_bucket *hb = tcp_metrics_hash;
	struct tcp_metrics_block *tm;
	unsigned int row;

	for (row = 0; row < max_rows; row++, hb++) {
		struct tcp_metrics_block __rcu **pp;
		bool match;

		spin_lock_bh(&tcp_metrics_lock);
		pp = &hb->chain;
		for (tm = deref_locked(*pp); tm; tm = deref_locked(*pp)) {
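			/* A NULL net flushes entries belonging to any dead
			 * netns (refcount already zero); otherwise only the
			 * given netns is flushed.
			 */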
			match = net ? net_eq(tm_net(tm), net) :
				!refcount_read(&tm_net(tm)->count);
			if (match) {
				*pp = tm->tcpm_next;
				kfree_rcu(tm, rcu_head);
			} else {
				pp = &tm->tcpm_next;
			}
		}
		spin_unlock_bh(&tcp_metrics_lock);
	}
}

static int tcp_metrics_nl_cmd_del(struct sk_buff *skb, struct genl_info *info)
{
	struct tcpm_hash_bucket *hb;
	struct tcp_metrics_block *tm;
	struct tcp_metrics_block __rcu **pp;
	struct inetpeer_addr saddr, daddr;
	unsigned int hash;
	struct net *net = genl_info_net(info);
	int ret;
	bool src = true, found = false;

	ret = parse_nl_addr(info, &daddr, &hash, 1);
	if (ret < 0)
		return ret;
	if (ret > 0) {
		tcp_metrics_flush_all(net);
		return 0;
	}
	ret = parse_nl_saddr(info, &saddr);
	if (ret < 0)
		src = false;

	hash ^= net_hash_mix(net);
	hash = hash_32(hash, tcp_metrics_hash_log);
	hb = tcp_metrics_hash + hash;
	pp = &hb->chain;
	spin_lock_bh(&tcp_metrics_lock);
	for (tm = deref_locked(*pp); tm; tm = deref_locked(*pp)) {
		if (addr_same(&tm->tcpm_daddr, &daddr) &&
		    (!src || addr_same(&tm->tcpm_saddr, &saddr)) &&
		    net_eq(tm_net(tm), net)) {
			*pp = tm->tcpm_next;
			kfree_rcu(tm, rcu_head);
			found = true;
		} else {
			pp = &tm->tcpm_next;
		}
	}
	spin_unlock_bh(&tcp_metrics_lock);
	if (!found)
		return -ESRCH;
	return 0;
}

static const struct genl_ops tcp_metrics_nl_ops[] = {
	{
		.cmd = TCP_METRICS_CMD_GET,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = tcp_metrics_nl_cmd_get,
		.dumpit = tcp_metrics_nl_dump,
	},
	{
		.cmd = TCP_METRICS_CMD_DEL,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = tcp_metrics_nl_cmd_del,
		.flags = GENL_ADMIN_PERM,
	},
};

static struct genl_family tcp_metrics_nl_family __ro_after_init = {
	.hdrsize	= 0,
	.name		= TCP_METRICS_GENL_NAME,
	.version	= TCP_METRICS_GENL_VERSION,
	.maxattr	= TCP_METRICS_ATTR_MAX,
	.policy = tcp_metrics_nl_policy,
	.netnsok	= true,
	.module		= THIS_MODULE,
	.ops		= tcp_metrics_nl_ops,
	.n_ops		= ARRAY_SIZE(tcp_metrics_nl_ops),
};
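
/* Userspace reaches this family over generic netlink; for example,
 * iproute2's "ip tcp_metrics show" and "ip tcp_metrics flush" map to
 * TCP_METRICS_CMD_GET and TCP_METRICS_CMD_DEL above.
 */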

static unsigned int tcpmhash_entries;
static int __init set_tcpmhash_entries(char *str)
{
	ssize_t ret;

	if (!str)
		return 0;

	ret = kstrtouint(str, 0, &tcpmhash_entries);
	if (ret)
		return 0;

	return 1;
}
__setup("tcpmhash_entries=", set_tcpmhash_entries);
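/* Example (illustrative): booting with "tcpmhash_entries=16384" on the
 * kernel command line pre-sizes the hash table to 16K buckets, overriding
 * the RAM-based default chosen in tcp_net_metrics_init() below.
 */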

static int __net_init tcp_net_metrics_init(struct net *net)
{
	size_t size;
	unsigned int slots;

	if (!net_eq(net, &init_net))
		return 0;

	slots = tcpmhash_entries;
	if (!slots) {
		if (totalram_pages() >= 128 * 1024)
			slots = 16 * 1024;
		else
			slots = 8 * 1024;
	}

	tcp_metrics_hash_log = order_base_2(slots);
	size = sizeof(struct tcpm_hash_bucket) << tcp_metrics_hash_log;

	tcp_metrics_hash = kvzalloc(size, GFP_KERNEL);
	if (!tcp_metrics_hash)
		return -ENOMEM;

	return 0;
}

static void __net_exit tcp_net_metrics_exit_batch(struct list_head *net_exit_list)
{
	tcp_metrics_flush_all(NULL);
}

static __net_initdata struct pernet_operations tcp_net_metrics_ops = {
	.init		=	tcp_net_metrics_init,
	.exit_batch	=	tcp_net_metrics_exit_batch,
};

void __init tcp_metrics_init(void)
{
	int ret;

	ret = register_pernet_subsys(&tcp_net_metrics_ops);
	if (ret < 0)
		panic("Could not allocate the tcp_metrics hash table\n");

	ret = genl_register_family(&tcp_metrics_nl_family);
	if (ret < 0)
		panic("Could not register tcp_metrics generic netlink\n");
}