// SPDX-License-Identifier: GPL-2.0
#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/cache.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/tcp.h>
#include <linux/hash.h>
#include <linux/tcp_metrics.h>
#include <linux/vmalloc.h>

#include <net/inet_connection_sock.h>
#include <net/net_namespace.h>
#include <net/request_sock.h>
#include <net/inetpeer.h>
#include <net/sock.h>
#include <net/ipv6.h>
#include <net/dst.h>
#include <net/tcp.h>
#include <net/genetlink.h>
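
/* TCP metrics cache: remembers RTT, RTTVAR, ssthresh, cwnd, reordering
 * and TCP Fast Open state per (saddr, daddr) pair, so that a new
 * connection to a known peer can be seeded from past experience instead
 * of the protocol defaults.
 */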

int sysctl_tcp_nometrics_save __read_mostly;

static struct tcp_metrics_block *__tcp_get_metrics(const struct inetpeer_addr *saddr,
						   const struct inetpeer_addr *daddr,
						   struct net *net, unsigned int hash);

struct tcp_fastopen_metrics {
	u16	mss;
	u16	syn_loss:10,		/* Recurring Fast Open SYN losses */
		try_exp:2;		/* Request w/ exp. option (once) */
	unsigned long	last_syn_loss;	/* Last Fast Open SYN loss */
	struct	tcp_fastopen_cookie	cookie;
};

/* TCP_METRIC_MAX includes 2 extra fields for userspace compatibility
 * Kernel only stores RTT and RTTVAR in usec resolution
 */
#define TCP_METRIC_MAX_KERNEL	(TCP_METRIC_MAX - 2)

struct tcp_metrics_block {
	struct tcp_metrics_block __rcu	*tcpm_next;
	struct net			*tcpm_net;
	struct inetpeer_addr		tcpm_saddr;
	struct inetpeer_addr		tcpm_daddr;
	unsigned long			tcpm_stamp;
	u32				tcpm_lock;
	u32				tcpm_vals[TCP_METRIC_MAX_KERNEL + 1];
	struct tcp_fastopen_metrics	tcpm_fastopen;

	struct rcu_head			rcu_head;
};
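
/* Blocks hang off tcpm_hash_bucket chains and are freed with
 * kfree_rcu(), so readers may traverse tcpm_next under rcu_read_lock()
 * without taking tcp_metrics_lock.
 */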

static inline struct net *tm_net(const struct tcp_metrics_block *tm)
{
	/* Paired with the WRITE_ONCE() in tcpm_new() */
	return READ_ONCE(tm->tcpm_net);
}

static bool tcp_metric_locked(struct tcp_metrics_block *tm,
			      enum tcp_metric_index idx)
{
	/* Paired with WRITE_ONCE() in tcpm_suck_dst() */
	return READ_ONCE(tm->tcpm_lock) & (1 << idx);
}

static u32 tcp_metric_get(const struct tcp_metrics_block *tm,
			  enum tcp_metric_index idx)
{
	/* Paired with WRITE_ONCE() in tcp_metric_set() */
	return READ_ONCE(tm->tcpm_vals[idx]);
}

static void tcp_metric_set(struct tcp_metrics_block *tm,
			   enum tcp_metric_index idx,
			   u32 val)
{
	/* Paired with READ_ONCE() in tcp_metric_get() */
	WRITE_ONCE(tm->tcpm_vals[idx], val);
}

static bool addr_same(const struct inetpeer_addr *a,
		      const struct inetpeer_addr *b)
{
	return (a->family == b->family) && !inetpeer_addr_cmp(a, b);
}

struct tcpm_hash_bucket {
	struct tcp_metrics_block __rcu	*chain;
};

static struct tcpm_hash_bucket	*tcp_metrics_hash __read_mostly;
static unsigned int		tcp_metrics_hash_log __read_mostly;

static DEFINE_SPINLOCK(tcp_metrics_lock);
static DEFINE_SEQLOCK(fastopen_seqlock);
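
/* Locking: tcp_metrics_lock serializes all chain updates, while the
 * Fast Open fields are written under fastopen_seqlock so that readers
 * can retry until they see a consistent snapshot.
 */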

static void tcpm_suck_dst(struct tcp_metrics_block *tm,
			  const struct dst_entry *dst,
			  bool fastopen_clear)
{
	u32 msval;
	u32 val;

	WRITE_ONCE(tm->tcpm_stamp, jiffies);

	val = 0;
	if (dst_metric_locked(dst, RTAX_RTT))
		val |= 1 << TCP_METRIC_RTT;
	if (dst_metric_locked(dst, RTAX_RTTVAR))
		val |= 1 << TCP_METRIC_RTTVAR;
	if (dst_metric_locked(dst, RTAX_SSTHRESH))
		val |= 1 << TCP_METRIC_SSTHRESH;
	if (dst_metric_locked(dst, RTAX_CWND))
		val |= 1 << TCP_METRIC_CWND;
	if (dst_metric_locked(dst, RTAX_REORDERING))
		val |= 1 << TCP_METRIC_REORDERING;
	/* Paired with READ_ONCE() in tcp_metric_locked() */
	WRITE_ONCE(tm->tcpm_lock, val);

	msval = dst_metric_raw(dst, RTAX_RTT);
	tcp_metric_set(tm, TCP_METRIC_RTT, msval * USEC_PER_MSEC);

	msval = dst_metric_raw(dst, RTAX_RTTVAR);
	tcp_metric_set(tm, TCP_METRIC_RTTVAR, msval * USEC_PER_MSEC);
	tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
		       dst_metric_raw(dst, RTAX_SSTHRESH));
	tcp_metric_set(tm, TCP_METRIC_CWND,
		       dst_metric_raw(dst, RTAX_CWND));
	tcp_metric_set(tm, TCP_METRIC_REORDERING,
		       dst_metric_raw(dst, RTAX_REORDERING));
	if (fastopen_clear) {
		write_seqlock(&fastopen_seqlock);
		tm->tcpm_fastopen.mss = 0;
		tm->tcpm_fastopen.syn_loss = 0;
		tm->tcpm_fastopen.try_exp = 0;
		tm->tcpm_fastopen.cookie.exp = false;
		tm->tcpm_fastopen.cookie.len = 0;
		write_sequnlock(&fastopen_seqlock);
	}
}

#define TCP_METRICS_TIMEOUT		(60 * 60 * HZ)
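
/* Re-sync an entry with its route metrics if it has gone stale,
 * i.e. has not been refreshed within the last hour.
 */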
static void tcpm_check_stamp(struct tcp_metrics_block *tm,
			     const struct dst_entry *dst)
{
	unsigned long limit;

	if (!tm)
		return;
	limit = READ_ONCE(tm->tcpm_stamp) + TCP_METRICS_TIMEOUT;
	if (unlikely(time_after(jiffies, limit)))
		tcpm_suck_dst(tm, dst, false);
}
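
/* A lookup that walks more than TCP_METRICS_RECLAIM_DEPTH entries
 * without a match returns TCP_METRICS_RECLAIM_PTR instead of NULL,
 * telling tcpm_new() to recycle the oldest entry in the bucket rather
 * than let the chain grow without bound.
 */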
#define TCP_METRICS_RECLAIM_DEPTH	5
#define TCP_METRICS_RECLAIM_PTR		(struct tcp_metrics_block *) 0x1UL

#define deref_locked(p)	\
	rcu_dereference_protected(p, lockdep_is_held(&tcp_metrics_lock))

static struct tcp_metrics_block *tcpm_new(struct dst_entry *dst,
					  struct inetpeer_addr *saddr,
					  struct inetpeer_addr *daddr,
					  unsigned int hash)
{
	struct tcp_metrics_block *tm;
	struct net *net;
	bool reclaim = false;

	spin_lock_bh(&tcp_metrics_lock);
	net = dev_net(dst->dev);

	/* While waiting for the spin-lock the cache might have been populated
	 * with this entry and so we have to check again.
	 */
	tm = __tcp_get_metrics(saddr, daddr, net, hash);
	if (tm == TCP_METRICS_RECLAIM_PTR) {
		reclaim = true;
		tm = NULL;
	}
	if (tm) {
		tcpm_check_stamp(tm, dst);
		goto out_unlock;
	}

	if (unlikely(reclaim)) {
		struct tcp_metrics_block *oldest;

		oldest = deref_locked(tcp_metrics_hash[hash].chain);
		for (tm = deref_locked(oldest->tcpm_next); tm;
		     tm = deref_locked(tm->tcpm_next)) {
			if (time_before(READ_ONCE(tm->tcpm_stamp),
					READ_ONCE(oldest->tcpm_stamp)))
				oldest = tm;
		}
		tm = oldest;
	} else {
		tm = kzalloc(sizeof(*tm), GFP_ATOMIC);
		if (!tm)
			goto out_unlock;
	}
	/* Paired with the READ_ONCE() in tm_net() */
	WRITE_ONCE(tm->tcpm_net, net);

	tm->tcpm_saddr = *saddr;
	tm->tcpm_daddr = *daddr;

	tcpm_suck_dst(tm, dst, reclaim);

	if (likely(!reclaim)) {
		tm->tcpm_next = tcp_metrics_hash[hash].chain;
		rcu_assign_pointer(tcp_metrics_hash[hash].chain, tm);
	}

out_unlock:
	spin_unlock_bh(&tcp_metrics_lock);
	return tm;
}

static struct tcp_metrics_block *tcp_get_encode(struct tcp_metrics_block *tm, int depth)
{
	if (tm)
		return tm;
	if (depth > TCP_METRICS_RECLAIM_DEPTH)
		return TCP_METRICS_RECLAIM_PTR;
	return NULL;
}

static struct tcp_metrics_block *__tcp_get_metrics(const struct inetpeer_addr *saddr,
						   const struct inetpeer_addr *daddr,
						   struct net *net, unsigned int hash)
{
	struct tcp_metrics_block *tm;
	int depth = 0;

	for (tm = rcu_dereference(tcp_metrics_hash[hash].chain); tm;
	     tm = rcu_dereference(tm->tcpm_next)) {
		if (addr_same(&tm->tcpm_saddr, saddr) &&
		    addr_same(&tm->tcpm_daddr, daddr) &&
		    net_eq(tm_net(tm), net))
			break;
		depth++;
	}
	return tcp_get_encode(tm, depth);
}

static struct tcp_metrics_block *__tcp_get_metrics_req(struct request_sock *req,
						       struct dst_entry *dst)
{
	struct tcp_metrics_block *tm;
	struct inetpeer_addr saddr, daddr;
	unsigned int hash;
	struct net *net;

	saddr.family = req->rsk_ops->family;
	daddr.family = req->rsk_ops->family;
	switch (daddr.family) {
	case AF_INET:
		inetpeer_set_addr_v4(&saddr, inet_rsk(req)->ir_loc_addr);
		inetpeer_set_addr_v4(&daddr, inet_rsk(req)->ir_rmt_addr);
		hash = ipv4_addr_hash(inet_rsk(req)->ir_rmt_addr);
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case AF_INET6:
		inetpeer_set_addr_v6(&saddr, &inet_rsk(req)->ir_v6_loc_addr);
		inetpeer_set_addr_v6(&daddr, &inet_rsk(req)->ir_v6_rmt_addr);
		hash = ipv6_addr_hash(&inet_rsk(req)->ir_v6_rmt_addr);
		break;
#endif
	default:
		return NULL;
	}

	net = dev_net(dst->dev);
	hash ^= net_hash_mix(net);
	hash = hash_32(hash, tcp_metrics_hash_log);

	for (tm = rcu_dereference(tcp_metrics_hash[hash].chain); tm;
	     tm = rcu_dereference(tm->tcpm_next)) {
		if (addr_same(&tm->tcpm_saddr, &saddr) &&
		    addr_same(&tm->tcpm_daddr, &daddr) &&
		    net_eq(tm_net(tm), net))
			break;
	}
	tcpm_check_stamp(tm, dst);
	return tm;
}
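
/* Look up (and optionally create) the entry for a full socket.
 * IPv4-mapped IPv6 destinations are stored under their IPv4 form, so
 * both socket families share a single cache entry.
 */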
static struct tcp_metrics_block *tcp_get_metrics(struct sock *sk,
						 struct dst_entry *dst,
						 bool create)
{
	struct tcp_metrics_block *tm;
	struct inetpeer_addr saddr, daddr;
	unsigned int hash;
	struct net *net;

	if (sk->sk_family == AF_INET) {
		inetpeer_set_addr_v4(&saddr, inet_sk(sk)->inet_saddr);
		inetpeer_set_addr_v4(&daddr, inet_sk(sk)->inet_daddr);
		hash = ipv4_addr_hash(inet_sk(sk)->inet_daddr);
	}
#if IS_ENABLED(CONFIG_IPV6)
	else if (sk->sk_family == AF_INET6) {
		if (ipv6_addr_v4mapped(&sk->sk_v6_daddr)) {
			inetpeer_set_addr_v4(&saddr, inet_sk(sk)->inet_saddr);
			inetpeer_set_addr_v4(&daddr, inet_sk(sk)->inet_daddr);
			hash = ipv4_addr_hash(inet_sk(sk)->inet_daddr);
		} else {
			inetpeer_set_addr_v6(&saddr, &sk->sk_v6_rcv_saddr);
			inetpeer_set_addr_v6(&daddr, &sk->sk_v6_daddr);
			hash = ipv6_addr_hash(&sk->sk_v6_daddr);
		}
	}
#endif
	else
		return NULL;

	net = dev_net(dst->dev);
	hash ^= net_hash_mix(net);
	hash = hash_32(hash, tcp_metrics_hash_log);

	tm = __tcp_get_metrics(&saddr, &daddr, net, hash);
	if (tm == TCP_METRICS_RECLAIM_PTR)
		tm = NULL;
	if (!tm && create)
		tm = tcpm_new(dst, &saddr, &daddr, hash);
	else
		tcpm_check_stamp(tm, dst);

	return tm;
}

/* Save metrics learned by this TCP session. This function is called
 * only when TCP finishes successfully, i.e. when it enters TIME-WAIT
 * or goes from LAST-ACK to CLOSE.
 */
void tcp_update_metrics(struct sock *sk)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	struct dst_entry *dst = __sk_dst_get(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct net *net = sock_net(sk);
	struct tcp_metrics_block *tm;
	unsigned long rtt;
	u32 val;
	int m;

	sk_dst_confirm(sk);
	if (sysctl_tcp_nometrics_save || !dst)
		return;

	rcu_read_lock();
	if (icsk->icsk_backoff || !tp->srtt_us) {
		/* This session failed to estimate rtt. Why?
		 * Probably, no packets returned in time. Reset our
		 * results.
		 */
		tm = tcp_get_metrics(sk, dst, false);
		if (tm && !tcp_metric_locked(tm, TCP_METRIC_RTT))
			tcp_metric_set(tm, TCP_METRIC_RTT, 0);
		goto out_unlock;
	} else
		tm = tcp_get_metrics(sk, dst, true);

	if (!tm)
		goto out_unlock;

	rtt = tcp_metric_get(tm, TCP_METRIC_RTT);
	m = rtt - tp->srtt_us;

	/* If newly calculated rtt larger than stored one, store new
	 * one. Otherwise, use EWMA. Remember, rtt overestimation is
	 * always better than underestimation.
	 */
	if (!tcp_metric_locked(tm, TCP_METRIC_RTT)) {
		if (m <= 0)
			rtt = tp->srtt_us;
		else
			rtt -= (m >> 3);
		tcp_metric_set(tm, TCP_METRIC_RTT, rtt);
	}

	if (!tcp_metric_locked(tm, TCP_METRIC_RTTVAR)) {
		unsigned long var;

		if (m < 0)
			m = -m;

		/* Scale deviation to rttvar fixed point */
		m >>= 1;
		if (m < tp->mdev_us)
			m = tp->mdev_us;

		var = tcp_metric_get(tm, TCP_METRIC_RTTVAR);
		if (m >= var)
			var = m;
		else
			var -= (var - m) >> 2;

		tcp_metric_set(tm, TCP_METRIC_RTTVAR, var);
	}

	if (tcp_in_initial_slowstart(tp)) {
		/* Slow start still did not finish. */
		if (!tcp_metric_locked(tm, TCP_METRIC_SSTHRESH)) {
			val = tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
			if (val && (tp->snd_cwnd >> 1) > val)
				tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
					       tp->snd_cwnd >> 1);
		}
		if (!tcp_metric_locked(tm, TCP_METRIC_CWND)) {
			val = tcp_metric_get(tm, TCP_METRIC_CWND);
			if (tp->snd_cwnd > val)
				tcp_metric_set(tm, TCP_METRIC_CWND,
					       tp->snd_cwnd);
		}
	} else if (!tcp_in_slow_start(tp) &&
		   icsk->icsk_ca_state == TCP_CA_Open) {
		/* Cong. avoidance phase, cwnd is reliable. */
		if (!tcp_metric_locked(tm, TCP_METRIC_SSTHRESH))
			tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
				       max(tp->snd_cwnd >> 1, tp->snd_ssthresh));
		if (!tcp_metric_locked(tm, TCP_METRIC_CWND)) {
			val = tcp_metric_get(tm, TCP_METRIC_CWND);
			tcp_metric_set(tm, TCP_METRIC_CWND, (val + tp->snd_cwnd) >> 1);
		}
	} else {
		/* Else slow start did not finish, cwnd is non-sense,
		 * ssthresh may be also invalid.
		 */
		if (!tcp_metric_locked(tm, TCP_METRIC_CWND)) {
			val = tcp_metric_get(tm, TCP_METRIC_CWND);
			tcp_metric_set(tm, TCP_METRIC_CWND,
				       (val + tp->snd_ssthresh) >> 1);
		}
		if (!tcp_metric_locked(tm, TCP_METRIC_SSTHRESH)) {
			val = tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
			if (val && tp->snd_ssthresh > val)
				tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
					       tp->snd_ssthresh);
		}
		if (!tcp_metric_locked(tm, TCP_METRIC_REORDERING)) {
			val = tcp_metric_get(tm, TCP_METRIC_REORDERING);
			if (val < tp->reordering &&
			    tp->reordering != net->ipv4.sysctl_tcp_reordering)
				tcp_metric_set(tm, TCP_METRIC_REORDERING,
					       tp->reordering);
		}
	}
	WRITE_ONCE(tm->tcpm_stamp, jiffies);
out_unlock:
	rcu_read_unlock();
}

/* Initialize metrics on socket. */

void tcp_init_metrics(struct sock *sk)
{
	struct dst_entry *dst = __sk_dst_get(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_metrics_block *tm;
	u32 val, crtt = 0; /* cached RTT scaled by 8 */

	sk_dst_confirm(sk);
	/* ssthresh may have been reduced unnecessarily during
	 * 3WHS. Restore it back to its initial default.
	 */
	tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
	if (!dst)
		goto reset;

	rcu_read_lock();
	tm = tcp_get_metrics(sk, dst, false);
	if (!tm) {
		rcu_read_unlock();
		goto reset;
	}

	if (tcp_metric_locked(tm, TCP_METRIC_CWND))
		tp->snd_cwnd_clamp = tcp_metric_get(tm, TCP_METRIC_CWND);

	val = tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
	if (val) {
		tp->snd_ssthresh = val;
		if (tp->snd_ssthresh > tp->snd_cwnd_clamp)
			tp->snd_ssthresh = tp->snd_cwnd_clamp;
	}
	val = tcp_metric_get(tm, TCP_METRIC_REORDERING);
	if (val && tp->reordering != val) {
		tcp_disable_fack(tp);
		tp->reordering = val;
	}

	crtt = tcp_metric_get(tm, TCP_METRIC_RTT);
	rcu_read_unlock();
reset:
	/* The initial RTT measurement from the SYN/SYN-ACK is not ideal
	 * to seed the RTO for later data packets because SYN packets are
	 * small. Use the per-dst cached values to seed the RTO but keep
	 * the RTT estimator variables intact (e.g., srtt, mdev, rttvar).
	 * Later the RTO will be updated immediately upon obtaining the first
	 * data RTT sample (tcp_rtt_estimator()). Hence the cached RTT only
	 * influences the first RTO but not later RTT estimation.
	 *
	 * But if RTT is not available from the SYN (due to retransmits or
	 * syn cookies) or the cache, force a conservative 3secs timeout.
	 *
	 * A bit of theory. RTT is time passed after "normal" sized packet
	 * is sent until it is ACKed. In normal circumstances sending small
	 * packets forces peer to delay ACKs and calculation is correct too.
	 * The algorithm is adaptive and, provided we follow specs, it
	 * NEVER underestimates RTT. BUT! If peer tries to make some clever
	 * tricks sort of "quick acks" for time long enough to decrease RTT
	 * to low value, and then abruptly stops doing it and starts to delay
	 * ACKs, wait for troubles.
	 */
	if (crtt > tp->srtt_us) {
		/* Set RTO like tcp_rtt_estimator(), but from cached RTT. */
		crtt /= 8 * USEC_PER_SEC / HZ;
		inet_csk(sk)->icsk_rto = crtt + max(2 * crtt, tcp_rto_min(sk));
	} else if (tp->srtt_us == 0) {
		/* RFC6298: 5.7 We've failed to get a valid RTT sample from
		 * 3WHS. This is most likely due to retransmission,
		 * including spurious one. Reset the RTO back to 3secs
		 * from the more aggressive 1sec to avoid more spurious
		 * retransmission.
		 */
		tp->rttvar_us = jiffies_to_usecs(TCP_TIMEOUT_FALLBACK);
		tp->mdev_us = tp->mdev_max_us = tp->rttvar_us;

		inet_csk(sk)->icsk_rto = TCP_TIMEOUT_FALLBACK;
	}
	/* Cut cwnd down to 1 per RFC5681 if SYN or SYN-ACK has been
	 * retransmitted. In light of RFC6298 more aggressive 1sec
	 * initRTO, we only reset cwnd when more than 1 SYN/SYN-ACK
	 * retransmission has occurred.
	 */
	if (tp->total_retrans > 1)
		tp->snd_cwnd = 1;
	else
		tp->snd_cwnd = tcp_init_cwnd(tp, dst);
	tp->snd_cwnd_stamp = tcp_jiffies32;
}
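
/* A peer is "proven" if the cache already holds a positive RTT sample
 * for it, i.e. we have successfully exchanged data with it before.
 */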
bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst)
{
	struct tcp_metrics_block *tm;
	bool ret;

	if (!dst)
		return false;

	rcu_read_lock();
	tm = __tcp_get_metrics_req(req, dst);
	if (tm && tcp_metric_get(tm, TCP_METRIC_RTT))
		ret = true;
	else
		ret = false;
	rcu_read_unlock();

	return ret;
}

void tcp_fastopen_cache_get(struct sock *sk, u16 *mss,
			    struct tcp_fastopen_cookie *cookie,
			    int *syn_loss, unsigned long *last_syn_loss)
{
	struct tcp_metrics_block *tm;

	rcu_read_lock();
	tm = tcp_get_metrics(sk, __sk_dst_get(sk), false);
	if (tm) {
		struct tcp_fastopen_metrics *tfom = &tm->tcpm_fastopen;
		unsigned int seq;

		do {
			seq = read_seqbegin(&fastopen_seqlock);
			if (tfom->mss)
				*mss = tfom->mss;
			*cookie = tfom->cookie;
			if (cookie->len <= 0 && tfom->try_exp == 1)
				cookie->exp = true;
			*syn_loss = tfom->syn_loss;
			*last_syn_loss = *syn_loss ? tfom->last_syn_loss : 0;
		} while (read_seqretry(&fastopen_seqlock, seq));
	}
	rcu_read_unlock();
}

void tcp_fastopen_cache_set(struct sock *sk, u16 mss,
			    struct tcp_fastopen_cookie *cookie, bool syn_lost,
			    u16 try_exp)
{
	struct dst_entry *dst = __sk_dst_get(sk);
	struct tcp_metrics_block *tm;

	if (!dst)
		return;
	rcu_read_lock();
	tm = tcp_get_metrics(sk, dst, true);
	if (tm) {
		struct tcp_fastopen_metrics *tfom = &tm->tcpm_fastopen;

		write_seqlock_bh(&fastopen_seqlock);
		if (mss)
			tfom->mss = mss;
		if (cookie && cookie->len > 0)
			tfom->cookie = *cookie;
		else if (try_exp > tfom->try_exp &&
			 tfom->cookie.len <= 0 && !tfom->cookie.exp)
			tfom->try_exp = try_exp;
		if (syn_lost) {
			++tfom->syn_loss;
			tfom->last_syn_loss = jiffies;
		} else
			tfom->syn_loss = 0;
		write_sequnlock_bh(&fastopen_seqlock);
	}
	rcu_read_unlock();
}
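
/* Generic netlink interface, used e.g. by "ip tcp_metrics" to show,
 * look up and flush cache entries from userspace.
 */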
static struct genl_family tcp_metrics_nl_family;

static const struct nla_policy tcp_metrics_nl_policy[TCP_METRICS_ATTR_MAX + 1] = {
	[TCP_METRICS_ATTR_ADDR_IPV4]	= { .type = NLA_U32, },
	[TCP_METRICS_ATTR_ADDR_IPV6]	= { .type = NLA_BINARY,
					    .len = sizeof(struct in6_addr), },
	/* Following attributes are not received for GET/DEL,
	 * we keep them for reference
	 */
#if 0
	[TCP_METRICS_ATTR_AGE]		= { .type = NLA_MSECS, },
	[TCP_METRICS_ATTR_TW_TSVAL]	= { .type = NLA_U32, },
	[TCP_METRICS_ATTR_TW_TS_STAMP]	= { .type = NLA_S32, },
	[TCP_METRICS_ATTR_VALS]		= { .type = NLA_NESTED, },
	[TCP_METRICS_ATTR_FOPEN_MSS]	= { .type = NLA_U16, },
	[TCP_METRICS_ATTR_FOPEN_SYN_DROPS]	= { .type = NLA_U16, },
	[TCP_METRICS_ATTR_FOPEN_SYN_DROP_TS]	= { .type = NLA_MSECS, },
	[TCP_METRICS_ATTR_FOPEN_COOKIE]	= { .type = NLA_BINARY,
					    .len = TCP_FASTOPEN_COOKIE_MAX, },
#endif
};

/* Add attributes, caller cancels its header on failure */
static int tcp_metrics_fill_info(struct sk_buff *msg,
				 struct tcp_metrics_block *tm)
{
	struct nlattr *nest;
	int i;

	switch (tm->tcpm_daddr.family) {
	case AF_INET:
		if (nla_put_in_addr(msg, TCP_METRICS_ATTR_ADDR_IPV4,
				    inetpeer_get_addr_v4(&tm->tcpm_daddr)) < 0)
			goto nla_put_failure;
		if (nla_put_in_addr(msg, TCP_METRICS_ATTR_SADDR_IPV4,
				    inetpeer_get_addr_v4(&tm->tcpm_saddr)) < 0)
			goto nla_put_failure;
		break;
	case AF_INET6:
		if (nla_put_in6_addr(msg, TCP_METRICS_ATTR_ADDR_IPV6,
				     inetpeer_get_addr_v6(&tm->tcpm_daddr)) < 0)
			goto nla_put_failure;
		if (nla_put_in6_addr(msg, TCP_METRICS_ATTR_SADDR_IPV6,
				     inetpeer_get_addr_v6(&tm->tcpm_saddr)) < 0)
			goto nla_put_failure;
		break;
	default:
		return -EAFNOSUPPORT;
	}

	if (nla_put_msecs(msg, TCP_METRICS_ATTR_AGE,
			  jiffies - READ_ONCE(tm->tcpm_stamp),
			  TCP_METRICS_ATTR_PAD) < 0)
		goto nla_put_failure;

	{
		int n = 0;

		nest = nla_nest_start(msg, TCP_METRICS_ATTR_VALS);
		if (!nest)
			goto nla_put_failure;
		for (i = 0; i < TCP_METRIC_MAX_KERNEL + 1; i++) {
			u32 val = tcp_metric_get(tm, i);

			if (!val)
				continue;
			if (i == TCP_METRIC_RTT) {
				if (nla_put_u32(msg, TCP_METRIC_RTT_US + 1,
						val) < 0)
					goto nla_put_failure;
				n++;
				val = max(val / 1000, 1U);
			}
			if (i == TCP_METRIC_RTTVAR) {
				if (nla_put_u32(msg, TCP_METRIC_RTTVAR_US + 1,
						val) < 0)
					goto nla_put_failure;
				n++;
				val = max(val / 1000, 1U);
			}
			if (nla_put_u32(msg, i + 1, val) < 0)
				goto nla_put_failure;
			n++;
		}
		if (n)
			nla_nest_end(msg, nest);
		else
			nla_nest_cancel(msg, nest);
	}

	{
		struct tcp_fastopen_metrics tfom_copy[1], *tfom;
		unsigned int seq;

		do {
			seq = read_seqbegin(&fastopen_seqlock);
			tfom_copy[0] = tm->tcpm_fastopen;
		} while (read_seqretry(&fastopen_seqlock, seq));

		tfom = tfom_copy;
		if (tfom->mss &&
		    nla_put_u16(msg, TCP_METRICS_ATTR_FOPEN_MSS,
				tfom->mss) < 0)
			goto nla_put_failure;
		if (tfom->syn_loss &&
		    (nla_put_u16(msg, TCP_METRICS_ATTR_FOPEN_SYN_DROPS,
				 tfom->syn_loss) < 0 ||
		     nla_put_msecs(msg, TCP_METRICS_ATTR_FOPEN_SYN_DROP_TS,
				   jiffies - tfom->last_syn_loss,
				   TCP_METRICS_ATTR_PAD) < 0))
			goto nla_put_failure;
		if (tfom->cookie.len > 0 &&
		    nla_put(msg, TCP_METRICS_ATTR_FOPEN_COOKIE,
			    tfom->cookie.len, tfom->cookie.val) < 0)
			goto nla_put_failure;
	}
	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

static int tcp_metrics_dump_info(struct sk_buff *skb,
				 struct netlink_callback *cb,
				 struct tcp_metrics_block *tm)
{
	void *hdr;

	hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			  &tcp_metrics_nl_family, NLM_F_MULTI,
			  TCP_METRICS_CMD_GET);
	if (!hdr)
		return -EMSGSIZE;

	if (tcp_metrics_fill_info(skb, tm) < 0)
		goto nla_put_failure;

	genlmsg_end(skb, hdr);
	return 0;

nla_put_failure:
	genlmsg_cancel(skb, hdr);
	return -EMSGSIZE;
}

static int tcp_metrics_nl_dump(struct sk_buff *skb,
			       struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	unsigned int max_rows = 1U << tcp_metrics_hash_log;
	unsigned int row, s_row = cb->args[0];
	int s_col = cb->args[1], col = s_col;

	for (row = s_row; row < max_rows; row++, s_col = 0) {
		struct tcp_metrics_block *tm;
		struct tcpm_hash_bucket *hb = tcp_metrics_hash + row;

		rcu_read_lock();
		for (col = 0, tm = rcu_dereference(hb->chain); tm;
		     tm = rcu_dereference(tm->tcpm_next), col++) {
			if (!net_eq(tm_net(tm), net))
				continue;
			if (col < s_col)
				continue;
			if (tcp_metrics_dump_info(skb, cb, tm) < 0) {
				rcu_read_unlock();
				goto done;
			}
		}
		rcu_read_unlock();
	}

done:
	cb->args[0] = row;
	cb->args[1] = col;
	return skb->len;
}

static int __parse_nl_addr(struct genl_info *info, struct inetpeer_addr *addr,
			   unsigned int *hash, int optional, int v4, int v6)
{
	struct nlattr *a;

	a = info->attrs[v4];
	if (a) {
		inetpeer_set_addr_v4(addr, nla_get_in_addr(a));
		if (hash)
			*hash = ipv4_addr_hash(inetpeer_get_addr_v4(addr));
		return 0;
	}
	a = info->attrs[v6];
	if (a) {
		struct in6_addr in6;

		if (nla_len(a) != sizeof(struct in6_addr))
			return -EINVAL;
		in6 = nla_get_in6_addr(a);
		inetpeer_set_addr_v6(addr, &in6);
		if (hash)
			*hash = ipv6_addr_hash(inetpeer_get_addr_v6(addr));
		return 0;
	}
	return optional ? 1 : -EAFNOSUPPORT;
}

static int parse_nl_addr(struct genl_info *info, struct inetpeer_addr *addr,
			 unsigned int *hash, int optional)
{
	return __parse_nl_addr(info, addr, hash, optional,
			       TCP_METRICS_ATTR_ADDR_IPV4,
			       TCP_METRICS_ATTR_ADDR_IPV6);
}

static int parse_nl_saddr(struct genl_info *info, struct inetpeer_addr *addr)
{
	return __parse_nl_addr(info, addr, NULL, 0,
			       TCP_METRICS_ATTR_SADDR_IPV4,
			       TCP_METRICS_ATTR_SADDR_IPV6);
}
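
/* TCP_METRICS_CMD_GET doit: look up a single entry by destination (and
 * optionally source) address; dumps of the whole table go through
 * tcp_metrics_nl_dump() instead.
 */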
static int tcp_metrics_nl_cmd_get(struct sk_buff *skb, struct genl_info *info)
{
	struct tcp_metrics_block *tm;
	struct inetpeer_addr saddr, daddr;
	unsigned int hash;
	struct sk_buff *msg;
	struct net *net = genl_info_net(info);
	void *reply;
	int ret;
	bool src = true;

	ret = parse_nl_addr(info, &daddr, &hash, 0);
	if (ret < 0)
		return ret;

	ret = parse_nl_saddr(info, &saddr);
	if (ret < 0)
		src = false;

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	reply = genlmsg_put_reply(msg, info, &tcp_metrics_nl_family, 0,
				  info->genlhdr->cmd);
	if (!reply)
		goto nla_put_failure;

	hash ^= net_hash_mix(net);
	hash = hash_32(hash, tcp_metrics_hash_log);
	ret = -ESRCH;
	rcu_read_lock();
	for (tm = rcu_dereference(tcp_metrics_hash[hash].chain); tm;
	     tm = rcu_dereference(tm->tcpm_next)) {
		if (addr_same(&tm->tcpm_daddr, &daddr) &&
		    (!src || addr_same(&tm->tcpm_saddr, &saddr)) &&
		    net_eq(tm_net(tm), net)) {
			ret = tcp_metrics_fill_info(msg, tm);
			break;
		}
	}
	rcu_read_unlock();
	if (ret < 0)
		goto out_free;

	genlmsg_end(msg, reply);
	return genlmsg_reply(msg, info);

nla_put_failure:
	ret = -EMSGSIZE;

out_free:
	nlmsg_free(msg);
	return ret;
}

static void tcp_metrics_flush_all(struct net *net)
{
	unsigned int max_rows = 1U << tcp_metrics_hash_log;
	struct tcpm_hash_bucket *hb = tcp_metrics_hash;
	struct tcp_metrics_block *tm;
	unsigned int row;

	for (row = 0; row < max_rows; row++, hb++) {
		struct tcp_metrics_block __rcu **pp;

		spin_lock_bh(&tcp_metrics_lock);
		pp = &hb->chain;
		for (tm = deref_locked(*pp); tm; tm = deref_locked(*pp)) {
			if (net_eq(tm_net(tm), net)) {
				*pp = tm->tcpm_next;
				kfree_rcu(tm, rcu_head);
			} else {
				pp = &tm->tcpm_next;
			}
		}
		spin_unlock_bh(&tcp_metrics_lock);
	}
}
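
/* TCP_METRICS_CMD_DEL: if no destination address is supplied, flush
 * every entry belonging to the caller's network namespace.
 */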
static int tcp_metrics_nl_cmd_del(struct sk_buff *skb, struct genl_info *info)
{
	struct tcpm_hash_bucket *hb;
	struct tcp_metrics_block *tm;
	struct tcp_metrics_block __rcu **pp;
	struct inetpeer_addr saddr, daddr;
	unsigned int hash;
	struct net *net = genl_info_net(info);
	int ret;
	bool src = true, found = false;

	ret = parse_nl_addr(info, &daddr, &hash, 1);
	if (ret < 0)
		return ret;
	if (ret > 0) {
		tcp_metrics_flush_all(net);
		return 0;
	}
	ret = parse_nl_saddr(info, &saddr);
	if (ret < 0)
		src = false;

	hash ^= net_hash_mix(net);
	hash = hash_32(hash, tcp_metrics_hash_log);
	hb = tcp_metrics_hash + hash;
	pp = &hb->chain;
	spin_lock_bh(&tcp_metrics_lock);
	for (tm = deref_locked(*pp); tm; tm = deref_locked(*pp)) {
		if (addr_same(&tm->tcpm_daddr, &daddr) &&
		    (!src || addr_same(&tm->tcpm_saddr, &saddr)) &&
		    net_eq(tm_net(tm), net)) {
			*pp = tm->tcpm_next;
			kfree_rcu(tm, rcu_head);
			found = true;
		} else {
			pp = &tm->tcpm_next;
		}
	}
	spin_unlock_bh(&tcp_metrics_lock);
	if (!found)
		return -ESRCH;
	return 0;
}

static const struct genl_ops tcp_metrics_nl_ops[] = {
	{
		.cmd = TCP_METRICS_CMD_GET,
		.doit = tcp_metrics_nl_cmd_get,
		.dumpit = tcp_metrics_nl_dump,
		.policy = tcp_metrics_nl_policy,
	},
	{
		.cmd = TCP_METRICS_CMD_DEL,
		.doit = tcp_metrics_nl_cmd_del,
		.policy = tcp_metrics_nl_policy,
		.flags = GENL_ADMIN_PERM,
	},
};

static struct genl_family tcp_metrics_nl_family __ro_after_init = {
	.hdrsize	= 0,
	.name		= TCP_METRICS_GENL_NAME,
	.version	= TCP_METRICS_GENL_VERSION,
	.maxattr	= TCP_METRICS_ATTR_MAX,
	.netnsok	= true,
	.module		= THIS_MODULE,
	.ops		= tcp_metrics_nl_ops,
	.n_ops		= ARRAY_SIZE(tcp_metrics_nl_ops),
};

static unsigned int tcpmhash_entries;
static int __init set_tcpmhash_entries(char *str)
{
	ssize_t ret;

	if (!str)
		return 0;

	ret = kstrtouint(str, 0, &tcpmhash_entries);
	if (ret)
		return 0;

	return 1;
}
__setup("tcpmhash_entries=", set_tcpmhash_entries);
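
/* The hash table is global and allocated once for init_net; its size
 * comes from the "tcpmhash_entries=" boot parameter, with a default
 * scaled to available RAM.
 */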
static int __net_init tcp_net_metrics_init(struct net *net)
{
	size_t size;
	unsigned int slots;

	if (!net_eq(net, &init_net))
		return 0;

	slots = tcpmhash_entries;
	if (!slots) {
		if (totalram_pages >= 128 * 1024)
			slots = 16 * 1024;
		else
			slots = 8 * 1024;
	}

	tcp_metrics_hash_log = order_base_2(slots);
	size = sizeof(struct tcpm_hash_bucket) << tcp_metrics_hash_log;

	tcp_metrics_hash = kvzalloc(size, GFP_KERNEL);
	if (!tcp_metrics_hash)
		return -ENOMEM;

	return 0;
}

static void __net_exit tcp_net_metrics_exit(struct net *net)
{
	tcp_metrics_flush_all(net);
}

static __net_initdata struct pernet_operations tcp_net_metrics_ops = {
	.init	=	tcp_net_metrics_init,
	.exit	=	tcp_net_metrics_exit,
};

void __init tcp_metrics_init(void)
{
	int ret;

	ret = register_pernet_subsys(&tcp_net_metrics_ops);
	if (ret < 0)
		panic("Could not allocate the tcp_metrics hash table\n");

	ret = genl_register_family(&tcp_metrics_nl_family);
	if (ret < 0)
		panic("Could not register tcp_metrics generic netlink\n");
}