// SPDX-License-Identifier: GPL-2.0
#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/cache.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/tcp.h>
#include <linux/hash.h>
#include <linux/tcp_metrics.h>
#include <linux/vmalloc.h>

#include <net/inet_connection_sock.h>
#include <net/net_namespace.h>
#include <net/request_sock.h>
#include <net/inetpeer.h>
#include <net/sock.h>
#include <net/ipv6.h>
#include <net/dst.h>
#include <net/tcp.h>
#include <net/genetlink.h>

static struct tcp_metrics_block *__tcp_get_metrics(const struct inetpeer_addr *saddr,
						   const struct inetpeer_addr *daddr,
						   struct net *net, unsigned int hash);

struct tcp_fastopen_metrics {
	u16	mss;
	u16	syn_loss:10,		/* Recurring Fast Open SYN losses */
		try_exp:2;		/* Request w/ exp. option (once) */
	unsigned long	last_syn_loss;	/* Last Fast Open SYN loss */
	struct	tcp_fastopen_cookie	cookie;
};

/* TCP_METRIC_MAX includes 2 extra fields for userspace compatibility
 * Kernel only stores RTT and RTTVAR in usec resolution
 */
#define TCP_METRIC_MAX_KERNEL (TCP_METRIC_MAX - 2)
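
/* For reference: the UAPI metric enum (linux/tcp_metrics.h) ends with
 * TCP_METRIC_RTT_US and TCP_METRIC_RTTVAR_US, the two "extra" fields
 * mentioned above.  The kernel array below only spans indices
 * 0..TCP_METRIC_MAX_KERNEL; its RTT and RTTVAR slots already hold
 * microsecond values, and the millisecond variants are synthesized
 * only when replying over netlink (see tcp_metrics_fill_info()).
 */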

struct tcp_metrics_block {
	struct tcp_metrics_block __rcu	*tcpm_next;
	struct net			*tcpm_net;
	struct inetpeer_addr		tcpm_saddr;
	struct inetpeer_addr		tcpm_daddr;
	unsigned long			tcpm_stamp;
	u32				tcpm_lock;
	u32				tcpm_vals[TCP_METRIC_MAX_KERNEL + 1];
	struct tcp_fastopen_metrics	tcpm_fastopen;

	struct rcu_head			rcu_head;
};
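
/* Each entry caches the metrics of one (source, destination, netns)
 * tuple.  Entries hang off a hash bucket on an RCU-protected singly
 * linked list: lookups walk the chain under rcu_read_lock(), while
 * insertions and removals serialize on tcp_metrics_lock.
 */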

static inline struct net *tm_net(const struct tcp_metrics_block *tm)
{
	/* Paired with the WRITE_ONCE() in tcpm_new() */
	return READ_ONCE(tm->tcpm_net);
}

static bool tcp_metric_locked(struct tcp_metrics_block *tm,
			      enum tcp_metric_index idx)
{
	/* Paired with WRITE_ONCE() in tcpm_suck_dst() */
	return READ_ONCE(tm->tcpm_lock) & (1 << idx);
}

static u32 tcp_metric_get(const struct tcp_metrics_block *tm,
			  enum tcp_metric_index idx)
{
	/* Paired with WRITE_ONCE() in tcp_metric_set() */
	return READ_ONCE(tm->tcpm_vals[idx]);
}

static void tcp_metric_set(struct tcp_metrics_block *tm,
			   enum tcp_metric_index idx,
			   u32 val)
{
	/* Paired with READ_ONCE() in tcp_metric_get() */
	WRITE_ONCE(tm->tcpm_vals[idx], val);
}

static bool addr_same(const struct inetpeer_addr *a,
		      const struct inetpeer_addr *b)
{
	return (a->family == b->family) && !inetpeer_addr_cmp(a, b);
}

struct tcpm_hash_bucket {
	struct tcp_metrics_block __rcu	*chain;
};

static struct tcpm_hash_bucket	*tcp_metrics_hash __read_mostly;
static unsigned int		tcp_metrics_hash_log __read_mostly;

static DEFINE_SPINLOCK(tcp_metrics_lock);
static DEFINE_SEQLOCK(fastopen_seqlock);
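
/* fastopen_seqlock guards the multi-field Fast Open state (mss, cookie,
 * loss counters) so readers see a consistent snapshot without taking a
 * lock: they loop on read_seqbegin()/read_seqretry() and simply retry
 * if a writer raced with them.
 */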

static void tcpm_suck_dst(struct tcp_metrics_block *tm,
			  const struct dst_entry *dst,
			  bool fastopen_clear)
{
	u32 msval;
	u32 val;

	WRITE_ONCE(tm->tcpm_stamp, jiffies);

	val = 0;
	if (dst_metric_locked(dst, RTAX_RTT))
		val |= 1 << TCP_METRIC_RTT;
	if (dst_metric_locked(dst, RTAX_RTTVAR))
		val |= 1 << TCP_METRIC_RTTVAR;
	if (dst_metric_locked(dst, RTAX_SSTHRESH))
		val |= 1 << TCP_METRIC_SSTHRESH;
	if (dst_metric_locked(dst, RTAX_CWND))
		val |= 1 << TCP_METRIC_CWND;
	if (dst_metric_locked(dst, RTAX_REORDERING))
		val |= 1 << TCP_METRIC_REORDERING;
	/* Paired with READ_ONCE() in tcp_metric_locked() */
	WRITE_ONCE(tm->tcpm_lock, val);

	msval = dst_metric_raw(dst, RTAX_RTT);
	tcp_metric_set(tm, TCP_METRIC_RTT, msval * USEC_PER_MSEC);

	msval = dst_metric_raw(dst, RTAX_RTTVAR);
	tcp_metric_set(tm, TCP_METRIC_RTTVAR, msval * USEC_PER_MSEC);
	tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
		       dst_metric_raw(dst, RTAX_SSTHRESH));
	tcp_metric_set(tm, TCP_METRIC_CWND,
		       dst_metric_raw(dst, RTAX_CWND));
	tcp_metric_set(tm, TCP_METRIC_REORDERING,
		       dst_metric_raw(dst, RTAX_REORDERING));
	if (fastopen_clear) {
		write_seqlock(&fastopen_seqlock);
		tm->tcpm_fastopen.mss = 0;
		tm->tcpm_fastopen.syn_loss = 0;
		tm->tcpm_fastopen.try_exp = 0;
		tm->tcpm_fastopen.cookie.exp = false;
		tm->tcpm_fastopen.cookie.len = 0;
		write_sequnlock(&fastopen_seqlock);
	}
}

#define TCP_METRICS_TIMEOUT		(60 * 60 * HZ)

static void tcpm_check_stamp(struct tcp_metrics_block *tm,
			     const struct dst_entry *dst)
{
	unsigned long limit;

	if (!tm)
		return;
	limit = READ_ONCE(tm->tcpm_stamp) + TCP_METRICS_TIMEOUT;
	if (unlikely(time_after(jiffies, limit)))
		tcpm_suck_dst(tm, dst, false);
}
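
/* Entries are not freed on a timer; instead, an entry that has not been
 * refreshed for TCP_METRICS_TIMEOUT (one hour) is re-seeded from the
 * current dst metrics the next time it is looked up.
 */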

#define TCP_METRICS_RECLAIM_DEPTH	5
#define TCP_METRICS_RECLAIM_PTR		(struct tcp_metrics_block *) 0x1UL

#define deref_locked(p)	\
	rcu_dereference_protected(p, lockdep_is_held(&tcp_metrics_lock))
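
/* TCP_METRICS_RECLAIM_PTR is a sentinel, not a valid pointer: lookups
 * return it when a chain is deeper than TCP_METRICS_RECLAIM_DEPTH,
 * which tells tcpm_new() to recycle the oldest entry in the bucket
 * instead of growing the chain further.
 */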

static struct tcp_metrics_block *tcpm_new(struct dst_entry *dst,
					  struct inetpeer_addr *saddr,
					  struct inetpeer_addr *daddr,
					  unsigned int hash)
{
	struct tcp_metrics_block *tm;
	struct net *net;
	bool reclaim = false;

	spin_lock_bh(&tcp_metrics_lock);
	net = dev_net(dst->dev);

	/* While waiting for the spin-lock the cache might have been populated
	 * with this entry and so we have to check again.
	 */
	tm = __tcp_get_metrics(saddr, daddr, net, hash);
	if (tm == TCP_METRICS_RECLAIM_PTR) {
		reclaim = true;
		tm = NULL;
	}
	if (tm) {
		tcpm_check_stamp(tm, dst);
		goto out_unlock;
	}

	if (unlikely(reclaim)) {
		struct tcp_metrics_block *oldest;

		oldest = deref_locked(tcp_metrics_hash[hash].chain);
		for (tm = deref_locked(oldest->tcpm_next); tm;
		     tm = deref_locked(tm->tcpm_next)) {
			if (time_before(READ_ONCE(tm->tcpm_stamp),
					READ_ONCE(oldest->tcpm_stamp)))
				oldest = tm;
		}
		tm = oldest;
	} else {
		tm = kzalloc(sizeof(*tm), GFP_ATOMIC);
		if (!tm)
			goto out_unlock;
	}
	/* Paired with the READ_ONCE() in tm_net() */
	WRITE_ONCE(tm->tcpm_net, net);

	tm->tcpm_saddr = *saddr;
	tm->tcpm_daddr = *daddr;

	tcpm_suck_dst(tm, dst, reclaim);

	if (likely(!reclaim)) {
		tm->tcpm_next = tcp_metrics_hash[hash].chain;
		rcu_assign_pointer(tcp_metrics_hash[hash].chain, tm);
	}

out_unlock:
	spin_unlock_bh(&tcp_metrics_lock);
	return tm;
}

static struct tcp_metrics_block *tcp_get_encode(struct tcp_metrics_block *tm, int depth)
{
	if (tm)
		return tm;
	if (depth > TCP_METRICS_RECLAIM_DEPTH)
		return TCP_METRICS_RECLAIM_PTR;
	return NULL;
}

static struct tcp_metrics_block *__tcp_get_metrics(const struct inetpeer_addr *saddr,
						   const struct inetpeer_addr *daddr,
						   struct net *net, unsigned int hash)
{
	struct tcp_metrics_block *tm;
	int depth = 0;

	for (tm = rcu_dereference(tcp_metrics_hash[hash].chain); tm;
	     tm = rcu_dereference(tm->tcpm_next)) {
		if (addr_same(&tm->tcpm_saddr, saddr) &&
		    addr_same(&tm->tcpm_daddr, daddr) &&
		    net_eq(tm_net(tm), net))
			break;
		depth++;
	}
	return tcp_get_encode(tm, depth);
}

static struct tcp_metrics_block *__tcp_get_metrics_req(struct request_sock *req,
						       struct dst_entry *dst)
{
	struct tcp_metrics_block *tm;
	struct inetpeer_addr saddr, daddr;
	unsigned int hash;
	struct net *net;

	saddr.family = req->rsk_ops->family;
	daddr.family = req->rsk_ops->family;
	switch (daddr.family) {
	case AF_INET:
		inetpeer_set_addr_v4(&saddr, inet_rsk(req)->ir_loc_addr);
		inetpeer_set_addr_v4(&daddr, inet_rsk(req)->ir_rmt_addr);
		hash = ipv4_addr_hash(inet_rsk(req)->ir_rmt_addr);
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case AF_INET6:
		inetpeer_set_addr_v6(&saddr, &inet_rsk(req)->ir_v6_loc_addr);
		inetpeer_set_addr_v6(&daddr, &inet_rsk(req)->ir_v6_rmt_addr);
		hash = ipv6_addr_hash(&inet_rsk(req)->ir_v6_rmt_addr);
		break;
#endif
	default:
		return NULL;
	}

	net = dev_net(dst->dev);
	hash ^= net_hash_mix(net);
	hash = hash_32(hash, tcp_metrics_hash_log);

	for (tm = rcu_dereference(tcp_metrics_hash[hash].chain); tm;
	     tm = rcu_dereference(tm->tcpm_next)) {
		if (addr_same(&tm->tcpm_saddr, &saddr) &&
		    addr_same(&tm->tcpm_daddr, &daddr) &&
		    net_eq(tm_net(tm), net))
			break;
	}
	tcpm_check_stamp(tm, dst);
	return tm;
}

static struct tcp_metrics_block *tcp_get_metrics(struct sock *sk,
						 struct dst_entry *dst,
						 bool create)
{
	struct tcp_metrics_block *tm;
	struct inetpeer_addr saddr, daddr;
	unsigned int hash;
	struct net *net;

	if (sk->sk_family == AF_INET) {
		inetpeer_set_addr_v4(&saddr, inet_sk(sk)->inet_saddr);
		inetpeer_set_addr_v4(&daddr, inet_sk(sk)->inet_daddr);
		hash = ipv4_addr_hash(inet_sk(sk)->inet_daddr);
	}
#if IS_ENABLED(CONFIG_IPV6)
	else if (sk->sk_family == AF_INET6) {
		if (ipv6_addr_v4mapped(&sk->sk_v6_daddr)) {
			inetpeer_set_addr_v4(&saddr, inet_sk(sk)->inet_saddr);
			inetpeer_set_addr_v4(&daddr, inet_sk(sk)->inet_daddr);
			hash = ipv4_addr_hash(inet_sk(sk)->inet_daddr);
		} else {
			inetpeer_set_addr_v6(&saddr, &sk->sk_v6_rcv_saddr);
			inetpeer_set_addr_v6(&daddr, &sk->sk_v6_daddr);
			hash = ipv6_addr_hash(&sk->sk_v6_daddr);
		}
	}
#endif
	else
		return NULL;

	net = dev_net(dst->dev);
	hash ^= net_hash_mix(net);
	hash = hash_32(hash, tcp_metrics_hash_log);

	tm = __tcp_get_metrics(&saddr, &daddr, net, hash);
	if (tm == TCP_METRICS_RECLAIM_PTR)
		tm = NULL;
	if (!tm && create)
		tm = tcpm_new(dst, &saddr, &daddr, hash);
	else
		tcpm_check_stamp(tm, dst);

	return tm;
}
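
/* Bucket selection, shared by both lookup paths above: a 32-bit hash of
 * the destination address is mixed with the netns hash and folded down
 * to the table size, i.e.
 *
 *	hash = ipv4_addr_hash(daddr);	(or ipv6_addr_hash())
 *	hash ^= net_hash_mix(net);
 *	hash = hash_32(hash, tcp_metrics_hash_log);
 *
 * so the same peer maps to different buckets in different namespaces.
 */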

/* Save metrics learned by this TCP session. This function is called
 * only when TCP finishes successfully, i.e. when it enters TIME-WAIT
 * or goes from LAST-ACK to CLOSE.
 */
void tcp_update_metrics(struct sock *sk)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	struct dst_entry *dst = __sk_dst_get(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct net *net = sock_net(sk);
	struct tcp_metrics_block *tm;
	unsigned long rtt;
	u32 val;
	int m;

	sk_dst_confirm(sk);
	if (READ_ONCE(net->ipv4.sysctl_tcp_nometrics_save) || !dst)
		return;

	rcu_read_lock();
	if (icsk->icsk_backoff || !tp->srtt_us) {
		/* This session failed to estimate rtt. Why?
		 * Probably, no packets returned in time.  Reset our
		 * results.
		 */
		tm = tcp_get_metrics(sk, dst, false);
		if (tm && !tcp_metric_locked(tm, TCP_METRIC_RTT))
			tcp_metric_set(tm, TCP_METRIC_RTT, 0);
		goto out_unlock;
	} else
		tm = tcp_get_metrics(sk, dst, true);

	if (!tm)
		goto out_unlock;

	rtt = tcp_metric_get(tm, TCP_METRIC_RTT);
	m = rtt - tp->srtt_us;

	/* If newly calculated rtt larger than stored one, store new
	 * one. Otherwise, use EWMA. Remember, rtt overestimation is
	 * always better than underestimation.
	 */
	if (!tcp_metric_locked(tm, TCP_METRIC_RTT)) {
		if (m <= 0)
			rtt = tp->srtt_us;
		else
			rtt -= (m >> 3);
		tcp_metric_set(tm, TCP_METRIC_RTT, rtt);
	}

	if (!tcp_metric_locked(tm, TCP_METRIC_RTTVAR)) {
		unsigned long var;

		if (m < 0)
			m = -m;

		/* Scale deviation to rttvar fixed point */
		m >>= 1;
		if (m < tp->mdev_us)
			m = tp->mdev_us;

		var = tcp_metric_get(tm, TCP_METRIC_RTTVAR);
		if (m >= var)
			var = m;
		else
			var -= (var - m) >> 2;

		tcp_metric_set(tm, TCP_METRIC_RTTVAR, var);
	}

	if (tcp_in_initial_slowstart(tp)) {
		/* Slow start still did not finish. */
		if (!tcp_metric_locked(tm, TCP_METRIC_SSTHRESH)) {
			val = tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
			if (val && (tp->snd_cwnd >> 1) > val)
				tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
					       tp->snd_cwnd >> 1);
		}
		if (!tcp_metric_locked(tm, TCP_METRIC_CWND)) {
			val = tcp_metric_get(tm, TCP_METRIC_CWND);
			if (tp->snd_cwnd > val)
				tcp_metric_set(tm, TCP_METRIC_CWND,
					       tp->snd_cwnd);
		}
	} else if (!tcp_in_slow_start(tp) &&
		   icsk->icsk_ca_state == TCP_CA_Open) {
		/* Cong. avoidance phase, cwnd is reliable. */
		if (!tcp_metric_locked(tm, TCP_METRIC_SSTHRESH))
			tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
				       max(tp->snd_cwnd >> 1, tp->snd_ssthresh));
		if (!tcp_metric_locked(tm, TCP_METRIC_CWND)) {
			val = tcp_metric_get(tm, TCP_METRIC_CWND);
			tcp_metric_set(tm, TCP_METRIC_CWND, (val + tp->snd_cwnd) >> 1);
		}
	} else {
		/* Else slow start did not finish, cwnd is non-sense,
		 * ssthresh may be also invalid.
		 */
		if (!tcp_metric_locked(tm, TCP_METRIC_CWND)) {
			val = tcp_metric_get(tm, TCP_METRIC_CWND);
			tcp_metric_set(tm, TCP_METRIC_CWND,
				       (val + tp->snd_ssthresh) >> 1);
		}
		if (!tcp_metric_locked(tm, TCP_METRIC_SSTHRESH)) {
			val = tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
			if (val && tp->snd_ssthresh > val)
				tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
					       tp->snd_ssthresh);
		}
		if (!tcp_metric_locked(tm, TCP_METRIC_REORDERING)) {
			val = tcp_metric_get(tm, TCP_METRIC_REORDERING);
			if (val < tp->reordering &&
			    tp->reordering !=
			    READ_ONCE(net->ipv4.sysctl_tcp_reordering))
				tcp_metric_set(tm, TCP_METRIC_REORDERING,
					       tp->reordering);
		}
	}
	WRITE_ONCE(tm->tcpm_stamp, jiffies);
out_unlock:
	rcu_read_unlock();
}
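
/* The filters above mirror tcp_rtt_estimator(): the cached RTT moves
 * toward the session srtt with gain 1/8 (rtt -= m >> 3) and the cached
 * variance with gain 1/4 (var -= (var - m) >> 2), always biased toward
 * overestimation.  Worked example: cached rtt = 100ms and session
 * srtt = 60ms give m = 40ms, so the new cached rtt is 100 - 40/8 = 95ms.
 */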

/* Initialize metrics on socket. */

void tcp_init_metrics(struct sock *sk)
{
	struct dst_entry *dst = __sk_dst_get(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_metrics_block *tm;
	u32 val, crtt = 0; /* cached RTT scaled by 8 */

	sk_dst_confirm(sk);
	if (!dst)
		goto reset;

	rcu_read_lock();
	tm = tcp_get_metrics(sk, dst, true);
	if (!tm) {
		rcu_read_unlock();
		goto reset;
	}

	if (tcp_metric_locked(tm, TCP_METRIC_CWND))
		tp->snd_cwnd_clamp = tcp_metric_get(tm, TCP_METRIC_CWND);

	val = tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
	if (val) {
		tp->snd_ssthresh = val;
		if (tp->snd_ssthresh > tp->snd_cwnd_clamp)
			tp->snd_ssthresh = tp->snd_cwnd_clamp;
	} else {
		/* ssthresh may have been reduced unnecessarily during
		 * 3WHS. Restore it back to its initial default.
		 */
		tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
	}
	val = tcp_metric_get(tm, TCP_METRIC_REORDERING);
	if (val && tp->reordering != val)
		tp->reordering = val;

	crtt = tcp_metric_get(tm, TCP_METRIC_RTT);
	rcu_read_unlock();
reset:
	/* The initial RTT measurement from the SYN/SYN-ACK is not ideal
	 * to seed the RTO for later data packets because SYN packets are
	 * small. Use the per-dst cached values to seed the RTO but keep
	 * the RTT estimator variables intact (e.g., srtt, mdev, rttvar).
	 * Later the RTO will be updated immediately upon obtaining the first
	 * data RTT sample (tcp_rtt_estimator()). Hence the cached RTT only
	 * influences the first RTO but not later RTT estimation.
	 *
	 * But if RTT is not available from the SYN (due to retransmits or
	 * syn cookies) or the cache, force a conservative 3secs timeout.
	 *
	 * A bit of theory. RTT is time passed after "normal" sized packet
	 * is sent until it is ACKed. In normal circumstances sending small
	 * packets force peer to delay ACKs and calculation is correct too.
	 * The algorithm is adaptive and, provided we follow specs, it
	 * NEVER underestimates RTT. BUT! If peer tries to make some clever
	 * tricks sort of "quick acks" for time long enough to decrease RTT
	 * to low value, and then abruptly stops to do it and starts to delay
	 * ACKs, wait for troubles.
	 */
	if (crtt > tp->srtt_us) {
		/* Set RTO like tcp_rtt_estimator(), but from cached RTT. */
		crtt /= 8 * USEC_PER_SEC / HZ;
		inet_csk(sk)->icsk_rto = crtt + max(2 * crtt, tcp_rto_min(sk));
	} else if (tp->srtt_us == 0) {
		/* RFC6298: 5.7 We've failed to get a valid RTT sample from
		 * 3WHS. This is most likely due to retransmission,
		 * including spurious one. Reset the RTO back to 3secs
		 * from the more aggressive 1sec to avoid more spurious
		 * retransmission.
		 */
		tp->rttvar_us = jiffies_to_usecs(TCP_TIMEOUT_FALLBACK);
		tp->mdev_us = tp->mdev_max_us = tp->rttvar_us;

		inet_csk(sk)->icsk_rto = TCP_TIMEOUT_FALLBACK;
	}
}
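
/* Worked example for the cached-RTT seeding above, assuming HZ=1000:
 * crtt is in usec scaled by 8, so 8 * USEC_PER_SEC / HZ = 8000 converts
 * it to jiffies.  A cached RTT of 50ms arrives as crtt = 400000, and
 * 400000 / 8000 = 50 jiffies (50ms); the RTO is then seeded to
 * crtt + max(2 * crtt, tcp_rto_min(sk)).
 */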

bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst)
{
	struct tcp_metrics_block *tm;
	bool ret;

	if (!dst)
		return false;

	rcu_read_lock();
	tm = __tcp_get_metrics_req(req, dst);
	if (tm && tcp_metric_get(tm, TCP_METRIC_RTT))
		ret = true;
	else
		ret = false;
	rcu_read_unlock();

	return ret;
}

void tcp_fastopen_cache_get(struct sock *sk, u16 *mss,
			    struct tcp_fastopen_cookie *cookie)
{
	struct tcp_metrics_block *tm;

	rcu_read_lock();
	tm = tcp_get_metrics(sk, __sk_dst_get(sk), false);
	if (tm) {
		struct tcp_fastopen_metrics *tfom = &tm->tcpm_fastopen;
		unsigned int seq;

		do {
			seq = read_seqbegin(&fastopen_seqlock);
			if (tfom->mss)
				*mss = tfom->mss;
			*cookie = tfom->cookie;
			if (cookie->len <= 0 && tfom->try_exp == 1)
				cookie->exp = true;
		} while (read_seqretry(&fastopen_seqlock, seq));
	}
	rcu_read_unlock();
}

void tcp_fastopen_cache_set(struct sock *sk, u16 mss,
			    struct tcp_fastopen_cookie *cookie, bool syn_lost,
			    u16 try_exp)
{
	struct dst_entry *dst = __sk_dst_get(sk);
	struct tcp_metrics_block *tm;

	if (!dst)
		return;
	rcu_read_lock();
	tm = tcp_get_metrics(sk, dst, true);
	if (tm) {
		struct tcp_fastopen_metrics *tfom = &tm->tcpm_fastopen;

		write_seqlock_bh(&fastopen_seqlock);
		if (mss)
			tfom->mss = mss;
		if (cookie && cookie->len > 0)
			tfom->cookie = *cookie;
		else if (try_exp > tfom->try_exp &&
			 tfom->cookie.len <= 0 && !tfom->cookie.exp)
			tfom->try_exp = try_exp;
		if (syn_lost) {
			++tfom->syn_loss;
			tfom->last_syn_loss = jiffies;
		} else
			tfom->syn_loss = 0;
		write_sequnlock_bh(&fastopen_seqlock);
	}
	rcu_read_unlock();
}

static struct genl_family tcp_metrics_nl_family;

static const struct nla_policy tcp_metrics_nl_policy[TCP_METRICS_ATTR_MAX + 1] = {
	[TCP_METRICS_ATTR_ADDR_IPV4]	= { .type = NLA_U32, },
	[TCP_METRICS_ATTR_ADDR_IPV6]	= { .type = NLA_BINARY,
					    .len = sizeof(struct in6_addr), },
	/* Following attributes are not received for GET/DEL,
	 * we keep them for reference
	 */
#if 0
	[TCP_METRICS_ATTR_AGE]		= { .type = NLA_MSECS, },
	[TCP_METRICS_ATTR_TW_TSVAL]	= { .type = NLA_U32, },
	[TCP_METRICS_ATTR_TW_TS_STAMP]	= { .type = NLA_S32, },
	[TCP_METRICS_ATTR_VALS]		= { .type = NLA_NESTED, },
	[TCP_METRICS_ATTR_FOPEN_MSS]	= { .type = NLA_U16, },
	[TCP_METRICS_ATTR_FOPEN_SYN_DROPS]	= { .type = NLA_U16, },
	[TCP_METRICS_ATTR_FOPEN_SYN_DROP_TS]	= { .type = NLA_MSECS, },
	[TCP_METRICS_ATTR_FOPEN_COOKIE]	= { .type = NLA_BINARY,
					    .len = TCP_FASTOPEN_COOKIE_MAX, },
#endif
};
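
/* Userspace reaches this family over generic netlink; iproute2's
 * "ip tcp_metrics" command (e.g. "ip tcp_metrics show" or
 * "ip tcp_metrics flush") is the usual client.  Only the address
 * attributes are accepted as input for GET/DEL; the rest are
 * output-only, which is why they are compiled out of the policy above.
 */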

/* Add attributes, caller cancels its header on failure */
static int tcp_metrics_fill_info(struct sk_buff *msg,
				 struct tcp_metrics_block *tm)
{
	struct nlattr *nest;
	int i;

	switch (tm->tcpm_daddr.family) {
	case AF_INET:
		if (nla_put_in_addr(msg, TCP_METRICS_ATTR_ADDR_IPV4,
				    inetpeer_get_addr_v4(&tm->tcpm_daddr)) < 0)
			goto nla_put_failure;
		if (nla_put_in_addr(msg, TCP_METRICS_ATTR_SADDR_IPV4,
				    inetpeer_get_addr_v4(&tm->tcpm_saddr)) < 0)
			goto nla_put_failure;
		break;
	case AF_INET6:
		if (nla_put_in6_addr(msg, TCP_METRICS_ATTR_ADDR_IPV6,
				     inetpeer_get_addr_v6(&tm->tcpm_daddr)) < 0)
			goto nla_put_failure;
		if (nla_put_in6_addr(msg, TCP_METRICS_ATTR_SADDR_IPV6,
				     inetpeer_get_addr_v6(&tm->tcpm_saddr)) < 0)
			goto nla_put_failure;
		break;
	default:
		return -EAFNOSUPPORT;
	}

	if (nla_put_msecs(msg, TCP_METRICS_ATTR_AGE,
			  jiffies - READ_ONCE(tm->tcpm_stamp),
			  TCP_METRICS_ATTR_PAD) < 0)
		goto nla_put_failure;

	{
		int n = 0;

		nest = nla_nest_start_noflag(msg, TCP_METRICS_ATTR_VALS);
		if (!nest)
			goto nla_put_failure;
		for (i = 0; i < TCP_METRIC_MAX_KERNEL + 1; i++) {
			u32 val = tcp_metric_get(tm, i);

			if (!val)
				continue;
			if (i == TCP_METRIC_RTT) {
				if (nla_put_u32(msg, TCP_METRIC_RTT_US + 1,
						val) < 0)
					goto nla_put_failure;
				n++;
				val = max(val / 1000, 1U);
			}
			if (i == TCP_METRIC_RTTVAR) {
				if (nla_put_u32(msg, TCP_METRIC_RTTVAR_US + 1,
						val) < 0)
					goto nla_put_failure;
				n++;
				val = max(val / 1000, 1U);
			}
			if (nla_put_u32(msg, i + 1, val) < 0)
				goto nla_put_failure;
			n++;
		}
		if (n)
			nla_nest_end(msg, nest);
		else
			nla_nest_cancel(msg, nest);
	}

	{
		struct tcp_fastopen_metrics tfom_copy[1], *tfom;
		unsigned int seq;

		do {
			seq = read_seqbegin(&fastopen_seqlock);
			tfom_copy[0] = tm->tcpm_fastopen;
		} while (read_seqretry(&fastopen_seqlock, seq));

		tfom = tfom_copy;
		if (tfom->mss &&
		    nla_put_u16(msg, TCP_METRICS_ATTR_FOPEN_MSS,
				tfom->mss) < 0)
			goto nla_put_failure;
		if (tfom->syn_loss &&
		    (nla_put_u16(msg, TCP_METRICS_ATTR_FOPEN_SYN_DROPS,
				tfom->syn_loss) < 0 ||
		     nla_put_msecs(msg, TCP_METRICS_ATTR_FOPEN_SYN_DROP_TS,
				jiffies - tfom->last_syn_loss,
				TCP_METRICS_ATTR_PAD) < 0))
			goto nla_put_failure;
		if (tfom->cookie.len > 0 &&
		    nla_put(msg, TCP_METRICS_ATTR_FOPEN_COOKIE,
			    tfom->cookie.len, tfom->cookie.val) < 0)
			goto nla_put_failure;
	}
	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

static int tcp_metrics_dump_info(struct sk_buff *skb,
				 struct netlink_callback *cb,
				 struct tcp_metrics_block *tm)
{
	void *hdr;

	hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			  &tcp_metrics_nl_family, NLM_F_MULTI,
			  TCP_METRICS_CMD_GET);
	if (!hdr)
		return -EMSGSIZE;

	if (tcp_metrics_fill_info(skb, tm) < 0)
		goto nla_put_failure;

	genlmsg_end(skb, hdr);
	return 0;

nla_put_failure:
	genlmsg_cancel(skb, hdr);
	return -EMSGSIZE;
}

static int tcp_metrics_nl_dump(struct sk_buff *skb,
			       struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	unsigned int max_rows = 1U << tcp_metrics_hash_log;
	unsigned int row, s_row = cb->args[0];
	int s_col = cb->args[1], col = s_col;

	for (row = s_row; row < max_rows; row++, s_col = 0) {
		struct tcp_metrics_block *tm;
		struct tcpm_hash_bucket *hb = tcp_metrics_hash + row;

		rcu_read_lock();
		for (col = 0, tm = rcu_dereference(hb->chain); tm;
		     tm = rcu_dereference(tm->tcpm_next), col++) {
			if (!net_eq(tm_net(tm), net))
				continue;
			if (col < s_col)
				continue;
			if (tcp_metrics_dump_info(skb, cb, tm) < 0) {
				rcu_read_unlock();
				goto done;
			}
		}
		rcu_read_unlock();
	}

done:
	cb->args[0] = row;
	cb->args[1] = col;
	return skb->len;
}
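
/* The dump cursor lives in the netlink callback: cb->args[0] holds the
 * next hash row and cb->args[1] the column within it, so a multi-message
 * dump resumes where the previous skb filled up.
 */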

static int __parse_nl_addr(struct genl_info *info, struct inetpeer_addr *addr,
			   unsigned int *hash, int optional, int v4, int v6)
{
	struct nlattr *a;

	a = info->attrs[v4];
	if (a) {
		inetpeer_set_addr_v4(addr, nla_get_in_addr(a));
		if (hash)
			*hash = ipv4_addr_hash(inetpeer_get_addr_v4(addr));
		return 0;
	}
	a = info->attrs[v6];
	if (a) {
		struct in6_addr in6;

		if (nla_len(a) != sizeof(struct in6_addr))
			return -EINVAL;
		in6 = nla_get_in6_addr(a);
		inetpeer_set_addr_v6(addr, &in6);
		if (hash)
			*hash = ipv6_addr_hash(inetpeer_get_addr_v6(addr));
		return 0;
	}
	return optional ? 1 : -EAFNOSUPPORT;
}

static int parse_nl_addr(struct genl_info *info, struct inetpeer_addr *addr,
			 unsigned int *hash, int optional)
{
	return __parse_nl_addr(info, addr, hash, optional,
			       TCP_METRICS_ATTR_ADDR_IPV4,
			       TCP_METRICS_ATTR_ADDR_IPV6);
}

static int parse_nl_saddr(struct genl_info *info, struct inetpeer_addr *addr)
{
	return __parse_nl_addr(info, addr, NULL, 0,
			       TCP_METRICS_ATTR_SADDR_IPV4,
			       TCP_METRICS_ATTR_SADDR_IPV6);
}

static int tcp_metrics_nl_cmd_get(struct sk_buff *skb, struct genl_info *info)
{
	struct tcp_metrics_block *tm;
	struct inetpeer_addr saddr, daddr;
	unsigned int hash;
	struct sk_buff *msg;
	struct net *net = genl_info_net(info);
	void *reply;
	int ret;
	bool src = true;

	ret = parse_nl_addr(info, &daddr, &hash, 0);
	if (ret < 0)
		return ret;

	ret = parse_nl_saddr(info, &saddr);
	if (ret < 0)
		src = false;

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	reply = genlmsg_put_reply(msg, info, &tcp_metrics_nl_family, 0,
				  info->genlhdr->cmd);
	if (!reply)
		goto nla_put_failure;

	hash ^= net_hash_mix(net);
	hash = hash_32(hash, tcp_metrics_hash_log);
	ret = -ESRCH;
	rcu_read_lock();
	for (tm = rcu_dereference(tcp_metrics_hash[hash].chain); tm;
	     tm = rcu_dereference(tm->tcpm_next)) {
		if (addr_same(&tm->tcpm_daddr, &daddr) &&
		    (!src || addr_same(&tm->tcpm_saddr, &saddr)) &&
		    net_eq(tm_net(tm), net)) {
			ret = tcp_metrics_fill_info(msg, tm);
			break;
		}
	}
	rcu_read_unlock();
	if (ret < 0)
		goto out_free;

	genlmsg_end(msg, reply);
	return genlmsg_reply(msg, info);

nla_put_failure:
	ret = -EMSGSIZE;

out_free:
	nlmsg_free(msg);
	return ret;
}

static void tcp_metrics_flush_all(struct net *net)
{
	unsigned int max_rows = 1U << tcp_metrics_hash_log;
	struct tcpm_hash_bucket *hb = tcp_metrics_hash;
	struct tcp_metrics_block *tm;
	unsigned int row;

	for (row = 0; row < max_rows; row++, hb++) {
		struct tcp_metrics_block __rcu **pp;
		bool match;

		spin_lock_bh(&tcp_metrics_lock);
		pp = &hb->chain;
		for (tm = deref_locked(*pp); tm; tm = deref_locked(*pp)) {
			match = net ? net_eq(tm_net(tm), net) :
				!refcount_read(&tm_net(tm)->count);
			if (match) {
				rcu_assign_pointer(*pp, tm->tcpm_next);
				kfree_rcu(tm, rcu_head);
			} else {
				pp = &tm->tcpm_next;
			}
		}
		spin_unlock_bh(&tcp_metrics_lock);
	}
}

static int tcp_metrics_nl_cmd_del(struct sk_buff *skb, struct genl_info *info)
{
	struct tcpm_hash_bucket *hb;
	struct tcp_metrics_block *tm;
	struct tcp_metrics_block __rcu **pp;
	struct inetpeer_addr saddr, daddr;
	unsigned int hash;
	struct net *net = genl_info_net(info);
	int ret;
	bool src = true, found = false;

	ret = parse_nl_addr(info, &daddr, &hash, 1);
	if (ret < 0)
		return ret;
	if (ret > 0) {
		tcp_metrics_flush_all(net);
		return 0;
	}
	ret = parse_nl_saddr(info, &saddr);
	if (ret < 0)
		src = false;

	hash ^= net_hash_mix(net);
	hash = hash_32(hash, tcp_metrics_hash_log);
	hb = tcp_metrics_hash + hash;
	pp = &hb->chain;
	spin_lock_bh(&tcp_metrics_lock);
	for (tm = deref_locked(*pp); tm; tm = deref_locked(*pp)) {
		if (addr_same(&tm->tcpm_daddr, &daddr) &&
		    (!src || addr_same(&tm->tcpm_saddr, &saddr)) &&
		    net_eq(tm_net(tm), net)) {
			rcu_assign_pointer(*pp, tm->tcpm_next);
			kfree_rcu(tm, rcu_head);
			found = true;
		} else {
			pp = &tm->tcpm_next;
		}
	}
	spin_unlock_bh(&tcp_metrics_lock);
	if (!found)
		return -ESRCH;
	return 0;
}

static const struct genl_ops tcp_metrics_nl_ops[] = {
	{
		.cmd = TCP_METRICS_CMD_GET,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = tcp_metrics_nl_cmd_get,
		.dumpit = tcp_metrics_nl_dump,
	},
	{
		.cmd = TCP_METRICS_CMD_DEL,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = tcp_metrics_nl_cmd_del,
		.flags = GENL_ADMIN_PERM,
	},
};

static struct genl_family tcp_metrics_nl_family __ro_after_init = {
	.hdrsize	= 0,
	.name		= TCP_METRICS_GENL_NAME,
	.version	= TCP_METRICS_GENL_VERSION,
	.maxattr	= TCP_METRICS_ATTR_MAX,
	.policy		= tcp_metrics_nl_policy,
	.netnsok	= true,
	.module		= THIS_MODULE,
	.ops		= tcp_metrics_nl_ops,
	.n_ops		= ARRAY_SIZE(tcp_metrics_nl_ops),
};

static unsigned int tcpmhash_entries;
static int __init set_tcpmhash_entries(char *str)
{
	ssize_t ret;

	if (!str)
		return 0;

	ret = kstrtouint(str, 0, &tcpmhash_entries);
	if (ret)
		return 0;

	return 1;
}
__setup("tcpmhash_entries=", set_tcpmhash_entries);

static int __net_init tcp_net_metrics_init(struct net *net)
{
	size_t size;
	unsigned int slots;

	if (!net_eq(net, &init_net))
		return 0;

	slots = tcpmhash_entries;
	if (!slots) {
		if (totalram_pages() >= 128 * 1024)
			slots = 16 * 1024;
		else
			slots = 8 * 1024;
	}

	tcp_metrics_hash_log = order_base_2(slots);
	size = sizeof(struct tcpm_hash_bucket) << tcp_metrics_hash_log;

	tcp_metrics_hash = kvzalloc(size, GFP_KERNEL);
	if (!tcp_metrics_hash)
		return -ENOMEM;

	return 0;
}
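
/* Sizing example: assuming 4KB pages, the 128 * 1024 page threshold is
 * 512MB of RAM, selecting 16K buckets; each bucket is a single pointer,
 * so the default table costs 128KB on 64-bit before any entries are
 * allocated.  The table is global and shared by all namespaces, hence
 * the net_eq(net, &init_net) check above.
 */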

static void __net_exit tcp_net_metrics_exit_batch(struct list_head *net_exit_list)
{
	tcp_metrics_flush_all(NULL);
}

static __net_initdata struct pernet_operations tcp_net_metrics_ops = {
	.init		=	tcp_net_metrics_init,
	.exit_batch	=	tcp_net_metrics_exit_batch,
};

void __init tcp_metrics_init(void)
{
	int ret;

	ret = register_pernet_subsys(&tcp_net_metrics_ops);
	if (ret < 0)
		panic("Could not allocate the tcp_metrics hash table\n");

	ret = genl_register_family(&tcp_metrics_nl_family);
	if (ret < 0)
		panic("Could not register tcp_metrics generic netlink\n");
}