Linux 6.7-rc7
net/netfilter/ipvs/ip_vs_conn.c
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * IPVS         An implementation of the IP virtual server support for the
4  *              LINUX operating system.  IPVS is now implemented as a module
5  *              over the Netfilter framework. IPVS can be used to build a
6  *              high-performance and highly available server based on a
7  *              cluster of servers.
8  *
9  * Authors:     Wensong Zhang <wensong@linuxvirtualserver.org>
10  *              Peter Kese <peter.kese@ijs.si>
11  *              Julian Anastasov <ja@ssi.bg>
12  *
13  * The IPVS code for kernel 2.2 was done by Wensong Zhang and Peter Kese,
14  * with changes/fixes from Julian Anastasov, Lars Marowsky-Bree, Horms
15  * and others. Much of the code here is taken from the IP MASQ code of kernel 2.2.
16  *
17  * Changes:
18  */
19
20 #define KMSG_COMPONENT "IPVS"
21 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
22
23 #include <linux/interrupt.h>
24 #include <linux/in.h>
25 #include <linux/inet.h>
26 #include <linux/net.h>
27 #include <linux/kernel.h>
28 #include <linux/module.h>
29 #include <linux/proc_fs.h>              /* for proc_net_* */
30 #include <linux/slab.h>
31 #include <linux/seq_file.h>
32 #include <linux/jhash.h>
33 #include <linux/random.h>
34
35 #include <net/net_namespace.h>
36 #include <net/ip_vs.h>
37
38
39 #ifndef CONFIG_IP_VS_TAB_BITS
40 #define CONFIG_IP_VS_TAB_BITS   12
41 #endif
42
43 /*
44  * Connection hash size. Default is what was selected at compile time.
45  */
46 static int ip_vs_conn_tab_bits = CONFIG_IP_VS_TAB_BITS;
47 module_param_named(conn_tab_bits, ip_vs_conn_tab_bits, int, 0444);
48 MODULE_PARM_DESC(conn_tab_bits, "Set connections' hash size");
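/*
 * The value given here is only a hint: ip_vs_conn_init() below clamps it to
 * [8, max], where max depends on word size and available memory, before the
 * table is sized.
 */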
49
50 /* size and mask values */
51 int ip_vs_conn_tab_size __read_mostly;
52 static int ip_vs_conn_tab_mask __read_mostly;
53
54 /*
55  *  Connection hash table: for input and output packets lookups of IPVS
56  */
57 static struct hlist_head *ip_vs_conn_tab __read_mostly;
58
59 /*  SLAB cache for IPVS connections */
60 static struct kmem_cache *ip_vs_conn_cachep __read_mostly;
61
62 /*  counter for no client port connections */
63 static atomic_t ip_vs_conn_no_cport_cnt = ATOMIC_INIT(0);
64
65 /* random value for IPVS connection hash */
66 static unsigned int ip_vs_conn_rnd __read_mostly;
67
68 /*
69  *  Fine locking granularity for big connection hash table
70  */
71 #define CT_LOCKARRAY_BITS  5
72 #define CT_LOCKARRAY_SIZE  (1<<CT_LOCKARRAY_BITS)
73 #define CT_LOCKARRAY_MASK  (CT_LOCKARRAY_SIZE-1)
74
75 /* We need an addrstrlen that works with or without v6 */
76 #ifdef CONFIG_IP_VS_IPV6
77 #define IP_VS_ADDRSTRLEN INET6_ADDRSTRLEN
78 #else
79 #define IP_VS_ADDRSTRLEN (8+1)
80 #endif
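/* Without IPv6, addresses are formatted as 8 hex digits ("%08X"), so 8+1
 * covers the string plus its trailing NUL.
 */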
81
82 struct ip_vs_aligned_lock
83 {
84         spinlock_t      l;
85 } __attribute__((__aligned__(SMP_CACHE_BYTES)));
86
87 /* lock array for conn table */
88 static struct ip_vs_aligned_lock
89 __ip_vs_conntbl_lock_array[CT_LOCKARRAY_SIZE] __cacheline_aligned;
90
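/*
 * Each hash bucket maps to one of the CT_LOCKARRAY_SIZE spinlocks above
 * via (key & CT_LOCKARRAY_MASK). Only writers take these locks; lookups
 * walk the hash chains under RCU without them.
 */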
91 static inline void ct_write_lock_bh(unsigned int key)
92 {
93         spin_lock_bh(&__ip_vs_conntbl_lock_array[key&CT_LOCKARRAY_MASK].l);
94 }
95
96 static inline void ct_write_unlock_bh(unsigned int key)
97 {
98         spin_unlock_bh(&__ip_vs_conntbl_lock_array[key&CT_LOCKARRAY_MASK].l);
99 }
100
101 static void ip_vs_conn_expire(struct timer_list *t);
102
103 /*
104  *      Returns hash value for IPVS connection entry
105  */
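/*
 * The hash is seeded with ip_vs_conn_rnd (randomized at init time) and the
 * netns pointer is mixed in, so identical tuples from different namespaces
 * land in different buckets.
 */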
106 static unsigned int ip_vs_conn_hashkey(struct netns_ipvs *ipvs, int af, unsigned int proto,
107                                        const union nf_inet_addr *addr,
108                                        __be16 port)
109 {
110 #ifdef CONFIG_IP_VS_IPV6
111         if (af == AF_INET6)
112                 return (jhash_3words(jhash(addr, 16, ip_vs_conn_rnd),
113                                     (__force u32)port, proto, ip_vs_conn_rnd) ^
114                         ((size_t)ipvs>>8)) & ip_vs_conn_tab_mask;
115 #endif
116         return (jhash_3words((__force u32)addr->ip, (__force u32)port, proto,
117                             ip_vs_conn_rnd) ^
118                 ((size_t)ipvs>>8)) & ip_vs_conn_tab_mask;
119 }
120
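/*
 * When a persistence engine supplied pe_data, its hashkey_raw() picks the
 * bucket; otherwise the key is built from the client address/port, or from
 * the virtual address/port when an inverse lookup is requested.
 */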
121 static unsigned int ip_vs_conn_hashkey_param(const struct ip_vs_conn_param *p,
122                                              bool inverse)
123 {
124         const union nf_inet_addr *addr;
125         __be16 port;
126
127         if (p->pe_data && p->pe->hashkey_raw)
128                 return p->pe->hashkey_raw(p, ip_vs_conn_rnd, inverse) &
129                         ip_vs_conn_tab_mask;
130
131         if (likely(!inverse)) {
132                 addr = p->caddr;
133                 port = p->cport;
134         } else {
135                 addr = p->vaddr;
136                 port = p->vport;
137         }
138
139         return ip_vs_conn_hashkey(p->ipvs, p->af, p->protocol, addr, port);
140 }
141
142 static unsigned int ip_vs_conn_hashkey_conn(const struct ip_vs_conn *cp)
143 {
144         struct ip_vs_conn_param p;
145
146         ip_vs_conn_fill_param(cp->ipvs, cp->af, cp->protocol,
147                               &cp->caddr, cp->cport, NULL, 0, &p);
148
149         if (cp->pe) {
150                 p.pe = cp->pe;
151                 p.pe_data = cp->pe_data;
152                 p.pe_data_len = cp->pe_data_len;
153         }
154
155         return ip_vs_conn_hashkey_param(&p, false);
156 }
157
158 /*
159  *      Hashes ip_vs_conn in ip_vs_conn_tab by netns,proto,addr,port.
160  *      returns bool success.
161  */
162 static inline int ip_vs_conn_hash(struct ip_vs_conn *cp)
163 {
164         unsigned int hash;
165         int ret;
166
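        /*
         * One-packet (OPS) connections are never hashed: they handle a
         * single packet and are expired right away (see ip_vs_conn_put()).
         */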
167         if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
168                 return 0;
169
170         /* Hash by protocol, client address and port */
171         hash = ip_vs_conn_hashkey_conn(cp);
172
173         ct_write_lock_bh(hash);
174         spin_lock(&cp->lock);
175
176         if (!(cp->flags & IP_VS_CONN_F_HASHED)) {
177                 cp->flags |= IP_VS_CONN_F_HASHED;
178                 refcount_inc(&cp->refcnt);
179                 hlist_add_head_rcu(&cp->c_list, &ip_vs_conn_tab[hash]);
180                 ret = 1;
181         } else {
182                 pr_err("%s(): request for already hashed, called from %pS\n",
183                        __func__, __builtin_return_address(0));
184                 ret = 0;
185         }
186
187         spin_unlock(&cp->lock);
188         ct_write_unlock_bh(hash);
189
190         return ret;
191 }
192
193
194 /*
195  *      UNhashes ip_vs_conn from ip_vs_conn_tab.
196  *      returns bool success. Caller should hold conn reference.
197  */
198 static inline int ip_vs_conn_unhash(struct ip_vs_conn *cp)
199 {
200         unsigned int hash;
201         int ret;
202
203         /* unhash it and decrease its reference counter */
204         hash = ip_vs_conn_hashkey_conn(cp);
205
206         ct_write_lock_bh(hash);
207         spin_lock(&cp->lock);
208
209         if (cp->flags & IP_VS_CONN_F_HASHED) {
210                 hlist_del_rcu(&cp->c_list);
211                 cp->flags &= ~IP_VS_CONN_F_HASHED;
212                 refcount_dec(&cp->refcnt);
213                 ret = 1;
214         } else
215                 ret = 0;
216
217         spin_unlock(&cp->lock);
218         ct_write_unlock_bh(hash);
219
220         return ret;
221 }
222
223 /* Try to unlink ip_vs_conn from ip_vs_conn_tab.
224  * returns bool success.
225  */
226 static inline bool ip_vs_conn_unlink(struct ip_vs_conn *cp)
227 {
228         unsigned int hash;
229         bool ret = false;
230
231         if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
232                 return refcount_dec_if_one(&cp->refcnt);
233
234         hash = ip_vs_conn_hashkey_conn(cp);
235
236         ct_write_lock_bh(hash);
237         spin_lock(&cp->lock);
238
239         if (cp->flags & IP_VS_CONN_F_HASHED) {
240                 /* Decrease refcnt and unlink conn only if we are last user */
241                 if (refcount_dec_if_one(&cp->refcnt)) {
242                         hlist_del_rcu(&cp->c_list);
243                         cp->flags &= ~IP_VS_CONN_F_HASHED;
244                         ret = true;
245                 }
246         }
247
248         spin_unlock(&cp->lock);
249         ct_write_unlock_bh(hash);
250
251         return ret;
252 }
253
254
255 /*
256  *  Gets ip_vs_conn associated with supplied parameters in the ip_vs_conn_tab.
257  *  Called for pkts coming from OUTside-to-INside.
258  *      p->caddr, p->cport: pkt source address (foreign host)
259  *      p->vaddr, p->vport: pkt dest address (load balancer)
260  */
261 static inline struct ip_vs_conn *
262 __ip_vs_conn_in_get(const struct ip_vs_conn_param *p)
263 {
264         unsigned int hash;
265         struct ip_vs_conn *cp;
266
267         hash = ip_vs_conn_hashkey_param(p, false);
268
269         rcu_read_lock();
270
271         hlist_for_each_entry_rcu(cp, &ip_vs_conn_tab[hash], c_list) {
272                 if (p->cport == cp->cport && p->vport == cp->vport &&
273                     cp->af == p->af &&
274                     ip_vs_addr_equal(p->af, p->caddr, &cp->caddr) &&
275                     ip_vs_addr_equal(p->af, p->vaddr, &cp->vaddr) &&
276                     ((!p->cport) ^ (!(cp->flags & IP_VS_CONN_F_NO_CPORT))) &&
277                     p->protocol == cp->protocol &&
278                     cp->ipvs == p->ipvs) {
279                         if (!__ip_vs_conn_get(cp))
280                                 continue;
281                         /* HIT */
282                         rcu_read_unlock();
283                         return cp;
284                 }
285         }
286
287         rcu_read_unlock();
288
289         return NULL;
290 }
291
292 struct ip_vs_conn *ip_vs_conn_in_get(const struct ip_vs_conn_param *p)
293 {
294         struct ip_vs_conn *cp;
295
296         cp = __ip_vs_conn_in_get(p);
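        /*
         * No exact match: if connections without a known client port exist
         * (IP_VS_CONN_F_NO_CPORT), retry the lookup with cport 0 so those
         * entries can still be found.
         */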
297         if (!cp && atomic_read(&ip_vs_conn_no_cport_cnt)) {
298                 struct ip_vs_conn_param cport_zero_p = *p;
299                 cport_zero_p.cport = 0;
300                 cp = __ip_vs_conn_in_get(&cport_zero_p);
301         }
302
303         IP_VS_DBG_BUF(9, "lookup/in %s %s:%d->%s:%d %s\n",
304                       ip_vs_proto_name(p->protocol),
305                       IP_VS_DBG_ADDR(p->af, p->caddr), ntohs(p->cport),
306                       IP_VS_DBG_ADDR(p->af, p->vaddr), ntohs(p->vport),
307                       cp ? "hit" : "not hit");
308
309         return cp;
310 }
311
312 static int
313 ip_vs_conn_fill_param_proto(struct netns_ipvs *ipvs,
314                             int af, const struct sk_buff *skb,
315                             const struct ip_vs_iphdr *iph,
316                             struct ip_vs_conn_param *p)
317 {
318         __be16 _ports[2], *pptr;
319
320         pptr = frag_safe_skb_hp(skb, iph->len, sizeof(_ports), _ports);
321         if (pptr == NULL)
322                 return 1;
323
324         if (likely(!ip_vs_iph_inverse(iph)))
325                 ip_vs_conn_fill_param(ipvs, af, iph->protocol, &iph->saddr,
326                                       pptr[0], &iph->daddr, pptr[1], p);
327         else
328                 ip_vs_conn_fill_param(ipvs, af, iph->protocol, &iph->daddr,
329                                       pptr[1], &iph->saddr, pptr[0], p);
330         return 0;
331 }
332
333 struct ip_vs_conn *
334 ip_vs_conn_in_get_proto(struct netns_ipvs *ipvs, int af,
335                         const struct sk_buff *skb,
336                         const struct ip_vs_iphdr *iph)
337 {
338         struct ip_vs_conn_param p;
339
340         if (ip_vs_conn_fill_param_proto(ipvs, af, skb, iph, &p))
341                 return NULL;
342
343         return ip_vs_conn_in_get(&p);
344 }
345 EXPORT_SYMBOL_GPL(ip_vs_conn_in_get_proto);
346
347 /* Get reference to connection template */
348 struct ip_vs_conn *ip_vs_ct_in_get(const struct ip_vs_conn_param *p)
349 {
350         unsigned int hash;
351         struct ip_vs_conn *cp;
352
353         hash = ip_vs_conn_hashkey_param(p, false);
354
355         rcu_read_lock();
356
357         hlist_for_each_entry_rcu(cp, &ip_vs_conn_tab[hash], c_list) {
358                 if (unlikely(p->pe_data && p->pe->ct_match)) {
359                         if (cp->ipvs != p->ipvs)
360                                 continue;
361                         if (p->pe == cp->pe && p->pe->ct_match(p, cp)) {
362                                 if (__ip_vs_conn_get(cp))
363                                         goto out;
364                         }
365                         continue;
366                 }
367
368                 if (cp->af == p->af &&
369                     ip_vs_addr_equal(p->af, p->caddr, &cp->caddr) &&
370                     /* protocol should only be IPPROTO_IP if
371                      * p->vaddr is a fwmark */
372                     ip_vs_addr_equal(p->protocol == IPPROTO_IP ? AF_UNSPEC :
373                                      p->af, p->vaddr, &cp->vaddr) &&
374                     p->vport == cp->vport && p->cport == cp->cport &&
375                     cp->flags & IP_VS_CONN_F_TEMPLATE &&
376                     p->protocol == cp->protocol &&
377                     cp->ipvs == p->ipvs) {
378                         if (__ip_vs_conn_get(cp))
379                                 goto out;
380                 }
381         }
382         cp = NULL;
383
384   out:
385         rcu_read_unlock();
386
387         IP_VS_DBG_BUF(9, "template lookup/in %s %s:%d->%s:%d %s\n",
388                       ip_vs_proto_name(p->protocol),
389                       IP_VS_DBG_ADDR(p->af, p->caddr), ntohs(p->cport),
390                       IP_VS_DBG_ADDR(p->af, p->vaddr), ntohs(p->vport),
391                       cp ? "hit" : "not hit");
392
393         return cp;
394 }
395
396 /* Gets ip_vs_conn associated with supplied parameters in the ip_vs_conn_tab.
397  * Called for pkts coming from inside-to-OUTside.
398  *      p->caddr, p->cport: pkt source address (inside host)
399  *      p->vaddr, p->vport: pkt dest address (foreign host) */
400 struct ip_vs_conn *ip_vs_conn_out_get(const struct ip_vs_conn_param *p)
401 {
402         unsigned int hash;
403         struct ip_vs_conn *cp, *ret=NULL;
404         const union nf_inet_addr *saddr;
405         __be16 sport;
406
407         /*
408          *      Check for "full" addressed entries
409          */
410         hash = ip_vs_conn_hashkey_param(p, true);
411
412         rcu_read_lock();
413
414         hlist_for_each_entry_rcu(cp, &ip_vs_conn_tab[hash], c_list) {
415                 if (p->vport != cp->cport)
416                         continue;
417
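                /*
                 * In NAT (MASQ) mode replies are expected to come from the
                 * real server, so match the packet source against
                 * daddr/dport; for other forwarding methods match against
                 * the virtual address/port instead.
                 */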
418                 if (IP_VS_FWD_METHOD(cp) != IP_VS_CONN_F_MASQ) {
419                         sport = cp->vport;
420                         saddr = &cp->vaddr;
421                 } else {
422                         sport = cp->dport;
423                         saddr = &cp->daddr;
424                 }
425
426                 if (p->cport == sport && cp->af == p->af &&
427                     ip_vs_addr_equal(p->af, p->vaddr, &cp->caddr) &&
428                     ip_vs_addr_equal(p->af, p->caddr, saddr) &&
429                     p->protocol == cp->protocol &&
430                     cp->ipvs == p->ipvs) {
431                         if (!__ip_vs_conn_get(cp))
432                                 continue;
433                         /* HIT */
434                         ret = cp;
435                         break;
436                 }
437         }
438
439         rcu_read_unlock();
440
441         IP_VS_DBG_BUF(9, "lookup/out %s %s:%d->%s:%d %s\n",
442                       ip_vs_proto_name(p->protocol),
443                       IP_VS_DBG_ADDR(p->af, p->caddr), ntohs(p->cport),
444                       IP_VS_DBG_ADDR(p->af, p->vaddr), ntohs(p->vport),
445                       ret ? "hit" : "not hit");
446
447         return ret;
448 }
449
450 struct ip_vs_conn *
451 ip_vs_conn_out_get_proto(struct netns_ipvs *ipvs, int af,
452                          const struct sk_buff *skb,
453                          const struct ip_vs_iphdr *iph)
454 {
455         struct ip_vs_conn_param p;
456
457         if (ip_vs_conn_fill_param_proto(ipvs, af, skb, iph, &p))
458                 return NULL;
459
460         return ip_vs_conn_out_get(&p);
461 }
462 EXPORT_SYMBOL_GPL(ip_vs_conn_out_get_proto);
463
464 /*
465  *      Put back the conn and restart its timer with its timeout
466  */
467 static void __ip_vs_conn_put_timer(struct ip_vs_conn *cp)
468 {
469         unsigned long t = (cp->flags & IP_VS_CONN_F_ONE_PACKET) ?
470                 0 : cp->timeout;
471         mod_timer(&cp->timer, jiffies+t);
472
473         __ip_vs_conn_put(cp);
474 }
475
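/*
 * Drop the caller's reference: a one-packet connection with no other user
 * and no pending timer is expired immediately, anything else just gets its
 * timer re-armed with the current timeout.
 */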
476 void ip_vs_conn_put(struct ip_vs_conn *cp)
477 {
478         if ((cp->flags & IP_VS_CONN_F_ONE_PACKET) &&
479             (refcount_read(&cp->refcnt) == 1) &&
480             !timer_pending(&cp->timer))
481                 /* expire connection immediately */
482                 ip_vs_conn_expire(&cp->timer);
483         else
484                 __ip_vs_conn_put_timer(cp);
485 }
486
487 /*
488  *      Fill a no_client_port connection with a client port number
489  */
490 void ip_vs_conn_fill_cport(struct ip_vs_conn *cp, __be16 cport)
491 {
492         if (ip_vs_conn_unhash(cp)) {
493                 spin_lock_bh(&cp->lock);
494                 if (cp->flags & IP_VS_CONN_F_NO_CPORT) {
495                         atomic_dec(&ip_vs_conn_no_cport_cnt);
496                         cp->flags &= ~IP_VS_CONN_F_NO_CPORT;
497                         cp->cport = cport;
498                 }
499                 spin_unlock_bh(&cp->lock);
500
501                 /* hash on the new cport */
502                 ip_vs_conn_hash(cp);
503         }
504 }
505
506
507 /*
508  *      Bind a connection entry with the corresponding packet_xmit.
509  *      Called by ip_vs_conn_new.
510  */
511 static inline void ip_vs_bind_xmit(struct ip_vs_conn *cp)
512 {
513         switch (IP_VS_FWD_METHOD(cp)) {
514         case IP_VS_CONN_F_MASQ:
515                 cp->packet_xmit = ip_vs_nat_xmit;
516                 break;
517
518         case IP_VS_CONN_F_TUNNEL:
519 #ifdef CONFIG_IP_VS_IPV6
520                 if (cp->daf == AF_INET6)
521                         cp->packet_xmit = ip_vs_tunnel_xmit_v6;
522                 else
523 #endif
524                         cp->packet_xmit = ip_vs_tunnel_xmit;
525                 break;
526
527         case IP_VS_CONN_F_DROUTE:
528                 cp->packet_xmit = ip_vs_dr_xmit;
529                 break;
530
531         case IP_VS_CONN_F_LOCALNODE:
532                 cp->packet_xmit = ip_vs_null_xmit;
533                 break;
534
535         case IP_VS_CONN_F_BYPASS:
536                 cp->packet_xmit = ip_vs_bypass_xmit;
537                 break;
538         }
539 }
540
541 #ifdef CONFIG_IP_VS_IPV6
542 static inline void ip_vs_bind_xmit_v6(struct ip_vs_conn *cp)
543 {
544         switch (IP_VS_FWD_METHOD(cp)) {
545         case IP_VS_CONN_F_MASQ:
546                 cp->packet_xmit = ip_vs_nat_xmit_v6;
547                 break;
548
549         case IP_VS_CONN_F_TUNNEL:
550                 if (cp->daf == AF_INET6)
551                         cp->packet_xmit = ip_vs_tunnel_xmit_v6;
552                 else
553                         cp->packet_xmit = ip_vs_tunnel_xmit;
554                 break;
555
556         case IP_VS_CONN_F_DROUTE:
557                 cp->packet_xmit = ip_vs_dr_xmit_v6;
558                 break;
559
560         case IP_VS_CONN_F_LOCALNODE:
561                 cp->packet_xmit = ip_vs_null_xmit;
562                 break;
563
564         case IP_VS_CONN_F_BYPASS:
565                 cp->packet_xmit = ip_vs_bypass_xmit_v6;
566                 break;
567         }
568 }
569 #endif
570
571
572 static inline int ip_vs_dest_totalconns(struct ip_vs_dest *dest)
573 {
574         return atomic_read(&dest->activeconns)
575                 + atomic_read(&dest->inactconns);
576 }
577
578 /*
579  *      Bind a connection entry with a virtual service destination
580  *      Called just after a new connection entry is created.
581  */
582 static inline void
583 ip_vs_bind_dest(struct ip_vs_conn *cp, struct ip_vs_dest *dest)
584 {
585         unsigned int conn_flags;
586         __u32 flags;
587
588         /* if dest is NULL, then return directly */
589         if (!dest)
590                 return;
591
592         /* Increase the refcnt counter of the dest */
593         ip_vs_dest_hold(dest);
594
595         conn_flags = atomic_read(&dest->conn_flags);
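        /* One-packet scheduling is only used with UDP; clear the flag for
         * any other protocol.
         */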
596         if (cp->protocol != IPPROTO_UDP)
597                 conn_flags &= ~IP_VS_CONN_F_ONE_PACKET;
598         flags = cp->flags;
599         /* Bind with the destination and its corresponding transmitter */
600         if (flags & IP_VS_CONN_F_SYNC) {
601                 /* if the connection is not template and is created
602                  * by sync, preserve the activity flag.
603                  */
604                 if (!(flags & IP_VS_CONN_F_TEMPLATE))
605                         conn_flags &= ~IP_VS_CONN_F_INACTIVE;
606                 /* connections inherit forwarding method from dest */
607                 flags &= ~(IP_VS_CONN_F_FWD_MASK | IP_VS_CONN_F_NOOUTPUT);
608         }
609         flags |= conn_flags;
610         cp->flags = flags;
611         cp->dest = dest;
612
613         IP_VS_DBG_BUF(7, "Bind-dest %s c:%s:%d v:%s:%d "
614                       "d:%s:%d fwd:%c s:%u conn->flags:%X conn->refcnt:%d "
615                       "dest->refcnt:%d\n",
616                       ip_vs_proto_name(cp->protocol),
617                       IP_VS_DBG_ADDR(cp->af, &cp->caddr), ntohs(cp->cport),
618                       IP_VS_DBG_ADDR(cp->af, &cp->vaddr), ntohs(cp->vport),
619                       IP_VS_DBG_ADDR(cp->daf, &cp->daddr), ntohs(cp->dport),
620                       ip_vs_fwd_tag(cp), cp->state,
621                       cp->flags, refcount_read(&cp->refcnt),
622                       refcount_read(&dest->refcnt));
623
624         /* Update the connection counters */
625         if (!(flags & IP_VS_CONN_F_TEMPLATE)) {
626                 /* It is a normal connection, so modify the counters
627                  * according to the flags, later the protocol can
628                  * update them on state change
629                  */
630                 if (!(flags & IP_VS_CONN_F_INACTIVE))
631                         atomic_inc(&dest->activeconns);
632                 else
633                         atomic_inc(&dest->inactconns);
634         } else {
635                 /* It is a persistent connection/template, so increase
636                    the persistent connection counter */
637                 atomic_inc(&dest->persistconns);
638         }
639
640         if (dest->u_threshold != 0 &&
641             ip_vs_dest_totalconns(dest) >= dest->u_threshold)
642                 dest->flags |= IP_VS_DEST_F_OVERLOAD;
643 }
644
645
646 /*
647  * Check if there is a destination for the connection, if so
648  * bind the connection to the destination.
649  */
650 void ip_vs_try_bind_dest(struct ip_vs_conn *cp)
651 {
652         struct ip_vs_dest *dest;
653
654         rcu_read_lock();
655
656         /* This function is only invoked by the synchronization code. We do
657          * not currently support heterogeneous pools with synchronization,
658          * so we can make the assumption that the svc_af is the same as the
659          * dest_af
660          */
661         dest = ip_vs_find_dest(cp->ipvs, cp->af, cp->af, &cp->daddr,
662                                cp->dport, &cp->vaddr, cp->vport,
663                                cp->protocol, cp->fwmark, cp->flags);
664         if (dest) {
665                 struct ip_vs_proto_data *pd;
666
667                 spin_lock_bh(&cp->lock);
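                /* cp may already have been bound to a dest by now; if so,
                 * keep the existing binding.
                 */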
668                 if (cp->dest) {
669                         spin_unlock_bh(&cp->lock);
670                         rcu_read_unlock();
671                         return;
672                 }
673
674                 /* Applications depend on the forwarding method, so it is
675                  * better to always reassign them when binding the dest */
676                 if (cp->app)
677                         ip_vs_unbind_app(cp);
678
679                 ip_vs_bind_dest(cp, dest);
680                 spin_unlock_bh(&cp->lock);
681
682                 /* Update its packet transmitter */
683                 cp->packet_xmit = NULL;
684 #ifdef CONFIG_IP_VS_IPV6
685                 if (cp->af == AF_INET6)
686                         ip_vs_bind_xmit_v6(cp);
687                 else
688 #endif
689                         ip_vs_bind_xmit(cp);
690
691                 pd = ip_vs_proto_data_get(cp->ipvs, cp->protocol);
692                 if (pd && atomic_read(&pd->appcnt))
693                         ip_vs_bind_app(cp, pd->pp);
694         }
695         rcu_read_unlock();
696 }
697
698
699 /*
700  *      Unbind a connection entry with its VS destination
701  *      Called by the ip_vs_conn_expire function.
702  */
703 static inline void ip_vs_unbind_dest(struct ip_vs_conn *cp)
704 {
705         struct ip_vs_dest *dest = cp->dest;
706
707         if (!dest)
708                 return;
709
710         IP_VS_DBG_BUF(7, "Unbind-dest %s c:%s:%d v:%s:%d "
711                       "d:%s:%d fwd:%c s:%u conn->flags:%X conn->refcnt:%d "
712                       "dest->refcnt:%d\n",
713                       ip_vs_proto_name(cp->protocol),
714                       IP_VS_DBG_ADDR(cp->af, &cp->caddr), ntohs(cp->cport),
715                       IP_VS_DBG_ADDR(cp->af, &cp->vaddr), ntohs(cp->vport),
716                       IP_VS_DBG_ADDR(cp->daf, &cp->daddr), ntohs(cp->dport),
717                       ip_vs_fwd_tag(cp), cp->state,
718                       cp->flags, refcount_read(&cp->refcnt),
719                       refcount_read(&dest->refcnt));
720
721         /* Update the connection counters */
722         if (!(cp->flags & IP_VS_CONN_F_TEMPLATE)) {
723                 /* It is a normal connection, so decrease the inactconns
724                    or activeconns counter */
725                 if (cp->flags & IP_VS_CONN_F_INACTIVE) {
726                         atomic_dec(&dest->inactconns);
727                 } else {
728                         atomic_dec(&dest->activeconns);
729                 }
730         } else {
731                 /* It is a persistent connection/template, so decrease
732                    the persistent connection counter */
733                 atomic_dec(&dest->persistconns);
734         }
735
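        /*
         * Clear the overload flag once the load drops below the lower
         * threshold, or below 3/4 of the upper threshold when no lower
         * threshold is configured, and always when no thresholds are set.
         */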
736         if (dest->l_threshold != 0) {
737                 if (ip_vs_dest_totalconns(dest) < dest->l_threshold)
738                         dest->flags &= ~IP_VS_DEST_F_OVERLOAD;
739         } else if (dest->u_threshold != 0) {
740                 if (ip_vs_dest_totalconns(dest) * 4 < dest->u_threshold * 3)
741                         dest->flags &= ~IP_VS_DEST_F_OVERLOAD;
742         } else {
743                 if (dest->flags & IP_VS_DEST_F_OVERLOAD)
744                         dest->flags &= ~IP_VS_DEST_F_OVERLOAD;
745         }
746
747         ip_vs_dest_put(dest);
748 }
749
750 static int expire_quiescent_template(struct netns_ipvs *ipvs,
751                                      struct ip_vs_dest *dest)
752 {
753 #ifdef CONFIG_SYSCTL
754         return ipvs->sysctl_expire_quiescent_template &&
755                 (atomic_read(&dest->weight) == 0);
756 #else
757         return 0;
758 #endif
759 }
760
761 /*
762  *      Checking if the destination of a connection template is available.
763  *      If available, return 1, otherwise invalidate this connection
764  *      template and return 0.
765  */
766 int ip_vs_check_template(struct ip_vs_conn *ct, struct ip_vs_dest *cdest)
767 {
768         struct ip_vs_dest *dest = ct->dest;
769         struct netns_ipvs *ipvs = ct->ipvs;
770
771         /*
772          * Checking the dest server status.
773          */
774         if ((dest == NULL) ||
775             !(dest->flags & IP_VS_DEST_F_AVAILABLE) ||
776             expire_quiescent_template(ipvs, dest) ||
777             (cdest && (dest != cdest))) {
778                 IP_VS_DBG_BUF(9, "check_template: dest not available for "
779                               "protocol %s s:%s:%d v:%s:%d "
780                               "-> d:%s:%d\n",
781                               ip_vs_proto_name(ct->protocol),
782                               IP_VS_DBG_ADDR(ct->af, &ct->caddr),
783                               ntohs(ct->cport),
784                               IP_VS_DBG_ADDR(ct->af, &ct->vaddr),
785                               ntohs(ct->vport),
786                               IP_VS_DBG_ADDR(ct->daf, &ct->daddr),
787                               ntohs(ct->dport));
788
789                 /*
790                  * Invalidate the connection template
791                  */
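                /* Rehash the template with impossible ports (0xffff) so
                 * that later template lookups can no longer match it.
                 */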
792                 if (ct->vport != htons(0xffff)) {
793                         if (ip_vs_conn_unhash(ct)) {
794                                 ct->dport = htons(0xffff);
795                                 ct->vport = htons(0xffff);
796                                 ct->cport = 0;
797                                 ip_vs_conn_hash(ct);
798                         }
799                 }
800
801                 /*
802                  * Simply decrease the refcnt of the template,
803                  * don't restart its timer.
804                  */
805                 __ip_vs_conn_put(ct);
806                 return 0;
807         }
808         return 1;
809 }
810
811 static void ip_vs_conn_rcu_free(struct rcu_head *head)
812 {
813         struct ip_vs_conn *cp = container_of(head, struct ip_vs_conn,
814                                              rcu_head);
815
816         ip_vs_pe_put(cp->pe);
817         kfree(cp->pe_data);
818         kmem_cache_free(ip_vs_conn_cachep, cp);
819 }
820
821 /* Try to delete connection while not holding reference */
822 static void ip_vs_conn_del(struct ip_vs_conn *cp)
823 {
824         if (del_timer(&cp->timer)) {
825                 /* Drop cp->control chain too */
826                 if (cp->control)
827                         cp->timeout = 0;
828                 ip_vs_conn_expire(&cp->timer);
829         }
830 }
831
832 /* Try to delete connection while holding reference */
833 static void ip_vs_conn_del_put(struct ip_vs_conn *cp)
834 {
835         if (del_timer(&cp->timer)) {
836                 /* Drop cp->control chain too */
837                 if (cp->control)
838                         cp->timeout = 0;
839                 __ip_vs_conn_put(cp);
840                 ip_vs_conn_expire(&cp->timer);
841         } else {
842                 __ip_vs_conn_put(cp);
843         }
844 }
845
846 static void ip_vs_conn_expire(struct timer_list *t)
847 {
848         struct ip_vs_conn *cp = from_timer(cp, t, timer);
849         struct netns_ipvs *ipvs = cp->ipvs;
850
851         /*
852          *      do I control anybody?
853          */
854         if (atomic_read(&cp->n_control))
855                 goto expire_later;
856
857         /* Unlink conn if not referenced anymore */
858         if (likely(ip_vs_conn_unlink(cp))) {
859                 struct ip_vs_conn *ct = cp->control;
860
861                 /* delete the timer in case it was re-activated by other users */
862                 del_timer(&cp->timer);
863
864                 /* does anybody control me? */
865                 if (ct) {
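                        /* cp->timeout == 0 marks the whole control chain
                         * for dropping; take a reference on ct so it can be
                         * deleted below once it is unlinked from cp.
                         */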
866                         bool has_ref = !cp->timeout && __ip_vs_conn_get(ct);
867
868                         ip_vs_control_del(cp);
869                         /* Drop CTL or non-assured TPL if not used anymore */
870                         if (has_ref && !atomic_read(&ct->n_control) &&
871                             (!(ct->flags & IP_VS_CONN_F_TEMPLATE) ||
872                              !(ct->state & IP_VS_CTPL_S_ASSURED))) {
873                                 IP_VS_DBG(4, "drop controlling connection\n");
874                                 ip_vs_conn_del_put(ct);
875                         } else if (has_ref) {
876                                 __ip_vs_conn_put(ct);
877                         }
878                 }
879
880                 if ((cp->flags & IP_VS_CONN_F_NFCT) &&
881                     !(cp->flags & IP_VS_CONN_F_ONE_PACKET)) {
882                         /* Do not access conntracks during subsys cleanup
883                          * because nf_conntrack_find_get can not be used after
884                          * conntrack cleanup for the net.
885                          */
886                         smp_rmb();
887                         if (ipvs->enable)
888                                 ip_vs_conn_drop_conntrack(cp);
889                 }
890
891                 if (unlikely(cp->app != NULL))
892                         ip_vs_unbind_app(cp);
893                 ip_vs_unbind_dest(cp);
894                 if (cp->flags & IP_VS_CONN_F_NO_CPORT)
895                         atomic_dec(&ip_vs_conn_no_cport_cnt);
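                /* One-packet conns were never hashed, so no RCU reader can
                 * hold a pointer to them and they can be freed right away;
                 * hashed conns must wait for a grace period.
                 */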
896                 if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
897                         ip_vs_conn_rcu_free(&cp->rcu_head);
898                 else
899                         call_rcu(&cp->rcu_head, ip_vs_conn_rcu_free);
900                 atomic_dec(&ipvs->conn_count);
901                 return;
902         }
903
904   expire_later:
905         IP_VS_DBG(7, "delayed: conn->refcnt=%d conn->n_control=%d\n",
906                   refcount_read(&cp->refcnt),
907                   atomic_read(&cp->n_control));
908
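        /* Still controlling other conns or still in use: keep the entry
         * around and retry expiration in 60 seconds.
         */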
909         refcount_inc(&cp->refcnt);
910         cp->timeout = 60*HZ;
911
912         if (ipvs->sync_state & IP_VS_STATE_MASTER)
913                 ip_vs_sync_conn(ipvs, cp, sysctl_sync_threshold(ipvs));
914
915         __ip_vs_conn_put_timer(cp);
916 }
917
918 /* Modify timer, so that it expires as soon as possible.
919  * Can be called without reference only if under RCU lock.
920  * We can have such chain of conns linked with ->control: DATA->CTL->TPL
921  * - DATA (eg. FTP) and TPL (persistence) can be present depending on setup
922  * - cp->timeout=0 indicates all conns from chain should be dropped but
923  * TPL is not dropped if in assured state
924  */
925 void ip_vs_conn_expire_now(struct ip_vs_conn *cp)
926 {
927         /* Using mod_timer_pending will ensure the timer is not
928          * modified after the final del_timer in ip_vs_conn_expire.
929          */
930         if (timer_pending(&cp->timer) &&
931             time_after(cp->timer.expires, jiffies))
932                 mod_timer_pending(&cp->timer, jiffies);
933 }
934
935
936 /*
937  *      Create a new connection entry and hash it into the ip_vs_conn_tab
938  */
939 struct ip_vs_conn *
940 ip_vs_conn_new(const struct ip_vs_conn_param *p, int dest_af,
941                const union nf_inet_addr *daddr, __be16 dport, unsigned int flags,
942                struct ip_vs_dest *dest, __u32 fwmark)
943 {
944         struct ip_vs_conn *cp;
945         struct netns_ipvs *ipvs = p->ipvs;
946         struct ip_vs_proto_data *pd = ip_vs_proto_data_get(p->ipvs,
947                                                            p->protocol);
948
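        /* New connections are typically set up from packet processing
         * (softirq) context, hence the atomic allocation.
         */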
949         cp = kmem_cache_alloc(ip_vs_conn_cachep, GFP_ATOMIC);
950         if (cp == NULL) {
951                 IP_VS_ERR_RL("%s(): no memory\n", __func__);
952                 return NULL;
953         }
954
955         INIT_HLIST_NODE(&cp->c_list);
956         timer_setup(&cp->timer, ip_vs_conn_expire, 0);
957         cp->ipvs           = ipvs;
958         cp->af             = p->af;
959         cp->daf            = dest_af;
960         cp->protocol       = p->protocol;
961         ip_vs_addr_set(p->af, &cp->caddr, p->caddr);
962         cp->cport          = p->cport;
963         /* proto should only be IPPROTO_IP if p->vaddr is a fwmark */
964         ip_vs_addr_set(p->protocol == IPPROTO_IP ? AF_UNSPEC : p->af,
965                        &cp->vaddr, p->vaddr);
966         cp->vport          = p->vport;
967         ip_vs_addr_set(cp->daf, &cp->daddr, daddr);
968         cp->dport          = dport;
969         cp->flags          = flags;
970         cp->fwmark         = fwmark;
971         if (flags & IP_VS_CONN_F_TEMPLATE && p->pe) {
972                 ip_vs_pe_get(p->pe);
973                 cp->pe = p->pe;
974                 cp->pe_data = p->pe_data;
975                 cp->pe_data_len = p->pe_data_len;
976         } else {
977                 cp->pe = NULL;
978                 cp->pe_data = NULL;
979                 cp->pe_data_len = 0;
980         }
981         spin_lock_init(&cp->lock);
982
983         /*
984          * Mark the entry as referenced by the current thread before hashing
985          * it into the table, so that another thread running
986          * ip_vs_random_dropentry cannot drop this entry.
987          */
988         refcount_set(&cp->refcnt, 1);
989
990         cp->control = NULL;
991         atomic_set(&cp->n_control, 0);
992         atomic_set(&cp->in_pkts, 0);
993
994         cp->packet_xmit = NULL;
995         cp->app = NULL;
996         cp->app_data = NULL;
997         /* reset struct ip_vs_seq */
998         cp->in_seq.delta = 0;
999         cp->out_seq.delta = 0;
1000
1001         atomic_inc(&ipvs->conn_count);
1002         if (flags & IP_VS_CONN_F_NO_CPORT)
1003                 atomic_inc(&ip_vs_conn_no_cport_cnt);
1004
1005         /* Bind the connection with a destination server */
1006         cp->dest = NULL;
1007         ip_vs_bind_dest(cp, dest);
1008
1009         /* Set its state and timeout */
1010         cp->state = 0;
1011         cp->old_state = 0;
1012         cp->timeout = 3*HZ;
1013         cp->sync_endtime = jiffies & ~3UL;
1014
1015         /* Bind its packet transmitter */
1016 #ifdef CONFIG_IP_VS_IPV6
1017         if (p->af == AF_INET6)
1018                 ip_vs_bind_xmit_v6(cp);
1019         else
1020 #endif
1021                 ip_vs_bind_xmit(cp);
1022
1023         if (unlikely(pd && atomic_read(&pd->appcnt)))
1024                 ip_vs_bind_app(cp, pd->pp);
1025
1026         /*
1027          * Allow conntrack to be preserved. By default, conntrack
1028          * is created and destroyed for every packet.
1029          * Sometimes keeping conntrack can be useful for
1030          * IP_VS_CONN_F_ONE_PACKET too.
1031          */
1032
1033         if (ip_vs_conntrack_enabled(ipvs))
1034                 cp->flags |= IP_VS_CONN_F_NFCT;
1035
1036         /* Hash it in the ip_vs_conn_tab finally */
1037         ip_vs_conn_hash(cp);
1038
1039         return cp;
1040 }
1041
1042 /*
1043  *      /proc/net/ip_vs_conn entries
1044  */
1045 #ifdef CONFIG_PROC_FS
1046 struct ip_vs_iter_state {
1047         struct seq_net_private  p;
1048         struct hlist_head       *l;
1049 };
1050
1051 static void *ip_vs_conn_array(struct seq_file *seq, loff_t pos)
1052 {
1053         int idx;
1054         struct ip_vs_conn *cp;
1055         struct ip_vs_iter_state *iter = seq->private;
1056
1057         for (idx = 0; idx < ip_vs_conn_tab_size; idx++) {
1058                 hlist_for_each_entry_rcu(cp, &ip_vs_conn_tab[idx], c_list) {
1059                         /* __ip_vs_conn_get() is not needed by
1060                          * ip_vs_conn_seq_show and ip_vs_conn_sync_seq_show
1061                          */
1062                         if (pos-- == 0) {
1063                                 iter->l = &ip_vs_conn_tab[idx];
1064                                 return cp;
1065                         }
1066                 }
1067                 cond_resched_rcu();
1068         }
1069
1070         return NULL;
1071 }
1072
1073 static void *ip_vs_conn_seq_start(struct seq_file *seq, loff_t *pos)
1074         __acquires(RCU)
1075 {
1076         struct ip_vs_iter_state *iter = seq->private;
1077
1078         iter->l = NULL;
1079         rcu_read_lock();
1080         return *pos ? ip_vs_conn_array(seq, *pos - 1) : SEQ_START_TOKEN;
1081 }
1082
1083 static void *ip_vs_conn_seq_next(struct seq_file *seq, void *v, loff_t *pos)
1084 {
1085         struct ip_vs_conn *cp = v;
1086         struct ip_vs_iter_state *iter = seq->private;
1087         struct hlist_node *e;
1088         struct hlist_head *l = iter->l;
1089         int idx;
1090
1091         ++*pos;
1092         if (v == SEQ_START_TOKEN)
1093                 return ip_vs_conn_array(seq, 0);
1094
1095         /* more on same hash chain? */
1096         e = rcu_dereference(hlist_next_rcu(&cp->c_list));
1097         if (e)
1098                 return hlist_entry(e, struct ip_vs_conn, c_list);
1099
1100         idx = l - ip_vs_conn_tab;
1101         while (++idx < ip_vs_conn_tab_size) {
1102                 hlist_for_each_entry_rcu(cp, &ip_vs_conn_tab[idx], c_list) {
1103                         iter->l = &ip_vs_conn_tab[idx];
1104                         return cp;
1105                 }
1106                 cond_resched_rcu();
1107         }
1108         iter->l = NULL;
1109         return NULL;
1110 }
1111
1112 static void ip_vs_conn_seq_stop(struct seq_file *seq, void *v)
1113         __releases(RCU)
1114 {
1115         rcu_read_unlock();
1116 }
1117
1118 static int ip_vs_conn_seq_show(struct seq_file *seq, void *v)
1119 {
1120
1121         if (v == SEQ_START_TOKEN)
1122                 seq_puts(seq,
1123    "Pro FromIP   FPrt ToIP     TPrt DestIP   DPrt State       Expires PEName PEData\n");
1124         else {
1125                 const struct ip_vs_conn *cp = v;
1126                 struct net *net = seq_file_net(seq);
1127                 char pe_data[IP_VS_PENAME_MAXLEN + IP_VS_PEDATA_MAXLEN + 3];
1128                 size_t len = 0;
1129                 char dbuf[IP_VS_ADDRSTRLEN];
1130
1131                 if (!net_eq(cp->ipvs->net, net))
1132                         return 0;
1133                 if (cp->pe_data) {
1134                         pe_data[0] = ' ';
1135                         len = strlen(cp->pe->name);
1136                         memcpy(pe_data + 1, cp->pe->name, len);
1137                         pe_data[len + 1] = ' ';
1138                         len += 2;
1139                         len += cp->pe->show_pe_data(cp, pe_data + len);
1140                 }
1141                 pe_data[len] = '\0';
1142
1143 #ifdef CONFIG_IP_VS_IPV6
1144                 if (cp->daf == AF_INET6)
1145                         snprintf(dbuf, sizeof(dbuf), "%pI6", &cp->daddr.in6);
1146                 else
1147 #endif
1148                         snprintf(dbuf, sizeof(dbuf), "%08X",
1149                                  ntohl(cp->daddr.ip));
1150
1151 #ifdef CONFIG_IP_VS_IPV6
1152                 if (cp->af == AF_INET6)
1153                         seq_printf(seq, "%-3s %pI6 %04X %pI6 %04X "
1154                                 "%s %04X %-11s %7u%s\n",
1155                                 ip_vs_proto_name(cp->protocol),
1156                                 &cp->caddr.in6, ntohs(cp->cport),
1157                                 &cp->vaddr.in6, ntohs(cp->vport),
1158                                 dbuf, ntohs(cp->dport),
1159                                 ip_vs_state_name(cp),
1160                                 jiffies_delta_to_msecs(cp->timer.expires -
1161                                                        jiffies) / 1000,
1162                                 pe_data);
1163                 else
1164 #endif
1165                         seq_printf(seq,
1166                                 "%-3s %08X %04X %08X %04X"
1167                                 " %s %04X %-11s %7u%s\n",
1168                                 ip_vs_proto_name(cp->protocol),
1169                                 ntohl(cp->caddr.ip), ntohs(cp->cport),
1170                                 ntohl(cp->vaddr.ip), ntohs(cp->vport),
1171                                 dbuf, ntohs(cp->dport),
1172                                 ip_vs_state_name(cp),
1173                                 jiffies_delta_to_msecs(cp->timer.expires -
1174                                                        jiffies) / 1000,
1175                                 pe_data);
1176         }
1177         return 0;
1178 }
1179
1180 static const struct seq_operations ip_vs_conn_seq_ops = {
1181         .start = ip_vs_conn_seq_start,
1182         .next  = ip_vs_conn_seq_next,
1183         .stop  = ip_vs_conn_seq_stop,
1184         .show  = ip_vs_conn_seq_show,
1185 };
1186
1187 static const char *ip_vs_origin_name(unsigned int flags)
1188 {
1189         if (flags & IP_VS_CONN_F_SYNC)
1190                 return "SYNC";
1191         else
1192                 return "LOCAL";
1193 }
1194
1195 static int ip_vs_conn_sync_seq_show(struct seq_file *seq, void *v)
1196 {
1197         char dbuf[IP_VS_ADDRSTRLEN];
1198
1199         if (v == SEQ_START_TOKEN)
1200                 seq_puts(seq,
1201    "Pro FromIP   FPrt ToIP     TPrt DestIP   DPrt State       Origin Expires\n");
1202         else {
1203                 const struct ip_vs_conn *cp = v;
1204                 struct net *net = seq_file_net(seq);
1205
1206                 if (!net_eq(cp->ipvs->net, net))
1207                         return 0;
1208
1209 #ifdef CONFIG_IP_VS_IPV6
1210                 if (cp->daf == AF_INET6)
1211                         snprintf(dbuf, sizeof(dbuf), "%pI6", &cp->daddr.in6);
1212                 else
1213 #endif
1214                         snprintf(dbuf, sizeof(dbuf), "%08X",
1215                                  ntohl(cp->daddr.ip));
1216
1217 #ifdef CONFIG_IP_VS_IPV6
1218                 if (cp->af == AF_INET6)
1219                         seq_printf(seq, "%-3s %pI6 %04X %pI6 %04X "
1220                                 "%s %04X %-11s %-6s %7u\n",
1221                                 ip_vs_proto_name(cp->protocol),
1222                                 &cp->caddr.in6, ntohs(cp->cport),
1223                                 &cp->vaddr.in6, ntohs(cp->vport),
1224                                 dbuf, ntohs(cp->dport),
1225                                 ip_vs_state_name(cp),
1226                                 ip_vs_origin_name(cp->flags),
1227                                 jiffies_delta_to_msecs(cp->timer.expires -
1228                                                        jiffies) / 1000);
1229                 else
1230 #endif
1231                         seq_printf(seq,
1232                                 "%-3s %08X %04X %08X %04X "
1233                                 "%s %04X %-11s %-6s %7u\n",
1234                                 ip_vs_proto_name(cp->protocol),
1235                                 ntohl(cp->caddr.ip), ntohs(cp->cport),
1236                                 ntohl(cp->vaddr.ip), ntohs(cp->vport),
1237                                 dbuf, ntohs(cp->dport),
1238                                 ip_vs_state_name(cp),
1239                                 ip_vs_origin_name(cp->flags),
1240                                 jiffies_delta_to_msecs(cp->timer.expires -
1241                                                        jiffies) / 1000);
1242         }
1243         return 0;
1244 }
1245
1246 static const struct seq_operations ip_vs_conn_sync_seq_ops = {
1247         .start = ip_vs_conn_seq_start,
1248         .next  = ip_vs_conn_seq_next,
1249         .stop  = ip_vs_conn_seq_stop,
1250         .show  = ip_vs_conn_sync_seq_show,
1251 };
1252 #endif
1253
1254
1255 /* Randomly drop connection entries before running out of memory
1256  * Can be used for DATA and CTL conns. For TPL conns there are exceptions:
1257  * - traffic for services in OPS mode increases ct->in_pkts, so it is supported
1258  * - traffic for services not in OPS mode does not increase ct->in_pkts in
1259  * all cases, so it is not supported
1260  */
1261 static inline int todrop_entry(struct ip_vs_conn *cp)
1262 {
1263         /*
1264          * The drop rate array needs tuning for real environments.
1265          * Called from timer bh only => no locking
1266          */
1267         static const signed char todrop_rate[9] = {0, 1, 2, 3, 4, 5, 6, 7, 8};
1268         static signed char todrop_counter[9] = {0};
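        /*
         * todrop_rate[i] is the drop period for entries that have seen i
         * packets: rate n means one in every n candidates is dropped, and
         * rate 0 (for i == 0) means never drop here.
         */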
1269         int i;
1270
1271         /* If the conn entry hasn't lasted for 60 seconds, don't drop it.
1272            This leaves enough time for normal connections to get
1273            through. */
1274         if (time_before(cp->timeout + jiffies, cp->timer.expires + 60*HZ))
1275                 return 0;
1276
1277         /* Don't drop the entry if its number of incoming packets is
1278            outside the range [0, 8] */
1279         i = atomic_read(&cp->in_pkts);
1280         if (i > 8 || i < 0) return 0;
1281
1282         if (!todrop_rate[i]) return 0;
1283         if (--todrop_counter[i] > 0) return 0;
1284
1285         todrop_counter[i] = todrop_rate[i];
1286         return 1;
1287 }
1288
1289 static inline bool ip_vs_conn_ops_mode(struct ip_vs_conn *cp)
1290 {
1291         struct ip_vs_service *svc;
1292
1293         if (!cp->dest)
1294                 return false;
1295         svc = rcu_dereference(cp->dest->svc);
1296         return svc && (svc->flags & IP_VS_SVC_F_ONEPACKET);
1297 }
1298
1299 /* Called from keventd and must protect itself from softirqs */
1300 void ip_vs_random_dropentry(struct netns_ipvs *ipvs)
1301 {
1302         int idx;
1303         struct ip_vs_conn *cp;
1304
1305         rcu_read_lock();
1306         /*
1307          * Randomly scan 1/32 of the whole table every second
1308          */
1309         for (idx = 0; idx < (ip_vs_conn_tab_size>>5); idx++) {
1310                 unsigned int hash = get_random_u32() & ip_vs_conn_tab_mask;
1311
1312                 hlist_for_each_entry_rcu(cp, &ip_vs_conn_tab[hash], c_list) {
1313                         if (cp->ipvs != ipvs)
1314                                 continue;
1315                         if (atomic_read(&cp->n_control))
1316                                 continue;
1317                         if (cp->flags & IP_VS_CONN_F_TEMPLATE) {
1318                                 /* connection template of OPS */
1319                                 if (ip_vs_conn_ops_mode(cp))
1320                                         goto try_drop;
1321                                 if (!(cp->state & IP_VS_CTPL_S_ASSURED))
1322                                         goto drop;
1323                                 continue;
1324                         }
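                        /* For TCP and SCTP, only half-open connections are
                         * dropped unconditionally; established ones go
                         * through the rate-limited todrop_entry() check and
                         * all other states are left alone.
                         */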
1325                         if (cp->protocol == IPPROTO_TCP) {
1326                                 switch(cp->state) {
1327                                 case IP_VS_TCP_S_SYN_RECV:
1328                                 case IP_VS_TCP_S_SYNACK:
1329                                         break;
1330
1331                                 case IP_VS_TCP_S_ESTABLISHED:
1332                                         if (todrop_entry(cp))
1333                                                 break;
1334                                         continue;
1335
1336                                 default:
1337                                         continue;
1338                                 }
1339                         } else if (cp->protocol == IPPROTO_SCTP) {
1340                                 switch (cp->state) {
1341                                 case IP_VS_SCTP_S_INIT1:
1342                                 case IP_VS_SCTP_S_INIT:
1343                                         break;
1344                                 case IP_VS_SCTP_S_ESTABLISHED:
1345                                         if (todrop_entry(cp))
1346                                                 break;
1347                                         continue;
1348                                 default:
1349                                         continue;
1350                                 }
1351                         } else {
1352 try_drop:
1353                                 if (!todrop_entry(cp))
1354                                         continue;
1355                         }
1356
1357 drop:
1358                         IP_VS_DBG(4, "drop connection\n");
1359                         ip_vs_conn_del(cp);
1360                 }
1361                 cond_resched_rcu();
1362         }
1363         rcu_read_unlock();
1364 }
1365
1366
1367 /*
1368  *      Flush all the connection entries in the ip_vs_conn_tab
1369  */
1370 static void ip_vs_conn_flush(struct netns_ipvs *ipvs)
1371 {
1372         int idx;
1373         struct ip_vs_conn *cp, *cp_c;
1374
1375 flush_again:
1376         rcu_read_lock();
1377         for (idx = 0; idx < ip_vs_conn_tab_size; idx++) {
1378
1379                 hlist_for_each_entry_rcu(cp, &ip_vs_conn_tab[idx], c_list) {
1380                         if (cp->ipvs != ipvs)
1381                                 continue;
1382                         if (atomic_read(&cp->n_control))
1383                                 continue;
1384                         cp_c = cp->control;
1385                         IP_VS_DBG(4, "del connection\n");
1386                         ip_vs_conn_del(cp);
1387                         if (cp_c && !atomic_read(&cp_c->n_control)) {
1388                                 IP_VS_DBG(4, "del controlling connection\n");
1389                                 ip_vs_conn_del(cp_c);
1390                         }
1391                 }
1392                 cond_resched_rcu();
1393         }
1394         rcu_read_unlock();
1395
1396         /* the counter may not be zero, because some conn entries are still
1397            run by slow timer handlers or are unhashed but still referenced */
1398         if (atomic_read(&ipvs->conn_count) != 0) {
1399                 schedule();
1400                 goto flush_again;
1401         }
1402 }
1403
1404 #ifdef CONFIG_SYSCTL
1405 void ip_vs_expire_nodest_conn_flush(struct netns_ipvs *ipvs)
1406 {
1407         int idx;
1408         struct ip_vs_conn *cp, *cp_c;
1409         struct ip_vs_dest *dest;
1410
1411         rcu_read_lock();
1412         for (idx = 0; idx < ip_vs_conn_tab_size; idx++) {
1413                 hlist_for_each_entry_rcu(cp, &ip_vs_conn_tab[idx], c_list) {
1414                         if (cp->ipvs != ipvs)
1415                                 continue;
1416
1417                         dest = cp->dest;
1418                         if (!dest || (dest->flags & IP_VS_DEST_F_AVAILABLE))
1419                                 continue;
1420
1421                         if (atomic_read(&cp->n_control))
1422                                 continue;
1423
1424                         cp_c = cp->control;
1425                         IP_VS_DBG(4, "del connection\n");
1426                         ip_vs_conn_del(cp);
1427                         if (cp_c && !atomic_read(&cp_c->n_control)) {
1428                                 IP_VS_DBG(4, "del controlling connection\n");
1429                                 ip_vs_conn_del(cp_c);
1430                         }
1431                 }
1432                 cond_resched_rcu();
1433
1434                 /* netns clean up started, abort delayed work */
1435                 if (!ipvs->enable)
1436                         break;
1437         }
1438         rcu_read_unlock();
1439 }
1440 #endif
1441
1442 /*
1443  * per netns init and exit
1444  */
1445 int __net_init ip_vs_conn_net_init(struct netns_ipvs *ipvs)
1446 {
1447         atomic_set(&ipvs->conn_count, 0);
1448
1449 #ifdef CONFIG_PROC_FS
1450         if (!proc_create_net("ip_vs_conn", 0, ipvs->net->proc_net,
1451                              &ip_vs_conn_seq_ops,
1452                              sizeof(struct ip_vs_iter_state)))
1453                 goto err_conn;
1454
1455         if (!proc_create_net("ip_vs_conn_sync", 0, ipvs->net->proc_net,
1456                              &ip_vs_conn_sync_seq_ops,
1457                              sizeof(struct ip_vs_iter_state)))
1458                 goto err_conn_sync;
1459 #endif
1460
1461         return 0;
1462
1463 #ifdef CONFIG_PROC_FS
1464 err_conn_sync:
1465         remove_proc_entry("ip_vs_conn", ipvs->net->proc_net);
1466 err_conn:
1467         return -ENOMEM;
1468 #endif
1469 }
1470
1471 void __net_exit ip_vs_conn_net_cleanup(struct netns_ipvs *ipvs)
1472 {
1473         /* flush all the connection entries first */
1474         ip_vs_conn_flush(ipvs);
1475 #ifdef CONFIG_PROC_FS
1476         remove_proc_entry("ip_vs_conn", ipvs->net->proc_net);
1477         remove_proc_entry("ip_vs_conn_sync", ipvs->net->proc_net);
1478 #endif
1479 }
1480
1481 int __init ip_vs_conn_init(void)
1482 {
1483         size_t tab_array_size;
1484         int max_avail;
1485 #if BITS_PER_LONG > 32
1486         int max = 27;
1487 #else
1488         int max = 20;
1489 #endif
1490         int min = 8;
1491         int idx;
1492
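        /*
         * Derive an upper bound for the table size from system memory:
         * start from log2 of RAM in bytes, leave room for roughly four
         * entries per bucket, let IPVS use at most half of memory and
         * account for the size of each ip_vs_conn entry.
         */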
1493         max_avail = order_base_2(totalram_pages()) + PAGE_SHIFT;
1494         max_avail -= 2;         /* ~4 in hash row */
1495         max_avail -= 1;         /* IPVS up to 1/2 of mem */
1496         max_avail -= order_base_2(sizeof(struct ip_vs_conn));
1497         max = clamp(max, min, max_avail);
1498         ip_vs_conn_tab_bits = clamp_val(ip_vs_conn_tab_bits, min, max);
1499         ip_vs_conn_tab_size = 1 << ip_vs_conn_tab_bits;
1500         ip_vs_conn_tab_mask = ip_vs_conn_tab_size - 1;
1501
1502         /*
1503          * Allocate the connection hash table and initialize its list heads
1504          */
1505         tab_array_size = array_size(ip_vs_conn_tab_size,
1506                                     sizeof(*ip_vs_conn_tab));
1507         ip_vs_conn_tab = kvmalloc_array(ip_vs_conn_tab_size,
1508                                         sizeof(*ip_vs_conn_tab), GFP_KERNEL);
1509         if (!ip_vs_conn_tab)
1510                 return -ENOMEM;
1511
1512         /* Allocate ip_vs_conn slab cache */
1513         ip_vs_conn_cachep = kmem_cache_create("ip_vs_conn",
1514                                               sizeof(struct ip_vs_conn), 0,
1515                                               SLAB_HWCACHE_ALIGN, NULL);
1516         if (!ip_vs_conn_cachep) {
1517                 kvfree(ip_vs_conn_tab);
1518                 return -ENOMEM;
1519         }
1520
1521         pr_info("Connection hash table configured (size=%d, memory=%zdKbytes)\n",
1522                 ip_vs_conn_tab_size, tab_array_size / 1024);
1523         IP_VS_DBG(0, "Each connection entry needs %zd bytes at least\n",
1524                   sizeof(struct ip_vs_conn));
1525
1526         for (idx = 0; idx < ip_vs_conn_tab_size; idx++)
1527                 INIT_HLIST_HEAD(&ip_vs_conn_tab[idx]);
1528
1529         for (idx = 0; idx < CT_LOCKARRAY_SIZE; idx++)  {
1530                 spin_lock_init(&__ip_vs_conntbl_lock_array[idx].l);
1531         }
1532
1533         /* calculate the random value for connection hash */
1534         get_random_bytes(&ip_vs_conn_rnd, sizeof(ip_vs_conn_rnd));
1535
1536         return 0;
1537 }
1538
1539 void ip_vs_conn_cleanup(void)
1540 {
1541         /* Wait all ip_vs_conn_rcu_free() callbacks to complete */
1542         rcu_barrier();
1543         /* Release the empty cache */
1544         kmem_cache_destroy(ip_vs_conn_cachep);
1545         kvfree(ip_vs_conn_tab);
1546 }