// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * ip_vs_est.c: simple rate estimator for IPVS
 *
 * Authors:     Wensong Zhang <wensong@linuxvirtualserver.org>
 *
 * Changes:     Hans Schillstrom <hans.schillstrom@ericsson.com>
 *              Network name space (netns) aware.
 *              Global data moved to netns, i.e. struct netns_ipvs
 *              Affected data: est_list and est_lock.
 *              estimation_timer() runs with a timer per netns.
 *              get_stats() does the per-cpu summing.
 */

#define KMSG_COMPONENT "IPVS"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/sysctl.h>
#include <linux/list.h>
#include <linux/rcupdate_wait.h>

#include <net/ip_vs.h>
/*
  This code estimates the rate over a short interval (such as 8
  seconds) for virtual services and real servers. To measure the rate
  over a long interval, it is easy to implement a user-level daemon
  which periodically reads those statistical counters and measures the
  rate.

  We measure the rate during the last 8 seconds, every 2 seconds (see
  the worked example after this comment):

    avgrate = avgrate*(1-W) + rate*W

    where W = 2^(-2)

  NOTES.

  * Average bps is scaled by 2^5, while average pps and cps are scaled by 2^10.

  * Netlink users can see 64-bit values but sockopt users are restricted
    to 32-bit values for conns, packets, bps, cps and pps.

  * A lot of code is taken from net/core/gen_estimator.c

  KEY POINTS:
  - cpustats counters are updated per-cpu in SoftIRQ context with BH disabled
  - kthreads read the cpustats to update the estimators (svcs, dests, total)
  - the states of estimators can be read (get stats) or modified (zero stats)
    from processes

  KTHREADS:
  - estimators are added initially to est_temp_list and later kthread 0
    distributes them to one or many kthreads for estimation
  - kthread contexts are created and attached to array
  - the kthread tasks are started when the first service is added, before
    that the total stats are not estimated
  - when configuration (cpulist/nice) is changed, the tasks are restarted
    by work (est_reload_work)
  - kthread tasks are stopped while the cpulist is empty
  - the kthread context holds lists with estimators (chains) which are
    processed every 2 seconds
  - as estimators can be added dynamically and in bursts, we try to spread
    them to multiple chains which are estimated at different times
  - on start, kthread 0 enters a calculation phase to determine the chain
    limits and the limit of estimators per kthread
  - est_add_ktid: ktid where to add new ests, can point to empty slot where
    we should add kt data
 */
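/* Worked example for the EWMA above (illustration only, assuming the
 * scaling described in the NOTES; nothing here is used by the code):
 * with W = 2^(-2), avgrate = avgrate*(1-W) + rate*W becomes
 *
 *	avg += (rate - avg) >> 2;
 *
 * which is what ip_vs_chain_estimation() does. Rates are kept in fixed
 * point (cps/pps scaled by 2^10, bps by 2^5) and sampled every 2
 * seconds, so a delta of N conns becomes N << 9 (= N * 2^10 / 2).
 * E.g. 100 new conns in one period:
 *
 *	rate = 100 << 9;		// 51200 = 50 conns/s in 2^10 units
 *	e->cps += (rate - e->cps) >> 2;	// move 1/4 of the way to 51200
 */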
static struct lock_class_key __ipvs_est_key;

static void ip_vs_est_calc_phase(struct netns_ipvs *ipvs);
static void ip_vs_est_drain_temp_list(struct netns_ipvs *ipvs);
static void ip_vs_chain_estimation(struct hlist_head *chain)
{
	struct ip_vs_estimator *e;
	struct ip_vs_cpu_stats *c;
	struct ip_vs_stats *s;
	u64 rate;

	hlist_for_each_entry_rcu(e, chain, list) {
		u64 conns, inpkts, outpkts, inbytes, outbytes;
		u64 kconns = 0, kinpkts = 0, koutpkts = 0;
		u64 kinbytes = 0, koutbytes = 0;
		unsigned int start;
		int i;

		if (kthread_should_stop())
			break;

		s = container_of(e, struct ip_vs_stats, est);
		for_each_possible_cpu(i) {
			c = per_cpu_ptr(s->cpustats, i);
			do {
				start = u64_stats_fetch_begin(&c->syncp);
				conns = u64_stats_read(&c->cnt.conns);
				inpkts = u64_stats_read(&c->cnt.inpkts);
				outpkts = u64_stats_read(&c->cnt.outpkts);
				inbytes = u64_stats_read(&c->cnt.inbytes);
				outbytes = u64_stats_read(&c->cnt.outbytes);
			} while (u64_stats_fetch_retry(&c->syncp, start));
			kconns += conns;
			kinpkts += inpkts;
			koutpkts += outpkts;
			kinbytes += inbytes;
			koutbytes += outbytes;
		}

		spin_lock(&s->lock);

		s->kstats.conns = kconns;
		s->kstats.inpkts = kinpkts;
		s->kstats.outpkts = koutpkts;
		s->kstats.inbytes = kinbytes;
		s->kstats.outbytes = koutbytes;

		/* scaled by 2^10 and divided by the 2 second period (<< 9) */
		rate = (s->kstats.conns - e->last_conns) << 9;
		e->last_conns = s->kstats.conns;
		e->cps += ((s64)rate - (s64)e->cps) >> 2;

		rate = (s->kstats.inpkts - e->last_inpkts) << 9;
		e->last_inpkts = s->kstats.inpkts;
		e->inpps += ((s64)rate - (s64)e->inpps) >> 2;

		rate = (s->kstats.outpkts - e->last_outpkts) << 9;
		e->last_outpkts = s->kstats.outpkts;
		e->outpps += ((s64)rate - (s64)e->outpps) >> 2;

		/* scaled by 2^5 and divided by the 2 second period (<< 4) */
		rate = (s->kstats.inbytes - e->last_inbytes) << 4;
		e->last_inbytes = s->kstats.inbytes;
		e->inbps += ((s64)rate - (s64)e->inbps) >> 2;

		rate = (s->kstats.outbytes - e->last_outbytes) << 4;
		e->last_outbytes = s->kstats.outbytes;
		e->outbps += ((s64)rate - (s64)e->outbps) >> 2;
		spin_unlock(&s->lock);
	}
}
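/* Note on the per-CPU readout in ip_vs_chain_estimation() (reader-side
 * sketch of the u64_stats API, not extra code used here): writers bump
 * the counters under u64_stats_update_begin()/u64_stats_update_end()
 * in SoftIRQ context, so readers must retry if a write was in flight:
 *
 *	do {
 *		start = u64_stats_fetch_begin(&c->syncp);
 *		conns = u64_stats_read(&c->cnt.conns);
 *	} while (u64_stats_fetch_retry(&c->syncp, start));
 *
 * On 64-bit kernels these are plain loads; the seqcount retry only
 * costs something on 32-bit SMP.
 */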
static void ip_vs_tick_estimation(struct ip_vs_est_kt_data *kd, int row)
{
	struct ip_vs_est_tick_data *td;
	int cid;

	rcu_read_lock();
	td = rcu_dereference(kd->ticks[row]);
	if (!td)
		goto out;
	for_each_set_bit(cid, td->present, IPVS_EST_TICK_CHAINS) {
		if (kthread_should_stop())
			break;
		ip_vs_chain_estimation(&td->chains[cid]);
		cond_resched_rcu();
		td = rcu_dereference(kd->ticks[row]);
		if (!td)
			break;
	}

out:
	rcu_read_unlock();
}
static int ip_vs_estimation_kthread(void *data)
{
	struct ip_vs_est_kt_data *kd = data;
	struct netns_ipvs *ipvs = kd->ipvs;
	int row = kd->est_row;
	unsigned long now;
	int id = kd->id;
	long gap;

	if (id > 0) {
		if (!ipvs->est_chain_max)
			return 0;
	} else {
		if (!ipvs->est_chain_max) {
			ipvs->est_calc_phase = 1;
			/* commit est_calc_phase before reading est_genid */
			smp_mb();
		}

		/* kthread 0 will handle the calc phase */
		if (ipvs->est_calc_phase)
			ip_vs_est_calc_phase(ipvs);
	}

	while (1) {
		if (!id && !hlist_empty(&ipvs->est_temp_list))
			ip_vs_est_drain_temp_list(ipvs);
		set_current_state(TASK_IDLE);
		if (kthread_should_stop())
			break;

		/* before estimation, check if we should sleep */
		now = jiffies;
		gap = kd->est_timer - now;
		if (gap > 0) {
			if (gap > IPVS_EST_TICK) {
				kd->est_timer = now - IPVS_EST_TICK;
				gap = IPVS_EST_TICK;
			}
			schedule_timeout(gap);
		} else {
			__set_current_state(TASK_RUNNING);
			if (gap < -8 * IPVS_EST_TICK)
				kd->est_timer = now;
		}

		if (kd->tick_len[row])
			ip_vs_tick_estimation(kd, row);

		row++;
		if (row >= IPVS_EST_NTICKS)
			row = 0;
		WRITE_ONCE(kd->est_row, row);
		kd->est_timer += IPVS_EST_TICK;
	}
	__set_current_state(TASK_RUNNING);

	return 0;
}
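/* Timing sketch for the loop above, assuming the current defaults in
 * ip_vs.h (IPVS_EST_NTICKS = 50, IPVS_EST_TICK = 2*HZ/50, i.e. 40ms):
 * each pass handles one row of chains and advances the deadline by one
 * tick,
 *
 *	kd->est_timer += IPVS_EST_TICK;	// next deadline, one tick later
 *	gap = kd->est_timer - jiffies;	// > 0: sleep, <= 0: we are late
 *
 * so all 50 rows are visited once per 2 second period. If the task
 * falls more than 8 ticks behind, est_timer is resynced to "now"
 * instead of trying to catch up.
 */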
/* Schedule stop/start for kthread tasks */
void ip_vs_est_reload_start(struct netns_ipvs *ipvs)
{
	/* Ignore reloads before first service is added */
	if (!ipvs->enable)
		return;
	ip_vs_est_stopped_recalc(ipvs);
	/* Bump the kthread configuration genid */
	atomic_inc(&ipvs->est_genid);
	queue_delayed_work(system_long_wq, &ipvs->est_reload_work, 0);
}
/* Start kthread task with current configuration */
int ip_vs_est_kthread_start(struct netns_ipvs *ipvs,
			    struct ip_vs_est_kt_data *kd)
{
	unsigned long now;
	int ret = 0;
	long gap;

	lockdep_assert_held(&ipvs->est_mutex);

	if (kd->task)
		goto out;
	now = jiffies;
	gap = kd->est_timer - now;
	/* Sync est_timer if task is starting later */
	if (abs(gap) > 4 * IPVS_EST_TICK)
		kd->est_timer = now;
	kd->task = kthread_create(ip_vs_estimation_kthread, kd, "ipvs-e:%d:%d",
				  ipvs->gen, kd->id);
	if (IS_ERR(kd->task)) {
		ret = PTR_ERR(kd->task);
		kd->task = NULL;
		goto out;
	}

	set_user_nice(kd->task, sysctl_est_nice(ipvs));
	set_cpus_allowed_ptr(kd->task, sysctl_est_cpulist(ipvs));

	pr_info("starting estimator thread %d...\n", kd->id);
	wake_up_process(kd->task);

out:
	return ret;
}
void ip_vs_est_kthread_stop(struct ip_vs_est_kt_data *kd)
{
	if (kd->task) {
		pr_info("stopping estimator thread %d...\n", kd->id);
		kthread_stop(kd->task);
		kd->task = NULL;
	}
}
/* Apply parameters to kthread */
static void ip_vs_est_set_params(struct netns_ipvs *ipvs,
				 struct ip_vs_est_kt_data *kd)
{
	kd->chain_max = ipvs->est_chain_max;
	/* We are using single chain on RCU preemption */
	if (IPVS_EST_TICK_CHAINS == 1)
		kd->chain_max *= IPVS_EST_CHAIN_FACTOR;
	kd->tick_max = IPVS_EST_TICK_CHAINS * kd->chain_max;
	kd->est_max_count = IPVS_EST_NTICKS * kd->tick_max;
}
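/* Capacity sketch (illustrative values, assuming IPVS_EST_TICK_CHAINS
 * is 48 and IPVS_EST_NTICKS is 50 as in current ip_vs.h): with, say,
 * est_chain_max = 100 the limits become
 *
 *	kd->tick_max = 48 * 100;	// 4800 ests per 40ms tick
 *	kd->est_max_count = 50 * 4800;	// 240000 ests per kthread
 *
 * so chain_max directly bounds the work done in a single tick.
 */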
/* Create and start estimation kthread in a free or new array slot */
static int ip_vs_est_add_kthread(struct netns_ipvs *ipvs)
{
	struct ip_vs_est_kt_data *kd = NULL;
	int id = ipvs->est_kt_count;
	int ret = -ENOMEM;
	void *arr = NULL;
	int i;

	if ((unsigned long)ipvs->est_kt_count >= ipvs->est_max_threads &&
	    ipvs->enable && ipvs->est_max_threads)
		return -EINVAL;

	mutex_lock(&ipvs->est_mutex);

	for (i = 0; i < id; i++) {
		if (!ipvs->est_kt_arr[i])
			break;
	}
	if (i >= id) {
		arr = krealloc_array(ipvs->est_kt_arr, id + 1,
				     sizeof(struct ip_vs_est_kt_data *),
				     GFP_KERNEL);
		if (!arr)
			goto out;
		ipvs->est_kt_arr = arr;
	} else {
		id = i;
	}

	kd = kzalloc(sizeof(*kd), GFP_KERNEL);
	if (!kd)
		goto out;
	kd->ipvs = ipvs;
	bitmap_fill(kd->avail, IPVS_EST_NTICKS);
	kd->est_timer = jiffies;
	kd->id = id;
	ip_vs_est_set_params(ipvs, kd);

	/* Pre-allocate stats used in calc phase */
	if (!id && !kd->calc_stats) {
		kd->calc_stats = ip_vs_stats_alloc();
		if (!kd->calc_stats)
			goto out;
	}

	/* Start kthread tasks only when services are present */
	if (ipvs->enable && !ip_vs_est_stopped(ipvs)) {
		ret = ip_vs_est_kthread_start(ipvs, kd);
		if (ret < 0)
			goto out;
	}

	if (arr)
		ipvs->est_kt_count++;
	ipvs->est_kt_arr[id] = kd;
	kd = NULL;
	/* Use most recent kthread for new ests */
	ipvs->est_add_ktid = id;

	mutex_unlock(&ipvs->est_mutex);
	return 0;

out:
	mutex_unlock(&ipvs->est_mutex);
	if (kd) {
		ip_vs_stats_free(kd->calc_stats);
		kfree(kd);
	}
	return ret;
}
/* Select ktid where to add new ests: available, unused or new slot */
static void ip_vs_est_update_ktid(struct netns_ipvs *ipvs)
{
	int ktid, best = ipvs->est_kt_count;
	struct ip_vs_est_kt_data *kd;

	for (ktid = 0; ktid < ipvs->est_kt_count; ktid++) {
		kd = ipvs->est_kt_arr[ktid];
		if (kd) {
			if (kd->est_count < kd->est_max_count) {
				best = ktid;
				break;
			}
		} else if (ktid < best) {
			best = ktid;
		}
	}
	ipvs->est_add_ktid = best;
}
/* Add estimator to current kthread (est_add_ktid) */
static int ip_vs_enqueue_estimator(struct netns_ipvs *ipvs,
				   struct ip_vs_estimator *est)
{
	struct ip_vs_est_kt_data *kd = NULL;
	struct ip_vs_est_tick_data *td;
	int ktid, row, crow, cid, ret;
	int delay = est->ktrow;

	BUILD_BUG_ON_MSG(IPVS_EST_TICK_CHAINS > 127,
			 "Too many chains for ktcid");

	if (ipvs->est_add_ktid < ipvs->est_kt_count) {
		kd = ipvs->est_kt_arr[ipvs->est_add_ktid];
		if (kd)
			goto add_est;
	}

	ret = ip_vs_est_add_kthread(ipvs);
	if (ret < 0)
		goto out;
	kd = ipvs->est_kt_arr[ipvs->est_add_ktid];

add_est:
	ktid = kd->id;
	/* For small number of estimators prefer to use few ticks,
	 * otherwise try to add into the last estimated row.
	 * est_row and add_row point after the row we should use
	 */
	if (kd->est_count >= 2 * kd->tick_max || delay < IPVS_EST_NTICKS - 1)
		crow = READ_ONCE(kd->est_row);
	else
		crow = kd->add_row;
	crow += delay;
	if (crow >= IPVS_EST_NTICKS)
		crow -= IPVS_EST_NTICKS;
	/* Assume initial delay ? */
	if (delay >= IPVS_EST_NTICKS - 1) {
		/* Preserve initial delay or decrease it if no space in tick */
		row = crow;
		if (crow < IPVS_EST_NTICKS - 1) {
			crow++;
			row = find_last_bit(kd->avail, crow);
		}
		if (row >= crow)
			row = find_last_bit(kd->avail, IPVS_EST_NTICKS);
	} else {
		/* Preserve delay or increase it if no space in tick */
		row = IPVS_EST_NTICKS;
		if (crow < IPVS_EST_NTICKS - 1)
			row = find_next_bit(kd->avail, IPVS_EST_NTICKS, crow);
		if (row >= IPVS_EST_NTICKS)
			row = find_first_bit(kd->avail, IPVS_EST_NTICKS);
	}

	td = rcu_dereference_protected(kd->ticks[row], 1);
	if (!td) {
		td = kzalloc(sizeof(*td), GFP_KERNEL);
		if (!td) {
			ret = -ENOMEM;
			goto out;
		}
		rcu_assign_pointer(kd->ticks[row], td);
	}

	cid = find_first_zero_bit(td->full, IPVS_EST_TICK_CHAINS);

	kd->est_count++;
	kd->tick_len[row]++;
	if (!td->chain_len[cid])
		__set_bit(cid, td->present);
	td->chain_len[cid]++;
	est->ktid = ktid;
	est->ktrow = row;
	est->ktcid = cid;
	hlist_add_head_rcu(&est->list, &td->chains[cid]);

	if (td->chain_len[cid] >= kd->chain_max) {
		__set_bit(cid, td->full);
		if (kd->tick_len[row] >= kd->tick_max)
			__clear_bit(row, kd->avail);
	}

	/* Update est_add_ktid to point to first available/empty kt slot */
	if (kd->est_count == kd->est_max_count)
		ip_vs_est_update_ktid(ipvs);

	ret = 0;

out:
	return ret;
}
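/* Row selection sketch for ip_vs_enqueue_estimator() (illustration,
 * assuming IPVS_EST_NTICKS = 50): a new estimator arrives with
 * est->ktrow = 49 (the initial delay), so with est_row = 10:
 *
 *	crow = 10 + 49;			// 59
 *	crow -= IPVS_EST_NTICKS;	// 9, the row estimated last
 *
 * Rows are processed in increasing order starting at est_row, so row 9
 * is reached after almost a full 2 second round, giving the estimator
 * one whole period before its first sample.
 */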
/* Start estimation for stats */
int ip_vs_start_estimator(struct netns_ipvs *ipvs, struct ip_vs_stats *stats)
{
	struct ip_vs_estimator *est = &stats->est;
	int ret;

	if (!ipvs->est_max_threads && ipvs->enable)
		ipvs->est_max_threads = ip_vs_est_max_threads(ipvs);

	est->ktid = -1;
	est->ktrow = IPVS_EST_NTICKS - 1;	/* Initial delay */

	/* We prefer this code to be short, kthread 0 will requeue the
	 * estimator to available chain. If tasks are disabled, we
	 * will not allocate much memory, just for kt 0.
	 */
	ret = 0;
	if (!ipvs->est_kt_count || !ipvs->est_kt_arr[0])
		ret = ip_vs_est_add_kthread(ipvs);
	if (ret >= 0)
		hlist_add_head(&est->list, &ipvs->est_temp_list);
	else
		INIT_HLIST_NODE(&est->list);
	return ret;
}
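/* Typical caller pattern (sketch based on how the ip_vs_ctl.c callers
 * use these helpers; not code from this file): every service/dest
 * embeds a struct ip_vs_stats whose estimator is managed with
 *
 *	ret = ip_vs_start_estimator(ipvs, &svc->stats);
 *	...
 *	ip_vs_stop_estimator(ipvs, &svc->stats);
 *
 * The estimator waits in est_temp_list until kthread 0 moves it to a
 * chain via ip_vs_enqueue_estimator().
 */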
static void ip_vs_est_kthread_destroy(struct ip_vs_est_kt_data *kd)
{
	if (kd) {
		if (kd->task) {
			pr_info("stop unused estimator thread %d...\n", kd->id);
			kthread_stop(kd->task);
		}
		ip_vs_stats_free(kd->calc_stats);
		kfree(kd);
	}
}
/* Unlink estimator from chain */
void ip_vs_stop_estimator(struct netns_ipvs *ipvs, struct ip_vs_stats *stats)
{
	struct ip_vs_estimator *est = &stats->est;
	struct ip_vs_est_tick_data *td;
	struct ip_vs_est_kt_data *kd;
	int ktid = est->ktid;
	int row = est->ktrow;
	int cid = est->ktcid;

	/* Failed to add to chain ? */
	if (hlist_unhashed(&est->list))
		return;

	/* On return, estimator can be freed, dequeue it now */

	/* In est_temp_list ? */
	if (ktid < 0) {
		hlist_del(&est->list);
		goto end_kt0;
	}

	hlist_del_rcu(&est->list);
	kd = ipvs->est_kt_arr[ktid];
	td = rcu_dereference_protected(kd->ticks[row], 1);
	__clear_bit(cid, td->full);
	td->chain_len[cid]--;
	if (!td->chain_len[cid])
		__clear_bit(cid, td->present);
	kd->tick_len[row]--;
	__set_bit(row, kd->avail);
	if (!kd->tick_len[row]) {
		RCU_INIT_POINTER(kd->ticks[row], NULL);
		kfree_rcu(td, rcu_head);
	}
	kd->est_count--;
	if (kd->est_count) {
		/* This kt slot can become available just now, prefer it */
		if (ktid < ipvs->est_add_ktid)
			ipvs->est_add_ktid = ktid;
		return;
	}

	if (ktid > 0) {
		mutex_lock(&ipvs->est_mutex);
		ip_vs_est_kthread_destroy(kd);
		ipvs->est_kt_arr[ktid] = NULL;
		if (ktid == ipvs->est_kt_count - 1) {
			ipvs->est_kt_count--;
			while (ipvs->est_kt_count > 1 &&
			       !ipvs->est_kt_arr[ipvs->est_kt_count - 1])
				ipvs->est_kt_count--;
		}
		mutex_unlock(&ipvs->est_mutex);

		/* This slot is now empty, prefer another available kt slot */
		if (ktid == ipvs->est_add_ktid)
			ip_vs_est_update_ktid(ipvs);
	}

end_kt0:
	/* kt 0 is freed after all other kthreads and chains are empty */
	if (ipvs->est_kt_count == 1 && hlist_empty(&ipvs->est_temp_list)) {
		kd = ipvs->est_kt_arr[0];
		if (!kd || !kd->est_count) {
			mutex_lock(&ipvs->est_mutex);
			if (kd) {
				ip_vs_est_kthread_destroy(kd);
				ipvs->est_kt_arr[0] = NULL;
			}
			ipvs->est_kt_count--;
			mutex_unlock(&ipvs->est_mutex);
			ipvs->est_add_ktid = 0;
		}
	}
}
/* Register all ests from est_temp_list to kthreads */
static void ip_vs_est_drain_temp_list(struct netns_ipvs *ipvs)
{
	struct ip_vs_estimator *est;

	while (1) {
		int max = 16;

		mutex_lock(&__ip_vs_mutex);

		while (max-- > 0) {
			est = hlist_entry_safe(ipvs->est_temp_list.first,
					       struct ip_vs_estimator, list);
			if (est) {
				if (kthread_should_stop())
					goto unlock;
				hlist_del_init(&est->list);
				if (ip_vs_enqueue_estimator(ipvs, est) >= 0)
					continue;
				est->ktid = -1;
				hlist_add_head(&est->list,
					       &ipvs->est_temp_list);
				/* Abort, some entries will not be estimated
				 * until next reload
				 */
			}
			goto unlock;
		}
		mutex_unlock(&__ip_vs_mutex);
		cond_resched();
	}

unlock:
	mutex_unlock(&__ip_vs_mutex);
}
/* Calculate limits for all kthreads */
static int ip_vs_est_calc_limits(struct netns_ipvs *ipvs, int *chain_max)
{
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
	struct ip_vs_est_kt_data *kd;
	struct hlist_head chain;
	struct ip_vs_stats *s;
	int cache_factor = 4;
	int i, loops, ntest;
	u64 min_est = 0;
	ktime_t t1, t2;
	int max = 8;
	int ret = 1;
	s64 diff;
	u64 val;

	INIT_HLIST_HEAD(&chain);
	mutex_lock(&__ip_vs_mutex);
	kd = ipvs->est_kt_arr[0];
	mutex_unlock(&__ip_vs_mutex);
	s = kd ? kd->calc_stats : NULL;
	if (!s)
		goto out;
	hlist_add_head(&s->est.list, &chain);

	loops = 1;
	/* Get best result from many tests */
	for (ntest = 0; ntest < 12; ntest++) {
		if (!(ntest & 3)) {
			/* Wait for cpufreq frequency transition */
			wait_event_idle_timeout(wq, kthread_should_stop(),
						HZ / 50);
			if (!ipvs->enable || kthread_should_stop())
				goto stop;
		}

		local_bh_disable();
		rcu_read_lock();

		/* Put stats in cache */
		ip_vs_chain_estimation(&chain);

		t1 = ktime_get();
		for (i = loops * cache_factor; i > 0; i--)
			ip_vs_chain_estimation(&chain);
		t2 = ktime_get();

		rcu_read_unlock();
		local_bh_enable();

		if (!ipvs->enable || kthread_should_stop())
			goto stop;
		cond_resched();

		diff = ktime_to_ns(ktime_sub(t2, t1));
		if (diff <= 1 * NSEC_PER_USEC) {
			/* Do more loops on low time resolution */
			loops *= 2;
			continue;
		}
		if (diff >= NSEC_PER_SEC)
			continue;
		val = diff;
		do_div(val, loops);
		if (!min_est || val < min_est) {
			min_est = val;
			/* goal: 95usec per chain */
			val = 95 * NSEC_PER_USEC;
			if (val >= min_est) {
				do_div(val, min_est);
				max = (int)val;
			} else {
				max = 1;
			}
		}
	}

out:
	if (s)
		hlist_del_init(&s->est.list);
	*chain_max = max;
	return ret;

stop:
	ret = 0;
	goto out;
}
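/* Sizing sketch (illustrative numbers): if the calibration above
 * yields min_est = 1900ns per chain pass, the 95usec goal gives
 *
 *	chain_max = 95000 / 1900;	// 50 ests per chain
 *
 * so even with all chains full, one tick's work stays in the low
 * millisecond range on that machine.
 */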
/* Calculate the parameters and apply them in context of kt #0
 * ECP: est_calc_phase
 * ECM: est_chain_max
 * ECP	ECM	Insert Chain	enable	Description
 * ---------------------------------------------------------------------------
 * 0	0	est_temp_list	0	create kt #0 context
 * 0	0	est_temp_list	0->1	service added, start kthread #0 task
 * 0->1	0	est_temp_list	1	kt task #0 started, enters calc phase
 * 1	0	est_temp_list	1	kt #0: determine est_chain_max,
 *					stop tasks, move ests to est_temp_list
 *					and free kd for kthreads 1..last
 * 1->0	0->N	kt chains	1	ests can go to kthreads
 * 0	N	kt chains	1	drain est_temp_list, create new kthread
 *					contexts, start tasks, estimate
 */
static void ip_vs_est_calc_phase(struct netns_ipvs *ipvs)
{
	int genid = atomic_read(&ipvs->est_genid);
	struct ip_vs_est_tick_data *td;
	struct ip_vs_est_kt_data *kd;
	struct ip_vs_estimator *est;
	struct ip_vs_stats *stats;
	int id, row, cid, delay;
	bool last, last_td;
	int chain_max;
	int step;

	if (!ip_vs_est_calc_limits(ipvs, &chain_max))
		return;

	mutex_lock(&__ip_vs_mutex);

	/* Stop all other tasks, so that we can immediately move the
	 * estimators to est_temp_list without RCU grace period
	 */
	mutex_lock(&ipvs->est_mutex);
	for (id = 1; id < ipvs->est_kt_count; id++) {
		/* netns clean up started, abort */
		if (!ipvs->enable)
			goto unlock2;
		kd = ipvs->est_kt_arr[id];
		if (!kd)
			continue;
		ip_vs_est_kthread_stop(kd);
	}
	mutex_unlock(&ipvs->est_mutex);

	/* Move all estimators to est_temp_list but carefully,
	 * all estimators and kthread data can be released while
	 * we reschedule. Even for kthread 0.
	 */
	step = 0;

	/* Order entries in est_temp_list in ascending delay, so now
	 * walk delay(desc), id(desc), cid(asc)
	 */
	delay = IPVS_EST_NTICKS;

next_delay:
	delay--;
	if (delay < 0)
		goto end_dequeue;

last_kt:
	/* Destroy contexts backwards */
	id = ipvs->est_kt_count;

next_kt:
	if (!ipvs->enable || kthread_should_stop())
		goto unlock;
	id--;
	if (id < 0)
		goto next_delay;
	kd = ipvs->est_kt_arr[id];
	if (!kd)
		goto next_kt;
	/* kt 0 can exist with empty chains */
	if (!id && kd->est_count <= 1)
		goto next_delay;

	row = kd->est_row + delay;
	if (row >= IPVS_EST_NTICKS)
		row -= IPVS_EST_NTICKS;
	td = rcu_dereference_protected(kd->ticks[row], 1);
	if (!td)
		goto next_kt;

	cid = 0;

walk_chain:
	if (kthread_should_stop())
		goto unlock;
	step++;
	if (!(step & 63)) {
		/* Give chance estimators to be added (to est_temp_list)
		 * and deleted (releasing kthread contexts)
		 */
		mutex_unlock(&__ip_vs_mutex);
		cond_resched();
		mutex_lock(&__ip_vs_mutex);

		/* Current kt released ? */
		if (id >= ipvs->est_kt_count)
			goto last_kt;
		if (kd != ipvs->est_kt_arr[id])
			goto next_kt;
		/* Current td released ? */
		if (td != rcu_dereference_protected(kd->ticks[row], 1))
			goto next_kt;
		/* No fatal changes on the current kd and td */
	}
	est = hlist_entry_safe(td->chains[cid].first, struct ip_vs_estimator,
			       list);
	if (!est) {
		cid++;
		if (cid >= IPVS_EST_TICK_CHAINS)
			goto next_kt;
		goto walk_chain;
	}
	/* We can cheat and increase est_count to protect kt 0 context
	 * from release but we prefer to keep the last estimator
	 */
	last = kd->est_count <= 1;
	/* Do not free kt #0 data */
	if (!id && last)
		goto unlock;
	last_td = kd->tick_len[row] <= 1;
	stats = container_of(est, struct ip_vs_stats, est);
	ip_vs_stop_estimator(ipvs, stats);
	/* Tasks are stopped, move without RCU grace period */
	est->ktid = -1;
	est->ktrow = row - kd->est_row;
	if (est->ktrow < 0)
		est->ktrow += IPVS_EST_NTICKS;
	hlist_add_head(&est->list, &ipvs->est_temp_list);
	/* kd or td can be freed after unlinking the last estimator */
	if (last || last_td)
		goto next_kt;
	goto walk_chain;

end_dequeue:
	/* All estimators removed while calculating ? */
	if (!ipvs->est_kt_count)
		goto unlock;
	kd = ipvs->est_kt_arr[0];
	if (!kd)
		goto unlock;
	kd->add_row = kd->est_row;
	ipvs->est_chain_max = chain_max;
	ip_vs_est_set_params(ipvs, kd);

	pr_info("using max %d ests per chain, %d per kthread\n",
		kd->chain_max, kd->est_max_count);

	/* Try to keep tot_stats in kt0, enqueue it early */
	if (ipvs->tot_stats && !hlist_unhashed(&ipvs->tot_stats->s.est.list) &&
	    ipvs->tot_stats->s.est.ktid == -1) {
		hlist_del(&ipvs->tot_stats->s.est.list);
		hlist_add_head(&ipvs->tot_stats->s.est.list,
			       &ipvs->est_temp_list);
	}

	mutex_lock(&ipvs->est_mutex);

	/* We completed the calc phase, new calc phase not requested */
	if (genid == atomic_read(&ipvs->est_genid))
		ipvs->est_calc_phase = 0;

unlock2:
	mutex_unlock(&ipvs->est_mutex);

unlock:
	mutex_unlock(&__ip_vs_mutex);
}
void ip_vs_zero_estimator(struct ip_vs_stats *stats)
{
	struct ip_vs_estimator *est = &stats->est;
	struct ip_vs_kstats *k = &stats->kstats;

	/* reset counters, caller must hold stats->lock */
	est->last_inbytes = k->inbytes;
	est->last_outbytes = k->outbytes;
	est->last_conns = k->conns;
	est->last_inpkts = k->inpkts;
	est->last_outpkts = k->outpkts;
	est->cps = 0;
	est->inpps = 0;
	est->outpps = 0;
	est->inbps = 0;
	est->outbps = 0;
}
/* Get decoded rates */
void ip_vs_read_estimator(struct ip_vs_kstats *dst, struct ip_vs_stats *stats)
{
	struct ip_vs_estimator *e = &stats->est;

	dst->cps = (e->cps + 0x1FF) >> 10;
	dst->inpps = (e->inpps + 0x1FF) >> 10;
	dst->outpps = (e->outpps + 0x1FF) >> 10;
	dst->inbps = (e->inbps + 0xF) >> 5;
	dst->outbps = (e->outbps + 0xF) >> 5;
}
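/* Decode sketch: cps is kept scaled by 2^10, so e.g. e->cps == 51200
 * reads back as
 *
 *	dst->cps = (51200 + 0x1FF) >> 10;	// 50 conns/s
 *
 * The +0x1FF (+0xF for the 2^5-scaled bps) rounds the fixed-point
 * value instead of truncating it.
 */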
int __net_init ip_vs_estimator_net_init(struct netns_ipvs *ipvs)
{
	INIT_HLIST_HEAD(&ipvs->est_temp_list);
	ipvs->est_kt_arr = NULL;
	ipvs->est_max_threads = 0;
	ipvs->est_calc_phase = 0;
	ipvs->est_chain_max = 0;
	ipvs->est_kt_count = 0;
	ipvs->est_add_ktid = 0;
	atomic_set(&ipvs->est_genid, 0);
	atomic_set(&ipvs->est_genid_done, 0);
	__mutex_init(&ipvs->est_mutex, "ipvs->est_mutex", &__ipvs_est_key);
	return 0;
}
void __net_exit ip_vs_estimator_net_cleanup(struct netns_ipvs *ipvs)
{
	int i;

	for (i = 0; i < ipvs->est_kt_count; i++)
		ip_vs_est_kthread_destroy(ipvs->est_kt_arr[i]);
	kfree(ipvs->est_kt_arr);
	mutex_destroy(&ipvs->est_mutex);
}