1 /* SPDX-License-Identifier: GPL-2.0
3 * IO cost model based controller.
5 * Copyright (C) 2019 Tejun Heo <tj@kernel.org>
6 * Copyright (C) 2019 Andy Newell <newella@fb.com>
7 * Copyright (C) 2019 Facebook
9 * One challenge of controlling IO resources is the lack of a trivially
10 * observable cost metric. This is distinguished from CPU and memory where
11 * wallclock time and the number of bytes can serve as accurate enough
12 * resource and usage metrics.
14 * Bandwidth and iops are the most commonly used metrics for IO devices but
15 * depending on the type and specifics of the device, different IO patterns
16 * easily lead to multiple orders of magnitude variations rendering them
17 * useless for the purpose of IO capacity distribution. While on-device
18 * time, with a lot of crutches, could serve as a useful approximation for
19 * non-queued rotational devices, this is no longer viable with modern
20 * devices, even the rotational ones.
22 * While there is no cost metric we can trivially observe, it isn't a
23 * complete mystery. For example, on a rotational device, seek cost
24 * dominates while a contiguous transfer contributes a smaller amount
25 * proportional to the size. If we can characterize at least the relative
26 * costs of these different types of IOs, it should be possible to
27 * implement a reasonable work-conserving proportional IO resource
28 * distribution.
30 * 1. IO Cost Model
32 * The IO cost model estimates the cost of an IO given its basic parameters and
33 * history (e.g. the end sector of the last IO). The cost is measured in
34 * device time. If a given IO is estimated to cost 10ms, the device should
35 * be able to process ~100 of those IOs in a second.
37 * Currently, there's only one builtin cost model - linear. Each IO is
38 * classified as sequential or random and given a base cost accordingly.
39 * On top of that, a size cost proportional to the length of the IO is
40 * added. While simple, this model captures the operational
41 * characteristics of a wide variety of devices well enough. Default
42 * parameters for several different classes of devices are provided and the
43 * parameters can be configured from userspace via
44 * /sys/fs/cgroup/io.cost.model.
46 * If needed, tools/cgroup/iocost_coef_gen.py can be used to generate
47 * device-specific coefficients.
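 *
 * For illustration (the device number is hypothetical and the exact syntax is
 * documented in Documentation/admin-guide/cgroup-v2.rst), a user override
 * using the rotational defaults below would be a single line written to
 * io.cost.model roughly like the following (wrapped here for readability):
 *
 *   8:16 ctrl=user model=linear rbps=174019176 rseqiops=41708 rrandiops=370
 *        wbps=178075866 wseqiops=42705 wrandiops=378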
49 * 2. Control Strategy
51 * The device virtual time (vtime) is used as the primary control metric.
52 * The control strategy is composed of the following three parts.
54 * 2-1. Vtime Distribution
56 * When a cgroup becomes active in terms of IOs, its hierarchical share is
57 * calculated. Please consider the following hierarchy where the numbers
58 * inside parentheses denote the configured weights.
60 *          root
61 *        /       \
62 *     A (w:100)  B (w:300)
63 *      /       \
64 *  A0 (w:100)  A1 (w:100)
66 * If B is idle and only A0 and A1 are actively issuing IOs, as the two are
67 * of equal weight, each gets 50% share. If then B starts issuing IOs, B
68 * gets 300/(100+300) or 75% share, and A0 and A1 equally split the rest,
69 * 12.5% each. The distribution mechanism only cares about these flattened
70 * shares. They're called hweights (hierarchical weights) and always add
71 * up to 1 (WEIGHT_ONE).
73 * A given cgroup's vtime runs slower in inverse proportion to its hweight.
74 * For example, with 12.5% weight, A0's time runs 8 times slower (100/12.5)
75 * against the device vtime - an IO which takes 10ms on the underlying
76 * device is considered to take 80ms on A0.
78 * This constitutes the basis of IO capacity distribution. Each cgroup's
79 * vtime is running at a rate determined by its hweight. A cgroup tracks
80 * the vtime consumed by past IOs and can issue a new IO if doing so
81 * wouldn't outrun the current device vtime. Otherwise, the IO is
82 * suspended until the vtime has progressed enough to cover it.
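 *
 * As a rough sketch (the actual code below also handles debt, merges and
 * waitqueues), the per-bio admission decision amounts to:
 *
 *	cost = abs_cost * WEIGHT_ONE / hweight_inuse;
 *	if (iocg_vtime + cost <= device_vnow)
 *		issue the bio and charge cost;
 *	else
 *		wait (or incur debt) until vtime catches up;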
84 * 2-2. Vrate Adjustment
86 * It's unrealistic to expect the cost model to be perfect. There are too
87 * many devices and even on the same device the overall performance
88 * fluctuates depending on numerous factors such as IO mixture and device
89 * internal garbage collection. The controller needs to adapt dynamically.
91 * This is achieved by adjusting the overall IO rate according to how busy
92 * the device is. If the device becomes overloaded, we're sending down too
93 * many IOs and should generally slow down. If there are waiting issuers
94 * but the device isn't saturated, we're issuing too few and should
95 * issue more.
97 * To slow down, we lower the vrate - the rate at which the device vtime
98 * passes compared to the wall clock. For example, if the vtime is running
99 * at the vrate of 75%, all cgroups added up would only be able to issue
100 * 750ms worth of IOs per second, and vice-versa for speeding up.
102 * Device busyness is determined using two criteria - rq wait and
103 * completion latencies.
105 * When a device gets saturated, the on-device and then the request queues
106 * fill up and a bio which is ready to be issued has to wait for a request
107 * to become available. When this delay becomes noticeable, it's a clear
108 * indication that the device is saturated and we lower the vrate. This
109 * saturation signal is fairly conservative as it only triggers when both
110 * hardware and software queues are filled up, and is used as the default
111 * busy signal.
113 * As devices can have deep queues and be unfair in how the queued commands
114 * are executed, solely depending on rq wait may not result in satisfactory
115 * control quality. For a better control quality, completion latency QoS
116 * parameters can be configured so that the device is considered saturated
117 * if N'th percentile completion latency rises above the set point.
119 * The completion latency requirements are a function of both the
120 * underlying device characteristics and the desired IO latency quality of
121 * service. There is an inherent trade-off - the tighter the latency QoS,
122 * the higher the bandwidth loss. Latency QoS is disabled by default
123 * and can be set through /sys/fs/cgroup/io.cost.qos.
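 *
 * As an illustrative example (the device number is hypothetical; see
 * Documentation/admin-guide/cgroup-v2.rst for the exact syntax), the
 * following io.cost.qos line would consider the device saturated when the
 * read p95 completion latency exceeds 75ms:
 *
 *   8:16 enable=1 ctrl=user rpct=95.00 rlat=75000 wpct=95.00 wlat=150000
 *        min=50.00 max=150.00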
125 * 2-3. Work Conservation
127 * Imagine two cgroups A and B with equal weights. A is issuing a small IO
128 * periodically while B is sending out enough parallel IOs to saturate the
129 * device on its own. Let's say A's usage amounts to 100ms worth of IO
130 * cost per second, i.e., 10% of the device capacity. The naive
131 * distribution of half and half would lead to 60% utilization of the
132 * device, a significant reduction in the total amount of work done
133 * compared to free-for-all competition. This is too high a cost to pay
134 * for IO control.
136 * To conserve the total amount of work done, we keep track of how much
137 * each active cgroup is actually using and yield part of its weight if
138 * there are other cgroups which can make use of it. In the above case,
139 * A's weight will be lowered so that it hovers above the actual usage and
140 * B would be able to use the rest.
142 * As we don't want to penalize a cgroup for donating its weight, the
143 * surplus weight adjustment factors in a margin and has an immediate
144 * snapback mechanism in case the cgroup needs more IO vtime for itself.
146 * Note that adjusting down surplus weights has the same effects as
147 * accelerating vtime for other cgroups and work conservation can also be
148 * implemented by adjusting vrate dynamically. However, working out who can
149 * donate and who should take back how much requires hweight propagation
150 * anyway, making it easier to implement and understand as a separate
151 * mechanism.
153 * 3. Monitoring
155 * Instead of debugfs or other clumsy monitoring mechanisms, this
156 * controller uses a drgn based monitoring script -
157 * tools/cgroup/iocost_monitor.py. For details on drgn, please see
158 * https://github.com/osandov/drgn. The output looks like the following.
160 * sdb RUN per=300ms cur_per=234.218:v203.695 busy= +1 vrate= 62.12%
161 * active weight hweight% inflt% dbt delay usages%
162 * test/a * 50/ 50 33.33/ 33.33 27.65 2 0*041 033:033:033
163 * test/b * 100/ 100 66.67/ 66.67 17.56 0 0*000 066:079:077
165 * - per : Timer period
166 * - cur_per : Internal wall and device vtime clock
167 * - vrate : Device virtual time rate against wall clock
168 * - weight : Surplus-adjusted and configured weights
169 * - hweight : Surplus-adjusted and configured hierarchical weights
170 * - inflt : The percentage of in-flight IO cost at the end of last period
171 * - del_ms : Deferred issuer delay induction level and duration
172 * - usages : Usage history
175 #include <linux/kernel.h>
176 #include <linux/module.h>
177 #include <linux/timer.h>
178 #include <linux/time64.h>
179 #include <linux/parser.h>
180 #include <linux/sched/signal.h>
181 #include <asm/local.h>
182 #include <asm/local64.h>
183 #include "blk-rq-qos.h"
184 #include "blk-stat.h"
186 #include "blk-cgroup.h"
188 #ifdef CONFIG_TRACEPOINTS
190 /* copied from TRACE_CGROUP_PATH, see cgroup-internal.h */
191 #define TRACE_IOCG_PATH_LEN 1024
192 static DEFINE_SPINLOCK(trace_iocg_path_lock);
193 static char trace_iocg_path[TRACE_IOCG_PATH_LEN];
195 #define TRACE_IOCG_PATH(type, iocg, ...) \
197 unsigned long flags; \
198 if (trace_iocost_##type##_enabled()) { \
199 spin_lock_irqsave(&trace_iocg_path_lock, flags); \
200 cgroup_path(iocg_to_blkg(iocg)->blkcg->css.cgroup, \
201 trace_iocg_path, TRACE_IOCG_PATH_LEN); \
202 trace_iocost_##type(iocg, trace_iocg_path, \
204 spin_unlock_irqrestore(&trace_iocg_path_lock, flags); \
208 #else /* CONFIG_TRACEPOINTS */
209 #define TRACE_IOCG_PATH(type, iocg, ...) do { } while (0)
210 #endif /* CONFIG_TRACEPOINTS */
215 /* timer period is calculated from latency requirements, bound it */
216 MIN_PERIOD = USEC_PER_MSEC,
217 MAX_PERIOD = USEC_PER_SEC,
220 * iocg->vtime is targeted at 50% behind the device vtime, which
221 * serves as its IO credit buffer. Surplus weight adjustment is
222 * immediately canceled if the vtime margin runs below 10%.
226 MARGIN_TARGET_PCT = 50,
228 INUSE_ADJ_STEP_PCT = 25,
230 /* Have some play in timer operations */
233 /* 1/64k is granular enough and can easily be handled w/ u32 */
234 WEIGHT_ONE = 1 << 16,
239 * As vtime is used to calculate the cost of each IO, it needs to
240 * be fairly high precision. For example, it should be able to
241 * represent the cost of a single page worth of discard with
242 * sufficient accuracy. At the same time, it should be able to
243 * represent reasonably long enough durations to be useful and
244 * convenient during operation.
246 * 1s worth of vtime is 2^37. This gives us both sub-nanosecond
247 * granularity and days of wrap-around time even at extreme vrates.
249 VTIME_PER_SEC_SHIFT = 37,
250 VTIME_PER_SEC = 1LLU << VTIME_PER_SEC_SHIFT,
251 VTIME_PER_USEC = VTIME_PER_SEC / USEC_PER_SEC,
252 VTIME_PER_NSEC = VTIME_PER_SEC / NSEC_PER_SEC,
254 /* bound vrate adjustments within two orders of magnitude */
255 VRATE_MIN_PPM = 10000, /* 1% */
256 VRATE_MAX_PPM = 100000000, /* 10000% */
258 VRATE_MIN = VTIME_PER_USEC * VRATE_MIN_PPM / MILLION,
259 VRATE_CLAMP_ADJ_PCT = 4,
261 /* switch iff the conditions are met for longer than this */
262 AUTOP_CYCLE_NSEC = 10LLU * NSEC_PER_SEC,
266 /* if IOs end up waiting for requests, issue less */
267 RQ_WAIT_BUSY_PCT = 5,
269 /* unbusy hysteresis */
273 * The effect of delay is indirect and non-linear and a huge amount of
274 * future debt can accumulate abruptly while unthrottled. Linearly scale
275 * up delay as debt is going up and then let it decay exponentially.
276 * This gives us quick ramp ups while delay is accumulating and long
277 * tails which can help reduce the frequency of debt explosions on
278 * unthrottle. The parameters are experimentally determined.
280 * The delay mechanism provides adequate protection and behavior in many
281 * cases. However, this is far from ideal and falls short on both
282 * fronts. The debtors are often throttled too harshly costing a
283 * significant level of fairness and possibly total work while the
284 * protection against their impacts on the system can be choppy and
285 * unreliable.
287 * The shortcoming primarily stems from the fact that, unlike for page
288 * cache, the kernel doesn't have a well-defined back-pressure propagation
289 * mechanism and policies for anonymous memory. Fully addressing this
290 * issue will likely require substantial improvements in the area.
292 MIN_DELAY_THR_PCT = 500,
293 MAX_DELAY_THR_PCT = 25000,
295 MAX_DELAY = 250 * USEC_PER_MSEC,
297 /* halve debts if avg usage over 100ms is under 50% */
299 DFGV_PERIOD = 100 * USEC_PER_MSEC,
301 /* don't let cmds which take a very long time pin lagging for too long */
302 MAX_LAGGING_PERIODS = 10,
305 * Count IO size in 4k pages. The 12-bit shift helps keep the
306 * size-proportional components of the cost calculation within a
307 * similar number of digits as the per-IO cost components.
310 IOC_PAGE_SIZE = 1 << IOC_PAGE_SHIFT,
311 IOC_SECT_TO_PAGE_SHIFT = IOC_PAGE_SHIFT - SECTOR_SHIFT,
313 /* if further than 16M apart, consider IOs random for the linear model */
314 LCOEF_RANDIO_PAGES = 4096,
323 /* io.cost.qos controls including per-dev enable of the whole controller */
330 /* io.cost.qos params */
341 /* io.cost.model controls */
348 /* builtin linear cost model coefficients */
378 u32 qos[NR_QOS_PARAMS];
379 u64 i_lcoefs[NR_I_LCOEFS];
380 u64 lcoefs[NR_LCOEFS];
381 u32 too_fast_vrate_pct;
382 u32 too_slow_vrate_pct;
398 struct ioc_pcpu_stat {
399 struct ioc_missed missed[2];
401 local64_t rq_wait_ns;
411 struct ioc_params params;
412 struct ioc_margins margins;
419 struct timer_list timer;
420 struct list_head active_iocgs; /* active cgroups */
421 struct ioc_pcpu_stat __percpu *pcpu_stat;
423 enum ioc_running running;
424 atomic64_t vtime_rate;
428 seqcount_spinlock_t period_seqcount;
429 u64 period_at; /* wallclock starttime */
430 u64 period_at_vtime; /* vtime starttime */
432 atomic64_t cur_period; /* inc'd each period */
433 int busy_level; /* saturation history */
435 bool weights_updated;
436 atomic_t hweight_gen; /* for lazy hweights */
438 /* debt forgiveness */
441 u64 dfgv_usage_us_sum;
443 u64 autop_too_fast_at;
444 u64 autop_too_slow_at;
446 bool user_qos_params:1;
447 bool user_cost_model:1;
450 struct iocg_pcpu_stat {
451 local64_t abs_vusage;
461 /* per device-cgroup pair */
463 struct blkg_policy_data pd;
467 * An iocg can get its weight from two sources - an explicit
468 * per-device-cgroup configuration or the default weight of the
469 * cgroup. `cfg_weight` is the explicit per-device-cgroup
470 * configuration. `weight` is the effective weight considering both
471 * sources.
473 * When an idle cgroup becomes active its `active` goes from 0 to
474 * `weight`. `inuse` is the surplus adjusted active weight.
475 * `active` and `inuse` are used to calculate `hweight_active` and
478 * `last_inuse` remembers `inuse` while an iocg is idle to persist
479 * surplus adjustments.
481 * `inuse` may be adjusted dynamically during the period. `saved_*` are used
482 * to determine and track adjustments.
492 sector_t cursor; /* to detect randio */
495 * `vtime` is this iocg's vtime cursor which progresses as IOs are
496 * issued. If lagging behind device vtime, the delta represents
497 * the currently available IO budget. If running ahead, the delta
498 * represents the overage.
500 * `vtime_done` is the same but progressed on completion rather
501 * than issue. The delta behind `vtime` represents the cost of
502 * currently in-flight IOs.
505 atomic64_t done_vtime;
508 /* current delay in effect and when it started */
513 * The period this iocg was last active in. Used for deactivation
514 * and invalidating `vtime`.
516 atomic64_t active_period;
517 struct list_head active_list;
519 /* see __propagate_weights() and current_hweight() for details */
520 u64 child_active_sum;
522 u64 child_adjusted_sum;
526 u32 hweight_donating;
527 u32 hweight_after_donation;
529 struct list_head walk_list;
530 struct list_head surplus_list;
532 struct wait_queue_head waitq;
533 struct hrtimer waitq_timer;
535 /* timestamp at the latest activation */
539 struct iocg_pcpu_stat __percpu *pcpu_stat;
540 struct iocg_stat stat;
541 struct iocg_stat last_stat;
542 u64 last_stat_abs_vusage;
548 /* this iocg's depth in the hierarchy and ancestors including self */
550 struct ioc_gq *ancestors[];
555 struct blkcg_policy_data cpd;
556 unsigned int dfl_weight;
567 struct wait_queue_entry wait;
573 struct iocg_wake_ctx {
579 static const struct ioc_params autop[] = {
582 [QOS_RLAT] = 250000, /* 250ms */
584 [QOS_MIN] = VRATE_MIN_PPM,
585 [QOS_MAX] = VRATE_MAX_PPM,
588 [I_LCOEF_RBPS] = 174019176,
589 [I_LCOEF_RSEQIOPS] = 41708,
590 [I_LCOEF_RRANDIOPS] = 370,
591 [I_LCOEF_WBPS] = 178075866,
592 [I_LCOEF_WSEQIOPS] = 42705,
593 [I_LCOEF_WRANDIOPS] = 378,
598 [QOS_RLAT] = 25000, /* 25ms */
600 [QOS_MIN] = VRATE_MIN_PPM,
601 [QOS_MAX] = VRATE_MAX_PPM,
604 [I_LCOEF_RBPS] = 245855193,
605 [I_LCOEF_RSEQIOPS] = 61575,
606 [I_LCOEF_RRANDIOPS] = 6946,
607 [I_LCOEF_WBPS] = 141365009,
608 [I_LCOEF_WSEQIOPS] = 33716,
609 [I_LCOEF_WRANDIOPS] = 26796,
614 [QOS_RLAT] = 25000, /* 25ms */
616 [QOS_MIN] = VRATE_MIN_PPM,
617 [QOS_MAX] = VRATE_MAX_PPM,
620 [I_LCOEF_RBPS] = 488636629,
621 [I_LCOEF_RSEQIOPS] = 8932,
622 [I_LCOEF_RRANDIOPS] = 8518,
623 [I_LCOEF_WBPS] = 427891549,
624 [I_LCOEF_WSEQIOPS] = 28755,
625 [I_LCOEF_WRANDIOPS] = 21940,
627 .too_fast_vrate_pct = 500,
631 [QOS_RLAT] = 5000, /* 5ms */
633 [QOS_MIN] = VRATE_MIN_PPM,
634 [QOS_MAX] = VRATE_MAX_PPM,
637 [I_LCOEF_RBPS] = 3102524156LLU,
638 [I_LCOEF_RSEQIOPS] = 724816,
639 [I_LCOEF_RRANDIOPS] = 778122,
640 [I_LCOEF_WBPS] = 1742780862LLU,
641 [I_LCOEF_WSEQIOPS] = 425702,
642 [I_LCOEF_WRANDIOPS] = 443193,
644 .too_slow_vrate_pct = 10,
649 * vrate adjust percentages indexed by ioc->busy_level. We adjust up on
650 * vtime credit shortage and down on device saturation.
652 static u32 vrate_adj_pct[] =
654 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
655 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
656 4, 4, 4, 4, 4, 4, 4, 4, 8, 8, 8, 8, 8, 8, 8, 8, 16 };
658 static struct blkcg_policy blkcg_policy_iocost;
660 /* accessors and helpers */
661 static struct ioc *rqos_to_ioc(struct rq_qos *rqos)
663 return container_of(rqos, struct ioc, rqos);
666 static struct ioc *q_to_ioc(struct request_queue *q)
668 return rqos_to_ioc(rq_qos_id(q, RQ_QOS_COST));
671 static const char __maybe_unused *ioc_name(struct ioc *ioc)
673 struct gendisk *disk = ioc->rqos.q->disk;
677 return disk->disk_name;
680 static struct ioc_gq *pd_to_iocg(struct blkg_policy_data *pd)
682 return pd ? container_of(pd, struct ioc_gq, pd) : NULL;
685 static struct ioc_gq *blkg_to_iocg(struct blkcg_gq *blkg)
687 return pd_to_iocg(blkg_to_pd(blkg, &blkcg_policy_iocost));
690 static struct blkcg_gq *iocg_to_blkg(struct ioc_gq *iocg)
692 return pd_to_blkg(&iocg->pd);
695 static struct ioc_cgrp *blkcg_to_iocc(struct blkcg *blkcg)
697 return container_of(blkcg_to_cpd(blkcg, &blkcg_policy_iocost),
698 struct ioc_cgrp, cpd);
702 * Scale @abs_cost to the inverse of @hw_inuse. The lower the hierarchical
703 * weight, the more expensive each IO. Must round up.
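 * For example, charging an IO with a 10ms absolute cost against a cgroup
 * whose hw_inuse is a quarter of WEIGHT_ONE consumes 40ms of that cgroup's
 * vtime.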
705 static u64 abs_cost_to_cost(u64 abs_cost, u32 hw_inuse)
707 return DIV64_U64_ROUND_UP(abs_cost * WEIGHT_ONE, hw_inuse);
711 * The inverse of abs_cost_to_cost(). Must round up.
713 static u64 cost_to_abs_cost(u64 cost, u32 hw_inuse)
715 return DIV64_U64_ROUND_UP(cost * hw_inuse, WEIGHT_ONE);
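/*
 * Commit the cost of @bio to @iocg: stash the scaled @cost on the bio,
 * advance the iocg's vtime by @cost and account @abs_cost in the per-cpu
 * absolute usage counter.
 */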
718 static void iocg_commit_bio(struct ioc_gq *iocg, struct bio *bio,
719 u64 abs_cost, u64 cost)
721 struct iocg_pcpu_stat *gcs;
723 bio->bi_iocost_cost = cost;
724 atomic64_add(cost, &iocg->vtime);
726 gcs = get_cpu_ptr(iocg->pcpu_stat);
727 local64_add(abs_cost, &gcs->abs_vusage);
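/*
 * Lock helpers for @iocg->waitq.lock. When @lock_ioc is %true, ioc->lock is
 * taken first and nests outside the waitq lock; irq flags are saved into and
 * restored from *@flags.
 */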
731 static void iocg_lock(struct ioc_gq *iocg, bool lock_ioc, unsigned long *flags)
734 spin_lock_irqsave(&iocg->ioc->lock, *flags);
735 spin_lock(&iocg->waitq.lock);
737 spin_lock_irqsave(&iocg->waitq.lock, *flags);
741 static void iocg_unlock(struct ioc_gq *iocg, bool unlock_ioc, unsigned long *flags)
744 spin_unlock(&iocg->waitq.lock);
745 spin_unlock_irqrestore(&iocg->ioc->lock, *flags);
747 spin_unlock_irqrestore(&iocg->waitq.lock, *flags);
751 #define CREATE_TRACE_POINTS
752 #include <trace/events/iocost.h>
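/*
 * Recalculate the vtime margins (min/low/target) from the current period
 * length and base vrate. The margins are expressed in vtime units.
 */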
754 static void ioc_refresh_margins(struct ioc *ioc)
756 struct ioc_margins *margins = &ioc->margins;
757 u32 period_us = ioc->period_us;
758 u64 vrate = ioc->vtime_base_rate;
760 margins->min = (period_us * MARGIN_MIN_PCT / 100) * vrate;
761 margins->low = (period_us * MARGIN_LOW_PCT / 100) * vrate;
762 margins->target = (period_us * MARGIN_TARGET_PCT / 100) * vrate;
765 /* latency QoS params changed, update period_us and all the dependent params */
766 static void ioc_refresh_period_us(struct ioc *ioc)
768 u32 ppm, lat, multi, period_us;
770 lockdep_assert_held(&ioc->lock);
772 /* pick the higher latency target */
773 if (ioc->params.qos[QOS_RLAT] >= ioc->params.qos[QOS_WLAT]) {
774 ppm = ioc->params.qos[QOS_RPPM];
775 lat = ioc->params.qos[QOS_RLAT];
777 ppm = ioc->params.qos[QOS_WPPM];
778 lat = ioc->params.qos[QOS_WLAT];
782 * We want the period to be long enough to contain a healthy number
783 * of IOs while short enough for granular control. Define it as a
784 * multiple of the latency target. Ideally, the multiplier should
785 * be scaled according to the percentile so that it would nominally
786 * contain a certain number of requests. Let's be simpler and
787 * scale it linearly so that it's 2x >= pct(90) and 10x at pct(50).
790 multi = max_t(u32, (MILLION - ppm) / 50000, 2);
793 period_us = multi * lat;
794 period_us = clamp_t(u32, period_us, MIN_PERIOD, MAX_PERIOD);
796 /* calculate dependent params */
797 ioc->period_us = period_us;
798 ioc->timer_slack_ns = div64_u64(
799 (u64)period_us * NSEC_PER_USEC * TIMER_SLACK_PCT,
801 ioc_refresh_margins(ioc);
804 static int ioc_autop_idx(struct ioc *ioc)
806 int idx = ioc->autop_idx;
807 const struct ioc_params *p = &autop[idx];
812 if (!blk_queue_nonrot(ioc->rqos.q))
815 /* handle SATA SSDs w/ broken NCQ */
816 if (blk_queue_depth(ioc->rqos.q) == 1)
817 return AUTOP_SSD_QD1;
819 /* use one of the normal ssd sets */
820 if (idx < AUTOP_SSD_DFL)
821 return AUTOP_SSD_DFL;
823 /* if user is overriding anything, maintain what was there */
824 if (ioc->user_qos_params || ioc->user_cost_model)
827 /* step up/down based on the vrate */
828 vrate_pct = div64_u64(ioc->vtime_base_rate * 100, VTIME_PER_USEC);
829 now_ns = ktime_get_ns();
831 if (p->too_fast_vrate_pct && p->too_fast_vrate_pct <= vrate_pct) {
832 if (!ioc->autop_too_fast_at)
833 ioc->autop_too_fast_at = now_ns;
834 if (now_ns - ioc->autop_too_fast_at >= AUTOP_CYCLE_NSEC)
837 ioc->autop_too_fast_at = 0;
840 if (p->too_slow_vrate_pct && p->too_slow_vrate_pct >= vrate_pct) {
841 if (!ioc->autop_too_slow_at)
842 ioc->autop_too_slow_at = now_ns;
843 if (now_ns - ioc->autop_too_slow_at >= AUTOP_CYCLE_NSEC)
846 ioc->autop_too_slow_at = 0;
853 * Take the following as input
855 * @bps maximum sequential throughput
856 * @seqiops maximum sequential 4k iops
857 * @randiops maximum random 4k iops
859 * and calculate the linear model cost coefficients.
861 * *@page per-page cost 1s / (@bps / 4096)
862 * *@seqio base cost of a seq IO max((1s / @seqiops) - *@page, 0)
863 * *@randio    base cost of a rand IO  max((1s / @randiops) - *@page, 0)
865 static void calc_lcoefs(u64 bps, u64 seqiops, u64 randiops,
866 u64 *page, u64 *seqio, u64 *randio)
870 *page = *seqio = *randio = 0;
873 u64 bps_pages = DIV_ROUND_UP_ULL(bps, IOC_PAGE_SIZE);
876 *page = DIV64_U64_ROUND_UP(VTIME_PER_SEC, bps_pages);
882 v = DIV64_U64_ROUND_UP(VTIME_PER_SEC, seqiops);
888 v = DIV64_U64_ROUND_UP(VTIME_PER_SEC, randiops);
894 static void ioc_refresh_lcoefs(struct ioc *ioc)
896 u64 *u = ioc->params.i_lcoefs;
897 u64 *c = ioc->params.lcoefs;
899 calc_lcoefs(u[I_LCOEF_RBPS], u[I_LCOEF_RSEQIOPS], u[I_LCOEF_RRANDIOPS],
900 &c[LCOEF_RPAGE], &c[LCOEF_RSEQIO], &c[LCOEF_RRANDIO]);
901 calc_lcoefs(u[I_LCOEF_WBPS], u[I_LCOEF_WSEQIOPS], u[I_LCOEF_WRANDIOPS],
902 &c[LCOEF_WPAGE], &c[LCOEF_WSEQIO], &c[LCOEF_WRANDIO]);
905 static bool ioc_refresh_params(struct ioc *ioc, bool force)
907 const struct ioc_params *p;
910 lockdep_assert_held(&ioc->lock);
912 idx = ioc_autop_idx(ioc);
915 if (idx == ioc->autop_idx && !force)
918 if (idx != ioc->autop_idx)
919 atomic64_set(&ioc->vtime_rate, VTIME_PER_USEC);
921 ioc->autop_idx = idx;
922 ioc->autop_too_fast_at = 0;
923 ioc->autop_too_slow_at = 0;
925 if (!ioc->user_qos_params)
926 memcpy(ioc->params.qos, p->qos, sizeof(p->qos));
927 if (!ioc->user_cost_model)
928 memcpy(ioc->params.i_lcoefs, p->i_lcoefs, sizeof(p->i_lcoefs));
930 ioc_refresh_period_us(ioc);
931 ioc_refresh_lcoefs(ioc);
933 ioc->vrate_min = DIV64_U64_ROUND_UP((u64)ioc->params.qos[QOS_MIN] *
934 VTIME_PER_USEC, MILLION);
935 ioc->vrate_max = div64_u64((u64)ioc->params.qos[QOS_MAX] *
936 VTIME_PER_USEC, MILLION);
942 * When an iocg accumulates too much vtime or gets deactivated, we throw away
943 * some vtime, which lowers the overall device utilization. As the exact amount
944 * which is being thrown away is known, we can compensate by accelerating the
945 * vrate accordingly so that the extra vtime generated in the current period
946 * matches what got lost.
948 static void ioc_refresh_vrate(struct ioc *ioc, struct ioc_now *now)
950 s64 pleft = ioc->period_at + ioc->period_us - now->now;
951 s64 vperiod = ioc->period_us * ioc->vtime_base_rate;
952 s64 vcomp, vcomp_min, vcomp_max;
954 lockdep_assert_held(&ioc->lock);
956 /* we need some time left in this period */
961 * Calculate how much vrate should be adjusted to offset the error.
962 * Limit the amount of adjustment and deduct the adjusted amount from
965 vcomp = -div64_s64(ioc->vtime_err, pleft);
966 vcomp_min = -(ioc->vtime_base_rate >> 1);
967 vcomp_max = ioc->vtime_base_rate;
968 vcomp = clamp(vcomp, vcomp_min, vcomp_max);
970 ioc->vtime_err += vcomp * pleft;
972 atomic64_set(&ioc->vtime_rate, ioc->vtime_base_rate + vcomp);
974 /* bound how much error can accumulate */
975 ioc->vtime_err = clamp(ioc->vtime_err, -vperiod, vperiod);
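/*
 * Adjust the base vrate according to busy_level: speed up on vtime credit
 * shortage, slow down on device saturation, and walk an out-of-bounds vrate
 * back towards [vrate_min, vrate_max] gradually (VRATE_CLAMP_ADJ_PCT per
 * period) as the bounds themselves can change abruptly.
 */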
978 static void ioc_adjust_base_vrate(struct ioc *ioc, u32 rq_wait_pct,
979 int nr_lagging, int nr_shortages,
980 int prev_busy_level, u32 *missed_ppm)
982 u64 vrate = ioc->vtime_base_rate;
983 u64 vrate_min = ioc->vrate_min, vrate_max = ioc->vrate_max;
985 if (!ioc->busy_level || (ioc->busy_level < 0 && nr_lagging)) {
986 if (ioc->busy_level != prev_busy_level || nr_lagging)
987 trace_iocost_ioc_vrate_adj(ioc, atomic64_read(&ioc->vtime_rate),
988 missed_ppm, rq_wait_pct,
989 nr_lagging, nr_shortages);
995 * If vrate is out of bounds, apply clamp gradually as the
996 * bounds can change abruptly. Otherwise, apply busy_level
997 * based adjustment.
999 if (vrate < vrate_min) {
1000 vrate = div64_u64(vrate * (100 + VRATE_CLAMP_ADJ_PCT), 100);
1001 vrate = min(vrate, vrate_min);
1002 } else if (vrate > vrate_max) {
1003 vrate = div64_u64(vrate * (100 - VRATE_CLAMP_ADJ_PCT), 100);
1004 vrate = max(vrate, vrate_max);
1006 int idx = min_t(int, abs(ioc->busy_level),
1007 ARRAY_SIZE(vrate_adj_pct) - 1);
1008 u32 adj_pct = vrate_adj_pct[idx];
1010 if (ioc->busy_level > 0)
1011 adj_pct = 100 - adj_pct;
1013 adj_pct = 100 + adj_pct;
1015 vrate = clamp(DIV64_U64_ROUND_UP(vrate * adj_pct, 100),
1016 vrate_min, vrate_max);
1019 trace_iocost_ioc_vrate_adj(ioc, vrate, missed_ppm, rq_wait_pct,
1020 nr_lagging, nr_shortages);
1022 ioc->vtime_base_rate = vrate;
1023 ioc_refresh_margins(ioc);
1026 /* take a snapshot of the current [v]time and vrate */
1027 static void ioc_now(struct ioc *ioc, struct ioc_now *now)
1031 now->now_ns = ktime_get();
1032 now->now = ktime_to_us(now->now_ns);
1033 now->vrate = atomic64_read(&ioc->vtime_rate);
1036 * The current vtime is
1038 * vtime at period start + (wallclock time since the start) * vrate
1040 * As a consistent snapshot of `period_at_vtime` and `period_at` is
1041 * needed, they're seqcount protected.
1044 seq = read_seqcount_begin(&ioc->period_seqcount);
1045 now->vnow = ioc->period_at_vtime +
1046 (now->now - ioc->period_at) * now->vrate;
1047 } while (read_seqcount_retry(&ioc->period_seqcount, seq));
1050 static void ioc_start_period(struct ioc *ioc, struct ioc_now *now)
1052 WARN_ON_ONCE(ioc->running != IOC_RUNNING);
1054 write_seqcount_begin(&ioc->period_seqcount);
1055 ioc->period_at = now->now;
1056 ioc->period_at_vtime = now->vnow;
1057 write_seqcount_end(&ioc->period_seqcount);
1059 ioc->timer.expires = jiffies + usecs_to_jiffies(ioc->period_us);
1060 add_timer(&ioc->timer);
1064 * Update @iocg's `active` and `inuse` to @active and @inuse, update level
1065 * weight sums and propagate upwards accordingly. If @save, the current margin
1066 * is saved to be used as reference for later inuse in-period adjustments.
1068 static void __propagate_weights(struct ioc_gq *iocg, u32 active, u32 inuse,
1069 bool save, struct ioc_now *now)
1071 struct ioc *ioc = iocg->ioc;
1074 lockdep_assert_held(&ioc->lock);
1077 * For an active leaf node, its inuse shouldn't be zero or exceed
1078 * @active. An active internal node's inuse is solely determined by the
1079 * inuse to active ratio of its children regardless of @inuse.
1081 if (list_empty(&iocg->active_list) && iocg->child_active_sum) {
1082 inuse = DIV64_U64_ROUND_UP(active * iocg->child_inuse_sum,
1083 iocg->child_active_sum);
1085 inuse = clamp_t(u32, inuse, 1, active);
1088 iocg->last_inuse = iocg->inuse;
1090 iocg->saved_margin = now->vnow - atomic64_read(&iocg->vtime);
1092 if (active == iocg->active && inuse == iocg->inuse)
1095 for (lvl = iocg->level - 1; lvl >= 0; lvl--) {
1096 struct ioc_gq *parent = iocg->ancestors[lvl];
1097 struct ioc_gq *child = iocg->ancestors[lvl + 1];
1098 u32 parent_active = 0, parent_inuse = 0;
1100 /* update the level sums */
1101 parent->child_active_sum += (s32)(active - child->active);
1102 parent->child_inuse_sum += (s32)(inuse - child->inuse);
1103 /* apply the updates */
1104 child->active = active;
1105 child->inuse = inuse;
1108 * The delta between inuse and active sums indicates that
1109 * much of weight is being given away. Parent's inuse
1110 * and active should reflect the ratio.
1112 if (parent->child_active_sum) {
1113 parent_active = parent->weight;
1114 parent_inuse = DIV64_U64_ROUND_UP(
1115 parent_active * parent->child_inuse_sum,
1116 parent->child_active_sum);
1119 /* do we need to keep walking up? */
1120 if (parent_active == parent->active &&
1121 parent_inuse == parent->inuse)
1124 active = parent_active;
1125 inuse = parent_inuse;
1128 ioc->weights_updated = true;
1131 static void commit_weights(struct ioc *ioc)
1133 lockdep_assert_held(&ioc->lock);
1135 if (ioc->weights_updated) {
1136 /* paired with rmb in current_hweight(), see there */
1138 atomic_inc(&ioc->hweight_gen);
1139 ioc->weights_updated = false;
1143 static void propagate_weights(struct ioc_gq *iocg, u32 active, u32 inuse,
1144 bool save, struct ioc_now *now)
1146 __propagate_weights(iocg, active, inuse, save, now);
1147 commit_weights(iocg->ioc);
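/*
 * Calculate and cache @iocg's hierarchical weights. The cached values are
 * recomputed lazily whenever ioc->hweight_gen has moved on since the last
 * calculation for this iocg.
 */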
1150 static void current_hweight(struct ioc_gq *iocg, u32 *hw_activep, u32 *hw_inusep)
1152 struct ioc *ioc = iocg->ioc;
1157 /* hot path - if uptodate, use cached */
1158 ioc_gen = atomic_read(&ioc->hweight_gen);
1159 if (ioc_gen == iocg->hweight_gen)
1163 * Paired with wmb in commit_weights(). If we saw the updated
1164 * hweight_gen, all the weight updates from __propagate_weights() are
1167 * We can race with weight updates during calculation and get it
1168 * wrong. However, hweight_gen would have changed and a future
1169 * reader will recalculate and we're guaranteed to discard the
1170 * wrong result soon.
1174 hwa = hwi = WEIGHT_ONE;
1175 for (lvl = 0; lvl <= iocg->level - 1; lvl++) {
1176 struct ioc_gq *parent = iocg->ancestors[lvl];
1177 struct ioc_gq *child = iocg->ancestors[lvl + 1];
1178 u64 active_sum = READ_ONCE(parent->child_active_sum);
1179 u64 inuse_sum = READ_ONCE(parent->child_inuse_sum);
1180 u32 active = READ_ONCE(child->active);
1181 u32 inuse = READ_ONCE(child->inuse);
1183 /* we can race with deactivations and either may read as zero */
1184 if (!active_sum || !inuse_sum)
1187 active_sum = max_t(u64, active, active_sum);
1188 hwa = div64_u64((u64)hwa * active, active_sum);
1190 inuse_sum = max_t(u64, inuse, inuse_sum);
1191 hwi = div64_u64((u64)hwi * inuse, inuse_sum);
1194 iocg->hweight_active = max_t(u32, hwa, 1);
1195 iocg->hweight_inuse = max_t(u32, hwi, 1);
1196 iocg->hweight_gen = ioc_gen;
1199 *hw_activep = iocg->hweight_active;
1201 *hw_inusep = iocg->hweight_inuse;
1205 * Calculate the hweight_inuse @iocg would get with max @inuse assuming all the
1206 * other weights stay unchanged.
1208 static u32 current_hweight_max(struct ioc_gq *iocg)
1210 u32 hwm = WEIGHT_ONE;
1211 u32 inuse = iocg->active;
1212 u64 child_inuse_sum;
1215 lockdep_assert_held(&iocg->ioc->lock);
1217 for (lvl = iocg->level - 1; lvl >= 0; lvl--) {
1218 struct ioc_gq *parent = iocg->ancestors[lvl];
1219 struct ioc_gq *child = iocg->ancestors[lvl + 1];
1221 child_inuse_sum = parent->child_inuse_sum + inuse - child->inuse;
1222 hwm = div64_u64((u64)hwm * inuse, child_inuse_sum);
1223 inuse = DIV64_U64_ROUND_UP(parent->active * child_inuse_sum,
1224 parent->child_active_sum);
1227 return max_t(u32, hwm, 1);
1230 static void weight_updated(struct ioc_gq *iocg, struct ioc_now *now)
1232 struct ioc *ioc = iocg->ioc;
1233 struct blkcg_gq *blkg = iocg_to_blkg(iocg);
1234 struct ioc_cgrp *iocc = blkcg_to_iocc(blkg->blkcg);
1237 lockdep_assert_held(&ioc->lock);
1239 weight = iocg->cfg_weight ?: iocc->dfl_weight;
1240 if (weight != iocg->weight && iocg->active)
1241 propagate_weights(iocg, weight, iocg->inuse, true, now);
1242 iocg->weight = weight;
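/*
 * Mark @iocg active for the current period: start it off with at most the
 * target vtime budget, propagate its weight, add it to ioc->active_iocgs and
 * kick the period timer if the controller was idle. Returns %false when the
 * IO should be treated as a root-level IO instead (internal node IOs and
 * leaf-only constraint violations).
 */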
1245 static bool iocg_activate(struct ioc_gq *iocg, struct ioc_now *now)
1247 struct ioc *ioc = iocg->ioc;
1248 u64 last_period, cur_period;
1253 * If we seem to be already active, just update the stamp to tell the
1254 * timer that we're still active. We don't mind occasional races.
1256 if (!list_empty(&iocg->active_list)) {
1258 cur_period = atomic64_read(&ioc->cur_period);
1259 if (atomic64_read(&iocg->active_period) != cur_period)
1260 atomic64_set(&iocg->active_period, cur_period);
1264 /* racy check on internal node IOs, treat as root level IOs */
1265 if (iocg->child_active_sum)
1268 spin_lock_irq(&ioc->lock);
1273 cur_period = atomic64_read(&ioc->cur_period);
1274 last_period = atomic64_read(&iocg->active_period);
1275 atomic64_set(&iocg->active_period, cur_period);
1277 /* already activated or breaking leaf-only constraint? */
1278 if (!list_empty(&iocg->active_list))
1279 goto succeed_unlock;
1280 for (i = iocg->level - 1; i > 0; i--)
1281 if (!list_empty(&iocg->ancestors[i]->active_list))
1284 if (iocg->child_active_sum)
1288 * Always start with the target budget. On deactivation, we throw away
1289 * anything above it.
1291 vtarget = now->vnow - ioc->margins.target;
1292 vtime = atomic64_read(&iocg->vtime);
1294 atomic64_add(vtarget - vtime, &iocg->vtime);
1295 atomic64_add(vtarget - vtime, &iocg->done_vtime);
1299 * Activate, propagate weight and start period timer if not
1300 * running. Reset hweight_gen to avoid accidental match from
1301 * wrapping.
1303 iocg->hweight_gen = atomic_read(&ioc->hweight_gen) - 1;
1304 list_add(&iocg->active_list, &ioc->active_iocgs);
1306 propagate_weights(iocg, iocg->weight,
1307 iocg->last_inuse ?: iocg->weight, true, now);
1309 TRACE_IOCG_PATH(iocg_activate, iocg, now,
1310 last_period, cur_period, vtime);
1312 iocg->activated_at = now->now;
1314 if (ioc->running == IOC_IDLE) {
1315 ioc->running = IOC_RUNNING;
1316 ioc->dfgv_period_at = now->now;
1317 ioc->dfgv_period_rem = 0;
1318 ioc_start_period(ioc, now);
1322 spin_unlock_irq(&ioc->lock);
1326 spin_unlock_irq(&ioc->lock);
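/*
 * Re-evaluate the issue delay applied to @iocg's issuers: decay the existing
 * delay by half per second, derive a new one from how far vtime + debt
 * overruns the device vtime, keep the larger of the two and apply it through
 * blkcg_set_delay(). Returns whether a delay is in effect.
 */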
1330 static bool iocg_kick_delay(struct ioc_gq *iocg, struct ioc_now *now)
1332 struct ioc *ioc = iocg->ioc;
1333 struct blkcg_gq *blkg = iocg_to_blkg(iocg);
1334 u64 tdelta, delay, new_delay, shift;
1335 s64 vover, vover_pct;
1338 lockdep_assert_held(&iocg->waitq.lock);
1341 * If the delay is set by another CPU, we may be in the past. No need to
1342 * change anything if so. This avoids decay calculation underflow.
1344 if (time_before64(now->now, iocg->delay_at))
1347 /* calculate the current delay in effect - 1/2 every second */
1348 tdelta = now->now - iocg->delay_at;
1349 shift = div64_u64(tdelta, USEC_PER_SEC);
1350 if (iocg->delay && shift < BITS_PER_LONG)
1351 delay = iocg->delay >> shift;
1355 /* calculate the new delay from the debt amount */
1356 current_hweight(iocg, &hwa, NULL);
1357 vover = atomic64_read(&iocg->vtime) +
1358 abs_cost_to_cost(iocg->abs_vdebt, hwa) - now->vnow;
1359 vover_pct = div64_s64(100 * vover,
1360 ioc->period_us * ioc->vtime_base_rate);
1362 if (vover_pct <= MIN_DELAY_THR_PCT)
1364 else if (vover_pct >= MAX_DELAY_THR_PCT)
1365 new_delay = MAX_DELAY;
1367 new_delay = MIN_DELAY +
1368 div_u64((MAX_DELAY - MIN_DELAY) *
1369 (vover_pct - MIN_DELAY_THR_PCT),
1370 MAX_DELAY_THR_PCT - MIN_DELAY_THR_PCT);
1372 /* pick the higher one and apply */
1373 if (new_delay > delay) {
1374 iocg->delay = new_delay;
1375 iocg->delay_at = now->now;
1379 if (delay >= MIN_DELAY) {
1380 if (!iocg->indelay_since)
1381 iocg->indelay_since = now->now;
1382 blkcg_set_delay(blkg, delay * NSEC_PER_USEC);
1385 if (iocg->indelay_since) {
1386 iocg->stat.indelay_us += now->now - iocg->indelay_since;
1387 iocg->indelay_since = 0;
1390 blkcg_clear_delay(blkg);
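/*
 * Add @abs_cost to @iocg's absolute debt. On entering debt, inuse is dropped
 * to the minimum so that the iocg donates all of its share until the debt is
 * paid off; the cost is still accounted as usage.
 */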
1395 static void iocg_incur_debt(struct ioc_gq *iocg, u64 abs_cost,
1396 struct ioc_now *now)
1398 struct iocg_pcpu_stat *gcs;
1400 lockdep_assert_held(&iocg->ioc->lock);
1401 lockdep_assert_held(&iocg->waitq.lock);
1402 WARN_ON_ONCE(list_empty(&iocg->active_list));
1405 * Once in debt, debt handling owns inuse. @iocg stays at the minimum
1406 * inuse donating all of its share to others until its debt is paid off.
1408 if (!iocg->abs_vdebt && abs_cost) {
1409 iocg->indebt_since = now->now;
1410 propagate_weights(iocg, iocg->active, 0, false, now);
1413 iocg->abs_vdebt += abs_cost;
1415 gcs = get_cpu_ptr(iocg->pcpu_stat);
1416 local64_add(abs_cost, &gcs->abs_vusage);
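/*
 * Pay off up to @abs_vpay of @iocg's absolute debt. Once the debt is fully
 * paid, the indebted duration is accounted and inuse is restored.
 */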
1420 static void iocg_pay_debt(struct ioc_gq *iocg, u64 abs_vpay,
1421 struct ioc_now *now)
1423 lockdep_assert_held(&iocg->ioc->lock);
1424 lockdep_assert_held(&iocg->waitq.lock);
1426 /* make sure that nobody messed with @iocg */
1427 WARN_ON_ONCE(list_empty(&iocg->active_list));
1428 WARN_ON_ONCE(iocg->inuse > 1);
1430 iocg->abs_vdebt -= min(abs_vpay, iocg->abs_vdebt);
1432 /* if debt is paid in full, restore inuse */
1433 if (!iocg->abs_vdebt) {
1434 iocg->stat.indebt_us += now->now - iocg->indebt_since;
1435 iocg->indebt_since = 0;
1437 propagate_weights(iocg, iocg->active, iocg->last_inuse,
1442 static int iocg_wake_fn(struct wait_queue_entry *wq_entry, unsigned mode,
1443 int flags, void *key)
1445 struct iocg_wait *wait = container_of(wq_entry, struct iocg_wait, wait);
1446 struct iocg_wake_ctx *ctx = key;
1447 u64 cost = abs_cost_to_cost(wait->abs_cost, ctx->hw_inuse);
1449 ctx->vbudget -= cost;
1451 if (ctx->vbudget < 0)
1454 iocg_commit_bio(ctx->iocg, wait->bio, wait->abs_cost, cost);
1455 wait->committed = true;
1458 * autoremove_wake_function() removes the wait entry only when it
1459 * actually changed the task state. We want the wait always removed.
1460 * Remove explicitly and use default_wake_function(). Note that the
1461 * order of operations is important as finish_wait() tests whether
1462 * @wq_entry is removed without grabbing the lock.
1464 default_wake_function(wq_entry, mode, flags, key);
1465 list_del_init_careful(&wq_entry->entry);
1470 * Calculate the accumulated budget, pay debt if @pay_debt and wake up waiters
1471 * accordingly. When @pay_debt is %true, the caller must be holding ioc->lock in
1472 * addition to iocg->waitq.lock.
1474 static void iocg_kick_waitq(struct ioc_gq *iocg, bool pay_debt,
1475 struct ioc_now *now)
1477 struct ioc *ioc = iocg->ioc;
1478 struct iocg_wake_ctx ctx = { .iocg = iocg };
1479 u64 vshortage, expires, oexpires;
1483 lockdep_assert_held(&iocg->waitq.lock);
1485 current_hweight(iocg, &hwa, NULL);
1486 vbudget = now->vnow - atomic64_read(&iocg->vtime);
1489 if (pay_debt && iocg->abs_vdebt && vbudget > 0) {
1490 u64 abs_vbudget = cost_to_abs_cost(vbudget, hwa);
1491 u64 abs_vpay = min_t(u64, abs_vbudget, iocg->abs_vdebt);
1492 u64 vpay = abs_cost_to_cost(abs_vpay, hwa);
1494 lockdep_assert_held(&ioc->lock);
1496 atomic64_add(vpay, &iocg->vtime);
1497 atomic64_add(vpay, &iocg->done_vtime);
1498 iocg_pay_debt(iocg, abs_vpay, now);
1502 if (iocg->abs_vdebt || iocg->delay)
1503 iocg_kick_delay(iocg, now);
1506 * Debt can still be outstanding if we haven't paid all yet or the
1507 * caller raced and called without @pay_debt. Shouldn't wake up waiters
1508 * under debt. Make sure @vbudget reflects the outstanding amount and is
1509 * not positive.
1511 if (iocg->abs_vdebt) {
1512 s64 vdebt = abs_cost_to_cost(iocg->abs_vdebt, hwa);
1513 vbudget = min_t(s64, 0, vbudget - vdebt);
1517 * Wake up the ones which are due and see how much vtime we'll need for
1518 * the next one. As paying off debt restores hw_inuse, it must be read
1519 * after the above debt payment.
1521 ctx.vbudget = vbudget;
1522 current_hweight(iocg, NULL, &ctx.hw_inuse);
1524 __wake_up_locked_key(&iocg->waitq, TASK_NORMAL, &ctx);
1526 if (!waitqueue_active(&iocg->waitq)) {
1527 if (iocg->wait_since) {
1528 iocg->stat.wait_us += now->now - iocg->wait_since;
1529 iocg->wait_since = 0;
1534 if (!iocg->wait_since)
1535 iocg->wait_since = now->now;
1537 if (WARN_ON_ONCE(ctx.vbudget >= 0))
1540 /* determine next wakeup, add a timer margin to guarantee chunking */
1541 vshortage = -ctx.vbudget;
1542 expires = now->now_ns +
1543 DIV64_U64_ROUND_UP(vshortage, ioc->vtime_base_rate) *
1545 expires += ioc->timer_slack_ns;
1547 /* if already active and close enough, don't bother */
1548 oexpires = ktime_to_ns(hrtimer_get_softexpires(&iocg->waitq_timer));
1549 if (hrtimer_is_queued(&iocg->waitq_timer) &&
1550 abs(oexpires - expires) <= ioc->timer_slack_ns)
1553 hrtimer_start_range_ns(&iocg->waitq_timer, ns_to_ktime(expires),
1554 ioc->timer_slack_ns, HRTIMER_MODE_ABS);
1557 static enum hrtimer_restart iocg_waitq_timer_fn(struct hrtimer *timer)
1559 struct ioc_gq *iocg = container_of(timer, struct ioc_gq, waitq_timer);
1560 bool pay_debt = READ_ONCE(iocg->abs_vdebt);
1562 unsigned long flags;
1564 ioc_now(iocg->ioc, &now);
1566 iocg_lock(iocg, pay_debt, &flags);
1567 iocg_kick_waitq(iocg, pay_debt, &now);
1568 iocg_unlock(iocg, pay_debt, &flags);
1570 return HRTIMER_NORESTART;
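/*
 * Collect the per-cpu latency and rq_wait statistics accumulated since the
 * last call and convert them into missed-ppm for reads/writes and the rq
 * wait percentage over the period.
 */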
1573 static void ioc_lat_stat(struct ioc *ioc, u32 *missed_ppm_ar, u32 *rq_wait_pct_p)
1575 u32 nr_met[2] = { };
1576 u32 nr_missed[2] = { };
1580 for_each_online_cpu(cpu) {
1581 struct ioc_pcpu_stat *stat = per_cpu_ptr(ioc->pcpu_stat, cpu);
1582 u64 this_rq_wait_ns;
1584 for (rw = READ; rw <= WRITE; rw++) {
1585 u32 this_met = local_read(&stat->missed[rw].nr_met);
1586 u32 this_missed = local_read(&stat->missed[rw].nr_missed);
1588 nr_met[rw] += this_met - stat->missed[rw].last_met;
1589 nr_missed[rw] += this_missed - stat->missed[rw].last_missed;
1590 stat->missed[rw].last_met = this_met;
1591 stat->missed[rw].last_missed = this_missed;
1594 this_rq_wait_ns = local64_read(&stat->rq_wait_ns);
1595 rq_wait_ns += this_rq_wait_ns - stat->last_rq_wait_ns;
1596 stat->last_rq_wait_ns = this_rq_wait_ns;
1599 for (rw = READ; rw <= WRITE; rw++) {
1600 if (nr_met[rw] + nr_missed[rw])
1602 DIV64_U64_ROUND_UP((u64)nr_missed[rw] * MILLION,
1603 nr_met[rw] + nr_missed[rw]);
1605 missed_ppm_ar[rw] = 0;
1608 *rq_wait_pct_p = div64_u64(rq_wait_ns * 100,
1609 ioc->period_us * NSEC_PER_USEC);
1612 /* was iocg idle this period? */
1613 static bool iocg_is_idle(struct ioc_gq *iocg)
1615 struct ioc *ioc = iocg->ioc;
1617 /* did something get issued this period? */
1618 if (atomic64_read(&iocg->active_period) ==
1619 atomic64_read(&ioc->cur_period))
1622 /* is something in flight? */
1623 if (atomic64_read(&iocg->done_vtime) != atomic64_read(&iocg->vtime))
1630 * Call this function on the target leaf @iocg's to build pre-order traversal
1631 * list of all the ancestors in @inner_walk. The inner nodes are linked through
1632 * ->walk_list and the caller is responsible for dissolving the list after use.
1634 static void iocg_build_inner_walk(struct ioc_gq *iocg,
1635 struct list_head *inner_walk)
1639 WARN_ON_ONCE(!list_empty(&iocg->walk_list));
1641 /* find the first ancestor which hasn't been visited yet */
1642 for (lvl = iocg->level - 1; lvl >= 0; lvl--) {
1643 if (!list_empty(&iocg->ancestors[lvl]->walk_list))
1647 /* walk down and visit the inner nodes to get pre-order traversal */
1648 while (++lvl <= iocg->level - 1) {
1649 struct ioc_gq *inner = iocg->ancestors[lvl];
1651 /* record traversal order */
1652 list_add_tail(&inner->walk_list, inner_walk);
1656 /* propagate the deltas to the parent */
1657 static void iocg_flush_stat_upward(struct ioc_gq *iocg)
1659 if (iocg->level > 0) {
1660 struct iocg_stat *parent_stat =
1661 &iocg->ancestors[iocg->level - 1]->stat;
1663 parent_stat->usage_us +=
1664 iocg->stat.usage_us - iocg->last_stat.usage_us;
1665 parent_stat->wait_us +=
1666 iocg->stat.wait_us - iocg->last_stat.wait_us;
1667 parent_stat->indebt_us +=
1668 iocg->stat.indebt_us - iocg->last_stat.indebt_us;
1669 parent_stat->indelay_us +=
1670 iocg->stat.indelay_us - iocg->last_stat.indelay_us;
1673 iocg->last_stat = iocg->stat;
1676 /* collect per-cpu counters and propagate the deltas to the parent */
1677 static void iocg_flush_stat_leaf(struct ioc_gq *iocg, struct ioc_now *now)
1679 struct ioc *ioc = iocg->ioc;
1684 lockdep_assert_held(&iocg->ioc->lock);
1686 /* collect per-cpu counters */
1687 for_each_possible_cpu(cpu) {
1688 abs_vusage += local64_read(
1689 per_cpu_ptr(&iocg->pcpu_stat->abs_vusage, cpu));
1691 vusage_delta = abs_vusage - iocg->last_stat_abs_vusage;
1692 iocg->last_stat_abs_vusage = abs_vusage;
1694 iocg->usage_delta_us = div64_u64(vusage_delta, ioc->vtime_base_rate);
1695 iocg->stat.usage_us += iocg->usage_delta_us;
1697 iocg_flush_stat_upward(iocg);
1700 /* get stat counters ready for reading on all active iocgs */
1701 static void iocg_flush_stat(struct list_head *target_iocgs, struct ioc_now *now)
1703 LIST_HEAD(inner_walk);
1704 struct ioc_gq *iocg, *tiocg;
1706 /* flush leaves and build inner node walk list */
1707 list_for_each_entry(iocg, target_iocgs, active_list) {
1708 iocg_flush_stat_leaf(iocg, now);
1709 iocg_build_inner_walk(iocg, &inner_walk);
1712 /* keep flushing upwards by walking the inner list backwards */
1713 list_for_each_entry_safe_reverse(iocg, tiocg, &inner_walk, walk_list) {
1714 iocg_flush_stat_upward(iocg);
1715 list_del_init(&iocg->walk_list);
1720 * Determine what @iocg's hweight_inuse should be after donating unused
1721 * capacity. @hwm is the upper bound and used to signal no donation. This
1722 * function also throws away @iocg's excess budget.
1724 static u32 hweight_after_donation(struct ioc_gq *iocg, u32 old_hwi, u32 hwm,
1725 u32 usage, struct ioc_now *now)
1727 struct ioc *ioc = iocg->ioc;
1728 u64 vtime = atomic64_read(&iocg->vtime);
1729 s64 excess, delta, target, new_hwi;
1731 /* debt handling owns inuse for debtors */
1732 if (iocg->abs_vdebt)
1735 /* see whether minimum margin requirement is met */
1736 if (waitqueue_active(&iocg->waitq) ||
1737 time_after64(vtime, now->vnow - ioc->margins.min))
1740 /* throw away excess above target */
1741 excess = now->vnow - vtime - ioc->margins.target;
1743 atomic64_add(excess, &iocg->vtime);
1744 atomic64_add(excess, &iocg->done_vtime);
1746 ioc->vtime_err -= div64_u64(excess * old_hwi, WEIGHT_ONE);
1750 * Let's say the distance between iocg's and device's vtimes as a
1751 * fraction of period duration is delta. Assuming that the iocg will
1752 * consume the usage determined above, we want to determine new_hwi so
1753 * that delta equals MARGIN_TARGET at the end of the next period.
1755 * We need to execute usage worth of IOs while spending the sum of the
1756 * new budget (1 - MARGIN_TARGET) and the leftover from the last period
1757 * (delta):
1759 * usage = (1 - MARGIN_TARGET + delta) * new_hwi
1761 * Therefore, the new_hwi is:
1763 * new_hwi = usage / (1 - MARGIN_TARGET + delta)
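 *
 * As an illustration, with usage at 25%, MARGIN_TARGET at 50% and the iocg
 * currently lagging the device vtime by 10% of a period (delta):
 *
 *   new_hwi = 0.25 / (1 - 0.5 + 0.1) ~= 41.7% of WEIGHT_ONE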
1765 delta = div64_s64(WEIGHT_ONE * (now->vnow - vtime),
1766 now->vnow - ioc->period_at_vtime);
1767 target = WEIGHT_ONE * MARGIN_TARGET_PCT / 100;
1768 new_hwi = div64_s64(WEIGHT_ONE * usage, WEIGHT_ONE - target + delta);
1770 return clamp_t(s64, new_hwi, 1, hwm);
1774 * For work-conservation, an iocg which isn't using all of its share should
1775 * donate the leftover to other iocgs. There are two ways to achieve this - 1.
1776 * bumping up vrate accordingly 2. lowering the donating iocg's inuse weight.
1778 * #1 is mathematically simpler but has the drawback of requiring synchronous
1779 * global hweight_inuse updates when idle iocg's get activated or inuse weights
1780 * change due to donation snapbacks as it has the possibility of grossly
1781 * overshooting what's allowed by the model and vrate.
1783 * #2 is inherently safe with local operations. The donating iocg can easily
1784 * snap back to higher weights when needed without worrying about impacts on
1785 * other nodes as the impacts will be inherently correct. This also makes idle
1786 * iocg activations safe. The only effect activations have is decreasing
1787 * hweight_inuse of others, the right solution to which is for those iocgs to
1788 * snap back to higher weights.
1790 * So, we go with #2. The challenge is calculating how each donating iocg's
1791 * inuse should be adjusted to achieve the target donation amounts. This is done
1792 * using Andy's method described in the following pdf.
1794 * https://drive.google.com/file/d/1PsJwxPFtjUnwOY1QJ5AeICCcsL7BM3bo
1796 * Given the weights and target after-donation hweight_inuse values, Andy's
1797 * method determines how the proportional distribution should look like at each
1798 * sibling level to maintain the relative relationship between all non-donating
1799 * pairs. To roughly summarize, it divides the tree into donating and
1800 * non-donating parts, calculates global donation rate which is used to
1801 * determine the target hweight_inuse for each node, and then derives per-level
1802 * proportional distributions.
1804 * The following pdf shows that global distribution calculated this way can be
1805 * achieved by scaling inuse weights of donating leaves and propagating the
1806 * adjustments upwards proportionally.
1808 * https://drive.google.com/file/d/1vONz1-fzVO7oY5DXXsLjSxEtYYQbOvsE
1810 * Combining the above two, we can determine how each leaf iocg's inuse should
1811 * be adjusted to achieve the target donation.
1813 * https://drive.google.com/file/d/1WcrltBOSPN0qXVdBgnKm4mdp9FhuEFQN
1815 * The inline comments use symbols from the last pdf.
1817 * b is the sum of the absolute budgets in the subtree. 1 for the root node.
1818 * f is the sum of the absolute budgets of non-donating nodes in the subtree.
1819 * t is the sum of the absolute budgets of donating nodes in the subtree.
1820 * w is the weight of the node. w = w_f + w_t
1821 * w_f is the non-donating portion of w. w_f = w * f / b
1822 * w_t is the donating portion of w. w_t = w * t / b
1823 * s is the sum of all sibling weights. s = Sum(w) for siblings
1824 * s_f and s_t are the non-donating and donating portions of s.
1826 * Subscript p denotes the parent's counterpart and ' the adjusted value - e.g.
1827 * w_pt is the donating portion of the parent's weight and w'_pt the same value
1828 * after adjustments. Subscript r denotes the root node's values.
1830 static void transfer_surpluses(struct list_head *surpluses, struct ioc_now *now)
1832 LIST_HEAD(over_hwa);
1833 LIST_HEAD(inner_walk);
1834 struct ioc_gq *iocg, *tiocg, *root_iocg;
1835 u32 after_sum, over_sum, over_target, gamma;
1838 * It's pretty unlikely but possible for the total sum of
1839 * hweight_after_donation's to be higher than WEIGHT_ONE, which will
1840 * confuse the following calculations. If such a condition is detected,
1841 * scale down everyone over its full share equally to keep the sum below
1842 * WEIGHT_ONE.
1846 list_for_each_entry(iocg, surpluses, surplus_list) {
1849 current_hweight(iocg, &hwa, NULL);
1850 after_sum += iocg->hweight_after_donation;
1852 if (iocg->hweight_after_donation > hwa) {
1853 over_sum += iocg->hweight_after_donation;
1854 list_add(&iocg->walk_list, &over_hwa);
1858 if (after_sum >= WEIGHT_ONE) {
1860 * The delta should be deducted from over_sum; calculate the
1861 * target over_sum value.
1863 u32 over_delta = after_sum - (WEIGHT_ONE - 1);
1864 WARN_ON_ONCE(over_sum <= over_delta);
1865 over_target = over_sum - over_delta;
1870 list_for_each_entry_safe(iocg, tiocg, &over_hwa, walk_list) {
1872 iocg->hweight_after_donation =
1873 div_u64((u64)iocg->hweight_after_donation *
1874 over_target, over_sum);
1875 list_del_init(&iocg->walk_list);
1879 * Build pre-order inner node walk list and prepare for donation
1880 * adjustment calculations.
1882 list_for_each_entry(iocg, surpluses, surplus_list) {
1883 iocg_build_inner_walk(iocg, &inner_walk);
1886 root_iocg = list_first_entry(&inner_walk, struct ioc_gq, walk_list);
1887 WARN_ON_ONCE(root_iocg->level > 0);
1889 list_for_each_entry(iocg, &inner_walk, walk_list) {
1890 iocg->child_adjusted_sum = 0;
1891 iocg->hweight_donating = 0;
1892 iocg->hweight_after_donation = 0;
1896 * Propagate the donating budget (b_t) and after donation budget (b'_t)
1897 * up the hierarchy.
1899 list_for_each_entry(iocg, surpluses, surplus_list) {
1900 struct ioc_gq *parent = iocg->ancestors[iocg->level - 1];
1902 parent->hweight_donating += iocg->hweight_donating;
1903 parent->hweight_after_donation += iocg->hweight_after_donation;
1906 list_for_each_entry_reverse(iocg, &inner_walk, walk_list) {
1907 if (iocg->level > 0) {
1908 struct ioc_gq *parent = iocg->ancestors[iocg->level - 1];
1910 parent->hweight_donating += iocg->hweight_donating;
1911 parent->hweight_after_donation += iocg->hweight_after_donation;
1916 * Calculate inner hwa's (b) and make sure the donation values are
1917 * within the accepted ranges as we're doing low res calculations with
1918 * roundups.
1920 list_for_each_entry(iocg, &inner_walk, walk_list) {
1922 struct ioc_gq *parent = iocg->ancestors[iocg->level - 1];
1924 iocg->hweight_active = DIV64_U64_ROUND_UP(
1925 (u64)parent->hweight_active * iocg->active,
1926 parent->child_active_sum);
1930 iocg->hweight_donating = min(iocg->hweight_donating,
1931 iocg->hweight_active);
1932 iocg->hweight_after_donation = min(iocg->hweight_after_donation,
1933 iocg->hweight_donating - 1);
1934 if (WARN_ON_ONCE(iocg->hweight_active <= 1 ||
1935 iocg->hweight_donating <= 1 ||
1936 iocg->hweight_after_donation == 0)) {
1937 pr_warn("iocg: invalid donation weights in ");
1938 pr_cont_cgroup_path(iocg_to_blkg(iocg)->blkcg->css.cgroup);
1939 pr_cont(": active=%u donating=%u after=%u\n",
1940 iocg->hweight_active, iocg->hweight_donating,
1941 iocg->hweight_after_donation);
1946 * Calculate the global donation rate (gamma) - the rate to adjust
1947 * non-donating budgets by.
1949 * No need to use 64bit multiplication here as the first operand is
1950 * guaranteed to be smaller than WEIGHT_ONE (1<<16).
1952 * We know that there are beneficiary nodes and the sum of the donating
1953 * hweights can't be whole; however, due to the round-ups during hweight
1954 * calculations, root_iocg->hweight_donating might still end up equal to
1955 * or greater than whole. Limit the range when calculating the divider.
1957 * gamma = (1 - t_r') / (1 - t_r)
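 *
 * As an illustration, if the donating nodes' combined share drops from
 * t_r = 40% to t_r' = 10%, gamma = (1 - 0.1) / (1 - 0.4) = 1.5 and every
 * non-donating budget is scaled up by 1.5x.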
1959 gamma = DIV_ROUND_UP(
1960 (WEIGHT_ONE - root_iocg->hweight_after_donation) * WEIGHT_ONE,
1961 WEIGHT_ONE - min_t(u32, root_iocg->hweight_donating, WEIGHT_ONE - 1));
1964 * Calculate adjusted hwi, child_adjusted_sum and inuse for the inner
1965 * nodes.
1967 list_for_each_entry(iocg, &inner_walk, walk_list) {
1968 struct ioc_gq *parent;
1969 u32 inuse, wpt, wptp;
1972 if (iocg->level == 0) {
1973 /* adjusted weight sum for 1st level: s' = s * b_pf / b'_pf */
1974 iocg->child_adjusted_sum = DIV64_U64_ROUND_UP(
1975 iocg->child_active_sum * (WEIGHT_ONE - iocg->hweight_donating),
1976 WEIGHT_ONE - iocg->hweight_after_donation);
1980 parent = iocg->ancestors[iocg->level - 1];
1982 /* b' = gamma * b_f + b_t' */
1983 iocg->hweight_inuse = DIV64_U64_ROUND_UP(
1984 (u64)gamma * (iocg->hweight_active - iocg->hweight_donating),
1985 WEIGHT_ONE) + iocg->hweight_after_donation;
1987 /* w' = s' * b' / b'_p */
1988 inuse = DIV64_U64_ROUND_UP(
1989 (u64)parent->child_adjusted_sum * iocg->hweight_inuse,
1990 parent->hweight_inuse);
1992 /* adjusted weight sum for children: s' = s_f + s_t * w'_pt / w_pt */
1993 st = DIV64_U64_ROUND_UP(
1994 iocg->child_active_sum * iocg->hweight_donating,
1995 iocg->hweight_active);
1996 sf = iocg->child_active_sum - st;
1997 wpt = DIV64_U64_ROUND_UP(
1998 (u64)iocg->active * iocg->hweight_donating,
1999 iocg->hweight_active);
2000 wptp = DIV64_U64_ROUND_UP(
2001 (u64)inuse * iocg->hweight_after_donation,
2002 iocg->hweight_inuse);
2004 iocg->child_adjusted_sum = sf + DIV64_U64_ROUND_UP(st * wptp, wpt);
2008 * All inner nodes now have ->hweight_inuse and ->child_adjusted_sum and
2009 * we can finally determine leaf adjustments.
2011 list_for_each_entry(iocg, surpluses, surplus_list) {
2012 struct ioc_gq *parent = iocg->ancestors[iocg->level - 1];
2016 * In-debt iocgs participated in the donation calculation with
2017 * the minimum target hweight_inuse. Configuring inuse
2018 * accordingly would work fine but debt handling expects
2019 * @iocg->inuse to stay at the minimum and we don't want to interfere.
2022 if (iocg->abs_vdebt) {
2023 WARN_ON_ONCE(iocg->inuse > 1);
2027 /* w' = s' * b' / b'_p, note that b' == b'_t for donating leaves */
2028 inuse = DIV64_U64_ROUND_UP(
2029 parent->child_adjusted_sum * iocg->hweight_after_donation,
2030 parent->hweight_inuse);
2032 TRACE_IOCG_PATH(inuse_transfer, iocg, now,
2034 iocg->hweight_inuse,
2035 iocg->hweight_after_donation);
2037 __propagate_weights(iocg, iocg->active, inuse, true, now);
2040 /* walk list should be dissolved after use */
2041 list_for_each_entry_safe(iocg, tiocg, &inner_walk, walk_list)
2042 list_del_init(&iocg->walk_list);
2046 * A low weight iocg can amass a large amount of debt, for example, when
2047 * anonymous memory gets reclaimed aggressively. If the system has a lot of
2048 * memory paired with a slow IO device, the debt can span multiple seconds or
2049 * more. If there are no other subsequent IO issuers, the in-debt iocg may end
2050 * up blocked paying its debt while the IO device is idle.
2052 * The following protects against such cases. If the device has been
2053 * sufficiently idle for a while, the debts are halved and delays are recalculated.
2056 static void ioc_forgive_debts(struct ioc *ioc, u64 usage_us_sum, int nr_debtors,
2057 struct ioc_now *now)
2059 struct ioc_gq *iocg;
2060 u64 dur, usage_pct, nr_cycles;
2062 /* if no debtor, reset the cycle */
2064 ioc->dfgv_period_at = now->now;
2065 ioc->dfgv_period_rem = 0;
2066 ioc->dfgv_usage_us_sum = 0;
2071 * Debtors can pass through a lot of writes choking the device and we
2072 * don't want to be forgiving debts while the device is struggling from
2073 * write bursts. If we're missing latency targets, consider the device fully utilized.
2076 if (ioc->busy_level > 0)
2077 usage_us_sum = max_t(u64, usage_us_sum, ioc->period_us);
2079 ioc->dfgv_usage_us_sum += usage_us_sum;
2080 if (time_before64(now->now, ioc->dfgv_period_at + DFGV_PERIOD))
2084 * At least DFGV_PERIOD has passed since the last period. Calculate the
2085 * average usage and reset the period counters.
2087 dur = now->now - ioc->dfgv_period_at;
2088 usage_pct = div64_u64(100 * ioc->dfgv_usage_us_sum, dur);
2090 ioc->dfgv_period_at = now->now;
2091 ioc->dfgv_usage_us_sum = 0;
2093 /* if the device was too busy, reset everything */
2094 if (usage_pct > DFGV_USAGE_PCT) {
2095 ioc->dfgv_period_rem = 0;
2100 * Usage is lower than threshold. Let's forgive some debts. Debt
2101 * forgiveness runs off of the usual ioc timer but its period usually
2102 * doesn't match ioc's. Compensate the difference by performing the
2103 * reduction as many times as would fit in the duration since the last
2104 * run and carrying over the left-over duration in @ioc->dfgv_period_rem
2105 * - if ioc period is 75% of DFGV_PERIOD, one out of three consecutive
2106 * reductions is doubled.
2108 nr_cycles = dur + ioc->dfgv_period_rem;
2109 ioc->dfgv_period_rem = do_div(nr_cycles, DFGV_PERIOD);
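/*
 * do_div() leaves the quotient in @nr_cycles and evaluates to the remainder.
 * For example, if dur + dfgv_period_rem adds up to 2.5 * DFGV_PERIOD, the
 * debts below are shifted right by two and the remaining half DFGV_PERIOD is
 * carried over to the next run.
 */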
2111 list_for_each_entry(iocg, &ioc->active_iocgs, active_list) {
2112 u64 __maybe_unused old_debt, __maybe_unused old_delay;
2114 if (!iocg->abs_vdebt && !iocg->delay)
2117 spin_lock(&iocg->waitq.lock);
2119 old_debt = iocg->abs_vdebt;
2120 old_delay = iocg->delay;
2122 if (iocg->abs_vdebt)
2123 iocg->abs_vdebt = iocg->abs_vdebt >> nr_cycles ?: 1;
2125 if (iocg->delay) iocg->delay = iocg->delay >> nr_cycles ?: 1;
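/*
 * The ">> nr_cycles ?: 1" idiom halves the value nr_cycles times but never
 * below 1 - e.g. an abs_vdebt of 8000 with nr_cycles == 2 becomes 2000, while
 * a value that would shift down to 0 is kept at 1, presumably so that the
 * final clearing still happens through the regular debt / delay paths rather
 * than here.
 */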
2127 iocg_kick_waitq(iocg, true, now);
2129 TRACE_IOCG_PATH(iocg_forgive_debt, iocg, now, usage_pct,
2130 old_debt, iocg->abs_vdebt,
2131 old_delay, iocg->delay);
2133 spin_unlock(&iocg->waitq.lock);
2138 * Check the active iocgs' state to avoid oversleeping and deactivate idle iocgs.
2141 * Since waiters determine the sleep durations based on the vrate
2142 * they saw at the time of sleep, if vrate has increased, some
2143 * waiters could be sleeping for too long. Wake up tardy waiters
2144 * which should have woken up in the last period and expire idle iocgs.
2147 static int ioc_check_iocgs(struct ioc *ioc, struct ioc_now *now)
2150 struct ioc_gq *iocg, *tiocg;
2152 list_for_each_entry_safe(iocg, tiocg, &ioc->active_iocgs, active_list) {
2153 if (!waitqueue_active(&iocg->waitq) && !iocg->abs_vdebt &&
2154 !iocg->delay && !iocg_is_idle(iocg))
2157 spin_lock(&iocg->waitq.lock);
2159 /* flush wait and indebt stat deltas */
2160 if (iocg->wait_since) {
2161 iocg->stat.wait_us += now->now - iocg->wait_since;
2162 iocg->wait_since = now->now;
2164 if (iocg->indebt_since) {
2165 iocg->stat.indebt_us +=
2166 now->now - iocg->indebt_since;
2167 iocg->indebt_since = now->now;
2169 if (iocg->indelay_since) {
2170 iocg->stat.indelay_us +=
2171 now->now - iocg->indelay_since;
2172 iocg->indelay_since = now->now;
2175 if (waitqueue_active(&iocg->waitq) || iocg->abs_vdebt ||
2177 /* might be oversleeping vtime / hweight changes, kick */
2178 iocg_kick_waitq(iocg, true, now);
2179 if (iocg->abs_vdebt || iocg->delay)
2181 } else if (iocg_is_idle(iocg)) {
2182 /* no waiter and idle, deactivate */
2183 u64 vtime = atomic64_read(&iocg->vtime);
2187 * @iocg has been inactive for a full duration and will
2188 * have a high budget. Account anything above target as
2189 * error and throw away. On reactivation, it'll start
2190 * with the target budget.
2192 excess = now->vnow - vtime - ioc->margins.target;
2196 current_hweight(iocg, NULL, &old_hwi);
2197 ioc->vtime_err -= div64_u64(excess * old_hwi, WEIGHT_ONE);
2201 TRACE_IOCG_PATH(iocg_idle, iocg, now,
2202 atomic64_read(&iocg->active_period),
2203 atomic64_read(&ioc->cur_period), vtime);
2204 __propagate_weights(iocg, 0, 0, false, now);
2205 list_del_init(&iocg->active_list);
2208 spin_unlock(&iocg->waitq.lock);
2211 commit_weights(ioc);
2215 static void ioc_timer_fn(struct timer_list *timer)
2217 struct ioc *ioc = container_of(timer, struct ioc, timer);
2218 struct ioc_gq *iocg, *tiocg;
2220 LIST_HEAD(surpluses);
2221 int nr_debtors, nr_shortages = 0, nr_lagging = 0;
2222 u64 usage_us_sum = 0;
2223 u32 ppm_rthr = MILLION - ioc->params.qos[QOS_RPPM];
2224 u32 ppm_wthr = MILLION - ioc->params.qos[QOS_WPPM];
2225 u32 missed_ppm[2], rq_wait_pct;
2227 int prev_busy_level;
2229 /* how were the latencies during the period? */
2230 ioc_lat_stat(ioc, missed_ppm, &rq_wait_pct);
2232 /* take care of active iocgs */
2233 spin_lock_irq(&ioc->lock);
2237 period_vtime = now.vnow - ioc->period_at_vtime;
2238 if (WARN_ON_ONCE(!period_vtime)) {
2239 spin_unlock_irq(&ioc->lock);
2243 nr_debtors = ioc_check_iocgs(ioc, &now);
2246 * Wait and indebt stats are flushed above and the donation calculation
2247 * below needs updated usage stats. Let's bring the stats up-to-date.
2249 iocg_flush_stat(&ioc->active_iocgs, &now);
2251 /* calc usage and see whether some weights need to be moved around */
2252 list_for_each_entry(iocg, &ioc->active_iocgs, active_list) {
2253 u64 vdone, vtime, usage_us;
2254 u32 hw_active, hw_inuse;
2257 * Collect unused and wind vtime closer to vnow to prevent
2258 * iocgs from accumulating a large amount of budget.
2260 vdone = atomic64_read(&iocg->done_vtime);
2261 vtime = atomic64_read(&iocg->vtime);
2262 current_hweight(iocg, &hw_active, &hw_inuse);
2265 * Latency QoS detection doesn't account for IOs which are
2266 * in-flight for longer than a period. Detect them by
2267 * comparing vdone against period start. If lagging behind
2268 * IOs from past periods, don't increase vrate.
2270 if ((ppm_rthr != MILLION || ppm_wthr != MILLION) &&
2271 !atomic_read(&iocg_to_blkg(iocg)->use_delay) &&
2272 time_after64(vtime, vdone) &&
2273 time_after64(vtime, now.vnow -
2274 MAX_LAGGING_PERIODS * period_vtime) &&
2275 time_before64(vdone, now.vnow - period_vtime))
2279 * Determine absolute usage factoring in in-flight IOs to avoid
2280 * high-latency completions appearing as idle.
2282 usage_us = iocg->usage_delta_us;
2283 usage_us_sum += usage_us;
2285 /* see whether there's surplus vtime */
2286 WARN_ON_ONCE(!list_empty(&iocg->surplus_list));
2287 if (hw_inuse < hw_active ||
2288 (!waitqueue_active(&iocg->waitq) &&
2289 time_before64(vtime, now.vnow - ioc->margins.low))) {
2290 u32 hwa, old_hwi, hwm, new_hwi, usage;
2293 if (vdone != vtime) {
2294 u64 inflight_us = DIV64_U64_ROUND_UP(
2295 cost_to_abs_cost(vtime - vdone, hw_inuse),
2296 ioc->vtime_base_rate);
2298 usage_us = max(usage_us, inflight_us);
2301 /* convert to hweight based usage ratio */
2302 if (time_after64(iocg->activated_at, ioc->period_at))
2303 usage_dur = max_t(u64, now.now - iocg->activated_at, 1);
2305 usage_dur = max_t(u64, now.now - ioc->period_at, 1);
2307 usage = clamp_t(u32,
2308 DIV64_U64_ROUND_UP(usage_us * WEIGHT_ONE, usage_dur), 1, WEIGHT_ONE);
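/*
 * e.g. 500us of device time consumed over a 1000us window comes out to
 * roughly WEIGHT_ONE / 2 - the iocg kept the device busy for about half of
 * its activated time.
 */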
2313 * Already donating or accumulated enough to start.
2314 * Determine the donation amount.
2316 current_hweight(iocg, &hwa, &old_hwi);
2317 hwm = current_hweight_max(iocg);
2318 new_hwi = hweight_after_donation(iocg, old_hwi, hwm,
2321 * Donation calculation assumes hweight_after_donation
2322 * to be positive, a condition that a donor w/ hwa < 2
2323 * can't meet. Don't bother with donation if hwa is
2324 * below 2. It's not going to make a meaningful difference anyway.
2327 if (new_hwi < hwm && hwa >= 2) {
2328 iocg->hweight_donating = hwa;
2329 iocg->hweight_after_donation = new_hwi;
2330 list_add(&iocg->surplus_list, &surpluses);
2331 } else if (!iocg->abs_vdebt) {
2333 * @iocg doesn't have enough to donate. Reset
2334 * its inuse to active.
2336 * Don't reset debtors as their inuses are
2337 * owned by debt handling. This shouldn't affect
2338 * donation calculation in any meaningful way
2339 * as @iocg doesn't have a meaningful amount of share anyway.
2342 TRACE_IOCG_PATH(inuse_shortage, iocg, &now,
2343 iocg->inuse, iocg->active,
2344 iocg->hweight_inuse, new_hwi);
2346 __propagate_weights(iocg, iocg->active,
2347 iocg->active, true, &now);
2351 /* genuinely short on vtime */
2356 if (!list_empty(&surpluses) && nr_shortages)
2357 transfer_surpluses(&surpluses, &now);
2359 commit_weights(ioc);
2361 /* surplus list should be dissolved after use */
2362 list_for_each_entry_safe(iocg, tiocg, &surpluses, surplus_list)
2363 list_del_init(&iocg->surplus_list);
2366 * If q is getting clogged or we're missing too much, we're issuing
2367 * too much IO and should lower vtime rate. If we're not missing
2368 * and experiencing shortages but not surpluses, we're too stingy
2369 * and should increase vtime rate.
2371 prev_busy_level = ioc->busy_level;
2372 if (rq_wait_pct > RQ_WAIT_BUSY_PCT ||
2373 missed_ppm[READ] > ppm_rthr ||
2374 missed_ppm[WRITE] > ppm_wthr) {
2375 /* clearly missing QoS targets, slow down vrate */
2376 ioc->busy_level = max(ioc->busy_level, 0);
2378 } else if (rq_wait_pct <= RQ_WAIT_BUSY_PCT * UNBUSY_THR_PCT / 100 &&
2379 missed_ppm[READ] <= ppm_rthr * UNBUSY_THR_PCT / 100 &&
2380 missed_ppm[WRITE] <= ppm_wthr * UNBUSY_THR_PCT / 100) {
2381 /* QoS targets are being met with >25% margin */
2384 * We're throttling while the device has spare
2385 * capacity. If vrate was being slowed down, stop.
2387 ioc->busy_level = min(ioc->busy_level, 0);
2390 * If there are IOs spanning multiple periods, wait
2391 * them out before pushing the device harder.
2397 * Nobody is being throttled and the users aren't
2398 * issuing enough IOs to saturate the device. We
2399 * simply don't know how close the device is to
2400 * saturation. Coast.
2402 ioc->busy_level = 0;
2405 /* inside the hysteresis margin, we're good */
2406 ioc->busy_level = 0;
2409 ioc->busy_level = clamp(ioc->busy_level, -1000, 1000);
2411 ioc_adjust_base_vrate(ioc, rq_wait_pct, nr_lagging, nr_shortages,
2412 prev_busy_level, missed_ppm);
2414 ioc_refresh_params(ioc, false);
2416 ioc_forgive_debts(ioc, usage_us_sum, nr_debtors, &now);
2419 * This period is done. Move onto the next one. If nothing's
2420 * going on with the device, stop the timer.
2422 atomic64_inc(&ioc->cur_period);
2424 if (ioc->running != IOC_STOP) {
2425 if (!list_empty(&ioc->active_iocgs)) {
2426 ioc_start_period(ioc, &now);
2428 ioc->busy_level = 0;
2430 ioc->running = IOC_IDLE;
2433 ioc_refresh_vrate(ioc, &now);
2436 spin_unlock_irq(&ioc->lock);
2439 static u64 adjust_inuse_and_calc_cost(struct ioc_gq *iocg, u64 vtime,
2440 u64 abs_cost, struct ioc_now *now)
2442 struct ioc *ioc = iocg->ioc;
2443 struct ioc_margins *margins = &ioc->margins;
2444 u32 __maybe_unused old_inuse = iocg->inuse, __maybe_unused old_hwi;
2447 u64 cost, new_inuse;
2448 unsigned long flags;
2450 current_hweight(iocg, NULL, &hwi);
2452 cost = abs_cost_to_cost(abs_cost, hwi);
2453 margin = now->vnow - vtime - cost;
2455 /* debt handling owns inuse for debtors */
2456 if (iocg->abs_vdebt)
2460 * We only increase inuse during period and do so if the margin has
2461 * deteriorated since the previous adjustment.
2463 if (margin >= iocg->saved_margin || margin >= margins->low ||
2464 iocg->inuse == iocg->active)
2467 spin_lock_irqsave(&ioc->lock, flags);
2469 /* we own inuse only when @iocg is in the normal active state */
2470 if (iocg->abs_vdebt || list_empty(&iocg->active_list)) {
2471 spin_unlock_irqrestore(&ioc->lock, flags);
2476 * Bump up inuse till @abs_cost fits in the existing budget.
2477 * adj_step must be determined after acquiring ioc->lock - we might
2478 * have raced and lost to another thread for activation and could
2479 * be reading 0 iocg->active before ioc->lock which will lead to an infinite loop.
2482 new_inuse = iocg->inuse;
2483 adj_step = DIV_ROUND_UP(iocg->active * INUSE_ADJ_STEP_PCT, 100);
2485 new_inuse = new_inuse + adj_step;
2486 propagate_weights(iocg, iocg->active, new_inuse, true, now);
2487 current_hweight(iocg, NULL, &hwi);
2488 cost = abs_cost_to_cost(abs_cost, hwi);
2489 } while (time_after64(vtime + cost, now->vnow) &&
2490 iocg->inuse != iocg->active);
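/*
 * Illustrative stepping (the INUSE_ADJ_STEP_PCT value is assumed for the
 * example): with active at 10000 and a 25% step, inuse grows by 2500 per
 * iteration until either the cost fits within the budget or inuse reaches
 * active.
 */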
2492 spin_unlock_irqrestore(&ioc->lock, flags);
2494 TRACE_IOCG_PATH(inuse_adjust, iocg, now,
2495 old_inuse, iocg->inuse, old_hwi, hwi);
2500 static void calc_vtime_cost_builtin(struct bio *bio, struct ioc_gq *iocg,
2501 bool is_merge, u64 *costp)
2503 struct ioc *ioc = iocg->ioc;
2504 u64 coef_seqio, coef_randio, coef_page;
2505 u64 pages = max_t(u64, bio_sectors(bio) >> IOC_SECT_TO_PAGE_SHIFT, 1);
2509 switch (bio_op(bio)) {
2511 coef_seqio = ioc->params.lcoefs[LCOEF_RSEQIO];
2512 coef_randio = ioc->params.lcoefs[LCOEF_RRANDIO];
2513 coef_page = ioc->params.lcoefs[LCOEF_RPAGE];
2516 coef_seqio = ioc->params.lcoefs[LCOEF_WSEQIO];
2517 coef_randio = ioc->params.lcoefs[LCOEF_WRANDIO];
2518 coef_page = ioc->params.lcoefs[LCOEF_WPAGE];
2525 seek_pages = abs(bio->bi_iter.bi_sector - iocg->cursor);
2526 seek_pages >>= IOC_SECT_TO_PAGE_SHIFT;
2530 if (seek_pages > LCOEF_RANDIO_PAGES) {
2531 cost += coef_randio;
2536 cost += pages * coef_page;
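/*
 * Putting the pieces together: a non-merged 64KB IO (sixteen 4KB pages) that
 * lands more than LCOEF_RANDIO_PAGES away from the previous cursor is charged
 * coef_randio + 16 * coef_page, while a sequential one is charged coef_seqio
 * plus the same per-page component.
 */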
2541 static u64 calc_vtime_cost(struct bio *bio, struct ioc_gq *iocg, bool is_merge)
2545 calc_vtime_cost_builtin(bio, iocg, is_merge, &cost);
2549 static void calc_size_vtime_cost_builtin(struct request *rq, struct ioc *ioc,
2552 unsigned int pages = blk_rq_stats_sectors(rq) >> IOC_SECT_TO_PAGE_SHIFT;
2554 switch (req_op(rq)) {
2556 *costp = pages * ioc->params.lcoefs[LCOEF_RPAGE];
2559 *costp = pages * ioc->params.lcoefs[LCOEF_WPAGE];
2566 static u64 calc_size_vtime_cost(struct request *rq, struct ioc *ioc)
2570 calc_size_vtime_cost_builtin(rq, ioc, &cost);
2574 static void ioc_rqos_throttle(struct rq_qos *rqos, struct bio *bio)
2576 struct blkcg_gq *blkg = bio->bi_blkg;
2577 struct ioc *ioc = rqos_to_ioc(rqos);
2578 struct ioc_gq *iocg = blkg_to_iocg(blkg);
2580 struct iocg_wait wait;
2581 u64 abs_cost, cost, vtime;
2582 bool use_debt, ioc_locked;
2583 unsigned long flags;
2585 /* bypass IOs if disabled, still initializing, or for root cgroup */
2586 if (!ioc->enabled || !iocg || !iocg->level)
2589 /* calculate the absolute vtime cost */
2590 abs_cost = calc_vtime_cost(bio, iocg, false);
2594 if (!iocg_activate(iocg, &now))
2597 iocg->cursor = bio_end_sector(bio);
2598 vtime = atomic64_read(&iocg->vtime);
2599 cost = adjust_inuse_and_calc_cost(iocg, vtime, abs_cost, &now);
2602 * If no one's waiting and within budget, issue right away. The
2603 * tests are racy but the races aren't systemic - we only miss once
2604 * in a while which is fine.
2606 if (!waitqueue_active(&iocg->waitq) && !iocg->abs_vdebt &&
2607 time_before_eq64(vtime + cost, now.vnow)) {
2608 iocg_commit_bio(iocg, bio, abs_cost, cost);
2613 * We're over budget. This can be handled in two ways. IOs which may
2614 * cause priority inversions are punted to @ioc->aux_iocg and charged as
2615 * debt. Otherwise, the issuer is blocked on @iocg->waitq. Debt handling
2616 * requires @ioc->lock, waitq handling @iocg->waitq.lock. Determine
2617 * whether debt handling is needed and acquire locks accordingly.
2619 use_debt = bio_issue_as_root_blkg(bio) || fatal_signal_pending(current);
2620 ioc_locked = use_debt || READ_ONCE(iocg->abs_vdebt);
2622 iocg_lock(iocg, ioc_locked, &flags);
2625 * @iocg must stay activated for debt and waitq handling. Deactivation
2626 * is synchronized against both ioc->lock and waitq.lock and we won't
2627 * get deactivated as long as we're waiting or have debt, so we're good
2628 * if we're activated here. In the unlikely case that we aren't, just issue the IO.
2631 if (unlikely(list_empty(&iocg->active_list))) {
2632 iocg_unlock(iocg, ioc_locked, &flags);
2633 iocg_commit_bio(iocg, bio, abs_cost, cost);
2638 * We're over budget. If @bio has to be issued regardless, remember
2639 * the abs_cost instead of advancing vtime. iocg_kick_waitq() will pay
2640 * off the debt before waking more IOs.
2642 * This way, the debt is continuously paid off each period with the
2643 * actual budget available to the cgroup. If we just wound vtime, we
2644 * would incorrectly use the current hw_inuse for the entire amount
2645 * which, for example, can lead to the cgroup staying blocked for a
2646 * long time even with substantially raised hw_inuse.
2648 * An iocg with vdebt should stay online so that the timer can keep
2649 * deducting its vdebt and [de]activating the use_delay mechanism
2650 * accordingly. We don't want to race against the timer trying to
2651 * clear them and leave @iocg inactive w/ dangling use_delay heavily
2652 * penalizing the cgroup and its descendants.
2655 iocg_incur_debt(iocg, abs_cost, &now);
2656 if (iocg_kick_delay(iocg, &now))
2657 blkcg_schedule_throttle(rqos->q->disk,
2658 (bio->bi_opf & REQ_SWAP) == REQ_SWAP);
2659 iocg_unlock(iocg, ioc_locked, &flags);
2663 /* guarantee that iocgs w/ waiters have maximum inuse */
2664 if (!iocg->abs_vdebt && iocg->inuse != iocg->active) {
2666 iocg_unlock(iocg, false, &flags);
2670 propagate_weights(iocg, iocg->active, iocg->active, true,
2675 * Append self to the waitq and schedule the wakeup timer if we're
2676 * the first waiter. The timer duration is calculated based on the
2677 * current vrate. vtime and hweight changes can make it too short
2678 * or too long. Each wait entry records the absolute cost it's
2679 * waiting for to allow re-evaluation using a custom wait entry.
2681 * If too short, the timer simply reschedules itself. If too long,
2682 * the period timer will notice and trigger wakeups.
2684 * All waiters are on iocg->waitq and the wait states are
2685 * synchronized using waitq.lock.
2687 init_waitqueue_func_entry(&wait.wait, iocg_wake_fn);
2688 wait.wait.private = current;
2690 wait.abs_cost = abs_cost;
2691 wait.committed = false; /* will be set true by waker */
2693 __add_wait_queue_entry_tail(&iocg->waitq, &wait.wait);
2694 iocg_kick_waitq(iocg, ioc_locked, &now);
2696 iocg_unlock(iocg, ioc_locked, &flags);
2699 set_current_state(TASK_UNINTERRUPTIBLE);
2705 /* waker already committed us, proceed */
2706 finish_wait(&iocg->waitq, &wait.wait);
2709 static void ioc_rqos_merge(struct rq_qos *rqos, struct request *rq,
2712 struct ioc_gq *iocg = blkg_to_iocg(bio->bi_blkg);
2713 struct ioc *ioc = rqos_to_ioc(rqos);
2714 sector_t bio_end = bio_end_sector(bio);
2716 u64 vtime, abs_cost, cost;
2717 unsigned long flags;
2719 /* bypass if disabled, still initializing, or for root cgroup */
2720 if (!ioc->enabled || !iocg || !iocg->level)
2723 abs_cost = calc_vtime_cost(bio, iocg, true);
2729 vtime = atomic64_read(&iocg->vtime);
2730 cost = adjust_inuse_and_calc_cost(iocg, vtime, abs_cost, &now);
2732 /* update cursor if backmerging into the request at the cursor */
2733 if (blk_rq_pos(rq) < bio_end &&
2734 blk_rq_pos(rq) + blk_rq_sectors(rq) == iocg->cursor)
2735 iocg->cursor = bio_end;
2738 * Charge if there's enough vtime budget and the existing request has cost assigned.
2741 if (rq->bio && rq->bio->bi_iocost_cost &&
2742 time_before_eq64(atomic64_read(&iocg->vtime) + cost, now.vnow)) {
2743 iocg_commit_bio(iocg, bio, abs_cost, cost);
2748 * Otherwise, account it as debt if @iocg is online, which it should
2749 * be for the vast majority of cases. See debt handling in
2750 * ioc_rqos_throttle() for details.
2752 spin_lock_irqsave(&ioc->lock, flags);
2753 spin_lock(&iocg->waitq.lock);
2755 if (likely(!list_empty(&iocg->active_list))) {
2756 iocg_incur_debt(iocg, abs_cost, &now);
2757 if (iocg_kick_delay(iocg, &now))
2758 blkcg_schedule_throttle(rqos->q->disk,
2759 (bio->bi_opf & REQ_SWAP) == REQ_SWAP);
2761 iocg_commit_bio(iocg, bio, abs_cost, cost);
2764 spin_unlock(&iocg->waitq.lock);
2765 spin_unlock_irqrestore(&ioc->lock, flags);
2768 static void ioc_rqos_done_bio(struct rq_qos *rqos, struct bio *bio)
2770 struct ioc_gq *iocg = blkg_to_iocg(bio->bi_blkg);
2772 if (iocg && bio->bi_iocost_cost)
2773 atomic64_add(bio->bi_iocost_cost, &iocg->done_vtime);
2776 static void ioc_rqos_done(struct rq_qos *rqos, struct request *rq)
2778 struct ioc *ioc = rqos_to_ioc(rqos);
2779 struct ioc_pcpu_stat *ccs;
2780 u64 on_q_ns, rq_wait_ns, size_nsec;
2783 if (!ioc->enabled || !rq->alloc_time_ns || !rq->start_time_ns)
2786 switch (req_op(rq)) {
2799 on_q_ns = ktime_get_ns() - rq->alloc_time_ns;
2800 rq_wait_ns = rq->start_time_ns - rq->alloc_time_ns;
2801 size_nsec = div64_u64(calc_size_vtime_cost(rq, ioc), VTIME_PER_NSEC);
2803 ccs = get_cpu_ptr(ioc->pcpu_stat);
2805 if (on_q_ns <= size_nsec ||
2806 on_q_ns - size_nsec <= ioc->params.qos[pidx] * NSEC_PER_USEC)
2807 local_inc(&ccs->missed[rw].nr_met);
2809 local_inc(&ccs->missed[rw].nr_missed);
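/*
 * Worked example with assumed numbers: with rlat set to 5000 (5ms) and a read
 * whose size cost works out to 3ms, anything up to 8ms from allocation to
 * completion still counts as met - only the time above the size cost is
 * compared against the QoS latency target.
 */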
2811 local64_add(rq_wait_ns, &ccs->rq_wait_ns);
2816 static void ioc_rqos_queue_depth_changed(struct rq_qos *rqos)
2818 struct ioc *ioc = rqos_to_ioc(rqos);
2820 spin_lock_irq(&ioc->lock);
2821 ioc_refresh_params(ioc, false);
2822 spin_unlock_irq(&ioc->lock);
2825 static void ioc_rqos_exit(struct rq_qos *rqos)
2827 struct ioc *ioc = rqos_to_ioc(rqos);
2829 blkcg_deactivate_policy(rqos->q, &blkcg_policy_iocost);
2831 spin_lock_irq(&ioc->lock);
2832 ioc->running = IOC_STOP;
2833 spin_unlock_irq(&ioc->lock);
2835 del_timer_sync(&ioc->timer);
2836 free_percpu(ioc->pcpu_stat);
2840 static struct rq_qos_ops ioc_rqos_ops = {
2841 .throttle = ioc_rqos_throttle,
2842 .merge = ioc_rqos_merge,
2843 .done_bio = ioc_rqos_done_bio,
2844 .done = ioc_rqos_done,
2845 .queue_depth_changed = ioc_rqos_queue_depth_changed,
2846 .exit = ioc_rqos_exit,
2849 static int blk_iocost_init(struct gendisk *disk)
2851 struct request_queue *q = disk->queue;
2853 struct rq_qos *rqos;
2856 ioc = kzalloc(sizeof(*ioc), GFP_KERNEL);
2860 ioc->pcpu_stat = alloc_percpu(struct ioc_pcpu_stat);
2861 if (!ioc->pcpu_stat) {
2866 for_each_possible_cpu(cpu) {
2867 struct ioc_pcpu_stat *ccs = per_cpu_ptr(ioc->pcpu_stat, cpu);
2869 for (i = 0; i < ARRAY_SIZE(ccs->missed); i++) {
2870 local_set(&ccs->missed[i].nr_met, 0);
2871 local_set(&ccs->missed[i].nr_missed, 0);
2873 local64_set(&ccs->rq_wait_ns, 0);
2877 rqos->id = RQ_QOS_COST;
2878 rqos->ops = &ioc_rqos_ops;
2881 spin_lock_init(&ioc->lock);
2882 timer_setup(&ioc->timer, ioc_timer_fn, 0);
2883 INIT_LIST_HEAD(&ioc->active_iocgs);
2885 ioc->running = IOC_IDLE;
2886 ioc->vtime_base_rate = VTIME_PER_USEC;
2887 atomic64_set(&ioc->vtime_rate, VTIME_PER_USEC);
2888 seqcount_spinlock_init(&ioc->period_seqcount, &ioc->lock);
2889 ioc->period_at = ktime_to_us(ktime_get());
2890 atomic64_set(&ioc->cur_period, 0);
2891 atomic_set(&ioc->hweight_gen, 0);
2893 spin_lock_irq(&ioc->lock);
2894 ioc->autop_idx = AUTOP_INVALID;
2895 ioc_refresh_params(ioc, true);
2896 spin_unlock_irq(&ioc->lock);
2899 * rqos must be added before activation to allow iocg_pd_init() to
2900 * look up the ioc from q. This means that the rqos methods may get
2901 * called before policy activation completes, so they can't assume that the
2902 * target bio has an iocg associated and need to test for a NULL iocg.
2904 ret = rq_qos_add(q, rqos);
2908 ret = blkcg_activate_policy(q, &blkcg_policy_iocost);
2914 rq_qos_del(q, rqos);
2916 free_percpu(ioc->pcpu_stat);
2921 static struct blkcg_policy_data *ioc_cpd_alloc(gfp_t gfp)
2923 struct ioc_cgrp *iocc;
2925 iocc = kzalloc(sizeof(struct ioc_cgrp), gfp);
2929 iocc->dfl_weight = CGROUP_WEIGHT_DFL * WEIGHT_ONE;
2933 static void ioc_cpd_free(struct blkcg_policy_data *cpd)
2935 kfree(container_of(cpd, struct ioc_cgrp, cpd));
2938 static struct blkg_policy_data *ioc_pd_alloc(gfp_t gfp, struct request_queue *q,
2939 struct blkcg *blkcg)
2941 int levels = blkcg->css.cgroup->level + 1;
2942 struct ioc_gq *iocg;
2944 iocg = kzalloc_node(struct_size(iocg, ancestors, levels), gfp, q->node);
2948 iocg->pcpu_stat = alloc_percpu_gfp(struct iocg_pcpu_stat, gfp);
2949 if (!iocg->pcpu_stat) {
2957 static void ioc_pd_init(struct blkg_policy_data *pd)
2959 struct ioc_gq *iocg = pd_to_iocg(pd);
2960 struct blkcg_gq *blkg = pd_to_blkg(&iocg->pd);
2961 struct ioc *ioc = q_to_ioc(blkg->q);
2963 struct blkcg_gq *tblkg;
2964 unsigned long flags;
2969 atomic64_set(&iocg->vtime, now.vnow);
2970 atomic64_set(&iocg->done_vtime, now.vnow);
2971 atomic64_set(&iocg->active_period, atomic64_read(&ioc->cur_period));
2972 INIT_LIST_HEAD(&iocg->active_list);
2973 INIT_LIST_HEAD(&iocg->walk_list);
2974 INIT_LIST_HEAD(&iocg->surplus_list);
2975 iocg->hweight_active = WEIGHT_ONE;
2976 iocg->hweight_inuse = WEIGHT_ONE;
2978 init_waitqueue_head(&iocg->waitq);
2979 hrtimer_init(&iocg->waitq_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
2980 iocg->waitq_timer.function = iocg_waitq_timer_fn;
2982 iocg->level = blkg->blkcg->css.cgroup->level;
2984 for (tblkg = blkg; tblkg; tblkg = tblkg->parent) {
2985 struct ioc_gq *tiocg = blkg_to_iocg(tblkg);
2986 iocg->ancestors[tiocg->level] = tiocg;
2989 spin_lock_irqsave(&ioc->lock, flags);
2990 weight_updated(iocg, &now);
2991 spin_unlock_irqrestore(&ioc->lock, flags);
2994 static void ioc_pd_free(struct blkg_policy_data *pd)
2996 struct ioc_gq *iocg = pd_to_iocg(pd);
2997 struct ioc *ioc = iocg->ioc;
2998 unsigned long flags;
3001 spin_lock_irqsave(&ioc->lock, flags);
3003 if (!list_empty(&iocg->active_list)) {
3007 propagate_weights(iocg, 0, 0, false, &now);
3008 list_del_init(&iocg->active_list);
3011 WARN_ON_ONCE(!list_empty(&iocg->walk_list));
3012 WARN_ON_ONCE(!list_empty(&iocg->surplus_list));
3014 spin_unlock_irqrestore(&ioc->lock, flags);
3016 hrtimer_cancel(&iocg->waitq_timer);
3018 free_percpu(iocg->pcpu_stat);
3022 static void ioc_pd_stat(struct blkg_policy_data *pd, struct seq_file *s)
3024 struct ioc_gq *iocg = pd_to_iocg(pd);
3025 struct ioc *ioc = iocg->ioc;
3030 if (iocg->level == 0) {
3031 unsigned vp10k = DIV64_U64_ROUND_CLOSEST(
3032 ioc->vtime_base_rate * 10000, VTIME_PER_USEC);
3034 seq_printf(s, " cost.vrate=%u.%02u", vp10k / 100, vp10k % 100);
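/*
 * vp10k expresses vtime_base_rate in basis points of VTIME_PER_USEC - e.g. a
 * base rate running at 1.5x real time is printed as "cost.vrate=150.00".
 */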
3037 seq_printf(s, " cost.usage=%llu", iocg->last_stat.usage_us);
3039 if (blkcg_debug_stats)
3040 seq_printf(s, " cost.wait=%llu cost.indebt=%llu cost.indelay=%llu",
3041 iocg->last_stat.wait_us,
3042 iocg->last_stat.indebt_us,
3043 iocg->last_stat.indelay_us);
3046 static u64 ioc_weight_prfill(struct seq_file *sf, struct blkg_policy_data *pd,
3049 const char *dname = blkg_dev_name(pd->blkg);
3050 struct ioc_gq *iocg = pd_to_iocg(pd);
3052 if (dname && iocg->cfg_weight)
3053 seq_printf(sf, "%s %u\n", dname, iocg->cfg_weight / WEIGHT_ONE);
3058 static int ioc_weight_show(struct seq_file *sf, void *v)
3060 struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
3061 struct ioc_cgrp *iocc = blkcg_to_iocc(blkcg);
3063 seq_printf(sf, "default %u\n", iocc->dfl_weight / WEIGHT_ONE);
3064 blkcg_print_blkgs(sf, blkcg, ioc_weight_prfill,
3065 &blkcg_policy_iocost, seq_cft(sf)->private, false);
3069 static ssize_t ioc_weight_write(struct kernfs_open_file *of, char *buf,
3070 size_t nbytes, loff_t off)
3072 struct blkcg *blkcg = css_to_blkcg(of_css(of));
3073 struct ioc_cgrp *iocc = blkcg_to_iocc(blkcg);
3074 struct blkg_conf_ctx ctx;
3076 struct ioc_gq *iocg;
3080 if (!strchr(buf, ':')) {
3081 struct blkcg_gq *blkg;
3083 if (!sscanf(buf, "default %u", &v) && !sscanf(buf, "%u", &v))
3086 if (v < CGROUP_WEIGHT_MIN || v > CGROUP_WEIGHT_MAX)
3089 spin_lock_irq(&blkcg->lock);
3090 iocc->dfl_weight = v * WEIGHT_ONE;
3091 hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
3092 struct ioc_gq *iocg = blkg_to_iocg(blkg);
3095 spin_lock(&iocg->ioc->lock);
3096 ioc_now(iocg->ioc, &now);
3097 weight_updated(iocg, &now);
3098 spin_unlock(&iocg->ioc->lock);
3101 spin_unlock_irq(&blkcg->lock);
3106 ret = blkg_conf_prep(blkcg, &blkcg_policy_iocost, buf, &ctx);
3110 iocg = blkg_to_iocg(ctx.blkg);
3112 if (!strncmp(ctx.body, "default", 7)) {
3115 if (!sscanf(ctx.body, "%u", &v))
3117 if (v < CGROUP_WEIGHT_MIN || v > CGROUP_WEIGHT_MAX)
3121 spin_lock(&iocg->ioc->lock);
3122 iocg->cfg_weight = v * WEIGHT_ONE;
3123 ioc_now(iocg->ioc, &now);
3124 weight_updated(iocg, &now);
3125 spin_unlock(&iocg->ioc->lock);
3127 blkg_conf_finish(&ctx);
3131 blkg_conf_finish(&ctx);
3135 static u64 ioc_qos_prfill(struct seq_file *sf, struct blkg_policy_data *pd,
3138 const char *dname = blkg_dev_name(pd->blkg);
3139 struct ioc *ioc = pd_to_iocg(pd)->ioc;
3144 seq_printf(sf, "%s enable=%d ctrl=%s rpct=%u.%02u rlat=%u wpct=%u.%02u wlat=%u min=%u.%02u max=%u.%02u\n",
3145 dname, ioc->enabled, ioc->user_qos_params ? "user" : "auto",
3146 ioc->params.qos[QOS_RPPM] / 10000,
3147 ioc->params.qos[QOS_RPPM] % 10000 / 100,
3148 ioc->params.qos[QOS_RLAT],
3149 ioc->params.qos[QOS_WPPM] / 10000,
3150 ioc->params.qos[QOS_WPPM] % 10000 / 100,
3151 ioc->params.qos[QOS_WLAT],
3152 ioc->params.qos[QOS_MIN] / 10000,
3153 ioc->params.qos[QOS_MIN] % 10000 / 100,
3154 ioc->params.qos[QOS_MAX] / 10000,
3155 ioc->params.qos[QOS_MAX] % 10000 / 100);
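/*
 * The ppm parameters are printed as percentages with two decimals, e.g. a
 * stored QOS_RPPM of 950000 (95% in parts-per-million) shows up as
 * rpct=95.00.
 */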
3159 static int ioc_qos_show(struct seq_file *sf, void *v)
3161 struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
3163 blkcg_print_blkgs(sf, blkcg, ioc_qos_prfill,
3164 &blkcg_policy_iocost, seq_cft(sf)->private, false);
3168 static const match_table_t qos_ctrl_tokens = {
3169 { QOS_ENABLE, "enable=%u" },
3170 { QOS_CTRL, "ctrl=%s" },
3171 { NR_QOS_CTRL_PARAMS, NULL },
3174 static const match_table_t qos_tokens = {
3175 { QOS_RPPM, "rpct=%s" },
3176 { QOS_RLAT, "rlat=%u" },
3177 { QOS_WPPM, "wpct=%s" },
3178 { QOS_WLAT, "wlat=%u" },
3179 { QOS_MIN, "min=%s" },
3180 { QOS_MAX, "max=%s" },
3181 { NR_QOS_PARAMS, NULL },
3184 static ssize_t ioc_qos_write(struct kernfs_open_file *of, char *input,
3185 size_t nbytes, loff_t off)
3187 struct block_device *bdev;
3188 struct gendisk *disk;
3190 u32 qos[NR_QOS_PARAMS];
3195 bdev = blkcg_conf_open_bdev(&input);
3197 return PTR_ERR(bdev);
3199 disk = bdev->bd_disk;
3200 ioc = q_to_ioc(disk->queue);
3202 ret = blk_iocost_init(disk);
3205 ioc = q_to_ioc(disk->queue);
3208 spin_lock_irq(&ioc->lock);
3209 memcpy(qos, ioc->params.qos, sizeof(qos));
3210 enable = ioc->enabled;
3211 user = ioc->user_qos_params;
3212 spin_unlock_irq(&ioc->lock);
3214 while ((p = strsep(&input, " \t\n"))) {
3215 substring_t args[MAX_OPT_ARGS];
3223 switch (match_token(p, qos_ctrl_tokens, args)) {
3225 match_u64(&args[0], &v);
3229 match_strlcpy(buf, &args[0], sizeof(buf));
3230 if (!strcmp(buf, "auto"))
3232 else if (!strcmp(buf, "user"))
3239 tok = match_token(p, qos_tokens, args);
3243 if (match_strlcpy(buf, &args[0], sizeof(buf)) >=
3246 if (cgroup_parse_float(buf, 2, &v))
3248 if (v < 0 || v > 10000)
3254 if (match_u64(&args[0], &v))
3260 if (match_strlcpy(buf, &args[0], sizeof(buf)) >=
3263 if (cgroup_parse_float(buf, 2, &v))
3267 qos[tok] = clamp_t(s64, v * 100,
3268 VRATE_MIN_PPM, VRATE_MAX_PPM);
3276 if (qos[QOS_MIN] > qos[QOS_MAX])
3279 spin_lock_irq(&ioc->lock);
3282 blk_stat_enable_accounting(disk->queue);
3283 blk_queue_flag_set(QUEUE_FLAG_RQ_ALLOC_TIME, disk->queue);
3284 ioc->enabled = true;
3286 blk_queue_flag_clear(QUEUE_FLAG_RQ_ALLOC_TIME, disk->queue);
3287 ioc->enabled = false;
3291 memcpy(ioc->params.qos, qos, sizeof(qos));
3292 ioc->user_qos_params = true;
3294 ioc->user_qos_params = false;
3297 ioc_refresh_params(ioc, true);
3298 spin_unlock_irq(&ioc->lock);
3300 blkdev_put_no_open(bdev);
3305 blkdev_put_no_open(bdev);
3309 static u64 ioc_cost_model_prfill(struct seq_file *sf,
3310 struct blkg_policy_data *pd, int off)
3312 const char *dname = blkg_dev_name(pd->blkg);
3313 struct ioc *ioc = pd_to_iocg(pd)->ioc;
3314 u64 *u = ioc->params.i_lcoefs;
3319 seq_printf(sf, "%s ctrl=%s model=linear "
3320 "rbps=%llu rseqiops=%llu rrandiops=%llu "
3321 "wbps=%llu wseqiops=%llu wrandiops=%llu\n",
3322 dname, ioc->user_cost_model ? "user" : "auto",
3323 u[I_LCOEF_RBPS], u[I_LCOEF_RSEQIOPS], u[I_LCOEF_RRANDIOPS],
3324 u[I_LCOEF_WBPS], u[I_LCOEF_WSEQIOPS], u[I_LCOEF_WRANDIOPS]);
3328 static int ioc_cost_model_show(struct seq_file *sf, void *v)
3330 struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
3332 blkcg_print_blkgs(sf, blkcg, ioc_cost_model_prfill,
3333 &blkcg_policy_iocost, seq_cft(sf)->private, false);
3337 static const match_table_t cost_ctrl_tokens = {
3338 { COST_CTRL, "ctrl=%s" },
3339 { COST_MODEL, "model=%s" },
3340 { NR_COST_CTRL_PARAMS, NULL },
3343 static const match_table_t i_lcoef_tokens = {
3344 { I_LCOEF_RBPS, "rbps=%u" },
3345 { I_LCOEF_RSEQIOPS, "rseqiops=%u" },
3346 { I_LCOEF_RRANDIOPS, "rrandiops=%u" },
3347 { I_LCOEF_WBPS, "wbps=%u" },
3348 { I_LCOEF_WSEQIOPS, "wseqiops=%u" },
3349 { I_LCOEF_WRANDIOPS, "wrandiops=%u" },
3350 { NR_I_LCOEFS, NULL },
3353 static ssize_t ioc_cost_model_write(struct kernfs_open_file *of, char *input,
3354 size_t nbytes, loff_t off)
3356 struct block_device *bdev;
3363 bdev = blkcg_conf_open_bdev(&input);
3365 return PTR_ERR(bdev);
3367 ioc = q_to_ioc(bdev_get_queue(bdev));
3369 ret = blk_iocost_init(bdev->bd_disk);
3372 ioc = q_to_ioc(bdev_get_queue(bdev));
3375 spin_lock_irq(&ioc->lock);
3376 memcpy(u, ioc->params.i_lcoefs, sizeof(u));
3377 user = ioc->user_cost_model;
3378 spin_unlock_irq(&ioc->lock);
3380 while ((p = strsep(&input, " \t\n"))) {
3381 substring_t args[MAX_OPT_ARGS];
3389 switch (match_token(p, cost_ctrl_tokens, args)) {
3391 match_strlcpy(buf, &args[0], sizeof(buf));
3392 if (!strcmp(buf, "auto"))
3394 else if (!strcmp(buf, "user"))
3400 match_strlcpy(buf, &args[0], sizeof(buf));
3401 if (strcmp(buf, "linear"))
3406 tok = match_token(p, i_lcoef_tokens, args);
3407 if (tok == NR_I_LCOEFS)
3409 if (match_u64(&args[0], &v))
3415 spin_lock_irq(&ioc->lock);
3417 memcpy(ioc->params.i_lcoefs, u, sizeof(u));
3418 ioc->user_cost_model = true;
3420 ioc->user_cost_model = false;
3422 ioc_refresh_params(ioc, true);
3423 spin_unlock_irq(&ioc->lock);
3425 blkdev_put_no_open(bdev);
3431 blkdev_put_no_open(bdev);
3435 static struct cftype ioc_files[] = {
3438 .flags = CFTYPE_NOT_ON_ROOT,
3439 .seq_show = ioc_weight_show,
3440 .write = ioc_weight_write,
3444 .flags = CFTYPE_ONLY_ON_ROOT,
3445 .seq_show = ioc_qos_show,
3446 .write = ioc_qos_write,
3449 .name = "cost.model",
3450 .flags = CFTYPE_ONLY_ON_ROOT,
3451 .seq_show = ioc_cost_model_show,
3452 .write = ioc_cost_model_write,
3457 static struct blkcg_policy blkcg_policy_iocost = {
3458 .dfl_cftypes = ioc_files,
3459 .cpd_alloc_fn = ioc_cpd_alloc,
3460 .cpd_free_fn = ioc_cpd_free,
3461 .pd_alloc_fn = ioc_pd_alloc,
3462 .pd_init_fn = ioc_pd_init,
3463 .pd_free_fn = ioc_pd_free,
3464 .pd_stat_fn = ioc_pd_stat,
3467 static int __init ioc_init(void)
3469 return blkcg_policy_register(&blkcg_policy_iocost);
3472 static void __exit ioc_exit(void)
3474 blkcg_policy_unregister(&blkcg_policy_iocost);
3477 module_init(ioc_init);
3478 module_exit(ioc_exit);