1 /* SPDX-License-Identifier: GPL-2.0
3 * IO cost model based controller.
5 * Copyright (C) 2019 Tejun Heo <tj@kernel.org>
6 * Copyright (C) 2019 Andy Newell <newella@fb.com>
7 * Copyright (C) 2019 Facebook
 * One challenge of controlling IO resources is the lack of a trivially
 * observable cost metric. This is distinguished from CPU and memory where
 * wallclock time and the number of bytes can serve as accurate enough
 * approximations.
 *
14 * Bandwidth and iops are the most commonly used metrics for IO devices but
15 * depending on the type and specifics of the device, different IO patterns
16 * easily lead to multiple orders of magnitude variations rendering them
17 * useless for the purpose of IO capacity distribution. While on-device
 * time, with a lot of crutches, could serve as a useful approximation for
19 * non-queued rotational devices, this is no longer viable with modern
20 * devices, even the rotational ones.
22 * While there is no cost metric we can trivially observe, it isn't a
23 * complete mystery. For example, on a rotational device, seek cost
24 * dominates while a contiguous transfer contributes a smaller amount
25 * proportional to the size. If we can characterize at least the relative
26 * costs of these different types of IOs, it should be possible to
 * implement a reasonable work-conserving proportional IO resource
 * distribution.
 *
 * 1. IO Cost Model
 *
 * The IO cost model estimates the cost of an IO given its basic parameters and
33 * history (e.g. the end sector of the last IO). The cost is measured in
34 * device time. If a given IO is estimated to cost 10ms, the device should
35 * be able to process ~100 of those IOs in a second.
37 * Currently, there's only one builtin cost model - linear. Each IO is
38 * classified as sequential or random and given a base cost accordingly.
39 * On top of that, a size cost proportional to the length of the IO is
40 * added. While simple, this model captures the operational
 * characteristics of a wide variety of devices well enough. Default
 * parameters for several different classes of devices are provided and the
43 * parameters can be configured from userspace via
44 * /sys/fs/cgroup/io.cost.model.
46 * If needed, tools/cgroup/iocost_coef_gen.py can be used to generate
47 * device-specific coefficients.
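 *
 * As an illustrative sketch (the names here are descriptive, not the
 * actual variables used below), the linear model prices an IO roughly as
 *
 *	cost = (is_seq ? seqio_base_coef : randio_base_coef)
 *		+ nr_pages * page_coef;
 *
 * with the coefficients derived from the configured bps/iops parameters,
 * see calc_lcoefs() below.
 *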
 * 2. Control Strategy
 *
 * The device virtual time (vtime) is used as the primary control metric.
52 * The control strategy is composed of the following three parts.
54 * 2-1. Vtime Distribution
56 * When a cgroup becomes active in terms of IOs, its hierarchical share is
57 * calculated. Please consider the following hierarchy where the numbers
58 * inside parentheses denote the configured weights.
 *
 *                  root
 *                /      \
 *         A (w:100)    B (w:300)
 *         /      \
 *  A0 (w:100)  A1 (w:100)
 *
66 * If B is idle and only A0 and A1 are actively issuing IOs, as the two are
67 * of equal weight, each gets 50% share. If then B starts issuing IOs, B
 * gets 300/(100+300) or 75% share, and A0 and A1 equally split the rest,
69 * 12.5% each. The distribution mechanism only cares about these flattened
70 * shares. They're called hweights (hierarchical weights) and always add
 * up to 1 (WEIGHT_ONE).
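 *
 * For the hierarchy above, A0's hweight can be derived by multiplying
 * its share of the weight at each level down from the root:
 *
 *	hweight(A0) = w(A) / (w(A) + w(B)) * w(A0) / (w(A0) + w(A1))
 *	            = 100 / 400 * 100 / 200 = 12.5%
 *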
73 * A given cgroup's vtime runs slower in inverse proportion to its hweight.
74 * For example, with 12.5% weight, A0's time runs 8 times slower (100/12.5)
75 * against the device vtime - an IO which takes 10ms on the underlying
76 * device is considered to take 80ms on A0.
78 * This constitutes the basis of IO capacity distribution. Each cgroup's
79 * vtime is running at a rate determined by its hweight. A cgroup tracks
80 * the vtime consumed by past IOs and can issue a new IO iff doing so
81 * wouldn't outrun the current device vtime. Otherwise, the IO is
82 * suspended until the vtime has progressed enough to cover it.
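 *
 * A simplified sketch of the issue-time check (the actual code also
 * factors in margins and accumulated debt):
 *
 *	if (atomic64_read(&iocg->vtime) + cost <= now.vnow)
 *		issue the bio;
 *	else
 *		wait until the device vtime catches up;
 *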
84 * 2-2. Vrate Adjustment
86 * It's unrealistic to expect the cost model to be perfect. There are too
87 * many devices and even on the same device the overall performance
88 * fluctuates depending on numerous factors such as IO mixture and device
89 * internal garbage collection. The controller needs to adapt dynamically.
91 * This is achieved by adjusting the overall IO rate according to how busy
92 * the device is. If the device becomes overloaded, we're sending down too
93 * many IOs and should generally slow down. If there are waiting issuers
 * but the device isn't saturated, we're issuing too few and should
 * increase the vrate.
 *
97 * To slow down, we lower the vrate - the rate at which the device vtime
98 * passes compared to the wall clock. For example, if the vtime is running
99 * at the vrate of 75%, all cgroups added up would only be able to issue
100 * 750ms worth of IOs per second, and vice-versa for speeding up.
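 *
 * Concretely, the device vtime progresses as
 *
 *	vnow = period_at_vtime + (wallclock_now - period_at) * vrate
 *
 * which is how ioc_now() computes it below.
 *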
 * Device busyness is determined using two criteria - rq wait and
103 * completion latencies.
105 * When a device gets saturated, the on-device and then the request queues
106 * fill up and a bio which is ready to be issued has to wait for a request
107 * to become available. When this delay becomes noticeable, it's a clear
108 * indication that the device is saturated and we lower the vrate. This
109 * saturation signal is fairly conservative as it only triggers when both
 * hardware and software queues are filled up, and is used as the default
 * busy signal.
 *
113 * As devices can have deep queues and be unfair in how the queued commands
 * are executed, solely depending on rq wait may not result in satisfactory
115 * control quality. For a better control quality, completion latency QoS
116 * parameters can be configured so that the device is considered saturated
117 * if N'th percentile completion latency rises above the set point.
119 * The completion latency requirements are a function of both the
120 * underlying device characteristics and the desired IO latency quality of
121 * service. There is an inherent trade-off - the tighter the latency QoS,
122 * the higher the bandwidth lossage. Latency QoS is disabled by default
123 * and can be set through /sys/fs/cgroup/io.cost.qos.
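 *
 * An illustrative io.cost.qos line - MAJ:MIN followed by key=value
 * pairs (the values here are only examples):
 *
 *	8:16 enable=1 ctrl=user rpct=95.00 rlat=10000 wpct=95.00 wlat=20000 min=50.00 max=150.00
 *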
125 * 2-3. Work Conservation
127 * Imagine two cgroups A and B with equal weights. A is issuing a small IO
128 * periodically while B is sending out enough parallel IOs to saturate the
129 * device on its own. Let's say A's usage amounts to 100ms worth of IO
130 * cost per second, i.e., 10% of the device capacity. The naive
 * distribution of half and half would lead to 60% utilization of the
 * device (A using only its 10% while B is capped at its 50% share), a
 * significant reduction in the total amount of work done
 * compared to free-for-all competition. This is too high a cost to pay
 * for IO control.
 *
136 * To conserve the total amount of work done, we keep track of how much
137 * each active cgroup is actually using and yield part of its weight if
138 * there are other cgroups which can make use of it. In the above case,
139 * A's weight will be lowered so that it hovers above the actual usage and
140 * B would be able to use the rest.
142 * As we don't want to penalize a cgroup for donating its weight, the
143 * surplus weight adjustment factors in a margin and has an immediate
144 * snapback mechanism in case the cgroup needs more IO vtime for itself.
146 * Note that adjusting down surplus weights has the same effects as
147 * accelerating vtime for other cgroups and work conservation can also be
 * implemented by adjusting vrate dynamically. However, squaring away who
 * can donate and who should take back how much requires hweight
 * propagations anyway, making it easier to implement and understand as a
 * separate mechanism.
 *
 * 3. Monitoring
 *
 * Instead of debugfs or other clumsy monitoring mechanisms, this
156 * controller uses a drgn based monitoring script -
157 * tools/cgroup/iocost_monitor.py. For details on drgn, please see
 * https://github.com/osandov/drgn. The output looks like the following.
160 * sdb RUN per=300ms cur_per=234.218:v203.695 busy= +1 vrate= 62.12%
161 * active weight hweight% inflt% dbt delay usages%
162 * test/a * 50/ 50 33.33/ 33.33 27.65 2 0*041 033:033:033
163 * test/b * 100/ 100 66.67/ 66.67 17.56 0 0*000 066:079:077
165 * - per : Timer period
166 * - cur_per : Internal wall and device vtime clock
167 * - vrate : Device virtual time rate against wall clock
168 * - weight : Surplus-adjusted and configured weights
169 * - hweight : Surplus-adjusted and configured hierarchical weights
170 * - inflt : The percentage of in-flight IO cost at the end of last period
 * - delay   : Deferred issuer delay induction level and duration
172 * - usages : Usage history
175 #include <linux/kernel.h>
176 #include <linux/module.h>
177 #include <linux/timer.h>
178 #include <linux/time64.h>
179 #include <linux/parser.h>
180 #include <linux/sched/signal.h>
181 #include <linux/blk-cgroup.h>
182 #include <asm/local.h>
183 #include <asm/local64.h>
184 #include "blk-rq-qos.h"
185 #include "blk-stat.h"
188 #ifdef CONFIG_TRACEPOINTS
190 /* copied from TRACE_CGROUP_PATH, see cgroup-internal.h */
191 #define TRACE_IOCG_PATH_LEN 1024
192 static DEFINE_SPINLOCK(trace_iocg_path_lock);
193 static char trace_iocg_path[TRACE_IOCG_PATH_LEN];
195 #define TRACE_IOCG_PATH(type, iocg, ...) \
197 unsigned long flags; \
198 if (trace_iocost_##type##_enabled()) { \
199 spin_lock_irqsave(&trace_iocg_path_lock, flags); \
200 cgroup_path(iocg_to_blkg(iocg)->blkcg->css.cgroup, \
201 trace_iocg_path, TRACE_IOCG_PATH_LEN); \
202 trace_iocost_##type(iocg, trace_iocg_path, \
204 spin_unlock_irqrestore(&trace_iocg_path_lock, flags); \
#else	/* CONFIG_TRACEPOINTS */
209 #define TRACE_IOCG_PATH(type, iocg, ...) do { } while (0)
#endif	/* CONFIG_TRACEPOINTS */
215 /* timer period is calculated from latency requirements, bound it */
216 MIN_PERIOD = USEC_PER_MSEC,
217 MAX_PERIOD = USEC_PER_SEC,
220 * iocg->vtime is targeted at 50% behind the device vtime, which
221 * serves as its IO credit buffer. Surplus weight adjustment is
222 * immediately canceled if the vtime margin runs below 10%.
226 MARGIN_TARGET_PCT = 50,
228 INUSE_ADJ_STEP_PCT = 25,
230 /* Have some play in timer operations */
233 /* 1/64k is granular enough and can easily be handled w/ u32 */
234 WEIGHT_ONE = 1 << 16,
239 * As vtime is used to calculate the cost of each IO, it needs to
240 * be fairly high precision. For example, it should be able to
241 * represent the cost of a single page worth of discard with
	 * sufficient accuracy. At the same time, it should be able to
243 * represent reasonably long enough durations to be useful and
244 * convenient during operation.
246 * 1s worth of vtime is 2^37. This gives us both sub-nanosecond
247 * granularity and days of wrap-around time even at extreme vrates.
249 VTIME_PER_SEC_SHIFT = 37,
250 VTIME_PER_SEC = 1LLU << VTIME_PER_SEC_SHIFT,
251 VTIME_PER_USEC = VTIME_PER_SEC / USEC_PER_SEC,
252 VTIME_PER_NSEC = VTIME_PER_SEC / NSEC_PER_SEC,
254 /* bound vrate adjustments within two orders of magnitude */
255 VRATE_MIN_PPM = 10000, /* 1% */
256 VRATE_MAX_PPM = 100000000, /* 10000% */
258 VRATE_MIN = VTIME_PER_USEC * VRATE_MIN_PPM / MILLION,
259 VRATE_CLAMP_ADJ_PCT = 4,
261 /* switch iff the conditions are met for longer than this */
262 AUTOP_CYCLE_NSEC = 10LLU * NSEC_PER_SEC,
266 /* if IOs end up waiting for requests, issue less */
267 RQ_WAIT_BUSY_PCT = 5,
	/* unbusy hysteresis */
273 * The effect of delay is indirect and non-linear and a huge amount of
274 * future debt can accumulate abruptly while unthrottled. Linearly scale
275 * up delay as debt is going up and then let it decay exponentially.
276 * This gives us quick ramp ups while delay is accumulating and long
	 * tails which can help reduce the frequency of debt explosions on
278 * unthrottle. The parameters are experimentally determined.
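	 *
	 * A sketch of the resulting mapping, with debt_pct standing for the
	 * debt overage as a percentage of the period (see iocg_kick_delay()):
	 *
	 *	delay = MIN_DELAY + (MAX_DELAY - MIN_DELAY) *
	 *		(debt_pct - MIN_DELAY_THR_PCT) /
	 *		(MAX_DELAY_THR_PCT - MIN_DELAY_THR_PCT);
	 *
	 * clamped between MIN_DELAY and MAX_DELAY, decaying by half every
	 * second afterwards.
	 *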
280 * The delay mechanism provides adequate protection and behavior in many
	 * cases. However, this is far from ideal and falls short on both
	 * fronts. The debtors are often throttled too harshly, costing a
	 * significant amount of fairness and possibly total work, while the
	 * protection against their impacts on the system can be choppy and
	 * unreliable.
	 *
287 * The shortcoming primarily stems from the fact that, unlike for page
	 * cache, the kernel doesn't have a well-defined back-pressure propagation
289 * mechanism and policies for anonymous memory. Fully addressing this
290 * issue will likely require substantial improvements in the area.
292 MIN_DELAY_THR_PCT = 500,
293 MAX_DELAY_THR_PCT = 25000,
295 MAX_DELAY = 250 * USEC_PER_MSEC,
297 /* halve debts if avg usage over 100ms is under 50% */
299 DFGV_PERIOD = 100 * USEC_PER_MSEC,
	/* don't let commands which take a very long time pin the lagging state for too long */
302 MAX_LAGGING_PERIODS = 10,
	 * Count IO size in 4k pages. The 12-bit shift keeps the
	 * size-proportional components of the cost calculation within a
	 * similar number of digits as the per-IO cost components.
310 IOC_PAGE_SIZE = 1 << IOC_PAGE_SHIFT,
311 IOC_SECT_TO_PAGE_SHIFT = IOC_PAGE_SHIFT - SECTOR_SHIFT,
	/* if IOs are further than 16M apart, consider them random for the linear model */
314 LCOEF_RANDIO_PAGES = 4096,
323 /* io.cost.qos controls including per-dev enable of the whole controller */
330 /* io.cost.qos params */
341 /* io.cost.model controls */
348 /* builtin linear cost model coefficients */
380 u32 qos[NR_QOS_PARAMS];
381 u64 i_lcoefs[NR_I_LCOEFS];
382 u64 lcoefs[NR_LCOEFS];
383 u32 too_fast_vrate_pct;
384 u32 too_slow_vrate_pct;
400 struct ioc_pcpu_stat {
401 struct ioc_missed missed[2];
403 local64_t rq_wait_ns;
413 struct ioc_params params;
414 struct ioc_margins margins;
421 struct timer_list timer;
422 struct list_head active_iocgs; /* active cgroups */
423 struct ioc_pcpu_stat __percpu *pcpu_stat;
425 enum ioc_running running;
426 atomic64_t vtime_rate;
430 seqcount_spinlock_t period_seqcount;
431 u64 period_at; /* wallclock starttime */
432 u64 period_at_vtime; /* vtime starttime */
434 atomic64_t cur_period; /* inc'd each period */
435 int busy_level; /* saturation history */
437 bool weights_updated;
438 atomic_t hweight_gen; /* for lazy hweights */
	/* debt forgiveness */
443 u64 dfgv_usage_us_sum;
445 u64 autop_too_fast_at;
446 u64 autop_too_slow_at;
448 bool user_qos_params:1;
449 bool user_cost_model:1;
452 struct iocg_pcpu_stat {
453 local64_t abs_vusage;
463 /* per device-cgroup pair */
465 struct blkg_policy_data pd;
	 * An iocg can get its weight from two sources - an explicit
470 * per-device-cgroup configuration or the default weight of the
471 * cgroup. `cfg_weight` is the explicit per-device-cgroup
	 * configuration. `weight` is the effective weight considering both
	 * sources.
	 *
475 * When an idle cgroup becomes active its `active` goes from 0 to
476 * `weight`. `inuse` is the surplus adjusted active weight.
	 * `active` and `inuse` are used to calculate `hweight_active` and
	 * `hweight_inuse`.
	 *
480 * `last_inuse` remembers `inuse` while an iocg is idle to persist
481 * surplus adjustments.
	 * `inuse` may be adjusted dynamically during the period. `saved_*` are
	 * used to determine and track adjustments.
494 sector_t cursor; /* to detect randio */
497 * `vtime` is this iocg's vtime cursor which progresses as IOs are
498 * issued. If lagging behind device vtime, the delta represents
	 * the currently available IO budget. If running ahead, the
	 * overage.
	 *
502 * `vtime_done` is the same but progressed on completion rather
503 * than issue. The delta behind `vtime` represents the cost of
504 * currently in-flight IOs.
507 atomic64_t done_vtime;
510 /* current delay in effect and when it started */
515 * The period this iocg was last active in. Used for deactivation
516 * and invalidating `vtime`.
518 atomic64_t active_period;
519 struct list_head active_list;
521 /* see __propagate_weights() and current_hweight() for details */
522 u64 child_active_sum;
524 u64 child_adjusted_sum;
528 u32 hweight_donating;
529 u32 hweight_after_donation;
531 struct list_head walk_list;
532 struct list_head surplus_list;
534 struct wait_queue_head waitq;
535 struct hrtimer waitq_timer;
537 /* timestamp at the latest activation */
541 struct iocg_pcpu_stat __percpu *pcpu_stat;
542 struct iocg_stat local_stat;
543 struct iocg_stat desc_stat;
544 struct iocg_stat last_stat;
545 u64 last_stat_abs_vusage;
551 /* this iocg's depth in the hierarchy and ancestors including self */
553 struct ioc_gq *ancestors[];
558 struct blkcg_policy_data cpd;
559 unsigned int dfl_weight;
570 struct wait_queue_entry wait;
576 struct iocg_wake_ctx {
582 static const struct ioc_params autop[] = {
585 [QOS_RLAT] = 250000, /* 250ms */
587 [QOS_MIN] = VRATE_MIN_PPM,
588 [QOS_MAX] = VRATE_MAX_PPM,
591 [I_LCOEF_RBPS] = 174019176,
592 [I_LCOEF_RSEQIOPS] = 41708,
593 [I_LCOEF_RRANDIOPS] = 370,
594 [I_LCOEF_WBPS] = 178075866,
595 [I_LCOEF_WSEQIOPS] = 42705,
596 [I_LCOEF_WRANDIOPS] = 378,
601 [QOS_RLAT] = 25000, /* 25ms */
603 [QOS_MIN] = VRATE_MIN_PPM,
604 [QOS_MAX] = VRATE_MAX_PPM,
607 [I_LCOEF_RBPS] = 245855193,
608 [I_LCOEF_RSEQIOPS] = 61575,
609 [I_LCOEF_RRANDIOPS] = 6946,
610 [I_LCOEF_WBPS] = 141365009,
611 [I_LCOEF_WSEQIOPS] = 33716,
612 [I_LCOEF_WRANDIOPS] = 26796,
617 [QOS_RLAT] = 25000, /* 25ms */
619 [QOS_MIN] = VRATE_MIN_PPM,
620 [QOS_MAX] = VRATE_MAX_PPM,
623 [I_LCOEF_RBPS] = 488636629,
624 [I_LCOEF_RSEQIOPS] = 8932,
625 [I_LCOEF_RRANDIOPS] = 8518,
626 [I_LCOEF_WBPS] = 427891549,
627 [I_LCOEF_WSEQIOPS] = 28755,
628 [I_LCOEF_WRANDIOPS] = 21940,
630 .too_fast_vrate_pct = 500,
634 [QOS_RLAT] = 5000, /* 5ms */
636 [QOS_MIN] = VRATE_MIN_PPM,
637 [QOS_MAX] = VRATE_MAX_PPM,
640 [I_LCOEF_RBPS] = 3102524156LLU,
641 [I_LCOEF_RSEQIOPS] = 724816,
642 [I_LCOEF_RRANDIOPS] = 778122,
643 [I_LCOEF_WBPS] = 1742780862LLU,
644 [I_LCOEF_WSEQIOPS] = 425702,
645 [I_LCOEF_WRANDIOPS] = 443193,
647 .too_slow_vrate_pct = 10,
652 * vrate adjust percentages indexed by ioc->busy_level. We adjust up on
653 * vtime credit shortage and down on device saturation.
655 static u32 vrate_adj_pct[] =
657 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
658 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
659 4, 4, 4, 4, 4, 4, 4, 4, 8, 8, 8, 8, 8, 8, 8, 8, 16 };
661 static struct blkcg_policy blkcg_policy_iocost;
663 /* accessors and helpers */
664 static struct ioc *rqos_to_ioc(struct rq_qos *rqos)
666 return container_of(rqos, struct ioc, rqos);
669 static struct ioc *q_to_ioc(struct request_queue *q)
671 return rqos_to_ioc(rq_qos_id(q, RQ_QOS_COST));
674 static const char *q_name(struct request_queue *q)
676 if (blk_queue_registered(q))
677 return kobject_name(q->kobj.parent);
682 static const char __maybe_unused *ioc_name(struct ioc *ioc)
684 return q_name(ioc->rqos.q);
687 static struct ioc_gq *pd_to_iocg(struct blkg_policy_data *pd)
689 return pd ? container_of(pd, struct ioc_gq, pd) : NULL;
692 static struct ioc_gq *blkg_to_iocg(struct blkcg_gq *blkg)
694 return pd_to_iocg(blkg_to_pd(blkg, &blkcg_policy_iocost));
697 static struct blkcg_gq *iocg_to_blkg(struct ioc_gq *iocg)
699 return pd_to_blkg(&iocg->pd);
702 static struct ioc_cgrp *blkcg_to_iocc(struct blkcg *blkcg)
704 return container_of(blkcg_to_cpd(blkcg, &blkcg_policy_iocost),
705 struct ioc_cgrp, cpd);
709 * Scale @abs_cost to the inverse of @hw_inuse. The lower the hierarchical
710 * weight, the more expensive each IO. Must round up.
712 static u64 abs_cost_to_cost(u64 abs_cost, u32 hw_inuse)
714 return DIV64_U64_ROUND_UP(abs_cost * WEIGHT_ONE, hw_inuse);
718 * The inverse of abs_cost_to_cost(). Must round up.
720 static u64 cost_to_abs_cost(u64 cost, u32 hw_inuse)
722 return DIV64_U64_ROUND_UP(cost * hw_inuse, WEIGHT_ONE);
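
/*
 * Example: with hw_inuse at 50% (WEIGHT_ONE / 2), an IO carrying an
 * absolute cost of 10ms worth of vtime is charged 20ms against the
 * iocg's budget - abs_cost_to_cost(c, WEIGHT_ONE / 2) == 2 * c - and
 * cost_to_abs_cost() maps it back the other way.
 */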
725 static void iocg_commit_bio(struct ioc_gq *iocg, struct bio *bio,
726 u64 abs_cost, u64 cost)
728 struct iocg_pcpu_stat *gcs;
730 bio->bi_iocost_cost = cost;
731 atomic64_add(cost, &iocg->vtime);
733 gcs = get_cpu_ptr(iocg->pcpu_stat);
734 local64_add(abs_cost, &gcs->abs_vusage);
738 static void iocg_lock(struct ioc_gq *iocg, bool lock_ioc, unsigned long *flags)
741 spin_lock_irqsave(&iocg->ioc->lock, *flags);
742 spin_lock(&iocg->waitq.lock);
744 spin_lock_irqsave(&iocg->waitq.lock, *flags);
748 static void iocg_unlock(struct ioc_gq *iocg, bool unlock_ioc, unsigned long *flags)
751 spin_unlock(&iocg->waitq.lock);
752 spin_unlock_irqrestore(&iocg->ioc->lock, *flags);
754 spin_unlock_irqrestore(&iocg->waitq.lock, *flags);
758 #define CREATE_TRACE_POINTS
759 #include <trace/events/iocost.h>
761 static void ioc_refresh_margins(struct ioc *ioc)
763 struct ioc_margins *margins = &ioc->margins;
764 u32 period_us = ioc->period_us;
765 u64 vrate = ioc->vtime_base_rate;
767 margins->min = (period_us * MARGIN_MIN_PCT / 100) * vrate;
768 margins->low = (period_us * MARGIN_LOW_PCT / 100) * vrate;
769 margins->target = (period_us * MARGIN_TARGET_PCT / 100) * vrate;
772 /* latency Qos params changed, update period_us and all the dependent params */
773 static void ioc_refresh_period_us(struct ioc *ioc)
775 u32 ppm, lat, multi, period_us;
777 lockdep_assert_held(&ioc->lock);
779 /* pick the higher latency target */
780 if (ioc->params.qos[QOS_RLAT] >= ioc->params.qos[QOS_WLAT]) {
781 ppm = ioc->params.qos[QOS_RPPM];
782 lat = ioc->params.qos[QOS_RLAT];
784 ppm = ioc->params.qos[QOS_WPPM];
785 lat = ioc->params.qos[QOS_WLAT];
789 * We want the period to be long enough to contain a healthy number
790 * of IOs while short enough for granular control. Define it as a
791 * multiple of the latency target. Ideally, the multiplier should
792 * be scaled according to the percentile so that it would nominally
793 * contain a certain number of requests. Let's be simpler and
794 * scale it linearly so that it's 2x >= pct(90) and 10x at pct(50).
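	 *
	 * e.g. pct(90) -> ppm = 900000 -> multi = max(100000 / 50000, 2) = 2
	 *      pct(50) -> ppm = 500000 -> multi = 500000 / 50000 = 10
	 */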
797 multi = max_t(u32, (MILLION - ppm) / 50000, 2);
800 period_us = multi * lat;
801 period_us = clamp_t(u32, period_us, MIN_PERIOD, MAX_PERIOD);
803 /* calculate dependent params */
804 ioc->period_us = period_us;
805 ioc->timer_slack_ns = div64_u64(
806 (u64)period_us * NSEC_PER_USEC * TIMER_SLACK_PCT,
808 ioc_refresh_margins(ioc);
811 static int ioc_autop_idx(struct ioc *ioc)
813 int idx = ioc->autop_idx;
814 const struct ioc_params *p = &autop[idx];
819 if (!blk_queue_nonrot(ioc->rqos.q))
822 /* handle SATA SSDs w/ broken NCQ */
823 if (blk_queue_depth(ioc->rqos.q) == 1)
824 return AUTOP_SSD_QD1;
826 /* use one of the normal ssd sets */
827 if (idx < AUTOP_SSD_DFL)
828 return AUTOP_SSD_DFL;
830 /* if user is overriding anything, maintain what was there */
831 if (ioc->user_qos_params || ioc->user_cost_model)
834 /* step up/down based on the vrate */
835 vrate_pct = div64_u64(ioc->vtime_base_rate * 100, VTIME_PER_USEC);
836 now_ns = ktime_get_ns();
838 if (p->too_fast_vrate_pct && p->too_fast_vrate_pct <= vrate_pct) {
839 if (!ioc->autop_too_fast_at)
840 ioc->autop_too_fast_at = now_ns;
841 if (now_ns - ioc->autop_too_fast_at >= AUTOP_CYCLE_NSEC)
844 ioc->autop_too_fast_at = 0;
847 if (p->too_slow_vrate_pct && p->too_slow_vrate_pct >= vrate_pct) {
848 if (!ioc->autop_too_slow_at)
849 ioc->autop_too_slow_at = now_ns;
850 if (now_ns - ioc->autop_too_slow_at >= AUTOP_CYCLE_NSEC)
853 ioc->autop_too_slow_at = 0;
 * Take the following as input
862 * @bps maximum sequential throughput
863 * @seqiops maximum sequential 4k iops
864 * @randiops maximum random 4k iops
866 * and calculate the linear model cost coefficients.
868 * *@page per-page cost 1s / (@bps / 4096)
869 * *@seqio base cost of a seq IO max((1s / @seqiops) - *@page, 0)
 * *@randio	base cost of a rand IO	max((1s / @randiops) - *@page, 0)
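 *
 * For example, with hypothetical parameters @bps = 400MB/s and
 * @seqiops = 100k:
 *
 *	*@page = 1s / (400e6 / 4096) ~= 10.2us worth of vtime per page
 *	*@seqio = max(1s / 100k - 10.2us, 0) = 0 (the device is bandwidth-bound)
 */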
872 static void calc_lcoefs(u64 bps, u64 seqiops, u64 randiops,
873 u64 *page, u64 *seqio, u64 *randio)
877 *page = *seqio = *randio = 0;
880 u64 bps_pages = DIV_ROUND_UP_ULL(bps, IOC_PAGE_SIZE);
883 *page = DIV64_U64_ROUND_UP(VTIME_PER_SEC, bps_pages);
889 v = DIV64_U64_ROUND_UP(VTIME_PER_SEC, seqiops);
895 v = DIV64_U64_ROUND_UP(VTIME_PER_SEC, randiops);
901 static void ioc_refresh_lcoefs(struct ioc *ioc)
903 u64 *u = ioc->params.i_lcoefs;
904 u64 *c = ioc->params.lcoefs;
906 calc_lcoefs(u[I_LCOEF_RBPS], u[I_LCOEF_RSEQIOPS], u[I_LCOEF_RRANDIOPS],
907 &c[LCOEF_RPAGE], &c[LCOEF_RSEQIO], &c[LCOEF_RRANDIO]);
908 calc_lcoefs(u[I_LCOEF_WBPS], u[I_LCOEF_WSEQIOPS], u[I_LCOEF_WRANDIOPS],
909 &c[LCOEF_WPAGE], &c[LCOEF_WSEQIO], &c[LCOEF_WRANDIO]);
912 static bool ioc_refresh_params(struct ioc *ioc, bool force)
914 const struct ioc_params *p;
917 lockdep_assert_held(&ioc->lock);
919 idx = ioc_autop_idx(ioc);
922 if (idx == ioc->autop_idx && !force)
925 if (idx != ioc->autop_idx)
926 atomic64_set(&ioc->vtime_rate, VTIME_PER_USEC);
928 ioc->autop_idx = idx;
929 ioc->autop_too_fast_at = 0;
930 ioc->autop_too_slow_at = 0;
932 if (!ioc->user_qos_params)
933 memcpy(ioc->params.qos, p->qos, sizeof(p->qos));
934 if (!ioc->user_cost_model)
935 memcpy(ioc->params.i_lcoefs, p->i_lcoefs, sizeof(p->i_lcoefs));
937 ioc_refresh_period_us(ioc);
938 ioc_refresh_lcoefs(ioc);
940 ioc->vrate_min = DIV64_U64_ROUND_UP((u64)ioc->params.qos[QOS_MIN] *
941 VTIME_PER_USEC, MILLION);
942 ioc->vrate_max = div64_u64((u64)ioc->params.qos[QOS_MAX] *
943 VTIME_PER_USEC, MILLION);
949 * When an iocg accumulates too much vtime or gets deactivated, we throw away
950 * some vtime, which lowers the overall device utilization. As the exact amount
951 * which is being thrown away is known, we can compensate by accelerating the
952 * vrate accordingly so that the extra vtime generated in the current period
953 * matches what got lost.
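 *
 * e.g. if 5ms worth of vtime was thrown away and 50ms is left in the
 * current period, the base rate is bumped by 5/50 = 10% of itself for the
 * remainder of the period, subject to the clamps below.
 */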
955 static void ioc_refresh_vrate(struct ioc *ioc, struct ioc_now *now)
957 s64 pleft = ioc->period_at + ioc->period_us - now->now;
958 s64 vperiod = ioc->period_us * ioc->vtime_base_rate;
959 s64 vcomp, vcomp_min, vcomp_max;
961 lockdep_assert_held(&ioc->lock);
963 /* we need some time left in this period */
968 * Calculate how much vrate should be adjusted to offset the error.
969 * Limit the amount of adjustment and deduct the adjusted amount from
972 vcomp = -div64_s64(ioc->vtime_err, pleft);
973 vcomp_min = -(ioc->vtime_base_rate >> 1);
974 vcomp_max = ioc->vtime_base_rate;
975 vcomp = clamp(vcomp, vcomp_min, vcomp_max);
977 ioc->vtime_err += vcomp * pleft;
979 atomic64_set(&ioc->vtime_rate, ioc->vtime_base_rate + vcomp);
981 /* bound how much error can accumulate */
982 ioc->vtime_err = clamp(ioc->vtime_err, -vperiod, vperiod);
985 /* take a snapshot of the current [v]time and vrate */
986 static void ioc_now(struct ioc *ioc, struct ioc_now *now)
990 now->now_ns = ktime_get();
991 now->now = ktime_to_us(now->now_ns);
992 now->vrate = atomic64_read(&ioc->vtime_rate);
995 * The current vtime is
997 * vtime at period start + (wallclock time since the start) * vrate
999 * As a consistent snapshot of `period_at_vtime` and `period_at` is
1000 * needed, they're seqcount protected.
1003 seq = read_seqcount_begin(&ioc->period_seqcount);
1004 now->vnow = ioc->period_at_vtime +
1005 (now->now - ioc->period_at) * now->vrate;
1006 } while (read_seqcount_retry(&ioc->period_seqcount, seq));
1009 static void ioc_start_period(struct ioc *ioc, struct ioc_now *now)
1011 WARN_ON_ONCE(ioc->running != IOC_RUNNING);
1013 write_seqcount_begin(&ioc->period_seqcount);
1014 ioc->period_at = now->now;
1015 ioc->period_at_vtime = now->vnow;
1016 write_seqcount_end(&ioc->period_seqcount);
1018 ioc->timer.expires = jiffies + usecs_to_jiffies(ioc->period_us);
1019 add_timer(&ioc->timer);
1023 * Update @iocg's `active` and `inuse` to @active and @inuse, update level
1024 * weight sums and propagate upwards accordingly. If @save, the current margin
1025 * is saved to be used as reference for later inuse in-period adjustments.
1027 static void __propagate_weights(struct ioc_gq *iocg, u32 active, u32 inuse,
1028 bool save, struct ioc_now *now)
1030 struct ioc *ioc = iocg->ioc;
1033 lockdep_assert_held(&ioc->lock);
1036 * For an active leaf node, its inuse shouldn't be zero or exceed
1037 * @active. An active internal node's inuse is solely determined by the
1038 * inuse to active ratio of its children regardless of @inuse.
1040 if (list_empty(&iocg->active_list) && iocg->child_active_sum) {
1041 inuse = DIV64_U64_ROUND_UP(active * iocg->child_inuse_sum,
1042 iocg->child_active_sum);
1044 inuse = clamp_t(u32, inuse, 1, active);
1047 iocg->last_inuse = iocg->inuse;
1049 iocg->saved_margin = now->vnow - atomic64_read(&iocg->vtime);
1051 if (active == iocg->active && inuse == iocg->inuse)
1054 for (lvl = iocg->level - 1; lvl >= 0; lvl--) {
1055 struct ioc_gq *parent = iocg->ancestors[lvl];
1056 struct ioc_gq *child = iocg->ancestors[lvl + 1];
1057 u32 parent_active = 0, parent_inuse = 0;
1059 /* update the level sums */
1060 parent->child_active_sum += (s32)(active - child->active);
1061 parent->child_inuse_sum += (s32)(inuse - child->inuse);
1062 /* apply the updates */
1063 child->active = active;
1064 child->inuse = inuse;
		 * The delta between inuse and active sums indicates that
		 * much of the weight is being given away. Parent's inuse
1069 * and active should reflect the ratio.
1071 if (parent->child_active_sum) {
1072 parent_active = parent->weight;
1073 parent_inuse = DIV64_U64_ROUND_UP(
1074 parent_active * parent->child_inuse_sum,
1075 parent->child_active_sum);
1078 /* do we need to keep walking up? */
1079 if (parent_active == parent->active &&
1080 parent_inuse == parent->inuse)
1083 active = parent_active;
1084 inuse = parent_inuse;
1087 ioc->weights_updated = true;
1090 static void commit_weights(struct ioc *ioc)
1092 lockdep_assert_held(&ioc->lock);
1094 if (ioc->weights_updated) {
1095 /* paired with rmb in current_hweight(), see there */
1097 atomic_inc(&ioc->hweight_gen);
1098 ioc->weights_updated = false;
1102 static void propagate_weights(struct ioc_gq *iocg, u32 active, u32 inuse,
1103 bool save, struct ioc_now *now)
1105 __propagate_weights(iocg, active, inuse, save, now);
1106 commit_weights(iocg->ioc);
1109 static void current_hweight(struct ioc_gq *iocg, u32 *hw_activep, u32 *hw_inusep)
1111 struct ioc *ioc = iocg->ioc;
1116 /* hot path - if uptodate, use cached */
1117 ioc_gen = atomic_read(&ioc->hweight_gen);
1118 if (ioc_gen == iocg->hweight_gen)
1122 * Paired with wmb in commit_weights(). If we saw the updated
1123 * hweight_gen, all the weight updates from __propagate_weights() are
1126 * We can race with weight updates during calculation and get it
1127 * wrong. However, hweight_gen would have changed and a future
1128 * reader will recalculate and we're guaranteed to discard the
1129 * wrong result soon.
1133 hwa = hwi = WEIGHT_ONE;
1134 for (lvl = 0; lvl <= iocg->level - 1; lvl++) {
1135 struct ioc_gq *parent = iocg->ancestors[lvl];
1136 struct ioc_gq *child = iocg->ancestors[lvl + 1];
1137 u64 active_sum = READ_ONCE(parent->child_active_sum);
1138 u64 inuse_sum = READ_ONCE(parent->child_inuse_sum);
1139 u32 active = READ_ONCE(child->active);
1140 u32 inuse = READ_ONCE(child->inuse);
1142 /* we can race with deactivations and either may read as zero */
1143 if (!active_sum || !inuse_sum)
1146 active_sum = max_t(u64, active, active_sum);
1147 hwa = div64_u64((u64)hwa * active, active_sum);
1149 inuse_sum = max_t(u64, inuse, inuse_sum);
1150 hwi = div64_u64((u64)hwi * inuse, inuse_sum);
1153 iocg->hweight_active = max_t(u32, hwa, 1);
1154 iocg->hweight_inuse = max_t(u32, hwi, 1);
1155 iocg->hweight_gen = ioc_gen;
1158 *hw_activep = iocg->hweight_active;
1160 *hw_inusep = iocg->hweight_inuse;
1164 * Calculate the hweight_inuse @iocg would get with max @inuse assuming all the
1165 * other weights stay unchanged.
1167 static u32 current_hweight_max(struct ioc_gq *iocg)
1169 u32 hwm = WEIGHT_ONE;
1170 u32 inuse = iocg->active;
1171 u64 child_inuse_sum;
1174 lockdep_assert_held(&iocg->ioc->lock);
1176 for (lvl = iocg->level - 1; lvl >= 0; lvl--) {
1177 struct ioc_gq *parent = iocg->ancestors[lvl];
1178 struct ioc_gq *child = iocg->ancestors[lvl + 1];
1180 child_inuse_sum = parent->child_inuse_sum + inuse - child->inuse;
1181 hwm = div64_u64((u64)hwm * inuse, child_inuse_sum);
1182 inuse = DIV64_U64_ROUND_UP(parent->active * child_inuse_sum,
1183 parent->child_active_sum);
1186 return max_t(u32, hwm, 1);
1189 static void weight_updated(struct ioc_gq *iocg, struct ioc_now *now)
1191 struct ioc *ioc = iocg->ioc;
1192 struct blkcg_gq *blkg = iocg_to_blkg(iocg);
1193 struct ioc_cgrp *iocc = blkcg_to_iocc(blkg->blkcg);
1196 lockdep_assert_held(&ioc->lock);
1198 weight = iocg->cfg_weight ?: iocc->dfl_weight;
1199 if (weight != iocg->weight && iocg->active)
1200 propagate_weights(iocg, weight, iocg->inuse, true, now);
1201 iocg->weight = weight;
1204 static bool iocg_activate(struct ioc_gq *iocg, struct ioc_now *now)
1206 struct ioc *ioc = iocg->ioc;
1207 u64 last_period, cur_period;
	 * If we seem to be already active, just update the stamp to tell the
	 * timer that we're still active. We don't mind occasional races.
1215 if (!list_empty(&iocg->active_list)) {
1217 cur_period = atomic64_read(&ioc->cur_period);
1218 if (atomic64_read(&iocg->active_period) != cur_period)
1219 atomic64_set(&iocg->active_period, cur_period);
1223 /* racy check on internal node IOs, treat as root level IOs */
1224 if (iocg->child_active_sum)
1227 spin_lock_irq(&ioc->lock);
1232 cur_period = atomic64_read(&ioc->cur_period);
1233 last_period = atomic64_read(&iocg->active_period);
1234 atomic64_set(&iocg->active_period, cur_period);
1236 /* already activated or breaking leaf-only constraint? */
1237 if (!list_empty(&iocg->active_list))
1238 goto succeed_unlock;
1239 for (i = iocg->level - 1; i > 0; i--)
1240 if (!list_empty(&iocg->ancestors[i]->active_list))
1243 if (iocg->child_active_sum)
1247 * Always start with the target budget. On deactivation, we throw away
1248 * anything above it.
1250 vtarget = now->vnow - ioc->margins.target;
1251 vtime = atomic64_read(&iocg->vtime);
1253 atomic64_add(vtarget - vtime, &iocg->vtime);
1254 atomic64_add(vtarget - vtime, &iocg->done_vtime);
1258 * Activate, propagate weight and start period timer if not
	 * running. Reset hweight_gen to avoid accidental match from
	 * wrapping.
	 */
1262 iocg->hweight_gen = atomic_read(&ioc->hweight_gen) - 1;
1263 list_add(&iocg->active_list, &ioc->active_iocgs);
1265 propagate_weights(iocg, iocg->weight,
1266 iocg->last_inuse ?: iocg->weight, true, now);
1268 TRACE_IOCG_PATH(iocg_activate, iocg, now,
1269 last_period, cur_period, vtime);
1271 iocg->activated_at = now->now;
1273 if (ioc->running == IOC_IDLE) {
1274 ioc->running = IOC_RUNNING;
1275 ioc->dfgv_period_at = now->now;
1276 ioc->dfgv_period_rem = 0;
1277 ioc_start_period(ioc, now);
1281 spin_unlock_irq(&ioc->lock);
1285 spin_unlock_irq(&ioc->lock);
1289 static bool iocg_kick_delay(struct ioc_gq *iocg, struct ioc_now *now)
1291 struct ioc *ioc = iocg->ioc;
1292 struct blkcg_gq *blkg = iocg_to_blkg(iocg);
1293 u64 tdelta, delay, new_delay, shift;
1294 s64 vover, vover_pct;
1297 lockdep_assert_held(&iocg->waitq.lock);
1300 * If the delay is set by another CPU, we may be in the past. No need to
1301 * change anything if so. This avoids decay calculation underflow.
1303 if (time_before64(now->now, iocg->delay_at))
1306 /* calculate the current delay in effect - 1/2 every second */
1307 tdelta = now->now - iocg->delay_at;
1308 shift = div64_u64(tdelta, USEC_PER_SEC);
1309 if (iocg->delay && shift < BITS_PER_LONG)
1310 delay = iocg->delay >> shift;
1314 /* calculate the new delay from the debt amount */
1315 current_hweight(iocg, &hwa, NULL);
1316 vover = atomic64_read(&iocg->vtime) +
1317 abs_cost_to_cost(iocg->abs_vdebt, hwa) - now->vnow;
1318 vover_pct = div64_s64(100 * vover,
1319 ioc->period_us * ioc->vtime_base_rate);
1321 if (vover_pct <= MIN_DELAY_THR_PCT)
1323 else if (vover_pct >= MAX_DELAY_THR_PCT)
1324 new_delay = MAX_DELAY;
1326 new_delay = MIN_DELAY +
1327 div_u64((MAX_DELAY - MIN_DELAY) *
1328 (vover_pct - MIN_DELAY_THR_PCT),
1329 MAX_DELAY_THR_PCT - MIN_DELAY_THR_PCT);
1331 /* pick the higher one and apply */
1332 if (new_delay > delay) {
1333 iocg->delay = new_delay;
1334 iocg->delay_at = now->now;
1338 if (delay >= MIN_DELAY) {
1339 if (!iocg->indelay_since)
1340 iocg->indelay_since = now->now;
1341 blkcg_set_delay(blkg, delay * NSEC_PER_USEC);
1344 if (iocg->indelay_since) {
1345 iocg->local_stat.indelay_us += now->now - iocg->indelay_since;
1346 iocg->indelay_since = 0;
1349 blkcg_clear_delay(blkg);
1354 static void iocg_incur_debt(struct ioc_gq *iocg, u64 abs_cost,
1355 struct ioc_now *now)
1357 struct iocg_pcpu_stat *gcs;
1359 lockdep_assert_held(&iocg->ioc->lock);
1360 lockdep_assert_held(&iocg->waitq.lock);
1361 WARN_ON_ONCE(list_empty(&iocg->active_list));
1364 * Once in debt, debt handling owns inuse. @iocg stays at the minimum
	 * inuse donating all of its share to others until its debt is paid off.
	 */
1367 if (!iocg->abs_vdebt && abs_cost) {
1368 iocg->indebt_since = now->now;
1369 propagate_weights(iocg, iocg->active, 0, false, now);
1372 iocg->abs_vdebt += abs_cost;
1374 gcs = get_cpu_ptr(iocg->pcpu_stat);
1375 local64_add(abs_cost, &gcs->abs_vusage);
1379 static void iocg_pay_debt(struct ioc_gq *iocg, u64 abs_vpay,
1380 struct ioc_now *now)
1382 lockdep_assert_held(&iocg->ioc->lock);
1383 lockdep_assert_held(&iocg->waitq.lock);
1385 /* make sure that nobody messed with @iocg */
1386 WARN_ON_ONCE(list_empty(&iocg->active_list));
1387 WARN_ON_ONCE(iocg->inuse > 1);
1389 iocg->abs_vdebt -= min(abs_vpay, iocg->abs_vdebt);
1391 /* if debt is paid in full, restore inuse */
1392 if (!iocg->abs_vdebt) {
1393 iocg->local_stat.indebt_us += now->now - iocg->indebt_since;
1394 iocg->indebt_since = 0;
1396 propagate_weights(iocg, iocg->active, iocg->last_inuse,
1401 static int iocg_wake_fn(struct wait_queue_entry *wq_entry, unsigned mode,
1402 int flags, void *key)
1404 struct iocg_wait *wait = container_of(wq_entry, struct iocg_wait, wait);
1405 struct iocg_wake_ctx *ctx = (struct iocg_wake_ctx *)key;
1406 u64 cost = abs_cost_to_cost(wait->abs_cost, ctx->hw_inuse);
1408 ctx->vbudget -= cost;
1410 if (ctx->vbudget < 0)
1413 iocg_commit_bio(ctx->iocg, wait->bio, wait->abs_cost, cost);
1414 wait->committed = true;
1417 * autoremove_wake_function() removes the wait entry only when it
1418 * actually changed the task state. We want the wait always removed.
1419 * Remove explicitly and use default_wake_function(). Note that the
1420 * order of operations is important as finish_wait() tests whether
1421 * @wq_entry is removed without grabbing the lock.
1423 default_wake_function(wq_entry, mode, flags, key);
1424 list_del_init_careful(&wq_entry->entry);
1429 * Calculate the accumulated budget, pay debt if @pay_debt and wake up waiters
1430 * accordingly. When @pay_debt is %true, the caller must be holding ioc->lock in
1431 * addition to iocg->waitq.lock.
1433 static void iocg_kick_waitq(struct ioc_gq *iocg, bool pay_debt,
1434 struct ioc_now *now)
1436 struct ioc *ioc = iocg->ioc;
1437 struct iocg_wake_ctx ctx = { .iocg = iocg };
1438 u64 vshortage, expires, oexpires;
1442 lockdep_assert_held(&iocg->waitq.lock);
1444 current_hweight(iocg, &hwa, NULL);
1445 vbudget = now->vnow - atomic64_read(&iocg->vtime);
1448 if (pay_debt && iocg->abs_vdebt && vbudget > 0) {
1449 u64 abs_vbudget = cost_to_abs_cost(vbudget, hwa);
1450 u64 abs_vpay = min_t(u64, abs_vbudget, iocg->abs_vdebt);
1451 u64 vpay = abs_cost_to_cost(abs_vpay, hwa);
1453 lockdep_assert_held(&ioc->lock);
1455 atomic64_add(vpay, &iocg->vtime);
1456 atomic64_add(vpay, &iocg->done_vtime);
1457 iocg_pay_debt(iocg, abs_vpay, now);
1461 if (iocg->abs_vdebt || iocg->delay)
1462 iocg_kick_delay(iocg, now);
1465 * Debt can still be outstanding if we haven't paid all yet or the
1466 * caller raced and called without @pay_debt. Shouldn't wake up waiters
	 * under debt. Make sure @vbudget reflects the outstanding amount and is
	 * not positive.
	 */
1470 if (iocg->abs_vdebt) {
1471 s64 vdebt = abs_cost_to_cost(iocg->abs_vdebt, hwa);
1472 vbudget = min_t(s64, 0, vbudget - vdebt);
1476 * Wake up the ones which are due and see how much vtime we'll need for
1477 * the next one. As paying off debt restores hw_inuse, it must be read
1478 * after the above debt payment.
1480 ctx.vbudget = vbudget;
1481 current_hweight(iocg, NULL, &ctx.hw_inuse);
1483 __wake_up_locked_key(&iocg->waitq, TASK_NORMAL, &ctx);
1485 if (!waitqueue_active(&iocg->waitq)) {
1486 if (iocg->wait_since) {
1487 iocg->local_stat.wait_us += now->now - iocg->wait_since;
1488 iocg->wait_since = 0;
1493 if (!iocg->wait_since)
1494 iocg->wait_since = now->now;
1496 if (WARN_ON_ONCE(ctx.vbudget >= 0))
1499 /* determine next wakeup, add a timer margin to guarantee chunking */
1500 vshortage = -ctx.vbudget;
1501 expires = now->now_ns +
1502 DIV64_U64_ROUND_UP(vshortage, ioc->vtime_base_rate) *
1504 expires += ioc->timer_slack_ns;
1506 /* if already active and close enough, don't bother */
1507 oexpires = ktime_to_ns(hrtimer_get_softexpires(&iocg->waitq_timer));
1508 if (hrtimer_is_queued(&iocg->waitq_timer) &&
1509 abs(oexpires - expires) <= ioc->timer_slack_ns)
1512 hrtimer_start_range_ns(&iocg->waitq_timer, ns_to_ktime(expires),
1513 ioc->timer_slack_ns, HRTIMER_MODE_ABS);
1516 static enum hrtimer_restart iocg_waitq_timer_fn(struct hrtimer *timer)
1518 struct ioc_gq *iocg = container_of(timer, struct ioc_gq, waitq_timer);
1519 bool pay_debt = READ_ONCE(iocg->abs_vdebt);
1521 unsigned long flags;
1523 ioc_now(iocg->ioc, &now);
1525 iocg_lock(iocg, pay_debt, &flags);
1526 iocg_kick_waitq(iocg, pay_debt, &now);
1527 iocg_unlock(iocg, pay_debt, &flags);
1529 return HRTIMER_NORESTART;
1532 static void ioc_lat_stat(struct ioc *ioc, u32 *missed_ppm_ar, u32 *rq_wait_pct_p)
1534 u32 nr_met[2] = { };
1535 u32 nr_missed[2] = { };
1539 for_each_online_cpu(cpu) {
1540 struct ioc_pcpu_stat *stat = per_cpu_ptr(ioc->pcpu_stat, cpu);
1541 u64 this_rq_wait_ns;
1543 for (rw = READ; rw <= WRITE; rw++) {
1544 u32 this_met = local_read(&stat->missed[rw].nr_met);
1545 u32 this_missed = local_read(&stat->missed[rw].nr_missed);
1547 nr_met[rw] += this_met - stat->missed[rw].last_met;
1548 nr_missed[rw] += this_missed - stat->missed[rw].last_missed;
1549 stat->missed[rw].last_met = this_met;
1550 stat->missed[rw].last_missed = this_missed;
1553 this_rq_wait_ns = local64_read(&stat->rq_wait_ns);
1554 rq_wait_ns += this_rq_wait_ns - stat->last_rq_wait_ns;
1555 stat->last_rq_wait_ns = this_rq_wait_ns;
1558 for (rw = READ; rw <= WRITE; rw++) {
1559 if (nr_met[rw] + nr_missed[rw])
1561 DIV64_U64_ROUND_UP((u64)nr_missed[rw] * MILLION,
1562 nr_met[rw] + nr_missed[rw]);
1564 missed_ppm_ar[rw] = 0;
1567 *rq_wait_pct_p = div64_u64(rq_wait_ns * 100,
1568 ioc->period_us * NSEC_PER_USEC);
1571 /* was iocg idle this period? */
1572 static bool iocg_is_idle(struct ioc_gq *iocg)
1574 struct ioc *ioc = iocg->ioc;
1576 /* did something get issued this period? */
1577 if (atomic64_read(&iocg->active_period) ==
1578 atomic64_read(&ioc->cur_period))
1581 /* is something in flight? */
1582 if (atomic64_read(&iocg->done_vtime) != atomic64_read(&iocg->vtime))
1589 * Call this function on the target leaf @iocg's to build pre-order traversal
1590 * list of all the ancestors in @inner_walk. The inner nodes are linked through
1591 * ->walk_list and the caller is responsible for dissolving the list after use.
1593 static void iocg_build_inner_walk(struct ioc_gq *iocg,
1594 struct list_head *inner_walk)
1598 WARN_ON_ONCE(!list_empty(&iocg->walk_list));
1600 /* find the first ancestor which hasn't been visited yet */
1601 for (lvl = iocg->level - 1; lvl >= 0; lvl--) {
1602 if (!list_empty(&iocg->ancestors[lvl]->walk_list))
1606 /* walk down and visit the inner nodes to get pre-order traversal */
1607 while (++lvl <= iocg->level - 1) {
1608 struct ioc_gq *inner = iocg->ancestors[lvl];
1610 /* record traversal order */
1611 list_add_tail(&inner->walk_list, inner_walk);
1615 /* collect per-cpu counters and propagate the deltas to the parent */
1616 static void iocg_flush_stat_one(struct ioc_gq *iocg, struct ioc_now *now)
1618 struct ioc *ioc = iocg->ioc;
1619 struct iocg_stat new_stat;
1624 lockdep_assert_held(&iocg->ioc->lock);
1626 /* collect per-cpu counters */
1627 for_each_possible_cpu(cpu) {
1628 abs_vusage += local64_read(
1629 per_cpu_ptr(&iocg->pcpu_stat->abs_vusage, cpu));
1631 vusage_delta = abs_vusage - iocg->last_stat_abs_vusage;
1632 iocg->last_stat_abs_vusage = abs_vusage;
1634 iocg->usage_delta_us = div64_u64(vusage_delta, ioc->vtime_base_rate);
1635 iocg->local_stat.usage_us += iocg->usage_delta_us;
1637 /* propagate upwards */
1639 iocg->local_stat.usage_us + iocg->desc_stat.usage_us;
1641 iocg->local_stat.wait_us + iocg->desc_stat.wait_us;
1642 new_stat.indebt_us =
1643 iocg->local_stat.indebt_us + iocg->desc_stat.indebt_us;
1644 new_stat.indelay_us =
1645 iocg->local_stat.indelay_us + iocg->desc_stat.indelay_us;
1647 /* propagate the deltas to the parent */
1648 if (iocg->level > 0) {
1649 struct iocg_stat *parent_stat =
1650 &iocg->ancestors[iocg->level - 1]->desc_stat;
1652 parent_stat->usage_us +=
1653 new_stat.usage_us - iocg->last_stat.usage_us;
1654 parent_stat->wait_us +=
1655 new_stat.wait_us - iocg->last_stat.wait_us;
1656 parent_stat->indebt_us +=
1657 new_stat.indebt_us - iocg->last_stat.indebt_us;
1658 parent_stat->indelay_us +=
1659 new_stat.indelay_us - iocg->last_stat.indelay_us;
1662 iocg->last_stat = new_stat;
1665 /* get stat counters ready for reading on all active iocgs */
1666 static void iocg_flush_stat(struct list_head *target_iocgs, struct ioc_now *now)
1668 LIST_HEAD(inner_walk);
1669 struct ioc_gq *iocg, *tiocg;
1671 /* flush leaves and build inner node walk list */
1672 list_for_each_entry(iocg, target_iocgs, active_list) {
1673 iocg_flush_stat_one(iocg, now);
1674 iocg_build_inner_walk(iocg, &inner_walk);
1677 /* keep flushing upwards by walking the inner list backwards */
1678 list_for_each_entry_safe_reverse(iocg, tiocg, &inner_walk, walk_list) {
1679 iocg_flush_stat_one(iocg, now);
1680 list_del_init(&iocg->walk_list);
1685 * Determine what @iocg's hweight_inuse should be after donating unused
1686 * capacity. @hwm is the upper bound and used to signal no donation. This
1687 * function also throws away @iocg's excess budget.
1689 static u32 hweight_after_donation(struct ioc_gq *iocg, u32 old_hwi, u32 hwm,
1690 u32 usage, struct ioc_now *now)
1692 struct ioc *ioc = iocg->ioc;
1693 u64 vtime = atomic64_read(&iocg->vtime);
1694 s64 excess, delta, target, new_hwi;
1696 /* debt handling owns inuse for debtors */
1697 if (iocg->abs_vdebt)
1700 /* see whether minimum margin requirement is met */
1701 if (waitqueue_active(&iocg->waitq) ||
1702 time_after64(vtime, now->vnow - ioc->margins.min))
1705 /* throw away excess above target */
1706 excess = now->vnow - vtime - ioc->margins.target;
1708 atomic64_add(excess, &iocg->vtime);
1709 atomic64_add(excess, &iocg->done_vtime);
1711 ioc->vtime_err -= div64_u64(excess * old_hwi, WEIGHT_ONE);
	 * Let delta be the distance between the iocg's and the device's vtimes
	 * as a fraction of the period duration. Assuming that the iocg will
1717 * consume the usage determined above, we want to determine new_hwi so
1718 * that delta equals MARGIN_TARGET at the end of the next period.
1720 * We need to execute usage worth of IOs while spending the sum of the
	 * new budget (1 - MARGIN_TARGET) and the leftover from the last period
	 * (delta):
	 *
1724 * usage = (1 - MARGIN_TARGET + delta) * new_hwi
1726 * Therefore, the new_hwi is:
1728 * new_hwi = usage / (1 - MARGIN_TARGET + delta)
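	 *
	 * e.g. with MARGIN_TARGET_PCT = 50, a usage of 25% and a leftover of
	 * a quarter period (delta = 0.25) give
	 *
	 *	new_hwi = 0.25 / (1 - 0.5 + 0.25) = 1/3
	 */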
1730 delta = div64_s64(WEIGHT_ONE * (now->vnow - vtime),
1731 now->vnow - ioc->period_at_vtime);
1732 target = WEIGHT_ONE * MARGIN_TARGET_PCT / 100;
1733 new_hwi = div64_s64(WEIGHT_ONE * usage, WEIGHT_ONE - target + delta);
1735 return clamp_t(s64, new_hwi, 1, hwm);
1739 * For work-conservation, an iocg which isn't using all of its share should
1740 * donate the leftover to other iocgs. There are two ways to achieve this - 1.
1741 * bumping up vrate accordingly 2. lowering the donating iocg's inuse weight.
1743 * #1 is mathematically simpler but has the drawback of requiring synchronous
1744 * global hweight_inuse updates when idle iocg's get activated or inuse weights
1745 * change due to donation snapbacks as it has the possibility of grossly
1746 * overshooting what's allowed by the model and vrate.
1748 * #2 is inherently safe with local operations. The donating iocg can easily
1749 * snap back to higher weights when needed without worrying about impacts on
1750 * other nodes as the impacts will be inherently correct. This also makes idle
1751 * iocg activations safe. The only effect activations have is decreasing
1752 * hweight_inuse of others, the right solution to which is for those iocgs to
1753 * snap back to higher weights.
1755 * So, we go with #2. The challenge is calculating how each donating iocg's
1756 * inuse should be adjusted to achieve the target donation amounts. This is done
1757 * using Andy's method described in the following pdf.
1759 * https://drive.google.com/file/d/1PsJwxPFtjUnwOY1QJ5AeICCcsL7BM3bo
1761 * Given the weights and target after-donation hweight_inuse values, Andy's
1762 * method determines how the proportional distribution should look like at each
1763 * sibling level to maintain the relative relationship between all non-donating
1764 * pairs. To roughly summarize, it divides the tree into donating and
1765 * non-donating parts, calculates global donation rate which is used to
 * determine the target hweight_inuse for each node, and then derives
 * per-level proportions.
 *
1769 * The following pdf shows that global distribution calculated this way can be
1770 * achieved by scaling inuse weights of donating leaves and propagating the
1771 * adjustments upwards proportionally.
1773 * https://drive.google.com/file/d/1vONz1-fzVO7oY5DXXsLjSxEtYYQbOvsE
1775 * Combining the above two, we can determine how each leaf iocg's inuse should
1776 * be adjusted to achieve the target donation.
1778 * https://drive.google.com/file/d/1WcrltBOSPN0qXVdBgnKm4mdp9FhuEFQN
1780 * The inline comments use symbols from the last pdf.
1782 * b is the sum of the absolute budgets in the subtree. 1 for the root node.
1783 * f is the sum of the absolute budgets of non-donating nodes in the subtree.
1784 * t is the sum of the absolute budgets of donating nodes in the subtree.
1785 * w is the weight of the node. w = w_f + w_t
1786 * w_f is the non-donating portion of w. w_f = w * f / b
 * w_t is the donating portion of w. w_t = w * t / b
1788 * s is the sum of all sibling weights. s = Sum(w) for siblings
1789 * s_f and s_t are the non-donating and donating portions of s.
1791 * Subscript p denotes the parent's counterpart and ' the adjusted value - e.g.
1792 * w_pt is the donating portion of the parent's weight and w'_pt the same value
1793 * after adjustments. Subscript r denotes the root node's values.
1795 static void transfer_surpluses(struct list_head *surpluses, struct ioc_now *now)
1797 LIST_HEAD(over_hwa);
1798 LIST_HEAD(inner_walk);
1799 struct ioc_gq *iocg, *tiocg, *root_iocg;
1800 u32 after_sum, over_sum, over_target, gamma;
1803 * It's pretty unlikely but possible for the total sum of
1804 * hweight_after_donation's to be higher than WEIGHT_ONE, which will
1805 * confuse the following calculations. If such condition is detected,
	 * scale down everyone over its full share equally to keep the sum below
	 * WEIGHT_ONE.
	 */
1811 list_for_each_entry(iocg, surpluses, surplus_list) {
1814 current_hweight(iocg, &hwa, NULL);
1815 after_sum += iocg->hweight_after_donation;
1817 if (iocg->hweight_after_donation > hwa) {
1818 over_sum += iocg->hweight_after_donation;
1819 list_add(&iocg->walk_list, &over_hwa);
1823 if (after_sum >= WEIGHT_ONE) {
1825 * The delta should be deducted from the over_sum, calculate
1826 * target over_sum value.
1828 u32 over_delta = after_sum - (WEIGHT_ONE - 1);
1829 WARN_ON_ONCE(over_sum <= over_delta);
1830 over_target = over_sum - over_delta;
1835 list_for_each_entry_safe(iocg, tiocg, &over_hwa, walk_list) {
1837 iocg->hweight_after_donation =
1838 div_u64((u64)iocg->hweight_after_donation *
1839 over_target, over_sum);
1840 list_del_init(&iocg->walk_list);
1844 * Build pre-order inner node walk list and prepare for donation
1845 * adjustment calculations.
1847 list_for_each_entry(iocg, surpluses, surplus_list) {
1848 iocg_build_inner_walk(iocg, &inner_walk);
1851 root_iocg = list_first_entry(&inner_walk, struct ioc_gq, walk_list);
1852 WARN_ON_ONCE(root_iocg->level > 0);
1854 list_for_each_entry(iocg, &inner_walk, walk_list) {
1855 iocg->child_adjusted_sum = 0;
1856 iocg->hweight_donating = 0;
1857 iocg->hweight_after_donation = 0;
	 * Propagate the donating budget (b_t) and after donation budget (b'_t)
	 * up the hierarchy.
	 */
1864 list_for_each_entry(iocg, surpluses, surplus_list) {
1865 struct ioc_gq *parent = iocg->ancestors[iocg->level - 1];
1867 parent->hweight_donating += iocg->hweight_donating;
1868 parent->hweight_after_donation += iocg->hweight_after_donation;
1871 list_for_each_entry_reverse(iocg, &inner_walk, walk_list) {
1872 if (iocg->level > 0) {
1873 struct ioc_gq *parent = iocg->ancestors[iocg->level - 1];
1875 parent->hweight_donating += iocg->hweight_donating;
1876 parent->hweight_after_donation += iocg->hweight_after_donation;
1881 * Calculate inner hwa's (b) and make sure the donation values are
	 * within the accepted ranges as we're doing low res calculations with
	 * roundups.
	 */
1885 list_for_each_entry(iocg, &inner_walk, walk_list) {
1887 struct ioc_gq *parent = iocg->ancestors[iocg->level - 1];
1889 iocg->hweight_active = DIV64_U64_ROUND_UP(
1890 (u64)parent->hweight_active * iocg->active,
1891 parent->child_active_sum);
1895 iocg->hweight_donating = min(iocg->hweight_donating,
1896 iocg->hweight_active);
1897 iocg->hweight_after_donation = min(iocg->hweight_after_donation,
1898 iocg->hweight_donating - 1);
1899 if (WARN_ON_ONCE(iocg->hweight_active <= 1 ||
1900 iocg->hweight_donating <= 1 ||
1901 iocg->hweight_after_donation == 0)) {
1902 pr_warn("iocg: invalid donation weights in ");
1903 pr_cont_cgroup_path(iocg_to_blkg(iocg)->blkcg->css.cgroup);
1904 pr_cont(": active=%u donating=%u after=%u\n",
1905 iocg->hweight_active, iocg->hweight_donating,
1906 iocg->hweight_after_donation);
1911 * Calculate the global donation rate (gamma) - the rate to adjust
1912 * non-donating budgets by.
1914 * No need to use 64bit multiplication here as the first operand is
1915 * guaranteed to be smaller than WEIGHT_ONE (1<<16).
1917 * We know that there are beneficiary nodes and the sum of the donating
1918 * hweights can't be whole; however, due to the round-ups during hweight
1919 * calculations, root_iocg->hweight_donating might still end up equal to
1920 * or greater than whole. Limit the range when calculating the divider.
1922 * gamma = (1 - t_r') / (1 - t_r)
1924 gamma = DIV_ROUND_UP(
1925 (WEIGHT_ONE - root_iocg->hweight_after_donation) * WEIGHT_ONE,
1926 WEIGHT_ONE - min_t(u32, root_iocg->hweight_donating, WEIGHT_ONE - 1));
1929 * Calculate adjusted hwi, child_adjusted_sum and inuse for the inner
1932 list_for_each_entry(iocg, &inner_walk, walk_list) {
1933 struct ioc_gq *parent;
1934 u32 inuse, wpt, wptp;
1937 if (iocg->level == 0) {
1938 /* adjusted weight sum for 1st level: s' = s * b_pf / b'_pf */
1939 iocg->child_adjusted_sum = DIV64_U64_ROUND_UP(
1940 iocg->child_active_sum * (WEIGHT_ONE - iocg->hweight_donating),
1941 WEIGHT_ONE - iocg->hweight_after_donation);
1945 parent = iocg->ancestors[iocg->level - 1];
1947 /* b' = gamma * b_f + b_t' */
1948 iocg->hweight_inuse = DIV64_U64_ROUND_UP(
1949 (u64)gamma * (iocg->hweight_active - iocg->hweight_donating),
1950 WEIGHT_ONE) + iocg->hweight_after_donation;
1952 /* w' = s' * b' / b'_p */
1953 inuse = DIV64_U64_ROUND_UP(
1954 (u64)parent->child_adjusted_sum * iocg->hweight_inuse,
1955 parent->hweight_inuse);
1957 /* adjusted weight sum for children: s' = s_f + s_t * w'_pt / w_pt */
1958 st = DIV64_U64_ROUND_UP(
1959 iocg->child_active_sum * iocg->hweight_donating,
1960 iocg->hweight_active);
1961 sf = iocg->child_active_sum - st;
1962 wpt = DIV64_U64_ROUND_UP(
1963 (u64)iocg->active * iocg->hweight_donating,
1964 iocg->hweight_active);
1965 wptp = DIV64_U64_ROUND_UP(
1966 (u64)inuse * iocg->hweight_after_donation,
1967 iocg->hweight_inuse);
1969 iocg->child_adjusted_sum = sf + DIV64_U64_ROUND_UP(st * wptp, wpt);
1973 * All inner nodes now have ->hweight_inuse and ->child_adjusted_sum and
1974 * we can finally determine leaf adjustments.
1976 list_for_each_entry(iocg, surpluses, surplus_list) {
1977 struct ioc_gq *parent = iocg->ancestors[iocg->level - 1];
1981 * In-debt iocgs participated in the donation calculation with
1982 * the minimum target hweight_inuse. Configuring inuse
1983 * accordingly would work fine but debt handling expects
		 * @iocg->inuse to stay at the minimum and we don't want to
		 * interfere.
		 */
1987 if (iocg->abs_vdebt) {
1988 WARN_ON_ONCE(iocg->inuse > 1);
1992 /* w' = s' * b' / b'_p, note that b' == b'_t for donating leaves */
1993 inuse = DIV64_U64_ROUND_UP(
1994 parent->child_adjusted_sum * iocg->hweight_after_donation,
1995 parent->hweight_inuse);
1997 TRACE_IOCG_PATH(inuse_transfer, iocg, now,
1999 iocg->hweight_inuse,
2000 iocg->hweight_after_donation);
2002 __propagate_weights(iocg, iocg->active, inuse, true, now);
2005 /* walk list should be dissolved after use */
2006 list_for_each_entry_safe(iocg, tiocg, &inner_walk, walk_list)
2007 list_del_init(&iocg->walk_list);
2011 * A low weight iocg can amass a large amount of debt, for example, when
2012 * anonymous memory gets reclaimed aggressively. If the system has a lot of
2013 * memory paired with a slow IO device, the debt can span multiple seconds or
2014 * more. If there are no other subsequent IO issuers, the in-debt iocg may end
2015 * up blocked paying its debt while the IO device is idle.
2017 * The following protects against such cases. If the device has been
2018 * sufficiently idle for a while, the debts are halved and delays are
2019 * recalculated.
2021 static void ioc_forgive_debts(struct ioc *ioc, u64 usage_us_sum, int nr_debtors,
2022 struct ioc_now *now)
2024 struct ioc_gq *iocg;
2025 u64 dur, usage_pct, nr_cycles;
2027 /* if no debtor, reset the cycle */
2028 if (!nr_debtors) {
2029 ioc->dfgv_period_at = now->now;
2030 ioc->dfgv_period_rem = 0;
2031 ioc->dfgv_usage_us_sum = 0;
2032 return;
2033 }
2036 * Debtors can pass through a lot of writes choking the device and we
2037 * don't want to be forgiving debts while the device is struggling from
2038 * write bursts. If we're missing latency targets, consider the device
2039 * fully utilized.
2041 if (ioc->busy_level > 0)
2042 usage_us_sum = max_t(u64, usage_us_sum, ioc->period_us);
2044 ioc->dfgv_usage_us_sum += usage_us_sum;
2045 if (time_before64(now->now, ioc->dfgv_period_at + DFGV_PERIOD))
2046 return;
2049 * At least DFGV_PERIOD has passed since the last period. Calculate the
2050 * average usage and reset the period counters.
2052 dur = now->now - ioc->dfgv_period_at;
2053 usage_pct = div64_u64(100 * ioc->dfgv_usage_us_sum, dur);
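/*
 * e.g. (made-up numbers) 100ms of summed usage over a 400ms window
 * yields usage_pct = 25 - the device spent roughly a quarter of the
 * elapsed window servicing the debtors' IOs.
 */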
2055 ioc->dfgv_period_at = now->now;
2056 ioc->dfgv_usage_us_sum = 0;
2058 /* if it was too busy, reset everything */
2059 if (usage_pct > DFGV_USAGE_PCT) {
2060 ioc->dfgv_period_rem = 0;
2061 return;
2062 }
2065 * Usage is lower than threshold. Let's forgive some debts. Debt
2066 * forgiveness runs off of the usual ioc timer but its period usually
2067 * doesn't match ioc's. Compensate the difference by performing the
2068 * reduction as many times as would fit in the duration since the last
2069 * run and carrying over the left-over duration in @ioc->dfgv_period_rem
2070 * - if ioc period is 75% of DFGV_PERIOD, one out of three consecutive
2071 * reductions is doubled.
2073 nr_cycles = dur + ioc->dfgv_period_rem;
2074 ioc->dfgv_period_rem = do_div(nr_cycles, DFGV_PERIOD);
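/*
 * Rough example, assuming a hypothetical DFGV_PERIOD of 100ms: if
 * 250ms has elapsed including the carried-over remainder, nr_cycles
 * becomes 2 with 50ms left in dfgv_period_rem, so debts below are
 * right-shifted by 2 (quartered) on this run.
 */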
2076 list_for_each_entry(iocg, &ioc->active_iocgs, active_list) {
2077 u64 __maybe_unused old_debt, __maybe_unused old_delay;
2079 if (!iocg->abs_vdebt && !iocg->delay)
2080 continue;
2082 spin_lock(&iocg->waitq.lock);
2084 old_debt = iocg->abs_vdebt;
2085 old_delay = iocg->delay;
2087 if (iocg->abs_vdebt)
2088 iocg->abs_vdebt = iocg->abs_vdebt >> nr_cycles ?: 1;
2089 if (iocg->delay)
2090 iocg->delay = iocg->delay >> nr_cycles ?: 1;
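/*
 * e.g. abs_vdebt = 1000 with nr_cycles = 2 becomes 250; the "?: 1"
 * floors the result at 1 rather than 0, presumably leaving the final
 * clearing to the regular debt payment path.
 */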
2092 iocg_kick_waitq(iocg, true, now);
2094 TRACE_IOCG_PATH(iocg_forgive_debt, iocg, now, usage_pct,
2095 old_debt, iocg->abs_vdebt,
2096 old_delay, iocg->delay);
2098 spin_unlock(&iocg->waitq.lock);
2102 static void ioc_timer_fn(struct timer_list *timer)
2104 struct ioc *ioc = container_of(timer, struct ioc, timer);
2105 struct ioc_gq *iocg, *tiocg;
2106 struct ioc_now now;
2107 LIST_HEAD(surpluses);
2108 int nr_debtors = 0, nr_shortages = 0, nr_lagging = 0;
2109 u64 usage_us_sum = 0;
2110 u32 ppm_rthr = MILLION - ioc->params.qos[QOS_RPPM];
2111 u32 ppm_wthr = MILLION - ioc->params.qos[QOS_WPPM];
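/*
 * The QoS percentiles are stored in ppm; e.g. a configured rpct of
 * 95.00 is 950000 ppm, making ppm_rthr = 50000 - up to 5% of reads
 * may miss their latency target before QoS counts as missed.
 */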
2112 u32 missed_ppm[2], rq_wait_pct;
2113 u64 period_vtime;
2114 int prev_busy_level;
2116 /* how were the latencies during the period? */
2117 ioc_lat_stat(ioc, missed_ppm, &rq_wait_pct);
2119 /* take care of active iocgs */
2120 spin_lock_irq(&ioc->lock);
2122 ioc_now(ioc, &now);
2124 period_vtime = now.vnow - ioc->period_at_vtime;
2125 if (WARN_ON_ONCE(!period_vtime)) {
2126 spin_unlock_irq(&ioc->lock);
2127 return;
2128 }
2131 * Waiters determine the sleep durations based on the vrate they
2132 * saw at the time of sleep. If vrate has increased, some waiters
2133 * could be sleeping for too long. Wake up tardy waiters which
2134 * should have woken up in the last period and expire idle iocgs.
2136 list_for_each_entry_safe(iocg, tiocg, &ioc->active_iocgs, active_list) {
2137 if (!waitqueue_active(&iocg->waitq) && !iocg->abs_vdebt &&
2138 !iocg->delay && !iocg_is_idle(iocg))
2139 continue;
2141 spin_lock(&iocg->waitq.lock);
2143 /* flush wait and indebt stat deltas */
2144 if (iocg->wait_since) {
2145 iocg->local_stat.wait_us += now.now - iocg->wait_since;
2146 iocg->wait_since = now.now;
2148 if (iocg->indebt_since) {
2149 iocg->local_stat.indebt_us +=
2150 now.now - iocg->indebt_since;
2151 iocg->indebt_since = now.now;
2153 if (iocg->indelay_since) {
2154 iocg->local_stat.indelay_us +=
2155 now.now - iocg->indelay_since;
2156 iocg->indelay_since = now.now;
2159 if (waitqueue_active(&iocg->waitq) || iocg->abs_vdebt ||
2160 iocg->delay) {
2161 /* might be oversleeping vtime / hweight changes, kick */
2162 iocg_kick_waitq(iocg, true, &now);
2163 if (iocg->abs_vdebt || iocg->delay)
2164 nr_debtors++;
2165 } else if (iocg_is_idle(iocg)) {
2166 /* no waiter and idle, deactivate */
2167 u64 vtime = atomic64_read(&iocg->vtime);
2168 s64 excess;
2171 * @iocg has been inactive for a full duration and will
2172 * have a high budget. Account anything above target as
2173 * error and throw away. On reactivation, it'll start
2174 * with the target budget.
2176 excess = now.vnow - vtime - ioc->margins.target;
2177 if (excess > 0) {
2178 u32 old_hwi;
2180 current_hweight(iocg, NULL, &old_hwi);
2181 ioc->vtime_err -= div64_u64(excess * old_hwi,
2182 WEIGHT_ONE);
2183 }
2185 __propagate_weights(iocg, 0, 0, false, &now);
2186 list_del_init(&iocg->active_list);
2189 spin_unlock(&iocg->waitq.lock);
2191 commit_weights(ioc);
2194 * Wait and indebt stat are flushed above and the donation calculation
2195 * below needs updated usage stat. Let's bring stat up-to-date.
2197 iocg_flush_stat(&ioc->active_iocgs, &now);
2199 /* calc usage and see whether some weights need to be moved around */
2200 list_for_each_entry(iocg, &ioc->active_iocgs, active_list) {
2201 u64 vdone, vtime, usage_us, usage_dur;
2202 u32 usage, hw_active, hw_inuse;
2205 * Collect unused and wind vtime closer to vnow to prevent
2206 * iocgs from accumulating a large amount of budget.
2208 vdone = atomic64_read(&iocg->done_vtime);
2209 vtime = atomic64_read(&iocg->vtime);
2210 current_hweight(iocg, &hw_active, &hw_inuse);
2213 * Latency QoS detection doesn't account for IOs which are
2214 * in-flight for longer than a period. Detect them by
2215 * comparing vdone against period start. If lagging behind
2216 * IOs from past periods, don't increase vrate.
2218 if ((ppm_rthr != MILLION || ppm_wthr != MILLION) &&
2219 !atomic_read(&iocg_to_blkg(iocg)->use_delay) &&
2220 time_after64(vtime, vdone) &&
2221 time_after64(vtime, now.vnow -
2222 MAX_LAGGING_PERIODS * period_vtime) &&
2223 time_before64(vdone, now.vnow - period_vtime))
2224 nr_lagging++;
2227 * Determine absolute usage factoring in in-flight IOs to avoid
2228 * high-latency completions appearing as idle.
2230 usage_us = iocg->usage_delta_us;
2231 usage_us_sum += usage_us;
2233 if (vdone != vtime) {
2234 u64 inflight_us = DIV64_U64_ROUND_UP(
2235 cost_to_abs_cost(vtime - vdone, hw_inuse),
2236 ioc->vtime_base_rate);
2237 usage_us = max(usage_us, inflight_us);
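/*
 * For example (hypothetical): if the not-yet-done vtime converts to
 * 3ms of absolute device time while the completed usage_delta_us is
 * only 1ms, usage_us is taken as 3ms so that slow in-flight IOs don't
 * make the cgroup look idle.
 */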
2240 /* convert to hweight based usage ratio */
2241 if (time_after64(iocg->activated_at, ioc->period_at))
2242 usage_dur = max_t(u64, now.now - iocg->activated_at, 1);
2243 else
2244 usage_dur = max_t(u64, now.now - ioc->period_at, 1);
2246 usage = clamp_t(u32,
2247 DIV64_U64_ROUND_UP(usage_us * WEIGHT_ONE,
2248 usage_dur),
2249 1, WEIGHT_ONE);
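/*
 * e.g. 500us of usage over a 2000us period maps to WEIGHT_ONE / 4 -
 * usage is the fraction of device time consumed, expressed in the
 * same fixed-point scale as the hweights.
 */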
2251 /* see whether there's surplus vtime */
2252 WARN_ON_ONCE(!list_empty(&iocg->surplus_list));
2253 if (hw_inuse < hw_active ||
2254 (!waitqueue_active(&iocg->waitq) &&
2255 time_before64(vtime, now.vnow - ioc->margins.low))) {
2256 u32 hwa, old_hwi, hwm, new_hwi;
2259 * Already donating or accumulated enough to start.
2260 * Determine the donation amount.
2262 current_hweight(iocg, &hwa, &old_hwi);
2263 hwm = current_hweight_max(iocg);
2264 new_hwi = hweight_after_donation(iocg, old_hwi, hwm,
2265 usage, &now);
2267 * Donation calculation assumes hweight_after_donation
2268 * to be positive, a condition that a donor w/ hwa < 2
2269 * can't meet. Don't bother with donation if hwa is
2270 * below 2. It's not gonna make a meaningful difference
2271 * anyway.
2273 if (new_hwi < hwm && hwa >= 2) {
2274 iocg->hweight_donating = hwa;
2275 iocg->hweight_after_donation = new_hwi;
2276 list_add(&iocg->surplus_list, &surpluses);
2277 } else if (!iocg->abs_vdebt) {
2279 * @iocg doesn't have enough to donate. Reset
2280 * its inuse to active.
2282 * Don't reset debtors as their inuses are
2283 * owned by debt handling. This shouldn't affect
2284 * donation calculation in any meaningful way
2285 * as @iocg doesn't have a meaningful amount of
2286 * share anyway.
2288 TRACE_IOCG_PATH(inuse_shortage, iocg, &now,
2289 iocg->inuse, iocg->active,
2290 iocg->hweight_inuse, new_hwi);
2292 __propagate_weights(iocg, iocg->active,
2293 iocg->active, true, &now);
2295 } else {
2297 /* genuinely short on vtime */
2298 nr_shortages++;
2302 if (!list_empty(&surpluses) && nr_shortages)
2303 transfer_surpluses(&surpluses, &now);
2305 commit_weights(ioc);
2307 /* surplus list should be dissolved after use */
2308 list_for_each_entry_safe(iocg, tiocg, &surpluses, surplus_list)
2309 list_del_init(&iocg->surplus_list);
2312 * If q is getting clogged or we're missing too much, we're issuing
2313 * too much IO and should lower vtime rate. If we're not missing
2314 * and experiencing shortages but not surpluses, we're too stingy
2315 * and should increase vtime rate.
2317 prev_busy_level = ioc->busy_level;
2318 if (rq_wait_pct > RQ_WAIT_BUSY_PCT ||
2319 missed_ppm[READ] > ppm_rthr ||
2320 missed_ppm[WRITE] > ppm_wthr) {
2321 /* clearly missing QoS targets, slow down vrate */
2322 ioc->busy_level = max(ioc->busy_level, 0);
2323 ioc->busy_level++;
2324 } else if (rq_wait_pct <= RQ_WAIT_BUSY_PCT * UNBUSY_THR_PCT / 100 &&
2325 missed_ppm[READ] <= ppm_rthr * UNBUSY_THR_PCT / 100 &&
2326 missed_ppm[WRITE] <= ppm_wthr * UNBUSY_THR_PCT / 100) {
2327 /* QoS targets are being met with >25% margin */
2330 * We're throttling while the device has spare
2331 * capacity. If vrate was being slowed down, stop.
2333 ioc->busy_level = min(ioc->busy_level, 0);
2336 * If there are IOs spanning multiple periods, wait
2337 * them out before pushing the device harder.
2339 if (!nr_lagging)
2340 ioc->busy_level--;
2343 * Nobody is being throttled and the users aren't
2344 * issuing enough IOs to saturate the device. We
2345 * simply don't know how close the device is to
2346 * saturation. Coast.
2348 ioc->busy_level = 0;
2351 /* inside the hysteresis margin, we're good */
2352 ioc->busy_level = 0;
2355 ioc->busy_level = clamp(ioc->busy_level, -1000, 1000);
2357 if (ioc->busy_level > 0 || (ioc->busy_level < 0 && !nr_lagging)) {
2358 u64 vrate = ioc->vtime_base_rate;
2359 u64 vrate_min = ioc->vrate_min, vrate_max = ioc->vrate_max;
2361 /* rq_wait signal is always reliable, ignore user vrate_min */
2362 if (rq_wait_pct > RQ_WAIT_BUSY_PCT)
2363 vrate_min = VRATE_MIN;
2366 * If vrate is out of bounds, apply clamp gradually as the
2367 * bounds can change abruptly. Otherwise, apply busy_level
2368 * based adjustments.
2370 if (vrate < vrate_min) {
2371 vrate = div64_u64(vrate * (100 + VRATE_CLAMP_ADJ_PCT),
2372 100);
2373 vrate = min(vrate, vrate_min);
2374 } else if (vrate > vrate_max) {
2375 vrate = div64_u64(vrate * (100 - VRATE_CLAMP_ADJ_PCT),
2376 100);
2377 vrate = max(vrate, vrate_max);
2378 } else {
2379 int idx = min_t(int, abs(ioc->busy_level),
2380 ARRAY_SIZE(vrate_adj_pct) - 1);
2381 u32 adj_pct = vrate_adj_pct[idx];
2383 if (ioc->busy_level > 0)
2384 adj_pct = 100 - adj_pct;
2385 else
2386 adj_pct = 100 + adj_pct;
2388 vrate = clamp(DIV64_U64_ROUND_UP(vrate * adj_pct, 100),
2389 vrate_min, vrate_max);
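/*
 * Example under made-up numbers: with busy_level = 3 and the matching
 * vrate_adj_pct entry at, say, 5, a busy device has vrate scaled to
 * 95% while an underutilized one (busy_level = -3) gets 105%, always
 * clamped to [vrate_min, vrate_max].
 */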
2392 trace_iocost_ioc_vrate_adj(ioc, vrate, missed_ppm, rq_wait_pct,
2393 nr_lagging, nr_shortages);
2395 ioc->vtime_base_rate = vrate;
2396 ioc_refresh_margins(ioc);
2397 } else if (ioc->busy_level != prev_busy_level || nr_lagging) {
2398 trace_iocost_ioc_vrate_adj(ioc, atomic64_read(&ioc->vtime_rate),
2399 missed_ppm, rq_wait_pct, nr_lagging,
2400 nr_shortages);
2403 ioc_refresh_params(ioc, false);
2405 ioc_forgive_debts(ioc, usage_us_sum, nr_debtors, &now);
2408 * This period is done. Move on to the next one. If nothing's
2409 * going on with the device, stop the timer.
2411 atomic64_inc(&ioc->cur_period);
2413 if (ioc->running != IOC_STOP) {
2414 if (!list_empty(&ioc->active_iocgs)) {
2415 ioc_start_period(ioc, &now);
2416 } else {
2417 ioc->busy_level = 0;
2419 ioc->running = IOC_IDLE;
2422 ioc_refresh_vrate(ioc, &now);
2425 spin_unlock_irq(&ioc->lock);
2428 static u64 adjust_inuse_and_calc_cost(struct ioc_gq *iocg, u64 vtime,
2429 u64 abs_cost, struct ioc_now *now)
2431 struct ioc *ioc = iocg->ioc;
2432 struct ioc_margins *margins = &ioc->margins;
2433 u32 __maybe_unused old_inuse = iocg->inuse, __maybe_unused old_hwi;
2434 u32 hwi, adj_step;
2435 s64 margin;
2436 u64 cost, new_inuse;
2437 unsigned long flags;
2439 current_hweight(iocg, NULL, &hwi);
2441 cost = abs_cost_to_cost(abs_cost, hwi);
2442 margin = now->vnow - vtime - cost;
2444 /* debt handling owns inuse for debtors */
2445 if (iocg->abs_vdebt)
2446 return cost;
2449 * We only increase inuse during a period and do so iff the margin has
2450 * deteriorated since the previous adjustment.
2452 if (margin >= iocg->saved_margin || margin >= margins->low ||
2453 iocg->inuse == iocg->active)
2454 return cost;
2456 spin_lock_irqsave(&ioc->lock, flags);
2458 /* we own inuse only when @iocg is in the normal active state */
2459 if (iocg->abs_vdebt || list_empty(&iocg->active_list)) {
2460 spin_unlock_irqrestore(&ioc->lock, flags);
2461 return cost;
2462 }
2465 * Bump up inuse till @abs_cost fits in the existing budget.
2466 * adj_step must be determined after acquiring ioc->lock - we might
2467 * have raced and lost the activation race to another thread and could
2468 * be reading 0 iocg->active before ioc->lock, which would lead to an
2469 * infinite loop.
2471 new_inuse = iocg->inuse;
2472 adj_step = DIV_ROUND_UP(iocg->active * INUSE_ADJ_STEP_PCT, 100);
2473 do {
2474 new_inuse = new_inuse + adj_step;
2475 propagate_weights(iocg, iocg->active, new_inuse, true, now);
2476 current_hweight(iocg, NULL, &hwi);
2477 cost = abs_cost_to_cost(abs_cost, hwi);
2478 } while (time_after64(vtime + cost, now->vnow) &&
2479 iocg->inuse != iocg->active);
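/*
 * Sketch of the loop above, assuming INUSE_ADJ_STEP_PCT of 25: inuse
 * is raised in steps of a quarter of active, each step recomputing hwi
 * and thereby shrinking the vtime cost of @abs_cost, until the IO fits
 * within the budget or inuse reaches active.
 */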
2481 spin_unlock_irqrestore(&ioc->lock, flags);
2483 TRACE_IOCG_PATH(inuse_adjust, iocg, now,
2484 old_inuse, iocg->inuse, old_hwi, hwi);
2486 return cost;
2489 static void calc_vtime_cost_builtin(struct bio *bio, struct ioc_gq *iocg,
2490 bool is_merge, u64 *costp)
2492 struct ioc *ioc = iocg->ioc;
2493 u64 coef_seqio, coef_randio, coef_page;
2494 u64 pages = max_t(u64, bio_sectors(bio) >> IOC_SECT_TO_PAGE_SHIFT, 1);
2495 u64 seek_pages = 0;
2496 u64 cost = 0;
2498 switch (bio_op(bio)) {
2499 case REQ_OP_READ:
2500 coef_seqio = ioc->params.lcoefs[LCOEF_RSEQIO];
2501 coef_randio = ioc->params.lcoefs[LCOEF_RRANDIO];
2502 coef_page = ioc->params.lcoefs[LCOEF_RPAGE];
2503 break;
2504 case REQ_OP_WRITE:
2505 coef_seqio = ioc->params.lcoefs[LCOEF_WSEQIO];
2506 coef_randio = ioc->params.lcoefs[LCOEF_WRANDIO];
2507 coef_page = ioc->params.lcoefs[LCOEF_WPAGE];
2508 break;
2509 default:
2510 goto out;
2511 }
2513 if (iocg->cursor) {
2514 seek_pages = abs(bio->bi_iter.bi_sector - iocg->cursor);
2515 seek_pages >>= IOC_SECT_TO_PAGE_SHIFT;
2516 }
2518 if (!is_merge) {
2519 if (seek_pages > LCOEF_RANDIO_PAGES) {
2520 cost += coef_randio;
2521 } else {
2522 cost += coef_seqio;
2523 }
2524 }
2525 cost += pages * coef_page;
2526 out:
2527 *costp = cost;
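/*
 * Worked example with hypothetical coefficients: a 64KiB read (16
 * pages) whose seek distance stays within LCOEF_RANDIO_PAGES costs
 * coef_seqio + 16 * coef_page; the same IO after a long seek costs
 * coef_randio + 16 * coef_page.
 */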
2530 static u64 calc_vtime_cost(struct bio *bio, struct ioc_gq *iocg, bool is_merge)
2532 u64 cost;
2534 calc_vtime_cost_builtin(bio, iocg, is_merge, &cost);
2535 return cost;
2538 static void calc_size_vtime_cost_builtin(struct request *rq, struct ioc *ioc,
2539 u64 *costp)
2541 unsigned int pages = blk_rq_stats_sectors(rq) >> IOC_SECT_TO_PAGE_SHIFT;
2543 switch (req_op(rq)) {
2544 case REQ_OP_READ:
2545 *costp = pages * ioc->params.lcoefs[LCOEF_RPAGE];
2546 break;
2547 case REQ_OP_WRITE:
2548 *costp = pages * ioc->params.lcoefs[LCOEF_WPAGE];
2549 break;
2550 default:
2551 *costp = 0;
2552 }
2555 static u64 calc_size_vtime_cost(struct request *rq, struct ioc *ioc)
2557 u64 cost;
2559 calc_size_vtime_cost_builtin(rq, ioc, &cost);
2560 return cost;
2563 static void ioc_rqos_throttle(struct rq_qos *rqos, struct bio *bio)
2565 struct blkcg_gq *blkg = bio->bi_blkg;
2566 struct ioc *ioc = rqos_to_ioc(rqos);
2567 struct ioc_gq *iocg = blkg_to_iocg(blkg);
2568 struct ioc_now now;
2569 struct iocg_wait wait;
2570 u64 abs_cost, cost, vtime;
2571 bool use_debt, ioc_locked;
2572 unsigned long flags;
2574 /* bypass IOs if disabled, still initializing, or for root cgroup */
2575 if (!ioc->enabled || !iocg || !iocg->level)
2576 return;
2578 /* calculate the absolute vtime cost */
2579 abs_cost = calc_vtime_cost(bio, iocg, false);
2580 if (!abs_cost)
2581 return;
2583 if (!iocg_activate(iocg, &now))
2584 return;
2586 iocg->cursor = bio_end_sector(bio);
2587 vtime = atomic64_read(&iocg->vtime);
2588 cost = adjust_inuse_and_calc_cost(iocg, vtime, abs_cost, &now);
2591 * If no one's waiting and within budget, issue right away. The
2592 * tests are racy but the races aren't systemic - we only miss once
2593 * in a while which is fine.
2595 if (!waitqueue_active(&iocg->waitq) && !iocg->abs_vdebt &&
2596 time_before_eq64(vtime + cost, now.vnow)) {
2597 iocg_commit_bio(iocg, bio, abs_cost, cost);
2598 return;
2599 }
2602 * We're over budget. This can be handled in two ways. IOs which may
2603 * cause priority inversions are punted to @ioc->aux_iocg and charged as
2604 * debt. Otherwise, the issuer is blocked on @iocg->waitq. Debt handling
2605 * requires @ioc->lock, waitq handling @iocg->waitq.lock. Determine
2606 * whether debt handling is needed and acquire locks accordingly.
2608 use_debt = bio_issue_as_root_blkg(bio) || fatal_signal_pending(current);
2609 ioc_locked = use_debt || READ_ONCE(iocg->abs_vdebt);
2610 retry_lock:
2611 iocg_lock(iocg, ioc_locked, &flags);
2614 * @iocg must stay activated for debt and waitq handling. Deactivation
2615 * is synchronized against both ioc->lock and waitq.lock and we won't
2616 * get deactivated as long as we're waiting or have debt, so we're good
2617 * if we're activated here. In the unlikely case that we aren't, just
2618 * issue the IO.
2620 if (unlikely(list_empty(&iocg->active_list))) {
2621 iocg_unlock(iocg, ioc_locked, &flags);
2622 iocg_commit_bio(iocg, bio, abs_cost, cost);
2623 return;
2624 }
2627 * We're over budget. If @bio has to be issued regardless, remember
2628 * the abs_cost instead of advancing vtime. iocg_kick_waitq() will pay
2629 * off the debt before waking more IOs.
2631 * This way, the debt is continuously paid off each period with the
2632 * actual budget available to the cgroup. If we just wound vtime, we
2633 * would incorrectly use the current hw_inuse for the entire amount
2634 * which, for example, can lead to the cgroup staying blocked for a
2635 * long time even with substantially raised hw_inuse.
2637 * An iocg with vdebt should stay online so that the timer can keep
2638 * deducting its vdebt and [de]activate use_delay mechanism
2639 * accordingly. We don't want to race against the timer trying to
2640 * clear them and leave @iocg inactive w/ dangling use_delay heavily
2641 * penalizing the cgroup and its descendants.
2643 if (use_debt) {
2644 iocg_incur_debt(iocg, abs_cost, &now);
2645 if (iocg_kick_delay(iocg, &now))
2646 blkcg_schedule_throttle(rqos->q,
2647 (bio->bi_opf & REQ_SWAP) == REQ_SWAP);
2648 iocg_unlock(iocg, ioc_locked, &flags);
2649 return;
2650 }
2652 /* guarantee that iocgs w/ waiters have maximum inuse */
2653 if (!iocg->abs_vdebt && iocg->inuse != iocg->active) {
2654 if (!ioc_locked) {
2655 iocg_unlock(iocg, false, &flags);
2656 ioc_locked = true;
2657 goto retry_lock;
2658 }
2659 propagate_weights(iocg, iocg->active, iocg->active, true,
2660 &now);
2661 }
2664 * Append self to the waitq and schedule the wakeup timer if we're
2665 * the first waiter. The timer duration is calculated based on the
2666 * current vrate. vtime and hweight changes can make it too short
2667 * or too long. Each wait entry records the absolute cost it's
2668 * waiting for to allow re-evaluation using a custom wait entry.
2670 * If too short, the timer simply reschedules itself. If too long,
2671 * the period timer will notice and trigger wakeups.
2673 * All waiters are on iocg->waitq and the wait states are
2674 * synchronized using waitq.lock.
2676 init_waitqueue_func_entry(&wait.wait, iocg_wake_fn);
2677 wait.wait.private = current;
2678 wait.bio = bio;
2679 wait.abs_cost = abs_cost;
2680 wait.committed = false; /* will be set true by waker */
2682 __add_wait_queue_entry_tail(&iocg->waitq, &wait.wait);
2683 iocg_kick_waitq(iocg, ioc_locked, &now);
2685 iocg_unlock(iocg, ioc_locked, &flags);
2687 while (true) {
2688 set_current_state(TASK_UNINTERRUPTIBLE);
2689 if (wait.committed)
2690 break;
2691 io_schedule();
2692 }
2694 /* waker already committed us, proceed */
2695 finish_wait(&iocg->waitq, &wait.wait);
2698 static void ioc_rqos_merge(struct rq_qos *rqos, struct request *rq,
2699 struct bio *bio)
2701 struct ioc_gq *iocg = blkg_to_iocg(bio->bi_blkg);
2702 struct ioc *ioc = rqos_to_ioc(rqos);
2703 sector_t bio_end = bio_end_sector(bio);
2704 struct ioc_now now;
2705 u64 vtime, abs_cost, cost;
2706 unsigned long flags;
2708 /* bypass if disabled, still initializing, or for root cgroup */
2709 if (!ioc->enabled || !iocg || !iocg->level)
2712 abs_cost = calc_vtime_cost(bio, iocg, true);
2713 if (!abs_cost)
2714 return;
2716 ioc_now(ioc, &now);
2718 vtime = atomic64_read(&iocg->vtime);
2719 cost = adjust_inuse_and_calc_cost(iocg, vtime, abs_cost, &now);
2721 /* update cursor if backmerging into the request at the cursor */
2722 if (blk_rq_pos(rq) < bio_end &&
2723 blk_rq_pos(rq) + blk_rq_sectors(rq) == iocg->cursor)
2724 iocg->cursor = bio_end;
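/*
 * e.g. (hypothetical sectors) a request spanning [100, 200) with the
 * cursor at 200 being back-merged with a bio ending at sector 240
 * moves the cursor to 240, keeping the next IO's seq-vs-rand
 * classification accurate.
 */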
2727 * Charge if there's enough vtime budget and the existing request has
2728 * cost assigned.
2730 if (rq->bio && rq->bio->bi_iocost_cost &&
2731 time_before_eq64(atomic64_read(&iocg->vtime) + cost, now.vnow)) {
2732 iocg_commit_bio(iocg, bio, abs_cost, cost);
2737 * Otherwise, account it as debt if @iocg is online, which it should
2738 * be for the vast majority of cases. See debt handling in
2739 * ioc_rqos_throttle() for details.
2741 spin_lock_irqsave(&ioc->lock, flags);
2742 spin_lock(&iocg->waitq.lock);
2744 if (likely(!list_empty(&iocg->active_list))) {
2745 iocg_incur_debt(iocg, abs_cost, &now);
2746 if (iocg_kick_delay(iocg, &now))
2747 blkcg_schedule_throttle(rqos->q,
2748 (bio->bi_opf & REQ_SWAP) == REQ_SWAP);
2749 } else {
2750 iocg_commit_bio(iocg, bio, abs_cost, cost);
2751 }
2753 spin_unlock(&iocg->waitq.lock);
2754 spin_unlock_irqrestore(&ioc->lock, flags);
2757 static void ioc_rqos_done_bio(struct rq_qos *rqos, struct bio *bio)
2759 struct ioc_gq *iocg = blkg_to_iocg(bio->bi_blkg);
2761 if (iocg && bio->bi_iocost_cost)
2762 atomic64_add(bio->bi_iocost_cost, &iocg->done_vtime);
2765 static void ioc_rqos_done(struct rq_qos *rqos, struct request *rq)
2767 struct ioc *ioc = rqos_to_ioc(rqos);
2768 struct ioc_pcpu_stat *ccs;
2769 u64 on_q_ns, rq_wait_ns, size_nsec;
2770 int pidx, rw;
2772 if (!ioc->enabled || !rq->alloc_time_ns || !rq->start_time_ns)
2775 switch (req_op(rq) & REQ_OP_MASK) {
2776 case REQ_OP_READ:
2777 pidx = QOS_RLAT;
2778 rw = READ;
2779 break;
2780 case REQ_OP_WRITE:
2781 pidx = QOS_WLAT;
2782 rw = WRITE;
2783 break;
2784 default:
2785 return;
2786 }
2788 on_q_ns = ktime_get_ns() - rq->alloc_time_ns;
2789 rq_wait_ns = rq->start_time_ns - rq->alloc_time_ns;
2790 size_nsec = div64_u64(calc_size_vtime_cost(rq, ioc), VTIME_PER_NSEC);
2792 ccs = get_cpu_ptr(ioc->pcpu_stat);
2794 if (on_q_ns <= size_nsec ||
2795 on_q_ns - size_nsec <= ioc->params.qos[pidx] * NSEC_PER_USEC)
2796 local_inc(&ccs->missed[rw].nr_met);
2797 else
2798 local_inc(&ccs->missed[rw].nr_missed);
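/*
 * Example with made-up numbers: with a 5000us read latency target, a
 * read that sat on the queue for 7ms of which 3ms is explained by its
 * size cost counts as met (7ms - 3ms <= 5ms); at 9ms on queue it
 * would count as missed.
 */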
2800 local64_add(rq_wait_ns, &ccs->rq_wait_ns);
2802 put_cpu_ptr(ioc->pcpu_stat);
2805 static void ioc_rqos_queue_depth_changed(struct rq_qos *rqos)
2807 struct ioc *ioc = rqos_to_ioc(rqos);
2809 spin_lock_irq(&ioc->lock);
2810 ioc_refresh_params(ioc, false);
2811 spin_unlock_irq(&ioc->lock);
2814 static void ioc_rqos_exit(struct rq_qos *rqos)
2816 struct ioc *ioc = rqos_to_ioc(rqos);
2818 blkcg_deactivate_policy(rqos->q, &blkcg_policy_iocost);
2820 spin_lock_irq(&ioc->lock);
2821 ioc->running = IOC_STOP;
2822 spin_unlock_irq(&ioc->lock);
2824 del_timer_sync(&ioc->timer);
2825 free_percpu(ioc->pcpu_stat);
2826 kfree(ioc);
2829 static struct rq_qos_ops ioc_rqos_ops = {
2830 .throttle = ioc_rqos_throttle,
2831 .merge = ioc_rqos_merge,
2832 .done_bio = ioc_rqos_done_bio,
2833 .done = ioc_rqos_done,
2834 .queue_depth_changed = ioc_rqos_queue_depth_changed,
2835 .exit = ioc_rqos_exit,
2838 static int blk_iocost_init(struct request_queue *q)
2840 struct ioc *ioc;
2841 struct rq_qos *rqos;
2842 int i, cpu, ret;
2844 ioc = kzalloc(sizeof(*ioc), GFP_KERNEL);
2845 if (!ioc)
2846 return -ENOMEM;
2848 ioc->pcpu_stat = alloc_percpu(struct ioc_pcpu_stat);
2849 if (!ioc->pcpu_stat) {
2850 kfree(ioc);
2851 return -ENOMEM;
2852 }
2854 for_each_possible_cpu(cpu) {
2855 struct ioc_pcpu_stat *ccs = per_cpu_ptr(ioc->pcpu_stat, cpu);
2857 for (i = 0; i < ARRAY_SIZE(ccs->missed); i++) {
2858 local_set(&ccs->missed[i].nr_met, 0);
2859 local_set(&ccs->missed[i].nr_missed, 0);
2861 local64_set(&ccs->rq_wait_ns, 0);
2864 rqos = &ioc->rqos;
2865 rqos->id = RQ_QOS_COST;
2866 rqos->ops = &ioc_rqos_ops;
2867 rqos->q = q;
2869 spin_lock_init(&ioc->lock);
2870 timer_setup(&ioc->timer, ioc_timer_fn, 0);
2871 INIT_LIST_HEAD(&ioc->active_iocgs);
2873 ioc->running = IOC_IDLE;
2874 ioc->vtime_base_rate = VTIME_PER_USEC;
2875 atomic64_set(&ioc->vtime_rate, VTIME_PER_USEC);
2876 seqcount_spinlock_init(&ioc->period_seqcount, &ioc->lock);
2877 ioc->period_at = ktime_to_us(ktime_get());
2878 atomic64_set(&ioc->cur_period, 0);
2879 atomic_set(&ioc->hweight_gen, 0);
2881 spin_lock_irq(&ioc->lock);
2882 ioc->autop_idx = AUTOP_INVALID;
2883 ioc_refresh_params(ioc, true);
2884 spin_unlock_irq(&ioc->lock);
2887 * rqos must be added before activation to allow iocg_pd_init() to
2888 * look up the ioc from q. This means that the rqos methods may get
2889 * called before policy activation completion; they can't assume that the
2890 * target bio has an iocg associated and need to test for NULL iocg.
2892 rq_qos_add(q, rqos);
2893 ret = blkcg_activate_policy(q, &blkcg_policy_iocost);
2894 if (ret) {
2895 rq_qos_del(q, rqos);
2896 free_percpu(ioc->pcpu_stat);
2897 kfree(ioc);
2898 return ret;
2899 }
2901 return 0;
2903 static struct blkcg_policy_data *ioc_cpd_alloc(gfp_t gfp)
2905 struct ioc_cgrp *iocc;
2907 iocc = kzalloc(sizeof(struct ioc_cgrp), gfp);
2908 if (!iocc)
2909 return NULL;
2911 iocc->dfl_weight = CGROUP_WEIGHT_DFL * WEIGHT_ONE;
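/* i.e. the default weight of 100 (CGROUP_WEIGHT_DFL) carried with 16
 * bits of fixed-point fraction - WEIGHT_ONE is the scale used for all
 * weights inside the controller. */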
2915 static void ioc_cpd_free(struct blkcg_policy_data *cpd)
2917 kfree(container_of(cpd, struct ioc_cgrp, cpd));
2920 static struct blkg_policy_data *ioc_pd_alloc(gfp_t gfp, struct request_queue *q,
2921 struct blkcg *blkcg)
2923 int levels = blkcg->css.cgroup->level + 1;
2924 struct ioc_gq *iocg;
2926 iocg = kzalloc_node(struct_size(iocg, ancestors, levels), gfp, q->node);
2927 if (!iocg)
2928 return NULL;
2930 iocg->pcpu_stat = alloc_percpu_gfp(struct iocg_pcpu_stat, gfp);
2931 if (!iocg->pcpu_stat) {
2932 kfree(iocg);
2933 return NULL;
2934 }
2936 return &iocg->pd;
2939 static void ioc_pd_init(struct blkg_policy_data *pd)
2941 struct ioc_gq *iocg = pd_to_iocg(pd);
2942 struct blkcg_gq *blkg = pd_to_blkg(&iocg->pd);
2943 struct ioc *ioc = q_to_ioc(blkg->q);
2944 struct ioc_now now;
2945 struct blkcg_gq *tblkg;
2946 unsigned long flags;
2948 ioc_now(ioc, &now);
2950 iocg->ioc = ioc;
2951 atomic64_set(&iocg->vtime, now.vnow);
2952 atomic64_set(&iocg->done_vtime, now.vnow);
2953 atomic64_set(&iocg->active_period, atomic64_read(&ioc->cur_period));
2954 INIT_LIST_HEAD(&iocg->active_list);
2955 INIT_LIST_HEAD(&iocg->walk_list);
2956 INIT_LIST_HEAD(&iocg->surplus_list);
2957 iocg->hweight_active = WEIGHT_ONE;
2958 iocg->hweight_inuse = WEIGHT_ONE;
2960 init_waitqueue_head(&iocg->waitq);
2961 hrtimer_init(&iocg->waitq_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
2962 iocg->waitq_timer.function = iocg_waitq_timer_fn;
2964 iocg->level = blkg->blkcg->css.cgroup->level;
2966 for (tblkg = blkg; tblkg; tblkg = tblkg->parent) {
2967 struct ioc_gq *tiocg = blkg_to_iocg(tblkg);
2968 iocg->ancestors[tiocg->level] = tiocg;
2971 spin_lock_irqsave(&ioc->lock, flags);
2972 weight_updated(iocg, &now);
2973 spin_unlock_irqrestore(&ioc->lock, flags);
2976 static void ioc_pd_free(struct blkg_policy_data *pd)
2978 struct ioc_gq *iocg = pd_to_iocg(pd);
2979 struct ioc *ioc = iocg->ioc;
2980 unsigned long flags;
2983 spin_lock_irqsave(&ioc->lock, flags);
2985 if (!list_empty(&iocg->active_list)) {
2986 struct ioc_now now;
2988 ioc_now(ioc, &now);
2989 propagate_weights(iocg, 0, 0, false, &now);
2990 list_del_init(&iocg->active_list);
2993 WARN_ON_ONCE(!list_empty(&iocg->walk_list));
2994 WARN_ON_ONCE(!list_empty(&iocg->surplus_list));
2996 spin_unlock_irqrestore(&ioc->lock, flags);
2998 hrtimer_cancel(&iocg->waitq_timer);
3000 free_percpu(iocg->pcpu_stat);
3001 kfree(iocg);
3004 static size_t ioc_pd_stat(struct blkg_policy_data *pd, char *buf, size_t size)
3006 struct ioc_gq *iocg = pd_to_iocg(pd);
3007 struct ioc *ioc = iocg->ioc;
3008 size_t pos = 0;
3010 if (!ioc->enabled)
3011 return 0;
3013 if (iocg->level == 0) {
3014 unsigned vp10k = DIV64_U64_ROUND_CLOSEST(
3015 ioc->vtime_base_rate * 10000,
3016 VTIME_PER_USEC);
3017 pos += scnprintf(buf + pos, size - pos, " cost.vrate=%u.%02u",
3018 vp10k / 100, vp10k % 100);
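/*
 * e.g. a vtime_base_rate of 1.375x VTIME_PER_USEC yields vp10k = 13750
 * and prints as "cost.vrate=137.50" - device vtime currently running
 * at 137.5% of wallclock.
 */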
3021 pos += scnprintf(buf + pos, size - pos, " cost.usage=%llu",
3022 iocg->last_stat.usage_us);
3024 if (blkcg_debug_stats)
3025 pos += scnprintf(buf + pos, size - pos,
3026 " cost.wait=%llu cost.indebt=%llu cost.indelay=%llu",
3027 iocg->last_stat.wait_us,
3028 iocg->last_stat.indebt_us,
3029 iocg->last_stat.indelay_us);
3031 return pos;
3034 static u64 ioc_weight_prfill(struct seq_file *sf, struct blkg_policy_data *pd,
3037 const char *dname = blkg_dev_name(pd->blkg);
3038 struct ioc_gq *iocg = pd_to_iocg(pd);
3040 if (dname && iocg->cfg_weight)
3041 seq_printf(sf, "%s %u\n", dname, iocg->cfg_weight / WEIGHT_ONE);
3046 static int ioc_weight_show(struct seq_file *sf, void *v)
3048 struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
3049 struct ioc_cgrp *iocc = blkcg_to_iocc(blkcg);
3051 seq_printf(sf, "default %u\n", iocc->dfl_weight / WEIGHT_ONE);
3052 blkcg_print_blkgs(sf, blkcg, ioc_weight_prfill,
3053 &blkcg_policy_iocost, seq_cft(sf)->private, false);
3057 static ssize_t ioc_weight_write(struct kernfs_open_file *of, char *buf,
3058 size_t nbytes, loff_t off)
3060 struct blkcg *blkcg = css_to_blkcg(of_css(of));
3061 struct ioc_cgrp *iocc = blkcg_to_iocc(blkcg);
3062 struct blkg_conf_ctx ctx;
3063 struct ioc_now now;
3064 struct ioc_gq *iocg;
3065 u32 v;
3066 int ret;
3068 if (!strchr(buf, ':')) {
3069 struct blkcg_gq *blkg;
3071 if (!sscanf(buf, "default %u", &v) && !sscanf(buf, "%u", &v))
3072 return -EINVAL;
3074 if (v < CGROUP_WEIGHT_MIN || v > CGROUP_WEIGHT_MAX)
3075 return -EINVAL;
3077 spin_lock_irq(&blkcg->lock);
3078 iocc->dfl_weight = v * WEIGHT_ONE;
3079 hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
3080 struct ioc_gq *iocg = blkg_to_iocg(blkg);
3082 if (iocg) {
3083 spin_lock(&iocg->ioc->lock);
3084 ioc_now(iocg->ioc, &now);
3085 weight_updated(iocg, &now);
3086 spin_unlock(&iocg->ioc->lock);
3089 spin_unlock_irq(&blkcg->lock);
3091 return nbytes;
3094 ret = blkg_conf_prep(blkcg, &blkcg_policy_iocost, buf, &ctx);
3095 if (ret)
3096 return ret;
3098 iocg = blkg_to_iocg(ctx.blkg);
3100 if (!strncmp(ctx.body, "default", 7)) {
3101 v = 0;
3102 } else {
3103 if (!sscanf(ctx.body, "%u", &v))
3104 goto einval;
3105 if (v < CGROUP_WEIGHT_MIN || v > CGROUP_WEIGHT_MAX)
3106 goto einval;
3107 }
3109 spin_lock(&iocg->ioc->lock);
3110 iocg->cfg_weight = v * WEIGHT_ONE;
3111 ioc_now(iocg->ioc, &now);
3112 weight_updated(iocg, &now);
3113 spin_unlock(&iocg->ioc->lock);
3115 blkg_conf_finish(&ctx);
3116 return nbytes;
3118 einval:
3119 blkg_conf_finish(&ctx);
3120 return -EINVAL;
3123 static u64 ioc_qos_prfill(struct seq_file *sf, struct blkg_policy_data *pd,
3126 const char *dname = blkg_dev_name(pd->blkg);
3127 struct ioc *ioc = pd_to_iocg(pd)->ioc;
3129 if (!dname)
3130 return 0;
3132 seq_printf(sf, "%s enable=%d ctrl=%s rpct=%u.%02u rlat=%u wpct=%u.%02u wlat=%u min=%u.%02u max=%u.%02u\n",
3133 dname, ioc->enabled, ioc->user_qos_params ? "user" : "auto",
3134 ioc->params.qos[QOS_RPPM] / 10000,
3135 ioc->params.qos[QOS_RPPM] % 10000 / 100,
3136 ioc->params.qos[QOS_RLAT],
3137 ioc->params.qos[QOS_WPPM] / 10000,
3138 ioc->params.qos[QOS_WPPM] % 10000 / 100,
3139 ioc->params.qos[QOS_WLAT],
3140 ioc->params.qos[QOS_MIN] / 10000,
3141 ioc->params.qos[QOS_MIN] % 10000 / 100,
3142 ioc->params.qos[QOS_MAX] / 10000,
3143 ioc->params.qos[QOS_MAX] % 10000 / 100);
3147 static int ioc_qos_show(struct seq_file *sf, void *v)
3149 struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
3151 blkcg_print_blkgs(sf, blkcg, ioc_qos_prfill,
3152 &blkcg_policy_iocost, seq_cft(sf)->private, false);
3156 static const match_table_t qos_ctrl_tokens = {
3157 { QOS_ENABLE, "enable=%u" },
3158 { QOS_CTRL, "ctrl=%s" },
3159 { NR_QOS_CTRL_PARAMS, NULL },
3162 static const match_table_t qos_tokens = {
3163 { QOS_RPPM, "rpct=%s" },
3164 { QOS_RLAT, "rlat=%u" },
3165 { QOS_WPPM, "wpct=%s" },
3166 { QOS_WLAT, "wlat=%u" },
3167 { QOS_MIN, "min=%s" },
3168 { QOS_MAX, "max=%s" },
3169 { NR_QOS_PARAMS, NULL },
3172 static ssize_t ioc_qos_write(struct kernfs_open_file *of, char *input,
3173 size_t nbytes, loff_t off)
3175 struct gendisk *disk;
3176 struct ioc *ioc;
3177 u32 qos[NR_QOS_PARAMS];
3178 bool enable, user;
3179 char *p;
3180 int ret;
3182 disk = blkcg_conf_get_disk(&input);
3183 if (IS_ERR(disk))
3184 return PTR_ERR(disk);
3186 ioc = q_to_ioc(disk->queue);
3187 if (!ioc) {
3188 ret = blk_iocost_init(disk->queue);
3189 if (ret)
3190 goto err;
3191 ioc = q_to_ioc(disk->queue);
3192 }
3194 spin_lock_irq(&ioc->lock);
3195 memcpy(qos, ioc->params.qos, sizeof(qos));
3196 enable = ioc->enabled;
3197 user = ioc->user_qos_params;
3198 spin_unlock_irq(&ioc->lock);
3200 while ((p = strsep(&input, " \t\n"))) {
3201 substring_t args[MAX_OPT_ARGS];
3202 char buf[32];
3203 int tok;
3204 s64 v;
3206 if (!*p)
3207 continue;
3209 switch (match_token(p, qos_ctrl_tokens, args)) {
3210 case QOS_ENABLE:
3211 match_u64(&args[0], &v);
3212 enable = v;
3213 continue;
3214 case QOS_CTRL:
3215 match_strlcpy(buf, &args[0], sizeof(buf));
3216 if (!strcmp(buf, "auto"))
3217 user = false;
3218 else if (!strcmp(buf, "user"))
3219 user = true;
3220 else
3221 goto einval;
3222 continue;
3223 }
3225 tok = match_token(p, qos_tokens, args);
3226 switch (tok) {
3227 case QOS_RPPM:
3228 case QOS_WPPM:
3229 if (match_strlcpy(buf, &args[0], sizeof(buf)) >=
3230 sizeof(buf))
3231 goto einval;
3232 if (cgroup_parse_float(buf, 2, &v))
3233 goto einval;
3234 if (v < 0 || v > 10000)
3235 goto einval;
3236 qos[tok] = v * 100;
3237 break;
3238 case QOS_RLAT:
3239 case QOS_WLAT:
3240 if (match_u64(&args[0], &v))
3241 goto einval;
3242 qos[tok] = v;
3243 break;
3244 case QOS_MIN:
3245 case QOS_MAX:
3246 if (match_strlcpy(buf, &args[0], sizeof(buf)) >=
3247 sizeof(buf))
3248 goto einval;
3249 if (cgroup_parse_float(buf, 2, &v))
3250 goto einval;
3251 if (v < 0)
3252 goto einval;
3253 qos[tok] = clamp_t(s64, v * 100,
3254 VRATE_MIN_PPM, VRATE_MAX_PPM);
3255 break;
3256 default:
3257 goto einval;
3258 }
3262 if (qos[QOS_MIN] > qos[QOS_MAX])
3263 goto einval;
3265 spin_lock_irq(&ioc->lock);
3267 if (enable) {
3268 blk_stat_enable_accounting(ioc->rqos.q);
3269 blk_queue_flag_set(QUEUE_FLAG_RQ_ALLOC_TIME, ioc->rqos.q);
3270 ioc->enabled = true;
3271 } else {
3272 blk_queue_flag_clear(QUEUE_FLAG_RQ_ALLOC_TIME, ioc->rqos.q);
3273 ioc->enabled = false;
3274 }
3276 if (user) {
3277 memcpy(ioc->params.qos, qos, sizeof(qos));
3278 ioc->user_qos_params = true;
3279 } else {
3280 ioc->user_qos_params = false;
3281 }
3283 ioc_refresh_params(ioc, true);
3284 spin_unlock_irq(&ioc->lock);
3286 put_disk_and_module(disk);
3287 return nbytes;
3288 einval:
3289 ret = -EINVAL;
3290 err:
3291 put_disk_and_module(disk);
3292 return ret;
3295 static u64 ioc_cost_model_prfill(struct seq_file *sf,
3296 struct blkg_policy_data *pd, int off)
3298 const char *dname = blkg_dev_name(pd->blkg);
3299 struct ioc *ioc = pd_to_iocg(pd)->ioc;
3300 u64 *u = ioc->params.i_lcoefs;
3302 if (!dname)
3303 return 0;
3305 seq_printf(sf, "%s ctrl=%s model=linear "
3306 "rbps=%llu rseqiops=%llu rrandiops=%llu "
3307 "wbps=%llu wseqiops=%llu wrandiops=%llu\n",
3308 dname, ioc->user_cost_model ? "user" : "auto",
3309 u[I_LCOEF_RBPS], u[I_LCOEF_RSEQIOPS], u[I_LCOEF_RRANDIOPS],
3310 u[I_LCOEF_WBPS], u[I_LCOEF_WSEQIOPS], u[I_LCOEF_WRANDIOPS]);
3314 static int ioc_cost_model_show(struct seq_file *sf, void *v)
3316 struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
3318 blkcg_print_blkgs(sf, blkcg, ioc_cost_model_prfill,
3319 &blkcg_policy_iocost, seq_cft(sf)->private, false);
3323 static const match_table_t cost_ctrl_tokens = {
3324 { COST_CTRL, "ctrl=%s" },
3325 { COST_MODEL, "model=%s" },
3326 { NR_COST_CTRL_PARAMS, NULL },
3329 static const match_table_t i_lcoef_tokens = {
3330 { I_LCOEF_RBPS, "rbps=%u" },
3331 { I_LCOEF_RSEQIOPS, "rseqiops=%u" },
3332 { I_LCOEF_RRANDIOPS, "rrandiops=%u" },
3333 { I_LCOEF_WBPS, "wbps=%u" },
3334 { I_LCOEF_WSEQIOPS, "wseqiops=%u" },
3335 { I_LCOEF_WRANDIOPS, "wrandiops=%u" },
3336 { NR_I_LCOEFS, NULL },
3339 static ssize_t ioc_cost_model_write(struct kernfs_open_file *of, char *input,
3340 size_t nbytes, loff_t off)
3342 struct gendisk *disk;
3343 struct ioc *ioc;
3344 u64 u[NR_I_LCOEFS];
3345 bool user;
3346 char *p;
3347 int ret;
3349 disk = blkcg_conf_get_disk(&input);
3350 if (IS_ERR(disk))
3351 return PTR_ERR(disk);
3353 ioc = q_to_ioc(disk->queue);
3354 if (!ioc) {
3355 ret = blk_iocost_init(disk->queue);
3356 if (ret)
3357 goto err;
3358 ioc = q_to_ioc(disk->queue);
3359 }
3361 spin_lock_irq(&ioc->lock);
3362 memcpy(u, ioc->params.i_lcoefs, sizeof(u));
3363 user = ioc->user_cost_model;
3364 spin_unlock_irq(&ioc->lock);
3366 while ((p = strsep(&input, " \t\n"))) {
3367 substring_t args[MAX_OPT_ARGS];
3368 char buf[32];
3369 int tok;
3370 u64 v;
3372 if (!*p)
3373 continue;
3375 switch (match_token(p, cost_ctrl_tokens, args)) {
3376 case COST_CTRL:
3377 match_strlcpy(buf, &args[0], sizeof(buf));
3378 if (!strcmp(buf, "auto"))
3379 user = false;
3380 else if (!strcmp(buf, "user"))
3381 user = true;
3382 else
3383 goto einval;
3384 continue;
3385 case COST_MODEL:
3386 match_strlcpy(buf, &args[0], sizeof(buf));
3387 if (strcmp(buf, "linear"))
3388 goto einval;
3389 continue;
3390 }
3392 tok = match_token(p, i_lcoef_tokens, args);
3393 if (tok == NR_I_LCOEFS)
3394 goto einval;
3395 if (match_u64(&args[0], &v))
3396 goto einval;
3397 u[tok] = v;
3401 spin_lock_irq(&ioc->lock);
3402 if (user) {
3403 memcpy(ioc->params.i_lcoefs, u, sizeof(u));
3404 ioc->user_cost_model = true;
3405 } else {
3406 ioc->user_cost_model = false;
3407 }
3408 ioc_refresh_params(ioc, true);
3409 spin_unlock_irq(&ioc->lock);
3411 put_disk_and_module(disk);
3412 return nbytes;
3414 einval:
3415 ret = -EINVAL;
3416 err:
3417 put_disk_and_module(disk);
3418 return ret;
3421 static struct cftype ioc_files[] = {
3422 {
3423 .name = "weight",
3424 .flags = CFTYPE_NOT_ON_ROOT,
3425 .seq_show = ioc_weight_show,
3426 .write = ioc_weight_write,
3428 {
3429 .name = "cost.qos",
3430 .flags = CFTYPE_ONLY_ON_ROOT,
3431 .seq_show = ioc_qos_show,
3432 .write = ioc_qos_write,
3435 .name = "cost.model",
3436 .flags = CFTYPE_ONLY_ON_ROOT,
3437 .seq_show = ioc_cost_model_show,
3438 .write = ioc_cost_model_write,
3443 static struct blkcg_policy blkcg_policy_iocost = {
3444 .dfl_cftypes = ioc_files,
3445 .cpd_alloc_fn = ioc_cpd_alloc,
3446 .cpd_free_fn = ioc_cpd_free,
3447 .pd_alloc_fn = ioc_pd_alloc,
3448 .pd_init_fn = ioc_pd_init,
3449 .pd_free_fn = ioc_pd_free,
3450 .pd_stat_fn = ioc_pd_stat,
3453 static int __init ioc_init(void)
3455 return blkcg_policy_register(&blkcg_policy_iocost);
3458 static void __exit ioc_exit(void)
3460 blkcg_policy_unregister(&blkcg_policy_iocost);
3463 module_init(ioc_init);
3464 module_exit(ioc_exit);