1 /* SPDX-License-Identifier: GPL-2.0
3 * IO cost model based controller.
5 * Copyright (C) 2019 Tejun Heo <tj@kernel.org>
6 * Copyright (C) 2019 Andy Newell <newella@fb.com>
7 * Copyright (C) 2019 Facebook
* One challenge of controlling IO resources is the lack of a trivially
10 * observable cost metric. This is distinguished from CPU and memory where
11 * wallclock time and the number of bytes can serve as accurate enough
14 * Bandwidth and iops are the most commonly used metrics for IO devices but
15 * depending on the type and specifics of the device, different IO patterns
16 * easily lead to multiple orders of magnitude variations rendering them
17 * useless for the purpose of IO capacity distribution. While on-device
* time, with a lot of crutches, could serve as a useful approximation for
19 * non-queued rotational devices, this is no longer viable with modern
20 * devices, even the rotational ones.
22 * While there is no cost metric we can trivially observe, it isn't a
23 * complete mystery. For example, on a rotational device, seek cost
24 * dominates while a contiguous transfer contributes a smaller amount
25 * proportional to the size. If we can characterize at least the relative
26 * costs of these different types of IOs, it should be possible to
27 * implement a reasonable work-conserving proportional IO resource
32 * IO cost model estimates the cost of an IO given its basic parameters and
33 * history (e.g. the end sector of the last IO). The cost is measured in
34 * device time. If a given IO is estimated to cost 10ms, the device should
35 * be able to process ~100 of those IOs in a second.
37 * Currently, there's only one builtin cost model - linear. Each IO is
38 * classified as sequential or random and given a base cost accordingly.
39 * On top of that, a size cost proportional to the length of the IO is
40 * added. While simple, this model captures the operational
* characteristics of a wide variety of devices well enough. Default
* parameters for several different classes of devices are provided and the
43 * parameters can be configured from userspace via
44 * /sys/fs/cgroup/io.cost.model.
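* As a rough sketch, the absolute cost of a fresh (non-merge) IO under
* the linear model is a per-IO base cost plus a size-proportional part:
*   abs_cost = (seq ? coef_seqio : coef_randio) + nr_4k_pages * coef_page
* with the coefficients derived from the configured bps/iops parameters
* (see calc_lcoefs() and calc_vtime_cost_builtin() below).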
46 * If needed, tools/cgroup/iocost_coef_gen.py can be used to generate
47 * device-specific coefficients.
* 2. Control Strategy
54 * The device virtual time (vtime) is used as the primary control metric.
55 * The control strategy is composed of the following three parts.
57 * 2-1. Vtime Distribution
59 * When a cgroup becomes active in terms of IOs, its hierarchical share is
60 * calculated. Please consider the following hierarchy where the numbers
61 * inside parentheses denote the configured weights.
*          root
*        /       \
*     A (w:100)  B (w:300)
*      /  \
*  A0 (w:100) A1 (w:100)
69 * If B is idle and only A0 and A1 are actively issuing IOs, as the two are
* of equal weight, each gets 50% share. If B then starts issuing IOs, B
* gets 300/(100+300) or 75% share, and A0 and A1 equally split the rest,
72 * 12.5% each. The distribution mechanism only cares about these flattened
73 * shares. They're called hweights (hierarchical weights) and always add
* up to 1 (HWEIGHT_WHOLE).
76 * A given cgroup's vtime runs slower in inverse proportion to its hweight.
77 * For example, with 12.5% weight, A0's time runs 8 times slower (100/12.5)
78 * against the device vtime - an IO which takes 10ms on the underlying
79 * device is considered to take 80ms on A0.
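* In terms of the scaling actually used below (see abs_cost_to_cost()),
* this is roughly:
*   charged vtime = abs_cost * HWEIGHT_WHOLE / hweight_inuse
* so the 10ms IO above is charged as 10ms * 100% / 12.5% = 80ms worth of
* vtime against A0.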
81 * This constitutes the basis of IO capacity distribution. Each cgroup's
82 * vtime is running at a rate determined by its hweight. A cgroup tracks
83 * the vtime consumed by past IOs and can issue a new IO iff doing so
84 * wouldn't outrun the current device vtime. Otherwise, the IO is
85 * suspended until the vtime has progressed enough to cover it.
87 * 2-2. Vrate Adjustment
89 * It's unrealistic to expect the cost model to be perfect. There are too
90 * many devices and even on the same device the overall performance
91 * fluctuates depending on numerous factors such as IO mixture and device
92 * internal garbage collection. The controller needs to adapt dynamically.
94 * This is achieved by adjusting the overall IO rate according to how busy
95 * the device is. If the device becomes overloaded, we're sending down too
96 * many IOs and should generally slow down. If there are waiting issuers
* but the device isn't saturated, we're issuing too few and should issue more.
100 * To slow down, we lower the vrate - the rate at which the device vtime
101 * passes compared to the wall clock. For example, if the vtime is running
102 * at the vrate of 75%, all cgroups added up would only be able to issue
103 * 750ms worth of IOs per second, and vice-versa for speeding up.
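* The device vtime itself is derived from the vrate as in ioc_now()
* below, roughly:
*   vnow = period_at_vtime + (wallclock time since period start) * vrate
* so lowering the vrate directly shrinks the amount of device vtime, and
* therefore IO budget, that becomes available per wallclock second.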
* Device busyness is determined using two criteria - rq wait and
106 * completion latencies.
108 * When a device gets saturated, the on-device and then the request queues
109 * fill up and a bio which is ready to be issued has to wait for a request
110 * to become available. When this delay becomes noticeable, it's a clear
111 * indication that the device is saturated and we lower the vrate. This
112 * saturation signal is fairly conservative as it only triggers when both
* hardware and software queues are filled up, and is used as the default busy signal.
116 * As devices can have deep queues and be unfair in how the queued commands
* are executed, solely depending on rq wait may not result in satisfactory
* control quality. For better control, completion latency QoS
119 * parameters can be configured so that the device is considered saturated
120 * if N'th percentile completion latency rises above the set point.
122 * The completion latency requirements are a function of both the
123 * underlying device characteristics and the desired IO latency quality of
124 * service. There is an inherent trade-off - the tighter the latency QoS,
125 * the higher the bandwidth lossage. Latency QoS is disabled by default
126 * and can be set through /sys/fs/cgroup/io.cost.qos.
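* A configuration line written to io.cost.qos follows the format printed
* by ioc_qos_prfill() below; for example (device number and values are
* illustrative):
*   8:16 enable=1 ctrl=user rpct=95.00 rlat=10000 wpct=95.00 wlat=20000 min=50.00 max=150.00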
128 * 2-3. Work Conservation
130 * Imagine two cgroups A and B with equal weights. A is issuing a small IO
131 * periodically while B is sending out enough parallel IOs to saturate the
132 * device on its own. Let's say A's usage amounts to 100ms worth of IO
133 * cost per second, i.e., 10% of the device capacity. The naive
134 * distribution of half and half would lead to 60% utilization of the
135 * device, a significant reduction in the total amount of work done
* compared to free-for-all competition. This is too high a cost to pay for IO control.
139 * To conserve the total amount of work done, we keep track of how much
140 * each active cgroup is actually using and yield part of its weight if
141 * there are other cgroups which can make use of it. In the above case,
142 * A's weight will be lowered so that it hovers above the actual usage and
143 * B would be able to use the rest.
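* With the surplus parameters used below (SURPLUS_SCALE_PCT and friends),
* the donation target works out to roughly usage * 125% + 2%, so A
* hovering at 10% usage would keep ~14.5% hweight_inuse while B can claim
* the remaining ~85.5%.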
145 * As we don't want to penalize a cgroup for donating its weight, the
146 * surplus weight adjustment factors in a margin and has an immediate
147 * snapback mechanism in case the cgroup needs more IO vtime for itself.
149 * Note that adjusting down surplus weights has the same effects as
150 * accelerating vtime for other cgroups and work conservation can also be
* implemented by adjusting vrate dynamically. However, working out who
* can donate and who should take back how much requires hweight
* propagation anyway, making it easier to implement and understand as a
* separate mechanism.
158 * Instead of debugfs or other clumsy monitoring mechanisms, this
159 * controller uses a drgn based monitoring script -
160 * tools/cgroup/iocost_monitor.py. For details on drgn, please see
* https://github.com/osandov/drgn. The output looks like the following.
163 * sdb RUN per=300ms cur_per=234.218:v203.695 busy= +1 vrate= 62.12%
164 * active weight hweight% inflt% dbt delay usages%
165 * test/a * 50/ 50 33.33/ 33.33 27.65 2 0*041 033:033:033
166 * test/b * 100/ 100 66.67/ 66.67 17.56 0 0*000 066:079:077
168 * - per : Timer period
169 * - cur_per : Internal wall and device vtime clock
170 * - vrate : Device virtual time rate against wall clock
171 * - weight : Surplus-adjusted and configured weights
172 * - hweight : Surplus-adjusted and configured hierarchical weights
173 * - inflt : The percentage of in-flight IO cost at the end of last period
* - delay : Deferred issuer delay induction level and duration
175 * - usages : Usage history
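* The monitor is pointed at the target device by name; the exact
* invocation may vary, but is along the lines of:
*   # tools/cgroup/iocost_monitor.py sdb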
178 #include <linux/kernel.h>
179 #include <linux/module.h>
180 #include <linux/timer.h>
181 #include <linux/time64.h>
182 #include <linux/parser.h>
183 #include <linux/sched/signal.h>
184 #include <linux/blk-cgroup.h>
185 #include "blk-rq-qos.h"
186 #include "blk-stat.h"
189 #ifdef CONFIG_TRACEPOINTS
191 /* copied from TRACE_CGROUP_PATH, see cgroup-internal.h */
192 #define TRACE_IOCG_PATH_LEN 1024
193 static DEFINE_SPINLOCK(trace_iocg_path_lock);
194 static char trace_iocg_path[TRACE_IOCG_PATH_LEN];
196 #define TRACE_IOCG_PATH(type, iocg, ...) \
198 unsigned long flags; \
199 if (trace_iocost_##type##_enabled()) { \
200 spin_lock_irqsave(&trace_iocg_path_lock, flags); \
201 cgroup_path(iocg_to_blkg(iocg)->blkcg->css.cgroup, \
202 trace_iocg_path, TRACE_IOCG_PATH_LEN); \
203 trace_iocost_##type(iocg, trace_iocg_path, \
205 spin_unlock_irqrestore(&trace_iocg_path_lock, flags); \
#else /* CONFIG_TRACEPOINTS */
210 #define TRACE_IOCG_PATH(type, iocg, ...) do { } while (0)
#endif /* CONFIG_TRACEPOINTS */
216 /* timer period is calculated from latency requirements, bound it */
217 MIN_PERIOD = USEC_PER_MSEC,
218 MAX_PERIOD = USEC_PER_SEC,
221 * A cgroup's vtime can run 50% behind the device vtime, which
222 * serves as its IO credit buffer. Surplus weight adjustment is
223 * immediately canceled if the vtime margin runs below 10%.
226 INUSE_MARGIN_PCT = 10,
228 /* Have some play in waitq timer operations */
229 WAITQ_TIMER_MARGIN_PCT = 5,
232 * vtime can wrap well within a reasonable uptime when vrate is
233 * consistently raised. Don't trust recorded cgroup vtime if the
234 * period counter indicates that it's older than 5mins.
236 VTIME_VALID_DUR = 300 * USEC_PER_SEC,
239 * Remember the past three non-zero usages and use the max for
240 * surplus calculation. Three slots guarantee that we remember one
241 * full period usage from the last active stretch even after
242 * partial deactivation and re-activation periods. Don't start
243 * giving away weight before collecting two data points to prevent
244 * hweight adjustments based on one partial activation period.
247 MIN_VALID_USAGES = 2,
249 /* 1/64k is granular enough and can easily be handled w/ u32 */
250 HWEIGHT_WHOLE = 1 << 16,
253 * As vtime is used to calculate the cost of each IO, it needs to
254 * be fairly high precision. For example, it should be able to
255 * represent the cost of a single page worth of discard with
* sufficient accuracy. At the same time, it should be able to
257 * represent reasonably long enough durations to be useful and
258 * convenient during operation.
260 * 1s worth of vtime is 2^37. This gives us both sub-nanosecond
261 * granularity and days of wrap-around time even at extreme vrates.
263 VTIME_PER_SEC_SHIFT = 37,
264 VTIME_PER_SEC = 1LLU << VTIME_PER_SEC_SHIFT,
265 VTIME_PER_USEC = VTIME_PER_SEC / USEC_PER_SEC,
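/* i.e. VTIME_PER_USEC = 2^37 / 10^6 ~= 137k vtime units per microsecond */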
267 /* bound vrate adjustments within two orders of magnitude */
268 VRATE_MIN_PPM = 10000, /* 1% */
269 VRATE_MAX_PPM = 100000000, /* 10000% */
271 VRATE_MIN = VTIME_PER_USEC * VRATE_MIN_PPM / MILLION,
272 VRATE_CLAMP_ADJ_PCT = 4,
274 /* if IOs end up waiting for requests, issue less */
275 RQ_WAIT_BUSY_PCT = 5,
/* unbusy hysteresis */
280 /* don't let cmds which take a very long time pin lagging for too long */
281 MAX_LAGGING_PERIODS = 10,
284 * If usage% * 1.25 + 2% is lower than hweight% by more than 3%,
285 * donate the surplus.
287 SURPLUS_SCALE_PCT = 125, /* * 125% */
288 SURPLUS_SCALE_ABS = HWEIGHT_WHOLE / 50, /* + 2% */
289 SURPLUS_MIN_ADJ_DELTA = HWEIGHT_WHOLE / 33, /* 3% */
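/*
 * e.g. an iocg whose max recent usage is 40% would donate only if its
 * hweight_inuse is at least 40% * 1.25 + 2% + 3% = 55%, and would then
 * be adjusted down towards 40% * 1.25 + 2% = 52%.
 */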
291 /* switch iff the conditions are met for longer than this */
292 AUTOP_CYCLE_NSEC = 10LLU * NSEC_PER_SEC,
* Count IO size in 4k pages. The 12-bit shift helps keep the
* size-proportional components of the cost calculation within a similar
* number of digits as the per-IO cost components.
300 IOC_PAGE_SIZE = 1 << IOC_PAGE_SHIFT,
301 IOC_SECT_TO_PAGE_SHIFT = IOC_PAGE_SHIFT - SECTOR_SHIFT,
/* if further than 16M apart, consider it randio for the linear model */
304 LCOEF_RANDIO_PAGES = 4096,
313 /* io.cost.qos controls including per-dev enable of the whole controller */
320 /* io.cost.qos params */
331 /* io.cost.model controls */
338 /* builtin linear cost model coefficients */
370 u32 qos[NR_QOS_PARAMS];
371 u64 i_lcoefs[NR_I_LCOEFS];
372 u64 lcoefs[NR_LCOEFS];
373 u32 too_fast_vrate_pct;
374 u32 too_slow_vrate_pct;
384 struct ioc_pcpu_stat {
385 struct ioc_missed missed[2];
397 struct ioc_params params;
404 struct timer_list timer;
405 struct list_head active_iocgs; /* active cgroups */
406 struct ioc_pcpu_stat __percpu *pcpu_stat;
408 enum ioc_running running;
409 atomic64_t vtime_rate;
411 seqcount_t period_seqcount;
412 u32 period_at; /* wallclock starttime */
413 u64 period_at_vtime; /* vtime starttime */
415 atomic64_t cur_period; /* inc'd each period */
416 int busy_level; /* saturation history */
418 u64 inuse_margin_vtime;
419 bool weights_updated;
420 atomic_t hweight_gen; /* for lazy hweights */
422 u64 autop_too_fast_at;
423 u64 autop_too_slow_at;
425 bool user_qos_params:1;
426 bool user_cost_model:1;
429 /* per device-cgroup pair */
431 struct blkg_policy_data pd;
* An iocg can get its weight from two sources - an explicit
436 * per-device-cgroup configuration or the default weight of the
437 * cgroup. `cfg_weight` is the explicit per-device-cgroup
* configuration. `weight` is the effective weight considering both sources.
441 * When an idle cgroup becomes active its `active` goes from 0 to
442 * `weight`. `inuse` is the surplus adjusted active weight.
* `active` and `inuse` are used to calculate `hweight_active` and `hweight_inuse`.
446 * `last_inuse` remembers `inuse` while an iocg is idle to persist
447 * surplus adjustments.
455 sector_t cursor; /* to detect randio */
458 * `vtime` is this iocg's vtime cursor which progresses as IOs are
459 * issued. If lagging behind device vtime, the delta represents
* the currently available IO budget. If running ahead, the overage.
* `done_vtime` is the same but progressed on completion rather
464 * than issue. The delta behind `vtime` represents the cost of
465 * currently in-flight IOs.
467 * `last_vtime` is used to remember `vtime` at the end of the last
468 * period to calculate utilization.
471 atomic64_t done_vtime;
476 * The period this iocg was last active in. Used for deactivation
477 * and invalidating `vtime`.
479 atomic64_t active_period;
480 struct list_head active_list;
482 /* see __propagate_active_weight() and current_hweight() for details */
483 u64 child_active_sum;
490 struct wait_queue_head waitq;
491 struct hrtimer waitq_timer;
492 struct hrtimer delay_timer;
494 /* usage is recorded as fractions of HWEIGHT_WHOLE */
496 u32 usages[NR_USAGE_SLOTS];
498 /* this iocg's depth in the hierarchy and ancestors including self */
500 struct ioc_gq *ancestors[];
505 struct blkcg_policy_data cpd;
506 unsigned int dfl_weight;
517 struct wait_queue_entry wait;
523 struct iocg_wake_ctx {
529 static const struct ioc_params autop[] = {
532 [QOS_RLAT] = 250000, /* 250ms */
534 [QOS_MIN] = VRATE_MIN_PPM,
535 [QOS_MAX] = VRATE_MAX_PPM,
538 [I_LCOEF_RBPS] = 174019176,
539 [I_LCOEF_RSEQIOPS] = 41708,
540 [I_LCOEF_RRANDIOPS] = 370,
541 [I_LCOEF_WBPS] = 178075866,
542 [I_LCOEF_WSEQIOPS] = 42705,
543 [I_LCOEF_WRANDIOPS] = 378,
548 [QOS_RLAT] = 25000, /* 25ms */
550 [QOS_MIN] = VRATE_MIN_PPM,
551 [QOS_MAX] = VRATE_MAX_PPM,
554 [I_LCOEF_RBPS] = 245855193,
555 [I_LCOEF_RSEQIOPS] = 61575,
556 [I_LCOEF_RRANDIOPS] = 6946,
557 [I_LCOEF_WBPS] = 141365009,
558 [I_LCOEF_WSEQIOPS] = 33716,
559 [I_LCOEF_WRANDIOPS] = 26796,
564 [QOS_RLAT] = 25000, /* 25ms */
566 [QOS_MIN] = VRATE_MIN_PPM,
567 [QOS_MAX] = VRATE_MAX_PPM,
570 [I_LCOEF_RBPS] = 488636629,
571 [I_LCOEF_RSEQIOPS] = 8932,
572 [I_LCOEF_RRANDIOPS] = 8518,
573 [I_LCOEF_WBPS] = 427891549,
574 [I_LCOEF_WSEQIOPS] = 28755,
575 [I_LCOEF_WRANDIOPS] = 21940,
577 .too_fast_vrate_pct = 500,
581 [QOS_RLAT] = 5000, /* 5ms */
583 [QOS_MIN] = VRATE_MIN_PPM,
584 [QOS_MAX] = VRATE_MAX_PPM,
587 [I_LCOEF_RBPS] = 3102524156LLU,
588 [I_LCOEF_RSEQIOPS] = 724816,
589 [I_LCOEF_RRANDIOPS] = 778122,
590 [I_LCOEF_WBPS] = 1742780862LLU,
591 [I_LCOEF_WSEQIOPS] = 425702,
592 [I_LCOEF_WRANDIOPS] = 443193,
594 .too_slow_vrate_pct = 10,
599 * vrate adjust percentages indexed by ioc->busy_level. We adjust up on
600 * vtime credit shortage and down on device saturation.
602 static u32 vrate_adj_pct[] =
604 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
605 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
606 4, 4, 4, 4, 4, 4, 4, 4, 8, 8, 8, 8, 8, 8, 8, 8, 16 };
608 static struct blkcg_policy blkcg_policy_iocost;
610 /* accessors and helpers */
611 static struct ioc *rqos_to_ioc(struct rq_qos *rqos)
613 return container_of(rqos, struct ioc, rqos);
616 static struct ioc *q_to_ioc(struct request_queue *q)
618 return rqos_to_ioc(rq_qos_id(q, RQ_QOS_COST));
621 static const char *q_name(struct request_queue *q)
623 if (test_bit(QUEUE_FLAG_REGISTERED, &q->queue_flags))
624 return kobject_name(q->kobj.parent);
629 static const char __maybe_unused *ioc_name(struct ioc *ioc)
631 return q_name(ioc->rqos.q);
634 static struct ioc_gq *pd_to_iocg(struct blkg_policy_data *pd)
636 return pd ? container_of(pd, struct ioc_gq, pd) : NULL;
639 static struct ioc_gq *blkg_to_iocg(struct blkcg_gq *blkg)
641 return pd_to_iocg(blkg_to_pd(blkg, &blkcg_policy_iocost));
644 static struct blkcg_gq *iocg_to_blkg(struct ioc_gq *iocg)
646 return pd_to_blkg(&iocg->pd);
649 static struct ioc_cgrp *blkcg_to_iocc(struct blkcg *blkcg)
651 return container_of(blkcg_to_cpd(blkcg, &blkcg_policy_iocost),
652 struct ioc_cgrp, cpd);
656 * Scale @abs_cost to the inverse of @hw_inuse. The lower the hierarchical
657 * weight, the more expensive each IO. Must round up.
659 static u64 abs_cost_to_cost(u64 abs_cost, u32 hw_inuse)
661 return DIV64_U64_ROUND_UP(abs_cost * HWEIGHT_WHOLE, hw_inuse);
665 * The inverse of abs_cost_to_cost(). Must round up.
667 static u64 cost_to_abs_cost(u64 cost, u32 hw_inuse)
669 return DIV64_U64_ROUND_UP(cost * hw_inuse, HWEIGHT_WHOLE);
672 static void iocg_commit_bio(struct ioc_gq *iocg, struct bio *bio, u64 cost)
674 bio->bi_iocost_cost = cost;
675 atomic64_add(cost, &iocg->vtime);
678 #define CREATE_TRACE_POINTS
679 #include <trace/events/iocost.h>
/* latency QoS params changed, update period_us and all the dependent params */
682 static void ioc_refresh_period_us(struct ioc *ioc)
684 u32 ppm, lat, multi, period_us;
686 lockdep_assert_held(&ioc->lock);
688 /* pick the higher latency target */
689 if (ioc->params.qos[QOS_RLAT] >= ioc->params.qos[QOS_WLAT]) {
690 ppm = ioc->params.qos[QOS_RPPM];
691 lat = ioc->params.qos[QOS_RLAT];
693 ppm = ioc->params.qos[QOS_WPPM];
694 lat = ioc->params.qos[QOS_WLAT];
698 * We want the period to be long enough to contain a healthy number
699 * of IOs while short enough for granular control. Define it as a
700 * multiple of the latency target. Ideally, the multiplier should
701 * be scaled according to the percentile so that it would nominally
702 * contain a certain number of requests. Let's be simpler and
703 * scale it linearly so that it's 2x >= pct(90) and 10x at pct(50).
706 multi = max_t(u32, (MILLION - ppm) / 50000, 2);
709 period_us = multi * lat;
710 period_us = clamp_t(u32, period_us, MIN_PERIOD, MAX_PERIOD);
712 /* calculate dependent params */
713 ioc->period_us = period_us;
714 ioc->margin_us = period_us * MARGIN_PCT / 100;
715 ioc->inuse_margin_vtime = DIV64_U64_ROUND_UP(
716 period_us * VTIME_PER_USEC * INUSE_MARGIN_PCT, 100);
719 static int ioc_autop_idx(struct ioc *ioc)
721 int idx = ioc->autop_idx;
722 const struct ioc_params *p = &autop[idx];
727 if (!blk_queue_nonrot(ioc->rqos.q))
730 /* handle SATA SSDs w/ broken NCQ */
731 if (blk_queue_depth(ioc->rqos.q) == 1)
732 return AUTOP_SSD_QD1;
734 /* use one of the normal ssd sets */
735 if (idx < AUTOP_SSD_DFL)
736 return AUTOP_SSD_DFL;
738 /* if user is overriding anything, maintain what was there */
739 if (ioc->user_qos_params || ioc->user_cost_model)
742 /* step up/down based on the vrate */
743 vrate_pct = div64_u64(atomic64_read(&ioc->vtime_rate) * 100,
745 now_ns = ktime_get_ns();
747 if (p->too_fast_vrate_pct && p->too_fast_vrate_pct <= vrate_pct) {
748 if (!ioc->autop_too_fast_at)
749 ioc->autop_too_fast_at = now_ns;
750 if (now_ns - ioc->autop_too_fast_at >= AUTOP_CYCLE_NSEC)
753 ioc->autop_too_fast_at = 0;
756 if (p->too_slow_vrate_pct && p->too_slow_vrate_pct >= vrate_pct) {
757 if (!ioc->autop_too_slow_at)
758 ioc->autop_too_slow_at = now_ns;
759 if (now_ns - ioc->autop_too_slow_at >= AUTOP_CYCLE_NSEC)
762 ioc->autop_too_slow_at = 0;
* Take the following as inputs
771 * @bps maximum sequential throughput
772 * @seqiops maximum sequential 4k iops
773 * @randiops maximum random 4k iops
775 * and calculate the linear model cost coefficients.
777 * *@page per-page cost 1s / (@bps / 4096)
778 * *@seqio base cost of a seq IO max((1s / @seqiops) - *@page, 0)
* *@randio base cost of a rand IO max((1s / @randiops) - *@page, 0)
781 static void calc_lcoefs(u64 bps, u64 seqiops, u64 randiops,
782 u64 *page, u64 *seqio, u64 *randio)
786 *page = *seqio = *randio = 0;
789 u64 bps_pages = DIV_ROUND_UP_ULL(bps, IOC_PAGE_SIZE);
792 *page = DIV64_U64_ROUND_UP(VTIME_PER_SEC, bps_pages);
798 v = DIV64_U64_ROUND_UP(VTIME_PER_SEC, seqiops);
804 v = DIV64_U64_ROUND_UP(VTIME_PER_SEC, randiops);
810 static void ioc_refresh_lcoefs(struct ioc *ioc)
812 u64 *u = ioc->params.i_lcoefs;
813 u64 *c = ioc->params.lcoefs;
815 calc_lcoefs(u[I_LCOEF_RBPS], u[I_LCOEF_RSEQIOPS], u[I_LCOEF_RRANDIOPS],
816 &c[LCOEF_RPAGE], &c[LCOEF_RSEQIO], &c[LCOEF_RRANDIO]);
817 calc_lcoefs(u[I_LCOEF_WBPS], u[I_LCOEF_WSEQIOPS], u[I_LCOEF_WRANDIOPS],
818 &c[LCOEF_WPAGE], &c[LCOEF_WSEQIO], &c[LCOEF_WRANDIO]);
821 static bool ioc_refresh_params(struct ioc *ioc, bool force)
823 const struct ioc_params *p;
826 lockdep_assert_held(&ioc->lock);
828 idx = ioc_autop_idx(ioc);
831 if (idx == ioc->autop_idx && !force)
834 if (idx != ioc->autop_idx)
835 atomic64_set(&ioc->vtime_rate, VTIME_PER_USEC);
837 ioc->autop_idx = idx;
838 ioc->autop_too_fast_at = 0;
839 ioc->autop_too_slow_at = 0;
841 if (!ioc->user_qos_params)
842 memcpy(ioc->params.qos, p->qos, sizeof(p->qos));
843 if (!ioc->user_cost_model)
844 memcpy(ioc->params.i_lcoefs, p->i_lcoefs, sizeof(p->i_lcoefs));
846 ioc_refresh_period_us(ioc);
847 ioc_refresh_lcoefs(ioc);
849 ioc->vrate_min = DIV64_U64_ROUND_UP((u64)ioc->params.qos[QOS_MIN] *
850 VTIME_PER_USEC, MILLION);
851 ioc->vrate_max = div64_u64((u64)ioc->params.qos[QOS_MAX] *
852 VTIME_PER_USEC, MILLION);
857 /* take a snapshot of the current [v]time and vrate */
858 static void ioc_now(struct ioc *ioc, struct ioc_now *now)
862 now->now_ns = ktime_get();
863 now->now = ktime_to_us(now->now_ns);
864 now->vrate = atomic64_read(&ioc->vtime_rate);
867 * The current vtime is
869 * vtime at period start + (wallclock time since the start) * vrate
871 * As a consistent snapshot of `period_at_vtime` and `period_at` is
872 * needed, they're seqcount protected.
875 seq = read_seqcount_begin(&ioc->period_seqcount);
876 now->vnow = ioc->period_at_vtime +
877 (now->now - ioc->period_at) * now->vrate;
878 } while (read_seqcount_retry(&ioc->period_seqcount, seq));
881 static void ioc_start_period(struct ioc *ioc, struct ioc_now *now)
883 lockdep_assert_held(&ioc->lock);
884 WARN_ON_ONCE(ioc->running != IOC_RUNNING);
886 write_seqcount_begin(&ioc->period_seqcount);
887 ioc->period_at = now->now;
888 ioc->period_at_vtime = now->vnow;
889 write_seqcount_end(&ioc->period_seqcount);
891 ioc->timer.expires = jiffies + usecs_to_jiffies(ioc->period_us);
892 add_timer(&ioc->timer);
896 * Update @iocg's `active` and `inuse` to @active and @inuse, update level
897 * weight sums and propagate upwards accordingly.
899 static void __propagate_active_weight(struct ioc_gq *iocg, u32 active, u32 inuse)
901 struct ioc *ioc = iocg->ioc;
904 lockdep_assert_held(&ioc->lock);
906 inuse = min(active, inuse);
908 for (lvl = iocg->level - 1; lvl >= 0; lvl--) {
909 struct ioc_gq *parent = iocg->ancestors[lvl];
910 struct ioc_gq *child = iocg->ancestors[lvl + 1];
911 u32 parent_active = 0, parent_inuse = 0;
913 /* update the level sums */
914 parent->child_active_sum += (s32)(active - child->active);
915 parent->child_inuse_sum += (s32)(inuse - child->inuse);
/* apply the updates */
917 child->active = active;
918 child->inuse = inuse;
* The delta between the inuse and active sums indicates how
* much weight is being given away. The parent's inuse and
* active should reflect the ratio.
925 if (parent->child_active_sum) {
926 parent_active = parent->weight;
927 parent_inuse = DIV64_U64_ROUND_UP(
928 parent_active * parent->child_inuse_sum,
929 parent->child_active_sum);
932 /* do we need to keep walking up? */
933 if (parent_active == parent->active &&
934 parent_inuse == parent->inuse)
937 active = parent_active;
938 inuse = parent_inuse;
941 ioc->weights_updated = true;
944 static void commit_active_weights(struct ioc *ioc)
946 lockdep_assert_held(&ioc->lock);
948 if (ioc->weights_updated) {
949 /* paired with rmb in current_hweight(), see there */
951 atomic_inc(&ioc->hweight_gen);
952 ioc->weights_updated = false;
956 static void propagate_active_weight(struct ioc_gq *iocg, u32 active, u32 inuse)
958 __propagate_active_weight(iocg, active, inuse);
959 commit_active_weights(iocg->ioc);
962 static void current_hweight(struct ioc_gq *iocg, u32 *hw_activep, u32 *hw_inusep)
964 struct ioc *ioc = iocg->ioc;
969 /* hot path - if uptodate, use cached */
970 ioc_gen = atomic_read(&ioc->hweight_gen);
971 if (ioc_gen == iocg->hweight_gen)
975 * Paired with wmb in commit_active_weights(). If we saw the
976 * updated hweight_gen, all the weight updates from
977 * __propagate_active_weight() are visible too.
979 * We can race with weight updates during calculation and get it
980 * wrong. However, hweight_gen would have changed and a future
981 * reader will recalculate and we're guaranteed to discard the
986 hwa = hwi = HWEIGHT_WHOLE;
987 for (lvl = 0; lvl <= iocg->level - 1; lvl++) {
988 struct ioc_gq *parent = iocg->ancestors[lvl];
989 struct ioc_gq *child = iocg->ancestors[lvl + 1];
990 u32 active_sum = READ_ONCE(parent->child_active_sum);
991 u32 inuse_sum = READ_ONCE(parent->child_inuse_sum);
992 u32 active = READ_ONCE(child->active);
993 u32 inuse = READ_ONCE(child->inuse);
995 /* we can race with deactivations and either may read as zero */
996 if (!active_sum || !inuse_sum)
999 active_sum = max(active, active_sum);
1000 hwa = hwa * active / active_sum; /* max 16bits * 10000 */
1002 inuse_sum = max(inuse, inuse_sum);
1003 hwi = hwi * inuse / inuse_sum; /* max 16bits * 10000 */
1006 iocg->hweight_active = max_t(u32, hwa, 1);
1007 iocg->hweight_inuse = max_t(u32, hwi, 1);
1008 iocg->hweight_gen = ioc_gen;
1011 *hw_activep = iocg->hweight_active;
1013 *hw_inusep = iocg->hweight_inuse;
1016 static void weight_updated(struct ioc_gq *iocg)
1018 struct ioc *ioc = iocg->ioc;
1019 struct blkcg_gq *blkg = iocg_to_blkg(iocg);
1020 struct ioc_cgrp *iocc = blkcg_to_iocc(blkg->blkcg);
1023 lockdep_assert_held(&ioc->lock);
1025 weight = iocg->cfg_weight ?: iocc->dfl_weight;
1026 if (weight != iocg->weight && iocg->active)
1027 propagate_active_weight(iocg, weight,
1028 DIV64_U64_ROUND_UP(iocg->inuse * weight, iocg->weight));
1029 iocg->weight = weight;
1032 static bool iocg_activate(struct ioc_gq *iocg, struct ioc_now *now)
1034 struct ioc *ioc = iocg->ioc;
1035 u64 last_period, cur_period, max_period_delta;
1036 u64 vtime, vmargin, vmin;
* If we seem to be already active, just update the stamp to tell the
* timer that we're still active. We don't mind occasional races.
1043 if (!list_empty(&iocg->active_list)) {
1045 cur_period = atomic64_read(&ioc->cur_period);
1046 if (atomic64_read(&iocg->active_period) != cur_period)
1047 atomic64_set(&iocg->active_period, cur_period);
1051 /* racy check on internal node IOs, treat as root level IOs */
1052 if (iocg->child_active_sum)
1055 spin_lock_irq(&ioc->lock);
1060 cur_period = atomic64_read(&ioc->cur_period);
1061 last_period = atomic64_read(&iocg->active_period);
1062 atomic64_set(&iocg->active_period, cur_period);
1064 /* already activated or breaking leaf-only constraint? */
1065 if (!list_empty(&iocg->active_list))
1066 goto succeed_unlock;
1067 for (i = iocg->level - 1; i > 0; i--)
1068 if (!list_empty(&iocg->ancestors[i]->active_list))
1071 if (iocg->child_active_sum)
1075 * vtime may wrap when vrate is raised substantially due to
1076 * underestimated IO costs. Look at the period and ignore its
1077 * vtime if the iocg has been idle for too long. Also, cap the
1078 * budget it can start with to the margin.
1080 max_period_delta = DIV64_U64_ROUND_UP(VTIME_VALID_DUR, ioc->period_us);
1081 vtime = atomic64_read(&iocg->vtime);
1082 vmargin = ioc->margin_us * now->vrate;
1083 vmin = now->vnow - vmargin;
1085 if (last_period + max_period_delta < cur_period ||
1086 time_before64(vtime, vmin)) {
1087 atomic64_add(vmin - vtime, &iocg->vtime);
1088 atomic64_add(vmin - vtime, &iocg->done_vtime);
1093 * Activate, propagate weight and start period timer if not
1094 * running. Reset hweight_gen to avoid accidental match from
1097 iocg->hweight_gen = atomic_read(&ioc->hweight_gen) - 1;
1098 list_add(&iocg->active_list, &ioc->active_iocgs);
1099 propagate_active_weight(iocg, iocg->weight,
1100 iocg->last_inuse ?: iocg->weight);
1102 TRACE_IOCG_PATH(iocg_activate, iocg, now,
1103 last_period, cur_period, vtime);
1105 iocg->last_vtime = vtime;
1107 if (ioc->running == IOC_IDLE) {
1108 ioc->running = IOC_RUNNING;
1109 ioc_start_period(ioc, now);
1113 spin_unlock_irq(&ioc->lock);
1117 spin_unlock_irq(&ioc->lock);
1121 static int iocg_wake_fn(struct wait_queue_entry *wq_entry, unsigned mode,
1122 int flags, void *key)
1124 struct iocg_wait *wait = container_of(wq_entry, struct iocg_wait, wait);
1125 struct iocg_wake_ctx *ctx = (struct iocg_wake_ctx *)key;
1126 u64 cost = abs_cost_to_cost(wait->abs_cost, ctx->hw_inuse);
1128 ctx->vbudget -= cost;
1130 if (ctx->vbudget < 0)
1133 iocg_commit_bio(ctx->iocg, wait->bio, cost);
1136 * autoremove_wake_function() removes the wait entry only when it
1137 * actually changed the task state. We want the wait always
1138 * removed. Remove explicitly and use default_wake_function().
1140 list_del_init(&wq_entry->entry);
1141 wait->committed = true;
1143 default_wake_function(wq_entry, mode, flags, key);
1147 static void iocg_kick_waitq(struct ioc_gq *iocg, struct ioc_now *now)
1149 struct ioc *ioc = iocg->ioc;
1150 struct iocg_wake_ctx ctx = { .iocg = iocg };
1151 u64 margin_ns = (u64)(ioc->period_us *
1152 WAITQ_TIMER_MARGIN_PCT / 100) * NSEC_PER_USEC;
1153 u64 vdebt, vshortage, expires, oexpires;
1157 lockdep_assert_held(&iocg->waitq.lock);
1159 current_hweight(iocg, NULL, &hw_inuse);
1160 vbudget = now->vnow - atomic64_read(&iocg->vtime);
1163 vdebt = abs_cost_to_cost(iocg->abs_vdebt, hw_inuse);
1164 if (vdebt && vbudget > 0) {
1165 u64 delta = min_t(u64, vbudget, vdebt);
1166 u64 abs_delta = min(cost_to_abs_cost(delta, hw_inuse),
1169 atomic64_add(delta, &iocg->vtime);
1170 atomic64_add(delta, &iocg->done_vtime);
1171 iocg->abs_vdebt -= abs_delta;
1175 * Wake up the ones which are due and see how much vtime we'll need
1178 ctx.hw_inuse = hw_inuse;
1179 ctx.vbudget = vbudget - vdebt;
1180 __wake_up_locked_key(&iocg->waitq, TASK_NORMAL, &ctx);
1181 if (!waitqueue_active(&iocg->waitq))
1183 if (WARN_ON_ONCE(ctx.vbudget >= 0))
1186 /* determine next wakeup, add a quarter margin to guarantee chunking */
1187 vshortage = -ctx.vbudget;
1188 expires = now->now_ns +
1189 DIV64_U64_ROUND_UP(vshortage, now->vrate) * NSEC_PER_USEC;
1190 expires += margin_ns / 4;
1192 /* if already active and close enough, don't bother */
1193 oexpires = ktime_to_ns(hrtimer_get_softexpires(&iocg->waitq_timer));
1194 if (hrtimer_is_queued(&iocg->waitq_timer) &&
1195 abs(oexpires - expires) <= margin_ns / 4)
1198 hrtimer_start_range_ns(&iocg->waitq_timer, ns_to_ktime(expires),
1199 margin_ns / 4, HRTIMER_MODE_ABS);
1202 static enum hrtimer_restart iocg_waitq_timer_fn(struct hrtimer *timer)
1204 struct ioc_gq *iocg = container_of(timer, struct ioc_gq, waitq_timer);
1206 unsigned long flags;
1208 ioc_now(iocg->ioc, &now);
1210 spin_lock_irqsave(&iocg->waitq.lock, flags);
1211 iocg_kick_waitq(iocg, &now);
1212 spin_unlock_irqrestore(&iocg->waitq.lock, flags);
1214 return HRTIMER_NORESTART;
1217 static bool iocg_kick_delay(struct ioc_gq *iocg, struct ioc_now *now, u64 cost)
1219 struct ioc *ioc = iocg->ioc;
1220 struct blkcg_gq *blkg = iocg_to_blkg(iocg);
1221 u64 vtime = atomic64_read(&iocg->vtime);
1222 u64 vmargin = ioc->margin_us * now->vrate;
1223 u64 margin_ns = ioc->margin_us * NSEC_PER_USEC;
1224 u64 expires, oexpires;
1227 lockdep_assert_held(&iocg->waitq.lock);
1229 /* debt-adjust vtime */
1230 current_hweight(iocg, NULL, &hw_inuse);
1231 vtime += abs_cost_to_cost(iocg->abs_vdebt, hw_inuse);
1234 * Clear or maintain depending on the overage. Non-zero vdebt is what
1235 * guarantees that @iocg is online and future iocg_kick_delay() will
1236 * clear use_delay. Don't leave it on when there's no vdebt.
1238 if (!iocg->abs_vdebt || time_before_eq64(vtime, now->vnow)) {
1239 blkcg_clear_delay(blkg);
1242 if (!atomic_read(&blkg->use_delay) &&
1243 time_before_eq64(vtime, now->vnow + vmargin))
1248 u64 cost_ns = DIV64_U64_ROUND_UP(cost * NSEC_PER_USEC,
1250 blkcg_add_delay(blkg, now->now_ns, cost_ns);
1252 blkcg_use_delay(blkg);
1254 expires = now->now_ns + DIV64_U64_ROUND_UP(vtime - now->vnow,
1255 now->vrate) * NSEC_PER_USEC;
1257 /* if already active and close enough, don't bother */
1258 oexpires = ktime_to_ns(hrtimer_get_softexpires(&iocg->delay_timer));
1259 if (hrtimer_is_queued(&iocg->delay_timer) &&
1260 abs(oexpires - expires) <= margin_ns / 4)
1263 hrtimer_start_range_ns(&iocg->delay_timer, ns_to_ktime(expires),
1264 margin_ns / 4, HRTIMER_MODE_ABS);
1268 static enum hrtimer_restart iocg_delay_timer_fn(struct hrtimer *timer)
1270 struct ioc_gq *iocg = container_of(timer, struct ioc_gq, delay_timer);
1272 unsigned long flags;
1274 spin_lock_irqsave(&iocg->waitq.lock, flags);
1275 ioc_now(iocg->ioc, &now);
1276 iocg_kick_delay(iocg, &now, 0);
1277 spin_unlock_irqrestore(&iocg->waitq.lock, flags);
1279 return HRTIMER_NORESTART;
1282 static void ioc_lat_stat(struct ioc *ioc, u32 *missed_ppm_ar, u32 *rq_wait_pct_p)
1284 u32 nr_met[2] = { };
1285 u32 nr_missed[2] = { };
1289 for_each_online_cpu(cpu) {
1290 struct ioc_pcpu_stat *stat = per_cpu_ptr(ioc->pcpu_stat, cpu);
1291 u64 this_rq_wait_ns;
1293 for (rw = READ; rw <= WRITE; rw++) {
1294 u32 this_met = READ_ONCE(stat->missed[rw].nr_met);
1295 u32 this_missed = READ_ONCE(stat->missed[rw].nr_missed);
1297 nr_met[rw] += this_met - stat->missed[rw].last_met;
1298 nr_missed[rw] += this_missed - stat->missed[rw].last_missed;
1299 stat->missed[rw].last_met = this_met;
1300 stat->missed[rw].last_missed = this_missed;
1303 this_rq_wait_ns = READ_ONCE(stat->rq_wait_ns);
1304 rq_wait_ns += this_rq_wait_ns - stat->last_rq_wait_ns;
1305 stat->last_rq_wait_ns = this_rq_wait_ns;
1308 for (rw = READ; rw <= WRITE; rw++) {
1309 if (nr_met[rw] + nr_missed[rw])
1311 DIV64_U64_ROUND_UP((u64)nr_missed[rw] * MILLION,
1312 nr_met[rw] + nr_missed[rw]);
1314 missed_ppm_ar[rw] = 0;
1317 *rq_wait_pct_p = div64_u64(rq_wait_ns * 100,
1318 ioc->period_us * NSEC_PER_USEC);
1321 /* was iocg idle this period? */
1322 static bool iocg_is_idle(struct ioc_gq *iocg)
1324 struct ioc *ioc = iocg->ioc;
1326 /* did something get issued this period? */
1327 if (atomic64_read(&iocg->active_period) ==
1328 atomic64_read(&ioc->cur_period))
1331 /* is something in flight? */
1332 if (atomic64_read(&iocg->done_vtime) != atomic64_read(&iocg->vtime))
1338 /* returns usage with margin added if surplus is large enough */
1339 static u32 surplus_adjusted_hweight_inuse(u32 usage, u32 hw_inuse)
1342 usage = DIV_ROUND_UP(usage * SURPLUS_SCALE_PCT, 100);
1343 usage += SURPLUS_SCALE_ABS;
1345 /* don't bother if the surplus is too small */
1346 if (usage + SURPLUS_MIN_ADJ_DELTA > hw_inuse)
1352 static void ioc_timer_fn(struct timer_list *timer)
1354 struct ioc *ioc = container_of(timer, struct ioc, timer);
1355 struct ioc_gq *iocg, *tiocg;
1357 int nr_surpluses = 0, nr_shortages = 0, nr_lagging = 0;
1358 u32 ppm_rthr = MILLION - ioc->params.qos[QOS_RPPM];
1359 u32 ppm_wthr = MILLION - ioc->params.qos[QOS_WPPM];
1360 u32 missed_ppm[2], rq_wait_pct;
1362 int prev_busy_level, i;
1364 /* how were the latencies during the period? */
1365 ioc_lat_stat(ioc, missed_ppm, &rq_wait_pct);
1367 /* take care of active iocgs */
1368 spin_lock_irq(&ioc->lock);
1372 period_vtime = now.vnow - ioc->period_at_vtime;
1373 if (WARN_ON_ONCE(!period_vtime)) {
1374 spin_unlock_irq(&ioc->lock);
1379 * Waiters determine the sleep durations based on the vrate they
1380 * saw at the time of sleep. If vrate has increased, some waiters
1381 * could be sleeping for too long. Wake up tardy waiters which
1382 * should have woken up in the last period and expire idle iocgs.
1384 list_for_each_entry_safe(iocg, tiocg, &ioc->active_iocgs, active_list) {
1385 if (!waitqueue_active(&iocg->waitq) && !iocg->abs_vdebt &&
1386 !iocg_is_idle(iocg))
1389 spin_lock(&iocg->waitq.lock);
1391 if (waitqueue_active(&iocg->waitq) || iocg->abs_vdebt) {
1392 /* might be oversleeping vtime / hweight changes, kick */
1393 iocg_kick_waitq(iocg, &now);
1394 iocg_kick_delay(iocg, &now, 0);
1395 } else if (iocg_is_idle(iocg)) {
1396 /* no waiter and idle, deactivate */
1397 iocg->last_inuse = iocg->inuse;
1398 __propagate_active_weight(iocg, 0, 0);
1399 list_del_init(&iocg->active_list);
1402 spin_unlock(&iocg->waitq.lock);
1404 commit_active_weights(ioc);
1406 /* calc usages and see whether some weights need to be moved around */
1407 list_for_each_entry(iocg, &ioc->active_iocgs, active_list) {
1408 u64 vdone, vtime, vusage, vmargin, vmin;
1409 u32 hw_active, hw_inuse, usage;
1412 * Collect unused and wind vtime closer to vnow to prevent
1413 * iocgs from accumulating a large amount of budget.
1415 vdone = atomic64_read(&iocg->done_vtime);
1416 vtime = atomic64_read(&iocg->vtime);
1417 current_hweight(iocg, &hw_active, &hw_inuse);
1420 * Latency QoS detection doesn't account for IOs which are
1421 * in-flight for longer than a period. Detect them by
1422 * comparing vdone against period start. If lagging behind
1423 * IOs from past periods, don't increase vrate.
1425 if ((ppm_rthr != MILLION || ppm_wthr != MILLION) &&
1426 !atomic_read(&iocg_to_blkg(iocg)->use_delay) &&
1427 time_after64(vtime, vdone) &&
1428 time_after64(vtime, now.vnow -
1429 MAX_LAGGING_PERIODS * period_vtime) &&
1430 time_before64(vdone, now.vnow - period_vtime))
1433 if (waitqueue_active(&iocg->waitq))
1434 vusage = now.vnow - iocg->last_vtime;
1435 else if (time_before64(iocg->last_vtime, vtime))
1436 vusage = vtime - iocg->last_vtime;
1440 iocg->last_vtime += vusage;
1442 * Factor in in-flight vtime into vusage to avoid
1443 * high-latency completions appearing as idle. This should
* be done after the above ->last_vtime adjustment.
1446 vusage = max(vusage, vtime - vdone);
1448 /* calculate hweight based usage ratio and record */
1450 usage = DIV64_U64_ROUND_UP(vusage * hw_inuse,
1452 iocg->usage_idx = (iocg->usage_idx + 1) % NR_USAGE_SLOTS;
1453 iocg->usages[iocg->usage_idx] = usage;
1458 /* see whether there's surplus vtime */
1459 vmargin = ioc->margin_us * now.vrate;
1460 vmin = now.vnow - vmargin;
1462 iocg->has_surplus = false;
1464 if (!waitqueue_active(&iocg->waitq) &&
1465 time_before64(vtime, vmin)) {
1466 u64 delta = vmin - vtime;
1468 /* throw away surplus vtime */
1469 atomic64_add(delta, &iocg->vtime);
1470 atomic64_add(delta, &iocg->done_vtime);
1471 iocg->last_vtime += delta;
1472 /* if usage is sufficiently low, maybe it can donate */
1473 if (surplus_adjusted_hweight_inuse(usage, hw_inuse)) {
1474 iocg->has_surplus = true;
1477 } else if (hw_inuse < hw_active) {
1478 u32 new_hwi, new_inuse;
1480 /* was donating but might need to take back some */
1481 if (waitqueue_active(&iocg->waitq)) {
1482 new_hwi = hw_active;
1484 new_hwi = max(hw_inuse,
1485 usage * SURPLUS_SCALE_PCT / 100 +
1489 new_inuse = div64_u64((u64)iocg->inuse * new_hwi,
1491 new_inuse = clamp_t(u32, new_inuse, 1, iocg->active);
1493 if (new_inuse > iocg->inuse) {
1494 TRACE_IOCG_PATH(inuse_takeback, iocg, &now,
1495 iocg->inuse, new_inuse,
1497 __propagate_active_weight(iocg, iocg->weight,
/* genuinely out of vtime */
1506 if (!nr_shortages || !nr_surpluses)
1507 goto skip_surplus_transfers;
1509 /* there are both shortages and surpluses, transfer surpluses */
1510 list_for_each_entry(iocg, &ioc->active_iocgs, active_list) {
1511 u32 usage, hw_active, hw_inuse, new_hwi, new_inuse;
1514 if (!iocg->has_surplus)
1517 /* base the decision on max historical usage */
1518 for (i = 0, usage = 0; i < NR_USAGE_SLOTS; i++) {
1519 if (iocg->usages[i]) {
1520 usage = max(usage, iocg->usages[i]);
1524 if (nr_valid < MIN_VALID_USAGES)
1527 current_hweight(iocg, &hw_active, &hw_inuse);
1528 new_hwi = surplus_adjusted_hweight_inuse(usage, hw_inuse);
1532 new_inuse = DIV64_U64_ROUND_UP((u64)iocg->inuse * new_hwi,
1534 if (new_inuse < iocg->inuse) {
1535 TRACE_IOCG_PATH(inuse_giveaway, iocg, &now,
1536 iocg->inuse, new_inuse,
1538 __propagate_active_weight(iocg, iocg->weight, new_inuse);
1541 skip_surplus_transfers:
1542 commit_active_weights(ioc);
1545 * If q is getting clogged or we're missing too much, we're issuing
1546 * too much IO and should lower vtime rate. If we're not missing
1547 * and experiencing shortages but not surpluses, we're too stingy
1548 * and should increase vtime rate.
1550 prev_busy_level = ioc->busy_level;
1551 if (rq_wait_pct > RQ_WAIT_BUSY_PCT ||
1552 missed_ppm[READ] > ppm_rthr ||
1553 missed_ppm[WRITE] > ppm_wthr) {
1554 /* clearly missing QoS targets, slow down vrate */
1555 ioc->busy_level = max(ioc->busy_level, 0);
1557 } else if (rq_wait_pct <= RQ_WAIT_BUSY_PCT * UNBUSY_THR_PCT / 100 &&
1558 missed_ppm[READ] <= ppm_rthr * UNBUSY_THR_PCT / 100 &&
1559 missed_ppm[WRITE] <= ppm_wthr * UNBUSY_THR_PCT / 100) {
1560 /* QoS targets are being met with >25% margin */
1563 * We're throttling while the device has spare
1564 * capacity. If vrate was being slowed down, stop.
1566 ioc->busy_level = min(ioc->busy_level, 0);
1569 * If there are IOs spanning multiple periods, wait
1570 * them out before pushing the device harder. If
1571 * there are surpluses, let redistribution work it
1574 if (!nr_lagging && !nr_surpluses)
1578 * Nobody is being throttled and the users aren't
1579 * issuing enough IOs to saturate the device. We
1580 * simply don't know how close the device is to
1581 * saturation. Coast.
1583 ioc->busy_level = 0;
/* inside the hysteresis margin, we're good */
1587 ioc->busy_level = 0;
1590 ioc->busy_level = clamp(ioc->busy_level, -1000, 1000);
1592 if (ioc->busy_level > 0 || (ioc->busy_level < 0 && !nr_lagging)) {
1593 u64 vrate = atomic64_read(&ioc->vtime_rate);
1594 u64 vrate_min = ioc->vrate_min, vrate_max = ioc->vrate_max;
1596 /* rq_wait signal is always reliable, ignore user vrate_min */
1597 if (rq_wait_pct > RQ_WAIT_BUSY_PCT)
1598 vrate_min = VRATE_MIN;
1601 * If vrate is out of bounds, apply clamp gradually as the
1602 * bounds can change abruptly. Otherwise, apply busy_level
1605 if (vrate < vrate_min) {
1606 vrate = div64_u64(vrate * (100 + VRATE_CLAMP_ADJ_PCT),
1608 vrate = min(vrate, vrate_min);
1609 } else if (vrate > vrate_max) {
1610 vrate = div64_u64(vrate * (100 - VRATE_CLAMP_ADJ_PCT),
1612 vrate = max(vrate, vrate_max);
1614 int idx = min_t(int, abs(ioc->busy_level),
1615 ARRAY_SIZE(vrate_adj_pct) - 1);
1616 u32 adj_pct = vrate_adj_pct[idx];
1618 if (ioc->busy_level > 0)
1619 adj_pct = 100 - adj_pct;
1621 adj_pct = 100 + adj_pct;
1623 vrate = clamp(DIV64_U64_ROUND_UP(vrate * adj_pct, 100),
1624 vrate_min, vrate_max);
1627 trace_iocost_ioc_vrate_adj(ioc, vrate, missed_ppm, rq_wait_pct,
1628 nr_lagging, nr_shortages,
1631 atomic64_set(&ioc->vtime_rate, vrate);
1632 ioc->inuse_margin_vtime = DIV64_U64_ROUND_UP(
1633 ioc->period_us * vrate * INUSE_MARGIN_PCT, 100);
1634 } else if (ioc->busy_level != prev_busy_level || nr_lagging) {
1635 trace_iocost_ioc_vrate_adj(ioc, atomic64_read(&ioc->vtime_rate),
1636 missed_ppm, rq_wait_pct, nr_lagging,
1637 nr_shortages, nr_surpluses);
1640 ioc_refresh_params(ioc, false);
* This period is done. Move on to the next one. If nothing's
1644 * going on with the device, stop the timer.
1646 atomic64_inc(&ioc->cur_period);
1648 if (ioc->running != IOC_STOP) {
1649 if (!list_empty(&ioc->active_iocgs)) {
1650 ioc_start_period(ioc, &now);
1652 ioc->busy_level = 0;
1653 ioc->running = IOC_IDLE;
1657 spin_unlock_irq(&ioc->lock);
1660 static void calc_vtime_cost_builtin(struct bio *bio, struct ioc_gq *iocg,
1661 bool is_merge, u64 *costp)
1663 struct ioc *ioc = iocg->ioc;
1664 u64 coef_seqio, coef_randio, coef_page;
1665 u64 pages = max_t(u64, bio_sectors(bio) >> IOC_SECT_TO_PAGE_SHIFT, 1);
1669 switch (bio_op(bio)) {
1671 coef_seqio = ioc->params.lcoefs[LCOEF_RSEQIO];
1672 coef_randio = ioc->params.lcoefs[LCOEF_RRANDIO];
1673 coef_page = ioc->params.lcoefs[LCOEF_RPAGE];
1676 coef_seqio = ioc->params.lcoefs[LCOEF_WSEQIO];
1677 coef_randio = ioc->params.lcoefs[LCOEF_WRANDIO];
1678 coef_page = ioc->params.lcoefs[LCOEF_WPAGE];
1685 seek_pages = abs(bio->bi_iter.bi_sector - iocg->cursor);
1686 seek_pages >>= IOC_SECT_TO_PAGE_SHIFT;
1690 if (seek_pages > LCOEF_RANDIO_PAGES) {
1691 cost += coef_randio;
1696 cost += pages * coef_page;
1701 static u64 calc_vtime_cost(struct bio *bio, struct ioc_gq *iocg, bool is_merge)
1705 calc_vtime_cost_builtin(bio, iocg, is_merge, &cost);
1709 static void ioc_rqos_throttle(struct rq_qos *rqos, struct bio *bio)
1711 struct blkcg_gq *blkg = bio->bi_blkg;
1712 struct ioc *ioc = rqos_to_ioc(rqos);
1713 struct ioc_gq *iocg = blkg_to_iocg(blkg);
1715 struct iocg_wait wait;
1716 u32 hw_active, hw_inuse;
1717 u64 abs_cost, cost, vtime;
1719 /* bypass IOs if disabled or for root cgroup */
1720 if (!ioc->enabled || !iocg->level)
1723 /* always activate so that even 0 cost IOs get protected to some level */
1724 if (!iocg_activate(iocg, &now))
1727 /* calculate the absolute vtime cost */
1728 abs_cost = calc_vtime_cost(bio, iocg, false);
1732 iocg->cursor = bio_end_sector(bio);
1734 vtime = atomic64_read(&iocg->vtime);
1735 current_hweight(iocg, &hw_active, &hw_inuse);
1737 if (hw_inuse < hw_active &&
1738 time_after_eq64(vtime + ioc->inuse_margin_vtime, now.vnow)) {
1739 TRACE_IOCG_PATH(inuse_reset, iocg, &now,
1740 iocg->inuse, iocg->weight, hw_inuse, hw_active);
1741 spin_lock_irq(&ioc->lock);
1742 propagate_active_weight(iocg, iocg->weight, iocg->weight);
1743 spin_unlock_irq(&ioc->lock);
1744 current_hweight(iocg, &hw_active, &hw_inuse);
1747 cost = abs_cost_to_cost(abs_cost, hw_inuse);
1750 * If no one's waiting and within budget, issue right away. The
1751 * tests are racy but the races aren't systemic - we only miss once
1752 * in a while which is fine.
1754 if (!waitqueue_active(&iocg->waitq) && !iocg->abs_vdebt &&
1755 time_before_eq64(vtime + cost, now.vnow)) {
1756 iocg_commit_bio(iocg, bio, cost);
1761 * We activated above but w/o any synchronization. Deactivation is
1762 * synchronized with waitq.lock and we won't get deactivated as long
* as we're waiting or have debt, so we're good if we're activated
1764 * here. In the unlikely case that we aren't, just issue the IO.
1766 spin_lock_irq(&iocg->waitq.lock);
1768 if (unlikely(list_empty(&iocg->active_list))) {
1769 spin_unlock_irq(&iocg->waitq.lock);
1770 iocg_commit_bio(iocg, bio, cost);
1775 * We're over budget. If @bio has to be issued regardless, remember
1776 * the abs_cost instead of advancing vtime. iocg_kick_waitq() will pay
1777 * off the debt before waking more IOs.
1779 * This way, the debt is continuously paid off each period with the
1780 * actual budget available to the cgroup. If we just wound vtime, we
1781 * would incorrectly use the current hw_inuse for the entire amount
1782 * which, for example, can lead to the cgroup staying blocked for a
1783 * long time even with substantially raised hw_inuse.
1785 * An iocg with vdebt should stay online so that the timer can keep
1786 * deducting its vdebt and [de]activate use_delay mechanism
1787 * accordingly. We don't want to race against the timer trying to
1788 * clear them and leave @iocg inactive w/ dangling use_delay heavily
1789 * penalizing the cgroup and its descendants.
1791 if (bio_issue_as_root_blkg(bio) || fatal_signal_pending(current)) {
1792 iocg->abs_vdebt += abs_cost;
1793 if (iocg_kick_delay(iocg, &now, cost))
1794 blkcg_schedule_throttle(rqos->q,
1795 (bio->bi_opf & REQ_SWAP) == REQ_SWAP);
1796 spin_unlock_irq(&iocg->waitq.lock);
1801 * Append self to the waitq and schedule the wakeup timer if we're
1802 * the first waiter. The timer duration is calculated based on the
1803 * current vrate. vtime and hweight changes can make it too short
1804 * or too long. Each wait entry records the absolute cost it's
1805 * waiting for to allow re-evaluation using a custom wait entry.
1807 * If too short, the timer simply reschedules itself. If too long,
1808 * the period timer will notice and trigger wakeups.
1810 * All waiters are on iocg->waitq and the wait states are
1811 * synchronized using waitq.lock.
1813 init_waitqueue_func_entry(&wait.wait, iocg_wake_fn);
1814 wait.wait.private = current;
1816 wait.abs_cost = abs_cost;
1817 wait.committed = false; /* will be set true by waker */
1819 __add_wait_queue_entry_tail(&iocg->waitq, &wait.wait);
1820 iocg_kick_waitq(iocg, &now);
1822 spin_unlock_irq(&iocg->waitq.lock);
1825 set_current_state(TASK_UNINTERRUPTIBLE);
1831 /* waker already committed us, proceed */
1832 finish_wait(&iocg->waitq, &wait.wait);
1835 static void ioc_rqos_merge(struct rq_qos *rqos, struct request *rq,
1838 struct ioc_gq *iocg = blkg_to_iocg(bio->bi_blkg);
1839 struct ioc *ioc = iocg->ioc;
1840 sector_t bio_end = bio_end_sector(bio);
1844 unsigned long flags;
1846 /* bypass if disabled or for root cgroup */
1847 if (!ioc->enabled || !iocg->level)
1850 abs_cost = calc_vtime_cost(bio, iocg, true);
1855 current_hweight(iocg, NULL, &hw_inuse);
1856 cost = abs_cost_to_cost(abs_cost, hw_inuse);
1858 /* update cursor if backmerging into the request at the cursor */
1859 if (blk_rq_pos(rq) < bio_end &&
1860 blk_rq_pos(rq) + blk_rq_sectors(rq) == iocg->cursor)
1861 iocg->cursor = bio_end;
1864 * Charge if there's enough vtime budget and the existing request has
1867 if (rq->bio && rq->bio->bi_iocost_cost &&
1868 time_before_eq64(atomic64_read(&iocg->vtime) + cost, now.vnow)) {
1869 iocg_commit_bio(iocg, bio, cost);
1874 * Otherwise, account it as debt if @iocg is online, which it should
1875 * be for the vast majority of cases. See debt handling in
1876 * ioc_rqos_throttle() for details.
1878 spin_lock_irqsave(&iocg->waitq.lock, flags);
1879 if (likely(!list_empty(&iocg->active_list))) {
1880 iocg->abs_vdebt += abs_cost;
1881 iocg_kick_delay(iocg, &now, cost);
1883 iocg_commit_bio(iocg, bio, cost);
1885 spin_unlock_irqrestore(&iocg->waitq.lock, flags);
1888 static void ioc_rqos_done_bio(struct rq_qos *rqos, struct bio *bio)
1890 struct ioc_gq *iocg = blkg_to_iocg(bio->bi_blkg);
1892 if (iocg && bio->bi_iocost_cost)
1893 atomic64_add(bio->bi_iocost_cost, &iocg->done_vtime);
1896 static void ioc_rqos_done(struct rq_qos *rqos, struct request *rq)
1898 struct ioc *ioc = rqos_to_ioc(rqos);
1899 u64 on_q_ns, rq_wait_ns;
1902 if (!ioc->enabled || !rq->alloc_time_ns || !rq->start_time_ns)
1905 switch (req_op(rq) & REQ_OP_MASK) {
1918 on_q_ns = ktime_get_ns() - rq->alloc_time_ns;
1919 rq_wait_ns = rq->start_time_ns - rq->alloc_time_ns;
1921 if (on_q_ns <= ioc->params.qos[pidx] * NSEC_PER_USEC)
1922 this_cpu_inc(ioc->pcpu_stat->missed[rw].nr_met);
1924 this_cpu_inc(ioc->pcpu_stat->missed[rw].nr_missed);
1926 this_cpu_add(ioc->pcpu_stat->rq_wait_ns, rq_wait_ns);
1929 static void ioc_rqos_queue_depth_changed(struct rq_qos *rqos)
1931 struct ioc *ioc = rqos_to_ioc(rqos);
1933 spin_lock_irq(&ioc->lock);
1934 ioc_refresh_params(ioc, false);
1935 spin_unlock_irq(&ioc->lock);
1938 static void ioc_rqos_exit(struct rq_qos *rqos)
1940 struct ioc *ioc = rqos_to_ioc(rqos);
1942 blkcg_deactivate_policy(rqos->q, &blkcg_policy_iocost);
1944 spin_lock_irq(&ioc->lock);
1945 ioc->running = IOC_STOP;
1946 spin_unlock_irq(&ioc->lock);
1948 del_timer_sync(&ioc->timer);
1949 free_percpu(ioc->pcpu_stat);
1953 static struct rq_qos_ops ioc_rqos_ops = {
1954 .throttle = ioc_rqos_throttle,
1955 .merge = ioc_rqos_merge,
1956 .done_bio = ioc_rqos_done_bio,
1957 .done = ioc_rqos_done,
1958 .queue_depth_changed = ioc_rqos_queue_depth_changed,
1959 .exit = ioc_rqos_exit,
1962 static int blk_iocost_init(struct request_queue *q)
1965 struct rq_qos *rqos;
1968 ioc = kzalloc(sizeof(*ioc), GFP_KERNEL);
1972 ioc->pcpu_stat = alloc_percpu(struct ioc_pcpu_stat);
1973 if (!ioc->pcpu_stat) {
1979 rqos->id = RQ_QOS_COST;
1980 rqos->ops = &ioc_rqos_ops;
1983 spin_lock_init(&ioc->lock);
1984 timer_setup(&ioc->timer, ioc_timer_fn, 0);
1985 INIT_LIST_HEAD(&ioc->active_iocgs);
1987 ioc->running = IOC_IDLE;
1988 atomic64_set(&ioc->vtime_rate, VTIME_PER_USEC);
1989 seqcount_init(&ioc->period_seqcount);
1990 ioc->period_at = ktime_to_us(ktime_get());
1991 atomic64_set(&ioc->cur_period, 0);
1992 atomic_set(&ioc->hweight_gen, 0);
1994 spin_lock_irq(&ioc->lock);
1995 ioc->autop_idx = AUTOP_INVALID;
1996 ioc_refresh_params(ioc, true);
1997 spin_unlock_irq(&ioc->lock);
1999 rq_qos_add(q, rqos);
2000 ret = blkcg_activate_policy(q, &blkcg_policy_iocost);
2002 rq_qos_del(q, rqos);
2003 free_percpu(ioc->pcpu_stat);
2010 static struct blkcg_policy_data *ioc_cpd_alloc(gfp_t gfp)
2012 struct ioc_cgrp *iocc;
2014 iocc = kzalloc(sizeof(struct ioc_cgrp), gfp);
2018 iocc->dfl_weight = CGROUP_WEIGHT_DFL;
2022 static void ioc_cpd_free(struct blkcg_policy_data *cpd)
2024 kfree(container_of(cpd, struct ioc_cgrp, cpd));
2027 static struct blkg_policy_data *ioc_pd_alloc(gfp_t gfp, struct request_queue *q,
2028 struct blkcg *blkcg)
2030 int levels = blkcg->css.cgroup->level + 1;
2031 struct ioc_gq *iocg;
2033 iocg = kzalloc_node(sizeof(*iocg) + levels * sizeof(iocg->ancestors[0]),
2041 static void ioc_pd_init(struct blkg_policy_data *pd)
2043 struct ioc_gq *iocg = pd_to_iocg(pd);
2044 struct blkcg_gq *blkg = pd_to_blkg(&iocg->pd);
2045 struct ioc *ioc = q_to_ioc(blkg->q);
2047 struct blkcg_gq *tblkg;
2048 unsigned long flags;
2053 atomic64_set(&iocg->vtime, now.vnow);
2054 atomic64_set(&iocg->done_vtime, now.vnow);
2055 atomic64_set(&iocg->active_period, atomic64_read(&ioc->cur_period));
2056 INIT_LIST_HEAD(&iocg->active_list);
2057 iocg->hweight_active = HWEIGHT_WHOLE;
2058 iocg->hweight_inuse = HWEIGHT_WHOLE;
2060 init_waitqueue_head(&iocg->waitq);
2061 hrtimer_init(&iocg->waitq_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
2062 iocg->waitq_timer.function = iocg_waitq_timer_fn;
2063 hrtimer_init(&iocg->delay_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
2064 iocg->delay_timer.function = iocg_delay_timer_fn;
2066 iocg->level = blkg->blkcg->css.cgroup->level;
2068 for (tblkg = blkg; tblkg; tblkg = tblkg->parent) {
2069 struct ioc_gq *tiocg = blkg_to_iocg(tblkg);
2070 iocg->ancestors[tiocg->level] = tiocg;
2073 spin_lock_irqsave(&ioc->lock, flags);
2074 weight_updated(iocg);
2075 spin_unlock_irqrestore(&ioc->lock, flags);
2078 static void ioc_pd_free(struct blkg_policy_data *pd)
2080 struct ioc_gq *iocg = pd_to_iocg(pd);
2081 struct ioc *ioc = iocg->ioc;
2082 unsigned long flags;
2085 spin_lock_irqsave(&ioc->lock, flags);
2086 if (!list_empty(&iocg->active_list)) {
2087 propagate_active_weight(iocg, 0, 0);
2088 list_del_init(&iocg->active_list);
2090 spin_unlock_irqrestore(&ioc->lock, flags);
2092 hrtimer_cancel(&iocg->waitq_timer);
2093 hrtimer_cancel(&iocg->delay_timer);
2098 static u64 ioc_weight_prfill(struct seq_file *sf, struct blkg_policy_data *pd,
2101 const char *dname = blkg_dev_name(pd->blkg);
2102 struct ioc_gq *iocg = pd_to_iocg(pd);
2104 if (dname && iocg->cfg_weight)
2105 seq_printf(sf, "%s %u\n", dname, iocg->cfg_weight);
static int ioc_weight_show(struct seq_file *sf, void *v)
{
        struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
        struct ioc_cgrp *iocc = blkcg_to_iocc(blkcg);

        seq_printf(sf, "default %u\n", iocc->dfl_weight);
        blkcg_print_blkgs(sf, blkcg, ioc_weight_prfill,
                          &blkcg_policy_iocost, seq_cft(sf)->private, false);
        return 0;
}
/*
 * Writes without a "MAJ:MIN" prefix update the cgroup's default weight;
 * otherwise a per-device weight override is configured.
 */
static ssize_t ioc_weight_write(struct kernfs_open_file *of, char *buf,
                                size_t nbytes, loff_t off)
{
        struct blkcg *blkcg = css_to_blkcg(of_css(of));
        struct ioc_cgrp *iocc = blkcg_to_iocc(blkcg);
        struct blkg_conf_ctx ctx;
        struct ioc_gq *iocg;
        u32 v;
        int ret;

        if (!strchr(buf, ':')) {
                struct blkcg_gq *blkg;

                if (!sscanf(buf, "default %u", &v) && !sscanf(buf, "%u", &v))
                        return -EINVAL;
                if (v < CGROUP_WEIGHT_MIN || v > CGROUP_WEIGHT_MAX)
                        return -EINVAL;

                spin_lock(&blkcg->lock);
                iocc->dfl_weight = v;
                hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
                        struct ioc_gq *iocg = blkg_to_iocg(blkg);

                        if (iocg) {
                                spin_lock_irq(&iocg->ioc->lock);
                                weight_updated(iocg);
                                spin_unlock_irq(&iocg->ioc->lock);
                        }
                }
                spin_unlock(&blkcg->lock);

                return nbytes;
        }

        ret = blkg_conf_prep(blkcg, &blkcg_policy_iocost, buf, &ctx);
        if (ret)
                return ret;

        iocg = blkg_to_iocg(ctx.blkg);

        if (!strncmp(ctx.body, "default", 7)) {
                v = CGROUP_WEIGHT_DFL;
        } else {
                if (!sscanf(ctx.body, "%u", &v))
                        goto einval;
                if (v < CGROUP_WEIGHT_MIN || v > CGROUP_WEIGHT_MAX)
                        goto einval;
        }

        spin_lock(&iocg->ioc->lock);
        iocg->cfg_weight = v;
        weight_updated(iocg);
        spin_unlock(&iocg->ioc->lock);

        blkg_conf_finish(&ctx);
        return nbytes;

einval:
        blkg_conf_finish(&ctx);
        return -EINVAL;
}
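/*
 * Illustrative usage (not part of the original file): per the parsing
 * above, io.weight takes either a default weight for the whole cgroup or
 * a per-device override keyed by MAJ:MIN, e.g.:
 *
 *   # echo "default 200" > /sys/fs/cgroup/<grp>/io.weight
 *   # echo "8:16 50"     > /sys/fs/cgroup/<grp>/io.weight
 *
 * "8:16" is a placeholder device number.  Weights must fall within
 * CGROUP_WEIGHT_MIN..CGROUP_WEIGHT_MAX, and writing "MAJ:MIN default"
 * sets the per-device weight back to CGROUP_WEIGHT_DFL.
 */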
static u64 ioc_qos_prfill(struct seq_file *sf, struct blkg_policy_data *pd,
                          int off)
{
        const char *dname = blkg_dev_name(pd->blkg);
        struct ioc *ioc = pd_to_iocg(pd)->ioc;

        if (!dname)
                return 0;

        seq_printf(sf, "%s enable=%d ctrl=%s rpct=%u.%02u rlat=%u wpct=%u.%02u wlat=%u min=%u.%02u max=%u.%02u\n",
                   dname, ioc->enabled, ioc->user_qos_params ? "user" : "auto",
                   ioc->params.qos[QOS_RPPM] / 10000,
                   ioc->params.qos[QOS_RPPM] % 10000 / 100,
                   ioc->params.qos[QOS_RLAT],
                   ioc->params.qos[QOS_WPPM] / 10000,
                   ioc->params.qos[QOS_WPPM] % 10000 / 100,
                   ioc->params.qos[QOS_WLAT],
                   ioc->params.qos[QOS_MIN] / 10000,
                   ioc->params.qos[QOS_MIN] % 10000 / 100,
                   ioc->params.qos[QOS_MAX] / 10000,
                   ioc->params.qos[QOS_MAX] % 10000 / 100);
        return 0;
}
static int ioc_qos_show(struct seq_file *sf, void *v)
{
        struct blkcg *blkcg = css_to_blkcg(seq_css(sf));

        blkcg_print_blkgs(sf, blkcg, ioc_qos_prfill,
                          &blkcg_policy_iocost, seq_cft(sf)->private, false);
        return 0;
}
static const match_table_t qos_ctrl_tokens = {
        { QOS_ENABLE,           "enable=%u"     },
        { QOS_CTRL,             "ctrl=%s"       },
        { NR_QOS_CTRL_PARAMS,   NULL            },
};

static const match_table_t qos_tokens = {
        { QOS_RPPM,             "rpct=%s"       },
        { QOS_RLAT,             "rlat=%u"       },
        { QOS_WPPM,             "wpct=%s"       },
        { QOS_WLAT,             "wlat=%u"       },
        { QOS_MIN,              "min=%s"        },
        { QOS_MAX,              "max=%s"        },
        { NR_QOS_PARAMS,        NULL            },
};
static ssize_t ioc_qos_write(struct kernfs_open_file *of, char *input,
                             size_t nbytes, loff_t off)
{
        struct gendisk *disk;
        struct ioc *ioc;
        u32 qos[NR_QOS_PARAMS];
        bool enable, user;
        char *p;
        int ret;

        disk = blkcg_conf_get_disk(&input);
        if (IS_ERR(disk))
                return PTR_ERR(disk);

        ioc = q_to_ioc(disk->queue);
        if (!ioc) {
                ret = blk_iocost_init(disk->queue);
                if (ret)
                        goto err;
                ioc = q_to_ioc(disk->queue);
        }

        spin_lock_irq(&ioc->lock);
        memcpy(qos, ioc->params.qos, sizeof(qos));
        enable = ioc->enabled;
        user = ioc->user_qos_params;
        spin_unlock_irq(&ioc->lock);

        while ((p = strsep(&input, " \t\n"))) {
                substring_t args[MAX_OPT_ARGS];
                char buf[32];
                int tok;
                s64 v;

                if (!*p)
                        continue;

                switch (match_token(p, qos_ctrl_tokens, args)) {
                case QOS_ENABLE:
                        match_u64(&args[0], &v);
                        enable = v;
                        continue;
                case QOS_CTRL:
                        match_strlcpy(buf, &args[0], sizeof(buf));
                        if (!strcmp(buf, "auto"))
                                user = false;
                        else if (!strcmp(buf, "user"))
                                user = true;
                        else
                                goto einval;
                        continue;
                }

                tok = match_token(p, qos_tokens, args);
                switch (tok) {
                case QOS_RPPM:
                case QOS_WPPM:
                        if (match_strlcpy(buf, &args[0], sizeof(buf)) >=
                            sizeof(buf))
                                goto einval;
                        if (cgroup_parse_float(buf, 2, &v))
                                goto einval;
                        if (v < 0 || v > 10000)
                                goto einval;
                        qos[tok] = v * 100;
                        break;
                case QOS_RLAT:
                case QOS_WLAT:
                        if (match_u64(&args[0], &v))
                                goto einval;
                        qos[tok] = v;
                        break;
                case QOS_MIN:
                case QOS_MAX:
                        if (match_strlcpy(buf, &args[0], sizeof(buf)) >=
                            sizeof(buf))
                                goto einval;
                        if (cgroup_parse_float(buf, 2, &v))
                                goto einval;
                        if (v < 0)
                                goto einval;
                        qos[tok] = clamp_t(s64, v * 100,
                                           VRATE_MIN_PPM, VRATE_MAX_PPM);
                        break;
                default:
                        goto einval;
                }
                user = true;
        }

        if (qos[QOS_MIN] > qos[QOS_MAX])
                goto einval;

        spin_lock_irq(&ioc->lock);
        if (enable) {
                blk_queue_flag_set(QUEUE_FLAG_RQ_ALLOC_TIME, ioc->rqos.q);
                ioc->enabled = true;
        } else {
                blk_queue_flag_clear(QUEUE_FLAG_RQ_ALLOC_TIME, ioc->rqos.q);
                ioc->enabled = false;
        }

        if (user) {
                memcpy(ioc->params.qos, qos, sizeof(qos));
                ioc->user_qos_params = true;
        } else {
                ioc->user_qos_params = false;
        }

        ioc_refresh_params(ioc, true);
        spin_unlock_irq(&ioc->lock);

        put_disk_and_module(disk);
        return nbytes;
einval:
        ret = -EINVAL;
err:
        put_disk_and_module(disk);
        return ret;
}
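/*
 * Illustrative usage (not part of the original file): io.cost.qos lives
 * on the cgroup root and takes a MAJ:MIN prefix followed by the key=value
 * pairs parsed above, e.g.:
 *
 *   # echo "8:16 enable=1 rpct=95.00 rlat=10000 wpct=95.00 wlat=20000 \
 *           min=50.00 max=150.00" > /sys/fs/cgroup/io.cost.qos
 *
 * The device number and values are placeholders.  rpct/rlat and wpct/wlat
 * set the read/write latency QoS targets (completion percentile and
 * microseconds), and min/max bound the vrate adjustment range in percent.
 */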
static u64 ioc_cost_model_prfill(struct seq_file *sf,
                                 struct blkg_policy_data *pd, int off)
{
        const char *dname = blkg_dev_name(pd->blkg);
        struct ioc *ioc = pd_to_iocg(pd)->ioc;
        u64 *u = ioc->params.i_lcoefs;

        if (!dname)
                return 0;

        seq_printf(sf, "%s ctrl=%s model=linear "
                   "rbps=%llu rseqiops=%llu rrandiops=%llu "
                   "wbps=%llu wseqiops=%llu wrandiops=%llu\n",
                   dname, ioc->user_cost_model ? "user" : "auto",
                   u[I_LCOEF_RBPS], u[I_LCOEF_RSEQIOPS], u[I_LCOEF_RRANDIOPS],
                   u[I_LCOEF_WBPS], u[I_LCOEF_WSEQIOPS], u[I_LCOEF_WRANDIOPS]);
        return 0;
}
static int ioc_cost_model_show(struct seq_file *sf, void *v)
{
        struct blkcg *blkcg = css_to_blkcg(seq_css(sf));

        blkcg_print_blkgs(sf, blkcg, ioc_cost_model_prfill,
                          &blkcg_policy_iocost, seq_cft(sf)->private, false);
        return 0;
}
static const match_table_t cost_ctrl_tokens = {
        { COST_CTRL,            "ctrl=%s"       },
        { COST_MODEL,           "model=%s"      },
        { NR_COST_CTRL_PARAMS,  NULL            },
};

static const match_table_t i_lcoef_tokens = {
        { I_LCOEF_RBPS,         "rbps=%u"       },
        { I_LCOEF_RSEQIOPS,     "rseqiops=%u"   },
        { I_LCOEF_RRANDIOPS,    "rrandiops=%u"  },
        { I_LCOEF_WBPS,         "wbps=%u"       },
        { I_LCOEF_WSEQIOPS,     "wseqiops=%u"   },
        { I_LCOEF_WRANDIOPS,    "wrandiops=%u"  },
        { NR_I_LCOEFS,          NULL            },
};
static ssize_t ioc_cost_model_write(struct kernfs_open_file *of, char *input,
                                    size_t nbytes, loff_t off)
{
        struct gendisk *disk;
        struct ioc *ioc;
        u64 u[NR_I_LCOEFS];
        bool user;
        char *p;
        int ret;

        disk = blkcg_conf_get_disk(&input);
        if (IS_ERR(disk))
                return PTR_ERR(disk);

        ioc = q_to_ioc(disk->queue);
        if (!ioc) {
                ret = blk_iocost_init(disk->queue);
                if (ret)
                        goto err;
                ioc = q_to_ioc(disk->queue);
        }

        spin_lock_irq(&ioc->lock);
        memcpy(u, ioc->params.i_lcoefs, sizeof(u));
        user = ioc->user_cost_model;
        spin_unlock_irq(&ioc->lock);

        while ((p = strsep(&input, " \t\n"))) {
                substring_t args[MAX_OPT_ARGS];
                char buf[32];
                int tok;
                u64 v;

                if (!*p)
                        continue;

                switch (match_token(p, cost_ctrl_tokens, args)) {
                case COST_CTRL:
                        match_strlcpy(buf, &args[0], sizeof(buf));
                        if (!strcmp(buf, "auto"))
                                user = false;
                        else if (!strcmp(buf, "user"))
                                user = true;
                        else
                                goto einval;
                        continue;
                case COST_MODEL:
                        match_strlcpy(buf, &args[0], sizeof(buf));
                        if (strcmp(buf, "linear"))
                                goto einval;
                        continue;
                }

                tok = match_token(p, i_lcoef_tokens, args);
                if (tok == NR_I_LCOEFS)
                        goto einval;
                if (match_u64(&args[0], &v))
                        goto einval;
                u[tok] = v;
                user = true;
        }

        spin_lock_irq(&ioc->lock);
        if (user) {
                memcpy(ioc->params.i_lcoefs, u, sizeof(u));
                ioc->user_cost_model = true;
        } else {
                ioc->user_cost_model = false;
        }
        ioc_refresh_params(ioc, true);
        spin_unlock_irq(&ioc->lock);

        put_disk_and_module(disk);
        return nbytes;

einval:
        ret = -EINVAL;
err:
        put_disk_and_module(disk);
        return ret;
}
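/*
 * Illustrative usage (not part of the original file): io.cost.model also
 * lives on the cgroup root and takes the linear-model coefficients parsed
 * above (bytes per second and sequential/random IOs per second), e.g.:
 *
 *   # echo "8:16 model=linear rbps=500000000 rseqiops=50000 \
 *           rrandiops=30000 wbps=400000000 wseqiops=40000 \
 *           wrandiops=20000" > /sys/fs/cgroup/io.cost.model
 *
 * The device number and coefficients are made-up placeholders; real
 * values should reflect the measured capability of the device.
 */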
static struct cftype ioc_files[] = {
        {
                .name = "weight",
                .flags = CFTYPE_NOT_ON_ROOT,
                .seq_show = ioc_weight_show,
                .write = ioc_weight_write,
        },
        {
                .name = "cost.qos",
                .flags = CFTYPE_ONLY_ON_ROOT,
                .seq_show = ioc_qos_show,
                .write = ioc_qos_write,
        },
        {
                .name = "cost.model",
                .flags = CFTYPE_ONLY_ON_ROOT,
                .seq_show = ioc_cost_model_show,
                .write = ioc_cost_model_write,
        },
        {}
};
static struct blkcg_policy blkcg_policy_iocost = {
        .dfl_cftypes    = ioc_files,
        .cpd_alloc_fn   = ioc_cpd_alloc,
        .cpd_free_fn    = ioc_cpd_free,
        .pd_alloc_fn    = ioc_pd_alloc,
        .pd_init_fn     = ioc_pd_init,
        .pd_free_fn     = ioc_pd_free,
};
static int __init ioc_init(void)
{
        return blkcg_policy_register(&blkcg_policy_iocost);
}

static void __exit ioc_exit(void)
{
        return blkcg_policy_unregister(&blkcg_policy_iocost);
}

module_init(ioc_init);
module_exit(ioc_exit);