1 /* SPDX-License-Identifier: GPL-2.0
2  *
3  * IO cost model based controller.
4  *
5  * Copyright (C) 2019 Tejun Heo <tj@kernel.org>
6  * Copyright (C) 2019 Andy Newell <newella@fb.com>
7  * Copyright (C) 2019 Facebook
8  *
 9  * One challenge of controlling IO resources is the lack of a trivially
 10  * observable cost metric.  This distinguishes it from CPU and memory, where
11  * wallclock time and the number of bytes can serve as accurate enough
12  * approximations.
13  *
14  * Bandwidth and iops are the most commonly used metrics for IO devices but
15  * depending on the type and specifics of the device, different IO patterns
16  * easily lead to multiple orders of magnitude variations rendering them
17  * useless for the purpose of IO capacity distribution.  While on-device
 18  * time, with a lot of crutches, could serve as a useful approximation for
19  * non-queued rotational devices, this is no longer viable with modern
20  * devices, even the rotational ones.
21  *
22  * While there is no cost metric we can trivially observe, it isn't a
23  * complete mystery.  For example, on a rotational device, seek cost
24  * dominates while a contiguous transfer contributes a smaller amount
25  * proportional to the size.  If we can characterize at least the relative
26  * costs of these different types of IOs, it should be possible to
27  * implement a reasonable work-conserving proportional IO resource
28  * distribution.
29  *
30  * 1. IO Cost Model
31  *
32  * IO cost model estimates the cost of an IO given its basic parameters and
33  * history (e.g. the end sector of the last IO).  The cost is measured in
34  * device time.  If a given IO is estimated to cost 10ms, the device should
35  * be able to process ~100 of those IOs in a second.
36  *
37  * Currently, there's only one builtin cost model - linear.  Each IO is
38  * classified as sequential or random and given a base cost accordingly.
39  * On top of that, a size cost proportional to the length of the IO is
40  * added.  While simple, this model captures the operational
 41  * characteristics of a wide variety of devices well enough.  Default
42  * parameters for several different classes of devices are provided and the
43  * parameters can be configured from userspace via
44  * /sys/fs/cgroup/io.cost.model.
45  *
46  * If needed, tools/cgroup/iocost_coef_gen.py can be used to generate
47  * device-specific coefficients.
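 *
 * As an illustrative sketch (the key names follow the linear model
 * parameters above; the numbers are simply the HDD defaults from this
 * file, not recommendations), a user override might look like:
 *
 *   # echo "8:16 ctrl=user model=linear rbps=174019176 rseqiops=41708 \
 *       rrandiops=370 wbps=178075866 wseqiops=42705 wrandiops=378" \
 *       > /sys/fs/cgroup/io.cost.model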
48  *
49  * 2. Control Strategy
50  *
51  * The device virtual time (vtime) is used as the primary control metric.
52  * The control strategy is composed of the following three parts.
53  *
54  * 2-1. Vtime Distribution
55  *
56  * When a cgroup becomes active in terms of IOs, its hierarchical share is
57  * calculated.  Please consider the following hierarchy where the numbers
58  * inside parentheses denote the configured weights.
59  *
60  *           root
61  *         /       \
62  *      A (w:100)  B (w:300)
63  *      /       \
64  *  A0 (w:100)  A1 (w:100)
65  *
66  * If B is idle and only A0 and A1 are actively issuing IOs, as the two are
 67  * of equal weight, each gets 50% share.  If B then starts issuing IOs, B
 68  * gets 300/(100+300) or 75% share, and A0 and A1 equally split the rest,
69  * 12.5% each.  The distribution mechanism only cares about these flattened
70  * shares.  They're called hweights (hierarchical weights) and always add
 71  * up to 1 (WEIGHT_ONE).
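 *
 * In general, an iocg's hweight is the product of its weight to sibling
 * weight-sum ratios walking down from the root, e.g. for A0 above:
 *
 *   hweight(A0) = 100/(100+300) * 100/(100+100) = 25% * 50% = 12.5%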
72  *
73  * A given cgroup's vtime runs slower in inverse proportion to its hweight.
74  * For example, with 12.5% weight, A0's time runs 8 times slower (100/12.5)
75  * against the device vtime - an IO which takes 10ms on the underlying
76  * device is considered to take 80ms on A0.
77  *
78  * This constitutes the basis of IO capacity distribution.  Each cgroup's
79  * vtime is running at a rate determined by its hweight.  A cgroup tracks
80  * the vtime consumed by past IOs and can issue a new IO if doing so
81  * wouldn't outrun the current device vtime.  Otherwise, the IO is
82  * suspended until the vtime has progressed enough to cover it.
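 *
 * As a rough sketch of the admission logic (names simplified from the
 * actual implementation below):
 *
 *   vbudget = vnow - atomic64_read(&iocg->vtime);
 *   if (cost <= vbudget)
 *           iocg_commit_bio(iocg, bio, abs_cost, cost);   // issue now
 *   else
 *           ...wait on iocg->waitq until vtime catches up...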
83  *
84  * 2-2. Vrate Adjustment
85  *
86  * It's unrealistic to expect the cost model to be perfect.  There are too
87  * many devices and even on the same device the overall performance
88  * fluctuates depending on numerous factors such as IO mixture and device
89  * internal garbage collection.  The controller needs to adapt dynamically.
90  *
91  * This is achieved by adjusting the overall IO rate according to how busy
92  * the device is.  If the device becomes overloaded, we're sending down too
93  * many IOs and should generally slow down.  If there are waiting issuers
94  * but the device isn't saturated, we're issuing too few and should
95  * generally speed up.
96  *
97  * To slow down, we lower the vrate - the rate at which the device vtime
98  * passes compared to the wall clock.  For example, if the vtime is running
99  * at the vrate of 75%, all cgroups added up would only be able to issue
100  * 750ms worth of IOs per second, and vice-versa for speeding up.
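 *
 * In other words, the device vtime progresses as
 *
 *   vnow = period_at_vtime + (now - period_at) * vrate
 *
 * which is exactly how ioc_now() computes it below.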
101  *
 102  * Device busyness is determined using two criteria - rq wait and
103  * completion latencies.
104  *
105  * When a device gets saturated, the on-device and then the request queues
106  * fill up and a bio which is ready to be issued has to wait for a request
107  * to become available.  When this delay becomes noticeable, it's a clear
108  * indication that the device is saturated and we lower the vrate.  This
109  * saturation signal is fairly conservative as it only triggers when both
110  * hardware and software queues are filled up, and is used as the default
111  * busy signal.
112  *
113  * As devices can have deep queues and be unfair in how the queued commands
 114  * are executed, solely depending on rq wait may not result in satisfactory
115  * control quality.  For a better control quality, completion latency QoS
116  * parameters can be configured so that the device is considered saturated
117  * if N'th percentile completion latency rises above the set point.
118  *
119  * The completion latency requirements are a function of both the
120  * underlying device characteristics and the desired IO latency quality of
121  * service.  There is an inherent trade-off - the tighter the latency QoS,
 122  * the higher the bandwidth loss.  Latency QoS is disabled by default
123  * and can be set through /sys/fs/cgroup/io.cost.qos.
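 *
 * As an illustrative example (values hypothetical), enabling the
 * controller with a 95th percentile completion latency target of 5ms
 * for both reads and writes might look like:
 *
 *   # echo "8:16 enable=1 ctrl=user rpct=95.00 rlat=5000 wpct=95.00 \
 *       wlat=5000 min=50.00 max=150.00" > /sys/fs/cgroup/io.cost.qos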
124  *
125  * 2-3. Work Conservation
126  *
127  * Imagine two cgroups A and B with equal weights.  A is issuing a small IO
128  * periodically while B is sending out enough parallel IOs to saturate the
129  * device on its own.  Let's say A's usage amounts to 100ms worth of IO
130  * cost per second, i.e., 10% of the device capacity.  The naive
131  * distribution of half and half would lead to 60% utilization of the
132  * device, a significant reduction in the total amount of work done
133  * compared to free-for-all competition.  This is too high a cost to pay
134  * for IO control.
135  *
136  * To conserve the total amount of work done, we keep track of how much
137  * each active cgroup is actually using and yield part of its weight if
138  * there are other cgroups which can make use of it.  In the above case,
139  * A's weight will be lowered so that it hovers above the actual usage and
140  * B would be able to use the rest.
141  *
142  * As we don't want to penalize a cgroup for donating its weight, the
143  * surplus weight adjustment factors in a margin and has an immediate
144  * snapback mechanism in case the cgroup needs more IO vtime for itself.
145  *
146  * Note that adjusting down surplus weights has the same effects as
147  * accelerating vtime for other cgroups and work conservation can also be
148  * implemented by adjusting vrate dynamically.  However, squaring who can
149  * donate and should take back how much requires hweight propagations
150  * anyway making it easier to implement and understand as a separate
151  * mechanism.
152  *
153  * 3. Monitoring
154  *
155  * Instead of debugfs or other clumsy monitoring mechanisms, this
156  * controller uses a drgn based monitoring script -
157  * tools/cgroup/iocost_monitor.py.  For details on drgn, please see
158  * https://github.com/osandov/drgn.  The output looks like the following.
159  *
160  *  sdb RUN   per=300ms cur_per=234.218:v203.695 busy= +1 vrate= 62.12%
161  *                 active      weight      hweight% inflt% dbt  delay usages%
162  *  test/a              *    50/   50  33.33/ 33.33  27.65   2  0*041 033:033:033
163  *  test/b              *   100/  100  66.67/ 66.67  17.56   0  0*000 066:079:077
164  *
165  * - per        : Timer period
166  * - cur_per    : Internal wall and device vtime clock
167  * - vrate      : Device virtual time rate against wall clock
168  * - weight     : Surplus-adjusted and configured weights
169  * - hweight    : Surplus-adjusted and configured hierarchical weights
170  * - inflt      : The percentage of in-flight IO cost at the end of last period
 171  * - delay      : Deferred issuer delay induction level and duration
172  * - usages     : Usage history
173  */
174
175 #include <linux/kernel.h>
176 #include <linux/module.h>
177 #include <linux/timer.h>
178 #include <linux/time64.h>
179 #include <linux/parser.h>
180 #include <linux/sched/signal.h>
181 #include <asm/local.h>
182 #include <asm/local64.h>
183 #include "blk-rq-qos.h"
184 #include "blk-stat.h"
185 #include "blk-wbt.h"
186 #include "blk-cgroup.h"
187
188 #ifdef CONFIG_TRACEPOINTS
189
190 /* copied from TRACE_CGROUP_PATH, see cgroup-internal.h */
191 #define TRACE_IOCG_PATH_LEN 1024
192 static DEFINE_SPINLOCK(trace_iocg_path_lock);
193 static char trace_iocg_path[TRACE_IOCG_PATH_LEN];
194
195 #define TRACE_IOCG_PATH(type, iocg, ...)                                        \
196         do {                                                                    \
197                 unsigned long flags;                                            \
198                 if (trace_iocost_##type##_enabled()) {                          \
199                         spin_lock_irqsave(&trace_iocg_path_lock, flags);        \
200                         cgroup_path(iocg_to_blkg(iocg)->blkcg->css.cgroup,      \
201                                     trace_iocg_path, TRACE_IOCG_PATH_LEN);      \
202                         trace_iocost_##type(iocg, trace_iocg_path,              \
203                                               ##__VA_ARGS__);                   \
204                         spin_unlock_irqrestore(&trace_iocg_path_lock, flags);   \
205                 }                                                               \
206         } while (0)
207
 209 #else   /* CONFIG_TRACEPOINTS */
 210 #define TRACE_IOCG_PATH(type, iocg, ...)        do { } while (0)
 211 #endif  /* CONFIG_TRACEPOINTS */
211
212 enum {
213         MILLION                 = 1000000,
214
215         /* timer period is calculated from latency requirements, bound it */
216         MIN_PERIOD              = USEC_PER_MSEC,
217         MAX_PERIOD              = USEC_PER_SEC,
218
219         /*
220          * iocg->vtime is targeted at 50% behind the device vtime, which
221          * serves as its IO credit buffer.  Surplus weight adjustment is
222          * immediately canceled if the vtime margin runs below 10%.
223          */
224         MARGIN_MIN_PCT          = 10,
225         MARGIN_LOW_PCT          = 20,
226         MARGIN_TARGET_PCT       = 50,
227
228         INUSE_ADJ_STEP_PCT      = 25,
229
230         /* Have some play in timer operations */
231         TIMER_SLACK_PCT         = 1,
232
233         /* 1/64k is granular enough and can easily be handled w/ u32 */
234         WEIGHT_ONE              = 1 << 16,
235 };
236
237 enum {
238         /*
239          * As vtime is used to calculate the cost of each IO, it needs to
240          * be fairly high precision.  For example, it should be able to
241          * represent the cost of a single page worth of discard with
 242          * sufficient accuracy.  At the same time, it should be able to
243          * represent reasonably long enough durations to be useful and
244          * convenient during operation.
245          *
246          * 1s worth of vtime is 2^37.  This gives us both sub-nanosecond
247          * granularity and days of wrap-around time even at extreme vrates.
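         *
         * Concretely, 2^37 =~ 1.4e11 vtime units per second, i.e. ~137
         * units per nanosecond, while a u64 only wraps after 2^27
         * seconds (~4 years) at the nominal vrate and still ~15 days
         * at the 10000% maximum vrate.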
248          */
249         VTIME_PER_SEC_SHIFT     = 37,
250         VTIME_PER_SEC           = 1LLU << VTIME_PER_SEC_SHIFT,
251         VTIME_PER_USEC          = VTIME_PER_SEC / USEC_PER_SEC,
252         VTIME_PER_NSEC          = VTIME_PER_SEC / NSEC_PER_SEC,
253
254         /* bound vrate adjustments within two orders of magnitude */
255         VRATE_MIN_PPM           = 10000,        /* 1% */
256         VRATE_MAX_PPM           = 100000000,    /* 10000% */
257
258         VRATE_MIN               = VTIME_PER_USEC * VRATE_MIN_PPM / MILLION,
259         VRATE_CLAMP_ADJ_PCT     = 4,
260
261         /* switch iff the conditions are met for longer than this */
262         AUTOP_CYCLE_NSEC        = 10LLU * NSEC_PER_SEC,
263 };
264
265 enum {
266         /* if IOs end up waiting for requests, issue less */
267         RQ_WAIT_BUSY_PCT        = 5,
268
 269         /* unbusy hysteresis */
270         UNBUSY_THR_PCT          = 75,
271
272         /*
273          * The effect of delay is indirect and non-linear and a huge amount of
274          * future debt can accumulate abruptly while unthrottled. Linearly scale
275          * up delay as debt is going up and then let it decay exponentially.
276          * This gives us quick ramp ups while delay is accumulating and long
 277          * tails which can help reduce the frequency of debt explosions on
278          * unthrottle. The parameters are experimentally determined.
279          *
280          * The delay mechanism provides adequate protection and behavior in many
 281          * cases. However, this is far from ideal and falls short on both
 282          * fronts. The debtors are often throttled too harshly, costing a
283          * significant level of fairness and possibly total work while the
284          * protection against their impacts on the system can be choppy and
285          * unreliable.
286          *
287          * The shortcoming primarily stems from the fact that, unlike for page
 288          * cache, the kernel doesn't have a well-defined back-pressure propagation
289          * mechanism and policies for anonymous memory. Fully addressing this
290          * issue will likely require substantial improvements in the area.
291          */
292         MIN_DELAY_THR_PCT       = 500,
293         MAX_DELAY_THR_PCT       = 25000,
294         MIN_DELAY               = 250,
295         MAX_DELAY               = 250 * USEC_PER_MSEC,
296
297         /* halve debts if avg usage over 100ms is under 50% */
298         DFGV_USAGE_PCT          = 50,
299         DFGV_PERIOD             = 100 * USEC_PER_MSEC,
300
 301         /* don't let commands which take a very long time keep an iocg pinned as lagging */
302         MAX_LAGGING_PERIODS     = 10,
303
304         /*
 305          * Count IO size in 4k pages.  The 12-bit shift helps keep
306          * size-proportional components of cost calculation in closer
307          * numbers of digits to per-IO cost components.
308          */
309         IOC_PAGE_SHIFT          = 12,
310         IOC_PAGE_SIZE           = 1 << IOC_PAGE_SHIFT,
311         IOC_SECT_TO_PAGE_SHIFT  = IOC_PAGE_SHIFT - SECTOR_SHIFT,
312
 313         /* if IOs are more than 16M (4096 4k pages) apart, consider them random for the linear model */
314         LCOEF_RANDIO_PAGES      = 4096,
315 };
316
317 enum ioc_running {
318         IOC_IDLE,
319         IOC_RUNNING,
320         IOC_STOP,
321 };
322
323 /* io.cost.qos controls including per-dev enable of the whole controller */
324 enum {
325         QOS_ENABLE,
326         QOS_CTRL,
327         NR_QOS_CTRL_PARAMS,
328 };
329
330 /* io.cost.qos params */
331 enum {
332         QOS_RPPM,
333         QOS_RLAT,
334         QOS_WPPM,
335         QOS_WLAT,
336         QOS_MIN,
337         QOS_MAX,
338         NR_QOS_PARAMS,
339 };
340
341 /* io.cost.model controls */
342 enum {
343         COST_CTRL,
344         COST_MODEL,
345         NR_COST_CTRL_PARAMS,
346 };
347
348 /* builtin linear cost model coefficients */
349 enum {
350         I_LCOEF_RBPS,
351         I_LCOEF_RSEQIOPS,
352         I_LCOEF_RRANDIOPS,
353         I_LCOEF_WBPS,
354         I_LCOEF_WSEQIOPS,
355         I_LCOEF_WRANDIOPS,
356         NR_I_LCOEFS,
357 };
358
359 enum {
360         LCOEF_RPAGE,
361         LCOEF_RSEQIO,
362         LCOEF_RRANDIO,
363         LCOEF_WPAGE,
364         LCOEF_WSEQIO,
365         LCOEF_WRANDIO,
366         NR_LCOEFS,
367 };
368
369 enum {
370         AUTOP_INVALID,
371         AUTOP_HDD,
372         AUTOP_SSD_QD1,
373         AUTOP_SSD_DFL,
374         AUTOP_SSD_FAST,
375 };
376
377 struct ioc_params {
378         u32                             qos[NR_QOS_PARAMS];
379         u64                             i_lcoefs[NR_I_LCOEFS];
380         u64                             lcoefs[NR_LCOEFS];
381         u32                             too_fast_vrate_pct;
382         u32                             too_slow_vrate_pct;
383 };
384
385 struct ioc_margins {
386         s64                             min;
387         s64                             low;
388         s64                             target;
389 };
390
391 struct ioc_missed {
392         local_t                         nr_met;
393         local_t                         nr_missed;
394         u32                             last_met;
395         u32                             last_missed;
396 };
397
398 struct ioc_pcpu_stat {
399         struct ioc_missed               missed[2];
400
401         local64_t                       rq_wait_ns;
402         u64                             last_rq_wait_ns;
403 };
404
405 /* per device */
406 struct ioc {
407         struct rq_qos                   rqos;
408
409         bool                            enabled;
410
411         struct ioc_params               params;
412         struct ioc_margins              margins;
413         u32                             period_us;
414         u32                             timer_slack_ns;
415         u64                             vrate_min;
416         u64                             vrate_max;
417
418         spinlock_t                      lock;
419         struct timer_list               timer;
420         struct list_head                active_iocgs;   /* active cgroups */
421         struct ioc_pcpu_stat __percpu   *pcpu_stat;
422
423         enum ioc_running                running;
424         atomic64_t                      vtime_rate;
425         u64                             vtime_base_rate;
426         s64                             vtime_err;
427
428         seqcount_spinlock_t             period_seqcount;
429         u64                             period_at;      /* wallclock starttime */
430         u64                             period_at_vtime; /* vtime starttime */
431
432         atomic64_t                      cur_period;     /* inc'd each period */
433         int                             busy_level;     /* saturation history */
434
435         bool                            weights_updated;
436         atomic_t                        hweight_gen;    /* for lazy hweights */
437
 438         /* debt forgiveness */
439         u64                             dfgv_period_at;
440         u64                             dfgv_period_rem;
441         u64                             dfgv_usage_us_sum;
442
443         u64                             autop_too_fast_at;
444         u64                             autop_too_slow_at;
445         int                             autop_idx;
446         bool                            user_qos_params:1;
447         bool                            user_cost_model:1;
448 };
449
450 struct iocg_pcpu_stat {
451         local64_t                       abs_vusage;
452 };
453
454 struct iocg_stat {
455         u64                             usage_us;
456         u64                             wait_us;
457         u64                             indebt_us;
458         u64                             indelay_us;
459 };
460
461 /* per device-cgroup pair */
462 struct ioc_gq {
463         struct blkg_policy_data         pd;
464         struct ioc                      *ioc;
465
466         /*
 467          * An iocg can get its weight from two sources - an explicit
468          * per-device-cgroup configuration or the default weight of the
469          * cgroup.  `cfg_weight` is the explicit per-device-cgroup
 470          * configuration.  `weight` is the effective weight considering both
471          * sources.
472          *
473          * When an idle cgroup becomes active its `active` goes from 0 to
474          * `weight`.  `inuse` is the surplus adjusted active weight.
475          * `active` and `inuse` are used to calculate `hweight_active` and
476          * `hweight_inuse`.
477          *
478          * `last_inuse` remembers `inuse` while an iocg is idle to persist
479          * surplus adjustments.
480          *
 481          * `inuse` may be adjusted dynamically during the period. `saved_*` are used
482          * to determine and track adjustments.
483          */
484         u32                             cfg_weight;
485         u32                             weight;
486         u32                             active;
487         u32                             inuse;
488
489         u32                             last_inuse;
490         s64                             saved_margin;
491
492         sector_t                        cursor;         /* to detect randio */
493
494         /*
495          * `vtime` is this iocg's vtime cursor which progresses as IOs are
496          * issued.  If lagging behind device vtime, the delta represents
497          * the currently available IO budget.  If running ahead, the
498          * overage.
499          *
 500          * `done_vtime` is the same but progresses on completion rather
501          * than issue.  The delta behind `vtime` represents the cost of
502          * currently in-flight IOs.
503          */
504         atomic64_t                      vtime;
505         atomic64_t                      done_vtime;
506         u64                             abs_vdebt;
507
508         /* current delay in effect and when it started */
509         u64                             delay;
510         u64                             delay_at;
511
512         /*
513          * The period this iocg was last active in.  Used for deactivation
514          * and invalidating `vtime`.
515          */
516         atomic64_t                      active_period;
517         struct list_head                active_list;
518
519         /* see __propagate_weights() and current_hweight() for details */
520         u64                             child_active_sum;
521         u64                             child_inuse_sum;
522         u64                             child_adjusted_sum;
523         int                             hweight_gen;
524         u32                             hweight_active;
525         u32                             hweight_inuse;
526         u32                             hweight_donating;
527         u32                             hweight_after_donation;
528
529         struct list_head                walk_list;
530         struct list_head                surplus_list;
531
532         struct wait_queue_head          waitq;
533         struct hrtimer                  waitq_timer;
534
535         /* timestamp at the latest activation */
536         u64                             activated_at;
537
538         /* statistics */
539         struct iocg_pcpu_stat __percpu  *pcpu_stat;
540         struct iocg_stat                stat;
541         struct iocg_stat                last_stat;
542         u64                             last_stat_abs_vusage;
543         u64                             usage_delta_us;
544         u64                             wait_since;
545         u64                             indebt_since;
546         u64                             indelay_since;
547
548         /* this iocg's depth in the hierarchy and ancestors including self */
549         int                             level;
550         struct ioc_gq                   *ancestors[];
551 };
552
553 /* per cgroup */
554 struct ioc_cgrp {
555         struct blkcg_policy_data        cpd;
556         unsigned int                    dfl_weight;
557 };
558
559 struct ioc_now {
560         u64                             now_ns;
561         u64                             now;
562         u64                             vnow;
563         u64                             vrate;
564 };
565
566 struct iocg_wait {
567         struct wait_queue_entry         wait;
568         struct bio                      *bio;
569         u64                             abs_cost;
570         bool                            committed;
571 };
572
573 struct iocg_wake_ctx {
574         struct ioc_gq                   *iocg;
575         u32                             hw_inuse;
576         s64                             vbudget;
577 };
578
579 static const struct ioc_params autop[] = {
580         [AUTOP_HDD] = {
581                 .qos                            = {
582                         [QOS_RLAT]              =        250000, /* 250ms */
583                         [QOS_WLAT]              =        250000,
584                         [QOS_MIN]               = VRATE_MIN_PPM,
585                         [QOS_MAX]               = VRATE_MAX_PPM,
586                 },
587                 .i_lcoefs                       = {
588                         [I_LCOEF_RBPS]          =     174019176,
589                         [I_LCOEF_RSEQIOPS]      =         41708,
590                         [I_LCOEF_RRANDIOPS]     =           370,
591                         [I_LCOEF_WBPS]          =     178075866,
592                         [I_LCOEF_WSEQIOPS]      =         42705,
593                         [I_LCOEF_WRANDIOPS]     =           378,
594                 },
595         },
596         [AUTOP_SSD_QD1] = {
597                 .qos                            = {
598                         [QOS_RLAT]              =         25000, /* 25ms */
599                         [QOS_WLAT]              =         25000,
600                         [QOS_MIN]               = VRATE_MIN_PPM,
601                         [QOS_MAX]               = VRATE_MAX_PPM,
602                 },
603                 .i_lcoefs                       = {
604                         [I_LCOEF_RBPS]          =     245855193,
605                         [I_LCOEF_RSEQIOPS]      =         61575,
606                         [I_LCOEF_RRANDIOPS]     =          6946,
607                         [I_LCOEF_WBPS]          =     141365009,
608                         [I_LCOEF_WSEQIOPS]      =         33716,
609                         [I_LCOEF_WRANDIOPS]     =         26796,
610                 },
611         },
612         [AUTOP_SSD_DFL] = {
613                 .qos                            = {
614                         [QOS_RLAT]              =         25000, /* 25ms */
615                         [QOS_WLAT]              =         25000,
616                         [QOS_MIN]               = VRATE_MIN_PPM,
617                         [QOS_MAX]               = VRATE_MAX_PPM,
618                 },
619                 .i_lcoefs                       = {
620                         [I_LCOEF_RBPS]          =     488636629,
621                         [I_LCOEF_RSEQIOPS]      =          8932,
622                         [I_LCOEF_RRANDIOPS]     =          8518,
623                         [I_LCOEF_WBPS]          =     427891549,
624                         [I_LCOEF_WSEQIOPS]      =         28755,
625                         [I_LCOEF_WRANDIOPS]     =         21940,
626                 },
627                 .too_fast_vrate_pct             =           500,
628         },
629         [AUTOP_SSD_FAST] = {
630                 .qos                            = {
631                         [QOS_RLAT]              =          5000, /* 5ms */
632                         [QOS_WLAT]              =          5000,
633                         [QOS_MIN]               = VRATE_MIN_PPM,
634                         [QOS_MAX]               = VRATE_MAX_PPM,
635                 },
636                 .i_lcoefs                       = {
637                         [I_LCOEF_RBPS]          =    3102524156LLU,
638                         [I_LCOEF_RSEQIOPS]      =        724816,
639                         [I_LCOEF_RRANDIOPS]     =        778122,
640                         [I_LCOEF_WBPS]          =    1742780862LLU,
641                         [I_LCOEF_WSEQIOPS]      =        425702,
642                         [I_LCOEF_WRANDIOPS]     =        443193,
643                 },
644                 .too_slow_vrate_pct             =            10,
645         },
646 };
647
648 /*
649  * vrate adjust percentages indexed by ioc->busy_level.  We adjust up on
650  * vtime credit shortage and down on device saturation.
651  */
652 static u32 vrate_adj_pct[] =
653         { 0, 0, 0, 0,
654           1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
655           2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
656           4, 4, 4, 4, 4, 4, 4, 4, 8, 8, 8, 8, 8, 8, 8, 8, 16 };
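/*
 * For example, a busy_level of -20 indexes the 2 above and speeds the
 * vtime rate up by 2% for the period, while +20 slows it down by 2%;
 * see ioc_adjust_base_vrate().
 */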
657
658 static struct blkcg_policy blkcg_policy_iocost;
659
660 /* accessors and helpers */
661 static struct ioc *rqos_to_ioc(struct rq_qos *rqos)
662 {
663         return container_of(rqos, struct ioc, rqos);
664 }
665
666 static struct ioc *q_to_ioc(struct request_queue *q)
667 {
668         return rqos_to_ioc(rq_qos_id(q, RQ_QOS_COST));
669 }
670
671 static const char __maybe_unused *ioc_name(struct ioc *ioc)
672 {
673         struct gendisk *disk = ioc->rqos.q->disk;
674
675         if (!disk)
676                 return "<unknown>";
677         return disk->disk_name;
678 }
679
680 static struct ioc_gq *pd_to_iocg(struct blkg_policy_data *pd)
681 {
682         return pd ? container_of(pd, struct ioc_gq, pd) : NULL;
683 }
684
685 static struct ioc_gq *blkg_to_iocg(struct blkcg_gq *blkg)
686 {
687         return pd_to_iocg(blkg_to_pd(blkg, &blkcg_policy_iocost));
688 }
689
690 static struct blkcg_gq *iocg_to_blkg(struct ioc_gq *iocg)
691 {
692         return pd_to_blkg(&iocg->pd);
693 }
694
695 static struct ioc_cgrp *blkcg_to_iocc(struct blkcg *blkcg)
696 {
697         return container_of(blkcg_to_cpd(blkcg, &blkcg_policy_iocost),
698                             struct ioc_cgrp, cpd);
699 }
700
701 /*
702  * Scale @abs_cost to the inverse of @hw_inuse.  The lower the hierarchical
703  * weight, the more expensive each IO.  Must round up.
704  */
705 static u64 abs_cost_to_cost(u64 abs_cost, u32 hw_inuse)
706 {
707         return DIV64_U64_ROUND_UP(abs_cost * WEIGHT_ONE, hw_inuse);
708 }
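
/*
 * For example, with a 50% hierarchical share (hw_inuse == WEIGHT_ONE/2),
 * each IO is charged twice its device-absolute cost, so the iocg burns
 * through its vtime budget at twice the rate - the scaling that turns
 * hweights into proportional IO capacity.
 */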
709
710 /*
711  * The inverse of abs_cost_to_cost().  Must round up.
712  */
713 static u64 cost_to_abs_cost(u64 cost, u32 hw_inuse)
714 {
715         return DIV64_U64_ROUND_UP(cost * hw_inuse, WEIGHT_ONE);
716 }
717
718 static void iocg_commit_bio(struct ioc_gq *iocg, struct bio *bio,
719                             u64 abs_cost, u64 cost)
720 {
721         struct iocg_pcpu_stat *gcs;
722
723         bio->bi_iocost_cost = cost;
724         atomic64_add(cost, &iocg->vtime);
725
726         gcs = get_cpu_ptr(iocg->pcpu_stat);
727         local64_add(abs_cost, &gcs->abs_vusage);
728         put_cpu_ptr(gcs);
729 }
730
731 static void iocg_lock(struct ioc_gq *iocg, bool lock_ioc, unsigned long *flags)
732 {
733         if (lock_ioc) {
734                 spin_lock_irqsave(&iocg->ioc->lock, *flags);
735                 spin_lock(&iocg->waitq.lock);
736         } else {
737                 spin_lock_irqsave(&iocg->waitq.lock, *flags);
738         }
739 }
740
741 static void iocg_unlock(struct ioc_gq *iocg, bool unlock_ioc, unsigned long *flags)
742 {
743         if (unlock_ioc) {
744                 spin_unlock(&iocg->waitq.lock);
745                 spin_unlock_irqrestore(&iocg->ioc->lock, *flags);
746         } else {
747                 spin_unlock_irqrestore(&iocg->waitq.lock, *flags);
748         }
749 }
750
751 #define CREATE_TRACE_POINTS
752 #include <trace/events/iocost.h>
753
754 static void ioc_refresh_margins(struct ioc *ioc)
755 {
756         struct ioc_margins *margins = &ioc->margins;
757         u32 period_us = ioc->period_us;
758         u64 vrate = ioc->vtime_base_rate;
759
760         margins->min = (period_us * MARGIN_MIN_PCT / 100) * vrate;
761         margins->low = (period_us * MARGIN_LOW_PCT / 100) * vrate;
762         margins->target = (period_us * MARGIN_TARGET_PCT / 100) * vrate;
763 }
764
 765 /* latency QoS params changed, update period_us and all the dependent params */
766 static void ioc_refresh_period_us(struct ioc *ioc)
767 {
768         u32 ppm, lat, multi, period_us;
769
770         lockdep_assert_held(&ioc->lock);
771
772         /* pick the higher latency target */
773         if (ioc->params.qos[QOS_RLAT] >= ioc->params.qos[QOS_WLAT]) {
774                 ppm = ioc->params.qos[QOS_RPPM];
775                 lat = ioc->params.qos[QOS_RLAT];
776         } else {
777                 ppm = ioc->params.qos[QOS_WPPM];
778                 lat = ioc->params.qos[QOS_WLAT];
779         }
780
781         /*
782          * We want the period to be long enough to contain a healthy number
783          * of IOs while short enough for granular control.  Define it as a
784          * multiple of the latency target.  Ideally, the multiplier should
785          * be scaled according to the percentile so that it would nominally
786          * contain a certain number of requests.  Let's be simpler and
787          * scale it linearly so that it's 2x >= pct(90) and 10x at pct(50).
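          *
          * E.g. with a 95th percentile read latency target of 25ms
          * (ppm = 950000), multi = max((1000000 - 950000) / 50000, 2)
          * = 2 and the period comes out to 50ms, clamped to
          * [MIN_PERIOD, MAX_PERIOD].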
788          */
789         if (ppm)
790                 multi = max_t(u32, (MILLION - ppm) / 50000, 2);
791         else
792                 multi = 2;
793         period_us = multi * lat;
794         period_us = clamp_t(u32, period_us, MIN_PERIOD, MAX_PERIOD);
795
796         /* calculate dependent params */
797         ioc->period_us = period_us;
798         ioc->timer_slack_ns = div64_u64(
799                 (u64)period_us * NSEC_PER_USEC * TIMER_SLACK_PCT,
800                 100);
801         ioc_refresh_margins(ioc);
802 }
803
804 static int ioc_autop_idx(struct ioc *ioc)
805 {
806         int idx = ioc->autop_idx;
807         const struct ioc_params *p = &autop[idx];
808         u32 vrate_pct;
809         u64 now_ns;
810
811         /* rotational? */
812         if (!blk_queue_nonrot(ioc->rqos.q))
813                 return AUTOP_HDD;
814
815         /* handle SATA SSDs w/ broken NCQ */
816         if (blk_queue_depth(ioc->rqos.q) == 1)
817                 return AUTOP_SSD_QD1;
818
819         /* use one of the normal ssd sets */
820         if (idx < AUTOP_SSD_DFL)
821                 return AUTOP_SSD_DFL;
822
823         /* if user is overriding anything, maintain what was there */
824         if (ioc->user_qos_params || ioc->user_cost_model)
825                 return idx;
826
827         /* step up/down based on the vrate */
828         vrate_pct = div64_u64(ioc->vtime_base_rate * 100, VTIME_PER_USEC);
829         now_ns = ktime_get_ns();
830
831         if (p->too_fast_vrate_pct && p->too_fast_vrate_pct <= vrate_pct) {
832                 if (!ioc->autop_too_fast_at)
833                         ioc->autop_too_fast_at = now_ns;
834                 if (now_ns - ioc->autop_too_fast_at >= AUTOP_CYCLE_NSEC)
835                         return idx + 1;
836         } else {
837                 ioc->autop_too_fast_at = 0;
838         }
839
840         if (p->too_slow_vrate_pct && p->too_slow_vrate_pct >= vrate_pct) {
841                 if (!ioc->autop_too_slow_at)
842                         ioc->autop_too_slow_at = now_ns;
843                 if (now_ns - ioc->autop_too_slow_at >= AUTOP_CYCLE_NSEC)
844                         return idx - 1;
845         } else {
846                 ioc->autop_too_slow_at = 0;
847         }
848
849         return idx;
850 }
851
852 /*
 853  * Take the following as input
854  *
855  *  @bps        maximum sequential throughput
856  *  @seqiops    maximum sequential 4k iops
857  *  @randiops   maximum random 4k iops
858  *
859  * and calculate the linear model cost coefficients.
860  *
861  *  *@page      per-page cost           1s / (@bps / 4096)
862  *  *@seqio     base cost of a seq IO   max((1s / @seqiops) - *@page, 0)
 863  *  *@randio    base cost of a rand IO  max((1s / @randiops) - *@page, 0)
864  */
865 static void calc_lcoefs(u64 bps, u64 seqiops, u64 randiops,
866                         u64 *page, u64 *seqio, u64 *randio)
867 {
868         u64 v;
869
870         *page = *seqio = *randio = 0;
871
872         if (bps) {
873                 u64 bps_pages = DIV_ROUND_UP_ULL(bps, IOC_PAGE_SIZE);
874
875                 if (bps_pages)
876                         *page = DIV64_U64_ROUND_UP(VTIME_PER_SEC, bps_pages);
877                 else
878                         *page = 1;
879         }
880
881         if (seqiops) {
882                 v = DIV64_U64_ROUND_UP(VTIME_PER_SEC, seqiops);
883                 if (v > *page)
884                         *seqio = v - *page;
885         }
886
887         if (randiops) {
888                 v = DIV64_U64_ROUND_UP(VTIME_PER_SEC, randiops);
889                 if (v > *page)
890                         *randio = v - *page;
891         }
892 }
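
/*
 * For example, plugging in the AUTOP_HDD read parameters above
 * (rbps=174019176, rseqiops=41708, rrandiops=370):
 *
 *   *page   = 1s / (174019176 / 4096)     - cost of one 4k page
 *   *seqio  = max(1s / 41708 - *page, 0)  - nearly free beyond the pages
 *   *randio = max(1s / 370 - *page, 0)    - dominated by the seek
 *
 * which gives a random IO a base cost orders of magnitude above a
 * sequential one's, as expected for a seek-bound disk.
 */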
893
894 static void ioc_refresh_lcoefs(struct ioc *ioc)
895 {
896         u64 *u = ioc->params.i_lcoefs;
897         u64 *c = ioc->params.lcoefs;
898
899         calc_lcoefs(u[I_LCOEF_RBPS], u[I_LCOEF_RSEQIOPS], u[I_LCOEF_RRANDIOPS],
900                     &c[LCOEF_RPAGE], &c[LCOEF_RSEQIO], &c[LCOEF_RRANDIO]);
901         calc_lcoefs(u[I_LCOEF_WBPS], u[I_LCOEF_WSEQIOPS], u[I_LCOEF_WRANDIOPS],
902                     &c[LCOEF_WPAGE], &c[LCOEF_WSEQIO], &c[LCOEF_WRANDIO]);
903 }
904
905 static bool ioc_refresh_params(struct ioc *ioc, bool force)
906 {
907         const struct ioc_params *p;
908         int idx;
909
910         lockdep_assert_held(&ioc->lock);
911
912         idx = ioc_autop_idx(ioc);
913         p = &autop[idx];
914
915         if (idx == ioc->autop_idx && !force)
916                 return false;
917
918         if (idx != ioc->autop_idx)
919                 atomic64_set(&ioc->vtime_rate, VTIME_PER_USEC);
920
921         ioc->autop_idx = idx;
922         ioc->autop_too_fast_at = 0;
923         ioc->autop_too_slow_at = 0;
924
925         if (!ioc->user_qos_params)
926                 memcpy(ioc->params.qos, p->qos, sizeof(p->qos));
927         if (!ioc->user_cost_model)
928                 memcpy(ioc->params.i_lcoefs, p->i_lcoefs, sizeof(p->i_lcoefs));
929
930         ioc_refresh_period_us(ioc);
931         ioc_refresh_lcoefs(ioc);
932
933         ioc->vrate_min = DIV64_U64_ROUND_UP((u64)ioc->params.qos[QOS_MIN] *
934                                             VTIME_PER_USEC, MILLION);
935         ioc->vrate_max = div64_u64((u64)ioc->params.qos[QOS_MAX] *
936                                    VTIME_PER_USEC, MILLION);
937
938         return true;
939 }
940
941 /*
942  * When an iocg accumulates too much vtime or gets deactivated, we throw away
943  * some vtime, which lowers the overall device utilization. As the exact amount
944  * which is being thrown away is known, we can compensate by accelerating the
945  * vrate accordingly so that the extra vtime generated in the current period
946  * matches what got lost.
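 *
 * For example, if vtime_err records 10ms worth of lost vtime and 20ms of
 * the period remains, vcomp works out to half the base rate, so vtime runs
 * at 150% for the rest of the period (within the clamp of -50%/+100% of
 * the base rate applied below).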
947  */
948 static void ioc_refresh_vrate(struct ioc *ioc, struct ioc_now *now)
949 {
950         s64 pleft = ioc->period_at + ioc->period_us - now->now;
951         s64 vperiod = ioc->period_us * ioc->vtime_base_rate;
952         s64 vcomp, vcomp_min, vcomp_max;
953
954         lockdep_assert_held(&ioc->lock);
955
956         /* we need some time left in this period */
957         if (pleft <= 0)
958                 goto done;
959
960         /*
961          * Calculate how much vrate should be adjusted to offset the error.
962          * Limit the amount of adjustment and deduct the adjusted amount from
963          * the error.
964          */
965         vcomp = -div64_s64(ioc->vtime_err, pleft);
966         vcomp_min = -(ioc->vtime_base_rate >> 1);
967         vcomp_max = ioc->vtime_base_rate;
968         vcomp = clamp(vcomp, vcomp_min, vcomp_max);
969
970         ioc->vtime_err += vcomp * pleft;
971
972         atomic64_set(&ioc->vtime_rate, ioc->vtime_base_rate + vcomp);
973 done:
974         /* bound how much error can accumulate */
975         ioc->vtime_err = clamp(ioc->vtime_err, -vperiod, vperiod);
976 }
977
978 static void ioc_adjust_base_vrate(struct ioc *ioc, u32 rq_wait_pct,
979                                   int nr_lagging, int nr_shortages,
980                                   int prev_busy_level, u32 *missed_ppm)
981 {
982         u64 vrate = ioc->vtime_base_rate;
983         u64 vrate_min = ioc->vrate_min, vrate_max = ioc->vrate_max;
984
985         if (!ioc->busy_level || (ioc->busy_level < 0 && nr_lagging)) {
986                 if (ioc->busy_level != prev_busy_level || nr_lagging)
987                         trace_iocost_ioc_vrate_adj(ioc, atomic64_read(&ioc->vtime_rate),
988                                                    missed_ppm, rq_wait_pct,
989                                                    nr_lagging, nr_shortages);
990
991                 return;
992         }
993
994         /*
995          * If vrate is out of bounds, apply clamp gradually as the
996          * bounds can change abruptly.  Otherwise, apply busy_level
997          * based adjustment.
998          */
999         if (vrate < vrate_min) {
1000                 vrate = div64_u64(vrate * (100 + VRATE_CLAMP_ADJ_PCT), 100);
1001                 vrate = min(vrate, vrate_min);
1002         } else if (vrate > vrate_max) {
1003                 vrate = div64_u64(vrate * (100 - VRATE_CLAMP_ADJ_PCT), 100);
1004                 vrate = max(vrate, vrate_max);
1005         } else {
1006                 int idx = min_t(int, abs(ioc->busy_level),
1007                                 ARRAY_SIZE(vrate_adj_pct) - 1);
1008                 u32 adj_pct = vrate_adj_pct[idx];
1009
1010                 if (ioc->busy_level > 0)
1011                         adj_pct = 100 - adj_pct;
1012                 else
1013                         adj_pct = 100 + adj_pct;
1014
1015                 vrate = clamp(DIV64_U64_ROUND_UP(vrate * adj_pct, 100),
1016                               vrate_min, vrate_max);
1017         }
1018
1019         trace_iocost_ioc_vrate_adj(ioc, vrate, missed_ppm, rq_wait_pct,
1020                                    nr_lagging, nr_shortages);
1021
1022         ioc->vtime_base_rate = vrate;
1023         ioc_refresh_margins(ioc);
1024 }
1025
1026 /* take a snapshot of the current [v]time and vrate */
1027 static void ioc_now(struct ioc *ioc, struct ioc_now *now)
1028 {
1029         unsigned seq;
1030
1031         now->now_ns = ktime_get();
1032         now->now = ktime_to_us(now->now_ns);
1033         now->vrate = atomic64_read(&ioc->vtime_rate);
1034
1035         /*
1036          * The current vtime is
1037          *
1038          *   vtime at period start + (wallclock time since the start) * vrate
1039          *
1040          * As a consistent snapshot of `period_at_vtime` and `period_at` is
1041          * needed, they're seqcount protected.
1042          */
1043         do {
1044                 seq = read_seqcount_begin(&ioc->period_seqcount);
1045                 now->vnow = ioc->period_at_vtime +
1046                         (now->now - ioc->period_at) * now->vrate;
1047         } while (read_seqcount_retry(&ioc->period_seqcount, seq));
1048 }
1049
1050 static void ioc_start_period(struct ioc *ioc, struct ioc_now *now)
1051 {
1052         WARN_ON_ONCE(ioc->running != IOC_RUNNING);
1053
1054         write_seqcount_begin(&ioc->period_seqcount);
1055         ioc->period_at = now->now;
1056         ioc->period_at_vtime = now->vnow;
1057         write_seqcount_end(&ioc->period_seqcount);
1058
1059         ioc->timer.expires = jiffies + usecs_to_jiffies(ioc->period_us);
1060         add_timer(&ioc->timer);
1061 }
1062
1063 /*
1064  * Update @iocg's `active` and `inuse` to @active and @inuse, update level
1065  * weight sums and propagate upwards accordingly. If @save, the current margin
1066  * is saved to be used as reference for later inuse in-period adjustments.
1067  */
1068 static void __propagate_weights(struct ioc_gq *iocg, u32 active, u32 inuse,
1069                                 bool save, struct ioc_now *now)
1070 {
1071         struct ioc *ioc = iocg->ioc;
1072         int lvl;
1073
1074         lockdep_assert_held(&ioc->lock);
1075
1076         /*
1077          * For an active leaf node, its inuse shouldn't be zero or exceed
1078          * @active. An active internal node's inuse is solely determined by the
1079          * inuse to active ratio of its children regardless of @inuse.
1080          */
1081         if (list_empty(&iocg->active_list) && iocg->child_active_sum) {
1082                 inuse = DIV64_U64_ROUND_UP(active * iocg->child_inuse_sum,
1083                                            iocg->child_active_sum);
1084         } else {
1085                 inuse = clamp_t(u32, inuse, 1, active);
1086         }
1087
1088         iocg->last_inuse = iocg->inuse;
1089         if (save)
1090                 iocg->saved_margin = now->vnow - atomic64_read(&iocg->vtime);
1091
1092         if (active == iocg->active && inuse == iocg->inuse)
1093                 return;
1094
1095         for (lvl = iocg->level - 1; lvl >= 0; lvl--) {
1096                 struct ioc_gq *parent = iocg->ancestors[lvl];
1097                 struct ioc_gq *child = iocg->ancestors[lvl + 1];
1098                 u32 parent_active = 0, parent_inuse = 0;
1099
1100                 /* update the level sums */
1101                 parent->child_active_sum += (s32)(active - child->active);
1102                 parent->child_inuse_sum += (s32)(inuse - child->inuse);
1103                 /* apply the updates */
1104                 child->active = active;
1105                 child->inuse = inuse;
1106
1107                 /*
 1108                  * The delta between inuse and active sums indicates how
 1109                  * much of the weight is being given away.  Parent's inuse
 1110                  * and active should reflect the ratio.
1111                  */
1112                 if (parent->child_active_sum) {
1113                         parent_active = parent->weight;
1114                         parent_inuse = DIV64_U64_ROUND_UP(
1115                                 parent_active * parent->child_inuse_sum,
1116                                 parent->child_active_sum);
1117                 }
1118
1119                 /* do we need to keep walking up? */
1120                 if (parent_active == parent->active &&
1121                     parent_inuse == parent->inuse)
1122                         break;
1123
1124                 active = parent_active;
1125                 inuse = parent_inuse;
1126         }
1127
1128         ioc->weights_updated = true;
1129 }
1130
1131 static void commit_weights(struct ioc *ioc)
1132 {
1133         lockdep_assert_held(&ioc->lock);
1134
1135         if (ioc->weights_updated) {
1136                 /* paired with rmb in current_hweight(), see there */
1137                 smp_wmb();
1138                 atomic_inc(&ioc->hweight_gen);
1139                 ioc->weights_updated = false;
1140         }
1141 }
1142
1143 static void propagate_weights(struct ioc_gq *iocg, u32 active, u32 inuse,
1144                               bool save, struct ioc_now *now)
1145 {
1146         __propagate_weights(iocg, active, inuse, save, now);
1147         commit_weights(iocg->ioc);
1148 }
1149
1150 static void current_hweight(struct ioc_gq *iocg, u32 *hw_activep, u32 *hw_inusep)
1151 {
1152         struct ioc *ioc = iocg->ioc;
1153         int lvl;
1154         u32 hwa, hwi;
1155         int ioc_gen;
1156
1157         /* hot path - if uptodate, use cached */
1158         ioc_gen = atomic_read(&ioc->hweight_gen);
1159         if (ioc_gen == iocg->hweight_gen)
1160                 goto out;
1161
1162         /*
1163          * Paired with wmb in commit_weights(). If we saw the updated
1164          * hweight_gen, all the weight updates from __propagate_weights() are
1165          * visible too.
1166          *
1167          * We can race with weight updates during calculation and get it
1168          * wrong.  However, hweight_gen would have changed and a future
1169          * reader will recalculate and we're guaranteed to discard the
1170          * wrong result soon.
1171          */
1172         smp_rmb();
1173
1174         hwa = hwi = WEIGHT_ONE;
1175         for (lvl = 0; lvl <= iocg->level - 1; lvl++) {
1176                 struct ioc_gq *parent = iocg->ancestors[lvl];
1177                 struct ioc_gq *child = iocg->ancestors[lvl + 1];
1178                 u64 active_sum = READ_ONCE(parent->child_active_sum);
1179                 u64 inuse_sum = READ_ONCE(parent->child_inuse_sum);
1180                 u32 active = READ_ONCE(child->active);
1181                 u32 inuse = READ_ONCE(child->inuse);
1182
1183                 /* we can race with deactivations and either may read as zero */
1184                 if (!active_sum || !inuse_sum)
1185                         continue;
1186
1187                 active_sum = max_t(u64, active, active_sum);
1188                 hwa = div64_u64((u64)hwa * active, active_sum);
1189
1190                 inuse_sum = max_t(u64, inuse, inuse_sum);
1191                 hwi = div64_u64((u64)hwi * inuse, inuse_sum);
1192         }
1193
1194         iocg->hweight_active = max_t(u32, hwa, 1);
1195         iocg->hweight_inuse = max_t(u32, hwi, 1);
1196         iocg->hweight_gen = ioc_gen;
1197 out:
1198         if (hw_activep)
1199                 *hw_activep = iocg->hweight_active;
1200         if (hw_inusep)
1201                 *hw_inusep = iocg->hweight_inuse;
1202 }
1203
1204 /*
1205  * Calculate the hweight_inuse @iocg would get with max @inuse assuming all the
1206  * other weights stay unchanged.
1207  */
1208 static u32 current_hweight_max(struct ioc_gq *iocg)
1209 {
1210         u32 hwm = WEIGHT_ONE;
1211         u32 inuse = iocg->active;
1212         u64 child_inuse_sum;
1213         int lvl;
1214
1215         lockdep_assert_held(&iocg->ioc->lock);
1216
1217         for (lvl = iocg->level - 1; lvl >= 0; lvl--) {
1218                 struct ioc_gq *parent = iocg->ancestors[lvl];
1219                 struct ioc_gq *child = iocg->ancestors[lvl + 1];
1220
1221                 child_inuse_sum = parent->child_inuse_sum + inuse - child->inuse;
1222                 hwm = div64_u64((u64)hwm * inuse, child_inuse_sum);
1223                 inuse = DIV64_U64_ROUND_UP(parent->active * child_inuse_sum,
1224                                            parent->child_active_sum);
1225         }
1226
1227         return max_t(u32, hwm, 1);
1228 }
1229
1230 static void weight_updated(struct ioc_gq *iocg, struct ioc_now *now)
1231 {
1232         struct ioc *ioc = iocg->ioc;
1233         struct blkcg_gq *blkg = iocg_to_blkg(iocg);
1234         struct ioc_cgrp *iocc = blkcg_to_iocc(blkg->blkcg);
1235         u32 weight;
1236
1237         lockdep_assert_held(&ioc->lock);
1238
1239         weight = iocg->cfg_weight ?: iocc->dfl_weight;
1240         if (weight != iocg->weight && iocg->active)
1241                 propagate_weights(iocg, weight, iocg->inuse, true, now);
1242         iocg->weight = weight;
1243 }
1244
1245 static bool iocg_activate(struct ioc_gq *iocg, struct ioc_now *now)
1246 {
1247         struct ioc *ioc = iocg->ioc;
1248         u64 last_period, cur_period;
1249         u64 vtime, vtarget;
1250         int i;
1251
1252         /*
 1253          * If we seem to be already active, just update the stamp to tell
 1254          * the timer that we're still active.  We don't mind occasional races.
1255          */
1256         if (!list_empty(&iocg->active_list)) {
1257                 ioc_now(ioc, now);
1258                 cur_period = atomic64_read(&ioc->cur_period);
1259                 if (atomic64_read(&iocg->active_period) != cur_period)
1260                         atomic64_set(&iocg->active_period, cur_period);
1261                 return true;
1262         }
1263
1264         /* racy check on internal node IOs, treat as root level IOs */
1265         if (iocg->child_active_sum)
1266                 return false;
1267
1268         spin_lock_irq(&ioc->lock);
1269
1270         ioc_now(ioc, now);
1271
1272         /* update period */
1273         cur_period = atomic64_read(&ioc->cur_period);
1274         last_period = atomic64_read(&iocg->active_period);
1275         atomic64_set(&iocg->active_period, cur_period);
1276
1277         /* already activated or breaking leaf-only constraint? */
1278         if (!list_empty(&iocg->active_list))
1279                 goto succeed_unlock;
1280         for (i = iocg->level - 1; i > 0; i--)
1281                 if (!list_empty(&iocg->ancestors[i]->active_list))
1282                         goto fail_unlock;
1283
1284         if (iocg->child_active_sum)
1285                 goto fail_unlock;
1286
1287         /*
1288          * Always start with the target budget. On deactivation, we throw away
1289          * anything above it.
1290          */
1291         vtarget = now->vnow - ioc->margins.target;
1292         vtime = atomic64_read(&iocg->vtime);
1293
1294         atomic64_add(vtarget - vtime, &iocg->vtime);
1295         atomic64_add(vtarget - vtime, &iocg->done_vtime);
1296         vtime = vtarget;
1297
1298         /*
1299          * Activate, propagate weight and start period timer if not
1300          * running.  Reset hweight_gen to avoid accidental match from
1301          * wrapping.
1302          */
1303         iocg->hweight_gen = atomic_read(&ioc->hweight_gen) - 1;
1304         list_add(&iocg->active_list, &ioc->active_iocgs);
1305
1306         propagate_weights(iocg, iocg->weight,
1307                           iocg->last_inuse ?: iocg->weight, true, now);
1308
1309         TRACE_IOCG_PATH(iocg_activate, iocg, now,
1310                         last_period, cur_period, vtime);
1311
1312         iocg->activated_at = now->now;
1313
1314         if (ioc->running == IOC_IDLE) {
1315                 ioc->running = IOC_RUNNING;
1316                 ioc->dfgv_period_at = now->now;
1317                 ioc->dfgv_period_rem = 0;
1318                 ioc_start_period(ioc, now);
1319         }
1320
1321 succeed_unlock:
1322         spin_unlock_irq(&ioc->lock);
1323         return true;
1324
1325 fail_unlock:
1326         spin_unlock_irq(&ioc->lock);
1327         return false;
1328 }
1329
1330 static bool iocg_kick_delay(struct ioc_gq *iocg, struct ioc_now *now)
1331 {
1332         struct ioc *ioc = iocg->ioc;
1333         struct blkcg_gq *blkg = iocg_to_blkg(iocg);
1334         u64 tdelta, delay, new_delay, shift;
1335         s64 vover, vover_pct;
1336         u32 hwa;
1337
1338         lockdep_assert_held(&iocg->waitq.lock);
1339
1340         /*
1341          * If the delay is set by another CPU, we may be in the past. No need to
1342          * change anything if so. This avoids decay calculation underflow.
1343          */
1344         if (time_before64(now->now, iocg->delay_at))
1345                 return false;
1346
1347         /* calculate the current delay in effect - 1/2 every second */
1348         tdelta = now->now - iocg->delay_at;
1349         shift = div64_u64(tdelta, USEC_PER_SEC);
1350         if (iocg->delay && shift < BITS_PER_LONG)
1351                 delay = iocg->delay >> shift;
1352         else
1353                 delay = 0;
1354
1355         /* calculate the new delay from the debt amount */
1356         current_hweight(iocg, &hwa, NULL);
1357         vover = atomic64_read(&iocg->vtime) +
1358                 abs_cost_to_cost(iocg->abs_vdebt, hwa) - now->vnow;
1359         vover_pct = div64_s64(100 * vover,
1360                               ioc->period_us * ioc->vtime_base_rate);
1361
1362         if (vover_pct <= MIN_DELAY_THR_PCT)
1363                 new_delay = 0;
1364         else if (vover_pct >= MAX_DELAY_THR_PCT)
1365                 new_delay = MAX_DELAY;
1366         else
1367                 new_delay = MIN_DELAY +
1368                         div_u64((MAX_DELAY - MIN_DELAY) *
1369                                 (vover_pct - MIN_DELAY_THR_PCT),
1370                                 MAX_DELAY_THR_PCT - MIN_DELAY_THR_PCT);
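        /*
         * Between the thresholds, new_delay interpolates linearly from
         * MIN_DELAY to MAX_DELAY - e.g. a vover_pct exactly halfway between
         * MIN_DELAY_THR_PCT and MAX_DELAY_THR_PCT yields
         * MIN_DELAY + (MAX_DELAY - MIN_DELAY) / 2.
         */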
1371
1372         /* pick the higher one and apply */
1373         if (new_delay > delay) {
1374                 iocg->delay = new_delay;
1375                 iocg->delay_at = now->now;
1376                 delay = new_delay;
1377         }
1378
1379         if (delay >= MIN_DELAY) {
1380                 if (!iocg->indelay_since)
1381                         iocg->indelay_since = now->now;
1382                 blkcg_set_delay(blkg, delay * NSEC_PER_USEC);
1383                 return true;
1384         } else {
1385                 if (iocg->indelay_since) {
1386                         iocg->stat.indelay_us += now->now - iocg->indelay_since;
1387                         iocg->indelay_since = 0;
1388                 }
1389                 iocg->delay = 0;
1390                 blkcg_clear_delay(blkg);
1391                 return false;
1392         }
1393 }
1394
1395 static void iocg_incur_debt(struct ioc_gq *iocg, u64 abs_cost,
1396                             struct ioc_now *now)
1397 {
1398         struct iocg_pcpu_stat *gcs;
1399
1400         lockdep_assert_held(&iocg->ioc->lock);
1401         lockdep_assert_held(&iocg->waitq.lock);
1402         WARN_ON_ONCE(list_empty(&iocg->active_list));
1403
1404         /*
1405          * Once in debt, debt handling owns inuse. @iocg stays at the minimum
1406          * inuse, donating all of its share to others until its debt is paid off.
1407          */
1408         if (!iocg->abs_vdebt && abs_cost) {
1409                 iocg->indebt_since = now->now;
1410                 propagate_weights(iocg, iocg->active, 0, false, now);
1411         }
1412
1413         iocg->abs_vdebt += abs_cost;
1414
1415         gcs = get_cpu_ptr(iocg->pcpu_stat);
1416         local64_add(abs_cost, &gcs->abs_vusage);
1417         put_cpu_ptr(gcs);
1418 }
1419
1420 static void iocg_pay_debt(struct ioc_gq *iocg, u64 abs_vpay,
1421                           struct ioc_now *now)
1422 {
1423         lockdep_assert_held(&iocg->ioc->lock);
1424         lockdep_assert_held(&iocg->waitq.lock);
1425
1426         /* make sure that nobody messed with @iocg */
1427         WARN_ON_ONCE(list_empty(&iocg->active_list));
1428         WARN_ON_ONCE(iocg->inuse > 1);
1429
1430         iocg->abs_vdebt -= min(abs_vpay, iocg->abs_vdebt);
1431
1432         /* if debt is paid in full, restore inuse */
1433         if (!iocg->abs_vdebt) {
1434                 iocg->stat.indebt_us += now->now - iocg->indebt_since;
1435                 iocg->indebt_since = 0;
1436
1437                 propagate_weights(iocg, iocg->active, iocg->last_inuse,
1438                                   false, now);
1439         }
1440 }
1441
1442 static int iocg_wake_fn(struct wait_queue_entry *wq_entry, unsigned mode,
1443                         int flags, void *key)
1444 {
1445         struct iocg_wait *wait = container_of(wq_entry, struct iocg_wait, wait);
1446         struct iocg_wake_ctx *ctx = key;
1447         u64 cost = abs_cost_to_cost(wait->abs_cost, ctx->hw_inuse);
1448
1449         ctx->vbudget -= cost;
1450
1451         if (ctx->vbudget < 0)
1452                 return -1;
1453
1454         iocg_commit_bio(ctx->iocg, wait->bio, wait->abs_cost, cost);
1455         wait->committed = true;
1456
1457         /*
1458          * autoremove_wake_function() removes the wait entry only when it
1459          * actually changed the task state. We want the wait always removed.
1460          * Remove explicitly and use default_wake_function(). Note that the
1461          * order of operations is important as finish_wait() tests whether
1462          * @wq_entry is removed without grabbing the lock.
1463          */
1464         default_wake_function(wq_entry, mode, flags, key);
1465         list_del_init_careful(&wq_entry->entry);
1466         return 0;
1467 }
1468
1469 /*
1470  * Calculate the accumulated budget, pay debt if @pay_debt and wake up waiters
1471  * accordingly. When @pay_debt is %true, the caller must be holding ioc->lock in
1472  * addition to iocg->waitq.lock.
1473  */
1474 static void iocg_kick_waitq(struct ioc_gq *iocg, bool pay_debt,
1475                             struct ioc_now *now)
1476 {
1477         struct ioc *ioc = iocg->ioc;
1478         struct iocg_wake_ctx ctx = { .iocg = iocg };
1479         u64 vshortage, expires, oexpires;
1480         s64 vbudget;
1481         u32 hwa;
1482
1483         lockdep_assert_held(&iocg->waitq.lock);
1484
1485         current_hweight(iocg, &hwa, NULL);
1486         vbudget = now->vnow - atomic64_read(&iocg->vtime);
1487
1488         /* pay off debt */
1489         if (pay_debt && iocg->abs_vdebt && vbudget > 0) {
1490                 u64 abs_vbudget = cost_to_abs_cost(vbudget, hwa);
1491                 u64 abs_vpay = min_t(u64, abs_vbudget, iocg->abs_vdebt);
1492                 u64 vpay = abs_cost_to_cost(abs_vpay, hwa);
1493
1494                 lockdep_assert_held(&ioc->lock);
1495
1496                 atomic64_add(vpay, &iocg->vtime);
1497                 atomic64_add(vpay, &iocg->done_vtime);
1498                 iocg_pay_debt(iocg, abs_vpay, now);
1499                 vbudget -= vpay;
1500         }
1501
1502         if (iocg->abs_vdebt || iocg->delay)
1503                 iocg_kick_delay(iocg, now);
1504
1505         /*
1506          * Debt can still be outstanding if we haven't paid it all off yet or
1507          * the caller raced and called without @pay_debt. Waiters shouldn't be
1508          * woken up while in debt. Make sure @vbudget reflects the outstanding
1509          * amount and is not positive.
1510          */
1511         if (iocg->abs_vdebt) {
1512                 s64 vdebt = abs_cost_to_cost(iocg->abs_vdebt, hwa);
1513                 vbudget = min_t(s64, 0, vbudget - vdebt);
1514         }
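        /*
         * e.g. with vbudget at 100 and an outstanding vdebt of 150, vbudget
         * becomes min(0, -50) = -50 and the wakeup below stops immediately;
         * with vdebt at 60 it becomes min(0, 40) = 0, which still wakes
         * nobody as the first waiter's cost would drive it negative.
         */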
1515
1516         /*
1517          * Wake up the ones which are due and see how much vtime we'll need for
1518          * the next one. As paying off debt restores hw_inuse, it must be read
1519          * after the above debt payment.
1520          */
1521         ctx.vbudget = vbudget;
1522         current_hweight(iocg, NULL, &ctx.hw_inuse);
1523
1524         __wake_up_locked_key(&iocg->waitq, TASK_NORMAL, &ctx);
1525
1526         if (!waitqueue_active(&iocg->waitq)) {
1527                 if (iocg->wait_since) {
1528                         iocg->stat.wait_us += now->now - iocg->wait_since;
1529                         iocg->wait_since = 0;
1530                 }
1531                 return;
1532         }
1533
1534         if (!iocg->wait_since)
1535                 iocg->wait_since = now->now;
1536
1537         if (WARN_ON_ONCE(ctx.vbudget >= 0))
1538                 return;
1539
1540         /* determine next wakeup, add a timer margin to guarantee chunking */
1541         vshortage = -ctx.vbudget;
1542         expires = now->now_ns +
1543                 DIV64_U64_ROUND_UP(vshortage, ioc->vtime_base_rate) *
1544                 NSEC_PER_USEC;
1545         expires += ioc->timer_slack_ns;
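        /*
         * e.g. a vshortage of 10000 vtime units at a vtime_base_rate of 10
         * per usec puts the first waiter 1000us of wall time away; the timer
         * is armed at that point plus timer_slack_ns.
         */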
1546
1547         /* if already active and close enough, don't bother */
1548         oexpires = ktime_to_ns(hrtimer_get_softexpires(&iocg->waitq_timer));
1549         if (hrtimer_is_queued(&iocg->waitq_timer) &&
1550             abs(oexpires - expires) <= ioc->timer_slack_ns)
1551                 return;
1552
1553         hrtimer_start_range_ns(&iocg->waitq_timer, ns_to_ktime(expires),
1554                                ioc->timer_slack_ns, HRTIMER_MODE_ABS);
1555 }
1556
1557 static enum hrtimer_restart iocg_waitq_timer_fn(struct hrtimer *timer)
1558 {
1559         struct ioc_gq *iocg = container_of(timer, struct ioc_gq, waitq_timer);
1560         bool pay_debt = READ_ONCE(iocg->abs_vdebt);
1561         struct ioc_now now;
1562         unsigned long flags;
1563
1564         ioc_now(iocg->ioc, &now);
1565
1566         iocg_lock(iocg, pay_debt, &flags);
1567         iocg_kick_waitq(iocg, pay_debt, &now);
1568         iocg_unlock(iocg, pay_debt, &flags);
1569
1570         return HRTIMER_NORESTART;
1571 }
1572
1573 static void ioc_lat_stat(struct ioc *ioc, u32 *missed_ppm_ar, u32 *rq_wait_pct_p)
1574 {
1575         u32 nr_met[2] = { };
1576         u32 nr_missed[2] = { };
1577         u64 rq_wait_ns = 0;
1578         int cpu, rw;
1579
1580         for_each_online_cpu(cpu) {
1581                 struct ioc_pcpu_stat *stat = per_cpu_ptr(ioc->pcpu_stat, cpu);
1582                 u64 this_rq_wait_ns;
1583
1584                 for (rw = READ; rw <= WRITE; rw++) {
1585                         u32 this_met = local_read(&stat->missed[rw].nr_met);
1586                         u32 this_missed = local_read(&stat->missed[rw].nr_missed);
1587
1588                         nr_met[rw] += this_met - stat->missed[rw].last_met;
1589                         nr_missed[rw] += this_missed - stat->missed[rw].last_missed;
1590                         stat->missed[rw].last_met = this_met;
1591                         stat->missed[rw].last_missed = this_missed;
1592                 }
1593
1594                 this_rq_wait_ns = local64_read(&stat->rq_wait_ns);
1595                 rq_wait_ns += this_rq_wait_ns - stat->last_rq_wait_ns;
1596                 stat->last_rq_wait_ns = this_rq_wait_ns;
1597         }
1598
1599         for (rw = READ; rw <= WRITE; rw++) {
1600                 if (nr_met[rw] + nr_missed[rw])
1601                         missed_ppm_ar[rw] =
1602                                 DIV64_U64_ROUND_UP((u64)nr_missed[rw] * MILLION,
1603                                                    nr_met[rw] + nr_missed[rw]);
1604                 else
1605                         missed_ppm_ar[rw] = 0;
1606         }
1607
1608         *rq_wait_pct_p = div64_u64(rq_wait_ns * 100,
1609                                    ioc->period_us * NSEC_PER_USEC);
1610 }
1611
1612 /* was iocg idle this period? */
1613 static bool iocg_is_idle(struct ioc_gq *iocg)
1614 {
1615         struct ioc *ioc = iocg->ioc;
1616
1617         /* did something get issued this period? */
1618         if (atomic64_read(&iocg->active_period) ==
1619             atomic64_read(&ioc->cur_period))
1620                 return false;
1621
1622         /* is something in flight? */
1623         if (atomic64_read(&iocg->done_vtime) != atomic64_read(&iocg->vtime))
1624                 return false;
1625
1626         return true;
1627 }
1628
1629 /*
1630  * Call this function on the target leaf @iocgs to build a pre-order traversal
1631  * list of all the ancestors in @inner_walk. The inner nodes are linked through
1632  * ->walk_list and the caller is responsible for dissolving the list after use.
1633  */
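/*
 * For example, with root (level 0) -> P (level 1) -> leaves L0 and L1 (level
 * 2), a call on L0 appends root and then P, in pre-order; a subsequent call
 * on the sibling L1 finds P already visited and appends nothing new.
 */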
1634 static void iocg_build_inner_walk(struct ioc_gq *iocg,
1635                                   struct list_head *inner_walk)
1636 {
1637         int lvl;
1638
1639         WARN_ON_ONCE(!list_empty(&iocg->walk_list));
1640
1641         /* find the first ancestor which hasn't been visited yet */
1642         for (lvl = iocg->level - 1; lvl >= 0; lvl--) {
1643                 if (!list_empty(&iocg->ancestors[lvl]->walk_list))
1644                         break;
1645         }
1646
1647         /* walk down and visit the inner nodes to get pre-order traversal */
1648         while (++lvl <= iocg->level - 1) {
1649                 struct ioc_gq *inner = iocg->ancestors[lvl];
1650
1651                 /* record traversal order */
1652                 list_add_tail(&inner->walk_list, inner_walk);
1653         }
1654 }
1655
1656 /* propagate the deltas to the parent */
1657 static void iocg_flush_stat_upward(struct ioc_gq *iocg)
1658 {
1659         if (iocg->level > 0) {
1660                 struct iocg_stat *parent_stat =
1661                         &iocg->ancestors[iocg->level - 1]->stat;
1662
1663                 parent_stat->usage_us +=
1664                         iocg->stat.usage_us - iocg->last_stat.usage_us;
1665                 parent_stat->wait_us +=
1666                         iocg->stat.wait_us - iocg->last_stat.wait_us;
1667                 parent_stat->indebt_us +=
1668                         iocg->stat.indebt_us - iocg->last_stat.indebt_us;
1669                 parent_stat->indelay_us +=
1670                         iocg->stat.indelay_us - iocg->last_stat.indelay_us;
1671         }
1672
1673         iocg->last_stat = iocg->stat;
1674 }
1675
1676 /* collect per-cpu counters and propagate the deltas to the parent */
1677 static void iocg_flush_stat_leaf(struct ioc_gq *iocg, struct ioc_now *now)
1678 {
1679         struct ioc *ioc = iocg->ioc;
1680         u64 abs_vusage = 0;
1681         u64 vusage_delta;
1682         int cpu;
1683
1684         lockdep_assert_held(&iocg->ioc->lock);
1685
1686         /* collect per-cpu counters */
1687         for_each_possible_cpu(cpu) {
1688                 abs_vusage += local64_read(
1689                                 per_cpu_ptr(&iocg->pcpu_stat->abs_vusage, cpu));
1690         }
1691         vusage_delta = abs_vusage - iocg->last_stat_abs_vusage;
1692         iocg->last_stat_abs_vusage = abs_vusage;
1693
1694         iocg->usage_delta_us = div64_u64(vusage_delta, ioc->vtime_base_rate);
1695         iocg->stat.usage_us += iocg->usage_delta_us;
1696
1697         iocg_flush_stat_upward(iocg);
1698 }
1699
1700 /* get stat counters ready for reading on all active iocgs */
1701 static void iocg_flush_stat(struct list_head *target_iocgs, struct ioc_now *now)
1702 {
1703         LIST_HEAD(inner_walk);
1704         struct ioc_gq *iocg, *tiocg;
1705
1706         /* flush leaves and build inner node walk list */
1707         list_for_each_entry(iocg, target_iocgs, active_list) {
1708                 iocg_flush_stat_leaf(iocg, now);
1709                 iocg_build_inner_walk(iocg, &inner_walk);
1710         }
1711
1712         /* keep flushing upwards by walking the inner list backwards */
1713         list_for_each_entry_safe_reverse(iocg, tiocg, &inner_walk, walk_list) {
1714                 iocg_flush_stat_upward(iocg);
1715                 list_del_init(&iocg->walk_list);
1716         }
1717 }
1718
1719 /*
1720  * Determine what @iocg's hweight_inuse should be after donating unused
1721  * capacity. @hwm is the upper bound and is used to signal no donation. This
1722  * function also throws away @iocg's excess budget.
1723  */
1724 static u32 hweight_after_donation(struct ioc_gq *iocg, u32 old_hwi, u32 hwm,
1725                                   u32 usage, struct ioc_now *now)
1726 {
1727         struct ioc *ioc = iocg->ioc;
1728         u64 vtime = atomic64_read(&iocg->vtime);
1729         s64 excess, delta, target, new_hwi;
1730
1731         /* debt handling owns inuse for debtors */
1732         if (iocg->abs_vdebt)
1733                 return 1;
1734
1735         /* see whether minimum margin requirement is met */
1736         if (waitqueue_active(&iocg->waitq) ||
1737             time_after64(vtime, now->vnow - ioc->margins.min))
1738                 return hwm;
1739
1740         /* throw away excess above target */
1741         excess = now->vnow - vtime - ioc->margins.target;
1742         if (excess > 0) {
1743                 atomic64_add(excess, &iocg->vtime);
1744                 atomic64_add(excess, &iocg->done_vtime);
1745                 vtime += excess;
1746                 ioc->vtime_err -= div64_u64(excess * old_hwi, WEIGHT_ONE);
1747         }
1748
1749         /*
1750          * Let delta be the distance between the iocg's and the device's vtimes
1751          * as a fraction of the period duration. Assuming that the iocg will
1752          * consume the usage determined above, we want to determine new_hwi so
1753          * that delta equals MARGIN_TARGET at the end of the next period.
1754          *
1755          * We need to execute usage worth of IOs while spending the sum of the
1756          * new budget (1 - MARGIN_TARGET) and the leftover from the last period
1757          * (delta):
1758          *
1759          *   usage = (1 - MARGIN_TARGET + delta) * new_hwi
1760          *
1761          * Therefore, the new_hwi is:
1762          *
1763          *   new_hwi = usage / (1 - MARGIN_TARGET + delta)
1764          */
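        /*
         * Worked example (taking MARGIN_TARGET as 50% for illustration): an
         * iocg that used a quarter of the device (usage = WEIGHT_ONE / 4)
         * with a quarter period of budget left over (delta = 0.25) gets
         * new_hwi = 0.25 / (1 - 0.5 + 0.25) = WEIGHT_ONE / 3.
         */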
1765         delta = div64_s64(WEIGHT_ONE * (now->vnow - vtime),
1766                           now->vnow - ioc->period_at_vtime);
1767         target = WEIGHT_ONE * MARGIN_TARGET_PCT / 100;
1768         new_hwi = div64_s64(WEIGHT_ONE * usage, WEIGHT_ONE - target + delta);
1769
1770         return clamp_t(s64, new_hwi, 1, hwm);
1771 }
1772
1773 /*
1774  * For work-conservation, an iocg which isn't using all of its share should
1775  * donate the leftover to other iocgs. There are two ways to achieve this: 1.
1776  * bumping up vrate accordingly, or 2. lowering the donating iocg's inuse weight.
1777  *
1778  * #1 is mathematically simpler but has the drawback of requiring synchronous
1779  * global hweight_inuse updates when idle iocg's get activated or inuse weights
1780  * change due to donation snapbacks as it has the possibility of grossly
1781  * overshooting what's allowed by the model and vrate.
1782  *
1783  * #2 is inherently safe with local operations. The donating iocg can easily
1784  * snap back to higher weights when needed without worrying about impacts on
1785  * other nodes as the impacts will be inherently correct. This also makes idle
1786  * iocg activations safe. The only effect activations have is decreasing
1787  * hweight_inuse of others, the right solution to which is for those iocgs to
1788  * snap back to higher weights.
1789  *
1790  * So, we go with #2. The challenge is calculating how each donating iocg's
1791  * inuse should be adjusted to achieve the target donation amounts. This is done
1792  * using Andy's method described in the following pdf.
1793  *
1794  *   https://drive.google.com/file/d/1PsJwxPFtjUnwOY1QJ5AeICCcsL7BM3bo
1795  *
1796  * Given the weights and target after-donation hweight_inuse values, Andy's
1797  * method determines what the proportional distribution should look like at each
1798  * sibling level to maintain the relative relationship between all non-donating
1799  * pairs. To roughly summarize, it divides the tree into donating and
1800  * non-donating parts, calculates the global donation rate which is used to
1801  * determine the target hweight_inuse for each node, and then derives per-level
1802  * proportions.
1803  *
1804  * The following pdf shows that global distribution calculated this way can be
1805  * achieved by scaling inuse weights of donating leaves and propagating the
1806  * adjustments upwards proportionally.
1807  *
1808  *   https://drive.google.com/file/d/1vONz1-fzVO7oY5DXXsLjSxEtYYQbOvsE
1809  *
1810  * Combining the above two, we can determine how each leaf iocg's inuse should
1811  * be adjusted to achieve the target donation.
1812  *
1813  *   https://drive.google.com/file/d/1WcrltBOSPN0qXVdBgnKm4mdp9FhuEFQN
1814  *
1815  * The inline comments use symbols from the last pdf.
1816  *
1817  *   b is the sum of the absolute budgets in the subtree. 1 for the root node.
1818  *   f is the sum of the absolute budgets of non-donating nodes in the subtree.
1819  *   t is the sum of the absolute budgets of donating nodes in the subtree.
1820  *   w is the weight of the node. w = w_f + w_t
1821  *   w_f is the non-donating portion of w. w_f = w * f / b
1822  * w_t is the donating portion of w. w_t = w * t / b
1823  *   s is the sum of all sibling weights. s = Sum(w) for siblings
1824  *   s_f and s_t are the non-donating and donating portions of s.
1825  *
1826  * Subscript p denotes the parent's counterpart and ' the adjusted value - e.g.
1827  * w_pt is the donating portion of the parent's weight and w'_pt the same value
1828  * after adjustments. Subscript r denotes the root node's values.
1829  */
1830 static void transfer_surpluses(struct list_head *surpluses, struct ioc_now *now)
1831 {
1832         LIST_HEAD(over_hwa);
1833         LIST_HEAD(inner_walk);
1834         struct ioc_gq *iocg, *tiocg, *root_iocg;
1835         u32 after_sum, over_sum, over_target, gamma;
1836
1837         /*
1838          * It's pretty unlikely but possible for the total sum of
1839          * hweight_after_donation's to be higher than WEIGHT_ONE, which will
1840          * confuse the following calculations. If such a condition is detected,
1841          * scale down everyone over their full share equally to keep the sum below
1842          * WEIGHT_ONE.
1843          */
1844         after_sum = 0;
1845         over_sum = 0;
1846         list_for_each_entry(iocg, surpluses, surplus_list) {
1847                 u32 hwa;
1848
1849                 current_hweight(iocg, &hwa, NULL);
1850                 after_sum += iocg->hweight_after_donation;
1851
1852                 if (iocg->hweight_after_donation > hwa) {
1853                         over_sum += iocg->hweight_after_donation;
1854                         list_add(&iocg->walk_list, &over_hwa);
1855                 }
1856         }
1857
1858         if (after_sum >= WEIGHT_ONE) {
1859                 /*
1860                  * The delta should be deducted from over_sum; calculate the
1861                  * target over_sum value.
1862                  */
1863                 u32 over_delta = after_sum - (WEIGHT_ONE - 1);
1864                 WARN_ON_ONCE(over_sum <= over_delta);
1865                 over_target = over_sum - over_delta;
1866         } else {
1867                 over_target = 0;
1868         }
1869
1870         list_for_each_entry_safe(iocg, tiocg, &over_hwa, walk_list) {
1871                 if (over_target)
1872                         iocg->hweight_after_donation =
1873                                 div_u64((u64)iocg->hweight_after_donation *
1874                                         over_target, over_sum);
1875                 list_del_init(&iocg->walk_list);
1876         }
1877
1878         /*
1879          * Build pre-order inner node walk list and prepare for donation
1880          * adjustment calculations.
1881          */
1882         list_for_each_entry(iocg, surpluses, surplus_list) {
1883                 iocg_build_inner_walk(iocg, &inner_walk);
1884         }
1885
1886         root_iocg = list_first_entry(&inner_walk, struct ioc_gq, walk_list);
1887         WARN_ON_ONCE(root_iocg->level > 0);
1888
1889         list_for_each_entry(iocg, &inner_walk, walk_list) {
1890                 iocg->child_adjusted_sum = 0;
1891                 iocg->hweight_donating = 0;
1892                 iocg->hweight_after_donation = 0;
1893         }
1894
1895         /*
1896          * Propagate the donating budget (b_t) and after donation budget (b'_t)
1897          * up the hierarchy.
1898          */
1899         list_for_each_entry(iocg, surpluses, surplus_list) {
1900                 struct ioc_gq *parent = iocg->ancestors[iocg->level - 1];
1901
1902                 parent->hweight_donating += iocg->hweight_donating;
1903                 parent->hweight_after_donation += iocg->hweight_after_donation;
1904         }
1905
1906         list_for_each_entry_reverse(iocg, &inner_walk, walk_list) {
1907                 if (iocg->level > 0) {
1908                         struct ioc_gq *parent = iocg->ancestors[iocg->level - 1];
1909
1910                         parent->hweight_donating += iocg->hweight_donating;
1911                         parent->hweight_after_donation += iocg->hweight_after_donation;
1912                 }
1913         }
1914
1915         /*
1916          * Calculate inner hwa's (b) and make sure the donation values are
1917          * within the accepted ranges as we're doing low res calculations with
1918          * roundups.
1919          */
1920         list_for_each_entry(iocg, &inner_walk, walk_list) {
1921                 if (iocg->level) {
1922                         struct ioc_gq *parent = iocg->ancestors[iocg->level - 1];
1923
1924                         iocg->hweight_active = DIV64_U64_ROUND_UP(
1925                                 (u64)parent->hweight_active * iocg->active,
1926                                 parent->child_active_sum);
1927
1928                 }
1929
1930                 iocg->hweight_donating = min(iocg->hweight_donating,
1931                                              iocg->hweight_active);
1932                 iocg->hweight_after_donation = min(iocg->hweight_after_donation,
1933                                                    iocg->hweight_donating - 1);
1934                 if (WARN_ON_ONCE(iocg->hweight_active <= 1 ||
1935                                  iocg->hweight_donating <= 1 ||
1936                                  iocg->hweight_after_donation == 0)) {
1937                         pr_warn("iocg: invalid donation weights in ");
1938                         pr_cont_cgroup_path(iocg_to_blkg(iocg)->blkcg->css.cgroup);
1939                         pr_cont(": active=%u donating=%u after=%u\n",
1940                                 iocg->hweight_active, iocg->hweight_donating,
1941                                 iocg->hweight_after_donation);
1942                 }
1943         }
1944
1945         /*
1946          * Calculate the global donation rate (gamma) - the rate to adjust
1947          * non-donating budgets by.
1948          *
1949          * No need to use 64bit multiplication here as the first operand is
1950          * guaranteed to be smaller than WEIGHT_ONE (1<<16).
1951          *
1952          * We know that there are beneficiary nodes and the sum of the donating
1953          * hweights can't be whole; however, due to the round-ups during hweight
1954          * calculations, root_iocg->hweight_donating might still end up equal to
1955          * or greater than whole. Limit the range when calculating the divider.
1956          *
1957          * gamma = (1 - t_r') / (1 - t_r)
1958          */
1959         gamma = DIV_ROUND_UP(
1960                 (WEIGHT_ONE - root_iocg->hweight_after_donation) * WEIGHT_ONE,
1961                 WEIGHT_ONE - min_t(u32, root_iocg->hweight_donating, WEIGHT_ONE - 1));
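        /*
         * e.g. if donating nodes hold 40% of the device (t_r = 0.4) and will
         * keep 10% after donation (t_r' = 0.1), gamma = (1 - 0.1) / (1 - 0.4)
         * = 1.5 and every non-donating budget is scaled up by 50%.
         */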
1962
1963         /*
1964          * Calculate adjusted hwi, child_adjusted_sum and inuse for the inner
1965          * nodes.
1966          */
1967         list_for_each_entry(iocg, &inner_walk, walk_list) {
1968                 struct ioc_gq *parent;
1969                 u32 inuse, wpt, wptp;
1970                 u64 st, sf;
1971
1972                 if (iocg->level == 0) {
1973                         /* adjusted weight sum for 1st level: s' = s * b_pf / b'_pf */
1974                         iocg->child_adjusted_sum = DIV64_U64_ROUND_UP(
1975                                 iocg->child_active_sum * (WEIGHT_ONE - iocg->hweight_donating),
1976                                 WEIGHT_ONE - iocg->hweight_after_donation);
1977                         continue;
1978                 }
1979
1980                 parent = iocg->ancestors[iocg->level - 1];
1981
1982                 /* b' = gamma * b_f + b_t' */
1983                 iocg->hweight_inuse = DIV64_U64_ROUND_UP(
1984                         (u64)gamma * (iocg->hweight_active - iocg->hweight_donating),
1985                         WEIGHT_ONE) + iocg->hweight_after_donation;
1986
1987                 /* w' = s' * b' / b'_p */
1988                 inuse = DIV64_U64_ROUND_UP(
1989                         (u64)parent->child_adjusted_sum * iocg->hweight_inuse,
1990                         parent->hweight_inuse);
1991
1992                 /* adjusted weight sum for children: s' = s_f + s_t * w'_pt / w_pt */
1993                 st = DIV64_U64_ROUND_UP(
1994                         iocg->child_active_sum * iocg->hweight_donating,
1995                         iocg->hweight_active);
1996                 sf = iocg->child_active_sum - st;
1997                 wpt = DIV64_U64_ROUND_UP(
1998                         (u64)iocg->active * iocg->hweight_donating,
1999                         iocg->hweight_active);
2000                 wptp = DIV64_U64_ROUND_UP(
2001                         (u64)inuse * iocg->hweight_after_donation,
2002                         iocg->hweight_inuse);
2003
2004                 iocg->child_adjusted_sum = sf + DIV64_U64_ROUND_UP(st * wptp, wpt);
2005         }
2006
2007         /*
2008          * All inner nodes now have ->hweight_inuse and ->child_adjusted_sum and
2009          * we can finally determine leaf adjustments.
2010          */
2011         list_for_each_entry(iocg, surpluses, surplus_list) {
2012                 struct ioc_gq *parent = iocg->ancestors[iocg->level - 1];
2013                 u32 inuse;
2014
2015                 /*
2016                  * In-debt iocgs participated in the donation calculation with
2017                  * the minimum target hweight_inuse. Configuring inuse
2018                  * accordingly would work fine but debt handling expects
2019                  * @iocg->inuse to stay at the minimum and we don't want to
2020                  * interfere.
2021                  */
2022                 if (iocg->abs_vdebt) {
2023                         WARN_ON_ONCE(iocg->inuse > 1);
2024                         continue;
2025                 }
2026
2027                 /* w' = s' * b' / b'_p, note that b' == b'_t for donating leaves */
2028                 inuse = DIV64_U64_ROUND_UP(
2029                         parent->child_adjusted_sum * iocg->hweight_after_donation,
2030                         parent->hweight_inuse);
2031
2032                 TRACE_IOCG_PATH(inuse_transfer, iocg, now,
2033                                 iocg->inuse, inuse,
2034                                 iocg->hweight_inuse,
2035                                 iocg->hweight_after_donation);
2036
2037                 __propagate_weights(iocg, iocg->active, inuse, true, now);
2038         }
2039
2040         /* walk list should be dissolved after use */
2041         list_for_each_entry_safe(iocg, tiocg, &inner_walk, walk_list)
2042                 list_del_init(&iocg->walk_list);
2043 }
2044
2045 /*
2046  * A low weight iocg can amass a large amount of debt, for example, when
2047  * anonymous memory gets reclaimed aggressively. If the system has a lot of
2048  * memory paired with a slow IO device, the debt can span multiple seconds or
2049  * more. If there are no other subsequent IO issuers, the in-debt iocg may end
2050  * up blocked paying its debt while the IO device is idle.
2051  *
2052  * The following protects against such cases. If the device has been
2053  * sufficiently idle for a while, the debts are halved and delays are
2054  * recalculated.
2055  */
2056 static void ioc_forgive_debts(struct ioc *ioc, u64 usage_us_sum, int nr_debtors,
2057                               struct ioc_now *now)
2058 {
2059         struct ioc_gq *iocg;
2060         u64 dur, usage_pct, nr_cycles;
2061
2062         /* if no debtor, reset the cycle */
2063         if (!nr_debtors) {
2064                 ioc->dfgv_period_at = now->now;
2065                 ioc->dfgv_period_rem = 0;
2066                 ioc->dfgv_usage_us_sum = 0;
2067                 return;
2068         }
2069
2070         /*
2071          * Debtors can pass through a lot of writes, choking the device, and we
2072          * don't want to be forgiving debts while the device is struggling with
2073          * write bursts. If we're missing latency targets, consider the device
2074          * fully utilized.
2075          */
2076         if (ioc->busy_level > 0)
2077                 usage_us_sum = max_t(u64, usage_us_sum, ioc->period_us);
2078
2079         ioc->dfgv_usage_us_sum += usage_us_sum;
2080         if (time_before64(now->now, ioc->dfgv_period_at + DFGV_PERIOD))
2081                 return;
2082
2083         /*
2084          * At least DFGV_PERIOD has passed since the last period. Calculate the
2085          * average usage and reset the period counters.
2086          */
2087         dur = now->now - ioc->dfgv_period_at;
2088         usage_pct = div64_u64(100 * ioc->dfgv_usage_us_sum, dur);
2089
2090         ioc->dfgv_period_at = now->now;
2091         ioc->dfgv_usage_us_sum = 0;
2092
2093         /* if the device was too busy, reset everything */
2094         if (usage_pct > DFGV_USAGE_PCT) {
2095                 ioc->dfgv_period_rem = 0;
2096                 return;
2097         }
2098
2099         /*
2100          * Usage is lower than threshold. Let's forgive some debts. Debt
2101          * forgiveness runs off of the usual ioc timer but its period usually
2102          * doesn't match ioc's. Compensate the difference by performing the
2103          * reduction as many times as would fit in the duration since the last
2104          * run and carrying over the left-over duration in @ioc->dfgv_period_rem
2105          * - if the ioc period is 75% of DFGV_PERIOD, reductions run on every
2106          * other timer activation and every second reduction is doubled.
2107          */
2108         nr_cycles = dur + ioc->dfgv_period_rem;
2109         ioc->dfgv_period_rem = do_div(nr_cycles, DFGV_PERIOD);
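        /*
         * e.g. if 2.5 DFGV_PERIODs have passed since the last reduction and
         * 0.75 of a period was carried over, nr_cycles becomes 3 and a 0.25
         * period remainder is carried forward in dfgv_period_rem.
         */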
2110
2111         list_for_each_entry(iocg, &ioc->active_iocgs, active_list) {
2112                 u64 __maybe_unused old_debt, __maybe_unused old_delay;
2113
2114                 if (!iocg->abs_vdebt && !iocg->delay)
2115                         continue;
2116
2117                 spin_lock(&iocg->waitq.lock);
2118
2119                 old_debt = iocg->abs_vdebt;
2120                 old_delay = iocg->delay;
2121
2122                 if (iocg->abs_vdebt)
2123                         iocg->abs_vdebt = iocg->abs_vdebt >> nr_cycles ?: 1;
2124                 if (iocg->delay)
2125                         iocg->delay = iocg->delay >> nr_cycles ?: 1;
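                /*
                 * e.g. with nr_cycles at 2, a debt of 1000 is quartered to
                 * 250; a value that would shift down to 0 is clamped to 1,
                 * leaving the final clearing to the regular payment path.
                 */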
2126
2127                 iocg_kick_waitq(iocg, true, now);
2128
2129                 TRACE_IOCG_PATH(iocg_forgive_debt, iocg, now, usage_pct,
2130                                 old_debt, iocg->abs_vdebt,
2131                                 old_delay, iocg->delay);
2132
2133                 spin_unlock(&iocg->waitq.lock);
2134         }
2135 }
2136
2137 /*
2138  * Check the active iocgs' state to avoid oversleeping and deactivate
2139  * idle iocgs.
2140  *
2141  * Since waiters determine the sleep durations based on the vrate
2142  * they saw at the time of sleep, if vrate has increased, some
2143  * waiters could be sleeping for too long. Wake up tardy waiters
2144  * which should have woken up in the last period and expire idle
2145  * iocgs.
2146  */
2147 static int ioc_check_iocgs(struct ioc *ioc, struct ioc_now *now)
2148 {
2149         int nr_debtors = 0;
2150         struct ioc_gq *iocg, *tiocg;
2151
2152         list_for_each_entry_safe(iocg, tiocg, &ioc->active_iocgs, active_list) {
2153                 if (!waitqueue_active(&iocg->waitq) && !iocg->abs_vdebt &&
2154                     !iocg->delay && !iocg_is_idle(iocg))
2155                         continue;
2156
2157                 spin_lock(&iocg->waitq.lock);
2158
2159                 /* flush wait and indebt stat deltas */
2160                 if (iocg->wait_since) {
2161                         iocg->stat.wait_us += now->now - iocg->wait_since;
2162                         iocg->wait_since = now->now;
2163                 }
2164                 if (iocg->indebt_since) {
2165                         iocg->stat.indebt_us +=
2166                                 now->now - iocg->indebt_since;
2167                         iocg->indebt_since = now->now;
2168                 }
2169                 if (iocg->indelay_since) {
2170                         iocg->stat.indelay_us +=
2171                                 now->now - iocg->indelay_since;
2172                         iocg->indelay_since = now->now;
2173                 }
2174
2175                 if (waitqueue_active(&iocg->waitq) || iocg->abs_vdebt ||
2176                     iocg->delay) {
2177                         /* might be oversleeping vtime / hweight changes, kick */
2178                         iocg_kick_waitq(iocg, true, now);
2179                         if (iocg->abs_vdebt || iocg->delay)
2180                                 nr_debtors++;
2181                 } else if (iocg_is_idle(iocg)) {
2182                         /* no waiter and idle, deactivate */
2183                         u64 vtime = atomic64_read(&iocg->vtime);
2184                         s64 excess;
2185
2186                         /*
2187                          * @iocg has been inactive for a full duration and will
2188                          * have a high budget. Account anything above target as
2189                          * error and throw away. On reactivation, it'll start
2190                          * with the target budget.
2191                          */
2192                         excess = now->vnow - vtime - ioc->margins.target;
2193                         if (excess > 0) {
2194                                 u32 old_hwi;
2195
2196                                 current_hweight(iocg, NULL, &old_hwi);
2197                                 ioc->vtime_err -= div64_u64(excess * old_hwi,
2198                                                             WEIGHT_ONE);
2199                         }
2200
2201                         TRACE_IOCG_PATH(iocg_idle, iocg, now,
2202                                         atomic64_read(&iocg->active_period),
2203                                         atomic64_read(&ioc->cur_period), vtime);
2204                         __propagate_weights(iocg, 0, 0, false, now);
2205                         list_del_init(&iocg->active_list);
2206                 }
2207
2208                 spin_unlock(&iocg->waitq.lock);
2209         }
2210
2211         commit_weights(ioc);
2212         return nr_debtors;
2213 }
2214
2215 static void ioc_timer_fn(struct timer_list *timer)
2216 {
2217         struct ioc *ioc = container_of(timer, struct ioc, timer);
2218         struct ioc_gq *iocg, *tiocg;
2219         struct ioc_now now;
2220         LIST_HEAD(surpluses);
2221         int nr_debtors, nr_shortages = 0, nr_lagging = 0;
2222         u64 usage_us_sum = 0;
2223         u32 ppm_rthr = MILLION - ioc->params.qos[QOS_RPPM];
2224         u32 ppm_wthr = MILLION - ioc->params.qos[QOS_WPPM];
2225         u32 missed_ppm[2], rq_wait_pct;
2226         u64 period_vtime;
2227         int prev_busy_level;
2228
2229         /* how were the latencies during the period? */
2230         ioc_lat_stat(ioc, missed_ppm, &rq_wait_pct);
2231
2232         /* take care of active iocgs */
2233         spin_lock_irq(&ioc->lock);
2234
2235         ioc_now(ioc, &now);
2236
2237         period_vtime = now.vnow - ioc->period_at_vtime;
2238         if (WARN_ON_ONCE(!period_vtime)) {
2239                 spin_unlock_irq(&ioc->lock);
2240                 return;
2241         }
2242
2243         nr_debtors = ioc_check_iocgs(ioc, &now);
2244
2245         /*
2246          * Wait and indebt stats are flushed above and the donation calculation
2247          * below needs up-to-date usage stats. Let's bring them up-to-date.
2248          */
2249         iocg_flush_stat(&ioc->active_iocgs, &now);
2250
2251         /* calc usage and see whether some weights need to be moved around */
2252         list_for_each_entry(iocg, &ioc->active_iocgs, active_list) {
2253                 u64 vdone, vtime, usage_us;
2254                 u32 hw_active, hw_inuse;
2255
2256                 /*
2257                  * Collect unused and wind vtime closer to vnow to prevent
2258                  * iocgs from accumulating a large amount of budget.
2259                  */
2260                 vdone = atomic64_read(&iocg->done_vtime);
2261                 vtime = atomic64_read(&iocg->vtime);
2262                 current_hweight(iocg, &hw_active, &hw_inuse);
2263
2264                 /*
2265                  * Latency QoS detection doesn't account for IOs which are
2266                  * in-flight for longer than a period.  Detect them by
2267                  * comparing vdone against the period start.  If IOs from past
2268                  * periods are still lagging, don't increase vrate.
2269                  */
2270                 if ((ppm_rthr != MILLION || ppm_wthr != MILLION) &&
2271                     !atomic_read(&iocg_to_blkg(iocg)->use_delay) &&
2272                     time_after64(vtime, vdone) &&
2273                     time_after64(vtime, now.vnow -
2274                                  MAX_LAGGING_PERIODS * period_vtime) &&
2275                     time_before64(vdone, now.vnow - period_vtime))
2276                         nr_lagging++;
2277
2278                 /*
2279                  * Determine absolute usage factoring in in-flight IOs to avoid
2280                  * high-latency completions appearing as idle.
2281                  */
2282                 usage_us = iocg->usage_delta_us;
2283                 usage_us_sum += usage_us;
2284
2285                 /* see whether there's surplus vtime */
2286                 WARN_ON_ONCE(!list_empty(&iocg->surplus_list));
2287                 if (hw_inuse < hw_active ||
2288                     (!waitqueue_active(&iocg->waitq) &&
2289                      time_before64(vtime, now.vnow - ioc->margins.low))) {
2290                         u32 hwa, old_hwi, hwm, new_hwi, usage;
2291                         u64 usage_dur;
2292
2293                         if (vdone != vtime) {
2294                                 u64 inflight_us = DIV64_U64_ROUND_UP(
2295                                         cost_to_abs_cost(vtime - vdone, hw_inuse),
2296                                         ioc->vtime_base_rate);
2297
2298                                 usage_us = max(usage_us, inflight_us);
2299                         }
2300
2301                         /* convert to hweight based usage ratio */
2302                         if (time_after64(iocg->activated_at, ioc->period_at))
2303                                 usage_dur = max_t(u64, now.now - iocg->activated_at, 1);
2304                         else
2305                                 usage_dur = max_t(u64, now.now - ioc->period_at, 1);
2306
2307                         usage = clamp_t(u32,
2308                                 DIV64_U64_ROUND_UP(usage_us * WEIGHT_ONE,
2309                                                    usage_dur),
2310                                 1, WEIGHT_ONE);
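                        /*
                         * e.g. 50ms of absolute usage over a 100ms window
                         * maps to usage = WEIGHT_ONE / 2 - the iocg kept the
                         * device busy for half of the period.
                         */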
2311
2312                         /*
2313                          * Already donating or accumulated enough to start.
2314                          * Determine the donation amount.
2315                          */
2316                         current_hweight(iocg, &hwa, &old_hwi);
2317                         hwm = current_hweight_max(iocg);
2318                         new_hwi = hweight_after_donation(iocg, old_hwi, hwm,
2319                                                          usage, &now);
2320                         /*
2321                          * Donation calculation assumes hweight_after_donation
2322                          * is positive, a condition that a donor w/ hwa < 2
2323                          * can't meet. Don't bother with donation if hwa is
2324                          * below 2. It's not going to make a meaningful difference
2325                          * anyway.
2326                          */
2327                         if (new_hwi < hwm && hwa >= 2) {
2328                                 iocg->hweight_donating = hwa;
2329                                 iocg->hweight_after_donation = new_hwi;
2330                                 list_add(&iocg->surplus_list, &surpluses);
2331                         } else if (!iocg->abs_vdebt) {
2332                                 /*
2333                                  * @iocg doesn't have enough to donate. Reset
2334                                  * its inuse to active.
2335                                  *
2336                                  * Don't reset debtors as their inuse weights
2337                                  * are owned by debt handling. This shouldn't
2338                                  * affect the donation calculation in any
2339                                  * meaningful way as @iocg doesn't have a
2340                                  * meaningful amount of share anyway.
2341                                  */
2342                                 TRACE_IOCG_PATH(inuse_shortage, iocg, &now,
2343                                                 iocg->inuse, iocg->active,
2344                                                 iocg->hweight_inuse, new_hwi);
2345
2346                                 __propagate_weights(iocg, iocg->active,
2347                                                     iocg->active, true, &now);
2348                                 nr_shortages++;
2349                         }
2350                 } else {
2351                         /* genuinely short on vtime */
2352                         nr_shortages++;
2353                 }
2354         }
2355
2356         if (!list_empty(&surpluses) && nr_shortages)
2357                 transfer_surpluses(&surpluses, &now);
2358
2359         commit_weights(ioc);
2360
2361         /* surplus list should be dissolved after use */
2362         list_for_each_entry_safe(iocg, tiocg, &surpluses, surplus_list)
2363                 list_del_init(&iocg->surplus_list);
2364
2365         /*
2366          * If the queue is getting clogged or we're missing too much, we're
2367          * issuing too much IO and should lower the vtime rate.  If we're not
2368          * missing targets but are experiencing shortages without surpluses,
2369          * we're too stingy and should increase the vtime rate.
2370          */
2371         prev_busy_level = ioc->busy_level;
2372         if (rq_wait_pct > RQ_WAIT_BUSY_PCT ||
2373             missed_ppm[READ] > ppm_rthr ||
2374             missed_ppm[WRITE] > ppm_wthr) {
2375                 /* clearly missing QoS targets, slow down vrate */
2376                 ioc->busy_level = max(ioc->busy_level, 0);
2377                 ioc->busy_level++;
2378         } else if (rq_wait_pct <= RQ_WAIT_BUSY_PCT * UNBUSY_THR_PCT / 100 &&
2379                    missed_ppm[READ] <= ppm_rthr * UNBUSY_THR_PCT / 100 &&
2380                    missed_ppm[WRITE] <= ppm_wthr * UNBUSY_THR_PCT / 100) {
2381                 /* QoS targets are being met with >25% margin */
2382                 if (nr_shortages) {
2383                         /*
2384                          * We're throttling while the device has spare
2385                          * capacity.  If vrate was being slowed down, stop.
2386                          */
2387                         ioc->busy_level = min(ioc->busy_level, 0);
2388
2389                         /*
2390                          * If there are IOs spanning multiple periods, wait
2391                          * them out before pushing the device harder.
2392                          */
2393                         if (!nr_lagging)
2394                                 ioc->busy_level--;
2395                 } else {
2396                         /*
2397                          * Nobody is being throttled and the users aren't
2398                          * issuing enough IOs to saturate the device.  We
2399                          * simply don't know how close the device is to
2400                          * saturation.  Coast.
2401                          */
2402                         ioc->busy_level = 0;
2403                 }
2404         } else {
2405                 /* inside the hysteresis margin, we're good */
2406                 ioc->busy_level = 0;
2407         }
2408
2409         ioc->busy_level = clamp(ioc->busy_level, -1000, 1000);
2410
2411         ioc_adjust_base_vrate(ioc, rq_wait_pct, nr_lagging, nr_shortages,
2412                               prev_busy_level, missed_ppm);
2413
2414         ioc_refresh_params(ioc, false);
2415
2416         ioc_forgive_debts(ioc, usage_us_sum, nr_debtors, &now);
2417
2418         /*
2419          * This period is done.  Move onto the next one.  If nothing's
2420          * going on with the device, stop the timer.
2421          */
2422         atomic64_inc(&ioc->cur_period);
2423
2424         if (ioc->running != IOC_STOP) {
2425                 if (!list_empty(&ioc->active_iocgs)) {
2426                         ioc_start_period(ioc, &now);
2427                 } else {
2428                         ioc->busy_level = 0;
2429                         ioc->vtime_err = 0;
2430                         ioc->running = IOC_IDLE;
2431                 }
2432
2433                 ioc_refresh_vrate(ioc, &now);
2434         }
2435
2436         spin_unlock_irq(&ioc->lock);
2437 }
2438
2439 static u64 adjust_inuse_and_calc_cost(struct ioc_gq *iocg, u64 vtime,
2440                                       u64 abs_cost, struct ioc_now *now)
2441 {
2442         struct ioc *ioc = iocg->ioc;
2443         struct ioc_margins *margins = &ioc->margins;
2444         u32 __maybe_unused old_inuse = iocg->inuse, __maybe_unused old_hwi;
2445         u32 hwi, adj_step;
2446         s64 margin;
2447         u64 cost, new_inuse;
2448         unsigned long flags;
2449
2450         current_hweight(iocg, NULL, &hwi);
2451         old_hwi = hwi;
2452         cost = abs_cost_to_cost(abs_cost, hwi);
2453         margin = now->vnow - vtime - cost;
2454
2455         /* debt handling owns inuse for debtors */
2456         if (iocg->abs_vdebt)
2457                 return cost;
2458
2459         /*
2460          * We only increase inuse during period and do so if the margin has
2461          * deteriorated since the previous adjustment.
2462          */
2463         if (margin >= iocg->saved_margin || margin >= margins->low ||
2464             iocg->inuse == iocg->active)
2465                 return cost;
2466
2467         spin_lock_irqsave(&ioc->lock, flags);
2468
2469         /* we own inuse only when @iocg is in the normal active state */
2470         if (iocg->abs_vdebt || list_empty(&iocg->active_list)) {
2471                 spin_unlock_irqrestore(&ioc->lock, flags);
2472                 return cost;
2473         }
2474
2475         /*
2476          * Bump up inuse till @abs_cost fits in the existing budget.
2477          * adj_step must be determined after acquiring ioc->lock - we might
2478          * have raced and lost the activation to another thread and could be
2479          * reading a zero iocg->active before taking ioc->lock, which would
2480          * lead to an infinite loop.
2481          */
2482         new_inuse = iocg->inuse;
2483         adj_step = DIV_ROUND_UP(iocg->active * INUSE_ADJ_STEP_PCT, 100);
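        /*
         * e.g. if INUSE_ADJ_STEP_PCT is 25 and active is 100, each pass of
         * the loop below raises inuse by 25 until the cost fits within the
         * budget or inuse reaches active.
         */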
2484         do {
2485                 new_inuse = new_inuse + adj_step;
2486                 propagate_weights(iocg, iocg->active, new_inuse, true, now);
2487                 current_hweight(iocg, NULL, &hwi);
2488                 cost = abs_cost_to_cost(abs_cost, hwi);
2489         } while (time_after64(vtime + cost, now->vnow) &&
2490                  iocg->inuse != iocg->active);
2491
2492         spin_unlock_irqrestore(&ioc->lock, flags);
2493
2494         TRACE_IOCG_PATH(inuse_adjust, iocg, now,
2495                         old_inuse, iocg->inuse, old_hwi, hwi);
2496
2497         return cost;
2498 }
2499
2500 static void calc_vtime_cost_builtin(struct bio *bio, struct ioc_gq *iocg,
2501                                     bool is_merge, u64 *costp)
2502 {
2503         struct ioc *ioc = iocg->ioc;
2504         u64 coef_seqio, coef_randio, coef_page;
2505         u64 pages = max_t(u64, bio_sectors(bio) >> IOC_SECT_TO_PAGE_SHIFT, 1);
2506         u64 seek_pages = 0;
2507         u64 cost = 0;
2508
2509         switch (bio_op(bio)) {
2510         case REQ_OP_READ:
2511                 coef_seqio      = ioc->params.lcoefs[LCOEF_RSEQIO];
2512                 coef_randio     = ioc->params.lcoefs[LCOEF_RRANDIO];
2513                 coef_page       = ioc->params.lcoefs[LCOEF_RPAGE];
2514                 break;
2515         case REQ_OP_WRITE:
2516                 coef_seqio      = ioc->params.lcoefs[LCOEF_WSEQIO];
2517                 coef_randio     = ioc->params.lcoefs[LCOEF_WRANDIO];
2518                 coef_page       = ioc->params.lcoefs[LCOEF_WPAGE];
2519                 break;
2520         default:
2521                 goto out;
2522         }
2523
2524         if (iocg->cursor) {
2525                 seek_pages = abs(bio->bi_iter.bi_sector - iocg->cursor);
2526                 seek_pages >>= IOC_SECT_TO_PAGE_SHIFT;
2527         }
2528
2529         if (!is_merge) {
2530                 if (seek_pages > LCOEF_RANDIO_PAGES) {
2531                         cost += coef_randio;
2532                 } else {
2533                         cost += coef_seqio;
2534                 }
2535         }
2536         cost += pages * coef_page;
2537 out:
2538         *costp = cost;
2539 }
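/*
 * A compiled-out, illustrative sketch of the linear model above; the
 * coefficients below are made-up placeholders, not the builtin defaults.
 * A 64KiB random read (16 pages, seek distance beyond LCOEF_RANDIO_PAGES)
 * would cost coef_randio + 16 * coef_page.
 */
#if 0
static u64 example_linear_cost(bool is_seq, u64 pages)
{
        /* hypothetical coefficients for illustration only */
        const u64 coef_seqio = 10000, coef_randio = 80000, coef_page = 500;
        u64 cost = is_seq ? coef_seqio : coef_randio;

        return cost + pages * coef_page; /* random: 80000 + 16 * 500 = 88000 */
}
#endif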
2540
2541 static u64 calc_vtime_cost(struct bio *bio, struct ioc_gq *iocg, bool is_merge)
2542 {
2543         u64 cost;
2544
2545         calc_vtime_cost_builtin(bio, iocg, is_merge, &cost);
2546         return cost;
2547 }
2548
2549 static void calc_size_vtime_cost_builtin(struct request *rq, struct ioc *ioc,
2550                                          u64 *costp)
2551 {
2552         unsigned int pages = blk_rq_stats_sectors(rq) >> IOC_SECT_TO_PAGE_SHIFT;
2553
2554         switch (req_op(rq)) {
2555         case REQ_OP_READ:
2556                 *costp = pages * ioc->params.lcoefs[LCOEF_RPAGE];
2557                 break;
2558         case REQ_OP_WRITE:
2559                 *costp = pages * ioc->params.lcoefs[LCOEF_WPAGE];
2560                 break;
2561         default:
2562                 *costp = 0;
2563         }
2564 }
2565
2566 static u64 calc_size_vtime_cost(struct request *rq, struct ioc *ioc)
2567 {
2568         u64 cost;
2569
2570         calc_size_vtime_cost_builtin(rq, ioc, &cost);
2571         return cost;
2572 }
2573
2574 static void ioc_rqos_throttle(struct rq_qos *rqos, struct bio *bio)
2575 {
2576         struct blkcg_gq *blkg = bio->bi_blkg;
2577         struct ioc *ioc = rqos_to_ioc(rqos);
2578         struct ioc_gq *iocg = blkg_to_iocg(blkg);
2579         struct ioc_now now;
2580         struct iocg_wait wait;
2581         u64 abs_cost, cost, vtime;
2582         bool use_debt, ioc_locked;
2583         unsigned long flags;
2584
2585         /* bypass IOs if disabled, still initializing, or for root cgroup */
2586         if (!ioc->enabled || !iocg || !iocg->level)
2587                 return;
2588
2589         /* calculate the absolute vtime cost */
2590         abs_cost = calc_vtime_cost(bio, iocg, false);
2591         if (!abs_cost)
2592                 return;
2593
2594         if (!iocg_activate(iocg, &now))
2595                 return;
2596
2597         iocg->cursor = bio_end_sector(bio);
2598         vtime = atomic64_read(&iocg->vtime);
2599         cost = adjust_inuse_and_calc_cost(iocg, vtime, abs_cost, &now);
2600
2601         /*
2602          * If no one's waiting and within budget, issue right away.  The
2603          * tests are racy but the races aren't systemic - we only miss once
2604          * in a while which is fine.
2605          */
2606         if (!waitqueue_active(&iocg->waitq) && !iocg->abs_vdebt &&
2607             time_before_eq64(vtime + cost, now.vnow)) {
2608                 iocg_commit_bio(iocg, bio, abs_cost, cost);
2609                 return;
2610         }
2611
2612         /*
2613          * We're over budget. This can be handled in two ways. IOs which may
2614          * cause priority inversions are charged to @iocg as
2615          * debt. Otherwise, the issuer is blocked on @iocg->waitq. Debt handling
2616          * requires @ioc->lock, waitq handling @iocg->waitq.lock. Determine
2617          * whether debt handling is needed and acquire locks accordingly.
2618          */
2619         use_debt = bio_issue_as_root_blkg(bio) || fatal_signal_pending(current);
2620         ioc_locked = use_debt || READ_ONCE(iocg->abs_vdebt);
2621 retry_lock:
2622         iocg_lock(iocg, ioc_locked, &flags);
2623
2624         /*
2625          * @iocg must stay activated for debt and waitq handling. Deactivation
2626          * is synchronized against both ioc->lock and waitq.lock and we won't
2627          * get deactivated as long as we're waiting or have debt, so we're good
2628          * if we're activated here. In the unlikely cases that we aren't, just
2629          * issue the IO.
2630          */
2631         if (unlikely(list_empty(&iocg->active_list))) {
2632                 iocg_unlock(iocg, ioc_locked, &flags);
2633                 iocg_commit_bio(iocg, bio, abs_cost, cost);
2634                 return;
2635         }
2636
2637         /*
2638          * We're over budget. If @bio has to be issued regardless, remember
2639          * the abs_cost instead of advancing vtime. iocg_kick_waitq() will pay
2640          * off the debt before waking more IOs.
2641          *
2642          * This way, the debt is continuously paid off each period with the
2643          * actual budget available to the cgroup. If we just wound vtime, we
2644          * would incorrectly use the current hw_inuse for the entire amount
2645          * which, for example, can lead to the cgroup staying blocked for a
2646          * long time even with substantially raised hw_inuse.
2647          *
2648          * An iocg with vdebt should stay online so that the timer can keep
2649          * deducting its vdebt and [de]activate the use_delay mechanism
2650          * accordingly. We don't want to race against the timer trying to
2651          * clear them and leave @iocg inactive w/ dangling use_delay heavily
2652          * penalizing the cgroup and its descendants.
2653          */
2654         if (use_debt) {
2655                 iocg_incur_debt(iocg, abs_cost, &now);
2656                 if (iocg_kick_delay(iocg, &now))
2657                         blkcg_schedule_throttle(rqos->q->disk,
2658                                         (bio->bi_opf & REQ_SWAP) == REQ_SWAP);
2659                 iocg_unlock(iocg, ioc_locked, &flags);
2660                 return;
2661         }
2662
2663         /* guarantee that iocgs w/ waiters have maximum inuse */
2664         if (!iocg->abs_vdebt && iocg->inuse != iocg->active) {
2665                 if (!ioc_locked) {
2666                         iocg_unlock(iocg, false, &flags);
2667                         ioc_locked = true;
2668                         goto retry_lock;
2669                 }
2670                 propagate_weights(iocg, iocg->active, iocg->active, true,
2671                                   &now);
2672         }
2673
2674         /*
2675          * Append self to the waitq and schedule the wakeup timer if we're
2676          * the first waiter.  The timer duration is calculated based on the
2677          * current vrate.  vtime and hweight changes can make it too short
2678          * or too long.  Each wait entry records the absolute cost it's
2679          * waiting for to allow re-evaluation using a custom wait entry.
2680          *
2681          * If too short, the timer simply reschedules itself.  If too long,
2682          * the period timer will notice and trigger wakeups.
2683          *
2684          * All waiters are on iocg->waitq and the wait states are
2685          * synchronized using waitq.lock.
2686          */
2687         init_waitqueue_func_entry(&wait.wait, iocg_wake_fn);
2688         wait.wait.private = current;
2689         wait.bio = bio;
2690         wait.abs_cost = abs_cost;
2691         wait.committed = false; /* will be set true by waker */
2692
2693         __add_wait_queue_entry_tail(&iocg->waitq, &wait.wait);
2694         iocg_kick_waitq(iocg, ioc_locked, &now);
2695
2696         iocg_unlock(iocg, ioc_locked, &flags);
2697
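        /*
         * Open-coded wait: iocg_wake_fn() charges the recorded abs_cost,
         * commits the bio on our behalf and sets wait.committed once
         * enough vtime budget has accrued.
         */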
2698         while (true) {
2699                 set_current_state(TASK_UNINTERRUPTIBLE);
2700                 if (wait.committed)
2701                         break;
2702                 io_schedule();
2703         }
2704
2705         /* waker already committed us, proceed */
2706         finish_wait(&iocg->waitq, &wait.wait);
2707 }
2708
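/*
 * Merges can't block.  A merged bio is either charged immediately when
 * there's budget or accounted as debt; it never joins iocg->waitq.
 */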
2709 static void ioc_rqos_merge(struct rq_qos *rqos, struct request *rq,
2710                            struct bio *bio)
2711 {
2712         struct ioc_gq *iocg = blkg_to_iocg(bio->bi_blkg);
2713         struct ioc *ioc = rqos_to_ioc(rqos);
2714         sector_t bio_end = bio_end_sector(bio);
2715         struct ioc_now now;
2716         u64 vtime, abs_cost, cost;
2717         unsigned long flags;
2718
2719         /* bypass if disabled, still initializing, or for root cgroup */
2720         if (!ioc->enabled || !iocg || !iocg->level)
2721                 return;
2722
2723         abs_cost = calc_vtime_cost(bio, iocg, true);
2724         if (!abs_cost)
2725                 return;
2726
2727         ioc_now(ioc, &now);
2728
2729         vtime = atomic64_read(&iocg->vtime);
2730         cost = adjust_inuse_and_calc_cost(iocg, vtime, abs_cost, &now);
2731
2732         /* update cursor if backmerging into the request at the cursor */
2733         if (blk_rq_pos(rq) < bio_end &&
2734             blk_rq_pos(rq) + blk_rq_sectors(rq) == iocg->cursor)
2735                 iocg->cursor = bio_end;
2736
2737         /*
2738          * Charge if there's enough vtime budget and the existing request has
2739          * cost assigned.
2740          */
2741         if (rq->bio && rq->bio->bi_iocost_cost &&
2742             time_before_eq64(atomic64_read(&iocg->vtime) + cost, now.vnow)) {
2743                 iocg_commit_bio(iocg, bio, abs_cost, cost);
2744                 return;
2745         }
2746
2747         /*
2748          * Otherwise, account it as debt if @iocg is online, which it should
2749          * be for the vast majority of cases. See debt handling in
2750          * ioc_rqos_throttle() for details.
2751          */
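        /* ioc->lock nests outside iocg->waitq.lock */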
2752         spin_lock_irqsave(&ioc->lock, flags);
2753         spin_lock(&iocg->waitq.lock);
2754
2755         if (likely(!list_empty(&iocg->active_list))) {
2756                 iocg_incur_debt(iocg, abs_cost, &now);
2757                 if (iocg_kick_delay(iocg, &now))
2758                         blkcg_schedule_throttle(rqos->q->disk,
2759                                         (bio->bi_opf & REQ_SWAP) == REQ_SWAP);
2760         } else {
2761                 iocg_commit_bio(iocg, bio, abs_cost, cost);
2762         }
2763
2764         spin_unlock(&iocg->waitq.lock);
2765         spin_unlock_irqrestore(&ioc->lock, flags);
2766 }
2767
2768 static void ioc_rqos_done_bio(struct rq_qos *rqos, struct bio *bio)
2769 {
2770         struct ioc_gq *iocg = blkg_to_iocg(bio->bi_blkg);
2771
2772         if (iocg && bio->bi_iocost_cost)
2773                 atomic64_add(bio->bi_iocost_cost, &iocg->done_vtime);
2774 }
2775
2776 static void ioc_rqos_done(struct rq_qos *rqos, struct request *rq)
2777 {
2778         struct ioc *ioc = rqos_to_ioc(rqos);
2779         struct ioc_pcpu_stat *ccs;
2780         u64 on_q_ns, rq_wait_ns, size_nsec;
2781         int pidx, rw;
2782
2783         if (!ioc->enabled || !rq->alloc_time_ns || !rq->start_time_ns)
2784                 return;
2785
2786         switch (req_op(rq)) {
2787         case REQ_OP_READ:
2788                 pidx = QOS_RLAT;
2789                 rw = READ;
2790                 break;
2791         case REQ_OP_WRITE:
2792                 pidx = QOS_WLAT;
2793                 rw = WRITE;
2794                 break;
2795         default:
2796                 return;
2797         }
2798
2799         on_q_ns = ktime_get_ns() - rq->alloc_time_ns;
2800         rq_wait_ns = rq->start_time_ns - rq->alloc_time_ns;
2801         size_nsec = div64_u64(calc_size_vtime_cost(rq, ioc), VTIME_PER_NSEC);
2802
2803         ccs = get_cpu_ptr(ioc->pcpu_stat);
2804
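        /*
         * An IO meets its QoS target if the total on-queue time minus the
         * size-proportional portion of its modeled device time is within
         * the configured latency target, e.g. with rlat=5000 a read must
         * complete within 5ms of size-adjusted time to count as met.
         */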
2805         if (on_q_ns <= size_nsec ||
2806             on_q_ns - size_nsec <= ioc->params.qos[pidx] * NSEC_PER_USEC)
2807                 local_inc(&ccs->missed[rw].nr_met);
2808         else
2809                 local_inc(&ccs->missed[rw].nr_missed);
2810
2811         local64_add(rq_wait_ns, &ccs->rq_wait_ns);
2812
2813         put_cpu_ptr(ccs);
2814 }
2815
2816 static void ioc_rqos_queue_depth_changed(struct rq_qos *rqos)
2817 {
2818         struct ioc *ioc = rqos_to_ioc(rqos);
2819
2820         spin_lock_irq(&ioc->lock);
2821         ioc_refresh_params(ioc, false);
2822         spin_unlock_irq(&ioc->lock);
2823 }
2824
2825 static void ioc_rqos_exit(struct rq_qos *rqos)
2826 {
2827         struct ioc *ioc = rqos_to_ioc(rqos);
2828
2829         blkcg_deactivate_policy(rqos->q, &blkcg_policy_iocost);
2830
2831         spin_lock_irq(&ioc->lock);
2832         ioc->running = IOC_STOP;
2833         spin_unlock_irq(&ioc->lock);
2834
2835         del_timer_sync(&ioc->timer);
2836         free_percpu(ioc->pcpu_stat);
2837         kfree(ioc);
2838 }
2839
2840 static struct rq_qos_ops ioc_rqos_ops = {
2841         .throttle = ioc_rqos_throttle,
2842         .merge = ioc_rqos_merge,
2843         .done_bio = ioc_rqos_done_bio,
2844         .done = ioc_rqos_done,
2845         .queue_depth_changed = ioc_rqos_queue_depth_changed,
2846         .exit = ioc_rqos_exit,
2847 };
2848
2849 static int blk_iocost_init(struct gendisk *disk)
2850 {
2851         struct request_queue *q = disk->queue;
2852         struct ioc *ioc;
2853         struct rq_qos *rqos;
2854         int i, cpu, ret;
2855
2856         ioc = kzalloc(sizeof(*ioc), GFP_KERNEL);
2857         if (!ioc)
2858                 return -ENOMEM;
2859
2860         ioc->pcpu_stat = alloc_percpu(struct ioc_pcpu_stat);
2861         if (!ioc->pcpu_stat) {
2862                 kfree(ioc);
2863                 return -ENOMEM;
2864         }
2865
2866         for_each_possible_cpu(cpu) {
2867                 struct ioc_pcpu_stat *ccs = per_cpu_ptr(ioc->pcpu_stat, cpu);
2868
2869                 for (i = 0; i < ARRAY_SIZE(ccs->missed); i++) {
2870                         local_set(&ccs->missed[i].nr_met, 0);
2871                         local_set(&ccs->missed[i].nr_missed, 0);
2872                 }
2873                 local64_set(&ccs->rq_wait_ns, 0);
2874         }
2875
2876         rqos = &ioc->rqos;
2877         rqos->id = RQ_QOS_COST;
2878         rqos->ops = &ioc_rqos_ops;
2879         rqos->q = q;
2880
2881         spin_lock_init(&ioc->lock);
2882         timer_setup(&ioc->timer, ioc_timer_fn, 0);
2883         INIT_LIST_HEAD(&ioc->active_iocgs);
2884
2885         ioc->running = IOC_IDLE;
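        /* a vtime_rate of VTIME_PER_USEC is nominal device speed, i.e. 100% */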
2886         ioc->vtime_base_rate = VTIME_PER_USEC;
2887         atomic64_set(&ioc->vtime_rate, VTIME_PER_USEC);
2888         seqcount_spinlock_init(&ioc->period_seqcount, &ioc->lock);
2889         ioc->period_at = ktime_to_us(ktime_get());
2890         atomic64_set(&ioc->cur_period, 0);
2891         atomic_set(&ioc->hweight_gen, 0);
2892
2893         spin_lock_irq(&ioc->lock);
2894         ioc->autop_idx = AUTOP_INVALID;
2895         ioc_refresh_params(ioc, true);
2896         spin_unlock_irq(&ioc->lock);
2897
2898         /*
2899          * rqos must be added before activation to allow iocg_pd_init() to
2900          * lookup the ioc from q. This means that the rqos methods may get
2901          * called before policy activation completes, so they can't assume
2902          * the target bio has an iocg associated and must test for NULL iocg.
2903          */
2904         ret = rq_qos_add(q, rqos);
2905         if (ret)
2906                 goto err_free_ioc;
2907
2908         ret = blkcg_activate_policy(q, &blkcg_policy_iocost);
2909         if (ret)
2910                 goto err_del_qos;
2911         return 0;
2912
2913 err_del_qos:
2914         rq_qos_del(q, rqos);
2915 err_free_ioc:
2916         free_percpu(ioc->pcpu_stat);
2917         kfree(ioc);
2918         return ret;
2919 }
2920
2921 static struct blkcg_policy_data *ioc_cpd_alloc(gfp_t gfp)
2922 {
2923         struct ioc_cgrp *iocc;
2924
2925         iocc = kzalloc(sizeof(struct ioc_cgrp), gfp);
2926         if (!iocc)
2927                 return NULL;
2928
2929         iocc->dfl_weight = CGROUP_WEIGHT_DFL * WEIGHT_ONE;
2930         return &iocc->cpd;
2931 }
2932
2933 static void ioc_cpd_free(struct blkcg_policy_data *cpd)
2934 {
2935         kfree(container_of(cpd, struct ioc_cgrp, cpd));
2936 }
2937
2938 static struct blkg_policy_data *ioc_pd_alloc(gfp_t gfp, struct request_queue *q,
2939                                              struct blkcg *blkcg)
2940 {
2941         int levels = blkcg->css.cgroup->level + 1;
2942         struct ioc_gq *iocg;
2943
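        /* room for every ancestor including self, indexed by cgroup level */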
2944         iocg = kzalloc_node(struct_size(iocg, ancestors, levels), gfp, q->node);
2945         if (!iocg)
2946                 return NULL;
2947
2948         iocg->pcpu_stat = alloc_percpu_gfp(struct iocg_pcpu_stat, gfp);
2949         if (!iocg->pcpu_stat) {
2950                 kfree(iocg);
2951                 return NULL;
2952         }
2953
2954         return &iocg->pd;
2955 }
2956
2957 static void ioc_pd_init(struct blkg_policy_data *pd)
2958 {
2959         struct ioc_gq *iocg = pd_to_iocg(pd);
2960         struct blkcg_gq *blkg = pd_to_blkg(&iocg->pd);
2961         struct ioc *ioc = q_to_ioc(blkg->q);
2962         struct ioc_now now;
2963         struct blkcg_gq *tblkg;
2964         unsigned long flags;
2965
2966         ioc_now(ioc, &now);
2967
2968         iocg->ioc = ioc;
2969         atomic64_set(&iocg->vtime, now.vnow);
2970         atomic64_set(&iocg->done_vtime, now.vnow);
2971         atomic64_set(&iocg->active_period, atomic64_read(&ioc->cur_period));
2972         INIT_LIST_HEAD(&iocg->active_list);
2973         INIT_LIST_HEAD(&iocg->walk_list);
2974         INIT_LIST_HEAD(&iocg->surplus_list);
2975         iocg->hweight_active = WEIGHT_ONE;
2976         iocg->hweight_inuse = WEIGHT_ONE;
2977
2978         init_waitqueue_head(&iocg->waitq);
2979         hrtimer_init(&iocg->waitq_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
2980         iocg->waitq_timer.function = iocg_waitq_timer_fn;
2981
2982         iocg->level = blkg->blkcg->css.cgroup->level;
2983
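        /* record the root-to-self path for hierarchical weight walks */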
2984         for (tblkg = blkg; tblkg; tblkg = tblkg->parent) {
2985                 struct ioc_gq *tiocg = blkg_to_iocg(tblkg);
2986                 iocg->ancestors[tiocg->level] = tiocg;
2987         }
2988
2989         spin_lock_irqsave(&ioc->lock, flags);
2990         weight_updated(iocg, &now);
2991         spin_unlock_irqrestore(&ioc->lock, flags);
2992 }
2993
2994 static void ioc_pd_free(struct blkg_policy_data *pd)
2995 {
2996         struct ioc_gq *iocg = pd_to_iocg(pd);
2997         struct ioc *ioc = iocg->ioc;
2998         unsigned long flags;
2999
3000         if (ioc) {
3001                 spin_lock_irqsave(&ioc->lock, flags);
3002
3003                 if (!list_empty(&iocg->active_list)) {
3004                         struct ioc_now now;
3005
3006                         ioc_now(ioc, &now);
3007                         propagate_weights(iocg, 0, 0, false, &now);
3008                         list_del_init(&iocg->active_list);
3009                 }
3010
3011                 WARN_ON_ONCE(!list_empty(&iocg->walk_list));
3012                 WARN_ON_ONCE(!list_empty(&iocg->surplus_list));
3013
3014                 spin_unlock_irqrestore(&ioc->lock, flags);
3015
3016                 hrtimer_cancel(&iocg->waitq_timer);
3017         }
3018         free_percpu(iocg->pcpu_stat);
3019         kfree(iocg);
3020 }
3021
3022 static void ioc_pd_stat(struct blkg_policy_data *pd, struct seq_file *s)
3023 {
3024         struct ioc_gq *iocg = pd_to_iocg(pd);
3025         struct ioc *ioc = iocg->ioc;
3026
3027         if (!ioc->enabled)
3028                 return;
3029
3030         if (iocg->level == 0) {
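                /*
                 * vp10k is vtime_base_rate in 1/10000ths of nominal device
                 * speed, e.g. 12500 prints as "cost.vrate=125.00".
                 */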
3031                 unsigned vp10k = DIV64_U64_ROUND_CLOSEST(
3032                         ioc->vtime_base_rate * 10000,
3033                         VTIME_PER_USEC);
3034                 seq_printf(s, " cost.vrate=%u.%02u", vp10k / 100, vp10k % 100);
3035         }
3036
3037         seq_printf(s, " cost.usage=%llu", iocg->last_stat.usage_us);
3038
3039         if (blkcg_debug_stats)
3040                 seq_printf(s, " cost.wait=%llu cost.indebt=%llu cost.indelay=%llu",
3041                         iocg->last_stat.wait_us,
3042                         iocg->last_stat.indebt_us,
3043                         iocg->last_stat.indelay_us);
3044 }
3045
3046 static u64 ioc_weight_prfill(struct seq_file *sf, struct blkg_policy_data *pd,
3047                              int off)
3048 {
3049         const char *dname = blkg_dev_name(pd->blkg);
3050         struct ioc_gq *iocg = pd_to_iocg(pd);
3051
3052         if (dname && iocg->cfg_weight)
3053                 seq_printf(sf, "%s %u\n", dname, iocg->cfg_weight / WEIGHT_ONE);
3054         return 0;
3055 }
3056
3058 static int ioc_weight_show(struct seq_file *sf, void *v)
3059 {
3060         struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
3061         struct ioc_cgrp *iocc = blkcg_to_iocc(blkcg);
3062
3063         seq_printf(sf, "default %u\n", iocc->dfl_weight / WEIGHT_ONE);
3064         blkcg_print_blkgs(sf, blkcg, ioc_weight_prfill,
3065                           &blkcg_policy_iocost, seq_cft(sf)->private, false);
3066         return 0;
3067 }
3068
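/*
 * io.weight takes "default $WEIGHT" for the cgroup-wide default or
 * "$MAJ:$MIN $WEIGHT" for a per-device override, e.g. (with hypothetical
 * device numbers):
 *
 *   echo "default 200" > io.weight
 *   echo "8:16 50" > io.weight
 */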
3069 static ssize_t ioc_weight_write(struct kernfs_open_file *of, char *buf,
3070                                 size_t nbytes, loff_t off)
3071 {
3072         struct blkcg *blkcg = css_to_blkcg(of_css(of));
3073         struct ioc_cgrp *iocc = blkcg_to_iocc(blkcg);
3074         struct blkg_conf_ctx ctx;
3075         struct ioc_now now;
3076         struct ioc_gq *iocg;
3077         u32 v;
3078         int ret;
3079
3080         if (!strchr(buf, ':')) {
3081                 struct blkcg_gq *blkg;
3082
3083                 if (!sscanf(buf, "default %u", &v) && !sscanf(buf, "%u", &v))
3084                         return -EINVAL;
3085
3086                 if (v < CGROUP_WEIGHT_MIN || v > CGROUP_WEIGHT_MAX)
3087                         return -EINVAL;
3088
3089                 spin_lock_irq(&blkcg->lock);
3090                 iocc->dfl_weight = v * WEIGHT_ONE;
3091                 hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
3092                         struct ioc_gq *iocg = blkg_to_iocg(blkg);
3093
3094                         if (iocg) {
3095                                 spin_lock(&iocg->ioc->lock);
3096                                 ioc_now(iocg->ioc, &now);
3097                                 weight_updated(iocg, &now);
3098                                 spin_unlock(&iocg->ioc->lock);
3099                         }
3100                 }
3101                 spin_unlock_irq(&blkcg->lock);
3102
3103                 return nbytes;
3104         }
3105
3106         ret = blkg_conf_prep(blkcg, &blkcg_policy_iocost, buf, &ctx);
3107         if (ret)
3108                 return ret;
3109
3110         iocg = blkg_to_iocg(ctx.blkg);
3111
3112         if (!strncmp(ctx.body, "default", 7)) {
3113                 v = 0;
3114         } else {
3115                 if (!sscanf(ctx.body, "%u", &v))
3116                         goto einval;
3117                 if (v < CGROUP_WEIGHT_MIN || v > CGROUP_WEIGHT_MAX)
3118                         goto einval;
3119         }
3120
3121         spin_lock(&iocg->ioc->lock);
3122         iocg->cfg_weight = v * WEIGHT_ONE;
3123         ioc_now(iocg->ioc, &now);
3124         weight_updated(iocg, &now);
3125         spin_unlock(&iocg->ioc->lock);
3126
3127         blkg_conf_finish(&ctx);
3128         return nbytes;
3129
3130 einval:
3131         blkg_conf_finish(&ctx);
3132         return -EINVAL;
3133 }
3134
3135 static u64 ioc_qos_prfill(struct seq_file *sf, struct blkg_policy_data *pd,
3136                           int off)
3137 {
3138         const char *dname = blkg_dev_name(pd->blkg);
3139         struct ioc *ioc = pd_to_iocg(pd)->ioc;
3140
3141         if (!dname)
3142                 return 0;
3143
3144         seq_printf(sf, "%s enable=%d ctrl=%s rpct=%u.%02u rlat=%u wpct=%u.%02u wlat=%u min=%u.%02u max=%u.%02u\n",
3145                    dname, ioc->enabled, ioc->user_qos_params ? "user" : "auto",
3146                    ioc->params.qos[QOS_RPPM] / 10000,
3147                    ioc->params.qos[QOS_RPPM] % 10000 / 100,
3148                    ioc->params.qos[QOS_RLAT],
3149                    ioc->params.qos[QOS_WPPM] / 10000,
3150                    ioc->params.qos[QOS_WPPM] % 10000 / 100,
3151                    ioc->params.qos[QOS_WLAT],
3152                    ioc->params.qos[QOS_MIN] / 10000,
3153                    ioc->params.qos[QOS_MIN] % 10000 / 100,
3154                    ioc->params.qos[QOS_MAX] / 10000,
3155                    ioc->params.qos[QOS_MAX] % 10000 / 100);
3156         return 0;
3157 }
3158
3159 static int ioc_qos_show(struct seq_file *sf, void *v)
3160 {
3161         struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
3162
3163         blkcg_print_blkgs(sf, blkcg, ioc_qos_prfill,
3164                           &blkcg_policy_iocost, seq_cft(sf)->private, false);
3165         return 0;
3166 }
3167
3168 static const match_table_t qos_ctrl_tokens = {
3169         { QOS_ENABLE,           "enable=%u"     },
3170         { QOS_CTRL,             "ctrl=%s"       },
3171         { NR_QOS_CTRL_PARAMS,   NULL            },
3172 };
3173
3174 static const match_table_t qos_tokens = {
3175         { QOS_RPPM,             "rpct=%s"       },
3176         { QOS_RLAT,             "rlat=%u"       },
3177         { QOS_WPPM,             "wpct=%s"       },
3178         { QOS_WLAT,             "wlat=%u"       },
3179         { QOS_MIN,              "min=%s"        },
3180         { QOS_MAX,              "max=%s"        },
3181         { NR_QOS_PARAMS,        NULL            },
3182 };
3183
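/*
 * Example io.cost.qos write enabling the controller with user-defined QoS
 * targets (hypothetical device numbers and values):
 *
 *   echo "8:16 enable=1 ctrl=user rpct=95.00 rlat=5000 wpct=95.00 wlat=5000 min=50.00 max=150.00" > io.cost.qos
 */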
3184 static ssize_t ioc_qos_write(struct kernfs_open_file *of, char *input,
3185                              size_t nbytes, loff_t off)
3186 {
3187         struct block_device *bdev;
3188         struct gendisk *disk;
3189         struct ioc *ioc;
3190         u32 qos[NR_QOS_PARAMS];
3191         bool enable, user;
3192         char *p;
3193         int ret;
3194
3195         bdev = blkcg_conf_open_bdev(&input);
3196         if (IS_ERR(bdev))
3197                 return PTR_ERR(bdev);
3198
3199         disk = bdev->bd_disk;
3200         ioc = q_to_ioc(disk->queue);
3201         if (!ioc) {
3202                 ret = blk_iocost_init(disk);
3203                 if (ret)
3204                         goto err;
3205                 ioc = q_to_ioc(disk->queue);
3206         }
3207
3208         spin_lock_irq(&ioc->lock);
3209         memcpy(qos, ioc->params.qos, sizeof(qos));
3210         enable = ioc->enabled;
3211         user = ioc->user_qos_params;
3212         spin_unlock_irq(&ioc->lock);
3213
3214         while ((p = strsep(&input, " \t\n"))) {
3215                 substring_t args[MAX_OPT_ARGS];
3216                 char buf[32];
3217                 int tok;
3218                 s64 v;
3219
3220                 if (!*p)
3221                         continue;
3222
3223                 switch (match_token(p, qos_ctrl_tokens, args)) {
3224                 case QOS_ENABLE:
3225                         match_u64(&args[0], &v);
3226                         enable = v;
3227                         continue;
3228                 case QOS_CTRL:
3229                         match_strlcpy(buf, &args[0], sizeof(buf));
3230                         if (!strcmp(buf, "auto"))
3231                                 user = false;
3232                         else if (!strcmp(buf, "user"))
3233                                 user = true;
3234                         else
3235                                 goto einval;
3236                         continue;
3237                 }
3238
3239                 tok = match_token(p, qos_tokens, args);
3240                 switch (tok) {
3241                 case QOS_RPPM:
3242                 case QOS_WPPM:
3243                         if (match_strlcpy(buf, &args[0], sizeof(buf)) >=
3244                             sizeof(buf))
3245                                 goto einval;
3246                         if (cgroup_parse_float(buf, 2, &v))
3247                                 goto einval;
3248                         if (v < 0 || v > 10000)
3249                                 goto einval;
3250                         qos[tok] = v * 100;
3251                         break;
3252                 case QOS_RLAT:
3253                 case QOS_WLAT:
3254                         if (match_u64(&args[0], &v))
3255                                 goto einval;
3256                         qos[tok] = v;
3257                         break;
3258                 case QOS_MIN:
3259                 case QOS_MAX:
3260                         if (match_strlcpy(buf, &args[0], sizeof(buf)) >=
3261                             sizeof(buf))
3262                                 goto einval;
3263                         if (cgroup_parse_float(buf, 2, &v))
3264                                 goto einval;
3265                         if (v < 0)
3266                                 goto einval;
3267                         qos[tok] = clamp_t(s64, v * 100,
3268                                            VRATE_MIN_PPM, VRATE_MAX_PPM);
3269                         break;
3270                 default:
3271                         goto einval;
3272                 }
3273                 user = true;
3274         }
3275
3276         if (qos[QOS_MIN] > qos[QOS_MAX])
3277                 goto einval;
3278
3279         spin_lock_irq(&ioc->lock);
3280
3281         if (enable) {
3282                 blk_stat_enable_accounting(disk->queue);
3283                 blk_queue_flag_set(QUEUE_FLAG_RQ_ALLOC_TIME, disk->queue);
3284                 ioc->enabled = true;
3285         } else {
3286                 blk_queue_flag_clear(QUEUE_FLAG_RQ_ALLOC_TIME, disk->queue);
3287                 ioc->enabled = false;
3288         }
3289
3290         if (user) {
3291                 memcpy(ioc->params.qos, qos, sizeof(qos));
3292                 ioc->user_qos_params = true;
3293         } else {
3294                 ioc->user_qos_params = false;
3295         }
3296
3297         ioc_refresh_params(ioc, true);
3298         spin_unlock_irq(&ioc->lock);
3299
3300         blkdev_put_no_open(bdev);
3301         return nbytes;
3302 einval:
3303         ret = -EINVAL;
3304 err:
3305         blkdev_put_no_open(bdev);
3306         return ret;
3307 }
3308
3309 static u64 ioc_cost_model_prfill(struct seq_file *sf,
3310                                  struct blkg_policy_data *pd, int off)
3311 {
3312         const char *dname = blkg_dev_name(pd->blkg);
3313         struct ioc *ioc = pd_to_iocg(pd)->ioc;
3314         u64 *u = ioc->params.i_lcoefs;
3315
3316         if (!dname)
3317                 return 0;
3318
3319         seq_printf(sf, "%s ctrl=%s model=linear "
3320                    "rbps=%llu rseqiops=%llu rrandiops=%llu "
3321                    "wbps=%llu wseqiops=%llu wrandiops=%llu\n",
3322                    dname, ioc->user_cost_model ? "user" : "auto",
3323                    u[I_LCOEF_RBPS], u[I_LCOEF_RSEQIOPS], u[I_LCOEF_RRANDIOPS],
3324                    u[I_LCOEF_WBPS], u[I_LCOEF_WSEQIOPS], u[I_LCOEF_WRANDIOPS]);
3325         return 0;
3326 }
3327
3328 static int ioc_cost_model_show(struct seq_file *sf, void *v)
3329 {
3330         struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
3331
3332         blkcg_print_blkgs(sf, blkcg, ioc_cost_model_prfill,
3333                           &blkcg_policy_iocost, seq_cft(sf)->private, false);
3334         return 0;
3335 }
3336
3337 static const match_table_t cost_ctrl_tokens = {
3338         { COST_CTRL,            "ctrl=%s"       },
3339         { COST_MODEL,           "model=%s"      },
3340         { NR_COST_CTRL_PARAMS,  NULL            },
3341 };
3342
3343 static const match_table_t i_lcoef_tokens = {
3344         { I_LCOEF_RBPS,         "rbps=%u"       },
3345         { I_LCOEF_RSEQIOPS,     "rseqiops=%u"   },
3346         { I_LCOEF_RRANDIOPS,    "rrandiops=%u"  },
3347         { I_LCOEF_WBPS,         "wbps=%u"       },
3348         { I_LCOEF_WSEQIOPS,     "wseqiops=%u"   },
3349         { I_LCOEF_WRANDIOPS,    "wrandiops=%u"  },
3350         { NR_I_LCOEFS,          NULL            },
3351 };
3352
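/*
 * Example io.cost.model write supplying user linear-model coefficients
 * (hypothetical device numbers and values; tools/cgroup/iocost_coef_gen.py
 * can generate device-specific ones):
 *
 *   echo "8:16 ctrl=user model=linear rbps=2000000000 rseqiops=100000 rrandiops=80000 wbps=1000000000 wseqiops=50000 wrandiops=20000" > io.cost.model
 */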
3353 static ssize_t ioc_cost_model_write(struct kernfs_open_file *of, char *input,
3354                                     size_t nbytes, loff_t off)
3355 {
3356         struct block_device *bdev;
3357         struct ioc *ioc;
3358         u64 u[NR_I_LCOEFS];
3359         bool user;
3360         char *p;
3361         int ret;
3362
3363         bdev = blkcg_conf_open_bdev(&input);
3364         if (IS_ERR(bdev))
3365                 return PTR_ERR(bdev);
3366
3367         ioc = q_to_ioc(bdev_get_queue(bdev));
3368         if (!ioc) {
3369                 ret = blk_iocost_init(bdev->bd_disk);
3370                 if (ret)
3371                         goto err;
3372                 ioc = q_to_ioc(bdev_get_queue(bdev));
3373         }
3374
3375         spin_lock_irq(&ioc->lock);
3376         memcpy(u, ioc->params.i_lcoefs, sizeof(u));
3377         user = ioc->user_cost_model;
3378         spin_unlock_irq(&ioc->lock);
3379
3380         while ((p = strsep(&input, " \t\n"))) {
3381                 substring_t args[MAX_OPT_ARGS];
3382                 char buf[32];
3383                 int tok;
3384                 u64 v;
3385
3386                 if (!*p)
3387                         continue;
3388
3389                 switch (match_token(p, cost_ctrl_tokens, args)) {
3390                 case COST_CTRL:
3391                         match_strlcpy(buf, &args[0], sizeof(buf));
3392                         if (!strcmp(buf, "auto"))
3393                                 user = false;
3394                         else if (!strcmp(buf, "user"))
3395                                 user = true;
3396                         else
3397                                 goto einval;
3398                         continue;
3399                 case COST_MODEL:
3400                         match_strlcpy(buf, &args[0], sizeof(buf));
3401                         if (strcmp(buf, "linear"))
3402                                 goto einval;
3403                         continue;
3404                 }
3405
3406                 tok = match_token(p, i_lcoef_tokens, args);
3407                 if (tok == NR_I_LCOEFS)
3408                         goto einval;
3409                 if (match_u64(&args[0], &v))
3410                         goto einval;
3411                 u[tok] = v;
3412                 user = true;
3413         }
3414
3415         spin_lock_irq(&ioc->lock);
3416         if (user) {
3417                 memcpy(ioc->params.i_lcoefs, u, sizeof(u));
3418                 ioc->user_cost_model = true;
3419         } else {
3420                 ioc->user_cost_model = false;
3421         }
3422         ioc_refresh_params(ioc, true);
3423         spin_unlock_irq(&ioc->lock);
3424
3425         blkdev_put_no_open(bdev);
3426         return nbytes;
3427
3428 einval:
3429         ret = -EINVAL;
3430 err:
3431         blkdev_put_no_open(bdev);
3432         return ret;
3433 }
3434
3435 static struct cftype ioc_files[] = {
3436         {
3437                 .name = "weight",
3438                 .flags = CFTYPE_NOT_ON_ROOT,
3439                 .seq_show = ioc_weight_show,
3440                 .write = ioc_weight_write,
3441         },
3442         {
3443                 .name = "cost.qos",
3444                 .flags = CFTYPE_ONLY_ON_ROOT,
3445                 .seq_show = ioc_qos_show,
3446                 .write = ioc_qos_write,
3447         },
3448         {
3449                 .name = "cost.model",
3450                 .flags = CFTYPE_ONLY_ON_ROOT,
3451                 .seq_show = ioc_cost_model_show,
3452                 .write = ioc_cost_model_write,
3453         },
3454         {}
3455 };
3456
3457 static struct blkcg_policy blkcg_policy_iocost = {
3458         .dfl_cftypes    = ioc_files,
3459         .cpd_alloc_fn   = ioc_cpd_alloc,
3460         .cpd_free_fn    = ioc_cpd_free,
3461         .pd_alloc_fn    = ioc_pd_alloc,
3462         .pd_init_fn     = ioc_pd_init,
3463         .pd_free_fn     = ioc_pd_free,
3464         .pd_stat_fn     = ioc_pd_stat,
3465 };
3466
3467 static int __init ioc_init(void)
3468 {
3469         return blkcg_policy_register(&blkcg_policy_iocost);
3470 }
3471
3472 static void __exit ioc_exit(void)
3473 {
3474         blkcg_policy_unregister(&blkcg_policy_iocost);
3475 }
3476
3477 module_init(ioc_init);
3478 module_exit(ioc_exit);