GNU Linux-libre 6.1.24-gnu
[releases.git] / block / blk-iocost.c
1 /* SPDX-License-Identifier: GPL-2.0
2  *
3  * IO cost model based controller.
4  *
5  * Copyright (C) 2019 Tejun Heo <tj@kernel.org>
6  * Copyright (C) 2019 Andy Newell <newella@fb.com>
7  * Copyright (C) 2019 Facebook
8  *
9  * One challenge of controlling IO resources is the lack of trivially
10  * observable cost metric.  This is distinguished from CPU and memory where
11  * wallclock time and the number of bytes can serve as accurate enough
12  * approximations.
13  *
14  * Bandwidth and iops are the most commonly used metrics for IO devices but
15  * depending on the type and specifics of the device, different IO patterns
16  * easily lead to multiple orders of magnitude variations rendering them
17  * useless for the purpose of IO capacity distribution.  While on-device
18  * time, with a lot of clutches, could serve as a useful approximation for
19  * non-queued rotational devices, this is no longer viable with modern
20  * devices, even the rotational ones.
21  *
22  * While there is no cost metric we can trivially observe, it isn't a
23  * complete mystery.  For example, on a rotational device, seek cost
24  * dominates while a contiguous transfer contributes a smaller amount
25  * proportional to the size.  If we can characterize at least the relative
26  * costs of these different types of IOs, it should be possible to
27  * implement a reasonable work-conserving proportional IO resource
28  * distribution.
29  *
30  * 1. IO Cost Model
31  *
32  * IO cost model estimates the cost of an IO given its basic parameters and
33  * history (e.g. the end sector of the last IO).  The cost is measured in
34  * device time.  If a given IO is estimated to cost 10ms, the device should
35  * be able to process ~100 of those IOs in a second.
36  *
37  * Currently, there's only one builtin cost model - linear.  Each IO is
38  * classified as sequential or random and given a base cost accordingly.
39  * On top of that, a size cost proportional to the length of the IO is
40  * added.  While simple, this model captures the operational
41  * characteristics of a wide varienty of devices well enough.  Default
42  * parameters for several different classes of devices are provided and the
43  * parameters can be configured from userspace via
44  * /sys/fs/cgroup/io.cost.model.
45  *
46  * If needed, tools/cgroup/iocost_coef_gen.py can be used to generate
47  * device-specific coefficients.
48  *
49  * 2. Control Strategy
50  *
51  * The device virtual time (vtime) is used as the primary control metric.
52  * The control strategy is composed of the following three parts.
53  *
54  * 2-1. Vtime Distribution
55  *
56  * When a cgroup becomes active in terms of IOs, its hierarchical share is
57  * calculated.  Please consider the following hierarchy where the numbers
58  * inside parentheses denote the configured weights.
59  *
60  *           root
61  *         /       \
62  *      A (w:100)  B (w:300)
63  *      /       \
64  *  A0 (w:100)  A1 (w:100)
65  *
66  * If B is idle and only A0 and A1 are actively issuing IOs, as the two are
67  * of equal weight, each gets 50% share.  If then B starts issuing IOs, B
68  * gets 300/(100+300) or 75% share, and A0 and A1 equally splits the rest,
69  * 12.5% each.  The distribution mechanism only cares about these flattened
70  * shares.  They're called hweights (hierarchical weights) and always add
71  * upto 1 (WEIGHT_ONE).
72  *
73  * A given cgroup's vtime runs slower in inverse proportion to its hweight.
74  * For example, with 12.5% weight, A0's time runs 8 times slower (100/12.5)
75  * against the device vtime - an IO which takes 10ms on the underlying
76  * device is considered to take 80ms on A0.
77  *
78  * This constitutes the basis of IO capacity distribution.  Each cgroup's
79  * vtime is running at a rate determined by its hweight.  A cgroup tracks
80  * the vtime consumed by past IOs and can issue a new IO if doing so
81  * wouldn't outrun the current device vtime.  Otherwise, the IO is
82  * suspended until the vtime has progressed enough to cover it.
83  *
84  * 2-2. Vrate Adjustment
85  *
86  * It's unrealistic to expect the cost model to be perfect.  There are too
87  * many devices and even on the same device the overall performance
88  * fluctuates depending on numerous factors such as IO mixture and device
89  * internal garbage collection.  The controller needs to adapt dynamically.
90  *
91  * This is achieved by adjusting the overall IO rate according to how busy
92  * the device is.  If the device becomes overloaded, we're sending down too
93  * many IOs and should generally slow down.  If there are waiting issuers
94  * but the device isn't saturated, we're issuing too few and should
95  * generally speed up.
96  *
97  * To slow down, we lower the vrate - the rate at which the device vtime
98  * passes compared to the wall clock.  For example, if the vtime is running
99  * at the vrate of 75%, all cgroups added up would only be able to issue
100  * 750ms worth of IOs per second, and vice-versa for speeding up.
101  *
102  * Device business is determined using two criteria - rq wait and
103  * completion latencies.
104  *
105  * When a device gets saturated, the on-device and then the request queues
106  * fill up and a bio which is ready to be issued has to wait for a request
107  * to become available.  When this delay becomes noticeable, it's a clear
108  * indication that the device is saturated and we lower the vrate.  This
109  * saturation signal is fairly conservative as it only triggers when both
110  * hardware and software queues are filled up, and is used as the default
111  * busy signal.
112  *
113  * As devices can have deep queues and be unfair in how the queued commands
114  * are executed, soley depending on rq wait may not result in satisfactory
115  * control quality.  For a better control quality, completion latency QoS
116  * parameters can be configured so that the device is considered saturated
117  * if N'th percentile completion latency rises above the set point.
118  *
119  * The completion latency requirements are a function of both the
120  * underlying device characteristics and the desired IO latency quality of
121  * service.  There is an inherent trade-off - the tighter the latency QoS,
122  * the higher the bandwidth lossage.  Latency QoS is disabled by default
123  * and can be set through /sys/fs/cgroup/io.cost.qos.
124  *
125  * 2-3. Work Conservation
126  *
127  * Imagine two cgroups A and B with equal weights.  A is issuing a small IO
128  * periodically while B is sending out enough parallel IOs to saturate the
129  * device on its own.  Let's say A's usage amounts to 100ms worth of IO
130  * cost per second, i.e., 10% of the device capacity.  The naive
131  * distribution of half and half would lead to 60% utilization of the
132  * device, a significant reduction in the total amount of work done
133  * compared to free-for-all competition.  This is too high a cost to pay
134  * for IO control.
135  *
136  * To conserve the total amount of work done, we keep track of how much
137  * each active cgroup is actually using and yield part of its weight if
138  * there are other cgroups which can make use of it.  In the above case,
139  * A's weight will be lowered so that it hovers above the actual usage and
140  * B would be able to use the rest.
141  *
142  * As we don't want to penalize a cgroup for donating its weight, the
143  * surplus weight adjustment factors in a margin and has an immediate
144  * snapback mechanism in case the cgroup needs more IO vtime for itself.
145  *
146  * Note that adjusting down surplus weights has the same effects as
147  * accelerating vtime for other cgroups and work conservation can also be
148  * implemented by adjusting vrate dynamically.  However, squaring who can
149  * donate and should take back how much requires hweight propagations
150  * anyway making it easier to implement and understand as a separate
151  * mechanism.
152  *
153  * 3. Monitoring
154  *
155  * Instead of debugfs or other clumsy monitoring mechanisms, this
156  * controller uses a drgn based monitoring script -
157  * tools/cgroup/iocost_monitor.py.  For details on drgn, please see
158  * https://github.com/osandov/drgn.  The output looks like the following.
159  *
160  *  sdb RUN   per=300ms cur_per=234.218:v203.695 busy= +1 vrate= 62.12%
161  *                 active      weight      hweight% inflt% dbt  delay usages%
162  *  test/a              *    50/   50  33.33/ 33.33  27.65   2  0*041 033:033:033
163  *  test/b              *   100/  100  66.67/ 66.67  17.56   0  0*000 066:079:077
164  *
165  * - per        : Timer period
166  * - cur_per    : Internal wall and device vtime clock
167  * - vrate      : Device virtual time rate against wall clock
168  * - weight     : Surplus-adjusted and configured weights
169  * - hweight    : Surplus-adjusted and configured hierarchical weights
170  * - inflt      : The percentage of in-flight IO cost at the end of last period
171  * - del_ms     : Deferred issuer delay induction level and duration
172  * - usages     : Usage history
173  */
174
175 #include <linux/kernel.h>
176 #include <linux/module.h>
177 #include <linux/timer.h>
178 #include <linux/time64.h>
179 #include <linux/parser.h>
180 #include <linux/sched/signal.h>
181 #include <asm/local.h>
182 #include <asm/local64.h>
183 #include "blk-rq-qos.h"
184 #include "blk-stat.h"
185 #include "blk-wbt.h"
186 #include "blk-cgroup.h"
187
188 #ifdef CONFIG_TRACEPOINTS
189
190 /* copied from TRACE_CGROUP_PATH, see cgroup-internal.h */
191 #define TRACE_IOCG_PATH_LEN 1024
192 static DEFINE_SPINLOCK(trace_iocg_path_lock);
193 static char trace_iocg_path[TRACE_IOCG_PATH_LEN];
194
195 #define TRACE_IOCG_PATH(type, iocg, ...)                                        \
196         do {                                                                    \
197                 unsigned long flags;                                            \
198                 if (trace_iocost_##type##_enabled()) {                          \
199                         spin_lock_irqsave(&trace_iocg_path_lock, flags);        \
200                         cgroup_path(iocg_to_blkg(iocg)->blkcg->css.cgroup,      \
201                                     trace_iocg_path, TRACE_IOCG_PATH_LEN);      \
202                         trace_iocost_##type(iocg, trace_iocg_path,              \
203                                               ##__VA_ARGS__);                   \
204                         spin_unlock_irqrestore(&trace_iocg_path_lock, flags);   \
205                 }                                                               \
206         } while (0)
207
208 #else   /* CONFIG_TRACE_POINTS */
209 #define TRACE_IOCG_PATH(type, iocg, ...)        do { } while (0)
210 #endif  /* CONFIG_TRACE_POINTS */
211
212 enum {
213         MILLION                 = 1000000,
214
215         /* timer period is calculated from latency requirements, bound it */
216         MIN_PERIOD              = USEC_PER_MSEC,
217         MAX_PERIOD              = USEC_PER_SEC,
218
219         /*
220          * iocg->vtime is targeted at 50% behind the device vtime, which
221          * serves as its IO credit buffer.  Surplus weight adjustment is
222          * immediately canceled if the vtime margin runs below 10%.
223          */
224         MARGIN_MIN_PCT          = 10,
225         MARGIN_LOW_PCT          = 20,
226         MARGIN_TARGET_PCT       = 50,
227
228         INUSE_ADJ_STEP_PCT      = 25,
229
230         /* Have some play in timer operations */
231         TIMER_SLACK_PCT         = 1,
232
233         /* 1/64k is granular enough and can easily be handled w/ u32 */
234         WEIGHT_ONE              = 1 << 16,
235
236         /*
237          * As vtime is used to calculate the cost of each IO, it needs to
238          * be fairly high precision.  For example, it should be able to
239          * represent the cost of a single page worth of discard with
240          * suffificient accuracy.  At the same time, it should be able to
241          * represent reasonably long enough durations to be useful and
242          * convenient during operation.
243          *
244          * 1s worth of vtime is 2^37.  This gives us both sub-nanosecond
245          * granularity and days of wrap-around time even at extreme vrates.
246          */
247         VTIME_PER_SEC_SHIFT     = 37,
248         VTIME_PER_SEC           = 1LLU << VTIME_PER_SEC_SHIFT,
249         VTIME_PER_USEC          = VTIME_PER_SEC / USEC_PER_SEC,
250         VTIME_PER_NSEC          = VTIME_PER_SEC / NSEC_PER_SEC,
251
252         /* bound vrate adjustments within two orders of magnitude */
253         VRATE_MIN_PPM           = 10000,        /* 1% */
254         VRATE_MAX_PPM           = 100000000,    /* 10000% */
255
256         VRATE_MIN               = VTIME_PER_USEC * VRATE_MIN_PPM / MILLION,
257         VRATE_CLAMP_ADJ_PCT     = 4,
258
259         /* if IOs end up waiting for requests, issue less */
260         RQ_WAIT_BUSY_PCT        = 5,
261
262         /* unbusy hysterisis */
263         UNBUSY_THR_PCT          = 75,
264
265         /*
266          * The effect of delay is indirect and non-linear and a huge amount of
267          * future debt can accumulate abruptly while unthrottled. Linearly scale
268          * up delay as debt is going up and then let it decay exponentially.
269          * This gives us quick ramp ups while delay is accumulating and long
270          * tails which can help reducing the frequency of debt explosions on
271          * unthrottle. The parameters are experimentally determined.
272          *
273          * The delay mechanism provides adequate protection and behavior in many
274          * cases. However, this is far from ideal and falls shorts on both
275          * fronts. The debtors are often throttled too harshly costing a
276          * significant level of fairness and possibly total work while the
277          * protection against their impacts on the system can be choppy and
278          * unreliable.
279          *
280          * The shortcoming primarily stems from the fact that, unlike for page
281          * cache, the kernel doesn't have well-defined back-pressure propagation
282          * mechanism and policies for anonymous memory. Fully addressing this
283          * issue will likely require substantial improvements in the area.
284          */
285         MIN_DELAY_THR_PCT       = 500,
286         MAX_DELAY_THR_PCT       = 25000,
287         MIN_DELAY               = 250,
288         MAX_DELAY               = 250 * USEC_PER_MSEC,
289
290         /* halve debts if avg usage over 100ms is under 50% */
291         DFGV_USAGE_PCT          = 50,
292         DFGV_PERIOD             = 100 * USEC_PER_MSEC,
293
294         /* don't let cmds which take a very long time pin lagging for too long */
295         MAX_LAGGING_PERIODS     = 10,
296
297         /* switch iff the conditions are met for longer than this */
298         AUTOP_CYCLE_NSEC        = 10LLU * NSEC_PER_SEC,
299
300         /*
301          * Count IO size in 4k pages.  The 12bit shift helps keeping
302          * size-proportional components of cost calculation in closer
303          * numbers of digits to per-IO cost components.
304          */
305         IOC_PAGE_SHIFT          = 12,
306         IOC_PAGE_SIZE           = 1 << IOC_PAGE_SHIFT,
307         IOC_SECT_TO_PAGE_SHIFT  = IOC_PAGE_SHIFT - SECTOR_SHIFT,
308
309         /* if apart further than 16M, consider randio for linear model */
310         LCOEF_RANDIO_PAGES      = 4096,
311 };
312
313 enum ioc_running {
314         IOC_IDLE,
315         IOC_RUNNING,
316         IOC_STOP,
317 };
318
319 /* io.cost.qos controls including per-dev enable of the whole controller */
320 enum {
321         QOS_ENABLE,
322         QOS_CTRL,
323         NR_QOS_CTRL_PARAMS,
324 };
325
326 /* io.cost.qos params */
327 enum {
328         QOS_RPPM,
329         QOS_RLAT,
330         QOS_WPPM,
331         QOS_WLAT,
332         QOS_MIN,
333         QOS_MAX,
334         NR_QOS_PARAMS,
335 };
336
337 /* io.cost.model controls */
338 enum {
339         COST_CTRL,
340         COST_MODEL,
341         NR_COST_CTRL_PARAMS,
342 };
343
344 /* builtin linear cost model coefficients */
345 enum {
346         I_LCOEF_RBPS,
347         I_LCOEF_RSEQIOPS,
348         I_LCOEF_RRANDIOPS,
349         I_LCOEF_WBPS,
350         I_LCOEF_WSEQIOPS,
351         I_LCOEF_WRANDIOPS,
352         NR_I_LCOEFS,
353 };
354
355 enum {
356         LCOEF_RPAGE,
357         LCOEF_RSEQIO,
358         LCOEF_RRANDIO,
359         LCOEF_WPAGE,
360         LCOEF_WSEQIO,
361         LCOEF_WRANDIO,
362         NR_LCOEFS,
363 };
364
365 enum {
366         AUTOP_INVALID,
367         AUTOP_HDD,
368         AUTOP_SSD_QD1,
369         AUTOP_SSD_DFL,
370         AUTOP_SSD_FAST,
371 };
372
373 struct ioc_params {
374         u32                             qos[NR_QOS_PARAMS];
375         u64                             i_lcoefs[NR_I_LCOEFS];
376         u64                             lcoefs[NR_LCOEFS];
377         u32                             too_fast_vrate_pct;
378         u32                             too_slow_vrate_pct;
379 };
380
381 struct ioc_margins {
382         s64                             min;
383         s64                             low;
384         s64                             target;
385 };
386
387 struct ioc_missed {
388         local_t                         nr_met;
389         local_t                         nr_missed;
390         u32                             last_met;
391         u32                             last_missed;
392 };
393
394 struct ioc_pcpu_stat {
395         struct ioc_missed               missed[2];
396
397         local64_t                       rq_wait_ns;
398         u64                             last_rq_wait_ns;
399 };
400
401 /* per device */
402 struct ioc {
403         struct rq_qos                   rqos;
404
405         bool                            enabled;
406
407         struct ioc_params               params;
408         struct ioc_margins              margins;
409         u32                             period_us;
410         u32                             timer_slack_ns;
411         u64                             vrate_min;
412         u64                             vrate_max;
413
414         spinlock_t                      lock;
415         struct timer_list               timer;
416         struct list_head                active_iocgs;   /* active cgroups */
417         struct ioc_pcpu_stat __percpu   *pcpu_stat;
418
419         enum ioc_running                running;
420         atomic64_t                      vtime_rate;
421         u64                             vtime_base_rate;
422         s64                             vtime_err;
423
424         seqcount_spinlock_t             period_seqcount;
425         u64                             period_at;      /* wallclock starttime */
426         u64                             period_at_vtime; /* vtime starttime */
427
428         atomic64_t                      cur_period;     /* inc'd each period */
429         int                             busy_level;     /* saturation history */
430
431         bool                            weights_updated;
432         atomic_t                        hweight_gen;    /* for lazy hweights */
433
434         /* debt forgivness */
435         u64                             dfgv_period_at;
436         u64                             dfgv_period_rem;
437         u64                             dfgv_usage_us_sum;
438
439         u64                             autop_too_fast_at;
440         u64                             autop_too_slow_at;
441         int                             autop_idx;
442         bool                            user_qos_params:1;
443         bool                            user_cost_model:1;
444 };
445
446 struct iocg_pcpu_stat {
447         local64_t                       abs_vusage;
448 };
449
450 struct iocg_stat {
451         u64                             usage_us;
452         u64                             wait_us;
453         u64                             indebt_us;
454         u64                             indelay_us;
455 };
456
457 /* per device-cgroup pair */
458 struct ioc_gq {
459         struct blkg_policy_data         pd;
460         struct ioc                      *ioc;
461
462         /*
463          * A iocg can get its weight from two sources - an explicit
464          * per-device-cgroup configuration or the default weight of the
465          * cgroup.  `cfg_weight` is the explicit per-device-cgroup
466          * configuration.  `weight` is the effective considering both
467          * sources.
468          *
469          * When an idle cgroup becomes active its `active` goes from 0 to
470          * `weight`.  `inuse` is the surplus adjusted active weight.
471          * `active` and `inuse` are used to calculate `hweight_active` and
472          * `hweight_inuse`.
473          *
474          * `last_inuse` remembers `inuse` while an iocg is idle to persist
475          * surplus adjustments.
476          *
477          * `inuse` may be adjusted dynamically during period. `saved_*` are used
478          * to determine and track adjustments.
479          */
480         u32                             cfg_weight;
481         u32                             weight;
482         u32                             active;
483         u32                             inuse;
484
485         u32                             last_inuse;
486         s64                             saved_margin;
487
488         sector_t                        cursor;         /* to detect randio */
489
490         /*
491          * `vtime` is this iocg's vtime cursor which progresses as IOs are
492          * issued.  If lagging behind device vtime, the delta represents
493          * the currently available IO budget.  If running ahead, the
494          * overage.
495          *
496          * `vtime_done` is the same but progressed on completion rather
497          * than issue.  The delta behind `vtime` represents the cost of
498          * currently in-flight IOs.
499          */
500         atomic64_t                      vtime;
501         atomic64_t                      done_vtime;
502         u64                             abs_vdebt;
503
504         /* current delay in effect and when it started */
505         u64                             delay;
506         u64                             delay_at;
507
508         /*
509          * The period this iocg was last active in.  Used for deactivation
510          * and invalidating `vtime`.
511          */
512         atomic64_t                      active_period;
513         struct list_head                active_list;
514
515         /* see __propagate_weights() and current_hweight() for details */
516         u64                             child_active_sum;
517         u64                             child_inuse_sum;
518         u64                             child_adjusted_sum;
519         int                             hweight_gen;
520         u32                             hweight_active;
521         u32                             hweight_inuse;
522         u32                             hweight_donating;
523         u32                             hweight_after_donation;
524
525         struct list_head                walk_list;
526         struct list_head                surplus_list;
527
528         struct wait_queue_head          waitq;
529         struct hrtimer                  waitq_timer;
530
531         /* timestamp at the latest activation */
532         u64                             activated_at;
533
534         /* statistics */
535         struct iocg_pcpu_stat __percpu  *pcpu_stat;
536         struct iocg_stat                stat;
537         struct iocg_stat                last_stat;
538         u64                             last_stat_abs_vusage;
539         u64                             usage_delta_us;
540         u64                             wait_since;
541         u64                             indebt_since;
542         u64                             indelay_since;
543
544         /* this iocg's depth in the hierarchy and ancestors including self */
545         int                             level;
546         struct ioc_gq                   *ancestors[];
547 };
548
549 /* per cgroup */
550 struct ioc_cgrp {
551         struct blkcg_policy_data        cpd;
552         unsigned int                    dfl_weight;
553 };
554
555 struct ioc_now {
556         u64                             now_ns;
557         u64                             now;
558         u64                             vnow;
559         u64                             vrate;
560 };
561
562 struct iocg_wait {
563         struct wait_queue_entry         wait;
564         struct bio                      *bio;
565         u64                             abs_cost;
566         bool                            committed;
567 };
568
569 struct iocg_wake_ctx {
570         struct ioc_gq                   *iocg;
571         u32                             hw_inuse;
572         s64                             vbudget;
573 };
574
575 static const struct ioc_params autop[] = {
576         [AUTOP_HDD] = {
577                 .qos                            = {
578                         [QOS_RLAT]              =        250000, /* 250ms */
579                         [QOS_WLAT]              =        250000,
580                         [QOS_MIN]               = VRATE_MIN_PPM,
581                         [QOS_MAX]               = VRATE_MAX_PPM,
582                 },
583                 .i_lcoefs                       = {
584                         [I_LCOEF_RBPS]          =     174019176,
585                         [I_LCOEF_RSEQIOPS]      =         41708,
586                         [I_LCOEF_RRANDIOPS]     =           370,
587                         [I_LCOEF_WBPS]          =     178075866,
588                         [I_LCOEF_WSEQIOPS]      =         42705,
589                         [I_LCOEF_WRANDIOPS]     =           378,
590                 },
591         },
592         [AUTOP_SSD_QD1] = {
593                 .qos                            = {
594                         [QOS_RLAT]              =         25000, /* 25ms */
595                         [QOS_WLAT]              =         25000,
596                         [QOS_MIN]               = VRATE_MIN_PPM,
597                         [QOS_MAX]               = VRATE_MAX_PPM,
598                 },
599                 .i_lcoefs                       = {
600                         [I_LCOEF_RBPS]          =     245855193,
601                         [I_LCOEF_RSEQIOPS]      =         61575,
602                         [I_LCOEF_RRANDIOPS]     =          6946,
603                         [I_LCOEF_WBPS]          =     141365009,
604                         [I_LCOEF_WSEQIOPS]      =         33716,
605                         [I_LCOEF_WRANDIOPS]     =         26796,
606                 },
607         },
608         [AUTOP_SSD_DFL] = {
609                 .qos                            = {
610                         [QOS_RLAT]              =         25000, /* 25ms */
611                         [QOS_WLAT]              =         25000,
612                         [QOS_MIN]               = VRATE_MIN_PPM,
613                         [QOS_MAX]               = VRATE_MAX_PPM,
614                 },
615                 .i_lcoefs                       = {
616                         [I_LCOEF_RBPS]          =     488636629,
617                         [I_LCOEF_RSEQIOPS]      =          8932,
618                         [I_LCOEF_RRANDIOPS]     =          8518,
619                         [I_LCOEF_WBPS]          =     427891549,
620                         [I_LCOEF_WSEQIOPS]      =         28755,
621                         [I_LCOEF_WRANDIOPS]     =         21940,
622                 },
623                 .too_fast_vrate_pct             =           500,
624         },
625         [AUTOP_SSD_FAST] = {
626                 .qos                            = {
627                         [QOS_RLAT]              =          5000, /* 5ms */
628                         [QOS_WLAT]              =          5000,
629                         [QOS_MIN]               = VRATE_MIN_PPM,
630                         [QOS_MAX]               = VRATE_MAX_PPM,
631                 },
632                 .i_lcoefs                       = {
633                         [I_LCOEF_RBPS]          =    3102524156LLU,
634                         [I_LCOEF_RSEQIOPS]      =        724816,
635                         [I_LCOEF_RRANDIOPS]     =        778122,
636                         [I_LCOEF_WBPS]          =    1742780862LLU,
637                         [I_LCOEF_WSEQIOPS]      =        425702,
638                         [I_LCOEF_WRANDIOPS]     =        443193,
639                 },
640                 .too_slow_vrate_pct             =            10,
641         },
642 };
643
644 /*
645  * vrate adjust percentages indexed by ioc->busy_level.  We adjust up on
646  * vtime credit shortage and down on device saturation.
647  */
648 static u32 vrate_adj_pct[] =
649         { 0, 0, 0, 0,
650           1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
651           2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
652           4, 4, 4, 4, 4, 4, 4, 4, 8, 8, 8, 8, 8, 8, 8, 8, 16 };
653
654 static struct blkcg_policy blkcg_policy_iocost;
655
656 /* accessors and helpers */
657 static struct ioc *rqos_to_ioc(struct rq_qos *rqos)
658 {
659         return container_of(rqos, struct ioc, rqos);
660 }
661
662 static struct ioc *q_to_ioc(struct request_queue *q)
663 {
664         return rqos_to_ioc(rq_qos_id(q, RQ_QOS_COST));
665 }
666
667 static const char __maybe_unused *ioc_name(struct ioc *ioc)
668 {
669         struct gendisk *disk = ioc->rqos.q->disk;
670
671         if (!disk)
672                 return "<unknown>";
673         return disk->disk_name;
674 }
675
676 static struct ioc_gq *pd_to_iocg(struct blkg_policy_data *pd)
677 {
678         return pd ? container_of(pd, struct ioc_gq, pd) : NULL;
679 }
680
681 static struct ioc_gq *blkg_to_iocg(struct blkcg_gq *blkg)
682 {
683         return pd_to_iocg(blkg_to_pd(blkg, &blkcg_policy_iocost));
684 }
685
686 static struct blkcg_gq *iocg_to_blkg(struct ioc_gq *iocg)
687 {
688         return pd_to_blkg(&iocg->pd);
689 }
690
691 static struct ioc_cgrp *blkcg_to_iocc(struct blkcg *blkcg)
692 {
693         return container_of(blkcg_to_cpd(blkcg, &blkcg_policy_iocost),
694                             struct ioc_cgrp, cpd);
695 }
696
697 /*
698  * Scale @abs_cost to the inverse of @hw_inuse.  The lower the hierarchical
699  * weight, the more expensive each IO.  Must round up.
700  */
701 static u64 abs_cost_to_cost(u64 abs_cost, u32 hw_inuse)
702 {
703         return DIV64_U64_ROUND_UP(abs_cost * WEIGHT_ONE, hw_inuse);
704 }
705
706 /*
707  * The inverse of abs_cost_to_cost().  Must round up.
708  */
709 static u64 cost_to_abs_cost(u64 cost, u32 hw_inuse)
710 {
711         return DIV64_U64_ROUND_UP(cost * hw_inuse, WEIGHT_ONE);
712 }
713
714 static void iocg_commit_bio(struct ioc_gq *iocg, struct bio *bio,
715                             u64 abs_cost, u64 cost)
716 {
717         struct iocg_pcpu_stat *gcs;
718
719         bio->bi_iocost_cost = cost;
720         atomic64_add(cost, &iocg->vtime);
721
722         gcs = get_cpu_ptr(iocg->pcpu_stat);
723         local64_add(abs_cost, &gcs->abs_vusage);
724         put_cpu_ptr(gcs);
725 }
726
727 static void iocg_lock(struct ioc_gq *iocg, bool lock_ioc, unsigned long *flags)
728 {
729         if (lock_ioc) {
730                 spin_lock_irqsave(&iocg->ioc->lock, *flags);
731                 spin_lock(&iocg->waitq.lock);
732         } else {
733                 spin_lock_irqsave(&iocg->waitq.lock, *flags);
734         }
735 }
736
737 static void iocg_unlock(struct ioc_gq *iocg, bool unlock_ioc, unsigned long *flags)
738 {
739         if (unlock_ioc) {
740                 spin_unlock(&iocg->waitq.lock);
741                 spin_unlock_irqrestore(&iocg->ioc->lock, *flags);
742         } else {
743                 spin_unlock_irqrestore(&iocg->waitq.lock, *flags);
744         }
745 }
746
747 #define CREATE_TRACE_POINTS
748 #include <trace/events/iocost.h>
749
750 static void ioc_refresh_margins(struct ioc *ioc)
751 {
752         struct ioc_margins *margins = &ioc->margins;
753         u32 period_us = ioc->period_us;
754         u64 vrate = ioc->vtime_base_rate;
755
756         margins->min = (period_us * MARGIN_MIN_PCT / 100) * vrate;
757         margins->low = (period_us * MARGIN_LOW_PCT / 100) * vrate;
758         margins->target = (period_us * MARGIN_TARGET_PCT / 100) * vrate;
759 }
760
761 /* latency Qos params changed, update period_us and all the dependent params */
762 static void ioc_refresh_period_us(struct ioc *ioc)
763 {
764         u32 ppm, lat, multi, period_us;
765
766         lockdep_assert_held(&ioc->lock);
767
768         /* pick the higher latency target */
769         if (ioc->params.qos[QOS_RLAT] >= ioc->params.qos[QOS_WLAT]) {
770                 ppm = ioc->params.qos[QOS_RPPM];
771                 lat = ioc->params.qos[QOS_RLAT];
772         } else {
773                 ppm = ioc->params.qos[QOS_WPPM];
774                 lat = ioc->params.qos[QOS_WLAT];
775         }
776
777         /*
778          * We want the period to be long enough to contain a healthy number
779          * of IOs while short enough for granular control.  Define it as a
780          * multiple of the latency target.  Ideally, the multiplier should
781          * be scaled according to the percentile so that it would nominally
782          * contain a certain number of requests.  Let's be simpler and
783          * scale it linearly so that it's 2x >= pct(90) and 10x at pct(50).
784          */
785         if (ppm)
786                 multi = max_t(u32, (MILLION - ppm) / 50000, 2);
787         else
788                 multi = 2;
789         period_us = multi * lat;
790         period_us = clamp_t(u32, period_us, MIN_PERIOD, MAX_PERIOD);
791
792         /* calculate dependent params */
793         ioc->period_us = period_us;
794         ioc->timer_slack_ns = div64_u64(
795                 (u64)period_us * NSEC_PER_USEC * TIMER_SLACK_PCT,
796                 100);
797         ioc_refresh_margins(ioc);
798 }
799
800 static int ioc_autop_idx(struct ioc *ioc)
801 {
802         int idx = ioc->autop_idx;
803         const struct ioc_params *p = &autop[idx];
804         u32 vrate_pct;
805         u64 now_ns;
806
807         /* rotational? */
808         if (!blk_queue_nonrot(ioc->rqos.q))
809                 return AUTOP_HDD;
810
811         /* handle SATA SSDs w/ broken NCQ */
812         if (blk_queue_depth(ioc->rqos.q) == 1)
813                 return AUTOP_SSD_QD1;
814
815         /* use one of the normal ssd sets */
816         if (idx < AUTOP_SSD_DFL)
817                 return AUTOP_SSD_DFL;
818
819         /* if user is overriding anything, maintain what was there */
820         if (ioc->user_qos_params || ioc->user_cost_model)
821                 return idx;
822
823         /* step up/down based on the vrate */
824         vrate_pct = div64_u64(ioc->vtime_base_rate * 100, VTIME_PER_USEC);
825         now_ns = ktime_get_ns();
826
827         if (p->too_fast_vrate_pct && p->too_fast_vrate_pct <= vrate_pct) {
828                 if (!ioc->autop_too_fast_at)
829                         ioc->autop_too_fast_at = now_ns;
830                 if (now_ns - ioc->autop_too_fast_at >= AUTOP_CYCLE_NSEC)
831                         return idx + 1;
832         } else {
833                 ioc->autop_too_fast_at = 0;
834         }
835
836         if (p->too_slow_vrate_pct && p->too_slow_vrate_pct >= vrate_pct) {
837                 if (!ioc->autop_too_slow_at)
838                         ioc->autop_too_slow_at = now_ns;
839                 if (now_ns - ioc->autop_too_slow_at >= AUTOP_CYCLE_NSEC)
840                         return idx - 1;
841         } else {
842                 ioc->autop_too_slow_at = 0;
843         }
844
845         return idx;
846 }
847
848 /*
849  * Take the followings as input
850  *
851  *  @bps        maximum sequential throughput
852  *  @seqiops    maximum sequential 4k iops
853  *  @randiops   maximum random 4k iops
854  *
855  * and calculate the linear model cost coefficients.
856  *
857  *  *@page      per-page cost           1s / (@bps / 4096)
858  *  *@seqio     base cost of a seq IO   max((1s / @seqiops) - *@page, 0)
859  *  @randiops   base cost of a rand IO  max((1s / @randiops) - *@page, 0)
860  */
861 static void calc_lcoefs(u64 bps, u64 seqiops, u64 randiops,
862                         u64 *page, u64 *seqio, u64 *randio)
863 {
864         u64 v;
865
866         *page = *seqio = *randio = 0;
867
868         if (bps) {
869                 u64 bps_pages = DIV_ROUND_UP_ULL(bps, IOC_PAGE_SIZE);
870
871                 if (bps_pages)
872                         *page = DIV64_U64_ROUND_UP(VTIME_PER_SEC, bps_pages);
873                 else
874                         *page = 1;
875         }
876
877         if (seqiops) {
878                 v = DIV64_U64_ROUND_UP(VTIME_PER_SEC, seqiops);
879                 if (v > *page)
880                         *seqio = v - *page;
881         }
882
883         if (randiops) {
884                 v = DIV64_U64_ROUND_UP(VTIME_PER_SEC, randiops);
885                 if (v > *page)
886                         *randio = v - *page;
887         }
888 }
889
890 static void ioc_refresh_lcoefs(struct ioc *ioc)
891 {
892         u64 *u = ioc->params.i_lcoefs;
893         u64 *c = ioc->params.lcoefs;
894
895         calc_lcoefs(u[I_LCOEF_RBPS], u[I_LCOEF_RSEQIOPS], u[I_LCOEF_RRANDIOPS],
896                     &c[LCOEF_RPAGE], &c[LCOEF_RSEQIO], &c[LCOEF_RRANDIO]);
897         calc_lcoefs(u[I_LCOEF_WBPS], u[I_LCOEF_WSEQIOPS], u[I_LCOEF_WRANDIOPS],
898                     &c[LCOEF_WPAGE], &c[LCOEF_WSEQIO], &c[LCOEF_WRANDIO]);
899 }
900
901 static bool ioc_refresh_params(struct ioc *ioc, bool force)
902 {
903         const struct ioc_params *p;
904         int idx;
905
906         lockdep_assert_held(&ioc->lock);
907
908         idx = ioc_autop_idx(ioc);
909         p = &autop[idx];
910
911         if (idx == ioc->autop_idx && !force)
912                 return false;
913
914         if (idx != ioc->autop_idx)
915                 atomic64_set(&ioc->vtime_rate, VTIME_PER_USEC);
916
917         ioc->autop_idx = idx;
918         ioc->autop_too_fast_at = 0;
919         ioc->autop_too_slow_at = 0;
920
921         if (!ioc->user_qos_params)
922                 memcpy(ioc->params.qos, p->qos, sizeof(p->qos));
923         if (!ioc->user_cost_model)
924                 memcpy(ioc->params.i_lcoefs, p->i_lcoefs, sizeof(p->i_lcoefs));
925
926         ioc_refresh_period_us(ioc);
927         ioc_refresh_lcoefs(ioc);
928
929         ioc->vrate_min = DIV64_U64_ROUND_UP((u64)ioc->params.qos[QOS_MIN] *
930                                             VTIME_PER_USEC, MILLION);
931         ioc->vrate_max = div64_u64((u64)ioc->params.qos[QOS_MAX] *
932                                    VTIME_PER_USEC, MILLION);
933
934         return true;
935 }
936
937 /*
938  * When an iocg accumulates too much vtime or gets deactivated, we throw away
939  * some vtime, which lowers the overall device utilization. As the exact amount
940  * which is being thrown away is known, we can compensate by accelerating the
941  * vrate accordingly so that the extra vtime generated in the current period
942  * matches what got lost.
943  */
944 static void ioc_refresh_vrate(struct ioc *ioc, struct ioc_now *now)
945 {
946         s64 pleft = ioc->period_at + ioc->period_us - now->now;
947         s64 vperiod = ioc->period_us * ioc->vtime_base_rate;
948         s64 vcomp, vcomp_min, vcomp_max;
949
950         lockdep_assert_held(&ioc->lock);
951
952         /* we need some time left in this period */
953         if (pleft <= 0)
954                 goto done;
955
956         /*
957          * Calculate how much vrate should be adjusted to offset the error.
958          * Limit the amount of adjustment and deduct the adjusted amount from
959          * the error.
960          */
961         vcomp = -div64_s64(ioc->vtime_err, pleft);
962         vcomp_min = -(ioc->vtime_base_rate >> 1);
963         vcomp_max = ioc->vtime_base_rate;
964         vcomp = clamp(vcomp, vcomp_min, vcomp_max);
965
966         ioc->vtime_err += vcomp * pleft;
967
968         atomic64_set(&ioc->vtime_rate, ioc->vtime_base_rate + vcomp);
969 done:
970         /* bound how much error can accumulate */
971         ioc->vtime_err = clamp(ioc->vtime_err, -vperiod, vperiod);
972 }
973
974 static void ioc_adjust_base_vrate(struct ioc *ioc, u32 rq_wait_pct,
975                                   int nr_lagging, int nr_shortages,
976                                   int prev_busy_level, u32 *missed_ppm)
977 {
978         u64 vrate = ioc->vtime_base_rate;
979         u64 vrate_min = ioc->vrate_min, vrate_max = ioc->vrate_max;
980
981         if (!ioc->busy_level || (ioc->busy_level < 0 && nr_lagging)) {
982                 if (ioc->busy_level != prev_busy_level || nr_lagging)
983                         trace_iocost_ioc_vrate_adj(ioc, atomic64_read(&ioc->vtime_rate),
984                                                    missed_ppm, rq_wait_pct,
985                                                    nr_lagging, nr_shortages);
986
987                 return;
988         }
989
990         /*
991          * If vrate is out of bounds, apply clamp gradually as the
992          * bounds can change abruptly.  Otherwise, apply busy_level
993          * based adjustment.
994          */
995         if (vrate < vrate_min) {
996                 vrate = div64_u64(vrate * (100 + VRATE_CLAMP_ADJ_PCT), 100);
997                 vrate = min(vrate, vrate_min);
998         } else if (vrate > vrate_max) {
999                 vrate = div64_u64(vrate * (100 - VRATE_CLAMP_ADJ_PCT), 100);
1000                 vrate = max(vrate, vrate_max);
1001         } else {
1002                 int idx = min_t(int, abs(ioc->busy_level),
1003                                 ARRAY_SIZE(vrate_adj_pct) - 1);
1004                 u32 adj_pct = vrate_adj_pct[idx];
1005
1006                 if (ioc->busy_level > 0)
1007                         adj_pct = 100 - adj_pct;
1008                 else
1009                         adj_pct = 100 + adj_pct;
1010
1011                 vrate = clamp(DIV64_U64_ROUND_UP(vrate * adj_pct, 100),
1012                               vrate_min, vrate_max);
1013         }
1014
1015         trace_iocost_ioc_vrate_adj(ioc, vrate, missed_ppm, rq_wait_pct,
1016                                    nr_lagging, nr_shortages);
1017
1018         ioc->vtime_base_rate = vrate;
1019         ioc_refresh_margins(ioc);
1020 }
1021
1022 /* take a snapshot of the current [v]time and vrate */
1023 static void ioc_now(struct ioc *ioc, struct ioc_now *now)
1024 {
1025         unsigned seq;
1026
1027         now->now_ns = ktime_get();
1028         now->now = ktime_to_us(now->now_ns);
1029         now->vrate = atomic64_read(&ioc->vtime_rate);
1030
1031         /*
1032          * The current vtime is
1033          *
1034          *   vtime at period start + (wallclock time since the start) * vrate
1035          *
1036          * As a consistent snapshot of `period_at_vtime` and `period_at` is
1037          * needed, they're seqcount protected.
1038          */
1039         do {
1040                 seq = read_seqcount_begin(&ioc->period_seqcount);
1041                 now->vnow = ioc->period_at_vtime +
1042                         (now->now - ioc->period_at) * now->vrate;
1043         } while (read_seqcount_retry(&ioc->period_seqcount, seq));
1044 }
1045
1046 static void ioc_start_period(struct ioc *ioc, struct ioc_now *now)
1047 {
1048         WARN_ON_ONCE(ioc->running != IOC_RUNNING);
1049
1050         write_seqcount_begin(&ioc->period_seqcount);
1051         ioc->period_at = now->now;
1052         ioc->period_at_vtime = now->vnow;
1053         write_seqcount_end(&ioc->period_seqcount);
1054
1055         ioc->timer.expires = jiffies + usecs_to_jiffies(ioc->period_us);
1056         add_timer(&ioc->timer);
1057 }
1058
1059 /*
1060  * Update @iocg's `active` and `inuse` to @active and @inuse, update level
1061  * weight sums and propagate upwards accordingly. If @save, the current margin
1062  * is saved to be used as reference for later inuse in-period adjustments.
1063  */
1064 static void __propagate_weights(struct ioc_gq *iocg, u32 active, u32 inuse,
1065                                 bool save, struct ioc_now *now)
1066 {
1067         struct ioc *ioc = iocg->ioc;
1068         int lvl;
1069
1070         lockdep_assert_held(&ioc->lock);
1071
1072         /*
1073          * For an active leaf node, its inuse shouldn't be zero or exceed
1074          * @active. An active internal node's inuse is solely determined by the
1075          * inuse to active ratio of its children regardless of @inuse.
1076          */
1077         if (list_empty(&iocg->active_list) && iocg->child_active_sum) {
1078                 inuse = DIV64_U64_ROUND_UP(active * iocg->child_inuse_sum,
1079                                            iocg->child_active_sum);
1080         } else {
1081                 inuse = clamp_t(u32, inuse, 1, active);
1082         }
1083
1084         iocg->last_inuse = iocg->inuse;
1085         if (save)
1086                 iocg->saved_margin = now->vnow - atomic64_read(&iocg->vtime);
1087
1088         if (active == iocg->active && inuse == iocg->inuse)
1089                 return;
1090
1091         for (lvl = iocg->level - 1; lvl >= 0; lvl--) {
1092                 struct ioc_gq *parent = iocg->ancestors[lvl];
1093                 struct ioc_gq *child = iocg->ancestors[lvl + 1];
1094                 u32 parent_active = 0, parent_inuse = 0;
1095
1096                 /* update the level sums */
1097                 parent->child_active_sum += (s32)(active - child->active);
1098                 parent->child_inuse_sum += (s32)(inuse - child->inuse);
1099                 /* apply the updates */
1100                 child->active = active;
1101                 child->inuse = inuse;
1102
1103                 /*
1104                  * The delta between inuse and active sums indicates that
1105                  * much of weight is being given away.  Parent's inuse
1106                  * and active should reflect the ratio.
1107                  */
1108                 if (parent->child_active_sum) {
1109                         parent_active = parent->weight;
1110                         parent_inuse = DIV64_U64_ROUND_UP(
1111                                 parent_active * parent->child_inuse_sum,
1112                                 parent->child_active_sum);
1113                 }
1114
1115                 /* do we need to keep walking up? */
1116                 if (parent_active == parent->active &&
1117                     parent_inuse == parent->inuse)
1118                         break;
1119
1120                 active = parent_active;
1121                 inuse = parent_inuse;
1122         }
1123
1124         ioc->weights_updated = true;
1125 }
1126
1127 static void commit_weights(struct ioc *ioc)
1128 {
1129         lockdep_assert_held(&ioc->lock);
1130
1131         if (ioc->weights_updated) {
1132                 /* paired with rmb in current_hweight(), see there */
1133                 smp_wmb();
1134                 atomic_inc(&ioc->hweight_gen);
1135                 ioc->weights_updated = false;
1136         }
1137 }
1138
1139 static void propagate_weights(struct ioc_gq *iocg, u32 active, u32 inuse,
1140                               bool save, struct ioc_now *now)
1141 {
1142         __propagate_weights(iocg, active, inuse, save, now);
1143         commit_weights(iocg->ioc);
1144 }
1145
1146 static void current_hweight(struct ioc_gq *iocg, u32 *hw_activep, u32 *hw_inusep)
1147 {
1148         struct ioc *ioc = iocg->ioc;
1149         int lvl;
1150         u32 hwa, hwi;
1151         int ioc_gen;
1152
1153         /* hot path - if uptodate, use cached */
1154         ioc_gen = atomic_read(&ioc->hweight_gen);
1155         if (ioc_gen == iocg->hweight_gen)
1156                 goto out;
1157
1158         /*
1159          * Paired with wmb in commit_weights(). If we saw the updated
1160          * hweight_gen, all the weight updates from __propagate_weights() are
1161          * visible too.
1162          *
1163          * We can race with weight updates during calculation and get it
1164          * wrong.  However, hweight_gen would have changed and a future
1165          * reader will recalculate and we're guaranteed to discard the
1166          * wrong result soon.
1167          */
1168         smp_rmb();
1169
1170         hwa = hwi = WEIGHT_ONE;
1171         for (lvl = 0; lvl <= iocg->level - 1; lvl++) {
1172                 struct ioc_gq *parent = iocg->ancestors[lvl];
1173                 struct ioc_gq *child = iocg->ancestors[lvl + 1];
1174                 u64 active_sum = READ_ONCE(parent->child_active_sum);
1175                 u64 inuse_sum = READ_ONCE(parent->child_inuse_sum);
1176                 u32 active = READ_ONCE(child->active);
1177                 u32 inuse = READ_ONCE(child->inuse);
1178
1179                 /* we can race with deactivations and either may read as zero */
1180                 if (!active_sum || !inuse_sum)
1181                         continue;
1182
1183                 active_sum = max_t(u64, active, active_sum);
1184                 hwa = div64_u64((u64)hwa * active, active_sum);
1185
1186                 inuse_sum = max_t(u64, inuse, inuse_sum);
1187                 hwi = div64_u64((u64)hwi * inuse, inuse_sum);
1188         }
1189
1190         iocg->hweight_active = max_t(u32, hwa, 1);
1191         iocg->hweight_inuse = max_t(u32, hwi, 1);
1192         iocg->hweight_gen = ioc_gen;
1193 out:
1194         if (hw_activep)
1195                 *hw_activep = iocg->hweight_active;
1196         if (hw_inusep)
1197                 *hw_inusep = iocg->hweight_inuse;
1198 }
1199
1200 /*
1201  * Calculate the hweight_inuse @iocg would get with max @inuse assuming all the
1202  * other weights stay unchanged.
1203  */
1204 static u32 current_hweight_max(struct ioc_gq *iocg)
1205 {
1206         u32 hwm = WEIGHT_ONE;
1207         u32 inuse = iocg->active;
1208         u64 child_inuse_sum;
1209         int lvl;
1210
1211         lockdep_assert_held(&iocg->ioc->lock);
1212
1213         for (lvl = iocg->level - 1; lvl >= 0; lvl--) {
1214                 struct ioc_gq *parent = iocg->ancestors[lvl];
1215                 struct ioc_gq *child = iocg->ancestors[lvl + 1];
1216
1217                 child_inuse_sum = parent->child_inuse_sum + inuse - child->inuse;
1218                 hwm = div64_u64((u64)hwm * inuse, child_inuse_sum);
1219                 inuse = DIV64_U64_ROUND_UP(parent->active * child_inuse_sum,
1220                                            parent->child_active_sum);
1221         }
1222
1223         return max_t(u32, hwm, 1);
1224 }
1225
1226 static void weight_updated(struct ioc_gq *iocg, struct ioc_now *now)
1227 {
1228         struct ioc *ioc = iocg->ioc;
1229         struct blkcg_gq *blkg = iocg_to_blkg(iocg);
1230         struct ioc_cgrp *iocc = blkcg_to_iocc(blkg->blkcg);
1231         u32 weight;
1232
1233         lockdep_assert_held(&ioc->lock);
1234
1235         weight = iocg->cfg_weight ?: iocc->dfl_weight;
1236         if (weight != iocg->weight && iocg->active)
1237                 propagate_weights(iocg, weight, iocg->inuse, true, now);
1238         iocg->weight = weight;
1239 }
1240
1241 static bool iocg_activate(struct ioc_gq *iocg, struct ioc_now *now)
1242 {
1243         struct ioc *ioc = iocg->ioc;
1244         u64 last_period, cur_period;
1245         u64 vtime, vtarget;
1246         int i;
1247
1248         /*
1249          * If seem to be already active, just update the stamp to tell the
1250          * timer that we're still active.  We don't mind occassional races.
1251          */
1252         if (!list_empty(&iocg->active_list)) {
1253                 ioc_now(ioc, now);
1254                 cur_period = atomic64_read(&ioc->cur_period);
1255                 if (atomic64_read(&iocg->active_period) != cur_period)
1256                         atomic64_set(&iocg->active_period, cur_period);
1257                 return true;
1258         }
1259
1260         /* racy check on internal node IOs, treat as root level IOs */
1261         if (iocg->child_active_sum)
1262                 return false;
1263
1264         spin_lock_irq(&ioc->lock);
1265
1266         ioc_now(ioc, now);
1267
1268         /* update period */
1269         cur_period = atomic64_read(&ioc->cur_period);
1270         last_period = atomic64_read(&iocg->active_period);
1271         atomic64_set(&iocg->active_period, cur_period);
1272
1273         /* already activated or breaking leaf-only constraint? */
1274         if (!list_empty(&iocg->active_list))
1275                 goto succeed_unlock;
1276         for (i = iocg->level - 1; i > 0; i--)
1277                 if (!list_empty(&iocg->ancestors[i]->active_list))
1278                         goto fail_unlock;
1279
1280         if (iocg->child_active_sum)
1281                 goto fail_unlock;
1282
1283         /*
1284          * Always start with the target budget. On deactivation, we throw away
1285          * anything above it.
1286          */
1287         vtarget = now->vnow - ioc->margins.target;
1288         vtime = atomic64_read(&iocg->vtime);
1289
1290         atomic64_add(vtarget - vtime, &iocg->vtime);
1291         atomic64_add(vtarget - vtime, &iocg->done_vtime);
1292         vtime = vtarget;
1293
1294         /*
1295          * Activate, propagate weight and start period timer if not
1296          * running.  Reset hweight_gen to avoid accidental match from
1297          * wrapping.
1298          */
1299         iocg->hweight_gen = atomic_read(&ioc->hweight_gen) - 1;
1300         list_add(&iocg->active_list, &ioc->active_iocgs);
1301
1302         propagate_weights(iocg, iocg->weight,
1303                           iocg->last_inuse ?: iocg->weight, true, now);
1304
1305         TRACE_IOCG_PATH(iocg_activate, iocg, now,
1306                         last_period, cur_period, vtime);
1307
1308         iocg->activated_at = now->now;
1309
1310         if (ioc->running == IOC_IDLE) {
1311                 ioc->running = IOC_RUNNING;
1312                 ioc->dfgv_period_at = now->now;
1313                 ioc->dfgv_period_rem = 0;
1314                 ioc_start_period(ioc, now);
1315         }
1316
1317 succeed_unlock:
1318         spin_unlock_irq(&ioc->lock);
1319         return true;
1320
1321 fail_unlock:
1322         spin_unlock_irq(&ioc->lock);
1323         return false;
1324 }
1325
1326 static bool iocg_kick_delay(struct ioc_gq *iocg, struct ioc_now *now)
1327 {
1328         struct ioc *ioc = iocg->ioc;
1329         struct blkcg_gq *blkg = iocg_to_blkg(iocg);
1330         u64 tdelta, delay, new_delay;
1331         s64 vover, vover_pct;
1332         u32 hwa;
1333
1334         lockdep_assert_held(&iocg->waitq.lock);
1335
1336         /* calculate the current delay in effect - 1/2 every second */
1337         tdelta = now->now - iocg->delay_at;
1338         if (iocg->delay)
1339                 delay = iocg->delay >> div64_u64(tdelta, USEC_PER_SEC);
1340         else
1341                 delay = 0;
1342
1343         /* calculate the new delay from the debt amount */
1344         current_hweight(iocg, &hwa, NULL);
1345         vover = atomic64_read(&iocg->vtime) +
1346                 abs_cost_to_cost(iocg->abs_vdebt, hwa) - now->vnow;
1347         vover_pct = div64_s64(100 * vover,
1348                               ioc->period_us * ioc->vtime_base_rate);
1349
1350         if (vover_pct <= MIN_DELAY_THR_PCT)
1351                 new_delay = 0;
1352         else if (vover_pct >= MAX_DELAY_THR_PCT)
1353                 new_delay = MAX_DELAY;
1354         else
1355                 new_delay = MIN_DELAY +
1356                         div_u64((MAX_DELAY - MIN_DELAY) *
1357                                 (vover_pct - MIN_DELAY_THR_PCT),
1358                                 MAX_DELAY_THR_PCT - MIN_DELAY_THR_PCT);
1359
1360         /* pick the higher one and apply */
1361         if (new_delay > delay) {
1362                 iocg->delay = new_delay;
1363                 iocg->delay_at = now->now;
1364                 delay = new_delay;
1365         }
1366
1367         if (delay >= MIN_DELAY) {
1368                 if (!iocg->indelay_since)
1369                         iocg->indelay_since = now->now;
1370                 blkcg_set_delay(blkg, delay * NSEC_PER_USEC);
1371                 return true;
1372         } else {
1373                 if (iocg->indelay_since) {
1374                         iocg->stat.indelay_us += now->now - iocg->indelay_since;
1375                         iocg->indelay_since = 0;
1376                 }
1377                 iocg->delay = 0;
1378                 blkcg_clear_delay(blkg);
1379                 return false;
1380         }
1381 }
1382
1383 static void iocg_incur_debt(struct ioc_gq *iocg, u64 abs_cost,
1384                             struct ioc_now *now)
1385 {
1386         struct iocg_pcpu_stat *gcs;
1387
1388         lockdep_assert_held(&iocg->ioc->lock);
1389         lockdep_assert_held(&iocg->waitq.lock);
1390         WARN_ON_ONCE(list_empty(&iocg->active_list));
1391
1392         /*
1393          * Once in debt, debt handling owns inuse. @iocg stays at the minimum
1394          * inuse donating all of it share to others until its debt is paid off.
1395          */
1396         if (!iocg->abs_vdebt && abs_cost) {
1397                 iocg->indebt_since = now->now;
1398                 propagate_weights(iocg, iocg->active, 0, false, now);
1399         }
1400
1401         iocg->abs_vdebt += abs_cost;
1402
1403         gcs = get_cpu_ptr(iocg->pcpu_stat);
1404         local64_add(abs_cost, &gcs->abs_vusage);
1405         put_cpu_ptr(gcs);
1406 }
1407
1408 static void iocg_pay_debt(struct ioc_gq *iocg, u64 abs_vpay,
1409                           struct ioc_now *now)
1410 {
1411         lockdep_assert_held(&iocg->ioc->lock);
1412         lockdep_assert_held(&iocg->waitq.lock);
1413
1414         /* make sure that nobody messed with @iocg */
1415         WARN_ON_ONCE(list_empty(&iocg->active_list));
1416         WARN_ON_ONCE(iocg->inuse > 1);
1417
1418         iocg->abs_vdebt -= min(abs_vpay, iocg->abs_vdebt);
1419
1420         /* if debt is paid in full, restore inuse */
1421         if (!iocg->abs_vdebt) {
1422                 iocg->stat.indebt_us += now->now - iocg->indebt_since;
1423                 iocg->indebt_since = 0;
1424
1425                 propagate_weights(iocg, iocg->active, iocg->last_inuse,
1426                                   false, now);
1427         }
1428 }
1429
1430 static int iocg_wake_fn(struct wait_queue_entry *wq_entry, unsigned mode,
1431                         int flags, void *key)
1432 {
1433         struct iocg_wait *wait = container_of(wq_entry, struct iocg_wait, wait);
1434         struct iocg_wake_ctx *ctx = key;
1435         u64 cost = abs_cost_to_cost(wait->abs_cost, ctx->hw_inuse);
1436
1437         ctx->vbudget -= cost;
1438
1439         if (ctx->vbudget < 0)
1440                 return -1;
1441
1442         iocg_commit_bio(ctx->iocg, wait->bio, wait->abs_cost, cost);
1443         wait->committed = true;
1444
1445         /*
1446          * autoremove_wake_function() removes the wait entry only when it
1447          * actually changed the task state. We want the wait always removed.
1448          * Remove explicitly and use default_wake_function(). Note that the
1449          * order of operations is important as finish_wait() tests whether
1450          * @wq_entry is removed without grabbing the lock.
1451          */
1452         default_wake_function(wq_entry, mode, flags, key);
1453         list_del_init_careful(&wq_entry->entry);
1454         return 0;
1455 }
1456
1457 /*
1458  * Calculate the accumulated budget, pay debt if @pay_debt and wake up waiters
1459  * accordingly. When @pay_debt is %true, the caller must be holding ioc->lock in
1460  * addition to iocg->waitq.lock.
1461  */
1462 static void iocg_kick_waitq(struct ioc_gq *iocg, bool pay_debt,
1463                             struct ioc_now *now)
1464 {
1465         struct ioc *ioc = iocg->ioc;
1466         struct iocg_wake_ctx ctx = { .iocg = iocg };
1467         u64 vshortage, expires, oexpires;
1468         s64 vbudget;
1469         u32 hwa;
1470
1471         lockdep_assert_held(&iocg->waitq.lock);
1472
1473         current_hweight(iocg, &hwa, NULL);
1474         vbudget = now->vnow - atomic64_read(&iocg->vtime);
1475
1476         /* pay off debt */
1477         if (pay_debt && iocg->abs_vdebt && vbudget > 0) {
1478                 u64 abs_vbudget = cost_to_abs_cost(vbudget, hwa);
1479                 u64 abs_vpay = min_t(u64, abs_vbudget, iocg->abs_vdebt);
1480                 u64 vpay = abs_cost_to_cost(abs_vpay, hwa);
1481
1482                 lockdep_assert_held(&ioc->lock);
1483
1484                 atomic64_add(vpay, &iocg->vtime);
1485                 atomic64_add(vpay, &iocg->done_vtime);
1486                 iocg_pay_debt(iocg, abs_vpay, now);
1487                 vbudget -= vpay;
1488         }
1489
1490         if (iocg->abs_vdebt || iocg->delay)
1491                 iocg_kick_delay(iocg, now);
1492
1493         /*
1494          * Debt can still be outstanding if we haven't paid all yet or the
1495          * caller raced and called without @pay_debt. Shouldn't wake up waiters
1496          * under debt. Make sure @vbudget reflects the outstanding amount and is
1497          * not positive.
1498          */
1499         if (iocg->abs_vdebt) {
1500                 s64 vdebt = abs_cost_to_cost(iocg->abs_vdebt, hwa);
1501                 vbudget = min_t(s64, 0, vbudget - vdebt);
1502         }
1503
1504         /*
1505          * Wake up the ones which are due and see how much vtime we'll need for
1506          * the next one. As paying off debt restores hw_inuse, it must be read
1507          * after the above debt payment.
1508          */
1509         ctx.vbudget = vbudget;
1510         current_hweight(iocg, NULL, &ctx.hw_inuse);
1511
1512         __wake_up_locked_key(&iocg->waitq, TASK_NORMAL, &ctx);
1513
1514         if (!waitqueue_active(&iocg->waitq)) {
1515                 if (iocg->wait_since) {
1516                         iocg->stat.wait_us += now->now - iocg->wait_since;
1517                         iocg->wait_since = 0;
1518                 }
1519                 return;
1520         }
1521
1522         if (!iocg->wait_since)
1523                 iocg->wait_since = now->now;
1524
1525         if (WARN_ON_ONCE(ctx.vbudget >= 0))
1526                 return;
1527
1528         /* determine next wakeup, add a timer margin to guarantee chunking */
1529         vshortage = -ctx.vbudget;
1530         expires = now->now_ns +
1531                 DIV64_U64_ROUND_UP(vshortage, ioc->vtime_base_rate) *
1532                 NSEC_PER_USEC;
1533         expires += ioc->timer_slack_ns;
1534
1535         /* if already active and close enough, don't bother */
1536         oexpires = ktime_to_ns(hrtimer_get_softexpires(&iocg->waitq_timer));
1537         if (hrtimer_is_queued(&iocg->waitq_timer) &&
1538             abs(oexpires - expires) <= ioc->timer_slack_ns)
1539                 return;
1540
1541         hrtimer_start_range_ns(&iocg->waitq_timer, ns_to_ktime(expires),
1542                                ioc->timer_slack_ns, HRTIMER_MODE_ABS);
1543 }
1544
1545 static enum hrtimer_restart iocg_waitq_timer_fn(struct hrtimer *timer)
1546 {
1547         struct ioc_gq *iocg = container_of(timer, struct ioc_gq, waitq_timer);
1548         bool pay_debt = READ_ONCE(iocg->abs_vdebt);
1549         struct ioc_now now;
1550         unsigned long flags;
1551
1552         ioc_now(iocg->ioc, &now);
1553
1554         iocg_lock(iocg, pay_debt, &flags);
1555         iocg_kick_waitq(iocg, pay_debt, &now);
1556         iocg_unlock(iocg, pay_debt, &flags);
1557
1558         return HRTIMER_NORESTART;
1559 }
1560
1561 static void ioc_lat_stat(struct ioc *ioc, u32 *missed_ppm_ar, u32 *rq_wait_pct_p)
1562 {
1563         u32 nr_met[2] = { };
1564         u32 nr_missed[2] = { };
1565         u64 rq_wait_ns = 0;
1566         int cpu, rw;
1567
1568         for_each_online_cpu(cpu) {
1569                 struct ioc_pcpu_stat *stat = per_cpu_ptr(ioc->pcpu_stat, cpu);
1570                 u64 this_rq_wait_ns;
1571
1572                 for (rw = READ; rw <= WRITE; rw++) {
1573                         u32 this_met = local_read(&stat->missed[rw].nr_met);
1574                         u32 this_missed = local_read(&stat->missed[rw].nr_missed);
1575
1576                         nr_met[rw] += this_met - stat->missed[rw].last_met;
1577                         nr_missed[rw] += this_missed - stat->missed[rw].last_missed;
1578                         stat->missed[rw].last_met = this_met;
1579                         stat->missed[rw].last_missed = this_missed;
1580                 }
1581
1582                 this_rq_wait_ns = local64_read(&stat->rq_wait_ns);
1583                 rq_wait_ns += this_rq_wait_ns - stat->last_rq_wait_ns;
1584                 stat->last_rq_wait_ns = this_rq_wait_ns;
1585         }
1586
1587         for (rw = READ; rw <= WRITE; rw++) {
1588                 if (nr_met[rw] + nr_missed[rw])
1589                         missed_ppm_ar[rw] =
1590                                 DIV64_U64_ROUND_UP((u64)nr_missed[rw] * MILLION,
1591                                                    nr_met[rw] + nr_missed[rw]);
1592                 else
1593                         missed_ppm_ar[rw] = 0;
1594         }
1595
1596         *rq_wait_pct_p = div64_u64(rq_wait_ns * 100,
1597                                    ioc->period_us * NSEC_PER_USEC);
1598 }
1599
1600 /* was iocg idle this period? */
1601 static bool iocg_is_idle(struct ioc_gq *iocg)
1602 {
1603         struct ioc *ioc = iocg->ioc;
1604
1605         /* did something get issued this period? */
1606         if (atomic64_read(&iocg->active_period) ==
1607             atomic64_read(&ioc->cur_period))
1608                 return false;
1609
1610         /* is something in flight? */
1611         if (atomic64_read(&iocg->done_vtime) != atomic64_read(&iocg->vtime))
1612                 return false;
1613
1614         return true;
1615 }
1616
1617 /*
1618  * Call this function on the target leaf @iocg's to build pre-order traversal
1619  * list of all the ancestors in @inner_walk. The inner nodes are linked through
1620  * ->walk_list and the caller is responsible for dissolving the list after use.
1621  */
1622 static void iocg_build_inner_walk(struct ioc_gq *iocg,
1623                                   struct list_head *inner_walk)
1624 {
1625         int lvl;
1626
1627         WARN_ON_ONCE(!list_empty(&iocg->walk_list));
1628
1629         /* find the first ancestor which hasn't been visited yet */
1630         for (lvl = iocg->level - 1; lvl >= 0; lvl--) {
1631                 if (!list_empty(&iocg->ancestors[lvl]->walk_list))
1632                         break;
1633         }
1634
1635         /* walk down and visit the inner nodes to get pre-order traversal */
1636         while (++lvl <= iocg->level - 1) {
1637                 struct ioc_gq *inner = iocg->ancestors[lvl];
1638
1639                 /* record traversal order */
1640                 list_add_tail(&inner->walk_list, inner_walk);
1641         }
1642 }
1643
1644 /* propagate the deltas to the parent */
1645 static void iocg_flush_stat_upward(struct ioc_gq *iocg)
1646 {
1647         if (iocg->level > 0) {
1648                 struct iocg_stat *parent_stat =
1649                         &iocg->ancestors[iocg->level - 1]->stat;
1650
1651                 parent_stat->usage_us +=
1652                         iocg->stat.usage_us - iocg->last_stat.usage_us;
1653                 parent_stat->wait_us +=
1654                         iocg->stat.wait_us - iocg->last_stat.wait_us;
1655                 parent_stat->indebt_us +=
1656                         iocg->stat.indebt_us - iocg->last_stat.indebt_us;
1657                 parent_stat->indelay_us +=
1658                         iocg->stat.indelay_us - iocg->last_stat.indelay_us;
1659         }
1660
1661         iocg->last_stat = iocg->stat;
1662 }
1663
1664 /* collect per-cpu counters and propagate the deltas to the parent */
1665 static void iocg_flush_stat_leaf(struct ioc_gq *iocg, struct ioc_now *now)
1666 {
1667         struct ioc *ioc = iocg->ioc;
1668         u64 abs_vusage = 0;
1669         u64 vusage_delta;
1670         int cpu;
1671
1672         lockdep_assert_held(&iocg->ioc->lock);
1673
1674         /* collect per-cpu counters */
1675         for_each_possible_cpu(cpu) {
1676                 abs_vusage += local64_read(
1677                                 per_cpu_ptr(&iocg->pcpu_stat->abs_vusage, cpu));
1678         }
1679         vusage_delta = abs_vusage - iocg->last_stat_abs_vusage;
1680         iocg->last_stat_abs_vusage = abs_vusage;
1681
1682         iocg->usage_delta_us = div64_u64(vusage_delta, ioc->vtime_base_rate);
1683         iocg->stat.usage_us += iocg->usage_delta_us;
1684
1685         iocg_flush_stat_upward(iocg);
1686 }
1687
1688 /* get stat counters ready for reading on all active iocgs */
1689 static void iocg_flush_stat(struct list_head *target_iocgs, struct ioc_now *now)
1690 {
1691         LIST_HEAD(inner_walk);
1692         struct ioc_gq *iocg, *tiocg;
1693
1694         /* flush leaves and build inner node walk list */
1695         list_for_each_entry(iocg, target_iocgs, active_list) {
1696                 iocg_flush_stat_leaf(iocg, now);
1697                 iocg_build_inner_walk(iocg, &inner_walk);
1698         }
1699
1700         /* keep flushing upwards by walking the inner list backwards */
1701         list_for_each_entry_safe_reverse(iocg, tiocg, &inner_walk, walk_list) {
1702                 iocg_flush_stat_upward(iocg);
1703                 list_del_init(&iocg->walk_list);
1704         }
1705 }
1706
1707 /*
1708  * Determine what @iocg's hweight_inuse should be after donating unused
1709  * capacity. @hwm is the upper bound and used to signal no donation. This
1710  * function also throws away @iocg's excess budget.
1711  */
1712 static u32 hweight_after_donation(struct ioc_gq *iocg, u32 old_hwi, u32 hwm,
1713                                   u32 usage, struct ioc_now *now)
1714 {
1715         struct ioc *ioc = iocg->ioc;
1716         u64 vtime = atomic64_read(&iocg->vtime);
1717         s64 excess, delta, target, new_hwi;
1718
1719         /* debt handling owns inuse for debtors */
1720         if (iocg->abs_vdebt)
1721                 return 1;
1722
1723         /* see whether minimum margin requirement is met */
1724         if (waitqueue_active(&iocg->waitq) ||
1725             time_after64(vtime, now->vnow - ioc->margins.min))
1726                 return hwm;
1727
1728         /* throw away excess above target */
1729         excess = now->vnow - vtime - ioc->margins.target;
1730         if (excess > 0) {
1731                 atomic64_add(excess, &iocg->vtime);
1732                 atomic64_add(excess, &iocg->done_vtime);
1733                 vtime += excess;
1734                 ioc->vtime_err -= div64_u64(excess * old_hwi, WEIGHT_ONE);
1735         }
1736
1737         /*
1738          * Let's say the distance between iocg's and device's vtimes as a
1739          * fraction of period duration is delta. Assuming that the iocg will
1740          * consume the usage determined above, we want to determine new_hwi so
1741          * that delta equals MARGIN_TARGET at the end of the next period.
1742          *
1743          * We need to execute usage worth of IOs while spending the sum of the
1744          * new budget (1 - MARGIN_TARGET) and the leftover from the last period
1745          * (delta):
1746          *
1747          *   usage = (1 - MARGIN_TARGET + delta) * new_hwi
1748          *
1749          * Therefore, the new_hwi is:
1750          *
1751          *   new_hwi = usage / (1 - MARGIN_TARGET + delta)
1752          */
1753         delta = div64_s64(WEIGHT_ONE * (now->vnow - vtime),
1754                           now->vnow - ioc->period_at_vtime);
1755         target = WEIGHT_ONE * MARGIN_TARGET_PCT / 100;
1756         new_hwi = div64_s64(WEIGHT_ONE * usage, WEIGHT_ONE - target + delta);
1757
1758         return clamp_t(s64, new_hwi, 1, hwm);
1759 }
1760
1761 /*
1762  * For work-conservation, an iocg which isn't using all of its share should
1763  * donate the leftover to other iocgs. There are two ways to achieve this - 1.
1764  * bumping up vrate accordingly 2. lowering the donating iocg's inuse weight.
1765  *
1766  * #1 is mathematically simpler but has the drawback of requiring synchronous
1767  * global hweight_inuse updates when idle iocg's get activated or inuse weights
1768  * change due to donation snapbacks as it has the possibility of grossly
1769  * overshooting what's allowed by the model and vrate.
1770  *
1771  * #2 is inherently safe with local operations. The donating iocg can easily
1772  * snap back to higher weights when needed without worrying about impacts on
1773  * other nodes as the impacts will be inherently correct. This also makes idle
1774  * iocg activations safe. The only effect activations have is decreasing
1775  * hweight_inuse of others, the right solution to which is for those iocgs to
1776  * snap back to higher weights.
1777  *
1778  * So, we go with #2. The challenge is calculating how each donating iocg's
1779  * inuse should be adjusted to achieve the target donation amounts. This is done
1780  * using Andy's method described in the following pdf.
1781  *
1782  *   https://drive.google.com/file/d/1PsJwxPFtjUnwOY1QJ5AeICCcsL7BM3bo
1783  *
1784  * Given the weights and target after-donation hweight_inuse values, Andy's
1785  * method determines how the proportional distribution should look like at each
1786  * sibling level to maintain the relative relationship between all non-donating
1787  * pairs. To roughly summarize, it divides the tree into donating and
1788  * non-donating parts, calculates global donation rate which is used to
1789  * determine the target hweight_inuse for each node, and then derives per-level
1790  * proportions.
1791  *
1792  * The following pdf shows that global distribution calculated this way can be
1793  * achieved by scaling inuse weights of donating leaves and propagating the
1794  * adjustments upwards proportionally.
1795  *
1796  *   https://drive.google.com/file/d/1vONz1-fzVO7oY5DXXsLjSxEtYYQbOvsE
1797  *
1798  * Combining the above two, we can determine how each leaf iocg's inuse should
1799  * be adjusted to achieve the target donation.
1800  *
1801  *   https://drive.google.com/file/d/1WcrltBOSPN0qXVdBgnKm4mdp9FhuEFQN
1802  *
1803  * The inline comments use symbols from the last pdf.
1804  *
1805  *   b is the sum of the absolute budgets in the subtree. 1 for the root node.
1806  *   f is the sum of the absolute budgets of non-donating nodes in the subtree.
1807  *   t is the sum of the absolute budgets of donating nodes in the subtree.
1808  *   w is the weight of the node. w = w_f + w_t
1809  *   w_f is the non-donating portion of w. w_f = w * f / b
1810  *   w_b is the donating portion of w. w_t = w * t / b
1811  *   s is the sum of all sibling weights. s = Sum(w) for siblings
1812  *   s_f and s_t are the non-donating and donating portions of s.
1813  *
1814  * Subscript p denotes the parent's counterpart and ' the adjusted value - e.g.
1815  * w_pt is the donating portion of the parent's weight and w'_pt the same value
1816  * after adjustments. Subscript r denotes the root node's values.
1817  */
1818 static void transfer_surpluses(struct list_head *surpluses, struct ioc_now *now)
1819 {
1820         LIST_HEAD(over_hwa);
1821         LIST_HEAD(inner_walk);
1822         struct ioc_gq *iocg, *tiocg, *root_iocg;
1823         u32 after_sum, over_sum, over_target, gamma;
1824
1825         /*
1826          * It's pretty unlikely but possible for the total sum of
1827          * hweight_after_donation's to be higher than WEIGHT_ONE, which will
1828          * confuse the following calculations. If such condition is detected,
1829          * scale down everyone over its full share equally to keep the sum below
1830          * WEIGHT_ONE.
1831          */
1832         after_sum = 0;
1833         over_sum = 0;
1834         list_for_each_entry(iocg, surpluses, surplus_list) {
1835                 u32 hwa;
1836
1837                 current_hweight(iocg, &hwa, NULL);
1838                 after_sum += iocg->hweight_after_donation;
1839
1840                 if (iocg->hweight_after_donation > hwa) {
1841                         over_sum += iocg->hweight_after_donation;
1842                         list_add(&iocg->walk_list, &over_hwa);
1843                 }
1844         }
1845
1846         if (after_sum >= WEIGHT_ONE) {
1847                 /*
1848                  * The delta should be deducted from the over_sum, calculate
1849                  * target over_sum value.
1850                  */
1851                 u32 over_delta = after_sum - (WEIGHT_ONE - 1);
1852                 WARN_ON_ONCE(over_sum <= over_delta);
1853                 over_target = over_sum - over_delta;
1854         } else {
1855                 over_target = 0;
1856         }
1857
1858         list_for_each_entry_safe(iocg, tiocg, &over_hwa, walk_list) {
1859                 if (over_target)
1860                         iocg->hweight_after_donation =
1861                                 div_u64((u64)iocg->hweight_after_donation *
1862                                         over_target, over_sum);
1863                 list_del_init(&iocg->walk_list);
1864         }
1865
1866         /*
1867          * Build pre-order inner node walk list and prepare for donation
1868          * adjustment calculations.
1869          */
1870         list_for_each_entry(iocg, surpluses, surplus_list) {
1871                 iocg_build_inner_walk(iocg, &inner_walk);
1872         }
1873
1874         root_iocg = list_first_entry(&inner_walk, struct ioc_gq, walk_list);
1875         WARN_ON_ONCE(root_iocg->level > 0);
1876
1877         list_for_each_entry(iocg, &inner_walk, walk_list) {
1878                 iocg->child_adjusted_sum = 0;
1879                 iocg->hweight_donating = 0;
1880                 iocg->hweight_after_donation = 0;
1881         }
1882
1883         /*
1884          * Propagate the donating budget (b_t) and after donation budget (b'_t)
1885          * up the hierarchy.
1886          */
1887         list_for_each_entry(iocg, surpluses, surplus_list) {
1888                 struct ioc_gq *parent = iocg->ancestors[iocg->level - 1];
1889
1890                 parent->hweight_donating += iocg->hweight_donating;
1891                 parent->hweight_after_donation += iocg->hweight_after_donation;
1892         }
1893
1894         list_for_each_entry_reverse(iocg, &inner_walk, walk_list) {
1895                 if (iocg->level > 0) {
1896                         struct ioc_gq *parent = iocg->ancestors[iocg->level - 1];
1897
1898                         parent->hweight_donating += iocg->hweight_donating;
1899                         parent->hweight_after_donation += iocg->hweight_after_donation;
1900                 }
1901         }
1902
1903         /*
1904          * Calculate inner hwa's (b) and make sure the donation values are
1905          * within the accepted ranges as we're doing low res calculations with
1906          * roundups.
1907          */
1908         list_for_each_entry(iocg, &inner_walk, walk_list) {
1909                 if (iocg->level) {
1910                         struct ioc_gq *parent = iocg->ancestors[iocg->level - 1];
1911
1912                         iocg->hweight_active = DIV64_U64_ROUND_UP(
1913                                 (u64)parent->hweight_active * iocg->active,
1914                                 parent->child_active_sum);
1915
1916                 }
1917
1918                 iocg->hweight_donating = min(iocg->hweight_donating,
1919                                              iocg->hweight_active);
1920                 iocg->hweight_after_donation = min(iocg->hweight_after_donation,
1921                                                    iocg->hweight_donating - 1);
1922                 if (WARN_ON_ONCE(iocg->hweight_active <= 1 ||
1923                                  iocg->hweight_donating <= 1 ||
1924                                  iocg->hweight_after_donation == 0)) {
1925                         pr_warn("iocg: invalid donation weights in ");
1926                         pr_cont_cgroup_path(iocg_to_blkg(iocg)->blkcg->css.cgroup);
1927                         pr_cont(": active=%u donating=%u after=%u\n",
1928                                 iocg->hweight_active, iocg->hweight_donating,
1929                                 iocg->hweight_after_donation);
1930                 }
1931         }
1932
1933         /*
1934          * Calculate the global donation rate (gamma) - the rate to adjust
1935          * non-donating budgets by.
1936          *
1937          * No need to use 64bit multiplication here as the first operand is
1938          * guaranteed to be smaller than WEIGHT_ONE (1<<16).
1939          *
1940          * We know that there are beneficiary nodes and the sum of the donating
1941          * hweights can't be whole; however, due to the round-ups during hweight
1942          * calculations, root_iocg->hweight_donating might still end up equal to
1943          * or greater than whole. Limit the range when calculating the divider.
1944          *
1945          * gamma = (1 - t_r') / (1 - t_r)
1946          */
1947         gamma = DIV_ROUND_UP(
1948                 (WEIGHT_ONE - root_iocg->hweight_after_donation) * WEIGHT_ONE,
1949                 WEIGHT_ONE - min_t(u32, root_iocg->hweight_donating, WEIGHT_ONE - 1));
1950
1951         /*
1952          * Calculate adjusted hwi, child_adjusted_sum and inuse for the inner
1953          * nodes.
1954          */
1955         list_for_each_entry(iocg, &inner_walk, walk_list) {
1956                 struct ioc_gq *parent;
1957                 u32 inuse, wpt, wptp;
1958                 u64 st, sf;
1959
1960                 if (iocg->level == 0) {
1961                         /* adjusted weight sum for 1st level: s' = s * b_pf / b'_pf */
1962                         iocg->child_adjusted_sum = DIV64_U64_ROUND_UP(
1963                                 iocg->child_active_sum * (WEIGHT_ONE - iocg->hweight_donating),
1964                                 WEIGHT_ONE - iocg->hweight_after_donation);
1965                         continue;
1966                 }
1967
1968                 parent = iocg->ancestors[iocg->level - 1];
1969
1970                 /* b' = gamma * b_f + b_t' */
1971                 iocg->hweight_inuse = DIV64_U64_ROUND_UP(
1972                         (u64)gamma * (iocg->hweight_active - iocg->hweight_donating),
1973                         WEIGHT_ONE) + iocg->hweight_after_donation;
1974
1975                 /* w' = s' * b' / b'_p */
1976                 inuse = DIV64_U64_ROUND_UP(
1977                         (u64)parent->child_adjusted_sum * iocg->hweight_inuse,
1978                         parent->hweight_inuse);
1979
1980                 /* adjusted weight sum for children: s' = s_f + s_t * w'_pt / w_pt */
1981                 st = DIV64_U64_ROUND_UP(
1982                         iocg->child_active_sum * iocg->hweight_donating,
1983                         iocg->hweight_active);
1984                 sf = iocg->child_active_sum - st;
1985                 wpt = DIV64_U64_ROUND_UP(
1986                         (u64)iocg->active * iocg->hweight_donating,
1987                         iocg->hweight_active);
1988                 wptp = DIV64_U64_ROUND_UP(
1989                         (u64)inuse * iocg->hweight_after_donation,
1990                         iocg->hweight_inuse);
1991
1992                 iocg->child_adjusted_sum = sf + DIV64_U64_ROUND_UP(st * wptp, wpt);
1993         }
1994
1995         /*
1996          * All inner nodes now have ->hweight_inuse and ->child_adjusted_sum and
1997          * we can finally determine leaf adjustments.
1998          */
1999         list_for_each_entry(iocg, surpluses, surplus_list) {
2000                 struct ioc_gq *parent = iocg->ancestors[iocg->level - 1];
2001                 u32 inuse;
2002
2003                 /*
2004                  * In-debt iocgs participated in the donation calculation with
2005                  * the minimum target hweight_inuse. Configuring inuse
2006                  * accordingly would work fine but debt handling expects
2007                  * @iocg->inuse stay at the minimum and we don't wanna
2008                  * interfere.
2009                  */
2010                 if (iocg->abs_vdebt) {
2011                         WARN_ON_ONCE(iocg->inuse > 1);
2012                         continue;
2013                 }
2014
2015                 /* w' = s' * b' / b'_p, note that b' == b'_t for donating leaves */
2016                 inuse = DIV64_U64_ROUND_UP(
2017                         parent->child_adjusted_sum * iocg->hweight_after_donation,
2018                         parent->hweight_inuse);
2019
2020                 TRACE_IOCG_PATH(inuse_transfer, iocg, now,
2021                                 iocg->inuse, inuse,
2022                                 iocg->hweight_inuse,
2023                                 iocg->hweight_after_donation);
2024
2025                 __propagate_weights(iocg, iocg->active, inuse, true, now);
2026         }
2027
2028         /* walk list should be dissolved after use */
2029         list_for_each_entry_safe(iocg, tiocg, &inner_walk, walk_list)
2030                 list_del_init(&iocg->walk_list);
2031 }
2032
2033 /*
2034  * A low weight iocg can amass a large amount of debt, for example, when
2035  * anonymous memory gets reclaimed aggressively. If the system has a lot of
2036  * memory paired with a slow IO device, the debt can span multiple seconds or
2037  * more. If there are no other subsequent IO issuers, the in-debt iocg may end
2038  * up blocked paying its debt while the IO device is idle.
2039  *
2040  * The following protects against such cases. If the device has been
2041  * sufficiently idle for a while, the debts are halved and delays are
2042  * recalculated.
2043  */
2044 static void ioc_forgive_debts(struct ioc *ioc, u64 usage_us_sum, int nr_debtors,
2045                               struct ioc_now *now)
2046 {
2047         struct ioc_gq *iocg;
2048         u64 dur, usage_pct, nr_cycles;
2049
2050         /* if no debtor, reset the cycle */
2051         if (!nr_debtors) {
2052                 ioc->dfgv_period_at = now->now;
2053                 ioc->dfgv_period_rem = 0;
2054                 ioc->dfgv_usage_us_sum = 0;
2055                 return;
2056         }
2057
2058         /*
2059          * Debtors can pass through a lot of writes choking the device and we
2060          * don't want to be forgiving debts while the device is struggling from
2061          * write bursts. If we're missing latency targets, consider the device
2062          * fully utilized.
2063          */
2064         if (ioc->busy_level > 0)
2065                 usage_us_sum = max_t(u64, usage_us_sum, ioc->period_us);
2066
2067         ioc->dfgv_usage_us_sum += usage_us_sum;
2068         if (time_before64(now->now, ioc->dfgv_period_at + DFGV_PERIOD))
2069                 return;
2070
2071         /*
2072          * At least DFGV_PERIOD has passed since the last period. Calculate the
2073          * average usage and reset the period counters.
2074          */
2075         dur = now->now - ioc->dfgv_period_at;
2076         usage_pct = div64_u64(100 * ioc->dfgv_usage_us_sum, dur);
2077
2078         ioc->dfgv_period_at = now->now;
2079         ioc->dfgv_usage_us_sum = 0;
2080
2081         /* if was too busy, reset everything */
2082         if (usage_pct > DFGV_USAGE_PCT) {
2083                 ioc->dfgv_period_rem = 0;
2084                 return;
2085         }
2086
2087         /*
2088          * Usage is lower than threshold. Let's forgive some debts. Debt
2089          * forgiveness runs off of the usual ioc timer but its period usually
2090          * doesn't match ioc's. Compensate the difference by performing the
2091          * reduction as many times as would fit in the duration since the last
2092          * run and carrying over the left-over duration in @ioc->dfgv_period_rem
2093          * - if ioc period is 75% of DFGV_PERIOD, one out of three consecutive
2094          * reductions is doubled.
2095          */
2096         nr_cycles = dur + ioc->dfgv_period_rem;
2097         ioc->dfgv_period_rem = do_div(nr_cycles, DFGV_PERIOD);
2098
2099         list_for_each_entry(iocg, &ioc->active_iocgs, active_list) {
2100                 u64 __maybe_unused old_debt, __maybe_unused old_delay;
2101
2102                 if (!iocg->abs_vdebt && !iocg->delay)
2103                         continue;
2104
2105                 spin_lock(&iocg->waitq.lock);
2106
2107                 old_debt = iocg->abs_vdebt;
2108                 old_delay = iocg->delay;
2109
2110                 if (iocg->abs_vdebt)
2111                         iocg->abs_vdebt = iocg->abs_vdebt >> nr_cycles ?: 1;
2112                 if (iocg->delay)
2113                         iocg->delay = iocg->delay >> nr_cycles ?: 1;
2114
2115                 iocg_kick_waitq(iocg, true, now);
2116
2117                 TRACE_IOCG_PATH(iocg_forgive_debt, iocg, now, usage_pct,
2118                                 old_debt, iocg->abs_vdebt,
2119                                 old_delay, iocg->delay);
2120
2121                 spin_unlock(&iocg->waitq.lock);
2122         }
2123 }
2124
2125 /*
2126  * Check the active iocgs' state to avoid oversleeping and deactive
2127  * idle iocgs.
2128  *
2129  * Since waiters determine the sleep durations based on the vrate
2130  * they saw at the time of sleep, if vrate has increased, some
2131  * waiters could be sleeping for too long. Wake up tardy waiters
2132  * which should have woken up in the last period and expire idle
2133  * iocgs.
2134  */
2135 static int ioc_check_iocgs(struct ioc *ioc, struct ioc_now *now)
2136 {
2137         int nr_debtors = 0;
2138         struct ioc_gq *iocg, *tiocg;
2139
2140         list_for_each_entry_safe(iocg, tiocg, &ioc->active_iocgs, active_list) {
2141                 if (!waitqueue_active(&iocg->waitq) && !iocg->abs_vdebt &&
2142                     !iocg->delay && !iocg_is_idle(iocg))
2143                         continue;
2144
2145                 spin_lock(&iocg->waitq.lock);
2146
2147                 /* flush wait and indebt stat deltas */
2148                 if (iocg->wait_since) {
2149                         iocg->stat.wait_us += now->now - iocg->wait_since;
2150                         iocg->wait_since = now->now;
2151                 }
2152                 if (iocg->indebt_since) {
2153                         iocg->stat.indebt_us +=
2154                                 now->now - iocg->indebt_since;
2155                         iocg->indebt_since = now->now;
2156                 }
2157                 if (iocg->indelay_since) {
2158                         iocg->stat.indelay_us +=
2159                                 now->now - iocg->indelay_since;
2160                         iocg->indelay_since = now->now;
2161                 }
2162
2163                 if (waitqueue_active(&iocg->waitq) || iocg->abs_vdebt ||
2164                     iocg->delay) {
2165                         /* might be oversleeping vtime / hweight changes, kick */
2166                         iocg_kick_waitq(iocg, true, now);
2167                         if (iocg->abs_vdebt || iocg->delay)
2168                                 nr_debtors++;
2169                 } else if (iocg_is_idle(iocg)) {
2170                         /* no waiter and idle, deactivate */
2171                         u64 vtime = atomic64_read(&iocg->vtime);
2172                         s64 excess;
2173
2174                         /*
2175                          * @iocg has been inactive for a full duration and will
2176                          * have a high budget. Account anything above target as
2177                          * error and throw away. On reactivation, it'll start
2178                          * with the target budget.
2179                          */
2180                         excess = now->vnow - vtime - ioc->margins.target;
2181                         if (excess > 0) {
2182                                 u32 old_hwi;
2183
2184                                 current_hweight(iocg, NULL, &old_hwi);
2185                                 ioc->vtime_err -= div64_u64(excess * old_hwi,
2186                                                             WEIGHT_ONE);
2187                         }
2188
2189                         TRACE_IOCG_PATH(iocg_idle, iocg, now,
2190                                         atomic64_read(&iocg->active_period),
2191                                         atomic64_read(&ioc->cur_period), vtime);
2192                         __propagate_weights(iocg, 0, 0, false, now);
2193                         list_del_init(&iocg->active_list);
2194                 }
2195
2196                 spin_unlock(&iocg->waitq.lock);
2197         }
2198
2199         commit_weights(ioc);
2200         return nr_debtors;
2201 }
2202
2203 static void ioc_timer_fn(struct timer_list *timer)
2204 {
2205         struct ioc *ioc = container_of(timer, struct ioc, timer);
2206         struct ioc_gq *iocg, *tiocg;
2207         struct ioc_now now;
2208         LIST_HEAD(surpluses);
2209         int nr_debtors, nr_shortages = 0, nr_lagging = 0;
2210         u64 usage_us_sum = 0;
2211         u32 ppm_rthr = MILLION - ioc->params.qos[QOS_RPPM];
2212         u32 ppm_wthr = MILLION - ioc->params.qos[QOS_WPPM];
2213         u32 missed_ppm[2], rq_wait_pct;
2214         u64 period_vtime;
2215         int prev_busy_level;
2216
2217         /* how were the latencies during the period? */
2218         ioc_lat_stat(ioc, missed_ppm, &rq_wait_pct);
2219
2220         /* take care of active iocgs */
2221         spin_lock_irq(&ioc->lock);
2222
2223         ioc_now(ioc, &now);
2224
2225         period_vtime = now.vnow - ioc->period_at_vtime;
2226         if (WARN_ON_ONCE(!period_vtime)) {
2227                 spin_unlock_irq(&ioc->lock);
2228                 return;
2229         }
2230
2231         nr_debtors = ioc_check_iocgs(ioc, &now);
2232
2233         /*
2234          * Wait and indebt stat are flushed above and the donation calculation
2235          * below needs updated usage stat. Let's bring stat up-to-date.
2236          */
2237         iocg_flush_stat(&ioc->active_iocgs, &now);
2238
2239         /* calc usage and see whether some weights need to be moved around */
2240         list_for_each_entry(iocg, &ioc->active_iocgs, active_list) {
2241                 u64 vdone, vtime, usage_us;
2242                 u32 hw_active, hw_inuse;
2243
2244                 /*
2245                  * Collect unused and wind vtime closer to vnow to prevent
2246                  * iocgs from accumulating a large amount of budget.
2247                  */
2248                 vdone = atomic64_read(&iocg->done_vtime);
2249                 vtime = atomic64_read(&iocg->vtime);
2250                 current_hweight(iocg, &hw_active, &hw_inuse);
2251
2252                 /*
2253                  * Latency QoS detection doesn't account for IOs which are
2254                  * in-flight for longer than a period.  Detect them by
2255                  * comparing vdone against period start.  If lagging behind
2256                  * IOs from past periods, don't increase vrate.
2257                  */
2258                 if ((ppm_rthr != MILLION || ppm_wthr != MILLION) &&
2259                     !atomic_read(&iocg_to_blkg(iocg)->use_delay) &&
2260                     time_after64(vtime, vdone) &&
2261                     time_after64(vtime, now.vnow -
2262                                  MAX_LAGGING_PERIODS * period_vtime) &&
2263                     time_before64(vdone, now.vnow - period_vtime))
2264                         nr_lagging++;
2265
2266                 /*
2267                  * Determine absolute usage factoring in in-flight IOs to avoid
2268                  * high-latency completions appearing as idle.
2269                  */
2270                 usage_us = iocg->usage_delta_us;
2271                 usage_us_sum += usage_us;
2272
2273                 /* see whether there's surplus vtime */
2274                 WARN_ON_ONCE(!list_empty(&iocg->surplus_list));
2275                 if (hw_inuse < hw_active ||
2276                     (!waitqueue_active(&iocg->waitq) &&
2277                      time_before64(vtime, now.vnow - ioc->margins.low))) {
2278                         u32 hwa, old_hwi, hwm, new_hwi, usage;
2279                         u64 usage_dur;
2280
2281                         if (vdone != vtime) {
2282                                 u64 inflight_us = DIV64_U64_ROUND_UP(
2283                                         cost_to_abs_cost(vtime - vdone, hw_inuse),
2284                                         ioc->vtime_base_rate);
2285
2286                                 usage_us = max(usage_us, inflight_us);
2287                         }
2288
2289                         /* convert to hweight based usage ratio */
2290                         if (time_after64(iocg->activated_at, ioc->period_at))
2291                                 usage_dur = max_t(u64, now.now - iocg->activated_at, 1);
2292                         else
2293                                 usage_dur = max_t(u64, now.now - ioc->period_at, 1);
2294
2295                         usage = clamp_t(u32,
2296                                 DIV64_U64_ROUND_UP(usage_us * WEIGHT_ONE,
2297                                                    usage_dur),
2298                                 1, WEIGHT_ONE);
2299
2300                         /*
2301                          * Already donating or accumulated enough to start.
2302                          * Determine the donation amount.
2303                          */
2304                         current_hweight(iocg, &hwa, &old_hwi);
2305                         hwm = current_hweight_max(iocg);
2306                         new_hwi = hweight_after_donation(iocg, old_hwi, hwm,
2307                                                          usage, &now);
2308                         /*
2309                          * Donation calculation assumes hweight_after_donation
2310                          * to be positive, a condition that a donor w/ hwa < 2
2311                          * can't meet. Don't bother with donation if hwa is
2312                          * below 2. It's not gonna make a meaningful difference
2313                          * anyway.
2314                          */
2315                         if (new_hwi < hwm && hwa >= 2) {
2316                                 iocg->hweight_donating = hwa;
2317                                 iocg->hweight_after_donation = new_hwi;
2318                                 list_add(&iocg->surplus_list, &surpluses);
2319                         } else if (!iocg->abs_vdebt) {
2320                                 /*
2321                                  * @iocg doesn't have enough to donate. Reset
2322                                  * its inuse to active.
2323                                  *
2324                                  * Don't reset debtors as their inuse's are
2325                                  * owned by debt handling. This shouldn't affect
2326                                  * donation calculuation in any meaningful way
2327                                  * as @iocg doesn't have a meaningful amount of
2328                                  * share anyway.
2329                                  */
2330                                 TRACE_IOCG_PATH(inuse_shortage, iocg, &now,
2331                                                 iocg->inuse, iocg->active,
2332                                                 iocg->hweight_inuse, new_hwi);
2333
2334                                 __propagate_weights(iocg, iocg->active,
2335                                                     iocg->active, true, &now);
2336                                 nr_shortages++;
2337                         }
2338                 } else {
2339                         /* genuinely short on vtime */
2340                         nr_shortages++;
2341                 }
2342         }
2343
2344         if (!list_empty(&surpluses) && nr_shortages)
2345                 transfer_surpluses(&surpluses, &now);
2346
2347         commit_weights(ioc);
2348
2349         /* surplus list should be dissolved after use */
2350         list_for_each_entry_safe(iocg, tiocg, &surpluses, surplus_list)
2351                 list_del_init(&iocg->surplus_list);
2352
2353         /*
2354          * If q is getting clogged or we're missing too much, we're issuing
2355          * too much IO and should lower vtime rate.  If we're not missing
2356          * and experiencing shortages but not surpluses, we're too stingy
2357          * and should increase vtime rate.
2358          */
2359         prev_busy_level = ioc->busy_level;
2360         if (rq_wait_pct > RQ_WAIT_BUSY_PCT ||
2361             missed_ppm[READ] > ppm_rthr ||
2362             missed_ppm[WRITE] > ppm_wthr) {
2363                 /* clearly missing QoS targets, slow down vrate */
2364                 ioc->busy_level = max(ioc->busy_level, 0);
2365                 ioc->busy_level++;
2366         } else if (rq_wait_pct <= RQ_WAIT_BUSY_PCT * UNBUSY_THR_PCT / 100 &&
2367                    missed_ppm[READ] <= ppm_rthr * UNBUSY_THR_PCT / 100 &&
2368                    missed_ppm[WRITE] <= ppm_wthr * UNBUSY_THR_PCT / 100) {
2369                 /* QoS targets are being met with >25% margin */
2370                 if (nr_shortages) {
2371                         /*
2372                          * We're throttling while the device has spare
2373                          * capacity.  If vrate was being slowed down, stop.
2374                          */
2375                         ioc->busy_level = min(ioc->busy_level, 0);
2376
2377                         /*
2378                          * If there are IOs spanning multiple periods, wait
2379                          * them out before pushing the device harder.
2380                          */
2381                         if (!nr_lagging)
2382                                 ioc->busy_level--;
2383                 } else {
2384                         /*
2385                          * Nobody is being throttled and the users aren't
2386                          * issuing enough IOs to saturate the device.  We
2387                          * simply don't know how close the device is to
2388                          * saturation.  Coast.
2389                          */
2390                         ioc->busy_level = 0;
2391                 }
2392         } else {
2393                 /* inside the hysterisis margin, we're good */
2394                 ioc->busy_level = 0;
2395         }
2396
2397         ioc->busy_level = clamp(ioc->busy_level, -1000, 1000);
2398
2399         ioc_adjust_base_vrate(ioc, rq_wait_pct, nr_lagging, nr_shortages,
2400                               prev_busy_level, missed_ppm);
2401
2402         ioc_refresh_params(ioc, false);
2403
2404         ioc_forgive_debts(ioc, usage_us_sum, nr_debtors, &now);
2405
2406         /*
2407          * This period is done.  Move onto the next one.  If nothing's
2408          * going on with the device, stop the timer.
2409          */
2410         atomic64_inc(&ioc->cur_period);
2411
2412         if (ioc->running != IOC_STOP) {
2413                 if (!list_empty(&ioc->active_iocgs)) {
2414                         ioc_start_period(ioc, &now);
2415                 } else {
2416                         ioc->busy_level = 0;
2417                         ioc->vtime_err = 0;
2418                         ioc->running = IOC_IDLE;
2419                 }
2420
2421                 ioc_refresh_vrate(ioc, &now);
2422         }
2423
2424         spin_unlock_irq(&ioc->lock);
2425 }
2426
2427 static u64 adjust_inuse_and_calc_cost(struct ioc_gq *iocg, u64 vtime,
2428                                       u64 abs_cost, struct ioc_now *now)
2429 {
2430         struct ioc *ioc = iocg->ioc;
2431         struct ioc_margins *margins = &ioc->margins;
2432         u32 __maybe_unused old_inuse = iocg->inuse, __maybe_unused old_hwi;
2433         u32 hwi, adj_step;
2434         s64 margin;
2435         u64 cost, new_inuse;
2436
2437         current_hweight(iocg, NULL, &hwi);
2438         old_hwi = hwi;
2439         cost = abs_cost_to_cost(abs_cost, hwi);
2440         margin = now->vnow - vtime - cost;
2441
2442         /* debt handling owns inuse for debtors */
2443         if (iocg->abs_vdebt)
2444                 return cost;
2445
2446         /*
2447          * We only increase inuse during period and do so if the margin has
2448          * deteriorated since the previous adjustment.
2449          */
2450         if (margin >= iocg->saved_margin || margin >= margins->low ||
2451             iocg->inuse == iocg->active)
2452                 return cost;
2453
2454         spin_lock_irq(&ioc->lock);
2455
2456         /* we own inuse only when @iocg is in the normal active state */
2457         if (iocg->abs_vdebt || list_empty(&iocg->active_list)) {
2458                 spin_unlock_irq(&ioc->lock);
2459                 return cost;
2460         }
2461
2462         /*
2463          * Bump up inuse till @abs_cost fits in the existing budget.
2464          * adj_step must be determined after acquiring ioc->lock - we might
2465          * have raced and lost to another thread for activation and could
2466          * be reading 0 iocg->active before ioc->lock which will lead to
2467          * infinite loop.
2468          */
2469         new_inuse = iocg->inuse;
2470         adj_step = DIV_ROUND_UP(iocg->active * INUSE_ADJ_STEP_PCT, 100);
2471         do {
2472                 new_inuse = new_inuse + adj_step;
2473                 propagate_weights(iocg, iocg->active, new_inuse, true, now);
2474                 current_hweight(iocg, NULL, &hwi);
2475                 cost = abs_cost_to_cost(abs_cost, hwi);
2476         } while (time_after64(vtime + cost, now->vnow) &&
2477                  iocg->inuse != iocg->active);
2478
2479         spin_unlock_irq(&ioc->lock);
2480
2481         TRACE_IOCG_PATH(inuse_adjust, iocg, now,
2482                         old_inuse, iocg->inuse, old_hwi, hwi);
2483
2484         return cost;
2485 }
2486
2487 static void calc_vtime_cost_builtin(struct bio *bio, struct ioc_gq *iocg,
2488                                     bool is_merge, u64 *costp)
2489 {
2490         struct ioc *ioc = iocg->ioc;
2491         u64 coef_seqio, coef_randio, coef_page;
2492         u64 pages = max_t(u64, bio_sectors(bio) >> IOC_SECT_TO_PAGE_SHIFT, 1);
2493         u64 seek_pages = 0;
2494         u64 cost = 0;
2495
2496         switch (bio_op(bio)) {
2497         case REQ_OP_READ:
2498                 coef_seqio      = ioc->params.lcoefs[LCOEF_RSEQIO];
2499                 coef_randio     = ioc->params.lcoefs[LCOEF_RRANDIO];
2500                 coef_page       = ioc->params.lcoefs[LCOEF_RPAGE];
2501                 break;
2502         case REQ_OP_WRITE:
2503                 coef_seqio      = ioc->params.lcoefs[LCOEF_WSEQIO];
2504                 coef_randio     = ioc->params.lcoefs[LCOEF_WRANDIO];
2505                 coef_page       = ioc->params.lcoefs[LCOEF_WPAGE];
2506                 break;
2507         default:
2508                 goto out;
2509         }
2510
2511         if (iocg->cursor) {
2512                 seek_pages = abs(bio->bi_iter.bi_sector - iocg->cursor);
2513                 seek_pages >>= IOC_SECT_TO_PAGE_SHIFT;
2514         }
2515
2516         if (!is_merge) {
2517                 if (seek_pages > LCOEF_RANDIO_PAGES) {
2518                         cost += coef_randio;
2519                 } else {
2520                         cost += coef_seqio;
2521                 }
2522         }
2523         cost += pages * coef_page;
2524 out:
2525         *costp = cost;
2526 }
2527
2528 static u64 calc_vtime_cost(struct bio *bio, struct ioc_gq *iocg, bool is_merge)
2529 {
2530         u64 cost;
2531
2532         calc_vtime_cost_builtin(bio, iocg, is_merge, &cost);
2533         return cost;
2534 }
2535
2536 static void calc_size_vtime_cost_builtin(struct request *rq, struct ioc *ioc,
2537                                          u64 *costp)
2538 {
2539         unsigned int pages = blk_rq_stats_sectors(rq) >> IOC_SECT_TO_PAGE_SHIFT;
2540
2541         switch (req_op(rq)) {
2542         case REQ_OP_READ:
2543                 *costp = pages * ioc->params.lcoefs[LCOEF_RPAGE];
2544                 break;
2545         case REQ_OP_WRITE:
2546                 *costp = pages * ioc->params.lcoefs[LCOEF_WPAGE];
2547                 break;
2548         default:
2549                 *costp = 0;
2550         }
2551 }
2552
2553 static u64 calc_size_vtime_cost(struct request *rq, struct ioc *ioc)
2554 {
2555         u64 cost;
2556
2557         calc_size_vtime_cost_builtin(rq, ioc, &cost);
2558         return cost;
2559 }
2560
2561 static void ioc_rqos_throttle(struct rq_qos *rqos, struct bio *bio)
2562 {
2563         struct blkcg_gq *blkg = bio->bi_blkg;
2564         struct ioc *ioc = rqos_to_ioc(rqos);
2565         struct ioc_gq *iocg = blkg_to_iocg(blkg);
2566         struct ioc_now now;
2567         struct iocg_wait wait;
2568         u64 abs_cost, cost, vtime;
2569         bool use_debt, ioc_locked;
2570         unsigned long flags;
2571
2572         /* bypass IOs if disabled, still initializing, or for root cgroup */
2573         if (!ioc->enabled || !iocg || !iocg->level)
2574                 return;
2575
2576         /* calculate the absolute vtime cost */
2577         abs_cost = calc_vtime_cost(bio, iocg, false);
2578         if (!abs_cost)
2579                 return;
2580
2581         if (!iocg_activate(iocg, &now))
2582                 return;
2583
2584         iocg->cursor = bio_end_sector(bio);
2585         vtime = atomic64_read(&iocg->vtime);
2586         cost = adjust_inuse_and_calc_cost(iocg, vtime, abs_cost, &now);
2587
2588         /*
2589          * If no one's waiting and within budget, issue right away.  The
2590          * tests are racy but the races aren't systemic - we only miss once
2591          * in a while which is fine.
2592          */
2593         if (!waitqueue_active(&iocg->waitq) && !iocg->abs_vdebt &&
2594             time_before_eq64(vtime + cost, now.vnow)) {
2595                 iocg_commit_bio(iocg, bio, abs_cost, cost);
2596                 return;
2597         }
2598
2599         /*
2600          * We're over budget. This can be handled in two ways. IOs which may
2601          * cause priority inversions are punted to @ioc->aux_iocg and charged as
2602          * debt. Otherwise, the issuer is blocked on @iocg->waitq. Debt handling
2603          * requires @ioc->lock, waitq handling @iocg->waitq.lock. Determine
2604          * whether debt handling is needed and acquire locks accordingly.
2605          */
2606         use_debt = bio_issue_as_root_blkg(bio) || fatal_signal_pending(current);
2607         ioc_locked = use_debt || READ_ONCE(iocg->abs_vdebt);
2608 retry_lock:
2609         iocg_lock(iocg, ioc_locked, &flags);
2610
2611         /*
2612          * @iocg must stay activated for debt and waitq handling. Deactivation
2613          * is synchronized against both ioc->lock and waitq.lock and we won't
2614          * get deactivated as long as we're waiting or has debt, so we're good
2615          * if we're activated here. In the unlikely cases that we aren't, just
2616          * issue the IO.
2617          */
2618         if (unlikely(list_empty(&iocg->active_list))) {
2619                 iocg_unlock(iocg, ioc_locked, &flags);
2620                 iocg_commit_bio(iocg, bio, abs_cost, cost);
2621                 return;
2622         }
2623
2624         /*
2625          * We're over budget. If @bio has to be issued regardless, remember
2626          * the abs_cost instead of advancing vtime. iocg_kick_waitq() will pay
2627          * off the debt before waking more IOs.
2628          *
2629          * This way, the debt is continuously paid off each period with the
2630          * actual budget available to the cgroup. If we just wound vtime, we
2631          * would incorrectly use the current hw_inuse for the entire amount
2632          * which, for example, can lead to the cgroup staying blocked for a
2633          * long time even with substantially raised hw_inuse.
2634          *
2635          * An iocg with vdebt should stay online so that the timer can keep
2636          * deducting its vdebt and [de]activate use_delay mechanism
2637          * accordingly. We don't want to race against the timer trying to
2638          * clear them and leave @iocg inactive w/ dangling use_delay heavily
2639          * penalizing the cgroup and its descendants.
2640          */
2641         if (use_debt) {
2642                 iocg_incur_debt(iocg, abs_cost, &now);
2643                 if (iocg_kick_delay(iocg, &now))
2644                         blkcg_schedule_throttle(rqos->q->disk,
2645                                         (bio->bi_opf & REQ_SWAP) == REQ_SWAP);
2646                 iocg_unlock(iocg, ioc_locked, &flags);
2647                 return;
2648         }
2649
2650         /* guarantee that iocgs w/ waiters have maximum inuse */
2651         if (!iocg->abs_vdebt && iocg->inuse != iocg->active) {
2652                 if (!ioc_locked) {
2653                         iocg_unlock(iocg, false, &flags);
2654                         ioc_locked = true;
2655                         goto retry_lock;
2656                 }
2657                 propagate_weights(iocg, iocg->active, iocg->active, true,
2658                                   &now);
2659         }
2660
2661         /*
2662          * Append self to the waitq and schedule the wakeup timer if we're
2663          * the first waiter.  The timer duration is calculated based on the
2664          * current vrate.  vtime and hweight changes can make it too short
2665          * or too long.  Each wait entry records the absolute cost it's
2666          * waiting for to allow re-evaluation using a custom wait entry.
2667          *
2668          * If too short, the timer simply reschedules itself.  If too long,
2669          * the period timer will notice and trigger wakeups.
2670          *
2671          * All waiters are on iocg->waitq and the wait states are
2672          * synchronized using waitq.lock.
2673          */
2674         init_waitqueue_func_entry(&wait.wait, iocg_wake_fn);
2675         wait.wait.private = current;
2676         wait.bio = bio;
2677         wait.abs_cost = abs_cost;
2678         wait.committed = false; /* will be set true by waker */
2679
2680         __add_wait_queue_entry_tail(&iocg->waitq, &wait.wait);
2681         iocg_kick_waitq(iocg, ioc_locked, &now);
2682
2683         iocg_unlock(iocg, ioc_locked, &flags);
2684
2685         while (true) {
2686                 set_current_state(TASK_UNINTERRUPTIBLE);
2687                 if (wait.committed)
2688                         break;
2689                 io_schedule();
2690         }
2691
2692         /* waker already committed us, proceed */
2693         finish_wait(&iocg->waitq, &wait.wait);
2694 }
2695
2696 static void ioc_rqos_merge(struct rq_qos *rqos, struct request *rq,
2697                            struct bio *bio)
2698 {
2699         struct ioc_gq *iocg = blkg_to_iocg(bio->bi_blkg);
2700         struct ioc *ioc = rqos_to_ioc(rqos);
2701         sector_t bio_end = bio_end_sector(bio);
2702         struct ioc_now now;
2703         u64 vtime, abs_cost, cost;
2704         unsigned long flags;
2705
2706         /* bypass if disabled, still initializing, or for root cgroup */
2707         if (!ioc->enabled || !iocg || !iocg->level)
2708                 return;
2709
2710         abs_cost = calc_vtime_cost(bio, iocg, true);
2711         if (!abs_cost)
2712                 return;
2713
2714         ioc_now(ioc, &now);
2715
2716         vtime = atomic64_read(&iocg->vtime);
2717         cost = adjust_inuse_and_calc_cost(iocg, vtime, abs_cost, &now);
2718
2719         /* update cursor if backmerging into the request at the cursor */
2720         if (blk_rq_pos(rq) < bio_end &&
2721             blk_rq_pos(rq) + blk_rq_sectors(rq) == iocg->cursor)
2722                 iocg->cursor = bio_end;
2723
2724         /*
2725          * Charge if there's enough vtime budget and the existing request has
2726          * cost assigned.
2727          */
2728         if (rq->bio && rq->bio->bi_iocost_cost &&
2729             time_before_eq64(atomic64_read(&iocg->vtime) + cost, now.vnow)) {
2730                 iocg_commit_bio(iocg, bio, abs_cost, cost);
2731                 return;
2732         }
2733
2734         /*
2735          * Otherwise, account it as debt if @iocg is online, which it should
2736          * be for the vast majority of cases. See debt handling in
2737          * ioc_rqos_throttle() for details.
2738          */
2739         spin_lock_irqsave(&ioc->lock, flags);
2740         spin_lock(&iocg->waitq.lock);
2741
2742         if (likely(!list_empty(&iocg->active_list))) {
2743                 iocg_incur_debt(iocg, abs_cost, &now);
2744                 if (iocg_kick_delay(iocg, &now))
2745                         blkcg_schedule_throttle(rqos->q->disk,
2746                                         (bio->bi_opf & REQ_SWAP) == REQ_SWAP);
2747         } else {
2748                 iocg_commit_bio(iocg, bio, abs_cost, cost);
2749         }
2750
2751         spin_unlock(&iocg->waitq.lock);
2752         spin_unlock_irqrestore(&ioc->lock, flags);
2753 }
2754
2755 static void ioc_rqos_done_bio(struct rq_qos *rqos, struct bio *bio)
2756 {
2757         struct ioc_gq *iocg = blkg_to_iocg(bio->bi_blkg);
2758
2759         if (iocg && bio->bi_iocost_cost)
2760                 atomic64_add(bio->bi_iocost_cost, &iocg->done_vtime);
2761 }
2762
2763 static void ioc_rqos_done(struct rq_qos *rqos, struct request *rq)
2764 {
2765         struct ioc *ioc = rqos_to_ioc(rqos);
2766         struct ioc_pcpu_stat *ccs;
2767         u64 on_q_ns, rq_wait_ns, size_nsec;
2768         int pidx, rw;
2769
2770         if (!ioc->enabled || !rq->alloc_time_ns || !rq->start_time_ns)
2771                 return;
2772
2773         switch (req_op(rq)) {
2774         case REQ_OP_READ:
2775                 pidx = QOS_RLAT;
2776                 rw = READ;
2777                 break;
2778         case REQ_OP_WRITE:
2779                 pidx = QOS_WLAT;
2780                 rw = WRITE;
2781                 break;
2782         default:
2783                 return;
2784         }
2785
2786         on_q_ns = ktime_get_ns() - rq->alloc_time_ns;
2787         rq_wait_ns = rq->start_time_ns - rq->alloc_time_ns;
2788         size_nsec = div64_u64(calc_size_vtime_cost(rq, ioc), VTIME_PER_NSEC);
2789
2790         ccs = get_cpu_ptr(ioc->pcpu_stat);
2791
2792         if (on_q_ns <= size_nsec ||
2793             on_q_ns - size_nsec <= ioc->params.qos[pidx] * NSEC_PER_USEC)
2794                 local_inc(&ccs->missed[rw].nr_met);
2795         else
2796                 local_inc(&ccs->missed[rw].nr_missed);
2797
2798         local64_add(rq_wait_ns, &ccs->rq_wait_ns);
2799
2800         put_cpu_ptr(ccs);
2801 }
2802
2803 static void ioc_rqos_queue_depth_changed(struct rq_qos *rqos)
2804 {
2805         struct ioc *ioc = rqos_to_ioc(rqos);
2806
2807         spin_lock_irq(&ioc->lock);
2808         ioc_refresh_params(ioc, false);
2809         spin_unlock_irq(&ioc->lock);
2810 }
2811
2812 static void ioc_rqos_exit(struct rq_qos *rqos)
2813 {
2814         struct ioc *ioc = rqos_to_ioc(rqos);
2815
2816         blkcg_deactivate_policy(rqos->q, &blkcg_policy_iocost);
2817
2818         spin_lock_irq(&ioc->lock);
2819         ioc->running = IOC_STOP;
2820         spin_unlock_irq(&ioc->lock);
2821
2822         del_timer_sync(&ioc->timer);
2823         free_percpu(ioc->pcpu_stat);
2824         kfree(ioc);
2825 }
2826
2827 static struct rq_qos_ops ioc_rqos_ops = {
2828         .throttle = ioc_rqos_throttle,
2829         .merge = ioc_rqos_merge,
2830         .done_bio = ioc_rqos_done_bio,
2831         .done = ioc_rqos_done,
2832         .queue_depth_changed = ioc_rqos_queue_depth_changed,
2833         .exit = ioc_rqos_exit,
2834 };
2835
2836 static int blk_iocost_init(struct gendisk *disk)
2837 {
2838         struct request_queue *q = disk->queue;
2839         struct ioc *ioc;
2840         struct rq_qos *rqos;
2841         int i, cpu, ret;
2842
2843         ioc = kzalloc(sizeof(*ioc), GFP_KERNEL);
2844         if (!ioc)
2845                 return -ENOMEM;
2846
2847         ioc->pcpu_stat = alloc_percpu(struct ioc_pcpu_stat);
2848         if (!ioc->pcpu_stat) {
2849                 kfree(ioc);
2850                 return -ENOMEM;
2851         }
2852
2853         for_each_possible_cpu(cpu) {
2854                 struct ioc_pcpu_stat *ccs = per_cpu_ptr(ioc->pcpu_stat, cpu);
2855
2856                 for (i = 0; i < ARRAY_SIZE(ccs->missed); i++) {
2857                         local_set(&ccs->missed[i].nr_met, 0);
2858                         local_set(&ccs->missed[i].nr_missed, 0);
2859                 }
2860                 local64_set(&ccs->rq_wait_ns, 0);
2861         }
2862
2863         rqos = &ioc->rqos;
2864         rqos->id = RQ_QOS_COST;
2865         rqos->ops = &ioc_rqos_ops;
2866         rqos->q = q;
2867
2868         spin_lock_init(&ioc->lock);
2869         timer_setup(&ioc->timer, ioc_timer_fn, 0);
2870         INIT_LIST_HEAD(&ioc->active_iocgs);
2871
2872         ioc->running = IOC_IDLE;
2873         ioc->vtime_base_rate = VTIME_PER_USEC;
2874         atomic64_set(&ioc->vtime_rate, VTIME_PER_USEC);
2875         seqcount_spinlock_init(&ioc->period_seqcount, &ioc->lock);
2876         ioc->period_at = ktime_to_us(ktime_get());
2877         atomic64_set(&ioc->cur_period, 0);
2878         atomic_set(&ioc->hweight_gen, 0);
2879
2880         spin_lock_irq(&ioc->lock);
2881         ioc->autop_idx = AUTOP_INVALID;
2882         ioc_refresh_params(ioc, true);
2883         spin_unlock_irq(&ioc->lock);
2884
2885         /*
2886          * rqos must be added before activation to allow iocg_pd_init() to
2887          * lookup the ioc from q. This means that the rqos methods may get
2888          * called before policy activation completion, can't assume that the
2889          * target bio has an iocg associated and need to test for NULL iocg.
2890          */
2891         ret = rq_qos_add(q, rqos);
2892         if (ret)
2893                 goto err_free_ioc;
2894
2895         ret = blkcg_activate_policy(q, &blkcg_policy_iocost);
2896         if (ret)
2897                 goto err_del_qos;
2898         return 0;
2899
2900 err_del_qos:
2901         rq_qos_del(q, rqos);
2902 err_free_ioc:
2903         free_percpu(ioc->pcpu_stat);
2904         kfree(ioc);
2905         return ret;
2906 }
2907
2908 static struct blkcg_policy_data *ioc_cpd_alloc(gfp_t gfp)
2909 {
2910         struct ioc_cgrp *iocc;
2911
2912         iocc = kzalloc(sizeof(struct ioc_cgrp), gfp);
2913         if (!iocc)
2914                 return NULL;
2915
2916         iocc->dfl_weight = CGROUP_WEIGHT_DFL * WEIGHT_ONE;
2917         return &iocc->cpd;
2918 }
2919
2920 static void ioc_cpd_free(struct blkcg_policy_data *cpd)
2921 {
2922         kfree(container_of(cpd, struct ioc_cgrp, cpd));
2923 }
2924
2925 static struct blkg_policy_data *ioc_pd_alloc(gfp_t gfp, struct request_queue *q,
2926                                              struct blkcg *blkcg)
2927 {
2928         int levels = blkcg->css.cgroup->level + 1;
2929         struct ioc_gq *iocg;
2930
2931         iocg = kzalloc_node(struct_size(iocg, ancestors, levels), gfp, q->node);
2932         if (!iocg)
2933                 return NULL;
2934
2935         iocg->pcpu_stat = alloc_percpu_gfp(struct iocg_pcpu_stat, gfp);
2936         if (!iocg->pcpu_stat) {
2937                 kfree(iocg);
2938                 return NULL;
2939         }
2940
2941         return &iocg->pd;
2942 }
2943
2944 static void ioc_pd_init(struct blkg_policy_data *pd)
2945 {
2946         struct ioc_gq *iocg = pd_to_iocg(pd);
2947         struct blkcg_gq *blkg = pd_to_blkg(&iocg->pd);
2948         struct ioc *ioc = q_to_ioc(blkg->q);
2949         struct ioc_now now;
2950         struct blkcg_gq *tblkg;
2951         unsigned long flags;
2952
2953         ioc_now(ioc, &now);
2954
2955         iocg->ioc = ioc;
2956         atomic64_set(&iocg->vtime, now.vnow);
2957         atomic64_set(&iocg->done_vtime, now.vnow);
2958         atomic64_set(&iocg->active_period, atomic64_read(&ioc->cur_period));
2959         INIT_LIST_HEAD(&iocg->active_list);
2960         INIT_LIST_HEAD(&iocg->walk_list);
2961         INIT_LIST_HEAD(&iocg->surplus_list);
2962         iocg->hweight_active = WEIGHT_ONE;
2963         iocg->hweight_inuse = WEIGHT_ONE;
2964
2965         init_waitqueue_head(&iocg->waitq);
2966         hrtimer_init(&iocg->waitq_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
2967         iocg->waitq_timer.function = iocg_waitq_timer_fn;
2968
2969         iocg->level = blkg->blkcg->css.cgroup->level;
2970
2971         for (tblkg = blkg; tblkg; tblkg = tblkg->parent) {
2972                 struct ioc_gq *tiocg = blkg_to_iocg(tblkg);
2973                 iocg->ancestors[tiocg->level] = tiocg;
2974         }
2975
2976         spin_lock_irqsave(&ioc->lock, flags);
2977         weight_updated(iocg, &now);
2978         spin_unlock_irqrestore(&ioc->lock, flags);
2979 }
2980
2981 static void ioc_pd_free(struct blkg_policy_data *pd)
2982 {
2983         struct ioc_gq *iocg = pd_to_iocg(pd);
2984         struct ioc *ioc = iocg->ioc;
2985         unsigned long flags;
2986
2987         if (ioc) {
2988                 spin_lock_irqsave(&ioc->lock, flags);
2989
2990                 if (!list_empty(&iocg->active_list)) {
2991                         struct ioc_now now;
2992
2993                         ioc_now(ioc, &now);
2994                         propagate_weights(iocg, 0, 0, false, &now);
2995                         list_del_init(&iocg->active_list);
2996                 }
2997
2998                 WARN_ON_ONCE(!list_empty(&iocg->walk_list));
2999                 WARN_ON_ONCE(!list_empty(&iocg->surplus_list));
3000
3001                 spin_unlock_irqrestore(&ioc->lock, flags);
3002
3003                 hrtimer_cancel(&iocg->waitq_timer);
3004         }
3005         free_percpu(iocg->pcpu_stat);
3006         kfree(iocg);
3007 }
3008
3009 static void ioc_pd_stat(struct blkg_policy_data *pd, struct seq_file *s)
3010 {
3011         struct ioc_gq *iocg = pd_to_iocg(pd);
3012         struct ioc *ioc = iocg->ioc;
3013
3014         if (!ioc->enabled)
3015                 return;
3016
3017         if (iocg->level == 0) {
3018                 unsigned vp10k = DIV64_U64_ROUND_CLOSEST(
3019                         ioc->vtime_base_rate * 10000,
3020                         VTIME_PER_USEC);
3021                 seq_printf(s, " cost.vrate=%u.%02u", vp10k / 100, vp10k % 100);
3022         }
3023
3024         seq_printf(s, " cost.usage=%llu", iocg->last_stat.usage_us);
3025
3026         if (blkcg_debug_stats)
3027                 seq_printf(s, " cost.wait=%llu cost.indebt=%llu cost.indelay=%llu",
3028                         iocg->last_stat.wait_us,
3029                         iocg->last_stat.indebt_us,
3030                         iocg->last_stat.indelay_us);
3031 }
3032
3033 static u64 ioc_weight_prfill(struct seq_file *sf, struct blkg_policy_data *pd,
3034                              int off)
3035 {
3036         const char *dname = blkg_dev_name(pd->blkg);
3037         struct ioc_gq *iocg = pd_to_iocg(pd);
3038
3039         if (dname && iocg->cfg_weight)
3040                 seq_printf(sf, "%s %u\n", dname, iocg->cfg_weight / WEIGHT_ONE);
3041         return 0;
3042 }
3043
3044
3045 static int ioc_weight_show(struct seq_file *sf, void *v)
3046 {
3047         struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
3048         struct ioc_cgrp *iocc = blkcg_to_iocc(blkcg);
3049
3050         seq_printf(sf, "default %u\n", iocc->dfl_weight / WEIGHT_ONE);
3051         blkcg_print_blkgs(sf, blkcg, ioc_weight_prfill,
3052                           &blkcg_policy_iocost, seq_cft(sf)->private, false);
3053         return 0;
3054 }
3055
3056 static ssize_t ioc_weight_write(struct kernfs_open_file *of, char *buf,
3057                                 size_t nbytes, loff_t off)
3058 {
3059         struct blkcg *blkcg = css_to_blkcg(of_css(of));
3060         struct ioc_cgrp *iocc = blkcg_to_iocc(blkcg);
3061         struct blkg_conf_ctx ctx;
3062         struct ioc_now now;
3063         struct ioc_gq *iocg;
3064         u32 v;
3065         int ret;
3066
3067         if (!strchr(buf, ':')) {
3068                 struct blkcg_gq *blkg;
3069
3070                 if (!sscanf(buf, "default %u", &v) && !sscanf(buf, "%u", &v))
3071                         return -EINVAL;
3072
3073                 if (v < CGROUP_WEIGHT_MIN || v > CGROUP_WEIGHT_MAX)
3074                         return -EINVAL;
3075
3076                 spin_lock_irq(&blkcg->lock);
3077                 iocc->dfl_weight = v * WEIGHT_ONE;
3078                 hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
3079                         struct ioc_gq *iocg = blkg_to_iocg(blkg);
3080
3081                         if (iocg) {
3082                                 spin_lock(&iocg->ioc->lock);
3083                                 ioc_now(iocg->ioc, &now);
3084                                 weight_updated(iocg, &now);
3085                                 spin_unlock(&iocg->ioc->lock);
3086                         }
3087                 }
3088                 spin_unlock_irq(&blkcg->lock);
3089
3090                 return nbytes;
3091         }
3092
3093         ret = blkg_conf_prep(blkcg, &blkcg_policy_iocost, buf, &ctx);
3094         if (ret)
3095                 return ret;
3096
3097         iocg = blkg_to_iocg(ctx.blkg);
3098
3099         if (!strncmp(ctx.body, "default", 7)) {
3100                 v = 0;
3101         } else {
3102                 if (!sscanf(ctx.body, "%u", &v))
3103                         goto einval;
3104                 if (v < CGROUP_WEIGHT_MIN || v > CGROUP_WEIGHT_MAX)
3105                         goto einval;
3106         }
3107
3108         spin_lock(&iocg->ioc->lock);
3109         iocg->cfg_weight = v * WEIGHT_ONE;
3110         ioc_now(iocg->ioc, &now);
3111         weight_updated(iocg, &now);
3112         spin_unlock(&iocg->ioc->lock);
3113
3114         blkg_conf_finish(&ctx);
3115         return nbytes;
3116
3117 einval:
3118         blkg_conf_finish(&ctx);
3119         return -EINVAL;
3120 }
3121
3122 static u64 ioc_qos_prfill(struct seq_file *sf, struct blkg_policy_data *pd,
3123                           int off)
3124 {
3125         const char *dname = blkg_dev_name(pd->blkg);
3126         struct ioc *ioc = pd_to_iocg(pd)->ioc;
3127
3128         if (!dname)
3129                 return 0;
3130
3131         seq_printf(sf, "%s enable=%d ctrl=%s rpct=%u.%02u rlat=%u wpct=%u.%02u wlat=%u min=%u.%02u max=%u.%02u\n",
3132                    dname, ioc->enabled, ioc->user_qos_params ? "user" : "auto",
3133                    ioc->params.qos[QOS_RPPM] / 10000,
3134                    ioc->params.qos[QOS_RPPM] % 10000 / 100,
3135                    ioc->params.qos[QOS_RLAT],
3136                    ioc->params.qos[QOS_WPPM] / 10000,
3137                    ioc->params.qos[QOS_WPPM] % 10000 / 100,
3138                    ioc->params.qos[QOS_WLAT],
3139                    ioc->params.qos[QOS_MIN] / 10000,
3140                    ioc->params.qos[QOS_MIN] % 10000 / 100,
3141                    ioc->params.qos[QOS_MAX] / 10000,
3142                    ioc->params.qos[QOS_MAX] % 10000 / 100);
3143         return 0;
3144 }
3145
3146 static int ioc_qos_show(struct seq_file *sf, void *v)
3147 {
3148         struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
3149
3150         blkcg_print_blkgs(sf, blkcg, ioc_qos_prfill,
3151                           &blkcg_policy_iocost, seq_cft(sf)->private, false);
3152         return 0;
3153 }
3154
3155 static const match_table_t qos_ctrl_tokens = {
3156         { QOS_ENABLE,           "enable=%u"     },
3157         { QOS_CTRL,             "ctrl=%s"       },
3158         { NR_QOS_CTRL_PARAMS,   NULL            },
3159 };
3160
3161 static const match_table_t qos_tokens = {
3162         { QOS_RPPM,             "rpct=%s"       },
3163         { QOS_RLAT,             "rlat=%u"       },
3164         { QOS_WPPM,             "wpct=%s"       },
3165         { QOS_WLAT,             "wlat=%u"       },
3166         { QOS_MIN,              "min=%s"        },
3167         { QOS_MAX,              "max=%s"        },
3168         { NR_QOS_PARAMS,        NULL            },
3169 };
3170
3171 static ssize_t ioc_qos_write(struct kernfs_open_file *of, char *input,
3172                              size_t nbytes, loff_t off)
3173 {
3174         struct block_device *bdev;
3175         struct gendisk *disk;
3176         struct ioc *ioc;
3177         u32 qos[NR_QOS_PARAMS];
3178         bool enable, user;
3179         char *p;
3180         int ret;
3181
3182         bdev = blkcg_conf_open_bdev(&input);
3183         if (IS_ERR(bdev))
3184                 return PTR_ERR(bdev);
3185
3186         disk = bdev->bd_disk;
3187         ioc = q_to_ioc(disk->queue);
3188         if (!ioc) {
3189                 ret = blk_iocost_init(disk);
3190                 if (ret)
3191                         goto err;
3192                 ioc = q_to_ioc(disk->queue);
3193         }
3194
3195         spin_lock_irq(&ioc->lock);
3196         memcpy(qos, ioc->params.qos, sizeof(qos));
3197         enable = ioc->enabled;
3198         user = ioc->user_qos_params;
3199         spin_unlock_irq(&ioc->lock);
3200
3201         while ((p = strsep(&input, " \t\n"))) {
3202                 substring_t args[MAX_OPT_ARGS];
3203                 char buf[32];
3204                 int tok;
3205                 s64 v;
3206
3207                 if (!*p)
3208                         continue;
3209
3210                 switch (match_token(p, qos_ctrl_tokens, args)) {
3211                 case QOS_ENABLE:
3212                         match_u64(&args[0], &v);
3213                         enable = v;
3214                         continue;
3215                 case QOS_CTRL:
3216                         match_strlcpy(buf, &args[0], sizeof(buf));
3217                         if (!strcmp(buf, "auto"))
3218                                 user = false;
3219                         else if (!strcmp(buf, "user"))
3220                                 user = true;
3221                         else
3222                                 goto einval;
3223                         continue;
3224                 }
3225
3226                 tok = match_token(p, qos_tokens, args);
3227                 switch (tok) {
3228                 case QOS_RPPM:
3229                 case QOS_WPPM:
3230                         if (match_strlcpy(buf, &args[0], sizeof(buf)) >=
3231                             sizeof(buf))
3232                                 goto einval;
3233                         if (cgroup_parse_float(buf, 2, &v))
3234                                 goto einval;
3235                         if (v < 0 || v > 10000)
3236                                 goto einval;
3237                         qos[tok] = v * 100;
3238                         break;
3239                 case QOS_RLAT:
3240                 case QOS_WLAT:
3241                         if (match_u64(&args[0], &v))
3242                                 goto einval;
3243                         qos[tok] = v;
3244                         break;
3245                 case QOS_MIN:
3246                 case QOS_MAX:
3247                         if (match_strlcpy(buf, &args[0], sizeof(buf)) >=
3248                             sizeof(buf))
3249                                 goto einval;
3250                         if (cgroup_parse_float(buf, 2, &v))
3251                                 goto einval;
3252                         if (v < 0)
3253                                 goto einval;
3254                         qos[tok] = clamp_t(s64, v * 100,
3255                                            VRATE_MIN_PPM, VRATE_MAX_PPM);
3256                         break;
3257                 default:
3258                         goto einval;
3259                 }
3260                 user = true;
3261         }
3262
3263         if (qos[QOS_MIN] > qos[QOS_MAX])
3264                 goto einval;
3265
3266         spin_lock_irq(&ioc->lock);
3267
3268         if (enable) {
3269                 blk_stat_enable_accounting(disk->queue);
3270                 blk_queue_flag_set(QUEUE_FLAG_RQ_ALLOC_TIME, disk->queue);
3271                 ioc->enabled = true;
3272         } else {
3273                 blk_queue_flag_clear(QUEUE_FLAG_RQ_ALLOC_TIME, disk->queue);
3274                 ioc->enabled = false;
3275         }
3276
3277         if (user) {
3278                 memcpy(ioc->params.qos, qos, sizeof(qos));
3279                 ioc->user_qos_params = true;
3280         } else {
3281                 ioc->user_qos_params = false;
3282         }
3283
3284         ioc_refresh_params(ioc, true);
3285         spin_unlock_irq(&ioc->lock);
3286
3287         blkdev_put_no_open(bdev);
3288         return nbytes;
3289 einval:
3290         ret = -EINVAL;
3291 err:
3292         blkdev_put_no_open(bdev);
3293         return ret;
3294 }
3295
3296 static u64 ioc_cost_model_prfill(struct seq_file *sf,
3297                                  struct blkg_policy_data *pd, int off)
3298 {
3299         const char *dname = blkg_dev_name(pd->blkg);
3300         struct ioc *ioc = pd_to_iocg(pd)->ioc;
3301         u64 *u = ioc->params.i_lcoefs;
3302
3303         if (!dname)
3304                 return 0;
3305
3306         seq_printf(sf, "%s ctrl=%s model=linear "
3307                    "rbps=%llu rseqiops=%llu rrandiops=%llu "
3308                    "wbps=%llu wseqiops=%llu wrandiops=%llu\n",
3309                    dname, ioc->user_cost_model ? "user" : "auto",
3310                    u[I_LCOEF_RBPS], u[I_LCOEF_RSEQIOPS], u[I_LCOEF_RRANDIOPS],
3311                    u[I_LCOEF_WBPS], u[I_LCOEF_WSEQIOPS], u[I_LCOEF_WRANDIOPS]);
3312         return 0;
3313 }
3314
3315 static int ioc_cost_model_show(struct seq_file *sf, void *v)
3316 {
3317         struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
3318
3319         blkcg_print_blkgs(sf, blkcg, ioc_cost_model_prfill,
3320                           &blkcg_policy_iocost, seq_cft(sf)->private, false);
3321         return 0;
3322 }
3323
3324 static const match_table_t cost_ctrl_tokens = {
3325         { COST_CTRL,            "ctrl=%s"       },
3326         { COST_MODEL,           "model=%s"      },
3327         { NR_COST_CTRL_PARAMS,  NULL            },
3328 };
3329
3330 static const match_table_t i_lcoef_tokens = {
3331         { I_LCOEF_RBPS,         "rbps=%u"       },
3332         { I_LCOEF_RSEQIOPS,     "rseqiops=%u"   },
3333         { I_LCOEF_RRANDIOPS,    "rrandiops=%u"  },
3334         { I_LCOEF_WBPS,         "wbps=%u"       },
3335         { I_LCOEF_WSEQIOPS,     "wseqiops=%u"   },
3336         { I_LCOEF_WRANDIOPS,    "wrandiops=%u"  },
3337         { NR_I_LCOEFS,          NULL            },
3338 };
3339
3340 static ssize_t ioc_cost_model_write(struct kernfs_open_file *of, char *input,
3341                                     size_t nbytes, loff_t off)
3342 {
3343         struct block_device *bdev;
3344         struct ioc *ioc;
3345         u64 u[NR_I_LCOEFS];
3346         bool user;
3347         char *p;
3348         int ret;
3349
3350         bdev = blkcg_conf_open_bdev(&input);
3351         if (IS_ERR(bdev))
3352                 return PTR_ERR(bdev);
3353
3354         ioc = q_to_ioc(bdev_get_queue(bdev));
3355         if (!ioc) {
3356                 ret = blk_iocost_init(bdev->bd_disk);
3357                 if (ret)
3358                         goto err;
3359                 ioc = q_to_ioc(bdev_get_queue(bdev));
3360         }
3361
3362         spin_lock_irq(&ioc->lock);
3363         memcpy(u, ioc->params.i_lcoefs, sizeof(u));
3364         user = ioc->user_cost_model;
3365         spin_unlock_irq(&ioc->lock);
3366
3367         while ((p = strsep(&input, " \t\n"))) {
3368                 substring_t args[MAX_OPT_ARGS];
3369                 char buf[32];
3370                 int tok;
3371                 u64 v;
3372
3373                 if (!*p)
3374                         continue;
3375
3376                 switch (match_token(p, cost_ctrl_tokens, args)) {
3377                 case COST_CTRL:
3378                         match_strlcpy(buf, &args[0], sizeof(buf));
3379                         if (!strcmp(buf, "auto"))
3380                                 user = false;
3381                         else if (!strcmp(buf, "user"))
3382                                 user = true;
3383                         else
3384                                 goto einval;
3385                         continue;
3386                 case COST_MODEL:
3387                         match_strlcpy(buf, &args[0], sizeof(buf));
3388                         if (strcmp(buf, "linear"))
3389                                 goto einval;
3390                         continue;
3391                 }
3392
3393                 tok = match_token(p, i_lcoef_tokens, args);
3394                 if (tok == NR_I_LCOEFS)
3395                         goto einval;
3396                 if (match_u64(&args[0], &v))
3397                         goto einval;
3398                 u[tok] = v;
3399                 user = true;
3400         }
3401
3402         spin_lock_irq(&ioc->lock);
3403         if (user) {
3404                 memcpy(ioc->params.i_lcoefs, u, sizeof(u));
3405                 ioc->user_cost_model = true;
3406         } else {
3407                 ioc->user_cost_model = false;
3408         }
3409         ioc_refresh_params(ioc, true);
3410         spin_unlock_irq(&ioc->lock);
3411
3412         blkdev_put_no_open(bdev);
3413         return nbytes;
3414
3415 einval:
3416         ret = -EINVAL;
3417 err:
3418         blkdev_put_no_open(bdev);
3419         return ret;
3420 }
3421
3422 static struct cftype ioc_files[] = {
3423         {
3424                 .name = "weight",
3425                 .flags = CFTYPE_NOT_ON_ROOT,
3426                 .seq_show = ioc_weight_show,
3427                 .write = ioc_weight_write,
3428         },
3429         {
3430                 .name = "cost.qos",
3431                 .flags = CFTYPE_ONLY_ON_ROOT,
3432                 .seq_show = ioc_qos_show,
3433                 .write = ioc_qos_write,
3434         },
3435         {
3436                 .name = "cost.model",
3437                 .flags = CFTYPE_ONLY_ON_ROOT,
3438                 .seq_show = ioc_cost_model_show,
3439                 .write = ioc_cost_model_write,
3440         },
3441         {}
3442 };
3443
3444 static struct blkcg_policy blkcg_policy_iocost = {
3445         .dfl_cftypes    = ioc_files,
3446         .cpd_alloc_fn   = ioc_cpd_alloc,
3447         .cpd_free_fn    = ioc_cpd_free,
3448         .pd_alloc_fn    = ioc_pd_alloc,
3449         .pd_init_fn     = ioc_pd_init,
3450         .pd_free_fn     = ioc_pd_free,
3451         .pd_stat_fn     = ioc_pd_stat,
3452 };
3453
3454 static int __init ioc_init(void)
3455 {
3456         return blkcg_policy_register(&blkcg_policy_iocost);
3457 }
3458
3459 static void __exit ioc_exit(void)
3460 {
3461         blkcg_policy_unregister(&blkcg_policy_iocost);
3462 }
3463
3464 module_init(ioc_init);
3465 module_exit(ioc_exit);