2 * Budget Fair Queueing (BFQ) I/O scheduler.
4 * Based on ideas and code from CFQ:
5 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
7 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
8 * Paolo Valente <paolo.valente@unimore.it>
10 * Copyright (C) 2010 Paolo Valente <paolo.valente@unimore.it>
11 * Arianna Avanzini <avanzini@google.com>
13 * Copyright (C) 2017 Paolo Valente <paolo.valente@linaro.org>
15 * This program is free software; you can redistribute it and/or
16 * modify it under the terms of the GNU General Public License as
17 * published by the Free Software Foundation; either version 2 of the
18 * License, or (at your option) any later version.
20 * This program is distributed in the hope that it will be useful,
21 * but WITHOUT ANY WARRANTY; without even the implied warranty of
22 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
23 * General Public License for more details.
25 * BFQ is a proportional-share I/O scheduler, with some extra
26 * low-latency capabilities. BFQ also supports full hierarchical
27 * scheduling through cgroups. The next paragraphs provide an
28 * introduction to BFQ's inner workings. Details on BFQ benefits, usage and
29 * limitations can be found in Documentation/block/bfq-iosched.txt.
31 * BFQ is a proportional-share storage-I/O scheduling algorithm based
32 * on the slice-by-slice service scheme of CFQ. But BFQ assigns
33 * budgets, measured in number of sectors, to processes instead of
34 * time slices. The device is granted to the in-service process not
35 * for a given time slice, but until it has exhausted its assigned
36 * budget. This change from the time to the service domain enables BFQ
37 * to distribute the device throughput among processes as desired,
38 * without any distortion due to throughput fluctuations, or to device
39 * internal queueing. BFQ uses an ad hoc internal scheduler, called
40 * B-WF2Q+, to schedule processes according to their budgets. More
41 * precisely, BFQ schedules queues associated with processes. Each
42 * process/queue is assigned a user-configurable weight, and B-WF2Q+
43 * guarantees that each queue receives a fraction of the throughput
44 * proportional to its weight. Thanks to the accurate policy of
45 * B-WF2Q+, BFQ can afford to assign high budgets to I/O-bound
46 * processes issuing sequential requests (to boost the throughput),
47 * and yet guarantee a low latency to interactive and soft real-time
48 * applications.
50 * In particular, to provide these low-latency guarantees, BFQ
51 * explicitly privileges the I/O of two classes of time-sensitive
52 * applications: interactive and soft real-time. This feature enables
53 * BFQ to provide applications in these classes with a very low
54 * latency. Finally, BFQ also features additional heuristics for
55 * preserving both a low latency and a high throughput on NCQ-capable,
56 * rotational or flash-based devices, and for getting the job done
57 * quickly for applications consisting of many I/O-bound processes.
59 * NOTE: if the main or only goal, with a given device, is to achieve
60 * the maximum-possible throughput at all times, then switch off
61 * all low-latency heuristics for that device, by setting low_latency
62 * to 0.
64 * BFQ is described in [1], where a reference to the initial, more
65 * theoretical paper on BFQ can also be found. The interested reader can find
66 * in the latter paper full details on the main algorithm, as well as
67 * formulas of the guarantees and formal proofs of all the properties.
68 * With respect to the version of BFQ presented in these papers, this
69 * implementation adds a few more heuristics, such as the one that
70 * guarantees a low latency to soft real-time applications, and a
71 * hierarchical extension based on H-WF2Q+.
73 * B-WF2Q+ is based on WF2Q+, which is described in [2], together with
74 * H-WF2Q+, while the augmented tree used here to implement B-WF2Q+
75 * with O(log N) complexity derives from the one introduced with EEVDF
76 * in [3].
78 * [1] P. Valente, A. Avanzini, "Evolution of the BFQ Storage I/O
79 * Scheduler", Proceedings of the First Workshop on Mobile System
80 * Technologies (MST-2015), May 2015.
81 * http://algogroup.unimore.it/people/paolo/disk_sched/mst-2015.pdf
83 * [2] Jon C.R. Bennett and H. Zhang, "Hierarchical Packet Fair Queueing
84 * Algorithms", IEEE/ACM Transactions on Networking, 5(5):675-689,
85 * Oct 1997.
87 * http://www.cs.cmu.edu/~hzhang/papers/TON-97-Oct.ps.gz
89 * [3] I. Stoica and H. Abdel-Wahab, "Earliest Eligible Virtual Deadline
90 * First: A Flexible and Accurate Mechanism for Proportional Share
91 * Resource Allocation", technical report.
93 * http://www.cs.berkeley.edu/~istoica/papers/eevdf-tr-95.pdf
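/*
 * As a concrete illustration of the proportional-share guarantee
 * described above (not part of the original source): among
 * continuously backlogged queues, B-WF2Q+ gives each queue a fraction
 * of the throughput equal to its weight divided by the sum of the
 * weights of all backlogged queues. The sketch below only restates
 * this relation; the function name and per-mille scaling are
 * illustrative assumptions.
 */
#if 0 /* illustrative sketch, not compiled */
static unsigned int example_share_permille(unsigned int weight,
					   const unsigned int *weights,
					   int nr_queues)
{
	unsigned int sum = 0;
	int i;

	for (i = 0; i < nr_queues; i++)
		sum += weights[i];

	/* e.g., weights {300, 100} -> shares of 750 and 250 per mille */
	return sum ? weight * 1000 / sum : 0;
}
#endif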
95 #include <linux/module.h>
96 #include <linux/slab.h>
97 #include <linux/blkdev.h>
98 #include <linux/cgroup.h>
99 #include <linux/elevator.h>
100 #include <linux/ktime.h>
101 #include <linux/rbtree.h>
102 #include <linux/ioprio.h>
103 #include <linux/sbitmap.h>
104 #include <linux/delay.h>
108 #include "blk-mq-tag.h"
109 #include "blk-mq-sched.h"
110 #include "bfq-iosched.h"
113 #define BFQ_BFQQ_FNS(name) \
114 void bfq_mark_bfqq_##name(struct bfq_queue *bfqq) \
116 __set_bit(BFQQF_##name, &(bfqq)->flags); \
118 void bfq_clear_bfqq_##name(struct bfq_queue *bfqq) \
120 __clear_bit(BFQQF_##name, &(bfqq)->flags); \
122 int bfq_bfqq_##name(const struct bfq_queue *bfqq) \
124 return test_bit(BFQQF_##name, &(bfqq)->flags); \
127 BFQ_BFQQ_FNS(just_created);
129 BFQ_BFQQ_FNS(wait_request);
130 BFQ_BFQQ_FNS(non_blocking_wait_rq);
131 BFQ_BFQQ_FNS(fifo_expire);
132 BFQ_BFQQ_FNS(has_short_ttime);
134 BFQ_BFQQ_FNS(IO_bound);
135 BFQ_BFQQ_FNS(in_large_burst);
137 BFQ_BFQQ_FNS(split_coop);
138 BFQ_BFQQ_FNS(softrt_update);
139 #undef BFQ_BFQQ_FNS
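/*
 * For reference, a hand expansion of one invocation of the macro
 * above, BFQ_BFQQ_FNS(wait_request), assuming the brace lines elided
 * from this listing:
 */
#if 0 /* hand expansion, for illustration only */
void bfq_mark_bfqq_wait_request(struct bfq_queue *bfqq)
{
	__set_bit(BFQQF_wait_request, &(bfqq)->flags);
}
void bfq_clear_bfqq_wait_request(struct bfq_queue *bfqq)
{
	__clear_bit(BFQQF_wait_request, &(bfqq)->flags);
}
int bfq_bfqq_wait_request(const struct bfq_queue *bfqq)
{
	return test_bit(BFQQF_wait_request, &(bfqq)->flags);
}
#endif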
141 /* Expiration time of sync (0) and async (1) requests, in ns. */
142 static const u64 bfq_fifo_expire[2] = { NSEC_PER_SEC / 4, NSEC_PER_SEC / 8 };
144 /* Maximum backwards seek (magic number lifted from CFQ), in KiB. */
145 static const int bfq_back_max = 16 * 1024;
147 /* Penalty of a backwards seek, in number of sectors. */
148 static const int bfq_back_penalty = 2;
150 /* Idling period duration, in ns. */
151 static u64 bfq_slice_idle = NSEC_PER_SEC / 125;
153 /* Minimum number of assigned budgets for which stats are safe to compute. */
154 static const int bfq_stats_min_budgets = 194;
156 /* Default maximum budget values, in sectors and number of requests. */
157 static const int bfq_default_max_budget = 16 * 1024;
160 * Async to sync throughput distribution is controlled as follows:
161 * when an async request is served, the entity is charged the number
162 * of sectors of the request, multiplied by the factor below
164 static const int bfq_async_charge_factor = 10;
166 /* Default timeout values, in jiffies, approximating CFQ defaults. */
167 const int bfq_timeout = HZ / 8;
169 static struct kmem_cache *bfq_pool;
171 /* Below this threshold (in ns), we consider thinktime immediate. */
172 #define BFQ_MIN_TT (2 * NSEC_PER_MSEC)
174 /* hw_tag detection: parallel requests threshold and min samples needed. */
175 #define BFQ_HW_QUEUE_THRESHOLD 4
176 #define BFQ_HW_QUEUE_SAMPLES 32
178 #define BFQQ_SEEK_THR (sector_t)(8 * 100)
179 #define BFQQ_SECT_THR_NONROT (sector_t)(2 * 32)
180 #define BFQQ_CLOSE_THR (sector_t)(8 * 1024)
181 #define BFQQ_SEEKY(bfqq) (hweight32(bfqq->seek_history) > 32/8)
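/*
 * Reading the thresholds above together (descriptive note, not from
 * the original source): a request presumably counts as a "seek" when
 * it starts more than BFQQ_SEEK_THR sectors away from the previous
 * one, recording one bit per sampled request in seek_history;
 * BFQQ_SEEKY then flags a queue as seeky when more than 32/8 = 4 of
 * the last 32 samples were seeks.
 */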
183 /* Min number of samples required to perform peak-rate update */
184 #define BFQ_RATE_MIN_SAMPLES 32
185 /* Min observation time interval required to perform a peak-rate update (ns) */
186 #define BFQ_RATE_MIN_INTERVAL (300*NSEC_PER_MSEC)
187 /* Target observation time interval for a peak-rate update (ns) */
188 #define BFQ_RATE_REF_INTERVAL NSEC_PER_SEC
190 /* Shift used for peak rate fixed precision calculations. */
191 #define BFQ_RATE_SHIFT 16
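/*
 * A minimal sketch (not the scheduler's actual estimator) of how a
 * rate expressed in sectors/usec, left-shifted by BFQ_RATE_SHIFT, can
 * be derived from a raw observation; the overflow and precision
 * handling of the real estimator are ignored, the helper name is an
 * illustrative assumption, and div64_u64() requires linux/math64.h.
 */
#if 0 /* illustration of the fixed-point representation only */
static u64 example_rate_fixed_point(u64 sectors, u64 interval_ns)
{
	/* rate = sectors / (interval_ns / NSEC_PER_USEC), in fixed point */
	return div64_u64((sectors << BFQ_RATE_SHIFT) * NSEC_PER_USEC,
			 interval_ns ?: 1);
}
#endif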
194 * By default, BFQ computes the duration of the weight raising for
195 * interactive applications automatically, using the following formula:
196 * duration = (R / r) * T, where r is the peak rate of the device, and
197 * R and T are two reference parameters.
198 * In particular, R is the peak rate of the reference device (see below),
199 * and T is a reference time: given the systems that are likely to be
200 * installed on the reference device according to its speed class, T is
201 * about the maximum time needed, under BFQ and while reading two files in
202 * parallel, to load typical large applications on these systems.
203 * In practice, the slower/faster the device at hand is, the more/less it
204 * takes to load applications with respect to the reference device.
205 * Accordingly, the longer/shorter BFQ grants weight raising to interactive
206 * applications.
208 * BFQ uses four different reference pairs (R, T), depending on:
209 * . whether the device is rotational or non-rotational;
210 * . whether the device is slow, such as old or portable HDDs, as well as
211 * SD cards, or fast, such as newer HDDs and SSDs.
213 * The device's speed class is dynamically (re)detected in
214 * bfq_update_peak_rate() every time the estimated peak rate is updated.
216 * In the following definitions, R_slow[0]/R_fast[0] and
217 * T_slow[0]/T_fast[0] are the reference values for a slow/fast
218 * rotational device, whereas R_slow[1]/R_fast[1] and
219 * T_slow[1]/T_fast[1] are the reference values for a slow/fast
220 * non-rotational device. Finally, device_speed_thresh are the
221 * thresholds used to switch between speed classes. The reference
222 * rates are not the actual peak rates of the devices used as a
223 * reference, but slightly lower values. The reason for using these
224 * slightly lower values is that the peak-rate estimator tends to
225 * yield slightly lower values than the actual peak rate (it can yield
226 * the actual peak rate only if there is only one process doing I/O,
227 * and the process does sequential I/O).
229 * Both the reference peak rates and the thresholds are measured in
230 * sectors/usec, left-shifted by BFQ_RATE_SHIFT.
232 static int R_slow[2] = {1000, 10700};
233 static int R_fast[2] = {14000, 33000};
235 * To improve readability, a conversion function is used to initialize the
236 * following arrays, which entails that they can be initialized only in a
237 * function.
239 static int T_slow[2];
240 static int T_fast[2];
241 static int device_speed_thresh[2];
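/*
 * A minimal sketch of the kind of run-time initialization the comment
 * above refers to; the concrete reference times and the 4/3 bias of
 * the thresholds are illustrative assumptions, not necessarily the
 * values used by this file.
 */
#if 0 /* illustrative initialization sketch */
static void example_init_reference_params(void)
{
	/* reference weight-raising durations, converted to jiffies */
	T_slow[0] = msecs_to_jiffies(3500);
	T_slow[1] = msecs_to_jiffies(6000);
	T_fast[0] = msecs_to_jiffies(7000);
	T_fast[1] = msecs_to_jiffies(2500);

	/*
	 * Speed-class thresholds, in the same fixed-point unit as the
	 * reference rates, biased towards the fast class.
	 */
	device_speed_thresh[0] = (4 * R_slow[0]) / 3;
	device_speed_thresh[1] = (4 * R_slow[1]) / 3;
}
#endif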
243 #define RQ_BIC(rq) icq_to_bic((rq)->elv.priv[0])
244 #define RQ_BFQQ(rq) ((rq)->elv.priv[1])
246 struct bfq_queue *bic_to_bfqq(struct bfq_io_cq *bic, bool is_sync)
248 return bic->bfqq[is_sync];
251 void bic_set_bfqq(struct bfq_io_cq *bic, struct bfq_queue *bfqq, bool is_sync)
253 bic->bfqq[is_sync] = bfqq;
256 struct bfq_data *bic_to_bfqd(struct bfq_io_cq *bic)
258 return bic->icq.q->elevator->elevator_data;
262 * icq_to_bic - convert iocontext queue structure to bfq_io_cq.
263 * @icq: the iocontext queue.
265 static struct bfq_io_cq *icq_to_bic(struct io_cq *icq)
267 /* bic->icq is the first member, %NULL will convert to %NULL */
268 return container_of(icq, struct bfq_io_cq, icq);
272 * bfq_bic_lookup - search into @ioc a bic associated to @bfqd.
273 * @bfqd: the lookup key.
274 * @ioc: the io_context of the process doing I/O.
275 * @q: the request queue.
277 static struct bfq_io_cq *bfq_bic_lookup(struct bfq_data *bfqd,
278 struct io_context *ioc,
279 struct request_queue *q)
283 struct bfq_io_cq *icq;
285 spin_lock_irqsave(q->queue_lock, flags);
286 icq = icq_to_bic(ioc_lookup_icq(ioc, q));
287 spin_unlock_irqrestore(q->queue_lock, flags);
296 * Schedule a run of the queue if there are requests pending and there is
297 * nothing in the driver that will restart queueing.
299 void bfq_schedule_dispatch(struct bfq_data *bfqd)
301 lockdep_assert_held(&bfqd->lock);
303 if (bfqd->queued != 0) {
304 bfq_log(bfqd, "schedule dispatch");
305 blk_mq_run_hw_queues(bfqd->queue, true);
309 #define bfq_class_idle(bfqq) ((bfqq)->ioprio_class == IOPRIO_CLASS_IDLE)
310 #define bfq_class_rt(bfqq) ((bfqq)->ioprio_class == IOPRIO_CLASS_RT)
312 #define bfq_sample_valid(samples) ((samples) > 80)
315 * Lifted from AS - choose which of rq1 and rq2 is best served now.
316 * We choose the request that is closest to the head right now. Distance
317 * behind the head is penalized and only allowed to a certain extent.
319 static struct request *bfq_choose_req(struct bfq_data *bfqd,
324 sector_t s1, s2, d1 = 0, d2 = 0;
325 unsigned long back_max;
326 #define BFQ_RQ1_WRAP 0x01 /* request 1 wraps */
327 #define BFQ_RQ2_WRAP 0x02 /* request 2 wraps */
328 unsigned int wrap = 0; /* bit mask: requests behind the disk head? */
330 if (!rq1 || rq1 == rq2)
335 if (rq_is_sync(rq1) && !rq_is_sync(rq2))
337 else if (rq_is_sync(rq2) && !rq_is_sync(rq1))
339 if ((rq1->cmd_flags & REQ_META) && !(rq2->cmd_flags & REQ_META))
341 else if ((rq2->cmd_flags & REQ_META) && !(rq1->cmd_flags & REQ_META))
344 s1 = blk_rq_pos(rq1);
345 s2 = blk_rq_pos(rq2);
348 * By definition, 1KiB is 2 sectors.
350 back_max = bfqd->bfq_back_max * 2;
353 * Strict one way elevator _except_ in the case where we allow
354 * short backward seeks which are biased as twice the cost of a
355 * similar forward seek.
359 else if (s1 + back_max >= last)
360 d1 = (last - s1) * bfqd->bfq_back_penalty;
362 wrap |= BFQ_RQ1_WRAP;
366 else if (s2 + back_max >= last)
367 d2 = (last - s2) * bfqd->bfq_back_penalty;
369 wrap |= BFQ_RQ2_WRAP;
371 /* Found required data */
374 * By doing switch() on the bit mask "wrap" we avoid having to
375 * check two variables for all permutations: --> faster!
378 case 0: /* common case for CFQ: rq1 and rq2 not wrapped */
393 case BFQ_RQ1_WRAP|BFQ_RQ2_WRAP: /* both rqs wrapped */
396 * Since both rqs are wrapped,
397 * start with the one that's further behind head
398 * (--> only *one* back seek required),
399 * since back seek takes more time than forward.
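/*
 * Because several branches of bfq_choose_req() are elided from this
 * listing, the following compact restatement (an illustrative sketch,
 * not the original body) summarizes the decision: prefer the request
 * that does not wrap behind the head; if neither wraps, prefer the
 * smaller penalized distance; if both wrap, prefer the one farther
 * behind the head, i.e., with the lower sector, so that a single back
 * seek suffices. The WRAP constants are the function-local ones above.
 */
#if 0 /* compact restatement, for illustration only */
static struct request *example_choose(struct request *rq1, struct request *rq2,
				      sector_t s1, sector_t s2,
				      sector_t d1, sector_t d2,
				      unsigned int wrap)
{
	switch (wrap) {
	case 0:			/* neither wraps: smaller distance wins */
		return d1 <= d2 ? rq1 : rq2;
	case BFQ_RQ1_WRAP:	/* only rq1 is behind the head */
		return rq2;
	case BFQ_RQ2_WRAP:	/* only rq2 is behind the head */
		return rq1;
	default:		/* both behind the head */
		return s1 <= s2 ? rq1 : rq2;
	}
}
#endif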
408 static struct bfq_queue *
409 bfq_rq_pos_tree_lookup(struct bfq_data *bfqd, struct rb_root *root,
410 sector_t sector, struct rb_node **ret_parent,
411 struct rb_node ***rb_link)
413 struct rb_node **p, *parent;
414 struct bfq_queue *bfqq = NULL;
422 bfqq = rb_entry(parent, struct bfq_queue, pos_node);
425 * Sort strictly based on sector. Smallest to the left,
426 * largest to the right.
428 if (sector > blk_rq_pos(bfqq->next_rq))
430 else if (sector < blk_rq_pos(bfqq->next_rq))
438 *ret_parent = parent;
442 bfq_log(bfqd, "rq_pos_tree_lookup %llu: returning %d",
443 (unsigned long long)sector,
444 bfqq ? bfqq->pid : 0);
449 void bfq_pos_tree_add_move(struct bfq_data *bfqd, struct bfq_queue *bfqq)
451 struct rb_node **p, *parent;
452 struct bfq_queue *__bfqq;
454 if (bfqq->pos_root) {
455 rb_erase(&bfqq->pos_node, bfqq->pos_root);
456 bfqq->pos_root = NULL;
459 if (bfq_class_idle(bfqq))
464 bfqq->pos_root = &bfq_bfqq_to_bfqg(bfqq)->rq_pos_tree;
465 __bfqq = bfq_rq_pos_tree_lookup(bfqd, bfqq->pos_root,
466 blk_rq_pos(bfqq->next_rq), &parent, &p);
468 rb_link_node(&bfqq->pos_node, parent, p);
469 rb_insert_color(&bfqq->pos_node, bfqq->pos_root);
471 bfqq->pos_root = NULL;
475 * Tell whether there are active queues or groups with differentiated weights.
477 static bool bfq_differentiated_weights(struct bfq_data *bfqd)
480 * For weights to differ, at least one of the trees must contain
481 * at least two nodes.
483 return (!RB_EMPTY_ROOT(&bfqd->queue_weights_tree) &&
484 (bfqd->queue_weights_tree.rb_node->rb_left ||
485 bfqd->queue_weights_tree.rb_node->rb_right)
486 #ifdef CONFIG_BFQ_GROUP_IOSCHED
488 (!RB_EMPTY_ROOT(&bfqd->group_weights_tree) &&
489 (bfqd->group_weights_tree.rb_node->rb_left ||
490 bfqd->group_weights_tree.rb_node->rb_right)
496 * The following function returns true if every queue must receive the
497 * same share of the throughput (this condition is used when deciding
498 * whether idling may be disabled, see the comments in the function
499 * bfq_bfqq_may_idle()).
501 * Such a scenario occurs when:
502 * 1) all active queues have the same weight,
503 * 2) all active groups at the same level in the groups tree have the same
504 *    weight,
505 * 3) all active groups at the same level in the groups tree have the same
506 * number of children.
508 * Unfortunately, keeping the necessary state for evaluating exactly the
509 * above symmetry conditions would be quite complex and time-consuming.
510 * Therefore this function evaluates, instead, the following stronger
511 * sub-conditions, for which it is much easier to maintain the needed
512 * state:
513 * 1) all active queues have the same weight,
514 * 2) all active groups have the same weight,
515 * 3) all active groups have at most one active child each.
516 * In particular, the last two conditions are always true if hierarchical
517 * support and the cgroups interface are not enabled, thus no state needs
518 * to be maintained in this case.
520 static bool bfq_symmetric_scenario(struct bfq_data *bfqd)
522 return !bfq_differentiated_weights(bfqd);
526 * If the weight-counter tree passed as input contains no counter for
527 * the weight of the input entity, then add that counter; otherwise just
528 * increment the existing counter.
530 * Note that weight-counter trees contain few nodes in mostly symmetric
531 * scenarios. For example, if all queues have the same weight, then the
532 * weight-counter tree for the queues may contain at most one node.
533 * This holds even if low_latency is on, because weight-raised queues
534 * are not inserted in the tree.
535 * In most scenarios, the rate at which nodes are created/destroyed
536 * should be low too.
538 void bfq_weights_tree_add(struct bfq_data *bfqd, struct bfq_entity *entity,
539 struct rb_root *root)
541 struct rb_node **new = &(root->rb_node), *parent = NULL;
544 * Do not insert if the entity is already associated with a
545 * counter, which happens if:
546 * 1) the entity is associated with a queue,
547 * 2) a request arrival has caused the queue to become both
548 * non-weight-raised, and hence change its weight, and
549 * backlogged; in this respect, each of the two events
550 * causes an invocation of this function,
551 * 3) this is the invocation of this function caused by the
552 * second event. This second invocation is actually useless,
553 * and we handle this fact by exiting immediately. More
554 * efficient or clearer solutions might possibly be adopted.
556 if (entity->weight_counter)
560 struct bfq_weight_counter *__counter = container_of(*new,
561 struct bfq_weight_counter,
565 if (entity->weight == __counter->weight) {
566 entity->weight_counter = __counter;
569 if (entity->weight < __counter->weight)
570 new = &((*new)->rb_left);
572 new = &((*new)->rb_right);
575 entity->weight_counter = kzalloc(sizeof(struct bfq_weight_counter),
579 * In the unlucky event of an allocation failure, we just
580 * exit. This will cause the weight of entity to not be
581 * considered in bfq_differentiated_weights, which, in its
582 * turn, causes the scenario to be deemed wrongly symmetric in
583 * case entity's weight would have been the only weight making
584 * the scenario asymmetric. On the bright side, no unbalance
585 * will however occur when entity becomes inactive again (the
586 * invocation of this function is triggered by an activation
587 * of entity). In fact, bfq_weights_tree_remove does nothing
588 * if !entity->weight_counter.
590 if (unlikely(!entity->weight_counter))
593 entity->weight_counter->weight = entity->weight;
594 rb_link_node(&entity->weight_counter->weights_node, parent, new);
595 rb_insert_color(&entity->weight_counter->weights_node, root);
598 entity->weight_counter->num_active++;
602 * Decrement the weight counter associated with the entity, and, if the
603 * counter reaches 0, remove the counter from the tree.
604 * See the comments to the function bfq_weights_tree_add() for considerations
607 void bfq_weights_tree_remove(struct bfq_data *bfqd, struct bfq_entity *entity,
608 struct rb_root *root)
610 if (!entity->weight_counter)
613 entity->weight_counter->num_active--;
614 if (entity->weight_counter->num_active > 0)
615 goto reset_entity_pointer;
617 rb_erase(&entity->weight_counter->weights_node, root);
618 kfree(entity->weight_counter);
620 reset_entity_pointer:
621 entity->weight_counter = NULL;
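/*
 * Descriptive note (an inference from the comments above, not from the
 * original source): bfq_weights_tree_add() is expected to be invoked
 * when an entity becomes active/backlogged and bfq_weights_tree_remove()
 * when it becomes inactive, so each tree node counts how many active
 * entities currently carry a given weight; bfq_differentiated_weights()
 * then only needs to check whether any tree holds two or more nodes.
 */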
625 * Return expired entry, or NULL to just start from scratch in rbtree.
627 static struct request *bfq_check_fifo(struct bfq_queue *bfqq,
628 struct request *last)
632 if (bfq_bfqq_fifo_expire(bfqq))
635 bfq_mark_bfqq_fifo_expire(bfqq);
637 rq = rq_entry_fifo(bfqq->fifo.next);
639 if (rq == last || ktime_get_ns() < rq->fifo_time)
642 bfq_log_bfqq(bfqq->bfqd, bfqq, "check_fifo: returned %p", rq);
646 static struct request *bfq_find_next_rq(struct bfq_data *bfqd,
647 struct bfq_queue *bfqq,
648 struct request *last)
650 struct rb_node *rbnext = rb_next(&last->rb_node);
651 struct rb_node *rbprev = rb_prev(&last->rb_node);
652 struct request *next, *prev = NULL;
654 /* Follow expired path, else get first next available. */
655 next = bfq_check_fifo(bfqq, last);
660 prev = rb_entry_rq(rbprev);
663 next = rb_entry_rq(rbnext);
665 rbnext = rb_first(&bfqq->sort_list);
666 if (rbnext && rbnext != &last->rb_node)
667 next = rb_entry_rq(rbnext);
670 return bfq_choose_req(bfqd, next, prev, blk_rq_pos(last));
673 /* see the definition of bfq_async_charge_factor for details */
674 static unsigned long bfq_serv_to_charge(struct request *rq,
675 struct bfq_queue *bfqq)
677 if (bfq_bfqq_sync(bfqq) || bfqq->wr_coeff > 1)
678 return blk_rq_sectors(rq);
681 * If there are no weight-raised queues, then amplify service
682 * by just the async charge factor; otherwise amplify service
683 * by twice the async charge factor, to further reduce latency
684 * for weight-raised queues.
686 if (bfqq->bfqd->wr_busy_queues == 0)
687 return blk_rq_sectors(rq) * bfq_async_charge_factor;
689 return blk_rq_sectors(rq) * 2 * bfq_async_charge_factor;
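/*
 * Worked example of the charging rule above (illustrative numbers):
 * a sync 512-sector request is charged 512 sectors of service; the
 * same request, if async and with no weight-raised queues busy, is
 * charged 512 * 10 = 5120 sectors, and 512 * 2 * 10 = 10240 sectors
 * if at least one weight-raised queue is busy.
 */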
693 * bfq_updated_next_req - update the queue after a new next_rq selection.
694 * @bfqd: the device data the queue belongs to.
695 * @bfqq: the queue to update.
697 * If the first request of a queue changes we make sure that the queue
698 * has enough budget to serve at least its first request (if the
699 * request has grown). We do this because if the queue has not enough
700 * budget for its first request, it has to go through two dispatch
701 * rounds to actually get it dispatched.
703 static void bfq_updated_next_req(struct bfq_data *bfqd,
704 struct bfq_queue *bfqq)
706 struct bfq_entity *entity = &bfqq->entity;
707 struct request *next_rq = bfqq->next_rq;
708 unsigned long new_budget;
713 if (bfqq == bfqd->in_service_queue)
715 * In order not to break guarantees, budgets cannot be
716 * changed after an entity has been selected.
720 new_budget = max_t(unsigned long, bfqq->max_budget,
721 bfq_serv_to_charge(next_rq, bfqq));
722 if (entity->budget != new_budget) {
723 entity->budget = new_budget;
724 bfq_log_bfqq(bfqd, bfqq, "updated next rq: new budget %lu",
726 bfq_requeue_bfqq(bfqd, bfqq, false);
731 bfq_bfqq_resume_state(struct bfq_queue *bfqq, struct bfq_data *bfqd,
732 struct bfq_io_cq *bic, bool bfq_already_existing)
734 unsigned int old_wr_coeff = bfqq->wr_coeff;
735 bool busy = bfq_already_existing && bfq_bfqq_busy(bfqq);
737 if (bic->saved_has_short_ttime)
738 bfq_mark_bfqq_has_short_ttime(bfqq);
740 bfq_clear_bfqq_has_short_ttime(bfqq);
742 if (bic->saved_IO_bound)
743 bfq_mark_bfqq_IO_bound(bfqq);
745 bfq_clear_bfqq_IO_bound(bfqq);
747 bfqq->ttime = bic->saved_ttime;
748 bfqq->wr_coeff = bic->saved_wr_coeff;
749 bfqq->wr_start_at_switch_to_srt = bic->saved_wr_start_at_switch_to_srt;
750 bfqq->last_wr_start_finish = bic->saved_last_wr_start_finish;
751 bfqq->wr_cur_max_time = bic->saved_wr_cur_max_time;
753 if (bfqq->wr_coeff > 1 && (bfq_bfqq_in_large_burst(bfqq) ||
754 time_is_before_jiffies(bfqq->last_wr_start_finish +
755 bfqq->wr_cur_max_time))) {
756 bfq_log_bfqq(bfqq->bfqd, bfqq,
757 "resume state: switching off wr");
762 /* make sure weight will be updated, however we got here */
763 bfqq->entity.prio_changed = 1;
768 if (old_wr_coeff == 1 && bfqq->wr_coeff > 1)
769 bfqd->wr_busy_queues++;
770 else if (old_wr_coeff > 1 && bfqq->wr_coeff == 1)
771 bfqd->wr_busy_queues--;
774 static int bfqq_process_refs(struct bfq_queue *bfqq)
776 return bfqq->ref - bfqq->allocated - bfqq->entity.on_st;
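/*
 * Descriptive note (inferred, not from the original source): the value
 * returned above is the number of references held by processes, i.e.,
 * the total reference count minus the references tied to allocated
 * requests and to the entity being on a service tree.
 */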
779 /* Empty burst list and add just bfqq (see comments on bfq_handle_burst) */
780 static void bfq_reset_burst_list(struct bfq_data *bfqd, struct bfq_queue *bfqq)
782 struct bfq_queue *item;
783 struct hlist_node *n;
785 hlist_for_each_entry_safe(item, n, &bfqd->burst_list, burst_list_node)
786 hlist_del_init(&item->burst_list_node);
787 hlist_add_head(&bfqq->burst_list_node, &bfqd->burst_list);
788 bfqd->burst_size = 1;
789 bfqd->burst_parent_entity = bfqq->entity.parent;
792 /* Add bfqq to the list of queues in current burst (see bfq_handle_burst) */
793 static void bfq_add_to_burst(struct bfq_data *bfqd, struct bfq_queue *bfqq)
795 /* Increment burst size to also take bfqq into account */
798 if (bfqd->burst_size == bfqd->bfq_large_burst_thresh) {
799 struct bfq_queue *pos, *bfqq_item;
800 struct hlist_node *n;
803 * Enough queues have been activated shortly after each
804 * other to consider this burst as large.
806 bfqd->large_burst = true;
809 * We can now mark all queues in the burst list as
810 * belonging to a large burst.
812 hlist_for_each_entry(bfqq_item, &bfqd->burst_list,
814 bfq_mark_bfqq_in_large_burst(bfqq_item);
815 bfq_mark_bfqq_in_large_burst(bfqq);
818 * From now on, and until the current burst finishes, any
819 * new queue being activated shortly after the last queue
820 * was inserted in the burst can be immediately marked as
821 * belonging to a large burst. So the burst list is not
822 * needed any more. Remove it.
824 hlist_for_each_entry_safe(pos, n, &bfqd->burst_list,
826 hlist_del_init(&pos->burst_list_node);
828 * Burst not yet large: add bfqq to the burst list. Do
829 * not increment the ref counter for bfqq, because bfqq
830 * is removed from the burst list before freeing bfqq
831 * in put_queue.
833 hlist_add_head(&bfqq->burst_list_node, &bfqd->burst_list);
837 * If many queues belonging to the same group happen to be created
838 * shortly after each other, then the processes associated with these
839 * queues have typically a common goal. In particular, bursts of queue
840 * creations are usually caused by services or applications that spawn
841 * many parallel threads/processes. Examples are systemd during boot,
842 * or git grep. To help these processes get their job done as soon as
843 * possible, it is usually better to not grant either weight-raising
844 * or device idling to their queues.
846 * In this comment we describe, firstly, the reasons why this fact
847 * holds, and, secondly, the next function, which implements the main
848 * steps needed to properly mark these queues so that they can then be
849 * treated in a different way.
851 * The above services or applications benefit mostly from a high
852 * throughput: the quicker the requests of the activated queues are
853 * cumulatively served, the sooner the target job of these queues gets
854 * completed. As a consequence, weight-raising any of these queues,
855 * which also implies idling the device for it, is almost always
856 * counterproductive. In most cases it just lowers throughput.
858 * On the other hand, a burst of queue creations may be caused also by
859 * the start of an application that does not consist of a lot of
860 * parallel I/O-bound threads. In fact, with a complex application,
861 * several short processes may need to be executed to start-up the
862 * application. In this respect, to start an application as quickly as
863 * possible, the best thing to do is in any case to privilege the I/O
864 * related to the application with respect to all other
865 * I/O. Therefore, the best strategy to start as quickly as possible
866 * an application that causes a burst of queue creations is to
867 * weight-raise all the queues created during the burst. This is the
868 * exact opposite of the best strategy for the other type of bursts.
870 * In the end, to take the best action for each of the two cases, the
871 * two types of bursts need to be distinguished. Fortunately, this
872 * seems relatively easy, by looking at the sizes of the bursts. In
873 * particular, we found a threshold such that only bursts with a
874 * larger size than that threshold are apparently caused by
875 * services or commands such as systemd or git grep. For brevity,
876 * hereafter we call just 'large' these bursts. BFQ *does not*
877 * weight-raise queues whose creation occurs in a large burst. In
878 * addition, for each of these queues BFQ performs or does not perform
879 * idling depending on which choice boosts the throughput more. The
880 * exact choice depends on the device and request pattern at
883 * Unfortunately, false positives may occur while an interactive task
884 * is starting (e.g., an application is being started). The
885 * consequence is that the queues associated with the task do not
886 * enjoy weight raising as expected. Fortunately these false positives
887 * are very rare. They typically occur if some service happens to
888 * start doing I/O exactly when the interactive task starts.
890 * Turning back to the next function, it implements all the steps
891 * needed to detect the occurrence of a large burst and to properly
892 * mark all the queues belonging to it (so that they can then be
893 * treated in a different way). This goal is achieved by maintaining a
894 * "burst list" that holds, temporarily, the queues that belong to the
895 * burst in progress. The list is then used to mark these queues as
896 * belonging to a large burst if the burst does become large. The main
897 * steps are the following.
899 * . when the very first queue is created, the queue is inserted into the
900 * list (as it could be the first queue in a possible burst)
902 * . if the current burst has not yet become large, and a queue Q that does
903 * not yet belong to the burst is activated shortly after the last time
904 * at which a new queue entered the burst list, then the function appends
905 * Q to the burst list
907 * . if, as a consequence of the previous step, the burst size reaches
908 * the large-burst threshold, then
910 * . all the queues in the burst list are marked as belonging to a
913 * . the burst list is deleted; in fact, the burst list already served
914 * its purpose (temporarily keeping track of the queues in a burst,
915 * so as to be able to mark them as belonging to a large burst in the
916 * previous sub-step), and now is not needed any more
918 * . the device enters a large-burst mode
920 * . if a queue Q that does not belong to the burst is created while
921 * the device is in large-burst mode and shortly after the last time
922 * at which a queue either entered the burst list or was marked as
923 * belonging to the current large burst, then Q is immediately marked
924 * as belonging to a large burst.
926 * . if a queue Q that does not belong to the burst is created a while
927 * later, i.e., not shortly after the last time at which a queue
928 * either entered the burst list or was marked as belonging to the
929 * current large burst, then the current burst is deemed as finished and:
931 * . the large-burst mode is reset if set
933 * . the burst list is emptied
935 * . Q is inserted in the burst list, as Q may be the first queue
936 * in a possible new burst (then the burst list contains just Q
939 static void bfq_handle_burst(struct bfq_data *bfqd, struct bfq_queue *bfqq)
942 * If bfqq is already in the burst list or is part of a large
943 * burst, or finally has just been split, then there is
944 * nothing else to do.
946 if (!hlist_unhashed(&bfqq->burst_list_node) ||
947 bfq_bfqq_in_large_burst(bfqq) ||
948 time_is_after_eq_jiffies(bfqq->split_time +
949 msecs_to_jiffies(10)))
953 * If bfqq's creation happens late enough, or bfqq belongs to
954 * a different group than the burst group, then the current
955 * burst is finished, and related data structures must be
958 * In this respect, consider the special case where bfqq is
959 * the very first queue created after BFQ is selected for this
960 * device. In this case, last_ins_in_burst and
961 * burst_parent_entity are not yet significant when we get
962 * here. But it is easy to verify that, whether or not the
963 * following condition is true, bfqq will end up being
964 * inserted into the burst list. In particular the list will
965 * happen to contain only bfqq. And this is exactly what has
966 * to happen, as bfqq may be the first queue of the first
969 if (time_is_before_jiffies(bfqd->last_ins_in_burst +
970 bfqd->bfq_burst_interval) ||
971 bfqq->entity.parent != bfqd->burst_parent_entity) {
972 bfqd->large_burst = false;
973 bfq_reset_burst_list(bfqd, bfqq);
978 * If we get here, then bfqq is being activated shortly after the
979 * last queue. So, if the current burst is also large, we can mark
980 * bfqq as belonging to this large burst immediately.
982 if (bfqd->large_burst) {
983 bfq_mark_bfqq_in_large_burst(bfqq);
988 * If we get here, then a large-burst state has not yet been
989 * reached, but bfqq is being activated shortly after the last
990 * queue. Then we add bfqq to the burst.
992 bfq_add_to_burst(bfqd, bfqq);
995 * At this point, bfqq either has been added to the current
996 * burst or has caused the current burst to terminate and a
997 * possible new burst to start. In particular, in the second
998 * case, bfqq has become the first queue in the possible new
999 * burst. In both cases last_ins_in_burst needs to be moved
1002 bfqd->last_ins_in_burst = jiffies;
1005 static int bfq_bfqq_budget_left(struct bfq_queue *bfqq)
1007 struct bfq_entity *entity = &bfqq->entity;
1009 return entity->budget - entity->service;
1013 * If enough samples have been computed, return the current max budget
1014 * stored in bfqd, which is dynamically updated according to the
1015 * estimated disk peak rate; otherwise return the default max budget
1017 static int bfq_max_budget(struct bfq_data *bfqd)
1019 if (bfqd->budgets_assigned < bfq_stats_min_budgets)
1020 return bfq_default_max_budget;
1022 return bfqd->bfq_max_budget;
1026 * Return min budget, which is a fraction of the current or default
1027 * max budget (trying with 1/32)
1029 static int bfq_min_budget(struct bfq_data *bfqd)
1031 if (bfqd->budgets_assigned < bfq_stats_min_budgets)
1032 return bfq_default_max_budget / 32;
1034 return bfqd->bfq_max_budget / 32;
1038 * The next function, invoked after the input queue bfqq switches from
1039 * idle to busy, updates the budget of bfqq. The function also tells
1040 * whether the in-service queue should be expired, by returning
1041 * true. The purpose of expiring the in-service queue is to give bfqq
1042 * the chance to possibly preempt the in-service queue, and the reason
1043 * for preempting the in-service queue is to achieve one of the two
1046 * 1. Guarantee to bfqq its reserved bandwidth even if bfqq has
1047 * expired because it has remained idle. In particular, bfqq may have
1048 * expired for one of the following two reasons:
1050 * - BFQQE_NO_MORE_REQUESTS bfqq did not enjoy any device idling
1051 * and did not make it to issue a new request before its last
1052 * request was served;
1054 * - BFQQE_TOO_IDLE bfqq did enjoy device idling, but did not issue
1055 * a new request before the expiration of the idling-time.
1057 * Even if bfqq has expired for one of the above reasons, the process
1058 * associated with the queue may be however issuing requests greedily,
1059 * and thus be sensitive to the bandwidth it receives (bfqq may have
1060 * remained idle for other reasons: CPU high load, bfqq not enjoying
1061 * idling, I/O throttling somewhere in the path from the process to
1062 * the I/O scheduler, ...). But if, after every expiration for one of
1063 * the above two reasons, bfqq has to wait for the service of at least
1064 * one full budget of another queue before being served again, then
1065 * bfqq is likely to get a much lower bandwidth or resource time than
1066 * its reserved ones. To address this issue, two countermeasures need
1069 * First, the budget and the timestamps of bfqq need to be updated in
1070 * a special way on bfqq reactivation: they need to be updated as if
1071 * bfqq did not remain idle and did not expire. In fact, if they are
1072 * computed as if bfqq expired and remained idle until reactivation,
1073 * then the process associated with bfqq is treated as if, instead of
1074 * being greedy, it stopped issuing requests when bfqq remained idle,
1075 * and restarts issuing requests only on this reactivation. In other
1076 * words, the scheduler does not help the process recover the "service
1077 * hole" between bfqq expiration and reactivation. As a consequence,
1078 * the process receives a lower bandwidth than its reserved one. In
1079 * contrast, to recover this hole, the budget must be updated as if
1080 * bfqq was not expired at all before this reactivation, i.e., it must
1081 * be set to the value of the remaining budget when bfqq was
1082 * expired. Along the same line, timestamps need to be assigned the
1083 * value they had the last time bfqq was selected for service, i.e.,
1084 * before last expiration. Thus timestamps need to be back-shifted
1085 * with respect to their normal computation (see [1] for more details
1086 * on this tricky aspect).
1088 * Secondly, to allow the process to recover the hole, the in-service
1089 * queue must be expired too, to give bfqq the chance to preempt it
1090 * immediately. In fact, if bfqq has to wait for a full budget of the
1091 * in-service queue to be completed, then it may become impossible to
1092 * let the process recover the hole, even if the back-shifted
1093 * timestamps of bfqq are lower than those of the in-service queue. If
1094 * this happens for most or all of the holes, then the process may not
1095 * receive its reserved bandwidth. In this respect, it is worth noting
1096 * that, since the service of outstanding requests is not preemptible, a
1097 * little fraction of the holes may however be unrecoverable, thereby
1098 * causing a little loss of bandwidth.
1100 * The last important point is detecting whether bfqq does need this
1101 * bandwidth recovery. In this respect, the next function deems the
1102 * process associated with bfqq greedy, and thus allows it to recover
1103 * the hole, if: 1) the process is waiting for the arrival of a new
1104 * request (which implies that bfqq expired for one of the above two
1105 * reasons), and 2) such a request has arrived soon. The first
1106 * condition is controlled through the flag non_blocking_wait_rq,
1107 * while the second through the flag arrived_in_time. If both
1108 * conditions hold, then the function computes the budget in the
1109 * above-described special way, and signals that the in-service queue
1110 * should be expired. Timestamp back-shifting is done later in
1111 * __bfq_activate_entity.
1113 * 2. Reduce latency. Even if timestamps are not backshifted to let
1114 * the process associated with bfqq recover a service hole, bfqq may
1115 * however happen to have, after being (re)activated, a lower finish
1116 * timestamp than the in-service queue. That is, the next budget of
1117 * bfqq may have to be completed before the one of the in-service
1118 * queue. If this is the case, then preempting the in-service queue
1119 * allows this goal to be achieved, apart from the unpreemptible,
1120 * outstanding requests mentioned above.
1122 * Unfortunately, regardless of which of the above two goals one wants
1123 * to achieve, service trees need first to be updated to know whether
1124 * the in-service queue must be preempted. To have service trees
1125 * correctly updated, the in-service queue must be expired and
1126 * rescheduled, and bfqq must be scheduled too. This is one of the
1127 * most costly operations (in future versions, the scheduling
1128 * mechanism may be re-designed in such a way to make it possible to
1129 * know whether preemption is needed without needing to update service
1130 * trees). In addition, queue preemptions almost always cause random
1131 * I/O, and thus loss of throughput. Because of these facts, the next
1132 * function adopts the following simple scheme to avoid both costly
1133 * operations and too frequent preemptions: it requests the expiration
1134 * of the in-service queue (unconditionally) only for queues that need
1135 * to recover a hole, or that either are weight-raised or deserve to
1138 static bool bfq_bfqq_update_budg_for_activation(struct bfq_data *bfqd,
1139 struct bfq_queue *bfqq,
1140 bool arrived_in_time,
1141 bool wr_or_deserves_wr)
1143 struct bfq_entity *entity = &bfqq->entity;
1145 if (bfq_bfqq_non_blocking_wait_rq(bfqq) && arrived_in_time) {
1147 * We do not clear the flag non_blocking_wait_rq here, as
1148 * the latter is used in bfq_activate_bfqq to signal
1149 * that timestamps need to be back-shifted (and is
1150 * cleared right after).
1154 * In the next assignment we rely on the fact that neither
1155 * entity->service nor entity->budget is updated
1156 * on expiration if bfqq is empty (see
1157 * __bfq_bfqq_recalc_budget). Thus both quantities
1158 * remain unchanged after such an expiration, and the
1159 * following statement therefore assigns to
1160 * entity->budget the remaining budget on such an
1161 * expiration. For clarity, entity->service is not
1162 * updated on expiration in any case, and, in normal
1163 * operation, is reset only when bfqq is selected for
1164 * service (see bfq_get_next_queue).
1166 entity->budget = min_t(unsigned long,
1167 bfq_bfqq_budget_left(bfqq),
1173 entity->budget = max_t(unsigned long, bfqq->max_budget,
1174 bfq_serv_to_charge(bfqq->next_rq, bfqq));
1175 bfq_clear_bfqq_non_blocking_wait_rq(bfqq);
1176 return wr_or_deserves_wr;
1179 static unsigned int bfq_wr_duration(struct bfq_data *bfqd)
1183 if (bfqd->bfq_wr_max_time > 0)
1184 return bfqd->bfq_wr_max_time;
1186 dur = bfqd->RT_prod;
1187 do_div(dur, bfqd->peak_rate);
1190 * Limit duration between 3 and 13 seconds. Tests show that
1191 * higher values than 13 seconds often yield the opposite of
1192 * the desired result, i.e., worsen responsiveness by letting
1193 * non-interactive and non-soft-real-time applications
1194 * preserve weight raising for a too long time interval.
1196 * On the other end, lower values than 3 seconds make it
1197 * difficult for most interactive tasks to complete their jobs
1198 * before weight-raising finishes.
1200 if (dur > msecs_to_jiffies(13000))
1201 dur = msecs_to_jiffies(13000);
1202 else if (dur < msecs_to_jiffies(3000))
1203 dur = msecs_to_jiffies(3000);
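/*
 * Worked example of duration = (R / r) * T (illustrative numbers): on
 * a device whose estimated peak rate r is half the reference rate R,
 * the raw duration is 2 * T; with T around 4 seconds this yields about
 * 8 seconds of interactive weight raising, within the [3 s, 13 s]
 * clamp applied above. A minimal numeric sketch, assuming R, r and T
 * are already available (div64_u64() is from linux/math64.h):
 */
#if 0 /* numeric illustration only */
static unsigned long example_wr_duration(u64 R, u64 r, unsigned long T)
{
	u64 dur = div64_u64(R * T, r ?: 1);	/* dur = (R / r) * T */

	return clamp_t(unsigned long, dur,
		       msecs_to_jiffies(3000), msecs_to_jiffies(13000));
}
#endif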
1209 * Return the farthest future time instant according to the jiffies macro.
1212 static unsigned long bfq_greatest_from_now(void)
1214 return jiffies + MAX_JIFFY_OFFSET;
1218 * Return the farthest past time instant according to the jiffies macro.
1221 static unsigned long bfq_smallest_from_now(void)
1223 return jiffies - MAX_JIFFY_OFFSET;
1226 static void bfq_update_bfqq_wr_on_rq_arrival(struct bfq_data *bfqd,
1227 struct bfq_queue *bfqq,
1228 unsigned int old_wr_coeff,
1229 bool wr_or_deserves_wr,
1234 if (old_wr_coeff == 1 && wr_or_deserves_wr) {
1235 /* start a weight-raising period */
1237 bfqq->wr_coeff = bfqd->bfq_wr_coeff;
1238 bfqq->wr_cur_max_time = bfq_wr_duration(bfqd);
1241 * No interactive weight raising in progress
1242 * here: assign minus infinity to
1243 * wr_start_at_switch_to_srt, to make sure
1244 * that, at the end of the soft-real-time
1245 * weight raising periods that is starting
1246 * now, no interactive weight-raising period
1247 * may be wrongly considered as still in
1248 * progress (and thus actually started by
1251 bfqq->wr_start_at_switch_to_srt =
1252 bfq_smallest_from_now();
1253 bfqq->wr_coeff = bfqd->bfq_wr_coeff *
1254 BFQ_SOFTRT_WEIGHT_FACTOR;
1255 bfqq->wr_cur_max_time =
1256 bfqd->bfq_wr_rt_max_time;
1260 * If needed, further reduce budget to make sure it is
1261 * close to bfqq's backlog, so as to reduce the
1262 * scheduling-error component due to a too large
1263 * budget. Do not care about throughput consequences,
1264 * but only about latency. Finally, do not assign a
1265 * too small budget either, to avoid increasing
1266 * latency by causing too frequent expirations.
1268 bfqq->entity.budget = min_t(unsigned long,
1269 bfqq->entity.budget,
1270 2 * bfq_min_budget(bfqd));
1271 } else if (old_wr_coeff > 1) {
1272 if (interactive) { /* update wr coeff and duration */
1273 bfqq->wr_coeff = bfqd->bfq_wr_coeff;
1274 bfqq->wr_cur_max_time = bfq_wr_duration(bfqd);
1275 } else if (in_burst)
1279 * The application is now or still meeting the
1280 * requirements for being deemed soft rt. We
1281 * can then correctly and safely (re)charge
1282 * the weight-raising duration for the
1283 * application with the weight-raising
1284 * duration for soft rt applications.
1286 * In particular, doing this recharge now, i.e.,
1287 * before the weight-raising period for the
1288 * application finishes, reduces the probability
1289 * of the following negative scenario:
1290 * 1) the weight of a soft rt application is
1291 * raised at startup (as for any newly
1292 * created application),
1293 * 2) since the application is not interactive,
1294 * at a certain time weight-raising is
1295 * stopped for the application,
1296 * 3) at that time the application happens to
1297 * still have pending requests, and hence
1298 * is destined to not have a chance to be
1299 * deemed soft rt before these requests are
1300 * completed (see the comments to the
1301 * function bfq_bfqq_softrt_next_start()
1302 * for details on soft rt detection),
1303 * 4) these pending requests experience a high
1304 * latency because the application is not
1305 * weight-raised while they are pending.
1307 if (bfqq->wr_cur_max_time !=
1308 bfqd->bfq_wr_rt_max_time) {
1309 bfqq->wr_start_at_switch_to_srt =
1310 bfqq->last_wr_start_finish;
1312 bfqq->wr_cur_max_time =
1313 bfqd->bfq_wr_rt_max_time;
1314 bfqq->wr_coeff = bfqd->bfq_wr_coeff *
1315 BFQ_SOFTRT_WEIGHT_FACTOR;
1317 bfqq->last_wr_start_finish = jiffies;
1322 static bool bfq_bfqq_idle_for_long_time(struct bfq_data *bfqd,
1323 struct bfq_queue *bfqq)
1325 return bfqq->dispatched == 0 &&
1326 time_is_before_jiffies(
1327 bfqq->budget_timeout +
1328 bfqd->bfq_wr_min_idle_time);
1331 static void bfq_bfqq_handle_idle_busy_switch(struct bfq_data *bfqd,
1332 struct bfq_queue *bfqq,
1337 bool soft_rt, in_burst, wr_or_deserves_wr,
1338 bfqq_wants_to_preempt,
1339 idle_for_long_time = bfq_bfqq_idle_for_long_time(bfqd, bfqq),
1341 * See the comments on
1342 * bfq_bfqq_update_budg_for_activation for
1343 * details on the usage of the next variable.
1345 arrived_in_time = ktime_get_ns() <=
1346 bfqq->ttime.last_end_request +
1347 bfqd->bfq_slice_idle * 3;
1349 bfqg_stats_update_io_add(bfqq_group(RQ_BFQQ(rq)), bfqq, rq->cmd_flags);
1352 * bfqq deserves to be weight-raised if:
1353 * - it is sync,
1354 * - it does not belong to a large burst,
1355 * - it has been idle for enough time or is soft real-time,
1356 * - it is linked to a bfq_io_cq (it is not shared in any sense).
1358 in_burst = bfq_bfqq_in_large_burst(bfqq);
1359 soft_rt = bfqd->bfq_wr_max_softrt_rate > 0 &&
1361 time_is_before_jiffies(bfqq->soft_rt_next_start);
1362 *interactive = !in_burst && idle_for_long_time;
1363 wr_or_deserves_wr = bfqd->low_latency &&
1364 (bfqq->wr_coeff > 1 ||
1365 (bfq_bfqq_sync(bfqq) &&
1366 bfqq->bic && (*interactive || soft_rt)));
1369 * Using the last flag, update budget and check whether bfqq
1370 * may want to preempt the in-service queue.
1372 bfqq_wants_to_preempt =
1373 bfq_bfqq_update_budg_for_activation(bfqd, bfqq,
1378 * If bfqq happened to be activated in a burst, but has been
1379 * idle for much more than an interactive queue, then we
1380 * assume that, in the overall I/O initiated in the burst, the
1381 * I/O associated with bfqq is finished. So bfqq does not need
1382 * to be treated as a queue belonging to a burst
1383 * anymore. Accordingly, we reset bfqq's in_large_burst flag
1384 * if set, and remove bfqq from the burst list if it's
1385 * there. We do not decrement burst_size, because the fact
1386 * that bfqq does not need to belong to the burst list any
1387 * more does not invalidate the fact that bfqq was created in
1390 if (likely(!bfq_bfqq_just_created(bfqq)) &&
1391 idle_for_long_time &&
1392 time_is_before_jiffies(
1393 bfqq->budget_timeout +
1394 msecs_to_jiffies(10000))) {
1395 hlist_del_init(&bfqq->burst_list_node);
1396 bfq_clear_bfqq_in_large_burst(bfqq);
1399 bfq_clear_bfqq_just_created(bfqq);
1402 if (!bfq_bfqq_IO_bound(bfqq)) {
1403 if (arrived_in_time) {
1404 bfqq->requests_within_timer++;
1405 if (bfqq->requests_within_timer >=
1406 bfqd->bfq_requests_within_timer)
1407 bfq_mark_bfqq_IO_bound(bfqq);
1409 bfqq->requests_within_timer = 0;
1412 if (bfqd->low_latency) {
1413 if (unlikely(time_is_after_jiffies(bfqq->split_time)))
1416 jiffies - bfqd->bfq_wr_min_idle_time - 1;
1418 if (time_is_before_jiffies(bfqq->split_time +
1419 bfqd->bfq_wr_min_idle_time)) {
1420 bfq_update_bfqq_wr_on_rq_arrival(bfqd, bfqq,
1427 if (old_wr_coeff != bfqq->wr_coeff)
1428 bfqq->entity.prio_changed = 1;
1432 bfqq->last_idle_bklogged = jiffies;
1433 bfqq->service_from_backlogged = 0;
1434 bfq_clear_bfqq_softrt_update(bfqq);
1436 bfq_add_bfqq_busy(bfqd, bfqq);
1439 * Expire in-service queue only if preemption may be needed
1440 * for guarantees. In this respect, the function
1441 * next_queue_may_preempt just checks a simple, necessary
1442 * condition, and not a sufficient condition based on
1443 * timestamps. In fact, for the latter condition to be
1444 * evaluated, timestamps would need first to be updated, and
1445 * this operation is quite costly (see the comments on the
1446 * function bfq_bfqq_update_budg_for_activation).
1448 if (bfqd->in_service_queue && bfqq_wants_to_preempt &&
1449 bfqd->in_service_queue->wr_coeff < bfqq->wr_coeff &&
1450 next_queue_may_preempt(bfqd))
1451 bfq_bfqq_expire(bfqd, bfqd->in_service_queue,
1452 false, BFQQE_PREEMPTED);
1455 static void bfq_add_request(struct request *rq)
1457 struct bfq_queue *bfqq = RQ_BFQQ(rq);
1458 struct bfq_data *bfqd = bfqq->bfqd;
1459 struct request *next_rq, *prev;
1460 unsigned int old_wr_coeff = bfqq->wr_coeff;
1461 bool interactive = false;
1463 bfq_log_bfqq(bfqd, bfqq, "add_request %d", rq_is_sync(rq));
1464 bfqq->queued[rq_is_sync(rq)]++;
1467 elv_rb_add(&bfqq->sort_list, rq);
1470 * Check if this request is a better next-serve candidate.
1472 prev = bfqq->next_rq;
1473 next_rq = bfq_choose_req(bfqd, bfqq->next_rq, rq, bfqd->last_position);
1474 bfqq->next_rq = next_rq;
1477 * Adjust priority tree position, if next_rq changes.
1479 if (prev != bfqq->next_rq)
1480 bfq_pos_tree_add_move(bfqd, bfqq);
1482 if (!bfq_bfqq_busy(bfqq)) /* switching to busy ... */
1483 bfq_bfqq_handle_idle_busy_switch(bfqd, bfqq, old_wr_coeff,
1486 if (bfqd->low_latency && old_wr_coeff == 1 && !rq_is_sync(rq) &&
1487 time_is_before_jiffies(
1488 bfqq->last_wr_start_finish +
1489 bfqd->bfq_wr_min_inter_arr_async)) {
1490 bfqq->wr_coeff = bfqd->bfq_wr_coeff;
1491 bfqq->wr_cur_max_time = bfq_wr_duration(bfqd);
1493 bfqd->wr_busy_queues++;
1494 bfqq->entity.prio_changed = 1;
1496 if (prev != bfqq->next_rq)
1497 bfq_updated_next_req(bfqd, bfqq);
1501 * Assign jiffies to last_wr_start_finish in the following
1502 * cases:
1504 * . if bfqq is not going to be weight-raised, because, for
1505 * non weight-raised queues, last_wr_start_finish stores the
1506 * arrival time of the last request; as of now, this piece
1507 * of information is used only for deciding whether to
1508 * weight-raise async queues
1510 * . if bfqq is not weight-raised, because, if bfqq is now
1511 * switching to weight-raised, then last_wr_start_finish
1512 * stores the time when weight-raising starts
1514 * . if bfqq is interactive, because, regardless of whether
1515 * bfqq is currently weight-raised, the weight-raising
1516 * period must start or restart (this case is considered
1517 * separately because it is not detected by the above
1518 * conditions, if bfqq is already weight-raised)
1520 * last_wr_start_finish has to be updated also if bfqq is soft
1521 * real-time, because the weight-raising period is constantly
1522 * restarted on idle-to-busy transitions for these queues, but
1523 * this is already done in bfq_bfqq_handle_idle_busy_switch if
1524 * needed.
1526 if (bfqd->low_latency &&
1527 (old_wr_coeff == 1 || bfqq->wr_coeff == 1 || interactive))
1528 bfqq->last_wr_start_finish = jiffies;
1531 static struct request *bfq_find_rq_fmerge(struct bfq_data *bfqd,
1533 struct request_queue *q)
1535 struct bfq_queue *bfqq = bfqd->bio_bfqq;
1539 return elv_rb_find(&bfqq->sort_list, bio_end_sector(bio));
1544 static sector_t get_sdist(sector_t last_pos, struct request *rq)
1547 return abs(blk_rq_pos(rq) - last_pos);
1552 #if 0 /* Still not clear if we can do without next two functions */
1553 static void bfq_activate_request(struct request_queue *q, struct request *rq)
1555 struct bfq_data *bfqd = q->elevator->elevator_data;
1557 bfqd->rq_in_driver++;
1560 static void bfq_deactivate_request(struct request_queue *q, struct request *rq)
1562 struct bfq_data *bfqd = q->elevator->elevator_data;
1564 bfqd->rq_in_driver--;
1568 static void bfq_remove_request(struct request_queue *q,
1571 struct bfq_queue *bfqq = RQ_BFQQ(rq);
1572 struct bfq_data *bfqd = bfqq->bfqd;
1573 const int sync = rq_is_sync(rq);
1575 if (bfqq->next_rq == rq) {
1576 bfqq->next_rq = bfq_find_next_rq(bfqd, bfqq, rq);
1577 bfq_updated_next_req(bfqd, bfqq);
1580 if (rq->queuelist.prev != &rq->queuelist)
1581 list_del_init(&rq->queuelist);
1582 bfqq->queued[sync]--;
1584 elv_rb_del(&bfqq->sort_list, rq);
1586 elv_rqhash_del(q, rq);
1587 if (q->last_merge == rq)
1588 q->last_merge = NULL;
1590 if (RB_EMPTY_ROOT(&bfqq->sort_list)) {
1591 bfqq->next_rq = NULL;
1593 if (bfq_bfqq_busy(bfqq) && bfqq != bfqd->in_service_queue) {
1594 bfq_del_bfqq_busy(bfqd, bfqq, false);
1596 * bfqq emptied. In normal operation, when
1597 * bfqq is empty, bfqq->entity.service and
1598 * bfqq->entity.budget must contain,
1599 * respectively, the service received and the
1600 * budget used last time bfqq emptied. These
1601 * facts do not hold in this case, as at least
1602 * this last removal occurred while bfqq is
1603 * not in service. To avoid inconsistencies,
1604 * reset both bfqq->entity.service and
1605 * bfqq->entity.budget, if bfqq has still a
1606 * process that may issue I/O requests to it.
1608 bfqq->entity.budget = bfqq->entity.service = 0;
1612 * Remove queue from request-position tree as it is empty.
1614 if (bfqq->pos_root) {
1615 rb_erase(&bfqq->pos_node, bfqq->pos_root);
1616 bfqq->pos_root = NULL;
1620 if (rq->cmd_flags & REQ_META)
1621 bfqq->meta_pending--;
1623 bfqg_stats_update_io_remove(bfqq_group(bfqq), rq->cmd_flags);
1626 static bool bfq_bio_merge(struct blk_mq_hw_ctx *hctx, struct bio *bio)
1628 struct request_queue *q = hctx->queue;
1629 struct bfq_data *bfqd = q->elevator->elevator_data;
1630 struct request *free = NULL;
1632 * bfq_bic_lookup grabs the queue_lock: invoke it now and
1633 * store its return value for later use, to avoid nesting
1634 * queue_lock inside the bfqd->lock. We assume that the bic
1635 * returned by bfq_bic_lookup does not go away before
1636 * bfqd->lock is taken.
1638 struct bfq_io_cq *bic = bfq_bic_lookup(bfqd, current->io_context, q);
1641 spin_lock_irq(&bfqd->lock);
1644 bfqd->bio_bfqq = bic_to_bfqq(bic, op_is_sync(bio->bi_opf));
1646 bfqd->bio_bfqq = NULL;
1647 bfqd->bio_bic = bic;
1649 ret = blk_mq_sched_try_merge(q, bio, &free);
1652 blk_mq_free_request(free);
1653 spin_unlock_irq(&bfqd->lock);
1658 static int bfq_request_merge(struct request_queue *q, struct request **req,
1661 struct bfq_data *bfqd = q->elevator->elevator_data;
1662 struct request *__rq;
1664 __rq = bfq_find_rq_fmerge(bfqd, bio, q);
1665 if (__rq && elv_bio_merge_ok(__rq, bio)) {
1667 return ELEVATOR_FRONT_MERGE;
1670 return ELEVATOR_NO_MERGE;
1673 static void bfq_request_merged(struct request_queue *q, struct request *req,
1674 enum elv_merge type)
1676 if (type == ELEVATOR_FRONT_MERGE &&
1677 rb_prev(&req->rb_node) &&
1679 blk_rq_pos(container_of(rb_prev(&req->rb_node),
1680 struct request, rb_node))) {
1681 struct bfq_queue *bfqq = RQ_BFQQ(req);
1682 struct bfq_data *bfqd = bfqq->bfqd;
1683 struct request *prev, *next_rq;
1685 /* Reposition request in its sort_list */
1686 elv_rb_del(&bfqq->sort_list, req);
1687 elv_rb_add(&bfqq->sort_list, req);
1689 /* Choose next request to be served for bfqq */
1690 prev = bfqq->next_rq;
1691 next_rq = bfq_choose_req(bfqd, bfqq->next_rq, req,
1692 bfqd->last_position);
1693 bfqq->next_rq = next_rq;
1695 * If next_rq changes, update both the queue's budget to
1696 * fit the new request and the queue's position in its
1697 * rq_pos_tree.
1699 if (prev != bfqq->next_rq) {
1700 bfq_updated_next_req(bfqd, bfqq);
1701 bfq_pos_tree_add_move(bfqd, bfqq);
1706 static void bfq_requests_merged(struct request_queue *q, struct request *rq,
1707 struct request *next)
1709 struct bfq_queue *bfqq = RQ_BFQQ(rq), *next_bfqq = RQ_BFQQ(next);
1711 if (!RB_EMPTY_NODE(&rq->rb_node))
1715 * If next and rq belong to the same bfq_queue and next is older
1716 * than rq, then reposition rq in the fifo (by substituting next
1717 * with rq). Otherwise, if next and rq belong to different
1718 * bfq_queues, never reposition rq: in fact, we would have to
1719 * reposition it with respect to next's position in its own fifo,
1720 * which would most certainly be too expensive with respect to the benefits.
1723 if (bfqq == next_bfqq &&
1724 !list_empty(&rq->queuelist) && !list_empty(&next->queuelist) &&
1725 next->fifo_time < rq->fifo_time) {
1726 list_del_init(&rq->queuelist);
1727 list_replace_init(&next->queuelist, &rq->queuelist);
1728 rq->fifo_time = next->fifo_time;
1731 if (bfqq->next_rq == next)
1732 bfqq->next_rq = rq;
1734 bfq_remove_request(q, next);
1737 bfqg_stats_update_io_merged(bfqq_group(bfqq), next->cmd_flags);
1740 /* Must be called with bfqq != NULL */
1741 static void bfq_bfqq_end_wr(struct bfq_queue *bfqq)
1743 if (bfq_bfqq_busy(bfqq))
1744 bfqq->bfqd->wr_busy_queues--;
1746 bfqq->wr_cur_max_time = 0;
1747 bfqq->last_wr_start_finish = jiffies;
1749 * Trigger a weight change on the next invocation of
1750 * __bfq_entity_update_weight_prio.
1752 bfqq->entity.prio_changed = 1;
1755 void bfq_end_wr_async_queues(struct bfq_data *bfqd,
1756 struct bfq_group *bfqg)
1760 for (i = 0; i < 2; i++)
1761 for (j = 0; j < IOPRIO_BE_NR; j++)
1762 if (bfqg->async_bfqq[i][j])
1763 bfq_bfqq_end_wr(bfqg->async_bfqq[i][j]);
1764 if (bfqg->async_idle_bfqq)
1765 bfq_bfqq_end_wr(bfqg->async_idle_bfqq);
1768 static void bfq_end_wr(struct bfq_data *bfqd)
1770 struct bfq_queue *bfqq;
1772 spin_lock_irq(&bfqd->lock);
1774 list_for_each_entry(bfqq, &bfqd->active_list, bfqq_list)
1775 bfq_bfqq_end_wr(bfqq);
1776 list_for_each_entry(bfqq, &bfqd->idle_list, bfqq_list)
1777 bfq_bfqq_end_wr(bfqq);
1778 bfq_end_wr_async(bfqd);
1780 spin_unlock_irq(&bfqd->lock);
1783 static sector_t bfq_io_struct_pos(void *io_struct, bool request)
1785 if (request)
1786 return blk_rq_pos(io_struct);
1788 return ((struct bio *)io_struct)->bi_iter.bi_sector;
1791 static int bfq_rq_close_to_sector(void *io_struct, bool request,
1794 return abs(bfq_io_struct_pos(io_struct, request) - sector) <=
1798 static struct bfq_queue *bfqq_find_close(struct bfq_data *bfqd,
1799 struct bfq_queue *bfqq,
1802 struct rb_root *root = &bfq_bfqq_to_bfqg(bfqq)->rq_pos_tree;
1803 struct rb_node *parent, *node;
1804 struct bfq_queue *__bfqq;
1806 if (RB_EMPTY_ROOT(root))
1810 * First, if we find a request starting at the end of the last
1811 * request, choose it.
1813 __bfqq = bfq_rq_pos_tree_lookup(bfqd, root, sector, &parent, NULL);
1818 * If the exact sector wasn't found, the parent of the NULL leaf
1819 * will contain the closest sector (rq_pos_tree sorted by
1820 * next_request position).
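/*
 * Editor's note, with hypothetical numbers: suppose the rq_pos_tree
 * holds queues whose next requests start at sectors 1000, 5000 and
 * 9000, and the lookup target is sector 5200. No node matches
 * exactly, so the parent of the NULL leaf is the queue at 5000; if
 * 5200 is within the closeness threshold of 5000, that queue is
 * returned. Otherwise the search probes the in-order neighbour in
 * the direction of the target (here the queue at 9000) and gives up
 * if that one is not close either.
 */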
1822 __bfqq = rb_entry(parent, struct bfq_queue, pos_node);
1823 if (bfq_rq_close_to_sector(__bfqq->next_rq, true, sector))
1826 if (blk_rq_pos(__bfqq->next_rq) < sector)
1827 node = rb_next(&__bfqq->pos_node);
1829 node = rb_prev(&__bfqq->pos_node);
1833 __bfqq = rb_entry(node, struct bfq_queue, pos_node);
1834 if (bfq_rq_close_to_sector(__bfqq->next_rq, true, sector))
1840 static struct bfq_queue *bfq_find_close_cooperator(struct bfq_data *bfqd,
1841 struct bfq_queue *cur_bfqq,
1844 struct bfq_queue *bfqq;
1847 * We shall notice if some of the queues are cooperating,
1848 * e.g., working closely on the same area of the device. In
1849 * that case, we can group them together and: 1) don't waste
1850 * time idling, and 2) serve the union of their requests in
1851 * the best possible order for throughput.
1853 bfqq = bfqq_find_close(bfqd, cur_bfqq, sector);
1854 if (!bfqq || bfqq == cur_bfqq)
1860 static struct bfq_queue *
1861 bfq_setup_merge(struct bfq_queue *bfqq, struct bfq_queue *new_bfqq)
1863 int process_refs, new_process_refs;
1864 struct bfq_queue *__bfqq;
1867 * If there are no process references on the new_bfqq, then it is
1868 * unsafe to follow the ->new_bfqq chain as other bfqq's in the chain
1869 * may have dropped their last reference (not just their last process reference).
1872 if (!bfqq_process_refs(new_bfqq))
1873 return NULL;
1875 /* Avoid a circular list and skip interim queue merges. */
1876 while ((__bfqq = new_bfqq->new_bfqq)) {
1882 process_refs = bfqq_process_refs(bfqq);
1883 new_process_refs = bfqq_process_refs(new_bfqq);
1885 * If the process for the bfqq has gone away, there is no
1886 * sense in merging the queues.
1888 if (process_refs == 0 || new_process_refs == 0)
1889 return NULL;
1891 bfq_log_bfqq(bfqq->bfqd, bfqq, "scheduling merge with queue %d",
1895 * Merging is just a redirection: the requests of the process
1896 * owning one of the two queues are redirected to the other queue.
1897 * The latter queue, in its turn, is set as shared if this is the
1898 * first time that the requests of some process are redirected to it.
1901 * We redirect bfqq to new_bfqq and not the opposite, because
1902 * we are in the context of the process owning bfqq, thus we
1903 * have the io_cq of this process. So we can immediately
1904 * configure this io_cq to redirect the requests of the
1905 * process to new_bfqq. In contrast, the io_cq of new_bfqq is
1906 * not available any more (new_bfqq->bic == NULL).
1908 * Anyway, even in case new_bfqq coincides with the in-service
1909 * queue, redirecting requests to the in-service queue is the
1910 * best option, as we feed the in-service queue with new
1911 * requests close to the last request served and, by doing so,
1912 * are likely to increase the throughput.
1914 bfqq->new_bfqq = new_bfqq;
1915 new_bfqq->ref += process_refs;
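/*
 * Editor's note, with hypothetical numbers: if three processes share
 * bfqq (process_refs == 3) and new_bfqq is backed by one process, the
 * two statements above leave bfqq->new_bfqq pointing to new_bfqq and
 * bump new_bfqq->ref by 3, so that new_bfqq survives until every
 * redirected process has dropped its reference. The actual
 * redirection of each process's bic happens later, in
 * bfq_merge_bfqqs().
 */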
1919 static bool bfq_may_be_close_cooperator(struct bfq_queue *bfqq,
1920 struct bfq_queue *new_bfqq)
1922 if (bfq_class_idle(bfqq) || bfq_class_idle(new_bfqq) ||
1923 (bfqq->ioprio_class != new_bfqq->ioprio_class))
1927 * If either of the queues has already been detected as seeky,
1928 * then merging it with the other queue is unlikely to lead to any throughput gain.
1931 if (BFQQ_SEEKY(bfqq) || BFQQ_SEEKY(new_bfqq))
1935 * Interleaved I/O is known to be done by (some) applications
1936 * only for reads, so it does not make sense to merge async queues.
1939 if (!bfq_bfqq_sync(bfqq) || !bfq_bfqq_sync(new_bfqq))
1946 * If this function returns true, then bfqq cannot be merged. The idea
1947 * is that true cooperation happens very early after processes start
1948 * to do I/O. Usually, late cooperations are just accidental false
1949 * positives. In case bfqq is weight-raised, such false positives
1950 * would evidently degrade latency guarantees for bfqq.
1952 static bool wr_from_too_long(struct bfq_queue *bfqq)
1954 return bfqq->wr_coeff > 1 &&
1955 time_is_before_jiffies(bfqq->last_wr_start_finish +
1956 msecs_to_jiffies(100));
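/*
 * Editor's note, with hypothetical numbers: with HZ=1000 the 100 ms
 * window above spans 100 jiffies. A queue whose weight raising
 * started 20 jiffies ago is still treated as a plausible cooperator,
 * whereas a queue raised 150 jiffies ago makes this function return
 * true and is therefore excluded from merging (see
 * bfq_setup_cooperator() below).
 */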
1960 * Attempt to schedule a merge of bfqq with the currently in-service
1961 * queue or with a close queue among the scheduled queues. Return
1962 * NULL if no merge was scheduled, a pointer to the shared bfq_queue
1963 * structure otherwise.
1965 * The OOM queue is not allowed to participate in cooperation: in fact, since
1966 * the requests temporarily redirected to the OOM queue could be redirected
1967 * again to dedicated queues at any time, the state needed to correctly
1968 * handle merging with the OOM queue would be quite complex and expensive
1969 * to maintain. Besides, in such a critical condition as an out of memory,
1970 * the benefits of queue merging may be of little relevance, or even negligible.
1972 * Weight-raised queues can be merged only if their weight-raising
1973 * period has just started. In fact cooperating processes are usually
1974 * started together. Thus, with this filter we avoid false positives
1975 * that would jeopardize low-latency guarantees.
1977 * WARNING: queue merging may impair fairness among non-weight raised
1978 * queues, for at least two reasons: 1) the original weight of a
1979 * merged queue may change during the merged state, 2) even if the
1980 * weight remains the same, a merged queue may be bloated with many more
1981 * requests than the ones produced by its originally-associated process.
1984 static struct bfq_queue *
1985 bfq_setup_cooperator(struct bfq_data *bfqd, struct bfq_queue *bfqq,
1986 void *io_struct, bool request)
1988 struct bfq_queue *in_service_bfqq, *new_bfqq;
1990 if (bfqq->new_bfqq)
1991 return bfqq->new_bfqq;
1993 if (!io_struct ||
1994 wr_from_too_long(bfqq) ||
1995 unlikely(bfqq == &bfqd->oom_bfqq))
1998 /* If there is only one backlogged queue, don't search. */
1999 if (bfqd->busy_queues == 1)
2002 in_service_bfqq = bfqd->in_service_queue;
2004 if (!in_service_bfqq || in_service_bfqq == bfqq
2005 || wr_from_too_long(in_service_bfqq) ||
2006 unlikely(in_service_bfqq == &bfqd->oom_bfqq))
2007 goto check_scheduled;
2009 if (bfq_rq_close_to_sector(io_struct, request, bfqd->last_position) &&
2010 bfqq->entity.parent == in_service_bfqq->entity.parent &&
2011 bfq_may_be_close_cooperator(bfqq, in_service_bfqq)) {
2012 new_bfqq = bfq_setup_merge(bfqq, in_service_bfqq);
2017 * Check whether there is a cooperator among currently scheduled
2018 * queues. The only thing we need is that the bio/request is not
2019 * NULL, as we need it to establish whether a cooperator exists.
2022 new_bfqq = bfq_find_close_cooperator(bfqd, bfqq,
2023 bfq_io_struct_pos(io_struct, request));
2025 if (new_bfqq && !wr_from_too_long(new_bfqq) &&
2026 likely(new_bfqq != &bfqd->oom_bfqq) &&
2027 bfq_may_be_close_cooperator(bfqq, new_bfqq))
2028 return bfq_setup_merge(bfqq, new_bfqq);
2033 static void bfq_bfqq_save_state(struct bfq_queue *bfqq)
2035 struct bfq_io_cq *bic = bfqq->bic;
2038 * If !bfqq->bic, the queue is already shared or its requests
2039 * have already been redirected to a shared queue; both idle window
2040 * and weight raising state have already been saved. Do nothing.
2045 bic->saved_ttime = bfqq->ttime;
2046 bic->saved_has_short_ttime = bfq_bfqq_has_short_ttime(bfqq);
2047 bic->saved_IO_bound = bfq_bfqq_IO_bound(bfqq);
2048 bic->saved_in_large_burst = bfq_bfqq_in_large_burst(bfqq);
2049 bic->was_in_burst_list = !hlist_unhashed(&bfqq->burst_list_node);
2050 bic->saved_wr_coeff = bfqq->wr_coeff;
2051 bic->saved_wr_start_at_switch_to_srt = bfqq->wr_start_at_switch_to_srt;
2052 bic->saved_last_wr_start_finish = bfqq->last_wr_start_finish;
2053 bic->saved_wr_cur_max_time = bfqq->wr_cur_max_time;
2057 bfq_merge_bfqqs(struct bfq_data *bfqd, struct bfq_io_cq *bic,
2058 struct bfq_queue *bfqq, struct bfq_queue *new_bfqq)
2060 bfq_log_bfqq(bfqd, bfqq, "merging with queue %lu",
2061 (unsigned long)new_bfqq->pid);
2062 /* Save weight raising and idle window of the merged queues */
2063 bfq_bfqq_save_state(bfqq);
2064 bfq_bfqq_save_state(new_bfqq);
2065 if (bfq_bfqq_IO_bound(bfqq))
2066 bfq_mark_bfqq_IO_bound(new_bfqq);
2067 bfq_clear_bfqq_IO_bound(bfqq);
2070 * If bfqq is weight-raised, then let new_bfqq inherit
2071 * weight-raising. To reduce false positives, neglect the case
2072 * where bfqq has just been created, but has not yet made it
2073 * to be weight-raised (which may happen because EQM may merge
2074 * bfqq even before bfq_add_request is executed for the first
2075 * time for bfqq). Handling this case would however be very
2076 * easy, thanks to the flag just_created.
2078 if (new_bfqq->wr_coeff == 1 && bfqq->wr_coeff > 1) {
2079 new_bfqq->wr_coeff = bfqq->wr_coeff;
2080 new_bfqq->wr_cur_max_time = bfqq->wr_cur_max_time;
2081 new_bfqq->last_wr_start_finish = bfqq->last_wr_start_finish;
2082 new_bfqq->wr_start_at_switch_to_srt =
2083 bfqq->wr_start_at_switch_to_srt;
2084 if (bfq_bfqq_busy(new_bfqq))
2085 bfqd->wr_busy_queues++;
2086 new_bfqq->entity.prio_changed = 1;
2089 if (bfqq->wr_coeff > 1) { /* bfqq has given its wr to new_bfqq */
2091 bfqq->entity.prio_changed = 1;
2092 if (bfq_bfqq_busy(bfqq))
2093 bfqd->wr_busy_queues--;
2096 bfq_log_bfqq(bfqd, new_bfqq, "merge_bfqqs: wr_busy %d",
2097 bfqd->wr_busy_queues);
2100 * Merge queues (that is, let bic redirect its requests to new_bfqq)
2102 bic_set_bfqq(bic, new_bfqq, 1);
2103 bfq_mark_bfqq_coop(new_bfqq);
2105 * new_bfqq now belongs to at least two bics (it is a shared queue):
2106 * set new_bfqq->bic to NULL. bfqq either:
2107 * - does not belong to any bic any more, and hence bfqq->bic must
2108 * be set to NULL, or
2109 * - is a queue whose owning bics have already been redirected to a
2110 * different queue, hence the queue is destined to not belong to
2111 * any bic soon and bfqq->bic is already NULL (therefore the next
2112 * assignment causes no harm).
2114 new_bfqq->bic = NULL;
2116 /* release process reference to bfqq */
2117 bfq_put_queue(bfqq);
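/*
 * Editor's note, with hypothetical numbers: if bfqq is being
 * weight-raised with wr_coeff 30 while new_bfqq is not raised, the
 * code above copies the whole weight-raising state (coefficient,
 * current maximum duration, start timestamps) into new_bfqq and marks
 * bfqq as having given its weight raising away, so the merged queue
 * keeps the latency guarantees the raised process had earned;
 * wr_busy_queues is adjusted for each queue only if that queue is
 * currently busy.
 */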
2120 static bool bfq_allow_bio_merge(struct request_queue *q, struct request *rq,
2123 struct bfq_data *bfqd = q->elevator->elevator_data;
2124 bool is_sync = op_is_sync(bio->bi_opf);
2125 struct bfq_queue *bfqq = bfqd->bio_bfqq, *new_bfqq;
2128 * Disallow merge of a sync bio into an async request.
2130 if (is_sync && !rq_is_sync(rq))
2134 * Lookup the bfqq that this bio will be queued with. Allow
2135 * merge only if rq is queued there.
2141 * We take advantage of this function to perform an early merge
2142 * of the queues of possible cooperating processes.
2144 new_bfqq = bfq_setup_cooperator(bfqd, bfqq, bio, false);
2147 * bic still points to bfqq, then it has not yet been
2148 * redirected to some other bfq_queue, and a queue
2149 * merge between bfqq and new_bfqq can be safely
2150 * fulfilled, i.e., bic can be redirected to new_bfqq
2151 * and bfqq can be put.
2153 bfq_merge_bfqqs(bfqd, bfqd->bio_bic, bfqq,
2156 * If we get here, bio will be queued into new_queue,
2157 * so use new_bfqq to decide whether bio and rq can be merged.
2163 * Change also bfqd->bio_bfqq, as
2164 * bfqd->bio_bic now points to new_bfqq, and
2165 * this function may be invoked again (and then may
2166 * use again bfqd->bio_bfqq).
2168 bfqd->bio_bfqq = bfqq;
2171 return bfqq == RQ_BFQQ(rq);
2175 * Set the maximum time for the in-service queue to consume its
2176 * budget. This prevents seeky processes from lowering the throughput.
2177 * In practice, a time-slice service scheme is used with seeky processes.
2180 static void bfq_set_budget_timeout(struct bfq_data *bfqd,
2181 struct bfq_queue *bfqq)
2183 unsigned int timeout_coeff;
2185 if (bfqq->wr_cur_max_time == bfqd->bfq_wr_rt_max_time)
2186 timeout_coeff = 1;
2187 else
2188 timeout_coeff = bfqq->entity.weight / bfqq->entity.orig_weight;
2190 bfqd->last_budget_start = ktime_get();
2192 bfqq->budget_timeout = jiffies +
2193 bfqd->bfq_timeout * timeout_coeff;
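/*
 * Editor's note, with hypothetical numbers: for a soft real-time
 * queue (wr_cur_max_time == bfq_wr_rt_max_time) the coefficient is 1,
 * so the queue gets exactly one bfq_timeout period. For a queue whose
 * weight is currently, say, twice its original weight, timeout_coeff
 * is 2 and the budget timeout lands two bfq_timeout periods in the
 * future, giving the privileged queue more time to use its budget.
 */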
2196 static void __bfq_set_in_service_queue(struct bfq_data *bfqd,
2197 struct bfq_queue *bfqq)
2200 bfqg_stats_update_avg_queue_size(bfqq_group(bfqq));
2201 bfq_clear_bfqq_fifo_expire(bfqq);
2203 bfqd->budgets_assigned = (bfqd->budgets_assigned * 7 + 256) / 8;
2205 if (time_is_before_jiffies(bfqq->last_wr_start_finish) &&
2206 bfqq->wr_coeff > 1 &&
2207 bfqq->wr_cur_max_time == bfqd->bfq_wr_rt_max_time &&
2208 time_is_before_jiffies(bfqq->budget_timeout)) {
2210 * For soft real-time queues, move the start
2211 * of the weight-raising period forward by the
2212 * time the queue has not received any
2213 * service. Otherwise, a relatively long
2214 * service delay is likely to cause the
2215 * weight-raising period of the queue to end,
2216 * because of the short duration of the
2217 * weight-raising period of a soft real-time
2218 * queue. It is worth noting that this move
2219 * is not so dangerous for the other queues,
2220 * because soft real-time queues are not
2223 * To not add a further variable, we use the
2224 * overloaded field budget_timeout to
2225 * determine for how long the queue has not
2226 * received service, i.e., how much time has
2227 * elapsed since the queue expired. However,
2228 * this is a little imprecise, because
2229 * budget_timeout is set to jiffies if bfqq
2230 * not only expires, but also remains with no request.
2233 if (time_after(bfqq->budget_timeout,
2234 bfqq->last_wr_start_finish))
2235 bfqq->last_wr_start_finish +=
2236 jiffies - bfqq->budget_timeout;
2238 bfqq->last_wr_start_finish = jiffies;
2241 bfq_set_budget_timeout(bfqd, bfqq);
2242 bfq_log_bfqq(bfqd, bfqq,
2243 "set_in_service_queue, cur-budget = %d",
2244 bfqq->entity.budget);
2247 bfqd->in_service_queue = bfqq;
2251 * Get and set a new queue for service.
2253 static struct bfq_queue *bfq_set_in_service_queue(struct bfq_data *bfqd)
2255 struct bfq_queue *bfqq = bfq_get_next_queue(bfqd);
2257 __bfq_set_in_service_queue(bfqd, bfqq);
2261 static void bfq_arm_slice_timer(struct bfq_data *bfqd)
2263 struct bfq_queue *bfqq = bfqd->in_service_queue;
2266 bfq_mark_bfqq_wait_request(bfqq);
2269 * We don't want to idle for seeks, but we do want to allow
2270 * fair distribution of slice time for a process doing back-to-back
2271 * seeks. So allow a little bit of time for it to submit a new rq.
2273 sl = bfqd->bfq_slice_idle;
2275 * Unless the queue is being weight-raised or the scenario is
2276 * asymmetric, grant only minimum idle time if the queue
2277 * is seeky. A long idling is preserved for a weight-raised
2278 * queue, or, more in general, in an asymmetric scenario,
2279 * because a long idling is needed for guaranteeing to a queue
2280 * its reserved share of the throughput (in particular, it is
2281 * needed if the queue has a higher weight than some other queue).
2284 if (BFQQ_SEEKY(bfqq) && bfqq->wr_coeff == 1 &&
2285 bfq_symmetric_scenario(bfqd))
2286 sl = min_t(u64, sl, BFQ_MIN_TT);
2287 else if (bfqq->wr_coeff > 1)
2288 sl = max_t(u32, sl, 20ULL * NSEC_PER_MSEC);
2290 bfqd->last_idling_start = ktime_get();
2291 hrtimer_start(&bfqd->idle_slice_timer, ns_to_ktime(sl),
2293 bfqg_stats_set_start_idle_time(bfqq_group(bfqq));
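/*
 * Editor's note, with hypothetical numbers: with a slice_idle of 8 ms
 * (the usual default), a seeky, non-weight-raised queue in a
 * symmetric scenario waits only the minimum think-time slot, a
 * weight-raised queue waits at least 20 ms, and any other queue waits
 * the plain 8 ms before the idle timer fires.
 */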
2297 * In autotuning mode, max_budget is dynamically recomputed as the
2298 * amount of sectors transferred in timeout at the estimated peak
2299 * rate. This enables BFQ to utilize a full timeslice with a full
2300 * budget, even if the in-service queue is served at peak rate. And
2301 * this maximises throughput with sequential workloads.
2303 static unsigned long bfq_calc_max_budget(struct bfq_data *bfqd)
2305 return (u64)bfqd->peak_rate * USEC_PER_MSEC *
2306 jiffies_to_msecs(bfqd->bfq_timeout)>>BFQ_RATE_SHIFT;
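/*
 * Editor's note, with hypothetical numbers: if the estimated peak
 * rate corresponds to about 0.2 sectors/usec (roughly 100 MB/s with
 * 512 B sectors) and bfq_timeout is 125 ms, the formula above yields
 * about 0.2 * 1000 * 125 = 25000 sectors, i.e. around 12 MB of
 * budget: the service a sequential queue can receive in one full
 * timeout at peak rate.
 */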
2310 * Update parameters related to throughput and responsiveness, as a
2311 * function of the estimated peak rate. See comments on
2312 * bfq_calc_max_budget(), and on T_slow and T_fast arrays.
2314 static void update_thr_responsiveness_params(struct bfq_data *bfqd)
2316 int dev_type = blk_queue_nonrot(bfqd->queue);
2318 if (bfqd->bfq_user_max_budget == 0)
2319 bfqd->bfq_max_budget =
2320 bfq_calc_max_budget(bfqd);
2322 if (bfqd->device_speed == BFQ_BFQD_FAST &&
2323 bfqd->peak_rate < device_speed_thresh[dev_type]) {
2324 bfqd->device_speed = BFQ_BFQD_SLOW;
2325 bfqd->RT_prod = R_slow[dev_type] *
2327 } else if (bfqd->device_speed == BFQ_BFQD_SLOW &&
2328 bfqd->peak_rate > device_speed_thresh[dev_type]) {
2329 bfqd->device_speed = BFQ_BFQD_FAST;
2330 bfqd->RT_prod = R_fast[dev_type] *
2335 "dev_type %s dev_speed_class = %s (%llu sects/sec), thresh %llu setcs/sec",
2336 dev_type == 0 ? "ROT" : "NONROT",
2337 bfqd->device_speed == BFQ_BFQD_FAST ? "FAST" : "SLOW",
2338 bfqd->device_speed == BFQ_BFQD_FAST ?
2339 (USEC_PER_SEC*(u64)R_fast[dev_type])>>BFQ_RATE_SHIFT :
2340 (USEC_PER_SEC*(u64)R_slow[dev_type])>>BFQ_RATE_SHIFT,
2341 (USEC_PER_SEC*(u64)device_speed_thresh[dev_type])>>
2345 static void bfq_reset_rate_computation(struct bfq_data *bfqd,
2348 if (rq != NULL) { /* new rq dispatch now, reset accordingly */
2349 bfqd->last_dispatch = bfqd->first_dispatch = ktime_get_ns();
2350 bfqd->peak_rate_samples = 1;
2351 bfqd->sequential_samples = 0;
2352 bfqd->tot_sectors_dispatched = bfqd->last_rq_max_size =
2354 } else /* no new rq dispatched, just reset the number of samples */
2355 bfqd->peak_rate_samples = 0; /* full re-init on next disp. */
2358 "reset_rate_computation at end, sample %u/%u tot_sects %llu",
2359 bfqd->peak_rate_samples, bfqd->sequential_samples,
2360 bfqd->tot_sectors_dispatched);
2363 static void bfq_update_rate_reset(struct bfq_data *bfqd, struct request *rq)
2365 u32 rate, weight, divisor;
2368 * For the convergence property to hold (see comments on
2369 * bfq_update_peak_rate()) and for the assessment to be
2370 * reliable, a minimum number of samples must be present, and
2371 * a minimum amount of time must have elapsed. If not so, do
2372 * not compute new rate. Just reset parameters, to get ready
2373 * for a new evaluation attempt.
2375 if (bfqd->peak_rate_samples < BFQ_RATE_MIN_SAMPLES ||
2376 bfqd->delta_from_first < BFQ_RATE_MIN_INTERVAL)
2377 goto reset_computation;
2380 * If a new request completion has occurred after last
2381 * dispatch, then, to approximate the rate at which requests
2382 * have been served by the device, it is more precise to
2383 * extend the observation interval to the last completion.
2385 bfqd->delta_from_first =
2386 max_t(u64, bfqd->delta_from_first,
2387 bfqd->last_completion - bfqd->first_dispatch);
2390 * Rate computed in sects/usec, and not sects/nsec, for precision issues.
2393 rate = div64_ul(bfqd->tot_sectors_dispatched<<BFQ_RATE_SHIFT,
2394 div_u64(bfqd->delta_from_first, NSEC_PER_USEC));
2397 * Peak rate not updated if:
2398 * - the percentage of sequential dispatches is below 3/4 of the
2399 * total, and rate is below the current estimated peak rate
2400 * - rate is unreasonably high (> 20M sectors/sec)
2402 if ((bfqd->sequential_samples < (3 * bfqd->peak_rate_samples)>>2 &&
2403 rate <= bfqd->peak_rate) ||
2404 rate > 20<<BFQ_RATE_SHIFT)
2405 goto reset_computation;
2408 * We have to update the peak rate, at last! To this purpose,
2409 * we use a low-pass filter. We compute the smoothing constant
2410 * of the filter as a function of the 'weight' of the new
2413 * As can be seen in next formulas, we define this weight as a
2414 * quantity proportional to how sequential the workload is,
2415 * and to how long the observation time interval is.
2417 * The weight runs from 0 to 8. The maximum value of the
2418 * weight, 8, yields the minimum value for the smoothing
2419 * constant. At this minimum value for the smoothing constant,
2420 * the measured rate contributes for half of the next value of
2421 * the estimated peak rate.
2423 * So, the first step is to compute the weight as a function
2424 * of how sequential the workload is. Note that the weight
2425 * cannot reach 9, because bfqd->sequential_samples cannot
2426 * become equal to bfqd->peak_rate_samples, which, in its
2427 * turn, holds true because bfqd->sequential_samples is not
2428 * incremented for the first sample.
2430 weight = (9 * bfqd->sequential_samples) / bfqd->peak_rate_samples;
2433 * Second step: further refine the weight as a function of the
2434 * duration of the observation interval.
2436 weight = min_t(u32, 8,
2437 div_u64(weight * bfqd->delta_from_first,
2438 BFQ_RATE_REF_INTERVAL));
2441 * Divisor ranging from 10, for minimum weight, to 2, for maximum weight.
2444 divisor = 10 - weight;
2447 * Finally, update peak rate:
2449 * peak_rate = peak_rate * (divisor-1) / divisor + rate / divisor
2451 bfqd->peak_rate *= divisor-1;
2452 bfqd->peak_rate /= divisor;
2453 rate /= divisor; /* smoothing constant alpha = 1/divisor */
2455 bfqd->peak_rate += rate;
2456 update_thr_responsiveness_params(bfqd);
2459 bfq_reset_rate_computation(bfqd, rq);
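/*
 * Editor's sketch (not part of BFQ): the low-pass filter above in
 * isolation, with hypothetical inputs, to make the arithmetic easy to
 * check. A fully sequential workload observed over a long interval
 * yields weight 8, hence divisor 2, and the new estimate is the
 * average of the old estimate and the measured rate; with weight 4
 * the old estimate keeps 5/6 of its value and the sample contributes
 * the remaining 1/6.
 */
static u64 example_peak_rate_filter(u64 old_rate, u64 sample_rate,
				    unsigned int weight /* 0..8 */)
{
	unsigned int divisor = 10 - weight; /* 10 for weight 0, 2 for weight 8 */

	return old_rate * (divisor - 1) / divisor + sample_rate / divisor;
}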
2463 * Update the read/write peak rate (the main quantity used for
2464 * auto-tuning, see update_thr_responsiveness_params()).
2466 * It is not trivial to estimate the peak rate (correctly): because of
2467 * the presence of sw and hw queues between the scheduler and the
2468 * device components that finally serve I/O requests, it is hard to
2469 * say exactly when a given dispatched request is served inside the
2470 * device, and for how long. As a consequence, it is hard to know
2471 * precisely at what rate a given set of requests is actually served
2474 * On the opposite end, the dispatch time of any request is trivially
2475 * available, and, from this piece of information, the "dispatch rate"
2476 * of requests can be immediately computed. So, the idea in the next
2477 * function is to use what is known, namely request dispatch times
2478 * (plus, when useful, request completion times), to estimate what is
2479 * unknown, namely in-device request service rate.
2481 * The main issue is that, because of the above facts, the rate at
2482 * which a certain set of requests is dispatched over a certain time
2483 * interval can vary greatly with respect to the rate at which the
2484 * same requests are then served. But, since the size of any
2485 * intermediate queue is limited, and the service scheme is lossless
2486 * (no request is silently dropped), the following obvious convergence
2487 * property holds: the number of requests dispatched MUST become
2488 * closer and closer to the number of requests completed as the
2489 * observation interval grows. This is the key property used in
2490 * the next function to estimate the peak service rate as a function
2491 * of the observed dispatch rate. The function assumes to be invoked
2492 * on every request dispatch.
2494 static void bfq_update_peak_rate(struct bfq_data *bfqd, struct request *rq)
2496 u64 now_ns = ktime_get_ns();
2498 if (bfqd->peak_rate_samples == 0) { /* first dispatch */
2499 bfq_log(bfqd, "update_peak_rate: goto reset, samples %d",
2500 bfqd->peak_rate_samples);
2501 bfq_reset_rate_computation(bfqd, rq);
2502 goto update_last_values; /* will add one sample */
2506 * Device idle for very long: the observation interval lasting
2507 * up to this dispatch cannot be a valid observation interval
2508 * for computing a new peak rate (similarly to the late-
2509 * completion event in bfq_completed_request()). Go to
2510 * update_rate_and_reset to have the following three steps
2512 * - close the observation interval at the last (previous)
2513 * request dispatch or completion
2514 * - compute rate, if possible, for that observation interval
2515 * - start a new observation interval with this dispatch
2517 if (now_ns - bfqd->last_dispatch > 100*NSEC_PER_MSEC &&
2518 bfqd->rq_in_driver == 0)
2519 goto update_rate_and_reset;
2521 /* Update sampling information */
2522 bfqd->peak_rate_samples++;
2524 if ((bfqd->rq_in_driver > 0 ||
2525 now_ns - bfqd->last_completion < BFQ_MIN_TT)
2526 && get_sdist(bfqd->last_position, rq) < BFQQ_SEEK_THR)
2527 bfqd->sequential_samples++;
2529 bfqd->tot_sectors_dispatched += blk_rq_sectors(rq);
2531 /* Reset max observed rq size every 32 dispatches */
2532 if (likely(bfqd->peak_rate_samples % 32))
2533 bfqd->last_rq_max_size =
2534 max_t(u32, blk_rq_sectors(rq), bfqd->last_rq_max_size);
2536 bfqd->last_rq_max_size = blk_rq_sectors(rq);
2538 bfqd->delta_from_first = now_ns - bfqd->first_dispatch;
2540 /* Target observation interval not yet reached, go on sampling */
2541 if (bfqd->delta_from_first < BFQ_RATE_REF_INTERVAL)
2542 goto update_last_values;
2544 update_rate_and_reset:
2545 bfq_update_rate_reset(bfqd, rq);
2547 bfqd->last_position = blk_rq_pos(rq) + blk_rq_sectors(rq);
2548 bfqd->last_dispatch = now_ns;
2552 * Remove request from internal lists.
2554 static void bfq_dispatch_remove(struct request_queue *q, struct request *rq)
2556 struct bfq_queue *bfqq = RQ_BFQQ(rq);
2559 * For consistency, the next instruction should have been
2560 * executed after removing the request from the queue and
2561 * dispatching it. We execute instead this instruction before
2562 * bfq_remove_request() (and hence introduce a temporary
2563 * inconsistency), for efficiency. In fact, should this
2564 * dispatch occur for a non in-service bfqq, this anticipated
2565 * increment prevents two counters related to bfqq->dispatched
2566 * from risking to be, first, uselessly decremented, and then
2567 * incremented again when the (new) value of bfqq->dispatched
2568 * happens to be taken into account.
2571 bfq_update_peak_rate(q->elevator->elevator_data, rq);
2573 bfq_remove_request(q, rq);
2576 static void __bfq_bfqq_expire(struct bfq_data *bfqd, struct bfq_queue *bfqq)
2579 * If this bfqq is shared between multiple processes, check
2580 * to make sure that those processes are still issuing I/Os
2581 * within the mean seek distance. If not, it may be time to
2582 * break the queues apart again.
2584 if (bfq_bfqq_coop(bfqq) && BFQQ_SEEKY(bfqq))
2585 bfq_mark_bfqq_split_coop(bfqq);
2587 if (RB_EMPTY_ROOT(&bfqq->sort_list)) {
2588 if (bfqq->dispatched == 0)
2590 * Overloading budget_timeout field to store
2591 * the time at which the queue remains with no
2592 * backlog and no outstanding request; used by
2593 * the weight-raising mechanism.
2595 bfqq->budget_timeout = jiffies;
2597 bfq_del_bfqq_busy(bfqd, bfqq, true);
2599 bfq_requeue_bfqq(bfqd, bfqq, true);
2601 * Resort priority tree of potential close cooperators.
2603 bfq_pos_tree_add_move(bfqd, bfqq);
2607 * All in-service entities must have been properly deactivated
2608 * or requeued before executing the next function, which
2609 * resets all in-service entities as no more in service.
2611 __bfq_bfqd_reset_in_service(bfqd);
2615 * __bfq_bfqq_recalc_budget - try to adapt the budget to the @bfqq behavior.
2616 * @bfqd: device data.
2617 * @bfqq: queue to update.
2618 * @reason: reason for expiration.
2620 * Handle the feedback on @bfqq budget at queue expiration.
2621 * See the body for detailed comments.
2623 static void __bfq_bfqq_recalc_budget(struct bfq_data *bfqd,
2624 struct bfq_queue *bfqq,
2625 enum bfqq_expiration reason)
2627 struct request *next_rq;
2628 int budget, min_budget;
2630 min_budget = bfq_min_budget(bfqd);
2632 if (bfqq->wr_coeff == 1)
2633 budget = bfqq->max_budget;
2634 else
2635 * Use a constant, low budget for weight-raised queues,
2636 * to help achieve a low latency. Keep it slightly higher
2637 * than the minimum possible budget, to cause a little
2638 * bit fewer expirations.
2640 budget = 2 * min_budget;
2642 bfq_log_bfqq(bfqd, bfqq, "recalc_budg: last budg %d, budg left %d",
2643 bfqq->entity.budget, bfq_bfqq_budget_left(bfqq));
2644 bfq_log_bfqq(bfqd, bfqq, "recalc_budg: last max_budg %d, min budg %d",
2645 budget, bfq_min_budget(bfqd));
2646 bfq_log_bfqq(bfqd, bfqq, "recalc_budg: sync %d, seeky %d",
2647 bfq_bfqq_sync(bfqq), BFQQ_SEEKY(bfqd->in_service_queue));
2649 if (bfq_bfqq_sync(bfqq) && bfqq->wr_coeff == 1) {
2652 * Caveat: in all the following cases we trade latency for throughput.
2655 case BFQQE_TOO_IDLE:
2657 * This is the only case where we may reduce
2658 * the budget: if there is no request of the
2659 * process still waiting for completion, then
2660 * we assume (tentatively) that the timer has
2661 * expired because the batch of requests of
2662 * the process could have been served with a
2663 * smaller budget. Hence, betting that the
2664 * process will behave in the same way when it
2665 * becomes backlogged again, we reduce its
2666 * next budget. As long as we guess right,
2667 * this budget cut reduces the latency
2668 * experienced by the process.
2670 * However, if there are still outstanding
2671 * requests, then the process may have not yet
2672 * issued its next request just because it is
2673 * still waiting for the completion of some of
2674 * the still outstanding ones. So in this
2675 * subcase we do not reduce its budget, on the
2676 * contrary we increase it to possibly boost
2677 * the throughput, as discussed in the
2678 * comments to the BUDGET_TIMEOUT case.
2680 if (bfqq->dispatched > 0) /* still outstanding reqs */
2681 budget = min(budget * 2, bfqd->bfq_max_budget);
2683 if (budget > 5 * min_budget)
2684 budget -= 4 * min_budget;
2686 budget = min_budget;
2689 case BFQQE_BUDGET_TIMEOUT:
2691 * We double the budget here because it gives
2692 * the chance to boost the throughput if this
2693 * is not a seeky process (and has bumped into
2694 * this timeout because of, e.g., ZBR).
2696 budget = min(budget * 2, bfqd->bfq_max_budget);
2698 case BFQQE_BUDGET_EXHAUSTED:
2700 * The process still has backlog, and did not
2701 * let either the budget timeout or the disk
2702 * idling timeout expire. Hence it is not
2703 * seeky, has a short thinktime and may be
2704 * happy with a higher budget too. So
2705 * definitely increase the budget of this good
2706 * candidate to boost the disk throughput.
2708 budget = min(budget * 4, bfqd->bfq_max_budget);
2710 case BFQQE_NO_MORE_REQUESTS:
2712 * For queues that expire for this reason, it
2713 * is particularly important to keep the
2714 * budget close to the actual service they
2715 * need. Doing so reduces the timestamp
2716 * misalignment problem described in the
2717 * comments in the body of
2718 * __bfq_activate_entity. In fact, suppose
2719 * that a queue systematically expires for
2720 * BFQQE_NO_MORE_REQUESTS and presents a
2721 * new request in time to enjoy timestamp
2722 * back-shifting. The larger the budget of the
2723 * queue is with respect to the service the
2724 * queue actually requests in each service
2725 * slot, the more times the queue can be
2726 * reactivated with the same virtual finish
2727 * time. It follows that, even if this finish
2728 * time is pushed to the system virtual time
2729 * to reduce the consequent timestamp
2730 * misalignment, the queue unjustly enjoys for
2731 * many re-activations a lower finish time
2732 * than all newly activated queues.
2734 * The service needed by bfqq is measured
2735 * quite precisely by bfqq->entity.service.
2736 * Since bfqq does not enjoy device idling,
2737 * bfqq->entity.service is equal to the number
2738 * of sectors that the process associated with
2739 * bfqq requested to read/write before waiting
2740 * for request completions, or blocking for other reasons.
2743 budget = max_t(int, bfqq->entity.service, min_budget);
2748 } else if (!bfq_bfqq_sync(bfqq)) {
2750 * Async queues always get the maximum possible
2751 * budget, as for them we do not care about latency
2752 * (in addition, their ability to dispatch is limited
2753 * by the charging factor).
2755 budget = bfqd->bfq_max_budget;
2758 bfqq->max_budget = budget;
2760 if (bfqd->budgets_assigned >= bfq_stats_min_budgets &&
2761 !bfqd->bfq_user_max_budget)
2762 bfqq->max_budget = min(bfqq->max_budget, bfqd->bfq_max_budget);
2765 * If there is still backlog, then assign a new budget, making
2766 * sure that it is large enough for the next request. Since
2767 * the finish time of bfqq must be kept in sync with the
2768 * budget, be sure to call __bfq_bfqq_expire() *after* this
2771 * If there is no backlog, then no need to update the budget;
2772 * it will be updated on the arrival of a new request.
2774 next_rq = bfqq->next_rq;
2776 bfqq->entity.budget = max_t(unsigned long, bfqq->max_budget,
2777 bfq_serv_to_charge(next_rq, bfqq));
2779 bfq_log_bfqq(bfqd, bfqq, "head sect: %u, new budget %d",
2780 next_rq ? blk_rq_sectors(next_rq) : 0,
2781 bfqq->entity.budget);
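/*
 * Editor's sketch (not part of BFQ): the budget feedback above,
 * reduced to its bare arithmetic for a sync, non-weight-raised queue.
 * All names are hypothetical; min_budget and max_budget stand for
 * bfq_min_budget(bfqd) and bfqd->bfq_max_budget.
 */
static int example_next_budget(int budget, int min_budget, int max_budget,
			       int service_received, bool reqs_outstanding,
			       enum bfqq_expiration reason)
{
	switch (reason) {
	case BFQQE_TOO_IDLE:
		if (reqs_outstanding) /* still waiting for completions */
			return min(budget * 2, max_budget);
		if (budget > 5 * min_budget)
			return budget - 4 * min_budget;
		return min_budget;
	case BFQQE_BUDGET_TIMEOUT:
		return min(budget * 2, max_budget);
	case BFQQE_BUDGET_EXHAUSTED:
		return min(budget * 4, max_budget);
	case BFQQE_NO_MORE_REQUESTS:
		return max(service_received, min_budget);
	default:
		return budget;
	}
}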
2785 * Return true if the process associated with bfqq is "slow". The slow
2786 * flag is used, in addition to the budget timeout, to reduce the
2787 * amount of service provided to seeky processes, and thus reduce
2788 * their chances to lower the throughput. More details in the comments
2789 * on the function bfq_bfqq_expire().
2791 * An important observation is in order: as discussed in the comments
2792 * on the function bfq_update_peak_rate(), with devices with internal
2793 * queues, it is hard if ever possible to know when and for how long
2794 * an I/O request is processed by the device (apart from the trivial
2795 * I/O pattern where a new request is dispatched only after the
2796 * previous one has been completed). This makes it hard to evaluate
2797 * the real rate at which the I/O requests of each bfq_queue are
2798 * served. In fact, for an I/O scheduler like BFQ, serving a
2799 * bfq_queue means just dispatching its requests during its service
2800 * slot (i.e., until the budget of the queue is exhausted, or the
2801 * queue remains idle, or, finally, a timeout fires). But, during the
2802 * service slot of a bfq_queue, around 100 ms at most, the device may
2803 * be even still processing requests of bfq_queues served in previous
2804 * service slots. On the opposite end, the requests of the in-service
2805 * bfq_queue may be completed after the service slot of the queue
2808 * Anyway, unless more sophisticated solutions are used
2809 * (where possible), the sum of the sizes of the requests dispatched
2810 * during the service slot of a bfq_queue is probably the only
2811 * approximation available for the service received by the bfq_queue
2812 * during its service slot. And this sum is the quantity used in this
2813 * function to evaluate the I/O speed of a process.
2815 static bool bfq_bfqq_is_slow(struct bfq_data *bfqd, struct bfq_queue *bfqq,
2816 bool compensate, enum bfqq_expiration reason,
2817 unsigned long *delta_ms)
2819 ktime_t delta_ktime;
2821 bool slow = BFQQ_SEEKY(bfqq); /* if delta too short, use seekyness */
2823 if (!bfq_bfqq_sync(bfqq))
2827 delta_ktime = bfqd->last_idling_start;
2829 delta_ktime = ktime_get();
2830 delta_ktime = ktime_sub(delta_ktime, bfqd->last_budget_start);
2831 delta_usecs = ktime_to_us(delta_ktime);
2833 /* don't use too short time intervals */
2834 if (delta_usecs < 1000) {
2835 if (blk_queue_nonrot(bfqd->queue))
2837 * give same worst-case guarantees as idling for seeky processes
2840 *delta_ms = BFQ_MIN_TT / NSEC_PER_MSEC;
2841 else /* charge at least one seek */
2842 *delta_ms = bfq_slice_idle / NSEC_PER_MSEC;
2847 *delta_ms = delta_usecs / USEC_PER_MSEC;
2850 * Use only long (> 20ms) intervals to filter out excessive
2851 * spikes in service rate estimation.
2853 if (delta_usecs > 20000) {
2855 * Caveat for rotational devices: processes doing I/O
2856 * in the slower disk zones tend to be slow(er) even
2857 * if not seeky. In this respect, the estimated peak
2858 * rate is likely to be an average over the disk
2859 * surface. Accordingly, to not be too harsh with
2860 * unlucky processes, a process is deemed slow only if
2861 * its rate has been lower than half of the estimated peak rate.
2864 slow = bfqq->entity.service < bfqd->bfq_max_budget / 2;
2867 bfq_log_bfqq(bfqd, bfqq, "bfq_bfqq_is_slow: slow %d", slow);
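/*
 * Editor's note, with hypothetical numbers: suppose the observation
 * interval above is 80 ms (long enough to be meaningful) and
 * bfq_max_budget corresponds to 25000 sectors. A queue that received
 * only 2000 sectors of service in that interval is well below the
 * 12500-sector half-budget threshold and is flagged as slow; a queue
 * that consumed 20000 sectors is not, even if it eventually hit the
 * budget timeout.
 */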
2873 * To be deemed as soft real-time, an application must meet two
2874 * requirements. First, the application must not require an average
2875 * bandwidth higher than the approximate bandwidth required to play back or
2876 * record a compressed high-definition video.
2877 * The next function is invoked on the completion of the last request of a
2878 * batch, to compute the next-start time instant, soft_rt_next_start, such
2879 * that, if the next request of the application does not arrive before
2880 * soft_rt_next_start, then the above requirement on the bandwidth is met.
2882 * The second requirement is that the request pattern of the application is
2883 * isochronous, i.e., that, after issuing a request or a batch of requests,
2884 * the application stops issuing new requests until all its pending requests
2885 * have been completed. After that, the application may issue a new batch, and so on.
2887 * For this reason the next function is invoked to compute
2888 * soft_rt_next_start only for applications that meet this requirement,
2889 * whereas soft_rt_next_start is set to infinity for applications that do not.
2892 * Unfortunately, even a greedy application may happen to behave in an
2893 * isochronous way if the CPU load is high. In fact, the application may
2894 * stop issuing requests while the CPUs are busy serving other processes,
2895 * then restart, then stop again for a while, and so on. In addition, if
2896 * the disk achieves a low enough throughput with the request pattern
2897 * issued by the application (e.g., because the request pattern is random
2898 * and/or the device is slow), then the application may meet the above
2899 * bandwidth requirement too. To prevent such a greedy application from
2900 * being deemed soft real-time, a further rule is used in the computation of
2901 * soft_rt_next_start: soft_rt_next_start must be higher than the current
2902 * time plus the maximum time for which the arrival of a request is waited
2903 * for when a sync queue becomes idle, namely bfqd->bfq_slice_idle.
2904 * This filters out greedy applications, as the latter issue instead their
2905 * next request as soon as possible after the last one has been completed
2906 * (in contrast, when a batch of requests is completed, a soft real-time
2907 * application spends some time processing data).
2909 * Unfortunately, the last filter may easily generate false positives if
2910 * only bfqd->bfq_slice_idle is used as a reference time interval and one
2911 * or both the following cases occur:
2912 * 1) HZ is so low that the duration of a jiffy is comparable to or higher
2913 * than bfqd->bfq_slice_idle. This happens, e.g., on slow devices with a low HZ value (such as 100).
2915 * 2) jiffies, instead of increasing at a constant rate, may stop increasing
2916 * for a while, then suddenly 'jump' by several units to recover the lost
2917 * increments. This seems to happen, e.g., inside virtual machines.
2918 * To address this issue, we do not use as a reference time interval just
2919 * bfqd->bfq_slice_idle, but bfqd->bfq_slice_idle plus a few jiffies. In
2920 * particular we add the minimum number of jiffies for which the filter
2921 * seems to be quite precise also in embedded systems and KVM/QEMU virtual machines.
2924 static unsigned long bfq_bfqq_softrt_next_start(struct bfq_data *bfqd,
2925 struct bfq_queue *bfqq)
2927 return max(bfqq->last_idle_bklogged +
2928 HZ * bfqq->service_from_backlogged /
2929 bfqd->bfq_wr_max_softrt_rate,
2930 jiffies + nsecs_to_jiffies(bfqq->bfqd->bfq_slice_idle) + 4);
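/*
 * Editor's note, with hypothetical numbers: with HZ=1000, a queue
 * that received 2048 sectors of service since it last became
 * backlogged, and a bfq_wr_max_softrt_rate of 7000 sectors/sec, the
 * first term above places soft_rt_next_start about 292 jiffies after
 * last_idle_bklogged; the second term guarantees in any case a gap of
 * at least slice_idle (converted to jiffies) plus 4 jiffies from now,
 * to filter out greedy applications as explained above.
 */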
2934 * bfq_bfqq_expire - expire a queue.
2935 * @bfqd: device owning the queue.
2936 * @bfqq: the queue to expire.
2937 * @compensate: if true, compensate for the time spent idling.
2938 * @reason: the reason causing the expiration.
2940 * If the process associated with bfqq does slow I/O (e.g., because it
2941 * issues random requests), we charge bfqq with the time it has been
2942 * in service instead of the service it has received (see
2943 * bfq_bfqq_charge_time for details on how this goal is achieved). As
2944 * a consequence, bfqq will typically get higher timestamps upon
2945 * reactivation, and hence it will be rescheduled as if it had
2946 * received more service than what it has actually received. In the
2947 * end, bfqq receives less service in proportion to how slowly its
2948 * associated process consumes its budgets (and hence how seriously it
2949 * tends to lower the throughput). In addition, this time-charging
2950 * strategy guarantees time fairness among slow processes. In
2951 * contrast, if the process associated with bfqq is not slow, we
2952 * charge bfqq exactly with the service it has received.
2954 * Charging time to the first type of queues and the exact service to
2955 * the other has the effect of using the WF2Q+ policy to schedule the
2956 * former on a timeslice basis, without violating service domain
2957 * guarantees among the latter.
2959 void bfq_bfqq_expire(struct bfq_data *bfqd,
2960 struct bfq_queue *bfqq,
2962 enum bfqq_expiration reason)
2965 unsigned long delta = 0;
2966 struct bfq_entity *entity = &bfqq->entity;
2970 * Check whether the process is slow (see bfq_bfqq_is_slow).
2972 slow = bfq_bfqq_is_slow(bfqd, bfqq, compensate, reason, &delta);
2975 * Increase service_from_backlogged before next statement,
2976 * because the possible next invocation of
2977 * bfq_bfqq_charge_time would likely inflate
2978 * entity->service. In contrast, service_from_backlogged must
2979 * contain real service, to enable the soft real-time
2980 * heuristic to correctly compute the bandwidth consumed by
2983 bfqq->service_from_backlogged += entity->service;
2986 * As above explained, charge slow (typically seeky) and
2987 * timed-out queues with the time and not the service
2988 * received, to favor sequential workloads.
2990 * Processes doing I/O in the slower disk zones will tend to
2991 * be slow(er) even if not seeky. Therefore, since the
2992 * estimated peak rate is actually an average over the disk
2993 * surface, these processes may timeout just for bad luck. To
2994 * avoid punishing them, do not charge time to processes that
2995 * succeeded in consuming at least 2/3 of their budget. This
2996 * allows BFQ to preserve enough elasticity to still perform
2997 * bandwidth, and not time, distribution with little unlucky
2998 * or quasi-sequential processes.
3000 if (bfqq->wr_coeff == 1 &&
3002 (reason == BFQQE_BUDGET_TIMEOUT &&
3003 bfq_bfqq_budget_left(bfqq) >= entity->budget / 3)))
3004 bfq_bfqq_charge_time(bfqd, bfqq, delta);
3006 if (reason == BFQQE_TOO_IDLE &&
3007 entity->service <= 2 * entity->budget / 10)
3008 bfq_clear_bfqq_IO_bound(bfqq);
3010 if (bfqd->low_latency && bfqq->wr_coeff == 1)
3011 bfqq->last_wr_start_finish = jiffies;
3013 if (bfqd->low_latency && bfqd->bfq_wr_max_softrt_rate > 0 &&
3014 RB_EMPTY_ROOT(&bfqq->sort_list)) {
3016 * If we get here, and there are no outstanding
3017 * requests, then the request pattern is isochronous
3018 * (see the comments on the function
3019 * bfq_bfqq_softrt_next_start()). Thus we can compute
3020 * soft_rt_next_start. If, instead, the queue still
3021 * has outstanding requests, then we have to wait for
3022 * the completion of all the outstanding requests to
3023 * discover whether the request pattern is actually
3026 if (bfqq->dispatched == 0)
3027 bfqq->soft_rt_next_start =
3028 bfq_bfqq_softrt_next_start(bfqd, bfqq);
3031 * The application is still waiting for the
3032 * completion of one or more requests:
3033 * prevent it from possibly being incorrectly
3034 * deemed as soft real-time by setting its
3035 * soft_rt_next_start to infinity. In fact,
3036 * without this assignment, the application
3037 * would be incorrectly deemed as soft real-time if:
3039 * 1) it issued a new request before the
3040 * completion of all its in-flight requests, and
3042 * 2) at that time, its soft_rt_next_start
3043 * happened to be in the past.
3045 bfqq->soft_rt_next_start =
3046 bfq_greatest_from_now();
3048 * Schedule an update of soft_rt_next_start to when
3049 * the task may be discovered to be isochronous.
3051 bfq_mark_bfqq_softrt_update(bfqq);
3055 bfq_log_bfqq(bfqd, bfqq,
3056 "expire (%d, slow %d, num_disp %d, short_ttime %d)", reason,
3057 slow, bfqq->dispatched, bfq_bfqq_has_short_ttime(bfqq));
3060 * Increase, decrease or leave budget unchanged according to reason.
3063 __bfq_bfqq_recalc_budget(bfqd, bfqq, reason);
3065 __bfq_bfqq_expire(bfqd, bfqq);
3067 /* mark bfqq as waiting a request only if a bic still points to it */
3068 if (ref > 1 && !bfq_bfqq_busy(bfqq) &&
3069 reason != BFQQE_BUDGET_TIMEOUT &&
3070 reason != BFQQE_BUDGET_EXHAUSTED)
3071 bfq_mark_bfqq_non_blocking_wait_rq(bfqq);
3075 * Budget timeout is not implemented through a dedicated timer, but
3076 * just checked on request arrivals and completions, as well as on
3077 * idle timer expirations.
3079 static bool bfq_bfqq_budget_timeout(struct bfq_queue *bfqq)
3081 return time_is_before_eq_jiffies(bfqq->budget_timeout);
3085 * If we expire a queue that is actively waiting (i.e., with the
3086 * device idled) for the arrival of a new request, then we may incur
3087 * the timestamp misalignment problem described in the body of the
3088 * function __bfq_activate_entity. Hence we return true only if this
3089 * condition does not hold, or if the queue is slow enough to deserve
3090 * only to be kicked off for preserving a high throughput.
3092 static bool bfq_may_expire_for_budg_timeout(struct bfq_queue *bfqq)
3094 bfq_log_bfqq(bfqq->bfqd, bfqq,
3095 "may_budget_timeout: wait_request %d left %d timeout %d",
3096 bfq_bfqq_wait_request(bfqq),
3097 bfq_bfqq_budget_left(bfqq) >= bfqq->entity.budget / 3,
3098 bfq_bfqq_budget_timeout(bfqq));
3100 return (!bfq_bfqq_wait_request(bfqq) ||
3101 bfq_bfqq_budget_left(bfqq) >= bfqq->entity.budget / 3)
3103 bfq_bfqq_budget_timeout(bfqq);
3107 * For a queue that becomes empty, device idling is allowed only if
3108 * this function returns true for the queue. As a consequence, since
3109 * device idling plays a critical role in both throughput boosting and
3110 * service guarantees, the return value of this function plays a
3111 * critical role in both these aspects as well.
3113 * In a nutshell, this function returns true only if idling is
3114 * beneficial for throughput or, even if detrimental for throughput,
3115 * idling is however necessary to preserve service guarantees (low
3116 * latency, desired throughput distribution, ...). In particular, on
3117 * NCQ-capable devices, this function tries to return false, so as to
3118 * help keep the drives' internal queues full, whenever this helps the
3119 * device boost the throughput without causing any service-guarantee issue.
3122 * In more detail, the return value of this function is obtained by,
3123 * first, computing a number of boolean variables that take into
3124 * account throughput and service-guarantee issues, and, then,
3125 * combining these variables in a logical expression. Most of the
3126 * issues taken into account are not trivial. We discuss these issues
3127 * individually while introducing the variables.
3129 static bool bfq_bfqq_may_idle(struct bfq_queue *bfqq)
3131 struct bfq_data *bfqd = bfqq->bfqd;
3132 bool rot_without_queueing =
3133 !blk_queue_nonrot(bfqd->queue) && !bfqd->hw_tag,
3134 bfqq_sequential_and_IO_bound,
3135 idling_boosts_thr, idling_boosts_thr_without_issues,
3136 idling_needed_for_service_guarantees,
3137 asymmetric_scenario;
3139 if (bfqd->strict_guarantees)
3143 * Idling is performed only if slice_idle > 0. In addition, we
3144 * do not idle if
3145 * (a) bfqq is async
3146 * (b) bfqq is in the idle io prio class: in this case we do
3147 * not idle because we want to minimize the bandwidth that
3148 * queues in this class can steal from higher-priority queues
3150 if (bfqd->bfq_slice_idle == 0 || !bfq_bfqq_sync(bfqq) ||
3151 bfq_class_idle(bfqq))
3154 bfqq_sequential_and_IO_bound = !BFQQ_SEEKY(bfqq) &&
3155 bfq_bfqq_IO_bound(bfqq) && bfq_bfqq_has_short_ttime(bfqq);
3158 * The next variable takes into account the cases where idling
3159 * boosts the throughput.
3161 * The value of the variable is computed considering, first, that
3162 * idling is virtually always beneficial for the throughput if:
3163 * (a) the device is not NCQ-capable and rotational, or
3164 * (b) regardless of the presence of NCQ, the device is rotational and
3165 * the request pattern for bfqq is I/O-bound and sequential, or
3166 * (c) regardless of whether it is rotational, the device is
3167 * not NCQ-capable and the request pattern for bfqq is
3168 * I/O-bound and sequential.
3170 * Secondly, and in contrast to the above item (b), idling an
3171 * NCQ-capable flash-based device would not boost the
3172 * throughput even with sequential I/O; rather it would lower
3173 * the throughput in proportion to how fast the device
3174 * is. Accordingly, the next variable is true if any of the
3175 * above conditions (a), (b) or (c) is true, and, in
3176 * particular, happens to be false if bfqd is an NCQ-capable
3177 * flash-based device.
3179 idling_boosts_thr = rot_without_queueing ||
3180 ((!blk_queue_nonrot(bfqd->queue) || !bfqd->hw_tag) &&
3181 bfqq_sequential_and_IO_bound);
3184 * The value of the next variable,
3185 * idling_boosts_thr_without_issues, is equal to that of
3186 * idling_boosts_thr, unless a special case holds. In this
3187 * special case, described below, idling may cause problems to
3188 * weight-raised queues.
3190 * When the request pool is saturated (e.g., in the presence
3191 * of write hogs), if the processes associated with
3192 * non-weight-raised queues ask for requests at a lower rate,
3193 * then processes associated with weight-raised queues have a
3194 * higher probability to get a request from the pool
3195 * immediately (or at least soon) when they need one. Thus
3196 * they have a higher probability to actually get a fraction
3197 * of the device throughput proportional to their high
3198 * weight. This is especially true with NCQ-capable drives,
3199 * which enqueue several requests in advance, and further
3200 * reorder internally-queued requests.
3202 * For this reason, we force to false the value of
3203 * idling_boosts_thr_without_issues if there are weight-raised
3204 * busy queues. In this case, and if bfqq is not weight-raised,
3205 * this guarantees that the device is not idled for bfqq (if,
3206 * instead, bfqq is weight-raised, then idling will be
3207 * guaranteed by another variable, see below). Combined with
3208 * the timestamping rules of BFQ (see [1] for details), this
3209 * behavior causes bfqq, and hence any sync non-weight-raised
3210 * queue, to get a lower number of requests served, and thus
3211 * to ask for a lower number of requests from the request
3212 * pool, before the busy weight-raised queues get served
3213 * again. This often mitigates starvation problems in the
3214 * presence of heavy write workloads and NCQ, thereby
3215 * guaranteeing a higher application and system responsiveness
3216 * in these hostile scenarios.
3218 idling_boosts_thr_without_issues = idling_boosts_thr &&
3219 bfqd->wr_busy_queues == 0;
3222 * There is then a case where idling must be performed not
3223 * for throughput concerns, but to preserve service
3226 * To introduce this case, we can note that allowing the drive
3227 * to enqueue more than one request at a time, and hence
3228 * delegating de facto final scheduling decisions to the
3229 * drive's internal scheduler, entails loss of control on the
3230 * actual request service order. In particular, the critical
3231 * situation is when requests from different processes happen
3232 * to be present, at the same time, in the internal queue(s)
3233 * of the drive. In such a situation, the drive, by deciding
3234 * the service order of the internally-queued requests, does
3235 * determine also the actual throughput distribution among
3236 * these processes. But the drive typically has no notion or
3237 * concern about per-process throughput distribution, and
3238 * makes its decisions only on a per-request basis. Therefore,
3239 * the service distribution enforced by the drive's internal
3240 * scheduler is likely to coincide with the desired
3241 * device-throughput distribution only in a completely
3242 * symmetric scenario where:
3243 * (i) each of these processes must get the same throughput as
3244 * the others, and
3245 * (ii) all these processes have the same I/O pattern
3246 * (either sequential or random).
3247 * In fact, in such a scenario, the drive will tend to treat
3248 * the requests of each of these processes in about the same
3249 * way as the requests of the others, and thus to provide
3250 * each of these processes with about the same throughput
3251 * (which is exactly the desired throughput distribution). In
3252 * contrast, in any asymmetric scenario, device idling is
3253 * certainly needed to guarantee that bfqq receives its
3254 * assigned fraction of the device throughput (see [1] for
3257 * We address this issue by controlling, actually, only the
3258 * symmetry sub-condition (i), i.e., provided that
3259 * sub-condition (i) holds, idling is not performed,
3260 * regardless of whether sub-condition (ii) holds. In other
3261 * words, only if sub-condition (i) holds, then idling is
3262 * allowed, and the device tends to be prevented from queueing
3263 * many requests, possibly of several processes. The reason
3264 * for not controlling also sub-condition (ii) is that we
3265 * exploit preemption to preserve guarantees in case of
3266 * symmetric scenarios, even if (ii) does not hold, as
3267 * explained in the next two paragraphs.
3269 * Even if a queue, say Q, is expired when it remains idle, Q
3270 * can still preempt the new in-service queue if the next
3271 * request of Q arrives soon (see the comments on
3272 * bfq_bfqq_update_budg_for_activation). If all queues and
3273 * groups have the same weight, this form of preemption,
3274 * combined with the hole-recovery heuristic described in the
3275 * comments on function bfq_bfqq_update_budg_for_activation,
3276 * are enough to preserve a correct bandwidth distribution in
3277 * the mid term, even without idling. In fact, even if not
3278 * idling allows the internal queues of the device to contain
3279 * many requests, and thus to reorder requests, we can rather
3280 * safely assume that the internal scheduler still preserves a
3281 * minimum of mid-term fairness. The motivation for using
3282 * preemption instead of idling is that, by not idling,
3283 * service guarantees are preserved without minimally
3284 * sacrificing throughput. In other words, both a high
3285 * throughput and its desired distribution are obtained.
3287 * More precisely, this preemption-based, idleless approach
3288 * provides fairness in terms of IOPS, and not sectors per
3289 * second. This can be seen with a simple example. Suppose
3290 * that there are two queues with the same weight, but that
3291 * the first queue receives requests of 8 sectors, while the
3292 * second queue receives requests of 1024 sectors. In
3293 * addition, suppose that each of the two queues contains at
3294 * most one request at a time, which implies that each queue
3295 * always remains idle after it is served. Finally, after
3296 * remaining idle, each queue receives very quickly a new
3297 * request. It follows that the two queues are served
3298 * alternately, preempting each other if needed. This
3299 * implies that, although both queues have the same weight,
3300 * the queue with large requests receives a service that is
3301 * 1024/8 times as high as the service received by the other queue.
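 * To make the arithmetic explicit with purely illustrative
 * numbers: in a window in which each queue gets, say, 100 of its
 * requests completed, both queues obtain the same IOPS, but the
 * first transfers 100*8 = 800 sectors while the second transfers
 * 100*1024 = 102400 sectors, i.e., 1024/8 = 128 times as many.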
3304 * On the other hand, device idling is performed, and thus
3305 * pure sector-domain guarantees are provided, for the
3306 * following queues, which are likely to need stronger
3307 * throughput guarantees: weight-raised queues, and queues
3308 * with a higher weight than other queues. When such queues
3309 * are active, sub-condition (i) is false, which triggers device idling.
3312 * According to the above considerations, the next variable is
3313 * true (only) if sub-condition (i) does not hold. To compute the
3314 * value of this variable, we not only use the return value of
3315 * the function bfq_symmetric_scenario(), but also check
3316 * whether bfqq is being weight-raised, because
3317 * bfq_symmetric_scenario() does not take into account
3318 * weight-raised queues (see comments on
3319 * bfq_weights_tree_add()). In particular, if bfqq is being
3320 * weight-raised, it is important to idle only if there are
3321 * other, non-weight-raised queues that may steal throughput
3322 * from bfqq. Actually, we should be even more precise, and
3323 * differentiate between interactive weight raising and
3324 * soft real-time weight raising.
3326 * As a side note, it is worth considering that the above
3327 * device-idling countermeasures may however fail in the
3328 * following unlucky scenario: if idling is (correctly)
3329 * disabled in a time period during which all symmetry
3330 * sub-conditions hold, and hence the device is allowed to
3331 * enqueue many requests, but at some later point in time some
3332 * sub-condition ceases to hold, then it may become impossible
3333 * to let requests be served in the desired order until all
3334 * the requests already queued in the device have been served.
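 * In terms of the assignment just below, the scenario is flagged
 * as asymmetric when bfqq is being weight-raised while at least
 * one busy queue is not, or when bfq_symmetric_scenario() returns
 * false (roughly, when not all queues or groups have the same
 * weight).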
3336 asymmetric_scenario = (bfqq->wr_coeff > 1 &&
3337 bfqd->wr_busy_queues < bfqd->busy_queues) ||
3338 !bfq_symmetric_scenario(bfqd);
3341 * Finally, there is a case where maximizing throughput is the
3342 * best choice even if it may cause unfairness toward
3343 * bfqq. Such a case is when bfqq became active in a burst of
3344 * queue activations. Queues that became active during a large
3345 * burst benefit only from throughput, as discussed in the
3346 * comments on bfq_handle_burst. Thus, if bfqq became active
3347 * in a burst and not idling the device maximizes throughput,
3348 * then the device must not be idled, because not idling the
3349 * device provides bfqq and all other queues in the burst with
3350 * maximum benefit. Combining this and the above case, we can
3351 * now establish when idling is actually needed to preserve
3352 * service guarantees.
3354 idling_needed_for_service_guarantees =
3355 asymmetric_scenario && !bfq_bfqq_in_large_burst(bfqq);
3358 * We have now all the components we need to compute the
3359 * return value of the function, which is true only if idling
3360 * either boosts the throughput (without issues), or is
3361 * necessary to preserve service guarantees.
3363 return idling_boosts_thr_without_issues ||
3364 idling_needed_for_service_guarantees;
3368 * If the in-service queue is empty but the function bfq_bfqq_may_idle
3369 * returns true, then:
3370 * 1) the queue must remain in service and cannot be expired, and
3371 * 2) the device must be idled to wait for the possible arrival of a new
3372 * request for the queue.
3373 * See the comments on the function bfq_bfqq_may_idle for the reasons
3374 * why performing device idling is the best choice to boost the throughput
3375 * and preserve service guarantees when bfq_bfqq_may_idle itself returns true.
3378 static bool bfq_bfqq_must_idle(struct bfq_queue *bfqq)
3380 return RB_EMPTY_ROOT(&bfqq->sort_list) && bfq_bfqq_may_idle(bfqq);
3384 * Select a queue for service. If we have a current queue in service,
3385 * check whether to continue servicing it, or retrieve and set a new one.
3387 static struct bfq_queue *bfq_select_queue(struct bfq_data *bfqd)
3389 struct bfq_queue *bfqq;
3390 struct request *next_rq;
3391 enum bfqq_expiration reason = BFQQE_BUDGET_TIMEOUT;
3393 bfqq = bfqd->in_service_queue;
3397 bfq_log_bfqq(bfqd, bfqq, "select_queue: already in-service queue");
3399 if (bfq_may_expire_for_budg_timeout(bfqq) &&
3400 !bfq_bfqq_wait_request(bfqq) &&
3401 !bfq_bfqq_must_idle(bfqq))
3406 * This loop is rarely executed more than once. Even when it
3407 * happens, it is much more convenient to re-execute this loop
3408 * than to return NULL and trigger a new dispatch to get a request served.
3411 next_rq = bfqq->next_rq;
3413 * If bfqq has requests queued and it has enough budget left to
3414 * serve them, keep the queue, otherwise expire it.
3417 if (bfq_serv_to_charge(next_rq, bfqq) >
3418 bfq_bfqq_budget_left(bfqq)) {
3420 * Expire the queue for budget exhaustion,
3421 * which makes sure that the next budget is
3422 * enough to serve the next request, even if
3423 * it comes from the fifo expired path.
3425 reason = BFQQE_BUDGET_EXHAUSTED;
3429 * The idle timer may be pending because we may
3430 * not disable disk idling even when a new request arrives.
3433 if (bfq_bfqq_wait_request(bfqq)) {
3435 * If we get here: 1) at least one new request
3436 * has arrived but we have not disabled the
3437 * timer because the request was too small,
3438 * 2) then the block layer has unplugged
3439 * the device, causing the dispatch to be invoked.
3442 * Since the device is unplugged, now the
3443 * requests are probably large enough to
3444 * provide a reasonable throughput.
3445 * So we disable idling.
3447 bfq_clear_bfqq_wait_request(bfqq);
3448 hrtimer_try_to_cancel(&bfqd->idle_slice_timer);
3449 bfqg_stats_update_idle_time(bfqq_group(bfqq));
3456 * No requests pending. However, if the in-service queue is idling
3457 * for a new request, or has requests waiting for a completion and
3458 * may idle after their completion, then keep it anyway.
3460 if (bfq_bfqq_wait_request(bfqq) ||
3461 (bfqq->dispatched != 0 && bfq_bfqq_may_idle(bfqq))) {
3466 reason = BFQQE_NO_MORE_REQUESTS;
3468 bfq_bfqq_expire(bfqd, bfqq, false, reason);
3470 bfqq = bfq_set_in_service_queue(bfqd);
3472 bfq_log_bfqq(bfqd, bfqq, "select_queue: checking new queue");
3477 bfq_log_bfqq(bfqd, bfqq, "select_queue: returned this queue");
3479 bfq_log(bfqd, "select_queue: no queue returned");
3484 static void bfq_update_wr_data(struct bfq_data *bfqd, struct bfq_queue *bfqq)
3486 struct bfq_entity *entity = &bfqq->entity;
3488 if (bfqq->wr_coeff > 1) { /* queue is being weight-raised */
3489 bfq_log_bfqq(bfqd, bfqq,
3490 "raising period dur %u/%u msec, old coeff %u, w %d(%d)",
3491 jiffies_to_msecs(jiffies - bfqq->last_wr_start_finish),
3492 jiffies_to_msecs(bfqq->wr_cur_max_time),
3494 bfqq->entity.weight, bfqq->entity.orig_weight);
3496 if (entity->prio_changed)
3497 bfq_log_bfqq(bfqd, bfqq, "WARN: pending prio change");
3500 * If the queue was activated in a burst, or too much
3501 * time has elapsed from the beginning of this
3502 * weight-raising period, then end weight raising.
3504 if (bfq_bfqq_in_large_burst(bfqq))
3505 bfq_bfqq_end_wr(bfqq);
3506 else if (time_is_before_jiffies(bfqq->last_wr_start_finish +
3507 bfqq->wr_cur_max_time)) {
3508 if (bfqq->wr_cur_max_time != bfqd->bfq_wr_rt_max_time ||
3509 time_is_before_jiffies(bfqq->wr_start_at_switch_to_srt +
3510 bfq_wr_duration(bfqd)))
3511 bfq_bfqq_end_wr(bfqq);
3513 /* switch back to interactive wr */
3514 bfqq->wr_coeff = bfqd->bfq_wr_coeff;
3515 bfqq->wr_cur_max_time = bfq_wr_duration(bfqd);
3516 bfqq->last_wr_start_finish =
3517 bfqq->wr_start_at_switch_to_srt;
3518 bfqq->entity.prio_changed = 1;
3523 * To improve latency (for this or other queues), immediately
3524 * update weight both if it must be raised and if it must be
3525 * lowered. Since the entity may be on some active tree here, and
3526 * might have a pending change of its ioprio class, invoke
3527 * the next function with the last parameter unset (see the
3528 * comments on the function).
3530 if ((entity->weight > entity->orig_weight) != (bfqq->wr_coeff > 1))
3531 __bfq_entity_update_weight_prio(bfq_entity_service_tree(entity),
3536 * Dispatch next request from bfqq.
3538 static struct request *bfq_dispatch_rq_from_bfqq(struct bfq_data *bfqd,
3539 struct bfq_queue *bfqq)
3541 struct request *rq = bfqq->next_rq;
3542 unsigned long service_to_charge;
3544 service_to_charge = bfq_serv_to_charge(rq, bfqq);
3546 bfq_bfqq_served(bfqq, service_to_charge);
3548 bfq_dispatch_remove(bfqd->queue, rq);
3551 * If weight raising has to terminate for bfqq, then the next
3552 * function causes an immediate update of bfqq's weight,
3553 * without waiting for the next activation. As a consequence, on
3554 * expiration, bfqq will be timestamped as if it had never been
3555 * weight-raised during this service slot, even if it has
3556 * received part or even most of the service as a
3557 * weight-raised queue. This inflates bfqq's timestamps, which
3558 * is beneficial, as bfqq is then more willing to leave the
3559 * device immediately to possible other weight-raised queues.
3561 bfq_update_wr_data(bfqd, bfqq);
3564 * Expire bfqq, pretending that its budget expired, if bfqq
3565 * belongs to CLASS_IDLE and other queues are waiting for
3568 if (bfqd->busy_queues > 1 && bfq_class_idle(bfqq))
3574 bfq_bfqq_expire(bfqd, bfqq, false, BFQQE_BUDGET_EXHAUSTED);
3578 static bool bfq_has_work(struct blk_mq_hw_ctx *hctx)
3580 struct bfq_data *bfqd = hctx->queue->elevator->elevator_data;
3583 * Avoiding lock: a race on bfqd->busy_queues should cause at
3584 * most a call to dispatch for nothing
3586 return !list_empty_careful(&bfqd->dispatch) ||
3587 bfqd->busy_queues > 0;
3590 static struct request *__bfq_dispatch_request(struct blk_mq_hw_ctx *hctx)
3592 struct bfq_data *bfqd = hctx->queue->elevator->elevator_data;
3593 struct request *rq = NULL;
3594 struct bfq_queue *bfqq = NULL;
3596 if (!list_empty(&bfqd->dispatch)) {
3597 rq = list_first_entry(&bfqd->dispatch, struct request,
3599 list_del_init(&rq->queuelist);
3605 * Increment counters here, because this
3606 * dispatch does not follow the standard
3607 * dispatch flow (where counters are incremented).
3612 goto inc_in_driver_start_rq;
3616 * We exploit the put_rq_private hook to decrement
3617 * rq_in_driver, but put_rq_private will not be
3618 * invoked on this request. So, to avoid unbalance,
3619 * just start this request, without incrementing
3620 * rq_in_driver. As a negative consequence,
3621 * rq_in_driver is deceptively lower than it should be
3622 * while this request is in service. This may cause
3623 * bfq_schedule_dispatch to be invoked uselessly.
3625 * As for implementing an exact solution, the
3626 * put_request hook, if defined, is probably invoked
3627 * also on this request. So, by exploiting this hook,
3628 * we could 1) increment rq_in_driver here, and 2)
3629 * decrement it in put_request. Such a solution would
3630 * let the value of the counter be always accurate,
3631 * but it would entail using an extra interface
3632 * function. This cost seems higher than the benefit,
3633 * given that the frequency of non-elevator-private
3634 * requests is very low.
3639 bfq_log(bfqd, "dispatch requests: %d busy queues", bfqd->busy_queues);
3641 if (bfqd->busy_queues == 0)
3645 * Force device to serve one request at a time if
3646 * strict_guarantees is true. Forcing this service scheme is
3647 * currently the ONLY way to guarantee that the request
3648 * service order enforced by the scheduler is respected by a
3649 * queueing device. Otherwise the device is free even to make
3650 * some unlucky request wait for as long as the device wishes.
3653 * Of course, serving one request at a time may cause loss of throughput.
3656 if (bfqd->strict_guarantees && bfqd->rq_in_driver > 0)
3659 bfqq = bfq_select_queue(bfqd);
3663 rq = bfq_dispatch_rq_from_bfqq(bfqd, bfqq);
3666 inc_in_driver_start_rq:
3667 bfqd->rq_in_driver++;
3669 rq->rq_flags |= RQF_STARTED;
3675 static struct request *bfq_dispatch_request(struct blk_mq_hw_ctx *hctx)
3677 struct bfq_data *bfqd = hctx->queue->elevator->elevator_data;
3680 spin_lock_irq(&bfqd->lock);
3682 rq = __bfq_dispatch_request(hctx);
3683 spin_unlock_irq(&bfqd->lock);
3689 * Task holds one reference to the queue, dropped when task exits. Each rq
3690 * in-flight on this queue also holds a reference, dropped when rq is freed.
3692 * Scheduler lock must be held here. Recall not to use bfqq after calling
3693 * this function on it.
3695 void bfq_put_queue(struct bfq_queue *bfqq)
3697 #ifdef CONFIG_BFQ_GROUP_IOSCHED
3698 struct bfq_group *bfqg = bfqq_group(bfqq);
3702 bfq_log_bfqq(bfqq->bfqd, bfqq, "put_queue: %p %d",
3709 if (bfq_bfqq_sync(bfqq))
3711 * The fact that this queue is being destroyed does not
3712 * invalidate the fact that this queue may have been
3713 * activated during the current burst. As a consequence,
3714 * although the queue does not exist anymore, and hence
3715 * needs to be removed from the burst list if there,
3716 * the burst size must not be decremented.
3718 hlist_del_init(&bfqq->burst_list_node);
3720 kmem_cache_free(bfq_pool, bfqq);
3721 #ifdef CONFIG_BFQ_GROUP_IOSCHED
3722 bfqg_and_blkg_put(bfqg);
3726 static void bfq_put_cooperator(struct bfq_queue *bfqq)
3728 struct bfq_queue *__bfqq, *next;
3731 * If this queue was scheduled to merge with another queue, be
3732 * sure to drop the reference taken on that queue (and others in
3733 * the merge chain). See bfq_setup_merge and bfq_merge_bfqqs.
3735 __bfqq = bfqq->new_bfqq;
3739 next = __bfqq->new_bfqq;
3740 bfq_put_queue(__bfqq);
3745 static void bfq_exit_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq)
3747 if (bfqq == bfqd->in_service_queue) {
3748 __bfq_bfqq_expire(bfqd, bfqq);
3749 bfq_schedule_dispatch(bfqd);
3752 bfq_log_bfqq(bfqd, bfqq, "exit_bfqq: %p, %d", bfqq, bfqq->ref);
3754 bfq_put_cooperator(bfqq);
3756 bfq_put_queue(bfqq); /* release process reference */
3759 static void bfq_exit_icq_bfqq(struct bfq_io_cq *bic, bool is_sync)
3761 struct bfq_queue *bfqq = bic_to_bfqq(bic, is_sync);
3762 struct bfq_data *bfqd;
3765 bfqd = bfqq->bfqd; /* NULL if scheduler already exited */
3768 unsigned long flags;
3770 spin_lock_irqsave(&bfqd->lock, flags);
3772 bfq_exit_bfqq(bfqd, bfqq);
3773 bic_set_bfqq(bic, NULL, is_sync);
3774 spin_unlock_irqrestore(&bfqd->lock, flags);
3778 static void bfq_exit_icq(struct io_cq *icq)
3780 struct bfq_io_cq *bic = icq_to_bic(icq);
3782 bfq_exit_icq_bfqq(bic, true);
3783 bfq_exit_icq_bfqq(bic, false);
3787 * Update the entity prio values; note that the new values will not
3788 * be used until the next (re)activation.
3791 bfq_set_next_ioprio_data(struct bfq_queue *bfqq, struct bfq_io_cq *bic)
3793 struct task_struct *tsk = current;
3795 struct bfq_data *bfqd = bfqq->bfqd;
3800 ioprio_class = IOPRIO_PRIO_CLASS(bic->ioprio);
3801 switch (ioprio_class) {
3803 dev_err(bfqq->bfqd->queue->backing_dev_info->dev,
3804 "bfq: bad prio class %d\n", ioprio_class);
3806 case IOPRIO_CLASS_NONE:
3808 * No prio set, inherit CPU scheduling settings.
3810 bfqq->new_ioprio = task_nice_ioprio(tsk);
3811 bfqq->new_ioprio_class = task_nice_ioclass(tsk);
3813 case IOPRIO_CLASS_RT:
3814 bfqq->new_ioprio = IOPRIO_PRIO_DATA(bic->ioprio);
3815 bfqq->new_ioprio_class = IOPRIO_CLASS_RT;
3817 case IOPRIO_CLASS_BE:
3818 bfqq->new_ioprio = IOPRIO_PRIO_DATA(bic->ioprio);
3819 bfqq->new_ioprio_class = IOPRIO_CLASS_BE;
3821 case IOPRIO_CLASS_IDLE:
3822 bfqq->new_ioprio_class = IOPRIO_CLASS_IDLE;
3823 bfqq->new_ioprio = 7;
3827 if (bfqq->new_ioprio >= IOPRIO_BE_NR) {
3828 pr_crit("bfq_set_next_ioprio_data: new_ioprio %d\n",
3830 bfqq->new_ioprio = IOPRIO_BE_NR - 1;
3833 bfqq->entity.new_weight = bfq_ioprio_to_weight(bfqq->new_ioprio);
3834 bfqq->entity.prio_changed = 1;
3837 static struct bfq_queue *bfq_get_queue(struct bfq_data *bfqd,
3838 struct bio *bio, bool is_sync,
3839 struct bfq_io_cq *bic);
3841 static void bfq_check_ioprio_change(struct bfq_io_cq *bic, struct bio *bio)
3843 struct bfq_data *bfqd = bic_to_bfqd(bic);
3844 struct bfq_queue *bfqq;
3845 int ioprio = bic->icq.ioc->ioprio;
3848 * This condition may trigger on a newly created bic, be sure to
3849 * drop the lock before returning.
3851 if (unlikely(!bfqd) || likely(bic->ioprio == ioprio))
3854 bic->ioprio = ioprio;
3856 bfqq = bic_to_bfqq(bic, false);
3858 /* release process reference on this queue */
3859 bfq_put_queue(bfqq);
3860 bfqq = bfq_get_queue(bfqd, bio, BLK_RW_ASYNC, bic);
3861 bic_set_bfqq(bic, bfqq, false);
3864 bfqq = bic_to_bfqq(bic, true);
3866 bfq_set_next_ioprio_data(bfqq, bic);
3869 static void bfq_init_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq,
3870 struct bfq_io_cq *bic, pid_t pid, int is_sync)
3872 RB_CLEAR_NODE(&bfqq->entity.rb_node);
3873 INIT_LIST_HEAD(&bfqq->fifo);
3874 INIT_HLIST_NODE(&bfqq->burst_list_node);
3880 bfq_set_next_ioprio_data(bfqq, bic);
3884 * No need to mark as has_short_ttime if in
3885 * idle_class, because no device idling is performed
3886 * for queues in idle class
3888 if (!bfq_class_idle(bfqq))
3889 /* tentatively mark as has_short_ttime */
3890 bfq_mark_bfqq_has_short_ttime(bfqq);
3891 bfq_mark_bfqq_sync(bfqq);
3892 bfq_mark_bfqq_just_created(bfqq);
3894 bfq_clear_bfqq_sync(bfqq);
3896 /* set end request to minus infinity from now */
3897 bfqq->ttime.last_end_request = ktime_get_ns() + 1;
3899 bfq_mark_bfqq_IO_bound(bfqq);
3903 /* Tentative initial value to trade off between thr and lat */
3904 bfqq->max_budget = (2 * bfq_max_budget(bfqd)) / 3;
3905 bfqq->budget_timeout = bfq_smallest_from_now();
3908 bfqq->last_wr_start_finish = jiffies;
3909 bfqq->wr_start_at_switch_to_srt = bfq_smallest_from_now();
3910 bfqq->split_time = bfq_smallest_from_now();
3913 * Set to the value for which bfqq will not be deemed as
3914 * soft rt when it becomes backlogged.
3916 bfqq->soft_rt_next_start = bfq_greatest_from_now();
3918 /* first request is almost certainly seeky */
3919 bfqq->seek_history = 1;
3922 static struct bfq_queue **bfq_async_queue_prio(struct bfq_data *bfqd,
3923 struct bfq_group *bfqg,
3924 int ioprio_class, int ioprio)
3926 switch (ioprio_class) {
3927 case IOPRIO_CLASS_RT:
3928 return &bfqg->async_bfqq[0][ioprio];
3929 case IOPRIO_CLASS_NONE:
3930 ioprio = IOPRIO_NORM;
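		/* deliberately fall through to the IOPRIO_CLASS_BE case */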
3932 case IOPRIO_CLASS_BE:
3933 return &bfqg->async_bfqq[1][ioprio];
3934 case IOPRIO_CLASS_IDLE:
3935 return &bfqg->async_idle_bfqq;
3941 static struct bfq_queue *bfq_get_queue(struct bfq_data *bfqd,
3942 struct bio *bio, bool is_sync,
3943 struct bfq_io_cq *bic)
3945 const int ioprio = IOPRIO_PRIO_DATA(bic->ioprio);
3946 const int ioprio_class = IOPRIO_PRIO_CLASS(bic->ioprio);
3947 struct bfq_queue **async_bfqq = NULL;
3948 struct bfq_queue *bfqq;
3949 struct bfq_group *bfqg;
3953 bfqg = bfq_find_set_group(bfqd, bio_blkcg(bio));
3955 bfqq = &bfqd->oom_bfqq;
3960 async_bfqq = bfq_async_queue_prio(bfqd, bfqg, ioprio_class,
3967 bfqq = kmem_cache_alloc_node(bfq_pool,
3968 GFP_NOWAIT | __GFP_ZERO | __GFP_NOWARN,
3972 bfq_init_bfqq(bfqd, bfqq, bic, current->pid,
3974 bfq_init_entity(&bfqq->entity, bfqg);
3975 bfq_log_bfqq(bfqd, bfqq, "allocated");
3977 bfqq = &bfqd->oom_bfqq;
3978 bfq_log_bfqq(bfqd, bfqq, "using oom bfqq");
3983 * Pin the queue now that it's allocated, scheduler exit will prune it.
3988 * Extra group reference, w.r.t. sync
3989 * queue. This extra reference is removed
3990 * only if bfqq->bfqg disappears, to
3991 * guarantee that this queue is not freed
3992 * until its group goes away.
3994 bfq_log_bfqq(bfqd, bfqq, "get_queue, bfqq not in async: %p, %d",
4000 bfqq->ref++; /* get a process reference to this queue */
4001 bfq_log_bfqq(bfqd, bfqq, "get_queue, at end: %p, %d", bfqq, bfqq->ref);
4006 static void bfq_update_io_thinktime(struct bfq_data *bfqd,
4007 struct bfq_queue *bfqq)
4009 struct bfq_ttime *ttime = &bfqq->ttime;
4010 u64 elapsed = ktime_get_ns() - bfqq->ttime.last_end_request;
4012 elapsed = min_t(u64, elapsed, 2ULL * bfqd->bfq_slice_idle);
4014 ttime->ttime_samples = (7*bfqq->ttime.ttime_samples + 256) / 8;
4015 ttime->ttime_total = div_u64(7*ttime->ttime_total + 256*elapsed, 8);
4016 ttime->ttime_mean = div64_ul(ttime->ttime_total + 128,
4017 ttime->ttime_samples);
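	/*
	 * The three updates above implement a fixed-point exponential
	 * moving average with decay factor 7/8 and scale factor 256.
	 * As a purely illustrative check: if the sampled think time
	 * were a constant E, ttime_samples would converge to 256,
	 * ttime_total to 256*E, and so ttime_mean to about E (the
	 * +128 term only rounds the final division).
	 */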
4021 bfq_update_io_seektime(struct bfq_data *bfqd, struct bfq_queue *bfqq,
4024 bfqq->seek_history <<= 1;
4025 bfqq->seek_history |=
4026 get_sdist(bfqq->last_request_pos, rq) > BFQQ_SEEK_THR &&
4027 (!blk_queue_nonrot(bfqd->queue) ||
4028 blk_rq_sectors(rq) < BFQQ_SECT_THR_NONROT);
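	/*
	 * seek_history thus acts as a shift register of the most recent
	 * requests: the bit shifted in is set when the new request lies
	 * farther than BFQQ_SEEK_THR sectors from the previous one and,
	 * on a non-rotational device, is in addition smaller than
	 * BFQQ_SECT_THR_NONROT sectors. BFQQ_SEEKY() presumably bases
	 * its seekiness verdict on how many of these bits are set.
	 */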
4031 static void bfq_update_has_short_ttime(struct bfq_data *bfqd,
4032 struct bfq_queue *bfqq,
4033 struct bfq_io_cq *bic)
4035 bool has_short_ttime = true;
4038 * No need to update has_short_ttime if bfqq is async or in
4039 * idle io prio class, or if bfq_slice_idle is zero, because
4040 * no device idling is performed for bfqq in this case.
4042 if (!bfq_bfqq_sync(bfqq) || bfq_class_idle(bfqq) ||
4043 bfqd->bfq_slice_idle == 0)
4046 /* Idle window just restored, statistics are meaningless. */
4047 if (time_is_after_eq_jiffies(bfqq->split_time +
4048 bfqd->bfq_wr_min_idle_time))
4051 /* Think time is infinite if no process is linked to
4052 * bfqq. Otherwise check average think time to
4053 * decide whether to mark as has_short_ttime
4055 if (atomic_read(&bic->icq.ioc->active_ref) == 0 ||
4056 (bfq_sample_valid(bfqq->ttime.ttime_samples) &&
4057 bfqq->ttime.ttime_mean > bfqd->bfq_slice_idle))
4058 has_short_ttime = false;
4060 bfq_log_bfqq(bfqd, bfqq, "update_has_short_ttime: has_short_ttime %d",
4063 if (has_short_ttime)
4064 bfq_mark_bfqq_has_short_ttime(bfqq);
4066 bfq_clear_bfqq_has_short_ttime(bfqq);
4070 * Called when a new fs request (rq) is added to bfqq. Check if there's
4071 * something we should do about it.
4073 static void bfq_rq_enqueued(struct bfq_data *bfqd, struct bfq_queue *bfqq,
4076 struct bfq_io_cq *bic = RQ_BIC(rq);
4078 if (rq->cmd_flags & REQ_META)
4079 bfqq->meta_pending++;
4081 bfq_update_io_thinktime(bfqd, bfqq);
4082 bfq_update_has_short_ttime(bfqd, bfqq, bic);
4083 bfq_update_io_seektime(bfqd, bfqq, rq);
4085 bfq_log_bfqq(bfqd, bfqq,
4086 "rq_enqueued: has_short_ttime=%d (seeky %d)",
4087 bfq_bfqq_has_short_ttime(bfqq), BFQQ_SEEKY(bfqq));
4089 bfqq->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq);
4091 if (bfqq == bfqd->in_service_queue && bfq_bfqq_wait_request(bfqq)) {
4092 bool small_req = bfqq->queued[rq_is_sync(rq)] == 1 &&
4093 blk_rq_sectors(rq) < 32;
4094 bool budget_timeout = bfq_bfqq_budget_timeout(bfqq);
4097 * There is just this request queued: if the request
4098 * is small and the queue is not to be expired, then just exit.
4101 * In this way, if the device is being idled to wait
4102 * for a new request from the in-service queue, we
4103 * avoid unplugging the device and committing the
4104 * device to serve just a small request. On the
4105 * contrary, we wait for the block layer to decide
4106 * when to unplug the device: hopefully, new requests
4107 * will be merged to this one quickly, then the device
4108 * will be unplugged and larger requests will be dispatched.
4111 if (small_req && !budget_timeout)
4115 * A large enough request arrived, or the queue is to
4116 * be expired: in both cases disk idling is to be
4117 * stopped, so clear wait_request flag and reset timer.
4120 bfq_clear_bfqq_wait_request(bfqq);
4121 hrtimer_try_to_cancel(&bfqd->idle_slice_timer);
4122 bfqg_stats_update_idle_time(bfqq_group(bfqq));
4125 * The queue is not empty, because a new request just
4126 * arrived. Hence we can safely expire the queue, in
4127 * case of budget timeout, without risking that the
4128 * timestamps of the queue are not updated correctly.
4129 * See [1] for more details.
4132 bfq_bfqq_expire(bfqd, bfqq, false,
4133 BFQQE_BUDGET_TIMEOUT);
4137 static void __bfq_insert_request(struct bfq_data *bfqd, struct request *rq)
4139 struct bfq_queue *bfqq = RQ_BFQQ(rq),
4140 *new_bfqq = bfq_setup_cooperator(bfqd, bfqq, rq, true);
4143 if (bic_to_bfqq(RQ_BIC(rq), 1) != bfqq)
4144 new_bfqq = bic_to_bfqq(RQ_BIC(rq), 1);
4146 * Release the request's reference to the old bfqq
4147 * and make sure one is taken to the shared queue.
4149 new_bfqq->allocated++;
4152 bfq_clear_bfqq_just_created(bfqq);
4154 * If the bic associated with the process
4155 * issuing this request still points to bfqq
4156 * (and thus has not been already redirected
4157 * to new_bfqq or even some other bfq_queue),
4158 * then complete the merge and redirect it to
4161 if (bic_to_bfqq(RQ_BIC(rq), 1) == bfqq)
4162 bfq_merge_bfqqs(bfqd, RQ_BIC(rq),
4165 * rq is about to be enqueued into new_bfqq,
4166 * release rq reference on bfqq
4168 bfq_put_queue(bfqq);
4169 rq->elv.priv[1] = new_bfqq;
4173 bfq_add_request(rq);
4175 rq->fifo_time = ktime_get_ns() + bfqd->bfq_fifo_expire[rq_is_sync(rq)];
4176 list_add_tail(&rq->queuelist, &bfqq->fifo);
4178 bfq_rq_enqueued(bfqd, bfqq, rq);
4181 static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
4184 struct request_queue *q = hctx->queue;
4185 struct bfq_data *bfqd = q->elevator->elevator_data;
4187 spin_lock_irq(&bfqd->lock);
4188 if (blk_mq_sched_try_insert_merge(q, rq)) {
4189 spin_unlock_irq(&bfqd->lock);
4193 spin_unlock_irq(&bfqd->lock);
4195 blk_mq_sched_request_inserted(rq);
4197 spin_lock_irq(&bfqd->lock);
4198 if (at_head || blk_rq_is_passthrough(rq)) {
4200 list_add(&rq->queuelist, &bfqd->dispatch);
4202 list_add_tail(&rq->queuelist, &bfqd->dispatch);
4204 __bfq_insert_request(bfqd, rq);
4206 if (rq_mergeable(rq)) {
4207 elv_rqhash_add(q, rq);
4213 spin_unlock_irq(&bfqd->lock);
4216 static void bfq_insert_requests(struct blk_mq_hw_ctx *hctx,
4217 struct list_head *list, bool at_head)
4219 while (!list_empty(list)) {
4222 rq = list_first_entry(list, struct request, queuelist);
4223 list_del_init(&rq->queuelist);
4224 bfq_insert_request(hctx, rq, at_head);
4228 static void bfq_update_hw_tag(struct bfq_data *bfqd)
4230 bfqd->max_rq_in_driver = max_t(int, bfqd->max_rq_in_driver,
4231 bfqd->rq_in_driver);
4233 if (bfqd->hw_tag == 1)
4237 * This sample is valid if the number of outstanding requests
4238 * is large enough to allow a queueing behavior. Note that the
4239 * sum is not exact, as it's not taking into account deactivated requests.
4242 if (bfqd->rq_in_driver + bfqd->queued < BFQ_HW_QUEUE_THRESHOLD)
4245 if (bfqd->hw_tag_samples++ < BFQ_HW_QUEUE_SAMPLES)
4248 bfqd->hw_tag = bfqd->max_rq_in_driver > BFQ_HW_QUEUE_THRESHOLD;
4249 bfqd->max_rq_in_driver = 0;
4250 bfqd->hw_tag_samples = 0;
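	/*
	 * In short: once BFQ_HW_QUEUE_SAMPLES valid samples have been
	 * collected, the device is deemed to support internal queueing
	 * (hw_tag) if more than BFQ_HW_QUEUE_THRESHOLD requests were
	 * ever observed in the driver at once during the sampling
	 * window; the maximum and the sample count are then reset for
	 * the next window.
	 */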
4253 static void bfq_completed_request(struct bfq_queue *bfqq, struct bfq_data *bfqd)
4258 bfq_update_hw_tag(bfqd);
4260 bfqd->rq_in_driver--;
4263 if (!bfqq->dispatched && !bfq_bfqq_busy(bfqq)) {
4265 * Set budget_timeout (which we overload to store the
4266 * time at which the queue remains with no backlog and
4267 * no outstanding request; used by the weight-raising mechanism).
4270 bfqq->budget_timeout = jiffies;
4272 bfq_weights_tree_remove(bfqd, &bfqq->entity,
4273 &bfqd->queue_weights_tree);
4276 now_ns = ktime_get_ns();
4278 bfqq->ttime.last_end_request = now_ns;
4281 * Using us instead of ns, to get a reasonable precision in
4282 * computing rate in next check.
4284 delta_us = div_u64(now_ns - bfqd->last_completion, NSEC_PER_USEC);
4287 * If the request took rather long to complete, and, according
4288 * to the maximum request size recorded, this completion latency
4289 * implies that the request was certainly served at a very low
4290 * rate (less than 1M sectors/sec), then the whole observation
4291 * interval that lasts up to this time instant cannot be a
4292 * valid time interval for computing a new peak rate. Invoke
4293 * bfq_update_rate_reset to have the following three steps taken:
4295 * - close the observation interval at the last (previous)
4296 * request dispatch or completion
4297 * - compute rate, if possible, for that observation interval
4298 * - reset to zero samples, which will trigger a proper
4299 * re-initialization of the observation interval on next dispatch
4302 if (delta_us > BFQ_MIN_TT/NSEC_PER_USEC &&
4303 (bfqd->last_rq_max_size<<BFQ_RATE_SHIFT)/delta_us <
4304 1UL<<(BFQ_RATE_SHIFT - 10))
4305 bfq_update_rate_reset(bfqd, NULL);
4306 bfqd->last_completion = now_ns;
4309 * If we are waiting to discover whether the request pattern
4310 * of the task associated with the queue is actually
4311 * isochronous, and both requisites for this condition to hold
4312 * are now satisfied, then compute soft_rt_next_start (see the
4313 * comments on the function bfq_bfqq_softrt_next_start()). We
4314 * schedule this delayed check when bfqq expires, if it still
4315 * has in-flight requests.
4317 if (bfq_bfqq_softrt_update(bfqq) && bfqq->dispatched == 0 &&
4318 RB_EMPTY_ROOT(&bfqq->sort_list))
4319 bfqq->soft_rt_next_start =
4320 bfq_bfqq_softrt_next_start(bfqd, bfqq);
4323 * If this is the in-service queue, check if it needs to be expired,
4324 * or if we want to idle in case it has no pending requests.
4326 if (bfqd->in_service_queue == bfqq) {
4327 if (bfqq->dispatched == 0 && bfq_bfqq_must_idle(bfqq)) {
4328 bfq_arm_slice_timer(bfqd);
4330 } else if (bfq_may_expire_for_budg_timeout(bfqq))
4331 bfq_bfqq_expire(bfqd, bfqq, false,
4332 BFQQE_BUDGET_TIMEOUT);
4333 else if (RB_EMPTY_ROOT(&bfqq->sort_list) &&
4334 (bfqq->dispatched == 0 ||
4335 !bfq_bfqq_may_idle(bfqq)))
4336 bfq_bfqq_expire(bfqd, bfqq, false,
4337 BFQQE_NO_MORE_REQUESTS);
4340 if (!bfqd->rq_in_driver)
4341 bfq_schedule_dispatch(bfqd);
4344 static void bfq_put_rq_priv_body(struct bfq_queue *bfqq)
4348 bfq_put_queue(bfqq);
4351 static void bfq_finish_request(struct request *rq)
4353 struct bfq_queue *bfqq;
4354 struct bfq_data *bfqd;
4362 if (rq->rq_flags & RQF_STARTED)
4363 bfqg_stats_update_completion(bfqq_group(bfqq),
4364 rq_start_time_ns(rq),
4365 rq_io_start_time_ns(rq),
4368 if (likely(rq->rq_flags & RQF_STARTED)) {
4369 unsigned long flags;
4371 spin_lock_irqsave(&bfqd->lock, flags);
4373 bfq_completed_request(bfqq, bfqd);
4374 bfq_put_rq_priv_body(bfqq);
4376 spin_unlock_irqrestore(&bfqd->lock, flags);
4379 * Request rq may be still/already in the scheduler,
4380 * in which case we need to remove it. And we cannot
4381 * defer such a check and removal, to avoid
4382 * inconsistencies in the time interval from the end
4383 * of this function to the start of the deferred work.
4384 * This situation seems to occur only in process
4385 * context, as a consequence of a merge. In the
4386 * current version of the code, this implies that the
4390 if (!RB_EMPTY_NODE(&rq->rb_node))
4391 bfq_remove_request(rq->q, rq);
4392 bfq_put_rq_priv_body(bfqq);
4395 rq->elv.priv[0] = NULL;
4396 rq->elv.priv[1] = NULL;
4400 * Returns NULL if a new bfqq should be allocated, or the old bfqq if this
4401 * was the last process referring to that bfqq.
4403 static struct bfq_queue *
4404 bfq_split_bfqq(struct bfq_io_cq *bic, struct bfq_queue *bfqq)
4406 bfq_log_bfqq(bfqq->bfqd, bfqq, "splitting queue");
4408 if (bfqq_process_refs(bfqq) == 1) {
4409 bfqq->pid = current->pid;
4410 bfq_clear_bfqq_coop(bfqq);
4411 bfq_clear_bfqq_split_coop(bfqq);
4415 bic_set_bfqq(bic, NULL, 1);
4417 bfq_put_cooperator(bfqq);
4419 bfq_put_queue(bfqq);
4423 static struct bfq_queue *bfq_get_bfqq_handle_split(struct bfq_data *bfqd,
4424 struct bfq_io_cq *bic,
4426 bool split, bool is_sync,
4429 struct bfq_queue *bfqq = bic_to_bfqq(bic, is_sync);
4431 if (likely(bfqq && bfqq != &bfqd->oom_bfqq))
4438 bfq_put_queue(bfqq);
4439 bfqq = bfq_get_queue(bfqd, bio, is_sync, bic);
4441 bic_set_bfqq(bic, bfqq, is_sync);
4442 if (split && is_sync) {
4443 if ((bic->was_in_burst_list && bfqd->large_burst) ||
4444 bic->saved_in_large_burst)
4445 bfq_mark_bfqq_in_large_burst(bfqq);
4447 bfq_clear_bfqq_in_large_burst(bfqq);
4448 if (bic->was_in_burst_list)
4449 hlist_add_head(&bfqq->burst_list_node,
4452 bfqq->split_time = jiffies;
4459 * Allocate bfq data structures associated with this request.
4461 static void bfq_prepare_request(struct request *rq, struct bio *bio)
4463 struct request_queue *q = rq->q;
4464 struct bfq_data *bfqd = q->elevator->elevator_data;
4465 struct bfq_io_cq *bic;
4466 const int is_sync = rq_is_sync(rq);
4467 struct bfq_queue *bfqq;
4468 bool new_queue = false;
4469 bool bfqq_already_existing = false, split = false;
4472 * Even if we don't have an icq attached, we should still clear
4473 * the scheduler pointers, as they might point to previously
4474 * allocated bic/bfqq structs.
4477 rq->elv.priv[0] = rq->elv.priv[1] = NULL;
4481 bic = icq_to_bic(rq->elv.icq);
4483 spin_lock_irq(&bfqd->lock);
4485 bfq_check_ioprio_change(bic, bio);
4487 bfq_bic_update_cgroup(bic, bio);
4489 bfqq = bfq_get_bfqq_handle_split(bfqd, bic, bio, false, is_sync,
4492 if (likely(!new_queue)) {
4493 /* If the queue was seeky for too long, break it apart. */
4494 if (bfq_bfqq_coop(bfqq) && bfq_bfqq_split_coop(bfqq)) {
4495 bfq_log_bfqq(bfqd, bfqq, "breaking apart bfqq");
4497 /* Update bic before losing reference to bfqq */
4498 if (bfq_bfqq_in_large_burst(bfqq))
4499 bic->saved_in_large_burst = true;
4501 bfqq = bfq_split_bfqq(bic, bfqq);
4505 bfqq = bfq_get_bfqq_handle_split(bfqd, bic, bio,
4509 bfqq_already_existing = true;
4515 bfq_log_bfqq(bfqd, bfqq, "get_request %p: bfqq %p, %d",
4516 rq, bfqq, bfqq->ref);
4518 rq->elv.priv[0] = bic;
4519 rq->elv.priv[1] = bfqq;
4522 * If a bfq_queue has only one process reference, it is owned
4523 * by only this bic: we can then set bfqq->bic = bic. In
4524 * addition, if the queue has also just been split, we have to resume its state.
4527 if (likely(bfqq != &bfqd->oom_bfqq) && bfqq_process_refs(bfqq) == 1) {
4531 * The queue has just been split from a shared
4532 * queue: restore the idle window and the
4533 * possible weight raising period.
4535 bfq_bfqq_resume_state(bfqq, bfqd, bic,
4536 bfqq_already_existing);
4540 if (unlikely(bfq_bfqq_just_created(bfqq)))
4541 bfq_handle_burst(bfqd, bfqq);
4543 spin_unlock_irq(&bfqd->lock);
4547 bfq_idle_slice_timer_body(struct bfq_data *bfqd, struct bfq_queue *bfqq)
4549 enum bfqq_expiration reason;
4550 unsigned long flags;
4552 spin_lock_irqsave(&bfqd->lock, flags);
4555 * Considering that bfqq may be in a race, we should first check
4556 * whether bfqq is still the in-service queue before doing anything
4557 * on it. If it is not, it has already been expired
4558 * through __bfq_bfqq_expire(), and its wait_request flag has
4559 * been cleared in __bfq_bfqd_reset_in_service().
4561 if (bfqq != bfqd->in_service_queue) {
4562 spin_unlock_irqrestore(&bfqd->lock, flags);
4566 bfq_clear_bfqq_wait_request(bfqq);
4568 if (bfq_bfqq_budget_timeout(bfqq))
4570 * Also here the queue can be safely expired
4571 * for budget timeout without wasting guarantees
4574 reason = BFQQE_BUDGET_TIMEOUT;
4575 else if (bfqq->queued[0] == 0 && bfqq->queued[1] == 0)
4577 * The queue may not be empty upon timer expiration,
4578 * because we may not disable the timer when the
4579 * first request of the in-service queue arrives
4580 * during disk idling.
4582 reason = BFQQE_TOO_IDLE;
4584 goto schedule_dispatch;
4586 bfq_bfqq_expire(bfqd, bfqq, true, reason);
4589 bfq_schedule_dispatch(bfqd);
4590 spin_unlock_irqrestore(&bfqd->lock, flags);
4594 * Handler of the expiration of the timer running if the in-service queue
4595 * is idling inside its time slice.
4597 static enum hrtimer_restart bfq_idle_slice_timer(struct hrtimer *timer)
4599 struct bfq_data *bfqd = container_of(timer, struct bfq_data,
4601 struct bfq_queue *bfqq = bfqd->in_service_queue;
4604 * Theoretical race here: the in-service queue can be NULL or
4605 * different from the queue that was idling if a new request
4606 * arrives for the current queue and there is a full dispatch
4607 * cycle that changes the in-service queue. This can hardly
4608 * happen, but in the worst case we just expire a queue too early.
4612 bfq_idle_slice_timer_body(bfqd, bfqq);
4614 return HRTIMER_NORESTART;
4617 static void __bfq_put_async_bfqq(struct bfq_data *bfqd,
4618 struct bfq_queue **bfqq_ptr)
4620 struct bfq_queue *bfqq = *bfqq_ptr;
4622 bfq_log(bfqd, "put_async_bfqq: %p", bfqq);
4624 bfq_bfqq_move(bfqd, bfqq, bfqd->root_group);
4626 bfq_log_bfqq(bfqd, bfqq, "put_async_bfqq: putting %p, %d",
4628 bfq_put_queue(bfqq);
4634 * Release all the bfqg references to its async queues. If we are
4635 * deallocating the group these queues may still contain requests, so
4636 * we reparent them to the root cgroup (i.e., the only one that will
4637 * exist for sure until all the requests on a device are gone).
4639 void bfq_put_async_queues(struct bfq_data *bfqd, struct bfq_group *bfqg)
4643 for (i = 0; i < 2; i++)
4644 for (j = 0; j < IOPRIO_BE_NR; j++)
4645 __bfq_put_async_bfqq(bfqd, &bfqg->async_bfqq[i][j]);
4647 __bfq_put_async_bfqq(bfqd, &bfqg->async_idle_bfqq);
4650 static void bfq_exit_queue(struct elevator_queue *e)
4652 struct bfq_data *bfqd = e->elevator_data;
4653 struct bfq_queue *bfqq, *n;
4655 hrtimer_cancel(&bfqd->idle_slice_timer);
4657 spin_lock_irq(&bfqd->lock);
4658 list_for_each_entry_safe(bfqq, n, &bfqd->idle_list, bfqq_list)
4659 bfq_deactivate_bfqq(bfqd, bfqq, false, false);
4660 spin_unlock_irq(&bfqd->lock);
4662 hrtimer_cancel(&bfqd->idle_slice_timer);
4664 #ifdef CONFIG_BFQ_GROUP_IOSCHED
4665 blkcg_deactivate_policy(bfqd->queue, &blkcg_policy_bfq);
4667 spin_lock_irq(&bfqd->lock);
4668 bfq_put_async_queues(bfqd, bfqd->root_group);
4669 kfree(bfqd->root_group);
4670 spin_unlock_irq(&bfqd->lock);
4676 static void bfq_init_root_group(struct bfq_group *root_group,
4677 struct bfq_data *bfqd)
4681 #ifdef CONFIG_BFQ_GROUP_IOSCHED
4682 root_group->entity.parent = NULL;
4683 root_group->my_entity = NULL;
4684 root_group->bfqd = bfqd;
4686 root_group->rq_pos_tree = RB_ROOT;
4687 for (i = 0; i < BFQ_IOPRIO_CLASSES; i++)
4688 root_group->sched_data.service_tree[i] = BFQ_SERVICE_TREE_INIT;
4689 root_group->sched_data.bfq_class_idle_last_service = jiffies;
4692 static int bfq_init_queue(struct request_queue *q, struct elevator_type *e)
4694 struct bfq_data *bfqd;
4695 struct elevator_queue *eq;
4697 eq = elevator_alloc(q, e);
4701 bfqd = kzalloc_node(sizeof(*bfqd), GFP_KERNEL, q->node);
4703 kobject_put(&eq->kobj);
4706 eq->elevator_data = bfqd;
4708 spin_lock_irq(q->queue_lock);
4710 spin_unlock_irq(q->queue_lock);
4713 * Our fallback bfqq if bfq_get_queue() runs into OOM issues.
4714 * Grab a permanent reference to it, so that the normal code flow
4715 * will not attempt to free it.
4717 bfq_init_bfqq(bfqd, &bfqd->oom_bfqq, NULL, 1, 0);
4718 bfqd->oom_bfqq.ref++;
4719 bfqd->oom_bfqq.new_ioprio = BFQ_DEFAULT_QUEUE_IOPRIO;
4720 bfqd->oom_bfqq.new_ioprio_class = IOPRIO_CLASS_BE;
4721 bfqd->oom_bfqq.entity.new_weight =
4722 bfq_ioprio_to_weight(bfqd->oom_bfqq.new_ioprio);
4724 /* oom_bfqq does not participate in bursts */
4725 bfq_clear_bfqq_just_created(&bfqd->oom_bfqq);
4728 * Trigger weight initialization, according to ioprio, at the
4729 * oom_bfqq's first activation. The oom_bfqq's ioprio and ioprio
4730 * class won't be changed any more.
4732 bfqd->oom_bfqq.entity.prio_changed = 1;
4736 INIT_LIST_HEAD(&bfqd->dispatch);
4738 hrtimer_init(&bfqd->idle_slice_timer, CLOCK_MONOTONIC,
4740 bfqd->idle_slice_timer.function = bfq_idle_slice_timer;
4742 bfqd->queue_weights_tree = RB_ROOT;
4743 bfqd->group_weights_tree = RB_ROOT;
4745 INIT_LIST_HEAD(&bfqd->active_list);
4746 INIT_LIST_HEAD(&bfqd->idle_list);
4747 INIT_HLIST_HEAD(&bfqd->burst_list);
4751 bfqd->bfq_max_budget = bfq_default_max_budget;
4753 bfqd->bfq_fifo_expire[0] = bfq_fifo_expire[0];
4754 bfqd->bfq_fifo_expire[1] = bfq_fifo_expire[1];
4755 bfqd->bfq_back_max = bfq_back_max;
4756 bfqd->bfq_back_penalty = bfq_back_penalty;
4757 bfqd->bfq_slice_idle = bfq_slice_idle;
4758 bfqd->bfq_timeout = bfq_timeout;
4760 bfqd->bfq_requests_within_timer = 120;
4762 bfqd->bfq_large_burst_thresh = 8;
4763 bfqd->bfq_burst_interval = msecs_to_jiffies(180);
4765 bfqd->low_latency = true;
4768 * Trade-off between responsiveness and fairness.
4770 bfqd->bfq_wr_coeff = 30;
4771 bfqd->bfq_wr_rt_max_time = msecs_to_jiffies(300);
4772 bfqd->bfq_wr_max_time = 0;
4773 bfqd->bfq_wr_min_idle_time = msecs_to_jiffies(2000);
4774 bfqd->bfq_wr_min_inter_arr_async = msecs_to_jiffies(500);
4775 bfqd->bfq_wr_max_softrt_rate = 7000; /*
4776 * Approximate rate required
4777 * to playback or record a
4778 * high-definition compressed video.
4781 bfqd->wr_busy_queues = 0;
4784 * Begin by assuming, optimistically, that the device is a
4785 * high-speed one, and that its peak rate is equal to 2/3 of
4786 * the highest reference rate.
4788 bfqd->RT_prod = R_fast[blk_queue_nonrot(bfqd->queue)] *
4789 T_fast[blk_queue_nonrot(bfqd->queue)];
4790 bfqd->peak_rate = R_fast[blk_queue_nonrot(bfqd->queue)] * 2 / 3;
4791 bfqd->device_speed = BFQ_BFQD_FAST;
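	/*
	 * One consumer of these values is bfq_wr_duration(), which
	 * derives the duration of weight-raising periods roughly as
	 * RT_prod / peak_rate; the optimistic initial peak rate thus
	 * yields shorter initial weight-raising periods, which get
	 * adjusted as the actual peak rate is measured.
	 */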
4793 spin_lock_init(&bfqd->lock);
4796 * The invocation of the next bfq_create_group_hierarchy
4797 * function is the head of a chain of function calls
4798 * (bfq_create_group_hierarchy->blkcg_activate_policy->
4799 * blk_mq_freeze_queue) that may lead to the invocation of the
4800 * has_work hook function. For this reason,
4801 * bfq_create_group_hierarchy is invoked only after all
4802 * scheduler data has been initialized, apart from the fields
4803 * that can be initialized only after invoking
4804 * bfq_create_group_hierarchy. This, in particular, enables
4805 * has_work to correctly return false. Of course, to avoid
4806 * other inconsistencies, the blk-mq stack must then refrain
4807 * from invoking further scheduler hooks before this init
4808 * function is finished.
4810 bfqd->root_group = bfq_create_group_hierarchy(bfqd, q->node);
4811 if (!bfqd->root_group)
4813 bfq_init_root_group(bfqd->root_group, bfqd);
4814 bfq_init_entity(&bfqd->oom_bfqq.entity, bfqd->root_group);
4816 wbt_disable_default(q);
4821 kobject_put(&eq->kobj);
4825 static void bfq_slab_kill(void)
4827 kmem_cache_destroy(bfq_pool);
4830 static int __init bfq_slab_setup(void)
4832 bfq_pool = KMEM_CACHE(bfq_queue, 0);
4838 static ssize_t bfq_var_show(unsigned int var, char *page)
4840 return sprintf(page, "%u\n", var);
4843 static int bfq_var_store(unsigned long *var, const char *page)
4845 unsigned long new_val;
4846 int ret = kstrtoul(page, 10, &new_val);
4854 #define SHOW_FUNCTION(__FUNC, __VAR, __CONV) \
4855 static ssize_t __FUNC(struct elevator_queue *e, char *page) \
4857 struct bfq_data *bfqd = e->elevator_data; \
4858 u64 __data = __VAR; \
4860 __data = jiffies_to_msecs(__data); \
4861 else if (__CONV == 2) \
4862 __data = div_u64(__data, NSEC_PER_MSEC); \
4863 return bfq_var_show(__data, (page)); \
4865 SHOW_FUNCTION(bfq_fifo_expire_sync_show, bfqd->bfq_fifo_expire[1], 2);
4866 SHOW_FUNCTION(bfq_fifo_expire_async_show, bfqd->bfq_fifo_expire[0], 2);
4867 SHOW_FUNCTION(bfq_back_seek_max_show, bfqd->bfq_back_max, 0);
4868 SHOW_FUNCTION(bfq_back_seek_penalty_show, bfqd->bfq_back_penalty, 0);
4869 SHOW_FUNCTION(bfq_slice_idle_show, bfqd->bfq_slice_idle, 2);
4870 SHOW_FUNCTION(bfq_max_budget_show, bfqd->bfq_user_max_budget, 0);
4871 SHOW_FUNCTION(bfq_timeout_sync_show, bfqd->bfq_timeout, 1);
4872 SHOW_FUNCTION(bfq_strict_guarantees_show, bfqd->strict_guarantees, 0);
4873 SHOW_FUNCTION(bfq_low_latency_show, bfqd->low_latency, 0);
4874 #undef SHOW_FUNCTION
4876 #define USEC_SHOW_FUNCTION(__FUNC, __VAR) \
4877 static ssize_t __FUNC(struct elevator_queue *e, char *page) \
4879 struct bfq_data *bfqd = e->elevator_data; \
4880 u64 __data = __VAR; \
4881 __data = div_u64(__data, NSEC_PER_USEC); \
4882 return bfq_var_show(__data, (page)); \
4884 USEC_SHOW_FUNCTION(bfq_slice_idle_us_show, bfqd->bfq_slice_idle);
4885 #undef USEC_SHOW_FUNCTION
4887 #define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV) \
4889 __FUNC(struct elevator_queue *e, const char *page, size_t count) \
4891 struct bfq_data *bfqd = e->elevator_data; \
4892 unsigned long __data, __min = (MIN), __max = (MAX); \
4895 ret = bfq_var_store(&__data, (page)); \
4898 if (__data < __min) \
4900 else if (__data > __max) \
4903 *(__PTR) = msecs_to_jiffies(__data); \
4904 else if (__CONV == 2) \
4905 *(__PTR) = (u64)__data * NSEC_PER_MSEC; \
4907 *(__PTR) = __data; \
4910 STORE_FUNCTION(bfq_fifo_expire_sync_store, &bfqd->bfq_fifo_expire[1], 1,
4912 STORE_FUNCTION(bfq_fifo_expire_async_store, &bfqd->bfq_fifo_expire[0], 1,
4914 STORE_FUNCTION(bfq_back_seek_max_store, &bfqd->bfq_back_max, 0, INT_MAX, 0);
4915 STORE_FUNCTION(bfq_back_seek_penalty_store, &bfqd->bfq_back_penalty, 1,
4917 STORE_FUNCTION(bfq_slice_idle_store, &bfqd->bfq_slice_idle, 0, INT_MAX, 2);
4918 #undef STORE_FUNCTION
4920 #define USEC_STORE_FUNCTION(__FUNC, __PTR, MIN, MAX) \
4921 static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count)\
4923 struct bfq_data *bfqd = e->elevator_data; \
4924 unsigned long __data, __min = (MIN), __max = (MAX); \
4927 ret = bfq_var_store(&__data, (page)); \
4930 if (__data < __min) \
4932 else if (__data > __max) \
4934 *(__PTR) = (u64)__data * NSEC_PER_USEC; \
4937 USEC_STORE_FUNCTION(bfq_slice_idle_us_store, &bfqd->bfq_slice_idle, 0,
4939 #undef USEC_STORE_FUNCTION
4941 static ssize_t bfq_max_budget_store(struct elevator_queue *e,
4942 const char *page, size_t count)
4944 struct bfq_data *bfqd = e->elevator_data;
4945 unsigned long __data;
4948 ret = bfq_var_store(&__data, (page));
4953 bfqd->bfq_max_budget = bfq_calc_max_budget(bfqd);
4955 if (__data > INT_MAX)
4957 bfqd->bfq_max_budget = __data;
4960 bfqd->bfq_user_max_budget = __data;
4966 * Leaving this name to preserve name compatibility with cfq
4967 * parameters, but this timeout is used for both sync and async.
4969 static ssize_t bfq_timeout_sync_store(struct elevator_queue *e,
4970 const char *page, size_t count)
4972 struct bfq_data *bfqd = e->elevator_data;
4973 unsigned long __data;
4976 ret = bfq_var_store(&__data, (page));
4982 else if (__data > INT_MAX)
4985 bfqd->bfq_timeout = msecs_to_jiffies(__data);
4986 if (bfqd->bfq_user_max_budget == 0)
4987 bfqd->bfq_max_budget = bfq_calc_max_budget(bfqd);
4992 static ssize_t bfq_strict_guarantees_store(struct elevator_queue *e,
4993 const char *page, size_t count)
4995 struct bfq_data *bfqd = e->elevator_data;
4996 unsigned long __data;
4999 ret = bfq_var_store(&__data, (page));
5005 if (!bfqd->strict_guarantees && __data == 1
5006 && bfqd->bfq_slice_idle < 8 * NSEC_PER_MSEC)
5007 bfqd->bfq_slice_idle = 8 * NSEC_PER_MSEC;
5009 bfqd->strict_guarantees = __data;
5014 static ssize_t bfq_low_latency_store(struct elevator_queue *e,
5015 const char *page, size_t count)
5017 struct bfq_data *bfqd = e->elevator_data;
5018 unsigned long __data;
5021 ret = bfq_var_store(&__data, (page));
5027 if (__data == 0 && bfqd->low_latency != 0)
5029 bfqd->low_latency = __data;
5034 #define BFQ_ATTR(name) \
5035 __ATTR(name, 0644, bfq_##name##_show, bfq_##name##_store)
5037 static struct elv_fs_entry bfq_attrs[] = {
5038 BFQ_ATTR(fifo_expire_sync),
5039 BFQ_ATTR(fifo_expire_async),
5040 BFQ_ATTR(back_seek_max),
5041 BFQ_ATTR(back_seek_penalty),
5042 BFQ_ATTR(slice_idle),
5043 BFQ_ATTR(slice_idle_us),
5044 BFQ_ATTR(max_budget),
5045 BFQ_ATTR(timeout_sync),
5046 BFQ_ATTR(strict_guarantees),
5047 BFQ_ATTR(low_latency),
5051 static struct elevator_type iosched_bfq_mq = {
5053 .prepare_request = bfq_prepare_request,
5054 .finish_request = bfq_finish_request,
5055 .exit_icq = bfq_exit_icq,
5056 .insert_requests = bfq_insert_requests,
5057 .dispatch_request = bfq_dispatch_request,
5058 .next_request = elv_rb_latter_request,
5059 .former_request = elv_rb_former_request,
5060 .allow_merge = bfq_allow_bio_merge,
5061 .bio_merge = bfq_bio_merge,
5062 .request_merge = bfq_request_merge,
5063 .requests_merged = bfq_requests_merged,
5064 .request_merged = bfq_request_merged,
5065 .has_work = bfq_has_work,
5066 .init_sched = bfq_init_queue,
5067 .exit_sched = bfq_exit_queue,
5071 .icq_size = sizeof(struct bfq_io_cq),
5072 .icq_align = __alignof__(struct bfq_io_cq),
5073 .elevator_attrs = bfq_attrs,
5074 .elevator_name = "bfq",
5075 .elevator_owner = THIS_MODULE,
5077 MODULE_ALIAS("bfq-iosched");
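/*
 * The attributes listed in bfq_attrs above are created by the elevator
 * sysfs code when bfq is the active scheduler of a queue, typically
 * under /sys/block/<disk>/queue/iosched/. For example, writing 0 to
 * the low_latency attribute there switches off the low-latency
 * heuristics mentioned at the top of this file.
 */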
5079 static int __init bfq_init(void)
5083 #ifdef CONFIG_BFQ_GROUP_IOSCHED
5084 ret = blkcg_policy_register(&blkcg_policy_bfq);
5090 if (bfq_slab_setup())
5094 * Times to load large popular applications for the typical
5095 * systems installed on the reference devices (see the
5096 * comments before the definitions of the next two
5097 * arrays). Actually, we use slightly slower values, as the
5098 * estimated peak rate tends to be smaller than the actual
5099 * peak rate. The reason for this last fact is that estimates
5100 * are computed over much shorter time intervals than the long
5101 * intervals typically used for benchmarking. Why? First, to
5102 * adapt more quickly to variations. Second, because an I/O
5103 * scheduler cannot rely on a peak-rate-evaluation workload to
5104 * be run for a long time.
5106 T_slow[0] = msecs_to_jiffies(3500); /* actually 4 sec */
5107 T_slow[1] = msecs_to_jiffies(6000); /* actually 6.5 sec */
5108 T_fast[0] = msecs_to_jiffies(7000); /* actually 8 sec */
5109 T_fast[1] = msecs_to_jiffies(2500); /* actually 3 sec */
5112 * Thresholds that determine the switch between speed classes
5113 * (see the comments before the definition of the array
5114 * device_speed_thresh). These thresholds are biased towards
5115 * transitions to the fast class. This is safer than the
5116 * opposite bias. In fact, a wrong transition to the slow
5117 * class results in short weight-raising periods, because the
5118 * speed of the device then tends to be higher than the
5119 * reference peak rate. On the opposite end, a wrong
5120 * transition to the fast class tends to increase
5121 * weight-raising periods, for the opposite reason.
5123 device_speed_thresh[0] = (4 * R_slow[0]) / 3;
5124 device_speed_thresh[1] = (4 * R_slow[1]) / 3;
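	/*
	 * With the 4/3 factor, the switch point sits only one third
	 * above the slow-class reference rate R_slow, which is what
	 * biases the classification towards the fast class as noted
	 * above.
	 */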
5126 ret = elv_register(&iosched_bfq_mq);
5135 #ifdef CONFIG_BFQ_GROUP_IOSCHED
5136 blkcg_policy_unregister(&blkcg_policy_bfq);
5141 static void __exit bfq_exit(void)
5143 elv_unregister(&iosched_bfq_mq);
5144 #ifdef CONFIG_BFQ_GROUP_IOSCHED
5145 blkcg_policy_unregister(&blkcg_policy_bfq);
5150 module_init(bfq_init);
5151 module_exit(bfq_exit);
5153 MODULE_AUTHOR("Paolo Valente");
5154 MODULE_LICENSE("GPL");
5155 MODULE_DESCRIPTION("MQ Budget Fair Queueing I/O Scheduler");