GNU Linux-libre 4.14.262-gnu1
[releases.git] / block/blk-mq.c
1 /*
2  * Block multiqueue core code
3  *
4  * Copyright (C) 2013-2014 Jens Axboe
5  * Copyright (C) 2013-2014 Christoph Hellwig
6  */
7 #include <linux/kernel.h>
8 #include <linux/module.h>
9 #include <linux/backing-dev.h>
10 #include <linux/bio.h>
11 #include <linux/blkdev.h>
12 #include <linux/kmemleak.h>
13 #include <linux/mm.h>
14 #include <linux/init.h>
15 #include <linux/slab.h>
16 #include <linux/workqueue.h>
17 #include <linux/smp.h>
18 #include <linux/llist.h>
19 #include <linux/list_sort.h>
20 #include <linux/cpu.h>
21 #include <linux/cache.h>
22 #include <linux/sched/sysctl.h>
23 #include <linux/sched/topology.h>
24 #include <linux/sched/signal.h>
25 #include <linux/delay.h>
26 #include <linux/crash_dump.h>
27 #include <linux/prefetch.h>
28
29 #include <trace/events/block.h>
30
31 #include <linux/blk-mq.h>
32 #include "blk.h"
33 #include "blk-mq.h"
34 #include "blk-mq-debugfs.h"
35 #include "blk-mq-tag.h"
36 #include "blk-stat.h"
37 #include "blk-wbt.h"
38 #include "blk-mq-sched.h"
39
40 static void blk_mq_poll_stats_start(struct request_queue *q);
41 static void blk_mq_poll_stats_fn(struct blk_stat_callback *cb);
42
43 static int blk_mq_poll_stats_bkt(const struct request *rq)
44 {
45         int ddir, bytes, bucket;
46
47         ddir = rq_data_dir(rq);
48         bytes = blk_rq_bytes(rq);
49
50         bucket = ddir + 2*(ilog2(bytes) - 9);
51
52         if (bucket < 0)
53                 return -1;
54         else if (bucket >= BLK_MQ_POLL_STATS_BKTS)
55                 return ddir + BLK_MQ_POLL_STATS_BKTS - 2;
56
57         return bucket;
58 }
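/*
 * Worked example (illustrative): a 4096-byte read has ddir == READ == 0 and
 * ilog2(4096) == 12, so bucket = 0 + 2 * (12 - 9) = 6; a 512-byte write has
 * ddir == 1 and ilog2(512) == 9, so bucket = 1 + 2 * 0 = 1.  Requests
 * smaller than 512 bytes produce a negative bucket and are not accounted
 * (the function returns -1); very large requests are clamped into the last
 * two buckets, one per direction, by the BLK_MQ_POLL_STATS_BKTS check.
 */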
59
60 /*
61  * Check if any of the ctx's have pending work in this hardware queue
62  */
63 bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx)
64 {
65         return sbitmap_any_bit_set(&hctx->ctx_map) ||
66                         !list_empty_careful(&hctx->dispatch) ||
67                         blk_mq_sched_has_work(hctx);
68 }
69
70 /*
71  * Mark this ctx as having pending work in this hardware queue
72  */
73 static void blk_mq_hctx_mark_pending(struct blk_mq_hw_ctx *hctx,
74                                      struct blk_mq_ctx *ctx)
75 {
76         if (!sbitmap_test_bit(&hctx->ctx_map, ctx->index_hw))
77                 sbitmap_set_bit(&hctx->ctx_map, ctx->index_hw);
78 }
79
80 static void blk_mq_hctx_clear_pending(struct blk_mq_hw_ctx *hctx,
81                                       struct blk_mq_ctx *ctx)
82 {
83         sbitmap_clear_bit(&hctx->ctx_map, ctx->index_hw);
84 }
85
86 struct mq_inflight {
87         struct hd_struct *part;
88         unsigned int *inflight;
89 };
90
91 static void blk_mq_check_inflight(struct blk_mq_hw_ctx *hctx,
92                                   struct request *rq, void *priv,
93                                   bool reserved)
94 {
95         struct mq_inflight *mi = priv;
96
97         if (test_bit(REQ_ATOM_STARTED, &rq->atomic_flags) &&
98             !test_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags)) {
99                 /*
100                  * index[0] counts the specific partition that was asked
101                  * for. index[1] counts the ones that are active on the
102                  * whole device, so increment that if mi->part is indeed
103                  * a partition, and not a whole device.
104                  */
105                 if (rq->part == mi->part)
106                         mi->inflight[0]++;
107                 if (mi->part->partno)
108                         mi->inflight[1]++;
109         }
110 }
111
112 void blk_mq_in_flight(struct request_queue *q, struct hd_struct *part,
113                       unsigned int inflight[2])
114 {
115         struct mq_inflight mi = { .part = part, .inflight = inflight, };
116
117         inflight[0] = inflight[1] = 0;
118         blk_mq_queue_tag_busy_iter(q, blk_mq_check_inflight, &mi);
119 }
120
121 static void blk_mq_check_inflight_rw(struct blk_mq_hw_ctx *hctx,
122                                      struct request *rq, void *priv,
123                                      bool reserved)
124 {
125         struct mq_inflight *mi = priv;
126
127         if (rq->part == mi->part)
128                 mi->inflight[rq_data_dir(rq)]++;
129 }
130
131 void blk_mq_in_flight_rw(struct request_queue *q, struct hd_struct *part,
132                          unsigned int inflight[2])
133 {
134         struct mq_inflight mi = { .part = part, .inflight = inflight, };
135
136         inflight[0] = inflight[1] = 0;
137         blk_mq_queue_tag_busy_iter(q, blk_mq_check_inflight_rw, &mi);
138 }
139
140 void blk_freeze_queue_start(struct request_queue *q)
141 {
142         int freeze_depth;
143
144         freeze_depth = atomic_inc_return(&q->mq_freeze_depth);
145         if (freeze_depth == 1) {
146                 percpu_ref_kill(&q->q_usage_counter);
147                 blk_mq_run_hw_queues(q, false);
148         }
149 }
150 EXPORT_SYMBOL_GPL(blk_freeze_queue_start);
151
152 void blk_mq_freeze_queue_wait(struct request_queue *q)
153 {
154         wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->q_usage_counter));
155 }
156 EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_wait);
157
158 int blk_mq_freeze_queue_wait_timeout(struct request_queue *q,
159                                      unsigned long timeout)
160 {
161         return wait_event_timeout(q->mq_freeze_wq,
162                                         percpu_ref_is_zero(&q->q_usage_counter),
163                                         timeout);
164 }
165 EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_wait_timeout);
166
167 /*
168  * Guarantee no request is in use, so we can change any data structure of
169  * the queue afterward.
170  */
171 void blk_freeze_queue(struct request_queue *q)
172 {
173         /*
174          * In the !blk_mq case we are only calling this to kill the
175          * q_usage_counter, otherwise this increases the freeze depth
176          * and waits for it to return to zero.  For this reason there is
177          * no blk_unfreeze_queue(), and blk_freeze_queue() is not
178          * exported to drivers as the only user for unfreeze is blk_mq.
179          */
180         blk_freeze_queue_start(q);
181         if (!q->mq_ops)
182                 blk_drain_queue(q);
183         blk_mq_freeze_queue_wait(q);
184 }
185
186 void blk_mq_freeze_queue(struct request_queue *q)
187 {
188         /*
189          * ...just an alias to keep freeze and unfreeze actions balanced
190          * in the blk_mq_* namespace
191          */
192         blk_freeze_queue(q);
193 }
194 EXPORT_SYMBOL_GPL(blk_mq_freeze_queue);
195
196 void blk_mq_unfreeze_queue(struct request_queue *q)
197 {
198         int freeze_depth;
199
200         freeze_depth = atomic_dec_return(&q->mq_freeze_depth);
201         WARN_ON_ONCE(freeze_depth < 0);
202         if (!freeze_depth) {
203                 percpu_ref_reinit(&q->q_usage_counter);
204                 wake_up_all(&q->mq_freeze_wq);
205         }
206 }
207 EXPORT_SYMBOL_GPL(blk_mq_unfreeze_queue);
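/*
 * Usage sketch (illustrative only): a driver that needs to update data
 * structures which in-flight requests may reference typically brackets the
 * update with a freeze/unfreeze pair:
 *
 *	blk_mq_freeze_queue(q);
 *	... resize or swap per-queue data ...
 *	blk_mq_unfreeze_queue(q);
 *
 * The freeze kills q_usage_counter and waits for it to drain to zero, so no
 * request is in flight while the update runs; unfreeze reinitializes the
 * counter and wakes anyone blocked in blk_queue_enter().
 */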
208
209 /*
210  * FIXME: replace the scsi_internal_device_*block_nowait() calls in the
211  * mpt3sas driver such that this function can be removed.
212  */
213 void blk_mq_quiesce_queue_nowait(struct request_queue *q)
214 {
215         unsigned long flags;
216
217         spin_lock_irqsave(q->queue_lock, flags);
218         queue_flag_set(QUEUE_FLAG_QUIESCED, q);
219         spin_unlock_irqrestore(q->queue_lock, flags);
220 }
221 EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue_nowait);
222
223 /**
224  * blk_mq_quiesce_queue() - wait until all ongoing dispatches have finished
225  * @q: request queue.
226  *
227  * Note: this function does not prevent the struct request end_io()
228  * callback from being invoked. Once this function returns, we make
229  * sure no dispatch can happen until the queue is unquiesced via
230  * blk_mq_unquiesce_queue().
231  */
232 void blk_mq_quiesce_queue(struct request_queue *q)
233 {
234         struct blk_mq_hw_ctx *hctx;
235         unsigned int i;
236         bool rcu = false;
237
238         blk_mq_quiesce_queue_nowait(q);
239
240         queue_for_each_hw_ctx(q, hctx, i) {
241                 if (hctx->flags & BLK_MQ_F_BLOCKING)
242                         synchronize_srcu(hctx->queue_rq_srcu);
243                 else
244                         rcu = true;
245         }
246         if (rcu)
247                 synchronize_rcu();
248 }
249 EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue);
250
251 /*
252  * blk_mq_unquiesce_queue() - counterpart of blk_mq_quiesce_queue()
253  * @q: request queue.
254  *
255  * This function restores the queue to the state it was in before
256  * blk_mq_quiesce_queue() was called.
257  */
258 void blk_mq_unquiesce_queue(struct request_queue *q)
259 {
260         unsigned long flags;
261
262         spin_lock_irqsave(q->queue_lock, flags);
263         queue_flag_clear(QUEUE_FLAG_QUIESCED, q);
264         spin_unlock_irqrestore(q->queue_lock, flags);
265
266         /* dispatch requests which are inserted during quiescing */
267         blk_mq_run_hw_queues(q, true);
268 }
269 EXPORT_SYMBOL_GPL(blk_mq_unquiesce_queue);
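/*
 * Usage sketch (illustrative only): quiescing is the tool of choice when a
 * driver needs a guarantee that .queue_rq() is no longer running, without
 * waiting for outstanding requests to complete the way a freeze does:
 *
 *	blk_mq_quiesce_queue(q);
 *	... reconfigure state that .queue_rq() reads ...
 *	blk_mq_unquiesce_queue(q);
 *
 * Requests submitted while the queue is quiesced are only inserted, not
 * dispatched; blk_mq_unquiesce_queue() reruns the hardware queues to flush
 * them out.
 */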
270
271 void blk_mq_wake_waiters(struct request_queue *q)
272 {
273         struct blk_mq_hw_ctx *hctx;
274         unsigned int i;
275
276         queue_for_each_hw_ctx(q, hctx, i)
277                 if (blk_mq_hw_queue_mapped(hctx))
278                         blk_mq_tag_wakeup_all(hctx->tags, true);
279
280         /*
281          * If we are called because the queue has now been marked as
282          * dying, we need to ensure that processes currently waiting on
283          * the queue are notified as well.
284          */
285         wake_up_all(&q->mq_freeze_wq);
286 }
287
288 bool blk_mq_can_queue(struct blk_mq_hw_ctx *hctx)
289 {
290         return blk_mq_has_free_tags(hctx->tags);
291 }
292 EXPORT_SYMBOL(blk_mq_can_queue);
293
294 static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
295                 unsigned int tag, unsigned int op)
296 {
297         struct blk_mq_tags *tags = blk_mq_tags_from_data(data);
298         struct request *rq = tags->static_rqs[tag];
299
300         rq->rq_flags = 0;
301
302         if (data->flags & BLK_MQ_REQ_INTERNAL) {
303                 rq->tag = -1;
304                 rq->internal_tag = tag;
305         } else {
306                 if (blk_mq_tag_busy(data->hctx)) {
307                         rq->rq_flags = RQF_MQ_INFLIGHT;
308                         atomic_inc(&data->hctx->nr_active);
309                 }
310                 rq->tag = tag;
311                 rq->internal_tag = -1;
312                 data->hctx->tags->rqs[rq->tag] = rq;
313         }
314
315         INIT_LIST_HEAD(&rq->queuelist);
316         /* csd/requeue_work/fifo_time is initialized before use */
317         rq->q = data->q;
318         rq->mq_ctx = data->ctx;
319         rq->cmd_flags = op;
320         if (blk_queue_io_stat(data->q))
321                 rq->rq_flags |= RQF_IO_STAT;
322         /* do not touch atomic flags, it needs atomic ops against the timer */
323         rq->cpu = -1;
324         INIT_HLIST_NODE(&rq->hash);
325         RB_CLEAR_NODE(&rq->rb_node);
326         rq->rq_disk = NULL;
327         rq->part = NULL;
328         rq->start_time = jiffies;
329 #ifdef CONFIG_BLK_CGROUP
330         rq->rl = NULL;
331         set_start_time_ns(rq);
332         rq->io_start_time_ns = 0;
333 #endif
334         rq->nr_phys_segments = 0;
335 #if defined(CONFIG_BLK_DEV_INTEGRITY)
336         rq->nr_integrity_segments = 0;
337 #endif
338         rq->special = NULL;
339         /* tag was already set */
340         rq->extra_len = 0;
341
342         INIT_LIST_HEAD(&rq->timeout_list);
343         rq->timeout = 0;
344
345         rq->end_io = NULL;
346         rq->end_io_data = NULL;
347         rq->next_rq = NULL;
348
349         data->ctx->rq_dispatched[op_is_sync(op)]++;
350         return rq;
351 }
352
353 static struct request *blk_mq_get_request(struct request_queue *q,
354                 struct bio *bio, unsigned int op,
355                 struct blk_mq_alloc_data *data)
356 {
357         struct elevator_queue *e = q->elevator;
358         struct request *rq;
359         unsigned int tag;
360         struct blk_mq_ctx *local_ctx = NULL;
361
362         blk_queue_enter_live(q);
363         data->q = q;
364         if (likely(!data->ctx))
365                 data->ctx = local_ctx = blk_mq_get_ctx(q);
366         if (likely(!data->hctx))
367                 data->hctx = blk_mq_map_queue(q, data->ctx->cpu);
368         if (op & REQ_NOWAIT)
369                 data->flags |= BLK_MQ_REQ_NOWAIT;
370
371         if (e) {
372                 data->flags |= BLK_MQ_REQ_INTERNAL;
373
374                 /*
375                  * Flush requests are special and go directly to the
376                  * dispatch list.
377                  */
378                 if (!op_is_flush(op) && e->type->ops.mq.limit_depth)
379                         e->type->ops.mq.limit_depth(op, data);
380         }
381
382         tag = blk_mq_get_tag(data);
383         if (tag == BLK_MQ_TAG_FAIL) {
384                 if (local_ctx) {
385                         blk_mq_put_ctx(local_ctx);
386                         data->ctx = NULL;
387                 }
388                 blk_queue_exit(q);
389                 return NULL;
390         }
391
392         rq = blk_mq_rq_ctx_init(data, tag, op);
393         if (!op_is_flush(op)) {
394                 rq->elv.icq = NULL;
395                 if (e && e->type->ops.mq.prepare_request) {
396                         if (e->type->icq_cache && rq_ioc(bio))
397                                 blk_mq_sched_assign_ioc(rq, bio);
398
399                         e->type->ops.mq.prepare_request(rq, bio);
400                         rq->rq_flags |= RQF_ELVPRIV;
401                 }
402         }
403         data->hctx->queued++;
404         return rq;
405 }
406
407 struct request *blk_mq_alloc_request(struct request_queue *q, unsigned int op,
408                 unsigned int flags)
409 {
410         struct blk_mq_alloc_data alloc_data = { .flags = flags };
411         struct request *rq;
412         int ret;
413
414         ret = blk_queue_enter(q, flags & BLK_MQ_REQ_NOWAIT);
415         if (ret)
416                 return ERR_PTR(ret);
417
418         rq = blk_mq_get_request(q, NULL, op, &alloc_data);
419         blk_queue_exit(q);
420
421         if (!rq)
422                 return ERR_PTR(-EWOULDBLOCK);
423
424         blk_mq_put_ctx(alloc_data.ctx);
425
426         rq->__data_len = 0;
427         rq->__sector = (sector_t) -1;
428         rq->bio = rq->biotail = NULL;
429         return rq;
430 }
431 EXPORT_SYMBOL(blk_mq_alloc_request);
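/*
 * Usage sketch (illustrative only; ADMIN_TIMEOUT is a hypothetical driver
 * constant): passthrough-style drivers allocate a request directly instead
 * of going through a bio, e.g. to issue an internal command synchronously:
 *
 *	struct request *rq;
 *
 *	rq = blk_mq_alloc_request(q, REQ_OP_DRV_IN, 0);
 *	if (IS_ERR(rq))
 *		return PTR_ERR(rq);
 *	rq->timeout = ADMIN_TIMEOUT;
 *	blk_execute_rq(q, NULL, rq, 0);
 *	blk_mq_free_request(rq);
 *
 * Passing BLK_MQ_REQ_NOWAIT in @flags makes a failed tag allocation return
 * -EWOULDBLOCK instead of sleeping for a free tag.
 */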
432
433 struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
434                 unsigned int op, unsigned int flags, unsigned int hctx_idx)
435 {
436         struct blk_mq_alloc_data alloc_data = { .flags = flags };
437         struct request *rq;
438         unsigned int cpu;
439         int ret;
440
441         /*
442          * If the tag allocator sleeps we could get an allocation for a
443          * different hardware context.  No need to complicate the low level
444          * allocator just for the rare use case of a command tied to
445          * a specific queue.
446          */
447         if (WARN_ON_ONCE(!(flags & BLK_MQ_REQ_NOWAIT)))
448                 return ERR_PTR(-EINVAL);
449
450         if (hctx_idx >= q->nr_hw_queues)
451                 return ERR_PTR(-EIO);
452
453         ret = blk_queue_enter(q, true);
454         if (ret)
455                 return ERR_PTR(ret);
456
457         /*
458          * Check if the hardware context is actually mapped to anything.
459          * If not, tell the caller that it should skip this queue.
460          */
461         alloc_data.hctx = q->queue_hw_ctx[hctx_idx];
462         if (!blk_mq_hw_queue_mapped(alloc_data.hctx)) {
463                 blk_queue_exit(q);
464                 return ERR_PTR(-EXDEV);
465         }
466         cpu = cpumask_first(alloc_data.hctx->cpumask);
467         alloc_data.ctx = __blk_mq_get_ctx(q, cpu);
468
469         rq = blk_mq_get_request(q, NULL, op, &alloc_data);
470         blk_queue_exit(q);
471
472         if (!rq)
473                 return ERR_PTR(-EWOULDBLOCK);
474
475         return rq;
476 }
477 EXPORT_SYMBOL_GPL(blk_mq_alloc_request_hctx);
478
479 void blk_mq_free_request(struct request *rq)
480 {
481         struct request_queue *q = rq->q;
482         struct elevator_queue *e = q->elevator;
483         struct blk_mq_ctx *ctx = rq->mq_ctx;
484         struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);
485         const int sched_tag = rq->internal_tag;
486
487         if (rq->rq_flags & RQF_ELVPRIV) {
488                 if (e && e->type->ops.mq.finish_request)
489                         e->type->ops.mq.finish_request(rq);
490                 if (rq->elv.icq) {
491                         put_io_context(rq->elv.icq->ioc);
492                         rq->elv.icq = NULL;
493                 }
494         }
495
496         ctx->rq_completed[rq_is_sync(rq)]++;
497         if (rq->rq_flags & RQF_MQ_INFLIGHT)
498                 atomic_dec(&hctx->nr_active);
499
500         wbt_done(q->rq_wb, &rq->issue_stat);
501
502         clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
503         clear_bit(REQ_ATOM_POLL_SLEPT, &rq->atomic_flags);
504         if (rq->tag != -1)
505                 blk_mq_put_tag(hctx, hctx->tags, ctx, rq->tag);
506         if (sched_tag != -1)
507                 blk_mq_put_tag(hctx, hctx->sched_tags, ctx, sched_tag);
508         blk_mq_sched_restart(hctx);
509         blk_queue_exit(q);
510 }
511 EXPORT_SYMBOL_GPL(blk_mq_free_request);
512
513 inline void __blk_mq_end_request(struct request *rq, blk_status_t error)
514 {
515         blk_account_io_done(rq);
516
517         if (rq->end_io) {
518                 wbt_done(rq->q->rq_wb, &rq->issue_stat);
519                 rq->end_io(rq, error);
520         } else {
521                 if (unlikely(blk_bidi_rq(rq)))
522                         blk_mq_free_request(rq->next_rq);
523                 blk_mq_free_request(rq);
524         }
525 }
526 EXPORT_SYMBOL(__blk_mq_end_request);
527
528 void blk_mq_end_request(struct request *rq, blk_status_t error)
529 {
530         if (blk_update_request(rq, error, blk_rq_bytes(rq)))
531                 BUG();
532         __blk_mq_end_request(rq, error);
533 }
534 EXPORT_SYMBOL(blk_mq_end_request);
535
536 static void __blk_mq_complete_request_remote(void *data)
537 {
538         struct request *rq = data;
539
540         rq->q->softirq_done_fn(rq);
541 }
542
543 static void __blk_mq_complete_request(struct request *rq)
544 {
545         struct blk_mq_ctx *ctx = rq->mq_ctx;
546         bool shared = false;
547         int cpu;
548
549         if (rq->internal_tag != -1)
550                 blk_mq_sched_completed_request(rq);
551         if (rq->rq_flags & RQF_STATS) {
552                 blk_mq_poll_stats_start(rq->q);
553                 blk_stat_add(rq);
554         }
555
556         if (!test_bit(QUEUE_FLAG_SAME_COMP, &rq->q->queue_flags)) {
557                 rq->q->softirq_done_fn(rq);
558                 return;
559         }
560
561         cpu = get_cpu();
562         if (!test_bit(QUEUE_FLAG_SAME_FORCE, &rq->q->queue_flags))
563                 shared = cpus_share_cache(cpu, ctx->cpu);
564
565         if (cpu != ctx->cpu && !shared && cpu_online(ctx->cpu)) {
566                 rq->csd.func = __blk_mq_complete_request_remote;
567                 rq->csd.info = rq;
568                 rq->csd.flags = 0;
569                 smp_call_function_single_async(ctx->cpu, &rq->csd);
570         } else {
571                 rq->q->softirq_done_fn(rq);
572         }
573         put_cpu();
574 }
575
576 /**
577  * blk_mq_complete_request - end I/O on a request
578  * @rq:         the request being processed
579  *
580  * Description:
581  *      Ends all I/O on a request. It does not handle partial completions.
582  *      The actual completion happens out-of-order, through an IPI handler.
583  */
584 void blk_mq_complete_request(struct request *rq)
585 {
586         struct request_queue *q = rq->q;
587
588         if (unlikely(blk_should_fake_timeout(q)))
589                 return;
590         if (!blk_mark_rq_complete(rq))
591                 __blk_mq_complete_request(rq);
592 }
593 EXPORT_SYMBOL(blk_mq_complete_request);
594
595 int blk_mq_request_started(struct request *rq)
596 {
597         return test_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
598 }
599 EXPORT_SYMBOL_GPL(blk_mq_request_started);
600
601 void blk_mq_start_request(struct request *rq)
602 {
603         struct request_queue *q = rq->q;
604
605         blk_mq_sched_started_request(rq);
606
607         trace_block_rq_issue(q, rq);
608
609         if (test_bit(QUEUE_FLAG_STATS, &q->queue_flags)) {
610                 blk_stat_set_issue(&rq->issue_stat, blk_rq_sectors(rq));
611                 rq->rq_flags |= RQF_STATS;
612                 wbt_issue(q->rq_wb, &rq->issue_stat);
613         }
614
615         blk_add_timer(rq);
616
617         /*
618          * Ensure that ->deadline is visible before we set the started
619          * flag and clear the completed flag.
620          */
621         smp_mb__before_atomic();
622
623         /*
624          * Mark us as started and clear complete. Complete might have been
625          * set if requeue raced with timeout, which then marked it as
626          * complete. So be sure to clear complete again when we start
627          * the request, otherwise we'll ignore the completion event.
628          */
629         if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags))
630                 set_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
631         if (test_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags))
632                 clear_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);
633
634         if (q->dma_drain_size && blk_rq_bytes(rq)) {
635                 /*
636                  * Make sure space for the drain appears.  We know we can do
637                  * this because max_hw_segments has been adjusted to be one
638                  * fewer than the device can handle.
639                  */
640                 rq->nr_phys_segments++;
641         }
642 }
643 EXPORT_SYMBOL(blk_mq_start_request);
644
645 /*
646  * When we reach here because the queue is busy, the REQ_ATOM_COMPLETE
647  * flag isn't set yet, so there may be a race with the timeout handler.
648  * But since rq->deadline has only just been set in .queue_rq() in this
649  * situation, the race cannot happen in practice: rq->timeout should be
650  * large enough to cover the window between blk_mq_start_request()
651  * being called from .queue_rq() and REQ_ATOM_STARTED being cleared
652  * here.
653  */
654 static void __blk_mq_requeue_request(struct request *rq)
655 {
656         struct request_queue *q = rq->q;
657
658         trace_block_rq_requeue(q, rq);
659         wbt_requeue(q->rq_wb, &rq->issue_stat);
660
661         if (test_and_clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags)) {
662                 if (q->dma_drain_size && blk_rq_bytes(rq))
663                         rq->nr_phys_segments--;
664         }
665 }
666
667 void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list)
668 {
669         __blk_mq_requeue_request(rq);
670
671         /* this request will be re-inserted to io scheduler queue */
672         blk_mq_sched_requeue_request(rq);
673
674         BUG_ON(blk_queued_rq(rq));
675         blk_mq_add_to_requeue_list(rq, true, kick_requeue_list);
676 }
677 EXPORT_SYMBOL(blk_mq_requeue_request);
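/*
 * Usage sketch (illustrative only): a driver that hits a transient problem
 * after a request has already been started can hand it back to the block
 * layer instead of failing it, optionally deferring the rerun as a crude
 * back-off:
 *
 *	blk_mq_requeue_request(rq, false);
 *	blk_mq_delay_kick_requeue_list(rq->q, 100);
 *
 * Passing kick_requeue_list == true instead kicks the requeue work
 * immediately.
 */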
678
679 static void blk_mq_requeue_work(struct work_struct *work)
680 {
681         struct request_queue *q =
682                 container_of(work, struct request_queue, requeue_work.work);
683         LIST_HEAD(rq_list);
684         struct request *rq, *next;
685
686         spin_lock_irq(&q->requeue_lock);
687         list_splice_init(&q->requeue_list, &rq_list);
688         spin_unlock_irq(&q->requeue_lock);
689
690         list_for_each_entry_safe(rq, next, &rq_list, queuelist) {
691                 if (!(rq->rq_flags & RQF_SOFTBARRIER))
692                         continue;
693
694                 rq->rq_flags &= ~RQF_SOFTBARRIER;
695                 list_del_init(&rq->queuelist);
696                 blk_mq_sched_insert_request(rq, true, false, false, true);
697         }
698
699         while (!list_empty(&rq_list)) {
700                 rq = list_entry(rq_list.next, struct request, queuelist);
701                 list_del_init(&rq->queuelist);
702                 blk_mq_sched_insert_request(rq, false, false, false, true);
703         }
704
705         blk_mq_run_hw_queues(q, false);
706 }
707
708 void blk_mq_add_to_requeue_list(struct request *rq, bool at_head,
709                                 bool kick_requeue_list)
710 {
711         struct request_queue *q = rq->q;
712         unsigned long flags;
713
714         /*
715          * We abuse this flag, which is otherwise used by the I/O scheduler, to
716          * request head insertion from the workqueue.
717          */
718         BUG_ON(rq->rq_flags & RQF_SOFTBARRIER);
719
720         spin_lock_irqsave(&q->requeue_lock, flags);
721         if (at_head) {
722                 rq->rq_flags |= RQF_SOFTBARRIER;
723                 list_add(&rq->queuelist, &q->requeue_list);
724         } else {
725                 list_add_tail(&rq->queuelist, &q->requeue_list);
726         }
727         spin_unlock_irqrestore(&q->requeue_lock, flags);
728
729         if (kick_requeue_list)
730                 blk_mq_kick_requeue_list(q);
731 }
732 EXPORT_SYMBOL(blk_mq_add_to_requeue_list);
733
734 void blk_mq_kick_requeue_list(struct request_queue *q)
735 {
736         kblockd_schedule_delayed_work(&q->requeue_work, 0);
737 }
738 EXPORT_SYMBOL(blk_mq_kick_requeue_list);
739
740 void blk_mq_delay_kick_requeue_list(struct request_queue *q,
741                                     unsigned long msecs)
742 {
743         kblockd_mod_delayed_work_on(WORK_CPU_UNBOUND, &q->requeue_work,
744                                     msecs_to_jiffies(msecs));
745 }
746 EXPORT_SYMBOL(blk_mq_delay_kick_requeue_list);
747
748 struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag)
749 {
750         if (tag < tags->nr_tags) {
751                 prefetch(tags->rqs[tag]);
752                 return tags->rqs[tag];
753         }
754
755         return NULL;
756 }
757 EXPORT_SYMBOL(blk_mq_tag_to_rq);
758
759 struct blk_mq_timeout_data {
760         unsigned long next;
761         unsigned int next_set;
762 };
763
764 void blk_mq_rq_timed_out(struct request *req, bool reserved)
765 {
766         const struct blk_mq_ops *ops = req->q->mq_ops;
767         enum blk_eh_timer_return ret = BLK_EH_RESET_TIMER;
768
769         /*
770          * We know that complete is set at this point. If STARTED isn't set
771          * anymore, then the request isn't active and the "timeout" should
772          * just be ignored. This can happen due to the bitflag ordering.
773          * Timeout first checks if STARTED is set, and if it is, assumes
774          * the request is active. But if we race with completion, then
775          * both flags will get cleared. So check here again, and ignore
776          * a timeout event with a request that isn't active.
777          */
778         if (!test_bit(REQ_ATOM_STARTED, &req->atomic_flags))
779                 return;
780
781         if (ops->timeout)
782                 ret = ops->timeout(req, reserved);
783
784         switch (ret) {
785         case BLK_EH_HANDLED:
786                 __blk_mq_complete_request(req);
787                 break;
788         case BLK_EH_RESET_TIMER:
789                 blk_add_timer(req);
790                 blk_clear_rq_complete(req);
791                 break;
792         case BLK_EH_NOT_HANDLED:
793                 break;
794         default:
795                 printk(KERN_ERR "block: bad eh return: %d\n", ret);
796                 break;
797         }
798 }
799
800 static void blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,
801                 struct request *rq, void *priv, bool reserved)
802 {
803         struct blk_mq_timeout_data *data = priv;
804
805         if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags))
806                 return;
807
808         /*
809           * The rq being checked may already have been freed and
810           * reallocated by the time we get here; we avoid that race by
811           * checking rq->deadline and the REQ_ATOM_COMPLETE flag together:
812           *
813           * - if rq->deadline is observed as the new value because of
814           *   reuse, the rq won't be timed out, since the new deadline
815           *   has not expired yet.
816           * - if rq->deadline is observed as the previous value,
817           *   REQ_ATOM_COMPLETE won't be cleared in the reuse path, because
818           *   a barrier sits between setting rq->deadline and clearing the
819           *   flag in blk_mq_start_request(), so this rq won't time out either.
820          */
821         if (time_after_eq(jiffies, rq->deadline)) {
822                 if (!blk_mark_rq_complete(rq))
823                         blk_mq_rq_timed_out(rq, reserved);
824         } else if (!data->next_set || time_after(data->next, rq->deadline)) {
825                 data->next = rq->deadline;
826                 data->next_set = 1;
827         }
828 }
829
830 static void blk_mq_timeout_work(struct work_struct *work)
831 {
832         struct request_queue *q =
833                 container_of(work, struct request_queue, timeout_work);
834         struct blk_mq_timeout_data data = {
835                 .next           = 0,
836                 .next_set       = 0,
837         };
838         int i;
839
840         /* A deadlock might occur if a request is stuck requiring a
841          * timeout at the same time a queue freeze is waiting for
842          * completion, since the timeout code would not be able to
843          * acquire the queue reference here.
844          *
845          * That's why we don't use blk_queue_enter here; instead, we use
846          * percpu_ref_tryget directly, because we need to be able to
847          * obtain a reference even in the short window between the queue
848          * starting to freeze, by dropping the first reference in
849          * blk_freeze_queue_start, and the moment the last request is
850          * consumed, marked by the instant q_usage_counter reaches
851          * zero.
852          */
853         if (!percpu_ref_tryget(&q->q_usage_counter))
854                 return;
855
856         blk_mq_queue_tag_busy_iter(q, blk_mq_check_expired, &data);
857
858         if (data.next_set) {
859                 data.next = blk_rq_timeout(round_jiffies_up(data.next));
860                 mod_timer(&q->timeout, data.next);
861         } else {
862                 struct blk_mq_hw_ctx *hctx;
863
864                 queue_for_each_hw_ctx(q, hctx, i) {
865                         /* the hctx may be unmapped, so check it here */
866                         if (blk_mq_hw_queue_mapped(hctx))
867                                 blk_mq_tag_idle(hctx);
868                 }
869         }
870         blk_queue_exit(q);
871 }
872
873 struct flush_busy_ctx_data {
874         struct blk_mq_hw_ctx *hctx;
875         struct list_head *list;
876 };
877
878 static bool flush_busy_ctx(struct sbitmap *sb, unsigned int bitnr, void *data)
879 {
880         struct flush_busy_ctx_data *flush_data = data;
881         struct blk_mq_hw_ctx *hctx = flush_data->hctx;
882         struct blk_mq_ctx *ctx = hctx->ctxs[bitnr];
883
884         sbitmap_clear_bit(sb, bitnr);
885         spin_lock(&ctx->lock);
886         list_splice_tail_init(&ctx->rq_list, flush_data->list);
887         spin_unlock(&ctx->lock);
888         return true;
889 }
890
891 /*
892  * Process software queues that have been marked busy, splicing them
893  * to the for-dispatch list.
894  */
895 void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list)
896 {
897         struct flush_busy_ctx_data data = {
898                 .hctx = hctx,
899                 .list = list,
900         };
901
902         sbitmap_for_each_set(&hctx->ctx_map, flush_busy_ctx, &data);
903 }
904 EXPORT_SYMBOL_GPL(blk_mq_flush_busy_ctxs);
905
906 static inline unsigned int queued_to_index(unsigned int queued)
907 {
908         if (!queued)
909                 return 0;
910
911         return min(BLK_MQ_MAX_DISPATCH_ORDER - 1, ilog2(queued) + 1);
912 }
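/*
 * Worked example (illustrative): with BLK_MQ_MAX_DISPATCH_ORDER == 7, a
 * dispatch batch of 0 requests lands in bucket 0, 1 request in bucket 1
 * (ilog2(1) + 1), 2-3 requests in bucket 2, 4-7 in bucket 3, and anything
 * from 32 requests upwards ends up in the last bucket, 6.
 */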
913
914 bool blk_mq_get_driver_tag(struct request *rq, struct blk_mq_hw_ctx **hctx,
915                            bool wait)
916 {
917         struct blk_mq_alloc_data data = {
918                 .q = rq->q,
919                 .hctx = blk_mq_map_queue(rq->q, rq->mq_ctx->cpu),
920                 .flags = wait ? 0 : BLK_MQ_REQ_NOWAIT,
921         };
922
923         might_sleep_if(wait);
924
925         if (rq->tag != -1)
926                 goto done;
927
928         if (blk_mq_tag_is_reserved(data.hctx->sched_tags, rq->internal_tag))
929                 data.flags |= BLK_MQ_REQ_RESERVED;
930
931         rq->tag = blk_mq_get_tag(&data);
932         if (rq->tag >= 0) {
933                 if (blk_mq_tag_busy(data.hctx)) {
934                         rq->rq_flags |= RQF_MQ_INFLIGHT;
935                         atomic_inc(&data.hctx->nr_active);
936                 }
937                 data.hctx->tags->rqs[rq->tag] = rq;
938         }
939
940 done:
941         if (hctx)
942                 *hctx = data.hctx;
943         return rq->tag != -1;
944 }
945
946 static void __blk_mq_put_driver_tag(struct blk_mq_hw_ctx *hctx,
947                                     struct request *rq)
948 {
949         blk_mq_put_tag(hctx, hctx->tags, rq->mq_ctx, rq->tag);
950         rq->tag = -1;
951
952         if (rq->rq_flags & RQF_MQ_INFLIGHT) {
953                 rq->rq_flags &= ~RQF_MQ_INFLIGHT;
954                 atomic_dec(&hctx->nr_active);
955         }
956 }
957
958 static void blk_mq_put_driver_tag_hctx(struct blk_mq_hw_ctx *hctx,
959                                        struct request *rq)
960 {
961         if (rq->tag == -1 || rq->internal_tag == -1)
962                 return;
963
964         __blk_mq_put_driver_tag(hctx, rq);
965 }
966
967 static void blk_mq_put_driver_tag(struct request *rq)
968 {
969         struct blk_mq_hw_ctx *hctx;
970
971         if (rq->tag == -1 || rq->internal_tag == -1)
972                 return;
973
974         hctx = blk_mq_map_queue(rq->q, rq->mq_ctx->cpu);
975         __blk_mq_put_driver_tag(hctx, rq);
976 }
977
978 /*
979  * If we fail getting a driver tag because all the driver tags are already
980  * assigned and on the dispatch list, BUT the first entry does not have a
981  * tag, then we could deadlock. For that case, move entries with assigned
982  * driver tags to the front, leaving the set of tagged requests in the
983  * same order, and the untagged set in the same order.
984  */
985 static bool reorder_tags_to_front(struct list_head *list)
986 {
987         struct request *rq, *tmp, *first = NULL;
988
989         list_for_each_entry_safe_reverse(rq, tmp, list, queuelist) {
990                 if (rq == first)
991                         break;
992                 if (rq->tag != -1) {
993                         list_move(&rq->queuelist, list);
994                         if (!first)
995                                 first = rq;
996                 }
997         }
998
999         return first != NULL;
1000 }
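/*
 * Worked example (illustrative): given a dispatch list A, B, C, D where only
 * B and D already carry driver tags, the reverse walk above moves D and then
 * B to the head, yielding B, D, A, C: tagged requests first, with each group
 * keeping its original relative order, so the head of the list can always
 * make forward progress.
 */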
1001
1002 static int blk_mq_dispatch_wake(wait_queue_entry_t *wait, unsigned mode, int flags,
1003                                 void *key)
1004 {
1005         struct blk_mq_hw_ctx *hctx;
1006
1007         hctx = container_of(wait, struct blk_mq_hw_ctx, dispatch_wait);
1008
1009         list_del(&wait->entry);
1010         clear_bit_unlock(BLK_MQ_S_TAG_WAITING, &hctx->state);
1011         blk_mq_run_hw_queue(hctx, true);
1012         return 1;
1013 }
1014
1015 static bool blk_mq_dispatch_wait_add(struct blk_mq_hw_ctx *hctx)
1016 {
1017         struct sbq_wait_state *ws;
1018
1019         /*
1020          * The TAG_WAITING bit serves as a lock protecting hctx->dispatch_wait.
1021          * The thread which wins the race to grab this bit adds the hardware
1022          * queue to the wait queue.
1023          */
1024         if (test_bit(BLK_MQ_S_TAG_WAITING, &hctx->state) ||
1025             test_and_set_bit_lock(BLK_MQ_S_TAG_WAITING, &hctx->state))
1026                 return false;
1027
1028         init_waitqueue_func_entry(&hctx->dispatch_wait, blk_mq_dispatch_wake);
1029         ws = bt_wait_ptr(&hctx->tags->bitmap_tags, hctx);
1030
1031         /*
1032          * As soon as this returns, it's no longer safe to fiddle with
1033          * hctx->dispatch_wait, since a completion can wake up the wait queue
1034          * and unlock the bit.
1035          */
1036         add_wait_queue(&ws->wait, &hctx->dispatch_wait);
1037         return true;
1038 }
1039
1040 bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list)
1041 {
1042         struct blk_mq_hw_ctx *hctx;
1043         struct request *rq;
1044         int errors, queued;
1045
1046         if (list_empty(list))
1047                 return false;
1048
1049         /*
1050          * Now process all the entries, sending them to the driver.
1051          */
1052         errors = queued = 0;
1053         do {
1054                 struct blk_mq_queue_data bd;
1055                 blk_status_t ret;
1056
1057                 rq = list_first_entry(list, struct request, queuelist);
1058                 if (!blk_mq_get_driver_tag(rq, &hctx, false)) {
1059                         if (!queued && reorder_tags_to_front(list))
1060                                 continue;
1061
1062                         /*
1063                          * The initial allocation attempt failed, so we need to
1064                          * rerun the hardware queue when a tag is freed.
1065                          */
1066                         if (!blk_mq_dispatch_wait_add(hctx))
1067                                 break;
1068
1069                         /*
1070                          * It's possible that a tag was freed in the window
1071                          * between the allocation failure and adding the
1072                          * hardware queue to the wait queue.
1073                          */
1074                         if (!blk_mq_get_driver_tag(rq, &hctx, false))
1075                                 break;
1076                 }
1077
1078                 list_del_init(&rq->queuelist);
1079
1080                 bd.rq = rq;
1081
1082                 /*
1083                  * Flag last if we have no more requests, or if we have more
1084                  * but can't assign a driver tag to the next one.
1085                  */
1086                 if (list_empty(list))
1087                         bd.last = true;
1088                 else {
1089                         struct request *nxt;
1090
1091                         nxt = list_first_entry(list, struct request, queuelist);
1092                         bd.last = !blk_mq_get_driver_tag(nxt, NULL, false);
1093                 }
1094
1095                 ret = q->mq_ops->queue_rq(hctx, &bd);
1096                 if (ret == BLK_STS_RESOURCE) {
1097                         blk_mq_put_driver_tag_hctx(hctx, rq);
1098                         list_add(&rq->queuelist, list);
1099                         __blk_mq_requeue_request(rq);
1100                         break;
1101                 }
1102
1103                 if (unlikely(ret != BLK_STS_OK)) {
1104                         errors++;
1105                         blk_mq_end_request(rq, BLK_STS_IOERR);
1106                         continue;
1107                 }
1108
1109                 queued++;
1110         } while (!list_empty(list));
1111
1112         hctx->dispatched[queued_to_index(queued)]++;
1113
1114         /*
1115          * Any items that need requeuing? Stuff them into hctx->dispatch;
1116          * that is where we will continue on the next queue run.
1117          */
1118         if (!list_empty(list)) {
1119                 /*
1120                  * If an I/O scheduler has been configured and we got a driver
1121                  * tag for the next request already, free it again.
1122                  */
1123                 rq = list_first_entry(list, struct request, queuelist);
1124                 blk_mq_put_driver_tag(rq);
1125
1126                 spin_lock(&hctx->lock);
1127                 list_splice_init(list, &hctx->dispatch);
1128                 spin_unlock(&hctx->lock);
1129
1130                 /*
1131                  * If SCHED_RESTART was set by the caller of this function and
1132                  * it is no longer set that means that it was cleared by another
1133                  * thread and hence that a queue rerun is needed.
1134                  *
1135                  * If TAG_WAITING is set that means that an I/O scheduler has
1136                  * been configured and another thread is waiting for a driver
1137                  * tag. To guarantee fairness, do not rerun this hardware queue
1138                  * but let the other thread grab the driver tag.
1139                  *
1140                  * If no I/O scheduler has been configured it is possible that
1141                  * the hardware queue got stopped and restarted before requests
1142                  * were pushed back onto the dispatch list. Rerun the queue to
1143                  * avoid starvation. Notes:
1144                  * - blk_mq_run_hw_queue() checks whether or not a queue has
1145                  *   been stopped before rerunning a queue.
1146                  * - Some but not all block drivers stop a queue before
1147                  *   returning BLK_STS_RESOURCE. Two exceptions are scsi-mq
1148                  *   and dm-rq.
1149                  */
1150                 if (!blk_mq_sched_needs_restart(hctx) &&
1151                     !test_bit(BLK_MQ_S_TAG_WAITING, &hctx->state))
1152                         blk_mq_run_hw_queue(hctx, true);
1153         }
1154
1155         return (queued + errors) != 0;
1156 }
1157
1158 static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
1159 {
1160         int srcu_idx;
1161
1162         /*
1163          * We should be running this queue from one of the CPUs that
1164          * are mapped to it.
1165          *
1166          * There are at least two related races now between setting
1167          * hctx->next_cpu from blk_mq_hctx_next_cpu() and running
1168          * __blk_mq_run_hw_queue():
1169          *
1170          * - hctx->next_cpu is found offline in blk_mq_hctx_next_cpu(),
1171          *   but later becomes online; in that case this warning is
1172          *   entirely harmless
1173          *
1174          * - hctx->next_cpu is found online in blk_mq_hctx_next_cpu(),
1175          *   but later becomes offline; in that case the warning can't be
1176          *   triggered, and we rely on the blk-mq timeout handler to deal
1177          *   with requests dispatched to this hctx
1178          */
1179         if (!cpumask_test_cpu(raw_smp_processor_id(), hctx->cpumask) &&
1180                 cpu_online(hctx->next_cpu)) {
1181                 printk(KERN_WARNING "run queue from wrong CPU %d, hctx %s\n",
1182                         raw_smp_processor_id(),
1183                         cpumask_empty(hctx->cpumask) ? "inactive": "active");
1184                 dump_stack();
1185         }
1186
1187         /*
1188          * We can't run the queue inline with ints disabled. Ensure that
1189          * we catch bad users of this early.
1190          */
1191         WARN_ON_ONCE(in_interrupt());
1192
1193         if (!(hctx->flags & BLK_MQ_F_BLOCKING)) {
1194                 rcu_read_lock();
1195                 blk_mq_sched_dispatch_requests(hctx);
1196                 rcu_read_unlock();
1197         } else {
1198                 might_sleep();
1199
1200                 srcu_idx = srcu_read_lock(hctx->queue_rq_srcu);
1201                 blk_mq_sched_dispatch_requests(hctx);
1202                 srcu_read_unlock(hctx->queue_rq_srcu, srcu_idx);
1203         }
1204 }
1205
1206 /*
1207  * It'd be great if the workqueue API had a way to pass
1208  * in a mask and had some smarts for more clever placement.
1209  * For now we just round-robin here, switching for every
1210  * BLK_MQ_CPU_WORK_BATCH queued items.
1211  */
1212 static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx)
1213 {
1214         if (hctx->queue->nr_hw_queues == 1)
1215                 return WORK_CPU_UNBOUND;
1216
1217         if (--hctx->next_cpu_batch <= 0) {
1218                 int next_cpu;
1219
1220                 next_cpu = cpumask_next(hctx->next_cpu, hctx->cpumask);
1221                 if (next_cpu >= nr_cpu_ids)
1222                         next_cpu = cpumask_first(hctx->cpumask);
1223
1224                 hctx->next_cpu = next_cpu;
1225                 hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
1226         }
1227
1228         return hctx->next_cpu;
1229 }
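/*
 * Example (illustrative): with hctx->cpumask spanning CPUs 0, 2 and 4, the
 * run work stays on CPU 0 for roughly BLK_MQ_CPU_WORK_BATCH schedules, then
 * moves to CPU 2, then CPU 4, and wraps back to CPU 0.  Single hardware
 * queue setups skip the round-robin entirely and use WORK_CPU_UNBOUND.
 */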
1230
1231 static void __blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async,
1232                                         unsigned long msecs)
1233 {
1234         if (WARN_ON_ONCE(!blk_mq_hw_queue_mapped(hctx)))
1235                 return;
1236
1237         if (unlikely(blk_mq_hctx_stopped(hctx)))
1238                 return;
1239
1240         if (!async && !(hctx->flags & BLK_MQ_F_BLOCKING)) {
1241                 int cpu = get_cpu();
1242                 if (cpumask_test_cpu(cpu, hctx->cpumask)) {
1243                         __blk_mq_run_hw_queue(hctx);
1244                         put_cpu();
1245                         return;
1246                 }
1247
1248                 put_cpu();
1249         }
1250
1251         kblockd_schedule_delayed_work_on(blk_mq_hctx_next_cpu(hctx),
1252                                          &hctx->run_work,
1253                                          msecs_to_jiffies(msecs));
1254 }
1255
1256 void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs)
1257 {
1258         __blk_mq_delay_run_hw_queue(hctx, true, msecs);
1259 }
1260 EXPORT_SYMBOL(blk_mq_delay_run_hw_queue);
1261
1262 void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
1263 {
1264         __blk_mq_delay_run_hw_queue(hctx, async, 0);
1265 }
1266 EXPORT_SYMBOL(blk_mq_run_hw_queue);
1267
1268 void blk_mq_run_hw_queues(struct request_queue *q, bool async)
1269 {
1270         struct blk_mq_hw_ctx *hctx;
1271         int i;
1272
1273         queue_for_each_hw_ctx(q, hctx, i) {
1274                 if (!blk_mq_hctx_has_pending(hctx) ||
1275                     blk_mq_hctx_stopped(hctx))
1276                         continue;
1277
1278                 blk_mq_run_hw_queue(hctx, async);
1279         }
1280 }
1281 EXPORT_SYMBOL(blk_mq_run_hw_queues);
1282
1283 /**
1284  * blk_mq_queue_stopped() - check whether one or more hctxs have been stopped
1285  * @q: request queue.
1286  *
1287  * The caller is responsible for serializing this function against
1288  * blk_mq_{start,stop}_hw_queue().
1289  */
1290 bool blk_mq_queue_stopped(struct request_queue *q)
1291 {
1292         struct blk_mq_hw_ctx *hctx;
1293         int i;
1294
1295         queue_for_each_hw_ctx(q, hctx, i)
1296                 if (blk_mq_hctx_stopped(hctx))
1297                         return true;
1298
1299         return false;
1300 }
1301 EXPORT_SYMBOL(blk_mq_queue_stopped);
1302
1303 /*
1304  * This function is often used by a driver to pause .queue_rq() when
1305  * there aren't enough resources or some condition isn't satisfied, and
1306  * BLK_STS_RESOURCE is usually returned.
1307  *
1308  * We do not guarantee that dispatch can be drained or blocked
1309  * after blk_mq_stop_hw_queue() returns. Please use
1310  * blk_mq_quiesce_queue() for that requirement.
1311  */
1312 void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx)
1313 {
1314         cancel_delayed_work(&hctx->run_work);
1315
1316         set_bit(BLK_MQ_S_STOPPED, &hctx->state);
1317 }
1318 EXPORT_SYMBOL(blk_mq_stop_hw_queue);
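/*
 * Usage sketch (illustrative only; out_of_device_resources() is a
 * hypothetical helper): the classic pattern in a driver's .queue_rq() for a
 * device that runs out of internal resources is
 *
 *	if (out_of_device_resources(...)) {
 *		blk_mq_stop_hw_queue(hctx);
 *		return BLK_STS_RESOURCE;
 *	}
 *
 * with blk_mq_start_stopped_hw_queues() called from the completion path once
 * resources are available again.  As the comment above notes, this does not
 * guarantee .queue_rq() has stopped running; use blk_mq_quiesce_queue() when
 * that guarantee is required.
 */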
1319
1320 /*
1321  * This function is often used by a driver to pause .queue_rq() when
1322  * there aren't enough resources or some condition isn't satisfied, and
1323  * BLK_STS_RESOURCE is usually returned.
1324  *
1325  * We do not guarantee that dispatch can be drained or blocked
1326  * after blk_mq_stop_hw_queues() returns. Please use
1327  * blk_mq_quiesce_queue() for that requirement.
1328  */
1329 void blk_mq_stop_hw_queues(struct request_queue *q)
1330 {
1331         struct blk_mq_hw_ctx *hctx;
1332         int i;
1333
1334         queue_for_each_hw_ctx(q, hctx, i)
1335                 blk_mq_stop_hw_queue(hctx);
1336 }
1337 EXPORT_SYMBOL(blk_mq_stop_hw_queues);
1338
1339 void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx)
1340 {
1341         clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
1342
1343         blk_mq_run_hw_queue(hctx, false);
1344 }
1345 EXPORT_SYMBOL(blk_mq_start_hw_queue);
1346
1347 void blk_mq_start_hw_queues(struct request_queue *q)
1348 {
1349         struct blk_mq_hw_ctx *hctx;
1350         int i;
1351
1352         queue_for_each_hw_ctx(q, hctx, i)
1353                 blk_mq_start_hw_queue(hctx);
1354 }
1355 EXPORT_SYMBOL(blk_mq_start_hw_queues);
1356
1357 void blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
1358 {
1359         if (!blk_mq_hctx_stopped(hctx))
1360                 return;
1361
1362         clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
1363         blk_mq_run_hw_queue(hctx, async);
1364 }
1365 EXPORT_SYMBOL_GPL(blk_mq_start_stopped_hw_queue);
1366
1367 void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async)
1368 {
1369         struct blk_mq_hw_ctx *hctx;
1370         int i;
1371
1372         queue_for_each_hw_ctx(q, hctx, i)
1373                 blk_mq_start_stopped_hw_queue(hctx, async);
1374 }
1375 EXPORT_SYMBOL(blk_mq_start_stopped_hw_queues);
1376
1377 static void blk_mq_run_work_fn(struct work_struct *work)
1378 {
1379         struct blk_mq_hw_ctx *hctx;
1380
1381         hctx = container_of(work, struct blk_mq_hw_ctx, run_work.work);
1382
1383         /*
1384          * If we are stopped, don't run the queue. The exception is if
1385          * BLK_MQ_S_START_ON_RUN is set. For that case, we auto-clear
1386          * the STOPPED bit and run it.
1387          */
1388         if (test_bit(BLK_MQ_S_STOPPED, &hctx->state)) {
1389                 if (!test_bit(BLK_MQ_S_START_ON_RUN, &hctx->state))
1390                         return;
1391
1392                 clear_bit(BLK_MQ_S_START_ON_RUN, &hctx->state);
1393                 clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
1394         }
1395
1396         __blk_mq_run_hw_queue(hctx);
1397 }
1398
1399
1400 void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs)
1401 {
1402         if (WARN_ON_ONCE(!blk_mq_hw_queue_mapped(hctx)))
1403                 return;
1404
1405         /*
1406          * Stop the hw queue, then modify currently delayed work.
1407          * This should prevent us from running the queue prematurely.
1408          * Mark the queue as auto-clearing STOPPED when it runs.
1409          */
1410         blk_mq_stop_hw_queue(hctx);
1411         set_bit(BLK_MQ_S_START_ON_RUN, &hctx->state);
1412         kblockd_mod_delayed_work_on(blk_mq_hctx_next_cpu(hctx),
1413                                         &hctx->run_work,
1414                                         msecs_to_jiffies(msecs));
1415 }
1416 EXPORT_SYMBOL(blk_mq_delay_queue);
1417
1418 static inline void __blk_mq_insert_req_list(struct blk_mq_hw_ctx *hctx,
1419                                             struct request *rq,
1420                                             bool at_head)
1421 {
1422         struct blk_mq_ctx *ctx = rq->mq_ctx;
1423
1424         lockdep_assert_held(&ctx->lock);
1425
1426         trace_block_rq_insert(hctx->queue, rq);
1427
1428         if (at_head)
1429                 list_add(&rq->queuelist, &ctx->rq_list);
1430         else
1431                 list_add_tail(&rq->queuelist, &ctx->rq_list);
1432 }
1433
1434 void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
1435                              bool at_head)
1436 {
1437         struct blk_mq_ctx *ctx = rq->mq_ctx;
1438
1439         lockdep_assert_held(&ctx->lock);
1440
1441         __blk_mq_insert_req_list(hctx, rq, at_head);
1442         blk_mq_hctx_mark_pending(hctx, ctx);
1443 }
1444
1445 /*
1446  * Should only be used carefully, when the caller knows we want to
1447  * bypass a potential IO scheduler on the target device.
1448  */
1449 void blk_mq_request_bypass_insert(struct request *rq)
1450 {
1451         struct blk_mq_ctx *ctx = rq->mq_ctx;
1452         struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(rq->q, ctx->cpu);
1453
1454         spin_lock(&hctx->lock);
1455         list_add_tail(&rq->queuelist, &hctx->dispatch);
1456         spin_unlock(&hctx->lock);
1457
1458         blk_mq_run_hw_queue(hctx, false);
1459 }
1460
1461 void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
1462                             struct list_head *list)
1463
1464 {
1465         /*
1466          * Preemption doesn't flush the plug list, so it's possible that
1467          * ctx->cpu is offline now.
1468          */
1469         spin_lock(&ctx->lock);
1470         while (!list_empty(list)) {
1471                 struct request *rq;
1472
1473                 rq = list_first_entry(list, struct request, queuelist);
1474                 BUG_ON(rq->mq_ctx != ctx);
1475                 list_del_init(&rq->queuelist);
1476                 __blk_mq_insert_req_list(hctx, rq, false);
1477         }
1478         blk_mq_hctx_mark_pending(hctx, ctx);
1479         spin_unlock(&ctx->lock);
1480 }
1481
1482 static int plug_ctx_cmp(void *priv, struct list_head *a, struct list_head *b)
1483 {
1484         struct request *rqa = container_of(a, struct request, queuelist);
1485         struct request *rqb = container_of(b, struct request, queuelist);
1486
1487         return !(rqa->mq_ctx < rqb->mq_ctx ||
1488                  (rqa->mq_ctx == rqb->mq_ctx &&
1489                   blk_rq_pos(rqa) < blk_rq_pos(rqb)));
1490 }
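/*
 * Note (illustrative): the comparison orders plugged requests first by
 * software queue (the ctx pointer) and then by sector, so the loop in
 * blk_mq_flush_plug_list() below sees all requests for one ctx as a single
 * contiguous run and can insert each run with one call to
 * blk_mq_sched_insert_requests().
 */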
1491
1492 void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
1493 {
1494         struct blk_mq_ctx *this_ctx;
1495         struct request_queue *this_q;
1496         struct request *rq;
1497         LIST_HEAD(list);
1498         LIST_HEAD(ctx_list);
1499         unsigned int depth;
1500
1501         list_splice_init(&plug->mq_list, &list);
1502
1503         list_sort(NULL, &list, plug_ctx_cmp);
1504
1505         this_q = NULL;
1506         this_ctx = NULL;
1507         depth = 0;
1508
1509         while (!list_empty(&list)) {
1510                 rq = list_entry_rq(list.next);
1511                 list_del_init(&rq->queuelist);
1512                 BUG_ON(!rq->q);
1513                 if (rq->mq_ctx != this_ctx) {
1514                         if (this_ctx) {
1515                                 trace_block_unplug(this_q, depth, !from_schedule);
1516                                 blk_mq_sched_insert_requests(this_q, this_ctx,
1517                                                                 &ctx_list,
1518                                                                 from_schedule);
1519                         }
1520
1521                         this_ctx = rq->mq_ctx;
1522                         this_q = rq->q;
1523                         depth = 0;
1524                 }
1525
1526                 depth++;
1527                 list_add_tail(&rq->queuelist, &ctx_list);
1528         }
1529
1530         /*
1531          * If 'this_ctx' is set, we know we have entries to complete
1532          * on 'ctx_list'. Do those.
1533          */
1534         if (this_ctx) {
1535                 trace_block_unplug(this_q, depth, !from_schedule);
1536                 blk_mq_sched_insert_requests(this_q, this_ctx, &ctx_list,
1537                                                 from_schedule);
1538         }
1539 }
1540
1541 static void blk_mq_bio_to_request(struct request *rq, struct bio *bio)
1542 {
1543         blk_init_request_from_bio(rq, bio);
1544
1545         blk_account_io_start(rq, true);
1546 }
1547
1548 static inline bool hctx_allow_merges(struct blk_mq_hw_ctx *hctx)
1549 {
1550         return (hctx->flags & BLK_MQ_F_SHOULD_MERGE) &&
1551                 !blk_queue_nomerges(hctx->queue);
1552 }
1553
1554 static inline void blk_mq_queue_io(struct blk_mq_hw_ctx *hctx,
1555                                    struct blk_mq_ctx *ctx,
1556                                    struct request *rq)
1557 {
1558         spin_lock(&ctx->lock);
1559         __blk_mq_insert_request(hctx, rq, false);
1560         spin_unlock(&ctx->lock);
1561 }
1562
1563 static blk_qc_t request_to_qc_t(struct blk_mq_hw_ctx *hctx, struct request *rq)
1564 {
1565         if (rq->tag != -1)
1566                 return blk_tag_to_qc_t(rq->tag, hctx->queue_num, false);
1567
1568         return blk_tag_to_qc_t(rq->internal_tag, hctx->queue_num, true);
1569 }
1570
1571 static void __blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
1572                                         struct request *rq,
1573                                         blk_qc_t *cookie, bool may_sleep)
1574 {
1575         struct request_queue *q = rq->q;
1576         struct blk_mq_queue_data bd = {
1577                 .rq = rq,
1578                 .last = true,
1579         };
1580         blk_qc_t new_cookie;
1581         blk_status_t ret;
1582         bool run_queue = true;
1583
1584         /* RCU or SRCU read lock is needed before checking quiesced flag */
1585         if (blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q)) {
1586                 run_queue = false;
1587                 goto insert;
1588         }
1589
1590         if (q->elevator)
1591                 goto insert;
1592
1593         if (!blk_mq_get_driver_tag(rq, NULL, false))
1594                 goto insert;
1595
1596         new_cookie = request_to_qc_t(hctx, rq);
1597
1598         /*
1599          * If the driver returns BLK_STS_OK we are done; if it is busy
1600          * (BLK_STS_RESOURCE), requeue and insert the request as before.
1601          * Any other error ends the request (see the sketch below).
1602          */
1603         ret = q->mq_ops->queue_rq(hctx, &bd);
1604         switch (ret) {
1605         case BLK_STS_OK:
1606                 *cookie = new_cookie;
1607                 return;
1608         case BLK_STS_RESOURCE:
1609                 __blk_mq_requeue_request(rq);
1610                 goto insert;
1611         default:
1612                 *cookie = BLK_QC_T_NONE;
1613                 blk_mq_end_request(rq, ret);
1614                 return;
1615         }
1616
1617 insert:
1618         blk_mq_sched_insert_request(rq, false, run_queue, false, may_sleep);
1619 }
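
/*
 * Illustrative sketch, not part of this file: a minimal driver ->queue_rq
 * showing the three outcomes the direct-issue path above distinguishes.
 * The foo_* names are hypothetical driver helpers, not kernel APIs, so the
 * example is kept inside #if 0.
 */
#if 0
static blk_status_t foo_queue_rq(struct blk_mq_hw_ctx *hctx,
                                 const struct blk_mq_queue_data *bd)
{
        struct request *rq = bd->rq;
        struct foo_dev *dev = hctx->queue->queuedata;

        blk_mq_start_request(rq);

        if (foo_dev_busy(dev))          /* temporarily out of resources */
                return BLK_STS_RESOURCE;        /* caller requeues and inserts */

        if (foo_dev_submit(dev, rq))    /* hard failure */
                return BLK_STS_IOERR;   /* caller ends the request */

        return BLK_STS_OK;      /* completion arrives via blk_mq_complete_request() */
}
#endif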
1620
1621 static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
1622                 struct request *rq, blk_qc_t *cookie)
1623 {
1624         if (!(hctx->flags & BLK_MQ_F_BLOCKING)) {
1625                 rcu_read_lock();
1626                 __blk_mq_try_issue_directly(hctx, rq, cookie, false);
1627                 rcu_read_unlock();
1628         } else {
1629                 unsigned int srcu_idx;
1630
1631                 might_sleep();
1632
1633                 srcu_idx = srcu_read_lock(hctx->queue_rq_srcu);
1634                 __blk_mq_try_issue_directly(hctx, rq, cookie, true);
1635                 srcu_read_unlock(hctx->queue_rq_srcu, srcu_idx);
1636         }
1637 }
1638
1639 static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
1640 {
1641         const int is_sync = op_is_sync(bio->bi_opf);
1642         const int is_flush_fua = op_is_flush(bio->bi_opf);
1643         struct blk_mq_alloc_data data = { .flags = 0 };
1644         struct request *rq;
1645         unsigned int request_count = 0;
1646         struct blk_plug *plug;
1647         struct request *same_queue_rq = NULL;
1648         blk_qc_t cookie;
1649         unsigned int wb_acct;
1650
1651         blk_queue_bounce(q, &bio);
1652
1653         blk_queue_split(q, &bio);
1654
1655         if (!bio_integrity_prep(bio))
1656                 return BLK_QC_T_NONE;
1657
1658         if (!is_flush_fua && !blk_queue_nomerges(q) &&
1659             blk_attempt_plug_merge(q, bio, &request_count, &same_queue_rq))
1660                 return BLK_QC_T_NONE;
1661
1662         if (blk_mq_sched_bio_merge(q, bio))
1663                 return BLK_QC_T_NONE;
1664
1665         wb_acct = wbt_wait(q->rq_wb, bio, NULL);
1666
1667         trace_block_getrq(q, bio, bio->bi_opf);
1668
1669         rq = blk_mq_get_request(q, bio, bio->bi_opf, &data);
1670         if (unlikely(!rq)) {
1671                 __wbt_done(q->rq_wb, wb_acct);
1672                 if (bio->bi_opf & REQ_NOWAIT)
1673                         bio_wouldblock_error(bio);
1674                 return BLK_QC_T_NONE;
1675         }
1676
1677         wbt_track(&rq->issue_stat, wb_acct);
1678
1679         cookie = request_to_qc_t(data.hctx, rq);
1680
1681         plug = current->plug;
1682         if (unlikely(is_flush_fua)) {
1683                 blk_mq_put_ctx(data.ctx);
1684                 blk_mq_bio_to_request(rq, bio);
1685                 if (q->elevator) {
1686                         blk_mq_sched_insert_request(rq, false, true, true,
1687                                         true);
1688                 } else {
1689                         blk_insert_flush(rq);
1690                         blk_mq_run_hw_queue(data.hctx, true);
1691                 }
1692         } else if (plug && q->nr_hw_queues == 1) {
1693                 struct request *last = NULL;
1694
1695                 blk_mq_put_ctx(data.ctx);
1696                 blk_mq_bio_to_request(rq, bio);
1697
1698                 /*
1699                  * @request_count may become stale because of schedule
1700                  * out, so check the list again.
1701                  */
1702                 if (list_empty(&plug->mq_list))
1703                         request_count = 0;
1704                 else if (blk_queue_nomerges(q))
1705                         request_count = blk_plug_queued_count(q);
1706
1707                 if (!request_count)
1708                         trace_block_plug(q);
1709                 else
1710                         last = list_entry_rq(plug->mq_list.prev);
1711
1712                 if (request_count >= BLK_MAX_REQUEST_COUNT || (last &&
1713                     blk_rq_bytes(last) >= BLK_PLUG_FLUSH_SIZE)) {
1714                         blk_flush_plug_list(plug, false);
1715                         trace_block_plug(q);
1716                 }
1717
1718                 list_add_tail(&rq->queuelist, &plug->mq_list);
1719         } else if (plug && !blk_queue_nomerges(q)) {
1720                 blk_mq_bio_to_request(rq, bio);
1721
1722                 /*
1723                  * We do limited plugging. If the bio can be merged, do that.
1724                  * Otherwise the existing request in the plug list will be
1725                  * issued, so the plug list holds at most one request.
1726                  * The plug list might also have been flushed before we get
1727                  * here; in that case it is empty and same_queue_rq is invalid.
1728                  */
1729                 if (list_empty(&plug->mq_list))
1730                         same_queue_rq = NULL;
1731                 if (same_queue_rq)
1732                         list_del_init(&same_queue_rq->queuelist);
1733                 list_add_tail(&rq->queuelist, &plug->mq_list);
1734
1735                 blk_mq_put_ctx(data.ctx);
1736
1737                 if (same_queue_rq) {
1738                         data.hctx = blk_mq_map_queue(q,
1739                                         same_queue_rq->mq_ctx->cpu);
1740                         blk_mq_try_issue_directly(data.hctx, same_queue_rq,
1741                                         &cookie);
1742                 }
1743         } else if (q->nr_hw_queues > 1 && is_sync) {
1744                 blk_mq_put_ctx(data.ctx);
1745                 blk_mq_bio_to_request(rq, bio);
1746                 blk_mq_try_issue_directly(data.hctx, rq, &cookie);
1747         } else if (q->elevator) {
1748                 blk_mq_put_ctx(data.ctx);
1749                 blk_mq_bio_to_request(rq, bio);
1750                 blk_mq_sched_insert_request(rq, false, true, true, true);
1751         } else {
1752                 blk_mq_put_ctx(data.ctx);
1753                 blk_mq_bio_to_request(rq, bio);
1754                 blk_mq_queue_io(data.hctx, data.ctx, rq);
1755                 blk_mq_run_hw_queue(data.hctx, true);
1756         }
1757
1758         return cookie;
1759 }
1760
1761 void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
1762                      unsigned int hctx_idx)
1763 {
1764         struct page *page;
1765
1766         if (tags->rqs && set->ops->exit_request) {
1767                 int i;
1768
1769                 for (i = 0; i < tags->nr_tags; i++) {
1770                         struct request *rq = tags->static_rqs[i];
1771
1772                         if (!rq)
1773                                 continue;
1774                         set->ops->exit_request(set, rq, hctx_idx);
1775                         tags->static_rqs[i] = NULL;
1776                 }
1777         }
1778
1779         while (!list_empty(&tags->page_list)) {
1780                 page = list_first_entry(&tags->page_list, struct page, lru);
1781                 list_del_init(&page->lru);
1782                 /*
1783                  * Remove kmemleak object previously allocated in
1784                  * blk_mq_init_rq_map().
1785                  */
1786                 kmemleak_free(page_address(page));
1787                 __free_pages(page, page->private);
1788         }
1789 }
1790
1791 void blk_mq_free_rq_map(struct blk_mq_tags *tags)
1792 {
1793         kfree(tags->rqs);
1794         tags->rqs = NULL;
1795         kfree(tags->static_rqs);
1796         tags->static_rqs = NULL;
1797
1798         blk_mq_free_tags(tags);
1799 }
1800
1801 struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set,
1802                                         unsigned int hctx_idx,
1803                                         unsigned int nr_tags,
1804                                         unsigned int reserved_tags)
1805 {
1806         struct blk_mq_tags *tags;
1807         int node;
1808
1809         node = blk_mq_hw_queue_to_node(set->mq_map, hctx_idx);
1810         if (node == NUMA_NO_NODE)
1811                 node = set->numa_node;
1812
1813         tags = blk_mq_init_tags(nr_tags, reserved_tags, node,
1814                                 BLK_MQ_FLAG_TO_ALLOC_POLICY(set->flags));
1815         if (!tags)
1816                 return NULL;
1817
1818         tags->rqs = kzalloc_node(nr_tags * sizeof(struct request *),
1819                                  GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY,
1820                                  node);
1821         if (!tags->rqs) {
1822                 blk_mq_free_tags(tags);
1823                 return NULL;
1824         }
1825
1826         tags->static_rqs = kzalloc_node(nr_tags * sizeof(struct request *),
1827                                  GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY,
1828                                  node);
1829         if (!tags->static_rqs) {
1830                 kfree(tags->rqs);
1831                 blk_mq_free_tags(tags);
1832                 return NULL;
1833         }
1834
1835         return tags;
1836 }
1837
1838 static size_t order_to_size(unsigned int order)
1839 {
1840         return (size_t)PAGE_SIZE << order;
1841 }
1842
1843 int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
1844                      unsigned int hctx_idx, unsigned int depth)
1845 {
1846         unsigned int i, j, entries_per_page, max_order = 4;
1847         size_t rq_size, left;
1848         int node;
1849
1850         node = blk_mq_hw_queue_to_node(set->mq_map, hctx_idx);
1851         if (node == NUMA_NO_NODE)
1852                 node = set->numa_node;
1853
1854         INIT_LIST_HEAD(&tags->page_list);
1855
1856         /*
1857          * rq_size is the size of the request plus driver payload, rounded
1858          * to the cacheline size
1859          */
1860         rq_size = round_up(sizeof(struct request) + set->cmd_size,
1861                                 cache_line_size());
1862         left = rq_size * depth;
1863
1864         for (i = 0; i < depth; ) {
1865                 int this_order = max_order;
1866                 struct page *page;
1867                 int to_do;
1868                 void *p;
1869
1870                 while (this_order && left < order_to_size(this_order - 1))
1871                         this_order--;
1872
1873                 do {
1874                         page = alloc_pages_node(node,
1875                                 GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY | __GFP_ZERO,
1876                                 this_order);
1877                         if (page)
1878                                 break;
1879                         if (!this_order--)
1880                                 break;
1881                         if (order_to_size(this_order) < rq_size)
1882                                 break;
1883                 } while (1);
1884
1885                 if (!page)
1886                         goto fail;
1887
1888                 page->private = this_order;
1889                 list_add_tail(&page->lru, &tags->page_list);
1890
1891                 p = page_address(page);
1892                 /*
1893                  * Allow kmemleak to scan these pages as they contain pointers
1894                  * to additional allocations like via ops->init_request().
1895                  */
1896                 kmemleak_alloc(p, order_to_size(this_order), 1, GFP_NOIO);
1897                 entries_per_page = order_to_size(this_order) / rq_size;
1898                 to_do = min(entries_per_page, depth - i);
1899                 left -= to_do * rq_size;
1900                 for (j = 0; j < to_do; j++) {
1901                         struct request *rq = p;
1902
1903                         tags->static_rqs[i] = rq;
1904                         if (set->ops->init_request) {
1905                                 if (set->ops->init_request(set, rq, hctx_idx,
1906                                                 node)) {
1907                                         tags->static_rqs[i] = NULL;
1908                                         goto fail;
1909                                 }
1910                         }
1911
1912                         p += rq_size;
1913                         i++;
1914                 }
1915         }
1916         return 0;
1917
1918 fail:
1919         blk_mq_free_rqs(set, tags, hctx_idx);
1920         return -ENOMEM;
1921 }
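
/*
 * Worked example for blk_mq_alloc_rqs() above (illustrative numbers only,
 * assuming 64-byte cache lines and a driver cmd_size of 192 bytes): if
 * sizeof(struct request) were 320, rq_size = round_up(320 + 192, 64) = 512.
 * A max_order-4 allocation (16 pages, 64 KiB) then packs 65536 / 512 = 128
 * requests per chunk, so a queue depth of 256 needs two such chunks.
 */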
1922
1923 /*
1924  * 'cpu' is going away. Splice any existing rq_list entries from this
1925  * software queue to the hw queue dispatch list, and ensure that it
1926  * gets run.
1927  */
1928 static int blk_mq_hctx_notify_dead(unsigned int cpu, struct hlist_node *node)
1929 {
1930         struct blk_mq_hw_ctx *hctx;
1931         struct blk_mq_ctx *ctx;
1932         LIST_HEAD(tmp);
1933
1934         hctx = hlist_entry_safe(node, struct blk_mq_hw_ctx, cpuhp_dead);
1935         ctx = __blk_mq_get_ctx(hctx->queue, cpu);
1936
1937         spin_lock(&ctx->lock);
1938         if (!list_empty(&ctx->rq_list)) {
1939                 list_splice_init(&ctx->rq_list, &tmp);
1940                 blk_mq_hctx_clear_pending(hctx, ctx);
1941         }
1942         spin_unlock(&ctx->lock);
1943
1944         if (list_empty(&tmp))
1945                 return 0;
1946
1947         spin_lock(&hctx->lock);
1948         list_splice_tail_init(&tmp, &hctx->dispatch);
1949         spin_unlock(&hctx->lock);
1950
1951         blk_mq_run_hw_queue(hctx, true);
1952         return 0;
1953 }
1954
1955 static void blk_mq_remove_cpuhp(struct blk_mq_hw_ctx *hctx)
1956 {
1957         cpuhp_state_remove_instance_nocalls(CPUHP_BLK_MQ_DEAD,
1958                                             &hctx->cpuhp_dead);
1959 }
1960
1961 /* hctx->ctxs will be freed in queue's release handler */
1962 static void blk_mq_exit_hctx(struct request_queue *q,
1963                 struct blk_mq_tag_set *set,
1964                 struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
1965 {
1966         blk_mq_debugfs_unregister_hctx(hctx);
1967
1968         if (blk_mq_hw_queue_mapped(hctx))
1969                 blk_mq_tag_idle(hctx);
1970
1971         if (set->ops->exit_request)
1972                 set->ops->exit_request(set, hctx->fq->flush_rq, hctx_idx);
1973
1974         blk_mq_sched_exit_hctx(q, hctx, hctx_idx);
1975
1976         if (set->ops->exit_hctx)
1977                 set->ops->exit_hctx(hctx, hctx_idx);
1978
1979         if (hctx->flags & BLK_MQ_F_BLOCKING)
1980                 cleanup_srcu_struct(hctx->queue_rq_srcu);
1981
1982         blk_mq_remove_cpuhp(hctx);
1983         blk_free_flush_queue(hctx->fq);
1984         sbitmap_free(&hctx->ctx_map);
1985 }
1986
1987 static void blk_mq_exit_hw_queues(struct request_queue *q,
1988                 struct blk_mq_tag_set *set, int nr_queue)
1989 {
1990         struct blk_mq_hw_ctx *hctx;
1991         unsigned int i;
1992
1993         queue_for_each_hw_ctx(q, hctx, i) {
1994                 if (i == nr_queue)
1995                         break;
1996                 blk_mq_exit_hctx(q, set, hctx, i);
1997         }
1998 }
1999
2000 static int blk_mq_init_hctx(struct request_queue *q,
2001                 struct blk_mq_tag_set *set,
2002                 struct blk_mq_hw_ctx *hctx, unsigned hctx_idx)
2003 {
2004         int node;
2005
2006         node = hctx->numa_node;
2007         if (node == NUMA_NO_NODE)
2008                 node = hctx->numa_node = set->numa_node;
2009
2010         INIT_DELAYED_WORK(&hctx->run_work, blk_mq_run_work_fn);
2011         spin_lock_init(&hctx->lock);
2012         INIT_LIST_HEAD(&hctx->dispatch);
2013         hctx->queue = q;
2014         hctx->flags = set->flags & ~BLK_MQ_F_TAG_SHARED;
2015
2016         cpuhp_state_add_instance_nocalls(CPUHP_BLK_MQ_DEAD, &hctx->cpuhp_dead);
2017
2018         hctx->tags = set->tags[hctx_idx];
2019
2020         /*
2021          * Allocate space for all possible cpus to avoid allocation at
2022          * runtime
2023          */
2024         hctx->ctxs = kmalloc_node(nr_cpu_ids * sizeof(void *),
2025                                         GFP_KERNEL, node);
2026         if (!hctx->ctxs)
2027                 goto unregister_cpu_notifier;
2028
2029         if (sbitmap_init_node(&hctx->ctx_map, nr_cpu_ids, ilog2(8), GFP_KERNEL,
2030                               node))
2031                 goto free_ctxs;
2032
2033         hctx->nr_ctx = 0;
2034
2035         if (set->ops->init_hctx &&
2036             set->ops->init_hctx(hctx, set->driver_data, hctx_idx))
2037                 goto free_bitmap;
2038
2039         if (blk_mq_sched_init_hctx(q, hctx, hctx_idx))
2040                 goto exit_hctx;
2041
2042         hctx->fq = blk_alloc_flush_queue(q, hctx->numa_node, set->cmd_size);
2043         if (!hctx->fq)
2044                 goto sched_exit_hctx;
2045
2046         if (set->ops->init_request &&
2047             set->ops->init_request(set, hctx->fq->flush_rq, hctx_idx,
2048                                    node))
2049                 goto free_fq;
2050
2051         if (hctx->flags & BLK_MQ_F_BLOCKING)
2052                 init_srcu_struct(hctx->queue_rq_srcu);
2053
2054         blk_mq_debugfs_register_hctx(q, hctx);
2055
2056         return 0;
2057
2058  free_fq:
2059         kfree(hctx->fq);
2060  sched_exit_hctx:
2061         blk_mq_sched_exit_hctx(q, hctx, hctx_idx);
2062  exit_hctx:
2063         if (set->ops->exit_hctx)
2064                 set->ops->exit_hctx(hctx, hctx_idx);
2065  free_bitmap:
2066         sbitmap_free(&hctx->ctx_map);
2067  free_ctxs:
2068         kfree(hctx->ctxs);
2069  unregister_cpu_notifier:
2070         blk_mq_remove_cpuhp(hctx);
2071         return -1;
2072 }
2073
2074 static void blk_mq_init_cpu_queues(struct request_queue *q,
2075                                    unsigned int nr_hw_queues)
2076 {
2077         unsigned int i;
2078
2079         for_each_possible_cpu(i) {
2080                 struct blk_mq_ctx *__ctx = per_cpu_ptr(q->queue_ctx, i);
2081                 struct blk_mq_hw_ctx *hctx;
2082
2083                 __ctx->cpu = i;
2084                 spin_lock_init(&__ctx->lock);
2085                 INIT_LIST_HEAD(&__ctx->rq_list);
2086                 __ctx->queue = q;
2087
2088                 /* If the cpu isn't present, it is mapped to the first hctx */
2089                 if (!cpu_present(i))
2090                         continue;
2091
2092                 hctx = blk_mq_map_queue(q, i);
2093
2094                 /*
2095                  * Set local node, IFF we have more than one hw queue. If
2096                  * not, we remain on the home node of the device
2097                  */
2098                 if (nr_hw_queues > 1 && hctx->numa_node == NUMA_NO_NODE)
2099                         hctx->numa_node = local_memory_node(cpu_to_node(i));
2100         }
2101 }
2102
2103 static bool __blk_mq_alloc_rq_map(struct blk_mq_tag_set *set, int hctx_idx)
2104 {
2105         int ret = 0;
2106
2107         set->tags[hctx_idx] = blk_mq_alloc_rq_map(set, hctx_idx,
2108                                         set->queue_depth, set->reserved_tags);
2109         if (!set->tags[hctx_idx])
2110                 return false;
2111
2112         ret = blk_mq_alloc_rqs(set, set->tags[hctx_idx], hctx_idx,
2113                                 set->queue_depth);
2114         if (!ret)
2115                 return true;
2116
2117         blk_mq_free_rq_map(set->tags[hctx_idx]);
2118         set->tags[hctx_idx] = NULL;
2119         return false;
2120 }
2121
2122 static void blk_mq_free_map_and_requests(struct blk_mq_tag_set *set,
2123                                          unsigned int hctx_idx)
2124 {
2125         if (set->tags[hctx_idx]) {
2126                 blk_mq_free_rqs(set, set->tags[hctx_idx], hctx_idx);
2127                 blk_mq_free_rq_map(set->tags[hctx_idx]);
2128                 set->tags[hctx_idx] = NULL;
2129         }
2130 }
2131
2132 static void blk_mq_map_swqueue(struct request_queue *q)
2133 {
2134         unsigned int i, hctx_idx;
2135         struct blk_mq_hw_ctx *hctx;
2136         struct blk_mq_ctx *ctx;
2137         struct blk_mq_tag_set *set = q->tag_set;
2138
2139         /*
2140          * Avoid others reading an incomplete hctx->cpumask through sysfs
2141          */
2142         mutex_lock(&q->sysfs_lock);
2143
2144         queue_for_each_hw_ctx(q, hctx, i) {
2145                 cpumask_clear(hctx->cpumask);
2146                 hctx->nr_ctx = 0;
2147         }
2148
2149         /*
2150          * Map software to hardware queues.
2151          *
2152          * If the cpu isn't present, it is mapped to the first hctx.
2153          */
2154         for_each_present_cpu(i) {
2155                 hctx_idx = q->mq_map[i];
2156                 /* unmapped hw queue can be remapped after CPU topo changed */
2157                 if (!set->tags[hctx_idx] &&
2158                     !__blk_mq_alloc_rq_map(set, hctx_idx)) {
2159                         /*
2160                          * If tag initialization fails for some hctx,
2161                          * that hctx won't be brought online.  In this
2162                          * case, remap the current ctx to hctx[0], which
2163                          * is guaranteed to always have tags allocated.
2164                          */
2165                         q->mq_map[i] = 0;
2166                 }
2167
2168                 ctx = per_cpu_ptr(q->queue_ctx, i);
2169                 hctx = blk_mq_map_queue(q, i);
2170
2171                 cpumask_set_cpu(i, hctx->cpumask);
2172                 ctx->index_hw = hctx->nr_ctx;
2173                 hctx->ctxs[hctx->nr_ctx++] = ctx;
2174         }
2175
2176         mutex_unlock(&q->sysfs_lock);
2177
2178         queue_for_each_hw_ctx(q, hctx, i) {
2179                 /*
2180                  * If no software queues are mapped to this hardware queue,
2181                  * disable it and free the request entries.
2182                  */
2183                 if (!hctx->nr_ctx) {
2184                         /* Never unmap queue 0.  We need it as a
2185                          * fallback in case a new remap fails to
2186                          * allocate tags.
2187                          */
2188                         if (i && set->tags[i])
2189                                 blk_mq_free_map_and_requests(set, i);
2190
2191                         hctx->tags = NULL;
2192                         continue;
2193                 }
2194
2195                 hctx->tags = set->tags[i];
2196                 WARN_ON(!hctx->tags);
2197
2198                 /*
2199                  * Set the map size to the number of mapped software queues.
2200                  * This is more accurate and more efficient than looping
2201                  * over all possibly mapped software queues.
2202                  */
2203                 sbitmap_resize(&hctx->ctx_map, hctx->nr_ctx);
2204
2205                 /*
2206                  * Initialize batch roundrobin counts
2207                  */
2208                 hctx->next_cpu = cpumask_first(hctx->cpumask);
2209                 hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
2210         }
2211 }
2212
2213 /*
2214  * Caller needs to ensure that we're either frozen/quiesced, or that
2215  * the queue isn't live yet.
2216  */
2217 static void queue_set_hctx_shared(struct request_queue *q, bool shared)
2218 {
2219         struct blk_mq_hw_ctx *hctx;
2220         int i;
2221
2222         queue_for_each_hw_ctx(q, hctx, i) {
2223                 if (shared) {
2224                         if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
2225                                 atomic_inc(&q->shared_hctx_restart);
2226                         hctx->flags |= BLK_MQ_F_TAG_SHARED;
2227                 } else {
2228                         if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
2229                                 atomic_dec(&q->shared_hctx_restart);
2230                         hctx->flags &= ~BLK_MQ_F_TAG_SHARED;
2231                 }
2232         }
2233 }
2234
2235 static void blk_mq_update_tag_set_depth(struct blk_mq_tag_set *set,
2236                                         bool shared)
2237 {
2238         struct request_queue *q;
2239
2240         lockdep_assert_held(&set->tag_list_lock);
2241
2242         list_for_each_entry(q, &set->tag_list, tag_set_list) {
2243                 blk_mq_freeze_queue(q);
2244                 queue_set_hctx_shared(q, shared);
2245                 blk_mq_unfreeze_queue(q);
2246         }
2247 }
2248
2249 static void blk_mq_del_queue_tag_set(struct request_queue *q)
2250 {
2251         struct blk_mq_tag_set *set = q->tag_set;
2252
2253         mutex_lock(&set->tag_list_lock);
2254         list_del_rcu(&q->tag_set_list);
2255         if (list_is_singular(&set->tag_list)) {
2256                 /* just transitioned to unshared */
2257                 set->flags &= ~BLK_MQ_F_TAG_SHARED;
2258                 /* update existing queue */
2259                 blk_mq_update_tag_set_depth(set, false);
2260         }
2261         mutex_unlock(&set->tag_list_lock);
2262         synchronize_rcu();
2263         INIT_LIST_HEAD(&q->tag_set_list);
2264 }
2265
2266 static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set,
2267                                      struct request_queue *q)
2268 {
2269         q->tag_set = set;
2270
2271         mutex_lock(&set->tag_list_lock);
2272
2273         /* Check to see if we're transitioning to shared (from 1 to 2 queues). */
2274         if (!list_empty(&set->tag_list) && !(set->flags & BLK_MQ_F_TAG_SHARED)) {
2275                 set->flags |= BLK_MQ_F_TAG_SHARED;
2276                 /* update existing queue */
2277                 blk_mq_update_tag_set_depth(set, true);
2278         }
2279         if (set->flags & BLK_MQ_F_TAG_SHARED)
2280                 queue_set_hctx_shared(q, true);
2281         list_add_tail_rcu(&q->tag_set_list, &set->tag_list);
2282
2283         mutex_unlock(&set->tag_list_lock);
2284 }
2285
2286 /*
2287  * This is the actual release handler for mq, but we run it from the
2288  * request queue's release handler to avoid use-after-free headaches:
2289  * q->mq_kobj shouldn't have been introduced, but we can't group the
2290  * ctx/hctx kobjects without it.
2291  */
2292 void blk_mq_release(struct request_queue *q)
2293 {
2294         struct blk_mq_hw_ctx *hctx;
2295         unsigned int i;
2296
2297         /* hctx kobj stays in hctx */
2298         queue_for_each_hw_ctx(q, hctx, i) {
2299                 if (!hctx)
2300                         continue;
2301                 kobject_put(&hctx->kobj);
2302         }
2303
2304         q->mq_map = NULL;
2305
2306         kfree(q->queue_hw_ctx);
2307
2308         /*
2309          * release .mq_kobj and sw queue's kobject now because
2310          * both share lifetime with request queue.
2311          */
2312         blk_mq_sysfs_deinit(q);
2313
2314         free_percpu(q->queue_ctx);
2315 }
2316
2317 struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
2318 {
2319         struct request_queue *uninit_q, *q;
2320
2321         uninit_q = blk_alloc_queue_node(GFP_KERNEL, set->numa_node);
2322         if (!uninit_q)
2323                 return ERR_PTR(-ENOMEM);
2324
2325         q = blk_mq_init_allocated_queue(set, uninit_q);
2326         if (IS_ERR(q))
2327                 blk_cleanup_queue(uninit_q);
2328
2329         return q;
2330 }
2331 EXPORT_SYMBOL(blk_mq_init_queue);
2332
2333 static int blk_mq_hw_ctx_size(struct blk_mq_tag_set *tag_set)
2334 {
2335         int hw_ctx_size = sizeof(struct blk_mq_hw_ctx);
2336
2337         BUILD_BUG_ON(ALIGN(offsetof(struct blk_mq_hw_ctx, queue_rq_srcu),
2338                            __alignof__(struct blk_mq_hw_ctx)) !=
2339                      sizeof(struct blk_mq_hw_ctx));
2340
2341         if (tag_set->flags & BLK_MQ_F_BLOCKING)
2342                 hw_ctx_size += sizeof(struct srcu_struct);
2343
2344         return hw_ctx_size;
2345 }
2346
2347 static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
2348                                                 struct request_queue *q)
2349 {
2350         int i, j;
2351         struct blk_mq_hw_ctx **hctxs = q->queue_hw_ctx;
2352
2353         blk_mq_sysfs_unregister(q);
2354
2355         /* protect against switching io scheduler  */
2356         mutex_lock(&q->sysfs_lock);
2357         for (i = 0; i < set->nr_hw_queues; i++) {
2358                 int node;
2359
2360                 if (hctxs[i])
2361                         continue;
2362
2363                 node = blk_mq_hw_queue_to_node(q->mq_map, i);
2364                 hctxs[i] = kzalloc_node(blk_mq_hw_ctx_size(set),
2365                                         GFP_KERNEL, node);
2366                 if (!hctxs[i])
2367                         break;
2368
2369                 if (!zalloc_cpumask_var_node(&hctxs[i]->cpumask, GFP_KERNEL,
2370                                                 node)) {
2371                         kfree(hctxs[i]);
2372                         hctxs[i] = NULL;
2373                         break;
2374                 }
2375
2376                 atomic_set(&hctxs[i]->nr_active, 0);
2377                 hctxs[i]->numa_node = node;
2378                 hctxs[i]->queue_num = i;
2379
2380                 if (blk_mq_init_hctx(q, set, hctxs[i], i)) {
2381                         free_cpumask_var(hctxs[i]->cpumask);
2382                         kfree(hctxs[i]);
2383                         hctxs[i] = NULL;
2384                         break;
2385                 }
2386                 blk_mq_hctx_kobj_init(hctxs[i]);
2387         }
2388         for (j = i; j < q->nr_hw_queues; j++) {
2389                 struct blk_mq_hw_ctx *hctx = hctxs[j];
2390
2391                 if (hctx) {
2392                         if (hctx->tags)
2393                                 blk_mq_free_map_and_requests(set, j);
2394                         blk_mq_exit_hctx(q, set, hctx, j);
2395                         kobject_put(&hctx->kobj);
2396                         hctxs[j] = NULL;
2397
2398                 }
2399         }
2400         q->nr_hw_queues = i;
2401         mutex_unlock(&q->sysfs_lock);
2402         blk_mq_sysfs_register(q);
2403 }
2404
2405 struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
2406                                                   struct request_queue *q)
2407 {
2408         /* mark the queue as mq asap */
2409         q->mq_ops = set->ops;
2410
2411         q->poll_cb = blk_stat_alloc_callback(blk_mq_poll_stats_fn,
2412                                              blk_mq_poll_stats_bkt,
2413                                              BLK_MQ_POLL_STATS_BKTS, q);
2414         if (!q->poll_cb)
2415                 goto err_exit;
2416
2417         q->queue_ctx = alloc_percpu(struct blk_mq_ctx);
2418         if (!q->queue_ctx)
2419                 goto err_exit;
2420
2421         /* init q->mq_kobj and sw queues' kobjects */
2422         blk_mq_sysfs_init(q);
2423
2424         q->queue_hw_ctx = kzalloc_node(nr_cpu_ids * sizeof(*(q->queue_hw_ctx)),
2425                                                 GFP_KERNEL, set->numa_node);
2426         if (!q->queue_hw_ctx)
2427                 goto err_percpu;
2428
2429         q->mq_map = set->mq_map;
2430
2431         blk_mq_realloc_hw_ctxs(set, q);
2432         if (!q->nr_hw_queues)
2433                 goto err_hctxs;
2434
2435         INIT_WORK(&q->timeout_work, blk_mq_timeout_work);
2436         blk_queue_rq_timeout(q, set->timeout ? set->timeout : 30 * HZ);
2437
2438         q->nr_queues = nr_cpu_ids;
2439
2440         q->queue_flags |= QUEUE_FLAG_MQ_DEFAULT;
2441
2442         if (!(set->flags & BLK_MQ_F_SG_MERGE))
2443                 q->queue_flags |= 1 << QUEUE_FLAG_NO_SG_MERGE;
2444
2445         q->sg_reserved_size = INT_MAX;
2446
2447         INIT_DELAYED_WORK(&q->requeue_work, blk_mq_requeue_work);
2448         INIT_LIST_HEAD(&q->requeue_list);
2449         spin_lock_init(&q->requeue_lock);
2450
2451         blk_queue_make_request(q, blk_mq_make_request);
2452
2453         /*
2454          * Do this after blk_queue_make_request() overrides it...
2455          */
2456         q->nr_requests = set->queue_depth;
2457
2458         /*
2459          * Default to classic polling
2460          */
2461         q->poll_nsec = -1;
2462
2463         if (set->ops->complete)
2464                 blk_queue_softirq_done(q, set->ops->complete);
2465
2466         blk_mq_init_cpu_queues(q, set->nr_hw_queues);
2467         blk_mq_add_queue_tag_set(set, q);
2468         blk_mq_map_swqueue(q);
2469
2470         if (!(set->flags & BLK_MQ_F_NO_SCHED)) {
2471                 int ret;
2472
2473                 ret = blk_mq_sched_init(q);
2474                 if (ret)
2475                         return ERR_PTR(ret);
2476         }
2477
2478         return q;
2479
2480 err_hctxs:
2481         kfree(q->queue_hw_ctx);
2482 err_percpu:
2483         free_percpu(q->queue_ctx);
2484 err_exit:
2485         q->mq_ops = NULL;
2486         return ERR_PTR(-ENOMEM);
2487 }
2488 EXPORT_SYMBOL(blk_mq_init_allocated_queue);
2489
2490 void blk_mq_free_queue(struct request_queue *q)
2491 {
2492         struct blk_mq_tag_set *set = q->tag_set;
2493
2494         /* Checks hctx->flags & BLK_MQ_F_TAG_SHARED. */
2495         blk_mq_exit_hw_queues(q, set, set->nr_hw_queues);
2496         /* May clear BLK_MQ_F_TAG_SHARED in hctx->flags. */
2497         blk_mq_del_queue_tag_set(q);
2498 }
2499
2500 /* Basically redo blk_mq_init_queue with queue frozen */
2501 static void blk_mq_queue_reinit(struct request_queue *q)
2502 {
2503         WARN_ON_ONCE(!atomic_read(&q->mq_freeze_depth));
2504
2505         blk_mq_debugfs_unregister_hctxs(q);
2506         blk_mq_sysfs_unregister(q);
2507
2508         /*
2509          * Redo blk_mq_init_cpu_queues and blk_mq_init_hw_queues. FIXME: maybe
2510          * we should change hctx numa_node according to the new topology (this
2511          * involves freeing and re-allocating memory; worth doing?)
2512          */
2513
2514         blk_mq_map_swqueue(q);
2515
2516         blk_mq_sysfs_register(q);
2517         blk_mq_debugfs_register_hctxs(q);
2518 }
2519
2520 static int __blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
2521 {
2522         int i;
2523
2524         for (i = 0; i < set->nr_hw_queues; i++)
2525                 if (!__blk_mq_alloc_rq_map(set, i))
2526                         goto out_unwind;
2527
2528         return 0;
2529
2530 out_unwind:
2531         while (--i >= 0)
2532                 blk_mq_free_rq_map(set->tags[i]);
2533
2534         return -ENOMEM;
2535 }
2536
2537 /*
2538  * Allocate the request maps associated with this tag_set. Note that this
2539  * may reduce the depth asked for, if memory is tight. set->queue_depth
2540  * will be updated to reflect the allocated depth.
2541  */
2542 static int blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
2543 {
2544         unsigned int depth;
2545         int err;
2546
2547         depth = set->queue_depth;
2548         do {
2549                 err = __blk_mq_alloc_rq_maps(set);
2550                 if (!err)
2551                         break;
2552
2553                 set->queue_depth >>= 1;
2554                 if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN) {
2555                         err = -ENOMEM;
2556                         break;
2557                 }
2558         } while (set->queue_depth);
2559
2560         if (!set->queue_depth || err) {
2561                 pr_err("blk-mq: failed to allocate request map\n");
2562                 return -ENOMEM;
2563         }
2564
2565         if (depth != set->queue_depth)
2566                 pr_info("blk-mq: reduced tag depth (%u -> %u)\n",
2567                                                 depth, set->queue_depth);
2568
2569         return 0;
2570 }
2571
2572 static int blk_mq_update_queue_map(struct blk_mq_tag_set *set)
2573 {
2574         if (set->ops->map_queues) {
2575                 int cpu;
2576                 /*
2577                  * transport .map_queues is usually done in the following
2578                  * way:
2579                  *
2580                  * for (queue = 0; queue < set->nr_hw_queues; queue++) {
2581                  *      mask = get_cpu_mask(queue)
2582                  *      for_each_cpu(cpu, mask)
2583                  *              set->mq_map[cpu] = queue;
2584                  * }
2585                  *
2586                  * When we need to remap, the table has to be cleared to
2587                  * kill stale mappings, since a CPU may end up not mapped
2588                  * to any hw queue.
2589                  */
2590                 for_each_possible_cpu(cpu)
2591                         set->mq_map[cpu] = 0;
2592
2593                 return set->ops->map_queues(set);
2594         } else
2595                 return blk_mq_map_queues(set);
2596 }
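
/*
 * Illustrative sketch, not part of this file: a driver ->map_queues callback
 * following the pattern described in the comment above. foo_queue_cpumask()
 * is a hypothetical helper that returns the CPUs a given hw queue should
 * serve (e.g. derived from the device's IRQ affinity), hence the #if 0.
 */
#if 0
static int foo_map_queues(struct blk_mq_tag_set *set)
{
        unsigned int queue;
        int cpu;

        for (queue = 0; queue < set->nr_hw_queues; queue++) {
                const struct cpumask *mask = foo_queue_cpumask(set, queue);

                for_each_cpu(cpu, mask)
                        set->mq_map[cpu] = queue;
        }

        /* CPUs not covered by any mask keep the 0 written by the caller. */
        return 0;
}
#endif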
2597
2598 /*
2599  * Alloc a tag set to be associated with one or more request queues.
2600  * May fail with EINVAL for various error conditions. May adjust the
2601  * requested depth down, if it is too large. In that case, the adjusted
2602  * value will be stored in set->queue_depth.
2603  */
2604 int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
2605 {
2606         int ret;
2607
2608         BUILD_BUG_ON(BLK_MQ_MAX_DEPTH > 1 << BLK_MQ_UNIQUE_TAG_BITS);
2609
2610         if (!set->nr_hw_queues)
2611                 return -EINVAL;
2612         if (!set->queue_depth)
2613                 return -EINVAL;
2614         if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN)
2615                 return -EINVAL;
2616
2617         if (!set->ops->queue_rq)
2618                 return -EINVAL;
2619
2620         if (set->queue_depth > BLK_MQ_MAX_DEPTH) {
2621                 pr_info("blk-mq: reduced tag depth to %u\n",
2622                         BLK_MQ_MAX_DEPTH);
2623                 set->queue_depth = BLK_MQ_MAX_DEPTH;
2624         }
2625
2626         /*
2627          * If a crashdump is active, then we are potentially in a very
2628          * memory-constrained environment. Limit us to 1 queue and
2629          * 64 tags to prevent using too much memory.
2630          */
2631         if (is_kdump_kernel()) {
2632                 set->nr_hw_queues = 1;
2633                 set->queue_depth = min(64U, set->queue_depth);
2634         }
2635         /*
2636          * There is no use for more h/w queues than cpus.
2637          */
2638         if (set->nr_hw_queues > nr_cpu_ids)
2639                 set->nr_hw_queues = nr_cpu_ids;
2640
2641         set->tags = kzalloc_node(nr_cpu_ids * sizeof(struct blk_mq_tags *),
2642                                  GFP_KERNEL, set->numa_node);
2643         if (!set->tags)
2644                 return -ENOMEM;
2645
2646         ret = -ENOMEM;
2647         set->mq_map = kzalloc_node(sizeof(*set->mq_map) * nr_cpu_ids,
2648                         GFP_KERNEL, set->numa_node);
2649         if (!set->mq_map)
2650                 goto out_free_tags;
2651
2652         ret = blk_mq_update_queue_map(set);
2653         if (ret)
2654                 goto out_free_mq_map;
2655
2656         ret = blk_mq_alloc_rq_maps(set);
2657         if (ret)
2658                 goto out_free_mq_map;
2659
2660         mutex_init(&set->tag_list_lock);
2661         INIT_LIST_HEAD(&set->tag_list);
2662
2663         return 0;
2664
2665 out_free_mq_map:
2666         kfree(set->mq_map);
2667         set->mq_map = NULL;
2668 out_free_tags:
2669         kfree(set->tags);
2670         set->tags = NULL;
2671         return ret;
2672 }
2673 EXPORT_SYMBOL(blk_mq_alloc_tag_set);
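
/*
 * Illustrative sketch, not part of this file: the usual driver bring-up
 * sequence around blk_mq_alloc_tag_set()/blk_mq_init_queue(). struct foo_dev,
 * struct foo_cmd and foo_mq_ops are hypothetical, so the example stays inside
 * #if 0; error handling is abbreviated.
 */
#if 0
static int foo_init_queue(struct foo_dev *dev)
{
        struct blk_mq_tag_set *set = &dev->tag_set;
        struct request_queue *q;
        int ret;

        memset(set, 0, sizeof(*set));
        set->ops = &foo_mq_ops;                 /* must provide ->queue_rq */
        set->nr_hw_queues = 1;
        set->queue_depth = 128;                 /* may be reduced if memory is tight */
        set->numa_node = NUMA_NO_NODE;
        set->cmd_size = sizeof(struct foo_cmd); /* per-request driver payload */
        set->flags = BLK_MQ_F_SHOULD_MERGE;

        ret = blk_mq_alloc_tag_set(set);
        if (ret)
                return ret;

        q = blk_mq_init_queue(set);
        if (IS_ERR(q)) {
                blk_mq_free_tag_set(set);
                return PTR_ERR(q);
        }

        q->queuedata = dev;
        dev->queue = q;
        return 0;
}
#endif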
2674
2675 void blk_mq_free_tag_set(struct blk_mq_tag_set *set)
2676 {
2677         int i;
2678
2679         for (i = 0; i < nr_cpu_ids; i++)
2680                 blk_mq_free_map_and_requests(set, i);
2681
2682         kfree(set->mq_map);
2683         set->mq_map = NULL;
2684
2685         kfree(set->tags);
2686         set->tags = NULL;
2687 }
2688 EXPORT_SYMBOL(blk_mq_free_tag_set);
2689
2690 int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
2691 {
2692         struct blk_mq_tag_set *set = q->tag_set;
2693         struct blk_mq_hw_ctx *hctx;
2694         int i, ret;
2695
2696         if (!set)
2697                 return -EINVAL;
2698
2699         blk_mq_freeze_queue(q);
2700
2701         ret = 0;
2702         queue_for_each_hw_ctx(q, hctx, i) {
2703                 if (!hctx->tags)
2704                         continue;
2705                 /*
2706                  * If we're using an MQ scheduler, just update the scheduler
2707                  * queue depth. This is similar to what the old code would do.
2708                  */
2709                 if (!hctx->sched_tags) {
2710                         ret = blk_mq_tag_update_depth(hctx, &hctx->tags,
2711                                                         min(nr, set->queue_depth),
2712                                                         false);
2713                 } else {
2714                         ret = blk_mq_tag_update_depth(hctx, &hctx->sched_tags,
2715                                                         nr, true);
2716                 }
2717                 if (ret)
2718                         break;
2719         }
2720
2721         if (!ret)
2722                 q->nr_requests = nr;
2723
2724         blk_mq_unfreeze_queue(q);
2725
2726         return ret;
2727 }
2728
2729 static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
2730                                                         int nr_hw_queues)
2731 {
2732         struct request_queue *q;
2733
2734         lockdep_assert_held(&set->tag_list_lock);
2735
2736         if (nr_hw_queues > nr_cpu_ids)
2737                 nr_hw_queues = nr_cpu_ids;
2738         if (nr_hw_queues < 1 || nr_hw_queues == set->nr_hw_queues)
2739                 return;
2740
2741         list_for_each_entry(q, &set->tag_list, tag_set_list)
2742                 blk_mq_freeze_queue(q);
2743         /*
2744          * Sync with blk_mq_queue_tag_busy_iter.
2745          */
2746         synchronize_rcu();
2747
2748         set->nr_hw_queues = nr_hw_queues;
2749         blk_mq_update_queue_map(set);
2750         list_for_each_entry(q, &set->tag_list, tag_set_list) {
2751                 blk_mq_realloc_hw_ctxs(set, q);
2752                 blk_mq_queue_reinit(q);
2753         }
2754
2755         list_for_each_entry(q, &set->tag_list, tag_set_list)
2756                 blk_mq_unfreeze_queue(q);
2757 }
2758
2759 void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues)
2760 {
2761         mutex_lock(&set->tag_list_lock);
2762         __blk_mq_update_nr_hw_queues(set, nr_hw_queues);
2763         mutex_unlock(&set->tag_list_lock);
2764 }
2765 EXPORT_SYMBOL_GPL(blk_mq_update_nr_hw_queues);
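
/*
 * Illustrative usage note: a driver whose usable hardware queue count changes
 * at runtime (e.g. after a controller reset) calls
 * blk_mq_update_nr_hw_queues(&dev->tag_set, new_count); every queue sharing
 * the tag set is frozen, the cpu-to-queue map is rebuilt and the queues are
 * unfrozen, as implemented above. "dev" and "new_count" are placeholders.
 */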
2766
2767 /* Enable polling stats and return whether they were already enabled. */
2768 static bool blk_poll_stats_enable(struct request_queue *q)
2769 {
2770         if (test_bit(QUEUE_FLAG_POLL_STATS, &q->queue_flags) ||
2771             test_and_set_bit(QUEUE_FLAG_POLL_STATS, &q->queue_flags))
2772                 return true;
2773         blk_stat_add_callback(q, q->poll_cb);
2774         return false;
2775 }
2776
2777 static void blk_mq_poll_stats_start(struct request_queue *q)
2778 {
2779         /*
2780          * We don't arm the callback if polling stats are not enabled or the
2781          * callback is already active.
2782          */
2783         if (!test_bit(QUEUE_FLAG_POLL_STATS, &q->queue_flags) ||
2784             blk_stat_is_active(q->poll_cb))
2785                 return;
2786
2787         blk_stat_activate_msecs(q->poll_cb, 100);
2788 }
2789
2790 static void blk_mq_poll_stats_fn(struct blk_stat_callback *cb)
2791 {
2792         struct request_queue *q = cb->data;
2793         int bucket;
2794
2795         for (bucket = 0; bucket < BLK_MQ_POLL_STATS_BKTS; bucket++) {
2796                 if (cb->stat[bucket].nr_samples)
2797                         q->poll_stat[bucket] = cb->stat[bucket];
2798         }
2799 }
2800
2801 static unsigned long blk_mq_poll_nsecs(struct request_queue *q,
2802                                        struct blk_mq_hw_ctx *hctx,
2803                                        struct request *rq)
2804 {
2805         unsigned long ret = 0;
2806         int bucket;
2807
2808         /*
2809          * If stats collection isn't on, don't sleep but turn it on for
2810          * future users
2811          */
2812         if (!blk_poll_stats_enable(q))
2813                 return 0;
2814
2815         /*
2816          * As an optimistic guess, use half of the mean service time
2817          * for this type of request. We can (and should) make this smarter.
2818          * For instance, if the completion latencies are tight, we can
2819          * get closer than just half the mean. This is especially
2820          * important on devices where the completion latencies are longer
2821          * than ~10 usec. We do use the stats for the relevant IO size
2822          * if available which does lead to better estimates.
2823          */
2824         bucket = blk_mq_poll_stats_bkt(rq);
2825         if (bucket < 0)
2826                 return ret;
2827
2828         if (q->poll_stat[bucket].nr_samples)
2829                 ret = (q->poll_stat[bucket].mean + 1) / 2;
2830
2831         return ret;
2832 }
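
/*
 * Worked example (illustrative): if the tracked mean completion time for this
 * request's size/direction bucket is 8000 ns, blk_mq_poll_nsecs() returns
 * (8000 + 1) / 2 = 4000 ns, so hybrid polling sleeps roughly half the
 * expected latency before switching to busy polling.
 */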
2833
2834 static bool blk_mq_poll_hybrid_sleep(struct request_queue *q,
2835                                      struct blk_mq_hw_ctx *hctx,
2836                                      struct request *rq)
2837 {
2838         struct hrtimer_sleeper hs;
2839         enum hrtimer_mode mode;
2840         unsigned int nsecs;
2841         ktime_t kt;
2842
2843         if (test_bit(REQ_ATOM_POLL_SLEPT, &rq->atomic_flags))
2844                 return false;
2845
2846         /*
2847          * poll_nsec can be:
2848          *
2849          * -1:  don't ever hybrid sleep
2850          *  0:  use half of prev avg
2851          * >0:  use this specific value
2852          */
2853         if (q->poll_nsec == -1)
2854                 return false;
2855         else if (q->poll_nsec > 0)
2856                 nsecs = q->poll_nsec;
2857         else
2858                 nsecs = blk_mq_poll_nsecs(q, hctx, rq);
2859
2860         if (!nsecs)
2861                 return false;
2862
2863         set_bit(REQ_ATOM_POLL_SLEPT, &rq->atomic_flags);
2864
2865         /*
2866          * This will be replaced with the stats tracking code, using
2867          * 'avg_completion_time / 2' as the pre-sleep target.
2868          */
2869         kt = nsecs;
2870
2871         mode = HRTIMER_MODE_REL;
2872         hrtimer_init_on_stack(&hs.timer, CLOCK_MONOTONIC, mode);
2873         hrtimer_set_expires(&hs.timer, kt);
2874
2875         hrtimer_init_sleeper(&hs, current);
2876         do {
2877                 if (test_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags))
2878                         break;
2879                 set_current_state(TASK_UNINTERRUPTIBLE);
2880                 hrtimer_start_expires(&hs.timer, mode);
2881                 if (hs.task)
2882                         io_schedule();
2883                 hrtimer_cancel(&hs.timer);
2884                 mode = HRTIMER_MODE_ABS;
2885         } while (hs.task && !signal_pending(current));
2886
2887         __set_current_state(TASK_RUNNING);
2888         destroy_hrtimer_on_stack(&hs.timer);
2889         return true;
2890 }
2891
2892 static bool __blk_mq_poll(struct blk_mq_hw_ctx *hctx, struct request *rq)
2893 {
2894         struct request_queue *q = hctx->queue;
2895         long state;
2896
2897         /*
2898          * If we sleep, have the caller restart the poll loop to reset
2899          * the state. Like for the other success return cases, the
2900          * caller is responsible for checking if the IO completed. If
2901          * the IO isn't complete, we'll get called again and will go
2902          * straight to the busy poll loop.
2903          */
2904         if (blk_mq_poll_hybrid_sleep(q, hctx, rq))
2905                 return true;
2906
2907         hctx->poll_considered++;
2908
2909         state = current->state;
2910         while (!need_resched()) {
2911                 int ret;
2912
2913                 hctx->poll_invoked++;
2914
2915                 ret = q->mq_ops->poll(hctx, rq->tag);
2916                 if (ret > 0) {
2917                         hctx->poll_success++;
2918                         set_current_state(TASK_RUNNING);
2919                         return true;
2920                 }
2921
2922                 if (signal_pending_state(state, current))
2923                         set_current_state(TASK_RUNNING);
2924
2925                 if (current->state == TASK_RUNNING)
2926                         return true;
2927                 if (ret < 0)
2928                         break;
2929                 cpu_relax();
2930         }
2931
2932         return false;
2933 }
2934
2935 bool blk_mq_poll(struct request_queue *q, blk_qc_t cookie)
2936 {
2937         struct blk_mq_hw_ctx *hctx;
2938         struct blk_plug *plug;
2939         struct request *rq;
2940
2941         if (!q->mq_ops || !q->mq_ops->poll || !blk_qc_t_valid(cookie) ||
2942             !test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
2943                 return false;
2944
2945         plug = current->plug;
2946         if (plug)
2947                 blk_flush_plug_list(plug, false);
2948
2949         hctx = q->queue_hw_ctx[blk_qc_t_to_queue_num(cookie)];
2950         if (!blk_qc_t_is_internal(cookie))
2951                 rq = blk_mq_tag_to_rq(hctx->tags, blk_qc_t_to_tag(cookie));
2952         else {
2953                 rq = blk_mq_tag_to_rq(hctx->sched_tags, blk_qc_t_to_tag(cookie));
2954                 /*
2955                  * With scheduling, if the request has completed, we'll
2956                  * get a NULL return here, as we clear the sched tag when
2957                  * that happens. The request still remains valid, like always,
2958                  * so we should be safe with just the NULL check.
2959                  */
2960                 if (!rq)
2961                         return false;
2962         }
2963
2964         return __blk_mq_poll(hctx, rq);
2965 }
2966 EXPORT_SYMBOL_GPL(blk_mq_poll);
2967
2968 static int __init blk_mq_init(void)
2969 {
2970         cpuhp_setup_state_multi(CPUHP_BLK_MQ_DEAD, "block/mq:dead", NULL,
2971                                 blk_mq_hctx_notify_dead);
2972         return 0;
2973 }
2974 subsys_initcall(blk_mq_init);