/* SPDX-License-Identifier: GPL-2.0 */
#ifndef BLK_MQ_H
#define BLK_MQ_H

#include <linux/blkdev.h>
#include <linux/sbitmap.h>
#include <linux/lockdep.h>
#include <linux/scatterlist.h>
#include <linux/prefetch.h>
#include <linux/srcu.h>
#include <linux/rw_hint.h>

struct blk_flush_queue;

#define BLKDEV_MIN_RQ		4
#define BLKDEV_DEFAULT_RQ	128
typedef enum rq_end_io_ret (rq_end_io_fn)(struct request *, blk_status_t);

typedef __u32 __bitwise req_flags_t;
/* drive already may have started this one */
#define RQF_STARTED		((__force req_flags_t)(1 << 1))
/* request for flush sequence */
#define RQF_FLUSH_SEQ		((__force req_flags_t)(1 << 4))
/* merge of different types, fail separately */
#define RQF_MIXED_MERGE		((__force req_flags_t)(1 << 5))
/* don't call prep for this one */
#define RQF_DONTPREP		((__force req_flags_t)(1 << 7))
/* use hctx->sched_tags */
#define RQF_SCHED_TAGS		((__force req_flags_t)(1 << 8))
/* use an I/O scheduler for this request */
#define RQF_USE_SCHED		((__force req_flags_t)(1 << 9))
/* vaguely specified driver internal error. Ignored by the block layer */
#define RQF_FAILED		((__force req_flags_t)(1 << 10))
/* don't warn about errors */
#define RQF_QUIET		((__force req_flags_t)(1 << 11))
/* account into disk and partition IO statistics */
#define RQF_IO_STAT		((__force req_flags_t)(1 << 13))
/* runtime pm request */
#define RQF_PM			((__force req_flags_t)(1 << 15))
/* on IO scheduler merge hash */
#define RQF_HASHED		((__force req_flags_t)(1 << 16))
/* track IO completion time */
#define RQF_STATS		((__force req_flags_t)(1 << 17))
/* Look at ->special_vec for the actual data payload instead of the
   bio chain. */
#define RQF_SPECIAL_PAYLOAD	((__force req_flags_t)(1 << 18))
/* The per-zone write lock is held for this request */
#define RQF_ZONE_WRITE_LOCKED	((__force req_flags_t)(1 << 19))
/* ->timeout has been called, don't expire again */
#define RQF_TIMED_OUT		((__force req_flags_t)(1 << 21))
#define RQF_RESV		((__force req_flags_t)(1 << 23))

/* flags that prevent us from merging requests: */
#define RQF_NOMERGE_FLAGS \
	(RQF_STARTED | RQF_FLUSH_SEQ | RQF_SPECIAL_PAYLOAD)
/*
 * Try to put the fields that are referenced together in the same cacheline.
 *
 * If you modify this structure, make sure to update blk_rq_init() and
 * especially blk_mq_rq_ctx_init() to take care of the added fields.
 */
struct request {
	struct request_queue *q;
	struct blk_mq_ctx *mq_ctx;
	struct blk_mq_hw_ctx *mq_hctx;

	blk_opf_t cmd_flags;		/* op and common flags */

	/* the following two fields are internal, NEVER access directly */
	unsigned int __data_len;	/* total data len */
	sector_t __sector;		/* sector cursor */
	struct list_head queuelist;
	struct request *rq_next;

	struct block_device *part;
#ifdef CONFIG_BLK_RQ_ALLOC_TIME
	/* Time that the first bio started allocating this request. */
	u64 alloc_time_ns;
#endif
	/* Time that this request was allocated for this IO. */
	u64 start_time_ns;
	/* Time that I/O was submitted to the device. */
	u64 io_start_time_ns;

#ifdef CONFIG_BLK_WBT
	unsigned short wbt_flags;
#endif
	/*
	 * rq sectors used for blk stats. It has the same value as
	 * blk_rq_sectors(rq), except that it is never zeroed by
	 * completion.
	 */
	unsigned short stats_sectors;

	/*
	 * Number of scatter-gather DMA addr+len pairs after
	 * physical address coalescing is performed.
	 */
	unsigned short nr_phys_segments;
#ifdef CONFIG_BLK_DEV_INTEGRITY
	unsigned short nr_integrity_segments;
#endif

#ifdef CONFIG_BLK_INLINE_ENCRYPTION
	struct bio_crypt_ctx *crypt_ctx;
	struct blk_crypto_keyslot *crypt_keyslot;
#endif

	enum rw_hint write_hint;
	unsigned short ioprio;

	enum mq_rq_state state;

	unsigned long deadline;
	/*
	 * The hash is used inside the scheduler, and killed once the
	 * request reaches the dispatch list. The ipi_list is only used
	 * to queue the request for softirq completion, which is long
	 * after the request has been unhashed (and even removed from
	 * the dispatch list).
	 */
	struct hlist_node hash;		/* merge hash */
	struct llist_node ipi_list;
	/*
	 * The rb_node is only used inside the io scheduler, requests
	 * are pruned when moved to the dispatch queue. special_vec must
	 * only be used if RQF_SPECIAL_PAYLOAD is set, and those cannot be
	 * inserted into an IO scheduler.
	 */
	struct rb_node rb_node;		/* sort/lookup */
	struct bio_vec special_vec;
	/*
	 * Three pointers are available for the IO schedulers; if they need
	 * more they have to dynamically allocate them.
	 */

	rq_end_io_fn *saved_end_io;
	/*
	 * completion callback.
	 */
	rq_end_io_fn *end_io;
};
static inline enum req_op req_op(const struct request *req)
{
	return req->cmd_flags & REQ_OP_MASK;
}

static inline bool blk_rq_is_passthrough(struct request *rq)
{
	return blk_op_is_passthrough(rq->cmd_flags);
}

static inline unsigned short req_get_ioprio(struct request *req)
{
	return req->ioprio;
}
#define rq_data_dir(rq)		(op_is_write(req_op(rq)) ? WRITE : READ)

#define rq_dma_dir(rq) \
	(op_is_write(req_op(rq)) ? DMA_TO_DEVICE : DMA_FROM_DEVICE)
#define rq_list_add(listptr, rq)	do {		\
	(rq)->rq_next = *(listptr);			\
	*(listptr) = rq;				\
} while (0)

#define rq_list_add_tail(lastpptr, rq)	do {		\
	(rq)->rq_next = NULL;				\
	**(lastpptr) = rq;				\
	*(lastpptr) = &rq->rq_next;			\
} while (0)

#define rq_list_pop(listptr)				\
({							\
	struct request *__req = NULL;			\
	if ((listptr) && *(listptr)) {			\
		__req = *(listptr);			\
		*(listptr) = __req->rq_next;		\
	}						\
	__req;						\
})

#define rq_list_peek(listptr)				\
({							\
	struct request *__req = NULL;			\
	if ((listptr) && *(listptr))			\
		__req = *(listptr);			\
	__req;						\
})

#define rq_list_for_each(listptr, pos)			\
	for (pos = rq_list_peek((listptr)); pos; pos = rq_list_next(pos))

#define rq_list_for_each_safe(listptr, pos, nxt)			\
	for (pos = rq_list_peek((listptr)), nxt = rq_list_next(pos);	\
		pos; pos = nxt, nxt = pos ? rq_list_next(pos) : NULL)

#define rq_list_next(rq)	(rq)->rq_next
#define rq_list_empty(list)	((list) == (struct request *) NULL)
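
/*
 * Example: building up and draining a driver-local request list with the
 * helpers above (illustrative sketch; my_issue_one() is a hypothetical
 * driver function; note that rq_list_add() pushes at the head):
 *
 *	struct request *rqlist = NULL, *rq;
 *
 *	rq_list_add(&rqlist, rq1);
 *	rq_list_add(&rqlist, rq2);
 *
 *	while ((rq = rq_list_pop(&rqlist)))
 *		my_issue_one(rq);
 */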
/**
 * rq_list_move() - move a struct request from one list to another
 * @src: The source list @rq is currently in
 * @dst: The destination list that @rq will be appended to
 * @rq: The request to move
 * @prev: The request preceding @rq in @src (NULL if @rq is the head)
 */
static inline void rq_list_move(struct request **src, struct request **dst,
				struct request *rq, struct request *prev)
{
	if (prev)
		prev->rq_next = rq->rq_next;
	else
		*src = rq->rq_next;
	rq_list_add(dst, rq);
}
/**
 * enum blk_eh_timer_return - How the timeout handler should proceed
 * @BLK_EH_DONE: The block driver completed the command or will complete it at
 *	a later time.
 * @BLK_EH_RESET_TIMER: Reset the request timer and continue waiting for the
 *	request to complete.
 */
enum blk_eh_timer_return {
	BLK_EH_DONE,
	BLK_EH_RESET_TIMER,
};

#define BLK_TAG_ALLOC_FIFO	0 /* allocate starting from 0 */
#define BLK_TAG_ALLOC_RR	1 /* allocate starting from last allocated tag */
/**
 * struct blk_mq_hw_ctx - State for a hardware queue facing the hardware
 * block device
 */
struct blk_mq_hw_ctx {
	struct {
		/** @lock: Protects the dispatch list. */
		spinlock_t lock;
		/**
		 * @dispatch: Used for requests that are ready to be
		 * dispatched to the hardware but for some reason (e.g. lack of
		 * resources) could not be sent to the hardware. As soon as the
		 * driver can send new requests, requests on this list will
		 * be sent first for a fairer dispatch.
		 */
		struct list_head dispatch;
		/**
		 * @state: BLK_MQ_S_* flags. Defines the state of the hw
		 * queue (active, scheduled to restart, stopped).
		 */
		unsigned long state;
	} ____cacheline_aligned_in_smp;
	/**
	 * @run_work: Used for scheduling a hardware queue run at a later time.
	 */
	struct delayed_work run_work;
	/** @cpumask: Map of available CPUs where this hctx can run. */
	cpumask_var_t cpumask;
	/**
	 * @next_cpu: Used by blk_mq_hctx_next_cpu() for round-robin CPU
	 * selection from @cpumask.
	 */
	int next_cpu;
	/**
	 * @next_cpu_batch: Counter of how many works left in the batch before
	 * changing to the next CPU.
	 */
	int next_cpu_batch;

	/** @flags: BLK_MQ_F_* flags. Defines the behaviour of the queue. */
	unsigned long flags;

	/**
	 * @sched_data: Pointer owned by the IO scheduler attached to a request
	 * queue. It's up to the IO scheduler how to use this pointer.
	 */
	void *sched_data;
	/**
	 * @queue: Pointer to the request queue that owns this hardware context.
	 */
	struct request_queue *queue;
	/** @fq: Queue of requests that need to perform a flush operation. */
	struct blk_flush_queue *fq;

	/**
	 * @driver_data: Pointer to data owned by the block driver that created
	 * this hctx.
	 */
	void *driver_data;

	/**
	 * @ctx_map: Bitmap for each software queue. If bit is on, there is a
	 * pending request in that software queue.
	 */
	struct sbitmap ctx_map;

	/**
	 * @dispatch_from: Software queue to be used when no scheduler was
	 * selected.
	 */
	struct blk_mq_ctx *dispatch_from;
	/**
	 * @dispatch_busy: Number used by blk_mq_update_dispatch_busy() to
	 * decide if the hw_queue is busy using Exponential Weighted Moving
	 * Average algorithm.
	 */
	unsigned int dispatch_busy;
	/** @type: HCTX_TYPE_* flags. Type of hardware queue. */
	unsigned short type;
	/** @nr_ctx: Number of software queues. */
	unsigned short nr_ctx;
	/** @ctxs: Array of software queues. */
	struct blk_mq_ctx **ctxs;

	/** @dispatch_wait_lock: Lock for dispatch_wait queue. */
	spinlock_t dispatch_wait_lock;
	/**
	 * @dispatch_wait: Waitqueue to put requests when there is no tag
	 * available at the moment, to wait for another try in the future.
	 */
	wait_queue_entry_t dispatch_wait;
	/**
	 * @wait_index: Index of next available dispatch_wait queue to insert
	 * requests.
	 */
	atomic_t wait_index;

	/**
	 * @tags: Tags owned by the block driver. A tag at this set is only
	 * assigned when a request is dispatched from a hardware queue.
	 */
	struct blk_mq_tags *tags;
	/**
	 * @sched_tags: Tags owned by I/O scheduler. If there is an I/O
	 * scheduler associated with a request queue, a tag is assigned when
	 * that request is allocated. Else, this member is not used.
	 */
	struct blk_mq_tags *sched_tags;

	/** @numa_node: NUMA node the storage adapter has been connected to. */
	unsigned int numa_node;
	/** @queue_num: Index of this hardware queue. */
	unsigned int queue_num;

	/**
	 * @nr_active: Number of active requests. Only used when a tag set is
	 * shared across request queues.
	 */
	atomic_t nr_active;
	/** @cpuhp_online: List to store request if a CPU is going to die */
	struct hlist_node cpuhp_online;
	/** @cpuhp_dead: List to store request if some CPU dies. */
	struct hlist_node cpuhp_dead;
	/** @kobj: Kernel object for sysfs. */
	struct kobject kobj;

#ifdef CONFIG_BLK_DEBUG_FS
	/**
	 * @debugfs_dir: debugfs directory for this hardware queue. Named
	 * as cpu<cpu_number>.
	 */
	struct dentry *debugfs_dir;
	/** @sched_debugfs_dir: debugfs directory for the scheduler. */
	struct dentry *sched_debugfs_dir;
#endif

	/**
	 * @hctx_list: if this hctx is not in use, this is an entry in
	 * q->unused_hctx_list.
	 */
	struct list_head hctx_list;
};
/**
 * struct blk_mq_queue_map - Map software queues to hardware queues
 * @mq_map:       CPU ID to hardware queue index map. This is an array
 *	with nr_cpu_ids elements. Each element has a value in the range
 *	[@queue_offset, @queue_offset + @nr_queues).
 * @nr_queues:    Number of hardware queues to map CPU IDs onto.
 * @queue_offset: First hardware queue to map onto. Used by the PCIe NVMe
 *	driver to map each hardware queue type (enum hctx_type) onto a distinct
 *	set of hardware queues.
 */
struct blk_mq_queue_map {
	unsigned int *mq_map;
	unsigned int nr_queues;
	unsigned int queue_offset;
};
/**
 * enum hctx_type - Type of hardware queue
 * @HCTX_TYPE_DEFAULT: All I/O not otherwise accounted for.
 * @HCTX_TYPE_READ:    Just for READ I/O.
 * @HCTX_TYPE_POLL:    Polled I/O of any kind.
 * @HCTX_MAX_TYPES:    Number of types of hctx.
 */
enum hctx_type {
	HCTX_TYPE_DEFAULT,
	HCTX_TYPE_READ,
	HCTX_TYPE_POLL,

	HCTX_MAX_TYPES,
};
/**
 * struct blk_mq_tag_set - tag set that can be shared between request queues
 * @ops:	   Pointers to functions that implement block driver behavior.
 * @map:	   One or more ctx -> hctx mappings. One map exists for each
 *		   hardware queue type (enum hctx_type) that the driver wishes
 *		   to support. There are no restrictions on maps being of the
 *		   same size, and it's perfectly legal to share maps between
 *		   types.
 * @nr_maps:	   Number of elements in the @map array. A number in the range
 *		   [1, HCTX_MAX_TYPES].
 * @nr_hw_queues:  Number of hardware queues supported by the block driver that
 *		   owns this data structure.
 * @queue_depth:   Number of tags per hardware queue, reserved tags included.
 * @reserved_tags: Number of tags to set aside for BLK_MQ_REQ_RESERVED tag
 *		   allocations.
 * @cmd_size:	   Number of additional bytes to allocate per request. The block
 *		   driver owns these additional bytes.
 * @numa_node:	   NUMA node the storage adapter has been connected to.
 * @timeout:	   Request processing timeout in jiffies.
 * @flags:	   Zero or more BLK_MQ_F_* flags.
 * @driver_data:   Pointer to data owned by the block driver that created this
 *		   tag set.
 * @tags:	   Tag sets. One tag set per hardware queue. Has @nr_hw_queues
 *		   elements.
 * @shared_tags:
 *		   Shared set of tags. Has @nr_hw_queues elements. If set,
 *		   shared by all @tags.
 * @tag_list_lock: Serializes tag_list accesses.
 * @tag_list:	   List of the request queues that use this tag set. See also
 *		   request_queue.tag_set_list.
 * @srcu:	   Use as lock when type of the request queue is blocking
 *		   (BLK_MQ_F_BLOCKING).
 */
struct blk_mq_tag_set {
	const struct blk_mq_ops *ops;
	struct blk_mq_queue_map map[HCTX_MAX_TYPES];
	unsigned int nr_maps;
	unsigned int nr_hw_queues;
	unsigned int queue_depth;
	unsigned int reserved_tags;
	unsigned int cmd_size;
	int numa_node;
	unsigned int timeout;
	unsigned int flags;
	void *driver_data;

	struct blk_mq_tags **tags;

	struct blk_mq_tags *shared_tags;

	struct mutex tag_list_lock;
	struct list_head tag_list;
	struct srcu_struct *srcu;
};
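
/*
 * Example: minimal tag set initialization for a single-queue driver
 * (illustrative sketch; my_mq_ops, my_dev and struct my_cmd are
 * hypothetical driver names):
 *
 *	struct blk_mq_tag_set *set = &my_dev->tag_set;
 *
 *	memset(set, 0, sizeof(*set));
 *	set->ops = &my_mq_ops;
 *	set->nr_hw_queues = 1;
 *	set->queue_depth = BLKDEV_DEFAULT_RQ;
 *	set->numa_node = NUMA_NO_NODE;
 *	set->cmd_size = sizeof(struct my_cmd);
 *	set->flags = BLK_MQ_F_SHOULD_MERGE;
 *	set->driver_data = my_dev;
 *
 *	err = blk_mq_alloc_tag_set(set);
 */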
/**
 * struct blk_mq_queue_data - Data about a request inserted in a queue
 *
 * @rq:   Request pointer.
 * @last: If it is the last request in the queue.
 */
struct blk_mq_queue_data {
	struct request *rq;
	bool last;
};

typedef bool (busy_tag_iter_fn)(struct request *, void *);
/**
 * struct blk_mq_ops - Callback functions that implement block driver
 *		behavior.
 */
struct blk_mq_ops {
	/**
	 * @queue_rq: Queue a new request from block IO.
	 */
	blk_status_t (*queue_rq)(struct blk_mq_hw_ctx *,
				 const struct blk_mq_queue_data *);

	/**
	 * @commit_rqs: If a driver uses bd->last to judge when to submit
	 * requests to hardware, it must define this function. In case of errors
	 * that make us stop issuing further requests, this hook serves the
	 * purpose of kicking the hardware (which the last request otherwise
	 * wouldn't have done).
	 */
	void (*commit_rqs)(struct blk_mq_hw_ctx *);

	/**
	 * @queue_rqs: Queue a list of new requests. Driver is guaranteed
	 * that each request belongs to the same queue. If the driver doesn't
	 * empty the @rqlist completely, then the rest will be queued
	 * individually by the block layer upon return.
	 */
	void (*queue_rqs)(struct request **rqlist);

	/**
	 * @get_budget: Reserve budget before queueing a request; once
	 * .queue_rq is run, it is the driver's responsibility to release the
	 * reserved budget. We also have to handle the failure case of
	 * .get_budget to avoid I/O deadlock.
	 */
	int (*get_budget)(struct request_queue *);

	/**
	 * @put_budget: Release the reserved budget.
	 */
	void (*put_budget)(struct request_queue *, int);

	/**
	 * @set_rq_budget_token: store rq's budget token
	 */
	void (*set_rq_budget_token)(struct request *, int);
	/**
	 * @get_rq_budget_token: retrieve rq's budget token
	 */
	int (*get_rq_budget_token)(struct request *);

	/**
	 * @timeout: Called on request timeout.
	 */
	enum blk_eh_timer_return (*timeout)(struct request *);

	/**
	 * @poll: Called to poll for completion of a specific tag.
	 */
	int (*poll)(struct blk_mq_hw_ctx *, struct io_comp_batch *);

	/**
	 * @complete: Mark the request as complete.
	 */
	void (*complete)(struct request *);

	/**
	 * @init_hctx: Called when the block layer side of a hardware queue has
	 * been set up, allowing the driver to allocate/init matching
	 * structures.
	 */
	int (*init_hctx)(struct blk_mq_hw_ctx *, void *, unsigned int);
	/**
	 * @exit_hctx: Ditto for exit/teardown.
	 */
	void (*exit_hctx)(struct blk_mq_hw_ctx *, unsigned int);

	/**
	 * @init_request: Called for every command allocated by the block layer
	 * to allow the driver to set up driver specific data.
	 *
	 * Tag greater than or equal to queue_depth is for setting up
	 * flush request.
	 */
	int (*init_request)(struct blk_mq_tag_set *set, struct request *,
			    unsigned int, unsigned int);
	/**
	 * @exit_request: Ditto for exit/teardown.
	 */
	void (*exit_request)(struct blk_mq_tag_set *set, struct request *,
			     unsigned int);

	/**
	 * @cleanup_rq: Called before freeing one request which isn't completed
	 * yet, and usually for freeing the driver private data.
	 */
	void (*cleanup_rq)(struct request *);

	/**
	 * @busy: If set, returns whether or not this queue currently is busy.
	 */
	bool (*busy)(struct request_queue *);

	/**
	 * @map_queues: This allows drivers to specify their own queue mapping
	 * by overriding the setup-time function that builds the mq_map.
	 */
	void (*map_queues)(struct blk_mq_tag_set *set);

#ifdef CONFIG_BLK_DEBUG_FS
	/**
	 * @show_rq: Used by the debugfs implementation to show driver-specific
	 * information about a request.
	 */
	void (*show_rq)(struct seq_file *m, struct request *rq);
#endif
};
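
/*
 * Example: skeleton ->queue_rq() implementation (illustrative sketch;
 * my_dev_busy() and my_issue() are hypothetical driver helpers):
 *
 *	static blk_status_t my_queue_rq(struct blk_mq_hw_ctx *hctx,
 *					const struct blk_mq_queue_data *bd)
 *	{
 *		struct request *rq = bd->rq;
 *
 *		if (my_dev_busy(hctx->queue->queuedata))
 *			return BLK_STS_DEV_RESOURCE;
 *
 *		blk_mq_start_request(rq);
 *		my_issue(rq, bd->last);
 *		return BLK_STS_OK;
 *	}
 *
 *	static const struct blk_mq_ops my_mq_ops = {
 *		.queue_rq	= my_queue_rq,
 *	};
 */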
enum {
	BLK_MQ_F_SHOULD_MERGE	= 1 << 0,
	BLK_MQ_F_TAG_QUEUE_SHARED = 1 << 1,
	/*
	 * Set when this device requires underlying blk-mq device for
	 * completing IO:
	 */
	BLK_MQ_F_STACKING	= 1 << 2,
	BLK_MQ_F_TAG_HCTX_SHARED = 1 << 3,
	BLK_MQ_F_BLOCKING	= 1 << 5,
	/* Do not allow an I/O scheduler to be configured. */
	BLK_MQ_F_NO_SCHED	= 1 << 6,
	/*
	 * Select 'none' during queue registration in case of a single hwq
	 * or shared hwqs instead of 'mq-deadline'.
	 */
	BLK_MQ_F_NO_SCHED_BY_DEFAULT	= 1 << 7,
	BLK_MQ_F_ALLOC_POLICY_START_BIT = 8,
	BLK_MQ_F_ALLOC_POLICY_BITS = 1,

	BLK_MQ_S_STOPPED	= 0,
	BLK_MQ_S_TAG_ACTIVE	= 1,
	BLK_MQ_S_SCHED_RESTART	= 2,

	/* hw queue is inactive after all its CPUs become offline */
	BLK_MQ_S_INACTIVE	= 3,

	BLK_MQ_MAX_DEPTH	= 10240,

	BLK_MQ_CPU_WORK_BATCH	= 8,
};
#define BLK_MQ_FLAG_TO_ALLOC_POLICY(flags) \
	((flags >> BLK_MQ_F_ALLOC_POLICY_START_BIT) & \
		((1 << BLK_MQ_F_ALLOC_POLICY_BITS) - 1))
#define BLK_ALLOC_POLICY_TO_MQ_FLAG(policy) \
	((policy & ((1 << BLK_MQ_F_ALLOC_POLICY_BITS) - 1)) \
		<< BLK_MQ_F_ALLOC_POLICY_START_BIT)

#define BLK_MQ_NO_HCTX_IDX	(-1U)
struct gendisk *__blk_mq_alloc_disk(struct blk_mq_tag_set *set,
		struct queue_limits *lim, void *queuedata,
		struct lock_class_key *lkclass);
#define blk_mq_alloc_disk(set, lim, queuedata)				\
({									\
	static struct lock_class_key __key;				\
									\
	__blk_mq_alloc_disk(set, lim, queuedata, &__key);		\
})
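
/*
 * Example: typical probe-time allocation sequence (illustrative sketch;
 * error unwinding trimmed, my_dev and my_block_ops are hypothetical):
 *
 *	err = blk_mq_alloc_tag_set(&my_dev->tag_set);
 *	if (err)
 *		return err;
 *
 *	disk = blk_mq_alloc_disk(&my_dev->tag_set, NULL, my_dev);
 *	if (IS_ERR(disk)) {
 *		blk_mq_free_tag_set(&my_dev->tag_set);
 *		return PTR_ERR(disk);
 *	}
 *	disk->fops = &my_block_ops;
 */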
struct gendisk *blk_mq_alloc_disk_for_queue(struct request_queue *q,
		struct lock_class_key *lkclass);
struct request_queue *blk_mq_alloc_queue(struct blk_mq_tag_set *set,
		struct queue_limits *lim, void *queuedata);
int blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
		struct request_queue *q);
void blk_mq_destroy_queue(struct request_queue *);

int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set);
int blk_mq_alloc_sq_tag_set(struct blk_mq_tag_set *set,
		const struct blk_mq_ops *ops, unsigned int queue_depth,
		unsigned int set_flags);
void blk_mq_free_tag_set(struct blk_mq_tag_set *set);

void blk_mq_free_request(struct request *rq);
int blk_rq_poll(struct request *rq, struct io_comp_batch *iob,
		unsigned int poll_flags);

bool blk_mq_queue_inflight(struct request_queue *q);
enum {
	/* return when out of requests */
	BLK_MQ_REQ_NOWAIT	= (__force blk_mq_req_flags_t)(1 << 0),
	/* allocate from reserved pool */
	BLK_MQ_REQ_RESERVED	= (__force blk_mq_req_flags_t)(1 << 1),
	BLK_MQ_REQ_PM		= (__force blk_mq_req_flags_t)(1 << 2),
};
struct request *blk_mq_alloc_request(struct request_queue *q, blk_opf_t opf,
		blk_mq_req_flags_t flags);
struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
		blk_opf_t opf, blk_mq_req_flags_t flags,
		unsigned int hctx_idx);
/*
 * Tag address space map.
 */
struct blk_mq_tags {
	unsigned int nr_tags;
	unsigned int nr_reserved_tags;
	unsigned int active_queues;

	struct sbitmap_queue bitmap_tags;
	struct sbitmap_queue breserved_tags;

	struct request **rqs;
	struct request **static_rqs;
	struct list_head page_list;

	/*
	 * used to clear request reference in rqs[] before freeing one
	 * request pool
	 */
	spinlock_t lock;
};
static inline struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags,
					       unsigned int tag)
{
	if (tag < tags->nr_tags) {
		prefetch(tags->rqs[tag]);
		return tags->rqs[tag];
	}

	return NULL;
}

enum {
	BLK_MQ_UNIQUE_TAG_BITS = 16,
	BLK_MQ_UNIQUE_TAG_MASK = (1 << BLK_MQ_UNIQUE_TAG_BITS) - 1,
};
u32 blk_mq_unique_tag(struct request *rq);

static inline u16 blk_mq_unique_tag_to_hwq(u32 unique_tag)
{
	return unique_tag >> BLK_MQ_UNIQUE_TAG_BITS;
}

static inline u16 blk_mq_unique_tag_to_tag(u32 unique_tag)
{
	return unique_tag & BLK_MQ_UNIQUE_TAG_MASK;
}
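
/*
 * Example: blk_mq_unique_tag() packs the hardware queue index into the
 * upper 16 bits and the per-queue tag into the lower 16 bits, so both can
 * be recovered from one 32-bit value:
 *
 *	u32 unique = blk_mq_unique_tag(rq);
 *	u16 hwq = blk_mq_unique_tag_to_hwq(unique);
 *	u16 tag = blk_mq_unique_tag_to_tag(unique);
 */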
/**
 * blk_mq_rq_state() - read the current MQ_RQ_* state of a request
 * @rq: target request.
 */
static inline enum mq_rq_state blk_mq_rq_state(struct request *rq)
{
	return READ_ONCE(rq->state);
}

static inline int blk_mq_request_started(struct request *rq)
{
	return blk_mq_rq_state(rq) != MQ_RQ_IDLE;
}

static inline int blk_mq_request_completed(struct request *rq)
{
	return blk_mq_rq_state(rq) == MQ_RQ_COMPLETE;
}
/*
 * Set the state to complete when completing a request from inside ->queue_rq.
 * This is used by drivers that want to ensure special complete actions that
 * need access to the request are called on failure, e.g. by nvme for
 * multipath.
 */
static inline void blk_mq_set_request_complete(struct request *rq)
{
	WRITE_ONCE(rq->state, MQ_RQ_COMPLETE);
}

/*
 * Complete the request directly instead of deferring it to softirq or
 * completing it on another CPU. Useful in preemptible context instead of
 * interrupt context.
 */
static inline void blk_mq_complete_request_direct(struct request *rq,
		   void (*complete)(struct request *rq))
{
	WRITE_ONCE(rq->state, MQ_RQ_COMPLETE);
	complete(rq);
}
void blk_mq_start_request(struct request *rq);
void blk_mq_end_request(struct request *rq, blk_status_t error);
void __blk_mq_end_request(struct request *rq, blk_status_t error);
void blk_mq_end_request_batch(struct io_comp_batch *ib);
/*
 * Only need start/end time stamping if we have iostat or
 * blk stats enabled, or using an IO scheduler.
 */
static inline bool blk_mq_need_time_stamp(struct request *rq)
{
	/*
	 * Passthrough I/O doesn't use iostat accounting, cgroup stats,
	 * or I/O scheduler functionality.
	 */
	if (blk_rq_is_passthrough(rq))
		return false;
	return (rq->rq_flags & (RQF_IO_STAT | RQF_STATS | RQF_USE_SCHED));
}

static inline bool blk_mq_is_reserved_rq(struct request *rq)
{
	return rq->rq_flags & RQF_RESV;
}
/*
 * Batched completions only work when there is no I/O error and no special
 * ->end_io handler.
 */
static inline bool blk_mq_add_to_batch(struct request *req,
				       struct io_comp_batch *iob, int ioerror,
				       void (*complete)(struct io_comp_batch *))
{
	/*
	 * blk_mq_end_request_batch() can't end request allocated from
	 * sched tags
	 */
	if (!iob || (req->rq_flags & RQF_SCHED_TAGS) || ioerror ||
			(req->end_io && !blk_rq_is_passthrough(req)))
		return false;

	if (!iob->complete)
		iob->complete = complete;
	else if (iob->complete != complete)
		return false;
	iob->need_ts |= blk_mq_need_time_stamp(req);
	rq_list_add(&iob->req_list, req);
	return true;
}
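
/*
 * Example: completing requests in batches from a driver's ->poll()
 * handler (illustrative sketch; my_poll_one() and my_complete_iob() are
 * hypothetical driver helpers):
 *
 *	static int my_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
 *	{
 *		struct request *rq;
 *		int found = 0;
 *
 *		while ((rq = my_poll_one(hctx))) {
 *			found++;
 *			if (!blk_mq_add_to_batch(rq, iob, 0, my_complete_iob))
 *				blk_mq_complete_request(rq);
 *		}
 *		return found;
 *	}
 */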
void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list);
void blk_mq_kick_requeue_list(struct request_queue *q);
void blk_mq_delay_kick_requeue_list(struct request_queue *q, unsigned long msecs);
void blk_mq_complete_request(struct request *rq);
bool blk_mq_complete_request_remote(struct request *rq);
void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx);
void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx);
void blk_mq_stop_hw_queues(struct request_queue *q);
void blk_mq_start_hw_queues(struct request_queue *q);
void blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async);
void blk_mq_quiesce_queue(struct request_queue *q);
void blk_mq_wait_quiesce_done(struct blk_mq_tag_set *set);
void blk_mq_quiesce_tagset(struct blk_mq_tag_set *set);
void blk_mq_unquiesce_tagset(struct blk_mq_tag_set *set);
void blk_mq_unquiesce_queue(struct request_queue *q);
void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs);
void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
void blk_mq_run_hw_queues(struct request_queue *q, bool async);
void blk_mq_delay_run_hw_queues(struct request_queue *q, unsigned long msecs);
void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
		busy_tag_iter_fn *fn, void *priv);
void blk_mq_tagset_wait_completed_request(struct blk_mq_tag_set *tagset);
void blk_mq_freeze_queue(struct request_queue *q);
void blk_mq_unfreeze_queue(struct request_queue *q);
void blk_freeze_queue_start(struct request_queue *q);
void blk_mq_freeze_queue_wait(struct request_queue *q);
int blk_mq_freeze_queue_wait_timeout(struct request_queue *q,
		unsigned long timeout);

void blk_mq_map_queues(struct blk_mq_queue_map *qmap);
void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues);

void blk_mq_quiesce_queue_nowait(struct request_queue *q);

unsigned int blk_mq_rq_cpu(struct request *rq);
bool __blk_should_fake_timeout(struct request_queue *q);
static inline bool blk_should_fake_timeout(struct request_queue *q)
{
	if (IS_ENABLED(CONFIG_FAIL_IO_TIMEOUT) &&
	    test_bit(QUEUE_FLAG_FAIL_IO, &q->queue_flags))
		return __blk_should_fake_timeout(q);
	return false;
}
/**
 * blk_mq_rq_from_pdu - cast a PDU to a request
 * @pdu: the PDU (Protocol Data Unit) to be cast
 *
 * Return: request
 *
 * Driver command data is immediately after the request. So subtract request
 * size to get back to the original request.
 */
static inline struct request *blk_mq_rq_from_pdu(void *pdu)
{
	return pdu - sizeof(struct request);
}

/**
 * blk_mq_rq_to_pdu - cast a request to a PDU
 * @rq: the request to be cast
 *
 * Return: pointer to the PDU
 *
 * Driver command data is immediately after the request. So add request to get
 * the PDU.
 */
static inline void *blk_mq_rq_to_pdu(struct request *rq)
{
	return rq + 1;
}
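
/*
 * Example: with blk_mq_tag_set.cmd_size = sizeof(struct my_cmd), every
 * request carries a driver PDU directly behind it (illustrative sketch;
 * struct my_cmd is hypothetical):
 *
 *	struct my_cmd *cmd = blk_mq_rq_to_pdu(rq);
 *	struct request *same_rq = blk_mq_rq_from_pdu(cmd);
 */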
#define queue_for_each_hw_ctx(q, hctx, i)				\
	xa_for_each(&(q)->hctx_table, (i), (hctx))

#define hctx_for_each_ctx(hctx, ctx, i)					\
	for ((i) = 0; (i) < (hctx)->nr_ctx &&				\
	     ({ ctx = (hctx)->ctxs[(i)]; 1; }); (i)++)
static inline void blk_mq_cleanup_rq(struct request *rq)
{
	if (rq->q->mq_ops->cleanup_rq)
		rq->q->mq_ops->cleanup_rq(rq);
}

static inline void blk_rq_bio_prep(struct request *rq, struct bio *bio,
		unsigned int nr_segs)
{
	rq->nr_phys_segments = nr_segs;
	rq->__data_len = bio->bi_iter.bi_size;
	rq->bio = rq->biotail = bio;
	rq->ioprio = bio_prio(bio);
}
void blk_mq_hctx_set_fq_lock_class(struct blk_mq_hw_ctx *hctx,
		struct lock_class_key *key);

static inline bool rq_is_sync(struct request *rq)
{
	return op_is_sync(rq->cmd_flags);
}

void blk_rq_init(struct request_queue *q, struct request *rq);
int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
		struct bio_set *bs, gfp_t gfp_mask,
		int (*bio_ctr)(struct bio *, struct bio *, void *), void *data);
void blk_rq_unprep_clone(struct request *rq);
blk_status_t blk_insert_cloned_request(struct request *rq);
struct rq_map_data {
	struct page **pages;
	unsigned long offset;
	unsigned short page_order;
	unsigned short nr_entries;
	bool null_mapped;
	bool from_user;
};
int blk_rq_map_user(struct request_queue *, struct request *,
		struct rq_map_data *, void __user *, unsigned long, gfp_t);
int blk_rq_map_user_io(struct request *, struct rq_map_data *,
		void __user *, unsigned long, gfp_t, bool, int, bool, int);
int blk_rq_map_user_iov(struct request_queue *, struct request *,
		struct rq_map_data *, const struct iov_iter *, gfp_t);
int blk_rq_unmap_user(struct bio *);
int blk_rq_map_kern(struct request_queue *, struct request *, void *,
		unsigned int, gfp_t);
int blk_rq_append_bio(struct request *rq, struct bio *bio);
void blk_execute_rq_nowait(struct request *rq, bool at_head);
blk_status_t blk_execute_rq(struct request *rq, bool at_head);
bool blk_rq_is_poll(struct request *rq);
struct req_iterator {
	struct bvec_iter iter;
	struct bio *bio;
};
#define __rq_for_each_bio(_bio, rq)	\
	if ((rq->bio))			\
		for (_bio = (rq)->bio; _bio; _bio = _bio->bi_next)

#define rq_for_each_segment(bvl, _rq, _iter)			\
	__rq_for_each_bio(_iter.bio, _rq)			\
		bio_for_each_segment(bvl, _iter.bio, _iter.iter)

#define rq_for_each_bvec(bvl, _rq, _iter)			\
	__rq_for_each_bio(_iter.bio, _rq)			\
		bio_for_each_bvec(bvl, _iter.bio, _iter.iter)

#define rq_iter_last(bvec, _iter)				\
		(_iter.bio->bi_next == NULL &&			\
		 bio_iter_last(bvec, _iter.iter))
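
/*
 * Example: walking all data segments of a request (illustrative sketch;
 * my_handle_seg() is a hypothetical per-segment handler):
 *
 *	struct req_iterator iter;
 *	struct bio_vec bvec;
 *
 *	rq_for_each_segment(bvec, rq, iter)
 *		my_handle_seg(bvec.bv_page, bvec.bv_offset, bvec.bv_len);
 */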
/*
 * blk_rq_pos()			: the current sector
 * blk_rq_bytes()		: bytes left in the entire request
 * blk_rq_cur_bytes()		: bytes left in the current segment
 * blk_rq_sectors()		: sectors left in the entire request
 * blk_rq_cur_sectors()		: sectors left in the current segment
 * blk_rq_stats_sectors()	: sectors of the entire request used for stats
 */
static inline sector_t blk_rq_pos(const struct request *rq)
{
	return rq->__sector;
}

static inline unsigned int blk_rq_bytes(const struct request *rq)
{
	return rq->__data_len;
}

static inline int blk_rq_cur_bytes(const struct request *rq)
{
	if (!rq->bio)
		return 0;

	if (!bio_has_data(rq->bio))	/* dataless requests such as discard */
		return rq->bio->bi_iter.bi_size;
	return bio_iovec(rq->bio).bv_len;
}

static inline unsigned int blk_rq_sectors(const struct request *rq)
{
	return blk_rq_bytes(rq) >> SECTOR_SHIFT;
}

static inline unsigned int blk_rq_cur_sectors(const struct request *rq)
{
	return blk_rq_cur_bytes(rq) >> SECTOR_SHIFT;
}

static inline unsigned int blk_rq_stats_sectors(const struct request *rq)
{
	return rq->stats_sectors;
}
/*
 * Some commands like WRITE SAME have a payload or data transfer size which
 * is different from the size of the request. Any driver that supports such
 * commands using the RQF_SPECIAL_PAYLOAD flag needs to use this helper to
 * calculate the data transfer size.
 */
static inline unsigned int blk_rq_payload_bytes(struct request *rq)
{
	if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
		return rq->special_vec.bv_len;
	return blk_rq_bytes(rq);
}
/*
 * Return the first full biovec in the request. The caller needs to check that
 * there are any bvecs before calling this helper.
 */
static inline struct bio_vec req_bvec(struct request *rq)
{
	if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
		return rq->special_vec;
	return mp_bvec_iter_bvec(rq->bio->bi_io_vec, rq->bio->bi_iter);
}

static inline unsigned int blk_rq_count_bios(struct request *rq)
{
	unsigned int nr_bios = 0;
	struct bio *bio;

	__rq_for_each_bio(bio, rq)
		nr_bios++;

	return nr_bios;
}
void blk_steal_bios(struct bio_list *list, struct request *rq);

/*
 * Request completion related functions.
 *
 * blk_update_request() completes given number of bytes and updates
 * the request without completing it.
 */
bool blk_update_request(struct request *rq, blk_status_t error,
			unsigned int nr_bytes);
void blk_abort_request(struct request *);
/*
 * Number of physical segments as sent to the device.
 *
 * Normally this is the number of discontiguous data segments sent by the
 * submitter. But for data-less commands like discard we might have no
 * actual data segments submitted, but the driver might have to add its
 * own special payload. In that case we still return 1 here so that this
 * special payload will be mapped.
 */
static inline unsigned short blk_rq_nr_phys_segments(struct request *rq)
{
	if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
		return 1;
	return rq->nr_phys_segments;
}
/*
 * Number of discard segments (or ranges) the driver needs to fill in.
 * Each discard bio merged into a request is counted as one segment.
 */
static inline unsigned short blk_rq_nr_discard_segments(struct request *rq)
{
	return max_t(unsigned short, rq->nr_phys_segments, 1);
}
int __blk_rq_map_sg(struct request_queue *q, struct request *rq,
		struct scatterlist *sglist, struct scatterlist **last_sg);
static inline int blk_rq_map_sg(struct request_queue *q, struct request *rq,
		struct scatterlist *sglist)
{
	struct scatterlist *last_sg = NULL;

	return __blk_rq_map_sg(q, rq, sglist, &last_sg);
}
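
/*
 * Example: mapping a request onto a driver-owned scatterlist before DMA
 * (illustrative sketch; cmd->sgl is a hypothetical, pre-allocated array of
 * at least queue_max_segments(q) entries):
 *
 *	int nents;
 *
 *	sg_init_table(cmd->sgl, queue_max_segments(q));
 *	nents = blk_rq_map_sg(q, rq, cmd->sgl);
 *	if (nents)
 *		dma_map_sg(dev, cmd->sgl, nents, rq_dma_dir(rq));
 */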
void blk_dump_rq_flags(struct request *, char *);
#ifdef CONFIG_BLK_DEV_ZONED
static inline unsigned int blk_rq_zone_no(struct request *rq)
{
	return disk_zone_no(rq->q->disk, blk_rq_pos(rq));
}

static inline unsigned int blk_rq_zone_is_seq(struct request *rq)
{
	return disk_zone_is_seq(rq->q->disk, blk_rq_pos(rq));
}

/**
 * blk_rq_is_seq_zoned_write() - Check if @rq requires write serialization.
 * @rq: Request to examine.
 *
 * Note: REQ_OP_ZONE_APPEND requests do not require serialization.
 */
static inline bool blk_rq_is_seq_zoned_write(struct request *rq)
{
	return op_needs_zoned_write_locking(req_op(rq)) &&
		blk_rq_zone_is_seq(rq);
}

bool blk_req_needs_zone_write_lock(struct request *rq);
bool blk_req_zone_write_trylock(struct request *rq);
void __blk_req_zone_write_lock(struct request *rq);
void __blk_req_zone_write_unlock(struct request *rq);

static inline void blk_req_zone_write_lock(struct request *rq)
{
	if (blk_req_needs_zone_write_lock(rq))
		__blk_req_zone_write_lock(rq);
}

static inline void blk_req_zone_write_unlock(struct request *rq)
{
	if (rq->rq_flags & RQF_ZONE_WRITE_LOCKED)
		__blk_req_zone_write_unlock(rq);
}

static inline bool blk_req_zone_is_write_locked(struct request *rq)
{
	return rq->q->disk->seq_zones_wlock &&
		test_bit(blk_rq_zone_no(rq), rq->q->disk->seq_zones_wlock);
}

static inline bool blk_req_can_dispatch_to_zone(struct request *rq)
{
	if (!blk_req_needs_zone_write_lock(rq))
		return true;
	return !blk_req_zone_is_write_locked(rq);
}
#else /* CONFIG_BLK_DEV_ZONED */
static inline bool blk_rq_is_seq_zoned_write(struct request *rq)
{
	return false;
}

static inline bool blk_req_needs_zone_write_lock(struct request *rq)
{
	return false;
}

static inline void blk_req_zone_write_lock(struct request *rq)
{
}

static inline void blk_req_zone_write_unlock(struct request *rq)
{
}

static inline bool blk_req_zone_is_write_locked(struct request *rq)
{
	return false;
}

static inline bool blk_req_can_dispatch_to_zone(struct request *rq)
{
	return true;
}
#endif /* CONFIG_BLK_DEV_ZONED */
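
/*
 * Example: serializing writes to a sequential zone around dispatch
 * (illustrative sketch, loosely modelled on how the I/O scheduler uses
 * the zone write lock helpers above):
 *
 *	if (blk_rq_is_seq_zoned_write(rq)) {
 *		if (!blk_req_can_dispatch_to_zone(rq))
 *			return false;	(zone already has a write in flight)
 *		blk_req_zone_write_lock(rq);
 *	}
 *	(issue rq; blk_req_zone_write_unlock(rq) runs when the write completes)
 */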
#endif /* BLK_MQ_H */