/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Block data types and constants.  Directly include this file only to
 * break include dependency loop.
 */
#ifndef __LINUX_BLK_TYPES_H
#define __LINUX_BLK_TYPES_H

#include <linux/types.h>
#include <linux/bvec.h>
#include <linux/device.h>
#include <linux/ktime.h>

struct bio_set;
struct bio;
struct bio_integrity_payload;
struct page;
struct io_context;
struct cgroup_subsys_state;
typedef void (bio_end_io_t) (struct bio *);
struct bio_crypt_ctx;

/*
 * The basic unit of block I/O is a sector. It is used in a number of contexts
 * in Linux (blk, bio, genhd). The size of one sector is 512 = 2**9
 * bytes. Variables of type sector_t represent an offset or size that is a
 * multiple of 512 bytes. Hence these two constants.
 */
#ifndef SECTOR_SHIFT
#define SECTOR_SHIFT 9
#endif
#ifndef SECTOR_SIZE
#define SECTOR_SIZE (1 << SECTOR_SHIFT)
#endif

#define PAGE_SECTORS_SHIFT	(PAGE_SHIFT - SECTOR_SHIFT)
#define PAGE_SECTORS		(1 << PAGE_SECTORS_SHIFT)
#define SECTOR_MASK		(PAGE_SECTORS - 1)
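
/*
 * Illustrative sketch, not part of the upstream header: converting a
 * sector count to bytes with the constants above. The helper name is
 * hypothetical.
 */
static inline u64 example_sectors_to_bytes(sector_t nr_sects)
{
	/* one sector is 512 bytes, so scale by 2**SECTOR_SHIFT */
	return (u64)nr_sects << SECTOR_SHIFT;
}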

struct block_device {
	sector_t		bd_start_sect;
	sector_t		bd_nr_sectors;
	struct gendisk *	bd_disk;
	struct request_queue *	bd_queue;
	struct disk_stats __percpu *bd_stats;
	unsigned long		bd_stamp;
	bool			bd_read_only;	/* read-only policy */
	u8			bd_partno;
	bool			bd_write_holder;
	bool			bd_has_submit_bio;
	dev_t			bd_dev;
	struct inode		*bd_inode;	/* will die */

	atomic_t		bd_openers;
	spinlock_t		bd_size_lock;	/* for bd_inode->i_size updates */
	void *			bd_claiming;
	void *			bd_holder;
	const struct blk_holder_ops *bd_holder_ops;
	struct mutex		bd_holder_lock;
	/* The counter of freeze processes */
	int			bd_fsfreeze_count;
	int			bd_holders;
	struct kobject		*bd_holder_dir;

	/* Mutex for freeze */
	struct mutex		bd_fsfreeze_mutex;
	struct super_block	*bd_fsfreeze_sb;

	struct partition_meta_info *bd_meta_info;
#ifdef CONFIG_FAIL_MAKE_REQUEST
	bool			bd_make_it_fail;
#endif

	/*
	 * keep this out-of-line as it's both big and not needed in the fast
	 * path
	 */
	struct device		bd_device;
} __randomize_layout;

#define bdev_whole(_bdev) \
	((_bdev)->bd_disk->part0)

#define dev_to_bdev(device) \
	container_of((device), struct block_device, bd_device)

#define bdev_kobj(_bdev) \
	(&((_bdev)->bd_device.kobj))
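
/*
 * Illustrative sketch (not upstream code): dev_to_bdev() recovers the
 * containing block_device from its embedded struct device, e.g. in a
 * hypothetical device attribute callback:
 *
 *	static ssize_t size_show(struct device *dev,
 *				 struct device_attribute *attr, char *buf)
 *	{
 *		struct block_device *bdev = dev_to_bdev(dev);
 *
 *		return sysfs_emit(buf, "%llu\n",
 *				  (unsigned long long)bdev->bd_nr_sectors);
 *	}
 */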

/*
 * Block error status values.  See block/blk-core:blk_errors for the details.
 * Alpha cannot write a byte atomically, so we need to use a 32-bit value.
 */
#if defined(CONFIG_ALPHA) && !defined(__alpha_bwx__)
typedef u32 __bitwise blk_status_t;
typedef u32 blk_short_t;
#else
typedef u8 __bitwise blk_status_t;
typedef u16 blk_short_t;
#endif

#define	BLK_STS_OK 0
#define BLK_STS_NOTSUPP		((__force blk_status_t)1)
#define BLK_STS_TIMEOUT		((__force blk_status_t)2)
#define BLK_STS_NOSPC		((__force blk_status_t)3)
#define BLK_STS_TRANSPORT	((__force blk_status_t)4)
#define BLK_STS_TARGET		((__force blk_status_t)5)
#define BLK_STS_RESV_CONFLICT	((__force blk_status_t)6)
#define BLK_STS_MEDIUM		((__force blk_status_t)7)
#define BLK_STS_PROTECTION	((__force blk_status_t)8)
#define BLK_STS_RESOURCE	((__force blk_status_t)9)
#define BLK_STS_IOERR		((__force blk_status_t)10)

/* hack for device mapper, don't use elsewhere: */
#define BLK_STS_DM_REQUEUE	((__force blk_status_t)11)

/*
 * BLK_STS_AGAIN should only be returned if RQF_NOWAIT is set
 * and the bio would block (cf bio_wouldblock_error())
 */
#define BLK_STS_AGAIN		((__force blk_status_t)12)
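
/*
 * Illustrative sketch (not upstream code): a submitter that would have
 * to block for a REQ_NOWAIT bio fails it with BLK_STS_AGAIN instead,
 * roughly what bio_wouldblock_error() in bio.h does:
 *
 *	if (bio->bi_opf & REQ_NOWAIT) {
 *		bio->bi_status = BLK_STS_AGAIN;
 *		bio_endio(bio);
 *		return;
 *	}
 */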

/*
 * BLK_STS_DEV_RESOURCE is returned from the driver to the block layer if
 * device related resources are unavailable, but the driver can guarantee
 * that the queue will be rerun in the future once resources become
 * available again. This is typically the case for device specific
 * resources that are consumed for IO. If the driver fails allocating these
 * resources, we know that inflight (or pending) IO will free these
 * resources upon completion.
 *
 * This is different from BLK_STS_RESOURCE in that it explicitly references
 * a device specific resource. For resources of wider scope, allocation
 * failure can happen without having pending IO. This means that we can't
 * rely on request completions freeing these resources, as IO may not be in
 * flight. Examples of that are kernel memory allocations, DMA mappings, or
 * any other system wide resources.
 */
#define BLK_STS_DEV_RESOURCE	((__force blk_status_t)13)
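
/*
 * Illustrative sketch (not upstream code): how a driver's ->queue_rq()
 * might pick between the two status values; both helpers below are
 * hypothetical:
 *
 *	if (!driver_get_device_tag(rq))		// per-device resource,
 *		return BLK_STS_DEV_RESOURCE;	// freed when inflight IO
 *						// completes
 *	if (!driver_map_dma(rq))		// system-wide resource,
 *		return BLK_STS_RESOURCE;	// block layer must rerun
 *						// the queue itself
 */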

/*
 * BLK_STS_ZONE_RESOURCE is returned from the driver to the block layer if zone
 * related resources are unavailable, but the driver can guarantee the queue
 * will be rerun in the future once the resources become available again.
 *
 * This is different from BLK_STS_DEV_RESOURCE in that it explicitly references
 * a zone specific resource and IO to a different zone on the same device could
 * still be served. Examples of that are zones that are write-locked, but a read
 * to the same zone could be served.
 */
#define BLK_STS_ZONE_RESOURCE	((__force blk_status_t)14)

/*
 * BLK_STS_ZONE_OPEN_RESOURCE is returned from the driver in the completion
 * path if the device returns a status indicating that too many zone resources
 * are currently open. The same command should be successful if resubmitted
 * after the number of open zones decreases below the device's limits, which is
 * reported in the request_queue's max_open_zones.
 */
#define BLK_STS_ZONE_OPEN_RESOURCE	((__force blk_status_t)15)

/*
 * BLK_STS_ZONE_ACTIVE_RESOURCE is returned from the driver in the completion
 * path if the device returns a status indicating that too many zone resources
 * are currently active. The same command should be successful if resubmitted
 * after the number of active zones decreases below the device's limits, which
 * is reported in the request_queue's max_active_zones.
 */
#define BLK_STS_ZONE_ACTIVE_RESOURCE	((__force blk_status_t)16)

/*
 * BLK_STS_OFFLINE is returned from the driver when the target device is offline
 * or is being taken offline. This could help differentiate the case where a
 * device is intentionally being shut down from a real I/O error.
 */
#define BLK_STS_OFFLINE		((__force blk_status_t)17)

/*
 * BLK_STS_DURATION_LIMIT is returned from the driver when the target device
 * aborted the command because it exceeded one of its Command Duration Limits.
 */
#define BLK_STS_DURATION_LIMIT	((__force blk_status_t)18)

/**
 * blk_path_error - returns true if error may be path related
 * @error: status the request was completed with
 *
 * Description:
 *     This classifies block error status into non-retryable errors and ones
 *     that may be successful if retried on a failover path.
 *
 * Return:
 *     %false - retrying failover path will not help
 *     %true  - may succeed if retried
 */
static inline bool blk_path_error(blk_status_t error)
{
	switch (error) {
	case BLK_STS_NOTSUPP:
	case BLK_STS_NOSPC:
	case BLK_STS_TARGET:
	case BLK_STS_RESV_CONFLICT:
	case BLK_STS_MEDIUM:
	case BLK_STS_PROTECTION:
		return false;
	}

	/* Anything else could be a path failure, so should be retried */
	return true;
}
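
/*
 * Illustrative sketch (not upstream code): a multipath submitter could
 * use blk_path_error() to decide whether a failover retry makes sense;
 * the helper below is hypothetical:
 *
 *	if (blk_path_error(bio->bi_status))
 *		requeue_on_other_path(bio);	// path failure, retry may help
 *	else
 *		bio_endio(bio);			// target error, retry is futile
 */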

/*
 * From most significant bit:
 * 1 bit: reserved for other usage, see below
 * 12 bits: original size of bio
 * 51 bits: issue time of bio
 */
#define BIO_ISSUE_RES_BITS      1
#define BIO_ISSUE_SIZE_BITS     12
#define BIO_ISSUE_RES_SHIFT     (64 - BIO_ISSUE_RES_BITS)
#define BIO_ISSUE_SIZE_SHIFT    (BIO_ISSUE_RES_SHIFT - BIO_ISSUE_SIZE_BITS)
#define BIO_ISSUE_TIME_MASK     ((1ULL << BIO_ISSUE_SIZE_SHIFT) - 1)
#define BIO_ISSUE_SIZE_MASK     \
	(((1ULL << BIO_ISSUE_SIZE_BITS) - 1) << BIO_ISSUE_SIZE_SHIFT)
#define BIO_ISSUE_RES_MASK      (~((1ULL << BIO_ISSUE_RES_SHIFT) - 1))

/* Reserved bit for blk-throtl */
#define BIO_ISSUE_THROTL_SKIP_LATENCY (1ULL << 63)

struct bio_issue {
	u64 value;
};

static inline u64 __bio_issue_time(u64 time)
{
	return time & BIO_ISSUE_TIME_MASK;
}

static inline u64 bio_issue_time(struct bio_issue *issue)
{
	return __bio_issue_time(issue->value);
}

static inline sector_t bio_issue_size(struct bio_issue *issue)
{
	return ((issue->value & BIO_ISSUE_SIZE_MASK) >> BIO_ISSUE_SIZE_SHIFT);
}

static inline void bio_issue_init(struct bio_issue *issue,
				       sector_t size)
{
	size &= (1ULL << BIO_ISSUE_SIZE_BITS) - 1;
	issue->value = ((issue->value & BIO_ISSUE_RES_MASK) |
			(ktime_get_ns() & BIO_ISSUE_TIME_MASK) |
			((u64)size << BIO_ISSUE_SIZE_SHIFT));
}
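
/*
 * Illustrative sketch, not part of the upstream header: packing a value
 * by hand with the masks above, mirroring bio_issue_init() but with an
 * explicit timestamp instead of ktime_get_ns(). The helper name is
 * hypothetical.
 */
static inline u64 example_bio_issue_pack(u64 time, sector_t size, bool skip)
{
	return (skip ? BIO_ISSUE_THROTL_SKIP_LATENCY : 0) |	/* bit 63 */
	       (((u64)size & ((1ULL << BIO_ISSUE_SIZE_BITS) - 1))
			<< BIO_ISSUE_SIZE_SHIFT) |		/* bits 62..51 */
	       (time & BIO_ISSUE_TIME_MASK);			/* bits 50..0 */
}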

typedef __u32 __bitwise blk_opf_t;

typedef unsigned int blk_qc_t;
#define BLK_QC_T_NONE		-1U

/*
 * main unit of I/O for the block layer and lower layers (ie drivers and
 * stacking drivers)
 */
struct bio {
	struct bio		*bi_next;	/* request queue link */
	struct block_device	*bi_bdev;
	blk_opf_t		bi_opf;		/* bottom bits REQ_OP, top bits
						 * req_flags.
						 */
	unsigned short		bi_flags;	/* BIO_* below */
	unsigned short		bi_ioprio;
	blk_status_t		bi_status;
	atomic_t		__bi_remaining;

	struct bvec_iter	bi_iter;

	blk_qc_t		bi_cookie;
	bio_end_io_t		*bi_end_io;
	void			*bi_private;
#ifdef CONFIG_BLK_CGROUP
	/*
	 * Represents the association of the css and request_queue for the bio.
	 * If a bio goes direct to device, it will not have a blkg as it will
	 * not have a request_queue associated with it.  The reference is put
	 * on release of the bio.
	 */
	struct blkcg_gq		*bi_blkg;
	struct bio_issue	bi_issue;
#ifdef CONFIG_BLK_CGROUP_IOCOST
	u64			bi_iocost_cost;
#endif
#endif

#ifdef CONFIG_BLK_INLINE_ENCRYPTION
	struct bio_crypt_ctx	*bi_crypt_context;
#endif

	union {
#if defined(CONFIG_BLK_DEV_INTEGRITY)
		struct bio_integrity_payload *bi_integrity; /* data integrity */
#endif
	};

	unsigned short		bi_vcnt;	/* how many bio_vec's */

	/*
	 * Everything starting with bi_max_vecs will be preserved by bio_reset()
	 */

	unsigned short		bi_max_vecs;	/* max bvl_vecs we can hold */

	atomic_t		__bi_cnt;	/* pin count */

	struct bio_vec		*bi_io_vec;	/* the actual vec list */

	struct bio_set		*bi_pool;

	/*
	 * We can inline a number of vecs at the end of the bio, to avoid
	 * double allocations for a small number of bio_vecs. This member
	 * MUST obviously be kept at the very end of the bio.
	 */
	struct bio_vec		bi_inline_vecs[];
};

#define BIO_RESET_BYTES		offsetof(struct bio, bi_max_vecs)
#define BIO_MAX_SECTORS		(UINT_MAX >> SECTOR_SHIFT)
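
/*
 * Illustrative note (mirrors bio_reset() in block/bio.c): everything
 * laid out before bi_max_vecs is cleared on reset, everything from
 * bi_max_vecs onwards is preserved:
 *
 *	memset(bio, 0, BIO_RESET_BYTES);
 */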

/*
 * bio flags
 */
enum {
	BIO_PAGE_PINNED,	/* Unpin pages in bio_release_pages() */
	BIO_CLONED,		/* doesn't own data */
	BIO_BOUNCED,		/* bio is a bounce bio */
	BIO_QUIET,		/* Make BIO Quiet */
	BIO_CHAIN,		/* chained bio, ->bi_remaining in effect */
	BIO_REFFED,		/* bio has elevated ->bi_cnt */
	BIO_BPS_THROTTLED,	/* This bio has already been subjected to
				 * throttling rules. Don't do it again. */
	BIO_TRACE_COMPLETION,	/* bio_endio() should trace the final completion
				 * of this bio. */
	BIO_CGROUP_ACCT,	/* has been accounted to a cgroup */
	BIO_QOS_THROTTLED,	/* bio went through rq_qos throttle path */
	BIO_QOS_MERGED,		/* but went through rq_qos merge path */
	BIO_REMAPPED,
	BIO_ZONE_WRITE_LOCKED,	/* Owns a zoned device zone write lock */
	BIO_FLAG_LAST
};

typedef __u32 __bitwise blk_mq_req_flags_t;

#define REQ_OP_BITS	8
#define REQ_OP_MASK	(__force blk_opf_t)((1 << REQ_OP_BITS) - 1)
#define REQ_FLAG_BITS	24

/**
 * enum req_op - Operations common to the bio and request structures.
 * We use 8 bits for encoding the operation, and the remaining 24 for flags.
 *
 * The least significant bit of the operation number indicates the data
 * transfer direction:
 *
 *   - if the least significant bit is set transfers are TO the device
 *   - if the least significant bit is not set transfers are FROM the device
 *
 * If an operation does not transfer data the least significant bit has no
 * meaning.
 */
enum req_op {
	/* read sectors from the device */
	REQ_OP_READ		= (__force blk_opf_t)0,
	/* write sectors to the device */
	REQ_OP_WRITE		= (__force blk_opf_t)1,
	/* flush the volatile write cache */
	REQ_OP_FLUSH		= (__force blk_opf_t)2,
	/* discard sectors */
	REQ_OP_DISCARD		= (__force blk_opf_t)3,
	/* securely erase sectors */
	REQ_OP_SECURE_ERASE	= (__force blk_opf_t)5,
	/* write the zero filled sector many times */
	REQ_OP_WRITE_ZEROES	= (__force blk_opf_t)9,
	/* Open a zone */
	REQ_OP_ZONE_OPEN	= (__force blk_opf_t)10,
	/* Close a zone */
	REQ_OP_ZONE_CLOSE	= (__force blk_opf_t)11,
	/* Transition a zone to full */
	REQ_OP_ZONE_FINISH	= (__force blk_opf_t)12,
	/* write data at the current zone write pointer */
	REQ_OP_ZONE_APPEND	= (__force blk_opf_t)13,
	/* reset a zone write pointer */
	REQ_OP_ZONE_RESET	= (__force blk_opf_t)15,
	/* reset all zones present on the device */
	REQ_OP_ZONE_RESET_ALL	= (__force blk_opf_t)17,

	/* Driver private requests */
	REQ_OP_DRV_IN		= (__force blk_opf_t)34,
	REQ_OP_DRV_OUT		= (__force blk_opf_t)35,

	REQ_OP_LAST		= (__force blk_opf_t)36,
};
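
/*
 * Illustrative examples of the least-significant-bit convention above:
 *
 *	op_is_write(REQ_OP_READ)		false, 0 is even
 *	op_is_write(REQ_OP_WRITE)		true,  1 is odd
 *	op_is_write(REQ_OP_WRITE_ZEROES)	true,  9 is odd
 *	op_is_write(REQ_OP_ZONE_APPEND)		true,  13 is odd
 */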

enum req_flag_bits {
	__REQ_FAILFAST_DEV =	/* no driver retries of device errors */
		REQ_OP_BITS,
	__REQ_FAILFAST_TRANSPORT, /* no driver retries of transport errors */
	__REQ_FAILFAST_DRIVER,	/* no driver retries of driver errors */
	__REQ_SYNC,		/* request is sync (sync write or read) */
	__REQ_META,		/* metadata io request */
	__REQ_PRIO,		/* boost priority in cfq */
	__REQ_NOMERGE,		/* don't touch this for merging */
	__REQ_IDLE,		/* anticipate more IO after this one */
	__REQ_INTEGRITY,	/* I/O includes block integrity payload */
	__REQ_FUA,		/* forced unit access */
	__REQ_PREFLUSH,		/* request for cache flush */
	__REQ_RAHEAD,		/* read ahead, can fail anytime */
	__REQ_BACKGROUND,	/* background IO */
	__REQ_NOWAIT,		/* Don't wait if request will block */
	__REQ_POLLED,		/* caller polls for completion using bio_poll */
	__REQ_ALLOC_CACHE,	/* allocate IO from cache if available */
	__REQ_SWAP,		/* swap I/O */
	__REQ_DRV,		/* for driver use */
	__REQ_FS_PRIVATE,	/* for file system (submitter) use */

	/*
	 * Command specific flags, keep last:
	 */
	/* for REQ_OP_WRITE_ZEROES: */
	__REQ_NOUNMAP,		/* do not free blocks when zeroing */

	__REQ_NR_BITS,		/* stops here */
};

#define REQ_FAILFAST_DEV	\
			(__force blk_opf_t)(1ULL << __REQ_FAILFAST_DEV)
#define REQ_FAILFAST_TRANSPORT	\
			(__force blk_opf_t)(1ULL << __REQ_FAILFAST_TRANSPORT)
#define REQ_FAILFAST_DRIVER	\
			(__force blk_opf_t)(1ULL << __REQ_FAILFAST_DRIVER)
#define REQ_SYNC	(__force blk_opf_t)(1ULL << __REQ_SYNC)
#define REQ_META	(__force blk_opf_t)(1ULL << __REQ_META)
#define REQ_PRIO	(__force blk_opf_t)(1ULL << __REQ_PRIO)
#define REQ_NOMERGE	(__force blk_opf_t)(1ULL << __REQ_NOMERGE)
#define REQ_IDLE	(__force blk_opf_t)(1ULL << __REQ_IDLE)
#define REQ_INTEGRITY	(__force blk_opf_t)(1ULL << __REQ_INTEGRITY)
#define REQ_FUA		(__force blk_opf_t)(1ULL << __REQ_FUA)
#define REQ_PREFLUSH	(__force blk_opf_t)(1ULL << __REQ_PREFLUSH)
#define REQ_RAHEAD	(__force blk_opf_t)(1ULL << __REQ_RAHEAD)
#define REQ_BACKGROUND	(__force blk_opf_t)(1ULL << __REQ_BACKGROUND)
#define REQ_NOWAIT	(__force blk_opf_t)(1ULL << __REQ_NOWAIT)
#define REQ_POLLED	(__force blk_opf_t)(1ULL << __REQ_POLLED)
#define REQ_ALLOC_CACHE	(__force blk_opf_t)(1ULL << __REQ_ALLOC_CACHE)
#define REQ_SWAP	(__force blk_opf_t)(1ULL << __REQ_SWAP)
#define REQ_DRV		(__force blk_opf_t)(1ULL << __REQ_DRV)
#define REQ_FS_PRIVATE	(__force blk_opf_t)(1ULL << __REQ_FS_PRIVATE)

#define REQ_NOUNMAP	(__force blk_opf_t)(1ULL << __REQ_NOUNMAP)

#define REQ_FAILFAST_MASK \
	(REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER)

#define REQ_NOMERGE_FLAGS \
	(REQ_NOMERGE | REQ_PREFLUSH | REQ_FUA)
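
/*
 * Illustrative sketch (not upstream code): a blk_opf_t combines exactly
 * one REQ_OP_* in the low REQ_OP_BITS with any number of the flags
 * above, e.g. a synchronous write that must reach stable media:
 *
 *	blk_opf_t opf = REQ_OP_WRITE | REQ_SYNC | REQ_FUA;
 *
 * bio_op() below recovers REQ_OP_WRITE by masking with REQ_OP_MASK.
 */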

enum stat_group {
	STAT_READ,
	STAT_WRITE,
	STAT_DISCARD,
	STAT_FLUSH,

	NR_STAT_GROUPS
};

static inline enum req_op bio_op(const struct bio *bio)
{
	return bio->bi_opf & REQ_OP_MASK;
}

static inline bool op_is_write(blk_opf_t op)
{
	return !!(op & (__force blk_opf_t)1);
}

/*
 * Check if the bio or request is one that needs special treatment in the
 * flush state machine.
 */
static inline bool op_is_flush(blk_opf_t op)
{
	return op & (REQ_FUA | REQ_PREFLUSH);
}

/*
 * Reads are always treated as synchronous, as are requests with the FUA or
 * PREFLUSH flag.  Other operations may be marked as synchronous using the
 * REQ_SYNC flag.
 */
static inline bool op_is_sync(blk_opf_t op)
{
	return (op & REQ_OP_MASK) == REQ_OP_READ ||
		(op & (REQ_SYNC | REQ_FUA | REQ_PREFLUSH));
}
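
/*
 * Illustrative examples of the rule above:
 *
 *	op_is_sync(REQ_OP_READ)			true, reads are always sync
 *	op_is_sync(REQ_OP_WRITE)		false
 *	op_is_sync(REQ_OP_WRITE | REQ_SYNC)	true
 *	op_is_sync(REQ_OP_WRITE | REQ_FUA)	true
 */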

static inline bool op_is_discard(blk_opf_t op)
{
	return (op & REQ_OP_MASK) == REQ_OP_DISCARD;
}

/*
 * Check if a bio or request operation is a zone management operation, with
 * the exception of REQ_OP_ZONE_RESET_ALL which is treated as a special case
 * due to its different handling in the block layer and device response in
 * case of command failure.
 */
static inline bool op_is_zone_mgmt(enum req_op op)
{
	switch (op & REQ_OP_MASK) {
	case REQ_OP_ZONE_RESET:
	case REQ_OP_ZONE_OPEN:
	case REQ_OP_ZONE_CLOSE:
	case REQ_OP_ZONE_FINISH:
		return true;
	default:
		return false;
	}
}

static inline int op_stat_group(enum req_op op)
{
	if (op_is_discard(op))
		return STAT_DISCARD;
	return op_is_write(op);
}
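
/*
 * Illustrative note: returning op_is_write() directly works because
 * STAT_READ is 0 and STAT_WRITE is 1 in enum stat_group above, matching
 * the boolean result of op_is_write().
 */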

struct blk_rq_stat {
	u64 mean;
	u64 min;
	u64 max;
	u32 nr_samples;
	u64 batch;
};

#endif /* __LINUX_BLK_TYPES_H */