/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Block data types and constants. Directly include this file only to
 * break include dependency loop.
 */
#ifndef __LINUX_BLK_TYPES_H
#define __LINUX_BLK_TYPES_H

#include <linux/types.h>
#include <linux/bvec.h>
#include <linux/ktime.h>

struct bio_set;
struct bio;
struct bio_integrity_payload;
struct page;
struct io_context;
struct cgroup_subsys_state;
typedef void (bio_end_io_t) (struct bio *);
struct bio_crypt_ctx;

struct block_device {
	struct inode *		bd_inode;	/* will die */
	struct super_block *	bd_super;
	struct mutex		bd_mutex;	/* open/close mutex */
	struct list_head	bd_holder_disks;
	struct block_device *	bd_contains;
	struct hd_struct *	bd_part;
	/* number of times partitions within this device have been opened. */
	unsigned		bd_part_count;

	spinlock_t		bd_size_lock;	/* for bd_inode->i_size updates */
	struct gendisk *	bd_disk;
	struct backing_dev_info *bd_bdi;

	/* The counter of freeze processes */
	int			bd_fsfreeze_count;
	/* Mutex for freeze */
	struct mutex		bd_fsfreeze_mutex;
};

/*
 * Block error status values. See block/blk-core:blk_errors for the details.
 * Alpha cannot write a byte atomically, so we need to use 32-bit value.
 */
#if defined(CONFIG_ALPHA) && !defined(__alpha_bwx__)
typedef u32 __bitwise blk_status_t;
#else
typedef u8 __bitwise blk_status_t;
#endif

#define BLK_STS_OK 0
#define BLK_STS_NOTSUPP		((__force blk_status_t)1)
#define BLK_STS_TIMEOUT		((__force blk_status_t)2)
#define BLK_STS_NOSPC		((__force blk_status_t)3)
#define BLK_STS_TRANSPORT	((__force blk_status_t)4)
#define BLK_STS_TARGET		((__force blk_status_t)5)
#define BLK_STS_NEXUS		((__force blk_status_t)6)
#define BLK_STS_MEDIUM		((__force blk_status_t)7)
#define BLK_STS_PROTECTION	((__force blk_status_t)8)
#define BLK_STS_RESOURCE	((__force blk_status_t)9)
#define BLK_STS_IOERR		((__force blk_status_t)10)

/* hack for device mapper, don't use elsewhere: */
#define BLK_STS_DM_REQUEUE	((__force blk_status_t)11)

#define BLK_STS_AGAIN		((__force blk_status_t)12)

/*
 * BLK_STS_DEV_RESOURCE is returned from the driver to the block layer if
 * device related resources are unavailable, but the driver can guarantee
 * that the queue will be rerun in the future once resources become
 * available again. This is typically the case for device specific
 * resources that are consumed for IO. If the driver fails allocating these
 * resources, we know that inflight (or pending) IO will free these
 * resources upon completion.
 *
 * This is different from BLK_STS_RESOURCE in that it explicitly references
 * a device specific resource. For resources of wider scope, allocation
 * failure can happen without having pending IO. This means that we can't
 * rely on request completions freeing these resources, as IO may not be in
 * flight. Examples of that are kernel memory allocations, DMA mappings, or
 * any other system wide resources.
 */
#define BLK_STS_DEV_RESOURCE	((__force blk_status_t)13)
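
/*
 * Illustrative sketch, not part of this header: a driver's queue handler
 * could distinguish the two resource errors as below. driver_alloc_cmd()
 * and driver_dma_map() are hypothetical names, for illustration only.
 *
 *	cmd = driver_alloc_cmd(hctx);
 *	if (!cmd)
 *		return BLK_STS_DEV_RESOURCE;	// completion of in-flight IO
 *						// frees a cmd, queue is rerun
 *	if (driver_dma_map(cmd) < 0)
 *		return BLK_STS_RESOURCE;	// system-wide resource; the
 *						// block layer must retry on
 *						// its own
 */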

/*
 * BLK_STS_ZONE_RESOURCE is returned from the driver to the block layer if zone
 * related resources are unavailable, but the driver can guarantee the queue
 * will be rerun in the future once the resources become available again.
 *
 * This is different from BLK_STS_DEV_RESOURCE in that it explicitly references
 * a zone specific resource and IO to a different zone on the same device could
 * still be served. Examples of that are zones that are write-locked; a read
 * to the same zone can still be served.
 */
#define BLK_STS_ZONE_RESOURCE	((__force blk_status_t)14)

/*
 * BLK_STS_ZONE_OPEN_RESOURCE is returned from the driver in the completion
 * path if the device returns a status indicating that too many zone resources
 * are currently open. The same command should be successful if resubmitted
 * after the number of open zones decreases below the device's limits, which is
 * reported in the request_queue's max_open_zones.
 */
#define BLK_STS_ZONE_OPEN_RESOURCE	((__force blk_status_t)15)

/*
 * BLK_STS_ZONE_ACTIVE_RESOURCE is returned from the driver in the completion
 * path if the device returns a status indicating that too many zone resources
 * are currently active. The same command should be successful if resubmitted
 * after the number of active zones decreases below the device's limits, which
 * is reported in the request_queue's max_active_zones.
 */
#define BLK_STS_ZONE_ACTIVE_RESOURCE	((__force blk_status_t)16)
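
/*
 * Illustrative sketch (hypothetical driver completion path): device-level
 * zone resource errors map onto the retryable codes above. The DEV_STS_*
 * values are made up for the example.
 *
 *	switch (dev_status) {
 *	case DEV_STS_TOO_MANY_OPEN_ZONES:
 *		return BLK_STS_ZONE_OPEN_RESOURCE;
 *	case DEV_STS_TOO_MANY_ACTIVE_ZONES:
 *		return BLK_STS_ZONE_ACTIVE_RESOURCE;
 *	}
 */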

/**
 * blk_path_error - returns true if error may be path related
 * @error:	status the request was completed with
 *
 * Description:
 *     This classifies block error status into non-retryable errors and ones
 *     that may be successful if retried on a failover path.
 *
 * Return:
 *     %false - retrying failover path will not help
 *     %true  - may succeed if retried
 */
static inline bool blk_path_error(blk_status_t error)
{
	switch (error) {
	case BLK_STS_NOTSUPP:
	case BLK_STS_NOSPC:
	case BLK_STS_TARGET:
	case BLK_STS_NEXUS:
	case BLK_STS_MEDIUM:
	case BLK_STS_PROTECTION:
		return false;
	}

	/* Anything else could be a path failure, so should be retried */
	return true;
}
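
/*
 * Example (sketch): a multipath consumer can use blk_path_error() to decide
 * whether a failed request is worth retrying on another path.
 * retry_on_other_path() is a hypothetical helper, for illustration only.
 *
 *	if (blk_path_error(bio->bi_status))
 *		retry_on_other_path(bio);
 *	else
 *		bio_endio(bio);
 */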

/*
 * From most significant bit:
 * 1 bit: reserved for other usage, see below
 * 12 bits: original size of bio
 * 51 bits: issue time of bio
 */
#define BIO_ISSUE_RES_BITS	1
#define BIO_ISSUE_SIZE_BITS	12
#define BIO_ISSUE_RES_SHIFT	(64 - BIO_ISSUE_RES_BITS)
#define BIO_ISSUE_SIZE_SHIFT	(BIO_ISSUE_RES_SHIFT - BIO_ISSUE_SIZE_BITS)
#define BIO_ISSUE_TIME_MASK	((1ULL << BIO_ISSUE_SIZE_SHIFT) - 1)
#define BIO_ISSUE_SIZE_MASK	\
	(((1ULL << BIO_ISSUE_SIZE_BITS) - 1) << BIO_ISSUE_SIZE_SHIFT)
#define BIO_ISSUE_RES_MASK	(~((1ULL << BIO_ISSUE_RES_SHIFT) - 1))

/* Reserved bit for blk-throtl */
#define BIO_ISSUE_THROTL_SKIP_LATENCY (1ULL << 63)

struct bio_issue {
	u64 value;
};

static inline u64 __bio_issue_time(u64 time)
{
	return time & BIO_ISSUE_TIME_MASK;
}

static inline u64 bio_issue_time(struct bio_issue *issue)
{
	return __bio_issue_time(issue->value);
}

static inline sector_t bio_issue_size(struct bio_issue *issue)
{
	return ((issue->value & BIO_ISSUE_SIZE_MASK) >> BIO_ISSUE_SIZE_SHIFT);
}

static inline void bio_issue_init(struct bio_issue *issue,
				  u64 size)
{
	size &= (1ULL << BIO_ISSUE_SIZE_BITS) - 1;
	issue->value = ((issue->value & BIO_ISSUE_RES_MASK) |
			(ktime_get_ns() & BIO_ISSUE_TIME_MASK) |
			((u64)size << BIO_ISSUE_SIZE_SHIFT));
}
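
/*
 * Worked example of the packing above (bit 63 reserved, bits 62..51 size,
 * bits 50..0 time): after
 *
 *	issue->value = BIO_ISSUE_THROTL_SKIP_LATENCY |
 *		       ((u64)0x123 << BIO_ISSUE_SIZE_SHIFT) | 42;
 *
 * bio_issue_size() returns 0x123 and bio_issue_time() returns 42, while the
 * reserved blk-throtl bit survives bio_issue_init(), which rewrites only the
 * size and time fields and preserves the BIO_ISSUE_RES_MASK bits.
 */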

/*
 * main unit of I/O for the block layer and lower layers (ie drivers and
 * stacking drivers)
 */
struct bio {
	struct bio		*bi_next;	/* request queue link */
	struct gendisk		*bi_disk;
	unsigned int		bi_opf;		/* bottom bits req flags,
						 * top bits REQ_OP. Use
						 * accessors.
						 */
	unsigned short		bi_flags;	/* status, etc and bvec pool number */
	unsigned short		bi_ioprio;
	unsigned short		bi_write_hint;
	blk_status_t		bi_status;
	u8			bi_partno;
	atomic_t		__bi_remaining;

	struct bvec_iter	bi_iter;

	bio_end_io_t		*bi_end_io;

	void			*bi_private;
#ifdef CONFIG_BLK_CGROUP
	/*
	 * Represents the association of the css and request_queue for the bio.
	 * If a bio goes direct to device, it will not have a blkg as it will
	 * not have a request_queue associated with it. The reference is put
	 * on release of the bio.
	 */
	struct blkcg_gq		*bi_blkg;
	struct bio_issue	bi_issue;
#ifdef CONFIG_BLK_CGROUP_IOCOST
	u64			bi_iocost_cost;
#endif
#endif

#ifdef CONFIG_BLK_INLINE_ENCRYPTION
	struct bio_crypt_ctx	*bi_crypt_context;
#endif

#if defined(CONFIG_BLK_DEV_INTEGRITY)
	struct bio_integrity_payload *bi_integrity; /* data integrity */
#endif

	unsigned short		bi_vcnt;	/* how many bio_vec's */

	/*
	 * Everything starting with bi_max_vecs will be preserved by bio_reset()
	 */

	unsigned short		bi_max_vecs;	/* max bvl_vecs we can hold */

	atomic_t		__bi_cnt;	/* pin count */

	struct bio_vec		*bi_io_vec;	/* the actual vec list */

	struct bio_set		*bi_pool;

	/*
	 * We can inline a number of vecs at the end of the bio, to avoid
	 * double allocations for a small number of bio_vecs. This member
	 * MUST obviously be kept at the very end of the bio.
	 */
	struct bio_vec		bi_inline_vecs[];
};

#define BIO_RESET_BYTES		offsetof(struct bio, bi_max_vecs)
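
/*
 * Example: bio_reset() zeroes the first BIO_RESET_BYTES of a bio, i.e.
 * everything laid out before bi_max_vecs; bi_max_vecs, __bi_cnt, bi_io_vec
 * and bi_pool survive, as do the flag bits above BIO_RESET_BITS (see below).
 */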

/*
 * bio flags
 */
enum {
	BIO_NO_PAGE_REF,	/* don't put release vec pages */
	BIO_CLONED,		/* doesn't own data */
	BIO_BOUNCED,		/* bio is a bounce bio */
	BIO_WORKINGSET,		/* contains userspace workingset pages */
	BIO_QUIET,		/* Make BIO Quiet */
	BIO_CHAIN,		/* chained bio, ->bi_remaining in effect */
	BIO_REFFED,		/* bio has elevated ->bi_cnt */
	BIO_THROTTLED,		/* This bio has already been subjected to
				 * throttling rules. Don't do it again. */
	BIO_TRACE_COMPLETION,	/* bio_endio() should trace the final completion
				 * of this bio. */
	BIO_CGROUP_ACCT,	/* has been accounted to a cgroup */
	BIO_TRACKED,		/* set if bio goes through the rq_qos path */
	BIO_FLAG_LAST
};

/* See BVEC_POOL_OFFSET below before adding new flags */

/*
 * We support 6 different bvec pools, the last one is magic in that it
 * is backed by a mempool.
 */
#define BVEC_POOL_NR		6
#define BVEC_POOL_MAX		(BVEC_POOL_NR - 1)

/*
 * Top 3 bits of bio flags indicate the pool the bvecs came from. We add
 * 1 to the actual index so that 0 indicates that there are no bvecs to be
 * freed.
 */
#define BVEC_POOL_BITS		(3)
#define BVEC_POOL_OFFSET	(16 - BVEC_POOL_BITS)
#define BVEC_POOL_IDX(bio)	((bio)->bi_flags >> BVEC_POOL_OFFSET)
#if (1 << BVEC_POOL_BITS) < (BVEC_POOL_NR + 1)
# error "BVEC_POOL_BITS is too small"
#endif
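
/*
 * Example: a bio whose bvecs came from pool 2 stores 2 + 1 = 3 in the top
 * three bits of bi_flags (3 << BVEC_POOL_OFFSET), so BVEC_POOL_IDX() reads
 * back 3; a value of 0 means there are no bvecs to free.
 */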

/*
 * Flags starting here get preserved by bio_reset() - this includes
 * only BVEC_POOL_IDX()
 */
#define BIO_RESET_BITS	BVEC_POOL_OFFSET

typedef __u32 __bitwise blk_mq_req_flags_t;

/*
 * Operations and flags common to the bio and request structures.
 * We use 8 bits for encoding the operation, and the remaining 24 for flags.
 *
 * The least significant bit of the operation number indicates the data
 * transfer direction:
 *
 *   - if the least significant bit is set transfers are TO the device
 *   - if the least significant bit is not set transfers are FROM the device
 *
 * If an operation does not transfer data the least significant bit has no
 * meaning.
 */
#define REQ_OP_BITS	8
#define REQ_OP_MASK	((1 << REQ_OP_BITS) - 1)
#define REQ_FLAG_BITS	24

enum req_opf {
	/* read sectors from the device */
	REQ_OP_READ		= 0,
	/* write sectors to the device */
	REQ_OP_WRITE		= 1,
	/* flush the volatile write cache */
	REQ_OP_FLUSH		= 2,
	/* discard sectors */
	REQ_OP_DISCARD		= 3,
	/* securely erase sectors */
	REQ_OP_SECURE_ERASE	= 5,
	/* write the same sector many times */
	REQ_OP_WRITE_SAME	= 7,
	/* write the zero filled sector many times */
	REQ_OP_WRITE_ZEROES	= 9,
	/* Open a zone */
	REQ_OP_ZONE_OPEN	= 10,
	/* Close a zone */
	REQ_OP_ZONE_CLOSE	= 11,
	/* Transition a zone to full */
	REQ_OP_ZONE_FINISH	= 12,
	/* write data at the current zone write pointer */
	REQ_OP_ZONE_APPEND	= 13,
	/* reset a zone write pointer */
	REQ_OP_ZONE_RESET	= 15,
	/* reset all the zones present on the device */
	REQ_OP_ZONE_RESET_ALL	= 17,

	/* SCSI passthrough using struct scsi_request */
	REQ_OP_SCSI_IN		= 32,
	REQ_OP_SCSI_OUT		= 33,
	/* Driver private requests */
	REQ_OP_DRV_IN		= 34,
	REQ_OP_DRV_OUT		= 35,

	REQ_OP_LAST,
};
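
/*
 * Example of the encoding rule above: REQ_OP_WRITE == 1,
 * REQ_OP_ZONE_APPEND == 13 and REQ_OP_SCSI_OUT == 33 are all odd (data TO
 * the device), while REQ_OP_READ == 0 and REQ_OP_SCSI_IN == 32 are even
 * (data FROM the device); op_is_write() below tests exactly this low bit.
 */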

enum req_flag_bits {
	__REQ_FAILFAST_DEV =	/* no driver retries of device errors */
		REQ_OP_BITS,
	__REQ_FAILFAST_TRANSPORT, /* no driver retries of transport errors */
	__REQ_FAILFAST_DRIVER,	/* no driver retries of driver errors */
	__REQ_SYNC,		/* request is sync (sync write or read) */
	__REQ_META,		/* metadata io request */
	__REQ_PRIO,		/* boost priority in cfq */
	__REQ_NOMERGE,		/* don't touch this for merging */
	__REQ_IDLE,		/* anticipate more IO after this one */
	__REQ_INTEGRITY,	/* I/O includes block integrity payload */
	__REQ_FUA,		/* forced unit access */
	__REQ_PREFLUSH,		/* request for cache flush */
	__REQ_RAHEAD,		/* read ahead, can fail anytime */
	__REQ_BACKGROUND,	/* background IO */
	__REQ_NOWAIT,		/* Don't wait if request will block */
	/*
	 * When a shared kthread needs to issue a bio for a cgroup, doing
	 * so synchronously can lead to priority inversions as the kthread
	 * can be trapped waiting for that cgroup. CGROUP_PUNT flag makes
	 * submit_bio() punt the actual issuing to a dedicated per-blkcg
	 * work item to avoid such priority inversions.
	 */
	__REQ_CGROUP_PUNT,

	/* command specific flags for REQ_OP_WRITE_ZEROES: */
	__REQ_NOUNMAP,		/* do not free blocks when zeroing */

	__REQ_HIPRI,

	/* for driver use */
	__REQ_DRV,
	__REQ_SWAP,		/* swapping request. */
	__REQ_NR_BITS,		/* stops here */
};

#define REQ_FAILFAST_DEV	(1ULL << __REQ_FAILFAST_DEV)
#define REQ_FAILFAST_TRANSPORT	(1ULL << __REQ_FAILFAST_TRANSPORT)
#define REQ_FAILFAST_DRIVER	(1ULL << __REQ_FAILFAST_DRIVER)
#define REQ_SYNC		(1ULL << __REQ_SYNC)
#define REQ_META		(1ULL << __REQ_META)
#define REQ_PRIO		(1ULL << __REQ_PRIO)
#define REQ_NOMERGE		(1ULL << __REQ_NOMERGE)
#define REQ_IDLE		(1ULL << __REQ_IDLE)
#define REQ_INTEGRITY		(1ULL << __REQ_INTEGRITY)
#define REQ_FUA			(1ULL << __REQ_FUA)
#define REQ_PREFLUSH		(1ULL << __REQ_PREFLUSH)
#define REQ_RAHEAD		(1ULL << __REQ_RAHEAD)
#define REQ_BACKGROUND		(1ULL << __REQ_BACKGROUND)
#define REQ_NOWAIT		(1ULL << __REQ_NOWAIT)
#define REQ_CGROUP_PUNT		(1ULL << __REQ_CGROUP_PUNT)

#define REQ_NOUNMAP		(1ULL << __REQ_NOUNMAP)
#define REQ_HIPRI		(1ULL << __REQ_HIPRI)

#define REQ_DRV			(1ULL << __REQ_DRV)
#define REQ_SWAP		(1ULL << __REQ_SWAP)

#define REQ_FAILFAST_MASK \
	(REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER)

#define REQ_NOMERGE_FLAGS \
	(REQ_NOMERGE | REQ_PREFLUSH | REQ_FUA)

enum stat_group {
	STAT_READ,
	STAT_WRITE,
	STAT_DISCARD,
	STAT_FLUSH,

	NR_STAT_GROUPS
};

#define bio_op(bio) \
	((bio)->bi_opf & REQ_OP_MASK)
#define req_op(req) \
	((req)->cmd_flags & REQ_OP_MASK)

/* obsolete, don't use in new code */
static inline void bio_set_op_attrs(struct bio *bio, unsigned op,
				    unsigned op_flags)
{
	bio->bi_opf = op | op_flags;
}

static inline bool op_is_write(unsigned int op)
{
	return (op & 1);
}

/*
 * Check if the bio or request is one that needs special treatment in the
 * flush state machine.
 */
static inline bool op_is_flush(unsigned int op)
{
	return op & (REQ_FUA | REQ_PREFLUSH);
}

/*
 * Reads are always treated as synchronous, as are requests with the FUA or
 * PREFLUSH flag. Other operations may be marked as synchronous using the
 * REQ_SYNC flag.
 */
static inline bool op_is_sync(unsigned int op)
{
	return (op & REQ_OP_MASK) == REQ_OP_READ ||
		(op & (REQ_SYNC | REQ_FUA | REQ_PREFLUSH));
}
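
/*
 * Example: op_is_sync(REQ_OP_READ) and op_is_sync(REQ_OP_WRITE | REQ_FUA)
 * are both true, while a plain REQ_OP_WRITE is treated as asynchronous
 * unless the submitter also sets REQ_SYNC.
 */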

static inline bool op_is_discard(unsigned int op)
{
	return (op & REQ_OP_MASK) == REQ_OP_DISCARD;
}

/*
 * Check if a bio or request operation is a zone management operation, with
 * the exception of REQ_OP_ZONE_RESET_ALL which is treated as a special case
 * due to its different handling in the block layer and device response in
 * case of command failure.
 */
static inline bool op_is_zone_mgmt(enum req_opf op)
{
	switch (op & REQ_OP_MASK) {
	case REQ_OP_ZONE_RESET:
	case REQ_OP_ZONE_OPEN:
	case REQ_OP_ZONE_CLOSE:
	case REQ_OP_ZONE_FINISH:
		return true;
	default:
		return false;
	}
}
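
/*
 * Example: op_is_zone_mgmt(REQ_OP_ZONE_OPEN) is true, while
 * op_is_zone_mgmt(REQ_OP_ZONE_RESET_ALL) is deliberately false, per the
 * REQ_OP_ZONE_RESET_ALL special case described above.
 */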

static inline int op_stat_group(unsigned int op)
{
	if (op_is_discard(op))
		return STAT_DISCARD;
	/* op_is_write() yields 0/1, matching STAT_READ/STAT_WRITE above */
	return op_is_write(op);
}

typedef unsigned int blk_qc_t;
#define BLK_QC_T_NONE		-1U
#define BLK_QC_T_SHIFT		16
#define BLK_QC_T_INTERNAL	(1U << 31)

static inline bool blk_qc_t_valid(blk_qc_t cookie)
{
	return cookie != BLK_QC_T_NONE;
}

static inline unsigned int blk_qc_t_to_queue_num(blk_qc_t cookie)
{
	return (cookie & ~BLK_QC_T_INTERNAL) >> BLK_QC_T_SHIFT;
}

static inline unsigned int blk_qc_t_to_tag(blk_qc_t cookie)
{
	return cookie & ((1u << BLK_QC_T_SHIFT) - 1);
}

static inline bool blk_qc_t_is_internal(blk_qc_t cookie)
{
	return (cookie & BLK_QC_T_INTERNAL) != 0;
}
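
/*
 * Example: a polling cookie packs the hardware queue number above
 * BLK_QC_T_SHIFT and the tag below it, so for queue 2 and tag 7
 *
 *	cookie = (2 << BLK_QC_T_SHIFT) | 7;
 *
 * gives blk_qc_t_to_queue_num(cookie) == 2 and blk_qc_t_to_tag(cookie) == 7,
 * with BLK_QC_T_INTERNAL marking cookies that carry an internal tag.
 */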

#endif /* __LINUX_BLK_TYPES_H */