/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Block data types and constants.  Directly include this file only to
 * break include dependency loop.
 */
#ifndef __LINUX_BLK_TYPES_H
#define __LINUX_BLK_TYPES_H

#include <linux/types.h>
#include <linux/bvec.h>
#include <linux/ktime.h>

struct bio_set;
struct bio;
struct bio_integrity_payload;
struct cgroup_subsys_state;
typedef void (bio_end_io_t) (struct bio *);
struct bio_crypt_ctx;

struct block_device {
	dev_t			bd_dev;  /* not a kdev_t - it's a search key */
	struct inode *		bd_inode;	/* will die */
	struct super_block *	bd_super;
	struct mutex		bd_mutex;	/* open/close mutex */
#ifdef CONFIG_SYSFS
	struct list_head	bd_holder_disks;
#endif
	struct block_device *	bd_contains;
	struct hd_struct *	bd_part;
	/* number of times partitions within this device have been opened. */
	unsigned		bd_part_count;
	struct gendisk *	bd_disk;
	struct backing_dev_info *bd_bdi;

	/* The counter of freeze processes */
	int			bd_fsfreeze_count;
	/* Mutex for freeze */
	struct mutex		bd_fsfreeze_mutex;
} __randomize_layout;

/*
 * Block error status values.  See block/blk-core:blk_errors for the details.
 * Alpha cannot write a byte atomically, so we need to use 32-bit value.
 */
#if defined(CONFIG_ALPHA) && !defined(__alpha_bwx__)
typedef u32 __bitwise blk_status_t;
#else
typedef u8 __bitwise blk_status_t;
#endif
#define BLK_STS_OK		0
#define BLK_STS_NOTSUPP		((__force blk_status_t)1)
#define BLK_STS_TIMEOUT		((__force blk_status_t)2)
#define BLK_STS_NOSPC		((__force blk_status_t)3)
#define BLK_STS_TRANSPORT	((__force blk_status_t)4)
#define BLK_STS_TARGET		((__force blk_status_t)5)
#define BLK_STS_NEXUS		((__force blk_status_t)6)
#define BLK_STS_MEDIUM		((__force blk_status_t)7)
#define BLK_STS_PROTECTION	((__force blk_status_t)8)
#define BLK_STS_RESOURCE	((__force blk_status_t)9)
#define BLK_STS_IOERR		((__force blk_status_t)10)

/* hack for device mapper, don't use elsewhere: */
#define BLK_STS_DM_REQUEUE	((__force blk_status_t)11)

#define BLK_STS_AGAIN		((__force blk_status_t)12)

/*
 * BLK_STS_DEV_RESOURCE is returned from the driver to the block layer if
 * device related resources are unavailable, but the driver can guarantee
 * that the queue will be rerun in the future once resources become
 * available again. This is typically the case for device specific
 * resources that are consumed for IO. If the driver fails allocating these
 * resources, we know that inflight (or pending) IO will free these
 * resources upon completion.
 *
 * This is different from BLK_STS_RESOURCE in that it explicitly references
 * a device specific resource. For resources of wider scope, allocation
 * failure can happen without having pending IO. This means that we can't
 * rely on request completions freeing these resources, as IO may not be in
 * flight. Examples of that are kernel memory allocations, DMA mappings, or
 * any other system wide resources.
 */
#define BLK_STS_DEV_RESOURCE	((__force blk_status_t)13)

/*
 * BLK_STS_ZONE_RESOURCE is returned from the driver to the block layer if zone
 * related resources are unavailable, but the driver can guarantee the queue
 * will be rerun in the future once the resources become available again.
 *
 * This is different from BLK_STS_DEV_RESOURCE in that it explicitly references
 * a zone specific resource and IO to a different zone on the same device could
 * still be served. An example is a zone that is write-locked: a write to it
 * must wait, but a read from the same zone can still be served.
 */
#define BLK_STS_ZONE_RESOURCE	((__force blk_status_t)14)
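
/*
 * Illustrative sketch, not part of this header's API: how a driver might
 * pick between the three RESOURCE statuses above when an allocation
 * fails. The helper name and both parameters are hypothetical.
 */
static inline blk_status_t example_resource_status(bool device_owned,
						   bool zone_owned)
{
	if (zone_owned)		/* e.g. the target zone is write-locked */
		return BLK_STS_ZONE_RESOURCE;
	if (device_owned)	/* in-flight IO will free it; queue reruns */
		return BLK_STS_DEV_RESOURCE;
	/* system-wide (kmalloc, DMA mapping, ...): no rerun guarantee */
	return BLK_STS_RESOURCE;
}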

/**
 * blk_path_error - returns true if error may be path related
 * @error: status the request was completed with
 *
 * Description:
 *     This classifies block error status into non-retryable errors and ones
 *     that may be successful if retried on a failover path.
 *
 * Return:
 *     %false - retrying failover path will not help
 *     %true  - may succeed if retried
 */
static inline bool blk_path_error(blk_status_t error)
{
	switch (error) {
	case BLK_STS_NOTSUPP:
	case BLK_STS_NOSPC:
	case BLK_STS_TARGET:
	case BLK_STS_NEXUS:
	case BLK_STS_MEDIUM:
	case BLK_STS_PROTECTION:
		return false;
	}

	/* Anything else could be a path failure, so should be retried */
	return true;
}
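
/*
 * Usage sketch (hypothetical caller): a multipath completion handler
 * could combine blk_path_error() with a success check to decide whether
 * resubmitting the request on another path is worthwhile.
 */
static inline bool example_should_failover(blk_status_t status)
{
	return status != BLK_STS_OK && blk_path_error(status);
}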

/*
 * From most significant bit:
 * 1 bit: reserved for other usage, see below
 * 12 bits: original size of bio
 * 51 bits: issue time of bio
 */
#define BIO_ISSUE_RES_BITS      1
#define BIO_ISSUE_SIZE_BITS     12
#define BIO_ISSUE_RES_SHIFT     (64 - BIO_ISSUE_RES_BITS)
#define BIO_ISSUE_SIZE_SHIFT    (BIO_ISSUE_RES_SHIFT - BIO_ISSUE_SIZE_BITS)
#define BIO_ISSUE_TIME_MASK     ((1ULL << BIO_ISSUE_SIZE_SHIFT) - 1)
#define BIO_ISSUE_SIZE_MASK     \
	(((1ULL << BIO_ISSUE_SIZE_BITS) - 1) << BIO_ISSUE_SIZE_SHIFT)
#define BIO_ISSUE_RES_MASK      (~((1ULL << BIO_ISSUE_RES_SHIFT) - 1))

/* Reserved bit for blk-throtl */
#define BIO_ISSUE_THROTL_SKIP_LATENCY (1ULL << 63)

struct bio_issue {
	u64 value;
};

static inline u64 __bio_issue_time(u64 time)
{
	return time & BIO_ISSUE_TIME_MASK;
}

static inline u64 bio_issue_time(struct bio_issue *issue)
{
	return __bio_issue_time(issue->value);
}

static inline sector_t bio_issue_size(struct bio_issue *issue)
{
	return ((issue->value & BIO_ISSUE_SIZE_MASK) >> BIO_ISSUE_SIZE_SHIFT);
}

static inline void bio_issue_init(struct bio_issue *issue,
				  sector_t size)
{
	size &= (1ULL << BIO_ISSUE_SIZE_BITS) - 1;
	issue->value = ((issue->value & BIO_ISSUE_RES_MASK) |
			(ktime_get_ns() & BIO_ISSUE_TIME_MASK) |
			((u64)size << BIO_ISSUE_SIZE_SHIFT));
}
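
/*
 * Round-trip sketch of the 1/12/51-bit packing above (hypothetical
 * helper, illustration only): sizes wider than BIO_ISSUE_SIZE_BITS are
 * silently truncated, so 4096 would read back as 0 while 2048 survives.
 */
static inline sector_t example_bio_issue_roundtrip(void)
{
	struct bio_issue issue = { .value = 0 };

	bio_issue_init(&issue, 2048);	/* stamps current ktime and size */
	return bio_issue_size(&issue);	/* yields 2048 */
}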

/*
 * main unit of I/O for the block layer and lower layers (ie drivers and
 * stacking drivers)
 */
struct bio {
	struct bio		*bi_next;	/* request queue link */
	struct gendisk		*bi_disk;
	unsigned int		bi_opf;		/* bottom bits req flags,
						 * top bits REQ_OP. Use
						 * accessors.
						 */
	unsigned short		bi_flags;	/* status, etc and bvec pool number */
	unsigned short		bi_ioprio;
	unsigned short		bi_write_hint;
	blk_status_t		bi_status;
	u8			bi_partno;
	atomic_t		__bi_remaining;

	struct bvec_iter	bi_iter;

	bio_end_io_t		*bi_end_io;

	void			*bi_private;
#ifdef CONFIG_BLK_CGROUP
	/*
	 * Represents the association of the css and request_queue for the bio.
	 * If a bio goes direct to device, it will not have a blkg as it will
	 * not have a request_queue associated with it.  The reference is put
	 * on release of the bio.
	 */
	struct blkcg_gq		*bi_blkg;
	struct bio_issue	bi_issue;
#ifdef CONFIG_BLK_CGROUP_IOCOST
	u64			bi_iocost_cost;
#endif
#endif

#ifdef CONFIG_BLK_INLINE_ENCRYPTION
	struct bio_crypt_ctx	*bi_crypt_context;
#endif

	union {
#if defined(CONFIG_BLK_DEV_INTEGRITY)
		struct bio_integrity_payload *bi_integrity; /* data integrity */
#endif
	};

	unsigned short		bi_vcnt;	/* how many bio_vec's */

	/*
	 * Everything starting with bi_max_vecs will be preserved by bio_reset()
	 */
	unsigned short		bi_max_vecs;	/* max bvl_vecs we can hold */

	atomic_t		__bi_cnt;	/* pin count */

	struct bio_vec		*bi_io_vec;	/* the actual vec list */

	struct bio_set		*bi_pool;

	/*
	 * We can inline a number of vecs at the end of the bio, to avoid
	 * double allocations for a small number of bio_vecs. This member
	 * MUST obviously be kept at the very end of the bio.
	 */
	struct bio_vec		bi_inline_vecs[];
};

#define BIO_RESET_BYTES		offsetof(struct bio, bi_max_vecs)

/*
 * bio flags
 */
enum {
	BIO_NO_PAGE_REF,	/* don't put release vec pages */
	BIO_CLONED,		/* doesn't own data */
	BIO_BOUNCED,		/* bio is a bounce bio */
	BIO_USER_MAPPED,	/* contains user pages */
	BIO_NULL_MAPPED,	/* contains invalid user pages */
	BIO_WORKINGSET,		/* contains userspace workingset pages */
	BIO_QUIET,		/* Make BIO Quiet */
	BIO_CHAIN,		/* chained bio, ->bi_remaining in effect */
	BIO_REFFED,		/* bio has elevated ->bi_cnt */
	BIO_THROTTLED,		/* This bio has already been subjected to
				 * throttling rules. Don't do it again. */
	BIO_TRACE_COMPLETION,	/* bio_endio() should trace the final completion
				 * of this bio. */
	BIO_CGROUP_ACCT,	/* has been accounted to a cgroup */
	BIO_TRACKED,		/* set if bio goes through the rq_qos path */
	BIO_FLAG_LAST
};

/* See BVEC_POOL_OFFSET below before adding new flags */

/*
 * We support 6 different bvec pools, the last one is magic in that it
 * is backed by a mempool.
 */
#define BVEC_POOL_NR		6
#define BVEC_POOL_MAX		(BVEC_POOL_NR - 1)

/*
 * Top 3 bits of bio flags indicate the pool the bvecs came from.  We add
 * 1 to the actual index so that 0 indicates that there are no bvecs to be
 * freed.
 */
#define BVEC_POOL_BITS		(3)
#define BVEC_POOL_OFFSET	(16 - BVEC_POOL_BITS)
#define BVEC_POOL_IDX(bio)	((bio)->bi_flags >> BVEC_POOL_OFFSET)
#if (1 << BVEC_POOL_BITS) < (BVEC_POOL_NR + 1)
# error "BVEC_POOL_BITS is too small"
#endif

/*
 * Flags starting here get preserved by bio_reset() - this includes
 * only BVEC_POOL_IDX()
 */
#define BIO_RESET_BITS	BVEC_POOL_OFFSET
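
/*
 * Sketch (hypothetical helper, not kernel API): the pool index stored in
 * the top BVEC_POOL_BITS of bi_flags is biased by 1, so bvecs from pool 2
 * are recorded as 3, and a raw value of 0 means there are no bvecs to free.
 */
static inline bool example_bio_has_pool_bvecs(struct bio *bio)
{
	return BVEC_POOL_IDX(bio) != 0;
}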

typedef __u32 __bitwise blk_mq_req_flags_t;

/*
 * Operations and flags common to the bio and request structures.
 * We use 8 bits for encoding the operation, and the remaining 24 for flags.
 *
 * The least significant bit of the operation number indicates the data
 * transfer direction:
 *
 *   - if the least significant bit is set transfers are TO the device
 *   - if the least significant bit is not set transfers are FROM the device
 *
 * If an operation does not transfer data the least significant bit has no
 * meaning.
 */
#define REQ_OP_BITS	8
#define REQ_OP_MASK	((1 << REQ_OP_BITS) - 1)
#define REQ_FLAG_BITS	24

enum req_opf {
	/* read sectors from the device */
	REQ_OP_READ		= 0,
	/* write sectors to the device */
	REQ_OP_WRITE		= 1,
	/* flush the volatile write cache */
	REQ_OP_FLUSH		= 2,
	/* discard sectors */
	REQ_OP_DISCARD		= 3,
	/* securely erase sectors */
	REQ_OP_SECURE_ERASE	= 5,
	/* write the same sector many times */
	REQ_OP_WRITE_SAME	= 7,
	/* write the zero filled sector many times */
	REQ_OP_WRITE_ZEROES	= 9,
	/* Open a zone */
	REQ_OP_ZONE_OPEN	= 10,
	/* Close a zone */
	REQ_OP_ZONE_CLOSE	= 11,
	/* Transition a zone to full */
	REQ_OP_ZONE_FINISH	= 12,
	/* write data at the current zone write pointer */
	REQ_OP_ZONE_APPEND	= 13,
	/* reset a zone write pointer */
	REQ_OP_ZONE_RESET	= 15,
	/* reset all the zones present on the device */
	REQ_OP_ZONE_RESET_ALL	= 17,

	/* SCSI passthrough using struct scsi_request */
	REQ_OP_SCSI_IN		= 32,
	REQ_OP_SCSI_OUT		= 33,
	/* Driver private requests */
	REQ_OP_DRV_IN		= 34,
	REQ_OP_DRV_OUT		= 35,

	REQ_OP_LAST,
};

enum req_flag_bits {
	__REQ_FAILFAST_DEV =	/* no driver retries of device errors */
		REQ_OP_BITS,
	__REQ_FAILFAST_TRANSPORT, /* no driver retries of transport errors */
	__REQ_FAILFAST_DRIVER,	/* no driver retries of driver errors */
	__REQ_SYNC,		/* request is sync (sync write or read) */
	__REQ_META,		/* metadata io request */
	__REQ_PRIO,		/* boost priority in cfq */
	__REQ_NOMERGE,		/* don't touch this for merging */
	__REQ_IDLE,		/* anticipate more IO after this one */
	__REQ_INTEGRITY,	/* I/O includes block integrity payload */
	__REQ_FUA,		/* forced unit access */
	__REQ_PREFLUSH,		/* request for cache flush */
	__REQ_RAHEAD,		/* read ahead, can fail anytime */
	__REQ_BACKGROUND,	/* background IO */
	__REQ_NOWAIT,		/* Don't wait if request will block */
	/*
	 * When a shared kthread needs to issue a bio for a cgroup, doing
	 * so synchronously can lead to priority inversions as the kthread
	 * can be trapped waiting for that cgroup.  CGROUP_PUNT flag makes
	 * submit_bio() punt the actual issuing to a dedicated per-blkcg
	 * work item to avoid such priority inversions.
	 */
	__REQ_CGROUP_PUNT,

	/* command specific flags for REQ_OP_WRITE_ZEROES: */
	__REQ_NOUNMAP,		/* do not free blocks when zeroing */

	__REQ_HIPRI,

	/* for driver use */
	__REQ_DRV,
	__REQ_SWAP,		/* swapping request. */
	__REQ_NR_BITS,		/* stops here */
};

#define REQ_FAILFAST_DEV	(1ULL << __REQ_FAILFAST_DEV)
#define REQ_FAILFAST_TRANSPORT	(1ULL << __REQ_FAILFAST_TRANSPORT)
#define REQ_FAILFAST_DRIVER	(1ULL << __REQ_FAILFAST_DRIVER)
#define REQ_SYNC		(1ULL << __REQ_SYNC)
#define REQ_META		(1ULL << __REQ_META)
#define REQ_PRIO		(1ULL << __REQ_PRIO)
#define REQ_NOMERGE		(1ULL << __REQ_NOMERGE)
#define REQ_IDLE		(1ULL << __REQ_IDLE)
#define REQ_INTEGRITY		(1ULL << __REQ_INTEGRITY)
#define REQ_FUA			(1ULL << __REQ_FUA)
#define REQ_PREFLUSH		(1ULL << __REQ_PREFLUSH)
#define REQ_RAHEAD		(1ULL << __REQ_RAHEAD)
#define REQ_BACKGROUND		(1ULL << __REQ_BACKGROUND)
#define REQ_NOWAIT		(1ULL << __REQ_NOWAIT)
#define REQ_CGROUP_PUNT		(1ULL << __REQ_CGROUP_PUNT)

#define REQ_NOUNMAP		(1ULL << __REQ_NOUNMAP)
#define REQ_HIPRI		(1ULL << __REQ_HIPRI)

#define REQ_DRV			(1ULL << __REQ_DRV)
#define REQ_SWAP		(1ULL << __REQ_SWAP)

#define REQ_FAILFAST_MASK \
	(REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER)

#define REQ_NOMERGE_FLAGS \
	(REQ_NOMERGE | REQ_PREFLUSH | REQ_FUA)

enum stat_group {
	STAT_READ,
	STAT_WRITE,
	STAT_DISCARD,
	STAT_FLUSH,

	NR_STAT_GROUPS
};

#define bio_op(bio) \
	((bio)->bi_opf & REQ_OP_MASK)
#define req_op(req) \
	((req)->cmd_flags & REQ_OP_MASK)

/* obsolete, don't use in new code */
static inline void bio_set_op_attrs(struct bio *bio, unsigned op,
		unsigned op_flags)
{
	bio->bi_opf = op | op_flags;
}

static inline bool op_is_write(unsigned int op)
{
	return (op & 1);
}
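
/*
 * Sketch (hypothetical helper): how the op/flags split described above
 * decomposes. A FUA write keeps REQ_OP_WRITE in the low REQ_OP_BITS and
 * REQ_FUA in the flag bits above them; the low bit of the op encodes
 * the write direction.
 */
static inline bool example_opf_is_fua_write(unsigned int opf)
{
	return (opf & REQ_OP_MASK) == REQ_OP_WRITE && (opf & REQ_FUA);
}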

/*
 * Check if the bio or request is one that needs special treatment in the
 * flush state machine.
 */
static inline bool op_is_flush(unsigned int op)
{
	return op & (REQ_FUA | REQ_PREFLUSH);
}

/*
 * Reads are always treated as synchronous, as are requests with the FUA or
 * PREFLUSH flag.  Other operations may be marked as synchronous using the
 * REQ_SYNC flag.
 */
static inline bool op_is_sync(unsigned int op)
{
	return (op & REQ_OP_MASK) == REQ_OP_READ ||
		(op & (REQ_SYNC | REQ_FUA | REQ_PREFLUSH));
}

static inline bool op_is_discard(unsigned int op)
{
	return (op & REQ_OP_MASK) == REQ_OP_DISCARD;
}

/*
 * Check if a bio or request operation is a zone management operation, with
 * the exception of REQ_OP_ZONE_RESET_ALL which is treated as a special case
 * due to its different handling in the block layer and device response in
 * case of command failure.
 */
static inline bool op_is_zone_mgmt(enum req_opf op)
{
	switch (op & REQ_OP_MASK) {
	case REQ_OP_ZONE_RESET:
	case REQ_OP_ZONE_OPEN:
	case REQ_OP_ZONE_CLOSE:
	case REQ_OP_ZONE_FINISH:
		return true;
	default:
		return false;
	}
}

static inline int op_stat_group(unsigned int op)
{
	if (op_is_discard(op))
		return STAT_DISCARD;
	return op_is_write(op);
}
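
/*
 * Illustration only: op_stat_group(REQ_OP_READ) == STAT_READ (0),
 * op_stat_group(REQ_OP_WRITE) == STAT_WRITE (1) and
 * op_stat_group(REQ_OP_DISCARD) == STAT_DISCARD; other ops fall back
 * to their direction bit.
 */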

typedef unsigned int blk_qc_t;
#define BLK_QC_T_NONE		-1U
#define BLK_QC_T_EAGAIN		-2U
#define BLK_QC_T_SHIFT		16
#define BLK_QC_T_INTERNAL	(1U << 31)

static inline bool blk_qc_t_valid(blk_qc_t cookie)
{
	return cookie != BLK_QC_T_NONE && cookie != BLK_QC_T_EAGAIN;
}

static inline unsigned int blk_qc_t_to_queue_num(blk_qc_t cookie)
{
	return (cookie & ~BLK_QC_T_INTERNAL) >> BLK_QC_T_SHIFT;
}

static inline unsigned int blk_qc_t_to_tag(blk_qc_t cookie)
{
	return cookie & ((1u << BLK_QC_T_SHIFT) - 1);
}

static inline bool blk_qc_t_is_internal(blk_qc_t cookie)
{
	return (cookie & BLK_QC_T_INTERNAL) != 0;
}
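
/*
 * Packing sketch (hypothetical helper): a cookie stores the hardware
 * queue number above BLK_QC_T_SHIFT and the tag below it, which the
 * two accessors above invert.
 */
static inline blk_qc_t example_make_qc_t(unsigned int queue_num,
					 unsigned int tag)
{
	return (queue_num << BLK_QC_T_SHIFT) | tag;
}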

#endif /* __LINUX_BLK_TYPES_H */