/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Block data types and constants.  Directly include this file only to
 * break include dependency loop.
 */
#ifndef __LINUX_BLK_TYPES_H
#define __LINUX_BLK_TYPES_H

#include <linux/types.h>
#include <linux/bvec.h>
struct bio_set;
struct bio;
struct bio_integrity_payload;
struct page;
struct block_device;
struct io_context;
struct cgroup_subsys_state;

typedef void (bio_end_io_t) (struct bio *);
/*
 * Block error status values.  See block/blk-core:blk_errors for the details.
 */
typedef u8 __bitwise blk_status_t;
#define	BLK_STS_OK 0
#define BLK_STS_NOTSUPP		((__force blk_status_t)1)
#define BLK_STS_TIMEOUT		((__force blk_status_t)2)
#define BLK_STS_NOSPC		((__force blk_status_t)3)
#define BLK_STS_TRANSPORT	((__force blk_status_t)4)
#define BLK_STS_TARGET		((__force blk_status_t)5)
#define BLK_STS_NEXUS		((__force blk_status_t)6)
#define BLK_STS_MEDIUM		((__force blk_status_t)7)
#define BLK_STS_PROTECTION	((__force blk_status_t)8)
#define BLK_STS_RESOURCE	((__force blk_status_t)9)
#define BLK_STS_IOERR		((__force blk_status_t)10)

/* hack for device mapper, don't use elsewhere: */
#define BLK_STS_DM_REQUEUE	((__force blk_status_t)11)

#define BLK_STS_AGAIN		((__force blk_status_t)12)
/**
 * blk_path_error - returns true if error may be path related
 * @error: status the request was completed with
 *
 * Description:
 *     This classifies block error status into non-retryable errors and ones
 *     that may be successful if retried on a failover path.
 *
 * Return:
 *     %false - retrying failover path will not help
 *     %true  - may succeed if retried
 */
static inline bool blk_path_error(blk_status_t error)
{
	switch (error) {
	case BLK_STS_NOTSUPP:
	case BLK_STS_NOSPC:
	case BLK_STS_TARGET:
	case BLK_STS_NEXUS:
	case BLK_STS_MEDIUM:
	case BLK_STS_PROTECTION:
		return false;
	}

	/* Anything else could be a path failure, so should be retried */
	return true;
}
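/*
 * Illustrative sketch (not part of this header): a hypothetical bi_end_io
 * handler that uses blk_path_error() to decide whether a failed bio is
 * worth resubmitting on a failover path. The requeue helper named here is
 * made up for the example.
 */
#if 0
static void example_end_io(struct bio *bio)
{
	if (bio->bi_status && blk_path_error(bio->bi_status)) {
		/* transport-level trouble: another path may well succeed */
		example_requeue_on_other_path(bio);	/* hypothetical helper */
		return;
	}
	/* success, or an error retrying cannot fix (e.g. BLK_STS_MEDIUM) */
	bio_put(bio);
}
#endif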
struct blk_issue_stat {
	u64 stat;
};
/*
 * main unit of I/O for the block layer and lower layers (ie drivers and
 * stacking drivers)
 */
struct bio {
	struct bio		*bi_next;	/* request queue link */
	struct gendisk		*bi_disk;
	unsigned int		bi_opf;		/* bottom bits req flags,
						 * top bits REQ_OP. Use
						 * accessors.
						 */
	unsigned short		bi_flags;	/* status, etc and bvec pool number */
	unsigned short		bi_ioprio;
	unsigned short		bi_write_hint;
	blk_status_t		bi_status;
	u8			bi_partno;
	/* Number of segments in this BIO after
	 * physical address coalescing is performed.
	 */
	unsigned int		bi_phys_segments;
	/*
	 * To keep track of the max segment size, we account for the
	 * sizes of the first and last mergeable segments in this bio.
	 */
	unsigned int		bi_seg_front_size;
	unsigned int		bi_seg_back_size;
	struct bvec_iter	bi_iter;

	atomic_t		__bi_remaining;
	bio_end_io_t		*bi_end_io;

	void			*bi_private;
#ifdef CONFIG_BLK_CGROUP
	/*
	 * Optional ioc and css associated with this bio.  Put on bio
	 * release.  Read comment on top of bio_associate_current().
	 */
	struct io_context	*bi_ioc;
	struct cgroup_subsys_state *bi_css;
#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
	void			*bi_cg_private;
	struct blk_issue_stat	bi_issue_stat;
#endif
#endif
	union {
#if defined(CONFIG_BLK_DEV_INTEGRITY)
		struct bio_integrity_payload *bi_integrity; /* data integrity */
#endif
	};
	unsigned short		bi_vcnt;	/* how many bio_vec's */
	/*
	 * Everything starting with bi_max_vecs will be preserved by bio_reset()
	 */

	unsigned short		bi_max_vecs;	/* max bvl_vecs we can hold */
	atomic_t		__bi_cnt;	/* pin count */

	struct bio_vec		*bi_io_vec;	/* the actual vec list */

	struct bio_set		*bi_pool;
	/*
	 * We can inline a number of vecs at the end of the bio, to avoid
	 * double allocations for a small number of bio_vecs. This member
	 * MUST obviously be kept at the very end of the bio.
	 */
	struct bio_vec		bi_inline_vecs[0];
};
#define BIO_RESET_BYTES		offsetof(struct bio, bi_max_vecs)
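/*
 * Illustrative sketch (an assumption about intent, not the actual
 * bio_reset() implementation): splitting struct bio at bi_max_vecs means a
 * reset only has to clear the transient head of the structure, while the
 * vec array, pool pointer and reference count below it survive.
 */
#if 0
static void example_reset_head(struct bio *bio)
{
	memset(bio, 0, BIO_RESET_BYTES);	/* wipe only the reusable prefix */
}
#endif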
/*
 * bio flags
 */
#define BIO_SEG_VALID	1	/* bi_phys_segments valid */
#define BIO_CLONED	2	/* doesn't own data */
#define BIO_BOUNCED	3	/* bio is a bounce bio */
#define BIO_USER_MAPPED 4	/* contains user pages */
#define BIO_NULL_MAPPED 5	/* contains invalid user pages */
#define BIO_QUIET	6	/* Make BIO Quiet */
#define BIO_CHAIN	7	/* chained bio, ->bi_remaining in effect */
#define BIO_REFFED	8	/* bio has elevated ->bi_cnt */
#define BIO_THROTTLED	9	/* This bio has already been subjected to
				 * throttling rules. Don't do it again. */
#define BIO_TRACE_COMPLETION 10	/* bio_endio() should trace the final completion
				 * of this bio. */
/* See BVEC_POOL_OFFSET below before adding new flags */
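/*
 * Note the asymmetry: the BIO_* values above are bit numbers, whereas the
 * REQ_* values further down are masks. A sketch of a flag test (bio.h's
 * bio_flagged() is the real accessor):
 */
#if 0
static bool example_bio_is_cloned(struct bio *bio)
{
	return bio->bi_flags & (1U << BIO_CLONED);	/* bit number -> mask */
}
#endif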
/*
 * We support 6 different bvec pools, the last one is magic in that it
 * is backed by a mempool.
 */
#define BVEC_POOL_NR		6
#define BVEC_POOL_MAX		(BVEC_POOL_NR - 1)
/*
 * Top 3 bits of bio flags indicate the pool the bvecs came from.  We add
 * 1 to the actual index so that 0 indicates that there are no bvecs to be
 * freed.
 */
#define BVEC_POOL_BITS		(3)
#define BVEC_POOL_OFFSET	(16 - BVEC_POOL_BITS)
#define BVEC_POOL_IDX(bio)	((bio)->bi_flags >> BVEC_POOL_OFFSET)
#if (1 << BVEC_POOL_BITS) < (BVEC_POOL_NR + 1)
# error "BVEC_POOL_BITS is too small"
#endif
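/*
 * Worked example (illustrative; the real encoding is done by the bio
 * allocation/freeing code): bvecs taken from pool index 2 are stored as
 * 2 + 1 = 3 in the top three bits of bi_flags, so BVEC_POOL_IDX() == 0
 * can mean "no bvecs to free".
 */
#if 0
static void example_encode_pool(struct bio *bio, unsigned int idx)
{
	bio->bi_flags &= ~(((1 << BVEC_POOL_BITS) - 1) << BVEC_POOL_OFFSET);
	bio->bi_flags |= (idx + 1) << BVEC_POOL_OFFSET;	/* store idx + 1 */
	/* BVEC_POOL_IDX(bio) now reads back idx + 1 */
}
#endif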
/*
 * Flags starting here get preserved by bio_reset() - this includes
 * only BVEC_POOL_IDX()
 */
#define BIO_RESET_BITS	BVEC_POOL_OFFSET

typedef __u32 __bitwise blk_mq_req_flags_t;
/*
 * Operations and flags common to the bio and request structures.
 * We use 8 bits for encoding the operation, and the remaining 24 for flags.
 *
 * The least significant bit of the operation number indicates the data
 * transfer direction:
 *
 *   - if the least significant bit is set transfers are TO the device
 *   - if the least significant bit is not set transfers are FROM the device
 *
 * If an operation does not transfer data the least significant bit has no
 * meaning.
 */
#define REQ_OP_BITS	8
#define REQ_OP_MASK	((1 << REQ_OP_BITS) - 1)
#define REQ_FLAG_BITS	24
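/*
 * Worked example (illustrative): with REQ_OP_BITS == 8, bi_opf splits into
 * an operation in the low byte and flags above it; the LSB of the op gives
 * the direction (REQ_OP_WRITE == 1 transfers TO the device, REQ_OP_READ ==
 * 0 transfers FROM it).
 */
#if 0
static void example_decode_opf(unsigned int opf)
{
	unsigned int op    = opf & REQ_OP_MASK;		/* low 8 bits */
	unsigned int flags = opf & ~REQ_OP_MASK;	/* high 24 bits */
	bool to_device     = op & 1;			/* odd ops write */

	(void)flags; (void)to_device;
}
#endif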
enum req_opf {
	/* read sectors from the device */
	REQ_OP_READ		= 0,
	/* write sectors to the device */
	REQ_OP_WRITE		= 1,
	/* flush the volatile write cache */
	REQ_OP_FLUSH		= 2,
	/* discard sectors */
	REQ_OP_DISCARD		= 3,
	/* get zone information */
	REQ_OP_ZONE_REPORT	= 4,
	/* securely erase sectors */
	REQ_OP_SECURE_ERASE	= 5,
	/* reset a zone write pointer */
	REQ_OP_ZONE_RESET	= 6,
	/* write the same sector many times */
	REQ_OP_WRITE_SAME	= 7,
	/* write the zero filled sector many times */
	REQ_OP_WRITE_ZEROES	= 9,

	/* SCSI passthrough using struct scsi_request */
	REQ_OP_SCSI_IN		= 32,
	REQ_OP_SCSI_OUT		= 33,
	/* Driver private requests */
	REQ_OP_DRV_IN		= 34,
	REQ_OP_DRV_OUT		= 35,

	REQ_OP_LAST,
};
enum req_flag_bits {
	__REQ_FAILFAST_DEV =	/* no driver retries of device errors */
		REQ_OP_BITS,
	__REQ_FAILFAST_TRANSPORT, /* no driver retries of transport errors */
	__REQ_FAILFAST_DRIVER,	/* no driver retries of driver errors */
	__REQ_SYNC,		/* request is sync (sync write or read) */
	__REQ_META,		/* metadata io request */
	__REQ_PRIO,		/* boost priority in cfq */
	__REQ_NOMERGE,		/* don't touch this for merging */
	__REQ_IDLE,		/* anticipate more IO after this one */
	__REQ_INTEGRITY,	/* I/O includes block integrity payload */
	__REQ_FUA,		/* forced unit access */
	__REQ_PREFLUSH,		/* request for cache flush */
	__REQ_RAHEAD,		/* read ahead, can fail anytime */
	__REQ_BACKGROUND,	/* background IO */
	__REQ_NOWAIT,		/* Don't wait if request will block */
	/* command specific flags for REQ_OP_WRITE_ZEROES: */
	__REQ_NOUNMAP,		/* do not free blocks when zeroing */

	/* for driver use */
	__REQ_DRV,

	__REQ_NR_BITS,		/* stops here */
};
#define REQ_FAILFAST_DEV	(1ULL << __REQ_FAILFAST_DEV)
#define REQ_FAILFAST_TRANSPORT	(1ULL << __REQ_FAILFAST_TRANSPORT)
#define REQ_FAILFAST_DRIVER	(1ULL << __REQ_FAILFAST_DRIVER)
#define REQ_SYNC		(1ULL << __REQ_SYNC)
#define REQ_META		(1ULL << __REQ_META)
#define REQ_PRIO		(1ULL << __REQ_PRIO)
#define REQ_NOMERGE		(1ULL << __REQ_NOMERGE)
#define REQ_IDLE		(1ULL << __REQ_IDLE)
#define REQ_INTEGRITY		(1ULL << __REQ_INTEGRITY)
#define REQ_FUA			(1ULL << __REQ_FUA)
#define REQ_PREFLUSH		(1ULL << __REQ_PREFLUSH)
#define REQ_RAHEAD		(1ULL << __REQ_RAHEAD)
#define REQ_BACKGROUND		(1ULL << __REQ_BACKGROUND)
#define REQ_NOWAIT		(1ULL << __REQ_NOWAIT)

#define REQ_NOUNMAP		(1ULL << __REQ_NOUNMAP)

#define REQ_DRV			(1ULL << __REQ_DRV)
#define REQ_FAILFAST_MASK \
	(REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER)

#define REQ_NOMERGE_FLAGS \
	(REQ_NOMERGE | REQ_PREFLUSH | REQ_FUA)

#define bio_op(bio) \
	((bio)->bi_opf & REQ_OP_MASK)
#define req_op(req) \
	((req)->cmd_flags & REQ_OP_MASK)
/* obsolete, don't use in new code */
static inline void bio_set_op_attrs(struct bio *bio, unsigned op,
				    unsigned op_flags)
{
	bio->bi_opf = op | op_flags;
}
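/*
 * Illustrative usage (sketch): new code is expected to build bi_opf
 * directly rather than call the obsolete helper above, e.g.:
 */
#if 0
static void example_mark_sync_write(struct bio *bio)
{
	bio->bi_opf = REQ_OP_WRITE | REQ_SYNC;	/* op in low bits, flags above */
}
#endif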
static inline bool op_is_write(unsigned int op)
{
	return (op & 1);
}
/*
 * Check if the bio or request is one that needs special treatment in the
 * flush state machine.
 */
static inline bool op_is_flush(unsigned int op)
{
	return op & (REQ_FUA | REQ_PREFLUSH);
}
/*
 * Reads are always treated as synchronous, as are requests with the FUA or
 * PREFLUSH flag.  Other operations may be marked as synchronous using the
 * REQ_SYNC flag.
 */
static inline bool op_is_sync(unsigned int op)
{
	return (op & REQ_OP_MASK) == REQ_OP_READ ||
		(op & (REQ_SYNC | REQ_FUA | REQ_PREFLUSH));
}
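/*
 * Worked example (illustrative): every read is synchronous regardless of
 * flags, while a plain write only counts as sync once REQ_SYNC, REQ_FUA
 * or REQ_PREFLUSH is set.
 */
#if 0
static void example_sync_checks(void)
{
	bool a = op_is_sync(REQ_OP_READ);		/* true: reads are always sync */
	bool b = op_is_sync(REQ_OP_WRITE);		/* false: plain async write */
	bool c = op_is_sync(REQ_OP_WRITE | REQ_SYNC);	/* true: explicitly sync */

	(void)a; (void)b; (void)c;
}
#endif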
typedef unsigned int blk_qc_t;
#define BLK_QC_T_NONE		-1U
#define BLK_QC_T_SHIFT		16
#define BLK_QC_T_INTERNAL	(1U << 31)
static inline bool blk_qc_t_valid(blk_qc_t cookie)
{
	return cookie != BLK_QC_T_NONE;
}
static inline blk_qc_t blk_tag_to_qc_t(unsigned int tag, unsigned int queue_num,
				       bool internal)
{
	blk_qc_t ret = tag | (queue_num << BLK_QC_T_SHIFT);

	if (internal)
		ret |= BLK_QC_T_INTERNAL;

	return ret;
}
static inline unsigned int blk_qc_t_to_queue_num(blk_qc_t cookie)
{
	return (cookie & ~BLK_QC_T_INTERNAL) >> BLK_QC_T_SHIFT;
}
static inline unsigned int blk_qc_t_to_tag(blk_qc_t cookie)
{
	return cookie & ((1u << BLK_QC_T_SHIFT) - 1);
}
static inline bool blk_qc_t_is_internal(blk_qc_t cookie)
{
	return (cookie & BLK_QC_T_INTERNAL) != 0;
}
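/*
 * Worked example (illustrative): a completion cookie packs the tag in the
 * low 16 bits and the queue number above it, with bit 31 marking internal
 * (scheduler) tags. Encoding and decoding round-trip through the helpers:
 */
#if 0
static void example_qc_roundtrip(void)
{
	blk_qc_t cookie = blk_tag_to_qc_t(42, 3, false);

	/* blk_qc_t_to_queue_num(cookie) == 3, blk_qc_t_to_tag(cookie) == 42 */
	/* blk_qc_t_is_internal(cookie) == false, blk_qc_t_valid(cookie) == true */
	(void)cookie;
}
#endif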
#endif /* __LINUX_BLK_TYPES_H */