1 // SPDX-License-Identifier: GPL-2.0
3 * Copyright (C) 1991, 1992 Linus Torvalds
4 * Copyright (C) 1994, Karl Keyte: Added support for disk statistics
5 * Elevator latency, (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
6 * Queue request tables / lock, selectable elevator, Jens Axboe <axboe@suse.de>
7 * kernel-doc documentation started by NeilBrown <neilb@cse.unsw.edu.au>
9 * bio rewrite, highmem i/o, etc, Jens Axboe <axboe@suse.de> - may 2001
13 * This handles all read/write requests to block devices
15 #include <linux/kernel.h>
16 #include <linux/module.h>
17 #include <linux/bio.h>
18 #include <linux/blkdev.h>
19 #include <linux/blk-pm.h>
20 #include <linux/blk-integrity.h>
21 #include <linux/highmem.h>
23 #include <linux/pagemap.h>
24 #include <linux/kernel_stat.h>
25 #include <linux/string.h>
26 #include <linux/init.h>
27 #include <linux/completion.h>
28 #include <linux/slab.h>
29 #include <linux/swap.h>
30 #include <linux/writeback.h>
31 #include <linux/task_io_accounting_ops.h>
32 #include <linux/fault-inject.h>
33 #include <linux/list_sort.h>
34 #include <linux/delay.h>
35 #include <linux/ratelimit.h>
36 #include <linux/pm_runtime.h>
37 #include <linux/t10-pi.h>
38 #include <linux/debugfs.h>
39 #include <linux/bpf.h>
40 #include <linux/psi.h>
41 #include <linux/part_stat.h>
42 #include <linux/sched/sysctl.h>
43 #include <linux/blk-crypto.h>
45 #define CREATE_TRACE_POINTS
46 #include <trace/events/block.h>
49 #include "blk-mq-sched.h"
51 #include "blk-cgroup.h"
52 #include "blk-throttle.h"
54 struct dentry *blk_debugfs_root;
56 EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_remap);
57 EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap);
58 EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_complete);
59 EXPORT_TRACEPOINT_SYMBOL_GPL(block_split);
60 EXPORT_TRACEPOINT_SYMBOL_GPL(block_unplug);
61 EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_insert);
63 DEFINE_IDA(blk_queue_ida);
66 * For queue allocation
68 struct kmem_cache *blk_requestq_cachep;
69 struct kmem_cache *blk_requestq_srcu_cachep;
72 * Controlling structure to kblockd
74 static struct workqueue_struct *kblockd_workqueue;
77 * blk_queue_flag_set - atomically set a queue flag
78 * @flag: flag to be set
81 void blk_queue_flag_set(unsigned int flag, struct request_queue *q)
83 set_bit(flag, &q->queue_flags);
85 EXPORT_SYMBOL(blk_queue_flag_set);
88 * blk_queue_flag_clear - atomically clear a queue flag
89 * @flag: flag to be cleared
92 void blk_queue_flag_clear(unsigned int flag, struct request_queue *q)
94 clear_bit(flag, &q->queue_flags);
96 EXPORT_SYMBOL(blk_queue_flag_clear);
99 * blk_queue_flag_test_and_set - atomically test and set a queue flag
100 * @flag: flag to be set
103 * Returns the previous value of @flag - 0 if the flag was not set and 1 if
104 * the flag was already set.
106 bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q)
108 return test_and_set_bit(flag, &q->queue_flags);
110 EXPORT_SYMBOL_GPL(blk_queue_flag_test_and_set);
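/*
 * Illustrative usage sketch (not part of this file): a driver configuring its
 * queue might combine the flag helpers above roughly as follows. The
 * QUEUE_FLAG_* values are real, but the surrounding code is a hedged example,
 * not taken from an in-tree caller.
 *
 *	blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
 *	blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q);
 *	if (!blk_queue_flag_test_and_set(QUEUE_FLAG_STABLE_WRITES, q))
 *		pr_debug("stable writes enabled for the first time\n");
 */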
112 #define REQ_OP_NAME(name) [REQ_OP_##name] = #name
113 static const char *const blk_op_name[] = {
117 REQ_OP_NAME(DISCARD),
118 REQ_OP_NAME(SECURE_ERASE),
119 REQ_OP_NAME(ZONE_RESET),
120 REQ_OP_NAME(ZONE_RESET_ALL),
121 REQ_OP_NAME(ZONE_OPEN),
122 REQ_OP_NAME(ZONE_CLOSE),
123 REQ_OP_NAME(ZONE_FINISH),
124 REQ_OP_NAME(ZONE_APPEND),
125 REQ_OP_NAME(WRITE_SAME),
126 REQ_OP_NAME(WRITE_ZEROES),
128 REQ_OP_NAME(DRV_OUT),
133 * blk_op_str - Return the string XXX for the given REQ_OP_XXX operation.
136 * Description: Centralized block layer helper to convert REQ_OP_XXX into
137 * string format. Useful for debugging and tracing a bio or request. For an
138 * invalid REQ_OP_XXX it returns the string "UNKNOWN".
140 inline const char *blk_op_str(unsigned int op)
142 const char *op_str = "UNKNOWN";
144 if (op < ARRAY_SIZE(blk_op_name) && blk_op_name[op])
145 op_str = blk_op_name[op];
149 EXPORT_SYMBOL_GPL(blk_op_str);
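/*
 * Example (illustrative only): a debug or trace message can combine
 * blk_op_str() with bio_op() to print a human-readable operation name:
 *
 *	pr_debug("%s: op=%s sector=%llu\n", __func__,
 *		 blk_op_str(bio_op(bio)),
 *		 (unsigned long long)bio->bi_iter.bi_sector);
 */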
151 static const struct {
155 [BLK_STS_OK] = { 0, "" },
156 [BLK_STS_NOTSUPP] = { -EOPNOTSUPP, "operation not supported" },
157 [BLK_STS_TIMEOUT] = { -ETIMEDOUT, "timeout" },
158 [BLK_STS_NOSPC] = { -ENOSPC, "critical space allocation" },
159 [BLK_STS_TRANSPORT] = { -ENOLINK, "recoverable transport" },
160 [BLK_STS_TARGET] = { -EREMOTEIO, "critical target" },
161 [BLK_STS_NEXUS] = { -EBADE, "critical nexus" },
162 [BLK_STS_MEDIUM] = { -ENODATA, "critical medium" },
163 [BLK_STS_PROTECTION] = { -EILSEQ, "protection" },
164 [BLK_STS_RESOURCE] = { -ENOMEM, "kernel resource" },
165 [BLK_STS_DEV_RESOURCE] = { -EBUSY, "device resource" },
166 [BLK_STS_AGAIN] = { -EAGAIN, "nonblocking retry" },
167 [BLK_STS_OFFLINE] = { -ENODEV, "device offline" },
169 /* device mapper special case, should not leak out: */
170 [BLK_STS_DM_REQUEUE] = { -EREMCHG, "dm internal retry" },
172 /* zone device specific errors */
173 [BLK_STS_ZONE_OPEN_RESOURCE] = { -ETOOMANYREFS, "open zones exceeded" },
174 [BLK_STS_ZONE_ACTIVE_RESOURCE] = { -EOVERFLOW, "active zones exceeded" },
176 /* everything else not covered above: */
177 [BLK_STS_IOERR] = { -EIO, "I/O" },
180 blk_status_t errno_to_blk_status(int errno)
184 for (i = 0; i < ARRAY_SIZE(blk_errors); i++) {
185 if (blk_errors[i].errno == errno)
186 return (__force blk_status_t)i;
189 return BLK_STS_IOERR;
191 EXPORT_SYMBOL_GPL(errno_to_blk_status);
193 int blk_status_to_errno(blk_status_t status)
195 int idx = (__force int)status;
197 if (WARN_ON_ONCE(idx >= ARRAY_SIZE(blk_errors)))
199 return blk_errors[idx].errno;
201 EXPORT_SYMBOL_GPL(blk_status_to_errno);
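/*
 * Conversion sketch (illustrative; my_driver_do_io() and rq belong to a
 * hypothetical blk-mq driver): code that works in errno space typically
 * translates to blk_status_t at the completion boundary.
 *
 *	blk_status_t sts = errno_to_blk_status(my_driver_do_io(rq));
 *
 *	if (sts != BLK_STS_OK)
 *		pr_warn("I/O failed: %d\n", blk_status_to_errno(sts));
 *	blk_mq_end_request(rq, sts);
 */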
203 const char *blk_status_to_str(blk_status_t status)
205 int idx = (__force int)status;
207 if (WARN_ON_ONCE(idx >= ARRAY_SIZE(blk_errors)))
209 return blk_errors[idx].name;
213 * blk_sync_queue - cancel any pending callbacks on a queue
217 * The block layer may perform asynchronous callback activity
218 * on a queue, such as calling the unplug function after a timeout.
219 * A block device may call blk_sync_queue to ensure that any
220 * such activity is cancelled, thus allowing it to release resources
221 * that the callbacks might use. The caller must already have made sure
222 * that its ->submit_bio will not re-add plugging prior to calling this function.
225 * This function does not cancel any asynchronous activity arising
226 * out of elevator or throttling code. That would require elevator_exit()
227 * and blkcg_exit_queue() to be called with queue lock initialized.
230 void blk_sync_queue(struct request_queue *q)
232 del_timer_sync(&q->timeout);
233 cancel_work_sync(&q->timeout_work);
235 EXPORT_SYMBOL(blk_sync_queue);
238 * blk_set_pm_only - increment pm_only counter
239 * @q: request queue pointer
241 void blk_set_pm_only(struct request_queue *q)
243 atomic_inc(&q->pm_only);
245 EXPORT_SYMBOL_GPL(blk_set_pm_only);
247 void blk_clear_pm_only(struct request_queue *q)
251 pm_only = atomic_dec_return(&q->pm_only);
252 WARN_ON_ONCE(pm_only < 0);
254 wake_up_all(&q->mq_freeze_wq);
256 EXPORT_SYMBOL_GPL(blk_clear_pm_only);
259 * blk_put_queue - decrement the request_queue refcount
260 * @q: the request_queue structure to decrement the refcount for
262 * Decrements the refcount of the request_queue kobject. When this reaches 0
263 * we'll have blk_release_queue() called.
265 * Context: Any context, but the last reference must not be dropped from atomic context.
268 void blk_put_queue(struct request_queue *q)
270 kobject_put(&q->kobj);
272 EXPORT_SYMBOL(blk_put_queue);
274 void blk_queue_start_drain(struct request_queue *q)
277 * When the queue DYING flag is set, we need to block new requests from
278 * entering the queue, so we call blk_freeze_queue_start() to
279 * prevent I/O from crossing blk_queue_enter().
281 blk_freeze_queue_start(q);
283 blk_mq_wake_waiters(q);
284 /* Make blk_queue_enter() reexamine the DYING flag. */
285 wake_up_all(&q->mq_freeze_wq);
288 void blk_set_queue_dying(struct request_queue *q)
290 blk_queue_flag_set(QUEUE_FLAG_DYING, q);
291 blk_queue_start_drain(q);
293 EXPORT_SYMBOL_GPL(blk_set_queue_dying);
296 * blk_cleanup_queue - shutdown a request queue
297 * @q: request queue to shutdown
299 * Mark @q DYING, drain all pending requests, mark @q DEAD, destroy and
300 * put it. All future requests will be failed immediately with -ENODEV.
304 void blk_cleanup_queue(struct request_queue *q)
306 /* cannot be called from atomic context */
309 WARN_ON_ONCE(blk_queue_registered(q));
311 /* mark @q DYING, no new requests or merges will be allowed afterwards */
312 blk_set_queue_dying(q);
314 blk_queue_flag_set(QUEUE_FLAG_NOMERGES, q);
315 blk_queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
318 * Drain all requests queued before the DYING marking. Set the DEAD flag to
319 * prevent blk_mq_run_hw_queues() from accessing the hardware queues
320 * after draining has finished.
324 blk_queue_flag_set(QUEUE_FLAG_DEAD, q);
327 if (queue_is_mq(q)) {
328 blk_mq_cancel_work_sync(q);
329 blk_mq_exit_queue(q);
333 * In theory, the request pool of sched_tags belongs to the request queue.
334 * However, the current implementation requires the tag_set for freeing
335 * requests, so free the pool now.
337 * The queue has become frozen, so there can't be any in-queue requests and
338 * it is safe to free the requests now.
340 mutex_lock(&q->sysfs_lock);
342 blk_mq_sched_free_rqs(q);
343 mutex_unlock(&q->sysfs_lock);
345 /* @q is and will stay empty, shutdown and put */
348 EXPORT_SYMBOL(blk_cleanup_queue);
351 * blk_queue_enter() - try to increase q->q_usage_counter
352 * @q: request queue pointer
353 * @flags: BLK_MQ_REQ_NOWAIT and/or BLK_MQ_REQ_PM
355 int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags)
357 const bool pm = flags & BLK_MQ_REQ_PM;
359 while (!blk_try_enter_queue(q, pm)) {
360 if (flags & BLK_MQ_REQ_NOWAIT)
364 * read pair of barrier in blk_freeze_queue_start(), we need to
365 * order reading __PERCPU_REF_DEAD flag of .q_usage_counter and
366 * reading .mq_freeze_depth or queue dying flag, otherwise the
367 * following wait may never return if the two reads are reordered.
371 wait_event(q->mq_freeze_wq,
372 (!q->mq_freeze_depth &&
373 blk_pm_resume_queue(pm, q)) ||
375 if (blk_queue_dying(q))
382 int __bio_queue_enter(struct request_queue *q, struct bio *bio)
384 while (!blk_try_enter_queue(q, false)) {
385 struct gendisk *disk = bio->bi_bdev->bd_disk;
387 if (bio->bi_opf & REQ_NOWAIT) {
388 if (test_bit(GD_DEAD, &disk->state))
390 bio_wouldblock_error(bio);
395 * read pair of barrier in blk_freeze_queue_start(), we need to
396 * order reading __PERCPU_REF_DEAD flag of .q_usage_counter and
397 * reading .mq_freeze_depth or queue dying flag, otherwise the
398 * following wait may never return if the two reads are reordered.
402 wait_event(q->mq_freeze_wq,
403 (!q->mq_freeze_depth &&
404 blk_pm_resume_queue(false, q)) ||
405 test_bit(GD_DEAD, &disk->state));
406 if (test_bit(GD_DEAD, &disk->state))
416 void blk_queue_exit(struct request_queue *q)
418 percpu_ref_put(&q->q_usage_counter);
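/*
 * Usage sketch (illustrative): callers that need to issue work against a
 * queue outside the normal bio path pair blk_queue_enter() with
 * blk_queue_exit() so the queue cannot be frozen or torn down underneath
 * them.
 *
 *	int ret = blk_queue_enter(q, BLK_MQ_REQ_NOWAIT);
 *
 *	if (ret)
 *		return ret;
 *	... do work that requires @q to stay alive ...
 *	blk_queue_exit(q);
 */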
421 static void blk_queue_usage_counter_release(struct percpu_ref *ref)
423 struct request_queue *q =
424 container_of(ref, struct request_queue, q_usage_counter);
426 wake_up_all(&q->mq_freeze_wq);
429 static void blk_rq_timed_out_timer(struct timer_list *t)
431 struct request_queue *q = from_timer(q, t, timeout);
433 kblockd_schedule_work(&q->timeout_work);
436 static void blk_timeout_work(struct work_struct *work)
440 struct request_queue *blk_alloc_queue(int node_id, bool alloc_srcu)
442 struct request_queue *q;
445 q = kmem_cache_alloc_node(blk_get_queue_kmem_cache(alloc_srcu),
446 GFP_KERNEL | __GFP_ZERO, node_id);
451 blk_queue_flag_set(QUEUE_FLAG_HAS_SRCU, q);
452 if (init_srcu_struct(q->srcu) != 0)
456 q->last_merge = NULL;
458 q->id = ida_simple_get(&blk_queue_ida, 0, 0, GFP_KERNEL);
462 ret = bioset_init(&q->bio_split, BIO_POOL_SIZE, 0, 0);
466 q->stats = blk_alloc_queue_stats();
472 atomic_set(&q->nr_active_requests_shared_tags, 0);
474 timer_setup(&q->timeout, blk_rq_timed_out_timer, 0);
475 INIT_WORK(&q->timeout_work, blk_timeout_work);
476 INIT_LIST_HEAD(&q->icq_list);
478 kobject_init(&q->kobj, &blk_queue_ktype);
480 mutex_init(&q->debugfs_mutex);
481 mutex_init(&q->sysfs_lock);
482 mutex_init(&q->sysfs_dir_lock);
483 spin_lock_init(&q->queue_lock);
485 init_waitqueue_head(&q->mq_freeze_wq);
486 mutex_init(&q->mq_freeze_lock);
489 * Init percpu_ref in atomic mode so that it's faster to shutdown.
490 * See blk_register_queue() for details.
492 if (percpu_ref_init(&q->q_usage_counter,
493 blk_queue_usage_counter_release,
494 PERCPU_REF_INIT_ATOMIC, GFP_KERNEL))
497 blk_queue_dma_alignment(q, 511);
498 blk_set_default_limits(&q->limits);
499 q->nr_requests = BLKDEV_DEFAULT_RQ;
504 blk_free_queue_stats(q->stats);
506 bioset_exit(&q->bio_split);
508 ida_simple_remove(&blk_queue_ida, q->id);
511 cleanup_srcu_struct(q->srcu);
513 kmem_cache_free(blk_get_queue_kmem_cache(alloc_srcu), q);
518 * blk_get_queue - increment the request_queue refcount
519 * @q: the request_queue structure to increment the refcount for
521 * Increment the refcount of the request_queue kobject.
523 * Context: Any context.
525 bool blk_get_queue(struct request_queue *q)
527 if (likely(!blk_queue_dying(q))) {
534 EXPORT_SYMBOL(blk_get_queue);
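/*
 * Refcounting sketch (illustrative): a holder that wants the queue to
 * outlive the current context takes a reference and drops it when done.
 * blk_get_queue() fails once the queue has been marked dying.
 *
 *	if (!blk_get_queue(q))
 *		return -ENXIO;
 *	... @q may now be used beyond this context ...
 *	blk_put_queue(q);
 */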
536 #ifdef CONFIG_FAIL_MAKE_REQUEST
538 static DECLARE_FAULT_ATTR(fail_make_request);
540 static int __init setup_fail_make_request(char *str)
542 return setup_fault_attr(&fail_make_request, str);
544 __setup("fail_make_request=", setup_fail_make_request);
546 bool should_fail_request(struct block_device *part, unsigned int bytes)
548 return part->bd_make_it_fail && should_fail(&fail_make_request, bytes);
551 static int __init fail_make_request_debugfs(void)
553 struct dentry *dir = fault_create_debugfs_attr("fail_make_request",
554 NULL, &fail_make_request);
556 return PTR_ERR_OR_ZERO(dir);
559 late_initcall(fail_make_request_debugfs);
560 #endif /* CONFIG_FAIL_MAKE_REQUEST */
562 static inline bool bio_check_ro(struct bio *bio)
564 if (op_is_write(bio_op(bio)) && bdev_read_only(bio->bi_bdev)) {
565 if (op_is_flush(bio->bi_opf) && !bio_sectors(bio))
567 pr_warn("Trying to write to read-only block-device %pg\n",
569 /* Older lvm-tools actually trigger this */
576 static noinline int should_fail_bio(struct bio *bio)
578 if (should_fail_request(bdev_whole(bio->bi_bdev), bio->bi_iter.bi_size))
582 ALLOW_ERROR_INJECTION(should_fail_bio, ERRNO);
585 * Check whether this bio extends beyond the end of the device or partition.
586 * This may well happen - the kernel calls bread() without checking the size of
587 * the device, e.g., when mounting a file system.
589 static inline int bio_check_eod(struct bio *bio)
591 sector_t maxsector = bdev_nr_sectors(bio->bi_bdev);
592 unsigned int nr_sectors = bio_sectors(bio);
594 if (nr_sectors && maxsector &&
595 (nr_sectors > maxsector ||
596 bio->bi_iter.bi_sector > maxsector - nr_sectors)) {
597 pr_info_ratelimited("%s: attempt to access beyond end of device\n"
598 "%pg: rw=%d, want=%llu, limit=%llu\n",
600 bio->bi_bdev, bio->bi_opf,
601 bio_end_sector(bio), maxsector);
608 * Remap block n of partition p to block n+start(p) of the disk.
610 static int blk_partition_remap(struct bio *bio)
612 struct block_device *p = bio->bi_bdev;
614 if (unlikely(should_fail_request(p, bio->bi_iter.bi_size)))
616 if (bio_sectors(bio)) {
617 bio->bi_iter.bi_sector += p->bd_start_sect;
618 trace_block_bio_remap(bio, p->bd_dev,
619 bio->bi_iter.bi_sector -
622 bio_set_flag(bio, BIO_REMAPPED);
627 * Check write append to a zoned block device.
629 static inline blk_status_t blk_check_zone_append(struct request_queue *q,
632 sector_t pos = bio->bi_iter.bi_sector;
633 int nr_sectors = bio_sectors(bio);
635 /* Only applicable to zoned block devices */
636 if (!blk_queue_is_zoned(q))
637 return BLK_STS_NOTSUPP;
639 /* The bio sector must point to the start of a sequential zone */
640 if (pos & (blk_queue_zone_sectors(q) - 1) ||
641 !blk_queue_zone_is_seq(q, pos))
642 return BLK_STS_IOERR;
645 * Not allowed to cross zone boundaries. Otherwise, the BIO will be
646 * split and could result in non-contiguous sectors being written in different zones.
649 if (nr_sectors > q->limits.chunk_sectors)
650 return BLK_STS_IOERR;
652 /* Make sure the BIO is small enough and will not get split */
653 if (nr_sectors > q->limits.max_zone_append_sectors)
654 return BLK_STS_IOERR;
656 bio->bi_opf |= REQ_NOMERGE;
661 static void __submit_bio(struct bio *bio)
663 struct gendisk *disk = bio->bi_bdev->bd_disk;
665 if (unlikely(!blk_crypto_bio_prep(&bio)))
668 if (!disk->fops->submit_bio) {
669 blk_mq_submit_bio(bio);
670 } else if (likely(bio_queue_enter(bio) == 0)) {
671 disk->fops->submit_bio(bio);
672 blk_queue_exit(disk->queue);
677 * The loop in this function may be a bit non-obvious, and so deserves some explanation:
680 * - Before entering the loop, bio->bi_next is NULL (as all callers ensure
681 * that), so we have a list with a single bio.
682 * - We pretend that we have just taken it off a longer list, so we assign
683 * bio_list to a pointer to the bio_list_on_stack, thus initialising the
684 * bio_list of new bios to be added. ->submit_bio() may indeed add some more
685 * bios through a recursive call to submit_bio_noacct. If it did, we find a
686 * non-NULL value in bio_list and re-enter the loop from the top.
687 * - In this case we really did just take the bio off the top of the list (no
688 * pretending) and so remove it from bio_list, and call into ->submit_bio() again.
691 * bio_list_on_stack[0] contains bios submitted by the current ->submit_bio.
692 * bio_list_on_stack[1] contains bios that were submitted before the current
693 * ->submit_bio, but that haven't been processed yet.
695 static void __submit_bio_noacct(struct bio *bio)
697 struct bio_list bio_list_on_stack[2];
699 BUG_ON(bio->bi_next);
701 bio_list_init(&bio_list_on_stack[0]);
702 current->bio_list = bio_list_on_stack;
705 struct request_queue *q = bdev_get_queue(bio->bi_bdev);
706 struct bio_list lower, same;
709 * Create a fresh bio_list for all subordinate requests.
711 bio_list_on_stack[1] = bio_list_on_stack[0];
712 bio_list_init(&bio_list_on_stack[0]);
717 * Sort new bios into those for a lower level and those for the same level.
720 bio_list_init(&lower);
721 bio_list_init(&same);
722 while ((bio = bio_list_pop(&bio_list_on_stack[0])) != NULL)
723 if (q == bdev_get_queue(bio->bi_bdev))
724 bio_list_add(&same, bio);
726 bio_list_add(&lower, bio);
729 * Now assemble so we handle the lowest level first.
731 bio_list_merge(&bio_list_on_stack[0], &lower);
732 bio_list_merge(&bio_list_on_stack[0], &same);
733 bio_list_merge(&bio_list_on_stack[0], &bio_list_on_stack[1]);
734 } while ((bio = bio_list_pop(&bio_list_on_stack[0])));
736 current->bio_list = NULL;
739 static void __submit_bio_noacct_mq(struct bio *bio)
741 struct bio_list bio_list[2] = { };
743 current->bio_list = bio_list;
747 } while ((bio = bio_list_pop(&bio_list[0])));
749 current->bio_list = NULL;
752 void submit_bio_noacct_nocheck(struct bio *bio)
755 * We only want one ->submit_bio to be active at a time, else stack
756 * usage with stacked devices could be a problem. Use current->bio_list
757 * to collect a list of requests submitted by a ->submit_bio method while
758 * it is active, and then process them after it has returned.
760 if (current->bio_list)
761 bio_list_add(&current->bio_list[0], bio);
762 else if (!bio->bi_bdev->bd_disk->fops->submit_bio)
763 __submit_bio_noacct_mq(bio);
765 __submit_bio_noacct(bio);
769 * submit_bio_noacct - re-submit a bio to the block device layer for I/O
770 * @bio: The bio describing the location in memory and on the device.
772 * This is a version of submit_bio() that shall only be used for I/O that is
773 * resubmitted to lower level drivers by stacking block drivers. All file
774 * systems and other upper level users of the block layer should use
775 * submit_bio() instead.
777 void submit_bio_noacct(struct bio *bio)
779 struct block_device *bdev = bio->bi_bdev;
780 struct request_queue *q = bdev_get_queue(bdev);
781 blk_status_t status = BLK_STS_IOERR;
782 struct blk_plug *plug;
786 plug = blk_mq_plug(q, bio);
787 if (plug && plug->nowait)
788 bio->bi_opf |= REQ_NOWAIT;
791 * For a REQ_NOWAIT based request, return -EOPNOTSUPP
792 * if queue does not support NOWAIT.
794 if ((bio->bi_opf & REQ_NOWAIT) && !blk_queue_nowait(q))
797 if (should_fail_bio(bio))
799 if (unlikely(bio_check_ro(bio)))
801 if (!bio_flagged(bio, BIO_REMAPPED)) {
802 if (unlikely(bio_check_eod(bio)))
804 if (bdev->bd_partno && unlikely(blk_partition_remap(bio)))
809 * Filter flush bios early so that bio-based drivers without flush
810 * support don't have to worry about them.
812 if (op_is_flush(bio->bi_opf) &&
813 !test_bit(QUEUE_FLAG_WC, &q->queue_flags)) {
814 bio->bi_opf &= ~(REQ_PREFLUSH | REQ_FUA);
815 if (!bio_sectors(bio)) {
821 if (!test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
822 bio_clear_polled(bio);
824 switch (bio_op(bio)) {
826 if (!blk_queue_discard(q))
829 case REQ_OP_SECURE_ERASE:
830 if (!blk_queue_secure_erase(q))
833 case REQ_OP_WRITE_SAME:
834 if (!q->limits.max_write_same_sectors)
837 case REQ_OP_ZONE_APPEND:
838 status = blk_check_zone_append(q, bio);
839 if (status != BLK_STS_OK)
842 case REQ_OP_ZONE_RESET:
843 case REQ_OP_ZONE_OPEN:
844 case REQ_OP_ZONE_CLOSE:
845 case REQ_OP_ZONE_FINISH:
846 if (!blk_queue_is_zoned(q))
849 case REQ_OP_ZONE_RESET_ALL:
850 if (!blk_queue_is_zoned(q) || !blk_queue_zone_resetall(q))
853 case REQ_OP_WRITE_ZEROES:
854 if (!q->limits.max_write_zeroes_sectors)
861 if (blk_throtl_bio(bio))
864 blk_cgroup_bio_start(bio);
865 blkcg_bio_issue_init(bio);
867 if (!bio_flagged(bio, BIO_TRACE_COMPLETION)) {
868 trace_block_bio_queue(bio);
869 /* Now that enqueuing has been traced, we need to trace
870 * completion as well.
872 bio_set_flag(bio, BIO_TRACE_COMPLETION);
874 submit_bio_noacct_nocheck(bio);
878 status = BLK_STS_NOTSUPP;
880 bio->bi_status = status;
883 EXPORT_SYMBOL(submit_bio_noacct);
886 * submit_bio - submit a bio to the block device layer for I/O
887 * @bio: The &struct bio which describes the I/O
889 * submit_bio() is used to submit I/O requests to block devices. It is passed a
890 * fully set up &struct bio that describes the I/O that needs to be done. The
891 * bio will be sent to the device described by the bi_bdev field.
893 * The success/failure status of the request, along with notification of
894 * completion, is delivered asynchronously through the ->bi_end_io() callback
895 * in @bio. The bio must NOT be touched by the caller until ->bi_end_io() has been called.
898 void submit_bio(struct bio *bio)
900 if (blkcg_punt_bio_submit(bio))
904 * If it's a regular read/write or a barrier with data attached,
905 * go through the normal accounting stuff before submission.
907 if (bio_has_data(bio)) {
910 if (unlikely(bio_op(bio) == REQ_OP_WRITE_SAME))
911 count = queue_logical_block_size(
912 bdev_get_queue(bio->bi_bdev)) >> 9;
914 count = bio_sectors(bio);
916 if (op_is_write(bio_op(bio))) {
917 count_vm_events(PGPGOUT, count);
919 task_io_account_read(bio->bi_iter.bi_size);
920 count_vm_events(PGPGIN, count);
925 * If we're reading data that is part of the userspace workingset, count
926 * submission time as memory stall. When the device is congested, or
927 * the submitting cgroup IO-throttled, submission can be a significant
928 * part of overall IO time.
930 if (unlikely(bio_op(bio) == REQ_OP_READ &&
931 bio_flagged(bio, BIO_WORKINGSET))) {
932 unsigned long pflags;
934 psi_memstall_enter(&pflags);
935 submit_bio_noacct(bio);
936 psi_memstall_leave(&pflags);
940 submit_bio_noacct(bio);
942 EXPORT_SYMBOL(submit_bio);
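/*
 * Submission sketch (illustrative; assumes the four-argument
 * bio_alloc(bdev, nr_vecs, opf, gfp) form used by this kernel and a
 * hypothetical my_end_io() completion handler; the single page is assumed
 * to fit in one bio_vec):
 *
 *	struct bio *bio = bio_alloc(bdev, 1, REQ_OP_READ, GFP_KERNEL);
 *
 *	bio->bi_iter.bi_sector = sector;
 *	__bio_add_page(bio, page, PAGE_SIZE, 0);
 *	bio->bi_end_io = my_end_io;
 *	bio->bi_private = my_data;
 *	submit_bio(bio);
 *
 * Synchronous callers can use submit_bio_wait() instead of providing a
 * ->bi_end_io handler.
 */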
945 * bio_poll - poll for BIO completions
946 * @bio: bio to poll for
947 * @iob: batches of IO
948 * @flags: BLK_POLL_* flags that control the behavior
950 * Poll for completions on the queue associated with the bio. Returns the number of
951 * completed entries found.
953 * Note: the caller must either be the context that submitted @bio, or
954 * be in an RCU critical section to prevent freeing of @bio.
956 int bio_poll(struct bio *bio, struct io_comp_batch *iob, unsigned int flags)
958 struct request_queue *q = bdev_get_queue(bio->bi_bdev);
959 blk_qc_t cookie = READ_ONCE(bio->bi_cookie);
962 if (cookie == BLK_QC_T_NONE ||
963 !test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
966 blk_flush_plug(current->plug, false);
968 if (blk_queue_enter(q, BLK_MQ_REQ_NOWAIT))
970 if (WARN_ON_ONCE(!queue_is_mq(q)))
971 ret = 0; /* not yet implemented, should not happen */
973 ret = blk_mq_poll(q, cookie, iob, flags);
977 EXPORT_SYMBOL_GPL(bio_poll);
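/*
 * Polling sketch (illustrative): a synchronous submitter of a polled bio
 * typically spins on bio_poll() until its completion handler has run;
 * "done" here is a flag set from ->bi_end_io.
 *
 *	submit_bio(bio);
 *	while (!READ_ONCE(done))
 *		bio_poll(bio, NULL, 0);
 */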
980 * Helper to implement file_operations.iopoll. Requires the bio to be stored
981 * in iocb->private, and cleared before freeing the bio.
983 int iocb_bio_iopoll(struct kiocb *kiocb, struct io_comp_batch *iob,
990 * Note: the bio cache only uses SLAB_TYPESAFE_BY_RCU, so bio can
991 * point to a freshly allocated bio at this point. If that happens
992 * we have a few cases to consider:
994 * 1) the bio is being initialized and bi_bdev is NULL. We can simply
995 * do nothing in this case
996 * 2) the bio points to a device that is not poll enabled. bio_poll will catch this and return 0
998 * 3) the bio points to a poll capable device, including but not
999 * limited to the one that the original bio pointed to. In this
1000 * case we will call into the actual poll method and poll for I/O,
1001 * even if we don't need to, but it won't cause harm either.
1003 * For cases 2) and 3) above the RCU grace period ensures that bi_bdev
1004 * is still allocated. Because partitions hold a reference to the whole
1005 * device bdev and thus disk, the disk is also still valid. Grabbing
1006 * a reference to the queue in bio_poll() ensures the hctxs and requests
1007 * are still valid as well.
1010 bio = READ_ONCE(kiocb->private);
1011 if (bio && bio->bi_bdev)
1012 ret = bio_poll(bio, iob, flags);
1017 EXPORT_SYMBOL_GPL(iocb_bio_iopoll);
1019 void update_io_ticks(struct block_device *part, unsigned long now, bool end)
1021 unsigned long stamp;
1023 stamp = READ_ONCE(part->bd_stamp);
1024 if (unlikely(time_after(now, stamp))) {
1025 if (likely(cmpxchg(&part->bd_stamp, stamp, now) == stamp))
1026 __part_stat_add(part, io_ticks, end ? now - stamp : 1);
1028 if (part->bd_partno) {
1029 part = bdev_whole(part);
1034 static unsigned long __part_start_io_acct(struct block_device *part,
1035 unsigned int sectors, unsigned int op,
1036 unsigned long start_time)
1038 const int sgrp = op_stat_group(op);
1041 update_io_ticks(part, start_time, false);
1042 part_stat_inc(part, ios[sgrp]);
1043 part_stat_add(part, sectors[sgrp], sectors);
1044 part_stat_local_inc(part, in_flight[op_is_write(op)]);
1051 * bio_start_io_acct_time - start I/O accounting for bio based drivers
1052 * @bio: bio to start account for
1053 * @start_time: start time that should be passed back to bio_end_io_acct().
1055 void bio_start_io_acct_time(struct bio *bio, unsigned long start_time)
1057 __part_start_io_acct(bio->bi_bdev, bio_sectors(bio),
1058 bio_op(bio), start_time);
1060 EXPORT_SYMBOL_GPL(bio_start_io_acct_time);
1063 * bio_start_io_acct - start I/O accounting for bio based drivers
1064 * @bio: bio to start account for
1066 * Returns the start time that should be passed back to bio_end_io_acct().
1068 unsigned long bio_start_io_acct(struct bio *bio)
1070 return __part_start_io_acct(bio->bi_bdev, bio_sectors(bio),
1071 bio_op(bio), jiffies);
1073 EXPORT_SYMBOL_GPL(bio_start_io_acct);
1075 unsigned long disk_start_io_acct(struct gendisk *disk, unsigned int sectors,
1078 return __part_start_io_acct(disk->part0, sectors, op, jiffies);
1080 EXPORT_SYMBOL(disk_start_io_acct);
1082 static void __part_end_io_acct(struct block_device *part, unsigned int op,
1083 unsigned long start_time)
1085 const int sgrp = op_stat_group(op);
1086 unsigned long now = READ_ONCE(jiffies);
1087 unsigned long duration = now - start_time;
1090 update_io_ticks(part, now, true);
1091 part_stat_add(part, nsecs[sgrp], jiffies_to_nsecs(duration));
1092 part_stat_local_dec(part, in_flight[op_is_write(op)]);
1096 void bio_end_io_acct_remapped(struct bio *bio, unsigned long start_time,
1097 struct block_device *orig_bdev)
1099 __part_end_io_acct(orig_bdev, bio_op(bio), start_time);
1101 EXPORT_SYMBOL_GPL(bio_end_io_acct_remapped);
1103 void disk_end_io_acct(struct gendisk *disk, unsigned int op,
1104 unsigned long start_time)
1106 __part_end_io_acct(disk->part0, op, start_time);
1108 EXPORT_SYMBOL(disk_end_io_acct);
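/*
 * Accounting sketch for a bio-based driver (illustrative): note the start
 * time when the bio is accepted and close the accounting from the
 * completion path; bio_end_io_acct() is the inline wrapper around
 * bio_end_io_acct_remapped() for the common, non-remapped case.
 *
 *	unsigned long start = bio_start_io_acct(bio);
 *
 *	... process the bio ...
 *	bio_end_io_acct(bio, start);
 */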
1111 * blk_lld_busy - Check if underlying low-level drivers of a device are busy
1112 * @q : the queue of the device being checked
1115 * Check if underlying low-level drivers of a device are busy.
1116 * If the drivers want to export their busy state, they must set their own
1117 * exporting function using blk_queue_lld_busy() first.
1119 * Basically, this function is used only by request stacking drivers
1120 * to stop dispatching requests to underlying devices when the underlying
1121 * devices are busy. This behavior allows more I/O merging on the queue
1122 * of the request stacking driver and prevents I/O throughput regressions
1123 * under bursty I/O load.
1126 * 0 - Not busy (The request stacking driver should dispatch request)
1127 * 1 - Busy (The request stacking driver should stop dispatching request)
1129 int blk_lld_busy(struct request_queue *q)
1131 if (queue_is_mq(q) && q->mq_ops->busy)
1132 return q->mq_ops->busy(q);
1136 EXPORT_SYMBOL_GPL(blk_lld_busy);
1138 int kblockd_schedule_work(struct work_struct *work)
1140 return queue_work(kblockd_workqueue, work);
1142 EXPORT_SYMBOL(kblockd_schedule_work);
1144 int kblockd_mod_delayed_work_on(int cpu, struct delayed_work *dwork,
1145 unsigned long delay)
1147 return mod_delayed_work_on(cpu, kblockd_workqueue, dwork, delay);
1149 EXPORT_SYMBOL(kblockd_mod_delayed_work_on);
1151 void blk_start_plug_nr_ios(struct blk_plug *plug, unsigned short nr_ios)
1153 struct task_struct *tsk = current;
1156 * If this is a nested plug, don't actually assign it.
1161 plug->mq_list = NULL;
1162 plug->cached_rq = NULL;
1163 plug->nr_ios = min_t(unsigned short, nr_ios, BLK_MAX_REQUEST_COUNT);
1165 plug->multiple_queues = false;
1166 plug->has_elevator = false;
1167 plug->nowait = false;
1168 INIT_LIST_HEAD(&plug->cb_list);
1171 * Store ordering should not be needed here, since a potential
1172 * preempt will imply a full memory barrier
1178 * blk_start_plug - initialize blk_plug and track it inside the task_struct
1179 * @plug: The &struct blk_plug that needs to be initialized
1182 * blk_start_plug() indicates to the block layer an intent by the caller
1183 * to submit multiple I/O requests in a batch. The block layer may use
1184 * this hint to defer submitting I/Os from the caller until blk_finish_plug()
1185 * is called. However, the block layer may choose to submit requests
1186 * before a call to blk_finish_plug() if the number of queued I/Os
1187 * exceeds %BLK_MAX_REQUEST_COUNT, or if the size of the I/O is larger than
1188 * %BLK_PLUG_FLUSH_SIZE. The queued I/Os may also be submitted early if
1189 * the task schedules (see below).
1191 * Tracking blk_plug inside the task_struct will help with auto-flushing the
1192 * pending I/O should the task end up blocking between blk_start_plug() and
1193 * blk_finish_plug(). This is important from a performance perspective, but
1194 * also ensures that we don't deadlock. For instance, if the task is blocking
1195 * for a memory allocation, memory reclaim could end up wanting to free a
1196 * page belonging to that request that is currently residing in our private
1197 * plug. By flushing the pending I/O when the process goes to sleep, we avoid
1198 * this kind of deadlock.
1200 void blk_start_plug(struct blk_plug *plug)
1202 blk_start_plug_nr_ios(plug, 1);
1204 EXPORT_SYMBOL(blk_start_plug);
1206 static void flush_plug_callbacks(struct blk_plug *plug, bool from_schedule)
1208 LIST_HEAD(callbacks);
1210 while (!list_empty(&plug->cb_list)) {
1211 list_splice_init(&plug->cb_list, &callbacks);
1213 while (!list_empty(&callbacks)) {
1214 struct blk_plug_cb *cb = list_first_entry(&callbacks,
1217 list_del(&cb->list);
1218 cb->callback(cb, from_schedule);
1223 struct blk_plug_cb *blk_check_plugged(blk_plug_cb_fn unplug, void *data,
1226 struct blk_plug *plug = current->plug;
1227 struct blk_plug_cb *cb;
1232 list_for_each_entry(cb, &plug->cb_list, list)
1233 if (cb->callback == unplug && cb->data == data)
1236 /* Not currently on the callback list */
1237 BUG_ON(size < sizeof(*cb));
1238 cb = kzalloc(size, GFP_ATOMIC);
1241 cb->callback = unplug;
1242 list_add(&cb->list, &plug->cb_list);
1246 EXPORT_SYMBOL(blk_check_plugged);
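/*
 * Stacking-driver sketch (illustrative; my_unplug(), my_dev and
 * my_submit_now() are hypothetical): attach a per-plug callback that is
 * invoked when the plug is flushed, and fall back to immediate submission
 * when no plug is active.
 *
 *	struct blk_plug_cb *cb;
 *
 *	cb = blk_check_plugged(my_unplug, my_dev, sizeof(*cb));
 *	if (!cb)
 *		my_submit_now(my_dev, bio);
 */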
1248 void __blk_flush_plug(struct blk_plug *plug, bool from_schedule)
1250 if (!list_empty(&plug->cb_list))
1251 flush_plug_callbacks(plug, from_schedule);
1252 if (!rq_list_empty(plug->mq_list))
1253 blk_mq_flush_plug_list(plug, from_schedule);
1255 * Unconditionally flush out cached requests, even if the unplug
1256 * event came from schedule. Since we hold references to the
1257 * queue for cached requests, we don't want a blocked task holding
1258 * up a queue freeze/quiesce event.
1260 if (unlikely(!rq_list_empty(plug->cached_rq)))
1261 blk_mq_free_plug_rqs(plug);
1265 * blk_finish_plug - mark the end of a batch of submitted I/O
1266 * @plug: The &struct blk_plug passed to blk_start_plug()
1269 * Indicate that a batch of I/O submissions is complete. This function
1270 * must be paired with an initial call to blk_start_plug(). The intent
1271 * is to allow the block layer to optimize I/O submission. See the
1272 * documentation for blk_start_plug() for more information.
1274 void blk_finish_plug(struct blk_plug *plug)
1276 if (plug == current->plug) {
1277 __blk_flush_plug(plug, false);
1278 current->plug = NULL;
1281 EXPORT_SYMBOL(blk_finish_plug);
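/*
 * Plugging sketch (illustrative): batch a burst of submissions so the block
 * layer can merge and dispatch them together.
 *
 *	struct blk_plug plug;
 *	int i;
 *
 *	blk_start_plug(&plug);
 *	for (i = 0; i < nr; i++)
 *		submit_bio(bios[i]);
 *	blk_finish_plug(&plug);
 */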
1283 void blk_io_schedule(void)
1285 /* Prevent hang_check timer from firing at us during very long I/O */
1286 unsigned long timeout = sysctl_hung_task_timeout_secs * HZ / 2;
1289 io_schedule_timeout(timeout);
1293 EXPORT_SYMBOL_GPL(blk_io_schedule);
1295 int __init blk_dev_init(void)
1297 BUILD_BUG_ON(REQ_OP_LAST >= (1 << REQ_OP_BITS));
1298 BUILD_BUG_ON(REQ_OP_BITS + REQ_FLAG_BITS > 8 *
1299 sizeof_field(struct request, cmd_flags));
1300 BUILD_BUG_ON(REQ_OP_BITS + REQ_FLAG_BITS > 8 *
1301 sizeof_field(struct bio, bi_opf));
1302 BUILD_BUG_ON(ALIGN(offsetof(struct request_queue, srcu),
1303 __alignof__(struct request_queue)) !=
1304 sizeof(struct request_queue));
1306 /* used for unplugging and affects IO latency/throughput - HIGHPRI */
1307 kblockd_workqueue = alloc_workqueue("kblockd",
1308 WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
1309 if (!kblockd_workqueue)
1310 panic("Failed to create kblockd\n");
1312 blk_requestq_cachep = kmem_cache_create("request_queue",
1313 sizeof(struct request_queue), 0, SLAB_PANIC, NULL);
1315 blk_requestq_srcu_cachep = kmem_cache_create("request_queue_srcu",
1316 sizeof(struct request_queue) +
1317 sizeof(struct srcu_struct), 0, SLAB_PANIC, NULL);
1319 blk_debugfs_root = debugfs_create_dir("block", NULL);