// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 1991, 1992  Linus Torvalds
 * Copyright (C) 2001  Andrea Arcangeli <andrea@suse.de> SuSE
 * Copyright (C) 2016 - 2020 Christoph Hellwig
 */
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/mpage.h>
#include <linux/uio.h>
#include <linux/namei.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/falloc.h>
#include <linux/suspend.h>
#include <linux/fs.h>
#include <linux/module.h>
#include "blk.h"

static inline struct inode *bdev_file_inode(struct file *file)
{
        return file->f_mapping->host;
}
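
/*
 * A block device maps 1:1 onto its own address space, so the buffer_head
 * for @iblock simply points at that sector on the device; nothing is ever
 * allocated, hence @create is ignored.
 */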
static int blkdev_get_block(struct inode *inode, sector_t iblock,
                struct buffer_head *bh, int create)
{
        bh->b_bdev = I_BDEV(inode);
        bh->b_blocknr = iblock;
        set_buffer_mapped(bh);
        return 0;
}
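
/* Assemble the request op flags used for a direct I/O write bio. */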
static unsigned int dio_bio_write_op(struct kiocb *iocb)
{
        unsigned int op = REQ_OP_WRITE | REQ_SYNC | REQ_IDLE;

        /* avoid the need for an I/O completion work item */
        if (iocb->ki_flags & IOCB_DSYNC)
                op |= REQ_FUA;
        return op;
}

#define DIO_INLINE_BIO_VECS 4

static void blkdev_bio_end_io_simple(struct bio *bio)
{
        struct task_struct *waiter = bio->bi_private;

        WRITE_ONCE(bio->bi_private, NULL);
        blk_wake_io_task(waiter);
}
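
/*
 * Fast path for synchronous I/O that fits in a single bio: build it on
 * the stack, submit, and sleep until completion.  Up to
 * DIO_INLINE_BIO_VECS segments are handled without any allocation.
 */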
static ssize_t __blkdev_direct_IO_simple(struct kiocb *iocb,
                struct iov_iter *iter, unsigned int nr_pages)
{
        struct block_device *bdev = iocb->ki_filp->private_data;
        struct bio_vec inline_vecs[DIO_INLINE_BIO_VECS], *vecs;
        loff_t pos = iocb->ki_pos;
        bool should_dirty = false;
        struct bio bio;
        ssize_t ret;

        if ((pos | iov_iter_alignment(iter)) &
            (bdev_logical_block_size(bdev) - 1))
                return -EINVAL;

        if (nr_pages <= DIO_INLINE_BIO_VECS)
                vecs = inline_vecs;
        else {
                vecs = kmalloc_array(nr_pages, sizeof(struct bio_vec),
                                     GFP_KERNEL);
                if (!vecs)
                        return -ENOMEM;
        }

        bio_init(&bio, vecs, nr_pages);
        bio_set_dev(&bio, bdev);
        bio.bi_iter.bi_sector = pos >> SECTOR_SHIFT;
        bio.bi_write_hint = iocb->ki_hint;
        bio.bi_private = current;
        bio.bi_end_io = blkdev_bio_end_io_simple;
        bio.bi_ioprio = iocb->ki_ioprio;

        ret = bio_iov_iter_get_pages(&bio, iter);
        if (unlikely(ret))
                goto out;
        ret = bio.bi_iter.bi_size;

        if (iov_iter_rw(iter) == READ) {
                bio.bi_opf = REQ_OP_READ;
                if (iter_is_iovec(iter))
                        should_dirty = true;
        } else {
                bio.bi_opf = dio_bio_write_op(iocb);
                task_io_account_write(ret);
        }
        if (iocb->ki_flags & IOCB_NOWAIT)
                bio.bi_opf |= REQ_NOWAIT;
        if (iocb->ki_flags & IOCB_HIPRI)
                bio_set_polled(&bio, iocb);

        submit_bio(&bio);
        for (;;) {
                set_current_state(TASK_UNINTERRUPTIBLE);
                if (!READ_ONCE(bio.bi_private))
                        break;
                if (!(iocb->ki_flags & IOCB_HIPRI) || !bio_poll(&bio, NULL, 0))
                        blk_io_schedule();
        }
        __set_current_state(TASK_RUNNING);

        bio_release_pages(&bio, should_dirty);
        if (unlikely(bio.bi_status))
                ret = blk_status_to_errno(bio.bi_status);

out:
        if (vecs != inline_vecs)
                kfree(vecs);

        bio_uninit(&bio);

        return ret;
}

enum {
        DIO_SHOULD_DIRTY        = 1,
        DIO_IS_SYNC             = 2,
};

struct blkdev_dio {
        union {
                struct kiocb            *iocb;
                struct task_struct      *waiter;
        };
        size_t                  size;
        atomic_t                ref;
        unsigned int            flags;
        struct bio              bio ____cacheline_aligned_in_smp;
};

static struct bio_set blkdev_dio_pool;
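
/*
 * Completion handler shared by all bios of a multi-bio direct I/O.
 * Whoever drops the last reference either completes the iocb (async)
 * or wakes the sleeping submitter (sync).
 */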
static void blkdev_bio_end_io(struct bio *bio)
{
        struct blkdev_dio *dio = bio->bi_private;
        bool should_dirty = dio->flags & DIO_SHOULD_DIRTY;

        if (bio->bi_status && !dio->bio.bi_status)
                dio->bio.bi_status = bio->bi_status;

        if (atomic_dec_and_test(&dio->ref)) {
                if (!(dio->flags & DIO_IS_SYNC)) {
                        struct kiocb *iocb = dio->iocb;
                        ssize_t ret;

                        WRITE_ONCE(iocb->private, NULL);

                        if (likely(!dio->bio.bi_status)) {
                                ret = dio->size;
                                iocb->ki_pos += ret;
                        } else {
                                ret = blk_status_to_errno(dio->bio.bi_status);
                        }

                        dio->iocb->ki_complete(iocb, ret);
                        bio_put(&dio->bio);
                } else {
                        struct task_struct *waiter = dio->waiter;

                        WRITE_ONCE(dio->waiter, NULL);
                        blk_wake_io_task(waiter);
                }
        }

        if (should_dirty) {
                bio_check_pages_dirty(bio);
        } else {
                bio_release_pages(bio, false);
                bio_put(bio);
        }
}
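
/*
 * Direct I/O too large for a single bio: carve the iterator into bios and
 * submit them under a plug, using dio->ref to count the bios in flight.
 */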
static ssize_t __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
                unsigned int nr_pages)
{
        struct block_device *bdev = iocb->ki_filp->private_data;
        struct blk_plug plug;
        struct blkdev_dio *dio;
        struct bio *bio;
        bool is_read = (iov_iter_rw(iter) == READ), is_sync;
        loff_t pos = iocb->ki_pos;
        int ret = 0;

        if ((pos | iov_iter_alignment(iter)) &
            (bdev_logical_block_size(bdev) - 1))
                return -EINVAL;

        bio = bio_alloc_kiocb(iocb, nr_pages, &blkdev_dio_pool);
        dio = container_of(bio, struct blkdev_dio, bio);
        atomic_set(&dio->ref, 1);
        /*
         * Grab an extra reference to ensure the dio structure which is embedded
         * into the first bio stays around.
         */
        bio_get(bio);

        is_sync = is_sync_kiocb(iocb);
        if (is_sync) {
                dio->flags = DIO_IS_SYNC;
                dio->waiter = current;
        } else {
                dio->flags = 0;
                dio->iocb = iocb;
        }

        dio->size = 0;
        if (is_read && iter_is_iovec(iter))
                dio->flags |= DIO_SHOULD_DIRTY;

        blk_start_plug(&plug);

        for (;;) {
                bio_set_dev(bio, bdev);
                bio->bi_iter.bi_sector = pos >> SECTOR_SHIFT;
                bio->bi_write_hint = iocb->ki_hint;
                bio->bi_private = dio;
                bio->bi_end_io = blkdev_bio_end_io;
                bio->bi_ioprio = iocb->ki_ioprio;

                ret = bio_iov_iter_get_pages(bio, iter);
                if (unlikely(ret)) {
                        bio->bi_status = BLK_STS_IOERR;
                        bio_endio(bio);
                        break;
                }

                if (is_read) {
                        bio->bi_opf = REQ_OP_READ;
                        if (dio->flags & DIO_SHOULD_DIRTY)
                                bio_set_pages_dirty(bio);
                } else {
                        bio->bi_opf = dio_bio_write_op(iocb);
                        task_io_account_write(bio->bi_iter.bi_size);
                }
                if (iocb->ki_flags & IOCB_NOWAIT)
                        bio->bi_opf |= REQ_NOWAIT;

                dio->size += bio->bi_iter.bi_size;
                pos += bio->bi_iter.bi_size;

                nr_pages = bio_iov_vecs_to_alloc(iter, BIO_MAX_VECS);
                if (!nr_pages) {
                        submit_bio(bio);
                        break;
                }
                atomic_inc(&dio->ref);
                submit_bio(bio);
                bio = bio_alloc(GFP_KERNEL, nr_pages);
        }

        blk_finish_plug(&plug);

        if (!is_sync)
                return -EIOCBQUEUED;

        for (;;) {
                set_current_state(TASK_UNINTERRUPTIBLE);
                if (!READ_ONCE(dio->waiter))
                        break;
                blk_io_schedule();
        }
        __set_current_state(TASK_RUNNING);

        if (!ret)
                ret = blk_status_to_errno(dio->bio.bi_status);
        if (likely(!ret))
                ret = dio->size;

        bio_put(&dio->bio);
        return ret;
}
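
/* Completion handler for the single-bio async fast path below. */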
static void blkdev_bio_end_io_async(struct bio *bio)
{
        struct blkdev_dio *dio = container_of(bio, struct blkdev_dio, bio);
        struct kiocb *iocb = dio->iocb;
        ssize_t ret;

        WRITE_ONCE(iocb->private, NULL);

        if (likely(!bio->bi_status)) {
                ret = dio->size;
                iocb->ki_pos += ret;
        } else {
                ret = blk_status_to_errno(bio->bi_status);
        }

        iocb->ki_complete(iocb, ret);

        if (dio->flags & DIO_SHOULD_DIRTY) {
                bio_check_pages_dirty(bio);
        } else {
                bio_release_pages(bio, false);
                bio_put(bio);
        }
}
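
/*
 * Async fast path for direct I/O that fits in a single bio: submit and
 * return -EIOCBQUEUED immediately; blkdev_bio_end_io_async() completes
 * the iocb from IRQ or polling context.
 */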
static ssize_t __blkdev_direct_IO_async(struct kiocb *iocb,
                                        struct iov_iter *iter,
                                        unsigned int nr_pages)
{
        struct block_device *bdev = iocb->ki_filp->private_data;
        struct blkdev_dio *dio;
        struct bio *bio;
        loff_t pos = iocb->ki_pos;
        int ret = 0;

        if ((pos | iov_iter_alignment(iter)) &
            (bdev_logical_block_size(bdev) - 1))
                return -EINVAL;

        bio = bio_alloc_kiocb(iocb, nr_pages, &blkdev_dio_pool);
        dio = container_of(bio, struct blkdev_dio, bio);
        dio->flags = 0;
        dio->iocb = iocb;
        bio_set_dev(bio, bdev);
        bio->bi_iter.bi_sector = pos >> SECTOR_SHIFT;
        bio->bi_write_hint = iocb->ki_hint;
        bio->bi_end_io = blkdev_bio_end_io_async;
        bio->bi_ioprio = iocb->ki_ioprio;

        if (iov_iter_is_bvec(iter)) {
                /*
                 * Users don't rely on the iterator being in any particular
                 * state for async I/O returning -EIOCBQUEUED, hence we can
                 * avoid expensive iov_iter_advance(). Bypass
                 * bio_iov_iter_get_pages() and set the bvec directly.
                 */
                bio_iov_bvec_set(bio, iter);
        } else {
                ret = bio_iov_iter_get_pages(bio, iter);
                if (unlikely(ret)) {
                        bio_put(bio);
                        return ret;
                }
        }
        dio->size = bio->bi_iter.bi_size;

        if (iov_iter_rw(iter) == READ) {
                bio->bi_opf = REQ_OP_READ;
                if (iter_is_iovec(iter)) {
                        dio->flags |= DIO_SHOULD_DIRTY;
                        bio_set_pages_dirty(bio);
                }
        } else {
                bio->bi_opf = dio_bio_write_op(iocb);
                task_io_account_write(bio->bi_iter.bi_size);
        }

        if (iocb->ki_flags & IOCB_HIPRI) {
                bio->bi_opf |= REQ_POLLED | REQ_NOWAIT;
                submit_bio(bio);
                WRITE_ONCE(iocb->private, bio);
        } else {
                if (iocb->ki_flags & IOCB_NOWAIT)
                        bio->bi_opf |= REQ_NOWAIT;
                submit_bio(bio);
        }
        return -EIOCBQUEUED;
}
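
/* Pick the cheapest of the three direct I/O implementations above. */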
static ssize_t blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
        unsigned int nr_pages;

        if (!iov_iter_count(iter))
                return 0;

        nr_pages = bio_iov_vecs_to_alloc(iter, BIO_MAX_VECS + 1);
        if (likely(nr_pages <= BIO_MAX_VECS)) {
                if (is_sync_kiocb(iocb))
                        return __blkdev_direct_IO_simple(iocb, iter, nr_pages);
                return __blkdev_direct_IO_async(iocb, iter, nr_pages);
        }
        return __blkdev_direct_IO(iocb, iter, bio_max_segs(nr_pages));
}
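
/* Buffered I/O is implemented with buffer_heads on top of blkdev_get_block. */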
static int blkdev_writepage(struct page *page, struct writeback_control *wbc)
{
        return block_write_full_page(page, blkdev_get_block, wbc);
}

static int blkdev_readpage(struct file *file, struct page *page)
{
        return block_read_full_page(page, blkdev_get_block);
}

static void blkdev_readahead(struct readahead_control *rac)
{
        mpage_readahead(rac, blkdev_get_block);
}

static int blkdev_write_begin(struct file *file, struct address_space *mapping,
                loff_t pos, unsigned len, unsigned flags, struct page **pagep,
                void **fsdata)
{
        return block_write_begin(mapping, pos, len, flags, pagep,
                                 blkdev_get_block);
}

static int blkdev_write_end(struct file *file, struct address_space *mapping,
                loff_t pos, unsigned len, unsigned copied, struct page *page,
                void *fsdata)
{
        int ret;

        ret = block_write_end(file, mapping, pos, len, copied, page, fsdata);

        unlock_page(page);
        put_page(page);

        return ret;
}

static int blkdev_writepages(struct address_space *mapping,
                struct writeback_control *wbc)
{
        return generic_writepages(mapping, wbc);
}

const struct address_space_operations def_blk_aops = {
        .set_page_dirty         = __set_page_dirty_buffers,
        .readpage               = blkdev_readpage,
        .readahead              = blkdev_readahead,
        .writepage              = blkdev_writepage,
        .write_begin            = blkdev_write_begin,
        .write_end              = blkdev_write_end,
        .writepages             = blkdev_writepages,
        .direct_IO              = blkdev_direct_IO,
        .migratepage            = buffer_migrate_page_norefs,
        .is_dirty_writeback     = buffer_check_dirty_writeback,
};

/*
 * for a block special file, file_inode(file)->i_size is zero
 * so we compute the size by hand (just as in block_read/write above)
 */
static loff_t blkdev_llseek(struct file *file, loff_t offset, int whence)
{
        struct inode *bd_inode = bdev_file_inode(file);
        loff_t retval;

        inode_lock(bd_inode);
        retval = fixed_size_llseek(file, offset, whence, i_size_read(bd_inode));
        inode_unlock(bd_inode);
        return retval;
}

static int blkdev_fsync(struct file *filp, loff_t start, loff_t end,
                int datasync)
{
        struct block_device *bdev = filp->private_data;
        int error;

        error = file_write_and_wait_range(filp, start, end);
        if (error)
                return error;

        /*
         * There is no need to serialise calls to blkdev_issue_flush with
         * i_mutex and doing so causes performance issues with concurrent
         * O_SYNC writers to a block device.
         */
        error = blkdev_issue_flush(bdev);
        if (error == -EOPNOTSUPP)
                error = 0;

        return error;
}
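
/*
 * Bind the opened struct block_device to the file so that the other file
 * operations can reach it through filp->private_data and filp->f_mapping.
 */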
static int blkdev_open(struct inode *inode, struct file *filp)
{
        struct block_device *bdev;

        /*
         * Preserve backwards compatibility and allow large file access
         * even if userspace doesn't ask for it explicitly. Some mkfs
         * binary needs it. We might want to drop this workaround
         * during an unstable branch.
         */
        filp->f_flags |= O_LARGEFILE;
        filp->f_mode |= FMODE_NOWAIT | FMODE_BUF_RASYNC;

        if (filp->f_flags & O_NDELAY)
                filp->f_mode |= FMODE_NDELAY;
        if (filp->f_flags & O_EXCL)
                filp->f_mode |= FMODE_EXCL;
        if ((filp->f_flags & O_ACCMODE) == 3)
                filp->f_mode |= FMODE_WRITE_IOCTL;

        bdev = blkdev_get_by_dev(inode->i_rdev, filp->f_mode, filp);
        if (IS_ERR(bdev))
                return PTR_ERR(bdev);

        filp->private_data = bdev;
        filp->f_mapping = bdev->bd_inode->i_mapping;
        filp->f_wb_err = filemap_sample_wb_err(filp->f_mapping);
        return 0;
}

static int blkdev_close(struct inode *inode, struct file *filp)
{
        struct block_device *bdev = filp->private_data;

        blkdev_put(bdev, filp->f_mode);
        return 0;
}

/*
 * Write data to the block device.  Only intended for the block device itself
 * and the raw driver which basically is a fake block device.
 *
 * Does not take i_mutex for the write and thus is not for general purpose
 * use.
 */
static ssize_t blkdev_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
        struct block_device *bdev = iocb->ki_filp->private_data;
        struct inode *bd_inode = bdev->bd_inode;
        loff_t size = bdev_nr_bytes(bdev);
        struct blk_plug plug;
        size_t shorted = 0;
        ssize_t ret;

        if (bdev_read_only(bdev))
                return -EPERM;

        if (IS_SWAPFILE(bd_inode) && !is_hibernate_resume_dev(bd_inode->i_rdev))
                return -ETXTBSY;

        if (!iov_iter_count(from))
                return 0;

        if (iocb->ki_pos >= size)
                return -ENOSPC;

        if ((iocb->ki_flags & (IOCB_NOWAIT | IOCB_DIRECT)) == IOCB_NOWAIT)
                return -EOPNOTSUPP;

        size -= iocb->ki_pos;
        if (iov_iter_count(from) > size) {
                shorted = iov_iter_count(from) - size;
                iov_iter_truncate(from, size);
        }

        blk_start_plug(&plug);
        ret = __generic_file_write_iter(iocb, from);
        if (ret > 0)
                ret = generic_write_sync(iocb, ret);
        iov_iter_reexpand(from, iov_iter_count(from) + shorted);
        blk_finish_plug(&plug);
        return ret;
}
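
/*
 * Read from the block device, clamping the iterator to the device size.
 * A short or failed direct I/O read falls back to buffered reading via
 * filemap_read() for the remainder.
 */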
static ssize_t blkdev_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
        struct block_device *bdev = iocb->ki_filp->private_data;
        loff_t size = bdev_nr_bytes(bdev);
        loff_t pos = iocb->ki_pos;
        size_t shorted = 0;
        ssize_t ret = 0;
        size_t count;

        if (unlikely(pos + iov_iter_count(to) > size)) {
                if (pos >= size)
                        return 0;
                size -= pos;
                shorted = iov_iter_count(to) - size;
                iov_iter_truncate(to, size);
        }

        count = iov_iter_count(to);
        if (!count)
                goto reexpand; /* skip atime */

        if (iocb->ki_flags & IOCB_DIRECT) {
                struct address_space *mapping = iocb->ki_filp->f_mapping;

                if (iocb->ki_flags & IOCB_NOWAIT) {
                        if (filemap_range_needs_writeback(mapping, pos,
                                                          pos + count - 1)) {
                                ret = -EAGAIN;
                                goto reexpand;
                        }
                } else {
                        ret = filemap_write_and_wait_range(mapping, pos,
                                                           pos + count - 1);
                        if (ret < 0)
                                goto reexpand;
                }

                file_accessed(iocb->ki_filp);

                ret = blkdev_direct_IO(iocb, to);
                if (ret >= 0) {
                        iocb->ki_pos += ret;
                        count -= ret;
                }
                iov_iter_revert(to, count - iov_iter_count(to));
                if (ret < 0 || !count)
                        goto reexpand;
        }

        ret = filemap_read(iocb, to, ret);

reexpand:
        if (unlikely(shorted))
                iov_iter_reexpand(to, iov_iter_count(to) + shorted);
        return ret;
}

#define BLKDEV_FALLOC_FL_SUPPORTED                                      \
                (FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |           \
                 FALLOC_FL_ZERO_RANGE | FALLOC_FL_NO_HIDE_STALE)
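
/*
 * fallocate() on a block device zeroes or discards a sector range; it can
 * never change the size of the device, so only the FALLOC_FL_* combinations
 * handled below are supported.
 */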
static long blkdev_fallocate(struct file *file, int mode, loff_t start,
                             loff_t len)
{
        struct inode *inode = bdev_file_inode(file);
        struct block_device *bdev = I_BDEV(inode);
        loff_t end = start + len - 1;
        loff_t isize;
        int error;

        /* Fail if we don't recognize the flags. */
        if (mode & ~BLKDEV_FALLOC_FL_SUPPORTED)
                return -EOPNOTSUPP;

        /* Don't go off the end of the device. */
        isize = bdev_nr_bytes(bdev);
        if (start >= isize)
                return -EINVAL;
        if (end >= isize) {
                if (mode & FALLOC_FL_KEEP_SIZE) {
                        len = isize - start;
                        end = start + len - 1;
                } else
                        return -EINVAL;
        }

        /*
         * Don't allow IO that isn't aligned to logical block size.
         */
        if ((start | len) & (bdev_logical_block_size(bdev) - 1))
                return -EINVAL;

        filemap_invalidate_lock(inode->i_mapping);

        /* Invalidate the page cache, including dirty pages. */
        error = truncate_bdev_range(bdev, file->f_mode, start, end);
        if (error)
                goto fail;

        switch (mode) {
        case FALLOC_FL_ZERO_RANGE:
        case FALLOC_FL_ZERO_RANGE | FALLOC_FL_KEEP_SIZE:
                error = blkdev_issue_zeroout(bdev, start >> SECTOR_SHIFT,
                                             len >> SECTOR_SHIFT, GFP_KERNEL,
                                             BLKDEV_ZERO_NOUNMAP);
                break;
        case FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE:
                error = blkdev_issue_zeroout(bdev, start >> SECTOR_SHIFT,
                                             len >> SECTOR_SHIFT, GFP_KERNEL,
                                             BLKDEV_ZERO_NOFALLBACK);
                break;
        case FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE | FALLOC_FL_NO_HIDE_STALE:
                error = blkdev_issue_discard(bdev, start >> SECTOR_SHIFT,
                                             len >> SECTOR_SHIFT, GFP_KERNEL, 0);
                break;
        default:
                error = -EOPNOTSUPP;
        }

 fail:
        filemap_invalidate_unlock(inode->i_mapping);
        return error;
}

const struct file_operations def_blk_fops = {
        .open           = blkdev_open,
        .release        = blkdev_close,
        .llseek         = blkdev_llseek,
        .read_iter      = blkdev_read_iter,
        .write_iter     = blkdev_write_iter,
        .iopoll         = iocb_bio_iopoll,
        .mmap           = generic_file_mmap,
        .fsync          = blkdev_fsync,
        .unlocked_ioctl = blkdev_ioctl,
#ifdef CONFIG_COMPAT
        .compat_ioctl   = compat_blkdev_ioctl,
#endif
        .splice_read    = generic_file_splice_read,
        .splice_write   = iter_file_splice_write,
        .fallocate      = blkdev_fallocate,
};
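
/* Set up the bio_set that backs bio_alloc_kiocb() for the direct I/O paths. */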
static __init int blkdev_init(void)
{
        return bioset_init(&blkdev_dio_pool, 4,
                                offsetof(struct blkdev_dio, bio),
                                BIOSET_NEED_BVECS|BIOSET_PERCPU_CACHE);
}
module_init(blkdev_init);