1 // SPDX-License-Identifier: GPL-2.0
3 * Simple file system for zoned block devices exposing zones as files.
5 * Copyright (C) 2019 Western Digital Corporation or its affiliates.
7 #include <linux/module.h>
9 #include <linux/magic.h>
10 #include <linux/iomap.h>
11 #include <linux/init.h>
12 #include <linux/slab.h>
13 #include <linux/blkdev.h>
14 #include <linux/statfs.h>
15 #include <linux/writeback.h>
16 #include <linux/quotaops.h>
17 #include <linux/seq_file.h>
18 #include <linux/parser.h>
19 #include <linux/uio.h>
20 #include <linux/mman.h>
21 #include <linux/sched/mm.h>
22 #include <linux/crc32.h>
23 #include <linux/task_io_accounting_ops.h>
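/*
 * Execute a zone management operation (reset, open, close or finish) on the
 * zone backing an inode. The caller must hold the inode truncate mutex.
 */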
27 static inline int zonefs_zone_mgmt(struct inode *inode,
30 struct zonefs_inode_info *zi = ZONEFS_I(inode);
33 lockdep_assert_held(&zi->i_truncate_mutex);
35 ret = blkdev_zone_mgmt(inode->i_sb->s_bdev, op, zi->i_zsector,
36 zi->i_zone_size >> SECTOR_SHIFT, GFP_NOFS);
38 zonefs_err(inode->i_sb,
39 "Zone management operation %s at %llu failed %d\n",
40 blk_op_str(op), zi->i_zsector, ret);
47 static inline void zonefs_i_size_write(struct inode *inode, loff_t isize)
49 struct zonefs_inode_info *zi = ZONEFS_I(inode);
51 i_size_write(inode, isize);
53 * A full zone is no longer open/active and does not need explicit closing.
56 if (isize >= zi->i_max_size)
57 zi->i_flags &= ~ZONEFS_ZONE_OPEN;
60 static int zonefs_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
61 unsigned int flags, struct iomap *iomap,
64 struct zonefs_inode_info *zi = ZONEFS_I(inode);
65 struct super_block *sb = inode->i_sb;
68 /* All I/Os should always be within the file maximum size */
69 if (WARN_ON_ONCE(offset + length > zi->i_max_size))
73 * Sequential zones can only accept direct writes. This is already
74 * checked when writes are issued, so warn if we see a page writeback operation.
77 if (WARN_ON_ONCE(zi->i_ztype == ZONEFS_ZTYPE_SEQ &&
78 (flags & IOMAP_WRITE) && !(flags & IOMAP_DIRECT)))
82 * For conventional zones, all blocks are always mapped. For sequential
83 * zones, all blocks are always mapped below the inode size (zone
84 * write pointer) and unwritten beyond.
86 mutex_lock(&zi->i_truncate_mutex);
87 isize = i_size_read(inode);
89 iomap->type = IOMAP_UNWRITTEN;
91 iomap->type = IOMAP_MAPPED;
92 if (flags & IOMAP_WRITE)
93 length = zi->i_max_size - offset;
95 length = min(length, isize - offset);
96 mutex_unlock(&zi->i_truncate_mutex);
98 iomap->offset = ALIGN_DOWN(offset, sb->s_blocksize);
99 iomap->length = ALIGN(offset + length, sb->s_blocksize) - iomap->offset;
100 iomap->bdev = inode->i_sb->s_bdev;
101 iomap->addr = (zi->i_zsector << SECTOR_SHIFT) + iomap->offset;
106 static const struct iomap_ops zonefs_iomap_ops = {
107 .iomap_begin = zonefs_iomap_begin,
110 static int zonefs_readpage(struct file *unused, struct page *page)
112 return iomap_readpage(page, &zonefs_iomap_ops);
115 static void zonefs_readahead(struct readahead_control *rac)
117 iomap_readahead(rac, &zonefs_iomap_ops);
121 * Map blocks for page writeback. This is used only on conventional zone files,
122 * which implies that the page range can only be within the fixed inode size.
124 static int zonefs_map_blocks(struct iomap_writepage_ctx *wpc,
125 struct inode *inode, loff_t offset)
127 struct zonefs_inode_info *zi = ZONEFS_I(inode);
129 if (WARN_ON_ONCE(zi->i_ztype != ZONEFS_ZTYPE_CNV))
131 if (WARN_ON_ONCE(offset >= i_size_read(inode)))
134 /* If the mapping is already OK, nothing needs to be done */
135 if (offset >= wpc->iomap.offset &&
136 offset < wpc->iomap.offset + wpc->iomap.length)
139 return zonefs_iomap_begin(inode, offset, zi->i_max_size - offset,
140 IOMAP_WRITE, &wpc->iomap, NULL);
143 static const struct iomap_writeback_ops zonefs_writeback_ops = {
144 .map_blocks = zonefs_map_blocks,
147 static int zonefs_writepage(struct page *page, struct writeback_control *wbc)
149 struct iomap_writepage_ctx wpc = { };
151 return iomap_writepage(page, wbc, &wpc, &zonefs_writeback_ops);
154 static int zonefs_writepages(struct address_space *mapping,
155 struct writeback_control *wbc)
157 struct iomap_writepage_ctx wpc = { };
159 return iomap_writepages(mapping, wbc, &wpc, &zonefs_writeback_ops);
162 static const struct address_space_operations zonefs_file_aops = {
163 .readpage = zonefs_readpage,
164 .readahead = zonefs_readahead,
165 .writepage = zonefs_writepage,
166 .writepages = zonefs_writepages,
167 .set_page_dirty = iomap_set_page_dirty,
168 .releasepage = iomap_releasepage,
169 .invalidatepage = iomap_invalidatepage,
170 .migratepage = iomap_migrate_page,
171 .is_partially_uptodate = iomap_is_partially_uptodate,
172 .error_remove_page = generic_error_remove_page,
173 .direct_IO = noop_direct_IO,
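/*
 * Account an inode size change in the volume used block count, clamping the
 * result to the valid range (0 to the total number of blocks).
 */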
176 static void zonefs_update_stats(struct inode *inode, loff_t new_isize)
178 struct super_block *sb = inode->i_sb;
179 struct zonefs_sb_info *sbi = ZONEFS_SB(sb);
180 loff_t old_isize = i_size_read(inode);
183 if (new_isize == old_isize)
186 spin_lock(&sbi->s_lock);
189 * This may be called for an update after an IO error.
190 * So beware of the values seen.
192 if (new_isize < old_isize) {
193 nr_blocks = (old_isize - new_isize) >> sb->s_blocksize_bits;
194 if (sbi->s_used_blocks > nr_blocks)
195 sbi->s_used_blocks -= nr_blocks;
197 sbi->s_used_blocks = 0;
199 sbi->s_used_blocks +=
200 (new_isize - old_isize) >> sb->s_blocksize_bits;
201 if (sbi->s_used_blocks > sbi->s_blocks)
202 sbi->s_used_blocks = sbi->s_blocks;
205 spin_unlock(&sbi->s_lock);
209 * Check a zone condition and adjust its file inode access permissions for
210 * offline and readonly zones. Return the inode size corresponding to the
211 * amount of readable data in the zone.
213 static loff_t zonefs_check_zone_condition(struct inode *inode,
214 struct blk_zone *zone, bool warn,
217 struct zonefs_inode_info *zi = ZONEFS_I(inode);
219 switch (zone->cond) {
220 case BLK_ZONE_COND_OFFLINE:
222 * Dead zone: make the inode immutable, disable all accesses
223 * and set the file size to 0 (zone wp set to zone start).
226 zonefs_warn(inode->i_sb, "inode %lu: offline zone\n",
228 inode->i_flags |= S_IMMUTABLE;
229 inode->i_mode &= ~0777;
230 zone->wp = zone->start;
232 case BLK_ZONE_COND_READONLY:
234 * The write pointer of read-only zones is invalid. If such a
235 * zone is found during mount, the file size cannot be retrieved
236 * so we treat the zone as offline (mount == true case).
237 * Otherwise, keep the file size as it was when last updated
238 * so that the user can recover data. In both cases, writes are
239 * always disabled for the zone.
242 zonefs_warn(inode->i_sb, "inode %lu: read-only zone\n",
244 inode->i_flags |= S_IMMUTABLE;
246 zone->cond = BLK_ZONE_COND_OFFLINE;
247 inode->i_mode &= ~0777;
248 zone->wp = zone->start;
251 inode->i_mode &= ~0222;
252 return i_size_read(inode);
254 if (zi->i_ztype == ZONEFS_ZTYPE_CNV)
255 return zi->i_max_size;
256 return (zone->wp - zone->start) << SECTOR_SHIFT;
260 struct zonefs_ioerr_data {
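/*
 * Zone report callback used for IO error recovery: re-evaluate the zone
 * condition, apply the errors= mount option policy and resynchronize the
 * inode size and write pointer offset with the zone state.
 */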
265 static int zonefs_io_error_cb(struct blk_zone *zone, unsigned int idx,
268 struct zonefs_ioerr_data *err = data;
269 struct inode *inode = err->inode;
270 struct zonefs_inode_info *zi = ZONEFS_I(inode);
271 struct super_block *sb = inode->i_sb;
272 struct zonefs_sb_info *sbi = ZONEFS_SB(sb);
273 loff_t isize, data_size;
276 * Check the zone condition: if the zone is not "bad" (offline or
277 * read-only), read errors are simply signaled to the IO issuer as long
278 * as there is no inconsistency between the inode size and the amount of
279 * data written in the zone (data_size).
281 data_size = zonefs_check_zone_condition(inode, zone, true, false);
282 isize = i_size_read(inode);
283 if (zone->cond != BLK_ZONE_COND_OFFLINE &&
284 zone->cond != BLK_ZONE_COND_READONLY &&
285 !err->write && isize == data_size)
289 * At this point, we detected either a bad zone or an inconsistency
290 * between the inode size and the amount of data written in the zone.
291 * For the latter case, the cause may be a write IO error or an external
292 * action on the device. Two error patterns exist:
293 * 1) The inode size is lower than the amount of data in the zone:
294 * a write operation partially failed and data was written at the end
295 * of the file. This can happen in the case of a large direct IO
296 * needing several BIOs and/or write requests to be processed.
297 * 2) The inode size is larger than the amount of data in the zone:
298 * this can happen with a deferred write error when the device side
299 * write cache is used, after successful write IO
300 * completions. Other possibilities are (a) an external corruption,
301 * e.g. an application reset the zone directly, or (b) the device
302 * has a serious problem (e.g. firmware bug).
304 * In all cases, warn about inode size inconsistency and handle the
305 * IO error according to the zone condition and to the mount options.
307 if (zi->i_ztype == ZONEFS_ZTYPE_SEQ && isize != data_size)
308 zonefs_warn(sb, "inode %lu: invalid size %lld (should be %lld)\n",
309 inode->i_ino, isize, data_size);
312 * First handle bad zones signaled by hardware. The mount options
313 * errors=zone-ro and errors=zone-offline result in changing the
314 * zone condition to read-only and offline respectively, as if the
315 * condition was signaled by the hardware.
317 if (zone->cond == BLK_ZONE_COND_OFFLINE ||
318 sbi->s_mount_opts & ZONEFS_MNTOPT_ERRORS_ZOL) {
319 zonefs_warn(sb, "inode %lu: read/write access disabled\n",
321 if (zone->cond != BLK_ZONE_COND_OFFLINE) {
322 zone->cond = BLK_ZONE_COND_OFFLINE;
323 data_size = zonefs_check_zone_condition(inode, zone,
326 } else if (zone->cond == BLK_ZONE_COND_READONLY ||
327 sbi->s_mount_opts & ZONEFS_MNTOPT_ERRORS_ZRO) {
328 zonefs_warn(sb, "inode %lu: write access disabled\n",
330 if (zone->cond != BLK_ZONE_COND_READONLY) {
331 zone->cond = BLK_ZONE_COND_READONLY;
332 data_size = zonefs_check_zone_condition(inode, zone,
338 * If the filesystem is mounted with the explicit-open mount option, we
339 * need to clear the ZONEFS_ZONE_OPEN flag if the zone transitioned to
340 * the read-only or offline condition, to avoid attempting an explicit
341 * close of the zone when the inode file is closed.
343 if ((sbi->s_mount_opts & ZONEFS_MNTOPT_EXPLICIT_OPEN) &&
344 (zone->cond == BLK_ZONE_COND_OFFLINE ||
345 zone->cond == BLK_ZONE_COND_READONLY))
346 zi->i_flags &= ~ZONEFS_ZONE_OPEN;
349 * If errors=remount-ro was specified, any error results in remounting
350 * the volume as read-only.
352 if ((sbi->s_mount_opts & ZONEFS_MNTOPT_ERRORS_RO) && !sb_rdonly(sb)) {
353 zonefs_warn(sb, "remounting filesystem read-only\n");
354 sb->s_flags |= SB_RDONLY;
358 * Update block usage stats and the inode size to prevent access to invalid data.
361 zonefs_update_stats(inode, data_size);
362 zonefs_i_size_write(inode, data_size);
363 zi->i_wpoffset = data_size;
369 * When a file IO error occurs, check the file zone to see if there is a change
370 * in the zone condition (e.g. offline or read-only). For a failed write to a
371 * sequential zone, the zone write pointer position must also be checked to
372 * correct, if needed, the file size and the zonefs inode write pointer offset
373 * (which can be out of sync with the drive due to partial write failures).
375 static void __zonefs_io_error(struct inode *inode, bool write)
377 struct zonefs_inode_info *zi = ZONEFS_I(inode);
378 struct super_block *sb = inode->i_sb;
379 struct zonefs_sb_info *sbi = ZONEFS_SB(sb);
380 unsigned int noio_flag;
381 unsigned int nr_zones =
382 zi->i_zone_size >> (sbi->s_zone_sectors_shift + SECTOR_SHIFT);
383 struct zonefs_ioerr_data err = {
390 * Memory allocations in blkdev_report_zones() can trigger a memory
391 * reclaim which may in turn cause a recursion into zonefs as well as
392 * struct request allocations for the same device. The former case may
393 * end up in a deadlock on the inode truncate mutex, while the latter
394 * may prevent IO forward progress. Executing the report zones under
395 * the GFP_NOIO context avoids both problems.
397 noio_flag = memalloc_noio_save();
398 ret = blkdev_report_zones(sb->s_bdev, zi->i_zsector, nr_zones,
399 zonefs_io_error_cb, &err);
401 zonefs_err(sb, "Get inode %lu zone information failed %d\n",
403 memalloc_noio_restore(noio_flag);
406 static void zonefs_io_error(struct inode *inode, bool write)
408 struct zonefs_inode_info *zi = ZONEFS_I(inode);
410 mutex_lock(&zi->i_truncate_mutex);
411 __zonefs_io_error(inode, write);
412 mutex_unlock(&zi->i_truncate_mutex);
415 static int zonefs_file_truncate(struct inode *inode, loff_t isize)
417 struct zonefs_inode_info *zi = ZONEFS_I(inode);
423 * Only sequential zone files can be truncated and truncation is allowed
424 * only down to a 0 size, which is equivalent to a zone reset, and to
425 * the maximum file size, which is equivalent to a zone finish.
427 if (zi->i_ztype != ZONEFS_ZTYPE_SEQ)
431 op = REQ_OP_ZONE_RESET;
432 else if (isize == zi->i_max_size)
433 op = REQ_OP_ZONE_FINISH;
437 inode_dio_wait(inode);
439 /* Serialize against page faults */
440 down_write(&zi->i_mmap_sem);
442 /* Serialize against zonefs_iomap_begin() */
443 mutex_lock(&zi->i_truncate_mutex);
445 old_isize = i_size_read(inode);
446 if (isize == old_isize)
449 ret = zonefs_zone_mgmt(inode, op);
454 * If the mount option ZONEFS_MNTOPT_EXPLICIT_OPEN is set,
455 * take care of open zones.
457 if (zi->i_flags & ZONEFS_ZONE_OPEN) {
459 * Truncating a zone to EMPTY or FULL is the equivalent of
460 * closing the zone. For a truncation to 0, we need to
461 * re-open the zone to ensure new writes can be processed.
462 * For a truncation to the maximum file size, the zone is
463 * closed and writes cannot be accepted anymore, so clear the open flag.
467 ret = zonefs_zone_mgmt(inode, REQ_OP_ZONE_OPEN);
469 zi->i_flags &= ~ZONEFS_ZONE_OPEN;
472 zonefs_update_stats(inode, isize);
473 truncate_setsize(inode, isize);
474 zi->i_wpoffset = isize;
477 mutex_unlock(&zi->i_truncate_mutex);
478 up_write(&zi->i_mmap_sem);
483 static int zonefs_inode_setattr(struct dentry *dentry, struct iattr *iattr)
485 struct inode *inode = d_inode(dentry);
488 if (unlikely(IS_IMMUTABLE(inode)))
491 ret = setattr_prepare(dentry, iattr);
496 * Since files and directories cannot be created nor deleted, do not
497 * allow setting any write attributes on the sub-directories grouping
498 * files by zone type.
500 if ((iattr->ia_valid & ATTR_MODE) && S_ISDIR(inode->i_mode) &&
501 (iattr->ia_mode & 0222))
504 if (((iattr->ia_valid & ATTR_UID) &&
505 !uid_eq(iattr->ia_uid, inode->i_uid)) ||
506 ((iattr->ia_valid & ATTR_GID) &&
507 !gid_eq(iattr->ia_gid, inode->i_gid))) {
508 ret = dquot_transfer(inode, iattr);
513 if (iattr->ia_valid & ATTR_SIZE) {
514 ret = zonefs_file_truncate(inode, iattr->ia_size);
519 setattr_copy(inode, iattr);
524 static const struct inode_operations zonefs_file_inode_operations = {
525 .setattr = zonefs_inode_setattr,
528 static int zonefs_file_fsync(struct file *file, loff_t start, loff_t end,
531 struct inode *inode = file_inode(file);
534 if (unlikely(IS_IMMUTABLE(inode)))
538 * Since only direct writes are allowed in sequential files, page cache
539 * flush is needed only for conventional zone files.
541 if (ZONEFS_I(inode)->i_ztype == ZONEFS_ZTYPE_CNV)
542 ret = file_write_and_wait_range(file, start, end);
544 ret = blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL);
547 zonefs_io_error(inode, true);
552 static vm_fault_t zonefs_filemap_fault(struct vm_fault *vmf)
554 struct zonefs_inode_info *zi = ZONEFS_I(file_inode(vmf->vma->vm_file));
557 down_read(&zi->i_mmap_sem);
558 ret = filemap_fault(vmf);
559 up_read(&zi->i_mmap_sem);
564 static vm_fault_t zonefs_filemap_page_mkwrite(struct vm_fault *vmf)
566 struct inode *inode = file_inode(vmf->vma->vm_file);
567 struct zonefs_inode_info *zi = ZONEFS_I(inode);
570 if (unlikely(IS_IMMUTABLE(inode)))
571 return VM_FAULT_SIGBUS;
574 * Sanity check: only conventional zone files can have shared
575 * writeable mappings.
577 if (WARN_ON_ONCE(zi->i_ztype != ZONEFS_ZTYPE_CNV))
578 return VM_FAULT_NOPAGE;
580 sb_start_pagefault(inode->i_sb);
581 file_update_time(vmf->vma->vm_file);
583 /* Serialize against truncates */
584 down_read(&zi->i_mmap_sem);
585 ret = iomap_page_mkwrite(vmf, &zonefs_iomap_ops);
586 up_read(&zi->i_mmap_sem);
588 sb_end_pagefault(inode->i_sb);
592 static const struct vm_operations_struct zonefs_file_vm_ops = {
593 .fault = zonefs_filemap_fault,
594 .map_pages = filemap_map_pages,
595 .page_mkwrite = zonefs_filemap_page_mkwrite,
598 static int zonefs_file_mmap(struct file *file, struct vm_area_struct *vma)
601 * Conventional zones accept random writes, so their files can support
602 * shared writable mappings. For sequential zone files, only read
603 * mappings are possible since there are no guarantees for write
604 * ordering between msync() and page cache writeback.
606 if (ZONEFS_I(file_inode(file))->i_ztype == ZONEFS_ZTYPE_SEQ &&
607 (vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE))
611 vma->vm_ops = &zonefs_file_vm_ops;
616 static loff_t zonefs_file_llseek(struct file *file, loff_t offset, int whence)
618 loff_t isize = i_size_read(file_inode(file));
621 * Seeks are limited to below the zone size for conventional zones
622 * and below the zone write pointer for sequential zones. In both
623 * cases, this limit is the inode size.
625 return generic_file_llseek_size(file, offset, whence, isize, isize);
628 static int zonefs_file_write_dio_end_io(struct kiocb *iocb, ssize_t size,
629 int error, unsigned int flags)
631 struct inode *inode = file_inode(iocb->ki_filp);
632 struct zonefs_inode_info *zi = ZONEFS_I(inode);
635 zonefs_io_error(inode, true);
639 if (size && zi->i_ztype != ZONEFS_ZTYPE_CNV) {
641 * Note that we may be seeing completions out of order,
642 * but that is not a problem since a write completed
643 * successfully necessarily means that all preceding writes
644 * were also successful. So we can safely increase the inode
645 * size to the write end location.
647 mutex_lock(&zi->i_truncate_mutex);
648 if (i_size_read(inode) < iocb->ki_pos + size) {
649 zonefs_update_stats(inode, iocb->ki_pos + size);
650 zonefs_i_size_write(inode, iocb->ki_pos + size);
652 mutex_unlock(&zi->i_truncate_mutex);
658 static const struct iomap_dio_ops zonefs_write_dio_ops = {
659 .end_io = zonefs_file_write_dio_end_io,
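/*
 * Synchronous direct write to a sequential zone file using a zone append BIO
 * issued at the zone start sector. The inode size update is handled by the
 * write end_io callback.
 */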
662 static ssize_t zonefs_file_dio_append(struct kiocb *iocb, struct iov_iter *from)
664 struct inode *inode = file_inode(iocb->ki_filp);
665 struct zonefs_inode_info *zi = ZONEFS_I(inode);
666 struct block_device *bdev = inode->i_sb->s_bdev;
673 max = queue_max_zone_append_sectors(bdev_get_queue(bdev));
674 max = ALIGN_DOWN(max << SECTOR_SHIFT, inode->i_sb->s_blocksize);
675 iov_iter_truncate(from, max);
677 nr_pages = iov_iter_npages(from, BIO_MAX_PAGES);
681 bio = bio_alloc_bioset(GFP_NOFS, nr_pages, &fs_bio_set);
685 bio_set_dev(bio, bdev);
686 bio->bi_iter.bi_sector = zi->i_zsector;
687 bio->bi_write_hint = iocb->ki_hint;
688 bio->bi_ioprio = iocb->ki_ioprio;
689 bio->bi_opf = REQ_OP_ZONE_APPEND | REQ_SYNC | REQ_IDLE;
690 if (iocb->ki_flags & IOCB_DSYNC)
691 bio->bi_opf |= REQ_FUA;
693 ret = bio_iov_iter_get_pages(bio, from);
698 size = bio->bi_iter.bi_size;
699 task_io_account_write(ret);
701 if (iocb->ki_flags & IOCB_HIPRI)
702 bio_set_polled(bio, iocb);
704 ret = submit_bio_wait(bio);
708 zonefs_file_write_dio_end_io(iocb, size, ret, 0);
710 iocb->ki_pos += size;
718 * Handle direct writes. For sequential zone files, this is the only possible
719 * write path. For these files, check that the user is issuing writes
720 * sequentially from the end of the file. This code assumes that the block layer
721 * delivers write requests to the device in sequential order. This is always the
722 * case if a block IO scheduler implementing the ELEVATOR_F_ZBD_SEQ_WRITE
723 * elevator feature is being used (e.g. mq-deadline). The block layer always
724 * automatically selects such an elevator for zoned block devices during the
725 * device initialization.
727 static ssize_t zonefs_file_dio_write(struct kiocb *iocb, struct iov_iter *from)
729 struct inode *inode = file_inode(iocb->ki_filp);
730 struct zonefs_inode_info *zi = ZONEFS_I(inode);
731 struct super_block *sb = inode->i_sb;
732 bool sync = is_sync_kiocb(iocb);
738 * For async direct IOs to sequential zone files, refuse IOCB_NOWAIT
739 * as this can cause write reordering (e.g. the first aio gets EAGAIN
740 * on the inode lock but the second goes through but is now unaligned).
742 if (zi->i_ztype == ZONEFS_ZTYPE_SEQ && !sync &&
743 (iocb->ki_flags & IOCB_NOWAIT))
746 if (iocb->ki_flags & IOCB_NOWAIT) {
747 if (!inode_trylock(inode))
753 ret = generic_write_checks(iocb, from);
757 iov_iter_truncate(from, zi->i_max_size - iocb->ki_pos);
758 count = iov_iter_count(from);
760 if ((iocb->ki_pos | count) & (sb->s_blocksize - 1)) {
765 /* Enforce sequential writes (append only) in sequential zones */
766 if (zi->i_ztype == ZONEFS_ZTYPE_SEQ) {
767 mutex_lock(&zi->i_truncate_mutex);
768 if (iocb->ki_pos != zi->i_wpoffset) {
769 mutex_unlock(&zi->i_truncate_mutex);
773 mutex_unlock(&zi->i_truncate_mutex);
778 ret = zonefs_file_dio_append(iocb, from);
780 ret = iomap_dio_rw(iocb, from, &zonefs_iomap_ops,
781 &zonefs_write_dio_ops, sync);
782 if (zi->i_ztype == ZONEFS_ZTYPE_SEQ &&
783 (ret > 0 || ret == -EIOCBQUEUED)) {
786 mutex_lock(&zi->i_truncate_mutex);
787 zi->i_wpoffset += count;
788 mutex_unlock(&zi->i_truncate_mutex);
797 static ssize_t zonefs_file_buffered_write(struct kiocb *iocb,
798 struct iov_iter *from)
800 struct inode *inode = file_inode(iocb->ki_filp);
801 struct zonefs_inode_info *zi = ZONEFS_I(inode);
805 * Direct IO writes are mandatory for sequential zone files so that the
806 * write IO issuing order is preserved.
808 if (zi->i_ztype != ZONEFS_ZTYPE_CNV)
811 if (iocb->ki_flags & IOCB_NOWAIT) {
812 if (!inode_trylock(inode))
818 ret = generic_write_checks(iocb, from);
822 iov_iter_truncate(from, zi->i_max_size - iocb->ki_pos);
824 ret = iomap_file_buffered_write(iocb, from, &zonefs_iomap_ops);
827 else if (ret == -EIO)
828 zonefs_io_error(inode, true);
833 ret = generic_write_sync(iocb, ret);
838 static ssize_t zonefs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
840 struct inode *inode = file_inode(iocb->ki_filp);
842 if (unlikely(IS_IMMUTABLE(inode)))
845 if (sb_rdonly(inode->i_sb))
848 /* Write operations beyond the zone size are not allowed */
849 if (iocb->ki_pos >= ZONEFS_I(inode)->i_max_size)
852 if (iocb->ki_flags & IOCB_DIRECT) {
853 ssize_t ret = zonefs_file_dio_write(iocb, from);
858 return zonefs_file_buffered_write(iocb, from);
861 static int zonefs_file_read_dio_end_io(struct kiocb *iocb, ssize_t size,
862 int error, unsigned int flags)
865 zonefs_io_error(file_inode(iocb->ki_filp), false);
872 static const struct iomap_dio_ops zonefs_read_dio_ops = {
873 .end_io = zonefs_file_read_dio_end_io,
876 static ssize_t zonefs_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
878 struct inode *inode = file_inode(iocb->ki_filp);
879 struct zonefs_inode_info *zi = ZONEFS_I(inode);
880 struct super_block *sb = inode->i_sb;
884 /* Offline zones cannot be read */
885 if (unlikely(IS_IMMUTABLE(inode) && !(inode->i_mode & 0777)))
888 if (iocb->ki_pos >= zi->i_max_size)
891 if (iocb->ki_flags & IOCB_NOWAIT) {
892 if (!inode_trylock_shared(inode))
895 inode_lock_shared(inode);
898 /* Limit read operations to written data */
899 mutex_lock(&zi->i_truncate_mutex);
900 isize = i_size_read(inode);
901 if (iocb->ki_pos >= isize) {
902 mutex_unlock(&zi->i_truncate_mutex);
906 iov_iter_truncate(to, isize - iocb->ki_pos);
907 mutex_unlock(&zi->i_truncate_mutex);
909 if (iocb->ki_flags & IOCB_DIRECT) {
910 size_t count = iov_iter_count(to);
912 if ((iocb->ki_pos | count) & (sb->s_blocksize - 1)) {
916 file_accessed(iocb->ki_filp);
917 ret = iomap_dio_rw(iocb, to, &zonefs_iomap_ops,
918 &zonefs_read_dio_ops, is_sync_kiocb(iocb));
920 ret = generic_file_read_iter(iocb, to);
922 zonefs_io_error(inode, false);
926 inode_unlock_shared(inode);
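/*
 * Explicit zone open/close on file open/release is needed only for writable
 * opens of sequential zone files when the explicit-open mount option is used.
 */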
931 static inline bool zonefs_file_use_exp_open(struct inode *inode, struct file *file)
933 struct zonefs_inode_info *zi = ZONEFS_I(inode);
934 struct zonefs_sb_info *sbi = ZONEFS_SB(inode->i_sb);
936 if (!(sbi->s_mount_opts & ZONEFS_MNTOPT_EXPLICIT_OPEN))
939 if (zi->i_ztype != ZONEFS_ZTYPE_SEQ)
942 if (!(file->f_mode & FMODE_WRITE))
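/*
 * Explicitly open the zone of a sequential file on its first writable open,
 * enforcing the device limit on the number of explicitly open zones.
 */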
948 static int zonefs_open_zone(struct inode *inode)
950 struct zonefs_inode_info *zi = ZONEFS_I(inode);
951 struct zonefs_sb_info *sbi = ZONEFS_SB(inode->i_sb);
954 mutex_lock(&zi->i_truncate_mutex);
957 if (zi->i_wr_refcnt == 1) {
959 if (atomic_inc_return(&sbi->s_open_zones) > sbi->s_max_open_zones) {
960 atomic_dec(&sbi->s_open_zones);
965 if (i_size_read(inode) < zi->i_max_size) {
966 ret = zonefs_zone_mgmt(inode, REQ_OP_ZONE_OPEN);
969 atomic_dec(&sbi->s_open_zones);
972 zi->i_flags |= ZONEFS_ZONE_OPEN;
977 mutex_unlock(&zi->i_truncate_mutex);
982 static int zonefs_file_open(struct inode *inode, struct file *file)
986 ret = generic_file_open(inode, file);
990 if (zonefs_file_use_exp_open(inode, file))
991 return zonefs_open_zone(inode);
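/*
 * On the last write close of a sequential file, explicitly close its zone, or
 * simply drop the open zone count if the zone is already full.
 */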
996 static void zonefs_close_zone(struct inode *inode)
998 struct zonefs_inode_info *zi = ZONEFS_I(inode);
1001 mutex_lock(&zi->i_truncate_mutex);
1003 if (!zi->i_wr_refcnt) {
1004 struct zonefs_sb_info *sbi = ZONEFS_SB(inode->i_sb);
1005 struct super_block *sb = inode->i_sb;
1008 * If the file zone is full, it is not open anymore and we only
1009 * need to decrement the open count.
1011 if (!(zi->i_flags & ZONEFS_ZONE_OPEN))
1014 ret = zonefs_zone_mgmt(inode, REQ_OP_ZONE_CLOSE);
1016 __zonefs_io_error(inode, false);
1018 * Leaving zones explicitly open may lead to a state
1019 * where most zones cannot be written (zone resources
1020 * exhausted). So take preventive action by remounting read-only.
1023 if (zi->i_flags & ZONEFS_ZONE_OPEN &&
1024 !(sb->s_flags & SB_RDONLY)) {
1025 zonefs_warn(sb, "closing zone failed, remounting filesystem read-only\n");
1026 sb->s_flags |= SB_RDONLY;
1029 zi->i_flags &= ~ZONEFS_ZONE_OPEN;
1031 atomic_dec(&sbi->s_open_zones);
1033 mutex_unlock(&zi->i_truncate_mutex);
1036 static int zonefs_file_release(struct inode *inode, struct file *file)
1039 * If we explicitly open a zone we must close it again as well, but the
1040 * zone management operation can fail (either due to an IO error or as
1041 * the zone has gone offline or read-only). Make sure we don't fail the
1042 * close(2) for user-space.
1044 if (zonefs_file_use_exp_open(inode, file))
1045 zonefs_close_zone(inode);
1050 static const struct file_operations zonefs_file_operations = {
1051 .open = zonefs_file_open,
1052 .release = zonefs_file_release,
1053 .fsync = zonefs_file_fsync,
1054 .mmap = zonefs_file_mmap,
1055 .llseek = zonefs_file_llseek,
1056 .read_iter = zonefs_file_read_iter,
1057 .write_iter = zonefs_file_write_iter,
1058 .splice_read = generic_file_splice_read,
1059 .splice_write = iter_file_splice_write,
1060 .iopoll = iomap_dio_iopoll,
1063 static struct kmem_cache *zonefs_inode_cachep;
1065 static struct inode *zonefs_alloc_inode(struct super_block *sb)
1067 struct zonefs_inode_info *zi;
1069 zi = kmem_cache_alloc(zonefs_inode_cachep, GFP_KERNEL);
1073 inode_init_once(&zi->i_vnode);
1074 mutex_init(&zi->i_truncate_mutex);
1075 init_rwsem(&zi->i_mmap_sem);
1076 zi->i_wr_refcnt = 0;
1078 return &zi->i_vnode;
1081 static void zonefs_free_inode(struct inode *inode)
1083 kmem_cache_free(zonefs_inode_cachep, ZONEFS_I(inode));
1089 static int zonefs_statfs(struct dentry *dentry, struct kstatfs *buf)
1091 struct super_block *sb = dentry->d_sb;
1092 struct zonefs_sb_info *sbi = ZONEFS_SB(sb);
1093 enum zonefs_ztype t;
1096 buf->f_type = ZONEFS_MAGIC;
1097 buf->f_bsize = sb->s_blocksize;
1098 buf->f_namelen = ZONEFS_NAME_MAX;
1100 spin_lock(&sbi->s_lock);
1102 buf->f_blocks = sbi->s_blocks;
1103 if (WARN_ON(sbi->s_used_blocks > sbi->s_blocks))
1106 buf->f_bfree = buf->f_blocks - sbi->s_used_blocks;
1107 buf->f_bavail = buf->f_bfree;
1109 for (t = 0; t < ZONEFS_ZTYPE_MAX; t++) {
1110 if (sbi->s_nr_files[t])
1111 buf->f_files += sbi->s_nr_files[t] + 1;
1115 spin_unlock(&sbi->s_lock);
1117 fsid = le64_to_cpup((void *)sbi->s_uuid.b) ^
1118 le64_to_cpup((void *)sbi->s_uuid.b + sizeof(u64));
1119 buf->f_fsid.val[0] = (u32)fsid;
1120 buf->f_fsid.val[1] = (u32)(fsid >> 32);
1126 Opt_errors_ro, Opt_errors_zro, Opt_errors_zol, Opt_errors_repair,
1127 Opt_explicit_open, Opt_err,
1130 static const match_table_t tokens = {
1131 { Opt_errors_ro, "errors=remount-ro"},
1132 { Opt_errors_zro, "errors=zone-ro"},
1133 { Opt_errors_zol, "errors=zone-offline"},
1134 { Opt_errors_repair, "errors=repair"},
1135 { Opt_explicit_open, "explicit-open" },
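/*
 * For example, a zoned block device (/dev/nullb0 here as an example device)
 * can be mounted with error handling and explicit zone open selected using:
 *
 *   mount -t zonefs -o errors=zone-ro,explicit-open /dev/nullb0 /mnt
 */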
1139 static int zonefs_parse_options(struct super_block *sb, char *options)
1141 struct zonefs_sb_info *sbi = ZONEFS_SB(sb);
1142 substring_t args[MAX_OPT_ARGS];
1148 while ((p = strsep(&options, ",")) != NULL) {
1154 token = match_token(p, tokens, args);
1157 sbi->s_mount_opts &= ~ZONEFS_MNTOPT_ERRORS_MASK;
1158 sbi->s_mount_opts |= ZONEFS_MNTOPT_ERRORS_RO;
1160 case Opt_errors_zro:
1161 sbi->s_mount_opts &= ~ZONEFS_MNTOPT_ERRORS_MASK;
1162 sbi->s_mount_opts |= ZONEFS_MNTOPT_ERRORS_ZRO;
1164 case Opt_errors_zol:
1165 sbi->s_mount_opts &= ~ZONEFS_MNTOPT_ERRORS_MASK;
1166 sbi->s_mount_opts |= ZONEFS_MNTOPT_ERRORS_ZOL;
1168 case Opt_errors_repair:
1169 sbi->s_mount_opts &= ~ZONEFS_MNTOPT_ERRORS_MASK;
1170 sbi->s_mount_opts |= ZONEFS_MNTOPT_ERRORS_REPAIR;
1172 case Opt_explicit_open:
1173 sbi->s_mount_opts |= ZONEFS_MNTOPT_EXPLICIT_OPEN;
1183 static int zonefs_show_options(struct seq_file *seq, struct dentry *root)
1185 struct zonefs_sb_info *sbi = ZONEFS_SB(root->d_sb);
1187 if (sbi->s_mount_opts & ZONEFS_MNTOPT_ERRORS_RO)
1188 seq_puts(seq, ",errors=remount-ro");
1189 if (sbi->s_mount_opts & ZONEFS_MNTOPT_ERRORS_ZRO)
1190 seq_puts(seq, ",errors=zone-ro");
1191 if (sbi->s_mount_opts & ZONEFS_MNTOPT_ERRORS_ZOL)
1192 seq_puts(seq, ",errors=zone-offline");
1193 if (sbi->s_mount_opts & ZONEFS_MNTOPT_ERRORS_REPAIR)
1194 seq_puts(seq, ",errors=repair");
1199 static int zonefs_remount(struct super_block *sb, int *flags, char *data)
1201 sync_filesystem(sb);
1203 return zonefs_parse_options(sb, data);
1206 static const struct super_operations zonefs_sops = {
1207 .alloc_inode = zonefs_alloc_inode,
1208 .free_inode = zonefs_free_inode,
1209 .statfs = zonefs_statfs,
1210 .remount_fs = zonefs_remount,
1211 .show_options = zonefs_show_options,
1214 static const struct inode_operations zonefs_dir_inode_operations = {
1215 .lookup = simple_lookup,
1216 .setattr = zonefs_inode_setattr,
1219 static void zonefs_init_dir_inode(struct inode *parent, struct inode *inode,
1220 enum zonefs_ztype type)
1222 struct super_block *sb = parent->i_sb;
1224 inode->i_ino = blkdev_nr_zones(sb->s_bdev->bd_disk) + type + 1;
1225 inode_init_owner(inode, parent, S_IFDIR | 0555);
1226 inode->i_op = &zonefs_dir_inode_operations;
1227 inode->i_fop = &simple_dir_operations;
1228 set_nlink(inode, 2);
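/*
 * Initialize a zone file inode using the zone information reported by the
 * device for the zone backing the file.
 */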
1232 static void zonefs_init_file_inode(struct inode *inode, struct blk_zone *zone,
1233 enum zonefs_ztype type)
1235 struct super_block *sb = inode->i_sb;
1236 struct zonefs_sb_info *sbi = ZONEFS_SB(sb);
1237 struct zonefs_inode_info *zi = ZONEFS_I(inode);
1239 inode->i_ino = zone->start >> sbi->s_zone_sectors_shift;
1240 inode->i_mode = S_IFREG | sbi->s_perm;
1243 zi->i_zsector = zone->start;
1244 zi->i_zone_size = zone->len << SECTOR_SHIFT;
1246 zi->i_max_size = min_t(loff_t, MAX_LFS_FILESIZE,
1247 zone->capacity << SECTOR_SHIFT);
1248 zi->i_wpoffset = zonefs_check_zone_condition(inode, zone, true, true);
1250 inode->i_uid = sbi->s_uid;
1251 inode->i_gid = sbi->s_gid;
1252 inode->i_size = zi->i_wpoffset;
1253 inode->i_blocks = zi->i_max_size >> SECTOR_SHIFT;
1255 inode->i_op = &zonefs_file_inode_operations;
1256 inode->i_fop = &zonefs_file_operations;
1257 inode->i_mapping->a_ops = &zonefs_file_aops;
1259 sb->s_maxbytes = max(zi->i_max_size, sb->s_maxbytes);
1260 sbi->s_blocks += zi->i_max_size >> sb->s_blocksize_bits;
1261 sbi->s_used_blocks += zi->i_wpoffset >> sb->s_blocksize_bits;
1264 static struct dentry *zonefs_create_inode(struct dentry *parent,
1265 const char *name, struct blk_zone *zone,
1266 enum zonefs_ztype type)
1268 struct inode *dir = d_inode(parent);
1269 struct dentry *dentry;
1270 struct inode *inode;
1272 dentry = d_alloc_name(parent, name);
1276 inode = new_inode(parent->d_sb);
1280 inode->i_ctime = inode->i_mtime = inode->i_atime = dir->i_ctime;
1282 zonefs_init_file_inode(inode, zone, type);
1284 zonefs_init_dir_inode(dir, inode, type);
1285 d_add(dentry, inode);
1296 struct zonefs_zone_data {
1297 struct super_block *sb;
1298 unsigned int nr_zones[ZONEFS_ZTYPE_MAX];
1299 struct blk_zone *zones;
1303 * Create a zone group and populate it with zone files.
1305 static int zonefs_create_zgroup(struct zonefs_zone_data *zd,
1306 enum zonefs_ztype type)
1308 struct super_block *sb = zd->sb;
1309 struct zonefs_sb_info *sbi = ZONEFS_SB(sb);
1310 struct blk_zone *zone, *next, *end;
1311 const char *zgroup_name;
1317 /* If the group is empty, there is nothing to do */
1318 if (!zd->nr_zones[type])
1321 file_name = kmalloc(ZONEFS_NAME_MAX, GFP_KERNEL);
1325 if (type == ZONEFS_ZTYPE_CNV)
1326 zgroup_name = "cnv";
1328 zgroup_name = "seq";
1330 dir = zonefs_create_inode(sb->s_root, zgroup_name, NULL, type);
1337 * The first zone contains the super block: skip it.
1339 end = zd->zones + blkdev_nr_zones(sb->s_bdev->bd_disk);
1340 for (zone = &zd->zones[1]; zone < end; zone = next) {
1343 if (zonefs_zone_type(zone) != type)
1347 * For conventional zones, contiguous zones can be aggregated
1348 * together to form larger files. Note that this overwrites the
1349 * length of the first zone of the set of contiguous zones
1350 * aggregated together. If one offline or read-only zone is
1351 * found, assume that all zones aggregated have the same condition.
1354 if (type == ZONEFS_ZTYPE_CNV &&
1355 (sbi->s_features & ZONEFS_F_AGGRCNV)) {
1356 for (; next < end; next++) {
1357 if (zonefs_zone_type(next) != type)
1359 zone->len += next->len;
1360 zone->capacity += next->capacity;
1361 if (next->cond == BLK_ZONE_COND_READONLY &&
1362 zone->cond != BLK_ZONE_COND_OFFLINE)
1363 zone->cond = BLK_ZONE_COND_READONLY;
1364 else if (next->cond == BLK_ZONE_COND_OFFLINE)
1365 zone->cond = BLK_ZONE_COND_OFFLINE;
1367 if (zone->capacity != zone->len) {
1368 zonefs_err(sb, "Invalid conventional zone capacity\n");
1375 * Use the file number within its group as file name.
1377 snprintf(file_name, ZONEFS_NAME_MAX - 1, "%u", n);
1378 if (!zonefs_create_inode(dir, file_name, zone, type)) {
1386 zonefs_info(sb, "Zone group \"%s\" has %u file%s\n",
1387 zgroup_name, n, n > 1 ? "s" : "");
1389 sbi->s_nr_files[type] = n;
1398 static int zonefs_get_zone_info_cb(struct blk_zone *zone, unsigned int idx,
1401 struct zonefs_zone_data *zd = data;
1404 * Count the number of usable zones: the first zone at index 0 contains
1405 * the super block and is ignored.
1407 switch (zone->type) {
1408 case BLK_ZONE_TYPE_CONVENTIONAL:
1409 zone->wp = zone->start + zone->len;
1411 zd->nr_zones[ZONEFS_ZTYPE_CNV]++;
1413 case BLK_ZONE_TYPE_SEQWRITE_REQ:
1414 case BLK_ZONE_TYPE_SEQWRITE_PREF:
1416 zd->nr_zones[ZONEFS_ZTYPE_SEQ]++;
1419 zonefs_err(zd->sb, "Unsupported zone type 0x%x\n",
1424 memcpy(&zd->zones[idx], zone, sizeof(struct blk_zone));
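/*
 * Allocate the zone descriptor array and obtain the information of all zones
 * with a full device zone report.
 */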
1429 static int zonefs_get_zone_info(struct zonefs_zone_data *zd)
1431 struct block_device *bdev = zd->sb->s_bdev;
1434 zd->zones = kvcalloc(blkdev_nr_zones(bdev->bd_disk),
1435 sizeof(struct blk_zone), GFP_KERNEL);
1439 /* Get zones information from the device */
1440 ret = blkdev_report_zones(bdev, 0, BLK_ALL_ZONES,
1441 zonefs_get_zone_info_cb, zd);
1443 zonefs_err(zd->sb, "Zone report failed %d\n", ret);
1447 if (ret != blkdev_nr_zones(bdev->bd_disk)) {
1448 zonefs_err(zd->sb, "Invalid zone report (%d/%u zones)\n",
1449 ret, blkdev_nr_zones(bdev->bd_disk));
1456 static inline void zonefs_cleanup_zone_info(struct zonefs_zone_data *zd)
1462 * Read super block information from the device.
1464 static int zonefs_read_super(struct super_block *sb)
1466 struct zonefs_sb_info *sbi = ZONEFS_SB(sb);
1467 struct zonefs_super *super;
1468 u32 crc, stored_crc;
1470 struct bio_vec bio_vec;
1474 page = alloc_page(GFP_KERNEL);
1478 bio_init(&bio, &bio_vec, 1);
1479 bio.bi_iter.bi_sector = 0;
1480 bio.bi_opf = REQ_OP_READ;
1481 bio_set_dev(&bio, sb->s_bdev);
1482 bio_add_page(&bio, page, PAGE_SIZE, 0);
1484 ret = submit_bio_wait(&bio);
1491 if (le32_to_cpu(super->s_magic) != ZONEFS_MAGIC)
1494 stored_crc = le32_to_cpu(super->s_crc);
1496 crc = crc32(~0U, (unsigned char *)super, sizeof(struct zonefs_super));
1497 if (crc != stored_crc) {
1498 zonefs_err(sb, "Invalid checksum (Expected 0x%08x, got 0x%08x)",
1503 sbi->s_features = le64_to_cpu(super->s_features);
1504 if (sbi->s_features & ~ZONEFS_F_DEFINED_FEATURES) {
1505 zonefs_err(sb, "Unknown features set 0x%llx\n",
1510 if (sbi->s_features & ZONEFS_F_UID) {
1511 sbi->s_uid = make_kuid(current_user_ns(),
1512 le32_to_cpu(super->s_uid));
1513 if (!uid_valid(sbi->s_uid)) {
1514 zonefs_err(sb, "Invalid UID feature\n");
1519 if (sbi->s_features & ZONEFS_F_GID) {
1520 sbi->s_gid = make_kgid(current_user_ns(),
1521 le32_to_cpu(super->s_gid));
1522 if (!gid_valid(sbi->s_gid)) {
1523 zonefs_err(sb, "Invalid GID feature\n");
1528 if (sbi->s_features & ZONEFS_F_PERM)
1529 sbi->s_perm = le32_to_cpu(super->s_perm);
1531 if (memchr_inv(super->s_reserved, 0, sizeof(super->s_reserved))) {
1532 zonefs_err(sb, "Reserved area is being used\n");
1536 import_uuid(&sbi->s_uuid, super->s_uuid);
1548 * Check that the device is zoned. If it is, get the list of zones and create
1549 * sub-directories and files according to the device zone configuration and
1552 static int zonefs_fill_super(struct super_block *sb, void *data, int silent)
1554 struct zonefs_zone_data zd;
1555 struct zonefs_sb_info *sbi;
1556 struct inode *inode;
1557 enum zonefs_ztype t;
1560 if (!bdev_is_zoned(sb->s_bdev)) {
1561 zonefs_err(sb, "Not a zoned block device\n");
1566 * Initialize super block information: the maximum file size is updated
1567 * when the zone files are created so that the format option
1568 * ZONEFS_F_AGGRCNV, which increases the maximum file size of a file
1569 * beyond the zone size, is taken into account.
1571 sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
1575 spin_lock_init(&sbi->s_lock);
1576 sb->s_fs_info = sbi;
1577 sb->s_magic = ZONEFS_MAGIC;
1579 sb->s_op = &zonefs_sops;
1580 sb->s_time_gran = 1;
1583 * The block size is set to the device physical sector size to ensure
1584 * that write operations on 512e devices (512B logical block and 4KB
1585 * physical block) are always aligned to the device physical blocks,
1586 * as mandated by the ZBC/ZAC specifications.
1588 sb_set_blocksize(sb, bdev_physical_block_size(sb->s_bdev));
1589 sbi->s_zone_sectors_shift = ilog2(bdev_zone_sectors(sb->s_bdev));
1590 sbi->s_uid = GLOBAL_ROOT_UID;
1591 sbi->s_gid = GLOBAL_ROOT_GID;
1593 sbi->s_mount_opts = ZONEFS_MNTOPT_ERRORS_RO;
1594 sbi->s_max_open_zones = bdev_max_open_zones(sb->s_bdev);
1595 atomic_set(&sbi->s_open_zones, 0);
1596 if (!sbi->s_max_open_zones &&
1597 sbi->s_mount_opts & ZONEFS_MNTOPT_EXPLICIT_OPEN) {
1598 zonefs_info(sb, "No open zones limit. Ignoring explicit_open mount option\n");
1599 sbi->s_mount_opts &= ~ZONEFS_MNTOPT_EXPLICIT_OPEN;
1602 ret = zonefs_read_super(sb);
1606 ret = zonefs_parse_options(sb, data);
1610 memset(&zd, 0, sizeof(struct zonefs_zone_data));
1612 ret = zonefs_get_zone_info(&zd);
1616 zonefs_info(sb, "Mounting %u zones",
1617 blkdev_nr_zones(sb->s_bdev->bd_disk));
1619 /* Create root directory inode */
1621 inode = new_inode(sb);
1625 inode->i_ino = blkdev_nr_zones(sb->s_bdev->bd_disk);
1626 inode->i_mode = S_IFDIR | 0555;
1627 inode->i_ctime = inode->i_mtime = inode->i_atime = current_time(inode);
1628 inode->i_op = &zonefs_dir_inode_operations;
1629 inode->i_fop = &simple_dir_operations;
1630 set_nlink(inode, 2);
1632 sb->s_root = d_make_root(inode);
1636 /* Create and populate files in zone groups directories */
1637 for (t = 0; t < ZONEFS_ZTYPE_MAX; t++) {
1638 ret = zonefs_create_zgroup(&zd, t);
1644 zonefs_cleanup_zone_info(&zd);
1649 static struct dentry *zonefs_mount(struct file_system_type *fs_type,
1650 int flags, const char *dev_name, void *data)
1652 return mount_bdev(fs_type, flags, dev_name, data, zonefs_fill_super);
1655 static void zonefs_kill_super(struct super_block *sb)
1657 struct zonefs_sb_info *sbi = ZONEFS_SB(sb);
1660 d_genocide(sb->s_root);
1661 kill_block_super(sb);
1666 * File system definition and registration.
1668 static struct file_system_type zonefs_type = {
1669 .owner = THIS_MODULE,
1671 .mount = zonefs_mount,
1672 .kill_sb = zonefs_kill_super,
1673 .fs_flags = FS_REQUIRES_DEV,
1676 static int __init zonefs_init_inodecache(void)
1678 zonefs_inode_cachep = kmem_cache_create("zonefs_inode_cache",
1679 sizeof(struct zonefs_inode_info), 0,
1680 (SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD | SLAB_ACCOUNT),
1682 if (zonefs_inode_cachep == NULL)
1687 static void zonefs_destroy_inodecache(void)
1690 * Make sure all delayed rcu free inodes are flushed before we
1691 * destroy the inode cache.
1694 kmem_cache_destroy(zonefs_inode_cachep);
1697 static int __init zonefs_init(void)
1701 BUILD_BUG_ON(sizeof(struct zonefs_super) != ZONEFS_SUPER_SIZE);
1703 ret = zonefs_init_inodecache();
1707 ret = register_filesystem(&zonefs_type);
1709 zonefs_destroy_inodecache();
1716 static void __exit zonefs_exit(void)
1718 zonefs_destroy_inodecache();
1719 unregister_filesystem(&zonefs_type);
1722 MODULE_AUTHOR("Damien Le Moal");
1723 MODULE_DESCRIPTION("Zone file system for zoned block devices");
1724 MODULE_LICENSE("GPL");
1725 module_init(zonefs_init);
1726 module_exit(zonefs_exit);