1 // SPDX-License-Identifier: GPL-2.0
3 * Simple file system for zoned block devices exposing zones as files.
5 * Copyright (C) 2019 Western Digital Corporation or its affiliates.
7 #include <linux/module.h>
8 #include <linux/pagemap.h>
9 #include <linux/magic.h>
10 #include <linux/iomap.h>
11 #include <linux/init.h>
12 #include <linux/slab.h>
13 #include <linux/blkdev.h>
14 #include <linux/statfs.h>
15 #include <linux/writeback.h>
16 #include <linux/quotaops.h>
17 #include <linux/seq_file.h>
18 #include <linux/parser.h>
19 #include <linux/uio.h>
20 #include <linux/mman.h>
21 #include <linux/sched/mm.h>
22 #include <linux/crc32.h>
23 #include <linux/task_io_accounting_ops.h>
27 #define CREATE_TRACE_POINTS
30 static inline int zonefs_zone_mgmt(struct inode *inode,
33 struct zonefs_inode_info *zi = ZONEFS_I(inode);
36 lockdep_assert_held(&zi->i_truncate_mutex);
39 * With ZNS drives, closing an explicitly open zone that has not been
40 * written will change the zone state to "closed", that is, the zone
41 * will remain active. Since this can then cause failure of explicit
42 * open operation on other zones if the drive active zone resources
43 * are exceeded, make sure that the zone does not remain active by resetting it.
46 if (op == REQ_OP_ZONE_CLOSE && !zi->i_wpoffset)
47 op = REQ_OP_ZONE_RESET;
49 trace_zonefs_zone_mgmt(inode, op);
50 ret = blkdev_zone_mgmt(inode->i_sb->s_bdev, op, zi->i_zsector,
51 zi->i_zone_size >> SECTOR_SHIFT, GFP_NOFS);
53 zonefs_err(inode->i_sb,
54 "Zone management operation %s at %llu failed %d\n",
55 blk_op_str(op), zi->i_zsector, ret);
62 static inline void zonefs_i_size_write(struct inode *inode, loff_t isize)
64 struct zonefs_inode_info *zi = ZONEFS_I(inode);
66 i_size_write(inode, isize);
68 * A full zone is no longer open/active and does not need a graceful close.
71 if (isize >= zi->i_max_size)
72 zi->i_flags &= ~ZONEFS_ZONE_OPEN;
75 static int zonefs_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
76 unsigned int flags, struct iomap *iomap,
79 struct zonefs_inode_info *zi = ZONEFS_I(inode);
80 struct super_block *sb = inode->i_sb;
83 /* All I/Os should always be within the file maximum size */
84 if (WARN_ON_ONCE(offset + length > zi->i_max_size))
88 * Sequential zones can only accept direct writes. This is already
89 * checked when writes are issued, so warn if we see a page writeback operation.
92 if (WARN_ON_ONCE(zi->i_ztype == ZONEFS_ZTYPE_SEQ &&
93 (flags & IOMAP_WRITE) && !(flags & IOMAP_DIRECT)))
97 * For conventional zones, all blocks are always mapped. For sequential
98 * zones, all blocks are always mapped below the inode size (zone
99 * write pointer) and unwritten beyond.
101 mutex_lock(&zi->i_truncate_mutex);
102 isize = i_size_read(inode);
104 iomap->type = IOMAP_UNWRITTEN;
106 iomap->type = IOMAP_MAPPED;
107 if (flags & IOMAP_WRITE)
108 length = zi->i_max_size - offset;
110 length = min(length, isize - offset);
111 mutex_unlock(&zi->i_truncate_mutex);
113 iomap->offset = ALIGN_DOWN(offset, sb->s_blocksize);
114 iomap->length = ALIGN(offset + length, sb->s_blocksize) - iomap->offset;
115 iomap->bdev = inode->i_sb->s_bdev;
116 iomap->addr = (zi->i_zsector << SECTOR_SHIFT) + iomap->offset;
118 trace_zonefs_iomap_begin(inode, iomap);
123 static const struct iomap_ops zonefs_iomap_ops = {
124 .iomap_begin = zonefs_iomap_begin,
127 static int zonefs_readpage(struct file *unused, struct page *page)
129 return iomap_readpage(page, &zonefs_iomap_ops);
132 static void zonefs_readahead(struct readahead_control *rac)
134 iomap_readahead(rac, &zonefs_iomap_ops);
138 * Map blocks for page writeback. This is used only on conventional zone files,
139 * which implies that the page range can only be within the fixed inode size.
141 static int zonefs_map_blocks(struct iomap_writepage_ctx *wpc,
142 struct inode *inode, loff_t offset)
144 struct zonefs_inode_info *zi = ZONEFS_I(inode);
146 if (WARN_ON_ONCE(zi->i_ztype != ZONEFS_ZTYPE_CNV))
148 if (WARN_ON_ONCE(offset >= i_size_read(inode)))
151 /* If the mapping is already OK, nothing needs to be done */
152 if (offset >= wpc->iomap.offset &&
153 offset < wpc->iomap.offset + wpc->iomap.length)
156 return zonefs_iomap_begin(inode, offset, zi->i_max_size - offset,
157 IOMAP_WRITE, &wpc->iomap, NULL);
160 static const struct iomap_writeback_ops zonefs_writeback_ops = {
161 .map_blocks = zonefs_map_blocks,
164 static int zonefs_writepage(struct page *page, struct writeback_control *wbc)
166 struct iomap_writepage_ctx wpc = { };
168 return iomap_writepage(page, wbc, &wpc, &zonefs_writeback_ops);
171 static int zonefs_writepages(struct address_space *mapping,
172 struct writeback_control *wbc)
174 struct iomap_writepage_ctx wpc = { };
176 return iomap_writepages(mapping, wbc, &wpc, &zonefs_writeback_ops);
179 static int zonefs_swap_activate(struct swap_info_struct *sis,
180 struct file *swap_file, sector_t *span)
182 struct inode *inode = file_inode(swap_file);
183 struct zonefs_inode_info *zi = ZONEFS_I(inode);
185 if (zi->i_ztype != ZONEFS_ZTYPE_CNV) {
186 zonefs_err(inode->i_sb,
187 "swap file: not a conventional zone file\n");
191 return iomap_swapfile_activate(sis, swap_file, span, &zonefs_iomap_ops);
194 static const struct address_space_operations zonefs_file_aops = {
195 .readpage = zonefs_readpage,
196 .readahead = zonefs_readahead,
197 .writepage = zonefs_writepage,
198 .writepages = zonefs_writepages,
199 .dirty_folio = filemap_dirty_folio,
200 .releasepage = iomap_releasepage,
201 .invalidate_folio = iomap_invalidate_folio,
202 .migratepage = iomap_migrate_page,
203 .is_partially_uptodate = iomap_is_partially_uptodate,
204 .error_remove_page = generic_error_remove_page,
205 .direct_IO = noop_direct_IO,
206 .swap_activate = zonefs_swap_activate,
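/*
 * Update the volume used block count to reflect a change in the size of a
 * zone file. The values seen here may be inconsistent after an IO error, so
 * the used block count is clamped to the valid range.
 */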
209 static void zonefs_update_stats(struct inode *inode, loff_t new_isize)
211 struct super_block *sb = inode->i_sb;
212 struct zonefs_sb_info *sbi = ZONEFS_SB(sb);
213 loff_t old_isize = i_size_read(inode);
216 if (new_isize == old_isize)
219 spin_lock(&sbi->s_lock);
222 * This may be called for an update after an IO error, so the size values
223 * seen here may be inconsistent. Clamp the used block count accordingly.
225 if (new_isize < old_isize) {
226 nr_blocks = (old_isize - new_isize) >> sb->s_blocksize_bits;
227 if (sbi->s_used_blocks > nr_blocks)
228 sbi->s_used_blocks -= nr_blocks;
230 sbi->s_used_blocks = 0;
232 sbi->s_used_blocks +=
233 (new_isize - old_isize) >> sb->s_blocksize_bits;
234 if (sbi->s_used_blocks > sbi->s_blocks)
235 sbi->s_used_blocks = sbi->s_blocks;
238 spin_unlock(&sbi->s_lock);
242 * Check a zone condition and adjust its file inode access permissions for
243 * offline and readonly zones. Return the inode size corresponding to the
244 * amount of readable data in the zone.
246 static loff_t zonefs_check_zone_condition(struct inode *inode,
247 struct blk_zone *zone, bool warn,
250 struct zonefs_inode_info *zi = ZONEFS_I(inode);
252 switch (zone->cond) {
253 case BLK_ZONE_COND_OFFLINE:
255 * Dead zone: make the inode immutable, disable all accesses
256 * and set the file size to 0 (zone wp set to zone start).
259 zonefs_warn(inode->i_sb, "inode %lu: offline zone\n",
261 inode->i_flags |= S_IMMUTABLE;
262 inode->i_mode &= ~0777;
263 zone->wp = zone->start;
265 case BLK_ZONE_COND_READONLY:
267 * The write pointer of read-only zones is invalid. If such a
268 * zone is found during mount, the file size cannot be retrieved
269 * so we treat the zone as offline (mount == true case).
270 * Otherwise, keep the file size as it was when last updated
271 * so that the user can recover data. In both cases, writes are
272 * always disabled for the zone.
275 zonefs_warn(inode->i_sb, "inode %lu: read-only zone\n",
277 inode->i_flags |= S_IMMUTABLE;
279 zone->cond = BLK_ZONE_COND_OFFLINE;
280 inode->i_mode &= ~0777;
281 zone->wp = zone->start;
284 inode->i_mode &= ~0222;
285 return i_size_read(inode);
286 case BLK_ZONE_COND_FULL:
287 /* The write pointer of full zones is invalid. */
288 return zi->i_max_size;
290 if (zi->i_ztype == ZONEFS_ZTYPE_CNV)
291 return zi->i_max_size;
292 return (zone->wp - zone->start) << SECTOR_SHIFT;
296 struct zonefs_ioerr_data {
301 static int zonefs_io_error_cb(struct blk_zone *zone, unsigned int idx,
304 struct zonefs_ioerr_data *err = data;
305 struct inode *inode = err->inode;
306 struct zonefs_inode_info *zi = ZONEFS_I(inode);
307 struct super_block *sb = inode->i_sb;
308 struct zonefs_sb_info *sbi = ZONEFS_SB(sb);
309 loff_t isize, data_size;
312 * Check the zone condition: if the zone is not "bad" (offline or
313 * read-only), read errors are simply signaled to the IO issuer as long
314 * as there is no inconsistency between the inode size and the amount of
315 * data written in the zone (data_size).
317 data_size = zonefs_check_zone_condition(inode, zone, true, false);
318 isize = i_size_read(inode);
319 if (zone->cond != BLK_ZONE_COND_OFFLINE &&
320 zone->cond != BLK_ZONE_COND_READONLY &&
321 !err->write && isize == data_size)
325 * At this point, we detected either a bad zone or an inconsistency
326 * between the inode size and the amount of data written in the zone.
327 * For the latter case, the cause may be a write IO error or an external
328 * action on the device. Two error patterns exist:
329 * 1) The inode size is lower than the amount of data in the zone:
330 * a write operation partially failed and data was written at the end
331 * of the file. This can happen in the case of a large direct IO
332 * needing several BIOs and/or write requests to be processed.
333 * 2) The inode size is larger than the amount of data in the zone:
334 * this can happen with a deferred write error with the use of the
335 * device side write cache after getting successful write IO
336 * completions. Other possibilities are (a) an external corruption,
337 * e.g. an application reset the zone directly, or (b) the device
338 * has a serious problem (e.g. firmware bug).
340 * In all cases, warn about inode size inconsistency and handle the
341 * IO error according to the zone condition and to the mount options.
343 if (zi->i_ztype == ZONEFS_ZTYPE_SEQ && isize != data_size)
344 zonefs_warn(sb, "inode %lu: invalid size %lld (should be %lld)\n",
345 inode->i_ino, isize, data_size);
348 * First handle bad zones signaled by hardware. The mount options
349 * errors=zone-ro and errors=zone-offline result in changing the
350 * zone condition to read-only and offline respectively, as if the
351 * condition was signaled by the hardware.
353 if (zone->cond == BLK_ZONE_COND_OFFLINE ||
354 sbi->s_mount_opts & ZONEFS_MNTOPT_ERRORS_ZOL) {
355 zonefs_warn(sb, "inode %lu: read/write access disabled\n",
357 if (zone->cond != BLK_ZONE_COND_OFFLINE) {
358 zone->cond = BLK_ZONE_COND_OFFLINE;
359 data_size = zonefs_check_zone_condition(inode, zone,
362 } else if (zone->cond == BLK_ZONE_COND_READONLY ||
363 sbi->s_mount_opts & ZONEFS_MNTOPT_ERRORS_ZRO) {
364 zonefs_warn(sb, "inode %lu: write access disabled\n",
366 if (zone->cond != BLK_ZONE_COND_READONLY) {
367 zone->cond = BLK_ZONE_COND_READONLY;
368 data_size = zonefs_check_zone_condition(inode, zone,
374 * If the filesystem is mounted with the explicit-open mount option, we
375 * need to clear the ZONEFS_ZONE_OPEN flag if the zone transitioned to
376 * the read-only or offline condition, to avoid attempting an explicit
377 * close of the zone when the inode file is closed.
379 if ((sbi->s_mount_opts & ZONEFS_MNTOPT_EXPLICIT_OPEN) &&
380 (zone->cond == BLK_ZONE_COND_OFFLINE ||
381 zone->cond == BLK_ZONE_COND_READONLY))
382 zi->i_flags &= ~ZONEFS_ZONE_OPEN;
385 * If errors=remount-ro was specified, any error results in remounting
386 * the volume as read-only.
388 if ((sbi->s_mount_opts & ZONEFS_MNTOPT_ERRORS_RO) && !sb_rdonly(sb)) {
389 zonefs_warn(sb, "remounting filesystem read-only\n");
390 sb->s_flags |= SB_RDONLY;
394 * Update block usage stats and the inode size to prevent access to invalid data.
397 zonefs_update_stats(inode, data_size);
398 zonefs_i_size_write(inode, data_size);
399 zi->i_wpoffset = data_size;
405 * When a file IO error occurs, check the file zone to see if there is a change
406 * in the zone condition (e.g. offline or read-only). For a failed write to a
407 * sequential zone, the zone write pointer position must also be checked to
408 * eventually correct the file size and zonefs inode write pointer offset
409 * (which can be out of sync with the drive due to partial write failures).
411 static void __zonefs_io_error(struct inode *inode, bool write)
413 struct zonefs_inode_info *zi = ZONEFS_I(inode);
414 struct super_block *sb = inode->i_sb;
415 struct zonefs_sb_info *sbi = ZONEFS_SB(sb);
416 unsigned int noio_flag;
417 unsigned int nr_zones =
418 zi->i_zone_size >> (sbi->s_zone_sectors_shift + SECTOR_SHIFT);
419 struct zonefs_ioerr_data err = {
426 * Memory allocations in blkdev_report_zones() can trigger a memory
427 * reclaim which may in turn cause a recursion into zonefs as well as
428 * struct request allocations for the same device. The former case may
429 * end up in a deadlock on the inode truncate mutex, while the latter
430 * may prevent IO forward progress. Executing the report zones under
431 * the GFP_NOIO context avoids both problems.
433 noio_flag = memalloc_noio_save();
434 ret = blkdev_report_zones(sb->s_bdev, zi->i_zsector, nr_zones,
435 zonefs_io_error_cb, &err);
437 zonefs_err(sb, "Get inode %lu zone information failed %d\n",
439 memalloc_noio_restore(noio_flag);
442 static void zonefs_io_error(struct inode *inode, bool write)
444 struct zonefs_inode_info *zi = ZONEFS_I(inode);
446 mutex_lock(&zi->i_truncate_mutex);
447 __zonefs_io_error(inode, write);
448 mutex_unlock(&zi->i_truncate_mutex);
451 static int zonefs_file_truncate(struct inode *inode, loff_t isize)
453 struct zonefs_inode_info *zi = ZONEFS_I(inode);
459 * Only sequential zone files can be truncated and truncation is allowed
460 * only down to a 0 size, which is equivalent to a zone reset, and to
461 * the maximum file size, which is equivalent to a zone finish.
463 if (zi->i_ztype != ZONEFS_ZTYPE_SEQ)
467 op = REQ_OP_ZONE_RESET;
468 else if (isize == zi->i_max_size)
469 op = REQ_OP_ZONE_FINISH;
473 inode_dio_wait(inode);
475 /* Serialize against page faults */
476 filemap_invalidate_lock(inode->i_mapping);
478 /* Serialize against zonefs_iomap_begin() */
479 mutex_lock(&zi->i_truncate_mutex);
481 old_isize = i_size_read(inode);
482 if (isize == old_isize)
485 ret = zonefs_zone_mgmt(inode, op);
490 * If the mount option ZONEFS_MNTOPT_EXPLICIT_OPEN is set,
491 * take care of open zones.
493 if (zi->i_flags & ZONEFS_ZONE_OPEN) {
495 * Truncating a zone to EMPTY or FULL is the equivalent of
496 * closing the zone. For a truncation to 0, we need to
497 * re-open the zone to ensure new writes can be processed.
498 * For a truncation to the maximum file size, the zone is
499 * closed and writes cannot be accepted anymore, so clear the open flag.
503 ret = zonefs_zone_mgmt(inode, REQ_OP_ZONE_OPEN);
505 zi->i_flags &= ~ZONEFS_ZONE_OPEN;
508 zonefs_update_stats(inode, isize);
509 truncate_setsize(inode, isize);
510 zi->i_wpoffset = isize;
513 mutex_unlock(&zi->i_truncate_mutex);
514 filemap_invalidate_unlock(inode->i_mapping);
519 static int zonefs_inode_setattr(struct user_namespace *mnt_userns,
520 struct dentry *dentry, struct iattr *iattr)
522 struct inode *inode = d_inode(dentry);
525 if (unlikely(IS_IMMUTABLE(inode)))
528 ret = setattr_prepare(&init_user_ns, dentry, iattr);
533 * Since files and directories cannot be created nor deleted, do not
534 * allow setting any write attributes on the sub-directories grouping
535 * files by zone type.
537 if ((iattr->ia_valid & ATTR_MODE) && S_ISDIR(inode->i_mode) &&
538 (iattr->ia_mode & 0222))
541 if (((iattr->ia_valid & ATTR_UID) &&
542 !uid_eq(iattr->ia_uid, inode->i_uid)) ||
543 ((iattr->ia_valid & ATTR_GID) &&
544 !gid_eq(iattr->ia_gid, inode->i_gid))) {
545 ret = dquot_transfer(inode, iattr);
550 if (iattr->ia_valid & ATTR_SIZE) {
551 ret = zonefs_file_truncate(inode, iattr->ia_size);
556 setattr_copy(&init_user_ns, inode, iattr);
561 static const struct inode_operations zonefs_file_inode_operations = {
562 .setattr = zonefs_inode_setattr,
565 static int zonefs_file_fsync(struct file *file, loff_t start, loff_t end,
568 struct inode *inode = file_inode(file);
571 if (unlikely(IS_IMMUTABLE(inode)))
575 * Since only direct writes are allowed in sequential files, page cache
576 * flush is needed only for conventional zone files.
578 if (ZONEFS_I(inode)->i_ztype == ZONEFS_ZTYPE_CNV)
579 ret = file_write_and_wait_range(file, start, end);
581 ret = blkdev_issue_flush(inode->i_sb->s_bdev);
584 zonefs_io_error(inode, true);
589 static vm_fault_t zonefs_filemap_page_mkwrite(struct vm_fault *vmf)
591 struct inode *inode = file_inode(vmf->vma->vm_file);
592 struct zonefs_inode_info *zi = ZONEFS_I(inode);
595 if (unlikely(IS_IMMUTABLE(inode)))
596 return VM_FAULT_SIGBUS;
599 * Sanity check: only conventional zone files can have shared
600 * writeable mappings.
602 if (WARN_ON_ONCE(zi->i_ztype != ZONEFS_ZTYPE_CNV))
603 return VM_FAULT_NOPAGE;
605 sb_start_pagefault(inode->i_sb);
606 file_update_time(vmf->vma->vm_file);
608 /* Serialize against truncates */
609 filemap_invalidate_lock_shared(inode->i_mapping);
610 ret = iomap_page_mkwrite(vmf, &zonefs_iomap_ops);
611 filemap_invalidate_unlock_shared(inode->i_mapping);
613 sb_end_pagefault(inode->i_sb);
617 static const struct vm_operations_struct zonefs_file_vm_ops = {
618 .fault = filemap_fault,
619 .map_pages = filemap_map_pages,
620 .page_mkwrite = zonefs_filemap_page_mkwrite,
623 static int zonefs_file_mmap(struct file *file, struct vm_area_struct *vma)
626 * Conventional zones accept random writes, so their files can support
627 * shared writable mappings. For sequential zone files, only read
628 * mappings are possible since there are no guarantees for write
629 * ordering between msync() and page cache writeback.
631 if (ZONEFS_I(file_inode(file))->i_ztype == ZONEFS_ZTYPE_SEQ &&
632 (vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE))
636 vma->vm_ops = &zonefs_file_vm_ops;
641 static loff_t zonefs_file_llseek(struct file *file, loff_t offset, int whence)
643 loff_t isize = i_size_read(file_inode(file));
646 * Seeks are limited to below the zone size for conventional zones
647 * and below the zone write pointer for sequential zones. In both
648 * cases, this limit is the inode size.
650 return generic_file_llseek_size(file, offset, whence, isize, isize);
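/*
 * Direct write completion callback: run IO error processing if the write
 * failed and, for sequential zone files, advance the inode size to the end
 * of the completed write.
 */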
653 static int zonefs_file_write_dio_end_io(struct kiocb *iocb, ssize_t size,
654 int error, unsigned int flags)
656 struct inode *inode = file_inode(iocb->ki_filp);
657 struct zonefs_inode_info *zi = ZONEFS_I(inode);
660 zonefs_io_error(inode, true);
664 if (size && zi->i_ztype != ZONEFS_ZTYPE_CNV) {
666 * Note that we may be seeing completions out of order,
667 * but that is not a problem since a write completed
668 * successfully necessarily means that all preceding writes
669 * were also successful. So we can safely increase the inode
670 * size to the write end location.
672 mutex_lock(&zi->i_truncate_mutex);
673 if (i_size_read(inode) < iocb->ki_pos + size) {
674 zonefs_update_stats(inode, iocb->ki_pos + size);
675 zonefs_i_size_write(inode, iocb->ki_pos + size);
677 mutex_unlock(&zi->i_truncate_mutex);
683 static const struct iomap_dio_ops zonefs_write_dio_ops = {
684 .end_io = zonefs_file_write_dio_end_io,
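/*
 * Handle a synchronous direct write to a sequential zone file using a zone
 * append BIO: the write size is capped to the device maximum zone append
 * size, the BIO targets the zone start sector and the device determines the
 * effective write position.
 */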
687 static ssize_t zonefs_file_dio_append(struct kiocb *iocb, struct iov_iter *from)
689 struct inode *inode = file_inode(iocb->ki_filp);
690 struct zonefs_inode_info *zi = ZONEFS_I(inode);
691 struct block_device *bdev = inode->i_sb->s_bdev;
698 max = queue_max_zone_append_sectors(bdev_get_queue(bdev));
699 max = ALIGN_DOWN(max << SECTOR_SHIFT, inode->i_sb->s_blocksize);
700 iov_iter_truncate(from, max);
702 nr_pages = iov_iter_npages(from, BIO_MAX_VECS);
706 bio = bio_alloc(bdev, nr_pages,
707 REQ_OP_ZONE_APPEND | REQ_SYNC | REQ_IDLE, GFP_NOFS);
708 bio->bi_iter.bi_sector = zi->i_zsector;
709 bio->bi_ioprio = iocb->ki_ioprio;
710 if (iocb->ki_flags & IOCB_DSYNC)
711 bio->bi_opf |= REQ_FUA;
713 ret = bio_iov_iter_get_pages(bio, from);
717 size = bio->bi_iter.bi_size;
718 task_io_account_write(size);
720 if (iocb->ki_flags & IOCB_HIPRI)
721 bio_set_polled(bio, iocb);
723 ret = submit_bio_wait(bio);
725 zonefs_file_write_dio_end_io(iocb, size, ret, 0);
726 trace_zonefs_file_dio_append(inode, size, ret);
729 bio_release_pages(bio, false);
733 iocb->ki_pos += size;
741 * Do not exceed the LFS limits nor the file zone size. If pos is under the
742 * limit it becomes a short access. If it exceeds the limit, return -EFBIG.
744 static loff_t zonefs_write_check_limits(struct file *file, loff_t pos,
747 struct inode *inode = file_inode(file);
748 struct zonefs_inode_info *zi = ZONEFS_I(inode);
749 loff_t limit = rlimit(RLIMIT_FSIZE);
750 loff_t max_size = zi->i_max_size;
752 if (limit != RLIM_INFINITY) {
754 send_sig(SIGXFSZ, current, 0);
757 count = min(count, limit - pos);
760 if (!(file->f_flags & O_LARGEFILE))
761 max_size = min_t(loff_t, MAX_NON_LFS, max_size);
763 if (unlikely(pos >= max_size))
766 return min(count, max_size - pos);
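/*
 * Checks common to buffered and direct writes: append writes to sequential
 * zone files are positioned at the zone write pointer offset, and the write
 * is truncated to the file size limits.
 */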
769 static ssize_t zonefs_write_checks(struct kiocb *iocb, struct iov_iter *from)
771 struct file *file = iocb->ki_filp;
772 struct inode *inode = file_inode(file);
773 struct zonefs_inode_info *zi = ZONEFS_I(inode);
776 if (IS_SWAPFILE(inode))
779 if (!iov_iter_count(from))
782 if ((iocb->ki_flags & IOCB_NOWAIT) && !(iocb->ki_flags & IOCB_DIRECT))
785 if (iocb->ki_flags & IOCB_APPEND) {
786 if (zi->i_ztype != ZONEFS_ZTYPE_SEQ)
788 mutex_lock(&zi->i_truncate_mutex);
789 iocb->ki_pos = zi->i_wpoffset;
790 mutex_unlock(&zi->i_truncate_mutex);
793 count = zonefs_write_check_limits(file, iocb->ki_pos,
794 iov_iter_count(from));
798 iov_iter_truncate(from, count);
799 return iov_iter_count(from);
803 * Handle direct writes. For sequential zone files, this is the only possible
804 * write path. For these files, check that the user is issuing writes
805 * sequentially from the end of the file. This code assumes that the block layer
806 * delivers write requests to the device in sequential order. This is always the
807 * case if a block IO scheduler implementing the ELEVATOR_F_ZBD_SEQ_WRITE
808 * elevator feature is being used (e.g. mq-deadline). The block layer always
809 * automatically selects such an elevator for zoned block devices during the
810 * device initialization.
812 static ssize_t zonefs_file_dio_write(struct kiocb *iocb, struct iov_iter *from)
814 struct inode *inode = file_inode(iocb->ki_filp);
815 struct zonefs_inode_info *zi = ZONEFS_I(inode);
816 struct super_block *sb = inode->i_sb;
817 bool sync = is_sync_kiocb(iocb);
822 * For async direct IOs to sequential zone files, refuse IOCB_NOWAIT
823 * as this can cause write reordering (e.g. the first aio gets EAGAIN
824 * on the inode lock but the second goes through but is now unaligned).
826 if (zi->i_ztype == ZONEFS_ZTYPE_SEQ && !sync &&
827 (iocb->ki_flags & IOCB_NOWAIT))
830 if (iocb->ki_flags & IOCB_NOWAIT) {
831 if (!inode_trylock(inode))
837 count = zonefs_write_checks(iocb, from);
843 if ((iocb->ki_pos | count) & (sb->s_blocksize - 1)) {
848 /* Enforce sequential writes (append only) in sequential zones */
849 if (zi->i_ztype == ZONEFS_ZTYPE_SEQ) {
850 mutex_lock(&zi->i_truncate_mutex);
851 if (iocb->ki_pos != zi->i_wpoffset) {
852 mutex_unlock(&zi->i_truncate_mutex);
856 mutex_unlock(&zi->i_truncate_mutex);
861 ret = zonefs_file_dio_append(iocb, from);
863 ret = iomap_dio_rw(iocb, from, &zonefs_iomap_ops,
864 &zonefs_write_dio_ops, 0, 0);
865 if (zi->i_ztype == ZONEFS_ZTYPE_SEQ &&
866 (ret > 0 || ret == -EIOCBQUEUED)) {
869 mutex_lock(&zi->i_truncate_mutex);
870 zi->i_wpoffset += count;
871 mutex_unlock(&zi->i_truncate_mutex);
880 static ssize_t zonefs_file_buffered_write(struct kiocb *iocb,
881 struct iov_iter *from)
883 struct inode *inode = file_inode(iocb->ki_filp);
884 struct zonefs_inode_info *zi = ZONEFS_I(inode);
888 * Direct IO writes are mandatory for sequential zone files so that the
889 * write IO issuing order is preserved.
891 if (zi->i_ztype != ZONEFS_ZTYPE_CNV)
894 if (iocb->ki_flags & IOCB_NOWAIT) {
895 if (!inode_trylock(inode))
901 ret = zonefs_write_checks(iocb, from);
905 ret = iomap_file_buffered_write(iocb, from, &zonefs_iomap_ops);
908 else if (ret == -EIO)
909 zonefs_io_error(inode, true);
914 ret = generic_write_sync(iocb, ret);
919 static ssize_t zonefs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
921 struct inode *inode = file_inode(iocb->ki_filp);
923 if (unlikely(IS_IMMUTABLE(inode)))
926 if (sb_rdonly(inode->i_sb))
929 /* Write operations beyond the zone size are not allowed */
930 if (iocb->ki_pos >= ZONEFS_I(inode)->i_max_size)
933 if (iocb->ki_flags & IOCB_DIRECT) {
934 ssize_t ret = zonefs_file_dio_write(iocb, from);
939 return zonefs_file_buffered_write(iocb, from);
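/* Direct read completion callback: check the file zone condition on IO errors. */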
942 static int zonefs_file_read_dio_end_io(struct kiocb *iocb, ssize_t size,
943 int error, unsigned int flags)
946 zonefs_io_error(file_inode(iocb->ki_filp), false);
953 static const struct iomap_dio_ops zonefs_read_dio_ops = {
954 .end_io = zonefs_file_read_dio_end_io,
957 static ssize_t zonefs_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
959 struct inode *inode = file_inode(iocb->ki_filp);
960 struct zonefs_inode_info *zi = ZONEFS_I(inode);
961 struct super_block *sb = inode->i_sb;
965 /* Offline zones cannot be read */
966 if (unlikely(IS_IMMUTABLE(inode) && !(inode->i_mode & 0777)))
969 if (iocb->ki_pos >= zi->i_max_size)
972 if (iocb->ki_flags & IOCB_NOWAIT) {
973 if (!inode_trylock_shared(inode))
976 inode_lock_shared(inode);
979 /* Limit read operations to written data */
980 mutex_lock(&zi->i_truncate_mutex);
981 isize = i_size_read(inode);
982 if (iocb->ki_pos >= isize) {
983 mutex_unlock(&zi->i_truncate_mutex);
987 iov_iter_truncate(to, isize - iocb->ki_pos);
988 mutex_unlock(&zi->i_truncate_mutex);
990 if (iocb->ki_flags & IOCB_DIRECT) {
991 size_t count = iov_iter_count(to);
993 if ((iocb->ki_pos | count) & (sb->s_blocksize - 1)) {
997 file_accessed(iocb->ki_filp);
998 ret = iomap_dio_rw(iocb, to, &zonefs_iomap_ops,
999 &zonefs_read_dio_ops, 0, 0);
1001 ret = generic_file_read_iter(iocb, to);
1003 zonefs_io_error(inode, false);
1007 inode_unlock_shared(inode);
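/*
 * Determine if the file zone must be explicitly opened and closed: this is
 * needed only for sequential zone files opened for writing when the
 * explicit-open mount option is used.
 */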
1012 static inline bool zonefs_file_use_exp_open(struct inode *inode, struct file *file)
1014 struct zonefs_inode_info *zi = ZONEFS_I(inode);
1015 struct zonefs_sb_info *sbi = ZONEFS_SB(inode->i_sb);
1017 if (!(sbi->s_mount_opts & ZONEFS_MNTOPT_EXPLICIT_OPEN))
1020 if (zi->i_ztype != ZONEFS_ZTYPE_SEQ)
1023 if (!(file->f_mode & FMODE_WRITE))
1029 static int zonefs_open_zone(struct inode *inode)
1031 struct zonefs_inode_info *zi = ZONEFS_I(inode);
1032 struct zonefs_sb_info *sbi = ZONEFS_SB(inode->i_sb);
1035 mutex_lock(&zi->i_truncate_mutex);
1037 if (!zi->i_wr_refcnt) {
1038 if (atomic_inc_return(&sbi->s_open_zones) > sbi->s_max_open_zones) {
1039 atomic_dec(&sbi->s_open_zones);
1044 if (i_size_read(inode) < zi->i_max_size) {
1045 ret = zonefs_zone_mgmt(inode, REQ_OP_ZONE_OPEN);
1047 atomic_dec(&sbi->s_open_zones);
1050 zi->i_flags |= ZONEFS_ZONE_OPEN;
1057 mutex_unlock(&zi->i_truncate_mutex);
1062 static int zonefs_file_open(struct inode *inode, struct file *file)
1066 ret = generic_file_open(inode, file);
1070 if (zonefs_file_use_exp_open(inode, file))
1071 return zonefs_open_zone(inode);
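/*
 * Release an explicitly opened zone when the last writer closes the file:
 * close the zone on the device, clear the ZONEFS_ZONE_OPEN flag and drop the
 * open zone count. If the close fails and the zone remains open, remount
 * read-only to avoid exhausting the device zone resources.
 */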
1076 static void zonefs_close_zone(struct inode *inode)
1078 struct zonefs_inode_info *zi = ZONEFS_I(inode);
1081 mutex_lock(&zi->i_truncate_mutex);
1083 if (!zi->i_wr_refcnt) {
1084 struct zonefs_sb_info *sbi = ZONEFS_SB(inode->i_sb);
1085 struct super_block *sb = inode->i_sb;
1088 * If the file zone is full, it is not open anymore and we only
1089 * need to decrement the open count.
1091 if (!(zi->i_flags & ZONEFS_ZONE_OPEN))
1094 ret = zonefs_zone_mgmt(inode, REQ_OP_ZONE_CLOSE);
1096 __zonefs_io_error(inode, false);
1098 * Leaving zones explicitly open may lead to a state
1099 * where most zones cannot be written (zone resources
1100 * exhausted). So take preventive action by remounting read-only.
1103 if (zi->i_flags & ZONEFS_ZONE_OPEN &&
1104 !(sb->s_flags & SB_RDONLY)) {
1105 zonefs_warn(sb, "closing zone failed, remounting filesystem read-only\n");
1106 sb->s_flags |= SB_RDONLY;
1109 zi->i_flags &= ~ZONEFS_ZONE_OPEN;
1111 atomic_dec(&sbi->s_open_zones);
1113 mutex_unlock(&zi->i_truncate_mutex);
1116 static int zonefs_file_release(struct inode *inode, struct file *file)
1119 * If we explicitly open a zone we must close it again as well, but the
1120 * zone management operation can fail (either due to an IO error or as
1121 * the zone has gone offline or read-only). Make sure we don't fail the
1122 * close(2) for user-space.
1124 if (zonefs_file_use_exp_open(inode, file))
1125 zonefs_close_zone(inode);
1130 static const struct file_operations zonefs_file_operations = {
1131 .open = zonefs_file_open,
1132 .release = zonefs_file_release,
1133 .fsync = zonefs_file_fsync,
1134 .mmap = zonefs_file_mmap,
1135 .llseek = zonefs_file_llseek,
1136 .read_iter = zonefs_file_read_iter,
1137 .write_iter = zonefs_file_write_iter,
1138 .splice_read = generic_file_splice_read,
1139 .splice_write = iter_file_splice_write,
1140 .iopoll = iocb_bio_iopoll,
1143 static struct kmem_cache *zonefs_inode_cachep;
1145 static struct inode *zonefs_alloc_inode(struct super_block *sb)
1147 struct zonefs_inode_info *zi;
1149 zi = alloc_inode_sb(sb, zonefs_inode_cachep, GFP_KERNEL);
1153 inode_init_once(&zi->i_vnode);
1154 mutex_init(&zi->i_truncate_mutex);
1155 zi->i_wr_refcnt = 0;
1158 return &zi->i_vnode;
1161 static void zonefs_free_inode(struct inode *inode)
1163 kmem_cache_free(zonefs_inode_cachep, ZONEFS_I(inode));
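/*
 * Report volume usage: block counts come from the in-memory super block
 * accounting and the file count is fixed by the zone groups created at mount.
 */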
1169 static int zonefs_statfs(struct dentry *dentry, struct kstatfs *buf)
1171 struct super_block *sb = dentry->d_sb;
1172 struct zonefs_sb_info *sbi = ZONEFS_SB(sb);
1173 enum zonefs_ztype t;
1175 buf->f_type = ZONEFS_MAGIC;
1176 buf->f_bsize = sb->s_blocksize;
1177 buf->f_namelen = ZONEFS_NAME_MAX;
1179 spin_lock(&sbi->s_lock);
1181 buf->f_blocks = sbi->s_blocks;
1182 if (WARN_ON(sbi->s_used_blocks > sbi->s_blocks))
1185 buf->f_bfree = buf->f_blocks - sbi->s_used_blocks;
1186 buf->f_bavail = buf->f_bfree;
1188 for (t = 0; t < ZONEFS_ZTYPE_MAX; t++) {
1189 if (sbi->s_nr_files[t])
1190 buf->f_files += sbi->s_nr_files[t] + 1;
1194 spin_unlock(&sbi->s_lock);
1196 buf->f_fsid = uuid_to_fsid(sbi->s_uuid.b);
1202 Opt_errors_ro, Opt_errors_zro, Opt_errors_zol, Opt_errors_repair,
1203 Opt_explicit_open, Opt_err,
1206 static const match_table_t tokens = {
1207 { Opt_errors_ro, "errors=remount-ro"},
1208 { Opt_errors_zro, "errors=zone-ro"},
1209 { Opt_errors_zol, "errors=zone-offline"},
1210 { Opt_errors_repair, "errors=repair"},
1211 { Opt_explicit_open, "explicit-open" },
1215 static int zonefs_parse_options(struct super_block *sb, char *options)
1217 struct zonefs_sb_info *sbi = ZONEFS_SB(sb);
1218 substring_t args[MAX_OPT_ARGS];
1224 while ((p = strsep(&options, ",")) != NULL) {
1230 token = match_token(p, tokens, args);
1233 sbi->s_mount_opts &= ~ZONEFS_MNTOPT_ERRORS_MASK;
1234 sbi->s_mount_opts |= ZONEFS_MNTOPT_ERRORS_RO;
1236 case Opt_errors_zro:
1237 sbi->s_mount_opts &= ~ZONEFS_MNTOPT_ERRORS_MASK;
1238 sbi->s_mount_opts |= ZONEFS_MNTOPT_ERRORS_ZRO;
1240 case Opt_errors_zol:
1241 sbi->s_mount_opts &= ~ZONEFS_MNTOPT_ERRORS_MASK;
1242 sbi->s_mount_opts |= ZONEFS_MNTOPT_ERRORS_ZOL;
1244 case Opt_errors_repair:
1245 sbi->s_mount_opts &= ~ZONEFS_MNTOPT_ERRORS_MASK;
1246 sbi->s_mount_opts |= ZONEFS_MNTOPT_ERRORS_REPAIR;
1248 case Opt_explicit_open:
1249 sbi->s_mount_opts |= ZONEFS_MNTOPT_EXPLICIT_OPEN;
1259 static int zonefs_show_options(struct seq_file *seq, struct dentry *root)
1261 struct zonefs_sb_info *sbi = ZONEFS_SB(root->d_sb);
1263 if (sbi->s_mount_opts & ZONEFS_MNTOPT_ERRORS_RO)
1264 seq_puts(seq, ",errors=remount-ro");
1265 if (sbi->s_mount_opts & ZONEFS_MNTOPT_ERRORS_ZRO)
1266 seq_puts(seq, ",errors=zone-ro");
1267 if (sbi->s_mount_opts & ZONEFS_MNTOPT_ERRORS_ZOL)
1268 seq_puts(seq, ",errors=zone-offline");
1269 if (sbi->s_mount_opts & ZONEFS_MNTOPT_ERRORS_REPAIR)
1270 seq_puts(seq, ",errors=repair");
1275 static int zonefs_remount(struct super_block *sb, int *flags, char *data)
1277 sync_filesystem(sb);
1279 return zonefs_parse_options(sb, data);
1282 static const struct super_operations zonefs_sops = {
1283 .alloc_inode = zonefs_alloc_inode,
1284 .free_inode = zonefs_free_inode,
1285 .statfs = zonefs_statfs,
1286 .remount_fs = zonefs_remount,
1287 .show_options = zonefs_show_options,
1290 static const struct inode_operations zonefs_dir_inode_operations = {
1291 .lookup = simple_lookup,
1292 .setattr = zonefs_inode_setattr,
1295 static void zonefs_init_dir_inode(struct inode *parent, struct inode *inode,
1296 enum zonefs_ztype type)
1298 struct super_block *sb = parent->i_sb;
1300 inode->i_ino = blkdev_nr_zones(sb->s_bdev->bd_disk) + type + 1;
1301 inode_init_owner(&init_user_ns, inode, parent, S_IFDIR | 0555);
1302 inode->i_op = &zonefs_dir_inode_operations;
1303 inode->i_fop = &simple_dir_operations;
1304 set_nlink(inode, 2);
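/*
 * Initialize a zone file inode from its zone descriptor: the inode number is
 * derived from the zone start sector, the maximum file size from the zone
 * capacity and the initial file size from the zone write pointer position.
 */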
1308 static int zonefs_init_file_inode(struct inode *inode, struct blk_zone *zone,
1309 enum zonefs_ztype type)
1311 struct super_block *sb = inode->i_sb;
1312 struct zonefs_sb_info *sbi = ZONEFS_SB(sb);
1313 struct zonefs_inode_info *zi = ZONEFS_I(inode);
1316 inode->i_ino = zone->start >> sbi->s_zone_sectors_shift;
1317 inode->i_mode = S_IFREG | sbi->s_perm;
1320 zi->i_zsector = zone->start;
1321 zi->i_zone_size = zone->len << SECTOR_SHIFT;
1323 zi->i_max_size = min_t(loff_t, MAX_LFS_FILESIZE,
1324 zone->capacity << SECTOR_SHIFT);
1325 zi->i_wpoffset = zonefs_check_zone_condition(inode, zone, true, true);
1327 inode->i_uid = sbi->s_uid;
1328 inode->i_gid = sbi->s_gid;
1329 inode->i_size = zi->i_wpoffset;
1330 inode->i_blocks = zi->i_max_size >> SECTOR_SHIFT;
1332 inode->i_op = &zonefs_file_inode_operations;
1333 inode->i_fop = &zonefs_file_operations;
1334 inode->i_mapping->a_ops = &zonefs_file_aops;
1336 sb->s_maxbytes = max(zi->i_max_size, sb->s_maxbytes);
1337 sbi->s_blocks += zi->i_max_size >> sb->s_blocksize_bits;
1338 sbi->s_used_blocks += zi->i_wpoffset >> sb->s_blocksize_bits;
1341 * For sequential zones, make sure that any open zone is closed first
1342 * to ensure that the initial number of open zones is 0, in sync with
1343 * the open zone accounting done when the mount option
1344 * ZONEFS_MNTOPT_EXPLICIT_OPEN is used.
1346 if (type == ZONEFS_ZTYPE_SEQ &&
1347 (zone->cond == BLK_ZONE_COND_IMP_OPEN ||
1348 zone->cond == BLK_ZONE_COND_EXP_OPEN)) {
1349 mutex_lock(&zi->i_truncate_mutex);
1350 ret = zonefs_zone_mgmt(inode, REQ_OP_ZONE_CLOSE);
1351 mutex_unlock(&zi->i_truncate_mutex);
1357 static struct dentry *zonefs_create_inode(struct dentry *parent,
1358 const char *name, struct blk_zone *zone,
1359 enum zonefs_ztype type)
1361 struct inode *dir = d_inode(parent);
1362 struct dentry *dentry;
1363 struct inode *inode;
1366 dentry = d_alloc_name(parent, name);
1370 inode = new_inode(parent->d_sb);
1374 inode->i_ctime = inode->i_mtime = inode->i_atime = dir->i_ctime;
1376 ret = zonefs_init_file_inode(inode, zone, type);
1382 zonefs_init_dir_inode(dir, inode, type);
1385 d_add(dentry, inode);
1396 struct zonefs_zone_data {
1397 struct super_block *sb;
1398 unsigned int nr_zones[ZONEFS_ZTYPE_MAX];
1399 struct blk_zone *zones;
1403 * Create a zone group and populate it with zone files.
1405 static int zonefs_create_zgroup(struct zonefs_zone_data *zd,
1406 enum zonefs_ztype type)
1408 struct super_block *sb = zd->sb;
1409 struct zonefs_sb_info *sbi = ZONEFS_SB(sb);
1410 struct blk_zone *zone, *next, *end;
1411 const char *zgroup_name;
1417 /* If the group is empty, there is nothing to do */
1418 if (!zd->nr_zones[type])
1421 file_name = kmalloc(ZONEFS_NAME_MAX, GFP_KERNEL);
1425 if (type == ZONEFS_ZTYPE_CNV)
1426 zgroup_name = "cnv";
1428 zgroup_name = "seq";
1430 dir = zonefs_create_inode(sb->s_root, zgroup_name, NULL, type);
1437 * The first zone contains the super block: skip it.
1439 end = zd->zones + blkdev_nr_zones(sb->s_bdev->bd_disk);
1440 for (zone = &zd->zones[1]; zone < end; zone = next) {
1443 if (zonefs_zone_type(zone) != type)
1447 * For conventional zones, contiguous zones can be aggregated
1448 * together to form larger files. Note that this overwrites the
1449 * length of the first zone of the set of contiguous zones
1450 * aggregated together. If one offline or read-only zone is
1451 * found, assume that all zones aggregated have the same condition.
1454 if (type == ZONEFS_ZTYPE_CNV &&
1455 (sbi->s_features & ZONEFS_F_AGGRCNV)) {
1456 for (; next < end; next++) {
1457 if (zonefs_zone_type(next) != type)
1459 zone->len += next->len;
1460 zone->capacity += next->capacity;
1461 if (next->cond == BLK_ZONE_COND_READONLY &&
1462 zone->cond != BLK_ZONE_COND_OFFLINE)
1463 zone->cond = BLK_ZONE_COND_READONLY;
1464 else if (next->cond == BLK_ZONE_COND_OFFLINE)
1465 zone->cond = BLK_ZONE_COND_OFFLINE;
1467 if (zone->capacity != zone->len) {
1468 zonefs_err(sb, "Invalid conventional zone capacity\n");
1475 * Use the file number within its group as file name.
1477 snprintf(file_name, ZONEFS_NAME_MAX - 1, "%u", n);
1478 if (!zonefs_create_inode(dir, file_name, zone, type)) {
1486 zonefs_info(sb, "Zone group \"%s\" has %u file%s\n",
1487 zgroup_name, n, n > 1 ? "s" : "");
1489 sbi->s_nr_files[type] = n;
1498 static int zonefs_get_zone_info_cb(struct blk_zone *zone, unsigned int idx,
1501 struct zonefs_zone_data *zd = data;
1504 * Count the number of usable zones: the first zone at index 0 contains
1505 * the super block and is ignored.
1507 switch (zone->type) {
1508 case BLK_ZONE_TYPE_CONVENTIONAL:
1509 zone->wp = zone->start + zone->len;
1511 zd->nr_zones[ZONEFS_ZTYPE_CNV]++;
1513 case BLK_ZONE_TYPE_SEQWRITE_REQ:
1514 case BLK_ZONE_TYPE_SEQWRITE_PREF:
1516 zd->nr_zones[ZONEFS_ZTYPE_SEQ]++;
1519 zonefs_err(zd->sb, "Unsupported zone type 0x%x\n",
1524 memcpy(&zd->zones[idx], zone, sizeof(struct blk_zone));
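/*
 * Allocate the zone descriptor array and get the zone information of all
 * zones of the backing device using a full zone report.
 */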
1529 static int zonefs_get_zone_info(struct zonefs_zone_data *zd)
1531 struct block_device *bdev = zd->sb->s_bdev;
1534 zd->zones = kvcalloc(blkdev_nr_zones(bdev->bd_disk),
1535 sizeof(struct blk_zone), GFP_KERNEL);
1539 /* Get zones information from the device */
1540 ret = blkdev_report_zones(bdev, 0, BLK_ALL_ZONES,
1541 zonefs_get_zone_info_cb, zd);
1543 zonefs_err(zd->sb, "Zone report failed %d\n", ret);
1547 if (ret != blkdev_nr_zones(bdev->bd_disk)) {
1548 zonefs_err(zd->sb, "Invalid zone report (%d/%u zones)\n",
1549 ret, blkdev_nr_zones(bdev->bd_disk));
1556 static inline void zonefs_cleanup_zone_info(struct zonefs_zone_data *zd)
1562 * Read super block information from the device.
1564 static int zonefs_read_super(struct super_block *sb)
1566 struct zonefs_sb_info *sbi = ZONEFS_SB(sb);
1567 struct zonefs_super *super;
1568 u32 crc, stored_crc;
1570 struct bio_vec bio_vec;
1574 page = alloc_page(GFP_KERNEL);
1578 bio_init(&bio, sb->s_bdev, &bio_vec, 1, REQ_OP_READ);
1579 bio.bi_iter.bi_sector = 0;
1580 bio_add_page(&bio, page, PAGE_SIZE, 0);
1582 ret = submit_bio_wait(&bio);
1589 if (le32_to_cpu(super->s_magic) != ZONEFS_MAGIC)
1592 stored_crc = le32_to_cpu(super->s_crc);
1594 crc = crc32(~0U, (unsigned char *)super, sizeof(struct zonefs_super));
1595 if (crc != stored_crc) {
1596 zonefs_err(sb, "Invalid checksum (Expected 0x%08x, got 0x%08x)",
1601 sbi->s_features = le64_to_cpu(super->s_features);
1602 if (sbi->s_features & ~ZONEFS_F_DEFINED_FEATURES) {
1603 zonefs_err(sb, "Unknown features set 0x%llx\n",
1608 if (sbi->s_features & ZONEFS_F_UID) {
1609 sbi->s_uid = make_kuid(current_user_ns(),
1610 le32_to_cpu(super->s_uid));
1611 if (!uid_valid(sbi->s_uid)) {
1612 zonefs_err(sb, "Invalid UID feature\n");
1617 if (sbi->s_features & ZONEFS_F_GID) {
1618 sbi->s_gid = make_kgid(current_user_ns(),
1619 le32_to_cpu(super->s_gid));
1620 if (!gid_valid(sbi->s_gid)) {
1621 zonefs_err(sb, "Invalid GID feature\n");
1626 if (sbi->s_features & ZONEFS_F_PERM)
1627 sbi->s_perm = le32_to_cpu(super->s_perm);
1629 if (memchr_inv(super->s_reserved, 0, sizeof(super->s_reserved))) {
1630 zonefs_err(sb, "Reserved area is being used\n");
1634 import_uuid(&sbi->s_uuid, super->s_uuid);
1646 * Check that the device is zoned. If it is, get the list of zones and create
1647 * sub-directories and files according to the device zone configuration and format options.
1650 static int zonefs_fill_super(struct super_block *sb, void *data, int silent)
1652 struct zonefs_zone_data zd;
1653 struct zonefs_sb_info *sbi;
1654 struct inode *inode;
1655 enum zonefs_ztype t;
1658 if (!bdev_is_zoned(sb->s_bdev)) {
1659 zonefs_err(sb, "Not a zoned block device\n");
1664 * Initialize super block information: the maximum file size is updated
1665 * when the zone files are created so that the format option
1666 * ZONEFS_F_AGGRCNV which increases the maximum file size of a file
1667 * beyond the zone size is taken into account.
1669 sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
1673 spin_lock_init(&sbi->s_lock);
1674 sb->s_fs_info = sbi;
1675 sb->s_magic = ZONEFS_MAGIC;
1677 sb->s_op = &zonefs_sops;
1678 sb->s_time_gran = 1;
1681 * The block size is set to the device zone write granularity to ensure
1682 * that write operations are always aligned according to the device
1683 * interface constraints.
1685 sb_set_blocksize(sb, bdev_zone_write_granularity(sb->s_bdev));
1686 sbi->s_zone_sectors_shift = ilog2(bdev_zone_sectors(sb->s_bdev));
1687 sbi->s_uid = GLOBAL_ROOT_UID;
1688 sbi->s_gid = GLOBAL_ROOT_GID;
1690 sbi->s_mount_opts = ZONEFS_MNTOPT_ERRORS_RO;
1691 sbi->s_max_open_zones = bdev_max_open_zones(sb->s_bdev);
1692 atomic_set(&sbi->s_open_zones, 0);
1693 if (!sbi->s_max_open_zones &&
1694 sbi->s_mount_opts & ZONEFS_MNTOPT_EXPLICIT_OPEN) {
1695 zonefs_info(sb, "No open zones limit. Ignoring explicit_open mount option\n");
1696 sbi->s_mount_opts &= ~ZONEFS_MNTOPT_EXPLICIT_OPEN;
1699 ret = zonefs_read_super(sb);
1703 ret = zonefs_parse_options(sb, data);
1707 memset(&zd, 0, sizeof(struct zonefs_zone_data));
1709 ret = zonefs_get_zone_info(&zd);
1713 zonefs_info(sb, "Mounting %u zones",
1714 blkdev_nr_zones(sb->s_bdev->bd_disk));
1716 /* Create root directory inode */
1718 inode = new_inode(sb);
1722 inode->i_ino = blkdev_nr_zones(sb->s_bdev->bd_disk);
1723 inode->i_mode = S_IFDIR | 0555;
1724 inode->i_ctime = inode->i_mtime = inode->i_atime = current_time(inode);
1725 inode->i_op = &zonefs_dir_inode_operations;
1726 inode->i_fop = &simple_dir_operations;
1727 set_nlink(inode, 2);
1729 sb->s_root = d_make_root(inode);
1733 /* Create and populate files in zone groups directories */
1734 for (t = 0; t < ZONEFS_ZTYPE_MAX; t++) {
1735 ret = zonefs_create_zgroup(&zd, t);
1741 zonefs_cleanup_zone_info(&zd);
1746 static struct dentry *zonefs_mount(struct file_system_type *fs_type,
1747 int flags, const char *dev_name, void *data)
1749 return mount_bdev(fs_type, flags, dev_name, data, zonefs_fill_super);
1752 static void zonefs_kill_super(struct super_block *sb)
1754 struct zonefs_sb_info *sbi = ZONEFS_SB(sb);
1757 d_genocide(sb->s_root);
1758 kill_block_super(sb);
1763 * File system definition and registration.
1765 static struct file_system_type zonefs_type = {
1766 .owner = THIS_MODULE,
1768 .mount = zonefs_mount,
1769 .kill_sb = zonefs_kill_super,
1770 .fs_flags = FS_REQUIRES_DEV,
1773 static int __init zonefs_init_inodecache(void)
1775 zonefs_inode_cachep = kmem_cache_create("zonefs_inode_cache",
1776 sizeof(struct zonefs_inode_info), 0,
1777 (SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD | SLAB_ACCOUNT),
1779 if (zonefs_inode_cachep == NULL)
1784 static void zonefs_destroy_inodecache(void)
1787 * Make sure all delayed rcu free inodes are flushed before we
1788 * destroy the inode cache.
1791 kmem_cache_destroy(zonefs_inode_cachep);
1794 static int __init zonefs_init(void)
1798 BUILD_BUG_ON(sizeof(struct zonefs_super) != ZONEFS_SUPER_SIZE);
1800 ret = zonefs_init_inodecache();
1804 ret = register_filesystem(&zonefs_type);
1806 zonefs_destroy_inodecache();
1813 static void __exit zonefs_exit(void)
1815 zonefs_destroy_inodecache();
1816 unregister_filesystem(&zonefs_type);
1819 MODULE_AUTHOR("Damien Le Moal");
1820 MODULE_DESCRIPTION("Zone file system for zoned block devices");
1821 MODULE_LICENSE("GPL");
1822 MODULE_ALIAS_FS("zonefs");
1823 module_init(zonefs_init);
1824 module_exit(zonefs_exit);