1 // SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
8 #include "xfs_shared.h"
9 #include "xfs_format.h"
10 #include "xfs_log_format.h"
11 #include "xfs_trans_resv.h"
13 #include "xfs_mount.h"
14 #include "xfs_inode.h"
15 #include "xfs_btree.h"
17 #include "xfs_alloc.h"
18 #include "xfs_fsops.h"
19 #include "xfs_trans.h"
20 #include "xfs_buf_item.h"
22 #include "xfs_log_priv.h"
24 #include "xfs_extfree_item.h"
25 #include "xfs_mru_cache.h"
26 #include "xfs_inode_item.h"
27 #include "xfs_icache.h"
28 #include "xfs_trace.h"
29 #include "xfs_icreate_item.h"
30 #include "xfs_filestream.h"
31 #include "xfs_quota.h"
32 #include "xfs_sysfs.h"
33 #include "xfs_ondisk.h"
34 #include "xfs_rmap_item.h"
35 #include "xfs_refcount_item.h"
36 #include "xfs_bmap_item.h"
37 #include "xfs_reflink.h"
38 #include "xfs_pwork.h"
41 #include <linux/magic.h>
42 #include <linux/fs_context.h>
43 #include <linux/fs_parser.h>
45 static const struct super_operations xfs_super_operations;
47 static struct kset *xfs_kset; /* top-level xfs sysfs dir */
49 static struct xfs_kobj xfs_dbg_kobj; /* global debug sysfs attrs */
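/*
 * Apply the requested DAX mode to the mount flags. "always" and "never" are
 * mutually exclusive, and the "inode" mode clears both so that DAX use is
 * decided per inode.
 */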
59 xfs_mount_set_dax_mode(
61 enum xfs_dax_mode mode)
65 mp->m_flags &= ~(XFS_MOUNT_DAX_ALWAYS | XFS_MOUNT_DAX_NEVER);
68 mp->m_flags |= XFS_MOUNT_DAX_ALWAYS;
69 mp->m_flags &= ~XFS_MOUNT_DAX_NEVER;
72 mp->m_flags |= XFS_MOUNT_DAX_NEVER;
73 mp->m_flags &= ~XFS_MOUNT_DAX_ALWAYS;
78 static const struct constant_table dax_param_enums[] = {
79 {"inode", XFS_DAX_INODE },
80 {"always", XFS_DAX_ALWAYS },
81 {"never", XFS_DAX_NEVER },
86 * Table driven mount option parser.
89 Opt_logbufs, Opt_logbsize, Opt_logdev, Opt_rtdev,
90 Opt_wsync, Opt_noalign, Opt_swalloc, Opt_sunit, Opt_swidth, Opt_nouuid,
91 Opt_grpid, Opt_nogrpid, Opt_bsdgroups, Opt_sysvgroups,
92 Opt_allocsize, Opt_norecovery, Opt_inode64, Opt_inode32, Opt_ikeep,
93 Opt_noikeep, Opt_largeio, Opt_nolargeio, Opt_attr2, Opt_noattr2,
94 Opt_filestreams, Opt_quota, Opt_noquota, Opt_usrquota, Opt_grpquota,
95 Opt_prjquota, Opt_uquota, Opt_gquota, Opt_pquota,
96 Opt_uqnoenforce, Opt_gqnoenforce, Opt_pqnoenforce, Opt_qnoenforce,
97 Opt_discard, Opt_nodiscard, Opt_dax, Opt_dax_enum,
100 static const struct fs_parameter_spec xfs_fs_parameters[] = {
101 fsparam_u32("logbufs", Opt_logbufs),
102 fsparam_string("logbsize", Opt_logbsize),
103 fsparam_string("logdev", Opt_logdev),
104 fsparam_string("rtdev", Opt_rtdev),
105 fsparam_flag("wsync", Opt_wsync),
106 fsparam_flag("noalign", Opt_noalign),
107 fsparam_flag("swalloc", Opt_swalloc),
108 fsparam_u32("sunit", Opt_sunit),
109 fsparam_u32("swidth", Opt_swidth),
110 fsparam_flag("nouuid", Opt_nouuid),
111 fsparam_flag("grpid", Opt_grpid),
112 fsparam_flag("nogrpid", Opt_nogrpid),
113 fsparam_flag("bsdgroups", Opt_bsdgroups),
114 fsparam_flag("sysvgroups", Opt_sysvgroups),
115 fsparam_string("allocsize", Opt_allocsize),
116 fsparam_flag("norecovery", Opt_norecovery),
117 fsparam_flag("inode64", Opt_inode64),
118 fsparam_flag("inode32", Opt_inode32),
119 fsparam_flag("ikeep", Opt_ikeep),
120 fsparam_flag("noikeep", Opt_noikeep),
121 fsparam_flag("largeio", Opt_largeio),
122 fsparam_flag("nolargeio", Opt_nolargeio),
123 fsparam_flag("attr2", Opt_attr2),
124 fsparam_flag("noattr2", Opt_noattr2),
125 fsparam_flag("filestreams", Opt_filestreams),
126 fsparam_flag("quota", Opt_quota),
127 fsparam_flag("noquota", Opt_noquota),
128 fsparam_flag("usrquota", Opt_usrquota),
129 fsparam_flag("grpquota", Opt_grpquota),
130 fsparam_flag("prjquota", Opt_prjquota),
131 fsparam_flag("uquota", Opt_uquota),
132 fsparam_flag("gquota", Opt_gquota),
133 fsparam_flag("pquota", Opt_pquota),
134 fsparam_flag("uqnoenforce", Opt_uqnoenforce),
135 fsparam_flag("gqnoenforce", Opt_gqnoenforce),
136 fsparam_flag("pqnoenforce", Opt_pqnoenforce),
137 fsparam_flag("qnoenforce", Opt_qnoenforce),
138 fsparam_flag("discard", Opt_discard),
139 fsparam_flag("nodiscard", Opt_nodiscard),
140 fsparam_flag("dax", Opt_dax),
141 fsparam_enum("dax", Opt_dax_enum, dax_param_enums),
145 struct proc_xfs_info {
155 static struct proc_xfs_info xfs_info_set[] = {
156 /* the few simple ones we can get from the mount struct */
157 { XFS_MOUNT_IKEEP, ",ikeep" },
158 { XFS_MOUNT_WSYNC, ",wsync" },
159 { XFS_MOUNT_NOALIGN, ",noalign" },
160 { XFS_MOUNT_SWALLOC, ",swalloc" },
161 { XFS_MOUNT_NOUUID, ",nouuid" },
162 { XFS_MOUNT_NORECOVERY, ",norecovery" },
163 { XFS_MOUNT_ATTR2, ",attr2" },
164 { XFS_MOUNT_FILESTREAMS, ",filestreams" },
165 { XFS_MOUNT_GRPID, ",grpid" },
166 { XFS_MOUNT_DISCARD, ",discard" },
167 { XFS_MOUNT_LARGEIO, ",largeio" },
168 { XFS_MOUNT_DAX_ALWAYS, ",dax=always" },
169 { XFS_MOUNT_DAX_NEVER, ",dax=never" },
172 struct xfs_mount *mp = XFS_M(root->d_sb);
173 struct proc_xfs_info *xfs_infop;
175 for (xfs_infop = xfs_info_set; xfs_infop->flag; xfs_infop++) {
176 if (mp->m_flags & xfs_infop->flag)
177 seq_puts(m, xfs_infop->str);
180 seq_printf(m, ",inode%d",
181 (mp->m_flags & XFS_MOUNT_SMALL_INUMS) ? 32 : 64);
183 if (mp->m_flags & XFS_MOUNT_ALLOCSIZE)
184 seq_printf(m, ",allocsize=%dk",
185 (1 << mp->m_allocsize_log) >> 10);
187 if (mp->m_logbufs > 0)
188 seq_printf(m, ",logbufs=%d", mp->m_logbufs);
189 if (mp->m_logbsize > 0)
190 seq_printf(m, ",logbsize=%dk", mp->m_logbsize >> 10);
193 seq_show_option(m, "logdev", mp->m_logname);
195 seq_show_option(m, "rtdev", mp->m_rtname);
197 if (mp->m_dalign > 0)
198 seq_printf(m, ",sunit=%d",
199 (int)XFS_FSB_TO_BB(mp, mp->m_dalign));
200 if (mp->m_swidth > 0)
201 seq_printf(m, ",swidth=%d",
202 (int)XFS_FSB_TO_BB(mp, mp->m_swidth));
204 if (mp->m_qflags & XFS_UQUOTA_ACCT) {
205 if (mp->m_qflags & XFS_UQUOTA_ENFD)
206 seq_puts(m, ",usrquota");
208 seq_puts(m, ",uqnoenforce");
211 if (mp->m_qflags & XFS_PQUOTA_ACCT) {
212 if (mp->m_qflags & XFS_PQUOTA_ENFD)
213 seq_puts(m, ",prjquota");
215 seq_puts(m, ",pqnoenforce");
217 if (mp->m_qflags & XFS_GQUOTA_ACCT) {
218 if (mp->m_qflags & XFS_GQUOTA_ENFD)
219 seq_puts(m, ",grpquota");
221 seq_puts(m, ",gqnoenforce");
224 if (!(mp->m_qflags & XFS_ALL_QUOTA_ACCT))
225 seq_puts(m, ",noquota");
231 * Set parameters for inode allocation heuristics, taking into account
232 * filesystem size and inode32/inode64 mount options; i.e. specifically
233 * whether or not XFS_MOUNT_SMALL_INUMS is set.
235 * Inode allocation patterns are altered only if inode32 is requested
236 * (XFS_MOUNT_SMALL_INUMS), and the filesystem is sufficiently large.
237 * If altered, XFS_MOUNT_32BITINODES is set as well.
239 * An agcount independent of that in the mount structure is provided
240 * because in the growfs case, mp->m_sb.sb_agcount is not yet updated
241 * to the potentially higher ag count.
243 * Returns the maximum AG index which may contain inodes.
247 struct xfs_mount *mp,
248 xfs_agnumber_t agcount)
250 xfs_agnumber_t index;
251 xfs_agnumber_t maxagi = 0;
252 xfs_sb_t *sbp = &mp->m_sb;
253 xfs_agnumber_t max_metadata;
258 * Calculate how much should be reserved for inodes to meet
259 * the max inode percentage. Used only for inode32.
261 if (M_IGEO(mp)->maxicount) {
264 icount = sbp->sb_dblocks * sbp->sb_imax_pct;
266 icount += sbp->sb_agblocks - 1;
267 do_div(icount, sbp->sb_agblocks);
268 max_metadata = icount;
270 max_metadata = agcount;
273 /* Get the last possible inode in the filesystem */
274 agino = XFS_AGB_TO_AGINO(mp, sbp->sb_agblocks - 1);
275 ino = XFS_AGINO_TO_INO(mp, agcount - 1, agino);
278 * If user asked for no more than 32-bit inodes, and the fs is
279 * sufficiently large, set XFS_MOUNT_32BITINODES if we must alter
280 * the allocator to accommodate the request.
282 if ((mp->m_flags & XFS_MOUNT_SMALL_INUMS) && ino > XFS_MAXINUMBER_32)
283 mp->m_flags |= XFS_MOUNT_32BITINODES;
285 mp->m_flags &= ~XFS_MOUNT_32BITINODES;
287 for (index = 0; index < agcount; index++) {
288 struct xfs_perag *pag;
290 ino = XFS_AGINO_TO_INO(mp, index, agino);
292 pag = xfs_perag_get(mp, index);
294 if (mp->m_flags & XFS_MOUNT_32BITINODES) {
295 if (ino > XFS_MAXINUMBER_32) {
296 pag->pagi_inodeok = 0;
297 pag->pagf_metadata = 0;
299 pag->pagi_inodeok = 1;
301 if (index < max_metadata)
302 pag->pagf_metadata = 1;
304 pag->pagf_metadata = 0;
307 pag->pagi_inodeok = 1;
308 pag->pagf_metadata = 0;
314 return (mp->m_flags & XFS_MOUNT_32BITINODES) ? maxagi : agcount;
319 struct super_block *sb,
320 struct xfs_buftarg *bt)
322 return dax_supported(bt->bt_daxdev, bt->bt_bdev, sb->s_blocksize, 0,
323 bdev_nr_sectors(bt->bt_bdev));
330 struct block_device **bdevp)
334 *bdevp = blkdev_get_by_path(name, FMODE_READ|FMODE_WRITE|FMODE_EXCL,
336 if (IS_ERR(*bdevp)) {
337 error = PTR_ERR(*bdevp);
338 xfs_warn(mp, "Invalid device [%s], error=%d", name, error);
346 struct block_device *bdev)
349 blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
354 struct xfs_mount *mp)
356 struct dax_device *dax_ddev = mp->m_ddev_targp->bt_daxdev;
358 if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp) {
359 struct block_device *logdev = mp->m_logdev_targp->bt_bdev;
360 struct dax_device *dax_logdev = mp->m_logdev_targp->bt_daxdev;
362 xfs_free_buftarg(mp->m_logdev_targp);
363 xfs_blkdev_put(logdev);
364 fs_put_dax(dax_logdev);
366 if (mp->m_rtdev_targp) {
367 struct block_device *rtdev = mp->m_rtdev_targp->bt_bdev;
368 struct dax_device *dax_rtdev = mp->m_rtdev_targp->bt_daxdev;
370 xfs_free_buftarg(mp->m_rtdev_targp);
371 xfs_blkdev_put(rtdev);
372 fs_put_dax(dax_rtdev);
374 xfs_free_buftarg(mp->m_ddev_targp);
375 fs_put_dax(dax_ddev);
379 * The file system configurations are:
380 * (1) device (partition) with data and internal log
381 * (2) logical volume with data and log subvolumes.
382 * (3) logical volume with data, log, and realtime subvolumes.
384 * We only have to handle opening the log and realtime volumes here if
385 * they are present. The data subvolume has already been opened by
386 * get_sb_bdev() and is stored in sb->s_bdev.
390 struct xfs_mount *mp)
392 struct block_device *ddev = mp->m_super->s_bdev;
393 struct dax_device *dax_ddev = fs_dax_get_by_bdev(ddev);
394 struct dax_device *dax_logdev = NULL, *dax_rtdev = NULL;
395 struct block_device *logdev = NULL, *rtdev = NULL;
399 * Open real time and log devices - order is important.
402 error = xfs_blkdev_get(mp, mp->m_logname, &logdev);
405 dax_logdev = fs_dax_get_by_bdev(logdev);
409 error = xfs_blkdev_get(mp, mp->m_rtname, &rtdev);
411 goto out_close_logdev;
413 if (rtdev == ddev || rtdev == logdev) {
415 "Cannot mount filesystem with identical rtdev and ddev/logdev.");
417 goto out_close_rtdev;
419 dax_rtdev = fs_dax_get_by_bdev(rtdev);
423 * Setup xfs_mount buffer target pointers
426 mp->m_ddev_targp = xfs_alloc_buftarg(mp, ddev, dax_ddev);
427 if (!mp->m_ddev_targp)
428 goto out_close_rtdev;
431 mp->m_rtdev_targp = xfs_alloc_buftarg(mp, rtdev, dax_rtdev);
432 if (!mp->m_rtdev_targp)
433 goto out_free_ddev_targ;
436 if (logdev && logdev != ddev) {
437 mp->m_logdev_targp = xfs_alloc_buftarg(mp, logdev, dax_logdev);
438 if (!mp->m_logdev_targp)
439 goto out_free_rtdev_targ;
441 mp->m_logdev_targp = mp->m_ddev_targp;
447 if (mp->m_rtdev_targp)
448 xfs_free_buftarg(mp->m_rtdev_targp);
450 xfs_free_buftarg(mp->m_ddev_targp);
452 xfs_blkdev_put(rtdev);
453 fs_put_dax(dax_rtdev);
455 if (logdev && logdev != ddev) {
456 xfs_blkdev_put(logdev);
457 fs_put_dax(dax_logdev);
460 fs_put_dax(dax_ddev);
465 * Setup xfs_mount buffer target pointers based on superblock
469 struct xfs_mount *mp)
473 error = xfs_setsize_buftarg(mp->m_ddev_targp, mp->m_sb.sb_sectsize);
477 if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp) {
478 unsigned int log_sector_size = BBSIZE;
480 if (xfs_sb_version_hassector(&mp->m_sb))
481 log_sector_size = mp->m_sb.sb_logsectsize;
482 error = xfs_setsize_buftarg(mp->m_logdev_targp,
487 if (mp->m_rtdev_targp) {
488 error = xfs_setsize_buftarg(mp->m_rtdev_targp,
489 mp->m_sb.sb_sectsize);
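/*
 * Allocate the per-mount workqueues (buf, unwritten extent conversion, CIL,
 * inode reclaim, gc and sync), unwinding the ones already created if a later
 * allocation fails.
 */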
498 xfs_init_mount_workqueues(
499 struct xfs_mount *mp)
501 mp->m_buf_workqueue = alloc_workqueue("xfs-buf/%s",
502 XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM),
503 1, mp->m_super->s_id);
504 if (!mp->m_buf_workqueue)
507 mp->m_unwritten_workqueue = alloc_workqueue("xfs-conv/%s",
508 XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM),
509 0, mp->m_super->s_id);
510 if (!mp->m_unwritten_workqueue)
511 goto out_destroy_buf;
513 mp->m_cil_workqueue = alloc_workqueue("xfs-cil/%s",
514 XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM | WQ_UNBOUND),
515 0, mp->m_super->s_id);
516 if (!mp->m_cil_workqueue)
517 goto out_destroy_unwritten;
519 mp->m_reclaim_workqueue = alloc_workqueue("xfs-reclaim/%s",
520 XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM),
521 0, mp->m_super->s_id);
522 if (!mp->m_reclaim_workqueue)
523 goto out_destroy_cil;
525 mp->m_gc_workqueue = alloc_workqueue("xfs-gc/%s",
526 WQ_SYSFS | WQ_UNBOUND | WQ_FREEZABLE | WQ_MEM_RECLAIM,
527 0, mp->m_super->s_id);
528 if (!mp->m_gc_workqueue)
529 goto out_destroy_reclaim;
531 mp->m_sync_workqueue = alloc_workqueue("xfs-sync/%s",
532 XFS_WQFLAGS(WQ_FREEZABLE), 0, mp->m_super->s_id);
533 if (!mp->m_sync_workqueue)
534 goto out_destroy_eofb;
539 destroy_workqueue(mp->m_gc_workqueue);
541 destroy_workqueue(mp->m_reclaim_workqueue);
543 destroy_workqueue(mp->m_cil_workqueue);
544 out_destroy_unwritten:
545 destroy_workqueue(mp->m_unwritten_workqueue);
547 destroy_workqueue(mp->m_buf_workqueue);
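/*
 * Tear down the per-mount workqueues in the reverse order of their creation.
 */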
553 xfs_destroy_mount_workqueues(
554 struct xfs_mount *mp)
556 destroy_workqueue(mp->m_sync_workqueue);
557 destroy_workqueue(mp->m_gc_workqueue);
558 destroy_workqueue(mp->m_reclaim_workqueue);
559 destroy_workqueue(mp->m_cil_workqueue);
560 destroy_workqueue(mp->m_unwritten_workqueue);
561 destroy_workqueue(mp->m_buf_workqueue);
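/*
 * Worker for xfs_flush_inodes(): flush the superblock's dirty inodes via
 * sync_inodes_sb(), but only if s_umount can be taken shared.
 */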
565 xfs_flush_inodes_worker(
566 struct work_struct *work)
568 struct xfs_mount *mp = container_of(work, struct xfs_mount,
569 m_flush_inodes_work);
570 struct super_block *sb = mp->m_super;
572 if (down_read_trylock(&sb->s_umount)) {
574 up_read(&sb->s_umount);
579 * Flush all dirty data to disk. Must not be called while holding an XFS_ILOCK
580 * or a page lock. We use sync_inodes_sb() here to ensure we block while waiting
581 * for IO to complete so that we effectively throttle multiple callers to the
582 * rate at which IO is completing.
586 struct xfs_mount *mp)
589 * If flush_work() returns true then that means we waited for a flush
590 * which was already in progress. Don't bother running another scan.
592 if (flush_work(&mp->m_flush_inodes_work))
595 queue_work(mp->m_sync_workqueue, &mp->m_flush_inodes_work);
596 flush_work(&mp->m_flush_inodes_work);
599 /* Catch misguided souls that try to use this interface on XFS */
600 STATIC struct inode *
602 struct super_block *sb)
611 struct xfs_inode *ip,
614 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
615 struct xfs_bmbt_irec got;
616 struct xfs_iext_cursor icur;
618 if (!ifp || !xfs_iext_lookup_extent(ip, ifp, 0, &icur, &got))
621 if (isnullstartblock(got.br_startblock)) {
622 xfs_warn(ip->i_mount,
623 "ino %llx %s fork has delalloc extent at [0x%llx:0x%llx]",
625 whichfork == XFS_DATA_FORK ? "data" : "cow",
626 got.br_startoff, got.br_blockcount);
628 } while (xfs_iext_next_extent(ifp, &icur, &got));
631 #define xfs_check_delalloc(ip, whichfork) do { } while (0)
635 * Now that the generic code is guaranteed not to be accessing
636 * the linux inode, we can inactivate and reclaim the inode.
639 xfs_fs_destroy_inode(
642 struct xfs_inode *ip = XFS_I(inode);
644 trace_xfs_destroy_inode(ip);
646 ASSERT(!rwsem_is_locked(&inode->i_rwsem));
647 XFS_STATS_INC(ip->i_mount, vn_rele);
648 XFS_STATS_INC(ip->i_mount, vn_remove);
652 if (!XFS_FORCED_SHUTDOWN(ip->i_mount) && ip->i_delayed_blks) {
653 xfs_check_delalloc(ip, XFS_DATA_FORK);
654 xfs_check_delalloc(ip, XFS_COW_FORK);
658 XFS_STATS_INC(ip->i_mount, vn_reclaim);
661 * We should never get here with one of the reclaim flags already set.
663 ASSERT_ALWAYS(!xfs_iflags_test(ip, XFS_IRECLAIMABLE));
664 ASSERT_ALWAYS(!xfs_iflags_test(ip, XFS_IRECLAIM));
667 * We always use background reclaim here because even if the inode is
* clean, it still may be under IO and hence we have to wait for IO
669 * completion to occur before we can reclaim the inode. The background
670 * reclaim path handles this more efficiently than we can here, so
671 * simply let background reclaim tear down all inodes.
673 xfs_inode_mark_reclaimable(ip);
681 struct xfs_inode *ip = XFS_I(inode);
682 struct xfs_mount *mp = ip->i_mount;
683 struct xfs_trans *tp;
685 if (!(inode->i_sb->s_flags & SB_LAZYTIME))
687 if (flag != I_DIRTY_SYNC || !(inode->i_state & I_DIRTY_TIME))
690 if (xfs_trans_alloc(mp, &M_RES(mp)->tr_fsyncts, 0, 0, 0, &tp))
692 xfs_ilock(ip, XFS_ILOCK_EXCL);
693 xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
694 xfs_trans_log_inode(tp, ip, XFS_ILOG_TIMESTAMP);
695 xfs_trans_commit(tp);
699 * Slab object creation initialisation for the XFS inode.
700 * This covers only the idempotent fields in the XFS inode;
701 * all other fields need to be initialised on allocation
702 * from the slab. This avoids the need to repeatedly initialise
* fields in the xfs inode that are left in the initialised state
704 * when freeing the inode.
707 xfs_fs_inode_init_once(
710 struct xfs_inode *ip = inode;
712 memset(ip, 0, sizeof(struct xfs_inode));
715 inode_init_once(VFS_I(ip));
718 atomic_set(&ip->i_pincount, 0);
719 spin_lock_init(&ip->i_flags_lock);
721 mrlock_init(&ip->i_mmaplock, MRLOCK_ALLOW_EQUAL_PRI|MRLOCK_BARRIER,
722 "xfsino", ip->i_ino);
723 mrlock_init(&ip->i_lock, MRLOCK_ALLOW_EQUAL_PRI|MRLOCK_BARRIER,
724 "xfsino", ip->i_ino);
728 * We do an unlocked check for XFS_IDONTCACHE here because we are already
729 * serialised against cache hits here via the inode->i_lock and igrab() in
730 * xfs_iget_cache_hit(). Hence a lookup that might clear this flag will not be
731 * racing with us, and it avoids needing to grab a spinlock here for every inode
732 * we drop the final reference on.
738 struct xfs_inode *ip = XFS_I(inode);
741 * If this unlinked inode is in the middle of recovery, don't
742 * drop the inode just yet; log recovery will take care of
743 * that. See the comment for this inode flag.
745 if (ip->i_flags & XFS_IRECOVERY) {
746 ASSERT(ip->i_mount->m_log->l_flags & XLOG_RECOVERY_NEEDED);
750 return generic_drop_inode(inode);
755 struct xfs_mount *mp)
758 kfree(mp->m_logname);
764 struct super_block *sb,
767 struct xfs_mount *mp = XFS_M(sb);
770 * Doing anything during the async pass would be counterproductive.
775 xfs_log_force(mp, XFS_LOG_SYNC);
778 * The disk must be active because we're syncing.
779 * We schedule log work now (now that the disk is
780 * active) instead of later (when it might not be).
782 flush_delayed_work(&mp->m_log->l_work);
790 struct dentry *dentry,
791 struct kstatfs *statp)
793 struct xfs_mount *mp = XFS_M(dentry->d_sb);
794 xfs_sb_t *sbp = &mp->m_sb;
795 struct xfs_inode *ip = XFS_I(d_inode(dentry));
796 uint64_t fakeinos, id;
803 statp->f_type = XFS_SUPER_MAGIC;
804 statp->f_namelen = MAXNAMELEN - 1;
806 id = huge_encode_dev(mp->m_ddev_targp->bt_dev);
807 statp->f_fsid = u64_to_fsid(id);
809 icount = percpu_counter_sum(&mp->m_icount);
810 ifree = percpu_counter_sum(&mp->m_ifree);
811 fdblocks = percpu_counter_sum(&mp->m_fdblocks);
813 spin_lock(&mp->m_sb_lock);
814 statp->f_bsize = sbp->sb_blocksize;
815 lsize = sbp->sb_logstart ? sbp->sb_logblocks : 0;
816 statp->f_blocks = sbp->sb_dblocks - lsize;
817 spin_unlock(&mp->m_sb_lock);
819 /* make sure statp->f_bfree does not underflow */
820 statp->f_bfree = max_t(int64_t, fdblocks - mp->m_alloc_set_aside, 0);
821 statp->f_bavail = statp->f_bfree;
823 fakeinos = XFS_FSB_TO_INO(mp, statp->f_bfree);
824 statp->f_files = min(icount + fakeinos, (uint64_t)XFS_MAXINUMBER);
825 if (M_IGEO(mp)->maxicount)
826 statp->f_files = min_t(typeof(statp->f_files),
828 M_IGEO(mp)->maxicount);
830 /* If sb_icount overshot maxicount, report actual allocation */
831 statp->f_files = max_t(typeof(statp->f_files),
835 /* make sure statp->f_ffree does not underflow */
836 ffree = statp->f_files - (icount - ifree);
837 statp->f_ffree = max_t(int64_t, ffree, 0);
840 if ((ip->i_diflags & XFS_DIFLAG_PROJINHERIT) &&
841 ((mp->m_qflags & (XFS_PQUOTA_ACCT|XFS_PQUOTA_ENFD))) ==
842 (XFS_PQUOTA_ACCT|XFS_PQUOTA_ENFD))
843 xfs_qm_statvfs(ip, statp);
845 if (XFS_IS_REALTIME_MOUNT(mp) &&
846 (ip->i_diflags & (XFS_DIFLAG_RTINHERIT | XFS_DIFLAG_REALTIME))) {
847 statp->f_blocks = sbp->sb_rblocks;
848 statp->f_bavail = statp->f_bfree =
849 sbp->sb_frextents * sbp->sb_rextsize;
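/*
 * Remember the current size of the reserve block pool and then empty it.
 */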
856 xfs_save_resvblks(struct xfs_mount *mp)
858 uint64_t resblks = 0;
860 mp->m_resblks_save = mp->m_resblks;
861 xfs_reserve_blocks(mp, &resblks, NULL);
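/*
 * Refill the reserve block pool, using the stashed size if we have one and
 * the default otherwise.
 */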
865 xfs_restore_resvblks(struct xfs_mount *mp)
869 if (mp->m_resblks_save) {
870 resblks = mp->m_resblks_save;
871 mp->m_resblks_save = 0;
873 resblks = xfs_default_resblks(mp);
875 xfs_reserve_blocks(mp, &resblks, NULL);
879 * Second stage of a freeze. The data is already frozen so we only
880 * need to take care of the metadata. Once that's done sync the superblock
881 * to the log to dirty it in case of a crash while frozen. This ensures that we
882 * will recover the unlinked inode lists on the next mount.
886 struct super_block *sb)
888 struct xfs_mount *mp = XFS_M(sb);
893 * The filesystem is now frozen far enough that memory reclaim
894 * cannot safely operate on the filesystem. Hence we need to
895 * set a GFP_NOFS context here to avoid recursion deadlocks.
897 flags = memalloc_nofs_save();
898 xfs_blockgc_stop(mp);
899 xfs_save_resvblks(mp);
900 ret = xfs_log_quiesce(mp);
901 memalloc_nofs_restore(flags);
907 struct super_block *sb)
909 struct xfs_mount *mp = XFS_M(sb);
911 xfs_restore_resvblks(mp);
912 xfs_log_work_queue(mp);
913 xfs_blockgc_start(mp);
918 * This function fills in xfs_mount_t fields based on mount args.
919 * Note: the superblock _has_ now been read in.
923 struct xfs_mount *mp)
925 int ronly = (mp->m_flags & XFS_MOUNT_RDONLY);
927 /* Fail a mount where the logbuf is smaller than the log stripe */
928 if (xfs_sb_version_haslogv2(&mp->m_sb)) {
929 if (mp->m_logbsize <= 0 &&
930 mp->m_sb.sb_logsunit > XLOG_BIG_RECORD_BSIZE) {
931 mp->m_logbsize = mp->m_sb.sb_logsunit;
932 } else if (mp->m_logbsize > 0 &&
933 mp->m_logbsize < mp->m_sb.sb_logsunit) {
935 "logbuf size must be greater than or equal to log stripe size");
939 /* Fail a mount if the logbuf is larger than 32K */
940 if (mp->m_logbsize > XLOG_BIG_RECORD_BSIZE) {
942 "logbuf size for version 1 logs must be 16K or 32K");
948 * V5 filesystems always use attr2 format for attributes.
950 if (xfs_sb_version_hascrc(&mp->m_sb) &&
951 (mp->m_flags & XFS_MOUNT_NOATTR2)) {
952 xfs_warn(mp, "Cannot mount a V5 filesystem as noattr2. "
953 "attr2 is always enabled for V5 filesystems.");
958 * mkfs'ed attr2 will turn on attr2 mount unless explicitly
959 * told by noattr2 to turn it off
961 if (xfs_sb_version_hasattr2(&mp->m_sb) &&
962 !(mp->m_flags & XFS_MOUNT_NOATTR2))
963 mp->m_flags |= XFS_MOUNT_ATTR2;
966 * prohibit r/w mounts of read-only filesystems
968 if ((mp->m_sb.sb_flags & XFS_SBF_READONLY) && !ronly) {
970 "cannot mount a read-only filesystem as read-write");
974 if ((mp->m_qflags & (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE)) &&
975 (mp->m_qflags & (XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE)) &&
976 !xfs_sb_version_has_pquotino(&mp->m_sb)) {
978 "Super block does not support project and group quota together");
986 xfs_init_percpu_counters(
987 struct xfs_mount *mp)
991 error = percpu_counter_init(&mp->m_icount, 0, GFP_KERNEL);
995 error = percpu_counter_init(&mp->m_ifree, 0, GFP_KERNEL);
999 error = percpu_counter_init(&mp->m_fdblocks, 0, GFP_KERNEL);
1003 error = percpu_counter_init(&mp->m_delalloc_blks, 0, GFP_KERNEL);
1010 percpu_counter_destroy(&mp->m_fdblocks);
1012 percpu_counter_destroy(&mp->m_ifree);
1014 percpu_counter_destroy(&mp->m_icount);
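/*
 * Reset the per-cpu counters from the current in-core superblock values.
 */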
1019 xfs_reinit_percpu_counters(
1020 struct xfs_mount *mp)
1022 percpu_counter_set(&mp->m_icount, mp->m_sb.sb_icount);
1023 percpu_counter_set(&mp->m_ifree, mp->m_sb.sb_ifree);
1024 percpu_counter_set(&mp->m_fdblocks, mp->m_sb.sb_fdblocks);
1028 xfs_destroy_percpu_counters(
1029 struct xfs_mount *mp)
1031 percpu_counter_destroy(&mp->m_icount);
1032 percpu_counter_destroy(&mp->m_ifree);
1033 percpu_counter_destroy(&mp->m_fdblocks);
1034 ASSERT(XFS_FORCED_SHUTDOWN(mp) ||
1035 percpu_counter_sum(&mp->m_delalloc_blks) == 0);
1036 percpu_counter_destroy(&mp->m_delalloc_blks);
1041 struct super_block *sb)
1043 struct xfs_mount *mp = XFS_M(sb);
1045 /* if ->fill_super failed, we have no mount to tear down */
1049 xfs_notice(mp, "Unmounting Filesystem");
1050 xfs_filestream_unmount(mp);
1054 free_percpu(mp->m_stats.xs_stats);
1055 xfs_destroy_percpu_counters(mp);
1056 xfs_destroy_mount_workqueues(mp);
1057 xfs_close_devices(mp);
1059 sb->s_fs_info = NULL;
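/*
 * Tell the VFS superblock shrinker how many reclaimable XFS inodes we have
 * cached.
 */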
1064 xfs_fs_nr_cached_objects(
1065 struct super_block *sb,
1066 struct shrink_control *sc)
1068 /* Paranoia: catch incorrect calls during mount setup or teardown */
1069 if (WARN_ON_ONCE(!sb->s_fs_info))
1071 return xfs_reclaim_inodes_count(XFS_M(sb));
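/*
 * Reclaim up to sc->nr_to_scan cached XFS inodes on behalf of the VFS
 * superblock shrinker.
 */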
1075 xfs_fs_free_cached_objects(
1076 struct super_block *sb,
1077 struct shrink_control *sc)
1079 return xfs_reclaim_inodes_nr(XFS_M(sb), sc->nr_to_scan);
1082 static const struct super_operations xfs_super_operations = {
1083 .alloc_inode = xfs_fs_alloc_inode,
1084 .destroy_inode = xfs_fs_destroy_inode,
1085 .dirty_inode = xfs_fs_dirty_inode,
1086 .drop_inode = xfs_fs_drop_inode,
1087 .put_super = xfs_fs_put_super,
1088 .sync_fs = xfs_fs_sync_fs,
1089 .freeze_fs = xfs_fs_freeze,
1090 .unfreeze_fs = xfs_fs_unfreeze,
1091 .statfs = xfs_fs_statfs,
1092 .show_options = xfs_fs_show_options,
1093 .nr_cached_objects = xfs_fs_nr_cached_objects,
1094 .free_cached_objects = xfs_fs_free_cached_objects,
1103 int last, shift_left_factor = 0, _res;
1107 value = kstrdup(s, GFP_KERNEL);
1111 last = strlen(value) - 1;
1112 if (value[last] == 'K' || value[last] == 'k') {
1113 shift_left_factor = 10;
1116 if (value[last] == 'M' || value[last] == 'm') {
1117 shift_left_factor = 20;
1120 if (value[last] == 'G' || value[last] == 'g') {
1121 shift_left_factor = 30;
1125 if (kstrtoint(value, base, &_res))
1128 *res = _res << shift_left_factor;
1133 xfs_fs_warn_deprecated(
1134 struct fs_context *fc,
1135 struct fs_parameter *param,
1139 /* Don't print the warning if reconfiguring and current mount point
1140 * already had the flag set
1142 if ((fc->purpose & FS_CONTEXT_FOR_RECONFIGURE) &&
1143 !!(XFS_M(fc->root->d_sb)->m_flags & flag) == value)
1145 xfs_warn(fc->s_fs_info, "%s mount option is deprecated.", param->key);
1149 * Set mount state from a mount option.
1151 * NOTE: mp->m_super is NULL here!
1155 struct fs_context *fc,
1156 struct fs_parameter *param)
1158 struct xfs_mount *parsing_mp = fc->s_fs_info;
1159 struct fs_parse_result result;
1163 opt = fs_parse(fc, xfs_fs_parameters, param, &result);
1169 parsing_mp->m_logbufs = result.uint_32;
1172 if (suffix_kstrtoint(param->string, 10, &parsing_mp->m_logbsize))
1176 kfree(parsing_mp->m_logname);
1177 parsing_mp->m_logname = kstrdup(param->string, GFP_KERNEL);
1178 if (!parsing_mp->m_logname)
1182 kfree(parsing_mp->m_rtname);
1183 parsing_mp->m_rtname = kstrdup(param->string, GFP_KERNEL);
1184 if (!parsing_mp->m_rtname)
1188 if (suffix_kstrtoint(param->string, 10, &size))
1190 parsing_mp->m_allocsize_log = ffs(size) - 1;
1191 parsing_mp->m_flags |= XFS_MOUNT_ALLOCSIZE;
1195 parsing_mp->m_flags |= XFS_MOUNT_GRPID;
1198 case Opt_sysvgroups:
1199 parsing_mp->m_flags &= ~XFS_MOUNT_GRPID;
1202 parsing_mp->m_flags |= XFS_MOUNT_WSYNC;
1204 case Opt_norecovery:
1205 parsing_mp->m_flags |= XFS_MOUNT_NORECOVERY;
1208 parsing_mp->m_flags |= XFS_MOUNT_NOALIGN;
1211 parsing_mp->m_flags |= XFS_MOUNT_SWALLOC;
1214 parsing_mp->m_dalign = result.uint_32;
1217 parsing_mp->m_swidth = result.uint_32;
1220 parsing_mp->m_flags |= XFS_MOUNT_SMALL_INUMS;
1223 parsing_mp->m_flags &= ~XFS_MOUNT_SMALL_INUMS;
1226 parsing_mp->m_flags |= XFS_MOUNT_NOUUID;
1229 parsing_mp->m_flags |= XFS_MOUNT_LARGEIO;
1232 parsing_mp->m_flags &= ~XFS_MOUNT_LARGEIO;
1234 case Opt_filestreams:
1235 parsing_mp->m_flags |= XFS_MOUNT_FILESTREAMS;
1238 parsing_mp->m_qflags &= ~XFS_ALL_QUOTA_ACCT;
1239 parsing_mp->m_qflags &= ~XFS_ALL_QUOTA_ENFD;
1240 parsing_mp->m_qflags &= ~XFS_ALL_QUOTA_ACTIVE;
1245 parsing_mp->m_qflags |= (XFS_UQUOTA_ACCT | XFS_UQUOTA_ACTIVE |
1248 case Opt_qnoenforce:
1249 case Opt_uqnoenforce:
1250 parsing_mp->m_qflags |= (XFS_UQUOTA_ACCT | XFS_UQUOTA_ACTIVE);
1251 parsing_mp->m_qflags &= ~XFS_UQUOTA_ENFD;
1255 parsing_mp->m_qflags |= (XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE |
1258 case Opt_pqnoenforce:
1259 parsing_mp->m_qflags |= (XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE);
1260 parsing_mp->m_qflags &= ~XFS_PQUOTA_ENFD;
1264 parsing_mp->m_qflags |= (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE |
1267 case Opt_gqnoenforce:
1268 parsing_mp->m_qflags |= (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE);
1269 parsing_mp->m_qflags &= ~XFS_GQUOTA_ENFD;
1272 parsing_mp->m_flags |= XFS_MOUNT_DISCARD;
1275 parsing_mp->m_flags &= ~XFS_MOUNT_DISCARD;
1277 #ifdef CONFIG_FS_DAX
1279 xfs_mount_set_dax_mode(parsing_mp, XFS_DAX_ALWAYS);
1282 xfs_mount_set_dax_mode(parsing_mp, result.uint_32);
1285 /* Following mount options will be removed in September 2025 */
1287 xfs_fs_warn_deprecated(fc, param, XFS_MOUNT_IKEEP, true);
1288 parsing_mp->m_flags |= XFS_MOUNT_IKEEP;
1291 xfs_fs_warn_deprecated(fc, param, XFS_MOUNT_IKEEP, false);
1292 parsing_mp->m_flags &= ~XFS_MOUNT_IKEEP;
1295 xfs_fs_warn_deprecated(fc, param, XFS_MOUNT_ATTR2, true);
1296 parsing_mp->m_flags |= XFS_MOUNT_ATTR2;
1299 xfs_fs_warn_deprecated(fc, param, XFS_MOUNT_NOATTR2, true);
1300 parsing_mp->m_flags &= ~XFS_MOUNT_ATTR2;
1301 parsing_mp->m_flags |= XFS_MOUNT_NOATTR2;
1304 xfs_warn(parsing_mp, "unknown mount option [%s].", param->key);
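/*
 * Cross-check the parsed mount options: norecovery requires a read-only
 * mount, noalign conflicts with sunit/swidth, sunit and swidth must be
 * specified together and be consistent, quota needs CONFIG_XFS_QUOTA, and
 * logbufs, logbsize and allocsize must be within their valid ranges.
 */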
1312 xfs_fs_validate_params(
1313 struct xfs_mount *mp)
1316 * no recovery flag requires a read-only mount
1318 if ((mp->m_flags & XFS_MOUNT_NORECOVERY) &&
1319 !(mp->m_flags & XFS_MOUNT_RDONLY)) {
1320 xfs_warn(mp, "no-recovery mounts must be read-only.");
1324 if ((mp->m_flags & XFS_MOUNT_NOALIGN) &&
1325 (mp->m_dalign || mp->m_swidth)) {
1327 "sunit and swidth options incompatible with the noalign option");
1331 if (!IS_ENABLED(CONFIG_XFS_QUOTA) && mp->m_qflags != 0) {
1332 xfs_warn(mp, "quota support not available in this kernel.");
1336 if ((mp->m_dalign && !mp->m_swidth) ||
1337 (!mp->m_dalign && mp->m_swidth)) {
1338 xfs_warn(mp, "sunit and swidth must be specified together");
1342 if (mp->m_dalign && (mp->m_swidth % mp->m_dalign != 0)) {
1344 "stripe width (%d) must be a multiple of the stripe unit (%d)",
1345 mp->m_swidth, mp->m_dalign);
1349 if (mp->m_logbufs != -1 &&
1350 mp->m_logbufs != 0 &&
1351 (mp->m_logbufs < XLOG_MIN_ICLOGS ||
1352 mp->m_logbufs > XLOG_MAX_ICLOGS)) {
1353 xfs_warn(mp, "invalid logbufs value: %d [not %d-%d]",
1354 mp->m_logbufs, XLOG_MIN_ICLOGS, XLOG_MAX_ICLOGS);
1358 if (mp->m_logbsize != -1 &&
1359 mp->m_logbsize != 0 &&
1360 (mp->m_logbsize < XLOG_MIN_RECORD_BSIZE ||
1361 mp->m_logbsize > XLOG_MAX_RECORD_BSIZE ||
1362 !is_power_of_2(mp->m_logbsize))) {
1364 "invalid logbufsize: %d [not 16k,32k,64k,128k or 256k]",
1369 if ((mp->m_flags & XFS_MOUNT_ALLOCSIZE) &&
1370 (mp->m_allocsize_log > XFS_MAX_IO_LOG ||
1371 mp->m_allocsize_log < XFS_MIN_IO_LOG)) {
1372 xfs_warn(mp, "invalid log iosize: %d [not %d-%d]",
1373 mp->m_allocsize_log, XFS_MIN_IO_LOG, XFS_MAX_IO_LOG);
1382 struct super_block *sb,
1383 struct fs_context *fc)
1385 struct xfs_mount *mp = sb->s_fs_info;
1387 int flags = 0, error;
1391 error = xfs_fs_validate_params(mp);
1393 goto out_free_names;
1395 sb_min_blocksize(sb, BBSIZE);
1396 sb->s_xattr = xfs_xattr_handlers;
1397 sb->s_export_op = &xfs_export_operations;
1398 #ifdef CONFIG_XFS_QUOTA
1399 sb->s_qcop = &xfs_quotactl_operations;
1400 sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP | QTYPE_MASK_PRJ;
1402 sb->s_op = &xfs_super_operations;
1405 * Delay mount work if the debug hook is set. This is debug
* instrumentation to coordinate simulation of xfs mount failures with
* VFS superblock operations.
1409 if (xfs_globals.mount_delay) {
1410 xfs_notice(mp, "Delaying mount for %d seconds.",
1411 xfs_globals.mount_delay);
1412 msleep(xfs_globals.mount_delay * 1000);
1415 if (fc->sb_flags & SB_SILENT)
1416 flags |= XFS_MFSI_QUIET;
1418 error = xfs_open_devices(mp);
1420 goto out_free_names;
1422 error = xfs_init_mount_workqueues(mp);
1424 goto out_close_devices;
1426 error = xfs_init_percpu_counters(mp);
1428 goto out_destroy_workqueues;
1430 /* Allocate stats memory before we do operations that might use it */
1431 mp->m_stats.xs_stats = alloc_percpu(struct xfsstats);
1432 if (!mp->m_stats.xs_stats) {
1434 goto out_destroy_counters;
1437 error = xfs_readsb(mp, flags);
1439 goto out_free_stats;
1441 error = xfs_finish_flags(mp);
1445 error = xfs_setup_devices(mp);
1449 /* V4 support is undergoing deprecation. */
1450 if (!xfs_sb_version_hascrc(&mp->m_sb)) {
1451 #ifdef CONFIG_XFS_SUPPORT_V4
1453 "Deprecated V4 format (crc=0) will not be supported after September 2030.");
1456 "Deprecated V4 format (crc=0) not supported by kernel.");
1462 /* Filesystem claims it needs repair, so refuse the mount. */
1463 if (xfs_sb_version_needsrepair(&mp->m_sb)) {
1464 xfs_warn(mp, "Filesystem needs repair. Please run xfs_repair.");
1465 error = -EFSCORRUPTED;
1470 * Don't touch the filesystem if a user tool thinks it owns the primary
1471 * superblock. mkfs doesn't clear the flag from secondary supers, so
1472 * we don't check them at all.
1474 if (mp->m_sb.sb_inprogress) {
1475 xfs_warn(mp, "Offline file system operation in progress!");
1476 error = -EFSCORRUPTED;
1481 * Until this is fixed only page-sized or smaller data blocks work.
1483 if (mp->m_sb.sb_blocksize > PAGE_SIZE) {
1485 "File system with blocksize %d bytes. "
1486 "Only pagesize (%ld) or less will currently work.",
1487 mp->m_sb.sb_blocksize, PAGE_SIZE);
1492 /* Ensure this filesystem fits in the page cache limits */
1493 if (xfs_sb_validate_fsb_count(&mp->m_sb, mp->m_sb.sb_dblocks) ||
1494 xfs_sb_validate_fsb_count(&mp->m_sb, mp->m_sb.sb_rblocks)) {
1496 "file system too large to be mounted on this system.");
1502 * XFS block mappings use 54 bits to store the logical block offset.
1503 * This should suffice to handle the maximum file size that the VFS
1504 * supports (currently 2^63 bytes on 64-bit and ULONG_MAX << PAGE_SHIFT
1505 * bytes on 32-bit), but as XFS and VFS have gotten the s_maxbytes
1506 * calculation wrong on 32-bit kernels in the past, we'll add a WARN_ON
1507 * to check this assertion.
1509 * Avoid integer overflow by comparing the maximum bmbt offset to the
1510 * maximum pagecache offset in units of fs blocks.
1512 if (!xfs_verify_fileoff(mp, XFS_B_TO_FSBT(mp, MAX_LFS_FILESIZE))) {
1514 "MAX_LFS_FILESIZE block offset (%llu) exceeds extent map maximum (%llu)!",
1515 XFS_B_TO_FSBT(mp, MAX_LFS_FILESIZE),
1521 error = xfs_filestream_mount(mp);
1526 * we must configure the block size in the superblock before we run the
1527 * full mount process as the mount process can lookup and cache inodes.
1529 sb->s_magic = XFS_SUPER_MAGIC;
1530 sb->s_blocksize = mp->m_sb.sb_blocksize;
1531 sb->s_blocksize_bits = ffs(sb->s_blocksize) - 1;
1532 sb->s_maxbytes = MAX_LFS_FILESIZE;
1533 sb->s_max_links = XFS_MAXLINK;
1534 sb->s_time_gran = 1;
1535 if (xfs_sb_version_hasbigtime(&mp->m_sb)) {
1536 sb->s_time_min = xfs_bigtime_to_unix(XFS_BIGTIME_TIME_MIN);
1537 sb->s_time_max = xfs_bigtime_to_unix(XFS_BIGTIME_TIME_MAX);
1539 sb->s_time_min = XFS_LEGACY_TIME_MIN;
1540 sb->s_time_max = XFS_LEGACY_TIME_MAX;
1542 trace_xfs_inode_timestamp_range(mp, sb->s_time_min, sb->s_time_max);
1543 sb->s_iflags |= SB_I_CGROUPWB;
1545 set_posix_acl_flag(sb);
1547 /* version 5 superblocks support inode version counters. */
1548 if (XFS_SB_VERSION_NUM(&mp->m_sb) == XFS_SB_VERSION_5)
1549 sb->s_flags |= SB_I_VERSION;
1551 if (xfs_sb_version_hasbigtime(&mp->m_sb))
1553 "EXPERIMENTAL big timestamp feature in use. Use at your own risk!");
1555 if (mp->m_flags & XFS_MOUNT_DAX_ALWAYS) {
1556 bool rtdev_is_dax = false, datadev_is_dax;
1559 "DAX enabled. Warning: EXPERIMENTAL, use at your own risk");
1561 datadev_is_dax = xfs_buftarg_is_dax(sb, mp->m_ddev_targp);
1562 if (mp->m_rtdev_targp)
1563 rtdev_is_dax = xfs_buftarg_is_dax(sb,
1565 if (!rtdev_is_dax && !datadev_is_dax) {
1567 "DAX unsupported by block device. Turning off DAX.");
1568 xfs_mount_set_dax_mode(mp, XFS_DAX_NEVER);
1570 if (xfs_sb_version_hasreflink(&mp->m_sb)) {
1572 "DAX and reflink cannot be used together!");
1574 goto out_filestream_unmount;
1578 if (mp->m_flags & XFS_MOUNT_DISCARD) {
1579 struct request_queue *q = bdev_get_queue(sb->s_bdev);
1581 if (!blk_queue_discard(q)) {
1582 xfs_warn(mp, "mounting with \"discard\" option, but "
1583 "the device does not support discard");
1584 mp->m_flags &= ~XFS_MOUNT_DISCARD;
1588 if (xfs_sb_version_hasreflink(&mp->m_sb)) {
1589 if (mp->m_sb.sb_rblocks) {
1591 "reflink not compatible with realtime device!");
1593 goto out_filestream_unmount;
1596 if (xfs_globals.always_cow) {
1597 xfs_info(mp, "using DEBUG-only always_cow mode.");
1598 mp->m_always_cow = true;
1602 if (xfs_sb_version_hasrmapbt(&mp->m_sb) && mp->m_sb.sb_rblocks) {
1604 "reverse mapping btree not compatible with realtime device!");
1606 goto out_filestream_unmount;
1609 if (xfs_sb_version_hasinobtcounts(&mp->m_sb))
1611 "EXPERIMENTAL inode btree counters feature in use. Use at your own risk!");
1613 error = xfs_mountfs(mp);
1615 goto out_filestream_unmount;
1617 root = igrab(VFS_I(mp->m_rootip));
1622 sb->s_root = d_make_root(root);
1630 out_filestream_unmount:
1631 xfs_filestream_unmount(mp);
1635 free_percpu(mp->m_stats.xs_stats);
1636 out_destroy_counters:
1637 xfs_destroy_percpu_counters(mp);
1638 out_destroy_workqueues:
1639 xfs_destroy_mount_workqueues(mp);
1641 xfs_close_devices(mp);
1643 sb->s_fs_info = NULL;
1648 xfs_filestream_unmount(mp);
1655 struct fs_context *fc)
1657 return get_tree_bdev(fc, xfs_fs_fill_super);
1662 struct xfs_mount *mp)
1664 struct xfs_sb *sbp = &mp->m_sb;
1667 if (mp->m_flags & XFS_MOUNT_NORECOVERY) {
1669 "ro->rw transition prohibited on norecovery mount");
1673 if (XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_5 &&
1674 xfs_sb_has_ro_compat_feature(sbp, XFS_SB_FEAT_RO_COMPAT_UNKNOWN)) {
1676 "ro->rw transition prohibited on unknown (0x%x) ro-compat filesystem",
1677 (sbp->sb_features_ro_compat &
1678 XFS_SB_FEAT_RO_COMPAT_UNKNOWN));
1682 mp->m_flags &= ~XFS_MOUNT_RDONLY;
1685 * If this is the first remount to writeable state we might have some
1686 * superblock changes to update.
1688 if (mp->m_update_sb) {
1689 error = xfs_sync_sb(mp, false);
1691 xfs_warn(mp, "failed to write sb changes");
1694 mp->m_update_sb = false;
1698 * Fill out the reserve pool if it is empty. Use the stashed value if
1699 * it is non-zero, otherwise go with the default.
1701 xfs_restore_resvblks(mp);
1702 xfs_log_work_queue(mp);
1704 /* Recover any CoW blocks that never got remapped. */
1705 error = xfs_reflink_recover_cow(mp);
1708 "Error %d recovering leftover CoW allocations.", error);
1709 xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
1712 xfs_blockgc_start(mp);
/* Create the per-AG metadata reservation pool. */
1715 error = xfs_fs_reserve_ag_blocks(mp);
1716 if (error && error != -ENOSPC)
1724 struct xfs_mount *mp)
1729 * Cancel background eofb scanning so it cannot race with the final
1730 * log force+buftarg wait and deadlock the remount.
1732 xfs_blockgc_stop(mp);
1734 /* Get rid of any leftover CoW reservations... */
1735 error = xfs_blockgc_free_space(mp, NULL);
1737 xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
1741 /* Free the per-AG metadata reservation pool. */
1742 error = xfs_fs_unreserve_ag_blocks(mp);
1744 xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
1749 * Before we sync the metadata, we need to free up the reserve block
1750 * pool so that the used block count in the superblock on disk is
* correct at the end of the remount. Stash the current reserve pool
* size so that if we get remounted rw, we can return it to the same size.
1755 xfs_save_resvblks(mp);
1758 mp->m_flags |= XFS_MOUNT_RDONLY;
1764 * Logically we would return an error here to prevent users from believing
1765 * they might have changed mount options using remount which can't be changed.
1767 * But unfortunately mount(8) adds all options from mtab and fstab to the mount
1768 * arguments in some cases so we can't blindly reject options, but have to
1769 * check for each specified option if it actually differs from the currently
1770 * set option and only reject it if that's the case.
1772 * Until that is implemented we return success for every remount request, and
1773 * silently ignore all options that we can't actually change.
1777 struct fs_context *fc)
1779 struct xfs_mount *mp = XFS_M(fc->root->d_sb);
1780 struct xfs_mount *new_mp = fc->s_fs_info;
1781 xfs_sb_t *sbp = &mp->m_sb;
1782 int flags = fc->sb_flags;
1785 /* version 5 superblocks always support version counters. */
1786 if (XFS_SB_VERSION_NUM(&mp->m_sb) == XFS_SB_VERSION_5)
1787 fc->sb_flags |= SB_I_VERSION;
1789 error = xfs_fs_validate_params(new_mp);
1793 sync_filesystem(mp->m_super);
1795 /* inode32 -> inode64 */
1796 if ((mp->m_flags & XFS_MOUNT_SMALL_INUMS) &&
1797 !(new_mp->m_flags & XFS_MOUNT_SMALL_INUMS)) {
1798 mp->m_flags &= ~XFS_MOUNT_SMALL_INUMS;
1799 mp->m_maxagi = xfs_set_inode_alloc(mp, sbp->sb_agcount);
1802 /* inode64 -> inode32 */
1803 if (!(mp->m_flags & XFS_MOUNT_SMALL_INUMS) &&
1804 (new_mp->m_flags & XFS_MOUNT_SMALL_INUMS)) {
1805 mp->m_flags |= XFS_MOUNT_SMALL_INUMS;
1806 mp->m_maxagi = xfs_set_inode_alloc(mp, sbp->sb_agcount);
1810 if ((mp->m_flags & XFS_MOUNT_RDONLY) && !(flags & SB_RDONLY)) {
1811 error = xfs_remount_rw(mp);
1817 if (!(mp->m_flags & XFS_MOUNT_RDONLY) && (flags & SB_RDONLY)) {
1818 error = xfs_remount_ro(mp);
1826 static void xfs_fs_free(
1827 struct fs_context *fc)
1829 struct xfs_mount *mp = fc->s_fs_info;
1832 * mp is stored in the fs_context when it is initialized.
1833 * mp is transferred to the superblock on a successful mount,
* but if an error occurs before the transfer we have to free it here.
1841 static const struct fs_context_operations xfs_context_ops = {
1842 .parse_param = xfs_fs_parse_param,
1843 .get_tree = xfs_fs_get_tree,
1844 .reconfigure = xfs_fs_reconfigure,
1845 .free = xfs_fs_free,
1848 static int xfs_init_fs_context(
1849 struct fs_context *fc)
1851 struct xfs_mount *mp;
1853 mp = kmem_alloc(sizeof(struct xfs_mount), KM_ZERO);
1857 spin_lock_init(&mp->m_sb_lock);
1858 spin_lock_init(&mp->m_agirotor_lock);
1859 INIT_RADIX_TREE(&mp->m_perag_tree, GFP_ATOMIC);
1860 spin_lock_init(&mp->m_perag_lock);
1861 mutex_init(&mp->m_growlock);
1862 INIT_WORK(&mp->m_flush_inodes_work, xfs_flush_inodes_worker);
1863 INIT_DELAYED_WORK(&mp->m_reclaim_work, xfs_reclaim_worker);
1864 mp->m_kobj.kobject.kset = xfs_kset;
1866 * We don't create the finobt per-ag space reservation until after log
1867 * recovery, so we must set this to true so that an ifree transaction
1868 * started during log recovery will not depend on space reservations
1869 * for finobt expansion.
1871 mp->m_finobt_nores = true;
1874 * These can be overridden by the mount option parsing.
1877 mp->m_logbsize = -1;
1878 mp->m_allocsize_log = 16; /* 64k */
1881 * Copy binary VFS mount flags we are interested in.
1883 if (fc->sb_flags & SB_RDONLY)
1884 mp->m_flags |= XFS_MOUNT_RDONLY;
1885 if (fc->sb_flags & SB_DIRSYNC)
1886 mp->m_flags |= XFS_MOUNT_DIRSYNC;
1887 if (fc->sb_flags & SB_SYNCHRONOUS)
1888 mp->m_flags |= XFS_MOUNT_WSYNC;
1891 fc->ops = &xfs_context_ops;
1896 static struct file_system_type xfs_fs_type = {
1897 .owner = THIS_MODULE,
1899 .init_fs_context = xfs_init_fs_context,
1900 .parameters = xfs_fs_parameters,
1901 .kill_sb = kill_block_super,
1902 .fs_flags = FS_REQUIRES_DEV | FS_ALLOW_IDMAP,
1904 MODULE_ALIAS_FS("xfs");
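/*
 * Create the slab caches for the XFS in-memory object types, destroying the
 * ones already created if any allocation fails.
 */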
1907 xfs_init_zones(void)
1909 xfs_log_ticket_zone = kmem_cache_create("xfs_log_ticket",
1910 sizeof(struct xlog_ticket),
1912 if (!xfs_log_ticket_zone)
1915 xfs_bmap_free_item_zone = kmem_cache_create("xfs_bmap_free_item",
1916 sizeof(struct xfs_extent_free_item),
1918 if (!xfs_bmap_free_item_zone)
1919 goto out_destroy_log_ticket_zone;
1921 xfs_btree_cur_zone = kmem_cache_create("xfs_btree_cur",
1922 sizeof(struct xfs_btree_cur),
1924 if (!xfs_btree_cur_zone)
1925 goto out_destroy_bmap_free_item_zone;
1927 xfs_da_state_zone = kmem_cache_create("xfs_da_state",
1928 sizeof(struct xfs_da_state),
1930 if (!xfs_da_state_zone)
1931 goto out_destroy_btree_cur_zone;
1933 xfs_ifork_zone = kmem_cache_create("xfs_ifork",
1934 sizeof(struct xfs_ifork),
1936 if (!xfs_ifork_zone)
1937 goto out_destroy_da_state_zone;
1939 xfs_trans_zone = kmem_cache_create("xfs_trans",
1940 sizeof(struct xfs_trans),
1942 if (!xfs_trans_zone)
1943 goto out_destroy_ifork_zone;
1947 * The size of the zone allocated buf log item is the maximum
1948 * size possible under XFS. This wastes a little bit of memory,
1949 * but it is much faster.
1951 xfs_buf_item_zone = kmem_cache_create("xfs_buf_item",
1952 sizeof(struct xfs_buf_log_item),
1954 if (!xfs_buf_item_zone)
1955 goto out_destroy_trans_zone;
1957 xfs_efd_zone = kmem_cache_create("xfs_efd_item",
1958 (sizeof(struct xfs_efd_log_item) +
1959 (XFS_EFD_MAX_FAST_EXTENTS - 1) *
1960 sizeof(struct xfs_extent)),
1963 goto out_destroy_buf_item_zone;
1965 xfs_efi_zone = kmem_cache_create("xfs_efi_item",
1966 (sizeof(struct xfs_efi_log_item) +
1967 (XFS_EFI_MAX_FAST_EXTENTS - 1) *
1968 sizeof(struct xfs_extent)),
1971 goto out_destroy_efd_zone;
1973 xfs_inode_zone = kmem_cache_create("xfs_inode",
1974 sizeof(struct xfs_inode), 0,
1975 (SLAB_HWCACHE_ALIGN |
1976 SLAB_RECLAIM_ACCOUNT |
1977 SLAB_MEM_SPREAD | SLAB_ACCOUNT),
1978 xfs_fs_inode_init_once);
1979 if (!xfs_inode_zone)
1980 goto out_destroy_efi_zone;
1982 xfs_ili_zone = kmem_cache_create("xfs_ili",
1983 sizeof(struct xfs_inode_log_item), 0,
1984 SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD,
1987 goto out_destroy_inode_zone;
1989 xfs_icreate_zone = kmem_cache_create("xfs_icr",
1990 sizeof(struct xfs_icreate_item),
1992 if (!xfs_icreate_zone)
1993 goto out_destroy_ili_zone;
1995 xfs_rud_zone = kmem_cache_create("xfs_rud_item",
1996 sizeof(struct xfs_rud_log_item),
1999 goto out_destroy_icreate_zone;
2001 xfs_rui_zone = kmem_cache_create("xfs_rui_item",
2002 xfs_rui_log_item_sizeof(XFS_RUI_MAX_FAST_EXTENTS),
2005 goto out_destroy_rud_zone;
2007 xfs_cud_zone = kmem_cache_create("xfs_cud_item",
2008 sizeof(struct xfs_cud_log_item),
2011 goto out_destroy_rui_zone;
2013 xfs_cui_zone = kmem_cache_create("xfs_cui_item",
2014 xfs_cui_log_item_sizeof(XFS_CUI_MAX_FAST_EXTENTS),
2017 goto out_destroy_cud_zone;
2019 xfs_bud_zone = kmem_cache_create("xfs_bud_item",
2020 sizeof(struct xfs_bud_log_item),
2023 goto out_destroy_cui_zone;
2025 xfs_bui_zone = kmem_cache_create("xfs_bui_item",
2026 xfs_bui_log_item_sizeof(XFS_BUI_MAX_FAST_EXTENTS),
2029 goto out_destroy_bud_zone;
2033 out_destroy_bud_zone:
2034 kmem_cache_destroy(xfs_bud_zone);
2035 out_destroy_cui_zone:
2036 kmem_cache_destroy(xfs_cui_zone);
2037 out_destroy_cud_zone:
2038 kmem_cache_destroy(xfs_cud_zone);
2039 out_destroy_rui_zone:
2040 kmem_cache_destroy(xfs_rui_zone);
2041 out_destroy_rud_zone:
2042 kmem_cache_destroy(xfs_rud_zone);
2043 out_destroy_icreate_zone:
2044 kmem_cache_destroy(xfs_icreate_zone);
2045 out_destroy_ili_zone:
2046 kmem_cache_destroy(xfs_ili_zone);
2047 out_destroy_inode_zone:
2048 kmem_cache_destroy(xfs_inode_zone);
2049 out_destroy_efi_zone:
2050 kmem_cache_destroy(xfs_efi_zone);
2051 out_destroy_efd_zone:
2052 kmem_cache_destroy(xfs_efd_zone);
2053 out_destroy_buf_item_zone:
2054 kmem_cache_destroy(xfs_buf_item_zone);
2055 out_destroy_trans_zone:
2056 kmem_cache_destroy(xfs_trans_zone);
2057 out_destroy_ifork_zone:
2058 kmem_cache_destroy(xfs_ifork_zone);
2059 out_destroy_da_state_zone:
2060 kmem_cache_destroy(xfs_da_state_zone);
2061 out_destroy_btree_cur_zone:
2062 kmem_cache_destroy(xfs_btree_cur_zone);
2063 out_destroy_bmap_free_item_zone:
2064 kmem_cache_destroy(xfs_bmap_free_item_zone);
2065 out_destroy_log_ticket_zone:
2066 kmem_cache_destroy(xfs_log_ticket_zone);
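/*
 * Destroy the slab caches created by xfs_init_zones().
 */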
2072 xfs_destroy_zones(void)
* Make sure all delayed RCU frees are flushed before we destroy the caches.
2079 kmem_cache_destroy(xfs_bui_zone);
2080 kmem_cache_destroy(xfs_bud_zone);
2081 kmem_cache_destroy(xfs_cui_zone);
2082 kmem_cache_destroy(xfs_cud_zone);
2083 kmem_cache_destroy(xfs_rui_zone);
2084 kmem_cache_destroy(xfs_rud_zone);
2085 kmem_cache_destroy(xfs_icreate_zone);
2086 kmem_cache_destroy(xfs_ili_zone);
2087 kmem_cache_destroy(xfs_inode_zone);
2088 kmem_cache_destroy(xfs_efi_zone);
2089 kmem_cache_destroy(xfs_efd_zone);
2090 kmem_cache_destroy(xfs_buf_item_zone);
2091 kmem_cache_destroy(xfs_trans_zone);
2092 kmem_cache_destroy(xfs_ifork_zone);
2093 kmem_cache_destroy(xfs_da_state_zone);
2094 kmem_cache_destroy(xfs_btree_cur_zone);
2095 kmem_cache_destroy(xfs_bmap_free_item_zone);
2096 kmem_cache_destroy(xfs_log_ticket_zone);
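/*
 * Create the global workqueues (allocation and discard) shared by all XFS
 * mounts.
 */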
2100 xfs_init_workqueues(void)
2103 * The allocation workqueue can be used in memory reclaim situations
2104 * (writepage path), and parallelism is only limited by the number of
2105 * AGs in all the filesystems mounted. Hence use the default large
2106 * max_active value for this workqueue.
2108 xfs_alloc_wq = alloc_workqueue("xfsalloc",
2109 XFS_WQFLAGS(WQ_MEM_RECLAIM | WQ_FREEZABLE), 0);
2113 xfs_discard_wq = alloc_workqueue("xfsdiscard", XFS_WQFLAGS(WQ_UNBOUND),
2115 if (!xfs_discard_wq)
2116 goto out_free_alloc_wq;
2120 destroy_workqueue(xfs_alloc_wq);
2125 xfs_destroy_workqueues(void)
2127 destroy_workqueue(xfs_discard_wq);
2128 destroy_workqueue(xfs_alloc_wq);
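/*
 * Module init: verify the on-disk structure sizes, then set up the caches,
 * workqueues, MRU cache, buffer cache, procfs, sysctl and sysfs objects and
 * quota support before registering the xfs filesystem type. Each step is
 * unwound if a later one fails.
 */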
2136 xfs_check_ondisk_structs();
2138 printk(KERN_INFO XFS_VERSION_STRING " with "
2139 XFS_BUILD_OPTIONS " enabled\n");
2143 error = xfs_init_zones();
2147 error = xfs_init_workqueues();
2149 goto out_destroy_zones;
2151 error = xfs_mru_cache_init();
2153 goto out_destroy_wq;
2155 error = xfs_buf_init();
2157 goto out_mru_cache_uninit;
2159 error = xfs_init_procfs();
2161 goto out_buf_terminate;
2163 error = xfs_sysctl_register();
2165 goto out_cleanup_procfs;
2167 xfs_kset = kset_create_and_add("xfs", NULL, fs_kobj);
2170 goto out_sysctl_unregister;
2173 xfsstats.xs_kobj.kobject.kset = xfs_kset;
2175 xfsstats.xs_stats = alloc_percpu(struct xfsstats);
2176 if (!xfsstats.xs_stats) {
2178 goto out_kset_unregister;
2181 error = xfs_sysfs_init(&xfsstats.xs_kobj, &xfs_stats_ktype, NULL,
2184 goto out_free_stats;
2187 xfs_dbg_kobj.kobject.kset = xfs_kset;
2188 error = xfs_sysfs_init(&xfs_dbg_kobj, &xfs_dbg_ktype, NULL, "debug");
2190 goto out_remove_stats_kobj;
2193 error = xfs_qm_init();
2195 goto out_remove_dbg_kobj;
2197 error = register_filesystem(&xfs_fs_type);
2204 out_remove_dbg_kobj:
2206 xfs_sysfs_del(&xfs_dbg_kobj);
2207 out_remove_stats_kobj:
2209 xfs_sysfs_del(&xfsstats.xs_kobj);
2211 free_percpu(xfsstats.xs_stats);
2212 out_kset_unregister:
2213 kset_unregister(xfs_kset);
2214 out_sysctl_unregister:
2215 xfs_sysctl_unregister();
2217 xfs_cleanup_procfs();
2219 xfs_buf_terminate();
2220 out_mru_cache_uninit:
2221 xfs_mru_cache_uninit();
2223 xfs_destroy_workqueues();
2225 xfs_destroy_zones();
2234 unregister_filesystem(&xfs_fs_type);
2236 xfs_sysfs_del(&xfs_dbg_kobj);
2238 xfs_sysfs_del(&xfsstats.xs_kobj);
2239 free_percpu(xfsstats.xs_stats);
2240 kset_unregister(xfs_kset);
2241 xfs_sysctl_unregister();
2242 xfs_cleanup_procfs();
2243 xfs_buf_terminate();
2244 xfs_mru_cache_uninit();
2245 xfs_destroy_workqueues();
2246 xfs_destroy_zones();
2247 xfs_uuid_table_free();
2250 module_init(init_xfs_fs);
2251 module_exit(exit_xfs_fs);
2253 MODULE_AUTHOR("Silicon Graphics, Inc.");
2254 MODULE_DESCRIPTION(XFS_VERSION_STRING " with " XFS_BUILD_OPTIONS " enabled");
2255 MODULE_LICENSE("GPL");