// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 */

#include "xfs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_bmap.h"
#include "xfs_alloc.h"
#include "xfs_fsops.h"
#include "xfs_trans.h"
#include "xfs_buf_item.h"
#include "xfs_log.h"
#include "xfs_log_priv.h"
#include "xfs_dir2.h"
#include "xfs_extfree_item.h"
#include "xfs_mru_cache.h"
#include "xfs_inode_item.h"
#include "xfs_icache.h"
#include "xfs_trace.h"
#include "xfs_icreate_item.h"
#include "xfs_filestream.h"
#include "xfs_quota.h"
#include "xfs_sysfs.h"
#include "xfs_ondisk.h"
#include "xfs_rmap_item.h"
#include "xfs_refcount_item.h"
#include "xfs_bmap_item.h"
#include "xfs_reflink.h"
#include "xfs_pwork.h"
#include "xfs_ag.h"

#include <linux/magic.h>
#include <linux/fs_context.h>
#include <linux/fs_parser.h>
static const struct super_operations xfs_super_operations;

static struct kset *xfs_kset;		/* top-level xfs sysfs dir */
#ifdef DEBUG
static struct xfs_kobj xfs_dbg_kobj;	/* global debug sysfs attrs */
#endif
#ifdef CONFIG_HOTPLUG_CPU
static LIST_HEAD(xfs_mount_list);
static DEFINE_SPINLOCK(xfs_mount_list_lock);

static inline void xfs_mount_list_add(struct xfs_mount *mp)
{
	spin_lock(&xfs_mount_list_lock);
	list_add(&mp->m_mount_list, &xfs_mount_list);
	spin_unlock(&xfs_mount_list_lock);
}

static inline void xfs_mount_list_del(struct xfs_mount *mp)
{
	spin_lock(&xfs_mount_list_lock);
	list_del(&mp->m_mount_list);
	spin_unlock(&xfs_mount_list_lock);
}
#else /* !CONFIG_HOTPLUG_CPU */
static inline void xfs_mount_list_add(struct xfs_mount *mp) {}
static inline void xfs_mount_list_del(struct xfs_mount *mp) {}
#endif
enum xfs_dax_mode {
	XFS_DAX_INODE = 0,
	XFS_DAX_ALWAYS = 1,
	XFS_DAX_NEVER = 2,
};

static void
xfs_mount_set_dax_mode(
	struct xfs_mount	*mp,
	enum xfs_dax_mode	mode)
{
	switch (mode) {
	case XFS_DAX_INODE:
		mp->m_flags &= ~(XFS_MOUNT_DAX_ALWAYS | XFS_MOUNT_DAX_NEVER);
		break;
	case XFS_DAX_ALWAYS:
		mp->m_flags |= XFS_MOUNT_DAX_ALWAYS;
		mp->m_flags &= ~XFS_MOUNT_DAX_NEVER;
		break;
	case XFS_DAX_NEVER:
		mp->m_flags |= XFS_MOUNT_DAX_NEVER;
		mp->m_flags &= ~XFS_MOUNT_DAX_ALWAYS;
		break;
	}
}

static const struct constant_table dax_param_enums[] = {
	{"inode",	XFS_DAX_INODE },
	{"always",	XFS_DAX_ALWAYS },
	{"never",	XFS_DAX_NEVER },
	{}
};
/*
 * Table driven mount option parser.
 */
enum {
	Opt_logbufs, Opt_logbsize, Opt_logdev, Opt_rtdev,
	Opt_wsync, Opt_noalign, Opt_swalloc, Opt_sunit, Opt_swidth, Opt_nouuid,
	Opt_grpid, Opt_nogrpid, Opt_bsdgroups, Opt_sysvgroups,
	Opt_allocsize, Opt_norecovery, Opt_inode64, Opt_inode32, Opt_ikeep,
	Opt_noikeep, Opt_largeio, Opt_nolargeio, Opt_attr2, Opt_noattr2,
	Opt_filestreams, Opt_quota, Opt_noquota, Opt_usrquota, Opt_grpquota,
	Opt_prjquota, Opt_uquota, Opt_gquota, Opt_pquota,
	Opt_uqnoenforce, Opt_gqnoenforce, Opt_pqnoenforce, Opt_qnoenforce,
	Opt_discard, Opt_nodiscard, Opt_dax, Opt_dax_enum,
};
static const struct fs_parameter_spec xfs_fs_parameters[] = {
	fsparam_u32("logbufs",		Opt_logbufs),
	fsparam_string("logbsize",	Opt_logbsize),
	fsparam_string("logdev",	Opt_logdev),
	fsparam_string("rtdev",		Opt_rtdev),
	fsparam_flag("wsync",		Opt_wsync),
	fsparam_flag("noalign",		Opt_noalign),
	fsparam_flag("swalloc",		Opt_swalloc),
	fsparam_u32("sunit",		Opt_sunit),
	fsparam_u32("swidth",		Opt_swidth),
	fsparam_flag("nouuid",		Opt_nouuid),
	fsparam_flag("grpid",		Opt_grpid),
	fsparam_flag("nogrpid",		Opt_nogrpid),
	fsparam_flag("bsdgroups",	Opt_bsdgroups),
	fsparam_flag("sysvgroups",	Opt_sysvgroups),
	fsparam_string("allocsize",	Opt_allocsize),
	fsparam_flag("norecovery",	Opt_norecovery),
	fsparam_flag("inode64",		Opt_inode64),
	fsparam_flag("inode32",		Opt_inode32),
	fsparam_flag("ikeep",		Opt_ikeep),
	fsparam_flag("noikeep",		Opt_noikeep),
	fsparam_flag("largeio",		Opt_largeio),
	fsparam_flag("nolargeio",	Opt_nolargeio),
	fsparam_flag("attr2",		Opt_attr2),
	fsparam_flag("noattr2",		Opt_noattr2),
	fsparam_flag("filestreams",	Opt_filestreams),
	fsparam_flag("quota",		Opt_quota),
	fsparam_flag("noquota",		Opt_noquota),
	fsparam_flag("usrquota",	Opt_usrquota),
	fsparam_flag("grpquota",	Opt_grpquota),
	fsparam_flag("prjquota",	Opt_prjquota),
	fsparam_flag("uquota",		Opt_uquota),
	fsparam_flag("gquota",		Opt_gquota),
	fsparam_flag("pquota",		Opt_pquota),
	fsparam_flag("uqnoenforce",	Opt_uqnoenforce),
	fsparam_flag("gqnoenforce",	Opt_gqnoenforce),
	fsparam_flag("pqnoenforce",	Opt_pqnoenforce),
	fsparam_flag("qnoenforce",	Opt_qnoenforce),
	fsparam_flag("discard",		Opt_discard),
	fsparam_flag("nodiscard",	Opt_nodiscard),
	fsparam_flag("dax",		Opt_dax),
	fsparam_enum("dax",		Opt_dax_enum, dax_param_enums),
	{}
};
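
/*
 * Example (illustration only, values hypothetical): a mount string such as
 *
 *	logbufs=8,logbsize=256k,logdev=/dev/sdb1,noalign
 *
 * is split by the VFS into individual fs_parameters, and fs_parse() matches
 * each key against the table above. "logbufs" then arrives as a u32 (8),
 * "logbsize" as the string "256k" (decoded by suffix_kstrtoint() below),
 * "logdev" as a string path, and "noalign" as a bare flag.
 */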
struct proc_xfs_info {
	uint64_t	flag;
	char		*str;
};

static int
xfs_fs_show_options(
	struct seq_file		*m,
	struct dentry		*root)
{
	static struct proc_xfs_info xfs_info_set[] = {
		/* the few simple ones we can get from the mount struct */
		{ XFS_MOUNT_IKEEP,		",ikeep" },
		{ XFS_MOUNT_WSYNC,		",wsync" },
		{ XFS_MOUNT_NOALIGN,		",noalign" },
		{ XFS_MOUNT_SWALLOC,		",swalloc" },
		{ XFS_MOUNT_NOUUID,		",nouuid" },
		{ XFS_MOUNT_NORECOVERY,		",norecovery" },
		{ XFS_MOUNT_ATTR2,		",attr2" },
		{ XFS_MOUNT_FILESTREAMS,	",filestreams" },
		{ XFS_MOUNT_GRPID,		",grpid" },
		{ XFS_MOUNT_DISCARD,		",discard" },
		{ XFS_MOUNT_LARGEIO,		",largeio" },
		{ XFS_MOUNT_DAX_ALWAYS,		",dax=always" },
		{ XFS_MOUNT_DAX_NEVER,		",dax=never" },
		{ 0, NULL }
	};
	struct xfs_mount	*mp = XFS_M(root->d_sb);
	struct proc_xfs_info	*xfs_infop;

	for (xfs_infop = xfs_info_set; xfs_infop->flag; xfs_infop++) {
		if (mp->m_flags & xfs_infop->flag)
			seq_puts(m, xfs_infop->str);
	}

	seq_printf(m, ",inode%d",
		(mp->m_flags & XFS_MOUNT_SMALL_INUMS) ? 32 : 64);

	if (mp->m_flags & XFS_MOUNT_ALLOCSIZE)
		seq_printf(m, ",allocsize=%dk",
			   (1 << mp->m_allocsize_log) >> 10);

	if (mp->m_logbufs > 0)
		seq_printf(m, ",logbufs=%d", mp->m_logbufs);
	if (mp->m_logbsize > 0)
		seq_printf(m, ",logbsize=%dk", mp->m_logbsize >> 10);

	if (mp->m_logname)
		seq_show_option(m, "logdev", mp->m_logname);
	if (mp->m_rtname)
		seq_show_option(m, "rtdev", mp->m_rtname);

	if (mp->m_dalign > 0)
		seq_printf(m, ",sunit=%d",
				(int)XFS_FSB_TO_BB(mp, mp->m_dalign));
	if (mp->m_swidth > 0)
		seq_printf(m, ",swidth=%d",
				(int)XFS_FSB_TO_BB(mp, mp->m_swidth));

	if (mp->m_qflags & XFS_UQUOTA_ENFD)
		seq_puts(m, ",usrquota");
	else if (mp->m_qflags & XFS_UQUOTA_ACCT)
		seq_puts(m, ",uqnoenforce");

	if (mp->m_qflags & XFS_PQUOTA_ENFD)
		seq_puts(m, ",prjquota");
	else if (mp->m_qflags & XFS_PQUOTA_ACCT)
		seq_puts(m, ",pqnoenforce");

	if (mp->m_qflags & XFS_GQUOTA_ENFD)
		seq_puts(m, ",grpquota");
	else if (mp->m_qflags & XFS_GQUOTA_ACCT)
		seq_puts(m, ",gqnoenforce");

	if (!(mp->m_qflags & XFS_ALL_QUOTA_ACCT))
		seq_puts(m, ",noquota");

	return 0;
}
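
/*
 * For illustration (not part of the original source): on a filesystem
 * mounted with mostly default options plus usrquota, the function above
 * contributes the option list in a /proc/mounts line along the lines of
 *
 *	/dev/sda1 /mnt xfs rw,attr2,inode64,logbufs=8,logbsize=32k,usrquota 0 0
 *
 * where "rw" and similar generic flags come from the VFS, not from here,
 * and the exact fields depend on geometry and options.
 */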
/*
 * Set parameters for inode allocation heuristics, taking into account
 * filesystem size and inode32/inode64 mount options; i.e. specifically
 * whether or not XFS_MOUNT_SMALL_INUMS is set.
 *
 * Inode allocation patterns are altered only if inode32 is requested
 * (XFS_MOUNT_SMALL_INUMS), and the filesystem is sufficiently large.
 * If altered, XFS_MOUNT_32BITINODES is set as well.
 *
 * An agcount independent of that in the mount structure is provided
 * because in the growfs case, mp->m_sb.sb_agcount is not yet updated
 * to the potentially higher ag count.
 *
 * Returns the maximum AG index which may contain inodes.
 */
xfs_agnumber_t
xfs_set_inode_alloc(
	struct xfs_mount *mp,
	xfs_agnumber_t	agcount)
{
	xfs_agnumber_t	index;
	xfs_agnumber_t	maxagi = 0;
	xfs_sb_t	*sbp = &mp->m_sb;
	xfs_agnumber_t	max_metadata;
	xfs_agino_t	agino;
	xfs_ino_t	ino;

	/*
	 * Calculate how much should be reserved for inodes to meet
	 * the max inode percentage.  Used only for inode32.
	 */
	if (M_IGEO(mp)->maxicount) {
		uint64_t	icount;

		icount = sbp->sb_dblocks * sbp->sb_imax_pct;
		do_div(icount, 100);
		icount += sbp->sb_agblocks - 1;
		do_div(icount, sbp->sb_agblocks);
		max_metadata = icount;
	} else {
		max_metadata = agcount;
	}

	/* Get the last possible inode in the filesystem */
	agino =	XFS_AGB_TO_AGINO(mp, sbp->sb_agblocks - 1);
	ino = XFS_AGINO_TO_INO(mp, agcount - 1, agino);

	/*
	 * If user asked for no more than 32-bit inodes, and the fs is
	 * sufficiently large, set XFS_MOUNT_32BITINODES if we must alter
	 * the allocator to accommodate the request.
	 */
	if ((mp->m_flags & XFS_MOUNT_SMALL_INUMS) && ino > XFS_MAXINUMBER_32)
		mp->m_flags |= XFS_MOUNT_32BITINODES;
	else
		mp->m_flags &= ~XFS_MOUNT_32BITINODES;

	for (index = 0; index < agcount; index++) {
		struct xfs_perag	*pag;

		ino = XFS_AGINO_TO_INO(mp, index, agino);

		pag = xfs_perag_get(mp, index);

		if (mp->m_flags & XFS_MOUNT_32BITINODES) {
			if (ino > XFS_MAXINUMBER_32) {
				pag->pagi_inodeok = 0;
				pag->pagf_metadata = 0;
			} else {
				pag->pagi_inodeok = 1;
				maxagi++;
				if (index < max_metadata)
					pag->pagf_metadata = 1;
				else
					pag->pagf_metadata = 0;
			}
		} else {
			pag->pagi_inodeok = 1;
			pag->pagf_metadata = 0;
		}

		xfs_perag_put(pag);
	}

	return (mp->m_flags & XFS_MOUNT_32BITINODES) ? maxagi : agcount;
}
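
/*
 * Worked example (illustration only): XFS inode numbers encode the AG
 * number, the AG block and the inode-within-block, so the highest possible
 * inode number grows with the size of the filesystem. XFS_MAXINUMBER_32 is
 * the largest inode number representable in 32 bits (2^32 - 1); once the
 * last possible inode computed above exceeds it, an inode32 mount marks
 * the high AGs pagi_inodeok = 0 so new inodes land only in AGs whose inode
 * numbers still fit in 32 bits, and only those AGs count towards maxagi.
 */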
STATIC int
xfs_blkdev_get(
	xfs_mount_t		*mp,
	const char		*name,
	struct block_device	**bdevp)
{
	int			error = 0;

	*bdevp = blkdev_get_by_path(name, FMODE_READ|FMODE_WRITE|FMODE_EXCL,
				    mp);
	if (IS_ERR(*bdevp)) {
		error = PTR_ERR(*bdevp);
		xfs_warn(mp, "Invalid device [%s], error=%d", name, error);
	}

	return error;
}

STATIC void
xfs_blkdev_put(
	struct block_device	*bdev)
{
	if (bdev)
		blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
}
STATIC void
xfs_close_devices(
	struct xfs_mount	*mp)
{
	struct dax_device *dax_ddev = mp->m_ddev_targp->bt_daxdev;

	if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp) {
		struct block_device *logdev = mp->m_logdev_targp->bt_bdev;
		struct dax_device *dax_logdev = mp->m_logdev_targp->bt_daxdev;

		xfs_free_buftarg(mp->m_logdev_targp);
		xfs_blkdev_put(logdev);
		fs_put_dax(dax_logdev);
	}
	if (mp->m_rtdev_targp) {
		struct block_device *rtdev = mp->m_rtdev_targp->bt_bdev;
		struct dax_device *dax_rtdev = mp->m_rtdev_targp->bt_daxdev;

		xfs_free_buftarg(mp->m_rtdev_targp);
		xfs_blkdev_put(rtdev);
		fs_put_dax(dax_rtdev);
	}
	xfs_free_buftarg(mp->m_ddev_targp);
	fs_put_dax(dax_ddev);
}
/*
 * The file system configurations are:
 *	(1) device (partition) with data and internal log
 *	(2) logical volume with data and log subvolumes.
 *	(3) logical volume with data, log, and realtime subvolumes.
 *
 * We only have to handle opening the log and realtime volumes here if
 * they are present.  The data subvolume has already been opened by
 * get_sb_bdev() and is stored in sb->s_bdev.
 */
STATIC int
xfs_open_devices(
	struct xfs_mount	*mp)
{
	struct block_device	*ddev = mp->m_super->s_bdev;
	struct dax_device	*dax_ddev = fs_dax_get_by_bdev(ddev);
	struct dax_device	*dax_logdev = NULL, *dax_rtdev = NULL;
	struct block_device	*logdev = NULL, *rtdev = NULL;
	int			error;

	/*
	 * Open real time and log devices - order is important.
	 */
	if (mp->m_logname) {
		error = xfs_blkdev_get(mp, mp->m_logname, &logdev);
		if (error)
			goto out;
		dax_logdev = fs_dax_get_by_bdev(logdev);
	}

	if (mp->m_rtname) {
		error = xfs_blkdev_get(mp, mp->m_rtname, &rtdev);
		if (error)
			goto out_close_logdev;

		if (rtdev == ddev || rtdev == logdev) {
			xfs_warn(mp,
	"Cannot mount filesystem with identical rtdev and ddev/logdev.");
			error = -EINVAL;
			goto out_close_rtdev;
		}
		dax_rtdev = fs_dax_get_by_bdev(rtdev);
	}

	/*
	 * Setup xfs_mount buffer target pointers
	 */
	error = -ENOMEM;
	mp->m_ddev_targp = xfs_alloc_buftarg(mp, ddev, dax_ddev);
	if (!mp->m_ddev_targp)
		goto out_close_rtdev;

	if (rtdev) {
		mp->m_rtdev_targp = xfs_alloc_buftarg(mp, rtdev, dax_rtdev);
		if (!mp->m_rtdev_targp)
			goto out_free_ddev_targ;
	}

	if (logdev && logdev != ddev) {
		mp->m_logdev_targp = xfs_alloc_buftarg(mp, logdev, dax_logdev);
		if (!mp->m_logdev_targp)
			goto out_free_rtdev_targ;
	} else {
		mp->m_logdev_targp = mp->m_ddev_targp;
	}

	return 0;

 out_free_rtdev_targ:
	if (mp->m_rtdev_targp)
		xfs_free_buftarg(mp->m_rtdev_targp);
 out_free_ddev_targ:
	xfs_free_buftarg(mp->m_ddev_targp);
 out_close_rtdev:
	xfs_blkdev_put(rtdev);
	fs_put_dax(dax_rtdev);
 out_close_logdev:
	if (logdev && logdev != ddev) {
		xfs_blkdev_put(logdev);
		fs_put_dax(dax_logdev);
	}
 out:
	fs_put_dax(dax_ddev);
	return error;
}
/*
 * Setup xfs_mount buffer target pointers based on superblock
 */
STATIC int
xfs_setup_devices(
	struct xfs_mount	*mp)
{
	int			error;

	error = xfs_setsize_buftarg(mp->m_ddev_targp, mp->m_sb.sb_sectsize);
	if (error)
		return error;

	if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp) {
		unsigned int	log_sector_size = BBSIZE;

		if (xfs_has_sector(mp))
			log_sector_size = mp->m_sb.sb_logsectsize;
		error = xfs_setsize_buftarg(mp->m_logdev_targp,
					    log_sector_size);
		if (error)
			return error;
	}
	if (mp->m_rtdev_targp) {
		error = xfs_setsize_buftarg(mp->m_rtdev_targp,
					    mp->m_sb.sb_sectsize);
		if (error)
			return error;
	}

	return 0;
}
STATIC int
xfs_init_mount_workqueues(
	struct xfs_mount	*mp)
{
	mp->m_buf_workqueue = alloc_workqueue("xfs-buf/%s",
				XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM),
				1, mp->m_super->s_id);
	if (!mp->m_buf_workqueue)
		goto out;

	mp->m_unwritten_workqueue = alloc_workqueue("xfs-conv/%s",
				XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM),
				0, mp->m_super->s_id);
	if (!mp->m_unwritten_workqueue)
		goto out_destroy_buf;

	mp->m_reclaim_workqueue = alloc_workqueue("xfs-reclaim/%s",
				XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM),
				0, mp->m_super->s_id);
	if (!mp->m_reclaim_workqueue)
		goto out_destroy_unwritten;

	mp->m_blockgc_wq = alloc_workqueue("xfs-blockgc/%s",
				XFS_WQFLAGS(WQ_UNBOUND | WQ_FREEZABLE | WQ_MEM_RECLAIM),
				0, mp->m_super->s_id);
	if (!mp->m_blockgc_wq)
		goto out_destroy_reclaim;

	mp->m_inodegc_wq = alloc_workqueue("xfs-inodegc/%s",
				XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM),
				1, mp->m_super->s_id);
	if (!mp->m_inodegc_wq)
		goto out_destroy_blockgc;

	mp->m_sync_workqueue = alloc_workqueue("xfs-sync/%s",
				XFS_WQFLAGS(WQ_FREEZABLE), 0, mp->m_super->s_id);
	if (!mp->m_sync_workqueue)
		goto out_destroy_inodegc;

	return 0;

out_destroy_inodegc:
	destroy_workqueue(mp->m_inodegc_wq);
out_destroy_blockgc:
	destroy_workqueue(mp->m_blockgc_wq);
out_destroy_reclaim:
	destroy_workqueue(mp->m_reclaim_workqueue);
out_destroy_unwritten:
	destroy_workqueue(mp->m_unwritten_workqueue);
out_destroy_buf:
	destroy_workqueue(mp->m_buf_workqueue);
out:
	return -ENOMEM;
}
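
/*
 * Note on the max_active arguments above (illustration, not from the
 * original source): a value of 1 (xfs-buf, xfs-inodegc) serialises the
 * per-mount work items, while 0 lets the workqueue core pick its default
 * concurrency. WQ_MEM_RECLAIM guarantees a rescuer thread so these queues
 * can still make forward progress when the system is under memory pressure.
 */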
STATIC void
xfs_destroy_mount_workqueues(
	struct xfs_mount	*mp)
{
	destroy_workqueue(mp->m_sync_workqueue);
	destroy_workqueue(mp->m_blockgc_wq);
	destroy_workqueue(mp->m_inodegc_wq);
	destroy_workqueue(mp->m_reclaim_workqueue);
	destroy_workqueue(mp->m_unwritten_workqueue);
	destroy_workqueue(mp->m_buf_workqueue);
}
static void
xfs_flush_inodes_worker(
	struct work_struct	*work)
{
	struct xfs_mount	*mp = container_of(work, struct xfs_mount,
						   m_flush_inodes_work);
	struct super_block	*sb = mp->m_super;

	if (down_read_trylock(&sb->s_umount)) {
		sync_inodes_sb(sb);
		up_read(&sb->s_umount);
	}
}
/*
 * Flush all dirty data to disk. Must not be called while holding an XFS_ILOCK
 * or a page lock. We use sync_inodes_sb() here to ensure we block while waiting
 * for IO to complete so that we effectively throttle multiple callers to the
 * rate at which IO is completing.
 */
void
xfs_flush_inodes(
	struct xfs_mount	*mp)
{
	/*
	 * If flush_work() returns true then that means we waited for a flush
	 * which was already in progress.  Don't bother running another scan.
	 */
	if (flush_work(&mp->m_flush_inodes_work))
		return;

	queue_work(mp->m_sync_workqueue, &mp->m_flush_inodes_work);
	flush_work(&mp->m_flush_inodes_work);
}
/* Catch misguided souls that try to use this interface on XFS */
STATIC struct inode *
xfs_fs_alloc_inode(
	struct super_block	*sb)
{
	BUG();
	return NULL;
}

/*
 * Now that the generic code is guaranteed not to be accessing
 * the linux inode, we can inactivate and reclaim the inode.
 */
STATIC void
xfs_fs_destroy_inode(
	struct inode		*inode)
{
	struct xfs_inode	*ip = XFS_I(inode);

	trace_xfs_destroy_inode(ip);

	ASSERT(!rwsem_is_locked(&inode->i_rwsem));
	XFS_STATS_INC(ip->i_mount, vn_rele);
	XFS_STATS_INC(ip->i_mount, vn_remove);
	xfs_inode_mark_reclaimable(ip);
}
static void
xfs_fs_dirty_inode(
	struct inode			*inode,
	int				flag)
{
	struct xfs_inode		*ip = XFS_I(inode);
	struct xfs_mount		*mp = ip->i_mount;
	struct xfs_trans		*tp;

	if (!(inode->i_sb->s_flags & SB_LAZYTIME))
		return;
	if (flag != I_DIRTY_SYNC || !(inode->i_state & I_DIRTY_TIME))
		return;

	if (xfs_trans_alloc(mp, &M_RES(mp)->tr_fsyncts, 0, 0, 0, &tp))
		return;
	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
	xfs_trans_log_inode(tp, ip, XFS_ILOG_TIMESTAMP);
	xfs_trans_commit(tp);
}
/*
 * Slab object creation initialisation for the XFS inode.
 * This covers only the idempotent fields in the XFS inode;
 * all other fields need to be initialised on allocation
 * from the slab. This avoids the need to repeatedly initialise
 * fields in the xfs inode that are left in the initialised state
 * when freeing the inode.
 */
STATIC void
xfs_fs_inode_init_once(
	void			*inode)
{
	struct xfs_inode	*ip = inode;

	memset(ip, 0, sizeof(struct xfs_inode));

	/* vfs inode */
	inode_init_once(VFS_I(ip));

	/* xfs inode */
	atomic_set(&ip->i_pincount, 0);
	spin_lock_init(&ip->i_flags_lock);

	mrlock_init(&ip->i_mmaplock, MRLOCK_ALLOW_EQUAL_PRI|MRLOCK_BARRIER,
		     "xfsino", ip->i_ino);
	mrlock_init(&ip->i_lock, MRLOCK_ALLOW_EQUAL_PRI|MRLOCK_BARRIER,
		     "xfsino", ip->i_ino);
}
/*
 * We do an unlocked check for XFS_IDONTCACHE here because we are already
 * serialised against cache hits here via the inode->i_lock and igrab() in
 * xfs_iget_cache_hit(). Hence a lookup that might clear this flag will not be
 * racing with us, and it avoids needing to grab a spinlock here for every inode
 * we drop the final reference on.
 */
STATIC int
xfs_fs_drop_inode(
	struct inode		*inode)
{
	struct xfs_inode	*ip = XFS_I(inode);

	/*
	 * If this unlinked inode is in the middle of recovery, don't
	 * drop the inode just yet; log recovery will take care of
	 * that.  See the comment for this inode flag.
	 */
	if (ip->i_flags & XFS_IRECOVERY) {
		ASSERT(xlog_recovery_needed(ip->i_mount->m_log));
		return 0;
	}

	return generic_drop_inode(inode);
}
static void
xfs_mount_free(
	struct xfs_mount	*mp)
{
	kfree(mp->m_rtname);
	kfree(mp->m_logname);

	kmem_free(mp);
}
STATIC int
xfs_fs_sync_fs(
	struct super_block	*sb,
	int			wait)
{
	struct xfs_mount	*mp = XFS_M(sb);

	trace_xfs_fs_sync_fs(mp, __return_address);

	/*
	 * Doing anything during the async pass would be counterproductive.
	 */
	if (!wait)
		return 0;

	xfs_log_force(mp, XFS_LOG_SYNC);
	if (laptop_mode) {
		/*
		 * The disk must be active because we're syncing.
		 * We schedule log work now (now that the disk is
		 * active) instead of later (when it might not be).
		 */
		flush_delayed_work(&mp->m_log->l_work);
	}

	/*
	 * If we are called with page faults frozen out, it means we are about
	 * to freeze the transaction subsystem. Take the opportunity to shut
	 * down inodegc because once SB_FREEZE_FS is set it's too late to
	 * prevent inactivation races with freeze. The fs doesn't get called
	 * again by the freezing process until after SB_FREEZE_FS has been set,
	 * so it's now or never.  Same logic applies to speculative allocation
	 * garbage collection.
	 *
	 * We don't care if this is a normal syncfs call that does this or
	 * freeze that does this - we can run this multiple times without issue
	 * and we won't race with a restart because a restart can only occur
	 * when the state is either SB_FREEZE_FS or SB_FREEZE_COMPLETE.
	 */
	if (sb->s_writers.frozen == SB_FREEZE_PAGEFAULT) {
		xfs_inodegc_stop(mp);
		xfs_blockgc_stop(mp);
	}

	return 0;
}
STATIC int
xfs_fs_statfs(
	struct dentry		*dentry,
	struct kstatfs		*statp)
{
	struct xfs_mount	*mp = XFS_M(dentry->d_sb);
	xfs_sb_t		*sbp = &mp->m_sb;
	struct xfs_inode	*ip = XFS_I(d_inode(dentry));
	uint64_t		fakeinos, id;
	uint64_t		icount;
	uint64_t		ifree;
	uint64_t		fdblocks;
	xfs_extlen_t		lsize;
	int64_t			ffree;

	/* Wait for whatever inactivations are in progress. */
	xfs_inodegc_flush(mp);

	statp->f_type = XFS_SUPER_MAGIC;
	statp->f_namelen = MAXNAMELEN - 1;

	id = huge_encode_dev(mp->m_ddev_targp->bt_dev);
	statp->f_fsid = u64_to_fsid(id);

	icount = percpu_counter_sum(&mp->m_icount);
	ifree = percpu_counter_sum(&mp->m_ifree);
	fdblocks = percpu_counter_sum(&mp->m_fdblocks);

	spin_lock(&mp->m_sb_lock);
	statp->f_bsize = sbp->sb_blocksize;
	lsize = sbp->sb_logstart ? sbp->sb_logblocks : 0;
	statp->f_blocks = sbp->sb_dblocks - lsize;
	spin_unlock(&mp->m_sb_lock);

	/* make sure statp->f_bfree does not underflow */
	statp->f_bfree = max_t(int64_t, fdblocks - mp->m_alloc_set_aside, 0);
	statp->f_bavail = statp->f_bfree;

	fakeinos = XFS_FSB_TO_INO(mp, statp->f_bfree);
	statp->f_files = min(icount + fakeinos, (uint64_t)XFS_MAXINUMBER);
	if (M_IGEO(mp)->maxicount)
		statp->f_files = min_t(typeof(statp->f_files),
					statp->f_files,
					M_IGEO(mp)->maxicount);

	/* If sb_icount overshot maxicount, report actual allocation */
	statp->f_files = max_t(typeof(statp->f_files),
					statp->f_files,
					sbp->sb_icount);

	/* make sure statp->f_ffree does not underflow */
	ffree = statp->f_files - (icount - ifree);
	statp->f_ffree = max_t(int64_t, ffree, 0);

	if ((ip->i_diflags & XFS_DIFLAG_PROJINHERIT) &&
	    ((mp->m_qflags & (XFS_PQUOTA_ACCT|XFS_PQUOTA_ENFD))) ==
			      (XFS_PQUOTA_ACCT|XFS_PQUOTA_ENFD))
		xfs_qm_statvfs(ip, statp);

	if (XFS_IS_REALTIME_MOUNT(mp) &&
	    (ip->i_diflags & (XFS_DIFLAG_RTINHERIT | XFS_DIFLAG_REALTIME))) {
		statp->f_blocks = sbp->sb_rblocks;
		statp->f_bavail = statp->f_bfree =
			sbp->sb_frextents * sbp->sb_rextsize;
	}

	return 0;
}
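
/*
 * Worked example for the f_files computation above (illustration only,
 * values hypothetical): with 4k blocks and 512 byte inodes a block holds
 * 8 inodes, so 1000 free blocks yield fakeinos = 8000. f_files is then
 * the already-allocated inode count plus those potential inodes, clamped
 * to XFS_MAXINUMBER and, when imaxpct is in effect, to
 * M_IGEO(mp)->maxicount.
 */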
STATIC void
xfs_save_resvblks(struct xfs_mount *mp)
{
	uint64_t resblks = 0;

	mp->m_resblks_save = mp->m_resblks;
	xfs_reserve_blocks(mp, &resblks, NULL);
}

STATIC void
xfs_restore_resvblks(struct xfs_mount *mp)
{
	uint64_t resblks;

	if (mp->m_resblks_save) {
		resblks = mp->m_resblks_save;
		mp->m_resblks_save = 0;
	} else
		resblks = xfs_default_resblks(mp);

	xfs_reserve_blocks(mp, &resblks, NULL);
}
/*
 * Second stage of a freeze. The data is already frozen so we only
 * need to take care of the metadata. Once that's done sync the superblock
 * to the log to dirty it in case of a crash while frozen. This ensures that we
 * will recover the unlinked inode lists on the next mount.
 */
STATIC int
xfs_fs_freeze(
	struct super_block	*sb)
{
	struct xfs_mount	*mp = XFS_M(sb);
	unsigned int		flags;
	int			ret;

	/*
	 * The filesystem is now frozen far enough that memory reclaim
	 * cannot safely operate on the filesystem. Hence we need to
	 * set a GFP_NOFS context here to avoid recursion deadlocks.
	 */
	flags = memalloc_nofs_save();
	xfs_save_resvblks(mp);
	ret = xfs_log_quiesce(mp);
	memalloc_nofs_restore(flags);

	/*
	 * For read-write filesystems, we need to restart the inodegc on error
	 * because we stopped it at SB_FREEZE_PAGEFAULT level and a thaw is not
	 * going to be run to restart it now.  We are at SB_FREEZE_FS level
	 * here, so we can restart safely without racing with a stop in
	 * xfs_fs_sync_fs().
	 */
	if (ret && !(mp->m_flags & XFS_MOUNT_RDONLY)) {
		xfs_blockgc_start(mp);
		xfs_inodegc_start(mp);
	}

	return ret;
}
STATIC int
xfs_fs_unfreeze(
	struct super_block	*sb)
{
	struct xfs_mount	*mp = XFS_M(sb);

	xfs_restore_resvblks(mp);
	xfs_log_work_queue(mp);

	/*
	 * Don't reactivate the inodegc worker on a readonly filesystem because
	 * inodes are sent directly to reclaim.  Don't reactivate the blockgc
	 * worker because there are no speculative preallocations on a readonly
	 * filesystem.
	 */
	if (!(mp->m_flags & XFS_MOUNT_RDONLY)) {
		xfs_blockgc_start(mp);
		xfs_inodegc_start(mp);
	}

	return 0;
}
/*
 * This function fills in xfs_mount_t fields based on mount args.
 * Note: the superblock _has_ now been read in.
 */
STATIC int
xfs_finish_flags(
	struct xfs_mount	*mp)
{
	int			ronly = (mp->m_flags & XFS_MOUNT_RDONLY);

	/* Fail a mount where the logbuf is smaller than the log stripe */
	if (xfs_has_logv2(mp)) {
		if (mp->m_logbsize <= 0 &&
		    mp->m_sb.sb_logsunit > XLOG_BIG_RECORD_BSIZE) {
			mp->m_logbsize = mp->m_sb.sb_logsunit;
		} else if (mp->m_logbsize > 0 &&
			   mp->m_logbsize < mp->m_sb.sb_logsunit) {
			xfs_warn(mp,
		"logbuf size must be greater than or equal to log stripe size");
			return -EINVAL;
		}
	} else {
		/* Fail a mount if the logbuf is larger than 32K */
		if (mp->m_logbsize > XLOG_BIG_RECORD_BSIZE) {
			xfs_warn(mp,
		"logbuf size for version 1 logs must be 16K or 32K");
			return -EINVAL;
		}
	}

	/*
	 * V5 filesystems always use attr2 format for attributes.
	 */
	if (xfs_has_crc(mp) &&
	    (mp->m_flags & XFS_MOUNT_NOATTR2)) {
		xfs_warn(mp, "Cannot mount a V5 filesystem as noattr2. "
			     "attr2 is always enabled for V5 filesystems.");
		return -EINVAL;
	}

	/*
	 * prohibit r/w mounts of read-only filesystems
	 */
	if ((mp->m_sb.sb_flags & XFS_SBF_READONLY) && !ronly) {
		xfs_warn(mp,
			"cannot mount a read-only filesystem as read-write");
		return -EROFS;
	}

	if ((mp->m_qflags & XFS_GQUOTA_ACCT) &&
	    (mp->m_qflags & XFS_PQUOTA_ACCT) &&
	    !xfs_has_pquotino(mp)) {
		xfs_warn(mp,
		  "Super block does not support project and group quota together");
		return -EINVAL;
	}

	return 0;
}
static int
xfs_init_percpu_counters(
	struct xfs_mount	*mp)
{
	int		error;

	error = percpu_counter_init(&mp->m_icount, 0, GFP_KERNEL);
	if (error)
		return -ENOMEM;

	error = percpu_counter_init(&mp->m_ifree, 0, GFP_KERNEL);
	if (error)
		goto free_icount;

	error = percpu_counter_init(&mp->m_fdblocks, 0, GFP_KERNEL);
	if (error)
		goto free_ifree;

	error = percpu_counter_init(&mp->m_delalloc_blks, 0, GFP_KERNEL);
	if (error)
		goto free_fdblocks;

	return 0;

free_fdblocks:
	percpu_counter_destroy(&mp->m_fdblocks);
free_ifree:
	percpu_counter_destroy(&mp->m_ifree);
free_icount:
	percpu_counter_destroy(&mp->m_icount);
	return -ENOMEM;
}

void
xfs_reinit_percpu_counters(
	struct xfs_mount	*mp)
{
	percpu_counter_set(&mp->m_icount, mp->m_sb.sb_icount);
	percpu_counter_set(&mp->m_ifree, mp->m_sb.sb_ifree);
	percpu_counter_set(&mp->m_fdblocks, mp->m_sb.sb_fdblocks);
}

static void
xfs_destroy_percpu_counters(
	struct xfs_mount	*mp)
{
	percpu_counter_destroy(&mp->m_icount);
	percpu_counter_destroy(&mp->m_ifree);
	percpu_counter_destroy(&mp->m_fdblocks);
	ASSERT(XFS_FORCED_SHUTDOWN(mp) ||
	       percpu_counter_sum(&mp->m_delalloc_blks) == 0);
	percpu_counter_destroy(&mp->m_delalloc_blks);
}
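
/*
 * Design note (not part of the original source): m_icount, m_ifree and
 * m_fdblocks are percpu counters because they are hammered by every
 * allocation and free. Updates stay CPU-local and cheap; only readers
 * that need an accurate value (e.g. xfs_fs_statfs() above) pay for a
 * percpu_counter_sum() across all CPUs.
 */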
static int
xfs_inodegc_init_percpu(
	struct xfs_mount	*mp)
{
	struct xfs_inodegc	*gc;
	int			cpu;

	mp->m_inodegc = alloc_percpu(struct xfs_inodegc);
	if (!mp->m_inodegc)
		return -ENOMEM;

	for_each_possible_cpu(cpu) {
		gc = per_cpu_ptr(mp->m_inodegc, cpu);
		init_llist_head(&gc->list);
		gc->items = 0;
		INIT_WORK(&gc->work, xfs_inodegc_worker);
	}
	return 0;
}

static void
xfs_inodegc_free_percpu(
	struct xfs_mount	*mp)
{
	if (!mp->m_inodegc)
		return;
	free_percpu(mp->m_inodegc);
}
static void
xfs_fs_put_super(
	struct super_block	*sb)
{
	struct xfs_mount	*mp = XFS_M(sb);

	/* if ->fill_super failed, we have no mount to tear down */
	if (!sb->s_fs_info)
		return;

	xfs_notice(mp, "Unmounting Filesystem");
	xfs_filestream_unmount(mp);
	xfs_unmountfs(mp);

	xfs_freesb(mp);
	free_percpu(mp->m_stats.xs_stats);
	xfs_mount_list_del(mp);
	xfs_inodegc_free_percpu(mp);
	xfs_destroy_percpu_counters(mp);
	xfs_destroy_mount_workqueues(mp);
	xfs_close_devices(mp);

	sb->s_fs_info = NULL;
	xfs_mount_free(mp);
}
static long
xfs_fs_nr_cached_objects(
	struct super_block	*sb,
	struct shrink_control	*sc)
{
	/* Paranoia: catch incorrect calls during mount setup or teardown */
	if (WARN_ON_ONCE(!sb->s_fs_info))
		return 0;
	return xfs_reclaim_inodes_count(XFS_M(sb));
}

static long
xfs_fs_free_cached_objects(
	struct super_block	*sb,
	struct shrink_control	*sc)
{
	return xfs_reclaim_inodes_nr(XFS_M(sb), sc->nr_to_scan);
}
static const struct super_operations xfs_super_operations = {
	.alloc_inode		= xfs_fs_alloc_inode,
	.destroy_inode		= xfs_fs_destroy_inode,
	.dirty_inode		= xfs_fs_dirty_inode,
	.drop_inode		= xfs_fs_drop_inode,
	.put_super		= xfs_fs_put_super,
	.sync_fs		= xfs_fs_sync_fs,
	.freeze_fs		= xfs_fs_freeze,
	.unfreeze_fs		= xfs_fs_unfreeze,
	.statfs			= xfs_fs_statfs,
	.show_options		= xfs_fs_show_options,
	.nr_cached_objects	= xfs_fs_nr_cached_objects,
	.free_cached_objects	= xfs_fs_free_cached_objects,
};
static int
suffix_kstrtoint(
	const char	*s,
	unsigned int	base,
	int		*res)
{
	int		last, shift_left_factor = 0, _res;
	char		*value;
	int		ret = 0;

	value = kstrdup(s, GFP_KERNEL);
	if (!value)
		return -ENOMEM;

	last = strlen(value) - 1;
	if (value[last] == 'K' || value[last] == 'k') {
		shift_left_factor = 10;
		value[last] = '\0';
	}
	if (value[last] == 'M' || value[last] == 'm') {
		shift_left_factor = 20;
		value[last] = '\0';
	}
	if (value[last] == 'G' || value[last] == 'g') {
		shift_left_factor = 30;
		value[last] = '\0';
	}

	if (kstrtoint(value, base, &_res))
		ret = -EINVAL;
	kfree(value);
	*res = _res << shift_left_factor;
	return ret;
}
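
/*
 * Worked example (illustration only):
 *
 *	suffix_kstrtoint("32k", 10, &res)  -> res = 32 << 10 = 32768
 *	suffix_kstrtoint("1m",  10, &res)  -> res = 1 << 20 = 1048576
 *	suffix_kstrtoint("64",  10, &res)  -> res = 64 (no suffix, no shift)
 */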
static inline void
xfs_fs_warn_deprecated(
	struct fs_context	*fc,
	struct fs_parameter	*param,
	uint64_t		flag,
	bool			value)
{
	/* Don't print the warning if reconfiguring and current mount point
	 * already had the flag set
	 */
	if ((fc->purpose & FS_CONTEXT_FOR_RECONFIGURE) &&
	    !!(XFS_M(fc->root->d_sb)->m_flags & flag) == value)
		return;
	xfs_warn(fc->s_fs_info, "%s mount option is deprecated.", param->key);
}
/*
 * Set mount state from a mount option.
 *
 * NOTE: mp->m_super is NULL here!
 */
static int
xfs_fs_parse_param(
	struct fs_context	*fc,
	struct fs_parameter	*param)
{
	struct xfs_mount	*parsing_mp = fc->s_fs_info;
	struct fs_parse_result	result;
	int			size = 0;
	int			opt;

	opt = fs_parse(fc, xfs_fs_parameters, param, &result);
	if (opt < 0)
		return opt;

	switch (opt) {
	case Opt_logbufs:
		parsing_mp->m_logbufs = result.uint_32;
		return 0;
	case Opt_logbsize:
		if (suffix_kstrtoint(param->string, 10, &parsing_mp->m_logbsize))
			return -EINVAL;
		return 0;
	case Opt_logdev:
		kfree(parsing_mp->m_logname);
		parsing_mp->m_logname = kstrdup(param->string, GFP_KERNEL);
		if (!parsing_mp->m_logname)
			return -ENOMEM;
		return 0;
	case Opt_rtdev:
		kfree(parsing_mp->m_rtname);
		parsing_mp->m_rtname = kstrdup(param->string, GFP_KERNEL);
		if (!parsing_mp->m_rtname)
			return -ENOMEM;
		return 0;
	case Opt_allocsize:
		if (suffix_kstrtoint(param->string, 10, &size))
			return -EINVAL;
		parsing_mp->m_allocsize_log = ffs(size) - 1;
		parsing_mp->m_flags |= XFS_MOUNT_ALLOCSIZE;
		return 0;
	case Opt_grpid:
	case Opt_bsdgroups:
		parsing_mp->m_flags |= XFS_MOUNT_GRPID;
		return 0;
	case Opt_nogrpid:
	case Opt_sysvgroups:
		parsing_mp->m_flags &= ~XFS_MOUNT_GRPID;
		return 0;
	case Opt_wsync:
		parsing_mp->m_flags |= XFS_MOUNT_WSYNC;
		return 0;
	case Opt_norecovery:
		parsing_mp->m_flags |= XFS_MOUNT_NORECOVERY;
		return 0;
	case Opt_noalign:
		parsing_mp->m_flags |= XFS_MOUNT_NOALIGN;
		return 0;
	case Opt_swalloc:
		parsing_mp->m_flags |= XFS_MOUNT_SWALLOC;
		return 0;
	case Opt_sunit:
		parsing_mp->m_dalign = result.uint_32;
		return 0;
	case Opt_swidth:
		parsing_mp->m_swidth = result.uint_32;
		return 0;
	case Opt_inode32:
		parsing_mp->m_flags |= XFS_MOUNT_SMALL_INUMS;
		return 0;
	case Opt_inode64:
		parsing_mp->m_flags &= ~XFS_MOUNT_SMALL_INUMS;
		return 0;
	case Opt_nouuid:
		parsing_mp->m_flags |= XFS_MOUNT_NOUUID;
		return 0;
	case Opt_largeio:
		parsing_mp->m_flags |= XFS_MOUNT_LARGEIO;
		return 0;
	case Opt_nolargeio:
		parsing_mp->m_flags &= ~XFS_MOUNT_LARGEIO;
		return 0;
	case Opt_filestreams:
		parsing_mp->m_flags |= XFS_MOUNT_FILESTREAMS;
		return 0;
	case Opt_noquota:
		parsing_mp->m_qflags &= ~XFS_ALL_QUOTA_ACCT;
		parsing_mp->m_qflags &= ~XFS_ALL_QUOTA_ENFD;
		return 0;
	case Opt_quota:
	case Opt_uquota:
	case Opt_usrquota:
		parsing_mp->m_qflags |= (XFS_UQUOTA_ACCT | XFS_UQUOTA_ENFD);
		return 0;
	case Opt_qnoenforce:
	case Opt_uqnoenforce:
		parsing_mp->m_qflags |= XFS_UQUOTA_ACCT;
		parsing_mp->m_qflags &= ~XFS_UQUOTA_ENFD;
		return 0;
	case Opt_pquota:
	case Opt_prjquota:
		parsing_mp->m_qflags |= (XFS_PQUOTA_ACCT | XFS_PQUOTA_ENFD);
		return 0;
	case Opt_pqnoenforce:
		parsing_mp->m_qflags |= XFS_PQUOTA_ACCT;
		parsing_mp->m_qflags &= ~XFS_PQUOTA_ENFD;
		return 0;
	case Opt_gquota:
	case Opt_grpquota:
		parsing_mp->m_qflags |= (XFS_GQUOTA_ACCT | XFS_GQUOTA_ENFD);
		return 0;
	case Opt_gqnoenforce:
		parsing_mp->m_qflags |= XFS_GQUOTA_ACCT;
		parsing_mp->m_qflags &= ~XFS_GQUOTA_ENFD;
		return 0;
	case Opt_discard:
		parsing_mp->m_flags |= XFS_MOUNT_DISCARD;
		return 0;
	case Opt_nodiscard:
		parsing_mp->m_flags &= ~XFS_MOUNT_DISCARD;
		return 0;
#ifdef CONFIG_FS_DAX
	case Opt_dax:
		xfs_mount_set_dax_mode(parsing_mp, XFS_DAX_ALWAYS);
		return 0;
	case Opt_dax_enum:
		xfs_mount_set_dax_mode(parsing_mp, result.uint_32);
		return 0;
#endif
	/* Following mount options will be removed in September 2025 */
	case Opt_ikeep:
		xfs_fs_warn_deprecated(fc, param, XFS_MOUNT_IKEEP, true);
		parsing_mp->m_flags |= XFS_MOUNT_IKEEP;
		return 0;
	case Opt_noikeep:
		xfs_fs_warn_deprecated(fc, param, XFS_MOUNT_IKEEP, false);
		parsing_mp->m_flags &= ~XFS_MOUNT_IKEEP;
		return 0;
	case Opt_attr2:
		xfs_fs_warn_deprecated(fc, param, XFS_MOUNT_ATTR2, true);
		parsing_mp->m_flags |= XFS_MOUNT_ATTR2;
		return 0;
	case Opt_noattr2:
		xfs_fs_warn_deprecated(fc, param, XFS_MOUNT_NOATTR2, true);
		parsing_mp->m_flags |= XFS_MOUNT_NOATTR2;
		return 0;
	default:
		xfs_warn(parsing_mp, "unknown mount option [%s].", param->key);
		return -EINVAL;
	}

	return 0;
}
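
/*
 * Example (illustration only, option values hypothetical):
 * "mount -o logbsize=64k,gquota /dev/sda1 /mnt" reaches this parser as two
 * fs_parameters. Opt_logbsize stores 65536 in m_logbsize via
 * suffix_kstrtoint(), and Opt_gquota sets XFS_GQUOTA_ACCT | XFS_GQUOTA_ENFD
 * in m_qflags. Validation of the combined result is deferred to
 * xfs_fs_validate_params() below.
 */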
static int
xfs_fs_validate_params(
	struct xfs_mount	*mp)
{
	/*
	 * no recovery flag requires a read-only mount
	 */
	if ((mp->m_flags & XFS_MOUNT_NORECOVERY) &&
	    !(mp->m_flags & XFS_MOUNT_RDONLY)) {
		xfs_warn(mp, "no-recovery mounts must be read-only.");
		return -EINVAL;
	}

	if ((mp->m_flags & (XFS_MOUNT_ATTR2|XFS_MOUNT_NOATTR2)) ==
			  (XFS_MOUNT_ATTR2|XFS_MOUNT_NOATTR2)) {
		xfs_warn(mp, "attr2 and noattr2 cannot both be specified.");
		return -EINVAL;
	}

	if ((mp->m_flags & XFS_MOUNT_NOALIGN) &&
	    (mp->m_dalign || mp->m_swidth)) {
		xfs_warn(mp,
	"sunit and swidth options incompatible with the noalign option");
		return -EINVAL;
	}

	if (!IS_ENABLED(CONFIG_XFS_QUOTA) && mp->m_qflags != 0) {
		xfs_warn(mp, "quota support not available in this kernel.");
		return -EINVAL;
	}

	if ((mp->m_dalign && !mp->m_swidth) ||
	    (!mp->m_dalign && mp->m_swidth)) {
		xfs_warn(mp, "sunit and swidth must be specified together");
		return -EINVAL;
	}

	if (mp->m_dalign && (mp->m_swidth % mp->m_dalign != 0)) {
		xfs_warn(mp,
	"stripe width (%d) must be a multiple of the stripe unit (%d)",
			mp->m_swidth, mp->m_dalign);
		return -EINVAL;
	}

	if (mp->m_logbufs != -1 &&
	    mp->m_logbufs != 0 &&
	    (mp->m_logbufs < XLOG_MIN_ICLOGS ||
	     mp->m_logbufs > XLOG_MAX_ICLOGS)) {
		xfs_warn(mp, "invalid logbufs value: %d [not %d-%d]",
			mp->m_logbufs, XLOG_MIN_ICLOGS, XLOG_MAX_ICLOGS);
		return -EINVAL;
	}

	if (mp->m_logbsize != -1 &&
	    mp->m_logbsize !=  0 &&
	    (mp->m_logbsize < XLOG_MIN_RECORD_BSIZE ||
	     mp->m_logbsize > XLOG_MAX_RECORD_BSIZE ||
	     !is_power_of_2(mp->m_logbsize))) {
		xfs_warn(mp,
			"invalid logbufsize: %d [not 16k,32k,64k,128k or 256k]",
			mp->m_logbsize);
		return -EINVAL;
	}

	if ((mp->m_flags & XFS_MOUNT_ALLOCSIZE) &&
	    (mp->m_allocsize_log > XFS_MAX_IO_LOG ||
	     mp->m_allocsize_log < XFS_MIN_IO_LOG)) {
		xfs_warn(mp, "invalid log iosize: %d [not %d-%d]",
			mp->m_allocsize_log, XFS_MIN_IO_LOG, XFS_MAX_IO_LOG);
		return -EINVAL;
	}

	return 0;
}
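
/*
 * For example (illustration only): "-o sunit=128" without a matching
 * "swidth" fails here with -EINVAL, as does "-o logbsize=48k", because
 * 49152 is not a power of two within [XLOG_MIN_RECORD_BSIZE,
 * XLOG_MAX_RECORD_BSIZE].
 */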
static int
xfs_fs_fill_super(
	struct super_block	*sb,
	struct fs_context	*fc)
{
	struct xfs_mount	*mp = sb->s_fs_info;
	struct inode		*root;
	int			flags = 0, error;

	mp->m_super = sb;

	error = xfs_fs_validate_params(mp);
	if (error)
		goto out_free_names;

	sb_min_blocksize(sb, BBSIZE);
	sb->s_xattr = xfs_xattr_handlers;
	sb->s_export_op = &xfs_export_operations;
#ifdef CONFIG_XFS_QUOTA
	sb->s_qcop = &xfs_quotactl_operations;
	sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP | QTYPE_MASK_PRJ;
#endif
	sb->s_op = &xfs_super_operations;

	/*
	 * Delay mount work if the debug hook is set. This is debug
	 * instrumentation to coordinate simulation of xfs mount failures with
	 * VFS superblock operations
	 */
	if (xfs_globals.mount_delay) {
		xfs_notice(mp, "Delaying mount for %d seconds.",
			xfs_globals.mount_delay);
		msleep(xfs_globals.mount_delay * 1000);
	}

	if (fc->sb_flags & SB_SILENT)
		flags |= XFS_MFSI_QUIET;

	error = xfs_open_devices(mp);
	if (error)
		goto out_free_names;

	error = xfs_init_mount_workqueues(mp);
	if (error)
		goto out_close_devices;

	error = xfs_init_percpu_counters(mp);
	if (error)
		goto out_destroy_workqueues;

	error = xfs_inodegc_init_percpu(mp);
	if (error)
		goto out_destroy_counters;

	/*
	 * All percpu data structures requiring cleanup when a cpu goes offline
	 * must be allocated before adding this @mp to the cpu-dead handler's
	 * mount list.
	 */
	xfs_mount_list_add(mp);

	/* Allocate stats memory before we do operations that might use it */
	mp->m_stats.xs_stats = alloc_percpu(struct xfsstats);
	if (!mp->m_stats.xs_stats) {
		error = -ENOMEM;
		goto out_destroy_inodegc;
	}

	error = xfs_readsb(mp, flags);
	if (error)
		goto out_free_stats;

	error = xfs_finish_flags(mp);
	if (error)
		goto out_free_sb;

	error = xfs_setup_devices(mp);
	if (error)
		goto out_free_sb;

	/* V4 support is undergoing deprecation. */
	if (!xfs_has_crc(mp)) {
#ifdef CONFIG_XFS_SUPPORT_V4
		xfs_warn_once(mp,
	"Deprecated V4 format (crc=0) will not be supported after September 2030.");
#else
		xfs_warn(mp,
	"Deprecated V4 format (crc=0) not supported by kernel.");
		error = -EINVAL;
		goto out_free_sb;
#endif
	}

	/* Filesystem claims it needs repair, so refuse the mount. */
	if (xfs_sb_version_needsrepair(&mp->m_sb)) {
		xfs_warn(mp, "Filesystem needs repair.  Please run xfs_repair.");
		error = -EFSCORRUPTED;
		goto out_free_sb;
	}

	/*
	 * Don't touch the filesystem if a user tool thinks it owns the primary
	 * superblock.  mkfs doesn't clear the flag from secondary supers, so
	 * we don't check them at all.
	 */
	if (mp->m_sb.sb_inprogress) {
		xfs_warn(mp, "Offline file system operation in progress!");
		error = -EFSCORRUPTED;
		goto out_free_sb;
	}

	/*
	 * Until this is fixed only page-sized or smaller data blocks work.
	 */
	if (mp->m_sb.sb_blocksize > PAGE_SIZE) {
		xfs_warn(mp,
		"File system with blocksize %d bytes. "
		"Only pagesize (%ld) or less will currently work.",
				mp->m_sb.sb_blocksize, PAGE_SIZE);
		error = -ENOSYS;
		goto out_free_sb;
	}

	/* Ensure this filesystem fits in the page cache limits */
	if (xfs_sb_validate_fsb_count(&mp->m_sb, mp->m_sb.sb_dblocks) ||
	    xfs_sb_validate_fsb_count(&mp->m_sb, mp->m_sb.sb_rblocks)) {
		xfs_warn(mp,
		"file system too large to be mounted on this system.");
		error = -EFBIG;
		goto out_free_sb;
	}

	/*
	 * XFS block mappings use 54 bits to store the logical block offset.
	 * This should suffice to handle the maximum file size that the VFS
	 * supports (currently 2^63 bytes on 64-bit and ULONG_MAX << PAGE_SHIFT
	 * bytes on 32-bit), but as XFS and VFS have gotten the s_maxbytes
	 * calculation wrong on 32-bit kernels in the past, we'll add a WARN_ON
	 * to check this assertion.
	 *
	 * Avoid integer overflow by comparing the maximum bmbt offset to the
	 * maximum pagecache offset in units of fs blocks.
	 */
	if (!xfs_verify_fileoff(mp, XFS_B_TO_FSBT(mp, MAX_LFS_FILESIZE))) {
		xfs_warn(mp,
"MAX_LFS_FILESIZE block offset (%llu) exceeds extent map maximum (%llu)!",
			 XFS_B_TO_FSBT(mp, MAX_LFS_FILESIZE),
			 XFS_MAX_FILEOFF);
		error = -EINVAL;
		goto out_free_sb;
	}

	error = xfs_filestream_mount(mp);
	if (error)
		goto out_free_sb;

	/*
	 * we must configure the block size in the superblock before we run the
	 * full mount process as the mount process can lookup and cache inodes.
	 */
	sb->s_magic = XFS_SUPER_MAGIC;
	sb->s_blocksize = mp->m_sb.sb_blocksize;
	sb->s_blocksize_bits = ffs(sb->s_blocksize) - 1;
	sb->s_maxbytes = MAX_LFS_FILESIZE;
	sb->s_max_links = XFS_MAXLINK;
	sb->s_time_gran = 1;
	if (xfs_has_bigtime(mp)) {
		sb->s_time_min = xfs_bigtime_to_unix(XFS_BIGTIME_TIME_MIN);
		sb->s_time_max = xfs_bigtime_to_unix(XFS_BIGTIME_TIME_MAX);
	} else {
		sb->s_time_min = XFS_LEGACY_TIME_MIN;
		sb->s_time_max = XFS_LEGACY_TIME_MAX;
	}
	trace_xfs_inode_timestamp_range(mp, sb->s_time_min, sb->s_time_max);
	sb->s_iflags |= SB_I_CGROUPWB;

	set_posix_acl_flag(sb);

	/* version 5 superblocks support inode version counters. */
	if (XFS_SB_VERSION_NUM(&mp->m_sb) == XFS_SB_VERSION_5)
		sb->s_flags |= SB_I_VERSION;

	if (mp->m_flags & XFS_MOUNT_DAX_ALWAYS) {
		bool rtdev_is_dax = false, datadev_is_dax;

		xfs_warn(mp,
		"DAX enabled. Warning: EXPERIMENTAL, use at your own risk");

		datadev_is_dax = bdev_dax_supported(mp->m_ddev_targp->bt_bdev,
			sb->s_blocksize);
		if (mp->m_rtdev_targp)
			rtdev_is_dax = bdev_dax_supported(
				mp->m_rtdev_targp->bt_bdev, sb->s_blocksize);
		if (!rtdev_is_dax && !datadev_is_dax) {
			xfs_alert(mp,
			"DAX unsupported by block device. Turning off DAX.");
			xfs_mount_set_dax_mode(mp, XFS_DAX_NEVER);
		}
		if (xfs_has_reflink(mp)) {
			xfs_alert(mp,
		"DAX and reflink cannot be used together!");
			error = -EINVAL;
			goto out_filestream_unmount;
		}
	}

	if (mp->m_flags & XFS_MOUNT_DISCARD) {
		struct request_queue *q = bdev_get_queue(sb->s_bdev);

		if (!blk_queue_discard(q)) {
			xfs_warn(mp, "mounting with \"discard\" option, but "
					"the device does not support discard");
			mp->m_flags &= ~XFS_MOUNT_DISCARD;
		}
	}

	if (xfs_has_reflink(mp)) {
		if (mp->m_sb.sb_rblocks) {
			xfs_alert(mp,
	"reflink not compatible with realtime device!");
			error = -EINVAL;
			goto out_filestream_unmount;
		}

		if (xfs_globals.always_cow) {
			xfs_info(mp, "using DEBUG-only always_cow mode.");
			mp->m_always_cow = true;
		}
	}

	if (xfs_has_rmapbt(mp) && mp->m_sb.sb_rblocks) {
		xfs_alert(mp,
	"reverse mapping btree not compatible with realtime device!");
		error = -EINVAL;
		goto out_filestream_unmount;
	}

	error = xfs_mountfs(mp);
	if (error)
		goto out_filestream_unmount;

	root = igrab(VFS_I(mp->m_rootip));
	if (!root) {
		error = -ENOENT;
		goto out_unmount;
	}
	sb->s_root = d_make_root(root);
	if (!sb->s_root) {
		error = -ENOMEM;
		goto out_unmount;
	}

	return 0;

 out_filestream_unmount:
	xfs_filestream_unmount(mp);
 out_free_sb:
	xfs_freesb(mp);
 out_free_stats:
	free_percpu(mp->m_stats.xs_stats);
 out_destroy_inodegc:
	xfs_mount_list_del(mp);
	xfs_inodegc_free_percpu(mp);
 out_destroy_counters:
	xfs_destroy_percpu_counters(mp);
 out_destroy_workqueues:
	xfs_destroy_mount_workqueues(mp);
 out_close_devices:
	xfs_close_devices(mp);
 out_free_names:
	sb->s_fs_info = NULL;
	xfs_mount_free(mp);
	return error;

 out_unmount:
	xfs_filestream_unmount(mp);
	xfs_unmountfs(mp);
	goto out_free_sb;
}
static int
xfs_fs_get_tree(
	struct fs_context	*fc)
{
	return get_tree_bdev(fc, xfs_fs_fill_super);
}
static int
xfs_remount_rw(
	struct xfs_mount	*mp)
{
	struct xfs_sb		*sbp = &mp->m_sb;
	int error;

	if (mp->m_flags & XFS_MOUNT_NORECOVERY) {
		xfs_warn(mp,
			"ro->rw transition prohibited on norecovery mount");
		return -EINVAL;
	}

	if (XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_5 &&
	    xfs_sb_has_ro_compat_feature(sbp, XFS_SB_FEAT_RO_COMPAT_UNKNOWN)) {
		xfs_warn(mp,
	"ro->rw transition prohibited on unknown (0x%x) ro-compat filesystem",
			(sbp->sb_features_ro_compat &
				XFS_SB_FEAT_RO_COMPAT_UNKNOWN));
		return -EINVAL;
	}

	mp->m_flags &= ~XFS_MOUNT_RDONLY;

	/*
	 * If this is the first remount to writeable state we might have some
	 * superblock changes to update.
	 */
	if (mp->m_update_sb) {
		error = xfs_sync_sb(mp, false);
		if (error) {
			xfs_warn(mp, "failed to write sb changes");
			return error;
		}
		mp->m_update_sb = false;
	}

	/*
	 * Fill out the reserve pool if it is empty. Use the stashed value if
	 * it is non-zero, otherwise go with the default.
	 */
	xfs_restore_resvblks(mp);
	xfs_log_work_queue(mp);

	/* Recover any CoW blocks that never got remapped. */
	error = xfs_reflink_recover_cow(mp);
	if (error) {
		xfs_err(mp,
			"Error %d recovering leftover CoW allocations.", error);
		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
		return error;
	}
	xfs_blockgc_start(mp);

	/* Create the per-AG metadata reservation pool. */
	error = xfs_fs_reserve_ag_blocks(mp);
	if (error && error != -ENOSPC)
		return error;

	/* Re-enable the background inode inactivation worker. */
	xfs_inodegc_start(mp);

	return 0;
}
static int
xfs_remount_ro(
	struct xfs_mount	*mp)
{
	int error;

	/*
	 * Cancel background eofb scanning so it cannot race with the final
	 * log force+buftarg wait and deadlock the remount.
	 */
	xfs_blockgc_stop(mp);

	/* Get rid of any leftover CoW reservations... */
	error = xfs_blockgc_free_space(mp, NULL);
	if (error) {
		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
		return error;
	}

	/*
	 * Stop the inodegc background worker.  xfs_fs_reconfigure already
	 * flushed all pending inodegc work when it sync'd the filesystem.
	 * The VFS holds s_umount, so we know that inodes cannot enter
	 * xfs_fs_destroy_inode during a remount operation.  In readonly mode
	 * we send inodes straight to reclaim, so no inodes will be queued.
	 */
	xfs_inodegc_stop(mp);

	/* Free the per-AG metadata reservation pool. */
	error = xfs_fs_unreserve_ag_blocks(mp);
	if (error) {
		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
		return error;
	}

	/*
	 * Before we sync the metadata, we need to free up the reserve block
	 * pool so that the used block count in the superblock on disk is
	 * correct at the end of the remount. Stash the current reserve pool
	 * size so that if we get remounted rw, we can return it to the same
	 * size.
	 */
	xfs_save_resvblks(mp);

	xfs_log_clean(mp);
	mp->m_flags |= XFS_MOUNT_RDONLY;

	return 0;
}
/*
 * Logically we would return an error here to prevent users from believing
 * they might have changed mount options using remount which can't be changed.
 *
 * But unfortunately mount(8) adds all options from mtab and fstab to the mount
 * arguments in some cases so we can't blindly reject options, but have to
 * check for each specified option if it actually differs from the currently
 * set option and only reject it if that's the case.
 *
 * Until that is implemented we return success for every remount request, and
 * silently ignore all options that we can't actually change.
 */
static int
xfs_fs_reconfigure(
	struct fs_context *fc)
{
	struct xfs_mount	*mp = XFS_M(fc->root->d_sb);
	struct xfs_mount        *new_mp = fc->s_fs_info;
	xfs_sb_t		*sbp = &mp->m_sb;
	int			flags = fc->sb_flags;
	int			error;

	/* version 5 superblocks always support version counters. */
	if (XFS_SB_VERSION_NUM(&mp->m_sb) == XFS_SB_VERSION_5)
		fc->sb_flags |= SB_I_VERSION;

	error = xfs_fs_validate_params(new_mp);
	if (error)
		return error;

	sync_filesystem(mp->m_super);

	/* inode32 -> inode64 */
	if ((mp->m_flags & XFS_MOUNT_SMALL_INUMS) &&
	    !(new_mp->m_flags & XFS_MOUNT_SMALL_INUMS)) {
		mp->m_flags &= ~XFS_MOUNT_SMALL_INUMS;
		mp->m_maxagi = xfs_set_inode_alloc(mp, sbp->sb_agcount);
	}

	/* inode64 -> inode32 */
	if (!(mp->m_flags & XFS_MOUNT_SMALL_INUMS) &&
	    (new_mp->m_flags & XFS_MOUNT_SMALL_INUMS)) {
		mp->m_flags |= XFS_MOUNT_SMALL_INUMS;
		mp->m_maxagi = xfs_set_inode_alloc(mp, sbp->sb_agcount);
	}

	/* ro -> rw */
	if ((mp->m_flags & XFS_MOUNT_RDONLY) && !(flags & SB_RDONLY)) {
		error = xfs_remount_rw(mp);
		if (error)
			return error;
	}

	/* rw -> ro */
	if (!(mp->m_flags & XFS_MOUNT_RDONLY) && (flags & SB_RDONLY)) {
		error = xfs_remount_ro(mp);
		if (error)
			return error;
	}

	return 0;
}
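
/*
 * Example (illustration only): "mount -o remount,ro /mnt" arrives here
 * with SB_RDONLY set in fc->sb_flags while the live mount is still
 * read-write, so the rw -> ro branch above runs xfs_remount_ro().
 */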
static void xfs_fs_free(
	struct fs_context	*fc)
{
	struct xfs_mount	*mp = fc->s_fs_info;

	/*
	 * mp is stored in the fs_context when it is initialized.
	 * mp is transferred to the superblock on a successful mount,
	 * but if an error occurs before the transfer we have to free
	 * it here.
	 */
	if (mp)
		xfs_mount_free(mp);
}

static const struct fs_context_operations xfs_context_ops = {
	.parse_param = xfs_fs_parse_param,
	.get_tree    = xfs_fs_get_tree,
	.reconfigure = xfs_fs_reconfigure,
	.free        = xfs_fs_free,
};
static int xfs_init_fs_context(
	struct fs_context	*fc)
{
	struct xfs_mount	*mp;

	mp = kmem_alloc(sizeof(struct xfs_mount), KM_ZERO);
	if (!mp)
		return -ENOMEM;

	spin_lock_init(&mp->m_sb_lock);
	spin_lock_init(&mp->m_agirotor_lock);
	INIT_RADIX_TREE(&mp->m_perag_tree, GFP_ATOMIC);
	spin_lock_init(&mp->m_perag_lock);
	mutex_init(&mp->m_growlock);
	INIT_WORK(&mp->m_flush_inodes_work, xfs_flush_inodes_worker);
	INIT_DELAYED_WORK(&mp->m_reclaim_work, xfs_reclaim_worker);
	mp->m_kobj.kobject.kset = xfs_kset;
	/*
	 * We don't create the finobt per-ag space reservation until after log
	 * recovery, so we must set this to true so that an ifree transaction
	 * started during log recovery will not depend on space reservations
	 * for finobt expansion.
	 */
	mp->m_finobt_nores = true;

	/*
	 * These can be overridden by the mount option parsing.
	 */
	mp->m_logbufs = -1;
	mp->m_logbsize = -1;
	mp->m_allocsize_log = 16; /* 64k */

	/*
	 * Copy binary VFS mount flags we are interested in.
	 */
	if (fc->sb_flags & SB_RDONLY)
		mp->m_flags |= XFS_MOUNT_RDONLY;
	if (fc->sb_flags & SB_DIRSYNC)
		mp->m_flags |= XFS_MOUNT_DIRSYNC;
	if (fc->sb_flags & SB_SYNCHRONOUS)
		mp->m_flags |= XFS_MOUNT_WSYNC;

	fc->s_fs_info = mp;
	fc->ops = &xfs_context_ops;

	return 0;
}
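
/*
 * Illustrative lifecycle note (not part of the original source): with the
 * new mount API the kernel drives this code as roughly
 *
 *	fsopen("xfs")            -> xfs_init_fs_context()
 *	fsconfig(..., "logbufs") -> xfs_fs_parse_param()
 *	fsmount()                -> xfs_fs_get_tree() -> xfs_fs_fill_super()
 *
 * while a classic mount(2) call performs the same sequence internally.
 */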
static struct file_system_type xfs_fs_type = {
	.owner			= THIS_MODULE,
	.name			= "xfs",
	.init_fs_context	= xfs_init_fs_context,
	.parameters		= xfs_fs_parameters,
	.kill_sb		= kill_block_super,
	.fs_flags		= FS_REQUIRES_DEV | FS_ALLOW_IDMAP,
};
MODULE_ALIAS_FS("xfs");
STATIC int __init
xfs_init_zones(void)
{
	xfs_log_ticket_zone = kmem_cache_create("xfs_log_ticket",
						sizeof(struct xlog_ticket),
						0, 0, NULL);
	if (!xfs_log_ticket_zone)
		goto out;

	xfs_bmap_free_item_zone = kmem_cache_create("xfs_bmap_free_item",
					sizeof(struct xfs_extent_free_item),
					0, 0, NULL);
	if (!xfs_bmap_free_item_zone)
		goto out_destroy_log_ticket_zone;

	xfs_btree_cur_zone = kmem_cache_create("xfs_btree_cur",
					       sizeof(struct xfs_btree_cur),
					       0, 0, NULL);
	if (!xfs_btree_cur_zone)
		goto out_destroy_bmap_free_item_zone;

	xfs_da_state_zone = kmem_cache_create("xfs_da_state",
					      sizeof(struct xfs_da_state),
					      0, 0, NULL);
	if (!xfs_da_state_zone)
		goto out_destroy_btree_cur_zone;

	xfs_ifork_zone = kmem_cache_create("xfs_ifork",
					   sizeof(struct xfs_ifork),
					   0, 0, NULL);
	if (!xfs_ifork_zone)
		goto out_destroy_da_state_zone;

	xfs_trans_zone = kmem_cache_create("xfs_trans",
					   sizeof(struct xfs_trans),
					   0, 0, NULL);
	if (!xfs_trans_zone)
		goto out_destroy_ifork_zone;

	/*
	 * The size of the zone allocated buf log item is the maximum
	 * size possible under XFS.  This wastes a little bit of memory,
	 * but it is much faster.
	 */
	xfs_buf_item_zone = kmem_cache_create("xfs_buf_item",
					      sizeof(struct xfs_buf_log_item),
					      0, 0, NULL);
	if (!xfs_buf_item_zone)
		goto out_destroy_trans_zone;

	xfs_efd_zone = kmem_cache_create("xfs_efd_item",
					(sizeof(struct xfs_efd_log_item) +
					(XFS_EFD_MAX_FAST_EXTENTS - 1) *
					sizeof(struct xfs_extent)),
					0, 0, NULL);
	if (!xfs_efd_zone)
		goto out_destroy_buf_item_zone;

	xfs_efi_zone = kmem_cache_create("xfs_efi_item",
					 (sizeof(struct xfs_efi_log_item) +
					 (XFS_EFI_MAX_FAST_EXTENTS - 1) *
					 sizeof(struct xfs_extent)),
					 0, 0, NULL);
	if (!xfs_efi_zone)
		goto out_destroy_efd_zone;

	xfs_inode_zone = kmem_cache_create("xfs_inode",
					   sizeof(struct xfs_inode), 0,
					   (SLAB_HWCACHE_ALIGN |
					    SLAB_RECLAIM_ACCOUNT |
					    SLAB_MEM_SPREAD | SLAB_ACCOUNT),
					   xfs_fs_inode_init_once);
	if (!xfs_inode_zone)
		goto out_destroy_efi_zone;

	xfs_ili_zone = kmem_cache_create("xfs_ili",
					 sizeof(struct xfs_inode_log_item), 0,
					 SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD,
					 NULL);
	if (!xfs_ili_zone)
		goto out_destroy_inode_zone;

	xfs_icreate_zone = kmem_cache_create("xfs_icr",
					     sizeof(struct xfs_icreate_item),
					     0, 0, NULL);
	if (!xfs_icreate_zone)
		goto out_destroy_ili_zone;

	xfs_rud_zone = kmem_cache_create("xfs_rud_item",
					 sizeof(struct xfs_rud_log_item),
					 0, 0, NULL);
	if (!xfs_rud_zone)
		goto out_destroy_icreate_zone;

	xfs_rui_zone = kmem_cache_create("xfs_rui_item",
			xfs_rui_log_item_sizeof(XFS_RUI_MAX_FAST_EXTENTS),
			0, 0, NULL);
	if (!xfs_rui_zone)
		goto out_destroy_rud_zone;

	xfs_cud_zone = kmem_cache_create("xfs_cud_item",
					 sizeof(struct xfs_cud_log_item),
					 0, 0, NULL);
	if (!xfs_cud_zone)
		goto out_destroy_rui_zone;

	xfs_cui_zone = kmem_cache_create("xfs_cui_item",
			xfs_cui_log_item_sizeof(XFS_CUI_MAX_FAST_EXTENTS),
			0, 0, NULL);
	if (!xfs_cui_zone)
		goto out_destroy_cud_zone;

	xfs_bud_zone = kmem_cache_create("xfs_bud_item",
					 sizeof(struct xfs_bud_log_item),
					 0, 0, NULL);
	if (!xfs_bud_zone)
		goto out_destroy_cui_zone;

	xfs_bui_zone = kmem_cache_create("xfs_bui_item",
			xfs_bui_log_item_sizeof(XFS_BUI_MAX_FAST_EXTENTS),
			0, 0, NULL);
	if (!xfs_bui_zone)
		goto out_destroy_bud_zone;

	return 0;

 out_destroy_bud_zone:
	kmem_cache_destroy(xfs_bud_zone);
 out_destroy_cui_zone:
	kmem_cache_destroy(xfs_cui_zone);
 out_destroy_cud_zone:
	kmem_cache_destroy(xfs_cud_zone);
 out_destroy_rui_zone:
	kmem_cache_destroy(xfs_rui_zone);
 out_destroy_rud_zone:
	kmem_cache_destroy(xfs_rud_zone);
 out_destroy_icreate_zone:
	kmem_cache_destroy(xfs_icreate_zone);
 out_destroy_ili_zone:
	kmem_cache_destroy(xfs_ili_zone);
 out_destroy_inode_zone:
	kmem_cache_destroy(xfs_inode_zone);
 out_destroy_efi_zone:
	kmem_cache_destroy(xfs_efi_zone);
 out_destroy_efd_zone:
	kmem_cache_destroy(xfs_efd_zone);
 out_destroy_buf_item_zone:
	kmem_cache_destroy(xfs_buf_item_zone);
 out_destroy_trans_zone:
	kmem_cache_destroy(xfs_trans_zone);
 out_destroy_ifork_zone:
	kmem_cache_destroy(xfs_ifork_zone);
 out_destroy_da_state_zone:
	kmem_cache_destroy(xfs_da_state_zone);
 out_destroy_btree_cur_zone:
	kmem_cache_destroy(xfs_btree_cur_zone);
 out_destroy_bmap_free_item_zone:
	kmem_cache_destroy(xfs_bmap_free_item_zone);
 out_destroy_log_ticket_zone:
	kmem_cache_destroy(xfs_log_ticket_zone);
 out:
	return -ENOMEM;
}
STATIC void
xfs_destroy_zones(void)
{
	/*
	 * Make sure all delayed rcu free are flushed before we
	 * destroy caches.
	 */
	rcu_barrier();
	kmem_cache_destroy(xfs_bui_zone);
	kmem_cache_destroy(xfs_bud_zone);
	kmem_cache_destroy(xfs_cui_zone);
	kmem_cache_destroy(xfs_cud_zone);
	kmem_cache_destroy(xfs_rui_zone);
	kmem_cache_destroy(xfs_rud_zone);
	kmem_cache_destroy(xfs_icreate_zone);
	kmem_cache_destroy(xfs_ili_zone);
	kmem_cache_destroy(xfs_inode_zone);
	kmem_cache_destroy(xfs_efi_zone);
	kmem_cache_destroy(xfs_efd_zone);
	kmem_cache_destroy(xfs_buf_item_zone);
	kmem_cache_destroy(xfs_trans_zone);
	kmem_cache_destroy(xfs_ifork_zone);
	kmem_cache_destroy(xfs_da_state_zone);
	kmem_cache_destroy(xfs_btree_cur_zone);
	kmem_cache_destroy(xfs_bmap_free_item_zone);
	kmem_cache_destroy(xfs_log_ticket_zone);
}
STATIC int __init
xfs_init_workqueues(void)
{
	/*
	 * The allocation workqueue can be used in memory reclaim situations
	 * (writepage path), and parallelism is only limited by the number of
	 * AGs in all the filesystems mounted. Hence use the default large
	 * max_active value for this workqueue.
	 */
	xfs_alloc_wq = alloc_workqueue("xfsalloc",
			XFS_WQFLAGS(WQ_MEM_RECLAIM | WQ_FREEZABLE), 0);
	if (!xfs_alloc_wq)
		return -ENOMEM;

	xfs_discard_wq = alloc_workqueue("xfsdiscard", XFS_WQFLAGS(WQ_UNBOUND),
			0);
	if (!xfs_discard_wq)
		goto out_free_alloc_wq;

	return 0;
out_free_alloc_wq:
	destroy_workqueue(xfs_alloc_wq);
	return -ENOMEM;
}

STATIC void
xfs_destroy_workqueues(void)
{
	destroy_workqueue(xfs_discard_wq);
	destroy_workqueue(xfs_alloc_wq);
}
#ifdef CONFIG_HOTPLUG_CPU
static int
xfs_cpu_dead(
	unsigned int		cpu)
{
	struct xfs_mount	*mp, *n;

	spin_lock(&xfs_mount_list_lock);
	list_for_each_entry_safe(mp, n, &xfs_mount_list, m_mount_list) {
		spin_unlock(&xfs_mount_list_lock);
		xfs_inodegc_cpu_dead(mp, cpu);
		spin_lock(&xfs_mount_list_lock);
	}
	spin_unlock(&xfs_mount_list_lock);
	return 0;
}

static int __init
xfs_cpu_hotplug_init(void)
{
	int	error;

	error = cpuhp_setup_state_nocalls(CPUHP_XFS_DEAD, "xfs:dead", NULL,
			xfs_cpu_dead);
	if (error < 0)
		xfs_alert(NULL,
"Failed to initialise CPU hotplug, error %d. XFS is non-functional.",
			error);
	return error;
}

static void
xfs_cpu_hotplug_destroy(void)
{
	cpuhp_remove_state_nocalls(CPUHP_XFS_DEAD);
}

#else /* !CONFIG_HOTPLUG_CPU */
static inline int xfs_cpu_hotplug_init(void) { return 0; }
static inline void xfs_cpu_hotplug_destroy(void) {}
#endif
STATIC int __init
init_xfs_fs(void)
{
	int			error;

	xfs_check_ondisk_structs();

	printk(KERN_INFO XFS_VERSION_STRING " with "
			 XFS_BUILD_OPTIONS " enabled\n");

	xfs_dir_startup();

	error = xfs_cpu_hotplug_init();
	if (error)
		goto out;

	error = xfs_init_zones();
	if (error)
		goto out_destroy_hp;

	error = xfs_init_workqueues();
	if (error)
		goto out_destroy_zones;

	error = xfs_mru_cache_init();
	if (error)
		goto out_destroy_wq;

	error = xfs_buf_init();
	if (error)
		goto out_mru_cache_uninit;

	error = xfs_init_procfs();
	if (error)
		goto out_buf_terminate;

	error = xfs_sysctl_register();
	if (error)
		goto out_cleanup_procfs;

	xfs_kset = kset_create_and_add("xfs", NULL, fs_kobj);
	if (!xfs_kset) {
		error = -ENOMEM;
		goto out_sysctl_unregister;
	}

	xfsstats.xs_kobj.kobject.kset = xfs_kset;

	xfsstats.xs_stats = alloc_percpu(struct xfsstats);
	if (!xfsstats.xs_stats) {
		error = -ENOMEM;
		goto out_kset_unregister;
	}

	error = xfs_sysfs_init(&xfsstats.xs_kobj, &xfs_stats_ktype, NULL,
			       "stats");
	if (error)
		goto out_free_stats;

#ifdef DEBUG
	xfs_dbg_kobj.kobject.kset = xfs_kset;
	error = xfs_sysfs_init(&xfs_dbg_kobj, &xfs_dbg_ktype, NULL, "debug");
	if (error)
		goto out_remove_stats_kobj;
#endif

	error = xfs_qm_init();
	if (error)
		goto out_remove_dbg_kobj;

	error = register_filesystem(&xfs_fs_type);
	if (error)
		goto out_qm_exit;
	return 0;

 out_qm_exit:
	xfs_qm_exit();
 out_remove_dbg_kobj:
#ifdef DEBUG
	xfs_sysfs_del(&xfs_dbg_kobj);
 out_remove_stats_kobj:
#endif
	xfs_sysfs_del(&xfsstats.xs_kobj);
 out_free_stats:
	free_percpu(xfsstats.xs_stats);
 out_kset_unregister:
	kset_unregister(xfs_kset);
 out_sysctl_unregister:
	xfs_sysctl_unregister();
 out_cleanup_procfs:
	xfs_cleanup_procfs();
 out_buf_terminate:
	xfs_buf_terminate();
 out_mru_cache_uninit:
	xfs_mru_cache_uninit();
 out_destroy_wq:
	xfs_destroy_workqueues();
 out_destroy_zones:
	xfs_destroy_zones();
 out_destroy_hp:
	xfs_cpu_hotplug_destroy();
 out:
	return error;
}
STATIC void __exit
exit_xfs_fs(void)
{
	xfs_qm_exit();
	unregister_filesystem(&xfs_fs_type);
#ifdef DEBUG
	xfs_sysfs_del(&xfs_dbg_kobj);
#endif
	xfs_sysfs_del(&xfsstats.xs_kobj);
	free_percpu(xfsstats.xs_stats);
	kset_unregister(xfs_kset);
	xfs_sysctl_unregister();
	xfs_cleanup_procfs();
	xfs_buf_terminate();
	xfs_mru_cache_uninit();
	xfs_destroy_workqueues();
	xfs_destroy_zones();
	xfs_uuid_table_free();
	xfs_cpu_hotplug_destroy();
}
module_init(init_xfs_fs);
module_exit(exit_xfs_fs);

MODULE_AUTHOR("Silicon Graphics, Inc.");
MODULE_DESCRIPTION(XFS_VERSION_STRING " with " XFS_BUILD_OPTIONS " enabled");
MODULE_LICENSE("GPL");