// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2007 Red Hat, Inc. All rights reserved.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/statfs.h>
#include <linux/seq_file.h>
#include <linux/mount.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/gfs2_ondisk.h>
#include <linux/crc32.h>
#include <linux/time.h>
#include <linux/wait.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/kernel.h>
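/*
 * Return values of evict_should_delete() below: they tell gfs2_evict_inode()
 * whether the dinode of an evicted inode should be deallocated, left alone,
 * or whether the eviction should be deferred to the delete workqueue.
 */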
enum dinode_demise {
	SHOULD_DELETE_DINODE,
	SHOULD_NOT_DELETE_DINODE,
	SHOULD_DEFER_EVICTION,
};
/**
 * gfs2_jindex_free - Clear all the journal index information
 * @sdp: The GFS2 superblock
 */
void gfs2_jindex_free(struct gfs2_sbd *sdp)
{
	struct list_head list;
	struct gfs2_jdesc *jd;

	spin_lock(&sdp->sd_jindex_spin);
	list_add(&list, &sdp->sd_jindex_list);
	list_del_init(&sdp->sd_jindex_list);
	spin_unlock(&sdp->sd_jindex_spin);

	while (!list_empty(&list)) {
		jd = list_first_entry(&list, struct gfs2_jdesc, jd_list);
		gfs2_free_journal_extents(jd);
		list_del(&jd->jd_list);
static struct gfs2_jdesc *jdesc_find_i(struct list_head *head, unsigned int jid)
{
	struct gfs2_jdesc *jd;

	list_for_each_entry(jd, head, jd_list) {
		if (jd->jd_jid == jid)

struct gfs2_jdesc *gfs2_jdesc_find(struct gfs2_sbd *sdp, unsigned int jid)
{
	struct gfs2_jdesc *jd;

	spin_lock(&sdp->sd_jindex_spin);
	jd = jdesc_find_i(&sdp->sd_jindex_list, jid);
	spin_unlock(&sdp->sd_jindex_spin);
int gfs2_jdesc_check(struct gfs2_jdesc *jd)
{
	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
	u64 size = i_size_read(jd->jd_inode);
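	/*
	 * The checks below require the journal inode to be between 8 MiB
	 * (8 << 20) and 1 GiB (BIT(30)) in size and to be fully allocated:
	 * if gfs2_write_alloc_required() finds a hole in that range, the
	 * journal is inconsistent.
	 */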
	if (gfs2_check_internal_file_size(jd->jd_inode, 8 << 20, BIT(30)))

	jd->jd_blocks = size >> sdp->sd_sb.sb_bsize_shift;

	if (gfs2_write_alloc_required(ip, 0, size)) {
		gfs2_consist_inode(ip);
static int init_threads(struct gfs2_sbd *sdp)
{
	struct task_struct *p;

	p = kthread_run(gfs2_logd, sdp, "gfs2_logd");
		fs_err(sdp, "can't start logd thread: %d\n", error);
	sdp->sd_logd_process = p;

	p = kthread_run(gfs2_quotad, sdp, "gfs2_quotad");
		fs_err(sdp, "can't start quotad thread: %d\n", error);
	sdp->sd_quotad_process = p;

	kthread_stop(sdp->sd_logd_process);
	sdp->sd_logd_process = NULL;
/**
 * gfs2_make_fs_rw - Turn a Read-Only FS into a Read-Write one
 * @sdp: the filesystem
 */
int gfs2_make_fs_rw(struct gfs2_sbd *sdp)
{
	struct gfs2_inode *ip = GFS2_I(sdp->sd_jdesc->jd_inode);
	struct gfs2_glock *j_gl = ip->i_gl;
	struct gfs2_log_header_host head;

	error = init_threads(sdp);
		gfs2_withdraw_delayed(sdp);

	j_gl->gl_ops->go_inval(j_gl, DIO_METADATA);
	if (gfs2_withdrawn(sdp)) {

	error = gfs2_find_jhead(sdp->sd_jdesc, &head, false);
	if (error || gfs2_withdrawn(sdp))

	if (!(head.lh_flags & GFS2_LOG_HEAD_UNMOUNT)) {

	/* Initialize some head of the log stuff */
	sdp->sd_log_sequence = head.lh_sequence + 1;
	gfs2_log_pointers_init(sdp, head.lh_blkno);

	error = gfs2_quota_init(sdp);
	if (error || gfs2_withdrawn(sdp))

	set_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags);

	if (sdp->sd_quotad_process)
		kthread_stop(sdp->sd_quotad_process);
	sdp->sd_quotad_process = NULL;
	if (sdp->sd_logd_process)
		kthread_stop(sdp->sd_logd_process);
	sdp->sd_logd_process = NULL;
void gfs2_statfs_change_in(struct gfs2_statfs_change_host *sc, const void *buf)
{
	const struct gfs2_statfs_change *str = buf;

	sc->sc_total = be64_to_cpu(str->sc_total);
	sc->sc_free = be64_to_cpu(str->sc_free);
	sc->sc_dinodes = be64_to_cpu(str->sc_dinodes);

void gfs2_statfs_change_out(const struct gfs2_statfs_change_host *sc, void *buf)
{
	struct gfs2_statfs_change *str = buf;

	str->sc_total = cpu_to_be64(sc->sc_total);
	str->sc_free = cpu_to_be64(sc->sc_free);
	str->sc_dinodes = cpu_to_be64(sc->sc_dinodes);
int gfs2_statfs_init(struct gfs2_sbd *sdp)
{
	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
	struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
	struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode);
	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
	struct buffer_head *m_bh, *l_bh;
	struct gfs2_holder gh;

	error = gfs2_glock_nq_init(m_ip->i_gl, LM_ST_EXCLUSIVE, GL_NOCACHE,
				   &gh);

	error = gfs2_meta_inode_buffer(m_ip, &m_bh);

	if (sdp->sd_args.ar_spectator) {
		spin_lock(&sdp->sd_statfs_spin);
		gfs2_statfs_change_in(m_sc, m_bh->b_data +
				      sizeof(struct gfs2_dinode));
		spin_unlock(&sdp->sd_statfs_spin);

		error = gfs2_meta_inode_buffer(l_ip, &l_bh);

		spin_lock(&sdp->sd_statfs_spin);
		gfs2_statfs_change_in(m_sc, m_bh->b_data +
				      sizeof(struct gfs2_dinode));
		gfs2_statfs_change_in(l_sc, l_bh->b_data +
				      sizeof(struct gfs2_dinode));
		spin_unlock(&sdp->sd_statfs_spin);

	gfs2_glock_dq_uninit(&gh);
void gfs2_statfs_change(struct gfs2_sbd *sdp, s64 total, s64 free,
			s64 dinodes)
{
	struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode);
	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
	struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
	struct buffer_head *l_bh;

	error = gfs2_meta_inode_buffer(l_ip, &l_bh);

	gfs2_trans_add_meta(l_ip->i_gl, l_bh);

	spin_lock(&sdp->sd_statfs_spin);
	l_sc->sc_total += total;
	l_sc->sc_free += free;
	l_sc->sc_dinodes += dinodes;
	gfs2_statfs_change_out(l_sc, l_bh->b_data + sizeof(struct gfs2_dinode));
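	/*
	 * With the statfs_percent mount option set, wake up the statfs
	 * syncer once the accumulated local change exceeds that percentage
	 * of the master free-block count (x and y are assumed to be s64
	 * locals declared earlier in this function).
	 */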
	if (sdp->sd_args.ar_statfs_percent) {
		x = 100 * l_sc->sc_free;
		y = m_sc->sc_free * sdp->sd_args.ar_statfs_percent;
		if (x >= y || x <= -y)
	spin_unlock(&sdp->sd_statfs_spin);

	gfs2_wake_up_statfs(sdp);
void update_statfs(struct gfs2_sbd *sdp, struct buffer_head *m_bh,
		   struct buffer_head *l_bh)
{
	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
	struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode);
	struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;

	gfs2_trans_add_meta(l_ip->i_gl, l_bh);
	gfs2_trans_add_meta(m_ip->i_gl, m_bh);

	spin_lock(&sdp->sd_statfs_spin);
	m_sc->sc_total += l_sc->sc_total;
	m_sc->sc_free += l_sc->sc_free;
	m_sc->sc_dinodes += l_sc->sc_dinodes;
	memset(l_sc, 0, sizeof(struct gfs2_statfs_change));
	memset(l_bh->b_data + sizeof(struct gfs2_dinode),
	       0, sizeof(struct gfs2_statfs_change));
	gfs2_statfs_change_out(m_sc, m_bh->b_data + sizeof(struct gfs2_dinode));
	spin_unlock(&sdp->sd_statfs_spin);
int gfs2_statfs_sync(struct super_block *sb, int type)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;
	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
	struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode);
	struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
	struct gfs2_holder gh;
	struct buffer_head *m_bh, *l_bh;

	error = gfs2_glock_nq_init(m_ip->i_gl, LM_ST_EXCLUSIVE, GL_NOCACHE,
				   &gh);

	error = gfs2_meta_inode_buffer(m_ip, &m_bh);

	spin_lock(&sdp->sd_statfs_spin);
	gfs2_statfs_change_in(m_sc, m_bh->b_data +
			      sizeof(struct gfs2_dinode));
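	/*
	 * If the local statfs change file carries no pending changes, there
	 * is nothing to fold into the master statfs file and the sync can
	 * bail out early without starting a transaction.
	 */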
	if (!l_sc->sc_total && !l_sc->sc_free && !l_sc->sc_dinodes) {
		spin_unlock(&sdp->sd_statfs_spin);
	spin_unlock(&sdp->sd_statfs_spin);

	error = gfs2_meta_inode_buffer(l_ip, &l_bh);

	error = gfs2_trans_begin(sdp, 2 * RES_DINODE, 0);

	update_statfs(sdp, m_bh, l_bh);
	sdp->sd_statfs_force_sync = 0;

	gfs2_glock_dq_uninit(&gh);
struct lfcc {
	struct list_head list;
	struct gfs2_holder gh;
};
/**
 * gfs2_lock_fs_check_clean - Stop all writes to the FS and check that all
 *                            journals are clean
 * @sdp: the file system
 */
static int gfs2_lock_fs_check_clean(struct gfs2_sbd *sdp)
{
	struct gfs2_inode *ip;
	struct gfs2_jdesc *jd;
	struct gfs2_log_header_host lh;

	list_for_each_entry(jd, &sdp->sd_jindex_list, jd_list) {
		lfcc = kmalloc(sizeof(struct lfcc), GFP_KERNEL);
		ip = GFS2_I(jd->jd_inode);
		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &lfcc->gh);
		list_add(&lfcc->list, &list);

	error = gfs2_glock_nq_init(sdp->sd_freeze_gl, LM_ST_EXCLUSIVE,
				   LM_FLAG_NOEXP, &sdp->sd_freeze_gh);

	list_for_each_entry(jd, &sdp->sd_jindex_list, jd_list) {
		error = gfs2_jdesc_check(jd);
		error = gfs2_find_jhead(jd, &lh, false);
		if (!(lh.lh_flags & GFS2_LOG_HEAD_UNMOUNT)) {

	gfs2_freeze_unlock(&sdp->sd_freeze_gh);

	while (!list_empty(&list)) {
		lfcc = list_first_entry(&list, struct lfcc, list);
		list_del(&lfcc->list);
		gfs2_glock_dq_uninit(&lfcc->gh);
void gfs2_dinode_out(const struct gfs2_inode *ip, void *buf)
{
	struct gfs2_dinode *str = buf;

	str->di_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
	str->di_header.mh_type = cpu_to_be32(GFS2_METATYPE_DI);
	str->di_header.mh_format = cpu_to_be32(GFS2_FORMAT_DI);
	str->di_num.no_addr = cpu_to_be64(ip->i_no_addr);
	str->di_num.no_formal_ino = cpu_to_be64(ip->i_no_formal_ino);
	str->di_mode = cpu_to_be32(ip->i_inode.i_mode);
	str->di_uid = cpu_to_be32(i_uid_read(&ip->i_inode));
	str->di_gid = cpu_to_be32(i_gid_read(&ip->i_inode));
	str->di_nlink = cpu_to_be32(ip->i_inode.i_nlink);
	str->di_size = cpu_to_be64(i_size_read(&ip->i_inode));
	str->di_blocks = cpu_to_be64(gfs2_get_inode_blocks(&ip->i_inode));
	str->di_atime = cpu_to_be64(ip->i_inode.i_atime.tv_sec);
	str->di_mtime = cpu_to_be64(ip->i_inode.i_mtime.tv_sec);
	str->di_ctime = cpu_to_be64(ip->i_inode.i_ctime.tv_sec);
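	/*
	 * The in-core inode tracks a single allocation goal (i_goal), which
	 * is written to both the metadata and the data goal fields of the
	 * on-disk dinode.
	 */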
	str->di_goal_meta = cpu_to_be64(ip->i_goal);
	str->di_goal_data = cpu_to_be64(ip->i_goal);
	str->di_generation = cpu_to_be64(ip->i_generation);

	str->di_flags = cpu_to_be32(ip->i_diskflags);
	str->di_height = cpu_to_be16(ip->i_height);
	str->di_payload_format = cpu_to_be32(S_ISDIR(ip->i_inode.i_mode) &&
					     !(ip->i_diskflags & GFS2_DIF_EXHASH) ?
					     GFS2_FORMAT_DE : 0);
	str->di_depth = cpu_to_be16(ip->i_depth);
	str->di_entries = cpu_to_be32(ip->i_entries);

	str->di_eattr = cpu_to_be64(ip->i_eattr);
	str->di_atime_nsec = cpu_to_be32(ip->i_inode.i_atime.tv_nsec);
	str->di_mtime_nsec = cpu_to_be32(ip->i_inode.i_mtime.tv_nsec);
	str->di_ctime_nsec = cpu_to_be32(ip->i_inode.i_ctime.tv_nsec);
/**
 * gfs2_write_inode - Make sure the inode is stable on the disk
 * @inode: The inode
 * @wbc: The writeback control structure
 */
static int gfs2_write_inode(struct inode *inode, struct writeback_control *wbc)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct address_space *metamapping = gfs2_glock2aspace(ip->i_gl);
	struct backing_dev_info *bdi = inode_to_bdi(metamapping->host);
	bool flush_all = (wbc->sync_mode == WB_SYNC_ALL || gfs2_is_jdata(ip));

		gfs2_log_flush(GFS2_SB(inode), ip->i_gl,
			       GFS2_LOG_HEAD_FLUSH_NORMAL |
			       GFS2_LFC_WRITE_INODE);
	if (bdi->wb.dirty_exceeded)
		gfs2_ail1_flush(sdp, wbc);
		filemap_fdatawrite(metamapping);
		ret = filemap_fdatawait(metamapping);
		mark_inode_dirty_sync(inode);
		spin_lock(&inode->i_lock);
		if (!(inode->i_flags & I_DIRTY))
			gfs2_ordered_del_inode(ip);
		spin_unlock(&inode->i_lock);
/**
 * gfs2_dirty_inode - check for atime updates
 * @inode: The inode in question
 * @flags: The type of dirty
 *
 * Unfortunately it can be called under any combination of inode
 * glock and transaction lock, so we have to check carefully.
 *
 * At the moment this deals only with atime - it should be possible
 * to expand that role in future, once a review of the locking has
 * been carried out.
 */
static void gfs2_dirty_inode(struct inode *inode, int flags)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct buffer_head *bh;
	struct gfs2_holder gh;
	int need_endtrans = 0;

	if (unlikely(gfs2_withdrawn(sdp)))
	if (!gfs2_glock_is_locked_by_me(ip->i_gl)) {
		ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
			fs_err(sdp, "dirty_inode: glock %d\n", ret);
			gfs2_dump_glock(NULL, ip->i_gl, true);
	} else if (WARN_ON_ONCE(ip->i_gl->gl_state != LM_ST_EXCLUSIVE))

	if (current->journal_info == NULL) {
		ret = gfs2_trans_begin(sdp, RES_DINODE, 0);
			fs_err(sdp, "dirty_inode: gfs2_trans_begin %d\n", ret);

	ret = gfs2_meta_inode_buffer(ip, &bh);
		gfs2_trans_add_meta(ip->i_gl, bh);
		gfs2_dinode_out(ip, bh->b_data);

		gfs2_glock_dq_uninit(&gh);
/**
 * gfs2_make_fs_ro - Turn a Read-Write FS into a Read-Only one
 * @sdp: the filesystem
 */
void gfs2_make_fs_ro(struct gfs2_sbd *sdp)
{
	int log_write_allowed = test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags);

	gfs2_flush_delete_work(sdp);
	if (!log_write_allowed && current == sdp->sd_quotad_process)
		fs_warn(sdp, "The quotad daemon is withdrawing.\n");
	else if (sdp->sd_quotad_process)
		kthread_stop(sdp->sd_quotad_process);
	sdp->sd_quotad_process = NULL;

	if (!log_write_allowed && current == sdp->sd_logd_process)
		fs_warn(sdp, "The logd daemon is withdrawing.\n");
	else if (sdp->sd_logd_process)
		kthread_stop(sdp->sd_logd_process);
	sdp->sd_logd_process = NULL;

	if (log_write_allowed) {
		gfs2_quota_sync(sdp->sd_vfs, 0);
		gfs2_statfs_sync(sdp->sd_vfs, 0);

		gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_SHUTDOWN |
			       GFS2_LFC_MAKE_FS_RO);
		wait_event_timeout(sdp->sd_log_waitq,
				   gfs2_log_is_empty(sdp),
		gfs2_assert_warn(sdp, gfs2_log_is_empty(sdp));
		wait_event_timeout(sdp->sd_log_waitq,
				   gfs2_log_is_empty(sdp),
	gfs2_quota_cleanup(sdp);

	if (!log_write_allowed)
		sdp->sd_vfs->s_flags |= SB_RDONLY;
/**
 * gfs2_put_super - Unmount the filesystem
 * @sb: The VFS superblock
 */
static void gfs2_put_super(struct super_block *sb)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;
	struct gfs2_jdesc *jd;

	/* No more recovery requests */
	set_bit(SDF_NORECOVERY, &sdp->sd_flags);

	/* Wait on outstanding recovery */
	spin_lock(&sdp->sd_jindex_spin);
	list_for_each_entry(jd, &sdp->sd_jindex_list, jd_list) {
		if (!test_bit(JDF_RECOVERY, &jd->jd_flags))
		spin_unlock(&sdp->sd_jindex_spin);
		wait_on_bit(&jd->jd_flags, JDF_RECOVERY,
			    TASK_UNINTERRUPTIBLE);
	spin_unlock(&sdp->sd_jindex_spin);

	if (!sb_rdonly(sb)) {
		gfs2_make_fs_ro(sdp);
	WARN_ON(gfs2_withdrawing(sdp));

	/* At this point, we're through modifying the disk */

	iput(sdp->sd_jindex);
	iput(sdp->sd_statfs_inode);
	iput(sdp->sd_rindex);
	iput(sdp->sd_quota_inode);

	gfs2_glock_put(sdp->sd_rename_gl);
	gfs2_glock_put(sdp->sd_freeze_gl);

	if (!sdp->sd_args.ar_spectator) {
		if (gfs2_holder_initialized(&sdp->sd_journal_gh))
			gfs2_glock_dq_uninit(&sdp->sd_journal_gh);
		if (gfs2_holder_initialized(&sdp->sd_jinode_gh))
			gfs2_glock_dq_uninit(&sdp->sd_jinode_gh);
		gfs2_glock_dq_uninit(&sdp->sd_sc_gh);
		gfs2_glock_dq_uninit(&sdp->sd_qc_gh);
		free_local_statfs_inodes(sdp);
		iput(sdp->sd_qc_inode);

	gfs2_glock_dq_uninit(&sdp->sd_live_gh);
	gfs2_clear_rgrpd(sdp);
	gfs2_jindex_free(sdp);
	/* Take apart glock structures and buffer lists */
	gfs2_gl_hash_clear(sdp);
	truncate_inode_pages_final(&sdp->sd_aspace);
	gfs2_delete_debugfs_file(sdp);
	/* Unmount the locking protocol */
	gfs2_lm_unmount(sdp);

	/* At this point, we're through participating in the lockspace */
	gfs2_sys_fs_del(sdp);
/**
 * gfs2_sync_fs - sync the filesystem
 * @sb: the superblock
 * @wait: true to wait for completion
 *
 * Flushes the log to disk.
 */
static int gfs2_sync_fs(struct super_block *sb, int wait)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;

	gfs2_quota_sync(sb, -1);
		gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
			       GFS2_LFC_SYNC_FS);
	return sdp->sd_log_error;
void gfs2_freeze_func(struct work_struct *work)
{
	struct gfs2_holder freeze_gh;
	struct gfs2_sbd *sdp = container_of(work, struct gfs2_sbd, sd_freeze_work);
	struct super_block *sb = sdp->sd_vfs;
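	/*
	 * Pin the superblock while the freeze worker runs; the matching
	 * deactivate_super() below drops that reference again.
	 */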
	atomic_inc(&sb->s_active);
	error = gfs2_freeze_lock(sdp, &freeze_gh, 0);
		gfs2_assert_withdraw(sdp, 0);
		atomic_set(&sdp->sd_freeze_state, SFS_UNFROZEN);
		error = thaw_super(sb);
			fs_info(sdp, "GFS2: couldn't thaw filesystem: %d\n",
				error);
			gfs2_assert_withdraw(sdp, 0);
		gfs2_freeze_unlock(&freeze_gh);
	deactivate_super(sb);
	clear_bit_unlock(SDF_FS_FROZEN, &sdp->sd_flags);
	wake_up_bit(&sdp->sd_flags, SDF_FS_FROZEN);
/**
 * gfs2_freeze - prevent further writes to the filesystem
 * @sb: the VFS structure for the filesystem
 */
static int gfs2_freeze(struct super_block *sb)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;

	mutex_lock(&sdp->sd_freeze_mutex);
	if (atomic_read(&sdp->sd_freeze_state) != SFS_UNFROZEN) {

	if (gfs2_withdrawn(sdp)) {

		error = gfs2_lock_fs_check_clean(sdp);
			fs_err(sdp, "waiting for recovery before freeze\n");
		else if (error == -EIO) {
			fs_err(sdp, "Fatal IO error: cannot freeze gfs2 due "
			       "to recovery error.\n");
			fs_err(sdp, "error freezing FS: %d\n", error);

		fs_err(sdp, "retrying...\n");

	set_bit(SDF_FS_FROZEN, &sdp->sd_flags);
	mutex_unlock(&sdp->sd_freeze_mutex);
/**
 * gfs2_unfreeze - reallow writes to the filesystem
 * @sb: the VFS structure for the filesystem
 */
static int gfs2_unfreeze(struct super_block *sb)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;

	mutex_lock(&sdp->sd_freeze_mutex);
	if (atomic_read(&sdp->sd_freeze_state) != SFS_FROZEN ||
	    !gfs2_holder_initialized(&sdp->sd_freeze_gh)) {
		mutex_unlock(&sdp->sd_freeze_mutex);

	gfs2_freeze_unlock(&sdp->sd_freeze_gh);
	mutex_unlock(&sdp->sd_freeze_mutex);
	return wait_on_bit(&sdp->sd_flags, SDF_FS_FROZEN, TASK_INTERRUPTIBLE);
/**
 * statfs_slow_fill - fill in the sc for a given RG
 * @rgd: the RG
 * @sc: the sc structure
 *
 * Returns: 0 on success, -ESTALE if the LVB is invalid
 */
static int statfs_slow_fill(struct gfs2_rgrpd *rgd,
			    struct gfs2_statfs_change_host *sc)
{
	gfs2_rgrp_verify(rgd);
	sc->sc_total += rgd->rd_data;
	sc->sc_free += rgd->rd_free;
	sc->sc_dinodes += rgd->rd_dinodes;
/**
 * gfs2_statfs_slow - Stat a filesystem using asynchronous locking
 * @sdp: the filesystem
 * @sc: the sc info that will be returned
 *
 * Any error (other than a signal) will cause this routine to fall back
 * to the synchronous version.
 *
 * FIXME: This really shouldn't busy wait like this.
 */
static int gfs2_statfs_slow(struct gfs2_sbd *sdp, struct gfs2_statfs_change_host *sc)
{
	struct gfs2_rgrpd *rgd_next;
	struct gfs2_holder *gha, *gh;
	unsigned int slots = 64;

	memset(sc, 0, sizeof(struct gfs2_statfs_change_host));
	gha = kmalloc_array(slots, sizeof(struct gfs2_holder), GFP_KERNEL);
	for (x = 0; x < slots; x++)
		gfs2_holder_mark_uninitialized(gha + x);

	rgd_next = gfs2_rgrpd_get_first(sdp);
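	/*
	 * Keep up to 'slots' asynchronous resource-group lock requests in
	 * flight at once; whenever one completes, fold that rgrp's counts
	 * into sc and reuse the holder slot for the next resource group.
	 */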
		for (x = 0; x < slots; x++) {
			if (gfs2_holder_initialized(gh) && gfs2_glock_poll(gh)) {
				err = gfs2_glock_wait(gh);
					gfs2_holder_uninit(gh);
				struct gfs2_rgrpd *rgd =
					gfs2_glock2rgrp(gh->gh_gl);
				error = statfs_slow_fill(rgd, sc);
				gfs2_glock_dq_uninit(gh);
			if (gfs2_holder_initialized(gh))
			else if (rgd_next && !error) {
				error = gfs2_glock_nq_init(rgd_next->rd_gl,
				rgd_next = gfs2_rgrpd_get_next(rgd_next);

		if (signal_pending(current))
			error = -ERESTARTSYS;
/**
 * gfs2_statfs_i - Do a statfs
 * @sdp: the filesystem
 * @sc: the sc structure
 */
static int gfs2_statfs_i(struct gfs2_sbd *sdp, struct gfs2_statfs_change_host *sc)
{
	struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;

	spin_lock(&sdp->sd_statfs_spin);

	sc->sc_total += l_sc->sc_total;
	sc->sc_free += l_sc->sc_free;
	sc->sc_dinodes += l_sc->sc_dinodes;

	spin_unlock(&sdp->sd_statfs_spin);
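	/*
	 * The local statfs changes may not have been folded into the master
	 * file yet, so the combined counts can be transiently off; clamp
	 * them to sane values before reporting them.
	 */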
	if (sc->sc_free > sc->sc_total)
		sc->sc_free = sc->sc_total;
	if (sc->sc_dinodes < 0)
/**
 * gfs2_statfs - Gather and return stats about the filesystem
 * @dentry: The name of the link
 * @buf: The buffer
 *
 * Returns: 0 on success or error code
 */
static int gfs2_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct super_block *sb = dentry->d_sb;
	struct gfs2_sbd *sdp = sb->s_fs_info;
	struct gfs2_statfs_change_host sc;

	error = gfs2_rindex_update(sdp);

	if (gfs2_tune_get(sdp, gt_statfs_slow))
		error = gfs2_statfs_slow(sdp, &sc);
		error = gfs2_statfs_i(sdp, &sc);

	buf->f_type = GFS2_MAGIC;
	buf->f_bsize = sdp->sd_sb.sb_bsize;
	buf->f_blocks = sc.sc_total;
	buf->f_bfree = sc.sc_free;
	buf->f_bavail = sc.sc_free;
	buf->f_files = sc.sc_dinodes + sc.sc_free;
	buf->f_ffree = sc.sc_free;
	buf->f_namelen = GFS2_FNAMESIZE;
/**
 * gfs2_drop_inode - Drop an inode (test for remote unlink)
 * @inode: The inode to drop
 *
 * If we've received a callback on an iopen lock then it's because a
 * remote node tried to deallocate the inode but failed due to this node
 * still having the inode open. Here we mark the link count zero
 * since we know that it must have reached zero if the GLF_DEMOTE flag
 * is set on the iopen glock. If we didn't do a disk read since the
 * remote node removed the final link then we might otherwise miss
 * this event. This check ensures that this node will deallocate the
 * inode's blocks, or alternatively pass the baton on to another
 * node for later deallocation.
 */
static int gfs2_drop_inode(struct inode *inode)
{
	struct gfs2_inode *ip = GFS2_I(inode);

	if (!test_bit(GIF_FREE_VFS_INODE, &ip->i_flags) &&
	    gfs2_holder_initialized(&ip->i_iopen_gh)) {
		struct gfs2_glock *gl = ip->i_iopen_gh.gh_gl;
		if (test_bit(GLF_DEMOTE, &gl->gl_flags))

	/*
	 * When an inode's link count has dropped to zero under memory
	 * pressure, defer deleting the inode to the delete workqueue. This
	 * avoids calling into DLM under memory pressure, which can deadlock.
	 */
	if (!inode->i_nlink &&
	    unlikely(current->flags & PF_MEMALLOC) &&
	    gfs2_holder_initialized(&ip->i_iopen_gh)) {
		struct gfs2_glock *gl = ip->i_iopen_gh.gh_gl;
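		/*
		 * Take a glock reference for the queued delete work; if the
		 * work could not be queued, drop the reference again via the
		 * deferred put so that we do not call into DLM here.
		 */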
		gfs2_glock_hold(gl);
		if (!gfs2_queue_delete_work(gl, 0))
			gfs2_glock_queue_put(gl);

	return generic_drop_inode(inode);
static int is_ancestor(const struct dentry *d1, const struct dentry *d2)
{
	} while (!IS_ROOT(d1));
/**
 * gfs2_show_options - Show mount options for /proc/mounts
 * @s: seq_file structure
 * @root: root of this (sub)tree
 *
 * Returns: 0 on success or error code
 */
static int gfs2_show_options(struct seq_file *s, struct dentry *root)
{
	struct gfs2_sbd *sdp = root->d_sb->s_fs_info;
	struct gfs2_args *args = &sdp->sd_args;

	if (is_ancestor(root, sdp->sd_master_dir))
		seq_puts(s, ",meta");
	if (args->ar_lockproto[0])
		seq_show_option(s, "lockproto", args->ar_lockproto);
	if (args->ar_locktable[0])
		seq_show_option(s, "locktable", args->ar_locktable);
	if (args->ar_hostdata[0])
		seq_show_option(s, "hostdata", args->ar_hostdata);
	if (args->ar_spectator)
		seq_puts(s, ",spectator");
	if (args->ar_localflocks)
		seq_puts(s, ",localflocks");
		seq_puts(s, ",debug");
	if (args->ar_posix_acl)
		seq_puts(s, ",acl");
	if (args->ar_quota != GFS2_QUOTA_DEFAULT) {
		switch (args->ar_quota) {
		case GFS2_QUOTA_OFF:
		case GFS2_QUOTA_ACCOUNT:
		seq_printf(s, ",quota=%s", state);
	if (args->ar_suiddir)
		seq_puts(s, ",suiddir");
	if (args->ar_data != GFS2_DATA_DEFAULT) {
		switch (args->ar_data) {
		case GFS2_DATA_WRITEBACK:
			state = "writeback";
		case GFS2_DATA_ORDERED:
		seq_printf(s, ",data=%s", state);
	if (args->ar_discard)
		seq_puts(s, ",discard");
	val = sdp->sd_tune.gt_logd_secs;
		seq_printf(s, ",commit=%d", val);
	val = sdp->sd_tune.gt_statfs_quantum;
		seq_printf(s, ",statfs_quantum=%d", val);
	else if (sdp->sd_tune.gt_statfs_slow)
		seq_puts(s, ",statfs_quantum=0");
	val = sdp->sd_tune.gt_quota_quantum;
		seq_printf(s, ",quota_quantum=%d", val);
	if (args->ar_statfs_percent)
		seq_printf(s, ",statfs_percent=%d", args->ar_statfs_percent);
	if (args->ar_errors != GFS2_ERRORS_DEFAULT) {
		switch (args->ar_errors) {
		case GFS2_ERRORS_WITHDRAW:
		case GFS2_ERRORS_PANIC:
		seq_printf(s, ",errors=%s", state);
	if (test_bit(SDF_NOBARRIERS, &sdp->sd_flags))
		seq_puts(s, ",nobarrier");
	if (test_bit(SDF_DEMOTE, &sdp->sd_flags))
		seq_puts(s, ",demote_interface_used");
	if (args->ar_rgrplvb)
		seq_puts(s, ",rgrplvb");
	if (args->ar_loccookie)
		seq_puts(s, ",loccookie");
static void gfs2_final_release_pages(struct gfs2_inode *ip)
{
	struct inode *inode = &ip->i_inode;
	struct gfs2_glock *gl = ip->i_gl;

	truncate_inode_pages(gfs2_glock2aspace(ip->i_gl), 0);
	truncate_inode_pages(&inode->i_data, 0);

	if (atomic_read(&gl->gl_revokes) == 0) {
		clear_bit(GLF_LFLUSH, &gl->gl_flags);
		clear_bit(GLF_DIRTY, &gl->gl_flags);
static int gfs2_dinode_dealloc(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_rgrpd *rgd;
	struct gfs2_holder gh;

	if (gfs2_get_inode_blocks(&ip->i_inode) != 1) {
		gfs2_consist_inode(ip);

	error = gfs2_rindex_update(sdp);

	error = gfs2_quota_hold(ip, NO_UID_QUOTA_CHANGE, NO_GID_QUOTA_CHANGE);

	rgd = gfs2_blk2rgrpd(sdp, ip->i_no_addr, 1);
		gfs2_consist_inode(ip);

	error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE,
				   LM_FLAG_NODE_SCOPE, &gh);

	error = gfs2_trans_begin(sdp, RES_RG_BIT + RES_STATFS + RES_QUOTA,
				 sdp->sd_jdesc->jd_blocks);
		goto out_rg_gunlock;

	gfs2_free_di(rgd, ip);

	gfs2_final_release_pages(ip);

	gfs2_trans_end(sdp);

	gfs2_glock_dq_uninit(&gh);

	gfs2_quota_unhold(ip);
/**
 * gfs2_glock_put_eventually
 * @gl: The glock to put
 *
 * When under memory pressure, trigger a deferred glock put to make sure we
 * won't call into DLM and deadlock. Otherwise, put the glock directly.
 */
static void gfs2_glock_put_eventually(struct gfs2_glock *gl)
{
	if (current->flags & PF_MEMALLOC)
		gfs2_glock_queue_put(gl);
static bool gfs2_upgrade_iopen_glock(struct inode *inode)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_holder *gh = &ip->i_iopen_gh;
	long timeout = 5 * HZ;

	gh->gh_flags |= GL_NOCACHE;
	gfs2_glock_dq_wait(gh);

	/*
	 * If there are no other lock holders, we'll get the lock immediately.
	 * Otherwise, the other nodes holding the lock will be notified about
	 * our locking request. If they don't have the inode open, they'll
	 * evict the cached inode and release the lock. Otherwise, if they
	 * poke the inode glock, we'll take this as an indication that they
	 * still need the iopen glock and that they'll take care of deleting
	 * the inode when they're done. As a last resort, if another node
	 * keeps holding the iopen glock without showing any activity on the
	 * inode glock, we'll eventually time out.
	 *
	 * Note that we're passing the LM_FLAG_TRY_1CB flag to the first
	 * locking request as an optimization to notify lock holders as soon as
	 * possible. Without that flag, they'd be notified implicitly by the
	 * second locking request.
	 */

	gfs2_holder_reinit(LM_ST_EXCLUSIVE, LM_FLAG_TRY_1CB | GL_NOCACHE, gh);
	error = gfs2_glock_nq(gh);
	if (error != GLR_TRYFAILED)

	gfs2_holder_reinit(LM_ST_EXCLUSIVE, GL_ASYNC | GL_NOCACHE, gh);
	error = gfs2_glock_nq(gh);

	timeout = wait_event_interruptible_timeout(sdp->sd_async_glock_wait,
		!test_bit(HIF_WAIT, &gh->gh_iflags) ||
		test_bit(GLF_DEMOTE, &ip->i_gl->gl_flags),
		timeout);
	if (!test_bit(HIF_HOLDER, &gh->gh_iflags)) {
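		/*
		 * The exclusive lock was not granted: either the wait timed
		 * out or the inode glock was demoted, i.e. another node still
		 * uses the inode.  Back off and leave the deletion to that
		 * node; the caller treats this as SHOULD_NOT_DELETE_DINODE.
		 */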
/**
 * evict_should_delete - determine whether the inode is eligible for deletion
 * @inode: The inode to evict
 * @gh: The glock holder structure
 *
 * This function determines whether the evicted inode is eligible to be deleted
 * and locks the inode glock.
 *
 * Returns: the fate of the dinode
 */
static enum dinode_demise evict_should_delete(struct inode *inode,
					      struct gfs2_holder *gh)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct super_block *sb = inode->i_sb;
	struct gfs2_sbd *sdp = sb->s_fs_info;

	if (test_bit(GIF_ALLOC_FAILED, &ip->i_flags)) {
		BUG_ON(!gfs2_glock_is_locked_by_me(ip->i_gl));

	if (test_bit(GIF_DEFERRED_DELETE, &ip->i_flags))
		return SHOULD_DEFER_EVICTION;

	/* Deletes should never happen under memory pressure anymore. */
	if (WARN_ON_ONCE(current->flags & PF_MEMALLOC))
		return SHOULD_DEFER_EVICTION;

	/* Must not read inode block until block type has been verified */
	ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, GL_SKIP, gh);
	if (unlikely(ret)) {
		glock_clear_object(ip->i_iopen_gh.gh_gl, ip);
		ip->i_iopen_gh.gh_flags |= GL_NOCACHE;
		gfs2_glock_dq_uninit(&ip->i_iopen_gh);
		return SHOULD_DEFER_EVICTION;

	if (gfs2_inode_already_deleted(ip->i_gl, ip->i_no_formal_ino))
		return SHOULD_NOT_DELETE_DINODE;
	ret = gfs2_check_blk_type(sdp, ip->i_no_addr, GFS2_BLKST_UNLINKED);
		return SHOULD_NOT_DELETE_DINODE;

	if (test_bit(GIF_INVALID, &ip->i_flags)) {
		ret = gfs2_inode_refresh(ip);
			return SHOULD_NOT_DELETE_DINODE;

	/*
	 * The inode may have been recreated in the meantime.
	 */
		return SHOULD_NOT_DELETE_DINODE;

	if (gfs2_holder_initialized(&ip->i_iopen_gh) &&
	    test_bit(HIF_HOLDER, &ip->i_iopen_gh.gh_iflags)) {
		if (!gfs2_upgrade_iopen_glock(inode)) {
			gfs2_holder_uninit(&ip->i_iopen_gh);
			return SHOULD_NOT_DELETE_DINODE;

	return SHOULD_DELETE_DINODE;
/**
 * evict_unlinked_inode - delete the pieces of an unlinked evicted inode
 * @inode: The inode to evict
 */
static int evict_unlinked_inode(struct inode *inode)
{
	struct gfs2_inode *ip = GFS2_I(inode);

	if (S_ISDIR(inode->i_mode) &&
	    (ip->i_diskflags & GFS2_DIF_EXHASH)) {
		ret = gfs2_dir_exhash_dealloc(ip);

	ret = gfs2_ea_dealloc(ip);

	if (!gfs2_is_stuffed(ip)) {
		ret = gfs2_file_dealloc(ip);

	/* We're about to clear the bitmap for the dinode, but as soon as we
	   do, gfs2_create_inode can create another inode at the same block
	   location and try to set gl_object again. We clear gl_object here so
	   that subsequent inode creates don't see an old gl_object. */
	glock_clear_object(ip->i_gl, ip);
	ret = gfs2_dinode_dealloc(ip);
	gfs2_inode_remember_delete(ip->i_gl, ip->i_no_formal_ino);
/**
 * evict_linked_inode - evict an inode whose dinode has not been unlinked
 * @inode: The inode to evict
 */
static int evict_linked_inode(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;
	struct gfs2_sbd *sdp = sb->s_fs_info;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct address_space *metamapping;

	gfs2_log_flush(sdp, ip->i_gl, GFS2_LOG_HEAD_FLUSH_NORMAL |
		       GFS2_LFC_EVICT_INODE);
	metamapping = gfs2_glock2aspace(ip->i_gl);
	if (test_bit(GLF_DIRTY, &ip->i_gl->gl_flags)) {
		filemap_fdatawrite(metamapping);
		filemap_fdatawait(metamapping);
	write_inode_now(inode, 1);
	gfs2_ail_flush(ip->i_gl, 0);

	ret = gfs2_trans_begin(sdp, 0, sdp->sd_jdesc->jd_blocks);

	/* Needs to be done before glock release & also in a transaction */
	truncate_inode_pages(&inode->i_data, 0);
	truncate_inode_pages(metamapping, 0);
	gfs2_trans_end(sdp);
/**
 * gfs2_evict_inode - Remove an inode from cache
 * @inode: The inode to evict
 *
 * There are three cases to consider:
 * 1. i_nlink == 0, we are final opener (and must deallocate)
 * 2. i_nlink == 0, we are not the final opener (and cannot deallocate)
 * 3. i_nlink > 0
 *
 * If the fs is read only, then we have to treat all cases as per #3
 * since we are unable to do any deallocation. The inode will be
 * deallocated by the next read/write node to attempt an allocation
 * in the same resource group
 *
 * We have to (at the moment) hold the inodes main lock to cover
 * the gap between unlocking the shared lock on the iopen lock and
 * taking the exclusive lock. I'd rather do a shared -> exclusive
 * conversion on the iopen lock, but we can change that later. This
 * is safe, just less efficient.
 */
static void gfs2_evict_inode(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;
	struct gfs2_sbd *sdp = sb->s_fs_info;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_holder gh;

	if (test_bit(GIF_FREE_VFS_INODE, &ip->i_flags)) {

	if (inode->i_nlink || sb_rdonly(sb))

	gfs2_holder_mark_uninitialized(&gh);
	ret = evict_should_delete(inode, &gh);
	if (ret == SHOULD_DEFER_EVICTION)
	if (ret == SHOULD_DELETE_DINODE)
		ret = evict_unlinked_inode(inode);
		ret = evict_linked_inode(inode);

	if (gfs2_rs_active(&ip->i_res))
		gfs2_rs_deltree(&ip->i_res);

	if (gfs2_holder_initialized(&gh)) {
		glock_clear_object(ip->i_gl, ip);
		gfs2_glock_dq_uninit(&gh);
	if (ret && ret != GLR_TRYFAILED && ret != -EROFS)
		fs_warn(sdp, "gfs2_evict_inode: %d\n", ret);

	truncate_inode_pages_final(&inode->i_data);
	gfs2_assert_warn(sdp, ip->i_qadata->qa_ref == 0);
	gfs2_rs_delete(ip, NULL);
	gfs2_ordered_del_inode(ip);
	gfs2_dir_hash_inval(ip);
	glock_clear_object(ip->i_gl, ip);
	wait_on_bit_io(&ip->i_flags, GIF_GLOP_PENDING, TASK_UNINTERRUPTIBLE);
	gfs2_glock_add_to_lru(ip->i_gl);
	gfs2_glock_put_eventually(ip->i_gl);

	if (gfs2_holder_initialized(&ip->i_iopen_gh)) {
		struct gfs2_glock *gl = ip->i_iopen_gh.gh_gl;

		glock_clear_object(gl, ip);
		if (test_bit(HIF_HOLDER, &ip->i_iopen_gh.gh_iflags)) {
			ip->i_iopen_gh.gh_flags |= GL_NOCACHE;
			gfs2_glock_dq(&ip->i_iopen_gh);
		gfs2_glock_hold(gl);
		gfs2_holder_uninit(&ip->i_iopen_gh);
		gfs2_glock_put_eventually(gl);
static struct inode *gfs2_alloc_inode(struct super_block *sb)
{
	struct gfs2_inode *ip;

	ip = kmem_cache_alloc(gfs2_inode_cachep, GFP_KERNEL);
	gfs2_holder_mark_uninitialized(&ip->i_iopen_gh);
	memset(&ip->i_res, 0, sizeof(ip->i_res));
	RB_CLEAR_NODE(&ip->i_res.rs_node);

	return &ip->i_inode;

static void gfs2_free_inode(struct inode *inode)
{
	kmem_cache_free(gfs2_inode_cachep, GFS2_I(inode));
void free_local_statfs_inodes(struct gfs2_sbd *sdp)
{
	struct local_statfs_inode *lsi, *safe;

	/* Run through the statfs inodes list to iput and free memory */
	list_for_each_entry_safe(lsi, safe, &sdp->sd_sc_inodes_list, si_list) {
		if (lsi->si_jid == sdp->sd_jdesc->jd_jid)
			sdp->sd_sc_inode = NULL; /* belongs to this node */
		if (lsi->si_sc_inode)
			iput(lsi->si_sc_inode);
		list_del(&lsi->si_list);
struct inode *find_local_statfs_inode(struct gfs2_sbd *sdp,
				      unsigned int index)
{
	struct local_statfs_inode *lsi;

	/* Return the local (per node) statfs inode in the
	 * sdp->sd_sc_inodes_list corresponding to the 'index'. */
	list_for_each_entry(lsi, &sdp->sd_sc_inodes_list, si_list) {
		if (lsi->si_jid == index)
			return lsi->si_sc_inode;
const struct super_operations gfs2_super_ops = {
	.alloc_inode		= gfs2_alloc_inode,
	.free_inode		= gfs2_free_inode,
	.write_inode		= gfs2_write_inode,
	.dirty_inode		= gfs2_dirty_inode,
	.evict_inode		= gfs2_evict_inode,
	.put_super		= gfs2_put_super,
	.sync_fs		= gfs2_sync_fs,
	.freeze_super		= gfs2_freeze,
	.thaw_super		= gfs2_unfreeze,
	.statfs			= gfs2_statfs,
	.drop_inode		= gfs2_drop_inode,
	.show_options		= gfs2_show_options,
};