fs/gfs2/ops_fstype.c
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
4  * Copyright (C) 2004-2008 Red Hat, Inc.  All rights reserved.
5  */
6
7 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
8
9 #include <linux/sched.h>
10 #include <linux/slab.h>
11 #include <linux/spinlock.h>
12 #include <linux/completion.h>
13 #include <linux/buffer_head.h>
14 #include <linux/blkdev.h>
15 #include <linux/kthread.h>
16 #include <linux/export.h>
17 #include <linux/namei.h>
18 #include <linux/mount.h>
19 #include <linux/gfs2_ondisk.h>
20 #include <linux/quotaops.h>
21 #include <linux/lockdep.h>
22 #include <linux/module.h>
23 #include <linux/backing-dev.h>
24
25 #include "gfs2.h"
26 #include "incore.h"
27 #include "bmap.h"
28 #include "glock.h"
29 #include "glops.h"
30 #include "inode.h"
31 #include "recovery.h"
32 #include "rgrp.h"
33 #include "super.h"
34 #include "sys.h"
35 #include "util.h"
36 #include "log.h"
37 #include "quota.h"
38 #include "dir.h"
39 #include "meta_io.h"
40 #include "trace_gfs2.h"
41 #include "lops.h"
42
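/*
 * Several of the init_* helpers below take a DO/UNDO argument: called with
 * DO they set a subsystem up, and called with UNDO (from the error paths in
 * fill_super()) the same function tears that state back down again.
 */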
43 #define DO 0
44 #define UNDO 1
45
46 /**
47  * gfs2_tune_init - Fill a gfs2_tune structure with default values
48  * @gt: The gfs2_tune structure to fill with default values
49  *
50  */
51
52 static void gfs2_tune_init(struct gfs2_tune *gt)
53 {
54         spin_lock_init(&gt->gt_spin);
55
56         gt->gt_quota_warn_period = 10;
57         gt->gt_quota_scale_num = 1;
58         gt->gt_quota_scale_den = 1;
59         gt->gt_new_files_jdata = 0;
60         gt->gt_max_readahead = BIT(18);
61         gt->gt_complain_secs = 10;
62 }
63
64 static struct gfs2_sbd *init_sbd(struct super_block *sb)
65 {
66         struct gfs2_sbd *sdp;
67         struct address_space *mapping;
68
69         sdp = kzalloc(sizeof(struct gfs2_sbd), GFP_KERNEL);
70         if (!sdp)
71                 return NULL;
72
73         sdp->sd_vfs = sb;
74         sdp->sd_lkstats = alloc_percpu(struct gfs2_pcpu_lkstats);
75         if (!sdp->sd_lkstats) {
76                 kfree(sdp);
77                 return NULL;
78         }
79         sb->s_fs_info = sdp;
80
81         set_bit(SDF_NOJOURNALID, &sdp->sd_flags);
82         gfs2_tune_init(&sdp->sd_tune);
83
84         init_waitqueue_head(&sdp->sd_glock_wait);
85         atomic_set(&sdp->sd_glock_disposal, 0);
86         init_completion(&sdp->sd_locking_init);
87         init_completion(&sdp->sd_wdack);
88         spin_lock_init(&sdp->sd_statfs_spin);
89
90         spin_lock_init(&sdp->sd_rindex_spin);
91         sdp->sd_rindex_tree.rb_node = NULL;
92
93         INIT_LIST_HEAD(&sdp->sd_jindex_list);
94         spin_lock_init(&sdp->sd_jindex_spin);
95         mutex_init(&sdp->sd_jindex_mutex);
96         init_completion(&sdp->sd_journal_ready);
97
98         INIT_LIST_HEAD(&sdp->sd_quota_list);
99         mutex_init(&sdp->sd_quota_mutex);
100         mutex_init(&sdp->sd_quota_sync_mutex);
101         init_waitqueue_head(&sdp->sd_quota_wait);
102         INIT_LIST_HEAD(&sdp->sd_trunc_list);
103         spin_lock_init(&sdp->sd_trunc_lock);
104         spin_lock_init(&sdp->sd_bitmap_lock);
105
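        /*
         * Set up the private address space used for resource group metadata
         * (see gfs2_rgrp_aops); it has no inode of its own, so the block
         * device inode stands in as its host.
         */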
106         mapping = &sdp->sd_aspace;
107
108         address_space_init_once(mapping);
109         mapping->a_ops = &gfs2_rgrp_aops;
110         mapping->host = sb->s_bdev->bd_inode;
111         mapping->flags = 0;
112         mapping_set_gfp_mask(mapping, GFP_NOFS);
113         mapping->private_data = NULL;
114         mapping->writeback_index = 0;
115
116         spin_lock_init(&sdp->sd_log_lock);
117         atomic_set(&sdp->sd_log_pinned, 0);
118         INIT_LIST_HEAD(&sdp->sd_log_revokes);
119         INIT_LIST_HEAD(&sdp->sd_log_ordered);
120         spin_lock_init(&sdp->sd_ordered_lock);
121
122         init_waitqueue_head(&sdp->sd_log_waitq);
123         init_waitqueue_head(&sdp->sd_logd_waitq);
124         spin_lock_init(&sdp->sd_ail_lock);
125         INIT_LIST_HEAD(&sdp->sd_ail1_list);
126         INIT_LIST_HEAD(&sdp->sd_ail2_list);
127
128         init_rwsem(&sdp->sd_log_flush_lock);
129         atomic_set(&sdp->sd_log_in_flight, 0);
130         atomic_set(&sdp->sd_reserving_log, 0);
131         init_waitqueue_head(&sdp->sd_reserving_log_wait);
132         init_waitqueue_head(&sdp->sd_log_flush_wait);
133         atomic_set(&sdp->sd_freeze_state, SFS_UNFROZEN);
134         mutex_init(&sdp->sd_freeze_mutex);
135
136         return sdp;
137 }
138
139
140 /**
141  * gfs2_check_sb - Check superblock
142  * @sdp: the filesystem
143  * @silent: Don't print a message if the check fails
144  *
145  * Checks that the version code of the FS is one that we understand how
146  * to read and that the sizes of the various on-disk structures have not
147  * changed.
148  *
149  */
150
151 static int gfs2_check_sb(struct gfs2_sbd *sdp, int silent)
152 {
153         struct gfs2_sb_host *sb = &sdp->sd_sb;
154
155         if (sb->sb_magic != GFS2_MAGIC ||
156             sb->sb_type != GFS2_METATYPE_SB) {
157                 if (!silent)
158                         pr_warn("not a GFS2 filesystem\n");
159                 return -EINVAL;
160         }
161
162         /*  If format numbers match exactly, we're done.  */
163
164         if (sb->sb_fs_format == GFS2_FORMAT_FS &&
165             sb->sb_multihost_format == GFS2_FORMAT_MULTI)
166                 return 0;
167
168         fs_warn(sdp, "Unknown on-disk format, unable to mount\n");
169
170         return -EINVAL;
171 }
172
173 static void end_bio_io_page(struct bio *bio)
174 {
175         struct page *page = bio->bi_private;
176
177         if (!bio->bi_status)
178                 SetPageUptodate(page);
179         else
180                 pr_warn("error %d reading superblock\n", bio->bi_status);
181         unlock_page(page);
182 }
183
184 static void gfs2_sb_in(struct gfs2_sbd *sdp, const void *buf)
185 {
186         struct gfs2_sb_host *sb = &sdp->sd_sb;
187         struct super_block *s = sdp->sd_vfs;
188         const struct gfs2_sb *str = buf;
189
190         sb->sb_magic = be32_to_cpu(str->sb_header.mh_magic);
191         sb->sb_type = be32_to_cpu(str->sb_header.mh_type);
192         sb->sb_format = be32_to_cpu(str->sb_header.mh_format);
193         sb->sb_fs_format = be32_to_cpu(str->sb_fs_format);
194         sb->sb_multihost_format = be32_to_cpu(str->sb_multihost_format);
195         sb->sb_bsize = be32_to_cpu(str->sb_bsize);
196         sb->sb_bsize_shift = be32_to_cpu(str->sb_bsize_shift);
197         sb->sb_master_dir.no_addr = be64_to_cpu(str->sb_master_dir.no_addr);
198         sb->sb_master_dir.no_formal_ino = be64_to_cpu(str->sb_master_dir.no_formal_ino);
199         sb->sb_root_dir.no_addr = be64_to_cpu(str->sb_root_dir.no_addr);
200         sb->sb_root_dir.no_formal_ino = be64_to_cpu(str->sb_root_dir.no_formal_ino);
201
202         memcpy(sb->sb_lockproto, str->sb_lockproto, GFS2_LOCKNAME_LEN);
203         memcpy(sb->sb_locktable, str->sb_locktable, GFS2_LOCKNAME_LEN);
204         memcpy(&s->s_uuid, str->sb_uuid, 16);
205 }
206
207 /**
208  * gfs2_read_super - Read the gfs2 super block from disk
209  * @sdp: The GFS2 super block
210  * @sector: The location of the super block
211  * @silent: Don't print a message if the superblock check fails
212  *
213  * This uses the bio functions to read the super block from disk
214  * because we want to be 100% sure that we never read cached data.
215  * The super block is read only twice during each GFS2 mount and is
216  * never written to by the filesystem. The first time it is read no
217  * locks are held, and the only details which are looked at are those
218  * relating to the locking protocol. Once locking is up and working,
219  * the sb is read again under the lock to establish the location of
220  * the master directory (contains pointers to journals etc) and the
221  * root directory.
222  *
223  * Returns: 0 on success or error
224  */
225
226 static int gfs2_read_super(struct gfs2_sbd *sdp, sector_t sector, int silent)
227 {
228         struct super_block *sb = sdp->sd_vfs;
229         struct gfs2_sb *p;
230         struct page *page;
231         struct bio *bio;
232
233         page = alloc_page(GFP_NOFS);
234         if (unlikely(!page))
235                 return -ENOMEM;
236
237         ClearPageUptodate(page);
238         ClearPageDirty(page);
239         lock_page(page);
240
241         bio = bio_alloc(GFP_NOFS, 1);
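        /* 'sector' is a filesystem block number; bi_sector is in 512-byte units. */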
242         bio->bi_iter.bi_sector = sector * (sb->s_blocksize >> 9);
243         bio_set_dev(bio, sb->s_bdev);
244         bio_add_page(bio, page, PAGE_SIZE, 0);
245
246         bio->bi_end_io = end_bio_io_page;
247         bio->bi_private = page;
248         bio_set_op_attrs(bio, REQ_OP_READ, REQ_META);
249         submit_bio(bio);
250         wait_on_page_locked(page);
251         bio_put(bio);
252         if (!PageUptodate(page)) {
253                 __free_page(page);
254                 return -EIO;
255         }
256         p = kmap(page);
257         gfs2_sb_in(sdp, p);
258         kunmap(page);
259         __free_page(page);
260         return gfs2_check_sb(sdp, silent);
261 }
262
263 /**
264  * gfs2_read_sb - Read super block
265  * @sdp: The GFS2 superblock
266  * @silent: Don't print message if mount fails
267  *
268  */
269
270 static int gfs2_read_sb(struct gfs2_sbd *sdp, int silent)
271 {
272         u32 hash_blocks, ind_blocks, leaf_blocks;
273         u32 tmp_blocks;
274         unsigned int x;
275         int error;
276
277         error = gfs2_read_super(sdp, GFS2_SB_ADDR >> sdp->sd_fsb2bb_shift, silent);
278         if (error) {
279                 if (!silent)
280                         fs_err(sdp, "can't read superblock\n");
281                 return error;
282         }
283
284         sdp->sd_fsb2bb_shift = sdp->sd_sb.sb_bsize_shift -
285                                GFS2_BASIC_BLOCK_SHIFT;
286         sdp->sd_fsb2bb = BIT(sdp->sd_fsb2bb_shift);
287         sdp->sd_diptrs = (sdp->sd_sb.sb_bsize -
288                           sizeof(struct gfs2_dinode)) / sizeof(u64);
289         sdp->sd_inptrs = (sdp->sd_sb.sb_bsize -
290                           sizeof(struct gfs2_meta_header)) / sizeof(u64);
291         sdp->sd_jbsize = sdp->sd_sb.sb_bsize - sizeof(struct gfs2_meta_header);
292         sdp->sd_hash_bsize = sdp->sd_sb.sb_bsize / 2;
293         sdp->sd_hash_bsize_shift = sdp->sd_sb.sb_bsize_shift - 1;
294         sdp->sd_hash_ptrs = sdp->sd_hash_bsize / sizeof(u64);
295         sdp->sd_qc_per_block = (sdp->sd_sb.sb_bsize -
296                                 sizeof(struct gfs2_meta_header)) /
297                                 sizeof(struct gfs2_quota_change);
298         sdp->sd_blocks_per_bitmap = (sdp->sd_sb.sb_bsize -
299                                      sizeof(struct gfs2_meta_header))
300                 * GFS2_NBBY; /* not the rgrp bitmap, subsequent bitmaps only */
301
302         /* Compute maximum reservation required to add an entry to a directory */
303
304         hash_blocks = DIV_ROUND_UP(sizeof(u64) * BIT(GFS2_DIR_MAX_DEPTH),
305                              sdp->sd_jbsize);
306
307         ind_blocks = 0;
308         for (tmp_blocks = hash_blocks; tmp_blocks > sdp->sd_diptrs;) {
309                 tmp_blocks = DIV_ROUND_UP(tmp_blocks, sdp->sd_inptrs);
310                 ind_blocks += tmp_blocks;
311         }
312
313         leaf_blocks = 2 + GFS2_DIR_MAX_DEPTH;
314
315         sdp->sd_max_dirres = hash_blocks + ind_blocks + leaf_blocks;
316
317         sdp->sd_heightsize[0] = sdp->sd_sb.sb_bsize -
318                                 sizeof(struct gfs2_dinode);
319         sdp->sd_heightsize[1] = sdp->sd_sb.sb_bsize * sdp->sd_diptrs;
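        /*
         * Each further level of indirection multiplies the maximum size by
         * sd_inptrs; stop once the multiplication overflows a u64, which is
         * detected by dividing the product back down again.
         */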
320         for (x = 2;; x++) {
321                 u64 space, d;
322                 u32 m;
323
324                 space = sdp->sd_heightsize[x - 1] * sdp->sd_inptrs;
325                 d = space;
326                 m = do_div(d, sdp->sd_inptrs);
327
328                 if (d != sdp->sd_heightsize[x - 1] || m)
329                         break;
330                 sdp->sd_heightsize[x] = space;
331         }
332         sdp->sd_max_height = x;
333         sdp->sd_heightsize[x] = ~0;
334         gfs2_assert(sdp, sdp->sd_max_height <= GFS2_MAX_META_HEIGHT);
335
336         sdp->sd_max_dents_per_leaf = (sdp->sd_sb.sb_bsize -
337                                       sizeof(struct gfs2_leaf)) /
338                                      GFS2_MIN_DIRENT_SIZE;
339         return 0;
340 }
341
342 static int init_names(struct gfs2_sbd *sdp, int silent)
343 {
344         char *proto, *table;
345         int error = 0;
346
347         proto = sdp->sd_args.ar_lockproto;
348         table = sdp->sd_args.ar_locktable;
349
350         /*  Try to autodetect  */
351
352         if (!proto[0] || !table[0]) {
353                 error = gfs2_read_super(sdp, GFS2_SB_ADDR >> sdp->sd_fsb2bb_shift, silent);
354                 if (error)
355                         return error;
356
357                 if (!proto[0])
358                         proto = sdp->sd_sb.sb_lockproto;
359                 if (!table[0])
360                         table = sdp->sd_sb.sb_locktable;
361         }
362
363         if (!table[0])
364                 table = sdp->sd_vfs->s_id;
365
366         strlcpy(sdp->sd_proto_name, proto, GFS2_FSNAME_LEN);
367         strlcpy(sdp->sd_table_name, table, GFS2_FSNAME_LEN);
368
369         table = sdp->sd_table_name;
370         while ((table = strchr(table, '/')))
371                 *table = '_';
372
373         return error;
374 }
375
376 static int init_locking(struct gfs2_sbd *sdp, struct gfs2_holder *mount_gh,
377                         int undo)
378 {
379         int error = 0;
380
381         if (undo)
382                 goto fail_trans;
383
384         error = gfs2_glock_nq_num(sdp,
385                                   GFS2_MOUNT_LOCK, &gfs2_nondisk_glops,
386                                   LM_ST_EXCLUSIVE, LM_FLAG_NOEXP | GL_NOCACHE,
387                                   mount_gh);
388         if (error) {
389                 fs_err(sdp, "can't acquire mount glock: %d\n", error);
390                 goto fail;
391         }
392
393         error = gfs2_glock_nq_num(sdp,
394                                   GFS2_LIVE_LOCK, &gfs2_nondisk_glops,
395                                   LM_ST_SHARED,
396                                   LM_FLAG_NOEXP | GL_EXACT,
397                                   &sdp->sd_live_gh);
398         if (error) {
399                 fs_err(sdp, "can't acquire live glock: %d\n", error);
400                 goto fail_mount;
401         }
402
403         error = gfs2_glock_get(sdp, GFS2_RENAME_LOCK, &gfs2_nondisk_glops,
404                                CREATE, &sdp->sd_rename_gl);
405         if (error) {
406                 fs_err(sdp, "can't create rename glock: %d\n", error);
407                 goto fail_live;
408         }
409
410         error = gfs2_glock_get(sdp, GFS2_FREEZE_LOCK, &gfs2_freeze_glops,
411                                CREATE, &sdp->sd_freeze_gl);
412         if (error) {
413                 fs_err(sdp, "can't create transaction glock: %d\n", error);
414                 goto fail_rename;
415         }
416
417         return 0;
418
419 fail_trans:
420         gfs2_glock_put(sdp->sd_freeze_gl);
421 fail_rename:
422         gfs2_glock_put(sdp->sd_rename_gl);
423 fail_live:
424         gfs2_glock_dq_uninit(&sdp->sd_live_gh);
425 fail_mount:
426         gfs2_glock_dq_uninit(mount_gh);
427 fail:
428         return error;
429 }
430
431 static int gfs2_lookup_root(struct super_block *sb, struct dentry **dptr,
432                             u64 no_addr, const char *name)
433 {
434         struct gfs2_sbd *sdp = sb->s_fs_info;
435         struct dentry *dentry;
436         struct inode *inode;
437
438         inode = gfs2_inode_lookup(sb, DT_DIR, no_addr, 0,
439                                   GFS2_BLKST_FREE /* ignore */);
440         if (IS_ERR(inode)) {
441                 fs_err(sdp, "can't read in %s inode: %ld\n", name, PTR_ERR(inode));
442                 return PTR_ERR(inode);
443         }
444         dentry = d_make_root(inode);
445         if (!dentry) {
446                 fs_err(sdp, "can't alloc %s dentry\n", name);
447                 return -ENOMEM;
448         }
449         *dptr = dentry;
450         return 0;
451 }
452
453 static int init_sb(struct gfs2_sbd *sdp, int silent)
454 {
455         struct super_block *sb = sdp->sd_vfs;
456         struct gfs2_holder sb_gh;
457         u64 no_addr;
458         int ret;
459
460         ret = gfs2_glock_nq_num(sdp, GFS2_SB_LOCK, &gfs2_meta_glops,
461                                 LM_ST_SHARED, 0, &sb_gh);
462         if (ret) {
463                 fs_err(sdp, "can't acquire superblock glock: %d\n", ret);
464                 return ret;
465         }
466
467         ret = gfs2_read_sb(sdp, silent);
468         if (ret) {
469                 fs_err(sdp, "can't read superblock: %d\n", ret);
470                 goto out;
471         }
472
473         /* Set up the buffer cache and SB for real */
474         if (sdp->sd_sb.sb_bsize < bdev_logical_block_size(sb->s_bdev)) {
475                 ret = -EINVAL;
476                 fs_err(sdp, "FS block size (%u) is too small for device "
477                        "block size (%u)\n",
478                        sdp->sd_sb.sb_bsize, bdev_logical_block_size(sb->s_bdev));
479                 goto out;
480         }
481         if (sdp->sd_sb.sb_bsize > PAGE_SIZE) {
482                 ret = -EINVAL;
483                 fs_err(sdp, "FS block size (%u) is too big for machine "
484                        "page size (%u)\n",
485                        sdp->sd_sb.sb_bsize, (unsigned int)PAGE_SIZE);
486                 goto out;
487         }
488         sb_set_blocksize(sb, sdp->sd_sb.sb_bsize);
489
490         /* Get the root inode */
491         no_addr = sdp->sd_sb.sb_root_dir.no_addr;
492         ret = gfs2_lookup_root(sb, &sdp->sd_root_dir, no_addr, "root");
493         if (ret)
494                 goto out;
495
496         /* Get the master inode */
497         no_addr = sdp->sd_sb.sb_master_dir.no_addr;
498         ret = gfs2_lookup_root(sb, &sdp->sd_master_dir, no_addr, "master");
499         if (ret) {
500                 dput(sdp->sd_root_dir);
501                 goto out;
502         }
503         sb->s_root = dget(sdp->sd_args.ar_meta ? sdp->sd_master_dir : sdp->sd_root_dir);
504 out:
505         gfs2_glock_dq_uninit(&sb_gh);
506         return ret;
507 }
508
509 static void gfs2_others_may_mount(struct gfs2_sbd *sdp)
510 {
511         char *message = "FIRSTMOUNT=Done";
512         char *envp[] = { message, NULL };
513
514         fs_info(sdp, "first mount done, others may mount\n");
515
516         if (sdp->sd_lockstruct.ls_ops->lm_first_done)
517                 sdp->sd_lockstruct.ls_ops->lm_first_done(sdp);
518
519         kobject_uevent_env(&sdp->sd_kobj, KOBJ_CHANGE, envp);
520 }
521
522 /**
523  * gfs2_jindex_hold - Grab a lock on the jindex
524  * @sdp: The GFS2 superblock
525  * @ji_gh: the holder for the jindex glock
526  *
527  * Returns: errno
528  */
529
530 static int gfs2_jindex_hold(struct gfs2_sbd *sdp, struct gfs2_holder *ji_gh)
531 {
532         struct gfs2_inode *dip = GFS2_I(sdp->sd_jindex);
533         struct qstr name;
534         char buf[20];
535         struct gfs2_jdesc *jd;
536         int error;
537
538         name.name = buf;
539
540         mutex_lock(&sdp->sd_jindex_mutex);
541
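        /*
         * Probe for journal0, journal1, ... until the directory lookup
         * returns -ENOENT, adding each journal found to the jindex list.
         * On success the loop exits with the jindex glock still held.
         */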
542         for (;;) {
543                 error = gfs2_glock_nq_init(dip->i_gl, LM_ST_SHARED, 0, ji_gh);
544                 if (error)
545                         break;
546
547                 name.len = sprintf(buf, "journal%u", sdp->sd_journals);
548                 name.hash = gfs2_disk_hash(name.name, name.len);
549
550                 error = gfs2_dir_check(sdp->sd_jindex, &name, NULL);
551                 if (error == -ENOENT) {
552                         error = 0;
553                         break;
554                 }
555
556                 gfs2_glock_dq_uninit(ji_gh);
557
558                 if (error)
559                         break;
560
561                 error = -ENOMEM;
562                 jd = kzalloc(sizeof(struct gfs2_jdesc), GFP_KERNEL);
563                 if (!jd)
564                         break;
565
566                 INIT_LIST_HEAD(&jd->extent_list);
567                 INIT_LIST_HEAD(&jd->jd_revoke_list);
568
569                 INIT_WORK(&jd->jd_work, gfs2_recover_func);
570                 jd->jd_inode = gfs2_lookupi(sdp->sd_jindex, &name, 1);
571                 if (!jd->jd_inode || IS_ERR(jd->jd_inode)) {
572                         if (!jd->jd_inode)
573                                 error = -ENOENT;
574                         else
575                                 error = PTR_ERR(jd->jd_inode);
576                         kfree(jd);
577                         break;
578                 }
579
580                 spin_lock(&sdp->sd_jindex_spin);
581                 jd->jd_jid = sdp->sd_journals++;
582                 list_add_tail(&jd->jd_list, &sdp->sd_jindex_list);
583                 spin_unlock(&sdp->sd_jindex_spin);
584         }
585
586         mutex_unlock(&sdp->sd_jindex_mutex);
587
588         return error;
589 }
590
591 /**
592  * check_journal_clean - Make sure a journal is clean for a spectator mount
593  * @sdp: The GFS2 superblock
594  * @jd: The journal descriptor
595  *
596  * Returns: 0 if the journal is clean or locked, else an error
597  */
598 static int check_journal_clean(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd)
599 {
600         int error;
601         struct gfs2_holder j_gh;
602         struct gfs2_log_header_host head;
603         struct gfs2_inode *ip;
604
605         ip = GFS2_I(jd->jd_inode);
606         error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_NOEXP |
607                                    GL_EXACT | GL_NOCACHE, &j_gh);
608         if (error) {
609                 fs_err(sdp, "Error locking journal for spectator mount.\n");
610                 return -EPERM;
611         }
612         error = gfs2_jdesc_check(jd);
613         if (error) {
614                 fs_err(sdp, "Error checking journal for spectator mount.\n");
615                 goto out_unlock;
616         }
617         error = gfs2_find_jhead(jd, &head, false);
618         if (error) {
619                 fs_err(sdp, "Error parsing journal for spectator mount.\n");
620                 goto out_unlock;
621         }
622         if (!(head.lh_flags & GFS2_LOG_HEAD_UNMOUNT)) {
623                 error = -EPERM;
624                 fs_err(sdp, "jid=%u: Journal is dirty, so the first mounter "
625                        "must not be a spectator.\n", jd->jd_jid);
626         }
627
628 out_unlock:
629         gfs2_glock_dq_uninit(&j_gh);
630         return error;
631 }
632
633 static int init_journal(struct gfs2_sbd *sdp, int undo)
634 {
635         struct inode *master = d_inode(sdp->sd_master_dir);
636         struct gfs2_holder ji_gh;
637         struct gfs2_inode *ip;
638         int jindex = 1;
639         int error = 0;
640
641         if (undo) {
642                 jindex = 0;
643                 goto fail_jinode_gh;
644         }
645
646         sdp->sd_jindex = gfs2_lookup_simple(master, "jindex");
647         if (IS_ERR(sdp->sd_jindex)) {
648                 fs_err(sdp, "can't lookup journal index: %ld\n", PTR_ERR(sdp->sd_jindex));
649                 return PTR_ERR(sdp->sd_jindex);
650         }
651
652         /* Load in the journal index special file */
653
654         error = gfs2_jindex_hold(sdp, &ji_gh);
655         if (error) {
656                 fs_err(sdp, "can't read journal index: %d\n", error);
657                 goto fail;
658         }
659
660         error = -EUSERS;
661         if (!gfs2_jindex_size(sdp)) {
662                 fs_err(sdp, "no journals!\n");
663                 goto fail_jindex;
664         }
665
666         atomic_set(&sdp->sd_log_blks_needed, 0);
667         if (sdp->sd_args.ar_spectator) {
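                /*
                 * A spectator mount does not own a journal; borrow journal
                 * 0's descriptor to size the in-core log accounting.
                 */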
668                 sdp->sd_jdesc = gfs2_jdesc_find(sdp, 0);
669                 atomic_set(&sdp->sd_log_blks_free, sdp->sd_jdesc->jd_blocks);
670                 atomic_set(&sdp->sd_log_thresh1, 2*sdp->sd_jdesc->jd_blocks/5);
671                 atomic_set(&sdp->sd_log_thresh2, 4*sdp->sd_jdesc->jd_blocks/5);
672         } else {
673                 if (sdp->sd_lockstruct.ls_jid >= gfs2_jindex_size(sdp)) {
674                         fs_err(sdp, "can't mount journal #%u\n",
675                                sdp->sd_lockstruct.ls_jid);
676                         fs_err(sdp, "there are only %u journals (0 - %u)\n",
677                                gfs2_jindex_size(sdp),
678                                gfs2_jindex_size(sdp) - 1);
679                         goto fail_jindex;
680                 }
681                 sdp->sd_jdesc = gfs2_jdesc_find(sdp, sdp->sd_lockstruct.ls_jid);
682
683                 error = gfs2_glock_nq_num(sdp, sdp->sd_lockstruct.ls_jid,
684                                           &gfs2_journal_glops,
685                                           LM_ST_EXCLUSIVE, LM_FLAG_NOEXP,
686                                           &sdp->sd_journal_gh);
687                 if (error) {
688                         fs_err(sdp, "can't acquire journal glock: %d\n", error);
689                         goto fail_jindex;
690                 }
691
692                 ip = GFS2_I(sdp->sd_jdesc->jd_inode);
693                 error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED,
694                                            LM_FLAG_NOEXP | GL_EXACT | GL_NOCACHE,
695                                            &sdp->sd_jinode_gh);
696                 if (error) {
697                         fs_err(sdp, "can't acquire journal inode glock: %d\n",
698                                error);
699                         goto fail_journal_gh;
700                 }
701
702                 error = gfs2_jdesc_check(sdp->sd_jdesc);
703                 if (error) {
704                         fs_err(sdp, "my journal (%u) is bad: %d\n",
705                                sdp->sd_jdesc->jd_jid, error);
706                         goto fail_jinode_gh;
707                 }
708                 atomic_set(&sdp->sd_log_blks_free, sdp->sd_jdesc->jd_blocks);
709                 atomic_set(&sdp->sd_log_thresh1, 2*sdp->sd_jdesc->jd_blocks/5);
710                 atomic_set(&sdp->sd_log_thresh2, 4*sdp->sd_jdesc->jd_blocks/5);
711
712                 /* Map the extents for this journal's blocks */
713                 gfs2_map_journal_extents(sdp, sdp->sd_jdesc);
714         }
715         trace_gfs2_log_blocks(sdp, atomic_read(&sdp->sd_log_blks_free));
716
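        /*
         * The first mounter recovers every journal (or, for a spectator,
         * checks that each one is clean); later mounters replay only their
         * own journal.
         */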
717         if (sdp->sd_lockstruct.ls_first) {
718                 unsigned int x;
719                 for (x = 0; x < sdp->sd_journals; x++) {
720                         struct gfs2_jdesc *jd = gfs2_jdesc_find(sdp, x);
721
722                         if (sdp->sd_args.ar_spectator) {
723                                 error = check_journal_clean(sdp, jd);
724                                 if (error)
725                                         goto fail_jinode_gh;
726                                 continue;
727                         }
728                         error = gfs2_recover_journal(jd, true);
729                         if (error) {
730                                 fs_err(sdp, "error recovering journal %u: %d\n",
731                                        x, error);
732                                 goto fail_jinode_gh;
733                         }
734                 }
735
736                 gfs2_others_may_mount(sdp);
737         } else if (!sdp->sd_args.ar_spectator) {
738                 error = gfs2_recover_journal(sdp->sd_jdesc, true);
739                 if (error) {
740                         fs_err(sdp, "error recovering my journal: %d\n", error);
741                         goto fail_jinode_gh;
742                 }
743         }
744
745         sdp->sd_log_idle = 1;
746         set_bit(SDF_JOURNAL_CHECKED, &sdp->sd_flags);
747         gfs2_glock_dq_uninit(&ji_gh);
748         jindex = 0;
749         INIT_WORK(&sdp->sd_freeze_work, gfs2_freeze_func);
750         return 0;
751
752 fail_jinode_gh:
753         if (!sdp->sd_args.ar_spectator)
754                 gfs2_glock_dq_uninit(&sdp->sd_jinode_gh);
755 fail_journal_gh:
756         if (!sdp->sd_args.ar_spectator)
757                 gfs2_glock_dq_uninit(&sdp->sd_journal_gh);
758 fail_jindex:
759         gfs2_jindex_free(sdp);
760         if (jindex)
761                 gfs2_glock_dq_uninit(&ji_gh);
762 fail:
763         iput(sdp->sd_jindex);
764         return error;
765 }
766
767 static struct lock_class_key gfs2_quota_imutex_key;
768
769 static int init_inodes(struct gfs2_sbd *sdp, int undo)
770 {
771         int error = 0;
772         struct inode *master = d_inode(sdp->sd_master_dir);
773
774         if (undo)
775                 goto fail_qinode;
776
777         error = init_journal(sdp, undo);
778         complete_all(&sdp->sd_journal_ready);
779         if (error)
780                 goto fail;
781
782         /* Read in the master statfs inode */
783         sdp->sd_statfs_inode = gfs2_lookup_simple(master, "statfs");
784         if (IS_ERR(sdp->sd_statfs_inode)) {
785                 error = PTR_ERR(sdp->sd_statfs_inode);
786                 fs_err(sdp, "can't read in statfs inode: %d\n", error);
787                 goto fail_journal;
788         }
789
790         /* Read in the resource index inode */
791         sdp->sd_rindex = gfs2_lookup_simple(master, "rindex");
792         if (IS_ERR(sdp->sd_rindex)) {
793                 error = PTR_ERR(sdp->sd_rindex);
794                 fs_err(sdp, "can't get resource index inode: %d\n", error);
795                 goto fail_statfs;
796         }
797         sdp->sd_rindex_uptodate = 0;
798
799         /* Read in the quota inode */
800         sdp->sd_quota_inode = gfs2_lookup_simple(master, "quota");
801         if (IS_ERR(sdp->sd_quota_inode)) {
802                 error = PTR_ERR(sdp->sd_quota_inode);
803                 fs_err(sdp, "can't get quota file inode: %d\n", error);
804                 goto fail_rindex;
805         }
806         /*
807          * i_rwsem on quota files is special. Since this inode is a hidden
808          * system file, we are safe to define locking ourselves.
809          */
810         lockdep_set_class(&sdp->sd_quota_inode->i_rwsem,
811                           &gfs2_quota_imutex_key);
812
813         error = gfs2_rindex_update(sdp);
814         if (error)
815                 goto fail_qinode;
816
817         return 0;
818
819 fail_qinode:
820         iput(sdp->sd_quota_inode);
821 fail_rindex:
822         gfs2_clear_rgrpd(sdp);
823         iput(sdp->sd_rindex);
824 fail_statfs:
825         iput(sdp->sd_statfs_inode);
826 fail_journal:
827         init_journal(sdp, UNDO);
828 fail:
829         return error;
830 }
831
832 static int init_per_node(struct gfs2_sbd *sdp, int undo)
833 {
834         struct inode *pn = NULL;
835         char buf[30];
836         int error = 0;
837         struct gfs2_inode *ip;
838         struct inode *master = d_inode(sdp->sd_master_dir);
839
840         if (sdp->sd_args.ar_spectator)
841                 return 0;
842
843         if (undo)
844                 goto fail_qc_gh;
845
846         pn = gfs2_lookup_simple(master, "per_node");
847         if (IS_ERR(pn)) {
848                 error = PTR_ERR(pn);
849                 fs_err(sdp, "can't find per_node directory: %d\n", error);
850                 return error;
851         }
852
853         sprintf(buf, "statfs_change%u", sdp->sd_jdesc->jd_jid);
854         sdp->sd_sc_inode = gfs2_lookup_simple(pn, buf);
855         if (IS_ERR(sdp->sd_sc_inode)) {
856                 error = PTR_ERR(sdp->sd_sc_inode);
857                 fs_err(sdp, "can't find local \"sc\" file: %d\n", error);
858                 goto fail;
859         }
860
861         sprintf(buf, "quota_change%u", sdp->sd_jdesc->jd_jid);
862         sdp->sd_qc_inode = gfs2_lookup_simple(pn, buf);
863         if (IS_ERR(sdp->sd_qc_inode)) {
864                 error = PTR_ERR(sdp->sd_qc_inode);
865                 fs_err(sdp, "can't find local \"qc\" file: %d\n", error);
866                 goto fail_ut_i;
867         }
868
869         iput(pn);
870         pn = NULL;
871
872         ip = GFS2_I(sdp->sd_sc_inode);
873         error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0,
874                                    &sdp->sd_sc_gh);
875         if (error) {
876                 fs_err(sdp, "can't lock local \"sc\" file: %d\n", error);
877                 goto fail_qc_i;
878         }
879
880         ip = GFS2_I(sdp->sd_qc_inode);
881         error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0,
882                                    &sdp->sd_qc_gh);
883         if (error) {
884                 fs_err(sdp, "can't lock local \"qc\" file: %d\n", error);
885                 goto fail_ut_gh;
886         }
887
888         return 0;
889
890 fail_qc_gh:
891         gfs2_glock_dq_uninit(&sdp->sd_qc_gh);
892 fail_ut_gh:
893         gfs2_glock_dq_uninit(&sdp->sd_sc_gh);
894 fail_qc_i:
895         iput(sdp->sd_qc_inode);
896 fail_ut_i:
897         iput(sdp->sd_sc_inode);
898 fail:
899         iput(pn);
900         return error;
901 }
902
903 static const match_table_t nolock_tokens = {
904         { Opt_jid, "jid=%d\n", },
905         { Opt_err, NULL },
906 };
907
908 static const struct lm_lockops nolock_ops = {
909         .lm_proto_name = "lock_nolock",
910         .lm_put_lock = gfs2_glock_free,
911         .lm_tokens = &nolock_tokens,
912 };
913
914 /**
915  * gfs2_lm_mount - mount a locking protocol
916  * @sdp: the filesystem
918  * @silent: if 1, don't complain if the FS isn't a GFS2 fs
919  *
920  * Returns: errno
921  */
922
923 static int gfs2_lm_mount(struct gfs2_sbd *sdp, int silent)
924 {
925         const struct lm_lockops *lm;
926         struct lm_lockstruct *ls = &sdp->sd_lockstruct;
927         struct gfs2_args *args = &sdp->sd_args;
928         const char *proto = sdp->sd_proto_name;
929         const char *table = sdp->sd_table_name;
930         char *o, *options;
931         int ret;
932
933         if (!strcmp("lock_nolock", proto)) {
934                 lm = &nolock_ops;
935                 sdp->sd_args.ar_localflocks = 1;
936 #ifdef CONFIG_GFS2_FS_LOCKING_DLM
937         } else if (!strcmp("lock_dlm", proto)) {
938                 lm = &gfs2_dlm_ops;
939 #endif
940         } else {
941                 pr_info("can't find protocol %s\n", proto);
942                 return -ENOENT;
943         }
944
945         fs_info(sdp, "Trying to join cluster \"%s\", \"%s\"\n", proto, table);
946
947         ls->ls_ops = lm;
948         ls->ls_first = 1;
949
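        /*
         * Parse the colon-separated hostdata mount option (normally supplied
         * by the cluster mount infrastructure) for per-node settings such as
         * the journal id and whether this node is the first mounter.
         */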
950         for (options = args->ar_hostdata; (o = strsep(&options, ":")); ) {
951                 substring_t tmp[MAX_OPT_ARGS];
952                 int token, option;
953
954                 if (!o || !*o)
955                         continue;
956
957                 token = match_token(o, *lm->lm_tokens, tmp);
958                 switch (token) {
959                 case Opt_jid:
960                         ret = match_int(&tmp[0], &option);
961                         if (ret || option < 0)
962                                 goto hostdata_error;
963                         if (test_and_clear_bit(SDF_NOJOURNALID, &sdp->sd_flags))
964                                 ls->ls_jid = option;
965                         break;
966                 case Opt_id:
967                 case Opt_nodir:
968                         /* Obsolete, but left for backward compat purposes */
969                         break;
970                 case Opt_first:
971                         ret = match_int(&tmp[0], &option);
972                         if (ret || (option != 0 && option != 1))
973                                 goto hostdata_error;
974                         ls->ls_first = option;
975                         break;
976                 case Opt_err:
977                 default:
978 hostdata_error:
979                         fs_info(sdp, "unknown hostdata (%s)\n", o);
980                         return -EINVAL;
981                 }
982         }
983
984         if (lm->lm_mount == NULL) {
985                 fs_info(sdp, "Now mounting FS...\n");
986                 complete_all(&sdp->sd_locking_init);
987                 return 0;
988         }
989         ret = lm->lm_mount(sdp, table);
990         if (ret == 0)
991                 fs_info(sdp, "Joined cluster. Now mounting FS...\n");
992         complete_all(&sdp->sd_locking_init);
993         return ret;
994 }
995
996 void gfs2_lm_unmount(struct gfs2_sbd *sdp)
997 {
998         const struct lm_lockops *lm = sdp->sd_lockstruct.ls_ops;
999         if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)) &&
1000             lm->lm_unmount)
1001                 lm->lm_unmount(sdp);
1002 }
1003
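/*
 * Wait until a journal id has been assigned.  With lock_nolock there is no
 * lm_mount and nothing to wait for; otherwise SDF_NOJOURNALID is cleared once
 * the id arrives (typically via the jid= hostdata option or through sysfs).
 */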
1004 static int wait_on_journal(struct gfs2_sbd *sdp)
1005 {
1006         if (sdp->sd_lockstruct.ls_ops->lm_mount == NULL)
1007                 return 0;
1008
1009         return wait_on_bit(&sdp->sd_flags, SDF_NOJOURNALID, TASK_INTERRUPTIBLE)
1010                 ? -EINTR : 0;
1011 }
1012
1013 void gfs2_online_uevent(struct gfs2_sbd *sdp)
1014 {
1015         struct super_block *sb = sdp->sd_vfs;
1016         char ro[20];
1017         char spectator[20];
1018         char *envp[] = { ro, spectator, NULL };
1019         sprintf(ro, "RDONLY=%d", sb_rdonly(sb));
1020         sprintf(spectator, "SPECTATOR=%d", sdp->sd_args.ar_spectator ? 1 : 0);
1021         kobject_uevent_env(&sdp->sd_kobj, KOBJ_ONLINE, envp);
1022 }
1023
1024 /**
1025  * fill_super - Read in superblock
1026  * @sb: The VFS superblock
1027  * @args: Mount options
1028  * @silent: Don't complain if it's not a GFS2 filesystem
1029  *
1030  * Returns: errno
1031  */
1032
1033 static int fill_super(struct super_block *sb, struct gfs2_args *args, int silent)
1034 {
1035         struct gfs2_sbd *sdp;
1036         struct gfs2_holder mount_gh;
1037         int error;
1038
1039         sdp = init_sbd(sb);
1040         if (!sdp) {
1041                 pr_warn("can't alloc struct gfs2_sbd\n");
1042                 return -ENOMEM;
1043         }
1044         sdp->sd_args = *args;
1045
1046         if (sdp->sd_args.ar_spectator) {
1047                 sb->s_flags |= SB_RDONLY;
1048                 set_bit(SDF_RORECOVERY, &sdp->sd_flags);
1049         }
1050         if (sdp->sd_args.ar_posix_acl)
1051                 sb->s_flags |= SB_POSIXACL;
1052         if (sdp->sd_args.ar_nobarrier)
1053                 set_bit(SDF_NOBARRIERS, &sdp->sd_flags);
1054
1055         sb->s_flags |= SB_NOSEC;
1056         sb->s_magic = GFS2_MAGIC;
1057         sb->s_op = &gfs2_super_ops;
1058         sb->s_d_op = &gfs2_dops;
1059         sb->s_export_op = &gfs2_export_ops;
1060         sb->s_xattr = gfs2_xattr_handlers;
1061         sb->s_qcop = &gfs2_quotactl_ops;
1062         sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP;
1063         sb_dqopt(sb)->flags |= DQUOT_QUOTA_SYS_FILE;
1064         sb->s_time_gran = 1;
1065         sb->s_maxbytes = MAX_LFS_FILESIZE;
1066
1067         /* Set up the buffer cache and fill in some fake block size values
1068            to allow us to read in the on-disk superblock. */
1069         sdp->sd_sb.sb_bsize = sb_min_blocksize(sb, GFS2_BASIC_BLOCK);
1070         sdp->sd_sb.sb_bsize_shift = sb->s_blocksize_bits;
1071         sdp->sd_fsb2bb_shift = sdp->sd_sb.sb_bsize_shift -
1072                                GFS2_BASIC_BLOCK_SHIFT;
1073         sdp->sd_fsb2bb = BIT(sdp->sd_fsb2bb_shift);
1074
1075         sdp->sd_tune.gt_logd_secs = sdp->sd_args.ar_commit;
1076         sdp->sd_tune.gt_quota_quantum = sdp->sd_args.ar_quota_quantum;
1077         if (sdp->sd_args.ar_statfs_quantum) {
1078                 sdp->sd_tune.gt_statfs_slow = 0;
1079                 sdp->sd_tune.gt_statfs_quantum = sdp->sd_args.ar_statfs_quantum;
1080         } else {
1081                 sdp->sd_tune.gt_statfs_slow = 1;
1082                 sdp->sd_tune.gt_statfs_quantum = 30;
1083         }
1084
1085         error = init_names(sdp, silent);
1086         if (error) {
1087                 /* In this case, we haven't initialized sysfs, so we have to
1088                    manually free the sdp. */
1089                 free_percpu(sdp->sd_lkstats);
1090                 kfree(sdp);
1091                 sb->s_fs_info = NULL;
1092                 return error;
1093         }
1094
1095         snprintf(sdp->sd_fsname, sizeof(sdp->sd_fsname), "%s", sdp->sd_table_name);
1096
1097         error = gfs2_sys_fs_add(sdp);
1098         /*
1099          * If we hit an error here, gfs2_sys_fs_add will have called
1100          * kobject_put, which drops the sysfs reference count to zero and
1101          * causes sysfs to call gfs2_sbd_release, which frees sdp.
1102          * Subsequent error paths here will call gfs2_sys_fs_del, which also
1103          * calls kobject_put to free sdp.
1104          */
1105         if (error)
1106                 return error;
1107
1108         gfs2_create_debugfs_file(sdp);
1109
1110         error = gfs2_lm_mount(sdp, silent);
1111         if (error)
1112                 goto fail_debug;
1113
1114         error = init_locking(sdp, &mount_gh, DO);
1115         if (error)
1116                 goto fail_lm;
1117
1118         error = init_sb(sdp, silent);
1119         if (error)
1120                 goto fail_locking;
1121
1122         error = wait_on_journal(sdp);
1123         if (error)
1124                 goto fail_sb;
1125
1126         /*
1127          * If user space has failed to join the cluster or some similar
1128          * failure has occurred, then the journal id will contain a
1129          * negative (error) number. This will then be returned to the
1130          * caller (of the mount syscall). We do this even for spectator
1131          * mounts (which just write a jid of 0 to indicate "ok" even though
1132          * the jid is unused in the spectator case).
1133          */
1134         if (sdp->sd_lockstruct.ls_jid < 0) {
1135                 error = sdp->sd_lockstruct.ls_jid;
1136                 sdp->sd_lockstruct.ls_jid = 0;
1137                 goto fail_sb;
1138         }
1139
1140         if (sdp->sd_args.ar_spectator)
1141                 snprintf(sdp->sd_fsname, sizeof(sdp->sd_fsname), "%s.s",
1142                          sdp->sd_table_name);
1143         else
1144                 snprintf(sdp->sd_fsname, sizeof(sdp->sd_fsname), "%s.%u",
1145                          sdp->sd_table_name, sdp->sd_lockstruct.ls_jid);
1146
1147         error = init_inodes(sdp, DO);
1148         if (error)
1149                 goto fail_sb;
1150
1151         error = init_per_node(sdp, DO);
1152         if (error)
1153                 goto fail_inodes;
1154
1155         error = gfs2_statfs_init(sdp);
1156         if (error) {
1157                 fs_err(sdp, "can't initialize statfs subsystem: %d\n", error);
1158                 goto fail_per_node;
1159         }
1160
1161         if (!sb_rdonly(sb)) {
1162                 error = gfs2_make_fs_rw(sdp);
1163                 if (error) {
1164                         fs_err(sdp, "can't make FS RW: %d\n", error);
1165                         goto fail_per_node;
1166                 }
1167         }
1168
1169         gfs2_glock_dq_uninit(&mount_gh);
1170         gfs2_online_uevent(sdp);
1171         return 0;
1172
1173 fail_per_node:
1174         init_per_node(sdp, UNDO);
1175 fail_inodes:
1176         init_inodes(sdp, UNDO);
1177 fail_sb:
1178         if (sdp->sd_root_dir)
1179                 dput(sdp->sd_root_dir);
1180         if (sdp->sd_master_dir)
1181                 dput(sdp->sd_master_dir);
1182         if (sb->s_root)
1183                 dput(sb->s_root);
1184         sb->s_root = NULL;
1185 fail_locking:
1186         init_locking(sdp, &mount_gh, UNDO);
1187 fail_lm:
1188         complete_all(&sdp->sd_journal_ready);
1189         gfs2_gl_hash_clear(sdp);
1190         gfs2_lm_unmount(sdp);
1191 fail_debug:
1192         gfs2_delete_debugfs_file(sdp);
1193         free_percpu(sdp->sd_lkstats);
1194         /* gfs2_sys_fs_del must be the last thing we do, since it causes
1195          * sysfs to call function gfs2_sbd_release, which frees sdp. */
1196         gfs2_sys_fs_del(sdp);
1197         sb->s_fs_info = NULL;
1198         return error;
1199 }
1200
1201 static int set_gfs2_super(struct super_block *s, void *data)
1202 {
1203         s->s_bdev = data;
1204         s->s_dev = s->s_bdev->bd_dev;
1205         s->s_bdi = bdi_get(s->s_bdev->bd_bdi);
1206         return 0;
1207 }
1208
1209 static int test_gfs2_super(struct super_block *s, void *ptr)
1210 {
1211         struct block_device *bdev = ptr;
1212         return (bdev == s->s_bdev);
1213 }
1214
1215 /**
1216  * gfs2_mount - Get the GFS2 superblock
1217  * @fs_type: The GFS2 filesystem type
1218  * @flags: Mount flags
1219  * @dev_name: The name of the device
1220  * @data: The mount arguments
1221  *
1222  * Q. Why not use get_sb_bdev() ?
1223  * A. We need to select one of two root directories to mount, independent
1224  *    of whether this is the initial, or subsequent, mount of this sb
1225  *
1226  * Returns: the root or master dentry on success, or an ERR_PTR on error
1227  */
1228
1229 static struct dentry *gfs2_mount(struct file_system_type *fs_type, int flags,
1230                        const char *dev_name, void *data)
1231 {
1232         struct block_device *bdev;
1233         struct super_block *s;
1234         fmode_t mode = FMODE_READ | FMODE_EXCL;
1235         int error;
1236         struct gfs2_args args;
1237         struct gfs2_sbd *sdp;
1238
1239         if (!(flags & SB_RDONLY))
1240                 mode |= FMODE_WRITE;
1241
1242         bdev = blkdev_get_by_path(dev_name, mode, fs_type);
1243         if (IS_ERR(bdev))
1244                 return ERR_CAST(bdev);
1245
1246         /*
1247          * once the super is inserted into the list by sget, s_umount
1248          * will protect the lockfs code from trying to start a snapshot
1249          * while we are mounting
1250          */
1251         mutex_lock(&bdev->bd_fsfreeze_mutex);
1252         if (bdev->bd_fsfreeze_count > 0) {
1253                 mutex_unlock(&bdev->bd_fsfreeze_mutex);
1254                 error = -EBUSY;
1255                 goto error_bdev;
1256         }
1257         s = sget(fs_type, test_gfs2_super, set_gfs2_super, flags, bdev);
1258         mutex_unlock(&bdev->bd_fsfreeze_mutex);
1259         error = PTR_ERR(s);
1260         if (IS_ERR(s))
1261                 goto error_bdev;
1262
1263         if (s->s_root) {
1264                 /*
1265                  * s_umount nests inside bd_mutex during
1266                  * __invalidate_device().  blkdev_put() acquires
1267                  * bd_mutex and can't be called under s_umount.  Drop
1268                  * s_umount temporarily.  This is safe as we're
1269                  * holding an active reference.
1270                  */
1271                 up_write(&s->s_umount);
1272                 blkdev_put(bdev, mode);
1273                 down_write(&s->s_umount);
1274         } else {
1275                 /* s_mode must be set before deactivate_locked_super calls */
1276                 s->s_mode = mode;
1277         }
1278
1279         memset(&args, 0, sizeof(args));
1280         args.ar_quota = GFS2_QUOTA_DEFAULT;
1281         args.ar_data = GFS2_DATA_DEFAULT;
1282         args.ar_commit = 30;
1283         args.ar_statfs_quantum = 30;
1284         args.ar_quota_quantum = 60;
1285         args.ar_errors = GFS2_ERRORS_DEFAULT;
1286
1287         error = gfs2_mount_args(&args, data);
1288         if (error) {
1289                 pr_warn("can't parse mount arguments\n");
1290                 goto error_super;
1291         }
1292
1293         if (s->s_root) {
1294                 error = -EBUSY;
1295                 if ((flags ^ s->s_flags) & SB_RDONLY)
1296                         goto error_super;
1297         } else {
1298                 snprintf(s->s_id, sizeof(s->s_id), "%pg", bdev);
1299                 sb_set_blocksize(s, block_size(bdev));
1300                 error = fill_super(s, &args, flags & SB_SILENT ? 1 : 0);
1301                 if (error)
1302                         goto error_super;
1303                 s->s_flags |= SB_ACTIVE;
1304                 bdev->bd_super = s;
1305         }
1306
1307         sdp = s->s_fs_info;
1308         if (args.ar_meta)
1309                 return dget(sdp->sd_master_dir);
1310         else
1311                 return dget(sdp->sd_root_dir);
1312
1313 error_super:
1314         deactivate_locked_super(s);
1315         return ERR_PTR(error);
1316 error_bdev:
1317         blkdev_put(bdev, mode);
1318         return ERR_PTR(error);
1319 }
1320
1321 static int set_meta_super(struct super_block *s, void *ptr)
1322 {
1323         return -EINVAL;
1324 }
1325
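/*
 * set_meta_super() always fails, so the sget() below can only return a
 * superblock that is already mounted: a gfs2meta mount is a second view
 * (the master directory) of an existing gfs2 mount, never a new one.
 */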
1326 static struct dentry *gfs2_mount_meta(struct file_system_type *fs_type,
1327                         int flags, const char *dev_name, void *data)
1328 {
1329         struct super_block *s;
1330         struct gfs2_sbd *sdp;
1331         struct path path;
1332         int error;
1333
1334         if (!dev_name || !*dev_name)
1335                 return ERR_PTR(-EINVAL);
1336
1337         error = kern_path(dev_name, LOOKUP_FOLLOW, &path);
1338         if (error) {
1339                 pr_warn("path_lookup on %s returned error %d\n",
1340                         dev_name, error);
1341                 return ERR_PTR(error);
1342         }
1343         s = sget(&gfs2_fs_type, test_gfs2_super, set_meta_super, flags,
1344                  path.dentry->d_sb->s_bdev);
1345         path_put(&path);
1346         if (IS_ERR(s)) {
1347                 pr_warn("gfs2 mount does not exist\n");
1348                 return ERR_CAST(s);
1349         }
1350         if ((flags ^ s->s_flags) & SB_RDONLY) {
1351                 deactivate_locked_super(s);
1352                 return ERR_PTR(-EBUSY);
1353         }
1354         sdp = s->s_fs_info;
1355         return dget(sdp->sd_master_dir);
1356 }
1357
1358 static void gfs2_kill_sb(struct super_block *sb)
1359 {
1360         struct gfs2_sbd *sdp = sb->s_fs_info;
1361
1362         if (sdp == NULL) {
1363                 kill_block_super(sb);
1364                 return;
1365         }
1366
1367         gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_SYNC | GFS2_LFC_KILL_SB);
1368         dput(sdp->sd_root_dir);
1369         dput(sdp->sd_master_dir);
1370         sdp->sd_root_dir = NULL;
1371         sdp->sd_master_dir = NULL;
1372         shrink_dcache_sb(sb);
1373         free_percpu(sdp->sd_lkstats);
1374         kill_block_super(sb);
1375 }
1376
1377 struct file_system_type gfs2_fs_type = {
1378         .name = "gfs2",
1379         .fs_flags = FS_REQUIRES_DEV,
1380         .mount = gfs2_mount,
1381         .kill_sb = gfs2_kill_sb,
1382         .owner = THIS_MODULE,
1383 };
1384 MODULE_ALIAS_FS("gfs2");
1385
1386 struct file_system_type gfs2meta_fs_type = {
1387         .name = "gfs2meta",
1388         .fs_flags = FS_REQUIRES_DEV,
1389         .mount = gfs2_mount_meta,
1390         .owner = THIS_MODULE,
1391 };
1392 MODULE_ALIAS_FS("gfs2meta");