// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 */

#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/gfs2_ondisk.h>
#include <linux/bio.h>
#include <linux/posix_acl.h>
#include <linux/security.h>

struct workqueue_struct *gfs2_freeze_wq;

extern struct workqueue_struct *gfs2_control_wq;
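
/*
 * gfs2_freeze_wq runs sd_freeze_work, queued from freeze_go_sync() below.
 * gfs2_control_wq is defined elsewhere in gfs2 (hence the extern) and is
 * used by nondisk_go_callback() to schedule a check for remote withdraws.
 */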

static void gfs2_ail_error(struct gfs2_glock *gl, const struct buffer_head *bh)
{
	fs_err(gl->gl_name.ln_sbd,
	       "AIL buffer %p: blocknr %llu state 0x%08lx mapping %p page "
	       "state 0x%lx\n",
	       bh, (unsigned long long)bh->b_blocknr, bh->b_state,
	       bh->b_page->mapping, bh->b_page->flags);
	fs_err(gl->gl_name.ln_sbd, "AIL glock %u:%llu mapping %p\n",
	       gl->gl_name.ln_type, gl->gl_name.ln_number,
	       gfs2_glock2aspace(gl));
	gfs2_lm(gl->gl_name.ln_sbd, "AIL error\n");
	gfs2_withdraw(gl->gl_name.ln_sbd);
}

/**
 * __gfs2_ail_flush - remove all buffers for a given lock from the AIL
 * @gl: the glock
 * @fsync: set when called from fsync (not all buffers will be clean)
 * @nr_revokes: the maximum number of revokes to add
 *
 * None of the buffers should be dirty, locked, or pinned.
 */

static void __gfs2_ail_flush(struct gfs2_glock *gl, bool fsync,
			     unsigned int nr_revokes)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct list_head *head = &gl->gl_ail_list;
	struct gfs2_bufdata *bd, *tmp;
	struct buffer_head *bh;
	const unsigned long b_state = (1UL << BH_Dirty)|(1UL << BH_Pinned)|(1UL << BH_Lock);

	gfs2_log_lock(sdp);
	spin_lock(&sdp->sd_ail_lock);
	list_for_each_entry_safe_reverse(bd, tmp, head, bd_ail_gl_list) {
		if (nr_revokes == 0)
			break;
		bh = bd->bd_bh;
		if (bh->b_state & b_state) {
			if (fsync)
				continue;
			gfs2_ail_error(gl, bh);
		}
		gfs2_trans_add_revoke(sdp, bd);
		nr_revokes--;
	}
	GLOCK_BUG_ON(gl, !fsync && atomic_read(&gl->gl_ail_count));
	spin_unlock(&sdp->sd_ail_lock);
	gfs2_log_unlock(sdp);
}
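
/*
 * gfs2_ail_empty_gl - queue revokes for any buffers still on the glock's
 * AIL list and flush the log. Uses a short, on-stack transaction rather
 * than gfs2_trans_begin(); see the inline comment below.
 */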
static int gfs2_ail_empty_gl(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct gfs2_trans tr;
	int ret;

	memset(&tr, 0, sizeof(tr));
	INIT_LIST_HEAD(&tr.tr_buf);
	INIT_LIST_HEAD(&tr.tr_databuf);
	tr.tr_revokes = atomic_read(&gl->gl_ail_count);

	if (!tr.tr_revokes) {
		bool have_revokes;
		bool log_in_flight;

		/*
		 * We have nothing on the ail, but there could be revokes on
		 * the sdp revoke queue, in which case, we still want to flush
		 * the log and wait for it to finish.
		 *
		 * If the sdp revoke list is empty too, we might still have
		 * I/O outstanding for writing revokes, so we should wait for
		 * it before returning.
		 *
		 * If none of these conditions are true, our revokes are all
		 * flushed and we can return.
		 */
		gfs2_log_lock(sdp);
		have_revokes = !list_empty(&sdp->sd_log_revokes);
		log_in_flight = atomic_read(&sdp->sd_log_in_flight);
		gfs2_log_unlock(sdp);
		if (have_revokes)
			goto flush;
		if (log_in_flight)
			log_flush_wait(sdp);
		return 0;
	}

	/* A shortened, inline version of gfs2_trans_begin()
	 * tr->alloced is not set since the transaction structure is
	 * on the stack */
	tr.tr_reserved = 1 + gfs2_struct2blk(sdp, tr.tr_revokes);
	ret = gfs2_log_reserve(sdp, tr.tr_reserved);
	if (ret < 0)
		return ret;
	WARN_ON_ONCE(current->journal_info);
	current->journal_info = &tr;

	__gfs2_ail_flush(gl, 0, tr.tr_revokes);

	gfs2_trans_end(sdp);
flush:
	gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
		       GFS2_LFC_AIL_EMPTY_GL);
	return 0;
}

void gfs2_ail_flush(struct gfs2_glock *gl, bool fsync)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	unsigned int revokes = atomic_read(&gl->gl_ail_count);
	unsigned int max_revokes = (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_log_descriptor)) / sizeof(u64);
	int ret;

	if (!revokes)
		return;

	while (revokes > max_revokes)
		max_revokes += (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_meta_header)) / sizeof(u64);
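
	/*
	 * Revoke sizing, for reference: the first log descriptor block can
	 * hold (sb_bsize - sizeof(struct gfs2_log_descriptor)) / sizeof(u64)
	 * revoke entries, and each continuation block adds another
	 * (sb_bsize - sizeof(struct gfs2_meta_header)) / sizeof(u64), so the
	 * loop above rounds max_revokes up one log block at a time until it
	 * covers everything currently on the AIL.
	 */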

	ret = gfs2_trans_begin(sdp, 0, max_revokes);
	if (ret)
		return;
	__gfs2_ail_flush(gl, fsync, max_revokes);
	gfs2_trans_end(sdp);
	gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
		       GFS2_LFC_AIL_FLUSH);
}

/**
 * rgrp_go_sync - sync out the metadata for this glock
 * @gl: the glock
 *
 * Called when demoting or unlocking an EX glock. We must flush
 * to disk all dirty buffers/pages relating to this glock, and must not
 * return to caller to demote/unlock the glock until I/O is complete.
 */

static int rgrp_go_sync(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct address_space *mapping = &sdp->sd_aspace;
	struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl);
	int error;

	if (!test_and_clear_bit(GLF_DIRTY, &gl->gl_flags))
		return 0;
	GLOCK_BUG_ON(gl, gl->gl_state != LM_ST_EXCLUSIVE);

	gfs2_log_flush(sdp, gl, GFS2_LOG_HEAD_FLUSH_NORMAL |
		       GFS2_LFC_RGRP_GO_SYNC);
	filemap_fdatawrite_range(mapping, gl->gl_vm.start, gl->gl_vm.end);
	error = filemap_fdatawait_range(mapping, gl->gl_vm.start, gl->gl_vm.end);
	mapping_set_error(mapping, error);
	error = gfs2_ail_empty_gl(gl);

	spin_lock(&gl->gl_lockref.lock);
	rgd = gl->gl_object;
	if (rgd)
		gfs2_free_clones(rgd);
	spin_unlock(&gl->gl_lockref.lock);

	return error;
}

/**
 * rgrp_go_inval - invalidate the metadata for this glock
 * @gl: the glock
 * @flags: DIO_* flags describing what to invalidate
 *
 * We never used LM_ST_DEFERRED with resource groups, so we should
 * always see the metadata flag set here.
 */

static void rgrp_go_inval(struct gfs2_glock *gl, int flags)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct address_space *mapping = &sdp->sd_aspace;
	struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl);

	if (rgd)
		gfs2_rgrp_brelse(rgd);

	WARN_ON_ONCE(!(flags & DIO_METADATA));
	truncate_inode_pages_range(mapping, gl->gl_vm.start, gl->gl_vm.end);

	if (rgd)
		rgd->rd_flags &= ~GFS2_RDF_UPTODATE;
}
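
/*
 * The helpers below pair up: gfs2_glock2inode() takes gl_lockref.lock,
 * grabs the inode attached to the glock and marks it GIF_GLOP_PENDING so
 * other code can tell a glock operation is in progress;
 * gfs2_clear_glop_pending() clears the bit and wakes any waiters once the
 * operation is done.
 */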

static struct gfs2_inode *gfs2_glock2inode(struct gfs2_glock *gl)
{
	struct gfs2_inode *ip;

	spin_lock(&gl->gl_lockref.lock);
	ip = gl->gl_object;
	if (ip)
		set_bit(GIF_GLOP_PENDING, &ip->i_flags);
	spin_unlock(&gl->gl_lockref.lock);

	return ip;
}

struct gfs2_rgrpd *gfs2_glock2rgrp(struct gfs2_glock *gl)
{
	struct gfs2_rgrpd *rgd;

	spin_lock(&gl->gl_lockref.lock);
	rgd = gl->gl_object;
	spin_unlock(&gl->gl_lockref.lock);

	return rgd;
}

static void gfs2_clear_glop_pending(struct gfs2_inode *ip)
{
	if (!ip)
		return;

	clear_bit_unlock(GIF_GLOP_PENDING, &ip->i_flags);
	wake_up_bit(&ip->i_flags, GIF_GLOP_PENDING);
}

/**
 * inode_go_sync - Sync the dirty data and/or metadata for an inode glock
 * @gl: the glock protecting the inode
 *
 */

static int inode_go_sync(struct gfs2_glock *gl)
{
	struct gfs2_inode *ip = gfs2_glock2inode(gl);
	int isreg = ip && S_ISREG(ip->i_inode.i_mode);
	struct address_space *metamapping = gfs2_glock2aspace(gl);
	int error = 0;

	if (isreg) {
		if (test_and_clear_bit(GIF_SW_PAGED, &ip->i_flags))
			unmap_shared_mapping_range(ip->i_inode.i_mapping, 0, 0);
		inode_dio_wait(&ip->i_inode);
	}
	if (!test_and_clear_bit(GLF_DIRTY, &gl->gl_flags))
		goto out;

	GLOCK_BUG_ON(gl, gl->gl_state != LM_ST_EXCLUSIVE);

	gfs2_log_flush(gl->gl_name.ln_sbd, gl, GFS2_LOG_HEAD_FLUSH_NORMAL |
		       GFS2_LFC_INODE_GO_SYNC);
	filemap_fdatawrite(metamapping);
	if (isreg) {
		struct address_space *mapping = ip->i_inode.i_mapping;
		filemap_fdatawrite(mapping);
		error = filemap_fdatawait(mapping);
		mapping_set_error(mapping, error);
	}
	error = filemap_fdatawait(metamapping);
	mapping_set_error(metamapping, error);
	gfs2_ail_empty_gl(gl);
	/*
	 * Writeback of the data mapping may cause the dirty flag to be set
	 * so we have to clear it again here.
	 */
	smp_mb__before_atomic();
	clear_bit(GLF_DIRTY, &gl->gl_flags);

out:
	gfs2_clear_glop_pending(ip);
	return error;
}

/**
 * inode_go_inval - prepare an inode glock to be released
 * @gl: the glock
 * @flags: DIO_* flags describing what to invalidate
 *
 * Normally we invalidate everything, but if we are moving into
 * LM_ST_DEFERRED from LM_ST_SHARED or LM_ST_EXCLUSIVE then we
 * can keep hold of the metadata, since it won't have changed.
 */

static void inode_go_inval(struct gfs2_glock *gl, int flags)
{
	struct gfs2_inode *ip = gfs2_glock2inode(gl);

	if (flags & DIO_METADATA) {
		struct address_space *mapping = gfs2_glock2aspace(gl);
		truncate_inode_pages(mapping, 0);
		if (ip) {
			set_bit(GIF_INVALID, &ip->i_flags);
			forget_all_cached_acls(&ip->i_inode);
			security_inode_invalidate_secctx(&ip->i_inode);
			gfs2_dir_hash_inval(ip);
		}
	}

	if (ip == GFS2_I(gl->gl_name.ln_sbd->sd_rindex)) {
		gfs2_log_flush(gl->gl_name.ln_sbd, NULL,
			       GFS2_LOG_HEAD_FLUSH_NORMAL |
			       GFS2_LFC_INODE_GO_INVAL);
		gl->gl_name.ln_sbd->sd_rindex_uptodate = 0;
	}
	if (ip && S_ISREG(ip->i_inode.i_mode))
		truncate_inode_pages(ip->i_inode.i_mapping, 0);

	gfs2_clear_glop_pending(ip);
}

/**
 * inode_go_demote_ok - Check to see if it's ok to unlock an inode glock
 * @gl: the glock
 *
 * Returns: 1 if it's ok
 */

static int inode_go_demote_ok(const struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;

	if (sdp->sd_jindex == gl->gl_object || sdp->sd_rindex == gl->gl_object)
		return 0;

	return 1;
}
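
/*
 * gfs2_dinode_in - copy an on-disk dinode into the in-core inode. All
 * fields are stored big-endian on disk, hence the be*_to_cpu() conversions
 * below; height and depth are range-checked before being trusted.
 */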
static int gfs2_dinode_in(struct gfs2_inode *ip, const void *buf)
{
	const struct gfs2_dinode *str = buf;
	struct timespec64 atime;
	u16 height, depth;

	if (unlikely(ip->i_no_addr != be64_to_cpu(str->di_num.no_addr)))
		goto corrupt;
	ip->i_no_formal_ino = be64_to_cpu(str->di_num.no_formal_ino);
	ip->i_inode.i_mode = be32_to_cpu(str->di_mode);
	ip->i_inode.i_rdev = 0;
	switch (ip->i_inode.i_mode & S_IFMT) {
	case S_IFBLK:
	case S_IFCHR:
		ip->i_inode.i_rdev = MKDEV(be32_to_cpu(str->di_major),
					   be32_to_cpu(str->di_minor));
		break;
	}

	i_uid_write(&ip->i_inode, be32_to_cpu(str->di_uid));
	i_gid_write(&ip->i_inode, be32_to_cpu(str->di_gid));
	set_nlink(&ip->i_inode, be32_to_cpu(str->di_nlink));
	i_size_write(&ip->i_inode, be64_to_cpu(str->di_size));
	gfs2_set_inode_blocks(&ip->i_inode, be64_to_cpu(str->di_blocks));
	atime.tv_sec = be64_to_cpu(str->di_atime);
	atime.tv_nsec = be32_to_cpu(str->di_atime_nsec);
	if (timespec64_compare(&ip->i_inode.i_atime, &atime) < 0)
		ip->i_inode.i_atime = atime;
	ip->i_inode.i_mtime.tv_sec = be64_to_cpu(str->di_mtime);
	ip->i_inode.i_mtime.tv_nsec = be32_to_cpu(str->di_mtime_nsec);
	ip->i_inode.i_ctime.tv_sec = be64_to_cpu(str->di_ctime);
	ip->i_inode.i_ctime.tv_nsec = be32_to_cpu(str->di_ctime_nsec);

	ip->i_goal = be64_to_cpu(str->di_goal_meta);
	ip->i_generation = be64_to_cpu(str->di_generation);

	ip->i_diskflags = be32_to_cpu(str->di_flags);
	ip->i_eattr = be64_to_cpu(str->di_eattr);
	/* i_diskflags and i_eattr must be set before gfs2_set_inode_flags() */
	gfs2_set_inode_flags(&ip->i_inode);
	height = be16_to_cpu(str->di_height);
	if (unlikely(height > GFS2_MAX_META_HEIGHT))
		goto corrupt;
	ip->i_height = (u8)height;

	depth = be16_to_cpu(str->di_depth);
	if (unlikely(depth > GFS2_DIR_MAX_DEPTH))
		goto corrupt;
	ip->i_depth = (u8)depth;
	ip->i_entries = be32_to_cpu(str->di_entries);

	if (S_ISREG(ip->i_inode.i_mode))
		gfs2_set_aops(&ip->i_inode);

	return 0;
corrupt:
	gfs2_consist_inode(ip);
	return -EIO;
}

/**
 * gfs2_inode_refresh - Refresh the incore copy of the dinode
 * @ip: The GFS2 inode
 *
 * Returns: errno
 */

int gfs2_inode_refresh(struct gfs2_inode *ip)
{
	struct buffer_head *dibh;
	int error;

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		return error;

	error = gfs2_dinode_in(ip, dibh->b_data);
	brelse(dibh);
	clear_bit(GIF_INVALID, &ip->i_flags);

	return error;
}

/**
 * inode_go_lock - operation done after an inode lock is locked by a process
 * @gh: the glock holder
 *
 * Returns: errno
 */

static int inode_go_lock(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct gfs2_inode *ip = gl->gl_object;
	int error = 0;

	if (!ip || (gh->gh_flags & GL_SKIP))
		return 0;

	if (test_bit(GIF_INVALID, &ip->i_flags)) {
		error = gfs2_inode_refresh(ip);
		if (error)
			return error;
	}

	if (gh->gh_state != LM_ST_DEFERRED)
		inode_dio_wait(&ip->i_inode);
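
	/*
	 * If a truncate was interrupted (GFS2_DIF_TRUNC_IN_PROG), hand the
	 * inode over via sd_trunc_list so the truncate can be resumed later;
	 * the wake_up below prods the daemon that also waits on
	 * sd_quota_wait and services that list.
	 */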
	if ((ip->i_diskflags & GFS2_DIF_TRUNC_IN_PROG) &&
	    (gl->gl_state == LM_ST_EXCLUSIVE) &&
	    (gh->gh_state == LM_ST_EXCLUSIVE)) {
		spin_lock(&sdp->sd_trunc_lock);
		if (list_empty(&ip->i_trunc_list))
			list_add(&ip->i_trunc_list, &sdp->sd_trunc_list);
		spin_unlock(&sdp->sd_trunc_lock);
		wake_up(&sdp->sd_quota_wait);
		return 1;
	}

	return error;
}

/**
 * inode_go_dump - print information about an inode
 * @seq: The iterator
 * @gl: The glock
 * @fs_id_buf: file system id (may be empty)
 *
 */

static void inode_go_dump(struct seq_file *seq, struct gfs2_glock *gl,
			  const char *fs_id_buf)
{
	struct gfs2_inode *ip = gl->gl_object;
	struct inode *inode = &ip->i_inode;
	unsigned long nrpages;

	if (ip == NULL)
		return;

	xa_lock_irq(&inode->i_data.i_pages);
	nrpages = inode->i_data.nrpages;
	xa_unlock_irq(&inode->i_data.i_pages);

	gfs2_print_dbg(seq, "%s I: n:%llu/%llu t:%u f:0x%02lx d:0x%08x s:%llu "
		       "p:%lu\n", fs_id_buf,
		       (unsigned long long)ip->i_no_formal_ino,
		       (unsigned long long)ip->i_no_addr,
		       IF2DT(ip->i_inode.i_mode), ip->i_flags,
		       (unsigned int)ip->i_diskflags,
		       (unsigned long long)i_size_read(inode), nrpages);
}
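
/*
 * For reference, the fields in the dump line above are: n = formal ino /
 * block address, t = inode type (DT_* value), f = i_flags, d = on-disk
 * flags (i_diskflags), s = inode size, p = number of cached pages.
 */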

/**
 * freeze_go_sync - promote/demote the freeze glock
 * @gl: the glock
 *
 */

static int freeze_go_sync(struct gfs2_glock *gl)
{
	int error = 0;
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;

	if (gl->gl_state == LM_ST_SHARED && !gfs2_withdrawn(sdp) &&
	    test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
		atomic_set(&sdp->sd_freeze_state, SFS_STARTING_FREEZE);
		error = freeze_super(sdp->sd_vfs);
		if (error) {
			fs_info(sdp, "GFS2: couldn't freeze filesystem: %d\n",
				error);
			if (gfs2_withdrawn(sdp)) {
				atomic_set(&sdp->sd_freeze_state, SFS_UNFROZEN);
				return 0;
			}
			gfs2_assert_withdraw(sdp, 0);
		}
		queue_work(gfs2_freeze_wq, &sdp->sd_freeze_work);
		gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_FREEZE |
			       GFS2_LFC_FREEZE_GO_SYNC);
	}
	return 0;
}

/**
 * freeze_go_xmote_bh - After promoting/demoting the freeze glock
 * @gl: the glock
 * @gh: the holder
 *
 */

static int freeze_go_xmote_bh(struct gfs2_glock *gl, struct gfs2_holder *gh)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_jdesc->jd_inode);
	struct gfs2_glock *j_gl = ip->i_gl;
	struct gfs2_log_header_host head;
	int error;

	if (test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
		j_gl->gl_ops->go_inval(j_gl, DIO_METADATA);

		error = gfs2_find_jhead(sdp->sd_jdesc, &head, false);
		if (error)
			gfs2_consist(sdp);
		if (!(head.lh_flags & GFS2_LOG_HEAD_UNMOUNT))
			gfs2_consist(sdp);

		/* Initialize the head of the log from the journal head */
		if (!gfs2_withdrawn(sdp)) {
			sdp->sd_log_sequence = head.lh_sequence + 1;
			gfs2_log_pointers_init(sdp, head.lh_blkno);
		}
	}
	return 0;
}

static int freeze_go_demote_ok(const struct gfs2_glock *gl)
{
	return 0;
}

/**
 * iopen_go_callback - schedule the dcache entry for the inode to be deleted
 * @gl: the glock
 * @remote: true if this came from a different cluster node
 *
 * gl_lockref.lock lock is held while calling this
 */
static void iopen_go_callback(struct gfs2_glock *gl, bool remote)
{
	struct gfs2_inode *ip = gl->gl_object;
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;

	if (!remote || sb_rdonly(sdp->sd_vfs))
		return;
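
	/*
	 * A remote demote request on an iopen glock means another node wants
	 * this inode gone; take an extra glock reference and queue gl_delete,
	 * dropping the reference again if the work was already queued.
	 */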
	if (gl->gl_demote_state == LM_ST_UNLOCKED &&
	    gl->gl_state == LM_ST_SHARED && ip) {
		gl->gl_lockref.count++;
		if (queue_work(gfs2_delete_workqueue, &gl->gl_delete) == 0)
			gl->gl_lockref.count--;
	}
}

/**
 * inode_go_free - wake up anyone waiting for dlm's unlock ast to free it
 * @gl: glock being freed
 *
 * For now, this is only used for the journal inode glock. In withdraw
 * situations, we need to wait for the glock to be freed so that we know
 * other nodes may proceed with recovery / journal replay.
 */
static void inode_go_free(struct gfs2_glock *gl)
{
	/* Note that we cannot reference gl_object because it's already set
	 * to NULL by this point in its lifecycle. */
	if (!test_bit(GLF_FREEING, &gl->gl_flags))
		return;
	clear_bit_unlock(GLF_FREEING, &gl->gl_flags);
	wake_up_bit(&gl->gl_flags, GLF_FREEING);
}

/**
 * nondisk_go_callback - used to signal when a node did a withdraw
 * @gl: the nondisk glock
 * @remote: true if this came from a different cluster node
 *
 */
static void nondisk_go_callback(struct gfs2_glock *gl, bool remote)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;

	/* Ignore the callback unless it's from another node, and it's the
	   live lock. */
	if (!remote || gl->gl_name.ln_number != GFS2_LIVE_LOCK)
		return;

	/* First order of business is to cancel the demote request. We don't
	 * really want to demote a nondisk glock. At best it's just to inform
	 * us of another node's withdraw. We'll keep it in SH mode. */
	clear_bit(GLF_DEMOTE, &gl->gl_flags);
	clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags);

	/* Ignore the unlock if we're withdrawn, unmounting, or in recovery. */
	if (test_bit(SDF_NORECOVERY, &sdp->sd_flags) ||
	    test_bit(SDF_WITHDRAWN, &sdp->sd_flags) ||
	    test_bit(SDF_REMOTE_WITHDRAW, &sdp->sd_flags))
		return;

	/* We only care when a node wants us to unlock, because that means
	 * they want a journal recovered. */
	if (gl->gl_demote_state != LM_ST_UNLOCKED)
		return;

	if (sdp->sd_args.ar_spectator) {
		fs_warn(sdp, "Spectator node cannot recover journals.\n");
		return;
	}

	fs_warn(sdp, "Some node has withdrawn; checking for recovery.\n");
	set_bit(SDF_REMOTE_WITHDRAW, &sdp->sd_flags);
	/*
	 * We can't call remote_withdraw directly here or gfs2_recover_journal
	 * because this is called from the glock unlock function and the
	 * remote_withdraw needs to enqueue and dequeue the same "live" glock
	 * we were called from. So we queue it to the control work queue
	 * (gfs2_control_wq) instead.
	 */
	queue_delayed_work(gfs2_control_wq, &sdp->sd_control_work, 0);
}

const struct gfs2_glock_operations gfs2_meta_glops = {
	.go_type = LM_TYPE_META,
	.go_flags = GLOF_NONDISK,
};

const struct gfs2_glock_operations gfs2_inode_glops = {
	.go_sync = inode_go_sync,
	.go_inval = inode_go_inval,
	.go_demote_ok = inode_go_demote_ok,
	.go_lock = inode_go_lock,
	.go_dump = inode_go_dump,
	.go_type = LM_TYPE_INODE,
	.go_flags = GLOF_ASPACE | GLOF_LRU,
	.go_free = inode_go_free,
};

const struct gfs2_glock_operations gfs2_rgrp_glops = {
	.go_sync = rgrp_go_sync,
	.go_inval = rgrp_go_inval,
	.go_lock = gfs2_rgrp_go_lock,
	.go_dump = gfs2_rgrp_dump,
	.go_type = LM_TYPE_RGRP,
	.go_flags = GLOF_LVB,
};

const struct gfs2_glock_operations gfs2_freeze_glops = {
	.go_sync = freeze_go_sync,
	.go_xmote_bh = freeze_go_xmote_bh,
	.go_demote_ok = freeze_go_demote_ok,
	.go_type = LM_TYPE_NONDISK,
	.go_flags = GLOF_NONDISK,
};

const struct gfs2_glock_operations gfs2_iopen_glops = {
	.go_type = LM_TYPE_IOPEN,
	.go_callback = iopen_go_callback,
	.go_flags = GLOF_LRU | GLOF_NONDISK,
};

const struct gfs2_glock_operations gfs2_flock_glops = {
	.go_type = LM_TYPE_FLOCK,
	.go_flags = GLOF_LRU | GLOF_NONDISK,
};

const struct gfs2_glock_operations gfs2_nondisk_glops = {
	.go_type = LM_TYPE_NONDISK,
	.go_flags = GLOF_NONDISK,
	.go_callback = nondisk_go_callback,
};

const struct gfs2_glock_operations gfs2_quota_glops = {
	.go_type = LM_TYPE_QUOTA,
	.go_flags = GLOF_LVB | GLOF_LRU | GLOF_NONDISK,
};

const struct gfs2_glock_operations gfs2_journal_glops = {
	.go_type = LM_TYPE_JOURNAL,
	.go_flags = GLOF_NONDISK,
};

const struct gfs2_glock_operations *gfs2_glops_list[] = {
	[LM_TYPE_META] = &gfs2_meta_glops,
	[LM_TYPE_INODE] = &gfs2_inode_glops,
	[LM_TYPE_RGRP] = &gfs2_rgrp_glops,
	[LM_TYPE_IOPEN] = &gfs2_iopen_glops,
	[LM_TYPE_FLOCK] = &gfs2_flock_glops,
	[LM_TYPE_NONDISK] = &gfs2_nondisk_glops,
	[LM_TYPE_QUOTA] = &gfs2_quota_glops,
	[LM_TYPE_JOURNAL] = &gfs2_journal_glops,
};