// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc.  All rights reserved.
 */

#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/gfs2_ondisk.h>
#include <linux/bio.h>
#include <linux/posix_acl.h>
#include <linux/security.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "recovery.h"
#include "rgrp.h"
#include "util.h"
#include "trans.h"
#include "dir.h"
#include "lops.h"

struct workqueue_struct *gfs2_freeze_wq;

extern struct workqueue_struct *gfs2_control_wq;

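/**
 * gfs2_ail_error - complain about a buffer left on the AIL in a bad state
 * @gl: the glock the buffer is attached to
 * @bh: the offending buffer head
 *
 * Dumps the buffer, page, and glock details to the log and then withdraws
 * from the filesystem: an AIL buffer that is still dirty, pinned, or
 * locked at this point indicates a consistency problem.
 */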
static void gfs2_ail_error(struct gfs2_glock *gl, const struct buffer_head *bh)
{
        fs_err(gl->gl_name.ln_sbd,
               "AIL buffer %p: blocknr %llu state 0x%08lx mapping %p page "
               "state 0x%lx\n",
               bh, (unsigned long long)bh->b_blocknr, bh->b_state,
               bh->b_page->mapping, bh->b_page->flags);
        fs_err(gl->gl_name.ln_sbd, "AIL glock %u:%llu mapping %p\n",
               gl->gl_name.ln_type, gl->gl_name.ln_number,
               gfs2_glock2aspace(gl));
        gfs2_lm(gl->gl_name.ln_sbd, "AIL error\n");
        gfs2_withdraw(gl->gl_name.ln_sbd);
}

/**
 * __gfs2_ail_flush - remove all buffers for a given lock from the AIL
 * @gl: the glock
 * @fsync: set when called from fsync (not all buffers will be clean)
 * @nr_revokes: the maximum number of buffers to revoke
 *
 * None of the buffers should be dirty, locked, or pinned.
 */

static void __gfs2_ail_flush(struct gfs2_glock *gl, bool fsync,
                             unsigned int nr_revokes)
{
        struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
        struct list_head *head = &gl->gl_ail_list;
        struct gfs2_bufdata *bd, *tmp;
        struct buffer_head *bh;
        const unsigned long b_state = (1UL << BH_Dirty)|(1UL << BH_Pinned)|(1UL << BH_Lock);

        gfs2_log_lock(sdp);
        spin_lock(&sdp->sd_ail_lock);
        list_for_each_entry_safe_reverse(bd, tmp, head, bd_ail_gl_list) {
                if (nr_revokes == 0)
                        break;
                bh = bd->bd_bh;
                if (bh->b_state & b_state) {
                        if (fsync)
                                continue;
                        gfs2_ail_error(gl, bh);
                }
                gfs2_trans_add_revoke(sdp, bd);
                nr_revokes--;
        }
        GLOCK_BUG_ON(gl, !fsync && atomic_read(&gl->gl_ail_count));
        spin_unlock(&sdp->sd_ail_lock);
        gfs2_log_unlock(sdp);
}


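/**
 * gfs2_ail_empty_gl - empty out the AIL list for a given lock
 * @gl: the glock
 *
 * Adds revokes for any buffers still on the glock's AIL list inside a
 * transaction, then flushes the log so the revokes reach the journal.
 * If the AIL list is already empty, the log is only flushed or waited
 * for as needed so that any revokes still in flight for this glock are
 * on disk before returning.
 */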
static int gfs2_ail_empty_gl(struct gfs2_glock *gl)
{
        struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
        struct gfs2_trans tr;
        unsigned int revokes;
        int ret;

        revokes = atomic_read(&gl->gl_ail_count);

        if (!revokes) {
                bool have_revokes;
                bool log_in_flight;

                /*
                 * We have nothing on the ail, but there could be revokes on
                 * the sdp revoke queue, in which case, we still want to flush
                 * the log and wait for it to finish.
                 *
                 * If the sdp revoke list is empty too, we might still have an
                 * io outstanding for writing revokes, so we should wait for
                 * it before returning.
                 *
                 * If none of these conditions are true, our revokes are all
                 * flushed and we can return.
                 */
                gfs2_log_lock(sdp);
                have_revokes = !list_empty(&sdp->sd_log_revokes);
                log_in_flight = atomic_read(&sdp->sd_log_in_flight);
                gfs2_log_unlock(sdp);
                if (have_revokes)
                        goto flush;
                if (log_in_flight)
                        log_flush_wait(sdp);
                return 0;
        }

        memset(&tr, 0, sizeof(tr));
        set_bit(TR_ONSTACK, &tr.tr_flags);
        ret = __gfs2_trans_begin(&tr, sdp, 0, revokes, _RET_IP_);
        if (ret)
                goto flush;
        __gfs2_ail_flush(gl, false, revokes);
        gfs2_trans_end(sdp);

flush:
        gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
                       GFS2_LFC_AIL_EMPTY_GL);
        return 0;
}

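/**
 * gfs2_ail_flush - revoke the buffers for a given lock that are on the AIL
 * @gl: the glock
 * @fsync: set when called from fsync (not all buffers will be clean)
 *
 * Like gfs2_ail_empty_gl(), but tolerates dirty, locked, or pinned
 * buffers when called from fsync, and returns quietly if a transaction
 * cannot be started.
 */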
void gfs2_ail_flush(struct gfs2_glock *gl, bool fsync)
{
        struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
        unsigned int revokes = atomic_read(&gl->gl_ail_count);
        int ret;

        if (!revokes)
                return;

        ret = gfs2_trans_begin(sdp, 0, revokes);
        if (ret)
                return;
        __gfs2_ail_flush(gl, fsync, revokes);
        gfs2_trans_end(sdp);
        gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
                       GFS2_LFC_AIL_FLUSH);
}

/**
 * gfs2_rgrp_metasync - sync out the metadata of a resource group
 * @gl: the glock protecting the resource group
 *
 */

static int gfs2_rgrp_metasync(struct gfs2_glock *gl)
{
        struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
        struct address_space *metamapping = &sdp->sd_aspace;
        struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl);
        const unsigned bsize = sdp->sd_sb.sb_bsize;
        loff_t start = (rgd->rd_addr * bsize) & PAGE_MASK;
        loff_t end = PAGE_ALIGN((rgd->rd_addr + rgd->rd_length) * bsize) - 1;
        int error;

        filemap_fdatawrite_range(metamapping, start, end);
        error = filemap_fdatawait_range(metamapping, start, end);
        WARN_ON_ONCE(error && !gfs2_withdrawn(sdp));
        mapping_set_error(metamapping, error);
        if (error)
                gfs2_io_error(sdp);
        return error;
}

/**
 * rgrp_go_sync - sync out the metadata for this glock
 * @gl: the glock
 *
 * Called when demoting or unlocking an EX glock.  We must flush
 * to disk all dirty buffers/pages relating to this glock, and must not
 * return to caller to demote/unlock the glock until I/O is complete.
 */

static int rgrp_go_sync(struct gfs2_glock *gl)
{
        struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
        struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl);
        int error;

        if (!test_and_clear_bit(GLF_DIRTY, &gl->gl_flags))
                return 0;
        GLOCK_BUG_ON(gl, gl->gl_state != LM_ST_EXCLUSIVE);

        gfs2_log_flush(sdp, gl, GFS2_LOG_HEAD_FLUSH_NORMAL |
                       GFS2_LFC_RGRP_GO_SYNC);
        error = gfs2_rgrp_metasync(gl);
        if (!error)
                error = gfs2_ail_empty_gl(gl);
        gfs2_free_clones(rgd);
        return error;
}

/**
 * rgrp_go_inval - invalidate the metadata for this glock
 * @gl: the glock
 * @flags: DIO_* flags indicating what to invalidate
 *
 * We never use LM_ST_DEFERRED with resource groups, so we should
 * always see the metadata flag set here.
 *
 */

static void rgrp_go_inval(struct gfs2_glock *gl, int flags)
{
        struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
        struct address_space *mapping = &sdp->sd_aspace;
        struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl);
        const unsigned bsize = sdp->sd_sb.sb_bsize;
        loff_t start = (rgd->rd_addr * bsize) & PAGE_MASK;
        loff_t end = PAGE_ALIGN((rgd->rd_addr + rgd->rd_length) * bsize) - 1;

        gfs2_rgrp_brelse(rgd);
        WARN_ON_ONCE(!(flags & DIO_METADATA));
        truncate_inode_pages_range(mapping, start, end);
        rgd->rd_flags &= ~GFS2_RDF_UPTODATE;
}

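/**
 * gfs2_rgrp_go_dump - print out information about a resource group
 * @seq: The iterator
 * @gl: The glock in question
 * @fs_id_buf: file system id (may be empty)
 */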
static void gfs2_rgrp_go_dump(struct seq_file *seq, struct gfs2_glock *gl,
                              const char *fs_id_buf)
{
        struct gfs2_rgrpd *rgd = gl->gl_object;

        if (rgd)
                gfs2_rgrp_dump(seq, rgd, fs_id_buf);
}

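/**
 * gfs2_glock2inode - safely look up the inode associated with a glock
 * @gl: the glock
 *
 * Returns the glock's inode, if any, with GIF_GLOP_PENDING set so that
 * the inode is not torn down while a glock operation is in progress.
 * Callers clear the bit again via gfs2_clear_glop_pending().
 */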
static struct gfs2_inode *gfs2_glock2inode(struct gfs2_glock *gl)
{
        struct gfs2_inode *ip;

        spin_lock(&gl->gl_lockref.lock);
        ip = gl->gl_object;
        if (ip)
                set_bit(GIF_GLOP_PENDING, &ip->i_flags);
        spin_unlock(&gl->gl_lockref.lock);
        return ip;
}

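/**
 * gfs2_glock2rgrp - look up the resource group associated with a glock
 * @gl: the glock
 *
 * The gl_lockref lock is taken so that gl_object is read consistently.
 */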
struct gfs2_rgrpd *gfs2_glock2rgrp(struct gfs2_glock *gl)
{
        struct gfs2_rgrpd *rgd;

        spin_lock(&gl->gl_lockref.lock);
        rgd = gl->gl_object;
        spin_unlock(&gl->gl_lockref.lock);

        return rgd;
}

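/**
 * gfs2_clear_glop_pending - signal that a glock operation has finished
 * @ip: the inode (may be NULL)
 *
 * Clears GIF_GLOP_PENDING and wakes up anyone waiting for the bit to
 * clear.
 */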
static void gfs2_clear_glop_pending(struct gfs2_inode *ip)
{
        if (!ip)
                return;

        clear_bit_unlock(GIF_GLOP_PENDING, &ip->i_flags);
        wake_up_bit(&ip->i_flags, GIF_GLOP_PENDING);
}

/**
 * gfs2_inode_metasync - sync out the metadata of an inode
 * @gl: the glock protecting the inode
 *
 */
int gfs2_inode_metasync(struct gfs2_glock *gl)
{
        struct address_space *metamapping = gfs2_glock2aspace(gl);
        int error;

        filemap_fdatawrite(metamapping);
        error = filemap_fdatawait(metamapping);
        if (error)
                gfs2_io_error(gl->gl_name.ln_sbd);
        return error;
}

/**
 * inode_go_sync - Sync the dirty metadata of an inode
 * @gl: the glock protecting the inode
 *
 */

static int inode_go_sync(struct gfs2_glock *gl)
{
        struct gfs2_inode *ip = gfs2_glock2inode(gl);
        int isreg = ip && S_ISREG(ip->i_inode.i_mode);
        struct address_space *metamapping = gfs2_glock2aspace(gl);
        int error = 0, ret;

        if (isreg) {
                if (test_and_clear_bit(GIF_SW_PAGED, &ip->i_flags))
                        unmap_shared_mapping_range(ip->i_inode.i_mapping, 0, 0);
                inode_dio_wait(&ip->i_inode);
        }
        if (!test_and_clear_bit(GLF_DIRTY, &gl->gl_flags))
                goto out;

        GLOCK_BUG_ON(gl, gl->gl_state != LM_ST_EXCLUSIVE);

        gfs2_log_flush(gl->gl_name.ln_sbd, gl, GFS2_LOG_HEAD_FLUSH_NORMAL |
                       GFS2_LFC_INODE_GO_SYNC);
        filemap_fdatawrite(metamapping);
        if (isreg) {
                struct address_space *mapping = ip->i_inode.i_mapping;
                filemap_fdatawrite(mapping);
                error = filemap_fdatawait(mapping);
                mapping_set_error(mapping, error);
        }
        ret = gfs2_inode_metasync(gl);
        if (!error)
                error = ret;
        gfs2_ail_empty_gl(gl);
        /*
         * Writeback of the data mapping may cause the dirty flag to be set
         * so we have to clear it again here.
         */
        smp_mb__before_atomic();
        clear_bit(GLF_DIRTY, &gl->gl_flags);

out:
        gfs2_clear_glop_pending(ip);
        return error;
}

/**
 * inode_go_inval - prepare an inode glock to be released
 * @gl: the glock
 * @flags: DIO_* flags indicating what to invalidate
 *
 * Normally we invalidate everything, but if we are moving into
 * LM_ST_DEFERRED from LM_ST_SHARED or LM_ST_EXCLUSIVE then we
 * can keep hold of the metadata, since it won't have changed.
 *
 */

static void inode_go_inval(struct gfs2_glock *gl, int flags)
{
        struct gfs2_inode *ip = gfs2_glock2inode(gl);

        if (flags & DIO_METADATA) {
                struct address_space *mapping = gfs2_glock2aspace(gl);
                truncate_inode_pages(mapping, 0);
                if (ip) {
                        set_bit(GIF_INVALID, &ip->i_flags);
                        forget_all_cached_acls(&ip->i_inode);
                        security_inode_invalidate_secctx(&ip->i_inode);
                        gfs2_dir_hash_inval(ip);
                }
        }

        if (ip == GFS2_I(gl->gl_name.ln_sbd->sd_rindex)) {
                gfs2_log_flush(gl->gl_name.ln_sbd, NULL,
                               GFS2_LOG_HEAD_FLUSH_NORMAL |
                               GFS2_LFC_INODE_GO_INVAL);
                gl->gl_name.ln_sbd->sd_rindex_uptodate = 0;
        }
        if (ip && S_ISREG(ip->i_inode.i_mode))
                truncate_inode_pages(ip->i_inode.i_mapping, 0);

        gfs2_clear_glop_pending(ip);
}

/**
 * inode_go_demote_ok - Check to see if it's ok to unlock an inode glock
 * @gl: the glock
 *
 * Returns: 1 if it's ok
 */

static int inode_go_demote_ok(const struct gfs2_glock *gl)
{
        struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;

        if (sdp->sd_jindex == gl->gl_object || sdp->sd_rindex == gl->gl_object)
                return 0;

        return 1;
}

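/**
 * gfs2_dinode_in - copy an on-disk dinode into an in-core inode
 * @ip: the in-core GFS2 inode
 * @buf: the buffer containing the on-disk dinode
 *
 * Returns: 0 on success, -EIO if the dinode is corrupt
 */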
static int gfs2_dinode_in(struct gfs2_inode *ip, const void *buf)
{
        const struct gfs2_dinode *str = buf;
        struct timespec64 atime;
        u16 height, depth;

        if (unlikely(ip->i_no_addr != be64_to_cpu(str->di_num.no_addr)))
                goto corrupt;
        ip->i_no_formal_ino = be64_to_cpu(str->di_num.no_formal_ino);
        ip->i_inode.i_mode = be32_to_cpu(str->di_mode);
        ip->i_inode.i_rdev = 0;
        switch (ip->i_inode.i_mode & S_IFMT) {
        case S_IFBLK:
        case S_IFCHR:
                ip->i_inode.i_rdev = MKDEV(be32_to_cpu(str->di_major),
                                           be32_to_cpu(str->di_minor));
                break;
        }

        i_uid_write(&ip->i_inode, be32_to_cpu(str->di_uid));
        i_gid_write(&ip->i_inode, be32_to_cpu(str->di_gid));
        set_nlink(&ip->i_inode, be32_to_cpu(str->di_nlink));
        i_size_write(&ip->i_inode, be64_to_cpu(str->di_size));
        gfs2_set_inode_blocks(&ip->i_inode, be64_to_cpu(str->di_blocks));
        atime.tv_sec = be64_to_cpu(str->di_atime);
        atime.tv_nsec = be32_to_cpu(str->di_atime_nsec);
        if (timespec64_compare(&ip->i_inode.i_atime, &atime) < 0)
                ip->i_inode.i_atime = atime;
        ip->i_inode.i_mtime.tv_sec = be64_to_cpu(str->di_mtime);
        ip->i_inode.i_mtime.tv_nsec = be32_to_cpu(str->di_mtime_nsec);
        ip->i_inode.i_ctime.tv_sec = be64_to_cpu(str->di_ctime);
        ip->i_inode.i_ctime.tv_nsec = be32_to_cpu(str->di_ctime_nsec);

        ip->i_goal = be64_to_cpu(str->di_goal_meta);
        ip->i_generation = be64_to_cpu(str->di_generation);

        ip->i_diskflags = be32_to_cpu(str->di_flags);
        ip->i_eattr = be64_to_cpu(str->di_eattr);
        /* i_diskflags and i_eattr must be set before gfs2_set_inode_flags() */
        gfs2_set_inode_flags(&ip->i_inode);
        height = be16_to_cpu(str->di_height);
        if (unlikely(height > GFS2_MAX_META_HEIGHT))
                goto corrupt;
        ip->i_height = (u8)height;

        depth = be16_to_cpu(str->di_depth);
        if (unlikely(depth > GFS2_DIR_MAX_DEPTH))
                goto corrupt;
        ip->i_depth = (u8)depth;
        ip->i_entries = be32_to_cpu(str->di_entries);

        if (S_ISREG(ip->i_inode.i_mode))
                gfs2_set_aops(&ip->i_inode);

        return 0;
corrupt:
        gfs2_consist_inode(ip);
        return -EIO;
}

/**
 * gfs2_inode_refresh - Refresh the incore copy of the dinode
 * @ip: The GFS2 inode
 *
 * Returns: errno
 */

int gfs2_inode_refresh(struct gfs2_inode *ip)
{
        struct buffer_head *dibh;
        int error;

        error = gfs2_meta_inode_buffer(ip, &dibh);
        if (error)
                return error;

        error = gfs2_dinode_in(ip, dibh->b_data);
        brelse(dibh);
        clear_bit(GIF_INVALID, &ip->i_flags);

        return error;
}

/**
 * inode_go_lock - operation done after an inode lock is locked by a process
 * @gh: the glock holder
 *
 * Returns: errno
 */

static int inode_go_lock(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;
        struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
        struct gfs2_inode *ip = gl->gl_object;
        int error = 0;

        if (!ip || (gh->gh_flags & GL_SKIP))
                return 0;

        if (test_bit(GIF_INVALID, &ip->i_flags)) {
                error = gfs2_inode_refresh(ip);
                if (error)
                        return error;
        }

        if (gh->gh_state != LM_ST_DEFERRED)
                inode_dio_wait(&ip->i_inode);

        if ((ip->i_diskflags & GFS2_DIF_TRUNC_IN_PROG) &&
            (gl->gl_state == LM_ST_EXCLUSIVE) &&
            (gh->gh_state == LM_ST_EXCLUSIVE)) {
                spin_lock(&sdp->sd_trunc_lock);
                if (list_empty(&ip->i_trunc_list))
                        list_add(&ip->i_trunc_list, &sdp->sd_trunc_list);
                spin_unlock(&sdp->sd_trunc_lock);
                wake_up(&sdp->sd_quota_wait);
                return 1;
        }

        return error;
}

/**
 * inode_go_dump - print information about an inode
 * @seq: The iterator
 * @gl: The glock
 * @fs_id_buf: file system id (may be empty)
 *
 */

static void inode_go_dump(struct seq_file *seq, struct gfs2_glock *gl,
                          const char *fs_id_buf)
{
        struct gfs2_inode *ip = gl->gl_object;
        struct inode *inode = &ip->i_inode;
        unsigned long nrpages;

        if (ip == NULL)
                return;

        xa_lock_irq(&inode->i_data.i_pages);
        nrpages = inode->i_data.nrpages;
        xa_unlock_irq(&inode->i_data.i_pages);

        gfs2_print_dbg(seq, "%s I: n:%llu/%llu t:%u f:0x%02lx d:0x%08x s:%llu "
                       "p:%lu\n", fs_id_buf,
                  (unsigned long long)ip->i_no_formal_ino,
                  (unsigned long long)ip->i_no_addr,
                  IF2DT(ip->i_inode.i_mode), ip->i_flags,
                  (unsigned int)ip->i_diskflags,
                  (unsigned long long)i_size_read(inode), nrpages);
}

/**
 * freeze_go_sync - promote/demote the freeze glock
 * @gl: the glock
 *
 */

static int freeze_go_sync(struct gfs2_glock *gl)
{
        int error = 0;
        struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;

        /*
         * We need to check gl_state == LM_ST_SHARED here and not gl_req ==
         * LM_ST_EXCLUSIVE. That's because when any node does a freeze,
         * all the nodes should have the freeze glock in SH mode and they all
         * call do_xmote: One for EX and the others for UN. They ALL must
         * freeze locally, and they ALL must queue freeze work. The freeze_work
         * calls freeze_func, which tries to reacquire the freeze glock in SH,
         * effectively waiting for the thaw on the node who holds it in EX.
         * Once thawed, the work func acquires the freeze glock in
         * SH and everybody goes back to thawed.
         */
        if (gl->gl_state == LM_ST_SHARED && !gfs2_withdrawn(sdp) &&
            !test_bit(SDF_NORECOVERY, &sdp->sd_flags)) {
                atomic_set(&sdp->sd_freeze_state, SFS_STARTING_FREEZE);
                error = freeze_super(sdp->sd_vfs);
                if (error) {
                        fs_info(sdp, "GFS2: couldn't freeze filesystem: %d\n",
                                error);
                        if (gfs2_withdrawn(sdp)) {
                                atomic_set(&sdp->sd_freeze_state, SFS_UNFROZEN);
                                return 0;
                        }
                        gfs2_assert_withdraw(sdp, 0);
                }
                queue_work(gfs2_freeze_wq, &sdp->sd_freeze_work);
                if (test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags))
                        gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_FREEZE |
                                       GFS2_LFC_FREEZE_GO_SYNC);
                else /* read-only mounts */
                        atomic_set(&sdp->sd_freeze_state, SFS_FROZEN);
        }
        return 0;
}

/**
 * freeze_go_xmote_bh - After promoting/demoting the freeze glock
 * @gl: the glock
 * @gh: the glock holder
 *
 */

static int freeze_go_xmote_bh(struct gfs2_glock *gl, struct gfs2_holder *gh)
{
        struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
        struct gfs2_inode *ip = GFS2_I(sdp->sd_jdesc->jd_inode);
        struct gfs2_glock *j_gl = ip->i_gl;
        struct gfs2_log_header_host head;
        int error;

        if (test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
                j_gl->gl_ops->go_inval(j_gl, DIO_METADATA);

                error = gfs2_find_jhead(sdp->sd_jdesc, &head, false);
                if (error)
                        gfs2_consist(sdp);
                if (!(head.lh_flags & GFS2_LOG_HEAD_UNMOUNT))
                        gfs2_consist(sdp);

                /*  Initialize some head of the log stuff  */
                if (!gfs2_withdrawn(sdp)) {
                        sdp->sd_log_sequence = head.lh_sequence + 1;
                        gfs2_log_pointers_init(sdp, head.lh_blkno);
                }
        }
        return 0;
}

/**
 * freeze_go_demote_ok
 * @gl: the glock
 *
 * Always returns 0
 */

static int freeze_go_demote_ok(const struct gfs2_glock *gl)
{
        return 0;
}

/**
 * iopen_go_callback - schedule the dcache entry for the inode to be deleted
 * @gl: the glock
 * @remote: true if this came from a different cluster node
 *
 * gl_lockref.lock is held while calling this
 */
static void iopen_go_callback(struct gfs2_glock *gl, bool remote)
{
        struct gfs2_inode *ip = gl->gl_object;
        struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;

        if (!remote || sb_rdonly(sdp->sd_vfs))
                return;

        if (gl->gl_demote_state == LM_ST_UNLOCKED &&
            gl->gl_state == LM_ST_SHARED && ip) {
                gl->gl_lockref.count++;
                if (!queue_delayed_work(gfs2_delete_workqueue,
                                        &gl->gl_delete, 0))
                        gl->gl_lockref.count--;
        }
}

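/**
 * iopen_go_demote_ok - check to see if it's ok to demote an iopen glock
 * @gl: the glock
 *
 * An iopen glock may only be demoted if no delete work is queued for it;
 * the queued work still needs the glock.
 */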
static int iopen_go_demote_ok(const struct gfs2_glock *gl)
{
        return !gfs2_delete_work_queued(gl);
}

/**
 * inode_go_free - wake up anyone waiting for dlm's unlock ast to free it
 * @gl: glock being freed
 *
 * For now, this is only used for the journal inode glock. In withdraw
 * situations, we need to wait for the glock to be freed so that we know
 * other nodes may proceed with recovery / journal replay.
 */
static void inode_go_free(struct gfs2_glock *gl)
{
        /* Note that we cannot reference gl_object because it's already set
         * to NULL by this point in its lifecycle. */
        if (!test_bit(GLF_FREEING, &gl->gl_flags))
                return;
        clear_bit_unlock(GLF_FREEING, &gl->gl_flags);
        wake_up_bit(&gl->gl_flags, GLF_FREEING);
}

/**
 * nondisk_go_callback - used to signal when a node did a withdraw
 * @gl: the nondisk glock
 * @remote: true if this came from a different cluster node
 *
 */
static void nondisk_go_callback(struct gfs2_glock *gl, bool remote)
{
        struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;

        /* Ignore the callback unless it's from another node, and it's the
           live lock. */
        if (!remote || gl->gl_name.ln_number != GFS2_LIVE_LOCK)
                return;

        /* First order of business is to cancel the demote request. We don't
         * really want to demote a nondisk glock. At best it's just to inform
         * us of another node's withdraw. We'll keep it in SH mode. */
        clear_bit(GLF_DEMOTE, &gl->gl_flags);
        clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags);

        /* Ignore the unlock if we're withdrawn, unmounting, or in recovery. */
        if (test_bit(SDF_NORECOVERY, &sdp->sd_flags) ||
            test_bit(SDF_WITHDRAWN, &sdp->sd_flags) ||
            test_bit(SDF_REMOTE_WITHDRAW, &sdp->sd_flags))
                return;

        /* We only care when a node wants us to unlock, because that means
         * they want a journal recovered. */
        if (gl->gl_demote_state != LM_ST_UNLOCKED)
                return;

        if (sdp->sd_args.ar_spectator) {
                fs_warn(sdp, "Spectator node cannot recover journals.\n");
                return;
        }

        fs_warn(sdp, "Some node has withdrawn; checking for recovery.\n");
        set_bit(SDF_REMOTE_WITHDRAW, &sdp->sd_flags);
        /*
         * We can't call remote_withdraw directly here or gfs2_recover_journal
         * because this is called from the glock unlock function and the
         * remote_withdraw needs to enqueue and dequeue the same "live" glock
         * we were called from. So we queue it to the control work queue in
         * lock_dlm.
         */
        queue_delayed_work(gfs2_control_wq, &sdp->sd_control_work, 0);
}

const struct gfs2_glock_operations gfs2_meta_glops = {
        .go_type = LM_TYPE_META,
        .go_flags = GLOF_NONDISK,
};

const struct gfs2_glock_operations gfs2_inode_glops = {
        .go_sync = inode_go_sync,
        .go_inval = inode_go_inval,
        .go_demote_ok = inode_go_demote_ok,
        .go_lock = inode_go_lock,
        .go_dump = inode_go_dump,
        .go_type = LM_TYPE_INODE,
        .go_flags = GLOF_ASPACE | GLOF_LRU | GLOF_LVB,
        .go_free = inode_go_free,
};

const struct gfs2_glock_operations gfs2_rgrp_glops = {
        .go_sync = rgrp_go_sync,
        .go_inval = rgrp_go_inval,
        .go_lock = gfs2_rgrp_go_lock,
        .go_dump = gfs2_rgrp_go_dump,
        .go_type = LM_TYPE_RGRP,
        .go_flags = GLOF_LVB,
};

const struct gfs2_glock_operations gfs2_freeze_glops = {
        .go_sync = freeze_go_sync,
        .go_xmote_bh = freeze_go_xmote_bh,
        .go_demote_ok = freeze_go_demote_ok,
        .go_type = LM_TYPE_NONDISK,
        .go_flags = GLOF_NONDISK,
};

const struct gfs2_glock_operations gfs2_iopen_glops = {
        .go_type = LM_TYPE_IOPEN,
        .go_callback = iopen_go_callback,
        .go_demote_ok = iopen_go_demote_ok,
        .go_flags = GLOF_LRU | GLOF_NONDISK,
        .go_subclass = 1,
};

const struct gfs2_glock_operations gfs2_flock_glops = {
        .go_type = LM_TYPE_FLOCK,
        .go_flags = GLOF_LRU | GLOF_NONDISK,
};

const struct gfs2_glock_operations gfs2_nondisk_glops = {
        .go_type = LM_TYPE_NONDISK,
        .go_flags = GLOF_NONDISK,
        .go_callback = nondisk_go_callback,
};

const struct gfs2_glock_operations gfs2_quota_glops = {
        .go_type = LM_TYPE_QUOTA,
        .go_flags = GLOF_LVB | GLOF_LRU | GLOF_NONDISK,
};

const struct gfs2_glock_operations gfs2_journal_glops = {
        .go_type = LM_TYPE_JOURNAL,
        .go_flags = GLOF_NONDISK,
};

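/* Map from lock type (LM_TYPE_*) to the glock operations for that type. */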
const struct gfs2_glock_operations *gfs2_glops_list[] = {
        [LM_TYPE_META] = &gfs2_meta_glops,
        [LM_TYPE_INODE] = &gfs2_inode_glops,
        [LM_TYPE_RGRP] = &gfs2_rgrp_glops,
        [LM_TYPE_IOPEN] = &gfs2_iopen_glops,
        [LM_TYPE_FLOCK] = &gfs2_flock_glops,
        [LM_TYPE_NONDISK] = &gfs2_nondisk_glops,
        [LM_TYPE_QUOTA] = &gfs2_quota_glops,
        [LM_TYPE_JOURNAL] = &gfs2_journal_glops,
};