/* fs/xfs/xfs_log.c */
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_error.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_log.h"
#include "xfs_log_priv.h"
#include "xfs_log_recover.h"
#include "xfs_inode.h"
#include "xfs_trace.h"
#include "xfs_fsops.h"
#include "xfs_cksum.h"
#include "xfs_sysfs.h"

kmem_zone_t     *xfs_log_ticket_zone;

/* Local miscellaneous function prototypes */
STATIC int
xlog_commit_record(
        struct xlog             *log,
        struct xlog_ticket      *ticket,
        struct xlog_in_core     **iclog,
        xfs_lsn_t               *commitlsnp);

STATIC struct xlog *
xlog_alloc_log(
        struct xfs_mount        *mp,
        struct xfs_buftarg      *log_target,
        xfs_daddr_t             blk_offset,
        int                     num_bblks);
STATIC int
xlog_space_left(
        struct xlog             *log,
        atomic64_t              *head);
STATIC int
xlog_sync(
        struct xlog             *log,
        struct xlog_in_core     *iclog);
STATIC void
xlog_dealloc_log(
        struct xlog             *log);

/* local state machine functions */
STATIC void xlog_state_done_syncing(xlog_in_core_t *iclog, int);
STATIC void
xlog_state_do_callback(
        struct xlog             *log,
        int                     aborted,
        struct xlog_in_core     *iclog);
STATIC int
xlog_state_get_iclog_space(
        struct xlog             *log,
        int                     len,
        struct xlog_in_core     **iclog,
        struct xlog_ticket      *ticket,
        int                     *continued_write,
        int                     *logoffsetp);
STATIC int
xlog_state_release_iclog(
        struct xlog             *log,
        struct xlog_in_core     *iclog);
STATIC void
xlog_state_switch_iclogs(
        struct xlog             *log,
        struct xlog_in_core     *iclog,
        int                     eventual_size);
STATIC void
xlog_state_want_sync(
        struct xlog             *log,
        struct xlog_in_core     *iclog);

STATIC void
xlog_grant_push_ail(
        struct xlog             *log,
        int                     need_bytes);
STATIC void
xlog_regrant_reserve_log_space(
        struct xlog             *log,
        struct xlog_ticket      *ticket);
STATIC void
xlog_ungrant_log_space(
        struct xlog             *log,
        struct xlog_ticket      *ticket);

#if defined(DEBUG)
STATIC void
xlog_verify_dest_ptr(
        struct xlog             *log,
        char                    *ptr);
STATIC void
xlog_verify_grant_tail(
        struct xlog *log);
STATIC void
xlog_verify_iclog(
        struct xlog             *log,
        struct xlog_in_core     *iclog,
        int                     count,
        bool                    syncing);
STATIC void
xlog_verify_tail_lsn(
        struct xlog             *log,
        struct xlog_in_core     *iclog,
        xfs_lsn_t               tail_lsn);
#else
#define xlog_verify_dest_ptr(a,b)
#define xlog_verify_grant_tail(a)
#define xlog_verify_iclog(a,b,c,d)
#define xlog_verify_tail_lsn(a,b,c)
#endif

STATIC int
xlog_iclogs_empty(
        struct xlog             *log);

static void
xlog_grant_sub_space(
        struct xlog             *log,
        atomic64_t              *head,
        int                     bytes)
{
        int64_t head_val = atomic64_read(head);
        int64_t new, old;

        do {
                int     cycle, space;

                xlog_crack_grant_head_val(head_val, &cycle, &space);

                space -= bytes;
                if (space < 0) {
                        space += log->l_logsize;
                        cycle--;
                }

                old = head_val;
                new = xlog_assign_grant_head_val(cycle, space);
                head_val = atomic64_cmpxchg(head, old, new);
        } while (head_val != old);
}

static void
xlog_grant_add_space(
        struct xlog             *log,
        atomic64_t              *head,
        int                     bytes)
{
        int64_t head_val = atomic64_read(head);
        int64_t new, old;

        do {
                int             tmp;
                int             cycle, space;

                xlog_crack_grant_head_val(head_val, &cycle, &space);

                tmp = log->l_logsize - space;
                if (tmp > bytes)
                        space += bytes;
                else {
                        space = bytes - tmp;
                        cycle++;
                }

                old = head_val;
                new = xlog_assign_grant_head_val(cycle, space);
                head_val = atomic64_cmpxchg(head, old, new);
        } while (head_val != old);
}

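/*
 * Illustrative, user-space sketch (not part of this file): the two grant
 * head updaters above pack a (cycle, space) pair into one 64-bit value so
 * that wrap-around can be handled with a lock-free cmpxchg loop.  The
 * packing below assumes a simple cycle-in-the-high-word layout; the real
 * layout comes from xlog_crack_grant_head_val()/xlog_assign_grant_head_val()
 * in xfs_log_priv.h, and all names here are hypothetical.
 */
#if 0
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define LOGSIZE (16 * 1024 * 1024)      /* assumed 16MB log */

static int64_t pack_grant_head(int cycle, int space)
{
        return ((int64_t)cycle << 32) | (uint32_t)space;
}

static void crack_grant_head(int64_t val, int *cycle, int *space)
{
        *cycle = val >> 32;
        *space = val & 0xffffffff;
}

/* mirror of xlog_grant_add_space(): wrap 'space' and bump 'cycle' */
static void grant_add_space(_Atomic int64_t *head, int bytes)
{
        int64_t old = atomic_load(head);
        int64_t newval;

        do {
                int cycle, space, tmp;

                crack_grant_head(old, &cycle, &space);
                tmp = LOGSIZE - space;
                if (tmp > bytes)
                        space += bytes;
                else {
                        space = bytes - tmp;
                        cycle++;
                }
                newval = pack_grant_head(cycle, space);
                /* on failure, 'old' is reloaded and the loop retries */
        } while (!atomic_compare_exchange_weak(head, &old, newval));
}

int main(void)
{
        _Atomic int64_t head = pack_grant_head(1, 0);
        int cycle, space;

        grant_add_space(&head, 4096);
        crack_grant_head(atomic_load(&head), &cycle, &space);
        printf("cycle %d space %d\n", cycle, space);    /* cycle 1 space 4096 */
        return 0;
}
#endif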
STATIC void
xlog_grant_head_init(
        struct xlog_grant_head  *head)
{
        xlog_assign_grant_head(&head->grant, 1, 0);
        INIT_LIST_HEAD(&head->waiters);
        spin_lock_init(&head->lock);
}

STATIC void
xlog_grant_head_wake_all(
        struct xlog_grant_head  *head)
{
        struct xlog_ticket      *tic;

        spin_lock(&head->lock);
        list_for_each_entry(tic, &head->waiters, t_queue)
                wake_up_process(tic->t_task);
        spin_unlock(&head->lock);
}

static inline int
xlog_ticket_reservation(
        struct xlog             *log,
        struct xlog_grant_head  *head,
        struct xlog_ticket      *tic)
{
        if (head == &log->l_write_head) {
                ASSERT(tic->t_flags & XLOG_TIC_PERM_RESERV);
                return tic->t_unit_res;
        } else {
                if (tic->t_flags & XLOG_TIC_PERM_RESERV)
                        return tic->t_unit_res * tic->t_cnt;
                else
                        return tic->t_unit_res;
        }
}

STATIC bool
xlog_grant_head_wake(
        struct xlog             *log,
        struct xlog_grant_head  *head,
        int                     *free_bytes)
{
        struct xlog_ticket      *tic;
        int                     need_bytes;

        list_for_each_entry(tic, &head->waiters, t_queue) {
                need_bytes = xlog_ticket_reservation(log, head, tic);
                if (*free_bytes < need_bytes)
                        return false;

                *free_bytes -= need_bytes;
                trace_xfs_log_grant_wake_up(log, tic);
                wake_up_process(tic->t_task);
        }

        return true;
}

STATIC int
xlog_grant_head_wait(
        struct xlog             *log,
        struct xlog_grant_head  *head,
        struct xlog_ticket      *tic,
        int                     need_bytes) __releases(&head->lock)
                                            __acquires(&head->lock)
{
        list_add_tail(&tic->t_queue, &head->waiters);

        do {
                if (XLOG_FORCED_SHUTDOWN(log))
                        goto shutdown;
                xlog_grant_push_ail(log, need_bytes);

                __set_current_state(TASK_UNINTERRUPTIBLE);
                spin_unlock(&head->lock);

                XFS_STATS_INC(xs_sleep_logspace);

                trace_xfs_log_grant_sleep(log, tic);
                schedule();
                trace_xfs_log_grant_wake(log, tic);

                spin_lock(&head->lock);
                if (XLOG_FORCED_SHUTDOWN(log))
                        goto shutdown;
        } while (xlog_space_left(log, &head->grant) < need_bytes);

        list_del_init(&tic->t_queue);
        return 0;
shutdown:
        list_del_init(&tic->t_queue);
        return -EIO;
}

/*
 * Atomically get the log space required for a log ticket.
 *
 * Once a ticket gets put onto head->waiters, it will only return after the
 * needed reservation is satisfied.
 *
 * This function is structured so that it has a lock free fast path. This is
 * necessary because every new transaction reservation will come through this
 * path. Hence any lock will be globally hot if we take it unconditionally on
 * every pass.
 *
 * As tickets are only ever moved on and off head->waiters under head->lock, we
 * only need to take that lock if we are going to add the ticket to the queue
 * and sleep. We can avoid taking the lock if the ticket was never added to
 * head->waiters because the t_queue list head will be empty and we hold the
 * only reference to it so it can safely be checked unlocked.
 */
STATIC int
xlog_grant_head_check(
        struct xlog             *log,
        struct xlog_grant_head  *head,
        struct xlog_ticket      *tic,
        int                     *need_bytes)
{
        int                     free_bytes;
        int                     error = 0;

        ASSERT(!(log->l_flags & XLOG_ACTIVE_RECOVERY));

        /*
         * If there are other waiters on the queue then give them a chance at
         * logspace before us.  Wake up the first waiters; if we do not wake
         * up all the waiters then go to sleep waiting for more free space,
         * otherwise try to get some space for this transaction.
         */
        *need_bytes = xlog_ticket_reservation(log, head, tic);
        free_bytes = xlog_space_left(log, &head->grant);
        if (!list_empty_careful(&head->waiters)) {
                spin_lock(&head->lock);
                if (!xlog_grant_head_wake(log, head, &free_bytes) ||
                    free_bytes < *need_bytes) {
                        error = xlog_grant_head_wait(log, head, tic,
                                                     *need_bytes);
                }
                spin_unlock(&head->lock);
        } else if (free_bytes < *need_bytes) {
                spin_lock(&head->lock);
                error = xlog_grant_head_wait(log, head, tic, *need_bytes);
                spin_unlock(&head->lock);
        }

        return error;
}

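/*
 * Illustrative, user-space sketch (not part of this file): a loose POSIX
 * threads analogue of the lock-free fast path above.  The waiter list is
 * checked without the lock; the lock is only taken when a ticket must be
 * queued and put to sleep.  All names are hypothetical, and unlike the
 * kernel code (where the waker debits free space and the caller consumes
 * it via the atomic grant head), this sketch debits in the waiter itself.
 */
#if 0
#include <pthread.h>

struct grant_head {
        pthread_mutex_t lock;
        pthread_cond_t  wait;
        int             nwaiters;       /* stand-in for the waiter list */
        int             free_bytes;
};

static int grant_head_check(struct grant_head *head, int need_bytes)
{
        /*
         * Unlocked fast path: no waiters and enough space means we can
         * proceed without ever touching the lock.
         */
        if (head->nwaiters == 0 && head->free_bytes >= need_bytes)
                return 0;

        /* Slow path: queue under the lock and sleep until space appears. */
        pthread_mutex_lock(&head->lock);
        head->nwaiters++;
        while (head->free_bytes < need_bytes)
                pthread_cond_wait(&head->wait, &head->lock);
        head->nwaiters--;
        head->free_bytes -= need_bytes;
        pthread_mutex_unlock(&head->lock);
        return 0;
}

int main(void)
{
        struct grant_head head = {
                .lock = PTHREAD_MUTEX_INITIALIZER,
                .wait = PTHREAD_COND_INITIALIZER,
                .free_bytes = 1 << 20,
        };

        return grant_head_check(&head, 4096);   /* takes the fast path */
}
#endif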
static void
xlog_tic_reset_res(xlog_ticket_t *tic)
{
        tic->t_res_num = 0;
        tic->t_res_arr_sum = 0;
        tic->t_res_num_ophdrs = 0;
}

static void
xlog_tic_add_region(xlog_ticket_t *tic, uint len, uint type)
{
        if (tic->t_res_num == XLOG_TIC_LEN_MAX) {
                /* add to overflow and start again */
                tic->t_res_o_flow += tic->t_res_arr_sum;
                tic->t_res_num = 0;
                tic->t_res_arr_sum = 0;
        }

        tic->t_res_arr[tic->t_res_num].r_len = len;
        tic->t_res_arr[tic->t_res_num].r_type = type;
        tic->t_res_arr_sum += len;
        tic->t_res_num++;
}

/*
 * Replenish the byte reservation required by moving the grant write head.
 */
int
xfs_log_regrant(
        struct xfs_mount        *mp,
        struct xlog_ticket      *tic)
{
        struct xlog             *log = mp->m_log;
        int                     need_bytes;
        int                     error = 0;

        if (XLOG_FORCED_SHUTDOWN(log))
                return -EIO;

        XFS_STATS_INC(xs_try_logspace);

        /*
         * This is a new transaction on the ticket, so we need to change the
         * transaction ID so that the next transaction has a different TID in
         * the log. Just add one to the existing tid so that we can see chains
         * of rolling transactions in the log easily.
         */
        tic->t_tid++;

        xlog_grant_push_ail(log, tic->t_unit_res);

        tic->t_curr_res = tic->t_unit_res;
        xlog_tic_reset_res(tic);

        if (tic->t_cnt > 0)
                return 0;

        trace_xfs_log_regrant(log, tic);

        error = xlog_grant_head_check(log, &log->l_write_head, tic,
                                      &need_bytes);
        if (error)
                goto out_error;

        xlog_grant_add_space(log, &log->l_write_head.grant, need_bytes);
        trace_xfs_log_regrant_exit(log, tic);
        xlog_verify_grant_tail(log);
        return 0;

out_error:
        /*
         * If we are failing, make sure the ticket doesn't have any current
         * reservations.  We don't want to add this back when the ticket/
         * transaction gets cancelled.
         */
        tic->t_curr_res = 0;
        tic->t_cnt = 0; /* ungrant will give back unit_res * t_cnt. */
        return error;
}

/*
 * Reserve log space and return a ticket corresponding to the reservation.
 *
 * Each reservation is going to reserve extra space for a log record header.
 * When writes happen to the on-disk log, we don't subtract the length of the
 * log record header from any reservation.  By wasting space in each
 * reservation, we prevent over-allocation problems.
 */
int
xfs_log_reserve(
        struct xfs_mount        *mp,
        int                     unit_bytes,
        int                     cnt,
        struct xlog_ticket      **ticp,
        __uint8_t               client,
        bool                    permanent,
        uint                    t_type)
{
        struct xlog             *log = mp->m_log;
        struct xlog_ticket      *tic;
        int                     need_bytes;
        int                     error = 0;

        ASSERT(client == XFS_TRANSACTION || client == XFS_LOG);

        if (XLOG_FORCED_SHUTDOWN(log))
                return -EIO;

        XFS_STATS_INC(xs_try_logspace);

        ASSERT(*ticp == NULL);
        tic = xlog_ticket_alloc(log, unit_bytes, cnt, client, permanent,
                                KM_SLEEP | KM_MAYFAIL);
        if (!tic)
                return -ENOMEM;

        tic->t_trans_type = t_type;
        *ticp = tic;

        xlog_grant_push_ail(log, tic->t_cnt ? tic->t_unit_res * tic->t_cnt
                                            : tic->t_unit_res);

        trace_xfs_log_reserve(log, tic);

        error = xlog_grant_head_check(log, &log->l_reserve_head, tic,
                                      &need_bytes);
        if (error)
                goto out_error;

        xlog_grant_add_space(log, &log->l_reserve_head.grant, need_bytes);
        xlog_grant_add_space(log, &log->l_write_head.grant, need_bytes);
        trace_xfs_log_reserve_exit(log, tic);
        xlog_verify_grant_tail(log);
        return 0;

out_error:
        /*
         * If we are failing, make sure the ticket doesn't have any current
         * reservations.  We don't want to add this back when the ticket/
         * transaction gets cancelled.
         */
        tic->t_curr_res = 0;
        tic->t_cnt = 0; /* ungrant will give back unit_res * t_cnt. */
        return error;
}


/*
 * NOTES:
 *
 *      1. currblock field gets updated at startup and after in-core logs
 *              are marked with WANT_SYNC.
 */

/*
 * This routine is called when a user of a log manager ticket is done with
 * the reservation.  If the ticket was ever used, then a commit record for
 * the associated transaction is written out as a log operation header with
 * no data.  The flag XLOG_TIC_INITED is set when the first write occurs with
 * a given ticket.  If the ticket was one with a permanent reservation, then
 * a few operations are done differently.  Permanent reservation tickets by
 * default don't release the reservation.  They just commit the current
 * transaction with the belief that the reservation is still needed.  A flag
 * must be passed in before permanent reservations are actually released.
 * When these types of tickets are not released, they need to be set into
 * the inited state again.  By doing this, a start record will be written
 * out when the next write occurs.
 */
xfs_lsn_t
xfs_log_done(
        struct xfs_mount        *mp,
        struct xlog_ticket      *ticket,
        struct xlog_in_core     **iclog,
        uint                    flags)
{
        struct xlog             *log = mp->m_log;
        xfs_lsn_t               lsn = 0;

        if (XLOG_FORCED_SHUTDOWN(log) ||
            /*
             * If nothing was ever written, don't write out commit record.
             * If we get an error, just continue and give back the log ticket.
             */
            (((ticket->t_flags & XLOG_TIC_INITED) == 0) &&
             (xlog_commit_record(log, ticket, iclog, &lsn)))) {
                lsn = (xfs_lsn_t) -1;
                if (ticket->t_flags & XLOG_TIC_PERM_RESERV) {
                        flags |= XFS_LOG_REL_PERM_RESERV;
                }
        }


        if ((ticket->t_flags & XLOG_TIC_PERM_RESERV) == 0 ||
            (flags & XFS_LOG_REL_PERM_RESERV)) {
                trace_xfs_log_done_nonperm(log, ticket);

                /*
                 * Release ticket if not permanent reservation or a specific
                 * request has been made to release a permanent reservation.
                 */
                xlog_ungrant_log_space(log, ticket);
                xfs_log_ticket_put(ticket);
        } else {
                trace_xfs_log_done_perm(log, ticket);

                xlog_regrant_reserve_log_space(log, ticket);
                /* If this ticket was a permanent reservation and we aren't
                 * trying to release it, reset the inited flags; so next time
                 * we write, a start record will be written out.
                 */
                ticket->t_flags |= XLOG_TIC_INITED;
        }

        return lsn;
}

/*
 * Attaches a new iclog I/O completion callback routine during
 * transaction commit.  If the log is in error state, a non-zero
 * return code is handed back and the caller is responsible for
 * executing the callback at an appropriate time.
 */
int
xfs_log_notify(
        struct xfs_mount        *mp,
        struct xlog_in_core     *iclog,
        xfs_log_callback_t      *cb)
{
        int     abortflg;

        spin_lock(&iclog->ic_callback_lock);
        abortflg = (iclog->ic_state & XLOG_STATE_IOERROR);
        if (!abortflg) {
                ASSERT_ALWAYS((iclog->ic_state == XLOG_STATE_ACTIVE) ||
                              (iclog->ic_state == XLOG_STATE_WANT_SYNC));
                cb->cb_next = NULL;
                *(iclog->ic_callback_tail) = cb;
                iclog->ic_callback_tail = &(cb->cb_next);
        }
        spin_unlock(&iclog->ic_callback_lock);
        return abortflg;
}

int
xfs_log_release_iclog(
        struct xfs_mount        *mp,
        struct xlog_in_core     *iclog)
{
        if (xlog_state_release_iclog(mp->m_log, iclog)) {
                xfs_force_shutdown(mp, SHUTDOWN_LOG_IO_ERROR);
                return -EIO;
        }

        return 0;
}

/*
 * Mount a log filesystem
 *
 * mp           - ubiquitous xfs mount point structure
 * log_target   - buftarg of on-disk log device
 * blk_offset   - Start block # where block size is 512 bytes (BBSIZE)
 * num_bblocks  - Number of BBSIZE blocks in on-disk log
 *
 * Return error or zero.
 */
int
xfs_log_mount(
        xfs_mount_t     *mp,
        xfs_buftarg_t   *log_target,
        xfs_daddr_t     blk_offset,
        int             num_bblks)
{
        int             error = 0;
        int             min_logfsbs;

        if (!(mp->m_flags & XFS_MOUNT_NORECOVERY)) {
                xfs_notice(mp, "Mounting V%d Filesystem",
                           XFS_SB_VERSION_NUM(&mp->m_sb));
        } else {
                xfs_notice(mp,
"Mounting V%d filesystem in no-recovery mode. Filesystem will be inconsistent.",
                           XFS_SB_VERSION_NUM(&mp->m_sb));
                ASSERT(mp->m_flags & XFS_MOUNT_RDONLY);
        }

        mp->m_log = xlog_alloc_log(mp, log_target, blk_offset, num_bblks);
        if (IS_ERR(mp->m_log)) {
                error = PTR_ERR(mp->m_log);
                goto out;
        }

        /*
         * Validate the given log space and drop a critical message via syslog
         * if the log size is so small that it would lead to unexpected
         * situations in the transaction log space reservation stage.
         *
         * Note: we can't just reject the mount if the validation fails.  This
         * would mean that people would have to downgrade their kernel just to
         * remedy the situation as there is no way to grow the log (short of
         * black magic surgery with xfs_db).
         *
         * We can, however, reject mounts for CRC format filesystems, as the
         * mkfs binary being used to make the filesystem should never create a
         * filesystem with a log that is too small.
         */
        min_logfsbs = xfs_log_calc_minimum_size(mp);

        if (mp->m_sb.sb_logblocks < min_logfsbs) {
                xfs_warn(mp,
                "Log size %d blocks too small, minimum size is %d blocks",
                         mp->m_sb.sb_logblocks, min_logfsbs);
                error = -EINVAL;
        } else if (mp->m_sb.sb_logblocks > XFS_MAX_LOG_BLOCKS) {
                xfs_warn(mp,
                "Log size %d blocks too large, maximum size is %lld blocks",
                         mp->m_sb.sb_logblocks, XFS_MAX_LOG_BLOCKS);
                error = -EINVAL;
        } else if (XFS_FSB_TO_B(mp, mp->m_sb.sb_logblocks) > XFS_MAX_LOG_BYTES) {
                xfs_warn(mp,
                "log size %lld bytes too large, maximum size is %lld bytes",
                         XFS_FSB_TO_B(mp, mp->m_sb.sb_logblocks),
                         XFS_MAX_LOG_BYTES);
                error = -EINVAL;
        }
        if (error) {
                if (xfs_sb_version_hascrc(&mp->m_sb)) {
                        xfs_crit(mp, "AAIEEE! Log failed size checks. Abort!");
                        ASSERT(0);
                        goto out_free_log;
                }
                xfs_crit(mp,
"Log size out of supported range. Continuing onwards, but if log hangs are\n"
"experienced then please report this message in the bug report.");
        }

        /*
         * Initialize the AIL now we have a log.
         */
        error = xfs_trans_ail_init(mp);
        if (error) {
                xfs_warn(mp, "AIL initialisation failed: error %d", error);
                goto out_free_log;
        }
        mp->m_log->l_ailp = mp->m_ail;

        /*
         * skip log recovery on a norecovery mount.  pretend it all
         * just worked.
         */
        if (!(mp->m_flags & XFS_MOUNT_NORECOVERY)) {
                int     readonly = (mp->m_flags & XFS_MOUNT_RDONLY);

                if (readonly)
                        mp->m_flags &= ~XFS_MOUNT_RDONLY;

                error = xlog_recover(mp->m_log);

                if (readonly)
                        mp->m_flags |= XFS_MOUNT_RDONLY;
                if (error) {
                        xfs_warn(mp, "log mount/recovery failed: error %d",
                                error);
                        goto out_destroy_ail;
                }
        }

        error = xfs_sysfs_init(&mp->m_log->l_kobj, &xfs_log_ktype, &mp->m_kobj,
                               "log");
        if (error)
                goto out_destroy_ail;

        /* Normal transactions can now occur */
        mp->m_log->l_flags &= ~XLOG_ACTIVE_RECOVERY;

        /*
         * Now that the log has been fully initialised and we know where our
         * space grant counters are, we can initialise the permanent ticket
         * needed for delayed logging to work.
         */
        xlog_cil_init_post_recovery(mp->m_log);

        return 0;

out_destroy_ail:
        xfs_trans_ail_destroy(mp);
out_free_log:
        xlog_dealloc_log(mp->m_log);
out:
        return error;
}

/*
 * Finish the recovery of the file system.  This is separate from the
 * xfs_log_mount() call, because it depends on the code in xfs_mountfs() to read
 * in the root and real-time bitmap inodes between calling xfs_log_mount() and
 * here.
 *
 * If we finish recovery successfully, start the background log work. If we are
 * not doing recovery, then we have a RO filesystem and we don't need to start
 * it.
 */
int
xfs_log_mount_finish(xfs_mount_t *mp)
{
        int     error = 0;

        if (!(mp->m_flags & XFS_MOUNT_NORECOVERY)) {
                error = xlog_recover_finish(mp->m_log);
                if (!error)
                        xfs_log_work_queue(mp);
        } else {
                ASSERT(mp->m_flags & XFS_MOUNT_RDONLY);
        }


        return error;
}

/*
 * Final log writes as part of unmount.
 *
 * Mark the filesystem clean as unmount happens.  Note that during relocation
 * this routine needs to be executed as part of source-bag while the
 * deallocation must not be done until source-end.
 */

/*
 * Unmount record used to have a string "Unmount filesystem--" in the
 * data section where the "Un" was really a magic number (XLOG_UNMOUNT_TYPE).
 * We just write the magic number now since that particular field isn't
 * currently architecture converted and "Unmount" is a bit foo.
 * As far as I know, there weren't any dependencies on the old behaviour.
 */

int
xfs_log_unmount_write(xfs_mount_t *mp)
{
        struct xlog      *log = mp->m_log;
        xlog_in_core_t   *iclog;
#ifdef DEBUG
        xlog_in_core_t   *first_iclog;
#endif
        xlog_ticket_t   *tic = NULL;
        xfs_lsn_t        lsn;
        int              error;

        /*
         * Don't write out unmount record on read-only mounts.
         * Or, if we are doing a forced umount (typically because of IO errors).
         */
        if (mp->m_flags & XFS_MOUNT_RDONLY)
                return 0;

        error = _xfs_log_force(mp, XFS_LOG_SYNC, NULL);
        ASSERT(error || !(XLOG_FORCED_SHUTDOWN(log)));

#ifdef DEBUG
        first_iclog = iclog = log->l_iclog;
        do {
                if (!(iclog->ic_state & XLOG_STATE_IOERROR)) {
                        ASSERT(iclog->ic_state & XLOG_STATE_ACTIVE);
                        ASSERT(iclog->ic_offset == 0);
                }
                iclog = iclog->ic_next;
        } while (iclog != first_iclog);
#endif
        if (!XLOG_FORCED_SHUTDOWN(log)) {
                error = xfs_log_reserve(mp, 600, 1, &tic,
                                        XFS_LOG, 0, XLOG_UNMOUNT_REC_TYPE);
                if (!error) {
                        /* the data section must be 32 bit size aligned */
                        struct {
                            __uint16_t magic;
                            __uint16_t pad1;
                            __uint32_t pad2; /* may as well make it 64 bits */
                        } magic = {
                                .magic = XLOG_UNMOUNT_TYPE,
                        };
                        struct xfs_log_iovec reg = {
                                .i_addr = &magic,
                                .i_len = sizeof(magic),
                                .i_type = XLOG_REG_TYPE_UNMOUNT,
                        };
                        struct xfs_log_vec vec = {
                                .lv_niovecs = 1,
                                .lv_iovecp = &reg,
                        };

                        /* remove inited flag, and account for space used */
                        tic->t_flags = 0;
                        tic->t_curr_res -= sizeof(magic);
                        error = xlog_write(log, &vec, tic, &lsn,
                                           NULL, XLOG_UNMOUNT_TRANS);
                        /*
                         * At this point, we're umounting anyway,
                         * so there's no point in transitioning log state
                         * to IOERROR. Just continue...
                         */
                }

                if (error)
                        xfs_alert(mp, "%s: unmount record failed", __func__);


                spin_lock(&log->l_icloglock);
                iclog = log->l_iclog;
                atomic_inc(&iclog->ic_refcnt);
                xlog_state_want_sync(log, iclog);
                spin_unlock(&log->l_icloglock);
                error = xlog_state_release_iclog(log, iclog);

                spin_lock(&log->l_icloglock);
                if (!(iclog->ic_state == XLOG_STATE_ACTIVE ||
                      iclog->ic_state == XLOG_STATE_DIRTY)) {
                        if (!XLOG_FORCED_SHUTDOWN(log)) {
                                xlog_wait(&iclog->ic_force_wait,
                                                        &log->l_icloglock);
                        } else {
                                spin_unlock(&log->l_icloglock);
                        }
                } else {
                        spin_unlock(&log->l_icloglock);
                }
                if (tic) {
                        trace_xfs_log_umount_write(log, tic);
                        xlog_ungrant_log_space(log, tic);
                        xfs_log_ticket_put(tic);
                }
        } else {
                /*
                 * We're already in forced_shutdown mode, couldn't
                 * even attempt to write out the unmount transaction.
                 *
                 * Go through the motions of sync'ing and releasing
                 * the iclog, even though no I/O will actually happen;
                 * we need to wait for other log I/Os that may already
                 * be in progress.  Do this as a separate section of
                 * code so we'll know if we ever get stuck here that
                 * we're in this odd situation of trying to unmount
                 * a file system that went into forced_shutdown as
                 * the result of an unmount.
                 */
                spin_lock(&log->l_icloglock);
                iclog = log->l_iclog;
                atomic_inc(&iclog->ic_refcnt);

                xlog_state_want_sync(log, iclog);
                spin_unlock(&log->l_icloglock);
                error = xlog_state_release_iclog(log, iclog);

                spin_lock(&log->l_icloglock);

                if (!(iclog->ic_state == XLOG_STATE_ACTIVE ||
                      iclog->ic_state == XLOG_STATE_DIRTY ||
                      iclog->ic_state == XLOG_STATE_IOERROR)) {
                        xlog_wait(&iclog->ic_force_wait,
                                                &log->l_icloglock);
                } else {
                        spin_unlock(&log->l_icloglock);
                }
        }

        return error;
}       /* xfs_log_unmount_write */

/*
 * Empty the log for unmount/freeze.
 *
 * To do this, we first need to shut down the background log work so it is not
 * trying to cover the log as we clean up. We then need to unpin all objects in
 * the log so we can then flush them out. Once they have completed their IO and
 * run the callbacks removing themselves from the AIL, we can write the unmount
 * record.
 */
void
xfs_log_quiesce(
        struct xfs_mount        *mp)
{
        cancel_delayed_work_sync(&mp->m_log->l_work);
        xfs_log_force(mp, XFS_LOG_SYNC);

        /*
         * The superblock buffer is uncached and while xfs_ail_push_all_sync()
         * will push it, xfs_wait_buftarg() will not wait for it. Further,
         * xfs_buf_iowait() cannot be used because it was pushed with the
         * XBF_ASYNC flag set, so we need to use a lock/unlock pair to wait for
         * the IO to complete.
         */
        xfs_ail_push_all_sync(mp->m_ail);
        xfs_wait_buftarg(mp->m_ddev_targp);
        xfs_buf_lock(mp->m_sb_bp);
        xfs_buf_unlock(mp->m_sb_bp);

        xfs_log_unmount_write(mp);
}

/*
 * Shut down and release the AIL and Log.
 *
 * During unmount, we need to ensure we flush all the dirty metadata objects
 * from the AIL so that the log is empty before we write the unmount record to
 * the log. Once this is done, we can tear down the AIL and the log.
 */
void
xfs_log_unmount(
        struct xfs_mount        *mp)
{
        xfs_log_quiesce(mp);

        xfs_trans_ail_destroy(mp);

        xfs_sysfs_del(&mp->m_log->l_kobj);

        xlog_dealloc_log(mp->m_log);
}

void
xfs_log_item_init(
        struct xfs_mount        *mp,
        struct xfs_log_item     *item,
        int                     type,
        const struct xfs_item_ops *ops)
{
        item->li_mountp = mp;
        item->li_ailp = mp->m_ail;
        item->li_type = type;
        item->li_ops = ops;
        item->li_lv = NULL;

        INIT_LIST_HEAD(&item->li_ail);
        INIT_LIST_HEAD(&item->li_cil);
}

/*
 * Wake up processes waiting for log space after we have moved the log tail.
 */
void
xfs_log_space_wake(
        struct xfs_mount        *mp)
{
        struct xlog             *log = mp->m_log;
        int                     free_bytes;

        if (XLOG_FORCED_SHUTDOWN(log))
                return;

        if (!list_empty_careful(&log->l_write_head.waiters)) {
                ASSERT(!(log->l_flags & XLOG_ACTIVE_RECOVERY));

                spin_lock(&log->l_write_head.lock);
                free_bytes = xlog_space_left(log, &log->l_write_head.grant);
                xlog_grant_head_wake(log, &log->l_write_head, &free_bytes);
                spin_unlock(&log->l_write_head.lock);
        }

        if (!list_empty_careful(&log->l_reserve_head.waiters)) {
                ASSERT(!(log->l_flags & XLOG_ACTIVE_RECOVERY));

                spin_lock(&log->l_reserve_head.lock);
                free_bytes = xlog_space_left(log, &log->l_reserve_head.grant);
                xlog_grant_head_wake(log, &log->l_reserve_head, &free_bytes);
                spin_unlock(&log->l_reserve_head.lock);
        }
}

/*
 * Determine if we have a transaction that has gone to disk that needs to be
 * covered. To begin the transition to the idle state, the log first needs to
 * be idle. That means the CIL, the AIL and the iclogs need to be empty before
 * we start attempting to cover the log.
 *
 * Only if we are then in a state where covering is needed is the caller
 * informed that dummy transactions are required to move the log into the idle
 * state.
 *
 * If there are any items in the AIL or CIL, then we do not want to attempt to
 * cover the log as we may be in a situation where there isn't log space
 * available to run a dummy transaction and this can lead to deadlocks when the
 * tail of the log is pinned by an item that is modified in the CIL.  Hence
 * there's no point in running a dummy transaction at this point because we
 * can't start trying to idle the log until both the CIL and AIL are empty.
 */
int
xfs_log_need_covered(xfs_mount_t *mp)
{
        struct xlog     *log = mp->m_log;
        int             needed = 0;

        if (!xfs_fs_writable(mp, SB_FREEZE_WRITE))
                return 0;

        if (!xlog_cil_empty(log))
                return 0;

        spin_lock(&log->l_icloglock);
        switch (log->l_covered_state) {
        case XLOG_STATE_COVER_DONE:
        case XLOG_STATE_COVER_DONE2:
        case XLOG_STATE_COVER_IDLE:
                break;
        case XLOG_STATE_COVER_NEED:
        case XLOG_STATE_COVER_NEED2:
                if (xfs_ail_min_lsn(log->l_ailp))
                        break;
                if (!xlog_iclogs_empty(log))
                        break;

                needed = 1;
                if (log->l_covered_state == XLOG_STATE_COVER_NEED)
                        log->l_covered_state = XLOG_STATE_COVER_DONE;
                else
                        log->l_covered_state = XLOG_STATE_COVER_DONE2;
                break;
        default:
                needed = 1;
                break;
        }
        spin_unlock(&log->l_icloglock);
        return needed;
}

/*
 * We may be holding the log iclog lock upon entering this routine.
 */
xfs_lsn_t
xlog_assign_tail_lsn_locked(
        struct xfs_mount        *mp)
{
        struct xlog             *log = mp->m_log;
        struct xfs_log_item     *lip;
        xfs_lsn_t               tail_lsn;

        assert_spin_locked(&mp->m_ail->xa_lock);

        /*
         * To make sure we always have a valid LSN for the log tail we keep
         * track of the last LSN which was committed in log->l_last_sync_lsn,
         * and use that when the AIL was empty.
         */
        lip = xfs_ail_min(mp->m_ail);
        if (lip)
                tail_lsn = lip->li_lsn;
        else
                tail_lsn = atomic64_read(&log->l_last_sync_lsn);
        trace_xfs_log_assign_tail_lsn(log, tail_lsn);
        atomic64_set(&log->l_tail_lsn, tail_lsn);
        return tail_lsn;
}

xfs_lsn_t
xlog_assign_tail_lsn(
        struct xfs_mount        *mp)
{
        xfs_lsn_t               tail_lsn;

        spin_lock(&mp->m_ail->xa_lock);
        tail_lsn = xlog_assign_tail_lsn_locked(mp);
        spin_unlock(&mp->m_ail->xa_lock);

        return tail_lsn;
}

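/*
 * Illustrative, user-space sketch (not part of this file): an xfs_lsn_t
 * packs a 32-bit cycle number in the high word and a 32-bit block number
 * in the low word, which is why the tail LSN above can be stored in and
 * read from a single atomic64_t. Helper names below are hypothetical.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

typedef int64_t xfs_lsn_t;

static xfs_lsn_t assign_lsn(uint32_t cycle, uint32_t block)
{
        return ((xfs_lsn_t)cycle << 32) | block;
}

int main(void)
{
        xfs_lsn_t lsn = assign_lsn(3, 1024);

        printf("cycle %u block %u\n",
               (uint32_t)(lsn >> 32), (uint32_t)lsn);   /* cycle 3 block 1024 */
        return 0;
}
#endif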
/*
 * Return the space in the log between the tail and the head.  The head
 * is passed in the cycle/bytes formal parms.  In the special case where
 * the reserve head has wrapped past the tail, this calculation is no
 * longer valid.  In this case, just return 0, which means there is no space
 * in the log.  This works for all places where this function is called
 * with the reserve head.  Of course, if the write head were to ever
 * wrap the tail, we should blow up.  Rather than catch this case here,
 * we depend on other ASSERTions in other parts of the code.   XXXmiken
 *
 * This code also handles the case where the reservation head is behind
 * the tail.  The details of this case are described below, but the end
 * result is that we return the size of the log as the amount of space left.
 */
STATIC int
xlog_space_left(
        struct xlog     *log,
        atomic64_t      *head)
{
        int             free_bytes;
        int             tail_bytes;
        int             tail_cycle;
        int             head_cycle;
        int             head_bytes;

        xlog_crack_grant_head(head, &head_cycle, &head_bytes);
        xlog_crack_atomic_lsn(&log->l_tail_lsn, &tail_cycle, &tail_bytes);
        tail_bytes = BBTOB(tail_bytes);
        if (tail_cycle == head_cycle && head_bytes >= tail_bytes)
                free_bytes = log->l_logsize - (head_bytes - tail_bytes);
        else if (tail_cycle + 1 < head_cycle)
                return 0;
        else if (tail_cycle < head_cycle) {
                ASSERT(tail_cycle == (head_cycle - 1));
                free_bytes = tail_bytes - head_bytes;
        } else {
                /*
                 * The reservation head is behind the tail.
                 * In this case we just want to return the size of the
                 * log as the amount of space left.
                 */
                xfs_alert(log->l_mp,
                        "xlog_space_left: head behind tail\n"
                        "  tail_cycle = %d, tail_bytes = %d\n"
                        "  GH   cycle = %d, GH   bytes = %d",
                        tail_cycle, tail_bytes, head_cycle, head_bytes);
                ASSERT(0);
                free_bytes = log->l_logsize;
        }
        return free_bytes;
}
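
/*
 * Illustrative, user-space sketch (not part of this file): the three cases
 * above, exercised with plain integers.  Assumes a 16MB log; the helper
 * name is hypothetical.
 */
#if 0
#include <assert.h>
#include <stdio.h>

#define LOGSIZE (16 * 1024 * 1024)

static int space_left(int tail_cycle, int tail_bytes,
                      int head_cycle, int head_bytes)
{
        if (tail_cycle == head_cycle)           /* same cycle: head >= tail */
                return LOGSIZE - (head_bytes - tail_bytes);
        if (tail_cycle + 1 < head_cycle)        /* head wrapped past tail */
                return 0;
        assert(tail_cycle == head_cycle - 1);   /* head one cycle ahead */
        return tail_bytes - head_bytes;
}

int main(void)
{
        /* head 1MB ahead of the tail within one cycle: LOGSIZE - 1MB free */
        printf("%d\n", space_left(5, 0, 5, 1024 * 1024));
        /* head wrapped into the next cycle, 1MB short of the tail */
        printf("%d\n", space_left(5, 2 * 1024 * 1024, 6, 1024 * 1024));
        return 0;
}
#endif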


/*
 * Log function which is called when an io completes.
 *
 * The log manager needs its own routine, in order to control what
 * happens with the buffer after the write completes.
 */
void
xlog_iodone(xfs_buf_t *bp)
{
        struct xlog_in_core     *iclog = bp->b_fspriv;
        struct xlog             *l = iclog->ic_log;
        int                     aborted = 0;

        /*
         * Race to shutdown the filesystem if we see an error.
         */
        if (XFS_TEST_ERROR(bp->b_error, l->l_mp,
                        XFS_ERRTAG_IODONE_IOERR, XFS_RANDOM_IODONE_IOERR)) {
                xfs_buf_ioerror_alert(bp, __func__);
                xfs_buf_stale(bp);
                xfs_force_shutdown(l->l_mp, SHUTDOWN_LOG_IO_ERROR);
                /*
                 * This flag will be propagated to the trans-committed
                 * callback routines to let them know that the log-commit
                 * didn't succeed.
                 */
                aborted = XFS_LI_ABORTED;
        } else if (iclog->ic_state & XLOG_STATE_IOERROR) {
                aborted = XFS_LI_ABORTED;
        }

        /* log I/O is always issued ASYNC */
        ASSERT(XFS_BUF_ISASYNC(bp));
        xlog_state_done_syncing(iclog, aborted);

        /*
         * drop the buffer lock now that we are done. Nothing references
         * the buffer after this, so an unmount waiting on this lock can now
         * tear it down safely. As such, it is unsafe to reference the buffer
         * (bp) after the unlock as we could race with it being freed.
         */
        xfs_buf_unlock(bp);
}

/*
 * Return size of each in-core log record buffer.
 *
 * All machines get 8 x 32kB buffers by default, unless tuned otherwise.
 *
 * If the filesystem blocksize is too large, we may need to choose a
 * larger size since the directory code currently logs entire blocks.
 */

STATIC void
xlog_get_iclog_buffer_size(
        struct xfs_mount        *mp,
        struct xlog             *log)
{
        int size;
        int xhdrs;

        if (mp->m_logbufs <= 0)
                log->l_iclog_bufs = XLOG_MAX_ICLOGS;
        else
                log->l_iclog_bufs = mp->m_logbufs;

        /*
         * Buffer size passed in from mount system call.
         */
        if (mp->m_logbsize > 0) {
                size = log->l_iclog_size = mp->m_logbsize;
                log->l_iclog_size_log = 0;
                while (size != 1) {
                        log->l_iclog_size_log++;
                        size >>= 1;
                }

                if (xfs_sb_version_haslogv2(&mp->m_sb)) {
                        /* # headers = size / 32k
                         * one header holds cycles from 32k of data
                         */

                        xhdrs = mp->m_logbsize / XLOG_HEADER_CYCLE_SIZE;
                        if (mp->m_logbsize % XLOG_HEADER_CYCLE_SIZE)
                                xhdrs++;
                        log->l_iclog_hsize = xhdrs << BBSHIFT;
                        log->l_iclog_heads = xhdrs;
                } else {
                        ASSERT(mp->m_logbsize <= XLOG_BIG_RECORD_BSIZE);
                        log->l_iclog_hsize = BBSIZE;
                        log->l_iclog_heads = 1;
                }
                goto done;
        }

        /* All machines use 32kB buffers by default. */
        log->l_iclog_size = XLOG_BIG_RECORD_BSIZE;
        log->l_iclog_size_log = XLOG_BIG_RECORD_BSHIFT;

        /* the default log size is 16k or 32k which is one header sector */
        log->l_iclog_hsize = BBSIZE;
        log->l_iclog_heads = 1;

done:
        /* are we being asked to make the sizes selected above visible? */
        if (mp->m_logbufs == 0)
                mp->m_logbufs = log->l_iclog_bufs;
        if (mp->m_logbsize == 0)
                mp->m_logbsize = log->l_iclog_size;
}       /* xlog_get_iclog_buffer_size */
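
/*
 * Illustrative, user-space sketch (not part of this file): for v2 logs the
 * header count computed above is just a round-up division of the buffer
 * size by the 32k of data each header covers.  The constant value is
 * assumed from the kernel's XLOG_HEADER_CYCLE_SIZE.
 */
#if 0
#include <stdio.h>

#define HEADER_CYCLE_SIZE (32 * 1024)   /* bytes of data covered per header */

int main(void)
{
        int logbsize = 256 * 1024;      /* assumed 256k log buffer */
        int xhdrs = (logbsize + HEADER_CYCLE_SIZE - 1) / HEADER_CYCLE_SIZE;

        printf("%d headers\n", xhdrs);  /* 8 */
        return 0;
}
#endif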


void
xfs_log_work_queue(
        struct xfs_mount        *mp)
{
        queue_delayed_work(mp->m_log_workqueue, &mp->m_log->l_work,
                                msecs_to_jiffies(xfs_syncd_centisecs * 10));
}

/*
 * Every sync period we need to unpin all items in the AIL and push them to
 * disk. If there is nothing dirty, then we might need to cover the log to
 * indicate that the filesystem is idle.
 */
void
xfs_log_worker(
        struct work_struct      *work)
{
        struct xlog             *log = container_of(to_delayed_work(work),
                                                struct xlog, l_work);
        struct xfs_mount        *mp = log->l_mp;

        /* dgc: errors ignored - not fatal and nowhere to report them */
        if (xfs_log_need_covered(mp))
                xfs_fs_log_dummy(mp);
        else
                xfs_log_force(mp, 0);

        /* start pushing all the metadata that is currently dirty */
        xfs_ail_push_all(mp->m_ail);

        /* queue us up again */
        xfs_log_work_queue(mp);
}

/*
 * This routine initializes some of the log structure for a given mount point.
 * Its primary purpose is to fill in enough, so recovery can occur.  However,
 * some other stuff may be filled in too.
 */
STATIC struct xlog *
xlog_alloc_log(
        struct xfs_mount        *mp,
        struct xfs_buftarg      *log_target,
        xfs_daddr_t             blk_offset,
        int                     num_bblks)
{
        struct xlog             *log;
        xlog_rec_header_t       *head;
        xlog_in_core_t          **iclogp;
        xlog_in_core_t          *iclog, *prev_iclog=NULL;
        xfs_buf_t               *bp;
        int                     i;
        int                     error = -ENOMEM;
        uint                    log2_size = 0;

        log = kmem_zalloc(sizeof(struct xlog), KM_MAYFAIL);
        if (!log) {
                xfs_warn(mp, "Log allocation failed: No memory!");
                goto out;
        }

        log->l_mp          = mp;
        log->l_targ        = log_target;
        log->l_logsize     = BBTOB(num_bblks);
        log->l_logBBstart  = blk_offset;
        log->l_logBBsize   = num_bblks;
        log->l_covered_state = XLOG_STATE_COVER_IDLE;
        log->l_flags       |= XLOG_ACTIVE_RECOVERY;
        INIT_DELAYED_WORK(&log->l_work, xfs_log_worker);

        log->l_prev_block  = -1;
        /* log->l_tail_lsn = 0x100000000LL; cycle = 1; current block = 0 */
        xlog_assign_atomic_lsn(&log->l_tail_lsn, 1, 0);
        xlog_assign_atomic_lsn(&log->l_last_sync_lsn, 1, 0);
        log->l_curr_cycle  = 1;     /* 0 is bad since this is initial value */

        xlog_grant_head_init(&log->l_reserve_head);
        xlog_grant_head_init(&log->l_write_head);

        error = -EFSCORRUPTED;
        if (xfs_sb_version_hassector(&mp->m_sb)) {
                log2_size = mp->m_sb.sb_logsectlog;
                if (log2_size < BBSHIFT) {
                        xfs_warn(mp, "Log sector size too small (0x%x < 0x%x)",
                                log2_size, BBSHIFT);
                        goto out_free_log;
                }

                log2_size -= BBSHIFT;
                if (log2_size > mp->m_sectbb_log) {
                        xfs_warn(mp, "Log sector size too large (0x%x > 0x%x)",
                                log2_size, mp->m_sectbb_log);
                        goto out_free_log;
                }

                /* for larger sector sizes, must have v2 or external log */
                if (log2_size && log->l_logBBstart > 0 &&
                            !xfs_sb_version_haslogv2(&mp->m_sb)) {
                        xfs_warn(mp,
                "log sector size (0x%x) invalid for configuration.",
                                log2_size);
                        goto out_free_log;
                }
        }
        log->l_sectBBsize = 1 << log2_size;

        xlog_get_iclog_buffer_size(mp, log);

        /*
         * Use a NULL block for the extra log buffer used during splits so that
         * it will trigger errors if we ever try to do IO on it without first
         * having set it up properly.
         */
        error = -ENOMEM;
        bp = xfs_buf_alloc(mp->m_logdev_targp, XFS_BUF_DADDR_NULL,
                           BTOBB(log->l_iclog_size), 0);
        if (!bp)
                goto out_free_log;

        /*
         * The iclogbuf buffer locks are held over IO but we are not going to do
         * IO yet.  Hence unlock the buffer so that the log IO path can grab it
         * when appropriate.
         */
        ASSERT(xfs_buf_islocked(bp));
        xfs_buf_unlock(bp);

        bp->b_iodone = xlog_iodone;
        log->l_xbuf = bp;

        spin_lock_init(&log->l_icloglock);
        init_waitqueue_head(&log->l_flush_wait);

        iclogp = &log->l_iclog;
        /*
         * The amount of memory to allocate for the iclog structure is
         * rather funky due to the way the structure is defined.  It is
         * done this way so that we can use different sizes for machines
         * with different amounts of memory.  See the definition of
         * xlog_in_core_t in xfs_log_priv.h for details.
         */
        ASSERT(log->l_iclog_size >= 4096);
        for (i=0; i < log->l_iclog_bufs; i++) {
                *iclogp = kmem_zalloc(sizeof(xlog_in_core_t), KM_MAYFAIL);
                if (!*iclogp)
                        goto out_free_iclog;

                iclog = *iclogp;
                iclog->ic_prev = prev_iclog;
                prev_iclog = iclog;

                bp = xfs_buf_get_uncached(mp->m_logdev_targp,
                                                BTOBB(log->l_iclog_size), 0);
                if (!bp)
                        goto out_free_iclog;

                ASSERT(xfs_buf_islocked(bp));
                xfs_buf_unlock(bp);

                bp->b_iodone = xlog_iodone;
                iclog->ic_bp = bp;
                iclog->ic_data = bp->b_addr;
#ifdef DEBUG
                log->l_iclog_bak[i] = (xfs_caddr_t)&(iclog->ic_header);
#endif
                head = &iclog->ic_header;
                memset(head, 0, sizeof(xlog_rec_header_t));
                head->h_magicno = cpu_to_be32(XLOG_HEADER_MAGIC_NUM);
                head->h_version = cpu_to_be32(
                        xfs_sb_version_haslogv2(&log->l_mp->m_sb) ? 2 : 1);
                head->h_size = cpu_to_be32(log->l_iclog_size);
                /* new fields */
                head->h_fmt = cpu_to_be32(XLOG_FMT);
                memcpy(&head->h_fs_uuid, &mp->m_sb.sb_uuid, sizeof(uuid_t));

                iclog->ic_size = BBTOB(bp->b_length) - log->l_iclog_hsize;
                iclog->ic_state = XLOG_STATE_ACTIVE;
                iclog->ic_log = log;
                atomic_set(&iclog->ic_refcnt, 0);
                spin_lock_init(&iclog->ic_callback_lock);
                iclog->ic_callback_tail = &(iclog->ic_callback);
                iclog->ic_datap = (char *)iclog->ic_data + log->l_iclog_hsize;

                init_waitqueue_head(&iclog->ic_force_wait);
                init_waitqueue_head(&iclog->ic_write_wait);

                iclogp = &iclog->ic_next;
        }
        *iclogp = log->l_iclog;                 /* complete ring */
        log->l_iclog->ic_prev = prev_iclog;     /* re-write 1st prev ptr */

        error = xlog_cil_init(log);
        if (error)
                goto out_free_iclog;
        return log;

out_free_iclog:
        for (iclog = log->l_iclog; iclog; iclog = prev_iclog) {
                prev_iclog = iclog->ic_next;
                if (iclog->ic_bp)
                        xfs_buf_free(iclog->ic_bp);
                kmem_free(iclog);
        }
        spinlock_destroy(&log->l_icloglock);
        xfs_buf_free(log->l_xbuf);
out_free_log:
        kmem_free(log);
out:
        return ERR_PTR(error);
}       /* xlog_alloc_log */

1482
1483 /*
1484  * Write out the commit record of a transaction associated with the given
1485  * ticket.  Return the lsn of the commit record.
1486  */
1487 STATIC int
1488 xlog_commit_record(
1489         struct xlog             *log,
1490         struct xlog_ticket      *ticket,
1491         struct xlog_in_core     **iclog,
1492         xfs_lsn_t               *commitlsnp)
1493 {
1494         struct xfs_mount *mp = log->l_mp;
1495         int     error;
1496         struct xfs_log_iovec reg = {
1497                 .i_addr = NULL,
1498                 .i_len = 0,
1499                 .i_type = XLOG_REG_TYPE_COMMIT,
1500         };
1501         struct xfs_log_vec vec = {
1502                 .lv_niovecs = 1,
1503                 .lv_iovecp = &reg,
1504         };
1505
1506         ASSERT_ALWAYS(iclog);
1507         error = xlog_write(log, &vec, ticket, commitlsnp, iclog,
1508                                         XLOG_COMMIT_TRANS);
1509         if (error)
1510                 xfs_force_shutdown(mp, SHUTDOWN_LOG_IO_ERROR);
1511         return error;
1512 }
1513
1514 /*
1515  * Push on the buffer cache code if we ever use more than 75% of the on-disk
1516  * log space.  This code pushes on the lsn which would supposedly free up
1517  * the 25% which we want to leave free.  We may need to adopt a policy which
1518  * pushes on an lsn which is further along in the log once we reach the high
1519  * water mark.  In this manner, we would be creating a low water mark.
1520  */
1521 STATIC void
1522 xlog_grant_push_ail(
1523         struct xlog     *log,
1524         int             need_bytes)
1525 {
1526         xfs_lsn_t       threshold_lsn = 0;
1527         xfs_lsn_t       last_sync_lsn;
1528         int             free_blocks;
1529         int             free_bytes;
1530         int             threshold_block;
1531         int             threshold_cycle;
1532         int             free_threshold;
1533
1534         ASSERT(BTOBB(need_bytes) < log->l_logBBsize);
1535
1536         free_bytes = xlog_space_left(log, &log->l_reserve_head.grant);
1537         free_blocks = BTOBBT(free_bytes);
1538
1539         /*
1540          * Set the threshold for the minimum number of free blocks in the
1541          * log to the maximum of what the caller needs, one quarter of the
1542          * log, and 256 blocks.
1543          */
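        /*
         * Worked example (illustrative numbers, not taken from any real
         * system): for a 200MB log, l_logBBsize is 409600 basic blocks, so
         * one quarter is 102400 BBs; a need_bytes of 64KB is only 128 BBs,
         * so free_threshold becomes 102400 BBs and we push the AIL whenever
         * less than a quarter of the log is free.
         */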
1544         free_threshold = BTOBB(need_bytes);
1545         free_threshold = MAX(free_threshold, (log->l_logBBsize >> 2));
1546         free_threshold = MAX(free_threshold, 256);
1547         if (free_blocks >= free_threshold)
1548                 return;
1549
1550         xlog_crack_atomic_lsn(&log->l_tail_lsn, &threshold_cycle,
1551                                                 &threshold_block);
1552         threshold_block += free_threshold;
1553         if (threshold_block >= log->l_logBBsize) {
1554                 threshold_block -= log->l_logBBsize;
1555                 threshold_cycle += 1;
1556         }
1557         threshold_lsn = xlog_assign_lsn(threshold_cycle,
1558                                         threshold_block);
1559         /*
1560          * Don't pass in an lsn greater than the lsn of the last
1561          * log record known to be on disk. Use a snapshot of the last sync lsn
1562          * so that it doesn't change between the compare and the set.
1563          */
1564         last_sync_lsn = atomic64_read(&log->l_last_sync_lsn);
1565         if (XFS_LSN_CMP(threshold_lsn, last_sync_lsn) > 0)
1566                 threshold_lsn = last_sync_lsn;
1567
1568         /*
1569          * Get the transaction layer to kick the dirty buffers out to
1570          * disk asynchronously. No point in trying to do this if
1571          * the filesystem is shutting down.
1572          */
1573         if (!XLOG_FORCED_SHUTDOWN(log))
1574                 xfs_ail_push(log->l_ailp, threshold_lsn);
1575 }
1576
1577 /*
1578  * Stamp cycle number in every block
1579  */
1580 STATIC void
1581 xlog_pack_data(
1582         struct xlog             *log,
1583         struct xlog_in_core     *iclog,
1584         int                     roundoff)
1585 {
1586         int                     i, j, k;
1587         int                     size = iclog->ic_offset + roundoff;
1588         __be32                  cycle_lsn;
1589         xfs_caddr_t             dp;
1590
1591         cycle_lsn = CYCLE_LSN_DISK(iclog->ic_header.h_lsn);
1592
1593         dp = iclog->ic_datap;
1594         for (i = 0; i < BTOBB(size); i++) {
1595                 if (i >= (XLOG_HEADER_CYCLE_SIZE / BBSIZE))
1596                         break;
1597                 iclog->ic_header.h_cycle_data[i] = *(__be32 *)dp;
1598                 *(__be32 *)dp = cycle_lsn;
1599                 dp += BBSIZE;
1600         }
1601
1602         if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
1603                 xlog_in_core_2_t *xhdr = iclog->ic_data;
1604
1605                 for ( ; i < BTOBB(size); i++) {
1606                         j = i / (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
1607                         k = i % (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
1608                         xhdr[j].hic_xheader.xh_cycle_data[k] = *(__be32 *)dp;
1609                         *(__be32 *)dp = cycle_lsn;
1610                         dp += BBSIZE;
1611                 }
1612
1613                 for (i = 1; i < log->l_iclog_heads; i++)
1614                         xhdr[i].hic_xheader.xh_cycle = cycle_lsn;
1615         }
1616 }
1617
1618 /*
1619  * Calculate the checksum for a log buffer.
1620  *
1621  * This is a little more complicated than it should be because the various
1622  * headers and the actual data are non-contiguous.
1623  */
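/*
 * A rough sketch of the composition below, assuming a v2 log with two
 * extended headers (the number of headers is illustrative):
 *
 *	crc = crc32c(seed, record header with h_crc treated as zero)
 *	crc = crc32c(crc, extended header 1)		v2 logs only
 *	crc = crc32c(crc, extended header 2)		v2 logs only
 *	crc = crc32c(crc, payload dp[0 .. size))
 *	h_crc = xfs_end_cksum(crc)
 */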
1624 __le32
1625 xlog_cksum(
1626         struct xlog             *log,
1627         struct xlog_rec_header  *rhead,
1628         char                    *dp,
1629         int                     size)
1630 {
1631         __uint32_t              crc;
1632
1633         /* first generate the crc for the record header ... */
1634         crc = xfs_start_cksum((char *)rhead,
1635                               sizeof(struct xlog_rec_header),
1636                               offsetof(struct xlog_rec_header, h_crc));
1637
1638         /* ... then for additional cycle data for v2 logs ... */
1639         if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
1640                 union xlog_in_core2 *xhdr = (union xlog_in_core2 *)rhead;
1641                 int             i;
1642
1643                 for (i = 1; i < log->l_iclog_heads; i++) {
1644                         crc = crc32c(crc, &xhdr[i].hic_xheader,
1645                                      sizeof(struct xlog_rec_ext_header));
1646                 }
1647         }
1648
1649         /* ... and finally for the payload */
1650         crc = crc32c(crc, dp, size);
1651
1652         return xfs_end_cksum(crc);
1653 }
1654
1655 /*
1656  * The bdstrat callback function for log bufs. This gives us a central
1657  * place to trap bufs in case we get hit by a log I/O error and need to
1658  * shut down. Actually, in practice, even when we didn't get a log error,
1659  * we transition the iclogs to IOERROR state *after* flushing all existing
1660  * iclogs to disk. This is because we don't want any more new transactions to be
1661  * started or completed afterwards.
1662  *
1663  * We lock the iclogbufs here so that we can serialise against IO completion
1664  * during unmount. We might be processing a shutdown triggered during unmount,
1665  * and that can occur asynchronously to the unmount thread, so we need to
1666  * ensure that it completes before tearing down the iclogbufs. Hence we need to
1667  * hold the buffer lock across the log IO to achieve that.
1668  */
1669 STATIC int
1670 xlog_bdstrat(
1671         struct xfs_buf          *bp)
1672 {
1673         struct xlog_in_core     *iclog = bp->b_fspriv;
1674
1675         xfs_buf_lock(bp);
1676         if (iclog->ic_state & XLOG_STATE_IOERROR) {
1677                 xfs_buf_ioerror(bp, -EIO);
1678                 xfs_buf_stale(bp);
1679                 xfs_buf_ioend(bp);
1680                 /*
1681                  * It would seem logical to return EIO here, but we rely on
1682                  * the log state machine to propagate I/O errors instead of
1683                  * doing it here. Similarly, IO completion will unlock the
1684                  * buffer, so we don't do it here.
1685                  */
1686                 return 0;
1687         }
1688
1689         xfs_buf_submit(bp);
1690         return 0;
1691 }
1692
1693 /*
1694  * Flush out the in-core log (iclog) to the on-disk log in an asynchronous
1695  * fashion.  The caller should previously have moved the current iclog
1696  * ptr in the log to point to the next available iclog.  This allows further
1697  * writes to continue while this code syncs out an iclog ready to go.
1698  * Before an in-core log can be written out, the data section must be scanned
1699  * to save away the 1st word of each BBSIZE block into the header.  We replace
1700  * it with the current cycle count.  Each BBSIZE block is tagged with the
1701  * cycle count because there is an implicit assumption that drives will
1702  * guarantee that entire 512 byte blocks get written at once.  In other words,
1703  * we can't have part of a 512 byte block written and part not written.  By
1704  * tagging each block, we will know which blocks are valid when recovering
1705  * after an unclean shutdown.
1706  *
1707  * This routine is single threaded on the iclog.  No other thread can be in
1708  * this routine with the same iclog.  Changing the contents of the iclog can
1709  * therefore be done without grabbing the state machine lock.  Updating the global
1710  * log will require grabbing the lock though.
1711  *
1712  * The entire log manager uses a logical block numbering scheme.  Only
1713  * log_sync (and then only bwrite()) knows about the fact that the log may
1714  * not start with block zero on a given device.  The log block start offset
1715  * is added immediately before calling bwrite().
1716  */
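/*
 * Illustrative example of the cycle stamping described above (values are
 * made up): with a current cycle of 7, the first __be32 of each 512 byte
 * block in the data section, say 0xfeedbeef, is saved into h_cycle_data[]
 * in the record header and overwritten with cpu_to_be32(7).  On recovery,
 * a block whose first word does not read back as cycle 7 was never fully
 * written; for blocks that were, the saved word is put back in place.
 */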
1717
1718 STATIC int
1719 xlog_sync(
1720         struct xlog             *log,
1721         struct xlog_in_core     *iclog)
1722 {
1723         xfs_buf_t       *bp;
1724         int             i;
1725         uint            count;          /* byte count of bwrite */
1726         uint            count_init;     /* initial count before roundup */
1727         int             roundoff;       /* roundoff to BB or stripe */
1728         int             split = 0;      /* split write into two regions */
1729         int             error;
1730         int             v2 = xfs_sb_version_haslogv2(&log->l_mp->m_sb);
1731         int             size;
1732
1733         XFS_STATS_INC(xs_log_writes);
1734         ASSERT(atomic_read(&iclog->ic_refcnt) == 0);
1735
1736         /* Add for LR header */
1737         count_init = log->l_iclog_hsize + iclog->ic_offset;
1738
1739         /* Round out the log write size */
1740         if (v2 && log->l_mp->m_sb.sb_logsunit > 1) {
1741                 /* we have a v2 stripe unit to use */
1742                 count = XLOG_LSUNITTOB(log, XLOG_BTOLSUNIT(log, count_init));
1743         } else {
1744                 count = BBTOB(BTOBB(count_init));
1745         }
1746         roundoff = count - count_init;
1747         ASSERT(roundoff >= 0);
1748         ASSERT((v2 && log->l_mp->m_sb.sb_logsunit > 1 && 
1749                 roundoff < log->l_mp->m_sb.sb_logsunit)
1750                 || 
1751                 (log->l_mp->m_sb.sb_logsunit <= 1 && 
1752                  roundoff < BBTOB(1)));
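        /*
         * Example of the round up (hypothetical sizes): with a v2 log and a
         * 4096 byte stripe unit, a count_init of 5000 bytes rounds up to
         * count = 8192 and roundoff = 3192; without a stripe unit the same
         * write rounds to the 512 byte basic block, so count = 5120 and
         * roundoff = 120.
         */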
1753
1754         /* move grant heads by roundoff in sync */
1755         xlog_grant_add_space(log, &log->l_reserve_head.grant, roundoff);
1756         xlog_grant_add_space(log, &log->l_write_head.grant, roundoff);
1757
1758         /* put cycle number in every block */
1759         xlog_pack_data(log, iclog, roundoff); 
1760
1761         /* real byte length */
1762         size = iclog->ic_offset;
1763         if (v2)
1764                 size += roundoff;
1765         iclog->ic_header.h_len = cpu_to_be32(size);
1766
1767         bp = iclog->ic_bp;
1768         XFS_BUF_SET_ADDR(bp, BLOCK_LSN(be64_to_cpu(iclog->ic_header.h_lsn)));
1769
1770         XFS_STATS_ADD(xs_log_blocks, BTOBB(count));
1771
1772         /* Do we need to split this write into 2 parts? */
1773         if (XFS_BUF_ADDR(bp) + BTOBB(count) > log->l_logBBsize) {
1774                 char            *dptr;
1775
1776                 split = count - (BBTOB(log->l_logBBsize - XFS_BUF_ADDR(bp)));
1777                 count = BBTOB(log->l_logBBsize - XFS_BUF_ADDR(bp));
1778                 iclog->ic_bwritecnt = 2;
1779
1780                 /*
1781                  * Bump the cycle numbers at the start of each block in the
1782                  * part of the iclog that ends up in the buffer that gets
1783                  * written to the start of the log.
1784                  *
1785                  * Watch out for the header magic number case, though.
1786                  */
1787                 dptr = (char *)&iclog->ic_header + count;
1788                 for (i = 0; i < split; i += BBSIZE) {
1789                         __uint32_t cycle = be32_to_cpu(*(__be32 *)dptr);
1790                         if (++cycle == XLOG_HEADER_MAGIC_NUM)
1791                                 cycle++;
1792                         *(__be32 *)dptr = cpu_to_be32(cycle);
1793
1794                         dptr += BBSIZE;
1795                 }
1796         } else {
1797                 iclog->ic_bwritecnt = 1;
1798         }
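        /*
         * Hypothetical split example: on a 1000 BB log, a 16 BB write
         * starting at BB 996 goes out as two IOs - 4 BBs (996-999) through
         * iclog->ic_bp here, and the remaining 12 BBs at BB 0 through the
         * spare l_xbuf buffer below.
         */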
1799
1800         /* calculate the checksum */
1801         iclog->ic_header.h_crc = xlog_cksum(log, &iclog->ic_header,
1802                                             iclog->ic_datap, size);
1803
1804         bp->b_io_length = BTOBB(count);
1805         bp->b_fspriv = iclog;
1806         XFS_BUF_ZEROFLAGS(bp);
1807         XFS_BUF_ASYNC(bp);
1808         bp->b_flags |= XBF_SYNCIO;
1809
1810         if (log->l_mp->m_flags & XFS_MOUNT_BARRIER) {
1811                 bp->b_flags |= XBF_FUA;
1812
1813                 /*
1814                  * Flush the data device before flushing the log to make
1815                  * sure all meta data written back from the AIL actually made
1816                  * it to disk before stamping the new log tail LSN into the
1817                  * log buffer.  For an external log we need to issue the
1818                  * flush explicitly, and unfortunately synchronously here;
1819                  * for an internal log we can simply use the block layer
1820                  * state machine for preflushes.
1821                  */
1822                 if (log->l_mp->m_logdev_targp != log->l_mp->m_ddev_targp)
1823                         xfs_blkdev_issue_flush(log->l_mp->m_ddev_targp);
1824                 else
1825                         bp->b_flags |= XBF_FLUSH;
1826         }
1827
1828         ASSERT(XFS_BUF_ADDR(bp) <= log->l_logBBsize-1);
1829         ASSERT(XFS_BUF_ADDR(bp) + BTOBB(count) <= log->l_logBBsize);
1830
1831         xlog_verify_iclog(log, iclog, count, true);
1832
1833         /* account for log which doesn't start at block #0 */
1834         XFS_BUF_SET_ADDR(bp, XFS_BUF_ADDR(bp) + log->l_logBBstart);
1835         /*
1836          * Don't call xfs_bwrite here. We do log-syncs even when the filesystem
1837          * is shutting down.
1838          */
1839         XFS_BUF_WRITE(bp);
1840
1841         error = xlog_bdstrat(bp);
1842         if (error) {
1843                 xfs_buf_ioerror_alert(bp, "xlog_sync");
1844                 return error;
1845         }
1846         if (split) {
1847                 bp = iclog->ic_log->l_xbuf;
1848                 XFS_BUF_SET_ADDR(bp, 0);             /* logical 0 */
1849                 xfs_buf_associate_memory(bp,
1850                                 (char *)&iclog->ic_header + count, split);
1851                 bp->b_fspriv = iclog;
1852                 XFS_BUF_ZEROFLAGS(bp);
1853                 XFS_BUF_ASYNC(bp);
1854                 bp->b_flags |= XBF_SYNCIO;
1855                 if (log->l_mp->m_flags & XFS_MOUNT_BARRIER)
1856                         bp->b_flags |= XBF_FUA;
1857
1858                 ASSERT(XFS_BUF_ADDR(bp) <= log->l_logBBsize-1);
1859                 ASSERT(XFS_BUF_ADDR(bp) + BTOBB(count) <= log->l_logBBsize);
1860
1861                 /* account for internal log which doesn't start at block #0 */
1862                 XFS_BUF_SET_ADDR(bp, XFS_BUF_ADDR(bp) + log->l_logBBstart);
1863                 XFS_BUF_WRITE(bp);
1864                 error = xlog_bdstrat(bp);
1865                 if (error) {
1866                         xfs_buf_ioerror_alert(bp, "xlog_sync (split)");
1867                         return error;
1868                 }
1869         }
1870         return 0;
1871 }       /* xlog_sync */
1872
1873 /*
1874  * Deallocate a log structure
1875  */
1876 STATIC void
1877 xlog_dealloc_log(
1878         struct xlog     *log)
1879 {
1880         xlog_in_core_t  *iclog, *next_iclog;
1881         int             i;
1882
1883         xlog_cil_destroy(log);
1884
1885         /*
1886          * Cycle all the iclogbuf locks to make sure all log IO completion
1887          * is done before we tear down these buffers.
1888          */
1889         iclog = log->l_iclog;
1890         for (i = 0; i < log->l_iclog_bufs; i++) {
1891                 xfs_buf_lock(iclog->ic_bp);
1892                 xfs_buf_unlock(iclog->ic_bp);
1893                 iclog = iclog->ic_next;
1894         }
1895
1896         /*
1897          * Always need to ensure that the extra buffer does not point to memory
1898          * owned by another log buffer before we free it. Also, cycle the lock
1899          * first to ensure we've completed IO on it.
1900          */
1901         xfs_buf_lock(log->l_xbuf);
1902         xfs_buf_unlock(log->l_xbuf);
1903         xfs_buf_set_empty(log->l_xbuf, BTOBB(log->l_iclog_size));
1904         xfs_buf_free(log->l_xbuf);
1905
1906         iclog = log->l_iclog;
1907         for (i = 0; i < log->l_iclog_bufs; i++) {
1908                 xfs_buf_free(iclog->ic_bp);
1909                 next_iclog = iclog->ic_next;
1910                 kmem_free(iclog);
1911                 iclog = next_iclog;
1912         }
1913         spinlock_destroy(&log->l_icloglock);
1914
1915         log->l_mp->m_log = NULL;
1916         kmem_free(log);
1917 }       /* xlog_dealloc_log */
1918
1919 /*
1920  * Update counters atomically now that memcpy is done.
1921  */
1922 /* ARGSUSED */
1923 static inline void
1924 xlog_state_finish_copy(
1925         struct xlog             *log,
1926         struct xlog_in_core     *iclog,
1927         int                     record_cnt,
1928         int                     copy_bytes)
1929 {
1930         spin_lock(&log->l_icloglock);
1931
1932         be32_add_cpu(&iclog->ic_header.h_num_logops, record_cnt);
1933         iclog->ic_offset += copy_bytes;
1934
1935         spin_unlock(&log->l_icloglock);
1936 }       /* xlog_state_finish_copy */
1937
1938
1939
1940
1941 /*
1942  * print out info relating to regions written which consume
1943  * the reservation
1944  */
1945 void
1946 xlog_print_tic_res(
1947         struct xfs_mount        *mp,
1948         struct xlog_ticket      *ticket)
1949 {
1950         uint i;
1951         uint ophdr_spc = ticket->t_res_num_ophdrs * (uint)sizeof(xlog_op_header_t);
1952
1953         /* match with XLOG_REG_TYPE_* in xfs_log.h */
1954         static char *res_type_str[XLOG_REG_TYPE_MAX] = {
1955             "bformat",
1956             "bchunk",
1957             "efi_format",
1958             "efd_format",
1959             "iformat",
1960             "icore",
1961             "iext",
1962             "ibroot",
1963             "ilocal",
1964             "iattr_ext",
1965             "iattr_broot",
1966             "iattr_local",
1967             "qformat",
1968             "dquot",
1969             "quotaoff",
1970             "LR header",
1971             "unmount",
1972             "commit",
1973             "trans header"
1974         };
1975         static char *trans_type_str[XFS_TRANS_TYPE_MAX] = {
1976             "SETATTR_NOT_SIZE",
1977             "SETATTR_SIZE",
1978             "INACTIVE",
1979             "CREATE",
1980             "CREATE_TRUNC",
1981             "TRUNCATE_FILE",
1982             "REMOVE",
1983             "LINK",
1984             "RENAME",
1985             "MKDIR",
1986             "RMDIR",
1987             "SYMLINK",
1988             "SET_DMATTRS",
1989             "GROWFS",
1990             "STRAT_WRITE",
1991             "DIOSTRAT",
1992             "WRITE_SYNC",
1993             "WRITEID",
1994             "ADDAFORK",
1995             "ATTRINVAL",
1996             "ATRUNCATE",
1997             "ATTR_SET",
1998             "ATTR_RM",
1999             "ATTR_FLAG",
2000             "CLEAR_AGI_BUCKET",
2001             "QM_SBCHANGE",
2002             "DUMMY1",
2003             "DUMMY2",
2004             "QM_QUOTAOFF",
2005             "QM_DQALLOC",
2006             "QM_SETQLIM",
2007             "QM_DQCLUSTER",
2008             "QM_QINOCREATE",
2009             "QM_QUOTAOFF_END",
2010             "SB_UNIT",
2011             "FSYNC_TS",
2012             "GROWFSRT_ALLOC",
2013             "GROWFSRT_ZERO",
2014             "GROWFSRT_FREE",
2015             "SWAPEXT"
2016         };
2017
2018         xfs_warn(mp,
2019                 "xlog_write: reservation summary:\n"
2020                 "  trans type  = %s (%u)\n"
2021                 "  unit res    = %d bytes\n"
2022                 "  current res = %d bytes\n"
2023                 "  total reg   = %u bytes (o/flow = %u bytes)\n"
2024                 "  ophdrs      = %u (ophdr space = %u bytes)\n"
2025                 "  ophdr + reg = %u bytes\n"
2026                 "  num regions = %u\n",
2027                 ((ticket->t_trans_type <= 0 ||
2028                   ticket->t_trans_type > XFS_TRANS_TYPE_MAX) ?
2029                   "bad-trans-type" : trans_type_str[ticket->t_trans_type-1]),
2030                 ticket->t_trans_type,
2031                 ticket->t_unit_res,
2032                 ticket->t_curr_res,
2033                 ticket->t_res_arr_sum, ticket->t_res_o_flow,
2034                 ticket->t_res_num_ophdrs, ophdr_spc,
2035                 ticket->t_res_arr_sum +
2036                 ticket->t_res_o_flow + ophdr_spc,
2037                 ticket->t_res_num);
2038
2039         for (i = 0; i < ticket->t_res_num; i++) {
2040                 uint r_type = ticket->t_res_arr[i].r_type;
2041                 xfs_warn(mp, "region[%u]: %s - %u bytes", i,
2042                             ((r_type <= 0 || r_type > XLOG_REG_TYPE_MAX) ?
2043                             "bad-rtype" : res_type_str[r_type-1]),
2044                             ticket->t_res_arr[i].r_len);
2045         }
2046
2047         xfs_alert_tag(mp, XFS_PTAG_LOGRES,
2048                 "xlog_write: reservation ran out. Need to up reservation");
2049         xfs_force_shutdown(mp, SHUTDOWN_LOG_IO_ERROR);
2050 }
2051
2052 /*
2053  * Calculate the potential space needed by the log vector.  Each region gets
2054  * its own xlog_op_header_t and may need to be double word aligned.
2055  */
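/*
 * For example (hypothetical sizes): one log vector with two regions of 128
 * and 256 bytes, written under a still-INITED ticket, yields headers = 3
 * (the start record plus one op header per region) and
 * len = 128 + 256 + 3 * sizeof(struct xlog_op_header).
 */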
2056 static int
2057 xlog_write_calc_vec_length(
2058         struct xlog_ticket      *ticket,
2059         struct xfs_log_vec      *log_vector)
2060 {
2061         struct xfs_log_vec      *lv;
2062         int                     headers = 0;
2063         int                     len = 0;
2064         int                     i;
2065
2066         /* acct for start rec of xact */
2067         if (ticket->t_flags & XLOG_TIC_INITED)
2068                 headers++;
2069
2070         for (lv = log_vector; lv; lv = lv->lv_next) {
2071                 /* we don't write ordered log vectors */
2072                 if (lv->lv_buf_len == XFS_LOG_VEC_ORDERED)
2073                         continue;
2074
2075                 headers += lv->lv_niovecs;
2076
2077                 for (i = 0; i < lv->lv_niovecs; i++) {
2078                         struct xfs_log_iovec    *vecp = &lv->lv_iovecp[i];
2079
2080                         len += vecp->i_len;
2081                         xlog_tic_add_region(ticket, vecp->i_len, vecp->i_type);
2082                 }
2083         }
2084
2085         ticket->t_res_num_ophdrs += headers;
2086         len += headers * sizeof(struct xlog_op_header);
2087
2088         return len;
2089 }
2090
2091 /*
2092  * If this is the first write for the transaction, insert a start record.  We
2093  * can't be trying to commit, nor have any "partial_copy", if we are inited.
2094  */
2095 static int
2096 xlog_write_start_rec(
2097         struct xlog_op_header   *ophdr,
2098         struct xlog_ticket      *ticket)
2099 {
2100         if (!(ticket->t_flags & XLOG_TIC_INITED))
2101                 return 0;
2102
2103         ophdr->oh_tid   = cpu_to_be32(ticket->t_tid);
2104         ophdr->oh_clientid = ticket->t_clientid;
2105         ophdr->oh_len = 0;
2106         ophdr->oh_flags = XLOG_START_TRANS;
2107         ophdr->oh_res2 = 0;
2108
2109         ticket->t_flags &= ~XLOG_TIC_INITED;
2110
2111         return sizeof(struct xlog_op_header);
2112 }
2113
2114 static xlog_op_header_t *
2115 xlog_write_setup_ophdr(
2116         struct xlog             *log,
2117         struct xlog_op_header   *ophdr,
2118         struct xlog_ticket      *ticket,
2119         uint                    flags)
2120 {
2121         ophdr->oh_tid = cpu_to_be32(ticket->t_tid);
2122         ophdr->oh_clientid = ticket->t_clientid;
2123         ophdr->oh_res2 = 0;
2124
2125         /* are we copying a commit or unmount record? */
2126         ophdr->oh_flags = flags;
2127
2128         /*
2129          * We've seen logs corrupted with bad transaction client ids.  This
2130          * makes sure that XFS doesn't write them out.  Turn this into an EIO
2131          * and shut down the filesystem.
2132          */
2133         switch (ophdr->oh_clientid)  {
2134         case XFS_TRANSACTION:
2135         case XFS_VOLUME:
2136         case XFS_LOG:
2137                 break;
2138         default:
2139                 xfs_warn(log->l_mp,
2140                         "Bad XFS transaction clientid 0x%x in ticket 0x%p",
2141                         ophdr->oh_clientid, ticket);
2142                 return NULL;
2143         }
2144
2145         return ophdr;
2146 }
2147
2148 /*
2149  * Set up the parameters of the region copy into the log. This has
2150  * to handle a region write split across multiple log buffers - this
2151  * state is kept external to this function so that this code can
2152  * be written in an obvious, self-documenting manner.
2153  */
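/*
 * Illustrative walk-through (made-up sizes): a 1000 byte region with only
 * 400 bytes left in the current iclog first returns copy_len = 400 with
 * XLOG_CONTINUE_TRANS set and *bytes_consumed bumped to 400; once a fresh
 * iclog is obtained, the second call sees still_to_copy = 600, which now
 * fits, sets copy_off = 400 and completes the region with
 * XLOG_WAS_CONT_TRANS | XLOG_END_TRANS.
 */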
2154 static int
2155 xlog_write_setup_copy(
2156         struct xlog_ticket      *ticket,
2157         struct xlog_op_header   *ophdr,
2158         int                     space_available,
2159         int                     space_required,
2160         int                     *copy_off,
2161         int                     *copy_len,
2162         int                     *last_was_partial_copy,
2163         int                     *bytes_consumed)
2164 {
2165         int                     still_to_copy;
2166
2167         still_to_copy = space_required - *bytes_consumed;
2168         *copy_off = *bytes_consumed;
2169
2170         if (still_to_copy <= space_available) {
2171                 /* write of region completes here */
2172                 *copy_len = still_to_copy;
2173                 ophdr->oh_len = cpu_to_be32(*copy_len);
2174                 if (*last_was_partial_copy)
2175                         ophdr->oh_flags |= (XLOG_END_TRANS|XLOG_WAS_CONT_TRANS);
2176                 *last_was_partial_copy = 0;
2177                 *bytes_consumed = 0;
2178                 return 0;
2179         }
2180
2181         /* partial write of region, needs extra log op header reservation */
2182         *copy_len = space_available;
2183         ophdr->oh_len = cpu_to_be32(*copy_len);
2184         ophdr->oh_flags |= XLOG_CONTINUE_TRANS;
2185         if (*last_was_partial_copy)
2186                 ophdr->oh_flags |= XLOG_WAS_CONT_TRANS;
2187         *bytes_consumed += *copy_len;
2188         (*last_was_partial_copy)++;
2189
2190         /* account for new log op header */
2191         ticket->t_curr_res -= sizeof(struct xlog_op_header);
2192         ticket->t_res_num_ophdrs++;
2193
2194         return sizeof(struct xlog_op_header);
2195 }
2196
2197 static int
2198 xlog_write_copy_finish(
2199         struct xlog             *log,
2200         struct xlog_in_core     *iclog,
2201         uint                    flags,
2202         int                     *record_cnt,
2203         int                     *data_cnt,
2204         int                     *partial_copy,
2205         int                     *partial_copy_len,
2206         int                     log_offset,
2207         struct xlog_in_core     **commit_iclog)
2208 {
2209         if (*partial_copy) {
2210                 /*
2211                  * This iclog has already been marked WANT_SYNC by
2212                  * xlog_state_get_iclog_space.
2213                  */
2214                 xlog_state_finish_copy(log, iclog, *record_cnt, *data_cnt);
2215                 *record_cnt = 0;
2216                 *data_cnt = 0;
2217                 return xlog_state_release_iclog(log, iclog);
2218         }
2219
2220         *partial_copy = 0;
2221         *partial_copy_len = 0;
2222
2223         if (iclog->ic_size - log_offset <= sizeof(xlog_op_header_t)) {
2224                 /* no more space in this iclog - push it. */
2225                 xlog_state_finish_copy(log, iclog, *record_cnt, *data_cnt);
2226                 *record_cnt = 0;
2227                 *data_cnt = 0;
2228
2229                 spin_lock(&log->l_icloglock);
2230                 xlog_state_want_sync(log, iclog);
2231                 spin_unlock(&log->l_icloglock);
2232
2233                 if (!commit_iclog)
2234                         return xlog_state_release_iclog(log, iclog);
2235                 ASSERT(flags & XLOG_COMMIT_TRANS);
2236                 *commit_iclog = iclog;
2237         }
2238
2239         return 0;
2240 }
2241
2242 /*
2243  * Write some region out to in-core log
2244  *
2245  * This will be called when writing externally provided regions or when
2246  * writing out a commit record for a given transaction.
2247  *
2248  * General algorithm:
2249  *      1. Find total length of this write.  This may include adding to the
2250  *              lengths passed in.
2251  *      2. Check whether we violate the ticket's reservation.
2252  *      3. While writing to this iclog
2253  *          A. Reserve as much space in this iclog as we can get
2254  *          B. If this is first write, save away start lsn
2255  *          C. While writing this region:
2256  *              1. If first write of transaction, write start record
2257  *              2. Write log operation header (header per region)
2258  *              3. Find out if we can fit entire region into this iclog
2259  *              4. Potentially, verify destination memcpy ptr
2260  *              5. Memcpy (partial) region
2261  *              6. If partial copy, release iclog; otherwise, continue
2262  *                      copying more regions into current iclog
2263  *      4. Mark want sync bit (in simulation mode)
2264  *      5. Release iclog for potential flush to on-disk log.
2265  *
2266  * ERRORS:
2267  * 1.   Panic if reservation is overrun.  This should never happen since
2268  *      reservation amounts are generated internally by the filesystem.
2269  * NOTES:
2270  * 1. Tickets are single threaded data structures.
2271  * 2. The XLOG_END_TRANS & XLOG_CONTINUE_TRANS flags are passed down to the
2272  *      syncing routine.  When a single log_write region needs to span
2273  *      multiple in-core logs, the XLOG_CONTINUE_TRANS bit should be set
2274  *      on all log operation writes which don't contain the end of the
2275  *      region.  The XLOG_END_TRANS bit is used for the in-core log
2276  *      operation which contains the end of the continued log_write region.
2277  * 3. When xlog_state_get_iclog_space() grabs the rest of the current iclog,
2278  *      we don't really know exactly how much space will be used.  As a result,
2279  *      we don't update ic_offset until the end when we know exactly how many
2280  *      bytes have been written out.
2281  */
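/*
 * Minimal usage sketch, mirroring xlog_commit_record() above ("payload" and
 * "payload_len" are placeholders; i_len must stay 32 bit aligned):
 *
 *	struct xfs_log_iovec reg = {
 *		.i_addr = payload,
 *		.i_len = payload_len,
 *		.i_type = XLOG_REG_TYPE_TRANSHDR,
 *	};
 *	struct xfs_log_vec vec = {
 *		.lv_niovecs = 1,
 *		.lv_iovecp = &reg,
 *	};
 *
 *	error = xlog_write(log, &vec, ticket, &start_lsn, NULL, 0);
 */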
2282 int
2283 xlog_write(
2284         struct xlog             *log,
2285         struct xfs_log_vec      *log_vector,
2286         struct xlog_ticket      *ticket,
2287         xfs_lsn_t               *start_lsn,
2288         struct xlog_in_core     **commit_iclog,
2289         uint                    flags)
2290 {
2291         struct xlog_in_core     *iclog = NULL;
2292         struct xfs_log_iovec    *vecp;
2293         struct xfs_log_vec      *lv;
2294         int                     len;
2295         int                     index;
2296         int                     partial_copy = 0;
2297         int                     partial_copy_len = 0;
2298         int                     contwr = 0;
2299         int                     record_cnt = 0;
2300         int                     data_cnt = 0;
2301         int                     error;
2302
2303         *start_lsn = 0;
2304
2305         len = xlog_write_calc_vec_length(ticket, log_vector);
2306
2307         /*
2308          * Region headers and bytes are already accounted for.
2309          * We only need to take into account start records and
2310          * split regions in this function.
2311          */
2312         if (ticket->t_flags & XLOG_TIC_INITED)
2313                 ticket->t_curr_res -= sizeof(xlog_op_header_t);
2314
2315         /*
2316          * Commit record headers need to be accounted for. These
2317          * come in as separate writes so are easy to detect.
2318          */
2319         if (flags & (XLOG_COMMIT_TRANS | XLOG_UNMOUNT_TRANS))
2320                 ticket->t_curr_res -= sizeof(xlog_op_header_t);
2321
2322         if (ticket->t_curr_res < 0)
2323                 xlog_print_tic_res(log->l_mp, ticket);
2324
2325         index = 0;
2326         lv = log_vector;
2327         vecp = lv->lv_iovecp;
2328         while (lv && (!lv->lv_niovecs || index < lv->lv_niovecs)) {
2329                 void            *ptr;
2330                 int             log_offset;
2331
2332                 error = xlog_state_get_iclog_space(log, len, &iclog, ticket,
2333                                                    &contwr, &log_offset);
2334                 if (error)
2335                         return error;
2336
2337                 ASSERT(log_offset <= iclog->ic_size - 1);
2338                 ptr = iclog->ic_datap + log_offset;
2339
2340                 /* start_lsn is the first lsn written to. That's all we need. */
2341                 if (!*start_lsn)
2342                         *start_lsn = be64_to_cpu(iclog->ic_header.h_lsn);
2343
2344                 /*
2345                  * This loop writes out as many regions as can fit in the amount
2346                  * of space which was allocated by xlog_state_get_iclog_space().
2347                  */
2348                 while (lv && (!lv->lv_niovecs || index < lv->lv_niovecs)) {
2349                         struct xfs_log_iovec    *reg;
2350                         struct xlog_op_header   *ophdr;
2351                         int                     start_rec_copy;
2352                         int                     copy_len;
2353                         int                     copy_off;
2354                         bool                    ordered = false;
2355
2356                         /* ordered log vectors have no regions to write */
2357                         if (lv->lv_buf_len == XFS_LOG_VEC_ORDERED) {
2358                                 ASSERT(lv->lv_niovecs == 0);
2359                                 ordered = true;
2360                                 goto next_lv;
2361                         }
2362
2363                         reg = &vecp[index];
2364                         ASSERT(reg->i_len % sizeof(__int32_t) == 0);
2365                         ASSERT((unsigned long)ptr % sizeof(__int32_t) == 0);
2366
2367                         start_rec_copy = xlog_write_start_rec(ptr, ticket);
2368                         if (start_rec_copy) {
2369                                 record_cnt++;
2370                                 xlog_write_adv_cnt(&ptr, &len, &log_offset,
2371                                                    start_rec_copy);
2372                         }
2373
2374                         ophdr = xlog_write_setup_ophdr(log, ptr, ticket, flags);
2375                         if (!ophdr)
2376                                 return -EIO;
2377
2378                         xlog_write_adv_cnt(&ptr, &len, &log_offset,
2379                                            sizeof(struct xlog_op_header));
2380
2381                         len += xlog_write_setup_copy(ticket, ophdr,
2382                                                      iclog->ic_size-log_offset,
2383                                                      reg->i_len,
2384                                                      &copy_off, &copy_len,
2385                                                      &partial_copy,
2386                                                      &partial_copy_len);
2387                         xlog_verify_dest_ptr(log, ptr);
2388
2389                         /* copy region */
2390                         ASSERT(copy_len >= 0);
2391                         memcpy(ptr, reg->i_addr + copy_off, copy_len);
2392                         xlog_write_adv_cnt(&ptr, &len, &log_offset, copy_len);
2393
2394                         copy_len += start_rec_copy + sizeof(xlog_op_header_t);
2395                         record_cnt++;
2396                         data_cnt += contwr ? copy_len : 0;
2397
2398                         error = xlog_write_copy_finish(log, iclog, flags,
2399                                                        &record_cnt, &data_cnt,
2400                                                        &partial_copy,
2401                                                        &partial_copy_len,
2402                                                        log_offset,
2403                                                        commit_iclog);
2404                         if (error)
2405                                 return error;
2406
2407                         /*
2408                          * if we had a partial copy, we need to get more iclog
2409                          * space but we don't want to increment the region
2410          * index because there is still more in this region to
2411                          * write.
2412                          *
2413                          * If we completed writing this region, and we flushed
2414                          * the iclog (indicated by resetting of the record
2415                          * count), then we also need to get more log space. If
2416                          * this was the last record, though, we are done and
2417                          * can just return.
2418                          */
2419                         if (partial_copy)
2420                                 break;
2421
2422                         if (++index == lv->lv_niovecs) {
2423 next_lv:
2424                                 lv = lv->lv_next;
2425                                 index = 0;
2426                                 if (lv)
2427                                         vecp = lv->lv_iovecp;
2428                         }
2429                         if (record_cnt == 0 && ordered == false) {
2430                                 if (!lv)
2431                                         return 0;
2432                                 break;
2433                         }
2434                 }
2435         }
2436
2437         ASSERT(len == 0);
2438
2439         xlog_state_finish_copy(log, iclog, record_cnt, data_cnt);
2440         if (!commit_iclog)
2441                 return xlog_state_release_iclog(log, iclog);
2442
2443         ASSERT(flags & XLOG_COMMIT_TRANS);
2444         *commit_iclog = iclog;
2445         return 0;
2446 }
2447
2448
2449 /*****************************************************************************
2450  *
2451  *              State Machine functions
2452  *
2453  *****************************************************************************
2454  */
2455
2456 /* Clean iclogs starting from the head.  This ordering must be
2457  * maintained, so an iclog doesn't become ACTIVE beyond one that
2458  * is SYNCING.  This is also required to maintain the notion that we use
2459  * an ordered wait queue to hold off would-be writers to the log when every
2460  * iclog is trying to sync to disk.
2461  *
2462  * State Change: DIRTY -> ACTIVE
2463  */
2464 STATIC void
2465 xlog_state_clean_log(
2466         struct xlog *log)
2467 {
2468         xlog_in_core_t  *iclog;
2469         int changed = 0;
2470
2471         iclog = log->l_iclog;
2472         do {
2473                 if (iclog->ic_state == XLOG_STATE_DIRTY) {
2474                         iclog->ic_state = XLOG_STATE_ACTIVE;
2475                         iclog->ic_offset       = 0;
2476                         ASSERT(iclog->ic_callback == NULL);
2477                         /*
2478                          * If the number of ops in this iclog indicates it just
2479                          * contains the dummy transaction, we can
2480                          * change state into IDLE (the second time around).
2481                          * Otherwise we should change the state to show
2482                          * that we NEED a dummy.
2483                          * We don't need to cover the dummy.
2484                          */
2485                         if (!changed &&
2486                            (be32_to_cpu(iclog->ic_header.h_num_logops) ==
2487                                         XLOG_COVER_OPS)) {
2488                                 changed = 1;
2489                         } else {
2490                                 /*
2491                                  * We have two dirty iclogs so start over.
2492                                  * This could also be because the number of
2493                                  * ops indicates this is not the dummy going out.
2494                                  */
2495                                 changed = 2;
2496                         }
2497                         iclog->ic_header.h_num_logops = 0;
2498                         memset(iclog->ic_header.h_cycle_data, 0,
2499                               sizeof(iclog->ic_header.h_cycle_data));
2500                         iclog->ic_header.h_lsn = 0;
2501                 } else if (iclog->ic_state == XLOG_STATE_ACTIVE)
2502                         /* do nothing */;
2503                 else
2504                         break;  /* stop cleaning */
2505                 iclog = iclog->ic_next;
2506         } while (iclog != log->l_iclog);
2507
2508         /* log is locked when we are called */
2509         /*
2510          * Change state for the dummy log recording.
2511          * We usually go to NEED. But we go to NEED2 if the changed value
2512          * indicates we are done writing the dummy record.
2513          * If we are done with the second dummy record (DONE2), then
2514          * we go to IDLE.
2515          */
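        /*
         * Sketch of the covering state machine (the NEED->DONE and
         * NEED2->DONE2 steps happen in xfs_log_need_covered(), not here):
         *
         *	IDLE -> NEED -> DONE -> NEED2 -> DONE2 -> IDLE
         *
         * with any non-dummy activity dropping the state back to NEED.
         */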
2516         if (changed) {
2517                 switch (log->l_covered_state) {
2518                 case XLOG_STATE_COVER_IDLE:
2519                 case XLOG_STATE_COVER_NEED:
2520                 case XLOG_STATE_COVER_NEED2:
2521                         log->l_covered_state = XLOG_STATE_COVER_NEED;
2522                         break;
2523
2524                 case XLOG_STATE_COVER_DONE:
2525                         if (changed == 1)
2526                                 log->l_covered_state = XLOG_STATE_COVER_NEED2;
2527                         else
2528                                 log->l_covered_state = XLOG_STATE_COVER_NEED;
2529                         break;
2530
2531                 case XLOG_STATE_COVER_DONE2:
2532                         if (changed == 1)
2533                                 log->l_covered_state = XLOG_STATE_COVER_IDLE;
2534                         else
2535                                 log->l_covered_state = XLOG_STATE_COVER_NEED;
2536                         break;
2537
2538                 default:
2539                         ASSERT(0);
2540                 }
2541         }
2542 }       /* xlog_state_clean_log */
2543
2544 STATIC xfs_lsn_t
2545 xlog_get_lowest_lsn(
2546         struct xlog     *log)
2547 {
2548         xlog_in_core_t  *lsn_log;
2549         xfs_lsn_t       lowest_lsn, lsn;
2550
2551         lsn_log = log->l_iclog;
2552         lowest_lsn = 0;
2553         do {
2554             if (!(lsn_log->ic_state & (XLOG_STATE_ACTIVE|XLOG_STATE_DIRTY))) {
2555                 lsn = be64_to_cpu(lsn_log->ic_header.h_lsn);
2556                 if ((lsn && !lowest_lsn) ||
2557                     (XFS_LSN_CMP(lsn, lowest_lsn) < 0)) {
2558                         lowest_lsn = lsn;
2559                 }
2560             }
2561             lsn_log = lsn_log->ic_next;
2562         } while (lsn_log != log->l_iclog);
2563         return lowest_lsn;
2564 }
2565
2566
2567 STATIC void
2568 xlog_state_do_callback(
2569         struct xlog             *log,
2570         int                     aborted,
2571         struct xlog_in_core     *ciclog)
2572 {
2573         xlog_in_core_t     *iclog;
2574         xlog_in_core_t     *first_iclog;        /* used to know when we've
2575                                                  * processed all iclogs once */
2576         xfs_log_callback_t *cb, *cb_next;
2577         int                flushcnt = 0;
2578         xfs_lsn_t          lowest_lsn;
2579         int                ioerrors;    /* counter: iclogs with errors */
2580         int                loopdidcallbacks; /* flag: inner loop did callbacks*/
2581         int                funcdidcallbacks; /* flag: function did callbacks */
2582         int                repeats;     /* for issuing console warnings if
2583                                          * looping too many times */
2584         int                wake = 0;
2585
2586         spin_lock(&log->l_icloglock);
2587         first_iclog = iclog = log->l_iclog;
2588         ioerrors = 0;
2589         funcdidcallbacks = 0;
2590         repeats = 0;
2591
2592         do {
2593                 /*
2594                  * Scan all iclogs starting with the one pointed to by the
2595                  * log.  Reset this starting point each time the log is
2596                  * unlocked (during callbacks).
2597                  *
2598                  * Keep looping through iclogs until one full pass is made
2599                  * without running any callbacks.
2600                  */
2601                 first_iclog = log->l_iclog;
2602                 iclog = log->l_iclog;
2603                 loopdidcallbacks = 0;
2604                 repeats++;
2605
2606                 do {
2607
2608                         /* skip all iclogs in the ACTIVE & DIRTY states */
2609                         if (iclog->ic_state &
2610                             (XLOG_STATE_ACTIVE|XLOG_STATE_DIRTY)) {
2611                                 iclog = iclog->ic_next;
2612                                 continue;
2613                         }
2614
2615                         /*
2616                          * Between marking a filesystem SHUTDOWN and stopping
2617                          * the log, we do flush all iclogs to disk (if there
2618                          * wasn't a log I/O error). So, we do want things to
2619                          * go smoothly in case of just a SHUTDOWN w/o a
2620                          * LOG_IO_ERROR.
2621                          */
2622                         if (!(iclog->ic_state & XLOG_STATE_IOERROR)) {
2623                                 /*
2624                                  * Can only perform callbacks in order.  Since
2625                                  * this iclog is not in the DONE_SYNC/
2626                                  * DO_CALLBACK state, we skip the rest and
2627                                  * just try to clean up.  If we set our iclog
2628                                  * to DO_CALLBACK, we will not process it when
2629                                  * we retry since a previous iclog is in the
2630                                  * CALLBACK and the state cannot change since
2631                                  * we are holding the l_icloglock.
2632                                  */
2633                                 if (!(iclog->ic_state &
2634                                         (XLOG_STATE_DONE_SYNC |
2635                                                  XLOG_STATE_DO_CALLBACK))) {
2636                                         if (ciclog && (ciclog->ic_state ==
2637                                                         XLOG_STATE_DONE_SYNC)) {
2638                                                 ciclog->ic_state = XLOG_STATE_DO_CALLBACK;
2639                                         }
2640                                         break;
2641                                 }
2642                                 /*
2643                                  * We now have an iclog that is in either the
2644                                  * DO_CALLBACK or DONE_SYNC states. The other
2645                                  * states (WANT_SYNC, SYNCING, or CALLBACK) were
2646                                  * caught by the above if and are going to be
2647                                  * cleaned up later (i.e. we aren't doing their
2648                                  * callbacks here; see the above if).
2649                                  */
2650
2651                                 /*
2652                                  * We will do one more check here to see if we
2653                                  * have chased our tail around.
2654                                  */
2655
2656                                 lowest_lsn = xlog_get_lowest_lsn(log);
2657                                 if (lowest_lsn &&
2658                                     XFS_LSN_CMP(lowest_lsn,
2659                                                 be64_to_cpu(iclog->ic_header.h_lsn)) < 0) {
2660                                         iclog = iclog->ic_next;
2661                                         continue; /* Leave this iclog for
2662                                                    * another thread */
2663                                 }
2664
2665                                 iclog->ic_state = XLOG_STATE_CALLBACK;
2666
2667
2668                                 /*
2669                                  * Completion of a iclog IO does not imply that
2670                                  * a transaction has completed, as transactions
2671                                  * can be large enough to span many iclogs. We
2672                                  * cannot change the tail of the log half way
2673                                  * through a transaction as this may be the only
2674                                  * transaction in the log and moving the tail to
2675                                  * point to the middle of it will prevent
2676                                  * recovery from finding the start of the
2677                                  * transaction. Hence we should only update the
2678                                  * last_sync_lsn if this iclog contains
2679                                  * transaction completion callbacks on it.
2680                                  *
2681                                  * We have to do this before we drop the
2682                                  * icloglock to ensure we are the only one that
2683                                  * can update it.
2684                                  */
2685                                 ASSERT(XFS_LSN_CMP(atomic64_read(&log->l_last_sync_lsn),
2686                                         be64_to_cpu(iclog->ic_header.h_lsn)) <= 0);
2687                                 if (iclog->ic_callback)
2688                                         atomic64_set(&log->l_last_sync_lsn,
2689                                                 be64_to_cpu(iclog->ic_header.h_lsn));
2690
2691                         } else
2692                                 ioerrors++;
2693
2694                         spin_unlock(&log->l_icloglock);
2695
2696                         /*
2697                          * Keep processing entries in the callback list until
2698                          * we come around and it is empty.  We need to
2699                          * atomically see that the list is empty and change the
2700                          * state to DIRTY so that we don't miss any more
2701                          * callbacks being added.
2702                          */
2703                         spin_lock(&iclog->ic_callback_lock);
2704                         cb = iclog->ic_callback;
2705                         while (cb) {
2706                                 iclog->ic_callback_tail = &(iclog->ic_callback);
2707                                 iclog->ic_callback = NULL;
2708                                 spin_unlock(&iclog->ic_callback_lock);
2709
2710                                 /* perform callbacks in the order given */
2711                                 for (; cb; cb = cb_next) {
2712                                         cb_next = cb->cb_next;
2713                                         cb->cb_func(cb->cb_arg, aborted);
2714                                 }
2715                                 spin_lock(&iclog->ic_callback_lock);
2716                                 cb = iclog->ic_callback;
2717                         }
2718
2719                         loopdidcallbacks++;
2720                         funcdidcallbacks++;
2721
2722                         spin_lock(&log->l_icloglock);
2723                         ASSERT(iclog->ic_callback == NULL);
2724                         spin_unlock(&iclog->ic_callback_lock);
2725                         if (!(iclog->ic_state & XLOG_STATE_IOERROR))
2726                                 iclog->ic_state = XLOG_STATE_DIRTY;
2727
2728                         /*
2729                          * Transition from DIRTY to ACTIVE if applicable.
2730                          * NOP if STATE_IOERROR.
2731                          */
2732                         xlog_state_clean_log(log);
2733
2734                         /* wake up threads waiting in xfs_log_force() */
2735                         wake_up_all(&iclog->ic_force_wait);
2736
2737                         iclog = iclog->ic_next;
2738                 } while (first_iclog != iclog);
2739
2740                 if (repeats > 5000) {
2741                         flushcnt += repeats;
2742                         repeats = 0;
2743                         xfs_warn(log->l_mp,
2744                                 "%s: possible infinite loop (%d iterations)",
2745                                 __func__, flushcnt);
2746                 }
2747         } while (!ioerrors && loopdidcallbacks);
2748
2749         /*
2750          * make one last gasp attempt to see if iclogs are being left in
2751          * limbo.
2752          */
2753 #ifdef DEBUG
2754         if (funcdidcallbacks) {
2755                 first_iclog = iclog = log->l_iclog;
2756                 do {
2757                         ASSERT(iclog->ic_state != XLOG_STATE_DO_CALLBACK);
2758                         /*
2759                          * Terminate the loop if iclogs are found in states
2760                          * which will cause other threads to clean up iclogs.
2761                          *
2762                          * SYNCING - i/o completion will go through logs
2763                          * DONE_SYNC - interrupt thread should be waiting for
2764                          *              l_icloglock
2765                          * IOERROR - give up hope all ye who enter here
2766                          */
2767                         if (iclog->ic_state == XLOG_STATE_WANT_SYNC ||
2768                             iclog->ic_state == XLOG_STATE_SYNCING ||
2769                             iclog->ic_state == XLOG_STATE_DONE_SYNC ||
2770                             iclog->ic_state == XLOG_STATE_IOERROR )
2771                                 break;
2772                         iclog = iclog->ic_next;
2773                 } while (first_iclog != iclog);
2774         }
2775 #endif
2776
2777         if (log->l_iclog->ic_state & (XLOG_STATE_ACTIVE|XLOG_STATE_IOERROR))
2778                 wake = 1;
2779         spin_unlock(&log->l_icloglock);
2780
2781         if (wake)
2782                 wake_up_all(&log->l_flush_wait);
2783 }
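/*
 * Illustrative sketch (not built): the detach-and-drain pattern used by
 * xlog_state_do_callback() above.  The list is emptied while
 * ic_callback_lock is held, the callbacks run unlocked, and the loop
 * repeats in case new callbacks were attached in the meantime.  The
 * cb_node type is a hypothetical stand-in for the real callback type.
 */
#if 0
struct cb_node {
        struct cb_node  *cb_next;
        void            (*cb_func)(void *, int);
        void            *cb_arg;
};

static void
drain_callbacks(spinlock_t *lock, struct cb_node **headp, int aborted)
{
        struct cb_node  *cb, *cb_next;

        spin_lock(lock);
        while ((cb = *headp) != NULL) {
                *headp = NULL;                  /* detach under lock */
                spin_unlock(lock);
                for (; cb; cb = cb_next) {      /* run in order, unlocked */
                        cb_next = cb->cb_next;
                        cb->cb_func(cb->cb_arg, aborted);
                }
                spin_lock(lock);                /* recheck for new arrivals */
        }
        spin_unlock(lock);
}
#endif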
2784
2785
2786 /*
2787  * Finish transitioning this iclog to the dirty state.
2788  *
2789  * Make sure that we completely execute this routine only when this is
2790  * the last call to the iclog.  There is a good chance that iclog flushes,
2791  * when we reach the end of the physical log, get turned into 2 separate
2792  * calls to bwrite.  Hence, one iclog flush could generate two calls to this
2793  * routine.  By using the reference count bwritecnt, we guarantee that only
2794  * the second completion goes through.
2795  *
2796  * Callbacks could take time, so they are done outside the scope of the
2797  * global state machine log lock.
2798  */
2799 STATIC void
2800 xlog_state_done_syncing(
2801         xlog_in_core_t  *iclog,
2802         int             aborted)
2803 {
2804         struct xlog        *log = iclog->ic_log;
2805
2806         spin_lock(&log->l_icloglock);
2807
2808         ASSERT(iclog->ic_state == XLOG_STATE_SYNCING ||
2809                iclog->ic_state == XLOG_STATE_IOERROR);
2810         ASSERT(atomic_read(&iclog->ic_refcnt) == 0);
2811         ASSERT(iclog->ic_bwritecnt == 1 || iclog->ic_bwritecnt == 2);
2812
2814         /*
2815          * If we got an error, either on the first buffer, or in the case of
2816          * split log writes, on the second, we mark ALL iclogs STATE_IOERROR,
2817          * and none of them should ever be written to disk again.
2819          */
2820         if (iclog->ic_state != XLOG_STATE_IOERROR) {
2821                 if (--iclog->ic_bwritecnt == 1) {
2822                         spin_unlock(&log->l_icloglock);
2823                         return;
2824                 }
2825                 iclog->ic_state = XLOG_STATE_DONE_SYNC;
2826         }
2827
2828         /*
2829          * Someone could be sleeping prior to writing out the next
2830          * iclog buffer, we wake them all, one will get to do the
2831          * I/O, the others get to wait for the result.
2832          */
2833         wake_up_all(&iclog->ic_write_wait);
2834         spin_unlock(&log->l_icloglock);
2835         xlog_state_do_callback(log, aborted, iclog);    /* also cleans log */
2836 }       /* xlog_state_done_syncing */
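/*
 * Worked example for the ic_bwritecnt gate above (hypothetical): an
 * iclog that wraps the end of the physical log is issued as two
 * bwrites with ic_bwritecnt = 2.  The first completion decrements the
 * count to 1 and returns with nothing else done; only the second
 * completion falls through, moves the iclog to DONE_SYNC and runs the
 * callbacks.  A non-split iclog starts with ic_bwritecnt = 1 and its
 * single completion falls straight through.
 */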
2837
2838
2839 /*
2840  * If the head of the in-core log ring is not (ACTIVE or DIRTY), then we must
2841  * sleep.  We wait on the flush queue on the head iclog as that should be
2842  * the first iclog to complete flushing. Hence if all iclogs are syncing,
2843  * we will wait here and all new writes will sleep until a sync completes.
2844  *
2845  * The in-core logs are used in a circular fashion. They are not used
2846  * out-of-order even when an iclog past the head is free.
2847  *
2848  * return:
2849  *      * log_offset where xlog_write() can start writing into the in-core
2850  *              log's data space.
2851  *      * in-core log pointer to which xlog_write() should write.
2852  *      * boolean indicating this is a continued write to an in-core log.
2853  *              If this is the last write, then the in-core log's offset field
2854  *              needs to be incremented, depending on the amount of data which
2855  *              is copied.
2856  */
2857 STATIC int
2858 xlog_state_get_iclog_space(
2859         struct xlog             *log,
2860         int                     len,
2861         struct xlog_in_core     **iclogp,
2862         struct xlog_ticket      *ticket,
2863         int                     *continued_write,
2864         int                     *logoffsetp)
2865 {
2866         int               log_offset;
2867         xlog_rec_header_t *head;
2868         xlog_in_core_t    *iclog;
2869         int               error;
2870
2871 restart:
2872         spin_lock(&log->l_icloglock);
2873         if (XLOG_FORCED_SHUTDOWN(log)) {
2874                 spin_unlock(&log->l_icloglock);
2875                 return -EIO;
2876         }
2877
2878         iclog = log->l_iclog;
2879         if (iclog->ic_state != XLOG_STATE_ACTIVE) {
2880                 XFS_STATS_INC(xs_log_noiclogs);
2881
2882                 /* Wait for log writes to have flushed */
2883                 xlog_wait(&log->l_flush_wait, &log->l_icloglock);
2884                 goto restart;
2885         }
2886
2887         head = &iclog->ic_header;
2888
2889         atomic_inc(&iclog->ic_refcnt);  /* prevents sync */
2890         log_offset = iclog->ic_offset;
2891
2892         /* On the 1st write to an iclog, figure out lsn.  This works
2893          * if iclogs marked XLOG_STATE_WANT_SYNC always write out what they are
2894          * committing to.  If the offset is set, that means something has
2895          * already been written into this iclog.
2896          */
2897         if (log_offset == 0) {
2898                 ticket->t_curr_res -= log->l_iclog_hsize;
2899                 xlog_tic_add_region(ticket,
2900                                     log->l_iclog_hsize,
2901                                     XLOG_REG_TYPE_LRHEADER);
2902                 head->h_cycle = cpu_to_be32(log->l_curr_cycle);
2903                 head->h_lsn = cpu_to_be64(
2904                         xlog_assign_lsn(log->l_curr_cycle, log->l_curr_block));
2905                 ASSERT(log->l_curr_block >= 0);
2906         }
2907
2908         /* If there is enough room to write everything, then do it.  Otherwise,
2909          * claim the rest of the region and make sure the XLOG_STATE_WANT_SYNC
2910          * bit is on, so this will get flushed out.  Don't update ic_offset
2911          * until you know exactly how many bytes get copied.  Therefore, wait
2912          * until later to update ic_offset.
2913          *
2914          * xlog_write() algorithm assumes that at least 2 xlog_op_header_t's
2915          * can fit into remaining data section.
2916          */
2917         if (iclog->ic_size - iclog->ic_offset < 2*sizeof(xlog_op_header_t)) {
2918                 xlog_state_switch_iclogs(log, iclog, iclog->ic_size);
2919
2920                 /*
2921                  * If I'm the only one writing to this iclog, sync it to disk.
2922                  * We need to do an atomic compare and decrement here to avoid
2923                  * racing with concurrent atomic_dec_and_lock() calls in
2924                  * xlog_state_release_iclog() when there is more than one
2925                  * reference to the iclog.
2926                  */
2927                 if (!atomic_add_unless(&iclog->ic_refcnt, -1, 1)) {
2928                         /* we are the only one */
2929                         spin_unlock(&log->l_icloglock);
2930                         error = xlog_state_release_iclog(log, iclog);
2931                         if (error)
2932                                 return error;
2933                 } else {
2934                         spin_unlock(&log->l_icloglock);
2935                 }
2936                 goto restart;
2937         }
2938
2939         /* Do we have enough room to write the full amount in the remainder
2940          * of this iclog?  Or must we continue a write on the next iclog and
2941          * mark this iclog as completely taken?  In the case where we switch
2942          * iclogs (to mark it taken), this particular iclog will release/sync
2943          * to disk in xlog_write().
2944          */
2945         if (len <= iclog->ic_size - iclog->ic_offset) {
2946                 *continued_write = 0;
2947                 iclog->ic_offset += len;
2948         } else {
2949                 *continued_write = 1;
2950                 xlog_state_switch_iclogs(log, iclog, iclog->ic_size);
2951         }
2952         *iclogp = iclog;
2953
2954         ASSERT(iclog->ic_offset <= iclog->ic_size);
2955         spin_unlock(&log->l_icloglock);
2956
2957         *logoffsetp = log_offset;
2958         return 0;
2959 }       /* xlog_state_get_iclog_space */
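/*
 * Illustrative sketch (not built): the shape of a caller such as
 * xlog_write().  Space is claimed in the current iclog; if the record
 * does not fit, *continued_write is set and the remainder is written
 * into the next iclog on a later pass.  Error handling and the actual
 * data copy are elided.
 */
#if 0
        struct xlog_in_core     *iclog;
        int                     contwr, log_offset, error;

        error = xlog_state_get_iclog_space(log, len, &iclog, ticket,
                                           &contwr, &log_offset);
        if (error)
                return error;
        /* copy record data to iclog->ic_datap + log_offset ... */
        if (contwr)
                /* the record continues in the next iclog */;
#endif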
2960
2961 /* The first cnt-1 times through here we don't need to
2962  * move the grant write head because the permanent
2963  * reservation has reserved cnt times the unit amount.
2964  * Release part of current permanent unit reservation and
2965  * reset current reservation to be one unit's worth.  Also
2966  * move grant reservation head forward.
2967  */
2968 STATIC void
2969 xlog_regrant_reserve_log_space(
2970         struct xlog             *log,
2971         struct xlog_ticket      *ticket)
2972 {
2973         trace_xfs_log_regrant_reserve_enter(log, ticket);
2974
2975         if (ticket->t_cnt > 0)
2976                 ticket->t_cnt--;
2977
2978         xlog_grant_sub_space(log, &log->l_reserve_head.grant,
2979                                         ticket->t_curr_res);
2980         xlog_grant_sub_space(log, &log->l_write_head.grant,
2981                                         ticket->t_curr_res);
2982         ticket->t_curr_res = ticket->t_unit_res;
2983         xlog_tic_reset_res(ticket);
2984
2985         trace_xfs_log_regrant_reserve_sub(log, ticket);
2986
2987         /* just return if we still have some of the pre-reserved space */
2988         if (ticket->t_cnt > 0)
2989                 return;
2990
2991         xlog_grant_add_space(log, &log->l_reserve_head.grant,
2992                                         ticket->t_unit_res);
2993
2994         trace_xfs_log_regrant_reserve_exit(log, ticket);
2995
2996         ticket->t_curr_res = ticket->t_unit_res;
2997         xlog_tic_reset_res(ticket);
2998 }       /* xlog_regrant_reserve_log_space */
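/*
 * Worked example (hypothetical numbers): a permanent ticket with
 * t_unit_res = 1000 bytes and t_cnt = 3 arrives here having consumed
 * 400 bytes, so t_curr_res = 600.  The regrant gives the unused 600
 * bytes back to both grant heads, resets t_curr_res to the full 1000
 * and drops t_cnt to 2.  Because t_cnt is still positive, the refill
 * is covered by the space reserved up front and nothing is re-added;
 * only once t_cnt reaches zero is a fresh t_unit_res added back to the
 * reserve head for the next transaction in the chain.
 */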
2999
3000
3001 /*
3002  * Give back the space left from a reservation.
3003  *
3004  * All the information we need to make a correct determination of space left
3005  * is present.  For non-permanent reservations, things are quite easy.  The
3006  * count should have been decremented to zero.  We only need to deal with the
3007  * space remaining in the current reservation part of the ticket.  If the
3008  * ticket contains a permanent reservation, there may be left over space which
3009  * needs to be released.  A count of N means that N-1 refills of the current
3010  * reservation can be done before we need to ask for more space.  The first
3011  * one goes to fill up the first current reservation.  Once we run out of
3012  * space, the count will stay at zero and the only space remaining will be
3013  * in the current reservation field.
3014  */
3015 STATIC void
3016 xlog_ungrant_log_space(
3017         struct xlog             *log,
3018         struct xlog_ticket      *ticket)
3019 {
3020         int     bytes;
3021
3022         if (ticket->t_cnt > 0)
3023                 ticket->t_cnt--;
3024
3025         trace_xfs_log_ungrant_enter(log, ticket);
3026         trace_xfs_log_ungrant_sub(log, ticket);
3027
3028         /*
3029          * If this is a permanent reservation ticket, we may be able to free
3030          * up more space based on the remaining count.
3031          */
3032         bytes = ticket->t_curr_res;
3033         if (ticket->t_cnt > 0) {
3034                 ASSERT(ticket->t_flags & XLOG_TIC_PERM_RESERV);
3035                 bytes += ticket->t_unit_res*ticket->t_cnt;
3036         }
3037
3038         xlog_grant_sub_space(log, &log->l_reserve_head.grant, bytes);
3039         xlog_grant_sub_space(log, &log->l_write_head.grant, bytes);
3040
3041         trace_xfs_log_ungrant_exit(log, ticket);
3042
3043         xfs_log_space_wake(log->l_mp);
3044 }
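/*
 * Worked example (hypothetical numbers): a permanent ticket reaching
 * here with t_cnt = 3 (2 after the decrement above), t_unit_res = 1000
 * and t_curr_res = 250 gives back
 *
 *      bytes = 250 + 2 * 1000 = 2250
 *
 * to both grant heads: the unused tail of the current reservation plus
 * the two refills that will never be used.
 */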
3045
3046 /*
3047  * Flush iclog to disk if this is the last reference to the given iclog and
3048  * the WANT_SYNC bit is set.
3049  *
3050  * When this function is entered, the iclog is not necessarily in the
3051  * WANT_SYNC state.  It may be sitting around waiting to get filled.
3052  */
3055 STATIC int
3056 xlog_state_release_iclog(
3057         struct xlog             *log,
3058         struct xlog_in_core     *iclog)
3059 {
3060         int             sync = 0;       /* do we sync? */
3061
3062         if (iclog->ic_state & XLOG_STATE_IOERROR)
3063                 return -EIO;
3064
3065         ASSERT(atomic_read(&iclog->ic_refcnt) > 0);
3066         if (!atomic_dec_and_lock(&iclog->ic_refcnt, &log->l_icloglock))
3067                 return 0;
3068
3069         if (iclog->ic_state & XLOG_STATE_IOERROR) {
3070                 spin_unlock(&log->l_icloglock);
3071                 return -EIO;
3072         }
3073         ASSERT(iclog->ic_state == XLOG_STATE_ACTIVE ||
3074                iclog->ic_state == XLOG_STATE_WANT_SYNC);
3075
3076         if (iclog->ic_state == XLOG_STATE_WANT_SYNC) {
3077                 /* update tail before writing to iclog */
3078                 xfs_lsn_t tail_lsn = xlog_assign_tail_lsn(log->l_mp);
3079                 sync++;
3080                 iclog->ic_state = XLOG_STATE_SYNCING;
3081                 iclog->ic_header.h_tail_lsn = cpu_to_be64(tail_lsn);
3082                 xlog_verify_tail_lsn(log, iclog, tail_lsn);
3083                 /* cycle incremented when incrementing curr_block */
3084         }
3085         spin_unlock(&log->l_icloglock);
3086
3087         /*
3088          * We let the log lock go, so it's possible that we hit a log I/O
3089          * error or some other SHUTDOWN condition that marks the iclog
3090          * as XLOG_STATE_IOERROR before the bwrite. However, we know that
3091          * this iclog has consistent data, so we ignore IOERROR
3092          * flags after this point.
3093          */
3094         if (sync)
3095                 return xlog_sync(log, iclog);
3096         return 0;
3097 }       /* xlog_state_release_iclog */
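/*
 * Illustrative sketch (not built): the atomic_dec_and_lock() idiom the
 * function above is built on.  The reference count is dropped without
 * taking the lock; only the thread whose decrement reaches zero
 * acquires l_icloglock, so the common non-final release stays lock-free
 * and the final release makes its WANT_SYNC decision under the lock.
 */
#if 0
        if (!atomic_dec_and_lock(&iclog->ic_refcnt, &log->l_icloglock))
                return 0;       /* not the last reference, nothing to do */
        /* last reference: l_icloglock is now held */
#endif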
3098
3099
3100 /*
3101  * This routine will mark the current iclog in the ring as WANT_SYNC
3102  * and move the current iclog pointer to the next iclog in the ring.
3103  * When this routine is called from xlog_state_get_iclog_space(), the
3104  * exact size of the iclog has not yet been determined.  All we know is
3105  * that we have run out of space in this log record.
3106  */
3107 STATIC void
3108 xlog_state_switch_iclogs(
3109         struct xlog             *log,
3110         struct xlog_in_core     *iclog,
3111         int                     eventual_size)
3112 {
3113         ASSERT(iclog->ic_state == XLOG_STATE_ACTIVE);
3114         if (!eventual_size)
3115                 eventual_size = iclog->ic_offset;
3116         iclog->ic_state = XLOG_STATE_WANT_SYNC;
3117         iclog->ic_header.h_prev_block = cpu_to_be32(log->l_prev_block);
3118         log->l_prev_block = log->l_curr_block;
3119         log->l_prev_cycle = log->l_curr_cycle;
3120
3121         /* roll log?: ic_offset changed later */
3122         log->l_curr_block += BTOBB(eventual_size)+BTOBB(log->l_iclog_hsize);
3123
3124         /* Round up to next log-sunit */
3125         if (xfs_sb_version_haslogv2(&log->l_mp->m_sb) &&
3126             log->l_mp->m_sb.sb_logsunit > 1) {
3127                 __uint32_t sunit_bb = BTOBB(log->l_mp->m_sb.sb_logsunit);
3128                 log->l_curr_block = roundup(log->l_curr_block, sunit_bb);
3129         }
3130
3131         if (log->l_curr_block >= log->l_logBBsize) {
3132                 log->l_curr_cycle++;
3133                 if (log->l_curr_cycle == XLOG_HEADER_MAGIC_NUM)
3134                         log->l_curr_cycle++;
3135                 log->l_curr_block -= log->l_logBBsize;
3136                 ASSERT(log->l_curr_block >= 0);
3137         }
3138         ASSERT(iclog == log->l_iclog);
3139         log->l_iclog = iclog->ic_next;
3140 }       /* xlog_state_switch_iclogs */
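/*
 * Worked example (hypothetical numbers): switching out a full 32KB
 * iclog (eventual_size = 32768) with a 512 byte header on a v2 log
 * with a 64KB stripe unit advances the head by
 *
 *      BTOBB(32768) + BTOBB(512) = 64 + 1 = 65 basic blocks,
 *
 * and roundup(l_curr_block, BTOBB(65536) = 128) then pads the head out
 * to the next stripe unit boundary, so the following log record starts
 * stripe-aligned on disk.
 */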
3141
3142 /*
3143  * Write out all data in the in-core log as of this exact moment in time.
3144  *
3145  * Data may be written to the in-core log during this call.  However,
3146  * we don't guarantee this data will be written out.  A change from past
3147  * implementation means this routine will *not* write out zero length LRs.
3148  *
3149  * Basically, we try to perform an intelligent scan of the in-core logs.
3150  * If we determine there is no flushable data, we just return.  There is no
3151  * flushable data if:
3152  *
3153  *      1. the current iclog is active and has no data; the previous iclog
3154  *              is in the active or dirty state.
3155  *      2. the current iclog is dirty, and the previous iclog is in the
3156  *              active or dirty state.
3157  *
3158  * We may sleep if:
3159  *
3160  *      1. the current iclog is not in the active nor dirty state.
3161  *      2. the current iclog is dirty, and the previous iclog is not in the
3162  *              active nor dirty state.
3163  *      3. the current iclog is active, and there is another thread writing
3164  *              to this particular iclog.
3165  *      4. a) the current iclog is active and has no other writers
3166  *         b) when we return from flushing out this iclog, it is still
3167  *              not in the active nor dirty state.
3168  */
3169 int
3170 _xfs_log_force(
3171         struct xfs_mount        *mp,
3172         uint                    flags,
3173         int                     *log_flushed)
3174 {
3175         struct xlog             *log = mp->m_log;
3176         struct xlog_in_core     *iclog;
3177         xfs_lsn_t               lsn;
3178
3179         XFS_STATS_INC(xs_log_force);
3180
3181         xlog_cil_force(log);
3182
3183         spin_lock(&log->l_icloglock);
3184
3185         iclog = log->l_iclog;
3186         if (iclog->ic_state & XLOG_STATE_IOERROR) {
3187                 spin_unlock(&log->l_icloglock);
3188                 return -EIO;
3189         }
3190
3191         /* If the head iclog is neither active nor dirty, we just attach
3192          * ourselves to the head and go to sleep.
3193          */
3194         if (iclog->ic_state == XLOG_STATE_ACTIVE ||
3195             iclog->ic_state == XLOG_STATE_DIRTY) {
3196                 /*
3197                  * If the head is dirty or (active and empty), then
3198                  * we need to look at the previous iclog.  If the previous
3199                  * iclog is active or dirty we are done.  There is nothing
3200                  * to sync out.  Otherwise, we attach ourselves to the
3201                  * previous iclog and go to sleep.
3202                  */
3203                 if (iclog->ic_state == XLOG_STATE_DIRTY ||
3204                     (atomic_read(&iclog->ic_refcnt) == 0
3205                      && iclog->ic_offset == 0)) {
3206                         iclog = iclog->ic_prev;
3207                         if (iclog->ic_state == XLOG_STATE_ACTIVE ||
3208                             iclog->ic_state == XLOG_STATE_DIRTY)
3209                                 goto no_sleep;
3210                         else
3211                                 goto maybe_sleep;
3212                 } else {
3213                         if (atomic_read(&iclog->ic_refcnt) == 0) {
3214                                 /* We are the only one with access to this
3215                                  * iclog.  Flush it out now.  There should
3216                                  * be a roundoff of zero to show that someone
3217                                  * has already taken care of the roundoff from
3218                                  * the previous sync.
3219                                  */
3220                                 atomic_inc(&iclog->ic_refcnt);
3221                                 lsn = be64_to_cpu(iclog->ic_header.h_lsn);
3222                                 xlog_state_switch_iclogs(log, iclog, 0);
3223                                 spin_unlock(&log->l_icloglock);
3224
3225                                 if (xlog_state_release_iclog(log, iclog))
3226                                         return -EIO;
3227
3228                                 if (log_flushed)
3229                                         *log_flushed = 1;
3230                                 spin_lock(&log->l_icloglock);
3231                                 if (be64_to_cpu(iclog->ic_header.h_lsn) == lsn &&
3232                                     iclog->ic_state != XLOG_STATE_DIRTY)
3233                                         goto maybe_sleep;
3234                                 else
3235                                         goto no_sleep;
3236                         } else {
3237                                 /* Someone else is writing to this iclog.
3238                                  * Use its call to flush out the data.  However,
3239                                  * the other thread may not force out this LR,
3240                                  * so we mark it WANT_SYNC.
3241                                  */
3242                                 xlog_state_switch_iclogs(log, iclog, 0);
3243                                 goto maybe_sleep;
3244                         }
3245                 }
3246         }
3247
3248         /* By the time we come around again, the iclog could've been filled
3249          * which would give it another lsn.  If we have a new lsn, just
3250          * return because the relevant data has been flushed.
3251          */
3252 maybe_sleep:
3253         if (flags & XFS_LOG_SYNC) {
3254                 /*
3255                  * We must check if we're shutting down here, before
3256                  * we wait, while we're holding the l_icloglock.
3257                  * Then we check again after waking up, in case our
3258                  * sleep was disturbed by bad news.
3259                  */
3260                 if (iclog->ic_state & XLOG_STATE_IOERROR) {
3261                         spin_unlock(&log->l_icloglock);
3262                         return -EIO;
3263                 }
3264                 XFS_STATS_INC(xs_log_force_sleep);
3265                 xlog_wait(&iclog->ic_force_wait, &log->l_icloglock);
3266                 /*
3267                  * No need to grab the log lock here since we're
3268                  * only deciding whether or not to return EIO
3269                  * and the memory read should be atomic.
3270                  */
3271                 if (iclog->ic_state & XLOG_STATE_IOERROR)
3272                         return -EIO;
3273                 if (log_flushed)
3274                         *log_flushed = 1;
3275         } else {
3276
3277 no_sleep:
3278                 spin_unlock(&log->l_icloglock);
3279         }
3280         return 0;
3281 }
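/*
 * Illustrative sketch (not built): a synchronous caller that also wants
 * to know whether anything actually reached the disk, e.g. for
 * fsync-style accounting.  The log_flushed out-parameter is optional
 * and may be NULL.
 */
#if 0
        int     log_flushed = 0;
        int     error;

        error = _xfs_log_force(mp, XFS_LOG_SYNC, &log_flushed);
        if (!error && log_flushed)
                /* the in-core log was pushed to disk by this call */;
#endif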
3282
3283 /*
3284  * Wrapper for _xfs_log_force(), to be used when caller doesn't care
3285  * about errors or whether the log was flushed or not. This is the normal
3286  * interface to use when trying to unpin items or move the log forward.
3287  */
3288 void
3289 xfs_log_force(
3290         xfs_mount_t     *mp,
3291         uint            flags)
3292 {
3293         int     error;
3294
3295         trace_xfs_log_force(mp, 0);
3296         error = _xfs_log_force(mp, flags, NULL);
3297         if (error)
3298                 xfs_warn(mp, "%s: error %d returned.", __func__, error);
3299 }
3300
3301 /*
3302  * Force the in-core log to disk for a specific LSN.
3303  *
3304  * Find in-core log with lsn.
3305  *      If it is in the DIRTY state, just return.
3306  *      If it is in the ACTIVE state, move the in-core log into the WANT_SYNC
3307  *              state and go to sleep or return.
3308  *      If it is in any other state, go to sleep or return.
3309  *
3310  * Synchronous forces are implemented with a signal variable. All callers
3311  * to force a given lsn to disk will wait on the sv attached to the
3312  * specific in-core log.  When the given in-core log finally completes its
3313  * write to disk, that thread will wake up all threads waiting on the
3314  * sv.
3315  */
3316 int
3317 _xfs_log_force_lsn(
3318         struct xfs_mount        *mp,
3319         xfs_lsn_t               lsn,
3320         uint                    flags,
3321         int                     *log_flushed)
3322 {
3323         struct xlog             *log = mp->m_log;
3324         struct xlog_in_core     *iclog;
3325         int                     already_slept = 0;
3326
3327         ASSERT(lsn != 0);
3328
3329         XFS_STATS_INC(xs_log_force);
3330
3331         lsn = xlog_cil_force_lsn(log, lsn);
3332         if (lsn == NULLCOMMITLSN)
3333                 return 0;
3334
3335 try_again:
3336         spin_lock(&log->l_icloglock);
3337         iclog = log->l_iclog;
3338         if (iclog->ic_state & XLOG_STATE_IOERROR) {
3339                 spin_unlock(&log->l_icloglock);
3340                 return -EIO;
3341         }
3342
3343         do {
3344                 if (be64_to_cpu(iclog->ic_header.h_lsn) != lsn) {
3345                         iclog = iclog->ic_next;
3346                         continue;
3347                 }
3348
3349                 if (iclog->ic_state == XLOG_STATE_DIRTY) {
3350                         spin_unlock(&log->l_icloglock);
3351                         return 0;
3352                 }
3353
3354                 if (iclog->ic_state == XLOG_STATE_ACTIVE) {
3355                         /*
3356                          * We sleep here if we haven't already slept (e.g.
3357                          * this is the first time we've looked at the correct
3358                          * iclog buf) and the buffer before us is going to
3359                          * be sync'ed. The reason for this is that if we
3360                          * are doing sync transactions here, by waiting for
3361                          * the previous I/O to complete, we can allow a few
3362                          * more transactions into this iclog before we close
3363                          * it down.
3364                          *
3365                          * Otherwise, we mark the buffer WANT_SYNC, and bump
3366                          * up the refcnt so we can release the log (which
3367                          * drops the ref count).  The state switch keeps new
3368                          * transaction commits from using this buffer.  When
3369                          * the current commits finish writing into the buffer,
3370                          * the refcount will drop to zero and the buffer will
3371                          * go out then.
3372                          */
3373                         if (!already_slept &&
3374                             (iclog->ic_prev->ic_state &
3375                              (XLOG_STATE_WANT_SYNC | XLOG_STATE_SYNCING))) {
3376                                 ASSERT(!(iclog->ic_state & XLOG_STATE_IOERROR));
3377
3378                                 XFS_STATS_INC(xs_log_force_sleep);
3379
3380                                 xlog_wait(&iclog->ic_prev->ic_write_wait,
3381                                                         &log->l_icloglock);
3382                                 if (log_flushed)
3383                                         *log_flushed = 1;
3384                                 already_slept = 1;
3385                                 goto try_again;
3386                         }
3387                         atomic_inc(&iclog->ic_refcnt);
3388                         xlog_state_switch_iclogs(log, iclog, 0);
3389                         spin_unlock(&log->l_icloglock);
3390                         if (xlog_state_release_iclog(log, iclog))
3391                                 return -EIO;
3392                         if (log_flushed)
3393                                 *log_flushed = 1;
3394                         spin_lock(&log->l_icloglock);
3395                 }
3396
3397                 if ((flags & XFS_LOG_SYNC) && /* sleep */
3398                     !(iclog->ic_state &
3399                       (XLOG_STATE_ACTIVE | XLOG_STATE_DIRTY))) {
3400                         /*
3401                          * Don't wait on completion if we know that we've
3402                          * gotten a log write error.
3403                          */
3404                         if (iclog->ic_state & XLOG_STATE_IOERROR) {
3405                                 spin_unlock(&log->l_icloglock);
3406                                 return -EIO;
3407                         }
3408                         XFS_STATS_INC(xs_log_force_sleep);
3409                         xlog_wait(&iclog->ic_force_wait, &log->l_icloglock);
3410                         /*
3411                          * No need to grab the log lock here since we're
3412                          * only deciding whether or not to return EIO
3413                          * and the memory read should be atomic.
3414                          */
3415                         if (iclog->ic_state & XLOG_STATE_IOERROR)
3416                                 return -EIO;
3417
3418                         if (log_flushed)
3419                                 *log_flushed = 1;
3420                 } else {                /* just return */
3421                         spin_unlock(&log->l_icloglock);
3422                 }
3423
3424                 return 0;
3425         } while (iclog != log->l_iclog);
3426
3427         spin_unlock(&log->l_icloglock);
3428         return 0;
3429 }
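/*
 * Illustrative sketch (not built): a caller waiting for a specific
 * commit to become stable, e.g. after a synchronous transaction whose
 * commit record LSN (the hypothetical commit_lsn below) came back from
 * the CIL.  A zero lsn must never be passed in.
 */
#if 0
        error = _xfs_log_force_lsn(mp, commit_lsn, XFS_LOG_SYNC, NULL);
#endif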
3430
3431 /*
3432  * Wrapper for _xfs_log_force_lsn(), to be used when caller doesn't care
3433  * about errors or whether the log was flushed or not. This is the normal
3434  * interface to use when trying to unpin items or move the log forward.
3435  */
3436 void
3437 xfs_log_force_lsn(
3438         xfs_mount_t     *mp,
3439         xfs_lsn_t       lsn,
3440         uint            flags)
3441 {
3442         int     error;
3443
3444         trace_xfs_log_force(mp, lsn);
3445         error = _xfs_log_force_lsn(mp, lsn, flags, NULL);
3446         if (error)
3447                 xfs_warn(mp, "%s: error %d returned.", __func__, error);
3448 }
3449
3450 /*
3451  * Called when we want to mark the current iclog as being ready to sync to
3452  * disk.
3453  */
3454 STATIC void
3455 xlog_state_want_sync(
3456         struct xlog             *log,
3457         struct xlog_in_core     *iclog)
3458 {
3459         assert_spin_locked(&log->l_icloglock);
3460
3461         if (iclog->ic_state == XLOG_STATE_ACTIVE) {
3462                 xlog_state_switch_iclogs(log, iclog, 0);
3463         } else {
3464                 ASSERT(iclog->ic_state &
3465                         (XLOG_STATE_WANT_SYNC|XLOG_STATE_IOERROR));
3466         }
3467 }
3468
3469
3470 /*****************************************************************************
3471  *
3472  *              TICKET functions
3473  *
3474  *****************************************************************************
3475  */
3476
3477 /*
3478  * Free a used ticket when its refcount falls to zero.
3479  */
3480 void
3481 xfs_log_ticket_put(
3482         xlog_ticket_t   *ticket)
3483 {
3484         ASSERT(atomic_read(&ticket->t_ref) > 0);
3485         if (atomic_dec_and_test(&ticket->t_ref))
3486                 kmem_zone_free(xfs_log_ticket_zone, ticket);
3487 }
3488
3489 xlog_ticket_t *
3490 xfs_log_ticket_get(
3491         xlog_ticket_t   *ticket)
3492 {
3493         ASSERT(atomic_read(&ticket->t_ref) > 0);
3494         atomic_inc(&ticket->t_ref);
3495         return ticket;
3496 }
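/*
 * Illustrative sketch (not built): the ticket reference pattern.  A
 * ticket is born with t_ref = 1 in xlog_ticket_alloc(); every extra
 * holder pairs a get with a put, and the final put frees the zone
 * memory.
 */
#if 0
        struct xlog_ticket      *tic2 = xfs_log_ticket_get(tic);

        /* ... tic and tic2 can be used independently here ... */

        xfs_log_ticket_put(tic2);
        xfs_log_ticket_put(tic);        /* final put frees the ticket */
#endif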
3497
3498 /*
3499  * Figure out the total log space unit (in bytes) that would be
3500  * required for a log ticket.
3501  */
3502 int
3503 xfs_log_calc_unit_res(
3504         struct xfs_mount        *mp,
3505         int                     unit_bytes)
3506 {
3507         struct xlog             *log = mp->m_log;
3508         int                     iclog_space;
3509         uint                    num_headers;
3510
3511         /*
3512          * Permanent reservations have up to 'cnt'-1 active log operations
3513          * in the log.  A unit in this case is the amount of space for one
3514          * of these log operations.  Normal reservations have a cnt of 1
3515          * and their unit amount is the total amount of space required.
3516          *
3517          * The following lines of code account for non-transaction data
3518          * which occupy space in the on-disk log.
3519          *
3520          * Normal form of a transaction is:
3521          * <oph><trans-hdr><start-oph><reg1-oph><reg1><reg2-oph>...<commit-oph>
3522          * and then there are LR hdrs, split-recs and roundoff at end of syncs.
3523          *
3524          * We need to account for all the leadup data and trailer data
3525          * around the transaction data.
3526          * And then we need to account for the worst case in terms of using
3527          * more space.
3528          * The worst case will happen if:
3529          * - the placement of the transaction happens to be such that the
3530          *   roundoff is at its maximum
3531          * - the transaction data is synced before the commit record is synced
3532          *   i.e. <transaction-data><roundoff> | <commit-rec><roundoff>
3533          *   Therefore the commit record is in its own Log Record.
3534          *   This can happen as the commit record is called with its
3535          *   own region to xlog_write().
3536          *   This then means that in the worst case, roundoff can happen for
3537          *   the commit-rec as well.
3538          *   The commit-rec is smaller than padding in this scenario and so it is
3539          *   not added separately.
3540          */
3541
3542         /* for trans header */
3543         unit_bytes += sizeof(xlog_op_header_t);
3544         unit_bytes += sizeof(xfs_trans_header_t);
3545
3546         /* for start-rec */
3547         unit_bytes += sizeof(xlog_op_header_t);
3548
3549         /*
3550          * for LR headers - the space for data in an iclog is the size minus
3551          * the space used for the headers. If we use the iclog size, then we
3552          * undercalculate the number of headers required.
3553          *
3554          * Furthermore - the addition of op headers for split-recs might
3555          * increase the space required enough to require more log and op
3556          * headers, so take that into account too.
3557          *
3558          * IMPORTANT: This reservation makes the assumption that if this
3559          * transaction is the first in an iclog and hence has the LR headers
3560          * accounted to it, then the remaining space in the iclog is
3561          * exclusively for this transaction.  i.e. if the transaction is larger
3562          * than the iclog, it will be the only thing in that iclog.
3563          * Fundamentally, this means we must pass the entire log vector to
3564          * xlog_write to guarantee this.
3565          */
3566         iclog_space = log->l_iclog_size - log->l_iclog_hsize;
3567         num_headers = howmany(unit_bytes, iclog_space);
3568
3569         /* for split-recs - ophdrs added when data split over LRs */
3570         unit_bytes += sizeof(xlog_op_header_t) * num_headers;
3571
3572         /* add extra header reservations if we overrun */
3573         while (!num_headers ||
3574                howmany(unit_bytes, iclog_space) > num_headers) {
3575                 unit_bytes += sizeof(xlog_op_header_t);
3576                 num_headers++;
3577         }
3578         unit_bytes += log->l_iclog_hsize * num_headers;
3579
3580         /* for commit-rec LR header - note: padding will subsume the ophdr */
3581         unit_bytes += log->l_iclog_hsize;
3582
3583         /* for roundoff padding for transaction data and one for commit record */
3584         if (xfs_sb_version_haslogv2(&mp->m_sb) && mp->m_sb.sb_logsunit > 1) {
3585                 /* log su roundoff */
3586                 unit_bytes += 2 * mp->m_sb.sb_logsunit;
3587         } else {
3588                 /* BB roundoff */
3589                 unit_bytes += 2 * BBSIZE;
3590         }
3591
3592         return unit_bytes;
3593 }
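/*
 * Worked example (hypothetical numbers, assuming the 12 and 16 byte
 * on-disk sizes of xlog_op_header_t and xfs_trans_header_t): 4096
 * bytes of payload on a v2 log with 32KB iclogs, a 512 byte iclog
 * header and a 4096 byte stripe unit comes out as
 *
 *      4096                    payload
 *      + 12 + 16               trans header ophdr + trans header
 *      + 12                    start-rec ophdr
 *      + 1 * 12                split-rec ophdrs (num_headers = 1)
 *      + 1 * 512               LR headers
 *      + 512                   commit-rec LR header
 *      + 2 * 4096              stripe-unit roundoff
 *      = 13364 bytes,
 *
 * so even a small transaction reserves several times its payload once
 * the worst-case framing and roundoff are accounted for.
 */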
3594
3595 /*
3596  * Allocate and initialise a new log ticket.
3597  */
3598 struct xlog_ticket *
3599 xlog_ticket_alloc(
3600         struct xlog             *log,
3601         int                     unit_bytes,
3602         int                     cnt,
3603         char                    client,
3604         bool                    permanent,
3605         xfs_km_flags_t          alloc_flags)
3606 {
3607         struct xlog_ticket      *tic;
3608         int                     unit_res;
3609
3610         tic = kmem_zone_zalloc(xfs_log_ticket_zone, alloc_flags);
3611         if (!tic)
3612                 return NULL;
3613
3614         unit_res = xfs_log_calc_unit_res(log->l_mp, unit_bytes);
3615
3616         atomic_set(&tic->t_ref, 1);
3617         tic->t_task             = current;
3618         INIT_LIST_HEAD(&tic->t_queue);
3619         tic->t_unit_res         = unit_res;
3620         tic->t_curr_res         = unit_res;
3621         tic->t_cnt              = cnt;
3622         tic->t_ocnt             = cnt;
3623         tic->t_tid              = prandom_u32();
3624         tic->t_clientid         = client;
3625         tic->t_flags            = XLOG_TIC_INITED;
3626         tic->t_trans_type       = 0;
3627         if (permanent)
3628                 tic->t_flags |= XLOG_TIC_PERM_RESERV;
3629
3630         xlog_tic_reset_res(tic);
3631
3632         return tic;
3633 }
3634
3635
3636 /******************************************************************************
3637  *
3638  *              Log debug routines
3639  *
3640  ******************************************************************************
3641  */
3642 #if defined(DEBUG)
3643 /*
3644  * Make sure that the destination ptr is within the valid data region of
3645  * one of the iclogs.  This uses backup pointers stored in a different
3646  * part of the log in case we trash the log structure.
3647  */
3648 void
3649 xlog_verify_dest_ptr(
3650         struct xlog     *log,
3651         char            *ptr)
3652 {
3653         int i;
3654         int good_ptr = 0;
3655
3656         for (i = 0; i < log->l_iclog_bufs; i++) {
3657                 if (ptr >= log->l_iclog_bak[i] &&
3658                     ptr <= log->l_iclog_bak[i] + log->l_iclog_size)
3659                         good_ptr++;
3660         }
3661
3662         if (!good_ptr)
3663                 xfs_emerg(log->l_mp, "%s: invalid ptr", __func__);
3664 }
3665
3666 /*
3667  * Check to make sure the grant write head didn't just overlap the tail.  If
3668  * the cycles are the same, we can't be overlapping.  Otherwise, make sure that
3669  * the cycles differ by exactly one and check the byte count.
3670  *
3671  * This check is run unlocked, so can give false positives. Rather than assert
3672  * on failures, use a warn-once flag and a panic tag to allow the admin to
3673  * determine if they want to panic the machine when such an error occurs. For
3674  * debug kernels this will have the same effect as using an assert but, unlike
3675  * an assert, it can be turned off at runtime.
3676  */
3677 STATIC void
3678 xlog_verify_grant_tail(
3679         struct xlog     *log)
3680 {
3681         int             tail_cycle, tail_blocks;
3682         int             cycle, space;
3683
3684         xlog_crack_grant_head(&log->l_write_head.grant, &cycle, &space);
3685         xlog_crack_atomic_lsn(&log->l_tail_lsn, &tail_cycle, &tail_blocks);
3686         if (tail_cycle != cycle) {
3687                 if (cycle - 1 != tail_cycle &&
3688                     !(log->l_flags & XLOG_TAIL_WARN)) {
3689                         xfs_alert_tag(log->l_mp, XFS_PTAG_LOGRES,
3690                                 "%s: cycle - 1 != tail_cycle", __func__);
3691                         log->l_flags |= XLOG_TAIL_WARN;
3692                 }
3693
3694                 if (space > BBTOB(tail_blocks) &&
3695                     !(log->l_flags & XLOG_TAIL_WARN)) {
3696                         xfs_alert_tag(log->l_mp, XFS_PTAG_LOGRES,
3697                                 "%s: space > BBTOB(tail_blocks)", __func__);
3698                         log->l_flags |= XLOG_TAIL_WARN;
3699                 }
3700         }
3701 }
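/*
 * Illustrative sketch (not built): the LSN packing that the crack
 * helpers above and the CYCLE_LSN/BLOCK_LSN macros below rely on.  An
 * xfs_lsn_t carries the cycle number in its high 32 bits and the
 * basic-block number in its low 32 bits, so cracking is just shifts
 * and truncation.
 */
#if 0
        xfs_lsn_t       lsn = xlog_assign_lsn(cycle, block);

        ASSERT(CYCLE_LSN(lsn) == cycle);        /* high 32 bits */
        ASSERT(BLOCK_LSN(lsn) == block);        /* low 32 bits */
#endif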
3702
3703 /* Check that the iclog about to be written will fit ahead of the log tail. */
3704 STATIC void
3705 xlog_verify_tail_lsn(
3706         struct xlog             *log,
3707         struct xlog_in_core     *iclog,
3708         xfs_lsn_t               tail_lsn)
3709 {
3710         int             blocks;
3711
3712         if (CYCLE_LSN(tail_lsn) == log->l_prev_cycle) {
3713                 blocks = log->l_logBBsize -
3714                                 (log->l_prev_block - BLOCK_LSN(tail_lsn));
3715                 if (blocks < BTOBB(iclog->ic_offset)+BTOBB(log->l_iclog_hsize))
3716                         xfs_emerg(log->l_mp, "%s: ran out of log space", __func__);
3717         } else {
3718                 ASSERT(CYCLE_LSN(tail_lsn)+1 == log->l_prev_cycle);
3719
3720                 if (BLOCK_LSN(tail_lsn) == log->l_prev_block)
3721                         xfs_emerg(log->l_mp, "%s: tail wrapped", __func__);
3722
3723                 blocks = BLOCK_LSN(tail_lsn) - log->l_prev_block;
3724                 if (blocks < BTOBB(iclog->ic_offset) + 1)
3725                         xfs_emerg(log->l_mp, "%s: ran out of log space", __func__);
3726         }
3727 }       /* xlog_verify_tail_lsn */
3728
3729 /*
3730  * Perform a number of checks on the iclog before writing to disk.
3731  *
3732  * 1. Make sure the iclogs are still circular
3733  * 2. Make sure we have a good magic number
3734  * 3. Make sure we don't have magic numbers in the data
3735  * 4. Check fields of each log operation header for:
3736  *      A. Valid client identifier
3737  *      B. tid ptr value falls in valid ptr space (user space code)
3738  *      C. Length in log record header is correct according to the
3739  *              individual operation headers within record.
3740  * 5. When a bwrite will occur within 5 blocks of the front of the physical
3741  *      log, check the preceding blocks of the physical log to make sure all
3742  *      the cycle numbers agree with the current cycle number.
3743  */
3744 STATIC void
3745 xlog_verify_iclog(
3746         struct xlog             *log,
3747         struct xlog_in_core     *iclog,
3748         int                     count,
3749         bool                    syncing)
3750 {
3751         xlog_op_header_t        *ophead;
3752         xlog_in_core_t          *icptr;
3753         xlog_in_core_2_t        *xhdr;
3754         xfs_caddr_t             ptr;
3755         xfs_caddr_t             base_ptr;
3756         __psint_t               field_offset;
3757         __uint8_t               clientid;
3758         int                     len, i, j, k, op_len;
3759         int                     idx;
3760
3761         /* check validity of iclog pointers */
3762         spin_lock(&log->l_icloglock);
3763         icptr = log->l_iclog;
3764         for (i = 0; i < log->l_iclog_bufs; i++, icptr = icptr->ic_next)
3765                 ASSERT(icptr);
3766
3767         if (icptr != log->l_iclog)
3768                 xfs_emerg(log->l_mp, "%s: corrupt iclog ring", __func__);
3769         spin_unlock(&log->l_icloglock);
3770
3771         /* check log magic numbers */
3772         if (iclog->ic_header.h_magicno != cpu_to_be32(XLOG_HEADER_MAGIC_NUM))
3773                 xfs_emerg(log->l_mp, "%s: invalid magic num", __func__);
3774
3775         ptr = (xfs_caddr_t) &iclog->ic_header;
3776         for (ptr += BBSIZE; ptr < ((xfs_caddr_t)&iclog->ic_header) + count;
3777              ptr += BBSIZE) {
3778                 if (*(__be32 *)ptr == cpu_to_be32(XLOG_HEADER_MAGIC_NUM))
3779                         xfs_emerg(log->l_mp, "%s: unexpected magic num",
3780                                 __func__);
3781         }
3782
3783         /* check fields */
3784         len = be32_to_cpu(iclog->ic_header.h_num_logops);
3785         ptr = iclog->ic_datap;
3786         base_ptr = ptr;
3787         ophead = (xlog_op_header_t *)ptr;
3788         xhdr = iclog->ic_data;
3789         for (i = 0; i < len; i++) {
3790                 ophead = (xlog_op_header_t *)ptr;
3791
3792                 /* clientid is only 1 byte */
3793                 field_offset = (__psint_t)
3794                                ((xfs_caddr_t)&(ophead->oh_clientid) - base_ptr);
3795                 if (!syncing || (field_offset & 0x1ff)) {
3796                         clientid = ophead->oh_clientid;
3797                 } else {
3798                         idx = BTOBBT((xfs_caddr_t)&(ophead->oh_clientid) - iclog->ic_datap);
3799                         if (idx >= (XLOG_HEADER_CYCLE_SIZE / BBSIZE)) {
3800                                 j = idx / (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
3801                                 k = idx % (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
3802                                 clientid = xlog_get_client_id(
3803                                         xhdr[j].hic_xheader.xh_cycle_data[k]);
3804                         } else {
3805                                 clientid = xlog_get_client_id(
3806                                         iclog->ic_header.h_cycle_data[idx]);
3807                         }
3808                 }
3809                 if (clientid != XFS_TRANSACTION && clientid != XFS_LOG)
3810                         xfs_warn(log->l_mp,
3811                                 "%s: invalid clientid %d op 0x%p offset 0x%lx",
3812                                 __func__, clientid, ophead,
3813                                 (unsigned long)field_offset);
3814
3815                 /* check length */
3816                 field_offset = (__psint_t)
3817                                ((xfs_caddr_t)&(ophead->oh_len) - base_ptr);
3818                 if (!syncing || (field_offset & 0x1ff)) {
3819                         op_len = be32_to_cpu(ophead->oh_len);
3820                 } else {
3821                         idx = BTOBBT((__psint_t)&ophead->oh_len -
3822                                     (__psint_t)iclog->ic_datap);
3823                         if (idx >= (XLOG_HEADER_CYCLE_SIZE / BBSIZE)) {
3824                                 j = idx / (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
3825                                 k = idx % (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
3826                                 op_len = be32_to_cpu(xhdr[j].hic_xheader.xh_cycle_data[k]);
3827                         } else {
3828                                 op_len = be32_to_cpu(iclog->ic_header.h_cycle_data[idx]);
3829                         }
3830                 }
3831                 ptr += sizeof(xlog_op_header_t) + op_len;
3832         }
3833 }       /* xlog_verify_iclog */
3834 #endif
3835
3836 /*
3837  * Mark all iclogs IOERROR. l_icloglock is held by the caller.
3838  */
3839 STATIC int
3840 xlog_state_ioerror(
3841         struct xlog     *log)
3842 {
3843         xlog_in_core_t  *iclog, *ic;
3844
3845         iclog = log->l_iclog;
3846         if (!(iclog->ic_state & XLOG_STATE_IOERROR)) {
3847                 /*
3848                  * Mark all the incore logs IOERROR.
3849                  * From now on, no log flushes will result.
3850                  */
3851                 ic = iclog;
3852                 do {
3853                         ic->ic_state = XLOG_STATE_IOERROR;
3854                         ic = ic->ic_next;
3855                 } while (ic != iclog);
3856                 return 0;
3857         }
3858         /*
3859          * Return non-zero, if state transition has already happened.
3860          */
3861         return 1;
3862 }
3863
3864 /*
3865  * This is called from xfs_force_shutdown, when we're forcibly
3866  * shutting down the filesystem, typically because of an IO error.
3867  * Our main objectives here are to make sure that:
3868  *      a. if !logerror, flush the logs to disk. Anything modified
3869  *         after this is ignored.
3870  *      b. the filesystem gets marked 'SHUTDOWN' for all interested
3871  *         parties to find out, 'atomically'.
3872  *      c. those who're sleeping on log reservations, pinned objects and
3873  *          other resources get woken up and told the bad news.
3874  *      d. nothing new gets queued up after (b) and (c) are done.
3875  *
3876  * Note: for the !logerror case we need to flush the regions held in memory out
3877  * to disk first. This needs to be done before the log is marked as shutdown,
3878  * otherwise the iclog writes will fail.
3879  */
3880 int
3881 xfs_log_force_umount(
3882         struct xfs_mount        *mp,
3883         int                     logerror)
3884 {
3885         struct xlog     *log;
3886         int             retval;
3887
3888         log = mp->m_log;
3889
3890         /*
3891          * If this happens during log recovery, don't worry about
3892          * locking; the log isn't open for business yet.
3893          */
3894         if (!log ||
3895             log->l_flags & XLOG_ACTIVE_RECOVERY) {
3896                 mp->m_flags |= XFS_MOUNT_FS_SHUTDOWN;
3897                 if (mp->m_sb_bp)
3898                         XFS_BUF_DONE(mp->m_sb_bp);
3899                 return 0;
3900         }
3901
3902         /*
3903          * Somebody could've already done the hard work for us.
3904          * No need to get locks for this.
3905          */
3906         if (logerror && log->l_iclog->ic_state & XLOG_STATE_IOERROR) {
3907                 ASSERT(XLOG_FORCED_SHUTDOWN(log));
3908                 return 1;
3909         }
3910
3911         /*
3912          * Flush all the completed transactions to disk before marking the log
3913          * as being shut down. We need to do it in this order to ensure that
3914          * completed operations are safely on disk before we shut down, and that
3915          * we don't have to issue any buffer IO after the shutdown flags are set
3916          * to guarantee this.
3917          */
3918         if (!logerror)
3919                 _xfs_log_force(mp, XFS_LOG_SYNC, NULL);
3920
3921         /*
3922          * mark the filesystem and the log as being in a shutdown state and wake
3923          * everybody up to tell them the bad news.
3924          */
3925         spin_lock(&log->l_icloglock);
3926         mp->m_flags |= XFS_MOUNT_FS_SHUTDOWN;
3927         if (mp->m_sb_bp)
3928                 XFS_BUF_DONE(mp->m_sb_bp);
3929
3930         /*
3931          * Mark the log and the iclogs with IO error flags to prevent any
3932          * further log IO from being issued or completed.
3933          */
3934         log->l_flags |= XLOG_IO_ERROR;
3935         retval = xlog_state_ioerror(log);
3936         spin_unlock(&log->l_icloglock);
3937
3938         /*
3939          * We don't want anybody waiting for log reservations after this. That
3940          * means we have to wake up everybody queued up on reserveq as well as
3941          * writeq.  In addition, we make sure in xlog_{re}grant_log_space that
3942          * we don't enqueue anything once the SHUTDOWN flag is set, and this
3943          * action is protected by the grant locks.
3944          */
3945         xlog_grant_head_wake_all(&log->l_reserve_head);
3946         xlog_grant_head_wake_all(&log->l_write_head);
3947
3948         /*
3949          * Wake up everybody waiting on xfs_log_force. Wake the CIL push first
3950          * as if the log writes were completed. The abort handling in the log
3951          * item committed callback functions will do this again under lock to
3952          * avoid races.
3953          */
3954         wake_up_all(&log->l_cilp->xc_commit_wait);
3955         xlog_state_do_callback(log, XFS_LI_ABORTED, NULL);
3956
3957 #ifdef XFSERRORDEBUG
3958         {
3959                 xlog_in_core_t  *iclog;
3960
3961                 spin_lock(&log->l_icloglock);
3962                 iclog = log->l_iclog;
3963                 do {
3964                         ASSERT(iclog->ic_callback == 0);
3965                         iclog = iclog->ic_next;
3966                 } while (iclog != log->l_iclog);
3967                 spin_unlock(&log->l_icloglock);
3968         }
3969 #endif
3970         /* return non-zero if log IOERROR transition had already happened */
3971         return retval;
3972 }
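/*
 * Illustrative sketch (not built): callers treat the return value as
 * "had the log already been shut down?", so the flush-then-mark
 * sequence above runs at most once per shutdown.
 */
#if 0
        if (xfs_log_force_umount(mp, logerror))
                return;         /* somebody else already shut the log down */
        /* first shutdown: all iclogs are now marked XLOG_STATE_IOERROR */
#endif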
3973
3974 STATIC int
3975 xlog_iclogs_empty(
3976         struct xlog     *log)
3977 {
3978         xlog_in_core_t  *iclog;
3979
3980         iclog = log->l_iclog;
3981         do {
3982                 /* endianness does not matter here, zero is zero in
3983                  * any language.
3984                  */
3985                 if (iclog->ic_header.h_num_logops)
3986                         return 0;
3987                 iclog = iclog->ic_next;
3988         } while (iclog != log->l_iclog);
3989         return 1;
3990 }
3991