/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
#include "xfs_log.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_error.h"
#include "xfs_log_priv.h"
#include "xfs_buf_item.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_log_recover.h"
#include "xfs_trans_priv.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_trace.h"
#include "xfs_fsops.h"
#include "xfs_cksum.h"

kmem_zone_t     *xfs_log_ticket_zone;

/* Local miscellaneous function prototypes */
STATIC int
xlog_commit_record(
        struct xlog             *log,
        struct xlog_ticket      *ticket,
        struct xlog_in_core     **iclog,
        xfs_lsn_t               *commitlsnp);

STATIC struct xlog *
xlog_alloc_log(
        struct xfs_mount        *mp,
        struct xfs_buftarg      *log_target,
        xfs_daddr_t             blk_offset,
        int                     num_bblks);
STATIC int
xlog_space_left(
        struct xlog             *log,
        atomic64_t              *head);
STATIC int
xlog_sync(
        struct xlog             *log,
        struct xlog_in_core     *iclog);
STATIC void
xlog_dealloc_log(
        struct xlog             *log);

/* local state machine functions */
STATIC void xlog_state_done_syncing(xlog_in_core_t *iclog, int);
STATIC void
xlog_state_do_callback(
        struct xlog             *log,
        int                     aborted,
        struct xlog_in_core     *iclog);
STATIC int
xlog_state_get_iclog_space(
        struct xlog             *log,
        int                     len,
        struct xlog_in_core     **iclog,
        struct xlog_ticket      *ticket,
        int                     *continued_write,
        int                     *logoffsetp);
STATIC int
xlog_state_release_iclog(
        struct xlog             *log,
        struct xlog_in_core     *iclog);
STATIC void
xlog_state_switch_iclogs(
        struct xlog             *log,
        struct xlog_in_core     *iclog,
        int                     eventual_size);
STATIC void
xlog_state_want_sync(
        struct xlog             *log,
        struct xlog_in_core     *iclog);

STATIC void
xlog_grant_push_ail(
        struct xlog             *log,
        int                     need_bytes);
STATIC void
xlog_regrant_reserve_log_space(
        struct xlog             *log,
        struct xlog_ticket      *ticket);
STATIC void
xlog_ungrant_log_space(
        struct xlog             *log,
        struct xlog_ticket      *ticket);

#if defined(DEBUG)
STATIC void
xlog_verify_dest_ptr(
        struct xlog             *log,
        char                    *ptr);
STATIC void
xlog_verify_grant_tail(
        struct xlog *log);
STATIC void
xlog_verify_iclog(
        struct xlog             *log,
        struct xlog_in_core     *iclog,
        int                     count,
        bool                    syncing);
STATIC void
xlog_verify_tail_lsn(
        struct xlog             *log,
        struct xlog_in_core     *iclog,
        xfs_lsn_t               tail_lsn);
#else
#define xlog_verify_dest_ptr(a,b)
#define xlog_verify_grant_tail(a)
#define xlog_verify_iclog(a,b,c,d)
#define xlog_verify_tail_lsn(a,b,c)
#endif

STATIC int
xlog_iclogs_empty(
        struct xlog             *log);

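/*
 * Grant heads pack a cycle number and a byte offset into a single atomic
 * 64-bit value so they can be sampled and updated without a lock.  The two
 * helpers below crack the packed value apart, adjust it (wrapping the cycle
 * when the byte count crosses a log-size boundary) and retry the
 * atomic64_cmpxchg() until no other CPU has raced an update in between the
 * read and the exchange.
 */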
static void
xlog_grant_sub_space(
        struct xlog             *log,
        atomic64_t              *head,
        int                     bytes)
{
        int64_t head_val = atomic64_read(head);
        int64_t new, old;

        do {
                int     cycle, space;

                xlog_crack_grant_head_val(head_val, &cycle, &space);

                space -= bytes;
                if (space < 0) {
                        space += log->l_logsize;
                        cycle--;
                }

                old = head_val;
                new = xlog_assign_grant_head_val(cycle, space);
                head_val = atomic64_cmpxchg(head, old, new);
        } while (head_val != old);
}

static void
xlog_grant_add_space(
        struct xlog             *log,
        atomic64_t              *head,
        int                     bytes)
{
        int64_t head_val = atomic64_read(head);
        int64_t new, old;

        do {
                int             tmp;
                int             cycle, space;

                xlog_crack_grant_head_val(head_val, &cycle, &space);

                tmp = log->l_logsize - space;
                if (tmp > bytes)
                        space += bytes;
                else {
                        space = bytes - tmp;
                        cycle++;
                }

                old = head_val;
                new = xlog_assign_grant_head_val(cycle, space);
                head_val = atomic64_cmpxchg(head, old, new);
        } while (head_val != old);
}

STATIC void
xlog_grant_head_init(
        struct xlog_grant_head  *head)
{
        xlog_assign_grant_head(&head->grant, 1, 0);
        INIT_LIST_HEAD(&head->waiters);
        spin_lock_init(&head->lock);
}

STATIC void
xlog_grant_head_wake_all(
        struct xlog_grant_head  *head)
{
        struct xlog_ticket      *tic;

        spin_lock(&head->lock);
        list_for_each_entry(tic, &head->waiters, t_queue)
                wake_up_process(tic->t_task);
        spin_unlock(&head->lock);
}

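/*
 * Return the number of bytes this ticket needs on the given grant head.
 * Regrants on the write head only ever need a single unit, while a permanent
 * ticket on the reserve head must cover all of its remaining transaction
 * counts up front.
 */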
static inline int
xlog_ticket_reservation(
        struct xlog             *log,
        struct xlog_grant_head  *head,
        struct xlog_ticket      *tic)
{
        if (head == &log->l_write_head) {
                ASSERT(tic->t_flags & XLOG_TIC_PERM_RESERV);
                return tic->t_unit_res;
        } else {
                if (tic->t_flags & XLOG_TIC_PERM_RESERV)
                        return tic->t_unit_res * tic->t_cnt;
                else
                        return tic->t_unit_res;
        }
}

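/*
 * Wake queued waiters in order as long as *free_bytes covers each waiter's
 * reservation.  Returns false as soon as a waiter is found that cannot be
 * satisfied with the remaining space, true if every waiter was woken.
 * Called with head->lock held.
 */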
STATIC bool
xlog_grant_head_wake(
        struct xlog             *log,
        struct xlog_grant_head  *head,
        int                     *free_bytes)
{
        struct xlog_ticket      *tic;
        int                     need_bytes;

        list_for_each_entry(tic, &head->waiters, t_queue) {
                need_bytes = xlog_ticket_reservation(log, head, tic);
                if (*free_bytes < need_bytes)
                        return false;

                *free_bytes -= need_bytes;
                trace_xfs_log_grant_wake_up(log, tic);
                wake_up_process(tic->t_task);
        }

        return true;
}

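/*
 * Queue the ticket on the grant head and sleep until enough space is
 * available, pushing the AIL each time around the loop to encourage the
 * tail of the log to move.  Returns 0 with the ticket dequeued once the
 * reservation can be satisfied, or EIO if the log is forced into shutdown
 * while we wait.  Entered and exited with head->lock held; the lock is
 * dropped across the schedule().
 */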
STATIC int
xlog_grant_head_wait(
        struct xlog             *log,
        struct xlog_grant_head  *head,
        struct xlog_ticket      *tic,
        int                     need_bytes) __releases(&head->lock)
                                            __acquires(&head->lock)
{
        list_add_tail(&tic->t_queue, &head->waiters);

        do {
                if (XLOG_FORCED_SHUTDOWN(log))
                        goto shutdown;
                xlog_grant_push_ail(log, need_bytes);

                __set_current_state(TASK_UNINTERRUPTIBLE);
                spin_unlock(&head->lock);

                XFS_STATS_INC(xs_sleep_logspace);

                trace_xfs_log_grant_sleep(log, tic);
                schedule();
                trace_xfs_log_grant_wake(log, tic);

                spin_lock(&head->lock);
                if (XLOG_FORCED_SHUTDOWN(log))
                        goto shutdown;
        } while (xlog_space_left(log, &head->grant) < need_bytes);

        list_del_init(&tic->t_queue);
        return 0;
shutdown:
        list_del_init(&tic->t_queue);
        return XFS_ERROR(EIO);
}

/*
 * Atomically get the log space required for a log ticket.
 *
 * Once a ticket gets put onto head->waiters, it will only return after the
 * needed reservation is satisfied.
 *
 * This function is structured so that it has a lock free fast path. This is
 * necessary because every new transaction reservation will come through this
 * path. Hence any lock will be globally hot if we take it unconditionally on
 * every pass.
 *
 * As tickets are only ever moved on and off head->waiters under head->lock, we
 * only need to take that lock if we are going to add the ticket to the queue
 * and sleep. We can avoid taking the lock if the ticket was never added to
 * head->waiters because the t_queue list head will be empty and we hold the
 * only reference to it so it can safely be checked unlocked.
 */
STATIC int
xlog_grant_head_check(
        struct xlog             *log,
        struct xlog_grant_head  *head,
        struct xlog_ticket      *tic,
        int                     *need_bytes)
{
        int                     free_bytes;
        int                     error = 0;

        ASSERT(!(log->l_flags & XLOG_ACTIVE_RECOVERY));

        /*
         * If there are other waiters on the queue then give them a chance at
         * logspace before us.  Wake up the first waiters; if we do not wake
         * up all the waiters, go to sleep waiting for more free space,
         * otherwise try to get some space for this transaction.
         */
        *need_bytes = xlog_ticket_reservation(log, head, tic);
        free_bytes = xlog_space_left(log, &head->grant);
        if (!list_empty_careful(&head->waiters)) {
                spin_lock(&head->lock);
                if (!xlog_grant_head_wake(log, head, &free_bytes) ||
                    free_bytes < *need_bytes) {
                        error = xlog_grant_head_wait(log, head, tic,
                                                     *need_bytes);
                }
                spin_unlock(&head->lock);
        } else if (free_bytes < *need_bytes) {
                spin_lock(&head->lock);
                error = xlog_grant_head_wait(log, head, tic, *need_bytes);
                spin_unlock(&head->lock);
        }

        return error;
}

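/*
 * Tickets track the log regions they have reserved space for in a small
 * fixed-size array so that usage can be reported, e.g. when a ticket
 * overruns its reservation.  When the array fills up, the accumulated
 * length is folded into an overflow counter and the array is reused from
 * the start.
 */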
static void
xlog_tic_reset_res(xlog_ticket_t *tic)
{
        tic->t_res_num = 0;
        tic->t_res_arr_sum = 0;
        tic->t_res_num_ophdrs = 0;
}

static void
xlog_tic_add_region(xlog_ticket_t *tic, uint len, uint type)
{
        if (tic->t_res_num == XLOG_TIC_LEN_MAX) {
                /* add to overflow and start again */
                tic->t_res_o_flow += tic->t_res_arr_sum;
                tic->t_res_num = 0;
                tic->t_res_arr_sum = 0;
        }

        tic->t_res_arr[tic->t_res_num].r_len = len;
        tic->t_res_arr[tic->t_res_num].r_type = type;
        tic->t_res_arr_sum += len;
        tic->t_res_num++;
}

/*
 * Replenish the byte reservation required by moving the grant write head.
 */
int
xfs_log_regrant(
        struct xfs_mount        *mp,
        struct xlog_ticket      *tic)
{
        struct xlog             *log = mp->m_log;
        int                     need_bytes;
        int                     error = 0;

        if (XLOG_FORCED_SHUTDOWN(log))
                return XFS_ERROR(EIO);

        XFS_STATS_INC(xs_try_logspace);

        /*
         * This is a new transaction on the ticket, so we need to change the
         * transaction ID so that the next transaction has a different TID in
         * the log. Just add one to the existing tid so that we can see chains
         * of rolling transactions in the log easily.
         */
        tic->t_tid++;

        xlog_grant_push_ail(log, tic->t_unit_res);

        tic->t_curr_res = tic->t_unit_res;
        xlog_tic_reset_res(tic);

        if (tic->t_cnt > 0)
                return 0;

        trace_xfs_log_regrant(log, tic);

        error = xlog_grant_head_check(log, &log->l_write_head, tic,
                                      &need_bytes);
        if (error)
                goto out_error;

        xlog_grant_add_space(log, &log->l_write_head.grant, need_bytes);
        trace_xfs_log_regrant_exit(log, tic);
        xlog_verify_grant_tail(log);
        return 0;

out_error:
        /*
         * If we are failing, make sure the ticket doesn't have any current
         * reservations.  We don't want to add this back when the ticket/
         * transaction gets cancelled.
         */
        tic->t_curr_res = 0;
        tic->t_cnt = 0; /* ungrant will give back unit_res * t_cnt. */
        return error;
}

/*
 * Reserve log space and return a ticket corresponding to the reservation.
 *
 * Each reservation is going to reserve extra space for a log record header.
 * When writes happen to the on-disk log, we don't subtract the length of the
 * log record header from any reservation.  By wasting space in each
 * reservation, we prevent over-allocation problems.
 */
int
xfs_log_reserve(
        struct xfs_mount        *mp,
        int                     unit_bytes,
        int                     cnt,
        struct xlog_ticket      **ticp,
        __uint8_t               client,
        bool                    permanent,
        uint                    t_type)
{
        struct xlog             *log = mp->m_log;
        struct xlog_ticket      *tic;
        int                     need_bytes;
        int                     error = 0;

        ASSERT(client == XFS_TRANSACTION || client == XFS_LOG);

        if (XLOG_FORCED_SHUTDOWN(log))
                return XFS_ERROR(EIO);

        XFS_STATS_INC(xs_try_logspace);

        ASSERT(*ticp == NULL);
        tic = xlog_ticket_alloc(log, unit_bytes, cnt, client, permanent,
                                KM_SLEEP | KM_MAYFAIL);
        if (!tic)
                return XFS_ERROR(ENOMEM);

        tic->t_trans_type = t_type;
        *ticp = tic;

        xlog_grant_push_ail(log, tic->t_cnt ? tic->t_unit_res * tic->t_cnt
                                            : tic->t_unit_res);

        trace_xfs_log_reserve(log, tic);

        error = xlog_grant_head_check(log, &log->l_reserve_head, tic,
                                      &need_bytes);
        if (error)
                goto out_error;

        xlog_grant_add_space(log, &log->l_reserve_head.grant, need_bytes);
        xlog_grant_add_space(log, &log->l_write_head.grant, need_bytes);
        trace_xfs_log_reserve_exit(log, tic);
        xlog_verify_grant_tail(log);
        return 0;

out_error:
        /*
         * If we are failing, make sure the ticket doesn't have any current
         * reservations.  We don't want to add this back when the ticket/
         * transaction gets cancelled.
         */
        tic->t_curr_res = 0;
        tic->t_cnt = 0; /* ungrant will give back unit_res * t_cnt. */
        return error;
}

/*
 * NOTES:
 *
 *      1. currblock field gets updated at startup and after in-core logs
 *              are marked with WANT_SYNC.
 */

/*
 * This routine is called when a user of a log manager ticket is done with
 * the reservation.  If the ticket was ever used, then a commit record for
 * the associated transaction is written out as a log operation header with
 * no data.  The flag XLOG_TIC_INITED is set when the first write occurs with
 * a given ticket.  If the ticket was one with a permanent reservation, then
 * a few operations are done differently.  Permanent reservation tickets by
 * default don't release the reservation.  They just commit the current
 * transaction with the belief that the reservation is still needed.  A flag
 * must be passed in before permanent reservations are actually released.
 * When these types of tickets are not released, they need to be set into
 * the inited state again.  By doing this, a start record will be written
 * out when the next write occurs.
 */
xfs_lsn_t
xfs_log_done(
        struct xfs_mount        *mp,
        struct xlog_ticket      *ticket,
        struct xlog_in_core     **iclog,
        uint                    flags)
{
        struct xlog             *log = mp->m_log;
        xfs_lsn_t               lsn = 0;

        if (XLOG_FORCED_SHUTDOWN(log) ||
            /*
             * If nothing was ever written, don't write out commit record.
             * If we get an error, just continue and give back the log ticket.
             */
            (((ticket->t_flags & XLOG_TIC_INITED) == 0) &&
             (xlog_commit_record(log, ticket, iclog, &lsn)))) {
                lsn = (xfs_lsn_t) -1;
                if (ticket->t_flags & XLOG_TIC_PERM_RESERV) {
                        flags |= XFS_LOG_REL_PERM_RESERV;
                }
        }

        if ((ticket->t_flags & XLOG_TIC_PERM_RESERV) == 0 ||
            (flags & XFS_LOG_REL_PERM_RESERV)) {
                trace_xfs_log_done_nonperm(log, ticket);

                /*
                 * Release ticket if not permanent reservation or a specific
                 * request has been made to release a permanent reservation.
                 */
                xlog_ungrant_log_space(log, ticket);
                xfs_log_ticket_put(ticket);
        } else {
                trace_xfs_log_done_perm(log, ticket);

                xlog_regrant_reserve_log_space(log, ticket);
                /* If this ticket was a permanent reservation and we aren't
                 * trying to release it, reset the inited flags; so next time
                 * we write, a start record will be written out.
                 */
                ticket->t_flags |= XLOG_TIC_INITED;
        }

        return lsn;
}

/*
 * Attaches a new iclog I/O completion callback routine during
 * transaction commit.  If the log is in error state, a non-zero
 * return code is handed back and the caller is responsible for
 * executing the callback at an appropriate time.
 */
int
xfs_log_notify(
        struct xfs_mount        *mp,
        struct xlog_in_core     *iclog,
        xfs_log_callback_t      *cb)
{
        int     abortflg;

        spin_lock(&iclog->ic_callback_lock);
        abortflg = (iclog->ic_state & XLOG_STATE_IOERROR);
        if (!abortflg) {
                ASSERT_ALWAYS((iclog->ic_state == XLOG_STATE_ACTIVE) ||
                              (iclog->ic_state == XLOG_STATE_WANT_SYNC));
                cb->cb_next = NULL;
                *(iclog->ic_callback_tail) = cb;
                iclog->ic_callback_tail = &(cb->cb_next);
        }
        spin_unlock(&iclog->ic_callback_lock);
        return abortflg;
}

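/*
 * Drop the reference to an iclog taken at commit time.  If releasing it
 * fails the log is already in trouble, so shut the filesystem down and
 * report EIO to the caller.
 */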
int
xfs_log_release_iclog(
        struct xfs_mount        *mp,
        struct xlog_in_core     *iclog)
{
        if (xlog_state_release_iclog(mp->m_log, iclog)) {
                xfs_force_shutdown(mp, SHUTDOWN_LOG_IO_ERROR);
                return EIO;
        }

        return 0;
}

/*
 * Mount a log filesystem
 *
 * mp           - ubiquitous xfs mount point structure
 * log_target   - buftarg of on-disk log device
 * blk_offset   - Start block # where block size is 512 bytes (BBSIZE)
 * num_bblks    - Number of BBSIZE blocks in on-disk log
 *
 * Return error or zero.
 */
int
xfs_log_mount(
        xfs_mount_t     *mp,
        xfs_buftarg_t   *log_target,
        xfs_daddr_t     blk_offset,
        int             num_bblks)
{
        int             error = 0;
        int             min_logfsbs;

        if (!(mp->m_flags & XFS_MOUNT_NORECOVERY))
                xfs_notice(mp, "Mounting Filesystem");
        else {
                xfs_notice(mp,
"Mounting filesystem in no-recovery mode.  Filesystem will be inconsistent.");
                ASSERT(mp->m_flags & XFS_MOUNT_RDONLY);
        }

        mp->m_log = xlog_alloc_log(mp, log_target, blk_offset, num_bblks);
        if (IS_ERR(mp->m_log)) {
                error = -PTR_ERR(mp->m_log);
                goto out;
        }

        /*
         * Validate the given log space and drop a critical message via syslog
         * if the log size is too small, which would lead to unexpected
         * situations in the transaction log space reservation stage.
         *
         * Note: we can't just reject the mount if the validation fails.  This
         * would mean that people would have to downgrade their kernel just to
         * remedy the situation as there is no way to grow the log (short of
         * black magic surgery with xfs_db).
         *
         * We can, however, reject mounts for CRC format filesystems, as the
         * mkfs binary being used to make the filesystem should never create a
         * filesystem with a log that is too small.
         */
        min_logfsbs = xfs_log_calc_minimum_size(mp);

        if (mp->m_sb.sb_logblocks < min_logfsbs) {
                xfs_warn(mp,
                "Log size %d blocks too small, minimum size is %d blocks",
                         mp->m_sb.sb_logblocks, min_logfsbs);
                error = EINVAL;
        } else if (mp->m_sb.sb_logblocks > XFS_MAX_LOG_BLOCKS) {
                xfs_warn(mp,
                "Log size %d blocks too large, maximum size is %lld blocks",
                         mp->m_sb.sb_logblocks, XFS_MAX_LOG_BLOCKS);
                error = EINVAL;
        } else if (XFS_FSB_TO_B(mp, mp->m_sb.sb_logblocks) > XFS_MAX_LOG_BYTES) {
                xfs_warn(mp,
                "log size %lld bytes too large, maximum size is %lld bytes",
                         XFS_FSB_TO_B(mp, mp->m_sb.sb_logblocks),
                         XFS_MAX_LOG_BYTES);
                error = EINVAL;
        }
        if (error) {
                if (xfs_sb_version_hascrc(&mp->m_sb)) {
                        xfs_crit(mp, "AAIEEE! Log failed size checks. Abort!");
                        ASSERT(0);
                        goto out_free_log;
                }
                xfs_crit(mp,
"Log size out of supported range. Continuing onwards, but if log hangs are\n"
"experienced then please report this message in the bug report.");
        }

        /*
         * Initialize the AIL now we have a log.
         */
        error = xfs_trans_ail_init(mp);
        if (error) {
                xfs_warn(mp, "AIL initialisation failed: error %d", error);
                goto out_free_log;
        }
        mp->m_log->l_ailp = mp->m_ail;

        /*
         * skip log recovery on a norecovery mount.  pretend it all
         * just worked.
         */
        if (!(mp->m_flags & XFS_MOUNT_NORECOVERY)) {
                int     readonly = (mp->m_flags & XFS_MOUNT_RDONLY);

                if (readonly)
                        mp->m_flags &= ~XFS_MOUNT_RDONLY;

                error = xlog_recover(mp->m_log);

                if (readonly)
                        mp->m_flags |= XFS_MOUNT_RDONLY;
                if (error) {
                        xfs_warn(mp, "log mount/recovery failed: error %d",
                                error);
                        goto out_destroy_ail;
                }
        }

        /* Normal transactions can now occur */
        mp->m_log->l_flags &= ~XLOG_ACTIVE_RECOVERY;

        /*
         * Now that the log has been fully initialised and we know where our
         * space grant counters are, we can initialise the permanent ticket
         * needed for delayed logging to work.
         */
        xlog_cil_init_post_recovery(mp->m_log);

        return 0;

out_destroy_ail:
        xfs_trans_ail_destroy(mp);
out_free_log:
        xlog_dealloc_log(mp->m_log);
out:
        return error;
}

/*
 * Finish the recovery of the file system.  This is separate from the
 * xfs_log_mount() call, because it depends on the code in xfs_mountfs() to read
 * in the root and real-time bitmap inodes between calling xfs_log_mount() and
 * here.
 *
 * If we finish recovery successfully, start the background log work. If we are
 * not doing recovery, then we have a RO filesystem and we don't need to start
 * it.
 */
int
xfs_log_mount_finish(xfs_mount_t *mp)
{
        int     error = 0;

        if (!(mp->m_flags & XFS_MOUNT_NORECOVERY)) {
                error = xlog_recover_finish(mp->m_log);
                if (!error)
                        xfs_log_work_queue(mp);
        } else {
                ASSERT(mp->m_flags & XFS_MOUNT_RDONLY);
        }

        return error;
}

/*
 * Final log writes as part of unmount.
 *
 * Mark the filesystem clean as unmount happens.  Note that during relocation
 * this routine needs to be executed as part of source-bag while the
 * deallocation must not be done until source-end.
 */

/*
 * Unmount record used to have a string "Unmount filesystem--" in the
 * data section where the "Un" was really a magic number (XLOG_UNMOUNT_TYPE).
 * We just write the magic number now since that particular field isn't
 * currently architecture converted and "Unmount" is a bit foo.
 * As far as I know, there weren't any dependencies on the old behaviour.
 */

int
xfs_log_unmount_write(xfs_mount_t *mp)
{
        struct xlog      *log = mp->m_log;
        xlog_in_core_t   *iclog;
#ifdef DEBUG
        xlog_in_core_t   *first_iclog;
#endif
        xlog_ticket_t   *tic = NULL;
        xfs_lsn_t        lsn;
        int              error;

        /*
         * Don't write out unmount record on read-only mounts.
         * Or, if we are doing a forced umount (typically because of IO errors).
         */
        if (mp->m_flags & XFS_MOUNT_RDONLY)
                return 0;

        error = _xfs_log_force(mp, XFS_LOG_SYNC, NULL);
        ASSERT(error || !(XLOG_FORCED_SHUTDOWN(log)));

#ifdef DEBUG
        first_iclog = iclog = log->l_iclog;
        do {
                if (!(iclog->ic_state & XLOG_STATE_IOERROR)) {
                        ASSERT(iclog->ic_state & XLOG_STATE_ACTIVE);
                        ASSERT(iclog->ic_offset == 0);
                }
                iclog = iclog->ic_next;
        } while (iclog != first_iclog);
#endif
        if (!XLOG_FORCED_SHUTDOWN(log)) {
                error = xfs_log_reserve(mp, 600, 1, &tic,
                                        XFS_LOG, 0, XLOG_UNMOUNT_REC_TYPE);
                if (!error) {
                        /* the data section must be 32 bit size aligned */
                        struct {
                            __uint16_t magic;
                            __uint16_t pad1;
                            __uint32_t pad2; /* may as well make it 64 bits */
                        } magic = {
                                .magic = XLOG_UNMOUNT_TYPE,
                        };
                        struct xfs_log_iovec reg = {
                                .i_addr = &magic,
                                .i_len = sizeof(magic),
                                .i_type = XLOG_REG_TYPE_UNMOUNT,
                        };
                        struct xfs_log_vec vec = {
                                .lv_niovecs = 1,
                                .lv_iovecp = &reg,
                        };

                        /* remove inited flag, and account for space used */
                        tic->t_flags = 0;
                        tic->t_curr_res -= sizeof(magic);
                        error = xlog_write(log, &vec, tic, &lsn,
                                           NULL, XLOG_UNMOUNT_TRANS);
                        /*
                         * At this point, we're umounting anyway,
                         * so there's no point in transitioning log state
                         * to IOERROR. Just continue...
                         */
                }

                if (error)
                        xfs_alert(mp, "%s: unmount record failed", __func__);

                spin_lock(&log->l_icloglock);
                iclog = log->l_iclog;
                atomic_inc(&iclog->ic_refcnt);
                xlog_state_want_sync(log, iclog);
                spin_unlock(&log->l_icloglock);
                error = xlog_state_release_iclog(log, iclog);

                spin_lock(&log->l_icloglock);
                if (!(iclog->ic_state == XLOG_STATE_ACTIVE ||
                      iclog->ic_state == XLOG_STATE_DIRTY)) {
                        if (!XLOG_FORCED_SHUTDOWN(log)) {
                                xlog_wait(&iclog->ic_force_wait,
                                                        &log->l_icloglock);
                        } else {
                                spin_unlock(&log->l_icloglock);
                        }
                } else {
                        spin_unlock(&log->l_icloglock);
                }
                if (tic) {
                        trace_xfs_log_umount_write(log, tic);
                        xlog_ungrant_log_space(log, tic);
                        xfs_log_ticket_put(tic);
                }
        } else {
                /*
                 * We're already in forced_shutdown mode, couldn't
                 * even attempt to write out the unmount transaction.
                 *
                 * Go through the motions of sync'ing and releasing
                 * the iclog, even though no I/O will actually happen,
                 * we need to wait for other log I/Os that may already
                 * be in progress.  Do this as a separate section of
                 * code so we'll know if we ever get stuck here that
                 * we're in this odd situation of trying to unmount
                 * a file system that went into forced_shutdown as
                 * the result of an unmount..
                 */
                spin_lock(&log->l_icloglock);
                iclog = log->l_iclog;
                atomic_inc(&iclog->ic_refcnt);

                xlog_state_want_sync(log, iclog);
                spin_unlock(&log->l_icloglock);
                error = xlog_state_release_iclog(log, iclog);

                spin_lock(&log->l_icloglock);

                if (!(iclog->ic_state == XLOG_STATE_ACTIVE ||
                      iclog->ic_state == XLOG_STATE_DIRTY ||
                      iclog->ic_state == XLOG_STATE_IOERROR)) {
                        xlog_wait(&iclog->ic_force_wait,
                                                &log->l_icloglock);
                } else {
                        spin_unlock(&log->l_icloglock);
                }
        }

        return error;
}       /* xfs_log_unmount_write */

/*
 * Empty the log for unmount/freeze.
 *
 * To do this, we first need to shut down the background log work so it is not
 * trying to cover the log as we clean up. We then need to unpin all objects in
 * the log so we can then flush them out. Once they have completed their IO and
 * run the callbacks removing themselves from the AIL, we can write the unmount
 * record.
 */
void
xfs_log_quiesce(
        struct xfs_mount        *mp)
{
        cancel_delayed_work_sync(&mp->m_log->l_work);
        xfs_log_force(mp, XFS_LOG_SYNC);

        /*
         * The superblock buffer is uncached and while xfs_ail_push_all_sync()
         * will push it, xfs_wait_buftarg() will not wait for it. Further,
         * xfs_buf_iowait() cannot be used because it was pushed with the
         * XBF_ASYNC flag set, so we need to use a lock/unlock pair to wait for
         * the IO to complete.
         */
        xfs_ail_push_all_sync(mp->m_ail);
        xfs_wait_buftarg(mp->m_ddev_targp);
        xfs_buf_lock(mp->m_sb_bp);
        xfs_buf_unlock(mp->m_sb_bp);

        xfs_log_unmount_write(mp);
}

/*
 * Shut down and release the AIL and Log.
 *
 * During unmount, we need to ensure we flush all the dirty metadata objects
 * from the AIL so that the log is empty before we write the unmount record to
 * the log. Once this is done, we can tear down the AIL and the log.
 */
void
xfs_log_unmount(
        struct xfs_mount        *mp)
{
        xfs_log_quiesce(mp);

        xfs_trans_ail_destroy(mp);
        xlog_dealloc_log(mp->m_log);
}

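/*
 * Initialize the fields common to all log item types.  This must happen
 * before the item is used, as both the CIL and the AIL rely on the list
 * heads being initialised.
 */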
void
xfs_log_item_init(
        struct xfs_mount        *mp,
        struct xfs_log_item     *item,
        int                     type,
        const struct xfs_item_ops *ops)
{
        item->li_mountp = mp;
        item->li_ailp = mp->m_ail;
        item->li_type = type;
        item->li_ops = ops;
        item->li_lv = NULL;

        INIT_LIST_HEAD(&item->li_ail);
        INIT_LIST_HEAD(&item->li_cil);
}

/*
 * Wake up processes waiting for log space after we have moved the log tail.
 */
void
xfs_log_space_wake(
        struct xfs_mount        *mp)
{
        struct xlog             *log = mp->m_log;
        int                     free_bytes;

        if (XLOG_FORCED_SHUTDOWN(log))
                return;

        if (!list_empty_careful(&log->l_write_head.waiters)) {
                ASSERT(!(log->l_flags & XLOG_ACTIVE_RECOVERY));

                spin_lock(&log->l_write_head.lock);
                free_bytes = xlog_space_left(log, &log->l_write_head.grant);
                xlog_grant_head_wake(log, &log->l_write_head, &free_bytes);
                spin_unlock(&log->l_write_head.lock);
        }

        if (!list_empty_careful(&log->l_reserve_head.waiters)) {
                ASSERT(!(log->l_flags & XLOG_ACTIVE_RECOVERY));

                spin_lock(&log->l_reserve_head.lock);
                free_bytes = xlog_space_left(log, &log->l_reserve_head.grant);
                xlog_grant_head_wake(log, &log->l_reserve_head, &free_bytes);
                spin_unlock(&log->l_reserve_head.lock);
        }
}

/*
 * Determine if we have a transaction that has gone to disk
 * that needs to be covered. To begin the transition to the idle state
 * firstly the log needs to be idle (no AIL and nothing in the iclogs).
 * If we are then in a state where covering is needed, the caller is informed
 * that dummy transactions are required to move the log into the idle state.
 *
 * Because this is called as part of the sync process, we should also indicate
 * that dummy transactions should be issued in anything but the covered or
 * idle states. This ensures that the log tail is accurately reflected in
 * the log at the end of the sync, hence if a crash occurs we avoid replaying
 * transactions whose metadata is already on disk.
 */
int
xfs_log_need_covered(xfs_mount_t *mp)
{
        int             needed = 0;
        struct xlog     *log = mp->m_log;

        if (!xfs_fs_writable(mp))
                return 0;

        spin_lock(&log->l_icloglock);
        switch (log->l_covered_state) {
        case XLOG_STATE_COVER_DONE:
        case XLOG_STATE_COVER_DONE2:
        case XLOG_STATE_COVER_IDLE:
                break;
        case XLOG_STATE_COVER_NEED:
        case XLOG_STATE_COVER_NEED2:
                if (!xfs_ail_min_lsn(log->l_ailp) &&
                    xlog_iclogs_empty(log)) {
                        if (log->l_covered_state == XLOG_STATE_COVER_NEED)
                                log->l_covered_state = XLOG_STATE_COVER_DONE;
                        else
                                log->l_covered_state = XLOG_STATE_COVER_DONE2;
                }
                /* FALLTHRU */
        default:
                needed = 1;
                break;
        }
        spin_unlock(&log->l_icloglock);
        return needed;
}

/*
 * We may be holding the log iclog lock upon entering this routine.
 */
xfs_lsn_t
xlog_assign_tail_lsn_locked(
        struct xfs_mount        *mp)
{
        struct xlog             *log = mp->m_log;
        struct xfs_log_item     *lip;
        xfs_lsn_t               tail_lsn;

        assert_spin_locked(&mp->m_ail->xa_lock);

        /*
         * To make sure we always have a valid LSN for the log tail we keep
         * track of the last LSN which was committed in log->l_last_sync_lsn,
         * and use that when the AIL is empty.
         */
        lip = xfs_ail_min(mp->m_ail);
        if (lip)
                tail_lsn = lip->li_lsn;
        else
                tail_lsn = atomic64_read(&log->l_last_sync_lsn);
        atomic64_set(&log->l_tail_lsn, tail_lsn);
        return tail_lsn;
}

xfs_lsn_t
xlog_assign_tail_lsn(
        struct xfs_mount        *mp)
{
        xfs_lsn_t               tail_lsn;

        spin_lock(&mp->m_ail->xa_lock);
        tail_lsn = xlog_assign_tail_lsn_locked(mp);
        spin_unlock(&mp->m_ail->xa_lock);

        return tail_lsn;
}

/*
 * Return the space in the log between the tail and the head.  The head
 * is passed in the cycle/bytes formal parms.  In the special case where
 * the reserve head has wrapped past the tail, this calculation is no
 * longer valid.  In this case, just return 0 which means there is no space
 * in the log.  This works for all places where this function is called
 * with the reserve head.  Of course, if the write head were to ever
 * wrap the tail, we should blow up.  Rather than catch this case here,
 * we depend on other ASSERTions in other parts of the code.   XXXmiken
 *
 * This code also handles the case where the reservation head is behind
 * the tail.  The details of this case are described below, but the end
 * result is that we return the size of the log as the amount of space left.
 */
STATIC int
xlog_space_left(
        struct xlog     *log,
        atomic64_t      *head)
{
        int             free_bytes;
        int             tail_bytes;
        int             tail_cycle;
        int             head_cycle;
        int             head_bytes;

        xlog_crack_grant_head(head, &head_cycle, &head_bytes);
        xlog_crack_atomic_lsn(&log->l_tail_lsn, &tail_cycle, &tail_bytes);
        tail_bytes = BBTOB(tail_bytes);
        if (tail_cycle == head_cycle && head_bytes >= tail_bytes)
                free_bytes = log->l_logsize - (head_bytes - tail_bytes);
        else if (tail_cycle + 1 < head_cycle)
                return 0;
        else if (tail_cycle < head_cycle) {
                ASSERT(tail_cycle == (head_cycle - 1));
                free_bytes = tail_bytes - head_bytes;
        } else {
                /*
                 * The reservation head is behind the tail.
                 * In this case we just want to return the size of the
                 * log as the amount of space left.
                 */
                xfs_alert(log->l_mp,
                        "xlog_space_left: head behind tail\n"
                        "  tail_cycle = %d, tail_bytes = %d\n"
                        "  GH   cycle = %d, GH   bytes = %d",
                        tail_cycle, tail_bytes, head_cycle, head_bytes);
                ASSERT(0);
                free_bytes = log->l_logsize;
        }
        return free_bytes;
}

/*
 * Log function which is called when an io completes.
 *
 * The log manager needs its own routine, in order to control what
 * happens with the buffer after the write completes.
 */
void
xlog_iodone(xfs_buf_t *bp)
{
        struct xlog_in_core     *iclog = bp->b_fspriv;
        struct xlog             *l = iclog->ic_log;
        int                     aborted = 0;

        /*
         * Race to shutdown the filesystem if we see an error.
         */
        if (XFS_TEST_ERROR((xfs_buf_geterror(bp)), l->l_mp,
                        XFS_ERRTAG_IODONE_IOERR, XFS_RANDOM_IODONE_IOERR)) {
                xfs_buf_ioerror_alert(bp, __func__);
                xfs_buf_stale(bp);
                xfs_force_shutdown(l->l_mp, SHUTDOWN_LOG_IO_ERROR);
                /*
                 * This flag will be propagated to the trans-committed
                 * callback routines to let them know that the log-commit
                 * didn't succeed.
                 */
                aborted = XFS_LI_ABORTED;
        } else if (iclog->ic_state & XLOG_STATE_IOERROR) {
                aborted = XFS_LI_ABORTED;
        }

        /* log I/O is always issued ASYNC */
        ASSERT(XFS_BUF_ISASYNC(bp));
        xlog_state_done_syncing(iclog, aborted);
        /*
         * do not reference the buffer (bp) here as we could race
         * with it being freed after writing the unmount record to the
         * log.
         */
}

/*
 * Return size of each in-core log record buffer.
 *
 * All machines get 8 x 32kB buffers by default, unless tuned otherwise.
 *
 * If the filesystem blocksize is too large, we may need to choose a
 * larger size since the directory code currently logs entire blocks.
 */
STATIC void
xlog_get_iclog_buffer_size(
        struct xfs_mount        *mp,
        struct xlog             *log)
{
        int size;
        int xhdrs;

        if (mp->m_logbufs <= 0)
                log->l_iclog_bufs = XLOG_MAX_ICLOGS;
        else
                log->l_iclog_bufs = mp->m_logbufs;

        /*
         * Buffer size passed in from mount system call.
         */
        if (mp->m_logbsize > 0) {
                size = log->l_iclog_size = mp->m_logbsize;
                log->l_iclog_size_log = 0;
                while (size != 1) {
                        log->l_iclog_size_log++;
                        size >>= 1;
                }

                if (xfs_sb_version_haslogv2(&mp->m_sb)) {
                        /* # headers = size / 32k
                         * one header holds cycles from 32k of data
                         */

                        xhdrs = mp->m_logbsize / XLOG_HEADER_CYCLE_SIZE;
                        if (mp->m_logbsize % XLOG_HEADER_CYCLE_SIZE)
                                xhdrs++;
                        log->l_iclog_hsize = xhdrs << BBSHIFT;
                        log->l_iclog_heads = xhdrs;
                } else {
                        ASSERT(mp->m_logbsize <= XLOG_BIG_RECORD_BSIZE);
                        log->l_iclog_hsize = BBSIZE;
                        log->l_iclog_heads = 1;
                }
                goto done;
        }

        /* All machines use 32kB buffers by default. */
        log->l_iclog_size = XLOG_BIG_RECORD_BSIZE;
        log->l_iclog_size_log = XLOG_BIG_RECORD_BSHIFT;

        /* the default log size is 16k or 32k which is one header sector */
        log->l_iclog_hsize = BBSIZE;
        log->l_iclog_heads = 1;

done:
        /* are we being asked to make the sizes selected above visible? */
        if (mp->m_logbufs == 0)
                mp->m_logbufs = log->l_iclog_bufs;
        if (mp->m_logbsize == 0)
                mp->m_logbsize = log->l_iclog_size;
}       /* xlog_get_iclog_buffer_size */

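/*
 * Queue the background log worker to run one sync period from now.
 * xfs_syncd_centisecs is in centiseconds, hence the multiply by 10 to get
 * the milliseconds that msecs_to_jiffies() expects.
 */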
void
xfs_log_work_queue(
        struct xfs_mount        *mp)
{
        queue_delayed_work(mp->m_log_workqueue, &mp->m_log->l_work,
                                msecs_to_jiffies(xfs_syncd_centisecs * 10));
}

/*
 * Every sync period we need to unpin all items in the AIL and push them to
 * disk. If there is nothing dirty, then we might need to cover the log to
 * indicate that the filesystem is idle.
 */
void
xfs_log_worker(
        struct work_struct      *work)
{
        struct xlog             *log = container_of(to_delayed_work(work),
                                                struct xlog, l_work);
        struct xfs_mount        *mp = log->l_mp;

        /* dgc: errors ignored - not fatal and nowhere to report them */
        if (xfs_log_need_covered(mp))
                xfs_fs_log_dummy(mp);
        else
                xfs_log_force(mp, 0);

        /* start pushing all the metadata that is currently dirty */
        xfs_ail_push_all(mp->m_ail);

        /* queue us up again */
        xfs_log_work_queue(mp);
}

/*
 * This routine initializes some of the log structure for a given mount point.
 * Its primary purpose is to fill in enough so that recovery can occur.
 * However, some other stuff may be filled in too.
 */
STATIC struct xlog *
xlog_alloc_log(
        struct xfs_mount        *mp,
        struct xfs_buftarg      *log_target,
        xfs_daddr_t             blk_offset,
        int                     num_bblks)
{
        struct xlog             *log;
        xlog_rec_header_t       *head;
        xlog_in_core_t          **iclogp;
        xlog_in_core_t          *iclog, *prev_iclog = NULL;
        xfs_buf_t               *bp;
        int                     i;
        int                     error = ENOMEM;
        uint                    log2_size = 0;

        log = kmem_zalloc(sizeof(struct xlog), KM_MAYFAIL);
        if (!log) {
                xfs_warn(mp, "Log allocation failed: No memory!");
                goto out;
        }

        log->l_mp          = mp;
        log->l_targ        = log_target;
        log->l_logsize     = BBTOB(num_bblks);
        log->l_logBBstart  = blk_offset;
        log->l_logBBsize   = num_bblks;
        log->l_covered_state = XLOG_STATE_COVER_IDLE;
        log->l_flags       |= XLOG_ACTIVE_RECOVERY;
        INIT_DELAYED_WORK(&log->l_work, xfs_log_worker);

        log->l_prev_block  = -1;
        /* log->l_tail_lsn = 0x100000000LL; cycle = 1; current block = 0 */
        xlog_assign_atomic_lsn(&log->l_tail_lsn, 1, 0);
        xlog_assign_atomic_lsn(&log->l_last_sync_lsn, 1, 0);
        log->l_curr_cycle  = 1;     /* 0 is bad since this is initial value */

        xlog_grant_head_init(&log->l_reserve_head);
        xlog_grant_head_init(&log->l_write_head);

        error = EFSCORRUPTED;
        if (xfs_sb_version_hassector(&mp->m_sb)) {
                log2_size = mp->m_sb.sb_logsectlog;
                if (log2_size < BBSHIFT) {
                        xfs_warn(mp, "Log sector size too small (0x%x < 0x%x)",
                                log2_size, BBSHIFT);
                        goto out_free_log;
                }

                log2_size -= BBSHIFT;
                if (log2_size > mp->m_sectbb_log) {
                        xfs_warn(mp, "Log sector size too large (0x%x > 0x%x)",
                                log2_size, mp->m_sectbb_log);
                        goto out_free_log;
                }

                /* for larger sector sizes, must have v2 or external log */
                if (log2_size && log->l_logBBstart > 0 &&
                            !xfs_sb_version_haslogv2(&mp->m_sb)) {
                        xfs_warn(mp,
                "log sector size (0x%x) invalid for configuration.",
                                log2_size);
                        goto out_free_log;
                }
        }
        log->l_sectBBsize = 1 << log2_size;

        xlog_get_iclog_buffer_size(mp, log);

        error = ENOMEM;
        bp = xfs_buf_alloc(mp->m_logdev_targp, 0, BTOBB(log->l_iclog_size), 0);
        if (!bp)
                goto out_free_log;
        bp->b_iodone = xlog_iodone;
        ASSERT(xfs_buf_islocked(bp));
        log->l_xbuf = bp;

        spin_lock_init(&log->l_icloglock);
        init_waitqueue_head(&log->l_flush_wait);

        iclogp = &log->l_iclog;
        /*
         * The amount of memory to allocate for the iclog structure is
         * rather funky due to the way the structure is defined.  It is
         * done this way so that we can use different sizes for machines
         * with different amounts of memory.  See the definition of
         * xlog_in_core_t in xfs_log_priv.h for details.
         */
        ASSERT(log->l_iclog_size >= 4096);
        for (i = 0; i < log->l_iclog_bufs; i++) {
                *iclogp = kmem_zalloc(sizeof(xlog_in_core_t), KM_MAYFAIL);
                if (!*iclogp)
                        goto out_free_iclog;

                iclog = *iclogp;
                iclog->ic_prev = prev_iclog;
                prev_iclog = iclog;

                bp = xfs_buf_get_uncached(mp->m_logdev_targp,
                                                BTOBB(log->l_iclog_size), 0);
                if (!bp)
                        goto out_free_iclog;

                bp->b_iodone = xlog_iodone;
                iclog->ic_bp = bp;
                iclog->ic_data = bp->b_addr;
#ifdef DEBUG
                log->l_iclog_bak[i] = (xfs_caddr_t)&(iclog->ic_header);
#endif
                head = &iclog->ic_header;
                memset(head, 0, sizeof(xlog_rec_header_t));
                head->h_magicno = cpu_to_be32(XLOG_HEADER_MAGIC_NUM);
                head->h_version = cpu_to_be32(
                        xfs_sb_version_haslogv2(&log->l_mp->m_sb) ? 2 : 1);
                head->h_size = cpu_to_be32(log->l_iclog_size);
                /* new fields */
                head->h_fmt = cpu_to_be32(XLOG_FMT);
                memcpy(&head->h_fs_uuid, &mp->m_sb.sb_uuid, sizeof(uuid_t));

                iclog->ic_size = BBTOB(bp->b_length) - log->l_iclog_hsize;
                iclog->ic_state = XLOG_STATE_ACTIVE;
                iclog->ic_log = log;
                atomic_set(&iclog->ic_refcnt, 0);
                spin_lock_init(&iclog->ic_callback_lock);
                iclog->ic_callback_tail = &(iclog->ic_callback);
                iclog->ic_datap = (char *)iclog->ic_data + log->l_iclog_hsize;

                ASSERT(xfs_buf_islocked(iclog->ic_bp));
                init_waitqueue_head(&iclog->ic_force_wait);
                init_waitqueue_head(&iclog->ic_write_wait);

                iclogp = &iclog->ic_next;
        }
        *iclogp = log->l_iclog;                 /* complete ring */
        log->l_iclog->ic_prev = prev_iclog;     /* re-write 1st prev ptr */

        error = xlog_cil_init(log);
        if (error)
                goto out_free_iclog;
        return log;

out_free_iclog:
        for (iclog = log->l_iclog; iclog; iclog = prev_iclog) {
                prev_iclog = iclog->ic_next;
                if (iclog->ic_bp)
                        xfs_buf_free(iclog->ic_bp);
                kmem_free(iclog);
        }
        spinlock_destroy(&log->l_icloglock);
        xfs_buf_free(log->l_xbuf);
out_free_log:
        kmem_free(log);
out:
        return ERR_PTR(-error);
}       /* xlog_alloc_log */

1446 /*
1447  * Write out the commit record of a transaction associated with the given
1448  * ticket.  Return the lsn of the commit record.
1449  */
1450 STATIC int
1451 xlog_commit_record(
1452         struct xlog             *log,
1453         struct xlog_ticket      *ticket,
1454         struct xlog_in_core     **iclog,
1455         xfs_lsn_t               *commitlsnp)
1456 {
1457         struct xfs_mount *mp = log->l_mp;
1458         int     error;
1459         struct xfs_log_iovec reg = {
1460                 .i_addr = NULL,
1461                 .i_len = 0,
1462                 .i_type = XLOG_REG_TYPE_COMMIT,
1463         };
1464         struct xfs_log_vec vec = {
1465                 .lv_niovecs = 1,
1466                 .lv_iovecp = &reg,
1467         };
1468
1469         ASSERT_ALWAYS(iclog);
1470         error = xlog_write(log, &vec, ticket, commitlsnp, iclog,
1471                                         XLOG_COMMIT_TRANS);
1472         if (error)
1473                 xfs_force_shutdown(mp, SHUTDOWN_LOG_IO_ERROR);
1474         return error;
1475 }
1476
1477 /*
1478  * Push on the buffer cache code if we ever use more than 75% of the on-disk
1479  * log space.  This code pushes on the lsn which would supposedly free up
1480  * the 25% which we want to leave free.  We may need to adopt a policy which
1481  * pushes on an lsn which is further along in the log once we reach the high
1482  * water mark.  In this manner, we would be creating a low water mark.
1483  */
1484 STATIC void
1485 xlog_grant_push_ail(
1486         struct xlog     *log,
1487         int             need_bytes)
1488 {
1489         xfs_lsn_t       threshold_lsn = 0;
1490         xfs_lsn_t       last_sync_lsn;
1491         int             free_blocks;
1492         int             free_bytes;
1493         int             threshold_block;
1494         int             threshold_cycle;
1495         int             free_threshold;
1496
1497         ASSERT(BTOBB(need_bytes) < log->l_logBBsize);
1498
1499         free_bytes = xlog_space_left(log, &log->l_reserve_head.grant);
1500         free_blocks = BTOBBT(free_bytes);
1501
1502         /*
1503          * Set the threshold for the minimum number of free blocks in the
1504          * log to the maximum of what the caller needs, one quarter of the
1505          * log, and 256 blocks.
1506          */
1507         free_threshold = BTOBB(need_bytes);
1508         free_threshold = MAX(free_threshold, (log->l_logBBsize >> 2));
1509         free_threshold = MAX(free_threshold, 256);
1510         if (free_blocks >= free_threshold)
1511                 return;
1512
1513         xlog_crack_atomic_lsn(&log->l_tail_lsn, &threshold_cycle,
1514                                                 &threshold_block);
1515         threshold_block += free_threshold;
1516         if (threshold_block >= log->l_logBBsize) {
1517                 threshold_block -= log->l_logBBsize;
1518                 threshold_cycle += 1;
1519         }
1520         threshold_lsn = xlog_assign_lsn(threshold_cycle,
1521                                         threshold_block);
1522         /*
1523          * Don't pass in an lsn greater than the lsn of the last
1524          * log record known to be on disk. Use a snapshot of the last sync lsn
1525          * so that it doesn't change between the compare and the set.
1526          */
1527         last_sync_lsn = atomic64_read(&log->l_last_sync_lsn);
1528         if (XFS_LSN_CMP(threshold_lsn, last_sync_lsn) > 0)
1529                 threshold_lsn = last_sync_lsn;
1530
1531         /*
1532          * Get the transaction layer to kick the dirty buffers out to
1533          * disk asynchronously. No point in trying to do this if
1534          * the filesystem is shutting down.
1535          */
1536         if (!XLOG_FORCED_SHUTDOWN(log))
1537                 xfs_ail_push(log->l_ailp, threshold_lsn);
1538 }
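
/*
 * [Editor's illustration, not part of the original source.]  The threshold
 * arithmetic above is easy to sanity-check in isolation.  The sketch below
 * assumes 512-byte basic blocks (BBSIZE) and mirrors the MAX() chain in
 * plain C; recall that xlog_assign_lsn() packs the 32-bit cycle into the
 * high word of the LSN and the block number into the low word.
 */
#if 0	/* illustrative sketch only */
static int example_free_threshold(int need_bytes, int log_size_bblocks)
{
	int threshold = (need_bytes + 511) / 512;	/* BTOBB(need_bytes) */

	if (threshold < (log_size_bblocks >> 2))	/* quarter of the log */
		threshold = log_size_bblocks >> 2;
	if (threshold < 256)				/* absolute floor */
		threshold = 256;

	/* e.g. an 8192-block log with need_bytes == 4096 yields 2048 */
	return threshold;
}
#endif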
1539
1540 /*
1541  * Stamp cycle number in every block
1542  */
1543 STATIC void
1544 xlog_pack_data(
1545         struct xlog             *log,
1546         struct xlog_in_core     *iclog,
1547         int                     roundoff)
1548 {
1549         int                     i, j, k;
1550         int                     size = iclog->ic_offset + roundoff;
1551         __be32                  cycle_lsn;
1552         xfs_caddr_t             dp;
1553
1554         cycle_lsn = CYCLE_LSN_DISK(iclog->ic_header.h_lsn);
1555
1556         dp = iclog->ic_datap;
1557         for (i = 0; i < BTOBB(size); i++) {
1558                 if (i >= (XLOG_HEADER_CYCLE_SIZE / BBSIZE))
1559                         break;
1560                 iclog->ic_header.h_cycle_data[i] = *(__be32 *)dp;
1561                 *(__be32 *)dp = cycle_lsn;
1562                 dp += BBSIZE;
1563         }
1564
1565         if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
1566                 xlog_in_core_2_t *xhdr = iclog->ic_data;
1567
1568                 for ( ; i < BTOBB(size); i++) {
1569                         j = i / (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
1570                         k = i % (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
1571                         xhdr[j].hic_xheader.xh_cycle_data[k] = *(__be32 *)dp;
1572                         *(__be32 *)dp = cycle_lsn;
1573                         dp += BBSIZE;
1574                 }
1575
1576                 for (i = 1; i < log->l_iclog_heads; i++)
1577                         xhdr[i].hic_xheader.xh_cycle = cycle_lsn;
1578         }
1579 }
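
/*
 * [Editor's illustration, not part of the original source.]  The stamping
 * above, reduced to its essentials for a v1 log (at most 64 data blocks,
 * since XLOG_HEADER_CYCLE_SIZE is 32k and BBSIZE is 512): the first word of
 * every 512-byte block is stashed in the header and replaced by the cycle
 * number, so recovery can tell a fully written block from a stale one and
 * can restore the saved words afterwards.
 */
#if 0	/* illustrative sketch only */
static void example_pack(char *data, int nbblks, __be32 cycle_lsn,
			 __be32 *saved /* >= nbblks slots, as h_cycle_data */)
{
	int i;

	for (i = 0; i < nbblks; i++) {
		saved[i] = *(__be32 *)(data + i * 512);	  /* stash 1st word */
		*(__be32 *)(data + i * 512) = cycle_lsn;  /* stamp the cycle */
	}
}
#endif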
1580
1581 /*
1582  * Calculate the checksum for a log buffer.
1583  *
1584  * This is a little more complicated than it should be because the various
1585  * headers and the actual data are non-contiguous.
1586  */
1587 __le32
1588 xlog_cksum(
1589         struct xlog             *log,
1590         struct xlog_rec_header  *rhead,
1591         char                    *dp,
1592         int                     size)
1593 {
1594         __uint32_t              crc;
1595
1596         /* first generate the crc for the record header ... */
1597         crc = xfs_start_cksum((char *)rhead,
1598                               sizeof(struct xlog_rec_header),
1599                               offsetof(struct xlog_rec_header, h_crc));
1600
1601         /* ... then for additional cycle data for v2 logs ... */
1602         if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
1603                 union xlog_in_core2 *xhdr = (union xlog_in_core2 *)rhead;
1604                 int             i;
1605
1606                 for (i = 1; i < log->l_iclog_heads; i++) {
1607                         crc = crc32c(crc, &xhdr[i].hic_xheader,
1608                                      sizeof(struct xlog_rec_ext_header));
1609                 }
1610         }
1611
1612         /* ... and finally for the payload */
1613         crc = crc32c(crc, dp, size);
1614
1615         return xfs_end_cksum(crc);
1616 }
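
/*
 * [Editor's illustration, not part of the original source.]  This is the
 * usual chained-CRC pattern for non-contiguous buffers: seed once, feed
 * each region through crc32c() in order, invert to finalize.  The
 * offsetof() argument lets xfs_start_cksum() substitute zero bytes for the
 * h_crc field itself, so the stored checksum never covers itself.  A
 * minimal analogue, assuming a crc32c() implementation is available:
 */
#if 0	/* illustrative sketch only */
static __uint32_t example_chained_crc(const void *hdr, unsigned int hdr_len,
				      const void *payload, unsigned int len)
{
	__uint32_t crc = crc32c(~0U, hdr, hdr_len);	/* seed + header */

	crc = crc32c(crc, payload, len);		/* then the payload */
	return ~crc;					/* finalize */
}
#endif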
1617
1618 /*
1619  * The bdstrat callback function for log bufs. This gives us a central
1620  * place to trap bufs in case we get hit by a log I/O error and need to
1621  * shut down. In practice, even when we didn't get a log error,
1622  * we transition the iclogs to IOERROR state *after* flushing all existing
1623  * iclogs to disk. This is because we don't want any more new transactions to be
1624  * started or completed afterwards.
1625  */
1626 STATIC int
1627 xlog_bdstrat(
1628         struct xfs_buf          *bp)
1629 {
1630         struct xlog_in_core     *iclog = bp->b_fspriv;
1631
1632         if (iclog->ic_state & XLOG_STATE_IOERROR) {
1633                 xfs_buf_ioerror(bp, EIO);
1634                 xfs_buf_stale(bp);
1635                 xfs_buf_ioend(bp, 0);
1636                 /*
1637                  * It would seem logical to return EIO here, but we rely on
1638                  * the log state machine to propagate I/O errors instead of
1639                  * doing it here.
1640                  */
1641                 return 0;
1642         }
1643
1644         xfs_buf_iorequest(bp);
1645         return 0;
1646 }
1647
1648 /*
1649  * Flush out the in-core log (iclog) to the on-disk log in an asynchronous 
1650  * fashion.  Before calling this routine, the caller should have moved the
1651  * current iclog ptr to point to the next available iclog.  This allows
1652  * further writes to continue while this code syncs out an iclog ready to go.
1653  * Before an in-core log can be written out, the data section must be scanned
1654  * to save away the 1st word of each BBSIZE block into the header.  We replace
1655  * it with the current cycle count.  Each BBSIZE block is tagged with the
1656  * cycle count because there is an implicit assumption that drives will
1657  * guarantee that entire 512 byte blocks get written at once.  In other words,
1658  * we can't have part of a 512 byte block written and part not written.  By
1659  * tagging each block, we will know which blocks are valid when recovering
1660  * after an unclean shutdown.
1661  *
1662  * This routine is single threaded on the iclog.  No other thread can be in
1663  * this routine with the same iclog.  Changing the contents of the iclog can
1664  * therefore be done without grabbing the state machine lock.  Updating the global
1665  * log will require grabbing the lock though.
1666  *
1667  * The entire log manager uses a logical block numbering scheme.  Only
1668  * log_sync (and then only bwrite()) know about the fact that the log may
1669  * not start with block zero on a given device.  The log block start offset
1670  * is added immediately before calling bwrite().
1671  */
1672
1673 STATIC int
1674 xlog_sync(
1675         struct xlog             *log,
1676         struct xlog_in_core     *iclog)
1677 {
1678         xfs_buf_t       *bp;
1679         int             i;
1680         uint            count;          /* byte count of bwrite */
1681         uint            count_init;     /* initial count before roundup */
1682         int             roundoff;       /* roundoff to BB or stripe */
1683         int             split = 0;      /* split write into two regions */
1684         int             error;
1685         int             v2 = xfs_sb_version_haslogv2(&log->l_mp->m_sb);
1686         int             size;
1687
1688         XFS_STATS_INC(xs_log_writes);
1689         ASSERT(atomic_read(&iclog->ic_refcnt) == 0);
1690
1691         /* Add for LR header */
1692         count_init = log->l_iclog_hsize + iclog->ic_offset;
1693
1694         /* Round out the log write size */
1695         if (v2 && log->l_mp->m_sb.sb_logsunit > 1) {
1696                 /* we have a v2 stripe unit to use */
1697                 count = XLOG_LSUNITTOB(log, XLOG_BTOLSUNIT(log, count_init));
1698         } else {
1699                 count = BBTOB(BTOBB(count_init));
1700         }
1701         roundoff = count - count_init;
1702         ASSERT(roundoff >= 0);
1703         ASSERT((v2 && log->l_mp->m_sb.sb_logsunit > 1 && 
1704                 roundoff < log->l_mp->m_sb.sb_logsunit)
1705                 || 
1706                 (log->l_mp->m_sb.sb_logsunit <= 1 && 
1707                  roundoff < BBTOB(1)));
1708
1709         /* move grant heads by roundoff in sync */
1710         xlog_grant_add_space(log, &log->l_reserve_head.grant, roundoff);
1711         xlog_grant_add_space(log, &log->l_write_head.grant, roundoff);
1712
1713         /* put cycle number in every block */
1714         xlog_pack_data(log, iclog, roundoff); 
1715
1716         /* real byte length */
1717         size = iclog->ic_offset;
1718         if (v2)
1719                 size += roundoff;
1720         iclog->ic_header.h_len = cpu_to_be32(size);
1721
1722         bp = iclog->ic_bp;
1723         XFS_BUF_SET_ADDR(bp, BLOCK_LSN(be64_to_cpu(iclog->ic_header.h_lsn)));
1724
1725         XFS_STATS_ADD(xs_log_blocks, BTOBB(count));
1726
1727         /* Do we need to split this write into 2 parts? */
1728         if (XFS_BUF_ADDR(bp) + BTOBB(count) > log->l_logBBsize) {
1729                 char            *dptr;
1730
1731                 split = count - (BBTOB(log->l_logBBsize - XFS_BUF_ADDR(bp)));
1732                 count = BBTOB(log->l_logBBsize - XFS_BUF_ADDR(bp));
1733                 iclog->ic_bwritecnt = 2;
1734
1735                 /*
1736                  * Bump the cycle numbers at the start of each block in the
1737                  * part of the iclog that ends up in the buffer that gets
1738                  * written to the start of the log.
1739                  *
1740                  * Watch out for the header magic number case, though.
1741                  */
1742                 dptr = (char *)&iclog->ic_header + count;
1743                 for (i = 0; i < split; i += BBSIZE) {
1744                         __uint32_t cycle = be32_to_cpu(*(__be32 *)dptr);
1745                         if (++cycle == XLOG_HEADER_MAGIC_NUM)
1746                                 cycle++;
1747                         *(__be32 *)dptr = cpu_to_be32(cycle);
1748
1749                         dptr += BBSIZE;
1750                 }
1751         } else {
1752                 iclog->ic_bwritecnt = 1;
1753         }
1754
1755         /* calculate the checksum */
1756         iclog->ic_header.h_crc = xlog_cksum(log, &iclog->ic_header,
1757                                             iclog->ic_datap, size);
1758
1759         bp->b_io_length = BTOBB(count);
1760         bp->b_fspriv = iclog;
1761         XFS_BUF_ZEROFLAGS(bp);
1762         XFS_BUF_ASYNC(bp);
1763         bp->b_flags |= XBF_SYNCIO;
1764
1765         if (log->l_mp->m_flags & XFS_MOUNT_BARRIER) {
1766                 bp->b_flags |= XBF_FUA;
1767
1768                 /*
1769                  * Flush the data device before flushing the log to make
1770                  * sure all meta data written back from the AIL actually made
1771                  * it to disk before stamping the new log tail LSN into the
1772                  * log buffer.  For an external log we need to issue the
1773                  * flush explicitly, and unfortunately synchronously here;
1774                  * for an internal log we can simply use the block layer
1775                  * state machine for preflushes.
1776                  */
1777                 if (log->l_mp->m_logdev_targp != log->l_mp->m_ddev_targp)
1778                         xfs_blkdev_issue_flush(log->l_mp->m_ddev_targp);
1779                 else
1780                         bp->b_flags |= XBF_FLUSH;
1781         }
1782
1783         ASSERT(XFS_BUF_ADDR(bp) <= log->l_logBBsize-1);
1784         ASSERT(XFS_BUF_ADDR(bp) + BTOBB(count) <= log->l_logBBsize);
1785
1786         xlog_verify_iclog(log, iclog, count, true);
1787
1788         /* account for log which doesn't start at block #0 */
1789         XFS_BUF_SET_ADDR(bp, XFS_BUF_ADDR(bp) + log->l_logBBstart);
1790         /*
1791          * Don't call xfs_bwrite here. We do log-syncs even when the filesystem
1792          * is shutting down.
1793          */
1794         XFS_BUF_WRITE(bp);
1795
1796         error = xlog_bdstrat(bp);
1797         if (error) {
1798                 xfs_buf_ioerror_alert(bp, "xlog_sync");
1799                 return error;
1800         }
1801         if (split) {
1802                 bp = iclog->ic_log->l_xbuf;
1803                 XFS_BUF_SET_ADDR(bp, 0);             /* logical 0 */
1804                 xfs_buf_associate_memory(bp,
1805                                 (char *)&iclog->ic_header + count, split);
1806                 bp->b_fspriv = iclog;
1807                 XFS_BUF_ZEROFLAGS(bp);
1808                 XFS_BUF_ASYNC(bp);
1809                 bp->b_flags |= XBF_SYNCIO;
1810                 if (log->l_mp->m_flags & XFS_MOUNT_BARRIER)
1811                         bp->b_flags |= XBF_FUA;
1812
1813                 ASSERT(XFS_BUF_ADDR(bp) <= log->l_logBBsize-1);
1814                 ASSERT(XFS_BUF_ADDR(bp) + BTOBB(count) <= log->l_logBBsize);
1815
1816                 /* account for internal log which doesn't start at block #0 */
1817                 XFS_BUF_SET_ADDR(bp, XFS_BUF_ADDR(bp) + log->l_logBBstart);
1818                 XFS_BUF_WRITE(bp);
1819                 error = xlog_bdstrat(bp);
1820                 if (error) {
1821                         xfs_buf_ioerror_alert(bp, "xlog_sync (split)");
1822                         return error;
1823                 }
1824         }
1825         return 0;
1826 }       /* xlog_sync */
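
/*
 * [Editor's illustration, not part of the original source.]  Two worked
 * examples for the arithmetic above.  Stripe-unit round-up: with
 * sb_logsunit == 4096 and count_init == 5000, count becomes 8192 and
 * roundoff == 3192; both grant heads are then moved by roundoff so the
 * reservation accounting matches what is physically written.  Wrap/split:
 * on a 1024-block log, an iclog landing at block 1000 with count == 32768
 * bytes (64 blocks) wraps, so
 *
 *	count = BBTOB(1024 - 1000) = 12288	first write, blocks 1000..1023
 *	split = 32768 - 12288      = 20480	second write via l_xbuf, block 0
 *
 * and ic_bwritecnt is set to 2 so that completion handling only proceeds
 * once the second buffer is also done.
 */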
1827
1828 /*
1829  * Deallocate a log structure
1830  */
1831 STATIC void
1832 xlog_dealloc_log(
1833         struct xlog     *log)
1834 {
1835         xlog_in_core_t  *iclog, *next_iclog;
1836         int             i;
1837
1838         xlog_cil_destroy(log);
1839
1840         /*
1841          * always need to ensure that the extra buffer does not point to memory
1842          * owned by another log buffer before we free it.
1843          */
1844         xfs_buf_set_empty(log->l_xbuf, BTOBB(log->l_iclog_size));
1845         xfs_buf_free(log->l_xbuf);
1846
1847         iclog = log->l_iclog;
1848         for (i=0; i<log->l_iclog_bufs; i++) {
1849                 xfs_buf_free(iclog->ic_bp);
1850                 next_iclog = iclog->ic_next;
1851                 kmem_free(iclog);
1852                 iclog = next_iclog;
1853         }
1854         spinlock_destroy(&log->l_icloglock);
1855
1856         log->l_mp->m_log = NULL;
1857         kmem_free(log);
1858 }       /* xlog_dealloc_log */
1859
1860 /*
1861  * Update counters atomically now that memcpy is done.
1862  */
1863 /* ARGSUSED */
1864 static inline void
1865 xlog_state_finish_copy(
1866         struct xlog             *log,
1867         struct xlog_in_core     *iclog,
1868         int                     record_cnt,
1869         int                     copy_bytes)
1870 {
1871         spin_lock(&log->l_icloglock);
1872
1873         be32_add_cpu(&iclog->ic_header.h_num_logops, record_cnt);
1874         iclog->ic_offset += copy_bytes;
1875
1876         spin_unlock(&log->l_icloglock);
1877 }       /* xlog_state_finish_copy */
1878
1879
1880
1881
1882 /*
1883  * print out info relating to regions written which consume
1884  * the reservation
1885  */
1886 void
1887 xlog_print_tic_res(
1888         struct xfs_mount        *mp,
1889         struct xlog_ticket      *ticket)
1890 {
1891         uint i;
1892         uint ophdr_spc = ticket->t_res_num_ophdrs * (uint)sizeof(xlog_op_header_t);
1893
1894         /* match with XLOG_REG_TYPE_* in xfs_log.h */
1895         static char *res_type_str[XLOG_REG_TYPE_MAX] = {
1896             "bformat",
1897             "bchunk",
1898             "efi_format",
1899             "efd_format",
1900             "iformat",
1901             "icore",
1902             "iext",
1903             "ibroot",
1904             "ilocal",
1905             "iattr_ext",
1906             "iattr_broot",
1907             "iattr_local",
1908             "qformat",
1909             "dquot",
1910             "quotaoff",
1911             "LR header",
1912             "unmount",
1913             "commit",
1914             "trans header"
1915         };
1916         static char *trans_type_str[XFS_TRANS_TYPE_MAX] = {
1917             "SETATTR_NOT_SIZE",
1918             "SETATTR_SIZE",
1919             "INACTIVE",
1920             "CREATE",
1921             "CREATE_TRUNC",
1922             "TRUNCATE_FILE",
1923             "REMOVE",
1924             "LINK",
1925             "RENAME",
1926             "MKDIR",
1927             "RMDIR",
1928             "SYMLINK",
1929             "SET_DMATTRS",
1930             "GROWFS",
1931             "STRAT_WRITE",
1932             "DIOSTRAT",
1933             "WRITE_SYNC",
1934             "WRITEID",
1935             "ADDAFORK",
1936             "ATTRINVAL",
1937             "ATRUNCATE",
1938             "ATTR_SET",
1939             "ATTR_RM",
1940             "ATTR_FLAG",
1941             "CLEAR_AGI_BUCKET",
1942             "QM_SBCHANGE",
1943             "DUMMY1",
1944             "DUMMY2",
1945             "QM_QUOTAOFF",
1946             "QM_DQALLOC",
1947             "QM_SETQLIM",
1948             "QM_DQCLUSTER",
1949             "QM_QINOCREATE",
1950             "QM_QUOTAOFF_END",
1951             "SB_UNIT",
1952             "FSYNC_TS",
1953             "GROWFSRT_ALLOC",
1954             "GROWFSRT_ZERO",
1955             "GROWFSRT_FREE",
1956             "SWAPEXT"
1957         };
1958
1959         xfs_warn(mp,
1960                 "xlog_write: reservation summary:\n"
1961                 "  trans type  = %s (%u)\n"
1962                 "  unit res    = %d bytes\n"
1963                 "  current res = %d bytes\n"
1964                 "  total reg   = %u bytes (o/flow = %u bytes)\n"
1965                 "  ophdrs      = %u (ophdr space = %u bytes)\n"
1966                 "  ophdr + reg = %u bytes\n"
1967                 "  num regions = %u\n",
1968                 ((ticket->t_trans_type <= 0 ||
1969                   ticket->t_trans_type > XFS_TRANS_TYPE_MAX) ?
1970                   "bad-trans-type" : trans_type_str[ticket->t_trans_type-1]),
1971                 ticket->t_trans_type,
1972                 ticket->t_unit_res,
1973                 ticket->t_curr_res,
1974                 ticket->t_res_arr_sum, ticket->t_res_o_flow,
1975                 ticket->t_res_num_ophdrs, ophdr_spc,
1976                 ticket->t_res_arr_sum +
1977                 ticket->t_res_o_flow + ophdr_spc,
1978                 ticket->t_res_num);
1979
1980         for (i = 0; i < ticket->t_res_num; i++) {
1981                 uint r_type = ticket->t_res_arr[i].r_type;
1982                 xfs_warn(mp, "region[%u]: %s - %u bytes\n", i,
1983                             ((r_type <= 0 || r_type > XLOG_REG_TYPE_MAX) ?
1984                             "bad-rtype" : res_type_str[r_type-1]),
1985                             ticket->t_res_arr[i].r_len);
1986         }
1987
1988         xfs_alert_tag(mp, XFS_PTAG_LOGRES,
1989                 "xlog_write: reservation ran out. Need to up reservation");
1990         xfs_force_shutdown(mp, SHUTDOWN_LOG_IO_ERROR);
1991 }
1992
1993 /*
1994  * Calculate the potential space needed by the log vector.  Each region gets
1995  * its own xlog_op_header_t and may need to be double word aligned.
1996  */
1997 static int
1998 xlog_write_calc_vec_length(
1999         struct xlog_ticket      *ticket,
2000         struct xfs_log_vec      *log_vector)
2001 {
2002         struct xfs_log_vec      *lv;
2003         int                     headers = 0;
2004         int                     len = 0;
2005         int                     i;
2006
2007         /* acct for start rec of xact */
2008         if (ticket->t_flags & XLOG_TIC_INITED)
2009                 headers++;
2010
2011         for (lv = log_vector; lv; lv = lv->lv_next) {
2012                 /* we don't write ordered log vectors */
2013                 if (lv->lv_buf_len == XFS_LOG_VEC_ORDERED)
2014                         continue;
2015
2016                 headers += lv->lv_niovecs;
2017
2018                 for (i = 0; i < lv->lv_niovecs; i++) {
2019                         struct xfs_log_iovec    *vecp = &lv->lv_iovecp[i];
2020
2021                         len += vecp->i_len;
2022                         xlog_tic_add_region(ticket, vecp->i_len, vecp->i_type);
2023                 }
2024         }
2025
2026         ticket->t_res_num_ophdrs += headers;
2027         len += headers * sizeof(struct xlog_op_header);
2028
2029         return len;
2030 }
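
/*
 * [Editor's illustration, not part of the original source.]  The on-disk op
 * header (xlog_op_header_t) is 12 bytes, so for a chain of two log vectors
 * carrying 3 regions of 1000 bytes total, written under a freshly reserved
 * (XLOG_TIC_INITED) ticket:
 *
 *	headers = 1 (start record) + 3 (one per region) = 4
 *	len     = 1000 + 4 * 12 = 1048 bytes
 *
 * Ordered vectors (lv_buf_len == XFS_LOG_VEC_ORDERED) contribute nothing.
 */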
2031
2032 /*
2033  * If first write for transaction, insert start record.  We can't be trying to
2034  * commit if we are inited.  We can't have any "partial_copy" if we are inited.
2035  */
2036 static int
2037 xlog_write_start_rec(
2038         struct xlog_op_header   *ophdr,
2039         struct xlog_ticket      *ticket)
2040 {
2041         if (!(ticket->t_flags & XLOG_TIC_INITED))
2042                 return 0;
2043
2044         ophdr->oh_tid   = cpu_to_be32(ticket->t_tid);
2045         ophdr->oh_clientid = ticket->t_clientid;
2046         ophdr->oh_len = 0;
2047         ophdr->oh_flags = XLOG_START_TRANS;
2048         ophdr->oh_res2 = 0;
2049
2050         ticket->t_flags &= ~XLOG_TIC_INITED;
2051
2052         return sizeof(struct xlog_op_header);
2053 }
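
/*
 * [Editor's illustration, not part of the original source.]  The start
 * record is just a 12-byte op header with oh_flags == XLOG_START_TRANS and
 * oh_len == 0; clearing XLOG_TIC_INITED here guarantees it is emitted
 * exactly once per ticket, ahead of the first real region.
 */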
2054
2055 static xlog_op_header_t *
2056 xlog_write_setup_ophdr(
2057         struct xlog             *log,
2058         struct xlog_op_header   *ophdr,
2059         struct xlog_ticket      *ticket,
2060         uint                    flags)
2061 {
2062         ophdr->oh_tid = cpu_to_be32(ticket->t_tid);
2063         ophdr->oh_clientid = ticket->t_clientid;
2064         ophdr->oh_res2 = 0;
2065
2066         /* are we copying a commit or unmount record? */
2067         ophdr->oh_flags = flags;
2068
2069         /*
2070          * We've seen logs corrupted with bad transaction client ids.  This
2071  * makes sure that XFS doesn't generate them.  Turn this into an EIO
2072          * and shut down the filesystem.
2073          */
2074         switch (ophdr->oh_clientid)  {
2075         case XFS_TRANSACTION:
2076         case XFS_VOLUME:
2077         case XFS_LOG:
2078                 break;
2079         default:
2080                 xfs_warn(log->l_mp,
2081                         "Bad XFS transaction clientid 0x%x in ticket 0x%p",
2082                         ophdr->oh_clientid, ticket);
2083                 return NULL;
2084         }
2085
2086         return ophdr;
2087 }
2088
2089 /*
2090  * Set up the parameters of the region copy into the log. This has
2091  * to handle region write split across multiple log buffers - this
2092  * state is kept external to this function so that this code can
2093  * be written in an obvious, self-documenting manner.
2094  */
2095 static int
2096 xlog_write_setup_copy(
2097         struct xlog_ticket      *ticket,
2098         struct xlog_op_header   *ophdr,
2099         int                     space_available,
2100         int                     space_required,
2101         int                     *copy_off,
2102         int                     *copy_len,
2103         int                     *last_was_partial_copy,
2104         int                     *bytes_consumed)
2105 {
2106         int                     still_to_copy;
2107
2108         still_to_copy = space_required - *bytes_consumed;
2109         *copy_off = *bytes_consumed;
2110
2111         if (still_to_copy <= space_available) {
2112                 /* write of region completes here */
2113                 *copy_len = still_to_copy;
2114                 ophdr->oh_len = cpu_to_be32(*copy_len);
2115                 if (*last_was_partial_copy)
2116                         ophdr->oh_flags |= (XLOG_END_TRANS|XLOG_WAS_CONT_TRANS);
2117                 *last_was_partial_copy = 0;
2118                 *bytes_consumed = 0;
2119                 return 0;
2120         }
2121
2122         /* partial write of region, needs extra log op header reservation */
2123         *copy_len = space_available;
2124         ophdr->oh_len = cpu_to_be32(*copy_len);
2125         ophdr->oh_flags |= XLOG_CONTINUE_TRANS;
2126         if (*last_was_partial_copy)
2127                 ophdr->oh_flags |= XLOG_WAS_CONT_TRANS;
2128         *bytes_consumed += *copy_len;
2129         (*last_was_partial_copy)++;
2130
2131         /* account for new log op header */
2132         ticket->t_curr_res -= sizeof(struct xlog_op_header);
2133         ticket->t_res_num_ophdrs++;
2134
2135         return sizeof(struct xlog_op_header);
2136 }
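
/*
 * [Editor's illustration, not part of the original source.]  Partial-copy
 * walkthrough for a 3000-byte region with only 2000 bytes left in the
 * current iclog:
 *
 *	pass 1: copy_off = 0, copy_len = 2000, oh_flags |= CONTINUE_TRANS,
 *		bytes_consumed = 2000; 12 bytes are returned so the caller
 *		accounts for the extra op header in the outstanding length.
 *	pass 2: (in the next iclog) copy_off = 2000, copy_len = 1000, and
 *		oh_flags gets END_TRANS | WAS_CONT_TRANS, closing the region.
 */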
2137
2138 static int
2139 xlog_write_copy_finish(
2140         struct xlog             *log,
2141         struct xlog_in_core     *iclog,
2142         uint                    flags,
2143         int                     *record_cnt,
2144         int                     *data_cnt,
2145         int                     *partial_copy,
2146         int                     *partial_copy_len,
2147         int                     log_offset,
2148         struct xlog_in_core     **commit_iclog)
2149 {
2150         if (*partial_copy) {
2151                 /*
2152                  * This iclog has already been marked WANT_SYNC by
2153                  * xlog_state_get_iclog_space.
2154                  */
2155                 xlog_state_finish_copy(log, iclog, *record_cnt, *data_cnt);
2156                 *record_cnt = 0;
2157                 *data_cnt = 0;
2158                 return xlog_state_release_iclog(log, iclog);
2159         }
2160
2161         *partial_copy = 0;
2162         *partial_copy_len = 0;
2163
2164         if (iclog->ic_size - log_offset <= sizeof(xlog_op_header_t)) {
2165                 /* no more space in this iclog - push it. */
2166                 xlog_state_finish_copy(log, iclog, *record_cnt, *data_cnt);
2167                 *record_cnt = 0;
2168                 *data_cnt = 0;
2169
2170                 spin_lock(&log->l_icloglock);
2171                 xlog_state_want_sync(log, iclog);
2172                 spin_unlock(&log->l_icloglock);
2173
2174                 if (!commit_iclog)
2175                         return xlog_state_release_iclog(log, iclog);
2176                 ASSERT(flags & XLOG_COMMIT_TRANS);
2177                 *commit_iclog = iclog;
2178         }
2179
2180         return 0;
2181 }
2182
2183 /*
2184  * Write some region out to in-core log
2185  *
2186  * This will be called when writing externally provided regions or when
2187  * writing out a commit record for a given transaction.
2188  *
2189  * General algorithm:
2190  *      1. Find total length of this write.  This may include adding to the
2191  *              lengths passed in.
2192  *      2. Check whether we violate the ticket's reservation.
2193  *      3. While writing to this iclog
2194  *          A. Reserve as much space in this iclog as we can get
2195  *          B. If this is first write, save away start lsn
2196  *          C. While writing this region:
2197  *              1. If first write of transaction, write start record
2198  *              2. Write log operation header (header per region)
2199  *              3. Find out if we can fit entire region into this iclog
2200  *              4. Potentially, verify destination memcpy ptr
2201  *              5. Memcpy (partial) region
2202  *              6. If partial copy, release iclog; otherwise, continue
2203  *                      copying more regions into current iclog
2204  *      4. Mark want sync bit (in simulation mode)
2205  *      5. Release iclog for potential flush to on-disk log.
2206  *
2207  * ERRORS:
2208  * 1.   Panic if reservation is overrun.  This should never happen since
2209  *      reservation amounts are generated internal to the filesystem.
2210  * NOTES:
2211  * 1. Tickets are single threaded data structures.
2212  * 2. The XLOG_END_TRANS & XLOG_CONTINUE_TRANS flags are passed down to the
2213  *      syncing routine.  When a single log_write region needs to span
2214  *      multiple in-core logs, the XLOG_CONTINUE_TRANS bit should be set
2215  *      on all log operation writes which don't contain the end of the
2216  *      region.  The XLOG_END_TRANS bit is used for the in-core log
2217  *      operation which contains the end of the continued log_write region.
2218  * 3. When xlog_state_get_iclog_space() grabs the rest of the current iclog,
2219  *      we don't really know exactly how much space will be used.  As a result,
2220  *      we don't update ic_offset until the end when we know exactly how many
2221  *      bytes have been written out.
2222  */
2223 int
2224 xlog_write(
2225         struct xlog             *log,
2226         struct xfs_log_vec      *log_vector,
2227         struct xlog_ticket      *ticket,
2228         xfs_lsn_t               *start_lsn,
2229         struct xlog_in_core     **commit_iclog,
2230         uint                    flags)
2231 {
2232         struct xlog_in_core     *iclog = NULL;
2233         struct xfs_log_iovec    *vecp;
2234         struct xfs_log_vec      *lv;
2235         int                     len;
2236         int                     index;
2237         int                     partial_copy = 0;
2238         int                     partial_copy_len = 0;
2239         int                     contwr = 0;
2240         int                     record_cnt = 0;
2241         int                     data_cnt = 0;
2242         int                     error;
2243
2244         *start_lsn = 0;
2245
2246         len = xlog_write_calc_vec_length(ticket, log_vector);
2247
2248         /*
2249          * Region headers and bytes are already accounted for.
2250          * We only need to take into account start records and
2251          * split regions in this function.
2252          */
2253         if (ticket->t_flags & XLOG_TIC_INITED)
2254                 ticket->t_curr_res -= sizeof(xlog_op_header_t);
2255
2256         /*
2257          * Commit record headers need to be accounted for. These
2258          * come in as separate writes so are easy to detect.
2259          */
2260         if (flags & (XLOG_COMMIT_TRANS | XLOG_UNMOUNT_TRANS))
2261                 ticket->t_curr_res -= sizeof(xlog_op_header_t);
2262
2263         if (ticket->t_curr_res < 0)
2264                 xlog_print_tic_res(log->l_mp, ticket);
2265
2266         index = 0;
2267         lv = log_vector;
2268         vecp = lv->lv_iovecp;
2269         while (lv && (!lv->lv_niovecs || index < lv->lv_niovecs)) {
2270                 void            *ptr;
2271                 int             log_offset;
2272
2273                 error = xlog_state_get_iclog_space(log, len, &iclog, ticket,
2274                                                    &contwr, &log_offset);
2275                 if (error)
2276                         return error;
2277
2278                 ASSERT(log_offset <= iclog->ic_size - 1);
2279                 ptr = iclog->ic_datap + log_offset;
2280
2281                 /* start_lsn is the first lsn written to. That's all we need. */
2282                 if (!*start_lsn)
2283                         *start_lsn = be64_to_cpu(iclog->ic_header.h_lsn);
2284
2285                 /*
2286                  * This loop writes out as many regions as can fit in the amount
2287                  * of space which was allocated by xlog_state_get_iclog_space().
2288                  */
2289                 while (lv && (!lv->lv_niovecs || index < lv->lv_niovecs)) {
2290                         struct xfs_log_iovec    *reg;
2291                         struct xlog_op_header   *ophdr;
2292                         int                     start_rec_copy;
2293                         int                     copy_len;
2294                         int                     copy_off;
2295                         bool                    ordered = false;
2296
2297                         /* ordered log vectors have no regions to write */
2298                         if (lv->lv_buf_len == XFS_LOG_VEC_ORDERED) {
2299                                 ASSERT(lv->lv_niovecs == 0);
2300                                 ordered = true;
2301                                 goto next_lv;
2302                         }
2303
2304                         reg = &vecp[index];
2305                         ASSERT(reg->i_len % sizeof(__int32_t) == 0);
2306                         ASSERT((unsigned long)ptr % sizeof(__int32_t) == 0);
2307
2308                         start_rec_copy = xlog_write_start_rec(ptr, ticket);
2309                         if (start_rec_copy) {
2310                                 record_cnt++;
2311                                 xlog_write_adv_cnt(&ptr, &len, &log_offset,
2312                                                    start_rec_copy);
2313                         }
2314
2315                         ophdr = xlog_write_setup_ophdr(log, ptr, ticket, flags);
2316                         if (!ophdr)
2317                                 return XFS_ERROR(EIO);
2318
2319                         xlog_write_adv_cnt(&ptr, &len, &log_offset,
2320                                            sizeof(struct xlog_op_header));
2321
2322                         len += xlog_write_setup_copy(ticket, ophdr,
2323                                                      iclog->ic_size-log_offset,
2324                                                      reg->i_len,
2325                                                      &copy_off, &copy_len,
2326                                                      &partial_copy,
2327                                                      &partial_copy_len);
2328                         xlog_verify_dest_ptr(log, ptr);
2329
2330                         /* copy region */
2331                         ASSERT(copy_len >= 0);
2332                         memcpy(ptr, reg->i_addr + copy_off, copy_len);
2333                         xlog_write_adv_cnt(&ptr, &len, &log_offset, copy_len);
2334
2335                         copy_len += start_rec_copy + sizeof(xlog_op_header_t);
2336                         record_cnt++;
2337                         data_cnt += contwr ? copy_len : 0;
2338
2339                         error = xlog_write_copy_finish(log, iclog, flags,
2340                                                        &record_cnt, &data_cnt,
2341                                                        &partial_copy,
2342                                                        &partial_copy_len,
2343                                                        log_offset,
2344                                                        commit_iclog);
2345                         if (error)
2346                                 return error;
2347
2348                         /*
2349                          * if we had a partial copy, we need to get more iclog
2350                          * space but we don't want to increment the region
2351                          * index because there is still more in this region to
2352                          * write.
2353                          *
2354                          * If we completed writing this region, and we flushed
2355                          * the iclog (indicated by resetting of the record
2356                          * count), then we also need to get more log space. If
2357                          * this was the last record, though, we are done and
2358                          * can just return.
2359                          */
2360                         if (partial_copy)
2361                                 break;
2362
2363                         if (++index == lv->lv_niovecs) {
2364 next_lv:
2365                                 lv = lv->lv_next;
2366                                 index = 0;
2367                                 if (lv)
2368                                         vecp = lv->lv_iovecp;
2369                         }
2370                         if (record_cnt == 0 && ordered == false) {
2371                                 if (!lv)
2372                                         return 0;
2373                                 break;
2374                         }
2375                 }
2376         }
2377
2378         ASSERT(len == 0);
2379
2380         xlog_state_finish_copy(log, iclog, record_cnt, data_cnt);
2381         if (!commit_iclog)
2382                 return xlog_state_release_iclog(log, iclog);
2383
2384         ASSERT(flags & XLOG_COMMIT_TRANS);
2385         *commit_iclog = iclog;
2386         return 0;
2387 }
2388
2389
2390 /*****************************************************************************
2391  *
2392  *              State Machine functions
2393  *
2394  *****************************************************************************
2395  */
2396
2397 /* Clean iclogs starting from the head.  This ordering must be
2398  * maintained, so an iclog doesn't become ACTIVE beyond one that
2399  * is SYNCING.  This is also required to maintain the notion that we use
2400  * an ordered wait queue to hold off would-be writers to the log when every
2401  * iclog is trying to sync to disk.
2402  *
2403  * State Change: DIRTY -> ACTIVE
2404  */
2405 STATIC void
2406 xlog_state_clean_log(
2407         struct xlog *log)
2408 {
2409         xlog_in_core_t  *iclog;
2410         int changed = 0;
2411
2412         iclog = log->l_iclog;
2413         do {
2414                 if (iclog->ic_state == XLOG_STATE_DIRTY) {
2415                         iclog->ic_state = XLOG_STATE_ACTIVE;
2416                         iclog->ic_offset       = 0;
2417                         ASSERT(iclog->ic_callback == NULL);
2418                         /*
2419                          * If the number of ops in this iclog indicates it just
2420                          * contains the dummy transaction, we can
2421                          * change state into IDLE (the second time around).
2422                          * Otherwise we should change the state into
2423                          * NEED a dummy.
2424                          * We don't need to cover the dummy.
2425                          */
2426                         if (!changed &&
2427                            (be32_to_cpu(iclog->ic_header.h_num_logops) ==
2428                                         XLOG_COVER_OPS)) {
2429                                 changed = 1;
2430                         } else {
2431                                 /*
2432                                  * We have two dirty iclogs so start over
2433                                  * This could also be that the num of ops indicates
2434                                  * this is not the dummy going out.
2435                                  */
2436                                 changed = 2;
2437                         }
2438                         iclog->ic_header.h_num_logops = 0;
2439                         memset(iclog->ic_header.h_cycle_data, 0,
2440                               sizeof(iclog->ic_header.h_cycle_data));
2441                         iclog->ic_header.h_lsn = 0;
2442                 } else if (iclog->ic_state == XLOG_STATE_ACTIVE)
2443                         /* do nothing */;
2444                 else
2445                         break;  /* stop cleaning */
2446                 iclog = iclog->ic_next;
2447         } while (iclog != log->l_iclog);
2448
2449         /* log is locked when we are called */
2450         /*
2451          * Change state for the dummy log recording.
2452  * We usually go to NEED. But we go to NEED2 if the changed flag indicates
2453          * we are done writing the dummy record.
2454  * If we are done with the second dummy record (DONE2), then
2455          * we go to IDLE.
2456          */
2457         if (changed) {
2458                 switch (log->l_covered_state) {
2459                 case XLOG_STATE_COVER_IDLE:
2460                 case XLOG_STATE_COVER_NEED:
2461                 case XLOG_STATE_COVER_NEED2:
2462                         log->l_covered_state = XLOG_STATE_COVER_NEED;
2463                         break;
2464
2465                 case XLOG_STATE_COVER_DONE:
2466                         if (changed == 1)
2467                                 log->l_covered_state = XLOG_STATE_COVER_NEED2;
2468                         else
2469                                 log->l_covered_state = XLOG_STATE_COVER_NEED;
2470                         break;
2471
2472                 case XLOG_STATE_COVER_DONE2:
2473                         if (changed == 1)
2474                                 log->l_covered_state = XLOG_STATE_COVER_IDLE;
2475                         else
2476                                 log->l_covered_state = XLOG_STATE_COVER_NEED;
2477                         break;
2478
2479                 default:
2480                         ASSERT(0);
2481                 }
2482         }
2483 }       /* xlog_state_clean_log */
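
/*
 * [Editor's illustration, not part of the original source.]  The covering
 * state machine driven above walks, roughly:
 *
 *	COVER_NEED  --(dummy written)--> COVER_DONE  --(clean)--> COVER_NEED2
 *	COVER_NEED2 --(dummy written)--> COVER_DONE2 --(clean)--> COVER_IDLE
 *
 * falling back to COVER_NEED whenever non-dummy activity dirties the log
 * (changed == 2 here).  The DONE states themselves are set elsewhere, when
 * the dummy record is issued.
 */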
2484
2485 STATIC xfs_lsn_t
2486 xlog_get_lowest_lsn(
2487         struct xlog     *log)
2488 {
2489         xlog_in_core_t  *lsn_log;
2490         xfs_lsn_t       lowest_lsn, lsn;
2491
2492         lsn_log = log->l_iclog;
2493         lowest_lsn = 0;
2494         do {
2495             if (!(lsn_log->ic_state & (XLOG_STATE_ACTIVE|XLOG_STATE_DIRTY))) {
2496                 lsn = be64_to_cpu(lsn_log->ic_header.h_lsn);
2497                 if ((lsn && !lowest_lsn) ||
2498                     (XFS_LSN_CMP(lsn, lowest_lsn) < 0)) {
2499                         lowest_lsn = lsn;
2500                 }
2501             }
2502             lsn_log = lsn_log->ic_next;
2503         } while (lsn_log != log->l_iclog);
2504         return lowest_lsn;
2505 }
2506
2507
2508 STATIC void
2509 xlog_state_do_callback(
2510         struct xlog             *log,
2511         int                     aborted,
2512         struct xlog_in_core     *ciclog)
2513 {
2514         xlog_in_core_t     *iclog;
2515         xlog_in_core_t     *first_iclog;        /* used to know when we've
2516                                                  * processed all iclogs once */
2517         xfs_log_callback_t *cb, *cb_next;
2518         int                flushcnt = 0;
2519         xfs_lsn_t          lowest_lsn;
2520         int                ioerrors;    /* counter: iclogs with errors */
2521         int                loopdidcallbacks; /* flag: inner loop did callbacks*/
2522         int                funcdidcallbacks; /* flag: function did callbacks */
2523         int                repeats;     /* for issuing console warnings if
2524                                          * looping too many times */
2525         int                wake = 0;
2526
2527         spin_lock(&log->l_icloglock);
2528         first_iclog = iclog = log->l_iclog;
2529         ioerrors = 0;
2530         funcdidcallbacks = 0;
2531         repeats = 0;
2532
2533         do {
2534                 /*
2535                  * Scan all iclogs starting with the one pointed to by the
2536                  * log.  Reset this starting point each time the log is
2537                  * unlocked (during callbacks).
2538                  *
2539                  * Keep looping through iclogs until one full pass is made
2540                  * without running any callbacks.
2541                  */
2542                 first_iclog = log->l_iclog;
2543                 iclog = log->l_iclog;
2544                 loopdidcallbacks = 0;
2545                 repeats++;
2546
2547                 do {
2548
2549                         /* skip all iclogs in the ACTIVE & DIRTY states */
2550                         if (iclog->ic_state &
2551                             (XLOG_STATE_ACTIVE|XLOG_STATE_DIRTY)) {
2552                                 iclog = iclog->ic_next;
2553                                 continue;
2554                         }
2555
2556                         /*
2557                          * Between marking a filesystem SHUTDOWN and stopping
2558                          * the log, we do flush all iclogs to disk (if there
2559                          * wasn't a log I/O error). So, we do want things to
2560                          * go smoothly in case of just a SHUTDOWN  w/o a
2561                          * LOG_IO_ERROR.
2562                          */
2563                         if (!(iclog->ic_state & XLOG_STATE_IOERROR)) {
2564                                 /*
2565                                  * Can only perform callbacks in order.  Since
2566                                  * this iclog is not in the DONE_SYNC/
2567                                  * DO_CALLBACK state, we skip the rest and
2568                                  * just try to clean up.  If we set our iclog
2569                                  * to DO_CALLBACK, we will not process it when
2570                                  * we retry since a previous iclog is in the
2571                                  * CALLBACK and the state cannot change since
2572                                  * we are holding the l_icloglock.
2573                                  */
2574                                 if (!(iclog->ic_state &
2575                                         (XLOG_STATE_DONE_SYNC |
2576                                                  XLOG_STATE_DO_CALLBACK))) {
2577                                         if (ciclog && (ciclog->ic_state ==
2578                                                         XLOG_STATE_DONE_SYNC)) {
2579                                                 ciclog->ic_state = XLOG_STATE_DO_CALLBACK;
2580                                         }
2581                                         break;
2582                                 }
2583                                 /*
2584                                  * We now have an iclog that is in either the
2585                                  * DO_CALLBACK or DONE_SYNC states. The other
2586                                  * states (WANT_SYNC, SYNCING, or CALLBACK) were
2587                                  * caught by the above if and are going to be
2588                                  * cleaned up later (i.e. we aren't doing their
2589                                  * callbacks); see the above if.
2590                                  */
2591
2592                                 /*
2593                                  * We will do one more check here to see if we
2594                                  * have chased our tail around.
2595                                  */
2596
2597                                 lowest_lsn = xlog_get_lowest_lsn(log);
2598                                 if (lowest_lsn &&
2599                                     XFS_LSN_CMP(lowest_lsn,
2600                                                 be64_to_cpu(iclog->ic_header.h_lsn)) < 0) {
2601                                         iclog = iclog->ic_next;
2602                                         continue; /* Leave this iclog for
2603                                                    * another thread */
2604                                 }
2605
2606                                 iclog->ic_state = XLOG_STATE_CALLBACK;
2607
2608
2609                                 /*
2610                                  * Completion of an iclog IO does not imply that
2611                                  * a transaction has completed, as transactions
2612                                  * can be large enough to span many iclogs. We
2613                                  * cannot change the tail of the log half way
2614                                  * through a transaction as this may be the only
2615                                  * transaction in the log and moving the tail to
2616                                  * point to the middle of it will prevent
2617                                  * recovery from finding the start of the
2618                                  * transaction. Hence we should only update the
2619                                  * last_sync_lsn if this iclog contains
2620                                  * transaction completion callbacks on it.
2621                                  *
2622                                  * We have to do this before we drop the
2623                                  * icloglock to ensure we are the only one that
2624                                  * can update it.
2625                                  */
2626                                 ASSERT(XFS_LSN_CMP(atomic64_read(&log->l_last_sync_lsn),
2627                                         be64_to_cpu(iclog->ic_header.h_lsn)) <= 0);
2628                                 if (iclog->ic_callback)
2629                                         atomic64_set(&log->l_last_sync_lsn,
2630                                                 be64_to_cpu(iclog->ic_header.h_lsn));
2631
2632                         } else
2633                                 ioerrors++;
2634
2635                         spin_unlock(&log->l_icloglock);
2636
2637                         /*
2638                          * Keep processing entries in the callback list until
2639                          * we come around and it is empty.  We need to
2640                          * atomically see that the list is empty and change the
2641                          * state to DIRTY so that we don't miss any more
2642                          * callbacks being added.
2643                          */
2644                         spin_lock(&iclog->ic_callback_lock);
2645                         cb = iclog->ic_callback;
2646                         while (cb) {
2647                                 iclog->ic_callback_tail = &(iclog->ic_callback);
2648                                 iclog->ic_callback = NULL;
2649                                 spin_unlock(&iclog->ic_callback_lock);
2650
2651                                 /* perform callbacks in the order given */
2652                                 for (; cb; cb = cb_next) {
2653                                         cb_next = cb->cb_next;
2654                                         cb->cb_func(cb->cb_arg, aborted);
2655                                 }
2656                                 spin_lock(&iclog->ic_callback_lock);
2657                                 cb = iclog->ic_callback;
2658                         }
2659
2660                         loopdidcallbacks++;
2661                         funcdidcallbacks++;
2662
2663                         spin_lock(&log->l_icloglock);
2664                         ASSERT(iclog->ic_callback == NULL);
2665                         spin_unlock(&iclog->ic_callback_lock);
2666                         if (!(iclog->ic_state & XLOG_STATE_IOERROR))
2667                                 iclog->ic_state = XLOG_STATE_DIRTY;
2668
2669                         /*
2670                          * Transition from DIRTY to ACTIVE if applicable.
2671                          * NOP if STATE_IOERROR.
2672                          */
2673                         xlog_state_clean_log(log);
2674
2675                         /* wake up threads waiting in xfs_log_force() */
2676                         wake_up_all(&iclog->ic_force_wait);
2677
2678                         iclog = iclog->ic_next;
2679                 } while (first_iclog != iclog);
2680
2681                 if (repeats > 5000) {
2682                         flushcnt += repeats;
2683                         repeats = 0;
2684                         xfs_warn(log->l_mp,
2685                                 "%s: possible infinite loop (%d iterations)",
2686                                 __func__, flushcnt);
2687                 }
2688         } while (!ioerrors && loopdidcallbacks);
2689
2690         /*
2691          * make one last gasp attempt to see if iclogs are being left in
2692  * limbo.
2693          */
2694 #ifdef DEBUG
2695         if (funcdidcallbacks) {
2696                 first_iclog = iclog = log->l_iclog;
2697                 do {
2698                         ASSERT(iclog->ic_state != XLOG_STATE_DO_CALLBACK);
2699                         /*
2700                          * Terminate the loop if iclogs are found in states
2701                          * which will cause other threads to clean up iclogs.
2702                          *
2703                          * SYNCING - i/o completion will go through logs
2704                          * DONE_SYNC - interrupt thread should be waiting for
2705                          *              l_icloglock
2706                          * IOERROR - give up hope all ye who enter here
2707                          */
2708                         if (iclog->ic_state == XLOG_STATE_WANT_SYNC ||
2709                             iclog->ic_state == XLOG_STATE_SYNCING ||
2710                             iclog->ic_state == XLOG_STATE_DONE_SYNC ||
2711                             iclog->ic_state == XLOG_STATE_IOERROR )
2712                                 break;
2713                         iclog = iclog->ic_next;
2714                 } while (first_iclog != iclog);
2715         }
2716 #endif
2717
2718         if (log->l_iclog->ic_state & (XLOG_STATE_ACTIVE|XLOG_STATE_IOERROR))
2719                 wake = 1;
2720         spin_unlock(&log->l_icloglock);
2721
2722         if (wake)
2723                 wake_up_all(&log->l_flush_wait);
2724 }
2725
2726
2727 /*
2728  * Finish transitioning this iclog to the dirty state.
2729  *
2730  * Make sure that we completely execute this routine only when this is
2731  * the last call to the iclog.  There is a good chance that iclog flushes,
2732  * when we reach the end of the physical log, get turned into 2 separate
2733  * calls to bwrite.  Hence, one iclog flush could generate two calls to this
2734  * routine.  By using the reference count bwritecnt, we guarantee that only
2735  * the second completion goes through.
2736  *
2737  * Callbacks could take time, so they are done outside the scope of the
2738  * global state machine log lock.
2739  */
2740 STATIC void
2741 xlog_state_done_syncing(
2742         xlog_in_core_t  *iclog,
2743         int             aborted)
2744 {
2745         struct xlog        *log = iclog->ic_log;
2746
2747         spin_lock(&log->l_icloglock);
2748
2749         ASSERT(iclog->ic_state == XLOG_STATE_SYNCING ||
2750                iclog->ic_state == XLOG_STATE_IOERROR);
2751         ASSERT(atomic_read(&iclog->ic_refcnt) == 0);
2752         ASSERT(iclog->ic_bwritecnt == 1 || iclog->ic_bwritecnt == 2);
2753
2754
2755         /*
2756          * If we got an error, either on the first buffer, or in the case of
2757          * split log writes, on the second, we mark ALL iclogs STATE_IOERROR,
2758          * and none should ever be attempted to be written to disk
2759          * again.
2760          */
2761         if (iclog->ic_state != XLOG_STATE_IOERROR) {
2762                 if (--iclog->ic_bwritecnt == 1) {
2763                         spin_unlock(&log->l_icloglock);
2764                         return;
2765                 }
2766                 iclog->ic_state = XLOG_STATE_DONE_SYNC;
2767         }
2768
2769         /*
2770          * Someone could be sleeping prior to writing out the next
2771          * iclog buffer; we wake them all.  One will get to do the
2772          * I/O, the others get to wait for the result.
2773          */
2774         wake_up_all(&iclog->ic_write_wait);
2775         spin_unlock(&log->l_icloglock);
2776         xlog_state_do_callback(log, aborted, iclog);    /* also cleans log */
2777 }       /* xlog_state_done_syncing */
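/*
 * Worked example (illustrative, not from the source): how ic_bwritecnt
 * gates the two completions of a split log write.  When xlog_sync()
 * wraps the physical end of the log it issues two bwrites and sets
 * ic_bwritecnt = 2, so:
 *
 *      first completion:   --ic_bwritecnt -> 1, return early above
 *      second completion:  --ic_bwritecnt -> 0, DONE_SYNC + callbacks
 */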
2778
2779
2780 /*
2781  * If the head of the in-core log ring is not (ACTIVE or DIRTY), then we must
2782  * sleep.  We wait on the flush queue on the head iclog as that should be
2783  * the first iclog to complete flushing. Hence if all iclogs are syncing,
2784  * we will wait here and all new writes will sleep until a sync completes.
2785  *
2786  * The in-core logs are used in a circular fashion. They are not used
2787  * out-of-order even when an iclog past the head is free.
2788  *
2789  * return:
2790  *      * log_offset where xlog_write() can start writing into the in-core
2791  *              log's data space.
2792  *      * in-core log pointer to which xlog_write() should write.
2793  *      * boolean indicating this is a continued write to an in-core log.
2794  *              If this is the last write, then the in-core log's offset field
2795  *              needs to be incremented, depending on the amount of data which
2796  *              is copied.
2797  */
2798 STATIC int
2799 xlog_state_get_iclog_space(
2800         struct xlog             *log,
2801         int                     len,
2802         struct xlog_in_core     **iclogp,
2803         struct xlog_ticket      *ticket,
2804         int                     *continued_write,
2805         int                     *logoffsetp)
2806 {
2807         int               log_offset;
2808         xlog_rec_header_t *head;
2809         xlog_in_core_t    *iclog;
2810         int               error;
2811
2812 restart:
2813         spin_lock(&log->l_icloglock);
2814         if (XLOG_FORCED_SHUTDOWN(log)) {
2815                 spin_unlock(&log->l_icloglock);
2816                 return XFS_ERROR(EIO);
2817         }
2818
2819         iclog = log->l_iclog;
2820         if (iclog->ic_state != XLOG_STATE_ACTIVE) {
2821                 XFS_STATS_INC(xs_log_noiclogs);
2822
2823                 /* Wait for log writes to have flushed */
2824                 xlog_wait(&log->l_flush_wait, &log->l_icloglock);
2825                 goto restart;
2826         }
2827
2828         head = &iclog->ic_header;
2829
2830         atomic_inc(&iclog->ic_refcnt);  /* prevents sync */
2831         log_offset = iclog->ic_offset;
2832
2833         /* On the 1st write to an iclog, figure out the lsn.  This works
2834          * if iclogs marked XLOG_STATE_WANT_SYNC always write out what they are
2835          * committing to.  If the offset is set, that's how many blocks
2836          * must be written.
2837          */
2838         if (log_offset == 0) {
2839                 ticket->t_curr_res -= log->l_iclog_hsize;
2840                 xlog_tic_add_region(ticket,
2841                                     log->l_iclog_hsize,
2842                                     XLOG_REG_TYPE_LRHEADER);
2843                 head->h_cycle = cpu_to_be32(log->l_curr_cycle);
2844                 head->h_lsn = cpu_to_be64(
2845                         xlog_assign_lsn(log->l_curr_cycle, log->l_curr_block));
2846                 ASSERT(log->l_curr_block >= 0);
2847         }
2848
2849         /* If there is enough room to write everything, then do it.  Otherwise,
2850          * claim the rest of the region and make sure the XLOG_STATE_WANT_SYNC
2851          * bit is on, so this will get flushed out.  Don't update ic_offset
2852          * here; we only know exactly how many bytes get copied later, so
2853          * defer the update until then.
2854          *
2855          * The xlog_write() algorithm assumes that at least two
2856          * xlog_op_header_t's can fit into the remaining data section.
2857          */
2858         if (iclog->ic_size - iclog->ic_offset < 2*sizeof(xlog_op_header_t)) {
2859                 xlog_state_switch_iclogs(log, iclog, iclog->ic_size);
2860
2861                 /*
2862                  * If I'm the only one writing to this iclog, sync it to disk.
2863                  * We need to do an atomic compare and decrement here to avoid
2864                  * racing with concurrent atomic_dec_and_lock() calls in
2865                  * xlog_state_release_iclog() when there is more than one
2866                  * reference to the iclog.
2867                  */
2868                 if (!atomic_add_unless(&iclog->ic_refcnt, -1, 1)) {
2869                         /* we are the only one */
2870                         spin_unlock(&log->l_icloglock);
2871                         error = xlog_state_release_iclog(log, iclog);
2872                         if (error)
2873                                 return error;
2874                 } else {
2875                         spin_unlock(&log->l_icloglock);
2876                 }
2877                 goto restart;
2878         }
2879
2880         /* Do we have enough room to write the full amount in the remainder
2881          * of this iclog?  Or must we continue a write on the next iclog and
2882          * mark this iclog as completely taken?  In the case where we switch
2883          * iclogs (to mark it taken), this particular iclog will release/sync
2884          * to disk in xlog_write().
2885          */
2886         if (len <= iclog->ic_size - iclog->ic_offset) {
2887                 *continued_write = 0;
2888                 iclog->ic_offset += len;
2889         } else {
2890                 *continued_write = 1;
2891                 xlog_state_switch_iclogs(log, iclog, iclog->ic_size);
2892         }
2893         *iclogp = iclog;
2894
2895         ASSERT(iclog->ic_offset <= iclog->ic_size);
2896         spin_unlock(&log->l_icloglock);
2897
2898         *logoffsetp = log_offset;
2899         return 0;
2900 }       /* xlog_state_get_iclog_space */
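/*
 * Worked example (hypothetical numbers): assuming the 12-byte on-disk
 * xlog_op_header_t, an iclog with ic_size = 32768 and ic_offset = 32750
 * has only 18 bytes left -- not enough for two op headers -- so the
 * iclog is switched to WANT_SYNC and the caller restarts on the next
 * iclog in the ring.  With 4096 bytes left and len = 6000, the caller
 * instead gets continued_write = 1 and xlog_write() spills the
 * remainder into the next iclog.
 */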
2901
2902 /* The first cnt-1 times through here we don't need to
2903  * move the grant write head because the permanent
2904  * reservation has reserved cnt times the unit amount.
2905  * Release part of current permanent unit reservation and
2906  * reset current reservation to be one unit's worth.  Also
2907  * move grant reservation head forward.
2908  */
2909 STATIC void
2910 xlog_regrant_reserve_log_space(
2911         struct xlog             *log,
2912         struct xlog_ticket      *ticket)
2913 {
2914         trace_xfs_log_regrant_reserve_enter(log, ticket);
2915
2916         if (ticket->t_cnt > 0)
2917                 ticket->t_cnt--;
2918
2919         xlog_grant_sub_space(log, &log->l_reserve_head.grant,
2920                                         ticket->t_curr_res);
2921         xlog_grant_sub_space(log, &log->l_write_head.grant,
2922                                         ticket->t_curr_res);
2923         ticket->t_curr_res = ticket->t_unit_res;
2924         xlog_tic_reset_res(ticket);
2925
2926         trace_xfs_log_regrant_reserve_sub(log, ticket);
2927
2928         /* just return if we still have some of the pre-reserved space */
2929         if (ticket->t_cnt > 0)
2930                 return;
2931
2932         xlog_grant_add_space(log, &log->l_reserve_head.grant,
2933                                         ticket->t_unit_res);
2934
2935         trace_xfs_log_regrant_reserve_exit(log, ticket);
2936
2937         ticket->t_curr_res = ticket->t_unit_res;
2938         xlog_tic_reset_res(ticket);
2939 }       /* xlog_regrant_reserve_log_space */
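/*
 * Worked example (hypothetical numbers): a permanent ticket with
 * t_unit_res = 8192, t_cnt = 2 and t_curr_res = 3000 at commit time
 * drops t_cnt to 1, gives the unused 3000 bytes back to both grant
 * heads and refills t_curr_res to 8192, then returns early.  Only when
 * t_cnt reaches 0 is a full unit re-added to the reserve grant head
 * for the next transaction in the sequence.
 */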
2940
2941
2942 /*
2943  * Give back the space left from a reservation.
2944  *
2945  * All the information we need to make a correct determination of space left
2946  * is present.  For non-permanent reservations, things are quite easy.  The
2947  * count should have been decremented to zero.  We only need to deal with the
2948  * space remaining in the current reservation part of the ticket.  If the
2949  * ticket contains a permanent reservation, there may be left over space which
2950  * needs to be released.  A count of N means that N-1 refills of the current
2951  * reservation can be done before we need to ask for more space.  The first
2952  * one goes to fill up the first current reservation.  Once we run out of
2953  * space, the count will stay at zero and the only space remaining will be
2954  * in the current reservation field.
2955  */
2956 STATIC void
2957 xlog_ungrant_log_space(
2958         struct xlog             *log,
2959         struct xlog_ticket      *ticket)
2960 {
2961         int     bytes;
2962
2963         if (ticket->t_cnt > 0)
2964                 ticket->t_cnt--;
2965
2966         trace_xfs_log_ungrant_enter(log, ticket);
2967         trace_xfs_log_ungrant_sub(log, ticket);
2968
2969         /*
2970          * If this is a permanent reservation ticket, we may be able to free
2971          * up more space based on the remaining count.
2972          */
2973         bytes = ticket->t_curr_res;
2974         if (ticket->t_cnt > 0) {
2975                 ASSERT(ticket->t_flags & XLOG_TIC_PERM_RESERV);
2976                 bytes += ticket->t_unit_res*ticket->t_cnt;
2977         }
2978
2979         xlog_grant_sub_space(log, &log->l_reserve_head.grant, bytes);
2980         xlog_grant_sub_space(log, &log->l_write_head.grant, bytes);
2981
2982         trace_xfs_log_ungrant_exit(log, ticket);
2983
2984         xfs_log_space_wake(log->l_mp);
2985 }
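/*
 * Worked example (hypothetical numbers): a permanent ticket with
 * t_unit_res = 4096, t_cnt = 3 and t_curr_res = 1000 releases
 * 1000 + 2 * 4096 = 9192 bytes from both grant heads (one count is
 * consumed by the decrement above) before waking any space waiters.
 */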
2986
2987 /*
2988  * Flush iclog to disk if this is the last reference to the given iclog and
2989  * the WANT_SYNC bit is set.
2990  *
2991  * When this function is entered, the iclog is not necessarily in the
2992  * WANT_SYNC state.  It may be sitting around waiting to get filled.
2993  */
2996 STATIC int
2997 xlog_state_release_iclog(
2998         struct xlog             *log,
2999         struct xlog_in_core     *iclog)
3000 {
3001         int             sync = 0;       /* do we sync? */
3002
3003         if (iclog->ic_state & XLOG_STATE_IOERROR)
3004                 return XFS_ERROR(EIO);
3005
3006         ASSERT(atomic_read(&iclog->ic_refcnt) > 0);
3007         if (!atomic_dec_and_lock(&iclog->ic_refcnt, &log->l_icloglock))
3008                 return 0;
3009
3010         if (iclog->ic_state & XLOG_STATE_IOERROR) {
3011                 spin_unlock(&log->l_icloglock);
3012                 return XFS_ERROR(EIO);
3013         }
3014         ASSERT(iclog->ic_state == XLOG_STATE_ACTIVE ||
3015                iclog->ic_state == XLOG_STATE_WANT_SYNC);
3016
3017         if (iclog->ic_state == XLOG_STATE_WANT_SYNC) {
3018                 /* update tail before writing to iclog */
3019                 xfs_lsn_t tail_lsn = xlog_assign_tail_lsn(log->l_mp);
3020                 sync++;
3021                 iclog->ic_state = XLOG_STATE_SYNCING;
3022                 iclog->ic_header.h_tail_lsn = cpu_to_be64(tail_lsn);
3023                 xlog_verify_tail_lsn(log, iclog, tail_lsn);
3024                 /* cycle incremented when incrementing curr_block */
3025         }
3026         spin_unlock(&log->l_icloglock);
3027
3028         /*
3029          * We let the log lock go, so it's possible that we hit a log I/O
3030          * error or some other SHUTDOWN condition that marks the iclog
3031          * as XLOG_STATE_IOERROR before the bwrite. However, we know that
3032          * this iclog has consistent data, so we ignore IOERROR
3033          * flags after this point.
3034          */
3035         if (sync)
3036                 return xlog_sync(log, iclog);
3037         return 0;
3038 }       /* xlog_state_release_iclog */
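/*
 * Illustrative sketch (not compiled; obj, refcnt, lock and
 * finish_last_reference() are hypothetical): the fast path above uses
 * the atomic_dec_and_lock() idiom, so only the thread that drops the
 * final reference ever takes l_icloglock:
 */
#if 0
	if (atomic_dec_and_lock(&obj->refcnt, &obj->lock)) {
		/* count reached zero: the lock is now held */
		finish_last_reference(obj);
		spin_unlock(&obj->lock);
	}
	/* on any other transition the count just drops, no lock taken */
#endif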
3039
3040
3041 /*
3042  * This routine will mark the current iclog in the ring as WANT_SYNC
3043  * and move the current iclog pointer to the next iclog in the ring.
3044  * When this routine is called from xlog_state_get_iclog_space(), the
3045  * exact size of the iclog has not yet been determined.  All we know
3046  * is that we have run out of space in this log record.
3047  */
3048 STATIC void
3049 xlog_state_switch_iclogs(
3050         struct xlog             *log,
3051         struct xlog_in_core     *iclog,
3052         int                     eventual_size)
3053 {
3054         ASSERT(iclog->ic_state == XLOG_STATE_ACTIVE);
3055         if (!eventual_size)
3056                 eventual_size = iclog->ic_offset;
3057         iclog->ic_state = XLOG_STATE_WANT_SYNC;
3058         iclog->ic_header.h_prev_block = cpu_to_be32(log->l_prev_block);
3059         log->l_prev_block = log->l_curr_block;
3060         log->l_prev_cycle = log->l_curr_cycle;
3061
3062         /* roll log?: ic_offset changed later */
3063         log->l_curr_block += BTOBB(eventual_size)+BTOBB(log->l_iclog_hsize);
3064
3065         /* Round up to next log-sunit */
3066         if (xfs_sb_version_haslogv2(&log->l_mp->m_sb) &&
3067             log->l_mp->m_sb.sb_logsunit > 1) {
3068                 __uint32_t sunit_bb = BTOBB(log->l_mp->m_sb.sb_logsunit);
3069                 log->l_curr_block = roundup(log->l_curr_block, sunit_bb);
3070         }
3071
3072         if (log->l_curr_block >= log->l_logBBsize) {
3073                 log->l_curr_cycle++;
3074                 if (log->l_curr_cycle == XLOG_HEADER_MAGIC_NUM)
3075                         log->l_curr_cycle++;
3076                 log->l_curr_block -= log->l_logBBsize;
3077                 ASSERT(log->l_curr_block >= 0);
3078         }
3079         ASSERT(iclog == log->l_iclog);
3080         log->l_iclog = iclog->ic_next;
3081 }       /* xlog_state_switch_iclogs */
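/*
 * Worked example (hypothetical geometry): with l_iclog_hsize = 512,
 * eventual_size = 30720 and sb_logsunit = 32768 (64 basic blocks), the
 * head advances by BTOBB(30720) + BTOBB(512) = 61 basic blocks and
 * l_curr_block is then rounded up to the next 64-block stripe-unit
 * boundary.  If that pushes l_curr_block past l_logBBsize, the cycle
 * is bumped (skipping XLOG_HEADER_MAGIC_NUM) and the block count
 * wraps back to the start of the physical log.
 */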
3082
3083 /*
3084  * Write out all data in the in-core log as of this exact moment in time.
3085  *
3086  * Data may be written to the in-core log during this call.  However,
3087  * we don't guarantee this data will be written out.  A change from past
3088  * implementation means this routine will *not* write out zero length LRs.
3089  *
3090  * Basically, we try and perform an intelligent scan of the in-core logs.
3091  * If we determine there is no flushable data, we just return.  There is no
3092  * flushable data if:
3093  *
3094  *      1. the current iclog is active and has no data; the previous iclog
3095  *              is in the active or dirty state.
3096  *      2. the current iclog is dirty, and the previous iclog is in the
3097  *              active or dirty state.
3098  *
3099  * We may sleep if:
3100  *
3101  *      1. the current iclog is in neither the active nor the dirty state.
3102  *      2. the current iclog is dirty, and the previous iclog is in
3103  *              neither the active nor the dirty state.
3104  *      3. the current iclog is active, and there is another thread writing
3105  *              to this particular iclog.
3106  *      4. a) the current iclog is active and has no other writers
3107  *         b) when we return from flushing out this iclog, it is still
3108  *              in neither the active nor the dirty state.
3109  */
3110 int
3111 _xfs_log_force(
3112         struct xfs_mount        *mp,
3113         uint                    flags,
3114         int                     *log_flushed)
3115 {
3116         struct xlog             *log = mp->m_log;
3117         struct xlog_in_core     *iclog;
3118         xfs_lsn_t               lsn;
3119
3120         XFS_STATS_INC(xs_log_force);
3121
3122         xlog_cil_force(log);
3123
3124         spin_lock(&log->l_icloglock);
3125
3126         iclog = log->l_iclog;
3127         if (iclog->ic_state & XLOG_STATE_IOERROR) {
3128                 spin_unlock(&log->l_icloglock);
3129                 return XFS_ERROR(EIO);
3130         }
3131
3132         /* If the head iclog is neither active nor dirty, we just attach
3133          * ourselves to the head and go to sleep.
3134          */
3135         if (iclog->ic_state == XLOG_STATE_ACTIVE ||
3136             iclog->ic_state == XLOG_STATE_DIRTY) {
3137                 /*
3138                  * If the head is dirty or (active and empty), then
3139                  * we need to look at the previous iclog.  If the previous
3140                  * iclog is active or dirty we are done.  There is nothing
3141                  * to sync out.  Otherwise, we attach ourselves to the
3142                  * previous iclog and go to sleep.
3143                  */
3144                 if (iclog->ic_state == XLOG_STATE_DIRTY ||
3145                     (atomic_read(&iclog->ic_refcnt) == 0
3146                      && iclog->ic_offset == 0)) {
3147                         iclog = iclog->ic_prev;
3148                         if (iclog->ic_state == XLOG_STATE_ACTIVE ||
3149                             iclog->ic_state == XLOG_STATE_DIRTY)
3150                                 goto no_sleep;
3151                         else
3152                                 goto maybe_sleep;
3153                 } else {
3154                         if (atomic_read(&iclog->ic_refcnt) == 0) {
3155                                 /* We are the only one with access to this
3156                                  * iclog.  Flush it out now.  There should
3157                                  * be a roundoff of zero to show that someone
3158                                  * has already taken care of the roundoff from
3159                                  * the previous sync.
3160                                  */
3161                                 atomic_inc(&iclog->ic_refcnt);
3162                                 lsn = be64_to_cpu(iclog->ic_header.h_lsn);
3163                                 xlog_state_switch_iclogs(log, iclog, 0);
3164                                 spin_unlock(&log->l_icloglock);
3165
3166                                 if (xlog_state_release_iclog(log, iclog))
3167                                         return XFS_ERROR(EIO);
3168
3169                                 if (log_flushed)
3170                                         *log_flushed = 1;
3171                                 spin_lock(&log->l_icloglock);
3172                                 if (be64_to_cpu(iclog->ic_header.h_lsn) == lsn &&
3173                                     iclog->ic_state != XLOG_STATE_DIRTY)
3174                                         goto maybe_sleep;
3175                                 else
3176                                         goto no_sleep;
3177                         } else {
3178                                 /* Someone else is writing to this iclog.
3179                                  * Use its call to flush out the data.  However,
3180                                  * the other thread may not force out this LR,
3181                                  * so we mark it WANT_SYNC.
3182                                  */
3183                                 xlog_state_switch_iclogs(log, iclog, 0);
3184                                 goto maybe_sleep;
3185                         }
3186                 }
3187         }
3188
3189         /* By the time we come around again, the iclog could've been filled
3190          * which would give it another lsn.  If we have a new lsn, just
3191          * return because the relevant data has been flushed.
3192          */
3193 maybe_sleep:
3194         if (flags & XFS_LOG_SYNC) {
3195                 /*
3196                  * We must check if we're shutting down here, before
3197                  * we wait, while we're holding the l_icloglock.
3198                  * Then we check again after waking up, in case our
3199                  * sleep was disturbed by bad news.
3200                  */
3201                 if (iclog->ic_state & XLOG_STATE_IOERROR) {
3202                         spin_unlock(&log->l_icloglock);
3203                         return XFS_ERROR(EIO);
3204                 }
3205                 XFS_STATS_INC(xs_log_force_sleep);
3206                 xlog_wait(&iclog->ic_force_wait, &log->l_icloglock);
3207                 /*
3208                  * No need to grab the log lock here since we're
3209                  * only deciding whether or not to return EIO
3210                  * and the memory read should be atomic.
3211                  */
3212                 if (iclog->ic_state & XLOG_STATE_IOERROR)
3213                         return XFS_ERROR(EIO);
3214                 if (log_flushed)
3215                         *log_flushed = 1;
3216         } else {
3217
3218 no_sleep:
3219                 spin_unlock(&log->l_icloglock);
3220         }
3221         return 0;
3222 }
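/*
 * Illustrative usage sketch (not compiled; the calling context is
 * hypothetical): a synchronous force from a caller that cares whether
 * anything actually hit the disk:
 */
#if 0
	int	log_flushed = 0;
	int	error;

	error = _xfs_log_force(mp, XFS_LOG_SYNC, &log_flushed);
	if (error)
		return error;
	/* log_flushed is now non-zero iff an iclog write was issued */
#endif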
3223
3224 /*
3225  * Wrapper for _xfs_log_force(), to be used when caller doesn't care
3226  * about errors or whether the log was flushed or not. This is the normal
3227  * interface to use when trying to unpin items or move the log forward.
3228  */
3229 void
3230 xfs_log_force(
3231         xfs_mount_t     *mp,
3232         uint            flags)
3233 {
3234         int     error;
3235
3236         trace_xfs_log_force(mp, 0);
3237         error = _xfs_log_force(mp, flags, NULL);
3238         if (error)
3239                 xfs_warn(mp, "%s: error %d returned.", __func__, error);
3240 }
3241
3242 /*
3243  * Force the in-core log to disk for a specific LSN.
3244  *
3245  * Find in-core log with lsn.
3246  *      If it is in the DIRTY state, just return.
3247  *      If it is in the ACTIVE state, move the in-core log into the WANT_SYNC
3248  *              state and go to sleep or return.
3249  *      If it is in any other state, go to sleep or return.
3250  *
3251  * Synchronous forces are implemented with a signal variable. All callers
3252  * to force a given lsn to disk will wait on the sv attached to the
3253  * specific in-core log.  When the given in-core log finally completes its
3254  * write to disk, that thread will wake up all threads waiting on the
3255  * sv.
3256  */
3257 int
3258 _xfs_log_force_lsn(
3259         struct xfs_mount        *mp,
3260         xfs_lsn_t               lsn,
3261         uint                    flags,
3262         int                     *log_flushed)
3263 {
3264         struct xlog             *log = mp->m_log;
3265         struct xlog_in_core     *iclog;
3266         int                     already_slept = 0;
3267
3268         ASSERT(lsn != 0);
3269
3270         XFS_STATS_INC(xs_log_force);
3271
3272         lsn = xlog_cil_force_lsn(log, lsn);
3273         if (lsn == NULLCOMMITLSN)
3274                 return 0;
3275
3276 try_again:
3277         spin_lock(&log->l_icloglock);
3278         iclog = log->l_iclog;
3279         if (iclog->ic_state & XLOG_STATE_IOERROR) {
3280                 spin_unlock(&log->l_icloglock);
3281                 return XFS_ERROR(EIO);
3282         }
3283
3284         do {
3285                 if (be64_to_cpu(iclog->ic_header.h_lsn) != lsn) {
3286                         iclog = iclog->ic_next;
3287                         continue;
3288                 }
3289
3290                 if (iclog->ic_state == XLOG_STATE_DIRTY) {
3291                         spin_unlock(&log->l_icloglock);
3292                         return 0;
3293                 }
3294
3295                 if (iclog->ic_state == XLOG_STATE_ACTIVE) {
3296                         /*
3297                          * We sleep here if we haven't already slept (e.g.
3298                          * this is the first time we've looked at the correct
3299                          * iclog buf) and the buffer before us is going to
3300                          * be sync'ed. The reason for this is that if we
3301                          * are doing sync transactions here, by waiting for
3302                          * the previous I/O to complete, we can allow a few
3303                          * more transactions into this iclog before we close
3304                          * it down.
3305                          *
3306                          * Otherwise, we mark the buffer WANT_SYNC, and bump
3307                          * up the refcnt so we can release the log (which
3308                          * drops the ref count).  The state switch keeps new
3309                          * transaction commits from using this buffer.  When
3310                          * the current commits finish writing into the buffer,
3311                          * the refcount will drop to zero and the buffer will
3312                          * go out then.
3313                          */
3314                         if (!already_slept &&
3315                             (iclog->ic_prev->ic_state &
3316                              (XLOG_STATE_WANT_SYNC | XLOG_STATE_SYNCING))) {
3317                                 ASSERT(!(iclog->ic_state & XLOG_STATE_IOERROR));
3318
3319                                 XFS_STATS_INC(xs_log_force_sleep);
3320
3321                                 xlog_wait(&iclog->ic_prev->ic_write_wait,
3322                                                         &log->l_icloglock);
3323                                 if (log_flushed)
3324                                         *log_flushed = 1;
3325                                 already_slept = 1;
3326                                 goto try_again;
3327                         }
3328                         atomic_inc(&iclog->ic_refcnt);
3329                         xlog_state_switch_iclogs(log, iclog, 0);
3330                         spin_unlock(&log->l_icloglock);
3331                         if (xlog_state_release_iclog(log, iclog))
3332                                 return XFS_ERROR(EIO);
3333                         if (log_flushed)
3334                                 *log_flushed = 1;
3335                         spin_lock(&log->l_icloglock);
3336                 }
3337
3338                 if ((flags & XFS_LOG_SYNC) && /* sleep */
3339                     !(iclog->ic_state &
3340                       (XLOG_STATE_ACTIVE | XLOG_STATE_DIRTY))) {
3341                         /*
3342                          * Don't wait on completion if we know that we've
3343                          * gotten a log write error.
3344                          */
3345                         if (iclog->ic_state & XLOG_STATE_IOERROR) {
3346                                 spin_unlock(&log->l_icloglock);
3347                                 return XFS_ERROR(EIO);
3348                         }
3349                         XFS_STATS_INC(xs_log_force_sleep);
3350                         xlog_wait(&iclog->ic_force_wait, &log->l_icloglock);
3351                         /*
3352                          * No need to grab the log lock here since we're
3353                          * only deciding whether or not to return EIO
3354                          * and the memory read should be atomic.
3355                          */
3356                         if (iclog->ic_state & XLOG_STATE_IOERROR)
3357                                 return XFS_ERROR(EIO);
3358
3359                         if (log_flushed)
3360                                 *log_flushed = 1;
3361                 } else {                /* just return */
3362                         spin_unlock(&log->l_icloglock);
3363                 }
3364
3365                 return 0;
3366         } while (iclog != log->l_iclog);
3367
3368         spin_unlock(&log->l_icloglock);
3369         return 0;
3370 }
3371
3372 /*
3373  * Wrapper for _xfs_log_force_lsn(), to be used when caller doesn't care
3374  * about errors or whether the log was flushed or not. This is the normal
3375  * interface to use when trying to unpin items or move the log forward.
3376  */
3377 void
3378 xfs_log_force_lsn(
3379         xfs_mount_t     *mp,
3380         xfs_lsn_t       lsn,
3381         uint            flags)
3382 {
3383         int     error;
3384
3385         trace_xfs_log_force(mp, lsn);
3386         error = _xfs_log_force_lsn(mp, lsn, flags, NULL);
3387         if (error)
3388                 xfs_warn(mp, "%s: error %d returned.", __func__, error);
3389 }
3390
3391 /*
3392  * Called when we want to mark the current iclog as being ready to sync to
3393  * disk.
3394  */
3395 STATIC void
3396 xlog_state_want_sync(
3397         struct xlog             *log,
3398         struct xlog_in_core     *iclog)
3399 {
3400         assert_spin_locked(&log->l_icloglock);
3401
3402         if (iclog->ic_state == XLOG_STATE_ACTIVE) {
3403                 xlog_state_switch_iclogs(log, iclog, 0);
3404         } else {
3405                 ASSERT(iclog->ic_state &
3406                         (XLOG_STATE_WANT_SYNC|XLOG_STATE_IOERROR));
3407         }
3408 }
3409
3410
3411 /*****************************************************************************
3412  *
3413  *              TICKET functions
3414  *
3415  *****************************************************************************
3416  */
3417
3418 /*
3419  * Free a used ticket when its refcount falls to zero.
3420  */
3421 void
3422 xfs_log_ticket_put(
3423         xlog_ticket_t   *ticket)
3424 {
3425         ASSERT(atomic_read(&ticket->t_ref) > 0);
3426         if (atomic_dec_and_test(&ticket->t_ref))
3427                 kmem_zone_free(xfs_log_ticket_zone, ticket);
3428 }
3429
3430 xlog_ticket_t *
3431 xfs_log_ticket_get(
3432         xlog_ticket_t   *ticket)
3433 {
3434         ASSERT(atomic_read(&ticket->t_ref) > 0);
3435         atomic_inc(&ticket->t_ref);
3436         return ticket;
3437 }
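/*
 * Illustrative sketch (not compiled): the get/put pair implements
 * plain reference counting -- take a reference before handing the
 * ticket to another context, drop it when done:
 */
#if 0
	struct xlog_ticket *t2 = xfs_log_ticket_get(ticket);
	/* ... hand t2 to another thread or queue ... */
	xfs_log_ticket_put(t2);	/* frees the ticket on the final put */
#endif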
3438
3439 /*
3440  * Figure out the total log space unit (in bytes) that would be
3441  * required for a log ticket.
3442  */
3443 int
3444 xfs_log_calc_unit_res(
3445         struct xfs_mount        *mp,
3446         int                     unit_bytes)
3447 {
3448         struct xlog             *log = mp->m_log;
3449         int                     iclog_space;
3450         uint                    num_headers;
3451
3452         /*
3453          * Permanent reservations have up to 'cnt'-1 active log operations
3454          * in the log.  A unit in this case is the amount of space for one
3455          * of these log operations.  Normal reservations have a cnt of 1
3456          * and their unit amount is the total amount of space required.
3457          *
3458          * The following lines of code account for non-transaction data
3459          * which occupy space in the on-disk log.
3460          *
3461          * Normal form of a transaction is:
3462          * <oph><trans-hdr><start-oph><reg1-oph><reg1><reg2-oph>...<commit-oph>
3463          * and then there are LR hdrs, split-recs and roundoff at end of syncs.
3464          *
3465          * We need to account for all the leadup data and trailer data
3466          * around the transaction data.
3467          * And then we need to account for the worst case in terms of using
3468          * more space.
3469          * The worst case will happen if:
3470          * - the placement of the transaction happens to be such that the
3471          *   roundoff is at its maximum
3472          * - the transaction data is synced before the commit record is synced
3473          *   i.e. <transaction-data><roundoff> | <commit-rec><roundoff>
3474          *   Therefore the commit record is in its own Log Record.
3475          *   This can happen as the commit record is called with its
3476          *   own region to xlog_write().
3477          *   This then means that in the worst case, roundoff can happen for
3478          *   the commit-rec as well.
3479          *   The commit-rec is smaller than padding in this scenario and so it is
3480          *   not added separately.
3481          */
3482
3483         /* for trans header */
3484         unit_bytes += sizeof(xlog_op_header_t);
3485         unit_bytes += sizeof(xfs_trans_header_t);
3486
3487         /* for start-rec */
3488         unit_bytes += sizeof(xlog_op_header_t);
3489
3490         /*
3491          * for LR headers - the space for data in an iclog is the size minus
3492          * the space used for the headers. If we use the iclog size, then we
3493          * undercalculate the number of headers required.
3494          *
3495          * Furthermore - the addition of op headers for split-recs might
3496          * increase the space required enough to require more log and op
3497          * headers, so take that into account too.
3498          *
3499          * IMPORTANT: This reservation makes the assumption that if this
3500          * transaction is the first in an iclog and hence has the LR headers
3501          * accounted to it, then the remaining space in the iclog is
3502          * exclusively for this transaction.  i.e. if the transaction is larger
3503          * than the iclog, it will be the only thing in that iclog.
3504          * Fundamentally, this means we must pass the entire log vector to
3505          * xlog_write to guarantee this.
3506          */
3507         iclog_space = log->l_iclog_size - log->l_iclog_hsize;
3508         num_headers = howmany(unit_bytes, iclog_space);
3509
3510         /* for split-recs - ophdrs added when data split over LRs */
3511         unit_bytes += sizeof(xlog_op_header_t) * num_headers;
3512
3513         /* add extra header reservations if we overrun */
3514         while (!num_headers ||
3515                howmany(unit_bytes, iclog_space) > num_headers) {
3516                 unit_bytes += sizeof(xlog_op_header_t);
3517                 num_headers++;
3518         }
3519         unit_bytes += log->l_iclog_hsize * num_headers;
3520
3521         /* for commit-rec LR header - note: padding will subsume the ophdr */
3522         unit_bytes += log->l_iclog_hsize;
3523
3524         /* for roundoff padding for transaction data and one for commit record */
3525         if (xfs_sb_version_haslogv2(&mp->m_sb) && mp->m_sb.sb_logsunit > 1) {
3526                 /* log su roundoff */
3527                 unit_bytes += 2 * mp->m_sb.sb_logsunit;
3528         } else {
3529                 /* BB roundoff */
3530                 unit_bytes += 2 * BBSIZE;
3531         }
3532
3533         return unit_bytes;
3534 }
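/*
 * Worked example (hypothetical numbers): for unit_bytes = 100000 with
 * 32k iclogs (l_iclog_size = 32768, l_iclog_hsize = 512), the usable
 * space per iclog is 32256 bytes, so howmany() yields num_headers = 4.
 * The overhead added above is then roughly:
 *
 *      trans hdr + start rec:  2 * sizeof(xlog_op_header_t)
 *                              + sizeof(xfs_trans_header_t)
 *      split-rec op hdrs:      4 * sizeof(xlog_op_header_t)
 *      LR headers:             4 * 512
 *      commit-rec LR header:   512
 *      roundoff:               2 * BBSIZE, or 2 * sb_logsunit on v2
 *                              logs with a stripe unit
 */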
3535
3536 /*
3537  * Allocate and initialise a new log ticket.
3538  */
3539 struct xlog_ticket *
3540 xlog_ticket_alloc(
3541         struct xlog             *log,
3542         int                     unit_bytes,
3543         int                     cnt,
3544         char                    client,
3545         bool                    permanent,
3546         xfs_km_flags_t          alloc_flags)
3547 {
3548         struct xlog_ticket      *tic;
3549         int                     unit_res;
3550
3551         tic = kmem_zone_zalloc(xfs_log_ticket_zone, alloc_flags);
3552         if (!tic)
3553                 return NULL;
3554
3555         unit_res = xfs_log_calc_unit_res(log->l_mp, unit_bytes);
3556
3557         atomic_set(&tic->t_ref, 1);
3558         tic->t_task             = current;
3559         INIT_LIST_HEAD(&tic->t_queue);
3560         tic->t_unit_res         = unit_res;
3561         tic->t_curr_res         = unit_res;
3562         tic->t_cnt              = cnt;
3563         tic->t_ocnt             = cnt;
3564         tic->t_tid              = prandom_u32();
3565         tic->t_clientid         = client;
3566         tic->t_flags            = XLOG_TIC_INITED;
3567         tic->t_trans_type       = 0;
3568         if (permanent)
3569                 tic->t_flags |= XLOG_TIC_PERM_RESERV;
3570
3571         xlog_tic_reset_res(tic);
3572
3573         return tic;
3574 }
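/*
 * Illustrative usage sketch (not compiled; sizes and flags chosen for
 * illustration): allocating a permanent ticket good for two refills,
 * as a reservation path might:
 */
#if 0
	struct xlog_ticket *tic;

	tic = xlog_ticket_alloc(log, 100000, 2, XFS_TRANSACTION,
				true, KM_SLEEP);
	if (!tic)
		return XFS_ERROR(ENOMEM);
#endif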
3575
3576
3577 /******************************************************************************
3578  *
3579  *              Log debug routines
3580  *
3581  ******************************************************************************
3582  */
3583 #if defined(DEBUG)
3584 /*
3585  * Make sure that the destination ptr is within the valid data region of
3586  * one of the iclogs.  This uses backup pointers stored in a different
3587  * part of the log in case we trash the log structure.
3588  */
3589 void
3590 xlog_verify_dest_ptr(
3591         struct xlog     *log,
3592         char            *ptr)
3593 {
3594         int i;
3595         int good_ptr = 0;
3596
3597         for (i = 0; i < log->l_iclog_bufs; i++) {
3598                 if (ptr >= log->l_iclog_bak[i] &&
3599                     ptr <= log->l_iclog_bak[i] + log->l_iclog_size)
3600                         good_ptr++;
3601         }
3602
3603         if (!good_ptr)
3604                 xfs_emerg(log->l_mp, "%s: invalid ptr", __func__);
3605 }
3606
3607 /*
3608  * Check to make sure the grant write head didn't just overlap the tail.  If
3609  * the cycles are the same, we can't be overlapping.  Otherwise, make sure that
3610  * the cycles differ by exactly one and check the byte count.
3611  *
3612  * This check is run unlocked, so can give false positives. Rather than assert
3613  * on failures, use a warn-once flag and a panic tag to allow the admin to
3614  * determine if they want to panic the machine when such an error occurs. For
3615  * debug kernels this will have the same effect as using an assert but, unlike
3616  * an assert, it can be turned off at runtime.
3617  */
3618 STATIC void
3619 xlog_verify_grant_tail(
3620         struct xlog     *log)
3621 {
3622         int             tail_cycle, tail_blocks;
3623         int             cycle, space;
3624
3625         xlog_crack_grant_head(&log->l_write_head.grant, &cycle, &space);
3626         xlog_crack_atomic_lsn(&log->l_tail_lsn, &tail_cycle, &tail_blocks);
3627         if (tail_cycle != cycle) {
3628                 if (cycle - 1 != tail_cycle &&
3629                     !(log->l_flags & XLOG_TAIL_WARN)) {
3630                         xfs_alert_tag(log->l_mp, XFS_PTAG_LOGRES,
3631                                 "%s: cycle - 1 != tail_cycle", __func__);
3632                         log->l_flags |= XLOG_TAIL_WARN;
3633                 }
3634
3635                 if (space > BBTOB(tail_blocks) &&
3636                     !(log->l_flags & XLOG_TAIL_WARN)) {
3637                         xfs_alert_tag(log->l_mp, XFS_PTAG_LOGRES,
3638                                 "%s: space > BBTOB(tail_blocks)", __func__);
3639                         log->l_flags |= XLOG_TAIL_WARN;
3640                 }
3641         }
3642 }
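/*
 * Worked example (hypothetical values): if the write grant head cracks
 * to cycle 7, space 4096 while the tail LSN cracks to cycle 6,
 * block 16, the cycles differ by exactly one, so the check only
 * complains if space > BBTOB(16) = 8192 bytes, i.e. if the head has
 * lapped the tail within the wrapped cycle.
 */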
3643
3644 /* check if it will fit */
3645 STATIC void
3646 xlog_verify_tail_lsn(
3647         struct xlog             *log,
3648         struct xlog_in_core     *iclog,
3649         xfs_lsn_t               tail_lsn)
3650 {
3651         int     blocks;
3652
3653         if (CYCLE_LSN(tail_lsn) == log->l_prev_cycle) {
3654                 blocks = log->l_logBBsize -
3655                                 (log->l_prev_block - BLOCK_LSN(tail_lsn));
3656                 if (blocks < BTOBB(iclog->ic_offset) + BTOBB(log->l_iclog_hsize))
3657                         xfs_emerg(log->l_mp, "%s: ran out of log space", __func__);
3658         } else {
3659                 ASSERT(CYCLE_LSN(tail_lsn) + 1 == log->l_prev_cycle);
3660
3661                 if (BLOCK_LSN(tail_lsn) == log->l_prev_block)
3662                         xfs_emerg(log->l_mp, "%s: tail wrapped", __func__);
3663
3664                 blocks = BLOCK_LSN(tail_lsn) - log->l_prev_block;
3665                 if (blocks < BTOBB(iclog->ic_offset) + 1)
3666                         xfs_emerg(log->l_mp, "%s: ran out of log space", __func__);
3667         }
3668 }       /* xlog_verify_tail_lsn */
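/*
 * Worked example (hypothetical geometry): on a 1024-BB log with the
 * tail at cycle 7, block 100 and the head at l_prev_cycle = 7,
 * l_prev_block = 900, the same-cycle branch computes
 * blocks = 1024 - (900 - 100) = 224 basic blocks of headroom.  Once
 * the head wraps (l_prev_cycle = 8, l_prev_block = 50), the other
 * branch computes blocks = 100 - 50 = 50 and complains if the iclog
 * about to be written would not fit.
 */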
3669
3670 /*
3671  * Perform a number of checks on the iclog before writing to disk.
3672  *
3673  * 1. Make sure the iclogs are still circular
3674  * 2. Make sure we have a good magic number
3675  * 3. Make sure we don't have magic numbers in the data
3676  * 4. Check fields of each log operation header for:
3677  *      A. Valid client identifier
3678  *      B. tid ptr value falls in valid ptr space (user space code)
3679  *      C. Length in log record header is correct according to the
3680  *              individual operation headers within record.
3681  * 5. When a bwrite will occur within 5 blocks of the front of the physical
3682  *      log, check the preceding blocks of the physical log to make sure all
3683  *      the cycle numbers agree with the current cycle number.
3684  */
3685 STATIC void
3686 xlog_verify_iclog(
3687         struct xlog             *log,
3688         struct xlog_in_core     *iclog,
3689         int                     count,
3690         bool                    syncing)
3691 {
3692         xlog_op_header_t        *ophead;
3693         xlog_in_core_t          *icptr;
3694         xlog_in_core_2_t        *xhdr;
3695         xfs_caddr_t             ptr;
3696         xfs_caddr_t             base_ptr;
3697         __psint_t               field_offset;
3698         __uint8_t               clientid;
3699         int                     len, i, j, k, op_len;
3700         int                     idx;
3701
3702         /* check validity of iclog pointers */
3703         spin_lock(&log->l_icloglock);
3704         icptr = log->l_iclog;
3705         for (i = 0; i < log->l_iclog_bufs; i++) {
3706                 if (icptr == NULL)
3707                         xfs_emerg(log->l_mp, "%s: invalid ptr", __func__);
3708                 icptr = icptr->ic_next;
3709         }
3710         if (icptr != log->l_iclog)
3711                 xfs_emerg(log->l_mp, "%s: corrupt iclog ring", __func__);
3712         spin_unlock(&log->l_icloglock);
3713
3714         /* check log magic numbers */
3715         if (iclog->ic_header.h_magicno != cpu_to_be32(XLOG_HEADER_MAGIC_NUM))
3716                 xfs_emerg(log->l_mp, "%s: invalid magic num", __func__);
3717
3718         ptr = (xfs_caddr_t) &iclog->ic_header;
3719         for (ptr += BBSIZE; ptr < ((xfs_caddr_t)&iclog->ic_header) + count;
3720              ptr += BBSIZE) {
3721                 if (*(__be32 *)ptr == cpu_to_be32(XLOG_HEADER_MAGIC_NUM))
3722                         xfs_emerg(log->l_mp, "%s: unexpected magic num",
3723                                 __func__);
3724         }
3725
3726         /* check fields */
3727         len = be32_to_cpu(iclog->ic_header.h_num_logops);
3728         ptr = iclog->ic_datap;
3729         base_ptr = ptr;
3730         ophead = (xlog_op_header_t *)ptr;
3731         xhdr = iclog->ic_data;
3732         for (i = 0; i < len; i++) {
3733                 ophead = (xlog_op_header_t *)ptr;
3734
3735                 /* clientid is only 1 byte */
3736                 field_offset = (__psint_t)
3737                                ((xfs_caddr_t)&(ophead->oh_clientid) - base_ptr);
3738                 if (!syncing || (field_offset & 0x1ff)) {
3739                         clientid = ophead->oh_clientid;
3740                 } else {
3741                         idx = BTOBBT((xfs_caddr_t)&(ophead->oh_clientid) - iclog->ic_datap);
3742                         if (idx >= (XLOG_HEADER_CYCLE_SIZE / BBSIZE)) {
3743                                 j = idx / (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
3744                                 k = idx % (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
3745                                 clientid = xlog_get_client_id(
3746                                         xhdr[j].hic_xheader.xh_cycle_data[k]);
3747                         } else {
3748                                 clientid = xlog_get_client_id(
3749                                         iclog->ic_header.h_cycle_data[idx]);
3750                         }
3751                 }
3752                 if (clientid != XFS_TRANSACTION && clientid != XFS_LOG)
3753                         xfs_warn(log->l_mp,
3754                                 "%s: invalid clientid %d op 0x%p offset 0x%lx",
3755                                 __func__, clientid, ophead,
3756                                 (unsigned long)field_offset);
3757
3758                 /* check length */
3759                 field_offset = (__psint_t)
3760                                ((xfs_caddr_t)&(ophead->oh_len) - base_ptr);
3761                 if (!syncing || (field_offset & 0x1ff)) {
3762                         op_len = be32_to_cpu(ophead->oh_len);
3763                 } else {
3764                         idx = BTOBBT((__psint_t)&ophead->oh_len -
3765                                     (__psint_t)iclog->ic_datap);
3766                         if (idx >= (XLOG_HEADER_CYCLE_SIZE / BBSIZE)) {
3767                                 j = idx / (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
3768                                 k = idx % (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
3769                                 op_len = be32_to_cpu(xhdr[j].hic_xheader.xh_cycle_data[k]);
3770                         } else {
3771                                 op_len = be32_to_cpu(iclog->ic_header.h_cycle_data[idx]);
3772                         }
3773                 }
3774                 ptr += sizeof(xlog_op_header_t) + op_len;
3775         }
3776 }       /* xlog_verify_iclog */
3777 #endif
3778
3779 /*
3780  * Mark all iclogs IOERROR. l_icloglock is held by the caller.
3781  */
3782 STATIC int
3783 xlog_state_ioerror(
3784         struct xlog     *log)
3785 {
3786         xlog_in_core_t  *iclog, *ic;
3787
3788         iclog = log->l_iclog;
3789         if (!(iclog->ic_state & XLOG_STATE_IOERROR)) {
3790                 /*
3791                  * Mark all the incore logs IOERROR.
3792                  * From now on, no log flushes will result.
3793                  */
3794                 ic = iclog;
3795                 do {
3796                         ic->ic_state = XLOG_STATE_IOERROR;
3797                         ic = ic->ic_next;
3798                 } while (ic != iclog);
3799                 return 0;
3800         }
3801         /*
3802          * Return non-zero, if state transition has already happened.
3803          */
3804         return 1;
3805 }
3806
3807 /*
3808  * This is called from xfs_force_shutdown, when we're forcibly
3809  * shutting down the filesystem, typically because of an IO error.
3810  * Our main objectives here are to make sure that:
3811  *      a. the filesystem gets marked 'SHUTDOWN' for all interested
3812  *         parties to find out, 'atomically'.
3813  *      b. those who're sleeping on log reservations, pinned objects and
3814  *         other resources get woken up, and are told the bad news.
3815  *      c. nothing new gets queued up after (a) and (b) are done.
3816  *      d. if !logerror, flush the iclogs to disk, then seal them off
3817  *         for business.
3818  *
3819  * Note: for delayed logging the !logerror case needs to flush the regions
3820  * held in memory out to the iclogs before flushing them to disk. This needs
3821  * to be done before the log is marked as shutdown, otherwise the flush to the
3822  * iclogs will fail.
3823  */
3824 int
3825 xfs_log_force_umount(
3826         struct xfs_mount        *mp,
3827         int                     logerror)
3828 {
3829         struct xlog     *log;
3830         int             retval;
3831
3832         log = mp->m_log;
3833
3834         /*
3835          * If this happens during log recovery, don't worry about
3836          * locking; the log isn't open for business yet.
3837          */
3838         if (!log ||
3839             log->l_flags & XLOG_ACTIVE_RECOVERY) {
3840                 mp->m_flags |= XFS_MOUNT_FS_SHUTDOWN;
3841                 if (mp->m_sb_bp)
3842                         XFS_BUF_DONE(mp->m_sb_bp);
3843                 return 0;
3844         }
3845
3846         /*
3847          * Somebody could've already done the hard work for us.
3848          * No need to get locks for this.
3849          */
3850         if (logerror && log->l_iclog->ic_state & XLOG_STATE_IOERROR) {
3851                 ASSERT(XLOG_FORCED_SHUTDOWN(log));
3852                 return 1;
3853         }
3854         retval = 0;
3855
3856         /*
3857          * Flush the in memory commit item list before marking the log as
3858          * being shut down. We need to do it in this order to ensure all the
3859          * completed transactions are flushed to disk with the xfs_log_force()
3860          * call below.
3861          */
3862         if (!logerror)
3863                 xlog_cil_force(log);
3864
3865         /*
3866          * mark the filesystem and the log as being in a shutdown state and wake
3867          * everybody up to tell them the bad news.
3868          */
3869         spin_lock(&log->l_icloglock);
3870         mp->m_flags |= XFS_MOUNT_FS_SHUTDOWN;
3871         if (mp->m_sb_bp)
3872                 XFS_BUF_DONE(mp->m_sb_bp);
3873
3874         /*
3875          * This flag is sort of redundant because of the mount flag, but
3876          * it's good to maintain the separation between the log and the rest
3877          * of XFS.
3878          */
3879         log->l_flags |= XLOG_IO_ERROR;
3880
3881         /*
3882          * If we hit a log error, we want to mark all the iclogs IOERROR
3883          * while we're still holding the loglock.
3884          */
3885         if (logerror)
3886                 retval = xlog_state_ioerror(log);
3887         spin_unlock(&log->l_icloglock);
3888
3889         /*
3890          * We don't want anybody waiting for log reservations after this. That
3891          * means we have to wake up everybody queued up on reserveq as well as
3892          * writeq.  In addition, we make sure in xlog_{re}grant_log_space that
3893          * we don't enqueue anything once the SHUTDOWN flag is set, and this
3894          * action is protected by the grant locks.
3895          */
3896         xlog_grant_head_wake_all(&log->l_reserve_head);
3897         xlog_grant_head_wake_all(&log->l_write_head);
3898
3899         if (!(log->l_iclog->ic_state & XLOG_STATE_IOERROR)) {
3900                 ASSERT(!logerror);
3901                 /*
3902                  * Force the incore logs to disk before shutting the
3903                  * log down completely.
3904                  */
3905                 _xfs_log_force(mp, XFS_LOG_SYNC, NULL);
3906
3907                 spin_lock(&log->l_icloglock);
3908                 retval = xlog_state_ioerror(log);
3909                 spin_unlock(&log->l_icloglock);
3910         }
3911         /*
3912          * Wake up everybody waiting on xfs_log_force.
3913          * Callback all log item committed functions as if the
3914          * log writes were completed.
3915          */
3916         xlog_state_do_callback(log, XFS_LI_ABORTED, NULL);
3917
3918 #ifdef XFSERRORDEBUG
3919         {
3920                 xlog_in_core_t  *iclog;
3921
3922                 spin_lock(&log->l_icloglock);
3923                 iclog = log->l_iclog;
3924                 do {
3925                         ASSERT(iclog->ic_callback == 0);
3926                         iclog = iclog->ic_next;
3927                 } while (iclog != log->l_iclog);
3928                 spin_unlock(&log->l_icloglock);
3929         }
3930 #endif
3931         /* return non-zero if log IOERROR transition had already happened */
3932         return retval;
3933 }
3934
3935 STATIC int
3936 xlog_iclogs_empty(
3937         struct xlog     *log)
3938 {
3939         xlog_in_core_t  *iclog;
3940
3941         iclog = log->l_iclog;
3942         do {
3943                 /* endianness does not matter here, zero is zero in
3944                  * any language.
3945                  */
3946                 if (iclog->ic_header.h_num_logops)
3947                         return 0;
3948                 iclog = iclog->ic_next;
3949         } while (iclog != log->l_iclog);
3950         return 1;
3951 }
3952