1  // SPDX-License-Identifier: GPL-2.0
2  /*
3   * Copyright (c) 2000-2005 Silicon Graphics, Inc.
4   * All Rights Reserved.
5   */
6  #include "xfs.h"
7  #include "xfs_fs.h"
8  #include "xfs_shared.h"
9  #include "xfs_format.h"
10  #include "xfs_log_format.h"
11  #include "xfs_trans_resv.h"
12  #include "xfs_mount.h"
13  #include "xfs_errortag.h"
14  #include "xfs_error.h"
15  #include "xfs_trans.h"
16  #include "xfs_trans_priv.h"
17  #include "xfs_log.h"
18  #include "xfs_log_priv.h"
19  #include "xfs_trace.h"
20  #include "xfs_sysfs.h"
21  #include "xfs_sb.h"
22  #include "xfs_health.h"
23  
24  struct kmem_cache	*xfs_log_ticket_cache;
25  
26  /* Local miscellaneous function prototypes */
27  STATIC struct xlog *
28  xlog_alloc_log(
29  	struct xfs_mount	*mp,
30  	struct xfs_buftarg	*log_target,
31  	xfs_daddr_t		blk_offset,
32  	int			num_bblks);
33  STATIC void
34  xlog_dealloc_log(
35  	struct xlog		*log);
36  
37  /* local state machine functions */
38  STATIC void xlog_state_done_syncing(
39  	struct xlog_in_core	*iclog);
40  STATIC void xlog_state_do_callback(
41  	struct xlog		*log);
42  STATIC int
43  xlog_state_get_iclog_space(
44  	struct xlog		*log,
45  	int			len,
46  	struct xlog_in_core	**iclog,
47  	struct xlog_ticket	*ticket,
48  	int			*logoffsetp);
49  STATIC void
50  xlog_sync(
51  	struct xlog		*log,
52  	struct xlog_in_core	*iclog,
53  	struct xlog_ticket	*ticket);
54  #if defined(DEBUG)
55  STATIC void
56  xlog_verify_iclog(
57  	struct xlog		*log,
58  	struct xlog_in_core	*iclog,
59  	int			count);
60  STATIC void
61  xlog_verify_tail_lsn(
62  	struct xlog		*log,
63  	struct xlog_in_core	*iclog);
64  #else
65  #define xlog_verify_iclog(a,b,c)
66  #define xlog_verify_tail_lsn(a,b)
67  #endif
68  
69  STATIC int
70  xlog_iclogs_empty(
71  	struct xlog		*log);
72  
73  static int
74  xfs_log_cover(struct xfs_mount *);
75  
76  /*
77   * We need to make sure the buffer pointer returned is naturally aligned for the
78   * biggest basic data type we put into it. We have already accounted for this
79   * padding when sizing the buffer.
80   *
81   * However, this padding does not get written into the log, and hence we have to
82   * track the space used by the log vectors separately to prevent log space hangs
83   * due to inaccurate accounting (i.e. a leak) of the used log space through the
84   * CIL context ticket.
85   *
86   * We also add space for the xlog_op_header that describes this region in the
87   * log. This prepends the data region we return to the caller to copy their data
88   * into, so do all the static initialisation of the ophdr now. Because the ophdr
89   * is not 8 byte aligned, we have to be careful to ensure that we align the
90   * start of the buffer such that the region we return to the caller is 8 byte
91   * aligned and packed against the tail of the ophdr.
92   */
93  void *
94  xlog_prepare_iovec(
95  	struct xfs_log_vec	*lv,
96  	struct xfs_log_iovec	**vecp,
97  	uint			type)
98  {
99  	struct xfs_log_iovec	*vec = *vecp;
100  	struct xlog_op_header	*oph;
101  	uint32_t		len;
102  	void			*buf;
103  
104  	if (vec) {
105  		ASSERT(vec - lv->lv_iovecp < lv->lv_niovecs);
106  		vec++;
107  	} else {
108  		vec = &lv->lv_iovecp[0];
109  	}
110  
111  	len = lv->lv_buf_len + sizeof(struct xlog_op_header);
112  	if (!IS_ALIGNED(len, sizeof(uint64_t))) {
113  		lv->lv_buf_len = round_up(len, sizeof(uint64_t)) -
114  					sizeof(struct xlog_op_header);
115  	}
116  
117  	vec->i_type = type;
118  	vec->i_addr = lv->lv_buf + lv->lv_buf_len;
119  
120  	oph = vec->i_addr;
121  	oph->oh_clientid = XFS_TRANSACTION;
122  	oph->oh_res2 = 0;
123  	oph->oh_flags = 0;
124  
125  	buf = vec->i_addr + sizeof(struct xlog_op_header);
126  	ASSERT(IS_ALIGNED((unsigned long)buf, sizeof(uint64_t)));
127  
128  	*vecp = vec;
129  	return buf;
130  }
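/*
 * Illustrative caller pattern (a sketch, not code from this file): a log
 * item format routine grabs a region with xlog_prepare_iovec(), copies the
 * region payload (here "data"/"len" stand in for the caller's payload) into
 * the returned buffer, and then seals the region with xlog_finish_iovec()
 * from xfs_log.h:
 *
 *	buf = xlog_prepare_iovec(lv, &vecp, XLOG_REG_TYPE_ICORE);
 *	memcpy(buf, data, len);
 *	xlog_finish_iovec(lv, vecp, len);
 */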
131  
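/*
 * The grant heads track reserved log space as a single byte count, so
 * consuming and releasing space is a plain atomic add/subtract on
 * head->grant.
 */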
132  static inline void
133  xlog_grant_sub_space(
134  	struct xlog_grant_head	*head,
135  	int64_t			bytes)
136  {
137  	atomic64_sub(bytes, &head->grant);
138  }
139  
140  static inline void
141  xlog_grant_add_space(
142  	struct xlog_grant_head	*head,
143  	int64_t			bytes)
144  {
145  	atomic64_add(bytes, &head->grant);
146  }
147  
148  static void
149  xlog_grant_head_init(
150  	struct xlog_grant_head	*head)
151  {
152  	atomic64_set(&head->grant, 0);
153  	INIT_LIST_HEAD(&head->waiters);
154  	spin_lock_init(&head->lock);
155  }
156  
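/*
 * Hand back the space consumed between old_head and new_head: subtract the
 * byte distance between the two LSNs from both the reserve and write grant
 * heads.
 */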
157  void
158  xlog_grant_return_space(
159  	struct xlog	*log,
160  	xfs_lsn_t	old_head,
161  	xfs_lsn_t	new_head)
162  {
163  	int64_t		diff = xlog_lsn_sub(log, new_head, old_head);
164  
165  	xlog_grant_sub_space(&log->l_reserve_head, diff);
166  	xlog_grant_sub_space(&log->l_write_head, diff);
167  }
168  
169  /*
170   * Return the space in the log between the tail and the head.  In the case where
171   * we have overrun available reservation space, return 0. The memory barrier
172   * pairs with the smp_wmb() in xlog_cil_ail_insert() to ensure that grant head
173   * vs tail space updates are seen in the correct order and hence avoid
174   * transients as space is transferred from the grant heads to the AIL on commit
175   * completion.
176   */
177  static uint64_t
178  xlog_grant_space_left(
179  	struct xlog		*log,
180  	struct xlog_grant_head	*head)
181  {
182  	int64_t			free_bytes;
183  
184  	smp_rmb();	/* paired with smp_wmb in xlog_cil_ail_insert() */
185  	free_bytes = log->l_logsize - READ_ONCE(log->l_tail_space) -
186  			atomic64_read(&head->grant);
187  	if (free_bytes > 0)
188  		return free_bytes;
189  	return 0;
190  }
191  
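/*
 * Wake every ticket sleeping on this grant head so the owning tasks can
 * re-evaluate their wait condition (e.g. after a log shutdown).
 */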
192  STATIC void
193  xlog_grant_head_wake_all(
194  	struct xlog_grant_head	*head)
195  {
196  	struct xlog_ticket	*tic;
197  
198  	spin_lock(&head->lock);
199  	list_for_each_entry(tic, &head->waiters, t_queue)
200  		wake_up_process(tic->t_task);
201  	spin_unlock(&head->lock);
202  }
203  
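/*
 * Return the number of bytes the given ticket needs from this grant head.
 * The write head only ever regrants a single unit for a permanent ticket,
 * while the reserve head has to cover all remaining rolls of a permanent
 * ticket, i.e. t_unit_res * t_cnt bytes.
 */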
204  static inline int
205  xlog_ticket_reservation(
206  	struct xlog		*log,
207  	struct xlog_grant_head	*head,
208  	struct xlog_ticket	*tic)
209  {
210  	if (head == &log->l_write_head) {
211  		ASSERT(tic->t_flags & XLOG_TIC_PERM_RESERV);
212  		return tic->t_unit_res;
213  	}
214  
215  	if (tic->t_flags & XLOG_TIC_PERM_RESERV)
216  		return tic->t_unit_res * tic->t_cnt;
217  
218  	return tic->t_unit_res;
219  }
220  
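/*
 * Walk the grant head waiters in FIFO order, waking each ticket whose
 * reservation fits in *free_bytes and consuming that space as we go.
 * Returns false as soon as a waiter does not fit so queue order is
 * preserved. Called with head->lock held.
 */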
221  STATIC bool
222  xlog_grant_head_wake(
223  	struct xlog		*log,
224  	struct xlog_grant_head	*head,
225  	int			*free_bytes)
226  {
227  	struct xlog_ticket	*tic;
228  	int			need_bytes;
229  
230  	list_for_each_entry(tic, &head->waiters, t_queue) {
231  		need_bytes = xlog_ticket_reservation(log, head, tic);
232  		if (*free_bytes < need_bytes)
233  			return false;
234  
235  		*free_bytes -= need_bytes;
236  		trace_xfs_log_grant_wake_up(log, tic);
237  		wake_up_process(tic->t_task);
238  	}
239  
240  	return true;
241  }
242  
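/*
 * Queue the ticket on the grant head and sleep until enough space is
 * available or the log is shut down. head->lock is dropped across the
 * sleep, and the AIL is pushed each time around the loop to move the log
 * tail and free up space. Returns -EIO on shutdown.
 */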
243  STATIC int
244  xlog_grant_head_wait(
245  	struct xlog		*log,
246  	struct xlog_grant_head	*head,
247  	struct xlog_ticket	*tic,
248  	int			need_bytes) __releases(&head->lock)
249  					    __acquires(&head->lock)
250  {
251  	list_add_tail(&tic->t_queue, &head->waiters);
252  
253  	do {
254  		if (xlog_is_shutdown(log))
255  			goto shutdown;
256  
257  		__set_current_state(TASK_UNINTERRUPTIBLE);
258  		spin_unlock(&head->lock);
259  
260  		XFS_STATS_INC(log->l_mp, xs_sleep_logspace);
261  
262  		/* Push on the AIL to free up all the log space. */
263  		xfs_ail_push_all(log->l_ailp);
264  
265  		trace_xfs_log_grant_sleep(log, tic);
266  		schedule();
267  		trace_xfs_log_grant_wake(log, tic);
268  
269  		spin_lock(&head->lock);
270  		if (xlog_is_shutdown(log))
271  			goto shutdown;
272  	} while (xlog_grant_space_left(log, head) < need_bytes);
273  
274  	list_del_init(&tic->t_queue);
275  	return 0;
276  shutdown:
277  	list_del_init(&tic->t_queue);
278  	return -EIO;
279  }
280  
281  /*
282   * Atomically get the log space required for a log ticket.
283   *
284   * Once a ticket gets put onto head->waiters, it will only return after the
285   * needed reservation is satisfied.
286   *
287   * This function is structured so that it has a lock free fast path. This is
288   * necessary because every new transaction reservation will come through this
289   * path. Hence any lock will be globally hot if we take it unconditionally on
290   * every pass.
291   *
292   * As tickets are only ever moved on and off head->waiters under head->lock, we
293   * only need to take that lock if we are going to add the ticket to the queue
294   * and sleep. We can avoid taking the lock if the ticket was never added to
295   * head->waiters because the t_queue list head will be empty and we hold the
296   * only reference to it so it can safely be checked unlocked.
297   */
298  STATIC int
299  xlog_grant_head_check(
300  	struct xlog		*log,
301  	struct xlog_grant_head	*head,
302  	struct xlog_ticket	*tic,
303  	int			*need_bytes)
304  {
305  	int			free_bytes;
306  	int			error = 0;
307  
308  	ASSERT(!xlog_in_recovery(log));
309  
310  	/*
311  	 * If there are other waiters on the queue then give them a chance at
312  	 * logspace before us.  Wake up the first waiters; if we cannot wake
313  	 * them all, go to sleep waiting for more free space, otherwise try
314  	 * to get some space for this transaction.
315  	 */
316  	*need_bytes = xlog_ticket_reservation(log, head, tic);
317  	free_bytes = xlog_grant_space_left(log, head);
318  	if (!list_empty_careful(&head->waiters)) {
319  		spin_lock(&head->lock);
320  		if (!xlog_grant_head_wake(log, head, &free_bytes) ||
321  		    free_bytes < *need_bytes) {
322  			error = xlog_grant_head_wait(log, head, tic,
323  						     *need_bytes);
324  		}
325  		spin_unlock(&head->lock);
326  	} else if (free_bytes < *need_bytes) {
327  		spin_lock(&head->lock);
328  		error = xlog_grant_head_wait(log, head, tic, *need_bytes);
329  		spin_unlock(&head->lock);
330  	}
331  
332  	return error;
333  }
334  
335  bool
336  xfs_log_writable(
337  	struct xfs_mount	*mp)
338  {
339  	/*
340  	 * Do not write to the log on norecovery mounts, if the data or log
341  	 * devices are read-only, or if the filesystem is shutdown. Read-only
342  	 * mounts allow internal writes for log recovery and unmount purposes,
343  	 * so don't restrict that case.
344  	 */
345  	if (xfs_has_norecovery(mp))
346  		return false;
347  	if (xfs_readonly_buftarg(mp->m_ddev_targp))
348  		return false;
349  	if (xfs_readonly_buftarg(mp->m_log->l_targ))
350  		return false;
351  	if (xlog_is_shutdown(mp->m_log))
352  		return false;
353  	return true;
354  }
355  
356  /*
357   * Replenish the byte reservation required by moving the grant write head.
358   */
359  int
360  xfs_log_regrant(
361  	struct xfs_mount	*mp,
362  	struct xlog_ticket	*tic)
363  {
364  	struct xlog		*log = mp->m_log;
365  	int			need_bytes;
366  	int			error = 0;
367  
368  	if (xlog_is_shutdown(log))
369  		return -EIO;
370  
371  	XFS_STATS_INC(mp, xs_try_logspace);
372  
373  	/*
374  	 * This is a new transaction on the ticket, so we need to change the
375  	 * transaction ID so that the next transaction has a different TID in
376  	 * the log. Just add one to the existing tid so that we can see chains
377  	 * of rolling transactions in the log easily.
378  	 */
379  	tic->t_tid++;
380  	tic->t_curr_res = tic->t_unit_res;
381  	if (tic->t_cnt > 0)
382  		return 0;
383  
384  	trace_xfs_log_regrant(log, tic);
385  
386  	error = xlog_grant_head_check(log, &log->l_write_head, tic,
387  				      &need_bytes);
388  	if (error)
389  		goto out_error;
390  
391  	xlog_grant_add_space(&log->l_write_head, need_bytes);
392  	trace_xfs_log_regrant_exit(log, tic);
393  	return 0;
394  
395  out_error:
396  	/*
397  	 * If we are failing, make sure the ticket doesn't have any current
398  	 * reservations.  We don't want to add this back when the ticket/
399  	 * transaction gets cancelled.
400  	 */
401  	tic->t_curr_res = 0;
402  	tic->t_cnt = 0;	/* ungrant will give back unit_res * t_cnt. */
403  	return error;
404  }
405  
406  /*
407   * Reserve log space and return a ticket corresponding to the reservation.
408   *
409   * Each reservation is going to reserve extra space for a log record header.
410   * When writes happen to the on-disk log, we don't subtract the length of the
411   * log record header from any reservation.  By wasting space in each
412   * reservation, we prevent over allocation problems.
413   */
414  int
415  xfs_log_reserve(
416  	struct xfs_mount	*mp,
417  	int			unit_bytes,
418  	int			cnt,
419  	struct xlog_ticket	**ticp,
420  	bool			permanent)
421  {
422  	struct xlog		*log = mp->m_log;
423  	struct xlog_ticket	*tic;
424  	int			need_bytes;
425  	int			error = 0;
426  
427  	if (xlog_is_shutdown(log))
428  		return -EIO;
429  
430  	XFS_STATS_INC(mp, xs_try_logspace);
431  
432  	ASSERT(*ticp == NULL);
433  	tic = xlog_ticket_alloc(log, unit_bytes, cnt, permanent);
434  	*ticp = tic;
435  	trace_xfs_log_reserve(log, tic);
436  	error = xlog_grant_head_check(log, &log->l_reserve_head, tic,
437  				      &need_bytes);
438  	if (error)
439  		goto out_error;
440  
441  	xlog_grant_add_space(&log->l_reserve_head, need_bytes);
442  	xlog_grant_add_space(&log->l_write_head, need_bytes);
443  	trace_xfs_log_reserve_exit(log, tic);
444  	return 0;
445  
446  out_error:
447  	/*
448  	 * If we are failing, make sure the ticket doesn't have any current
449  	 * reservations.  We don't want to add this back when the ticket/
450  	 * transaction gets cancelled.
451  	 */
452  	tic->t_curr_res = 0;
453  	tic->t_cnt = 0;	/* ungrant will give back unit_res * t_cnt. */
454  	return error;
455  }
456  
457  /*
458   * Run all the pending iclog callbacks and wake log force waiters and iclog
459   * space waiters so they can process the newly set shutdown state. We really
460   * don't care what order we process callbacks here because the log is shut down
461   * and so state cannot change on disk anymore. However, we cannot wake waiters
462   * until the callbacks have been processed because we may be in unmount and
463   * we must ensure that all AIL operations the callbacks perform have completed
464   * before we tear down the AIL.
465   *
466   * We avoid processing actively referenced iclogs so that we don't run callbacks
467   * while the iclog owner might still be preparing the iclog for IO submission.
468   * These will be caught by xlog_state_iclog_release() and call this function
469   * again to process any callbacks that may have been added to that iclog.
470   */
471  static void
472  xlog_state_shutdown_callbacks(
473  	struct xlog		*log)
474  {
475  	struct xlog_in_core	*iclog;
476  	LIST_HEAD(cb_list);
477  
478  	iclog = log->l_iclog;
479  	do {
480  		if (atomic_read(&iclog->ic_refcnt)) {
481  			/* Reference holder will re-run iclog callbacks. */
482  			continue;
483  		}
484  		list_splice_init(&iclog->ic_callbacks, &cb_list);
485  		spin_unlock(&log->l_icloglock);
486  
487  		xlog_cil_process_committed(&cb_list);
488  
489  		spin_lock(&log->l_icloglock);
490  		wake_up_all(&iclog->ic_write_wait);
491  		wake_up_all(&iclog->ic_force_wait);
492  	} while ((iclog = iclog->ic_next) != log->l_iclog);
493  
494  	wake_up_all(&log->l_flush_wait);
495  }
496  
497  /*
498   * Flush iclog to disk if this is the last reference to the given iclog and
499   * it is in the WANT_SYNC state.
500   *
501   * If XLOG_ICL_NEED_FUA is already set on the iclog, we need to ensure that the
502   * log tail is updated correctly. NEED_FUA indicates that the iclog will be
503   * written to stable storage, and implies that a commit record is contained
504   * within the iclog. We need to ensure that the log tail does not move beyond
505   * the tail that the first commit record in the iclog ordered against, otherwise
506   * correct recovery of that checkpoint becomes dependent on future operations
507   * performed on this iclog.
508   *
509   * Hence if NEED_FUA is set and the current iclog tail lsn is empty, write the
510   * current tail into iclog. Once the iclog tail is set, future operations must
511   * not modify it, otherwise they potentially violate ordering constraints for
512   * the checkpoint commit that wrote the initial tail lsn value. The tail lsn in
513   * the iclog will get zeroed on activation of the iclog after sync, so we
514   * always capture the tail lsn on the iclog on the first NEED_FUA release
515   * regardless of the number of active reference counts on this iclog.
516   */
517  int
518  xlog_state_release_iclog(
519  	struct xlog		*log,
520  	struct xlog_in_core	*iclog,
521  	struct xlog_ticket	*ticket)
522  {
523  	bool			last_ref;
524  
525  	lockdep_assert_held(&log->l_icloglock);
526  
527  	trace_xlog_iclog_release(iclog, _RET_IP_);
528  	/*
529  	 * Grabbing the current log tail needs to be atomic w.r.t. the writing
530  	 * of the tail LSN into the iclog so we guarantee that the log tail does
531  	 * not move between the first time we know that the iclog needs to be
532  	 * made stable and when we eventually submit it.
533  	 */
534  	if ((iclog->ic_state == XLOG_STATE_WANT_SYNC ||
535  	     (iclog->ic_flags & XLOG_ICL_NEED_FUA)) &&
536  	    !iclog->ic_header.h_tail_lsn) {
537  		iclog->ic_header.h_tail_lsn =
538  				cpu_to_be64(atomic64_read(&log->l_tail_lsn));
539  	}
540  
541  	last_ref = atomic_dec_and_test(&iclog->ic_refcnt);
542  
543  	if (xlog_is_shutdown(log)) {
544  		/*
545  		 * If there are no more references to this iclog, process the
546  		 * pending iclog callbacks that were waiting on the release of
547  		 * this iclog.
548  		 */
549  		if (last_ref)
550  			xlog_state_shutdown_callbacks(log);
551  		return -EIO;
552  	}
553  
554  	if (!last_ref)
555  		return 0;
556  
557  	if (iclog->ic_state != XLOG_STATE_WANT_SYNC) {
558  		ASSERT(iclog->ic_state == XLOG_STATE_ACTIVE);
559  		return 0;
560  	}
561  
562  	iclog->ic_state = XLOG_STATE_SYNCING;
563  	xlog_verify_tail_lsn(log, iclog);
564  	trace_xlog_iclog_syncing(iclog, _RET_IP_);
565  
566  	spin_unlock(&log->l_icloglock);
567  	xlog_sync(log, iclog, ticket);
568  	spin_lock(&log->l_icloglock);
569  	return 0;
570  }
571  
572  /*
573   * Mount a log filesystem
574   *
575   * mp		- ubiquitous xfs mount point structure
576   * log_target	- buftarg of on-disk log device
577   * blk_offset	- Start block # where block size is 512 bytes (BBSIZE)
578   * num_bblks	- Number of BBSIZE blocks in on-disk log
579   *
580   * Return error or zero.
581   */
582  int
583  xfs_log_mount(
584  	xfs_mount_t		*mp,
585  	struct xfs_buftarg	*log_target,
586  	xfs_daddr_t		blk_offset,
587  	int			num_bblks)
588  {
589  	struct xlog		*log;
590  	int			error = 0;
591  	int			min_logfsbs;
592  
593  	if (!xfs_has_norecovery(mp)) {
594  		xfs_notice(mp, "Mounting V%d Filesystem %pU",
595  			   XFS_SB_VERSION_NUM(&mp->m_sb),
596  			   &mp->m_sb.sb_uuid);
597  	} else {
598  		xfs_notice(mp,
599  "Mounting V%d filesystem %pU in no-recovery mode. Filesystem will be inconsistent.",
600  			   XFS_SB_VERSION_NUM(&mp->m_sb),
601  			   &mp->m_sb.sb_uuid);
602  		ASSERT(xfs_is_readonly(mp));
603  	}
604  
605  	log = xlog_alloc_log(mp, log_target, blk_offset, num_bblks);
606  	if (IS_ERR(log)) {
607  		error = PTR_ERR(log);
608  		goto out;
609  	}
610  	mp->m_log = log;
611  
612  	/*
613  	 * Now that we have set up the log and its internal geometry
614  	 * parameters, we can validate the given log space and drop a critical
615  	 * message via syslog if the log size is too small. A log that is too
616  	 * small can lead to unexpected situations in transaction log space
617  	 * reservation stage. The superblock verifier has already validated all
618  	 * the other log geometry constraints, so we don't have to check those
619  	 * here.
620  	 *
621  	 * Note: For v4 filesystems, we can't just reject the mount if the
622  	 * validation fails.  This would mean that people would have to
623  	 * downgrade their kernel just to remedy the situation as there is no
624  	 * way to grow the log (short of black magic surgery with xfs_db).
625  	 *
626  	 * We can, however, reject mounts for V5 format filesystems, as the
627  	 * mkfs binary being used to make the filesystem should never create a
628  	 * filesystem with a log that is too small.
629  	 */
630  	min_logfsbs = xfs_log_calc_minimum_size(mp);
631  	if (mp->m_sb.sb_logblocks < min_logfsbs) {
632  		xfs_warn(mp,
633  		"Log size %d blocks too small, minimum size is %d blocks",
634  			 mp->m_sb.sb_logblocks, min_logfsbs);
635  
636  		/*
637  		 * Log check errors are always fatal on v5; or whenever bad
638  		 * metadata leads to a crash.
639  		 */
640  		if (xfs_has_crc(mp)) {
641  			xfs_crit(mp, "AAIEEE! Log failed size checks. Abort!");
642  			ASSERT(0);
643  			error = -EINVAL;
644  			goto out_free_log;
645  		}
646  		xfs_crit(mp, "Log size out of supported range.");
647  		xfs_crit(mp,
648  "Continuing onwards, but if log hangs are experienced then please report this message in the bug report.");
649  	}
650  
651  	/*
652  	 * Initialize the AIL now we have a log.
653  	 */
654  	error = xfs_trans_ail_init(mp);
655  	if (error) {
656  		xfs_warn(mp, "AIL initialisation failed: error %d", error);
657  		goto out_free_log;
658  	}
659  	log->l_ailp = mp->m_ail;
660  
661  	/*
662  	 * skip log recovery on a norecovery mount.  pretend it all
663  	 * just worked.
664  	 */
665  	if (!xfs_has_norecovery(mp)) {
666  		error = xlog_recover(log);
667  		if (error) {
668  			xfs_warn(mp, "log mount/recovery failed: error %d",
669  				error);
670  			xlog_recover_cancel(log);
671  			goto out_destroy_ail;
672  		}
673  	}
674  
675  	error = xfs_sysfs_init(&log->l_kobj, &xfs_log_ktype, &mp->m_kobj,
676  			       "log");
677  	if (error)
678  		goto out_destroy_ail;
679  
680  	/* Normal transactions can now occur */
681  	clear_bit(XLOG_ACTIVE_RECOVERY, &log->l_opstate);
682  
683  	/*
684  	 * Now the log has been fully initialised and we know where our
685  	 * space grant counters are, we can initialise the permanent ticket
686  	 * needed for delayed logging to work.
687  	 */
688  	xlog_cil_init_post_recovery(log);
689  
690  	return 0;
691  
692  out_destroy_ail:
693  	xfs_trans_ail_destroy(mp);
694  out_free_log:
695  	xlog_dealloc_log(log);
696  out:
697  	return error;
698  }
699  
700  /*
701   * Finish the recovery of the file system.  This is separate from the
702   * xfs_log_mount() call, because it depends on the code in xfs_mountfs() to read
703   * in the root and real-time bitmap inodes between calling xfs_log_mount() and
704   * here.
705   *
706   * If we finish recovery successfully, start the background log work. If we are
707   * not doing recovery, then we have a RO filesystem and we don't need to start
708   * it.
709   */
710  int
711  xfs_log_mount_finish(
712  	struct xfs_mount	*mp)
713  {
714  	struct xlog		*log = mp->m_log;
715  	int			error = 0;
716  
717  	if (xfs_has_norecovery(mp)) {
718  		ASSERT(xfs_is_readonly(mp));
719  		return 0;
720  	}
721  
722  	/*
723  	 * During the second phase of log recovery, we need iget and
724  	 * iput to behave like they do for an active filesystem.
725  	 * xfs_fs_drop_inode needs to be able to prevent the deletion
726  	 * of inodes before we're done replaying log items on those
727  	 * inodes.  Turn it off immediately after recovery finishes
728  	 * so that we don't leak the quota inodes if subsequent mount
729  	 * activities fail.
730  	 *
731  	 * We let all inodes involved in redo item processing end up on
732  	 * the LRU instead of being evicted immediately so that if we do
733  	 * something to an unlinked inode, the irele won't cause
734  	 * premature truncation and freeing of the inode, which results
735  	 * in log recovery failure.  We have to evict the unreferenced
736  	 * lru inodes after clearing SB_ACTIVE because we don't
737  	 * otherwise clean up the lru if there's a subsequent failure in
738  	 * xfs_mountfs, which leads to us leaking the inodes if nothing
739  	 * else (e.g. quotacheck) references the inodes before the
740  	 * mount failure occurs.
741  	 */
742  	mp->m_super->s_flags |= SB_ACTIVE;
743  	xfs_log_work_queue(mp);
744  	if (xlog_recovery_needed(log))
745  		error = xlog_recover_finish(log);
746  	mp->m_super->s_flags &= ~SB_ACTIVE;
747  	evict_inodes(mp->m_super);
748  
749  	/*
750  	 * Drain the buffer LRU after log recovery. This is required for v4
751  	 * filesystems to avoid leaving around buffers with NULL verifier ops,
752  	 * but we do it unconditionally to make sure we're always in a clean
753  	 * cache state after mount.
754  	 *
755  	 * Don't push in the error case because the AIL may have pending intents
756  	 * that aren't removed until recovery is cancelled.
757  	 */
758  	if (xlog_recovery_needed(log)) {
759  		if (!error) {
760  			xfs_log_force(mp, XFS_LOG_SYNC);
761  			xfs_ail_push_all_sync(mp->m_ail);
762  		}
763  		xfs_notice(mp, "Ending recovery (logdev: %s)",
764  				mp->m_logname ? mp->m_logname : "internal");
765  	} else {
766  		xfs_info(mp, "Ending clean mount");
767  	}
768  	xfs_buftarg_drain(mp->m_ddev_targp);
769  
770  	clear_bit(XLOG_RECOVERY_NEEDED, &log->l_opstate);
771  
772  	/* Make sure the log is dead if we're returning failure. */
773  	ASSERT(!error || xlog_is_shutdown(log));
774  
775  	return error;
776  }
777  
778  /*
779   * The mount has failed. Cancel the recovery if it hasn't completed and destroy
780   * the log.
781   */
782  void
783  xfs_log_mount_cancel(
784  	struct xfs_mount	*mp)
785  {
786  	xlog_recover_cancel(mp->m_log);
787  	xfs_log_unmount(mp);
788  }
789  
790  /*
791   * Flush out the iclog to disk ensuring that device caches are flushed and
792   * the iclog hits stable storage before any completion waiters are woken.
793   */
794  static inline int
795  xlog_force_iclog(
796  	struct xlog_in_core	*iclog)
797  {
798  	atomic_inc(&iclog->ic_refcnt);
799  	iclog->ic_flags |= XLOG_ICL_NEED_FLUSH | XLOG_ICL_NEED_FUA;
800  	if (iclog->ic_state == XLOG_STATE_ACTIVE)
801  		xlog_state_switch_iclogs(iclog->ic_log, iclog, 0);
802  	return xlog_state_release_iclog(iclog->ic_log, iclog, NULL);
803  }
804  
805  /*
806   * Cycle all the iclogbuf locks to make sure all log IO completion
807   * is done before we tear down these buffers.
808   */
809  static void
810  xlog_wait_iclog_completion(struct xlog *log)
811  {
812  	int		i;
813  	struct xlog_in_core	*iclog = log->l_iclog;
814  
815  	for (i = 0; i < log->l_iclog_bufs; i++) {
816  		down(&iclog->ic_sema);
817  		up(&iclog->ic_sema);
818  		iclog = iclog->ic_next;
819  	}
820  }
821  
822  /*
823   * Wait for the iclog and all prior iclogs to be written to disk as required by
824   * log force state machine. Waiting on ic_force_wait ensures iclog completions
825   * have been ordered and callbacks run before we are woken here, hence
826   * guaranteeing that all the iclogs up to this one are on stable storage.
827   */
828  int
829  xlog_wait_on_iclog(
830  	struct xlog_in_core	*iclog)
831  		__releases(iclog->ic_log->l_icloglock)
832  {
833  	struct xlog		*log = iclog->ic_log;
834  
835  	trace_xlog_iclog_wait_on(iclog, _RET_IP_);
836  	if (!xlog_is_shutdown(log) &&
837  	    iclog->ic_state != XLOG_STATE_ACTIVE &&
838  	    iclog->ic_state != XLOG_STATE_DIRTY) {
839  		XFS_STATS_INC(log->l_mp, xs_log_force_sleep);
840  		xlog_wait(&iclog->ic_force_wait, &log->l_icloglock);
841  	} else {
842  		spin_unlock(&log->l_icloglock);
843  	}
844  
845  	if (xlog_is_shutdown(log))
846  		return -EIO;
847  	return 0;
848  }
849  
850  /*
851   * Write out an unmount record using the ticket provided. We have to account for
852   * the data space used in the unmount ticket as this write is not done from a
853   * transaction context that has already done the accounting for us.
854   */
855  static int
856  xlog_write_unmount_record(
857  	struct xlog		*log,
858  	struct xlog_ticket	*ticket)
859  {
860  	struct  {
861  		struct xlog_op_header ophdr;
862  		struct xfs_unmount_log_format ulf;
863  	} unmount_rec = {
864  		.ophdr = {
865  			.oh_clientid = XFS_LOG,
866  			.oh_tid = cpu_to_be32(ticket->t_tid),
867  			.oh_flags = XLOG_UNMOUNT_TRANS,
868  		},
869  		.ulf = {
870  			.magic = XLOG_UNMOUNT_TYPE,
871  		},
872  	};
873  	struct xfs_log_iovec reg = {
874  		.i_addr = &unmount_rec,
875  		.i_len = sizeof(unmount_rec),
876  		.i_type = XLOG_REG_TYPE_UNMOUNT,
877  	};
878  	struct xfs_log_vec vec = {
879  		.lv_niovecs = 1,
880  		.lv_iovecp = &reg,
881  	};
882  	LIST_HEAD(lv_chain);
883  	list_add(&vec.lv_list, &lv_chain);
884  
885  	BUILD_BUG_ON((sizeof(struct xlog_op_header) +
886  		      sizeof(struct xfs_unmount_log_format)) !=
887  							sizeof(unmount_rec));
888  
889  	/* account for space used by record data */
890  	ticket->t_curr_res -= sizeof(unmount_rec);
891  
892  	return xlog_write(log, NULL, &lv_chain, ticket, reg.i_len);
893  }
894  
895  /*
896   * Mark the filesystem clean by writing an unmount record to the head of the
897   * log.
898   */
899  static void
900  xlog_unmount_write(
901  	struct xlog		*log)
902  {
903  	struct xfs_mount	*mp = log->l_mp;
904  	struct xlog_in_core	*iclog;
905  	struct xlog_ticket	*tic = NULL;
906  	int			error;
907  
908  	error = xfs_log_reserve(mp, 600, 1, &tic, 0);
909  	if (error)
910  		goto out_err;
911  
912  	error = xlog_write_unmount_record(log, tic);
913  	/*
914  	 * At this point, we're umounting anyway, so there's no point in
915  	 * transitioning log state to shutdown. Just continue...
916  	 */
917  out_err:
918  	if (error)
919  		xfs_alert(mp, "%s: unmount record failed", __func__);
920  
921  	spin_lock(&log->l_icloglock);
922  	iclog = log->l_iclog;
923  	error = xlog_force_iclog(iclog);
924  	xlog_wait_on_iclog(iclog);
925  
926  	if (tic) {
927  		trace_xfs_log_umount_write(log, tic);
928  		xfs_log_ticket_ungrant(log, tic);
929  	}
930  }
931  
932  static void
933  xfs_log_unmount_verify_iclog(
934  	struct xlog		*log)
935  {
936  	struct xlog_in_core	*iclog = log->l_iclog;
937  
938  	do {
939  		ASSERT(iclog->ic_state == XLOG_STATE_ACTIVE);
940  		ASSERT(iclog->ic_offset == 0);
941  	} while ((iclog = iclog->ic_next) != log->l_iclog);
942  }
943  
944  /*
945   * Unmount record used to have a string "Unmount filesystem--" in the
946   * data section where the "Un" was really a magic number (XLOG_UNMOUNT_TYPE).
947   * We just write the magic number now since that particular field isn't
948   * currently architecture converted and "Unmount" is a bit of a misnomer.
949   * As far as I know, there weren't any dependencies on the old behaviour.
950   */
951  static void
952  xfs_log_unmount_write(
953  	struct xfs_mount	*mp)
954  {
955  	struct xlog		*log = mp->m_log;
956  
957  	if (!xfs_log_writable(mp))
958  		return;
959  
960  	xfs_log_force(mp, XFS_LOG_SYNC);
961  
962  	if (xlog_is_shutdown(log))
963  		return;
964  
965  	/*
966  	 * If we think the summary counters are bad, avoid writing the unmount
967  	 * record to force log recovery at next mount, after which the summary
968  	 * counters will be recalculated.  Refer to xlog_check_unmount_rec for
969  	 * more details.
970  	 */
971  	if (XFS_TEST_ERROR(xfs_fs_has_sickness(mp, XFS_SICK_FS_COUNTERS), mp,
972  			XFS_ERRTAG_FORCE_SUMMARY_RECALC)) {
973  		xfs_alert(mp, "%s: will fix summary counters at next mount",
974  				__func__);
975  		return;
976  	}
977  
978  	xfs_log_unmount_verify_iclog(log);
979  	xlog_unmount_write(log);
980  }
981  
982  /*
983   * Empty the log for unmount/freeze.
984   *
985   * To do this, we first need to shut down the background log work so it is not
986   * trying to cover the log as we clean up. We then need to unpin all objects in
987   * the log so we can then flush them out. Once they have completed their IO and
988   * run the callbacks removing themselves from the AIL, we can cover the log.
989   */
990  int
991  xfs_log_quiesce(
992  	struct xfs_mount	*mp)
993  {
994  	/*
995  	 * Clear log incompat features since we're quiescing the log.  Report
996  	 * failures, though it's not fatal to have a higher log feature
997  	 * protection level than the log contents actually require.
998  	 */
999  	if (xfs_clear_incompat_log_features(mp)) {
1000  		int error;
1001  
1002  		error = xfs_sync_sb(mp, false);
1003  		if (error)
1004  			xfs_warn(mp,
1005  	"Failed to clear log incompat features on quiesce");
1006  	}
1007  
1008  	cancel_delayed_work_sync(&mp->m_log->l_work);
1009  	xfs_log_force(mp, XFS_LOG_SYNC);
1010  
1011  	/*
1012  	 * The superblock buffer is uncached and while xfs_ail_push_all_sync()
1013  	 * will push it, xfs_buftarg_wait() will not wait for it. Further,
1014  	 * xfs_buf_iowait() cannot be used because it was pushed with the
1015  	 * XBF_ASYNC flag set, so we need to use a lock/unlock pair to wait for
1016  	 * the IO to complete.
1017  	 */
1018  	xfs_ail_push_all_sync(mp->m_ail);
1019  	xfs_buftarg_wait(mp->m_ddev_targp);
1020  	xfs_buf_lock(mp->m_sb_bp);
1021  	xfs_buf_unlock(mp->m_sb_bp);
1022  
1023  	return xfs_log_cover(mp);
1024  }
1025  
1026  void
1027  xfs_log_clean(
1028  	struct xfs_mount	*mp)
1029  {
1030  	xfs_log_quiesce(mp);
1031  	xfs_log_unmount_write(mp);
1032  }
1033  
1034  /*
1035   * Shut down and release the AIL and Log.
1036   *
1037   * During unmount, we need to ensure we flush all the dirty metadata objects
1038   * from the AIL so that the log is empty before we write the unmount record to
1039   * the log. Once this is done, we can tear down the AIL and the log.
1040   */
1041  void
1042  xfs_log_unmount(
1043  	struct xfs_mount	*mp)
1044  {
1045  	xfs_log_clean(mp);
1046  
1047  	/*
1048  	 * If shutdown has come from iclog IO context, the log
1049  	 * cleaning will have been skipped and so we need to wait
1050  	 * for the iclog to complete shutdown processing before we
1051  	 * tear anything down.
1052  	 */
1053  	xlog_wait_iclog_completion(mp->m_log);
1054  
1055  	xfs_buftarg_drain(mp->m_ddev_targp);
1056  
1057  	xfs_trans_ail_destroy(mp);
1058  
1059  	xfs_sysfs_del(&mp->m_log->l_kobj);
1060  
1061  	xlog_dealloc_log(mp->m_log);
1062  }
1063  
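/*
 * Initialise the fields common to all log items so the item can be added
 * to transactions and later moved through the CIL and AIL.
 */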
1064  void
1065  xfs_log_item_init(
1066  	struct xfs_mount	*mp,
1067  	struct xfs_log_item	*item,
1068  	int			type,
1069  	const struct xfs_item_ops *ops)
1070  {
1071  	item->li_log = mp->m_log;
1072  	item->li_ailp = mp->m_ail;
1073  	item->li_type = type;
1074  	item->li_ops = ops;
1075  	item->li_lv = NULL;
1076  
1077  	INIT_LIST_HEAD(&item->li_ail);
1078  	INIT_LIST_HEAD(&item->li_cil);
1079  	INIT_LIST_HEAD(&item->li_bio_list);
1080  	INIT_LIST_HEAD(&item->li_trans);
1081  }
1082  
1083  /*
1084   * Wake up processes waiting for log space after we have moved the log tail.
1085   */
1086  void
1087  xfs_log_space_wake(
1088  	struct xfs_mount	*mp)
1089  {
1090  	struct xlog		*log = mp->m_log;
1091  	int			free_bytes;
1092  
1093  	if (xlog_is_shutdown(log))
1094  		return;
1095  
1096  	if (!list_empty_careful(&log->l_write_head.waiters)) {
1097  		ASSERT(!xlog_in_recovery(log));
1098  
1099  		spin_lock(&log->l_write_head.lock);
1100  		free_bytes = xlog_grant_space_left(log, &log->l_write_head);
1101  		xlog_grant_head_wake(log, &log->l_write_head, &free_bytes);
1102  		spin_unlock(&log->l_write_head.lock);
1103  	}
1104  
1105  	if (!list_empty_careful(&log->l_reserve_head.waiters)) {
1106  		ASSERT(!xlog_in_recovery(log));
1107  
1108  		spin_lock(&log->l_reserve_head.lock);
1109  		free_bytes = xlog_grant_space_left(log, &log->l_reserve_head);
1110  		xlog_grant_head_wake(log, &log->l_reserve_head, &free_bytes);
1111  		spin_unlock(&log->l_reserve_head.lock);
1112  	}
1113  }
1114  
1115  /*
1116   * Determine if we have a transaction that has gone to disk that needs to be
1117   * covered. To begin the transition to the idle state firstly the log needs to
1118   * be idle. That means the CIL, the AIL and the iclogs need to be empty before
1119   * we start attempting to cover the log.
1120   *
1121   * Only if we are then in a state where covering is needed, the caller is
1122   * informed that dummy transactions are required to move the log into the idle
1123   * state.
1124   *
1125   * If there are any items in the AIL or CIL, then we do not want to attempt to
1126   * cover the log as we may be in a situation where there isn't log space
1127   * available to run a dummy transaction and this can lead to deadlocks when the
1128   * tail of the log is pinned by an item that is modified in the CIL.  Hence
1129   * there's no point in running a dummy transaction at this point because we
1130   * can't start trying to idle the log until both the CIL and AIL are empty.
1131   */
1132  static bool
1133  xfs_log_need_covered(
1134  	struct xfs_mount	*mp)
1135  {
1136  	struct xlog		*log = mp->m_log;
1137  	bool			needed = false;
1138  
1139  	if (!xlog_cil_empty(log))
1140  		return false;
1141  
1142  	spin_lock(&log->l_icloglock);
1143  	switch (log->l_covered_state) {
1144  	case XLOG_STATE_COVER_DONE:
1145  	case XLOG_STATE_COVER_DONE2:
1146  	case XLOG_STATE_COVER_IDLE:
1147  		break;
1148  	case XLOG_STATE_COVER_NEED:
1149  	case XLOG_STATE_COVER_NEED2:
1150  		if (xfs_ail_min_lsn(log->l_ailp))
1151  			break;
1152  		if (!xlog_iclogs_empty(log))
1153  			break;
1154  
1155  		needed = true;
1156  		if (log->l_covered_state == XLOG_STATE_COVER_NEED)
1157  			log->l_covered_state = XLOG_STATE_COVER_DONE;
1158  		else
1159  			log->l_covered_state = XLOG_STATE_COVER_DONE2;
1160  		break;
1161  	default:
1162  		needed = true;
1163  		break;
1164  	}
1165  	spin_unlock(&log->l_icloglock);
1166  	return needed;
1167  }
1168  
1169  /*
1170   * Explicitly cover the log. This is similar to background log covering but
1171   * intended for use in quiesce codepaths. The caller is responsible for ensuring
1172   * the log is idle and suitable for covering. The CIL, iclog buffers and AIL
1173   * must all be empty.
1174   */
1175  static int
1176  xfs_log_cover(
1177  	struct xfs_mount	*mp)
1178  {
1179  	int			error = 0;
1180  	bool			need_covered;
1181  
1182  	ASSERT((xlog_cil_empty(mp->m_log) && xlog_iclogs_empty(mp->m_log) &&
1183  	        !xfs_ail_min_lsn(mp->m_log->l_ailp)) ||
1184  		xlog_is_shutdown(mp->m_log));
1185  
1186  	if (!xfs_log_writable(mp))
1187  		return 0;
1188  
1189  	/*
1190  	 * xfs_log_need_covered() is not idempotent because it progresses the
1191  	 * state machine if the log requires covering. Therefore, we must call
1192  	 * this function once and use the result until we've issued an sb sync.
1193  	 * Do so first to make that abundantly clear.
1194  	 *
1195  	 * Fall into the covering sequence if the log needs covering or the
1196  	 * mount has lazy superblock accounting to sync to disk. The sb sync
1197  	 * used for covering accumulates the in-core counters, so covering
1198  	 * handles this for us.
1199  	 */
1200  	need_covered = xfs_log_need_covered(mp);
1201  	if (!need_covered && !xfs_has_lazysbcount(mp))
1202  		return 0;
1203  
1204  	/*
1205  	 * To cover the log, commit the superblock twice (at most) in
1206  	 * independent checkpoints. The first serves as a reference for the
1207  	 * tail pointer. The sync transaction and AIL push empties the AIL and
1208  	 * updates the in-core tail to the LSN of the first checkpoint. The
1209  	 * second commit updates the on-disk tail with the in-core LSN,
1210  	 * covering the log. Push the AIL one more time to leave it empty, as
1211  	 * we found it.
1212  	 */
1213  	do {
1214  		error = xfs_sync_sb(mp, true);
1215  		if (error)
1216  			break;
1217  		xfs_ail_push_all_sync(mp->m_ail);
1218  	} while (xfs_log_need_covered(mp));
1219  
1220  	return error;
1221  }
1222  
1223  static void
1224  xlog_ioend_work(
1225  	struct work_struct	*work)
1226  {
1227  	struct xlog_in_core     *iclog =
1228  		container_of(work, struct xlog_in_core, ic_end_io_work);
1229  	struct xlog		*log = iclog->ic_log;
1230  	int			error;
1231  
1232  	error = blk_status_to_errno(iclog->ic_bio.bi_status);
1233  #ifdef DEBUG
1234  	/* treat writes with injected CRC errors as failed */
1235  	if (iclog->ic_fail_crc)
1236  		error = -EIO;
1237  #endif
1238  
1239  	/*
1240  	 * Race to shutdown the filesystem if we see an error.
1241  	 */
1242  	if (XFS_TEST_ERROR(error, log->l_mp, XFS_ERRTAG_IODONE_IOERR)) {
1243  		xfs_alert(log->l_mp, "log I/O error %d", error);
1244  		xlog_force_shutdown(log, SHUTDOWN_LOG_IO_ERROR);
1245  	}
1246  
1247  	xlog_state_done_syncing(iclog);
1248  	bio_uninit(&iclog->ic_bio);
1249  
1250  	/*
1251  	 * Drop the lock to signal that we are done. Nothing references the
1252  	 * iclog after this, so an unmount waiting on this lock can now tear it
1253  	 * down safely. As such, it is unsafe to reference the iclog after the
1254  	 * unlock as we could race with it being freed.
1255  	 */
1256  	up(&iclog->ic_sema);
1257  }
1258  
1259  /*
1260   * Return size of each in-core log record buffer.
1261   *
1262   * All machines get 8 x 32kB buffers by default, unless tuned otherwise.
1263   *
1264   * If the filesystem blocksize is too large, we may need to choose a
1265   * larger size since the directory code currently logs entire blocks.
1266   */
1267  STATIC void
1268  xlog_get_iclog_buffer_size(
1269  	struct xfs_mount	*mp,
1270  	struct xlog		*log)
1271  {
1272  	if (mp->m_logbufs <= 0)
1273  		mp->m_logbufs = XLOG_MAX_ICLOGS;
1274  	if (mp->m_logbsize <= 0)
1275  		mp->m_logbsize = XLOG_BIG_RECORD_BSIZE;
1276  
1277  	log->l_iclog_bufs = mp->m_logbufs;
1278  	log->l_iclog_size = mp->m_logbsize;
1279  
1280  	/*
1281  	 * # headers = size / 32k - one header holds cycles from 32k of data.
1282  	 */
1283  	log->l_iclog_heads =
1284  		DIV_ROUND_UP(mp->m_logbsize, XLOG_HEADER_CYCLE_SIZE);
1285  	log->l_iclog_hsize = log->l_iclog_heads << BBSHIFT;
1286  }
1287  
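/*
 * Queue the background log worker to run one log sync interval from now.
 * xfs_syncd_centisecs is in centiseconds, hence the multiplication by 10
 * to convert to milliseconds.
 */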
1288  void
1289  xfs_log_work_queue(
1290  	struct xfs_mount        *mp)
1291  {
1292  	queue_delayed_work(mp->m_sync_workqueue, &mp->m_log->l_work,
1293  				msecs_to_jiffies(xfs_syncd_centisecs * 10));
1294  }
1295  
1296  /*
1297   * Clear the log incompat flags if we have the opportunity.
1298   *
1299   * This only happens if we're about to log the second dummy transaction as part
1300   * of covering the log.
1301   */
1302  static inline void
1303  xlog_clear_incompat(
1304  	struct xlog		*log)
1305  {
1306  	struct xfs_mount	*mp = log->l_mp;
1307  
1308  	if (!xfs_sb_has_incompat_log_feature(&mp->m_sb,
1309  				XFS_SB_FEAT_INCOMPAT_LOG_ALL))
1310  		return;
1311  
1312  	if (log->l_covered_state != XLOG_STATE_COVER_DONE2)
1313  		return;
1314  
1315  	xfs_clear_incompat_log_features(mp);
1316  }
1317  
1318  /*
1319   * Every sync period we need to unpin all items in the AIL and push them to
1320   * disk. If there is nothing dirty, then we might need to cover the log to
1321   * indicate that the filesystem is idle.
1322   */
1323  static void
1324  xfs_log_worker(
1325  	struct work_struct	*work)
1326  {
1327  	struct xlog		*log = container_of(to_delayed_work(work),
1328  						struct xlog, l_work);
1329  	struct xfs_mount	*mp = log->l_mp;
1330  
1331  	/* dgc: errors ignored - not fatal and nowhere to report them */
1332  	if (xfs_fs_writable(mp, SB_FREEZE_WRITE) && xfs_log_need_covered(mp)) {
1333  		/*
1334  		 * Dump a transaction into the log that contains no real change.
1335  		 * This is needed to stamp the current tail LSN into the log
1336  		 * during the covering operation.
1337  		 *
1338  		 * We cannot use an inode here for this - that will push dirty
1339  		 * state back up into the VFS and then periodic inode flushing
1340  		 * will prevent log covering from making progress. Hence we
1341  		 * synchronously log the superblock instead to ensure the
1342  		 * superblock is immediately unpinned and can be written back.
1343  		 */
1344  		xlog_clear_incompat(log);
1345  		xfs_sync_sb(mp, true);
1346  	} else
1347  		xfs_log_force(mp, 0);
1348  
1349  	/* start pushing all the metadata that is currently dirty */
1350  	xfs_ail_push_all(mp->m_ail);
1351  
1352  	/* queue us up again */
1353  	xfs_log_work_queue(mp);
1354  }
1355  
1356  /*
1357   * This routine initializes some of the log structure for a given mount point.
1358   * Its primary purpose is to fill in enough so that recovery can occur.  However,
1359   * some other stuff may be filled in too.
1360   */
1361  STATIC struct xlog *
1362  xlog_alloc_log(
1363  	struct xfs_mount	*mp,
1364  	struct xfs_buftarg	*log_target,
1365  	xfs_daddr_t		blk_offset,
1366  	int			num_bblks)
1367  {
1368  	struct xlog		*log;
1369  	xlog_rec_header_t	*head;
1370  	xlog_in_core_t		**iclogp;
1371  	xlog_in_core_t		*iclog, *prev_iclog=NULL;
1372  	int			i;
1373  	int			error = -ENOMEM;
1374  	uint			log2_size = 0;
1375  
1376  	log = kzalloc(sizeof(struct xlog), GFP_KERNEL | __GFP_RETRY_MAYFAIL);
1377  	if (!log) {
1378  		xfs_warn(mp, "Log allocation failed: No memory!");
1379  		goto out;
1380  	}
1381  
1382  	log->l_mp	   = mp;
1383  	log->l_targ	   = log_target;
1384  	log->l_logsize     = BBTOB(num_bblks);
1385  	log->l_logBBstart  = blk_offset;
1386  	log->l_logBBsize   = num_bblks;
1387  	log->l_covered_state = XLOG_STATE_COVER_IDLE;
1388  	set_bit(XLOG_ACTIVE_RECOVERY, &log->l_opstate);
1389  	INIT_DELAYED_WORK(&log->l_work, xfs_log_worker);
1390  	INIT_LIST_HEAD(&log->r_dfops);
1391  
1392  	log->l_prev_block  = -1;
1393  	/* log->l_tail_lsn = 0x100000000LL; cycle = 1; current block = 0 */
1394  	xlog_assign_atomic_lsn(&log->l_tail_lsn, 1, 0);
1395  	log->l_curr_cycle  = 1;	    /* 0 is bad since this is initial value */
1396  
1397  	if (xfs_has_logv2(mp) && mp->m_sb.sb_logsunit > 1)
1398  		log->l_iclog_roundoff = mp->m_sb.sb_logsunit;
1399  	else
1400  		log->l_iclog_roundoff = BBSIZE;
1401  
1402  	xlog_grant_head_init(&log->l_reserve_head);
1403  	xlog_grant_head_init(&log->l_write_head);
1404  
1405  	error = -EFSCORRUPTED;
1406  	if (xfs_has_sector(mp)) {
1407  	        log2_size = mp->m_sb.sb_logsectlog;
1408  		if (log2_size < BBSHIFT) {
1409  			xfs_warn(mp, "Log sector size too small (0x%x < 0x%x)",
1410  				log2_size, BBSHIFT);
1411  			goto out_free_log;
1412  		}
1413  
1414  	        log2_size -= BBSHIFT;
1415  		if (log2_size > mp->m_sectbb_log) {
1416  			xfs_warn(mp, "Log sector size too large (0x%x > 0x%x)",
1417  				log2_size, mp->m_sectbb_log);
1418  			goto out_free_log;
1419  		}
1420  
1421  		/* for larger sector sizes, must have v2 or external log */
1422  		if (log2_size && log->l_logBBstart > 0 &&
1423  			    !xfs_has_logv2(mp)) {
1424  			xfs_warn(mp,
1425  		"log sector size (0x%x) invalid for configuration.",
1426  				log2_size);
1427  			goto out_free_log;
1428  		}
1429  	}
1430  	log->l_sectBBsize = 1 << log2_size;
1431  
1432  	xlog_get_iclog_buffer_size(mp, log);
1433  
1434  	spin_lock_init(&log->l_icloglock);
1435  	init_waitqueue_head(&log->l_flush_wait);
1436  
1437  	iclogp = &log->l_iclog;
1438  	/*
1439  	 * The amount of memory to allocate for the iclog structure is
1440  	 * rather funky due to the way the structure is defined.  It is
1441  	 * done this way so that we can use different sizes for machines
1442  	 * with different amounts of memory.  See the definition of
1443  	 * xlog_in_core_t in xfs_log_priv.h for details.
1444  	 */
1445  	ASSERT(log->l_iclog_size >= 4096);
1446  	for (i = 0; i < log->l_iclog_bufs; i++) {
1447  		size_t bvec_size = howmany(log->l_iclog_size, PAGE_SIZE) *
1448  				sizeof(struct bio_vec);
1449  
1450  		iclog = kzalloc(sizeof(*iclog) + bvec_size,
1451  				GFP_KERNEL | __GFP_RETRY_MAYFAIL);
1452  		if (!iclog)
1453  			goto out_free_iclog;
1454  
1455  		*iclogp = iclog;
1456  		iclog->ic_prev = prev_iclog;
1457  		prev_iclog = iclog;
1458  
1459  		iclog->ic_data = kvzalloc(log->l_iclog_size,
1460  				GFP_KERNEL | __GFP_RETRY_MAYFAIL);
1461  		if (!iclog->ic_data)
1462  			goto out_free_iclog;
1463  		head = &iclog->ic_header;
1464  		memset(head, 0, sizeof(xlog_rec_header_t));
1465  		head->h_magicno = cpu_to_be32(XLOG_HEADER_MAGIC_NUM);
1466  		head->h_version = cpu_to_be32(
1467  			xfs_has_logv2(log->l_mp) ? 2 : 1);
1468  		head->h_size = cpu_to_be32(log->l_iclog_size);
1469  		/* new fields */
1470  		head->h_fmt = cpu_to_be32(XLOG_FMT);
1471  		memcpy(&head->h_fs_uuid, &mp->m_sb.sb_uuid, sizeof(uuid_t));
1472  
1473  		iclog->ic_size = log->l_iclog_size - log->l_iclog_hsize;
1474  		iclog->ic_state = XLOG_STATE_ACTIVE;
1475  		iclog->ic_log = log;
1476  		atomic_set(&iclog->ic_refcnt, 0);
1477  		INIT_LIST_HEAD(&iclog->ic_callbacks);
1478  		iclog->ic_datap = (void *)iclog->ic_data + log->l_iclog_hsize;
1479  
1480  		init_waitqueue_head(&iclog->ic_force_wait);
1481  		init_waitqueue_head(&iclog->ic_write_wait);
1482  		INIT_WORK(&iclog->ic_end_io_work, xlog_ioend_work);
1483  		sema_init(&iclog->ic_sema, 1);
1484  
1485  		iclogp = &iclog->ic_next;
1486  	}
1487  	*iclogp = log->l_iclog;			/* complete ring */
1488  	log->l_iclog->ic_prev = prev_iclog;	/* re-write 1st prev ptr */
1489  
1490  	log->l_ioend_workqueue = alloc_workqueue("xfs-log/%s",
1491  			XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM |
1492  				    WQ_HIGHPRI),
1493  			0, mp->m_super->s_id);
1494  	if (!log->l_ioend_workqueue)
1495  		goto out_free_iclog;
1496  
1497  	error = xlog_cil_init(log);
1498  	if (error)
1499  		goto out_destroy_workqueue;
1500  	return log;
1501  
1502  out_destroy_workqueue:
1503  	destroy_workqueue(log->l_ioend_workqueue);
1504  out_free_iclog:
1505  	for (iclog = log->l_iclog; iclog; iclog = prev_iclog) {
1506  		prev_iclog = iclog->ic_next;
1507  		kvfree(iclog->ic_data);
1508  		kfree(iclog);
1509  		if (prev_iclog == log->l_iclog)
1510  			break;
1511  	}
1512  out_free_log:
1513  	kfree(log);
1514  out:
1515  	return ERR_PTR(error);
1516  }	/* xlog_alloc_log */
1517  
1518  /*
1519   * Stamp cycle number in every block
1520   */
1521  STATIC void
1522  xlog_pack_data(
1523  	struct xlog		*log,
1524  	struct xlog_in_core	*iclog,
1525  	int			roundoff)
1526  {
1527  	int			i, j, k;
1528  	int			size = iclog->ic_offset + roundoff;
1529  	__be32			cycle_lsn;
1530  	char			*dp;
1531  
1532  	cycle_lsn = CYCLE_LSN_DISK(iclog->ic_header.h_lsn);
1533  
1534  	dp = iclog->ic_datap;
1535  	for (i = 0; i < BTOBB(size); i++) {
1536  		if (i >= (XLOG_HEADER_CYCLE_SIZE / BBSIZE))
1537  			break;
1538  		iclog->ic_header.h_cycle_data[i] = *(__be32 *)dp;
1539  		*(__be32 *)dp = cycle_lsn;
1540  		dp += BBSIZE;
1541  	}
1542  
1543  	if (xfs_has_logv2(log->l_mp)) {
1544  		xlog_in_core_2_t *xhdr = iclog->ic_data;
1545  
1546  		for ( ; i < BTOBB(size); i++) {
1547  			j = i / (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
1548  			k = i % (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
1549  			xhdr[j].hic_xheader.xh_cycle_data[k] = *(__be32 *)dp;
1550  			*(__be32 *)dp = cycle_lsn;
1551  			dp += BBSIZE;
1552  		}
1553  
1554  		for (i = 1; i < log->l_iclog_heads; i++)
1555  			xhdr[i].hic_xheader.xh_cycle = cycle_lsn;
1556  	}
1557  }
1558  
1559  /*
1560   * Calculate the checksum for a log buffer.
1561   *
1562   * This is a little more complicated than it should be because the various
1563   * headers and the actual data are non-contiguous.
1564   */
1565  __le32
1566  xlog_cksum(
1567  	struct xlog		*log,
1568  	struct xlog_rec_header	*rhead,
1569  	char			*dp,
1570  	int			size)
1571  {
1572  	uint32_t		crc;
1573  
1574  	/* first generate the crc for the record header ... */
1575  	crc = xfs_start_cksum_update((char *)rhead,
1576  			      sizeof(struct xlog_rec_header),
1577  			      offsetof(struct xlog_rec_header, h_crc));
1578  
1579  	/* ... then for additional cycle data for v2 logs ... */
1580  	if (xfs_has_logv2(log->l_mp)) {
1581  		union xlog_in_core2 *xhdr = (union xlog_in_core2 *)rhead;
1582  		int		i;
1583  		int		xheads;
1584  
1585  		xheads = DIV_ROUND_UP(size, XLOG_HEADER_CYCLE_SIZE);
1586  
1587  		for (i = 1; i < xheads; i++) {
1588  			crc = crc32c(crc, &xhdr[i].hic_xheader,
1589  				     sizeof(struct xlog_rec_ext_header));
1590  		}
1591  	}
1592  
1593  	/* ... and finally for the payload */
1594  	crc = crc32c(crc, dp, size);
1595  
1596  	return xfs_end_cksum(crc);
1597  }
1598  
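/*
 * Log bio completion callback. Defer the real completion processing to the
 * per-log ioend workqueue so it runs in process context.
 */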
1599  static void
1600  xlog_bio_end_io(
1601  	struct bio		*bio)
1602  {
1603  	struct xlog_in_core	*iclog = bio->bi_private;
1604  
1605  	queue_work(iclog->ic_log->l_ioend_workqueue,
1606  		   &iclog->ic_end_io_work);
1607  }
1608  
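/*
 * Add the iclog data buffer to the bio one page at a time. The buffer may
 * be kmalloc'd or vmalloc'd, so each chunk is translated through
 * kmem_to_page() rather than assuming physical contiguity.
 */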
1609  static int
1610  xlog_map_iclog_data(
1611  	struct bio		*bio,
1612  	void			*data,
1613  	size_t			count)
1614  {
1615  	do {
1616  		struct page	*page = kmem_to_page(data);
1617  		unsigned int	off = offset_in_page(data);
1618  		size_t		len = min_t(size_t, count, PAGE_SIZE - off);
1619  
1620  		if (bio_add_page(bio, page, len, off) != len)
1621  			return -EIO;
1622  
1623  		data += len;
1624  		count -= len;
1625  	} while (count);
1626  
1627  	return 0;
1628  }
1629  
1630  STATIC void
1631  xlog_write_iclog(
1632  	struct xlog		*log,
1633  	struct xlog_in_core	*iclog,
1634  	uint64_t		bno,
1635  	unsigned int		count)
1636  {
1637  	ASSERT(bno < log->l_logBBsize);
1638  	trace_xlog_iclog_write(iclog, _RET_IP_);
1639  
1640  	/*
1641  	 * We lock the iclogbufs here so that we can serialise against I/O
1642  	 * completion during unmount.  We might be processing a shutdown
1643  	 * triggered during unmount, and that can occur asynchronously to the
1644  	 * unmount thread, and hence we need to ensure that completes before
1645  	 * tearing down the iclogbufs.  Hence we need to hold the buffer lock
1646  	 * across the log IO to achieve that.
1647  	 */
1648  	down(&iclog->ic_sema);
1649  	if (xlog_is_shutdown(log)) {
1650  		/*
1651  		 * It would seem logical to return EIO here, but we rely on
1652  		 * the log state machine to propagate I/O errors instead of
1653  		 * doing it here.  We kick off the state machine and unlock
1654  		 * the buffer manually, the code needs to be kept in sync
1655  		 * with the I/O completion path.
1656  		 */
1657  		goto sync;
1658  	}
1659  
1660  	/*
1661  	 * We use REQ_SYNC | REQ_IDLE here to tell the block layer there are more
1662  	 * IOs coming immediately after this one. This prevents the block layer
1663  	 * writeback throttle from throttling log writes behind background
1664  	 * metadata writeback and causing priority inversions.
1665  	 */
1666  	bio_init(&iclog->ic_bio, log->l_targ->bt_bdev, iclog->ic_bvec,
1667  		 howmany(count, PAGE_SIZE),
1668  		 REQ_OP_WRITE | REQ_META | REQ_SYNC | REQ_IDLE);
1669  	iclog->ic_bio.bi_iter.bi_sector = log->l_logBBstart + bno;
1670  	iclog->ic_bio.bi_end_io = xlog_bio_end_io;
1671  	iclog->ic_bio.bi_private = iclog;
1672  
1673  	if (iclog->ic_flags & XLOG_ICL_NEED_FLUSH) {
1674  		iclog->ic_bio.bi_opf |= REQ_PREFLUSH;
1675  		/*
1676  		 * For external log devices, we also need to flush the data
1677  		 * device cache first to ensure all metadata writeback covered
1678  		 * by the LSN in this iclog is on stable storage. This is slow,
1679  		 * but it *must* complete before we issue the external log IO.
1680  		 *
1681  		 * If the flush fails, we cannot conclude that past metadata
1682  		 * writeback from the log succeeded.  Repeating the flush is
1683  		 * not possible, hence we must shut down with log IO error to
1684  		 * avoid shutdown re-entering this path and erroring out again.
1685  		 */
1686  		if (log->l_targ != log->l_mp->m_ddev_targp &&
1687  		    blkdev_issue_flush(log->l_mp->m_ddev_targp->bt_bdev))
1688  			goto shutdown;
1689  	}
1690  	if (iclog->ic_flags & XLOG_ICL_NEED_FUA)
1691  		iclog->ic_bio.bi_opf |= REQ_FUA;
1692  
1693  	iclog->ic_flags &= ~(XLOG_ICL_NEED_FLUSH | XLOG_ICL_NEED_FUA);
1694  
1695  	if (xlog_map_iclog_data(&iclog->ic_bio, iclog->ic_data, count))
1696  		goto shutdown;
1697  
1698  	if (is_vmalloc_addr(iclog->ic_data))
1699  		flush_kernel_vmap_range(iclog->ic_data, count);
1700  
1701  	/*
1702  	 * If this log buffer would straddle the end of the log we will have
1703  	 * to split it up into two bios, so that we can continue at the start.
1704  	 */
1705  	if (bno + BTOBB(count) > log->l_logBBsize) {
1706  		struct bio *split;
1707  
1708  		split = bio_split(&iclog->ic_bio, log->l_logBBsize - bno,
1709  				  GFP_NOIO, &fs_bio_set);
1710  		bio_chain(split, &iclog->ic_bio);
1711  		submit_bio(split);
1712  
1713  		/* restart at logical offset zero for the remainder */
1714  		iclog->ic_bio.bi_iter.bi_sector = log->l_logBBstart;
1715  	}
1716  
1717  	submit_bio(&iclog->ic_bio);
1718  	return;
1719  shutdown:
1720  	xlog_force_shutdown(log, SHUTDOWN_LOG_IO_ERROR);
1721  sync:
1722  	xlog_state_done_syncing(iclog);
1723  	up(&iclog->ic_sema);
1724  }
1725  
1726  /*
1727   * We need to bump cycle number for the part of the iclog that is
1728   * written to the start of the log. Watch out for the header magic
1729   * number case, though.
1730   */
1731  static void
1732  xlog_split_iclog(
1733  	struct xlog		*log,
1734  	void			*data,
1735  	uint64_t		bno,
1736  	unsigned int		count)
1737  {
1738  	unsigned int		split_offset = BBTOB(log->l_logBBsize - bno);
1739  	unsigned int		i;
1740  
1741  	for (i = split_offset; i < count; i += BBSIZE) {
1742  		uint32_t cycle = get_unaligned_be32(data + i);
1743  
1744  		if (++cycle == XLOG_HEADER_MAGIC_NUM)
1745  			cycle++;
1746  		put_unaligned_be32(cycle, data + i);
1747  	}
1748  }
1749  
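/*
 * Illustrative example for xlog_split_iclog() above (numbers assumed, not from
 * the original source): with a 1000 block log (l_logBBsize == 1000), an iclog
 * starting at bno == 995 and count == 5120 bytes (10 basic blocks), the split
 * offset is BBTOB(1000 - 995) == 2560.  The five 512 byte blocks from offset
 * 2560 onwards wrap to the physical start of the log, so each of their cycle
 * stamps is bumped by one, skipping XLOG_HEADER_MAGIC_NUM if it is hit.
 */
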
1750  static int
1751  xlog_calc_iclog_size(
1752  	struct xlog		*log,
1753  	struct xlog_in_core	*iclog,
1754  	uint32_t		*roundoff)
1755  {
1756  	uint32_t		count_init, count;
1757  
1758  	/* Add for LR header */
1759  	count_init = log->l_iclog_hsize + iclog->ic_offset;
1760  	count = roundup(count_init, log->l_iclog_roundoff);
1761  
1762  	*roundoff = count - count_init;
1763  
1764  	ASSERT(count >= count_init);
1765  	ASSERT(*roundoff < log->l_iclog_roundoff);
1766  	return count;
1767  }
1768  
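/*
 * Worked example for xlog_calc_iclog_size() above (figures assumed, not taken
 * from the original source): with a 512 byte log record header, ic_offset of
 * 7000 bytes and a 4096 byte l_iclog_roundoff (stripe unit), count_init is
 * 7512, count rounds up to 8192 and *roundoff comes back as 680.
 */
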
1769  /*
1770   * Flush out the in-core log (iclog) to the on-disk log in an asynchronous
1771   * fashion.  By the time this is called, the current iclog pointer in the log
1772   * should already point to the next available iclog.  This allows further
1773   * writes to continue while this code syncs out an iclog ready to go.
1774   * Before an in-core log can be written out, the data section must be scanned
1775   * to save away the 1st word of each BBSIZE block into the header.  We replace
1776   * it with the current cycle count.  Each BBSIZE block is tagged with the
1777   * cycle count because there is an implicit assumption that drives will
1778   * guarantee that entire 512 byte blocks get written at once.  In other words,
1779   * we can't have part of a 512 byte block written and part not written.  By
1780   * tagging each block, we will know which blocks are valid when recovering
1781   * after an unclean shutdown.
1782   *
1783   * This routine is single threaded on the iclog.  No other thread can be in
1784   * this routine with the same iclog.  Changing contents of iclog can therefore
1785   * be done without grabbing the state machine lock.  Updating the global
1786   * log will require grabbing the lock though.
1787   *
1788   * The entire log manager uses a logical block numbering scheme.  Only
1789   * xlog_write_iclog knows about the fact that the log may not start with
1790   * block zero on a given device.
1791   */
1792  STATIC void
1793  xlog_sync(
1794  	struct xlog		*log,
1795  	struct xlog_in_core	*iclog,
1796  	struct xlog_ticket	*ticket)
1797  {
1798  	unsigned int		count;		/* byte count of bwrite */
1799  	unsigned int		roundoff;       /* roundoff to BB or stripe */
1800  	uint64_t		bno;
1801  	unsigned int		size;
1802  
1803  	ASSERT(atomic_read(&iclog->ic_refcnt) == 0);
1804  	trace_xlog_iclog_sync(iclog, _RET_IP_);
1805  
1806  	count = xlog_calc_iclog_size(log, iclog, &roundoff);
1807  
1808  	/*
1809  	 * If we have a ticket, account for the roundoff via the ticket
1810  	 * reservation to avoid touching the hot grant heads needlessly.
1811  	 * Otherwise, we have to move grant heads directly.
1812  	 */
1813  	if (ticket) {
1814  		ticket->t_curr_res -= roundoff;
1815  	} else {
1816  		xlog_grant_add_space(&log->l_reserve_head, roundoff);
1817  		xlog_grant_add_space(&log->l_write_head, roundoff);
1818  	}
1819  
1820  	/* put cycle number in every block */
1821  	xlog_pack_data(log, iclog, roundoff);
1822  
1823  	/* real byte length */
1824  	size = iclog->ic_offset;
1825  	if (xfs_has_logv2(log->l_mp))
1826  		size += roundoff;
1827  	iclog->ic_header.h_len = cpu_to_be32(size);
1828  
1829  	XFS_STATS_INC(log->l_mp, xs_log_writes);
1830  	XFS_STATS_ADD(log->l_mp, xs_log_blocks, BTOBB(count));
1831  
1832  	bno = BLOCK_LSN(be64_to_cpu(iclog->ic_header.h_lsn));
1833  
1834  	/* Do we need to split this write into 2 parts? */
1835  	if (bno + BTOBB(count) > log->l_logBBsize)
1836  		xlog_split_iclog(log, &iclog->ic_header, bno, count);
1837  
1838  	/* calculate the checksum */
1839  	iclog->ic_header.h_crc = xlog_cksum(log, &iclog->ic_header,
1840  					    iclog->ic_datap, size);
1841  	/*
1842  	 * Intentionally corrupt the log record CRC based on the error injection
1843  	 * frequency, if defined. This facilitates testing log recovery in the
1844  	 * event of torn writes. Hence, set the IOABORT state to abort the log
1845  	 * write on I/O completion and shutdown the fs. The subsequent mount
1846  	 * detects the bad CRC and attempts to recover.
1847  	 */
1848  #ifdef DEBUG
1849  	if (XFS_TEST_ERROR(false, log->l_mp, XFS_ERRTAG_LOG_BAD_CRC)) {
1850  		iclog->ic_header.h_crc &= cpu_to_le32(0xAAAAAAAA);
1851  		iclog->ic_fail_crc = true;
1852  		xfs_warn(log->l_mp,
1853  	"Intentionally corrupted log record at LSN 0x%llx. Shutdown imminent.",
1854  			 be64_to_cpu(iclog->ic_header.h_lsn));
1855  	}
1856  #endif
1857  	xlog_verify_iclog(log, iclog, count);
1858  	xlog_write_iclog(log, iclog, bno, count);
1859  }
1860  
1861  /*
1862   * Deallocate a log structure
1863   */
1864  STATIC void
1865  xlog_dealloc_log(
1866  	struct xlog	*log)
1867  {
1868  	xlog_in_core_t	*iclog, *next_iclog;
1869  	int		i;
1870  
1871  	/*
1872  	 * Destroy the CIL after waiting for iclog IO completion because an
1873  	 * iclog EIO error will try to shut down the log, which accesses the
1874  	 * CIL to wake up the waiters.
1875  	 */
1876  	xlog_cil_destroy(log);
1877  
1878  	iclog = log->l_iclog;
1879  	for (i = 0; i < log->l_iclog_bufs; i++) {
1880  		next_iclog = iclog->ic_next;
1881  		kvfree(iclog->ic_data);
1882  		kfree(iclog);
1883  		iclog = next_iclog;
1884  	}
1885  
1886  	log->l_mp->m_log = NULL;
1887  	destroy_workqueue(log->l_ioend_workqueue);
1888  	kfree(log);
1889  }
1890  
1891  /*
1892   * Update counters atomically now that memcpy is done.
1893   */
1894  static inline void
1895  xlog_state_finish_copy(
1896  	struct xlog		*log,
1897  	struct xlog_in_core	*iclog,
1898  	int			record_cnt,
1899  	int			copy_bytes)
1900  {
1901  	lockdep_assert_held(&log->l_icloglock);
1902  
1903  	be32_add_cpu(&iclog->ic_header.h_num_logops, record_cnt);
1904  	iclog->ic_offset += copy_bytes;
1905  }
1906  
1907  /*
1908   * print out info relating to regions written which consume
1909   * the reservation
1910   */
1911  void
1912  xlog_print_tic_res(
1913  	struct xfs_mount	*mp,
1914  	struct xlog_ticket	*ticket)
1915  {
1916  	xfs_warn(mp, "ticket reservation summary:");
1917  	xfs_warn(mp, "  unit res    = %d bytes", ticket->t_unit_res);
1918  	xfs_warn(mp, "  current res = %d bytes", ticket->t_curr_res);
1919  	xfs_warn(mp, "  original count  = %d", ticket->t_ocnt);
1920  	xfs_warn(mp, "  remaining count = %d", ticket->t_cnt);
1921  }
1922  
1923  /*
1924   * Print a summary of the transaction.
1925   */
1926  void
1927  xlog_print_trans(
1928  	struct xfs_trans	*tp)
1929  {
1930  	struct xfs_mount	*mp = tp->t_mountp;
1931  	struct xfs_log_item	*lip;
1932  
1933  	/* dump core transaction and ticket info */
1934  	xfs_warn(mp, "transaction summary:");
1935  	xfs_warn(mp, "  log res   = %d", tp->t_log_res);
1936  	xfs_warn(mp, "  log count = %d", tp->t_log_count);
1937  	xfs_warn(mp, "  flags     = 0x%x", tp->t_flags);
1938  
1939  	xlog_print_tic_res(mp, tp->t_ticket);
1940  
1941  	/* dump each log item */
1942  	list_for_each_entry(lip, &tp->t_items, li_trans) {
1943  		struct xfs_log_vec	*lv = lip->li_lv;
1944  		struct xfs_log_iovec	*vec;
1945  		int			i;
1946  
1947  		xfs_warn(mp, "log item: ");
1948  		xfs_warn(mp, "  type	= 0x%x", lip->li_type);
1949  		xfs_warn(mp, "  flags	= 0x%lx", lip->li_flags);
1950  		if (!lv)
1951  			continue;
1952  		xfs_warn(mp, "  niovecs	= %d", lv->lv_niovecs);
1953  		xfs_warn(mp, "  size	= %d", lv->lv_size);
1954  		xfs_warn(mp, "  bytes	= %d", lv->lv_bytes);
1955  		xfs_warn(mp, "  buf len	= %d", lv->lv_buf_len);
1956  
1957  		/* dump each iovec for the log item */
1958  		vec = lv->lv_iovecp;
1959  		for (i = 0; i < lv->lv_niovecs; i++) {
1960  			int dumplen = min(vec->i_len, 32);
1961  
1962  			xfs_warn(mp, "  iovec[%d]", i);
1963  			xfs_warn(mp, "    type	= 0x%x", vec->i_type);
1964  			xfs_warn(mp, "    len	= %d", vec->i_len);
1965  			xfs_warn(mp, "    first %d bytes of iovec[%d]:", dumplen, i);
1966  			xfs_hex_dump(vec->i_addr, dumplen);
1967  
1968  			vec++;
1969  		}
1970  	}
1971  }
1972  
1973  static inline void
1974  xlog_write_iovec(
1975  	struct xlog_in_core	*iclog,
1976  	uint32_t		*log_offset,
1977  	void			*data,
1978  	uint32_t		write_len,
1979  	int			*bytes_left,
1980  	uint32_t		*record_cnt,
1981  	uint32_t		*data_cnt)
1982  {
1983  	ASSERT(*log_offset < iclog->ic_log->l_iclog_size);
1984  	ASSERT(*log_offset % sizeof(int32_t) == 0);
1985  	ASSERT(write_len % sizeof(int32_t) == 0);
1986  
1987  	memcpy(iclog->ic_datap + *log_offset, data, write_len);
1988  	*log_offset += write_len;
1989  	*bytes_left -= write_len;
1990  	(*record_cnt)++;
1991  	*data_cnt += write_len;
1992  }
1993  
1994  /*
1995   * Write log vectors into a single iclog which is guaranteed by the caller
1996   * to have enough space to write the entire log vector into.
1997   */
1998  static void
1999  xlog_write_full(
2000  	struct xfs_log_vec	*lv,
2001  	struct xlog_ticket	*ticket,
2002  	struct xlog_in_core	*iclog,
2003  	uint32_t		*log_offset,
2004  	uint32_t		*len,
2005  	uint32_t		*record_cnt,
2006  	uint32_t		*data_cnt)
2007  {
2008  	int			index;
2009  
2010  	ASSERT(*log_offset + *len <= iclog->ic_size ||
2011  		iclog->ic_state == XLOG_STATE_WANT_SYNC);
2012  
2013  	/*
2014  	 * Ordered log vectors have no regions to write so this
2015  	 * loop will naturally skip them.
2016  	 */
2017  	for (index = 0; index < lv->lv_niovecs; index++) {
2018  		struct xfs_log_iovec	*reg = &lv->lv_iovecp[index];
2019  		struct xlog_op_header	*ophdr = reg->i_addr;
2020  
2021  		ophdr->oh_tid = cpu_to_be32(ticket->t_tid);
2022  		xlog_write_iovec(iclog, log_offset, reg->i_addr,
2023  				reg->i_len, len, record_cnt, data_cnt);
2024  	}
2025  }
2026  
2027  static int
2028  xlog_write_get_more_iclog_space(
2029  	struct xlog_ticket	*ticket,
2030  	struct xlog_in_core	**iclogp,
2031  	uint32_t		*log_offset,
2032  	uint32_t		len,
2033  	uint32_t		*record_cnt,
2034  	uint32_t		*data_cnt)
2035  {
2036  	struct xlog_in_core	*iclog = *iclogp;
2037  	struct xlog		*log = iclog->ic_log;
2038  	int			error;
2039  
2040  	spin_lock(&log->l_icloglock);
2041  	ASSERT(iclog->ic_state == XLOG_STATE_WANT_SYNC);
2042  	xlog_state_finish_copy(log, iclog, *record_cnt, *data_cnt);
2043  	error = xlog_state_release_iclog(log, iclog, ticket);
2044  	spin_unlock(&log->l_icloglock);
2045  	if (error)
2046  		return error;
2047  
2048  	error = xlog_state_get_iclog_space(log, len, &iclog, ticket,
2049  					log_offset);
2050  	if (error)
2051  		return error;
2052  	*record_cnt = 0;
2053  	*data_cnt = 0;
2054  	*iclogp = iclog;
2055  	return 0;
2056  }
2057  
2058  /*
2059   * Write log vectors into a single iclog which is smaller than the current chain
2060   * length. We write until we cannot fit a full record into the remaining space
2061   * and then stop. We return the log vector that is to be written that cannot
2062   * wholly fit in the iclog.
2063   */
2064  static int
2065  xlog_write_partial(
2066  	struct xfs_log_vec	*lv,
2067  	struct xlog_ticket	*ticket,
2068  	struct xlog_in_core	**iclogp,
2069  	uint32_t		*log_offset,
2070  	uint32_t		*len,
2071  	uint32_t		*record_cnt,
2072  	uint32_t		*data_cnt)
2073  {
2074  	struct xlog_in_core	*iclog = *iclogp;
2075  	struct xlog_op_header	*ophdr;
2076  	int			index = 0;
2077  	uint32_t		rlen;
2078  	int			error;
2079  
2080  	/* walk the logvec, copying until we run out of space in the iclog */
2081  	for (index = 0; index < lv->lv_niovecs; index++) {
2082  		struct xfs_log_iovec	*reg = &lv->lv_iovecp[index];
2083  		uint32_t		reg_offset = 0;
2084  
2085  		/*
2086  		 * The first region of a continuation must have a non-zero
2087  		 * length otherwise log recovery will just skip over it and
2088  		 * start recovering from the next opheader it finds. Because we
2089  		 * mark the next opheader as a continuation, recovery will then
2090  		 * incorrectly add the continuation to the previous region and
2091  		 * that breaks stuff.
2092  		 *
2093  		 * Hence if there isn't space for region data after the
2094  		 * opheader, then we need to start afresh with a new iclog.
2095  		 */
2096  		if (iclog->ic_size - *log_offset <=
2097  					sizeof(struct xlog_op_header)) {
2098  			error = xlog_write_get_more_iclog_space(ticket,
2099  					&iclog, log_offset, *len, record_cnt,
2100  					data_cnt);
2101  			if (error)
2102  				return error;
2103  		}
2104  
2105  		ophdr = reg->i_addr;
2106  		rlen = min_t(uint32_t, reg->i_len, iclog->ic_size - *log_offset);
2107  
2108  		ophdr->oh_tid = cpu_to_be32(ticket->t_tid);
2109  		ophdr->oh_len = cpu_to_be32(rlen - sizeof(struct xlog_op_header));
2110  		if (rlen != reg->i_len)
2111  			ophdr->oh_flags |= XLOG_CONTINUE_TRANS;
2112  
2113  		xlog_write_iovec(iclog, log_offset, reg->i_addr,
2114  				rlen, len, record_cnt, data_cnt);
2115  
2116  		/* If we wrote the whole region, move to the next. */
2117  		if (rlen == reg->i_len)
2118  			continue;
2119  
2120  		/*
2121  		 * We now have a partially written iovec, but it can span
2122  		 * multiple iclogs so we loop here. First we release the iclog
2123  		 * we currently have, then we get a new iclog and add a new
2124  		 * opheader. Then we continue copying from where we were until
2125  		 * we either complete the iovec or fill the iclog. If we
2126  		 * complete the iovec, then we increment the index and go right
2127  		 * back to the top of the outer loop. if we fill the iclog, we
2128  		 * run the inner loop again.
2129  		 *
2130  		 * This is complicated by the tail of a region using all the
2131  		 * space in an iclog and hence requiring us to release the iclog
2132  		 * and get a new one before returning to the outer loop. We must
2133  		 * always guarantee that we exit this inner loop with at least
2134  		 * space for log transaction opheaders left in the current
2135  		 * iclog, hence we cannot just terminate the loop at the end
2136  		 * of the continuation. So we loop while there is no
2137  		 * space left in the current iclog, and check for the end of the
2138  		 * continuation after getting a new iclog.
2139  		 */
2140  		do {
2141  			/*
2142  			 * Ensure we include the continuation opheader in the
2143  			 * space we need in the new iclog by adding that size
2144  			 * to the length we require. This continuation opheader
2145  			 * needs to be accounted to the ticket as the space it
2146  			 * consumes hasn't been accounted to the lv we are
2147  			 * writing.
2148  			 */
2149  			error = xlog_write_get_more_iclog_space(ticket,
2150  					&iclog, log_offset,
2151  					*len + sizeof(struct xlog_op_header),
2152  					record_cnt, data_cnt);
2153  			if (error)
2154  				return error;
2155  
2156  			ophdr = iclog->ic_datap + *log_offset;
2157  			ophdr->oh_tid = cpu_to_be32(ticket->t_tid);
2158  			ophdr->oh_clientid = XFS_TRANSACTION;
2159  			ophdr->oh_res2 = 0;
2160  			ophdr->oh_flags = XLOG_WAS_CONT_TRANS;
2161  
2162  			ticket->t_curr_res -= sizeof(struct xlog_op_header);
2163  			*log_offset += sizeof(struct xlog_op_header);
2164  			*data_cnt += sizeof(struct xlog_op_header);
2165  
2166  			/*
2167  			 * If rlen fits in the iclog, then end the region
2168  			 * continuation. Otherwise we're going around again.
2169  			 */
2170  			reg_offset += rlen;
2171  			rlen = reg->i_len - reg_offset;
2172  			if (rlen <= iclog->ic_size - *log_offset)
2173  				ophdr->oh_flags |= XLOG_END_TRANS;
2174  			else
2175  				ophdr->oh_flags |= XLOG_CONTINUE_TRANS;
2176  
2177  			rlen = min_t(uint32_t, rlen, iclog->ic_size - *log_offset);
2178  			ophdr->oh_len = cpu_to_be32(rlen);
2179  
2180  			xlog_write_iovec(iclog, log_offset,
2181  					reg->i_addr + reg_offset,
2182  					rlen, len, record_cnt, data_cnt);
2183  
2184  		} while (ophdr->oh_flags & XLOG_CONTINUE_TRANS);
2185  	}
2186  
2187  	/*
2188  	 * No more iovecs remain in this logvec so return the next log vec to
2189  	 * the caller so it can go back to fast path copying.
2190  	 */
2191  	*iclogp = iclog;
2192  	return 0;
2193  }
2194  
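/*
 * Illustrative walk-through of xlog_write_partial() above (sizes assumed, not
 * from the original source): copying a 70000 byte region into 32k iclogs with
 * 28000 bytes free in the first one writes a 28000 byte chunk and marks the
 * region's own opheader XLOG_CONTINUE_TRANS.  Each pass of the inner loop then
 * grabs a fresh iclog, writes a continuation opheader (XLOG_WAS_CONT_TRANS)
 * charged to the ticket, and copies the next chunk, until the remainder fits
 * and the final continuation opheader is flagged XLOG_END_TRANS.
 */
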
2195  /*
2196   * Write some region out to in-core log
2197   *
2198   * This will be called when writing externally provided regions or when
2199   * writing out a commit record for a given transaction.
2200   *
2201   * General algorithm:
2202   *	1. Find total length of this write.  This may include adding to the
2203   *		lengths passed in.
2204   *	2. Check whether we violate the tickets reservation.
2205   *	3. While writing to this iclog
2206   *	    A. Reserve as much space in this iclog as we can get
2207   *	    B. If this is first write, save away start lsn
2208   *	    C. While writing this region:
2209   *		1. If first write of transaction, write start record
2210   *		2. Write log operation header (header per region)
2211   *		3. Find out if we can fit entire region into this iclog
2212   *		4. Potentially, verify destination memcpy ptr
2213   *		5. Memcpy (partial) region
2214   *		6. If partial copy, release iclog; otherwise, continue
2215   *			copying more regions into current iclog
2216   *	4. Mark want sync bit (in simulation mode)
2217   *	5. Release iclog for potential flush to on-disk log.
2218   *
2219   * ERRORS:
2220   * 1.	Panic if reservation is overrun.  This should never happen since
2221   *	reservation amounts are generated internal to the filesystem.
2222   * NOTES:
2223   * 1. Tickets are single threaded data structures.
2224   * 2. The XLOG_END_TRANS & XLOG_CONTINUE_TRANS flags are passed down to the
2225   *	syncing routine.  When a single log_write region needs to span
2226   *	multiple in-core logs, the XLOG_CONTINUE_TRANS bit should be set
2227   *	on all log operation writes which don't contain the end of the
2228   *	region.  The XLOG_END_TRANS bit is used for the in-core log
2229   *	operation which contains the end of the continued log_write region.
2230   * 3. When xlog_state_get_iclog_space() grabs the rest of the current iclog,
2231   *	we don't really know exactly how much space will be used.  As a result,
2232   *	we don't update ic_offset until the end when we know exactly how many
2233   *	bytes have been written out.
2234   */
2235  int
2236  xlog_write(
2237  	struct xlog		*log,
2238  	struct xfs_cil_ctx	*ctx,
2239  	struct list_head	*lv_chain,
2240  	struct xlog_ticket	*ticket,
2241  	uint32_t		len)
2242  
2243  {
2244  	struct xlog_in_core	*iclog = NULL;
2245  	struct xfs_log_vec	*lv;
2246  	uint32_t		record_cnt = 0;
2247  	uint32_t		data_cnt = 0;
2248  	int			error = 0;
2249  	int			log_offset;
2250  
2251  	if (ticket->t_curr_res < 0) {
2252  		xfs_alert_tag(log->l_mp, XFS_PTAG_LOGRES,
2253  		     "ctx ticket reservation ran out. Need to up reservation");
2254  		xlog_print_tic_res(log->l_mp, ticket);
2255  		xlog_force_shutdown(log, SHUTDOWN_LOG_IO_ERROR);
2256  	}
2257  
2258  	error = xlog_state_get_iclog_space(log, len, &iclog, ticket,
2259  					   &log_offset);
2260  	if (error)
2261  		return error;
2262  
2263  	ASSERT(log_offset <= iclog->ic_size - 1);
2264  
2265  	/*
2266  	 * If we have a context pointer, pass it the first iclog we are
2267  	 * writing to so it can record state needed for iclog write
2268  	 * ordering.
2269  	 */
2270  	if (ctx)
2271  		xlog_cil_set_ctx_write_state(ctx, iclog);
2272  
2273  	list_for_each_entry(lv, lv_chain, lv_list) {
2274  		/*
2275  		 * If the entire log vec does not fit in the iclog, punt it to
2276  		 * the partial copy loop which can handle this case.
2277  		 */
2278  		if (lv->lv_niovecs &&
2279  		    lv->lv_bytes > iclog->ic_size - log_offset) {
2280  			error = xlog_write_partial(lv, ticket, &iclog,
2281  					&log_offset, &len, &record_cnt,
2282  					&data_cnt);
2283  			if (error) {
2284  				/*
2285  				 * We have no iclog to release, so just return
2286  				 * the error immediately.
2287  				 */
2288  				return error;
2289  			}
2290  		} else {
2291  			xlog_write_full(lv, ticket, iclog, &log_offset,
2292  					 &len, &record_cnt, &data_cnt);
2293  		}
2294  	}
2295  	ASSERT(len == 0);
2296  
2297  	/*
2298  	 * We've already been guaranteed that the last writes will fit inside
2299  	 * the current iclog, and hence it will already have the space used by
2300  	 * those writes accounted to it. Hence we do not need to update the
2301  	 * iclog with the number of bytes written here.
2302  	 */
2303  	spin_lock(&log->l_icloglock);
2304  	xlog_state_finish_copy(log, iclog, record_cnt, 0);
2305  	error = xlog_state_release_iclog(log, iclog, ticket);
2306  	spin_unlock(&log->l_icloglock);
2307  
2308  	return error;
2309  }
2310  
2311  static void
2312  xlog_state_activate_iclog(
2313  	struct xlog_in_core	*iclog,
2314  	int			*iclogs_changed)
2315  {
2316  	ASSERT(list_empty_careful(&iclog->ic_callbacks));
2317  	trace_xlog_iclog_activate(iclog, _RET_IP_);
2318  
2319  	/*
2320  	 * If the number of ops in this iclog indicate it just contains the
2321  	 * dummy transaction, we can change state into IDLE (the second time
2322  	 * around). Otherwise we should change the state into NEED a dummy.
2323  	 * We don't need to cover the dummy.
2324  	 */
2325  	if (*iclogs_changed == 0 &&
2326  	    iclog->ic_header.h_num_logops == cpu_to_be32(XLOG_COVER_OPS)) {
2327  		*iclogs_changed = 1;
2328  	} else {
2329  		/*
2330  		 * We have two dirty iclogs so start over.  This could also be
2331  		 * num of ops indicating this is not the dummy going out.
2332  		 */
2333  		*iclogs_changed = 2;
2334  	}
2335  
2336  	iclog->ic_state	= XLOG_STATE_ACTIVE;
2337  	iclog->ic_offset = 0;
2338  	iclog->ic_header.h_num_logops = 0;
2339  	memset(iclog->ic_header.h_cycle_data, 0,
2340  		sizeof(iclog->ic_header.h_cycle_data));
2341  	iclog->ic_header.h_lsn = 0;
2342  	iclog->ic_header.h_tail_lsn = 0;
2343  }
2344  
2345  /*
2346   * Loop through all iclogs and mark all iclogs currently marked DIRTY as
2347   * ACTIVE after iclog I/O has completed.
2348   */
2349  static void
2350  xlog_state_activate_iclogs(
2351  	struct xlog		*log,
2352  	int			*iclogs_changed)
2353  {
2354  	struct xlog_in_core	*iclog = log->l_iclog;
2355  
2356  	do {
2357  		if (iclog->ic_state == XLOG_STATE_DIRTY)
2358  			xlog_state_activate_iclog(iclog, iclogs_changed);
2359  		/*
2360  		 * The ordering of marking iclogs ACTIVE must be maintained, so
2361  		 * an iclog doesn't become ACTIVE beyond one that is SYNCING.
2362  		 */
2363  		else if (iclog->ic_state != XLOG_STATE_ACTIVE)
2364  			break;
2365  	} while ((iclog = iclog->ic_next) != log->l_iclog);
2366  }
2367  
2368  static int
2369  xlog_covered_state(
2370  	int			prev_state,
2371  	int			iclogs_changed)
2372  {
2373  	/*
2374  	 * We go to NEED for any non-covering writes. We go to NEED2 if we just
2375  	 * wrote the first covering record (DONE). We go to IDLE if we just
2376  	 * wrote the second covering record (DONE2) and remain in IDLE until a
2377  	 * non-covering write occurs.
2378  	 */
2379  	switch (prev_state) {
2380  	case XLOG_STATE_COVER_IDLE:
2381  		if (iclogs_changed == 1)
2382  			return XLOG_STATE_COVER_IDLE;
2383  		fallthrough;
2384  	case XLOG_STATE_COVER_NEED:
2385  	case XLOG_STATE_COVER_NEED2:
2386  		break;
2387  	case XLOG_STATE_COVER_DONE:
2388  		if (iclogs_changed == 1)
2389  			return XLOG_STATE_COVER_NEED2;
2390  		break;
2391  	case XLOG_STATE_COVER_DONE2:
2392  		if (iclogs_changed == 1)
2393  			return XLOG_STATE_COVER_IDLE;
2394  		break;
2395  	default:
2396  		ASSERT(0);
2397  	}
2398  
2399  	return XLOG_STATE_COVER_NEED;
2400  }
2401  
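/*
 * The mapping implemented by xlog_covered_state() above, spelled out (derived
 * from the switch statement, not an addition to the original source):
 *
 *	(IDLE,  iclogs_changed == 1) -> IDLE
 *	(DONE,  iclogs_changed == 1) -> NEED2
 *	(DONE2, iclogs_changed == 1) -> IDLE
 *	every other combination      -> NEED
 */
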
2402  STATIC void
2403  xlog_state_clean_iclog(
2404  	struct xlog		*log,
2405  	struct xlog_in_core	*dirty_iclog)
2406  {
2407  	int			iclogs_changed = 0;
2408  
2409  	trace_xlog_iclog_clean(dirty_iclog, _RET_IP_);
2410  
2411  	dirty_iclog->ic_state = XLOG_STATE_DIRTY;
2412  
2413  	xlog_state_activate_iclogs(log, &iclogs_changed);
2414  	wake_up_all(&dirty_iclog->ic_force_wait);
2415  
2416  	if (iclogs_changed) {
2417  		log->l_covered_state = xlog_covered_state(log->l_covered_state,
2418  				iclogs_changed);
2419  	}
2420  }
2421  
2422  STATIC xfs_lsn_t
2423  xlog_get_lowest_lsn(
2424  	struct xlog		*log)
2425  {
2426  	struct xlog_in_core	*iclog = log->l_iclog;
2427  	xfs_lsn_t		lowest_lsn = 0, lsn;
2428  
2429  	do {
2430  		if (iclog->ic_state == XLOG_STATE_ACTIVE ||
2431  		    iclog->ic_state == XLOG_STATE_DIRTY)
2432  			continue;
2433  
2434  		lsn = be64_to_cpu(iclog->ic_header.h_lsn);
2435  		if ((lsn && !lowest_lsn) || XFS_LSN_CMP(lsn, lowest_lsn) < 0)
2436  			lowest_lsn = lsn;
2437  	} while ((iclog = iclog->ic_next) != log->l_iclog);
2438  
2439  	return lowest_lsn;
2440  }
2441  
2442  /*
2443   * Return true if we need to stop processing, false to continue to the next
2444   * iclog. The caller will need to run callbacks if the iclog is returned in the
2445   * XLOG_STATE_CALLBACK state.
2446   */
2447  static bool
2448  xlog_state_iodone_process_iclog(
2449  	struct xlog		*log,
2450  	struct xlog_in_core	*iclog)
2451  {
2452  	xfs_lsn_t		lowest_lsn;
2453  	xfs_lsn_t		header_lsn;
2454  
2455  	switch (iclog->ic_state) {
2456  	case XLOG_STATE_ACTIVE:
2457  	case XLOG_STATE_DIRTY:
2458  		/*
2459  		 * Skip all iclogs in the ACTIVE & DIRTY states:
2460  		 */
2461  		return false;
2462  	case XLOG_STATE_DONE_SYNC:
2463  		/*
2464  		 * Now that we have an iclog that is in the DONE_SYNC state, do
2465  		 * one more check here to see if we have chased our tail around.
2466  		 * If this is not the lowest lsn iclog, then we will leave it
2467  		 * for another completion to process.
2468  		 */
2469  		header_lsn = be64_to_cpu(iclog->ic_header.h_lsn);
2470  		lowest_lsn = xlog_get_lowest_lsn(log);
2471  		if (lowest_lsn && XFS_LSN_CMP(lowest_lsn, header_lsn) < 0)
2472  			return false;
2473  		/*
2474  		 * If there are no callbacks on this iclog, we can mark it clean
2475  		 * immediately and return. Otherwise we need to run the
2476  		 * callbacks.
2477  		 */
2478  		if (list_empty(&iclog->ic_callbacks)) {
2479  			xlog_state_clean_iclog(log, iclog);
2480  			return false;
2481  		}
2482  		trace_xlog_iclog_callback(iclog, _RET_IP_);
2483  		iclog->ic_state = XLOG_STATE_CALLBACK;
2484  		return false;
2485  	default:
2486  		/*
2487  		 * Can only perform callbacks in order.  Since this iclog is not
2488  		 * in the DONE_SYNC state, we skip the rest and just try to
2489  		 * clean up.
2490  		 */
2491  		return true;
2492  	}
2493  }
2494  
2495  /*
2496   * Loop over all the iclogs, running attached callbacks on them. Return true if
2497   * we ran any callbacks, indicating that we dropped the icloglock. We don't need
2498   * to handle transient shutdown state here at all because
2499   * xlog_state_shutdown_callbacks() will be run to do the necessary shutdown
2500   * cleanup of the callbacks.
2501   */
2502  static bool
2503  xlog_state_do_iclog_callbacks(
2504  	struct xlog		*log)
2505  		__releases(&log->l_icloglock)
2506  		__acquires(&log->l_icloglock)
2507  {
2508  	struct xlog_in_core	*first_iclog = log->l_iclog;
2509  	struct xlog_in_core	*iclog = first_iclog;
2510  	bool			ran_callback = false;
2511  
2512  	do {
2513  		LIST_HEAD(cb_list);
2514  
2515  		if (xlog_state_iodone_process_iclog(log, iclog))
2516  			break;
2517  		if (iclog->ic_state != XLOG_STATE_CALLBACK) {
2518  			iclog = iclog->ic_next;
2519  			continue;
2520  		}
2521  		list_splice_init(&iclog->ic_callbacks, &cb_list);
2522  		spin_unlock(&log->l_icloglock);
2523  
2524  		trace_xlog_iclog_callbacks_start(iclog, _RET_IP_);
2525  		xlog_cil_process_committed(&cb_list);
2526  		trace_xlog_iclog_callbacks_done(iclog, _RET_IP_);
2527  		ran_callback = true;
2528  
2529  		spin_lock(&log->l_icloglock);
2530  		xlog_state_clean_iclog(log, iclog);
2531  		iclog = iclog->ic_next;
2532  	} while (iclog != first_iclog);
2533  
2534  	return ran_callback;
2535  }
2536  
2537  
2538  /*
2539   * Loop running iclog completion callbacks until there are no more iclogs in a
2540   * state that can run callbacks.
2541   */
2542  STATIC void
2543  xlog_state_do_callback(
2544  	struct xlog		*log)
2545  {
2546  	int			flushcnt = 0;
2547  	int			repeats = 0;
2548  
2549  	spin_lock(&log->l_icloglock);
2550  	while (xlog_state_do_iclog_callbacks(log)) {
2551  		if (xlog_is_shutdown(log))
2552  			break;
2553  
2554  		if (++repeats > 5000) {
2555  			flushcnt += repeats;
2556  			repeats = 0;
2557  			xfs_warn(log->l_mp,
2558  				"%s: possible infinite loop (%d iterations)",
2559  				__func__, flushcnt);
2560  		}
2561  	}
2562  
2563  	if (log->l_iclog->ic_state == XLOG_STATE_ACTIVE)
2564  		wake_up_all(&log->l_flush_wait);
2565  
2566  	spin_unlock(&log->l_icloglock);
2567  }
2568  
2569  
2570  /*
2571   * Finish transitioning this iclog to the dirty state.
2572   *
2573   * Callbacks could take time, so they are done outside the scope of the
2574   * global state machine log lock.
2575   */
2576  STATIC void
2577  xlog_state_done_syncing(
2578  	struct xlog_in_core	*iclog)
2579  {
2580  	struct xlog		*log = iclog->ic_log;
2581  
2582  	spin_lock(&log->l_icloglock);
2583  	ASSERT(atomic_read(&iclog->ic_refcnt) == 0);
2584  	trace_xlog_iclog_sync_done(iclog, _RET_IP_);
2585  
2586  	/*
2587  	 * If we got an error, either on the first buffer, or in the case of
2588  	 * split log writes, on the second, we shut down the file system and
2589  	 * no iclogs should ever be attempted to be written to disk again.
2590  	 */
2591  	if (!xlog_is_shutdown(log)) {
2592  		ASSERT(iclog->ic_state == XLOG_STATE_SYNCING);
2593  		iclog->ic_state = XLOG_STATE_DONE_SYNC;
2594  	}
2595  
2596  	/*
2597  	 * Someone could be sleeping prior to writing out the next
2598  	 * iclog buffer; we wake them all.  One will get to do the
2599  	 * I/O, the others get to wait for the result.
2600  	 */
2601  	wake_up_all(&iclog->ic_write_wait);
2602  	spin_unlock(&log->l_icloglock);
2603  	xlog_state_do_callback(log);
2604  }
2605  
2606  /*
2607   * If the head of the in-core log ring is not (ACTIVE or DIRTY), then we must
2608   * sleep.  We wait on the flush queue on the head iclog as that should be
2609   * the first iclog to complete flushing. Hence if all iclogs are syncing,
2610   * we will wait here and all new writes will sleep until a sync completes.
2611   *
2612   * The in-core logs are used in a circular fashion. They are not used
2613   * out-of-order even when an iclog past the head is free.
2614   *
2615   * return:
2616   *	* log_offset where xlog_write() can start writing into the in-core
2617   *		log's data space.
2618   *	* in-core log pointer to which xlog_write() should write.
2619   *	* boolean indicating this is a continued write to an in-core log.
2620   *		If this is the last write, then the in-core log's offset field
2621   *		needs to be incremented, depending on the amount of data which
2622   *		is copied.
2623   */
2624  STATIC int
2625  xlog_state_get_iclog_space(
2626  	struct xlog		*log,
2627  	int			len,
2628  	struct xlog_in_core	**iclogp,
2629  	struct xlog_ticket	*ticket,
2630  	int			*logoffsetp)
2631  {
2632  	int		  log_offset;
2633  	xlog_rec_header_t *head;
2634  	xlog_in_core_t	  *iclog;
2635  
2636  restart:
2637  	spin_lock(&log->l_icloglock);
2638  	if (xlog_is_shutdown(log)) {
2639  		spin_unlock(&log->l_icloglock);
2640  		return -EIO;
2641  	}
2642  
2643  	iclog = log->l_iclog;
2644  	if (iclog->ic_state != XLOG_STATE_ACTIVE) {
2645  		XFS_STATS_INC(log->l_mp, xs_log_noiclogs);
2646  
2647  		/* Wait for log writes to have flushed */
2648  		xlog_wait(&log->l_flush_wait, &log->l_icloglock);
2649  		goto restart;
2650  	}
2651  
2652  	head = &iclog->ic_header;
2653  
2654  	atomic_inc(&iclog->ic_refcnt);	/* prevents sync */
2655  	log_offset = iclog->ic_offset;
2656  
2657  	trace_xlog_iclog_get_space(iclog, _RET_IP_);
2658  
2659  	/* On the 1st write to an iclog, figure out lsn.  This works
2660  	 * if iclogs marked XLOG_STATE_WANT_SYNC always write out what they are
2661  	 * committing to.  If the offset is set, that's how many blocks
2662  	 * must be written.
2663  	 */
2664  	if (log_offset == 0) {
2665  		ticket->t_curr_res -= log->l_iclog_hsize;
2666  		head->h_cycle = cpu_to_be32(log->l_curr_cycle);
2667  		head->h_lsn = cpu_to_be64(
2668  			xlog_assign_lsn(log->l_curr_cycle, log->l_curr_block));
2669  		ASSERT(log->l_curr_block >= 0);
2670  	}
2671  
2672  	/* If there is enough room to write everything, then do it.  Otherwise,
2673  	 * claim the rest of the region and make sure the XLOG_STATE_WANT_SYNC
2674  	 * bit is on, so this will get flushed out.  Don't update ic_offset
2675  	 * until you know exactly how many bytes get copied.  Therefore, wait
2676  	 * until later to update ic_offset.
2677  	 *
2678  	 * xlog_write() algorithm assumes that at least 2 xlog_op_header_t's
2679  	 * can fit into remaining data section.
2680  	 */
2681  	if (iclog->ic_size - iclog->ic_offset < 2*sizeof(xlog_op_header_t)) {
2682  		int		error = 0;
2683  
2684  		xlog_state_switch_iclogs(log, iclog, iclog->ic_size);
2685  
2686  		/*
2687  		 * If we are the only one writing to this iclog, sync it to
2688  		 * disk.  We need to do an atomic compare and decrement here to
2689  		 * avoid racing with concurrent atomic_dec_and_lock() calls in
2690  		 * xlog_state_release_iclog() when there is more than one
2691  		 * reference to the iclog.
2692  		 */
2693  		if (!atomic_add_unless(&iclog->ic_refcnt, -1, 1))
2694  			error = xlog_state_release_iclog(log, iclog, ticket);
2695  		spin_unlock(&log->l_icloglock);
2696  		if (error)
2697  			return error;
2698  		goto restart;
2699  	}
2700  
2701  	/* Do we have enough room to write the full amount in the remainder
2702  	 * of this iclog?  Or must we continue a write on the next iclog and
2703  	 * mark this iclog as completely taken?  In the case where we switch
2704  	 * iclogs (to mark it taken), this particular iclog will release/sync
2705  	 * to disk in xlog_write().
2706  	 */
2707  	if (len <= iclog->ic_size - iclog->ic_offset)
2708  		iclog->ic_offset += len;
2709  	else
2710  		xlog_state_switch_iclogs(log, iclog, iclog->ic_size);
2711  	*iclogp = iclog;
2712  
2713  	ASSERT(iclog->ic_offset <= iclog->ic_size);
2714  	spin_unlock(&log->l_icloglock);
2715  
2716  	*logoffsetp = log_offset;
2717  	return 0;
2718  }
2719  
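/*
 * Note on the space check in xlog_state_get_iclog_space() above (an editorial
 * illustration, not from the original source): with a 12 byte xlog_op_header_t
 * the "2 * sizeof(xlog_op_header_t)" test means an iclog with fewer than 24
 * bytes of data space left is switched to WANT_SYNC rather than handed out,
 * since xlog_write() assumes room for at least two more op headers.
 */
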
2720  /*
2721   * The first cnt-1 times a ticket goes through here we don't need to move the
2722   * grant write head because the permanent reservation has reserved cnt times the
2723   * unit amount.  Release part of current permanent unit reservation and reset
2724   * current reservation to be one unit's worth.  Also move grant reservation head
2725   * forward.
2726   */
2727  void
2728  xfs_log_ticket_regrant(
2729  	struct xlog		*log,
2730  	struct xlog_ticket	*ticket)
2731  {
2732  	trace_xfs_log_ticket_regrant(log, ticket);
2733  
2734  	if (ticket->t_cnt > 0)
2735  		ticket->t_cnt--;
2736  
2737  	xlog_grant_sub_space(&log->l_reserve_head, ticket->t_curr_res);
2738  	xlog_grant_sub_space(&log->l_write_head, ticket->t_curr_res);
2739  	ticket->t_curr_res = ticket->t_unit_res;
2740  
2741  	trace_xfs_log_ticket_regrant_sub(log, ticket);
2742  
2743  	/* if the pre-reserved space is used up, regrant a full unit's worth */
2744  	if (!ticket->t_cnt) {
2745  		xlog_grant_add_space(&log->l_reserve_head, ticket->t_unit_res);
2746  		trace_xfs_log_ticket_regrant_exit(log, ticket);
2747  
2748  		ticket->t_curr_res = ticket->t_unit_res;
2749  	}
2750  
2751  	xfs_log_ticket_put(ticket);
2752  }
2753  
2754  /*
2755   * Give back the space left from a reservation.
2756   *
2757   * All the information we need to make a correct determination of space left
2758   * is present.  For non-permanent reservations, things are quite easy.  The
2759   * count should have been decremented to zero.  We only need to deal with the
2760   * space remaining in the current reservation part of the ticket.  If the
2761   * ticket contains a permanent reservation, there may be left over space which
2762   * needs to be released.  A count of N means that N-1 refills of the current
2763   * reservation can be done before we need to ask for more space.  The first
2764   * one goes to fill up the first current reservation.  Once we run out of
2765   * space, the count will stay at zero and the only space remaining will be
2766   * in the current reservation field.
2767   */
2768  void
2769  xfs_log_ticket_ungrant(
2770  	struct xlog		*log,
2771  	struct xlog_ticket	*ticket)
2772  {
2773  	int			bytes;
2774  
2775  	trace_xfs_log_ticket_ungrant(log, ticket);
2776  
2777  	if (ticket->t_cnt > 0)
2778  		ticket->t_cnt--;
2779  
2780  	trace_xfs_log_ticket_ungrant_sub(log, ticket);
2781  
2782  	/*
2783  	 * If this is a permanent reservation ticket, we may be able to free
2784  	 * up more space based on the remaining count.
2785  	 */
2786  	bytes = ticket->t_curr_res;
2787  	if (ticket->t_cnt > 0) {
2788  		ASSERT(ticket->t_flags & XLOG_TIC_PERM_RESERV);
2789  		bytes += ticket->t_unit_res*ticket->t_cnt;
2790  	}
2791  
2792  	xlog_grant_sub_space(&log->l_reserve_head, bytes);
2793  	xlog_grant_sub_space(&log->l_write_head, bytes);
2794  
2795  	trace_xfs_log_ticket_ungrant_exit(log, ticket);
2796  
2797  	xfs_log_space_wake(log->l_mp);
2798  	xfs_log_ticket_put(ticket);
2799  }
2800  
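/*
 * Worked example for xfs_log_ticket_ungrant() above (numbers assumed, not from
 * the original source): a permanent ticket with t_unit_res == 100000, a
 * remaining t_cnt of 2 on entry and t_curr_res == 20000 first drops t_cnt to
 * 1, then returns bytes == 20000 + 100000 * 1 == 120000 to both the reserve
 * and write grant heads before waking log space waiters.
 */
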
2801  /*
2802   * This routine will mark the current iclog in the ring as WANT_SYNC and move
2803   * the current iclog pointer to the next iclog in the ring.
2804   */
2805  void
2806  xlog_state_switch_iclogs(
2807  	struct xlog		*log,
2808  	struct xlog_in_core	*iclog,
2809  	int			eventual_size)
2810  {
2811  	ASSERT(iclog->ic_state == XLOG_STATE_ACTIVE);
2812  	assert_spin_locked(&log->l_icloglock);
2813  	trace_xlog_iclog_switch(iclog, _RET_IP_);
2814  
2815  	if (!eventual_size)
2816  		eventual_size = iclog->ic_offset;
2817  	iclog->ic_state = XLOG_STATE_WANT_SYNC;
2818  	iclog->ic_header.h_prev_block = cpu_to_be32(log->l_prev_block);
2819  	log->l_prev_block = log->l_curr_block;
2820  	log->l_prev_cycle = log->l_curr_cycle;
2821  
2822  	/* roll log?: ic_offset changed later */
2823  	log->l_curr_block += BTOBB(eventual_size)+BTOBB(log->l_iclog_hsize);
2824  
2825  	/* Round up to next log-sunit */
2826  	if (log->l_iclog_roundoff > BBSIZE) {
2827  		uint32_t sunit_bb = BTOBB(log->l_iclog_roundoff);
2828  		log->l_curr_block = roundup(log->l_curr_block, sunit_bb);
2829  	}
2830  
2831  	if (log->l_curr_block >= log->l_logBBsize) {
2832  		/*
2833  		 * Rewind the current block before the cycle is bumped to make
2834  		 * sure that the combined LSN never transiently moves forward
2835  		 * when the log wraps to the next cycle. This is to support the
2836  		 * unlocked sample of these fields from xlog_valid_lsn(). Most
2837  		 * other cases should acquire l_icloglock.
2838  		 */
2839  		log->l_curr_block -= log->l_logBBsize;
2840  		ASSERT(log->l_curr_block >= 0);
2841  		smp_wmb();
2842  		log->l_curr_cycle++;
2843  		if (log->l_curr_cycle == XLOG_HEADER_MAGIC_NUM)
2844  			log->l_curr_cycle++;
2845  	}
2846  	ASSERT(iclog == log->l_iclog);
2847  	log->l_iclog = iclog->ic_next;
2848  }
2849  
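/*
 * Illustrative block accounting for xlog_state_switch_iclogs() above (figures
 * assumed, not from the original source): with eventual_size == 7000 bytes and
 * a 512 byte iclog header, l_curr_block advances by BTOBB(7000) + BTOBB(512)
 * == 14 + 1 == 15 basic blocks, and is then rounded up to the next stripe
 * unit boundary if l_iclog_roundoff is larger than one basic block.
 */
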
2850  /*
2851   * Force the iclog to disk and check if the iclog has been completed before
2852   * xlog_force_iclog() returns. This can happen on synchronous (e.g.
2853   * pmem) or fast async storage because we drop the icloglock to issue the IO.
2854   * If completion has already occurred, tell the caller so that it can avoid an
2855   * unnecessary wait on the iclog.
2856   */
2857  static int
2858  xlog_force_and_check_iclog(
2859  	struct xlog_in_core	*iclog,
2860  	bool			*completed)
2861  {
2862  	xfs_lsn_t		lsn = be64_to_cpu(iclog->ic_header.h_lsn);
2863  	int			error;
2864  
2865  	*completed = false;
2866  	error = xlog_force_iclog(iclog);
2867  	if (error)
2868  		return error;
2869  
2870  	/*
2871  	 * If the iclog has already been completed and reused, the header LSN
2872  	 * will have been rewritten by completion
2873  	 */
2874  	if (be64_to_cpu(iclog->ic_header.h_lsn) != lsn)
2875  		*completed = true;
2876  	return 0;
2877  }
2878  
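/*
 * Usage note for xlog_force_and_check_iclog() above (editorial, not from the
 * original source): both xfs_log_force() and xlog_force_lsn() below call this
 * and skip straight to unlocking when *completed is set, avoiding a pointless
 * sleep in xlog_wait_on_iclog() for an iclog that has already gone to disk
 * and been recycled.
 */
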
2879  /*
2880   * Write out all data in the in-core log as of this exact moment in time.
2881   *
2882   * Data may be written to the in-core log during this call.  However,
2883   * we don't guarantee this data will be written out.  A change from past
2884   * implementation means this routine will *not* write out zero length LRs.
2885   *
2886   * Basically, we try and perform an intelligent scan of the in-core logs.
2887   * If we determine there is no flushable data, we just return.  There is no
2888   * flushable data if:
2889   *
2890   *	1. the current iclog is active and has no data; the previous iclog
2891   *		is in the active or dirty state.
2892   *	2. the current iclog is dirty, and the previous iclog is in the
2893   *		active or dirty state.
2894   *
2895   * We may sleep if:
2896   *
2897   *	1. the current iclog is not in the active nor dirty state.
2898   *	2. the current iclog is dirty, and the previous iclog is not in the
2899   *		active nor dirty state.
2900   *	3. the current iclog is active, and there is another thread writing
2901   *		to this particular iclog.
2902   *	4. a) the current iclog is active and has no other writers
2903   *	   b) when we return from flushing out this iclog, it is still
2904   *		not in the active nor dirty state.
2905   */
2906  int
2907  xfs_log_force(
2908  	struct xfs_mount	*mp,
2909  	uint			flags)
2910  {
2911  	struct xlog		*log = mp->m_log;
2912  	struct xlog_in_core	*iclog;
2913  
2914  	XFS_STATS_INC(mp, xs_log_force);
2915  	trace_xfs_log_force(mp, 0, _RET_IP_);
2916  
2917  	xlog_cil_force(log);
2918  
2919  	spin_lock(&log->l_icloglock);
2920  	if (xlog_is_shutdown(log))
2921  		goto out_error;
2922  
2923  	iclog = log->l_iclog;
2924  	trace_xlog_iclog_force(iclog, _RET_IP_);
2925  
2926  	if (iclog->ic_state == XLOG_STATE_DIRTY ||
2927  	    (iclog->ic_state == XLOG_STATE_ACTIVE &&
2928  	     atomic_read(&iclog->ic_refcnt) == 0 && iclog->ic_offset == 0)) {
2929  		/*
2930  		 * If the head is dirty or (active and empty), then we need to
2931  		 * look at the previous iclog.
2932  		 *
2933  		 * If the previous iclog is active or dirty we are done.  There
2934  		 * is nothing to sync out. Otherwise, we attach ourselves to the
2935  		 * previous iclog and go to sleep.
2936  		 */
2937  		iclog = iclog->ic_prev;
2938  	} else if (iclog->ic_state == XLOG_STATE_ACTIVE) {
2939  		if (atomic_read(&iclog->ic_refcnt) == 0) {
2940  			/* We have exclusive access to this iclog. */
2941  			bool	completed;
2942  
2943  			if (xlog_force_and_check_iclog(iclog, &completed))
2944  				goto out_error;
2945  
2946  			if (completed)
2947  				goto out_unlock;
2948  		} else {
2949  			/*
2950  			 * Someone else is still writing to this iclog, so we
2951  			 * need to ensure that when they release the iclog it
2952  			 * gets synced immediately as we may be waiting on it.
2953  			 */
2954  			xlog_state_switch_iclogs(log, iclog, 0);
2955  		}
2956  	}
2957  
2958  	/*
2959  	 * The iclog we are about to wait on may contain the checkpoint pushed
2960  	 * by the above xlog_cil_force() call, but it may not have been pushed
2961  	 * to disk yet. Like the ACTIVE case above, we need to make sure caches
2962  	 * are flushed when this iclog is written.
2963  	 */
2964  	if (iclog->ic_state == XLOG_STATE_WANT_SYNC)
2965  		iclog->ic_flags |= XLOG_ICL_NEED_FLUSH | XLOG_ICL_NEED_FUA;
2966  
2967  	if (flags & XFS_LOG_SYNC)
2968  		return xlog_wait_on_iclog(iclog);
2969  out_unlock:
2970  	spin_unlock(&log->l_icloglock);
2971  	return 0;
2972  out_error:
2973  	spin_unlock(&log->l_icloglock);
2974  	return -EIO;
2975  }
2976  
2977  /*
2978   * Force the log to a specific LSN.
2979   *
2980   * If an iclog with that lsn can be found:
2981   *	If it is in the DIRTY state, just return.
2982   *	If it is in the ACTIVE state, move the in-core log into the WANT_SYNC
2983   *		state and go to sleep or return.
2984   *	If it is in any other state, go to sleep or return.
2985   *
2986   * Synchronous forces are implemented with a wait queue.  All callers trying
2987   * to force a given lsn to disk must wait on the queue attached to the
2988   * specific in-core log.  When given in-core log finally completes its write
2989   * to disk, that thread will wake up all threads waiting on the queue.
2990   */
2991  static int
2992  xlog_force_lsn(
2993  	struct xlog		*log,
2994  	xfs_lsn_t		lsn,
2995  	uint			flags,
2996  	int			*log_flushed,
2997  	bool			already_slept)
2998  {
2999  	struct xlog_in_core	*iclog;
3000  	bool			completed;
3001  
3002  	spin_lock(&log->l_icloglock);
3003  	if (xlog_is_shutdown(log))
3004  		goto out_error;
3005  
3006  	iclog = log->l_iclog;
3007  	while (be64_to_cpu(iclog->ic_header.h_lsn) != lsn) {
3008  		trace_xlog_iclog_force_lsn(iclog, _RET_IP_);
3009  		iclog = iclog->ic_next;
3010  		if (iclog == log->l_iclog)
3011  			goto out_unlock;
3012  	}
3013  
3014  	switch (iclog->ic_state) {
3015  	case XLOG_STATE_ACTIVE:
3016  		/*
3017  		 * We sleep here if we haven't already slept (e.g. this is the
3018  		 * first time we've looked at the correct iclog buf) and the
3019  		 * buffer before us is going to be sync'ed.  The reason for this
3020  		 * is that if we are doing sync transactions here, by waiting
3021  		 * for the previous I/O to complete, we can allow a few more
3022  		 * transactions into this iclog before we close it down.
3023  		 *
3024  		 * Otherwise, we mark the buffer WANT_SYNC, and bump up the
3025  		 * refcnt so we can release the log (which drops the ref count).
3026  		 * The state switch keeps new transaction commits from using
3027  		 * this buffer.  When the current commits finish writing into
3028  		 * the buffer, the refcount will drop to zero and the buffer
3029  		 * will go out then.
3030  		 */
3031  		if (!already_slept &&
3032  		    (iclog->ic_prev->ic_state == XLOG_STATE_WANT_SYNC ||
3033  		     iclog->ic_prev->ic_state == XLOG_STATE_SYNCING)) {
3034  			xlog_wait(&iclog->ic_prev->ic_write_wait,
3035  					&log->l_icloglock);
3036  			return -EAGAIN;
3037  		}
3038  		if (xlog_force_and_check_iclog(iclog, &completed))
3039  			goto out_error;
3040  		if (log_flushed)
3041  			*log_flushed = 1;
3042  		if (completed)
3043  			goto out_unlock;
3044  		break;
3045  	case XLOG_STATE_WANT_SYNC:
3046  		/*
3047  		 * This iclog may contain the checkpoint pushed by the
3048  		 * xlog_cil_force_seq() call, but there are other writers still
3049  		 * accessing it so it hasn't been pushed to disk yet. Like the
3050  		 * ACTIVE case above, we need to make sure caches are flushed
3051  		 * when this iclog is written.
3052  		 */
3053  		iclog->ic_flags |= XLOG_ICL_NEED_FLUSH | XLOG_ICL_NEED_FUA;
3054  		break;
3055  	default:
3056  		/*
3057  		 * The entire checkpoint was written by the CIL force and is on
3058  		 * its way to disk already. It will be stable when it
3059  		 * completes, so we don't need to manipulate caches here at all.
3060  		 * We just need to wait for completion if necessary.
3061  		 */
3062  		break;
3063  	}
3064  
3065  	if (flags & XFS_LOG_SYNC)
3066  		return xlog_wait_on_iclog(iclog);
3067  out_unlock:
3068  	spin_unlock(&log->l_icloglock);
3069  	return 0;
3070  out_error:
3071  	spin_unlock(&log->l_icloglock);
3072  	return -EIO;
3073  }
3074  
3075  /*
3076   * Force the log to a specific checkpoint sequence.
3077   *
3078   * First force the CIL so that all the required changes have been flushed to the
3079   * iclogs. If the CIL force completed it will return a commit LSN that indicates
3080   * the iclog that needs to be flushed to stable storage. If the caller needs
3081   * a synchronous log force, we will wait on the iclog with the LSN returned by
3082   * xlog_cil_force_seq() to be completed.
3083   */
3084  int
3085  xfs_log_force_seq(
3086  	struct xfs_mount	*mp,
3087  	xfs_csn_t		seq,
3088  	uint			flags,
3089  	int			*log_flushed)
3090  {
3091  	struct xlog		*log = mp->m_log;
3092  	xfs_lsn_t		lsn;
3093  	int			ret;
3094  	ASSERT(seq != 0);
3095  
3096  	XFS_STATS_INC(mp, xs_log_force);
3097  	trace_xfs_log_force(mp, seq, _RET_IP_);
3098  
3099  	lsn = xlog_cil_force_seq(log, seq);
3100  	if (lsn == NULLCOMMITLSN)
3101  		return 0;
3102  
3103  	ret = xlog_force_lsn(log, lsn, flags, log_flushed, false);
3104  	if (ret == -EAGAIN) {
3105  		XFS_STATS_INC(mp, xs_log_force_sleep);
3106  		ret = xlog_force_lsn(log, lsn, flags, log_flushed, true);
3107  	}
3108  	return ret;
3109  }
3110  
3111  /*
3112   * Free a used ticket when its refcount falls to zero.
3113   */
3114  void
3115  xfs_log_ticket_put(
3116  	xlog_ticket_t	*ticket)
3117  {
3118  	ASSERT(atomic_read(&ticket->t_ref) > 0);
3119  	if (atomic_dec_and_test(&ticket->t_ref))
3120  		kmem_cache_free(xfs_log_ticket_cache, ticket);
3121  }
3122  
3123  xlog_ticket_t *
3124  xfs_log_ticket_get(
3125  	xlog_ticket_t	*ticket)
3126  {
3127  	ASSERT(atomic_read(&ticket->t_ref) > 0);
3128  	atomic_inc(&ticket->t_ref);
3129  	return ticket;
3130  }
3131  
3132  /*
3133   * Figure out the total log space unit (in bytes) that would be
3134   * required for a log ticket.
3135   */
3136  static int
3137  xlog_calc_unit_res(
3138  	struct xlog		*log,
3139  	int			unit_bytes,
3140  	int			*niclogs)
3141  {
3142  	int			iclog_space;
3143  	uint			num_headers;
3144  
3145  	/*
3146  	 * Permanent reservations have up to 'cnt'-1 active log operations
3147  	 * in the log.  A unit in this case is the amount of space for one
3148  	 * of these log operations.  Normal reservations have a cnt of 1
3149  	 * and their unit amount is the total amount of space required.
3150  	 *
3151  	 * The following lines of code account for non-transaction data
3152  	 * which occupy space in the on-disk log.
3153  	 *
3154  	 * Normal form of a transaction is:
3155  	 * <oph><trans-hdr><start-oph><reg1-oph><reg1><reg2-oph>...<commit-oph>
3156  	 * and then there are LR hdrs, split-recs and roundoff at end of syncs.
3157  	 *
3158  	 * We need to account for all the leadup data and trailer data
3159  	 * around the transaction data.
3160  	 * And then we need to account for the worst case in terms of using
3161  	 * more space.
3162  	 * The worst case will happen if:
3163  	 * - the placement of the transaction happens to be such that the
3164  	 *   roundoff is at its maximum
3165  	 * - the transaction data is synced before the commit record is synced
3166  	 *   i.e. <transaction-data><roundoff> | <commit-rec><roundoff>
3167  	 *   Therefore the commit record is in its own Log Record.
3168  	 *   This can happen as the commit record is called with its
3169  	 *   own region to xlog_write().
3170  	 *   This then means that in the worst case, roundoff can happen for
3171  	 *   the commit-rec as well.
3172  	 *   The commit-rec is smaller than padding in this scenario and so it is
3173  	 *   not added separately.
3174  	 */
3175  
3176  	/* for trans header */
3177  	unit_bytes += sizeof(xlog_op_header_t);
3178  	unit_bytes += sizeof(xfs_trans_header_t);
3179  
3180  	/* for start-rec */
3181  	unit_bytes += sizeof(xlog_op_header_t);
3182  
3183  	/*
3184  	 * for LR headers - the space for data in an iclog is the size minus
3185  	 * the space used for the headers. If we use the iclog size, then we
3186  	 * undercalculate the number of headers required.
3187  	 *
3188  	 * Furthermore - the addition of op headers for split-recs might
3189  	 * increase the space required enough to require more log and op
3190  	 * headers, so take that into account too.
3191  	 *
3192  	 * IMPORTANT: This reservation makes the assumption that if this
3193  	 * transaction is the first in an iclog and hence has the LR headers
3194  	 * accounted to it, then the remaining space in the iclog is
3195  	 * exclusively for this transaction.  i.e. if the transaction is larger
3196  	 * than the iclog, it will be the only thing in that iclog.
3197  	 * Fundamentally, this means we must pass the entire log vector to
3198  	 * xlog_write to guarantee this.
3199  	 */
3200  	iclog_space = log->l_iclog_size - log->l_iclog_hsize;
3201  	num_headers = howmany(unit_bytes, iclog_space);
3202  
3203  	/* for split-recs - ophdrs added when data split over LRs */
3204  	unit_bytes += sizeof(xlog_op_header_t) * num_headers;
3205  
3206  	/* add extra header reservations if we overrun */
3207  	while (!num_headers ||
3208  	       howmany(unit_bytes, iclog_space) > num_headers) {
3209  		unit_bytes += sizeof(xlog_op_header_t);
3210  		num_headers++;
3211  	}
3212  	unit_bytes += log->l_iclog_hsize * num_headers;
3213  
3214  	/* for commit-rec LR header - note: padding will subsume the ophdr */
3215  	unit_bytes += log->l_iclog_hsize;
3216  
3217  	/* roundoff padding for transaction data and one for commit record */
3218  	unit_bytes += 2 * log->l_iclog_roundoff;
3219  
3220  	if (niclogs)
3221  		*niclogs = num_headers;
3222  	return unit_bytes;
3223  }
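
/*
 * Worked example of the accounting above, assuming 32k iclogs (so a single
 * 512 byte header), a 512 byte roundoff (no log stripe unit), 12 byte op
 * headers and a 16 byte transaction header, with unit_bytes = 100000 on entry:
 *
 *	trans hdr + start rec:	100000 + 12 + 16 + 12	= 100040
 *	iclog_space:		32768 - 512		= 32256
 *	num_headers:		howmany(100040, 32256)	= 4
 *	split-rec ophdrs:	+ 4 * 12		= 100088
 *	LR headers:		+ 4 * 512		= 102136
 *	commit-rec LR header:	+ 512			= 102648
 *	roundoff:		+ 2 * 512		= 103672
 *
 * so the ticket reserves 103672 bytes and *niclogs is set to 4.
 */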
3224  
3225  int
3226  xfs_log_calc_unit_res(
3227  	struct xfs_mount	*mp,
3228  	int			unit_bytes)
3229  {
3230  	return xlog_calc_unit_res(mp->m_log, unit_bytes, NULL);
3231  }
3232  
3233  /*
3234   * Allocate and initialise a new log ticket.
3235   */
3236  struct xlog_ticket *
3237  xlog_ticket_alloc(
3238  	struct xlog		*log,
3239  	int			unit_bytes,
3240  	int			cnt,
3241  	bool			permanent)
3242  {
3243  	struct xlog_ticket	*tic;
3244  	int			unit_res;
3245  
3246  	tic = kmem_cache_zalloc(xfs_log_ticket_cache,
3247  			GFP_KERNEL | __GFP_NOFAIL);
3248  
3249  	unit_res = xlog_calc_unit_res(log, unit_bytes, &tic->t_iclog_hdrs);
3250  
3251  	atomic_set(&tic->t_ref, 1);
3252  	tic->t_task		= current;
3253  	INIT_LIST_HEAD(&tic->t_queue);
3254  	tic->t_unit_res		= unit_res;
3255  	tic->t_curr_res		= unit_res;
3256  	tic->t_cnt		= cnt;
3257  	tic->t_ocnt		= cnt;
3258  	tic->t_tid		= get_random_u32();
3259  	if (permanent)
3260  		tic->t_flags |= XLOG_TIC_PERM_RESERV;
3261  
3262  	return tic;
3263  }
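
/*
 * Allocation sketch: this is roughly how the log reservation path sets up a
 * permanent ticket for a rolling transaction type described by a struct
 * xfs_trans_res (the resp variable is illustrative):
 *
 *	tic = xlog_ticket_alloc(log, resp->tr_logres, resp->tr_logcount,
 *			resp->tr_logflags & XFS_TRANS_PERM_LOG_RES);
 *
 * The __GFP_NOFAIL allocation above means this never returns NULL.
 */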
3264  
3265  #if defined(DEBUG)
3266  static void
3267  xlog_verify_dump_tail(
3268  	struct xlog		*log,
3269  	struct xlog_in_core	*iclog)
3270  {
3271  	xfs_alert(log->l_mp,
3272  "ran out of log space tail 0x%llx/0x%llx, head lsn 0x%llx, head 0x%x/0x%x, prev head 0x%x/0x%x",
3273  			iclog ? be64_to_cpu(iclog->ic_header.h_tail_lsn) : -1,
3274  			atomic64_read(&log->l_tail_lsn),
3275  			log->l_ailp->ail_head_lsn,
3276  			log->l_curr_cycle, log->l_curr_block,
3277  			log->l_prev_cycle, log->l_prev_block);
3278  	xfs_alert(log->l_mp,
3279  "write grant 0x%llx, reserve grant 0x%llx, tail_space 0x%llx, size 0x%x, iclog flags 0x%x",
3280  			atomic64_read(&log->l_write_head.grant),
3281  			atomic64_read(&log->l_reserve_head.grant),
3282  			log->l_tail_space, log->l_logsize,
3283  			iclog ? iclog->ic_flags : -1);
3284  }
3285  
3286  /* Check if the new iclog will fit in the log. */
3287  STATIC void
3288  xlog_verify_tail_lsn(
3289  	struct xlog		*log,
3290  	struct xlog_in_core	*iclog)
3291  {
3292  	xfs_lsn_t	tail_lsn = be64_to_cpu(iclog->ic_header.h_tail_lsn);
3293  	int		blocks;
3294  
3295  	if (CYCLE_LSN(tail_lsn) == log->l_prev_cycle) {
3296  		blocks = log->l_logBBsize -
3297  				(log->l_prev_block - BLOCK_LSN(tail_lsn));
3298  		if (blocks < BTOBB(iclog->ic_offset) +
3299  					BTOBB(log->l_iclog_hsize)) {
3300  			xfs_emerg(log->l_mp,
3301  					"%s: ran out of log space", __func__);
3302  			xlog_verify_dump_tail(log, iclog);
3303  		}
3304  		return;
3305  	}
3306  
3307  	if (CYCLE_LSN(tail_lsn) + 1 != log->l_prev_cycle) {
3308  		xfs_emerg(log->l_mp, "%s: head has wrapped tail.", __func__);
3309  		xlog_verify_dump_tail(log, iclog);
3310  		return;
3311  	}
3312  	if (BLOCK_LSN(tail_lsn) == log->l_prev_block) {
3313  		xfs_emerg(log->l_mp, "%s: tail wrapped", __func__);
3314  		xlog_verify_dump_tail(log, iclog);
3315  		return;
3316  	}
3317  
3318  	blocks = BLOCK_LSN(tail_lsn) - log->l_prev_block;
3319  	if (blocks < BTOBB(iclog->ic_offset) + 1) {
3320  		xfs_emerg(log->l_mp, "%s: ran out of iclog space", __func__);
3321  		xlog_verify_dump_tail(log, iclog);
3322  	}
3323  }
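
/*
 * Worked illustration of the space checks above (numbers are made up): with an
 * 8192 BB log, the previous head at block 900 and the tail at block 100 of the
 * same cycle, the space in front of the head is
 *
 *	8192 - (900 - 100) = 7392 BB
 *
 * If instead the head has wrapped into the next cycle and the tail sits at
 * block 5000 of the previous cycle, the space is simply 5000 - 900 = 4100 BB.
 * In both cases the data queued in the new iclog must fit within that space or
 * the warnings above fire.
 */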
3324  
3325  /*
3326   * Perform a number of checks on the iclog before writing to disk.
3327   *
3328   * 1. Make sure the iclogs are still circular
3329   * 2. Make sure we have a good magic number
3330   * 3. Make sure we don't have magic numbers in the data
3331   * 4. Check fields of each log operation header for:
3332   *	A. Valid client identifier
3333   *	B. tid ptr value falls in valid ptr space (user space code)
3334   *	C. Length in log record header is correct according to the
3335   *		individual operation headers within record.
3336   * 5. When a bwrite will occur within 5 blocks of the front of the physical
3337   *	log, check the preceding blocks of the physical log to make sure all
3338   *	the cycle numbers agree with the current cycle number.
3339   */
3340  STATIC void
3341  xlog_verify_iclog(
3342  	struct xlog		*log,
3343  	struct xlog_in_core	*iclog,
3344  	int			count)
3345  {
3346  	xlog_op_header_t	*ophead;
3347  	xlog_in_core_t		*icptr;
3348  	xlog_in_core_2_t	*xhdr;
3349  	void			*base_ptr, *ptr, *p;
3350  	ptrdiff_t		field_offset;
3351  	uint8_t			clientid;
3352  	int			len, i, j, k, op_len;
3353  	int			idx;
3354  
3355  	/* check validity of iclog pointers */
3356  	spin_lock(&log->l_icloglock);
3357  	icptr = log->l_iclog;
3358  	for (i = 0; i < log->l_iclog_bufs; i++, icptr = icptr->ic_next)
3359  		ASSERT(icptr);
3360  
3361  	if (icptr != log->l_iclog)
3362  		xfs_emerg(log->l_mp, "%s: corrupt iclog ring", __func__);
3363  	spin_unlock(&log->l_icloglock);
3364  
3365  	/* check log magic numbers */
3366  	if (iclog->ic_header.h_magicno != cpu_to_be32(XLOG_HEADER_MAGIC_NUM))
3367  		xfs_emerg(log->l_mp, "%s: invalid magic num", __func__);
3368  
3369  	base_ptr = ptr = &iclog->ic_header;
3370  	p = &iclog->ic_header;
3371  	for (ptr += BBSIZE; ptr < base_ptr + count; ptr += BBSIZE) {
3372  		if (*(__be32 *)ptr == cpu_to_be32(XLOG_HEADER_MAGIC_NUM))
3373  			xfs_emerg(log->l_mp, "%s: unexpected magic num",
3374  				__func__);
3375  	}
3376  
3377  	/* check fields */
3378  	len = be32_to_cpu(iclog->ic_header.h_num_logops);
3379  	base_ptr = ptr = iclog->ic_datap;
3380  	ophead = ptr;
3381  	xhdr = iclog->ic_data;
3382  	for (i = 0; i < len; i++) {
3383  		ophead = ptr;
3384  
3385  		/* clientid is only 1 byte */
3386  		p = &ophead->oh_clientid;
3387  		field_offset = p - base_ptr;
3388  		if (field_offset & 0x1ff) {
3389  			clientid = ophead->oh_clientid;
3390  		} else {
3391  			idx = BTOBBT((void *)&ophead->oh_clientid - iclog->ic_datap);
3392  			if (idx >= (XLOG_HEADER_CYCLE_SIZE / BBSIZE)) {
3393  				j = idx / (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
3394  				k = idx % (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
3395  				clientid = xlog_get_client_id(
3396  					xhdr[j].hic_xheader.xh_cycle_data[k]);
3397  			} else {
3398  				clientid = xlog_get_client_id(
3399  					iclog->ic_header.h_cycle_data[idx]);
3400  			}
3401  		}
3402  		if (clientid != XFS_TRANSACTION && clientid != XFS_LOG) {
3403  			xfs_warn(log->l_mp,
3404  				"%s: op %d invalid clientid %d op "PTR_FMT" offset 0x%lx",
3405  				__func__, i, clientid, ophead,
3406  				(unsigned long)field_offset);
3407  		}
3408  
3409  		/* check length */
3410  		p = &ophead->oh_len;
3411  		field_offset = p - base_ptr;
3412  		if (field_offset & 0x1ff) {
3413  			op_len = be32_to_cpu(ophead->oh_len);
3414  		} else {
3415  			idx = BTOBBT((void *)&ophead->oh_len - iclog->ic_datap);
3416  			if (idx >= (XLOG_HEADER_CYCLE_SIZE / BBSIZE)) {
3417  				j = idx / (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
3418  				k = idx % (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
3419  				op_len = be32_to_cpu(xhdr[j].hic_xheader.xh_cycle_data[k]);
3420  			} else {
3421  				op_len = be32_to_cpu(iclog->ic_header.h_cycle_data[idx]);
3422  			}
3423  		}
3424  		ptr += sizeof(xlog_op_header_t) + op_len;
3425  	}
3426  }
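
/*
 * Note on the index arithmetic above: before a log record is written, the
 * first word of every 512 byte block is replaced with the cycle number and the
 * original word is stashed in the record header, so a field that lands exactly
 * on a 512 byte boundary must be read back from that saved copy. The primary
 * header holds the first XLOG_HEADER_CYCLE_SIZE / BBSIZE = 64 saved words;
 * beyond that the extended headers are indexed, for example:
 *
 *	idx = 70:  j = 70 / 64 = 1, k = 70 % 64 = 6
 *		-> xhdr[1].hic_xheader.xh_cycle_data[6]
 */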
3427  #endif
3428  
3429  /*
3430   * Perform a forced shutdown on the log.
3431   *
3432   * This can be called from low level log code to trigger a shutdown, or from the
3433   * high level mount shutdown code when the mount shuts down.
3434   *
3435   * Our main objectives here are to make sure that:
3436   *	a. if the shutdown was not due to a log IO error, flush the logs to
3437   *	   disk. Anything modified after this is ignored.
3438   *	b. the log gets atomically marked 'XLOG_IO_ERROR' for all interested
3439   *	   parties to find out. Nothing new gets queued after this is done.
3440   *	c. Tasks sleeping on log reservations, pinned objects and
3441   *	   other resources get woken up.
3442   *	d. The mount is also marked as shut down so that log triggered shutdowns
3443   *	   still behave the same as if they called xfs_forced_shutdown().
3444   *
3445   * Return true if the shutdown cause was a log IO error and we actually shut the
3446   * log down.
3447   */
3448  bool
3449  xlog_force_shutdown(
3450  	struct xlog	*log,
3451  	uint32_t	shutdown_flags)
3452  {
3453  	bool		log_error = (shutdown_flags & SHUTDOWN_LOG_IO_ERROR);
3454  
3455  	if (!log)
3456  		return false;
3457  
3458  	/*
3459  	 * Flush all the completed transactions to disk before marking the log
3460  	 * being shut down. We need to do this first as shutting down the log
3461  	 * before the force will prevent the log force from flushing the iclogs
3462  	 * to disk.
3463  	 *
3464  	 * When we are in recovery, there are no transactions to flush, and
3465  	 * we don't want to touch the log because we don't want to perturb the
3466  	 * current head/tail for future recovery attempts. Hence we need to
3467  	 * avoid a log force in this case.
3468  	 *
3469  	 * If we are shutting down due to a log IO error, then we must avoid
3470  	 * trying to write the log as that may just result in more IO errors and
3471  	 * an endless shutdown/force loop.
3472  	 */
3473  	if (!log_error && !xlog_in_recovery(log))
3474  		xfs_log_force(log->l_mp, XFS_LOG_SYNC);
3475  
3476  	/*
3477  	 * Atomically set the shutdown state. If the shutdown state is already
3478  	 * set, someone else is already performing the shutdown and so we are done
3479  	 * here. This should never happen because we should only ever get called
3480  	 * once by the first shutdown caller.
3481  	 *
3482  	 * Much of the log state machine transitions assume that shutdown state
3483  	 * cannot change once they hold the log->l_icloglock. Hence we need to
3484  	 * hold that lock here, even though we use the atomic test_and_set_bit()
3485  	 * operation to set the shutdown state.
3486  	 */
3487  	spin_lock(&log->l_icloglock);
3488  	if (test_and_set_bit(XLOG_IO_ERROR, &log->l_opstate)) {
3489  		spin_unlock(&log->l_icloglock);
3490  		return false;
3491  	}
3492  	spin_unlock(&log->l_icloglock);
3493  
3494  	/*
3495  	 * If this log shutdown also sets the mount shutdown state, issue a
3496  	 * shutdown warning message.
3497  	 */
3498  	if (!xfs_set_shutdown(log->l_mp)) {
3499  		xfs_alert_tag(log->l_mp, XFS_PTAG_SHUTDOWN_LOGERROR,
3500  "Filesystem has been shut down due to log error (0x%x).",
3501  				shutdown_flags);
3502  		xfs_alert(log->l_mp,
3503  "Please unmount the filesystem and rectify the problem(s).");
3504  		if (xfs_error_level >= XFS_ERRLEVEL_HIGH)
3505  			xfs_stack_trace();
3506  	}
3507  
3508  	/*
3509  	 * We don't want anybody waiting for log reservations after this. That
3510  	 * means we have to wake up everybody queued up on reserveq as well as
3511  	 * writeq.  In addition, we make sure in xlog_{re}grant_log_space that
3512  	 * we don't enqueue anything once the SHUTDOWN flag is set, and this
3513  	 * action is protected by the grant locks.
3514  	 */
3515  	xlog_grant_head_wake_all(&log->l_reserve_head);
3516  	xlog_grant_head_wake_all(&log->l_write_head);
3517  
3518  	/*
3519  	 * Wake up everybody waiting on xfs_log_force. Wake the CIL push first
3520  	 * as if the log writes were completed. The abort handling in the log
3521  	 * item committed callback functions will do this again under lock to
3522  	 * avoid races.
3523  	 */
3524  	spin_lock(&log->l_cilp->xc_push_lock);
3525  	wake_up_all(&log->l_cilp->xc_start_wait);
3526  	wake_up_all(&log->l_cilp->xc_commit_wait);
3527  	spin_unlock(&log->l_cilp->xc_push_lock);
3528  
3529  	spin_lock(&log->l_icloglock);
3530  	xlog_state_shutdown_callbacks(log);
3531  	spin_unlock(&log->l_icloglock);
3532  
3533  	wake_up_var(&log->l_opstate);
3534  	return log_error;
3535  }
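
/*
 * Usage sketch (the log_write_failed condition is a placeholder, not a real
 * variable): log I/O completion shuts the log down on a persistent write
 * error, and the generic filesystem shutdown path funnels through here too:
 *
 *	if (log_write_failed)
 *		xlog_force_shutdown(log, SHUTDOWN_LOG_IO_ERROR);
 *
 * Only the first caller does the work; later callers find the state bit
 * already set and return false.
 */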
3536  
3537  STATIC int
3538  xlog_iclogs_empty(
3539  	struct xlog	*log)
3540  {
3541  	xlog_in_core_t	*iclog;
3542  
3543  	iclog = log->l_iclog;
3544  	do {
3545  		/* endianness does not matter here, zero is zero in
3546  		 * any language.
3547  		 */
3548  		if (iclog->ic_header.h_num_logops)
3549  			return 0;
3550  		iclog = iclog->ic_next;
3551  	} while (iclog != log->l_iclog);
3552  	return 1;
3553  }
3554  
3555  /*
3556   * Verify that an LSN stamped into a piece of metadata is valid. This is
3557   * intended for use in read verifiers on v5 superblocks.
3558   */
3559  bool
3560  xfs_log_check_lsn(
3561  	struct xfs_mount	*mp,
3562  	xfs_lsn_t		lsn)
3563  {
3564  	struct xlog		*log = mp->m_log;
3565  	bool			valid;
3566  
3567  	/*
3568  	 * norecovery mode skips mount-time log processing and unconditionally
3569  	 * resets the in-core LSN. We can't validate in this mode, but
3570  	 * modifications are not allowed anyway, so just return true.
3571  	 */
3572  	if (xfs_has_norecovery(mp))
3573  		return true;
3574  
3575  	/*
3576  	 * Some metadata LSNs are initialized to NULL (e.g., the agfl). This is
3577  	 * handled by recovery and thus safe to ignore here.
3578  	 */
3579  	if (lsn == NULLCOMMITLSN)
3580  		return true;
3581  
3582  	valid = xlog_valid_lsn(mp->m_log, lsn);
3583  
3584  	/* warn the user about what's gone wrong before verifier failure */
3585  	if (!valid) {
3586  		spin_lock(&log->l_icloglock);
3587  		xfs_warn(mp,
3588  "Corruption warning: Metadata has LSN (%d:%d) ahead of current LSN (%d:%d). "
3589  "Please unmount and run xfs_repair (>= v4.3) to resolve.",
3590  			 CYCLE_LSN(lsn), BLOCK_LSN(lsn),
3591  			 log->l_curr_cycle, log->l_curr_block);
3592  		spin_unlock(&log->l_icloglock);
3593  	}
3594  
3595  	return valid;
3596  }
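
/*
 * Typical use (sketch; "hdr" and its lsn field are hypothetical): a v5 buffer
 * read verifier compares the LSN stamped in the on-disk structure against the
 * current log:
 *
 *	if (!xfs_log_check_lsn(mp, be64_to_cpu(hdr->lsn)))
 *		return __this_address;
 *
 * so metadata stamped with an LSN ahead of the current head of the log is
 * caught at read time rather than silently trusted.
 */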
3597