// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
 * Copyright (c) 2008 Dave Chinner
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_trace.h"
#include "xfs_errortag.h"
#include "xfs_error.h"
#include "xfs_log.h"
#include "xfs_log_priv.h"

#ifdef DEBUG
/*
 * Check that the list is sorted as it should be.
 *
 * Called with the ail lock held, but we don't want to assert fail with it
 * held otherwise we'll lock everything up and won't be able to debug the
 * cause. Hence we sample and check the state under the AIL lock and return if
 * everything is fine, otherwise we drop the lock and run the ASSERT checks.
 * Asserts may not be fatal, so pick the lock back up and continue onwards.
 */
STATIC void
xfs_ail_check(
	struct xfs_ail		*ailp,
	struct xfs_log_item	*lip)
	__must_hold(&ailp->ail_lock)
{
	struct xfs_log_item	*prev_lip;
	struct xfs_log_item	*next_lip;
	xfs_lsn_t		prev_lsn = NULLCOMMITLSN;
	xfs_lsn_t		next_lsn = NULLCOMMITLSN;
	xfs_lsn_t		lsn;
	bool			in_ail;

	if (list_empty(&ailp->ail_head))
		return;

	/*
	 * Sample then check the next and previous entries are valid.
	 */
	in_ail = test_bit(XFS_LI_IN_AIL, &lip->li_flags);
	prev_lip = list_entry(lip->li_ail.prev, struct xfs_log_item, li_ail);
	if (&prev_lip->li_ail != &ailp->ail_head)
		prev_lsn = prev_lip->li_lsn;
	next_lip = list_entry(lip->li_ail.next, struct xfs_log_item, li_ail);
	if (&next_lip->li_ail != &ailp->ail_head)
		next_lsn = next_lip->li_lsn;
	lsn = lip->li_lsn;

	if (in_ail &&
	    (prev_lsn == NULLCOMMITLSN || XFS_LSN_CMP(prev_lsn, lsn) <= 0) &&
	    (next_lsn == NULLCOMMITLSN || XFS_LSN_CMP(next_lsn, lsn) >= 0))
		return;

	spin_unlock(&ailp->ail_lock);
	ASSERT(in_ail);
	ASSERT(prev_lsn == NULLCOMMITLSN || XFS_LSN_CMP(prev_lsn, lsn) <= 0);
	ASSERT(next_lsn == NULLCOMMITLSN || XFS_LSN_CMP(next_lsn, lsn) >= 0);
	spin_lock(&ailp->ail_lock);
}
#else /* !DEBUG */
#define	xfs_ail_check(a,l)
#endif /* DEBUG */

/*
 * Return a pointer to the last item in the AIL.  If the AIL is empty, then
 * return NULL.
 */
static struct xfs_log_item *
xfs_ail_max(
	struct xfs_ail  *ailp)
{
	if (list_empty(&ailp->ail_head))
		return NULL;

	return list_entry(ailp->ail_head.prev, struct xfs_log_item, li_ail);
}
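
/*
 * Note: the xfs_ail_min() counterpart used below is a static inline helper
 * in xfs_trans_priv.h that returns the first (lowest LSN) item in the AIL,
 * or NULL if the list is empty.
 */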

/*
 * Return a pointer to the item which follows the given item in the AIL.  If
 * the given item is the last item in the list, then return NULL.
 */
static struct xfs_log_item *
xfs_ail_next(
	struct xfs_ail		*ailp,
	struct xfs_log_item	*lip)
{
	if (lip->li_ail.next == &ailp->ail_head)
		return NULL;

	return list_first_entry(&lip->li_ail, struct xfs_log_item, li_ail);
}

/*
 * This is called by the log manager code to determine the LSN of the tail of
 * the log.  This is exactly the LSN of the first item in the AIL.  If the AIL
 * is empty, then this function returns 0.
 *
 * We need the AIL lock in order to get a coherent read of the lsn of the first
 * item in the AIL.
 */
static xfs_lsn_t
__xfs_ail_min_lsn(
	struct xfs_ail		*ailp)
{
	struct xfs_log_item	*lip = xfs_ail_min(ailp);

	if (lip)
		return lip->li_lsn;
	return 0;
}

xfs_lsn_t
xfs_ail_min_lsn(
	struct xfs_ail		*ailp)
{
	xfs_lsn_t		lsn;

	spin_lock(&ailp->ail_lock);
	lsn = __xfs_ail_min_lsn(ailp);
	spin_unlock(&ailp->ail_lock);

	return lsn;
}

/*
 * The cursor keeps track of where our current traversal is up to by tracking
 * the next item in the list for us. However, for this to be safe, removing an
 * object from the AIL needs to invalidate any cursor that points to it. Hence
 * the traversal cursor needs to be linked to the struct xfs_ail so that
 * deletion can search all the active cursors for invalidation.
 */
STATIC void
xfs_trans_ail_cursor_init(
	struct xfs_ail		*ailp,
	struct xfs_ail_cursor	*cur)
{
	cur->item = NULL;
	list_add_tail(&cur->list, &ailp->ail_cursors);
}

/*
 * Get the next item in the traversal and advance the cursor.  If the cursor
 * was invalidated (indicated by the low bit being set in the item pointer),
 * restart the traversal from the start of the AIL.
 */
struct xfs_log_item *
xfs_trans_ail_cursor_next(
	struct xfs_ail		*ailp,
	struct xfs_ail_cursor	*cur)
{
	struct xfs_log_item	*lip = cur->item;

	if ((uintptr_t)lip & 1)
		lip = xfs_ail_min(ailp);
	if (lip)
		cur->item = xfs_ail_next(ailp, lip);
	return lip;
}

/*
 * When the traversal is complete, we need to remove the cursor from the list
 * of traversing cursors.
 */
void
xfs_trans_ail_cursor_done(
	struct xfs_ail_cursor	*cur)
{
	cur->item = NULL;
	list_del_init(&cur->list);
}
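
/*
 * Illustrative sketch of a typical cursor traversal (this is how
 * xfsaild_push() below drives the API):
 *
 *	spin_lock(&ailp->ail_lock);
 *	lip = xfs_trans_ail_cursor_first(ailp, &cur, lsn);
 *	while (lip != NULL) {
 *		// process lip; the AIL lock may be dropped and retaken
 *		lip = xfs_trans_ail_cursor_next(ailp, &cur);
 *	}
 *	xfs_trans_ail_cursor_done(&cur);
 *	spin_unlock(&ailp->ail_lock);
 */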

/*
 * Invalidate any cursor that is pointing to this item. This is called when an
 * item is removed from the AIL. Any cursor pointing to this object is now
 * invalid and the traversal needs to be terminated so it doesn't reference a
 * freed object. We set the low bit of the cursor item pointer so we can
 * distinguish between an invalidation and the end of the list when getting the
 * next item from the cursor.
 */
STATIC void
xfs_trans_ail_cursor_clear(
	struct xfs_ail		*ailp,
	struct xfs_log_item	*lip)
{
	struct xfs_ail_cursor	*cur;

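	/*
	 * Tag the pointer rather than clearing it: cur->item becomes
	 * (lip | 1), which xfs_trans_ail_cursor_next() detects via the low
	 * bit and restarts the traversal.  Log items are pointer-aligned,
	 * so the low bit of a valid pointer is always zero.
	 */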
	list_for_each_entry(cur, &ailp->ail_cursors, list) {
		if (cur->item == lip)
			cur->item = (struct xfs_log_item *)
					((uintptr_t)cur->item | 1);
	}
}

/*
 * Find the first item in the AIL with the given @lsn by searching in ascending
 * LSN order and initialise the cursor to point to the next item for an
 * ascending traversal.  Pass a @lsn of zero to initialise the cursor to the
 * first item in the AIL. Returns NULL if the list is empty.
 */
struct xfs_log_item *
xfs_trans_ail_cursor_first(
	struct xfs_ail		*ailp,
	struct xfs_ail_cursor	*cur,
	xfs_lsn_t		lsn)
{
	struct xfs_log_item	*lip;

	xfs_trans_ail_cursor_init(ailp, cur);

	if (lsn == 0) {
		lip = xfs_ail_min(ailp);
		goto out;
	}

	list_for_each_entry(lip, &ailp->ail_head, li_ail) {
		if (XFS_LSN_CMP(lip->li_lsn, lsn) >= 0)
			goto out;
	}
	return NULL;

out:
	if (lip)
		cur->item = xfs_ail_next(ailp, lip);
	return lip;
}

static struct xfs_log_item *
__xfs_trans_ail_cursor_last(
	struct xfs_ail		*ailp,
	xfs_lsn_t		lsn)
{
	struct xfs_log_item	*lip;

	list_for_each_entry_reverse(lip, &ailp->ail_head, li_ail) {
		if (XFS_LSN_CMP(lip->li_lsn, lsn) <= 0)
			return lip;
	}
	return NULL;
}

/*
 * Find the last item in the AIL with the given @lsn by searching in descending
 * LSN order and initialise the cursor to point to that item.  If there is no
 * item with the value of @lsn, then it sets the cursor to the last item with an
 * LSN lower than @lsn.  Returns NULL if the list is empty.
 */
struct xfs_log_item *
xfs_trans_ail_cursor_last(
	struct xfs_ail		*ailp,
	struct xfs_ail_cursor	*cur,
	xfs_lsn_t		lsn)
{
	xfs_trans_ail_cursor_init(ailp, cur);
	cur->item = __xfs_trans_ail_cursor_last(ailp, lsn);
	return cur->item;
}

/*
 * Splice the log item list into the AIL at the given LSN. We splice to the
 * tail of the given LSN to maintain insert order for push traversals. The
 * cursor is optional, allowing repeated updates to the same LSN to avoid
 * repeated traversals.  This should not be called with an empty list.
 */
static void
xfs_ail_splice(
	struct xfs_ail		*ailp,
	struct xfs_ail_cursor	*cur,
	struct list_head	*list,
	xfs_lsn_t		lsn)
{
	struct xfs_log_item	*lip;

	ASSERT(!list_empty(list));

	/*
	 * Use the cursor to determine the insertion point if one is
	 * provided.  If not, or if the one we got is not valid,
	 * find the place in the AIL where the items belong.
	 */
	lip = cur ? cur->item : NULL;
	if (!lip || (uintptr_t)lip & 1)
		lip = __xfs_trans_ail_cursor_last(ailp, lsn);

	/*
	 * If a cursor is provided, we know we're processing the AIL
	 * in lsn order, and future items to be spliced in will
	 * follow the last one being inserted now.  Update the
	 * cursor to point to that last item, now while we have a
	 * reliable pointer to it.
	 */
	if (cur)
		cur->item = list_entry(list->prev, struct xfs_log_item, li_ail);

	/*
	 * Finally perform the splice.  Unless the AIL was empty,
	 * lip points to the item in the AIL _after_ which the new
	 * items should go.  If lip is null the AIL was empty, so
	 * the new items go at the head of the AIL.
	 */
	if (lip)
		list_splice(list, &lip->li_ail);
	else
		list_splice(list, &ailp->ail_head);
}

/*
 * Delete the given item from the AIL, invalidating any cursors that point
 * to it.
 */
static void
xfs_ail_delete(
	struct xfs_ail		*ailp,
	struct xfs_log_item	*lip)
{
	xfs_ail_check(ailp, lip);
	list_del(&lip->li_ail);
	xfs_trans_ail_cursor_clear(ailp, lip);
}

/*
 * Requeue a failed buffer for writeback.
 *
 * We clear the log item failed state here as well, but we have to be careful
 * about reference counts because the only active reference counts on the buffer
 * may be the failed log items. Hence if we clear the log item failed state
 * before queuing the buffer for IO we can release all active references to
 * the buffer and free it, leading to use after free problems in
 * xfs_buf_delwri_queue. It makes no difference to the buffer or log items which
 * order we process them in - the buffer is locked, and we own the buffer list
 * so nothing on them is going to change while we are performing this action.
 *
 * Hence we can safely queue the buffer for IO before we clear the failed log
 * item state, therefore always having an active reference to the buffer and
 * avoiding the transient zero-reference state that leads to use-after-free.
 */
static inline int
xfsaild_resubmit_item(
	struct xfs_log_item	*lip,
	struct list_head	*buffer_list)
{
	struct xfs_buf		*bp = lip->li_buf;

	if (!xfs_buf_trylock(bp))
		return XFS_ITEM_LOCKED;

	if (!xfs_buf_delwri_queue(bp, buffer_list)) {
		xfs_buf_unlock(bp);
		return XFS_ITEM_FLUSHING;
	}

	/* protected by ail_lock */
	list_for_each_entry(lip, &bp->b_li_list, li_bio_list) {
		if (bp->b_flags & _XBF_INODES)
			clear_bit(XFS_LI_FAILED, &lip->li_flags);
		else
			xfs_clear_li_failed(lip);
	}

	xfs_buf_unlock(bp);
	return XFS_ITEM_SUCCESS;
}

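/*
 * Push a single log item towards the disk.  The return codes here come from
 * xfs_trans.h: XFS_ITEM_SUCCESS means the item was queued for writeback,
 * XFS_ITEM_PINNED means a log force is needed to unpin it, XFS_ITEM_LOCKED
 * means someone else holds the lock and we should retry later, and
 * XFS_ITEM_FLUSHING means IO on the item is already in progress.
 */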
static inline uint
xfsaild_push_item(
	struct xfs_ail		*ailp,
	struct xfs_log_item	*lip)
{
	/*
	 * If log item pinning is enabled, skip the push and track the item as
	 * pinned. This can help induce head-behind-tail conditions.
	 */
	if (XFS_TEST_ERROR(false, ailp->ail_log->l_mp, XFS_ERRTAG_LOG_ITEM_PIN))
		return XFS_ITEM_PINNED;

	/*
	 * Consider the item pinned if a push callback is not defined so the
	 * caller will force the log. This should only happen for intent items
	 * as they are unpinned once the associated done item is committed to
	 * the on-disk log.
	 */
	if (!lip->li_ops->iop_push)
		return XFS_ITEM_PINNED;
	if (test_bit(XFS_LI_FAILED, &lip->li_flags))
		return xfsaild_resubmit_item(lip, &ailp->ail_buf_list);
	return lip->li_ops->iop_push(lip, &ailp->ail_buf_list);
}

/*
 * Compute the LSN that we'd need to push the log tail towards in order to have
 * at least 25% of the log space free.  If the log free space already meets this
 * threshold, this function returns the lowest LSN in the AIL to slowly keep
 * writeback ticking over and the tail of the log moving forward.
 */
static xfs_lsn_t
xfs_ail_calc_push_target(
	struct xfs_ail		*ailp)
{
	struct xlog		*log = ailp->ail_log;
	struct xfs_log_item	*lip;
	xfs_lsn_t		target_lsn;
	xfs_lsn_t		max_lsn;
	xfs_lsn_t		min_lsn;
	int32_t			free_bytes;
	uint32_t		target_block;
	uint32_t		target_cycle;

	lockdep_assert_held(&ailp->ail_lock);

	lip = xfs_ail_max(ailp);
	if (!lip)
		return NULLCOMMITLSN;

	max_lsn = lip->li_lsn;
	min_lsn = __xfs_ail_min_lsn(ailp);

	/*
	 * If we are supposed to push all the items in the AIL, we want to push
	 * to the current head. We then clear the push flag so that we don't
	 * keep pushing newly queued items beyond where the push all command was
	 * run. If the push waiter wants to empty the AIL, it should queue
	 * itself on the ail_empty wait queue.
	 */
	if (test_and_clear_bit(XFS_AIL_OPSTATE_PUSH_ALL, &ailp->ail_opstate))
		return max_lsn;

	/* If someone wants the AIL empty, keep pushing everything we have. */
	if (waitqueue_active(&ailp->ail_empty))
		return max_lsn;

	/*
	 * Background pushing - attempt to keep 25% of the log free and if we
	 * have that much free retain the existing target.
	 */
	free_bytes = log->l_logsize - xlog_lsn_sub(log, max_lsn, min_lsn);
	if (free_bytes >= log->l_logsize >> 2)
		return ailp->ail_target;

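	/*
	 * Set the target 25% of the physical log size in front of the
	 * current tail.  The block count is in basic blocks (l_logBBsize),
	 * so if the sum runs past the physical end of the log we wrap the
	 * block number and bump the cycle, exactly as LSNs do.
	 */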
	target_cycle = CYCLE_LSN(min_lsn);
	target_block = BLOCK_LSN(min_lsn) + (log->l_logBBsize >> 2);
	if (target_block >= log->l_logBBsize) {
		target_block -= log->l_logBBsize;
		target_cycle += 1;
	}
	target_lsn = xlog_assign_lsn(target_cycle, target_block);

	/* Cap the target to the highest LSN known to be in the AIL. */
	if (XFS_LSN_CMP(target_lsn, max_lsn) > 0)
		return max_lsn;

	/* If the existing target is higher than the new target, keep it. */
	if (XFS_LSN_CMP(ailp->ail_target, target_lsn) >= 0)
		return ailp->ail_target;
	return target_lsn;
}

static long
xfsaild_push(
	struct xfs_ail		*ailp)
{
	struct xfs_mount	*mp = ailp->ail_log->l_mp;
	struct xfs_ail_cursor	cur;
	struct xfs_log_item	*lip;
	xfs_lsn_t		lsn;
	long			tout;
	int			stuck = 0;
	int			flushing = 0;
	int			count = 0;

	/*
	 * If we encountered pinned items or did not finish writing out all
	 * buffers the last time we ran, force a background CIL push to get the
	 * items unpinned in the near future. We do not wait on the CIL push as
	 * that could stall us for seconds if there is enough background IO
	 * load. Stalling for that long when the tail of the log is pinned and
	 * needs flushing will hard stop the transaction subsystem when log
	 * space runs out.
	 */
	if (ailp->ail_log_flush && ailp->ail_last_pushed_lsn == 0 &&
	    (!list_empty_careful(&ailp->ail_buf_list) ||
	     xfs_ail_min_lsn(ailp))) {
		ailp->ail_log_flush = 0;

		XFS_STATS_INC(mp, xs_push_ail_flush);
		xlog_cil_flush(ailp->ail_log);
	}

	spin_lock(&ailp->ail_lock);
	WRITE_ONCE(ailp->ail_target, xfs_ail_calc_push_target(ailp));
	if (ailp->ail_target == NULLCOMMITLSN)
		goto out_done;

	/* we're done if the AIL is empty or our push has reached the end */
	lip = xfs_trans_ail_cursor_first(ailp, &cur, ailp->ail_last_pushed_lsn);
	if (!lip)
		goto out_done_cursor;

	XFS_STATS_INC(mp, xs_push_ail);

	ASSERT(ailp->ail_target != NULLCOMMITLSN);

	lsn = lip->li_lsn;
	while (XFS_LSN_CMP(lip->li_lsn, ailp->ail_target) <= 0) {
		int	lock_result;

		if (test_bit(XFS_LI_FLUSHING, &lip->li_flags))
			goto next_item;

		/*
		 * Note that iop_push may unlock and reacquire the AIL lock.  We
		 * rely on the AIL cursor implementation to be able to deal with
		 * the dropped lock.
		 */
		lock_result = xfsaild_push_item(ailp, lip);
		switch (lock_result) {
		case XFS_ITEM_SUCCESS:
			XFS_STATS_INC(mp, xs_push_ail_success);
			trace_xfs_ail_push(lip);

			ailp->ail_last_pushed_lsn = lsn;
			break;

		case XFS_ITEM_FLUSHING:
			/*
			 * The item or its backing buffer is already being
			 * flushed.  The typical reason for that is that an
			 * inode buffer is locked because we already pushed the
			 * updates to it as part of inode clustering.
			 *
			 * We do not want to stop flushing just because lots
			 * of items are already being flushed, but we need to
			 * re-try the flushing relatively soon if most of the
			 * AIL is being flushed.
			 */
			XFS_STATS_INC(mp, xs_push_ail_flushing);
			trace_xfs_ail_flushing(lip);

			flushing++;
			ailp->ail_last_pushed_lsn = lsn;
			break;

		case XFS_ITEM_PINNED:
			XFS_STATS_INC(mp, xs_push_ail_pinned);
			trace_xfs_ail_pinned(lip);

			stuck++;
			ailp->ail_log_flush++;
			break;
		case XFS_ITEM_LOCKED:
			XFS_STATS_INC(mp, xs_push_ail_locked);
			trace_xfs_ail_locked(lip);

			stuck++;
			break;
		default:
			ASSERT(0);
			break;
		}

		count++;

		/*
		 * Are there too many items we can't do anything with?
		 *
		 * If we are skipping too many items because we can't flush
		 * them or they are already being flushed, we back off and
		 * give them time to complete whatever operation is being
		 * done. i.e. remove pressure from the AIL while we can't make
		 * progress so traversals don't slow down further inserts and
		 * removals to/from the AIL.
		 *
		 * The value of 100 is an arbitrary magic number based on
		 * observation.
		 */
		if (stuck > 100)
			break;

next_item:
		lip = xfs_trans_ail_cursor_next(ailp, &cur);
		if (lip == NULL)
			break;
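		/*
		 * Stop after pushing a large batch, but only on an LSN
		 * boundary so that items sharing an LSN are always
		 * processed in the same pass.
		 */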
		if (lip->li_lsn != lsn && count > 1000)
			break;
		lsn = lip->li_lsn;
	}

out_done_cursor:
	xfs_trans_ail_cursor_done(&cur);
out_done:
	spin_unlock(&ailp->ail_lock);

	if (xfs_buf_delwri_submit_nowait(&ailp->ail_buf_list))
		ailp->ail_log_flush++;

	if (!count || XFS_LSN_CMP(lsn, ailp->ail_target) >= 0) {
		/*
		 * We reached the target or the AIL is empty, so wait a bit
		 * longer for I/O to complete and remove pushed items from the
		 * AIL before we start the next scan from the start of the AIL.
		 */
		tout = 50;
		ailp->ail_last_pushed_lsn = 0;
	} else if (((stuck + flushing) * 100) / count > 90) {
		/*
		 * Either there is a lot of contention on the AIL or we are
		 * stuck due to operations in progress. "Stuck" in this case
		 * is defined as >90% of the items we tried to push were stuck.
		 *
		 * Back off a bit more to allow some I/O to complete before
		 * restarting from the start of the AIL. This prevents us from
		 * spinning on the same items, and if they are pinned allows
		 * the restart to issue a log force to unpin the stuck items.
		 */
		tout = 20;
		ailp->ail_last_pushed_lsn = 0;
	} else {
		/*
		 * Assume we have more work to do in a short while.
		 */
		tout = 0;
	}

	return tout;
}

static int
xfsaild(
	void		*data)
{
	struct xfs_ail	*ailp = data;
	long		tout = 0;	/* milliseconds */
	unsigned int	noreclaim_flag;

	noreclaim_flag = memalloc_noreclaim_save();
	set_freezable();

	while (1) {
		/*
		 * Long waits of 50ms or more occur when we've run out of items
		 * to push, so we only want uninterruptible state if we're
		 * actually blocked on something.
		 */
		if (tout && tout <= 20)
			set_current_state(TASK_KILLABLE|TASK_FREEZABLE);
		else
			set_current_state(TASK_INTERRUPTIBLE|TASK_FREEZABLE);

		/*
		 * Check kthread_should_stop() after we set the task state to
		 * guarantee that we either see the stop bit and exit or the
		 * task state is reset to runnable such that it's not scheduled
		 * out indefinitely and detects the stop bit at next iteration.
		 * A memory barrier is included in above task state set to
		 * serialize against kthread_stop().
		 */
		if (kthread_should_stop()) {
			__set_current_state(TASK_RUNNING);

			/*
			 * The caller forces out the AIL before stopping the
			 * thread in the common case, which means the delwri
			 * queue is drained. In the shutdown case, the queue may
			 * still hold relogged buffers that haven't been
			 * submitted because they were pinned since added to the
			 * queue.
			 *
			 * Log I/O error processing stales the underlying buffer
			 * and clears the delwri state, expecting the buf to be
			 * removed on the next submission attempt. That won't
			 * happen if we're shutting down, so this is the last
			 * opportunity to release such buffers from the queue.
			 */
			ASSERT(list_empty(&ailp->ail_buf_list) ||
			       xlog_is_shutdown(ailp->ail_log));
			xfs_buf_delwri_cancel(&ailp->ail_buf_list);
			break;
		}

		/* Idle if the AIL is empty. */
		spin_lock(&ailp->ail_lock);
		if (!xfs_ail_min(ailp) && list_empty(&ailp->ail_buf_list)) {
			spin_unlock(&ailp->ail_lock);
			schedule();
			tout = 0;
			continue;
		}
		spin_unlock(&ailp->ail_lock);

		if (tout)
			schedule_timeout(msecs_to_jiffies(tout));

		__set_current_state(TASK_RUNNING);

		try_to_freeze();

		tout = xfsaild_push(ailp);
	}

	memalloc_noreclaim_restore(noreclaim_flag);
	return 0;
}

/*
 * Push out all items in the AIL immediately and wait until the AIL is empty.
 */
void
xfs_ail_push_all_sync(
	struct xfs_ail  *ailp)
{
	DEFINE_WAIT(wait);

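	/*
	 * The waitqueue_active(&ailp->ail_empty) check in
	 * xfs_ail_calc_push_target() keeps the push target at the AIL head
	 * while we sleep here, and xfs_ail_update_finish() wakes us once the
	 * AIL drains.
	 */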
	spin_lock(&ailp->ail_lock);
	while (xfs_ail_max(ailp) != NULL) {
		prepare_to_wait(&ailp->ail_empty, &wait, TASK_UNINTERRUPTIBLE);
		wake_up_process(ailp->ail_task);
		spin_unlock(&ailp->ail_lock);
		schedule();
		spin_lock(&ailp->ail_lock);
	}
	spin_unlock(&ailp->ail_lock);

	finish_wait(&ailp->ail_empty, &wait);
}

void
__xfs_ail_assign_tail_lsn(
	struct xfs_ail		*ailp)
{
	struct xlog		*log = ailp->ail_log;
	xfs_lsn_t		tail_lsn;

	assert_spin_locked(&ailp->ail_lock);

	if (xlog_is_shutdown(log))
		return;

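	/*
	 * l_tail_space tracks the distance between the AIL head and the log
	 * tail, i.e. the amount of log space currently pinned by items in
	 * the AIL.  An empty AIL pins the tail at the head, so the pinned
	 * space computes to zero.
	 */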
	tail_lsn = __xfs_ail_min_lsn(ailp);
	if (!tail_lsn)
		tail_lsn = ailp->ail_head_lsn;

	WRITE_ONCE(log->l_tail_space,
			xlog_lsn_sub(log, ailp->ail_head_lsn, tail_lsn));
	trace_xfs_log_assign_tail_lsn(log, tail_lsn);
	atomic64_set(&log->l_tail_lsn, tail_lsn);
}

/*
 * Callers should pass the original tail lsn so that we can detect if the tail
 * has moved as a result of the operation that was performed. If the caller
 * needs to force a tail space update, it should pass NULLCOMMITLSN to bypass
 * the "did the tail LSN change?" checks. If the caller wants to avoid a tail
 * update (e.g. it knows the tail did not change) it should pass an @old_lsn of
 * 0.
 */
void
xfs_ail_update_finish(
	struct xfs_ail		*ailp,
	xfs_lsn_t		old_lsn) __releases(ailp->ail_lock)
{
	struct xlog		*log = ailp->ail_log;

	/* If the tail lsn hasn't changed, don't do updates or wakeups. */
	if (!old_lsn || old_lsn == __xfs_ail_min_lsn(ailp)) {
		spin_unlock(&ailp->ail_lock);
		return;
	}

	__xfs_ail_assign_tail_lsn(ailp);
	if (list_empty(&ailp->ail_head))
		wake_up_all(&ailp->ail_empty);
	spin_unlock(&ailp->ail_lock);
	xfs_log_space_wake(log->l_mp);
}

/*
 * xfs_trans_ail_update_bulk - bulk AIL insertion operation.
 *
 * @xfs_trans_ail_update_bulk takes an array of log items that all need to be
 * positioned at the same LSN in the AIL. If an item is not in the AIL, it will
 * be added.  Otherwise, it will be repositioned by removing it and re-adding
 * it to the AIL. If we move the first item in the AIL, update the log tail to
 * match the new minimum LSN in the AIL.
 *
 * This function takes the AIL lock once to execute the update operations on
 * all the items in the array rather than dropping and retaking it per item.
 * As a result, once we have the AIL lock, we need to check each log item LSN
 * to confirm it needs to be moved forward in the AIL.
 *
 * To optimise the insert operation, we delete all the items from the AIL in
 * the first pass, moving them into a temporary list, then splice the temporary
 * list into the correct position in the AIL. This avoids needing to do an
 * insert operation on every item.
 *
 * This function must be called with the AIL lock held.  The lock is dropped
 * before returning.
 */
void
xfs_trans_ail_update_bulk(
	struct xfs_ail		*ailp,
	struct xfs_ail_cursor	*cur,
	struct xfs_log_item	**log_items,
	int			nr_items,
	xfs_lsn_t		lsn) __releases(ailp->ail_lock)
{
	struct xfs_log_item	*mlip;
	xfs_lsn_t		tail_lsn = 0;
	int			i;
	LIST_HEAD(tmp);

	ASSERT(nr_items > 0);		/* Not required, but true. */
	mlip = xfs_ail_min(ailp);

	for (i = 0; i < nr_items; i++) {
		struct xfs_log_item *lip = log_items[i];
		if (test_and_set_bit(XFS_LI_IN_AIL, &lip->li_flags)) {
			/* check if we really need to move the item */
			if (XFS_LSN_CMP(lsn, lip->li_lsn) <= 0)
				continue;

			trace_xfs_ail_move(lip, lip->li_lsn, lsn);
			if (mlip == lip && !tail_lsn)
				tail_lsn = lip->li_lsn;

			xfs_ail_delete(ailp, lip);
		} else {
			trace_xfs_ail_insert(lip, 0, lsn);
		}
		lip->li_lsn = lsn;
		list_add_tail(&lip->li_ail, &tmp);
	}

	if (!list_empty(&tmp))
		xfs_ail_splice(ailp, cur, &tmp, lsn);

	/*
	 * If this is the first insert, wake up the push daemon so it can
	 * actively scan for items to push. We also need to do a log tail
	 * LSN update to ensure that it is correctly tracked by the log, so
	 * set the tail_lsn to NULLCOMMITLSN so that xfs_ail_update_finish()
	 * will see that the tail lsn has changed and will update the tail
	 * appropriately.
	 */
	if (!mlip) {
		wake_up_process(ailp->ail_task);
		tail_lsn = NULLCOMMITLSN;
	}

	xfs_ail_update_finish(ailp, tail_lsn);
}

/* Insert a log item into the AIL. */
void
xfs_trans_ail_insert(
	struct xfs_ail		*ailp,
	struct xfs_log_item	*lip,
	xfs_lsn_t		lsn)
{
	spin_lock(&ailp->ail_lock);
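	/* xfs_trans_ail_update_bulk() drops the AIL lock for us. */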
	xfs_trans_ail_update_bulk(ailp, NULL, &lip, 1, lsn);
}

/*
 * Delete one log item from the AIL.
 *
 * If this item was at the tail of the AIL, return the LSN of the log item so
 * that we can use it to check if the LSN of the tail of the log has moved
 * when finishing up the AIL delete process in xfs_ail_update_finish().
 */
xfs_lsn_t
xfs_ail_delete_one(
	struct xfs_ail		*ailp,
	struct xfs_log_item	*lip)
{
	struct xfs_log_item	*mlip = xfs_ail_min(ailp);
	xfs_lsn_t		lsn = lip->li_lsn;

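	/*
	 * The item must already be in the AIL here, so the list cannot be
	 * empty and mlip is always non-NULL.
	 */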
	trace_xfs_ail_delete(lip, mlip->li_lsn, lip->li_lsn);
	xfs_ail_delete(ailp, lip);
	clear_bit(XFS_LI_IN_AIL, &lip->li_flags);
	lip->li_lsn = 0;

	if (mlip == lip)
		return lsn;
	return 0;
}

void
xfs_trans_ail_delete(
	struct xfs_log_item	*lip,
	int			shutdown_type)
{
	struct xfs_ail		*ailp = lip->li_ailp;
	struct xlog		*log = ailp->ail_log;
	xfs_lsn_t		tail_lsn;

	spin_lock(&ailp->ail_lock);
	if (!test_bit(XFS_LI_IN_AIL, &lip->li_flags)) {
		spin_unlock(&ailp->ail_lock);
		if (shutdown_type && !xlog_is_shutdown(log)) {
			xfs_alert_tag(log->l_mp, XFS_PTAG_AILDELETE,
	"%s: attempting to delete a log item that is not in the AIL",
					__func__);
			xlog_force_shutdown(log, shutdown_type);
		}
		return;
	}

	/* xfs_ail_update_finish() drops the AIL lock */
	xfs_clear_li_failed(lip);
	tail_lsn = xfs_ail_delete_one(ailp, lip);
	xfs_ail_update_finish(ailp, tail_lsn);
}

int
xfs_trans_ail_init(
	xfs_mount_t	*mp)
{
	struct xfs_ail	*ailp;

	ailp = kzalloc(sizeof(struct xfs_ail),
			GFP_KERNEL | __GFP_RETRY_MAYFAIL);
	if (!ailp)
		return -ENOMEM;

	ailp->ail_log = mp->m_log;
	INIT_LIST_HEAD(&ailp->ail_head);
	INIT_LIST_HEAD(&ailp->ail_cursors);
	spin_lock_init(&ailp->ail_lock);
	INIT_LIST_HEAD(&ailp->ail_buf_list);
	init_waitqueue_head(&ailp->ail_empty);

	ailp->ail_task = kthread_run(xfsaild, ailp, "xfsaild/%s",
				mp->m_super->s_id);
	if (IS_ERR(ailp->ail_task))
		goto out_free_ailp;

	mp->m_ail = ailp;
	return 0;

out_free_ailp:
	kfree(ailp);
	return -ENOMEM;
}

void
xfs_trans_ail_destroy(
	xfs_mount_t	*mp)
{
	struct xfs_ail	*ailp = mp->m_ail;

	kthread_stop(ailp->ail_task);
	kfree(ailp);
}