Lines Matching full:log
35 struct xlog *log);
41 struct xlog *log);
44 struct xlog *log,
51 struct xlog *log,
57 struct xlog *log,
62 struct xlog *log,
71 struct xlog *log);
81 * However, this padding does not get written into the log, and hence we have to
82 * track the space used by the log vectors separately to prevent log space hangs
83 * due to inaccurate accounting (i.e. a leak) of the used log space through the
87 * log. This prepends the data region we return to the caller to copy their data
159 struct xlog *log, in xlog_grant_return_space() argument
163 int64_t diff = xlog_lsn_sub(log, new_head, old_head); in xlog_grant_return_space()
165 xlog_grant_sub_space(&log->l_reserve_head, diff); in xlog_grant_return_space()
166 xlog_grant_sub_space(&log->l_write_head, diff); in xlog_grant_return_space()
170 * Return the space in the log between the tail and the head. In the case where
179 struct xlog *log, in xlog_grant_space_left() argument
185 free_bytes = log->l_logsize - READ_ONCE(log->l_tail_space) - in xlog_grant_space_left()
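The fragment from xlog_grant_space_left() above computes free space as the log size minus the space consumed up to the tail minus whatever is already reserved on the grant head. A minimal standalone sketch of that arithmetic follows; the struct layout and the fake_ names are illustrative stand-ins, not the kernel's actual types, and the clamp to zero is a simplification.

/* Sketch of the free-space arithmetic hinted at by xlog_grant_space_left():
 * free = log size - tail space - bytes already reserved on the grant head.
 * Types and names are simplified stand-ins for the kernel structures.
 */
#include <stdint.h>
#include <stdio.h>

struct fake_grant_head {
	int64_t reserved_bytes;		/* bytes already granted on this head */
};

struct fake_xlog {
	int64_t logsize;		/* total log size in bytes */
	int64_t tail_space;		/* space consumed between tail and head */
};

static int64_t fake_grant_space_left(const struct fake_xlog *log,
				     const struct fake_grant_head *head)
{
	int64_t free_bytes = log->logsize - log->tail_space -
			     head->reserved_bytes;

	/* Clamped here for simplicity; the kernel handles overcommit cases. */
	return free_bytes > 0 ? free_bytes : 0;
}

int main(void)
{
	struct fake_xlog log = { .logsize = 64 << 20, .tail_space = 48 << 20 };
	struct fake_grant_head reserve = { .reserved_bytes = 12 << 20 };

	printf("free bytes: %lld\n",
	       (long long)fake_grant_space_left(&log, &reserve));
	return 0;
}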
206 struct xlog *log, in xlog_ticket_reservation() argument
210 if (head == &log->l_write_head) { in xlog_ticket_reservation()
223 struct xlog *log, in xlog_grant_head_wake() argument
231 need_bytes = xlog_ticket_reservation(log, head, tic); in xlog_grant_head_wake()
236 trace_xfs_log_grant_wake_up(log, tic); in xlog_grant_head_wake()
245 struct xlog *log, in xlog_grant_head_wait() argument
254 if (xlog_is_shutdown(log)) in xlog_grant_head_wait()
260 XFS_STATS_INC(log->l_mp, xs_sleep_logspace); in xlog_grant_head_wait()
262 /* Push on the AIL to free up all the log space. */ in xlog_grant_head_wait()
263 xfs_ail_push_all(log->l_ailp); in xlog_grant_head_wait()
265 trace_xfs_log_grant_sleep(log, tic); in xlog_grant_head_wait()
267 trace_xfs_log_grant_wake(log, tic); in xlog_grant_head_wait()
270 if (xlog_is_shutdown(log)) in xlog_grant_head_wait()
272 } while (xlog_grant_space_left(log, head) < need_bytes); in xlog_grant_head_wait()
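The xlog_grant_head_wait() fragments above show the sleep loop used when a ticket cannot be satisfied immediately: bail out if the log is shut down, push the AIL to move the log tail and free space, sleep until woken, and re-check the available space against the ticket's requirement. A hedged sketch of that control flow, with every fake_ helper a hypothetical stand-in:

/* Hedged sketch of the xlog_grant_head_wait() control flow: push the AIL,
 * sleep until woken, and repeat until enough space is available or the
 * log has been shut down.  All fake_* helpers are stand-ins.
 */
#include <stdbool.h>
#include <stdio.h>

static bool fake_log_is_shutdown(void) { return false; }
static void fake_ail_push_all(void) { puts("push AIL to move the log tail"); }
static void fake_sleep_until_woken(void) { puts("sleep on grant head waitqueue"); }

static long fake_space_left = 0;
static long fake_grant_space_left(void)
{
	/* Pretend each AIL push freed some space. */
	fake_space_left += 4096;
	return fake_space_left;
}

static int fake_grant_head_wait(long need_bytes)
{
	do {
		if (fake_log_is_shutdown())
			return -5;	/* -EIO in the kernel */

		fake_ail_push_all();
		fake_sleep_until_woken();

		if (fake_log_is_shutdown())
			return -5;
	} while (fake_grant_space_left() < need_bytes);

	return 0;
}

int main(void)
{
	return fake_grant_head_wait(16384);
}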
282 * Atomically get the log space required for a log ticket.
300 struct xlog *log, in xlog_grant_head_check() argument
308 ASSERT(!xlog_in_recovery(log)); in xlog_grant_head_check()
316 *need_bytes = xlog_ticket_reservation(log, head, tic); in xlog_grant_head_check()
317 free_bytes = xlog_grant_space_left(log, head); in xlog_grant_head_check()
320 if (!xlog_grant_head_wake(log, head, &free_bytes) || in xlog_grant_head_check()
322 error = xlog_grant_head_wait(log, head, tic, in xlog_grant_head_check()
328 error = xlog_grant_head_wait(log, head, tic, *need_bytes); in xlog_grant_head_check()
340 * Do not write to the log on norecovery mounts, if the data or log in xfs_log_writable()
342 * mounts allow internal writes for log recovery and unmount purposes, in xfs_log_writable()
364 struct xlog *log = mp->m_log; in xfs_log_regrant() local
368 if (xlog_is_shutdown(log)) in xfs_log_regrant()
376 * the log. Just add one to the existing tid so that we can see chains in xfs_log_regrant()
377 * of rolling transactions in the log easily. in xfs_log_regrant()
384 trace_xfs_log_regrant(log, tic); in xfs_log_regrant()
386 error = xlog_grant_head_check(log, &log->l_write_head, tic, in xfs_log_regrant()
391 xlog_grant_add_space(&log->l_write_head, need_bytes); in xfs_log_regrant()
392 trace_xfs_log_regrant_exit(log, tic); in xfs_log_regrant()
407 * Reserve log space and return a ticket corresponding to the reservation.
409 * Each reservation is going to reserve extra space for a log record header.
410 * When writes happen to the on-disk log, we don't subtract the length of the
411 * log record header from any reservation. By wasting space in each
422 struct xlog *log = mp->m_log; in xfs_log_reserve() local
427 if (xlog_is_shutdown(log)) in xfs_log_reserve()
433 tic = xlog_ticket_alloc(log, unit_bytes, cnt, permanent); in xfs_log_reserve()
435 trace_xfs_log_reserve(log, tic); in xfs_log_reserve()
436 error = xlog_grant_head_check(log, &log->l_reserve_head, tic, in xfs_log_reserve()
441 xlog_grant_add_space(&log->l_reserve_head, need_bytes); in xfs_log_reserve()
442 xlog_grant_add_space(&log->l_write_head, need_bytes); in xfs_log_reserve()
443 trace_xfs_log_reserve_exit(log, tic); in xfs_log_reserve()
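Taken together, the xfs_log_reserve() fragments outline the reservation path: fail early on a shut-down log, allocate a ticket, check the reserve grant head (which may sleep), and on success charge the reserved bytes to both the reserve and write heads. A simplified sketch of that sequence with toy types; none of the fake_ names are the kernel API.

/* Hedged sketch of the xfs_log_reserve() sequence visible above.
 * Types and helpers are simplified stand-ins, not the kernel interfaces.
 */
#include <stdbool.h>
#include <stdio.h>

struct fake_grant_head { long granted_bytes; };
struct fake_log {
	bool shutdown;
	struct fake_grant_head reserve_head;
	struct fake_grant_head write_head;
};

/* Would sleep for space if needed; always succeeds in this sketch. */
static int fake_grant_head_check(struct fake_log *log, long need_bytes)
{
	(void)log;
	(void)need_bytes;
	return 0;
}

static int fake_log_reserve(struct fake_log *log, long need_bytes)
{
	int error;

	if (log->shutdown)
		return -5;		/* -EIO in the kernel */

	error = fake_grant_head_check(log, need_bytes);
	if (error)
		return error;

	/* A new reservation is charged against both grant heads. */
	log->reserve_head.granted_bytes += need_bytes;
	log->write_head.granted_bytes += need_bytes;
	return 0;
}

int main(void)
{
	struct fake_log log = { .shutdown = false };

	if (fake_log_reserve(&log, 65536) == 0)
		printf("reserved, write head now %ld bytes\n",
		       log.write_head.granted_bytes);
	return 0;
}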
458 * Run all the pending iclog callbacks and wake log force waiters and iclog
460 * don't care what order we process callbacks here because the log is shut down
473 struct xlog *log) in xlog_state_shutdown_callbacks() argument
478 iclog = log->l_iclog; in xlog_state_shutdown_callbacks()
485 spin_unlock(&log->l_icloglock); in xlog_state_shutdown_callbacks()
489 spin_lock(&log->l_icloglock); in xlog_state_shutdown_callbacks()
492 } while ((iclog = iclog->ic_next) != log->l_iclog); in xlog_state_shutdown_callbacks()
494 wake_up_all(&log->l_flush_wait); in xlog_state_shutdown_callbacks()
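xlog_state_shutdown_callbacks() and several later helpers walk the in-core log buffers as a circular singly linked ring: start at log->l_iclog, follow ic_next, and stop once the walk returns to the starting buffer. A minimal sketch of that traversal pattern with a toy node type:

/* Minimal sketch of the circular iclog ring walk: start at the ring anchor,
 * follow ic_next, stop when the walk arrives back at the anchor.
 */
#include <stdio.h>

struct fake_iclog {
	int id;
	struct fake_iclog *ic_next;
};

static void fake_walk_ring(struct fake_iclog *anchor)
{
	struct fake_iclog *iclog = anchor;

	do {
		/* Per-iclog work (callbacks, wakeups, ...) goes here. */
		printf("visiting iclog %d\n", iclog->id);
	} while ((iclog = iclog->ic_next) != anchor);
}

int main(void)
{
	struct fake_iclog a = { .id = 0 }, b = { .id = 1 }, c = { .id = 2 };

	a.ic_next = &b;
	b.ic_next = &c;
	c.ic_next = &a;		/* complete the ring */
	fake_walk_ring(&a);
	return 0;
}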
502 * log tail is updated correctly. NEED_FUA indicates that the iclog will be
504 * within the iclog. We need to ensure that the log tail does not move beyond
519 struct xlog *log, in xlog_state_release_iclog() argument
525 lockdep_assert_held(&log->l_icloglock); in xlog_state_release_iclog()
529 * Grabbing the current log tail needs to be atomic w.r.t. the writing in xlog_state_release_iclog()
530 * of the tail LSN into the iclog so we guarantee that the log tail does in xlog_state_release_iclog()
538 cpu_to_be64(atomic64_read(&log->l_tail_lsn)); in xlog_state_release_iclog()
543 if (xlog_is_shutdown(log)) { in xlog_state_release_iclog()
550 xlog_state_shutdown_callbacks(log); in xlog_state_release_iclog()
563 xlog_verify_tail_lsn(log, iclog); in xlog_state_release_iclog()
566 spin_unlock(&log->l_icloglock); in xlog_state_release_iclog()
567 xlog_sync(log, iclog, ticket); in xlog_state_release_iclog()
568 spin_lock(&log->l_icloglock); in xlog_state_release_iclog()
573 * Mount a log filesystem
576 * log_target - buftarg of on-disk log device
578 * num_bblocks - Number of BBSIZE blocks in on-disk log
589 struct xlog *log; in xfs_log_mount() local
605 log = xlog_alloc_log(mp, log_target, blk_offset, num_bblks); in xfs_log_mount()
606 if (IS_ERR(log)) { in xfs_log_mount()
607 error = PTR_ERR(log); in xfs_log_mount()
610 mp->m_log = log; in xfs_log_mount()
613 * Now that we have set up the log and its internal geometry in xfs_log_mount()
614 * parameters, we can validate the given log space and drop a critical in xfs_log_mount()
615 * message via syslog if the log size is too small. A log that is too in xfs_log_mount()
616 * small can lead to unexpected situations in transaction log space in xfs_log_mount()
618 * the other log geometry constraints, so we don't have to check those in xfs_log_mount()
624 * way to grow the log (short of black magic surgery with xfs_db). in xfs_log_mount()
628 * filesystem with a log that is too small. in xfs_log_mount()
633 "Log size %d blocks too small, minimum size is %d blocks", in xfs_log_mount()
637 * Log check errors are always fatal on v5; or whenever bad in xfs_log_mount()
641 xfs_crit(mp, "AAIEEE! Log failed size checks. Abort!"); in xfs_log_mount()
646 xfs_crit(mp, "Log size out of supported range."); in xfs_log_mount()
648 "Continuing onwards, but if log hangs are experienced then please report this message in the bug re… in xfs_log_mount()
652 * Initialize the AIL now we have a log. in xfs_log_mount()
659 log->l_ailp = mp->m_ail; in xfs_log_mount()
662 * skip log recovery on a norecovery mount. pretend it all in xfs_log_mount()
666 error = xlog_recover(log); in xfs_log_mount()
668 xfs_warn(mp, "log mount/recovery failed: error %d", in xfs_log_mount()
670 xlog_recover_cancel(log); in xfs_log_mount()
675 error = xfs_sysfs_init(&log->l_kobj, &xfs_log_ktype, &mp->m_kobj, in xfs_log_mount()
676 "log"); in xfs_log_mount()
681 clear_bit(XLOG_ACTIVE_RECOVERY, &log->l_opstate); in xfs_log_mount()
684 * Now the log has been fully initialised and we know where our in xfs_log_mount()
688 xlog_cil_init_post_recovery(log); in xfs_log_mount()
695 xlog_dealloc_log(log); in xfs_log_mount()
706 * If we finish recovery successfully, start the background log work. If we are
714 struct xlog *log = mp->m_log; in xfs_log_mount_finish() local
723 * During the second phase of log recovery, we need iget and in xfs_log_mount_finish()
726 * of inodes before we're done replaying log items on those in xfs_log_mount_finish()
735 * in log recovery failure. We have to evict the unreferenced in xfs_log_mount_finish()
744 if (xlog_recovery_needed(log)) in xfs_log_mount_finish()
745 error = xlog_recover_finish(log); in xfs_log_mount_finish()
750 * Drain the buffer LRU after log recovery. This is required for v4 in xfs_log_mount_finish()
758 if (xlog_recovery_needed(log)) { in xfs_log_mount_finish()
770 clear_bit(XLOG_RECOVERY_NEEDED, &log->l_opstate); in xfs_log_mount_finish()
772 /* Make sure the log is dead if we're returning failure. */ in xfs_log_mount_finish()
773 ASSERT(!error || xlog_is_shutdown(log)); in xfs_log_mount_finish()
780 * the log.
806 * Cycle all the iclogbuf locks to make sure all log IO completion
810 xlog_wait_iclog_completion(struct xlog *log) in xlog_wait_iclog_completion() argument
813 struct xlog_in_core *iclog = log->l_iclog; in xlog_wait_iclog_completion()
815 for (i = 0; i < log->l_iclog_bufs; i++) { in xlog_wait_iclog_completion()
824 * log force state machine. Waiting on ic_force_wait ensures iclog completions
833 struct xlog *log = iclog->ic_log; in xlog_wait_on_iclog() local
836 if (!xlog_is_shutdown(log) && in xlog_wait_on_iclog()
839 XFS_STATS_INC(log->l_mp, xs_log_force_sleep); in xlog_wait_on_iclog()
840 xlog_wait(&iclog->ic_force_wait, &log->l_icloglock); in xlog_wait_on_iclog()
842 spin_unlock(&log->l_icloglock); in xlog_wait_on_iclog()
845 if (xlog_is_shutdown(log)) in xlog_wait_on_iclog()
857 struct xlog *log, in xlog_write_unmount_record() argument
892 return xlog_write(log, NULL, &lv_chain, ticket, reg.i_len); in xlog_write_unmount_record()
897 * log.
901 struct xlog *log) in xlog_unmount_write() argument
903 struct xfs_mount *mp = log->l_mp; in xlog_unmount_write()
912 error = xlog_write_unmount_record(log, tic); in xlog_unmount_write()
915 * transitioning log state to shutdown. Just continue... in xlog_unmount_write()
921 spin_lock(&log->l_icloglock); in xlog_unmount_write()
922 iclog = log->l_iclog; in xlog_unmount_write()
927 trace_xfs_log_umount_write(log, tic); in xlog_unmount_write()
928 xfs_log_ticket_ungrant(log, tic); in xlog_unmount_write()
934 struct xlog *log) in xfs_log_unmount_verify_iclog() argument
936 struct xlog_in_core *iclog = log->l_iclog; in xfs_log_unmount_verify_iclog()
941 } while ((iclog = iclog->ic_next) != log->l_iclog); in xfs_log_unmount_verify_iclog()
955 struct xlog *log = mp->m_log; in xfs_log_unmount_write() local
962 if (xlog_is_shutdown(log)) in xfs_log_unmount_write()
967 * record to force log recovery at next mount, after which the summary in xfs_log_unmount_write()
978 xfs_log_unmount_verify_iclog(log); in xfs_log_unmount_write()
979 xlog_unmount_write(log); in xfs_log_unmount_write()
983 * Empty the log for unmount/freeze.
985 * To do this, we first need to shut down the background log work so it is not
986 * trying to cover the log as we clean up. We then need to unpin all objects in
987 * the log so we can then flush them out. Once they have completed their IO and
988 * run the callbacks removing themselves from the AIL, we can cover the log.
995 * Clear log incompat features since we're quiescing the log. Report in xfs_log_quiesce()
996 * failures, though it's not fatal to have a higher log feature in xfs_log_quiesce()
997 * protection level than the log contents actually require. in xfs_log_quiesce()
1005 "Failed to clear log incompat features on quiesce"); in xfs_log_quiesce()
1035 * Shut down and release the AIL and Log.
1038 * from the AIL so that the log is empty before we write the unmount record to
1039 * the log. Once this is done, we can tear down the AIL and the log.
1048 * If shutdown has come from iclog IO context, the log in xfs_log_unmount()
1084 * Wake up processes waiting for log space after we have moved the log tail.
1090 struct xlog *log = mp->m_log; in xfs_log_space_wake() local
1093 if (xlog_is_shutdown(log)) in xfs_log_space_wake()
1096 if (!list_empty_careful(&log->l_write_head.waiters)) { in xfs_log_space_wake()
1097 ASSERT(!xlog_in_recovery(log)); in xfs_log_space_wake()
1099 spin_lock(&log->l_write_head.lock); in xfs_log_space_wake()
1100 free_bytes = xlog_grant_space_left(log, &log->l_write_head); in xfs_log_space_wake()
1101 xlog_grant_head_wake(log, &log->l_write_head, &free_bytes); in xfs_log_space_wake()
1102 spin_unlock(&log->l_write_head.lock); in xfs_log_space_wake()
1105 if (!list_empty_careful(&log->l_reserve_head.waiters)) { in xfs_log_space_wake()
1106 ASSERT(!xlog_in_recovery(log)); in xfs_log_space_wake()
1108 spin_lock(&log->l_reserve_head.lock); in xfs_log_space_wake()
1109 free_bytes = xlog_grant_space_left(log, &log->l_reserve_head); in xfs_log_space_wake()
1110 xlog_grant_head_wake(log, &log->l_reserve_head, &free_bytes); in xfs_log_space_wake()
1111 spin_unlock(&log->l_reserve_head.lock); in xfs_log_space_wake()
1117 * covered. To begin the transition to the idle state firstly the log needs to
1119 * we start attempting to cover the log.
1122 * informed that dummy transactions are required to move the log into the idle
1126 * cover the log as we may be in a situation where there isn't log space
1128 * tail of the log is pinned by an item that is modified in the CIL. Hence
1130 * can't start trying to idle the log until both the CIL and AIL are empty.
1136 struct xlog *log = mp->m_log; in xfs_log_need_covered() local
1139 if (!xlog_cil_empty(log)) in xfs_log_need_covered()
1142 spin_lock(&log->l_icloglock); in xfs_log_need_covered()
1143 switch (log->l_covered_state) { in xfs_log_need_covered()
1150 if (xfs_ail_min_lsn(log->l_ailp)) in xfs_log_need_covered()
1152 if (!xlog_iclogs_empty(log)) in xfs_log_need_covered()
1156 if (log->l_covered_state == XLOG_STATE_COVER_NEED) in xfs_log_need_covered()
1157 log->l_covered_state = XLOG_STATE_COVER_DONE; in xfs_log_need_covered()
1159 log->l_covered_state = XLOG_STATE_COVER_DONE2; in xfs_log_need_covered()
1165 spin_unlock(&log->l_icloglock); in xfs_log_need_covered()
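The xfs_log_need_covered() fragments hint at a two-stage covering state machine: once the CIL, AIL and iclogs are all empty, the state advances from COVER_NEED to COVER_DONE and then on to COVER_DONE2, with one dummy superblock transaction logged per step. The sketch below models that idea with simplified transitions (the kernel's machine has additional NEED2/IDLE handling); the state names mirror the fragments but everything else is illustrative.

/* Hedged sketch of the two-stage log covering state machine.  The log is
 * only considered covered after two dummy transactions complete while the
 * CIL, AIL and iclogs stay empty.  Transition logic is simplified.
 */
#include <stdbool.h>
#include <stdio.h>

enum cover_state {
	COVER_IDLE,
	COVER_NEED,	/* first dummy transaction still required */
	COVER_DONE,	/* first dummy done, second still required */
	COVER_NEED2,
	COVER_DONE2,	/* second dummy done, log is covered */
};

struct fake_log {
	enum cover_state covered_state;
	bool cil_empty;
	bool ail_empty;
	bool iclogs_empty;
};

/* Returns true if another dummy transaction should be logged. */
static bool fake_log_need_covered(struct fake_log *log)
{
	if (!log->cil_empty || !log->ail_empty || !log->iclogs_empty)
		return false;	/* not idle yet, keep waiting */

	switch (log->covered_state) {
	case COVER_IDLE:
	case COVER_DONE2:
		return false;	/* nothing to do / already covered */
	case COVER_NEED:
		log->covered_state = COVER_DONE;
		return true;
	default:
		log->covered_state = COVER_DONE2;
		return true;
	}
}

int main(void)
{
	struct fake_log log = {
		.covered_state = COVER_NEED,
		.cil_empty = true, .ail_empty = true, .iclogs_empty = true,
	};

	while (fake_log_need_covered(&log))
		puts("commit a dummy superblock transaction");
	return 0;
}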
1170 * Explicitly cover the log. This is similar to background log covering but
1172 * the log is idle and suitable for covering. The CIL, iclog buffers and AIL
1191 * state machine if the log requires covering. Therefore, we must call in xfs_log_cover()
1195 * Fall into the covering sequence if the log needs covering or the in xfs_log_cover()
1205 * To cover the log, commit the superblock twice (at most) in in xfs_log_cover()
1210 * covering the log. Push the AIL one more time to leave it empty, as in xfs_log_cover()
1229 struct xlog *log = iclog->ic_log; in xlog_ioend_work() local
1242 if (XFS_TEST_ERROR(error, log->l_mp, XFS_ERRTAG_IODONE_IOERR)) { in xlog_ioend_work()
1243 xfs_alert(log->l_mp, "log I/O error %d", error); in xlog_ioend_work()
1244 xlog_force_shutdown(log, SHUTDOWN_LOG_IO_ERROR); in xlog_ioend_work()
1260 * Return size of each in-core log record buffer.
1270 struct xlog *log) in xlog_get_iclog_buffer_size() argument
1277 log->l_iclog_bufs = mp->m_logbufs; in xlog_get_iclog_buffer_size()
1278 log->l_iclog_size = mp->m_logbsize; in xlog_get_iclog_buffer_size()
1283 log->l_iclog_heads = in xlog_get_iclog_buffer_size()
1285 log->l_iclog_hsize = log->l_iclog_heads << BBSHIFT; in xlog_get_iclog_buffer_size()
1297 * Clear the log incompat flags if we have the opportunity.
1299 * This only happens if we're about to log the second dummy transaction as part
1300 * of covering the log.
1304 struct xlog *log) in xlog_clear_incompat() argument
1306 struct xfs_mount *mp = log->l_mp; in xlog_clear_incompat()
1312 if (log->l_covered_state != XLOG_STATE_COVER_DONE2) in xlog_clear_incompat()
1320 * disk. If there is nothing dirty, then we might need to cover the log to
1327 struct xlog *log = container_of(to_delayed_work(work), in xfs_log_worker() local
1329 struct xfs_mount *mp = log->l_mp; in xfs_log_worker()
1334 * Dump a transaction into the log that contains no real change. in xfs_log_worker()
1335 * This is needed to stamp the current tail LSN into the log in xfs_log_worker()
1340 * will prevent log covering from making progress. Hence we in xfs_log_worker()
1341 * synchronously log the superblock instead to ensure the in xfs_log_worker()
1344 xlog_clear_incompat(log); in xfs_log_worker()
1357 * This routine initializes some of the log structure for a given mount point.
1368 struct xlog *log; in xlog_alloc_log() local
1376 log = kzalloc(sizeof(struct xlog), GFP_KERNEL | __GFP_RETRY_MAYFAIL); in xlog_alloc_log()
1377 if (!log) { in xlog_alloc_log()
1378 xfs_warn(mp, "Log allocation failed: No memory!"); in xlog_alloc_log()
1382 log->l_mp = mp; in xlog_alloc_log()
1383 log->l_targ = log_target; in xlog_alloc_log()
1384 log->l_logsize = BBTOB(num_bblks); in xlog_alloc_log()
1385 log->l_logBBstart = blk_offset; in xlog_alloc_log()
1386 log->l_logBBsize = num_bblks; in xlog_alloc_log()
1387 log->l_covered_state = XLOG_STATE_COVER_IDLE; in xlog_alloc_log()
1388 set_bit(XLOG_ACTIVE_RECOVERY, &log->l_opstate); in xlog_alloc_log()
1389 INIT_DELAYED_WORK(&log->l_work, xfs_log_worker); in xlog_alloc_log()
1390 INIT_LIST_HEAD(&log->r_dfops); in xlog_alloc_log()
1392 log->l_prev_block = -1; in xlog_alloc_log()
1393 /* log->l_tail_lsn = 0x100000000LL; cycle = 1; current block = 0 */ in xlog_alloc_log()
1394 xlog_assign_atomic_lsn(&log->l_tail_lsn, 1, 0); in xlog_alloc_log()
1395 log->l_curr_cycle = 1; /* 0 is bad since this is initial value */ in xlog_alloc_log()
1398 log->l_iclog_roundoff = mp->m_sb.sb_logsunit; in xlog_alloc_log()
1400 log->l_iclog_roundoff = BBSIZE; in xlog_alloc_log()
1402 xlog_grant_head_init(&log->l_reserve_head); in xlog_alloc_log()
1403 xlog_grant_head_init(&log->l_write_head); in xlog_alloc_log()
1409 xfs_warn(mp, "Log sector size too small (0x%x < 0x%x)", in xlog_alloc_log()
1416 xfs_warn(mp, "Log sector size too large (0x%x > 0x%x)", in xlog_alloc_log()
1421 /* for larger sector sizes, must have v2 or external log */ in xlog_alloc_log()
1422 if (log2_size && log->l_logBBstart > 0 && in xlog_alloc_log()
1425 "log sector size (0x%x) invalid for configuration.", in xlog_alloc_log()
1430 log->l_sectBBsize = 1 << log2_size; in xlog_alloc_log()
1432 xlog_get_iclog_buffer_size(mp, log); in xlog_alloc_log()
1434 spin_lock_init(&log->l_icloglock); in xlog_alloc_log()
1435 init_waitqueue_head(&log->l_flush_wait); in xlog_alloc_log()
1437 iclogp = &log->l_iclog; in xlog_alloc_log()
1445 ASSERT(log->l_iclog_size >= 4096); in xlog_alloc_log()
1446 for (i = 0; i < log->l_iclog_bufs; i++) { in xlog_alloc_log()
1447 size_t bvec_size = howmany(log->l_iclog_size, PAGE_SIZE) * in xlog_alloc_log()
1459 iclog->ic_data = kvzalloc(log->l_iclog_size, in xlog_alloc_log()
1467 xfs_has_logv2(log->l_mp) ? 2 : 1); in xlog_alloc_log()
1468 head->h_size = cpu_to_be32(log->l_iclog_size); in xlog_alloc_log()
1473 iclog->ic_size = log->l_iclog_size - log->l_iclog_hsize; in xlog_alloc_log()
1475 iclog->ic_log = log; in xlog_alloc_log()
1478 iclog->ic_datap = (void *)iclog->ic_data + log->l_iclog_hsize; in xlog_alloc_log()
1487 *iclogp = log->l_iclog; /* complete ring */ in xlog_alloc_log()
1488 log->l_iclog->ic_prev = prev_iclog; /* re-write 1st prev ptr */ in xlog_alloc_log()
1490 log->l_ioend_workqueue = alloc_workqueue("xfs-log/%s", in xlog_alloc_log()
1494 if (!log->l_ioend_workqueue) in xlog_alloc_log()
1497 error = xlog_cil_init(log); in xlog_alloc_log()
1500 return log; in xlog_alloc_log()
1503 destroy_workqueue(log->l_ioend_workqueue); in xlog_alloc_log()
1505 for (iclog = log->l_iclog; iclog; iclog = prev_iclog) { in xlog_alloc_log()
1509 if (prev_iclog == log->l_iclog) in xlog_alloc_log()
1513 kfree(log); in xlog_alloc_log()
1523 struct xlog *log, in xlog_pack_data() argument
1543 if (xfs_has_logv2(log->l_mp)) { in xlog_pack_data()
1554 for (i = 1; i < log->l_iclog_heads; i++) in xlog_pack_data()
1560 * Calculate the checksum for a log buffer.
1567 struct xlog *log, in xlog_cksum() argument
1580 if (xfs_has_logv2(log->l_mp)) { in xlog_cksum()
1632 struct xlog *log, in xlog_write_iclog() argument
1637 ASSERT(bno < log->l_logBBsize); in xlog_write_iclog()
1646 * across the log IO to achieve that. in xlog_write_iclog()
1649 if (xlog_is_shutdown(log)) { in xlog_write_iclog()
1652 * the log state machine to propagate I/O errors instead of in xlog_write_iclog()
1663 * writeback throttle from throttling log writes behind background in xlog_write_iclog()
1666 bio_init(&iclog->ic_bio, log->l_targ->bt_bdev, iclog->ic_bvec, in xlog_write_iclog()
1669 iclog->ic_bio.bi_iter.bi_sector = log->l_logBBstart + bno; in xlog_write_iclog()
1676 * For external log devices, we also need to flush the data in xlog_write_iclog()
1679 * but it *must* complete before we issue the external log IO. in xlog_write_iclog()
1682 * writeback from the log succeeded. Repeating the flush is in xlog_write_iclog()
1683 * not possible, hence we must shut down with log IO error to in xlog_write_iclog()
1686 if (log->l_targ != log->l_mp->m_ddev_targp && in xlog_write_iclog()
1687 blkdev_issue_flush(log->l_mp->m_ddev_targp->bt_bdev)) in xlog_write_iclog()
1702 * If this log buffer would straddle the end of the log we will have in xlog_write_iclog()
1705 if (bno + BTOBB(count) > log->l_logBBsize) { in xlog_write_iclog()
1708 split = bio_split(&iclog->ic_bio, log->l_logBBsize - bno, in xlog_write_iclog()
1714 iclog->ic_bio.bi_iter.bi_sector = log->l_logBBstart; in xlog_write_iclog()
1720 xlog_force_shutdown(log, SHUTDOWN_LOG_IO_ERROR); in xlog_write_iclog()
1728 * written to the start of the log. Watch out for the header magic
1733 struct xlog *log, in xlog_split_iclog() argument
1738 unsigned int split_offset = BBTOB(log->l_logBBsize - bno); in xlog_split_iclog()
1752 struct xlog *log, in xlog_calc_iclog_size() argument
1759 count_init = log->l_iclog_hsize + iclog->ic_offset; in xlog_calc_iclog_size()
1760 count = roundup(count_init, log->l_iclog_roundoff); in xlog_calc_iclog_size()
1765 ASSERT(*roundoff < log->l_iclog_roundoff); in xlog_calc_iclog_size()
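xlog_calc_iclog_size() is simple arithmetic: the write size is the iclog header plus the data used so far, rounded up to the log's roundoff granularity (BBSIZE, or the log stripe unit when one is configured), and the padding produced by that roundup is reported back so xlog_sync() can re-add it to the grant heads. A standalone sketch of the calculation:

/* Sketch of the arithmetic in xlog_calc_iclog_size(): round the header plus
 * the used data up to the roundoff granularity and report the padding.
 */
#include <stdio.h>

static unsigned int fake_roundup(unsigned int x, unsigned int granularity)
{
	return ((x + granularity - 1) / granularity) * granularity;
}

static unsigned int fake_calc_iclog_size(unsigned int iclog_hsize,
					 unsigned int ic_offset,
					 unsigned int iclog_roundoff,
					 unsigned int *roundoff)
{
	unsigned int count_init = iclog_hsize + ic_offset;
	unsigned int count = fake_roundup(count_init, iclog_roundoff);

	*roundoff = count - count_init;	/* always < iclog_roundoff */
	return count;
}

int main(void)
{
	unsigned int roundoff;
	unsigned int count = fake_calc_iclog_size(512, 7000, 4096, &roundoff);

	printf("write %u bytes, %u of which are roundoff padding\n",
	       count, roundoff);
	return 0;
}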
1770 * Flush out the in-core log (iclog) to the on-disk log in an asynchronous
1772 * ptr in the log to point to the next available iclog. This allows further
1774 * Before an in-core log can be written out, the data section must be scanned
1786 * log will require grabbing the lock though.
1788 * The entire log manager uses a logical block numbering scheme. Only
1789 * xlog_write_iclog knows about the fact that the log may not start with
1794 struct xlog *log, in xlog_sync() argument
1806 count = xlog_calc_iclog_size(log, iclog, &roundoff); in xlog_sync()
1816 xlog_grant_add_space(&log->l_reserve_head, roundoff); in xlog_sync()
1817 xlog_grant_add_space(&log->l_write_head, roundoff); in xlog_sync()
1821 xlog_pack_data(log, iclog, roundoff); in xlog_sync()
1825 if (xfs_has_logv2(log->l_mp)) in xlog_sync()
1829 XFS_STATS_INC(log->l_mp, xs_log_writes); in xlog_sync()
1830 XFS_STATS_ADD(log->l_mp, xs_log_blocks, BTOBB(count)); in xlog_sync()
1835 if (bno + BTOBB(count) > log->l_logBBsize) in xlog_sync()
1836 xlog_split_iclog(log, &iclog->ic_header, bno, count); in xlog_sync()
1839 iclog->ic_header.h_crc = xlog_cksum(log, &iclog->ic_header, in xlog_sync()
1842 * Intentionally corrupt the log record CRC based on the error injection in xlog_sync()
1843 * frequency, if defined. This facilitates testing log recovery in the in xlog_sync()
1844 * event of torn writes. Hence, set the IOABORT state to abort the log in xlog_sync()
1849 if (XFS_TEST_ERROR(false, log->l_mp, XFS_ERRTAG_LOG_BAD_CRC)) { in xlog_sync()
1852 xfs_warn(log->l_mp, in xlog_sync()
1853 "Intentionally corrupted log record at LSN 0x%llx. Shutdown imminent.", in xlog_sync()
1857 xlog_verify_iclog(log, iclog, count); in xlog_sync()
1858 xlog_write_iclog(log, iclog, bno, count); in xlog_sync()
1862 * Deallocate a log structure
1866 struct xlog *log) in xlog_dealloc_log() argument
1873 * iclog EIO error will try to shut down the log, which accesses the in xlog_dealloc_log()
1876 xlog_cil_destroy(log); in xlog_dealloc_log()
1878 iclog = log->l_iclog; in xlog_dealloc_log()
1879 for (i = 0; i < log->l_iclog_bufs; i++) { in xlog_dealloc_log()
1886 log->l_mp->m_log = NULL; in xlog_dealloc_log()
1887 destroy_workqueue(log->l_ioend_workqueue); in xlog_dealloc_log()
1888 kfree(log); in xlog_dealloc_log()
1896 struct xlog *log, in xlog_state_finish_copy() argument
1901 lockdep_assert_held(&log->l_icloglock); in xlog_state_finish_copy()
1935 xfs_warn(mp, " log res = %d", tp->t_log_res); in xlog_print_trans()
1936 xfs_warn(mp, " log count = %d", tp->t_log_count); in xlog_print_trans()
1941 /* dump each log item */ in xlog_print_trans()
1947 xfs_warn(mp, "log item: "); in xlog_print_trans()
1957 /* dump each iovec for the log item */ in xlog_print_trans()
1995 * Write log vectors into a single iclog which is guaranteed by the caller
1996 * to have enough space to write the entire log vector into.
2014 * Ordered log vectors have no regions to write so this in xlog_write_full()
2037 struct xlog *log = iclog->ic_log; in xlog_write_get_more_iclog_space() local
2040 spin_lock(&log->l_icloglock); in xlog_write_get_more_iclog_space()
2042 xlog_state_finish_copy(log, iclog, *record_cnt, *data_cnt); in xlog_write_get_more_iclog_space()
2043 error = xlog_state_release_iclog(log, iclog, ticket); in xlog_write_get_more_iclog_space()
2044 spin_unlock(&log->l_icloglock); in xlog_write_get_more_iclog_space()
2048 error = xlog_state_get_iclog_space(log, len, &iclog, ticket, in xlog_write_get_more_iclog_space()
2059 * Write log vectors into a single iclog which is smaller than the current chain
2061 * and then stop. We return the log vector that is to be written that cannot
2087 * length otherwise log recovery will just skip over it and in xlog_write_partial()
2134 * space for log transaction opheaders left in the current in xlog_write_partial()
2188 * No more iovecs remain in this logvec so return the next log vec to in xlog_write_partial()
2196 * Write some region out to in-core log
2210 * 2. Write log operation header (header per region)
2217 * 5. Release iclog for potential flush to on-disk log.
2227 * on all log operation writes which don't contain the end of the
2228 * region. The XLOG_END_TRANS bit is used for the in-core log
2237 struct xlog *log, in xlog_write() argument
2252 xfs_alert_tag(log->l_mp, XFS_PTAG_LOGRES, in xlog_write()
2254 xlog_print_tic_res(log->l_mp, ticket); in xlog_write()
2255 xlog_force_shutdown(log, SHUTDOWN_LOG_IO_ERROR); in xlog_write()
2258 error = xlog_state_get_iclog_space(log, len, &iclog, ticket, in xlog_write()
2275 * If the entire log vec does not fit in the iclog, punt it to in xlog_write()
2303 spin_lock(&log->l_icloglock); in xlog_write()
2304 xlog_state_finish_copy(log, iclog, record_cnt, 0); in xlog_write()
2305 error = xlog_state_release_iclog(log, iclog, ticket); in xlog_write()
2306 spin_unlock(&log->l_icloglock); in xlog_write()
2351 struct xlog *log, in xlog_state_activate_iclogs() argument
2354 struct xlog_in_core *iclog = log->l_iclog; in xlog_state_activate_iclogs()
2365 } while ((iclog = iclog->ic_next) != log->l_iclog); in xlog_state_activate_iclogs()
2404 struct xlog *log, in xlog_state_clean_iclog() argument
2413 xlog_state_activate_iclogs(log, &iclogs_changed); in xlog_state_clean_iclog()
2417 log->l_covered_state = xlog_covered_state(log->l_covered_state, in xlog_state_clean_iclog()
2424 struct xlog *log) in xlog_get_lowest_lsn() argument
2426 struct xlog_in_core *iclog = log->l_iclog; in xlog_get_lowest_lsn()
2437 } while ((iclog = iclog->ic_next) != log->l_iclog); in xlog_get_lowest_lsn()
2449 struct xlog *log, in xlog_state_iodone_process_iclog() argument
2470 lowest_lsn = xlog_get_lowest_lsn(log); in xlog_state_iodone_process_iclog()
2479 xlog_state_clean_iclog(log, iclog); in xlog_state_iodone_process_iclog()
2504 struct xlog *log) in xlog_state_do_iclog_callbacks() argument
2505 __releases(&log->l_icloglock) in xlog_state_do_iclog_callbacks()
2506 __acquires(&log->l_icloglock) in xlog_state_do_iclog_callbacks()
2508 struct xlog_in_core *first_iclog = log->l_iclog; in xlog_state_do_iclog_callbacks()
2515 if (xlog_state_iodone_process_iclog(log, iclog)) in xlog_state_do_iclog_callbacks()
2522 spin_unlock(&log->l_icloglock); in xlog_state_do_iclog_callbacks()
2529 spin_lock(&log->l_icloglock); in xlog_state_do_iclog_callbacks()
2530 xlog_state_clean_iclog(log, iclog); in xlog_state_do_iclog_callbacks()
2544 struct xlog *log) in xlog_state_do_callback() argument
2549 spin_lock(&log->l_icloglock); in xlog_state_do_callback()
2550 while (xlog_state_do_iclog_callbacks(log)) { in xlog_state_do_callback()
2551 if (xlog_is_shutdown(log)) in xlog_state_do_callback()
2557 xfs_warn(log->l_mp, in xlog_state_do_callback()
2563 if (log->l_iclog->ic_state == XLOG_STATE_ACTIVE) in xlog_state_do_callback()
2564 wake_up_all(&log->l_flush_wait); in xlog_state_do_callback()
2566 spin_unlock(&log->l_icloglock); in xlog_state_do_callback()
2574 * global state machine log lock.
2580 struct xlog *log = iclog->ic_log; in xlog_state_done_syncing() local
2582 spin_lock(&log->l_icloglock); in xlog_state_done_syncing()
2588 * split log writes, on the second, we shut down the file system and in xlog_state_done_syncing()
2591 if (!xlog_is_shutdown(log)) { in xlog_state_done_syncing()
2602 spin_unlock(&log->l_icloglock); in xlog_state_done_syncing()
2603 xlog_state_do_callback(log); in xlog_state_done_syncing()
2607 * If the head of the in-core log ring is not (ACTIVE or DIRTY), then we must
2617 * log's data space.
2618 * * in-core log pointer to which xlog_write() should write.
2619 * * boolean indicating this is a continued write to an in-core log.
2620 * If this is the last write, then the in-core log's offset field
2626 struct xlog *log, in xlog_state_get_iclog_space() argument
2637 spin_lock(&log->l_icloglock); in xlog_state_get_iclog_space()
2638 if (xlog_is_shutdown(log)) { in xlog_state_get_iclog_space()
2639 spin_unlock(&log->l_icloglock); in xlog_state_get_iclog_space()
2643 iclog = log->l_iclog; in xlog_state_get_iclog_space()
2645 XFS_STATS_INC(log->l_mp, xs_log_noiclogs); in xlog_state_get_iclog_space()
2647 /* Wait for log writes to have flushed */ in xlog_state_get_iclog_space()
2648 xlog_wait(&log->l_flush_wait, &log->l_icloglock); in xlog_state_get_iclog_space()
2665 ticket->t_curr_res -= log->l_iclog_hsize; in xlog_state_get_iclog_space()
2666 head->h_cycle = cpu_to_be32(log->l_curr_cycle); in xlog_state_get_iclog_space()
2668 xlog_assign_lsn(log->l_curr_cycle, log->l_curr_block)); in xlog_state_get_iclog_space()
2669 ASSERT(log->l_curr_block >= 0); in xlog_state_get_iclog_space()
2684 xlog_state_switch_iclogs(log, iclog, iclog->ic_size); in xlog_state_get_iclog_space()
2694 error = xlog_state_release_iclog(log, iclog, ticket); in xlog_state_get_iclog_space()
2695 spin_unlock(&log->l_icloglock); in xlog_state_get_iclog_space()
2710 xlog_state_switch_iclogs(log, iclog, iclog->ic_size); in xlog_state_get_iclog_space()
2714 spin_unlock(&log->l_icloglock); in xlog_state_get_iclog_space()
2729 struct xlog *log, in xfs_log_ticket_regrant() argument
2732 trace_xfs_log_ticket_regrant(log, ticket); in xfs_log_ticket_regrant()
2737 xlog_grant_sub_space(&log->l_reserve_head, ticket->t_curr_res); in xfs_log_ticket_regrant()
2738 xlog_grant_sub_space(&log->l_write_head, ticket->t_curr_res); in xfs_log_ticket_regrant()
2741 trace_xfs_log_ticket_regrant_sub(log, ticket); in xfs_log_ticket_regrant()
2745 xlog_grant_add_space(&log->l_reserve_head, ticket->t_unit_res); in xfs_log_ticket_regrant()
2746 trace_xfs_log_ticket_regrant_exit(log, ticket); in xfs_log_ticket_regrant()
2770 struct xlog *log, in xfs_log_ticket_ungrant() argument
2775 trace_xfs_log_ticket_ungrant(log, ticket); in xfs_log_ticket_ungrant()
2780 trace_xfs_log_ticket_ungrant_sub(log, ticket); in xfs_log_ticket_ungrant()
2792 xlog_grant_sub_space(&log->l_reserve_head, bytes); in xfs_log_ticket_ungrant()
2793 xlog_grant_sub_space(&log->l_write_head, bytes); in xfs_log_ticket_ungrant()
2795 trace_xfs_log_ticket_ungrant_exit(log, ticket); in xfs_log_ticket_ungrant()
2797 xfs_log_space_wake(log->l_mp); in xfs_log_ticket_ungrant()
2807 struct xlog *log, in xlog_state_switch_iclogs() argument
2812 assert_spin_locked(&log->l_icloglock); in xlog_state_switch_iclogs()
2818 iclog->ic_header.h_prev_block = cpu_to_be32(log->l_prev_block); in xlog_state_switch_iclogs()
2819 log->l_prev_block = log->l_curr_block; in xlog_state_switch_iclogs()
2820 log->l_prev_cycle = log->l_curr_cycle; in xlog_state_switch_iclogs()
2822 /* roll log?: ic_offset changed later */ in xlog_state_switch_iclogs()
2823 log->l_curr_block += BTOBB(eventual_size)+BTOBB(log->l_iclog_hsize); in xlog_state_switch_iclogs()
2825 /* Round up to next log-sunit */ in xlog_state_switch_iclogs()
2826 if (log->l_iclog_roundoff > BBSIZE) { in xlog_state_switch_iclogs()
2827 uint32_t sunit_bb = BTOBB(log->l_iclog_roundoff); in xlog_state_switch_iclogs()
2828 log->l_curr_block = roundup(log->l_curr_block, sunit_bb); in xlog_state_switch_iclogs()
2831 if (log->l_curr_block >= log->l_logBBsize) { in xlog_state_switch_iclogs()
2835 * when the log wraps to the next cycle. This is to support the in xlog_state_switch_iclogs()
2839 log->l_curr_block -= log->l_logBBsize; in xlog_state_switch_iclogs()
2840 ASSERT(log->l_curr_block >= 0); in xlog_state_switch_iclogs()
2842 log->l_curr_cycle++; in xlog_state_switch_iclogs()
2843 if (log->l_curr_cycle == XLOG_HEADER_MAGIC_NUM) in xlog_state_switch_iclogs()
2844 log->l_curr_cycle++; in xlog_state_switch_iclogs()
2846 ASSERT(iclog == log->l_iclog); in xlog_state_switch_iclogs()
2847 log->l_iclog = iclog->ic_next; in xlog_state_switch_iclogs()
2880 * Write out all data in the in-core log as of this exact moment in time.
2882 * Data may be written to the in-core log during this call. However,
2911 struct xlog *log = mp->m_log; in xfs_log_force() local
2917 xlog_cil_force(log); in xfs_log_force()
2919 spin_lock(&log->l_icloglock); in xfs_log_force()
2920 if (xlog_is_shutdown(log)) in xfs_log_force()
2923 iclog = log->l_iclog; in xfs_log_force()
2954 xlog_state_switch_iclogs(log, iclog, 0); in xfs_log_force()
2970 spin_unlock(&log->l_icloglock); in xfs_log_force()
2973 spin_unlock(&log->l_icloglock); in xfs_log_force()
2978 * Force the log to a specific LSN.
2982 * If it is in the ACTIVE state, move the in-core log into the WANT_SYNC
2988 * specific in-core log. When the given in-core log finally completes its write in xlog_force_lsn()
2993 struct xlog *log, in xlog_force_lsn() argument
3002 spin_lock(&log->l_icloglock); in xlog_force_lsn()
3003 if (xlog_is_shutdown(log)) in xlog_force_lsn()
3006 iclog = log->l_iclog; in xlog_force_lsn()
3010 if (iclog == log->l_iclog) in xlog_force_lsn()
3025 * refcnt so we can release the log (which drops the ref count). in xlog_force_lsn()
3035 &log->l_icloglock); in xlog_force_lsn()
3068 spin_unlock(&log->l_icloglock); in xlog_force_lsn()
3071 spin_unlock(&log->l_icloglock); in xlog_force_lsn()
3076 * Force the log to a specific checkpoint sequence.
3081 * a synchronous log force, we will wait on the iclog with the LSN returned by
3091 struct xlog *log = mp->m_log; in xfs_log_force_seq() local
3099 lsn = xlog_cil_force_seq(log, seq); in xfs_log_force_seq()
3103 ret = xlog_force_lsn(log, lsn, flags, log_flushed, false); in xfs_log_force_seq()
3106 ret = xlog_force_lsn(log, lsn, flags, log_flushed, true); in xfs_log_force_seq()
3133 * Figure out the total log space unit (in bytes) that would be
3134 * required for a log ticket.
3138 struct xlog *log, in xlog_calc_unit_res() argument
3146 * Permanent reservations have up to 'cnt'-1 active log operations in xlog_calc_unit_res()
3147 * in the log. A unit in this case is the amount of space for one in xlog_calc_unit_res()
3148 * of these log operations. Normal reservations have a cnt of 1 in xlog_calc_unit_res()
3152 * which occupy space in the on-disk log. in xlog_calc_unit_res()
3167 * Therefore the commit record is in its own Log Record. in xlog_calc_unit_res()
3189 * increase the space required enough to require more log and op in xlog_calc_unit_res()
3197 * Fundamentally, this means we must pass the entire log vector to in xlog_calc_unit_res()
3200 iclog_space = log->l_iclog_size - log->l_iclog_hsize; in xlog_calc_unit_res()
3212 unit_bytes += log->l_iclog_hsize * num_headers; in xlog_calc_unit_res()
3215 unit_bytes += log->l_iclog_hsize; in xlog_calc_unit_res()
3218 unit_bytes += 2 * log->l_iclog_roundoff; in xlog_calc_unit_res()
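The xlog_calc_unit_res() fragments show how the reservation unit is padded once the payload size is known: one record header for every iclog the write might span (each iclog's usable space being l_iclog_size minus l_iclog_hsize), one more header for the commit record, and twice the roundoff granularity. Below is a hedged sketch of just that padding arithmetic; opheader overhead and the rest of the real calculation are omitted.

/* Hedged sketch of the tail end of xlog_calc_unit_res(): pad the payload
 * for record headers, the commit record header, and worst-case roundoff.
 */
#include <stdio.h>

static unsigned int fake_howmany(unsigned int x, unsigned int y)
{
	return (x + y - 1) / y;
}

static unsigned int fake_calc_unit_res(unsigned int unit_bytes,
				       unsigned int iclog_size,
				       unsigned int iclog_hsize,
				       unsigned int iclog_roundoff)
{
	/* Usable payload per iclog excludes that iclog's own header. */
	unsigned int iclog_space = iclog_size - iclog_hsize;
	unsigned int num_headers = fake_howmany(unit_bytes, iclog_space);

	unit_bytes += iclog_hsize * num_headers;  /* record headers */
	unit_bytes += iclog_hsize;                /* commit record header */
	unit_bytes += 2 * iclog_roundoff;         /* worst-case roundoff */
	return unit_bytes;
}

int main(void)
{
	printf("unit reservation: %u bytes\n",
	       fake_calc_unit_res(100000, 32768, 512, 4096));
	return 0;
}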
3234 * Allocate and initialise a new log ticket.
3238 struct xlog *log, in xlog_ticket_alloc() argument
3249 unit_res = xlog_calc_unit_res(log, unit_bytes, &tic->t_iclog_hdrs); in xlog_ticket_alloc()
3268 struct xlog *log, in xlog_verify_dump_tail() argument
3271 xfs_alert(log->l_mp, in xlog_verify_dump_tail()
3272 "ran out of log space tail 0x%llx/0x%llx, head lsn 0x%llx, head 0x%x/0x%x, prev head 0x%x/0x%x", in xlog_verify_dump_tail()
3274 atomic64_read(&log->l_tail_lsn), in xlog_verify_dump_tail()
3275 log->l_ailp->ail_head_lsn, in xlog_verify_dump_tail()
3276 log->l_curr_cycle, log->l_curr_block, in xlog_verify_dump_tail()
3277 log->l_prev_cycle, log->l_prev_block); in xlog_verify_dump_tail()
3278 xfs_alert(log->l_mp, in xlog_verify_dump_tail()
3280 atomic64_read(&log->l_write_head.grant), in xlog_verify_dump_tail()
3281 atomic64_read(&log->l_reserve_head.grant), in xlog_verify_dump_tail()
3282 log->l_tail_space, log->l_logsize, in xlog_verify_dump_tail()
3286 /* Check if the new iclog will fit in the log. */
3289 struct xlog *log, in xlog_verify_tail_lsn() argument
3295 if (CYCLE_LSN(tail_lsn) == log->l_prev_cycle) { in xlog_verify_tail_lsn()
3296 blocks = log->l_logBBsize - in xlog_verify_tail_lsn()
3297 (log->l_prev_block - BLOCK_LSN(tail_lsn)); in xlog_verify_tail_lsn()
3299 BTOBB(log->l_iclog_hsize)) { in xlog_verify_tail_lsn()
3300 xfs_emerg(log->l_mp, in xlog_verify_tail_lsn()
3301 "%s: ran out of log space", __func__); in xlog_verify_tail_lsn()
3302 xlog_verify_dump_tail(log, iclog); in xlog_verify_tail_lsn()
3307 if (CYCLE_LSN(tail_lsn) + 1 != log->l_prev_cycle) { in xlog_verify_tail_lsn()
3308 xfs_emerg(log->l_mp, "%s: head has wrapped tail.", __func__); in xlog_verify_tail_lsn()
3309 xlog_verify_dump_tail(log, iclog); in xlog_verify_tail_lsn()
3312 if (BLOCK_LSN(tail_lsn) == log->l_prev_block) { in xlog_verify_tail_lsn()
3313 xfs_emerg(log->l_mp, "%s: tail wrapped", __func__); in xlog_verify_tail_lsn()
3314 xlog_verify_dump_tail(log, iclog); in xlog_verify_tail_lsn()
3318 blocks = BLOCK_LSN(tail_lsn) - log->l_prev_block; in xlog_verify_tail_lsn()
3320 xfs_emerg(log->l_mp, "%s: ran out of iclog space", __func__); in xlog_verify_tail_lsn()
3321 xlog_verify_dump_tail(log, iclog); in xlog_verify_tail_lsn()
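The xlog_verify_tail_lsn() fragments encode the wrap-around arithmetic for how much room remains ahead of the head: when the tail is in the same cycle as the head, the free space wraps past the physical end of the log; when the head is exactly one cycle ahead, the free space is the gap up to the tail block; anything else means the head has overrun the tail. A small sketch of that arithmetic, simplified and without the iclog-size check the real function performs:

/* Sketch of the free-space-to-tail arithmetic from xlog_verify_tail_lsn(). */
#include <stdio.h>

static long fake_blocks_to_tail(long logbb_size,
				long tail_cycle, long tail_block,
				long head_cycle, long head_block)
{
	if (tail_cycle == head_cycle) {
		/* Tail is behind the head in the same cycle: free space
		 * wraps around the physical end of the log. */
		return logbb_size - (head_block - tail_block);
	}
	if (tail_cycle + 1 == head_cycle) {
		/* Head is one cycle ahead: free space is the gap up to
		 * the tail block. */
		return tail_block - head_block;
	}
	return -1;	/* head has wrapped the tail: corruption */
}

int main(void)
{
	/* 64k-block log, head at block 60000 of cycle 7, tail at 1000/7. */
	printf("%ld blocks free\n",
	       fake_blocks_to_tail(65536, 7, 1000, 7, 60000));
	/* Head at block 500 of cycle 8, tail still at 1000/7. */
	printf("%ld blocks free\n",
	       fake_blocks_to_tail(65536, 7, 1000, 8, 500));
	return 0;
}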
3331 * 4. Check fields of each log operation header for:
3334 * C. Length in log record header is correct according to the
3337 * log, check the preceding blocks of the physical log to make sure all
3342 struct xlog *log, in xlog_verify_iclog() argument
3356 spin_lock(&log->l_icloglock); in xlog_verify_iclog()
3357 icptr = log->l_iclog; in xlog_verify_iclog()
3358 for (i = 0; i < log->l_iclog_bufs; i++, icptr = icptr->ic_next) in xlog_verify_iclog()
3361 if (icptr != log->l_iclog) in xlog_verify_iclog()
3362 xfs_emerg(log->l_mp, "%s: corrupt iclog ring", __func__); in xlog_verify_iclog()
3363 spin_unlock(&log->l_icloglock); in xlog_verify_iclog()
3365 /* check log magic numbers */ in xlog_verify_iclog()
3367 xfs_emerg(log->l_mp, "%s: invalid magic num", __func__); in xlog_verify_iclog()
3373 xfs_emerg(log->l_mp, "%s: unexpected magic num", in xlog_verify_iclog()
3403 xfs_warn(log->l_mp, in xlog_verify_iclog()
3430 * Perform a forced shutdown on the log.
3432 * This can be called from low level log code to trigger a shutdown, or from the
3436 * a. if the shutdown was not due to a log IO error, flush the logs to
3438 * b. the log gets atomically marked 'XLOG_IO_ERROR' for all interested
3440 * c. Tasks sleeping on log reservations, pinned objects and
3442 * d. The mount is also marked as shut down so that log triggered shutdowns
3445 * Return true if the shutdown cause was a log IO error and we actually shut the
3446 * log down.
3450 struct xlog *log, in xlog_force_shutdown() argument
3455 if (!log) in xlog_force_shutdown()
3459 * Flush all the completed transactions to disk before marking the log in xlog_force_shutdown()
3460 * being shut down. We need to do this first as shutting down the log in xlog_force_shutdown()
3461 * before the force will prevent the log force from flushing the iclogs in xlog_force_shutdown()
3465 * we don't want to touch the log because we don't want to perturb the in xlog_force_shutdown()
3467 * avoid a log force in this case. in xlog_force_shutdown()
3469 * If we are shutting down due to a log IO error, then we must avoid in xlog_force_shutdown()
3470 * trying to write the log as that may just result in more IO errors and in xlog_force_shutdown()
3473 if (!log_error && !xlog_in_recovery(log)) in xlog_force_shutdown()
3474 xfs_log_force(log->l_mp, XFS_LOG_SYNC); in xlog_force_shutdown()
3482 * Much of the log state machine transitions assume that shutdown state in xlog_force_shutdown()
3483 * cannot change once they hold the log->l_icloglock. Hence we need to in xlog_force_shutdown()
3487 spin_lock(&log->l_icloglock); in xlog_force_shutdown()
3488 if (test_and_set_bit(XLOG_IO_ERROR, &log->l_opstate)) { in xlog_force_shutdown()
3489 spin_unlock(&log->l_icloglock); in xlog_force_shutdown()
3492 spin_unlock(&log->l_icloglock); in xlog_force_shutdown()
3495 * If this log shutdown also sets the mount shutdown state, issue a in xlog_force_shutdown()
3498 if (!xfs_set_shutdown(log->l_mp)) { in xlog_force_shutdown()
3499 xfs_alert_tag(log->l_mp, XFS_PTAG_SHUTDOWN_LOGERROR, in xlog_force_shutdown()
3500 "Filesystem has been shut down due to log error (0x%x).", in xlog_force_shutdown()
3502 xfs_alert(log->l_mp, in xlog_force_shutdown()
3509 * We don't want anybody waiting for log reservations after this. That in xlog_force_shutdown()
3515 xlog_grant_head_wake_all(&log->l_reserve_head); in xlog_force_shutdown()
3516 xlog_grant_head_wake_all(&log->l_write_head); in xlog_force_shutdown()
3520 * as if the log writes were completed. The abort handling in the log in xlog_force_shutdown()
3524 spin_lock(&log->l_cilp->xc_push_lock); in xlog_force_shutdown()
3525 wake_up_all(&log->l_cilp->xc_start_wait); in xlog_force_shutdown()
3526 wake_up_all(&log->l_cilp->xc_commit_wait); in xlog_force_shutdown()
3527 spin_unlock(&log->l_cilp->xc_push_lock); in xlog_force_shutdown()
3529 spin_lock(&log->l_icloglock); in xlog_force_shutdown()
3530 xlog_state_shutdown_callbacks(log); in xlog_force_shutdown()
3531 spin_unlock(&log->l_icloglock); in xlog_force_shutdown()
3533 wake_up_var(&log->l_opstate); in xlog_force_shutdown()
3539 struct xlog *log) in xlog_iclogs_empty() argument
3543 iclog = log->l_iclog; in xlog_iclogs_empty()
3551 } while (iclog != log->l_iclog); in xlog_iclogs_empty()
3564 struct xlog *log = mp->m_log; in xfs_log_check_lsn() local
3568 * norecovery mode skips mount-time log processing and unconditionally in xfs_log_check_lsn()
3586 spin_lock(&log->l_icloglock); in xfs_log_check_lsn()
3591 log->l_curr_cycle, log->l_curr_block); in xfs_log_check_lsn()
3592 spin_unlock(&log->l_icloglock); in xfs_log_check_lsn()