Lines Matching full:log

51  * Verify the log-relative block number and length in basic blocks are valid for
52 * an operation involving the given XFS log buffer. Returns true if the fields
57 struct xlog *log, in xlog_verify_bno() argument
61 if (blk_no < 0 || blk_no >= log->l_logBBsize) in xlog_verify_bno()
63 if (bbcount <= 0 || (blk_no + bbcount) > log->l_logBBsize) in xlog_verify_bno()
69 * Allocate a buffer to hold log data. The buffer needs to be able to map to
70 * a range of nbblks basic blocks at any valid offset within the log.
74 struct xlog *log, in xlog_alloc_buffer() argument
78 * Pass log block 0 since we don't have an addr yet, buffer will be in xlog_alloc_buffer()
81 if (XFS_IS_CORRUPT(log->l_mp, !xlog_verify_bno(log, 0, nbblks))) { in xlog_alloc_buffer()
82 xfs_warn(log->l_mp, "Invalid block length (0x%x) for buffer", in xlog_alloc_buffer()
88 * We do log I/O in units of log sectors (a power-of-2 multiple of the in xlog_alloc_buffer()
90 * the basic blocks required for complete log sectors. in xlog_alloc_buffer()
96 * issue. Nor will this be a problem if the log I/O is done in basic in xlog_alloc_buffer()
98 * extra log sector to ensure there's space to accommodate this in xlog_alloc_buffer()
101 if (nbblks > 1 && log->l_sectBBsize > 1) in xlog_alloc_buffer()
102 nbblks += log->l_sectBBsize; in xlog_alloc_buffer()
103 nbblks = round_up(nbblks, log->l_sectBBsize); in xlog_alloc_buffer()
109 * in a log buffer. The buffer covers a log sector-aligned region.
113 struct xlog *log, in xlog_align() argument
116 return BBTOB(blk_no & ((xfs_daddr_t)log->l_sectBBsize - 1)); in xlog_align()
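Editor's note: the two helpers above (together with the I/O path below) only do sector arithmetic: the allocation is padded so that any sector-aligned read still covers the requested range, and xlog_align locates the caller's block inside that padded buffer. A standalone, hedged sketch of the same arithmetic with hypothetical numbers (4k log sectors, i.e. 8 basic blocks):

#include <stdio.h>
#include <stdint.h>

#define BBSHIFT		9			/* 512-byte basic blocks */
#define BBTOB(bbs)	((bbs) << BBSHIFT)

static unsigned int round_up_bbs(unsigned int n, unsigned int align)
{
	return ((n + align - 1) / align) * align;
}

int main(void)
{
	unsigned int sect_bbs = 8;	/* assumed: 4k log sector = 8 basic blocks */
	unsigned int nbblks = 5;	/* caller asks for 5 basic blocks */
	uint64_t blk_no = 13;		/* log-relative block to read */

	/* xlog_alloc_buffer: pad by one sector, then round up to whole sectors */
	unsigned int alloc_bbs = round_up_bbs(nbblks + sect_bbs, sect_bbs);

	/* the I/O path rounds the read down to a sector boundary ... */
	uint64_t read_start = blk_no & ~((uint64_t)sect_bbs - 1);
	/* ... and xlog_align gives the byte offset of blk_no inside the buffer */
	unsigned int data_off = BBTOB((unsigned int)(blk_no & (sect_bbs - 1)));

	printf("allocate %u bbs, read from bb %llu, data at byte offset %u\n",
	       alloc_bbs, (unsigned long long)read_start, data_off);
	return 0;
}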
121 struct xlog *log, in xlog_do_io() argument
129 if (XFS_IS_CORRUPT(log->l_mp, !xlog_verify_bno(log, blk_no, nbblks))) { in xlog_do_io()
130 xfs_warn(log->l_mp, in xlog_do_io()
131 "Invalid log block/length (0x%llx, 0x%x) for buffer", in xlog_do_io()
136 blk_no = round_down(blk_no, log->l_sectBBsize); in xlog_do_io()
137 nbblks = round_up(nbblks, log->l_sectBBsize); in xlog_do_io()
140 error = xfs_rw_bdev(log->l_targ->bt_bdev, log->l_logBBstart + blk_no, in xlog_do_io()
142 if (error && !xlog_is_shutdown(log)) { in xlog_do_io()
143 xfs_alert(log->l_mp, in xlog_do_io()
144 "log recovery %s I/O error at daddr 0x%llx len %d error %d", in xlog_do_io()
153 struct xlog *log, in xlog_bread_noalign() argument
158 return xlog_do_io(log, blk_no, nbblks, data, REQ_OP_READ); in xlog_bread_noalign()
163 struct xlog *log, in xlog_bread() argument
171 error = xlog_do_io(log, blk_no, nbblks, data, REQ_OP_READ); in xlog_bread()
173 *offset = data + xlog_align(log, blk_no); in xlog_bread()
179 struct xlog *log, in xlog_bwrite() argument
184 return xlog_do_io(log, blk_no, nbblks, data, REQ_OP_WRITE); in xlog_bwrite()
189 * dump debug superblock and log record information
198 xfs_debug(mp, " log : uuid = %pU, fmt = %d", in xlog_header_check_dump()
206 * check log record header for recovery
218 * a dirty log created in IRIX. in xlog_header_check_recover()
222 "dirty log written in incompatible format - can't recover"); in xlog_header_check_recover()
229 "dirty log entry has mismatched uuid - can't recover"); in xlog_header_check_recover()
237 * read the head block of the log and check the header
249 * h_fs_uuid is null, we assume this log was last mounted in xlog_header_check_mount()
252 xfs_warn(mp, "null uuid in log - IRIX style log"); in xlog_header_check_mount()
255 xfs_warn(mp, "log has mismatched uuid - can't recover"); in xlog_header_check_mount()
264 * log which contains the given cycle. It uses a binary search algorithm.
270 struct xlog *log, in xlog_find_cycle_start() argument
285 error = xlog_bread(log, mid_blk, 1, buffer, &offset); in xlog_find_cycle_start()
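Editor's note: xlog_find_cycle_start narrows the block range by reading the middle block's cycle number each iteration until it converges on the first block stamped with the target (older) cycle, which is the head candidate. A simplified, hedged illustration of that bisection over an in-memory array of cycle numbers (the real code reads one block per step with xlog_bread):

#include <stdio.h>
#include <stdint.h>

/*
 * Find the first block stamped with end_cycle, given that block first_blk
 * carries a different (newer) cycle and block end_blk carries end_cycle.
 */
static uint64_t find_cycle_start(const uint32_t *cycles, uint64_t first_blk,
				 uint64_t end_blk, uint32_t end_cycle)
{
	uint64_t mid_blk = first_blk + (end_blk - first_blk) / 2;

	while (mid_blk != first_blk && mid_blk != end_blk) {
		if (cycles[mid_blk] == end_cycle)
			end_blk = mid_blk;	/* boundary at or before mid */
		else
			first_blk = mid_blk;	/* boundary is after mid */
		mid_blk = first_blk + (end_blk - first_blk) / 2;
	}
	return end_blk;
}

int main(void)
{
	/* cycle 7 written over the first four blocks, older cycle 6 after that */
	uint32_t cycles[] = { 7, 7, 7, 7, 6, 6, 6, 6 };

	printf("head candidate at block %llu\n",
	       (unsigned long long)find_cycle_start(cycles, 0, 7, 6));
	return 0;
}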
313 struct xlog *log, in xlog_find_verify_cycle() argument
330 * a log sector, or we're out of luck. in xlog_find_verify_cycle()
333 while (bufblks > log->l_logBBsize) in xlog_find_verify_cycle()
335 while (!(buffer = xlog_alloc_buffer(log, bufblks))) { in xlog_find_verify_cycle()
337 if (bufblks < log->l_sectBBsize) in xlog_find_verify_cycle()
346 error = xlog_bread(log, i, bcount, buffer, &buf); in xlog_find_verify_cycle()
369 xlog_logrec_hblks(struct xlog *log, struct xlog_rec_header *rh) in xlog_logrec_hblks() argument
371 if (xfs_has_logv2(log->l_mp)) { in xlog_logrec_hblks()
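Editor's note: on v2 logs a single record can exceed 32 KiB, and each 32 KiB of record data needs its own header block to hold the saved cycle words, so the record header may span several basic blocks. A hedged sketch of that calculation (the constant mirrors XLOG_HEADER_CYCLE_SIZE; the real helper also checks the record's version field):

#include <stdio.h>

#define HEADER_CYCLE_SIZE	(32 * 1024)	/* data covered by one header block */

static int logrec_hblks(int v2_log, int h_size)
{
	if (v2_log && h_size > HEADER_CYCLE_SIZE)
		return (h_size + HEADER_CYCLE_SIZE - 1) / HEADER_CYCLE_SIZE;
	return 1;					/* v1, or small v2 record */
}

int main(void)
{
	printf("%d\n", logrec_hblks(1, 256 * 1024));	/* 8 header blocks */
	printf("%d\n", logrec_hblks(1, 16 * 1024));	/* 1 */
	printf("%d\n", logrec_hblks(0, 256 * 1024));	/* v1 log: always 1 */
	return 0;
}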
382 * Potentially back up over a partial log record write. in xlog_find_verify_log_record()
385 * a good log record. Therefore, we subtract one to get the block number
388 * last log record is split over the end of the physical log.
395 struct xlog *log, in xlog_find_verify_log_record() argument
411 buffer = xlog_alloc_buffer(log, num_blks); in xlog_find_verify_log_record()
413 buffer = xlog_alloc_buffer(log, 1); in xlog_find_verify_log_record()
418 error = xlog_bread(log, start_blk, num_blks, buffer, &offset); in xlog_find_verify_log_record()
426 /* valid log record not found */ in xlog_find_verify_log_record()
427 xfs_warn(log->l_mp, in xlog_find_verify_log_record()
428 "Log inconsistent (didn't find previous header)"); in xlog_find_verify_log_record()
435 error = xlog_bread(log, i, 1, buffer, &offset); in xlog_find_verify_log_record()
450 * We hit the beginning of the physical log & still no header. Return in xlog_find_verify_log_record()
452 * will be called again for the end of the physical log. in xlog_find_verify_log_record()
460 * We have the final block of the good log (the first block in xlog_find_verify_log_record()
461 * of the log record _before_ the head). So we check the uuid. in xlog_find_verify_log_record()
463 if ((error = xlog_header_check_mount(log->l_mp, head))) in xlog_find_verify_log_record()
467 * We may have found a log record header before we expected one. in xlog_find_verify_log_record()
469 * up reading an entire log record. In this case, we don't want to in xlog_find_verify_log_record()
470 * reset last_blk. Only when last_blk points in the middle of a log in xlog_find_verify_log_record()
473 xhdrs = xlog_logrec_hblks(log, head); in xlog_find_verify_log_record()
485 * Head is defined to be the point of the log where the next log write
489 * current cycle number -1 won't be present in the log if we start writing
499 struct xlog *log, in xlog_find_head() argument
508 int error, log_bbnum = log->l_logBBsize; in xlog_find_head()
510 /* Is the end of the log device zeroed? */ in xlog_find_head()
511 error = xlog_find_zeroed(log, &first_blk); in xlog_find_head()
513 xfs_warn(log->l_mp, "empty log check failed"); in xlog_find_head()
523 * log so we can store the uuid in there in xlog_find_head()
525 xfs_warn(log->l_mp, "totally zeroed log"); in xlog_find_head()
532 buffer = xlog_alloc_buffer(log, 1); in xlog_find_head()
536 error = xlog_bread(log, 0, 1, buffer, &offset); in xlog_find_head()
543 error = xlog_bread(log, last_blk, 1, buffer, &offset); in xlog_find_head()
552 * then the entire log is stamped with the same cycle number. In this in xlog_find_head()
563 * In this case we believe that the entire log should have in xlog_find_head()
573 * log, as one of the latest writes at the beginning was in xlog_find_head()
579 * end of the log. in xlog_find_head()
581 * In the 256k log case, we will read from the beginning to the in xlog_find_head()
582 * end of the log and search for cycle numbers equal to x-1. in xlog_find_head()
584 * because we know that they cannot be the head since the log in xlog_find_head()
592 * number matching last_half_cycle. We expect the log to be in xlog_find_head()
601 * the log, then we look for occurrences of last_half_cycle - 1 in xlog_find_head()
602 * at the end of the log. The cases we're looking for look in xlog_find_head()
613 error = xlog_find_cycle_start(log, buffer, first_blk, &head_blk, in xlog_find_head()
623 * in the in-core log. The following number can be made tighter if in xlog_find_head()
626 num_scan_bblks = min_t(int, log_bbnum, XLOG_TOTAL_REC_SHIFT(log)); in xlog_find_head()
633 if ((error = xlog_find_verify_cycle(log, in xlog_find_head()
639 } else { /* need to read 2 parts of log */ in xlog_find_head()
641 * We are going to scan backwards in the log in two parts. in xlog_find_head()
642 * First we scan the physical end of the log. In this part in xlog_find_head()
643 * of the log, we are looking for blocks with cycle number in xlog_find_head()
645 * If we find one, then we know that the log starts there, as in xlog_find_head()
647 * the end of the physical log. The simple case for this is in xlog_find_head()
650 * If all of the blocks at the end of the log have cycle number in xlog_find_head()
652 * the log looking for occurrences of last_half_cycle. If we in xlog_find_head()
662 * In a 256k log, the scan at the end of the log will see the in xlog_find_head()
664 * certainly not the head of the log. By searching for in xlog_find_head()
670 if ((error = xlog_find_verify_cycle(log, start_blk, in xlog_find_head()
680 * Scan beginning of log now. The last part of the physical in xlog_find_head()
681 * log is good. This scan needs to verify that it doesn't find in xlog_find_head()
686 if ((error = xlog_find_verify_cycle(log, in xlog_find_head()
697 * the middle of a log record. in xlog_find_head()
699 num_scan_bblks = XLOG_REC_SHIFT(log); in xlog_find_head()
704 error = xlog_find_verify_log_record(log, start_blk, &head_blk, 0); in xlog_find_head()
712 error = xlog_find_verify_log_record(log, start_blk, &head_blk, 0); in xlog_find_head()
716 /* We hit the beginning of the log during our search */ in xlog_find_head()
722 error = xlog_find_verify_log_record(log, start_blk, in xlog_find_head()
750 xfs_warn(log->l_mp, "failed to find log head"); in xlog_find_head()
755 * Seek backwards in the log for log record headers.
757 * Given a starting log block, walk backwards until we find the provided number
759 * records encountered or a negative error code. The log block and buffer
764 struct xlog *log, in xlog_rseek_logrec_hdr() argument
783 * block in the log. in xlog_rseek_logrec_hdr()
787 error = xlog_bread(log, i, 1, buffer, &offset); in xlog_rseek_logrec_hdr()
800 * If we haven't hit the tail block or the log record header count, in xlog_rseek_logrec_hdr()
801 * start looking again from the end of the physical log. Note that in xlog_rseek_logrec_hdr()
805 for (i = log->l_logBBsize - 1; i >= (int) tail_blk; i--) { in xlog_rseek_logrec_hdr()
806 error = xlog_bread(log, i, 1, buffer, &offset); in xlog_rseek_logrec_hdr()
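Editor's note: the seek itself is a block-at-a-time scan for the record header magic in the first word of each basic block, walking down toward the tail (and, as noted above, wrapping to the physical end of the log when necessary). A hedged standalone sketch of the core loop over an in-memory log image:

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>			/* ntohl()/htonl() for the big-endian on-disk word */

#define BBSIZE		512
#define REC_MAGIC	0xfeedbabeU	/* value of XLOG_HEADER_MAGIC_NUM */

/* Return the block number of the first record header found at or below
 * start_blk, or -1 if none exists before tail_blk. */
static long rseek_logrec_hdr(const unsigned char *log_data, long start_blk,
			     long tail_blk)
{
	for (long i = start_blk; i >= tail_blk; i--) {
		uint32_t word;

		memcpy(&word, log_data + i * BBSIZE, sizeof(word));
		if (ntohl(word) == REC_MAGIC)
			return i;
	}
	return -1;
}

int main(void)
{
	static unsigned char log_data[8 * BBSIZE];
	uint32_t magic = htonl(REC_MAGIC);

	memcpy(log_data + 5 * BBSIZE, &magic, sizeof(magic));	/* header at block 5 */
	printf("found header at block %ld\n", rseek_logrec_hdr(log_data, 7, 0));
	return 0;
}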
828 * Seek forward in the log for log record headers.
832 * number of records encountered or a negative error code. The log block and
838 struct xlog *log, in xlog_seek_logrec_hdr() argument
857 * block in the log. in xlog_seek_logrec_hdr()
859 end_blk = head_blk > tail_blk ? head_blk : log->l_logBBsize - 1; in xlog_seek_logrec_hdr()
861 error = xlog_bread(log, i, 1, buffer, &offset); in xlog_seek_logrec_hdr()
874 * If we haven't hit the head block or the log record header count, in xlog_seek_logrec_hdr()
875 * start looking again from the start of the physical log. in xlog_seek_logrec_hdr()
879 error = xlog_bread(log, i, 1, buffer, &offset); in xlog_seek_logrec_hdr()
901 * Calculate distance from head to tail (i.e., unused space in the log).
905 struct xlog *log, in xlog_tail_distance() argument
912 return tail_blk + (log->l_logBBsize - head_blk); in xlog_tail_distance()
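Editor's note: when the head sits physically at or past the tail, the free space wraps around the end of the device, which is what the return expression above computes; the non-wrapping branch (head before tail) is not part of this excerpt but is the plain difference. A hedged worked example with a hypothetical 1000-block log:

#include <stdio.h>

/* Unused space between head and tail in a circular log of log_bbs blocks. */
static long tail_distance(long log_bbs, long head_blk, long tail_blk)
{
	if (head_blk < tail_blk)
		return tail_blk - head_blk;		/* no wrap */
	return tail_blk + (log_bbs - head_blk);		/* wraps past the end */
}

int main(void)
{
	printf("%ld\n", tail_distance(1000, 200, 700));	/* 500 blocks free */
	printf("%ld\n", tail_distance(1000, 700, 200));	/* 300 + 200 = 500 */
	return 0;
}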
916 * Verify the log tail. This is particularly important when torn or incomplete
917 * writes have been detected near the front of the log and the head has been
924 * log with garbage. This is not a coherency problem because the tail must have
925 * been pushed before it can be overwritten, but appears as log corruption to
929 * Therefore, CRC check the log from tail to head. If a failure occurs and the
936 struct xlog *log, in xlog_verify_tail() argument
949 buffer = xlog_alloc_buffer(log, 1); in xlog_verify_tail()
957 error = xlog_seek_logrec_hdr(log, head_blk, *tail_blk, 1, buffer, in xlog_verify_tail()
972 error = xlog_do_recovery_pass(log, head_blk, *tail_blk, in xlog_verify_tail()
981 tail_distance = xlog_tail_distance(log, head_blk, first_bad); in xlog_verify_tail()
986 error = xlog_seek_logrec_hdr(log, head_blk, first_bad, 2, in xlog_verify_tail()
993 error = xlog_do_recovery_pass(log, head_blk, *tail_blk, in xlog_verify_tail()
998 xfs_warn(log->l_mp, in xlog_verify_tail()
1007 * Detect and trim torn writes from the head of the log.
1010 * log in the event of a crash. Our only means to detect this scenario is via
1013 * only a certain number (XLOG_MAX_ICLOGS) of log records can be written out at
1015 * the log and treat failures in this range as torn writes as a matter of
1017 * record in the log and the tail is updated from that record and verified.
1021 struct xlog *log, in xlog_verify_head() argument
1027 bool *wrapped) /* last rec. wraps phys. log */ in xlog_verify_head()
1038 * Check the head of the log for torn writes. Search backwards from the in xlog_verify_head()
1039 * head until we hit the tail or the maximum number of log record I/Os in xlog_verify_head()
1043 tmp_buffer = xlog_alloc_buffer(log, 1); in xlog_verify_head()
1046 error = xlog_rseek_logrec_hdr(log, *head_blk, *tail_blk, in xlog_verify_head()
1056 * log block of the first bad record is saved in first_bad. in xlog_verify_head()
1058 error = xlog_do_recovery_pass(log, *head_blk, tmp_rhead_blk, in xlog_verify_head()
1066 xfs_warn(log->l_mp, in xlog_verify_head()
1067 "Torn write (CRC failure) detected at log block 0x%llx. Truncating head block from 0x%llx.", in xlog_verify_head()
1078 found = xlog_rseek_logrec_hdr(log, first_bad, *tail_blk, 1, in xlog_verify_head()
1087 * log record and set the tail block based on the last good in xlog_verify_head()
1104 return xlog_verify_tail(log, *head_blk, tail_blk, in xlog_verify_head()
1109 * We need to make sure we handle log wrapping properly, so we can't use the
1111 * log.
1113 * The log is limited to 32 bit sizes, so we use the appropriate modulus
1118 struct xlog *log, in xlog_wrap_logbno() argument
1123 div_s64_rem(bno, log->l_logBBsize, &mod); in xlog_wrap_logbno()
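Editor's note: since the log size fits in 32 bits, div_s64_rem simply yields bno modulo l_logBBsize, and the remainder is itself a valid log block number. A quick hedged illustration of the wrap:

#include <stdio.h>
#include <stdint.h>

/* Wrap a possibly past-the-end block number back into a log of log_bbs blocks. */
static int32_t wrap_logbno(int64_t bno, int32_t log_bbs)
{
	return (int32_t)(bno % log_bbs);
}

int main(void)
{
	int32_t log_bbs = 8192;		/* hypothetical 4 MiB log (8192 basic blocks) */

	printf("%d\n", wrap_logbno((int64_t)log_bbs + 37, log_bbs));	/* 37 */
	printf("%d\n", wrap_logbno(100, log_bbs));			/* 100, unchanged */
	return 0;
}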
1128 * Check whether the head of the log points to an unmount record. In other
1129 * words, determine whether the log is clean. If so, update the in-core state
1134 struct xlog *log, in xlog_check_unmount_rec() argument
1154 * log, we convert to a log block before comparing to the head_blk. in xlog_check_unmount_rec()
1160 hblks = xlog_logrec_hblks(log, rhead); in xlog_check_unmount_rec()
1161 after_umount_blk = xlog_wrap_logbno(log, in xlog_check_unmount_rec()
1166 umount_data_blk = xlog_wrap_logbno(log, rhead_blk + hblks); in xlog_check_unmount_rec()
1167 error = xlog_bread(log, umount_data_blk, 1, buffer, &offset); in xlog_check_unmount_rec()
1174 * Set tail and last sync so that newly written log in xlog_check_unmount_rec()
1178 xlog_assign_atomic_lsn(&log->l_tail_lsn, in xlog_check_unmount_rec()
1179 log->l_curr_cycle, after_umount_blk); in xlog_check_unmount_rec()
1180 log->l_ailp->ail_head_lsn = in xlog_check_unmount_rec()
1181 atomic64_read(&log->l_tail_lsn); in xlog_check_unmount_rec()
1193 struct xlog *log, in xlog_set_state() argument
1200 * Reset log values according to the state of the log when we in xlog_set_state()
1203 * continuing the cycle of the last good log record. At this in xlog_set_state()
1204 * point we have guaranteed that all partial log records have been in xlog_set_state()
1205 * accounted for. Therefore, we know that the last good log record in xlog_set_state()
1207 * of the physical log. in xlog_set_state()
1209 log->l_prev_block = rhead_blk; in xlog_set_state()
1210 log->l_curr_block = (int)head_blk; in xlog_set_state()
1211 log->l_curr_cycle = be32_to_cpu(rhead->h_cycle); in xlog_set_state()
1213 log->l_curr_cycle++; in xlog_set_state()
1214 atomic64_set(&log->l_tail_lsn, be64_to_cpu(rhead->h_tail_lsn)); in xlog_set_state()
1215 log->l_ailp->ail_head_lsn = be64_to_cpu(rhead->h_lsn); in xlog_set_state()
1219 * Find the sync block number or the tail of the log.
1222 * associated buffers synced to disk. Every log record header has
1225 * log record header to believe.
1227 * The following algorithm uses the log record header with the largest
1228 * lsn. The entire log record does not need to be valid. We only care
1236 struct xlog *log, in xlog_find_tail() argument
1250 * Find previous log record in xlog_find_tail()
1252 if ((error = xlog_find_head(log, head_blk))) in xlog_find_tail()
1256 buffer = xlog_alloc_buffer(log, 1); in xlog_find_tail()
1260 error = xlog_bread(log, 0, 1, buffer, &offset); in xlog_find_tail()
1266 /* leave all other log inited values alone */ in xlog_find_tail()
1272 * Search backwards through the log looking for the log record header in xlog_find_tail()
1276 error = xlog_rseek_logrec_hdr(log, *head_blk, *head_blk, 1, buffer, in xlog_find_tail()
1281 xfs_warn(log->l_mp, "%s: couldn't find sync record", __func__); in xlog_find_tail()
1288 * Set the log state based on the current head record. in xlog_find_tail()
1290 xlog_set_state(log, *head_blk, rhead, rhead_blk, wrapped); in xlog_find_tail()
1291 tail_lsn = atomic64_read(&log->l_tail_lsn); in xlog_find_tail()
1294 * Look for an unmount record at the head of the log. This sets the log in xlog_find_tail()
1297 error = xlog_check_unmount_rec(log, head_blk, tail_blk, rhead, in xlog_find_tail()
1303 * Verify the log head if the log is not clean (e.g., we have anything in xlog_find_tail()
1306 * considered torn writes and the log head is trimmed accordingly. in xlog_find_tail()
1308 * Note that we can only run CRC verification when the log is dirty in xlog_find_tail()
1309 * because there's no guarantee that the log data behind an unmount in xlog_find_tail()
1315 error = xlog_verify_head(log, head_blk, tail_blk, buffer, in xlog_find_tail()
1322 xlog_set_state(log, *head_blk, rhead, rhead_blk, in xlog_find_tail()
1324 tail_lsn = atomic64_read(&log->l_tail_lsn); in xlog_find_tail()
1325 error = xlog_check_unmount_rec(log, head_blk, tail_blk, in xlog_find_tail()
1339 xfs_set_clean(log->l_mp); in xlog_find_tail()
1344 * because we allow multiple outstanding log writes concurrently, in xlog_find_tail()
1360 if (!xfs_readonly_buftarg(log->l_targ)) in xlog_find_tail()
1361 error = xlog_clear_stale_blocks(log, tail_lsn); in xlog_find_tail()
1367 xfs_warn(log->l_mp, "failed to locate log tail"); in xlog_find_tail()
1372 * Is the log zeroed at all?
1378 * If the log is partially zeroed, this routine will pass back the blkno
1383 * 0 => the log is completely written to
1384 * 1 => use *blk_no as the first block of the log
1389 struct xlog *log, in xlog_find_zeroed() argument
1397 int error, log_bbnum = log->l_logBBsize; in xlog_find_zeroed()
1402 /* check totally zeroed log */ in xlog_find_zeroed()
1403 buffer = xlog_alloc_buffer(log, 1); in xlog_find_zeroed()
1406 error = xlog_bread(log, 0, 1, buffer, &offset); in xlog_find_zeroed()
1411 if (first_cycle == 0) { /* completely zeroed log */ in xlog_find_zeroed()
1416 /* check partially zeroed log */ in xlog_find_zeroed()
1417 error = xlog_bread(log, log_bbnum-1, 1, buffer, &offset); in xlog_find_zeroed()
1422 if (last_cycle != 0) { /* log completely written to */ in xlog_find_zeroed()
1427 /* we have a partially zeroed log */ in xlog_find_zeroed()
1429 error = xlog_find_cycle_start(log, buffer, 0, &last_blk, 0); in xlog_find_zeroed()
1435 * the entire log is made up of log records which are the same size, in xlog_find_zeroed()
1439 num_scan_bblks = XLOG_TOTAL_REC_SHIFT(log); in xlog_find_zeroed()
1452 if ((error = xlog_find_verify_cycle(log, start_blk, in xlog_find_zeroed()
1459 * Potentially back up over a partial log record write. We don't need in xlog_find_zeroed()
1460 * to search the end of the log because we know it is zero. in xlog_find_zeroed()
1462 error = xlog_find_verify_log_record(log, start_blk, &last_blk, 0); in xlog_find_zeroed()
1478 * to initialize a buffer full of empty log record headers and write
1479 * them into the log.
1483 struct xlog *log, in xlog_add_record() argument
1496 xfs_has_logv2(log->l_mp) ? 2 : 1); in xlog_add_record()
1500 memcpy(&recp->h_fs_uuid, &log->l_mp->m_sb.sb_uuid, sizeof(uuid_t)); in xlog_add_record()
1505 struct xlog *log, in xlog_write_log_records() argument
1515 int sectbb = log->l_sectBBsize; in xlog_write_log_records()
1525 * log sector, or we're out of luck. in xlog_write_log_records()
1528 while (bufblks > log->l_logBBsize) in xlog_write_log_records()
1530 while (!(buffer = xlog_alloc_buffer(log, bufblks))) { in xlog_write_log_records()
1542 error = xlog_bread_noalign(log, start_block, 1, buffer); in xlog_write_log_records()
1561 error = xlog_bread_noalign(log, ealign, sectbb, in xlog_write_log_records()
1568 offset = buffer + xlog_align(log, start_block); in xlog_write_log_records()
1570 xlog_add_record(log, offset, cycle, i+j, in xlog_write_log_records()
1574 error = xlog_bwrite(log, start_block, endcount, buffer); in xlog_write_log_records()
1587 * This routine is called to blow away any incomplete log writes out
1588 * in front of the log head. We do this so that we won't become confused
1590 * If we leave the partial log records out there, this situation could
1593 * with empty log records with the old cycle number rather than the
1597 * the log so that we will not write over the unmount record after a
1598 * clean unmount in a 512 block log. Doing so would leave the log without
1599 * any valid log records in it until a new one was written. If we crashed
1604 struct xlog *log, in xlog_clear_stale_blocks() argument
1615 head_cycle = log->l_curr_cycle; in xlog_clear_stale_blocks()
1616 head_block = log->l_curr_block; in xlog_clear_stale_blocks()
1619 * Figure out the distance between the new head of the log in xlog_clear_stale_blocks()
1622 * we don't want to overwrite the tail of the log. in xlog_clear_stale_blocks()
1626 * The tail is behind the head in the physical log, in xlog_clear_stale_blocks()
1628 * distance from the head to the end of the log plus in xlog_clear_stale_blocks()
1629 * the distance from the beginning of the log to the in xlog_clear_stale_blocks()
1632 if (XFS_IS_CORRUPT(log->l_mp, in xlog_clear_stale_blocks()
1634 head_block >= log->l_logBBsize)) in xlog_clear_stale_blocks()
1636 tail_distance = tail_block + (log->l_logBBsize - head_block); in xlog_clear_stale_blocks()
1639 * The head is behind the tail in the physical log, in xlog_clear_stale_blocks()
1643 if (XFS_IS_CORRUPT(log->l_mp, in xlog_clear_stale_blocks()
1659 max_distance = XLOG_TOTAL_REC_SHIFT(log); in xlog_clear_stale_blocks()
1669 if ((head_block + max_distance) <= log->l_logBBsize) { in xlog_clear_stale_blocks()
1672 * wrapping around the end of the log. Just do it in xlog_clear_stale_blocks()
1674 * current cycle minus one so that the log will look like: in xlog_clear_stale_blocks()
1677 error = xlog_write_log_records(log, (head_cycle - 1), in xlog_clear_stale_blocks()
1684 * We need to wrap around the end of the physical log in in xlog_clear_stale_blocks()
1687 * end of the physical log, and it should use the current in xlog_clear_stale_blocks()
1690 distance = log->l_logBBsize - head_block; in xlog_clear_stale_blocks()
1691 error = xlog_write_log_records(log, (head_cycle - 1), in xlog_clear_stale_blocks()
1699 * Now write the blocks at the start of the physical log. in xlog_clear_stale_blocks()
1706 distance = max_distance - (log->l_logBBsize - head_block); in xlog_clear_stale_blocks()
1707 error = xlog_write_log_records(log, head_cycle, 0, distance, in xlog_clear_stale_blocks()
1722 struct xlog *log, in xlog_recover_release_intent() argument
1728 list_for_each_entry_safe(dfp, n, &log->r_dfops, dfp_list) { in xlog_recover_release_intent()
1738 xfs_defer_cancel_recovery(log->l_mp, dfp); in xlog_recover_release_intent()
1767 * Get an inode so that we can recover a log operation.
1769 * Log intent items that target inodes effectively contain a file handle.
1771 * other file handles. Log intent items defined after this validation weakness
1799 * Log recover routines
1837 * Sort the log items in the transaction.
1887 struct xlog *log, in xlog_recover_reorder_trans() argument
1905 xfs_warn(log->l_mp, in xlog_recover_reorder_trans()
1906 "%s: unrecognized type of log operation (%d)", in xlog_recover_reorder_trans()
1927 trace_xfs_log_recover_item_reorder_head(log, in xlog_recover_reorder_trans()
1935 trace_xfs_log_recover_item_reorder_tail(log, in xlog_recover_reorder_trans()
1956 struct xlog *log, in xlog_buf_readahead() argument
1961 if (!xlog_is_buffer_cancelled(log, blkno, len)) in xlog_buf_readahead()
1962 xfs_buf_readahead(log->l_mp->m_ddev_targp, blkno, len, ops); in xlog_buf_readahead()
1967 * log intent item that was found during recovery.
1971 struct xlog *log, in xlog_recover_intent_item() argument
1978 xfs_defer_start_recovery(lip, &log->r_dfops, ops); in xlog_recover_intent_item()
1984 xfs_trans_ail_insert(log->l_ailp, lip, lsn); in xlog_recover_intent_item()
1990 struct xlog *log, in xlog_recover_items_pass2() argument
1999 trace_xfs_log_recover_item_recover(log, trans, item, in xlog_recover_items_pass2()
2003 error = item->ri_ops->commit_pass2(log, buffer_list, in xlog_recover_items_pass2()
2020 struct xlog *log, in xlog_recover_commit_trans() argument
2036 error = xlog_recover_reorder_trans(log, trans, pass); in xlog_recover_commit_trans()
2041 trace_xfs_log_recover_item_recover(log, trans, item, pass); in xlog_recover_commit_trans()
2046 error = item->ri_ops->commit_pass1(log, item); in xlog_recover_commit_trans()
2050 item->ri_ops->ra_pass2(log, item); in xlog_recover_commit_trans()
2054 error = xlog_recover_items_pass2(log, trans, in xlog_recover_commit_trans()
2072 error = xlog_recover_items_pass2(log, trans, in xlog_recover_commit_trans()
2097 struct xlog *log, in xlog_recover_add_to_cont_trans() argument
2113 xfs_warn(log->l_mp, "%s: bad header length", __func__); in xlog_recover_add_to_cont_trans()
2137 trace_xfs_log_recover_item_add_cont(log, trans, item, 0); in xlog_recover_add_to_cont_trans()
2152 * will appear in the current log item.
2156 struct xlog *log, in xlog_recover_add_to_trans() argument
2168 /* we need to catch log corruptions here */ in xlog_recover_add_to_trans()
2170 xfs_warn(log->l_mp, "%s: bad header magic number", in xlog_recover_add_to_trans()
2177 xfs_warn(log->l_mp, "%s: bad header length", __func__); in xlog_recover_add_to_trans()
2211 xfs_warn(log->l_mp, in xlog_recover_add_to_trans()
2212 "bad number of regions (%d) in inode log format", in xlog_recover_add_to_trans()
2225 xfs_warn(log->l_mp, in xlog_recover_add_to_trans()
2226 "log item region count (%d) overflowed size (%d)", in xlog_recover_add_to_trans()
2237 trace_xfs_log_recover_item_add(log, trans, item, 0); in xlog_recover_add_to_trans()
2273 struct xlog *log, in xlog_recovery_process_trans() argument
2297 error = xlog_recover_add_to_trans(log, trans, dp, len); in xlog_recovery_process_trans()
2300 error = xlog_recover_add_to_cont_trans(log, trans, dp, len); in xlog_recovery_process_trans()
2303 error = xlog_recover_commit_trans(log, trans, pass, in xlog_recovery_process_trans()
2312 xfs_warn(log->l_mp, "%s: Unmount LR", __func__); in xlog_recovery_process_trans()
2317 xfs_warn(log->l_mp, "%s: bad flag 0x%x", __func__, flags); in xlog_recovery_process_trans()
2380 struct xlog *log, in xlog_recover_process_ophdr() argument
2396 xfs_warn(log->l_mp, "%s: bad clientid 0x%x", in xlog_recover_process_ophdr()
2407 xfs_warn(log->l_mp, "%s: bad length 0x%x", __func__, len); in xlog_recover_process_ophdr()
2424 * - Log recovery skips items with a metadata LSN >= the current LSN of in xlog_recover_process_ophdr()
2432 * In other words, we are allowed to submit a buffer from log recovery in xlog_recover_process_ophdr()
2437 * LSN. Therefore, track the current LSN of each commit log record as it in xlog_recover_process_ophdr()
2441 if (log->l_recovery_lsn != trans->r_lsn && in xlog_recover_process_ophdr()
2446 log->l_recovery_lsn = trans->r_lsn; in xlog_recover_process_ophdr()
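Editor's note: the LSN-based skip described above boils down to one comparison when an item is replayed: if the on-disk metadata already carries an LSN at or beyond the commit record's LSN, replaying the item would move that metadata backwards in time, so it is skipped. A hedged standalone sketch of that decision (the kernel uses XFS_LSN_CMP and the per-item recovery handlers):

#include <stdio.h>
#include <stdint.h>

/* Compare packed LSNs (cycle in the high 32 bits, block in the low 32 bits). */
static int lsn_cmp(uint64_t a, uint64_t b)
{
	return (a > b) - (a < b);
}

/* Replay only if the metadata on disk predates the commit record being replayed. */
static int should_replay(uint64_t metadata_lsn, uint64_t current_lsn)
{
	return lsn_cmp(metadata_lsn, current_lsn) < 0;
}

int main(void)
{
	uint64_t current_lsn = ((uint64_t)8 << 32) | 100;	/* commit record LSN */

	printf("%d\n", should_replay(((uint64_t)7 << 32) | 900, current_lsn));	/* 1: replay */
	printf("%d\n", should_replay(((uint64_t)8 << 32) | 100, current_lsn));	/* 0: skip */
	return 0;
}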
2449 return xlog_recovery_process_trans(log, trans, dp, len, in xlog_recover_process_ophdr()
2464 struct xlog *log, in xlog_recover_process_data() argument
2479 /* check the log format matches our own - else we can't recover */ in xlog_recover_process_data()
2480 if (xlog_header_check_recover(log->l_mp, rhead)) in xlog_recover_process_data()
2483 trace_xfs_log_recover_record(log, rhead, pass); in xlog_recover_process_data()
2489 xfs_warn(log->l_mp, "%s: op header overrun", __func__); in xlog_recover_process_data()
2494 error = xlog_recover_process_ophdr(log, rhash, rhead, ohead, in xlog_recover_process_data()
2523 * in recovery no matter how full the log might be. in xlog_finish_defer_ops()
2568 * When this is called, all of the log intent items which did not have
2569 * corresponding log done items should be in the AIL. What we do now is update
2572 * Since we process the log intent items in normal transactions, they will be
2585 struct xlog *log) in xlog_recover_process_intents() argument
2593 last_lsn = xlog_assign_lsn(log->l_curr_cycle, log->l_curr_block); in xlog_recover_process_intents()
2596 list_for_each_entry_safe(dfp, n, &log->r_dfops, dfp_list) { in xlog_recover_process_intents()
2601 * the last transaction we found in the log at the start in xlog_recover_process_intents()
2612 * The recovery function can free the log item, so we must not in xlog_recover_process_intents()
2616 error = xfs_defer_finish_recovery(log->l_mp, dfp, in xlog_recover_process_intents()
2624 error = xlog_finish_defer_ops(log->l_mp, &capture_list); in xlog_recover_process_intents()
2630 xlog_abort_defer_ops(log->l_mp, &capture_list); in xlog_recover_process_intents()
2636 * pending log intent items that we haven't started recovery on so they don't
2641 struct xlog *log) in xlog_recover_cancel_intents() argument
2645 list_for_each_entry_safe(dfp, n, &log->r_dfops, dfp_list) { in xlog_recover_cancel_intents()
2648 xfs_defer_cancel_recovery(log->l_mp, dfp); in xlog_recover_cancel_intents()
2789 * of log space.
2793 * can lead to deadlocks if the recovery process runs out of log reservation
2847 struct xlog *log) in xlog_recover_process_iunlinks() argument
2852 for_each_perag(log->l_mp, agno, pag) in xlog_recover_process_iunlinks()
2860 struct xlog *log) in xlog_unpack_data() argument
2870 if (xfs_has_logv2(log->l_mp)) { in xlog_unpack_data()
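Editor's note: when a record is written, the first four bytes of every 512-byte basic block of record data are overwritten with the record's cycle number so torn writes can be detected, and the original words are saved in the header's h_cycle_data[] (spilling into the extended header blocks for large v2 records). Unpacking reverses that. A hedged standalone sketch of the restore for the simple single-header case:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define BBSIZE 512

/* Put back the first word of each basic block from the copies saved in the
 * record header (saved[] stands in for h_cycle_data[]).  Records larger than
 * 32 KiB need the extended header blocks, which this sketch ignores. */
static void unpack_data(unsigned char *data, int nbblks, const uint32_t *saved)
{
	for (int i = 0; i < nbblks; i++)
		memcpy(data + i * BBSIZE, &saved[i], sizeof(uint32_t));
}

int main(void)
{
	unsigned char rec[2 * BBSIZE] = { 0 };
	uint32_t saved[2] = { 0x11111111, 0x22222222 }, word;

	unpack_data(rec, 2, saved);
	memcpy(&word, rec + BBSIZE, sizeof(word));
	printf("second block's first word restored to 0x%x\n", word);
	return 0;
}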
2882 * CRC check, unpack and process a log record.
2886 struct xlog *log, in xlog_recover_process() argument
2896 crc = xlog_cksum(log, rhead, dp, be32_to_cpu(rhead->h_len)); in xlog_recover_process()
2918 if (old_crc || xfs_has_crc(log->l_mp)) { in xlog_recover_process()
2919 xfs_alert(log->l_mp, in xlog_recover_process()
2920 "log record CRC mismatch: found 0x%x, expected 0x%x.", in xlog_recover_process()
2928 * fatal log corruption failure. in xlog_recover_process()
2930 if (xfs_has_crc(log->l_mp)) { in xlog_recover_process()
2931 XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, log->l_mp); in xlog_recover_process()
2936 xlog_unpack_data(rhead, dp, log); in xlog_recover_process()
2938 return xlog_recover_process_data(log, rhash, rhead, dp, pass, in xlog_recover_process()
2944 struct xlog *log, in xlog_valid_rec_header() argument
2951 if (XFS_IS_CORRUPT(log->l_mp, in xlog_valid_rec_header()
2954 if (XFS_IS_CORRUPT(log->l_mp, in xlog_valid_rec_header()
2958 xfs_warn(log->l_mp, "%s: unrecognised log version (%d).", in xlog_valid_rec_header()
2968 if (XFS_IS_CORRUPT(log->l_mp, hlen <= 0 || hlen > bufsize)) in xlog_valid_rec_header()
2971 if (XFS_IS_CORRUPT(log->l_mp, in xlog_valid_rec_header()
2972 blkno > log->l_logBBsize || blkno > INT_MAX)) in xlog_valid_rec_header()
2978 * Read the log from tail to head and process the log records found.
2980 * and where the active portion of the log wraps around the end of
2981 * the physical log separately. The pass parameter is passed through
2987 struct xlog *log, in xlog_do_recovery_pass() argument
2991 xfs_daddr_t *first_bad) /* out: first bad log rec */ in xlog_do_recovery_pass()
3012 hbp = xlog_alloc_buffer(log, hblks); in xlog_do_recovery_pass()
3018 * h_size. Use this to tell how many sectors make up the log header. in xlog_do_recovery_pass()
3020 if (xfs_has_logv2(log->l_mp)) { in xlog_do_recovery_pass()
3026 error = xlog_bread(log, tail_blk, 1, hbp, &offset); in xlog_do_recovery_pass()
3036 * log buffer can be too small for the record and cause an in xlog_do_recovery_pass()
3045 if (h_len > h_size && h_len <= log->l_mp->m_logbsize && in xlog_do_recovery_pass()
3047 xfs_warn(log->l_mp, in xlog_do_recovery_pass()
3049 h_size, log->l_mp->m_logbsize); in xlog_do_recovery_pass()
3050 h_size = log->l_mp->m_logbsize; in xlog_do_recovery_pass()
3053 error = xlog_valid_rec_header(log, rhead, tail_blk, h_size); in xlog_do_recovery_pass()
3068 hbp = xlog_alloc_buffer(log, hblks); in xlog_do_recovery_pass()
3074 ASSERT(log->l_sectBBsize == 1); in xlog_do_recovery_pass()
3078 dbp = xlog_alloc_buffer(log, BTOBB(h_size)); in xlog_do_recovery_pass()
3087 * Perform recovery around the end of the physical log. in xlog_do_recovery_pass()
3091 while (blk_no < log->l_logBBsize) { in xlog_do_recovery_pass()
3093 * Check for header wrapping around physical end-of-log in xlog_do_recovery_pass()
3098 if (blk_no + hblks <= log->l_logBBsize) { in xlog_do_recovery_pass()
3100 error = xlog_bread(log, blk_no, hblks, hbp, in xlog_do_recovery_pass()
3105 /* This LR is split across physical log end */ in xlog_do_recovery_pass()
3106 if (blk_no != log->l_logBBsize) { in xlog_do_recovery_pass()
3107 /* some data before physical log end */ in xlog_do_recovery_pass()
3109 split_hblks = log->l_logBBsize - (int)blk_no; in xlog_do_recovery_pass()
3111 error = xlog_bread(log, blk_no, in xlog_do_recovery_pass()
3124 * - the log start is guaranteed to be sector in xlog_do_recovery_pass()
3126 * - we read the log end (LR header start) in xlog_do_recovery_pass()
3127 * _first_, then the log start (LR header end) in xlog_do_recovery_pass()
3131 error = xlog_bread_noalign(log, 0, in xlog_do_recovery_pass()
3138 error = xlog_valid_rec_header(log, rhead, in xlog_do_recovery_pass()
3147 * Read the log record data in multiple reads if it in xlog_do_recovery_pass()
3148 * wraps around the end of the log. Note that if the in xlog_do_recovery_pass()
3150 * end of the log. The record data is contiguous in in xlog_do_recovery_pass()
3153 if (blk_no + bblks <= log->l_logBBsize || in xlog_do_recovery_pass()
3154 blk_no >= log->l_logBBsize) { in xlog_do_recovery_pass()
3155 rblk_no = xlog_wrap_logbno(log, blk_no); in xlog_do_recovery_pass()
3156 error = xlog_bread(log, rblk_no, bblks, dbp, in xlog_do_recovery_pass()
3161 /* This log record is split across the in xlog_do_recovery_pass()
3162 * physical end of log */ in xlog_do_recovery_pass()
3165 if (blk_no != log->l_logBBsize) { in xlog_do_recovery_pass()
3167 * end of log */ in xlog_do_recovery_pass()
3171 log->l_logBBsize - (int)blk_no; in xlog_do_recovery_pass()
3173 error = xlog_bread(log, blk_no, in xlog_do_recovery_pass()
3186 * - the log start is guaranteed to be sector in xlog_do_recovery_pass()
3188 * - we read the log end (LR header start) in xlog_do_recovery_pass()
3189 * _first_, then the log start (LR header end) in xlog_do_recovery_pass()
3192 error = xlog_bread_noalign(log, 0, in xlog_do_recovery_pass()
3199 error = xlog_recover_process(log, rhash, rhead, offset, in xlog_do_recovery_pass()
3208 ASSERT(blk_no >= log->l_logBBsize); in xlog_do_recovery_pass()
3209 blk_no -= log->l_logBBsize; in xlog_do_recovery_pass()
3213 /* read first part of physical log */ in xlog_do_recovery_pass()
3215 error = xlog_bread(log, blk_no, hblks, hbp, &offset); in xlog_do_recovery_pass()
3220 error = xlog_valid_rec_header(log, rhead, blk_no, h_size); in xlog_do_recovery_pass()
3226 error = xlog_bread(log, blk_no+hblks, bblks, dbp, in xlog_do_recovery_pass()
3231 error = xlog_recover_process(log, rhash, rhead, offset, pass, in xlog_do_recovery_pass()
3264 xlog_force_shutdown(log, SHUTDOWN_LOG_IO_ERROR); in xlog_do_recovery_pass()
3289 * Do the recovery of the log. We actually do this in two phases.
3291 * of cancelling a record written into the log. The first pass
3293 * second pass replays log items normally except for those which
3295 * takes place in the log item type specific routines.
3297 * The table of items which have cancel records in the log is allocated
3299 * the log recovery has been completed.
3303 struct xlog *log, in xlog_do_log_recovery() argument
3312 * First do a pass to find all of the cancelled buf log items. in xlog_do_log_recovery()
3315 error = xlog_alloc_buf_cancel_table(log); in xlog_do_log_recovery()
3319 error = xlog_do_recovery_pass(log, head_blk, tail_blk, in xlog_do_log_recovery()
3325 * Then do a second pass to actually recover the items in the log. in xlog_do_log_recovery()
3328 error = xlog_do_recovery_pass(log, head_blk, tail_blk, in xlog_do_log_recovery()
3331 xlog_check_buf_cancel_table(log); in xlog_do_log_recovery()
3333 xlog_free_buf_cancel_table(log); in xlog_do_log_recovery()
3342 struct xlog *log, in xlog_do_recover() argument
3346 struct xfs_mount *mp = log->l_mp; in xlog_do_recover()
3351 trace_xfs_log_recover(log, head_blk, tail_blk); in xlog_do_recover()
3354 * First replay the images in the log. in xlog_do_recover()
3356 error = xlog_do_log_recovery(log, head_blk, tail_blk); in xlog_do_recover()
3360 if (xlog_is_shutdown(log)) in xlog_do_recover()
3366 * iunlinks, we can free up the entire log. This was set in in xlog_do_recover()
3371 xfs_ail_assign_tail_lsn(log->l_ailp); in xlog_do_recover()
3381 if (!xlog_is_shutdown(log)) { in xlog_do_recover()
3398 clear_bit(XLOG_ACTIVE_RECOVERY, &log->l_opstate); in xlog_do_recover()
3403 * Perform recovery and re-initialize some log variables in xlog_find_tail.
3409 struct xlog *log) in xlog_recover() argument
3414 /* find the tail of the log */ in xlog_recover()
3415 error = xlog_find_tail(log, &head_blk, &tail_blk); in xlog_recover()
3420 * The superblock was read before the log was available and thus the LSN in xlog_recover()
3424 if (xfs_has_crc(log->l_mp) && in xlog_recover()
3425 !xfs_log_check_lsn(log->l_mp, log->l_mp->m_sb.sb_lsn)) in xlog_recover()
3440 if ((error = xfs_dev_is_read_only(log->l_mp, "recovery"))) { in xlog_recover()
3445 * Version 5 superblock log feature mask validation. We know the in xlog_recover()
3446 * log is dirty so check if there are any unknown log features in xlog_recover()
3451 if (xfs_sb_is_v5(&log->l_mp->m_sb) && in xlog_recover()
3452 xfs_sb_has_incompat_log_feature(&log->l_mp->m_sb, in xlog_recover()
3454 xfs_warn(log->l_mp, in xlog_recover()
3455 "Superblock has unknown incompatible log features (0x%x) enabled.", in xlog_recover()
3456 (log->l_mp->m_sb.sb_features_log_incompat & in xlog_recover()
3458 xfs_warn(log->l_mp, in xlog_recover()
3459 "The log can not be fully and/or safely recovered by this kernel."); in xlog_recover()
3460 xfs_warn(log->l_mp, in xlog_recover()
3461 "Please recover the log on a kernel that supports the unknown features."); in xlog_recover()
3466 * Delay log recovery if the debug hook is set. This is debug in xlog_recover()
3468 * log recovery. in xlog_recover()
3471 xfs_notice(log->l_mp, in xlog_recover()
3472 "Delaying log recovery for %d seconds.", in xlog_recover()
3477 xfs_notice(log->l_mp, "Starting recovery (logdev: %s)", in xlog_recover()
3478 log->l_mp->m_logname ? log->l_mp->m_logname in xlog_recover()
3481 error = xlog_do_recover(log, head_blk, tail_blk); in xlog_recover()
3482 set_bit(XLOG_RECOVERY_NEEDED, &log->l_opstate); in xlog_recover()
3503 struct xlog *log) in xlog_recover_finish() argument
3508 error = xlog_recover_process_intents(log); in xlog_recover_finish()
3517 xlog_recover_cancel_intents(log); in xlog_recover_finish()
3518 xfs_alert(log->l_mp, "Failed to recover intents"); in xlog_recover_finish()
3519 xlog_force_shutdown(log, SHUTDOWN_LOG_IO_ERROR); in xlog_recover_finish()
3524 * Sync the log to get all the intents out of the AIL. This isn't in xlog_recover_finish()
3528 xfs_log_force(log->l_mp, XFS_LOG_SYNC); in xlog_recover_finish()
3530 xlog_recover_process_iunlinks(log); in xlog_recover_finish()
3539 error = xfs_reflink_recover_cow(log->l_mp); in xlog_recover_finish()
3541 xfs_alert(log->l_mp, in xlog_recover_finish()
3545 * If we get an error here, make sure the log is shut down in xlog_recover_finish()
3546 * but return zero so that any log items committed since the in xlog_recover_finish()
3550 xlog_force_shutdown(log, SHUTDOWN_LOG_IO_ERROR); in xlog_recover_finish()
3562 struct xlog *log) in xlog_recover_cancel() argument
3564 if (xlog_recovery_needed(log)) in xlog_recover_cancel()
3565 xlog_recover_cancel_intents(log); in xlog_recover_cancel()