Lines matching "ip" and "blocks" in fs/xfs/scrub/rmap_repair.c (XFS online rmap btree repair)

1 // SPDX-License-Identifier: GPL-2.0-or-later
3 * Copyright (c) 2018-2024 Oracle. All Rights Reserved.
65 * I) Reverse mappings for all non-space metadata and file data are collected
69 * 1.1. Create a bitmap BMBIT to track bmbt blocks if necessary.
74 * just visit the bmbt blocks to set the corresponding BMBIT areas.
96 * 2. Estimate the number of rmapbt blocks needed to store NR records. (= RMB)
97 * 3. Reserve RMB blocks through the newbt using the allocator in normap mode.
102 * 8. Estimate the number of rmapbt blocks needed for NR + AGNR rmaps. (= RMB')
103 * 9. If RMB' > RMB, reserve RMB' - RMB more newbt blocks, set RMB = RMB',
112 * IV) Reap the old btree blocks.
120 * that they were the old rmapbt blocks.
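/*
 * A minimal sketch of the reservation loop in steps 2-3 and 8-9 above,
 * using helpers that appear later in this listing
 * (xfs_btree_bload_compute_geometry, xrep_newbt_alloc_blocks).  Local
 * variable names are assumptions; the real loop is split between
 * xrep_rmap_reserve_space() and xrep_rmap_try_reserve() below.
 */
do {
	/* Step 8: re-estimate RMB' for all records known so far. */
	error = xfs_btree_bload_compute_geometry(rmap_cur,
			&rr->new_btree.bload,
			rr->nr_records + freesp_records);
	if (error)
		break;

	/* Step 9: reserve RMB' - RMB more blocks if the estimate grew. */
	if (rr->new_btree.bload.nr_blocks > blocks_reserved) {
		error = xrep_newbt_alloc_blocks(&rr->new_btree,
				rr->new_btree.bload.nr_blocks -
				blocks_reserved);
		if (error)
			break;
		blocks_reserved = rr->new_btree.bload.nr_blocks;
		done = false;	/* new reservations may add OWN_AG rmaps */
	} else {
		done = true;	/* estimate converged */
	}
} while (!done);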
139 /* in-memory btree cursor for the xfs_btree_bload iteration */
148 /* Number of non-freespace records found. */
177 return -ENOMEM; in xrep_setup_ag_rmapbt()
179 rr->sc = sc; in xrep_setup_ag_rmapbt()
180 sc->buf = rr; in xrep_setup_ag_rmapbt()
193 if (xfs_rmap_check_irec(sc->sa.pag, rec) != NULL) in xrep_rmap_check_mapping()
194 return -EFSCORRUPTED; in xrep_rmap_check_mapping()
197 error = xfs_alloc_has_records(sc->sa.bno_cur, rec->rm_startblock, in xrep_rmap_check_mapping()
198 rec->rm_blockcount, &outcome); in xrep_rmap_check_mapping()
202 return -EFSCORRUPTED; in xrep_rmap_check_mapping()
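/*
 * A hedged reconstruction of the outcome check elided between lines 198
 * and 202: a mapping that overlaps anything the bnobt considers free
 * space is corrupt.  XBTREE_RECPACKING_EMPTY is assumed to be the
 * "no records in this range" result of xfs_alloc_has_records().
 */
if (error)
	return error;
if (outcome != XBTREE_RECPACKING_EMPTY)
	return -EFSCORRUPTED;
return 0;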
207 /* Store a reverse-mapping record. */
224 struct xfs_scrub *sc = rr->sc; in xrep_rmap_stash()
231 if (xchk_iscan_aborted(&rr->iscan)) in xrep_rmap_stash()
232 return -EFSCORRUPTED; in xrep_rmap_stash()
234 trace_xrep_rmap_found(sc->mp, sc->sa.pag->pag_agno, &rmap); in xrep_rmap_stash()
236 mutex_lock(&rr->lock); in xrep_rmap_stash()
237 mcur = xfs_rmapbt_mem_cursor(sc->sa.pag, sc->tp, &rr->rmap_btree); in xrep_rmap_stash()
243 error = xfbtree_trans_commit(&rr->rmap_btree, sc->tp); in xrep_rmap_stash()
247 mutex_unlock(&rr->lock); in xrep_rmap_stash()
251 xfbtree_trans_cancel(&rr->rmap_btree, sc->tp); in xrep_rmap_stash()
253 xchk_iscan_abort(&rr->iscan); in xrep_rmap_stash()
254 mutex_unlock(&rr->lock); in xrep_rmap_stash()
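/*
 * The stash pattern assembled from the fragments above, as a sketch:
 * with rr->lock held, open a cursor on the in-memory (xfbtree) rmap
 * btree, insert the record, then commit or cancel the shadow
 * transaction.  The xfs_rmap_map_raw() insertion call is an assumption;
 * the locking, abort, and commit/cancel calls all appear above.
 */
mutex_lock(&rr->lock);
mcur = xfs_rmapbt_mem_cursor(sc->sa.pag, sc->tp, &rr->rmap_btree);
error = xfs_rmap_map_raw(mcur, &rmap);	/* assumed insert helper */
xfs_btree_del_cursor(mcur, error);
if (error)
	goto out_cancel;
error = xfbtree_trans_commit(&rr->rmap_btree, sc->tp);
mutex_unlock(&rr->lock);
return error;

out_cancel:
xfbtree_trans_cancel(&rr->rmap_btree, sc->tp);
xchk_iscan_abort(&rr->iscan);
mutex_unlock(&rr->lock);
return error;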
271 struct xrep_rmap *rr = rsr->rr; in xrep_rmap_stash_run()
273 return xrep_rmap_stash(rr, start, len, rsr->owner, 0, rsr->rmap_flags); in xrep_rmap_stash_run()
278 * that the ranges are in units of FS blocks.
288 .owner = oinfo->oi_owner, in xrep_rmap_stash_bitmap()
292 if (oinfo->oi_flags & XFS_OWNER_INFO_ATTR_FORK) in xrep_rmap_stash_bitmap()
294 if (oinfo->oi_flags & XFS_OWNER_INFO_BMBT_BLOCK) in xrep_rmap_stash_bitmap()
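/*
 * Presumably the function finishes by walking the bitmap and stashing
 * one rmap record per set range; a sketch using the xagb_bitmap walker
 * and the xrep_rmap_stash_run() callback shown above:
 */
return xagb_bitmap_walk(bitmap, xrep_rmap_stash_run, &rsr);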
310 /* Bitmap of bmbt blocks in this AG. */
324 if (rf->accum.rm_blockcount == 0) in xrep_rmap_stash_accumulated()
327 return xrep_rmap_stash(rf->rr, rf->accum.rm_startblock, in xrep_rmap_stash_accumulated()
328 rf->accum.rm_blockcount, rf->accum.rm_owner, in xrep_rmap_stash_accumulated()
329 rf->accum.rm_offset, rf->accum.rm_flags); in xrep_rmap_stash_accumulated()
340 struct xfs_mount *mp = rf->rr->sc->mp; in xrep_rmap_visit_bmbt()
341 struct xfs_rmap_irec *accum = &rf->accum; in xrep_rmap_visit_bmbt()
346 if (XFS_FSB_TO_AGNO(mp, rec->br_startblock) != in xrep_rmap_visit_bmbt()
347 rf->rr->sc->sa.pag->pag_agno) in xrep_rmap_visit_bmbt()
350 agbno = XFS_FSB_TO_AGBNO(mp, rec->br_startblock); in xrep_rmap_visit_bmbt()
351 if (rf->whichfork == XFS_ATTR_FORK) in xrep_rmap_visit_bmbt()
353 if (rec->br_state == XFS_EXT_UNWRITTEN) in xrep_rmap_visit_bmbt()
357 if (accum->rm_blockcount > 0 && in xrep_rmap_visit_bmbt()
358 rec->br_startoff == accum->rm_offset + accum->rm_blockcount && in xrep_rmap_visit_bmbt()
359 agbno == accum->rm_startblock + accum->rm_blockcount && in xrep_rmap_visit_bmbt()
360 rmap_flags == accum->rm_flags) { in xrep_rmap_visit_bmbt()
361 accum->rm_blockcount += rec->br_blockcount; in xrep_rmap_visit_bmbt()
370 accum->rm_startblock = agbno; in xrep_rmap_visit_bmbt()
371 accum->rm_blockcount = rec->br_blockcount; in xrep_rmap_visit_bmbt()
372 accum->rm_offset = rec->br_startoff; in xrep_rmap_visit_bmbt()
373 accum->rm_flags = rmap_flags; in xrep_rmap_visit_bmbt()
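/*
 * Worked example of the accumulation above: data-fork mappings
 * { startoff 100, agbno 520, count 8 } and { startoff 108, agbno 528,
 * count 4 } are contiguous in both file and AG space with equal flags,
 * so they merge into a single rmap { offset 100, startblock 520,
 * count 12 } instead of costing two btree insertions.
 */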
393 fsbno = XFS_DADDR_TO_FSB(cur->bc_mp, xfs_buf_daddr(bp)); in xrep_rmap_visit_iroot_btree_block()
394 if (XFS_FSB_TO_AGNO(cur->bc_mp, fsbno) != rf->rr->sc->sa.pag->pag_agno) in xrep_rmap_visit_iroot_btree_block()
397 agbno = XFS_FSB_TO_AGBNO(cur->bc_mp, fsbno); in xrep_rmap_visit_iroot_btree_block()
398 return xagb_bitmap_set(&rf->bmbt_blocks, agbno, 1); in xrep_rmap_visit_iroot_btree_block()
411 struct xrep_rmap *rr = rf->rr; in xrep_rmap_scan_iroot_btree()
414 xagb_bitmap_init(&rf->bmbt_blocks); in xrep_rmap_scan_iroot_btree()
416 /* Record all the blocks in the btree itself. */ in xrep_rmap_scan_iroot_btree()
422 /* Emit rmaps for the btree blocks. */ in xrep_rmap_scan_iroot_btree()
423 xfs_rmap_ino_bmbt_owner(&oinfo, rf->accum.rm_owner, rf->whichfork); in xrep_rmap_scan_iroot_btree()
424 error = xrep_rmap_stash_bitmap(rr, &rf->bmbt_blocks, &oinfo); in xrep_rmap_scan_iroot_btree()
431 xagb_bitmap_destroy(&rf->bmbt_blocks); in xrep_rmap_scan_iroot_btree()
443 struct xfs_inode *ip, in xrep_rmap_scan_bmbt() argument
446 struct xrep_rmap *rr = rf->rr; in xrep_rmap_scan_bmbt()
452 ifp = xfs_ifork_ptr(ip, rf->whichfork); in xrep_rmap_scan_bmbt()
453 cur = xfs_bmbt_init_cursor(rr->sc->mp, rr->sc->tp, ip, rf->whichfork); in xrep_rmap_scan_bmbt()
455 if (!xfs_ifork_is_realtime(ip, rf->whichfork) && in xrep_rmap_scan_bmbt()
471 /* Scan for the bmbt blocks, which always live on the data device. */ in xrep_rmap_scan_bmbt()
479 * Iterate the in-core extent cache to collect rmap records for anything in
506 struct xfs_inode *ip, in xrep_rmap_scan_ifork() argument
510 .accum = { .rm_owner = ip->i_ino, }, in xrep_rmap_scan_ifork()
514 struct xfs_ifork *ifp = xfs_ifork_ptr(ip, whichfork); in xrep_rmap_scan_ifork()
520 if (ifp->if_format == XFS_DINODE_FMT_BTREE) { in xrep_rmap_scan_ifork()
525 * the btree blocks themselves, even if this is a realtime in xrep_rmap_scan_ifork()
528 error = xrep_rmap_scan_bmbt(&rf, ip, &mappings_done); in xrep_rmap_scan_ifork()
531 } else if (ifp->if_format != XFS_DINODE_FMT_EXTENTS) { in xrep_rmap_scan_ifork()
536 if (xfs_ifork_is_realtime(ip, whichfork)) in xrep_rmap_scan_ifork()
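/*
 * A sketch of the dispatch implied by the fragments above; the exact
 * control flow and the xrep_rmap_scan_iext() name (the in-core extent
 * walker documented at line 479) are assumptions:
 */
if (ifp->if_format == XFS_DINODE_FMT_BTREE) {
	/* Scan the on-disk bmbt; this also emits BMBT_BLOCK rmaps. */
	error = xrep_rmap_scan_bmbt(&rf, ip, &mappings_done);
	if (error || mappings_done)
		return error;
} else if (ifp->if_format != XFS_DINODE_FMT_EXTENTS) {
	return 0;	/* local/dev format forks map no blocks */
}
if (xfs_ifork_is_realtime(ip, whichfork))
	return 0;	/* rt extents live on the rt device, not this AG */
return xrep_rmap_scan_iext(rr, &rf, ifp);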
550 struct xfs_inode *ip) in xrep_rmap_scan_ilock() argument
554 if (xfs_need_iread_extents(&ip->i_df)) { in xrep_rmap_scan_ilock()
559 if (xfs_inode_has_attr_fork(ip) && xfs_need_iread_extents(&ip->i_af)) in xrep_rmap_scan_ilock()
563 xfs_ilock(ip, lock_mode); in xrep_rmap_scan_ilock()
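/*
 * A sketch of the lock-mode decision above: pulling an extent list in
 * from disk modifies the fork, which needs ILOCK_EXCL; a fork whose
 * extents are already in memory only needs ILOCK_SHARED for the scan.
 */
if (xfs_need_iread_extents(&ip->i_df) ||
    (xfs_inode_has_attr_fork(ip) && xfs_need_iread_extents(&ip->i_af)))
	lock_mode = XFS_ILOCK_EXCL;
else
	lock_mode = XFS_ILOCK_SHARED;
xfs_ilock(ip, lock_mode);
return lock_mode;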
571 struct xfs_inode *ip) in xrep_rmap_scan_inode() argument
573 unsigned int lock_mode = xrep_rmap_scan_ilock(ip); in xrep_rmap_scan_inode()
577 error = xrep_rmap_scan_ifork(rr, ip, XFS_DATA_FORK); in xrep_rmap_scan_inode()
582 error = xrep_rmap_scan_ifork(rr, ip, XFS_ATTR_FORK); in xrep_rmap_scan_inode()
588 xchk_iscan_mark_visited(&rr->iscan, ip); in xrep_rmap_scan_inode()
590 xfs_iunlock(ip, lock_mode); in xrep_rmap_scan_inode()
611 struct xfs_mount *mp = cur->bc_mp; in xrep_rmap_walk_inobt()
619 /* Record the inobt blocks. */ in xrep_rmap_walk_inobt()
620 error = xagb_bitmap_set_btcur_path(&ri->inobt_blocks, cur); in xrep_rmap_walk_inobt()
625 if (xfs_inobt_check_irec(cur->bc_ag.pag, &irec) != NULL) in xrep_rmap_walk_inobt()
626 return -EFSCORRUPTED; in xrep_rmap_walk_inobt()
630 /* Record a non-sparse inode chunk. */ in xrep_rmap_walk_inobt()
634 XFS_INODES_PER_CHUNK / mp->m_sb.sb_inopblock); in xrep_rmap_walk_inobt()
636 return xagb_bitmap_set(&ri->ichunk_blocks, agbno, aglen); in xrep_rmap_walk_inobt()
640 iperhole = max_t(xfs_agino_t, mp->m_sb.sb_inopblock, in xrep_rmap_walk_inobt()
642 aglen = iperhole / mp->m_sb.sb_inopblock; in xrep_rmap_walk_inobt()
652 error = xagb_bitmap_set(&ri->ichunk_blocks, agbno, aglen); in xrep_rmap_walk_inobt()
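/*
 * Worked example for the chunk math above, assuming 4k blocks and
 * 512-byte inodes (sb_inopblock = 8), and assuming the elided second
 * max_t() argument at line 641 is XFS_INODES_PER_HOLEMASK_BIT (= 4):
 * a full 64-inode chunk covers 64 / 8 = 8 blocks (the first
 * xagb_bitmap_set above), while a sparse chunk is walked in steps of
 * iperhole = max(8, 4) = 8 inodes, i.e. aglen = 8 / 8 = 1 block per
 * step, setting only the blocks whose holemask bits show allocated
 * inodes.
 */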
660 /* Collect rmaps for the blocks containing inode btrees and the inode chunks. */
668 struct xfs_scrub *sc = rr->sc; in xrep_rmap_find_inode_rmaps()
676 * chunks and the blocks in the inobt itself. in xrep_rmap_find_inode_rmaps()
678 error = xfs_btree_query_all(sc->sa.ino_cur, xrep_rmap_walk_inobt, &ri); in xrep_rmap_find_inode_rmaps()
687 struct xfs_agi *agi = sc->sa.agi_bp->b_addr; in xrep_rmap_find_inode_rmaps()
690 be32_to_cpu(agi->agi_root), 1); in xrep_rmap_find_inode_rmaps()
696 if (xfs_has_finobt(sc->mp)) { in xrep_rmap_find_inode_rmaps()
698 sc->sa.fino_cur); in xrep_rmap_find_inode_rmaps()
727 irec->rc_domain != XFS_REFC_DOMAIN_COW) in xrep_rmap_walk_cowblocks()
728 return -EFSCORRUPTED; in xrep_rmap_walk_cowblocks()
730 return xagb_bitmap_set(bitmap, irec->rc_startblock, irec->rc_blockcount); in xrep_rmap_walk_cowblocks()
734 * Collect rmaps for the blocks containing the refcount btree, and all CoW
748 .rc_startblock = -1U, in xrep_rmap_find_refcount_rmaps()
751 struct xfs_scrub *sc = rr->sc; in xrep_rmap_find_refcount_rmaps()
754 if (!xfs_has_reflink(sc->mp)) in xrep_rmap_find_refcount_rmaps()
761 error = xagb_bitmap_set_btblocks(&refcountbt_blocks, sc->sa.refc_cur); in xrep_rmap_find_refcount_rmaps()
766 error = xfs_refcount_query_range(sc->sa.refc_cur, &low, &high, in xrep_rmap_find_refcount_rmaps()
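/*
 * Presumably the collected bitmap is then stashed with the CoW staging
 * owner; a sketch, assuming the cow_blocks local name and the canonical
 * XFS_RMAP_OINFO_COW owner info:
 */
error = xrep_rmap_stash_bitmap(rr, &cow_blocks, &XFS_RMAP_OINFO_COW);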
789 struct xfs_scrub *sc = rr->sc; in xrep_rmap_find_agheader_rmaps()
791 /* Create a record for the AG sb->agfl. */ in xrep_rmap_find_agheader_rmaps()
792 return xrep_rmap_stash(rr, XFS_SB_BLOCK(sc->mp), in xrep_rmap_find_agheader_rmaps()
793 XFS_AGFL_BLOCK(sc->mp) - XFS_SB_BLOCK(sc->mp) + 1, in xrep_rmap_find_agheader_rmaps()
802 struct xfs_scrub *sc = rr->sc; in xrep_rmap_find_log_rmaps()
804 if (!xfs_ag_contains_log(sc->mp, sc->sa.pag->pag_agno)) in xrep_rmap_find_log_rmaps()
808 XFS_FSB_TO_AGBNO(sc->mp, sc->mp->m_sb.sb_logstart), in xrep_rmap_find_log_rmaps()
809 sc->mp->m_sb.sb_logblocks, XFS_RMAP_OWN_LOG, 0, 0); in xrep_rmap_find_log_rmaps()
822 error = xrep_rmap_check_mapping(rr->sc, rec); in xrep_rmap_check_record()
826 rr->nr_records++; in xrep_rmap_check_record()
831 * Generate all the reverse-mappings for this AG, a list of the old rmapbt
832 * blocks, and the new btreeblks count. Figure out if we have enough free
840 struct xfs_scrub *sc = rr->sc; in xrep_rmap_find_rmaps()
841 struct xchk_ag *sa = &sc->sa; in xrep_rmap_find_rmaps()
842 struct xfs_inode *ip; in xrep_rmap_find_rmaps() local
846 /* Find all the per-AG metadata. */ in xrep_rmap_find_rmaps()
847 xrep_ag_btcur_init(sc, &sc->sa); in xrep_rmap_find_rmaps()
863 xchk_ag_btcur_free(&sc->sa); in xrep_rmap_find_rmaps()
882 sa->agf_bp = NULL; in xrep_rmap_find_rmaps()
883 sa->agi_bp = NULL; in xrep_rmap_find_rmaps()
890 while ((error = xchk_iscan_iter(&rr->iscan, &ip)) == 1) { in xrep_rmap_find_rmaps()
891 error = xrep_rmap_scan_inode(rr, ip); in xrep_rmap_find_rmaps()
892 xchk_irele(sc, ip); in xrep_rmap_find_rmaps()
899 xchk_iscan_iter_finish(&rr->iscan); in xrep_rmap_find_rmaps()
916 * If a hook failed to update the in-memory btree, we lack the data to in xrep_rmap_find_rmaps()
919 if (xchk_iscan_aborted(&rr->iscan)) in xrep_rmap_find_rmaps()
920 return -EFSCORRUPTED; in xrep_rmap_find_rmaps()
925 * all actively-owned space in the filesystem. At the same time, check in xrep_rmap_find_rmaps()
929 mcur = xfs_rmapbt_mem_cursor(rr->sc->sa.pag, NULL, &rr->rmap_btree); in xrep_rmap_find_rmaps()
930 sc->sa.bno_cur = xfs_bnobt_init_cursor(sc->mp, sc->tp, sc->sa.agf_bp, in xrep_rmap_find_rmaps()
931 sc->sa.pag); in xrep_rmap_find_rmaps()
933 rr->nr_records = 0; in xrep_rmap_find_rmaps()
936 xfs_btree_del_cursor(sc->sa.bno_cur, error); in xrep_rmap_find_rmaps()
937 sc->sa.bno_cur = NULL; in xrep_rmap_find_rmaps()
959 return xagb_bitmap_set(ra->bitmap, agbno, 1); in xrep_rmap_walk_agfl()
964 * number of blocks needed to store the previously observed rmapbt records and
966 * blocks, return a bitmap of OWN_AG extents in @freesp_blocks and set @done to
979 .agno = rr->sc->sa.pag->pag_agno, in xrep_rmap_try_reserve()
981 struct xfs_scrub *sc = rr->sc; in xrep_rmap_try_reserve()
983 struct xfs_agf *agf = sc->sa.agf_bp->b_addr; in xrep_rmap_try_reserve()
991 * this function to reflect however many btree blocks we need to store in xrep_rmap_try_reserve()
993 * made to support the new rmapbt blocks), so we save the old value in xrep_rmap_try_reserve()
994 * here so we can decide if we've reserved enough blocks. in xrep_rmap_try_reserve()
996 nr_blocks = rr->new_btree.bload.nr_blocks; in xrep_rmap_try_reserve()
1004 error = xrep_newbt_alloc_blocks(&rr->new_btree, in xrep_rmap_try_reserve()
1005 nr_blocks - *blocks_reserved); in xrep_rmap_try_reserve()
1009 *blocks_reserved = rr->new_btree.bload.nr_blocks; in xrep_rmap_try_reserve()
1014 /* Set all the bnobt blocks in the bitmap. */ in xrep_rmap_try_reserve()
1015 sc->sa.bno_cur = xfs_bnobt_init_cursor(sc->mp, sc->tp, sc->sa.agf_bp, in xrep_rmap_try_reserve()
1016 sc->sa.pag); in xrep_rmap_try_reserve()
1017 error = xagb_bitmap_set_btblocks(freesp_blocks, sc->sa.bno_cur); in xrep_rmap_try_reserve()
1018 xfs_btree_del_cursor(sc->sa.bno_cur, error); in xrep_rmap_try_reserve()
1019 sc->sa.bno_cur = NULL; in xrep_rmap_try_reserve()
1023 /* Set all the cntbt blocks in the bitmap. */ in xrep_rmap_try_reserve()
1024 sc->sa.cnt_cur = xfs_cntbt_init_cursor(sc->mp, sc->tp, sc->sa.agf_bp, in xrep_rmap_try_reserve()
1025 sc->sa.pag); in xrep_rmap_try_reserve()
1026 error = xagb_bitmap_set_btblocks(freesp_blocks, sc->sa.cnt_cur); in xrep_rmap_try_reserve()
1027 xfs_btree_del_cursor(sc->sa.cnt_cur, error); in xrep_rmap_try_reserve()
1028 sc->sa.cnt_cur = NULL; in xrep_rmap_try_reserve()
1033 rr->freesp_btblocks = xagb_bitmap_hweight(freesp_blocks) - 2; in xrep_rmap_try_reserve()
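/*
 * The two blocks subtracted above are presumably the bnobt and cntbt
 * root blocks, which agf_btreeblks does not count; compare the
 * "af_blocks - 1" rmapbt root adjustment at line 1130 below.
 */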
1035 /* Set all the new rmapbt blocks in the bitmap. */ in xrep_rmap_try_reserve()
1036 list_for_each_entry_safe(resv, n, &rr->new_btree.resv_list, list) { in xrep_rmap_try_reserve()
1037 error = xagb_bitmap_set(freesp_blocks, resv->agbno, resv->len); in xrep_rmap_try_reserve()
1042 /* Set all the AGFL blocks in the bitmap. */ in xrep_rmap_try_reserve()
1043 error = xfs_alloc_read_agfl(sc->sa.pag, sc->tp, &agfl_bp); in xrep_rmap_try_reserve()
1047 error = xfs_agfl_walk(sc->mp, agf, agfl_bp, xrep_rmap_walk_agfl, &ra); in xrep_rmap_try_reserve()
1054 /* Compute how many blocks we'll need for all the rmaps. */ in xrep_rmap_try_reserve()
1056 &rr->new_btree.bload, rr->nr_records + freesp_records); in xrep_rmap_try_reserve()
1060 /* We're done when we don't need more blocks. */ in xrep_rmap_try_reserve()
1061 *done = nr_blocks >= rr->new_btree.bload.nr_blocks; in xrep_rmap_try_reserve()
1079 /* Compute how many blocks we'll need for the rmaps collected so far. */ in xrep_rmap_reserve_space()
1081 &rr->new_btree.bload, rr->nr_records); in xrep_rmap_reserve_space()
1086 if (xchk_should_terminate(rr->sc, &error)) in xrep_rmap_reserve_space()
1093 * number of blocks needed to store the previously observed rmapbt in xrep_rmap_reserve_space()
1095 * Finish when we don't need more blocks. in xrep_rmap_reserve_space()
1105 xrep_ag_btcur_init(rr->sc, &rr->sc->sa); in xrep_rmap_reserve_space()
1107 xchk_ag_btcur_free(&rr->sc->sa); in xrep_rmap_reserve_space()
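/*
 * The shape of the reservation phase suggested by the fragments above,
 * as a sketch (the argument order of xrep_rmap_try_reserve() is an
 * assumption); this is the concrete form of the RMB/RMB' loop sketched
 * near the top of this listing:
 */
do {
	if (xchk_should_terminate(rr->sc, &error))
		break;
	error = xrep_rmap_try_reserve(rr, rmap_cur, &freesp_blocks,
			&blocks_reserved, &done);
	if (error)
		break;
} while (!done);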
1121 struct xfs_scrub *sc = rr->sc; in xrep_rmap_reset_counters()
1122 struct xfs_perag *pag = sc->sa.pag; in xrep_rmap_reset_counters()
1123 struct xfs_agf *agf = sc->sa.agf_bp->b_addr; in xrep_rmap_reset_counters()
1130 rmap_btblocks = rr->new_btree.afake.af_blocks - 1; in xrep_rmap_reset_counters()
1131 agf->agf_btreeblks = cpu_to_be32(rr->freesp_btblocks + rmap_btblocks); in xrep_rmap_reset_counters()
1132 xfs_alloc_log_agf(sc->tp, sc->sa.agf_bp, XFS_AGF_BTREEBLKS); in xrep_rmap_reset_counters()
1136 * process to reap the old btree blocks will race with the AIL trying in xrep_rmap_reset_counters()
1137 * to checkpoint the old btree blocks into the filesystem. If the new in xrep_rmap_reset_counters()
1142 * height values before re-initializing the perag info from the updated in xrep_rmap_reset_counters()
1145 pag->pagf_repair_rmap_level = pag->pagf_rmap_level; in xrep_rmap_reset_counters()
1168 error = xfs_btree_increment(rr->mcur, 0, &stat); in xrep_rmap_get_records()
1172 return -EFSCORRUPTED; in xrep_rmap_get_records()
1174 error = xfs_rmap_get_rec(rr->mcur, &cur->bc_rec.r, &stat); in xrep_rmap_get_records()
1178 return -EFSCORRUPTED; in xrep_rmap_get_records()
1181 cur->bc_ops->init_rec_from_cur(cur, block_rec); in xrep_rmap_get_records()
1187 /* Feed one of the new btree blocks to the bulk loader. */
1196 return xrep_newbt_claim_block(cur, &rr->new_btree, ptr); in xrep_rmap_claim_block()
1210 * compute the OWN_AG records /after/ allocating blocks for the records in xrep_rmap_alloc_vextent()
1213 * for new AGFL blocks. in xrep_rmap_alloc_vextent()
1220 * If xrep_fix_freelist fixed the freelist by moving blocks from the in xrep_rmap_alloc_vextent()
1221 * free space btrees or by removing blocks from the AGFL and queueing in xrep_rmap_alloc_vextent()
1227 * btree's blocks, which means that we can't have EFIs for former AGFL in xrep_rmap_alloc_vextent()
1228 * blocks attached to the repair transaction when we commit the new in xrep_rmap_alloc_vextent()
1271 struct xfs_scrub *sc = rr->sc; in xrep_rmap_build_new_tree()
1272 struct xfs_perag *pag = sc->sa.pag; in xrep_rmap_build_new_tree()
1273 struct xfs_agf *agf = sc->sa.agf_bp->b_addr; in xrep_rmap_build_new_tree()
1280 * per-AG rmapbt reservation after we commit the new btree root and in xrep_rmap_build_new_tree()
1281 * want to dispose of the old btree blocks. in xrep_rmap_build_new_tree()
1283 rr->old_rmapbt_fsbcount = be32_to_cpu(agf->agf_rmap_blocks); in xrep_rmap_build_new_tree()
1289 * attach it to the AG header. The new blocks are accounted to the in xrep_rmap_build_new_tree()
1290 * rmapbt per-AG reservation, which we will adjust further after in xrep_rmap_build_new_tree()
1293 fsbno = XFS_AGB_TO_FSB(sc->mp, pag->pag_agno, XFS_RMAP_BLOCK(sc->mp)); in xrep_rmap_build_new_tree()
1294 xrep_newbt_init_ag(&rr->new_btree, sc, &XFS_RMAP_OINFO_SKIP_UPDATE, in xrep_rmap_build_new_tree()
1296 rr->new_btree.bload.get_records = xrep_rmap_get_records; in xrep_rmap_build_new_tree()
1297 rr->new_btree.bload.claim_block = xrep_rmap_claim_block; in xrep_rmap_build_new_tree()
1298 rr->new_btree.alloc_vextent = xrep_rmap_alloc_vextent; in xrep_rmap_build_new_tree()
1299 rmap_cur = xfs_rmapbt_init_cursor(sc->mp, NULL, NULL, pag); in xrep_rmap_build_new_tree()
1300 xfs_btree_stage_afakeroot(rmap_cur, &rr->new_btree.afake); in xrep_rmap_build_new_tree()
1303 * Initialize @rr->new_btree, reserve space for the new rmapbt, in xrep_rmap_build_new_tree()
1314 rr->mcur = xfs_rmapbt_mem_cursor(rr->sc->sa.pag, NULL, in xrep_rmap_build_new_tree()
1315 &rr->rmap_btree); in xrep_rmap_build_new_tree()
1317 error = xrep_rmap_count_records(rr->mcur, &rr->nr_records); in xrep_rmap_build_new_tree()
1324 * that we don't trip the verifiers when writing the new btree blocks in xrep_rmap_build_new_tree()
1327 pag->pagf_repair_rmap_level = rr->new_btree.bload.btree_height; in xrep_rmap_build_new_tree()
1331 * increment in ->get_records positions us at the first record. in xrep_rmap_build_new_tree()
1333 error = xfs_btree_goto_left_edge(rr->mcur); in xrep_rmap_build_new_tree()
1338 error = xfs_btree_bload(rmap_cur, &rr->new_btree.bload, rr); in xrep_rmap_build_new_tree()
1346 xfs_rmapbt_commit_staged_btree(rmap_cur, sc->tp, sc->sa.agf_bp); in xrep_rmap_build_new_tree()
1348 xfs_btree_del_cursor(rr->mcur, 0); in xrep_rmap_build_new_tree()
1349 rr->mcur = NULL; in xrep_rmap_build_new_tree()
1353 * updating the in-memory btree. Abort the scan to stop live updates. in xrep_rmap_build_new_tree()
1355 xchk_iscan_abort(&rr->iscan); in xrep_rmap_build_new_tree()
1358 * The newly committed rmap recordset includes mappings for the blocks in xrep_rmap_build_new_tree()
1363 rr->new_btree.oinfo = XFS_RMAP_OINFO_AG; in xrep_rmap_build_new_tree()
1370 /* Dispose of any unused blocks and the accounting information. */ in xrep_rmap_build_new_tree()
1371 error = xrep_newbt_commit(&rr->new_btree); in xrep_rmap_build_new_tree()
1378 pag->pagf_repair_rmap_level = 0; in xrep_rmap_build_new_tree()
1380 xfs_btree_del_cursor(rr->mcur, error); in xrep_rmap_build_new_tree()
1384 xrep_newbt_cancel(&rr->new_btree); in xrep_rmap_build_new_tree()
1404 return xagb_bitmap_clear(&rfg->rmap_gaps, rec->ar_startblock, in xrep_rmap_find_freesp()
1405 rec->ar_blockcount); in xrep_rmap_find_freesp()
1418 if (rec->rm_startblock > rfg->next_agbno) { in xrep_rmap_find_gaps()
1419 error = xagb_bitmap_set(&rfg->rmap_gaps, rfg->next_agbno, in xrep_rmap_find_gaps()
1420 rec->rm_startblock - rfg->next_agbno); in xrep_rmap_find_gaps()
1425 rfg->next_agbno = max_t(xfs_agblock_t, rfg->next_agbno, in xrep_rmap_find_gaps()
1426 rec->rm_startblock + rec->rm_blockcount); in xrep_rmap_find_gaps()
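/*
 * The reap computation built from the two walks above is a set
 * subtraction: rmap_gaps starts as every block the new rmapbt maps to
 * nobody, and the bnobt walk clears the blocks that are legitimately
 * free.  Whatever survives is in neither tree, and the only blocks with
 * that property are the ones the old rmapbt occupied.
 */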
1431 * Reap the old rmapbt blocks. Now that the rmapbt is fully rebuilt, we make
1443 struct xfs_scrub *sc = rr->sc; in xrep_rmap_remove_old_tree()
1444 struct xfs_agf *agf = sc->sa.agf_bp->b_addr; in xrep_rmap_remove_old_tree()
1445 struct xfs_perag *pag = sc->sa.pag; in xrep_rmap_remove_old_tree()
1453 mcur = xfs_rmapbt_mem_cursor(rr->sc->sa.pag, NULL, &rr->rmap_btree); in xrep_rmap_remove_old_tree()
1461 agend = be32_to_cpu(agf->agf_length); in xrep_rmap_remove_old_tree()
1464 agend - rfg.next_agbno); in xrep_rmap_remove_old_tree()
1470 sc->sa.bno_cur = xfs_bnobt_init_cursor(sc->mp, sc->tp, sc->sa.agf_bp, in xrep_rmap_remove_old_tree()
1471 sc->sa.pag); in xrep_rmap_remove_old_tree()
1472 error = xfs_alloc_query_all(sc->sa.bno_cur, xrep_rmap_find_freesp, in xrep_rmap_remove_old_tree()
1474 xfs_btree_del_cursor(sc->sa.bno_cur, error); in xrep_rmap_remove_old_tree()
1475 sc->sa.bno_cur = NULL; in xrep_rmap_remove_old_tree()
1480 * Free the "free" blocks that the new rmapbt knows about but the bnobt in xrep_rmap_remove_old_tree()
1481 * doesn't--these are the old rmapbt blocks. Credit the old rmapbt in xrep_rmap_remove_old_tree()
1482 * block usage count back to the per-AG rmapbt reservation (and not in xrep_rmap_remove_old_tree()
1492 * Now that we've zapped all the old rmapbt blocks we can turn off in xrep_rmap_remove_old_tree()
1493 * the alternate height mechanism and reset the per-AG space in xrep_rmap_remove_old_tree()
1496 pag->pagf_repair_rmap_level = 0; in xrep_rmap_remove_old_tree()
1497 sc->flags |= XREP_RESET_PERAG_RESV; in xrep_rmap_remove_old_tree()
1514 * metadata. IOWs, the in-memory btree knows about the AG headers, the in xrep_rmapbt_want_live_update()
1517 * the in-memory rmap btree. in xrep_rmapbt_want_live_update()
1520 * have re-locked the AGF and are ready to reserve space for the new in xrep_rmapbt_want_live_update()
1523 if (XFS_RMAP_NON_INODE_OWNER(oi->oi_owner)) in xrep_rmapbt_want_live_update()
1524 return oi->oi_owner != XFS_RMAP_OWN_AG; in xrep_rmapbt_want_live_update()
1527 return xchk_iscan_want_live_update(iscan, oi->oi_owner); in xrep_rmapbt_want_live_update()
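/*
 * Decision table implied by the predicate above:
 *   - non-inode owner, not OWN_AG: always replay into the in-memory
 *     btree (AG header, inobt, refcount, CoW, and log rmaps were
 *     stashed up front and must track live changes);
 *   - non-inode owner, OWN_AG: never replay, because OWN_AG rmaps are
 *     regenerated from the free-space btrees during reservation;
 *   - inode owner: replay only if the inode scan has already visited
 *     that inode (xchk_iscan_want_live_update).
 */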
1551 mp = rr->sc->mp; in xrep_rmapbt_live_update()
1553 if (!xrep_rmapbt_want_live_update(&rr->iscan, &p->oinfo)) in xrep_rmapbt_live_update()
1556 trace_xrep_rmap_live_update(mp, rr->sc->sa.pag->pag_agno, action, p); in xrep_rmapbt_live_update()
1562 mutex_lock(&rr->lock); in xrep_rmapbt_live_update()
1563 mcur = xfs_rmapbt_mem_cursor(rr->sc->sa.pag, tp, &rr->rmap_btree); in xrep_rmapbt_live_update()
1564 error = __xfs_rmap_finish_intent(mcur, action, p->startblock, in xrep_rmapbt_live_update()
1565 p->blockcount, &p->oinfo, p->unwritten); in xrep_rmapbt_live_update()
1570 error = xfbtree_trans_commit(&rr->rmap_btree, tp); in xrep_rmapbt_live_update()
1575 mutex_unlock(&rr->lock); in xrep_rmapbt_live_update()
1579 xfbtree_trans_cancel(&rr->rmap_btree, tp); in xrep_rmapbt_live_update()
1582 mutex_unlock(&rr->lock); in xrep_rmapbt_live_update()
1583 xchk_iscan_abort(&rr->iscan); in xrep_rmapbt_live_update()
1593 struct xfs_scrub *sc = rr->sc; in xrep_rmap_setup_scan()
1596 mutex_init(&rr->lock); in xrep_rmap_setup_scan()
1598 /* Set up in-memory rmap btree */ in xrep_rmap_setup_scan()
1599 error = xfs_rmapbt_mem_init(sc->mp, &rr->rmap_btree, sc->xmbtp, in xrep_rmap_setup_scan()
1600 sc->sa.pag->pag_agno); in xrep_rmap_setup_scan()
1605 xchk_iscan_start(sc, 30000, 100, &rr->iscan); in xrep_rmap_setup_scan()
1608 * Hook into live rmap operations so that we can update our in-memory in xrep_rmap_setup_scan()
1613 ASSERT(sc->flags & XCHK_FSGATES_RMAP); in xrep_rmap_setup_scan()
1614 xfs_rmap_hook_setup(&rr->rhook, xrep_rmapbt_live_update); in xrep_rmap_setup_scan()
1615 error = xfs_rmap_hook_add(sc->sa.pag, &rr->rhook); in xrep_rmap_setup_scan()
1621 xchk_iscan_teardown(&rr->iscan); in xrep_rmap_setup_scan()
1622 xfbtree_destroy(&rr->rmap_btree); in xrep_rmap_setup_scan()
1624 mutex_destroy(&rr->lock); in xrep_rmap_setup_scan()
1633 struct xfs_scrub *sc = rr->sc; in xrep_rmap_teardown()
1635 xchk_iscan_abort(&rr->iscan); in xrep_rmap_teardown()
1636 xfs_rmap_hook_del(sc->sa.pag, &rr->rhook); in xrep_rmap_teardown()
1637 xchk_iscan_teardown(&rr->iscan); in xrep_rmap_teardown()
1638 xfbtree_destroy(&rr->rmap_btree); in xrep_rmap_teardown()
1639 mutex_destroy(&rr->lock); in xrep_rmap_teardown()
1647 struct xrep_rmap *rr = sc->buf; in xrep_rmapbt()
1656 * These rmaps won't change even as we try to allocate blocks. in xrep_rmapbt()
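/*
 * A hedged reconstruction of the repair entry point these fragments
 * come from, wiring the phases together; the exact error plumbing is an
 * assumption:
 */
error = xrep_rmap_setup_scan(rr);
if (error)
	return error;

/* Phase I: collect rmaps for everything that isn't space metadata. */
error = xrep_rmap_find_rmaps(rr);
if (error)
	goto out;

/* Phases II-III: reserve space and bulk-load the new rmapbt. */
error = xrep_rmap_build_new_tree(rr);
if (error)
	goto out;

/* Phase IV: reap the old rmapbt blocks. */
error = xrep_rmap_remove_old_tree(rr);
out:
xrep_rmap_teardown(rr);
return error;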