Search results: lines matching the query +full:level +full:- +full:high. Each hit is shown as its source line number, the matching line, and (where applicable) the enclosing function.

1 // SPDX-License-Identifier: GPL-2.0
3 * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
43 __be32 magic = ops->buf_ops->magic[idx]; in xfs_btree_magic()
45 /* Ensure we asked for crc for crc-only magics. */ in xfs_btree_magic()
57 * on x86-64. Yes, gcc-11 fails to inline them, and explicit inlining of these
122 int level, in __xfs_btree_check_lblock_hdr() argument
125 struct xfs_mount *mp = cur->bc_mp; in __xfs_btree_check_lblock_hdr()
128 if (!uuid_equal(&block->bb_u.l.bb_uuid, &mp->m_sb.sb_meta_uuid)) in __xfs_btree_check_lblock_hdr()
130 if (block->bb_u.l.bb_blkno != in __xfs_btree_check_lblock_hdr()
133 if (block->bb_u.l.bb_pad != cpu_to_be32(0)) in __xfs_btree_check_lblock_hdr()
137 if (be32_to_cpu(block->bb_magic) != xfs_btree_magic(mp, cur->bc_ops)) in __xfs_btree_check_lblock_hdr()
139 if (be16_to_cpu(block->bb_level) != level) in __xfs_btree_check_lblock_hdr()
141 if (be16_to_cpu(block->bb_numrecs) > in __xfs_btree_check_lblock_hdr()
142 cur->bc_ops->get_maxrecs(cur, level)) in __xfs_btree_check_lblock_hdr()
156 int level, in __xfs_btree_check_fsblock() argument
159 struct xfs_mount *mp = cur->bc_mp; in __xfs_btree_check_fsblock()
163 fa = __xfs_btree_check_lblock_hdr(cur, block, level, bp); in __xfs_btree_check_fsblock()
168 * For inode-rooted btrees, the root block sits in the inode fork. In in __xfs_btree_check_fsblock()
172 if (block->bb_u.l.bb_leftsib != cpu_to_be64(NULLFSBLOCK)) in __xfs_btree_check_fsblock()
174 if (block->bb_u.l.bb_rightsib != cpu_to_be64(NULLFSBLOCK)) in __xfs_btree_check_fsblock()
181 block->bb_u.l.bb_leftsib); in __xfs_btree_check_fsblock()
184 block->bb_u.l.bb_rightsib); in __xfs_btree_check_fsblock()
189 * Check an in-memory btree block header. Return the address of the failing
196 int level, in __xfs_btree_check_memblock() argument
199 struct xfs_buftarg *btp = cur->bc_mem.xfbtree->target; in __xfs_btree_check_memblock()
203 fa = __xfs_btree_check_lblock_hdr(cur, block, level, bp); in __xfs_btree_check_memblock()
209 block->bb_u.l.bb_leftsib); in __xfs_btree_check_memblock()
212 block->bb_u.l.bb_rightsib); in __xfs_btree_check_memblock()
224 int level, in __xfs_btree_check_agblock() argument
227 struct xfs_mount *mp = cur->bc_mp; in __xfs_btree_check_agblock()
228 struct xfs_perag *pag = cur->bc_ag.pag; in __xfs_btree_check_agblock()
233 if (!uuid_equal(&block->bb_u.s.bb_uuid, &mp->m_sb.sb_meta_uuid)) in __xfs_btree_check_agblock()
235 if (block->bb_u.s.bb_blkno != cpu_to_be64(xfs_buf_daddr(bp))) in __xfs_btree_check_agblock()
239 if (be32_to_cpu(block->bb_magic) != xfs_btree_magic(mp, cur->bc_ops)) in __xfs_btree_check_agblock()
241 if (be16_to_cpu(block->bb_level) != level) in __xfs_btree_check_agblock()
243 if (be16_to_cpu(block->bb_numrecs) > in __xfs_btree_check_agblock()
244 cur->bc_ops->get_maxrecs(cur, level)) in __xfs_btree_check_agblock()
249 block->bb_u.s.bb_leftsib); in __xfs_btree_check_agblock()
252 block->bb_u.s.bb_rightsib); in __xfs_btree_check_agblock()
265 int level, in __xfs_btree_check_block() argument
268 switch (cur->bc_ops->type) { in __xfs_btree_check_block()
270 return __xfs_btree_check_memblock(cur, block, level, bp); in __xfs_btree_check_block()
272 return __xfs_btree_check_agblock(cur, block, level, bp); in __xfs_btree_check_block()
274 return __xfs_btree_check_fsblock(cur, block, level, bp); in __xfs_btree_check_block()
283 if (cur->bc_ops->ptr_len == XFS_BTREE_SHORT_PTR_LEN) in xfs_btree_block_errtag()
295 int level, /* level of the btree block */ in xfs_btree_check_block() argument
298 struct xfs_mount *mp = cur->bc_mp; in xfs_btree_check_block()
301 fa = __xfs_btree_check_block(cur, block, level, bp); in xfs_btree_check_block()
307 return -EFSCORRUPTED; in xfs_btree_check_block()
317 int level) in __xfs_btree_check_ptr() argument
319 if (level <= 0) in __xfs_btree_check_ptr()
320 return -EFSCORRUPTED; in __xfs_btree_check_ptr()
322 switch (cur->bc_ops->type) { in __xfs_btree_check_ptr()
324 if (!xfbtree_verify_bno(cur->bc_mem.xfbtree, in __xfs_btree_check_ptr()
325 be64_to_cpu((&ptr->l)[index]))) in __xfs_btree_check_ptr()
326 return -EFSCORRUPTED; in __xfs_btree_check_ptr()
329 if (!xfs_verify_fsbno(cur->bc_mp, in __xfs_btree_check_ptr()
330 be64_to_cpu((&ptr->l)[index]))) in __xfs_btree_check_ptr()
331 return -EFSCORRUPTED; in __xfs_btree_check_ptr()
334 if (!xfs_verify_agbno(cur->bc_ag.pag, in __xfs_btree_check_ptr()
335 be32_to_cpu((&ptr->s)[index]))) in __xfs_btree_check_ptr()
336 return -EFSCORRUPTED; in __xfs_btree_check_ptr()
344 * Check that a given (indexed) btree pointer at a certain level of a
352 int level) in xfs_btree_check_ptr() argument
356 error = __xfs_btree_check_ptr(cur, ptr, index, level); in xfs_btree_check_ptr()
358 switch (cur->bc_ops->type) { in xfs_btree_check_ptr()
360 xfs_err(cur->bc_mp, in xfs_btree_check_ptr()
361 "In-memory: Corrupt %sbt flags 0x%x pointer at level %d index %d fa %pS.", in xfs_btree_check_ptr()
362 cur->bc_ops->name, cur->bc_flags, level, index, in xfs_btree_check_ptr()
366 xfs_err(cur->bc_mp, in xfs_btree_check_ptr()
367 "Inode %llu fork %d: Corrupt %sbt pointer at level %d index %d.", in xfs_btree_check_ptr()
368 cur->bc_ino.ip->i_ino, in xfs_btree_check_ptr()
369 cur->bc_ino.whichfork, cur->bc_ops->name, in xfs_btree_check_ptr()
370 level, index); in xfs_btree_check_ptr()
373 xfs_err(cur->bc_mp, in xfs_btree_check_ptr()
374 "AG %u: Corrupt %sbt pointer at level %d index %d.", in xfs_btree_check_ptr()
375 cur->bc_ag.pag->pag_agno, cur->bc_ops->name, in xfs_btree_check_ptr()
376 level, index); in xfs_btree_check_ptr()
393 * long-form btree header.
404 struct xfs_buf_log_item *bip = bp->b_log_item; in xfs_btree_fsblock_calc_crc()
406 if (!xfs_has_crc(bp->b_mount)) in xfs_btree_fsblock_calc_crc()
409 block->bb_u.l.bb_lsn = cpu_to_be64(bip->bli_item.li_lsn); in xfs_btree_fsblock_calc_crc()
418 struct xfs_mount *mp = bp->b_mount; in xfs_btree_fsblock_verify_crc()
421 if (!xfs_log_check_lsn(mp, be64_to_cpu(block->bb_u.l.bb_lsn))) in xfs_btree_fsblock_verify_crc()
431 * short-form btree header.
442 struct xfs_buf_log_item *bip = bp->b_log_item; in xfs_btree_agblock_calc_crc()
444 if (!xfs_has_crc(bp->b_mount)) in xfs_btree_agblock_calc_crc()
447 block->bb_u.s.bb_lsn = cpu_to_be64(bip->bli_item.li_lsn); in xfs_btree_agblock_calc_crc()
456 struct xfs_mount *mp = bp->b_mount; in xfs_btree_agblock_verify_crc()
459 if (!xfs_log_check_lsn(mp, be64_to_cpu(block->bb_u.s.bb_lsn))) in xfs_btree_agblock_verify_crc()
480 if (unlikely(cur->bc_flags & XFS_BTREE_STAGING)) { in xfs_btree_free_block()
482 return -EFSCORRUPTED; in xfs_btree_free_block()
485 error = cur->bc_ops->free_block(cur, bp); in xfs_btree_free_block()
487 xfs_trans_binval(cur->bc_tp, bp); in xfs_btree_free_block()
501 int i; /* btree level */ in xfs_btree_del_cursor()
507 * code works from level n down to 0, and if we get an error along the in xfs_btree_del_cursor()
510 for (i = 0; i < cur->bc_nlevels; i++) { in xfs_btree_del_cursor()
511 if (cur->bc_levels[i].bp) in xfs_btree_del_cursor()
512 xfs_trans_brelse(cur->bc_tp, cur->bc_levels[i].bp); in xfs_btree_del_cursor()
523 ASSERT(!xfs_btree_is_bmap(cur->bc_ops) || cur->bc_bmap.allocated == 0 || in xfs_btree_del_cursor()
524 xfs_is_shutdown(cur->bc_mp) || error != 0); in xfs_btree_del_cursor()
526 switch (cur->bc_ops->type) { in xfs_btree_del_cursor()
528 if (cur->bc_ag.pag) in xfs_btree_del_cursor()
529 xfs_perag_put(cur->bc_ag.pag); in xfs_btree_del_cursor()
535 if (cur->bc_mem.pag) in xfs_btree_del_cursor()
536 xfs_perag_put(cur->bc_mem.pag); in xfs_btree_del_cursor()
540 kmem_cache_free(cur->bc_cache, cur); in xfs_btree_del_cursor()
548 if (cur->bc_ops->type == XFS_BTREE_TYPE_MEM) in xfs_btree_buftarg()
549 return cur->bc_mem.xfbtree->target; in xfs_btree_buftarg()
550 return cur->bc_mp->m_ddev_targp; in xfs_btree_buftarg()
558 if (cur->bc_ops->type == XFS_BTREE_TYPE_MEM) in xfs_btree_bbsize()
560 return cur->bc_mp->m_bsize; in xfs_btree_bbsize()
565 * Allocate a new one, copy the record, re-get the buffers.
572 struct xfs_mount *mp = cur->bc_mp; in xfs_btree_dup_cursor()
573 struct xfs_trans *tp = cur->bc_tp; in xfs_btree_dup_cursor()
583 if (unlikely(cur->bc_flags & XFS_BTREE_STAGING)) { in xfs_btree_dup_cursor()
585 return -EFSCORRUPTED; in xfs_btree_dup_cursor()
591 new = cur->bc_ops->dup_cursor(cur); in xfs_btree_dup_cursor()
596 new->bc_rec = cur->bc_rec; in xfs_btree_dup_cursor()
599 * For each level current, re-get the buffer and copy the ptr value. in xfs_btree_dup_cursor()
601 for (i = 0; i < new->bc_nlevels; i++) { in xfs_btree_dup_cursor()
602 new->bc_levels[i].ptr = cur->bc_levels[i].ptr; in xfs_btree_dup_cursor()
603 new->bc_levels[i].ra = cur->bc_levels[i].ra; in xfs_btree_dup_cursor()
604 bp = cur->bc_levels[i].bp; in xfs_btree_dup_cursor()
610 cur->bc_ops->buf_ops); in xfs_btree_dup_cursor()
619 new->bc_levels[i].bp = bp; in xfs_btree_dup_cursor()
628 * There are two types of blocks in the btree: leaf and non-leaf blocks.
631 * the values. A non-leaf block also starts with the same header, and
633 * to the btree blocks at the previous level.
635 * +--------+-------+-------+-------+-------+-------+-------+
637 * +--------+-------+-------+-------+-------+-------+-------+
639 * +--------+-------+-------+-------+-------+-------+-------+
640 * Non-Leaf: | header | key 1 | key 2 | key N | ptr 1 | ptr 2 | ptr N |
641 * +--------+-------+-------+-------+-------+-------+-------+
657 * However, nodes are different: each pointer has two associated keys -- one
662 * that matches any record in the leaf and to recursively update the high keys
666 * +--------+-----+-----+-----+-----+-----+-------+-------+-----+
667 * Non-Leaf: | header | lo1 | hi1 | lo2 | hi2 | ... | ptr 1 | ptr 2 | ... |
668 * +--------+-----+-----+-----+-----+-----+-------+-------+-----+
671 * depth-first search and use the low and high keys to decide if we can skip
674 * entries. For a non-overlapped tree, simply search for the record associated
675 * with the lowest key and iterate forward until a non-matching record is
683 * 1: +- file A startblock B offset C length D -----------+
684 * 2: +- file E startblock F offset G length H --------------+
685 * 3: +- file I startblock F offset J length K --+
686 * 4: +- file L... --+
690 * that ends at (B+D-1) (i.e. record 1)? A LE lookup of (B+D-1) would return
692 * query would return records 1 and 2 because they both overlap (B+D-1), and
695 * In the non-overlapped case you can do a LE lookup and decrement the cursor
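To make the pruning rule described above concrete, here is a small standalone sketch (illustrative only, not XFS code; the struct and function names are made up) of an overlapped-interval query over one block's records. A record may be skipped only when its high key is below the query's low key or its low key is above the query's high key; an LE lookup on the low key alone would wrongly stop short of record 1 in the example above.

struct irec { unsigned long long lo, hi; };     /* e.g. startblock .. startblock + len - 1 */

/* Visit every record whose range overlaps [qlo, qhi]. */
static void query_overlaps(const struct irec *recs, int nrecs,
                           unsigned long long qlo, unsigned long long qhi,
                           void (*visit)(const struct irec *))
{
        for (int i = 0; i < nrecs; i++) {
                if (recs[i].hi < qlo || recs[i].lo > qhi)
                        continue;               /* ranges cannot overlap, skip */
                visit(&recs[i]);
        }
}

In a full tree the same test is applied to each node entry's stored low/high key pair to decide whether that subtree needs to be visited at all.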
704 if (cur->bc_ops->ptr_len == XFS_BTREE_LONG_PTR_LEN) { in xfs_btree_block_len()
705 if (xfs_has_crc(cur->bc_mp)) in xfs_btree_block_len()
709 if (xfs_has_crc(cur->bc_mp)) in xfs_btree_block_len()
715 * Calculate offset of the n-th record in a btree block.
723 (n - 1) * cur->bc_ops->rec_len; in xfs_btree_rec_offset()
727 * Calculate offset of the n-th key in a btree block.
735 (n - 1) * cur->bc_ops->key_len; in xfs_btree_key_offset()
739 * Calculate offset of the n-th high key in a btree block.
747 (n - 1) * cur->bc_ops->key_len + (cur->bc_ops->key_len / 2); in xfs_btree_high_key_offset()
751 * Calculate offset of the n-th block pointer in a btree block.
757 int level) in xfs_btree_ptr_offset() argument
760 cur->bc_ops->get_maxrecs(cur, level) * cur->bc_ops->key_len + in xfs_btree_ptr_offset()
761 (n - 1) * cur->bc_ops->ptr_len; in xfs_btree_ptr_offset()
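The four offset helpers above encode the in-block layout: records (or keys) start right after the header, the high key lives in the second half of each key slot, and the pointer area only begins after space for get_maxrecs() keys. A minimal sketch with made-up geometry numbers (the constants are hypothetical, not an on-disk format):

#include <stdio.h>

#define HDR_LEN   64    /* stand-in for xfs_btree_block_len() */
#define KEY_LEN   16    /* low key and high key packed into one slot */
#define PTR_LEN    8
#define MAXRECS  100    /* stand-in for cur->bc_ops->get_maxrecs(cur, level) */

static size_t key_off(int n)      { return HDR_LEN + (n - 1) * KEY_LEN; }
static size_t high_key_off(int n) { return key_off(n) + KEY_LEN / 2; }
static size_t ptr_off(int n)      { return HDR_LEN + MAXRECS * KEY_LEN + (n - 1) * PTR_LEN; }

int main(void)
{
        /* Key 1 follows the header, its high key sits in the second half
         * of the same slot, and ptr 1 starts only after room for MAXRECS
         * keys, mirroring xfs_btree_ptr_offset() above. */
        printf("key 1 @ %zu, high key 1 @ %zu, ptr 1 @ %zu\n",
               key_off(1), high_key_off(1), ptr_off(1));
        return 0;
}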
765 * Return a pointer to the n-th record in the btree block.
778 * Return a pointer to the n-th key in the btree block.
791 * Return a pointer to the n-th high key in the btree block.
804 * Return a pointer to the n-th block pointer in the btree block.
812 int level = xfs_btree_get_level(block); in xfs_btree_ptr_addr() local
814 ASSERT(block->bb_level != 0); in xfs_btree_ptr_addr()
817 ((char *)block + xfs_btree_ptr_offset(cur, n, level)); in xfs_btree_ptr_addr()
824 ASSERT(cur->bc_ops->type == XFS_BTREE_TYPE_INODE); in xfs_btree_ifork_ptr()
826 if (cur->bc_flags & XFS_BTREE_STAGING) in xfs_btree_ifork_ptr()
827 return cur->bc_ino.ifake->if_fork; in xfs_btree_ifork_ptr()
828 return xfs_ifork_ptr(cur->bc_ino.ip, cur->bc_ino.whichfork); in xfs_btree_ifork_ptr()
843 return (struct xfs_btree_block *)ifp->if_broot; in xfs_btree_get_iroot()
847 * Retrieve the block pointer from the cursor at the given level.
853 int level, /* level in btree */ in xfs_btree_get_block() argument
856 if (xfs_btree_at_iroot(cur, level)) { in xfs_btree_get_block()
861 *bpp = cur->bc_levels[level].bp; in xfs_btree_get_block()
866 * Change the cursor to point to the first record at the given level.
872 int level) /* level to change */ in xfs_btree_firstrec() argument
878 * Get the block pointer for this level. in xfs_btree_firstrec()
880 block = xfs_btree_get_block(cur, level, &bp); in xfs_btree_firstrec()
881 if (xfs_btree_check_block(cur, block, level, bp)) in xfs_btree_firstrec()
886 if (!block->bb_numrecs) in xfs_btree_firstrec()
891 cur->bc_levels[level].ptr = 1; in xfs_btree_firstrec()
897 * at the given level. Other levels are unaffected.
902 int level) /* level to change */ in xfs_btree_lastrec() argument
908 * Get the block pointer for this level. in xfs_btree_lastrec()
910 block = xfs_btree_get_block(cur, level, &bp); in xfs_btree_lastrec()
911 if (xfs_btree_check_block(cur, block, level, bp)) in xfs_btree_lastrec()
916 if (!block->bb_numrecs) in xfs_btree_lastrec()
921 cur->bc_levels[level].ptr = be16_to_cpu(block->bb_numrecs); in xfs_btree_lastrec()
953 for (i = nbits - 1, imask = 1u << i; ; i--, imask >>= 1) { in xfs_btree_offsets()
955 *last = offsets[i + 1] - 1; in xfs_btree_offsets()
967 struct xfs_mount *mp = cur->bc_mp; in xfs_btree_readahead_fsblock()
968 xfs_fsblock_t left = be64_to_cpu(block->bb_u.l.bb_leftsib); in xfs_btree_readahead_fsblock()
969 xfs_fsblock_t right = be64_to_cpu(block->bb_u.l.bb_rightsib); in xfs_btree_readahead_fsblock()
973 xfs_buf_readahead(mp->m_ddev_targp, XFS_FSB_TO_DADDR(mp, left), in xfs_btree_readahead_fsblock()
974 mp->m_bsize, cur->bc_ops->buf_ops); in xfs_btree_readahead_fsblock()
979 xfs_buf_readahead(mp->m_ddev_targp, XFS_FSB_TO_DADDR(mp, right), in xfs_btree_readahead_fsblock()
980 mp->m_bsize, cur->bc_ops->buf_ops); in xfs_btree_readahead_fsblock()
993 struct xfs_buftarg *btp = cur->bc_mem.xfbtree->target; in xfs_btree_readahead_memblock()
994 xfbno_t left = be64_to_cpu(block->bb_u.l.bb_leftsib); in xfs_btree_readahead_memblock()
995 xfbno_t right = be64_to_cpu(block->bb_u.l.bb_rightsib); in xfs_btree_readahead_memblock()
1000 cur->bc_ops->buf_ops); in xfs_btree_readahead_memblock()
1006 cur->bc_ops->buf_ops); in xfs_btree_readahead_memblock()
1019 struct xfs_mount *mp = cur->bc_mp; in xfs_btree_readahead_agblock()
1020 xfs_agnumber_t agno = cur->bc_ag.pag->pag_agno; in xfs_btree_readahead_agblock()
1021 xfs_agblock_t left = be32_to_cpu(block->bb_u.s.bb_leftsib); in xfs_btree_readahead_agblock()
1022 xfs_agblock_t right = be32_to_cpu(block->bb_u.s.bb_rightsib); in xfs_btree_readahead_agblock()
1026 xfs_buf_readahead(mp->m_ddev_targp, in xfs_btree_readahead_agblock()
1028 mp->m_bsize, cur->bc_ops->buf_ops); in xfs_btree_readahead_agblock()
1033 xfs_buf_readahead(mp->m_ddev_targp, in xfs_btree_readahead_agblock()
1035 mp->m_bsize, cur->bc_ops->buf_ops); in xfs_btree_readahead_agblock()
1043 * Read-ahead btree blocks, at the given level.
1049 int lev, /* level in btree */ in xfs_btree_readahead()
1055 * No readahead needed if we are at the root level and the in xfs_btree_readahead()
1061 if ((cur->bc_levels[lev].ra | lr) == cur->bc_levels[lev].ra) in xfs_btree_readahead()
1064 cur->bc_levels[lev].ra |= lr; in xfs_btree_readahead()
1065 block = XFS_BUF_TO_BLOCK(cur->bc_levels[lev].bp); in xfs_btree_readahead()
1067 switch (cur->bc_ops->type) { in xfs_btree_readahead()
1092 switch (cur->bc_ops->type) { in xfs_btree_ptr_to_daddr()
1094 *daddr = XFS_AGB_TO_DADDR(cur->bc_mp, cur->bc_ag.pag->pag_agno, in xfs_btree_ptr_to_daddr()
1095 be32_to_cpu(ptr->s)); in xfs_btree_ptr_to_daddr()
1098 *daddr = XFS_FSB_TO_DADDR(cur->bc_mp, be64_to_cpu(ptr->l)); in xfs_btree_ptr_to_daddr()
1101 *daddr = xfbno_to_daddr(be64_to_cpu(ptr->l)); in xfs_btree_ptr_to_daddr()
1125 cur->bc_ops->buf_ops); in xfs_btree_readahead_ptr()
1129 * Set the buffer for level "lev" in the cursor to bp, releasing
1135 int lev, /* level in btree */ in xfs_btree_setbuf()
1140 if (cur->bc_levels[lev].bp) in xfs_btree_setbuf()
1141 xfs_trans_brelse(cur->bc_tp, cur->bc_levels[lev].bp); in xfs_btree_setbuf()
1142 cur->bc_levels[lev].bp = bp; in xfs_btree_setbuf()
1143 cur->bc_levels[lev].ra = 0; in xfs_btree_setbuf()
1146 if (cur->bc_ops->ptr_len == XFS_BTREE_LONG_PTR_LEN) { in xfs_btree_setbuf()
1147 if (b->bb_u.l.bb_leftsib == cpu_to_be64(NULLFSBLOCK)) in xfs_btree_setbuf()
1148 cur->bc_levels[lev].ra |= XFS_BTCUR_LEFTRA; in xfs_btree_setbuf()
1149 if (b->bb_u.l.bb_rightsib == cpu_to_be64(NULLFSBLOCK)) in xfs_btree_setbuf()
1150 cur->bc_levels[lev].ra |= XFS_BTCUR_RIGHTRA; in xfs_btree_setbuf()
1152 if (b->bb_u.s.bb_leftsib == cpu_to_be32(NULLAGBLOCK)) in xfs_btree_setbuf()
1153 cur->bc_levels[lev].ra |= XFS_BTCUR_LEFTRA; in xfs_btree_setbuf()
1154 if (b->bb_u.s.bb_rightsib == cpu_to_be32(NULLAGBLOCK)) in xfs_btree_setbuf()
1155 cur->bc_levels[lev].ra |= XFS_BTCUR_RIGHTRA; in xfs_btree_setbuf()
1164 if (cur->bc_ops->ptr_len == XFS_BTREE_LONG_PTR_LEN) in xfs_btree_ptr_is_null()
1165 return ptr->l == cpu_to_be64(NULLFSBLOCK); in xfs_btree_ptr_is_null()
1167 return ptr->s == cpu_to_be32(NULLAGBLOCK); in xfs_btree_ptr_is_null()
1175 if (cur->bc_ops->ptr_len == XFS_BTREE_LONG_PTR_LEN) in xfs_btree_set_ptr_null()
1176 ptr->l = cpu_to_be64(NULLFSBLOCK); in xfs_btree_set_ptr_null()
1178 ptr->s = cpu_to_be32(NULLAGBLOCK); in xfs_btree_set_ptr_null()
1187 if (cur->bc_ops->ptr_len == XFS_BTREE_LONG_PTR_LEN) in xfs_btree_ptrs_equal()
1188 return ptr1->l == ptr2->l; in xfs_btree_ptrs_equal()
1189 return ptr1->s == ptr2->s; in xfs_btree_ptrs_equal()
1204 if (cur->bc_ops->ptr_len == XFS_BTREE_LONG_PTR_LEN) { in xfs_btree_get_sibling()
1206 ptr->l = block->bb_u.l.bb_rightsib; in xfs_btree_get_sibling()
1208 ptr->l = block->bb_u.l.bb_leftsib; in xfs_btree_get_sibling()
1211 ptr->s = block->bb_u.s.bb_rightsib; in xfs_btree_get_sibling()
1213 ptr->s = block->bb_u.s.bb_leftsib; in xfs_btree_get_sibling()
1226 if (cur->bc_ops->ptr_len == XFS_BTREE_LONG_PTR_LEN) { in xfs_btree_set_sibling()
1228 block->bb_u.l.bb_rightsib = ptr->l; in xfs_btree_set_sibling()
1230 block->bb_u.l.bb_leftsib = ptr->l; in xfs_btree_set_sibling()
1233 block->bb_u.s.bb_rightsib = ptr->s; in xfs_btree_set_sibling()
1235 block->bb_u.s.bb_leftsib = ptr->s; in xfs_btree_set_sibling()
1245 __u16 level, in __xfs_btree_init_block() argument
1252 buf->bb_magic = cpu_to_be32(magic); in __xfs_btree_init_block()
1253 buf->bb_level = cpu_to_be16(level); in __xfs_btree_init_block()
1254 buf->bb_numrecs = cpu_to_be16(numrecs); in __xfs_btree_init_block()
1256 if (ops->ptr_len == XFS_BTREE_LONG_PTR_LEN) { in __xfs_btree_init_block()
1257 buf->bb_u.l.bb_leftsib = cpu_to_be64(NULLFSBLOCK); in __xfs_btree_init_block()
1258 buf->bb_u.l.bb_rightsib = cpu_to_be64(NULLFSBLOCK); in __xfs_btree_init_block()
1260 buf->bb_u.l.bb_blkno = cpu_to_be64(blkno); in __xfs_btree_init_block()
1261 buf->bb_u.l.bb_owner = cpu_to_be64(owner); in __xfs_btree_init_block()
1262 uuid_copy(&buf->bb_u.l.bb_uuid, &mp->m_sb.sb_meta_uuid); in __xfs_btree_init_block()
1263 buf->bb_u.l.bb_pad = 0; in __xfs_btree_init_block()
1264 buf->bb_u.l.bb_lsn = 0; in __xfs_btree_init_block()
1267 buf->bb_u.s.bb_leftsib = cpu_to_be32(NULLAGBLOCK); in __xfs_btree_init_block()
1268 buf->bb_u.s.bb_rightsib = cpu_to_be32(NULLAGBLOCK); in __xfs_btree_init_block()
1270 buf->bb_u.s.bb_blkno = cpu_to_be64(blkno); in __xfs_btree_init_block()
1272 buf->bb_u.s.bb_owner = cpu_to_be32((__u32)owner); in __xfs_btree_init_block()
1273 uuid_copy(&buf->bb_u.s.bb_uuid, &mp->m_sb.sb_meta_uuid); in __xfs_btree_init_block()
1274 buf->bb_u.s.bb_lsn = 0; in __xfs_btree_init_block()
1284 __u16 level, in xfs_btree_init_block() argument
1288 __xfs_btree_init_block(mp, block, ops, XFS_BUF_DADDR_NULL, level, in xfs_btree_init_block()
1297 __u16 level, in xfs_btree_init_buf() argument
1302 xfs_buf_daddr(bp), level, numrecs, owner); in xfs_btree_init_buf()
1303 bp->b_ops = ops->buf_ops; in xfs_btree_init_buf()
1310 switch (cur->bc_ops->type) { in xfs_btree_owner()
1312 return cur->bc_mem.xfbtree->owner; in xfs_btree_owner()
1314 return cur->bc_ino.ip->i_ino; in xfs_btree_owner()
1316 return cur->bc_ag.pag->pag_agno; in xfs_btree_owner()
1327 int level, in xfs_btree_init_block_cur() argument
1330 xfs_btree_init_buf(cur->bc_mp, bp, cur->bc_ops, level, numrecs, in xfs_btree_init_block_cur()
1340 switch (cur->bc_ops->type) { in xfs_btree_buf_to_ptr()
1342 ptr->s = cpu_to_be32(xfs_daddr_to_agbno(cur->bc_mp, in xfs_btree_buf_to_ptr()
1346 ptr->l = cpu_to_be64(XFS_DADDR_TO_FSB(cur->bc_mp, in xfs_btree_buf_to_ptr()
1350 ptr->l = cpu_to_be64(xfs_daddr_to_xfbno(xfs_buf_daddr(bp))); in xfs_btree_buf_to_ptr()
1360 xfs_buf_set_ref(bp, cur->bc_ops->lru_refs); in xfs_btree_set_refs()
1376 error = xfs_trans_get_buf(cur->bc_tp, xfs_btree_buftarg(cur), d, in xfs_btree_get_buf_block()
1381 (*bpp)->b_ops = cur->bc_ops->buf_ops; in xfs_btree_get_buf_block()
1398 struct xfs_mount *mp = cur->bc_mp; in xfs_btree_read_buf_block()
1408 error = xfs_trans_read_buf(mp, cur->bc_tp, xfs_btree_buftarg(cur), d, in xfs_btree_read_buf_block()
1410 cur->bc_ops->buf_ops); in xfs_btree_read_buf_block()
1432 memcpy(dst_key, src_key, numkeys * cur->bc_ops->key_len); in xfs_btree_copy_keys()
1446 memcpy(dst_rec, src_rec, numrecs * cur->bc_ops->rec_len); in xfs_btree_copy_recs()
1460 memcpy(dst_ptr, src_ptr, numptrs * cur->bc_ops->ptr_len); in xfs_btree_copy_ptrs()
1476 ASSERT(dir == 1 || dir == -1); in xfs_btree_shift_keys()
1478 dst_key = (char *)key + (dir * cur->bc_ops->key_len); in xfs_btree_shift_keys()
1479 memmove(dst_key, key, numkeys * cur->bc_ops->key_len); in xfs_btree_shift_keys()
1495 ASSERT(dir == 1 || dir == -1); in xfs_btree_shift_recs()
1497 dst_rec = (char *)rec + (dir * cur->bc_ops->rec_len); in xfs_btree_shift_recs()
1498 memmove(dst_rec, rec, numrecs * cur->bc_ops->rec_len); in xfs_btree_shift_recs()
1514 ASSERT(dir == 1 || dir == -1); in xfs_btree_shift_ptrs()
1516 dst_ptr = (char *)ptr + (dir * cur->bc_ops->ptr_len); in xfs_btree_shift_ptrs()
1517 memmove(dst_ptr, ptr, numptrs * cur->bc_ops->ptr_len); in xfs_btree_shift_ptrs()
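The shift helpers above all follow the same dir = +1 / -1 convention: shifting right by one entry opens a gap for an insert, shifting left by one closes the gap left by a delete, and memmove() is required because source and destination overlap. A generic sketch (not the kernel code; the names are illustrative):

#include <string.h>

static void shift_entries(void *base, int entry_len, int nentries, int dir)
{
        /* dir == 1 moves everything one slot toward higher offsets,
         * dir == -1 one slot toward lower offsets. */
        memmove((char *)base + dir * entry_len, base, (size_t)nentries * entry_len);
}

The insert path later in this file uses exactly this pattern, shifting numrecs - ptr + 1 records right by one to open the slot at position ptr.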
1532 xfs_trans_buf_set_type(cur->bc_tp, bp, XFS_BLFT_BTREE_BUF); in xfs_btree_log_keys()
1533 xfs_trans_log_buf(cur->bc_tp, bp, in xfs_btree_log_keys()
1535 xfs_btree_key_offset(cur, last + 1) - 1); in xfs_btree_log_keys()
1537 xfs_trans_log_inode(cur->bc_tp, cur->bc_ino.ip, in xfs_btree_log_keys()
1538 xfs_ilog_fbroot(cur->bc_ino.whichfork)); in xfs_btree_log_keys()
1553 xfs_trans_buf_set_type(cur->bc_tp, bp, XFS_BLFT_BTREE_BUF); in xfs_btree_log_recs()
1554 xfs_trans_log_buf(cur->bc_tp, bp, in xfs_btree_log_recs()
1556 xfs_btree_rec_offset(cur, last + 1) - 1); in xfs_btree_log_recs()
1573 int level = xfs_btree_get_level(block); in xfs_btree_log_ptrs() local
1575 xfs_trans_buf_set_type(cur->bc_tp, bp, XFS_BLFT_BTREE_BUF); in xfs_btree_log_ptrs()
1576 xfs_trans_log_buf(cur->bc_tp, bp, in xfs_btree_log_ptrs()
1577 xfs_btree_ptr_offset(cur, first, level), in xfs_btree_log_ptrs()
1578 xfs_btree_ptr_offset(cur, last + 1, level) - 1); in xfs_btree_log_ptrs()
1580 xfs_trans_log_inode(cur->bc_tp, cur->bc_ino.ip, in xfs_btree_log_ptrs()
1581 xfs_ilog_fbroot(cur->bc_ino.whichfork)); in xfs_btree_log_ptrs()
1628 if (xfs_has_crc(cur->bc_mp)) { in xfs_btree_log_block()
1643 (cur->bc_ops->ptr_len == XFS_BTREE_LONG_PTR_LEN) ? in xfs_btree_log_block()
1646 xfs_trans_buf_set_type(cur->bc_tp, bp, XFS_BLFT_BTREE_BUF); in xfs_btree_log_block()
1647 xfs_trans_log_buf(cur->bc_tp, bp, first, last); in xfs_btree_log_block()
1649 xfs_trans_log_inode(cur->bc_tp, cur->bc_ino.ip, in xfs_btree_log_block()
1650 xfs_ilog_fbroot(cur->bc_ino.whichfork)); in xfs_btree_log_block()
1655 * Increment cursor by one record at the level.
1656 * For nonzero levels the leaf-ward information is untouched.
1661 int level, in xfs_btree_increment() argument
1670 ASSERT(level < cur->bc_nlevels); in xfs_btree_increment()
1672 /* Read-ahead to the right at this level. */ in xfs_btree_increment()
1673 xfs_btree_readahead(cur, level, XFS_BTCUR_RIGHTRA); in xfs_btree_increment()
1676 block = xfs_btree_get_block(cur, level, &bp); in xfs_btree_increment()
1679 error = xfs_btree_check_block(cur, block, level, bp); in xfs_btree_increment()
1685 if (++cur->bc_levels[level].ptr <= xfs_btree_get_numrecs(block)) in xfs_btree_increment()
1699 for (lev = level + 1; lev < cur->bc_nlevels; lev++) { in xfs_btree_increment()
1708 if (++cur->bc_levels[lev].ptr <= xfs_btree_get_numrecs(block)) in xfs_btree_increment()
1711 /* Read-ahead the right block for the next loop. */ in xfs_btree_increment()
1719 if (lev == cur->bc_nlevels) { in xfs_btree_increment()
1720 if (cur->bc_ops->type == XFS_BTREE_TYPE_INODE) in xfs_btree_increment()
1724 error = -EFSCORRUPTED; in xfs_btree_increment()
1727 ASSERT(lev < cur->bc_nlevels); in xfs_btree_increment()
1733 for (block = xfs_btree_get_block(cur, lev, &bp); lev > level; ) { in xfs_btree_increment()
1736 ptrp = xfs_btree_ptr_addr(cur, cur->bc_levels[lev].ptr, block); in xfs_btree_increment()
1737 --lev; in xfs_btree_increment()
1743 cur->bc_levels[lev].ptr = 1; in xfs_btree_increment()
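Condensed into a self-contained sketch (illustrative only; nrecs[] stands in for reading each block's numrecs, and the real code re-reads the child buffers on the way back down), the increment above is a walk-up/walk-down over a per-level, 1-based position array:

static int cursor_increment(int *ptr, const int *nrecs, int nlevels)
{
        int lev;

        if (++ptr[0] <= nrecs[0])       /* fast path: next entry in this block */
                return 1;

        /* Walk up until some level still has an entry to our right. */
        for (lev = 1; lev < nlevels; lev++)
                if (++ptr[lev] <= nrecs[lev])
                        break;
        if (lev == nlevels)
                return 0;               /* already at the very last record */

        /* Walk back down, pointing at entry 1 of each child block. */
        while (--lev >= 0)
                ptr[lev] = 1;
        return 1;
}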
1758 * Decrement cursor by one record at the level.
1759 * For nonzero levels the leaf-ward information is untouched.
1764 int level, in xfs_btree_decrement() argument
1773 ASSERT(level < cur->bc_nlevels); in xfs_btree_decrement()
1775 /* Read-ahead to the left at this level. */ in xfs_btree_decrement()
1776 xfs_btree_readahead(cur, level, XFS_BTCUR_LEFTRA); in xfs_btree_decrement()
1779 if (--cur->bc_levels[level].ptr > 0) in xfs_btree_decrement()
1783 block = xfs_btree_get_block(cur, level, &bp); in xfs_btree_decrement()
1786 error = xfs_btree_check_block(cur, block, level, bp); in xfs_btree_decrement()
1802 for (lev = level + 1; lev < cur->bc_nlevels; lev++) { in xfs_btree_decrement()
1803 if (--cur->bc_levels[lev].ptr > 0) in xfs_btree_decrement()
1805 /* Read-ahead the left block for the next loop. */ in xfs_btree_decrement()
1813 if (lev == cur->bc_nlevels) { in xfs_btree_decrement()
1814 if (cur->bc_ops->type == XFS_BTREE_TYPE_INODE) in xfs_btree_decrement()
1818 error = -EFSCORRUPTED; in xfs_btree_decrement()
1821 ASSERT(lev < cur->bc_nlevels); in xfs_btree_decrement()
1827 for (block = xfs_btree_get_block(cur, lev, &bp); lev > level; ) { in xfs_btree_decrement()
1830 ptrp = xfs_btree_ptr_addr(cur, cur->bc_levels[lev].ptr, block); in xfs_btree_decrement()
1831 --lev; in xfs_btree_decrement()
1836 cur->bc_levels[lev].ptr = xfs_btree_get_numrecs(block); in xfs_btree_decrement()
1861 if (!xfs_has_crc(cur->bc_mp) || in xfs_btree_check_block_owner()
1862 (cur->bc_flags & XFS_BTREE_BMBT_INVALID_OWNER)) in xfs_btree_check_block_owner()
1866 if (cur->bc_ops->ptr_len == XFS_BTREE_LONG_PTR_LEN) { in xfs_btree_check_block_owner()
1867 if (be64_to_cpu(block->bb_u.l.bb_owner) != owner) in xfs_btree_check_block_owner()
1870 if (be32_to_cpu(block->bb_u.s.bb_owner) != owner) in xfs_btree_check_block_owner()
1880 int level, /* level in the btree */ in xfs_btree_lookup_get_block() argument
1889 if (xfs_btree_at_iroot(cur, level)) { in xfs_btree_lookup_get_block()
1895 * If the old buffer at this level for the disk address we are in xfs_btree_lookup_get_block()
1896 * looking for re-use it. in xfs_btree_lookup_get_block()
1900 bp = cur->bc_levels[level].bp; in xfs_btree_lookup_get_block()
1917 /* Did we get the level we were looking for? */ in xfs_btree_lookup_get_block()
1918 if (be16_to_cpu((*blkp)->bb_level) != level) in xfs_btree_lookup_get_block()
1922 if (level != 0 && be16_to_cpu((*blkp)->bb_numrecs) == 0) in xfs_btree_lookup_get_block()
1925 xfs_btree_setbuf(cur, level, bp); in xfs_btree_lookup_get_block()
1931 xfs_trans_brelse(cur->bc_tp, bp); in xfs_btree_lookup_get_block()
1933 return -EFSCORRUPTED; in xfs_btree_lookup_get_block()
1937 * Get current search key. For level 0 we don't actually have a key
1944 int level, in xfs_lookup_get_search_key() argument
1949 if (level == 0) { in xfs_lookup_get_search_key()
1950 cur->bc_ops->init_key_from_rec(kp, in xfs_lookup_get_search_key()
1966 if (cur->bc_ops->type == XFS_BTREE_TYPE_INODE) { in xfs_btree_init_ptr_from_cur()
1968 * Inode-rooted btrees call xfs_btree_get_iroot to find the root in xfs_btree_init_ptr_from_cur()
1971 ptr->l = 0; in xfs_btree_init_ptr_from_cur()
1972 } else if (cur->bc_flags & XFS_BTREE_STAGING) { in xfs_btree_init_ptr_from_cur()
1973 ptr->s = cpu_to_be32(cur->bc_ag.afake->af_root); in xfs_btree_init_ptr_from_cur()
1975 cur->bc_ops->init_ptr_from_cur(cur, ptr); in xfs_btree_init_ptr_from_cur()
1993 int level; /* level in the btree */ in xfs_btree_lookup() local
1999 /* No such thing as a zero-level tree. */ in xfs_btree_lookup()
2000 if (XFS_IS_CORRUPT(cur->bc_mp, cur->bc_nlevels == 0)) { in xfs_btree_lookup()
2002 return -EFSCORRUPTED; in xfs_btree_lookup()
2013 * Iterate over each level in the btree, starting at the root. in xfs_btree_lookup()
2014 * For each level above the leaves, find the key we need, based in xfs_btree_lookup()
2016 * pointer down to the next level. in xfs_btree_lookup()
2018 for (level = cur->bc_nlevels - 1, diff = 1; level >= 0; level--) { in xfs_btree_lookup()
2020 error = xfs_btree_lookup_get_block(cur, level, pp, &block); in xfs_btree_lookup()
2026 * If we already had a key match at a higher level, we in xfs_btree_lookup()
2033 int high; /* high entry number */ in xfs_btree_lookup() local
2036 /* Set low and high entry numbers, 1-based. */ in xfs_btree_lookup()
2038 high = xfs_btree_get_numrecs(block); in xfs_btree_lookup()
2039 if (!high) { in xfs_btree_lookup()
2041 if (level != 0 || cur->bc_nlevels != 1) { in xfs_btree_lookup()
2044 cur->bc_mp, block, in xfs_btree_lookup()
2047 return -EFSCORRUPTED; in xfs_btree_lookup()
2050 cur->bc_levels[0].ptr = dir != XFS_LOOKUP_LE; in xfs_btree_lookup()
2056 while (low <= high) { in xfs_btree_lookup()
2062 /* keyno is average of low and high. */ in xfs_btree_lookup()
2063 keyno = (low + high) >> 1; in xfs_btree_lookup()
2066 kp = xfs_lookup_get_search_key(cur, level, in xfs_btree_lookup()
2071 * - less than, move right in xfs_btree_lookup()
2072 * - greater than, move left in xfs_btree_lookup()
2073 * - equal, we're done in xfs_btree_lookup()
2075 diff = cur->bc_ops->key_diff(cur, kp); in xfs_btree_lookup()
2079 high = keyno - 1; in xfs_btree_lookup()
2086 * If there are more levels, set up for the next level in xfs_btree_lookup()
2089 if (level > 0) { in xfs_btree_lookup()
2094 if (diff > 0 && --keyno < 1) in xfs_btree_lookup()
2098 error = xfs_btree_debug_check_ptr(cur, pp, 0, level); in xfs_btree_lookup()
2102 cur->bc_levels[level].ptr = keyno; in xfs_btree_lookup()
2119 cur->bc_levels[0].ptr = keyno; in xfs_btree_lookup()
2123 if (XFS_IS_CORRUPT(cur->bc_mp, i != 1)) { in xfs_btree_lookup()
2125 return -EFSCORRUPTED; in xfs_btree_lookup()
2131 keyno--; in xfs_btree_lookup()
2132 cur->bc_levels[0].ptr = keyno; in xfs_btree_lookup()
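The search above is an ordinary 1-based binary search; only the final adjustment differs between lookup modes. A standalone sketch of that pattern (not the XFS implementation: plain integers replace ->key_diff(), and the sibling-block handling of the real code is omitted):

enum lookup_dir { LOOKUP_LE, LOOKUP_GE };

/* keys[1..n] ascending, index 0 unused; returns the 1-based position,
 * where 0 or n + 1 mean the search ran off an end of the block. */
static int lookup(const int *keys, int n, int key, enum lookup_dir dir)
{
        int low = 1, high = n, keyno = 0, diff = 1;

        if (n == 0)
                return 0;               /* empty blocks are handled separately above */

        while (low <= high) {
                keyno = (low + high) >> 1;
                diff = keys[keyno] - key;       /* plays the role of ->key_diff() */
                if (diff < 0)
                        low = keyno + 1;
                else if (diff > 0)
                        high = keyno - 1;
                else
                        return keyno;           /* exact match */
        }
        if (dir == LOOKUP_LE && diff > 0)
                keyno--;                /* step back to the last smaller entry */
        else if (dir == LOOKUP_GE && diff < 0)
                keyno++;                /* step past the last smaller entry */
        return keyno;
}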
2147 /* Find the high key storage area from a regular key. */
2153 ASSERT(cur->bc_ops->geom_flags & XFS_BTGEO_OVERLAPPING); in xfs_btree_high_key_from_key()
2155 (cur->bc_ops->key_len / 2)); in xfs_btree_high_key_from_key()
2158 /* Determine the low (and high if overlapped) keys of a leaf block */
2168 union xfs_btree_key *high; in xfs_btree_get_leaf_keys() local
2172 cur->bc_ops->init_key_from_rec(key, rec); in xfs_btree_get_leaf_keys()
2174 if (cur->bc_ops->geom_flags & XFS_BTGEO_OVERLAPPING) { in xfs_btree_get_leaf_keys()
2176 cur->bc_ops->init_high_key_from_rec(&max_hkey, rec); in xfs_btree_get_leaf_keys()
2179 cur->bc_ops->init_high_key_from_rec(&hkey, rec); in xfs_btree_get_leaf_keys()
2184 high = xfs_btree_high_key_from_key(cur, key); in xfs_btree_get_leaf_keys()
2185 memcpy(high, &max_hkey, cur->bc_ops->key_len / 2); in xfs_btree_get_leaf_keys()
2189 /* Determine the low (and high if overlapped) keys of a node block */
2198 union xfs_btree_key *high; in xfs_btree_get_node_keys() local
2201 if (cur->bc_ops->geom_flags & XFS_BTGEO_OVERLAPPING) { in xfs_btree_get_node_keys()
2203 cur->bc_ops->key_len / 2); in xfs_btree_get_node_keys()
2212 high = xfs_btree_high_key_from_key(cur, key); in xfs_btree_get_node_keys()
2213 memcpy(high, max_hkey, cur->bc_ops->key_len / 2); in xfs_btree_get_node_keys()
2216 cur->bc_ops->key_len); in xfs_btree_get_node_keys()
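For the overlapping case, xfs_btree_get_leaf_keys() above derives the pair of keys the parent will store: the low key comes from the first record, but the high key has to be the maximum high key across all records, because with overlapping intervals the last record need not have the largest end point. A minimal sketch with assumed field names (not the kernel structures):

struct irange { unsigned long long lo, hi; };

/* nrecs must be >= 1; records are sorted by their low key. */
static void leaf_keys(const struct irange *recs, int nrecs,
                      unsigned long long *low, unsigned long long *high)
{
        *low = recs[0].lo;
        *high = recs[0].hi;
        for (int i = 1; i < nrecs; i++)
                if (recs[i].hi > *high)
                        *high = recs[i].hi;
}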
2227 if (be16_to_cpu(block->bb_level) == 0) in xfs_btree_get_keys()
2245 return (cur->bc_ops->geom_flags & XFS_BTGEO_OVERLAPPING) || ptr == 1; in xfs_btree_needs_key_update()
2249 * Update the low and high parent keys of the given level, progressing
2251 * level do not need updating.
2256 int level, in __xfs_btree_updkeys() argument
2261 union xfs_btree_key key; /* keys from current level */ in __xfs_btree_updkeys()
2262 union xfs_btree_key *lkey; /* keys from the next level up */ in __xfs_btree_updkeys()
2264 union xfs_btree_key *nlkey; /* keys from the next level up */ in __xfs_btree_updkeys()
2269 ASSERT(cur->bc_ops->geom_flags & XFS_BTGEO_OVERLAPPING); in __xfs_btree_updkeys()
2272 if (level + 1 >= cur->bc_nlevels) in __xfs_btree_updkeys()
2275 trace_xfs_btree_updkeys(cur, level, bp0); in __xfs_btree_updkeys()
2280 for (level++; level < cur->bc_nlevels; level++) { in __xfs_btree_updkeys()
2284 block = xfs_btree_get_block(cur, level, &bp); in __xfs_btree_updkeys()
2285 trace_xfs_btree_updkeys(cur, level, bp); in __xfs_btree_updkeys()
2287 error = xfs_btree_check_block(cur, block, level, bp); in __xfs_btree_updkeys()
2291 ptr = cur->bc_levels[level].ptr; in __xfs_btree_updkeys()
2300 if (level + 1 >= cur->bc_nlevels) in __xfs_btree_updkeys()
2308 /* Update all the keys from some level in cursor back to the root. */
2312 int level) in xfs_btree_updkeys_force() argument
2317 block = xfs_btree_get_block(cur, level, &bp); in xfs_btree_updkeys_force()
2318 return __xfs_btree_updkeys(cur, level, block, bp, true); in xfs_btree_updkeys_force()
2322 * Update the parent keys of the given level, progressing towards the root.
2327 int level) in xfs_btree_update_keys() argument
2335 ASSERT(level >= 0); in xfs_btree_update_keys()
2337 block = xfs_btree_get_block(cur, level, &bp); in xfs_btree_update_keys()
2338 if (cur->bc_ops->geom_flags & XFS_BTGEO_OVERLAPPING) in xfs_btree_update_keys()
2339 return __xfs_btree_updkeys(cur, level, block, bp, false); in xfs_btree_update_keys()
2342 * Go up the tree from this level toward the root. in xfs_btree_update_keys()
2343 * At each level, update the key value to the value input. in xfs_btree_update_keys()
2344 * Stop when we reach a level where the cursor isn't pointing in xfs_btree_update_keys()
2348 for (level++, ptr = 1; ptr == 1 && level < cur->bc_nlevels; level++) { in xfs_btree_update_keys()
2352 block = xfs_btree_get_block(cur, level, &bp); in xfs_btree_update_keys()
2354 error = xfs_btree_check_block(cur, block, level, bp); in xfs_btree_update_keys()
2358 ptr = cur->bc_levels[level].ptr; in xfs_btree_update_keys()
2392 ptr = cur->bc_levels[0].ptr; in xfs_btree_update()
2413 * Move 1 record left from cur/level if possible.
2419 int level, in xfs_btree_lshift() argument
2436 if (xfs_btree_at_iroot(cur, level)) in xfs_btree_lshift()
2440 right = xfs_btree_get_block(cur, level, &rbp); in xfs_btree_lshift()
2443 error = xfs_btree_check_block(cur, right, level, rbp); in xfs_btree_lshift()
2457 if (cur->bc_levels[level].ptr <= 1) in xfs_btree_lshift()
2467 if (lrecs == cur->bc_ops->get_maxrecs(cur, level)) in xfs_btree_lshift()
2478 rrecs--; in xfs_btree_lshift()
2484 * If non-leaf, copy a key and a ptr to the left block. in xfs_btree_lshift()
2487 if (level > 0) { in xfs_btree_lshift()
2488 /* It's a non-leaf. Move keys and pointers. */ in xfs_btree_lshift()
2498 error = xfs_btree_debug_check_ptr(cur, rpp, 0, level); in xfs_btree_lshift()
2508 ASSERT(cur->bc_ops->keys_inorder(cur, in xfs_btree_lshift()
2509 xfs_btree_key_addr(cur, lrecs - 1, left), lkp)); in xfs_btree_lshift()
2520 ASSERT(cur->bc_ops->recs_inorder(cur, in xfs_btree_lshift()
2521 xfs_btree_rec_addr(cur, lrecs - 1, left), lrp)); in xfs_btree_lshift()
2533 XFS_BTREE_STATS_ADD(cur, moves, rrecs - 1); in xfs_btree_lshift()
2534 if (level > 0) { in xfs_btree_lshift()
2537 error = xfs_btree_debug_check_ptr(cur, rpp, i + 1, level); in xfs_btree_lshift()
2544 -1, rrecs); in xfs_btree_lshift()
2547 -1, rrecs); in xfs_btree_lshift()
2555 -1, rrecs); in xfs_btree_lshift()
2563 if (cur->bc_ops->geom_flags & XFS_BTGEO_OVERLAPPING) { in xfs_btree_lshift()
2567 i = xfs_btree_firstrec(tcur, level); in xfs_btree_lshift()
2568 if (XFS_IS_CORRUPT(tcur->bc_mp, i != 1)) { in xfs_btree_lshift()
2570 error = -EFSCORRUPTED; in xfs_btree_lshift()
2574 error = xfs_btree_decrement(tcur, level, &i); in xfs_btree_lshift()
2578 /* Update the parent high keys of the left block, if needed. */ in xfs_btree_lshift()
2579 error = xfs_btree_update_keys(tcur, level); in xfs_btree_lshift()
2587 error = xfs_btree_update_keys(cur, level); in xfs_btree_lshift()
2592 cur->bc_levels[level].ptr--; in xfs_btree_lshift()
2610 * Move 1 record right from cur/level if possible.
2616 int level, in xfs_btree_rshift() argument
2631 if (xfs_btree_at_iroot(cur, level)) in xfs_btree_rshift()
2635 left = xfs_btree_get_block(cur, level, &lbp); in xfs_btree_rshift()
2638 error = xfs_btree_check_block(cur, left, level, lbp); in xfs_btree_rshift()
2653 if (cur->bc_levels[level].ptr >= lrecs) in xfs_btree_rshift()
2663 if (rrecs == cur->bc_ops->get_maxrecs(cur, level)) in xfs_btree_rshift()
2673 if (level > 0) { in xfs_btree_rshift()
2684 for (i = rrecs - 1; i >= 0; i--) { in xfs_btree_rshift()
2685 error = xfs_btree_debug_check_ptr(cur, rpp, i, level); in xfs_btree_rshift()
2693 error = xfs_btree_debug_check_ptr(cur, lpp, 0, level); in xfs_btree_rshift()
2704 ASSERT(cur->bc_ops->keys_inorder(cur, rkp, in xfs_btree_rshift()
2724 xfs_btree_set_numrecs(left, --lrecs); in xfs_btree_rshift()
2737 i = xfs_btree_lastrec(tcur, level); in xfs_btree_rshift()
2738 if (XFS_IS_CORRUPT(tcur->bc_mp, i != 1)) { in xfs_btree_rshift()
2740 error = -EFSCORRUPTED; in xfs_btree_rshift()
2744 error = xfs_btree_increment(tcur, level, &i); in xfs_btree_rshift()
2748 /* Update the parent high keys of the left block, if needed. */ in xfs_btree_rshift()
2749 if (cur->bc_ops->geom_flags & XFS_BTGEO_OVERLAPPING) { in xfs_btree_rshift()
2750 error = xfs_btree_update_keys(cur, level); in xfs_btree_rshift()
2756 error = xfs_btree_update_keys(tcur, level); in xfs_btree_rshift()
2793 if (unlikely(cur->bc_flags & XFS_BTREE_STAGING)) { in xfs_btree_alloc_block()
2795 return -EFSCORRUPTED; in xfs_btree_alloc_block()
2798 error = cur->bc_ops->alloc_block(cur, hint_block, new_block, stat); in xfs_btree_alloc_block()
2804 * Split cur/level block in half.
2811 int level, in __xfs_btree_split() argument
2823 union xfs_btree_ptr rrptr; /* right-right sibling ptr */ in __xfs_btree_split()
2824 struct xfs_buf *rrbp; /* right-right buffer pointer */ in __xfs_btree_split()
2825 struct xfs_btree_block *rrblock; /* right-right btree block */ in __xfs_btree_split()
2835 left = xfs_btree_get_block(cur, level, &lbp); in __xfs_btree_split()
2838 error = xfs_btree_check_block(cur, left, level, lbp); in __xfs_btree_split()
2868 if ((lrecs & 1) && cur->bc_levels[level].ptr <= rrecs + 1) in __xfs_btree_split()
2870 src_index = (lrecs - rrecs + 1); in __xfs_btree_split()
2875 lrecs -= rrecs; in __xfs_btree_split()
2884 if (level > 0) { in __xfs_btree_split()
2885 /* It's a non-leaf. Move keys and pointers. */ in __xfs_btree_split()
2897 error = xfs_btree_debug_check_ptr(cur, lpp, i, level); in __xfs_btree_split()
2952 /* Update the parent high keys of the left block, if needed. */ in __xfs_btree_split()
2953 if (cur->bc_ops->geom_flags & XFS_BTGEO_OVERLAPPING) { in __xfs_btree_split()
2954 error = xfs_btree_update_keys(cur, level); in __xfs_btree_split()
2964 if (cur->bc_levels[level].ptr > lrecs + 1) { in __xfs_btree_split()
2965 xfs_btree_setbuf(cur, level, rbp); in __xfs_btree_split()
2966 cur->bc_levels[level].ptr -= lrecs; in __xfs_btree_split()
2972 if (level + 1 < cur->bc_nlevels) { in __xfs_btree_split()
2976 (*curp)->bc_levels[level + 1].ptr++; in __xfs_btree_split()
2992 int level; member
3021 if (args->kswapd) in xfs_btree_split_worker()
3025 xfs_trans_set_context(args->cur->bc_tp); in xfs_btree_split_worker()
3027 args->result = __xfs_btree_split(args->cur, args->level, args->ptrp, in xfs_btree_split_worker()
3028 args->key, args->curp, args->stat); in xfs_btree_split_worker()
3030 xfs_trans_clear_context(args->cur->bc_tp); in xfs_btree_split_worker()
3037 complete(args->done); in xfs_btree_split_worker()
3046 * Care must be taken here - the work queue rescuer thread introduces potential
3062 int level, in xfs_btree_split() argument
3071 if (!xfs_btree_is_bmap(cur->bc_ops) || in xfs_btree_split()
3072 cur->bc_tp->t_highest_agno == NULLAGNUMBER) in xfs_btree_split()
3073 return __xfs_btree_split(cur, level, ptrp, key, curp, stat); in xfs_btree_split()
3076 args.level = level; in xfs_btree_split()
3101 int *stat) /* return status - 0 fail */ in xfs_btree_new_iroot()
3111 int level; /* btree level */ in xfs_btree_new_iroot() local
3117 ASSERT(cur->bc_ops->type == XFS_BTREE_TYPE_INODE); in xfs_btree_new_iroot()
3119 level = cur->bc_nlevels - 1; in xfs_btree_new_iroot()
3143 if (xfs_has_crc(cur->bc_mp)) { in xfs_btree_new_iroot()
3145 if (cur->bc_ops->ptr_len == XFS_BTREE_LONG_PTR_LEN) in xfs_btree_new_iroot()
3146 cblock->bb_u.l.bb_blkno = bno; in xfs_btree_new_iroot()
3148 cblock->bb_u.s.bb_blkno = bno; in xfs_btree_new_iroot()
3151 be16_add_cpu(&block->bb_level, 1); in xfs_btree_new_iroot()
3153 cur->bc_nlevels++; in xfs_btree_new_iroot()
3154 ASSERT(cur->bc_nlevels <= cur->bc_maxlevels); in xfs_btree_new_iroot()
3155 cur->bc_levels[level + 1].ptr = 1; in xfs_btree_new_iroot()
3162 for (i = 0; i < be16_to_cpu(cblock->bb_numrecs); i++) { in xfs_btree_new_iroot()
3163 error = xfs_btree_debug_check_ptr(cur, pp, i, level); in xfs_btree_new_iroot()
3170 error = xfs_btree_debug_check_ptr(cur, &nptr, 0, level); in xfs_btree_new_iroot()
3176 xfs_iroot_realloc(cur->bc_ino.ip, in xfs_btree_new_iroot()
3177 1 - xfs_btree_get_numrecs(cblock), in xfs_btree_new_iroot()
3178 cur->bc_ino.whichfork); in xfs_btree_new_iroot()
3180 xfs_btree_setbuf(cur, level, cbp); in xfs_btree_new_iroot()
3184 * the root is at the right level. in xfs_btree_new_iroot()
3187 xfs_btree_log_keys(cur, cbp, 1, be16_to_cpu(cblock->bb_numrecs)); in xfs_btree_new_iroot()
3188 xfs_btree_log_ptrs(cur, cbp, 1, be16_to_cpu(cblock->bb_numrecs)); in xfs_btree_new_iroot()
3191 XFS_ILOG_CORE | xfs_ilog_fbroot(cur->bc_ino.whichfork); in xfs_btree_new_iroot()
3204 if (cur->bc_flags & XFS_BTREE_STAGING) { in xfs_btree_set_root()
3205 /* Update the btree root information for a per-AG fake root. */ in xfs_btree_set_root()
3206 cur->bc_ag.afake->af_root = be32_to_cpu(ptr->s); in xfs_btree_set_root()
3207 cur->bc_ag.afake->af_levels += inc; in xfs_btree_set_root()
3209 cur->bc_ops->set_root(cur, ptr, inc); in xfs_btree_set_root()
3252 /* Set the root in the holding structure increasing the level by 1. */ in xfs_btree_new_root()
3256 * At the previous root level there are now two blocks: the old root, in xfs_btree_new_root()
3261 block = xfs_btree_get_block(cur, cur->bc_nlevels - 1, &bp); in xfs_btree_new_root()
3264 error = xfs_btree_check_block(cur, block, cur->bc_nlevels - 1, bp); in xfs_btree_new_root()
3294 xfs_btree_init_block_cur(cur, nbp, cur->bc_nlevels, 2); in xfs_btree_new_root()
3330 xfs_btree_setbuf(cur, cur->bc_nlevels, nbp); in xfs_btree_new_root()
3331 cur->bc_levels[cur->bc_nlevels].ptr = nptr; in xfs_btree_new_root()
3332 cur->bc_nlevels++; in xfs_btree_new_root()
3333 ASSERT(cur->bc_nlevels <= cur->bc_maxlevels); in xfs_btree_new_root()
3346 int level, /* btree level */ in xfs_btree_make_block_unfull() argument
3357 if (xfs_btree_at_iroot(cur, level)) { in xfs_btree_make_block_unfull()
3358 struct xfs_inode *ip = cur->bc_ino.ip; in xfs_btree_make_block_unfull()
3360 if (numrecs < cur->bc_ops->get_dmaxrecs(cur, level)) { in xfs_btree_make_block_unfull()
3362 xfs_iroot_realloc(ip, 1, cur->bc_ino.whichfork); in xfs_btree_make_block_unfull()
3372 xfs_trans_log_inode(cur->bc_tp, ip, logflags); in xfs_btree_make_block_unfull()
3379 error = xfs_btree_rshift(cur, level, stat); in xfs_btree_make_block_unfull()
3384 error = xfs_btree_lshift(cur, level, stat); in xfs_btree_make_block_unfull()
3389 *oindex = *index = cur->bc_levels[level].ptr; in xfs_btree_make_block_unfull()
3396 * If this works we have to re-set our variables because we in xfs_btree_make_block_unfull()
3399 error = xfs_btree_split(cur, level, nptr, key, ncur, stat); in xfs_btree_make_block_unfull()
3404 *index = cur->bc_levels[level].ptr; in xfs_btree_make_block_unfull()
3409 * Insert one record/level. Return information to the caller
3410 * allowing the next level up to proceed if necessary.
3415 int level, /* level to insert record at */ in xfs_btree_insrec() argument
3440 * root level, allocate a new root block and we're done. in xfs_btree_insrec()
3442 if (cur->bc_ops->type != XFS_BTREE_TYPE_INODE && in xfs_btree_insrec()
3443 level >= cur->bc_nlevels) { in xfs_btree_insrec()
3451 ptr = cur->bc_levels[level].ptr; in xfs_btree_insrec()
3462 block = xfs_btree_get_block(cur, level, &bp); in xfs_btree_insrec()
3467 error = xfs_btree_check_block(cur, block, level, bp); in xfs_btree_insrec()
3473 if (level == 0) { in xfs_btree_insrec()
3474 ASSERT(cur->bc_ops->recs_inorder(cur, rec, in xfs_btree_insrec()
3477 ASSERT(cur->bc_ops->keys_inorder(cur, key, in xfs_btree_insrec()
3485 * make the block un-full. in xfs_btree_insrec()
3488 if (numrecs == cur->bc_ops->get_maxrecs(cur, level)) { in xfs_btree_insrec()
3489 error = xfs_btree_make_block_unfull(cur, level, numrecs, in xfs_btree_insrec()
3499 block = xfs_btree_get_block(cur, level, &bp); in xfs_btree_insrec()
3503 error = xfs_btree_check_block(cur, block, level, bp); in xfs_btree_insrec()
3512 XFS_BTREE_STATS_ADD(cur, moves, numrecs - ptr + 1); in xfs_btree_insrec()
3514 if (level > 0) { in xfs_btree_insrec()
3522 for (i = numrecs - ptr; i >= 0; i--) { in xfs_btree_insrec()
3523 error = xfs_btree_debug_check_ptr(cur, pp, i, level); in xfs_btree_insrec()
3528 xfs_btree_shift_keys(cur, kp, 1, numrecs - ptr + 1); in xfs_btree_insrec()
3529 xfs_btree_shift_ptrs(cur, pp, 1, numrecs - ptr + 1); in xfs_btree_insrec()
3531 error = xfs_btree_debug_check_ptr(cur, ptrp, 0, level); in xfs_btree_insrec()
3544 ASSERT(cur->bc_ops->keys_inorder(cur, kp, in xfs_btree_insrec()
3554 xfs_btree_shift_recs(cur, rp, 1, numrecs - ptr + 1); in xfs_btree_insrec()
3562 ASSERT(cur->bc_ops->recs_inorder(cur, rp, in xfs_btree_insrec()
3582 error = xfs_btree_update_keys(cur, level); in xfs_btree_insrec()
3609 * A multi-level split of the tree on insert will invalidate the original
3620 int level; /* current level number in btree */ in xfs_btree_insert() local
3623 struct xfs_btree_cur *pcur; /* previous level's cursor */ in xfs_btree_insert()
3628 level = 0; in xfs_btree_insert()
3636 cur->bc_ops->init_rec_from_cur(cur, &rec); in xfs_btree_insert()
3637 cur->bc_ops->init_key_from_rec(key, &rec); in xfs_btree_insert()
3640 * Loop going up the tree, starting at the leaf level. in xfs_btree_insert()
3642 * the insert is finished with this level. in xfs_btree_insert()
3646 * Insert nrec/nptr into this level of the tree. in xfs_btree_insert()
3649 error = xfs_btree_insrec(pcur, level, &nptr, &rec, key, in xfs_btree_insert()
3657 if (XFS_IS_CORRUPT(cur->bc_mp, i != 1)) { in xfs_btree_insert()
3659 error = -EFSCORRUPTED; in xfs_btree_insert()
3662 level++; in xfs_btree_insert()
3672 if (cur->bc_ops->update_cursor && in xfs_btree_insert()
3673 !(cur->bc_flags & XFS_BTREE_STAGING)) in xfs_btree_insert()
3674 cur->bc_ops->update_cursor(pcur, cur); in xfs_btree_insert()
3675 cur->bc_nlevels = pcur->bc_nlevels; in xfs_btree_insert()
3692 * Try to merge a non-leaf block back into the inode root.
3703 int whichfork = cur->bc_ino.whichfork; in xfs_btree_kill_iroot()
3704 struct xfs_inode *ip = cur->bc_ino.ip; in xfs_btree_kill_iroot()
3713 int level; in xfs_btree_kill_iroot() local
3722 ASSERT(cur->bc_ops->type == XFS_BTREE_TYPE_INODE); in xfs_btree_kill_iroot()
3723 ASSERT(cur->bc_nlevels > 1); in xfs_btree_kill_iroot()
3729 level = cur->bc_nlevels - 1; in xfs_btree_kill_iroot()
3730 if (level == 1) in xfs_btree_kill_iroot()
3740 cblock = xfs_btree_get_block(cur, level - 1, &cbp); in xfs_btree_kill_iroot()
3744 * Only do this if the next level will fit. in xfs_btree_kill_iroot()
3746 * instead of freeing the root you free the next level. in xfs_btree_kill_iroot()
3748 if (numrecs > cur->bc_ops->get_dmaxrecs(cur, level)) in xfs_btree_kill_iroot()
3760 index = numrecs - cur->bc_ops->get_maxrecs(cur, level); in xfs_btree_kill_iroot()
3762 xfs_iroot_realloc(cur->bc_ino.ip, index, in xfs_btree_kill_iroot()
3763 cur->bc_ino.whichfork); in xfs_btree_kill_iroot()
3764 block = ifp->if_broot; in xfs_btree_kill_iroot()
3767 be16_add_cpu(&block->bb_numrecs, index); in xfs_btree_kill_iroot()
3768 ASSERT(block->bb_numrecs == cblock->bb_numrecs); in xfs_btree_kill_iroot()
3778 error = xfs_btree_debug_check_ptr(cur, cpp, i, level - 1); in xfs_btree_kill_iroot()
3789 cur->bc_levels[level - 1].bp = NULL; in xfs_btree_kill_iroot()
3790 be16_add_cpu(&block->bb_level, -1); in xfs_btree_kill_iroot()
3791 xfs_trans_log_inode(cur->bc_tp, ip, in xfs_btree_kill_iroot()
3792 XFS_ILOG_CORE | xfs_ilog_fbroot(cur->bc_ino.whichfork)); in xfs_btree_kill_iroot()
3793 cur->bc_nlevels--; in xfs_btree_kill_iroot()
3805 int level, in xfs_btree_kill_root() argument
3813 * Update the root pointer, decreasing the level by 1 and then in xfs_btree_kill_root()
3816 xfs_btree_set_root(cur, newroot, -1); in xfs_btree_kill_root()
3822 cur->bc_levels[level].bp = NULL; in xfs_btree_kill_root()
3823 cur->bc_levels[level].ra = 0; in xfs_btree_kill_root()
3824 cur->bc_nlevels--; in xfs_btree_kill_root()
3832 int level, in xfs_btree_dec_cursor() argument
3838 if (level > 0) { in xfs_btree_dec_cursor()
3839 error = xfs_btree_decrement(cur, level, &i); in xfs_btree_dec_cursor()
3849 * Single level of the btree record deletion routine.
3850 * Delete record pointed to by cur/level.
3852 * Return 0 for error, 1 for done, 2 to go on to the next level.
3857 int level, /* level removing record from */ in xfs_btree_delrec() argument
3858 int *stat) /* fail/done/go-on */ in xfs_btree_delrec()
3873 struct xfs_btree_block *rrblock; /* right-right btree block */ in xfs_btree_delrec()
3874 struct xfs_buf *rrbp; /* right-right buffer pointer */ in xfs_btree_delrec()
3882 ptr = cur->bc_levels[level].ptr; in xfs_btree_delrec()
3889 block = xfs_btree_get_block(cur, level, &bp); in xfs_btree_delrec()
3893 error = xfs_btree_check_block(cur, block, level, bp); in xfs_btree_delrec()
3905 XFS_BTREE_STATS_ADD(cur, moves, numrecs - ptr); in xfs_btree_delrec()
3908 if (level > 0) { in xfs_btree_delrec()
3916 for (i = 0; i < numrecs - ptr; i++) { in xfs_btree_delrec()
3917 error = xfs_btree_debug_check_ptr(cur, lpp, i, level); in xfs_btree_delrec()
3923 xfs_btree_shift_keys(cur, lkp, -1, numrecs - ptr); in xfs_btree_delrec()
3924 xfs_btree_shift_ptrs(cur, lpp, -1, numrecs - ptr); in xfs_btree_delrec()
3925 xfs_btree_log_keys(cur, bp, ptr, numrecs - 1); in xfs_btree_delrec()
3926 xfs_btree_log_ptrs(cur, bp, ptr, numrecs - 1); in xfs_btree_delrec()
3933 -1, numrecs - ptr); in xfs_btree_delrec()
3934 xfs_btree_log_recs(cur, bp, ptr, numrecs - 1); in xfs_btree_delrec()
3941 xfs_btree_set_numrecs(block, --numrecs); in xfs_btree_delrec()
3945 * We're at the root level. First, shrink the root block in-memory. in xfs_btree_delrec()
3946 * Try to get rid of the next level down. If we can't then there's in xfs_btree_delrec()
3949 if (xfs_btree_at_iroot(cur, level)) { in xfs_btree_delrec()
3950 xfs_iroot_realloc(cur->bc_ino.ip, -1, cur->bc_ino.whichfork); in xfs_btree_delrec()
3956 error = xfs_btree_dec_cursor(cur, level, stat); in xfs_btree_delrec()
3964 * If this is the root level, and there's only one entry left, and it's in xfs_btree_delrec()
3965 * NOT the leaf level, then we can get rid of this level. in xfs_btree_delrec()
3967 if (level == cur->bc_nlevels - 1) { in xfs_btree_delrec()
3968 if (numrecs == 1 && level > 0) { in xfs_btree_delrec()
3975 error = xfs_btree_kill_root(cur, bp, level, pp); in xfs_btree_delrec()
3978 } else if (level > 0) { in xfs_btree_delrec()
3979 error = xfs_btree_dec_cursor(cur, level, stat); in xfs_btree_delrec()
3992 error = xfs_btree_update_keys(cur, level); in xfs_btree_delrec()
4001 if (numrecs >= cur->bc_ops->get_minrecs(cur, level)) { in xfs_btree_delrec()
4002 error = xfs_btree_dec_cursor(cur, level, stat); in xfs_btree_delrec()
4011 * see if we can re-balance by moving only one record. in xfs_btree_delrec()
4016 if (cur->bc_ops->type == XFS_BTREE_TYPE_INODE) { in xfs_btree_delrec()
4019 * into the root and delete it. Can't go up to next level, in xfs_btree_delrec()
4024 level == cur->bc_nlevels - 2) { in xfs_btree_delrec()
4027 error = xfs_btree_dec_cursor(cur, level, stat); in xfs_btree_delrec()
4039 * disrupt the next level up. in xfs_btree_delrec()
4054 i = xfs_btree_lastrec(tcur, level); in xfs_btree_delrec()
4055 if (XFS_IS_CORRUPT(cur->bc_mp, i != 1)) { in xfs_btree_delrec()
4057 error = -EFSCORRUPTED; in xfs_btree_delrec()
4061 error = xfs_btree_increment(tcur, level, &i); in xfs_btree_delrec()
4064 if (XFS_IS_CORRUPT(cur->bc_mp, i != 1)) { in xfs_btree_delrec()
4066 error = -EFSCORRUPTED; in xfs_btree_delrec()
4070 i = xfs_btree_lastrec(tcur, level); in xfs_btree_delrec()
4071 if (XFS_IS_CORRUPT(cur->bc_mp, i != 1)) { in xfs_btree_delrec()
4073 error = -EFSCORRUPTED; in xfs_btree_delrec()
4078 right = xfs_btree_get_block(tcur, level, &rbp); in xfs_btree_delrec()
4080 error = xfs_btree_check_block(tcur, right, level, rbp); in xfs_btree_delrec()
4089 * won't make it too empty, and left-shifting an entry out in xfs_btree_delrec()
4092 if (xfs_btree_get_numrecs(right) - 1 >= in xfs_btree_delrec()
4093 cur->bc_ops->get_minrecs(tcur, level)) { in xfs_btree_delrec()
4094 error = xfs_btree_lshift(tcur, level, &i); in xfs_btree_delrec()
4099 cur->bc_ops->get_minrecs(tcur, level)); in xfs_btree_delrec()
4104 error = xfs_btree_dec_cursor(cur, level, stat); in xfs_btree_delrec()
4118 i = xfs_btree_firstrec(tcur, level); in xfs_btree_delrec()
4119 if (XFS_IS_CORRUPT(cur->bc_mp, i != 1)) { in xfs_btree_delrec()
4121 error = -EFSCORRUPTED; in xfs_btree_delrec()
4125 error = xfs_btree_decrement(tcur, level, &i); in xfs_btree_delrec()
4128 if (XFS_IS_CORRUPT(cur->bc_mp, i != 1)) { in xfs_btree_delrec()
4130 error = -EFSCORRUPTED; in xfs_btree_delrec()
4145 i = xfs_btree_firstrec(tcur, level); in xfs_btree_delrec()
4146 if (XFS_IS_CORRUPT(cur->bc_mp, i != 1)) { in xfs_btree_delrec()
4148 error = -EFSCORRUPTED; in xfs_btree_delrec()
4152 error = xfs_btree_decrement(tcur, level, &i); in xfs_btree_delrec()
4155 i = xfs_btree_firstrec(tcur, level); in xfs_btree_delrec()
4156 if (XFS_IS_CORRUPT(cur->bc_mp, i != 1)) { in xfs_btree_delrec()
4158 error = -EFSCORRUPTED; in xfs_btree_delrec()
4163 left = xfs_btree_get_block(tcur, level, &lbp); in xfs_btree_delrec()
4165 error = xfs_btree_check_block(cur, left, level, lbp); in xfs_btree_delrec()
4174 * won't make it too empty, and right-shifting an entry out in xfs_btree_delrec()
4177 if (xfs_btree_get_numrecs(left) - 1 >= in xfs_btree_delrec()
4178 cur->bc_ops->get_minrecs(tcur, level)) { in xfs_btree_delrec()
4179 error = xfs_btree_rshift(tcur, level, &i); in xfs_btree_delrec()
4184 cur->bc_ops->get_minrecs(tcur, level)); in xfs_btree_delrec()
4187 if (level == 0) in xfs_btree_delrec()
4188 cur->bc_levels[0].ptr++; in xfs_btree_delrec()
4211 cur->bc_ops->get_maxrecs(cur, level)) { in xfs_btree_delrec()
4228 cur->bc_ops->get_maxrecs(cur, level)) { in xfs_btree_delrec()
4245 error = xfs_btree_dec_cursor(cur, level, stat); in xfs_btree_delrec()
4259 if (level > 0) { in xfs_btree_delrec()
4260 /* It's a non-leaf. Move keys and pointers. */ in xfs_btree_delrec()
4272 error = xfs_btree_debug_check_ptr(cur, rpp, i, level); in xfs_btree_delrec()
4325 cur->bc_levels[level].bp = lbp; in xfs_btree_delrec()
4326 cur->bc_levels[level].ptr += lrecs; in xfs_btree_delrec()
4327 cur->bc_levels[level].ra = 0; in xfs_btree_delrec()
4330 * If we joined with the right neighbor and there's a level above in xfs_btree_delrec()
4331 * us, increment the cursor at that level. in xfs_btree_delrec()
4333 else if (cur->bc_ops->type == XFS_BTREE_TYPE_INODE || in xfs_btree_delrec()
4334 level + 1 < cur->bc_nlevels) { in xfs_btree_delrec()
4335 error = xfs_btree_increment(cur, level + 1, &i); in xfs_btree_delrec()
4341 * Readjust the ptr at this level if it's not a leaf, since it's in xfs_btree_delrec()
4344 * We can't use decrement because it would change the next level up. in xfs_btree_delrec()
4346 if (level > 0) in xfs_btree_delrec()
4347 cur->bc_levels[level].ptr--; in xfs_btree_delrec()
4352 * bc_levels[level + 1].ptr points to the old block so that the caller in xfs_btree_delrec()
4359 /* Return value means the next level up has something to do. */ in xfs_btree_delrec()
4380 int level; in xfs_btree_delete() local
4385 * Go up the tree, starting at leaf level. in xfs_btree_delete()
4387 * If 2 is returned then a join was done; go to the next level. in xfs_btree_delete()
4390 for (level = 0, i = 2; i == 2; level++) { in xfs_btree_delete()
4391 error = xfs_btree_delrec(cur, level, &i); in xfs_btree_delete()
4400 * have updated the parent high keys so we have to do that here. in xfs_btree_delete()
4402 if (joined && (cur->bc_ops->geom_flags & XFS_BTGEO_OVERLAPPING)) { in xfs_btree_delete()
4409 for (level = 1; level < cur->bc_nlevels; level++) { in xfs_btree_delete()
4410 if (cur->bc_levels[level].ptr == 0) { in xfs_btree_delete()
4411 error = xfs_btree_decrement(cur, level, &i); in xfs_btree_delete()
4426 * Get the data from the pointed-to record.
4441 ptr = cur->bc_levels[0].ptr; in xfs_btree_get_rec()
4470 int level, in xfs_btree_visit_block() argument
4480 xfs_btree_readahead(cur, level, XFS_BTCUR_RIGHTRA); in xfs_btree_visit_block()
4481 block = xfs_btree_get_block(cur, level, &bp); in xfs_btree_visit_block()
4484 error = fn(cur, level, data); in xfs_btree_visit_block()
4491 return -ENOENT; in xfs_btree_visit_block()
4502 return -EFSCORRUPTED; in xfs_btree_visit_block()
4505 return xfs_btree_lookup_get_block(cur, level, &rptr, &block); in xfs_btree_visit_block()
4518 int level; in xfs_btree_visit_blocks() local
4524 /* for each level */ in xfs_btree_visit_blocks()
4525 for (level = cur->bc_nlevels - 1; level >= 0; level--) { in xfs_btree_visit_blocks()
4527 error = xfs_btree_lookup_get_block(cur, level, &lptr, &block); in xfs_btree_visit_blocks()
4531 /* readahead the leftmost block for the next level down */ in xfs_btree_visit_blocks()
4532 if (level > 0) { in xfs_btree_visit_blocks()
4547 /* for each buffer in the level */ in xfs_btree_visit_blocks()
4549 error = xfs_btree_visit_block(cur, level, fn, data); in xfs_btree_visit_blocks()
4552 if (error != -ENOENT) in xfs_btree_visit_blocks()
4567 * We do the btree walk in the most efficient manner possible - we have sibling
4568 * pointers so we can just walk all the blocks on each level from left to right
4569 * in a single pass, and then move to the next level and do the same. We can
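As a rough, self-contained model of that walk order (not the kernel implementation), the sketch below gives each block a right-sibling pointer and a pointer to its leftmost child, then visits every block level by level from left to right; all structure and field names are invented for the example:

#include <stddef.h>
#include <stdio.h>

/* Toy block: a right sibling on the same level, a leftmost child below. */
struct toy_block {
	struct toy_block	*rightsib;
	struct toy_block	*leftmost_child;
	int			id;
};

/* Visit every block, one level at a time, walking siblings left to right. */
static void walk_levels(struct toy_block *root)
{
	struct toy_block	*level_start = root;

	while (level_start) {
		struct toy_block	*blk;

		for (blk = level_start; blk; blk = blk->rightsib)
			printf("visit block %d\n", blk->id);

		/* descend to the leftmost block of the next level down */
		level_start = level_start->leftmost_child;
	}
}

int main(void)
{
	struct toy_block leaf2 = { NULL, NULL, 3 };
	struct toy_block leaf1 = { &leaf2, NULL, 2 };
	struct toy_block root  = { NULL, &leaf1, 1 };

	walk_levels(&root);	/* prints blocks 1, 2, 3 */
	return 0;
}

Each block is touched exactly once and no parent pointers are needed, which is what makes the sibling-pointer walk so cheap.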
4591 int level, in xfs_btree_block_change_owner() argument
4599 block = xfs_btree_get_block(cur, level, &bp); in xfs_btree_block_change_owner()
4600 if (cur->bc_ops->ptr_len == XFS_BTREE_LONG_PTR_LEN) { in xfs_btree_block_change_owner()
4601 if (block->bb_u.l.bb_owner == cpu_to_be64(bbcoi->new_owner)) in xfs_btree_block_change_owner()
4603 block->bb_u.l.bb_owner = cpu_to_be64(bbcoi->new_owner); in xfs_btree_block_change_owner()
4605 if (block->bb_u.s.bb_owner == cpu_to_be32(bbcoi->new_owner)) in xfs_btree_block_change_owner()
4607 block->bb_u.s.bb_owner = cpu_to_be32(bbcoi->new_owner); in xfs_btree_block_change_owner()
4614 * block is formatted into the on-disk inode fork. We still change it, in xfs_btree_block_change_owner()
4618 ASSERT(cur->bc_ops->type == XFS_BTREE_TYPE_INODE); in xfs_btree_block_change_owner()
4619 ASSERT(level == cur->bc_nlevels - 1); in xfs_btree_block_change_owner()
4623 if (cur->bc_tp) { in xfs_btree_block_change_owner()
4624 if (!xfs_trans_ordered_buf(cur->bc_tp, bp)) { in xfs_btree_block_change_owner()
4626 return -EAGAIN; in xfs_btree_block_change_owner()
4629 xfs_buf_delwri_queue(bp, bbcoi->buffer_list); in xfs_btree_block_change_owner()
4650 /* Verify the v5 fields of a long-format btree block. */
4656 struct xfs_mount *mp = bp->b_mount; in xfs_btree_fsblock_v5hdr_verify()
4661 if (!uuid_equal(&block->bb_u.l.bb_uuid, &mp->m_sb.sb_meta_uuid)) in xfs_btree_fsblock_v5hdr_verify()
4663 if (block->bb_u.l.bb_blkno != cpu_to_be64(xfs_buf_daddr(bp))) in xfs_btree_fsblock_v5hdr_verify()
4666 be64_to_cpu(block->bb_u.l.bb_owner) != owner) in xfs_btree_fsblock_v5hdr_verify()
4671 /* Verify a long-format btree block. */
4677 struct xfs_mount *mp = bp->b_mount; in xfs_btree_fsblock_verify()
4682 ASSERT(!xfs_buftarg_is_mem(bp->b_target)); in xfs_btree_fsblock_verify()
4685 if (be16_to_cpu(block->bb_numrecs) > max_recs) in xfs_btree_fsblock_verify()
4691 block->bb_u.l.bb_leftsib); in xfs_btree_fsblock_verify()
4694 block->bb_u.l.bb_rightsib); in xfs_btree_fsblock_verify()
4698 /* Verify an in-memory btree block. */
4705 struct xfs_buftarg *btp = bp->b_target; in xfs_btree_memblock_verify()
4709 ASSERT(xfs_buftarg_is_mem(bp->b_target)); in xfs_btree_memblock_verify()
4712 if (be16_to_cpu(block->bb_numrecs) > max_recs) in xfs_btree_memblock_verify()
4718 block->bb_u.l.bb_leftsib); in xfs_btree_memblock_verify()
4722 block->bb_u.l.bb_rightsib); in xfs_btree_memblock_verify()
4729 * xfs_btree_agblock_v5hdr_verify() -- verify the v5 fields of a short-format
4738 struct xfs_mount *mp = bp->b_mount; in xfs_btree_agblock_v5hdr_verify()
4740 struct xfs_perag *pag = bp->b_pag; in xfs_btree_agblock_v5hdr_verify()
4744 if (!uuid_equal(&block->bb_u.s.bb_uuid, &mp->m_sb.sb_meta_uuid)) in xfs_btree_agblock_v5hdr_verify()
4746 if (block->bb_u.s.bb_blkno != cpu_to_be64(xfs_buf_daddr(bp))) in xfs_btree_agblock_v5hdr_verify()
4748 if (pag && be32_to_cpu(block->bb_u.s.bb_owner) != pag->pag_agno) in xfs_btree_agblock_v5hdr_verify()
4754 * xfs_btree_agblock_verify() -- verify a short-format btree block
4764 struct xfs_mount *mp = bp->b_mount; in xfs_btree_agblock_verify()
4769 ASSERT(!xfs_buftarg_is_mem(bp->b_target)); in xfs_btree_agblock_verify()
4772 if (be16_to_cpu(block->bb_numrecs) > max_recs) in xfs_btree_agblock_verify()
4777 fa = xfs_btree_check_agblock_siblings(bp->b_pag, agbno, in xfs_btree_agblock_verify()
4778 block->bb_u.s.bb_leftsib); in xfs_btree_agblock_verify()
4780 fa = xfs_btree_check_agblock_siblings(bp->b_pag, agbno, in xfs_btree_agblock_verify()
4781 block->bb_u.s.bb_rightsib); in xfs_btree_agblock_verify()
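The verifier fragments above all share one shape: check each self-describing header field (magic, level, record count, sibling pointers) against its bound and report the first one that fails. A compressed, standalone model of that shape, with invented field names and host-endian integers in place of the on-disk big-endian fields:

#include <stdio.h>

#define TOY_MAGIC	0x58425452u	/* arbitrary magic for the example */
#define TOY_NULLBLOCK	(~0u)		/* "no sibling" marker */

struct toy_block_hdr {
	unsigned int	magic;
	unsigned int	level;
	unsigned int	numrecs;
	unsigned int	leftsib;
	unsigned int	rightsib;
};

/* Return NULL if the header looks sane, else the name of the bad field. */
static const char *toy_verify_hdr(const struct toy_block_hdr *hdr,
				  unsigned int max_level,
				  unsigned int max_recs,
				  unsigned int nblocks)
{
	if (hdr->magic != TOY_MAGIC)
		return "magic";
	if (hdr->level > max_level)
		return "level";
	if (hdr->numrecs > max_recs)
		return "numrecs";
	if (hdr->leftsib != TOY_NULLBLOCK && hdr->leftsib >= nblocks)
		return "leftsib";
	if (hdr->rightsib != TOY_NULLBLOCK && hdr->rightsib >= nblocks)
		return "rightsib";
	return NULL;
}

int main(void)
{
	struct toy_block_hdr good = { TOY_MAGIC, 1, 10, TOY_NULLBLOCK, 7 };
	struct toy_block_hdr bad  = { TOY_MAGIC, 9, 10, TOY_NULLBLOCK, 7 };
	const char *why;

	why = toy_verify_hdr(&good, 5, 16, 100);
	printf("good: %s\n", why ? why : "ok");
	why = toy_verify_hdr(&bad, 5, 16, 100);	/* level 9 exceeds the limit */
	printf("bad: %s\n", why ? why : "ok");
	return 0;
}

Reporting which check failed, rather than a bare error code, is what lets the real verifiers point at the exact corrupt field.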
4831 * We start by assuming a single-level tree consumes a single block, then track
4832 * the number of blocks each node level consumes until we no longer have space
4833 * to store the next node level. At this point, we are indexing all the leaf
4848 unsigned long long blocks_left = leaf_blocks - 1; in xfs_btree_space_to_height()
4855 blocks_left -= node_blocks; in xfs_btree_space_to_height()
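A standalone sketch of the sizing loop that comment describes: reserve one block for the root, then keep adding node levels while a full node level still fits into the remaining budget. The interface and names are invented for illustration and are not the kernel's:

#include <stdio.h>

/*
 * Toy sizing loop: "blocks" is the total block budget and "fanout" is
 * how many blocks of the level below a single node block can address.
 * One block is charged for the root up front; each extra level is only
 * added while the blocks it needs still fit in what is left.
 */
static unsigned int toy_space_to_height(unsigned long long blocks,
					unsigned int fanout)
{
	unsigned long long	node_blocks = fanout;
	unsigned long long	blocks_left = blocks - 1;
	unsigned int		height = 1;

	if (blocks < 1)
		return 0;

	while (node_blocks < blocks_left) {
		blocks_left -= node_blocks;
		node_blocks *= fanout;
		height++;
	}

	return height;
}

int main(void)
{
	printf("%u\n", toy_space_to_height(500, 10));	/* 3 */
	printf("%u\n", toy_space_to_height(50000, 10));	/* 5 */
	return 0;
}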
4882 ASSERT(cur->bc_ops->init_high_key_from_rec); in xfs_btree_simple_query_range()
4883 ASSERT(cur->bc_ops->diff_two_keys); in xfs_btree_simple_query_range()
4909 cur->bc_ops->init_high_key_from_rec(&rec_key, recp); in xfs_btree_simple_query_range()
4916 cur->bc_ops->init_key_from_rec(&rec_key, recp); in xfs_btree_simple_query_range()
4942 * First, generate keys for the low and high records passed in.
4944 * For any leaf node, generate the high and low keys for the record.
4945 * If the record keys overlap with the query low/high keys, pass the
4948 * For any internal node, compare the low and high keys of each
4949 * pointer against the query low/high keys. If there's an overlap,
4953 * that is greater than the query's high key.
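The overlap rule spelled out above is the usual closed-interval test: a record or pointer is interesting when its high key is at least the query's low key and its low key is at most the query's high key, and a scan can stop at the first low key beyond the query's high key. A minimal sketch with plain integers standing in for keys (the real code compares opaque keys through per-btree callbacks):

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for a key range: [lo, hi], both ends inclusive. */
struct toy_range {
	long long	lo;
	long long	hi;
};

/* True if the candidate record/pointer range overlaps the query range. */
static bool ranges_overlap(const struct toy_range *query,
			   const struct toy_range *cand)
{
	return cand->hi >= query->lo && query->hi >= cand->lo;
}

/* True if the scan can stop: this entry and everything after it is past the query. */
static bool past_query(const struct toy_range *query,
		       const struct toy_range *cand)
{
	return query->hi < cand->lo;
}

int main(void)
{
	struct toy_range query = { 10, 20 };
	struct toy_range rec   = { 18, 25 };	/* overlaps the query */
	struct toy_range later = { 30, 40 };	/* entirely past the query */

	printf("%d %d\n", ranges_overlap(&query, &rec),
			  past_query(&query, &later));	/* 1 1 */
	return 0;
}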
4971 int level; in xfs_btree_overlapped_query_range() local
4977 level = cur->bc_nlevels - 1; in xfs_btree_overlapped_query_range()
4979 error = xfs_btree_lookup_get_block(cur, level, &ptr, &block); in xfs_btree_overlapped_query_range()
4982 xfs_btree_get_block(cur, level, &bp); in xfs_btree_overlapped_query_range()
4983 trace_xfs_btree_overlapped_query_range(cur, level, bp); in xfs_btree_overlapped_query_range()
4985 error = xfs_btree_check_block(cur, block, level, bp); in xfs_btree_overlapped_query_range()
4989 cur->bc_levels[level].ptr = 1; in xfs_btree_overlapped_query_range()
4991 while (level < cur->bc_nlevels) { in xfs_btree_overlapped_query_range()
4992 block = xfs_btree_get_block(cur, level, &bp); in xfs_btree_overlapped_query_range()
4995 if (cur->bc_levels[level].ptr > in xfs_btree_overlapped_query_range()
4996 be16_to_cpu(block->bb_numrecs)) { in xfs_btree_overlapped_query_range()
4998 if (level < cur->bc_nlevels - 1) in xfs_btree_overlapped_query_range()
4999 cur->bc_levels[level + 1].ptr++; in xfs_btree_overlapped_query_range()
5000 level++; in xfs_btree_overlapped_query_range()
5004 if (level == 0) { in xfs_btree_overlapped_query_range()
5006 recp = xfs_btree_rec_addr(cur, cur->bc_levels[0].ptr, in xfs_btree_overlapped_query_range()
5009 cur->bc_ops->init_high_key_from_rec(&rec_hkey, recp); in xfs_btree_overlapped_query_range()
5010 cur->bc_ops->init_key_from_rec(&rec_key, recp); in xfs_btree_overlapped_query_range()
5013 * If (query's high key < record's low key), then there in xfs_btree_overlapped_query_range()
5015 * up to the leaf level to find more record blocks. in xfs_btree_overlapped_query_range()
5017 * If (record's high key >= query's low key) and in xfs_btree_overlapped_query_range()
5018 * (query's high key >= record's low key), then in xfs_btree_overlapped_query_range()
5028 cur->bc_levels[level].ptr++; in xfs_btree_overlapped_query_range()
5033 lkp = xfs_btree_key_addr(cur, cur->bc_levels[level].ptr, block); in xfs_btree_overlapped_query_range()
5034 hkp = xfs_btree_high_key_addr(cur, cur->bc_levels[level].ptr, in xfs_btree_overlapped_query_range()
5036 pp = xfs_btree_ptr_addr(cur, cur->bc_levels[level].ptr, block); in xfs_btree_overlapped_query_range()
5039 * If (query's high key < pointer's low key), then there are no in xfs_btree_overlapped_query_range()
5040 * more interesting keys in this block. Pop up one level in xfs_btree_overlapped_query_range()
5043 * If (pointer's high key >= query's low key) and in xfs_btree_overlapped_query_range()
5044 * (query's high key >= pointer's low key), then in xfs_btree_overlapped_query_range()
5050 level--; in xfs_btree_overlapped_query_range()
5051 error = xfs_btree_lookup_get_block(cur, level, pp, in xfs_btree_overlapped_query_range()
5055 xfs_btree_get_block(cur, level, &bp); in xfs_btree_overlapped_query_range()
5056 trace_xfs_btree_overlapped_query_range(cur, level, bp); in xfs_btree_overlapped_query_range()
5058 error = xfs_btree_check_block(cur, block, level, bp); in xfs_btree_overlapped_query_range()
5062 cur->bc_levels[level].ptr = 1; in xfs_btree_overlapped_query_range()
5065 cur->bc_levels[level].ptr++; in xfs_btree_overlapped_query_range()
5071 * block, a subsequent non-error cursor deletion will not release in xfs_btree_overlapped_query_range()
5072 * node-level buffers, causing a buffer leak. This is quite possible in xfs_btree_overlapped_query_range()
5073 * with a zero-results range query, so release the buffers if we in xfs_btree_overlapped_query_range()
5076 if (cur->bc_levels[0].bp == NULL) { in xfs_btree_overlapped_query_range()
5077 for (i = 0; i < cur->bc_nlevels; i++) { in xfs_btree_overlapped_query_range()
5078 if (cur->bc_levels[i].bp) { in xfs_btree_overlapped_query_range()
5079 xfs_trans_brelse(cur->bc_tp, in xfs_btree_overlapped_query_range()
5080 cur->bc_levels[i].bp); in xfs_btree_overlapped_query_range()
5081 cur->bc_levels[i].bp = NULL; in xfs_btree_overlapped_query_range()
5082 cur->bc_levels[i].ptr = 0; in xfs_btree_overlapped_query_range()
5083 cur->bc_levels[i].ra = 0; in xfs_btree_overlapped_query_range()
5099 cur->bc_rec = *irec; in xfs_btree_key_from_irec()
5100 cur->bc_ops->init_rec_from_cur(cur, &rec); in xfs_btree_key_from_irec()
5101 cur->bc_ops->init_key_from_rec(key, &rec); in xfs_btree_key_from_irec()
5108 * code. This function returns -ECANCELED, zero, or another negative error code.
5125 /* Enforce low key <= high key. */ in xfs_btree_query_range()
5127 return -EINVAL; in xfs_btree_query_range()
5129 if (!(cur->bc_ops->geom_flags & XFS_BTGEO_OVERLAPPING)) in xfs_btree_query_range()
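The calling convention noted above (a reversed range is rejected with -EINVAL, and the walk stops early and reports -ECANCELED when the per-record callback asks for it) can be modelled with a toy range query over a sorted array; every name below is invented for the example:

#include <errno.h>
#include <stdio.h>

typedef int (*toy_query_fn)(long long rec, void *priv);

/*
 * Call fn for every element of recs[] (sorted ascending) inside [lo, hi].
 * Returns 0 if the whole range was scanned, -EINVAL for a bad range, or
 * whatever nonzero value the callback returned to stop the walk early.
 */
static int toy_query_range(const long long *recs, int nrecs,
			   long long lo, long long hi,
			   toy_query_fn fn, void *priv)
{
	int i, error;

	if (lo > hi)
		return -EINVAL;

	for (i = 0; i < nrecs; i++) {
		if (recs[i] < lo)
			continue;
		if (recs[i] > hi)
			break;			/* sorted: nothing left to find */
		error = fn(recs[i], priv);
		if (error)
			return error;		/* e.g. -ECANCELED from the callback */
	}
	return 0;
}

/* Remember the first record in range, then ask the walk to stop. */
static int found_one(long long rec, void *priv)
{
	*(long long *)priv = rec;
	return -ECANCELED;
}

int main(void)
{
	const long long recs[] = { 3, 9, 17, 42 };
	long long hit = 0;
	int ret = toy_query_range(recs, 4, 10, 30, found_one, &hit);

	printf("ret=%d hit=%lld\n", ret, hit);	/* ret is -ECANCELED, hit is 17 */
	return 0;
}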
5146 memset(&cur->bc_rec, 0, sizeof(cur->bc_rec)); in xfs_btree_query_all()
5156 int level, in xfs_btree_count_blocks_helper() argument
5183 if (cur->bc_ops->ptr_len == XFS_BTREE_LONG_PTR_LEN) in xfs_btree_diff_two_ptrs()
5184 return (int64_t)be64_to_cpu(a->l) - be64_to_cpu(b->l); in xfs_btree_diff_two_ptrs()
5185 return (int64_t)be32_to_cpu(a->s) - be32_to_cpu(b->s); in xfs_btree_diff_two_ptrs()
5213 cur->bc_ops->init_key_from_rec(&rec_key, rec); in xfs_btree_has_records_helper()
5215 if (info->outcome == XBTREE_RECPACKING_EMPTY) { in xfs_btree_has_records_helper()
5216 info->outcome = XBTREE_RECPACKING_SPARSE; in xfs_btree_has_records_helper()
5223 if (xfs_btree_masked_keycmp_lt(cur, &info->start_key, &rec_key, in xfs_btree_has_records_helper()
5224 info->key_mask)) in xfs_btree_has_records_helper()
5225 return -ECANCELED; in xfs_btree_has_records_helper()
5234 key_contig = cur->bc_ops->keys_contiguous(cur, &info->high_key, in xfs_btree_has_records_helper()
5235 &rec_key, info->key_mask); in xfs_btree_has_records_helper()
5237 !(cur->bc_ops->geom_flags & XFS_BTGEO_OVERLAPPING)) in xfs_btree_has_records_helper()
5238 return -EFSCORRUPTED; in xfs_btree_has_records_helper()
5240 return -ECANCELED; in xfs_btree_has_records_helper()
5244 * If high_key(rec) is larger than any other high key we've seen, in xfs_btree_has_records_helper()
5247 cur->bc_ops->init_high_key_from_rec(&rec_high_key, rec); in xfs_btree_has_records_helper()
5248 if (xfs_btree_masked_keycmp_gt(cur, &rec_high_key, &info->high_key, in xfs_btree_has_records_helper()
5249 info->key_mask)) in xfs_btree_has_records_helper()
5250 info->high_key = rec_high_key; /* struct copy */ in xfs_btree_has_records_helper()
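The helper above builds toward a three-way verdict about the queried keyspace: no records at all, records with gaps, or records covering every key in the range. A self-contained model of that classification over integer extents (all names invented; the kernel decides adjacency through the per-btree keys_contiguous method):

#include <stdio.h>

enum toy_packing { TOY_EMPTY, TOY_SPARSE, TOY_FULL };

/* Inclusive extent; the input array is sorted and non-overlapping. */
struct toy_extent {
	long long	lo;
	long long	hi;
};

/* Classify how completely exts[] covers the query range [lo, hi]. */
static enum toy_packing classify(const struct toy_extent *exts, int n,
				 long long lo, long long hi)
{
	long long	next = lo;	/* lowest key not yet proven covered */
	int		i, seen = 0;

	for (i = 0; i < n; i++) {
		if (exts[i].hi < lo || exts[i].lo > hi)
			continue;		/* outside the query range */
		seen = 1;
		if (exts[i].lo > next)
			return TOY_SPARSE;	/* hole before this extent */
		if (exts[i].hi >= next)
			next = exts[i].hi + 1;
	}

	if (!seen)
		return TOY_EMPTY;
	return next > hi ? TOY_FULL : TOY_SPARSE;
}

int main(void)
{
	const struct toy_extent exts[] = { { 0, 4 }, { 5, 9 }, { 15, 20 } };

	printf("%d\n", classify(exts, 3, 2, 8));	/* 2: TOY_FULL */
	printf("%d\n", classify(exts, 3, 2, 12));	/* 1: TOY_SPARSE, keys 10-12 missing */
	printf("%d\n", classify(exts, 3, 30, 40));	/* 0: TOY_EMPTY */
	return 0;
}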
5273 const union xfs_btree_irec *high, in xfs_btree_has_records() argument
5284 if (!cur->bc_ops->keys_contiguous) { in xfs_btree_has_records()
5286 return -EOPNOTSUPP; in xfs_btree_has_records()
5290 xfs_btree_key_from_irec(cur, &info.end_key, high); in xfs_btree_has_records()
5292 error = xfs_btree_query_range(cur, low, high, in xfs_btree_has_records()
5294 if (error == -ECANCELED) in xfs_btree_has_records()
5327 if (cur->bc_levels[0].ptr < xfs_btree_get_numrecs(block)) in xfs_btree_has_more_records()
5331 if (cur->bc_ops->ptr_len == XFS_BTREE_LONG_PTR_LEN) in xfs_btree_has_more_records()
5332 return block->bb_u.l.bb_rightsib != cpu_to_be64(NULLFSBLOCK); in xfs_btree_has_more_records()
5334 return block->bb_u.s.bb_rightsib != cpu_to_be32(NULLAGBLOCK); in xfs_btree_has_more_records()
5384 memset(&cur->bc_rec, 0, sizeof(cur->bc_rec)); in xfs_btree_goto_left_edge()
5397 return -EFSCORRUPTED; in xfs_btree_goto_left_edge()