Lines matching references to "cur"
126 struct xfs_btree_cur *cur, in __xfs_btree_check_lblock_hdr() argument
131 struct xfs_mount *mp = cur->bc_mp; in __xfs_btree_check_lblock_hdr()
143 if (be32_to_cpu(block->bb_magic) != xfs_btree_magic(mp, cur->bc_ops)) in __xfs_btree_check_lblock_hdr()
148 cur->bc_ops->get_maxrecs(cur, level)) in __xfs_btree_check_lblock_hdr()
160 struct xfs_btree_cur *cur, in __xfs_btree_check_fsblock() argument
165 struct xfs_mount *mp = cur->bc_mp; in __xfs_btree_check_fsblock()
169 fa = __xfs_btree_check_lblock_hdr(cur, block, level, bp); in __xfs_btree_check_fsblock()
200 struct xfs_btree_cur *cur, in __xfs_btree_check_memblock() argument
205 struct xfs_buftarg *btp = cur->bc_mem.xfbtree->target; in __xfs_btree_check_memblock()
209 fa = __xfs_btree_check_lblock_hdr(cur, block, level, bp); in __xfs_btree_check_memblock()
228 struct xfs_btree_cur *cur, in __xfs_btree_check_agblock() argument
233 struct xfs_mount *mp = cur->bc_mp; in __xfs_btree_check_agblock()
234 struct xfs_perag *pag = to_perag(cur->bc_group); in __xfs_btree_check_agblock()
245 if (be32_to_cpu(block->bb_magic) != xfs_btree_magic(mp, cur->bc_ops)) in __xfs_btree_check_agblock()
250 cur->bc_ops->get_maxrecs(cur, level)) in __xfs_btree_check_agblock()
269 struct xfs_btree_cur *cur, in __xfs_btree_check_block() argument
274 switch (cur->bc_ops->type) { in __xfs_btree_check_block()
276 return __xfs_btree_check_memblock(cur, block, level, bp); in __xfs_btree_check_block()
278 return __xfs_btree_check_agblock(cur, block, level, bp); in __xfs_btree_check_block()
280 return __xfs_btree_check_fsblock(cur, block, level, bp); in __xfs_btree_check_block()
287 static inline unsigned int xfs_btree_block_errtag(struct xfs_btree_cur *cur) in xfs_btree_block_errtag() argument
289 if (cur->bc_ops->ptr_len == XFS_BTREE_SHORT_PTR_LEN) in xfs_btree_block_errtag()
299 struct xfs_btree_cur *cur, /* btree cursor */ in xfs_btree_check_block() argument
304 struct xfs_mount *mp = cur->bc_mp; in xfs_btree_check_block()
307 fa = __xfs_btree_check_block(cur, block, level, bp); in xfs_btree_check_block()
309 XFS_TEST_ERROR(mp, xfs_btree_block_errtag(cur))) { in xfs_btree_check_block()
312 xfs_btree_mark_sick(cur); in xfs_btree_check_block()
320 struct xfs_btree_cur *cur, in __xfs_btree_check_ptr() argument
328 switch (cur->bc_ops->type) { in __xfs_btree_check_ptr()
330 if (!xfbtree_verify_bno(cur->bc_mem.xfbtree, in __xfs_btree_check_ptr()
335 if (!xfs_verify_fsbno(cur->bc_mp, in __xfs_btree_check_ptr()
340 if (!xfs_verify_agbno(to_perag(cur->bc_group), in __xfs_btree_check_ptr()
355 struct xfs_btree_cur *cur, in xfs_btree_check_ptr() argument
362 error = __xfs_btree_check_ptr(cur, ptr, index, level); in xfs_btree_check_ptr()
364 switch (cur->bc_ops->type) { in xfs_btree_check_ptr()
366 xfs_err(cur->bc_mp, in xfs_btree_check_ptr()
368 cur->bc_ops->name, cur->bc_flags, level, index, in xfs_btree_check_ptr()
372 xfs_err(cur->bc_mp, in xfs_btree_check_ptr()
374 cur->bc_ino.ip->i_ino, in xfs_btree_check_ptr()
375 cur->bc_ino.whichfork, cur->bc_ops->name, in xfs_btree_check_ptr()
379 xfs_err(cur->bc_mp, in xfs_btree_check_ptr()
381 cur->bc_group->xg_gno, cur->bc_ops->name, in xfs_btree_check_ptr()
385 xfs_btree_mark_sick(cur); in xfs_btree_check_ptr()
475 struct xfs_btree_cur *cur, in xfs_btree_free_block() argument
480 trace_xfs_btree_free_block(cur, bp); in xfs_btree_free_block()
486 if (unlikely(cur->bc_flags & XFS_BTREE_STAGING)) { in xfs_btree_free_block()
491 error = cur->bc_ops->free_block(cur, bp); in xfs_btree_free_block()
493 xfs_trans_binval(cur->bc_tp, bp); in xfs_btree_free_block()
494 XFS_BTREE_STATS_INC(cur, free); in xfs_btree_free_block()
504 struct xfs_btree_cur *cur, /* btree cursor */ in xfs_btree_del_cursor() argument
516 for (i = 0; i < cur->bc_nlevels; i++) { in xfs_btree_del_cursor()
517 if (cur->bc_levels[i].bp) in xfs_btree_del_cursor()
518 xfs_trans_brelse(cur->bc_tp, cur->bc_levels[i].bp); in xfs_btree_del_cursor()
529 ASSERT(!xfs_btree_is_bmap(cur->bc_ops) || cur->bc_bmap.allocated == 0 || in xfs_btree_del_cursor()
530 xfs_is_shutdown(cur->bc_mp) || error != 0); in xfs_btree_del_cursor()
532 if (cur->bc_group) in xfs_btree_del_cursor()
533 xfs_group_put(cur->bc_group); in xfs_btree_del_cursor()
534 kmem_cache_free(cur->bc_cache, cur); in xfs_btree_del_cursor()
540 struct xfs_btree_cur *cur) in xfs_btree_buftarg() argument
542 if (cur->bc_ops->type == XFS_BTREE_TYPE_MEM) in xfs_btree_buftarg()
543 return cur->bc_mem.xfbtree->target; in xfs_btree_buftarg()
544 return cur->bc_mp->m_ddev_targp; in xfs_btree_buftarg()
550 struct xfs_btree_cur *cur) in xfs_btree_bbsize() argument
552 if (cur->bc_ops->type == XFS_BTREE_TYPE_MEM) in xfs_btree_bbsize()
554 return cur->bc_mp->m_bsize; in xfs_btree_bbsize()
563 struct xfs_btree_cur *cur, /* input cursor */ in xfs_btree_dup_cursor() argument
566 struct xfs_mount *mp = cur->bc_mp; in xfs_btree_dup_cursor()
567 struct xfs_trans *tp = cur->bc_tp; in xfs_btree_dup_cursor()
577 if (unlikely(cur->bc_flags & XFS_BTREE_STAGING)) { in xfs_btree_dup_cursor()
585 new = cur->bc_ops->dup_cursor(cur); in xfs_btree_dup_cursor()
590 new->bc_rec = cur->bc_rec; in xfs_btree_dup_cursor()
596 new->bc_levels[i].ptr = cur->bc_levels[i].ptr; in xfs_btree_dup_cursor()
597 new->bc_levels[i].ra = cur->bc_levels[i].ra; in xfs_btree_dup_cursor()
598 bp = cur->bc_levels[i].bp; in xfs_btree_dup_cursor()
601 xfs_btree_buftarg(cur), in xfs_btree_dup_cursor()
603 xfs_btree_bbsize(cur), 0, &bp, in xfs_btree_dup_cursor()
604 cur->bc_ops->buf_ops); in xfs_btree_dup_cursor()
696 static inline size_t xfs_btree_block_len(struct xfs_btree_cur *cur) in xfs_btree_block_len() argument
698 if (cur->bc_ops->ptr_len == XFS_BTREE_LONG_PTR_LEN) { in xfs_btree_block_len()
699 if (xfs_has_crc(cur->bc_mp)) in xfs_btree_block_len()
703 if (xfs_has_crc(cur->bc_mp)) in xfs_btree_block_len()
713 struct xfs_btree_cur *cur, in xfs_btree_rec_offset() argument
716 return xfs_btree_block_len(cur) + in xfs_btree_rec_offset()
717 (n - 1) * cur->bc_ops->rec_len; in xfs_btree_rec_offset()
725 struct xfs_btree_cur *cur, in xfs_btree_key_offset() argument
728 return xfs_btree_block_len(cur) + in xfs_btree_key_offset()
729 (n - 1) * cur->bc_ops->key_len; in xfs_btree_key_offset()
737 struct xfs_btree_cur *cur, in xfs_btree_high_key_offset() argument
740 return xfs_btree_block_len(cur) + in xfs_btree_high_key_offset()
741 (n - 1) * cur->bc_ops->key_len + (cur->bc_ops->key_len / 2); in xfs_btree_high_key_offset()
749 struct xfs_btree_cur *cur, in xfs_btree_ptr_offset() argument
753 return xfs_btree_block_len(cur) + in xfs_btree_ptr_offset()
754 cur->bc_ops->get_maxrecs(cur, level) * cur->bc_ops->key_len + in xfs_btree_ptr_offset()
755 (n - 1) * cur->bc_ops->ptr_len; in xfs_btree_ptr_offset()
763 struct xfs_btree_cur *cur, in xfs_btree_rec_addr() argument
768 ((char *)block + xfs_btree_rec_offset(cur, n)); in xfs_btree_rec_addr()
776 struct xfs_btree_cur *cur, in xfs_btree_key_addr() argument
781 ((char *)block + xfs_btree_key_offset(cur, n)); in xfs_btree_key_addr()
789 struct xfs_btree_cur *cur, in xfs_btree_high_key_addr() argument
794 ((char *)block + xfs_btree_high_key_offset(cur, n)); in xfs_btree_high_key_addr()
802 struct xfs_btree_cur *cur, in xfs_btree_ptr_addr() argument
811 ((char *)block + xfs_btree_ptr_offset(cur, n, level)); in xfs_btree_ptr_addr()
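The offset helpers listed above compute where the n-th record, key, high key and pointer live inside a btree block: a fixed header length plus (n - 1) entry lengths, with interior-node pointers placed after a full array of maxrecs keys. A minimal standalone sketch of that arithmetic, using made-up sizes in place of xfs_btree_block_len() and the cur->bc_ops lengths, could look like this:

/*
 * Illustrative sketch only, not the kernel code.  The sizes below are
 * hypothetical stand-ins for xfs_btree_block_len(), rec_len, key_len,
 * ptr_len and get_maxrecs(); only the layout arithmetic mirrors the
 * helpers listed above.
 */
#include <stdio.h>

#define BLOCK_LEN	72	/* hypothetical block header size */
#define REC_LEN		16	/* hypothetical cur->bc_ops->rec_len */
#define KEY_LEN		16	/* hypothetical cur->bc_ops->key_len */
#define PTR_LEN		8	/* hypothetical cur->bc_ops->ptr_len */
#define MAXRECS		8	/* hypothetical get_maxrecs() result */

/* Entries are 1-based, matching the kernel helpers. */
static size_t rec_offset(int n)  { return BLOCK_LEN + (n - 1) * REC_LEN; }
static size_t key_offset(int n)  { return BLOCK_LEN + (n - 1) * KEY_LEN; }
/* The high key of an overlapping btree lives in the second half of each key. */
static size_t high_key_offset(int n) { return key_offset(n) + KEY_LEN / 2; }
/* Interior-node pointers follow the full key array. */
static size_t ptr_offset(int n)  { return BLOCK_LEN + MAXRECS * KEY_LEN + (n - 1) * PTR_LEN; }

int main(void)
{
	printf("rec 3 at %zu, key 3 at %zu, high key 3 at %zu, ptr 3 at %zu\n",
	       rec_offset(3), key_offset(3), high_key_offset(3), ptr_offset(3));
	return 0;
}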
816 struct xfs_btree_cur *cur) in xfs_btree_ifork_ptr() argument
818 ASSERT(cur->bc_ops->type == XFS_BTREE_TYPE_INODE); in xfs_btree_ifork_ptr()
820 if (cur->bc_flags & XFS_BTREE_STAGING) in xfs_btree_ifork_ptr()
821 return cur->bc_ino.ifake->if_fork; in xfs_btree_ifork_ptr()
822 return xfs_ifork_ptr(cur->bc_ino.ip, cur->bc_ino.whichfork); in xfs_btree_ifork_ptr()
833 struct xfs_btree_cur *cur) in xfs_btree_get_iroot() argument
835 struct xfs_ifork *ifp = xfs_btree_ifork_ptr(cur); in xfs_btree_get_iroot()
846 struct xfs_btree_cur *cur, /* btree cursor */ in xfs_btree_get_block() argument
850 if (xfs_btree_at_iroot(cur, level)) { in xfs_btree_get_block()
852 return xfs_btree_get_iroot(cur); in xfs_btree_get_block()
855 *bpp = cur->bc_levels[level].bp; in xfs_btree_get_block()
865 struct xfs_btree_cur *cur, /* btree cursor */ in xfs_btree_firstrec() argument
874 block = xfs_btree_get_block(cur, level, &bp); in xfs_btree_firstrec()
875 if (xfs_btree_check_block(cur, block, level, bp)) in xfs_btree_firstrec()
885 cur->bc_levels[level].ptr = 1; in xfs_btree_firstrec()
895 struct xfs_btree_cur *cur, /* btree cursor */ in xfs_btree_lastrec() argument
904 block = xfs_btree_get_block(cur, level, &bp); in xfs_btree_lastrec()
905 if (xfs_btree_check_block(cur, block, level, bp)) in xfs_btree_lastrec()
915 cur->bc_levels[level].ptr = be16_to_cpu(block->bb_numrecs); in xfs_btree_lastrec()
957 struct xfs_btree_cur *cur, in xfs_btree_readahead_fsblock() argument
961 struct xfs_mount *mp = cur->bc_mp; in xfs_btree_readahead_fsblock()
968 mp->m_bsize, cur->bc_ops->buf_ops); in xfs_btree_readahead_fsblock()
974 mp->m_bsize, cur->bc_ops->buf_ops); in xfs_btree_readahead_fsblock()
983 struct xfs_btree_cur *cur, in xfs_btree_readahead_memblock() argument
987 struct xfs_buftarg *btp = cur->bc_mem.xfbtree->target; in xfs_btree_readahead_memblock()
994 cur->bc_ops->buf_ops); in xfs_btree_readahead_memblock()
1000 cur->bc_ops->buf_ops); in xfs_btree_readahead_memblock()
1009 struct xfs_btree_cur *cur, in xfs_btree_readahead_agblock() argument
1013 struct xfs_mount *mp = cur->bc_mp; in xfs_btree_readahead_agblock()
1014 struct xfs_perag *pag = to_perag(cur->bc_group); in xfs_btree_readahead_agblock()
1022 cur->bc_ops->buf_ops); in xfs_btree_readahead_agblock()
1029 cur->bc_ops->buf_ops); in xfs_btree_readahead_agblock()
1042 struct xfs_btree_cur *cur, /* btree cursor */ in xfs_btree_readahead() argument
1052 if (xfs_btree_at_iroot(cur, lev)) in xfs_btree_readahead()
1055 if ((cur->bc_levels[lev].ra | lr) == cur->bc_levels[lev].ra) in xfs_btree_readahead()
1058 cur->bc_levels[lev].ra |= lr; in xfs_btree_readahead()
1059 block = XFS_BUF_TO_BLOCK(cur->bc_levels[lev].bp); in xfs_btree_readahead()
1061 switch (cur->bc_ops->type) { in xfs_btree_readahead()
1063 return xfs_btree_readahead_agblock(cur, lr, block); in xfs_btree_readahead()
1065 return xfs_btree_readahead_fsblock(cur, lr, block); in xfs_btree_readahead()
1067 return xfs_btree_readahead_memblock(cur, lr, block); in xfs_btree_readahead()
1076 struct xfs_btree_cur *cur, in xfs_btree_ptr_to_daddr() argument
1082 error = xfs_btree_check_ptr(cur, ptr, 0, 1); in xfs_btree_ptr_to_daddr()
1086 switch (cur->bc_ops->type) { in xfs_btree_ptr_to_daddr()
1088 *daddr = xfs_agbno_to_daddr(to_perag(cur->bc_group), in xfs_btree_ptr_to_daddr()
1092 *daddr = XFS_FSB_TO_DADDR(cur->bc_mp, be64_to_cpu(ptr->l)); in xfs_btree_ptr_to_daddr()
1109 struct xfs_btree_cur *cur, in xfs_btree_readahead_ptr() argument
1115 if (xfs_btree_ptr_to_daddr(cur, ptr, &daddr)) in xfs_btree_readahead_ptr()
1117 xfs_buf_readahead(xfs_btree_buftarg(cur), daddr, in xfs_btree_readahead_ptr()
1118 xfs_btree_bbsize(cur) * count, in xfs_btree_readahead_ptr()
1119 cur->bc_ops->buf_ops); in xfs_btree_readahead_ptr()
1128 struct xfs_btree_cur *cur, /* btree cursor */ in xfs_btree_setbuf() argument
1134 if (cur->bc_levels[lev].bp) in xfs_btree_setbuf()
1135 xfs_trans_brelse(cur->bc_tp, cur->bc_levels[lev].bp); in xfs_btree_setbuf()
1136 cur->bc_levels[lev].bp = bp; in xfs_btree_setbuf()
1137 cur->bc_levels[lev].ra = 0; in xfs_btree_setbuf()
1140 if (cur->bc_ops->ptr_len == XFS_BTREE_LONG_PTR_LEN) { in xfs_btree_setbuf()
1142 cur->bc_levels[lev].ra |= XFS_BTCUR_LEFTRA; in xfs_btree_setbuf()
1144 cur->bc_levels[lev].ra |= XFS_BTCUR_RIGHTRA; in xfs_btree_setbuf()
1147 cur->bc_levels[lev].ra |= XFS_BTCUR_LEFTRA; in xfs_btree_setbuf()
1149 cur->bc_levels[lev].ra |= XFS_BTCUR_RIGHTRA; in xfs_btree_setbuf()
1155 struct xfs_btree_cur *cur, in xfs_btree_ptr_is_null() argument
1158 if (cur->bc_ops->ptr_len == XFS_BTREE_LONG_PTR_LEN) in xfs_btree_ptr_is_null()
1166 struct xfs_btree_cur *cur, in xfs_btree_set_ptr_null() argument
1169 if (cur->bc_ops->ptr_len == XFS_BTREE_LONG_PTR_LEN) in xfs_btree_set_ptr_null()
1177 struct xfs_btree_cur *cur, in xfs_btree_ptrs_equal() argument
1181 if (cur->bc_ops->ptr_len == XFS_BTREE_LONG_PTR_LEN) in xfs_btree_ptrs_equal()
1191 struct xfs_btree_cur *cur, in xfs_btree_get_sibling() argument
1198 if (cur->bc_ops->ptr_len == XFS_BTREE_LONG_PTR_LEN) { in xfs_btree_get_sibling()
1213 struct xfs_btree_cur *cur, in xfs_btree_set_sibling() argument
1220 if (cur->bc_ops->ptr_len == XFS_BTREE_LONG_PTR_LEN) { in xfs_btree_set_sibling()
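The pointer helpers above branch on cur->bc_ops->ptr_len so the same on-disk field can hold either a 64-bit long pointer or a 32-bit short pointer. A hedged sketch of that union handling follows; the sentinel values are illustrative stand-ins, not the kernel's actual null-block constants:

/*
 * Illustrative sketch only.  The union shape and the null sentinels are
 * simplified assumptions standing in for the kernel's big-endian
 * on-disk pointer and its null fsblock/agblock values.
 */
#include <stdint.h>
#include <stdbool.h>

union btree_ptr {
	uint64_t l;	/* long pointer: filesystem block number */
	uint32_t s;	/* short pointer: AG-relative block number */
};

#define NULL_LONG_PTR	((uint64_t)-1)	/* stand-in null long pointer */
#define NULL_SHORT_PTR	((uint32_t)-1)	/* stand-in null short pointer */

static bool ptr_is_null(const union btree_ptr *ptr, bool long_ptrs)
{
	return long_ptrs ? ptr->l == NULL_LONG_PTR : ptr->s == NULL_SHORT_PTR;
}

static void set_ptr_null(union btree_ptr *ptr, bool long_ptrs)
{
	if (long_ptrs)
		ptr->l = NULL_LONG_PTR;
	else
		ptr->s = NULL_SHORT_PTR;
}

int main(void)
{
	union btree_ptr p;

	set_ptr_null(&p, true);
	return ptr_is_null(&p, true) ? 0 : 1;
}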
1302 struct xfs_btree_cur *cur) in xfs_btree_owner() argument
1304 switch (cur->bc_ops->type) { in xfs_btree_owner()
1306 return cur->bc_mem.xfbtree->owner; in xfs_btree_owner()
1308 return cur->bc_ino.ip->i_ino; in xfs_btree_owner()
1310 return cur->bc_group->xg_gno; in xfs_btree_owner()
1319 struct xfs_btree_cur *cur, in xfs_btree_init_block_cur() argument
1324 xfs_btree_init_buf(cur->bc_mp, bp, cur->bc_ops, level, numrecs, in xfs_btree_init_block_cur()
1325 xfs_btree_owner(cur)); in xfs_btree_init_block_cur()
1330 struct xfs_btree_cur *cur, in xfs_btree_buf_to_ptr() argument
1334 switch (cur->bc_ops->type) { in xfs_btree_buf_to_ptr()
1336 ptr->s = cpu_to_be32(xfs_daddr_to_agbno(cur->bc_mp, in xfs_btree_buf_to_ptr()
1340 ptr->l = cpu_to_be64(XFS_DADDR_TO_FSB(cur->bc_mp, in xfs_btree_buf_to_ptr()
1351 struct xfs_btree_cur *cur, in xfs_btree_set_refs() argument
1354 xfs_buf_set_ref(bp, cur->bc_ops->lru_refs); in xfs_btree_set_refs()
1359 struct xfs_btree_cur *cur, in xfs_btree_get_buf_block() argument
1367 error = xfs_btree_ptr_to_daddr(cur, ptr, &d); in xfs_btree_get_buf_block()
1370 error = xfs_trans_get_buf(cur->bc_tp, xfs_btree_buftarg(cur), d, in xfs_btree_get_buf_block()
1371 xfs_btree_bbsize(cur), 0, bpp); in xfs_btree_get_buf_block()
1375 (*bpp)->b_ops = cur->bc_ops->buf_ops; in xfs_btree_get_buf_block()
1386 struct xfs_btree_cur *cur, in xfs_btree_read_buf_block() argument
1392 struct xfs_mount *mp = cur->bc_mp; in xfs_btree_read_buf_block()
1399 error = xfs_btree_ptr_to_daddr(cur, ptr, &d); in xfs_btree_read_buf_block()
1402 error = xfs_trans_read_buf(mp, cur->bc_tp, xfs_btree_buftarg(cur), d, in xfs_btree_read_buf_block()
1403 xfs_btree_bbsize(cur), flags, bpp, in xfs_btree_read_buf_block()
1404 cur->bc_ops->buf_ops); in xfs_btree_read_buf_block()
1406 xfs_btree_mark_sick(cur); in xfs_btree_read_buf_block()
1410 xfs_btree_set_refs(cur, *bpp); in xfs_btree_read_buf_block()
1420 struct xfs_btree_cur *cur, in xfs_btree_copy_keys() argument
1426 memcpy(dst_key, src_key, numkeys * cur->bc_ops->key_len); in xfs_btree_copy_keys()
1434 struct xfs_btree_cur *cur, in xfs_btree_copy_recs() argument
1440 memcpy(dst_rec, src_rec, numrecs * cur->bc_ops->rec_len); in xfs_btree_copy_recs()
1448 struct xfs_btree_cur *cur, in xfs_btree_copy_ptrs() argument
1454 memcpy(dst_ptr, src_ptr, numptrs * cur->bc_ops->ptr_len); in xfs_btree_copy_ptrs()
1462 struct xfs_btree_cur *cur, in xfs_btree_shift_keys() argument
1472 dst_key = (char *)key + (dir * cur->bc_ops->key_len); in xfs_btree_shift_keys()
1473 memmove(dst_key, key, numkeys * cur->bc_ops->key_len); in xfs_btree_shift_keys()
1481 struct xfs_btree_cur *cur, in xfs_btree_shift_recs() argument
1491 dst_rec = (char *)rec + (dir * cur->bc_ops->rec_len); in xfs_btree_shift_recs()
1492 memmove(dst_rec, rec, numrecs * cur->bc_ops->rec_len); in xfs_btree_shift_recs()
1500 struct xfs_btree_cur *cur, in xfs_btree_shift_ptrs() argument
1510 dst_ptr = (char *)ptr + (dir * cur->bc_ops->ptr_len); in xfs_btree_shift_ptrs()
1511 memmove(dst_ptr, ptr, numptrs * cur->bc_ops->ptr_len); in xfs_btree_shift_ptrs()
1519 struct xfs_btree_cur *cur, in xfs_btree_log_keys() argument
1526 xfs_trans_buf_set_type(cur->bc_tp, bp, XFS_BLFT_BTREE_BUF); in xfs_btree_log_keys()
1527 xfs_trans_log_buf(cur->bc_tp, bp, in xfs_btree_log_keys()
1528 xfs_btree_key_offset(cur, first), in xfs_btree_log_keys()
1529 xfs_btree_key_offset(cur, last + 1) - 1); in xfs_btree_log_keys()
1531 xfs_trans_log_inode(cur->bc_tp, cur->bc_ino.ip, in xfs_btree_log_keys()
1532 xfs_ilog_fbroot(cur->bc_ino.whichfork)); in xfs_btree_log_keys()
1541 struct xfs_btree_cur *cur, in xfs_btree_log_recs() argument
1547 xfs_trans_log_inode(cur->bc_tp, cur->bc_ino.ip, in xfs_btree_log_recs()
1548 xfs_ilog_fbroot(cur->bc_ino.whichfork)); in xfs_btree_log_recs()
1552 xfs_trans_buf_set_type(cur->bc_tp, bp, XFS_BLFT_BTREE_BUF); in xfs_btree_log_recs()
1553 xfs_trans_log_buf(cur->bc_tp, bp, in xfs_btree_log_recs()
1554 xfs_btree_rec_offset(cur, first), in xfs_btree_log_recs()
1555 xfs_btree_rec_offset(cur, last + 1) - 1); in xfs_btree_log_recs()
1563 struct xfs_btree_cur *cur, /* btree cursor */ in xfs_btree_log_ptrs() argument
1573 xfs_trans_buf_set_type(cur->bc_tp, bp, XFS_BLFT_BTREE_BUF); in xfs_btree_log_ptrs()
1574 xfs_trans_log_buf(cur->bc_tp, bp, in xfs_btree_log_ptrs()
1575 xfs_btree_ptr_offset(cur, first, level), in xfs_btree_log_ptrs()
1576 xfs_btree_ptr_offset(cur, last + 1, level) - 1); in xfs_btree_log_ptrs()
1578 xfs_trans_log_inode(cur->bc_tp, cur->bc_ino.ip, in xfs_btree_log_ptrs()
1579 xfs_ilog_fbroot(cur->bc_ino.whichfork)); in xfs_btree_log_ptrs()
1589 struct xfs_btree_cur *cur, /* btree cursor */ in xfs_btree_log_block() argument
1626 if (xfs_has_crc(cur->bc_mp)) { in xfs_btree_log_block()
1641 (cur->bc_ops->ptr_len == XFS_BTREE_LONG_PTR_LEN) ? in xfs_btree_log_block()
1644 xfs_trans_buf_set_type(cur->bc_tp, bp, XFS_BLFT_BTREE_BUF); in xfs_btree_log_block()
1645 xfs_trans_log_buf(cur->bc_tp, bp, first, last); in xfs_btree_log_block()
1647 xfs_trans_log_inode(cur->bc_tp, cur->bc_ino.ip, in xfs_btree_log_block()
1648 xfs_ilog_fbroot(cur->bc_ino.whichfork)); in xfs_btree_log_block()
1658 struct xfs_btree_cur *cur, in xfs_btree_increment() argument
1668 ASSERT(level < cur->bc_nlevels); in xfs_btree_increment()
1671 xfs_btree_readahead(cur, level, XFS_BTCUR_RIGHTRA); in xfs_btree_increment()
1674 block = xfs_btree_get_block(cur, level, &bp); in xfs_btree_increment()
1677 error = xfs_btree_check_block(cur, block, level, bp); in xfs_btree_increment()
1683 if (++cur->bc_levels[level].ptr <= xfs_btree_get_numrecs(block)) in xfs_btree_increment()
1687 xfs_btree_get_sibling(cur, block, &ptr, XFS_BB_RIGHTSIB); in xfs_btree_increment()
1688 if (xfs_btree_ptr_is_null(cur, &ptr)) in xfs_btree_increment()
1691 XFS_BTREE_STATS_INC(cur, increment); in xfs_btree_increment()
1697 for (lev = level + 1; lev < cur->bc_nlevels; lev++) { in xfs_btree_increment()
1698 block = xfs_btree_get_block(cur, lev, &bp); in xfs_btree_increment()
1701 error = xfs_btree_check_block(cur, block, lev, bp); in xfs_btree_increment()
1706 if (++cur->bc_levels[lev].ptr <= xfs_btree_get_numrecs(block)) in xfs_btree_increment()
1710 xfs_btree_readahead(cur, lev, XFS_BTCUR_RIGHTRA); in xfs_btree_increment()
1717 if (lev == cur->bc_nlevels) { in xfs_btree_increment()
1718 if (cur->bc_ops->type == XFS_BTREE_TYPE_INODE) in xfs_btree_increment()
1721 xfs_btree_mark_sick(cur); in xfs_btree_increment()
1725 ASSERT(lev < cur->bc_nlevels); in xfs_btree_increment()
1731 for (block = xfs_btree_get_block(cur, lev, &bp); lev > level; ) { in xfs_btree_increment()
1734 ptrp = xfs_btree_ptr_addr(cur, cur->bc_levels[lev].ptr, block); in xfs_btree_increment()
1736 error = xfs_btree_read_buf_block(cur, ptrp, 0, &block, &bp); in xfs_btree_increment()
1740 xfs_btree_setbuf(cur, lev, bp); in xfs_btree_increment()
1741 cur->bc_levels[lev].ptr = 1; in xfs_btree_increment()
1761 struct xfs_btree_cur *cur, in xfs_btree_decrement() argument
1771 ASSERT(level < cur->bc_nlevels); in xfs_btree_decrement()
1774 xfs_btree_readahead(cur, level, XFS_BTCUR_LEFTRA); in xfs_btree_decrement()
1777 if (--cur->bc_levels[level].ptr > 0) in xfs_btree_decrement()
1781 block = xfs_btree_get_block(cur, level, &bp); in xfs_btree_decrement()
1784 error = xfs_btree_check_block(cur, block, level, bp); in xfs_btree_decrement()
1790 xfs_btree_get_sibling(cur, block, &ptr, XFS_BB_LEFTSIB); in xfs_btree_decrement()
1791 if (xfs_btree_ptr_is_null(cur, &ptr)) in xfs_btree_decrement()
1794 XFS_BTREE_STATS_INC(cur, decrement); in xfs_btree_decrement()
1800 for (lev = level + 1; lev < cur->bc_nlevels; lev++) { in xfs_btree_decrement()
1801 if (--cur->bc_levels[lev].ptr > 0) in xfs_btree_decrement()
1804 xfs_btree_readahead(cur, lev, XFS_BTCUR_LEFTRA); in xfs_btree_decrement()
1811 if (lev == cur->bc_nlevels) { in xfs_btree_decrement()
1812 if (cur->bc_ops->type == XFS_BTREE_TYPE_INODE) in xfs_btree_decrement()
1815 xfs_btree_mark_sick(cur); in xfs_btree_decrement()
1819 ASSERT(lev < cur->bc_nlevels); in xfs_btree_decrement()
1825 for (block = xfs_btree_get_block(cur, lev, &bp); lev > level; ) { in xfs_btree_decrement()
1828 ptrp = xfs_btree_ptr_addr(cur, cur->bc_levels[lev].ptr, block); in xfs_btree_decrement()
1830 error = xfs_btree_read_buf_block(cur, ptrp, 0, &block, &bp); in xfs_btree_decrement()
1833 xfs_btree_setbuf(cur, lev, bp); in xfs_btree_decrement()
1834 cur->bc_levels[lev].ptr = xfs_btree_get_numrecs(block); in xfs_btree_decrement()
1854 struct xfs_btree_cur *cur, in xfs_btree_check_block_owner() argument
1859 if (!xfs_has_crc(cur->bc_mp) || in xfs_btree_check_block_owner()
1860 (cur->bc_flags & XFS_BTREE_BMBT_INVALID_OWNER)) in xfs_btree_check_block_owner()
1863 owner = xfs_btree_owner(cur); in xfs_btree_check_block_owner()
1864 if (cur->bc_ops->ptr_len == XFS_BTREE_LONG_PTR_LEN) { in xfs_btree_check_block_owner()
1877 struct xfs_btree_cur *cur, /* btree cursor */ in xfs_btree_lookup_get_block() argument
1887 if (xfs_btree_at_iroot(cur, level)) { in xfs_btree_lookup_get_block()
1888 *blkp = xfs_btree_get_iroot(cur); in xfs_btree_lookup_get_block()
1898 bp = cur->bc_levels[level].bp; in xfs_btree_lookup_get_block()
1899 error = xfs_btree_ptr_to_daddr(cur, pp, &daddr); in xfs_btree_lookup_get_block()
1907 error = xfs_btree_read_buf_block(cur, pp, 0, blkp, &bp); in xfs_btree_lookup_get_block()
1912 if (xfs_btree_check_block_owner(cur, *blkp) != NULL) in xfs_btree_lookup_get_block()
1923 xfs_btree_setbuf(cur, level, bp); in xfs_btree_lookup_get_block()
1929 xfs_trans_brelse(cur->bc_tp, bp); in xfs_btree_lookup_get_block()
1930 xfs_btree_mark_sick(cur); in xfs_btree_lookup_get_block()
1941 struct xfs_btree_cur *cur, in xfs_lookup_get_search_key() argument
1948 cur->bc_ops->init_key_from_rec(kp, in xfs_lookup_get_search_key()
1949 xfs_btree_rec_addr(cur, keyno, block)); in xfs_lookup_get_search_key()
1953 return xfs_btree_key_addr(cur, keyno, block); in xfs_lookup_get_search_key()
1961 struct xfs_btree_cur *cur, in xfs_btree_init_ptr_from_cur() argument
1964 if (cur->bc_ops->type == XFS_BTREE_TYPE_INODE) { in xfs_btree_init_ptr_from_cur()
1970 } else if (cur->bc_flags & XFS_BTREE_STAGING) { in xfs_btree_init_ptr_from_cur()
1971 ptr->s = cpu_to_be32(cur->bc_ag.afake->af_root); in xfs_btree_init_ptr_from_cur()
1973 cur->bc_ops->init_ptr_from_cur(cur, ptr); in xfs_btree_init_ptr_from_cur()
1983 struct xfs_btree_cur *cur, /* btree cursor */ in xfs_btree_lookup() argument
1995 XFS_BTREE_STATS_INC(cur, lookup); in xfs_btree_lookup()
1998 if (XFS_IS_CORRUPT(cur->bc_mp, cur->bc_nlevels == 0)) { in xfs_btree_lookup()
1999 xfs_btree_mark_sick(cur); in xfs_btree_lookup()
2007 xfs_btree_init_ptr_from_cur(cur, &ptr); in xfs_btree_lookup()
2016 for (level = cur->bc_nlevels - 1, cmp_r = 1; level >= 0; level--) { in xfs_btree_lookup()
2018 error = xfs_btree_lookup_get_block(cur, level, pp, &block); in xfs_btree_lookup()
2039 if (level != 0 || cur->bc_nlevels != 1) { in xfs_btree_lookup()
2042 cur->bc_mp, block, in xfs_btree_lookup()
2044 xfs_btree_mark_sick(cur); in xfs_btree_lookup()
2048 cur->bc_levels[0].ptr = dir != XFS_LOOKUP_LE; in xfs_btree_lookup()
2058 XFS_BTREE_STATS_INC(cur, compare); in xfs_btree_lookup()
2064 kp = xfs_lookup_get_search_key(cur, level, in xfs_btree_lookup()
2074 cmp_r = cur->bc_ops->cmp_key_with_cur(cur, kp); in xfs_btree_lookup()
2095 pp = xfs_btree_ptr_addr(cur, keyno, block); in xfs_btree_lookup()
2097 error = xfs_btree_debug_check_ptr(cur, pp, 0, level); in xfs_btree_lookup()
2101 cur->bc_levels[level].ptr = keyno; in xfs_btree_lookup()
2112 xfs_btree_get_sibling(cur, block, &ptr, XFS_BB_RIGHTSIB); in xfs_btree_lookup()
2115 !xfs_btree_ptr_is_null(cur, &ptr)) { in xfs_btree_lookup()
2118 cur->bc_levels[0].ptr = keyno; in xfs_btree_lookup()
2119 error = xfs_btree_increment(cur, 0, &i); in xfs_btree_lookup()
2122 if (XFS_IS_CORRUPT(cur->bc_mp, i != 1)) { in xfs_btree_lookup()
2123 xfs_btree_mark_sick(cur); in xfs_btree_lookup()
2131 cur->bc_levels[0].ptr = keyno; in xfs_btree_lookup()
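The xfs_btree_lookup() lines above binary-search the keys of each block (keyno bounded by low and high, driven by cmp_key_with_cur) before descending through the chosen pointer. A simplified, hedged sketch of the per-block search for an XFS_LOOKUP_LE-style result is shown below; the comparison callback and its sign convention are assumptions for illustration, not the kernel's exact contract:

/*
 * Illustrative per-block binary search, not the kernel code.  cmp()
 * returns <0, 0 or >0 as keys[keyno] compares against the search key.
 */
#include <stdio.h>

typedef int (*cmp_fn)(const void *keys, int keyno, const void *search);

/* Largest 1-based entry whose key is <= the search key; 0 if none. */
static int lookup_le(const void *keys, int nrecs, const void *search, cmp_fn cmp)
{
	int lo = 1, hi = nrecs, keyno = 0;

	while (lo <= hi) {
		int mid = (lo + hi) / 2;

		if (cmp(keys, mid, search) <= 0) {
			keyno = mid;	/* keys[mid] <= search: remember, go right */
			lo = mid + 1;
		} else {
			hi = mid - 1;	/* keys[mid] > search: go left */
		}
	}
	return keyno;
}

/* 1-based comparison over a plain int array, standing in for btree keys. */
static int cmp_int(const void *keys, int keyno, const void *search)
{
	const int *k = keys;

	return k[keyno - 1] - *(const int *)search;
}

int main(void)
{
	int keys[] = { 10, 20, 30, 40 };
	int want = 25;

	printf("LE lookup of %d lands on entry %d\n", want,
	       lookup_le(keys, 4, &want, cmp_int));	/* prints 2 */
	return 0;
}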
2149 struct xfs_btree_cur *cur, in xfs_btree_high_key_from_key() argument
2152 ASSERT(cur->bc_ops->geom_flags & XFS_BTGEO_OVERLAPPING); in xfs_btree_high_key_from_key()
2154 (cur->bc_ops->key_len / 2)); in xfs_btree_high_key_from_key()
2160 struct xfs_btree_cur *cur, in xfs_btree_get_leaf_keys() argument
2170 rec = xfs_btree_rec_addr(cur, 1, block); in xfs_btree_get_leaf_keys()
2171 cur->bc_ops->init_key_from_rec(key, rec); in xfs_btree_get_leaf_keys()
2173 if (cur->bc_ops->geom_flags & XFS_BTGEO_OVERLAPPING) { in xfs_btree_get_leaf_keys()
2175 cur->bc_ops->init_high_key_from_rec(&max_hkey, rec); in xfs_btree_get_leaf_keys()
2177 rec = xfs_btree_rec_addr(cur, n, block); in xfs_btree_get_leaf_keys()
2178 cur->bc_ops->init_high_key_from_rec(&hkey, rec); in xfs_btree_get_leaf_keys()
2179 if (xfs_btree_keycmp_gt(cur, &hkey, &max_hkey)) in xfs_btree_get_leaf_keys()
2183 high = xfs_btree_high_key_from_key(cur, key); in xfs_btree_get_leaf_keys()
2184 memcpy(high, &max_hkey, cur->bc_ops->key_len / 2); in xfs_btree_get_leaf_keys()
2191 struct xfs_btree_cur *cur, in xfs_btree_get_node_keys() argument
2200 if (cur->bc_ops->geom_flags & XFS_BTGEO_OVERLAPPING) { in xfs_btree_get_node_keys()
2201 memcpy(key, xfs_btree_key_addr(cur, 1, block), in xfs_btree_get_node_keys()
2202 cur->bc_ops->key_len / 2); in xfs_btree_get_node_keys()
2204 max_hkey = xfs_btree_high_key_addr(cur, 1, block); in xfs_btree_get_node_keys()
2206 hkey = xfs_btree_high_key_addr(cur, n, block); in xfs_btree_get_node_keys()
2207 if (xfs_btree_keycmp_gt(cur, hkey, max_hkey)) in xfs_btree_get_node_keys()
2211 high = xfs_btree_high_key_from_key(cur, key); in xfs_btree_get_node_keys()
2212 memcpy(high, max_hkey, cur->bc_ops->key_len / 2); in xfs_btree_get_node_keys()
2214 memcpy(key, xfs_btree_key_addr(cur, 1, block), in xfs_btree_get_node_keys()
2215 cur->bc_ops->key_len); in xfs_btree_get_node_keys()
2222 struct xfs_btree_cur *cur, in xfs_btree_get_keys() argument
2227 xfs_btree_get_leaf_keys(cur, block, key); in xfs_btree_get_keys()
2229 xfs_btree_get_node_keys(cur, block, key); in xfs_btree_get_keys()
2241 struct xfs_btree_cur *cur, in xfs_btree_needs_key_update() argument
2244 return (cur->bc_ops->geom_flags & XFS_BTGEO_OVERLAPPING) || ptr == 1; in xfs_btree_needs_key_update()
2254 struct xfs_btree_cur *cur, in __xfs_btree_updkeys() argument
2268 ASSERT(cur->bc_ops->geom_flags & XFS_BTGEO_OVERLAPPING); in __xfs_btree_updkeys()
2271 if (level + 1 >= cur->bc_nlevels) in __xfs_btree_updkeys()
2274 trace_xfs_btree_updkeys(cur, level, bp0); in __xfs_btree_updkeys()
2277 hkey = xfs_btree_high_key_from_key(cur, lkey); in __xfs_btree_updkeys()
2278 xfs_btree_get_keys(cur, block, lkey); in __xfs_btree_updkeys()
2279 for (level++; level < cur->bc_nlevels; level++) { in __xfs_btree_updkeys()
2283 block = xfs_btree_get_block(cur, level, &bp); in __xfs_btree_updkeys()
2284 trace_xfs_btree_updkeys(cur, level, bp); in __xfs_btree_updkeys()
2286 error = xfs_btree_check_block(cur, block, level, bp); in __xfs_btree_updkeys()
2290 ptr = cur->bc_levels[level].ptr; in __xfs_btree_updkeys()
2291 nlkey = xfs_btree_key_addr(cur, ptr, block); in __xfs_btree_updkeys()
2292 nhkey = xfs_btree_high_key_addr(cur, ptr, block); in __xfs_btree_updkeys()
2294 xfs_btree_keycmp_eq(cur, nlkey, lkey) && in __xfs_btree_updkeys()
2295 xfs_btree_keycmp_eq(cur, nhkey, hkey)) in __xfs_btree_updkeys()
2297 xfs_btree_copy_keys(cur, nlkey, lkey, 1); in __xfs_btree_updkeys()
2298 xfs_btree_log_keys(cur, bp, ptr, ptr); in __xfs_btree_updkeys()
2299 if (level + 1 >= cur->bc_nlevels) in __xfs_btree_updkeys()
2301 xfs_btree_get_node_keys(cur, block, lkey); in __xfs_btree_updkeys()
2310 struct xfs_btree_cur *cur, in xfs_btree_updkeys_force() argument
2316 block = xfs_btree_get_block(cur, level, &bp); in xfs_btree_updkeys_force()
2317 return __xfs_btree_updkeys(cur, level, block, bp, true); in xfs_btree_updkeys_force()
2325 struct xfs_btree_cur *cur, in xfs_btree_update_keys() argument
2336 block = xfs_btree_get_block(cur, level, &bp); in xfs_btree_update_keys()
2337 if (cur->bc_ops->geom_flags & XFS_BTGEO_OVERLAPPING) in xfs_btree_update_keys()
2338 return __xfs_btree_updkeys(cur, level, block, bp, false); in xfs_btree_update_keys()
2346 xfs_btree_get_keys(cur, block, &key); in xfs_btree_update_keys()
2347 for (level++, ptr = 1; ptr == 1 && level < cur->bc_nlevels; level++) { in xfs_btree_update_keys()
2351 block = xfs_btree_get_block(cur, level, &bp); in xfs_btree_update_keys()
2353 error = xfs_btree_check_block(cur, block, level, bp); in xfs_btree_update_keys()
2357 ptr = cur->bc_levels[level].ptr; in xfs_btree_update_keys()
2358 kp = xfs_btree_key_addr(cur, ptr, block); in xfs_btree_update_keys()
2359 xfs_btree_copy_keys(cur, kp, &key, 1); in xfs_btree_update_keys()
2360 xfs_btree_log_keys(cur, bp, ptr, ptr); in xfs_btree_update_keys()
2373 struct xfs_btree_cur *cur, in xfs_btree_update() argument
2383 block = xfs_btree_get_block(cur, 0, &bp); in xfs_btree_update()
2386 error = xfs_btree_check_block(cur, block, 0, bp); in xfs_btree_update()
2391 ptr = cur->bc_levels[0].ptr; in xfs_btree_update()
2392 rp = xfs_btree_rec_addr(cur, ptr, block); in xfs_btree_update()
2395 xfs_btree_copy_recs(cur, rp, rec, 1); in xfs_btree_update()
2396 xfs_btree_log_recs(cur, bp, ptr, ptr); in xfs_btree_update()
2399 if (xfs_btree_needs_key_update(cur, ptr)) { in xfs_btree_update()
2400 error = xfs_btree_update_keys(cur, 0); in xfs_btree_update()
2417 struct xfs_btree_cur *cur, in xfs_btree_lshift() argument
2435 if (xfs_btree_at_iroot(cur, level)) in xfs_btree_lshift()
2439 right = xfs_btree_get_block(cur, level, &rbp); in xfs_btree_lshift()
2442 error = xfs_btree_check_block(cur, right, level, rbp); in xfs_btree_lshift()
2448 xfs_btree_get_sibling(cur, right, &lptr, XFS_BB_LEFTSIB); in xfs_btree_lshift()
2449 if (xfs_btree_ptr_is_null(cur, &lptr)) in xfs_btree_lshift()
2456 if (cur->bc_levels[level].ptr <= 1) in xfs_btree_lshift()
2460 error = xfs_btree_read_buf_block(cur, &lptr, 0, &left, &lbp); in xfs_btree_lshift()
2466 if (lrecs == cur->bc_ops->get_maxrecs(cur, level)) in xfs_btree_lshift()
2479 XFS_BTREE_STATS_INC(cur, lshift); in xfs_btree_lshift()
2480 XFS_BTREE_STATS_ADD(cur, moves, 1); in xfs_btree_lshift()
2491 lkp = xfs_btree_key_addr(cur, lrecs, left); in xfs_btree_lshift()
2492 rkp = xfs_btree_key_addr(cur, 1, right); in xfs_btree_lshift()
2494 lpp = xfs_btree_ptr_addr(cur, lrecs, left); in xfs_btree_lshift()
2495 rpp = xfs_btree_ptr_addr(cur, 1, right); in xfs_btree_lshift()
2497 error = xfs_btree_debug_check_ptr(cur, rpp, 0, level); in xfs_btree_lshift()
2501 xfs_btree_copy_keys(cur, lkp, rkp, 1); in xfs_btree_lshift()
2502 xfs_btree_copy_ptrs(cur, lpp, rpp, 1); in xfs_btree_lshift()
2504 xfs_btree_log_keys(cur, lbp, lrecs, lrecs); in xfs_btree_lshift()
2505 xfs_btree_log_ptrs(cur, lbp, lrecs, lrecs); in xfs_btree_lshift()
2507 ASSERT(cur->bc_ops->keys_inorder(cur, in xfs_btree_lshift()
2508 xfs_btree_key_addr(cur, lrecs - 1, left), lkp)); in xfs_btree_lshift()
2513 lrp = xfs_btree_rec_addr(cur, lrecs, left); in xfs_btree_lshift()
2514 rrp = xfs_btree_rec_addr(cur, 1, right); in xfs_btree_lshift()
2516 xfs_btree_copy_recs(cur, lrp, rrp, 1); in xfs_btree_lshift()
2517 xfs_btree_log_recs(cur, lbp, lrecs, lrecs); in xfs_btree_lshift()
2519 ASSERT(cur->bc_ops->recs_inorder(cur, in xfs_btree_lshift()
2520 xfs_btree_rec_addr(cur, lrecs - 1, left), lrp)); in xfs_btree_lshift()
2524 xfs_btree_log_block(cur, lbp, XFS_BB_NUMRECS); in xfs_btree_lshift()
2527 xfs_btree_log_block(cur, rbp, XFS_BB_NUMRECS); in xfs_btree_lshift()
2532 XFS_BTREE_STATS_ADD(cur, moves, rrecs - 1); in xfs_btree_lshift()
2536 error = xfs_btree_debug_check_ptr(cur, rpp, i + 1, level); in xfs_btree_lshift()
2541 xfs_btree_shift_keys(cur, in xfs_btree_lshift()
2542 xfs_btree_key_addr(cur, 2, right), in xfs_btree_lshift()
2544 xfs_btree_shift_ptrs(cur, in xfs_btree_lshift()
2545 xfs_btree_ptr_addr(cur, 2, right), in xfs_btree_lshift()
2548 xfs_btree_log_keys(cur, rbp, 1, rrecs); in xfs_btree_lshift()
2549 xfs_btree_log_ptrs(cur, rbp, 1, rrecs); in xfs_btree_lshift()
2552 xfs_btree_shift_recs(cur, in xfs_btree_lshift()
2553 xfs_btree_rec_addr(cur, 2, right), in xfs_btree_lshift()
2555 xfs_btree_log_recs(cur, rbp, 1, rrecs); in xfs_btree_lshift()
2562 if (cur->bc_ops->geom_flags & XFS_BTGEO_OVERLAPPING) { in xfs_btree_lshift()
2563 error = xfs_btree_dup_cursor(cur, &tcur); in xfs_btree_lshift()
2568 xfs_btree_mark_sick(cur); in xfs_btree_lshift()
2586 error = xfs_btree_update_keys(cur, level); in xfs_btree_lshift()
2591 cur->bc_levels[level].ptr--; in xfs_btree_lshift()
2614 struct xfs_btree_cur *cur, in xfs_btree_rshift() argument
2630 if (xfs_btree_at_iroot(cur, level)) in xfs_btree_rshift()
2634 left = xfs_btree_get_block(cur, level, &lbp); in xfs_btree_rshift()
2637 error = xfs_btree_check_block(cur, left, level, lbp); in xfs_btree_rshift()
2643 xfs_btree_get_sibling(cur, left, &rptr, XFS_BB_RIGHTSIB); in xfs_btree_rshift()
2644 if (xfs_btree_ptr_is_null(cur, &rptr)) in xfs_btree_rshift()
2652 if (cur->bc_levels[level].ptr >= lrecs) in xfs_btree_rshift()
2656 error = xfs_btree_read_buf_block(cur, &rptr, 0, &right, &rbp); in xfs_btree_rshift()
2662 if (rrecs == cur->bc_ops->get_maxrecs(cur, level)) in xfs_btree_rshift()
2665 XFS_BTREE_STATS_INC(cur, rshift); in xfs_btree_rshift()
2666 XFS_BTREE_STATS_ADD(cur, moves, rrecs); in xfs_btree_rshift()
2678 lkp = xfs_btree_key_addr(cur, lrecs, left); in xfs_btree_rshift()
2679 lpp = xfs_btree_ptr_addr(cur, lrecs, left); in xfs_btree_rshift()
2680 rkp = xfs_btree_key_addr(cur, 1, right); in xfs_btree_rshift()
2681 rpp = xfs_btree_ptr_addr(cur, 1, right); in xfs_btree_rshift()
2684 error = xfs_btree_debug_check_ptr(cur, rpp, i, level); in xfs_btree_rshift()
2689 xfs_btree_shift_keys(cur, rkp, 1, rrecs); in xfs_btree_rshift()
2690 xfs_btree_shift_ptrs(cur, rpp, 1, rrecs); in xfs_btree_rshift()
2692 error = xfs_btree_debug_check_ptr(cur, lpp, 0, level); in xfs_btree_rshift()
2697 xfs_btree_copy_keys(cur, rkp, lkp, 1); in xfs_btree_rshift()
2698 xfs_btree_copy_ptrs(cur, rpp, lpp, 1); in xfs_btree_rshift()
2700 xfs_btree_log_keys(cur, rbp, 1, rrecs + 1); in xfs_btree_rshift()
2701 xfs_btree_log_ptrs(cur, rbp, 1, rrecs + 1); in xfs_btree_rshift()
2703 ASSERT(cur->bc_ops->keys_inorder(cur, rkp, in xfs_btree_rshift()
2704 xfs_btree_key_addr(cur, 2, right))); in xfs_btree_rshift()
2710 lrp = xfs_btree_rec_addr(cur, lrecs, left); in xfs_btree_rshift()
2711 rrp = xfs_btree_rec_addr(cur, 1, right); in xfs_btree_rshift()
2713 xfs_btree_shift_recs(cur, rrp, 1, rrecs); in xfs_btree_rshift()
2716 xfs_btree_copy_recs(cur, rrp, lrp, 1); in xfs_btree_rshift()
2717 xfs_btree_log_recs(cur, rbp, 1, rrecs + 1); in xfs_btree_rshift()
2724 xfs_btree_log_block(cur, lbp, XFS_BB_NUMRECS); in xfs_btree_rshift()
2727 xfs_btree_log_block(cur, rbp, XFS_BB_NUMRECS); in xfs_btree_rshift()
2733 error = xfs_btree_dup_cursor(cur, &tcur); in xfs_btree_rshift()
2738 xfs_btree_mark_sick(cur); in xfs_btree_rshift()
2748 if (cur->bc_ops->geom_flags & XFS_BTGEO_OVERLAPPING) { in xfs_btree_rshift()
2749 error = xfs_btree_update_keys(cur, level); in xfs_btree_rshift()
2778 struct xfs_btree_cur *cur, in xfs_btree_alloc_block() argument
2792 if (unlikely(cur->bc_flags & XFS_BTREE_STAGING)) { in xfs_btree_alloc_block()
2797 error = cur->bc_ops->alloc_block(cur, hint_block, new_block, stat); in xfs_btree_alloc_block()
2798 trace_xfs_btree_alloc_block(cur, new_block, *stat, error); in xfs_btree_alloc_block()
2809 struct xfs_btree_cur *cur, in __xfs_btree_split() argument
2831 XFS_BTREE_STATS_INC(cur, split); in __xfs_btree_split()
2834 left = xfs_btree_get_block(cur, level, &lbp); in __xfs_btree_split()
2837 error = xfs_btree_check_block(cur, left, level, lbp); in __xfs_btree_split()
2842 xfs_btree_buf_to_ptr(cur, lbp, &lptr); in __xfs_btree_split()
2845 error = xfs_btree_alloc_block(cur, &lptr, &rptr, stat); in __xfs_btree_split()
2850 XFS_BTREE_STATS_INC(cur, alloc); in __xfs_btree_split()
2853 error = xfs_btree_get_buf_block(cur, &rptr, &right, &rbp); in __xfs_btree_split()
2858 xfs_btree_init_block_cur(cur, rbp, xfs_btree_get_level(left), 0); in __xfs_btree_split()
2867 if ((lrecs & 1) && cur->bc_levels[level].ptr <= rrecs + 1) in __xfs_btree_split()
2871 XFS_BTREE_STATS_ADD(cur, moves, rrecs); in __xfs_btree_split()
2890 lkp = xfs_btree_key_addr(cur, src_index, left); in __xfs_btree_split()
2891 lpp = xfs_btree_ptr_addr(cur, src_index, left); in __xfs_btree_split()
2892 rkp = xfs_btree_key_addr(cur, 1, right); in __xfs_btree_split()
2893 rpp = xfs_btree_ptr_addr(cur, 1, right); in __xfs_btree_split()
2896 error = xfs_btree_debug_check_ptr(cur, lpp, i, level); in __xfs_btree_split()
2902 xfs_btree_copy_keys(cur, rkp, lkp, rrecs); in __xfs_btree_split()
2903 xfs_btree_copy_ptrs(cur, rpp, lpp, rrecs); in __xfs_btree_split()
2905 xfs_btree_log_keys(cur, rbp, 1, rrecs); in __xfs_btree_split()
2906 xfs_btree_log_ptrs(cur, rbp, 1, rrecs); in __xfs_btree_split()
2909 xfs_btree_get_node_keys(cur, right, key); in __xfs_btree_split()
2915 lrp = xfs_btree_rec_addr(cur, src_index, left); in __xfs_btree_split()
2916 rrp = xfs_btree_rec_addr(cur, 1, right); in __xfs_btree_split()
2919 xfs_btree_copy_recs(cur, rrp, lrp, rrecs); in __xfs_btree_split()
2920 xfs_btree_log_recs(cur, rbp, 1, rrecs); in __xfs_btree_split()
2923 xfs_btree_get_leaf_keys(cur, right, key); in __xfs_btree_split()
2930 xfs_btree_get_sibling(cur, left, &rrptr, XFS_BB_RIGHTSIB); in __xfs_btree_split()
2931 xfs_btree_set_sibling(cur, right, &rrptr, XFS_BB_RIGHTSIB); in __xfs_btree_split()
2932 xfs_btree_set_sibling(cur, right, &lptr, XFS_BB_LEFTSIB); in __xfs_btree_split()
2933 xfs_btree_set_sibling(cur, left, &rptr, XFS_BB_RIGHTSIB); in __xfs_btree_split()
2935 xfs_btree_log_block(cur, rbp, XFS_BB_ALL_BITS); in __xfs_btree_split()
2936 xfs_btree_log_block(cur, lbp, XFS_BB_NUMRECS | XFS_BB_RIGHTSIB); in __xfs_btree_split()
2942 if (!xfs_btree_ptr_is_null(cur, &rrptr)) { in __xfs_btree_split()
2943 error = xfs_btree_read_buf_block(cur, &rrptr, in __xfs_btree_split()
2947 xfs_btree_set_sibling(cur, rrblock, &rptr, XFS_BB_LEFTSIB); in __xfs_btree_split()
2948 xfs_btree_log_block(cur, rrbp, XFS_BB_LEFTSIB); in __xfs_btree_split()
2952 if (cur->bc_ops->geom_flags & XFS_BTGEO_OVERLAPPING) { in __xfs_btree_split()
2953 error = xfs_btree_update_keys(cur, level); in __xfs_btree_split()
2963 if (cur->bc_levels[level].ptr > lrecs + 1) { in __xfs_btree_split()
2964 xfs_btree_setbuf(cur, level, rbp); in __xfs_btree_split()
2965 cur->bc_levels[level].ptr -= lrecs; in __xfs_btree_split()
2971 if (level + 1 < cur->bc_nlevels) { in __xfs_btree_split()
2972 error = xfs_btree_dup_cursor(cur, curp); in __xfs_btree_split()
2990 struct xfs_btree_cur *cur; member
3024 xfs_trans_set_context(args->cur->bc_tp); in xfs_btree_split_worker()
3026 args->result = __xfs_btree_split(args->cur, args->level, args->ptrp, in xfs_btree_split_worker()
3029 xfs_trans_clear_context(args->cur->bc_tp); in xfs_btree_split_worker()
3060 struct xfs_btree_cur *cur, in xfs_btree_split() argument
3070 if (!xfs_btree_is_bmap(cur->bc_ops) || in xfs_btree_split()
3071 cur->bc_tp->t_highest_agno == NULLAGNUMBER) in xfs_btree_split()
3072 return __xfs_btree_split(cur, level, ptrp, key, curp, stat); in xfs_btree_split()
3074 args.cur = cur; in xfs_btree_split()
3095 struct xfs_btree_cur *cur, in xfs_btree_promote_leaf_iroot() argument
3109 rp = xfs_btree_rec_addr(cur, 1, block); in xfs_btree_promote_leaf_iroot()
3110 crp = xfs_btree_rec_addr(cur, 1, cblock); in xfs_btree_promote_leaf_iroot()
3111 xfs_btree_copy_recs(cur, crp, rp, numrecs); in xfs_btree_promote_leaf_iroot()
3122 cur->bc_ops->broot_realloc(cur, 0); in xfs_btree_promote_leaf_iroot()
3123 cur->bc_nlevels++; in xfs_btree_promote_leaf_iroot()
3124 cur->bc_levels[1].ptr = 1; in xfs_btree_promote_leaf_iroot()
3130 broot = cur->bc_ops->broot_realloc(cur, 1); in xfs_btree_promote_leaf_iroot()
3131 xfs_btree_init_block(cur->bc_mp, broot, cur->bc_ops, in xfs_btree_promote_leaf_iroot()
3132 cur->bc_nlevels - 1, 1, cur->bc_ino.ip->i_ino); in xfs_btree_promote_leaf_iroot()
3134 pp = xfs_btree_ptr_addr(cur, 1, broot); in xfs_btree_promote_leaf_iroot()
3135 kp = xfs_btree_key_addr(cur, 1, broot); in xfs_btree_promote_leaf_iroot()
3136 xfs_btree_copy_ptrs(cur, pp, cptr, 1); in xfs_btree_promote_leaf_iroot()
3137 xfs_btree_get_keys(cur, cblock, kp); in xfs_btree_promote_leaf_iroot()
3140 xfs_btree_setbuf(cur, 0, cbp); in xfs_btree_promote_leaf_iroot()
3141 xfs_btree_log_block(cur, cbp, XFS_BB_ALL_BITS); in xfs_btree_promote_leaf_iroot()
3142 xfs_btree_log_recs(cur, cbp, 1, numrecs); in xfs_btree_promote_leaf_iroot()
3154 struct xfs_btree_cur *cur, in xfs_btree_promote_node_iroot() argument
3175 cur->bc_nlevels++; in xfs_btree_promote_node_iroot()
3176 cur->bc_levels[level + 1].ptr = 1; in xfs_btree_promote_node_iroot()
3183 kp = xfs_btree_key_addr(cur, 1, block); in xfs_btree_promote_node_iroot()
3184 ckp = xfs_btree_key_addr(cur, 1, cblock); in xfs_btree_promote_node_iroot()
3185 xfs_btree_copy_keys(cur, ckp, kp, numrecs); in xfs_btree_promote_node_iroot()
3188 pp = xfs_btree_ptr_addr(cur, 1, block); in xfs_btree_promote_node_iroot()
3189 cpp = xfs_btree_ptr_addr(cur, 1, cblock); in xfs_btree_promote_node_iroot()
3191 error = xfs_btree_debug_check_ptr(cur, pp, i, level); in xfs_btree_promote_node_iroot()
3195 xfs_btree_copy_ptrs(cur, cpp, pp, numrecs); in xfs_btree_promote_node_iroot()
3201 error = xfs_btree_debug_check_ptr(cur, cptr, 0, level); in xfs_btree_promote_node_iroot()
3204 xfs_btree_copy_ptrs(cur, pp, cptr, 1); in xfs_btree_promote_node_iroot()
3205 xfs_btree_get_keys(cur, cblock, kp); in xfs_btree_promote_node_iroot()
3207 cur->bc_ops->broot_realloc(cur, 1); in xfs_btree_promote_node_iroot()
3210 xfs_btree_setbuf(cur, level, cbp); in xfs_btree_promote_node_iroot()
3211 xfs_btree_log_block(cur, cbp, XFS_BB_ALL_BITS); in xfs_btree_promote_node_iroot()
3212 xfs_btree_log_keys(cur, cbp, 1, numrecs); in xfs_btree_promote_node_iroot()
3213 xfs_btree_log_ptrs(cur, cbp, 1, numrecs); in xfs_btree_promote_node_iroot()
3223 struct xfs_btree_cur *cur, /* btree cursor */ in xfs_btree_new_iroot() argument
3235 XFS_BTREE_STATS_INC(cur, newroot); in xfs_btree_new_iroot()
3237 ASSERT(cur->bc_ops->type == XFS_BTREE_TYPE_INODE); in xfs_btree_new_iroot()
3239 level = cur->bc_nlevels - 1; in xfs_btree_new_iroot()
3241 block = xfs_btree_get_iroot(cur); in xfs_btree_new_iroot()
3242 ASSERT(level > 0 || (cur->bc_ops->geom_flags & XFS_BTGEO_IROOT_RECORDS)); in xfs_btree_new_iroot()
3244 aptr = *xfs_btree_ptr_addr(cur, 1, block); in xfs_btree_new_iroot()
3246 aptr.l = cpu_to_be64(XFS_INO_TO_FSB(cur->bc_mp, in xfs_btree_new_iroot()
3247 cur->bc_ino.ip->i_ino)); in xfs_btree_new_iroot()
3250 error = xfs_btree_alloc_block(cur, &aptr, &nptr, stat); in xfs_btree_new_iroot()
3256 XFS_BTREE_STATS_INC(cur, alloc); in xfs_btree_new_iroot()
3259 error = xfs_btree_get_buf_block(cur, &nptr, &cblock, &cbp); in xfs_btree_new_iroot()
3267 memcpy(cblock, block, xfs_btree_block_len(cur)); in xfs_btree_new_iroot()
3268 if (xfs_has_crc(cur->bc_mp)) { in xfs_btree_new_iroot()
3270 if (cur->bc_ops->ptr_len == XFS_BTREE_LONG_PTR_LEN) in xfs_btree_new_iroot()
3277 error = xfs_btree_promote_node_iroot(cur, block, level, cbp, in xfs_btree_new_iroot()
3282 xfs_btree_promote_leaf_iroot(cur, block, cbp, &nptr, cblock); in xfs_btree_new_iroot()
3285 *logflags |= XFS_ILOG_CORE | xfs_ilog_fbroot(cur->bc_ino.whichfork); in xfs_btree_new_iroot()
3294 struct xfs_btree_cur *cur, in xfs_btree_set_root() argument
3298 if (cur->bc_flags & XFS_BTREE_STAGING) { in xfs_btree_set_root()
3300 cur->bc_ag.afake->af_root = be32_to_cpu(ptr->s); in xfs_btree_set_root()
3301 cur->bc_ag.afake->af_levels += inc; in xfs_btree_set_root()
3303 cur->bc_ops->set_root(cur, ptr, inc); in xfs_btree_set_root()
3312 struct xfs_btree_cur *cur, /* btree cursor */ in xfs_btree_new_root() argument
3328 XFS_BTREE_STATS_INC(cur, newroot); in xfs_btree_new_root()
3331 xfs_btree_init_ptr_from_cur(cur, &rptr); in xfs_btree_new_root()
3334 error = xfs_btree_alloc_block(cur, &rptr, &lptr, stat); in xfs_btree_new_root()
3339 XFS_BTREE_STATS_INC(cur, alloc); in xfs_btree_new_root()
3342 error = xfs_btree_get_buf_block(cur, &lptr, &new, &nbp); in xfs_btree_new_root()
3347 xfs_btree_set_root(cur, &lptr, 1); in xfs_btree_new_root()
3355 block = xfs_btree_get_block(cur, cur->bc_nlevels - 1, &bp); in xfs_btree_new_root()
3358 error = xfs_btree_check_block(cur, block, cur->bc_nlevels - 1, bp); in xfs_btree_new_root()
3363 xfs_btree_get_sibling(cur, block, &rptr, XFS_BB_RIGHTSIB); in xfs_btree_new_root()
3364 if (!xfs_btree_ptr_is_null(cur, &rptr)) { in xfs_btree_new_root()
3367 xfs_btree_buf_to_ptr(cur, lbp, &lptr); in xfs_btree_new_root()
3369 error = xfs_btree_read_buf_block(cur, &rptr, 0, &right, &rbp); in xfs_btree_new_root()
3377 xfs_btree_buf_to_ptr(cur, rbp, &rptr); in xfs_btree_new_root()
3379 xfs_btree_get_sibling(cur, right, &lptr, XFS_BB_LEFTSIB); in xfs_btree_new_root()
3380 error = xfs_btree_read_buf_block(cur, &lptr, 0, &left, &lbp); in xfs_btree_new_root()
3388 xfs_btree_init_block_cur(cur, nbp, cur->bc_nlevels, 2); in xfs_btree_new_root()
3389 xfs_btree_log_block(cur, nbp, XFS_BB_ALL_BITS); in xfs_btree_new_root()
3390 ASSERT(!xfs_btree_ptr_is_null(cur, &lptr) && in xfs_btree_new_root()
3391 !xfs_btree_ptr_is_null(cur, &rptr)); in xfs_btree_new_root()
3399 xfs_btree_get_node_keys(cur, left, in xfs_btree_new_root()
3400 xfs_btree_key_addr(cur, 1, new)); in xfs_btree_new_root()
3401 xfs_btree_get_node_keys(cur, right, in xfs_btree_new_root()
3402 xfs_btree_key_addr(cur, 2, new)); in xfs_btree_new_root()
3409 xfs_btree_get_leaf_keys(cur, left, in xfs_btree_new_root()
3410 xfs_btree_key_addr(cur, 1, new)); in xfs_btree_new_root()
3411 xfs_btree_get_leaf_keys(cur, right, in xfs_btree_new_root()
3412 xfs_btree_key_addr(cur, 2, new)); in xfs_btree_new_root()
3414 xfs_btree_log_keys(cur, nbp, 1, 2); in xfs_btree_new_root()
3417 xfs_btree_copy_ptrs(cur, in xfs_btree_new_root()
3418 xfs_btree_ptr_addr(cur, 1, new), &lptr, 1); in xfs_btree_new_root()
3419 xfs_btree_copy_ptrs(cur, in xfs_btree_new_root()
3420 xfs_btree_ptr_addr(cur, 2, new), &rptr, 1); in xfs_btree_new_root()
3421 xfs_btree_log_ptrs(cur, nbp, 1, 2); in xfs_btree_new_root()
3424 xfs_btree_setbuf(cur, cur->bc_nlevels, nbp); in xfs_btree_new_root()
3425 cur->bc_levels[cur->bc_nlevels].ptr = nptr; in xfs_btree_new_root()
3426 cur->bc_nlevels++; in xfs_btree_new_root()
3427 ASSERT(cur->bc_nlevels <= cur->bc_maxlevels); in xfs_btree_new_root()
3439 struct xfs_btree_cur *cur, /* btree cursor */ in xfs_btree_make_block_unfull() argument
3451 if (xfs_btree_at_iroot(cur, level)) { in xfs_btree_make_block_unfull()
3452 struct xfs_inode *ip = cur->bc_ino.ip; in xfs_btree_make_block_unfull()
3454 if (numrecs < cur->bc_ops->get_dmaxrecs(cur, level)) { in xfs_btree_make_block_unfull()
3456 cur->bc_ops->broot_realloc(cur, numrecs + 1); in xfs_btree_make_block_unfull()
3462 error = xfs_btree_new_iroot(cur, &logflags, stat); in xfs_btree_make_block_unfull()
3466 xfs_trans_log_inode(cur->bc_tp, ip, logflags); in xfs_btree_make_block_unfull()
3473 error = xfs_btree_rshift(cur, level, stat); in xfs_btree_make_block_unfull()
3478 error = xfs_btree_lshift(cur, level, stat); in xfs_btree_make_block_unfull()
3483 *oindex = *index = cur->bc_levels[level].ptr; in xfs_btree_make_block_unfull()
3493 error = xfs_btree_split(cur, level, nptr, key, ncur, stat); in xfs_btree_make_block_unfull()
3498 *index = cur->bc_levels[level].ptr; in xfs_btree_make_block_unfull()
3508 struct xfs_btree_cur *cur, /* btree cursor */ in xfs_btree_insrec() argument
3536 if (cur->bc_ops->type != XFS_BTREE_TYPE_INODE && in xfs_btree_insrec()
3537 level >= cur->bc_nlevels) { in xfs_btree_insrec()
3538 error = xfs_btree_new_root(cur, stat); in xfs_btree_insrec()
3539 xfs_btree_set_ptr_null(cur, ptrp); in xfs_btree_insrec()
3545 ptr = cur->bc_levels[level].ptr; in xfs_btree_insrec()
3553 XFS_BTREE_STATS_INC(cur, insrec); in xfs_btree_insrec()
3556 block = xfs_btree_get_block(cur, level, &bp); in xfs_btree_insrec()
3561 error = xfs_btree_check_block(cur, block, level, bp); in xfs_btree_insrec()
3568 ASSERT(cur->bc_ops->recs_inorder(cur, rec, in xfs_btree_insrec()
3569 xfs_btree_rec_addr(cur, ptr, block))); in xfs_btree_insrec()
3571 ASSERT(cur->bc_ops->keys_inorder(cur, key, in xfs_btree_insrec()
3572 xfs_btree_key_addr(cur, ptr, block))); in xfs_btree_insrec()
3581 xfs_btree_set_ptr_null(cur, &nptr); in xfs_btree_insrec()
3582 if (numrecs == cur->bc_ops->get_maxrecs(cur, level)) { in xfs_btree_insrec()
3583 error = xfs_btree_make_block_unfull(cur, level, numrecs, in xfs_btree_insrec()
3593 block = xfs_btree_get_block(cur, level, &bp); in xfs_btree_insrec()
3597 error = xfs_btree_check_block(cur, block, level, bp); in xfs_btree_insrec()
3606 XFS_BTREE_STATS_ADD(cur, moves, numrecs - ptr + 1); in xfs_btree_insrec()
3613 kp = xfs_btree_key_addr(cur, ptr, block); in xfs_btree_insrec()
3614 pp = xfs_btree_ptr_addr(cur, ptr, block); in xfs_btree_insrec()
3617 error = xfs_btree_debug_check_ptr(cur, pp, i, level); in xfs_btree_insrec()
3622 xfs_btree_shift_keys(cur, kp, 1, numrecs - ptr + 1); in xfs_btree_insrec()
3623 xfs_btree_shift_ptrs(cur, pp, 1, numrecs - ptr + 1); in xfs_btree_insrec()
3625 error = xfs_btree_debug_check_ptr(cur, ptrp, 0, level); in xfs_btree_insrec()
3630 xfs_btree_copy_keys(cur, kp, key, 1); in xfs_btree_insrec()
3631 xfs_btree_copy_ptrs(cur, pp, ptrp, 1); in xfs_btree_insrec()
3634 xfs_btree_log_ptrs(cur, bp, ptr, numrecs); in xfs_btree_insrec()
3635 xfs_btree_log_keys(cur, bp, ptr, numrecs); in xfs_btree_insrec()
3638 ASSERT(cur->bc_ops->keys_inorder(cur, kp, in xfs_btree_insrec()
3639 xfs_btree_key_addr(cur, ptr + 1, block))); in xfs_btree_insrec()
3646 rp = xfs_btree_rec_addr(cur, ptr, block); in xfs_btree_insrec()
3648 xfs_btree_shift_recs(cur, rp, 1, numrecs - ptr + 1); in xfs_btree_insrec()
3651 xfs_btree_copy_recs(cur, rp, rec, 1); in xfs_btree_insrec()
3653 xfs_btree_log_recs(cur, bp, ptr, numrecs); in xfs_btree_insrec()
3656 ASSERT(cur->bc_ops->recs_inorder(cur, rp, in xfs_btree_insrec()
3657 xfs_btree_rec_addr(cur, ptr + 1, block))); in xfs_btree_insrec()
3663 xfs_btree_log_block(cur, bp, XFS_BB_NUMRECS); in xfs_btree_insrec()
3689 if (!xfs_btree_ptr_is_null(cur, &nptr) && in xfs_btree_insrec()
3691 xfs_btree_get_keys(cur, block, lkey); in xfs_btree_insrec()
3692 } else if (xfs_btree_needs_key_update(cur, optr)) { in xfs_btree_insrec()
3693 error = xfs_btree_update_keys(cur, level); in xfs_btree_insrec()
3703 if (!xfs_btree_ptr_is_null(cur, &nptr)) { in xfs_btree_insrec()
3704 xfs_btree_copy_keys(cur, key, lkey, 1); in xfs_btree_insrec()
3726 struct xfs_btree_cur *cur, in xfs_btree_insert() argument
3741 pcur = cur; in xfs_btree_insert()
3744 xfs_btree_set_ptr_null(cur, &nptr); in xfs_btree_insert()
3747 cur->bc_ops->init_rec_from_cur(cur, &rec); in xfs_btree_insert()
3748 cur->bc_ops->init_key_from_rec(key, &rec); in xfs_btree_insert()
3763 if (pcur != cur) in xfs_btree_insert()
3768 if (XFS_IS_CORRUPT(cur->bc_mp, i != 1)) { in xfs_btree_insert()
3769 xfs_btree_mark_sick(cur); in xfs_btree_insert()
3780 if (pcur != cur && in xfs_btree_insert()
3781 (ncur || xfs_btree_ptr_is_null(cur, &nptr))) { in xfs_btree_insert()
3783 if (cur->bc_ops->update_cursor && in xfs_btree_insert()
3784 !(cur->bc_flags & XFS_BTREE_STAGING)) in xfs_btree_insert()
3785 cur->bc_ops->update_cursor(pcur, cur); in xfs_btree_insert()
3786 cur->bc_nlevels = pcur->bc_nlevels; in xfs_btree_insert()
3794 } while (!xfs_btree_ptr_is_null(cur, &nptr)); in xfs_btree_insert()
3805 struct xfs_btree_cur *cur, in xfs_btree_demote_leaf_child() argument
3821 cur->bc_ops->broot_realloc(cur, 0); in xfs_btree_demote_leaf_child()
3822 cur->bc_nlevels--; in xfs_btree_demote_leaf_child()
3828 broot = cur->bc_ops->broot_realloc(cur, numrecs); in xfs_btree_demote_leaf_child()
3829 xfs_btree_init_block(cur->bc_mp, broot, cur->bc_ops, 0, numrecs, in xfs_btree_demote_leaf_child()
3830 cur->bc_ino.ip->i_ino); in xfs_btree_demote_leaf_child()
3832 rp = xfs_btree_rec_addr(cur, 1, broot); in xfs_btree_demote_leaf_child()
3833 crp = xfs_btree_rec_addr(cur, 1, cblock); in xfs_btree_demote_leaf_child()
3834 xfs_btree_copy_recs(cur, rp, crp, numrecs); in xfs_btree_demote_leaf_child()
3836 cur->bc_levels[0].bp = NULL; in xfs_btree_demote_leaf_child()
3848 struct xfs_btree_cur *cur, in xfs_btree_demote_node_child() argument
3866 block = cur->bc_ops->broot_realloc(cur, numrecs); in xfs_btree_demote_node_child()
3872 kp = xfs_btree_key_addr(cur, 1, block); in xfs_btree_demote_node_child()
3873 ckp = xfs_btree_key_addr(cur, 1, cblock); in xfs_btree_demote_node_child()
3874 xfs_btree_copy_keys(cur, kp, ckp, numrecs); in xfs_btree_demote_node_child()
3877 pp = xfs_btree_ptr_addr(cur, 1, block); in xfs_btree_demote_node_child()
3878 cpp = xfs_btree_ptr_addr(cur, 1, cblock); in xfs_btree_demote_node_child()
3880 error = xfs_btree_debug_check_ptr(cur, cpp, i, level - 1); in xfs_btree_demote_node_child()
3884 xfs_btree_copy_ptrs(cur, pp, cpp, numrecs); in xfs_btree_demote_node_child()
3887 cur->bc_levels[level - 1].bp = NULL; in xfs_btree_demote_node_child()
3889 cur->bc_nlevels--; in xfs_btree_demote_node_child()
3903 struct xfs_btree_cur *cur) in xfs_btree_kill_iroot() argument
3905 struct xfs_inode *ip = cur->bc_ino.ip; in xfs_btree_kill_iroot()
3916 ASSERT(cur->bc_ops->type == XFS_BTREE_TYPE_INODE); in xfs_btree_kill_iroot()
3917 ASSERT((cur->bc_ops->geom_flags & XFS_BTGEO_IROOT_RECORDS) || in xfs_btree_kill_iroot()
3918 cur->bc_nlevels > 1); in xfs_btree_kill_iroot()
3924 level = cur->bc_nlevels - 1; in xfs_btree_kill_iroot()
3925 if (level == 1 && !(cur->bc_ops->geom_flags & XFS_BTGEO_IROOT_RECORDS)) in xfs_btree_kill_iroot()
3935 block = xfs_btree_get_iroot(cur); in xfs_btree_kill_iroot()
3939 cblock = xfs_btree_get_block(cur, level - 1, &cbp); in xfs_btree_kill_iroot()
3947 if (numrecs > cur->bc_ops->get_dmaxrecs(cur, level)) in xfs_btree_kill_iroot()
3950 XFS_BTREE_STATS_INC(cur, killroot); in xfs_btree_kill_iroot()
3953 xfs_btree_get_sibling(cur, block, &ptr, XFS_BB_LEFTSIB); in xfs_btree_kill_iroot()
3954 ASSERT(xfs_btree_ptr_is_null(cur, &ptr)); in xfs_btree_kill_iroot()
3955 xfs_btree_get_sibling(cur, block, &ptr, XFS_BB_RIGHTSIB); in xfs_btree_kill_iroot()
3956 ASSERT(xfs_btree_ptr_is_null(cur, &ptr)); in xfs_btree_kill_iroot()
3960 error = xfs_btree_demote_node_child(cur, cblock, level, in xfs_btree_kill_iroot()
3965 xfs_btree_demote_leaf_child(cur, cblock, numrecs); in xfs_btree_kill_iroot()
3967 error = xfs_btree_free_block(cur, cbp); in xfs_btree_kill_iroot()
3971 xfs_trans_log_inode(cur->bc_tp, ip, in xfs_btree_kill_iroot()
3972 XFS_ILOG_CORE | xfs_ilog_fbroot(cur->bc_ino.whichfork)); in xfs_btree_kill_iroot()
3982 struct xfs_btree_cur *cur, in xfs_btree_kill_root() argument
3989 XFS_BTREE_STATS_INC(cur, killroot); in xfs_btree_kill_root()
3995 xfs_btree_set_root(cur, newroot, -1); in xfs_btree_kill_root()
3997 error = xfs_btree_free_block(cur, bp); in xfs_btree_kill_root()
4001 cur->bc_levels[level].bp = NULL; in xfs_btree_kill_root()
4002 cur->bc_levels[level].ra = 0; in xfs_btree_kill_root()
4003 cur->bc_nlevels--; in xfs_btree_kill_root()
4010 struct xfs_btree_cur *cur, in xfs_btree_dec_cursor() argument
4018 error = xfs_btree_decrement(cur, level, &i); in xfs_btree_dec_cursor()
4035 struct xfs_btree_cur *cur, /* btree cursor */ in xfs_btree_delrec() argument
4061 ptr = cur->bc_levels[level].ptr; in xfs_btree_delrec()
4068 block = xfs_btree_get_block(cur, level, &bp); in xfs_btree_delrec()
4072 error = xfs_btree_check_block(cur, block, level, bp); in xfs_btree_delrec()
4083 XFS_BTREE_STATS_INC(cur, delrec); in xfs_btree_delrec()
4084 XFS_BTREE_STATS_ADD(cur, moves, numrecs - ptr); in xfs_btree_delrec()
4092 lkp = xfs_btree_key_addr(cur, ptr + 1, block); in xfs_btree_delrec()
4093 lpp = xfs_btree_ptr_addr(cur, ptr + 1, block); in xfs_btree_delrec()
4096 error = xfs_btree_debug_check_ptr(cur, lpp, i, level); in xfs_btree_delrec()
4102 xfs_btree_shift_keys(cur, lkp, -1, numrecs - ptr); in xfs_btree_delrec()
4103 xfs_btree_shift_ptrs(cur, lpp, -1, numrecs - ptr); in xfs_btree_delrec()
4104 xfs_btree_log_keys(cur, bp, ptr, numrecs - 1); in xfs_btree_delrec()
4105 xfs_btree_log_ptrs(cur, bp, ptr, numrecs - 1); in xfs_btree_delrec()
4110 xfs_btree_shift_recs(cur, in xfs_btree_delrec()
4111 xfs_btree_rec_addr(cur, ptr + 1, block), in xfs_btree_delrec()
4113 xfs_btree_log_recs(cur, bp, ptr, numrecs - 1); in xfs_btree_delrec()
4121 xfs_btree_log_block(cur, bp, XFS_BB_NUMRECS); in xfs_btree_delrec()
4128 if (xfs_btree_at_iroot(cur, level)) { in xfs_btree_delrec()
4129 cur->bc_ops->broot_realloc(cur, numrecs); in xfs_btree_delrec()
4131 error = xfs_btree_kill_iroot(cur); in xfs_btree_delrec()
4135 error = xfs_btree_dec_cursor(cur, level, stat); in xfs_btree_delrec()
4146 if (level == cur->bc_nlevels - 1) { in xfs_btree_delrec()
4153 pp = xfs_btree_ptr_addr(cur, 1, block); in xfs_btree_delrec()
4154 error = xfs_btree_kill_root(cur, bp, level, pp); in xfs_btree_delrec()
4158 error = xfs_btree_dec_cursor(cur, level, stat); in xfs_btree_delrec()
4170 if (xfs_btree_needs_key_update(cur, ptr)) { in xfs_btree_delrec()
4171 error = xfs_btree_update_keys(cur, level); in xfs_btree_delrec()
4180 if (numrecs >= cur->bc_ops->get_minrecs(cur, level)) { in xfs_btree_delrec()
4181 error = xfs_btree_dec_cursor(cur, level, stat); in xfs_btree_delrec()
4192 xfs_btree_get_sibling(cur, block, &rptr, XFS_BB_RIGHTSIB); in xfs_btree_delrec()
4193 xfs_btree_get_sibling(cur, block, &lptr, XFS_BB_LEFTSIB); in xfs_btree_delrec()
4195 if (cur->bc_ops->type == XFS_BTREE_TYPE_INODE) { in xfs_btree_delrec()
4201 if (xfs_btree_ptr_is_null(cur, &rptr) && in xfs_btree_delrec()
4202 xfs_btree_ptr_is_null(cur, &lptr) && in xfs_btree_delrec()
4203 level == cur->bc_nlevels - 2) { in xfs_btree_delrec()
4204 error = xfs_btree_kill_iroot(cur); in xfs_btree_delrec()
4206 error = xfs_btree_dec_cursor(cur, level, stat); in xfs_btree_delrec()
4213 ASSERT(!xfs_btree_ptr_is_null(cur, &rptr) || in xfs_btree_delrec()
4214 !xfs_btree_ptr_is_null(cur, &lptr)); in xfs_btree_delrec()
4220 error = xfs_btree_dup_cursor(cur, &tcur); in xfs_btree_delrec()
4228 if (!xfs_btree_ptr_is_null(cur, &rptr)) { in xfs_btree_delrec()
4234 if (XFS_IS_CORRUPT(cur->bc_mp, i != 1)) { in xfs_btree_delrec()
4235 xfs_btree_mark_sick(cur); in xfs_btree_delrec()
4243 if (XFS_IS_CORRUPT(cur->bc_mp, i != 1)) { in xfs_btree_delrec()
4244 xfs_btree_mark_sick(cur); in xfs_btree_delrec()
4250 if (XFS_IS_CORRUPT(cur->bc_mp, i != 1)) { in xfs_btree_delrec()
4251 xfs_btree_mark_sick(cur); in xfs_btree_delrec()
4272 cur->bc_ops->get_minrecs(tcur, level)) { in xfs_btree_delrec()
4278 cur->bc_ops->get_minrecs(tcur, level)); in xfs_btree_delrec()
4283 error = xfs_btree_dec_cursor(cur, level, stat); in xfs_btree_delrec()
4296 if (!xfs_btree_ptr_is_null(cur, &lptr)) { in xfs_btree_delrec()
4298 if (XFS_IS_CORRUPT(cur->bc_mp, i != 1)) { in xfs_btree_delrec()
4299 xfs_btree_mark_sick(cur); in xfs_btree_delrec()
4307 if (XFS_IS_CORRUPT(cur->bc_mp, i != 1)) { in xfs_btree_delrec()
4308 xfs_btree_mark_sick(cur); in xfs_btree_delrec()
4319 if (!xfs_btree_ptr_is_null(cur, &lptr)) { in xfs_btree_delrec()
4325 if (XFS_IS_CORRUPT(cur->bc_mp, i != 1)) { in xfs_btree_delrec()
4326 xfs_btree_mark_sick(cur); in xfs_btree_delrec()
4335 if (XFS_IS_CORRUPT(cur->bc_mp, i != 1)) { in xfs_btree_delrec()
4336 xfs_btree_mark_sick(cur); in xfs_btree_delrec()
4344 error = xfs_btree_check_block(cur, left, level, lbp); in xfs_btree_delrec()
4357 cur->bc_ops->get_minrecs(tcur, level)) { in xfs_btree_delrec()
4363 cur->bc_ops->get_minrecs(tcur, level)); in xfs_btree_delrec()
4367 cur->bc_levels[0].ptr++; in xfs_btree_delrec()
4386 ASSERT(!xfs_btree_ptr_is_null(cur, &cptr)); in xfs_btree_delrec()
4388 if (!xfs_btree_ptr_is_null(cur, &lptr) && in xfs_btree_delrec()
4390 cur->bc_ops->get_maxrecs(cur, level)) { in xfs_btree_delrec()
4398 error = xfs_btree_read_buf_block(cur, &lptr, 0, &left, &lbp); in xfs_btree_delrec()
4405 } else if (!xfs_btree_ptr_is_null(cur, &rptr) && in xfs_btree_delrec()
4407 cur->bc_ops->get_maxrecs(cur, level)) { in xfs_btree_delrec()
4415 error = xfs_btree_read_buf_block(cur, &rptr, 0, &right, &rbp); in xfs_btree_delrec()
4424 error = xfs_btree_dec_cursor(cur, level, stat); in xfs_btree_delrec()
4437 XFS_BTREE_STATS_ADD(cur, moves, rrecs); in xfs_btree_delrec()
4445 lkp = xfs_btree_key_addr(cur, lrecs + 1, left); in xfs_btree_delrec()
4446 lpp = xfs_btree_ptr_addr(cur, lrecs + 1, left); in xfs_btree_delrec()
4447 rkp = xfs_btree_key_addr(cur, 1, right); in xfs_btree_delrec()
4448 rpp = xfs_btree_ptr_addr(cur, 1, right); in xfs_btree_delrec()
4451 error = xfs_btree_debug_check_ptr(cur, rpp, i, level); in xfs_btree_delrec()
4456 xfs_btree_copy_keys(cur, lkp, rkp, rrecs); in xfs_btree_delrec()
4457 xfs_btree_copy_ptrs(cur, lpp, rpp, rrecs); in xfs_btree_delrec()
4459 xfs_btree_log_keys(cur, lbp, lrecs + 1, lrecs + rrecs); in xfs_btree_delrec()
4460 xfs_btree_log_ptrs(cur, lbp, lrecs + 1, lrecs + rrecs); in xfs_btree_delrec()
4466 lrp = xfs_btree_rec_addr(cur, lrecs + 1, left); in xfs_btree_delrec()
4467 rrp = xfs_btree_rec_addr(cur, 1, right); in xfs_btree_delrec()
4469 xfs_btree_copy_recs(cur, lrp, rrp, rrecs); in xfs_btree_delrec()
4470 xfs_btree_log_recs(cur, lbp, lrecs + 1, lrecs + rrecs); in xfs_btree_delrec()
4473 XFS_BTREE_STATS_INC(cur, join); in xfs_btree_delrec()
4480 xfs_btree_get_sibling(cur, right, &cptr, XFS_BB_RIGHTSIB); in xfs_btree_delrec()
4481 xfs_btree_set_sibling(cur, left, &cptr, XFS_BB_RIGHTSIB); in xfs_btree_delrec()
4482 xfs_btree_log_block(cur, lbp, XFS_BB_NUMRECS | XFS_BB_RIGHTSIB); in xfs_btree_delrec()
4485 xfs_btree_get_sibling(cur, left, &cptr, XFS_BB_RIGHTSIB); in xfs_btree_delrec()
4486 if (!xfs_btree_ptr_is_null(cur, &cptr)) { in xfs_btree_delrec()
4487 error = xfs_btree_read_buf_block(cur, &cptr, 0, &rrblock, &rrbp); in xfs_btree_delrec()
4490 xfs_btree_set_sibling(cur, rrblock, &lptr, XFS_BB_LEFTSIB); in xfs_btree_delrec()
4491 xfs_btree_log_block(cur, rrbp, XFS_BB_LEFTSIB); in xfs_btree_delrec()
4495 error = xfs_btree_free_block(cur, rbp); in xfs_btree_delrec()
4504 cur->bc_levels[level].bp = lbp; in xfs_btree_delrec()
4505 cur->bc_levels[level].ptr += lrecs; in xfs_btree_delrec()
4506 cur->bc_levels[level].ra = 0; in xfs_btree_delrec()
4512 else if (cur->bc_ops->type == XFS_BTREE_TYPE_INODE || in xfs_btree_delrec()
4513 level + 1 < cur->bc_nlevels) { in xfs_btree_delrec()
4514 error = xfs_btree_increment(cur, level + 1, &i); in xfs_btree_delrec()
4526 cur->bc_levels[level].ptr--; in xfs_btree_delrec()
4555 struct xfs_btree_cur *cur, in xfs_btree_delete() argument
4570 error = xfs_btree_delrec(cur, level, &i); in xfs_btree_delete()
4581 if (joined && (cur->bc_ops->geom_flags & XFS_BTGEO_OVERLAPPING)) { in xfs_btree_delete()
4582 error = xfs_btree_updkeys_force(cur, 0); in xfs_btree_delete()
4588 for (level = 1; level < cur->bc_nlevels; level++) { in xfs_btree_delete()
4589 if (cur->bc_levels[level].ptr == 0) { in xfs_btree_delete()
4590 error = xfs_btree_decrement(cur, level, &i); in xfs_btree_delete()
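The xfs_btree_delete() matches above are the generic delete entry point: it runs xfs_btree_delrec() from the leaf upward, forces key updates for overlapping btrees, and walks back any level whose cursor slot ended up at zero. A minimal caller-side sketch, assuming a type-specific helper has already loaded the search key into cur->bc_rec; the xfs_example_* name and the error-code policy are placeholders, not code from this file.

static int
xfs_example_delete_rec(
	struct xfs_btree_cur	*cur)
{
	int			stat;
	int			error;

	/* cur->bc_rec must already describe the record to remove. */
	error = xfs_btree_lookup(cur, XFS_LOOKUP_EQ, &stat);
	if (error)
		return error;
	if (stat != 1)
		return -ENOENT;		/* placeholder "not found" policy */

	error = xfs_btree_delete(cur, &stat);
	if (error)
		return error;
	return stat == 1 ? 0 : -EFSCORRUPTED;
}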
4609 struct xfs_btree_cur *cur, /* btree cursor */ in xfs_btree_get_rec() argument
4620 ptr = cur->bc_levels[0].ptr; in xfs_btree_get_rec()
4621 block = xfs_btree_get_block(cur, 0, &bp); in xfs_btree_get_rec()
4624 error = xfs_btree_check_block(cur, block, 0, bp); in xfs_btree_get_rec()
4640 *recp = xfs_btree_rec_addr(cur, ptr, block); in xfs_btree_get_rec()
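xfs_btree_get_rec() above validates the current leaf block and hands back a pointer to the record at cur->bc_levels[0].ptr. A hedged sketch of the common walk built on it, mirroring the lookup/get_rec/increment loop that xfs_btree_simple_query_range() uses below; xfs_example_walk_recs() and the decode step are placeholders.

static int
xfs_example_walk_recs(
	struct xfs_btree_cur	*cur)
{
	union xfs_btree_rec	*recp;
	int			stat;
	int			error;

	/* Assumes cur->bc_rec holds the starting key (set up elsewhere). */
	error = xfs_btree_lookup(cur, XFS_LOOKUP_GE, &stat);
	while (!error && stat == 1) {
		error = xfs_btree_get_rec(cur, &recp, &stat);
		if (error || stat == 0)
			break;

		/* ...decode *recp with the btree type's helpers... */

		error = xfs_btree_increment(cur, 0, &stat);
	}
	return error;
}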
4648 struct xfs_btree_cur *cur, in xfs_btree_visit_block() argument
4659 xfs_btree_readahead(cur, level, XFS_BTCUR_RIGHTRA); in xfs_btree_visit_block()
4660 block = xfs_btree_get_block(cur, level, &bp); in xfs_btree_visit_block()
4663 error = fn(cur, level, data); in xfs_btree_visit_block()
4668 xfs_btree_get_sibling(cur, block, &rptr, XFS_BB_RIGHTSIB); in xfs_btree_visit_block()
4669 if (xfs_btree_ptr_is_null(cur, &rptr)) in xfs_btree_visit_block()
4678 xfs_btree_buf_to_ptr(cur, bp, &bufptr); in xfs_btree_visit_block()
4679 if (xfs_btree_ptrs_equal(cur, &rptr, &bufptr)) { in xfs_btree_visit_block()
4680 xfs_btree_mark_sick(cur); in xfs_btree_visit_block()
4684 return xfs_btree_lookup_get_block(cur, level, &rptr, &block); in xfs_btree_visit_block()
4691 struct xfs_btree_cur *cur, in xfs_btree_visit_blocks() argument
4701 xfs_btree_init_ptr_from_cur(cur, &lptr); in xfs_btree_visit_blocks()
4704 for (level = cur->bc_nlevels - 1; level >= 0; level--) { in xfs_btree_visit_blocks()
4706 error = xfs_btree_lookup_get_block(cur, level, &lptr, &block); in xfs_btree_visit_blocks()
4714 ptr = xfs_btree_ptr_addr(cur, 1, block); in xfs_btree_visit_blocks()
4715 xfs_btree_readahead_ptr(cur, ptr, 1); in xfs_btree_visit_blocks()
4718 xfs_btree_copy_ptrs(cur, &lptr, ptr, 1); in xfs_btree_visit_blocks()
4728 error = xfs_btree_visit_block(cur, level, fn, data); in xfs_btree_visit_blocks()
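xfs_btree_visit_blocks() walks the tree one level at a time, and xfs_btree_visit_block() invokes the caller's fn(cur, level, data) on each block before following the right sibling pointer. A minimal visitor sketch in exactly that callback shape; the xfs_example_* names are placeholders, and the XFS_BTREE_VISIT_ALL flags argument is an assumption based on how xfs_btree_count_blocks() calls the walker further down (its argument list is truncated in this listing).

static int
xfs_example_visit_helper(
	struct xfs_btree_cur	*cur,
	int			level,
	void			*data)
{
	unsigned long long	*nr = data;

	(*nr)++;		/* count every block we are handed */
	return 0;
}

static int
xfs_example_count_all_blocks(
	struct xfs_btree_cur	*cur,
	unsigned long long	*nr)
{
	*nr = 0;
	return xfs_btree_visit_blocks(cur, xfs_example_visit_helper,
			XFS_BTREE_VISIT_ALL, nr);
}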
4769 struct xfs_btree_cur *cur, in xfs_btree_block_change_owner() argument
4778 block = xfs_btree_get_block(cur, level, &bp); in xfs_btree_block_change_owner()
4779 if (cur->bc_ops->ptr_len == XFS_BTREE_LONG_PTR_LEN) { in xfs_btree_block_change_owner()
4797 ASSERT(cur->bc_ops->type == XFS_BTREE_TYPE_INODE); in xfs_btree_block_change_owner()
4798 ASSERT(level == cur->bc_nlevels - 1); in xfs_btree_block_change_owner()
4802 if (cur->bc_tp) { in xfs_btree_block_change_owner()
4803 if (!xfs_trans_ordered_buf(cur->bc_tp, bp)) { in xfs_btree_block_change_owner()
4804 xfs_btree_log_block(cur, bp, XFS_BB_OWNER); in xfs_btree_block_change_owner()
4816 struct xfs_btree_cur *cur, in xfs_btree_change_owner() argument
4825 return xfs_btree_visit_blocks(cur, xfs_btree_block_change_owner, in xfs_btree_change_owner()
5049 struct xfs_btree_cur *cur, in xfs_btree_simple_query_range() argument
5061 ASSERT(cur->bc_ops->init_high_key_from_rec); in xfs_btree_simple_query_range()
5062 ASSERT(cur->bc_ops->cmp_two_keys); in xfs_btree_simple_query_range()
5069 error = xfs_btree_lookup(cur, XFS_LOOKUP_LE, &stat); in xfs_btree_simple_query_range()
5075 error = xfs_btree_increment(cur, 0, &stat); in xfs_btree_simple_query_range()
5082 error = xfs_btree_get_rec(cur, &recp, &stat); in xfs_btree_simple_query_range()
5088 cur->bc_ops->init_high_key_from_rec(&rec_key, recp); in xfs_btree_simple_query_range()
5090 if (xfs_btree_keycmp_gt(cur, low_key, &rec_key)) in xfs_btree_simple_query_range()
5095 cur->bc_ops->init_key_from_rec(&rec_key, recp); in xfs_btree_simple_query_range()
5096 if (xfs_btree_keycmp_gt(cur, &rec_key, high_key)) in xfs_btree_simple_query_range()
5100 error = fn(cur, recp, priv); in xfs_btree_simple_query_range()
5106 error = xfs_btree_increment(cur, 0, &stat); in xfs_btree_simple_query_range()
5136 struct xfs_btree_cur *cur, in xfs_btree_overlapped_query_range() argument
5156 level = cur->bc_nlevels - 1; in xfs_btree_overlapped_query_range()
5157 xfs_btree_init_ptr_from_cur(cur, &ptr); in xfs_btree_overlapped_query_range()
5158 error = xfs_btree_lookup_get_block(cur, level, &ptr, &block); in xfs_btree_overlapped_query_range()
5161 xfs_btree_get_block(cur, level, &bp); in xfs_btree_overlapped_query_range()
5162 trace_xfs_btree_overlapped_query_range(cur, level, bp); in xfs_btree_overlapped_query_range()
5164 error = xfs_btree_check_block(cur, block, level, bp); in xfs_btree_overlapped_query_range()
5168 cur->bc_levels[level].ptr = 1; in xfs_btree_overlapped_query_range()
5170 while (level < cur->bc_nlevels) { in xfs_btree_overlapped_query_range()
5171 block = xfs_btree_get_block(cur, level, &bp); in xfs_btree_overlapped_query_range()
5174 if (cur->bc_levels[level].ptr > in xfs_btree_overlapped_query_range()
5177 if (level < cur->bc_nlevels - 1) in xfs_btree_overlapped_query_range()
5178 cur->bc_levels[level + 1].ptr++; in xfs_btree_overlapped_query_range()
5185 recp = xfs_btree_rec_addr(cur, cur->bc_levels[0].ptr, in xfs_btree_overlapped_query_range()
5188 cur->bc_ops->init_high_key_from_rec(&rec_hkey, recp); in xfs_btree_overlapped_query_range()
5189 cur->bc_ops->init_key_from_rec(&rec_key, recp); in xfs_btree_overlapped_query_range()
5200 if (xfs_btree_keycmp_lt(cur, high_key, &rec_key)) in xfs_btree_overlapped_query_range()
5202 if (xfs_btree_keycmp_ge(cur, &rec_hkey, low_key)) { in xfs_btree_overlapped_query_range()
5203 error = fn(cur, recp, priv); in xfs_btree_overlapped_query_range()
5207 cur->bc_levels[level].ptr++; in xfs_btree_overlapped_query_range()
5212 lkp = xfs_btree_key_addr(cur, cur->bc_levels[level].ptr, block); in xfs_btree_overlapped_query_range()
5213 hkp = xfs_btree_high_key_addr(cur, cur->bc_levels[level].ptr, in xfs_btree_overlapped_query_range()
5215 pp = xfs_btree_ptr_addr(cur, cur->bc_levels[level].ptr, block); in xfs_btree_overlapped_query_range()
5226 if (xfs_btree_keycmp_lt(cur, high_key, lkp)) in xfs_btree_overlapped_query_range()
5228 if (xfs_btree_keycmp_ge(cur, hkp, low_key)) { in xfs_btree_overlapped_query_range()
5230 error = xfs_btree_lookup_get_block(cur, level, pp, in xfs_btree_overlapped_query_range()
5234 xfs_btree_get_block(cur, level, &bp); in xfs_btree_overlapped_query_range()
5235 trace_xfs_btree_overlapped_query_range(cur, level, bp); in xfs_btree_overlapped_query_range()
5237 error = xfs_btree_check_block(cur, block, level, bp); in xfs_btree_overlapped_query_range()
5241 cur->bc_levels[level].ptr = 1; in xfs_btree_overlapped_query_range()
5244 cur->bc_levels[level].ptr++; in xfs_btree_overlapped_query_range()
5255 if (cur->bc_levels[0].bp == NULL) { in xfs_btree_overlapped_query_range()
5256 for (i = 0; i < cur->bc_nlevels; i++) { in xfs_btree_overlapped_query_range()
5257 if (cur->bc_levels[i].bp) { in xfs_btree_overlapped_query_range()
5258 xfs_trans_brelse(cur->bc_tp, in xfs_btree_overlapped_query_range()
5259 cur->bc_levels[i].bp); in xfs_btree_overlapped_query_range()
5260 cur->bc_levels[i].bp = NULL; in xfs_btree_overlapped_query_range()
5261 cur->bc_levels[i].ptr = 0; in xfs_btree_overlapped_query_range()
5262 cur->bc_levels[i].ra = 0; in xfs_btree_overlapped_query_range()
5272 struct xfs_btree_cur *cur, in xfs_btree_key_from_irec() argument
5278 cur->bc_rec = *irec; in xfs_btree_key_from_irec()
5279 cur->bc_ops->init_rec_from_cur(cur, &rec); in xfs_btree_key_from_irec()
5280 cur->bc_ops->init_key_from_rec(key, &rec); in xfs_btree_key_from_irec()
5291 struct xfs_btree_cur *cur, in xfs_btree_query_range() argument
5301 xfs_btree_key_from_irec(cur, &high_key, high_rec); in xfs_btree_query_range()
5302 xfs_btree_key_from_irec(cur, &low_key, low_rec); in xfs_btree_query_range()
5305 if (!xfs_btree_keycmp_le(cur, &low_key, &high_key)) in xfs_btree_query_range()
5308 if (!(cur->bc_ops->geom_flags & XFS_BTGEO_OVERLAPPING)) in xfs_btree_query_range()
5309 return xfs_btree_simple_query_range(cur, &low_key, in xfs_btree_query_range()
5311 return xfs_btree_overlapped_query_range(cur, &low_key, &high_key, in xfs_btree_query_range()
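xfs_btree_query_range() turns the caller's low/high irecs into keys, rejects reversed ranges, and then hands each matching record to fn(cur, recp, priv) via either the simple or the overlapped walker above; a nonzero return from the callback aborts the walk and is passed back to the caller. A hedged caller sketch; the xfs_example_* names are placeholders, and the const qualifiers follow recent mainline (drop them for older trees).

static int
xfs_example_query_helper(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_rec	*rec,
	void				*priv)
{
	unsigned long long		*nr = priv;

	(*nr)++;		/* ...or decode *rec as needed... */
	return 0;
}

static int
xfs_example_count_range(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_irec	*low,
	const union xfs_btree_irec	*high,
	unsigned long long		*nr)
{
	*nr = 0;
	return xfs_btree_query_range(cur, low, high,
			xfs_example_query_helper, nr);
}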
5318 struct xfs_btree_cur *cur, in xfs_btree_query_all() argument
5325 memset(&cur->bc_rec, 0, sizeof(cur->bc_rec)); in xfs_btree_query_all()
5329 return xfs_btree_simple_query_range(cur, &low_key, &high_key, fn, priv); in xfs_btree_query_all()
5334 struct xfs_btree_cur *cur, in xfs_btree_count_blocks_helper() argument
5347 struct xfs_btree_cur *cur, in xfs_btree_count_blocks() argument
5351 return xfs_btree_visit_blocks(cur, xfs_btree_count_blocks_helper, in xfs_btree_count_blocks()
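xfs_btree_count_blocks() is a thin wrapper that feeds xfs_btree_count_blocks_helper() to xfs_btree_visit_blocks(). A short usage sketch, assuming the second argument (truncated in the listing) is an xfs_filblks_t out-parameter; the xfs_example_* name and the corruption policy are placeholders.

static int
xfs_example_check_btree_size(
	struct xfs_btree_cur	*cur,
	xfs_filblks_t		expected)
{
	xfs_filblks_t		blocks;
	int			error;

	error = xfs_btree_count_blocks(cur, &blocks);
	if (error)
		return error;
	return blocks == expected ? 0 : -EFSCORRUPTED;
}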
5358 struct xfs_btree_cur *cur, in xfs_btree_cmp_two_ptrs() argument
5362 if (cur->bc_ops->ptr_len == XFS_BTREE_LONG_PTR_LEN) in xfs_btree_cmp_two_ptrs()
5383 struct xfs_btree_cur *cur, in xfs_btree_has_records_helper() argument
5392 cur->bc_ops->init_key_from_rec(&rec_key, rec); in xfs_btree_has_records_helper()
5402 if (xfs_btree_masked_keycmp_lt(cur, &info->start_key, &rec_key, in xfs_btree_has_records_helper()
5413 key_contig = cur->bc_ops->keys_contiguous(cur, &info->high_key, in xfs_btree_has_records_helper()
5416 !(cur->bc_ops->geom_flags & XFS_BTGEO_OVERLAPPING)) in xfs_btree_has_records_helper()
5426 cur->bc_ops->init_high_key_from_rec(&rec_high_key, rec); in xfs_btree_has_records_helper()
5427 if (xfs_btree_masked_keycmp_gt(cur, &rec_high_key, &info->high_key, in xfs_btree_has_records_helper()
5450 struct xfs_btree_cur *cur, in xfs_btree_has_records() argument
5463 if (!cur->bc_ops->keys_contiguous) { in xfs_btree_has_records()
5468 xfs_btree_key_from_irec(cur, &info.start_key, low); in xfs_btree_has_records()
5469 xfs_btree_key_from_irec(cur, &info.end_key, high); in xfs_btree_has_records()
5471 error = xfs_btree_query_range(cur, low, high, in xfs_btree_has_records()
5486 if (xfs_btree_masked_keycmp_ge(cur, &info.high_key, &info.end_key, in xfs_btree_has_records()
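xfs_btree_has_records() runs a masked range query and classifies how completely [low, high] is covered by existing records. A hedged caller sketch: the enum xbtree_recpacking outcome and its EMPTY/SPARSE/FULL values are assumptions based on current mainline, the mask argument is passed as NULL to compare whole keys, and xfs_example_range_is_full() is a placeholder.

static int
xfs_example_range_is_full(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_irec	*low,
	const union xfs_btree_irec	*high,
	bool				*full)
{
	enum xbtree_recpacking		outcome;
	int				error;

	error = xfs_btree_has_records(cur, low, high, NULL, &outcome);
	if (error)
		return error;
	*full = (outcome == XBTREE_RECPACKING_FULL);
	return 0;
}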
5498 struct xfs_btree_cur *cur) in xfs_btree_has_more_records() argument
5503 block = xfs_btree_get_block(cur, 0, &bp); in xfs_btree_has_more_records()
5506 if (cur->bc_levels[0].ptr < xfs_btree_get_numrecs(block)) in xfs_btree_has_more_records()
5510 if (cur->bc_ops->ptr_len == XFS_BTREE_LONG_PTR_LEN) in xfs_btree_has_more_records()
5566 struct xfs_btree_cur *cur) in xfs_btree_goto_left_edge() argument
5571 memset(&cur->bc_rec, 0, sizeof(cur->bc_rec)); in xfs_btree_goto_left_edge()
5572 error = xfs_btree_lookup(cur, XFS_LOOKUP_LE, &stat); in xfs_btree_goto_left_edge()
5578 error = xfs_btree_decrement(cur, 0, &stat); in xfs_btree_goto_left_edge()
5583 xfs_btree_mark_sick(cur); in xfs_btree_goto_left_edge()
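xfs_btree_goto_left_edge() zeroes cur->bc_rec, does an LE lookup, and steps back once if anything was found, marking the tree sick if the cursor still points at a record afterwards; on success the cursor sits just before the first record. One plausible caller pattern, sketched under that assumption; xfs_example_first_rec() is a placeholder.

static int
xfs_example_first_rec(
	struct xfs_btree_cur	*cur,
	union xfs_btree_rec	**recp,
	int			*stat)
{
	int			error;

	error = xfs_btree_goto_left_edge(cur);
	if (error)
		return error;

	/* Step onto the first record; *stat == 0 means the tree is empty. */
	error = xfs_btree_increment(cur, 0, stat);
	if (error || *stat == 0)
		return error;

	return xfs_btree_get_rec(cur, recp, stat);
}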
5593 struct xfs_btree_cur *cur, in xfs_btree_alloc_metafile_block() argument
5599 .mp = cur->bc_mp, in xfs_btree_alloc_metafile_block()
5600 .tp = cur->bc_tp, in xfs_btree_alloc_metafile_block()
5606 struct xfs_inode *ip = cur->bc_ino.ip; in xfs_btree_alloc_metafile_block()
5611 xfs_rmap_ino_bmbt_owner(&args.oinfo, ip->i_ino, cur->bc_ino.whichfork); in xfs_btree_alloc_metafile_block()
5613 XFS_INO_TO_FSB(cur->bc_mp, ip->i_ino)); in xfs_btree_alloc_metafile_block()
5632 struct xfs_btree_cur *cur, in xfs_btree_free_metafile_block() argument
5636 struct xfs_mount *mp = cur->bc_mp; in xfs_btree_free_metafile_block()
5637 struct xfs_inode *ip = cur->bc_ino.ip; in xfs_btree_free_metafile_block()
5638 struct xfs_trans *tp = cur->bc_tp; in xfs_btree_free_metafile_block()
5644 xfs_rmap_ino_bmbt_owner(&oinfo, ip->i_ino, cur->bc_ino.whichfork); in xfs_btree_free_metafile_block()
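The final two groups, xfs_btree_alloc_metafile_block() and xfs_btree_free_metafile_block(), allocate and free blocks for a btree rooted in a metadata file, charging them to the owning inode via xfs_rmap_ino_bmbt_owner() and hinting allocation near the inode with XFS_INO_TO_FSB(). They are meant to be plugged into the cursor's bc_ops through the generic ->alloc_block/->free_block hooks. A hedged sketch of that wiring; the ops name is made up and every elided member would still have to be filled in by a real btree type.

static const struct xfs_btree_ops xfs_example_metafile_btree_ops = {
	.type			= XFS_BTREE_TYPE_INODE,
	/* ... ptr_len, key/rec sizes, init/cmp callbacks, etc. ... */
	.alloc_block		= xfs_btree_alloc_metafile_block,
	.free_block		= xfs_btree_free_metafile_block,
};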