Lines Matching "operating-points-v2"
6 * License v2 as published by the Free Software Foundation.
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 02111-1307, USA.
22 #include "disk-io.h"
24 #include "print-tree.h"
57 if (!p->nodes[i] || !p->locks[i]) in btrfs_set_path_blocking()
59 btrfs_set_lock_blocking_rw(p->nodes[i], p->locks[i]); in btrfs_set_path_blocking()
60 if (p->locks[i] == BTRFS_READ_LOCK) in btrfs_set_path_blocking()
61 p->locks[i] = BTRFS_READ_LOCK_BLOCKING; in btrfs_set_path_blocking()
62 else if (p->locks[i] == BTRFS_WRITE_LOCK) in btrfs_set_path_blocking()
63 p->locks[i] = BTRFS_WRITE_LOCK_BLOCKING; in btrfs_set_path_blocking()
97 for (i = BTRFS_MAX_LEVEL - 1; i >= 0; i--) { in btrfs_clear_path_blocking()
98 if (p->nodes[i] && p->locks[i]) { in btrfs_clear_path_blocking()
99 btrfs_clear_lock_blocking_rw(p->nodes[i], p->locks[i]); in btrfs_clear_path_blocking()
100 if (p->locks[i] == BTRFS_WRITE_LOCK_BLOCKING) in btrfs_clear_path_blocking()
101 p->locks[i] = BTRFS_WRITE_LOCK; in btrfs_clear_path_blocking()
102 else if (p->locks[i] == BTRFS_READ_LOCK_BLOCKING) in btrfs_clear_path_blocking()
103 p->locks[i] = BTRFS_READ_LOCK; in btrfs_clear_path_blocking()
133 p->slots[i] = 0; in btrfs_release_path()
134 if (!p->nodes[i]) in btrfs_release_path()
136 if (p->locks[i]) { in btrfs_release_path()
137 btrfs_tree_unlock_rw(p->nodes[i], p->locks[i]); in btrfs_release_path()
138 p->locks[i] = 0; in btrfs_release_path()
140 free_extent_buffer(p->nodes[i]); in btrfs_release_path()
141 p->nodes[i] = NULL; in btrfs_release_path()
160 eb = rcu_dereference(root->node); in btrfs_root_node()
177 if (eb == root->node) in btrfs_lock_root_node()
196 if (eb == root->node) in btrfs_read_lock_root_node()
210 if (root->track_dirty && list_empty(&root->dirty_list)) { in add_root_to_dirty_list()
211 list_add(&root->dirty_list, in add_root_to_dirty_list()
212 &root->fs_info->dirty_cowonly_roots); in add_root_to_dirty_list()
231 WARN_ON(root->ref_cows && trans->transid != in btrfs_copy_root()
232 root->fs_info->running_transaction->transid); in btrfs_copy_root()
233 WARN_ON(root->ref_cows && trans->transid != root->last_trans); in btrfs_copy_root()
241 cow = btrfs_alloc_free_block(trans, root, buf->len, 0, in btrfs_copy_root()
243 buf->start, 0, 1); in btrfs_copy_root()
247 copy_extent_buffer(cow, buf, 0, 0, cow->len); in btrfs_copy_root()
248 btrfs_set_header_bytenr(cow, cow->start); in btrfs_copy_root()
249 btrfs_set_header_generation(cow, trans->transid); in btrfs_copy_root()
258 write_extent_buffer(cow, root->fs_info->fsid, in btrfs_copy_root()
262 WARN_ON(btrfs_header_generation(buf) > trans->transid); in btrfs_copy_root()
288 if (root->ref_cows && in btrfs_block_can_be_shared()
289 buf != root->node && buf != root->commit_root && in btrfs_block_can_be_shared()
291 btrfs_root_last_snapshot(&root->root_item) || in btrfs_block_can_be_shared()
295 if (root->ref_cows && in btrfs_block_can_be_shared()
321 * tree (btrfs_header_owner(buf) == root->root_key.objectid), in update_ref_for_cow()
325 * (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID), in update_ref_for_cow()
332 ret = btrfs_lookup_extent_info(trans, root, buf->start, in update_ref_for_cow()
333 buf->len, &refs, &flags); in update_ref_for_cow()
338 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID || in update_ref_for_cow()
350 if ((owner == root->root_key.objectid || in update_ref_for_cow()
351 root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) && in update_ref_for_cow()
356 if (root->root_key.objectid == in update_ref_for_cow()
366 if (root->root_key.objectid == in update_ref_for_cow()
375 buf->start, in update_ref_for_cow()
376 buf->len, in update_ref_for_cow()
382 if (root->root_key.objectid == in update_ref_for_cow()
403 * search_start -- an allocation hint for the new block
405 * empty_size -- a hint that you plan on doing more cow. This is the size in
428 WARN_ON(root->ref_cows && trans->transid != in __btrfs_cow_block()
429 root->fs_info->running_transaction->transid); in __btrfs_cow_block()
430 WARN_ON(root->ref_cows && trans->transid != root->last_trans); in __btrfs_cow_block()
439 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) { in __btrfs_cow_block()
441 parent_start = parent->start; in __btrfs_cow_block()
447 cow = btrfs_alloc_free_block(trans, root, buf->len, parent_start, in __btrfs_cow_block()
448 root->root_key.objectid, &disk_key, in __btrfs_cow_block()
455 copy_extent_buffer(cow, buf, 0, 0, cow->len); in __btrfs_cow_block()
456 btrfs_set_header_bytenr(cow, cow->start); in __btrfs_cow_block()
457 btrfs_set_header_generation(cow, trans->transid); in __btrfs_cow_block()
461 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) in __btrfs_cow_block()
464 btrfs_set_header_owner(cow, root->root_key.objectid); in __btrfs_cow_block()
466 write_extent_buffer(cow, root->fs_info->fsid, in __btrfs_cow_block()
472 if (root->ref_cows) in __btrfs_cow_block()
475 if (buf == root->node) { in __btrfs_cow_block()
477 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID || in __btrfs_cow_block()
479 parent_start = buf->start; in __btrfs_cow_block()
484 rcu_assign_pointer(root->node, cow); in __btrfs_cow_block()
491 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) in __btrfs_cow_block()
492 parent_start = parent->start; in __btrfs_cow_block()
496 WARN_ON(trans->transid != btrfs_header_generation(parent)); in __btrfs_cow_block()
498 cow->start); in __btrfs_cow_block()
500 trans->transid); in __btrfs_cow_block()
531 if (btrfs_header_generation(buf) == trans->transid && in should_cow_block()
533 !(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID && in should_cow_block()
535 !root->force_cow) in should_cow_block()
553 if (trans->transaction != root->fs_info->running_transaction) { in btrfs_cow_block()
555 (unsigned long long)trans->transid, in btrfs_cow_block()
557 root->fs_info->running_transaction->transid); in btrfs_cow_block()
560 if (trans->transid != root->fs_info->generation) { in btrfs_cow_block()
562 (unsigned long long)trans->transid, in btrfs_cow_block()
563 (unsigned long long)root->fs_info->generation); in btrfs_cow_block()
572 search_start = buf->start & ~((u64)(1024 * 1024 * 1024) - 1); in btrfs_cow_block()
592 if (blocknr < other && other - (blocknr + blocksize) < 32768) in close_blocks()
594 if (blocknr > other && blocknr - (other + blocksize) < 32768) in close_blocks()
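
A note on the two checks above: close_blocks() treats a neighbouring tree block as "close" when the gap between the two blocks is under 32KiB; btrfs_realloc_node() uses this to decide which children are worth relocating. A minimal standalone sketch of the same test follows (close_enough and CLOSE_GAP are illustrative names, not kernel API):

/*
 * Two blocks count as close when the gap between them is under 32KiB.
 * Mirrors the test in close_blocks() above; not kernel code.
 */
#include <stdint.h>
#include <stdio.h>

#define CLOSE_GAP 32768

static int close_enough(uint64_t blocknr, uint64_t other, uint32_t blocksize)
{
        if (blocknr < other && other - (blocknr + blocksize) < CLOSE_GAP)
                return 1;
        if (blocknr > other && blocknr - (other + blocksize) < CLOSE_GAP)
                return 1;
        return 0;
}

int main(void)
{
        printf("%d\n", close_enough(4096, 20480, 4096));   /* 1: 12KiB gap   */
        printf("%d\n", close_enough(4096, 1048576, 4096)); /* 0: ~1MiB apart */
        return 0;
}
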
616 if (k1->objectid > k2->objectid) in btrfs_comp_cpu_keys()
618 if (k1->objectid < k2->objectid) in btrfs_comp_cpu_keys()
619 return -1; in btrfs_comp_cpu_keys()
620 if (k1->type > k2->type) in btrfs_comp_cpu_keys()
622 if (k1->type < k2->type) in btrfs_comp_cpu_keys()
623 return -1; in btrfs_comp_cpu_keys()
624 if (k1->offset > k2->offset) in btrfs_comp_cpu_keys()
626 if (k1->offset < k2->offset) in btrfs_comp_cpu_keys()
627 return -1; in btrfs_comp_cpu_keys()
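
btrfs_comp_cpu_keys() above is the total order on btrfs keys: objectid first, then type, then offset, returning -1, 0 or 1. A standalone sketch of the same lexicographic ordering, with a hypothetical demo_key standing in for struct btrfs_key:

/*
 * Standalone sketch of the key ordering used by btrfs_comp_cpu_keys():
 * keys sort by objectid, then type, then offset. demo_key is a
 * stand-in for struct btrfs_key, not the kernel definition.
 */
#include <stdint.h>
#include <stdio.h>

struct demo_key {
        uint64_t objectid;
        uint8_t  type;
        uint64_t offset;
};

static int demo_comp_keys(const struct demo_key *k1, const struct demo_key *k2)
{
        if (k1->objectid > k2->objectid)
                return 1;
        if (k1->objectid < k2->objectid)
                return -1;
        if (k1->type > k2->type)
                return 1;
        if (k1->type < k2->type)
                return -1;
        if (k1->offset > k2->offset)
                return 1;
        if (k1->offset < k2->offset)
                return -1;
        return 0;
}

int main(void)
{
        struct demo_key a = { 256, 1, 0 };
        struct demo_key b = { 256, 1, 4096 };

        printf("%d\n", demo_comp_keys(&a, &b)); /* -1: same objectid/type, smaller offset */
        return 0;
}
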
661 if (trans->transaction != root->fs_info->running_transaction) in btrfs_realloc_node()
663 if (trans->transid != root->fs_info->generation) in btrfs_realloc_node()
667 blocksize = btrfs_level_size(root, parent_level - 1); in btrfs_realloc_node()
689 other = btrfs_node_blockptr(parent, i - 1); in btrfs_realloc_node()
692 if (!close && i < end_slot - 2) { in btrfs_realloc_node()
715 return -EIO; in btrfs_realloc_node()
728 (end_slot - i) * blocksize)); in btrfs_realloc_node()
734 search_start = cur->start; in btrfs_realloc_node()
735 last_block = cur->start; in btrfs_realloc_node()
744 * The leaf data grows from end-to-front in the node.
754 return btrfs_item_offset_nr(leaf, nr - 1); in leaf_data_end()
762 * the slot in the array is returned via slot, and it points to
798 tmp = (struct btrfs_disk_key *)(kaddr + offset - in generic_bin_search()
807 tmp = (struct btrfs_disk_key *)(kaddr + offset - in generic_bin_search()
845 return -1; in bin_search()
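
bin_search()/generic_bin_search() above binary-search the keys of a node or leaf: on a hit the matching slot comes back through *slot, and on a miss the return is non-zero with *slot left at the position where the key would be inserted (compare the btrfs_search_slot() comment further down). A userspace sketch of that contract over plain integers (demo_bin_search is an illustrative name):

/*
 * Return 0 and the matching index when the key is present, otherwise
 * return 1 with *slot set to the insertion position. Plain integers
 * stand in for struct btrfs_disk_key.
 */
#include <stdio.h>

static int demo_bin_search(const int *arr, int nritems, int key, int *slot)
{
        int low = 0, high = nritems;

        while (low < high) {
                int mid = low + (high - low) / 2;

                if (arr[mid] < key) {
                        low = mid + 1;
                } else if (arr[mid] > key) {
                        high = mid;
                } else {
                        *slot = mid;
                        return 0;
                }
        }
        *slot = low;
        return 1;
}

int main(void)
{
        int keys[] = { 10, 20, 30, 40 };
        int slot;

        printf("%d %d\n", demo_bin_search(keys, 4, 30, &slot), slot); /* 0 2 */
        printf("%d %d\n", demo_bin_search(keys, 4, 25, &slot), slot); /* 1 2 */
        return 0;
}
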
856 spin_lock(&root->accounting_lock); in root_add_used()
857 btrfs_set_root_used(&root->root_item, in root_add_used()
858 btrfs_root_used(&root->root_item) + size); in root_add_used()
859 spin_unlock(&root->accounting_lock); in root_add_used()
864 spin_lock(&root->accounting_lock); in root_sub_used()
865 btrfs_set_root_used(&root->root_item, in root_sub_used()
866 btrfs_root_used(&root->root_item) - size); in root_sub_used()
867 spin_unlock(&root->accounting_lock); in root_sub_used()
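
root_add_used() and root_sub_used() above simply adjust the root item's used-byte counter under root->accounting_lock. A rough userspace analogue of the pattern, with a pthread mutex standing in for the spinlock and a made-up demo_root structure:

/*
 * Byte-usage accounting under a lock, modelled on root_add_used() /
 * root_sub_used() above. demo_root and the mutex are illustrative only.
 */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

struct demo_root {
        pthread_mutex_t accounting_lock;
        uint64_t bytes_used;
};

static void demo_root_add_used(struct demo_root *root, uint32_t size)
{
        pthread_mutex_lock(&root->accounting_lock);
        root->bytes_used += size;
        pthread_mutex_unlock(&root->accounting_lock);
}

static void demo_root_sub_used(struct demo_root *root, uint32_t size)
{
        pthread_mutex_lock(&root->accounting_lock);
        root->bytes_used -= size;
        pthread_mutex_unlock(&root->accounting_lock);
}

int main(void)
{
        struct demo_root root = { PTHREAD_MUTEX_INITIALIZER, 0 };

        demo_root_add_used(&root, 4096);
        demo_root_sub_used(&root, 1024);
        printf("%llu\n", (unsigned long long)root.bytes_used); /* 3072 */
        return 0;
}
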
870 /* given a node and slot number, this reads the blocks it points to. The
886 btrfs_level_size(root, level - 1), in read_node_slot()
906 int orig_slot = path->slots[level]; in balance_level()
912 mid = path->nodes[level]; in balance_level()
914 WARN_ON(path->locks[level] != BTRFS_WRITE_LOCK && in balance_level()
915 path->locks[level] != BTRFS_WRITE_LOCK_BLOCKING); in balance_level()
916 WARN_ON(btrfs_header_generation(mid) != trans->transid); in balance_level()
920 if (level < BTRFS_MAX_LEVEL - 1) { in balance_level()
921 parent = path->nodes[level + 1]; in balance_level()
922 pslot = path->slots[level + 1]; in balance_level()
947 rcu_assign_pointer(root->node, child); in balance_level()
952 path->locks[level] = 0; in balance_level()
953 path->nodes[level] = NULL; in balance_level()
959 root_sub_used(root, mid->len); in balance_level()
971 left = read_node_slot(root, parent, pslot - 1); in balance_level()
976 parent, pslot - 1, &left); in balance_level()
1008 if (wret < 0 && wret != -ENOSPC) in balance_level()
1017 root_sub_used(root, right->len); in balance_level()
1057 root_sub_used(root, mid->len); in balance_level()
1074 path->nodes[level] = left; in balance_level()
1075 path->slots[level + 1] -= 1; in balance_level()
1076 path->slots[level] = orig_slot; in balance_level()
1082 orig_slot -= btrfs_header_nritems(left); in balance_level()
1083 path->slots[level] = orig_slot; in balance_level()
1088 btrfs_node_blockptr(path->nodes[level], path->slots[level])) in balance_level()
1096 if (path->nodes[level] != left) in balance_level()
1118 int orig_slot = path->slots[level]; in push_nodes_for_insert()
1123 mid = path->nodes[level]; in push_nodes_for_insert()
1124 WARN_ON(btrfs_header_generation(mid) != trans->transid); in push_nodes_for_insert()
1126 if (level < BTRFS_MAX_LEVEL - 1) { in push_nodes_for_insert()
1127 parent = path->nodes[level + 1]; in push_nodes_for_insert()
1128 pslot = path->slots[level + 1]; in push_nodes_for_insert()
1134 left = read_node_slot(root, parent, pslot - 1); in push_nodes_for_insert()
1144 if (left_nr >= BTRFS_NODEPTRS_PER_BLOCK(root) - 1) { in push_nodes_for_insert()
1148 pslot - 1, &left); in push_nodes_for_insert()
1165 path->nodes[level] = left; in push_nodes_for_insert()
1166 path->slots[level + 1] -= 1; in push_nodes_for_insert()
1167 path->slots[level] = orig_slot; in push_nodes_for_insert()
1171 orig_slot -= in push_nodes_for_insert()
1173 path->slots[level] = orig_slot; in push_nodes_for_insert()
1194 if (right_nr >= BTRFS_NODEPTRS_PER_BLOCK(root) - 1) { in push_nodes_for_insert()
1217 path->nodes[level] = right; in push_nodes_for_insert()
1218 path->slots[level + 1] += 1; in push_nodes_for_insert()
1219 path->slots[level] = orig_slot - in push_nodes_for_insert()
1250 int direction = path->reada; in reada_for_search()
1259 if (!path->nodes[level]) in reada_for_search()
1262 node = path->nodes[level]; in reada_for_search()
1265 blocksize = btrfs_level_size(root, level - 1); in reada_for_search()
1281 nr--; in reada_for_search()
1287 if (path->reada < 0 && objectid) { in reada_for_search()
1293 if ((search <= target && target - search <= 65536) || in reada_for_search()
1294 (search > target && search - target <= 65536)) { in reada_for_search()
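
The window test above keeps readahead close to home: roughly, a candidate block is only prefetched when it sits within 64KiB of the block the search actually wants. A standalone sketch of the distance check (demo_within_reada_window is an illustrative name):

/*
 * Only blocks within 64KiB of the target are worth prefetching,
 * mirroring the readahead window test in reada_for_search() above.
 */
#include <stdint.h>
#include <stdio.h>

static int demo_within_reada_window(uint64_t search, uint64_t target)
{
        uint64_t dist = search > target ? search - target : target - search;

        return dist <= 65536;
}

int main(void)
{
        printf("%d\n", demo_within_reada_window(1048576, 1081344)); /* 1: 32KiB apart */
        printf("%d\n", demo_within_reada_window(1048576, 2097152)); /* 0: 1MiB apart  */
        return 0;
}
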
1306 * returns -EAGAIN if it had to drop the path, or zero if everything was in
1322 parent = path->nodes[level + 1]; in reada_for_balance()
1327 slot = path->slots[level + 1]; in reada_for_balance()
1331 block1 = btrfs_node_blockptr(parent, slot - 1); in reada_for_balance()
1332 gen = btrfs_node_ptr_generation(parent, slot - 1); in reada_for_balance()
1347 ret = -EAGAIN; in reada_for_balance()
1377 * callers might also have set path->keep_locks, which tells this code to keep
1378 * the lock if the path points to the last slot in the block. This is part of
1393 if (!path->nodes[i]) in unlock_up()
1395 if (!path->locks[i]) in unlock_up()
1397 if (!no_skips && path->slots[i] == 0) { in unlock_up()
1401 if (!no_skips && path->keep_locks) { in unlock_up()
1403 t = path->nodes[i]; in unlock_up()
1405 if (nritems < 1 || path->slots[i] >= nritems - 1) { in unlock_up()
1413 t = path->nodes[i]; in unlock_up()
1414 if (i >= lowest_unlock && i > skip_level && path->locks[i]) { in unlock_up()
1415 btrfs_tree_unlock_rw(t, path->locks[i]); in unlock_up()
1416 path->locks[i] = 0; in unlock_up()
1434 if (path->keep_locks) in btrfs_unlock_up_safe()
1438 if (!path->nodes[i]) in btrfs_unlock_up_safe()
1440 if (!path->locks[i]) in btrfs_unlock_up_safe()
1442 btrfs_tree_unlock_rw(path->nodes[i], path->locks[i]); in btrfs_unlock_up_safe()
1443 path->locks[i] = 0; in btrfs_unlock_up_safe()
1453 * reada. -EAGAIN is returned and the search must be repeated.
1470 blocksize = btrfs_level_size(root, level - 1); in read_block_for_search()
1500 return -EIO; in read_block_for_search()
1515 if (p->reada) in read_block_for_search()
1516 reada_for_search(root, p, level, slot, key->objectid); in read_block_for_search()
1520 ret = -EAGAIN; in read_block_for_search()
1530 ret = -EIO; in read_block_for_search()
1538 * for node-level blocks and does any balancing required based on
1542 * drop the path, -EAGAIN is returned and btrfs_search_slot must
1552 if ((p->search_for_split || ins_len > 0) && btrfs_header_nritems(b) >= in setup_nodes_for_search()
1553 BTRFS_NODEPTRS_PER_BLOCK(root) - 3) { in setup_nodes_for_search()
1575 b = p->nodes[level]; in setup_nodes_for_search()
1598 b = p->nodes[level]; in setup_nodes_for_search()
1608 ret = -EAGAIN; in setup_nodes_for_search()
1618 * If the key isn't found, the path points to the slot where it should
1641 lowest_level = p->lowest_level; in btrfs_search_slot()
1643 WARN_ON(p->nodes[0] != NULL); in btrfs_search_slot()
1662 write_lock_level = -1; in btrfs_search_slot()
1664 if (cow && (p->keep_locks || p->lowest_level)) in btrfs_search_slot()
1673 if (p->search_commit_root) { in btrfs_search_slot()
1678 b = root->commit_root; in btrfs_search_slot()
1681 if (!p->skip_locking) in btrfs_search_slot()
1684 if (p->skip_locking) { in btrfs_search_slot()
1705 p->nodes[level] = b; in btrfs_search_slot()
1706 if (!p->skip_locking) in btrfs_search_slot()
1707 p->locks[level] = root_lock; in btrfs_search_slot()
1738 p->nodes[level + 1], in btrfs_search_slot()
1739 p->slots[level + 1], &b); in btrfs_search_slot()
1748 p->nodes[level] = b; in btrfs_search_slot()
1760 * operating on. in btrfs_search_slot()
1771 slot -= 1; in btrfs_search_slot()
1773 p->slots[level] = slot; in btrfs_search_slot()
1776 if (err == -EAGAIN) in btrfs_search_slot()
1782 b = p->nodes[level]; in btrfs_search_slot()
1783 slot = p->slots[level]; in btrfs_search_slot()
1802 p->slots[level]++; in btrfs_search_slot()
1808 if (err == -EAGAIN) in btrfs_search_slot()
1815 if (!p->skip_locking) { in btrfs_search_slot()
1825 p->locks[level] = BTRFS_WRITE_LOCK; in btrfs_search_slot()
1834 p->locks[level] = BTRFS_READ_LOCK; in btrfs_search_slot()
1836 p->nodes[level] = b; in btrfs_search_slot()
1839 p->slots[level] = slot; in btrfs_search_slot()
1859 if (!p->search_for_split) in btrfs_search_slot()
1870 if (!p->leave_spinning) in btrfs_search_slot()
1879 * making sure the right key of each node points to 'key'. in fixup_low_keys()
1884 * If this fails to write a tree block, it returns -1, but continues
1896 int tslot = path->slots[i]; in fixup_low_keys()
1897 if (!path->nodes[i]) in fixup_low_keys()
1899 t = path->nodes[i]; in fixup_low_keys()
1901 btrfs_mark_buffer_dirty(path->nodes[i]); in fixup_low_keys()
1922 eb = path->nodes[0]; in btrfs_set_item_key_safe()
1923 slot = path->slots[0]; in btrfs_set_item_key_safe()
1925 btrfs_item_key(eb, &disk_key, slot - 1); in btrfs_set_item_key_safe()
1927 return -1; in btrfs_set_item_key_safe()
1929 if (slot < btrfs_header_nritems(eb) - 1) { in btrfs_set_item_key_safe()
1932 return -1; in btrfs_set_item_key_safe()
1961 push_items = BTRFS_NODEPTRS_PER_BLOCK(root) - dst_nritems; in push_node_left()
1962 WARN_ON(btrfs_header_generation(src) != trans->transid); in push_node_left()
1963 WARN_ON(btrfs_header_generation(dst) != trans->transid); in push_node_left()
1977 if (src_nritems - push_items < 8) { in push_node_left()
1980 push_items -= 8; in push_node_left()
1984 push_items = min(src_nritems - 8, push_items); in push_node_left()
1994 (src_nritems - push_items) * in push_node_left()
1997 btrfs_set_header_nritems(src, src_nritems - push_items); in push_node_left()
2025 WARN_ON(btrfs_header_generation(src) != trans->transid); in balance_node_right()
2026 WARN_ON(btrfs_header_generation(dst) != trans->transid); in balance_node_right()
2030 push_items = BTRFS_NODEPTRS_PER_BLOCK(root) - dst_nritems; in balance_node_right()
2052 btrfs_node_key_ptr_offset(src_nritems - push_items), in balance_node_right()
2055 btrfs_set_header_nritems(src, src_nritems - push_items); in balance_node_right()
2081 BUG_ON(path->nodes[level]); in insert_new_root()
2082 BUG_ON(path->nodes[level-1] != root->node); in insert_new_root()
2084 lower = path->nodes[level-1]; in insert_new_root()
2090 c = btrfs_alloc_free_block(trans, root, root->nodesize, 0, in insert_new_root()
2091 root->root_key.objectid, &lower_key, in insert_new_root()
2092 level, root->node->start, 0, 0); in insert_new_root()
2096 root_add_used(root, root->nodesize); in insert_new_root()
2101 btrfs_set_header_bytenr(c, c->start); in insert_new_root()
2102 btrfs_set_header_generation(c, trans->transid); in insert_new_root()
2104 btrfs_set_header_owner(c, root->root_key.objectid); in insert_new_root()
2106 write_extent_buffer(c, root->fs_info->fsid, in insert_new_root()
2110 write_extent_buffer(c, root->fs_info->chunk_tree_uuid, in insert_new_root()
2115 btrfs_set_node_blockptr(c, 0, lower->start); in insert_new_root()
2117 WARN_ON(lower_gen != trans->transid); in insert_new_root()
2123 old = root->node; in insert_new_root()
2124 rcu_assign_pointer(root->node, c); in insert_new_root()
2126 /* the super has an extra ref to root->node */ in insert_new_root()
2131 path->nodes[level] = c; in insert_new_root()
2132 path->locks[level] = BTRFS_WRITE_LOCK; in insert_new_root()
2133 path->slots[level] = 0; in insert_new_root()
2142 * blocknr is the block the key points to.
2153 BUG_ON(!path->nodes[level]); in insert_ptr()
2154 btrfs_assert_tree_locked(path->nodes[level]); in insert_ptr()
2155 lower = path->nodes[level]; in insert_ptr()
2164 (nritems - slot) * sizeof(struct btrfs_key_ptr)); in insert_ptr()
2168 WARN_ON(trans->transid == 0); in insert_ptr()
2169 btrfs_set_node_ptr_generation(lower, slot, trans->transid); in insert_ptr()
2196 c = path->nodes[level]; in split_node()
2197 WARN_ON(btrfs_header_generation(c) != trans->transid); in split_node()
2198 if (c == root->node) { in split_node()
2205 c = path->nodes[level]; in split_node()
2207 BTRFS_NODEPTRS_PER_BLOCK(root) - 3) in split_node()
2217 split = btrfs_alloc_free_block(trans, root, root->nodesize, 0, in split_node()
2218 root->root_key.objectid, in split_node()
2219 &disk_key, level, c->start, 0, 0); in split_node()
2223 root_add_used(root, root->nodesize); in split_node()
2227 btrfs_set_header_bytenr(split, split->start); in split_node()
2228 btrfs_set_header_generation(split, trans->transid); in split_node()
2230 btrfs_set_header_owner(split, root->root_key.objectid); in split_node()
2231 write_extent_buffer(split, root->fs_info->fsid, in split_node()
2234 write_extent_buffer(split, root->fs_info->chunk_tree_uuid, in split_node()
2242 (c_nritems - mid) * sizeof(struct btrfs_key_ptr)); in split_node()
2243 btrfs_set_header_nritems(split, c_nritems - mid); in split_node()
2250 wret = insert_ptr(trans, root, path, &disk_key, split->start, in split_node()
2251 path->slots[level + 1] + 1, in split_node()
2256 if (path->slots[level] >= mid) { in split_node()
2257 path->slots[level] -= mid; in split_node()
2260 path->nodes[level] = split; in split_node()
2261 path->slots[level + 1] += 1; in split_node()
2278 int end = min(nritems, start + nr) - 1; in leaf_space_used()
2283 data_len = data_len - btrfs_item_offset_nr(l, end); in leaf_space_used()
2299 ret = BTRFS_LEAF_DATA_SIZE(root) - leaf_space_used(leaf, 0, nritems); in btrfs_leaf_free_space()
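
leaf_space_used() and btrfs_leaf_free_space() above charge both the item headers and the item payloads against the leaf; free space is whatever is left of the data area once both are subtracted (item headers grow forward, payloads grow back from the end, per the "leaf data grows from end-to-front" comment earlier). A simplified standalone model with illustrative sizes, not the real on-disk layout:

/*
 * Simplified leaf free-space model: each item costs one fixed-size
 * header plus its payload; free space is the data area minus both.
 * LEAF_DATA_SIZE and ITEM_HEADER_SIZE are illustrative constants.
 */
#include <stdint.h>
#include <stdio.h>

#define LEAF_DATA_SIZE   4096u
#define ITEM_HEADER_SIZE 25u   /* key + offset + size, roughly */

static uint32_t demo_leaf_space_used(const uint32_t *item_sizes, int nritems)
{
        uint32_t used = 0;
        int i;

        for (i = 0; i < nritems; i++)
                used += ITEM_HEADER_SIZE + item_sizes[i];
        return used;
}

static uint32_t demo_leaf_free_space(const uint32_t *item_sizes, int nritems)
{
        return LEAF_DATA_SIZE - demo_leaf_space_used(item_sizes, nritems);
}

int main(void)
{
        uint32_t sizes[] = { 160, 32, 512 };

        printf("%u\n", demo_leaf_free_space(sizes, 3)); /* 4096 - (3*25 + 704) = 3317 */
        return 0;
}
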
2321 struct extent_buffer *left = path->nodes[0]; in __push_leaf_right()
2322 struct extent_buffer *upper = path->nodes[1]; in __push_leaf_right()
2339 if (path->slots[0] >= left_nritems) in __push_leaf_right()
2342 slot = path->slots[1]; in __push_leaf_right()
2343 i = left_nritems - 1; in __push_leaf_right()
2348 if (path->slots[0] > i) in __push_leaf_right()
2350 if (path->slots[0] == i) { in __push_leaf_right()
2357 if (path->slots[0] == i) in __push_leaf_right()
2368 i--; in __push_leaf_right()
2380 push_space = btrfs_item_end_nr(left, left_nritems - push_items); in __push_leaf_right()
2381 push_space -= leaf_data_end(root, left); in __push_leaf_right()
2386 btrfs_leaf_data(right) + data_end - push_space, in __push_leaf_right()
2388 BTRFS_LEAF_DATA_SIZE(root) - data_end); in __push_leaf_right()
2392 BTRFS_LEAF_DATA_SIZE(root) - push_space, in __push_leaf_right()
2402 btrfs_item_nr_offset(left_nritems - push_items), in __push_leaf_right()
2411 push_space -= btrfs_item_size(right, item); in __push_leaf_right()
2415 left_nritems -= push_items; in __push_leaf_right()
2430 if (path->slots[0] >= left_nritems) { in __push_leaf_right()
2431 path->slots[0] -= left_nritems; in __push_leaf_right()
2432 if (btrfs_header_nritems(path->nodes[0]) == 0) in __push_leaf_right()
2433 clean_tree_block(trans, root, path->nodes[0]); in __push_leaf_right()
2434 btrfs_tree_unlock(path->nodes[0]); in __push_leaf_right()
2435 free_extent_buffer(path->nodes[0]); in __push_leaf_right()
2436 path->nodes[0] = right; in __push_leaf_right()
2437 path->slots[1] += 1; in __push_leaf_right()
2465 struct extent_buffer *left = path->nodes[0]; in push_leaf_right()
2473 if (!path->nodes[1]) in push_leaf_right()
2476 slot = path->slots[1]; in push_leaf_right()
2477 upper = path->nodes[1]; in push_leaf_right()
2478 if (slot >= btrfs_header_nritems(upper) - 1) in push_leaf_right()
2481 btrfs_assert_tree_locked(path->nodes[1]); in push_leaf_right()
2521 * item at 'max_slot' won't be touched. Use (u32)-1 to make us do all the
2532 struct extent_buffer *right = path->nodes[0]; in __push_leaf_left()
2547 nr = min(right_nritems - 1, max_slot); in __push_leaf_left()
2553 if (path->slots[0] < i) in __push_leaf_left()
2555 if (path->slots[0] == i) { in __push_leaf_left()
2562 if (path->slots[0] == i) in __push_leaf_left()
2586 push_space = BTRFS_LEAF_DATA_SIZE(root) - in __push_leaf_left()
2587 btrfs_item_offset_nr(right, push_items - 1); in __push_leaf_left()
2590 leaf_data_end(root, left) - push_space, in __push_leaf_left()
2592 btrfs_item_offset_nr(right, push_items - 1), in __push_leaf_left()
2597 old_left_item_size = btrfs_item_offset_nr(left, old_left_nritems - 1); in __push_leaf_left()
2605 ioff - (BTRFS_LEAF_DATA_SIZE(root) - old_left_item_size)); in __push_leaf_left()
2617 push_space = btrfs_item_offset_nr(right, push_items - 1) - in __push_leaf_left()
2620 BTRFS_LEAF_DATA_SIZE(root) - push_space, in __push_leaf_left()
2626 (btrfs_header_nritems(right) - push_items) * in __push_leaf_left()
2629 right_nritems -= push_items; in __push_leaf_left()
2635 push_space = push_space - btrfs_item_size(right, item); in __push_leaf_left()
2651 if (path->slots[0] < push_items) { in __push_leaf_left()
2652 path->slots[0] += old_left_nritems; in __push_leaf_left()
2653 btrfs_tree_unlock(path->nodes[0]); in __push_leaf_left()
2654 free_extent_buffer(path->nodes[0]); in __push_leaf_left()
2655 path->nodes[0] = left; in __push_leaf_left()
2656 path->slots[1] -= 1; in __push_leaf_left()
2660 path->slots[0] -= push_items; in __push_leaf_left()
2662 BUG_ON(path->slots[0] < 0); in __push_leaf_left()
2675 * item at 'max_slot' won't be touched. Use (u32)-1 to make us push all the
2682 struct extent_buffer *right = path->nodes[0]; in push_leaf_left()
2689 slot = path->slots[1]; in push_leaf_left()
2692 if (!path->nodes[1]) in push_leaf_left()
2699 btrfs_assert_tree_locked(path->nodes[1]); in push_leaf_left()
2701 left = read_node_slot(root, path->nodes[1], slot - 1); in push_leaf_left()
2716 path->nodes[1], slot - 1, &left); in push_leaf_left()
2718 /* we hit -ENOSPC, but it isn't fatal here */ in push_leaf_left()
2758 nritems = nritems - mid; in copy_for_split()
2760 data_copy_size = btrfs_item_end_nr(l, mid) - leaf_data_end(root, l); in copy_for_split()
2767 btrfs_leaf_data(right) + BTRFS_LEAF_DATA_SIZE(root) - in copy_for_split()
2771 rt_data_off = BTRFS_LEAF_DATA_SIZE(root) - in copy_for_split()
2785 wret = insert_ptr(trans, root, path, &disk_key, right->start, in copy_for_split()
2786 path->slots[1] + 1, 1); in copy_for_split()
2792 BUG_ON(path->slots[0] != slot); in copy_for_split()
2795 btrfs_tree_unlock(path->nodes[0]); in copy_for_split()
2796 free_extent_buffer(path->nodes[0]); in copy_for_split()
2797 path->nodes[0] = right; in copy_for_split()
2798 path->slots[0] -= mid; in copy_for_split()
2799 path->slots[1] += 1; in copy_for_split()
2805 BUG_ON(path->slots[0] < 0); in copy_for_split()
2813 * leaf: [ slots 0 - N] [ our target ] [ N + 1 - total in leaf ]
2830 slot = path->slots[0]; in push_for_double_split()
2843 nritems = btrfs_header_nritems(path->nodes[0]); in push_for_double_split()
2848 if (path->slots[0] == 0 || path->slots[0] == nritems) in push_for_double_split()
2851 if (btrfs_leaf_free_space(root, path->nodes[0]) >= data_size) in push_for_double_split()
2855 slot = path->slots[0]; in push_for_double_split()
2892 l = path->nodes[0]; in split_leaf()
2893 slot = path->slots[0]; in split_leaf()
2896 return -EOVERFLOW; in split_leaf()
2906 data_size, 0, (u32)-1); in split_leaf()
2910 l = path->nodes[0]; in split_leaf()
2917 if (!path->nodes[1]) { in split_leaf()
2924 l = path->nodes[0]; in split_leaf()
2925 slot = path->slots[0]; in split_leaf()
2931 leaf_space_used(l, mid, nritems - mid) + data_size > in split_leaf()
2938 leaf_space_used(l, mid, nritems - mid) + in split_leaf()
2956 leaf_space_used(l, mid, nritems - mid) + in split_leaf()
2971 right = btrfs_alloc_free_block(trans, root, root->leafsize, 0, in split_leaf()
2972 root->root_key.objectid, in split_leaf()
2973 &disk_key, 0, l->start, 0, 0); in split_leaf()
2977 root_add_used(root, root->leafsize); in split_leaf()
2980 btrfs_set_header_bytenr(right, right->start); in split_leaf()
2981 btrfs_set_header_generation(right, trans->transid); in split_leaf()
2983 btrfs_set_header_owner(right, root->root_key.objectid); in split_leaf()
2985 write_extent_buffer(right, root->fs_info->fsid, in split_leaf()
2989 write_extent_buffer(right, root->fs_info->chunk_tree_uuid, in split_leaf()
2997 &disk_key, right->start, in split_leaf()
2998 path->slots[1] + 1, 1); in split_leaf()
3002 btrfs_tree_unlock(path->nodes[0]); in split_leaf()
3003 free_extent_buffer(path->nodes[0]); in split_leaf()
3004 path->nodes[0] = right; in split_leaf()
3005 path->slots[0] = 0; in split_leaf()
3006 path->slots[1] += 1; in split_leaf()
3011 right->start, in split_leaf()
3012 path->slots[1], 1); in split_leaf()
3015 btrfs_tree_unlock(path->nodes[0]); in split_leaf()
3016 free_extent_buffer(path->nodes[0]); in split_leaf()
3017 path->nodes[0] = right; in split_leaf()
3018 path->slots[0] = 0; in split_leaf()
3019 if (path->slots[1] == 0) { in split_leaf()
3044 if (btrfs_leaf_free_space(root, path->nodes[0]) >= data_size) in split_leaf()
3060 leaf = path->nodes[0]; in setup_leaf_for_split()
3061 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); in setup_leaf_for_split()
3069 item_size = btrfs_item_size_nr(leaf, path->slots[0]); in setup_leaf_for_split()
3071 fi = btrfs_item_ptr(leaf, path->slots[0], in setup_leaf_for_split()
3077 path->keep_locks = 1; in setup_leaf_for_split()
3078 path->search_for_split = 1; in setup_leaf_for_split()
3080 path->search_for_split = 0; in setup_leaf_for_split()
3084 ret = -EAGAIN; in setup_leaf_for_split()
3085 leaf = path->nodes[0]; in setup_leaf_for_split()
3087 if (ret > 0 || item_size != btrfs_item_size_nr(leaf, path->slots[0])) in setup_leaf_for_split()
3091 if (btrfs_leaf_free_space(root, path->nodes[0]) >= ins_len) in setup_leaf_for_split()
3095 fi = btrfs_item_ptr(leaf, path->slots[0], in setup_leaf_for_split()
3106 path->keep_locks = 0; in setup_leaf_for_split()
3110 path->keep_locks = 0; in setup_leaf_for_split()
3130 leaf = path->nodes[0]; in split_item()
3135 item = btrfs_item_nr(leaf, path->slots[0]); in split_item()
3141 return -ENOMEM; in split_item()
3144 path->slots[0]), item_size); in split_item()
3146 slot = path->slots[0] + 1; in split_item()
3152 (nritems - slot) * sizeof(struct btrfs_item)); in split_item()
3161 btrfs_set_item_size(leaf, new_item, item_size - split_offset); in split_item()
3164 orig_offset + item_size - split_offset); in split_item()
3171 btrfs_item_ptr_offset(leaf, path->slots[0]), in split_item()
3177 item_size - split_offset); in split_item()
3233 leaf = path->nodes[0]; in btrfs_duplicate_item()
3234 item_size = btrfs_item_size_nr(leaf, path->slots[0]); in btrfs_duplicate_item()
3240 path->slots[0]++; in btrfs_duplicate_item()
3246 leaf = path->nodes[0]; in btrfs_duplicate_item()
3248 btrfs_item_ptr_offset(leaf, path->slots[0]), in btrfs_duplicate_item()
3249 btrfs_item_ptr_offset(leaf, path->slots[0] - 1), in btrfs_duplicate_item()
3275 leaf = path->nodes[0]; in btrfs_truncate_item()
3276 slot = path->slots[0]; in btrfs_truncate_item()
3287 size_diff = old_size - new_size; in btrfs_truncate_item()
3308 data_end, old_data_start + new_size - data_end); in btrfs_truncate_item()
3322 (unsigned long)fi - size_diff); in btrfs_truncate_item()
3336 data_end, old_data_start - data_end); in btrfs_truncate_item()
3372 leaf = path->nodes[0]; in btrfs_extend_item()
3381 slot = path->slots[0]; in btrfs_extend_item()
3401 btrfs_set_item_offset(leaf, item, ioff - data_size); in btrfs_extend_item()
3406 data_end - data_size, btrfs_leaf_data(leaf) + in btrfs_extend_item()
3407 data_end, old_data - data_end); in btrfs_extend_item()
3458 return -EEXIST; in btrfs_insert_some_items()
3462 leaf = path->nodes[0]; in btrfs_insert_some_items()
3468 for (i = nr; i >= 0; i--) { in btrfs_insert_some_items()
3469 total_data -= data_size[i]; in btrfs_insert_some_items()
3470 total_size -= data_size[i] + sizeof(struct btrfs_item); in btrfs_insert_some_items()
3477 slot = path->slots[0]; in btrfs_insert_some_items()
3510 btrfs_set_item_offset(leaf, item, ioff - total_data); in btrfs_insert_some_items()
3515 (nritems - slot) * sizeof(struct btrfs_item)); in btrfs_insert_some_items()
3519 data_end - total_data, btrfs_leaf_data(leaf) + in btrfs_insert_some_items()
3520 data_end, old_data - data_end); in btrfs_insert_some_items()
3537 btrfs_set_item_offset(leaf, item, data_end - data_size[i]); in btrfs_insert_some_items()
3538 data_end -= data_size[i]; in btrfs_insert_some_items()
3579 leaf = path->nodes[0]; in setup_items_for_insert()
3580 slot = path->slots[0]; in setup_items_for_insert()
3610 btrfs_set_item_offset(leaf, item, ioff - total_data); in setup_items_for_insert()
3615 (nritems - slot) * sizeof(struct btrfs_item)); in setup_items_for_insert()
3619 data_end - total_data, btrfs_leaf_data(leaf) + in setup_items_for_insert()
3620 data_end, old_data - data_end); in setup_items_for_insert()
3629 btrfs_set_item_offset(leaf, item, data_end - data_size[i]); in setup_items_for_insert()
3630 data_end -= data_size[i]; in setup_items_for_insert()
3673 return -EEXIST; in btrfs_insert_empty_items()
3677 slot = path->slots[0]; in btrfs_insert_empty_items()
3702 return -ENOMEM; in btrfs_insert_item()
3705 leaf = path->nodes[0]; in btrfs_insert_item()
3706 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]); in btrfs_insert_item()
3723 struct extent_buffer *parent = path->nodes[level]; in del_ptr()
3729 if (slot != nritems - 1) { in del_ptr()
3734 (nritems - slot - 1)); in del_ptr()
3736 nritems--; in del_ptr()
3738 if (nritems == 0 && parent == root->node) { in del_ptr()
3739 BUG_ON(btrfs_header_level(root->node) != 1); in del_ptr()
3741 btrfs_set_header_level(root->node, 0); in del_ptr()
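
del_ptr() above drops a key pointer from a node by sliding the tail of the pointer array down one slot and shrinking nritems. A minimal sketch of that shift over plain integers (demo_del_slot is an illustrative name):

/*
 * Delete entry 'slot' from an array of 'nritems' entries by shifting
 * the tail down with memmove(), as del_ptr() does for key pointers.
 * Plain integers stand in for struct btrfs_key_ptr entries.
 */
#include <stdio.h>
#include <string.h>

static int demo_del_slot(int *ptrs, int nritems, int slot)
{
        if (slot != nritems - 1)
                memmove(&ptrs[slot], &ptrs[slot + 1],
                        (nritems - slot - 1) * sizeof(*ptrs));
        return nritems - 1;
}

int main(void)
{
        int ptrs[] = { 100, 200, 300, 400 };
        int n = demo_del_slot(ptrs, 4, 1);
        int i;

        for (i = 0; i < n; i++)
                printf("%d ", ptrs[i]); /* 100 300 400 */
        printf("\n");
        return 0;
}
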
3755 * a helper function to delete the leaf pointed to by path->slots[1] and
3756 * path->nodes[1].
3758 * This deletes the pointer in path->nodes[1] and frees the leaf
3762 * all the proper balancing. path->nodes[1] must be locked.
3771 WARN_ON(btrfs_header_generation(leaf) != trans->transid); in btrfs_del_leaf()
3772 ret = del_ptr(trans, root, path, 1, path->slots[1]); in btrfs_del_leaf()
3782 root_sub_used(root, leaf->len); in btrfs_del_leaf()
3803 leaf = path->nodes[0]; in btrfs_del_items()
3804 last_off = btrfs_item_offset_nr(leaf, slot + nr - 1); in btrfs_del_items()
3817 last_off - data_end); in btrfs_del_items()
3830 (nritems - slot - nr)); in btrfs_del_items()
3832 btrfs_set_header_nritems(leaf, nritems - nr); in btrfs_del_items()
3833 nritems -= nr; in btrfs_del_items()
3837 if (leaf == root->node) { in btrfs_del_items()
3860 * make sure the path still points to our leaf in btrfs_del_items()
3863 slot = path->slots[1]; in btrfs_del_items()
3868 1, (u32)-1); in btrfs_del_items()
3869 if (wret < 0 && wret != -ENOSPC) in btrfs_del_items()
3872 if (path->nodes[0] == leaf && in btrfs_del_items()
3876 if (wret < 0 && wret != -ENOSPC) in btrfs_del_items()
3881 path->slots[1] = slot; in btrfs_del_items()
3891 if (path->nodes[0] == leaf) in btrfs_del_items()
3916 btrfs_item_key_to_cpu(path->nodes[0], &key, 0); in btrfs_prev_leaf()
3919 key.offset--; in btrfs_prev_leaf()
3921 key.type--; in btrfs_prev_leaf()
3923 key.objectid--; in btrfs_prev_leaf()
3931 btrfs_item_key(path->nodes[0], &found_key, 0); in btrfs_prev_leaf()
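
btrfs_prev_leaf() above builds a key strictly smaller than the current leaf's first key by decrementing offset when it is non-zero, otherwise type, otherwise objectid, and then re-searches the tree with it. A standalone sketch of just the decrement step, with a hypothetical demo_key in place of struct btrfs_key:

/*
 * Produce a key strictly smaller than the given one, as the start of
 * btrfs_prev_leaf() does before re-searching the tree. demo_key is a
 * stand-in for struct btrfs_key.
 */
#include <stdint.h>
#include <stdio.h>

struct demo_key {
        uint64_t objectid;
        uint8_t  type;
        uint64_t offset;
};

static void demo_prev_key(struct demo_key *key)
{
        if (key->offset > 0)
                key->offset--;
        else if (key->type > 0)
                key->type--;
        else if (key->objectid > 0)
                key->objectid--;
        /* (0, 0, 0) has no predecessor; leave it untouched */
}

int main(void)
{
        struct demo_key key = { 256, 1, 0 };

        demo_prev_key(&key);
        printf("%llu %u %llu\n",
               (unsigned long long)key.objectid, (unsigned)key.type,
               (unsigned long long)key.offset); /* 256 0 0 */
        return 0;
}
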
3947 * This does lock as it descends, and path->keep_locks should be set
3950 * This honors path->lowest_level to prevent descent past a given level
3973 WARN_ON(!path->keep_locks); in btrfs_search_forward()
3977 WARN_ON(path->nodes[level]); in btrfs_search_forward()
3978 path->nodes[level] = cur; in btrfs_search_forward()
3979 path->locks[level] = BTRFS_READ_LOCK; in btrfs_search_forward()
3991 if (level == path->lowest_level) { in btrfs_search_forward()
3995 path->slots[level] = slot; in btrfs_search_forward()
4000 slot--; in btrfs_search_forward()
4030 btrfs_level_size(root, level - 1)); in btrfs_search_forward()
4046 path->slots[level] = slot; in btrfs_search_forward()
4059 path->slots[level] = slot; in btrfs_search_forward()
4060 if (level == path->lowest_level) { in btrfs_search_forward()
4071 path->locks[level - 1] = BTRFS_READ_LOCK; in btrfs_search_forward()
4072 path->nodes[level - 1] = cur; in btrfs_search_forward()
4092 * path->keep_locks should be set to 1 on the search made before
4102 WARN_ON(!path->keep_locks); in btrfs_find_next_key()
4104 if (!path->nodes[level]) in btrfs_find_next_key()
4107 slot = path->slots[level] + 1; in btrfs_find_next_key()
4108 c = path->nodes[level]; in btrfs_find_next_key()
4115 !path->nodes[level + 1]) in btrfs_find_next_key()
4118 if (path->locks[level + 1]) { in btrfs_find_next_key()
4123 slot = btrfs_header_nritems(c) - 1; in btrfs_find_next_key()
4129 orig_lowest = path->lowest_level; in btrfs_find_next_key()
4131 path->lowest_level = level; in btrfs_find_next_key()
4134 path->lowest_level = orig_lowest; in btrfs_find_next_key()
4138 c = path->nodes[level]; in btrfs_find_next_key()
4139 slot = path->slots[level]; in btrfs_find_next_key()
4154 btrfs_level_size(root, level - 1)); in btrfs_find_next_key()
4188 int old_spinning = path->leave_spinning; in btrfs_next_leaf()
4191 nritems = btrfs_header_nritems(path->nodes[0]); in btrfs_next_leaf()
4195 btrfs_item_key_to_cpu(path->nodes[0], &key, nritems - 1); in btrfs_next_leaf()
4202 path->keep_locks = 1; in btrfs_next_leaf()
4203 path->leave_spinning = 1; in btrfs_next_leaf()
4206 path->keep_locks = 0; in btrfs_next_leaf()
4211 nritems = btrfs_header_nritems(path->nodes[0]); in btrfs_next_leaf()
4218 if (nritems > 0 && path->slots[0] < nritems - 1) { in btrfs_next_leaf()
4220 path->slots[0]++; in btrfs_next_leaf()
4226 if (!path->nodes[level]) { in btrfs_next_leaf()
4231 slot = path->slots[level] + 1; in btrfs_next_leaf()
4232 c = path->nodes[level]; in btrfs_next_leaf()
4248 next_rw_lock = path->locks[level]; in btrfs_next_leaf()
4251 if (ret == -EAGAIN) in btrfs_next_leaf()
4259 if (!path->skip_locking) { in btrfs_next_leaf()
4271 path->slots[level] = slot; in btrfs_next_leaf()
4273 level--; in btrfs_next_leaf()
4274 c = path->nodes[level]; in btrfs_next_leaf()
4275 if (path->locks[level]) in btrfs_next_leaf()
4276 btrfs_tree_unlock_rw(c, path->locks[level]); in btrfs_next_leaf()
4279 path->nodes[level] = next; in btrfs_next_leaf()
4280 path->slots[level] = 0; in btrfs_next_leaf()
4281 if (!path->skip_locking) in btrfs_next_leaf()
4282 path->locks[level] = next_rw_lock; in btrfs_next_leaf()
4288 if (ret == -EAGAIN) in btrfs_next_leaf()
4296 if (!path->skip_locking) { in btrfs_next_leaf()
4310 path->leave_spinning = old_spinning; in btrfs_next_leaf()
4333 if (path->slots[0] == 0) { in btrfs_previous_item()
4339 path->slots[0]--; in btrfs_previous_item()
4341 leaf = path->nodes[0]; in btrfs_previous_item()
4345 if (path->slots[0] == nritems) in btrfs_previous_item()
4346 path->slots[0]--; in btrfs_previous_item()
4348 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); in btrfs_previous_item()