Lines Matching full:leaf
54 * The leaf data grows from end-to-front in the node. This returns the address
55 * of the start of the last item, which is where the leaf data stack currently stops.
57 static unsigned int leaf_data_end(const struct extent_buffer *leaf) in leaf_data_end() argument
59 u32 nr = btrfs_header_nritems(leaf); in leaf_data_end()
62 return BTRFS_LEAF_DATA_SIZE(leaf->fs_info); in leaf_data_end()
63 return btrfs_item_offset(leaf, nr - 1); in leaf_data_end()
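The end-to-front layout described above is easier to see with a small standalone model. The sketch below is not kernel code: LEAF_DATA_SIZE and struct item_stub are hypothetical stand-ins for BTRFS_LEAF_DATA_SIZE() and the offset/size fields of struct btrfs_item, used only to show why an empty leaf reports the full data size while a populated leaf reports the offset of its last item.

#include <stdio.h>

#define LEAF_DATA_SIZE 16283u   /* hypothetical: 16 KiB node minus the header */

struct item_stub {              /* stand-in for struct btrfs_item offset/size */
        unsigned int offset;    /* start of this item's data, relative to the end of the header */
        unsigned int size;      /* number of data bytes the item owns */
};

/* Mirrors leaf_data_end(): where the leaf data "stack" currently stops. */
static unsigned int model_leaf_data_end(const struct item_stub *items,
                                        unsigned int nritems)
{
        if (nritems == 0)
                return LEAF_DATA_SIZE;          /* empty leaf: data would start at the very end */
        return items[nritems - 1].offset;       /* last item owns the lowest data offset */
}

int main(void)
{
        /* Two items holding 100 and 50 bytes of data, packed from the end. */
        struct item_stub items[] = {
                { .offset = LEAF_DATA_SIZE - 100, .size = 100 },
                { .offset = LEAF_DATA_SIZE - 150, .size = 50  },
        };

        printf("data end of empty leaf: %u\n", model_leaf_data_end(items, 0));
        printf("data end with 2 items:  %u\n", model_leaf_data_end(items, 2));
        return 0;
}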
67 * Move data in a @leaf (using memmove, safe for overlapping ranges).
69 * @leaf: leaf that we're doing a memmove on
75 * the leaf. The btrfs_item offsets start directly after the header, so we
76 * have to adjust any offsets to account for the header in the leaf. This
79 static inline void memmove_leaf_data(const struct extent_buffer *leaf, in memmove_leaf_data() argument
84 memmove_extent_buffer(leaf, btrfs_item_nr_offset(leaf, 0) + dst_offset, in memmove_leaf_data()
85 btrfs_item_nr_offset(leaf, 0) + src_offset, len); in memmove_leaf_data()
91 * @dst: destination leaf that we're copying into
92 * @src: source leaf that we're copying from
98 * the leaf. The btrfs_item offsets start directly after the header, so we
99 * have to adjust any offsets to account for the header in the leaf. This
112 * Move items in a @leaf (using memmove).
114 * @dst: destination leaf for the items
120 * appropriate offsets into the leaf from the item numbers.
122 static inline void memmove_leaf_items(const struct extent_buffer *leaf, in memmove_leaf_items() argument
125 memmove_extent_buffer(leaf, btrfs_item_nr_offset(leaf, dst_item), in memmove_leaf_items()
126 btrfs_item_nr_offset(leaf, src_item), in memmove_leaf_items()
133 * @dst: destination leaf for the items
134 * @src: source leaf for the items
140 * appropriate offsets into the leaf from the item numbers.
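All four wrappers above exist for the header adjustment the comments mention. As a quick illustration of what a caller would otherwise open-code (mirroring the wrapper body shown at lines 84-85), the two forms below are equivalent; dst_offset, src_offset and len are whatever item-data-relative values the caller already has.

        /* With the helper: move 'len' bytes of item data inside the leaf. */
        memmove_leaf_data(leaf, dst_offset, src_offset, len);

        /* Open-coded equivalent: item data offsets are relative to the end of
         * the leaf header, so btrfs_item_nr_offset(leaf, 0) (the position of
         * the first item header) is added to turn them into offsets within
         * the extent buffer. */
        memmove_extent_buffer(leaf,
                              btrfs_item_nr_offset(leaf, 0) + dst_offset,
                              btrfs_item_nr_offset(leaf, 0) + src_offset,
                              len);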
1845 struct extent_buffer *leaf = path->nodes[0]; in search_leaf() local
1852 * If we are doing an insertion, the leaf has enough free space and the in search_leaf()
1855 * binary search on the leaf (with search_for_key_slot()), allowing other in search_leaf()
1860 * Cache the leaf free space, since we will need it later and it in search_leaf()
1863 leaf_free_space = btrfs_leaf_free_space(leaf); in search_leaf()
1866 * !path->locks[1] means we have a single node tree, the leaf is in search_leaf()
1872 ASSERT(btrfs_header_nritems(leaf) > 0); in search_leaf()
1873 btrfs_item_key(leaf, &first_key, 0); in search_leaf()
1894 * leaf and there's no need to split the leaf. in search_leaf()
1927 ret = search_for_key_slot(leaf, search_low_slot, key, in search_leaf()
1940 * accounts for the size of struct btrfs_item, deduct it here so the leaf space in search_leaf()
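The deduction that the fragment at line 1940 refers to is easiest to see with numbers. The snippet below is schematic, assuming ret holds the result of search_for_key_slot() as in the function above and using a hypothetical 64 byte payload; sizeof(struct btrfs_item) is 25 bytes on disk, and path->search_for_extension is set by callers (such as the checksum insertion path) whose ins_len already excludes the item header.

        u32 data_size = 64;                                     /* hypothetical payload */
        int ins_len = data_size + sizeof(struct btrfs_item);    /* 64 + 25 = 89 */

        /*
         * Key already present (ret == 0): the existing item merely grows, no
         * new item header is added, so give the header bytes back before the
         * leaf free space check.
         */
        if (ret == 0 && !path->search_for_extension)
                ins_len -= sizeof(struct btrfs_item);           /* back to 64 */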
1988 * If @key is found, 0 is returned and you can find the item in the leaf level
1991 * If @key isn't found, 1 is returned and the leaf level of the path (level 0)
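A hedged caller-side sketch of that return convention follows. The function name lookup_example() and the key values are hypothetical, the search is read-only (no transaction handle, no COW, ins_len of 0), and the code assumes the usual fs/btrfs build context.

static int lookup_example(struct btrfs_root *root, u64 objectid, u8 type, u64 offset)
{
        struct btrfs_path *path;
        struct btrfs_key key;
        int ret;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        key.objectid = objectid;
        key.type = type;
        key.offset = offset;

        ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
        if (ret < 0)
                goto out;               /* IO or other error */
        if (ret > 0) {
                /*
                 * Key not present; path->slots[0] is where it would be
                 * inserted, possibly one past the last item of the leaf.
                 */
                ret = -ENOENT;
                goto out;
        }
        /* ret == 0: the item lives at path->nodes[0], slot path->slots[0]. */
out:
        btrfs_free_path(path);
        return ret;
}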
2342 * Search the tree again to find a leaf with smaller keys.
2379 * Previous key not found. Even if we were at slot 0 of the leaf we had in btrfs_prev_leaf()
2383 * sibling leaf into the front of the leaf we had due to an insertion in btrfs_prev_leaf()
2410 * item might have been pushed to the first slot (0) of the leaf we in btrfs_prev_leaf()
2412 * previous key can exist as the only element of a leaf (big fat item). in btrfs_prev_leaf()
2440 struct extent_buffer *leaf; in btrfs_search_slot_for_read() local
2449 * but in case the previous item is the last in a leaf, path points in btrfs_search_slot_for_read()
2450 * to the first free slot in the previous leaf, i.e. at an invalid in btrfs_search_slot_for_read()
2453 leaf = p->nodes[0]; in btrfs_search_slot_for_read()
2456 if (p->slots[0] >= btrfs_header_nritems(leaf)) { in btrfs_search_slot_for_read()
2477 leaf = p->nodes[0]; in btrfs_search_slot_for_read()
2478 if (p->slots[0] == btrfs_header_nritems(leaf)) in btrfs_search_slot_for_read()
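That "first free slot" case is exactly what the usual forward-iteration pattern has to handle. The sketch below is schematic: it assumes path has already been positioned by a search, that root, ret and found_key are in scope, and that the walk is read-only.

        while (1) {
                struct extent_buffer *leaf = path->nodes[0];

                if (path->slots[0] >= btrfs_header_nritems(leaf)) {
                        ret = btrfs_next_leaf(root, path);
                        if (ret < 0)
                                goto out;       /* error */
                        if (ret > 0)
                                break;          /* no more leaves */
                        continue;               /* re-read nodes[0]/slots[0] */
                }

                btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
                /* ... consume the item at (leaf, path->slots[0]) ... */
                path->slots[0]++;
        }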
2550 * fixing up pointers when a given leaf/node is not in slot 0 of the
2640 * Leaf @left | Leaf @right
2644 * Key f6 in leaf @left itself is valid, but not valid when the next
2645 * key in leaf @right is 7.
3060 * how many bytes are required to store the items in a leaf. start
3061 * and nr indicate which items in the leaf to check. This totals up the
3080 * The space between the end of the leaf items and
3081 * the start of the leaf data. IOW, how much room
3082 * the leaf has left for both items and data
3084 int btrfs_leaf_free_space(const struct extent_buffer *leaf) in btrfs_leaf_free_space() argument
3086 struct btrfs_fs_info *fs_info = leaf->fs_info; in btrfs_leaf_free_space()
3087 int nritems = btrfs_header_nritems(leaf); in btrfs_leaf_free_space()
3090 ret = BTRFS_LEAF_DATA_SIZE(fs_info) - leaf_space_used(leaf, 0, nritems); in btrfs_leaf_free_space()
3093 "leaf free space ret %d, leaf data size %lu, used %d nritems %d", in btrfs_leaf_free_space()
3096 leaf_space_used(leaf, 0, nritems), nritems); in btrfs_leaf_free_space()
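As a worked example of that computation, assuming a 16 KiB nodesize and the current on-disk sizes (101 byte struct btrfs_header, 25 byte struct btrfs_item):

        /*
         *   BTRFS_LEAF_DATA_SIZE = nodesize - sizeof(struct btrfs_header)
         *                        = 16384 - 101 = 16283 bytes
         *
         * With 10 items holding 2000 bytes of data in total:
         *
         *   used = 10 * sizeof(struct btrfs_item) + 2000 = 10 * 25 + 2000 = 2250
         *   free = 16283 - 2250 = 14033 bytes
         *
         * A negative result from btrfs_leaf_free_space() is therefore always
         * a bug, hence the btrfs_crit() message above.
         */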
3212 /* then fixup the leaf pointer in the path */ in __push_leaf_right()
3234 * push some data in the path leaf to the right, trying to free up at
3240 * this will push starting from min_slot to the end of the leaf. It won't
3293 /* Key greater than all keys in the leaf, right neighbor has in push_leaf_right()
3294 * enough room for it and we're not emptying our leaf to delete in push_leaf_right()
3296 * no need to touch/dirty our left leaf. */ in push_leaf_right()
3314 * push some data in the path leaf to the left, trying to free up at
3317 * max_slot can put a limit on how far into the leaf we'll push items. The
3431 /* then fixup the leaf pointer in the path */ in __push_leaf_left()
3452 * push some data in the path leaf to the left, trying to free up at
3455 * max_slot can put a limit on how far into the leaf we'll push items. The
3518 * split the path's leaf in two, making sure there is at least data_size
3519 * available for the resulting leaf level of the path.
3582 * of a leaf. A double split can leave us with 3 mostly empty leaves:
3583 * leaf: [ slots 0 - N] [ our target ] [ N + 1 - total in leaf ]
3607 * right leaf in push_for_double_split()
3618 * our goal is to get our slot at the start or end of a leaf. If in push_for_double_split()
3627 /* try to push all the items before our slot into the next leaf */ in push_for_double_split()
3645 * split the path's leaf in two, making sure there is at least data_size
3646 * available for the resulting leaf level of the path.
3805 * We create a new leaf 'right' for the required ins_len and in split_leaf()
3806 * we'll do btrfs_mark_buffer_dirty() on this leaf after copying in split_leaf()
3840 struct extent_buffer *leaf; in setup_leaf_for_split() local
3846 leaf = path->nodes[0]; in setup_leaf_for_split()
3847 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); in setup_leaf_for_split()
3852 if (btrfs_leaf_free_space(leaf) >= ins_len) in setup_leaf_for_split()
3855 item_size = btrfs_item_size(leaf, path->slots[0]); in setup_leaf_for_split()
3857 fi = btrfs_item_ptr(leaf, path->slots[0], in setup_leaf_for_split()
3859 extent_len = btrfs_file_extent_num_bytes(leaf, fi); in setup_leaf_for_split()
3873 leaf = path->nodes[0]; in setup_leaf_for_split()
3875 if (item_size != btrfs_item_size(leaf, path->slots[0])) in setup_leaf_for_split()
3878 /* the leaf has changed, it now has room. return now */ in setup_leaf_for_split()
3883 fi = btrfs_item_ptr(leaf, path->slots[0], in setup_leaf_for_split()
3885 if (extent_len != btrfs_file_extent_num_bytes(leaf, fi)) in setup_leaf_for_split()
3906 struct extent_buffer *leaf; in split_item() local
3914 leaf = path->nodes[0]; in split_item()
3917 * setup_leaf_for_split() to make room for the new item in the leaf. in split_item()
3919 if (WARN_ON(btrfs_leaf_free_space(leaf) < sizeof(struct btrfs_item))) in split_item()
3923 orig_offset = btrfs_item_offset(leaf, path->slots[0]); in split_item()
3924 item_size = btrfs_item_size(leaf, path->slots[0]); in split_item()
3930 read_extent_buffer(leaf, buf, btrfs_item_ptr_offset(leaf, in split_item()
3934 nritems = btrfs_header_nritems(leaf); in split_item()
3937 memmove_leaf_items(leaf, slot + 1, slot, nritems - slot); in split_item()
3941 btrfs_set_item_key(leaf, &disk_key, slot); in split_item()
3943 btrfs_set_item_offset(leaf, slot, orig_offset); in split_item()
3944 btrfs_set_item_size(leaf, slot, item_size - split_offset); in split_item()
3946 btrfs_set_item_offset(leaf, orig_slot, in split_item()
3948 btrfs_set_item_size(leaf, orig_slot, split_offset); in split_item()
3950 btrfs_set_header_nritems(leaf, nritems + 1); in split_item()
3953 write_extent_buffer(leaf, buf, in split_item()
3954 btrfs_item_ptr_offset(leaf, path->slots[0]), in split_item()
3958 write_extent_buffer(leaf, buf + split_offset, in split_item()
3959 btrfs_item_ptr_offset(leaf, slot), in split_item()
3961 btrfs_mark_buffer_dirty(trans, leaf); in split_item()
3963 BUG_ON(btrfs_leaf_free_space(leaf) < 0); in split_item()
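The offset bookkeeping in split_item() is dense, so here is a worked example with hypothetical numbers: an item of 100 bytes at data offset 4000, split at split_offset = 60.

        /*
         *   before:  slot N     -> offset 4000, size 100 (all 100 bytes in buf)
         *
         *   after:   slot N     -> offset 4000 + 100 - 60 = 4040, size 60
         *                          (the first 60 bytes of the original data)
         *            slot N + 1 -> offset 4000, size 100 - 60 = 40
         *                          (the trailing 40 bytes, under the new key)
         *
         * Because item data grows from the end of the leaf towards the front,
         * the lower offset (4000) belongs to the item that sorts later, which
         * is why only an item header is added and the data is simply written
         * back in two pieces by the write_extent_buffer() calls above.
         */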
3981 * leaf the entire time.
4009 struct extent_buffer *leaf; in btrfs_truncate_item() local
4018 leaf = path->nodes[0]; in btrfs_truncate_item()
4021 old_size = btrfs_item_size(leaf, slot); in btrfs_truncate_item()
4025 nritems = btrfs_header_nritems(leaf); in btrfs_truncate_item()
4026 data_end = leaf_data_end(leaf); in btrfs_truncate_item()
4028 old_data_start = btrfs_item_offset(leaf, slot); in btrfs_truncate_item()
4039 btrfs_init_map_token(&token, leaf); in btrfs_truncate_item()
4049 memmove_leaf_data(leaf, data_end + size_diff, data_end, in btrfs_truncate_item()
4055 btrfs_item_key(leaf, &disk_key, slot); in btrfs_truncate_item()
4061 fi = btrfs_item_ptr(leaf, slot, in btrfs_truncate_item()
4066 if (btrfs_file_extent_type(leaf, fi) == in btrfs_truncate_item()
4068 ptr = btrfs_item_ptr_offset(leaf, slot); in btrfs_truncate_item()
4069 memmove_extent_buffer(leaf, ptr, in btrfs_truncate_item()
4075 memmove_leaf_data(leaf, data_end + size_diff, data_end, in btrfs_truncate_item()
4080 btrfs_set_item_key(leaf, &disk_key, slot); in btrfs_truncate_item()
4085 btrfs_set_item_size(leaf, slot, new_size); in btrfs_truncate_item()
4086 btrfs_mark_buffer_dirty(trans, leaf); in btrfs_truncate_item()
4088 if (btrfs_leaf_free_space(leaf) < 0) { in btrfs_truncate_item()
4089 btrfs_print_leaf(leaf); in btrfs_truncate_item()
4101 struct extent_buffer *leaf; in btrfs_extend_item() local
4109 leaf = path->nodes[0]; in btrfs_extend_item()
4111 nritems = btrfs_header_nritems(leaf); in btrfs_extend_item()
4112 data_end = leaf_data_end(leaf); in btrfs_extend_item()
4114 if (btrfs_leaf_free_space(leaf) < data_size) { in btrfs_extend_item()
4115 btrfs_print_leaf(leaf); in btrfs_extend_item()
4119 old_data = btrfs_item_data_end(leaf, slot); in btrfs_extend_item()
4123 btrfs_print_leaf(leaf); in btrfs_extend_item()
4124 btrfs_crit(leaf->fs_info, "slot %d too large, nritems %d", in btrfs_extend_item()
4133 btrfs_init_map_token(&token, leaf); in btrfs_extend_item()
4142 memmove_leaf_data(leaf, data_end - data_size, data_end, in btrfs_extend_item()
4146 old_size = btrfs_item_size(leaf, slot); in btrfs_extend_item()
4147 btrfs_set_item_size(leaf, slot, old_size + data_size); in btrfs_extend_item()
4148 btrfs_mark_buffer_dirty(trans, leaf); in btrfs_extend_item()
4150 if (btrfs_leaf_free_space(leaf) < 0) { in btrfs_extend_item()
4151 btrfs_print_leaf(leaf); in btrfs_extend_item()
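A schematic caller of the two resize helpers, assuming the signatures used in this file (both take the transaction handle here) and that the path already points at an item reached with enough reserved leaf space; grow and new_size are hypothetical values.

        /* Grow the item at path->slots[0] by 'grow' bytes at its end.  The
         * caller must have made sure the leaf can hold it, e.g. by searching
         * with ins_len = grow. */
        btrfs_extend_item(trans, path, grow);

        /* Shrink it back to 'new_size' bytes, dropping bytes at the end
         * (from_end = 1); from_end = 0 drops bytes at the front instead and
         * is what the file extent code uses. */
        btrfs_truncate_item(trans, path, new_size, 1);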
4161 * @path: points to the leaf/slot where we are going to insert new items
4176 struct extent_buffer *leaf; in setup_items_for_insert() local
4184 * can use them while we modify the leaf. in setup_items_for_insert()
4192 leaf = path->nodes[0]; in setup_items_for_insert()
4195 nritems = btrfs_header_nritems(leaf); in setup_items_for_insert()
4196 data_end = leaf_data_end(leaf); in setup_items_for_insert()
4199 if (btrfs_leaf_free_space(leaf) < total_size) { in setup_items_for_insert()
4200 btrfs_print_leaf(leaf); in setup_items_for_insert()
4202 total_size, btrfs_leaf_free_space(leaf)); in setup_items_for_insert()
4206 btrfs_init_map_token(&token, leaf); in setup_items_for_insert()
4208 unsigned int old_data = btrfs_item_data_end(leaf, slot); in setup_items_for_insert()
4211 btrfs_print_leaf(leaf); in setup_items_for_insert()
4213 "item at slot %d with data offset %u beyond data end of leaf %u", in setup_items_for_insert()
4229 memmove_leaf_items(leaf, slot + batch->nr, slot, nritems - slot); in setup_items_for_insert()
4232 memmove_leaf_data(leaf, data_end - batch->total_data_size, in setup_items_for_insert()
4240 btrfs_set_item_key(leaf, &disk_key, slot + i); in setup_items_for_insert()
4246 btrfs_set_header_nritems(leaf, nritems + batch->nr); in setup_items_for_insert()
4247 btrfs_mark_buffer_dirty(trans, leaf); in setup_items_for_insert()
4249 if (btrfs_leaf_free_space(leaf) < 0) { in setup_items_for_insert()
4250 btrfs_print_leaf(leaf); in setup_items_for_insert()
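The public entry point for this batch path is btrfs_insert_empty_items(), which takes a struct btrfs_item_batch. The sketch below reserves two items under hypothetical keys; the field names (keys, data_sizes, total_data_size, nr) are assumptions based on the current struct btrfs_item_batch definition, so verify them against the tree at hand.

        struct btrfs_key keys[2] = {
                { .objectid = 256, .type = BTRFS_DIR_ITEM_KEY, .offset = 123 },
                { .objectid = 256, .type = BTRFS_DIR_ITEM_KEY, .offset = 456 },
        };
        const u32 sizes[2] = { 32, 48 };
        struct btrfs_item_batch batch = {
                .keys = keys,
                .data_sizes = sizes,
                .total_data_size = 32 + 48,     /* item headers are added internally */
                .nr = 2,
        };
        int ret;

        ret = btrfs_insert_empty_items(trans, root, path, &batch);
        if (ret)
                return ret;     /* -EEXIST on key collision, -ENOSPC, or an IO error */

        /* path now points at the first reserved item; the data is not zeroed,
         * so the caller must fill both items before releasing the path. */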
4256 * Insert a new item into a leaf.
4260 * @path: A path pointing to the target leaf and slot.
4317 struct extent_buffer *leaf; in btrfs_insert_item() local
4325 leaf = path->nodes[0]; in btrfs_insert_item()
4326 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]); in btrfs_insert_item()
4327 write_extent_buffer(leaf, data, ptr, data_size); in btrfs_insert_item()
4328 btrfs_mark_buffer_dirty(trans, leaf); in btrfs_insert_item()
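For the single-item case a schematic caller looks like this; struct my_disk_item, objectid and item_type are hypothetical. btrfs_insert_item() allocates and releases its own path internally, so it is the convenient form when the caller does not need to touch the new leaf afterwards.

        struct my_disk_item data = { 0 };       /* hypothetical on-disk payload */
        struct btrfs_key key = {
                .objectid = objectid,           /* hypothetical key values */
                .type = item_type,
                .offset = 0,
        };
        int ret;

        ret = btrfs_insert_item(trans, root, &key, &data, sizeof(data));
        if (ret == -EEXIST) {
                /* an item with this exact key already exists */
        } else if (ret) {
                /* -ENOSPC or an IO error */
        }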
4336 * It guarantees both items live in the same tree leaf and the new item is
4339 * This allows us to split a file extent in place, keeping a lock on the leaf
4347 struct extent_buffer *leaf; in btrfs_duplicate_item() local
4351 leaf = path->nodes[0]; in btrfs_duplicate_item()
4352 item_size = btrfs_item_size(leaf, path->slots[0]); in btrfs_duplicate_item()
4360 leaf = path->nodes[0]; in btrfs_duplicate_item()
4361 memcpy_extent_buffer(leaf, in btrfs_duplicate_item()
4362 btrfs_item_ptr_offset(leaf, path->slots[0]), in btrfs_duplicate_item()
4363 btrfs_item_ptr_offset(leaf, path->slots[0] - 1), in btrfs_duplicate_item()
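A schematic use of btrfs_duplicate_item(), e.g. when splitting a file extent in place: the path must point at the item to duplicate, and the new key has to sort directly after the existing one so that both copies stay in the same leaf; split_start is a hypothetical value.

        struct btrfs_key new_key;
        int ret;

        btrfs_item_key_to_cpu(path->nodes[0], &new_key, path->slots[0]);
        new_key.offset = split_start;           /* hypothetical new key offset */

        ret = btrfs_duplicate_item(trans, root, path, &new_key);
        if (ret)
                return ret;                     /* -ENOMEM, -ENOSPC or an IO error */

        /* path->slots[0] now points at the copy; edit it in place, e.g. via
         * btrfs_item_ptr(path->nodes[0], path->slots[0],
         *                struct btrfs_file_extent_item). */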
4411 /* just turn the root into a leaf and break */ in btrfs_del_ptr()
4424 * a helper function to delete the leaf pointed to by path->slots[1] and
4427 * This deletes the pointer in path->nodes[1] and frees the leaf
4430 * The path must have already been setup for deleting the leaf, including
4436 struct extent_buffer *leaf) in btrfs_del_leaf() argument
4440 WARN_ON(btrfs_header_generation(leaf) != trans->transid); in btrfs_del_leaf()
4453 atomic_inc(&leaf->refs); in btrfs_del_leaf()
4454 btrfs_free_tree_block(trans, btrfs_root_id(root), leaf, 0, 1); in btrfs_del_leaf()
4455 free_extent_buffer_stale(leaf); in btrfs_del_leaf()
4459 * delete the item at the leaf level in path. If that empties
4460 * the leaf, remove it from the tree
4466 struct extent_buffer *leaf; in btrfs_del_items() local
4471 leaf = path->nodes[0]; in btrfs_del_items()
4472 nritems = btrfs_header_nritems(leaf); in btrfs_del_items()
4475 const u32 last_off = btrfs_item_offset(leaf, slot + nr - 1); in btrfs_del_items()
4476 const int data_end = leaf_data_end(leaf); in btrfs_del_items()
4482 dsize += btrfs_item_size(leaf, slot + i); in btrfs_del_items()
4484 memmove_leaf_data(leaf, data_end + dsize, data_end, in btrfs_del_items()
4487 btrfs_init_map_token(&token, leaf); in btrfs_del_items()
4495 memmove_leaf_items(leaf, slot, slot + nr, nritems - slot - nr); in btrfs_del_items()
4497 btrfs_set_header_nritems(leaf, nritems - nr); in btrfs_del_items()
4500 /* delete the leaf if we've emptied it */ in btrfs_del_items()
4502 if (leaf == root->node) { in btrfs_del_items()
4503 btrfs_set_header_level(leaf, 0); in btrfs_del_items()
4505 btrfs_clear_buffer_dirty(trans, leaf); in btrfs_del_items()
4506 ret = btrfs_del_leaf(trans, root, path, leaf); in btrfs_del_items()
4511 int used = leaf_space_used(leaf, 0, nritems); in btrfs_del_items()
4515 btrfs_item_key(leaf, &disk_key, 0); in btrfs_del_items()
4520 * Try to delete the leaf if it is mostly empty. We do this by in btrfs_del_items()
4523 * not ideal, but future insertions might fill the leaf with more in btrfs_del_items()
4525 * leaf due to deletions on those leaves. in btrfs_del_items()
4531 * make sure the path still points to our leaf in btrfs_del_items()
4535 atomic_inc(&leaf->refs); in btrfs_del_items()
4538 * left neighbour leaf, and that's the first item. in btrfs_del_items()
4541 btrfs_item_size(leaf, 0); in btrfs_del_items()
4547 if (path->nodes[0] == leaf && in btrfs_del_items()
4548 btrfs_header_nritems(leaf)) { in btrfs_del_items()
4551 * leaf to its left neighbour, then attempt to in btrfs_del_items()
4555 * it's pointless to end up with a leaf having in btrfs_del_items()
4559 nritems = btrfs_header_nritems(leaf); in btrfs_del_items()
4560 min_push_space = leaf_space_used(leaf, 0, nritems); in btrfs_del_items()
4567 if (btrfs_header_nritems(leaf) == 0) { in btrfs_del_items()
4569 ret = btrfs_del_leaf(trans, root, path, leaf); in btrfs_del_items()
4572 free_extent_buffer(leaf); in btrfs_del_items()
4580 if (path->nodes[0] == leaf) in btrfs_del_items()
4581 btrfs_mark_buffer_dirty(trans, leaf); in btrfs_del_items()
4582 free_extent_buffer(leaf); in btrfs_del_items()
4585 btrfs_mark_buffer_dirty(trans, leaf); in btrfs_del_items()
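A schematic deletion through this helper: remove the item the path currently points at plus its successor in one call; error handling is abbreviated and the label is hypothetical.

        ret = btrfs_del_items(trans, root, path, path->slots[0], 2);
        if (ret)
                goto out;               /* IO error or detected corruption */
        /* The leaf may have been rebalanced or freed; release the path before
         * using it for another search. */
        btrfs_release_path(path);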
4864 * This one should be returned as well, or we can get leaf corruption in btrfs_next_old_leaf()
4930 * itself waiting for the leaf we've currently in btrfs_next_old_leaf()
5010 struct extent_buffer *leaf; in btrfs_previous_item() local
5022 leaf = path->nodes[0]; in btrfs_previous_item()
5023 nritems = btrfs_header_nritems(leaf); in btrfs_previous_item()
5029 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); in btrfs_previous_item()
5051 struct extent_buffer *leaf; in btrfs_previous_extent_item() local
5063 leaf = path->nodes[0]; in btrfs_previous_extent_item()
5064 nritems = btrfs_header_nritems(leaf); in btrfs_previous_extent_item()
5070 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); in btrfs_previous_extent_item()
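A schematic backwards walk built on btrfs_previous_item(): after positioning the path with a search, step back to the closest preceding item of a given type, crossing leaf boundaries as needed. min_objectid, found_key and the labels are hypothetical; the helper returns 0 when such an item exists, 1 when there is none, and a negative errno on error.

        ret = btrfs_previous_item(root, path, min_objectid, BTRFS_EXTENT_DATA_KEY);
        if (ret < 0)
                goto out;               /* error while walking back */
        if (ret > 0)
                goto not_found;         /* nothing suitable before this point */

        btrfs_item_key_to_cpu(path->nodes[0], &found_key, path->slots[0]);
        /* found_key.type == BTRFS_EXTENT_DATA_KEY and
         * found_key.objectid >= min_objectid at this point. */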