Lines Matching full:tree

7 #include "extent-io-tree.h"
46 pr_err("BTRFS: state leak: start %llu end %llu state %u in tree %d refs %d\n", in btrfs_extent_state_leak_debug_check()
55 #define btrfs_debug_check_extent_io_range(tree, start, end) \ argument
56 __btrfs_debug_check_extent_io_range(__func__, (tree), (start), (end))
58 struct extent_io_tree *tree, in __btrfs_debug_check_extent_io_range() argument
64 if (tree->owner != IO_TREE_INODE_IO) in __btrfs_debug_check_extent_io_range()
67 inode = extent_io_tree_to_inode_const(tree); in __btrfs_debug_check_extent_io_range()
84 * The only tree allowed to set the inode is IO_TREE_INODE_IO.
86 static bool is_inode_io_tree(const struct extent_io_tree *tree) in is_inode_io_tree() argument
88 return tree->owner == IO_TREE_INODE_IO; in is_inode_io_tree()
91 /* Return the inode if it's valid for the given tree, otherwise NULL. */
92 struct btrfs_inode *extent_io_tree_to_inode(struct extent_io_tree *tree) in extent_io_tree_to_inode() argument
94 if (tree->owner == IO_TREE_INODE_IO) in extent_io_tree_to_inode()
95 return tree->inode; in extent_io_tree_to_inode()
100 const struct btrfs_inode *extent_io_tree_to_inode_const(const struct extent_io_tree *tree) in extent_io_tree_to_inode_const() argument
102 if (tree->owner == IO_TREE_INODE_IO) in extent_io_tree_to_inode_const()
103 return tree->inode; in extent_io_tree_to_inode_const()
108 const struct btrfs_fs_info *extent_io_tree_to_fs_info(const struct extent_io_tree *tree) in extent_io_tree_to_fs_info() argument
110 if (tree->owner == IO_TREE_INODE_IO) in extent_io_tree_to_fs_info()
111 return tree->inode->root->fs_info; in extent_io_tree_to_fs_info()
112 return tree->fs_info; in extent_io_tree_to_fs_info()
116 struct extent_io_tree *tree, unsigned int owner) in extent_io_tree_init() argument
118 tree->state = RB_ROOT; in extent_io_tree_init()
119 spin_lock_init(&tree->lock); in extent_io_tree_init()
120 tree->fs_info = fs_info; in extent_io_tree_init()
121 tree->owner = owner; in extent_io_tree_init()
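
Taken together, the accessors above treat the tree's inode/fs_info storage as a tagged union discriminated by owner: only IO_TREE_INODE_IO trees carry an inode, everything else carries a bare fs_info. A minimal caller sketch, assuming kernel context; IO_TREE_SELFTEST is one of the owner constants from extent-io-tree.h and is illustrative here:

    struct extent_io_tree tree;

    extent_io_tree_init(fs_info, &tree, IO_TREE_SELFTEST);
    /* Not an inode tree, so the inode accessor returns NULL and
     * extent_io_tree_to_fs_info() falls through to tree->fs_info. */
    ASSERT(extent_io_tree_to_inode(&tree) == NULL);
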
125 * Empty an io tree, removing and freeing every extent state record from the
126 * tree. This should be called once we are sure no other task can access the
127 * tree anymore, so no tree updates happen after we empty the tree and there
131 void extent_io_tree_release(struct extent_io_tree *tree) in extent_io_tree_release() argument
137 spin_lock(&tree->lock); in extent_io_tree_release()
138 root = tree->state; in extent_io_tree_release()
139 tree->state = RB_ROOT; in extent_io_tree_release()
145 * No need for a memory barrier here, as we are holding the tree in extent_io_tree_release()
151 cond_resched_lock(&tree->lock); in extent_io_tree_release()
155 * be accessing the tree anymore. in extent_io_tree_release()
157 ASSERT(RB_EMPTY_ROOT(&tree->state)); in extent_io_tree_release()
158 spin_unlock(&tree->lock); in extent_io_tree_release()
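
The lines above show the core of the teardown: detach the whole rbtree root under the lock, then free every extent_state, yielding periodically via cond_resched_lock(). A hedged sketch of that shape (the leak-debug bookkeeping and asserts are elided):

    struct rb_root root;
    struct extent_state *state, *tmp;

    spin_lock(&tree->lock);
    root = tree->state;
    tree->state = RB_ROOT;        /* detach: lookups now see an empty tree */
    rbtree_postorder_for_each_entry_safe(state, tmp, &root, rb_node) {
            RB_CLEAR_NODE(&state->rb_node);
            free_extent_state(state);
            cond_resched_lock(&tree->lock);  /* avoid hogging the lock */
    }
    spin_unlock(&tree->lock);
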
241 * Search @tree for an entry that contains @offset. Such entry would have
244 * @tree: the tree to search
245 * @offset: offset that should fall within an entry in @tree
247 * entry in the tree)
257 static inline struct extent_state *tree_search_for_insert(struct extent_io_tree *tree, in tree_search_for_insert() argument
262 struct rb_root *root = &tree->state; in tree_search_for_insert()
292 * Search for an offset in the tree, or fill in the neighbor rbtree node pointers.
294 * @tree: the tree to search
295 * @offset: offset that should fall within an entry in @tree
303 static struct extent_state *tree_search_prev_next(struct extent_io_tree *tree, in tree_search_prev_next() argument
308 struct rb_root *root = &tree->state; in tree_search_prev_next()
341 * Inexact rb-tree search, return the next entry if @offset is not found
343 static inline struct extent_state *tree_search(struct extent_io_tree *tree, u64 offset) in tree_search() argument
345 return tree_search_for_insert(tree, offset, NULL, NULL); in tree_search()
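
The three search helpers share one descend loop: compare @offset against each entry's [start, end] and return the entry containing it, otherwise fall forward to the next entry. A hedged sketch of that loop (search_sketch is a hypothetical name; the node_ret/parent_ret insert-hook plumbing is elided, and next_state() is assumed to be this file's forward-neighbor helper):

    static struct extent_state *search_sketch(struct extent_io_tree *tree,
                                              u64 offset)
    {
            struct rb_node **node = &tree->state.rb_node;
            struct extent_state *entry = NULL;

            while (*node) {
                    entry = rb_entry(*node, struct extent_state, rb_node);
                    if (offset < entry->start)
                            node = &(*node)->rb_left;
                    else if (offset > entry->end)
                            node = &(*node)->rb_right;
                    else
                            return entry;   /* offset inside [start, end] */
            }
            /* Miss: fall forward to the first entry ending at/after offset. */
            while (entry && offset > entry->end)
                    entry = next_state(entry);
            return entry;
    }
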
348 static void extent_io_tree_panic(const struct extent_io_tree *tree, in extent_io_tree_panic() argument
353 btrfs_panic(extent_io_tree_to_fs_info(tree), err, in extent_io_tree_panic()
354 "extent io tree error on %s state start %llu end %llu", in extent_io_tree_panic()
358 static void merge_prev_state(struct extent_io_tree *tree, struct extent_state *state) in merge_prev_state() argument
364 if (is_inode_io_tree(tree)) in merge_prev_state()
365 btrfs_merge_delalloc_extent(extent_io_tree_to_inode(tree), in merge_prev_state()
368 rb_erase(&prev->rb_node, &tree->state); in merge_prev_state()
374 static void merge_next_state(struct extent_io_tree *tree, struct extent_state *state) in merge_next_state() argument
380 if (is_inode_io_tree(tree)) in merge_next_state()
381 btrfs_merge_delalloc_extent(extent_io_tree_to_inode(tree), in merge_next_state()
384 rb_erase(&next->rb_node, &tree->state); in merge_next_state()
393 * tree. Extents with EXTENT_IO in their state field are not merged because
397 * This should be called with the tree lock held.
399 static void merge_state(struct extent_io_tree *tree, struct extent_state *state) in merge_state() argument
404 merge_prev_state(tree, state); in merge_state()
405 merge_next_state(tree, state); in merge_state()
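
merge_prev_state() and merge_next_state() coalesce a state with a neighbor only when the two ranges are byte-adjacent and carry identical bits; inode trees additionally notify the delalloc accounting hook first. A hedged sketch of the previous-neighbor half (prev_state() is assumed to be this file's backward-neighbor helper):

    struct extent_state *prev = prev_state(state);

    if (prev && prev->end == state->start - 1 && prev->state == state->state) {
            if (is_inode_io_tree(tree))
                    btrfs_merge_delalloc_extent(extent_io_tree_to_inode(tree),
                                                state, prev);
            state->start = prev->start;        /* absorb prev's range */
            rb_erase(&prev->rb_node, &tree->state);
            RB_CLEAR_NODE(&prev->rb_node);
            free_extent_state(prev);
    }
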
408 static void set_state_bits(struct extent_io_tree *tree, in set_state_bits() argument
415 if (is_inode_io_tree(tree)) in set_state_bits()
416 btrfs_set_delalloc_extent(extent_io_tree_to_inode(tree), state, bits); in set_state_bits()
424 * Insert an extent_state struct into the tree. 'bits' are set on the
429 * may be an existing record in the tree that was expanded to accommodate the
435 * The tree lock is not taken internally. This is a utility function and
438 static struct extent_state *insert_state(struct extent_io_tree *tree, in insert_state() argument
449 set_state_bits(tree, state, bits, changeset); in insert_state()
451 node = &tree->state.rb_node; in insert_state()
461 if (is_inode_io_tree(tree)) in insert_state()
463 extent_io_tree_to_inode(tree), in insert_state()
466 merge_prev_state(tree, entry); in insert_state()
474 if (is_inode_io_tree(tree)) in insert_state()
476 extent_io_tree_to_inode(tree), in insert_state()
479 merge_next_state(tree, entry); in insert_state()
490 rb_insert_color(&state->rb_node, &tree->state); in insert_state()
496 * Insert state to @tree to the location given by @node and @parent.
498 static void insert_state_fast(struct extent_io_tree *tree, in insert_state_fast() argument
503 set_state_bits(tree, state, bits, changeset); in insert_state_fast()
505 rb_insert_color(&state->rb_node, &tree->state); in insert_state_fast()
506 merge_state(tree, state); in insert_state_fast()
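
As the doc comment at line 429 notes, insert_state() may satisfy the insert by expanding an existing neighbor rather than linking the preallocated state, so callers must use the returned pointer, not the prealloc. A hedged sketch of the consuming pattern, assuming the usual ERR_PTR convention (it mirrors the call at lines 1225-1228 below):

    struct extent_state *inserted_state;

    inserted_state = insert_state(tree, prealloc, bits, changeset);
    if (IS_ERR(inserted_state)) {
            err = PTR_ERR(inserted_state);
            extent_io_tree_panic(tree, prealloc, "insert", err);
    }
    /* From here on, operate on inserted_state; it may not be prealloc. */
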
515 * the tree has 'orig' at [orig->start, orig->end]. After calling, there
516 * are two extent state structs in the tree:
520 * The tree locks are not taken by this function. They need to be held
523 static int split_state(struct extent_io_tree *tree, struct extent_state *orig, in split_state() argument
529 if (is_inode_io_tree(tree)) in split_state()
530 btrfs_split_delalloc_extent(extent_io_tree_to_inode(tree), orig, in split_state()
557 rb_insert_color(&prealloc->rb_node, &tree->state); in split_state()
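
The effect of split_state() at offset @split is to cut one record in two without changing any bits. A hedged sketch of the range arithmetic (the rbtree linking shown at line 557 above is elided):

    /* before:  orig     covers [orig->start ............... orig->end]
     * after:   prealloc covers [orig->start ...  split - 1]
     *          orig     covers [split ............. orig->end]        */
    prealloc->start = orig->start;
    prealloc->end = split - 1;
    orig->start = split;
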
567 * struct is freed and removed from the tree
569 static struct extent_state *clear_state_bit(struct extent_io_tree *tree, in clear_state_bit() argument
578 if (is_inode_io_tree(tree)) in clear_state_bit()
579 btrfs_clear_delalloc_extent(extent_io_tree_to_inode(tree), state, in clear_state_bit()
590 rb_erase(&state->rb_node, &tree->state); in clear_state_bit()
597 merge_state(tree, state); in clear_state_bit()
614 * Clear some bits on a range in the tree. This may require splitting or
615 * inserting elements in the tree, so the gfp mask is used to indicate which
619 * range from the tree regardless of state (i.e. for truncate).
623 * This takes the tree lock, and returns 0 on success and < 0 on error.
625 int __clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, in __clear_extent_bit() argument
640 btrfs_debug_check_extent_io_range(tree, start, end); in __clear_extent_bit()
641 trace_btrfs_clear_extent_bit(tree, start, end - start + 1, bits); in __clear_extent_bit()
657 * is the case if we only have in the tree extent states that in __clear_extent_bit()
664 spin_lock(&tree->lock); in __clear_extent_bit()
685 state = tree_search(tree, start); in __clear_extent_bit()
719 err = split_state(tree, state, prealloc, start); in __clear_extent_bit()
721 extent_io_tree_panic(tree, state, "split", err); in __clear_extent_bit()
727 state = clear_state_bit(tree, state, bits, wake, changeset); in __clear_extent_bit()
741 err = split_state(tree, state, prealloc, end + 1); in __clear_extent_bit()
743 extent_io_tree_panic(tree, state, "split", err); in __clear_extent_bit()
748 clear_state_bit(tree, prealloc, bits, wake, changeset); in __clear_extent_bit()
754 state = clear_state_bit(tree, state, bits, wake, changeset); in __clear_extent_bit()
765 spin_unlock(&tree->lock); in __clear_extent_bit()
771 spin_unlock(&tree->lock); in __clear_extent_bit()
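
Callers normally reach __clear_extent_bit() through the clear_extent_bit() wrapper visible at lines 1843 and 1865 below. A hedged caller sketch (wrapper signature assumed from extent-io-tree.h; the bit is illustrative):

    struct extent_state *cached = NULL;
    int ret;

    /* Clearing bits over a hole is a no-op, so a range can be cleared
     * unconditionally, e.g. on the truncate path the doc above mentions. */
    ret = clear_extent_bit(tree, start, end, EXTENT_DELALLOC, &cached);
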
780 * Wait for one or more bits to clear on a range in the state tree.
782 * The tree lock is taken by this function.
784 static void wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, in wait_extent_bit() argument
789 btrfs_debug_check_extent_io_range(tree, start, end); in wait_extent_bit()
791 spin_lock(&tree->lock); in wait_extent_bit()
794 * Maintain cached_state, as we may not remove it from the tree if there in wait_extent_bit()
808 state = tree_search(tree, start); in wait_extent_bit()
821 spin_unlock(&tree->lock); in wait_extent_bit()
823 spin_lock(&tree->lock); in wait_extent_bit()
833 if (!cond_resched_lock(&tree->lock)) { in wait_extent_bit()
845 spin_unlock(&tree->lock); in wait_extent_bit()
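
Lines 821-823 above show the heart of wait_extent_bit(): drop the tree lock, sleep on the state's waitqueue, then retake the lock and re-search. A hedged sketch of that bracket, including the refcount pin that keeps the state alive across the sleep:

    DEFINE_WAIT(wait);

    refcount_inc(&state->refs);            /* pin across the sleep */
    prepare_to_wait(&state->wq, &wait, TASK_UNINTERRUPTIBLE);
    spin_unlock(&tree->lock);
    schedule();                            /* woken when the bit clears */
    spin_lock(&tree->lock);
    finish_wait(&state->wq, &wait);
    free_extent_state(state);              /* drop the pin, then re-search */
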
869 * tree->lock must be held. NULL will be returned if nothing was found after
872 static struct extent_state *find_first_extent_bit_state(struct extent_io_tree *tree, in find_first_extent_bit_state() argument
881 state = tree_search(tree, start); in find_first_extent_bit_state()
891 * Find the first offset in the io tree with one or more @bits set.
898 bool find_first_extent_bit(struct extent_io_tree *tree, u64 start, in find_first_extent_bit() argument
905 spin_lock(&tree->lock); in find_first_extent_bit()
930 state = find_first_extent_bit_state(tree, start, bits); in find_first_extent_bit()
939 spin_unlock(&tree->lock); in find_first_extent_bit()
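
A hedged caller sketch for find_first_extent_bit(): scan for the first range with a bit set at or after @start; it returns false when nothing with those bits exists (EXTENT_DIRTY and handle_range() are illustrative):

    struct extent_state *cached = NULL;
    u64 found_start, found_end;

    if (find_first_extent_bit(tree, start, &found_start, &found_end,
                              EXTENT_DIRTY, &cached))
            handle_range(found_start, found_end);  /* hypothetical helper */
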
946 * @tree: io tree to check
954 * will drop the tree->lock, so use this helper if you want to find the actual
956 * then walk down the tree until we find a non-contiguous area. The area
959 int find_contiguous_extent_bit(struct extent_io_tree *tree, u64 start, in find_contiguous_extent_bit() argument
965 ASSERT(!btrfs_fs_incompat(extent_io_tree_to_fs_info(tree), NO_HOLES)); in find_contiguous_extent_bit()
967 spin_lock(&tree->lock); in find_contiguous_extent_bit()
968 state = find_first_extent_bit_state(tree, start, bits); in find_contiguous_extent_bit()
979 spin_unlock(&tree->lock); in find_contiguous_extent_bit()
987 * True is returned if we find something, false if nothing was in the tree.
989 bool btrfs_find_delalloc_range(struct extent_io_tree *tree, u64 *start, in btrfs_find_delalloc_range() argument
998 spin_lock(&tree->lock); in btrfs_find_delalloc_range()
1004 state = tree_search(tree, cur_start); in btrfs_find_delalloc_range()
1034 spin_unlock(&tree->lock); in btrfs_find_delalloc_range()
1039 * Set some bits on a range in the tree. This may require allocations or
1050 * [start, end] is inclusive. This takes the tree lock.
1052 static int __set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, in __set_extent_bit() argument
1069 btrfs_debug_check_extent_io_range(tree, start, end); in __set_extent_bit()
1070 trace_btrfs_set_extent_bit(tree, start, end - start + 1, bits); in __set_extent_bit()
1081 * is the case if we only have in the tree extent states that in __set_extent_bit()
1088 spin_lock(&tree->lock); in __set_extent_bit()
1099 state = tree_search_for_insert(tree, start, &p, &parent); in __set_extent_bit()
1106 insert_state_fast(tree, prealloc, p, parent, bits, changeset); in __set_extent_bit()
1129 set_state_bits(tree, state, bits, changeset); in __set_extent_bit()
1131 merge_state(tree, state); in __set_extent_bit()
1178 err = split_state(tree, state, prealloc, start); in __set_extent_bit()
1180 extent_io_tree_panic(tree, state, "split", err); in __set_extent_bit()
1186 set_state_bits(tree, state, bits, changeset); in __set_extent_bit()
1188 merge_state(tree, state); in __set_extent_bit()
1225 inserted_state = insert_state(tree, prealloc, bits, changeset); in __set_extent_bit()
1228 extent_io_tree_panic(tree, prealloc, "insert", err); in __set_extent_bit()
1254 err = split_state(tree, state, prealloc, end + 1); in __set_extent_bit()
1256 extent_io_tree_panic(tree, state, "split", err); in __set_extent_bit()
1258 set_state_bits(tree, prealloc, bits, changeset); in __set_extent_bit()
1260 merge_state(tree, prealloc); in __set_extent_bit()
1268 spin_unlock(&tree->lock); in __set_extent_bit()
1274 spin_unlock(&tree->lock); in __set_extent_bit()
1282 int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, in set_extent_bit() argument
1285 return __set_extent_bit(tree, start, end, bits, NULL, NULL, in set_extent_bit()
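
A hedged caller sketch for the set_extent_bit() wrapper: the cached state handed back through the last argument lets a follow-up call on the same range skip the rbtree search (the bit is illustrative):

    struct extent_state *cached = NULL;
    int ret;

    ret = set_extent_bit(tree, start, end, EXTENT_DELALLOC, &cached);
    /* ... later operations on [start, end] can pass &cached ... */
    free_extent_state(cached);    /* drop the cached reference (NULL-safe) */
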
1292 * @tree: the io tree to search
1307 int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, in convert_extent_bit() argument
1320 btrfs_debug_check_extent_io_range(tree, start, end); in convert_extent_bit()
1321 trace_btrfs_convert_extent_bit(tree, start, end - start + 1, bits, in convert_extent_bit()
1331 * after locking the tree. in convert_extent_bit()
1338 spin_lock(&tree->lock); in convert_extent_bit()
1350 state = tree_search_for_insert(tree, start, &p, &parent); in convert_extent_bit()
1359 insert_state_fast(tree, prealloc, p, parent, bits, NULL); in convert_extent_bit()
1375 set_state_bits(tree, state, bits, NULL); in convert_extent_bit()
1377 state = clear_state_bit(tree, state, clear_bits, 0, NULL); in convert_extent_bit()
1408 err = split_state(tree, state, prealloc, start); in convert_extent_bit()
1410 extent_io_tree_panic(tree, state, "split", err); in convert_extent_bit()
1415 set_state_bits(tree, state, bits, NULL); in convert_extent_bit()
1417 state = clear_state_bit(tree, state, clear_bits, 0, NULL); in convert_extent_bit()
1455 inserted_state = insert_state(tree, prealloc, bits, NULL); in convert_extent_bit()
1458 extent_io_tree_panic(tree, prealloc, "insert", err); in convert_extent_bit()
1479 err = split_state(tree, state, prealloc, end + 1); in convert_extent_bit()
1481 extent_io_tree_panic(tree, state, "split", err); in convert_extent_bit()
1483 set_state_bits(tree, prealloc, bits, NULL); in convert_extent_bit()
1485 clear_state_bit(tree, prealloc, clear_bits, 0, NULL); in convert_extent_bit()
1493 spin_unlock(&tree->lock); in convert_extent_bit()
1499 spin_unlock(&tree->lock); in convert_extent_bit()
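
convert_extent_bit() sets @bits and clears @clear_bits over the range in one pass under a single tree-lock acquisition, instead of a racy set-then-clear pair. A hedged caller sketch (both bits are illustrative):

    struct extent_state *cached = NULL;
    int ret;

    ret = convert_extent_bit(tree, start, end, EXTENT_DIRTY,
                             EXTENT_NEW, &cached);
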
1510 * @tree: the tree to search
1521 void find_first_clear_extent_bit(struct extent_io_tree *tree, u64 start, in find_first_clear_extent_bit() argument
1527 spin_lock(&tree->lock); in find_first_clear_extent_bit()
1531 state = tree_search_prev_next(tree, start, &prev, &next); in find_first_clear_extent_bit()
1534 * Tree is completely empty, send full range and let in find_first_clear_extent_bit()
1611 spin_unlock(&tree->lock); in find_first_clear_extent_bit()
1615 * Count the number of bytes in the tree that have a given bit(s) set for a
1618 * @tree: The io tree to search.
1639 u64 count_range_bits(struct extent_io_tree *tree, in count_range_bits() argument
1654 spin_lock(&tree->lock); in count_range_bits()
1673 * when there are holes between records in the tree. If there is in count_range_bits()
1689 state = tree_search(tree, cur_start); in count_range_bits()
1719 spin_unlock(&tree->lock); in count_range_bits()
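
A hedged caller sketch for count_range_bits(), with the parameter order assumed from the signature at line 1639 plus extent-io-tree.h: with the contig flag set, counting stops at the first gap, and the start cursor is advanced to where the first matching range began:

    u64 cur_start = start;
    u64 bytes;

    bytes = count_range_bits(tree, &cur_start, (u64)-1, max_bytes,
                             EXTENT_DELALLOC, 1, NULL);
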
1727 bool test_range_bit_exists(struct extent_io_tree *tree, u64 start, u64 end, u32 bit) in test_range_bit_exists() argument
1734 spin_lock(&tree->lock); in test_range_bit_exists()
1735 state = tree_search(tree, start); in test_range_bit_exists()
1751 spin_unlock(&tree->lock); in test_range_bit_exists()
1758 bool test_range_bit(struct extent_io_tree *tree, u64 start, u64 end, u32 bit, in test_range_bit() argument
1766 spin_lock(&tree->lock); in test_range_bit()
1771 state = tree_search(tree, start); in test_range_bit()
1802 spin_unlock(&tree->lock); in test_range_bit()
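
The two predicates above differ in their quantifier: test_range_bit_exists() asks whether @bit is set anywhere in [start, end], while test_range_bit() requires the whole range to be covered by states with @bit set. A hedged contrast sketch:

    if (test_range_bit_exists(tree, start, end, EXTENT_LOCKED)) {
            /* some part of [start, end] is locked */
    }
    if (test_range_bit(tree, start, end, EXTENT_DELALLOC, cached)) {
            /* every byte of [start, end] is delalloc */
    }
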
1807 int set_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end, in set_record_extent_bits() argument
1818 return __set_extent_bit(tree, start, end, bits, NULL, NULL, NULL, changeset); in set_record_extent_bits()
1821 int clear_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end, in clear_record_extent_bits() argument
1830 return __clear_extent_bit(tree, start, end, bits, NULL, changeset); in clear_record_extent_bits()
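
The _record variants thread a struct extent_changeset through __set_extent_bit()/__clear_extent_bit() so the caller learns exactly which bytes actually changed; qgroup reservation tracking is the in-tree user. A hedged caller sketch (the bit is the qgroup one; error handling simplified):

    struct extent_changeset *changeset = extent_changeset_alloc();
    int ret = -ENOMEM;

    if (changeset) {
            ret = set_record_extent_bits(tree, start, end,
                                         EXTENT_QGROUP_RESERVED, changeset);
            /* changeset->bytes_changed and changeset->range_changed now
             * describe the delta; release with extent_changeset_free(). */
            extent_changeset_free(changeset);
    }
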
1833 int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end, in try_lock_extent() argument
1839 err = __set_extent_bit(tree, start, end, EXTENT_LOCKED, &failed_start, in try_lock_extent()
1843 clear_extent_bit(tree, start, failed_start - 1, in try_lock_extent()
1854 int lock_extent(struct extent_io_tree *tree, u64 start, u64 end, in lock_extent() argument
1861 err = __set_extent_bit(tree, start, end, EXTENT_LOCKED, &failed_start, in lock_extent()
1865 clear_extent_bit(tree, start, failed_start - 1, in lock_extent()
1868 wait_extent_bit(tree, failed_start, end, EXTENT_LOCKED, in lock_extent()
1870 err = __set_extent_bit(tree, start, end, EXTENT_LOCKED, in lock_extent()
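
A hedged sketch of the usual lock/unlock bracket: lock_extent() loops (via wait_extent_bit() above) until EXTENT_LOCKED can be set over the whole range, and unlock_extent() is the clearing counterpart from extent-io-tree.h:

    struct extent_state *cached = NULL;

    lock_extent(tree, start, end, &cached);
    /* ... [start, end] is exclusively held here ... */
    unlock_extent(tree, start, end, &cached);
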