Lines Matching full:tree
44 struct extent_io_tree *tree; member
84 "state %lu in tree %p refs %d\n", in extent_io_exit()
87 state->state, state->tree, atomic_read(&state->refs)); in extent_io_exit()
107 void extent_io_tree_init(struct extent_io_tree *tree, in extent_io_tree_init() argument
110 tree->state = RB_ROOT; in extent_io_tree_init()
111 INIT_RADIX_TREE(&tree->buffer, GFP_ATOMIC); in extent_io_tree_init()
112 tree->ops = NULL; in extent_io_tree_init()
113 tree->dirty_bytes = 0; in extent_io_tree_init()
114 spin_lock_init(&tree->lock); in extent_io_tree_init()
115 spin_lock_init(&tree->buffer_lock); in extent_io_tree_init()
116 tree->mapping = mapping; in extent_io_tree_init()
131 state->tree = NULL; in alloc_extent_state()
150 WARN_ON(state->tree); in free_extent_state()
185 static struct rb_node *__etree_search(struct extent_io_tree *tree, u64 offset, in __etree_search() argument
189 struct rb_root *root = &tree->state; in __etree_search()
230 static inline struct rb_node *tree_search(struct extent_io_tree *tree, in tree_search() argument
236 ret = __etree_search(tree, offset, &prev, NULL); in tree_search()
242 static void merge_cb(struct extent_io_tree *tree, struct extent_state *new, in merge_cb() argument
245 if (tree->ops && tree->ops->merge_extent_hook) in merge_cb()
246 tree->ops->merge_extent_hook(tree->mapping->host, new, in merge_cb()
253 * extent in the tree. Extents with EXTENT_IO in their state field
257 * This should be called with the tree lock held.
259 static void merge_state(struct extent_io_tree *tree, in merge_state() argument
273 merge_cb(tree, state, other); in merge_state()
275 other->tree = NULL; in merge_state()
276 rb_erase(&other->rb_node, &tree->state); in merge_state()
285 merge_cb(tree, state, other); in merge_state()
287 other->tree = NULL; in merge_state()
288 rb_erase(&other->rb_node, &tree->state); in merge_state()
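
Editor's note: the merge rule described in the comment at 253-257 is easy to model outside the kernel. A minimal user-space sketch of the same idea, assuming a hypothetical `struct state` with inclusive [start, end] ranges (btrfs keeps these in an rbtree; a sorted doubly linked list is used here only to keep the sketch short):

    #include <stdint.h>
    #include <stdlib.h>

    struct state {                      /* stand-in for extent_state */
            uint64_t start, end;        /* inclusive byte range */
            unsigned long bits;
            struct state *prev, *next;  /* sorted, non-overlapping */
    };

    /* Mirror of merge_state(): fold 'st' into any neighbour that is
     * contiguous and carries identical bits.  The real function also
     * refuses to merge states with in-flight IO bits set, per the
     * comment at line 253 above. */
    static void merge(struct state *st)
    {
            struct state *other = st->prev;

            if (other && other->end + 1 == st->start && other->bits == st->bits) {
                    st->start = other->start;   /* absorb left neighbour */
                    st->prev = other->prev;
                    if (st->prev)
                            st->prev->next = st;
                    free(other);                /* rb_erase() + free in btrfs */
            }
            other = st->next;
            if (other && st->end + 1 == other->start && other->bits == st->bits) {
                    st->end = other->end;       /* absorb right neighbour */
                    st->next = other->next;
                    if (st->next)
                            st->next->prev = st;
                    free(other);
            }
    }
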
294 static void set_state_cb(struct extent_io_tree *tree, in set_state_cb() argument
297 if (tree->ops && tree->ops->set_bit_hook) in set_state_cb()
298 tree->ops->set_bit_hook(tree->mapping->host, state, bits); in set_state_cb()
301 static void clear_state_cb(struct extent_io_tree *tree, in clear_state_cb() argument
304 if (tree->ops && tree->ops->clear_bit_hook) in clear_state_cb()
305 tree->ops->clear_bit_hook(tree->mapping->host, state, bits); in clear_state_cb()
308 static void set_state_bits(struct extent_io_tree *tree,
312 * insert an extent_state struct into the tree. 'bits' are set on the
318 * The tree lock is not taken internally. This is a utility function and
321 static int insert_state(struct extent_io_tree *tree, in insert_state() argument
336 set_state_bits(tree, state, bits); in insert_state()
338 node = tree_insert(&tree->state, end, &state->rb_node); in insert_state()
348 state->tree = tree; in insert_state()
349 merge_state(tree, state); in insert_state()
353 static void split_cb(struct extent_io_tree *tree, struct extent_state *orig, in split_cb() argument
356 if (tree->ops && tree->ops->split_extent_hook) in split_cb()
357 tree->ops->split_extent_hook(tree->mapping->host, orig, split); in split_cb()
366 * the tree has 'orig' at [orig->start, orig->end]. After calling, there
367 * are two extent state structs in the tree:
371 * The tree locks are not taken by this function. They need to be held
374 static int split_state(struct extent_io_tree *tree, struct extent_state *orig, in split_state() argument
379 split_cb(tree, orig, split); in split_state()
386 node = tree_insert(&tree->state, prealloc->end, &prealloc->rb_node); in split_state()
391 prealloc->tree = tree; in split_state()
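
Editor's note: per the comment at 366-371, the split leaves 'prealloc' with the first half and 'orig' with the second; the caller allocates 'prealloc' up front so the split itself cannot fail on memory. A sketch using the same illustrative struct as before:

    /* Caller guarantees orig->start < split_at <= orig->end.  Afterwards
     * prealloc covers [orig->start, split_at - 1] and orig keeps
     * [split_at, orig->end]; both halves carry the same bits.  btrfs then
     * rb-inserts prealloc next to orig in tree->state (line 386). */
    static void split(struct state *orig, struct state *prealloc,
                      uint64_t split_at)
    {
            prealloc->start = orig->start;
            prealloc->end = split_at - 1;
            prealloc->bits = orig->bits;
            orig->start = split_at;
    }
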
398 * forcibly remove the state from the tree (delete == 1).
401 * struct is freed and removed from the tree
403 static int clear_state_bit(struct extent_io_tree *tree, in clear_state_bit() argument
412 WARN_ON(range > tree->dirty_bytes); in clear_state_bit()
413 tree->dirty_bytes -= range; in clear_state_bit()
415 clear_state_cb(tree, state, bits); in clear_state_bit()
420 if (state->tree) { in clear_state_bit()
421 rb_erase(&state->rb_node, &tree->state); in clear_state_bit()
422 state->tree = NULL; in clear_state_bit()
428 merge_state(tree, state); in clear_state_bit()
443 * clear some bits on a range in the tree. This may require splitting
444 * or inserting elements in the tree, so the gfp mask is used to
448 the given range from the tree regardless of state (i.e. for truncate). in clear_extent_bit()
452 * This takes the tree lock, and returns < 0 on error, > 0 if any of the
455 int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, in clear_extent_bit() argument
483 spin_lock(&tree->lock); in clear_extent_bit()
492 if (cached && cached->tree && cached->start <= start && in clear_extent_bit()
506 node = tree_search(tree, start); in clear_extent_bit()
544 err = split_state(tree, state, prealloc, start); in clear_extent_bit()
550 set |= clear_state_bit(tree, state, &bits, wake); in clear_extent_bit()
566 err = split_state(tree, state, prealloc, end + 1); in clear_extent_bit()
571 set |= clear_state_bit(tree, prealloc, &bits, wake); in clear_extent_bit()
577 set |= clear_state_bit(tree, state, &bits, wake); in clear_extent_bit()
590 spin_unlock(&tree->lock); in clear_extent_bit()
599 spin_unlock(&tree->lock); in clear_extent_bit()
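
Editor's note: a worked example of why clearing may need the splits at 544 and 566, with illustrative numbers (4 KiB granularity):

    /* Suppose the tree holds one state [0, 16383] with EXTENT_DIRTY set. */
    clear_extent_bit(tree, 4096, 8191, EXTENT_DIRTY, 0, 0, NULL, GFP_NOFS);
    /* Internally:
     *   split at 4096:  [0, 4095] [4096, 16383]
     *   split at 8192:  [0, 4095] [4096, 8191] [8192, 16383]
     *   clear_state_bit() drops EXTENT_DIRTY on the middle piece; if no
     *   bits remain, it is rb_erase()d and freed, leaving a hole.
     * Returns > 0 here because a requested bit was actually set.
     */
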
605 static int wait_on_state(struct extent_io_tree *tree, in wait_on_state() argument
607 __releases(tree->lock) in wait_on_state()
608 __acquires(tree->lock) in wait_on_state()
612 spin_unlock(&tree->lock); in wait_on_state()
614 spin_lock(&tree->lock); in wait_on_state()
620 * waits for one or more bits to clear on a range in the state tree.
622 * The tree lock is taken by this function
624 int wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, int bits) in wait_extent_bit() argument
629 spin_lock(&tree->lock); in wait_extent_bit()
636 node = tree_search(tree, start); in wait_extent_bit()
648 wait_on_state(tree, state); in wait_extent_bit()
657 cond_resched_lock(&tree->lock); in wait_extent_bit()
660 spin_unlock(&tree->lock); in wait_extent_bit()
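
Editor's note: the __releases/__acquires annotations at 607-608 mark the classic sleep pattern: drop the spinlock around schedule() and re-take it, which is why wait_extent_bit() must revalidate its tree position after each wait. A plausible reconstruction of the body around the two lock lines shown (assuming extent_state carries a wait queue, state->wq):

    DEFINE_WAIT(wait);

    prepare_to_wait(&state->wq, &wait, TASK_UNINTERRUPTIBLE);
    spin_unlock(&tree->lock);       /* line 612 above */
    schedule();                     /* sleep until a bit-clearer wakes us */
    spin_lock(&tree->lock);         /* line 614 above */
    finish_wait(&state->wq, &wait);
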
664 static void set_state_bits(struct extent_io_tree *tree, in set_state_bits() argument
670 set_state_cb(tree, state, bits); in set_state_bits()
673 tree->dirty_bytes += range; in set_state_bits()
699 * set some bits on a range in the tree. This may require allocations or
706 [start, end] is inclusive. This takes the tree lock. in set_extent_bit()
709 int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, in set_extent_bit() argument
727 spin_lock(&tree->lock); in set_extent_bit()
731 state->tree) { in set_extent_bit()
740 node = tree_search(tree, start); in set_extent_bit()
744 err = insert_state(tree, prealloc, start, end, &bits); in set_extent_bit()
768 set_state_bits(tree, state, &bits); in set_extent_bit()
771 merge_state(tree, state); in set_extent_bit()
811 err = split_state(tree, state, prealloc, start); in set_extent_bit()
817 set_state_bits(tree, state, &bits); in set_extent_bit()
819 merge_state(tree, state); in set_extent_bit()
847 err = insert_state(tree, prealloc, start, this_end, in set_extent_bit()
875 err = split_state(tree, state, prealloc, end + 1); in set_extent_bit()
878 set_state_bits(tree, prealloc, &bits); in set_extent_bit()
880 merge_state(tree, prealloc); in set_extent_bit()
888 spin_unlock(&tree->lock); in set_extent_bit()
897 spin_unlock(&tree->lock); in set_extent_bit()
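
Editor's note: two calling conventions, inferred from the wrappers further down; the signature is assumed to be (tree, start, end, bits, exclusive_bits, failed_start, cached_state, mask):

    u64 failed_start;
    int err;

    /* Plain set: mark a range dirty, no exclusivity (cf. line 1098). */
    err = set_extent_bit(tree, start, end, EXTENT_DIRTY, 0, NULL,
                         NULL, GFP_NOFS);

    /* Exclusive set, as the lock helpers use it: fails with -EEXIST if
     * any part of the range already holds EXTENT_LOCKED, reporting the
     * first contended offset through failed_start. */
    err = set_extent_bit(tree, start, end, EXTENT_LOCKED, EXTENT_LOCKED,
                         &failed_start, NULL, GFP_NOFS);
    if (err == -EEXIST)
            ;   /* wait on [failed_start, end] and retry; see lock_extent_bits */
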
905 * @tree: the io tree to search
918 int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, in convert_extent_bit() argument
935 spin_lock(&tree->lock); in convert_extent_bit()
940 node = tree_search(tree, start); in convert_extent_bit()
947 err = insert_state(tree, prealloc, start, end, &bits); in convert_extent_bit()
966 set_state_bits(tree, state, &bits); in convert_extent_bit()
967 clear_state_bit(tree, state, &clear_bits, 0); in convert_extent_bit()
1004 err = split_state(tree, state, prealloc, start); in convert_extent_bit()
1010 set_state_bits(tree, state, &bits); in convert_extent_bit()
1011 clear_state_bit(tree, state, &clear_bits, 0); in convert_extent_bit()
1042 err = insert_state(tree, prealloc, start, this_end, in convert_extent_bit()
1067 err = split_state(tree, state, prealloc, end + 1); in convert_extent_bit()
1070 set_state_bits(tree, prealloc, &bits); in convert_extent_bit()
1071 clear_state_bit(tree, prealloc, &clear_bits, 0); in convert_extent_bit()
1079 spin_unlock(&tree->lock); in convert_extent_bit()
1088 spin_unlock(&tree->lock); in convert_extent_bit()
1095 int set_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end, in set_extent_dirty() argument
1098 return set_extent_bit(tree, start, end, EXTENT_DIRTY, 0, NULL, in set_extent_dirty()
1102 int set_extent_bits(struct extent_io_tree *tree, u64 start, u64 end, in set_extent_bits() argument
1105 return set_extent_bit(tree, start, end, bits, 0, NULL, in set_extent_bits()
1109 int clear_extent_bits(struct extent_io_tree *tree, u64 start, u64 end, in clear_extent_bits() argument
1112 return clear_extent_bit(tree, start, end, bits, 0, 0, NULL, mask); in clear_extent_bits()
1115 int set_extent_delalloc(struct extent_io_tree *tree, u64 start, u64 end, in set_extent_delalloc() argument
1118 return set_extent_bit(tree, start, end, in set_extent_delalloc()
1123 int clear_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end, in clear_extent_dirty() argument
1126 return clear_extent_bit(tree, start, end, in clear_extent_dirty()
1131 int set_extent_new(struct extent_io_tree *tree, u64 start, u64 end, in set_extent_new() argument
1134 return set_extent_bit(tree, start, end, EXTENT_NEW, 0, NULL, in set_extent_new()
1138 int set_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end, in set_extent_uptodate() argument
1141 return set_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, in set_extent_uptodate()
1145 static int clear_extent_uptodate(struct extent_io_tree *tree, u64 start, in clear_extent_uptodate() argument
1149 return clear_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, 0, in clear_extent_uptodate()
1157 int lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end, in lock_extent_bits() argument
1163 err = set_extent_bit(tree, start, end, EXTENT_LOCKED | bits, in lock_extent_bits()
1167 wait_extent_bit(tree, failed_start, end, EXTENT_LOCKED); in lock_extent_bits()
1177 int lock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask) in lock_extent() argument
1179 return lock_extent_bits(tree, start, end, 0, NULL, mask); in lock_extent()
1182 int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end, in try_lock_extent() argument
1188 err = set_extent_bit(tree, start, end, EXTENT_LOCKED, EXTENT_LOCKED, in try_lock_extent()
1192 clear_extent_bit(tree, start, failed_start - 1, in try_lock_extent()
1199 int unlock_extent_cached(struct extent_io_tree *tree, u64 start, u64 end, in unlock_extent_cached() argument
1202 return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, cached, in unlock_extent_cached()
1206 int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask) in unlock_extent() argument
1208 return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, NULL, in unlock_extent()
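
Editor's note: read together, the fragments above give the locking protocol: a range lock is just EXTENT_LOCKED set exclusively, and unlock clears it with wake == 1. A sketch of the retry loop that lock_extent_bits() plausibly runs (cached_state threading and error details omitted):

    while (1) {
            err = set_extent_bit(tree, start, end, EXTENT_LOCKED | bits,
                                 EXTENT_LOCKED, &failed_start,
                                 cached_state, mask);
            if (err != -EEXIST)
                    break;          /* whole range locked, or hard error */
            /* part of the range is held: sleep until it unlocks, then
             * retry from the contended offset (line 1167 above) */
            wait_extent_bit(tree, failed_start, end, EXTENT_LOCKED);
            start = failed_start;
    }
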
1213 * helper function to set both pages and extents in the tree writeback
1215 static int set_range_writeback(struct extent_io_tree *tree, u64 start, u64 end) in set_range_writeback() argument
1222 page = find_get_page(tree->mapping, index); in set_range_writeback()
1232 * return it. tree->lock must be held. NULL will be returned if in find_first_extent_bit_state()
1235 struct extent_state *find_first_extent_bit_state(struct extent_io_tree *tree, in find_first_extent_bit_state() argument
1245 node = tree_search(tree, start); in find_first_extent_bit_state()
1263 * find the first offset in the io tree with 'bits' set. zero is
1269 int find_first_extent_bit(struct extent_io_tree *tree, u64 start, in find_first_extent_bit() argument
1275 spin_lock(&tree->lock); in find_first_extent_bit()
1276 state = find_first_extent_bit_state(tree, start, bits); in find_first_extent_bit()
1282 spin_unlock(&tree->lock); in find_first_extent_bit()
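
Editor's note: per the comment at 1263, zero means a range was found, which makes the usual caller a simple cursor loop (caller-side sketch; start_ret/end_ret receive the inclusive range):

    u64 found_start = 0, found_end;

    while (!find_first_extent_bit(tree, found_start, &found_start,
                                  &found_end, EXTENT_DIRTY)) {
            /* process [found_start, found_end] ... */
            found_start = found_end + 1;    /* resume after this range */
    }
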
1290 * 1 is returned if we find something, 0 if nothing was in the tree
1292 static noinline u64 find_delalloc_range(struct extent_io_tree *tree, in find_delalloc_range() argument
1302 spin_lock(&tree->lock); in find_delalloc_range()
1308 node = tree_search(tree, cur_start); in find_delalloc_range()
1342 spin_unlock(&tree->lock); in find_delalloc_range()
1442 * 1 is returned if we find something, 0 if nothing was in the tree
1445 struct extent_io_tree *tree, in find_lock_delalloc_range() argument
1461 found = find_delalloc_range(tree, &delalloc_start, &delalloc_end, in find_lock_delalloc_range()
1506 lock_extent_bits(tree, delalloc_start, delalloc_end, in find_lock_delalloc_range()
1510 ret = test_range_bit(tree, delalloc_start, delalloc_end, in find_lock_delalloc_range()
1513 unlock_extent_cached(tree, delalloc_start, delalloc_end, in find_lock_delalloc_range()
1528 struct extent_io_tree *tree, in extent_clear_unlock_delalloc() argument
1548 clear_extent_bit(tree, start, end, clear_bits, 1, 0, NULL, GFP_NOFS); in extent_clear_unlock_delalloc()
1585 * count the number of bytes in the tree that have a given bit(s)
1589 u64 count_range_bits(struct extent_io_tree *tree, in count_range_bits() argument
1605 spin_lock(&tree->lock); in count_range_bits()
1607 total_bytes = tree->dirty_bytes; in count_range_bits()
1614 node = tree_search(tree, cur_start); in count_range_bits()
1642 spin_unlock(&tree->lock); in count_range_bits()
1647 * set the private field for a given byte offset in the tree. If there isn't
1650 int set_state_private(struct extent_io_tree *tree, u64 start, u64 private) in set_state_private() argument
1656 spin_lock(&tree->lock); in set_state_private()
1661 node = tree_search(tree, start); in set_state_private()
1673 spin_unlock(&tree->lock); in set_state_private()
1677 int get_state_private(struct extent_io_tree *tree, u64 start, u64 *private) in get_state_private() argument
1683 spin_lock(&tree->lock); in get_state_private()
1688 node = tree_search(tree, start); in get_state_private()
1700 spin_unlock(&tree->lock); in get_state_private()
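
Editor's note: the private field is a single u64 stashed on whichever state begins exactly at 'start'; bio_readpage_error() below uses it to remember an IO failure record for that offset. A hedged caller-side sketch; 'failrec' is illustrative, and both calls are assumed to return nonzero when no state starts at that exact offset:

    u64 private;

    if (get_state_private(tree, start, &private) == 0) {
            /* found: 'private' holds whatever was stored, e.g. a
             * pointer-sized record cast to u64 */
    } else {
            set_state_private(tree, start, (u64)(unsigned long)failrec);
    }
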
1705 * searches a range in the state tree for a given mask.
1706 * If 'filled' == 1, this returns 1 only if every extent in the tree
1710 int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end, in test_range_bit() argument
1717 spin_lock(&tree->lock); in test_range_bit()
1718 if (cached && cached->tree && cached->start <= start && in test_range_bit()
1722 node = tree_search(tree, start); in test_range_bit()
1756 spin_unlock(&tree->lock); in test_range_bit()
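
Editor's note: 'filled' selects between all-or-any semantics, which is exactly how the two page helpers below differ:

    /* filled == 1: true only if EVERY byte of [start, end] has the bit
     * set (check_page_uptodate() below). */
    if (test_range_bit(tree, start, end, EXTENT_UPTODATE, 1, NULL))
            SetPageUptodate(page);

    /* filled == 0: true if ANY part of the range has the bit set
     * (check_page_locked() below tests for no remaining locks). */
    if (!test_range_bit(tree, start, end, EXTENT_LOCKED, 0, NULL))
            unlock_page(page);
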
1762 * extents in the tree for that page are up to date
1764 static int check_page_uptodate(struct extent_io_tree *tree, in check_page_uptodate() argument
1769 if (test_range_bit(tree, start, end, EXTENT_UPTODATE, 1, NULL)) in check_page_uptodate()
1775 * helper function to unlock a page if all the extents in the tree
1778 static int check_page_locked(struct extent_io_tree *tree, in check_page_locked() argument
1783 if (!test_range_bit(tree, start, end, EXTENT_LOCKED, 0, NULL)) in check_page_locked()
1790 * in the tree for that page are done with writeback
1792 static int check_page_writeback(struct extent_io_tree *tree, in check_page_writeback() argument
1916 * each time an IO finishes, we do a fast check in the IO failure tree
1995 struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree; in bio_readpage_error() local
2047 /* set the bits in the private failure tree */ in bio_readpage_error()
2053 /* set the bits in the inode's tree */ in bio_readpage_error()
2055 ret = set_extent_bits(tree, start, end, EXTENT_DAMAGED, in bio_readpage_error()
2091 spin_lock(&tree->lock); in bio_readpage_error()
2092 state = find_first_extent_bit_state(tree, failrec->start, in bio_readpage_error()
2096 spin_unlock(&tree->lock); in bio_readpage_error()
2156 ret = tree->ops->submit_bio_hook(inode, read_mode, bio, in bio_readpage_error()
2167 struct extent_io_tree *tree; in end_extent_writepage() local
2170 tree = &BTRFS_I(page->mapping->host)->io_tree; in end_extent_writepage()
2172 if (tree->ops && tree->ops->writepage_end_io_hook) { in end_extent_writepage()
2173 ret = tree->ops->writepage_end_io_hook(page, start, in end_extent_writepage()
2179 if (!uptodate && tree->ops && in end_extent_writepage()
2180 tree->ops->writepage_io_failed_hook) { in end_extent_writepage()
2181 ret = tree->ops->writepage_io_failed_hook(NULL, page, in end_extent_writepage()
2189 clear_extent_uptodate(tree, start, end, NULL, GFP_NOFS); in end_extent_writepage()
2199 * clear the writeback bits in the extent tree for this IO
2202 * Scheduling is not allowed, so the extent state tree is expected
2208 struct extent_io_tree *tree; in end_bio_extent_writepage() local
2215 tree = &BTRFS_I(page->mapping->host)->io_tree; in end_bio_extent_writepage()
2235 check_page_writeback(tree, page); in end_bio_extent_writepage()
2245 * set the page up to date if all extents in the tree are uptodate
2246 * clear the lock bit in the extent tree
2249 * Scheduling is not allowed, so the extent state tree is expected
2257 struct extent_io_tree *tree; in end_bio_extent_readpage() local
2274 tree = &BTRFS_I(page->mapping->host)->io_tree; in end_bio_extent_readpage()
2288 spin_lock(&tree->lock); in end_bio_extent_readpage()
2289 state = find_first_extent_bit_state(tree, start, EXTENT_LOCKED); in end_bio_extent_readpage()
2297 spin_unlock(&tree->lock); in end_bio_extent_readpage()
2299 if (uptodate && tree->ops && tree->ops->readpage_end_io_hook) { in end_bio_extent_readpage()
2300 ret = tree->ops->readpage_end_io_hook(page, start, end, in end_bio_extent_readpage()
2331 if (tree->ops && tree->ops->readpage_io_failed_hook) { in end_bio_extent_readpage()
2332 ret = tree->ops->readpage_io_failed_hook( in end_bio_extent_readpage()
2341 set_extent_uptodate(tree, start, end, &cached, in end_bio_extent_readpage()
2344 unlock_extent_cached(tree, start, end, &cached, GFP_ATOMIC); in end_bio_extent_readpage()
2356 check_page_uptodate(tree, page); in end_bio_extent_readpage()
2361 check_page_locked(tree, page); in end_bio_extent_readpage()
2395 struct extent_io_tree *tree = bio->bi_private; in submit_one_bio() local
2404 if (tree->ops && tree->ops->submit_bio_hook) in submit_one_bio()
2405 ret = tree->ops->submit_bio_hook(page->mapping->host, rw, bio, in submit_one_bio()
2416 static int submit_extent_page(int rw, struct extent_io_tree *tree, in submit_extent_page() argument
2444 (tree->ops && tree->ops->merge_bio_hook && in submit_extent_page()
2445 tree->ops->merge_bio_hook(page, offset, page_size, bio, in submit_extent_page()
2466 bio->bi_private = tree; in submit_extent_page()
2493 * into the tree that are removed when the IO is done (by the end_io
2496 static int __extent_read_full_page(struct extent_io_tree *tree, in __extent_read_full_page() argument
2534 lock_extent(tree, start, end, GFP_NOFS); in __extent_read_full_page()
2538 unlock_extent(tree, start, end, GFP_NOFS); in __extent_read_full_page()
2565 set_extent_uptodate(tree, cur, cur + iosize - 1, in __extent_read_full_page()
2567 unlock_extent_cached(tree, cur, cur + iosize - 1, in __extent_read_full_page()
2575 unlock_extent(tree, cur, end, GFP_NOFS); in __extent_read_full_page()
2615 set_extent_uptodate(tree, cur, cur + iosize - 1, in __extent_read_full_page()
2617 unlock_extent_cached(tree, cur, cur + iosize - 1, in __extent_read_full_page()
2624 if (test_range_bit(tree, cur, cur_end, in __extent_read_full_page()
2626 check_page_uptodate(tree, page); in __extent_read_full_page()
2627 unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS); in __extent_read_full_page()
2637 unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS); in __extent_read_full_page()
2644 if (tree->ops && tree->ops->readpage_io_hook) { in __extent_read_full_page()
2645 ret = tree->ops->readpage_io_hook(page, cur, in __extent_read_full_page()
2651 ret = submit_extent_page(READ, tree, page, in __extent_read_full_page()
2674 int extent_read_full_page(struct extent_io_tree *tree, struct page *page, in extent_read_full_page() argument
2681 ret = __extent_read_full_page(tree, page, get_extent, &bio, mirror_num, in extent_read_full_page()
2700 * records are inserted to lock ranges in the tree, and as dirty areas
2709 struct extent_io_tree *tree = epd->tree; in __extent_writepage() local
2769 if (!tree->ops || !tree->ops->fill_delalloc) in __extent_writepage()
2784 nr_delalloc = find_lock_delalloc_range(inode, tree, in __extent_writepage()
2793 ret = tree->ops->fill_delalloc(inode, page, in __extent_writepage()
2832 if (tree->ops && tree->ops->writepage_start_hook) { in __extent_writepage()
2833 ret = tree->ops->writepage_start_hook(page, start, in __extent_writepage()
2856 if (tree->ops && tree->ops->writepage_end_io_hook) in __extent_writepage()
2857 tree->ops->writepage_end_io_hook(page, start, in __extent_writepage()
2866 if (tree->ops && tree->ops->writepage_end_io_hook) in __extent_writepage()
2867 tree->ops->writepage_end_io_hook(page, cur, in __extent_writepage()
2900 if (!compressed && tree->ops && in __extent_writepage()
2901 tree->ops->writepage_end_io_hook) in __extent_writepage()
2902 tree->ops->writepage_end_io_hook(page, cur, in __extent_writepage()
2918 if (0 && !test_range_bit(tree, cur, cur + iosize - 1, in __extent_writepage()
2925 if (tree->ops && tree->ops->writepage_io_hook) { in __extent_writepage()
2926 ret = tree->ops->writepage_io_hook(page, cur, in __extent_writepage()
2936 set_range_writeback(tree, cur, cur + iosize - 1); in __extent_writepage()
2944 ret = submit_extent_page(write_flags, tree, page, in __extent_writepage()
2986 static int extent_write_cache_pages(struct extent_io_tree *tree, in extent_write_cache_pages() argument
3034 if (tree->ops && in extent_write_cache_pages()
3035 tree->ops->write_cache_pages_lock_hook) { in extent_write_cache_pages()
3036 tree->ops->write_cache_pages_lock_hook(page, in extent_write_cache_pages()
3116 int extent_write_full_page(struct extent_io_tree *tree, struct page *page, in extent_write_full_page() argument
3123 .tree = tree, in extent_write_full_page()
3135 int extent_write_locked_range(struct extent_io_tree *tree, struct inode *inode, in extent_write_locked_range() argument
3147 .tree = tree, in extent_write_locked_range()
3164 if (tree->ops && tree->ops->writepage_end_io_hook) in extent_write_locked_range()
3165 tree->ops->writepage_end_io_hook(page, start, in extent_write_locked_range()
3178 int extent_writepages(struct extent_io_tree *tree, in extent_writepages() argument
3186 .tree = tree, in extent_writepages()
3192 ret = extent_write_cache_pages(tree, mapping, wbc, in extent_writepages()
3199 int extent_readpages(struct extent_io_tree *tree, in extent_readpages() argument
3215 __extent_read_full_page(tree, page, get_extent, in extent_readpages()
3229 * records from the tree
3231 int extent_invalidatepage(struct extent_io_tree *tree, in extent_invalidatepage() argument
3243 lock_extent_bits(tree, start, end, 0, &cached_state, GFP_NOFS); in extent_invalidatepage()
3245 clear_extent_bit(tree, start, end, in extent_invalidatepage()
3258 struct extent_io_tree *tree, struct page *page, in try_release_extent_state() argument
3265 if (test_range_bit(tree, start, end, in try_release_extent_state()
3275 ret = clear_extent_bit(tree, start, end, in try_release_extent_state()
3296 struct extent_io_tree *tree, struct page *page, in try_release_extent_mapping() argument
3320 if (!test_range_bit(tree, em->start, in try_release_extent_mapping()
3325 /* once for the rb tree */ in try_release_extent_mapping()
3335 return try_release_extent_state(map, tree, page, mask); in try_release_extent_mapping()
3564 * be in the radix tree. in extent_buffer_page()
3579 static struct extent_buffer *__alloc_extent_buffer(struct extent_io_tree *tree, in __alloc_extent_buffer() argument
3659 struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree, in alloc_extent_buffer() argument
3669 struct address_space *mapping = tree->mapping; in alloc_extent_buffer()
3674 eb = radix_tree_lookup(&tree->buffer, start >> PAGE_CACHE_SHIFT); in alloc_extent_buffer()
3682 eb = __alloc_extent_buffer(tree, start, len, GFP_NOFS); in alloc_extent_buffer()
3729 spin_lock(&tree->buffer_lock); in alloc_extent_buffer()
3730 ret = radix_tree_insert(&tree->buffer, start >> PAGE_CACHE_SHIFT, eb); in alloc_extent_buffer()
3732 exists = radix_tree_lookup(&tree->buffer, in alloc_extent_buffer()
3736 spin_unlock(&tree->buffer_lock); in alloc_extent_buffer()
3740 /* add one reference for the tree */ in alloc_extent_buffer()
3742 spin_unlock(&tree->buffer_lock); in alloc_extent_buffer()
3751 * after the extent buffer is in the radix tree so in alloc_extent_buffer()
3770 struct extent_buffer *find_extent_buffer(struct extent_io_tree *tree, in find_extent_buffer() argument
3776 eb = radix_tree_lookup(&tree->buffer, start >> PAGE_CACHE_SHIFT); in find_extent_buffer()
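
Editor's note: the pattern in alloc_extent_buffer() above is the standard optimistic insert: allocate the buffer without the lock, insert under tree->buffer_lock, and on -EEXIST fall back to the buffer that won the race. The GFP_ATOMIC init at line 111 is what makes insertion under the spinlock legal. Condensed from the fragments (error handling elided):

    spin_lock(&tree->buffer_lock);
    ret = radix_tree_insert(&tree->buffer, start >> PAGE_CACHE_SHIFT, eb);
    if (ret == -EEXIST) {
            exists = radix_tree_lookup(&tree->buffer,
                                       start >> PAGE_CACHE_SHIFT);
            /* drop our eb, take a reference on 'exists' and use it */
    }
    /* on success, one extra reference belongs to the tree (line 3740) */
    spin_unlock(&tree->buffer_lock);
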
3798 int clear_extent_buffer_dirty(struct extent_io_tree *tree, in clear_extent_buffer_dirty() argument
3833 int set_extent_buffer_dirty(struct extent_io_tree *tree, in set_extent_buffer_dirty() argument
3863 int clear_extent_buffer_uptodate(struct extent_io_tree *tree, in clear_extent_buffer_uptodate() argument
3874 clear_extent_uptodate(tree, eb->start, eb->start + eb->len - 1, in clear_extent_buffer_uptodate()
3885 int set_extent_buffer_uptodate(struct extent_io_tree *tree, in set_extent_buffer_uptodate() argument
3895 set_extent_uptodate(tree, eb->start, eb->start + eb->len - 1, in set_extent_buffer_uptodate()
3903 check_page_uptodate(tree, page); in set_extent_buffer_uptodate()
3911 int extent_range_uptodate(struct extent_io_tree *tree, in extent_range_uptodate() argument
3921 ret = test_range_bit(tree, start, end, in extent_range_uptodate()
3928 page = find_get_page(tree->mapping, index); in extent_range_uptodate()
3942 int extent_buffer_uptodate(struct extent_io_tree *tree, in extent_buffer_uptodate() argument
3956 ret = test_range_bit(tree, eb->start, eb->start + eb->len - 1, in extent_buffer_uptodate()
3973 int read_extent_buffer_pages(struct extent_io_tree *tree, in read_extent_buffer_pages() argument
3993 if (test_range_bit(tree, eb->start, eb->start + eb->len - 1, in read_extent_buffer_pages()
4041 err = __extent_read_full_page(tree, page, in read_extent_buffer_pages()
4419 int try_release_extent_buffer(struct extent_io_tree *tree, struct page *page) in try_release_extent_buffer() argument
4425 spin_lock(&tree->buffer_lock); in try_release_extent_buffer()
4426 eb = radix_tree_lookup(&tree->buffer, start >> PAGE_CACHE_SHIFT); in try_release_extent_buffer()
4428 spin_unlock(&tree->buffer_lock); in try_release_extent_buffer()
4446 radix_tree_delete(&tree->buffer, start >> PAGE_CACHE_SHIFT); in try_release_extent_buffer()
4448 spin_unlock(&tree->buffer_lock); in try_release_extent_buffer()