Lines Matching +full:parent +full:-locked (fs/btrfs/defrag.c)
1 // SPDX-License-Identifier: GPL-2.0
8 #include "disk-io.h"
9 #include "print-tree.h"
14 #include "delalloc-space.h"
17 #include "file-item.h"
42 * This value is different for compressed/non-compressed extents, thus
52 if (defrag1->root > defrag2->root) in __compare_inode_defrag()
54 else if (defrag1->root < defrag2->root) in __compare_inode_defrag()
55 return -1; in __compare_inode_defrag()
56 else if (defrag1->ino > defrag2->ino) in __compare_inode_defrag()
58 else if (defrag1->ino < defrag2->ino) in __compare_inode_defrag()
59 return -1; in __compare_inode_defrag()
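The fragments above are a standard two-key comparator: order by root id first, then by inode number. A minimal standalone sketch of the same pattern (the struct and function names here are illustrative, not the kernel's):

    #include <stdint.h>

    /* Illustrative stand-ins for the key fields of struct inode_defrag. */
    struct defrag_key {
            uint64_t root;  /* id of the tree the inode lives in */
            uint64_t ino;   /* inode number within that tree */
    };

    /* Order by root first, then by ino; returns <0, 0, or >0. */
    static int compare_defrag_key(const struct defrag_key *a,
                                  const struct defrag_key *b)
    {
            if (a->root != b->root)
                    return a->root > b->root ? 1 : -1;
            if (a->ino != b->ino)
                    return a->ino > b->ino ? 1 : -1;
            return 0;
    }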
76 struct btrfs_fs_info *fs_info = inode->root->fs_info; in __btrfs_add_inode_defrag()
79 struct rb_node *parent = NULL; in __btrfs_add_inode_defrag() local
82 p = &fs_info->defrag_inodes.rb_node; in __btrfs_add_inode_defrag()
84 parent = *p; in __btrfs_add_inode_defrag()
85 entry = rb_entry(parent, struct inode_defrag, rb_node); in __btrfs_add_inode_defrag()
89 p = &parent->rb_left; in __btrfs_add_inode_defrag()
91 p = &parent->rb_right; in __btrfs_add_inode_defrag()
98 if (defrag->transid < entry->transid) in __btrfs_add_inode_defrag()
99 entry->transid = defrag->transid; in __btrfs_add_inode_defrag()
100 entry->extent_thresh = min(defrag->extent_thresh, in __btrfs_add_inode_defrag()
101 entry->extent_thresh); in __btrfs_add_inode_defrag()
102 return -EEXIST; in __btrfs_add_inode_defrag()
105 set_bit(BTRFS_INODE_IN_DEFRAG, &inode->runtime_flags); in __btrfs_add_inode_defrag()
106 rb_link_node(&defrag->rb_node, parent, p); in __btrfs_add_inode_defrag()
107 rb_insert_color(&defrag->rb_node, &fs_info->defrag_inodes); in __btrfs_add_inode_defrag()
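This is the kernel's canonical rbtree insertion walk: descend from the root remembering the last parent, stop with -EEXIST on a duplicate key (the caller then merges transid/extent_thresh into the existing entry, as the fragments show), otherwise link the new node at the found leaf position and rebalance. A generic kernel-style sketch of the bare pattern (my_node/my_rb_insert are made-up names):

    #include <linux/rbtree.h>
    #include <linux/errno.h>

    struct my_node {
            struct rb_node rb;
            /* key fields go here */
    };

    static int my_rb_insert(struct rb_root *root, struct my_node *ins,
                            int (*cmp)(const struct my_node *,
                                       const struct my_node *))
    {
            struct rb_node **p = &root->rb_node;
            struct rb_node *parent = NULL;

            while (*p) {
                    struct my_node *entry;
                    int ret;

                    parent = *p;
                    entry = rb_entry(parent, struct my_node, rb);
                    ret = cmp(ins, entry);
                    if (ret < 0)
                            p = &parent->rb_left;
                    else if (ret > 0)
                            p = &parent->rb_right;
                    else
                            return -EEXIST; /* caller may merge into 'entry' */
            }
            rb_link_node(&ins->rb, parent, p);  /* splice in as a leaf */
            rb_insert_color(&ins->rb, root);    /* recolor/rebalance */
            return 0;
    }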
128 struct btrfs_root *root = inode->root; in btrfs_add_inode_defrag()
129 struct btrfs_fs_info *fs_info = root->fs_info; in btrfs_add_inode_defrag()
137 if (test_bit(BTRFS_INODE_IN_DEFRAG, &inode->runtime_flags)) in btrfs_add_inode_defrag()
141 transid = trans->transid; in btrfs_add_inode_defrag()
143 transid = inode->root->last_trans; in btrfs_add_inode_defrag()
147 return -ENOMEM; in btrfs_add_inode_defrag()
149 defrag->ino = btrfs_ino(inode); in btrfs_add_inode_defrag()
150 defrag->transid = transid; in btrfs_add_inode_defrag()
151 defrag->root = root->root_key.objectid; in btrfs_add_inode_defrag()
152 defrag->extent_thresh = extent_thresh; in btrfs_add_inode_defrag()
154 spin_lock(&fs_info->defrag_inodes_lock); in btrfs_add_inode_defrag()
155 if (!test_bit(BTRFS_INODE_IN_DEFRAG, &inode->runtime_flags)) { in btrfs_add_inode_defrag()
158 * and then re-read this inode, this new inode doesn't have in btrfs_add_inode_defrag()
167 spin_unlock(&fs_info->defrag_inodes_lock); in btrfs_add_inode_defrag()
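Note the double-checked pattern around BTRFS_INODE_IN_DEFRAG: a cheap, racy test_bit() up front avoids the allocation when the inode is already queued, then a second test under defrag_inodes_lock decides authoritatively, freeing the pre-allocated entry if the insert loses the race (or if __btrfs_add_inode_defrag() returns -EEXIST). A compressed sketch of that shape; mainline allocates from the btrfs_inode_defrag_cachep kmem_cache as shown here:

    if (test_bit(BTRFS_INODE_IN_DEFRAG, &inode->runtime_flags))
            return 0;               /* fast path: already queued */

    defrag = kmem_cache_zalloc(btrfs_inode_defrag_cachep, GFP_NOFS);
    if (!defrag)
            return -ENOMEM;
    /* fill in ino, transid, root and extent_thresh here */

    spin_lock(&fs_info->defrag_inodes_lock);
    if (!test_bit(BTRFS_INODE_IN_DEFRAG, &inode->runtime_flags)) {
            /* authoritative re-check under the lock */
            if (__btrfs_add_inode_defrag(inode, defrag))
                    kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
    } else {
            kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
    }
    spin_unlock(&fs_info->defrag_inodes_lock);
    return 0;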
181 struct rb_node *parent = NULL; in btrfs_pick_defrag_inode() local
187 spin_lock(&fs_info->defrag_inodes_lock); in btrfs_pick_defrag_inode()
188 p = fs_info->defrag_inodes.rb_node; in btrfs_pick_defrag_inode()
190 parent = p; in btrfs_pick_defrag_inode()
191 entry = rb_entry(parent, struct inode_defrag, rb_node); in btrfs_pick_defrag_inode()
195 p = parent->rb_left; in btrfs_pick_defrag_inode()
197 p = parent->rb_right; in btrfs_pick_defrag_inode()
202 if (parent && __compare_inode_defrag(&tmp, entry) > 0) { in btrfs_pick_defrag_inode()
203 parent = rb_next(parent); in btrfs_pick_defrag_inode()
204 if (parent) in btrfs_pick_defrag_inode()
205 entry = rb_entry(parent, struct inode_defrag, rb_node); in btrfs_pick_defrag_inode()
211 rb_erase(parent, &fs_info->defrag_inodes); in btrfs_pick_defrag_inode()
212 spin_unlock(&fs_info->defrag_inodes_lock); in btrfs_pick_defrag_inode()
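btrfs_pick_defrag_inode() is a ceiling lookup: it wants the smallest queued entry at or after the (root, ino) cursor. The descent remembers the last node visited; when the walk falls off the tree, that node is either the floor or the ceiling of the key, so if it compares below the key the answer is its in-order successor via rb_next(). A standalone sketch, reusing the illustrative my_node/cmp names from the earlier sketches:

    /* Return the smallest entry with key >= @key, or NULL if none. */
    static struct my_node *my_rb_ceiling(struct rb_root *root,
                                         const struct my_node *key,
                                         int (*cmp)(const struct my_node *,
                                                    const struct my_node *))
    {
            struct rb_node *parent = NULL;
            struct rb_node *p = root->rb_node;
            struct my_node *entry = NULL;
            int ret;

            while (p) {
                    parent = p;
                    entry = rb_entry(parent, struct my_node, rb);
                    ret = cmp(key, entry);
                    if (ret < 0)
                            p = parent->rb_left;
                    else if (ret > 0)
                            p = parent->rb_right;
                    else
                            return entry;   /* exact match */
            }
            if (!parent)
                    return NULL;            /* empty tree */
            if (cmp(key, entry) > 0) {
                    /* last node is the floor; step to the successor */
                    parent = rb_next(parent);
                    if (!parent)
                            return NULL;    /* cursor is past the last entry */
                    entry = rb_entry(parent, struct my_node, rb);
            }
            return entry;
    }

The caller then erases the picked node from the tree under the same spinlock, as the rb_erase() fragment above shows.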
221 spin_lock(&fs_info->defrag_inodes_lock); in btrfs_cleanup_defrag_inodes()
222 node = rb_first(&fs_info->defrag_inodes); in btrfs_cleanup_defrag_inodes()
224 rb_erase(node, &fs_info->defrag_inodes); in btrfs_cleanup_defrag_inodes()
228 cond_resched_lock(&fs_info->defrag_inodes_lock); in btrfs_cleanup_defrag_inodes()
230 node = rb_first(&fs_info->defrag_inodes); in btrfs_cleanup_defrag_inodes()
232 spin_unlock(&fs_info->defrag_inodes_lock); in btrfs_cleanup_defrag_inodes()
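btrfs_cleanup_defrag_inodes() drains the whole tree with the usual rb_first()/rb_erase() loop, calling cond_resched_lock() so a large backlog does not monopolize the CPU while the spinlock is held. The shape, sketched generically (mainline frees through the inode_defrag kmem_cache rather than kfree()):

    spin_lock(&lock);
    node = rb_first(&tree);
    while (node) {
            struct my_node *entry = rb_entry(node, struct my_node, rb);

            rb_erase(node, &tree);
            kfree(entry);
            /* may drop and retake the lock if a reschedule is due */
            cond_resched_lock(&lock);
            /* re-read the leftmost node: the tree may have changed */
            node = rb_first(&tree);
    }
    spin_unlock(&lock);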
247 if (test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state)) in __btrfs_run_defrag_inode()
253 inode_root = btrfs_get_fs_root(fs_info, defrag->root, true); in __btrfs_run_defrag_inode()
259 inode = btrfs_iget(fs_info->sb, defrag->ino, inode_root); in __btrfs_run_defrag_inode()
272 clear_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags); in __btrfs_run_defrag_inode()
274 range.len = (u64)-1; in __btrfs_run_defrag_inode()
276 range.extent_thresh = defrag->extent_thresh; in __btrfs_run_defrag_inode()
278 sb_start_write(fs_info->sb); in __btrfs_run_defrag_inode()
279 ret = btrfs_defrag_file(inode, NULL, &range, defrag->transid, in __btrfs_run_defrag_inode()
281 sb_end_write(fs_info->sb); in __btrfs_run_defrag_inode()
287 cur = max(cur + fs_info->sectorsize, range.start); in __btrfs_run_defrag_inode()
304 atomic_inc(&fs_info->defrag_running); in btrfs_run_defrag_inodes()
307 if (test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state)) in btrfs_run_defrag_inodes()
325 first_ino = defrag->ino + 1; in btrfs_run_defrag_inodes()
326 root_objectid = defrag->root; in btrfs_run_defrag_inodes()
330 atomic_dec(&fs_info->defrag_running); in btrfs_run_defrag_inodes()
336 wake_up(&fs_info->transaction_wait); in btrfs_run_defrag_inodes()
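btrfs_run_defrag_inodes() ties the queue together: repeatedly pick the next entry at or after a (root, ino) cursor, bump the cursor past it so progress is strictly forward, and run the defrag. Roughly (a sketch that elides mainline's wrap-around retry and its auto-defrag enablement checks):

    atomic_inc(&fs_info->defrag_running);
    while (1) {
            struct inode_defrag *defrag;

            if (test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state))
                    break;  /* pause the auto defragger across remount */

            defrag = btrfs_pick_defrag_inode(fs_info, root_objectid, first_ino);
            if (!defrag)
                    break;

            /* advance the cursor so the next pick moves forward */
            first_ino = defrag->ino + 1;
            root_objectid = defrag->root;

            __btrfs_run_defrag_inode(fs_info, defrag);
    }
    atomic_dec(&fs_info->defrag_running);
    wake_up(&fs_info->transaction_wait);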
345 if (blocknr < other && other - (blocknr + blocksize) < SZ_32K) in close_blocks()
347 if (blocknr > other && blocknr - (other + blocksize) < SZ_32K) in close_blocks()
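close_blocks() asks whether two tree blocks sit within 32KiB of each other on disk, in either order. The two one-sided tests are equivalent to this symmetric predicate (SZ_32K comes from <linux/sizes.h>; equal block numbers fall through to false, matching the function's final return in mainline):

    /* True when the gap between the two blocks is under 32KiB. */
    static bool blocks_are_close(u64 blocknr, u64 other, u32 blocksize)
    {
            if (blocknr < other)
                    return other - (blocknr + blocksize) < SZ_32K;
            if (blocknr > other)
                    return blocknr - (other + blocksize) < SZ_32K;
            return false;
    }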
358 struct extent_buffer *parent, in btrfs_realloc_node() argument
362 struct btrfs_fs_info *fs_info = root->fs_info; in btrfs_realloc_node()
363 const u32 blocksize = fs_info->nodesize; in btrfs_realloc_node()
364 const int end_slot = btrfs_header_nritems(parent) - 1; in btrfs_realloc_node()
376 if (unlikely(trans->transaction != fs_info->running_transaction || in btrfs_realloc_node()
377 trans->transid != fs_info->generation)) { in btrfs_realloc_node()
378 btrfs_abort_transaction(trans, -EUCLEAN); in btrfs_realloc_node()
380 "unexpected transaction when attempting to reallocate parent %llu for root %llu, transaction %llu r… in btrfs_realloc_node()
381 parent->start, btrfs_root_id(root), trans->transid, in btrfs_realloc_node()
382 fs_info->running_transaction->transid, in btrfs_realloc_node()
383 fs_info->generation); in btrfs_realloc_node()
384 return -EUCLEAN; in btrfs_realloc_node()
387 if (btrfs_header_nritems(parent) <= 1) in btrfs_realloc_node()
397 btrfs_node_key(parent, &disk_key, i); in btrfs_realloc_node()
402 blocknr = btrfs_node_blockptr(parent, i); in btrfs_realloc_node()
407 other = btrfs_node_blockptr(parent, i - 1); in btrfs_realloc_node()
411 other = btrfs_node_blockptr(parent, i + 1); in btrfs_realloc_node()
419 cur = btrfs_read_node_slot(parent, i); in btrfs_realloc_node()
426 ret = btrfs_force_cow_block(trans, root, cur, parent, i, in btrfs_realloc_node()
429 (end_slot - i) * blocksize), in btrfs_realloc_node()
436 search_start = cur->start; in btrfs_realloc_node()
437 last_block = cur->start; in btrfs_realloc_node()
462 if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state)) in btrfs_defrag_leaves()
467 ret = -ENOMEM; in btrfs_defrag_leaves()
471 level = btrfs_header_level(root->node); in btrfs_defrag_leaves()
476 if (root->defrag_progress.objectid == 0) { in btrfs_defrag_leaves()
482 root->defrag_max.objectid = 0; in btrfs_defrag_leaves()
484 btrfs_node_key_to_cpu(root_node, &root->defrag_max, in btrfs_defrag_leaves()
485 nritems - 1); in btrfs_defrag_leaves()
490 memcpy(&key, &root->defrag_progress, sizeof(key)); in btrfs_defrag_leaves()
493 path->keep_locks = 1; in btrfs_defrag_leaves()
505 * leaves from path->nodes[1], so set lowest_level to 1 to avoid later in btrfs_defrag_leaves()
506 * a deadlock (attempting to write lock an already write locked leaf). in btrfs_defrag_leaves()
508 path->lowest_level = 1; in btrfs_defrag_leaves()
515 if (!path->nodes[1]) { in btrfs_defrag_leaves()
520 * The node at level 1 must always be locked when our path has in btrfs_defrag_leaves()
522 * path->slots[1]. in btrfs_defrag_leaves()
524 BUG_ON(path->locks[1] == 0); in btrfs_defrag_leaves()
526 path->nodes[1], 0, in btrfs_defrag_leaves()
528 &root->defrag_progress); in btrfs_defrag_leaves()
530 WARN_ON(ret == -EAGAIN); in btrfs_defrag_leaves()
536 * without COWing, this is because even with path->keep_locks = 1, in btrfs_defrag_leaves()
538 * node when path->slots[node_level - 1] does not point to the last in btrfs_defrag_leaves()
542 path->slots[1] = btrfs_header_nritems(path->nodes[1]); in btrfs_defrag_leaves()
546 memcpy(&root->defrag_progress, &key, sizeof(key)); in btrfs_defrag_leaves()
547 ret = -EAGAIN; in btrfs_defrag_leaves()
551 if (ret == -EAGAIN) { in btrfs_defrag_leaves()
552 if (root->defrag_max.objectid > root->defrag_progress.objectid) in btrfs_defrag_leaves()
554 if (root->defrag_max.type > root->defrag_progress.type) in btrfs_defrag_leaves()
556 if (root->defrag_max.offset > root->defrag_progress.offset) in btrfs_defrag_leaves()
561 if (ret != -EAGAIN) in btrfs_defrag_leaves()
562 memset(&root->defrag_progress, 0, in btrfs_defrag_leaves()
563 sizeof(root->defrag_progress)); in btrfs_defrag_leaves()
573 struct btrfs_fs_info *fs_info = root->fs_info; in btrfs_defrag_root()
576 if (test_and_set_bit(BTRFS_ROOT_DEFRAG_RUNNING, &root->state)) in btrfs_defrag_root()
594 if (btrfs_fs_closing(fs_info) || ret != -EAGAIN) in btrfs_defrag_root()
599 ret = -EAGAIN; in btrfs_defrag_root()
603 clear_bit(BTRFS_ROOT_DEFRAG_RUNNING, &root->state); in btrfs_defrag_root()
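btrfs_defrag_root() drives btrfs_defrag_leaves() one transaction at a time: each pass COWs what it can, saves its cursor in root->defrag_progress, and returns -EAGAIN until the tree walk completes; the BTRFS_ROOT_DEFRAG_RUNNING bit keeps concurrent passes from racing. A sketch of the driver loop under that reading (mainline additionally checks btrfs_defrag_cancelled() and calls cond_resched()):

    if (test_and_set_bit(BTRFS_ROOT_DEFRAG_RUNNING, &root->state))
            return 0;

    while (1) {
            struct btrfs_trans_handle *trans;

            trans = btrfs_start_transaction(root, 0);
            if (IS_ERR(trans)) {
                    ret = PTR_ERR(trans);
                    break;
            }

            ret = btrfs_defrag_leaves(trans, root);

            btrfs_end_transaction(trans);
            btrfs_btree_balance_dirty(fs_info);

            if (btrfs_fs_closing(fs_info) || ret != -EAGAIN)
                    break;
    }
    clear_bit(BTRFS_ROOT_DEFRAG_RUNNING, &root->state);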
612 * - No extent_map will be added to inode->extent_tree
615 * - Extra optimization to skip file extents older than @newer_than
629 struct btrfs_root *root = inode->root; in defrag_get_extent()
639 ret = -ENOMEM; in defrag_get_extent()
666 path.slots[0] = btrfs_header_nritems(path.nodes[0]) - 1; in defrag_get_extent()
678 path.slots[0]--; in defrag_get_extent()
704 * | |<- File extent ->| in defrag_get_extent()
705 * \- start in defrag_get_extent()
710 em->start = start; in defrag_get_extent()
711 em->orig_start = start; in defrag_get_extent()
712 em->block_start = EXTENT_MAP_HOLE; in defrag_get_extent()
713 em->len = key.offset - start; in defrag_get_extent()
722 * |<- file extent ->| | in defrag_get_extent()
723 * \- start in defrag_get_extent()
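Worked through with numbers: if the requested start is 4096 but the file extent item found in the tree begins at key.offset == 16384, the gap is reported as a synthetic hole mapping with em->start = 4096, em->block_start = EXTENT_MAP_HOLE, and em->len = 16384 - 4096 = 12288, exactly the assignments in the fragments above.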
755 u64 newer_than, bool locked) in defrag_lookup_extent() argument
757 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; in defrag_lookup_extent()
758 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; in defrag_lookup_extent()
760 const u32 sectorsize = BTRFS_I(inode)->root->fs_info->sectorsize; in defrag_lookup_extent()
766 read_lock(&em_tree->lock); in defrag_lookup_extent()
768 read_unlock(&em_tree->lock); in defrag_lookup_extent()
771 * We can get a merged extent; in that case, we need to re-search in defrag_lookup_extent()
778 if (em && (em->flags & EXTENT_FLAG_MERGED) && in defrag_lookup_extent()
779 newer_than && em->generation >= newer_than) { in defrag_lookup_extent()
786 u64 end = start + sectorsize - 1; in defrag_lookup_extent()
789 if (!locked) in defrag_lookup_extent()
792 if (!locked) in defrag_lookup_extent()
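The lookup itself holds the extent map tree's read lock only across the query; the io_tree extent lock is taken around the slow path only when the caller does not already hold it (@locked == false). A sketch of the core, eliding the merged-extent re-search described above; helper names follow the fragments, but exact signatures vary between kernel versions:

    u64 end = start + sectorsize - 1;

    read_lock(&em_tree->lock);
    em = lookup_extent_mapping(em_tree, start, sectorsize);
    read_unlock(&em_tree->lock);

    if (!em) {
            struct extent_state *cached = NULL;

            /* slow path: pin the range while reading the subvolume tree */
            if (!locked)
                    lock_extent(io_tree, start, end, &cached);
            em = defrag_get_extent(BTRFS_I(inode), start, newer_than);
            if (!locked)
                    unlock_extent(io_tree, start, end, &cached);
    }
    return em;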
807 return fs_info->max_extent_size; in get_extent_max_capacity()
811 u32 extent_thresh, u64 newer_than, bool locked) in defrag_check_next_extent() argument
813 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); in defrag_check_next_extent()
818 if (em->start + em->len >= i_size_read(inode)) in defrag_check_next_extent()
827 next = defrag_lookup_extent(inode, em->start + em->len, newer_than, locked); in defrag_check_next_extent()
829 if (!next || next->block_start >= EXTENT_MAP_LAST_BYTE) in defrag_check_next_extent()
831 if (next->flags & EXTENT_FLAG_PREALLOC) in defrag_check_next_extent()
837 if (next->len >= get_extent_max_capacity(fs_info, em)) in defrag_check_next_extent()
840 if (next->generation < newer_than) in defrag_check_next_extent()
843 if (next->len >= extent_thresh) in defrag_check_next_extent()
857 * - Returned page is locked and has been set up properly.
858 * - No ordered extent exists in the page.
859 * - The page is uptodate.
866 struct address_space *mapping = inode->vfs_inode.i_mapping; in defrag_prepare_one_page()
869 u64 page_end = page_start + PAGE_SIZE - 1; in defrag_prepare_one_page()
877 return ERR_PTR(-ENOMEM); in defrag_prepare_one_page()
880 * Since we can defragment files opened read-only, we can encounter in defrag_prepare_one_page()
890 return ERR_PTR(-ETXTBSY); in defrag_prepare_one_page()
904 lock_extent(&inode->io_tree, page_start, page_end, &cached_state); in defrag_prepare_one_page()
906 unlock_extent(&inode->io_tree, page_start, page_end, in defrag_prepare_one_page()
919 if (page->mapping != mapping || !PagePrivate(page)) { in defrag_prepare_one_page()
933 if (page->mapping != mapping || !PagePrivate(page)) { in defrag_prepare_one_page()
941 return ERR_PTR(-EIO); in defrag_prepare_one_page()
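After allocation and the read-only mapping check, the routine serializes against ordered extents: lock the extent range, look for an ordered extent, and if one exists, drop the page lock, wait it out, and retry; the page->mapping/PagePrivate re-checks catch a truncation or page release that happened while the page was unlocked. In outline (a sketch; mainline's exact calls and error paths differ slightly by version):

    again:
            /* get the page, lock it, make sure it is set up and uptodate */
            lock_extent(&inode->io_tree, page_start, page_end, &cached_state);
            ordered = btrfs_lookup_ordered_range(inode, page_start, PAGE_SIZE);
            unlock_extent(&inode->io_tree, page_start, page_end, &cached_state);
            if (ordered) {
                    unlock_page(page);
                    btrfs_start_ordered_extent(ordered); /* wait it out */
                    btrfs_put_ordered_extent(ordered);
                    lock_page(page);
                    /* page may have been truncated or released meanwhile */
                    if (page->mapping != mapping || !PagePrivate(page)) {
                            unlock_page(page);
                            put_page(page);
                            goto again;
                    }
            }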
964 * @locked: if the range has already held extent lock
970 bool locked, struct list_head *target_list, in defrag_collect_targets() argument
973 struct btrfs_fs_info *fs_info = inode->root->fs_info; in defrag_collect_targets()
985 em = defrag_lookup_extent(&inode->vfs_inode, cur, newer_than, locked); in defrag_collect_targets()
995 if (em->block_start == EXTENT_MAP_INLINE && in defrag_collect_targets()
996 em->len <= inode->root->fs_info->max_inline) in defrag_collect_targets()
1000 if (em->block_start == EXTENT_MAP_HOLE || in defrag_collect_targets()
1001 (em->flags & EXTENT_FLAG_PREALLOC)) in defrag_collect_targets()
1005 if (em->generation < newer_than) in defrag_collect_targets()
1009 if (em->generation == (u64)-1) in defrag_collect_targets()
1016 range_len = em->len - (cur - em->start); in defrag_collect_targets()
1026 * locked, and starting writeback, or finishing an ordered in defrag_collect_targets()
1037 if (test_range_bit_exists(&inode->io_tree, cur, cur + range_len - 1, in defrag_collect_targets()
1049 if (em->len >= extent_thresh) in defrag_collect_targets()
1056 if (em->len >= get_extent_max_capacity(fs_info, em)) in defrag_collect_targets()
1065 if (em->block_start == EXTENT_MAP_INLINE) in defrag_collect_targets()
1068 next_mergeable = defrag_check_next_extent(&inode->vfs_inode, em, in defrag_collect_targets()
1069 extent_thresh, newer_than, locked); in defrag_collect_targets()
1076 last = list_entry(target_list->prev, in defrag_collect_targets()
1079 if (last->start + last->len != cur) in defrag_collect_targets()
1087 range_len = min(extent_map_end(em), start + len) - cur; in defrag_collect_targets()
1095 last = list_entry(target_list->prev, in defrag_collect_targets()
1097 ASSERT(last->start + last->len <= cur); in defrag_collect_targets()
1098 if (last->start + last->len == cur) { in defrag_collect_targets()
1100 last->len += range_len; in defrag_collect_targets()
1110 ret = -ENOMEM; in defrag_collect_targets()
1113 new->start = cur; in defrag_collect_targets()
1114 new->len = range_len; in defrag_collect_targets()
1115 list_add_tail(&new->list, target_list); in defrag_collect_targets()
1126 list_del_init(&entry->list); in defrag_collect_targets()
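Each accepted extent becomes (or extends) an entry on @target_list, so one defrag_target_range ends up describing a whole run of mergeable extents. The append-or-coalesce step, factored into a helper for clarity (add_target_range is a made-up wrapper; in the kernel this logic sits inline in defrag_collect_targets()):

    /* A [start, start + len) run to defragment, per fs/btrfs/defrag.c. */
    struct defrag_target_range {
            struct list_head list;
            u64 start;
            u64 len;
    };

    static int add_target_range(struct list_head *target_list,
                                u64 cur, u64 range_len)
    {
            struct defrag_target_range *new;

            if (!list_empty(target_list)) {
                    struct defrag_target_range *last;

                    last = list_entry(target_list->prev,
                                      struct defrag_target_range, list);
                    /* coalesce when the tail ends exactly at @cur */
                    if (last->start + last->len == cur) {
                            last->len += range_len;
                            return 0;
                    }
            }

            new = kmalloc(sizeof(*new), GFP_NOFS);
            if (!new)
                    return -ENOMEM;
            new->start = cur;
            new->len = range_len;
            list_add_tail(&new->list, target_list);
            return 0;
    }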
1152 * @pages: locked pages covering the defrag range
1153 * @nr_pages: number of locked pages
1157 * - Pages are prepared
1158 * Pages should be locked, no ordered extent in the pages range,
1161 * - Extent bits are locked
1168 struct btrfs_fs_info *fs_info = inode->root->fs_info; in defrag_one_locked_target()
1170 const u64 start = target->start; in defrag_one_locked_target()
1171 const u64 len = target->len; in defrag_one_locked_target()
1172 unsigned long last_index = (start + len - 1) >> PAGE_SHIFT; in defrag_one_locked_target()
1178 ASSERT(last_index - first_index + 1 <= nr_pages); in defrag_one_locked_target()
1183 clear_extent_bit(&inode->io_tree, start, start + len - 1, in defrag_one_locked_target()
1186 set_extent_bit(&inode->io_tree, start, start + len - 1, in defrag_one_locked_target()
1190 for (i = start_index - first_index; i <= last_index - first_index; i++) { in defrag_one_locked_target()
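On the page index arithmetic: last_index = (start + len - 1) >> PAGE_SHIFT is the last page the target touches, start_index is presumably start >> PAGE_SHIFT, and first_index (not among the matched lines; in mainline it is the page cache index of pages[0]) anchors the locked array, so the loop visits pages[start_index - first_index .. last_index - first_index]. For example, with 4KiB pages, if the locked array starts at index 4 and the target covers bytes 20480..32767 (indices 5..7), the loop touches pages[1..3] and the ASSERT requires 7 - 4 + 1 = 4 <= nr_pages.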
1209 const u32 sectorsize = inode->root->fs_info->sectorsize; in defrag_one_range()
1210 u64 last_index = (start + len - 1) >> PAGE_SHIFT; in defrag_one_range()
1212 unsigned int nr_pages = last_index - start_index + 1; in defrag_one_range()
1221 return -ENOMEM; in defrag_one_range()
1236 lock_extent(&inode->io_tree, start_index << PAGE_SHIFT, in defrag_one_range()
1237 (last_index << PAGE_SHIFT) + PAGE_SIZE - 1, in defrag_one_range()
1240 * Now we have a consistent view about the extent map, re-check in defrag_one_range()
1243 * And this time we have extent locked already, pass @locked = true in defrag_one_range()
1260 list_del_init(&entry->list); in defrag_one_range()
1264 unlock_extent(&inode->io_tree, start_index << PAGE_SHIFT, in defrag_one_range()
1265 (last_index << PAGE_SHIFT) + PAGE_SIZE - 1, in defrag_one_range()
1286 const u32 sectorsize = inode->root->fs_info->sectorsize; in defrag_one_cluster()
1299 u32 range_len = entry->len; in defrag_one_cluster()
1309 (max_sectors - *sectors_defragged) * sectorsize); in defrag_one_cluster()
1317 if (entry->start + range_len <= *last_scanned_ret) in defrag_one_cluster()
1321 page_cache_sync_readahead(inode->vfs_inode.i_mapping, in defrag_one_cluster()
1322 ra, NULL, entry->start >> PAGE_SHIFT, in defrag_one_cluster()
1323 ((entry->start + range_len - 1) >> PAGE_SHIFT) - in defrag_one_cluster()
1324 (entry->start >> PAGE_SHIFT) + 1); in defrag_one_cluster()
1327 * we locked the pages. in defrag_one_cluster()
1331 ret = defrag_one_range(inode, entry->start, range_len, in defrag_one_cluster()
1337 inode->root->fs_info->sectorsize_bits; in defrag_one_cluster()
1341 list_del_init(&entry->list); in defrag_one_cluster()
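The readahead length is the exact number of pages the target spans: ((entry->start + range_len - 1) >> PAGE_SHIFT) - (entry->start >> PAGE_SHIFT) + 1. For example, with 4KiB pages, entry->start = 6144 and range_len = 4096 cover bytes 6144..10239, i.e. page indices 1 and 2, and the formula gives 2 - 1 + 1 = 2 pages.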
1360 * Return >= 0 for the number of sectors defragged; range->start will be updated
1369 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); in btrfs_defrag_file()
1374 bool do_compress = (range->flags & BTRFS_DEFRAG_RANGE_COMPRESS); in btrfs_defrag_file()
1378 u32 extent_thresh = range->extent_thresh; in btrfs_defrag_file()
1384 if (range->start >= isize) in btrfs_defrag_file()
1385 return -EINVAL; in btrfs_defrag_file()
1388 if (range->compress_type >= BTRFS_NR_COMPRESS_TYPES) in btrfs_defrag_file()
1389 return -EINVAL; in btrfs_defrag_file()
1390 if (range->compress_type) in btrfs_defrag_file()
1391 compress_type = range->compress_type; in btrfs_defrag_file()
1397 if (range->start + range->len > range->start) { in btrfs_defrag_file()
1399 last_byte = min(isize, range->start + range->len); in btrfs_defrag_file()
1406 cur = round_down(range->start, fs_info->sectorsize); in btrfs_defrag_file()
1407 last_byte = round_up(last_byte, fs_info->sectorsize) - 1; in btrfs_defrag_file()
1418 file_ra_state_init(ra, inode->i_mapping); in btrfs_defrag_file()
1426 if (start_index < inode->i_mapping->writeback_index) in btrfs_defrag_file()
1427 inode->i_mapping->writeback_index = start_index; in btrfs_defrag_file()
1435 ret = -EAGAIN; in btrfs_defrag_file()
1441 (SZ_256K >> PAGE_SHIFT)) << PAGE_SHIFT) - 1; in btrfs_defrag_file()
1446 ret = -ETXTBSY; in btrfs_defrag_file()
1450 if (!(inode->i_sb->s_flags & SB_ACTIVE)) { in btrfs_defrag_file()
1455 BTRFS_I(inode)->defrag_compress = compress_type; in btrfs_defrag_file()
1457 cluster_end + 1 - cur, extent_thresh, in btrfs_defrag_file()
1462 balance_dirty_pages_ratelimited(inode->i_mapping); in btrfs_defrag_file()
1481 range->start = cur; in btrfs_defrag_file()
1487 if (range->flags & BTRFS_DEFRAG_RANGE_START_IO) { in btrfs_defrag_file()
1488 filemap_flush(inode->i_mapping); in btrfs_defrag_file()
1490 &BTRFS_I(inode)->runtime_flags)) in btrfs_defrag_file()
1491 filemap_flush(inode->i_mapping); in btrfs_defrag_file()
1493 if (range->compress_type == BTRFS_COMPRESS_LZO) in btrfs_defrag_file()
1495 else if (range->compress_type == BTRFS_COMPRESS_ZSTD) in btrfs_defrag_file()
1501 BTRFS_I(inode)->defrag_compress = BTRFS_COMPRESS_NONE; in btrfs_defrag_file()
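Pulling btrfs_defrag_file() together: after validating range->start against i_size and clamping the compression type, it walks the range in 256KiB, page-aligned clusters; each iteration re-checks for swapfiles and an active superblock, sets defrag_compress, defragments one cluster, then throttles dirty pages. A loose sketch of the loop (parameter lists follow the fragments approximately, not any one kernel release):

    while (ret >= 0 && cur < last_byte) {
            u64 cluster_end;

            if (btrfs_defrag_cancelled(fs_info)) {
                    ret = -EAGAIN;
                    break;
            }

            /* one cluster: up to 256KiB, page aligned, capped to the range */
            cluster_end = (((cur >> PAGE_SHIFT) +
                            (SZ_256K >> PAGE_SHIFT)) << PAGE_SHIFT) - 1;
            cluster_end = min(cluster_end, last_byte);

            btrfs_inode_lock(BTRFS_I(inode), 0);
            if (IS_SWAPFILE(inode)) {
                    ret = -ETXTBSY;
                    btrfs_inode_unlock(BTRFS_I(inode), 0);
                    break;
            }
            BTRFS_I(inode)->defrag_compress = compress_type;
            ret = defrag_one_cluster(BTRFS_I(inode), ra, cur,
                                     cluster_end + 1 - cur, extent_thresh,
                                     newer_than, do_compress,
                                     &sectors_defragged, max_to_defrag,
                                     &last_scanned);
            btrfs_inode_unlock(BTRFS_I(inode), 0);
            if (ret < 0)
                    break;
            cur = max(cluster_end + 1, last_scanned);
            balance_dirty_pages_ratelimited(inode->i_mapping);
    }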
1519 return -ENOMEM; in btrfs_auto_defrag_init()