Lines matching defs:delayed_refs
349 static bool btrfs_delayed_ref_lock(struct btrfs_delayed_ref_root *delayed_refs,
352 lockdep_assert_held(&delayed_refs->lock);
357 spin_unlock(&delayed_refs->lock);
360 spin_lock(&delayed_refs->lock);
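
Taken together, 349-360 show a lock-ordering dance: these matches appear to come from the Linux kernel's fs/btrfs/delayed-ref.c, where head->mutex nests outside delayed_refs->lock, so the spinlock must be dropped before sleeping on the mutex and retaken afterwards. A sketch of that shape, assuming the head->mutex, head->refs and head->tracked fields and the btrfs_put_delayed_ref_head() helper from the btrfs sources (not the verbatim function):

    static bool btrfs_delayed_ref_lock(struct btrfs_delayed_ref_root *delayed_refs,
                                       struct btrfs_delayed_ref_head *head)
    {
        lockdep_assert_held(&delayed_refs->lock);          /* 352 */

        /* Fast path: mutex is free, no need to drop the spinlock. */
        if (mutex_trylock(&head->mutex))
            return true;

        /* Pin the head so it can be deleted but not freed meanwhile. */
        refcount_inc(&head->refs);
        spin_unlock(&delayed_refs->lock);                  /* 357 */

        mutex_lock(&head->mutex);                          /* may sleep */
        spin_lock(&delayed_refs->lock);                    /* 360 */

        /* Someone may have processed and removed the head meanwhile. */
        if (!head->tracked) {
            mutex_unlock(&head->mutex);
            btrfs_put_delayed_ref_head(head);
            return false;
        }
        btrfs_put_delayed_ref_head(head);
        return true;
    }
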
371 struct btrfs_delayed_ref_root *delayed_refs,
385 struct btrfs_delayed_ref_root *delayed_refs,
414 drop_delayed_ref(fs_info, delayed_refs, head, next);
417 drop_delayed_ref(fs_info, delayed_refs, head, ref);
432 struct btrfs_delayed_ref_root *delayed_refs,
455 if (merge_ref(fs_info, delayed_refs, head, ref, seq))
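
The drop_delayed_ref() calls at 414 and 417 are the heart of the merge pass driven from 455: two queued refs on the same head with opposite actions cancel each other, and if the surviving ref's net count hits zero it is dropped too. A condensed sketch of that step, assuming the action and ref_mod fields of the delayed-ref node (seq filtering and ref comparison omitted):

        /* next compares equal to ref (same head, same target). */
        if (ref->action == next->action)
            mod = next->ref_mod;       /* same direction: accumulate */
        else
            mod = -next->ref_mod;      /* opposite direction: cancel */

        drop_delayed_ref(fs_info, delayed_refs, head, next);        /* 414 */
        ref->ref_mod += mod;
        if (ref->ref_mod == 0)
            /* Fully cancelled out: nothing left to run. */
            drop_delayed_ref(fs_info, delayed_refs, head, ref);     /* 417 */
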
477 struct btrfs_delayed_ref_root *delayed_refs)
485 spin_lock(&delayed_refs->lock);
487 start_index = (delayed_refs->run_delayed_start >> fs_info->sectorsize_bits);
488 xa_for_each_start(&delayed_refs->head_refs, found_index, head, start_index) {
495 if (delayed_refs->run_delayed_start == 0) {
496 spin_unlock(&delayed_refs->lock);
499 delayed_refs->run_delayed_start = 0;
504 WARN_ON(delayed_refs->num_heads_ready == 0);
505 delayed_refs->num_heads_ready--;
506 delayed_refs->run_delayed_start = head->bytenr +
509 locked = btrfs_delayed_ref_lock(delayed_refs, head);
510 spin_unlock(&delayed_refs->lock);
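
Lines 485-510 are the head-selection scan: heads live in the head_refs xarray keyed by bytenr >> sectorsize_bits, and run_delayed_start remembers where the last scan stopped so selection round-robins over the keyspace, wrapping to index 0 once before giving up. A sketch of the control flow, assuming the head->processing flag:

    again:
        start_index = (delayed_refs->run_delayed_start >> fs_info->sectorsize_bits);
        xa_for_each_start(&delayed_refs->head_refs, found_index, head, start_index) {
            if (!head->processing)
                break;                 /* first unclaimed head wins */
        }
        if (!head) {                   /* scan exhausted without a hit */
            if (delayed_refs->run_delayed_start == 0) {
                spin_unlock(&delayed_refs->lock);
                return NULL;           /* already scanned from 0: done */
            }
            delayed_refs->run_delayed_start = 0;   /* wrap around once */
            goto again;
        }

        head->processing = true;
        WARN_ON(delayed_refs->num_heads_ready == 0);
        delayed_refs->num_heads_ready--;
        delayed_refs->run_delayed_start = head->bytenr + head->num_bytes;
        locked = btrfs_delayed_ref_lock(delayed_refs, head);
        spin_unlock(&delayed_refs->lock);

        /* The head may have gone away while the spinlock was dropped
         * inside btrfs_delayed_ref_lock(); the caller retries then. */
        if (!locked)
            return ERR_PTR(-EAGAIN);
        return head;
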
523 void btrfs_unselect_ref_head(struct btrfs_delayed_ref_root *delayed_refs,
526 spin_lock(&delayed_refs->lock);
528 delayed_refs->num_heads_ready++;
529 spin_unlock(&delayed_refs->lock);
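
btrfs_unselect_ref_head() at 523-529 undoes a selection when the caller cannot process the head after all; the one line the match elides is presumably clearing the processing flag so the scan above can pick the head again. A sketch:

    void btrfs_unselect_ref_head(struct btrfs_delayed_ref_root *delayed_refs,
                                 struct btrfs_delayed_ref_head *head)
    {
        spin_lock(&delayed_refs->lock);        /* 526 */
        head->processing = false;              /* selectable again */
        delayed_refs->num_heads_ready++;       /* 528 */
        spin_unlock(&delayed_refs->lock);      /* 529 */
    }
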
534 struct btrfs_delayed_ref_root *delayed_refs,
539 lockdep_assert_held(&delayed_refs->lock);
542 xa_erase(&delayed_refs->head_refs, index);
544 delayed_refs->num_heads--;
546 delayed_refs->num_heads_ready--;
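
Deletion at 534-546 mirrors insertion: the head is erased from the xarray at its bytenr-derived index and the counters are rolled back, with num_heads_ready only decremented when the head had not already been claimed. A sketch, assuming head->bytenr, head->processing and a tracked flag:

    void btrfs_delete_ref_head(const struct btrfs_fs_info *fs_info,
                               struct btrfs_delayed_ref_root *delayed_refs,
                               struct btrfs_delayed_ref_head *head)
    {
        const unsigned long index = (head->bytenr >> fs_info->sectorsize_bits);

        lockdep_assert_held(&delayed_refs->lock);   /* 539 */

        xa_erase(&delayed_refs->head_refs, index);  /* 542 */
        head->tracked = false;
        delayed_refs->num_heads--;                  /* 544 */
        if (!head->processing)
            delayed_refs->num_heads_ready--;        /* 546 */
    }
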
586 struct btrfs_delayed_ref_root *root = &trans->transaction->delayed_refs;
638 struct btrfs_delayed_ref_root *delayed_refs =
639 &trans->transaction->delayed_refs;
713 delayed_refs->pending_csums -= existing->num_bytes;
717 delayed_refs->pending_csums += existing->num_bytes;
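
The paired -=/+= at 713 and 717 keep pending_csums consistent when an existing head's net ref count changes sign: pending_csums counts bytes of data extents whose checksum items will have to be deleted, so only heads with a negative total modification count contribute. A sketch of the sign-crossing update, assuming a total_ref_mod field and that old_ref_mod holds its previous value:

        /* old_ref_mod is existing->total_ref_mod before this update;
         * runs only for data extents, which carry checksum items. */
        if (existing->total_ref_mod >= 0 && old_ref_mod < 0)
            /* No longer headed for deletion: its csums stay. */
            delayed_refs->pending_csums -= existing->num_bytes;     /* 713 */
        if (existing->total_ref_mod < 0 && old_ref_mod >= 0)
            /* Now headed for deletion: its csums must go too. */
            delayed_refs->pending_csums += existing->num_bytes;     /* 717 */
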
815 struct btrfs_delayed_ref_root *delayed_refs;
819 delayed_refs = &trans->transaction->delayed_refs;
820 lockdep_assert_held(&delayed_refs->lock);
825 xa_release(&delayed_refs->dirty_extents, index);
838 ret = btrfs_qgroup_trace_extent_nolock(fs_info, delayed_refs, qrecord,
842 xa_release(&delayed_refs->dirty_extents, index);
854 existing = xa_load(&delayed_refs->head_refs, index);
864 existing = xa_store(&delayed_refs->head_refs, index, head_ref, GFP_ATOMIC);
872 * delayed_refs->lock.
884 delayed_refs->pending_csums += head_ref->num_bytes;
888 delayed_refs->num_heads++;
889 delayed_refs->num_heads_ready++;
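
854-889 are the insert-or-merge step when adding a head ref under delayed_refs->lock: an existing head at the same index is updated in place, otherwise the new head is stored with GFP_ATOMIC, which is safe because the slot was reserved before the lock was taken (see 1029 and 1134 below). A sketch, with update_existing_head_ref() standing in for the merge path:

        existing = xa_load(&delayed_refs->head_refs, index);        /* 854 */
        if (existing) {
            /* Fold the new modification into the existing head. */
            update_existing_head_ref(trans, existing, head_ref);
        } else {
            existing = xa_store(&delayed_refs->head_refs, index,
                                head_ref, GFP_ATOMIC);              /* 864 */
            /* Cannot be -ENOMEM: the slot was reserved before
             * delayed_refs->lock was taken. */
            WARN_ON(xa_is_err(existing));

            if (head_ref->is_data && head_ref->ref_mod < 0)
                delayed_refs->pending_csums += head_ref->num_bytes; /* 884 */
            delayed_refs->num_heads++;                              /* 888 */
            delayed_refs->num_heads_ready++;                        /* 889 */
        }
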
995 struct btrfs_delayed_ref_root *delayed_refs;
1014 delayed_refs = &trans->transaction->delayed_refs;
1022 if (xa_reserve(&delayed_refs->dirty_extents, index, GFP_NOFS)) {
1029 ret = xa_reserve(&delayed_refs->head_refs, index, GFP_NOFS);
1032 xa_release(&delayed_refs->dirty_extents, index);
1040 spin_lock(&delayed_refs->lock);
1049 xa_release(&delayed_refs->head_refs, index);
1050 spin_unlock(&delayed_refs->lock);
1057 spin_unlock(&delayed_refs->lock);
1115 struct btrfs_delayed_ref_root *delayed_refs;
1132 delayed_refs = &trans->transaction->delayed_refs;
1134 ret = xa_reserve(&delayed_refs->head_refs, index, GFP_NOFS);
1140 spin_lock(&delayed_refs->lock);
1144 xa_release(&delayed_refs->head_refs, index);
1145 spin_unlock(&delayed_refs->lock);
1149 spin_unlock(&delayed_refs->lock);
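
Both call sites (1022-1057 and 1134-1149) follow the same reserve-then-store discipline: xarray slots are reserved with a sleeping GFP_NOFS allocation before delayed_refs->lock is taken, so stores under the spinlock cannot fail for lack of memory, and every error path releases what it reserved. A condensed sketch of the first site (the goto label is a placeholder):

        if (xa_reserve(&delayed_refs->dirty_extents, index, GFP_NOFS)) { /* 1022 */
            ret = -ENOMEM;
            goto out_free;
        }
        ret = xa_reserve(&delayed_refs->head_refs, index, GFP_NOFS);     /* 1029 */
        if (ret) {
            xa_release(&delayed_refs->dirty_extents, index);             /* 1032 */
            goto out_free;
        }

        spin_lock(&delayed_refs->lock);                                  /* 1040 */
        /* ... insert the head ref; on failure undo the reservation ... */
        if (ret) {
            xa_release(&delayed_refs->head_refs, index);                 /* 1049 */
            spin_unlock(&delayed_refs->lock);                            /* 1050 */
            goto out_free;
        }
        spin_unlock(&delayed_refs->lock);                                /* 1057 */
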
1173 struct btrfs_delayed_ref_root *delayed_refs,
1178 lockdep_assert_held(&delayed_refs->lock);
1180 return xa_load(&delayed_refs->head_refs, index);
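
The lookup at 1173-1180 shows the indexing convention in one place: a head is found by loading head_refs at bytenr >> sectorsize_bits while holding delayed_refs->lock. Filled out as a sketch (the function name and parameter list are assumptions):

    struct btrfs_delayed_ref_head *
    btrfs_find_delayed_ref_head(const struct btrfs_fs_info *fs_info,
                                struct btrfs_delayed_ref_root *delayed_refs,
                                u64 bytenr)
    {
        const unsigned long index = (bytenr >> fs_info->sectorsize_bits);

        lockdep_assert_held(&delayed_refs->lock);        /* 1178 */
        return xa_load(&delayed_refs->head_refs, index); /* 1180 */
    }
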
1252 struct btrfs_delayed_ref_root *delayed_refs = &trans->delayed_refs;
1256 spin_lock(&delayed_refs->lock);
1262 head = find_first_ref_head(delayed_refs);
1266 if (!btrfs_delayed_ref_lock(delayed_refs, head))
1274 drop_delayed_ref(fs_info, delayed_refs, head, ref);
1279 btrfs_delete_ref_head(fs_info, delayed_refs, head);
1281 spin_unlock(&delayed_refs->lock);
1316 btrfs_cleanup_ref_head_accounting(fs_info, delayed_refs, head);
1319 spin_lock(&delayed_refs->lock);
1325 spin_unlock(&delayed_refs->lock);
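
Finally, 1252-1325 outline teardown (for example on transaction abort): take the lock, claim the first head, drop every queued ref, delete the head, then do the accounting cleanup with the spinlock released before looping. A sketch, assuming find_first_ref_head() returns the lowest-indexed head and head->ref_tree is the per-head rbtree of refs:

        spin_lock(&delayed_refs->lock);                              /* 1256 */
        while ((head = find_first_ref_head(delayed_refs)) != NULL) { /* 1262 */
            struct rb_node *n;

            if (!btrfs_delayed_ref_lock(delayed_refs, head))         /* 1266 */
                continue;      /* head vanished while we slept */

            while ((n = rb_first_cached(&head->ref_tree)) != NULL) {
                struct btrfs_delayed_ref_node *ref;

                ref = rb_entry(n, struct btrfs_delayed_ref_node, ref_node);
                drop_delayed_ref(fs_info, delayed_refs, head, ref);  /* 1274 */
            }
            btrfs_delete_ref_head(fs_info, delayed_refs, head);      /* 1279 */
            spin_unlock(&delayed_refs->lock);                        /* 1281 */

            /* Accounting cleanup can block, so the spinlock stays
             * dropped until the next iteration. */
            btrfs_cleanup_ref_head_accounting(fs_info, delayed_refs, head); /* 1316 */

            spin_lock(&delayed_refs->lock);                          /* 1319 */
        }
        spin_unlock(&delayed_refs->lock);                            /* 1325 */
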