Lines Matching full:path (fs/bcachefs/btree_iter.c)

97 static inline bool btree_path_pos_before_node(struct btree_path *path,  in btree_path_pos_before_node()  argument
100 return bpos_lt(path->pos, b->data->min_key); in btree_path_pos_before_node()
103 static inline bool btree_path_pos_after_node(struct btree_path *path, in btree_path_pos_after_node() argument
106 return bpos_gt(path->pos, b->key.k.p); in btree_path_pos_after_node()
109 static inline bool btree_path_pos_in_node(struct btree_path *path, in btree_path_pos_in_node() argument
112 return path->btree_id == b->c.btree_id && in btree_path_pos_in_node()
113 !btree_path_pos_before_node(path, b) && in btree_path_pos_in_node()
114 !btree_path_pos_after_node(path, b); in btree_path_pos_in_node()
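The three helpers above (listing lines 97-114) define when a node covers a path's position: the btree IDs must match, and the position must be neither below the node's min_key nor above its max key (b->key.k.p). A minimal standalone sketch of the same containment check, using plain integers in place of struct bpos; the toy_* names are illustrative stand-ins, not the kernel types:

    #include <stdbool.h>
    #include <stdio.h>

    /* Simplified stand-ins for struct btree / struct btree_path. */
    struct toy_node { int btree_id; int min_key; int max_key; };
    struct toy_path { int btree_id; int pos; };

    static bool toy_pos_before_node(const struct toy_path *p, const struct toy_node *b)
    {
            return p->pos < b->min_key;
    }

    static bool toy_pos_after_node(const struct toy_path *p, const struct toy_node *b)
    {
            return p->pos > b->max_key;
    }

    static bool toy_pos_in_node(const struct toy_path *p, const struct toy_node *b)
    {
            /* Same shape as btree_path_pos_in_node(): id match, then range check. */
            return p->btree_id == b->btree_id &&
                   !toy_pos_before_node(p, b) &&
                   !toy_pos_after_node(p, b);
    }

    int main(void)
    {
            struct toy_node n = { .btree_id = 1, .min_key = 10, .max_key = 20 };
            struct toy_path p = { .btree_id = 1, .pos = 15 };

            printf("in node: %d\n", toy_pos_in_node(&p, &n)); /* prints 1 */
            return 0;
    }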
122 struct btree_path *path) in bch2_btree_path_verify_cached() argument
125 bool locked = btree_node_locked(path, 0); in bch2_btree_path_verify_cached()
127 if (!bch2_btree_node_relock(trans, path, 0)) in bch2_btree_path_verify_cached()
130 ck = (void *) path->l[0].b; in bch2_btree_path_verify_cached()
131 BUG_ON(ck->key.btree_id != path->btree_id || in bch2_btree_path_verify_cached()
132 !bkey_eq(ck->key.pos, path->pos)); in bch2_btree_path_verify_cached()
135 btree_node_unlock(trans, path, 0); in bch2_btree_path_verify_cached()
139 struct btree_path *path, unsigned level) in bch2_btree_path_verify_level() argument
153 l = &path->l[level]; in bch2_btree_path_verify_level()
155 locked = btree_node_locked(path, level); in bch2_btree_path_verify_level()
157 if (path->cached) { in bch2_btree_path_verify_level()
159 bch2_btree_path_verify_cached(trans, path); in bch2_btree_path_verify_level()
163 if (!btree_path_node(path, level)) in bch2_btree_path_verify_level()
166 if (!bch2_btree_node_relock_notrace(trans, path, level)) in bch2_btree_path_verify_level()
169 BUG_ON(!btree_path_pos_in_node(path, l->b)); in bch2_btree_path_verify_level()
181 if (p && bkey_iter_pos_cmp(l->b, p, &path->pos) >= 0) { in bch2_btree_path_verify_level()
186 if (k && bkey_iter_pos_cmp(l->b, k, &path->pos) < 0) { in bch2_btree_path_verify_level()
192 btree_node_unlock(trans, path, level); in bch2_btree_path_verify_level()
195 bch2_bpos_to_text(&buf1, path->pos); in bch2_btree_path_verify_level()
213 panic("path should be %s key at level %u:\n" in bch2_btree_path_verify_level()
214 "path pos %s\n" in bch2_btree_path_verify_level()
221 struct btree_path *path) in bch2_btree_path_verify() argument
226 EBUG_ON(path->btree_id >= BTREE_ID_NR); in bch2_btree_path_verify()
228 for (i = 0; i < (!path->cached ? BTREE_MAX_DEPTH : 1); i++) { in bch2_btree_path_verify()
229 if (!path->l[i].b) { in bch2_btree_path_verify()
230 BUG_ON(!path->cached && in bch2_btree_path_verify()
231 bch2_btree_id_root(c, path->btree_id)->b->c.level > i); in bch2_btree_path_verify()
235 bch2_btree_path_verify_level(trans, path, i); in bch2_btree_path_verify()
238 bch2_btree_path_verify_locks(path); in bch2_btree_path_verify()
243 struct btree_path *path; in bch2_trans_verify_paths() local
246 trans_for_each_path(trans, path, iter) in bch2_trans_verify_paths()
247 bch2_btree_path_verify(trans, path); in bch2_trans_verify_paths()
335 struct btree_path *path; in bch2_assert_pos_locked() local
341 trans_for_each_path_inorder(trans, path, iter) { in bch2_assert_pos_locked()
342 int cmp = cmp_int(path->btree_id, id) ?: in bch2_assert_pos_locked()
343 cmp_int(path->cached, key_cache); in bch2_assert_pos_locked()
350 if (!btree_node_locked(path, 0) || in bch2_assert_pos_locked()
351 !path->should_be_locked) in bch2_assert_pos_locked()
355 if (bkey_ge(pos, path->l[0].b->data->min_key) && in bch2_assert_pos_locked()
356 bkey_le(pos, path->l[0].b->key.k.p)) in bch2_assert_pos_locked()
359 if (bkey_eq(pos, path->pos)) in bch2_assert_pos_locked()
375 struct btree_path *path, unsigned l) {} in bch2_btree_path_verify_level() argument
377 struct btree_path *path) {} in bch2_btree_path_verify() argument
384 /* Btree path: fixups after btree updates */
403 static void __bch2_btree_path_fix_key_modified(struct btree_path *path, in __bch2_btree_path_fix_key_modified() argument
407 struct btree_path_level *l = &path->l[b->c.level]; in __bch2_btree_path_fix_key_modified()
412 if (bkey_iter_pos_cmp(l->b, where, &path->pos) < 0) in __bch2_btree_path_fix_key_modified()
420 struct btree_path *path; in bch2_btree_path_fix_key_modified() local
423 trans_for_each_path_with_node(trans, b, path, i) { in bch2_btree_path_fix_key_modified()
424 __bch2_btree_path_fix_key_modified(path, b, where); in bch2_btree_path_fix_key_modified()
425 bch2_btree_path_verify_level(trans, path, b->c.level); in bch2_btree_path_fix_key_modified()
429 static void __bch2_btree_node_iter_fix(struct btree_path *path, in __bch2_btree_node_iter_fix() argument
453 bkey_iter_pos_cmp(b, where, &path->pos) >= 0) { in __bch2_btree_node_iter_fix()
468 bkey_iter_pos_cmp(b, where, &path->pos) >= 0) { in __bch2_btree_node_iter_fix()
521 struct btree_path *path, in bch2_btree_node_iter_fix() argument
532 if (node_iter != &path->l[b->c.level].iter) { in bch2_btree_node_iter_fix()
533 __bch2_btree_node_iter_fix(path, b, node_iter, t, in bch2_btree_node_iter_fix()
548 /* Btree path level: pointer to a particular btree node and node iter */
576 struct btree_path *path, in btree_path_level_peek() argument
583 path->pos = k.k ? k.k->p : l->b->key.k.p; in btree_path_level_peek()
585 bch2_btree_path_verify_level(trans, path, l - path->l); in btree_path_level_peek()
590 struct btree_path *path, in btree_path_level_prev() argument
597 path->pos = k.k ? k.k->p : l->b->data->min_key; in btree_path_level_prev()
599 bch2_btree_path_verify_level(trans, path, l - path->l); in btree_path_level_prev()
603 static inline bool btree_path_advance_to_pos(struct btree_path *path, in btree_path_advance_to_pos() argument
611 bkey_iter_pos_cmp(l->b, k, &path->pos) < 0) { in btree_path_advance_to_pos()
622 static inline void __btree_path_level_init(struct btree_path *path, in __btree_path_level_init() argument
625 struct btree_path_level *l = &path->l[level]; in __btree_path_level_init()
627 bch2_btree_node_iter_init(&l->iter, l->b, &path->pos); in __btree_path_level_init()
638 struct btree_path *path, in bch2_btree_path_level_init() argument
641 BUG_ON(path->cached); in bch2_btree_path_level_init()
643 EBUG_ON(!btree_path_pos_in_node(path, b)); in bch2_btree_path_level_init()
645 path->l[b->c.level].lock_seq = six_lock_seq(&b->c.lock); in bch2_btree_path_level_init()
646 path->l[b->c.level].b = b; in bch2_btree_path_level_init()
647 __btree_path_level_init(path, b->c.level); in bch2_btree_path_level_init()
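bch2_btree_path_level_init() above (listing lines 638-647) records, for one level of the path, the node pointer, the node's lock sequence number (used later to detect that the lock was dropped and the node may have changed), and a node iterator positioned at path->pos. A simplified sketch of that per-level bookkeeping; the toy_* types below only mirror the field names visible in the listing, not the real bcachefs structures:

    #include <stdint.h>

    struct toy_btree { unsigned level; uint32_t lock_seq; };
    struct toy_node_iter { int pos; };

    struct toy_path_level {
            struct toy_btree     *b;        /* node this level points at   */
            uint32_t              lock_seq; /* lock generation when cached */
            struct toy_node_iter  iter;     /* iterator within the node    */
    };

    struct toy_path {
            int                   pos;
            struct toy_path_level l[4];     /* one slot per btree level    */
    };

    /* Mirrors the shape of bch2_btree_path_level_init()/__btree_path_level_init(). */
    static void toy_path_level_init(struct toy_path *path, struct toy_btree *b)
    {
            struct toy_path_level *l = &path->l[b->level];

            l->lock_seq = b->lock_seq;      /* remember the lock generation     */
            l->b        = b;
            l->iter.pos = path->pos;        /* start the node iter at path->pos */
    }

    int main(void)
    {
            struct toy_btree b = { .level = 1, .lock_seq = 42 };
            struct toy_path  p = { .pos = 7 };

            toy_path_level_init(&p, &b);
            return p.l[1].iter.pos == 7 ? 0 : 1;
    }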
650 /* Btree path: fixups after btree node updates: */
662 i->old_v = bch2_btree_path_peek_slot(trans->paths + i->path, &i->old_k).v; in bch2_trans_revalidate_updates_in_node()
682 struct btree_path *path, in bch2_trans_node_add() argument
687 BUG_ON(!btree_path_pos_in_node(path, b)); in bch2_trans_node_add()
689 while ((prev = prev_btree_path(trans, path)) && in bch2_trans_node_add()
691 path = prev; in bch2_trans_node_add()
694 path && btree_path_pos_in_node(path, b); in bch2_trans_node_add()
695 path = next_btree_path(trans, path)) in bch2_trans_node_add()
696 if (path->uptodate == BTREE_ITER_UPTODATE && !path->cached) { in bch2_trans_node_add()
698 btree_lock_want(path, b->c.level); in bch2_trans_node_add()
701 btree_node_unlock(trans, path, b->c.level); in bch2_trans_node_add()
703 mark_btree_node_locked(trans, path, b->c.level, t); in bch2_trans_node_add()
706 bch2_btree_path_level_init(trans, path, b); in bch2_trans_node_add()
718 struct btree_path *path; in bch2_trans_node_reinit_iter() local
721 trans_for_each_path_with_node(trans, b, path, i) in bch2_trans_node_reinit_iter()
722 __btree_path_level_init(path, b->c.level); in bch2_trans_node_reinit_iter()
727 /* Btree path: traverse, set_pos: */
730 struct btree_path *path, in btree_path_lock_root() argument
735 struct btree *b, **rootp = &bch2_btree_id_root(c, path->btree_id)->b; in btree_path_lock_root()
740 EBUG_ON(path->nodes_locked); in btree_path_lock_root()
744 path->level = READ_ONCE(b->c.level); in btree_path_lock_root()
746 if (unlikely(path->level < depth_want)) { in btree_path_lock_root()
753 path->level = depth_want; in btree_path_lock_root()
754 for (i = path->level; i < BTREE_MAX_DEPTH; i++) in btree_path_lock_root()
755 path->l[i].b = NULL; in btree_path_lock_root()
759 lock_type = __btree_lock_want(path, path->level); in btree_path_lock_root()
760 ret = btree_node_lock(trans, path, &b->c, in btree_path_lock_root()
761 path->level, lock_type, trace_ip); in btree_path_lock_root()
771 b->c.level == path->level && in btree_path_lock_root()
773 for (i = 0; i < path->level; i++) in btree_path_lock_root()
774 path->l[i].b = ERR_PTR(-BCH_ERR_no_btree_node_lock_root); in btree_path_lock_root()
775 path->l[path->level].b = b; in btree_path_lock_root()
776 for (i = path->level + 1; i < BTREE_MAX_DEPTH; i++) in btree_path_lock_root()
777 path->l[i].b = NULL; in btree_path_lock_root()
779 mark_btree_node_locked(trans, path, path->level, in btree_path_lock_root()
781 bch2_btree_path_level_init(trans, path, b); in btree_path_lock_root()
790 static int btree_path_prefetch(struct btree_trans *trans, struct btree_path *path) in btree_path_prefetch() argument
793 struct btree_path_level *l = path_l(path); in btree_path_prefetch()
798 ? (path->level > 1 ? 0 : 2) in btree_path_prefetch()
799 : (path->level > 1 ? 1 : 16); in btree_path_prefetch()
800 bool was_locked = btree_node_locked(path, path->level); in btree_path_prefetch()
806 if (!bch2_btree_node_relock(trans, path, path->level)) in btree_path_prefetch()
815 ret = bch2_btree_node_prefetch(trans, path, tmp.k, path->btree_id, in btree_path_prefetch()
816 path->level - 1); in btree_path_prefetch()
820 btree_node_unlock(trans, path, path->level); in btree_path_prefetch()
826 static int btree_path_prefetch_j(struct btree_trans *trans, struct btree_path *path, in btree_path_prefetch_j() argument
833 ? (path->level > 1 ? 0 : 2) in btree_path_prefetch_j()
834 : (path->level > 1 ? 1 : 16); in btree_path_prefetch_j()
835 bool was_locked = btree_node_locked(path, path->level); in btree_path_prefetch_j()
841 if (!bch2_btree_node_relock(trans, path, path->level)) in btree_path_prefetch_j()
850 ret = bch2_btree_node_prefetch(trans, path, tmp.k, path->btree_id, in btree_path_prefetch_j()
851 path->level - 1); in btree_path_prefetch_j()
855 btree_node_unlock(trans, path, path->level); in btree_path_prefetch_j()
862 struct btree_path *path, in btree_node_mem_ptr_set() argument
865 struct btree_path_level *l = &path->l[plevel]; in btree_node_mem_ptr_set()
866 bool locked = btree_node_locked(path, plevel); in btree_node_mem_ptr_set()
870 if (!bch2_btree_node_relock(trans, path, plevel)) in btree_node_mem_ptr_set()
880 btree_node_unlock(trans, path, plevel); in btree_node_mem_ptr_set()
884 struct btree_path *path, in btree_node_iter_and_journal_peek() argument
889 struct btree_path_level *l = path_l(path); in btree_node_iter_and_journal_peek()
894 __bch2_btree_and_journal_iter_init_node_iter(&jiter, c, l->b, l->iter, path->pos); in btree_node_iter_and_journal_peek()
902 ret = btree_path_prefetch_j(trans, path, &jiter); in btree_node_iter_and_journal_peek()
909 struct btree_path *path, in btree_path_down() argument
914 struct btree_path_level *l = path_l(path); in btree_path_down()
916 unsigned level = path->level - 1; in btree_path_down()
917 enum six_lock_type lock_type = __btree_lock_want(path, level); in btree_path_down()
921 EBUG_ON(!btree_node_locked(path, path->level)); in btree_path_down()
926 ret = btree_node_iter_and_journal_peek(trans, path, flags, &tmp); in btree_path_down()
935 ret = btree_path_prefetch(trans, path); in btree_path_down()
941 b = bch2_btree_node_get(trans, path, tmp.k, level, lock_type, trace_ip); in btree_path_down()
949 btree_node_mem_ptr_set(trans, path, level + 1, b); in btree_path_down()
951 if (btree_node_read_locked(path, level + 1)) in btree_path_down()
952 btree_node_unlock(trans, path, level + 1); in btree_path_down()
954 mark_btree_node_locked(trans, path, level, in btree_path_down()
956 path->level = level; in btree_path_down()
957 bch2_btree_path_level_init(trans, path, b); in btree_path_down()
959 bch2_btree_path_verify_locks(path); in btree_path_down()
969 struct btree_path *path; in bch2_btree_path_traverse_all() local
982 trans_for_each_path(trans, path, i) in bch2_btree_path_traverse_all()
983 path->should_be_locked = false; in bch2_btree_path_traverse_all()
1007 * Traversing a path can cause another path to be added at about in bch2_btree_path_traverse_all()
1027 * (path->uptodate < BTREE_ITER_NEED_TRAVERSE); however, since in bch2_btree_path_traverse_all()
1028 * path->should_be_locked is not set yet, we might have unlocked and in bch2_btree_path_traverse_all()
1029 * then failed to relock a path - that's fine. in bch2_btree_path_traverse_all()
1040 static inline bool btree_path_check_pos_in_node(struct btree_path *path, in btree_path_check_pos_in_node() argument
1043 if (check_pos < 0 && btree_path_pos_before_node(path, path->l[l].b)) in btree_path_check_pos_in_node()
1045 if (check_pos > 0 && btree_path_pos_after_node(path, path->l[l].b)) in btree_path_check_pos_in_node()
1051 struct btree_path *path, in btree_path_good_node() argument
1054 return is_btree_node(path, l) && in btree_path_good_node()
1055 bch2_btree_node_relock(trans, path, l) && in btree_path_good_node()
1056 btree_path_check_pos_in_node(path, l, check_pos); in btree_path_good_node()
1060 struct btree_path *path, in btree_path_set_level_down() argument
1065 path->level = new_level; in btree_path_set_level_down()
1067 for (l = path->level + 1; l < BTREE_MAX_DEPTH; l++) in btree_path_set_level_down()
1068 if (btree_lock_want(path, l) == BTREE_NODE_UNLOCKED) in btree_path_set_level_down()
1069 btree_node_unlock(trans, path, l); in btree_path_set_level_down()
1071 btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE); in btree_path_set_level_down()
1072 bch2_btree_path_verify(trans, path); in btree_path_set_level_down()
1076 struct btree_path *path, in __btree_path_up_until_good_node() argument
1079 unsigned i, l = path->level; in __btree_path_up_until_good_node()
1081 while (btree_path_node(path, l) && in __btree_path_up_until_good_node()
1082 !btree_path_good_node(trans, path, l, check_pos)) in __btree_path_up_until_good_node()
1083 __btree_path_set_level_up(trans, path, l++); in __btree_path_up_until_good_node()
1087 i < path->locks_want && btree_path_node(path, i); in __btree_path_up_until_good_node()
1089 if (!bch2_btree_node_relock(trans, path, i)) { in __btree_path_up_until_good_node()
1091 __btree_path_set_level_up(trans, path, l++); in __btree_path_up_until_good_node()
1099 struct btree_path *path, in btree_path_up_until_good_node() argument
1102 return likely(btree_node_locked(path, path->level) && in btree_path_up_until_good_node()
1103 btree_path_check_pos_in_node(path, path->level, check_pos)) in btree_path_up_until_good_node()
1104 ? path->level in btree_path_up_until_good_node()
1105 : __btree_path_up_until_good_node(trans, path, check_pos); in btree_path_up_until_good_node()
1122 struct btree_path *path = &trans->paths[path_idx]; in bch2_btree_path_traverse_one() local
1123 unsigned depth_want = path->level; in bch2_btree_path_traverse_one()
1133 * Ensure we obey path->should_be_locked: if it's set, we can't unlock in bch2_btree_path_traverse_one()
1134 * and re-traverse the path without a transaction restart: in bch2_btree_path_traverse_one()
1136 if (path->should_be_locked) { in bch2_btree_path_traverse_one()
1137 ret = bch2_btree_path_relock(trans, path, trace_ip); in bch2_btree_path_traverse_one()
1141 if (path->cached) { in bch2_btree_path_traverse_one()
1142 ret = bch2_btree_path_traverse_cached(trans, path, flags); in bch2_btree_path_traverse_one()
1146 path = &trans->paths[path_idx]; in bch2_btree_path_traverse_one()
1148 if (unlikely(path->level >= BTREE_MAX_DEPTH)) in bch2_btree_path_traverse_one()
1151 path->level = btree_path_up_until_good_node(trans, path, 0); in bch2_btree_path_traverse_one()
1153 EBUG_ON(btree_path_node(path, path->level) && in bch2_btree_path_traverse_one()
1154 !btree_node_locked(path, path->level)); in bch2_btree_path_traverse_one()
1157 * Note: path->nodes[path->level] may be temporarily NULL here - that in bch2_btree_path_traverse_one()
1162 while (path->level > depth_want) { in bch2_btree_path_traverse_one()
1163 ret = btree_path_node(path, path->level) in bch2_btree_path_traverse_one()
1164 ? btree_path_down(trans, path, flags, trace_ip) in bch2_btree_path_traverse_one()
1165 : btree_path_lock_root(trans, path, depth_want, trace_ip); in bch2_btree_path_traverse_one()
1176 __bch2_btree_path_unlock(trans, path); in bch2_btree_path_traverse_one()
1177 path->level = depth_want; in bch2_btree_path_traverse_one()
1178 path->l[path->level].b = ERR_PTR(ret); in bch2_btree_path_traverse_one()
1183 path->uptodate = BTREE_ITER_UPTODATE; in bch2_btree_path_traverse_one()
1189 bch2_btree_path_verify(trans, path); in bch2_btree_path_traverse_one()
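bch2_btree_path_traverse_one() above (listing lines 1122-1189) first honours should_be_locked by relocking, takes the cached-key shortcut for cached paths, walks the path up until it sits on a locked node that still covers path->pos, and then descends one level at a time (locking the root first when no node is held) until path->level reaches the depth the caller asked for, finally marking the path BTREE_ITER_UPTODATE. A heavily simplified, standalone sketch of that descent loop; locking, prefetch, error handling and transaction restarts are all omitted, each toy node has a single child (whereas the real btree_path_down() picks the child covering path->pos via the node iterator), and the toy_* names are illustrative only:

    #include <stdio.h>

    #define TOY_MAX_DEPTH 4

    struct toy_node { unsigned level; struct toy_node *child; };

    struct toy_path {
            unsigned         level;              /* deepest level currently held */
            struct toy_node *l[TOY_MAX_DEPTH];   /* node at each level           */
    };

    /* Toy analogue of btree_path_lock_root(): start from the root node. */
    static void toy_lock_root(struct toy_path *path, struct toy_node *root)
    {
            path->level = root->level;
            path->l[path->level] = root;
    }

    /* Toy analogue of btree_path_down(): step to the child one level below. */
    static void toy_path_down(struct toy_path *path)
    {
            struct toy_node *child = path->l[path->level]->child;

            path->level--;
            path->l[path->level] = child;
    }

    /* Same loop shape as the while (path->level > depth_want) in traverse_one(). */
    static void toy_traverse(struct toy_path *path, struct toy_node *root,
                             unsigned depth_want)
    {
            toy_lock_root(path, root);

            while (path->level > depth_want)
                    toy_path_down(path);
    }

    int main(void)
    {
            struct toy_node leaf  = { .level = 0, .child = NULL  };
            struct toy_node inner = { .level = 1, .child = &leaf };
            struct toy_node root  = { .level = 2, .child = &inner };
            struct toy_path path  = { 0 };

            toy_traverse(&path, &root, 0);
            printf("reached level %u\n", path.level); /* prints 0 */
            return 0;
    }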
1221 btree_path_idx_t path, bool intent, unsigned long ip) in __bch2_btree_path_make_mut() argument
1223 __btree_path_put(trans->paths + path, intent); in __bch2_btree_path_make_mut()
1224 path = btree_path_clone(trans, path, intent); in __bch2_btree_path_make_mut()
1225 trans->paths[path].preserve = false; in __bch2_btree_path_make_mut()
1226 return path; in __bch2_btree_path_make_mut()
1241 struct btree_path *path = trans->paths + path_idx; in __bch2_btree_path_set_pos() local
1242 path->pos = new_pos; in __bch2_btree_path_set_pos()
1245 if (unlikely(path->cached)) { in __bch2_btree_path_set_pos()
1246 btree_node_unlock(trans, path, 0); in __bch2_btree_path_set_pos()
1247 path->l[0].b = ERR_PTR(-BCH_ERR_no_btree_node_up); in __bch2_btree_path_set_pos()
1248 btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE); in __bch2_btree_path_set_pos()
1252 unsigned level = btree_path_up_until_good_node(trans, path, cmp); in __bch2_btree_path_set_pos()
1254 if (btree_path_node(path, level)) { in __bch2_btree_path_set_pos()
1255 struct btree_path_level *l = &path->l[level]; in __bch2_btree_path_set_pos()
1257 BUG_ON(!btree_node_locked(path, level)); in __bch2_btree_path_set_pos()
1265 !btree_path_advance_to_pos(path, l, 8)) in __bch2_btree_path_set_pos()
1266 bch2_btree_node_iter_init(&l->iter, l->b, &path->pos); in __bch2_btree_path_set_pos()
1276 if (unlikely(level != path->level)) { in __bch2_btree_path_set_pos()
1277 btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE); in __bch2_btree_path_set_pos()
1278 __bch2_btree_path_unlock(trans, path); in __bch2_btree_path_set_pos()
1281 bch2_btree_path_verify(trans, path); in __bch2_btree_path_set_pos()
1285 /* Btree path: main interface: */
1287 static struct btree_path *have_path_at_pos(struct btree_trans *trans, struct btree_path *path) in have_path_at_pos() argument
1291 sib = prev_btree_path(trans, path); in have_path_at_pos()
1292 if (sib && !btree_path_cmp(sib, path)) in have_path_at_pos()
1295 sib = next_btree_path(trans, path); in have_path_at_pos()
1296 if (sib && !btree_path_cmp(sib, path)) in have_path_at_pos()
1302 static struct btree_path *have_node_at_pos(struct btree_trans *trans, struct btree_path *path) in have_node_at_pos() argument
1306 sib = prev_btree_path(trans, path); in have_node_at_pos()
1307 if (sib && sib->level == path->level && path_l(sib)->b == path_l(path)->b) in have_node_at_pos()
1310 sib = next_btree_path(trans, path); in have_node_at_pos()
1311 if (sib && sib->level == path->level && path_l(sib)->b == path_l(path)->b) in have_node_at_pos()
1317 static inline void __bch2_path_free(struct btree_trans *trans, btree_path_idx_t path) in __bch2_path_free() argument
1319 __bch2_btree_path_unlock(trans, trans->paths + path); in __bch2_path_free()
1320 btree_path_list_remove(trans, trans->paths + path); in __bch2_path_free()
1321 __clear_bit(path, trans->paths_allocated); in __bch2_path_free()
1326 struct btree_path *path = trans->paths + path_idx, *dup; in bch2_path_put() local
1328 if (!__btree_path_put(path, intent)) in bch2_path_put()
1331 dup = path->preserve in bch2_path_put()
1332 ? have_path_at_pos(trans, path) in bch2_path_put()
1333 : have_node_at_pos(trans, path); in bch2_path_put()
1335 if (!dup && !(!path->preserve && !is_btree_node(path, path->level))) in bch2_path_put()
1338 if (path->should_be_locked && in bch2_path_put()
1344 dup->preserve |= path->preserve; in bch2_path_put()
1345 dup->should_be_locked |= path->should_be_locked; in bch2_path_put()
1351 static void bch2_path_put_nokeep(struct btree_trans *trans, btree_path_idx_t path, in bch2_path_put_nokeep() argument
1354 if (!__btree_path_put(trans->paths + path, intent)) in bch2_path_put_nokeep()
1357 __bch2_path_free(trans, path); in bch2_path_put_nokeep()
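bch2_path_put() and bch2_path_put_nokeep() above (listing lines 1317-1357) only tear a path down once its last reference is dropped; the regular put first tries to hand preserve and should_be_locked over to a duplicate path found among the sorted neighbours (have_path_at_pos()/have_node_at_pos()). A bare-bones sketch of that hand-off, assuming the duplicate has already been looked up and using a plain int refcount; the toy_* names are illustrative:

    #include <stdbool.h>

    struct toy_path {
            int  ref;
            bool preserve;
            bool should_be_locked;
    };

    /* Drop one reference; true when it was the last (cf. __btree_path_put()). */
    static bool toy_path_put(struct toy_path *path)
    {
            return !--path->ref;
    }

    /*
     * cf. bch2_path_put(): when the last reference goes away, transfer the
     * path's flags to a duplicate at the same position before freeing it.
     */
    static void toy_put_and_free(struct toy_path *path, struct toy_path *dup)
    {
            if (!toy_path_put(path))
                    return;

            if (dup) {
                    dup->preserve         |= path->preserve;
                    dup->should_be_locked |= path->should_be_locked;
            }

            /* ...the real code then unlocks the path and returns it to the pool */
    }

    int main(void)
    {
            struct toy_path a = { .ref = 1, .preserve = true };
            struct toy_path b = { .ref = 1 };

            toy_put_and_free(&a, &b);
            return b.preserve ? 0 : 1;
    }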
1420 struct btree_path *path = trans->paths + path_idx; in bch2_btree_path_to_text() local
1422 prt_printf(out, "path: idx %2u ref %u:%u %c %c btree=%s l=%u pos ", in bch2_btree_path_to_text()
1423 path_idx, path->ref, path->intent_ref, in bch2_btree_path_to_text()
1424 path->preserve ? 'P' : ' ', in bch2_btree_path_to_text()
1425 path->should_be_locked ? 'S' : ' ', in bch2_btree_path_to_text()
1426 bch2_btree_id_str(path->btree_id), in bch2_btree_path_to_text()
1427 path->level); in bch2_btree_path_to_text()
1428 bch2_bpos_to_text(out, path->pos); in bch2_btree_path_to_text()
1430 prt_printf(out, " locks %u", path->nodes_locked); in bch2_btree_path_to_text()
1432 prt_printf(out, " %pS", (void *) path->ip_allocated); in bch2_btree_path_to_text()
1516 bch_err(trans->c, "trans path overflow"); in btree_path_overflow()
1574 * Do this before marking the new path as allocated, since it won't be in btree_path_alloc()
1582 struct btree_path *path = &trans->paths[idx]; in btree_path_alloc() local
1583 path->ref = 0; in btree_path_alloc()
1584 path->intent_ref = 0; in btree_path_alloc()
1585 path->nodes_locked = 0; in btree_path_alloc()
1597 struct btree_path *path; in bch2_path_get() local
1608 trans_for_each_path_inorder(trans, path, iter) { in bch2_path_get()
1609 if (__btree_path_cmp(path, in bch2_path_get()
1625 path = trans->paths + path_idx; in bch2_path_get()
1628 path = trans->paths + path_idx; in bch2_path_get()
1630 __btree_path_get(path, intent); in bch2_path_get()
1631 path->pos = pos; in bch2_path_get()
1632 path->btree_id = btree_id; in bch2_path_get()
1633 path->cached = cached; in bch2_path_get()
1634 path->uptodate = BTREE_ITER_NEED_TRAVERSE; in bch2_path_get()
1635 path->should_be_locked = false; in bch2_path_get()
1636 path->level = level; in bch2_path_get()
1637 path->locks_want = locks_want; in bch2_path_get()
1638 path->nodes_locked = 0; in bch2_path_get()
1639 for (unsigned i = 0; i < ARRAY_SIZE(path->l); i++) in bch2_path_get()
1640 path->l[i].b = ERR_PTR(-BCH_ERR_no_btree_node_init); in bch2_path_get()
1642 path->ip_allocated = ip; in bch2_path_get()
1648 path->preserve = true; in bch2_path_get()
1650 if (path->intent_ref) in bch2_path_get()
1654 * If the path has locks_want greater than requested, we don't downgrade in bch2_path_get()
1662 if (locks_want > path->locks_want) in bch2_path_get()
1663 bch2_btree_path_upgrade_noupgrade_sibs(trans, path, locks_want, NULL); in bch2_path_get()
1668 struct bkey_s_c bch2_btree_path_peek_slot(struct btree_path *path, struct bkey *u) in bch2_btree_path_peek_slot() argument
1671 struct btree_path_level *l = path_l(path); in bch2_btree_path_peek_slot()
1678 EBUG_ON(path->uptodate != BTREE_ITER_UPTODATE); in bch2_btree_path_peek_slot()
1679 EBUG_ON(!btree_node_locked(path, path->level)); in bch2_btree_path_peek_slot()
1681 if (!path->cached) { in bch2_btree_path_peek_slot()
1685 EBUG_ON(k.k && bkey_deleted(k.k) && bpos_eq(k.k->p, path->pos)); in bch2_btree_path_peek_slot()
1687 if (!k.k || !bpos_eq(path->pos, k.k->p)) in bch2_btree_path_peek_slot()
1690 struct bkey_cached *ck = (void *) path->l[0].b; in bch2_btree_path_peek_slot()
1693 (path->btree_id != ck->key.btree_id || in bch2_btree_path_peek_slot()
1694 !bkey_eq(path->pos, ck->key.pos))); in bch2_btree_path_peek_slot()
1705 u->p = path->pos; in bch2_btree_path_peek_slot()
1714 return bch2_btree_path_traverse(iter->trans, iter->path, iter->flags); in __bch2_btree_iter_traverse()
1723 iter->path = bch2_btree_path_set_pos(trans, iter->path, in bch2_btree_iter_traverse()
1728 ret = bch2_btree_path_traverse(iter->trans, iter->path, iter->flags); in bch2_btree_iter_traverse()
1732 btree_path_set_should_be_locked(trans->paths + iter->path); in bch2_btree_iter_traverse()
1744 EBUG_ON(trans->paths[iter->path].cached); in bch2_btree_iter_peek_node()
1747 ret = bch2_btree_path_traverse(trans, iter->path, iter->flags); in bch2_btree_iter_peek_node()
1751 struct btree_path *path = btree_iter_path(trans, iter); in bch2_btree_iter_peek_node() local
1752 b = btree_path_node(path, path->level); in bch2_btree_iter_peek_node()
1761 iter->path = bch2_btree_path_set_pos(trans, iter->path, b->key.k.p, in bch2_btree_iter_peek_node()
1792 EBUG_ON(trans->paths[iter->path].cached); in bch2_btree_iter_next_node()
1796 struct btree_path *path = btree_iter_path(trans, iter); in bch2_btree_iter_next_node() local
1799 if (!btree_path_node(path, path->level)) in bch2_btree_iter_next_node()
1803 if (!btree_path_node(path, path->level + 1)) { in bch2_btree_iter_next_node()
1804 btree_path_set_level_up(trans, path); in bch2_btree_iter_next_node()
1808 if (!bch2_btree_node_relock(trans, path, path->level + 1)) { in bch2_btree_iter_next_node()
1809 __bch2_btree_path_unlock(trans, path); in bch2_btree_iter_next_node()
1810 path->l[path->level].b = ERR_PTR(-BCH_ERR_no_btree_node_relock); in bch2_btree_iter_next_node()
1811 path->l[path->level + 1].b = ERR_PTR(-BCH_ERR_no_btree_node_relock); in bch2_btree_iter_next_node()
1812 btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE); in bch2_btree_iter_next_node()
1813 trace_and_count(trans->c, trans_restart_relock_next_node, trans, _THIS_IP_, path); in bch2_btree_iter_next_node()
1818 b = btree_path_node(path, path->level + 1); in bch2_btree_iter_next_node()
1821 __btree_path_set_level_up(trans, path, path->level++); in bch2_btree_iter_next_node()
1827 iter->path = bch2_btree_path_set_pos(trans, iter->path, in bch2_btree_iter_next_node()
1832 path = btree_iter_path(trans, iter); in bch2_btree_iter_next_node()
1833 btree_path_set_level_down(trans, path, iter->min_depth); in bch2_btree_iter_next_node()
1835 ret = bch2_btree_path_traverse(trans, iter->path, iter->flags); in bch2_btree_iter_next_node()
1839 path = btree_iter_path(trans, iter); in bch2_btree_iter_next_node()
1840 b = path->l[path->level].b; in bch2_btree_iter_next_node()
1846 iter->path = bch2_btree_path_set_pos(trans, iter->path, b->key.k.p, in bch2_btree_iter_next_node()
1909 struct btree_path *path = btree_iter_path(trans, iter); in bch2_btree_trans_peek_updates() local
1910 struct bpos end = path_l(path)->b->key.k.p; in bch2_btree_trans_peek_updates()
1915 bpos_ge(i->k->k.p, path->pos) && in bch2_btree_trans_peek_updates()
1939 struct btree_path *path = btree_iter_path(trans, iter); in bch2_btree_journal_peek() local
1942 path->level, in bch2_btree_journal_peek()
1943 path->pos, in bch2_btree_journal_peek()
1952 struct btree_path *path = btree_iter_path(trans, iter); in btree_trans_peek_slot_journal() local
1953 struct bkey_i *k = bch2_btree_journal_peek(trans, iter, path->pos); in btree_trans_peek_slot_journal()
1968 struct btree_path *path = btree_iter_path(trans, iter); in btree_trans_peek_journal() local
1971 k.k ? k.k->p : path_l(path)->b->key.k.p); in btree_trans_peek_journal()
2040 iter->path = bch2_btree_path_set_pos(trans, iter->path, search_key, in __bch2_btree_iter_peek()
2044 ret = bch2_btree_path_traverse(trans, iter->path, iter->flags); in __bch2_btree_iter_peek()
2052 struct btree_path *path = btree_iter_path(trans, iter); in __bch2_btree_iter_peek() local
2053 l = path_l(path); in __bch2_btree_iter_peek()
2062 btree_path_set_should_be_locked(path); in __bch2_btree_iter_peek()
2185 * advance, same as on exit for iter->path, but only up in bch2_btree_iter_peek_upto()
2188 __btree_path_get(trans->paths + iter->path, iter->flags & BTREE_ITER_INTENT); in bch2_btree_iter_peek_upto()
2189 iter->update_path = iter->path; in bch2_btree_iter_peek_upto()
2240 iter->path = bch2_btree_path_set_pos(trans, iter->path, k.k->p, in bch2_btree_iter_peek_upto()
2317 iter->path = bch2_btree_path_set_pos(trans, iter->path, search_key, in bch2_btree_iter_peek_prev()
2321 ret = bch2_btree_path_traverse(trans, iter->path, iter->flags); in bch2_btree_iter_peek_prev()
2329 struct btree_path *path = btree_iter_path(trans, iter); in bch2_btree_iter_peek_prev() local
2331 k = btree_path_level_peek(trans, path, &path->l[0], &iter->k); in bch2_btree_iter_peek_prev()
2336 k = btree_path_level_prev(trans, path, &path->l[0], &iter->k); in bch2_btree_iter_peek_prev()
2353 bch2_path_put_nokeep(trans, iter->path, in bch2_btree_iter_peek_prev()
2355 iter->path = saved_path; in bch2_btree_iter_peek_prev()
2368 saved_path = btree_path_clone(trans, iter->path, in bch2_btree_iter_peek_prev()
2370 path = btree_iter_path(trans, iter); in bch2_btree_iter_peek_prev()
2387 btree_path_set_should_be_locked(path); in bch2_btree_iter_peek_prev()
2389 } else if (likely(!bpos_eq(path->l[0].b->data->min_key, POS_MIN))) { in bch2_btree_iter_peek_prev()
2391 search_key = bpos_predecessor(path->l[0].b->data->min_key); in bch2_btree_iter_peek_prev()
2454 iter->path = bch2_btree_path_set_pos(trans, iter->path, search_key, in bch2_btree_iter_peek_slot()
2458 ret = bch2_btree_path_traverse(trans, iter->path, iter->flags); in bch2_btree_iter_peek_slot()
2483 /* We're not returning a key from iter->path: */ in bch2_btree_iter_peek_slot()
2487 k = bch2_btree_path_peek_slot(trans->paths + iter->path, &iter->k); in bch2_btree_iter_peek_slot()
2587 struct btree_path *path; in btree_trans_verify_sorted_refs() local
2592 trans_for_each_path(trans, path, i) { in btree_trans_verify_sorted_refs()
2593 BUG_ON(path->sorted_idx >= trans->nr_sorted); in btree_trans_verify_sorted_refs()
2594 BUG_ON(trans->sorted[path->sorted_idx] != i); in btree_trans_verify_sorted_refs()
2607 struct btree_path *path, *prev = NULL; in btree_trans_verify_sorted() local
2613 trans_for_each_path_inorder(trans, path, iter) { in btree_trans_verify_sorted()
2614 if (prev && btree_path_cmp(prev, path) > 0) { in btree_trans_verify_sorted()
2618 prev = path; in btree_trans_verify_sorted()
2668 struct btree_path *path) in btree_path_list_remove() argument
2670 EBUG_ON(path->sorted_idx >= trans->nr_sorted); in btree_path_list_remove()
2673 memmove_u64s_down_small(trans->sorted + path->sorted_idx, in btree_path_list_remove()
2674 trans->sorted + path->sorted_idx + 1, in btree_path_list_remove()
2675 DIV_ROUND_UP(trans->nr_sorted - path->sorted_idx, in btree_path_list_remove()
2678 array_remove_item(trans->sorted, trans->nr_sorted, path->sorted_idx); in btree_path_list_remove()
2680 for (unsigned i = path->sorted_idx; i < trans->nr_sorted; i++) in btree_path_list_remove()
2688 struct btree_path *path = trans->paths + path_idx; in btree_path_list_add() local
2690 path->sorted_idx = pos ? trans->paths[pos].sorted_idx + 1 : trans->nr_sorted; in btree_path_list_add()
2693 memmove_u64s_up_small(trans->sorted + path->sorted_idx + 1, in btree_path_list_add()
2694 trans->sorted + path->sorted_idx, in btree_path_list_add()
2695 DIV_ROUND_UP(trans->nr_sorted - path->sorted_idx, in btree_path_list_add()
2698 trans->sorted[path->sorted_idx] = path_idx; in btree_path_list_add()
2700 array_insert_item(trans->sorted, trans->nr_sorted, path->sorted_idx, path_idx); in btree_path_list_add()
2703 for (unsigned i = path->sorted_idx; i < trans->nr_sorted; i++) in btree_path_list_add()
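btree_path_list_remove() and btree_path_list_add() above (listing lines 2668-2703) maintain trans->sorted, an array of path indices kept in position order: insertion and removal shift the tail of the array and then walk the shifted entries so each path's sorted_idx points back at its slot. A standalone sketch of the same bookkeeping on a plain byte array, assuming the caller passes the sorted position directly (the kernel code derives it from a neighbouring path) and ignoring the u64-granularity memmove helpers:

    #include <string.h>

    #define TOY_MAX_PATHS 8

    struct toy_path { unsigned sorted_idx; };

    struct toy_trans {
            struct toy_path paths[TOY_MAX_PATHS];
            unsigned char   sorted[TOY_MAX_PATHS]; /* path indices, in order */
            unsigned        nr_sorted;
    };

    /* Insert path_idx at sorted position pos (cf. btree_path_list_add()). */
    static void toy_list_add(struct toy_trans *trans, unsigned pos, unsigned char path_idx)
    {
            trans->paths[path_idx].sorted_idx = pos;

            /* shift the tail up by one to make room */
            memmove(trans->sorted + pos + 1,
                    trans->sorted + pos,
                    trans->nr_sorted - pos);
            trans->sorted[pos] = path_idx;
            trans->nr_sorted++;

            /* every entry that moved needs its back-pointer fixed up */
            for (unsigned i = pos; i < trans->nr_sorted; i++)
                    trans->paths[trans->sorted[i]].sorted_idx = i;
    }

    /* Remove a path from the sorted list (cf. btree_path_list_remove()). */
    static void toy_list_remove(struct toy_trans *trans, unsigned char path_idx)
    {
            unsigned pos = trans->paths[path_idx].sorted_idx;

            memmove(trans->sorted + pos,
                    trans->sorted + pos + 1,
                    trans->nr_sorted - pos - 1);
            trans->nr_sorted--;

            for (unsigned i = pos; i < trans->nr_sorted; i++)
                    trans->paths[trans->sorted[i]].sorted_idx = i;
    }

    int main(void)
    {
            struct toy_trans trans = { .nr_sorted = 0 };

            toy_list_add(&trans, 0, 3);   /* sorted: [3]    */
            toy_list_add(&trans, 0, 5);   /* sorted: [5, 3] */
            toy_list_remove(&trans, 3);   /* sorted: [5]    */

            return trans.sorted[0] == 5 && trans.nr_sorted == 1 ? 0 : 1;
    }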
2714 if (iter->path) in bch2_trans_iter_exit()
2715 bch2_path_put(trans, iter->path, in bch2_trans_iter_exit()
2720 iter->path = 0; in bch2_trans_iter_exit()
2754 struct btree_path *path = btree_iter_path(trans, iter); in bch2_trans_node_iter_init() local
2755 BUG_ON(path->locks_want < min(locks_want, BTREE_MAX_DEPTH)); in bch2_trans_node_iter_init()
2756 BUG_ON(path->level != depth); in bch2_trans_node_iter_init()
2765 if (src->path) in bch2_trans_copy_iter()
2766 __btree_path_get(trans->paths + src->path, src->flags & BTREE_ITER_INTENT); in bch2_trans_copy_iter()
2834 struct btree_path *path; in bch2_trans_srcu_unlock() local
2837 trans_for_each_path(trans, path, i) in bch2_trans_srcu_unlock()
2838 if (path->cached && !btree_node_locked(path, 0)) in bch2_trans_srcu_unlock()
2839 path->l[0].b = ERR_PTR(-BCH_ERR_no_btree_node_srcu_reset); in bch2_trans_srcu_unlock()
2868 struct btree_path *path; in bch2_trans_begin() local
2878 trans_for_each_path(trans, path, i) { in bch2_trans_begin()
2879 path->should_be_locked = false; in bch2_trans_begin()
2886 if (!trans->restarted && path->btree_id != BTREE_ID_subvolumes) in bch2_trans_begin()
2887 path->preserve = false; in bch2_trans_begin()
2894 if (!path->ref && !path->preserve) in bch2_trans_begin()
2897 path->preserve = false; in bch2_trans_begin()
2972 * but the data move path calls bch2_write when we in __bch2_trans_get()
3033 struct btree_path *path; in check_btree_paths_leaked() local
3036 trans_for_each_path(trans, path, i) in check_btree_paths_leaked()
3037 if (path->ref) in check_btree_paths_leaked()
3042 trans_for_each_path(trans, path, i) in check_btree_paths_leaked()
3043 if (path->ref) in check_btree_paths_leaked()
3045 bch2_btree_id_str(path->btree_id), in check_btree_paths_leaked()
3046 (void *) path->ip_allocated); in check_btree_paths_leaked()
3060 __btree_path_put(trans->paths + i->path, true); in bch2_trans_put()
3161 struct btree_path *path = paths + idx; in bch2_btree_trans_to_text() local
3162 if (!path->nodes_locked) in bch2_btree_trans_to_text()
3165 prt_printf(out, " path %u %c l=%u %s:", in bch2_btree_trans_to_text()
3167 path->cached ? 'c' : 'b', in bch2_btree_trans_to_text()
3168 path->level, in bch2_btree_trans_to_text()
3169 bch2_btree_id_str(path->btree_id)); in bch2_btree_trans_to_text()
3170 bch2_bpos_to_text(out, path->pos); in bch2_btree_trans_to_text()
3174 if (btree_node_locked(path, l) && in bch2_btree_trans_to_text()
3175 !IS_ERR_OR_NULL(b = (void *) READ_ONCE(path->l[l].b))) { in bch2_btree_trans_to_text()
3177 lock_types[btree_node_locked_type(path, l)], l); in bch2_btree_trans_to_text()