Lines matching full:path (identifier search over the bcachefs btree locking code; each hit shows its source line number, the matching line, and the containing function):
24 struct btree_path *path; in bch2_btree_node_lock_counts() local
33 trans_for_each_path(trans, path, i) in bch2_btree_node_lock_counts()
34 if (path != skip && &path->l[level].b->c == b) { in bch2_btree_node_lock_counts()
35 int t = btree_node_locked_type(path, level); in bch2_btree_node_lock_counts()
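
The hits above come from bch2_btree_node_lock_counts(): walk every btree_path held by the transaction, skip the path being examined, and tally how the remaining paths hold the same node at the given level, bucketed by lock type. Below is a minimal standalone sketch of that counting pattern; struct trans, struct path and the lock-type enum here are reduced stand-ins, not the real bcachefs definitions.

#include <stdio.h>

enum lock_type { UNLOCKED = -1, READ_LOCKED, INTENT_LOCKED, WRITE_LOCKED };

struct node { int id; };

struct path {
    struct node    *node_at_level[4];   /* node pointer per level */
    enum lock_type  lock_at_level[4];   /* how this path holds it */
};

struct trans {
    struct path paths[8];
    int         nr_paths;
};

/* Count how many paths other than @skip hold @b at @level, per lock type. */
static void node_lock_counts(struct trans *trans, struct path *skip,
                             struct node *b, unsigned level, int counts[3])
{
    counts[0] = counts[1] = counts[2] = 0;

    for (int i = 0; i < trans->nr_paths; i++) {
        struct path *path = &trans->paths[i];

        if (path != skip && path->node_at_level[level] == b) {
            int t = path->lock_at_level[level];

            if (t != UNLOCKED)
                counts[t]++;
        }
    }
}

int main(void)
{
    struct node n = { 1 };
    struct trans trans = { .nr_paths = 2 };

    trans.paths[0].node_at_level[0] = &n;
    trans.paths[0].lock_at_level[0] = READ_LOCKED;
    trans.paths[1].node_at_level[0] = &n;
    trans.paths[1].lock_at_level[0] = INTENT_LOCKED;

    int counts[3];
    node_lock_counts(&trans, NULL, &n, 0, counts);
    printf("read=%d intent=%d write=%d\n", counts[0], counts[1], counts[2]);
    return 0;
}

In the real code this feeds __bch2_btree_node_lock_write(), which needs to know how many of a node's readers are the transaction's own paths before it can take the write lock.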
47 struct btree_path *path, struct btree *b) in bch2_btree_node_unlock_write() argument
49 bch2_btree_node_unlock_write_inlined(trans, path, b); in bch2_btree_node_unlock_write()
327 struct btree_path *path = paths + path_idx; in bch2_check_for_deadlock() local
328 if (!path->nodes_locked) in bch2_check_for_deadlock()
340 int lock_held = btree_node_locked_type(path, top->level); in bch2_check_for_deadlock()
345 b = &READ_ONCE(path->l[top->level].b)->c; in bch2_check_for_deadlock()
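
These hits are from bch2_check_for_deadlock(), the lock cycle detector: for each path that has nodes locked, it reads the node the blocked lock-graph entry is waiting on (READ_ONCE, since other threads may be modifying it concurrently) and follows the edge to whoever currently holds a conflicting lock. The underlying idea is cycle detection in a waits-for graph; here is a generic sketch of just that idea, with a hypothetical adjacency matrix standing in for the real lock graph (the actual implementation is iterative and handles aborting one transaction in the cycle).

#include <stdbool.h>
#include <stdio.h>

#define MAX_TRANS 8

/* waits_for[i][j]: transaction i is blocked on a lock that j holds */
static bool waits_for[MAX_TRANS][MAX_TRANS];

/* Depth-first walk of the waits-for graph; a back edge to a
 * transaction already on the DFS stack means a deadlock cycle. */
static bool visit(int t, bool *on_stack, bool *done)
{
    if (on_stack[t])
        return true;
    if (done[t])
        return false;

    on_stack[t] = true;
    for (int j = 0; j < MAX_TRANS; j++)
        if (waits_for[t][j] && visit(j, on_stack, done))
            return true;
    on_stack[t] = false;
    done[t] = true;
    return false;
}

int main(void)
{
    /* 0 waits on 1, 1 waits on 2, 2 waits on 0: a cycle */
    waits_for[0][1] = waits_for[1][2] = waits_for[2][0] = true;

    bool on_stack[MAX_TRANS] = { false }, done[MAX_TRANS] = { false };
    printf("deadlock: %s\n", visit(0, on_stack, done) ? "yes" : "no");
    return 0;
}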
419 int __bch2_btree_node_lock_write(struct btree_trans *trans, struct btree_path *path, in __bch2_btree_node_lock_write() argument
438 mark_btree_node_locked_noreset(path, b->level, BTREE_NODE_INTENT_LOCKED); in __bch2_btree_node_lock_write()
444 struct btree_path *path, in bch2_btree_node_lock_write_nofail() argument
447 int ret = __btree_node_lock_write(trans, path, b, true); in bch2_btree_node_lock_write_nofail()
454 struct btree_path *path, in btree_path_get_locks() argument
458 unsigned l = path->level; in btree_path_get_locks()
462 if (!btree_path_node(path, l)) in btree_path_get_locks()
466 ? bch2_btree_node_upgrade(trans, path, l) in btree_path_get_locks()
467 : bch2_btree_node_relock(trans, path, l))) { in btree_path_get_locks()
472 f->b = path->l[l].b; in btree_path_get_locks()
477 } while (l < path->locks_want); in btree_path_get_locks()
485 __bch2_btree_path_unlock(trans, path); in btree_path_get_locks()
486 btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE); in btree_path_get_locks()
489 path->l[fail_idx].b = upgrade in btree_path_get_locks()
496 if (path->uptodate == BTREE_ITER_NEED_RELOCK) in btree_path_get_locks()
497 path->uptodate = BTREE_ITER_UPTODATE; in btree_path_get_locks()
499 return path->uptodate < BTREE_ITER_NEED_RELOCK; in btree_path_get_locks()
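
btree_path_get_locks() is the central retry loop: starting at path->level, take the lock at each level up to locks_want, either upgrading or relocking; on the first failure, remember which level (and node) failed, drop every lock the path holds, and mark the path as needing a full re-traverse, because a partial set of locks is useless. A simplified sketch of that all-or-nothing loop follows; try_lock_level() is a hypothetical stand-in for bch2_btree_node_relock()/bch2_btree_node_upgrade(), and the structs are toy versions.

#include <stdbool.h>
#include <stdio.h>

enum uptodate { UPTODATE, NEED_RELOCK, NEED_TRAVERSE };

struct path {
    unsigned level;        /* lowest level this path cares about */
    unsigned locks_want;   /* want locks on levels [level, locks_want) */
    bool     locked[4];
    enum uptodate uptodate;
};

/* Stand-in for bch2_btree_node_relock()/bch2_btree_node_upgrade(). */
static bool try_lock_level(struct path *path, unsigned l)
{
    (void) path;
    return l != 2;    /* pretend level 2 can't be relocked */
}

static bool path_get_locks(struct path *path)
{
    unsigned l = path->level;

    do {
        if (!try_lock_level(path, l)) {
            /*
             * Partial success is useless: drop what we took
             * and force a full re-traverse of this path.
             */
            while (l > path->level)
                path->locked[--l] = false;
            path->uptodate = NEED_TRAVERSE;
            return false;
        }
        path->locked[l++] = true;
    } while (l < path->locks_want);

    if (path->uptodate == NEED_RELOCK)
        path->uptodate = UPTODATE;

    return path->uptodate < NEED_RELOCK;
}

int main(void)
{
    struct path ok   = { .level = 0, .locks_want = 2, .uptodate = NEED_RELOCK };
    struct path fail = { .level = 0, .locks_want = 3, .uptodate = NEED_RELOCK };

    printf("want 2: got=%d uptodate=%d\n", path_get_locks(&ok),   ok.uptodate);
    printf("want 3: got=%d uptodate=%d\n", path_get_locks(&fail), fail.uptodate);
    return 0;
}

As in the original, the sketch returns true only when the path ends up fully locked (uptodate strictly better than NEED_RELOCK).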
503 struct btree_path *path, unsigned level, in __bch2_btree_node_relock() argument
506 struct btree *b = btree_path_node(path, level); in __bch2_btree_node_relock()
507 int want = __btree_lock_want(path, level); in __bch2_btree_node_relock()
512 if (six_relock_type(&b->c.lock, want, path->l[level].lock_seq) || in __bch2_btree_node_relock()
513 (btree_node_lock_seq_matches(path, b, level) && in __bch2_btree_node_relock()
515 mark_btree_node_locked(trans, path, level, want); in __bch2_btree_node_relock()
520 trace_and_count(trans->c, btree_path_relock_fail, trans, _RET_IP_, path, level); in __bch2_btree_node_relock()
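
__bch2_btree_node_relock() is the optimistic relock: instead of re-traversing from the root, it retakes the node's lock only if the lock's sequence number still matches the value saved in path->l[level].lock_seq, which proves the node was not modified while the lock was dropped; otherwise it traces a relock failure and the caller restarts. A toy model of a seq-guarded relock, assuming a simplified lock rather than the real six lock API:

#include <stdbool.h>
#include <stdio.h>

/* Toy lock with a sequence number that is bumped whenever the node
 * is modified, so a path can tell after the fact whether the node
 * changed while it had the lock dropped. Not the real six lock. */
struct toy_lock {
    unsigned seq;
    bool     held;
};

static bool toy_relock(struct toy_lock *lock, unsigned seq_when_dropped)
{
    if (lock->held || lock->seq != seq_when_dropped)
        return false;    /* changed or contended: caller must re-traverse */
    lock->held = true;
    return true;
}

int main(void)
{
    struct toy_lock l = { .seq = 42, .held = false };
    unsigned saved = l.seq;

    printf("relock, unchanged: %d\n", toy_relock(&l, saved));

    l.held = false;
    l.seq++;             /* someone modified the node in between */
    printf("relock, changed:   %d\n", toy_relock(&l, saved));
    return 0;
}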
527 struct btree_path *path, unsigned level) in bch2_btree_node_upgrade() argument
529 struct btree *b = path->l[level].b; in bch2_btree_node_upgrade()
531 if (!is_btree_node(path, level)) in bch2_btree_node_upgrade()
534 switch (btree_lock_want(path, level)) { in bch2_btree_node_upgrade()
536 BUG_ON(btree_node_locked(path, level)); in bch2_btree_node_upgrade()
539 BUG_ON(btree_node_intent_locked(path, level)); in bch2_btree_node_upgrade()
540 return bch2_btree_node_relock(trans, path, level); in bch2_btree_node_upgrade()
547 if (btree_node_intent_locked(path, level)) in bch2_btree_node_upgrade()
553 if (btree_node_locked(path, level) in bch2_btree_node_upgrade()
555 : six_relock_type(&b->c.lock, SIX_LOCK_intent, path->l[level].lock_seq)) in bch2_btree_node_upgrade()
558 if (btree_node_lock_seq_matches(path, b, level) && in bch2_btree_node_upgrade()
560 btree_node_unlock(trans, path, level); in bch2_btree_node_upgrade()
564 trace_and_count(trans->c, btree_path_upgrade_fail, trans, _RET_IP_, path, level); in bch2_btree_node_upgrade()
567 mark_btree_node_locked_noreset(path, level, BTREE_NODE_INTENT_LOCKED); in bch2_btree_node_upgrade()
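
bch2_btree_node_upgrade() strengthens a read lock to an intent lock, trying the cheap routes first: a trylock-style upgrade if the lock is already held, else a seq-guarded relock directly to SIX_LOCK_intent, and only then giving up and tracing an upgrade failure. In six-lock terms an intent lock coexists with readers but excludes other intent holders, which is what makes a non-blocking upgrade possible. A toy model of that rule (toy_lock is a simplification, not the real six lock):

#include <stdbool.h>
#include <stdio.h>

/* Toy model: intent coexists with readers but excludes other
 * intent holders, so upgrading our read lock to intent never
 * blocks; it simply fails if someone else already holds intent. */
struct toy_lock {
    int  readers;
    bool intent;
};

static bool toy_tryupgrade(struct toy_lock *l)
{
    if (l->intent)
        return false;    /* contended: caller unlocks and restarts */
    l->readers--;        /* drop our read side... */
    l->intent = true;    /* ...and take intent (single-threaded toy) */
    return true;
}

int main(void)
{
    struct toy_lock uncontended = { .readers = 1, .intent = false };
    struct toy_lock contended   = { .readers = 1, .intent = true  };

    printf("upgrade, no intent holder: %d\n", toy_tryupgrade(&uncontended));
    printf("upgrade, intent held:      %d\n", toy_tryupgrade(&contended));
    return 0;
}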
571 /* Btree path locking: */
577 struct btree_path *path) in bch2_btree_path_relock_intent() argument
581 for (l = path->level; in bch2_btree_path_relock_intent()
582 l < path->locks_want && btree_path_node(path, l); in bch2_btree_path_relock_intent()
584 if (!bch2_btree_node_relock(trans, path, l)) { in bch2_btree_path_relock_intent()
585 __bch2_btree_path_unlock(trans, path); in bch2_btree_path_relock_intent()
586 btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE); in bch2_btree_path_relock_intent()
587 trace_and_count(trans->c, trans_restart_relock_path_intent, trans, _RET_IP_, path); in bch2_btree_path_relock_intent()
596 bool bch2_btree_path_relock_norestart(struct btree_trans *trans, struct btree_path *path) in bch2_btree_path_relock_norestart() argument
600 bool ret = btree_path_get_locks(trans, path, false, &f); in bch2_btree_path_relock_norestart()
606 struct btree_path *path, unsigned long trace_ip) in __bch2_btree_path_relock() argument
608 if (!bch2_btree_path_relock_norestart(trans, path)) { in __bch2_btree_path_relock()
609 trace_and_count(trans->c, trans_restart_relock_path, trans, trace_ip, path); in __bch2_btree_path_relock()
617 struct btree_path *path, in bch2_btree_path_upgrade_noupgrade_sibs() argument
621 EBUG_ON(path->locks_want >= new_locks_want); in bch2_btree_path_upgrade_noupgrade_sibs()
623 path->locks_want = new_locks_want; in bch2_btree_path_upgrade_noupgrade_sibs()
625 bool ret = btree_path_get_locks(trans, path, true, f); in bch2_btree_path_upgrade_noupgrade_sibs()
631 struct btree_path *path, in __bch2_btree_path_upgrade() argument
635 bool ret = bch2_btree_path_upgrade_noupgrade_sibs(trans, path, new_locks_want, f); in __bch2_btree_path_upgrade()
658 if (!path->cached && !trans->in_traverse_all) { in __bch2_btree_path_upgrade()
663 if (linked != path && in __bch2_btree_path_upgrade()
664 linked->cached == path->cached && in __bch2_btree_path_upgrade()
665 linked->btree_id == path->btree_id && in __bch2_btree_path_upgrade()
677 struct btree_path *path, in __bch2_btree_path_downgrade() argument
680 unsigned l, old_locks_want = path->locks_want; in __bch2_btree_path_downgrade()
685 EBUG_ON(path->locks_want < new_locks_want); in __bch2_btree_path_downgrade()
687 path->locks_want = new_locks_want; in __bch2_btree_path_downgrade()
689 while (path->nodes_locked && in __bch2_btree_path_downgrade()
690 (l = btree_path_highest_level_locked(path)) >= path->locks_want) { in __bch2_btree_path_downgrade()
691 if (l > path->level) { in __bch2_btree_path_downgrade()
692 btree_node_unlock(trans, path, l); in __bch2_btree_path_downgrade()
694 if (btree_node_intent_locked(path, l)) { in __bch2_btree_path_downgrade()
695 six_lock_downgrade(&path->l[l].b->c.lock); in __bch2_btree_path_downgrade()
696 mark_btree_node_locked_noreset(path, l, BTREE_NODE_READ_LOCKED); in __bch2_btree_path_downgrade()
702 bch2_btree_path_verify_locks(path); in __bch2_btree_path_downgrade()
704 trace_path_downgrade(trans, _RET_IP_, path, old_locks_want); in __bch2_btree_path_downgrade()
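
__bch2_btree_path_downgrade() releases locking strength the path no longer needs: locked levels at or above the new locks_want are unlocked outright if they are above path->level, and at the level the path keeps, an intent lock is weakened to a read lock (six_lock_downgrade() in the real code). A standalone sketch of that loop, with stand-in types:

#include <stdbool.h>
#include <stdio.h>

enum lock_type { UNLOCKED, READ_LOCKED, INTENT_LOCKED };

struct path {
    unsigned level;        /* lowest level of interest */
    unsigned locks_want;   /* levels below this want intent locks */
    enum lock_type locks[8];
};

static bool any_locked(const struct path *p)
{
    for (unsigned l = 0; l < 8; l++)
        if (p->locks[l] != UNLOCKED)
            return true;
    return false;
}

static unsigned highest_locked(const struct path *p)
{
    unsigned best = 0;

    for (unsigned l = 0; l < 8; l++)
        if (p->locks[l] != UNLOCKED)
            best = l;
    return best;
}

static void path_downgrade(struct path *path, unsigned new_locks_want)
{
    unsigned l;

    path->locks_want = new_locks_want;

    while (any_locked(path) &&
           (l = highest_locked(path)) >= path->locks_want) {
        if (l > path->level) {
            path->locks[l] = UNLOCKED;          /* not needed at all */
        } else {
            if (path->locks[l] == INTENT_LOCKED)
                path->locks[l] = READ_LOCKED;   /* weaken intent to read */
            break;
        }
    }
}

int main(void)
{
    struct path p = { .level = 1, .locks_want = 3 };

    p.locks[1] = INTENT_LOCKED;
    p.locks[2] = INTENT_LOCKED;

    path_downgrade(&p, 1);
    printf("l1=%d l2=%d\n", p.locks[1], p.locks[2]);  /* l1=1 (read), l2=0 */
    return 0;
}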
711 struct btree_path *path; in bch2_trans_downgrade() local
717 trans_for_each_path(trans, path, i) in bch2_trans_downgrade()
718 if (path->ref) in bch2_trans_downgrade()
719 bch2_btree_path_downgrade(trans, path); in bch2_trans_downgrade()
724 struct btree_path *path; in __bch2_trans_unlock() local
727 trans_for_each_path(trans, path, i) in __bch2_trans_unlock()
728 __bch2_btree_path_unlock(trans, path); in __bch2_trans_unlock()
731 static noinline __cold int bch2_trans_relock_fail(struct btree_trans *trans, struct btree_path *path, in bch2_trans_relock_fail() argument
740 bch2_bpos_to_text(&buf, path->pos); in bch2_trans_relock_fail()
741 prt_printf(&buf, " l=%u seq=%u node seq=", f->l, path->l[f->l].lock_seq); in bch2_trans_relock_fail()
775 struct btree_path *path; in __bch2_trans_relock() local
778 trans_for_each_path(trans, path, i) { in __bch2_trans_relock()
781 if (path->should_be_locked && in __bch2_trans_relock()
782 !btree_path_get_locks(trans, path, false, &f)) in __bch2_trans_relock()
783 return bch2_trans_relock_fail(trans, path, &f, trace); in __bch2_trans_relock()
824 struct btree_path *path; in bch2_trans_unlock_write() local
827 trans_for_each_path(trans, path, i) in bch2_trans_unlock_write()
829 if (btree_node_write_locked(path, l)) in bch2_trans_unlock_write()
830 bch2_btree_node_unlock_write(trans, path, path->l[l].b); in bch2_trans_unlock_write()
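
bch2_trans_unlock_write() walks every path and level and drops only the write locks. A six write lock is an intent lock plus exclusion of readers, so releasing the write side leaves the intent lock held and the transaction keeps its position in the tree. A toy sketch of that pass:

#include <stdio.h>

enum lock_type { UNLOCKED, READ_LOCKED, INTENT_LOCKED, WRITE_LOCKED };

struct path  { enum lock_type locks[4]; };
struct trans { struct path paths[4]; int nr_paths; };

/* Drop write locks only: a write lock is an intent lock plus
 * exclusion of readers, so releasing the write side leaves the
 * intent lock (and the path's position) intact. */
static void trans_unlock_write(struct trans *trans)
{
    for (int i = 0; i < trans->nr_paths; i++)
        for (int l = 0; l < 4; l++)
            if (trans->paths[i].locks[l] == WRITE_LOCKED)
                trans->paths[i].locks[l] = INTENT_LOCKED;
}

int main(void)
{
    struct trans trans = { .nr_paths = 1 };

    trans.paths[0].locks[0] = WRITE_LOCKED;
    trans.paths[0].locks[1] = INTENT_LOCKED;

    trans_unlock_write(&trans);
    printf("l0=%d l1=%d\n", trans.paths[0].locks[0], trans.paths[0].locks[1]);
    return 0;
}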
847 void bch2_btree_path_verify_locks(struct btree_path *path) in bch2_btree_path_verify_locks() argument
850 * A path may be uptodate and yet have nothing locked if and only if in bch2_btree_path_verify_locks()
851 * there is no node at path->level, which generally means we were in bch2_btree_path_verify_locks()
854 BUG_ON(path->uptodate == BTREE_ITER_UPTODATE && in bch2_btree_path_verify_locks()
855 btree_path_node(path, path->level) && in bch2_btree_path_verify_locks()
856 !path->nodes_locked); in bch2_btree_path_verify_locks()
858 if (!path->nodes_locked) in bch2_btree_path_verify_locks()
862 int want = btree_lock_want(path, l); in bch2_btree_path_verify_locks()
863 int have = btree_node_locked_type(path, l); in bch2_btree_path_verify_locks()
865 BUG_ON(!is_btree_node(path, l) && have != BTREE_NODE_UNLOCKED); in bch2_btree_path_verify_locks()
867 BUG_ON(is_btree_node(path, l) && in bch2_btree_path_verify_locks()
872 BUG_ON(btree_node_locked(path, l) && in bch2_btree_path_verify_locks()
873 path->l[l].lock_seq != six_lock_seq(&path->l[l].b->c.lock)); in bch2_btree_path_verify_locks()
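
bch2_btree_path_verify_locks() is a debug-only invariant walk: a path that claims to be uptodate with a node at path->level must hold locks; at each level the lock actually held must match the strength wanted; and the saved lock_seq must still equal the node's current sequence number, i.e. the lock is really pinning the node the path thinks it is. A simplified assert()-based sketch, with stand-in types and a lock_want() helper that approximates btree_lock_want():

#include <assert.h>
#include <stdio.h>

enum lock_type { UNLOCKED, READ_LOCKED, INTENT_LOCKED };

struct node { unsigned seq; };

struct level_state {
    struct node   *b;
    enum lock_type held;
    unsigned       seq_when_locked;
};

struct path {
    unsigned level;        /* lowest level of interest */
    unsigned locks_want;   /* levels below this want intent locks */
    struct level_state l[4];
};

/* What lock strength should this path hold at level l? */
static enum lock_type lock_want(const struct path *p, unsigned l)
{
    if (l < p->level)
        return UNLOCKED;
    if (l < p->locks_want)
        return INTENT_LOCKED;
    return l == p->level ? READ_LOCKED : UNLOCKED;
}

/* Debug-only invariant check: the lock held at each level matches
 * what we want, and the saved sequence number still matches the
 * node's, i.e. our lock is really pinning the node we think it is. */
static void path_verify_locks(const struct path *p)
{
    for (unsigned l = 0; l < 4; l++) {
        assert(p->l[l].held == lock_want(p, l));
        if (p->l[l].held != UNLOCKED)
            assert(p->l[l].seq_when_locked == p->l[l].b->seq);
    }
}

int main(void)
{
    struct node n0 = { .seq = 7 }, n1 = { .seq = 9 };
    struct path p = {
        .level = 0, .locks_want = 2,
        .l = { { &n0, INTENT_LOCKED, 7 }, { &n1, INTENT_LOCKED, 9 } },
    };

    path_verify_locks(&p);
    puts("lock invariants hold");
    return 0;
}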
879 struct btree_path *path; in bch2_trans_locked() local
882 trans_for_each_path(trans, path, i) in bch2_trans_locked()
883 if (path->nodes_locked) in bch2_trans_locked()
895 struct btree_path *path; in bch2_trans_verify_locks() local
898 trans_for_each_path(trans, path, i) in bch2_trans_verify_locks()
899 bch2_btree_path_verify_locks(path); in bch2_trans_verify_locks()