Lines matching full:path (fs/bcachefs/btree_locking.c)

33 	struct btree_path *path;  in bch2_btree_node_lock_counts()  local
42 trans_for_each_path(trans, path, i) in bch2_btree_node_lock_counts()
43 if (path != skip && &path->l[level].b->c == b) { in bch2_btree_node_lock_counts()
44 int t = btree_node_locked_type(path, level); in bch2_btree_node_lock_counts()
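
The hits above (lines 33-44) are from bch2_btree_node_lock_counts(), which walks every btree_path in the transaction, skips one, and tallies what kind of lock each remaining path holds on the node b at the given level. A minimal standalone sketch of that counting pattern follows; the types and the lock_counts() helper are simplified stand-ins, not the real bcachefs structures.

    enum lock_type { UNLOCKED = -1, READ_LOCKED, INTENT_LOCKED, WRITE_LOCKED };

    struct path {
        const void    *node[4];   /* which btree node this path has at each level */
        enum lock_type held[4];   /* what kind of lock it holds there */
    };

    /* Count how many paths other than `skip` hold each lock type on `node` at
     * `level` -- the same shape as the trans_for_each_path() loop in the hits above. */
    static void lock_counts(struct path *paths, int nr, const struct path *skip,
                            const void *node, int level, int counts[3])
    {
        for (int i = 0; i < nr; i++) {
            const struct path *p = &paths[i];

            if (p != skip && p->node[level] == node) {
                int t = p->held[level];
                if (t != UNLOCKED)
                    counts[t]++;
            }
        }
    }

bch2_btree_node_upgrade() further down uses exactly this information to decide whether another path of the same transaction already holds an intent lock on the node.
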
56 struct btree_path *path, struct btree *b) in bch2_btree_node_unlock_write() argument
58 bch2_btree_node_unlock_write_inlined(trans, path, b); in bch2_btree_node_unlock_write()
323 struct btree_path *path = paths + path_idx; in bch2_check_for_deadlock() local
324 if (!path->nodes_locked) in bch2_check_for_deadlock()
336 int lock_held = btree_node_locked_type(path, top->level); in bch2_check_for_deadlock()
341 b = &READ_ONCE(path->l[top->level].b)->c; in bch2_check_for_deadlock()
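
Lines 323-341 are from bch2_check_for_deadlock(), the lock-cycle detector: starting from a transaction that is about to block, it follows who-holds-what edges through other transactions' paths and reports a deadlock if the walk comes back around. The sketch below is a toy version of that cycle walk over a hypothetical waits-for graph; the real code walks btree_trans/btree_path structures and lock wait lists, which is not reproduced here.

    #include <stdbool.h>
    #include <stddef.h>

    /* One edge per transaction: the transaction currently holding the lock we want. */
    struct txn { struct txn *waiting_on; };

    /* Follow waits-for edges from `start`; true means the walk came back to the
     * starting transaction, i.e. a deadlock cycle.  Bounded so a malformed graph
     * cannot loop forever -- a toy model of the idea, not of the real data structures. */
    static bool check_for_deadlock(struct txn *start)
    {
        struct txn *t = start->waiting_on;

        for (int depth = 0; t && depth < 64; depth++) {
            if (t == start)
                return true;
            t = t->waiting_on;
        }
        return false;
    }
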
415 int __bch2_btree_node_lock_write(struct btree_trans *trans, struct btree_path *path, in __bch2_btree_node_lock_write() argument
434 mark_btree_node_locked_noreset(path, b->level, BTREE_NODE_INTENT_LOCKED); in __bch2_btree_node_lock_write()
440 struct btree_path *path, in bch2_btree_node_lock_write_nofail() argument
469 ret = __btree_node_lock_write(trans, path, b, true); in bch2_btree_node_lock_write_nofail()
476 struct btree_path *path, in btree_path_get_locks() argument
480 unsigned l = path->level; in btree_path_get_locks()
484 if (!btree_path_node(path, l)) in btree_path_get_locks()
488 ? bch2_btree_node_upgrade(trans, path, l) in btree_path_get_locks()
489 : bch2_btree_node_relock(trans, path, l))) { in btree_path_get_locks()
494 f->b = path->l[l].b; in btree_path_get_locks()
499 } while (l < path->locks_want); in btree_path_get_locks()
507 __bch2_btree_path_unlock(trans, path); in btree_path_get_locks()
508 btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE); in btree_path_get_locks()
511 path->l[fail_idx].b = upgrade in btree_path_get_locks()
518 if (path->uptodate == BTREE_ITER_NEED_RELOCK) in btree_path_get_locks()
519 path->uptodate = BTREE_ITER_UPTODATE; in btree_path_get_locks()
523 return path->uptodate < BTREE_ITER_NEED_RELOCK; in btree_path_get_locks()
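
Lines 476-523 show btree_path_get_locks(): starting at the path's own level and working up to locks_want, it either relocks or upgrades each node; the first failing level is recorded in the get_locks_fail cookie, the whole path is unlocked, and the path is marked as needing a fresh traverse. A condensed sketch of that all-or-nothing loop, assuming made-up try_relock()/try_upgrade()/unlock_all() helpers in place of the real bch2_btree_node_relock()/bch2_btree_node_upgrade() calls:

    #include <stdbool.h>

    #define MAX_LEVELS 4

    enum uptodate { UPTODATE, NEED_RELOCK, NEED_TRAVERSE };

    struct path {
        int           level, locks_want;
        enum uptodate uptodate;
        void         *node[MAX_LEVELS];   /* node held at each level, NULL above the root */
    };

    /* Hypothetical per-level helpers standing in for the real relock/upgrade calls. */
    extern bool try_relock(struct path *, int level);
    extern bool try_upgrade(struct path *, int level);
    extern void unlock_all(struct path *);

    static bool path_get_locks(struct path *path, bool upgrade)
    {
        int l = path->level;

        do {
            if (!path->node[l])
                break;                      /* ran off the top of the tree */
            if (!(upgrade ? try_upgrade(path, l) : try_relock(path, l))) {
                /* One level failed: drop everything and force a re-traverse,
                 * as __bch2_btree_path_unlock() + NEED_TRAVERSE do above. */
                unlock_all(path);
                path->uptodate = NEED_TRAVERSE;
                return false;
            }
            l++;
        } while (l < path->locks_want && l < MAX_LEVELS);

        if (path->uptodate == NEED_RELOCK)
            path->uptodate = UPTODATE;
        return path->uptodate < NEED_RELOCK;
    }
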
527 struct btree_path *path, unsigned level, in __bch2_btree_node_relock() argument
530 struct btree *b = btree_path_node(path, level); in __bch2_btree_node_relock()
531 int want = __btree_lock_want(path, level); in __bch2_btree_node_relock()
536 if (six_relock_type(&b->c.lock, want, path->l[level].lock_seq) || in __bch2_btree_node_relock()
537 (btree_node_lock_seq_matches(path, b, level) && in __bch2_btree_node_relock()
539 mark_btree_node_locked(trans, path, level, want); in __bch2_btree_node_relock()
544 trace_and_count(trans->c, btree_path_relock_fail, trans, _RET_IP_, path, level); in __bch2_btree_node_relock()
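
__bch2_btree_node_relock() (lines 527-544) is the optimistic path: it retakes a lock that was dropped earlier and uses the node's lock sequence number to prove nothing was modified in between, so the path's cached position in the node is still valid. A sketch of that sequence check, with lock_node()/unlock_node() as assumed stand-ins for the SIX lock calls:

    #include <stdbool.h>
    #include <stdint.h>

    struct node { uint32_t seq; };        /* seq is bumped whenever the node is modified */

    struct level_ref {
        struct node *node;
        uint32_t     lock_seq;            /* node->seq as of when we last held the lock */
    };

    /* Hypothetical lock primitives standing in for the SIX lock calls. */
    extern bool lock_node(struct node *);
    extern void unlock_node(struct node *);

    /* Retake a dropped lock and use the saved sequence number to prove the node
     * was not modified in between, so cached pointers into it are still valid. */
    static bool node_relock(struct level_ref *ref)
    {
        if (!lock_node(ref->node))
            return false;

        if (ref->node->seq != ref->lock_seq) {
            unlock_node(ref->node);       /* node changed underneath us */
            return false;                 /* caller falls back to a full re-traverse */
        }
        return true;
    }
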
551 struct btree_path *path, unsigned level) in bch2_btree_node_upgrade() argument
553 struct btree *b = path->l[level].b; in bch2_btree_node_upgrade()
554 struct six_lock_count count = bch2_btree_node_lock_counts(trans, path, &b->c, level); in bch2_btree_node_upgrade()
556 if (!is_btree_node(path, level)) in bch2_btree_node_upgrade()
559 switch (btree_lock_want(path, level)) { in bch2_btree_node_upgrade()
561 BUG_ON(btree_node_locked(path, level)); in bch2_btree_node_upgrade()
564 BUG_ON(btree_node_intent_locked(path, level)); in bch2_btree_node_upgrade()
565 return bch2_btree_node_relock(trans, path, level); in bch2_btree_node_upgrade()
572 if (btree_node_intent_locked(path, level)) in bch2_btree_node_upgrade()
578 if (btree_node_locked(path, level)) { in bch2_btree_node_upgrade()
588 if (six_relock_type(&b->c.lock, SIX_LOCK_intent, path->l[level].lock_seq)) in bch2_btree_node_upgrade()
593 * Do we already have an intent lock via another path? If so, just bump in bch2_btree_node_upgrade()
596 if (btree_node_lock_seq_matches(path, b, level) && in bch2_btree_node_upgrade()
598 btree_node_unlock(trans, path, level); in bch2_btree_node_upgrade()
602 trace_and_count(trans->c, btree_path_upgrade_fail, trans, _RET_IP_, path, level); in bch2_btree_node_upgrade()
605 mark_btree_node_locked_noreset(path, level, BTREE_NODE_INTENT_LOCKED); in bch2_btree_node_upgrade()
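
bch2_btree_node_upgrade() (lines 551-605) promotes a read lock to an intent lock without opening a window for the node to change: first an in-place upgrade is attempted, then a relock at intent using the saved sequence number, and only if both fail does the path fail and the transaction restart. A toy version of that ladder, with the lock primitives declared as hypothetical externs:

    #include <stdbool.h>
    #include <stdint.h>

    enum held { HELD_NONE, HELD_READ, HELD_INTENT };

    struct node { uint32_t seq; };

    /* Hypothetical primitives modelling the SIX lock operations used here. */
    extern bool try_upgrade_read_to_intent(struct node *);     /* in place, lock stays held */
    extern bool relock_intent_if_seq_matches(struct node *, uint32_t);
    extern void unlock_read(struct node *);

    static bool node_upgrade(struct node *b, enum held *held, uint32_t saved_seq)
    {
        if (*held == HELD_INTENT)
            return true;                    /* already strong enough */

        if (*held == HELD_READ && try_upgrade_read_to_intent(b)) {
            *held = HELD_INTENT;
            return true;                    /* upgraded without ever dropping the lock */
        }

        if (*held == HELD_READ) {
            unlock_read(b);
            *held = HELD_NONE;
        }

        /* Retake directly at intent; the seq check proves nothing changed meanwhile. */
        if (relock_intent_if_seq_matches(b, saved_seq)) {
            *held = HELD_INTENT;
            return true;
        }
        return false;                       /* caller fails the path / restarts */
    }
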
609 /* Btree path locking: */
615 struct btree_path *path) in bch2_btree_path_relock_intent() argument
619 for (l = path->level; in bch2_btree_path_relock_intent()
620 l < path->locks_want && btree_path_node(path, l); in bch2_btree_path_relock_intent()
622 if (!bch2_btree_node_relock(trans, path, l)) { in bch2_btree_path_relock_intent()
623 __bch2_btree_path_unlock(trans, path); in bch2_btree_path_relock_intent()
624 btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE); in bch2_btree_path_relock_intent()
625 trace_and_count(trans->c, trans_restart_relock_path_intent, trans, _RET_IP_, path); in bch2_btree_path_relock_intent()
634 bool bch2_btree_path_relock_norestart(struct btree_trans *trans, struct btree_path *path) in bch2_btree_path_relock_norestart() argument
638 return btree_path_get_locks(trans, path, false, &f); in bch2_btree_path_relock_norestart()
642 struct btree_path *path, unsigned long trace_ip) in __bch2_btree_path_relock() argument
644 if (!bch2_btree_path_relock_norestart(trans, path)) { in __bch2_btree_path_relock()
645 trace_and_count(trans->c, trans_restart_relock_path, trans, trace_ip, path); in __bch2_btree_path_relock()
653 struct btree_path *path, in bch2_btree_path_upgrade_noupgrade_sibs() argument
657 EBUG_ON(path->locks_want >= new_locks_want); in bch2_btree_path_upgrade_noupgrade_sibs()
659 path->locks_want = new_locks_want; in bch2_btree_path_upgrade_noupgrade_sibs()
661 return btree_path_get_locks(trans, path, true, f); in bch2_btree_path_upgrade_noupgrade_sibs()
665 struct btree_path *path, in __bch2_btree_path_upgrade() argument
669 if (bch2_btree_path_upgrade_noupgrade_sibs(trans, path, new_locks_want, f)) in __bch2_btree_path_upgrade()
691 if (!path->cached && !trans->in_traverse_all) { in __bch2_btree_path_upgrade()
696 if (linked != path && in __bch2_btree_path_upgrade()
697 linked->cached == path->cached && in __bch2_btree_path_upgrade()
698 linked->btree_id == path->btree_id && in __bch2_btree_path_upgrade()
709 struct btree_path *path, in __bch2_btree_path_downgrade() argument
712 unsigned l, old_locks_want = path->locks_want; in __bch2_btree_path_downgrade()
717 EBUG_ON(path->locks_want < new_locks_want); in __bch2_btree_path_downgrade()
719 path->locks_want = new_locks_want; in __bch2_btree_path_downgrade()
721 while (path->nodes_locked && in __bch2_btree_path_downgrade()
722 (l = btree_path_highest_level_locked(path)) >= path->locks_want) { in __bch2_btree_path_downgrade()
723 if (l > path->level) { in __bch2_btree_path_downgrade()
724 btree_node_unlock(trans, path, l); in __bch2_btree_path_downgrade()
726 if (btree_node_intent_locked(path, l)) { in __bch2_btree_path_downgrade()
727 six_lock_downgrade(&path->l[l].b->c.lock); in __bch2_btree_path_downgrade()
728 mark_btree_node_locked_noreset(path, l, BTREE_NODE_READ_LOCKED); in __bch2_btree_path_downgrade()
734 bch2_btree_path_verify_locks(path); in __bch2_btree_path_downgrade()
736 trace_path_downgrade(trans, _RET_IP_, path, old_locks_want); in __bch2_btree_path_downgrade()
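
__bch2_btree_path_downgrade() (lines 709-736) is the reverse of upgrade: once locks_want has been lowered, levels above the new target are unlocked outright and an intent lock at the path's own level is downgraded to a read lock, mirroring the six_lock_downgrade() call in the hits. A small sketch of that shrink-from-the-top loop over a simplified path:

    #define MAX_LEVELS 4

    enum held { HELD_NONE, HELD_READ, HELD_INTENT };

    struct path { int level, locks_want; enum held held[MAX_LEVELS]; };

    /* Hypothetical lock primitives. */
    extern void unlock_level(struct path *, int level);             /* sets held[level] = HELD_NONE */
    extern void downgrade_intent_to_read(struct path *, int level); /* sets held[level] = HELD_READ */

    static int highest_level_locked(const struct path *p)
    {
        for (int l = MAX_LEVELS - 1; l >= 0; l--)
            if (p->held[l] != HELD_NONE)
                return l;
        return -1;
    }

    static void path_downgrade(struct path *p, int new_locks_want)
    {
        int l;

        p->locks_want = new_locks_want;

        while ((l = highest_level_locked(p)) >= 0 && l >= p->locks_want) {
            if (l > p->level) {
                unlock_level(p, l);                  /* level no longer needed at all */
            } else {
                if (p->held[l] == HELD_INTENT)
                    downgrade_intent_to_read(p, l);  /* keep the node, but only for reading */
                break;
            }
        }
    }
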
743 struct btree_path *path; in bch2_trans_downgrade() local
749 trans_for_each_path(trans, path, i) in bch2_trans_downgrade()
750 bch2_btree_path_downgrade(trans, path); in bch2_trans_downgrade()
755 struct btree_path *path; in bch2_trans_relock() local
761 trans_for_each_path(trans, path, i) { in bch2_trans_relock()
764 if (path->should_be_locked && in bch2_trans_relock()
765 !btree_path_get_locks(trans, path, false, &f)) { in bch2_trans_relock()
769 bch2_bpos_to_text(&buf, path->pos); in bch2_trans_relock()
771 f.l, path->l[f.l].lock_seq); in bch2_trans_relock()
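
bch2_trans_relock() (lines 755-771) runs after a transaction has dropped its locks, for example to wait on I/O: every path flagged should_be_locked must be relocked, and if any one of them cannot be, the whole transaction is restarted. A sketch of that all-paths loop, with the restart signalled as a plain -EAGAIN instead of bcachefs's transaction-restart error:

    #include <stdbool.h>
    #include <errno.h>

    #define MAX_PATHS 8

    struct path  { bool in_use, should_be_locked; };
    struct trans { struct path paths[MAX_PATHS]; };

    /* Hypothetical stand-in for btree_path_get_locks(trans, path, false, &f). */
    extern bool path_relock(struct trans *, struct path *);

    /* Relock every path that is supposed to be locked; if any single one fails,
     * the whole transaction has to be restarted (modelled here as -EAGAIN). */
    static int trans_relock(struct trans *trans)
    {
        for (int i = 0; i < MAX_PATHS; i++) {
            struct path *p = &trans->paths[i];

            if (p->in_use && p->should_be_locked && !path_relock(trans, p))
                return -EAGAIN;
        }
        return 0;
    }
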
799 struct btree_path *path; in bch2_trans_relock_notrace() local
805 trans_for_each_path(trans, path, i) in bch2_trans_relock_notrace()
806 if (path->should_be_locked && in bch2_trans_relock_notrace()
807 !bch2_btree_path_relock_norestart(trans, path)) { in bch2_trans_relock_notrace()
815 struct btree_path *path; in bch2_trans_unlock_noassert() local
818 trans_for_each_path(trans, path, i) in bch2_trans_unlock_noassert()
819 __bch2_btree_path_unlock(trans, path); in bch2_trans_unlock_noassert()
824 struct btree_path *path; in bch2_trans_unlock() local
827 trans_for_each_path(trans, path, i) in bch2_trans_unlock()
828 __bch2_btree_path_unlock(trans, path); in bch2_trans_unlock()
839 struct btree_path *path; in bch2_trans_locked() local
842 trans_for_each_path(trans, path, i) in bch2_trans_locked()
843 if (path->nodes_locked) in bch2_trans_locked()
862 void bch2_btree_path_verify_locks(struct btree_path *path) in bch2_btree_path_verify_locks() argument
866 if (!path->nodes_locked) { in bch2_btree_path_verify_locks()
867 BUG_ON(path->uptodate == BTREE_ITER_UPTODATE && in bch2_btree_path_verify_locks()
868 btree_path_node(path, path->level)); in bch2_btree_path_verify_locks()
873 int want = btree_lock_want(path, l); in bch2_btree_path_verify_locks()
874 int have = btree_node_locked_type(path, l); in bch2_btree_path_verify_locks()
876 BUG_ON(!is_btree_node(path, l) && have != BTREE_NODE_UNLOCKED); in bch2_btree_path_verify_locks()
878 BUG_ON(is_btree_node(path, l) && in bch2_btree_path_verify_locks()
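
bch2_btree_path_verify_locks() (lines 862-878) is a debugging assertion: for every level it compares the lock the path is supposed to hold (btree_lock_want()) against what it actually holds and BUG()s on a mismatch. The same invariant as a userspace assert over a simplified path; lock_want() here only approximates the real btree_lock_want():

    #include <assert.h>
    #include <stdbool.h>

    #define MAX_LEVELS 4

    enum held { HELD_NONE, HELD_READ, HELD_INTENT };

    struct path {
        int       level, locks_want;
        bool      has_node[MAX_LEVELS];
        enum held held[MAX_LEVELS];
    };

    /* Which lock a level is supposed to hold, by analogy with btree_lock_want():
     * intent below locks_want, read at the path's own level, nothing elsewhere. */
    static enum held lock_want(const struct path *p, int l)
    {
        if (l < p->level)
            return HELD_NONE;
        if (l < p->locks_want)
            return HELD_INTENT;
        if (l == p->level)
            return HELD_READ;
        return HELD_NONE;
    }

    static void verify_locks(const struct path *p)
    {
        for (int l = 0; l < MAX_LEVELS; l++) {
            if (!p->has_node[l]) {
                assert(p->held[l] == HELD_NONE);   /* cannot hold a lock on nothing */
                continue;
            }
            /* The real check has extra cases (e.g. write locks); the core invariant
             * is that what is held matches what the level is supposed to want. */
            assert(p->held[l] == lock_want(p, l));
        }
    }
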
887 struct btree_path *path; in bch2_trans_verify_locks() local
890 trans_for_each_path(trans, path, i) in bch2_trans_verify_locks()
891 bch2_btree_path_verify_locks(path); in bch2_trans_verify_locks()