Lines Matching +full:top +full:- +full:level

1 // SPDX-License-Identifier: GPL-2.0
12 __six_lock_init(&b->lock, "b->c.lock", &bch2_btree_node_lock_key, flags); in bch2_btree_lock_init()
13 lockdep_set_novalidate_class(&b->lock); in bch2_btree_lock_init()
20 //Re-enable when lock_class_is_held() is merged: in bch2_assert_btree_nodes_not_locked()
31 unsigned level) in bch2_btree_node_lock_counts() argument
43 if (path != skip && &path->l[level].b->c == b) { in bch2_btree_node_lock_counts()
44 int t = btree_node_locked_type(path, level); in bch2_btree_node_lock_counts()
73 u8 level; member
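The `level` member above belongs to the per-waiter record that the lock-graph code below walks. A minimal sketch of how that record and its containing graph plausibly look, reconstructed only from the fields these matches touch (trans, node_want, lock_want, path_idx, level, lock_start_time); field order and the array capacity are assumptions, not the verbatim bcachefs definitions:

/* Sketch only: reconstructed from the matched fields, not verbatim. */
struct trans_waiting_for_lock {
	struct btree_trans		*trans;
	struct btree_bkey_cached_common	*node_want;	/* node this transaction is blocked on */
	enum six_lock_type		lock_want;	/* SIX_LOCK_read/intent/write */

	/* iteration state used while walking this transaction's held locks: */
	u8				path_idx;
	u8				level;
	u64				lock_start_time;
};

struct lock_graph {
	struct trans_waiting_for_lock	g[8];		/* capacity is an assumption */
	unsigned			nr;
};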
86 prt_printf(out, "Found lock cycle (%u entries):", g->nr); in print_cycle()
89 for (i = g->g; i < g->g + g->nr; i++) { in print_cycle()
90 struct task_struct *task = READ_ONCE(i->trans->locking_wait.task); in print_cycle()
94 bch2_btree_trans_to_text(out, i->trans); in print_cycle()
95 bch2_prt_task_backtrace(out, task, i == g->g ? 5 : 1, GFP_NOWAIT); in print_cycle()
103 for (i = g->g; i != g->g + g->nr; i++) { in print_chain()
104 struct task_struct *task = i->trans->locking_wait.task; in print_chain()
105 if (i != g->g) in print_chain()
106 prt_str(out, "<- "); in print_chain()
107 prt_printf(out, "%u ", task ? task->pid : 0); in print_chain()
114 closure_put(&g->g[--g->nr].trans->ref); in lock_graph_up()
119 while (g->nr) in lock_graph_pop_all()
125 g->g[g->nr++] = (struct trans_waiting_for_lock) { in __lock_graph_down()
127 .node_want = trans->locking, in __lock_graph_down()
128 .lock_want = trans->locking_wait.lock_want, in __lock_graph_down()
134 closure_get(&trans->ref); in lock_graph_down()
142 for (i = g->g + 1; i < g->g + g->nr; i++) in lock_graph_remove_non_waiters()
143 if (i->trans->locking != i->node_want || in lock_graph_remove_non_waiters()
144 i->trans->locking_wait.start_time != i[-1].lock_start_time) { in lock_graph_remove_non_waiters()
145 while (g->g + g->nr > i) in lock_graph_remove_non_waiters()
155 struct bch_fs *c = trans->c; in trace_would_deadlock()
172 if (i == g->g) { in abort_lock()
173 trace_would_deadlock(g, i->trans); in abort_lock()
174 return btree_trans_restart(i->trans, BCH_ERR_transaction_restart_would_deadlock); in abort_lock()
176 i->trans->lock_must_abort = true; in abort_lock()
177 wake_up_process(i->trans->locking_wait.task); in abort_lock()
184 if (trans->lock_may_not_fail) in btree_trans_abort_preference()
186 if (trans->locking_wait.lock_want == SIX_LOCK_write) in btree_trans_abort_preference()
188 if (!trans->in_traverse_all) in btree_trans_abort_preference()
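The three matched conditions above are the whole of the abort-preference ladder that break_cycle() uses to pick a victim. A hedged reconstruction of the surrounding function; the concrete return values are an assumption, only their ordering matters (a transaction whose lock may not fail is never aborted, and a transaction in traverse_all is the most willing victim):

static int btree_trans_abort_preference(struct btree_trans *trans)
{
	if (trans->lock_may_not_fail)
		return 0;	/* never abort */
	if (trans->locking_wait.lock_want == SIX_LOCK_write)
		return 1;	/* waiting for a write lock: avoid aborting */
	if (!trans->in_traverse_all)
		return 2;
	return 3;		/* in traverse_all: cheapest to restart */
}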
205 ret = -1; in break_cycle()
209 for (i = g->g; i < g->g + g->nr; i++) { in break_cycle()
210 pref = btree_trans_abort_preference(i->trans); in break_cycle()
220 prt_printf(&buf, bch2_fmt(g->g->trans->c, "cycle of nofail locks")); in break_cycle()
222 for (i = g->g; i < g->g + g->nr; i++) { in break_cycle()
223 struct btree_trans *trans = i->trans; in break_cycle()
230 bch2_prt_task_backtrace(&buf, trans->locking_wait.task, 2, GFP_NOWAIT); in break_cycle()
243 while (g->nr) in break_cycle()
251 struct btree_trans *orig_trans = g->g->trans; in lock_graph_descend()
254 for (i = g->g; i < g->g + g->nr; i++) in lock_graph_descend()
255 if (i->trans == trans) { in lock_graph_descend()
256 closure_put(&trans->ref); in lock_graph_descend()
260 if (g->nr == ARRAY_SIZE(g->g)) { in lock_graph_descend()
261 closure_put(&trans->ref); in lock_graph_descend()
263 if (orig_trans->lock_may_not_fail) in lock_graph_descend()
266 while (g->nr) in lock_graph_descend()
272 trace_and_count(trans->c, trans_restart_would_deadlock_recursion_limit, trans, _RET_IP_); in lock_graph_descend()
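The lock_graph_descend() matches above cover its three outcomes: the transaction is already in the graph (a cycle was found), the graph is full, or the transaction is pushed and the walk continues. A hedged sketch of how they fit together; break_cycle()'s exact signature and the restart error code are assumptions:

static int lock_graph_descend(struct lock_graph *g, struct btree_trans *trans,
			      struct printbuf *cycle)
{
	struct btree_trans *orig_trans = g->g->trans;
	struct trans_waiting_for_lock *i;

	/* Already in the graph? Then we have found a cycle. */
	for (i = g->g; i < g->g + g->nr; i++)
		if (i->trans == trans) {
			closure_put(&trans->ref);
			return break_cycle(g, cycle);	/* signature assumed */
		}

	/* Graph full: give up on this walk rather than overflow. */
	if (g->nr == ARRAY_SIZE(g->g)) {
		closure_put(&trans->ref);

		if (orig_trans->lock_may_not_fail)
			return 0;

		while (g->nr)
			lock_graph_up(g);

		trace_and_count(trans->c, trans_restart_would_deadlock_recursion_limit, trans, _RET_IP_);
		return btree_trans_restart(orig_trans,
				BCH_ERR_transaction_restart_deadlock_recursion_limit);	/* error code assumed */
	}

	/* Otherwise push the new transaction (caller already holds its ref) and keep walking. */
	__lock_graph_down(g, trans);
	return 0;
}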
288 struct trans_waiting_for_lock *top; in bch2_check_for_deadlock() local
295 if (trans->lock_must_abort) { in bch2_check_for_deadlock()
297 return -1; in bch2_check_for_deadlock()
305 /* trans->paths is rcu protected vs. freeing */ in bch2_check_for_deadlock()
308 cycle->atomic++; in bch2_check_for_deadlock()
313 top = &g.g[g.nr - 1]; in bch2_check_for_deadlock()
315 struct btree_path *paths = rcu_dereference(top->trans->paths); in bch2_check_for_deadlock()
322 path_idx, top->path_idx) { in bch2_check_for_deadlock()
324 if (!path->nodes_locked) in bch2_check_for_deadlock()
327 if (path_idx != top->path_idx) { in bch2_check_for_deadlock()
328 top->path_idx = path_idx; in bch2_check_for_deadlock()
329 top->level = 0; in bch2_check_for_deadlock()
330 top->lock_start_time = 0; in bch2_check_for_deadlock()
334 top->level < BTREE_MAX_DEPTH; in bch2_check_for_deadlock()
335 top->level++, top->lock_start_time = 0) { in bch2_check_for_deadlock()
336 int lock_held = btree_node_locked_type(path, top->level); in bch2_check_for_deadlock()
341 b = &READ_ONCE(path->l[top->level].b)->c; in bch2_check_for_deadlock()
347 * structures - which means it can't be blocked in bch2_check_for_deadlock()
366 if (list_empty_careful(&b->lock.wait_list)) in bch2_check_for_deadlock()
369 raw_spin_lock(&b->lock.wait_lock); in bch2_check_for_deadlock()
370 list_for_each_entry(trans, &b->lock.wait_list, locking_wait.list) { in bch2_check_for_deadlock()
371 BUG_ON(b != trans->locking); in bch2_check_for_deadlock()
373 if (top->lock_start_time && in bch2_check_for_deadlock()
374 time_after_eq64(top->lock_start_time, trans->locking_wait.start_time)) in bch2_check_for_deadlock()
377 top->lock_start_time = trans->locking_wait.start_time; in bch2_check_for_deadlock()
380 if (trans == top->trans || in bch2_check_for_deadlock()
381 !lock_type_conflicts(lock_held, trans->locking_wait.lock_want)) in bch2_check_for_deadlock()
384 closure_get(&trans->ref); in bch2_check_for_deadlock()
385 raw_spin_unlock(&b->lock.wait_lock); in bch2_check_for_deadlock()
393 raw_spin_unlock(&b->lock.wait_lock); in bch2_check_for_deadlock()
403 --cycle->atomic; in bch2_check_for_deadlock()
419 int readers = bch2_btree_node_lock_counts(trans, NULL, b, b->level).n[SIX_LOCK_read]; in __bch2_btree_node_lock_write()
423 * Must drop our read locks before calling six_lock_write() - in __bch2_btree_node_lock_write()
428 six_lock_readers_add(&b->lock, -readers); in __bch2_btree_node_lock_write()
431 six_lock_readers_add(&b->lock, readers); in __bch2_btree_node_lock_write()
434 mark_btree_node_locked_noreset(path, b->level, BTREE_NODE_INTENT_LOCKED); in __bch2_btree_node_lock_write()
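Taken together, the __bch2_btree_node_lock_write() matches above implement a "hide our own readers" trick: the read-lock count the transaction itself holds on the node is subtracted before taking the write lock and restored afterwards, so the write lock does not wait on the transaction's own readers. A condensed sketch of the pattern (the actual write-lock call between the two adjustments is elided, and this is not the verbatim function body):

int readers = bch2_btree_node_lock_counts(trans, NULL, b, b->level).n[SIX_LOCK_read];

six_lock_readers_add(&b->lock, -readers);	/* pretend our read locks are already dropped */
/* ... acquire b->lock in SIX_LOCK_write mode here ... */
six_lock_readers_add(&b->lock, readers);	/* restore the reader count */

bch2_btree_node_upgrade(), matched further down, uses the same subtract/restore pattern around six_lock_tryupgrade().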
453 * hack - but by dropping read locks first, this should never fail, and in bch2_btree_node_lock_write_nofail()
459 if (!linked->nodes_locked) in bch2_btree_node_lock_write_nofail()
480 unsigned l = path->level; in btree_path_get_locks()
481 int fail_idx = -1; in btree_path_get_locks()
493 f->l = l; in btree_path_get_locks()
494 f->b = path->l[l].b; in btree_path_get_locks()
499 } while (l < path->locks_want); in btree_path_get_locks()
511 path->l[fail_idx].b = upgrade in btree_path_get_locks()
512 ? ERR_PTR(-BCH_ERR_no_btree_node_upgrade) in btree_path_get_locks()
513 : ERR_PTR(-BCH_ERR_no_btree_node_relock); in btree_path_get_locks()
514 --fail_idx; in btree_path_get_locks()
518 if (path->uptodate == BTREE_ITER_NEED_RELOCK) in btree_path_get_locks()
519 path->uptodate = BTREE_ITER_UPTODATE; in btree_path_get_locks()
523 return path->uptodate < BTREE_ITER_NEED_RELOCK; in btree_path_get_locks()
527 struct btree_path *path, unsigned level, in __bch2_btree_node_relock() argument
530 struct btree *b = btree_path_node(path, level); in __bch2_btree_node_relock()
531 int want = __btree_lock_want(path, level); in __bch2_btree_node_relock()
536 if (six_relock_type(&b->c.lock, want, path->l[level].lock_seq) || in __bch2_btree_node_relock()
537 (btree_node_lock_seq_matches(path, b, level) && in __bch2_btree_node_relock()
538 btree_node_lock_increment(trans, &b->c, level, want))) { in __bch2_btree_node_relock()
539 mark_btree_node_locked(trans, path, level, want); in __bch2_btree_node_relock()
543 if (trace && !trans->notrace_relock_fail) in __bch2_btree_node_relock()
544 trace_and_count(trans->c, btree_path_relock_fail, trans, _RET_IP_, path, level); in __bch2_btree_node_relock()
551 struct btree_path *path, unsigned level) in bch2_btree_node_upgrade() argument
553 struct btree *b = path->l[level].b; in bch2_btree_node_upgrade()
554 struct six_lock_count count = bch2_btree_node_lock_counts(trans, path, &b->c, level); in bch2_btree_node_upgrade()
556 if (!is_btree_node(path, level)) in bch2_btree_node_upgrade()
559 switch (btree_lock_want(path, level)) { in bch2_btree_node_upgrade()
561 BUG_ON(btree_node_locked(path, level)); in bch2_btree_node_upgrade()
564 BUG_ON(btree_node_intent_locked(path, level)); in bch2_btree_node_upgrade()
565 return bch2_btree_node_relock(trans, path, level); in bch2_btree_node_upgrade()
572 if (btree_node_intent_locked(path, level)) in bch2_btree_node_upgrade()
578 if (btree_node_locked(path, level)) { in bch2_btree_node_upgrade()
581 six_lock_readers_add(&b->c.lock, -count.n[SIX_LOCK_read]); in bch2_btree_node_upgrade()
582 ret = six_lock_tryupgrade(&b->c.lock); in bch2_btree_node_upgrade()
583 six_lock_readers_add(&b->c.lock, count.n[SIX_LOCK_read]); in bch2_btree_node_upgrade()
588 if (six_relock_type(&b->c.lock, SIX_LOCK_intent, path->l[level].lock_seq)) in bch2_btree_node_upgrade()
596 if (btree_node_lock_seq_matches(path, b, level) && in bch2_btree_node_upgrade()
597 btree_node_lock_increment(trans, &b->c, level, BTREE_NODE_INTENT_LOCKED)) { in bch2_btree_node_upgrade()
598 btree_node_unlock(trans, path, level); in bch2_btree_node_upgrade()
602 trace_and_count(trans->c, btree_path_upgrade_fail, trans, _RET_IP_, path, level); in bch2_btree_node_upgrade()
605 mark_btree_node_locked_noreset(path, level, BTREE_NODE_INTENT_LOCKED); in bch2_btree_node_upgrade()
612 * Only for btree_cache.c - only relocks intent locks
619 for (l = path->level; in bch2_btree_path_relock_intent()
620 l < path->locks_want && btree_path_node(path, l); in bch2_btree_path_relock_intent()
625 trace_and_count(trans->c, trans_restart_relock_path_intent, trans, _RET_IP_, path); in bch2_btree_path_relock_intent()
645 trace_and_count(trans->c, trans_restart_relock_path, trans, trace_ip, path); in __bch2_btree_path_relock()
657 EBUG_ON(path->locks_want >= new_locks_want); in bch2_btree_path_upgrade_noupgrade_sibs()
659 path->locks_want = new_locks_want; in bch2_btree_path_upgrade_noupgrade_sibs()
673 * XXX: this is ugly - we'd prefer to not be mucking with other in __bch2_btree_path_upgrade()
676 * On failure to upgrade the iterator, setting iter->locks_want and in __bch2_btree_path_upgrade()
684 * an iterator is a copy - for now, we'll just upgrade any other in __bch2_btree_path_upgrade()
688 * before interior nodes - now that's handled by in __bch2_btree_path_upgrade()
691 if (!path->cached && !trans->in_traverse_all) { in __bch2_btree_path_upgrade()
697 linked->cached == path->cached && in __bch2_btree_path_upgrade()
698 linked->btree_id == path->btree_id && in __bch2_btree_path_upgrade()
699 linked->locks_want < new_locks_want) { in __bch2_btree_path_upgrade()
700 linked->locks_want = new_locks_want; in __bch2_btree_path_upgrade()
712 unsigned l, old_locks_want = path->locks_want; in __bch2_btree_path_downgrade()
714 if (trans->restarted) in __bch2_btree_path_downgrade()
717 EBUG_ON(path->locks_want < new_locks_want); in __bch2_btree_path_downgrade()
719 path->locks_want = new_locks_want; in __bch2_btree_path_downgrade()
721 while (path->nodes_locked && in __bch2_btree_path_downgrade()
722 (l = btree_path_highest_level_locked(path)) >= path->locks_want) { in __bch2_btree_path_downgrade()
723 if (l > path->level) { in __bch2_btree_path_downgrade()
727 six_lock_downgrade(&path->l[l].b->c.lock); in __bch2_btree_path_downgrade()
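The __bch2_btree_path_downgrade() matches above show the shape of the downgrade loop: after lowering locks_want, locks above the new want are released from the top down, and the node at the path's own level is downgraded from intent to read. A hedged reconstruction of that loop; the unlock call and the locked-type bookkeeping are inferred from the two matched branch lines, not shown verbatim:

path->locks_want = new_locks_want;

while (path->nodes_locked &&
       (l = btree_path_highest_level_locked(path)) >= path->locks_want) {
	if (l > path->level) {
		btree_node_unlock(trans, path, l);	/* inferred */
	} else {
		if (btree_node_intent_locked(path, l)) {
			six_lock_downgrade(&path->l[l].b->c.lock);
			mark_btree_node_locked_noreset(path, l,
					BTREE_NODE_READ_LOCKED);	/* inferred */
		}
		break;
	}
}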
746 if (trans->restarted) in bch2_trans_downgrade()
758 if (unlikely(trans->restarted)) in bch2_trans_relock()
759 return -((int) trans->restarted); in bch2_trans_relock()
764 if (path->should_be_locked && in bch2_trans_relock()
769 bch2_bpos_to_text(&buf, path->pos); in bch2_trans_relock()
771 f.l, path->l[f.l].lock_seq); in bch2_trans_relock()
775 prt_printf(&buf, "%u", f.b->c.lock.seq); in bch2_trans_relock()
778 bch2_btree_node_lock_counts(trans, NULL, &f.b->c, f.l); in bch2_trans_relock()
781 c = six_lock_counts(&f.b->c.lock); in bch2_trans_relock()
789 count_event(trans->c, trans_restart_relock); in bch2_trans_relock()
802 if (unlikely(trans->restarted)) in bch2_trans_relock_notrace()
803 return -((int) trans->restarted); in bch2_trans_relock_notrace()
806 if (path->should_be_locked && in bch2_trans_relock_notrace()
843 if (path->nodes_locked) in bch2_trans_locked()
866 if (!path->nodes_locked) { in bch2_btree_path_verify_locks()
867 BUG_ON(path->uptodate == BTREE_ITER_UPTODATE && in bch2_btree_path_verify_locks()
868 btree_path_node(path, path->level)); in bch2_btree_path_verify_locks()