1 // SPDX-License-Identifier: GPL-2.0
13 __six_lock_init(&b->lock, "b->c.lock", &bch2_btree_node_lock_key, flags, gfp); in bch2_btree_lock_init()
14 lockdep_set_notrack_class(&b->lock); in bch2_btree_lock_init()
34 if (path != skip && &path->l[level].b->c == b) { in bch2_btree_node_lock_counts()
69 struct trans_waiting_for_lock g[8];
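The matched line at 69 is a lone struct member; the surrounding definitions are not part of the match. A rough reconstruction from how the fields are used later in this listing (trans, node_want, lock_want, path_idx, level, lock_start_time, and the g[8]/nr pair) is shown below. Exact field types and ordering are assumptions, not copied from the file.

/* Reconstructed for context only; the real definitions live just above line 69. */
struct trans_waiting_for_lock {
	struct btree_trans		*trans;
	struct btree_bkey_cached_common	*node_want;	/* same type as trans->locking */
	enum six_lock_type		lock_want;

	/* cursor for walking the locks this transaction already holds: */
	u8				path_idx;
	u8				level;
	u64				lock_start_time;
};

struct lock_graph {
	struct trans_waiting_for_lock	g[8];	/* the DFS stack; matched line above */
	unsigned			nr;
};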
73 static noinline void print_cycle(struct printbuf *out, struct lock_graph *g) in print_cycle()
77 prt_printf(out, "Found lock cycle (%u entries):\n", g->nr); in print_cycle()
79 for (i = g->g; i < g->g + g->nr; i++) { in print_cycle()
80 struct task_struct *task = READ_ONCE(i->trans->locking_wait.task); in print_cycle()
84 bch2_btree_trans_to_text(out, i->trans); in print_cycle()
85 bch2_prt_task_backtrace(out, task, i == g->g ? 5 : 1, GFP_NOWAIT); in print_cycle()
89 static noinline void print_chain(struct printbuf *out, struct lock_graph *g) in print_chain()
93 for (i = g->g; i != g->g + g->nr; i++) { in print_chain()
94 struct task_struct *task = READ_ONCE(i->trans->locking_wait.task); in print_chain()
95 if (i != g->g) in print_chain()
96 prt_str(out, "<- "); in print_chain()
97 prt_printf(out, "%u ", task ? task->pid : 0); in print_chain()
102 static void lock_graph_up(struct lock_graph *g) in lock_graph_up()
104 closure_put(&g->g[--g->nr].trans->ref); in lock_graph_up()
107 static noinline void lock_graph_pop_all(struct lock_graph *g) in lock_graph_pop_all()
109 while (g->nr) in lock_graph_pop_all()
110 lock_graph_up(g); in lock_graph_pop_all()
113 static noinline void lock_graph_pop_from(struct lock_graph *g, struct trans_waiting_for_lock *i) in lock_graph_pop_from()
115 while (g->g + g->nr > i) in lock_graph_pop_from()
116 lock_graph_up(g); in lock_graph_pop_from()
119 static void __lock_graph_down(struct lock_graph *g, struct btree_trans *trans) in __lock_graph_down()
121 g->g[g->nr++] = (struct trans_waiting_for_lock) { in __lock_graph_down()
123 .node_want = trans->locking, in __lock_graph_down()
124 .lock_want = trans->locking_wait.lock_want, in __lock_graph_down()
128 static void lock_graph_down(struct lock_graph *g, struct btree_trans *trans) in lock_graph_down()
130 closure_get(&trans->ref); in lock_graph_down()
131 __lock_graph_down(g, trans); in lock_graph_down()
134 static bool lock_graph_remove_non_waiters(struct lock_graph *g, in lock_graph_remove_non_waiters()
139 if (from->trans->locking != from->node_want) { in lock_graph_remove_non_waiters()
140 lock_graph_pop_from(g, from); in lock_graph_remove_non_waiters()
144 for (i = from + 1; i < g->g + g->nr; i++) in lock_graph_remove_non_waiters()
145 if (i->trans->locking != i->node_want || in lock_graph_remove_non_waiters()
146 i->trans->locking_wait.start_time != i[-1].lock_start_time) { in lock_graph_remove_non_waiters()
147 lock_graph_pop_from(g, i); in lock_graph_remove_non_waiters()
154 static void trace_would_deadlock(struct lock_graph *g, struct btree_trans *trans) in trace_would_deadlock()
156 struct bch_fs *c = trans->c; in trace_would_deadlock()
164 print_cycle(&buf, g); in trace_would_deadlock()
171 static int abort_lock(struct lock_graph *g, struct trans_waiting_for_lock *i) in abort_lock()
173 if (i == g->g) { in abort_lock()
174 trace_would_deadlock(g, i->trans); in abort_lock()
175 return btree_trans_restart_foreign_task(i->trans, in abort_lock()
179 i->trans->lock_must_abort = true; in abort_lock()
180 wake_up_process(i->trans->locking_wait.task); in abort_lock()
187 if (trans->lock_may_not_fail) in btree_trans_abort_preference()
189 if (trans->locking_wait.lock_want == SIX_LOCK_write) in btree_trans_abort_preference()
191 if (!trans->in_traverse_all) in btree_trans_abort_preference()
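break_cycle() below walks the cycle and aborts the entry whose transaction has the highest value returned by btree_trans_abort_preference(); a value of 0 marks a transaction that may not be aborted at all. The listing only shows the first three checks of that function, so the following userspace sketch of the selection is hedged: all names are hypothetical and the exact return values are assumptions.

#include <stddef.h>

struct waiter {
	int lock_may_not_fail;	/* this lock attempt is not allowed to fail */
	int wants_write;	/* waiting for a write lock */
	int in_traverse_all;	/* already inside a full retraverse */
};

/* Analogue of btree_trans_abort_preference(): 0 means "never abort",
 * larger values are better victims. */
static int abort_preference(const struct waiter *w)
{
	if (w->lock_may_not_fail)
		return 0;
	if (w->wants_write)
		return 1;
	if (!w->in_traverse_all)
		return 3;
	return 2;
}

/* As in break_cycle(): scan the cycle, remember the best victim; -1 means
 * every entry was nofail and the caller must report a fatal lock cycle. */
int pick_abort_victim(const struct waiter *w, size_t nr)
{
	int best = -1, best_pref = 0;

	for (size_t i = 0; i < nr; i++) {
		int pref = abort_preference(&w[i]);

		if (pref > best_pref) {
			best = (int) i;
			best_pref = pref;
		}
	}
	return best;
}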
196 static noinline int break_cycle(struct lock_graph *g, struct printbuf *cycle, in break_cycle()
203 if (lock_graph_remove_non_waiters(g, from)) in break_cycle()
208 print_cycle(cycle, g); in break_cycle()
209 ret = -1; in break_cycle()
213 for (i = from; i < g->g + g->nr; i++) { in break_cycle()
214 pref = btree_trans_abort_preference(i->trans); in break_cycle()
225 prt_printf(&buf, bch2_fmt(g->g->trans->c, "cycle of nofail locks")); in break_cycle()
227 for (i = g->g; i < g->g + g->nr; i++) { in break_cycle()
228 struct btree_trans *trans = i->trans; in break_cycle()
234 bch2_prt_task_backtrace(&buf, trans->locking_wait.task, 2, GFP_NOWAIT); in break_cycle()
244 ret = abort_lock(g, abort); in break_cycle()
247 lock_graph_pop_all(g); in break_cycle()
249 lock_graph_pop_from(g, abort); in break_cycle()
253 static int lock_graph_descend(struct lock_graph *g, struct btree_trans *trans, in lock_graph_descend()
256 struct btree_trans *orig_trans = g->g->trans; in lock_graph_descend()
259 for (i = g->g; i < g->g + g->nr; i++) in lock_graph_descend()
260 if (i->trans == trans) { in lock_graph_descend()
261 closure_put(&trans->ref); in lock_graph_descend()
262 return break_cycle(g, cycle, i); in lock_graph_descend()
265 if (g->nr == ARRAY_SIZE(g->g)) { in lock_graph_descend()
266 closure_put(&trans->ref); in lock_graph_descend()
268 if (orig_trans->lock_may_not_fail) in lock_graph_descend()
271 lock_graph_pop_all(g); in lock_graph_descend()
276 trace_and_count(trans->c, trans_restart_would_deadlock_recursion_limit, trans, _RET_IP_); in lock_graph_descend()
280 __lock_graph_down(g, trans); in lock_graph_descend()
291 struct lock_graph g; in bch2_check_for_deadlock()
297 g.nr = 0; in bch2_check_for_deadlock()
299 if (trans->lock_must_abort && !trans->lock_may_not_fail) { in bch2_check_for_deadlock()
301 return -1; in bch2_check_for_deadlock()
303 trace_would_deadlock(&g, trans); in bch2_check_for_deadlock()
307 lock_graph_down(&g, trans); in bch2_check_for_deadlock()
309 /* trans->paths is rcu protected vs. freeing */ in bch2_check_for_deadlock()
312 cycle->atomic++; in bch2_check_for_deadlock()
314 if (!g.nr) in bch2_check_for_deadlock()
317 top = &g.g[g.nr - 1]; in bch2_check_for_deadlock()
319 struct btree_path *paths = rcu_dereference(top->trans->paths); in bch2_check_for_deadlock()
326 path_idx, top->path_idx) { in bch2_check_for_deadlock()
328 if (!path->nodes_locked) in bch2_check_for_deadlock()
331 if (path_idx != top->path_idx) { in bch2_check_for_deadlock()
332 top->path_idx = path_idx; in bch2_check_for_deadlock()
333 top->level = 0; in bch2_check_for_deadlock()
334 top->lock_start_time = 0; in bch2_check_for_deadlock()
338 top->level < BTREE_MAX_DEPTH; in bch2_check_for_deadlock()
339 top->level++, top->lock_start_time = 0) { in bch2_check_for_deadlock()
340 int lock_held = btree_node_locked_type(path, top->level); in bch2_check_for_deadlock()
345 b = &READ_ONCE(path->l[top->level].b)->c; in bch2_check_for_deadlock()
351 * structures - which means it can't be blocked in bch2_check_for_deadlock()
354 if (!lock_graph_remove_non_waiters(&g, g.g)) { in bch2_check_for_deadlock()
364 lock_graph_pop_all(&g); in bch2_check_for_deadlock()
370 if (list_empty_careful(&b->lock.wait_list)) in bch2_check_for_deadlock()
373 raw_spin_lock(&b->lock.wait_lock); in bch2_check_for_deadlock()
374 list_for_each_entry(trans, &b->lock.wait_list, locking_wait.list) { in bch2_check_for_deadlock()
375 BUG_ON(b != trans->locking); in bch2_check_for_deadlock()
377 if (top->lock_start_time && in bch2_check_for_deadlock()
378 time_after_eq64(top->lock_start_time, trans->locking_wait.start_time)) in bch2_check_for_deadlock()
381 top->lock_start_time = trans->locking_wait.start_time; in bch2_check_for_deadlock()
384 if (trans == top->trans || in bch2_check_for_deadlock()
385 !lock_type_conflicts(lock_held, trans->locking_wait.lock_want)) in bch2_check_for_deadlock()
388 closure_get(&trans->ref); in bch2_check_for_deadlock()
389 raw_spin_unlock(&b->lock.wait_lock); in bch2_check_for_deadlock()
391 ret = lock_graph_descend(&g, trans, cycle); in bch2_check_for_deadlock()
397 raw_spin_unlock(&b->lock.wait_lock); in bch2_check_for_deadlock()
401 if (g.nr > 1 && cycle) in bch2_check_for_deadlock()
402 print_chain(cycle, &g); in bch2_check_for_deadlock()
403 lock_graph_up(&g); in bch2_check_for_deadlock()
407 --cycle->atomic; in bch2_check_for_deadlock()
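Stripped of the six-lock wait lists, RCU, closure references, and the cycle-breaking policy, bch2_check_for_deadlock() above is a depth-first search over the wait-for graph: starting from the transaction that is about to block, follow edges of the form "I hold a lock that you are queued on", and report a deadlock if the walk reaches a transaction already on the stack. The sketch below is a self-contained userspace model of that shape only; all names are hypothetical and lock-type conflicts are ignored.

#include <stdbool.h>
#include <stdio.h>

#define NR_TRANS 4	/* transactions */
#define NR_LOCKS 4	/* locks */

struct toy_trans {
	int held[NR_LOCKS + 1];		/* lock ids held, terminated by -1 */
};

static bool on_stack(const int *stack, int depth, int t)
{
	for (int i = 0; i < depth; i++)
		if (stack[i] == t)
			return true;
	return false;
}

/* DFS mirroring lock_graph_down()/lock_graph_descend(): push 'cur', then
 * visit every transaction queued on a lock that 'cur' holds. */
static bool would_deadlock(const struct toy_trans *trans,
			   int waiters[NR_LOCKS][NR_TRANS + 1],
			   int cur, int *stack, int depth)
{
	if (depth == NR_TRANS)	/* recursion limit: give up, like the code above */
		return true;

	stack[depth++] = cur;

	for (int i = 0; trans[cur].held[i] >= 0; i++) {
		int l = trans[cur].held[i];

		for (int j = 0; waiters[l][j] >= 0; j++) {
			int w = waiters[l][j];

			if (w == cur)
				continue;
			if (on_stack(stack, depth, w))
				return true;	/* the new wait closes a cycle */
			if (would_deadlock(trans, waiters, w, stack, depth))
				return true;
		}
	}
	return false;
}

int main(void)
{
	/* T0 holds lock 0 and is about to wait on lock 1; T1 holds lock 1
	 * and is already queued on lock 0: a classic two-party deadlock. */
	struct toy_trans trans[NR_TRANS] = {
		{ .held = { 0, -1 } },
		{ .held = { 1, -1 } },
	};
	int waiters[NR_LOCKS][NR_TRANS + 1] = {
		[0] = { 1, -1 },	/* T1 waits on lock 0 */
		[1] = { 0, -1 },	/* T0 (the caller) is queued on lock 1 */
	};
	int stack[NR_TRANS];

	printf("deadlock: %d\n", would_deadlock(trans, waiters, 0, stack, 0));
	return 0;
}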
423 int readers = bch2_btree_node_lock_counts(trans, NULL, b, b->level).n[SIX_LOCK_read]; in __bch2_btree_node_lock_write()
427 * Must drop our read locks before calling six_lock_write() - in __bch2_btree_node_lock_write()
432 six_lock_readers_add(&b->lock, -readers); in __bch2_btree_node_lock_write()
435 six_lock_readers_add(&b->lock, readers); in __bch2_btree_node_lock_write()
438 mark_btree_node_locked_noreset(path, b->level, BTREE_NODE_INTENT_LOCKED); in __bch2_btree_node_lock_write()
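The six_lock_readers_add() dance in __bch2_btree_node_lock_write() above exists because the transaction may itself hold read locks on the node it now wants to write-lock; waiting for the reader count to drain would then deadlock against itself, so its own readers are subtracted first and restored if the attempt fails. A minimal userspace sketch of the same idea follows, using a plain atomic counter as a stand-in for the six lock's reader count (the kernel version can block; this one only tries once, and the name is hypothetical).

#include <stdatomic.h>
#include <stdbool.h>

/* rwcount > 0: that many readers hold the lock; rwcount == -1: write locked. */
static bool try_write_lock(atomic_int *rwcount, int my_readers)
{
	int expected = 0;

	/* temporarily drop our own read references so the count can hit zero */
	atomic_fetch_sub(rwcount, my_readers);

	if (atomic_compare_exchange_strong(rwcount, &expected, -1))
		return true;		/* got the write lock */

	/* failed: put our read references back, as the code above does */
	atomic_fetch_add(rwcount, my_readers);
	return false;
}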
458 unsigned l = path->level; in btree_path_get_locks()
459 int fail_idx = -1; in btree_path_get_locks()
471 f->l = l; in btree_path_get_locks()
472 f->b = path->l[l].b; in btree_path_get_locks()
477 } while (l < path->locks_want); in btree_path_get_locks()
489 path->l[fail_idx].b = upgrade in btree_path_get_locks()
490 ? ERR_PTR(-BCH_ERR_no_btree_node_upgrade) in btree_path_get_locks()
491 : ERR_PTR(-BCH_ERR_no_btree_node_relock); in btree_path_get_locks()
492 --fail_idx; in btree_path_get_locks()
496 if (path->uptodate == BTREE_ITER_NEED_RELOCK) in btree_path_get_locks()
497 path->uptodate = BTREE_ITER_UPTODATE; in btree_path_get_locks()
499 return path->uptodate < BTREE_ITER_NEED_RELOCK; in btree_path_get_locks()
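The fragments of btree_path_get_locks() above show a two-phase shape: climb from the path's level taking (or upgrading) locks while remembering the highest level that failed, then walk back down poisoning that level and everything below it so a later traverse knows it must redo them. A toy sketch of that shape, with hypothetical names and a NULL pointer standing in for the kernel's ERR_PTR() markers:

#include <stdbool.h>

#define TOY_DEPTH 4

struct relock_path {
	unsigned	level;		/* lowest level this path points at */
	unsigned	locks_want;	/* want locks held up to this level */
	void		*node[TOY_DEPTH];	/* stand-in for path->l[].b */
};

static bool toy_get_locks(struct relock_path *p,
			  bool (*relock)(struct relock_path *, unsigned))
{
	int fail_idx = -1;
	unsigned l = p->level;

	/* phase 1: climb, remembering the highest level that failed */
	do {
		if (!relock(p, l))
			fail_idx = (int) l;
		l++;
	} while (l < p->locks_want);

	bool ok = fail_idx < 0;

	/* phase 2: invalidate the failing level and everything below it */
	while (fail_idx >= 0)
		p->node[fail_idx--] = NULL;	/* kernel stores ERR_PTR() here */

	return ok;
}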
512 if (six_relock_type(&b->c.lock, want, path->l[level].lock_seq) || in __bch2_btree_node_relock()
514 btree_node_lock_increment(trans, &b->c, level, want))) { in __bch2_btree_node_relock()
519 if (trace && !trans->notrace_relock_fail) in __bch2_btree_node_relock()
520 trace_and_count(trans->c, btree_path_relock_fail, trans, _RET_IP_, path, level); in __bch2_btree_node_relock()
529 struct btree *b = path->l[level].b; in bch2_btree_node_upgrade()
554 ? six_lock_tryupgrade(&b->c.lock) in bch2_btree_node_upgrade()
555 : six_relock_type(&b->c.lock, SIX_LOCK_intent, path->l[level].lock_seq)) in bch2_btree_node_upgrade()
559 btree_node_lock_increment(trans, &b->c, level, BTREE_NODE_INTENT_LOCKED)) { in bch2_btree_node_upgrade()
564 trace_and_count(trans->c, btree_path_upgrade_fail, trans, _RET_IP_, path, level); in bch2_btree_node_upgrade()
574 * Only for btree_cache.c - only relocks intent locks
581 for (l = path->level; in bch2_btree_path_relock_intent()
582 l < path->locks_want && btree_path_node(path, l); in bch2_btree_path_relock_intent()
587 trace_and_count(trans->c, trans_restart_relock_path_intent, trans, _RET_IP_, path); in bch2_btree_path_relock_intent()
609 trace_and_count(trans->c, trans_restart_relock_path, trans, trace_ip, path); in __bch2_btree_path_relock()
621 EBUG_ON(path->locks_want >= new_locks_want); in bch2_btree_path_upgrade_noupgrade_sibs()
623 path->locks_want = new_locks_want; in bch2_btree_path_upgrade_noupgrade_sibs()
640 * XXX: this is ugly - we'd prefer to not be mucking with other in __bch2_btree_path_upgrade()
643 * On failure to upgrade the iterator, setting iter->locks_want and in __bch2_btree_path_upgrade()
651 * an iterator is a copy - for now, we'll just upgrade any other in __bch2_btree_path_upgrade()
655 * before interior nodes - now that's handled by in __bch2_btree_path_upgrade()
658 if (!path->cached && !trans->in_traverse_all) { in __bch2_btree_path_upgrade()
664 linked->cached == path->cached && in __bch2_btree_path_upgrade()
665 linked->btree_id == path->btree_id && in __bch2_btree_path_upgrade()
666 linked->locks_want < new_locks_want) { in __bch2_btree_path_upgrade()
667 linked->locks_want = new_locks_want; in __bch2_btree_path_upgrade()
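Per the comment above, when one path's upgrade fails the code also raises locks_want on every other path looking at the same btree, so the next traverse takes intent locks for them too. A toy sketch of that propagation (hypothetical names, structure taken from the loop above):

#define TOY_MAX_PATHS 8

struct sib_path {
	int		cached, btree_id;
	unsigned	locks_want;
};

/* Bump locks_want on all sibling paths of 'cur' that point at the same
 * btree, mirroring the loop in __bch2_btree_path_upgrade(). */
static void propagate_locks_want(struct sib_path *paths, unsigned nr,
				 unsigned cur, unsigned new_locks_want)
{
	for (unsigned i = 0; i < nr; i++) {
		if (i == cur)
			continue;
		if (paths[i].cached == paths[cur].cached &&
		    paths[i].btree_id == paths[cur].btree_id &&
		    paths[i].locks_want < new_locks_want)
			paths[i].locks_want = new_locks_want;
	}
}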
680 unsigned l, old_locks_want = path->locks_want; in __bch2_btree_path_downgrade()
682 if (trans->restarted) in __bch2_btree_path_downgrade()
685 EBUG_ON(path->locks_want < new_locks_want); in __bch2_btree_path_downgrade()
687 path->locks_want = new_locks_want; in __bch2_btree_path_downgrade()
689 while (path->nodes_locked && in __bch2_btree_path_downgrade()
690 (l = btree_path_highest_level_locked(path)) >= path->locks_want) { in __bch2_btree_path_downgrade()
691 if (l > path->level) { in __bch2_btree_path_downgrade()
695 six_lock_downgrade(&path->l[l].b->c.lock); in __bch2_btree_path_downgrade()
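A compact sketch of the loop in __bch2_btree_path_downgrade() above, with toy types and hypothetical names: locks above the path's own level are dropped outright once they are no longer wanted, while an intent lock at the path's level is downgraded to a read lock and the loop stops.

#include <stdbool.h>

#define DGRADE_DEPTH 4

enum toy_lock { TOY_UNLOCKED, TOY_READ, TOY_INTENT };

struct dgrade_path {
	unsigned	level;		/* the level this path points at */
	unsigned	locks_want;
	enum toy_lock	locked[DGRADE_DEPTH];
};

static int highest_level_locked(const struct dgrade_path *p)
{
	for (int l = DGRADE_DEPTH - 1; l >= 0; l--)
		if (p->locked[l] != TOY_UNLOCKED)
			return l;
	return -1;
}

/* Shed locks from the top down while they sit at or above the new
 * locks_want; the path's own level is downgraded rather than dropped. */
static void toy_downgrade(struct dgrade_path *p, unsigned new_locks_want)
{
	int l;

	p->locks_want = new_locks_want;

	while ((l = highest_level_locked(p)) >= (int) p->locks_want) {
		if ((unsigned) l > p->level) {
			p->locked[l] = TOY_UNLOCKED;
		} else {
			if (p->locked[l] == TOY_INTENT)
				p->locked[l] = TOY_READ;
			break;
		}
	}
}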
714 if (trans->restarted) in bch2_trans_downgrade()
718 if (path->ref) in bch2_trans_downgrade()
740 bch2_bpos_to_text(&buf, path->pos); in bch2_trans_relock_fail()
741 prt_printf(&buf, " l=%u seq=%u node seq=", f->l, path->l[f->l].lock_seq); in bch2_trans_relock_fail()
742 if (IS_ERR_OR_NULL(f->b)) { in bch2_trans_relock_fail()
743 prt_str(&buf, bch2_err_str(PTR_ERR(f->b))); in bch2_trans_relock_fail()
745 prt_printf(&buf, "%u", f->b->c.lock.seq); in bch2_trans_relock_fail()
748 bch2_btree_node_lock_counts(trans, NULL, &f->b->c, f->l); in bch2_trans_relock_fail()
751 c = six_lock_counts(&f->b->c.lock); in bch2_trans_relock_fail()
759 count_event(trans->c, trans_restart_relock); in bch2_trans_relock_fail()
770 if (unlikely(trans->restarted)) in __bch2_trans_relock()
771 return -((int) trans->restarted); in __bch2_trans_relock()
772 if (unlikely(trans->locked)) in __bch2_trans_relock()
781 if (path->should_be_locked && in __bch2_trans_relock()
830 bch2_btree_node_unlock_write(trans, path, path->l[l].b); in bch2_trans_unlock_write()
851 * there is no node at path->level, which generally means we were in bch2_btree_path_verify_locks()
854 BUG_ON(path->uptodate == BTREE_ITER_UPTODATE && in bch2_btree_path_verify_locks()
855 btree_path_node(path, path->level) && in bch2_btree_path_verify_locks()
856 !path->nodes_locked); in bch2_btree_path_verify_locks()
858 if (!path->nodes_locked) in bch2_btree_path_verify_locks()
873 path->l[l].lock_seq != six_lock_seq(&path->l[l].b->c.lock)); in bch2_btree_path_verify_locks()
883 if (path->nodes_locked) in bch2_trans_locked()
890 if (!trans->locked) { in bch2_trans_verify_locks()