/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHEFS_BTREE_LOCKING_H
#define _BCACHEFS_BTREE_LOCKING_H

/*
 * Only for internal btree use:
 *
 * The btree iterator tracks what locks it wants to take, and what locks it
 * currently has - here we have wrappers for locking/unlocking btree nodes and
 * updating the iterator state
 */

#include "btree_iter.h"
#include "six.h"

void bch2_btree_lock_init(struct btree_bkey_cached_common *, enum six_lock_init_flags, gfp_t gfp);

void bch2_trans_unlock_noassert(struct btree_trans *);
void bch2_trans_unlock_write(struct btree_trans *);

static inline bool is_btree_node(struct btree_path *path, unsigned l)
{
	return l < BTREE_MAX_DEPTH && !IS_ERR_OR_NULL(path->l[l].b);
}

static inline struct btree_transaction_stats *btree_trans_stats(struct btree_trans *trans)
{
	return trans->fn_idx < ARRAY_SIZE(trans->c->btree_transaction_stats)
		? &trans->c->btree_transaction_stats[trans->fn_idx]
		: NULL;
}

/* matches six lock types */
enum btree_node_locked_type {
	BTREE_NODE_UNLOCKED		= -1,
	BTREE_NODE_READ_LOCKED		= SIX_LOCK_read,
	BTREE_NODE_INTENT_LOCKED	= SIX_LOCK_intent,
	BTREE_NODE_WRITE_LOCKED		= SIX_LOCK_write,
};

static inline int btree_node_locked_type(struct btree_path *path,
					 unsigned level)
{
	return BTREE_NODE_UNLOCKED + ((path->nodes_locked >> (level << 1)) & 3);
}

static inline bool btree_node_write_locked(struct btree_path *path, unsigned l)
{
	return btree_node_locked_type(path, l) == BTREE_NODE_WRITE_LOCKED;
}

static inline bool btree_node_intent_locked(struct btree_path *path, unsigned l)
{
	return btree_node_locked_type(path, l) == BTREE_NODE_INTENT_LOCKED;
}

static inline bool btree_node_read_locked(struct btree_path *path, unsigned l)
{
	return btree_node_locked_type(path, l) == BTREE_NODE_READ_LOCKED;
}

static inline bool btree_node_locked(struct btree_path *path, unsigned level)
{
	return btree_node_locked_type(path, level) != BTREE_NODE_UNLOCKED;
}

static inline void mark_btree_node_locked_noreset(struct btree_path *path,
						  unsigned level,
						  enum btree_node_locked_type type)
{
	/* relying on this to avoid a branch */
	BUILD_BUG_ON(SIX_LOCK_read   != 0);
	BUILD_BUG_ON(SIX_LOCK_intent != 1);

	path->nodes_locked &= ~(3U << (level << 1));
	path->nodes_locked |= (type + 1) << (level << 1);
}

static inline void mark_btree_node_locked(struct btree_trans *trans,
					  struct btree_path *path,
					  unsigned level,
					  enum btree_node_locked_type type)
{
	mark_btree_node_locked_noreset(path, level, type);
#ifdef CONFIG_BCACHEFS_LOCK_TIME_STATS
	path->l[level].lock_taken_time = local_clock();
#endif
}

static inline enum six_lock_type __btree_lock_want(struct btree_path *path, int level)
{
	return level < path->locks_want
		? SIX_LOCK_intent
		: SIX_LOCK_read;
}
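/*
 * Worked example for the helpers above (illustrative only, not part of the
 * interface): path->nodes_locked packs two bits per btree level, storing the
 * held lock type offset by one so that 0 means unlocked.  A path holding an
 * intent lock at level 1 and a read lock at level 0 has
 *
 *	nodes_locked = (BTREE_NODE_INTENT_LOCKED + 1) << (1 << 1)
 *		     | (BTREE_NODE_READ_LOCKED   + 1) << (0 << 1)
 *		     = (1 + 1) << 2 | (0 + 1) << 0
 *		     = 0x9
 *
 * so btree_node_locked_type(path, 1) returns
 * BTREE_NODE_UNLOCKED + ((0x9 >> 2) & 3) = -1 + 2 = BTREE_NODE_INTENT_LOCKED.
 */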
static inline enum btree_node_locked_type
btree_lock_want(struct btree_path *path, int level)
{
	if (level < path->level)
		return BTREE_NODE_UNLOCKED;
	if (level < path->locks_want)
		return BTREE_NODE_INTENT_LOCKED;
	if (level == path->level)
		return BTREE_NODE_READ_LOCKED;
	return BTREE_NODE_UNLOCKED;
}

static void btree_trans_lock_hold_time_update(struct btree_trans *trans,
					      struct btree_path *path, unsigned level)
{
#ifdef CONFIG_BCACHEFS_LOCK_TIME_STATS
	__bch2_time_stats_update(&btree_trans_stats(trans)->lock_hold_times,
				 path->l[level].lock_taken_time,
				 local_clock());
#endif
}

/* unlock: */

void bch2_btree_node_unlock_write(struct btree_trans *,
				  struct btree_path *, struct btree *);

static inline void btree_node_unlock(struct btree_trans *trans,
				     struct btree_path *path, unsigned level)
{
	int lock_type = btree_node_locked_type(path, level);

	EBUG_ON(level >= BTREE_MAX_DEPTH);

	if (lock_type != BTREE_NODE_UNLOCKED) {
		if (unlikely(lock_type == BTREE_NODE_WRITE_LOCKED)) {
			bch2_btree_node_unlock_write(trans, path, path->l[level].b);
			lock_type = BTREE_NODE_INTENT_LOCKED;
		}
		six_unlock_type(&path->l[level].b->c.lock, lock_type);
		btree_trans_lock_hold_time_update(trans, path, level);
		mark_btree_node_locked_noreset(path, level, BTREE_NODE_UNLOCKED);
	}
}

static inline int btree_path_lowest_level_locked(struct btree_path *path)
{
	return __ffs(path->nodes_locked) >> 1;
}

static inline int btree_path_highest_level_locked(struct btree_path *path)
{
	return __fls(path->nodes_locked) >> 1;
}

static inline void __bch2_btree_path_unlock(struct btree_trans *trans,
					    struct btree_path *path)
{
	btree_path_set_dirty(path, BTREE_ITER_NEED_RELOCK);

	while (path->nodes_locked)
		btree_node_unlock(trans, path, btree_path_lowest_level_locked(path));
}

/*
 * Updates the saved lock sequence number, so that bch2_btree_node_relock()
 * will succeed:
 */
static inline void
__bch2_btree_node_unlock_write(struct btree_trans *trans, struct btree *b)
{
	if (!b->c.lock.write_lock_recurse) {
		struct btree_path *linked;
		unsigned i;

		trans_for_each_path_with_node(trans, b, linked, i)
			linked->l[b->c.level].lock_seq++;
	}

	six_unlock_write(&b->c.lock);
}

static inline void
bch2_btree_node_unlock_write_inlined(struct btree_trans *trans, struct btree_path *path,
				     struct btree *b)
{
	EBUG_ON(path->l[b->c.level].b != b);
	EBUG_ON(path->l[b->c.level].lock_seq != six_lock_seq(&b->c.lock));
	EBUG_ON(btree_node_locked_type(path, b->c.level) != SIX_LOCK_write);

	mark_btree_node_locked_noreset(path, b->c.level, BTREE_NODE_INTENT_LOCKED);
	__bch2_btree_node_unlock_write(trans, b);
}

int bch2_six_check_for_deadlock(struct six_lock *lock, void *p);

/* lock: */

static inline void trans_set_locked(struct btree_trans *trans, bool try)
{
	if (!trans->locked) {
		lock_acquire_exclusive(&trans->dep_map, 0, try, NULL, _THIS_IP_);
		trans->locked = true;
		trans->last_unlock_ip = 0;

		trans->pf_memalloc_nofs = (current->flags & PF_MEMALLOC_NOFS) != 0;
		current->flags |= PF_MEMALLOC_NOFS;
	}
}

static inline void trans_set_unlocked(struct btree_trans *trans)
{
	if (trans->locked) {
		lock_release(&trans->dep_map, _THIS_IP_);
		trans->locked = false;
		trans->last_unlock_ip = _RET_IP_;

		if (!trans->pf_memalloc_nofs)
			current->flags &= ~PF_MEMALLOC_NOFS;
	}
}
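/*
 * Sketch of the intended pairing (an assumed usage example, not a real call
 * site): trans_set_locked() saves the caller's PF_MEMALLOC_NOFS state and
 * then sets the flag, so that allocations made while btree locks are held
 * can't recurse into filesystem reclaim; trans_set_unlocked() restores the
 * saved state:
 *
 *	trans_set_locked(trans, false);
 *	// ... take btree node locks, allocate with PF_MEMALLOC_NOFS set ...
 *	trans_set_unlocked(trans);
 *	// PF_MEMALLOC_NOFS cleared again, unless the caller already had it
 */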
static inline int __btree_node_lock_nopath(struct btree_trans *trans,
					   struct btree_bkey_cached_common *b,
					   enum six_lock_type type,
					   bool lock_may_not_fail,
					   unsigned long ip)
{
	trans->lock_may_not_fail = lock_may_not_fail;
	trans->lock_must_abort = false;
	trans->locking = b;

	int ret = six_lock_ip_waiter(&b->lock, type, &trans->locking_wait,
				     bch2_six_check_for_deadlock, trans, ip);
	WRITE_ONCE(trans->locking, NULL);
	WRITE_ONCE(trans->locking_wait.start_time, 0);

	if (!ret)
		trace_btree_path_lock(trans, _THIS_IP_, b);
	return ret;
}

static inline int __must_check
btree_node_lock_nopath(struct btree_trans *trans,
		       struct btree_bkey_cached_common *b,
		       enum six_lock_type type,
		       unsigned long ip)
{
	return __btree_node_lock_nopath(trans, b, type, false, ip);
}

static inline void btree_node_lock_nopath_nofail(struct btree_trans *trans,
						 struct btree_bkey_cached_common *b,
						 enum six_lock_type type)
{
	int ret = __btree_node_lock_nopath(trans, b, type, true, _THIS_IP_);

	BUG_ON(ret);
}

/*
 * Lock a btree node if we already have it locked on one of our linked
 * iterators:
 */
static inline bool btree_node_lock_increment(struct btree_trans *trans,
					     struct btree_bkey_cached_common *b,
					     unsigned level,
					     enum btree_node_locked_type want)
{
	struct btree_path *path;
	unsigned i;

	trans_for_each_path(trans, path, i)
		if (&path->l[level].b->c == b &&
		    btree_node_locked_type(path, level) >= want) {
			six_lock_increment(&b->lock, (enum six_lock_type) want);
			return true;
		}

	return false;
}

static inline int btree_node_lock(struct btree_trans *trans,
				  struct btree_path *path,
				  struct btree_bkey_cached_common *b,
				  unsigned level,
				  enum six_lock_type type,
				  unsigned long ip)
{
	int ret = 0;

	EBUG_ON(level >= BTREE_MAX_DEPTH);
	bch2_trans_verify_not_unlocked_or_in_restart(trans);

	if (likely(six_trylock_type(&b->lock, type)) ||
	    btree_node_lock_increment(trans, b, level, (enum btree_node_locked_type) type) ||
	    !(ret = btree_node_lock_nopath(trans, b, type, btree_path_ip_allocated(path)))) {
#ifdef CONFIG_BCACHEFS_LOCK_TIME_STATS
		path->l[b->level].lock_taken_time = local_clock();
#endif
	}

	return ret;
}

int __bch2_btree_node_lock_write(struct btree_trans *, struct btree_path *,
				 struct btree_bkey_cached_common *b, bool);

static inline int __btree_node_lock_write(struct btree_trans *trans,
					  struct btree_path *path,
					  struct btree_bkey_cached_common *b,
					  bool lock_may_not_fail)
{
	EBUG_ON(&path->l[b->level].b->c != b);
	EBUG_ON(path->l[b->level].lock_seq != six_lock_seq(&b->lock));
	EBUG_ON(!btree_node_intent_locked(path, b->level));

	/*
	 * six locks are unfair, and read locks block while a thread wants a
	 * write lock: thus, we need to tell the cycle detector we have a write
	 * lock _before_ taking the lock:
	 */
	mark_btree_node_locked_noreset(path, b->level, BTREE_NODE_WRITE_LOCKED);

	return likely(six_trylock_write(&b->lock))
		? 0
		: __bch2_btree_node_lock_write(trans, path, b, lock_may_not_fail);
}
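/*
 * Illustrative caller pattern for write locking (assumed, not a real call
 * site): a node is only write locked while already intent locked, the fast
 * path is a trylock, and the node is marked write locked before the trylock
 * so that bch2_six_check_for_deadlock() in other threads already sees it:
 *
 *	int ret = bch2_btree_node_lock_write(trans, path, &b->c);
 *	if (ret)
 *		return ret;	// transaction restart; propagate to caller
 *	// ... modify b ...
 *	bch2_btree_node_unlock_write(trans, path, b);
 */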
static inline int __must_check
bch2_btree_node_lock_write(struct btree_trans *trans,
			   struct btree_path *path,
			   struct btree_bkey_cached_common *b)
{
	return __btree_node_lock_write(trans, path, b, false);
}

void bch2_btree_node_lock_write_nofail(struct btree_trans *,
				       struct btree_path *,
				       struct btree_bkey_cached_common *);

/* relock: */

bool bch2_btree_path_relock_norestart(struct btree_trans *, struct btree_path *);
int __bch2_btree_path_relock(struct btree_trans *,
			     struct btree_path *, unsigned long);

static inline int bch2_btree_path_relock(struct btree_trans *trans,
					 struct btree_path *path, unsigned long trace_ip)
{
	return btree_node_locked(path, path->level)
		? 0
		: __bch2_btree_path_relock(trans, path, trace_ip);
}

bool __bch2_btree_node_relock(struct btree_trans *, struct btree_path *, unsigned, bool trace);

static inline bool bch2_btree_node_relock(struct btree_trans *trans,
					  struct btree_path *path, unsigned level)
{
	EBUG_ON(btree_node_locked(path, level) &&
		!btree_node_write_locked(path, level) &&
		btree_node_locked_type(path, level) != __btree_lock_want(path, level));

	return likely(btree_node_locked(path, level)) ||
		(!IS_ERR_OR_NULL(path->l[level].b) &&
		 __bch2_btree_node_relock(trans, path, level, true));
}

static inline bool bch2_btree_node_relock_notrace(struct btree_trans *trans,
						  struct btree_path *path, unsigned level)
{
	EBUG_ON(btree_node_locked(path, level) &&
		!btree_node_write_locked(path, level) &&
		btree_node_locked_type(path, level) != __btree_lock_want(path, level));

	return likely(btree_node_locked(path, level)) ||
		(!IS_ERR_OR_NULL(path->l[level].b) &&
		 __bch2_btree_node_relock(trans, path, level, false));
}

/* upgrade */

bool bch2_btree_path_upgrade_noupgrade_sibs(struct btree_trans *,
					    struct btree_path *, unsigned,
					    struct get_locks_fail *);

bool __bch2_btree_path_upgrade(struct btree_trans *,
			       struct btree_path *, unsigned,
			       struct get_locks_fail *);

static inline int bch2_btree_path_upgrade(struct btree_trans *trans,
					  struct btree_path *path,
					  unsigned new_locks_want)
{
	struct get_locks_fail f = {};
	unsigned old_locks_want = path->locks_want;

	new_locks_want = min(new_locks_want, BTREE_MAX_DEPTH);

	if (path->locks_want < new_locks_want
	    ? __bch2_btree_path_upgrade(trans, path, new_locks_want, &f)
	    : path->nodes_locked)
		return 0;

	trace_and_count(trans->c, trans_restart_upgrade, trans, _THIS_IP_, path,
			old_locks_want, new_locks_want, &f);
	return btree_trans_restart(trans, BCH_ERR_transaction_restart_upgrade);
}
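/*
 * Assumed usage sketch: on failure bch2_btree_path_upgrade() restarts the
 * transaction, so callers must propagate the error back up to the
 * bch2_trans_begin() retry loop rather than continuing with stale locks:
 *
 *	ret = bch2_btree_path_upgrade(trans, path, level + 1);
 *	if (ret)	// -BCH_ERR_transaction_restart_upgrade
 *		goto err;
 */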
/* misc: */

static inline void btree_path_set_should_be_locked(struct btree_trans *trans, struct btree_path *path)
{
	EBUG_ON(!btree_node_locked(path, path->level));
	EBUG_ON(path->uptodate);

	path->should_be_locked = true;
	trace_btree_path_should_be_locked(trans, path);
}

static inline void __btree_path_set_level_up(struct btree_trans *trans,
					     struct btree_path *path,
					     unsigned l)
{
	btree_node_unlock(trans, path, l);
	path->l[l].b = ERR_PTR(-BCH_ERR_no_btree_node_up);
}

static inline void btree_path_set_level_up(struct btree_trans *trans,
					   struct btree_path *path)
{
	__btree_path_set_level_up(trans, path, path->level++);
	btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
}

/* debug */

struct six_lock_count bch2_btree_node_lock_counts(struct btree_trans *,
						  struct btree_path *,
						  struct btree_bkey_cached_common *b,
						  unsigned);

int bch2_check_for_deadlock(struct btree_trans *, struct printbuf *);

#ifdef CONFIG_BCACHEFS_DEBUG
void bch2_btree_path_verify_locks(struct btree_path *);
void bch2_trans_verify_locks(struct btree_trans *);
#else
static inline void bch2_btree_path_verify_locks(struct btree_path *path) {}
static inline void bch2_trans_verify_locks(struct btree_trans *trans) {}
#endif

#endif /* _BCACHEFS_BTREE_LOCKING_H */