1 // SPDX-License-Identifier: GPL-2.0
2
3 #include "bcachefs.h"
4 #include "bkey_methods.h"
5 #include "bkey_buf.h"
6 #include "btree_cache.h"
7 #include "btree_iter.h"
8 #include "btree_journal_iter.h"
9 #include "btree_key_cache.h"
10 #include "btree_locking.h"
11 #include "btree_update.h"
12 #include "debug.h"
13 #include "error.h"
14 #include "extents.h"
15 #include "journal.h"
16 #include "journal_io.h"
17 #include "replicas.h"
18 #include "snapshot.h"
19 #include "trace.h"
20
21 #include <linux/random.h>
22 #include <linux/prefetch.h>
23
24 static inline void btree_path_list_remove(struct btree_trans *, struct btree_path *);
25 static inline void btree_path_list_add(struct btree_trans *,
26 btree_path_idx_t, btree_path_idx_t);
27
28 static inline unsigned long btree_iter_ip_allocated(struct btree_iter *iter)
29 {
30 #ifdef TRACK_PATH_ALLOCATED
31 return iter->ip_allocated;
32 #else
33 return 0;
34 #endif
35 }
36
37 static btree_path_idx_t btree_path_alloc(struct btree_trans *, btree_path_idx_t);
38 static void bch2_trans_srcu_lock(struct btree_trans *);
39
40 static inline int __btree_path_cmp(const struct btree_path *l,
41 enum btree_id r_btree_id,
42 bool r_cached,
43 struct bpos r_pos,
44 unsigned r_level)
45 {
46 /*
47 * Must match lock ordering as defined by __bch2_btree_node_lock:
48 */
49 return cmp_int(l->btree_id, r_btree_id) ?:
50 cmp_int((int) l->cached, (int) r_cached) ?:
51 bpos_cmp(l->pos, r_pos) ?:
52 -cmp_int(l->level, r_level);
53 }
54
55 static inline int btree_path_cmp(const struct btree_path *l,
56 const struct btree_path *r)
57 {
58 return __btree_path_cmp(l, r->btree_id, r->cached, r->pos, r->level);
59 }
60
61 static inline struct bpos bkey_successor(struct btree_iter *iter, struct bpos p)
62 {
63 /* Are we iterating over keys in all snapshots? */
64 if (iter->flags & BTREE_ITER_all_snapshots) {
65 p = bpos_successor(p);
66 } else {
67 p = bpos_nosnap_successor(p);
68 p.snapshot = iter->snapshot;
69 }
70
71 return p;
72 }
73
74 static inline struct bpos bkey_predecessor(struct btree_iter *iter, struct bpos p)
75 {
76 /* Are we iterating over keys in all snapshots? */
77 if (iter->flags & BTREE_ITER_all_snapshots) {
78 p = bpos_predecessor(p);
79 } else {
80 p = bpos_nosnap_predecessor(p);
81 p.snapshot = iter->snapshot;
82 }
83
84 return p;
85 }
86
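/*
 * Extent keys are indexed by their end position, so for extents iterators the
 * search key is the successor of iter->pos: this finds the extent that covers
 * iter->pos rather than one that ends exactly at it.
 */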
87 static inline struct bpos btree_iter_search_key(struct btree_iter *iter)
88 {
89 struct bpos pos = iter->pos;
90
91 if ((iter->flags & BTREE_ITER_is_extents) &&
92 !bkey_eq(pos, POS_MAX))
93 pos = bkey_successor(iter, pos);
94 return pos;
95 }
96
97 static inline bool btree_path_pos_before_node(struct btree_path *path,
98 struct btree *b)
99 {
100 return bpos_lt(path->pos, b->data->min_key);
101 }
102
103 static inline bool btree_path_pos_after_node(struct btree_path *path,
104 struct btree *b)
105 {
106 return bpos_gt(path->pos, b->key.k.p);
107 }
108
109 static inline bool btree_path_pos_in_node(struct btree_path *path,
110 struct btree *b)
111 {
112 return path->btree_id == b->c.btree_id &&
113 !btree_path_pos_before_node(path, b) &&
114 !btree_path_pos_after_node(path, b);
115 }
116
117 /* Btree iterator: */
118
119 #ifdef CONFIG_BCACHEFS_DEBUG
120
121 static void bch2_btree_path_verify_cached(struct btree_trans *trans,
122 struct btree_path *path)
123 {
124 struct bkey_cached *ck;
125 bool locked = btree_node_locked(path, 0);
126
127 if (!bch2_btree_node_relock(trans, path, 0))
128 return;
129
130 ck = (void *) path->l[0].b;
131 BUG_ON(ck->key.btree_id != path->btree_id ||
132 !bkey_eq(ck->key.pos, path->pos));
133
134 if (!locked)
135 btree_node_unlock(trans, path, 0);
136 }
137
138 static void bch2_btree_path_verify_level(struct btree_trans *trans,
139 struct btree_path *path, unsigned level)
140 {
141 struct btree_path_level *l;
142 struct btree_node_iter tmp;
143 bool locked;
144 struct bkey_packed *p, *k;
145 struct printbuf buf1 = PRINTBUF;
146 struct printbuf buf2 = PRINTBUF;
147 struct printbuf buf3 = PRINTBUF;
148 const char *msg;
149
150 if (!bch2_debug_check_iterators)
151 return;
152
153 l = &path->l[level];
154 tmp = l->iter;
155 locked = btree_node_locked(path, level);
156
157 if (path->cached) {
158 if (!level)
159 bch2_btree_path_verify_cached(trans, path);
160 return;
161 }
162
163 if (!btree_path_node(path, level))
164 return;
165
166 if (!bch2_btree_node_relock_notrace(trans, path, level))
167 return;
168
169 BUG_ON(!btree_path_pos_in_node(path, l->b));
170
171 bch2_btree_node_iter_verify(&l->iter, l->b);
172
173 /*
174 * For interior nodes, the iterator will have skipped past deleted keys:
175 */
176 p = level
177 ? bch2_btree_node_iter_prev(&tmp, l->b)
178 : bch2_btree_node_iter_prev_all(&tmp, l->b);
179 k = bch2_btree_node_iter_peek_all(&l->iter, l->b);
180
181 if (p && bkey_iter_pos_cmp(l->b, p, &path->pos) >= 0) {
182 msg = "before";
183 goto err;
184 }
185
186 if (k && bkey_iter_pos_cmp(l->b, k, &path->pos) < 0) {
187 msg = "after";
188 goto err;
189 }
190
191 if (!locked)
192 btree_node_unlock(trans, path, level);
193 return;
194 err:
195 bch2_bpos_to_text(&buf1, path->pos);
196
197 if (p) {
198 struct bkey uk = bkey_unpack_key(l->b, p);
199
200 bch2_bkey_to_text(&buf2, &uk);
201 } else {
202 prt_printf(&buf2, "(none)");
203 }
204
205 if (k) {
206 struct bkey uk = bkey_unpack_key(l->b, k);
207
208 bch2_bkey_to_text(&buf3, &uk);
209 } else {
210 prt_printf(&buf3, "(none)");
211 }
212
213 panic("path should be %s key at level %u:\n"
214 "path pos %s\n"
215 "prev key %s\n"
216 "cur key %s\n",
217 msg, level, buf1.buf, buf2.buf, buf3.buf);
218 }
219
220 static void bch2_btree_path_verify(struct btree_trans *trans,
221 struct btree_path *path)
222 {
223 struct bch_fs *c = trans->c;
224
225 for (unsigned i = 0; i < (!path->cached ? BTREE_MAX_DEPTH : 1); i++) {
226 if (!path->l[i].b) {
227 BUG_ON(!path->cached &&
228 bch2_btree_id_root(c, path->btree_id)->b->c.level > i);
229 break;
230 }
231
232 bch2_btree_path_verify_level(trans, path, i);
233 }
234
235 bch2_btree_path_verify_locks(path);
236 }
237
238 void bch2_trans_verify_paths(struct btree_trans *trans)
239 {
240 struct btree_path *path;
241 unsigned iter;
242
243 trans_for_each_path(trans, path, iter)
244 bch2_btree_path_verify(trans, path);
245 }
246
247 static void bch2_btree_iter_verify(struct btree_trans *trans, struct btree_iter *iter)
248 {
249 BUG_ON(!!(iter->flags & BTREE_ITER_cached) != btree_iter_path(trans, iter)->cached);
250
251 BUG_ON((iter->flags & BTREE_ITER_is_extents) &&
252 (iter->flags & BTREE_ITER_all_snapshots));
253
254 BUG_ON(!(iter->flags & BTREE_ITER_snapshot_field) &&
255 (iter->flags & BTREE_ITER_all_snapshots) &&
256 !btree_type_has_snapshot_field(iter->btree_id));
257
258 if (iter->update_path)
259 bch2_btree_path_verify(trans, &trans->paths[iter->update_path]);
260 bch2_btree_path_verify(trans, btree_iter_path(trans, iter));
261 }
262
263 static void bch2_btree_iter_verify_entry_exit(struct btree_iter *iter)
264 {
265 BUG_ON((iter->flags & BTREE_ITER_filter_snapshots) &&
266 !iter->pos.snapshot);
267
268 BUG_ON(!(iter->flags & BTREE_ITER_all_snapshots) &&
269 iter->pos.snapshot != iter->snapshot);
270
271 BUG_ON(iter->flags & BTREE_ITER_all_snapshots ? !bpos_eq(iter->pos, iter->k.p) :
272 !(iter->flags & BTREE_ITER_is_extents) ? !bkey_eq(iter->pos, iter->k.p) :
273 (bkey_lt(iter->pos, bkey_start_pos(&iter->k)) ||
274 bkey_gt(iter->pos, iter->k.p)));
275 }
276
277 static int bch2_btree_iter_verify_ret(struct btree_trans *trans,
278 struct btree_iter *iter, struct bkey_s_c k)
279 {
280 struct btree_iter copy;
281 struct bkey_s_c prev;
282 int ret = 0;
283
284 if (!bch2_debug_check_iterators)
285 return 0;
286
287 if (!(iter->flags & BTREE_ITER_filter_snapshots))
288 return 0;
289
290 if (bkey_err(k) || !k.k)
291 return 0;
292
293 BUG_ON(!bch2_snapshot_is_ancestor(trans->c,
294 iter->snapshot,
295 k.k->p.snapshot));
296
297 bch2_trans_iter_init(trans, &copy, iter->btree_id, iter->pos,
298 BTREE_ITER_nopreserve|
299 BTREE_ITER_all_snapshots);
300 prev = bch2_btree_iter_prev(trans, &copy);
301 if (!prev.k)
302 goto out;
303
304 ret = bkey_err(prev);
305 if (ret)
306 goto out;
307
308 if (bkey_eq(prev.k->p, k.k->p) &&
309 bch2_snapshot_is_ancestor(trans->c, iter->snapshot,
310 prev.k->p.snapshot) > 0) {
311 struct printbuf buf1 = PRINTBUF, buf2 = PRINTBUF;
312
313 bch2_bkey_to_text(&buf1, k.k);
314 bch2_bkey_to_text(&buf2, prev.k);
315
316 panic("iter snap %u\n"
317 "k %s\n"
318 "prev %s\n",
319 iter->snapshot,
320 buf1.buf, buf2.buf);
321 }
322 out:
323 bch2_trans_iter_exit(trans, &copy);
324 return ret;
325 }
326
327 void bch2_assert_pos_locked(struct btree_trans *trans, enum btree_id id,
328 struct bpos pos)
329 {
330 bch2_trans_verify_not_unlocked_or_in_restart(trans);
331
332 struct btree_path *path;
333 struct trans_for_each_path_inorder_iter iter;
334 struct printbuf buf = PRINTBUF;
335
336 btree_trans_sort_paths(trans);
337
338 trans_for_each_path_inorder(trans, path, iter) {
339 if (path->btree_id != id ||
340 !btree_node_locked(path, 0) ||
341 !path->should_be_locked)
342 continue;
343
344 if (!path->cached) {
345 if (bkey_ge(pos, path->l[0].b->data->min_key) &&
346 bkey_le(pos, path->l[0].b->key.k.p))
347 return;
348 } else {
349 if (bkey_eq(pos, path->pos))
350 return;
351 }
352 }
353
354 bch2_dump_trans_paths_updates(trans);
355 bch2_bpos_to_text(&buf, pos);
356
357 panic("not locked: %s %s\n", bch2_btree_id_str(id), buf.buf);
358 }
359
360 #else
361
362 static inline void bch2_btree_path_verify_level(struct btree_trans *trans,
363 struct btree_path *path, unsigned l) {}
364 static inline void bch2_btree_path_verify(struct btree_trans *trans,
365 struct btree_path *path) {}
366 static inline void bch2_btree_iter_verify(struct btree_trans *trans,
367 struct btree_iter *iter) {}
368 static inline void bch2_btree_iter_verify_entry_exit(struct btree_iter *iter) {}
369 static inline int bch2_btree_iter_verify_ret(struct btree_trans *trans, struct btree_iter *iter,
370 struct bkey_s_c k) { return 0; }
371
372 #endif
373
374 /* Btree path: fixups after btree updates */
375
376 static void btree_node_iter_set_set_pos(struct btree_node_iter *iter,
377 struct btree *b,
378 struct bset_tree *t,
379 struct bkey_packed *k)
380 {
381 struct btree_node_iter_set *set;
382
383 btree_node_iter_for_each(iter, set)
384 if (set->end == t->end_offset) {
385 set->k = __btree_node_key_to_offset(b, k);
386 bch2_btree_node_iter_sort(iter, b);
387 return;
388 }
389
390 bch2_btree_node_iter_push(iter, b, k, btree_bkey_last(b, t));
391 }
392
393 static void __bch2_btree_path_fix_key_modified(struct btree_path *path,
394 struct btree *b,
395 struct bkey_packed *where)
396 {
397 struct btree_path_level *l = &path->l[b->c.level];
398
399 if (where != bch2_btree_node_iter_peek_all(&l->iter, l->b))
400 return;
401
402 if (bkey_iter_pos_cmp(l->b, where, &path->pos) < 0)
403 bch2_btree_node_iter_advance(&l->iter, l->b);
404 }
405
406 void bch2_btree_path_fix_key_modified(struct btree_trans *trans,
407 struct btree *b,
408 struct bkey_packed *where)
409 {
410 struct btree_path *path;
411 unsigned i;
412
413 trans_for_each_path_with_node(trans, b, path, i) {
414 __bch2_btree_path_fix_key_modified(path, b, where);
415 bch2_btree_path_verify_level(trans, path, b->c.level);
416 }
417 }
418
419 static void __bch2_btree_node_iter_fix(struct btree_path *path,
420 struct btree *b,
421 struct btree_node_iter *node_iter,
422 struct bset_tree *t,
423 struct bkey_packed *where,
424 unsigned clobber_u64s,
425 unsigned new_u64s)
426 {
427 const struct bkey_packed *end = btree_bkey_last(b, t);
428 struct btree_node_iter_set *set;
429 unsigned offset = __btree_node_key_to_offset(b, where);
430 int shift = new_u64s - clobber_u64s;
431 unsigned old_end = t->end_offset - shift;
432 unsigned orig_iter_pos = node_iter->data[0].k;
433 bool iter_current_key_modified =
434 orig_iter_pos >= offset &&
435 orig_iter_pos <= offset + clobber_u64s;
436
437 btree_node_iter_for_each(node_iter, set)
438 if (set->end == old_end)
439 goto found;
440
441 /* didn't find the bset in the iterator - might have to re-add it: */
442 if (new_u64s &&
443 bkey_iter_pos_cmp(b, where, &path->pos) >= 0) {
444 bch2_btree_node_iter_push(node_iter, b, where, end);
445 goto fixup_done;
446 } else {
447 /* Iterator is after key that changed */
448 return;
449 }
450 found:
451 set->end = t->end_offset;
452
453 /* Iterator hasn't gotten to the key that changed yet: */
454 if (set->k < offset)
455 return;
456
457 if (new_u64s &&
458 bkey_iter_pos_cmp(b, where, &path->pos) >= 0) {
459 set->k = offset;
460 } else if (set->k < offset + clobber_u64s) {
461 set->k = offset + new_u64s;
462 if (set->k == set->end)
463 bch2_btree_node_iter_set_drop(node_iter, set);
464 } else {
465 /* Iterator is after key that changed */
466 set->k = (int) set->k + shift;
467 return;
468 }
469
470 bch2_btree_node_iter_sort(node_iter, b);
471 fixup_done:
472 if (node_iter->data[0].k != orig_iter_pos)
473 iter_current_key_modified = true;
474
475 /*
476 * When a new key is added, and the node iterator now points to that
477 * key, the iterator might have skipped past deleted keys that should
478 * come after the key the iterator now points to. We have to rewind to
479 * before those deleted keys - otherwise
480 * bch2_btree_node_iter_prev_all() breaks:
481 */
482 if (!bch2_btree_node_iter_end(node_iter) &&
483 iter_current_key_modified &&
484 b->c.level) {
485 struct bkey_packed *k, *k2, *p;
486
487 k = bch2_btree_node_iter_peek_all(node_iter, b);
488
489 for_each_bset(b, t) {
490 bool set_pos = false;
491
492 if (node_iter->data[0].end == t->end_offset)
493 continue;
494
495 k2 = bch2_btree_node_iter_bset_pos(node_iter, b, t);
496
497 while ((p = bch2_bkey_prev_all(b, t, k2)) &&
498 bkey_iter_cmp(b, k, p) < 0) {
499 k2 = p;
500 set_pos = true;
501 }
502
503 if (set_pos)
504 btree_node_iter_set_set_pos(node_iter,
505 b, t, k2);
506 }
507 }
508 }
509
510 void bch2_btree_node_iter_fix(struct btree_trans *trans,
511 struct btree_path *path,
512 struct btree *b,
513 struct btree_node_iter *node_iter,
514 struct bkey_packed *where,
515 unsigned clobber_u64s,
516 unsigned new_u64s)
517 {
518 struct bset_tree *t = bch2_bkey_to_bset_inlined(b, where);
519 struct btree_path *linked;
520 unsigned i;
521
522 if (node_iter != &path->l[b->c.level].iter) {
523 __bch2_btree_node_iter_fix(path, b, node_iter, t,
524 where, clobber_u64s, new_u64s);
525
526 if (bch2_debug_check_iterators)
527 bch2_btree_node_iter_verify(node_iter, b);
528 }
529
530 trans_for_each_path_with_node(trans, b, linked, i) {
531 __bch2_btree_node_iter_fix(linked, b,
532 &linked->l[b->c.level].iter, t,
533 where, clobber_u64s, new_u64s);
534 bch2_btree_path_verify_level(trans, linked, b->c.level);
535 }
536 }
537
538 /* Btree path level: pointer to a particular btree node and node iter */
539
540 static inline struct bkey_s_c __btree_iter_unpack(struct bch_fs *c,
541 struct btree_path_level *l,
542 struct bkey *u,
543 struct bkey_packed *k)
544 {
545 if (unlikely(!k)) {
546 /*
547 * signal to bch2_btree_iter_peek_slot() that we're currently at
548 * a hole
549 */
550 u->type = KEY_TYPE_deleted;
551 return bkey_s_c_null;
552 }
553
554 return bkey_disassemble(l->b, k, u);
555 }
556
557 static inline struct bkey_s_c btree_path_level_peek_all(struct bch_fs *c,
558 struct btree_path_level *l,
559 struct bkey *u)
560 {
561 return __btree_iter_unpack(c, l, u,
562 bch2_btree_node_iter_peek_all(&l->iter, l->b));
563 }
564
565 static inline struct bkey_s_c btree_path_level_prev(struct btree_trans *trans,
566 struct btree_path *path,
567 struct btree_path_level *l,
568 struct bkey *u)
569 {
570 struct bkey_s_c k = __btree_iter_unpack(trans->c, l, u,
571 bch2_btree_node_iter_prev(&l->iter, l->b));
572
573 path->pos = k.k ? k.k->p : l->b->data->min_key;
574 trans->paths_sorted = false;
575 bch2_btree_path_verify_level(trans, path, l - path->l);
576 return k;
577 }
578
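/*
 * Advance the node iterator until it points at a key >= path->pos. Returns
 * false if more than @max_advance keys had to be skipped - the caller then
 * reinitializes the node iterator instead, which is cheaper for big jumps.
 */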
579 static inline bool btree_path_advance_to_pos(struct btree_path *path,
580 struct btree_path_level *l,
581 int max_advance)
582 {
583 struct bkey_packed *k;
584 int nr_advanced = 0;
585
586 while ((k = bch2_btree_node_iter_peek_all(&l->iter, l->b)) &&
587 bkey_iter_pos_cmp(l->b, k, &path->pos) < 0) {
588 if (max_advance > 0 && nr_advanced >= max_advance)
589 return false;
590
591 bch2_btree_node_iter_advance(&l->iter, l->b);
592 nr_advanced++;
593 }
594
595 return true;
596 }
597
598 static inline void __btree_path_level_init(struct btree_path *path,
599 unsigned level)
600 {
601 struct btree_path_level *l = &path->l[level];
602
603 bch2_btree_node_iter_init(&l->iter, l->b, &path->pos);
604
605 /*
606 * Iterators to interior nodes should always be pointed at the first non-
607 * whiteout:
608 */
609 if (level)
610 bch2_btree_node_iter_peek(&l->iter, l->b);
611 }
612
613 void bch2_btree_path_level_init(struct btree_trans *trans,
614 struct btree_path *path,
615 struct btree *b)
616 {
617 BUG_ON(path->cached);
618
619 EBUG_ON(!btree_path_pos_in_node(path, b));
620
621 path->l[b->c.level].lock_seq = six_lock_seq(&b->c.lock);
622 path->l[b->c.level].b = b;
623 __btree_path_level_init(path, b->c.level);
624 }
625
626 /* Btree path: fixups after btree node updates: */
627
628 static void bch2_trans_revalidate_updates_in_node(struct btree_trans *trans, struct btree *b)
629 {
630 struct bch_fs *c = trans->c;
631
632 trans_for_each_update(trans, i)
633 if (!i->cached &&
634 i->level == b->c.level &&
635 i->btree_id == b->c.btree_id &&
636 bpos_cmp(i->k->k.p, b->data->min_key) >= 0 &&
637 bpos_cmp(i->k->k.p, b->data->max_key) <= 0) {
638 i->old_v = bch2_btree_path_peek_slot(trans->paths + i->path, &i->old_k).v;
639
640 if (unlikely(trans->journal_replay_not_finished)) {
641 struct bkey_i *j_k =
642 bch2_journal_keys_peek_slot(c, i->btree_id, i->level,
643 i->k->k.p);
644
645 if (j_k) {
646 i->old_k = j_k->k;
647 i->old_v = &j_k->v;
648 }
649 }
650 }
651 }
652
653 /*
654 * A btree node is being replaced - update the iterator to point to the new
655 * node:
656 */
657 void bch2_trans_node_add(struct btree_trans *trans,
658 struct btree_path *path,
659 struct btree *b)
660 {
661 struct btree_path *prev;
662
663 BUG_ON(!btree_path_pos_in_node(path, b));
664
665 while ((prev = prev_btree_path(trans, path)) &&
666 btree_path_pos_in_node(prev, b))
667 path = prev;
668
669 for (;
670 path && btree_path_pos_in_node(path, b);
671 path = next_btree_path(trans, path))
672 if (path->uptodate == BTREE_ITER_UPTODATE && !path->cached) {
673 enum btree_node_locked_type t =
674 btree_lock_want(path, b->c.level);
675
676 if (t != BTREE_NODE_UNLOCKED) {
677 btree_node_unlock(trans, path, b->c.level);
678 six_lock_increment(&b->c.lock, (enum six_lock_type) t);
679 mark_btree_node_locked(trans, path, b->c.level, t);
680 }
681
682 bch2_btree_path_level_init(trans, path, b);
683 }
684
685 bch2_trans_revalidate_updates_in_node(trans, b);
686 }
687
688 void bch2_trans_node_drop(struct btree_trans *trans,
689 struct btree *b)
690 {
691 struct btree_path *path;
692 unsigned i, level = b->c.level;
693
694 trans_for_each_path(trans, path, i)
695 if (path->l[level].b == b) {
696 btree_node_unlock(trans, path, level);
697 path->l[level].b = ERR_PTR(-BCH_ERR_no_btree_node_init);
698 }
699 }
700
701 /*
702 * A btree node has been modified in such a way as to invalidate iterators - fix
703 * them:
704 */
705 void bch2_trans_node_reinit_iter(struct btree_trans *trans, struct btree *b)
706 {
707 struct btree_path *path;
708 unsigned i;
709
710 trans_for_each_path_with_node(trans, b, path, i)
711 __btree_path_level_init(path, b->c.level);
712
713 bch2_trans_revalidate_updates_in_node(trans, b);
714 }
715
716 /* Btree path: traverse, set_pos: */
717
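/*
 * Lock the root node of the btree for this path: the root may be freed and
 * replaced while we wait for the lock, so after locking we recheck that the
 * node we locked is still the current root and retry if it isn't.
 */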
718 static inline int btree_path_lock_root(struct btree_trans *trans,
719 struct btree_path *path,
720 unsigned depth_want,
721 unsigned long trace_ip)
722 {
723 struct bch_fs *c = trans->c;
724 struct btree_root *r = bch2_btree_id_root(c, path->btree_id);
725 enum six_lock_type lock_type;
726 unsigned i;
727 int ret;
728
729 EBUG_ON(path->nodes_locked);
730
731 while (1) {
732 struct btree *b = READ_ONCE(r->b);
733 if (unlikely(!b)) {
734 BUG_ON(!r->error);
735 return r->error;
736 }
737
738 path->level = READ_ONCE(b->c.level);
739
740 if (unlikely(path->level < depth_want)) {
741 /*
742 * the root is at a lower depth than the depth we want:
743 * got to the end of the btree, or we're walking nodes
744 * greater than some depth and there are no nodes >=
745 * that depth
746 */
747 path->level = depth_want;
748 for (i = path->level; i < BTREE_MAX_DEPTH; i++)
749 path->l[i].b = NULL;
750 return 1;
751 }
752
753 lock_type = __btree_lock_want(path, path->level);
754 ret = btree_node_lock(trans, path, &b->c,
755 path->level, lock_type, trace_ip);
756 if (unlikely(ret)) {
757 if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
758 return ret;
759 BUG();
760 }
761
762 if (likely(b == READ_ONCE(r->b) &&
763 b->c.level == path->level &&
764 !race_fault())) {
765 for (i = 0; i < path->level; i++)
766 path->l[i].b = ERR_PTR(-BCH_ERR_no_btree_node_lock_root);
767 path->l[path->level].b = b;
768 for (i = path->level + 1; i < BTREE_MAX_DEPTH; i++)
769 path->l[i].b = NULL;
770
771 mark_btree_node_locked(trans, path, path->level,
772 (enum btree_node_locked_type) lock_type);
773 bch2_btree_path_level_init(trans, path, b);
774 return 0;
775 }
776
777 six_unlock_type(&b->c.lock, lock_type);
778 }
779 }
780
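/*
 * Prefetch the next few child nodes (at path->level - 1) by walking forward
 * in the parent node; how many we prefetch depends on the level and on
 * whether the filesystem has finished starting up.
 */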
781 noinline
782 static int btree_path_prefetch(struct btree_trans *trans, struct btree_path *path)
783 {
784 struct bch_fs *c = trans->c;
785 struct btree_path_level *l = path_l(path);
786 struct btree_node_iter node_iter = l->iter;
787 struct bkey_packed *k;
788 struct bkey_buf tmp;
789 unsigned nr = test_bit(BCH_FS_started, &c->flags)
790 ? (path->level > 1 ? 0 : 2)
791 : (path->level > 1 ? 1 : 16);
792 bool was_locked = btree_node_locked(path, path->level);
793 int ret = 0;
794
795 bch2_bkey_buf_init(&tmp);
796
797 while (nr-- && !ret) {
798 if (!bch2_btree_node_relock(trans, path, path->level))
799 break;
800
801 bch2_btree_node_iter_advance(&node_iter, l->b);
802 k = bch2_btree_node_iter_peek(&node_iter, l->b);
803 if (!k)
804 break;
805
806 bch2_bkey_buf_unpack(&tmp, c, l->b, k);
807 ret = bch2_btree_node_prefetch(trans, path, tmp.k, path->btree_id,
808 path->level - 1);
809 }
810
811 if (!was_locked)
812 btree_node_unlock(trans, path, path->level);
813
814 bch2_bkey_buf_exit(&tmp, c);
815 return ret;
816 }
817
818 static int btree_path_prefetch_j(struct btree_trans *trans, struct btree_path *path,
819 struct btree_and_journal_iter *jiter)
820 {
821 struct bch_fs *c = trans->c;
822 struct bkey_s_c k;
823 struct bkey_buf tmp;
824 unsigned nr = test_bit(BCH_FS_started, &c->flags)
825 ? (path->level > 1 ? 0 : 2)
826 : (path->level > 1 ? 1 : 16);
827 bool was_locked = btree_node_locked(path, path->level);
828 int ret = 0;
829
830 bch2_bkey_buf_init(&tmp);
831
832 jiter->fail_if_too_many_whiteouts = true;
833
834 while (nr-- && !ret) {
835 if (!bch2_btree_node_relock(trans, path, path->level))
836 break;
837
838 bch2_btree_and_journal_iter_advance(jiter);
839 k = bch2_btree_and_journal_iter_peek(jiter);
840 if (!k.k)
841 break;
842
843 bch2_bkey_buf_reassemble(&tmp, c, k);
844 ret = bch2_btree_node_prefetch(trans, path, tmp.k, path->btree_id,
845 path->level - 1);
846 }
847
848 if (!was_locked)
849 btree_node_unlock(trans, path, path->level);
850
851 bch2_bkey_buf_exit(&tmp, c);
852 return ret;
853 }
854
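/*
 * Update mem_ptr in the parent's btree_ptr_v2 key to point at the in-memory
 * node we just looked up, so that btree_node_mem_ptr() agrees with it the
 * next time this child is looked up; done under the parent node's lock.
 */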
855 static noinline void btree_node_mem_ptr_set(struct btree_trans *trans,
856 struct btree_path *path,
857 unsigned plevel, struct btree *b)
858 {
859 struct btree_path_level *l = &path->l[plevel];
860 bool locked = btree_node_locked(path, plevel);
861 struct bkey_packed *k;
862 struct bch_btree_ptr_v2 *bp;
863
864 if (!bch2_btree_node_relock(trans, path, plevel))
865 return;
866
867 k = bch2_btree_node_iter_peek_all(&l->iter, l->b);
868 BUG_ON(k->type != KEY_TYPE_btree_ptr_v2);
869
870 bp = (void *) bkeyp_val(&l->b->format, k);
871 bp->mem_ptr = (unsigned long)b;
872
873 if (!locked)
874 btree_node_unlock(trans, path, plevel);
875 }
876
877 static noinline int btree_node_iter_and_journal_peek(struct btree_trans *trans,
878 struct btree_path *path,
879 unsigned flags,
880 struct bkey_buf *out)
881 {
882 struct bch_fs *c = trans->c;
883 struct btree_path_level *l = path_l(path);
884 struct btree_and_journal_iter jiter;
885 struct bkey_s_c k;
886 int ret = 0;
887
888 __bch2_btree_and_journal_iter_init_node_iter(trans, &jiter, l->b, l->iter, path->pos);
889
890 k = bch2_btree_and_journal_iter_peek(&jiter);
891 if (!k.k) {
892 struct printbuf buf = PRINTBUF;
893
894 prt_str(&buf, "node not found at pos ");
895 bch2_bpos_to_text(&buf, path->pos);
896 prt_str(&buf, " at btree ");
897 bch2_btree_pos_to_text(&buf, c, l->b);
898
899 ret = bch2_fs_topology_error(c, "%s", buf.buf);
900 printbuf_exit(&buf);
901 goto err;
902 }
903
904 bch2_bkey_buf_reassemble(out, c, k);
905
906 if ((flags & BTREE_ITER_prefetch) &&
907 c->opts.btree_node_prefetch)
908 ret = btree_path_prefetch_j(trans, path, &jiter);
909
910 err:
911 bch2_btree_and_journal_iter_exit(&jiter);
912 return ret;
913 }
914
915 static __always_inline int btree_path_down(struct btree_trans *trans,
916 struct btree_path *path,
917 unsigned flags,
918 unsigned long trace_ip)
919 {
920 struct bch_fs *c = trans->c;
921 struct btree_path_level *l = path_l(path);
922 struct btree *b;
923 unsigned level = path->level - 1;
924 enum six_lock_type lock_type = __btree_lock_want(path, level);
925 struct bkey_buf tmp;
926 int ret;
927
928 EBUG_ON(!btree_node_locked(path, path->level));
929
930 bch2_bkey_buf_init(&tmp);
931
932 if (unlikely(trans->journal_replay_not_finished)) {
933 ret = btree_node_iter_and_journal_peek(trans, path, flags, &tmp);
934 if (ret)
935 goto err;
936 } else {
937 struct bkey_packed *k = bch2_btree_node_iter_peek(&l->iter, l->b);
938 if (!k) {
939 struct printbuf buf = PRINTBUF;
940
941 prt_str(&buf, "node not found at pos ");
942 bch2_bpos_to_text(&buf, path->pos);
943 prt_str(&buf, " within parent node ");
944 bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&l->b->key));
945
946 bch2_fs_fatal_error(c, "%s", buf.buf);
947 printbuf_exit(&buf);
948 ret = -BCH_ERR_btree_need_topology_repair;
949 goto err;
950 }
951
952 bch2_bkey_buf_unpack(&tmp, c, l->b, k);
953
954 if ((flags & BTREE_ITER_prefetch) &&
955 c->opts.btree_node_prefetch) {
956 ret = btree_path_prefetch(trans, path);
957 if (ret)
958 goto err;
959 }
960 }
961
962 b = bch2_btree_node_get(trans, path, tmp.k, level, lock_type, trace_ip);
963 ret = PTR_ERR_OR_ZERO(b);
964 if (unlikely(ret))
965 goto err;
966
967 if (likely(!trans->journal_replay_not_finished &&
968 tmp.k->k.type == KEY_TYPE_btree_ptr_v2) &&
969 unlikely(b != btree_node_mem_ptr(tmp.k)))
970 btree_node_mem_ptr_set(trans, path, level + 1, b);
971
972 if (btree_node_read_locked(path, level + 1))
973 btree_node_unlock(trans, path, level + 1);
974
975 mark_btree_node_locked(trans, path, level,
976 (enum btree_node_locked_type) lock_type);
977 path->level = level;
978 bch2_btree_path_level_init(trans, path, b);
979
980 bch2_btree_path_verify_locks(path);
981 err:
982 bch2_bkey_buf_exit(&tmp, c);
983 return ret;
984 }
985
986 static int bch2_btree_path_traverse_all(struct btree_trans *trans)
987 {
988 struct bch_fs *c = trans->c;
989 struct btree_path *path;
990 unsigned long trace_ip = _RET_IP_;
991 unsigned i;
992 int ret = 0;
993
994 if (trans->in_traverse_all)
995 return -BCH_ERR_transaction_restart_in_traverse_all;
996
997 trans->in_traverse_all = true;
998 retry_all:
999 trans->restarted = 0;
1000 trans->last_restarted_ip = 0;
1001
1002 trans_for_each_path(trans, path, i)
1003 path->should_be_locked = false;
1004
1005 btree_trans_sort_paths(trans);
1006
1007 bch2_trans_unlock(trans);
1008 cond_resched();
1009 trans_set_locked(trans, false);
1010
1011 if (unlikely(trans->memory_allocation_failure)) {
1012 struct closure cl;
1013
1014 closure_init_stack(&cl);
1015
1016 do {
1017 ret = bch2_btree_cache_cannibalize_lock(trans, &cl);
1018 closure_sync(&cl);
1019 } while (ret);
1020 }
1021
1022 /* Now, redo traversals in correct order: */
1023 i = 0;
1024 while (i < trans->nr_sorted) {
1025 btree_path_idx_t idx = trans->sorted[i];
1026
1027 /*
1028 * Traversing a path can cause another path to be added at about
1029 * the same position:
1030 */
1031 if (trans->paths[idx].uptodate) {
1032 __btree_path_get(trans, &trans->paths[idx], false);
1033 ret = bch2_btree_path_traverse_one(trans, idx, 0, _THIS_IP_);
1034 __btree_path_put(trans, &trans->paths[idx], false);
1035
1036 if (bch2_err_matches(ret, BCH_ERR_transaction_restart) ||
1037 bch2_err_matches(ret, ENOMEM))
1038 goto retry_all;
1039 if (ret)
1040 goto err;
1041 } else {
1042 i++;
1043 }
1044 }
1045
1046 /*
1047 * We used to assert that all paths had been traversed here
1048 * (path->uptodate < BTREE_ITER_NEED_TRAVERSE); however, since
1049 * path->should_be_locked is not set yet, we might have unlocked and
1050 * then failed to relock a path - that's fine.
1051 */
1052 err:
1053 bch2_btree_cache_cannibalize_unlock(trans);
1054
1055 trans->in_traverse_all = false;
1056
1057 trace_and_count(c, trans_traverse_all, trans, trace_ip);
1058 return ret;
1059 }
1060
1061 static inline bool btree_path_check_pos_in_node(struct btree_path *path,
1062 unsigned l, int check_pos)
1063 {
1064 if (check_pos < 0 && btree_path_pos_before_node(path, path->l[l].b))
1065 return false;
1066 if (check_pos > 0 && btree_path_pos_after_node(path, path->l[l].b))
1067 return false;
1068 return true;
1069 }
1070
1071 static inline bool btree_path_good_node(struct btree_trans *trans,
1072 struct btree_path *path,
1073 unsigned l, int check_pos)
1074 {
1075 return is_btree_node(path, l) &&
1076 bch2_btree_node_relock(trans, path, l) &&
1077 btree_path_check_pos_in_node(path, l, check_pos);
1078 }
1079
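/*
 * Move the path down to @new_level: drop locks on any higher levels we no
 * longer want locked and mark the path as needing traversal.
 */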
1080 static void btree_path_set_level_down(struct btree_trans *trans,
1081 struct btree_path *path,
1082 unsigned new_level)
1083 {
1084 unsigned l;
1085
1086 path->level = new_level;
1087
1088 for (l = path->level + 1; l < BTREE_MAX_DEPTH; l++)
1089 if (btree_lock_want(path, l) == BTREE_NODE_UNLOCKED)
1090 btree_node_unlock(trans, path, l);
1091
1092 btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
1093 bch2_btree_path_verify(trans, path);
1094 }
1095
1096 static noinline unsigned __btree_path_up_until_good_node(struct btree_trans *trans,
1097 struct btree_path *path,
1098 int check_pos)
1099 {
1100 unsigned i, l = path->level;
1101 again:
1102 while (btree_path_node(path, l) &&
1103 !btree_path_good_node(trans, path, l, check_pos))
1104 __btree_path_set_level_up(trans, path, l++);
1105
1106 /* If we need intent locks, take them too: */
1107 for (i = l + 1;
1108 i < path->locks_want && btree_path_node(path, i);
1109 i++)
1110 if (!bch2_btree_node_relock(trans, path, i)) {
1111 while (l <= i)
1112 __btree_path_set_level_up(trans, path, l++);
1113 goto again;
1114 }
1115
1116 return l;
1117 }
1118
1119 static inline unsigned btree_path_up_until_good_node(struct btree_trans *trans,
1120 struct btree_path *path,
1121 int check_pos)
1122 {
1123 return likely(btree_node_locked(path, path->level) &&
1124 btree_path_check_pos_in_node(path, path->level, check_pos))
1125 ? path->level
1126 : __btree_path_up_until_good_node(trans, path, check_pos);
1127 }
1128
1129 /*
1130 * This is the main state machine for walking down the btree - walks down to a
1131 * specified depth
1132 *
1133 * Returns 0 on success, -EIO on error (error reading in a btree node).
1134 *
1135 * On error, caller (peek_node()/peek_key()) must return NULL; the error is
1136 * stashed in the iterator and returned from bch2_trans_exit().
1137 */
1138 int bch2_btree_path_traverse_one(struct btree_trans *trans,
1139 btree_path_idx_t path_idx,
1140 unsigned flags,
1141 unsigned long trace_ip)
1142 {
1143 struct btree_path *path = &trans->paths[path_idx];
1144 unsigned depth_want = path->level;
1145 int ret = -((int) trans->restarted);
1146
1147 if (unlikely(ret))
1148 goto out;
1149
1150 if (unlikely(!trans->srcu_held))
1151 bch2_trans_srcu_lock(trans);
1152
1153 trace_btree_path_traverse_start(trans, path);
1154
1155 /*
1156 * Ensure we obey path->should_be_locked: if it's set, we can't unlock
1157 * and re-traverse the path without a transaction restart:
1158 */
1159 if (path->should_be_locked) {
1160 ret = bch2_btree_path_relock(trans, path, trace_ip);
1161 goto out;
1162 }
1163
1164 if (path->cached) {
1165 ret = bch2_btree_path_traverse_cached(trans, path_idx, flags);
1166 goto out;
1167 }
1168
1169 path = &trans->paths[path_idx];
1170
1171 if (unlikely(path->level >= BTREE_MAX_DEPTH))
1172 goto out_uptodate;
1173
1174 path->level = btree_path_up_until_good_node(trans, path, 0);
1175 unsigned max_level = path->level;
1176
1177 EBUG_ON(btree_path_node(path, path->level) &&
1178 !btree_node_locked(path, path->level));
1179
1180 /*
1181 * Note: path->nodes[path->level] may be temporarily NULL here - that
1182 * would indicate to other code that we got to the end of the btree,
1183 * here it indicates that relocking the root failed - it's critical that
1184 * btree_path_lock_root() comes next and that it can't fail
1185 */
1186 while (path->level > depth_want) {
1187 ret = btree_path_node(path, path->level)
1188 ? btree_path_down(trans, path, flags, trace_ip)
1189 : btree_path_lock_root(trans, path, depth_want, trace_ip);
1190 if (unlikely(ret)) {
1191 if (ret == 1) {
1192 /*
1193 * No nodes at this level - got to the end of
1194 * the btree:
1195 */
1196 ret = 0;
1197 goto out;
1198 }
1199
1200 __bch2_btree_path_unlock(trans, path);
1201 path->level = depth_want;
1202 path->l[path->level].b = ERR_PTR(ret);
1203 goto out;
1204 }
1205 }
1206
1207 if (unlikely(max_level > path->level)) {
1208 struct btree_path *linked;
1209 unsigned iter;
1210
1211 trans_for_each_path_with_node(trans, path_l(path)->b, linked, iter)
1212 for (unsigned j = path->level + 1; j < max_level; j++)
1213 linked->l[j] = path->l[j];
1214 }
1215
1216 out_uptodate:
1217 path->uptodate = BTREE_ITER_UPTODATE;
1218 trace_btree_path_traverse_end(trans, path);
1219 out:
1220 if (bch2_err_matches(ret, BCH_ERR_transaction_restart) != !!trans->restarted)
1221 panic("ret %s (%i) trans->restarted %s (%i)\n",
1222 bch2_err_str(ret), ret,
1223 bch2_err_str(trans->restarted), trans->restarted);
1224 bch2_btree_path_verify(trans, path);
1225 return ret;
1226 }
1227
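/*
 * Copy one btree_path over another, taking an additional reference on every
 * node lock the source path holds.
 */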
1228 static inline void btree_path_copy(struct btree_trans *trans, struct btree_path *dst,
1229 struct btree_path *src)
1230 {
1231 unsigned i, offset = offsetof(struct btree_path, pos);
1232
1233 memcpy((void *) dst + offset,
1234 (void *) src + offset,
1235 sizeof(struct btree_path) - offset);
1236
1237 for (i = 0; i < BTREE_MAX_DEPTH; i++) {
1238 unsigned t = btree_node_locked_type(dst, i);
1239
1240 if (t != BTREE_NODE_UNLOCKED)
1241 six_lock_increment(&dst->l[i].b->c.lock, t);
1242 }
1243 }
1244
1245 static btree_path_idx_t btree_path_clone(struct btree_trans *trans, btree_path_idx_t src,
1246 bool intent, unsigned long ip)
1247 {
1248 btree_path_idx_t new = btree_path_alloc(trans, src);
1249 btree_path_copy(trans, trans->paths + new, trans->paths + src);
1250 __btree_path_get(trans, trans->paths + new, intent);
1251 #ifdef TRACK_PATH_ALLOCATED
1252 trans->paths[new].ip_allocated = ip;
1253 #endif
1254 return new;
1255 }
1256
1257 __flatten
1258 btree_path_idx_t __bch2_btree_path_make_mut(struct btree_trans *trans,
1259 btree_path_idx_t path, bool intent, unsigned long ip)
1260 {
1261 struct btree_path *old = trans->paths + path;
1262 __btree_path_put(trans, trans->paths + path, intent);
1263 path = btree_path_clone(trans, path, intent, ip);
1264 trace_btree_path_clone(trans, old, trans->paths + path);
1265 trans->paths[path].preserve = false;
1266 return path;
1267 }
1268
1269 btree_path_idx_t __must_check
1270 __bch2_btree_path_set_pos(struct btree_trans *trans,
1271 btree_path_idx_t path_idx, struct bpos new_pos,
1272 bool intent, unsigned long ip)
1273 {
1274 int cmp = bpos_cmp(new_pos, trans->paths[path_idx].pos);
1275
1276 bch2_trans_verify_not_unlocked_or_in_restart(trans);
1277 EBUG_ON(!trans->paths[path_idx].ref);
1278
1279 trace_btree_path_set_pos(trans, trans->paths + path_idx, &new_pos);
1280
1281 path_idx = bch2_btree_path_make_mut(trans, path_idx, intent, ip);
1282
1283 struct btree_path *path = trans->paths + path_idx;
1284 path->pos = new_pos;
1285 trans->paths_sorted = false;
1286
1287 if (unlikely(path->cached)) {
1288 btree_node_unlock(trans, path, 0);
1289 path->l[0].b = ERR_PTR(-BCH_ERR_no_btree_node_up);
1290 btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
1291 goto out;
1292 }
1293
1294 unsigned level = btree_path_up_until_good_node(trans, path, cmp);
1295
1296 if (btree_path_node(path, level)) {
1297 struct btree_path_level *l = &path->l[level];
1298
1299 BUG_ON(!btree_node_locked(path, level));
1300 /*
1301 * We might have to skip over many keys, or just a few: try
1302 * advancing the node iterator, and if we have to skip over too
1303 * many keys just reinit it (or if we're rewinding, since that
1304 * is expensive).
1305 */
1306 if (cmp < 0 ||
1307 !btree_path_advance_to_pos(path, l, 8))
1308 bch2_btree_node_iter_init(&l->iter, l->b, &path->pos);
1309
1310 /*
1311 * Iterators to interior nodes should always be pointed at the first non-
1312 * whiteout:
1313 */
1314 if (unlikely(level))
1315 bch2_btree_node_iter_peek(&l->iter, l->b);
1316 }
1317
1318 if (unlikely(level != path->level)) {
1319 btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
1320 __bch2_btree_path_unlock(trans, path);
1321 }
1322 out:
1323 bch2_btree_path_verify(trans, path);
1324 return path_idx;
1325 }
1326
1327 /* Btree path: main interface: */
1328
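/*
 * have_path_at_pos()/have_node_at_pos(): look for an adjacent path in sorted
 * order that points at the same position (or at least at the same btree
 * node), i.e. a duplicate that can stand in for @path when it is freed.
 */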
1329 static struct btree_path *have_path_at_pos(struct btree_trans *trans, struct btree_path *path)
1330 {
1331 struct btree_path *sib;
1332
1333 sib = prev_btree_path(trans, path);
1334 if (sib && !btree_path_cmp(sib, path))
1335 return sib;
1336
1337 sib = next_btree_path(trans, path);
1338 if (sib && !btree_path_cmp(sib, path))
1339 return sib;
1340
1341 return NULL;
1342 }
1343
1344 static struct btree_path *have_node_at_pos(struct btree_trans *trans, struct btree_path *path)
1345 {
1346 struct btree_path *sib;
1347
1348 sib = prev_btree_path(trans, path);
1349 if (sib && sib->level == path->level && path_l(sib)->b == path_l(path)->b)
1350 return sib;
1351
1352 sib = next_btree_path(trans, path);
1353 if (sib && sib->level == path->level && path_l(sib)->b == path_l(path)->b)
1354 return sib;
1355
1356 return NULL;
1357 }
1358
1359 static inline void __bch2_path_free(struct btree_trans *trans, btree_path_idx_t path)
1360 {
1361 __bch2_btree_path_unlock(trans, trans->paths + path);
1362 btree_path_list_remove(trans, trans->paths + path);
1363 __clear_bit(path, trans->paths_allocated);
1364 }
1365
1366 static bool bch2_btree_path_can_relock(struct btree_trans *trans, struct btree_path *path)
1367 {
1368 unsigned l = path->level;
1369
1370 do {
1371 if (!btree_path_node(path, l))
1372 break;
1373
1374 if (!is_btree_node(path, l))
1375 return false;
1376
1377 if (path->l[l].lock_seq != path->l[l].b->c.lock.seq)
1378 return false;
1379
1380 l++;
1381 } while (l < path->locks_want);
1382
1383 return true;
1384 }
1385
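/*
 * Drop a reference to a path; when the last reference goes away the path is
 * freed if it is no longer needed, possibly handing its preserve and
 * should_be_locked flags over to a duplicate path at the same position.
 */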
1386 void bch2_path_put(struct btree_trans *trans, btree_path_idx_t path_idx, bool intent)
1387 {
1388 struct btree_path *path = trans->paths + path_idx, *dup;
1389
1390 if (!__btree_path_put(trans, path, intent))
1391 return;
1392
1393 dup = path->preserve
1394 ? have_path_at_pos(trans, path)
1395 : have_node_at_pos(trans, path);
1396
1397 trace_btree_path_free(trans, path_idx, dup);
1398
1399 if (!dup && !(!path->preserve && !is_btree_node(path, path->level)))
1400 return;
1401
1402 if (path->should_be_locked && !trans->restarted) {
1403 if (!dup)
1404 return;
1405
1406 if (!(trans->locked
1407 ? bch2_btree_path_relock_norestart(trans, dup)
1408 : bch2_btree_path_can_relock(trans, dup)))
1409 return;
1410 }
1411
1412 if (dup) {
1413 dup->preserve |= path->preserve;
1414 dup->should_be_locked |= path->should_be_locked;
1415 }
1416
1417 __bch2_path_free(trans, path_idx);
1418 }
1419
1420 static void bch2_path_put_nokeep(struct btree_trans *trans, btree_path_idx_t path,
1421 bool intent)
1422 {
1423 if (!__btree_path_put(trans, trans->paths + path, intent))
1424 return;
1425
1426 __bch2_path_free(trans, path);
1427 }
1428
1429 void __noreturn bch2_trans_restart_error(struct btree_trans *trans, u32 restart_count)
1430 {
1431 panic("trans->restart_count %u, should be %u, last restarted by %pS\n",
1432 trans->restart_count, restart_count,
1433 (void *) trans->last_begin_ip);
1434 }
1435
1436 static void __noreturn bch2_trans_in_restart_error(struct btree_trans *trans)
1437 {
1438 #ifdef CONFIG_BCACHEFS_DEBUG
1439 struct printbuf buf = PRINTBUF;
1440 bch2_prt_backtrace(&buf, &trans->last_restarted_trace);
1441 panic("in transaction restart: %s, last restarted by\n%s",
1442 bch2_err_str(trans->restarted),
1443 buf.buf);
1444 #else
1445 panic("in transaction restart: %s, last restarted by %pS\n",
1446 bch2_err_str(trans->restarted),
1447 (void *) trans->last_restarted_ip);
1448 #endif
1449 }
1450
1451 void __noreturn bch2_trans_unlocked_or_in_restart_error(struct btree_trans *trans)
1452 {
1453 if (trans->restarted)
1454 bch2_trans_in_restart_error(trans);
1455
1456 if (!trans->locked)
1457 panic("trans should be locked, unlocked by %pS\n",
1458 (void *) trans->last_unlock_ip);
1459
1460 BUG();
1461 }
1462
1463 noinline __cold
1464 void bch2_trans_updates_to_text(struct printbuf *buf, struct btree_trans *trans)
1465 {
1466 prt_printf(buf, "%u transaction updates for %s journal seq %llu\n",
1467 trans->nr_updates, trans->fn, trans->journal_res.seq);
1468 printbuf_indent_add(buf, 2);
1469
1470 trans_for_each_update(trans, i) {
1471 struct bkey_s_c old = { &i->old_k, i->old_v };
1472
1473 prt_str(buf, "update: btree=");
1474 bch2_btree_id_to_text(buf, i->btree_id);
1475 prt_printf(buf, " cached=%u %pS\n",
1476 i->cached,
1477 (void *) i->ip_allocated);
1478
1479 prt_printf(buf, " old ");
1480 bch2_bkey_val_to_text(buf, trans->c, old);
1481 prt_newline(buf);
1482
1483 prt_printf(buf, " new ");
1484 bch2_bkey_val_to_text(buf, trans->c, bkey_i_to_s_c(i->k));
1485 prt_newline(buf);
1486 }
1487
1488 for (struct jset_entry *e = trans->journal_entries;
1489 e != btree_trans_journal_entries_top(trans);
1490 e = vstruct_next(e)) {
1491 bch2_journal_entry_to_text(buf, trans->c, e);
1492 prt_newline(buf);
1493 }
1494
1495 printbuf_indent_sub(buf, 2);
1496 }
1497
1498 static void bch2_btree_path_to_text_short(struct printbuf *out, struct btree_trans *trans, btree_path_idx_t path_idx)
1499 {
1500 struct btree_path *path = trans->paths + path_idx;
1501
1502 prt_printf(out, "path: idx %3u ref %u:%u %c %c %c ",
1503 path_idx, path->ref, path->intent_ref,
1504 path->preserve ? 'P' : ' ',
1505 path->should_be_locked ? 'S' : ' ',
1506 path->cached ? 'C' : 'B');
1507 bch2_btree_id_level_to_text(out, path->btree_id, path->level);
1508 prt_str(out, " pos ");
1509 bch2_bpos_to_text(out, path->pos);
1510
1511 if (!path->cached && btree_node_locked(path, path->level)) {
1512 prt_char(out, ' ');
1513 struct btree *b = path_l(path)->b;
1514 bch2_bpos_to_text(out, b->data->min_key);
1515 prt_char(out, '-');
1516 bch2_bpos_to_text(out, b->key.k.p);
1517 }
1518
1519 #ifdef TRACK_PATH_ALLOCATED
1520 prt_printf(out, " %pS", (void *) path->ip_allocated);
1521 #endif
1522 }
1523
1524 static const char *btree_node_locked_str(enum btree_node_locked_type t)
1525 {
1526 switch (t) {
1527 case BTREE_NODE_UNLOCKED:
1528 return "unlocked";
1529 case BTREE_NODE_READ_LOCKED:
1530 return "read";
1531 case BTREE_NODE_INTENT_LOCKED:
1532 return "intent";
1533 case BTREE_NODE_WRITE_LOCKED:
1534 return "write";
1535 default:
1536 return NULL;
1537 }
1538 }
1539
1540 void bch2_btree_path_to_text(struct printbuf *out, struct btree_trans *trans, btree_path_idx_t path_idx)
1541 {
1542 bch2_btree_path_to_text_short(out, trans, path_idx);
1543
1544 struct btree_path *path = trans->paths + path_idx;
1545
1546 prt_printf(out, " uptodate %u locks_want %u", path->uptodate, path->locks_want);
1547 prt_newline(out);
1548
1549 printbuf_indent_add(out, 2);
1550 for (unsigned l = 0; l < BTREE_MAX_DEPTH; l++) {
1551 prt_printf(out, "l=%u locks %s seq %u node ", l,
1552 btree_node_locked_str(btree_node_locked_type(path, l)),
1553 path->l[l].lock_seq);
1554
1555 int ret = PTR_ERR_OR_ZERO(path->l[l].b);
1556 if (ret)
1557 prt_str(out, bch2_err_str(ret));
1558 else
1559 prt_printf(out, "%px", path->l[l].b);
1560 prt_newline(out);
1561 }
1562 printbuf_indent_sub(out, 2);
1563 }
1564
1565 static noinline __cold
1566 void __bch2_trans_paths_to_text(struct printbuf *out, struct btree_trans *trans,
1567 bool nosort)
1568 {
1569 struct trans_for_each_path_inorder_iter iter;
1570
1571 if (!nosort)
1572 btree_trans_sort_paths(trans);
1573
1574 trans_for_each_path_idx_inorder(trans, iter) {
1575 bch2_btree_path_to_text_short(out, trans, iter.path_idx);
1576 prt_newline(out);
1577 }
1578 }
1579
1580 noinline __cold
1581 void bch2_trans_paths_to_text(struct printbuf *out, struct btree_trans *trans)
1582 {
1583 __bch2_trans_paths_to_text(out, trans, false);
1584 }
1585
1586 static noinline __cold
1587 void __bch2_dump_trans_paths_updates(struct btree_trans *trans, bool nosort)
1588 {
1589 struct printbuf buf = PRINTBUF;
1590
1591 __bch2_trans_paths_to_text(&buf, trans, nosort);
1592 bch2_trans_updates_to_text(&buf, trans);
1593
1594 bch2_print_str(trans->c, buf.buf);
1595 printbuf_exit(&buf);
1596 }
1597
1598 noinline __cold
1599 void bch2_dump_trans_paths_updates(struct btree_trans *trans)
1600 {
1601 __bch2_dump_trans_paths_updates(trans, false);
1602 }
1603
1604 noinline __cold
1605 static void bch2_trans_update_max_paths(struct btree_trans *trans)
1606 {
1607 struct btree_transaction_stats *s = btree_trans_stats(trans);
1608 struct printbuf buf = PRINTBUF;
1609 size_t nr = bitmap_weight(trans->paths_allocated, trans->nr_paths);
1610
1611 bch2_trans_paths_to_text(&buf, trans);
1612
1613 if (!buf.allocation_failure) {
1614 mutex_lock(&s->lock);
1615 if (nr > s->nr_max_paths) {
1616 s->nr_max_paths = nr;
1617 swap(s->max_paths_text, buf.buf);
1618 }
1619 mutex_unlock(&s->lock);
1620 }
1621
1622 printbuf_exit(&buf);
1623
1624 trans->nr_paths_max = nr;
1625 }
1626
1627 noinline __cold
1628 int __bch2_btree_trans_too_many_iters(struct btree_trans *trans)
1629 {
1630 if (trace_trans_restart_too_many_iters_enabled()) {
1631 struct printbuf buf = PRINTBUF;
1632
1633 bch2_trans_paths_to_text(&buf, trans);
1634 trace_trans_restart_too_many_iters(trans, _THIS_IP_, buf.buf);
1635 printbuf_exit(&buf);
1636 }
1637
1638 count_event(trans->c, trans_restart_too_many_iters);
1639
1640 return btree_trans_restart(trans, BCH_ERR_transaction_restart_too_many_iters);
1641 }
1642
1643 static noinline void btree_path_overflow(struct btree_trans *trans)
1644 {
1645 bch2_dump_trans_paths_updates(trans);
1646 bch_err(trans->c, "trans path overflow");
1647 }
1648
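/*
 * Double the size of the transaction's path tables: the paths_allocated
 * bitmap, the paths array, the sorted index array and the updates array are
 * all carved out of a single allocation and published with
 * rcu_assign_pointer().
 */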
1649 static noinline void btree_paths_realloc(struct btree_trans *trans)
1650 {
1651 unsigned nr = trans->nr_paths * 2;
1652
1653 void *p = kvzalloc(BITS_TO_LONGS(nr) * sizeof(unsigned long) +
1654 sizeof(struct btree_trans_paths) +
1655 nr * sizeof(struct btree_path) +
1656 nr * sizeof(btree_path_idx_t) + 8 +
1657 nr * sizeof(struct btree_insert_entry), GFP_KERNEL|__GFP_NOFAIL);
1658
1659 unsigned long *paths_allocated = p;
1660 memcpy(paths_allocated, trans->paths_allocated, BITS_TO_LONGS(trans->nr_paths) * sizeof(unsigned long));
1661 p += BITS_TO_LONGS(nr) * sizeof(unsigned long);
1662
1663 p += sizeof(struct btree_trans_paths);
1664 struct btree_path *paths = p;
1665 *trans_paths_nr(paths) = nr;
1666 memcpy(paths, trans->paths, trans->nr_paths * sizeof(struct btree_path));
1667 p += nr * sizeof(struct btree_path);
1668
1669 btree_path_idx_t *sorted = p;
1670 memcpy(sorted, trans->sorted, trans->nr_sorted * sizeof(btree_path_idx_t));
1671 p += nr * sizeof(btree_path_idx_t) + 8;
1672
1673 struct btree_insert_entry *updates = p;
1674 memcpy(updates, trans->updates, trans->nr_paths * sizeof(struct btree_insert_entry));
1675
1676 unsigned long *old = trans->paths_allocated;
1677
1678 rcu_assign_pointer(trans->paths_allocated, paths_allocated);
1679 rcu_assign_pointer(trans->paths, paths);
1680 rcu_assign_pointer(trans->sorted, sorted);
1681 rcu_assign_pointer(trans->updates, updates);
1682
1683 trans->nr_paths = nr;
1684
1685 if (old != trans->_paths_allocated)
1686 kfree_rcu_mightsleep(old);
1687 }
1688
1689 static inline btree_path_idx_t btree_path_alloc(struct btree_trans *trans,
1690 btree_path_idx_t pos)
1691 {
1692 btree_path_idx_t idx = find_first_zero_bit(trans->paths_allocated, trans->nr_paths);
1693
1694 if (unlikely(idx == trans->nr_paths)) {
1695 if (trans->nr_paths == BTREE_ITER_MAX) {
1696 btree_path_overflow(trans);
1697 return 0;
1698 }
1699
1700 btree_paths_realloc(trans);
1701 }
1702
1703 /*
1704 * Do this before marking the new path as allocated, since it won't be
1705 * initialized yet:
1706 */
1707 if (unlikely(idx > trans->nr_paths_max))
1708 bch2_trans_update_max_paths(trans);
1709
1710 __set_bit(idx, trans->paths_allocated);
1711
1712 struct btree_path *path = &trans->paths[idx];
1713 path->ref = 0;
1714 path->intent_ref = 0;
1715 path->nodes_locked = 0;
1716
1717 btree_path_list_add(trans, pos, idx);
1718 trans->paths_sorted = false;
1719 return idx;
1720 }
1721
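/*
 * Get a path pointing at @pos in @btree_id: if an existing path matches the
 * requested btree, cache mode and level we reuse it (taking a ref and moving
 * it to @pos), otherwise a new path is allocated and initialized.
 */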
1722 btree_path_idx_t bch2_path_get(struct btree_trans *trans,
1723 enum btree_id btree_id, struct bpos pos,
1724 unsigned locks_want, unsigned level,
1725 unsigned flags, unsigned long ip)
1726 {
1727 struct btree_path *path;
1728 bool cached = flags & BTREE_ITER_cached;
1729 bool intent = flags & BTREE_ITER_intent;
1730 struct trans_for_each_path_inorder_iter iter;
1731 btree_path_idx_t path_pos = 0, path_idx;
1732
1733 bch2_trans_verify_not_unlocked_or_in_restart(trans);
1734 bch2_trans_verify_locks(trans);
1735
1736 btree_trans_sort_paths(trans);
1737
1738 trans_for_each_path_inorder(trans, path, iter) {
1739 if (__btree_path_cmp(path,
1740 btree_id,
1741 cached,
1742 pos,
1743 level) > 0)
1744 break;
1745
1746 path_pos = iter.path_idx;
1747 }
1748
1749 if (path_pos &&
1750 trans->paths[path_pos].cached == cached &&
1751 trans->paths[path_pos].btree_id == btree_id &&
1752 trans->paths[path_pos].level == level) {
1753 trace_btree_path_get(trans, trans->paths + path_pos, &pos);
1754
1755 __btree_path_get(trans, trans->paths + path_pos, intent);
1756 path_idx = bch2_btree_path_set_pos(trans, path_pos, pos, intent, ip);
1757 path = trans->paths + path_idx;
1758 } else {
1759 path_idx = btree_path_alloc(trans, path_pos);
1760 path = trans->paths + path_idx;
1761
1762 __btree_path_get(trans, path, intent);
1763 path->pos = pos;
1764 path->btree_id = btree_id;
1765 path->cached = cached;
1766 path->uptodate = BTREE_ITER_NEED_TRAVERSE;
1767 path->should_be_locked = false;
1768 path->level = level;
1769 path->locks_want = locks_want;
1770 path->nodes_locked = 0;
1771 for (unsigned i = 0; i < ARRAY_SIZE(path->l); i++)
1772 path->l[i].b = ERR_PTR(-BCH_ERR_no_btree_node_init);
1773 #ifdef TRACK_PATH_ALLOCATED
1774 path->ip_allocated = ip;
1775 #endif
1776 trans->paths_sorted = false;
1777
1778 trace_btree_path_alloc(trans, path);
1779 }
1780
1781 if (!(flags & BTREE_ITER_nopreserve))
1782 path->preserve = true;
1783
1784 if (path->intent_ref)
1785 locks_want = max(locks_want, level + 1);
1786
1787 /*
1788 * If the path has locks_want greater than requested, we don't downgrade
1789 * it here - on transaction restart because btree node split needs to
1790 * upgrade locks, we might be putting/getting the iterator again.
1791 * Downgrading iterators only happens via bch2_trans_downgrade(), after
1792 * a successful transaction commit.
1793 */
1794
1795 locks_want = min(locks_want, BTREE_MAX_DEPTH);
1796 if (locks_want > path->locks_want)
1797 bch2_btree_path_upgrade_noupgrade_sibs(trans, path, locks_want, NULL);
1798
1799 return path_idx;
1800 }
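
/*
 * Illustrative sketch (not a caller in this file) of pairing bch2_path_get()
 * with bch2_path_put(); @btree and @pos are placeholders:
 *
 *	btree_path_idx_t idx = bch2_path_get(trans, btree, pos, 0, 0,
 *					     BTREE_ITER_intent, _THIS_IP_);
 *	int ret = bch2_btree_path_traverse(trans, idx, 0);
 *	if (!ret) {
 *		struct bkey u;
 *		struct bkey_s_c k = bch2_btree_path_peek_slot(trans->paths + idx, &u);
 *		... use k while the path stays locked ...
 *	}
 *	bch2_path_put(trans, idx, true);
 *
 * The final bool passed to bch2_path_put() should match whether the path was
 * taken with BTREE_ITER_intent, since it drops the corresponding ref.
 */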
1801
1802 btree_path_idx_t bch2_path_get_unlocked_mut(struct btree_trans *trans,
1803 enum btree_id btree_id,
1804 unsigned level,
1805 struct bpos pos)
1806 {
1807 btree_path_idx_t path_idx = bch2_path_get(trans, btree_id, pos, level + 1, level,
1808 BTREE_ITER_nopreserve|
1809 BTREE_ITER_intent, _RET_IP_);
1810 path_idx = bch2_btree_path_make_mut(trans, path_idx, true, _RET_IP_);
1811
1812 struct btree_path *path = trans->paths + path_idx;
1813 bch2_btree_path_downgrade(trans, path);
1814 __bch2_btree_path_unlock(trans, path);
1815 return path_idx;
1816 }
1817
1818 struct bkey_s_c bch2_btree_path_peek_slot(struct btree_path *path, struct bkey *u)
1819 {
1820
1821 struct btree_path_level *l = path_l(path);
1822 struct bkey_packed *_k;
1823 struct bkey_s_c k;
1824
1825 if (unlikely(!l->b))
1826 return bkey_s_c_null;
1827
1828 EBUG_ON(path->uptodate != BTREE_ITER_UPTODATE);
1829 EBUG_ON(!btree_node_locked(path, path->level));
1830
1831 if (!path->cached) {
1832 _k = bch2_btree_node_iter_peek_all(&l->iter, l->b);
1833 k = _k ? bkey_disassemble(l->b, _k, u) : bkey_s_c_null;
1834
1835 EBUG_ON(k.k && bkey_deleted(k.k) && bpos_eq(k.k->p, path->pos));
1836
1837 if (!k.k || !bpos_eq(path->pos, k.k->p))
1838 goto hole;
1839 } else {
1840 struct bkey_cached *ck = (void *) path->l[0].b;
1841 if (!ck)
1842 return bkey_s_c_null;
1843
1844 EBUG_ON(path->btree_id != ck->key.btree_id ||
1845 !bkey_eq(path->pos, ck->key.pos));
1846
1847 *u = ck->k->k;
1848 k = (struct bkey_s_c) { u, &ck->k->v };
1849 }
1850
1851 return k;
1852 hole:
1853 bkey_init(u);
1854 u->p = path->pos;
1855 return (struct bkey_s_c) { u, NULL };
1856 }
1857
1858 void bch2_set_btree_iter_dontneed(struct btree_trans *trans, struct btree_iter *iter)
1859 {
1860 if (!iter->path || trans->restarted)
1861 return;
1862
1863 struct btree_path *path = btree_iter_path(trans, iter);
1864 path->preserve = false;
1865 if (path->ref == 1)
1866 path->should_be_locked = false;
1867 }
1868 /* Btree iterators: */
1869
1870 int __must_check
1871 __bch2_btree_iter_traverse(struct btree_trans *trans, struct btree_iter *iter)
1872 {
1873 return bch2_btree_path_traverse(trans, iter->path, iter->flags);
1874 }
1875
1876 int __must_check
1877 bch2_btree_iter_traverse(struct btree_trans *trans, struct btree_iter *iter)
1878 {
1879 bch2_trans_verify_not_unlocked_or_in_restart(trans);
1880
1881 iter->path = bch2_btree_path_set_pos(trans, iter->path,
1882 btree_iter_search_key(iter),
1883 iter->flags & BTREE_ITER_intent,
1884 btree_iter_ip_allocated(iter));
1885
1886 int ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
1887 if (ret)
1888 return ret;
1889
1890 struct btree_path *path = btree_iter_path(trans, iter);
1891 if (btree_path_node(path, path->level))
1892 btree_path_set_should_be_locked(trans, path);
1893 return 0;
1894 }
1895
1896 /* Iterate across nodes (leaf and interior nodes) */
1897
1898 struct btree *bch2_btree_iter_peek_node(struct btree_trans *trans,
1899 struct btree_iter *iter)
1900 {
1901 struct btree *b = NULL;
1902 int ret;
1903
1904 EBUG_ON(trans->paths[iter->path].cached);
1905 bch2_btree_iter_verify(trans, iter);
1906
1907 ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
1908 if (ret)
1909 goto err;
1910
1911 struct btree_path *path = btree_iter_path(trans, iter);
1912 b = btree_path_node(path, path->level);
1913 if (!b)
1914 goto out;
1915
1916 BUG_ON(bpos_lt(b->key.k.p, iter->pos));
1917
1918 bkey_init(&iter->k);
1919 iter->k.p = iter->pos = b->key.k.p;
1920
1921 iter->path = bch2_btree_path_set_pos(trans, iter->path, b->key.k.p,
1922 iter->flags & BTREE_ITER_intent,
1923 btree_iter_ip_allocated(iter));
1924 btree_path_set_should_be_locked(trans, btree_iter_path(trans, iter));
1925 out:
1926 bch2_btree_iter_verify_entry_exit(iter);
1927 bch2_btree_iter_verify(trans, iter);
1928
1929 return b;
1930 err:
1931 b = ERR_PTR(ret);
1932 goto out;
1933 }
1934
1935 /* Only kept for -tools */
1936 struct btree *bch2_btree_iter_peek_node_and_restart(struct btree_trans *trans,
1937 struct btree_iter *iter)
1938 {
1939 struct btree *b;
1940
1941 while (b = bch2_btree_iter_peek_node(trans, iter),
1942 bch2_err_matches(PTR_ERR_OR_ZERO(b), BCH_ERR_transaction_restart))
1943 bch2_trans_begin(trans);
1944
1945 return b;
1946 }
1947
1948 struct btree *bch2_btree_iter_next_node(struct btree_trans *trans, struct btree_iter *iter)
1949 {
1950 struct btree *b = NULL;
1951 int ret;
1952
1953 EBUG_ON(trans->paths[iter->path].cached);
1954 bch2_trans_verify_not_unlocked_or_in_restart(trans);
1955 bch2_btree_iter_verify(trans, iter);
1956
1957 ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
1958 if (ret)
1959 goto err;
1960
1961
1962 struct btree_path *path = btree_iter_path(trans, iter);
1963
1964 /* already at end? */
1965 if (!btree_path_node(path, path->level))
1966 return NULL;
1967
1968 /* got to end? */
1969 if (!btree_path_node(path, path->level + 1)) {
1970 btree_path_set_level_up(trans, path);
1971 return NULL;
1972 }
1973
1974 /*
1975 * We don't correctly handle nodes with extra intent locks here:
1976 * downgrade so we don't violate locking invariants
1977 */
1978 bch2_btree_path_downgrade(trans, path);
1979
1980 if (!bch2_btree_node_relock(trans, path, path->level + 1)) {
1981 __bch2_btree_path_unlock(trans, path);
1982 path->l[path->level].b = ERR_PTR(-BCH_ERR_no_btree_node_relock);
1983 path->l[path->level + 1].b = ERR_PTR(-BCH_ERR_no_btree_node_relock);
1984 btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
1985 trace_and_count(trans->c, trans_restart_relock_next_node, trans, _THIS_IP_, path);
1986 ret = btree_trans_restart(trans, BCH_ERR_transaction_restart_relock);
1987 goto err;
1988 }
1989
1990 b = btree_path_node(path, path->level + 1);
1991
1992 if (bpos_eq(iter->pos, b->key.k.p)) {
1993 __btree_path_set_level_up(trans, path, path->level++);
1994 } else {
1995 if (btree_lock_want(path, path->level + 1) == BTREE_NODE_UNLOCKED)
1996 btree_node_unlock(trans, path, path->level + 1);
1997
1998 /*
1999 * Haven't gotten to the end of the parent node: go back down to
2000 * the next child node
2001 */
2002 iter->path = bch2_btree_path_set_pos(trans, iter->path,
2003 bpos_successor(iter->pos),
2004 iter->flags & BTREE_ITER_intent,
2005 btree_iter_ip_allocated(iter));
2006
2007 path = btree_iter_path(trans, iter);
2008 btree_path_set_level_down(trans, path, iter->min_depth);
2009
2010 ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
2011 if (ret)
2012 goto err;
2013
2014 path = btree_iter_path(trans, iter);
2015 b = path->l[path->level].b;
2016 }
2017
2018 bkey_init(&iter->k);
2019 iter->k.p = iter->pos = b->key.k.p;
2020
2021 iter->path = bch2_btree_path_set_pos(trans, iter->path, b->key.k.p,
2022 iter->flags & BTREE_ITER_intent,
2023 btree_iter_ip_allocated(iter));
2024 btree_path_set_should_be_locked(trans, btree_iter_path(trans, iter));
2025 EBUG_ON(btree_iter_path(trans, iter)->uptodate);
2026 out:
2027 bch2_btree_iter_verify_entry_exit(iter);
2028 bch2_btree_iter_verify(trans, iter);
2029
2030 return b;
2031 err:
2032 b = ERR_PTR(ret);
2033 goto out;
2034 }
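
/*
 * Illustrative sketch of walking every node in a btree with the two functions
 * above; the btree id and the zero locks_want/depth/flags are just example
 * parameters:
 *
 *	struct btree_iter iter;
 *	struct btree *b;
 *
 *	bch2_trans_node_iter_init(trans, &iter, BTREE_ID_extents, POS_MIN, 0, 0, 0);
 *	for (b = bch2_btree_iter_peek_node(trans, &iter);
 *	     b && !IS_ERR(b);
 *	     b = bch2_btree_iter_next_node(trans, &iter)) {
 *		... inspect b ...
 *	}
 *	bch2_trans_iter_exit(trans, &iter);
 *
 * An ERR_PTR() return may be a transaction restart, in which case the whole
 * walk has to be retried from bch2_trans_begin() - see
 * bch2_btree_iter_peek_node_and_restart() above.
 */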
2035
2036 /* Iterate across keys (in leaf nodes only) */
2037
2038 inline bool bch2_btree_iter_advance(struct btree_trans *trans, struct btree_iter *iter)
2039 {
2040 struct bpos pos = iter->k.p;
2041 bool ret = !(iter->flags & BTREE_ITER_all_snapshots
2042 ? bpos_eq(pos, SPOS_MAX)
2043 : bkey_eq(pos, SPOS_MAX));
2044
2045 if (ret && !(iter->flags & BTREE_ITER_is_extents))
2046 pos = bkey_successor(iter, pos);
2047 bch2_btree_iter_set_pos(trans, iter, pos);
2048 return ret;
2049 }
2050
2051 inline bool bch2_btree_iter_rewind(struct btree_trans *trans, struct btree_iter *iter)
2052 {
2053 struct bpos pos = bkey_start_pos(&iter->k);
2054 bool ret = !(iter->flags & BTREE_ITER_all_snapshots
2055 ? bpos_eq(pos, POS_MIN)
2056 : bkey_eq(pos, POS_MIN));
2057
2058 if (ret && !(iter->flags & BTREE_ITER_is_extents))
2059 pos = bkey_predecessor(iter, pos);
2060 bch2_btree_iter_set_pos(trans, iter, pos);
2061 return ret;
2062 }
2063
2064 static noinline
2065 void bch2_btree_trans_peek_prev_updates(struct btree_trans *trans, struct btree_iter *iter,
2066 struct bkey_s_c *k)
2067 {
2068 struct bpos end = path_l(btree_iter_path(trans, iter))->b->data->min_key;
2069
2070 trans_for_each_update(trans, i)
2071 if (!i->key_cache_already_flushed &&
2072 i->btree_id == iter->btree_id &&
2073 bpos_le(i->k->k.p, iter->pos) &&
2074 bpos_ge(i->k->k.p, k->k ? k->k->p : end)) {
2075 iter->k = i->k->k;
2076 *k = bkey_i_to_s_c(i->k);
2077 }
2078 }
2079
2080 static noinline
2081 void bch2_btree_trans_peek_updates(struct btree_trans *trans, struct btree_iter *iter,
2082 struct bkey_s_c *k)
2083 {
2084 struct btree_path *path = btree_iter_path(trans, iter);
2085 struct bpos end = path_l(path)->b->key.k.p;
2086
2087 trans_for_each_update(trans, i)
2088 if (!i->key_cache_already_flushed &&
2089 i->btree_id == iter->btree_id &&
2090 bpos_ge(i->k->k.p, path->pos) &&
2091 bpos_le(i->k->k.p, k->k ? k->k->p : end)) {
2092 iter->k = i->k->k;
2093 *k = bkey_i_to_s_c(i->k);
2094 }
2095 }
2096
2097 static noinline
2098 void bch2_btree_trans_peek_slot_updates(struct btree_trans *trans, struct btree_iter *iter,
2099 struct bkey_s_c *k)
2100 {
2101 trans_for_each_update(trans, i)
2102 if (!i->key_cache_already_flushed &&
2103 i->btree_id == iter->btree_id &&
2104 bpos_eq(i->k->k.p, iter->pos)) {
2105 iter->k = i->k->k;
2106 *k = bkey_i_to_s_c(i->k);
2107 }
2108 }
2109
2110 static struct bkey_i *bch2_btree_journal_peek(struct btree_trans *trans,
2111 struct btree_iter *iter,
2112 struct bpos end_pos)
2113 {
2114 struct btree_path *path = btree_iter_path(trans, iter);
2115
2116 return bch2_journal_keys_peek_max(trans->c, iter->btree_id,
2117 path->level,
2118 path->pos,
2119 end_pos,
2120 &iter->journal_idx);
2121 }
2122
2123 static noinline
2124 struct bkey_s_c btree_trans_peek_slot_journal(struct btree_trans *trans,
2125 struct btree_iter *iter)
2126 {
2127 struct btree_path *path = btree_iter_path(trans, iter);
2128 struct bkey_i *k = bch2_btree_journal_peek(trans, iter, path->pos);
2129
2130 if (k) {
2131 iter->k = k->k;
2132 return bkey_i_to_s_c(k);
2133 } else {
2134 return bkey_s_c_null;
2135 }
2136 }
2137
2138 static noinline
2139 void btree_trans_peek_journal(struct btree_trans *trans,
2140 struct btree_iter *iter,
2141 struct bkey_s_c *k)
2142 {
2143 struct btree_path *path = btree_iter_path(trans, iter);
2144 struct bkey_i *next_journal =
2145 bch2_btree_journal_peek(trans, iter,
2146 k->k ? k->k->p : path_l(path)->b->key.k.p);
2147 if (next_journal) {
2148 iter->k = next_journal->k;
2149 *k = bkey_i_to_s_c(next_journal);
2150 }
2151 }
2152
2153 static struct bkey_i *bch2_btree_journal_peek_prev(struct btree_trans *trans,
2154 struct btree_iter *iter,
2155 struct bpos end_pos)
2156 {
2157 struct btree_path *path = btree_iter_path(trans, iter);
2158
2159 return bch2_journal_keys_peek_prev_min(trans->c, iter->btree_id,
2160 path->level,
2161 path->pos,
2162 end_pos,
2163 &iter->journal_idx);
2164 }
2165
2166 static noinline
2167 void btree_trans_peek_prev_journal(struct btree_trans *trans,
2168 struct btree_iter *iter,
2169 struct bkey_s_c *k)
2170 {
2171 struct btree_path *path = btree_iter_path(trans, iter);
2172 struct bkey_i *next_journal =
2173 bch2_btree_journal_peek_prev(trans, iter,
2174 k->k ? k->k->p : path_l(path)->b->key.k.p);
2175
2176 if (next_journal) {
2177 iter->k = next_journal->k;
2178 *k = bkey_i_to_s_c(next_journal);
2179 }
2180 }
2181
2182 /*
2183 * Checks the btree key cache for a key at @pos and returns it if present, or
2184 * bkey_s_c_null:
2185 */
2186 static noinline
2187 struct bkey_s_c btree_trans_peek_key_cache(struct btree_trans *trans, struct btree_iter *iter,
2188 struct bpos pos)
2189 {
2190 struct bch_fs *c = trans->c;
2191 struct bkey u;
2192 struct bkey_s_c k;
2193 int ret;
2194
2195 bch2_trans_verify_not_unlocked_or_in_restart(trans);
2196
2197 if ((iter->flags & BTREE_ITER_key_cache_fill) &&
2198 bpos_eq(iter->pos, pos))
2199 return bkey_s_c_null;
2200
2201 if (!bch2_btree_key_cache_find(c, iter->btree_id, pos))
2202 return bkey_s_c_null;
2203
2204 if (!iter->key_cache_path)
2205 iter->key_cache_path = bch2_path_get(trans, iter->btree_id, pos,
2206 iter->flags & BTREE_ITER_intent, 0,
2207 iter->flags|BTREE_ITER_cached|
2208 BTREE_ITER_cached_nofill,
2209 _THIS_IP_);
2210
2211 iter->key_cache_path = bch2_btree_path_set_pos(trans, iter->key_cache_path, pos,
2212 iter->flags & BTREE_ITER_intent,
2213 btree_iter_ip_allocated(iter));
2214
2215 ret = bch2_btree_path_traverse(trans, iter->key_cache_path,
2216 iter->flags|BTREE_ITER_cached) ?:
2217 bch2_btree_path_relock(trans, btree_iter_path(trans, iter), _THIS_IP_);
2218 if (unlikely(ret))
2219 return bkey_s_c_err(ret);
2220
2221 k = bch2_btree_path_peek_slot(trans->paths + iter->key_cache_path, &u);
2222 if (!k.k)
2223 return k;
2224
2225 if ((iter->flags & BTREE_ITER_all_snapshots) &&
2226 !bpos_eq(pos, k.k->p))
2227 return bkey_s_c_null;
2228
2229 iter->k = u;
2230 k.k = &iter->k;
2231 btree_path_set_should_be_locked(trans, trans->paths + iter->key_cache_path);
2232 return k;
2233 }
2234
2235 static struct bkey_s_c __bch2_btree_iter_peek(struct btree_trans *trans, struct btree_iter *iter,
2236 struct bpos search_key)
2237 {
2238 struct bkey_s_c k, k2;
2239 int ret;
2240
2241 EBUG_ON(btree_iter_path(trans, iter)->cached);
2242 bch2_btree_iter_verify(trans, iter);
2243
2244 while (1) {
2245 iter->path = bch2_btree_path_set_pos(trans, iter->path, search_key,
2246 iter->flags & BTREE_ITER_intent,
2247 btree_iter_ip_allocated(iter));
2248
2249 ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
2250 if (unlikely(ret)) {
2251 /* ensure that iter->k is consistent with iter->pos: */
2252 bch2_btree_iter_set_pos(trans, iter, iter->pos);
2253 k = bkey_s_c_err(ret);
2254 break;
2255 }
2256
2257 struct btree_path *path = btree_iter_path(trans, iter);
2258 struct btree_path_level *l = path_l(path);
2259
2260 if (unlikely(!l->b)) {
2261 /* No btree nodes at requested level: */
2262 bch2_btree_iter_set_pos(trans, iter, SPOS_MAX);
2263 k = bkey_s_c_null;
2264 break;
2265 }
2266
2267 btree_path_set_should_be_locked(trans, path);
2268
2269 k = btree_path_level_peek_all(trans->c, l, &iter->k);
2270
2271 if (unlikely(iter->flags & BTREE_ITER_with_key_cache) &&
2272 k.k &&
2273 (k2 = btree_trans_peek_key_cache(trans, iter, k.k->p)).k) {
2274 k = k2;
2275 if (bkey_err(k)) {
2276 bch2_btree_iter_set_pos(trans, iter, iter->pos);
2277 break;
2278 }
2279 }
2280
2281 if (unlikely(iter->flags & BTREE_ITER_with_journal))
2282 btree_trans_peek_journal(trans, iter, &k);
2283
2284 if (unlikely((iter->flags & BTREE_ITER_with_updates) &&
2285 trans->nr_updates))
2286 bch2_btree_trans_peek_updates(trans, iter, &k);
2287
2288 if (k.k && bkey_deleted(k.k)) {
2289 /*
2290 * If we've got a whiteout, and it's after the search
2291 * key, advance the search key to the whiteout instead
2292 * of just after the whiteout - it might be a btree
2293 * whiteout, with a real key at the same position, since
2294 * in the btree deleted keys sort before non-deleted.
2295 */
2296 search_key = !bpos_eq(search_key, k.k->p)
2297 ? k.k->p
2298 : bpos_successor(k.k->p);
2299 continue;
2300 }
2301
2302 if (likely(k.k)) {
2303 break;
2304 } else if (likely(!bpos_eq(l->b->key.k.p, SPOS_MAX))) {
2305 /* Advance to next leaf node: */
2306 search_key = bpos_successor(l->b->key.k.p);
2307 } else {
2308 /* End of btree: */
2309 bch2_btree_iter_set_pos(trans, iter, SPOS_MAX);
2310 k = bkey_s_c_null;
2311 break;
2312 }
2313 }
2314
2315 bch2_btree_iter_verify(trans, iter);
2316 return k;
2317 }
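
/*
 * Note the overlay order in the loop above: the key found in the btree node
 * may be superseded, in turn, by a newer version from the key cache (same
 * position only), by journal keys that haven't been replayed yet, and finally
 * by this transaction's own pending updates - later sources take precedence
 * when they have a key at or before the position found so far.
 */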
2318
2319 /**
2320 * bch2_btree_iter_peek_max() - returns first key greater than or equal to
2321 * iterator's current position
2322 * @trans: btree transaction object
2323 * @iter: iterator to peek from
2324 * @end: search limit: returns keys less than or equal to @end
2325 *
2326 * Returns: key if found, or an error extractable with bkey_err().
2327 */
2328 struct bkey_s_c bch2_btree_iter_peek_max(struct btree_trans *trans, struct btree_iter *iter,
2329 struct bpos end)
2330 {
2331 struct bpos search_key = btree_iter_search_key(iter);
2332 struct bkey_s_c k;
2333 struct bpos iter_pos = iter->pos;
2334 int ret;
2335
2336 bch2_trans_verify_not_unlocked_or_in_restart(trans);
2337 bch2_btree_iter_verify_entry_exit(iter);
2338 EBUG_ON((iter->flags & BTREE_ITER_filter_snapshots) && bkey_eq(end, POS_MAX));
2339
2340 ret = trans_maybe_inject_restart(trans, _RET_IP_);
2341 if (unlikely(ret)) {
2342 k = bkey_s_c_err(ret);
2343 goto out_no_locked;
2344 }
2345
2346 if (iter->update_path) {
2347 bch2_path_put_nokeep(trans, iter->update_path,
2348 iter->flags & BTREE_ITER_intent);
2349 iter->update_path = 0;
2350 }
2351
2352 while (1) {
2353 k = __bch2_btree_iter_peek(trans, iter, search_key);
2354 if (unlikely(!k.k))
2355 goto end;
2356 if (unlikely(bkey_err(k)))
2357 goto out_no_locked;
2358
2359 if (iter->flags & BTREE_ITER_filter_snapshots) {
2360 /*
2361 * We need to check against @end before FILTER_SNAPSHOTS because
2362 * if we get to a different inode than requested we might be
2363 * seeing keys for a different snapshot tree that will all be
2364 * filtered out.
2365 *
2366 * But we can't do the full check here, because bkey_start_pos()
2367 * isn't monotonically increasing before FILTER_SNAPSHOTS, and
2368 * that's what we check against in extents mode:
2369 */
2370 if (unlikely(!(iter->flags & BTREE_ITER_is_extents)
2371 ? bkey_gt(k.k->p, end)
2372 : k.k->p.inode > end.inode))
2373 goto end;
2374
2375 if (iter->update_path &&
2376 !bkey_eq(trans->paths[iter->update_path].pos, k.k->p)) {
2377 bch2_path_put_nokeep(trans, iter->update_path,
2378 iter->flags & BTREE_ITER_intent);
2379 iter->update_path = 0;
2380 }
2381
2382 if ((iter->flags & BTREE_ITER_intent) &&
2383 !(iter->flags & BTREE_ITER_is_extents) &&
2384 !iter->update_path) {
2385 struct bpos pos = k.k->p;
2386
2387 if (pos.snapshot < iter->snapshot) {
2388 search_key = bpos_successor(k.k->p);
2389 continue;
2390 }
2391
2392 pos.snapshot = iter->snapshot;
2393
2394 /*
2395 * advance, same as on exit for iter->path, but only up
2396 * to snapshot
2397 */
2398 __btree_path_get(trans, trans->paths + iter->path, iter->flags & BTREE_ITER_intent);
2399 iter->update_path = iter->path;
2400
2401 iter->update_path = bch2_btree_path_set_pos(trans,
2402 iter->update_path, pos,
2403 iter->flags & BTREE_ITER_intent,
2404 _THIS_IP_);
2405 ret = bch2_btree_path_traverse(trans, iter->update_path, iter->flags);
2406 if (unlikely(ret)) {
2407 k = bkey_s_c_err(ret);
2408 goto out_no_locked;
2409 }
2410 }
2411
2412 /*
2413 * We can never have a key in a leaf node at POS_MAX, so
2414 * we don't have to check these successor() calls:
2415 */
2416 if (!bch2_snapshot_is_ancestor(trans->c,
2417 iter->snapshot,
2418 k.k->p.snapshot)) {
2419 search_key = bpos_successor(k.k->p);
2420 continue;
2421 }
2422
2423 if (bkey_whiteout(k.k) &&
2424 !(iter->flags & BTREE_ITER_key_cache_fill)) {
2425 search_key = bkey_successor(iter, k.k->p);
2426 continue;
2427 }
2428 }
2429
2430 /*
2431 * iter->pos should be monotonically increasing, and always be
2432 * equal to the key we just returned - except extents can
2433 * straddle iter->pos:
2434 */
2435 if (!(iter->flags & BTREE_ITER_is_extents))
2436 iter_pos = k.k->p;
2437 else
2438 iter_pos = bkey_max(iter->pos, bkey_start_pos(k.k));
2439
2440 if (unlikely(iter->flags & BTREE_ITER_all_snapshots ? bpos_gt(iter_pos, end) :
2441 iter->flags & BTREE_ITER_is_extents ? bkey_ge(iter_pos, end) :
2442 bkey_gt(iter_pos, end)))
2443 goto end;
2444
2445 break;
2446 }
2447
2448 iter->pos = iter_pos;
2449
2450 iter->path = bch2_btree_path_set_pos(trans, iter->path, k.k->p,
2451 iter->flags & BTREE_ITER_intent,
2452 btree_iter_ip_allocated(iter));
2453
2454 btree_path_set_should_be_locked(trans, btree_iter_path(trans, iter));
2455 out_no_locked:
2456 if (iter->update_path) {
2457 ret = bch2_btree_path_relock(trans, trans->paths + iter->update_path, _THIS_IP_);
2458 if (unlikely(ret))
2459 k = bkey_s_c_err(ret);
2460 else
2461 btree_path_set_should_be_locked(trans, trans->paths + iter->update_path);
2462 }
2463
2464 if (!(iter->flags & BTREE_ITER_all_snapshots))
2465 iter->pos.snapshot = iter->snapshot;
2466
2467 ret = bch2_btree_iter_verify_ret(trans, iter, k);
2468 if (unlikely(ret)) {
2469 bch2_btree_iter_set_pos(trans, iter, iter->pos);
2470 k = bkey_s_c_err(ret);
2471 }
2472
2473 bch2_btree_iter_verify_entry_exit(iter);
2474
2475 return k;
2476 end:
2477 bch2_btree_iter_set_pos(trans, iter, end);
2478 k = bkey_s_c_null;
2479 goto out_no_locked;
2480 }
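
/*
 * Illustrative sketch of iterating keys in [start, end] with
 * bch2_btree_iter_peek_max(); bch2_trans_iter_init() is assumed from the
 * header and the btree id/positions are placeholders:
 *
 *	struct btree_iter iter;
 *	struct bkey_s_c k;
 *	int ret = 0;
 *
 *	bch2_trans_iter_init(trans, &iter, BTREE_ID_extents, start, 0);
 *	while (1) {
 *		k = bch2_btree_iter_peek_max(trans, &iter, end);
 *		ret = bkey_err(k);
 *		if (ret || !k.k)
 *			break;
 *		... process k ...
 *		bch2_btree_iter_advance(trans, &iter);
 *	}
 *	bch2_trans_iter_exit(trans, &iter);
 *
 * On BCH_ERR_transaction_restart the whole loop must be retried after
 * bch2_trans_begin(); in-tree callers normally go through the
 * for_each_btree_key*() wrappers, which handle that.
 */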
2481
2482 /**
2483 * bch2_btree_iter_next() - returns first key greater than iterator's current
2484 * position
2485 * @trans: btree transaction object
2486 * @iter: iterator to peek from
2487 *
2488 * Returns: key if found, or an error extractable with bkey_err().
2489 */
2490 struct bkey_s_c bch2_btree_iter_next(struct btree_trans *trans, struct btree_iter *iter)
2491 {
2492 if (!bch2_btree_iter_advance(trans, iter))
2493 return bkey_s_c_null;
2494
2495 return bch2_btree_iter_peek(trans, iter);
2496 }
2497
2498 static struct bkey_s_c __bch2_btree_iter_peek_prev(struct btree_trans *trans, struct btree_iter *iter,
2499 struct bpos search_key)
2500 {
2501 struct bkey_s_c k, k2;
2502
2503 bch2_btree_iter_verify(trans, iter);
2504
2505 while (1) {
2506 iter->path = bch2_btree_path_set_pos(trans, iter->path, search_key,
2507 iter->flags & BTREE_ITER_intent,
2508 btree_iter_ip_allocated(iter));
2509
2510 int ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
2511 if (unlikely(ret)) {
2512 /* ensure that iter->k is consistent with iter->pos: */
2513 bch2_btree_iter_set_pos(trans, iter, iter->pos);
2514 k = bkey_s_c_err(ret);
2515 break;
2516 }
2517
2518 struct btree_path *path = btree_iter_path(trans, iter);
2519 struct btree_path_level *l = path_l(path);
2520
2521 if (unlikely(!l->b)) {
2522 /* No btree nodes at requested level: */
2523 bch2_btree_iter_set_pos(trans, iter, SPOS_MAX);
2524 k = bkey_s_c_null;
2525 break;
2526 }
2527
2528 btree_path_set_should_be_locked(trans, path);
2529
2530 k = btree_path_level_peek_all(trans->c, l, &iter->k);
2531 if (!k.k || bpos_gt(k.k->p, search_key)) {
2532 k = btree_path_level_prev(trans, path, l, &iter->k);
2533
2534 BUG_ON(k.k && bpos_gt(k.k->p, search_key));
2535 }
2536
2537 if (unlikely(iter->flags & BTREE_ITER_with_key_cache) &&
2538 k.k &&
2539 (k2 = btree_trans_peek_key_cache(trans, iter, k.k->p)).k) {
2540 k = k2;
2541 if (bkey_err(k2)) {
2542 bch2_btree_iter_set_pos(trans, iter, iter->pos);
2543 break;
2544 }
2545 }
2546
2547 if (unlikely(iter->flags & BTREE_ITER_with_journal))
2548 btree_trans_peek_prev_journal(trans, iter, &k);
2549
2550 if (unlikely((iter->flags & BTREE_ITER_with_updates) &&
2551 trans->nr_updates))
2552 bch2_btree_trans_peek_prev_updates(trans, iter, &k);
2553
2554 if (likely(k.k && !bkey_deleted(k.k))) {
2555 break;
2556 } else if (k.k) {
2557 search_key = bpos_predecessor(k.k->p);
2558 } else if (likely(!bpos_eq(path->l[0].b->data->min_key, POS_MIN))) {
2559 /* Advance to previous leaf node: */
2560 search_key = bpos_predecessor(path->l[0].b->data->min_key);
2561 } else {
2562 /* Start of btree: */
2563 bch2_btree_iter_set_pos(trans, iter, POS_MIN);
2564 k = bkey_s_c_null;
2565 break;
2566 }
2567 }
2568
2569 bch2_btree_iter_verify(trans, iter);
2570 return k;
2571 }
2572
2573 /**
2574 * bch2_btree_iter_peek_prev_min() - returns first key less than or equal to
2575 * iterator's current position
2576 * @trans: btree transaction object
2577 * @iter: iterator to peek from
2578 * @end: search limit: returns keys greater than or equal to @end
2579 *
2580 * Returns: key if found, or an error extractable with bkey_err().
2581 */
2582 struct bkey_s_c bch2_btree_iter_peek_prev_min(struct btree_trans *trans, struct btree_iter *iter,
2583 struct bpos end)
2584 {
2585 if ((iter->flags & (BTREE_ITER_is_extents|BTREE_ITER_filter_snapshots)) &&
2586 !bkey_eq(iter->pos, POS_MAX) &&
2587 !((iter->flags & BTREE_ITER_is_extents) &&
2588 iter->pos.offset == U64_MAX)) {
2589
2590 /*
2591 * bkey_start_pos(), for extents, is not monotonically
2592 * increasing until after filtering for snapshots:
2593 *
2594 * Thus, for extents we need to search forward until we find a
2595 * real, visible extent - easiest to just use peek_slot() (which
2596 * internally uses peek() for extents)
2597 */
2598 struct bkey_s_c k = bch2_btree_iter_peek_slot(trans, iter);
2599 if (bkey_err(k))
2600 return k;
2601
2602 if (!bkey_deleted(k.k) &&
2603 (!(iter->flags & BTREE_ITER_is_extents) ||
2604 bkey_lt(bkey_start_pos(k.k), iter->pos)))
2605 return k;
2606 }
2607
2608 struct bpos search_key = iter->pos;
2609 struct bkey_s_c k;
2610 btree_path_idx_t saved_path = 0;
2611
2612 bch2_trans_verify_not_unlocked_or_in_restart(trans);
2613 bch2_btree_iter_verify_entry_exit(iter);
2614 EBUG_ON((iter->flags & BTREE_ITER_filter_snapshots) && iter->pos.inode != end.inode);
2615
2616 int ret = trans_maybe_inject_restart(trans, _RET_IP_);
2617 if (unlikely(ret)) {
2618 k = bkey_s_c_err(ret);
2619 goto out_no_locked;
2620 }
2621
2622 while (1) {
2623 k = __bch2_btree_iter_peek_prev(trans, iter, search_key);
2624 if (unlikely(!k.k))
2625 goto end;
2626 if (unlikely(bkey_err(k)))
2627 goto out_no_locked;
2628
2629 if (iter->flags & BTREE_ITER_filter_snapshots) {
2630 struct btree_path *s = saved_path ? trans->paths + saved_path : NULL;
2631 if (s && bpos_lt(k.k->p, SPOS(s->pos.inode, s->pos.offset, iter->snapshot))) {
2632 /*
2633 * If we have a saved candidate, and we're past
2634 * the last possible snapshot overwrite, return
2635 * it:
2636 */
2637 bch2_path_put_nokeep(trans, iter->path,
2638 iter->flags & BTREE_ITER_intent);
2639 iter->path = saved_path;
2640 saved_path = 0;
2641 k = bch2_btree_path_peek_slot(btree_iter_path(trans, iter), &iter->k);
2642 break;
2643 }
2644
2645 /*
2646 * We need to check against @end before FILTER_SNAPSHOTS because
2647 * if we get to a different inode than requested we might be
2648 * seeing keys for a different snapshot tree that will all be
2649 * filtered out.
2650 */
2651 if (unlikely(bkey_lt(k.k->p, end)))
2652 goto end;
2653
2654 if (!bch2_snapshot_is_ancestor(trans->c, iter->snapshot, k.k->p.snapshot)) {
2655 search_key = bpos_predecessor(k.k->p);
2656 continue;
2657 }
2658
2659 if (k.k->p.snapshot != iter->snapshot) {
2660 /*
2661 * Have a key visible in iter->snapshot, but
2662 * might have overwrites - save it and keep
2663 * searching. Unless it's a whiteout - then drop
2664 * our previous saved candidate:
2665 */
2666 if (saved_path) {
2667 bch2_path_put_nokeep(trans, saved_path,
2668 iter->flags & BTREE_ITER_intent);
2669 saved_path = 0;
2670 }
2671
2672 if (!bkey_whiteout(k.k)) {
2673 saved_path = btree_path_clone(trans, iter->path,
2674 iter->flags & BTREE_ITER_intent,
2675 _THIS_IP_);
2676 trace_btree_path_save_pos(trans,
2677 trans->paths + iter->path,
2678 trans->paths + saved_path);
2679 }
2680
2681 search_key = bpos_predecessor(k.k->p);
2682 continue;
2683 }
2684
2685 if (bkey_whiteout(k.k)) {
2686 search_key = bkey_predecessor(iter, k.k->p);
2687 search_key.snapshot = U32_MAX;
2688 continue;
2689 }
2690 }
2691
2692 EBUG_ON(iter->flags & BTREE_ITER_all_snapshots ? bpos_gt(k.k->p, iter->pos) :
2693 iter->flags & BTREE_ITER_is_extents ? bkey_ge(bkey_start_pos(k.k), iter->pos) :
2694 bkey_gt(k.k->p, iter->pos));
2695
2696 if (unlikely(iter->flags & BTREE_ITER_all_snapshots ? bpos_lt(k.k->p, end) :
2697 iter->flags & BTREE_ITER_is_extents ? bkey_le(k.k->p, end) :
2698 bkey_lt(k.k->p, end)))
2699 goto end;
2700
2701 break;
2702 }
2703
2704 /* Extents can straddle iter->pos: */
2705 iter->pos = bpos_min(iter->pos, k.k->p);
2706
2707 if (iter->flags & BTREE_ITER_filter_snapshots)
2708 iter->pos.snapshot = iter->snapshot;
2709 out_no_locked:
2710 if (saved_path)
2711 bch2_path_put_nokeep(trans, saved_path, iter->flags & BTREE_ITER_intent);
2712
2713 bch2_btree_iter_verify_entry_exit(iter);
2714 bch2_btree_iter_verify(trans, iter);
2715 return k;
2716 end:
2717 bch2_btree_iter_set_pos(trans, iter, end);
2718 k = bkey_s_c_null;
2719 goto out_no_locked;
2720 }
2721
2722 /**
2723 * bch2_btree_iter_prev() - returns first key less than iterator's current
2724 * position
2725 * @trans: btree transaction object
2726 * @iter: iterator to peek from
2727 *
2728 * Returns: key if found, or an error extractable with bkey_err().
2729 */
2730 struct bkey_s_c bch2_btree_iter_prev(struct btree_trans *trans, struct btree_iter *iter)
2731 {
2732 if (!bch2_btree_iter_rewind(trans, iter))
2733 return bkey_s_c_null;
2734
2735 return bch2_btree_iter_peek_prev(trans, iter);
2736 }
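
/*
 * Illustrative sketch of walking keys backwards from a starting position;
 * bch2_trans_iter_init() is assumed from the header and the positions are
 * placeholders:
 *
 *	bch2_trans_iter_init(trans, &iter, BTREE_ID_extents, start, 0);
 *	for (k = bch2_btree_iter_peek_prev(trans, &iter);
 *	     k.k && !(ret = bkey_err(k));
 *	     k = bch2_btree_iter_prev(trans, &iter)) {
 *		... process k; positions are non-increasing ...
 *	}
 *	bch2_trans_iter_exit(trans, &iter);
 *
 * As with forward iteration, a transaction restart error means retrying from
 * bch2_trans_begin().
 */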
2737
2738 struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_trans *trans, struct btree_iter *iter)
2739 {
2740 struct bpos search_key;
2741 struct bkey_s_c k;
2742 int ret;
2743
2744 bch2_trans_verify_not_unlocked_or_in_restart(trans);
2745 bch2_btree_iter_verify(trans, iter);
2746 bch2_btree_iter_verify_entry_exit(iter);
2747 EBUG_ON(btree_iter_path(trans, iter)->level && (iter->flags & BTREE_ITER_with_key_cache));
2748
2749 ret = trans_maybe_inject_restart(trans, _RET_IP_);
2750 if (unlikely(ret)) {
2751 k = bkey_s_c_err(ret);
2752 goto out;
2753 }
2754
2755 /* extents can't span inode numbers: */
2756 if ((iter->flags & BTREE_ITER_is_extents) &&
2757 unlikely(iter->pos.offset == KEY_OFFSET_MAX)) {
2758 if (iter->pos.inode == KEY_INODE_MAX)
2759 return bkey_s_c_null;
2760
2761 bch2_btree_iter_set_pos(trans, iter, bpos_nosnap_successor(iter->pos));
2762 }
2763
2764 search_key = btree_iter_search_key(iter);
2765 iter->path = bch2_btree_path_set_pos(trans, iter->path, search_key,
2766 iter->flags & BTREE_ITER_intent,
2767 btree_iter_ip_allocated(iter));
2768
2769 ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
2770 if (unlikely(ret)) {
2771 k = bkey_s_c_err(ret);
2772 goto out;
2773 }
2774
2775 struct btree_path *path = btree_iter_path(trans, iter);
2776 if (unlikely(!btree_path_node(path, path->level)))
2777 return bkey_s_c_null;
2778
2779 btree_path_set_should_be_locked(trans, path);
2780
2781 if ((iter->flags & BTREE_ITER_cached) ||
2782 !(iter->flags & (BTREE_ITER_is_extents|BTREE_ITER_filter_snapshots))) {
2783 k = bkey_s_c_null;
2784
2785 if (unlikely((iter->flags & BTREE_ITER_with_updates) &&
2786 trans->nr_updates)) {
2787 bch2_btree_trans_peek_slot_updates(trans, iter, &k);
2788 if (k.k)
2789 goto out;
2790 }
2791
2792 if (unlikely(iter->flags & BTREE_ITER_with_journal) &&
2793 (k = btree_trans_peek_slot_journal(trans, iter)).k)
2794 goto out;
2795
2796 if (unlikely(iter->flags & BTREE_ITER_with_key_cache) &&
2797 (k = btree_trans_peek_key_cache(trans, iter, iter->pos)).k) {
2798 if (!bkey_err(k))
2799 iter->k = *k.k;
2800 /* We're not returning a key from iter->path: */
2801 goto out;
2802 }
2803
2804 k = bch2_btree_path_peek_slot(btree_iter_path(trans, iter), &iter->k);
2805 if (unlikely(!k.k))
2806 goto out;
2807
2808 if (unlikely(k.k->type == KEY_TYPE_whiteout &&
2809 (iter->flags & BTREE_ITER_filter_snapshots) &&
2810 !(iter->flags & BTREE_ITER_key_cache_fill)))
2811 iter->k.type = KEY_TYPE_deleted;
2812 } else {
2813 struct bpos next;
2814 struct bpos end = iter->pos;
2815
2816 if (iter->flags & BTREE_ITER_is_extents)
2817 end.offset = U64_MAX;
2818
2819 EBUG_ON(btree_iter_path(trans, iter)->level);
2820
2821 if (iter->flags & BTREE_ITER_intent) {
2822 struct btree_iter iter2;
2823
2824 bch2_trans_copy_iter(trans, &iter2, iter);
2825 k = bch2_btree_iter_peek_max(trans, &iter2, end);
2826
2827 if (k.k && !bkey_err(k)) {
2828 swap(iter->key_cache_path, iter2.key_cache_path);
2829 iter->k = iter2.k;
2830 k.k = &iter->k;
2831 }
2832 bch2_trans_iter_exit(trans, &iter2);
2833 } else {
2834 struct bpos pos = iter->pos;
2835
2836 k = bch2_btree_iter_peek_max(trans, iter, end);
2837 if (unlikely(bkey_err(k)))
2838 bch2_btree_iter_set_pos(trans, iter, pos);
2839 else
2840 iter->pos = pos;
2841 }
2842
2843 if (unlikely(bkey_err(k)))
2844 goto out;
2845
2846 next = k.k ? bkey_start_pos(k.k) : POS_MAX;
2847
2848 if (bkey_lt(iter->pos, next)) {
2849 bkey_init(&iter->k);
2850 iter->k.p = iter->pos;
2851
2852 if (iter->flags & BTREE_ITER_is_extents) {
2853 bch2_key_resize(&iter->k,
2854 min_t(u64, KEY_SIZE_MAX,
2855 (next.inode == iter->pos.inode
2856 ? next.offset
2857 : KEY_OFFSET_MAX) -
2858 iter->pos.offset));
2859 EBUG_ON(!iter->k.size);
2860 }
2861
2862 k = (struct bkey_s_c) { &iter->k, NULL };
2863 }
2864 }
2865 out:
2866 bch2_btree_iter_verify_entry_exit(iter);
2867 bch2_btree_iter_verify(trans, iter);
2868 ret = bch2_btree_iter_verify_ret(trans, iter, k);
2869 if (unlikely(ret))
2870 return bkey_s_c_err(ret);
2871
2872 return k;
2873 }
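
/*
 * Illustrative sketch of a point lookup with bch2_btree_iter_peek_slot();
 * bch2_trans_iter_init() is assumed from the header and the btree id/position
 * are placeholders. A slot is always returned on success - for a nonexistent
 * key, k.k is a deleted key at iter.pos with a NULL value:
 *
 *	bch2_trans_iter_init(trans, &iter, BTREE_ID_inodes, pos, 0);
 *	k = bch2_btree_iter_peek_slot(trans, &iter);
 *	ret = bkey_err(k);
 *	if (!ret && !bkey_deleted(k.k))
 *		... key exists, decode k.v ...
 *	bch2_trans_iter_exit(trans, &iter);
 */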
2874
2875 struct bkey_s_c bch2_btree_iter_next_slot(struct btree_trans *trans, struct btree_iter *iter)
2876 {
2877 if (!bch2_btree_iter_advance(trans, iter))
2878 return bkey_s_c_null;
2879
2880 return bch2_btree_iter_peek_slot(trans, iter);
2881 }
2882
2883 struct bkey_s_c bch2_btree_iter_prev_slot(struct btree_trans *trans, struct btree_iter *iter)
2884 {
2885 if (!bch2_btree_iter_rewind(trans, iter))
2886 return bkey_s_c_null;
2887
2888 return bch2_btree_iter_peek_slot(trans, iter);
2889 }
2890
2891 /* Obsolete, but still used by rust wrapper in -tools */
2892 struct bkey_s_c bch2_btree_iter_peek_and_restart_outlined(struct btree_trans *trans, struct btree_iter *iter)
2893 {
2894 struct bkey_s_c k;
2895
2896 while (btree_trans_too_many_iters(trans) ||
2897 (k = bch2_btree_iter_peek_type(trans, iter, iter->flags),
2898 bch2_err_matches(bkey_err(k), BCH_ERR_transaction_restart)))
2899 bch2_trans_begin(trans);
2900
2901 return k;
2902 }
2903
2904 /* new transactional stuff: */
2905
2906 #ifdef CONFIG_BCACHEFS_DEBUG
2907 static void btree_trans_verify_sorted_refs(struct btree_trans *trans)
2908 {
2909 struct btree_path *path;
2910 unsigned i;
2911
2912 BUG_ON(trans->nr_sorted != bitmap_weight(trans->paths_allocated, trans->nr_paths) - 1);
2913
2914 trans_for_each_path(trans, path, i) {
2915 BUG_ON(path->sorted_idx >= trans->nr_sorted);
2916 BUG_ON(trans->sorted[path->sorted_idx] != i);
2917 }
2918
2919 for (i = 0; i < trans->nr_sorted; i++) {
2920 unsigned idx = trans->sorted[i];
2921
2922 BUG_ON(!test_bit(idx, trans->paths_allocated));
2923 BUG_ON(trans->paths[idx].sorted_idx != i);
2924 }
2925 }
2926
2927 static void btree_trans_verify_sorted(struct btree_trans *trans)
2928 {
2929 struct btree_path *path, *prev = NULL;
2930 struct trans_for_each_path_inorder_iter iter;
2931
2932 if (!bch2_debug_check_iterators)
2933 return;
2934
2935 trans_for_each_path_inorder(trans, path, iter) {
2936 if (prev && btree_path_cmp(prev, path) > 0) {
2937 __bch2_dump_trans_paths_updates(trans, true);
2938 panic("trans paths out of order!\n");
2939 }
2940 prev = path;
2941 }
2942 }
2943 #else
2944 static inline void btree_trans_verify_sorted_refs(struct btree_trans *trans) {}
2945 static inline void btree_trans_verify_sorted(struct btree_trans *trans) {}
2946 #endif
2947
2948 void __bch2_btree_trans_sort_paths(struct btree_trans *trans)
2949 {
2950 int i, l = 0, r = trans->nr_sorted, inc = 1;
2951 bool swapped;
2952
2953 btree_trans_verify_sorted_refs(trans);
2954
2955 if (trans->paths_sorted)
2956 goto out;
2957
2958 /*
2959 * Cocktail shaker sort: this is efficient because iterators will be
2960 * mostly sorted.
2961 */
2962 do {
2963 swapped = false;
2964
2965 for (i = inc > 0 ? l : r - 2;
2966 i + 1 < r && i >= l;
2967 i += inc) {
2968 if (btree_path_cmp(trans->paths + trans->sorted[i],
2969 trans->paths + trans->sorted[i + 1]) > 0) {
2970 swap(trans->sorted[i], trans->sorted[i + 1]);
2971 trans->paths[trans->sorted[i]].sorted_idx = i;
2972 trans->paths[trans->sorted[i + 1]].sorted_idx = i + 1;
2973 swapped = true;
2974 }
2975 }
2976
2977 if (inc > 0)
2978 --r;
2979 else
2980 l++;
2981 inc = -inc;
2982 } while (swapped);
2983
2984 trans->paths_sorted = true;
2985 out:
2986 btree_trans_verify_sorted(trans);
2987 }
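
/*
 * The loop above is a cocktail shaker sort specialized to the sorted[]
 * indirection; the same idea on a plain array, as a standalone sketch:
 *
 *	static void shaker_sort(int *a, int n)
 *	{
 *		int l = 0, r = n, inc = 1, i;
 *		bool swapped;
 *
 *		do {
 *			swapped = false;
 *			for (i = inc > 0 ? l : r - 2;
 *			     i + 1 < r && i >= l;
 *			     i += inc)
 *				if (a[i] > a[i + 1]) {
 *					swap(a[i], a[i + 1]);
 *					swapped = true;
 *				}
 *			if (inc > 0)
 *				--r;
 *			else
 *				l++;
 *			inc = -inc;
 *		} while (swapped);
 *	}
 *
 * Alternating the scan direction and shrinking [l, r) from both ends lets
 * slightly-misplaced elements settle in one or two passes, which is the
 * common case here since paths are kept mostly sorted.
 */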
2988
2989 static inline void btree_path_list_remove(struct btree_trans *trans,
2990 struct btree_path *path)
2991 {
2992 EBUG_ON(path->sorted_idx >= trans->nr_sorted);
2993 #ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2994 trans->nr_sorted--;
2995 memmove_u64s_down_small(trans->sorted + path->sorted_idx,
2996 trans->sorted + path->sorted_idx + 1,
2997 DIV_ROUND_UP(trans->nr_sorted - path->sorted_idx,
2998 sizeof(u64) / sizeof(btree_path_idx_t)));
2999 #else
3000 array_remove_item(trans->sorted, trans->nr_sorted, path->sorted_idx);
3001 #endif
3002 for (unsigned i = path->sorted_idx; i < trans->nr_sorted; i++)
3003 trans->paths[trans->sorted[i]].sorted_idx = i;
3004 }
3005
3006 static inline void btree_path_list_add(struct btree_trans *trans,
3007 btree_path_idx_t pos,
3008 btree_path_idx_t path_idx)
3009 {
3010 struct btree_path *path = trans->paths + path_idx;
3011
3012 path->sorted_idx = pos ? trans->paths[pos].sorted_idx + 1 : trans->nr_sorted;
3013
3014 #ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
3015 memmove_u64s_up_small(trans->sorted + path->sorted_idx + 1,
3016 trans->sorted + path->sorted_idx,
3017 DIV_ROUND_UP(trans->nr_sorted - path->sorted_idx,
3018 sizeof(u64) / sizeof(btree_path_idx_t)));
3019 trans->nr_sorted++;
3020 trans->sorted[path->sorted_idx] = path_idx;
3021 #else
3022 array_insert_item(trans->sorted, trans->nr_sorted, path->sorted_idx, path_idx);
3023 #endif
3024
3025 for (unsigned i = path->sorted_idx; i < trans->nr_sorted; i++)
3026 trans->paths[trans->sorted[i]].sorted_idx = i;
3027
3028 btree_trans_verify_sorted_refs(trans);
3029 }
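
/*
 * Note: with CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS the insert/remove above
 * shifts the sorted[] entries in u64-sized chunks, which can touch up to 8
 * bytes past the last live entry - that is likely what the extra 8 bytes
 * allocated for sorted[] in btree_paths_realloc() are reserved for.
 */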
3030
3031 void bch2_trans_iter_exit(struct btree_trans *trans, struct btree_iter *iter)
3032 {
3033 if (iter->update_path)
3034 bch2_path_put_nokeep(trans, iter->update_path,
3035 iter->flags & BTREE_ITER_intent);
3036 if (iter->path)
3037 bch2_path_put(trans, iter->path,
3038 iter->flags & BTREE_ITER_intent);
3039 if (iter->key_cache_path)
3040 bch2_path_put(trans, iter->key_cache_path,
3041 iter->flags & BTREE_ITER_intent);
3042 iter->path = 0;
3043 iter->update_path = 0;
3044 iter->key_cache_path = 0;
3045 }
3046
3047 void bch2_trans_iter_init_outlined(struct btree_trans *trans,
3048 struct btree_iter *iter,
3049 enum btree_id btree_id, struct bpos pos,
3050 unsigned flags)
3051 {
3052 bch2_trans_iter_init_common(trans, iter, btree_id, pos, 0, 0,
3053 bch2_btree_iter_flags(trans, btree_id, 0, flags),
3054 _RET_IP_);
3055 }
3056
3057 void bch2_trans_node_iter_init(struct btree_trans *trans,
3058 struct btree_iter *iter,
3059 enum btree_id btree_id,
3060 struct bpos pos,
3061 unsigned locks_want,
3062 unsigned depth,
3063 unsigned flags)
3064 {
3065 flags |= BTREE_ITER_not_extents;
3066 flags |= BTREE_ITER_snapshot_field;
3067 flags |= BTREE_ITER_all_snapshots;
3068
3069 if (!depth && btree_id_cached(trans->c, btree_id))
3070 flags |= BTREE_ITER_with_key_cache;
3071
3072 bch2_trans_iter_init_common(trans, iter, btree_id, pos, locks_want, depth,
3073 bch2_btree_iter_flags(trans, btree_id, depth, flags),
3074 _RET_IP_);
3075
3076 iter->min_depth = depth;
3077
3078 struct btree_path *path = btree_iter_path(trans, iter);
3079 BUG_ON(path->locks_want < min(locks_want, BTREE_MAX_DEPTH));
3080 BUG_ON(path->level != depth);
3081 BUG_ON(iter->min_depth != depth);
3082 }
3083
3084 void bch2_trans_copy_iter(struct btree_trans *trans,
3085 struct btree_iter *dst, struct btree_iter *src)
3086 {
3087 *dst = *src;
3088 #ifdef TRACK_PATH_ALLOCATED
3089 dst->ip_allocated = _RET_IP_;
3090 #endif
3091 if (src->path)
3092 __btree_path_get(trans, trans->paths + src->path, src->flags & BTREE_ITER_intent);
3093 if (src->update_path)
3094 __btree_path_get(trans, trans->paths + src->update_path, src->flags & BTREE_ITER_intent);
3095 dst->key_cache_path = 0;
3096 }
3097
3098 void *__bch2_trans_kmalloc(struct btree_trans *trans, size_t size)
3099 {
3100 struct bch_fs *c = trans->c;
3101 unsigned new_top = trans->mem_top + size;
3102 unsigned old_bytes = trans->mem_bytes;
3103 unsigned new_bytes = roundup_pow_of_two(new_top);
3104 int ret;
3105 void *new_mem;
3106 void *p;
3107
3108 WARN_ON_ONCE(new_bytes > BTREE_TRANS_MEM_MAX);
3109
3110 ret = trans_maybe_inject_restart(trans, _RET_IP_);
3111 if (ret)
3112 return ERR_PTR(ret);
3113
3114 struct btree_transaction_stats *s = btree_trans_stats(trans);
3115 s->max_mem = max(s->max_mem, new_bytes);
3116
3117 if (trans->used_mempool) {
3118 if (trans->mem_bytes >= new_bytes)
3119 goto out_change_top;
3120
3121 /* No more space in the mempool item; allocate a new buffer */
3122 new_mem = kmalloc(new_bytes, GFP_NOWAIT|__GFP_NOWARN);
3123 if (unlikely(!new_mem)) {
3124 bch2_trans_unlock(trans);
3125
3126 new_mem = kmalloc(new_bytes, GFP_KERNEL);
3127 if (!new_mem)
3128 return ERR_PTR(-BCH_ERR_ENOMEM_trans_kmalloc);
3129
3130 ret = bch2_trans_relock(trans);
3131 if (ret) {
3132 kfree(new_mem);
3133 return ERR_PTR(ret);
3134 }
3135 }
3136 memcpy(new_mem, trans->mem, trans->mem_top);
3137 trans->used_mempool = false;
3138 mempool_free(trans->mem, &c->btree_trans_mem_pool);
3139 goto out_new_mem;
3140 }
3141
3142 new_mem = krealloc(trans->mem, new_bytes, GFP_NOWAIT|__GFP_NOWARN);
3143 if (unlikely(!new_mem)) {
3144 bch2_trans_unlock(trans);
3145
3146 new_mem = krealloc(trans->mem, new_bytes, GFP_KERNEL);
3147 if (!new_mem && new_bytes <= BTREE_TRANS_MEM_MAX) {
3148 new_mem = mempool_alloc(&c->btree_trans_mem_pool, GFP_KERNEL);
3149 new_bytes = BTREE_TRANS_MEM_MAX;
3150 memcpy(new_mem, trans->mem, trans->mem_top);
3151 trans->used_mempool = true;
3152 kfree(trans->mem);
3153 }
3154
3155 if (!new_mem)
3156 return ERR_PTR(-BCH_ERR_ENOMEM_trans_kmalloc);
3157
3158 trans->mem = new_mem;
3159 trans->mem_bytes = new_bytes;
3160
3161 ret = bch2_trans_relock(trans);
3162 if (ret)
3163 return ERR_PTR(ret);
3164 }
3165 out_new_mem:
3166 trans->mem = new_mem;
3167 trans->mem_bytes = new_bytes;
3168
3169 if (old_bytes) {
3170 trace_and_count(c, trans_restart_mem_realloced, trans, _RET_IP_, new_bytes);
3171 return ERR_PTR(btree_trans_restart_ip(trans,
3172 BCH_ERR_transaction_restart_mem_realloced, _RET_IP_));
3173 }
3174 out_change_top:
3175 p = trans->mem + trans->mem_top;
3176 trans->mem_top += size;
3177 memset(p, 0, size);
3178 return p;
3179 }
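
/*
 * Illustrative sketch of a caller: allocations go through the
 * bch2_trans_kmalloc() wrapper (assumed to be declared in the header) rather
 * than this function directly, and a restart from it is handled like any
 * other:
 *
 *	struct bkey_i *new = bch2_trans_kmalloc(trans, sizeof(*new));
 *	int ret = PTR_ERR_OR_ZERO(new);
 *	if (ret)
 *		return ret;	... may be transaction_restart_mem_realloced ...
 *	bkey_init(&new->k);
 *
 * Memory returned here lives until the next bch2_trans_begin(), which resets
 * mem_top, so pointers into it must not be kept across a restart.
 */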
3180
3181 static inline void check_srcu_held_too_long(struct btree_trans *trans)
3182 {
3183 WARN(trans->srcu_held && time_after(jiffies, trans->srcu_lock_time + HZ * 10),
3184 "btree trans held srcu lock (delaying memory reclaim) for %lu seconds",
3185 (jiffies - trans->srcu_lock_time) / HZ);
3186 }
3187
3188 void bch2_trans_srcu_unlock(struct btree_trans *trans)
3189 {
3190 if (trans->srcu_held) {
3191 struct bch_fs *c = trans->c;
3192 struct btree_path *path;
3193 unsigned i;
3194
3195 trans_for_each_path(trans, path, i)
3196 if (path->cached && !btree_node_locked(path, 0))
3197 path->l[0].b = ERR_PTR(-BCH_ERR_no_btree_node_srcu_reset);
3198
3199 check_srcu_held_too_long(trans);
3200 srcu_read_unlock(&c->btree_trans_barrier, trans->srcu_idx);
3201 trans->srcu_held = false;
3202 }
3203 }
3204
3205 static void bch2_trans_srcu_lock(struct btree_trans *trans)
3206 {
3207 if (!trans->srcu_held) {
3208 trans->srcu_idx = srcu_read_lock(&trans->c->btree_trans_barrier);
3209 trans->srcu_lock_time = jiffies;
3210 trans->srcu_held = true;
3211 }
3212 }
3213
3214 /**
3215 * bch2_trans_begin() - reset a transaction after an interrupted attempt
3216 * @trans: transaction to reset
3217 *
3218 * Returns: current restart counter, to be used with trans_was_restarted()
3219 *
3220 * While iterating over nodes or updating nodes, an attempt to lock a btree node
3221 * may return BCH_ERR_transaction_restart when the trylock fails. When this
3222 * occurs bch2_trans_begin() should be called and the transaction retried.
3223 */
3224 u32 bch2_trans_begin(struct btree_trans *trans)
3225 {
3226 struct btree_path *path;
3227 unsigned i;
3228 u64 now;
3229
3230 bch2_trans_reset_updates(trans);
3231
3232 trans->restart_count++;
3233 trans->mem_top = 0;
3234 trans->journal_entries = NULL;
3235
3236 trans_for_each_path(trans, path, i) {
3237 path->should_be_locked = false;
3238
3239 /*
3240 * If the transaction wasn't restarted, we're presuming to be
3241 * doing something new: don't keep iterators except the ones that
3242 * are in use - except for the subvolumes btree:
3243 */
3244 if (!trans->restarted && path->btree_id != BTREE_ID_subvolumes)
3245 path->preserve = false;
3246
3247 /*
3248 * XXX: we probably shouldn't be doing this if the transaction
3249 * was restarted, but currently we still overflow transaction
3250 * iterators if we do that
3251 */
3252 if (!path->ref && !path->preserve)
3253 __bch2_path_free(trans, i);
3254 else
3255 path->preserve = false;
3256 }
3257
3258 now = local_clock();
3259
3260 if (!IS_ENABLED(CONFIG_BCACHEFS_NO_LATENCY_ACCT) &&
3261 time_after64(now, trans->last_begin_time + 10))
3262 __bch2_time_stats_update(&btree_trans_stats(trans)->duration,
3263 trans->last_begin_time, now);
3264
3265 if (!trans->restarted &&
3266 (need_resched() ||
3267 time_after64(now, trans->last_begin_time + BTREE_TRANS_MAX_LOCK_HOLD_TIME_NS))) {
3268 bch2_trans_unlock(trans);
3269 cond_resched();
3270 now = local_clock();
3271 }
3272 trans->last_begin_time = now;
3273
3274 if (unlikely(trans->srcu_held &&
3275 time_after(jiffies, trans->srcu_lock_time + msecs_to_jiffies(10))))
3276 bch2_trans_srcu_unlock(trans);
3277
3278 trans->last_begin_ip = _RET_IP_;
3279
3280 #ifdef CONFIG_BCACHEFS_INJECT_TRANSACTION_RESTARTS
3281 if (trans->restarted) {
3282 trans->restart_count_this_trans++;
3283 } else {
3284 trans->restart_count_this_trans = 0;
3285 }
3286 #endif
3287
3288 trans_set_locked(trans, false);
3289
3290 if (trans->restarted) {
3291 bch2_btree_path_traverse_all(trans);
3292 trans->notrace_relock_fail = false;
3293 }
3294
3295 bch2_trans_verify_not_unlocked_or_in_restart(trans);
3296 return trans->restart_count;
3297 }
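
/*
 * Illustrative sketch of the standard restart loop around a transaction;
 * do_something() stands in for any sequence of iterator/update calls:
 *
 *	struct btree_trans *trans = bch2_trans_get(c);
 *	int ret;
 *
 *	do {
 *		bch2_trans_begin(trans);
 *		ret = do_something(trans);
 *	} while (bch2_err_matches(ret, BCH_ERR_transaction_restart));
 *
 *	bch2_trans_put(trans);
 *
 * In-tree code usually spells this with helpers such as bch2_trans_run() and
 * lockrestart_do(), but they reduce to the same pattern.
 */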
3298
3299 const char *bch2_btree_transaction_fns[BCH_TRANSACTIONS_NR] = { "(unknown)" };
3300
3301 unsigned bch2_trans_get_fn_idx(const char *fn)
3302 {
3303 for (unsigned i = 0; i < ARRAY_SIZE(bch2_btree_transaction_fns); i++)
3304 if (!bch2_btree_transaction_fns[i] ||
3305 bch2_btree_transaction_fns[i] == fn) {
3306 bch2_btree_transaction_fns[i] = fn;
3307 return i;
3308 }
3309
3310 pr_warn_once("BCH_TRANSACTIONS_NR not big enough!");
3311 return 0;
3312 }
3313
3314 struct btree_trans *__bch2_trans_get(struct bch_fs *c, unsigned fn_idx)
3315 __acquires(&c->btree_trans_barrier)
3316 {
3317 struct btree_trans *trans;
3318
3319 if (IS_ENABLED(__KERNEL__)) {
3320 trans = this_cpu_xchg(c->btree_trans_bufs->trans, NULL);
3321 if (trans) {
3322 memset(trans, 0, offsetof(struct btree_trans, list));
3323 goto got_trans;
3324 }
3325 }
3326
3327 trans = mempool_alloc(&c->btree_trans_pool, GFP_NOFS);
3328 memset(trans, 0, sizeof(*trans));
3329
3330 seqmutex_lock(&c->btree_trans_lock);
3331 if (IS_ENABLED(CONFIG_BCACHEFS_DEBUG)) {
3332 struct btree_trans *pos;
3333 pid_t pid = current->pid;
3334
3335 trans->locking_wait.task = current;
3336
3337 list_for_each_entry(pos, &c->btree_trans_list, list) {
3338 struct task_struct *pos_task = READ_ONCE(pos->locking_wait.task);
3339 /*
3340 * We'd much prefer to be stricter here and completely
3341 * disallow multiple btree_trans in the same thread -
3342 * but the data move path calls bch2_write when we
3343 * already have a btree_trans initialized.
3344 */
3345 BUG_ON(pos_task &&
3346 pid == pos_task->pid &&
3347 pos->locked);
3348 }
3349 }
3350
3351 list_add(&trans->list, &c->btree_trans_list);
3352 seqmutex_unlock(&c->btree_trans_lock);
3353 got_trans:
3354 trans->c = c;
3355 trans->last_begin_time = local_clock();
3356 trans->fn_idx = fn_idx;
3357 trans->locking_wait.task = current;
3358 trans->journal_replay_not_finished =
3359 unlikely(!test_bit(JOURNAL_replay_done, &c->journal.flags)) &&
3360 atomic_inc_not_zero(&c->journal_keys.ref);
3361 trans->nr_paths = ARRAY_SIZE(trans->_paths);
3362 trans->paths_allocated = trans->_paths_allocated;
3363 trans->sorted = trans->_sorted;
3364 trans->paths = trans->_paths;
3365 trans->updates = trans->_updates;
3366
3367 *trans_paths_nr(trans->paths) = BTREE_ITER_INITIAL;
3368
3369 trans->paths_allocated[0] = 1;
3370
3371 static struct lock_class_key lockdep_key;
3372 lockdep_init_map(&trans->dep_map, "bcachefs_btree", &lockdep_key, 0);
3373
3374 if (fn_idx < BCH_TRANSACTIONS_NR) {
3375 trans->fn = bch2_btree_transaction_fns[fn_idx];
3376
3377 struct btree_transaction_stats *s = &c->btree_transaction_stats[fn_idx];
3378
3379 if (s->max_mem) {
3380 unsigned expected_mem_bytes = roundup_pow_of_two(s->max_mem);
3381
3382 trans->mem = kmalloc(expected_mem_bytes, GFP_KERNEL);
3383 if (likely(trans->mem))
3384 trans->mem_bytes = expected_mem_bytes;
3385 }
3386
3387 trans->nr_paths_max = s->nr_max_paths;
3388 trans->journal_entries_size = s->journal_entries_size;
3389 }
3390
3391 trans->srcu_idx = srcu_read_lock(&c->btree_trans_barrier);
3392 trans->srcu_lock_time = jiffies;
3393 trans->srcu_held = true;
3394 trans_set_locked(trans, false);
3395
3396 closure_init_stack_release(&trans->ref);
3397 return trans;
3398 }
3399
3400 static void check_btree_paths_leaked(struct btree_trans *trans)
3401 {
3402 #ifdef CONFIG_BCACHEFS_DEBUG
3403 struct bch_fs *c = trans->c;
3404 struct btree_path *path;
3405 unsigned i;
3406
3407 trans_for_each_path(trans, path, i)
3408 if (path->ref)
3409 goto leaked;
3410 return;
3411 leaked:
3412 bch_err(c, "btree paths leaked from %s!", trans->fn);
3413 trans_for_each_path(trans, path, i)
3414 if (path->ref)
3415 printk(KERN_ERR " btree %s %pS\n",
3416 bch2_btree_id_str(path->btree_id),
3417 (void *) path->ip_allocated);
3418 /* Be noisy about this: */
3419 bch2_fatal_error(c);
3420 #endif
3421 }
3422
3423 void bch2_trans_put(struct btree_trans *trans)
3424 __releases(&c->btree_trans_barrier)
3425 {
3426 struct bch_fs *c = trans->c;
3427
3428 if (trans->restarted)
3429 bch2_trans_in_restart_error(trans);
3430
3431 bch2_trans_unlock(trans);
3432
3433 trans_for_each_update(trans, i)
3434 __btree_path_put(trans, trans->paths + i->path, true);
3435 trans->nr_updates = 0;
3436
3437 check_btree_paths_leaked(trans);
3438
3439 if (trans->srcu_held) {
3440 check_srcu_held_too_long(trans);
3441 srcu_read_unlock(&c->btree_trans_barrier, trans->srcu_idx);
3442 }
3443
3444 if (unlikely(trans->journal_replay_not_finished))
3445 bch2_journal_keys_put(c);
3446
3447 /*
3448 * trans->ref protects trans->locking_wait.task, btree_paths array; used
3449 * by cycle detector
3450 */
3451 closure_return_sync(&trans->ref);
3452 trans->locking_wait.task = NULL;
3453
3454 #ifdef CONFIG_BCACHEFS_DEBUG
3455 darray_exit(&trans->last_restarted_trace);
3456 #endif
3457
3458 unsigned long *paths_allocated = trans->paths_allocated;
3459 trans->paths_allocated = NULL;
3460 trans->paths = NULL;
3461
3462 if (paths_allocated != trans->_paths_allocated)
3463 kvfree_rcu_mightsleep(paths_allocated);
3464
3465 if (trans->used_mempool)
3466 mempool_free(trans->mem, &c->btree_trans_mem_pool);
3467 else
3468 kfree(trans->mem);
3469
3470 /* Userspace doesn't have a real percpu implementation: */
3471 if (IS_ENABLED(__KERNEL__))
3472 trans = this_cpu_xchg(c->btree_trans_bufs->trans, trans);
3473
3474 if (trans) {
3475 seqmutex_lock(&c->btree_trans_lock);
3476 list_del(&trans->list);
3477 seqmutex_unlock(&c->btree_trans_lock);
3478
3479 mempool_free(trans, &c->btree_trans_pool);
3480 }
3481 }
3482
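/*
 * Returns true if the calling task currently holds a locked btree_trans.
 * This walks the filesystem-wide transaction list under btree_trans_lock,
 * so it's intended for assertions and debugging, not fast paths.
 */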
bool bch2_current_has_btree_trans(struct bch_fs *c)
{
	seqmutex_lock(&c->btree_trans_lock);
	struct btree_trans *trans;
	bool ret = false;
	list_for_each_entry(trans, &c->btree_trans_list, list)
		if (trans->locking_wait.task == current &&
		    trans->locked) {
			ret = true;
			break;
		}
	seqmutex_unlock(&c->btree_trans_lock);
	return ret;
}

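/*
 * Print a one line summary of a btree node or key cache entry: address,
 * 'c' (key cache) or 'b' (btree node), btree id, level and position, plus
 * the six lock read/intent/write hold counts and the pid of the current
 * lock owner, if any.
 */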
static void __maybe_unused
bch2_btree_bkey_cached_common_to_text(struct printbuf *out,
				      struct btree_bkey_cached_common *b)
{
	struct six_lock_count c = six_lock_counts(&b->lock);
	struct task_struct *owner;
	pid_t pid;

	rcu_read_lock();
	owner = READ_ONCE(b->lock.owner);
	pid = owner ? owner->pid : 0;
	rcu_read_unlock();

	prt_printf(out, "\t%px %c ", b, b->cached ? 'c' : 'b');
	bch2_btree_id_to_text(out, b->btree_id);
	prt_printf(out, " l=%u:", b->level);
	bch2_bpos_to_text(out, btree_node_pos(b));

	prt_printf(out, "\t locks %u:%u:%u held by pid %u",
		   c.n[0], c.n[1], c.n[2], pid);
}

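/*
 * Dump one transaction in human readable form for debug reporting: the
 * owning task's pid and the function the transaction was started from, one
 * entry per btree_path with nodes locked showing each held lock's type and
 * level, and, if the transaction is currently blocked taking a lock, what
 * it's waiting for and for how long.
 */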
void bch2_btree_trans_to_text(struct printbuf *out, struct btree_trans *trans)
{
	struct btree_bkey_cached_common *b;
	static char lock_types[] = { 'r', 'i', 'w' };
	struct task_struct *task = READ_ONCE(trans->locking_wait.task);
	unsigned l, idx;

	/* before rcu_read_lock(): */
	bch2_printbuf_make_room(out, 4096);

	if (!out->nr_tabstops) {
		printbuf_tabstop_push(out, 16);
		printbuf_tabstop_push(out, 32);
	}

	prt_printf(out, "%i %s\n", task ? task->pid : 0, trans->fn);

	/* trans->paths is rcu protected vs. freeing */
	rcu_read_lock();
	out->atomic++;

	struct btree_path *paths = rcu_dereference(trans->paths);
	if (!paths)
		goto out;

	unsigned long *paths_allocated = trans_paths_allocated(paths);

	trans_for_each_path_idx_from(paths_allocated, *trans_paths_nr(paths), idx, 1) {
		struct btree_path *path = paths + idx;
		if (!path->nodes_locked)
			continue;

		prt_printf(out, " path %u %c ",
			   idx,
			   path->cached ? 'c' : 'b');
		bch2_btree_id_to_text(out, path->btree_id);
		prt_printf(out, " l=%u:", path->level);
		bch2_bpos_to_text(out, path->pos);
		prt_newline(out);

		for (l = 0; l < BTREE_MAX_DEPTH; l++) {
			if (btree_node_locked(path, l) &&
			    !IS_ERR_OR_NULL(b = (void *) READ_ONCE(path->l[l].b))) {
				prt_printf(out, " %c l=%u ",
					   lock_types[btree_node_locked_type(path, l)], l);
				bch2_btree_bkey_cached_common_to_text(out, b);
				prt_newline(out);
			}
		}
	}

	b = READ_ONCE(trans->locking);
	if (b) {
		prt_printf(out, " blocked for %lluus on\n",
			   div_u64(local_clock() - trans->locking_wait.start_time, 1000));
		prt_printf(out, " %c", lock_types[trans->locking_wait.lock_want]);
		bch2_btree_bkey_cached_common_to_text(out, b);
		prt_newline(out);
	}
out:
	--out->atomic;
	rcu_read_unlock();
}

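/*
 * Filesystem shutdown: free the per-cpu btree_trans buffers, panic if any
 * transaction is still on the list, tear down the per-transaction-fn stats,
 * and destroy the SRCU barrier and mempools set up by
 * bch2_fs_btree_iter_init().
 */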
void bch2_fs_btree_iter_exit(struct bch_fs *c)
{
	struct btree_transaction_stats *s;
	struct btree_trans *trans;
	int cpu;

	if (c->btree_trans_bufs)
		for_each_possible_cpu(cpu) {
			struct btree_trans *trans =
				per_cpu_ptr(c->btree_trans_bufs, cpu)->trans;

			if (trans) {
				seqmutex_lock(&c->btree_trans_lock);
				list_del(&trans->list);
				seqmutex_unlock(&c->btree_trans_lock);
			}
			kfree(trans);
		}
	free_percpu(c->btree_trans_bufs);

	trans = list_first_entry_or_null(&c->btree_trans_list, struct btree_trans, list);
	if (trans)
		panic("%s leaked btree_trans\n", trans->fn);

	for (s = c->btree_transaction_stats;
	     s < c->btree_transaction_stats + ARRAY_SIZE(c->btree_transaction_stats);
	     s++) {
		kfree(s->max_paths_text);
		bch2_time_stats_exit(&s->lock_hold_times);
	}

	if (c->btree_trans_barrier_initialized) {
		synchronize_srcu_expedited(&c->btree_trans_barrier);
		cleanup_srcu_struct(&c->btree_trans_barrier);
	}
	mempool_exit(&c->btree_trans_mem_pool);
	mempool_exit(&c->btree_trans_pool);
}

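/*
 * Early init: nothing here can fail or allocate; this only initializes the
 * per-transaction-fn stats locks and the filesystem's list of active
 * transactions.
 */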
void bch2_fs_btree_iter_init_early(struct bch_fs *c)
{
	struct btree_transaction_stats *s;

	for (s = c->btree_transaction_stats;
	     s < c->btree_transaction_stats + ARRAY_SIZE(c->btree_transaction_stats);
	     s++) {
		bch2_time_stats_init(&s->duration);
		bch2_time_stats_init(&s->lock_hold_times);
		mutex_init(&s->lock);
	}

	INIT_LIST_HEAD(&c->btree_trans_list);
	seqmutex_init(&c->btree_trans_lock);
}

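/*
 * Full init: allocate the per-cpu btree_trans buffers, the btree_trans and
 * transaction memory mempools, and the btree_trans_barrier SRCU struct.
 * With lockdep enabled, also take and release a transaction inside
 * fs_reclaim_acquire()/fs_reclaim_release() so the reclaim vs. btree node
 * lock ordering is recorded up front.
 */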
int bch2_fs_btree_iter_init(struct bch_fs *c)
{
	int ret;

	c->btree_trans_bufs = alloc_percpu(struct btree_trans_buf);
	if (!c->btree_trans_bufs)
		return -ENOMEM;

	ret = mempool_init_kmalloc_pool(&c->btree_trans_pool, 1,
					sizeof(struct btree_trans)) ?:
	      mempool_init_kmalloc_pool(&c->btree_trans_mem_pool, 1,
					BTREE_TRANS_MEM_MAX) ?:
	      init_srcu_struct(&c->btree_trans_barrier);
	if (ret)
		return ret;

	/*
	 * static annotation (hackily done) for lock ordering of reclaim vs.
	 * btree node locks:
	 */
#ifdef CONFIG_LOCKDEP
	fs_reclaim_acquire(GFP_KERNEL);
	struct btree_trans *trans = bch2_trans_get(c);
	trans_set_locked(trans, false);
	bch2_trans_put(trans);
	fs_reclaim_release(GFP_KERNEL);
#endif

	c->btree_trans_barrier_initialized = true;
	return 0;
}