1 // SPDX-License-Identifier: GPL-2.0
2
3 #include "bcachefs.h"
4 #include "bkey_buf.h"
5 #include "bkey_methods.h"
6 #include "bkey_sort.h"
7 #include "btree_cache.h"
8 #include "btree_io.h"
9 #include "btree_iter.h"
10 #include "btree_locking.h"
11 #include "btree_update.h"
12 #include "btree_update_interior.h"
13 #include "buckets.h"
14 #include "checksum.h"
15 #include "debug.h"
16 #include "error.h"
17 #include "extents.h"
18 #include "io_write.h"
19 #include "journal_reclaim.h"
20 #include "journal_seq_blacklist.h"
21 #include "recovery.h"
22 #include "super-io.h"
23 #include "trace.h"
24
25 #include <linux/sched/mm.h>
26
27 static void bch2_btree_node_header_to_text(struct printbuf *out, struct btree_node *bn)
28 {
29 bch2_btree_id_level_to_text(out, BTREE_NODE_ID(bn), BTREE_NODE_LEVEL(bn));
30 prt_printf(out, " seq %llx %llu\n", bn->keys.seq, BTREE_NODE_SEQ(bn));
31 prt_str(out, "min: ");
32 bch2_bpos_to_text(out, bn->min_key);
33 prt_newline(out);
34 prt_str(out, "max: ");
35 bch2_bpos_to_text(out, bn->max_key);
36 }
37
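/*
 * Write completion: clear the write_in_flight flags and wake anyone waiting in
 * bch2_btree_node_io_lock() or the wait_on_write helpers.
 */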
38 void bch2_btree_node_io_unlock(struct btree *b)
39 {
40 EBUG_ON(!btree_node_write_in_flight(b));
41
42 clear_btree_node_write_in_flight_inner(b);
43 clear_btree_node_write_in_flight(b);
44 smp_mb__after_atomic();
45 wake_up_bit(&b->flags, BTREE_NODE_write_in_flight);
46 }
47
48 void bch2_btree_node_io_lock(struct btree *b)
49 {
50 wait_on_bit_lock_io(&b->flags, BTREE_NODE_write_in_flight,
51 TASK_UNINTERRUPTIBLE);
52 }
53
54 void __bch2_btree_node_wait_on_read(struct btree *b)
55 {
56 wait_on_bit_io(&b->flags, BTREE_NODE_read_in_flight,
57 TASK_UNINTERRUPTIBLE);
58 }
59
60 void __bch2_btree_node_wait_on_write(struct btree *b)
61 {
62 wait_on_bit_io(&b->flags, BTREE_NODE_write_in_flight,
63 TASK_UNINTERRUPTIBLE);
64 }
65
66 void bch2_btree_node_wait_on_read(struct btree *b)
67 {
68 wait_on_bit_io(&b->flags, BTREE_NODE_read_in_flight,
69 TASK_UNINTERRUPTIBLE);
70 }
71
72 void bch2_btree_node_wait_on_write(struct btree *b)
73 {
74 wait_on_bit_io(&b->flags, BTREE_NODE_write_in_flight,
75 TASK_UNINTERRUPTIBLE);
76 }
77
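/*
 * Debug-only (CONFIG_BCACHEFS_DEBUG) assertion that the packed keys in
 * [start, end) are strictly ordered and non-overlapping.
 */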
78 static void verify_no_dups(struct btree *b,
79 struct bkey_packed *start,
80 struct bkey_packed *end)
81 {
82 #ifdef CONFIG_BCACHEFS_DEBUG
83 struct bkey_packed *k, *p;
84
85 if (start == end)
86 return;
87
88 for (p = start, k = bkey_p_next(start);
89 k != end;
90 p = k, k = bkey_p_next(k)) {
91 struct bkey l = bkey_unpack_key(b, p);
92 struct bkey r = bkey_unpack_key(b, k);
93
94 BUG_ON(bpos_ge(l.p, bkey_start_pos(&r)));
95 }
96 #endif
97 }
98
99 static void set_needs_whiteout(struct bset *i, int v)
100 {
101 struct bkey_packed *k;
102
103 for (k = i->start; k != vstruct_last(i); k = bkey_p_next(k))
104 k->needs_whiteout = v;
105 }
106
107 static void btree_bounce_free(struct bch_fs *c, size_t size,
108 bool used_mempool, void *p)
109 {
110 if (used_mempool)
111 mempool_free(p, &c->btree_bounce_pool);
112 else
113 kvfree(p);
114 }
115
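/*
 * Bounce buffer for sorting/rewriting bsets: try a nowait kvmalloc() first and
 * fall back to the preallocated btree_bounce_pool; memalloc_nofs_save() keeps
 * the allocation from recursing into filesystem reclaim.
 */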
116 static void *btree_bounce_alloc(struct bch_fs *c, size_t size,
117 bool *used_mempool)
118 {
119 unsigned flags = memalloc_nofs_save();
120 void *p;
121
122 BUG_ON(size > c->opts.btree_node_size);
123
124 *used_mempool = false;
125 p = kvmalloc(size, __GFP_NOWARN|GFP_NOWAIT);
126 if (!p) {
127 *used_mempool = true;
128 p = mempool_alloc(&c->btree_bounce_pool, GFP_NOFS);
129 }
130 memalloc_nofs_restore(flags);
131 return p;
132 }
133
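/* Heapsort an array of pointers to packed bkeys, ordered by bch2_bkey_cmp_packed(). */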
134 static void sort_bkey_ptrs(const struct btree *bt,
135 struct bkey_packed **ptrs, unsigned nr)
136 {
137 unsigned n = nr, a = nr / 2, b, c, d;
138
139 if (!a)
140 return;
141
142 /* Heap sort: see lib/sort.c: */
143 while (1) {
144 if (a)
145 a--;
146 else if (--n)
147 swap(ptrs[0], ptrs[n]);
148 else
149 break;
150
151 for (b = a; c = 2 * b + 1, (d = c + 1) < n;)
152 b = bch2_bkey_cmp_packed(bt,
153 ptrs[c],
154 ptrs[d]) >= 0 ? c : d;
155 if (d == n)
156 b = c;
157
158 while (b != a &&
159 bch2_bkey_cmp_packed(bt,
160 ptrs[a],
161 ptrs[b]) >= 0)
162 b = (b - 1) / 2;
163 c = b;
164 while (b != a) {
165 b = (b - 1) / 2;
166 swap(ptrs[b], ptrs[c]);
167 }
168 }
169 }
170
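/*
 * Sort the unwritten whiteouts in place: collect pointers to them in a bounce
 * buffer, heapsort the pointers, then copy the keys back in sorted order.
 */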
171 static void bch2_sort_whiteouts(struct bch_fs *c, struct btree *b)
172 {
173 struct bkey_packed *new_whiteouts, **ptrs, **ptrs_end, *k;
174 bool used_mempool = false;
175 size_t bytes = b->whiteout_u64s * sizeof(u64);
176
177 if (!b->whiteout_u64s)
178 return;
179
180 new_whiteouts = btree_bounce_alloc(c, bytes, &used_mempool);
181
182 ptrs = ptrs_end = ((void *) new_whiteouts + bytes);
183
184 for (k = unwritten_whiteouts_start(b);
185 k != unwritten_whiteouts_end(b);
186 k = bkey_p_next(k))
187 *--ptrs = k;
188
189 sort_bkey_ptrs(b, ptrs, ptrs_end - ptrs);
190
191 k = new_whiteouts;
192
193 while (ptrs != ptrs_end) {
194 bkey_p_copy(k, *ptrs);
195 k = bkey_p_next(k);
196 ptrs++;
197 }
198
199 verify_no_dups(b, new_whiteouts,
200 (void *) ((u64 *) new_whiteouts + b->whiteout_u64s));
201
202 memcpy_u64s(unwritten_whiteouts_start(b),
203 new_whiteouts, b->whiteout_u64s);
204
205 btree_bounce_free(c, bytes, used_mempool, new_whiteouts);
206 }
207
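/*
 * Decide whether a bset is worth compacting: only if it has dead u64s, and then
 * either lazily (enough garbage, or we're already rewriting unwritten bsets) or
 * unconditionally for COMPACT_ALL.
 */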
208 static bool should_compact_bset(struct btree *b, struct bset_tree *t,
209 bool compacting, enum compact_mode mode)
210 {
211 if (!bset_dead_u64s(b, t))
212 return false;
213
214 switch (mode) {
215 case COMPACT_LAZY:
216 return should_compact_bset_lazy(b, t) ||
217 (compacting && !bset_written(b, bset(b, t)));
218 case COMPACT_ALL:
219 return true;
220 default:
221 BUG();
222 }
223 }
224
225 static bool bch2_drop_whiteouts(struct btree *b, enum compact_mode mode)
226 {
227 bool ret = false;
228
229 for_each_bset(b, t) {
230 struct bset *i = bset(b, t);
231 struct bkey_packed *k, *n, *out, *start, *end;
232 struct btree_node_entry *src = NULL, *dst = NULL;
233
234 if (t != b->set && !bset_written(b, i)) {
235 src = container_of(i, struct btree_node_entry, keys);
236 dst = max(write_block(b),
237 (void *) btree_bkey_last(b, t - 1));
238 }
239
240 if (src != dst)
241 ret = true;
242
243 if (!should_compact_bset(b, t, ret, mode)) {
244 if (src != dst) {
245 memmove(dst, src, sizeof(*src) +
246 le16_to_cpu(src->keys.u64s) *
247 sizeof(u64));
248 i = &dst->keys;
249 set_btree_bset(b, t, i);
250 }
251 continue;
252 }
253
254 start = btree_bkey_first(b, t);
255 end = btree_bkey_last(b, t);
256
257 if (src != dst) {
258 memmove(dst, src, sizeof(*src));
259 i = &dst->keys;
260 set_btree_bset(b, t, i);
261 }
262
263 out = i->start;
264
265 for (k = start; k != end; k = n) {
266 n = bkey_p_next(k);
267
268 if (!bkey_deleted(k)) {
269 bkey_p_copy(out, k);
270 out = bkey_p_next(out);
271 } else {
272 BUG_ON(k->needs_whiteout);
273 }
274 }
275
276 i->u64s = cpu_to_le16((u64 *) out - i->_data);
277 set_btree_bset_end(b, t);
278 bch2_bset_set_no_aux_tree(b, t);
279 ret = true;
280 }
281
282 bch2_verify_btree_nr_keys(b);
283
284 bch2_btree_build_aux_trees(b);
285
286 return ret;
287 }
288
289 bool bch2_compact_whiteouts(struct bch_fs *c, struct btree *b,
290 enum compact_mode mode)
291 {
292 return bch2_drop_whiteouts(b, mode);
293 }
294
295 static void btree_node_sort(struct bch_fs *c, struct btree *b,
296 unsigned start_idx,
297 unsigned end_idx)
298 {
299 struct btree_node *out;
300 struct sort_iter_stack sort_iter;
301 struct bset_tree *t;
302 struct bset *start_bset = bset(b, &b->set[start_idx]);
303 bool used_mempool = false;
304 u64 start_time, seq = 0;
305 unsigned i, u64s = 0, bytes, shift = end_idx - start_idx - 1;
306 bool sorting_entire_node = start_idx == 0 &&
307 end_idx == b->nsets;
308
309 sort_iter_stack_init(&sort_iter, b);
310
311 for (t = b->set + start_idx;
312 t < b->set + end_idx;
313 t++) {
314 u64s += le16_to_cpu(bset(b, t)->u64s);
315 sort_iter_add(&sort_iter.iter,
316 btree_bkey_first(b, t),
317 btree_bkey_last(b, t));
318 }
319
320 bytes = sorting_entire_node
321 ? btree_buf_bytes(b)
322 : __vstruct_bytes(struct btree_node, u64s);
323
324 out = btree_bounce_alloc(c, bytes, &used_mempool);
325
326 start_time = local_clock();
327
328 u64s = bch2_sort_keys(out->keys.start, &sort_iter.iter);
329
330 out->keys.u64s = cpu_to_le16(u64s);
331
332 BUG_ON(vstruct_end(&out->keys) > (void *) out + bytes);
333
334 if (sorting_entire_node)
335 bch2_time_stats_update(&c->times[BCH_TIME_btree_node_sort],
336 start_time);
337
338 /* Make sure we preserve bset journal_seq: */
339 for (t = b->set + start_idx; t < b->set + end_idx; t++)
340 seq = max(seq, le64_to_cpu(bset(b, t)->journal_seq));
341 start_bset->journal_seq = cpu_to_le64(seq);
342
343 if (sorting_entire_node) {
344 u64s = le16_to_cpu(out->keys.u64s);
345
346 BUG_ON(bytes != btree_buf_bytes(b));
347
348 /*
349 * Our temporary buffer is the same size as the btree node's
350  * buffer, so we can just swap buffers instead of doing a big
351 * memcpy()
352 */
353 *out = *b->data;
354 out->keys.u64s = cpu_to_le16(u64s);
355 swap(out, b->data);
356 set_btree_bset(b, b->set, &b->data->keys);
357 } else {
358 start_bset->u64s = out->keys.u64s;
359 memcpy_u64s(start_bset->start,
360 out->keys.start,
361 le16_to_cpu(out->keys.u64s));
362 }
363
364 for (i = start_idx + 1; i < end_idx; i++)
365 b->nr.bset_u64s[start_idx] +=
366 b->nr.bset_u64s[i];
367
368 b->nsets -= shift;
369
370 for (i = start_idx + 1; i < b->nsets; i++) {
371 b->nr.bset_u64s[i] = b->nr.bset_u64s[i + shift];
372 b->set[i] = b->set[i + shift];
373 }
374
375 for (i = b->nsets; i < MAX_BSETS; i++)
376 b->nr.bset_u64s[i] = 0;
377
378 set_btree_bset_end(b, &b->set[start_idx]);
379 bch2_bset_set_no_aux_tree(b, &b->set[start_idx]);
380
381 btree_bounce_free(c, bytes, used_mempool, out);
382
383 bch2_verify_btree_nr_keys(b);
384 }
385
386 void bch2_btree_sort_into(struct bch_fs *c,
387 struct btree *dst,
388 struct btree *src)
389 {
390 struct btree_nr_keys nr;
391 struct btree_node_iter src_iter;
392 u64 start_time = local_clock();
393
394 BUG_ON(dst->nsets != 1);
395
396 bch2_bset_set_no_aux_tree(dst, dst->set);
397
398 bch2_btree_node_iter_init_from_start(&src_iter, src);
399
400 nr = bch2_sort_repack(btree_bset_first(dst),
401 src, &src_iter,
402 &dst->format,
403 true);
404
405 bch2_time_stats_update(&c->times[BCH_TIME_btree_node_sort],
406 start_time);
407
408 set_btree_bset_end(dst, dst->set);
409
410 dst->nr.live_u64s += nr.live_u64s;
411 dst->nr.bset_u64s[0] += nr.bset_u64s[0];
412 dst->nr.packed_keys += nr.packed_keys;
413 dst->nr.unpacked_keys += nr.unpacked_keys;
414
415 bch2_verify_btree_nr_keys(dst);
416 }
417
418 /*
419  * We're about to add another bset to the btree node, so if there are currently
420  * too many bsets, sort some of them together:
421 */
422 static bool btree_node_compact(struct bch_fs *c, struct btree *b)
423 {
424 unsigned unwritten_idx;
425 bool ret = false;
426
427 for (unwritten_idx = 0;
428 unwritten_idx < b->nsets;
429 unwritten_idx++)
430 if (!bset_written(b, bset(b, &b->set[unwritten_idx])))
431 break;
432
433 if (b->nsets - unwritten_idx > 1) {
434 btree_node_sort(c, b, unwritten_idx, b->nsets);
435 ret = true;
436 }
437
438 if (unwritten_idx > 1) {
439 btree_node_sort(c, b, 0, unwritten_idx);
440 ret = true;
441 }
442
443 return ret;
444 }
445
446 void bch2_btree_build_aux_trees(struct btree *b)
447 {
448 for_each_bset(b, t)
449 bch2_bset_build_aux_tree(b, t,
450 !bset_written(b, bset(b, t)) &&
451 t == bset_tree_last(b));
452 }
453
454 /*
455 * If we have MAX_BSETS (3) bsets, should we sort them all down to just one?
456 *
457  * The first bset is going to be of similar order to the size of the node; the
458 * last bset is bounded by btree_write_set_buffer(), which is set to keep the
459 * memmove on insert from being too expensive: the middle bset should, ideally,
460 * be the geometric mean of the first and the last.
461 *
462 * Returns true if the middle bset is greater than that geometric mean:
463 */
464 static inline bool should_compact_all(struct bch_fs *c, struct btree *b)
465 {
466 unsigned mid_u64s_bits =
467 (ilog2(btree_max_u64s(c)) + BTREE_WRITE_SET_U64s_BITS) / 2;
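/*
 * Averaging the log2 sizes gives the log2 of the geometric mean:
 * log2(sqrt(first * last)) = (log2(first) + log2(last)) / 2
 */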
468
469 return bset_u64s(&b->set[1]) > 1U << mid_u64s_bits;
470 }
471
472 /*
473  * bch2_btree_init_next - initialize a new (unwritten) bset that can then be
474  * inserted into
475  *
476  * Safe to call if there already is an unwritten bset - will only add a new bset
477  * if @b doesn't already have one.
478  *
479  * If this sorts existing bsets (invalidating iterators), the iterators are reinitialized.
480  */
481 void bch2_btree_init_next(struct btree_trans *trans, struct btree *b)
482 {
483 struct bch_fs *c = trans->c;
484 struct btree_node_entry *bne;
485 bool reinit_iter = false;
486
487 EBUG_ON(!six_lock_counts(&b->c.lock).n[SIX_LOCK_write]);
488 BUG_ON(bset_written(b, bset(b, &b->set[1])));
489 BUG_ON(btree_node_just_written(b));
490
491 if (b->nsets == MAX_BSETS &&
492 !btree_node_write_in_flight(b) &&
493 should_compact_all(c, b)) {
494 bch2_btree_node_write_trans(trans, b, SIX_LOCK_write,
495 BTREE_WRITE_init_next_bset);
496 reinit_iter = true;
497 }
498
499 if (b->nsets == MAX_BSETS &&
500 btree_node_compact(c, b))
501 reinit_iter = true;
502
503 BUG_ON(b->nsets >= MAX_BSETS);
504
505 bne = want_new_bset(c, b);
506 if (bne)
507 bch2_bset_init_next(b, bne);
508
509 bch2_btree_build_aux_trees(b);
510
511 if (reinit_iter)
512 bch2_trans_node_reinit_iter(trans, b);
513 }
514
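/*
 * Common prefix for btree node read/write error messages: device, btree
 * position, node offset/sectors written, and (when available) bset/key info.
 */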
515 static void btree_err_msg(struct printbuf *out, struct bch_fs *c,
516 struct bch_dev *ca,
517 struct btree *b, struct bset *i, struct bkey_packed *k,
518 unsigned offset, int write)
519 {
520 prt_printf(out, bch2_log_msg(c, "%s"),
521 write == READ
522 ? "error validating btree node "
523 : "corrupt btree node before write ");
524 if (ca)
525 prt_printf(out, "on %s ", ca->name);
526 prt_printf(out, "at btree ");
527 bch2_btree_pos_to_text(out, c, b);
528
529 prt_printf(out, "\nnode offset %u/%u",
530 b->written, btree_ptr_sectors_written(bkey_i_to_s_c(&b->key)));
531 if (i)
532 prt_printf(out, " bset u64s %u", le16_to_cpu(i->u64s));
533 if (k)
534 prt_printf(out, " bset byte offset %lu",
535 (unsigned long)(void *)k -
536 ((unsigned long)(void *)i & ~511UL));
537 prt_str(out, ": ");
538 }
539
540 __printf(10, 11)
541 static int __btree_err(int ret,
542 struct bch_fs *c,
543 struct bch_dev *ca,
544 struct btree *b,
545 struct bset *i,
546 struct bkey_packed *k,
547 int write,
548 bool have_retry,
549 enum bch_sb_error_id err_type,
550 const char *fmt, ...)
551 {
552 bool silent = c->curr_recovery_pass == BCH_RECOVERY_PASS_scan_for_btree_nodes;
553
554 if (!have_retry && ret == -BCH_ERR_btree_node_read_err_want_retry)
555 ret = -BCH_ERR_btree_node_read_err_fixable;
556 if (!have_retry && ret == -BCH_ERR_btree_node_read_err_must_retry)
557 ret = -BCH_ERR_btree_node_read_err_bad_node;
558
559 if (!silent && ret != -BCH_ERR_btree_node_read_err_fixable)
560 bch2_sb_error_count(c, err_type);
561
562 struct printbuf out = PRINTBUF;
563 if (write != WRITE && ret != -BCH_ERR_btree_node_read_err_fixable) {
564 printbuf_indent_add_nextline(&out, 2);
565 #ifdef BCACHEFS_LOG_PREFIX
566 prt_printf(&out, bch2_log_msg(c, ""));
567 #endif
568 }
569
570 btree_err_msg(&out, c, ca, b, i, k, b->written, write);
571
572 va_list args;
573 va_start(args, fmt);
574 prt_vprintf(&out, fmt, args);
575 va_end(args);
576
577 if (write == WRITE) {
578 prt_str(&out, ", ");
579 ret = __bch2_inconsistent_error(c, &out)
580 ? -BCH_ERR_fsck_errors_not_fixed
581 : 0;
582 silent = false;
583 }
584
585 switch (ret) {
586 case -BCH_ERR_btree_node_read_err_fixable:
587 ret = !silent
588 ? __bch2_fsck_err(c, NULL, FSCK_CAN_FIX, err_type, "%s", out.buf)
589 : -BCH_ERR_fsck_fix;
590 if (ret != -BCH_ERR_fsck_fix &&
591 ret != -BCH_ERR_fsck_ignore)
592 goto fsck_err;
593 ret = -BCH_ERR_fsck_fix;
594 goto out;
595 case -BCH_ERR_btree_node_read_err_bad_node:
596 prt_str(&out, ", ");
597 ret = __bch2_topology_error(c, &out);
598 if (ret)
599 silent = false;
600 break;
601 case -BCH_ERR_btree_node_read_err_incompatible:
602 ret = -BCH_ERR_fsck_errors_not_fixed;
603 silent = false;
604 break;
605 }
606
607 if (!silent)
608 bch2_print_string_as_lines(KERN_ERR, out.buf);
609 out:
610 fsck_err:
611 printbuf_exit(&out);
612 return ret;
613 }
614
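/*
 * Wrapper around __btree_err(): if the error wasn't fixed, stash it in ret and
 * jump to the caller's fsck_err label; otherwise record that we saw an error.
 */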
615 #define btree_err(type, c, ca, b, i, k, _err_type, msg, ...) \
616 ({ \
617 int _ret = __btree_err(type, c, ca, b, i, k, write, have_retry, \
618 BCH_FSCK_ERR_##_err_type, \
619 msg, ##__VA_ARGS__); \
620 \
621 if (_ret != -BCH_ERR_fsck_fix) { \
622 ret = _ret; \
623 goto fsck_err; \
624 } \
625 \
626 *saw_error = true; \
627 })
628
629 #define btree_err_on(cond, ...) ((cond) ? btree_err(__VA_ARGS__) : false)
630
631 /*
632 * When btree topology repair changes the start or end of a node, that might
633 * mean we have to drop keys that are no longer inside the node:
634 */
635 __cold
636 void bch2_btree_node_drop_keys_outside_node(struct btree *b)
637 {
638 for_each_bset(b, t) {
639 struct bset *i = bset(b, t);
640 struct bkey_packed *k;
641
642 for (k = i->start; k != vstruct_last(i); k = bkey_p_next(k))
643 if (bkey_cmp_left_packed(b, k, &b->data->min_key) >= 0)
644 break;
645
646 if (k != i->start) {
647 unsigned shift = (u64 *) k - (u64 *) i->start;
648
649 memmove_u64s_down(i->start, k,
650 (u64 *) vstruct_end(i) - (u64 *) k);
651 i->u64s = cpu_to_le16(le16_to_cpu(i->u64s) - shift);
652 set_btree_bset_end(b, t);
653 }
654
655 for (k = i->start; k != vstruct_last(i); k = bkey_p_next(k))
656 if (bkey_cmp_left_packed(b, k, &b->data->max_key) > 0)
657 break;
658
659 if (k != vstruct_last(i)) {
660 i->u64s = cpu_to_le16((u64 *) k - (u64 *) i->start);
661 set_btree_bset_end(b, t);
662 }
663 }
664
665 /*
666 * Always rebuild search trees: eytzinger search tree nodes directly
667 * depend on the values of min/max key:
668 */
669 bch2_bset_set_no_aux_tree(b, b->set);
670 bch2_btree_build_aux_trees(b);
671 b->nr = bch2_btree_node_count_keys(b);
672
673 struct bkey_s_c k;
674 struct bkey unpacked;
675 struct btree_node_iter iter;
676 for_each_btree_node_key_unpack(b, k, &iter, &unpacked) {
677 BUG_ON(bpos_lt(k.k->p, b->data->min_key));
678 BUG_ON(bpos_gt(k.k->p, b->data->max_key));
679 }
680 }
681
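/*
 * Validate a single bset's header: version checks, offset/size sanity, and for
 * the first bset in the node also the btree node header itself (seq, btree id,
 * level, min/max keys, bkey format).
 */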
682 static int validate_bset(struct bch_fs *c, struct bch_dev *ca,
683 struct btree *b, struct bset *i,
684 unsigned offset, unsigned sectors,
685 int write, bool have_retry, bool *saw_error)
686 {
687 unsigned version = le16_to_cpu(i->version);
688 unsigned ptr_written = btree_ptr_sectors_written(bkey_i_to_s_c(&b->key));
689 struct printbuf buf1 = PRINTBUF;
690 struct printbuf buf2 = PRINTBUF;
691 int ret = 0;
692
693 btree_err_on(!bch2_version_compatible(version),
694 -BCH_ERR_btree_node_read_err_incompatible,
695 c, ca, b, i, NULL,
696 btree_node_unsupported_version,
697 "unsupported bset version %u.%u",
698 BCH_VERSION_MAJOR(version),
699 BCH_VERSION_MINOR(version));
700
701 if (btree_err_on(version < c->sb.version_min,
702 -BCH_ERR_btree_node_read_err_fixable,
703 c, NULL, b, i, NULL,
704 btree_node_bset_older_than_sb_min,
705 "bset version %u older than superblock version_min %u",
706 version, c->sb.version_min)) {
707 mutex_lock(&c->sb_lock);
708 c->disk_sb.sb->version_min = cpu_to_le16(version);
709 bch2_write_super(c);
710 mutex_unlock(&c->sb_lock);
711 }
712
713 if (btree_err_on(BCH_VERSION_MAJOR(version) >
714 BCH_VERSION_MAJOR(c->sb.version),
715 -BCH_ERR_btree_node_read_err_fixable,
716 c, NULL, b, i, NULL,
717 btree_node_bset_newer_than_sb,
718 "bset version %u newer than superblock version %u",
719 version, c->sb.version)) {
720 mutex_lock(&c->sb_lock);
721 c->disk_sb.sb->version = cpu_to_le16(version);
722 bch2_write_super(c);
723 mutex_unlock(&c->sb_lock);
724 }
725
726 btree_err_on(BSET_SEPARATE_WHITEOUTS(i),
727 -BCH_ERR_btree_node_read_err_incompatible,
728 c, ca, b, i, NULL,
729 btree_node_unsupported_version,
730 "BSET_SEPARATE_WHITEOUTS no longer supported");
731
732 if (!write &&
733 btree_err_on(offset + sectors > (ptr_written ?: btree_sectors(c)),
734 -BCH_ERR_btree_node_read_err_fixable,
735 c, ca, b, i, NULL,
736 bset_past_end_of_btree_node,
737 "bset past end of btree node (offset %u len %u but written %zu)",
738 offset, sectors, ptr_written ?: btree_sectors(c)))
739 i->u64s = 0;
740
741 btree_err_on(offset && !i->u64s,
742 -BCH_ERR_btree_node_read_err_fixable,
743 c, ca, b, i, NULL,
744 bset_empty,
745 "empty bset");
746
747 btree_err_on(BSET_OFFSET(i) && BSET_OFFSET(i) != offset,
748 -BCH_ERR_btree_node_read_err_want_retry,
749 c, ca, b, i, NULL,
750 bset_wrong_sector_offset,
751 "bset at wrong sector offset");
752
753 if (!offset) {
754 struct btree_node *bn =
755 container_of(i, struct btree_node, keys);
756 /* These indicate that we read the wrong btree node: */
757
758 if (b->key.k.type == KEY_TYPE_btree_ptr_v2) {
759 struct bch_btree_ptr_v2 *bp =
760 &bkey_i_to_btree_ptr_v2(&b->key)->v;
761
762 /* XXX endianness */
763 btree_err_on(bp->seq != bn->keys.seq,
764 -BCH_ERR_btree_node_read_err_must_retry,
765 c, ca, b, NULL, NULL,
766 bset_bad_seq,
767 "incorrect sequence number (wrong btree node)");
768 }
769
770 btree_err_on(BTREE_NODE_ID(bn) != b->c.btree_id,
771 -BCH_ERR_btree_node_read_err_must_retry,
772 c, ca, b, i, NULL,
773 btree_node_bad_btree,
774 "incorrect btree id");
775
776 btree_err_on(BTREE_NODE_LEVEL(bn) != b->c.level,
777 -BCH_ERR_btree_node_read_err_must_retry,
778 c, ca, b, i, NULL,
779 btree_node_bad_level,
780 "incorrect level");
781
782 if (!write)
783 compat_btree_node(b->c.level, b->c.btree_id, version,
784 BSET_BIG_ENDIAN(i), write, bn);
785
786 if (b->key.k.type == KEY_TYPE_btree_ptr_v2) {
787 struct bch_btree_ptr_v2 *bp =
788 &bkey_i_to_btree_ptr_v2(&b->key)->v;
789
790 if (BTREE_PTR_RANGE_UPDATED(bp)) {
791 b->data->min_key = bp->min_key;
792 b->data->max_key = b->key.k.p;
793 }
794
795 btree_err_on(!bpos_eq(b->data->min_key, bp->min_key),
796 -BCH_ERR_btree_node_read_err_must_retry,
797 c, ca, b, NULL, NULL,
798 btree_node_bad_min_key,
799 "incorrect min_key: got %s should be %s",
800 (printbuf_reset(&buf1),
801 bch2_bpos_to_text(&buf1, bn->min_key), buf1.buf),
802 (printbuf_reset(&buf2),
803 bch2_bpos_to_text(&buf2, bp->min_key), buf2.buf));
804 }
805
806 btree_err_on(!bpos_eq(bn->max_key, b->key.k.p),
807 -BCH_ERR_btree_node_read_err_must_retry,
808 c, ca, b, i, NULL,
809 btree_node_bad_max_key,
810 "incorrect max key %s",
811 (printbuf_reset(&buf1),
812 bch2_bpos_to_text(&buf1, bn->max_key), buf1.buf));
813
814 if (write)
815 compat_btree_node(b->c.level, b->c.btree_id, version,
816 BSET_BIG_ENDIAN(i), write, bn);
817
818 btree_err_on(bch2_bkey_format_invalid(c, &bn->format, write, &buf1),
819 -BCH_ERR_btree_node_read_err_bad_node,
820 c, ca, b, i, NULL,
821 btree_node_bad_format,
822 "invalid bkey format: %s\n%s", buf1.buf,
823 (printbuf_reset(&buf2),
824 bch2_bkey_format_to_text(&buf2, &bn->format), buf2.buf));
825 printbuf_reset(&buf1);
826
827 compat_bformat(b->c.level, b->c.btree_id, version,
828 BSET_BIG_ENDIAN(i), write,
829 &bn->format);
830 }
831 fsck_err:
832 printbuf_exit(&buf2);
833 printbuf_exit(&buf1);
834 return ret;
835 }
836
837 static int btree_node_bkey_val_validate(struct bch_fs *c, struct btree *b,
838 struct bkey_s_c k,
839 enum bch_validate_flags flags)
840 {
841 return bch2_bkey_val_validate(c, k, (struct bkey_validate_context) {
842 .from = BKEY_VALIDATE_btree_node,
843 .level = b->c.level,
844 .btree = b->c.btree_id,
845 .flags = flags
846 });
847 }
848
849 static int bset_key_validate(struct bch_fs *c, struct btree *b,
850 struct bkey_s_c k,
851 bool updated_range,
852 enum bch_validate_flags flags)
853 {
854 struct bkey_validate_context from = (struct bkey_validate_context) {
855 .from = BKEY_VALIDATE_btree_node,
856 .level = b->c.level,
857 .btree = b->c.btree_id,
858 .flags = flags,
859 };
860 return __bch2_bkey_validate(c, k, from) ?:
861 (!updated_range ? bch2_bkey_in_btree_node(c, b, k, from) : 0) ?:
862 (flags & BCH_VALIDATE_write ? btree_node_bkey_val_validate(c, b, k, flags) : 0);
863 }
864
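/*
 * Quick check used when scanning forward for the next good key after dropping a
 * bad one: structural checks on the packed key plus a silent full validate.
 */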
865 static bool bkey_packed_valid(struct bch_fs *c, struct btree *b,
866 struct bset *i, struct bkey_packed *k)
867 {
868 if (bkey_p_next(k) > vstruct_last(i))
869 return false;
870
871 if (k->format > KEY_FORMAT_CURRENT)
872 return false;
873
874 if (!bkeyp_u64s_valid(&b->format, k))
875 return false;
876
877 struct bkey tmp;
878 struct bkey_s u = __bkey_disassemble(b, k, &tmp);
879 return !__bch2_bkey_validate(c, u.s_c,
880 (struct bkey_validate_context) {
881 .from = BKEY_VALIDATE_btree_node,
882 .level = b->c.level,
883 .btree = b->c.btree_id,
884 .flags = BCH_VALIDATE_silent
885 });
886 }
887
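/*
 * Ordering check for keys as read off disk: compare by position, with deleted
 * keys sorting before non-deleted keys at the same position.
 */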
888 static inline int btree_node_read_bkey_cmp(const struct btree *b,
889 const struct bkey_packed *l,
890 const struct bkey_packed *r)
891 {
892 return bch2_bkey_cmp_packed(b, l, r)
893 ?: (int) bkey_deleted(r) - (int) bkey_deleted(l);
894 }
895
896 static int validate_bset_keys(struct bch_fs *c, struct btree *b,
897 struct bset *i, int write,
898 bool have_retry, bool *saw_error)
899 {
900 unsigned version = le16_to_cpu(i->version);
901 struct bkey_packed *k, *prev = NULL;
902 struct printbuf buf = PRINTBUF;
903 bool updated_range = b->key.k.type == KEY_TYPE_btree_ptr_v2 &&
904 BTREE_PTR_RANGE_UPDATED(&bkey_i_to_btree_ptr_v2(&b->key)->v);
905 int ret = 0;
906
907 for (k = i->start;
908 k != vstruct_last(i);) {
909 struct bkey_s u;
910 struct bkey tmp;
911 unsigned next_good_key;
912
913 if (btree_err_on(bkey_p_next(k) > vstruct_last(i),
914 -BCH_ERR_btree_node_read_err_fixable,
915 c, NULL, b, i, k,
916 btree_node_bkey_past_bset_end,
917 "key extends past end of bset")) {
918 i->u64s = cpu_to_le16((u64 *) k - i->_data);
919 break;
920 }
921
922 if (btree_err_on(k->format > KEY_FORMAT_CURRENT,
923 -BCH_ERR_btree_node_read_err_fixable,
924 c, NULL, b, i, k,
925 btree_node_bkey_bad_format,
926 "invalid bkey format %u", k->format))
927 goto drop_this_key;
928
929 if (btree_err_on(!bkeyp_u64s_valid(&b->format, k),
930 -BCH_ERR_btree_node_read_err_fixable,
931 c, NULL, b, i, k,
932 btree_node_bkey_bad_u64s,
933 "bad k->u64s %u (min %u max %zu)", k->u64s,
934 bkeyp_key_u64s(&b->format, k),
935 U8_MAX - BKEY_U64s + bkeyp_key_u64s(&b->format, k)))
936 goto drop_this_key;
937
938 if (!write)
939 bch2_bkey_compat(b->c.level, b->c.btree_id, version,
940 BSET_BIG_ENDIAN(i), write,
941 &b->format, k);
942
943 u = __bkey_disassemble(b, k, &tmp);
944
945 ret = bset_key_validate(c, b, u.s_c, updated_range, write);
946 if (ret == -BCH_ERR_fsck_delete_bkey)
947 goto drop_this_key;
948 if (ret)
949 goto fsck_err;
950
951 if (write)
952 bch2_bkey_compat(b->c.level, b->c.btree_id, version,
953 BSET_BIG_ENDIAN(i), write,
954 &b->format, k);
955
956 if (prev && btree_node_read_bkey_cmp(b, prev, k) >= 0) {
957 struct bkey up = bkey_unpack_key(b, prev);
958
959 printbuf_reset(&buf);
960 prt_printf(&buf, "keys out of order: ");
961 bch2_bkey_to_text(&buf, &up);
962 prt_printf(&buf, " > ");
963 bch2_bkey_to_text(&buf, u.k);
964
965 if (btree_err(-BCH_ERR_btree_node_read_err_fixable,
966 c, NULL, b, i, k,
967 btree_node_bkey_out_of_order,
968 "%s", buf.buf))
969 goto drop_this_key;
970 }
971
972 prev = k;
973 k = bkey_p_next(k);
974 continue;
975 drop_this_key:
976 next_good_key = k->u64s;
977
978 if (!next_good_key ||
979 (BSET_BIG_ENDIAN(i) == CPU_BIG_ENDIAN &&
980 version >= bcachefs_metadata_version_snapshot)) {
981 /*
982 * only do scanning if bch2_bkey_compat() has nothing to
983 * do
984 */
985
986 if (!bkey_packed_valid(c, b, i, (void *) ((u64 *) k + next_good_key))) {
987 for (next_good_key = 1;
988 next_good_key < (u64 *) vstruct_last(i) - (u64 *) k;
989 next_good_key++)
990 if (bkey_packed_valid(c, b, i, (void *) ((u64 *) k + next_good_key)))
991 goto got_good_key;
992 }
993
994 /*
995 * didn't find a good key, have to truncate the rest of
996 * the bset
997 */
998 next_good_key = (u64 *) vstruct_last(i) - (u64 *) k;
999 }
1000 got_good_key:
1001 le16_add_cpu(&i->u64s, -next_good_key);
1002 memmove_u64s_down(k, (u64 *) k + next_good_key, (u64 *) vstruct_end(i) - (u64 *) k);
1003 set_btree_node_need_rewrite(b);
1004 }
1005 fsck_err:
1006 printbuf_exit(&buf);
1007 return ret;
1008 }
1009
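/*
 * Main read-side validation path: walk each bset in the node, verifying
 * checksums, decrypting, and validating headers and keys; skip bsets with
 * blacklisted journal sequence numbers, sort everything into a single bset,
 * then re-validate key values and drop any bad keys.
 */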
1010 int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca,
1011 struct btree *b, bool have_retry, bool *saw_error)
1012 {
1013 struct btree_node_entry *bne;
1014 struct sort_iter *iter;
1015 struct btree_node *sorted;
1016 struct bkey_packed *k;
1017 struct bset *i;
1018 bool used_mempool, blacklisted;
1019 bool updated_range = b->key.k.type == KEY_TYPE_btree_ptr_v2 &&
1020 BTREE_PTR_RANGE_UPDATED(&bkey_i_to_btree_ptr_v2(&b->key)->v);
1021 unsigned u64s;
1022 unsigned ptr_written = btree_ptr_sectors_written(bkey_i_to_s_c(&b->key));
1023 u64 max_journal_seq = 0;
1024 struct printbuf buf = PRINTBUF;
1025 int ret = 0, retry_read = 0, write = READ;
1026 u64 start_time = local_clock();
1027
1028 b->version_ondisk = U16_MAX;
1029 /* We might get called multiple times on read retry: */
1030 b->written = 0;
1031
1032 iter = mempool_alloc(&c->fill_iter, GFP_NOFS);
1033 sort_iter_init(iter, b, (btree_blocks(c) + 1) * 2);
1034
1035 if (bch2_meta_read_fault("btree"))
1036 btree_err(-BCH_ERR_btree_node_read_err_must_retry,
1037 c, ca, b, NULL, NULL,
1038 btree_node_fault_injected,
1039 "dynamic fault");
1040
1041 btree_err_on(le64_to_cpu(b->data->magic) != bset_magic(c),
1042 -BCH_ERR_btree_node_read_err_must_retry,
1043 c, ca, b, NULL, NULL,
1044 btree_node_bad_magic,
1045 "bad magic: want %llx, got %llx",
1046 bset_magic(c), le64_to_cpu(b->data->magic));
1047
1048 if (b->key.k.type == KEY_TYPE_btree_ptr_v2) {
1049 struct bch_btree_ptr_v2 *bp =
1050 &bkey_i_to_btree_ptr_v2(&b->key)->v;
1051
1052 bch2_bpos_to_text(&buf, b->data->min_key);
1053 prt_str(&buf, "-");
1054 bch2_bpos_to_text(&buf, b->data->max_key);
1055
1056 btree_err_on(b->data->keys.seq != bp->seq,
1057 -BCH_ERR_btree_node_read_err_must_retry,
1058 c, ca, b, NULL, NULL,
1059 btree_node_bad_seq,
1060 "got wrong btree node: got\n%s",
1061 (printbuf_reset(&buf),
1062 bch2_btree_node_header_to_text(&buf, b->data),
1063 buf.buf));
1064 } else {
1065 btree_err_on(!b->data->keys.seq,
1066 -BCH_ERR_btree_node_read_err_must_retry,
1067 c, ca, b, NULL, NULL,
1068 btree_node_bad_seq,
1069 "bad btree header: seq 0\n%s",
1070 (printbuf_reset(&buf),
1071 bch2_btree_node_header_to_text(&buf, b->data),
1072 buf.buf));
1073 }
1074
1075 while (b->written < (ptr_written ?: btree_sectors(c))) {
1076 unsigned sectors;
1077 bool first = !b->written;
1078
1079 if (first) {
1080 bne = NULL;
1081 i = &b->data->keys;
1082 } else {
1083 bne = write_block(b);
1084 i = &bne->keys;
1085
1086 if (i->seq != b->data->keys.seq)
1087 break;
1088 }
1089
1090 struct nonce nonce = btree_nonce(i, b->written << 9);
1091 bool good_csum_type = bch2_checksum_type_valid(c, BSET_CSUM_TYPE(i));
1092
1093 btree_err_on(!good_csum_type,
1094 bch2_csum_type_is_encryption(BSET_CSUM_TYPE(i))
1095 ? -BCH_ERR_btree_node_read_err_must_retry
1096 : -BCH_ERR_btree_node_read_err_want_retry,
1097 c, ca, b, i, NULL,
1098 bset_unknown_csum,
1099 "unknown checksum type %llu", BSET_CSUM_TYPE(i));
1100
1101 if (first) {
1102 if (good_csum_type) {
1103 struct bch_csum csum = csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, b->data);
1104 bool csum_bad = bch2_crc_cmp(b->data->csum, csum);
1105 if (csum_bad)
1106 bch2_io_error(ca, BCH_MEMBER_ERROR_checksum);
1107
1108 btree_err_on(csum_bad,
1109 -BCH_ERR_btree_node_read_err_want_retry,
1110 c, ca, b, i, NULL,
1111 bset_bad_csum,
1112 "%s",
1113 (printbuf_reset(&buf),
1114 bch2_csum_err_msg(&buf, BSET_CSUM_TYPE(i), b->data->csum, csum),
1115 buf.buf));
1116
1117 ret = bset_encrypt(c, i, b->written << 9);
1118 if (bch2_fs_fatal_err_on(ret, c,
1119 "decrypting btree node: %s", bch2_err_str(ret)))
1120 goto fsck_err;
1121 }
1122
1123 btree_err_on(btree_node_type_is_extents(btree_node_type(b)) &&
1124 !BTREE_NODE_NEW_EXTENT_OVERWRITE(b->data),
1125 -BCH_ERR_btree_node_read_err_incompatible,
1126 c, NULL, b, NULL, NULL,
1127 btree_node_unsupported_version,
1128 "btree node does not have NEW_EXTENT_OVERWRITE set");
1129
1130 sectors = vstruct_sectors(b->data, c->block_bits);
1131 } else {
1132 if (good_csum_type) {
1133 struct bch_csum csum = csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, bne);
1134 bool csum_bad = bch2_crc_cmp(bne->csum, csum);
1135 if (ca && csum_bad)
1136 bch2_io_error(ca, BCH_MEMBER_ERROR_checksum);
1137
1138 btree_err_on(csum_bad,
1139 -BCH_ERR_btree_node_read_err_want_retry,
1140 c, ca, b, i, NULL,
1141 bset_bad_csum,
1142 "%s",
1143 (printbuf_reset(&buf),
1144 bch2_csum_err_msg(&buf, BSET_CSUM_TYPE(i), bne->csum, csum),
1145 buf.buf));
1146
1147 ret = bset_encrypt(c, i, b->written << 9);
1148 if (bch2_fs_fatal_err_on(ret, c,
1149 "decrypting btree node: %s", bch2_err_str(ret)))
1150 goto fsck_err;
1151 }
1152
1153 sectors = vstruct_sectors(bne, c->block_bits);
1154 }
1155
1156 b->version_ondisk = min(b->version_ondisk,
1157 le16_to_cpu(i->version));
1158
1159 ret = validate_bset(c, ca, b, i, b->written, sectors,
1160 READ, have_retry, saw_error);
1161 if (ret)
1162 goto fsck_err;
1163
1164 if (!b->written)
1165 btree_node_set_format(b, b->data->format);
1166
1167 ret = validate_bset_keys(c, b, i, READ, have_retry, saw_error);
1168 if (ret)
1169 goto fsck_err;
1170
1171 SET_BSET_BIG_ENDIAN(i, CPU_BIG_ENDIAN);
1172
1173 blacklisted = bch2_journal_seq_is_blacklisted(c,
1174 le64_to_cpu(i->journal_seq),
1175 true);
1176
1177 btree_err_on(blacklisted && first,
1178 -BCH_ERR_btree_node_read_err_fixable,
1179 c, ca, b, i, NULL,
1180 bset_blacklisted_journal_seq,
1181 "first btree node bset has blacklisted journal seq (%llu)",
1182 le64_to_cpu(i->journal_seq));
1183
1184 btree_err_on(blacklisted && ptr_written,
1185 -BCH_ERR_btree_node_read_err_fixable,
1186 c, ca, b, i, NULL,
1187 first_bset_blacklisted_journal_seq,
1188 "found blacklisted bset (journal seq %llu) in btree node at offset %u-%u/%u",
1189 le64_to_cpu(i->journal_seq),
1190 b->written, b->written + sectors, ptr_written);
1191
1192 b->written = min(b->written + sectors, btree_sectors(c));
1193
1194 if (blacklisted && !first)
1195 continue;
1196
1197 sort_iter_add(iter,
1198 vstruct_idx(i, 0),
1199 vstruct_last(i));
1200
1201 max_journal_seq = max(max_journal_seq, le64_to_cpu(i->journal_seq));
1202 }
1203
1204 if (ptr_written) {
1205 btree_err_on(b->written < ptr_written,
1206 -BCH_ERR_btree_node_read_err_want_retry,
1207 c, ca, b, NULL, NULL,
1208 btree_node_data_missing,
1209 "btree node data missing: expected %u sectors, found %u",
1210 ptr_written, b->written);
1211 } else {
1212 for (bne = write_block(b);
1213 bset_byte_offset(b, bne) < btree_buf_bytes(b);
1214 bne = (void *) bne + block_bytes(c))
1215 btree_err_on(bne->keys.seq == b->data->keys.seq &&
1216 !bch2_journal_seq_is_blacklisted(c,
1217 le64_to_cpu(bne->keys.journal_seq),
1218 true),
1219 -BCH_ERR_btree_node_read_err_want_retry,
1220 c, ca, b, NULL, NULL,
1221 btree_node_bset_after_end,
1222 "found bset signature after last bset");
1223 }
1224
1225 sorted = btree_bounce_alloc(c, btree_buf_bytes(b), &used_mempool);
1226 sorted->keys.u64s = 0;
1227
1228 set_btree_bset(b, b->set, &b->data->keys);
1229
1230 b->nr = bch2_key_sort_fix_overlapping(c, &sorted->keys, iter);
1231 memset((uint8_t *)(sorted + 1) + b->nr.live_u64s * sizeof(u64), 0,
1232 btree_buf_bytes(b) -
1233 sizeof(struct btree_node) -
1234 b->nr.live_u64s * sizeof(u64));
1235
1236 u64s = le16_to_cpu(sorted->keys.u64s);
1237 *sorted = *b->data;
1238 sorted->keys.u64s = cpu_to_le16(u64s);
1239 swap(sorted, b->data);
1240 set_btree_bset(b, b->set, &b->data->keys);
1241 b->nsets = 1;
1242 b->data->keys.journal_seq = cpu_to_le64(max_journal_seq);
1243
1244 BUG_ON(b->nr.live_u64s != u64s);
1245
1246 btree_bounce_free(c, btree_buf_bytes(b), used_mempool, sorted);
1247
1248 if (updated_range)
1249 bch2_btree_node_drop_keys_outside_node(b);
1250
1251 i = &b->data->keys;
1252 for (k = i->start; k != vstruct_last(i);) {
1253 struct bkey tmp;
1254 struct bkey_s u = __bkey_disassemble(b, k, &tmp);
1255
1256 ret = btree_node_bkey_val_validate(c, b, u.s_c, READ);
1257 if (ret == -BCH_ERR_fsck_delete_bkey ||
1258 (bch2_inject_invalid_keys &&
1259 !bversion_cmp(u.k->bversion, MAX_VERSION))) {
1260 btree_keys_account_key_drop(&b->nr, 0, k);
1261
1262 i->u64s = cpu_to_le16(le16_to_cpu(i->u64s) - k->u64s);
1263 memmove_u64s_down(k, bkey_p_next(k),
1264 (u64 *) vstruct_end(i) - (u64 *) k);
1265 set_btree_bset_end(b, b->set);
1266 set_btree_node_need_rewrite(b);
1267 continue;
1268 }
1269 if (ret)
1270 goto fsck_err;
1271
1272 if (u.k->type == KEY_TYPE_btree_ptr_v2) {
1273 struct bkey_s_btree_ptr_v2 bp = bkey_s_to_btree_ptr_v2(u);
1274
1275 bp.v->mem_ptr = 0;
1276 }
1277
1278 k = bkey_p_next(k);
1279 }
1280
1281 bch2_bset_build_aux_tree(b, b->set, false);
1282
1283 set_needs_whiteout(btree_bset_first(b), true);
1284
1285 btree_node_reset_sib_u64s(b);
1286
1287 rcu_read_lock();
1288 bkey_for_each_ptr(bch2_bkey_ptrs(bkey_i_to_s(&b->key)), ptr) {
1289 struct bch_dev *ca2 = bch2_dev_rcu(c, ptr->dev);
1290
1291 if (!ca2 || ca2->mi.state != BCH_MEMBER_STATE_rw)
1292 set_btree_node_need_rewrite(b);
1293 }
1294 rcu_read_unlock();
1295
1296 if (!ptr_written)
1297 set_btree_node_need_rewrite(b);
1298 out:
1299 mempool_free(iter, &c->fill_iter);
1300 printbuf_exit(&buf);
1301 bch2_time_stats_update(&c->times[BCH_TIME_btree_node_read_done], start_time);
1302 return retry_read;
1303 fsck_err:
1304 if (ret == -BCH_ERR_btree_node_read_err_want_retry ||
1305 ret == -BCH_ERR_btree_node_read_err_must_retry) {
1306 retry_read = 1;
1307 } else {
1308 set_btree_node_read_error(b);
1309 bch2_btree_lost_data(c, b->c.btree_id);
1310 }
1311 goto out;
1312 }
1313
1314 static void btree_node_read_work(struct work_struct *work)
1315 {
1316 struct btree_read_bio *rb =
1317 container_of(work, struct btree_read_bio, work);
1318 struct bch_fs *c = rb->c;
1319 struct bch_dev *ca = rb->have_ioref ? bch2_dev_have_ref(c, rb->pick.ptr.dev) : NULL;
1320 struct btree *b = rb->b;
1321 struct bio *bio = &rb->bio;
1322 struct bch_io_failures failed = { .nr = 0 };
1323 struct printbuf buf = PRINTBUF;
1324 bool saw_error = false;
1325 bool retry = false;
1326 bool can_retry;
1327
1328 goto start;
1329 while (1) {
1330 retry = true;
1331 bch_info(c, "retrying read");
1332 ca = bch2_dev_get_ioref(c, rb->pick.ptr.dev, READ);
1333 rb->have_ioref = ca != NULL;
1334 rb->start_time = local_clock();
1335 bio_reset(bio, NULL, REQ_OP_READ|REQ_SYNC|REQ_META);
1336 bio->bi_iter.bi_sector = rb->pick.ptr.offset;
1337 bio->bi_iter.bi_size = btree_buf_bytes(b);
1338
1339 if (rb->have_ioref) {
1340 bio_set_dev(bio, ca->disk_sb.bdev);
1341 submit_bio_wait(bio);
1342 } else {
1343 bio->bi_status = BLK_STS_REMOVED;
1344 }
1345
1346 bch2_account_io_completion(ca, BCH_MEMBER_ERROR_read,
1347 rb->start_time, !bio->bi_status);
1348 start:
1349 printbuf_reset(&buf);
1350 bch2_btree_pos_to_text(&buf, c, b);
1351
1352 if (ca && bio->bi_status)
1353 bch_err_dev_ratelimited(ca,
1354 "btree read error %s for %s",
1355 bch2_blk_status_to_str(bio->bi_status), buf.buf);
1356 if (rb->have_ioref)
1357 percpu_ref_put(&ca->io_ref[READ]);
1358 rb->have_ioref = false;
1359
1360 bch2_mark_io_failure(&failed, &rb->pick, false);
1361
1362 can_retry = bch2_bkey_pick_read_device(c,
1363 bkey_i_to_s_c(&b->key),
1364 &failed, &rb->pick, -1) > 0;
1365
1366 if (!bio->bi_status &&
1367 !bch2_btree_node_read_done(c, ca, b, can_retry, &saw_error)) {
1368 if (retry)
1369 bch_info(c, "retry success");
1370 break;
1371 }
1372
1373 saw_error = true;
1374
1375 if (!can_retry) {
1376 set_btree_node_read_error(b);
1377 bch2_btree_lost_data(c, b->c.btree_id);
1378 break;
1379 }
1380 }
1381
1382 bch2_time_stats_update(&c->times[BCH_TIME_btree_node_read],
1383 rb->start_time);
1384 bio_put(&rb->bio);
1385
1386 if ((saw_error ||
1387 btree_node_need_rewrite(b)) &&
1388 !btree_node_read_error(b) &&
1389 c->curr_recovery_pass != BCH_RECOVERY_PASS_scan_for_btree_nodes) {
1390 if (saw_error) {
1391 printbuf_reset(&buf);
1392 bch2_btree_id_level_to_text(&buf, b->c.btree_id, b->c.level);
1393 prt_str(&buf, " ");
1394 bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&b->key));
1395 bch_err_ratelimited(c, "%s: rewriting btree node due to error\n %s",
1396 __func__, buf.buf);
1397 }
1398
1399 bch2_btree_node_rewrite_async(c, b);
1400 }
1401
1402 printbuf_exit(&buf);
1403 clear_btree_node_read_in_flight(b);
1404 smp_mb__after_atomic();
1405 wake_up_bit(&b->flags, BTREE_NODE_read_in_flight);
1406 }
1407
1408 static void btree_node_read_endio(struct bio *bio)
1409 {
1410 struct btree_read_bio *rb =
1411 container_of(bio, struct btree_read_bio, bio);
1412 struct bch_fs *c = rb->c;
1413 struct bch_dev *ca = rb->have_ioref
1414 ? bch2_dev_have_ref(c, rb->pick.ptr.dev) : NULL;
1415
1416 bch2_account_io_completion(ca, BCH_MEMBER_ERROR_read,
1417 rb->start_time, !bio->bi_status);
1418
1419 queue_work(c->btree_read_complete_wq, &rb->work);
1420 }
1421
1422 struct btree_node_read_all {
1423 struct closure cl;
1424 struct bch_fs *c;
1425 struct btree *b;
1426 unsigned nr;
1427 void *buf[BCH_REPLICAS_MAX];
1428 struct bio *bio[BCH_REPLICAS_MAX];
1429 blk_status_t err[BCH_REPLICAS_MAX];
1430 };
1431
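/*
 * Walk the bsets in an on-disk node buffer and return how many sectors of valid
 * data (matching sequence numbers) were written, or 0 if the magic is bad.
 */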
1432 static unsigned btree_node_sectors_written(struct bch_fs *c, void *data)
1433 {
1434 struct btree_node *bn = data;
1435 struct btree_node_entry *bne;
1436 unsigned offset = 0;
1437
1438 if (le64_to_cpu(bn->magic) != bset_magic(c))
1439 return 0;
1440
1441 while (offset < btree_sectors(c)) {
1442 if (!offset) {
1443 offset += vstruct_sectors(bn, c->block_bits);
1444 } else {
1445 bne = data + (offset << 9);
1446 if (bne->keys.seq != bn->keys.seq)
1447 break;
1448 offset += vstruct_sectors(bne, c->block_bits);
1449 }
1450 }
1451
1452 return offset;
1453 }
1454
1455 static bool btree_node_has_extra_bsets(struct bch_fs *c, unsigned offset, void *data)
1456 {
1457 struct btree_node *bn = data;
1458 struct btree_node_entry *bne;
1459
1460 if (!offset)
1461 return false;
1462
1463 while (offset < btree_sectors(c)) {
1464 bne = data + (offset << 9);
1465 if (bne->keys.seq == bn->keys.seq)
1466 return true;
1467 offset++;
1468 }
1469
1470 return false;
1472 }
1473
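/*
 * All replica reads have completed: pick the best copy (most sectors written),
 * flag mismatches between replicas (optionally dumping per-replica bset maps),
 * then run the normal read-done path on the chosen copy.
 */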
1474 static CLOSURE_CALLBACK(btree_node_read_all_replicas_done)
1475 {
1476 closure_type(ra, struct btree_node_read_all, cl);
1477 struct bch_fs *c = ra->c;
1478 struct btree *b = ra->b;
1479 struct printbuf buf = PRINTBUF;
1480 bool dump_bset_maps = false;
1481 bool have_retry = false;
1482 int ret = 0, best = -1, write = READ;
1483 unsigned i, written = 0, written2 = 0;
1484 __le64 seq = b->key.k.type == KEY_TYPE_btree_ptr_v2
1485 ? bkey_i_to_btree_ptr_v2(&b->key)->v.seq : 0;
1486 bool _saw_error = false, *saw_error = &_saw_error;
1487
1488 for (i = 0; i < ra->nr; i++) {
1489 struct btree_node *bn = ra->buf[i];
1490
1491 if (ra->err[i])
1492 continue;
1493
1494 if (le64_to_cpu(bn->magic) != bset_magic(c) ||
1495 (seq && seq != bn->keys.seq))
1496 continue;
1497
1498 if (best < 0) {
1499 best = i;
1500 written = btree_node_sectors_written(c, bn);
1501 continue;
1502 }
1503
1504 written2 = btree_node_sectors_written(c, ra->buf[i]);
1505 if (btree_err_on(written2 != written, -BCH_ERR_btree_node_read_err_fixable,
1506 c, NULL, b, NULL, NULL,
1507 btree_node_replicas_sectors_written_mismatch,
1508 "btree node sectors written mismatch: %u != %u",
1509 written, written2) ||
1510 btree_err_on(btree_node_has_extra_bsets(c, written2, ra->buf[i]),
1511 -BCH_ERR_btree_node_read_err_fixable,
1512 c, NULL, b, NULL, NULL,
1513 btree_node_bset_after_end,
1514 "found bset signature after last bset") ||
1515 btree_err_on(memcmp(ra->buf[best], ra->buf[i], written << 9),
1516 -BCH_ERR_btree_node_read_err_fixable,
1517 c, NULL, b, NULL, NULL,
1518 btree_node_replicas_data_mismatch,
1519 "btree node replicas content mismatch"))
1520 dump_bset_maps = true;
1521
1522 if (written2 > written) {
1523 written = written2;
1524 best = i;
1525 }
1526 }
1527 fsck_err:
1528 if (dump_bset_maps) {
1529 for (i = 0; i < ra->nr; i++) {
1530 struct btree_node *bn = ra->buf[i];
1531 struct btree_node_entry *bne = NULL;
1532 unsigned offset = 0, sectors;
1533 bool gap = false;
1534
1535 if (ra->err[i])
1536 continue;
1537
1538 printbuf_reset(&buf);
1539
1540 while (offset < btree_sectors(c)) {
1541 if (!offset) {
1542 sectors = vstruct_sectors(bn, c->block_bits);
1543 } else {
1544 bne = ra->buf[i] + (offset << 9);
1545 if (bne->keys.seq != bn->keys.seq)
1546 break;
1547 sectors = vstruct_sectors(bne, c->block_bits);
1548 }
1549
1550 prt_printf(&buf, " %u-%u", offset, offset + sectors);
1551 if (bne && bch2_journal_seq_is_blacklisted(c,
1552 le64_to_cpu(bne->keys.journal_seq), false))
1553 prt_printf(&buf, "*");
1554 offset += sectors;
1555 }
1556
1557 while (offset < btree_sectors(c)) {
1558 bne = ra->buf[i] + (offset << 9);
1559 if (bne->keys.seq == bn->keys.seq) {
1560 if (!gap)
1561 prt_printf(&buf, " GAP");
1562 gap = true;
1563
1564 sectors = vstruct_sectors(bne, c->block_bits);
1565 prt_printf(&buf, " %u-%u", offset, offset + sectors);
1566 if (bch2_journal_seq_is_blacklisted(c,
1567 le64_to_cpu(bne->keys.journal_seq), false))
1568 prt_printf(&buf, "*");
1569 }
1570 offset++;
1571 }
1572
1573 bch_err(c, "replica %u:%s", i, buf.buf);
1574 }
1575 }
1576
1577 if (best >= 0) {
1578 memcpy(b->data, ra->buf[best], btree_buf_bytes(b));
1579 ret = bch2_btree_node_read_done(c, NULL, b, false, saw_error);
1580 } else {
1581 ret = -1;
1582 }
1583
1584 if (ret) {
1585 set_btree_node_read_error(b);
1586 bch2_btree_lost_data(c, b->c.btree_id);
1587 } else if (*saw_error)
1588 bch2_btree_node_rewrite_async(c, b);
1589
1590 for (i = 0; i < ra->nr; i++) {
1591 mempool_free(ra->buf[i], &c->btree_bounce_pool);
1592 bio_put(ra->bio[i]);
1593 }
1594
1595 closure_debug_destroy(&ra->cl);
1596 kfree(ra);
1597 printbuf_exit(&buf);
1598
1599 clear_btree_node_read_in_flight(b);
1600 smp_mb__after_atomic();
1601 wake_up_bit(&b->flags, BTREE_NODE_read_in_flight);
1602 }
1603
1604 static void btree_node_read_all_replicas_endio(struct bio *bio)
1605 {
1606 struct btree_read_bio *rb =
1607 container_of(bio, struct btree_read_bio, bio);
1608 struct bch_fs *c = rb->c;
1609 struct btree_node_read_all *ra = rb->ra;
1610
1611 if (rb->have_ioref) {
1612 struct bch_dev *ca = bch2_dev_have_ref(c, rb->pick.ptr.dev);
1613
1614 bch2_latency_acct(ca, rb->start_time, READ);
1615 percpu_ref_put(&ca->io_ref[READ]);
1616 }
1617
1618 ra->err[rb->idx] = bio->bi_status;
1619 closure_put(&ra->cl);
1620 }
1621
1622 /*
1623 * XXX This allocates multiple times from the same mempools, and can deadlock
1624 * under sufficient memory pressure (but is only a debug path)
1625 */
1626 static int btree_node_read_all_replicas(struct bch_fs *c, struct btree *b, bool sync)
1627 {
1628 struct bkey_s_c k = bkey_i_to_s_c(&b->key);
1629 struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
1630 const union bch_extent_entry *entry;
1631 struct extent_ptr_decoded pick;
1632 struct btree_node_read_all *ra;
1633 unsigned i;
1634
1635 ra = kzalloc(sizeof(*ra), GFP_NOFS);
1636 if (!ra)
1637 return -BCH_ERR_ENOMEM_btree_node_read_all_replicas;
1638
1639 closure_init(&ra->cl, NULL);
1640 ra->c = c;
1641 ra->b = b;
1642 ra->nr = bch2_bkey_nr_ptrs(k);
1643
1644 for (i = 0; i < ra->nr; i++) {
1645 ra->buf[i] = mempool_alloc(&c->btree_bounce_pool, GFP_NOFS);
1646 ra->bio[i] = bio_alloc_bioset(NULL,
1647 buf_pages(ra->buf[i], btree_buf_bytes(b)),
1648 REQ_OP_READ|REQ_SYNC|REQ_META,
1649 GFP_NOFS,
1650 &c->btree_bio);
1651 }
1652
1653 i = 0;
1654 bkey_for_each_ptr_decode(k.k, ptrs, pick, entry) {
1655 struct bch_dev *ca = bch2_dev_get_ioref(c, pick.ptr.dev, READ);
1656 struct btree_read_bio *rb =
1657 container_of(ra->bio[i], struct btree_read_bio, bio);
1658 rb->c = c;
1659 rb->b = b;
1660 rb->ra = ra;
1661 rb->start_time = local_clock();
1662 rb->have_ioref = ca != NULL;
1663 rb->idx = i;
1664 rb->pick = pick;
1665 rb->bio.bi_iter.bi_sector = pick.ptr.offset;
1666 rb->bio.bi_end_io = btree_node_read_all_replicas_endio;
1667 bch2_bio_map(&rb->bio, ra->buf[i], btree_buf_bytes(b));
1668
1669 if (rb->have_ioref) {
1670 this_cpu_add(ca->io_done->sectors[READ][BCH_DATA_btree],
1671 bio_sectors(&rb->bio));
1672 bio_set_dev(&rb->bio, ca->disk_sb.bdev);
1673
1674 closure_get(&ra->cl);
1675 submit_bio(&rb->bio);
1676 } else {
1677 ra->err[i] = BLK_STS_REMOVED;
1678 }
1679
1680 i++;
1681 }
1682
1683 if (sync) {
1684 closure_sync(&ra->cl);
1685 btree_node_read_all_replicas_done(&ra->cl.work);
1686 } else {
1687 continue_at(&ra->cl, btree_node_read_all_replicas_done,
1688 c->btree_read_complete_wq);
1689 }
1690
1691 return 0;
1692 }
1693
1694 void bch2_btree_node_read(struct btree_trans *trans, struct btree *b,
1695 bool sync)
1696 {
1697 struct bch_fs *c = trans->c;
1698 struct extent_ptr_decoded pick;
1699 struct btree_read_bio *rb;
1700 struct bch_dev *ca;
1701 struct bio *bio;
1702 int ret;
1703
1704 trace_and_count(c, btree_node_read, trans, b);
1705
1706 if (bch2_verify_all_btree_replicas &&
1707 !btree_node_read_all_replicas(c, b, sync))
1708 return;
1709
1710 ret = bch2_bkey_pick_read_device(c, bkey_i_to_s_c(&b->key),
1711 NULL, &pick, -1);
1712
1713 if (ret <= 0) {
1714 struct printbuf buf = PRINTBUF;
1715
1716 prt_str(&buf, "btree node read error: no device to read from\n at ");
1717 bch2_btree_pos_to_text(&buf, c, b);
1718 bch_err_ratelimited(c, "%s", buf.buf);
1719
1720 if (c->opts.recovery_passes & BIT_ULL(BCH_RECOVERY_PASS_check_topology) &&
1721 c->curr_recovery_pass > BCH_RECOVERY_PASS_check_topology)
1722 bch2_fatal_error(c);
1723
1724 set_btree_node_read_error(b);
1725 bch2_btree_lost_data(c, b->c.btree_id);
1726 clear_btree_node_read_in_flight(b);
1727 smp_mb__after_atomic();
1728 wake_up_bit(&b->flags, BTREE_NODE_read_in_flight);
1729 printbuf_exit(&buf);
1730 return;
1731 }
1732
1733 ca = bch2_dev_get_ioref(c, pick.ptr.dev, READ);
1734
1735 bio = bio_alloc_bioset(NULL,
1736 buf_pages(b->data, btree_buf_bytes(b)),
1737 REQ_OP_READ|REQ_SYNC|REQ_META,
1738 GFP_NOFS,
1739 &c->btree_bio);
1740 rb = container_of(bio, struct btree_read_bio, bio);
1741 rb->c = c;
1742 rb->b = b;
1743 rb->ra = NULL;
1744 rb->start_time = local_clock();
1745 rb->have_ioref = ca != NULL;
1746 rb->pick = pick;
1747 INIT_WORK(&rb->work, btree_node_read_work);
1748 bio->bi_iter.bi_sector = pick.ptr.offset;
1749 bio->bi_end_io = btree_node_read_endio;
1750 bch2_bio_map(bio, b->data, btree_buf_bytes(b));
1751
1752 if (rb->have_ioref) {
1753 this_cpu_add(ca->io_done->sectors[READ][BCH_DATA_btree],
1754 bio_sectors(bio));
1755 bio_set_dev(bio, ca->disk_sb.bdev);
1756
1757 if (sync) {
1758 submit_bio_wait(bio);
1759 bch2_latency_acct(ca, rb->start_time, READ);
1760 btree_node_read_work(&rb->work);
1761 } else {
1762 submit_bio(bio);
1763 }
1764 } else {
1765 bio->bi_status = BLK_STS_REMOVED;
1766
1767 if (sync)
1768 btree_node_read_work(&rb->work);
1769 else
1770 queue_work(c->btree_read_complete_wq, &rb->work);
1771 }
1772 }
1773
1774 static int __bch2_btree_root_read(struct btree_trans *trans, enum btree_id id,
1775 const struct bkey_i *k, unsigned level)
1776 {
1777 struct bch_fs *c = trans->c;
1778 struct closure cl;
1779 struct btree *b;
1780 int ret;
1781
1782 closure_init_stack(&cl);
1783
1784 do {
1785 ret = bch2_btree_cache_cannibalize_lock(trans, &cl);
1786 closure_sync(&cl);
1787 } while (ret);
1788
1789 b = bch2_btree_node_mem_alloc(trans, level != 0);
1790 bch2_btree_cache_cannibalize_unlock(trans);
1791
1792 BUG_ON(IS_ERR(b));
1793
1794 bkey_copy(&b->key, k);
1795 BUG_ON(bch2_btree_node_hash_insert(&c->btree_cache, b, level, id));
1796
1797 set_btree_node_read_in_flight(b);
1798
1799 /* we can't pass the trans to read_done() for fsck errors, so it must be unlocked */
1800 bch2_trans_unlock(trans);
1801 bch2_btree_node_read(trans, b, true);
1802
1803 if (btree_node_read_error(b)) {
1804 mutex_lock(&c->btree_cache.lock);
1805 bch2_btree_node_hash_remove(&c->btree_cache, b);
1806 mutex_unlock(&c->btree_cache.lock);
1807
1808 ret = -BCH_ERR_btree_node_read_error;
1809 goto err;
1810 }
1811
1812 bch2_btree_set_root_for_read(c, b);
1813 err:
1814 six_unlock_write(&b->c.lock);
1815 six_unlock_intent(&b->c.lock);
1816
1817 return ret;
1818 }
1819
1820 int bch2_btree_root_read(struct bch_fs *c, enum btree_id id,
1821 const struct bkey_i *k, unsigned level)
1822 {
1823 return bch2_trans_run(c, __bch2_btree_root_read(trans, id, k, level));
1824 }
1825
1826 struct btree_node_scrub {
1827 struct bch_fs *c;
1828 struct bch_dev *ca;
1829 void *buf;
1830 bool used_mempool;
1831 unsigned written;
1832
1833 enum btree_id btree;
1834 unsigned level;
1835 struct bkey_buf key;
1836 __le64 seq;
1837
1838 struct work_struct work;
1839 struct bio bio;
1840 };
1841
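/*
 * Scrub check: verify the node magic and each bset's checksum without
 * decrypting or validating individual keys.
 */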
1842 static bool btree_node_scrub_check(struct bch_fs *c, struct btree_node *data, unsigned ptr_written,
1843 struct printbuf *err)
1844 {
1845 unsigned written = 0;
1846
1847 if (le64_to_cpu(data->magic) != bset_magic(c)) {
1848 prt_printf(err, "bad magic: want %llx, got %llx",
1849 bset_magic(c), le64_to_cpu(data->magic));
1850 return false;
1851 }
1852
1853 while (written < (ptr_written ?: btree_sectors(c))) {
1854 struct btree_node_entry *bne;
1855 struct bset *i;
1856 bool first = !written;
1857
1858 if (first) {
1859 bne = NULL;
1860 i = &data->keys;
1861 } else {
1862 bne = (void *) data + (written << 9);
1863 i = &bne->keys;
1864
1865 if (!ptr_written && i->seq != data->keys.seq)
1866 break;
1867 }
1868
1869 struct nonce nonce = btree_nonce(i, written << 9);
1870 bool good_csum_type = bch2_checksum_type_valid(c, BSET_CSUM_TYPE(i));
1871
1872 if (first) {
1873 if (good_csum_type) {
1874 struct bch_csum csum = csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, data);
1875 if (bch2_crc_cmp(data->csum, csum)) {
1876 bch2_csum_err_msg(err, BSET_CSUM_TYPE(i), data->csum, csum);
1877 return false;
1878 }
1879 }
1880
1881 written += vstruct_sectors(data, c->block_bits);
1882 } else {
1883 if (good_csum_type) {
1884 struct bch_csum csum = csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, bne);
1885 if (bch2_crc_cmp(bne->csum, csum)) {
1886 bch2_csum_err_msg(err, BSET_CSUM_TYPE(i), bne->csum, csum);
1887 return false;
1888 }
1889 }
1890
1891 written += vstruct_sectors(bne, c->block_bits);
1892 }
1893 }
1894
1895 return true;
1896 }
1897
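/*
 * Scrub completion, run from the read-complete workqueue: if the buffer
 * fails validation and the node's key still carries the seq we scrubbed
 * (i.e. the node hasn't been rewritten since), kick off a rewrite of the
 * node to repair the bad replica.
 */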
1898 static void btree_node_scrub_work(struct work_struct *work)
1899 {
1900 struct btree_node_scrub *scrub = container_of(work, struct btree_node_scrub, work);
1901 struct bch_fs *c = scrub->c;
1902 struct printbuf err = PRINTBUF;
1903
1904 __bch2_btree_pos_to_text(&err, c, scrub->btree, scrub->level,
1905 bkey_i_to_s_c(scrub->key.k));
1906 prt_newline(&err);
1907
1908 if (!btree_node_scrub_check(c, scrub->buf, scrub->written, &err)) {
1909 struct btree_trans *trans = bch2_trans_get(c);
1910
1911 struct btree_iter iter;
1912 bch2_trans_node_iter_init(trans, &iter, scrub->btree,
1913 scrub->key.k->k.p, 0, scrub->level - 1, 0);
1914
1915 struct btree *b;
1916 int ret = lockrestart_do(trans,
1917 PTR_ERR_OR_ZERO(b = bch2_btree_iter_peek_node(trans, &iter)));
1918 if (ret)
1919 goto err;
1920
1921 if (bkey_i_to_btree_ptr_v2(&b->key)->v.seq == scrub->seq) {
1922 bch_err(c, "error validating btree node during scrub on %s at btree %s",
1923 scrub->ca->name, err.buf);
1924
1925 ret = bch2_btree_node_rewrite(trans, &iter, b, 0);
1926 }
1927 err:
1928 bch2_trans_iter_exit(trans, &iter);
1929 bch2_trans_begin(trans);
1930 bch2_trans_put(trans);
1931 }
1932
1933 printbuf_exit(&err);
1934 	bch2_bkey_buf_exit(&scrub->key, c);
1935 btree_bounce_free(c, c->opts.btree_node_size, scrub->used_mempool, scrub->buf);
1936 percpu_ref_put(&scrub->ca->io_ref[READ]);
1937 kfree(scrub);
1938 bch2_write_ref_put(c, BCH_WRITE_REF_btree_node_scrub);
1939 }
1940
1941 static void btree_node_scrub_endio(struct bio *bio)
1942 {
1943 struct btree_node_scrub *scrub = container_of(bio, struct btree_node_scrub, bio);
1944
1945 queue_work(scrub->c->btree_read_complete_wq, &scrub->work);
1946 }
1947
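/*
 * Read and verify a single replica of a btree node, asynchronously; only
 * btree_ptr_v2 keys are scrubbed (other key types return 0). @dev selects
 * which device's replica to read.
 *
 * Illustrative usage only - the caller and context here are hypothetical:
 *
 *	ret = bch2_trans_run(c,
 *		bch2_btree_node_scrub(trans, btree, level, k, dev_idx));
 */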
1948 int bch2_btree_node_scrub(struct btree_trans *trans,
1949 enum btree_id btree, unsigned level,
1950 struct bkey_s_c k, unsigned dev)
1951 {
1952 if (k.k->type != KEY_TYPE_btree_ptr_v2)
1953 return 0;
1954
1955 struct bch_fs *c = trans->c;
1956
1957 if (!bch2_write_ref_tryget(c, BCH_WRITE_REF_btree_node_scrub))
1958 return -BCH_ERR_erofs_no_writes;
1959
1960 struct extent_ptr_decoded pick;
1961 int ret = bch2_bkey_pick_read_device(c, k, NULL, &pick, dev);
1962 if (ret <= 0)
1963 goto err;
1964
1965 struct bch_dev *ca = bch2_dev_get_ioref(c, pick.ptr.dev, READ);
1966 if (!ca) {
1967 ret = -BCH_ERR_device_offline;
1968 goto err;
1969 }
1970
1971 bool used_mempool = false;
1972 void *buf = btree_bounce_alloc(c, c->opts.btree_node_size, &used_mempool);
1973
1974 unsigned vecs = buf_pages(buf, c->opts.btree_node_size);
1975
1976 struct btree_node_scrub *scrub =
1977 kzalloc(sizeof(*scrub) + sizeof(struct bio_vec) * vecs, GFP_KERNEL);
1978 if (!scrub) {
1979 ret = -ENOMEM;
1980 goto err_free;
1981 }
1982
1983 scrub->c = c;
1984 scrub->ca = ca;
1985 scrub->buf = buf;
1986 scrub->used_mempool = used_mempool;
1987 scrub->written = btree_ptr_sectors_written(k);
1988
1989 scrub->btree = btree;
1990 scrub->level = level;
1991 bch2_bkey_buf_init(&scrub->key);
1992 bch2_bkey_buf_reassemble(&scrub->key, c, k);
1993 scrub->seq = bkey_s_c_to_btree_ptr_v2(k).v->seq;
1994
1995 INIT_WORK(&scrub->work, btree_node_scrub_work);
1996
1997 bio_init(&scrub->bio, ca->disk_sb.bdev, scrub->bio.bi_inline_vecs, vecs, REQ_OP_READ);
1998 bch2_bio_map(&scrub->bio, scrub->buf, c->opts.btree_node_size);
1999 scrub->bio.bi_iter.bi_sector = pick.ptr.offset;
2000 scrub->bio.bi_end_io = btree_node_scrub_endio;
2001 submit_bio(&scrub->bio);
2002 return 0;
2003 err_free:
2004 btree_bounce_free(c, c->opts.btree_node_size, used_mempool, buf);
2005 percpu_ref_put(&ca->io_ref[READ]);
2006 err:
2007 bch2_write_ref_put(c, BCH_WRITE_REF_btree_node_scrub);
2008 return ret;
2009 }
2010
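/*
 * Write completion bookkeeping: b->will_make_reachable holds a pointer to
 * the btree_update that will make this node reachable, with bit 0 set while
 * that reference is still outstanding. Clear the low bit, drop the closure
 * ref it represented, and drop this write's journal pin.
 */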
2011 static void bch2_btree_complete_write(struct bch_fs *c, struct btree *b,
2012 struct btree_write *w)
2013 {
2014 unsigned long old, new;
2015
2016 old = READ_ONCE(b->will_make_reachable);
2017 do {
2018 new = old;
2019 if (!(old & 1))
2020 break;
2021
2022 new &= ~1UL;
2023 } while (!try_cmpxchg(&b->will_make_reachable, &old, new));
2024
2025 if (old & 1)
2026 closure_put(&((struct btree_update *) new)->cl);
2027
2028 bch2_journal_pin_drop(&c->journal, &w->journal);
2029 }
2030
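/*
 * Finish a write: complete the previous write's bookkeeping, then atomically
 * update b->flags. If the node was redirtied and still needs a write (and
 * nothing blocks it), start the next write immediately; otherwise clear
 * write_in_flight and wake up waiters.
 */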
2031 static void __btree_node_write_done(struct bch_fs *c, struct btree *b, u64 start_time)
2032 {
2033 struct btree_write *w = btree_prev_write(b);
2034 unsigned long old, new;
2035 unsigned type = 0;
2036
2037 bch2_btree_complete_write(c, b, w);
2038
2039 if (start_time)
2040 bch2_time_stats_update(&c->times[BCH_TIME_btree_node_write], start_time);
2041
2042 old = READ_ONCE(b->flags);
2043 do {
2044 new = old;
2045
2046 if ((old & (1U << BTREE_NODE_dirty)) &&
2047 (old & (1U << BTREE_NODE_need_write)) &&
2048 !(old & (1U << BTREE_NODE_never_write)) &&
2049 !(old & (1U << BTREE_NODE_write_blocked)) &&
2050 !(old & (1U << BTREE_NODE_will_make_reachable))) {
2051 new &= ~(1U << BTREE_NODE_dirty);
2052 new &= ~(1U << BTREE_NODE_need_write);
2053 new |= (1U << BTREE_NODE_write_in_flight);
2054 new |= (1U << BTREE_NODE_write_in_flight_inner);
2055 new |= (1U << BTREE_NODE_just_written);
2056 new ^= (1U << BTREE_NODE_write_idx);
2057
2058 type = new & BTREE_WRITE_TYPE_MASK;
2059 new &= ~BTREE_WRITE_TYPE_MASK;
2060 } else {
2061 new &= ~(1U << BTREE_NODE_write_in_flight);
2062 new &= ~(1U << BTREE_NODE_write_in_flight_inner);
2063 }
2064 } while (!try_cmpxchg(&b->flags, &old, new));
2065
2066 if (new & (1U << BTREE_NODE_write_in_flight))
2067 __bch2_btree_node_write(c, b, BTREE_WRITE_ALREADY_STARTED|type);
2068 else {
2069 smp_mb__after_atomic();
2070 wake_up_bit(&b->flags, BTREE_NODE_write_in_flight);
2071 }
2072 }
2073
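/*
 * Same as __btree_node_write_done(), but takes the node read lock first; the
 * throwaway transaction exists only to satisfy the locking interface.
 */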
2074 static void btree_node_write_done(struct bch_fs *c, struct btree *b, u64 start_time)
2075 {
2076 struct btree_trans *trans = bch2_trans_get(c);
2077
2078 btree_node_lock_nopath_nofail(trans, &b->c, SIX_LOCK_read);
2079
2080 	/* we don't need the transaction context anymore once we hold the lock */
2081 bch2_trans_put(trans);
2082 __btree_node_write_done(c, b, start_time);
2083 six_unlock_read(&b->c.lock);
2084 }
2085
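/*
 * Write completion work, run once all replicas have completed: free the
 * bounce buffer, drop pointers to devices where the write failed, and - for
 * writes other than a node's first - update the node's key to drop the
 * failed replicas. If every replica failed, or the key update fails, mark
 * the node noevict and, unless the error is EROFS, emit a fatal error.
 */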
2086 static void btree_node_write_work(struct work_struct *work)
2087 {
2088 struct btree_write_bio *wbio =
2089 container_of(work, struct btree_write_bio, work);
2090 struct bch_fs *c = wbio->wbio.c;
2091 struct btree *b = wbio->wbio.bio.bi_private;
2092 u64 start_time = wbio->start_time;
2093 int ret = 0;
2094
2095 btree_bounce_free(c,
2096 wbio->data_bytes,
2097 wbio->wbio.used_mempool,
2098 wbio->data);
2099
2100 bch2_bkey_drop_ptrs(bkey_i_to_s(&wbio->key), ptr,
2101 bch2_dev_list_has_dev(wbio->wbio.failed, ptr->dev));
2102
2103 if (!bch2_bkey_nr_ptrs(bkey_i_to_s_c(&wbio->key))) {
2104 ret = -BCH_ERR_btree_node_write_all_failed;
2105 goto err;
2106 }
2107
2108 if (wbio->wbio.first_btree_write) {
2109 if (wbio->wbio.failed.nr) {
2110
2111 }
2112 } else {
2113 ret = bch2_trans_do(c,
2114 bch2_btree_node_update_key_get_iter(trans, b, &wbio->key,
2115 BCH_WATERMARK_interior_updates|
2116 BCH_TRANS_COMMIT_journal_reclaim|
2117 BCH_TRANS_COMMIT_no_enospc|
2118 BCH_TRANS_COMMIT_no_check_rw,
2119 !wbio->wbio.failed.nr));
2120 if (ret)
2121 goto err;
2122 }
2123 out:
2124 bio_put(&wbio->wbio.bio);
2125 btree_node_write_done(c, b, start_time);
2126 return;
2127 err:
2128 set_btree_node_noevict(b);
2129
2130 if (!bch2_err_matches(ret, EROFS)) {
2131 struct printbuf buf = PRINTBUF;
2132 prt_printf(&buf, "writing btree node: %s\n ", bch2_err_str(ret));
2133 bch2_btree_pos_to_text(&buf, c, b);
2134 bch2_fs_fatal_error(c, "%s", buf.buf);
2135 printbuf_exit(&buf);
2136 }
2137 goto out;
2138 }
2139
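/*
 * Per-replica bio completion: account the IO, record the failed device (if
 * any) under btree_write_error_lock, and drop the device ioref. Split bios
 * complete into their parent; the final completion clears
 * write_in_flight_inner and queues btree_node_write_work().
 */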
2140 static void btree_node_write_endio(struct bio *bio)
2141 {
2142 struct bch_write_bio *wbio = to_wbio(bio);
2143 struct bch_write_bio *parent = wbio->split ? wbio->parent : NULL;
2144 struct bch_write_bio *orig = parent ?: wbio;
2145 struct btree_write_bio *wb = container_of(orig, struct btree_write_bio, wbio);
2146 struct bch_fs *c = wbio->c;
2147 struct btree *b = wbio->bio.bi_private;
2148 struct bch_dev *ca = wbio->have_ioref ? bch2_dev_have_ref(c, wbio->dev) : NULL;
2149
2150 bch2_account_io_completion(ca, BCH_MEMBER_ERROR_write,
2151 wbio->submit_time, !bio->bi_status);
2152
2153 if (ca && bio->bi_status) {
2154 struct printbuf buf = PRINTBUF;
2155 buf.atomic++;
2156 prt_printf(&buf, "btree write error: %s\n ",
2157 bch2_blk_status_to_str(bio->bi_status));
2158 bch2_btree_pos_to_text(&buf, c, b);
2159 bch_err_dev_ratelimited(ca, "%s", buf.buf);
2160 printbuf_exit(&buf);
2161 }
2162
2163 if (bio->bi_status) {
2164 unsigned long flags;
2165 spin_lock_irqsave(&c->btree_write_error_lock, flags);
2166 bch2_dev_list_add_dev(&orig->failed, wbio->dev);
2167 spin_unlock_irqrestore(&c->btree_write_error_lock, flags);
2168 }
2169
2170 /*
2171 * XXX: we should be using io_ref[WRITE], but we aren't retrying failed
2172 * btree writes yet (due to device removal/ro):
2173 */
2174 if (wbio->have_ioref)
2175 percpu_ref_put(&ca->io_ref[READ]);
2176
2177 if (parent) {
2178 bio_put(bio);
2179 bio_endio(&parent->bio);
2180 return;
2181 }
2182
2183 clear_btree_node_write_in_flight_inner(b);
2184 smp_mb__after_atomic();
2185 wake_up_bit(&b->flags, BTREE_NODE_write_in_flight_inner);
2186 INIT_WORK(&wb->work, btree_node_write_work);
2187 queue_work(c->btree_io_complete_wq, &wb->work);
2188 }
2189
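/*
 * Last-ditch validation of the node key and the bset we're about to write;
 * a failure here indicates a bug, so it's reported as a filesystem
 * inconsistency rather than repaired.
 */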
2190 static int validate_bset_for_write(struct bch_fs *c, struct btree *b,
2191 struct bset *i, unsigned sectors)
2192 {
2193 bool saw_error;
2194
2195 int ret = bch2_bkey_validate(c, bkey_i_to_s_c(&b->key),
2196 (struct bkey_validate_context) {
2197 .from = BKEY_VALIDATE_btree_node,
2198 .level = b->c.level + 1,
2199 .btree = b->c.btree_id,
2200 .flags = BCH_VALIDATE_write,
2201 });
2202 if (ret) {
2203 bch2_fs_inconsistent(c, "invalid btree node key before write");
2204 return ret;
2205 }
2206
2207 ret = validate_bset_keys(c, b, i, WRITE, false, &saw_error) ?:
2208 validate_bset(c, NULL, b, i, b->written, sectors, WRITE, false, &saw_error);
2209 if (ret) {
2210 bch2_inconsistent_error(c);
2211 dump_stack();
2212 }
2213
2214 return ret;
2215 }
2216
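/*
 * Submit the write from process context (the write submit workqueue): copy
 * the node key, offset each pointer by how much of the node has already been
 * written, and submit to all replicas.
 */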
2217 static void btree_write_submit(struct work_struct *work)
2218 {
2219 struct btree_write_bio *wbio = container_of(work, struct btree_write_bio, work);
2220 BKEY_PADDED_ONSTACK(k, BKEY_BTREE_PTR_VAL_U64s_MAX) tmp;
2221
2222 bkey_copy(&tmp.k, &wbio->key);
2223
2224 bkey_for_each_ptr(bch2_bkey_ptrs(bkey_i_to_s(&tmp.k)), ptr)
2225 ptr->offset += wbio->sector_offset;
2226
2227 bch2_submit_wbio_replicas(&wbio->wbio, wbio->wbio.c, BCH_DATA_btree,
2228 &tmp.k, false);
2229 }
2230
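/*
 * Start a btree node write. With BTREE_WRITE_ONLY_IF_NEED, only write if the
 * node is marked as needing one; BTREE_WRITE_ALREADY_STARTED is used when
 * restarting a write from the completion path, after the flags have already
 * been claimed. The caller must hold at least a read lock on the node.
 */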
2231 void __bch2_btree_node_write(struct bch_fs *c, struct btree *b, unsigned flags)
2232 {
2233 struct btree_write_bio *wbio;
2234 struct bset *i;
2235 struct btree_node *bn = NULL;
2236 struct btree_node_entry *bne = NULL;
2237 struct sort_iter_stack sort_iter;
2238 struct nonce nonce;
2239 unsigned bytes_to_write, sectors_to_write, bytes, u64s;
2240 u64 seq = 0;
2241 bool used_mempool;
2242 unsigned long old, new;
2243 bool validate_before_checksum = false;
2244 enum btree_write_type type = flags & BTREE_WRITE_TYPE_MASK;
2245 void *data;
2246 u64 start_time = local_clock();
2247 int ret;
2248
2249 if (flags & BTREE_WRITE_ALREADY_STARTED)
2250 goto do_write;
2251
2252 /*
2253 * We may only have a read lock on the btree node - the dirty bit is our
2254 * "lock" against racing with other threads that may be trying to start
2255 * a write, we do a write iff we clear the dirty bit. Since setting the
2256 * dirty bit requires a write lock, we can't race with other threads
2257 * redirtying it:
2258 */
2259 old = READ_ONCE(b->flags);
2260 do {
2261 new = old;
2262
2263 if (!(old & (1 << BTREE_NODE_dirty)))
2264 return;
2265
2266 if ((flags & BTREE_WRITE_ONLY_IF_NEED) &&
2267 !(old & (1 << BTREE_NODE_need_write)))
2268 return;
2269
2270 if (old &
2271 ((1 << BTREE_NODE_never_write)|
2272 (1 << BTREE_NODE_write_blocked)))
2273 return;
2274
2275 if (b->written &&
2276 (old & (1 << BTREE_NODE_will_make_reachable)))
2277 return;
2278
2279 if (old & (1 << BTREE_NODE_write_in_flight))
2280 return;
2281
2282 if (flags & BTREE_WRITE_ONLY_IF_NEED)
2283 type = new & BTREE_WRITE_TYPE_MASK;
2284 new &= ~BTREE_WRITE_TYPE_MASK;
2285
2286 new &= ~(1 << BTREE_NODE_dirty);
2287 new &= ~(1 << BTREE_NODE_need_write);
2288 new |= (1 << BTREE_NODE_write_in_flight);
2289 new |= (1 << BTREE_NODE_write_in_flight_inner);
2290 new |= (1 << BTREE_NODE_just_written);
2291 new ^= (1 << BTREE_NODE_write_idx);
2292 } while (!try_cmpxchg_acquire(&b->flags, &old, new));
2293
2294 if (new & (1U << BTREE_NODE_need_write))
2295 return;
2296 do_write:
2297 BUG_ON((type == BTREE_WRITE_initial) != (b->written == 0));
2298
2299 atomic_long_dec(&c->btree_cache.nr_dirty);
2300
2301 BUG_ON(btree_node_fake(b));
2302 BUG_ON((b->will_make_reachable != 0) != !b->written);
2303
2304 BUG_ON(b->written >= btree_sectors(c));
2305 BUG_ON(b->written & (block_sectors(c) - 1));
2306 BUG_ON(bset_written(b, btree_bset_last(b)));
2307 BUG_ON(le64_to_cpu(b->data->magic) != bset_magic(c));
2308 BUG_ON(memcmp(&b->data->format, &b->format, sizeof(b->format)));
2309
2310 bch2_sort_whiteouts(c, b);
2311
2312 sort_iter_stack_init(&sort_iter, b);
2313
2314 bytes = !b->written
2315 ? sizeof(struct btree_node)
2316 : sizeof(struct btree_node_entry);
2317
2318 bytes += b->whiteout_u64s * sizeof(u64);
2319
2320 for_each_bset(b, t) {
2321 i = bset(b, t);
2322
2323 if (bset_written(b, i))
2324 continue;
2325
2326 bytes += le16_to_cpu(i->u64s) * sizeof(u64);
2327 sort_iter_add(&sort_iter.iter,
2328 btree_bkey_first(b, t),
2329 btree_bkey_last(b, t));
2330 seq = max(seq, le64_to_cpu(i->journal_seq));
2331 }
2332
2333 BUG_ON(b->written && !seq);
2334
2335 /* bch2_varint_decode may read up to 7 bytes past the end of the buffer: */
2336 bytes += 8;
2337
2338 /* buffer must be a multiple of the block size */
2339 bytes = round_up(bytes, block_bytes(c));
2340
2341 data = btree_bounce_alloc(c, bytes, &used_mempool);
2342
2343 if (!b->written) {
2344 bn = data;
2345 *bn = *b->data;
2346 i = &bn->keys;
2347 } else {
2348 bne = data;
2349 bne->keys = b->data->keys;
2350 i = &bne->keys;
2351 }
2352
2353 i->journal_seq = cpu_to_le64(seq);
2354 i->u64s = 0;
2355
2356 sort_iter_add(&sort_iter.iter,
2357 unwritten_whiteouts_start(b),
2358 unwritten_whiteouts_end(b));
2359 SET_BSET_SEPARATE_WHITEOUTS(i, false);
2360
2361 u64s = bch2_sort_keys_keep_unwritten_whiteouts(i->start, &sort_iter.iter);
2362 le16_add_cpu(&i->u64s, u64s);
2363
2364 b->whiteout_u64s = 0;
2365
2366 BUG_ON(!b->written && i->u64s != b->data->keys.u64s);
2367
2368 set_needs_whiteout(i, false);
2369
2370 /* do we have data to write? */
2371 if (b->written && !i->u64s)
2372 goto nowrite;
2373
2374 bytes_to_write = vstruct_end(i) - data;
2375 sectors_to_write = round_up(bytes_to_write, block_bytes(c)) >> 9;
2376
2377 if (!b->written &&
2378 b->key.k.type == KEY_TYPE_btree_ptr_v2)
2379 BUG_ON(btree_ptr_sectors_written(bkey_i_to_s_c(&b->key)) != sectors_to_write);
2380
2381 memset(data + bytes_to_write, 0,
2382 (sectors_to_write << 9) - bytes_to_write);
2383
2384 BUG_ON(b->written + sectors_to_write > btree_sectors(c));
2385 BUG_ON(BSET_BIG_ENDIAN(i) != CPU_BIG_ENDIAN);
2386 BUG_ON(i->seq != b->data->keys.seq);
2387
2388 i->version = cpu_to_le16(c->sb.version);
2389 SET_BSET_OFFSET(i, b->written);
2390 SET_BSET_CSUM_TYPE(i, bch2_meta_checksum_type(c));
2391
2392 if (bch2_csum_type_is_encryption(BSET_CSUM_TYPE(i)))
2393 validate_before_checksum = true;
2394
2395 	/* validate_bset will be modifying the bset, so do it before checksumming: */
2396 if (le16_to_cpu(i->version) < bcachefs_metadata_version_current)
2397 validate_before_checksum = true;
2398
2399 /* if we're going to be encrypting, check metadata validity first: */
2400 if (validate_before_checksum &&
2401 validate_bset_for_write(c, b, i, sectors_to_write))
2402 goto err;
2403
2404 ret = bset_encrypt(c, i, b->written << 9);
2405 if (bch2_fs_fatal_err_on(ret, c,
2406 "encrypting btree node: %s", bch2_err_str(ret)))
2407 goto err;
2408
2409 nonce = btree_nonce(i, b->written << 9);
2410
2411 if (bn)
2412 bn->csum = csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, bn);
2413 else
2414 bne->csum = csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, bne);
2415
2416 /* if we're not encrypting, check metadata after checksumming: */
2417 if (!validate_before_checksum &&
2418 validate_bset_for_write(c, b, i, sectors_to_write))
2419 goto err;
2420
2421 /*
2422 * We handle btree write errors by immediately halting the journal -
2423 * after we've done that, we can't issue any subsequent btree writes
2424 * because they might have pointers to new nodes that failed to write.
2425 *
2426 * Furthermore, there's no point in doing any more btree writes because
2427 * with the journal stopped, we're never going to update the journal to
2428 * reflect that those writes were done and the data flushed from the
2429 * journal:
2430 *
2431 * Also on journal error, the pending write may have updates that were
2432 * never journalled (interior nodes, see btree_update_nodes_written()) -
2433 * it's critical that we don't do the write in that case otherwise we
2434 * will have updates visible that weren't in the journal:
2435 *
2436 * Make sure to update b->written so bch2_btree_init_next() doesn't
2437 * break:
2438 */
2439 if (bch2_journal_error(&c->journal) ||
2440 c->opts.nochanges)
2441 goto err;
2442
2443 trace_and_count(c, btree_node_write, b, bytes_to_write, sectors_to_write);
2444
2445 wbio = container_of(bio_alloc_bioset(NULL,
2446 buf_pages(data, sectors_to_write << 9),
2447 REQ_OP_WRITE|REQ_META,
2448 GFP_NOFS,
2449 &c->btree_bio),
2450 struct btree_write_bio, wbio.bio);
2451 wbio_init(&wbio->wbio.bio);
2452 wbio->data = data;
2453 wbio->data_bytes = bytes;
2454 wbio->sector_offset = b->written;
2455 wbio->start_time = start_time;
2456 wbio->wbio.c = c;
2457 wbio->wbio.used_mempool = used_mempool;
2458 wbio->wbio.first_btree_write = !b->written;
2459 wbio->wbio.bio.bi_end_io = btree_node_write_endio;
2460 wbio->wbio.bio.bi_private = b;
2461
2462 bch2_bio_map(&wbio->wbio.bio, data, sectors_to_write << 9);
2463
2464 bkey_copy(&wbio->key, &b->key);
2465
2466 b->written += sectors_to_write;
2467
2468 if (wbio->key.k.type == KEY_TYPE_btree_ptr_v2)
2469 bkey_i_to_btree_ptr_v2(&wbio->key)->v.sectors_written =
2470 cpu_to_le16(b->written);
2471
2472 atomic64_inc(&c->btree_write_stats[type].nr);
2473 atomic64_add(bytes_to_write, &c->btree_write_stats[type].bytes);
2474
2475 INIT_WORK(&wbio->work, btree_write_submit);
2476 queue_work(c->btree_write_submit_wq, &wbio->work);
2477 return;
2478 err:
2479 set_btree_node_noevict(b);
2480 b->written += sectors_to_write;
2481 nowrite:
2482 btree_bounce_free(c, bytes, used_mempool, data);
2483 __btree_node_write_done(c, b, 0);
2484 }
2485
2486 /*
2487 * Work that must be done with write lock held:
2488 */
2489 bool bch2_btree_post_write_cleanup(struct bch_fs *c, struct btree *b)
2490 {
2491 bool invalidated_iter = false;
2492 struct btree_node_entry *bne;
2493
2494 if (!btree_node_just_written(b))
2495 return false;
2496
2497 BUG_ON(b->whiteout_u64s);
2498
2499 clear_btree_node_just_written(b);
2500
2501 /*
2502 * Note: immediately after write, bset_written() doesn't work - the
2503 * amount of data we had to write after compaction might have been
2504 * smaller than the offset of the last bset.
2505 *
2506 * However, we know that all bsets have been written here, as long as
2507 * we're still holding the write lock:
2508 */
2509
2510 /*
2511 * XXX: decide if we really want to unconditionally sort down to a
2512 * single bset:
2513 */
2514 if (b->nsets > 1) {
2515 btree_node_sort(c, b, 0, b->nsets);
2516 invalidated_iter = true;
2517 } else {
2518 invalidated_iter = bch2_drop_whiteouts(b, COMPACT_ALL);
2519 }
2520
2521 for_each_bset(b, t)
2522 set_needs_whiteout(bset(b, t), true);
2523
2524 bch2_btree_verify(c, b);
2525
2526 /*
2527 * If later we don't unconditionally sort down to a single bset, we have
2528 * to ensure this is still true:
2529 */
2530 BUG_ON((void *) btree_bkey_last(b, bset_tree_last(b)) > write_block(b));
2531
2532 bne = want_new_bset(c, b);
2533 if (bne)
2534 bch2_bset_init_next(b, bne);
2535
2536 bch2_btree_build_aux_trees(b);
2537
2538 return invalidated_iter;
2539 }
2540
2541 /*
2542 * Use this one if the node is intent locked:
2543 */
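/*
 * Illustrative usage only (the caller is hypothetical) - write out a node we
 * hold intent locked, but only if it's marked as needing a write:
 *
 *	bch2_btree_node_write(c, b, SIX_LOCK_intent, BTREE_WRITE_ONLY_IF_NEED);
 */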
2544 void bch2_btree_node_write(struct bch_fs *c, struct btree *b,
2545 enum six_lock_type lock_type_held,
2546 unsigned flags)
2547 {
2548 if (lock_type_held == SIX_LOCK_intent ||
2549 (lock_type_held == SIX_LOCK_read &&
2550 six_lock_tryupgrade(&b->c.lock))) {
2551 __bch2_btree_node_write(c, b, flags);
2552
2553 /* don't cycle lock unnecessarily: */
2554 if (btree_node_just_written(b) &&
2555 six_trylock_write(&b->c.lock)) {
2556 bch2_btree_post_write_cleanup(c, b);
2557 six_unlock_write(&b->c.lock);
2558 }
2559
2560 if (lock_type_held == SIX_LOCK_read)
2561 six_lock_downgrade(&b->c.lock);
2562 } else {
2563 __bch2_btree_node_write(c, b, flags);
2564 if (lock_type_held == SIX_LOCK_write &&
2565 btree_node_just_written(b))
2566 bch2_btree_post_write_cleanup(c, b);
2567 }
2568 }
2569
2570 void bch2_btree_node_write_trans(struct btree_trans *trans, struct btree *b,
2571 enum six_lock_type lock_type_held,
2572 unsigned flags)
2573 {
2574 struct bch_fs *c = trans->c;
2575
2576 if (lock_type_held == SIX_LOCK_intent ||
2577 (lock_type_held == SIX_LOCK_read &&
2578 six_lock_tryupgrade(&b->c.lock))) {
2579 __bch2_btree_node_write(c, b, flags);
2580
2581 /* don't cycle lock unnecessarily: */
2582 if (btree_node_just_written(b) &&
2583 six_trylock_write(&b->c.lock)) {
2584 bch2_btree_post_write_cleanup(c, b);
2585 __bch2_btree_node_unlock_write(trans, b);
2586 }
2587
2588 if (lock_type_held == SIX_LOCK_read)
2589 six_lock_downgrade(&b->c.lock);
2590 } else {
2591 __bch2_btree_node_write(c, b, flags);
2592 if (lock_type_held == SIX_LOCK_write &&
2593 btree_node_just_written(b))
2594 bch2_btree_post_write_cleanup(c, b);
2595 }
2596 }
2597
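/*
 * Wait for all in-flight reads or writes (per @flag) on cached btree nodes:
 * scan the cache under RCU, and whenever a node has the flag set, drop RCU,
 * wait for it to clear and restart the scan. Returns true if we had to wait.
 */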
2598 static bool __bch2_btree_flush_all(struct bch_fs *c, unsigned flag)
2599 {
2600 struct bucket_table *tbl;
2601 struct rhash_head *pos;
2602 struct btree *b;
2603 unsigned i;
2604 bool ret = false;
2605 restart:
2606 rcu_read_lock();
2607 for_each_cached_btree(b, c, tbl, i, pos)
2608 if (test_bit(flag, &b->flags)) {
2609 rcu_read_unlock();
2610 wait_on_bit_io(&b->flags, flag, TASK_UNINTERRUPTIBLE);
2611 ret = true;
2612 goto restart;
2613 }
2614 rcu_read_unlock();
2615
2616 return ret;
2617 }
2618
2619 bool bch2_btree_flush_all_reads(struct bch_fs *c)
2620 {
2621 return __bch2_btree_flush_all(c, BTREE_NODE_read_in_flight);
2622 }
2623
2624 bool bch2_btree_flush_all_writes(struct bch_fs *c)
2625 {
2626 return __bch2_btree_flush_all(c, BTREE_NODE_write_in_flight);
2627 }
2628
2629 static const char * const bch2_btree_write_types[] = {
2630 #define x(t, n) [n] = #t,
2631 BCH_BTREE_WRITE_TYPES()
2632 NULL
2633 };
2634
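/*
 * Print, for each btree write type, the number of writes and the average
 * size per write.
 */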
2635 void bch2_btree_write_stats_to_text(struct printbuf *out, struct bch_fs *c)
2636 {
2637 printbuf_tabstop_push(out, 20);
2638 printbuf_tabstop_push(out, 10);
2639
2640 prt_printf(out, "\tnr\tsize\n");
2641
2642 for (unsigned i = 0; i < BTREE_WRITE_TYPE_NR; i++) {
2643 u64 nr = atomic64_read(&c->btree_write_stats[i].nr);
2644 u64 bytes = atomic64_read(&c->btree_write_stats[i].bytes);
2645
2646 prt_printf(out, "%s:\t%llu\t", bch2_btree_write_types[i], nr);
2647 prt_human_readable_u64(out, nr ? div64_u64(bytes, nr) : 0);
2648 prt_newline(out);
2649 }
2650 }
2651