1 // SPDX-License-Identifier: GPL-2.0
2 
3 #include "bcachefs.h"
4 #include "async_objs.h"
5 #include "bkey_buf.h"
6 #include "bkey_methods.h"
7 #include "bkey_sort.h"
8 #include "btree_cache.h"
9 #include "btree_io.h"
10 #include "btree_iter.h"
11 #include "btree_locking.h"
12 #include "btree_update.h"
13 #include "btree_update_interior.h"
14 #include "buckets.h"
15 #include "checksum.h"
16 #include "debug.h"
17 #include "enumerated_ref.h"
18 #include "error.h"
19 #include "extents.h"
20 #include "io_write.h"
21 #include "journal_reclaim.h"
22 #include "journal_seq_blacklist.h"
23 #include "recovery.h"
24 #include "super-io.h"
25 #include "trace.h"
26 
27 #include <linux/sched/mm.h>
28 
29 static void bch2_btree_node_header_to_text(struct printbuf *out, struct btree_node *bn)
30 {
31 	bch2_btree_id_level_to_text(out, BTREE_NODE_ID(bn), BTREE_NODE_LEVEL(bn));
32 	prt_printf(out, " seq %llx %llu\n", bn->keys.seq, BTREE_NODE_SEQ(bn));
33 	prt_str(out, "min: ");
34 	bch2_bpos_to_text(out, bn->min_key);
35 	prt_newline(out);
36 	prt_str(out, "max: ");
37 	bch2_bpos_to_text(out, bn->max_key);
38 }
39 
40 void bch2_btree_node_io_unlock(struct btree *b)
41 {
42 	EBUG_ON(!btree_node_write_in_flight(b));
43 
44 	clear_btree_node_write_in_flight_inner(b);
45 	clear_btree_node_write_in_flight(b);
46 	smp_mb__after_atomic();
47 	wake_up_bit(&b->flags, BTREE_NODE_write_in_flight);
48 }
49 
50 void bch2_btree_node_io_lock(struct btree *b)
51 {
52 	wait_on_bit_lock_io(&b->flags, BTREE_NODE_write_in_flight,
53 			    TASK_UNINTERRUPTIBLE);
54 }
55 
56 void __bch2_btree_node_wait_on_read(struct btree *b)
57 {
58 	wait_on_bit_io(&b->flags, BTREE_NODE_read_in_flight,
59 		       TASK_UNINTERRUPTIBLE);
60 }
61 
62 void __bch2_btree_node_wait_on_write(struct btree *b)
63 {
64 	wait_on_bit_io(&b->flags, BTREE_NODE_write_in_flight,
65 		       TASK_UNINTERRUPTIBLE);
66 }
67 
68 void bch2_btree_node_wait_on_read(struct btree *b)
69 {
70 	wait_on_bit_io(&b->flags, BTREE_NODE_read_in_flight,
71 		       TASK_UNINTERRUPTIBLE);
72 }
73 
74 void bch2_btree_node_wait_on_write(struct btree *b)
75 {
76 	wait_on_bit_io(&b->flags, BTREE_NODE_write_in_flight,
77 		       TASK_UNINTERRUPTIBLE);
78 }
79 
80 static void verify_no_dups(struct btree *b,
81 			   struct bkey_packed *start,
82 			   struct bkey_packed *end)
83 {
84 #ifdef CONFIG_BCACHEFS_DEBUG
85 	struct bkey_packed *k, *p;
86 
87 	if (start == end)
88 		return;
89 
90 	for (p = start, k = bkey_p_next(start);
91 	     k != end;
92 	     p = k, k = bkey_p_next(k)) {
93 		struct bkey l = bkey_unpack_key(b, p);
94 		struct bkey r = bkey_unpack_key(b, k);
95 
96 		BUG_ON(bpos_ge(l.p, bkey_start_pos(&r)));
97 	}
98 #endif
99 }
100 
101 static void set_needs_whiteout(struct bset *i, int v)
102 {
103 	struct bkey_packed *k;
104 
105 	for (k = i->start; k != vstruct_last(i); k = bkey_p_next(k))
106 		k->needs_whiteout = v;
107 }
108 
109 static void btree_bounce_free(struct bch_fs *c, size_t size,
110 			      bool used_mempool, void *p)
111 {
112 	if (used_mempool)
113 		mempool_free(p, &c->btree_bounce_pool);
114 	else
115 		kvfree(p);
116 }
117 
118 static void *btree_bounce_alloc(struct bch_fs *c, size_t size,
119 				bool *used_mempool)
120 {
121 	unsigned flags = memalloc_nofs_save();
122 	void *p;
123 
124 	BUG_ON(size > c->opts.btree_node_size);
125 
126 	*used_mempool = false;
127 	p = kvmalloc(size, __GFP_NOWARN|GFP_NOWAIT);
128 	if (!p) {
129 		*used_mempool = true;
130 		p = mempool_alloc(&c->btree_bounce_pool, GFP_NOFS);
131 	}
132 	memalloc_nofs_restore(flags);
133 	return p;
134 }
135 
136 static void sort_bkey_ptrs(const struct btree *bt,
137 			   struct bkey_packed **ptrs, unsigned nr)
138 {
139 	unsigned n = nr, a = nr / 2, b, c, d;
140 
141 	if (!a)
142 		return;
143 
144 	/* Heap sort: see lib/sort.c: */
145 	while (1) {
146 		if (a)
147 			a--;
148 		else if (--n)
149 			swap(ptrs[0], ptrs[n]);
150 		else
151 			break;
152 
153 		for (b = a; c = 2 * b + 1, (d = c + 1) < n;)
154 			b = bch2_bkey_cmp_packed(bt,
155 					    ptrs[c],
156 					    ptrs[d]) >= 0 ? c : d;
157 		if (d == n)
158 			b = c;
159 
160 		while (b != a &&
161 		       bch2_bkey_cmp_packed(bt,
162 				       ptrs[a],
163 				       ptrs[b]) >= 0)
164 			b = (b - 1) / 2;
165 		c = b;
166 		while (b != a) {
167 			b = (b - 1) / 2;
168 			swap(ptrs[b], ptrs[c]);
169 		}
170 	}
171 }
172 
173 static void bch2_sort_whiteouts(struct bch_fs *c, struct btree *b)
174 {
175 	struct bkey_packed *new_whiteouts, **ptrs, **ptrs_end, *k;
176 	bool used_mempool = false;
177 	size_t bytes = b->whiteout_u64s * sizeof(u64);
178 
179 	if (!b->whiteout_u64s)
180 		return;
181 
182 	new_whiteouts = btree_bounce_alloc(c, bytes, &used_mempool);
183 
184 	ptrs = ptrs_end = ((void *) new_whiteouts + bytes);
185 
186 	for (k = unwritten_whiteouts_start(b);
187 	     k != unwritten_whiteouts_end(b);
188 	     k = bkey_p_next(k))
189 		*--ptrs = k;
190 
191 	sort_bkey_ptrs(b, ptrs, ptrs_end - ptrs);
192 
193 	k = new_whiteouts;
194 
195 	while (ptrs != ptrs_end) {
196 		bkey_p_copy(k, *ptrs);
197 		k = bkey_p_next(k);
198 		ptrs++;
199 	}
200 
201 	verify_no_dups(b, new_whiteouts,
202 		       (void *) ((u64 *) new_whiteouts + b->whiteout_u64s));
203 
204 	memcpy_u64s(unwritten_whiteouts_start(b),
205 		    new_whiteouts, b->whiteout_u64s);
206 
207 	btree_bounce_free(c, bytes, used_mempool, new_whiteouts);
208 }
209 
210 static bool should_compact_bset(struct btree *b, struct bset_tree *t,
211 				bool compacting, enum compact_mode mode)
212 {
213 	if (!bset_dead_u64s(b, t))
214 		return false;
215 
216 	switch (mode) {
217 	case COMPACT_LAZY:
218 		return should_compact_bset_lazy(b, t) ||
219 			(compacting && !bset_written(b, bset(b, t)));
220 	case COMPACT_ALL:
221 		return true;
222 	default:
223 		BUG();
224 	}
225 }
226 
227 static bool bch2_drop_whiteouts(struct btree *b, enum compact_mode mode)
228 {
229 	bool ret = false;
230 
231 	for_each_bset(b, t) {
232 		struct bset *i = bset(b, t);
233 		struct bkey_packed *k, *n, *out, *start, *end;
234 		struct btree_node_entry *src = NULL, *dst = NULL;
235 
236 		if (t != b->set && !bset_written(b, i)) {
237 			src = container_of(i, struct btree_node_entry, keys);
238 			dst = max(write_block(b),
239 				  (void *) btree_bkey_last(b, t - 1));
240 		}
241 
242 		if (src != dst)
243 			ret = true;
244 
245 		if (!should_compact_bset(b, t, ret, mode)) {
246 			if (src != dst) {
247 				memmove(dst, src, sizeof(*src) +
248 					le16_to_cpu(src->keys.u64s) *
249 					sizeof(u64));
250 				i = &dst->keys;
251 				set_btree_bset(b, t, i);
252 			}
253 			continue;
254 		}
255 
256 		start	= btree_bkey_first(b, t);
257 		end	= btree_bkey_last(b, t);
258 
259 		if (src != dst) {
260 			memmove(dst, src, sizeof(*src));
261 			i = &dst->keys;
262 			set_btree_bset(b, t, i);
263 		}
264 
265 		out = i->start;
266 
267 		for (k = start; k != end; k = n) {
268 			n = bkey_p_next(k);
269 
270 			if (!bkey_deleted(k)) {
271 				bkey_p_copy(out, k);
272 				out = bkey_p_next(out);
273 			} else {
274 				BUG_ON(k->needs_whiteout);
275 			}
276 		}
277 
278 		i->u64s = cpu_to_le16((u64 *) out - i->_data);
279 		set_btree_bset_end(b, t);
280 		bch2_bset_set_no_aux_tree(b, t);
281 		ret = true;
282 	}
283 
284 	bch2_verify_btree_nr_keys(b);
285 
286 	bch2_btree_build_aux_trees(b);
287 
288 	return ret;
289 }
290 
291 bool bch2_compact_whiteouts(struct bch_fs *c, struct btree *b,
292 			    enum compact_mode mode)
293 {
294 	return bch2_drop_whiteouts(b, mode);
295 }
296 
297 static void btree_node_sort(struct bch_fs *c, struct btree *b,
298 			    unsigned start_idx,
299 			    unsigned end_idx)
300 {
301 	struct btree_node *out;
302 	struct sort_iter_stack sort_iter;
303 	struct bset_tree *t;
304 	struct bset *start_bset = bset(b, &b->set[start_idx]);
305 	bool used_mempool = false;
306 	u64 start_time, seq = 0;
307 	unsigned i, u64s = 0, bytes, shift = end_idx - start_idx - 1;
308 	bool sorting_entire_node = start_idx == 0 &&
309 		end_idx == b->nsets;
310 
311 	sort_iter_stack_init(&sort_iter, b);
312 
313 	for (t = b->set + start_idx;
314 	     t < b->set + end_idx;
315 	     t++) {
316 		u64s += le16_to_cpu(bset(b, t)->u64s);
317 		sort_iter_add(&sort_iter.iter,
318 			      btree_bkey_first(b, t),
319 			      btree_bkey_last(b, t));
320 	}
321 
322 	bytes = sorting_entire_node
323 		? btree_buf_bytes(b)
324 		: __vstruct_bytes(struct btree_node, u64s);
325 
326 	out = btree_bounce_alloc(c, bytes, &used_mempool);
327 
328 	start_time = local_clock();
329 
330 	u64s = bch2_sort_keys(out->keys.start, &sort_iter.iter);
331 
332 	out->keys.u64s = cpu_to_le16(u64s);
333 
334 	BUG_ON(vstruct_end(&out->keys) > (void *) out + bytes);
335 
336 	if (sorting_entire_node)
337 		bch2_time_stats_update(&c->times[BCH_TIME_btree_node_sort],
338 				       start_time);
339 
340 	/* Make sure we preserve bset journal_seq: */
341 	for (t = b->set + start_idx; t < b->set + end_idx; t++)
342 		seq = max(seq, le64_to_cpu(bset(b, t)->journal_seq));
343 	start_bset->journal_seq = cpu_to_le64(seq);
344 
345 	if (sorting_entire_node) {
346 		u64s = le16_to_cpu(out->keys.u64s);
347 
348 		BUG_ON(bytes != btree_buf_bytes(b));
349 
350 		/*
351 		 * Our temporary buffer is the same size as the btree node's
352 		 * buffer, we can just swap buffers instead of doing a big
353 		 * memcpy()
354 		 */
355 		*out = *b->data;
356 		out->keys.u64s = cpu_to_le16(u64s);
357 		swap(out, b->data);
358 		set_btree_bset(b, b->set, &b->data->keys);
359 	} else {
360 		start_bset->u64s = out->keys.u64s;
361 		memcpy_u64s(start_bset->start,
362 			    out->keys.start,
363 			    le16_to_cpu(out->keys.u64s));
364 	}
365 
366 	for (i = start_idx + 1; i < end_idx; i++)
367 		b->nr.bset_u64s[start_idx] +=
368 			b->nr.bset_u64s[i];
369 
370 	b->nsets -= shift;
371 
372 	for (i = start_idx + 1; i < b->nsets; i++) {
373 		b->nr.bset_u64s[i]	= b->nr.bset_u64s[i + shift];
374 		b->set[i]		= b->set[i + shift];
375 	}
376 
377 	for (i = b->nsets; i < MAX_BSETS; i++)
378 		b->nr.bset_u64s[i] = 0;
379 
380 	set_btree_bset_end(b, &b->set[start_idx]);
381 	bch2_bset_set_no_aux_tree(b, &b->set[start_idx]);
382 
383 	btree_bounce_free(c, bytes, used_mempool, out);
384 
385 	bch2_verify_btree_nr_keys(b);
386 }
387 
388 void bch2_btree_sort_into(struct bch_fs *c,
389 			 struct btree *dst,
390 			 struct btree *src)
391 {
392 	struct btree_nr_keys nr;
393 	struct btree_node_iter src_iter;
394 	u64 start_time = local_clock();
395 
396 	BUG_ON(dst->nsets != 1);
397 
398 	bch2_bset_set_no_aux_tree(dst, dst->set);
399 
400 	bch2_btree_node_iter_init_from_start(&src_iter, src);
401 
402 	nr = bch2_sort_repack(btree_bset_first(dst),
403 			src, &src_iter,
404 			&dst->format,
405 			true);
406 
407 	bch2_time_stats_update(&c->times[BCH_TIME_btree_node_sort],
408 			       start_time);
409 
410 	set_btree_bset_end(dst, dst->set);
411 
412 	dst->nr.live_u64s	+= nr.live_u64s;
413 	dst->nr.bset_u64s[0]	+= nr.bset_u64s[0];
414 	dst->nr.packed_keys	+= nr.packed_keys;
415 	dst->nr.unpacked_keys	+= nr.unpacked_keys;
416 
417 	bch2_verify_btree_nr_keys(dst);
418 }
419 
420 /*
421  * We're about to add another bset to the btree node, so if there's currently
422  * too many bsets - sort some of them together:
423  */
424 static bool btree_node_compact(struct bch_fs *c, struct btree *b)
425 {
426 	unsigned unwritten_idx;
427 	bool ret = false;
428 
429 	for (unwritten_idx = 0;
430 	     unwritten_idx < b->nsets;
431 	     unwritten_idx++)
432 		if (!bset_written(b, bset(b, &b->set[unwritten_idx])))
433 			break;
434 
435 	if (b->nsets - unwritten_idx > 1) {
436 		btree_node_sort(c, b, unwritten_idx, b->nsets);
437 		ret = true;
438 	}
439 
440 	if (unwritten_idx > 1) {
441 		btree_node_sort(c, b, 0, unwritten_idx);
442 		ret = true;
443 	}
444 
445 	return ret;
446 }
447 
448 void bch2_btree_build_aux_trees(struct btree *b)
449 {
450 	for_each_bset(b, t)
451 		bch2_bset_build_aux_tree(b, t,
452 				!bset_written(b, bset(b, t)) &&
453 				t == bset_tree_last(b));
454 }
455 
456 /*
457  * If we have MAX_BSETS (3) bsets, should we sort them all down to just one?
458  *
459  * The first bset is going to be of similar order to the size of the node, the
460  * last bset is bounded by btree_write_set_buffer(), which is set to keep the
461  * memmove on insert from being too expensive: the middle bset should, ideally,
462  * be the geometric mean of the first and the last.
463  *
464  * Returns true if the middle bset is greater than that geometric mean:
465  */
466 static inline bool should_compact_all(struct bch_fs *c, struct btree *b)
467 {
468 	unsigned mid_u64s_bits =
469 		(ilog2(btree_max_u64s(c)) + BTREE_WRITE_SET_U64s_BITS) / 2;
470 
471 	return bset_u64s(&b->set[1]) > 1U << mid_u64s_bits;
472 }
473 
474 /*
475  * @bch_btree_init_next - initialize a new (unwritten) bset that can then be
476  * inserted into
477  *
478  * Safe to call if there already is an unwritten bset - will only add a new bset
479  * if @b doesn't already have one.
480  *
481  * Returns true if we sorted (i.e. invalidated iterators
482  */
483 void bch2_btree_init_next(struct btree_trans *trans, struct btree *b)
484 {
485 	struct bch_fs *c = trans->c;
486 	struct btree_node_entry *bne;
487 	bool reinit_iter = false;
488 
489 	EBUG_ON(!six_lock_counts(&b->c.lock).n[SIX_LOCK_write]);
490 	BUG_ON(bset_written(b, bset(b, &b->set[1])));
491 	BUG_ON(btree_node_just_written(b));
492 
493 	if (b->nsets == MAX_BSETS &&
494 	    !btree_node_write_in_flight(b) &&
495 	    should_compact_all(c, b)) {
496 		bch2_btree_node_write_trans(trans, b, SIX_LOCK_write,
497 					    BTREE_WRITE_init_next_bset);
498 		reinit_iter = true;
499 	}
500 
501 	if (b->nsets == MAX_BSETS &&
502 	    btree_node_compact(c, b))
503 		reinit_iter = true;
504 
505 	BUG_ON(b->nsets >= MAX_BSETS);
506 
507 	bne = want_new_bset(c, b);
508 	if (bne)
509 		bch2_bset_init_next(b, bne);
510 
511 	bch2_btree_build_aux_trees(b);
512 
513 	if (reinit_iter)
514 		bch2_trans_node_reinit_iter(trans, b);
515 }
516 
517 static void btree_err_msg(struct printbuf *out, struct bch_fs *c,
518 			  struct bch_dev *ca,
519 			  bool print_pos,
520 			  struct btree *b, struct bset *i, struct bkey_packed *k,
521 			  unsigned offset, int rw)
522 {
523 	if (print_pos) {
524 		prt_str(out, rw == READ
525 			? "error validating btree node "
526 			: "corrupt btree node before write ");
527 		prt_printf(out, "at btree ");
528 		bch2_btree_pos_to_text(out, c, b);
529 		prt_newline(out);
530 	}
531 
532 	if (ca)
533 		prt_printf(out, "%s ", ca->name);
534 
535 	prt_printf(out, "node offset %u/%u",
536 		   b->written, btree_ptr_sectors_written(bkey_i_to_s_c(&b->key)));
537 	if (i)
538 		prt_printf(out, " bset u64s %u", le16_to_cpu(i->u64s));
539 	if (k)
540 		prt_printf(out, " bset byte offset %lu",
541 			   (unsigned long)(void *)k -
542 			   ((unsigned long)(void *)i & ~511UL));
543 	prt_str(out, ": ");
544 }
545 
546 __printf(11, 12)
547 static int __btree_err(int ret,
548 		       struct bch_fs *c,
549 		       struct bch_dev *ca,
550 		       struct btree *b,
551 		       struct bset *i,
552 		       struct bkey_packed *k,
553 		       int rw,
554 		       enum bch_sb_error_id err_type,
555 		       struct bch_io_failures *failed,
556 		       struct printbuf *err_msg,
557 		       const char *fmt, ...)
558 {
559 	if (c->recovery.curr_pass == BCH_RECOVERY_PASS_scan_for_btree_nodes)
560 		return -BCH_ERR_fsck_fix;
561 
562 	bool have_retry = false;
563 	int ret2;
564 
565 	if (ca) {
566 		bch2_mark_btree_validate_failure(failed, ca->dev_idx);
567 
568 		struct extent_ptr_decoded pick;
569 		have_retry = !bch2_bkey_pick_read_device(c,
570 					bkey_i_to_s_c(&b->key),
571 					failed, &pick, -1);
572 	}
573 
574 	if (!have_retry && ret == -BCH_ERR_btree_node_read_err_want_retry)
575 		ret = -BCH_ERR_btree_node_read_err_fixable;
576 	if (!have_retry && ret == -BCH_ERR_btree_node_read_err_must_retry)
577 		ret = -BCH_ERR_btree_node_read_err_bad_node;
578 
579 	bch2_sb_error_count(c, err_type);
580 
581 	bool print_deferred = err_msg &&
582 		rw == READ &&
583 		!(test_bit(BCH_FS_in_fsck, &c->flags) &&
584 		  c->opts.fix_errors == FSCK_FIX_ask);
585 
586 	struct printbuf out = PRINTBUF;
587 	bch2_log_msg_start(c, &out);
588 
589 	if (!print_deferred)
590 		err_msg = &out;
591 
592 	btree_err_msg(err_msg, c, ca, !print_deferred, b, i, k, b->written, rw);
593 
594 	va_list args;
595 	va_start(args, fmt);
596 	prt_vprintf(err_msg, fmt, args);
597 	va_end(args);
598 
599 	if (print_deferred) {
600 		prt_newline(err_msg);
601 
602 		switch (ret) {
603 		case -BCH_ERR_btree_node_read_err_fixable:
604 			ret2 = bch2_fsck_err_opt(c, FSCK_CAN_FIX, err_type);
605 			if (ret2 != -BCH_ERR_fsck_fix &&
606 			    ret2 != -BCH_ERR_fsck_ignore) {
607 				ret = ret2;
608 				goto fsck_err;
609 			}
610 
611 			if (!have_retry)
612 				ret = -BCH_ERR_fsck_fix;
613 			goto out;
614 		case -BCH_ERR_btree_node_read_err_bad_node:
615 			prt_str(&out, ", ");
616 			ret = __bch2_topology_error(c, &out);
617 			break;
618 		}
619 
620 		goto out;
621 	}
622 
623 	if (rw == WRITE) {
624 		prt_str(&out, ", ");
625 		ret = __bch2_inconsistent_error(c, &out)
626 			? -BCH_ERR_fsck_errors_not_fixed
627 			: 0;
628 		goto print;
629 	}
630 
631 	switch (ret) {
632 	case -BCH_ERR_btree_node_read_err_fixable:
633 		ret2 = __bch2_fsck_err(c, NULL, FSCK_CAN_FIX, err_type, "%s", out.buf);
634 		if (ret2 != -BCH_ERR_fsck_fix &&
635 		    ret2 != -BCH_ERR_fsck_ignore) {
636 			ret = ret2;
637 			goto fsck_err;
638 		}
639 
640 		if (!have_retry)
641 			ret = -BCH_ERR_fsck_fix;
642 		goto out;
643 	case -BCH_ERR_btree_node_read_err_bad_node:
644 		prt_str(&out, ", ");
645 		ret = __bch2_topology_error(c, &out);
646 		break;
647 	}
648 print:
649 	bch2_print_str(c, KERN_ERR, out.buf);
650 out:
651 fsck_err:
652 	printbuf_exit(&out);
653 	return ret;
654 }
655 
656 #define btree_err(type, c, ca, b, i, k, _err_type, msg, ...)		\
657 ({									\
658 	int _ret = __btree_err(type, c, ca, b, i, k, write,		\
659 			       BCH_FSCK_ERR_##_err_type,		\
660 			       failed, err_msg,				\
661 			       msg, ##__VA_ARGS__);			\
662 									\
663 	if (_ret != -BCH_ERR_fsck_fix) {				\
664 		ret = _ret;						\
665 		goto fsck_err;						\
666 	}								\
667 									\
668 	true;								\
669 })
670 
671 #define btree_err_on(cond, ...)	((cond) ? btree_err(__VA_ARGS__) : false)
672 
673 /*
674  * When btree topology repair changes the start or end of a node, that might
675  * mean we have to drop keys that are no longer inside the node:
676  */
677 __cold
678 void bch2_btree_node_drop_keys_outside_node(struct btree *b)
679 {
680 	for_each_bset(b, t) {
681 		struct bset *i = bset(b, t);
682 		struct bkey_packed *k;
683 
684 		for (k = i->start; k != vstruct_last(i); k = bkey_p_next(k))
685 			if (bkey_cmp_left_packed(b, k, &b->data->min_key) >= 0)
686 				break;
687 
688 		if (k != i->start) {
689 			unsigned shift = (u64 *) k - (u64 *) i->start;
690 
691 			memmove_u64s_down(i->start, k,
692 					  (u64 *) vstruct_end(i) - (u64 *) k);
693 			i->u64s = cpu_to_le16(le16_to_cpu(i->u64s) - shift);
694 			set_btree_bset_end(b, t);
695 		}
696 
697 		for (k = i->start; k != vstruct_last(i); k = bkey_p_next(k))
698 			if (bkey_cmp_left_packed(b, k, &b->data->max_key) > 0)
699 				break;
700 
701 		if (k != vstruct_last(i)) {
702 			i->u64s = cpu_to_le16((u64 *) k - (u64 *) i->start);
703 			set_btree_bset_end(b, t);
704 		}
705 	}
706 
707 	/*
708 	 * Always rebuild search trees: eytzinger search tree nodes directly
709 	 * depend on the values of min/max key:
710 	 */
711 	bch2_bset_set_no_aux_tree(b, b->set);
712 	bch2_btree_build_aux_trees(b);
713 	b->nr = bch2_btree_node_count_keys(b);
714 
715 	struct bkey_s_c k;
716 	struct bkey unpacked;
717 	struct btree_node_iter iter;
718 	for_each_btree_node_key_unpack(b, k, &iter, &unpacked) {
719 		BUG_ON(bpos_lt(k.k->p, b->data->min_key));
720 		BUG_ON(bpos_gt(k.k->p, b->data->max_key));
721 	}
722 }
723 
724 static int validate_bset(struct bch_fs *c, struct bch_dev *ca,
725 			 struct btree *b, struct bset *i,
726 			 unsigned offset, unsigned sectors, int write,
727 			 struct bch_io_failures *failed,
728 			 struct printbuf *err_msg)
729 {
730 	unsigned version = le16_to_cpu(i->version);
731 	unsigned ptr_written = btree_ptr_sectors_written(bkey_i_to_s_c(&b->key));
732 	struct printbuf buf1 = PRINTBUF;
733 	struct printbuf buf2 = PRINTBUF;
734 	int ret = 0;
735 
736 	btree_err_on(!bch2_version_compatible(version),
737 		     -BCH_ERR_btree_node_read_err_incompatible,
738 		     c, ca, b, i, NULL,
739 		     btree_node_unsupported_version,
740 		     "unsupported bset version %u.%u",
741 		     BCH_VERSION_MAJOR(version),
742 		     BCH_VERSION_MINOR(version));
743 
744 	if (btree_err_on(version < c->sb.version_min,
745 			 -BCH_ERR_btree_node_read_err_fixable,
746 			 c, NULL, b, i, NULL,
747 			 btree_node_bset_older_than_sb_min,
748 			 "bset version %u older than superblock version_min %u",
749 			 version, c->sb.version_min)) {
750 		mutex_lock(&c->sb_lock);
751 		c->disk_sb.sb->version_min = cpu_to_le16(version);
752 		bch2_write_super(c);
753 		mutex_unlock(&c->sb_lock);
754 	}
755 
756 	if (btree_err_on(BCH_VERSION_MAJOR(version) >
757 			 BCH_VERSION_MAJOR(c->sb.version),
758 			 -BCH_ERR_btree_node_read_err_fixable,
759 			 c, NULL, b, i, NULL,
760 			 btree_node_bset_newer_than_sb,
761 			 "bset version %u newer than superblock version %u",
762 			 version, c->sb.version)) {
763 		mutex_lock(&c->sb_lock);
764 		c->disk_sb.sb->version = cpu_to_le16(version);
765 		bch2_write_super(c);
766 		mutex_unlock(&c->sb_lock);
767 	}
768 
769 	btree_err_on(BSET_SEPARATE_WHITEOUTS(i),
770 		     -BCH_ERR_btree_node_read_err_incompatible,
771 		     c, ca, b, i, NULL,
772 		     btree_node_unsupported_version,
773 		     "BSET_SEPARATE_WHITEOUTS no longer supported");
774 
775 	if (!write &&
776 	    btree_err_on(offset + sectors > (ptr_written ?: btree_sectors(c)),
777 			 -BCH_ERR_btree_node_read_err_fixable,
778 			 c, ca, b, i, NULL,
779 			 bset_past_end_of_btree_node,
780 			 "bset past end of btree node (offset %u len %u but written %zu)",
781 			 offset, sectors, ptr_written ?: btree_sectors(c)))
782 		i->u64s = 0;
783 
784 	btree_err_on(offset && !i->u64s,
785 		     -BCH_ERR_btree_node_read_err_fixable,
786 		     c, ca, b, i, NULL,
787 		     bset_empty,
788 		     "empty bset");
789 
790 	btree_err_on(BSET_OFFSET(i) && BSET_OFFSET(i) != offset,
791 		     -BCH_ERR_btree_node_read_err_want_retry,
792 		     c, ca, b, i, NULL,
793 		     bset_wrong_sector_offset,
794 		     "bset at wrong sector offset");
795 
796 	if (!offset) {
797 		struct btree_node *bn =
798 			container_of(i, struct btree_node, keys);
799 		/* These indicate that we read the wrong btree node: */
800 
801 		if (b->key.k.type == KEY_TYPE_btree_ptr_v2) {
802 			struct bch_btree_ptr_v2 *bp =
803 				&bkey_i_to_btree_ptr_v2(&b->key)->v;
804 
805 			/* XXX endianness */
806 			btree_err_on(bp->seq != bn->keys.seq,
807 				     -BCH_ERR_btree_node_read_err_must_retry,
808 				     c, ca, b, NULL, NULL,
809 				     bset_bad_seq,
810 				     "incorrect sequence number (wrong btree node)");
811 		}
812 
813 		btree_err_on(BTREE_NODE_ID(bn) != b->c.btree_id,
814 			     -BCH_ERR_btree_node_read_err_must_retry,
815 			     c, ca, b, i, NULL,
816 			     btree_node_bad_btree,
817 			     "incorrect btree id");
818 
819 		btree_err_on(BTREE_NODE_LEVEL(bn) != b->c.level,
820 			     -BCH_ERR_btree_node_read_err_must_retry,
821 			     c, ca, b, i, NULL,
822 			     btree_node_bad_level,
823 			     "incorrect level");
824 
825 		if (!write)
826 			compat_btree_node(b->c.level, b->c.btree_id, version,
827 					  BSET_BIG_ENDIAN(i), write, bn);
828 
829 		if (b->key.k.type == KEY_TYPE_btree_ptr_v2) {
830 			struct bch_btree_ptr_v2 *bp =
831 				&bkey_i_to_btree_ptr_v2(&b->key)->v;
832 
833 			if (BTREE_PTR_RANGE_UPDATED(bp)) {
834 				b->data->min_key = bp->min_key;
835 				b->data->max_key = b->key.k.p;
836 			}
837 
838 			btree_err_on(!bpos_eq(b->data->min_key, bp->min_key),
839 				     -BCH_ERR_btree_node_read_err_must_retry,
840 				     c, ca, b, NULL, NULL,
841 				     btree_node_bad_min_key,
842 				     "incorrect min_key: got %s should be %s",
843 				     (printbuf_reset(&buf1),
844 				      bch2_bpos_to_text(&buf1, bn->min_key), buf1.buf),
845 				     (printbuf_reset(&buf2),
846 				      bch2_bpos_to_text(&buf2, bp->min_key), buf2.buf));
847 		}
848 
849 		btree_err_on(!bpos_eq(bn->max_key, b->key.k.p),
850 			     -BCH_ERR_btree_node_read_err_must_retry,
851 			     c, ca, b, i, NULL,
852 			     btree_node_bad_max_key,
853 			     "incorrect max key %s",
854 			     (printbuf_reset(&buf1),
855 			      bch2_bpos_to_text(&buf1, bn->max_key), buf1.buf));
856 
857 		if (write)
858 			compat_btree_node(b->c.level, b->c.btree_id, version,
859 					  BSET_BIG_ENDIAN(i), write, bn);
860 
861 		btree_err_on(bch2_bkey_format_invalid(c, &bn->format, write, &buf1),
862 			     -BCH_ERR_btree_node_read_err_bad_node,
863 			     c, ca, b, i, NULL,
864 			     btree_node_bad_format,
865 			     "invalid bkey format: %s\n%s", buf1.buf,
866 			     (printbuf_reset(&buf2),
867 			      bch2_bkey_format_to_text(&buf2, &bn->format), buf2.buf));
868 		printbuf_reset(&buf1);
869 
870 		compat_bformat(b->c.level, b->c.btree_id, version,
871 			       BSET_BIG_ENDIAN(i), write,
872 			       &bn->format);
873 	}
874 fsck_err:
875 	printbuf_exit(&buf2);
876 	printbuf_exit(&buf1);
877 	return ret;
878 }
879 
880 static int btree_node_bkey_val_validate(struct bch_fs *c, struct btree *b,
881 					struct bkey_s_c k,
882 					enum bch_validate_flags flags)
883 {
884 	return bch2_bkey_val_validate(c, k, (struct bkey_validate_context) {
885 		.from	= BKEY_VALIDATE_btree_node,
886 		.level	= b->c.level,
887 		.btree	= b->c.btree_id,
888 		.flags	= flags
889 	});
890 }
891 
892 static int bset_key_validate(struct bch_fs *c, struct btree *b,
893 			     struct bkey_s_c k,
894 			     bool updated_range,
895 			     enum bch_validate_flags flags)
896 {
897 	struct bkey_validate_context from = (struct bkey_validate_context) {
898 		.from	= BKEY_VALIDATE_btree_node,
899 		.level	= b->c.level,
900 		.btree	= b->c.btree_id,
901 		.flags	= flags,
902 	};
903 	return __bch2_bkey_validate(c, k, from) ?:
904 		(!updated_range ? bch2_bkey_in_btree_node(c, b, k, from) : 0) ?:
905 		(flags & BCH_VALIDATE_write ? btree_node_bkey_val_validate(c, b, k, flags) : 0);
906 }
907 
908 static bool bkey_packed_valid(struct bch_fs *c, struct btree *b,
909 			 struct bset *i, struct bkey_packed *k)
910 {
911 	if (bkey_p_next(k) > vstruct_last(i))
912 		return false;
913 
914 	if (k->format > KEY_FORMAT_CURRENT)
915 		return false;
916 
917 	if (!bkeyp_u64s_valid(&b->format, k))
918 		return false;
919 
920 	struct bkey tmp;
921 	struct bkey_s u = __bkey_disassemble(b, k, &tmp);
922 	return !__bch2_bkey_validate(c, u.s_c,
923 				     (struct bkey_validate_context) {
924 					.from	= BKEY_VALIDATE_btree_node,
925 					.level	= b->c.level,
926 					.btree	= b->c.btree_id,
927 					.flags	= BCH_VALIDATE_silent
928 				     });
929 }
930 
931 static inline int btree_node_read_bkey_cmp(const struct btree *b,
932 				const struct bkey_packed *l,
933 				const struct bkey_packed *r)
934 {
935 	return bch2_bkey_cmp_packed(b, l, r)
936 		?: (int) bkey_deleted(r) - (int) bkey_deleted(l);
937 }
938 
939 static int validate_bset_keys(struct bch_fs *c, struct btree *b,
940 			 struct bset *i, int write,
941 			 struct bch_io_failures *failed,
942 			 struct printbuf *err_msg)
943 {
944 	unsigned version = le16_to_cpu(i->version);
945 	struct bkey_packed *k, *prev = NULL;
946 	struct printbuf buf = PRINTBUF;
947 	bool updated_range = b->key.k.type == KEY_TYPE_btree_ptr_v2 &&
948 		BTREE_PTR_RANGE_UPDATED(&bkey_i_to_btree_ptr_v2(&b->key)->v);
949 	int ret = 0;
950 
951 	for (k = i->start;
952 	     k != vstruct_last(i);) {
953 		struct bkey_s u;
954 		struct bkey tmp;
955 		unsigned next_good_key;
956 
957 		if (btree_err_on(bkey_p_next(k) > vstruct_last(i),
958 				 -BCH_ERR_btree_node_read_err_fixable,
959 				 c, NULL, b, i, k,
960 				 btree_node_bkey_past_bset_end,
961 				 "key extends past end of bset")) {
962 			i->u64s = cpu_to_le16((u64 *) k - i->_data);
963 			break;
964 		}
965 
966 		if (btree_err_on(k->format > KEY_FORMAT_CURRENT,
967 				 -BCH_ERR_btree_node_read_err_fixable,
968 				 c, NULL, b, i, k,
969 				 btree_node_bkey_bad_format,
970 				 "invalid bkey format %u", k->format))
971 			goto drop_this_key;
972 
973 		if (btree_err_on(!bkeyp_u64s_valid(&b->format, k),
974 				 -BCH_ERR_btree_node_read_err_fixable,
975 				 c, NULL, b, i, k,
976 				 btree_node_bkey_bad_u64s,
977 				 "bad k->u64s %u (min %u max %zu)", k->u64s,
978 				 bkeyp_key_u64s(&b->format, k),
979 				 U8_MAX - BKEY_U64s + bkeyp_key_u64s(&b->format, k)))
980 			goto drop_this_key;
981 
982 		if (!write)
983 			bch2_bkey_compat(b->c.level, b->c.btree_id, version,
984 				    BSET_BIG_ENDIAN(i), write,
985 				    &b->format, k);
986 
987 		u = __bkey_disassemble(b, k, &tmp);
988 
989 		ret = bset_key_validate(c, b, u.s_c, updated_range, write);
990 		if (ret == -BCH_ERR_fsck_delete_bkey)
991 			goto drop_this_key;
992 		if (ret)
993 			goto fsck_err;
994 
995 		if (write)
996 			bch2_bkey_compat(b->c.level, b->c.btree_id, version,
997 				    BSET_BIG_ENDIAN(i), write,
998 				    &b->format, k);
999 
1000 		if (prev && btree_node_read_bkey_cmp(b, prev, k) >= 0) {
1001 			struct bkey up = bkey_unpack_key(b, prev);
1002 
1003 			printbuf_reset(&buf);
1004 			prt_printf(&buf, "keys out of order: ");
1005 			bch2_bkey_to_text(&buf, &up);
1006 			prt_printf(&buf, " > ");
1007 			bch2_bkey_to_text(&buf, u.k);
1008 
1009 			if (btree_err(-BCH_ERR_btree_node_read_err_fixable,
1010 				      c, NULL, b, i, k,
1011 				      btree_node_bkey_out_of_order,
1012 				      "%s", buf.buf))
1013 				goto drop_this_key;
1014 		}
1015 
1016 		prev = k;
1017 		k = bkey_p_next(k);
1018 		continue;
1019 drop_this_key:
1020 		next_good_key = k->u64s;
1021 
1022 		if (!next_good_key ||
1023 		    (BSET_BIG_ENDIAN(i) == CPU_BIG_ENDIAN &&
1024 		     version >= bcachefs_metadata_version_snapshot)) {
1025 			/*
1026 			 * only do scanning if bch2_bkey_compat() has nothing to
1027 			 * do
1028 			 */
1029 
1030 			if (!bkey_packed_valid(c, b, i, (void *) ((u64 *) k + next_good_key))) {
1031 				for (next_good_key = 1;
1032 				     next_good_key < (u64 *) vstruct_last(i) - (u64 *) k;
1033 				     next_good_key++)
1034 					if (bkey_packed_valid(c, b, i, (void *) ((u64 *) k + next_good_key)))
1035 						goto got_good_key;
1036 			}
1037 
1038 			/*
1039 			 * didn't find a good key, have to truncate the rest of
1040 			 * the bset
1041 			 */
1042 			next_good_key = (u64 *) vstruct_last(i) - (u64 *) k;
1043 		}
1044 got_good_key:
1045 		le16_add_cpu(&i->u64s, -next_good_key);
1046 		memmove_u64s_down(k, (u64 *) k + next_good_key, (u64 *) vstruct_end(i) - (u64 *) k);
1047 		set_btree_node_need_rewrite(b);
1048 	}
1049 fsck_err:
1050 	printbuf_exit(&buf);
1051 	return ret;
1052 }
1053 
1054 int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca,
1055 			      struct btree *b,
1056 			      struct bch_io_failures *failed,
1057 			      struct printbuf *err_msg)
1058 {
1059 	struct btree_node_entry *bne;
1060 	struct sort_iter *iter;
1061 	struct btree_node *sorted;
1062 	struct bkey_packed *k;
1063 	struct bset *i;
1064 	bool used_mempool, blacklisted;
1065 	bool updated_range = b->key.k.type == KEY_TYPE_btree_ptr_v2 &&
1066 		BTREE_PTR_RANGE_UPDATED(&bkey_i_to_btree_ptr_v2(&b->key)->v);
1067 	unsigned ptr_written = btree_ptr_sectors_written(bkey_i_to_s_c(&b->key));
1068 	u64 max_journal_seq = 0;
1069 	struct printbuf buf = PRINTBUF;
1070 	int ret = 0, write = READ;
1071 	u64 start_time = local_clock();
1072 
1073 	b->version_ondisk = U16_MAX;
1074 	/* We might get called multiple times on read retry: */
1075 	b->written = 0;
1076 
1077 	iter = mempool_alloc(&c->fill_iter, GFP_NOFS);
1078 	sort_iter_init(iter, b, (btree_blocks(c) + 1) * 2);
1079 
1080 	if (bch2_meta_read_fault("btree"))
1081 		btree_err(-BCH_ERR_btree_node_read_err_must_retry,
1082 			  c, ca, b, NULL, NULL,
1083 			  btree_node_fault_injected,
1084 			  "dynamic fault");
1085 
1086 	btree_err_on(le64_to_cpu(b->data->magic) != bset_magic(c),
1087 		     -BCH_ERR_btree_node_read_err_must_retry,
1088 		     c, ca, b, NULL, NULL,
1089 		     btree_node_bad_magic,
1090 		     "bad magic: want %llx, got %llx",
1091 		     bset_magic(c), le64_to_cpu(b->data->magic));
1092 
1093 	if (b->key.k.type == KEY_TYPE_btree_ptr_v2) {
1094 		struct bch_btree_ptr_v2 *bp =
1095 			&bkey_i_to_btree_ptr_v2(&b->key)->v;
1096 
1097 		bch2_bpos_to_text(&buf, b->data->min_key);
1098 		prt_str(&buf, "-");
1099 		bch2_bpos_to_text(&buf, b->data->max_key);
1100 
1101 		btree_err_on(b->data->keys.seq != bp->seq,
1102 			     -BCH_ERR_btree_node_read_err_must_retry,
1103 			     c, ca, b, NULL, NULL,
1104 			     btree_node_bad_seq,
1105 			     "got wrong btree node: got\n%s",
1106 			     (printbuf_reset(&buf),
1107 			      bch2_btree_node_header_to_text(&buf, b->data),
1108 			      buf.buf));
1109 	} else {
1110 		btree_err_on(!b->data->keys.seq,
1111 			     -BCH_ERR_btree_node_read_err_must_retry,
1112 			     c, ca, b, NULL, NULL,
1113 			     btree_node_bad_seq,
1114 			     "bad btree header: seq 0\n%s",
1115 			     (printbuf_reset(&buf),
1116 			      bch2_btree_node_header_to_text(&buf, b->data),
1117 			      buf.buf));
1118 	}
1119 
1120 	while (b->written < (ptr_written ?: btree_sectors(c))) {
1121 		unsigned sectors;
1122 		bool first = !b->written;
1123 
1124 		if (first) {
1125 			bne = NULL;
1126 			i = &b->data->keys;
1127 		} else {
1128 			bne = write_block(b);
1129 			i = &bne->keys;
1130 
1131 			if (i->seq != b->data->keys.seq)
1132 				break;
1133 		}
1134 
1135 		struct nonce nonce = btree_nonce(i, b->written << 9);
1136 		bool good_csum_type = bch2_checksum_type_valid(c, BSET_CSUM_TYPE(i));
1137 
1138 		btree_err_on(!good_csum_type,
1139 			     bch2_csum_type_is_encryption(BSET_CSUM_TYPE(i))
1140 			     ? -BCH_ERR_btree_node_read_err_must_retry
1141 			     : -BCH_ERR_btree_node_read_err_want_retry,
1142 			     c, ca, b, i, NULL,
1143 			     bset_unknown_csum,
1144 			     "unknown checksum type %llu", BSET_CSUM_TYPE(i));
1145 
1146 		if (first) {
1147 			if (good_csum_type) {
1148 				struct bch_csum csum = csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, b->data);
1149 				bool csum_bad = bch2_crc_cmp(b->data->csum, csum);
1150 				if (csum_bad)
1151 					bch2_io_error(ca, BCH_MEMBER_ERROR_checksum);
1152 
1153 				btree_err_on(csum_bad,
1154 					     -BCH_ERR_btree_node_read_err_want_retry,
1155 					     c, ca, b, i, NULL,
1156 					     bset_bad_csum,
1157 					     "%s",
1158 					     (printbuf_reset(&buf),
1159 					      bch2_csum_err_msg(&buf, BSET_CSUM_TYPE(i), b->data->csum, csum),
1160 					      buf.buf));
1161 
1162 				ret = bset_encrypt(c, i, b->written << 9);
1163 				if (bch2_fs_fatal_err_on(ret, c,
1164 							 "decrypting btree node: %s", bch2_err_str(ret)))
1165 					goto fsck_err;
1166 			}
1167 
1168 			btree_err_on(btree_node_type_is_extents(btree_node_type(b)) &&
1169 				     !BTREE_NODE_NEW_EXTENT_OVERWRITE(b->data),
1170 				     -BCH_ERR_btree_node_read_err_incompatible,
1171 				     c, NULL, b, NULL, NULL,
1172 				     btree_node_unsupported_version,
1173 				     "btree node does not have NEW_EXTENT_OVERWRITE set");
1174 
1175 			sectors = vstruct_sectors(b->data, c->block_bits);
1176 		} else {
1177 			if (good_csum_type) {
1178 				struct bch_csum csum = csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, bne);
1179 				bool csum_bad = bch2_crc_cmp(bne->csum, csum);
1180 				if (ca && csum_bad)
1181 					bch2_io_error(ca, BCH_MEMBER_ERROR_checksum);
1182 
1183 				btree_err_on(csum_bad,
1184 					     -BCH_ERR_btree_node_read_err_want_retry,
1185 					     c, ca, b, i, NULL,
1186 					     bset_bad_csum,
1187 					     "%s",
1188 					     (printbuf_reset(&buf),
1189 					      bch2_csum_err_msg(&buf, BSET_CSUM_TYPE(i), bne->csum, csum),
1190 					      buf.buf));
1191 
1192 				ret = bset_encrypt(c, i, b->written << 9);
1193 				if (bch2_fs_fatal_err_on(ret, c,
1194 						"decrypting btree node: %s", bch2_err_str(ret)))
1195 					goto fsck_err;
1196 			}
1197 
1198 			sectors = vstruct_sectors(bne, c->block_bits);
1199 		}
1200 
1201 		b->version_ondisk = min(b->version_ondisk,
1202 					le16_to_cpu(i->version));
1203 
1204 		ret = validate_bset(c, ca, b, i, b->written, sectors, READ, failed, err_msg);
1205 		if (ret)
1206 			goto fsck_err;
1207 
1208 		if (!b->written)
1209 			btree_node_set_format(b, b->data->format);
1210 
1211 		ret = validate_bset_keys(c, b, i, READ, failed, err_msg);
1212 		if (ret)
1213 			goto fsck_err;
1214 
1215 		SET_BSET_BIG_ENDIAN(i, CPU_BIG_ENDIAN);
1216 
1217 		blacklisted = bch2_journal_seq_is_blacklisted(c,
1218 					le64_to_cpu(i->journal_seq),
1219 					true);
1220 
1221 		btree_err_on(blacklisted && first,
1222 			     -BCH_ERR_btree_node_read_err_fixable,
1223 			     c, ca, b, i, NULL,
1224 			     bset_blacklisted_journal_seq,
1225 			     "first btree node bset has blacklisted journal seq (%llu)",
1226 			     le64_to_cpu(i->journal_seq));
1227 
1228 		btree_err_on(blacklisted && ptr_written,
1229 			     -BCH_ERR_btree_node_read_err_fixable,
1230 			     c, ca, b, i, NULL,
1231 			     first_bset_blacklisted_journal_seq,
1232 			     "found blacklisted bset (journal seq %llu) in btree node at offset %u-%u/%u",
1233 			     le64_to_cpu(i->journal_seq),
1234 			     b->written, b->written + sectors, ptr_written);
1235 
1236 		b->written = min(b->written + sectors, btree_sectors(c));
1237 
1238 		if (blacklisted && !first)
1239 			continue;
1240 
1241 		sort_iter_add(iter,
1242 			      vstruct_idx(i, 0),
1243 			      vstruct_last(i));
1244 
1245 		max_journal_seq = max(max_journal_seq, le64_to_cpu(i->journal_seq));
1246 	}
1247 
1248 	if (ptr_written) {
1249 		btree_err_on(b->written < ptr_written,
1250 			     -BCH_ERR_btree_node_read_err_want_retry,
1251 			     c, ca, b, NULL, NULL,
1252 			     btree_node_data_missing,
1253 			     "btree node data missing: expected %u sectors, found %u",
1254 			     ptr_written, b->written);
1255 	} else {
1256 		for (bne = write_block(b);
1257 		     bset_byte_offset(b, bne) < btree_buf_bytes(b);
1258 		     bne = (void *) bne + block_bytes(c))
1259 			btree_err_on(bne->keys.seq == b->data->keys.seq &&
1260 				     !bch2_journal_seq_is_blacklisted(c,
1261 								      le64_to_cpu(bne->keys.journal_seq),
1262 								      true),
1263 				     -BCH_ERR_btree_node_read_err_want_retry,
1264 				     c, ca, b, NULL, NULL,
1265 				     btree_node_bset_after_end,
1266 				     "found bset signature after last bset");
1267 	}
1268 
1269 	sorted = btree_bounce_alloc(c, btree_buf_bytes(b), &used_mempool);
1270 	sorted->keys.u64s = 0;
1271 
1272 	b->nr = bch2_key_sort_fix_overlapping(c, &sorted->keys, iter);
1273 	memset((uint8_t *)(sorted + 1) + b->nr.live_u64s * sizeof(u64), 0,
1274 			btree_buf_bytes(b) -
1275 			sizeof(struct btree_node) -
1276 			b->nr.live_u64s * sizeof(u64));
1277 
1278 	b->data->keys.u64s = sorted->keys.u64s;
1279 	*sorted = *b->data;
1280 	swap(sorted, b->data);
1281 	set_btree_bset(b, b->set, &b->data->keys);
1282 	b->nsets = 1;
1283 	b->data->keys.journal_seq = cpu_to_le64(max_journal_seq);
1284 
1285 	BUG_ON(b->nr.live_u64s != le16_to_cpu(b->data->keys.u64s));
1286 
1287 	btree_bounce_free(c, btree_buf_bytes(b), used_mempool, sorted);
1288 
1289 	if (updated_range)
1290 		bch2_btree_node_drop_keys_outside_node(b);
1291 
1292 	i = &b->data->keys;
1293 	for (k = i->start; k != vstruct_last(i);) {
1294 		struct bkey tmp;
1295 		struct bkey_s u = __bkey_disassemble(b, k, &tmp);
1296 
1297 		ret = btree_node_bkey_val_validate(c, b, u.s_c, READ);
1298 		if (ret == -BCH_ERR_fsck_delete_bkey ||
1299 		    (static_branch_unlikely(&bch2_inject_invalid_keys) &&
1300 		     !bversion_cmp(u.k->bversion, MAX_VERSION))) {
1301 			btree_keys_account_key_drop(&b->nr, 0, k);
1302 
1303 			i->u64s = cpu_to_le16(le16_to_cpu(i->u64s) - k->u64s);
1304 			memmove_u64s_down(k, bkey_p_next(k),
1305 					  (u64 *) vstruct_end(i) - (u64 *) k);
1306 			set_btree_bset_end(b, b->set);
1307 			set_btree_node_need_rewrite(b);
1308 			continue;
1309 		}
1310 		if (ret)
1311 			goto fsck_err;
1312 
1313 		if (u.k->type == KEY_TYPE_btree_ptr_v2) {
1314 			struct bkey_s_btree_ptr_v2 bp = bkey_s_to_btree_ptr_v2(u);
1315 
1316 			bp.v->mem_ptr = 0;
1317 		}
1318 
1319 		k = bkey_p_next(k);
1320 	}
1321 
1322 	bch2_bset_build_aux_tree(b, b->set, false);
1323 
1324 	set_needs_whiteout(btree_bset_first(b), true);
1325 
1326 	btree_node_reset_sib_u64s(b);
1327 
1328 	rcu_read_lock();
1329 	bkey_for_each_ptr(bch2_bkey_ptrs(bkey_i_to_s(&b->key)), ptr) {
1330 		struct bch_dev *ca2 = bch2_dev_rcu(c, ptr->dev);
1331 
1332 		if (!ca2 || ca2->mi.state != BCH_MEMBER_STATE_rw)
1333 			set_btree_node_need_rewrite(b);
1334 	}
1335 	rcu_read_unlock();
1336 
1337 	if (!ptr_written)
1338 		set_btree_node_need_rewrite(b);
1339 fsck_err:
1340 	mempool_free(iter, &c->fill_iter);
1341 	printbuf_exit(&buf);
1342 	bch2_time_stats_update(&c->times[BCH_TIME_btree_node_read_done], start_time);
1343 	return ret;
1344 }
1345 
1346 static void btree_node_read_work(struct work_struct *work)
1347 {
1348 	struct btree_read_bio *rb =
1349 		container_of(work, struct btree_read_bio, work);
1350 	struct bch_fs *c	= rb->c;
1351 	struct bch_dev *ca	= rb->have_ioref ? bch2_dev_have_ref(c, rb->pick.ptr.dev) : NULL;
1352 	struct btree *b		= rb->b;
1353 	struct bio *bio		= &rb->bio;
1354 	struct bch_io_failures failed = { .nr = 0 };
1355 	int ret = 0;
1356 
1357 	struct printbuf buf = PRINTBUF;
1358 	bch2_log_msg_start(c, &buf);
1359 
1360 	prt_printf(&buf, "btree node read error at btree ");
1361 	bch2_btree_pos_to_text(&buf, c, b);
1362 	prt_newline(&buf);
1363 
1364 	goto start;
1365 	while (1) {
1366 		ret = bch2_bkey_pick_read_device(c,
1367 					bkey_i_to_s_c(&b->key),
1368 					&failed, &rb->pick, -1);
1369 		if (ret) {
1370 			set_btree_node_read_error(b);
1371 			break;
1372 		}
1373 
1374 		ca = bch2_dev_get_ioref(c, rb->pick.ptr.dev, READ, BCH_DEV_READ_REF_btree_node_read);
1375 		rb->have_ioref		= ca != NULL;
1376 		rb->start_time		= local_clock();
1377 		bio_reset(bio, NULL, REQ_OP_READ|REQ_SYNC|REQ_META);
1378 		bio->bi_iter.bi_sector	= rb->pick.ptr.offset;
1379 		bio->bi_iter.bi_size	= btree_buf_bytes(b);
1380 
1381 		if (rb->have_ioref) {
1382 			bio_set_dev(bio, ca->disk_sb.bdev);
1383 			submit_bio_wait(bio);
1384 		} else {
1385 			bio->bi_status = BLK_STS_REMOVED;
1386 		}
1387 
1388 		bch2_account_io_completion(ca, BCH_MEMBER_ERROR_read,
1389 					   rb->start_time, !bio->bi_status);
1390 start:
1391 		if (rb->have_ioref)
1392 			enumerated_ref_put(&ca->io_ref[READ], BCH_DEV_READ_REF_btree_node_read);
1393 		rb->have_ioref = false;
1394 
1395 		if (bio->bi_status) {
1396 			bch2_mark_io_failure(&failed, &rb->pick, false);
1397 			continue;
1398 		}
1399 
1400 		ret = bch2_btree_node_read_done(c, ca, b, &failed, &buf);
1401 		if (ret == -BCH_ERR_btree_node_read_err_want_retry ||
1402 		    ret == -BCH_ERR_btree_node_read_err_must_retry)
1403 			continue;
1404 
1405 		if (ret)
1406 			set_btree_node_read_error(b);
1407 
1408 		break;
1409 	}
1410 
1411 	bch2_io_failures_to_text(&buf, c, &failed);
1412 
1413 	if (btree_node_read_error(b))
1414 		bch2_btree_lost_data(c, &buf, b->c.btree_id);
1415 
1416 	/*
1417 	 * only print retry success if we read from a replica with no errors
1418 	 */
1419 	if (btree_node_read_error(b))
1420 		prt_printf(&buf, "ret %s", bch2_err_str(ret));
1421 	else if (failed.nr) {
1422 		if (!bch2_dev_io_failures(&failed, rb->pick.ptr.dev))
1423 			prt_printf(&buf, "retry success");
1424 		else
1425 			prt_printf(&buf, "repair success");
1426 	}
1427 
1428 	if ((failed.nr ||
1429 	     btree_node_need_rewrite(b)) &&
1430 	    !btree_node_read_error(b) &&
1431 	    c->recovery.curr_pass != BCH_RECOVERY_PASS_scan_for_btree_nodes) {
1432 		prt_printf(&buf, " (rewriting node)");
1433 		bch2_btree_node_rewrite_async(c, b);
1434 	}
1435 	prt_newline(&buf);
1436 
1437 	if (failed.nr)
1438 		bch2_print_str_ratelimited(c, KERN_ERR, buf.buf);
1439 
1440 	async_object_list_del(c, btree_read_bio, rb->list_idx);
1441 	bch2_time_stats_update(&c->times[BCH_TIME_btree_node_read],
1442 			       rb->start_time);
1443 	bio_put(&rb->bio);
1444 	printbuf_exit(&buf);
1445 	clear_btree_node_read_in_flight(b);
1446 	smp_mb__after_atomic();
1447 	wake_up_bit(&b->flags, BTREE_NODE_read_in_flight);
1448 }
1449 
1450 static void btree_node_read_endio(struct bio *bio)
1451 {
1452 	struct btree_read_bio *rb =
1453 		container_of(bio, struct btree_read_bio, bio);
1454 	struct bch_fs *c	= rb->c;
1455 	struct bch_dev *ca	= rb->have_ioref
1456 		? bch2_dev_have_ref(c, rb->pick.ptr.dev) : NULL;
1457 
1458 	bch2_account_io_completion(ca, BCH_MEMBER_ERROR_read,
1459 				   rb->start_time, !bio->bi_status);
1460 
1461 	queue_work(c->btree_read_complete_wq, &rb->work);
1462 }
1463 
1464 void bch2_btree_read_bio_to_text(struct printbuf *out, struct btree_read_bio *rbio)
1465 {
1466 	bch2_bio_to_text(out, &rbio->bio);
1467 }
1468 
1469 struct btree_node_read_all {
1470 	struct closure		cl;
1471 	struct bch_fs		*c;
1472 	struct btree		*b;
1473 	unsigned		nr;
1474 	void			*buf[BCH_REPLICAS_MAX];
1475 	struct bio		*bio[BCH_REPLICAS_MAX];
1476 	blk_status_t		err[BCH_REPLICAS_MAX];
1477 };
1478 
1479 static unsigned btree_node_sectors_written(struct bch_fs *c, void *data)
1480 {
1481 	struct btree_node *bn = data;
1482 	struct btree_node_entry *bne;
1483 	unsigned offset = 0;
1484 
1485 	if (le64_to_cpu(bn->magic) !=  bset_magic(c))
1486 		return 0;
1487 
1488 	while (offset < btree_sectors(c)) {
1489 		if (!offset) {
1490 			offset += vstruct_sectors(bn, c->block_bits);
1491 		} else {
1492 			bne = data + (offset << 9);
1493 			if (bne->keys.seq != bn->keys.seq)
1494 				break;
1495 			offset += vstruct_sectors(bne, c->block_bits);
1496 		}
1497 	}
1498 
1499 	return offset;
1500 }
1501 
1502 static bool btree_node_has_extra_bsets(struct bch_fs *c, unsigned offset, void *data)
1503 {
1504 	struct btree_node *bn = data;
1505 	struct btree_node_entry *bne;
1506 
1507 	if (!offset)
1508 		return false;
1509 
1510 	while (offset < btree_sectors(c)) {
1511 		bne = data + (offset << 9);
1512 		if (bne->keys.seq == bn->keys.seq)
1513 			return true;
1514 		offset++;
1515 	}
1516 
1517 	return false;
1518 	return offset;
1519 }
1520 
1521 static CLOSURE_CALLBACK(btree_node_read_all_replicas_done)
1522 {
1523 	closure_type(ra, struct btree_node_read_all, cl);
1524 	struct bch_fs *c = ra->c;
1525 	struct btree *b = ra->b;
1526 	struct printbuf buf = PRINTBUF;
1527 	bool dump_bset_maps = false;
1528 	int ret = 0, best = -1, write = READ;
1529 	unsigned i, written = 0, written2 = 0;
1530 	__le64 seq = b->key.k.type == KEY_TYPE_btree_ptr_v2
1531 		? bkey_i_to_btree_ptr_v2(&b->key)->v.seq : 0;
1532 	bool _saw_error = false, *saw_error = &_saw_error;
1533 	struct printbuf *err_msg = NULL;
1534 	struct bch_io_failures *failed = NULL;
1535 
1536 	for (i = 0; i < ra->nr; i++) {
1537 		struct btree_node *bn = ra->buf[i];
1538 
1539 		if (ra->err[i])
1540 			continue;
1541 
1542 		if (le64_to_cpu(bn->magic) != bset_magic(c) ||
1543 		    (seq && seq != bn->keys.seq))
1544 			continue;
1545 
1546 		if (best < 0) {
1547 			best = i;
1548 			written = btree_node_sectors_written(c, bn);
1549 			continue;
1550 		}
1551 
1552 		written2 = btree_node_sectors_written(c, ra->buf[i]);
1553 		if (btree_err_on(written2 != written, -BCH_ERR_btree_node_read_err_fixable,
1554 				 c, NULL, b, NULL, NULL,
1555 				 btree_node_replicas_sectors_written_mismatch,
1556 				 "btree node sectors written mismatch: %u != %u",
1557 				 written, written2) ||
1558 		    btree_err_on(btree_node_has_extra_bsets(c, written2, ra->buf[i]),
1559 				 -BCH_ERR_btree_node_read_err_fixable,
1560 				 c, NULL, b, NULL, NULL,
1561 				 btree_node_bset_after_end,
1562 				 "found bset signature after last bset") ||
1563 		    btree_err_on(memcmp(ra->buf[best], ra->buf[i], written << 9),
1564 				 -BCH_ERR_btree_node_read_err_fixable,
1565 				 c, NULL, b, NULL, NULL,
1566 				 btree_node_replicas_data_mismatch,
1567 				 "btree node replicas content mismatch"))
1568 			dump_bset_maps = true;
1569 
1570 		if (written2 > written) {
1571 			written = written2;
1572 			best = i;
1573 		}
1574 	}
1575 fsck_err:
1576 	if (dump_bset_maps) {
1577 		for (i = 0; i < ra->nr; i++) {
1578 			struct btree_node *bn = ra->buf[i];
1579 			struct btree_node_entry *bne = NULL;
1580 			unsigned offset = 0, sectors;
1581 			bool gap = false;
1582 
1583 			if (ra->err[i])
1584 				continue;
1585 
1586 			printbuf_reset(&buf);
1587 
1588 			while (offset < btree_sectors(c)) {
1589 				if (!offset) {
1590 					sectors = vstruct_sectors(bn, c->block_bits);
1591 				} else {
1592 					bne = ra->buf[i] + (offset << 9);
1593 					if (bne->keys.seq != bn->keys.seq)
1594 						break;
1595 					sectors = vstruct_sectors(bne, c->block_bits);
1596 				}
1597 
1598 				prt_printf(&buf, " %u-%u", offset, offset + sectors);
1599 				if (bne && bch2_journal_seq_is_blacklisted(c,
1600 							le64_to_cpu(bne->keys.journal_seq), false))
1601 					prt_printf(&buf, "*");
1602 				offset += sectors;
1603 			}
1604 
1605 			while (offset < btree_sectors(c)) {
1606 				bne = ra->buf[i] + (offset << 9);
1607 				if (bne->keys.seq == bn->keys.seq) {
1608 					if (!gap)
1609 						prt_printf(&buf, " GAP");
1610 					gap = true;
1611 
1612 					sectors = vstruct_sectors(bne, c->block_bits);
1613 					prt_printf(&buf, " %u-%u", offset, offset + sectors);
1614 					if (bch2_journal_seq_is_blacklisted(c,
1615 							le64_to_cpu(bne->keys.journal_seq), false))
1616 						prt_printf(&buf, "*");
1617 				}
1618 				offset++;
1619 			}
1620 
1621 			bch_err(c, "replica %u:%s", i, buf.buf);
1622 		}
1623 	}
1624 
1625 	if (best >= 0) {
1626 		memcpy(b->data, ra->buf[best], btree_buf_bytes(b));
1627 		ret = bch2_btree_node_read_done(c, NULL, b, NULL, NULL);
1628 	} else {
1629 		ret = -1;
1630 	}
1631 
1632 	if (ret) {
1633 		set_btree_node_read_error(b);
1634 
1635 		struct printbuf buf = PRINTBUF;
1636 		bch2_btree_lost_data(c, &buf, b->c.btree_id);
1637 		if (buf.pos)
1638 			bch_err(c, "%s", buf.buf);
1639 		printbuf_exit(&buf);
1640 	} else if (*saw_error)
1641 		bch2_btree_node_rewrite_async(c, b);
1642 
1643 	for (i = 0; i < ra->nr; i++) {
1644 		mempool_free(ra->buf[i], &c->btree_bounce_pool);
1645 		bio_put(ra->bio[i]);
1646 	}
1647 
1648 	closure_debug_destroy(&ra->cl);
1649 	kfree(ra);
1650 	printbuf_exit(&buf);
1651 
1652 	clear_btree_node_read_in_flight(b);
1653 	smp_mb__after_atomic();
1654 	wake_up_bit(&b->flags, BTREE_NODE_read_in_flight);
1655 }
1656 
1657 static void btree_node_read_all_replicas_endio(struct bio *bio)
1658 {
1659 	struct btree_read_bio *rb =
1660 		container_of(bio, struct btree_read_bio, bio);
1661 	struct bch_fs *c	= rb->c;
1662 	struct btree_node_read_all *ra = rb->ra;
1663 
1664 	if (rb->have_ioref) {
1665 		struct bch_dev *ca = bch2_dev_have_ref(c, rb->pick.ptr.dev);
1666 
1667 		bch2_latency_acct(ca, rb->start_time, READ);
1668 		enumerated_ref_put(&ca->io_ref[READ],
1669 			BCH_DEV_READ_REF_btree_node_read_all_replicas);
1670 	}
1671 
1672 	ra->err[rb->idx] = bio->bi_status;
1673 	closure_put(&ra->cl);
1674 }
1675 
1676 /*
1677  * XXX This allocates multiple times from the same mempools, and can deadlock
1678  * under sufficient memory pressure (but is only a debug path)
1679  */
1680 static int btree_node_read_all_replicas(struct bch_fs *c, struct btree *b, bool sync)
1681 {
1682 	struct bkey_s_c k = bkey_i_to_s_c(&b->key);
1683 	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
1684 	const union bch_extent_entry *entry;
1685 	struct extent_ptr_decoded pick;
1686 	struct btree_node_read_all *ra;
1687 	unsigned i;
1688 
1689 	ra = kzalloc(sizeof(*ra), GFP_NOFS);
1690 	if (!ra)
1691 		return -BCH_ERR_ENOMEM_btree_node_read_all_replicas;
1692 
1693 	closure_init(&ra->cl, NULL);
1694 	ra->c	= c;
1695 	ra->b	= b;
1696 	ra->nr	= bch2_bkey_nr_ptrs(k);
1697 
1698 	for (i = 0; i < ra->nr; i++) {
1699 		ra->buf[i] = mempool_alloc(&c->btree_bounce_pool, GFP_NOFS);
1700 		ra->bio[i] = bio_alloc_bioset(NULL,
1701 					      buf_pages(ra->buf[i], btree_buf_bytes(b)),
1702 					      REQ_OP_READ|REQ_SYNC|REQ_META,
1703 					      GFP_NOFS,
1704 					      &c->btree_bio);
1705 	}
1706 
1707 	i = 0;
1708 	bkey_for_each_ptr_decode(k.k, ptrs, pick, entry) {
1709 		struct bch_dev *ca = bch2_dev_get_ioref(c, pick.ptr.dev, READ,
1710 					BCH_DEV_READ_REF_btree_node_read_all_replicas);
1711 		struct btree_read_bio *rb =
1712 			container_of(ra->bio[i], struct btree_read_bio, bio);
1713 		rb->c			= c;
1714 		rb->b			= b;
1715 		rb->ra			= ra;
1716 		rb->start_time		= local_clock();
1717 		rb->have_ioref		= ca != NULL;
1718 		rb->idx			= i;
1719 		rb->pick		= pick;
1720 		rb->bio.bi_iter.bi_sector = pick.ptr.offset;
1721 		rb->bio.bi_end_io	= btree_node_read_all_replicas_endio;
1722 		bch2_bio_map(&rb->bio, ra->buf[i], btree_buf_bytes(b));
1723 
1724 		if (rb->have_ioref) {
1725 			this_cpu_add(ca->io_done->sectors[READ][BCH_DATA_btree],
1726 				     bio_sectors(&rb->bio));
1727 			bio_set_dev(&rb->bio, ca->disk_sb.bdev);
1728 
1729 			closure_get(&ra->cl);
1730 			submit_bio(&rb->bio);
1731 		} else {
1732 			ra->err[i] = BLK_STS_REMOVED;
1733 		}
1734 
1735 		i++;
1736 	}
1737 
1738 	if (sync) {
1739 		closure_sync(&ra->cl);
1740 		btree_node_read_all_replicas_done(&ra->cl.work);
1741 	} else {
1742 		continue_at(&ra->cl, btree_node_read_all_replicas_done,
1743 			    c->btree_read_complete_wq);
1744 	}
1745 
1746 	return 0;
1747 }
1748 
1749 void bch2_btree_node_read(struct btree_trans *trans, struct btree *b,
1750 			  bool sync)
1751 {
1752 	struct bch_fs *c = trans->c;
1753 	struct extent_ptr_decoded pick;
1754 	struct btree_read_bio *rb;
1755 	struct bch_dev *ca;
1756 	struct bio *bio;
1757 	int ret;
1758 
1759 	trace_and_count(c, btree_node_read, trans, b);
1760 
1761 	if (static_branch_unlikely(&bch2_verify_all_btree_replicas) &&
1762 	    !btree_node_read_all_replicas(c, b, sync))
1763 		return;
1764 
1765 	ret = bch2_bkey_pick_read_device(c, bkey_i_to_s_c(&b->key),
1766 					 NULL, &pick, -1);
1767 
1768 	if (ret <= 0) {
1769 		bool ratelimit = true;
1770 		struct printbuf buf = PRINTBUF;
1771 		bch2_log_msg_start(c, &buf);
1772 
1773 		prt_str(&buf, "btree node read error: no device to read from\n at ");
1774 		bch2_btree_pos_to_text(&buf, c, b);
1775 		prt_newline(&buf);
1776 		bch2_btree_lost_data(c, &buf, b->c.btree_id);
1777 
1778 		if (c->recovery.passes_complete & BIT_ULL(BCH_RECOVERY_PASS_check_topology) &&
1779 		    bch2_fs_emergency_read_only2(c, &buf))
1780 			ratelimit = false;
1781 
1782 		static DEFINE_RATELIMIT_STATE(rs,
1783 					      DEFAULT_RATELIMIT_INTERVAL,
1784 					      DEFAULT_RATELIMIT_BURST);
1785 		if (!ratelimit || __ratelimit(&rs))
1786 			bch2_print_str(c, KERN_ERR, buf.buf);
1787 		printbuf_exit(&buf);
1788 
1789 		set_btree_node_read_error(b);
1790 		clear_btree_node_read_in_flight(b);
1791 		smp_mb__after_atomic();
1792 		wake_up_bit(&b->flags, BTREE_NODE_read_in_flight);
1793 		return;
1794 	}
1795 
1796 	ca = bch2_dev_get_ioref(c, pick.ptr.dev, READ, BCH_DEV_READ_REF_btree_node_read);
1797 
1798 	bio = bio_alloc_bioset(NULL,
1799 			       buf_pages(b->data, btree_buf_bytes(b)),
1800 			       REQ_OP_READ|REQ_SYNC|REQ_META,
1801 			       GFP_NOFS,
1802 			       &c->btree_bio);
1803 	rb = container_of(bio, struct btree_read_bio, bio);
1804 	rb->c			= c;
1805 	rb->b			= b;
1806 	rb->ra			= NULL;
1807 	rb->start_time		= local_clock();
1808 	rb->have_ioref		= ca != NULL;
1809 	rb->pick		= pick;
1810 	INIT_WORK(&rb->work, btree_node_read_work);
1811 	bio->bi_iter.bi_sector	= pick.ptr.offset;
1812 	bio->bi_end_io		= btree_node_read_endio;
1813 	bch2_bio_map(bio, b->data, btree_buf_bytes(b));
1814 
1815 	async_object_list_add(c, btree_read_bio, rb, &rb->list_idx);
1816 
1817 	if (rb->have_ioref) {
1818 		this_cpu_add(ca->io_done->sectors[READ][BCH_DATA_btree],
1819 			     bio_sectors(bio));
1820 		bio_set_dev(bio, ca->disk_sb.bdev);
1821 
1822 		if (sync) {
1823 			submit_bio_wait(bio);
1824 			bch2_latency_acct(ca, rb->start_time, READ);
1825 			btree_node_read_work(&rb->work);
1826 		} else {
1827 			submit_bio(bio);
1828 		}
1829 	} else {
1830 		bio->bi_status = BLK_STS_REMOVED;
1831 
1832 		if (sync)
1833 			btree_node_read_work(&rb->work);
1834 		else
1835 			queue_work(c->btree_read_complete_wq, &rb->work);
1836 	}
1837 }
1838 
1839 static int __bch2_btree_root_read(struct btree_trans *trans, enum btree_id id,
1840 				  const struct bkey_i *k, unsigned level)
1841 {
1842 	struct bch_fs *c = trans->c;
1843 	struct closure cl;
1844 	struct btree *b;
1845 	int ret;
1846 
1847 	closure_init_stack(&cl);
1848 
1849 	do {
1850 		ret = bch2_btree_cache_cannibalize_lock(trans, &cl);
1851 		closure_sync(&cl);
1852 	} while (ret);
1853 
1854 	b = bch2_btree_node_mem_alloc(trans, level != 0);
1855 	bch2_btree_cache_cannibalize_unlock(trans);
1856 
1857 	BUG_ON(IS_ERR(b));
1858 
1859 	bkey_copy(&b->key, k);
1860 	BUG_ON(bch2_btree_node_hash_insert(&c->btree_cache, b, level, id));
1861 
1862 	set_btree_node_read_in_flight(b);
1863 
1864 	/* we can't pass the trans to read_done() for fsck errors, so it must be unlocked */
1865 	bch2_trans_unlock(trans);
1866 	bch2_btree_node_read(trans, b, true);
1867 
1868 	if (btree_node_read_error(b)) {
1869 		mutex_lock(&c->btree_cache.lock);
1870 		bch2_btree_node_hash_remove(&c->btree_cache, b);
1871 		mutex_unlock(&c->btree_cache.lock);
1872 
1873 		ret = -BCH_ERR_btree_node_read_error;
1874 		goto err;
1875 	}
1876 
1877 	bch2_btree_set_root_for_read(c, b);
1878 err:
1879 	six_unlock_write(&b->c.lock);
1880 	six_unlock_intent(&b->c.lock);
1881 
1882 	return ret;
1883 }
1884 
1885 int bch2_btree_root_read(struct bch_fs *c, enum btree_id id,
1886 			const struct bkey_i *k, unsigned level)
1887 {
1888 	return bch2_trans_run(c, __bch2_btree_root_read(trans, id, k, level));
1889 }
1890 
1891 struct btree_node_scrub {
1892 	struct bch_fs		*c;
1893 	struct bch_dev		*ca;
1894 	void			*buf;
1895 	bool			used_mempool;
1896 	unsigned		written;
1897 
1898 	enum btree_id		btree;
1899 	unsigned		level;
1900 	struct bkey_buf		key;
1901 	__le64			seq;
1902 
1903 	struct work_struct	work;
1904 	struct bio		bio;
1905 };
1906 
1907 static bool btree_node_scrub_check(struct bch_fs *c, struct btree_node *data, unsigned ptr_written,
1908 				   struct printbuf *err)
1909 {
1910 	unsigned written = 0;
1911 
1912 	if (le64_to_cpu(data->magic) != bset_magic(c)) {
1913 		prt_printf(err, "bad magic: want %llx, got %llx",
1914 			   bset_magic(c), le64_to_cpu(data->magic));
1915 		return false;
1916 	}
1917 
1918 	while (written < (ptr_written ?: btree_sectors(c))) {
1919 		struct btree_node_entry *bne;
1920 		struct bset *i;
1921 		bool first = !written;
1922 
1923 		if (first) {
1924 			bne = NULL;
1925 			i = &data->keys;
1926 		} else {
1927 			bne = (void *) data + (written << 9);
1928 			i = &bne->keys;
1929 
1930 			if (!ptr_written && i->seq != data->keys.seq)
1931 				break;
1932 		}
1933 
1934 		struct nonce nonce = btree_nonce(i, written << 9);
1935 		bool good_csum_type = bch2_checksum_type_valid(c, BSET_CSUM_TYPE(i));
1936 
1937 		if (first) {
1938 			if (good_csum_type) {
1939 				struct bch_csum csum = csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, data);
1940 				if (bch2_crc_cmp(data->csum, csum)) {
1941 					bch2_csum_err_msg(err, BSET_CSUM_TYPE(i), data->csum, csum);
1942 					return false;
1943 				}
1944 			}
1945 
1946 			written += vstruct_sectors(data, c->block_bits);
1947 		} else {
1948 			if (good_csum_type) {
1949 				struct bch_csum csum = csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, bne);
1950 				if (bch2_crc_cmp(bne->csum, csum)) {
1951 					bch2_csum_err_msg(err, BSET_CSUM_TYPE(i), bne->csum, csum);
1952 					return false;
1953 				}
1954 			}
1955 
1956 			written += vstruct_sectors(bne, c->block_bits);
1957 		}
1958 	}
1959 
1960 	return true;
1961 }
1962 
1963 static void btree_node_scrub_work(struct work_struct *work)
1964 {
1965 	struct btree_node_scrub *scrub = container_of(work, struct btree_node_scrub, work);
1966 	struct bch_fs *c = scrub->c;
1967 	struct printbuf err = PRINTBUF;
1968 
1969 	__bch2_btree_pos_to_text(&err, c, scrub->btree, scrub->level,
1970 				 bkey_i_to_s_c(scrub->key.k));
1971 	prt_newline(&err);
1972 
1973 	if (!btree_node_scrub_check(c, scrub->buf, scrub->written, &err)) {
1974 		struct btree_trans *trans = bch2_trans_get(c);
1975 
1976 		struct btree_iter iter;
1977 		bch2_trans_node_iter_init(trans, &iter, scrub->btree,
1978 					  scrub->key.k->k.p, 0, scrub->level - 1, 0);
1979 
1980 		struct btree *b;
1981 		int ret = lockrestart_do(trans,
1982 			PTR_ERR_OR_ZERO(b = bch2_btree_iter_peek_node(trans, &iter)));
1983 		if (ret)
1984 			goto err;
1985 
1986 		if (bkey_i_to_btree_ptr_v2(&b->key)->v.seq == scrub->seq) {
1987 			bch_err(c, "error validating btree node during scrub on %s at btree %s",
1988 				scrub->ca->name, err.buf);
1989 
1990 			ret = bch2_btree_node_rewrite(trans, &iter, b, 0, 0);
1991 		}
1992 err:
1993 		bch2_trans_iter_exit(trans, &iter);
1994 		bch2_trans_begin(trans);
1995 		bch2_trans_put(trans);
1996 	}
1997 
1998 	printbuf_exit(&err);
1999 	bch2_bkey_buf_exit(&scrub->key, c);;
2000 	btree_bounce_free(c, c->opts.btree_node_size, scrub->used_mempool, scrub->buf);
2001 	enumerated_ref_put(&scrub->ca->io_ref[READ], BCH_DEV_READ_REF_btree_node_scrub);
2002 	kfree(scrub);
2003 	enumerated_ref_put(&c->writes, BCH_WRITE_REF_btree_node_scrub);
2004 }
2005 
2006 static void btree_node_scrub_endio(struct bio *bio)
2007 {
2008 	struct btree_node_scrub *scrub = container_of(bio, struct btree_node_scrub, bio);
2009 
2010 	queue_work(scrub->c->btree_read_complete_wq, &scrub->work);
2011 }
2012 
2013 int bch2_btree_node_scrub(struct btree_trans *trans,
2014 			  enum btree_id btree, unsigned level,
2015 			  struct bkey_s_c k, unsigned dev)
2016 {
2017 	if (k.k->type != KEY_TYPE_btree_ptr_v2)
2018 		return 0;
2019 
2020 	struct bch_fs *c = trans->c;
2021 
2022 	if (!enumerated_ref_tryget(&c->writes, BCH_WRITE_REF_btree_node_scrub))
2023 		return -BCH_ERR_erofs_no_writes;
2024 
2025 	struct extent_ptr_decoded pick;
2026 	int ret = bch2_bkey_pick_read_device(c, k, NULL, &pick, dev);
2027 	if (ret <= 0)
2028 		goto err;
2029 
2030 	struct bch_dev *ca = bch2_dev_get_ioref(c, pick.ptr.dev, READ,
2031 						BCH_DEV_READ_REF_btree_node_scrub);
2032 	if (!ca) {
2033 		ret = -BCH_ERR_device_offline;
2034 		goto err;
2035 	}
2036 
2037 	bool used_mempool = false;
2038 	void *buf = btree_bounce_alloc(c, c->opts.btree_node_size, &used_mempool);
2039 
2040 	unsigned vecs = buf_pages(buf, c->opts.btree_node_size);
2041 
2042 	struct btree_node_scrub *scrub =
2043 		kzalloc(sizeof(*scrub) + sizeof(struct bio_vec) * vecs, GFP_KERNEL);
2044 	if (!scrub) {
2045 		ret = -ENOMEM;
2046 		goto err_free;
2047 	}
2048 
2049 	scrub->c		= c;
2050 	scrub->ca		= ca;
2051 	scrub->buf		= buf;
2052 	scrub->used_mempool	= used_mempool;
2053 	scrub->written		= btree_ptr_sectors_written(k);
2054 
2055 	scrub->btree		= btree;
2056 	scrub->level		= level;
2057 	bch2_bkey_buf_init(&scrub->key);
2058 	bch2_bkey_buf_reassemble(&scrub->key, c, k);
2059 	scrub->seq		= bkey_s_c_to_btree_ptr_v2(k).v->seq;
2060 
2061 	INIT_WORK(&scrub->work, btree_node_scrub_work);
2062 
2063 	bio_init(&scrub->bio, ca->disk_sb.bdev, scrub->bio.bi_inline_vecs, vecs, REQ_OP_READ);
2064 	bch2_bio_map(&scrub->bio, scrub->buf, c->opts.btree_node_size);
2065 	scrub->bio.bi_iter.bi_sector	= pick.ptr.offset;
2066 	scrub->bio.bi_end_io		= btree_node_scrub_endio;
2067 	submit_bio(&scrub->bio);
2068 	return 0;
2069 err_free:
2070 	btree_bounce_free(c, c->opts.btree_node_size, used_mempool, buf);
2071 	enumerated_ref_put(&ca->io_ref[READ], BCH_DEV_READ_REF_btree_node_scrub);
2072 err:
2073 	enumerated_ref_put(&c->writes, BCH_WRITE_REF_btree_node_scrub);
2074 	return ret;
2075 }
2076 
2077 static void bch2_btree_complete_write(struct bch_fs *c, struct btree *b,
2078 				      struct btree_write *w)
2079 {
2080 	unsigned long old, new;
2081 
2082 	old = READ_ONCE(b->will_make_reachable);
2083 	do {
2084 		new = old;
2085 		if (!(old & 1))
2086 			break;
2087 
2088 		new &= ~1UL;
2089 	} while (!try_cmpxchg(&b->will_make_reachable, &old, new));
2090 
2091 	if (old & 1)
2092 		closure_put(&((struct btree_update *) new)->cl);
2093 
2094 	bch2_journal_pin_drop(&c->journal, &w->journal);
2095 }
2096 
2097 static void __btree_node_write_done(struct bch_fs *c, struct btree *b, u64 start_time)
2098 {
2099 	struct btree_write *w = btree_prev_write(b);
2100 	unsigned long old, new;
2101 	unsigned type = 0;
2102 
2103 	bch2_btree_complete_write(c, b, w);
2104 
2105 	if (start_time)
2106 		bch2_time_stats_update(&c->times[BCH_TIME_btree_node_write], start_time);
2107 
2108 	old = READ_ONCE(b->flags);
2109 	do {
2110 		new = old;
2111 
2112 		if ((old & (1U << BTREE_NODE_dirty)) &&
2113 		    (old & (1U << BTREE_NODE_need_write)) &&
2114 		    !(old & (1U << BTREE_NODE_never_write)) &&
2115 		    !(old & (1U << BTREE_NODE_write_blocked)) &&
2116 		    !(old & (1U << BTREE_NODE_will_make_reachable))) {
2117 			new &= ~(1U << BTREE_NODE_dirty);
2118 			new &= ~(1U << BTREE_NODE_need_write);
2119 			new |=  (1U << BTREE_NODE_write_in_flight);
2120 			new |=  (1U << BTREE_NODE_write_in_flight_inner);
2121 			new |=  (1U << BTREE_NODE_just_written);
2122 			new ^=  (1U << BTREE_NODE_write_idx);
2123 
2124 			type = new & BTREE_WRITE_TYPE_MASK;
2125 			new &= ~BTREE_WRITE_TYPE_MASK;
2126 		} else {
2127 			new &= ~(1U << BTREE_NODE_write_in_flight);
2128 			new &= ~(1U << BTREE_NODE_write_in_flight_inner);
2129 		}
2130 	} while (!try_cmpxchg(&b->flags, &old, new));
2131 
2132 	if (new & (1U << BTREE_NODE_write_in_flight))
2133 		__bch2_btree_node_write(c, b, BTREE_WRITE_ALREADY_STARTED|type);
2134 	else {
2135 		smp_mb__after_atomic();
2136 		wake_up_bit(&b->flags, BTREE_NODE_write_in_flight);
2137 	}
2138 }
2139 
2140 static void btree_node_write_done(struct bch_fs *c, struct btree *b, u64 start_time)
2141 {
2142 	struct btree_trans *trans = bch2_trans_get(c);
2143 
2144 	btree_node_lock_nopath_nofail(trans, &b->c, SIX_LOCK_read);
2145 
2146 	/* we don't need transaction context anymore after we got the lock. */
2147 	bch2_trans_put(trans);
2148 	__btree_node_write_done(c, b, start_time);
2149 	six_unlock_read(&b->c.lock);
2150 }
2151 
2152 static void btree_node_write_work(struct work_struct *work)
2153 {
2154 	struct btree_write_bio *wbio =
2155 		container_of(work, struct btree_write_bio, work);
2156 	struct bch_fs *c	= wbio->wbio.c;
2157 	struct btree *b		= wbio->wbio.bio.bi_private;
2158 	u64 start_time		= wbio->start_time;
2159 	int ret = 0;
2160 
2161 	btree_bounce_free(c,
2162 		wbio->data_bytes,
2163 		wbio->wbio.used_mempool,
2164 		wbio->data);
2165 
2166 	bch2_bkey_drop_ptrs(bkey_i_to_s(&wbio->key), ptr,
2167 		bch2_dev_list_has_dev(wbio->wbio.failed, ptr->dev));
2168 
2169 	if (!bch2_bkey_nr_ptrs(bkey_i_to_s_c(&wbio->key))) {
2170 		ret = -BCH_ERR_btree_node_write_all_failed;
2171 		goto err;
2172 	}
2173 
2174 	if (wbio->wbio.first_btree_write) {
2175 		if (wbio->wbio.failed.nr) {
2176 
2177 		}
2178 	} else {
2179 		ret = bch2_trans_do(c,
2180 			bch2_btree_node_update_key_get_iter(trans, b, &wbio->key,
2181 					BCH_WATERMARK_interior_updates|
2182 					BCH_TRANS_COMMIT_journal_reclaim|
2183 					BCH_TRANS_COMMIT_no_enospc|
2184 					BCH_TRANS_COMMIT_no_check_rw,
2185 					!wbio->wbio.failed.nr));
2186 		if (ret)
2187 			goto err;
2188 	}
2189 out:
2190 	async_object_list_del(c, btree_write_bio, wbio->list_idx);
2191 	bio_put(&wbio->wbio.bio);
2192 	btree_node_write_done(c, b, start_time);
2193 	return;
2194 err:
2195 	set_btree_node_noevict(b);
2196 
2197 	if (!bch2_err_matches(ret, EROFS)) {
2198 		struct printbuf buf = PRINTBUF;
2199 		prt_printf(&buf, "writing btree node: %s\n  ", bch2_err_str(ret));
2200 		bch2_btree_pos_to_text(&buf, c, b);
2201 		bch2_fs_fatal_error(c, "%s", buf.buf);
2202 		printbuf_exit(&buf);
2203 	}
2204 	goto out;
2205 }
2206 
2207 static void btree_node_write_endio(struct bio *bio)
2208 {
2209 	struct bch_write_bio *wbio	= to_wbio(bio);
2210 	struct bch_write_bio *parent	= wbio->split ? wbio->parent : NULL;
2211 	struct bch_write_bio *orig	= parent ?: wbio;
2212 	struct btree_write_bio *wb	= container_of(orig, struct btree_write_bio, wbio);
2213 	struct bch_fs *c		= wbio->c;
2214 	struct btree *b			= wbio->bio.bi_private;
2215 	struct bch_dev *ca		= wbio->have_ioref ? bch2_dev_have_ref(c, wbio->dev) : NULL;
2216 
2217 	bch2_account_io_completion(ca, BCH_MEMBER_ERROR_write,
2218 				   wbio->submit_time, !bio->bi_status);
2219 
2220 	if (ca && bio->bi_status) {
2221 		struct printbuf buf = PRINTBUF;
2222 		buf.atomic++;
2223 		prt_printf(&buf, "btree write error: %s\n  ",
2224 			   bch2_blk_status_to_str(bio->bi_status));
2225 		bch2_btree_pos_to_text(&buf, c, b);
2226 		bch_err_dev_ratelimited(ca, "%s", buf.buf);
2227 		printbuf_exit(&buf);
2228 	}
2229 
2230 	if (bio->bi_status) {
2231 		unsigned long flags;
2232 		spin_lock_irqsave(&c->btree_write_error_lock, flags);
2233 		bch2_dev_list_add_dev(&orig->failed, wbio->dev);
2234 		spin_unlock_irqrestore(&c->btree_write_error_lock, flags);
2235 	}
2236 
2237 	/*
2238 	 * XXX: we should be using io_ref[WRITE], but we aren't retrying failed
2239 	 * btree writes yet (due to device removal/ro):
2240 	 */
2241 	if (wbio->have_ioref)
2242 		enumerated_ref_put(&ca->io_ref[READ],
2243 				   BCH_DEV_READ_REF_btree_node_write);
2244 
2245 	if (parent) {
2246 		bio_put(bio);
2247 		bio_endio(&parent->bio);
2248 		return;
2249 	}
2250 
2251 	clear_btree_node_write_in_flight_inner(b);
2252 	smp_mb__after_atomic();
2253 	wake_up_bit(&b->flags, BTREE_NODE_write_in_flight_inner);
2254 	INIT_WORK(&wb->work, btree_node_write_work);
2255 	queue_work(c->btree_write_complete_wq, &wb->work);
2256 }
2257 
2258 static int validate_bset_for_write(struct bch_fs *c, struct btree *b,
2259 				   struct bset *i, unsigned sectors)
2260 {
2261 	int ret = bch2_bkey_validate(c, bkey_i_to_s_c(&b->key),
2262 				     (struct bkey_validate_context) {
2263 					.from	= BKEY_VALIDATE_btree_node,
2264 					.level	= b->c.level + 1,
2265 					.btree	= b->c.btree_id,
2266 					.flags	= BCH_VALIDATE_write,
2267 				     });
2268 	if (ret) {
2269 		bch2_fs_inconsistent(c, "invalid btree node key before write");
2270 		return ret;
2271 	}
2272 
2273 	ret = validate_bset_keys(c, b, i, WRITE, NULL, NULL) ?:
2274 		validate_bset(c, NULL, b, i, b->written, sectors, WRITE, NULL, NULL);
2275 	if (ret) {
2276 		bch2_inconsistent_error(c);
2277 		dump_stack();
2278 	}
2279 
2280 	return ret;
2281 }
2282 
2283 static void btree_write_submit(struct work_struct *work)
2284 {
2285 	struct btree_write_bio *wbio = container_of(work, struct btree_write_bio, work);
2286 	BKEY_PADDED_ONSTACK(k, BKEY_BTREE_PTR_VAL_U64s_MAX) tmp;
2287 
2288 	bkey_copy(&tmp.k, &wbio->key);
2289 
2290 	bkey_for_each_ptr(bch2_bkey_ptrs(bkey_i_to_s(&tmp.k)), ptr)
2291 		ptr->offset += wbio->sector_offset;
2292 
2293 	bch2_submit_wbio_replicas(&wbio->wbio, wbio->wbio.c, BCH_DATA_btree,
2294 				  &tmp.k, false);
2295 }
2296 
2297 void __bch2_btree_node_write(struct bch_fs *c, struct btree *b, unsigned flags)
2298 {
2299 	struct btree_write_bio *wbio;
2300 	struct bset *i;
2301 	struct btree_node *bn = NULL;
2302 	struct btree_node_entry *bne = NULL;
2303 	struct sort_iter_stack sort_iter;
2304 	struct nonce nonce;
2305 	unsigned bytes_to_write, sectors_to_write, bytes, u64s;
2306 	u64 seq = 0;
2307 	bool used_mempool;
2308 	unsigned long old, new;
2309 	bool validate_before_checksum = false;
2310 	enum btree_write_type type = flags & BTREE_WRITE_TYPE_MASK;
2311 	void *data;
2312 	u64 start_time = local_clock();
2313 	int ret;
2314 
2315 	if (flags & BTREE_WRITE_ALREADY_STARTED)
2316 		goto do_write;
2317 
2318 	/*
2319 	 * We may only have a read lock on the btree node - the dirty bit is our
2320 	 * "lock" against racing with other threads that may be trying to start
2321 	 * a write, we do a write iff we clear the dirty bit. Since setting the
2322 	 * dirty bit requires a write lock, we can't race with other threads
2323 	 * redirtying it:
2324 	 */
2325 	old = READ_ONCE(b->flags);
2326 	do {
2327 		new = old;
2328 
2329 		if (!(old & (1 << BTREE_NODE_dirty)))
2330 			return;
2331 
2332 		if ((flags & BTREE_WRITE_ONLY_IF_NEED) &&
2333 		    !(old & (1 << BTREE_NODE_need_write)))
2334 			return;
2335 
2336 		if (old &
2337 		    ((1 << BTREE_NODE_never_write)|
2338 		     (1 << BTREE_NODE_write_blocked)))
2339 			return;
2340 
2341 		if (b->written &&
2342 		    (old & (1 << BTREE_NODE_will_make_reachable)))
2343 			return;
2344 
2345 		if (old & (1 << BTREE_NODE_write_in_flight))
2346 			return;
2347 
2348 		if (flags & BTREE_WRITE_ONLY_IF_NEED)
2349 			type = new & BTREE_WRITE_TYPE_MASK;
2350 		new &= ~BTREE_WRITE_TYPE_MASK;
2351 
2352 		new &= ~(1 << BTREE_NODE_dirty);
2353 		new &= ~(1 << BTREE_NODE_need_write);
2354 		new |=  (1 << BTREE_NODE_write_in_flight);
2355 		new |=  (1 << BTREE_NODE_write_in_flight_inner);
2356 		new |=  (1 << BTREE_NODE_just_written);
2357 		new ^=  (1 << BTREE_NODE_write_idx);
2358 	} while (!try_cmpxchg_acquire(&b->flags, &old, new));
2359 
2360 	if (new & (1U << BTREE_NODE_need_write))
2361 		return;
2362 do_write:
2363 	BUG_ON((type == BTREE_WRITE_initial) != (b->written == 0));
2364 
2365 	atomic_long_dec(&c->btree_cache.nr_dirty);
2366 
2367 	BUG_ON(btree_node_fake(b));
2368 	BUG_ON((b->will_make_reachable != 0) != !b->written);
2369 
2370 	BUG_ON(b->written >= btree_sectors(c));
2371 	BUG_ON(b->written & (block_sectors(c) - 1));
2372 	BUG_ON(bset_written(b, btree_bset_last(b)));
2373 	BUG_ON(le64_to_cpu(b->data->magic) != bset_magic(c));
2374 	BUG_ON(memcmp(&b->data->format, &b->format, sizeof(b->format)));
2375 
2376 	bch2_sort_whiteouts(c, b);
2377 
2378 	sort_iter_stack_init(&sort_iter, b);
2379 
2380 	bytes = !b->written
2381 		? sizeof(struct btree_node)
2382 		: sizeof(struct btree_node_entry);
2383 
2384 	bytes += b->whiteout_u64s * sizeof(u64);
2385 
2386 	for_each_bset(b, t) {
2387 		i = bset(b, t);
2388 
2389 		if (bset_written(b, i))
2390 			continue;
2391 
2392 		bytes += le16_to_cpu(i->u64s) * sizeof(u64);
2393 		sort_iter_add(&sort_iter.iter,
2394 			      btree_bkey_first(b, t),
2395 			      btree_bkey_last(b, t));
2396 		seq = max(seq, le64_to_cpu(i->journal_seq));
2397 	}
2398 
2399 	BUG_ON(b->written && !seq);
2400 
2401 	/* bch2_varint_decode may read up to 7 bytes past the end of the buffer: */
2402 	bytes += 8;
2403 
2404 	/* buffer must be a multiple of the block size */
2405 	bytes = round_up(bytes, block_bytes(c));
2406 
2407 	data = btree_bounce_alloc(c, bytes, &used_mempool);
2408 
2409 	if (!b->written) {
2410 		bn = data;
2411 		*bn = *b->data;
2412 		i = &bn->keys;
2413 	} else {
2414 		bne = data;
2415 		bne->keys = b->data->keys;
2416 		i = &bne->keys;
2417 	}
2418 
2419 	i->journal_seq	= cpu_to_le64(seq);
2420 	i->u64s		= 0;
2421 
2422 	sort_iter_add(&sort_iter.iter,
2423 		      unwritten_whiteouts_start(b),
2424 		      unwritten_whiteouts_end(b));
2425 	SET_BSET_SEPARATE_WHITEOUTS(i, false);
2426 
2427 	u64s = bch2_sort_keys_keep_unwritten_whiteouts(i->start, &sort_iter.iter);
2428 	le16_add_cpu(&i->u64s, u64s);
2429 
2430 	b->whiteout_u64s = 0;
2431 
2432 	BUG_ON(!b->written && i->u64s != b->data->keys.u64s);
2433 
2434 	set_needs_whiteout(i, false);
2435 
2436 	/* do we have data to write? */
2437 	if (b->written && !i->u64s)
2438 		goto nowrite;
2439 
2440 	bytes_to_write = vstruct_end(i) - data;
2441 	sectors_to_write = round_up(bytes_to_write, block_bytes(c)) >> 9;
2442 
2443 	if (!b->written &&
2444 	    b->key.k.type == KEY_TYPE_btree_ptr_v2)
2445 		BUG_ON(btree_ptr_sectors_written(bkey_i_to_s_c(&b->key)) != sectors_to_write);
2446 
2447 	memset(data + bytes_to_write, 0,
2448 	       (sectors_to_write << 9) - bytes_to_write);
2449 
2450 	BUG_ON(b->written + sectors_to_write > btree_sectors(c));
2451 	BUG_ON(BSET_BIG_ENDIAN(i) != CPU_BIG_ENDIAN);
2452 	BUG_ON(i->seq != b->data->keys.seq);
2453 
2454 	i->version = cpu_to_le16(c->sb.version);
2455 	SET_BSET_OFFSET(i, b->written);
2456 	SET_BSET_CSUM_TYPE(i, bch2_meta_checksum_type(c));
2457 
2458 	if (bch2_csum_type_is_encryption(BSET_CSUM_TYPE(i)))
2459 		validate_before_checksum = true;
2460 
2461 	/* validate_bset will be modifying: */
2462 	if (le16_to_cpu(i->version) < bcachefs_metadata_version_current)
2463 		validate_before_checksum = true;
2464 
2465 	/* if we're going to be encrypting, check metadata validity first: */
2466 	if (validate_before_checksum &&
2467 	    validate_bset_for_write(c, b, i, sectors_to_write))
2468 		goto err;
2469 
2470 	ret = bset_encrypt(c, i, b->written << 9);
2471 	if (bch2_fs_fatal_err_on(ret, c,
2472 			"encrypting btree node: %s", bch2_err_str(ret)))
2473 		goto err;
2474 
2475 	nonce = btree_nonce(i, b->written << 9);
2476 
2477 	if (bn)
2478 		bn->csum = csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, bn);
2479 	else
2480 		bne->csum = csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, bne);
2481 
2482 	/* if we're not encrypting, check metadata after checksumming: */
2483 	if (!validate_before_checksum &&
2484 	    validate_bset_for_write(c, b, i, sectors_to_write))
2485 		goto err;
2486 
2487 	/*
2488 	 * We handle btree write errors by immediately halting the journal -
2489 	 * after we've done that, we can't issue any subsequent btree writes
2490 	 * because they might have pointers to new nodes that failed to write.
2491 	 *
2492 	 * Furthermore, there's no point in doing any more btree writes because
2493 	 * with the journal stopped, we're never going to update the journal to
2494 	 * reflect that those writes were done and the data flushed from the
2495 	 * journal:
2496 	 *
2497 	 * Also on journal error, the pending write may have updates that were
2498 	 * never journalled (interior nodes, see btree_update_nodes_written()) -
2499 	 * it's critical that we don't do the write in that case otherwise we
2500 	 * will have updates visible that weren't in the journal:
2501 	 *
2502 	 * Make sure to update b->written so bch2_btree_init_next() doesn't
2503 	 * break:
2504 	 */
2505 	if (bch2_journal_error(&c->journal) ||
2506 	    c->opts.nochanges)
2507 		goto err;
2508 
2509 	trace_and_count(c, btree_node_write, b, bytes_to_write, sectors_to_write);
2510 
2511 	wbio = container_of(bio_alloc_bioset(NULL,
2512 				buf_pages(data, sectors_to_write << 9),
2513 				REQ_OP_WRITE|REQ_META,
2514 				GFP_NOFS,
2515 				&c->btree_bio),
2516 			    struct btree_write_bio, wbio.bio);
2517 	wbio_init(&wbio->wbio.bio);
2518 	wbio->data			= data;
2519 	wbio->data_bytes		= bytes;
2520 	wbio->sector_offset		= b->written;
2521 	wbio->start_time		= start_time;
2522 	wbio->wbio.c			= c;
2523 	wbio->wbio.used_mempool		= used_mempool;
2524 	wbio->wbio.first_btree_write	= !b->written;
2525 	wbio->wbio.bio.bi_end_io	= btree_node_write_endio;
2526 	wbio->wbio.bio.bi_private	= b;
2527 
2528 	bch2_bio_map(&wbio->wbio.bio, data, sectors_to_write << 9);
2529 
2530 	bkey_copy(&wbio->key, &b->key);
2531 
2532 	b->written += sectors_to_write;
2533 
2534 	if (wbio->key.k.type == KEY_TYPE_btree_ptr_v2)
2535 		bkey_i_to_btree_ptr_v2(&wbio->key)->v.sectors_written =
2536 			cpu_to_le16(b->written);
2537 
2538 	atomic64_inc(&c->btree_write_stats[type].nr);
2539 	atomic64_add(bytes_to_write, &c->btree_write_stats[type].bytes);
2540 
2541 	async_object_list_add(c, btree_write_bio, wbio, &wbio->list_idx);
2542 
2543 	INIT_WORK(&wbio->work, btree_write_submit);
2544 	queue_work(c->btree_write_submit_wq, &wbio->work);
2545 	return;
2546 err:
2547 	set_btree_node_noevict(b);
2548 	b->written += sectors_to_write;
2549 nowrite:
2550 	btree_bounce_free(c, bytes, used_mempool, data);
2551 	__btree_node_write_done(c, b, 0);
2552 }
2553 
2554 /*
2555  * Work that must be done with write lock held:
2556  */
2557 bool bch2_btree_post_write_cleanup(struct bch_fs *c, struct btree *b)
2558 {
2559 	bool invalidated_iter = false;
2560 	struct btree_node_entry *bne;
2561 
2562 	if (!btree_node_just_written(b))
2563 		return false;
2564 
2565 	BUG_ON(b->whiteout_u64s);
2566 
2567 	clear_btree_node_just_written(b);
2568 
2569 	/*
2570 	 * Note: immediately after write, bset_written() doesn't work - the
2571 	 * amount of data we had to write after compaction might have been
2572 	 * smaller than the offset of the last bset.
2573 	 *
2574 	 * However, we know that all bsets have been written here, as long as
2575 	 * we're still holding the write lock:
2576 	 */
2577 
2578 	/*
2579 	 * XXX: decide if we really want to unconditionally sort down to a
2580 	 * single bset:
2581 	 */
2582 	if (b->nsets > 1) {
2583 		btree_node_sort(c, b, 0, b->nsets);
2584 		invalidated_iter = true;
2585 	} else {
2586 		invalidated_iter = bch2_drop_whiteouts(b, COMPACT_ALL);
2587 	}
2588 
2589 	for_each_bset(b, t)
2590 		set_needs_whiteout(bset(b, t), true);
2591 
2592 	bch2_btree_verify(c, b);
2593 
2594 	/*
2595 	 * If later we don't unconditionally sort down to a single bset, we have
2596 	 * to ensure this is still true:
2597 	 */
2598 	BUG_ON((void *) btree_bkey_last(b, bset_tree_last(b)) > write_block(b));
2599 
2600 	bne = want_new_bset(c, b);
2601 	if (bne)
2602 		bch2_bset_init_next(b, bne);
2603 
2604 	bch2_btree_build_aux_trees(b);
2605 
2606 	return invalidated_iter;
2607 }
2608 
2609 /*
2610  * Use this one if the node is intent locked:
2611  */
2612 void bch2_btree_node_write(struct bch_fs *c, struct btree *b,
2613 			   enum six_lock_type lock_type_held,
2614 			   unsigned flags)
2615 {
2616 	if (lock_type_held == SIX_LOCK_intent ||
2617 	    (lock_type_held == SIX_LOCK_read &&
2618 	     six_lock_tryupgrade(&b->c.lock))) {
2619 		__bch2_btree_node_write(c, b, flags);
2620 
2621 		/* don't cycle lock unnecessarily: */
2622 		if (btree_node_just_written(b) &&
2623 		    six_trylock_write(&b->c.lock)) {
2624 			bch2_btree_post_write_cleanup(c, b);
2625 			six_unlock_write(&b->c.lock);
2626 		}
2627 
2628 		if (lock_type_held == SIX_LOCK_read)
2629 			six_lock_downgrade(&b->c.lock);
2630 	} else {
2631 		__bch2_btree_node_write(c, b, flags);
2632 		if (lock_type_held == SIX_LOCK_write &&
2633 		    btree_node_just_written(b))
2634 			bch2_btree_post_write_cleanup(c, b);
2635 	}
2636 }
2637 
2638 void bch2_btree_node_write_trans(struct btree_trans *trans, struct btree *b,
2639 				 enum six_lock_type lock_type_held,
2640 				 unsigned flags)
2641 {
2642 	struct bch_fs *c = trans->c;
2643 
2644 	if (lock_type_held == SIX_LOCK_intent ||
2645 	    (lock_type_held == SIX_LOCK_read &&
2646 	     six_lock_tryupgrade(&b->c.lock))) {
2647 		__bch2_btree_node_write(c, b, flags);
2648 
2649 		/* don't cycle lock unnecessarily: */
2650 		if (btree_node_just_written(b) &&
2651 		    six_trylock_write(&b->c.lock)) {
2652 			bch2_btree_post_write_cleanup(c, b);
2653 			__bch2_btree_node_unlock_write(trans, b);
2654 		}
2655 
2656 		if (lock_type_held == SIX_LOCK_read)
2657 			six_lock_downgrade(&b->c.lock);
2658 	} else {
2659 		__bch2_btree_node_write(c, b, flags);
2660 		if (lock_type_held == SIX_LOCK_write &&
2661 		    btree_node_just_written(b))
2662 			bch2_btree_post_write_cleanup(c, b);
2663 	}
2664 }
2665 
2666 static bool __bch2_btree_flush_all(struct bch_fs *c, unsigned flag)
2667 {
2668 	struct bucket_table *tbl;
2669 	struct rhash_head *pos;
2670 	struct btree *b;
2671 	unsigned i;
2672 	bool ret = false;
2673 restart:
2674 	rcu_read_lock();
2675 	for_each_cached_btree(b, c, tbl, i, pos)
2676 		if (test_bit(flag, &b->flags)) {
2677 			rcu_read_unlock();
2678 			wait_on_bit_io(&b->flags, flag, TASK_UNINTERRUPTIBLE);
2679 			ret = true;
2680 			goto restart;
2681 		}
2682 	rcu_read_unlock();
2683 
2684 	return ret;
2685 }
2686 
2687 bool bch2_btree_flush_all_reads(struct bch_fs *c)
2688 {
2689 	return __bch2_btree_flush_all(c, BTREE_NODE_read_in_flight);
2690 }
2691 
2692 bool bch2_btree_flush_all_writes(struct bch_fs *c)
2693 {
2694 	return __bch2_btree_flush_all(c, BTREE_NODE_write_in_flight);
2695 }
2696 
2697 static const char * const bch2_btree_write_types[] = {
2698 #define x(t, n) [n] = #t,
2699 	BCH_BTREE_WRITE_TYPES()
2700 	NULL
2701 };
2702 
2703 void bch2_btree_write_stats_to_text(struct printbuf *out, struct bch_fs *c)
2704 {
2705 	printbuf_tabstop_push(out, 20);
2706 	printbuf_tabstop_push(out, 10);
2707 
2708 	prt_printf(out, "\tnr\tsize\n");
2709 
2710 	for (unsigned i = 0; i < BTREE_WRITE_TYPE_NR; i++) {
2711 		u64 nr		= atomic64_read(&c->btree_write_stats[i].nr);
2712 		u64 bytes	= atomic64_read(&c->btree_write_stats[i].bytes);
2713 
2714 		prt_printf(out, "%s:\t%llu\t", bch2_btree_write_types[i], nr);
2715 		prt_human_readable_u64(out, nr ? div64_u64(bytes, nr) : 0);
2716 		prt_newline(out);
2717 	}
2718 }
2719