References matching the identifier `b` in fs/bcachefs/btree_io.c (Linux kernel, bcachefs). Each entry lists the file line number, the matching source line, the containing function, and the role of `b` on that line (function argument, local variable, or struct member).

26 void bch2_btree_node_io_unlock(struct btree *b)  in bch2_btree_node_io_unlock()  argument
28 EBUG_ON(!btree_node_write_in_flight(b)); in bch2_btree_node_io_unlock()
30 clear_btree_node_write_in_flight_inner(b); in bch2_btree_node_io_unlock()
31 clear_btree_node_write_in_flight(b); in bch2_btree_node_io_unlock()
32 wake_up_bit(&b->flags, BTREE_NODE_write_in_flight); in bch2_btree_node_io_unlock()
35 void bch2_btree_node_io_lock(struct btree *b) in bch2_btree_node_io_lock() argument
39 wait_on_bit_lock_io(&b->flags, BTREE_NODE_write_in_flight, in bch2_btree_node_io_lock()
43 void __bch2_btree_node_wait_on_read(struct btree *b) in __bch2_btree_node_wait_on_read() argument
45 wait_on_bit_io(&b->flags, BTREE_NODE_read_in_flight, in __bch2_btree_node_wait_on_read()
49 void __bch2_btree_node_wait_on_write(struct btree *b) in __bch2_btree_node_wait_on_write() argument
51 wait_on_bit_io(&b->flags, BTREE_NODE_write_in_flight, in __bch2_btree_node_wait_on_write()
55 void bch2_btree_node_wait_on_read(struct btree *b) in bch2_btree_node_wait_on_read() argument
59 wait_on_bit_io(&b->flags, BTREE_NODE_read_in_flight, in bch2_btree_node_wait_on_read()
63 void bch2_btree_node_wait_on_write(struct btree *b) in bch2_btree_node_wait_on_write() argument
67 wait_on_bit_io(&b->flags, BTREE_NODE_write_in_flight, in bch2_btree_node_wait_on_write()
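Lines 26–67 are the node I/O synchronization helpers. They all use the kernel's bit-wait API on b->flags: the completion path clears the in-flight bit and calls wake_up_bit(), while waiters sleep in wait_on_bit_io() (wait for the bit to clear) or wait_on_bit_lock_io() (wait, then atomically set it). A minimal sketch of the same pattern with a hypothetical object and flag bit, not bcachefs code:

```c
#include <linux/wait_bit.h>
#include <linux/bitops.h>

/* Hypothetical example struct; bcachefs uses struct btree and
 * BTREE_NODE_write_in_flight, but the pattern is generic. */
struct my_obj {
	unsigned long flags;
};
#define MY_IO_IN_FLIGHT 0

static void my_obj_io_lock(struct my_obj *o)
{
	/* sleep until the bit is clear, then atomically set it */
	wait_on_bit_lock_io(&o->flags, MY_IO_IN_FLIGHT, TASK_UNINTERRUPTIBLE);
}

static void my_obj_io_wait(struct my_obj *o)
{
	/* wait for the bit to clear without taking it */
	wait_on_bit_io(&o->flags, MY_IO_IN_FLIGHT, TASK_UNINTERRUPTIBLE);
}

static void my_obj_io_unlock(struct my_obj *o)
{
	clear_bit(MY_IO_IN_FLIGHT, &o->flags);
	smp_mb__after_atomic();	/* clear must be visible before waking */
	wake_up_bit(&o->flags, MY_IO_IN_FLIGHT);
}
```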
71 static void verify_no_dups(struct btree *b, in verify_no_dups() argument
84 struct bkey l = bkey_unpack_key(b, p); in verify_no_dups()
85 struct bkey r = bkey_unpack_key(b, k); in verify_no_dups()
130 unsigned n = nr, a = nr / 2, b, c, d; in sort_bkey_ptrs() local
144 for (b = a; c = 2 * b + 1, (d = c + 1) < n;) in sort_bkey_ptrs()
145 b = bch2_bkey_cmp_packed(bt, in sort_bkey_ptrs()
149 b = c; in sort_bkey_ptrs()
151 while (b != a && in sort_bkey_ptrs()
154 ptrs[b]) >= 0) in sort_bkey_ptrs()
155 b = (b - 1) / 2; in sort_bkey_ptrs()
156 c = b; in sort_bkey_ptrs()
157 while (b != a) { in sort_bkey_ptrs()
158 b = (b - 1) / 2; in sort_bkey_ptrs()
159 swap(ptrs[b], ptrs[c]); in sort_bkey_ptrs()
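Lines 130–159 are the body of sort_bkey_ptrs(): an in-place heapsort over an array of bkey_packed pointers, using a bottom-up sift (walk the max-child path to a leaf, then climb back up) to save comparisons. A self-contained sketch of the same algorithm in its plain sift-down form, over ints rather than bkey pointers:

```c
#include <stddef.h>

/* Minimal heapsort; sort_bkey_ptrs() uses a bottom-up variant with
 * bch2_bkey_cmp_packed() as the comparator, but the structure is the same. */
static void sift_down(int *a, size_t root, size_t n)
{
	while (2 * root + 1 < n) {
		size_t child = 2 * root + 1;

		if (child + 1 < n && a[child] < a[child + 1])
			child++;		/* pick the larger child */
		if (a[root] >= a[child])
			return;			/* heap property restored */
		int tmp = a[root]; a[root] = a[child]; a[child] = tmp;
		root = child;
	}
}

static void heapsort_ints(int *a, size_t n)
{
	if (n < 2)
		return;

	/* heapify */
	for (size_t i = n / 2; i-- > 0;)
		sift_down(a, i, n);

	/* repeatedly move the max to the end and restore the heap */
	for (size_t end = n - 1; end > 0; end--) {
		int tmp = a[0]; a[0] = a[end]; a[end] = tmp;
		sift_down(a, 0, end);
	}
}
```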
164 static void bch2_sort_whiteouts(struct bch_fs *c, struct btree *b) in bch2_sort_whiteouts() argument
168 size_t bytes = b->whiteout_u64s * sizeof(u64); in bch2_sort_whiteouts()
170 if (!b->whiteout_u64s) in bch2_sort_whiteouts()
177 for (k = unwritten_whiteouts_start(b); in bch2_sort_whiteouts()
178 k != unwritten_whiteouts_end(b); in bch2_sort_whiteouts()
182 sort_bkey_ptrs(b, ptrs, ptrs_end - ptrs); in bch2_sort_whiteouts()
192 verify_no_dups(b, new_whiteouts, in bch2_sort_whiteouts()
193 (void *) ((u64 *) new_whiteouts + b->whiteout_u64s)); in bch2_sort_whiteouts()
195 memcpy_u64s(unwritten_whiteouts_start(b), in bch2_sort_whiteouts()
196 new_whiteouts, b->whiteout_u64s); in bch2_sort_whiteouts()
201 static bool should_compact_bset(struct btree *b, struct bset_tree *t, in should_compact_bset() argument
204 if (!bset_dead_u64s(b, t)) in should_compact_bset()
209 return should_compact_bset_lazy(b, t) || in should_compact_bset()
210 (compacting && !bset_written(b, bset(b, t))); in should_compact_bset()
218 static bool bch2_drop_whiteouts(struct btree *b, enum compact_mode mode) in bch2_drop_whiteouts() argument
223 for_each_bset(b, t) { in bch2_drop_whiteouts()
224 struct bset *i = bset(b, t); in bch2_drop_whiteouts()
228 if (t != b->set && !bset_written(b, i)) { in bch2_drop_whiteouts()
230 dst = max(write_block(b), in bch2_drop_whiteouts()
231 (void *) btree_bkey_last(b, t - 1)); in bch2_drop_whiteouts()
237 if (!should_compact_bset(b, t, ret, mode)) { in bch2_drop_whiteouts()
243 set_btree_bset(b, t, i); in bch2_drop_whiteouts()
248 start = btree_bkey_first(b, t); in bch2_drop_whiteouts()
249 end = btree_bkey_last(b, t); in bch2_drop_whiteouts()
254 set_btree_bset(b, t, i); in bch2_drop_whiteouts()
271 set_btree_bset_end(b, t); in bch2_drop_whiteouts()
272 bch2_bset_set_no_aux_tree(b, t); in bch2_drop_whiteouts()
276 bch2_verify_btree_nr_keys(b); in bch2_drop_whiteouts()
278 bch2_btree_build_aux_trees(b); in bch2_drop_whiteouts()
283 bool bch2_compact_whiteouts(struct bch_fs *c, struct btree *b, in bch2_compact_whiteouts() argument
286 return bch2_drop_whiteouts(b, mode); in bch2_compact_whiteouts()
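bch2_drop_whiteouts() (lines 218–286, reached via bch2_compact_whiteouts()) rewrites each bset keeping only live keys, then resets the bset end and rebuilds the auxiliary trees. The core move is a stable in-place compaction; a toy version with a hypothetical fixed-size key record (the real code walks variable-length bkey_packed entries inside each bset):

```c
#include <stdbool.h>
#include <stddef.h>

/* Hypothetical key record for illustration only. */
struct demo_key {
	int  val;
	bool whiteout;	/* deletion marker, like bkey_deleted() */
};

/* Compact in place, keeping only live keys; returns the new count. */
static size_t drop_whiteouts(struct demo_key *keys, size_t n)
{
	size_t out = 0;

	for (size_t i = 0; i < n; i++)
		if (!keys[i].whiteout)
			keys[out++] = keys[i];
	return out;
}
```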
289 static void btree_node_sort(struct bch_fs *c, struct btree *b, in btree_node_sort() argument
297 struct bset *start_bset = bset(b, &b->set[start_idx]); in btree_node_sort()
302 end_idx == b->nsets; in btree_node_sort()
304 sort_iter_stack_init(&sort_iter, b); in btree_node_sort()
306 for (t = b->set + start_idx; in btree_node_sort()
307 t < b->set + end_idx; in btree_node_sort()
309 u64s += le16_to_cpu(bset(b, t)->u64s); in btree_node_sort()
311 btree_bkey_first(b, t), in btree_node_sort()
312 btree_bkey_last(b, t)); in btree_node_sort()
316 ? btree_buf_bytes(b) in btree_node_sort()
334 for (t = b->set + start_idx; t < b->set + end_idx; t++) in btree_node_sort()
335 seq = max(seq, le64_to_cpu(bset(b, t)->journal_seq)); in btree_node_sort()
341 BUG_ON(bytes != btree_buf_bytes(b)); in btree_node_sort()
348 *out = *b->data; in btree_node_sort()
350 swap(out, b->data); in btree_node_sort()
351 set_btree_bset(b, b->set, &b->data->keys); in btree_node_sort()
360 b->nr.bset_u64s[start_idx] += in btree_node_sort()
361 b->nr.bset_u64s[i]; in btree_node_sort()
363 b->nsets -= shift; in btree_node_sort()
365 for (i = start_idx + 1; i < b->nsets; i++) { in btree_node_sort()
366 b->nr.bset_u64s[i] = b->nr.bset_u64s[i + shift]; in btree_node_sort()
367 b->set[i] = b->set[i + shift]; in btree_node_sort()
370 for (i = b->nsets; i < MAX_BSETS; i++) in btree_node_sort()
371 b->nr.bset_u64s[i] = 0; in btree_node_sort()
373 set_btree_bset_end(b, &b->set[start_idx]); in btree_node_sort()
374 bch2_bset_set_no_aux_tree(b, &b->set[start_idx]); in btree_node_sort()
378 bch2_verify_btree_nr_keys(b); in btree_node_sort()
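The tail of btree_node_sort() (lines 360–378) folds the merged bsets' u64 counts into the first set, shifts the surviving b->set[] / b->nr.bset_u64s[] entries down, and zeroes the tail. A sketch of that bookkeeping, assuming MAX_BSETS is 3 as in bcachefs:

```c
#include <stddef.h>

#define MAX_SETS 3	/* assumption: mirrors MAX_BSETS */

/* After merging sets [start, end) into set[start], shift the sets above
 * the merged range down and zero the tail; returns the new nsets. */
static size_t merge_shift(unsigned set_u64s[MAX_SETS], size_t nsets,
			  size_t start, size_t end)
{
	size_t shift = end - start - 1;

	for (size_t i = start + 1; i < end; i++)
		set_u64s[start] += set_u64s[i];	/* fold sizes into merged set */

	nsets -= shift;

	for (size_t i = start + 1; i < nsets; i++)
		set_u64s[i] = set_u64s[i + shift];
	for (size_t i = nsets; i < MAX_SETS; i++)
		set_u64s[i] = 0;

	return nsets;
}
```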
417 static bool btree_node_compact(struct bch_fs *c, struct btree *b) in btree_node_compact() argument
423 unwritten_idx < b->nsets; in btree_node_compact()
425 if (!bset_written(b, bset(b, &b->set[unwritten_idx]))) in btree_node_compact()
428 if (b->nsets - unwritten_idx > 1) { in btree_node_compact()
429 btree_node_sort(c, b, unwritten_idx, in btree_node_compact()
430 b->nsets, false); in btree_node_compact()
435 btree_node_sort(c, b, 0, unwritten_idx, false); in btree_node_compact()
442 void bch2_btree_build_aux_trees(struct btree *b) in bch2_btree_build_aux_trees() argument
446 for_each_bset(b, t) in bch2_btree_build_aux_trees()
447 bch2_bset_build_aux_tree(b, t, in bch2_btree_build_aux_trees()
448 !bset_written(b, bset(b, t)) && in bch2_btree_build_aux_trees()
449 t == bset_tree_last(b)); in bch2_btree_build_aux_trees()
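bch2_btree_build_aux_trees() (lines 442–449) rebuilds each bset's auxiliary search tree; only the last, still-unwritten bset gets the read-write variant. bcachefs lays these lookup structures out in Eytzinger (breadth-first) order so binary search touches cache-friendly addresses. A minimal, illustrative sketch of filling an Eytzinger-ordered array from sorted input (not the bcachefs implementation):

```c
#include <stddef.h>

/* In-order traversal of the implicit tree assigns sorted values into
 * BFS (Eytzinger) positions. Search then descends k -> 2k+1 or 2k+2,
 * comparing tree[k] against the lookup key at each step. */
static size_t eytzinger_fill(const int *sorted, int *tree,
			     size_t i, size_t k, size_t n)
{
	if (k < n) {
		i = eytzinger_fill(sorted, tree, i, 2 * k + 1, n); /* left */
		tree[k] = sorted[i++];                             /* this node */
		i = eytzinger_fill(sorted, tree, i, 2 * k + 2, n); /* right */
	}
	return i;
}
```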
462 static inline bool should_compact_all(struct bch_fs *c, struct btree *b) in should_compact_all() argument
467 return bset_u64s(&b->set[1]) > 1U << mid_u64s_bits; in should_compact_all()
475 * if @b doesn't already have one.
479 void bch2_btree_init_next(struct btree_trans *trans, struct btree *b) in bch2_btree_init_next() argument
485 EBUG_ON(!six_lock_counts(&b->c.lock).n[SIX_LOCK_write]); in bch2_btree_init_next()
486 BUG_ON(bset_written(b, bset(b, &b->set[1]))); in bch2_btree_init_next()
487 BUG_ON(btree_node_just_written(b)); in bch2_btree_init_next()
489 if (b->nsets == MAX_BSETS && in bch2_btree_init_next()
490 !btree_node_write_in_flight(b) && in bch2_btree_init_next()
491 should_compact_all(c, b)) { in bch2_btree_init_next()
492 bch2_btree_node_write(c, b, SIX_LOCK_write, in bch2_btree_init_next()
497 if (b->nsets == MAX_BSETS && in bch2_btree_init_next()
498 btree_node_compact(c, b)) in bch2_btree_init_next()
501 BUG_ON(b->nsets >= MAX_BSETS); in bch2_btree_init_next()
503 bne = want_new_bset(c, b); in bch2_btree_init_next()
505 bch2_bset_init_next(b, bne); in bch2_btree_init_next()
507 bch2_btree_build_aux_trees(b); in bch2_btree_init_next()
510 bch2_trans_node_reinit_iter(trans, b); in bch2_btree_init_next()
515 struct btree *b, struct bset *i, in btree_err_msg() argument
525 bch2_btree_pos_to_text(out, c, b); in btree_err_msg()
528 b->written, btree_ptr_sectors_written(&b->key)); in btree_err_msg()
538 struct btree *b, in __btree_err() argument
548 btree_err_msg(&out, c, ca, b, i, b->written, write); in __btree_err()
600 #define btree_err(type, c, ca, b, i, _err_type, msg, ...) \ argument
602 int _ret = __btree_err(type, c, ca, b, i, write, have_retry, \
621 void bch2_btree_node_drop_keys_outside_node(struct btree *b) in bch2_btree_node_drop_keys_outside_node() argument
625 for_each_bset(b, t) { in bch2_btree_node_drop_keys_outside_node()
626 struct bset *i = bset(b, t); in bch2_btree_node_drop_keys_outside_node()
630 if (bkey_cmp_left_packed(b, k, &b->data->min_key) >= 0) in bch2_btree_node_drop_keys_outside_node()
639 set_btree_bset_end(b, t); in bch2_btree_node_drop_keys_outside_node()
643 if (bkey_cmp_left_packed(b, k, &b->data->max_key) > 0) in bch2_btree_node_drop_keys_outside_node()
648 set_btree_bset_end(b, t); in bch2_btree_node_drop_keys_outside_node()
656 bch2_bset_set_no_aux_tree(b, b->set); in bch2_btree_node_drop_keys_outside_node()
657 bch2_btree_build_aux_trees(b); in bch2_btree_node_drop_keys_outside_node()
662 for_each_btree_node_key_unpack(b, k, &iter, &unpacked) { in bch2_btree_node_drop_keys_outside_node()
663 BUG_ON(bpos_lt(k.k->p, b->data->min_key)); in bch2_btree_node_drop_keys_outside_node()
664 BUG_ON(bpos_gt(k.k->p, b->data->max_key)); in bch2_btree_node_drop_keys_outside_node()
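bch2_btree_node_drop_keys_outside_node() (lines 621–664) trims keys that fall outside the node's [min_key, max_key] range from each bset, compacts the survivors, rebuilds the aux trees, and asserts the invariant afterwards. The same idea over a sorted int array:

```c
#include <stddef.h>

/* Keep only keys with min_key <= k <= max_key, compacted to the front;
 * the real code memmoves bkeys and shrinks i->u64s per bset. */
static size_t drop_outside_range(int *keys, size_t n, int min_key, int max_key)
{
	size_t first = 0, last = n;

	while (first < n && keys[first] < min_key)
		first++;			/* drop keys below min_key */
	while (last > first && keys[last - 1] > max_key)
		last--;				/* drop keys above max_key */

	for (size_t i = 0; i < last - first; i++)
		keys[i] = keys[first + i];	/* compact survivors */

	return last - first;
}
```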
669 struct btree *b, struct bset *i, in validate_bset() argument
680 c, ca, b, i, in validate_bset()
688 c, NULL, b, i, in validate_bset()
701 c, NULL, b, i, in validate_bset()
713 c, ca, b, i, in validate_bset()
719 c, ca, b, i, in validate_bset()
729 c, ca, b, i, in validate_bset()
735 c, ca, b, i, in validate_bset()
744 if (b->key.k.type == KEY_TYPE_btree_ptr_v2) { in validate_bset()
746 &bkey_i_to_btree_ptr_v2(&b->key)->v; in validate_bset()
751 c, ca, b, NULL, in validate_bset()
756 btree_err_on(BTREE_NODE_ID(bn) != b->c.btree_id, in validate_bset()
758 c, ca, b, i, in validate_bset()
762 btree_err_on(BTREE_NODE_LEVEL(bn) != b->c.level, in validate_bset()
764 c, ca, b, i, in validate_bset()
769 compat_btree_node(b->c.level, b->c.btree_id, version, in validate_bset()
772 if (b->key.k.type == KEY_TYPE_btree_ptr_v2) { in validate_bset()
774 &bkey_i_to_btree_ptr_v2(&b->key)->v; in validate_bset()
777 b->data->min_key = bp->min_key; in validate_bset()
778 b->data->max_key = b->key.k.p; in validate_bset()
781 btree_err_on(!bpos_eq(b->data->min_key, bp->min_key), in validate_bset()
783 c, ca, b, NULL, in validate_bset()
792 btree_err_on(!bpos_eq(bn->max_key, b->key.k.p), in validate_bset()
794 c, ca, b, i, in validate_bset()
801 compat_btree_node(b->c.level, b->c.btree_id, version, in validate_bset()
806 c, ca, b, i, in validate_bset()
813 compat_bformat(b->c.level, b->c.btree_id, version, in validate_bset()
824 static int bset_key_invalid(struct bch_fs *c, struct btree *b, in bset_key_invalid() argument
829 return __bch2_bkey_invalid(c, k, btree_node_type(b), READ, err) ?: in bset_key_invalid()
830 (!updated_range ? bch2_bkey_in_btree_node(c, b, k, err) : 0) ?: in bset_key_invalid()
834 static bool __bkey_valid(struct bch_fs *c, struct btree *b, in __bkey_valid() argument
845 struct bkey_s u = __bkey_disassemble(b, k, &tmp); in __bkey_valid()
846 bool ret = __bch2_bkey_invalid(c, u.s_c, btree_node_type(b), READ, &buf); in __bkey_valid()
851 static int validate_bset_keys(struct bch_fs *c, struct btree *b, in validate_bset_keys() argument
858 bool updated_range = b->key.k.type == KEY_TYPE_btree_ptr_v2 && in validate_bset_keys()
859 BTREE_PTR_RANGE_UPDATED(&bkey_i_to_btree_ptr_v2(&b->key)->v); in validate_bset_keys()
870 c, NULL, b, i, in validate_bset_keys()
879 c, NULL, b, i, in validate_bset_keys()
886 bch2_bkey_compat(b->c.level, b->c.btree_id, version, in validate_bset_keys()
888 &b->format, k); in validate_bset_keys()
890 u = __bkey_disassemble(b, k, &tmp); in validate_bset_keys()
893 if (bset_key_invalid(c, b, u.s_c, updated_range, write, &buf)) { in validate_bset_keys()
895 bset_key_invalid(c, b, u.s_c, updated_range, write, &buf); in validate_bset_keys()
900 c, NULL, b, i, in validate_bset_keys()
907 bch2_bkey_compat(b->c.level, b->c.btree_id, version, in validate_bset_keys()
909 &b->format, k); in validate_bset_keys()
911 if (prev && bkey_iter_cmp(b, prev, k) > 0) { in validate_bset_keys()
912 struct bkey up = bkey_unpack_key(b, prev); in validate_bset_keys()
921 c, NULL, b, i, in validate_bset_keys()
941 if (!__bkey_valid(c, b, i, (void *) ((u64 *) k + next_good_key))) { in validate_bset_keys()
945 if (__bkey_valid(c, b, i, (void *) ((u64 *) k + next_good_key))) in validate_bset_keys()
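validate_bset_keys() (lines 851–945) walks the variable-length keys in a bset, checking format and ordering; on a bad key it drops it and probes forward for the next offset that decodes as a valid key. A toy scanner over length-prefixed records (hypothetical layout; the real resynchronization via __bkey_valid() is more involved):

```c
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* Hypothetical record layout for illustration. */
struct rec {
	uint8_t len;	/* total length in bytes, including header */
	uint8_t type;
};

static bool rec_valid(const struct rec *r, const uint8_t *end)
{
	return r->len >= sizeof(*r) && (const uint8_t *)r + r->len <= end;
}

/* Returns the number of bytes of valid records kept at the front. */
static size_t validate_records(uint8_t *buf, size_t size)
{
	uint8_t *k = buf, *end = buf + size;

	while (k < end) {
		struct rec *r = (struct rec *)k;

		if (!rec_valid(r, end)) {
			/* drop everything from the first bad record on;
			 * the real code tries to resynchronize instead */
			end = k;
			break;
		}
		k += r->len;
	}
	return end - buf;
}
```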
966 struct btree *b, bool have_retry, bool *saw_error) in bch2_btree_node_read_done() argument
974 bool updated_range = b->key.k.type == KEY_TYPE_btree_ptr_v2 && in bch2_btree_node_read_done()
975 BTREE_PTR_RANGE_UPDATED(&bkey_i_to_btree_ptr_v2(&b->key)->v); in bch2_btree_node_read_done()
977 unsigned ptr_written = btree_ptr_sectors_written(&b->key); in bch2_btree_node_read_done()
982 b->version_ondisk = U16_MAX; in bch2_btree_node_read_done()
984 b->written = 0; in bch2_btree_node_read_done()
987 sort_iter_init(iter, b, (btree_blocks(c) + 1) * 2); in bch2_btree_node_read_done()
991 c, ca, b, NULL, in bch2_btree_node_read_done()
995 btree_err_on(le64_to_cpu(b->data->magic) != bset_magic(c), in bch2_btree_node_read_done()
997 c, ca, b, NULL, in bch2_btree_node_read_done()
1000 bset_magic(c), le64_to_cpu(b->data->magic)); in bch2_btree_node_read_done()
1002 if (b->key.k.type == KEY_TYPE_btree_ptr_v2) { in bch2_btree_node_read_done()
1004 &bkey_i_to_btree_ptr_v2(&b->key)->v; in bch2_btree_node_read_done()
1006 bch2_bpos_to_text(&buf, b->data->min_key); in bch2_btree_node_read_done()
1008 bch2_bpos_to_text(&buf, b->data->max_key); in bch2_btree_node_read_done()
1010 btree_err_on(b->data->keys.seq != bp->seq, in bch2_btree_node_read_done()
1012 c, ca, b, NULL, in bch2_btree_node_read_done()
1016 bp->seq, b->data->keys.seq, in bch2_btree_node_read_done()
1017 bch2_btree_id_str(BTREE_NODE_ID(b->data)), in bch2_btree_node_read_done()
1018 BTREE_NODE_LEVEL(b->data), in bch2_btree_node_read_done()
1021 btree_err_on(!b->data->keys.seq, in bch2_btree_node_read_done()
1023 c, ca, b, NULL, in bch2_btree_node_read_done()
1028 while (b->written < (ptr_written ?: btree_sectors(c))) { in bch2_btree_node_read_done()
1031 bool first = !b->written; in bch2_btree_node_read_done()
1034 if (!b->written) { in bch2_btree_node_read_done()
1035 i = &b->data->keys; in bch2_btree_node_read_done()
1039 c, ca, b, i, in bch2_btree_node_read_done()
1043 nonce = btree_nonce(i, b->written << 9); in bch2_btree_node_read_done()
1045 struct bch_csum csum = csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, b->data); in bch2_btree_node_read_done()
1046 csum_bad = bch2_crc_cmp(b->data->csum, csum); in bch2_btree_node_read_done()
1052 c, ca, b, i, in bch2_btree_node_read_done()
1056 bch2_csum_err_msg(&buf, BSET_CSUM_TYPE(i), b->data->csum, csum), in bch2_btree_node_read_done()
1059 ret = bset_encrypt(c, i, b->written << 9); in bch2_btree_node_read_done()
1064 btree_err_on(btree_node_type_is_extents(btree_node_type(b)) && in bch2_btree_node_read_done()
1065 !BTREE_NODE_NEW_EXTENT_OVERWRITE(b->data), in bch2_btree_node_read_done()
1067 c, NULL, b, NULL, in bch2_btree_node_read_done()
1071 sectors = vstruct_sectors(b->data, c->block_bits); in bch2_btree_node_read_done()
1073 bne = write_block(b); in bch2_btree_node_read_done()
1076 if (i->seq != b->data->keys.seq) in bch2_btree_node_read_done()
1081 c, ca, b, i, in bch2_btree_node_read_done()
1085 nonce = btree_nonce(i, b->written << 9); in bch2_btree_node_read_done()
1093 c, ca, b, i, in bch2_btree_node_read_done()
1100 ret = bset_encrypt(c, i, b->written << 9); in bch2_btree_node_read_done()
1108 b->version_ondisk = min(b->version_ondisk, in bch2_btree_node_read_done()
1111 ret = validate_bset(c, ca, b, i, b->written, sectors, in bch2_btree_node_read_done()
1116 if (!b->written) in bch2_btree_node_read_done()
1117 btree_node_set_format(b, b->data->format); in bch2_btree_node_read_done()
1119 ret = validate_bset_keys(c, b, i, READ, have_retry, saw_error); in bch2_btree_node_read_done()
1131 c, ca, b, i, in bch2_btree_node_read_done()
1138 c, ca, b, i, in bch2_btree_node_read_done()
1142 b->written, b->written + sectors, ptr_written); in bch2_btree_node_read_done()
1144 b->written += sectors; in bch2_btree_node_read_done()
1155 btree_err_on(b->written < ptr_written, in bch2_btree_node_read_done()
1157 c, ca, b, NULL, in bch2_btree_node_read_done()
1160 ptr_written, b->written); in bch2_btree_node_read_done()
1162 for (bne = write_block(b); in bch2_btree_node_read_done()
1163 bset_byte_offset(b, bne) < btree_buf_bytes(b); in bch2_btree_node_read_done()
1165 btree_err_on(bne->keys.seq == b->data->keys.seq && in bch2_btree_node_read_done()
1170 c, ca, b, NULL, in bch2_btree_node_read_done()
1175 sorted = btree_bounce_alloc(c, btree_buf_bytes(b), &used_mempool); in bch2_btree_node_read_done()
1178 set_btree_bset(b, b->set, &b->data->keys); in bch2_btree_node_read_done()
1180 b->nr = bch2_key_sort_fix_overlapping(c, &sorted->keys, iter); in bch2_btree_node_read_done()
1183 *sorted = *b->data; in bch2_btree_node_read_done()
1185 swap(sorted, b->data); in bch2_btree_node_read_done()
1186 set_btree_bset(b, b->set, &b->data->keys); in bch2_btree_node_read_done()
1187 b->nsets = 1; in bch2_btree_node_read_done()
1189 BUG_ON(b->nr.live_u64s != u64s); in bch2_btree_node_read_done()
1191 btree_bounce_free(c, btree_buf_bytes(b), used_mempool, sorted); in bch2_btree_node_read_done()
1194 bch2_btree_node_drop_keys_outside_node(b); in bch2_btree_node_read_done()
1196 i = &b->data->keys; in bch2_btree_node_read_done()
1199 struct bkey_s u = __bkey_disassemble(b, k, &tmp); in bch2_btree_node_read_done()
1214 c, NULL, b, i, in bch2_btree_node_read_done()
1218 btree_keys_account_key_drop(&b->nr, 0, k); in bch2_btree_node_read_done()
1223 set_btree_bset_end(b, b->set); in bch2_btree_node_read_done()
1236 bch2_bset_build_aux_tree(b, b->set, false); in bch2_btree_node_read_done()
1238 set_needs_whiteout(btree_bset_first(b), true); in bch2_btree_node_read_done()
1240 btree_node_reset_sib_u64s(b); in bch2_btree_node_read_done()
1242 bkey_for_each_ptr(bch2_bkey_ptrs(bkey_i_to_s(&b->key)), ptr) { in bch2_btree_node_read_done()
1246 set_btree_node_need_rewrite(b); in bch2_btree_node_read_done()
1250 set_btree_node_need_rewrite(b); in bch2_btree_node_read_done()
1261 set_btree_node_read_error(b); in bch2_btree_node_read_done()
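bch2_btree_node_read_done() (lines 966–1261) is the read-side workhorse: verify magic and sequence numbers, checksum and decrypt each bset, validate its keys, then sort all bsets together and drop keys outside the node. The outer loop consumes consecutive on-disk bsets until ptr_written sectors are accounted for. A simplified version of that loop with a hypothetical header; the checksum and sizing are toy stand-ins for csum_vstruct() and vstruct_sectors():

```c
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* Hypothetical on-disk bset header for illustration. */
struct demo_bset {
	uint64_t seq;		/* must match the node's seq */
	uint32_t csum;
	uint32_t sectors;	/* size of this bset in 512-byte sectors */
};

static uint32_t demo_csum(const struct demo_bset *i)
{
	return (uint32_t)i->seq ^ i->sectors;	/* toy checksum */
}

/* Walk consecutive bsets until ptr_written sectors are consumed,
 * mirroring "while (b->written < ptr_written)". */
static bool read_node(const char *buf, unsigned ptr_written, uint64_t want_seq)
{
	unsigned written = 0;

	while (written < ptr_written) {
		const struct demo_bset *i =
			(const struct demo_bset *)(buf + ((size_t)written << 9));

		if (i->seq != want_seq)
			break;			/* ran into unwritten space */
		if (demo_csum(i) != i->csum)
			return false;		/* checksum mismatch */
		if (!i->sectors)
			return false;		/* guard against a stuck loop */
		written += i->sectors;
	}
	return true;
}
```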
1270 struct btree *b = rb->b; in btree_node_read_work() local
1287 bio->bi_iter.bi_size = btree_buf_bytes(b); in btree_node_read_work()
1297 bch2_btree_pos_to_text(&buf, c, b); in btree_node_read_work()
1308 bkey_i_to_s_c(&b->key), in btree_node_read_work()
1312 !bch2_btree_node_read_done(c, ca, b, can_retry, &saw_error)) { in btree_node_read_work()
1321 set_btree_node_read_error(b); in btree_node_read_work()
1330 if (saw_error && !btree_node_read_error(b)) { in btree_node_read_work()
1332 bch2_bpos_to_text(&buf, b->key.k.p); in btree_node_read_work()
1334 __func__, bch2_btree_id_str(b->c.btree_id), b->c.level, buf.buf); in btree_node_read_work()
1336 bch2_btree_node_rewrite_async(c, b); in btree_node_read_work()
1340 clear_btree_node_read_in_flight(b); in btree_node_read_work()
1341 wake_up_bit(&b->flags, BTREE_NODE_read_in_flight); in btree_node_read_work()
1362 struct btree *b; member
1415 struct btree *b = ra->b; in CLOSURE_CALLBACK() local
1421 __le64 seq = b->key.k.type == KEY_TYPE_btree_ptr_v2 in CLOSURE_CALLBACK()
1422 ? bkey_i_to_btree_ptr_v2(&b->key)->v.seq : 0; in CLOSURE_CALLBACK()
1443 c, NULL, b, NULL, in CLOSURE_CALLBACK()
1449 c, NULL, b, NULL, in CLOSURE_CALLBACK()
1454 c, NULL, b, NULL, in CLOSURE_CALLBACK()
1515 memcpy(b->data, ra->buf[best], btree_buf_bytes(b)); in CLOSURE_CALLBACK()
1516 ret = bch2_btree_node_read_done(c, NULL, b, false, saw_error); in CLOSURE_CALLBACK()
1522 set_btree_node_read_error(b); in CLOSURE_CALLBACK()
1524 bch2_btree_node_rewrite_async(c, b); in CLOSURE_CALLBACK()
1535 clear_btree_node_read_in_flight(b); in CLOSURE_CALLBACK()
1536 wake_up_bit(&b->flags, BTREE_NODE_read_in_flight); in CLOSURE_CALLBACK()
1560 static int btree_node_read_all_replicas(struct bch_fs *c, struct btree *b, bool sync) in btree_node_read_all_replicas() argument
1562 struct bkey_s_c k = bkey_i_to_s_c(&b->key); in btree_node_read_all_replicas()
1575 ra->b = b; in btree_node_read_all_replicas()
1581 buf_pages(ra->buf[i], btree_buf_bytes(b)), in btree_node_read_all_replicas()
1593 rb->b = b; in btree_node_read_all_replicas()
1601 bch2_bio_map(&rb->bio, ra->buf[i], btree_buf_bytes(b)); in btree_node_read_all_replicas()
1628 void bch2_btree_node_read(struct btree_trans *trans, struct btree *b, in bch2_btree_node_read() argument
1638 trace_and_count(c, btree_node_read, trans, b); in bch2_btree_node_read()
1641 !btree_node_read_all_replicas(c, b, sync)) in bch2_btree_node_read()
1644 ret = bch2_bkey_pick_read_device(c, bkey_i_to_s_c(&b->key), in bch2_btree_node_read()
1651 bch2_btree_pos_to_text(&buf, c, b); in bch2_btree_node_read()
1658 set_btree_node_read_error(b); in bch2_btree_node_read()
1659 clear_btree_node_read_in_flight(b); in bch2_btree_node_read()
1660 wake_up_bit(&b->flags, BTREE_NODE_read_in_flight); in bch2_btree_node_read()
1668 buf_pages(b->data, btree_buf_bytes(b)), in bch2_btree_node_read()
1674 rb->b = b; in bch2_btree_node_read()
1682 bch2_bio_map(bio, b->data, btree_buf_bytes(b)); in bch2_btree_node_read()
1711 struct btree *b; in __bch2_btree_root_read() local
1721 b = bch2_btree_node_mem_alloc(trans, level != 0); in __bch2_btree_root_read()
1724 BUG_ON(IS_ERR(b)); in __bch2_btree_root_read()
1726 bkey_copy(&b->key, k); in __bch2_btree_root_read()
1727 BUG_ON(bch2_btree_node_hash_insert(&c->btree_cache, b, level, id)); in __bch2_btree_root_read()
1729 set_btree_node_read_in_flight(b); in __bch2_btree_root_read()
1731 bch2_btree_node_read(trans, b, true); in __bch2_btree_root_read()
1733 if (btree_node_read_error(b)) { in __bch2_btree_root_read()
1734 bch2_btree_node_hash_remove(&c->btree_cache, b); in __bch2_btree_root_read()
1737 list_move(&b->list, &c->btree_cache.freeable); in __bch2_btree_root_read()
1744 bch2_btree_set_root_for_read(c, b); in __bch2_btree_root_read()
1746 six_unlock_write(&b->c.lock); in __bch2_btree_root_read()
1747 six_unlock_intent(&b->c.lock); in __bch2_btree_root_read()
1758 static void bch2_btree_complete_write(struct bch_fs *c, struct btree *b, in bch2_btree_complete_write() argument
1761 unsigned long old, new, v = READ_ONCE(b->will_make_reachable); in bch2_btree_complete_write()
1769 } while ((v = cmpxchg(&b->will_make_reachable, old, new)) != old); in bch2_btree_complete_write()
1777 static void __btree_node_write_done(struct bch_fs *c, struct btree *b) in __btree_node_write_done() argument
1779 struct btree_write *w = btree_prev_write(b); in __btree_node_write_done()
1783 bch2_btree_complete_write(c, b, w); in __btree_node_write_done()
1785 v = READ_ONCE(b->flags); in __btree_node_write_done()
1807 } while ((v = cmpxchg(&b->flags, old, new)) != old); in __btree_node_write_done()
1810 __bch2_btree_node_write(c, b, BTREE_WRITE_ALREADY_STARTED|type); in __btree_node_write_done()
1812 wake_up_bit(&b->flags, BTREE_NODE_write_in_flight); in __btree_node_write_done()
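__btree_node_write_done() (lines 1777–1812) retires a write and, if another write was requested in the meantime, restarts it, all through a lockless cmpxchg retry loop on b->flags. The generic shape of that loop, with hypothetical flag bits:

```c
#include <linux/atomic.h>

/* Hypothetical flags; bcachefs transitions BTREE_NODE_write_in_flight
 * and the dirty/need-write bits in b->flags this way. */
#define F_IN_FLIGHT	(1UL << 0)
#define F_NEED_WRITE	(1UL << 1)

/* Returns true if the caller should restart the write. */
static bool finish_write(unsigned long *flags)
{
	unsigned long old, new, v = READ_ONCE(*flags);

	do {
		old = new = v;
		if (!(old & F_IN_FLIGHT))
			return false;		/* nothing was in flight */
		new &= ~F_IN_FLIGHT;		/* write completed */
		new &= ~F_NEED_WRITE;		/* consume a pending request */
	} while ((v = cmpxchg(flags, old, new)) != old);

	return old & F_NEED_WRITE;
}
```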
1815 static void btree_node_write_done(struct bch_fs *c, struct btree *b) in btree_node_write_done() argument
1819 btree_node_lock_nopath_nofail(trans, &b->c, SIX_LOCK_read); in btree_node_write_done()
1820 __btree_node_write_done(c, b); in btree_node_write_done()
1821 six_unlock_read(&b->c.lock); in btree_node_write_done()
1831 struct btree *b = wbio->wbio.bio.bi_private; in btree_node_write_work() local
1854 bch2_btree_node_update_key_get_iter(trans, b, &wbio->key, in btree_node_write_work()
1865 btree_node_write_done(c, b); in btree_node_write_work()
1868 set_btree_node_noevict(b); in btree_node_write_work()
1881 struct btree *b = wbio->bio.bi_private; in btree_node_write_endio() local
1906 clear_btree_node_write_in_flight_inner(b); in btree_node_write_endio()
1907 wake_up_bit(&b->flags, BTREE_NODE_write_in_flight_inner); in btree_node_write_endio()
1912 static int validate_bset_for_write(struct bch_fs *c, struct btree *b, in validate_bset_for_write() argument
1919 ret = bch2_bkey_invalid(c, bkey_i_to_s_c(&b->key), in validate_bset_for_write()
1928 ret = validate_bset_keys(c, b, i, WRITE, false, &saw_error) ?: in validate_bset_for_write()
1929 validate_bset(c, NULL, b, i, b->written, sectors, WRITE, false, &saw_error); in validate_bset_for_write()
1952 void __bch2_btree_node_write(struct bch_fs *c, struct btree *b, unsigned flags) in __bch2_btree_node_write() argument
1981 old = new = READ_ONCE(b->flags); in __bch2_btree_node_write()
1995 if (b->written && in __bch2_btree_node_write()
2012 } while (cmpxchg_acquire(&b->flags, old, new) != old); in __bch2_btree_node_write()
2017 BUG_ON((type == BTREE_WRITE_initial) != (b->written == 0)); in __bch2_btree_node_write()
2021 BUG_ON(btree_node_fake(b)); in __bch2_btree_node_write()
2022 BUG_ON((b->will_make_reachable != 0) != !b->written); in __bch2_btree_node_write()
2024 BUG_ON(b->written >= btree_sectors(c)); in __bch2_btree_node_write()
2025 BUG_ON(b->written & (block_sectors(c) - 1)); in __bch2_btree_node_write()
2026 BUG_ON(bset_written(b, btree_bset_last(b))); in __bch2_btree_node_write()
2027 BUG_ON(le64_to_cpu(b->data->magic) != bset_magic(c)); in __bch2_btree_node_write()
2028 BUG_ON(memcmp(&b->data->format, &b->format, sizeof(b->format))); in __bch2_btree_node_write()
2030 bch2_sort_whiteouts(c, b); in __bch2_btree_node_write()
2032 sort_iter_stack_init(&sort_iter, b); in __bch2_btree_node_write()
2034 bytes = !b->written in __bch2_btree_node_write()
2038 bytes += b->whiteout_u64s * sizeof(u64); in __bch2_btree_node_write()
2040 for_each_bset(b, t) { in __bch2_btree_node_write()
2041 i = bset(b, t); in __bch2_btree_node_write()
2043 if (bset_written(b, i)) in __bch2_btree_node_write()
2048 btree_bkey_first(b, t), in __bch2_btree_node_write()
2049 btree_bkey_last(b, t)); in __bch2_btree_node_write()
2053 BUG_ON(b->written && !seq); in __bch2_btree_node_write()
2063 if (!b->written) { in __bch2_btree_node_write()
2065 *bn = *b->data; in __bch2_btree_node_write()
2069 bne->keys = b->data->keys; in __bch2_btree_node_write()
2077 unwritten_whiteouts_start(b), in __bch2_btree_node_write()
2078 unwritten_whiteouts_end(b)); in __bch2_btree_node_write()
2081 b->whiteout_u64s = 0; in __bch2_btree_node_write()
2086 BUG_ON(!b->written && i->u64s != b->data->keys.u64s); in __bch2_btree_node_write()
2091 if (b->written && !i->u64s) in __bch2_btree_node_write()
2097 if (!b->written && in __bch2_btree_node_write()
2098 b->key.k.type == KEY_TYPE_btree_ptr_v2) in __bch2_btree_node_write()
2099 BUG_ON(btree_ptr_sectors_written(&b->key) != sectors_to_write); in __bch2_btree_node_write()
2104 BUG_ON(b->written + sectors_to_write > btree_sectors(c)); in __bch2_btree_node_write()
2106 BUG_ON(i->seq != b->data->keys.seq); in __bch2_btree_node_write()
2109 SET_BSET_OFFSET(i, b->written); in __bch2_btree_node_write()
2121 validate_bset_for_write(c, b, i, sectors_to_write)) in __bch2_btree_node_write()
2124 ret = bset_encrypt(c, i, b->written << 9); in __bch2_btree_node_write()
2129 nonce = btree_nonce(i, b->written << 9); in __bch2_btree_node_write()
2138 validate_bset_for_write(c, b, i, sectors_to_write)) in __bch2_btree_node_write()
2156 * Make sure to update b->written so bch2_btree_init_next() doesn't in __bch2_btree_node_write()
2163 trace_and_count(c, btree_node_write, b, bytes_to_write, sectors_to_write); in __bch2_btree_node_write()
2174 wbio->sector_offset = b->written; in __bch2_btree_node_write()
2177 wbio->wbio.first_btree_write = !b->written; in __bch2_btree_node_write()
2179 wbio->wbio.bio.bi_private = b; in __bch2_btree_node_write()
2183 bkey_copy(&wbio->key, &b->key); in __bch2_btree_node_write()
2185 b->written += sectors_to_write; in __bch2_btree_node_write()
2189 cpu_to_le16(b->written); in __bch2_btree_node_write()
2198 set_btree_node_noevict(b); in __bch2_btree_node_write()
2199 b->written += sectors_to_write; in __bch2_btree_node_write()
2202 __btree_node_write_done(c, b); in __bch2_btree_node_write()
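__bch2_btree_node_write() (lines 1952–2202) gathers the unwritten bsets plus sorted whiteouts into a contiguous buffer, validates, encrypts, and checksums it, rounds the size up to the block size, and submits the bio; b->written advances by sectors_to_write even on the error path so bch2_btree_init_next() stays consistent (see the comment at line 2156). A sketch of the size rounding and zero padding, assuming 512-byte sectors and a power-of-two block size:

```c
#include <stddef.h>
#include <string.h>

#define SECTOR_SIZE 512u	/* assumption for this sketch */

static unsigned sectors_to_write(size_t bytes, unsigned block_sectors)
{
	unsigned sectors = (bytes + SECTOR_SIZE - 1) / SECTOR_SIZE;

	/* round up to the filesystem block size; block_sectors must be
	 * a power of two for this mask trick */
	return (sectors + block_sectors - 1) & ~(block_sectors - 1);
}

static void pad_block(char *buf, size_t bytes, unsigned sectors)
{
	/* zero the padding so reads never see stale bytes */
	memset(buf + bytes, 0, (size_t)sectors * SECTOR_SIZE - bytes);
}
```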
2208 bool bch2_btree_post_write_cleanup(struct bch_fs *c, struct btree *b) in bch2_btree_post_write_cleanup() argument
2214 if (!btree_node_just_written(b)) in bch2_btree_post_write_cleanup()
2217 BUG_ON(b->whiteout_u64s); in bch2_btree_post_write_cleanup()
2219 clear_btree_node_just_written(b); in bch2_btree_post_write_cleanup()
2234 if (b->nsets > 1) { in bch2_btree_post_write_cleanup()
2235 btree_node_sort(c, b, 0, b->nsets, true); in bch2_btree_post_write_cleanup()
2238 invalidated_iter = bch2_drop_whiteouts(b, COMPACT_ALL); in bch2_btree_post_write_cleanup()
2241 for_each_bset(b, t) in bch2_btree_post_write_cleanup()
2242 set_needs_whiteout(bset(b, t), true); in bch2_btree_post_write_cleanup()
2244 bch2_btree_verify(c, b); in bch2_btree_post_write_cleanup()
2250 BUG_ON((void *) btree_bkey_last(b, bset_tree_last(b)) > write_block(b)); in bch2_btree_post_write_cleanup()
2252 bne = want_new_bset(c, b); in bch2_btree_post_write_cleanup()
2254 bch2_bset_init_next(b, bne); in bch2_btree_post_write_cleanup()
2256 bch2_btree_build_aux_trees(b); in bch2_btree_post_write_cleanup()
2264 void bch2_btree_node_write(struct bch_fs *c, struct btree *b, in bch2_btree_node_write() argument
2270 six_lock_tryupgrade(&b->c.lock))) { in bch2_btree_node_write()
2271 __bch2_btree_node_write(c, b, flags); in bch2_btree_node_write()
2274 if (btree_node_just_written(b) && in bch2_btree_node_write()
2275 six_trylock_write(&b->c.lock)) { in bch2_btree_node_write()
2276 bch2_btree_post_write_cleanup(c, b); in bch2_btree_node_write()
2277 six_unlock_write(&b->c.lock); in bch2_btree_node_write()
2281 six_lock_downgrade(&b->c.lock); in bch2_btree_node_write()
2283 __bch2_btree_node_write(c, b, flags); in bch2_btree_node_write()
2285 btree_node_just_written(b)) in bch2_btree_node_write()
2286 bch2_btree_post_write_cleanup(c, b); in bch2_btree_node_write()
2294 struct btree *b; in __bch2_btree_flush_all() local
2299 for_each_cached_btree(b, c, tbl, i, pos) in __bch2_btree_flush_all()
2300 if (test_bit(flag, &b->flags)) { in __bch2_btree_flush_all()
2302 wait_on_bit_io(&b->flags, flag, TASK_UNINTERRUPTIBLE); in __bch2_btree_flush_all()