Lines matching references to b — identifier cross-reference output for the bcache btree code. Each entry shows the source line number, the matched line, the enclosing function where applicable, and whether b is an argument, local, or member there.

103 #define insert_lock(s, b)	((b)->level <= (s)->lock)  argument
106 static inline struct bset *write_block(struct btree *b) in write_block() argument
108 return ((void *) btree_bset_first(b)) + b->written * block_bytes(b->c->cache); in write_block()
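
write_block() above is plain pointer arithmetic: the next unwritten bset sits b->written whole blocks past the node's first bset. A minimal, self-contained illustration of the same calculation, with a made-up 4 KiB block size (the helper name below is hypothetical, not a bcache API):

#include <stddef.h>
#include <stdio.h>

/* Toy model of write_block(): step blocks_written whole blocks past the
 * first bset of a node buffer. All sizes are invented for the example. */
static void *next_unwritten_bset(void *first_bset, unsigned int blocks_written,
				 unsigned int block_bytes)
{
	return (char *)first_bset + (size_t)blocks_written * block_bytes;
}

int main(void)
{
	char node[16 * 4096];	/* pretend: 16-block btree node, 4 KiB blocks */
	void *next = next_unwritten_bset(node, 3, 4096);

	printf("next bset starts at byte offset %td\n", (char *)next - node);
	return 0;
}
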
111 static void bch_btree_init_next(struct btree *b) in bch_btree_init_next() argument
114 if (b->level && b->keys.nsets) in bch_btree_init_next()
115 bch_btree_sort(&b->keys, &b->c->sort); in bch_btree_init_next()
117 bch_btree_sort_lazy(&b->keys, &b->c->sort); in bch_btree_init_next()
119 if (b->written < btree_blocks(b)) in bch_btree_init_next()
120 bch_bset_init_next(&b->keys, write_block(b), in bch_btree_init_next()
121 bset_magic(&b->c->cache->sb)); in bch_btree_init_next()
138 static uint64_t btree_csum_set(struct btree *b, struct bset *i) in btree_csum_set() argument
140 uint64_t crc = b->key.ptr[0]; in btree_csum_set()
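
Only the seed of btree_csum_set() is matched above: the checksum starts from the node's first bucket pointer, which ties the value to the bucket the node lives in. A hedged sketch of how the rest of the function plausibly proceeds — bch_crc64_update() and bset_bkey_last() are existing bcache helpers, but their use here is my reconstruction, not quoted source:

static uint64_t btree_csum_set_sketch(struct btree *b, struct bset *i)
{
	uint64_t crc = b->key.ptr[0];		/* seed: first bucket pointer */
	void *data = (void *) i + 8;		/* skip the csum field itself */
	void *end  = bset_bkey_last(i);		/* end of header + keys */

	/* assumption: crc64 over the bset body, with the usual final inversion */
	crc = bch_crc64_update(crc, data, end - data);
	return crc ^ 0xffffffffffffffffULL;
}
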
147 void bch_btree_node_read_done(struct btree *b) in bch_btree_node_read_done() argument
150 struct bset *i = btree_bset_first(b); in bch_btree_node_read_done()
158 iter = mempool_alloc(&b->c->fill_iter, GFP_NOIO); in bch_btree_node_read_done()
159 iter->size = b->c->cache->sb.bucket_size / b->c->cache->sb.block_size; in bch_btree_node_read_done()
163 iter->b = &b->keys; in bch_btree_node_read_done()
170 b->written < btree_blocks(b) && i->seq == b->keys.set[0].data->seq; in bch_btree_node_read_done()
171 i = write_block(b)) { in bch_btree_node_read_done()
177 if (b->written + set_blocks(i, block_bytes(b->c->cache)) > in bch_btree_node_read_done()
178 btree_blocks(b)) in bch_btree_node_read_done()
182 if (i->magic != bset_magic(&b->c->cache->sb)) in bch_btree_node_read_done()
192 if (i->csum != btree_csum_set(b, i)) in bch_btree_node_read_done()
198 if (i != b->keys.set[0].data && !i->keys) in bch_btree_node_read_done()
203 b->written += set_blocks(i, block_bytes(b->c->cache)); in bch_btree_node_read_done()
207 for (i = write_block(b); in bch_btree_node_read_done()
208 bset_sector_offset(&b->keys, i) < KEY_SIZE(&b->key); in bch_btree_node_read_done()
209 i = ((void *) i) + block_bytes(b->c->cache)) in bch_btree_node_read_done()
210 if (i->seq == b->keys.set[0].data->seq) in bch_btree_node_read_done()
213 bch_btree_sort_and_fix_extents(&b->keys, iter, &b->c->sort); in bch_btree_node_read_done()
215 i = b->keys.set[0].data; in bch_btree_node_read_done()
217 if (b->keys.set[0].size && in bch_btree_node_read_done()
218 bkey_cmp(&b->key, &b->keys.set[0].end) < 0) in bch_btree_node_read_done()
221 if (b->written < btree_blocks(b)) in bch_btree_node_read_done()
222 bch_bset_init_next(&b->keys, write_block(b), in bch_btree_node_read_done()
223 bset_magic(&b->c->cache->sb)); in bch_btree_node_read_done()
225 mempool_free(iter, &b->c->fill_iter); in bch_btree_node_read_done()
228 set_btree_node_io_error(b); in bch_btree_node_read_done()
229 bch_cache_set_error(b->c, "%s at bucket %zu, block %u, %u keys", in bch_btree_node_read_done()
230 err, PTR_BUCKET_NR(b->c, &b->key, 0), in bch_btree_node_read_done()
231 bset_block_offset(b, i), i->keys); in bch_btree_node_read_done()
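
bch_btree_node_read_done() walks the node block by block and rejects any bset whose magic, checksum, or size is off; every bset belonging to the node must also carry the seq of the first one. For orientation, a sketch of the on-disk bset header those checks inspect — field names follow the bcache on-disk format as I understand it, and this is a standalone copy, not the kernel definition:

#include <stdint.h>

/* Sketch of the on-disk bset header validated above. */
struct bset_sketch {
	uint64_t csum;		/* compared against btree_csum_set()           */
	uint64_t magic;		/* compared against bset_magic(&sb)            */
	uint64_t seq;		/* all bsets of one node share the first's seq */
	uint32_t version;
	uint32_t keys;		/* number of u64s of bkeys that follow         */
	uint64_t start[];	/* the keys themselves                         */
};
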
242 static void bch_btree_node_read(struct btree *b) in bch_btree_node_read() argument
248 trace_bcache_btree_read(b); in bch_btree_node_read()
252 bio = bch_bbio_alloc(b->c); in bch_btree_node_read()
253 bio->bi_iter.bi_size = KEY_SIZE(&b->key) << 9; in bch_btree_node_read()
258 bch_bio_map(bio, b->keys.set[0].data); in bch_btree_node_read()
260 bch_submit_bbio(bio, b->c, &b->key, 0); in bch_btree_node_read()
264 set_btree_node_io_error(b); in bch_btree_node_read()
266 bch_bbio_free(bio, b->c); in bch_btree_node_read()
268 if (btree_node_io_error(b)) in bch_btree_node_read()
271 bch_btree_node_read_done(b); in bch_btree_node_read()
272 bch_time_stats_update(&b->c->btree_read_time, start_time); in bch_btree_node_read()
276 bch_cache_set_error(b->c, "io error reading bucket %zu", in bch_btree_node_read()
277 PTR_BUCKET_NR(b->c, &b->key, 0)); in bch_btree_node_read()
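
The bio sizing in bch_btree_node_read() relies on KEY_SIZE() being a sector count, so shifting left by 9 converts it to bytes. A small worked example with an invented node size:

#include <stdio.h>

/* Worked example of bi_size = KEY_SIZE(&b->key) << 9: key sizes are in
 * 512-byte sectors, so << 9 yields bytes. The value is made up. */
int main(void)
{
	unsigned int key_size_sectors = 256;		/* e.g. a 128 KiB btree node */
	unsigned int bio_bytes = key_size_sectors << 9;

	printf("bi_size = %u bytes\n", bio_bytes);	/* 131072 */
	return 0;
}
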
280 static void btree_complete_write(struct btree *b, struct btree_write *w) in btree_complete_write() argument
283 !atomic_sub_return(w->prio_blocked, &b->c->prio_blocked)) in btree_complete_write()
284 wake_up_allocators(b->c); in btree_complete_write()
288 __closure_wake_up(&b->c->journal.wait); in btree_complete_write()
297 closure_type(b, struct btree, io); in CLOSURE_CALLBACK()
299 up(&b->io_mutex); in CLOSURE_CALLBACK()
304 closure_type(b, struct btree, io); in CLOSURE_CALLBACK()
305 struct btree_write *w = btree_prev_write(b); in CLOSURE_CALLBACK()
307 bch_bbio_free(b->bio, b->c); in CLOSURE_CALLBACK()
308 b->bio = NULL; in CLOSURE_CALLBACK()
309 btree_complete_write(b, w); in CLOSURE_CALLBACK()
311 if (btree_node_dirty(b)) in CLOSURE_CALLBACK()
312 queue_delayed_work(btree_io_wq, &b->work, 30 * HZ); in CLOSURE_CALLBACK()
319 closure_type(b, struct btree, io); in CLOSURE_CALLBACK()
321 bio_free_pages(b->bio); in CLOSURE_CALLBACK()
328 struct btree *b = container_of(cl, struct btree, io); in btree_node_write_endio() local
331 set_btree_node_io_error(b); in btree_node_write_endio()
333 bch_bbio_count_io_errors(b->c, bio, bio->bi_status, "writing btree"); in btree_node_write_endio()
337 static void do_btree_node_write(struct btree *b) in do_btree_node_write() argument
339 struct closure *cl = &b->io; in do_btree_node_write()
340 struct bset *i = btree_bset_last(b); in do_btree_node_write()
344 i->csum = btree_csum_set(b, i); in do_btree_node_write()
346 BUG_ON(b->bio); in do_btree_node_write()
347 b->bio = bch_bbio_alloc(b->c); in do_btree_node_write()
349 b->bio->bi_end_io = btree_node_write_endio; in do_btree_node_write()
350 b->bio->bi_private = cl; in do_btree_node_write()
351 b->bio->bi_iter.bi_size = roundup(set_bytes(i), block_bytes(b->c->cache)); in do_btree_node_write()
352 b->bio->bi_opf = REQ_OP_WRITE | REQ_META | REQ_FUA; in do_btree_node_write()
353 bch_bio_map(b->bio, i); in do_btree_node_write()
370 bkey_copy(&k.key, &b->key); in do_btree_node_write()
372 bset_sector_offset(&b->keys, i)); in do_btree_node_write()
374 if (!bch_bio_alloc_pages(b->bio, __GFP_NOWARN|GFP_NOWAIT)) { in do_btree_node_write()
379 bio_for_each_segment_all(bv, b->bio, iter_all) { in do_btree_node_write()
384 bch_submit_bbio(b->bio, b->c, &k.key, 0); in do_btree_node_write()
392 b->bio->bi_vcnt = 0; in do_btree_node_write()
393 bch_bio_map(b->bio, i); in do_btree_node_write()
395 bch_submit_bbio(b->bio, b->c, &k.key, 0); in do_btree_node_write()
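
Two submission paths show up in do_btree_node_write(): the preferred one copies the bset into freshly allocated bio pages, so the in-memory node can keep being appended to while the write is in flight, and the fallback (taken when the GFP_NOWAIT page allocation fails) simply maps the bset memory itself into the bio. A sketch of the page-by-page bounce copy, assuming the usual bvec_virt() accessor — my reconstruction, not quoted source:

	/* sketch: copy the page-aligned region holding bset i into the
	 * bounce pages just allocated for b->bio */
	void *addr = (void *) ((unsigned long) i & ~(PAGE_SIZE - 1));
	struct bio_vec *bv;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bv, b->bio, iter_all) {
		memcpy(bvec_virt(bv), addr, PAGE_SIZE);
		addr += PAGE_SIZE;
	}
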
402 void __bch_btree_node_write(struct btree *b, struct closure *parent) in __bch_btree_node_write() argument
404 struct bset *i = btree_bset_last(b); in __bch_btree_node_write()
406 lockdep_assert_held(&b->write_lock); in __bch_btree_node_write()
408 trace_bcache_btree_write(b); in __bch_btree_node_write()
411 BUG_ON(b->written >= btree_blocks(b)); in __bch_btree_node_write()
412 BUG_ON(b->written && !i->keys); in __bch_btree_node_write()
413 BUG_ON(btree_bset_first(b)->seq != i->seq); in __bch_btree_node_write()
414 bch_check_keys(&b->keys, "writing"); in __bch_btree_node_write()
416 cancel_delayed_work(&b->work); in __bch_btree_node_write()
419 down(&b->io_mutex); in __bch_btree_node_write()
420 closure_init(&b->io, parent ?: &b->c->cl); in __bch_btree_node_write()
422 clear_bit(BTREE_NODE_dirty, &b->flags); in __bch_btree_node_write()
423 change_bit(BTREE_NODE_write_idx, &b->flags); in __bch_btree_node_write()
425 do_btree_node_write(b); in __bch_btree_node_write()
427 atomic_long_add(set_blocks(i, block_bytes(b->c->cache)) * b->c->cache->sb.block_size, in __bch_btree_node_write()
428 &b->c->cache->btree_sectors_written); in __bch_btree_node_write()
430 b->written += set_blocks(i, block_bytes(b->c->cache)); in __bch_btree_node_write()
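
The accounting at the end of __bch_btree_node_write() converts the bset just submitted into sectors: set_blocks() rounds its byte size up to whole blocks, and sb.block_size is expressed in 512-byte sectors. A small worked example with invented sizes:

#include <stdio.h>

/* Worked example of the btree_sectors_written accounting: round the bset
 * up to whole blocks, then multiply by the block size in sectors.
 * All numbers are made up for illustration. */
int main(void)
{
	unsigned int set_bytes   = 5000;	/* header + keys of this bset */
	unsigned int block_bytes = 4096;	/* block_bytes(cache)         */
	unsigned int block_size  = 8;		/* sb.block_size, in sectors  */

	unsigned int set_blocks = (set_bytes + block_bytes - 1) / block_bytes;

	printf("set_blocks = %u, sectors accounted = %u\n",
	       set_blocks, set_blocks * block_size);	/* 2 and 16 */
	return 0;
}
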
433 void bch_btree_node_write(struct btree *b, struct closure *parent) in bch_btree_node_write() argument
435 unsigned int nsets = b->keys.nsets; in bch_btree_node_write()
437 lockdep_assert_held(&b->lock); in bch_btree_node_write()
439 __bch_btree_node_write(b, parent); in bch_btree_node_write()
445 if (nsets && !b->keys.nsets) in bch_btree_node_write()
446 bch_btree_verify(b); in bch_btree_node_write()
448 bch_btree_init_next(b); in bch_btree_node_write()
451 static void bch_btree_node_write_sync(struct btree *b) in bch_btree_node_write_sync() argument
457 mutex_lock(&b->write_lock); in bch_btree_node_write_sync()
458 bch_btree_node_write(b, &cl); in bch_btree_node_write_sync()
459 mutex_unlock(&b->write_lock); in bch_btree_node_write_sync()
466 struct btree *b = container_of(to_delayed_work(w), struct btree, work); in btree_node_write_work() local
468 mutex_lock(&b->write_lock); in btree_node_write_work()
469 if (btree_node_dirty(b)) in btree_node_write_work()
470 __bch_btree_node_write(b, NULL); in btree_node_write_work()
471 mutex_unlock(&b->write_lock); in btree_node_write_work()
474 static void bch_btree_leaf_dirty(struct btree *b, atomic_t *journal_ref) in bch_btree_leaf_dirty() argument
476 struct bset *i = btree_bset_last(b); in bch_btree_leaf_dirty()
477 struct btree_write *w = btree_current_write(b); in bch_btree_leaf_dirty()
479 lockdep_assert_held(&b->write_lock); in bch_btree_leaf_dirty()
481 BUG_ON(!b->written); in bch_btree_leaf_dirty()
484 if (!btree_node_dirty(b)) in bch_btree_leaf_dirty()
485 queue_delayed_work(btree_io_wq, &b->work, 30 * HZ); in bch_btree_leaf_dirty()
487 set_btree_node_dirty(b); in bch_btree_leaf_dirty()
496 journal_pin_cmp(b->c, w->journal, journal_ref)) { in bch_btree_leaf_dirty()
510 bch_btree_node_write(b, NULL); in bch_btree_leaf_dirty()
523 static void mca_data_free(struct btree *b) in mca_data_free() argument
525 BUG_ON(b->io_mutex.count != 1); in mca_data_free()
527 bch_btree_keys_free(&b->keys); in mca_data_free()
529 b->c->btree_cache_used--; in mca_data_free()
530 list_move(&b->list, &b->c->btree_cache_freed); in mca_data_free()
533 static void mca_bucket_free(struct btree *b) in mca_bucket_free() argument
535 BUG_ON(btree_node_dirty(b)); in mca_bucket_free()
537 b->key.ptr[0] = 0; in mca_bucket_free()
538 hlist_del_init_rcu(&b->hash); in mca_bucket_free()
539 list_move(&b->list, &b->c->btree_cache_freeable); in mca_bucket_free()
547 static void mca_data_alloc(struct btree *b, struct bkey *k, gfp_t gfp) in mca_data_alloc() argument
549 if (!bch_btree_keys_alloc(&b->keys, in mca_data_alloc()
551 ilog2(b->c->btree_pages), in mca_data_alloc()
554 b->c->btree_cache_used++; in mca_data_alloc()
555 list_move(&b->list, &b->c->btree_cache); in mca_data_alloc()
557 list_move(&b->list, &b->c->btree_cache_freed); in mca_data_alloc()
566 const struct btree *b = container_of(_b, struct btree, lock.dep_map); in btree_lock_cmp_fn() local
568 return -cmp_int(a->level, b->level) ?: bkey_cmp(&a->key, &b->key); in btree_lock_cmp_fn()
573 const struct btree *b = container_of(map, struct btree, lock.dep_map); in btree_lock_print_fn() local
575 printk(KERN_CONT " l=%u %llu:%llu", b->level, in btree_lock_print_fn()
576 KEY_INODE(&b->key), KEY_OFFSET(&b->key)); in btree_lock_print_fn()
587 struct btree *b = kzalloc(sizeof(struct btree), gfp); in mca_bucket_alloc() local
589 if (!b) in mca_bucket_alloc()
592 init_rwsem(&b->lock); in mca_bucket_alloc()
593 lock_set_cmp_fn(&b->lock, btree_lock_cmp_fn, btree_lock_print_fn); in mca_bucket_alloc()
594 mutex_init(&b->write_lock); in mca_bucket_alloc()
595 lockdep_set_novalidate_class(&b->write_lock); in mca_bucket_alloc()
596 INIT_LIST_HEAD(&b->list); in mca_bucket_alloc()
597 INIT_DELAYED_WORK(&b->work, btree_node_write_work); in mca_bucket_alloc()
598 b->c = c; in mca_bucket_alloc()
599 sema_init(&b->io_mutex, 1); in mca_bucket_alloc()
601 mca_data_alloc(b, k, gfp); in mca_bucket_alloc()
602 return b; in mca_bucket_alloc()
605 static int mca_reap(struct btree *b, unsigned int min_order, bool flush) in mca_reap() argument
610 lockdep_assert_held(&b->c->bucket_lock); in mca_reap()
612 if (!down_write_trylock(&b->lock)) in mca_reap()
615 BUG_ON(btree_node_dirty(b) && !b->keys.set[0].data); in mca_reap()
617 if (b->keys.page_order < min_order) in mca_reap()
621 if (btree_node_dirty(b)) in mca_reap()
624 if (down_trylock(&b->io_mutex)) in mca_reap()
626 up(&b->io_mutex); in mca_reap()
635 mutex_lock(&b->write_lock); in mca_reap()
641 if (btree_node_journal_flush(b)) { in mca_reap()
642 pr_debug("bnode %p is flushing by journal, retry\n", b); in mca_reap()
643 mutex_unlock(&b->write_lock); in mca_reap()
648 if (btree_node_dirty(b)) in mca_reap()
649 __bch_btree_node_write(b, &cl); in mca_reap()
650 mutex_unlock(&b->write_lock); in mca_reap()
655 down(&b->io_mutex); in mca_reap()
656 up(&b->io_mutex); in mca_reap()
660 rw_unlock(true, b); in mca_reap()
668 struct btree *b, *t; in bch_mca_scan() local
699 list_for_each_entry_safe_reverse(b, t, &c->btree_cache_freeable, list) { in bch_mca_scan()
703 if (!mca_reap(b, 0, false)) { in bch_mca_scan()
704 mca_data_free(b); in bch_mca_scan()
705 rw_unlock(true, b); in bch_mca_scan()
712 list_for_each_entry_safe_reverse(b, t, &c->btree_cache, list) { in bch_mca_scan()
716 if (!mca_reap(b, 0, false)) { in bch_mca_scan()
717 mca_bucket_free(b); in bch_mca_scan()
718 mca_data_free(b); in bch_mca_scan()
719 rw_unlock(true, b); in bch_mca_scan()
747 struct btree *b; in bch_btree_cache_free() local
768 b = list_first_entry(&c->btree_cache, struct btree, list); in bch_btree_cache_free()
775 if (btree_node_dirty(b)) { in bch_btree_cache_free()
776 btree_complete_write(b, btree_current_write(b)); in bch_btree_cache_free()
777 clear_bit(BTREE_NODE_dirty, &b->flags); in bch_btree_cache_free()
779 mca_data_free(b); in bch_btree_cache_free()
783 b = list_first_entry(&c->btree_cache_freed, in bch_btree_cache_free()
785 list_del(&b->list); in bch_btree_cache_free()
786 cancel_delayed_work_sync(&b->work); in bch_btree_cache_free()
787 kfree(b); in bch_btree_cache_free()
854 struct btree *b; in mca_find() local
857 hlist_for_each_entry_rcu(b, mca_hash(c, k), hash) in mca_find()
858 if (PTR_HASH(c, &b->key) == PTR_HASH(c, k)) in mca_find()
860 b = NULL; in mca_find()
863 return b; in mca_find()
886 struct btree *b; in mca_cannibalize() local
893 list_for_each_entry_reverse(b, &c->btree_cache, list) in mca_cannibalize()
894 if (!mca_reap(b, btree_order(k), false)) in mca_cannibalize()
895 return b; in mca_cannibalize()
897 list_for_each_entry_reverse(b, &c->btree_cache, list) in mca_cannibalize()
898 if (!mca_reap(b, btree_order(k), true)) in mca_cannibalize()
899 return b; in mca_cannibalize()
924 struct btree *b; in mca_alloc() local
936 list_for_each_entry(b, &c->btree_cache_freeable, list) in mca_alloc()
937 if (!mca_reap(b, btree_order(k), false)) in mca_alloc()
943 list_for_each_entry(b, &c->btree_cache_freed, list) in mca_alloc()
944 if (!mca_reap(b, 0, false)) { in mca_alloc()
945 mca_data_alloc(b, k, __GFP_NOWARN|GFP_NOIO); in mca_alloc()
946 if (!b->keys.set[0].data) in mca_alloc()
952 b = mca_bucket_alloc(c, k, __GFP_NOWARN|GFP_NOIO); in mca_alloc()
953 if (!b) in mca_alloc()
956 BUG_ON(!down_write_trylock(&b->lock)); in mca_alloc()
957 if (!b->keys.set->data) in mca_alloc()
960 BUG_ON(b->io_mutex.count != 1); in mca_alloc()
962 bkey_copy(&b->key, k); in mca_alloc()
963 list_move(&b->list, &c->btree_cache); in mca_alloc()
964 hlist_del_init_rcu(&b->hash); in mca_alloc()
965 hlist_add_head_rcu(&b->hash, mca_hash(c, k)); in mca_alloc()
967 lock_set_subclass(&b->lock.dep_map, level + 1, _THIS_IP_); in mca_alloc()
968 b->parent = (void *) ~0UL; in mca_alloc()
969 b->flags = 0; in mca_alloc()
970 b->written = 0; in mca_alloc()
971 b->level = level; in mca_alloc()
973 if (!b->level) in mca_alloc()
974 bch_btree_keys_init(&b->keys, &bch_extent_keys_ops, in mca_alloc()
975 &b->c->expensive_debug_checks); in mca_alloc()
977 bch_btree_keys_init(&b->keys, &bch_btree_keys_ops, in mca_alloc()
978 &b->c->expensive_debug_checks); in mca_alloc()
980 return b; in mca_alloc()
982 if (b) in mca_alloc()
983 rw_unlock(true, b); in mca_alloc()
985 b = mca_cannibalize(c, op, k); in mca_alloc()
986 if (!IS_ERR(b)) in mca_alloc()
989 return b; in mca_alloc()
1009 struct btree *b; in bch_btree_node_get() local
1013 b = mca_find(c, k); in bch_btree_node_get()
1015 if (!b) { in bch_btree_node_get()
1020 b = mca_alloc(c, op, k, level); in bch_btree_node_get()
1023 if (!b) in bch_btree_node_get()
1025 if (IS_ERR(b)) in bch_btree_node_get()
1026 return b; in bch_btree_node_get()
1028 bch_btree_node_read(b); in bch_btree_node_get()
1031 downgrade_write(&b->lock); in bch_btree_node_get()
1033 rw_lock(write, b, level); in bch_btree_node_get()
1034 if (PTR_HASH(c, &b->key) != PTR_HASH(c, k)) { in bch_btree_node_get()
1035 rw_unlock(write, b); in bch_btree_node_get()
1038 BUG_ON(b->level != level); in bch_btree_node_get()
1041 if (btree_node_io_error(b)) { in bch_btree_node_get()
1042 rw_unlock(write, b); in bch_btree_node_get()
1046 BUG_ON(!b->written); in bch_btree_node_get()
1048 b->parent = parent; in bch_btree_node_get()
1050 for (; i <= b->keys.nsets && b->keys.set[i].size; i++) { in bch_btree_node_get()
1051 prefetch(b->keys.set[i].tree); in bch_btree_node_get()
1052 prefetch(b->keys.set[i].data); in bch_btree_node_get()
1055 for (; i <= b->keys.nsets; i++) in bch_btree_node_get()
1056 prefetch(b->keys.set[i].data); in bch_btree_node_get()
1058 return b; in bch_btree_node_get()
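
A hypothetical caller sketch for bch_btree_node_get(), following the same argument order visible at the recursion site in btree_gc_recurse() further down; error handling and locking are reduced to the essentials:

	/* hypothetical caller: fetch (or read in) the child a key points at,
	 * use it, then drop the lock bch_btree_node_get() returned it with */
	struct btree *child = bch_btree_node_get(b->c, op, k, b->level - 1,
						 write, b);
	if (IS_ERR(child))
		return PTR_ERR(child);

	/* ... walk or modify the child here ... */

	rw_unlock(write, child);
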
1063 struct btree *b; in btree_node_prefetch() local
1066 b = mca_alloc(parent->c, NULL, k, parent->level - 1); in btree_node_prefetch()
1069 if (!IS_ERR_OR_NULL(b)) { in btree_node_prefetch()
1070 b->parent = parent; in btree_node_prefetch()
1071 bch_btree_node_read(b); in btree_node_prefetch()
1072 rw_unlock(true, b); in btree_node_prefetch()
1078 static void btree_node_free(struct btree *b) in btree_node_free() argument
1080 trace_bcache_btree_node_free(b); in btree_node_free()
1082 BUG_ON(b == b->c->root); in btree_node_free()
1085 mutex_lock(&b->write_lock); in btree_node_free()
1092 if (btree_node_journal_flush(b)) { in btree_node_free()
1093 mutex_unlock(&b->write_lock); in btree_node_free()
1094 pr_debug("bnode %p journal_flush set, retry\n", b); in btree_node_free()
1099 if (btree_node_dirty(b)) { in btree_node_free()
1100 btree_complete_write(b, btree_current_write(b)); in btree_node_free()
1101 clear_bit(BTREE_NODE_dirty, &b->flags); in btree_node_free()
1104 mutex_unlock(&b->write_lock); in btree_node_free()
1106 cancel_delayed_work(&b->work); in btree_node_free()
1108 mutex_lock(&b->c->bucket_lock); in btree_node_free()
1109 bch_bucket_free(b->c, &b->key); in btree_node_free()
1110 mca_bucket_free(b); in btree_node_free()
1111 mutex_unlock(&b->c->bucket_lock); in btree_node_free()
1123 struct btree *b; in __bch_btree_node_alloc() local
1128 b = ERR_PTR(-EAGAIN); in __bch_btree_node_alloc()
1135 b = mca_alloc(c, op, &k.key, level); in __bch_btree_node_alloc()
1136 if (IS_ERR(b)) in __bch_btree_node_alloc()
1139 if (!b) { in __bch_btree_node_alloc()
1145 b->parent = parent; in __bch_btree_node_alloc()
1146 bch_bset_init_next(&b->keys, b->keys.set->data, bset_magic(&b->c->cache->sb)); in __bch_btree_node_alloc()
1150 trace_bcache_btree_node_alloc(b); in __bch_btree_node_alloc()
1151 return b; in __bch_btree_node_alloc()
1158 return b; in __bch_btree_node_alloc()
1168 static struct btree *btree_node_alloc_replacement(struct btree *b, in btree_node_alloc_replacement() argument
1171 struct btree *n = bch_btree_node_alloc(b->c, op, b->level, b->parent); in btree_node_alloc_replacement()
1175 bch_btree_sort_into(&b->keys, &n->keys, &b->c->sort); in btree_node_alloc_replacement()
1176 bkey_copy_key(&n->key, &b->key); in btree_node_alloc_replacement()
1183 static void make_btree_freeing_key(struct btree *b, struct bkey *k) in make_btree_freeing_key() argument
1187 mutex_lock(&b->c->bucket_lock); in make_btree_freeing_key()
1189 atomic_inc(&b->c->prio_blocked); in make_btree_freeing_key()
1191 bkey_copy(k, &b->key); in make_btree_freeing_key()
1196 bch_inc_gen(b->c->cache, in make_btree_freeing_key()
1197 PTR_BUCKET(b->c, &b->key, i))); in make_btree_freeing_key()
1199 mutex_unlock(&b->c->bucket_lock); in make_btree_freeing_key()
1202 static int btree_check_reserve(struct btree *b, struct btree_op *op) in btree_check_reserve() argument
1204 struct cache_set *c = b->c; in btree_check_reserve()
1206 unsigned int reserve = (c->root->level - b->level) * 2 + 1; in btree_check_reserve()
1220 return mca_cannibalize_lock(b->c, op); in btree_check_reserve()
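
The reserve computed in btree_check_reserve() grows with how far the node sits below the root, leaving enough free buckets for a split to cascade upward. A worked example of the formula with invented levels:

#include <stdio.h>

/* Worked example of reserve = (root->level - b->level) * 2 + 1. */
int main(void)
{
	unsigned int root_level = 3, node_level = 0;	/* a leaf, root at level 3 */
	unsigned int reserve = (root_level - node_level) * 2 + 1;

	printf("free buckets required before proceeding: %u\n", reserve);	/* 7 */
	return 0;
}
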
1277 #define btree_mark_key(b, k) __bch_btree_mark_key(b->c, b->level, k) argument
1286 struct bucket *b = PTR_BUCKET(c, k, i); in bch_initial_mark_key() local
1288 b->gen = PTR_GEN(k, i); in bch_initial_mark_key()
1291 b->prio = BTREE_PRIO; in bch_initial_mark_key()
1292 else if (!level && b->prio == BTREE_PRIO) in bch_initial_mark_key()
1293 b->prio = INITIAL_PRIO; in bch_initial_mark_key()
1304 static bool btree_gc_mark_node(struct btree *b, struct gc_stat *gc) in btree_gc_mark_node() argument
1314 for_each_key_filter(&b->keys, k, &iter, bch_ptr_invalid) { in btree_gc_mark_node()
1315 stale = max(stale, btree_mark_key(b, k)); in btree_gc_mark_node()
1318 if (bch_ptr_bad(&b->keys, k)) in btree_gc_mark_node()
1328 for (t = b->keys.set; t <= &b->keys.set[b->keys.nsets]; t++) in btree_gc_mark_node()
1330 bset_written(&b->keys, t) && in btree_gc_mark_node()
1331 bkey_cmp(&b->key, &t->end) < 0, in btree_gc_mark_node()
1332 b, "found short btree key in gc"); in btree_gc_mark_node()
1334 if (b->c->gc_always_rewrite) in btree_gc_mark_node()
1349 struct btree *b; member
1353 static int bch_btree_insert_node(struct btree *b, struct btree_op *op,
1358 static int btree_gc_coalesce(struct btree *b, struct btree_op *op, in btree_gc_coalesce() argument
1369 if (btree_check_reserve(b, NULL)) in btree_gc_coalesce()
1375 while (nodes < GC_MERGE_NODES && !IS_ERR_OR_NULL(r[nodes].b)) in btree_gc_coalesce()
1378 blocks = btree_default_blocks(b->c) * 2 / 3; in btree_gc_coalesce()
1381 __set_blocks(b->keys.set[0].data, keys, in btree_gc_coalesce()
1382 block_bytes(b->c->cache)) > blocks * (nodes - 1)) in btree_gc_coalesce()
1386 new_nodes[i] = btree_node_alloc_replacement(r[i].b, NULL); in btree_gc_coalesce()
1397 if (btree_check_reserve(b, NULL)) in btree_gc_coalesce()
1416 block_bytes(b->c->cache)) > blocks) in btree_gc_coalesce()
1432 block_bytes(b->c->cache)) > in btree_gc_coalesce()
1438 last = &r->b->key; in btree_gc_coalesce()
1441 BUG_ON(__set_blocks(n1, n1->keys + keys, block_bytes(b->c->cache)) > in btree_gc_coalesce()
1481 if (__bch_keylist_realloc(&keylist, bkey_u64s(&r[i].b->key))) in btree_gc_coalesce()
1484 make_btree_freeing_key(r[i].b, keylist.top); in btree_gc_coalesce()
1488 bch_btree_insert_node(b, op, &keylist, NULL, NULL); in btree_gc_coalesce()
1492 btree_node_free(r[i].b); in btree_gc_coalesce()
1493 rw_unlock(true, r[i].b); in btree_gc_coalesce()
1495 r[i].b = new_nodes[i]; in btree_gc_coalesce()
1499 r[nodes - 1].b = ERR_PTR(-EINTR); in btree_gc_coalesce()
1518 atomic_dec(&b->c->prio_blocked); in btree_gc_coalesce()
1529 static int btree_gc_rewrite_node(struct btree *b, struct btree_op *op, in btree_gc_rewrite_node() argument
1535 if (btree_check_reserve(b, NULL)) in btree_gc_rewrite_node()
1543 if (btree_check_reserve(b, NULL)) { in btree_gc_rewrite_node()
1557 bch_btree_insert_node(b, op, &keys, NULL, NULL); in btree_gc_rewrite_node()
1567 static unsigned int btree_gc_count_keys(struct btree *b) in btree_gc_count_keys() argument
1573 for_each_key_filter(&b->keys, k, &iter, bch_ptr_bad) in btree_gc_count_keys()
1605 static int btree_gc_recurse(struct btree *b, struct btree_op *op, in btree_gc_recurse() argument
1615 bch_btree_iter_stack_init(&b->keys, &iter, &b->c->gc_done); in btree_gc_recurse()
1618 i->b = ERR_PTR(-EINTR); in btree_gc_recurse()
1621 k = bch_btree_iter_next_filter(&iter.iter, &b->keys, in btree_gc_recurse()
1624 r->b = bch_btree_node_get(b->c, op, k, b->level - 1, in btree_gc_recurse()
1625 true, b); in btree_gc_recurse()
1626 if (IS_ERR(r->b)) { in btree_gc_recurse()
1627 ret = PTR_ERR(r->b); in btree_gc_recurse()
1631 r->keys = btree_gc_count_keys(r->b); in btree_gc_recurse()
1633 ret = btree_gc_coalesce(b, op, gc, r); in btree_gc_recurse()
1638 if (!last->b) in btree_gc_recurse()
1641 if (!IS_ERR(last->b)) { in btree_gc_recurse()
1642 should_rewrite = btree_gc_mark_node(last->b, gc); in btree_gc_recurse()
1644 ret = btree_gc_rewrite_node(b, op, last->b); in btree_gc_recurse()
1649 if (last->b->level) { in btree_gc_recurse()
1650 ret = btree_gc_recurse(last->b, op, writes, gc); in btree_gc_recurse()
1655 bkey_copy_key(&b->c->gc_done, &last->b->key); in btree_gc_recurse()
1661 mutex_lock(&last->b->write_lock); in btree_gc_recurse()
1662 if (btree_node_dirty(last->b)) in btree_gc_recurse()
1663 bch_btree_node_write(last->b, writes); in btree_gc_recurse()
1664 mutex_unlock(&last->b->write_lock); in btree_gc_recurse()
1665 rw_unlock(true, last->b); in btree_gc_recurse()
1669 r->b = NULL; in btree_gc_recurse()
1671 if (atomic_read(&b->c->search_inflight) && in btree_gc_recurse()
1672 gc->nodes >= gc->nodes_pre + btree_gc_min_nodes(b->c)) { in btree_gc_recurse()
1685 if (!IS_ERR_OR_NULL(i->b)) { in btree_gc_recurse()
1686 mutex_lock(&i->b->write_lock); in btree_gc_recurse()
1687 if (btree_node_dirty(i->b)) in btree_gc_recurse()
1688 bch_btree_node_write(i->b, writes); in btree_gc_recurse()
1689 mutex_unlock(&i->b->write_lock); in btree_gc_recurse()
1690 rw_unlock(true, i->b); in btree_gc_recurse()
1696 static int bch_btree_gc_root(struct btree *b, struct btree_op *op, in bch_btree_gc_root() argument
1703 should_rewrite = btree_gc_mark_node(b, gc); in bch_btree_gc_root()
1705 n = btree_node_alloc_replacement(b, NULL); in bch_btree_gc_root()
1711 btree_node_free(b); in bch_btree_gc_root()
1718 __bch_btree_mark_key(b->c, b->level + 1, &b->key); in bch_btree_gc_root()
1720 if (b->level) { in bch_btree_gc_root()
1721 ret = btree_gc_recurse(b, op, writes, gc); in bch_btree_gc_root()
1726 bkey_copy_key(&b->c->gc_done, &b->key); in bch_btree_gc_root()
1734 struct bucket *b; in btree_gc_start() local
1744 for_each_bucket(b, ca) { in btree_gc_start()
1745 b->last_gc = b->gen; in btree_gc_start()
1746 if (bch_can_invalidate_bucket(ca, b)) in btree_gc_start()
1747 b->reclaimable_in_gc = 1; in btree_gc_start()
1748 if (!atomic_read(&b->pin)) { in btree_gc_start()
1749 SET_GC_MARK(b, 0); in btree_gc_start()
1750 SET_GC_SECTORS_USED(b, 0); in btree_gc_start()
1760 struct bucket *b; in bch_btree_gc_finish() local
1808 for_each_bucket(b, ca) { in bch_btree_gc_finish()
1809 c->need_gc = max(c->need_gc, bucket_gc_gen(b)); in bch_btree_gc_finish()
1811 if (b->reclaimable_in_gc) in bch_btree_gc_finish()
1812 b->reclaimable_in_gc = 0; in bch_btree_gc_finish()
1814 if (atomic_read(&b->pin)) in bch_btree_gc_finish()
1817 BUG_ON(!GC_MARK(b) && GC_SECTORS_USED(b)); in bch_btree_gc_finish()
1819 if (!GC_MARK(b) || GC_MARK(b) == GC_MARK_RECLAIMABLE) in bch_btree_gc_finish()
1913 static int bch_btree_check_recurse(struct btree *b, struct btree_op *op) in bch_btree_check_recurse() argument
1919 for_each_key_filter(&b->keys, k, &iter, bch_ptr_invalid) in bch_btree_check_recurse()
1920 bch_initial_mark_key(b->c, b->level, k); in bch_btree_check_recurse()
1922 bch_initial_mark_key(b->c, b->level + 1, &b->key); in bch_btree_check_recurse()
1924 if (b->level) { in bch_btree_check_recurse()
1925 bch_btree_iter_stack_init(&b->keys, &iter, NULL); in bch_btree_check_recurse()
1928 k = bch_btree_iter_next_filter(&iter.iter, &b->keys, in bch_btree_check_recurse()
1931 btree_node_prefetch(b, k); in bch_btree_check_recurse()
1936 b->c->gc_stats.nodes++; in bch_btree_check_recurse()
1940 ret = bcache_btree(check_recurse, p, b, op); in bch_btree_check_recurse()
2128 struct bucket *b; in bch_initial_gc_finish() local
2143 for_each_bucket(b, ca) { in bch_initial_gc_finish()
2148 if (bch_can_invalidate_bucket(ca, b) && in bch_initial_gc_finish()
2149 !GC_MARK(b)) { in bch_initial_gc_finish()
2150 __bch_invalidate_one_bucket(ca, b); in bch_initial_gc_finish()
2152 b - ca->buckets)) in bch_initial_gc_finish()
2154 b - ca->buckets); in bch_initial_gc_finish()
2163 static bool btree_insert_key(struct btree *b, struct bkey *k, in btree_insert_key() argument
2168 BUG_ON(bkey_cmp(k, &b->key) > 0); in btree_insert_key()
2170 status = bch_btree_insert_key(&b->keys, k, replace_key); in btree_insert_key()
2172 bch_check_keys(&b->keys, "%u for %s", status, in btree_insert_key()
2175 trace_bcache_btree_insert_key(b, k, replace_key != NULL, in btree_insert_key()
2182 static size_t insert_u64s_remaining(struct btree *b) in insert_u64s_remaining() argument
2184 long ret = bch_btree_keys_u64s_remaining(&b->keys); in insert_u64s_remaining()
2189 if (b->keys.ops->is_extents) in insert_u64s_remaining()
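
insert_u64s_remaining() holds some headroom back for extent nodes on top of what bch_btree_keys_u64s_remaining() reports, because an insert may land in the middle of an existing extent and force a split. A hedged sketch of that adjustment — KEY_MAX_U64S is, to my recollection, the worst-case bkey size in u64s, and the exact body here is my reconstruction:

	/* sketch: keep room for one worst-case key, since inserting into an
	 * extent node may have to split an existing extent in place */
	if (b->keys.ops->is_extents)
		ret -= KEY_MAX_U64S;

	return max(ret, 0L);
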
2195 static bool bch_btree_insert_keys(struct btree *b, struct btree_op *op, in bch_btree_insert_keys() argument
2200 int oldsize = bch_count_data(&b->keys); in bch_btree_insert_keys()
2205 if (bkey_u64s(k) > insert_u64s_remaining(b)) in bch_btree_insert_keys()
2208 if (bkey_cmp(k, &b->key) <= 0) { in bch_btree_insert_keys()
2209 if (!b->level) in bch_btree_insert_keys()
2210 bkey_put(b->c, k); in bch_btree_insert_keys()
2212 ret |= btree_insert_key(b, k, replace_key); in bch_btree_insert_keys()
2214 } else if (bkey_cmp(&START_KEY(k), &b->key) < 0) { in bch_btree_insert_keys()
2218 bch_cut_back(&b->key, &temp.key); in bch_btree_insert_keys()
2219 bch_cut_front(&b->key, insert_keys->keys); in bch_btree_insert_keys()
2221 ret |= btree_insert_key(b, &temp.key, replace_key); in bch_btree_insert_keys()
2231 BUG_ON(!bch_keylist_empty(insert_keys) && b->level); in bch_btree_insert_keys()
2233 BUG_ON(bch_count_data(&b->keys) < oldsize); in bch_btree_insert_keys()
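
The middle branch of bch_btree_insert_keys() handles a key that straddles this node's end: a copy is cut back so it stops at b->key and is inserted here, while the original left on the keylist is cut front so the remainder goes to the next node. A toy illustration with plain integer ranges (real bkeys carry an end offset plus a size; this is not the kernel representation):

#include <stdio.h>

/* Toy model of the bch_cut_back()/bch_cut_front() handling for an extent
 * [start, end) that crosses the node boundary. Plain integers stand in
 * for bkeys. */
int main(void)
{
	unsigned int start = 90, end = 130, boundary = 100;

	unsigned int here_start = start, here_end = boundary;	/* "cut back" copy       */
	unsigned int rest_start = boundary, rest_end = end;	/* "cut front" remainder */

	printf("insert into this node: [%u,%u)\n", here_start, here_end);
	printf("left on the keylist:   [%u,%u)\n", rest_start, rest_end);
	return 0;
}
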
2237 static int btree_split(struct btree *b, struct btree_op *op, in btree_split() argument
2250 if (btree_check_reserve(b, op)) { in btree_split()
2251 if (!b->level) in btree_split()
2257 n1 = btree_node_alloc_replacement(b, op); in btree_split()
2262 block_bytes(n1->c->cache)) > (btree_blocks(b) * 4) / 5; in btree_split()
2267 trace_bcache_btree_node_split(b, btree_bset_first(n1)->keys); in btree_split()
2269 n2 = bch_btree_node_alloc(b->c, op, b->level, b->parent); in btree_split()
2273 if (!b->parent) { in btree_split()
2274 n3 = bch_btree_node_alloc(b->c, op, b->level + 1, NULL); in btree_split()
2304 bkey_copy_key(&n2->key, &b->key); in btree_split()
2311 trace_bcache_btree_node_compact(b, btree_bset_first(n1)->keys); in btree_split()
2332 } else if (!b->parent) { in btree_split()
2339 make_btree_freeing_key(b, parent_keys.top); in btree_split()
2342 bch_btree_insert_node(b->parent, op, &parent_keys, NULL, NULL); in btree_split()
2346 btree_node_free(b); in btree_split()
2349 bch_time_stats_update(&b->c->btree_split_time, start_time); in btree_split()
2353 bkey_put(b->c, &n2->key); in btree_split()
2357 bkey_put(b->c, &n1->key); in btree_split()
2361 WARN(1, "bcache: btree split failed (level %u)", b->level); in btree_split()
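
Whether btree_split() splits at all is decided near the top of the function: the freshly sorted replacement is split only when it would be more than four fifths full, otherwise the rewrite is treated as a compaction. A small self-contained check mirroring that threshold, with invented numbers:

#include <stdbool.h>
#include <stdio.h>

/* Mirror of the threshold in btree_split(): split when the sorted
 * replacement would fill more than 4/5 of the node's blocks. */
static bool should_split(unsigned int set_blocks, unsigned int btree_blocks)
{
	return set_blocks > (btree_blocks * 4) / 5;
}

int main(void)
{
	printf("48 of 64 blocks -> %s\n", should_split(48, 64) ? "split" : "compact");
	printf("56 of 64 blocks -> %s\n", should_split(56, 64) ? "split" : "compact");
	return 0;
}
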
2371 static int bch_btree_insert_node(struct btree *b, struct btree_op *op, in bch_btree_insert_node() argument
2378 BUG_ON(b->level && replace_key); in bch_btree_insert_node()
2382 mutex_lock(&b->write_lock); in bch_btree_insert_node()
2384 if (write_block(b) != btree_bset_last(b) && in bch_btree_insert_node()
2385 b->keys.last_set_unwritten) in bch_btree_insert_node()
2386 bch_btree_init_next(b); /* just wrote a set */ in bch_btree_insert_node()
2388 if (bch_keylist_nkeys(insert_keys) > insert_u64s_remaining(b)) { in bch_btree_insert_node()
2389 mutex_unlock(&b->write_lock); in bch_btree_insert_node()
2393 BUG_ON(write_block(b) != btree_bset_last(b)); in bch_btree_insert_node()
2395 if (bch_btree_insert_keys(b, op, insert_keys, replace_key)) { in bch_btree_insert_node()
2396 if (!b->level) in bch_btree_insert_node()
2397 bch_btree_leaf_dirty(b, journal_ref); in bch_btree_insert_node()
2399 bch_btree_node_write(b, &cl); in bch_btree_insert_node()
2402 mutex_unlock(&b->write_lock); in bch_btree_insert_node()
2410 op->lock = b->c->root->level + 1; in bch_btree_insert_node()
2412 } else if (op->lock <= b->c->root->level) { in bch_btree_insert_node()
2413 op->lock = b->c->root->level + 1; in bch_btree_insert_node()
2417 int ret = btree_split(b, op, insert_keys, replace_key); in bch_btree_insert_node()
2427 int bch_btree_insert_check_key(struct btree *b, struct btree_op *op, in bch_btree_insert_check_key() argument
2431 uint64_t btree_ptr = b->key.ptr[0]; in bch_btree_insert_check_key()
2432 unsigned long seq = b->seq; in bch_btree_insert_check_key()
2439 rw_unlock(false, b); in bch_btree_insert_check_key()
2440 rw_lock(true, b, b->level); in bch_btree_insert_check_key()
2442 if (b->key.ptr[0] != btree_ptr || in bch_btree_insert_check_key()
2443 b->seq != seq + 1) { in bch_btree_insert_check_key()
2444 op->lock = b->level; in bch_btree_insert_check_key()
2456 ret = bch_btree_insert_node(b, op, &insert, NULL, NULL); in bch_btree_insert_check_key()
2461 downgrade_write(&b->lock); in bch_btree_insert_check_key()
2472 static int btree_insert_fn(struct btree_op *b_op, struct btree *b) in btree_insert_fn() argument
2477 int ret = bch_btree_insert_node(b, &op->op, op->keys, in btree_insert_fn()
2519 void bch_btree_set_root(struct btree *b) in bch_btree_set_root() argument
2526 trace_bcache_btree_set_root(b); in bch_btree_set_root()
2528 BUG_ON(!b->written); in bch_btree_set_root()
2530 for (i = 0; i < KEY_PTRS(&b->key); i++) in bch_btree_set_root()
2531 BUG_ON(PTR_BUCKET(b->c, &b->key, i)->prio != BTREE_PRIO); in bch_btree_set_root()
2533 mutex_lock(&b->c->bucket_lock); in bch_btree_set_root()
2534 list_del_init(&b->list); in bch_btree_set_root()
2535 mutex_unlock(&b->c->bucket_lock); in bch_btree_set_root()
2537 b->c->root = b; in bch_btree_set_root()
2539 bch_journal_meta(b->c, &cl); in bch_btree_set_root()
2545 static int bch_btree_map_nodes_recurse(struct btree *b, struct btree_op *op, in bch_btree_map_nodes_recurse() argument
2551 if (b->level) { in bch_btree_map_nodes_recurse()
2555 bch_btree_iter_stack_init(&b->keys, &iter, from); in bch_btree_map_nodes_recurse()
2557 while ((k = bch_btree_iter_next_filter(&iter.iter, &b->keys, in bch_btree_map_nodes_recurse()
2559 ret = bcache_btree(map_nodes_recurse, k, b, in bch_btree_map_nodes_recurse()
2568 if (!b->level || flags == MAP_ALL_NODES) in bch_btree_map_nodes_recurse()
2569 ret = fn(op, b); in bch_btree_map_nodes_recurse()
2580 int bch_btree_map_keys_recurse(struct btree *b, struct btree_op *op, in bch_btree_map_keys_recurse() argument
2588 bch_btree_iter_stack_init(&b->keys, &iter, from); in bch_btree_map_keys_recurse()
2590 while ((k = bch_btree_iter_next_filter(&iter.iter, &b->keys, in bch_btree_map_keys_recurse()
2592 ret = !b->level in bch_btree_map_keys_recurse()
2593 ? fn(op, b, k) in bch_btree_map_keys_recurse()
2595 b, op, from, fn, flags); in bch_btree_map_keys_recurse()
2602 if (!b->level && (flags & MAP_END_KEY)) in bch_btree_map_keys_recurse()
2603 ret = fn(op, b, &KEY(KEY_INODE(&b->key), in bch_btree_map_keys_recurse()
2604 KEY_OFFSET(&b->key), 0)); in bch_btree_map_keys_recurse()
2641 static int refill_keybuf_fn(struct btree_op *op, struct btree *b, in refill_keybuf_fn() argument