Lines Matching +full:cache +full:- +full:op +full:- +full:block +full:- +full:size

1 // SPDX-License-Identifier: GPL-2.0
5 * Uses a block device as cache for other block devices; optimized for SSDs.
6 * All allocation is done in buckets, which should match the erase block size
10 * bucket priority is increased on cache hit, and periodically all the buckets
21 * All configuration is done via sysfs; see Documentation/admin-guide/bcache.rst.
45 * Writeback: don't undirty key until after a cache flush
49 * On btree write error, mark bucket such that it won't be freed from the cache
64 * Keep 1 min/5 min/15 min statistics of how busy a block device has been, so
76 * If data write is less than hard sector size of ssd, round up offset in open
79 * Superblock needs to be fleshed out for multiple cache devices
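
The header fragments above describe the allocation model: space is handed out in whole buckets, sized to match the SSD's erase block, and a bucket's priority is increased each time it serves a cache hit. A minimal userspace model of that priority-on-hit idea (hypothetical type and field names, not bcache's own code):

	#include <stdint.h>
	#include <stdio.h>

	/* Hypothetical stand-in for a bucket descriptor. */
	struct bucket_model {
		uint16_t prio;		/* higher = hit more recently/often */
		uint32_t sectors_used;	/* cached data held by this bucket */
	};

	static void model_cache_hit(struct bucket_model *b, uint16_t max_prio)
	{
		if (b->prio < max_prio)
			b->prio++;	/* a hit raises the bucket's priority */
	}

	int main(void)
	{
		struct bucket_model b = { .prio = 0, .sectors_used = 0 };

		for (int i = 0; i < 3; i++)
			model_cache_hit(&b, UINT16_MAX);
		printf("priority after 3 hits: %u\n", (unsigned)b.prio);	/* 3 */
		return 0;
	}
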
100 (((k)->ptr[0] >> c->bucket_bits) | PTR_GEN(k, 0))
104 #define insert_lock(s, b) ((b)->level <= (s)->lock)
109 return ((void *) btree_bset_first(b)) + b->written * block_bytes(b->c->cache); in write_block()
115 if (b->level && b->keys.nsets) in bch_btree_init_next()
116 bch_btree_sort(&b->keys, &b->c->sort); in bch_btree_init_next()
118 bch_btree_sort_lazy(&b->keys, &b->c->sort); in bch_btree_init_next()
120 if (b->written < btree_blocks(b)) in bch_btree_init_next()
121 bch_bset_init_next(&b->keys, write_block(b), in bch_btree_init_next()
122 bset_magic(&b->c->cache->sb)); in bch_btree_init_next()
134 atomic_dec_bug(&PTR_BUCKET(c, k, i)->pin); in bkey_put()
141 uint64_t crc = b->key.ptr[0]; in btree_csum_set()
144 crc = crc64_be(crc, data, end - data); in btree_csum_set()
155 * c->fill_iter can allocate an iterator with more memory space in bch_btree_node_read_done()
157 * See the comment around cache_set->fill_iter. in bch_btree_node_read_done()
159 iter = mempool_alloc(&b->c->fill_iter, GFP_NOIO); in bch_btree_node_read_done()
160 iter->size = b->c->cache->sb.bucket_size / b->c->cache->sb.block_size; in bch_btree_node_read_done()
161 iter->used = 0; in bch_btree_node_read_done()
164 iter->b = &b->keys; in bch_btree_node_read_done()
167 if (!i->seq) in bch_btree_node_read_done()
171 b->written < btree_blocks(b) && i->seq == b->keys.set[0].data->seq; in bch_btree_node_read_done()
174 if (i->version > BCACHE_BSET_VERSION) in bch_btree_node_read_done()
178 if (b->written + set_blocks(i, block_bytes(b->c->cache)) > in bch_btree_node_read_done()
183 if (i->magic != bset_magic(&b->c->cache->sb)) in bch_btree_node_read_done()
187 switch (i->version) { in bch_btree_node_read_done()
189 if (i->csum != csum_set(i)) in bch_btree_node_read_done()
193 if (i->csum != btree_csum_set(b, i)) in bch_btree_node_read_done()
199 if (i != b->keys.set[0].data && !i->keys) in bch_btree_node_read_done()
202 bch_btree_iter_push(iter, i->start, bset_bkey_last(i)); in bch_btree_node_read_done()
204 b->written += set_blocks(i, block_bytes(b->c->cache)); in bch_btree_node_read_done()
209 bset_sector_offset(&b->keys, i) < KEY_SIZE(&b->key); in bch_btree_node_read_done()
210 i = ((void *) i) + block_bytes(b->c->cache)) in bch_btree_node_read_done()
211 if (i->seq == b->keys.set[0].data->seq) in bch_btree_node_read_done()
214 bch_btree_sort_and_fix_extents(&b->keys, iter, &b->c->sort); in bch_btree_node_read_done()
216 i = b->keys.set[0].data; in bch_btree_node_read_done()
218 if (b->keys.set[0].size && in bch_btree_node_read_done()
219 bkey_cmp(&b->key, &b->keys.set[0].end) < 0) in bch_btree_node_read_done()
222 if (b->written < btree_blocks(b)) in bch_btree_node_read_done()
223 bch_bset_init_next(&b->keys, write_block(b), in bch_btree_node_read_done()
224 bset_magic(&b->c->cache->sb)); in bch_btree_node_read_done()
226 mempool_free(iter, &b->c->fill_iter); in bch_btree_node_read_done()
230 bch_cache_set_error(b->c, "%s at bucket %zu, block %u, %u keys", in bch_btree_node_read_done()
231 err, PTR_BUCKET_NR(b->c, &b->key, 0), in bch_btree_node_read_done()
232 bset_block_offset(b, i), i->keys); in bch_btree_node_read_done()
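
For scale on the iterator sizing above: sb.bucket_size and sb.block_size are both expressed in 512-byte sectors, so with an example geometry of a 512 KiB bucket (1024 sectors) and 4 KiB blocks (8 sectors), iter->size works out to 1024 / 8 = 128 slots, one per block boundary at which a bset could begin inside the node's bucket.
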
238 struct closure *cl = bio->bi_private; in btree_node_read_endio()
253 bio = bch_bbio_alloc(b->c); in bch_btree_node_read()
254 bio->bi_iter.bi_size = KEY_SIZE(&b->key) << 9; in bch_btree_node_read()
255 bio->bi_end_io = btree_node_read_endio; in bch_btree_node_read()
256 bio->bi_private = &cl; in bch_btree_node_read()
257 bio->bi_opf = REQ_OP_READ | REQ_META; in bch_btree_node_read()
259 bch_bio_map(bio, b->keys.set[0].data); in bch_btree_node_read()
261 bch_submit_bbio(bio, b->c, &b->key, 0); in bch_btree_node_read()
264 if (bio->bi_status) in bch_btree_node_read()
267 bch_bbio_free(bio, b->c); in bch_btree_node_read()
273 bch_time_stats_update(&b->c->btree_read_time, start_time); in bch_btree_node_read()
277 bch_cache_set_error(b->c, "io error reading bucket %zu", in bch_btree_node_read()
278 PTR_BUCKET_NR(b->c, &b->key, 0)); in bch_btree_node_read()
283 if (w->prio_blocked && in btree_complete_write()
284 !atomic_sub_return(w->prio_blocked, &b->c->prio_blocked)) in btree_complete_write()
285 wake_up_allocators(b->c); in btree_complete_write()
287 if (w->journal) { in btree_complete_write()
288 atomic_dec_bug(w->journal); in btree_complete_write()
289 __closure_wake_up(&b->c->journal.wait); in btree_complete_write()
292 w->prio_blocked = 0; in btree_complete_write()
293 w->journal = NULL; in btree_complete_write()
300 up(&b->io_mutex); in CLOSURE_CALLBACK()
308 bch_bbio_free(b->bio, b->c); in CLOSURE_CALLBACK()
309 b->bio = NULL; in CLOSURE_CALLBACK()
313 queue_delayed_work(btree_io_wq, &b->work, 30 * HZ); in CLOSURE_CALLBACK()
322 bio_free_pages(b->bio); in CLOSURE_CALLBACK()
323 __btree_node_write_done(&cl->work); in CLOSURE_CALLBACK()
328 struct closure *cl = bio->bi_private; in btree_node_write_endio()
331 if (bio->bi_status) in btree_node_write_endio()
334 bch_bbio_count_io_errors(b->c, bio, bio->bi_status, "writing btree"); in btree_node_write_endio()
340 struct closure *cl = &b->io; in do_btree_node_write()
344 i->version = BCACHE_BSET_VERSION; in do_btree_node_write()
345 i->csum = btree_csum_set(b, i); in do_btree_node_write()
347 BUG_ON(b->bio); in do_btree_node_write()
348 b->bio = bch_bbio_alloc(b->c); in do_btree_node_write()
350 b->bio->bi_end_io = btree_node_write_endio; in do_btree_node_write()
351 b->bio->bi_private = cl; in do_btree_node_write()
352 b->bio->bi_iter.bi_size = roundup(set_bytes(i), block_bytes(b->c->cache)); in do_btree_node_write()
353 b->bio->bi_opf = REQ_OP_WRITE | REQ_META | REQ_FUA; in do_btree_node_write()
354 bch_bio_map(b->bio, i); in do_btree_node_write()
357 * If we're appending to a leaf node, we don't technically need FUA - in do_btree_node_write()
361 * Similarly if we're writing a new btree root - the pointer is going to in do_btree_node_write()
371 bkey_copy(&k.key, &b->key); in do_btree_node_write()
373 bset_sector_offset(&b->keys, i)); in do_btree_node_write()
375 if (!bch_bio_alloc_pages(b->bio, __GFP_NOWARN|GFP_NOWAIT)) { in do_btree_node_write()
377 void *addr = (void *) ((unsigned long) i & ~(PAGE_SIZE - 1)); in do_btree_node_write()
380 bio_for_each_segment_all(bv, b->bio, iter_all) { in do_btree_node_write()
381 memcpy(page_address(bv->bv_page), addr, PAGE_SIZE); in do_btree_node_write()
385 bch_submit_bbio(b->bio, b->c, &k.key, 0); in do_btree_node_write()
393 b->bio->bi_vcnt = 0; in do_btree_node_write()
394 bch_bio_map(b->bio, i); in do_btree_node_write()
396 bch_submit_bbio(b->bio, b->c, &k.key, 0); in do_btree_node_write()
407 lockdep_assert_held(&b->write_lock); in __bch_btree_node_write()
411 BUG_ON(current->bio_list); in __bch_btree_node_write()
412 BUG_ON(b->written >= btree_blocks(b)); in __bch_btree_node_write()
413 BUG_ON(b->written && !i->keys); in __bch_btree_node_write()
414 BUG_ON(btree_bset_first(b)->seq != i->seq); in __bch_btree_node_write()
415 bch_check_keys(&b->keys, "writing"); in __bch_btree_node_write()
417 cancel_delayed_work(&b->work); in __bch_btree_node_write()
419 /* If caller isn't waiting for write, parent refcount is cache set */ in __bch_btree_node_write()
420 down(&b->io_mutex); in __bch_btree_node_write()
421 closure_init(&b->io, parent ?: &b->c->cl); in __bch_btree_node_write()
423 clear_bit(BTREE_NODE_dirty, &b->flags); in __bch_btree_node_write()
424 change_bit(BTREE_NODE_write_idx, &b->flags); in __bch_btree_node_write()
428 atomic_long_add(set_blocks(i, block_bytes(b->c->cache)) * b->c->cache->sb.block_size, in __bch_btree_node_write()
429 &b->c->cache->btree_sectors_written); in __bch_btree_node_write()
431 b->written += set_blocks(i, block_bytes(b->c->cache)); in __bch_btree_node_write()
436 unsigned int nsets = b->keys.nsets; in bch_btree_node_write()
438 lockdep_assert_held(&b->lock); in bch_btree_node_write()
446 if (nsets && !b->keys.nsets) in bch_btree_node_write()
458 mutex_lock(&b->write_lock); in bch_btree_node_write_sync()
460 mutex_unlock(&b->write_lock); in bch_btree_node_write_sync()
469 mutex_lock(&b->write_lock); in btree_node_write_work()
472 mutex_unlock(&b->write_lock); in btree_node_write_work()
480 lockdep_assert_held(&b->write_lock); in bch_btree_leaf_dirty()
482 BUG_ON(!b->written); in bch_btree_leaf_dirty()
483 BUG_ON(!i->keys); in bch_btree_leaf_dirty()
486 queue_delayed_work(btree_io_wq, &b->work, 30 * HZ); in bch_btree_leaf_dirty()
491 * w->journal is always the oldest journal pin of all bkeys in bch_btree_leaf_dirty()
496 if (w->journal && in bch_btree_leaf_dirty()
497 journal_pin_cmp(b->c, w->journal, journal_ref)) { in bch_btree_leaf_dirty()
498 atomic_dec_bug(w->journal); in bch_btree_leaf_dirty()
499 w->journal = NULL; in bch_btree_leaf_dirty()
502 if (!w->journal) { in bch_btree_leaf_dirty()
503 w->journal = journal_ref; in bch_btree_leaf_dirty()
504 atomic_inc(w->journal); in bch_btree_leaf_dirty()
509 if (set_bytes(i) > PAGE_SIZE - 48 && in bch_btree_leaf_dirty()
510 !current->bio_list) in bch_btree_leaf_dirty()
515 * Btree in memory cache - allocation/freeing
516 * mca -> memory cache
519 #define mca_reserve(c) (((!IS_ERR_OR_NULL(c->root) && c->root->level) \
520 ? c->root->level : 1) * 8 + 16)
522 max_t(int, 0, c->btree_cache_used - mca_reserve(c))
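
The reserve above scales with tree depth: eight cached nodes per level of the root plus sixteen, with a floor of one level, and only nodes beyond that reserve count as freeable. A small standalone check of the arithmetic (stand-in values, not taken from a running cache set):

	#include <stdio.h>

	/* Mirrors mca_reserve(): (root_level ? root_level : 1) * 8 + 16 */
	static int mca_reserve_model(int root_level)
	{
		return (root_level ? root_level : 1) * 8 + 16;
	}

	int main(void)
	{
		int used = 50;				/* stand-in for c->btree_cache_used */
		int reserve = mca_reserve_model(2);	/* root at level 2 -> 32 */
		int can_free = used - reserve > 0 ? used - reserve : 0;

		printf("reserve=%d can_free=%d\n", reserve, can_free);	/* 32 and 18 */
		return 0;
	}
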
526 BUG_ON(b->io_mutex.count != 1); in mca_data_free()
528 bch_btree_keys_free(&b->keys); in mca_data_free()
530 b->c->btree_cache_used--; in mca_data_free()
531 list_move(&b->list, &b->c->btree_cache_freed); in mca_data_free()
538 b->key.ptr[0] = 0; in mca_bucket_free()
539 hlist_del_init_rcu(&b->hash); in mca_bucket_free()
540 list_move(&b->list, &b->c->btree_cache_freeable); in mca_bucket_free()
550 if (!bch_btree_keys_alloc(&b->keys, in mca_data_alloc()
552 ilog2(b->c->btree_pages), in mca_data_alloc()
555 b->c->btree_cache_used++; in mca_data_alloc()
556 list_move(&b->list, &b->c->btree_cache); in mca_data_alloc()
558 list_move(&b->list, &b->c->btree_cache_freed); in mca_data_alloc()
562 #define cmp_int(l, r) ((l > r) - (l < r))
571 return -cmp_int(a->level, b->level) ?: bkey_cmp(&a->key, &b->key); in btree_lock_cmp_fn()
578 printk(KERN_CONT " l=%u %llu:%llu", b->level, in btree_lock_print_fn()
579 KEY_INODE(&b->key), KEY_OFFSET(&b->key)); in btree_lock_print_fn()
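
The comparator above gives lockdep an ordering for btree node locks: the negated level comparison sorts higher (interior) levels first, and nodes on the same level are ordered by key; cmp_int() yields -1/0/1 and the GNU ?: extension falls through to the key comparison only on a tie. A standalone illustration of the same ordering, using a simplified node type and a plain ternary instead of the extension:

	#include <stdio.h>

	#define cmp_int(l, r) (((l) > (r)) - ((l) < (r)))

	struct node_model { int level; unsigned long long key; };

	/* Higher level first, ties broken by key, as in btree_lock_cmp_fn(). */
	static int lock_cmp(const struct node_model *a, const struct node_model *b)
	{
		int by_level = -cmp_int(a->level, b->level);

		return by_level ? by_level : cmp_int(a->key, b->key);
	}

	int main(void)
	{
		struct node_model root = { .level = 2, .key = 0 };
		struct node_model leaf = { .level = 0, .key = 100 };

		/* Negative: the root orders before the leaf, matching top-down locking. */
		printf("root vs leaf: %d\n", lock_cmp(&root, &leaf));
		return 0;
	}
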
595 init_rwsem(&b->lock); in mca_bucket_alloc()
596 lock_set_cmp_fn(&b->lock, btree_lock_cmp_fn, btree_lock_print_fn); in mca_bucket_alloc()
597 mutex_init(&b->write_lock); in mca_bucket_alloc()
598 lockdep_set_novalidate_class(&b->write_lock); in mca_bucket_alloc()
599 INIT_LIST_HEAD(&b->list); in mca_bucket_alloc()
600 INIT_DELAYED_WORK(&b->work, btree_node_write_work); in mca_bucket_alloc()
601 b->c = c; in mca_bucket_alloc()
602 sema_init(&b->io_mutex, 1); in mca_bucket_alloc()
613 lockdep_assert_held(&b->c->bucket_lock); in mca_reap()
615 if (!down_write_trylock(&b->lock)) in mca_reap()
616 return -ENOMEM; in mca_reap()
618 BUG_ON(btree_node_dirty(b) && !b->keys.set[0].data); in mca_reap()
620 if (b->keys.page_order < min_order) in mca_reap()
627 if (down_trylock(&b->io_mutex)) in mca_reap()
629 up(&b->io_mutex); in mca_reap()
636 * b->write_lock before checking BTREE_NODE_dirty bit. in mca_reap()
638 mutex_lock(&b->write_lock); in mca_reap()
646 mutex_unlock(&b->write_lock); in mca_reap()
653 mutex_unlock(&b->write_lock); in mca_reap()
658 down(&b->io_mutex); in mca_reap()
659 up(&b->io_mutex); in mca_reap()
664 return -ENOMEM; in mca_reap()
670 struct cache_set *c = shrink->private_data; in bch_mca_scan()
672 unsigned long i, nr = sc->nr_to_scan; in bch_mca_scan()
676 if (c->shrinker_disabled) in bch_mca_scan()
679 if (c->btree_cache_alloc_lock) in bch_mca_scan()
682 /* Return -1 if we can't do anything right now */ in bch_mca_scan()
683 if (sc->gfp_mask & __GFP_IO) in bch_mca_scan()
684 mutex_lock(&c->bucket_lock); in bch_mca_scan()
685 else if (!mutex_trylock(&c->bucket_lock)) in bch_mca_scan()
686 return -1; in bch_mca_scan()
689 * It's _really_ critical that we don't free too many btree nodes - we in bch_mca_scan()
695 nr /= c->btree_pages; in bch_mca_scan()
701 btree_cache_used = c->btree_cache_used; in bch_mca_scan()
702 list_for_each_entry_safe_reverse(b, t, &c->btree_cache_freeable, list) { in bch_mca_scan()
711 nr--; in bch_mca_scan()
715 list_for_each_entry_safe_reverse(b, t, &c->btree_cache, list) { in bch_mca_scan()
726 nr--; in bch_mca_scan()
730 mutex_unlock(&c->bucket_lock); in bch_mca_scan()
731 return freed * c->btree_pages; in bch_mca_scan()
737 struct cache_set *c = shrink->private_data; in bch_mca_count()
739 if (c->shrinker_disabled) in bch_mca_count()
742 if (c->btree_cache_alloc_lock) in bch_mca_count()
745 return mca_can_free(c) * c->btree_pages; in bch_mca_count()
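
Note that both shrinker callbacks above talk to the MM layer in pages rather than btree nodes: the scan target is divided by c->btree_pages on the way in and the number of freed nodes is multiplied by it on the way out, so the units stay consistent. With btree_pages = 4 (an illustrative value), a request to scan 128 objects covers 32 nodes, and freeing 10 of them is reported back as 40 objects freed.
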
755 if (c->shrink) in bch_btree_cache_free()
756 shrinker_free(c->shrink); in bch_btree_cache_free()
758 mutex_lock(&c->bucket_lock); in bch_btree_cache_free()
761 if (c->verify_data) in bch_btree_cache_free()
762 list_move(&c->verify_data->list, &c->btree_cache); in bch_btree_cache_free()
764 free_pages((unsigned long) c->verify_ondisk, ilog2(meta_bucket_pages(&c->cache->sb))); in bch_btree_cache_free()
767 list_splice(&c->btree_cache_freeable, in bch_btree_cache_free()
768 &c->btree_cache); in bch_btree_cache_free()
770 while (!list_empty(&c->btree_cache)) { in bch_btree_cache_free()
771 b = list_first_entry(&c->btree_cache, struct btree, list); in bch_btree_cache_free()
775 * request on cache now, it is unnecessary to acquire in bch_btree_cache_free()
776 * b->write_lock before clearing BTREE_NODE_dirty anymore. in bch_btree_cache_free()
780 clear_bit(BTREE_NODE_dirty, &b->flags); in bch_btree_cache_free()
785 while (!list_empty(&c->btree_cache_freed)) { in bch_btree_cache_free()
786 b = list_first_entry(&c->btree_cache_freed, in bch_btree_cache_free()
788 list_del(&b->list); in bch_btree_cache_free()
789 cancel_delayed_work_sync(&b->work); in bch_btree_cache_free()
793 mutex_unlock(&c->bucket_lock); in bch_btree_cache_free()
802 return -ENOMEM; in bch_btree_cache_alloc()
804 list_splice_init(&c->btree_cache, in bch_btree_cache_alloc()
805 &c->btree_cache_freeable); in bch_btree_cache_alloc()
808 mutex_init(&c->verify_lock); in bch_btree_cache_alloc()
810 c->verify_ondisk = (void *) in bch_btree_cache_alloc()
812 ilog2(meta_bucket_pages(&c->cache->sb))); in bch_btree_cache_alloc()
813 if (!c->verify_ondisk) { in bch_btree_cache_alloc()
816 * allocated in previous for-loop, they will be in bch_btree_cache_alloc()
819 return -ENOMEM; in bch_btree_cache_alloc()
822 c->verify_data = mca_bucket_alloc(c, &ZERO_KEY, GFP_KERNEL); in bch_btree_cache_alloc()
824 if (c->verify_data && in bch_btree_cache_alloc()
825 c->verify_data->keys.set->data) in bch_btree_cache_alloc()
826 list_del_init(&c->verify_data->list); in bch_btree_cache_alloc()
828 c->verify_data = NULL; in bch_btree_cache_alloc()
831 c->shrink = shrinker_alloc(0, "md-bcache:%pU", c->set_uuid); in bch_btree_cache_alloc()
832 if (!c->shrink) { in bch_btree_cache_alloc()
837 c->shrink->count_objects = bch_mca_count; in bch_btree_cache_alloc()
838 c->shrink->scan_objects = bch_mca_scan; in bch_btree_cache_alloc()
839 c->shrink->seeks = 4; in bch_btree_cache_alloc()
840 c->shrink->batch = c->btree_pages * 2; in bch_btree_cache_alloc()
841 c->shrink->private_data = c; in bch_btree_cache_alloc()
843 shrinker_register(c->shrink); in bch_btree_cache_alloc()
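
The calls above follow the dynamically allocated shrinker API: shrinker_alloc() returns the object, the callbacks and tuning fields are filled in, and shrinker_register() hands it to reclaim. A skeletal version of the same pattern against a hypothetical owner structure (illustrative only; real count/scan logic and teardown are elided):

	#include <linux/errno.h>
	#include <linux/shrinker.h>

	struct my_cache {
		struct shrinker *shrink;
		unsigned long nr_cached;
	};

	static unsigned long my_count(struct shrinker *s, struct shrink_control *sc)
	{
		struct my_cache *c = s->private_data;	/* owner recovered via private_data */

		return c->nr_cached;			/* objects that could be freed */
	}

	static unsigned long my_scan(struct shrinker *s, struct shrink_control *sc)
	{
		return SHRINK_STOP;			/* placeholder: free objects, return count */
	}

	static int my_shrinker_setup(struct my_cache *c)
	{
		c->shrink = shrinker_alloc(0, "my-cache");
		if (!c->shrink)
			return -ENOMEM;

		c->shrink->count_objects = my_count;
		c->shrink->scan_objects = my_scan;
		c->shrink->seeks = 4;			/* same seek bias bcache uses */
		c->shrink->private_data = c;
		shrinker_register(c->shrink);
		return 0;
	}
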
848 /* Btree in memory cache - hash table */
852 return &c->bucket_hash[hash_32(PTR_HASH(c, k), BUCKET_HASH_BITS)]; in mca_hash()
861 if (PTR_HASH(c, &b->key) == PTR_HASH(c, k)) in mca_find()
869 static int mca_cannibalize_lock(struct cache_set *c, struct btree_op *op) in mca_cannibalize_lock() argument
871 spin_lock(&c->btree_cannibalize_lock); in mca_cannibalize_lock()
872 if (likely(c->btree_cache_alloc_lock == NULL)) { in mca_cannibalize_lock()
873 c->btree_cache_alloc_lock = current; in mca_cannibalize_lock()
874 } else if (c->btree_cache_alloc_lock != current) { in mca_cannibalize_lock()
875 if (op) in mca_cannibalize_lock()
876 prepare_to_wait(&c->btree_cache_wait, &op->wait, in mca_cannibalize_lock()
878 spin_unlock(&c->btree_cannibalize_lock); in mca_cannibalize_lock()
879 return -EINTR; in mca_cannibalize_lock()
881 spin_unlock(&c->btree_cannibalize_lock); in mca_cannibalize_lock()
886 static struct btree *mca_cannibalize(struct cache_set *c, struct btree_op *op, in mca_cannibalize() argument
893 if (mca_cannibalize_lock(c, op)) in mca_cannibalize()
894 return ERR_PTR(-EINTR); in mca_cannibalize()
896 list_for_each_entry_reverse(b, &c->btree_cache, list) in mca_cannibalize()
900 list_for_each_entry_reverse(b, &c->btree_cache, list) in mca_cannibalize()
904 WARN(1, "btree cache cannibalize failed\n"); in mca_cannibalize()
905 return ERR_PTR(-ENOMEM); in mca_cannibalize()
916 spin_lock(&c->btree_cannibalize_lock); in bch_cannibalize_unlock()
917 if (c->btree_cache_alloc_lock == current) { in bch_cannibalize_unlock()
918 c->btree_cache_alloc_lock = NULL; in bch_cannibalize_unlock()
919 wake_up(&c->btree_cache_wait); in bch_cannibalize_unlock()
921 spin_unlock(&c->btree_cannibalize_lock); in bch_cannibalize_unlock()
924 static struct btree *mca_alloc(struct cache_set *c, struct btree_op *op, in mca_alloc() argument
929 BUG_ON(current->bio_list); in mca_alloc()
931 lockdep_assert_held(&c->bucket_lock); in mca_alloc()
939 list_for_each_entry(b, &c->btree_cache_freeable, list) in mca_alloc()
946 list_for_each_entry(b, &c->btree_cache_freed, list) in mca_alloc()
949 if (!b->keys.set[0].data) in mca_alloc()
959 BUG_ON(!down_write_trylock(&b->lock)); in mca_alloc()
960 if (!b->keys.set->data) in mca_alloc()
963 BUG_ON(b->io_mutex.count != 1); in mca_alloc()
965 bkey_copy(&b->key, k); in mca_alloc()
966 list_move(&b->list, &c->btree_cache); in mca_alloc()
967 hlist_del_init_rcu(&b->hash); in mca_alloc()
968 hlist_add_head_rcu(&b->hash, mca_hash(c, k)); in mca_alloc()
970 lock_set_subclass(&b->lock.dep_map, level + 1, _THIS_IP_); in mca_alloc()
971 b->parent = (void *) ~0UL; in mca_alloc()
972 b->flags = 0; in mca_alloc()
973 b->written = 0; in mca_alloc()
974 b->level = level; in mca_alloc()
976 if (!b->level) in mca_alloc()
977 bch_btree_keys_init(&b->keys, &bch_extent_keys_ops, in mca_alloc()
978 &b->c->expensive_debug_checks); in mca_alloc()
980 bch_btree_keys_init(&b->keys, &bch_btree_keys_ops, in mca_alloc()
981 &b->c->expensive_debug_checks); in mca_alloc()
988 b = mca_cannibalize(c, op, k); in mca_alloc()
996 * bch_btree_node_get - find a btree node in the cache and lock it, reading it
999 * If IO is necessary and running under submit_bio_noacct, returns -EAGAIN.
1002 * level and op->lock.
1007 struct btree *bch_btree_node_get(struct cache_set *c, struct btree_op *op, in bch_btree_node_get() argument
1019 if (current->bio_list) in bch_btree_node_get()
1020 return ERR_PTR(-EAGAIN); in bch_btree_node_get()
1022 mutex_lock(&c->bucket_lock); in bch_btree_node_get()
1023 b = mca_alloc(c, op, k, level); in bch_btree_node_get()
1024 mutex_unlock(&c->bucket_lock); in bch_btree_node_get()
1034 downgrade_write(&b->lock); in bch_btree_node_get()
1037 if (PTR_HASH(c, &b->key) != PTR_HASH(c, k)) { in bch_btree_node_get()
1041 BUG_ON(b->level != level); in bch_btree_node_get()
1046 return ERR_PTR(-EIO); in bch_btree_node_get()
1049 BUG_ON(!b->written); in bch_btree_node_get()
1051 b->parent = parent; in bch_btree_node_get()
1053 for (; i <= b->keys.nsets && b->keys.set[i].size; i++) { in bch_btree_node_get()
1054 prefetch(b->keys.set[i].tree); in bch_btree_node_get()
1055 prefetch(b->keys.set[i].data); in bch_btree_node_get()
1058 for (; i <= b->keys.nsets; i++) in bch_btree_node_get()
1059 prefetch(b->keys.set[i].data); in bch_btree_node_get()
1068 mutex_lock(&parent->c->bucket_lock); in btree_node_prefetch()
1069 b = mca_alloc(parent->c, NULL, k, parent->level - 1); in btree_node_prefetch()
1070 mutex_unlock(&parent->c->bucket_lock); in btree_node_prefetch()
1073 b->parent = parent; in btree_node_prefetch()
1085 BUG_ON(b == b->c->root); in btree_node_free()
1088 mutex_lock(&b->write_lock); in btree_node_free()
1096 mutex_unlock(&b->write_lock); in btree_node_free()
1104 clear_bit(BTREE_NODE_dirty, &b->flags); in btree_node_free()
1107 mutex_unlock(&b->write_lock); in btree_node_free()
1109 cancel_delayed_work(&b->work); in btree_node_free()
1111 mutex_lock(&b->c->bucket_lock); in btree_node_free()
1112 bch_bucket_free(b->c, &b->key); in btree_node_free()
1114 mutex_unlock(&b->c->bucket_lock); in btree_node_free()
1121 struct btree *__bch_btree_node_alloc(struct cache_set *c, struct btree_op *op, in __bch_btree_node_alloc() argument
1128 mutex_lock(&c->bucket_lock); in __bch_btree_node_alloc()
1130 /* return ERR_PTR(-EAGAIN) when it fails */ in __bch_btree_node_alloc()
1131 b = ERR_PTR(-EAGAIN); in __bch_btree_node_alloc()
1136 SET_KEY_SIZE(&k.key, c->btree_pages * PAGE_SECTORS); in __bch_btree_node_alloc()
1138 b = mca_alloc(c, op, &k.key, level); in __bch_btree_node_alloc()
1144 "Tried to allocate bucket that was in btree cache"); in __bch_btree_node_alloc()
1148 b->parent = parent; in __bch_btree_node_alloc()
1149 bch_bset_init_next(&b->keys, b->keys.set->data, bset_magic(&b->c->cache->sb)); in __bch_btree_node_alloc()
1151 mutex_unlock(&c->bucket_lock); in __bch_btree_node_alloc()
1158 mutex_unlock(&c->bucket_lock); in __bch_btree_node_alloc()
1165 struct btree_op *op, int level, in bch_btree_node_alloc() argument
1168 return __bch_btree_node_alloc(c, op, level, op != NULL, parent); in bch_btree_node_alloc()
1172 struct btree_op *op) in btree_node_alloc_replacement() argument
1174 struct btree *n = bch_btree_node_alloc(b->c, op, b->level, b->parent); in btree_node_alloc_replacement()
1177 mutex_lock(&n->write_lock); in btree_node_alloc_replacement()
1178 bch_btree_sort_into(&b->keys, &n->keys, &b->c->sort); in btree_node_alloc_replacement()
1179 bkey_copy_key(&n->key, &b->key); in btree_node_alloc_replacement()
1180 mutex_unlock(&n->write_lock); in btree_node_alloc_replacement()
1190 mutex_lock(&b->c->bucket_lock); in make_btree_freeing_key()
1192 atomic_inc(&b->c->prio_blocked); in make_btree_freeing_key()
1194 bkey_copy(k, &b->key); in make_btree_freeing_key()
1199 bch_inc_gen(b->c->cache, in make_btree_freeing_key()
1200 PTR_BUCKET(b->c, &b->key, i))); in make_btree_freeing_key()
1202 mutex_unlock(&b->c->bucket_lock); in make_btree_freeing_key()
1205 static int btree_check_reserve(struct btree *b, struct btree_op *op) in btree_check_reserve() argument
1207 struct cache_set *c = b->c; in btree_check_reserve()
1208 struct cache *ca = c->cache; in btree_check_reserve()
1209 unsigned int reserve = (c->root->level - b->level) * 2 + 1; in btree_check_reserve()
1211 mutex_lock(&c->bucket_lock); in btree_check_reserve()
1213 if (fifo_used(&ca->free[RESERVE_BTREE]) < reserve) { in btree_check_reserve()
1214 if (op) in btree_check_reserve()
1215 prepare_to_wait(&c->btree_cache_wait, &op->wait, in btree_check_reserve()
1217 mutex_unlock(&c->bucket_lock); in btree_check_reserve()
1218 return -EINTR; in btree_check_reserve()
1221 mutex_unlock(&c->bucket_lock); in btree_check_reserve()
1223 return mca_cannibalize_lock(b->c, op); in btree_check_reserve()
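
The reserve computed above grows with how far the node sits below the root: (root level - node level) * 2 + 1 free buckets must already be in the RESERVE_BTREE fifo, enough to split at every level on the way back up plus allocate a new root. For a leaf under a level-2 root that is 2 * 2 + 1 = 5 buckets; with fewer available the op is parked on btree_cache_wait and -EINTR tells the caller to retry.
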
1249 if (gen_after(g->last_gc, PTR_GEN(k, i))) in __bch_btree_mark_key()
1250 g->last_gc = PTR_GEN(k, i); in __bch_btree_mark_key()
1280 #define btree_mark_key(b, k) __bch_btree_mark_key(b->c, b->level, k)
1291 b->gen = PTR_GEN(k, i); in bch_initial_mark_key()
1294 b->prio = BTREE_PRIO; in bch_initial_mark_key()
1295 else if (!level && b->prio == BTREE_PRIO) in bch_initial_mark_key()
1296 b->prio = INITIAL_PRIO; in bch_initial_mark_key()
1304 stats->in_use = (c->nbuckets - c->avail_nbuckets) * 100 / c->nbuckets; in bch_update_bucket_in_use()
1315 gc->nodes++; in btree_gc_mark_node()
1317 for_each_key_filter(&b->keys, k, &iter, bch_ptr_invalid) { in btree_gc_mark_node()
1321 if (bch_ptr_bad(&b->keys, k)) in btree_gc_mark_node()
1324 gc->key_bytes += bkey_u64s(k); in btree_gc_mark_node()
1325 gc->nkeys++; in btree_gc_mark_node()
1328 gc->data += KEY_SIZE(k); in btree_gc_mark_node()
1331 for (t = b->keys.set; t <= &b->keys.set[b->keys.nsets]; t++) in btree_gc_mark_node()
1332 btree_bug_on(t->size && in btree_gc_mark_node()
1333 bset_written(&b->keys, t) && in btree_gc_mark_node()
1334 bkey_cmp(&b->key, &t->end) < 0, in btree_gc_mark_node()
1337 if (b->c->gc_always_rewrite) in btree_gc_mark_node()
1343 if ((keys - good_keys) * 2 > keys) in btree_gc_mark_node()
1356 static int bch_btree_insert_node(struct btree *b, struct btree_op *op,
1361 static int btree_gc_coalesce(struct btree *b, struct btree_op *op, in btree_gc_coalesce() argument
1381 blocks = btree_default_blocks(b->c) * 2 / 3; in btree_gc_coalesce()
1384 __set_blocks(b->keys.set[0].data, keys, in btree_gc_coalesce()
1385 block_bytes(b->c->cache)) > blocks * (nodes - 1)) in btree_gc_coalesce()
1396 * nodes, to make sure the insert below will succeed - we also check in btree_gc_coalesce()
1404 mutex_lock(&new_nodes[i]->write_lock); in btree_gc_coalesce()
1406 for (i = nodes - 1; i > 0; --i) { in btree_gc_coalesce()
1408 struct bset *n2 = btree_bset_first(new_nodes[i - 1]); in btree_gc_coalesce()
1414 for (k = n2->start; in btree_gc_coalesce()
1417 if (__set_blocks(n1, n1->keys + keys + in btree_gc_coalesce()
1419 block_bytes(b->c->cache)) > blocks) in btree_gc_coalesce()
1427 * Last node we're not getting rid of - we're getting in btree_gc_coalesce()
1434 if (__set_blocks(n1, n1->keys + n2->keys, in btree_gc_coalesce()
1435 block_bytes(b->c->cache)) > in btree_gc_coalesce()
1439 keys = n2->keys; in btree_gc_coalesce()
1441 last = &r->b->key; in btree_gc_coalesce()
1444 BUG_ON(__set_blocks(n1, n1->keys + keys, block_bytes(b->c->cache)) > in btree_gc_coalesce()
1448 bkey_copy_key(&new_nodes[i]->key, last); in btree_gc_coalesce()
1451 n2->start, in btree_gc_coalesce()
1452 (void *) bset_bkey_idx(n2, keys) - (void *) n2->start); in btree_gc_coalesce()
1454 n1->keys += keys; in btree_gc_coalesce()
1455 r[i].keys = n1->keys; in btree_gc_coalesce()
1457 memmove(n2->start, in btree_gc_coalesce()
1459 (void *) bset_bkey_last(n2) - in btree_gc_coalesce()
1462 n2->keys -= keys; in btree_gc_coalesce()
1465 bkey_u64s(&new_nodes[i]->key))) in btree_gc_coalesce()
1469 bch_keylist_add(&keylist, &new_nodes[i]->key); in btree_gc_coalesce()
1473 mutex_unlock(&new_nodes[i]->write_lock); in btree_gc_coalesce()
1478 BUG_ON(btree_bset_first(new_nodes[0])->keys); in btree_gc_coalesce()
1484 if (__bch_keylist_realloc(&keylist, bkey_u64s(&r[i].b->key))) in btree_gc_coalesce()
1491 bch_btree_insert_node(b, op, &keylist, NULL, NULL); in btree_gc_coalesce()
1501 memmove(r, r + 1, sizeof(r[0]) * (nodes - 1)); in btree_gc_coalesce()
1502 r[nodes - 1].b = ERR_PTR(-EINTR); in btree_gc_coalesce()
1505 gc->nodes--; in btree_gc_coalesce()
1510 return -EINTR; in btree_gc_coalesce()
1514 mutex_unlock(&new_nodes[i]->write_lock); in btree_gc_coalesce()
1521 atomic_dec(&b->c->prio_blocked); in btree_gc_coalesce()
1532 static int btree_gc_rewrite_node(struct btree *b, struct btree_op *op, in btree_gc_rewrite_node() argument
1555 bch_keylist_add(&keys, &n->key); in btree_gc_rewrite_node()
1560 bch_btree_insert_node(b, op, &keys, NULL, NULL); in btree_gc_rewrite_node()
1567 return -EINTR; in btree_gc_rewrite_node()
1576 for_each_key_filter(&b->keys, k, &iter, bch_ptr_bad) in btree_gc_count_keys()
1600 min_nodes = c->gc_stats.nodes / MAX_GC_TIMES; in btree_gc_min_nodes()
1608 static int btree_gc_recurse(struct btree *b, struct btree_op *op, in btree_gc_recurse() argument
1616 struct gc_merge_info *i, *last = r + ARRAY_SIZE(r) - 1; in btree_gc_recurse()
1618 bch_btree_iter_init(&b->keys, &iter, &b->c->gc_done); in btree_gc_recurse()
1621 i->b = ERR_PTR(-EINTR); in btree_gc_recurse()
1624 k = bch_btree_iter_next_filter(&iter, &b->keys, bch_ptr_bad); in btree_gc_recurse()
1626 r->b = bch_btree_node_get(b->c, op, k, b->level - 1, in btree_gc_recurse()
1628 if (IS_ERR(r->b)) { in btree_gc_recurse()
1629 ret = PTR_ERR(r->b); in btree_gc_recurse()
1633 r->keys = btree_gc_count_keys(r->b); in btree_gc_recurse()
1635 ret = btree_gc_coalesce(b, op, gc, r); in btree_gc_recurse()
1640 if (!last->b) in btree_gc_recurse()
1643 if (!IS_ERR(last->b)) { in btree_gc_recurse()
1644 should_rewrite = btree_gc_mark_node(last->b, gc); in btree_gc_recurse()
1646 ret = btree_gc_rewrite_node(b, op, last->b); in btree_gc_recurse()
1651 if (last->b->level) { in btree_gc_recurse()
1652 ret = btree_gc_recurse(last->b, op, writes, gc); in btree_gc_recurse()
1657 bkey_copy_key(&b->c->gc_done, &last->b->key); in btree_gc_recurse()
1663 mutex_lock(&last->b->write_lock); in btree_gc_recurse()
1664 if (btree_node_dirty(last->b)) in btree_gc_recurse()
1665 bch_btree_node_write(last->b, writes); in btree_gc_recurse()
1666 mutex_unlock(&last->b->write_lock); in btree_gc_recurse()
1667 rw_unlock(true, last->b); in btree_gc_recurse()
1670 memmove(r + 1, r, sizeof(r[0]) * (GC_MERGE_NODES - 1)); in btree_gc_recurse()
1671 r->b = NULL; in btree_gc_recurse()
1673 if (atomic_read(&b->c->search_inflight) && in btree_gc_recurse()
1674 gc->nodes >= gc->nodes_pre + btree_gc_min_nodes(b->c)) { in btree_gc_recurse()
1675 gc->nodes_pre = gc->nodes; in btree_gc_recurse()
1676 ret = -EAGAIN; in btree_gc_recurse()
1681 ret = -EAGAIN; in btree_gc_recurse()
1687 if (!IS_ERR_OR_NULL(i->b)) { in btree_gc_recurse()
1688 mutex_lock(&i->b->write_lock); in btree_gc_recurse()
1689 if (btree_node_dirty(i->b)) in btree_gc_recurse()
1690 bch_btree_node_write(i->b, writes); in btree_gc_recurse()
1691 mutex_unlock(&i->b->write_lock); in btree_gc_recurse()
1692 rw_unlock(true, i->b); in btree_gc_recurse()
1698 static int bch_btree_gc_root(struct btree *b, struct btree_op *op, in bch_btree_gc_root() argument
1716 return -EINTR; in bch_btree_gc_root()
1720 __bch_btree_mark_key(b->c, b->level + 1, &b->key); in bch_btree_gc_root()
1722 if (b->level) { in bch_btree_gc_root()
1723 ret = btree_gc_recurse(b, op, writes, gc); in bch_btree_gc_root()
1728 bkey_copy_key(&b->c->gc_done, &b->key); in bch_btree_gc_root()
1735 struct cache *ca; in btree_gc_start()
1738 if (!c->gc_mark_valid) in btree_gc_start()
1741 mutex_lock(&c->bucket_lock); in btree_gc_start()
1743 c->gc_mark_valid = 0; in btree_gc_start()
1744 c->gc_done = ZERO_KEY; in btree_gc_start()
1746 ca = c->cache; in btree_gc_start()
1748 b->last_gc = b->gen; in btree_gc_start()
1749 if (!atomic_read(&b->pin)) { in btree_gc_start()
1755 mutex_unlock(&c->bucket_lock); in btree_gc_start()
1761 struct cache *ca; in bch_btree_gc_finish()
1765 mutex_lock(&c->bucket_lock); in bch_btree_gc_finish()
1768 c->gc_mark_valid = 1; in bch_btree_gc_finish()
1769 c->need_gc = 0; in bch_btree_gc_finish()
1771 for (i = 0; i < KEY_PTRS(&c->uuid_bucket); i++) in bch_btree_gc_finish()
1772 SET_GC_MARK(PTR_BUCKET(c, &c->uuid_bucket, i), in bch_btree_gc_finish()
1777 for (i = 0; i < c->devices_max_used; i++) { in bch_btree_gc_finish()
1778 struct bcache_device *d = c->devices[i]; in bch_btree_gc_finish()
1782 if (!d || UUID_FLASH_ONLY(&c->uuids[i])) in bch_btree_gc_finish()
1786 spin_lock(&dc->writeback_keys.lock); in bch_btree_gc_finish()
1788 &dc->writeback_keys.keys, node) in bch_btree_gc_finish()
1789 for (j = 0; j < KEY_PTRS(&w->key); j++) in bch_btree_gc_finish()
1790 SET_GC_MARK(PTR_BUCKET(c, &w->key, j), in bch_btree_gc_finish()
1792 spin_unlock(&dc->writeback_keys.lock); in bch_btree_gc_finish()
1796 c->avail_nbuckets = 0; in bch_btree_gc_finish()
1798 ca = c->cache; in bch_btree_gc_finish()
1799 ca->invalidate_needs_gc = 0; in bch_btree_gc_finish()
1801 for (k = ca->sb.d; k < ca->sb.d + ca->sb.keys; k++) in bch_btree_gc_finish()
1802 SET_GC_MARK(ca->buckets + *k, GC_MARK_METADATA); in bch_btree_gc_finish()
1804 for (k = ca->prio_buckets; in bch_btree_gc_finish()
1805 k < ca->prio_buckets + prio_buckets(ca) * 2; k++) in bch_btree_gc_finish()
1806 SET_GC_MARK(ca->buckets + *k, GC_MARK_METADATA); in bch_btree_gc_finish()
1809 c->need_gc = max(c->need_gc, bucket_gc_gen(b)); in bch_btree_gc_finish()
1811 if (atomic_read(&b->pin)) in bch_btree_gc_finish()
1817 c->avail_nbuckets++; in bch_btree_gc_finish()
1820 mutex_unlock(&c->bucket_lock); in bch_btree_gc_finish()
1828 struct btree_op op; in bch_btree_gc() local
1835 bch_btree_op_init(&op, SHRT_MAX); in bch_btree_gc()
1841 ret = bcache_btree_root(gc_root, c, &op, &writes, &stats); in bch_btree_gc()
1845 if (ret == -EAGAIN) in bch_btree_gc()
1850 } while (ret && !test_bit(CACHE_SET_IO_DISABLE, &c->flags)); in bch_btree_gc()
1855 bch_time_stats_update(&c->btree_gc_time, start_time); in bch_btree_gc()
1860 memcpy(&c->gc_stats, &stats, sizeof(struct gc_stat)); in bch_btree_gc()
1869 struct cache *ca = c->cache; in gc_should_run()
1871 if (ca->invalidate_needs_gc) in gc_should_run()
1874 if (atomic_read(&c->sectors_to_gc) < 0) in gc_should_run()
1885 wait_event_interruptible(c->gc_wait, in bch_gc_thread()
1887 test_bit(CACHE_SET_IO_DISABLE, &c->flags) || in bch_gc_thread()
1891 test_bit(CACHE_SET_IO_DISABLE, &c->flags)) in bch_gc_thread()
1904 c->gc_thread = kthread_run(bch_gc_thread, c, "bcache_gc"); in bch_gc_thread_start()
1905 return PTR_ERR_OR_ZERO(c->gc_thread); in bch_gc_thread_start()
1910 static int bch_btree_check_recurse(struct btree *b, struct btree_op *op) in bch_btree_check_recurse() argument
1916 for_each_key_filter(&b->keys, k, &iter, bch_ptr_invalid) in bch_btree_check_recurse()
1917 bch_initial_mark_key(b->c, b->level, k); in bch_btree_check_recurse()
1919 bch_initial_mark_key(b->c, b->level + 1, &b->key); in bch_btree_check_recurse()
1921 if (b->level) { in bch_btree_check_recurse()
1922 bch_btree_iter_init(&b->keys, &iter, NULL); in bch_btree_check_recurse()
1925 k = bch_btree_iter_next_filter(&iter, &b->keys, in bch_btree_check_recurse()
1930 * initialize c->gc_stats.nodes in bch_btree_check_recurse()
1933 b->c->gc_stats.nodes++; in bch_btree_check_recurse()
1937 ret = bcache_btree(check_recurse, p, b, op); in bch_btree_check_recurse()
1951 struct btree_check_state *check_state = info->state; in bch_btree_check_thread()
1952 struct cache_set *c = check_state->c; in bch_btree_check_thread()
1962 bch_btree_iter_init(&c->root->keys, &iter, NULL); in bch_btree_check_thread()
1963 k = bch_btree_iter_next_filter(&iter, &c->root->keys, bch_ptr_bad); in bch_btree_check_thread()
1971 * sub-tree indexed by the fetched key. in bch_btree_check_thread()
1973 spin_lock(&check_state->idx_lock); in bch_btree_check_thread()
1974 cur_idx = check_state->key_idx; in bch_btree_check_thread()
1975 check_state->key_idx++; in bch_btree_check_thread()
1976 spin_unlock(&check_state->idx_lock); in bch_btree_check_thread()
1978 skip_nr = cur_idx - prev_idx; in bch_btree_check_thread()
1982 &c->root->keys, in bch_btree_check_thread()
1992 atomic_set(&check_state->enough, 1); in bch_btree_check_thread()
1993 /* Update check_state->enough earlier */ in bch_btree_check_thread()
1997 skip_nr--; in bch_btree_check_thread()
2002 struct btree_op op; in bch_btree_check_thread() local
2004 btree_node_prefetch(c->root, p); in bch_btree_check_thread()
2005 c->gc_stats.nodes++; in bch_btree_check_thread()
2006 bch_btree_op_init(&op, 0); in bch_btree_check_thread()
2007 ret = bcache_btree(check_recurse, p, c->root, &op); in bch_btree_check_thread()
2009 * The op may be added to cache_set's btree_cache_wait in bch_btree_check_thread()
2012 * free op memory. in bch_btree_check_thread()
2016 finish_wait(&c->btree_cache_wait, &(&op)->wait); in bch_btree_check_thread()
2026 info->result = ret; in bch_btree_check_thread()
2027 /* update check_state->started among all CPUs */ in bch_btree_check_thread()
2029 if (atomic_dec_and_test(&check_state->started)) in bch_btree_check_thread()
2030 wake_up(&check_state->wait); in bch_btree_check_thread()
2058 for_each_key_filter(&c->root->keys, k, &iter, bch_ptr_invalid) in bch_btree_check()
2059 bch_initial_mark_key(c, c->root->level, k); in bch_btree_check()
2061 bch_initial_mark_key(c, c->root->level + 1, &c->root->key); in bch_btree_check()
2063 if (c->root->level == 0) in bch_btree_check()
2075 rw_lock(0, c->root, c->root->level); in bch_btree_check()
2078 * if check_state.enough is non-zero, it means current in bch_btree_check()
2097 for (--i; i >= 0; i--) in bch_btree_check()
2099 ret = -ENOMEM; in bch_btree_check()
2118 rw_unlock(0, c->root); in bch_btree_check()
2124 struct cache *ca = c->cache; in bch_initial_gc_finish()
2129 mutex_lock(&c->bucket_lock); in bch_initial_gc_finish()
2133 * order to get the allocator thread started - it needs freed buckets in in bch_initial_gc_finish()
2141 if (fifo_full(&ca->free[RESERVE_PRIO]) && in bch_initial_gc_finish()
2142 fifo_full(&ca->free[RESERVE_BTREE])) in bch_initial_gc_finish()
2148 if (!fifo_push(&ca->free[RESERVE_PRIO], in bch_initial_gc_finish()
2149 b - ca->buckets)) in bch_initial_gc_finish()
2150 fifo_push(&ca->free[RESERVE_BTREE], in bch_initial_gc_finish()
2151 b - ca->buckets); in bch_initial_gc_finish()
2155 mutex_unlock(&c->bucket_lock); in bch_initial_gc_finish()
2165 BUG_ON(bkey_cmp(k, &b->key) > 0); in btree_insert_key()
2167 status = bch_btree_insert_key(&b->keys, k, replace_key); in btree_insert_key()
2169 bch_check_keys(&b->keys, "%u for %s", status, in btree_insert_key()
2181 long ret = bch_btree_keys_u64s_remaining(&b->keys); in insert_u64s_remaining()
2186 if (b->keys.ops->is_extents) in insert_u64s_remaining()
2187 ret -= KEY_MAX_U64S; in insert_u64s_remaining()
2192 static bool bch_btree_insert_keys(struct btree *b, struct btree_op *op, in bch_btree_insert_keys() argument
2197 int oldsize = bch_count_data(&b->keys); in bch_btree_insert_keys()
2200 struct bkey *k = insert_keys->keys; in bch_btree_insert_keys()
2205 if (bkey_cmp(k, &b->key) <= 0) { in bch_btree_insert_keys()
2206 if (!b->level) in bch_btree_insert_keys()
2207 bkey_put(b->c, k); in bch_btree_insert_keys()
2211 } else if (bkey_cmp(&START_KEY(k), &b->key) < 0) { in bch_btree_insert_keys()
2213 bkey_copy(&temp.key, insert_keys->keys); in bch_btree_insert_keys()
2215 bch_cut_back(&b->key, &temp.key); in bch_btree_insert_keys()
2216 bch_cut_front(&b->key, insert_keys->keys); in bch_btree_insert_keys()
2226 op->insert_collision = true; in bch_btree_insert_keys()
2228 BUG_ON(!bch_keylist_empty(insert_keys) && b->level); in bch_btree_insert_keys()
2230 BUG_ON(bch_count_data(&b->keys) < oldsize); in bch_btree_insert_keys()
2234 static int btree_split(struct btree *b, struct btree_op *op, in btree_split() argument
2247 if (btree_check_reserve(b, op)) { in btree_split()
2248 if (!b->level) in btree_split()
2249 return -EINTR; in btree_split()
2254 n1 = btree_node_alloc_replacement(b, op); in btree_split()
2259 block_bytes(n1->c->cache)) > (btree_blocks(b) * 4) / 5; in btree_split()
2264 trace_bcache_btree_node_split(b, btree_bset_first(n1)->keys); in btree_split()
2266 n2 = bch_btree_node_alloc(b->c, op, b->level, b->parent); in btree_split()
2270 if (!b->parent) { in btree_split()
2271 n3 = bch_btree_node_alloc(b->c, op, b->level + 1, NULL); in btree_split()
2276 mutex_lock(&n1->write_lock); in btree_split()
2277 mutex_lock(&n2->write_lock); in btree_split()
2279 bch_btree_insert_keys(n1, op, insert_keys, replace_key); in btree_split()
2286 while (keys < (btree_bset_first(n1)->keys * 3) / 5) in btree_split()
2290 bkey_copy_key(&n1->key, in btree_split()
2294 btree_bset_first(n2)->keys = btree_bset_first(n1)->keys - keys; in btree_split()
2295 btree_bset_first(n1)->keys = keys; in btree_split()
2297 memcpy(btree_bset_first(n2)->start, in btree_split()
2299 btree_bset_first(n2)->keys * sizeof(uint64_t)); in btree_split()
2301 bkey_copy_key(&n2->key, &b->key); in btree_split()
2303 bch_keylist_add(&parent_keys, &n2->key); in btree_split()
2305 mutex_unlock(&n2->write_lock); in btree_split()
2308 trace_bcache_btree_node_compact(b, btree_bset_first(n1)->keys); in btree_split()
2310 mutex_lock(&n1->write_lock); in btree_split()
2311 bch_btree_insert_keys(n1, op, insert_keys, replace_key); in btree_split()
2314 bch_keylist_add(&parent_keys, &n1->key); in btree_split()
2316 mutex_unlock(&n1->write_lock); in btree_split()
2320 mutex_lock(&n3->write_lock); in btree_split()
2321 bkey_copy_key(&n3->key, &MAX_KEY); in btree_split()
2322 bch_btree_insert_keys(n3, op, &parent_keys, NULL); in btree_split()
2324 mutex_unlock(&n3->write_lock); in btree_split()
2329 } else if (!b->parent) { in btree_split()
2339 bch_btree_insert_node(b->parent, op, &parent_keys, NULL, NULL); in btree_split()
2346 bch_time_stats_update(&b->c->btree_split_time, start_time); in btree_split()
2350 bkey_put(b->c, &n2->key); in btree_split()
2354 bkey_put(b->c, &n1->key); in btree_split()
2358 WARN(1, "bcache: btree split failed (level %u)", b->level); in btree_split()
2360 if (n3 == ERR_PTR(-EAGAIN) || in btree_split()
2361 n2 == ERR_PTR(-EAGAIN) || in btree_split()
2362 n1 == ERR_PTR(-EAGAIN)) in btree_split()
2363 return -EAGAIN; in btree_split()
2365 return -ENOMEM; in btree_split()
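
To put numbers on the split heuristics visible above: a rewrite turns into a split when the sorted result would fill more than 4/5 of a node, and the first node then keeps roughly the first three fifths of the keys while the second takes the remainder, so both siblings end up with slack for future inserts. A toy calculation with illustrative figures:

	#include <stdio.h>

	int main(void)
	{
		unsigned node_blocks = 512;		/* capacity of one btree node (example) */
		unsigned blocks_after_sort = 420;	/* size of the sorted replacement */
		unsigned total_keys = 1000;

		int split = blocks_after_sort > (node_blocks * 4) / 5;	/* 420 > 409 -> split */
		unsigned n1_keys = (total_keys * 3) / 5;		/* first node keeps ~3/5 */

		printf("split=%d n1=%u n2=%u\n", split, n1_keys, total_keys - n1_keys);
		return 0;
	}
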
2368 static int bch_btree_insert_node(struct btree *b, struct btree_op *op, in bch_btree_insert_node() argument
2375 BUG_ON(b->level && replace_key); in bch_btree_insert_node()
2379 mutex_lock(&b->write_lock); in bch_btree_insert_node()
2382 b->keys.last_set_unwritten) in bch_btree_insert_node()
2386 mutex_unlock(&b->write_lock); in bch_btree_insert_node()
2392 if (bch_btree_insert_keys(b, op, insert_keys, replace_key)) { in bch_btree_insert_node()
2393 if (!b->level) in bch_btree_insert_node()
2399 mutex_unlock(&b->write_lock); in bch_btree_insert_node()
2406 if (current->bio_list) { in bch_btree_insert_node()
2407 op->lock = b->c->root->level + 1; in bch_btree_insert_node()
2408 return -EAGAIN; in bch_btree_insert_node()
2409 } else if (op->lock <= b->c->root->level) { in bch_btree_insert_node()
2410 op->lock = b->c->root->level + 1; in bch_btree_insert_node()
2411 return -EINTR; in bch_btree_insert_node()
2414 int ret = btree_split(b, op, insert_keys, replace_key); in bch_btree_insert_node()
2419 return -EINTR; in bch_btree_insert_node()
2424 int bch_btree_insert_check_key(struct btree *b, struct btree_op *op, in bch_btree_insert_check_key() argument
2427 int ret = -EINTR; in bch_btree_insert_check_key()
2428 uint64_t btree_ptr = b->key.ptr[0]; in bch_btree_insert_check_key()
2429 unsigned long seq = b->seq; in bch_btree_insert_check_key()
2431 bool upgrade = op->lock == -1; in bch_btree_insert_check_key()
2437 rw_lock(true, b, b->level); in bch_btree_insert_check_key()
2439 if (b->key.ptr[0] != btree_ptr || in bch_btree_insert_check_key()
2440 b->seq != seq + 1) { in bch_btree_insert_check_key()
2441 op->lock = b->level; in bch_btree_insert_check_key()
2447 get_random_bytes(&check_key->ptr[0], sizeof(uint64_t)); in bch_btree_insert_check_key()
2453 ret = bch_btree_insert_node(b, op, &insert, NULL, NULL); in bch_btree_insert_check_key()
2458 downgrade_write(&b->lock); in bch_btree_insert_check_key()
2463 struct btree_op op; member
2471 struct btree_insert_op *op = container_of(b_op, in btree_insert_fn() local
2472 struct btree_insert_op, op); in btree_insert_fn()
2474 int ret = bch_btree_insert_node(b, &op->op, op->keys, in btree_insert_fn()
2475 op->journal_ref, op->replace_key); in btree_insert_fn()
2476 if (ret && !bch_keylist_empty(op->keys)) in btree_insert_fn()
2485 struct btree_insert_op op; in bch_btree_insert() local
2488 BUG_ON(current->bio_list); in bch_btree_insert()
2491 bch_btree_op_init(&op.op, 0); in bch_btree_insert()
2492 op.keys = keys; in bch_btree_insert()
2493 op.journal_ref = journal_ref; in bch_btree_insert()
2494 op.replace_key = replace_key; in bch_btree_insert()
2497 op.op.lock = 0; in bch_btree_insert()
2498 ret = bch_btree_map_leaf_nodes(&op.op, c, in bch_btree_insert()
2499 &START_KEY(keys->keys), in bch_btree_insert()
2510 } else if (op.op.insert_collision) in bch_btree_insert()
2511 ret = -ESRCH; in bch_btree_insert()
2525 BUG_ON(!b->written); in bch_btree_set_root()
2527 for (i = 0; i < KEY_PTRS(&b->key); i++) in bch_btree_set_root()
2528 BUG_ON(PTR_BUCKET(b->c, &b->key, i)->prio != BTREE_PRIO); in bch_btree_set_root()
2530 mutex_lock(&b->c->bucket_lock); in bch_btree_set_root()
2531 list_del_init(&b->list); in bch_btree_set_root()
2532 mutex_unlock(&b->c->bucket_lock); in bch_btree_set_root()
2534 b->c->root = b; in bch_btree_set_root()
2536 bch_journal_meta(b->c, &cl); in bch_btree_set_root()
2542 static int bch_btree_map_nodes_recurse(struct btree *b, struct btree_op *op, in bch_btree_map_nodes_recurse() argument
2548 if (b->level) { in bch_btree_map_nodes_recurse()
2552 bch_btree_iter_init(&b->keys, &iter, from); in bch_btree_map_nodes_recurse()
2554 while ((k = bch_btree_iter_next_filter(&iter, &b->keys, in bch_btree_map_nodes_recurse()
2557 op, from, fn, flags); in bch_btree_map_nodes_recurse()
2565 if (!b->level || flags == MAP_ALL_NODES) in bch_btree_map_nodes_recurse()
2566 ret = fn(op, b); in bch_btree_map_nodes_recurse()
2571 int __bch_btree_map_nodes(struct btree_op *op, struct cache_set *c, in __bch_btree_map_nodes() argument
2574 return bcache_btree_root(map_nodes_recurse, c, op, from, fn, flags); in __bch_btree_map_nodes()
2577 int bch_btree_map_keys_recurse(struct btree *b, struct btree_op *op, in bch_btree_map_keys_recurse() argument
2585 bch_btree_iter_init(&b->keys, &iter, from); in bch_btree_map_keys_recurse()
2587 while ((k = bch_btree_iter_next_filter(&iter, &b->keys, bch_ptr_bad))) { in bch_btree_map_keys_recurse()
2588 ret = !b->level in bch_btree_map_keys_recurse()
2589 ? fn(op, b, k) in bch_btree_map_keys_recurse()
2591 b, op, from, fn, flags); in bch_btree_map_keys_recurse()
2598 if (!b->level && (flags & MAP_END_KEY)) in bch_btree_map_keys_recurse()
2599 ret = fn(op, b, &KEY(KEY_INODE(&b->key), in bch_btree_map_keys_recurse()
2600 KEY_OFFSET(&b->key), 0)); in bch_btree_map_keys_recurse()
2605 int bch_btree_map_keys(struct btree_op *op, struct cache_set *c, in bch_btree_map_keys() argument
2608 return bcache_btree_root(map_keys_recurse, c, op, from, fn, flags); in bch_btree_map_keys()
2616 if (bkey_cmp(&l->key, &START_KEY(&r->key)) <= 0) in keybuf_cmp()
2617 return -1; in keybuf_cmp()
2618 if (bkey_cmp(&START_KEY(&l->key), &r->key) >= 0) in keybuf_cmp()
2626 return clamp_t(int64_t, bkey_cmp(&l->key, &r->key), -1, 1); in keybuf_nonoverlapping_cmp()
2630 struct btree_op op; member
2637 static int refill_keybuf_fn(struct btree_op *op, struct btree *b, in refill_keybuf_fn() argument
2640 struct refill *refill = container_of(op, struct refill, op); in refill_keybuf_fn()
2641 struct keybuf *buf = refill->buf; in refill_keybuf_fn()
2644 if (bkey_cmp(k, refill->end) > 0) { in refill_keybuf_fn()
2652 if (refill->pred(buf, k)) { in refill_keybuf_fn()
2655 spin_lock(&buf->lock); in refill_keybuf_fn()
2657 w = array_alloc(&buf->freelist); in refill_keybuf_fn()
2659 spin_unlock(&buf->lock); in refill_keybuf_fn()
2663 w->private = NULL; in refill_keybuf_fn()
2664 bkey_copy(&w->key, k); in refill_keybuf_fn()
2666 if (RB_INSERT(&buf->keys, w, node, keybuf_cmp)) in refill_keybuf_fn()
2667 array_free(&buf->freelist, w); in refill_keybuf_fn()
2669 refill->nr_found++; in refill_keybuf_fn()
2671 if (array_freelist_empty(&buf->freelist)) in refill_keybuf_fn()
2674 spin_unlock(&buf->lock); in refill_keybuf_fn()
2677 buf->last_scanned = *k; in refill_keybuf_fn()
2684 struct bkey start = buf->last_scanned; in bch_refill_keybuf()
2689 bch_btree_op_init(&refill.op, -1); in bch_refill_keybuf()
2695 bch_btree_map_keys(&refill.op, c, &buf->last_scanned, in bch_refill_keybuf()
2700 KEY_INODE(&buf->last_scanned), in bch_refill_keybuf()
2701 KEY_OFFSET(&buf->last_scanned)); in bch_refill_keybuf()
2703 spin_lock(&buf->lock); in bch_refill_keybuf()
2705 if (!RB_EMPTY_ROOT(&buf->keys)) { in bch_refill_keybuf()
2708 w = RB_FIRST(&buf->keys, struct keybuf_key, node); in bch_refill_keybuf()
2709 buf->start = START_KEY(&w->key); in bch_refill_keybuf()
2711 w = RB_LAST(&buf->keys, struct keybuf_key, node); in bch_refill_keybuf()
2712 buf->end = w->key; in bch_refill_keybuf()
2714 buf->start = MAX_KEY; in bch_refill_keybuf()
2715 buf->end = MAX_KEY; in bch_refill_keybuf()
2718 spin_unlock(&buf->lock); in bch_refill_keybuf()
2723 rb_erase(&w->node, &buf->keys); in __bch_keybuf_del()
2724 array_free(&buf->freelist, w); in __bch_keybuf_del()
2729 spin_lock(&buf->lock); in bch_keybuf_del()
2731 spin_unlock(&buf->lock); in bch_keybuf_del()
2742 if (bkey_cmp(end, &buf->start) <= 0 || in bch_keybuf_check_overlapping()
2743 bkey_cmp(start, &buf->end) >= 0) in bch_keybuf_check_overlapping()
2746 spin_lock(&buf->lock); in bch_keybuf_check_overlapping()
2747 w = RB_GREATER(&buf->keys, s, node, keybuf_nonoverlapping_cmp); in bch_keybuf_check_overlapping()
2749 while (w && bkey_cmp(&START_KEY(&w->key), end) < 0) { in bch_keybuf_check_overlapping()
2753 if (p->private) in bch_keybuf_check_overlapping()
2759 spin_unlock(&buf->lock); in bch_keybuf_check_overlapping()
2767 spin_lock(&buf->lock); in bch_keybuf_next()
2769 w = RB_FIRST(&buf->keys, struct keybuf_key, node); in bch_keybuf_next()
2771 while (w && w->private) in bch_keybuf_next()
2775 w->private = ERR_PTR(-EINTR); in bch_keybuf_next()
2777 spin_unlock(&buf->lock); in bch_keybuf_next()
2793 if (bkey_cmp(&buf->last_scanned, end) >= 0) { in bch_keybuf_next_rescan()
2806 buf->last_scanned = MAX_KEY; in bch_keybuf_init()
2807 buf->keys = RB_ROOT; in bch_keybuf_init()
2809 spin_lock_init(&buf->lock); in bch_keybuf_init()
2810 array_allocator_init(&buf->freelist); in bch_keybuf_init()
2823 return -ENOMEM; in bch_btree_init()