// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2010 Kent Overstreet <kent.overstreet@gmail.com>
 *
 * Uses a block device as cache for other block devices; optimized for SSDs.
 * All allocation is done in buckets, which should match the erase block size
 * of the device.
 *
 * Buckets containing cached data are kept on a heap sorted by priority;
 * bucket priority is increased on cache hit, and periodically all the buckets
 * on the heap have their priority scaled down. This currently is just used as
 * an LRU but in the future should allow for more intelligent heuristics.
 *
 * Buckets have an 8 bit counter; freeing is accomplished by incrementing the
 * counter. Garbage collection is used to remove stale pointers.
 *
 * Indexing is done via a btree; nodes are not necessarily fully sorted, rather
 * as keys are inserted we only sort the pages that have not yet been written.
 * When garbage collection is run, we resort the entire node.
 *
 * All configuration is done via sysfs; see Documentation/admin-guide/bcache.rst.
 */

#include "bcache.h"
#include "btree.h"
#include "debug.h"
#include "extents.h"

#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/hash.h>
#include <linux/kthread.h>
#include <linux/prefetch.h>
#include <linux/random.h>
#include <linux/rcupdate.h>
#include <linux/sched/clock.h>
#include <linux/rculist.h>
#include <linux/delay.h>
#include <linux/sort.h>
#include <trace/events/bcache.h>

/*
 * Todo:
 * register_bcache: Return errors out to userspace correctly
 *
 * Writeback: don't undirty key until after a cache flush
 *
 * Create an iterator for key pointers
 *
 * On btree write error, mark bucket such that it won't be freed from the cache
 *
 * Journalling:
 *   Check for bad keys in replay
 *   Propagate barriers
 *   Refcount journal entries in journal_replay
 *
 * Garbage collection:
 *   Finish incremental gc
 *   Gc should free old UUIDs, data for invalid UUIDs
 *
 * Provide a way to list backing device UUIDs we have data cached for, and
 * probably how long it's been since we've seen them, and a way to invalidate
 * dirty data for devices that will never be attached again
 *
 * Keep 1 min/5 min/15 min statistics of how busy a block device has been, so
 * that based on that and how much dirty data we have we can keep writeback
 * from being starved
 *
 * Add a tracepoint or somesuch to watch for writeback starvation
 *
 * When btree depth > 1 and splitting an interior node, we have to make sure
 * alloc_bucket() cannot fail. This should be true but is not completely
 * obvious.
 *
 * Plugging?
 *
 * If data write is less than hard sector size of ssd, round up offset in open
 * bucket to the next whole sector
 *
 * Superblock needs to be fleshed out for multiple cache devices
 *
 * Add a sysfs tunable for the number of writeback IOs in flight
 *
 * Add a sysfs tunable for the number of open data buckets
 *
 * IO tracking: Can we track when one process is doing io on behalf of another?
 * IO tracking: Don't use just an average, weigh more recent stuff higher
 *
 * Test module load/unload
 */

#define MAX_GC_TIMES		100
#define MIN_GC_NODES		100
#define GC_SLEEP_MS		100

#define PTR_DIRTY_BIT		(((uint64_t) 1 << 36))

#define PTR_HASH(c, k)							\
	(((k)->ptr[0] >> c->bucket_bits) | PTR_GEN(k, 0))

static struct workqueue_struct *btree_io_wq;

#define insert_lock(s, b)	((b)->level <= (s)->lock)


static inline struct bset *write_block(struct btree *b)
{
	return ((void *) btree_bset_first(b)) + b->written * block_bytes(b->c->cache);
}

static void bch_btree_init_next(struct btree *b)
{
	/* If not a leaf node, always sort */
	if (b->level && b->keys.nsets)
		bch_btree_sort(&b->keys, &b->c->sort);
	else
		bch_btree_sort_lazy(&b->keys, &b->c->sort);

	if (b->written < btree_blocks(b))
		bch_bset_init_next(&b->keys, write_block(b),
				   bset_magic(&b->c->cache->sb));

}

/* Btree key manipulation */

void bkey_put(struct cache_set *c, struct bkey *k)
{
	unsigned int i;

	for (i = 0; i < KEY_PTRS(k); i++)
		if (ptr_available(c, k, i))
			atomic_dec_bug(&PTR_BUCKET(c, k, i)->pin);
}

/* Btree IO */

static uint64_t btree_csum_set(struct btree *b, struct bset *i)
{
	uint64_t crc = b->key.ptr[0];
	void *data = (void *) i + 8, *end = bset_bkey_last(i);

	crc = crc64_be(crc, data, end - data);
	return crc ^ 0xffffffffffffffffULL;
}

void bch_btree_node_read_done(struct btree *b)
{
	const char *err = "bad btree header";
	struct bset *i = btree_bset_first(b);
	struct btree_iter iter;

	/*
	 * c->fill_iter can allocate an iterator with more memory space
	 * than static MAX_BSETS.
	 * See the comment around cache_set->fill_iter.
157 */ 158 iter.heap.data = mempool_alloc(&b->c->fill_iter, GFP_NOIO); 159 iter.heap.size = b->c->cache->sb.bucket_size / b->c->cache->sb.block_size; 160 iter.heap.nr = 0; 161 162 #ifdef CONFIG_BCACHE_DEBUG 163 iter.b = &b->keys; 164 #endif 165 166 if (!i->seq) 167 goto err; 168 169 for (; 170 b->written < btree_blocks(b) && i->seq == b->keys.set[0].data->seq; 171 i = write_block(b)) { 172 err = "unsupported bset version"; 173 if (i->version > BCACHE_BSET_VERSION) 174 goto err; 175 176 err = "bad btree header"; 177 if (b->written + set_blocks(i, block_bytes(b->c->cache)) > 178 btree_blocks(b)) 179 goto err; 180 181 err = "bad magic"; 182 if (i->magic != bset_magic(&b->c->cache->sb)) 183 goto err; 184 185 err = "bad checksum"; 186 switch (i->version) { 187 case 0: 188 if (i->csum != csum_set(i)) 189 goto err; 190 break; 191 case BCACHE_BSET_VERSION: 192 if (i->csum != btree_csum_set(b, i)) 193 goto err; 194 break; 195 } 196 197 err = "empty set"; 198 if (i != b->keys.set[0].data && !i->keys) 199 goto err; 200 201 bch_btree_iter_push(&iter, i->start, bset_bkey_last(i)); 202 203 b->written += set_blocks(i, block_bytes(b->c->cache)); 204 } 205 206 err = "corrupted btree"; 207 for (i = write_block(b); 208 bset_sector_offset(&b->keys, i) < KEY_SIZE(&b->key); 209 i = ((void *) i) + block_bytes(b->c->cache)) 210 if (i->seq == b->keys.set[0].data->seq) 211 goto err; 212 213 bch_btree_sort_and_fix_extents(&b->keys, &iter, &b->c->sort); 214 215 i = b->keys.set[0].data; 216 err = "short btree key"; 217 if (b->keys.set[0].size && 218 bkey_cmp(&b->key, &b->keys.set[0].end) < 0) 219 goto err; 220 221 if (b->written < btree_blocks(b)) 222 bch_bset_init_next(&b->keys, write_block(b), 223 bset_magic(&b->c->cache->sb)); 224 out: 225 mempool_free(iter.heap.data, &b->c->fill_iter); 226 return; 227 err: 228 set_btree_node_io_error(b); 229 bch_cache_set_error(b->c, "%s at bucket %zu, block %u, %u keys", 230 err, PTR_BUCKET_NR(b->c, &b->key, 0), 231 bset_block_offset(b, i), i->keys); 232 goto out; 233 } 234 235 static void btree_node_read_endio(struct bio *bio) 236 { 237 struct closure *cl = bio->bi_private; 238 239 closure_put(cl); 240 } 241 242 static void bch_btree_node_read(struct btree *b) 243 { 244 uint64_t start_time = local_clock(); 245 struct closure cl; 246 struct bio *bio; 247 248 trace_bcache_btree_read(b); 249 250 closure_init_stack(&cl); 251 252 bio = bch_bbio_alloc(b->c); 253 bio->bi_iter.bi_size = KEY_SIZE(&b->key) << 9; 254 bio->bi_end_io = btree_node_read_endio; 255 bio->bi_private = &cl; 256 bio->bi_opf = REQ_OP_READ | REQ_META; 257 258 bch_bio_map(bio, b->keys.set[0].data); 259 260 bch_submit_bbio(bio, b->c, &b->key, 0); 261 closure_sync(&cl); 262 263 if (bio->bi_status) 264 set_btree_node_io_error(b); 265 266 bch_bbio_free(bio, b->c); 267 268 if (btree_node_io_error(b)) 269 goto err; 270 271 bch_btree_node_read_done(b); 272 bch_time_stats_update(&b->c->btree_read_time, start_time); 273 274 return; 275 err: 276 bch_cache_set_error(b->c, "io error reading bucket %zu", 277 PTR_BUCKET_NR(b->c, &b->key, 0)); 278 } 279 280 static void btree_complete_write(struct btree *b, struct btree_write *w) 281 { 282 if (w->prio_blocked && 283 !atomic_sub_return(w->prio_blocked, &b->c->prio_blocked)) 284 wake_up_allocators(b->c); 285 286 if (w->journal) { 287 atomic_dec_bug(w->journal); 288 __closure_wake_up(&b->c->journal.wait); 289 } 290 291 w->prio_blocked = 0; 292 w->journal = NULL; 293 } 294 295 static CLOSURE_CALLBACK(btree_node_write_unlock) 296 { 297 closure_type(b, struct btree, io); 298 299 
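	/*
	 * The write is fully complete here: release the node's io_mutex,
	 * which was taken in __bch_btree_node_write() before the write
	 * closure was initialized.
	 */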
up(&b->io_mutex); 300 } 301 302 static CLOSURE_CALLBACK(__btree_node_write_done) 303 { 304 closure_type(b, struct btree, io); 305 struct btree_write *w = btree_prev_write(b); 306 307 bch_bbio_free(b->bio, b->c); 308 b->bio = NULL; 309 btree_complete_write(b, w); 310 311 if (btree_node_dirty(b)) 312 queue_delayed_work(btree_io_wq, &b->work, 30 * HZ); 313 314 closure_return_with_destructor(cl, btree_node_write_unlock); 315 } 316 317 static CLOSURE_CALLBACK(btree_node_write_done) 318 { 319 closure_type(b, struct btree, io); 320 321 bio_free_pages(b->bio); 322 __btree_node_write_done(&cl->work); 323 } 324 325 static void btree_node_write_endio(struct bio *bio) 326 { 327 struct closure *cl = bio->bi_private; 328 struct btree *b = container_of(cl, struct btree, io); 329 330 if (bio->bi_status) 331 set_btree_node_io_error(b); 332 333 bch_bbio_count_io_errors(b->c, bio, bio->bi_status, "writing btree"); 334 closure_put(cl); 335 } 336 337 static void do_btree_node_write(struct btree *b) 338 { 339 struct closure *cl = &b->io; 340 struct bset *i = btree_bset_last(b); 341 BKEY_PADDED(key) k; 342 343 i->version = BCACHE_BSET_VERSION; 344 i->csum = btree_csum_set(b, i); 345 346 BUG_ON(b->bio); 347 b->bio = bch_bbio_alloc(b->c); 348 349 b->bio->bi_end_io = btree_node_write_endio; 350 b->bio->bi_private = cl; 351 b->bio->bi_iter.bi_size = roundup(set_bytes(i), block_bytes(b->c->cache)); 352 b->bio->bi_opf = REQ_OP_WRITE | REQ_META | REQ_FUA; 353 bch_bio_map(b->bio, i); 354 355 /* 356 * If we're appending to a leaf node, we don't technically need FUA - 357 * this write just needs to be persisted before the next journal write, 358 * which will be marked FLUSH|FUA. 359 * 360 * Similarly if we're writing a new btree root - the pointer is going to 361 * be in the next journal entry. 362 * 363 * But if we're writing a new btree node (that isn't a root) or 364 * appending to a non leaf btree node, we need either FUA or a flush 365 * when we write the parent with the new pointer. FUA is cheaper than a 366 * flush, and writes appending to leaf nodes aren't blocking anything so 367 * just make all btree node writes FUA to keep things sane. 
368 */ 369 370 bkey_copy(&k.key, &b->key); 371 SET_PTR_OFFSET(&k.key, 0, PTR_OFFSET(&k.key, 0) + 372 bset_sector_offset(&b->keys, i)); 373 374 if (!bch_bio_alloc_pages(b->bio, __GFP_NOWARN|GFP_NOWAIT)) { 375 struct bio_vec *bv; 376 void *addr = (void *) ((unsigned long) i & ~(PAGE_SIZE - 1)); 377 struct bvec_iter_all iter_all; 378 379 bio_for_each_segment_all(bv, b->bio, iter_all) { 380 memcpy(page_address(bv->bv_page), addr, PAGE_SIZE); 381 addr += PAGE_SIZE; 382 } 383 384 bch_submit_bbio(b->bio, b->c, &k.key, 0); 385 386 continue_at(cl, btree_node_write_done, NULL); 387 } else { 388 /* 389 * No problem for multipage bvec since the bio is 390 * just allocated 391 */ 392 b->bio->bi_vcnt = 0; 393 bch_bio_map(b->bio, i); 394 395 bch_submit_bbio(b->bio, b->c, &k.key, 0); 396 397 closure_sync(cl); 398 continue_at_nobarrier(cl, __btree_node_write_done, NULL); 399 } 400 } 401 402 void __bch_btree_node_write(struct btree *b, struct closure *parent) 403 { 404 struct bset *i = btree_bset_last(b); 405 406 lockdep_assert_held(&b->write_lock); 407 408 trace_bcache_btree_write(b); 409 410 BUG_ON(current->bio_list); 411 BUG_ON(b->written >= btree_blocks(b)); 412 BUG_ON(b->written && !i->keys); 413 BUG_ON(btree_bset_first(b)->seq != i->seq); 414 bch_check_keys(&b->keys, "writing"); 415 416 cancel_delayed_work(&b->work); 417 418 /* If caller isn't waiting for write, parent refcount is cache set */ 419 down(&b->io_mutex); 420 closure_init(&b->io, parent ?: &b->c->cl); 421 422 clear_bit(BTREE_NODE_dirty, &b->flags); 423 change_bit(BTREE_NODE_write_idx, &b->flags); 424 425 do_btree_node_write(b); 426 427 atomic_long_add(set_blocks(i, block_bytes(b->c->cache)) * b->c->cache->sb.block_size, 428 &b->c->cache->btree_sectors_written); 429 430 b->written += set_blocks(i, block_bytes(b->c->cache)); 431 } 432 433 void bch_btree_node_write(struct btree *b, struct closure *parent) 434 { 435 unsigned int nsets = b->keys.nsets; 436 437 lockdep_assert_held(&b->lock); 438 439 __bch_btree_node_write(b, parent); 440 441 /* 442 * do verify if there was more than one set initially (i.e. we did a 443 * sort) and we sorted down to a single set: 444 */ 445 if (nsets && !b->keys.nsets) 446 bch_btree_verify(b); 447 448 bch_btree_init_next(b); 449 } 450 451 static void bch_btree_node_write_sync(struct btree *b) 452 { 453 struct closure cl; 454 455 closure_init_stack(&cl); 456 457 mutex_lock(&b->write_lock); 458 bch_btree_node_write(b, &cl); 459 mutex_unlock(&b->write_lock); 460 461 closure_sync(&cl); 462 } 463 464 static void btree_node_write_work(struct work_struct *w) 465 { 466 struct btree *b = container_of(to_delayed_work(w), struct btree, work); 467 468 mutex_lock(&b->write_lock); 469 if (btree_node_dirty(b)) 470 __bch_btree_node_write(b, NULL); 471 mutex_unlock(&b->write_lock); 472 } 473 474 static void bch_btree_leaf_dirty(struct btree *b, atomic_t *journal_ref) 475 { 476 struct bset *i = btree_bset_last(b); 477 struct btree_write *w = btree_current_write(b); 478 479 lockdep_assert_held(&b->write_lock); 480 481 BUG_ON(!b->written); 482 BUG_ON(!i->keys); 483 484 if (!btree_node_dirty(b)) 485 queue_delayed_work(btree_io_wq, &b->work, 30 * HZ); 486 487 set_btree_node_dirty(b); 488 489 /* 490 * w->journal is always the oldest journal pin of all bkeys 491 * in the leaf node, to make sure the oldest jset seq won't 492 * be increased before this btree node is flushed. 
493 */ 494 if (journal_ref) { 495 if (w->journal && 496 journal_pin_cmp(b->c, w->journal, journal_ref)) { 497 atomic_dec_bug(w->journal); 498 w->journal = NULL; 499 } 500 501 if (!w->journal) { 502 w->journal = journal_ref; 503 atomic_inc(w->journal); 504 } 505 } 506 507 /* Force write if set is too big */ 508 if (set_bytes(i) > PAGE_SIZE - 48 && 509 !current->bio_list) 510 bch_btree_node_write(b, NULL); 511 } 512 513 /* 514 * Btree in memory cache - allocation/freeing 515 * mca -> memory cache 516 */ 517 518 #define mca_reserve(c) (((!IS_ERR_OR_NULL(c->root) && c->root->level) \ 519 ? c->root->level : 1) * 8 + 16) 520 #define mca_can_free(c) \ 521 max_t(int, 0, c->btree_cache_used - mca_reserve(c)) 522 523 static void mca_data_free(struct btree *b) 524 { 525 BUG_ON(b->io_mutex.count != 1); 526 527 bch_btree_keys_free(&b->keys); 528 529 b->c->btree_cache_used--; 530 list_move(&b->list, &b->c->btree_cache_freed); 531 } 532 533 static void mca_bucket_free(struct btree *b) 534 { 535 BUG_ON(btree_node_dirty(b)); 536 537 b->key.ptr[0] = 0; 538 hlist_del_init_rcu(&b->hash); 539 list_move(&b->list, &b->c->btree_cache_freeable); 540 } 541 542 static unsigned int btree_order(struct bkey *k) 543 { 544 return ilog2(KEY_SIZE(k) / PAGE_SECTORS ?: 1); 545 } 546 547 static void mca_data_alloc(struct btree *b, struct bkey *k, gfp_t gfp) 548 { 549 if (!bch_btree_keys_alloc(&b->keys, 550 max_t(unsigned int, 551 ilog2(b->c->btree_pages), 552 btree_order(k)), 553 gfp)) { 554 b->c->btree_cache_used++; 555 list_move(&b->list, &b->c->btree_cache); 556 } else { 557 list_move(&b->list, &b->c->btree_cache_freed); 558 } 559 } 560 561 #ifdef CONFIG_PROVE_LOCKING 562 static int btree_lock_cmp_fn(const struct lockdep_map *_a, 563 const struct lockdep_map *_b) 564 { 565 const struct btree *a = container_of(_a, struct btree, lock.dep_map); 566 const struct btree *b = container_of(_b, struct btree, lock.dep_map); 567 568 return -cmp_int(a->level, b->level) ?: bkey_cmp(&a->key, &b->key); 569 } 570 571 static void btree_lock_print_fn(const struct lockdep_map *map) 572 { 573 const struct btree *b = container_of(map, struct btree, lock.dep_map); 574 575 printk(KERN_CONT " l=%u %llu:%llu", b->level, 576 KEY_INODE(&b->key), KEY_OFFSET(&b->key)); 577 } 578 #endif 579 580 static struct btree *mca_bucket_alloc(struct cache_set *c, 581 struct bkey *k, gfp_t gfp) 582 { 583 /* 584 * kzalloc() is necessary here for initialization, 585 * see code comments in bch_btree_keys_init(). 
	 */
	struct btree *b = kzalloc(sizeof(struct btree), gfp);

	if (!b)
		return NULL;

	init_rwsem(&b->lock);
	lock_set_cmp_fn(&b->lock, btree_lock_cmp_fn, btree_lock_print_fn);
	mutex_init(&b->write_lock);
	lockdep_set_novalidate_class(&b->write_lock);
	INIT_LIST_HEAD(&b->list);
	INIT_DELAYED_WORK(&b->work, btree_node_write_work);
	b->c = c;
	sema_init(&b->io_mutex, 1);

	mca_data_alloc(b, k, gfp);
	return b;
}

static int mca_reap(struct btree *b, unsigned int min_order, bool flush)
{
	struct closure cl;

	closure_init_stack(&cl);
	lockdep_assert_held(&b->c->bucket_lock);

	if (!down_write_trylock(&b->lock))
		return -ENOMEM;

	BUG_ON(btree_node_dirty(b) && !b->keys.set[0].data);

	if (b->keys.page_order < min_order)
		goto out_unlock;

	if (!flush) {
		if (btree_node_dirty(b))
			goto out_unlock;

		if (down_trylock(&b->io_mutex))
			goto out_unlock;
		up(&b->io_mutex);
	}

retry:
	/*
	 * BTREE_NODE_dirty might be cleared in btree_flush_write() by
	 * __bch_btree_node_write(). To avoid an extra flush, acquire
	 * b->write_lock before checking BTREE_NODE_dirty bit.
	 */
	mutex_lock(&b->write_lock);
	/*
	 * If this btree node is selected in btree_flush_write() by journal
	 * code, delay and retry until the node is flushed by journal code
	 * and BTREE_NODE_journal_flush bit cleared by btree_flush_write().
	 */
	if (btree_node_journal_flush(b)) {
		pr_debug("bnode %p is flushing by journal, retry\n", b);
		mutex_unlock(&b->write_lock);
		udelay(1);
		goto retry;
	}

	if (btree_node_dirty(b))
		__bch_btree_node_write(b, &cl);
	mutex_unlock(&b->write_lock);

	closure_sync(&cl);

	/* wait for any in flight btree write */
	down(&b->io_mutex);
	up(&b->io_mutex);

	return 0;
out_unlock:
	rw_unlock(true, b);
	return -ENOMEM;
}

static unsigned long bch_mca_scan(struct shrinker *shrink,
				  struct shrink_control *sc)
{
	struct cache_set *c = shrink->private_data;
	struct btree *b, *t;
	unsigned long i, nr = sc->nr_to_scan;
	unsigned long freed = 0;
	unsigned int btree_cache_used;

	if (c->shrinker_disabled)
		return SHRINK_STOP;

	if (c->btree_cache_alloc_lock)
		return SHRINK_STOP;

	/* Return -1 if we can't do anything right now */
	if (sc->gfp_mask & __GFP_IO)
		mutex_lock(&c->bucket_lock);
	else if (!mutex_trylock(&c->bucket_lock))
		return -1;

	/*
	 * It's _really_ critical that we don't free too many btree nodes - we
	 * have to always leave ourselves a reserve. The reserve is how we
	 * guarantee that allocating memory for a new btree node can always
	 * succeed, so that inserting keys into the btree can always succeed and
	 * IO can always make forward progress:
	 */
	nr /= c->btree_pages;
	if (nr == 0)
		nr = 1;
	nr = min_t(unsigned long, nr, mca_can_free(c));

	i = 0;
	btree_cache_used = c->btree_cache_used;
	list_for_each_entry_safe_reverse(b, t, &c->btree_cache_freeable, list) {
		if (nr <= 0)
			goto out;

		if (!mca_reap(b, 0, false)) {
			mca_data_free(b);
			rw_unlock(true, b);
			freed++;
		}
		nr--;
		i++;
	}

	list_for_each_entry_safe_reverse(b, t, &c->btree_cache, list) {
		if (nr <= 0 || i >= btree_cache_used)
			goto out;

		if (!mca_reap(b, 0, false)) {
			mca_bucket_free(b);
			mca_data_free(b);
			rw_unlock(true, b);
			freed++;
		}

		nr--;
		i++;
	}
out:
	mutex_unlock(&c->bucket_lock);
	return freed * c->btree_pages;
}

static unsigned long bch_mca_count(struct shrinker *shrink,
				   struct shrink_control *sc)
{
	struct cache_set *c = shrink->private_data;

	if (c->shrinker_disabled)
		return 0;

	if (c->btree_cache_alloc_lock)
		return 0;

	return mca_can_free(c) * c->btree_pages;
}

void bch_btree_cache_free(struct cache_set *c)
{
	struct btree *b;
	struct closure cl;

	closure_init_stack(&cl);

	if (c->shrink)
		shrinker_free(c->shrink);

	mutex_lock(&c->bucket_lock);

#ifdef CONFIG_BCACHE_DEBUG
	if (c->verify_data)
		list_move(&c->verify_data->list, &c->btree_cache);

	free_pages((unsigned long) c->verify_ondisk, ilog2(meta_bucket_pages(&c->cache->sb)));
#endif

	list_splice(&c->btree_cache_freeable,
		    &c->btree_cache);

	while (!list_empty(&c->btree_cache)) {
		b = list_first_entry(&c->btree_cache, struct btree, list);

		/*
		 * This function is called by cache_set_free(), no I/O
		 * request on cache now, it is unnecessary to acquire
		 * b->write_lock before clearing BTREE_NODE_dirty anymore.
		 */
		if (btree_node_dirty(b)) {
			btree_complete_write(b, btree_current_write(b));
			clear_bit(BTREE_NODE_dirty, &b->flags);
		}
		mca_data_free(b);
	}

	while (!list_empty(&c->btree_cache_freed)) {
		b = list_first_entry(&c->btree_cache_freed,
				     struct btree, list);
		list_del(&b->list);
		cancel_delayed_work_sync(&b->work);
		kfree(b);
	}

	mutex_unlock(&c->bucket_lock);
}

int bch_btree_cache_alloc(struct cache_set *c)
{
	unsigned int i;

	for (i = 0; i < mca_reserve(c); i++)
		if (!mca_bucket_alloc(c, &ZERO_KEY, GFP_KERNEL))
			return -ENOMEM;

	list_splice_init(&c->btree_cache,
			 &c->btree_cache_freeable);

#ifdef CONFIG_BCACHE_DEBUG
	mutex_init(&c->verify_lock);

	c->verify_ondisk = (void *)
		__get_free_pages(GFP_KERNEL|__GFP_COMP,
				 ilog2(meta_bucket_pages(&c->cache->sb)));
	if (!c->verify_ondisk) {
		/*
		 * Don't worry about the mca_reserve buckets
		 * allocated in previous for-loop, they will be
		 * handled properly in bch_cache_set_unregister().
815 */ 816 return -ENOMEM; 817 } 818 819 c->verify_data = mca_bucket_alloc(c, &ZERO_KEY, GFP_KERNEL); 820 821 if (c->verify_data && 822 c->verify_data->keys.set->data) 823 list_del_init(&c->verify_data->list); 824 else 825 c->verify_data = NULL; 826 #endif 827 828 c->shrink = shrinker_alloc(0, "md-bcache:%pU", c->set_uuid); 829 if (!c->shrink) { 830 pr_warn("bcache: %s: could not allocate shrinker\n", __func__); 831 return 0; 832 } 833 834 c->shrink->count_objects = bch_mca_count; 835 c->shrink->scan_objects = bch_mca_scan; 836 c->shrink->seeks = 4; 837 c->shrink->batch = c->btree_pages * 2; 838 c->shrink->private_data = c; 839 840 shrinker_register(c->shrink); 841 842 return 0; 843 } 844 845 /* Btree in memory cache - hash table */ 846 847 static struct hlist_head *mca_hash(struct cache_set *c, struct bkey *k) 848 { 849 return &c->bucket_hash[hash_32(PTR_HASH(c, k), BUCKET_HASH_BITS)]; 850 } 851 852 static struct btree *mca_find(struct cache_set *c, struct bkey *k) 853 { 854 struct btree *b; 855 856 rcu_read_lock(); 857 hlist_for_each_entry_rcu(b, mca_hash(c, k), hash) 858 if (PTR_HASH(c, &b->key) == PTR_HASH(c, k)) 859 goto out; 860 b = NULL; 861 out: 862 rcu_read_unlock(); 863 return b; 864 } 865 866 static int mca_cannibalize_lock(struct cache_set *c, struct btree_op *op) 867 { 868 spin_lock(&c->btree_cannibalize_lock); 869 if (likely(c->btree_cache_alloc_lock == NULL)) { 870 c->btree_cache_alloc_lock = current; 871 } else if (c->btree_cache_alloc_lock != current) { 872 if (op) 873 prepare_to_wait(&c->btree_cache_wait, &op->wait, 874 TASK_UNINTERRUPTIBLE); 875 spin_unlock(&c->btree_cannibalize_lock); 876 return -EINTR; 877 } 878 spin_unlock(&c->btree_cannibalize_lock); 879 880 return 0; 881 } 882 883 static struct btree *mca_cannibalize(struct cache_set *c, struct btree_op *op, 884 struct bkey *k) 885 { 886 struct btree *b; 887 888 trace_bcache_btree_cache_cannibalize(c); 889 890 if (mca_cannibalize_lock(c, op)) 891 return ERR_PTR(-EINTR); 892 893 list_for_each_entry_reverse(b, &c->btree_cache, list) 894 if (!mca_reap(b, btree_order(k), false)) 895 return b; 896 897 list_for_each_entry_reverse(b, &c->btree_cache, list) 898 if (!mca_reap(b, btree_order(k), true)) 899 return b; 900 901 WARN(1, "btree cache cannibalize failed\n"); 902 return ERR_PTR(-ENOMEM); 903 } 904 905 /* 906 * We can only have one thread cannibalizing other cached btree nodes at a time, 907 * or we'll deadlock. We use an open coded mutex to ensure that, which a 908 * cannibalize_bucket() will take. This means every time we unlock the root of 909 * the btree, we need to release this lock if we have it held. 910 */ 911 void bch_cannibalize_unlock(struct cache_set *c) 912 { 913 spin_lock(&c->btree_cannibalize_lock); 914 if (c->btree_cache_alloc_lock == current) { 915 c->btree_cache_alloc_lock = NULL; 916 wake_up(&c->btree_cache_wait); 917 } 918 spin_unlock(&c->btree_cannibalize_lock); 919 } 920 921 static struct btree *mca_alloc(struct cache_set *c, struct btree_op *op, 922 struct bkey *k, int level) 923 { 924 struct btree *b; 925 926 BUG_ON(current->bio_list); 927 928 lockdep_assert_held(&c->bucket_lock); 929 930 if (mca_find(c, k)) 931 return NULL; 932 933 /* btree_free() doesn't free memory; it sticks the node on the end of 934 * the list. Check if there's any freed nodes there: 935 */ 936 list_for_each_entry(b, &c->btree_cache_freeable, list) 937 if (!mca_reap(b, btree_order(k), false)) 938 goto out; 939 940 /* We never free struct btree itself, just the memory that holds the on 941 * disk node. 
Check the freed list before allocating a new one: 942 */ 943 list_for_each_entry(b, &c->btree_cache_freed, list) 944 if (!mca_reap(b, 0, false)) { 945 mca_data_alloc(b, k, __GFP_NOWARN|GFP_NOIO); 946 if (!b->keys.set[0].data) 947 goto err; 948 else 949 goto out; 950 } 951 952 b = mca_bucket_alloc(c, k, __GFP_NOWARN|GFP_NOIO); 953 if (!b) 954 goto err; 955 956 BUG_ON(!down_write_trylock(&b->lock)); 957 if (!b->keys.set->data) 958 goto err; 959 out: 960 BUG_ON(b->io_mutex.count != 1); 961 962 bkey_copy(&b->key, k); 963 list_move(&b->list, &c->btree_cache); 964 hlist_del_init_rcu(&b->hash); 965 hlist_add_head_rcu(&b->hash, mca_hash(c, k)); 966 967 lock_set_subclass(&b->lock.dep_map, level + 1, _THIS_IP_); 968 b->parent = (void *) ~0UL; 969 b->flags = 0; 970 b->written = 0; 971 b->level = level; 972 973 if (!b->level) 974 bch_btree_keys_init(&b->keys, &bch_extent_keys_ops, 975 &b->c->expensive_debug_checks); 976 else 977 bch_btree_keys_init(&b->keys, &bch_btree_keys_ops, 978 &b->c->expensive_debug_checks); 979 980 return b; 981 err: 982 if (b) 983 rw_unlock(true, b); 984 985 b = mca_cannibalize(c, op, k); 986 if (!IS_ERR(b)) 987 goto out; 988 989 return b; 990 } 991 992 /* 993 * bch_btree_node_get - find a btree node in the cache and lock it, reading it 994 * in from disk if necessary. 995 * 996 * If IO is necessary and running under submit_bio_noacct, returns -EAGAIN. 997 * 998 * The btree node will have either a read or a write lock held, depending on 999 * level and op->lock. 1000 * 1001 * Note: Only error code or btree pointer will be returned, it is unncessary 1002 * for callers to check NULL pointer. 1003 */ 1004 struct btree *bch_btree_node_get(struct cache_set *c, struct btree_op *op, 1005 struct bkey *k, int level, bool write, 1006 struct btree *parent) 1007 { 1008 int i = 0; 1009 struct btree *b; 1010 1011 BUG_ON(level < 0); 1012 retry: 1013 b = mca_find(c, k); 1014 1015 if (!b) { 1016 if (current->bio_list) 1017 return ERR_PTR(-EAGAIN); 1018 1019 mutex_lock(&c->bucket_lock); 1020 b = mca_alloc(c, op, k, level); 1021 mutex_unlock(&c->bucket_lock); 1022 1023 if (!b) 1024 goto retry; 1025 if (IS_ERR(b)) 1026 return b; 1027 1028 bch_btree_node_read(b); 1029 1030 if (!write) 1031 downgrade_write(&b->lock); 1032 } else { 1033 rw_lock(write, b, level); 1034 if (PTR_HASH(c, &b->key) != PTR_HASH(c, k)) { 1035 rw_unlock(write, b); 1036 goto retry; 1037 } 1038 BUG_ON(b->level != level); 1039 } 1040 1041 if (btree_node_io_error(b)) { 1042 rw_unlock(write, b); 1043 return ERR_PTR(-EIO); 1044 } 1045 1046 BUG_ON(!b->written); 1047 1048 b->parent = parent; 1049 1050 for (; i <= b->keys.nsets && b->keys.set[i].size; i++) { 1051 prefetch(b->keys.set[i].tree); 1052 prefetch(b->keys.set[i].data); 1053 } 1054 1055 for (; i <= b->keys.nsets; i++) 1056 prefetch(b->keys.set[i].data); 1057 1058 return b; 1059 } 1060 1061 static void btree_node_prefetch(struct btree *parent, struct bkey *k) 1062 { 1063 struct btree *b; 1064 1065 mutex_lock(&parent->c->bucket_lock); 1066 b = mca_alloc(parent->c, NULL, k, parent->level - 1); 1067 mutex_unlock(&parent->c->bucket_lock); 1068 1069 if (!IS_ERR_OR_NULL(b)) { 1070 b->parent = parent; 1071 bch_btree_node_read(b); 1072 rw_unlock(true, b); 1073 } 1074 } 1075 1076 /* Btree alloc */ 1077 1078 static void btree_node_free(struct btree *b) 1079 { 1080 trace_bcache_btree_node_free(b); 1081 1082 BUG_ON(b == b->c->root); 1083 1084 retry: 1085 mutex_lock(&b->write_lock); 1086 /* 1087 * If the btree node is selected and flushing in btree_flush_write(), 1088 * delay and retry 
until the BTREE_NODE_journal_flush bit cleared, 1089 * then it is safe to free the btree node here. Otherwise this btree 1090 * node will be in race condition. 1091 */ 1092 if (btree_node_journal_flush(b)) { 1093 mutex_unlock(&b->write_lock); 1094 pr_debug("bnode %p journal_flush set, retry\n", b); 1095 udelay(1); 1096 goto retry; 1097 } 1098 1099 if (btree_node_dirty(b)) { 1100 btree_complete_write(b, btree_current_write(b)); 1101 clear_bit(BTREE_NODE_dirty, &b->flags); 1102 } 1103 1104 mutex_unlock(&b->write_lock); 1105 1106 cancel_delayed_work(&b->work); 1107 1108 mutex_lock(&b->c->bucket_lock); 1109 bch_bucket_free(b->c, &b->key); 1110 mca_bucket_free(b); 1111 mutex_unlock(&b->c->bucket_lock); 1112 } 1113 1114 /* 1115 * Only error code or btree pointer will be returned, it is unncessary for 1116 * callers to check NULL pointer. 1117 */ 1118 struct btree *__bch_btree_node_alloc(struct cache_set *c, struct btree_op *op, 1119 int level, bool wait, 1120 struct btree *parent) 1121 { 1122 BKEY_PADDED(key) k; 1123 struct btree *b; 1124 1125 mutex_lock(&c->bucket_lock); 1126 retry: 1127 /* return ERR_PTR(-EAGAIN) when it fails */ 1128 b = ERR_PTR(-EAGAIN); 1129 if (__bch_bucket_alloc_set(c, RESERVE_BTREE, &k.key, wait)) 1130 goto err; 1131 1132 bkey_put(c, &k.key); 1133 SET_KEY_SIZE(&k.key, c->btree_pages * PAGE_SECTORS); 1134 1135 b = mca_alloc(c, op, &k.key, level); 1136 if (IS_ERR(b)) 1137 goto err_free; 1138 1139 if (!b) { 1140 cache_bug(c, 1141 "Tried to allocate bucket that was in btree cache"); 1142 goto retry; 1143 } 1144 1145 b->parent = parent; 1146 bch_bset_init_next(&b->keys, b->keys.set->data, bset_magic(&b->c->cache->sb)); 1147 1148 mutex_unlock(&c->bucket_lock); 1149 1150 trace_bcache_btree_node_alloc(b); 1151 return b; 1152 err_free: 1153 bch_bucket_free(c, &k.key); 1154 err: 1155 mutex_unlock(&c->bucket_lock); 1156 1157 trace_bcache_btree_node_alloc_fail(c); 1158 return b; 1159 } 1160 1161 static struct btree *bch_btree_node_alloc(struct cache_set *c, 1162 struct btree_op *op, int level, 1163 struct btree *parent) 1164 { 1165 return __bch_btree_node_alloc(c, op, level, op != NULL, parent); 1166 } 1167 1168 static struct btree *btree_node_alloc_replacement(struct btree *b, 1169 struct btree_op *op) 1170 { 1171 struct btree *n = bch_btree_node_alloc(b->c, op, b->level, b->parent); 1172 1173 if (!IS_ERR(n)) { 1174 mutex_lock(&n->write_lock); 1175 bch_btree_sort_into(&b->keys, &n->keys, &b->c->sort); 1176 bkey_copy_key(&n->key, &b->key); 1177 mutex_unlock(&n->write_lock); 1178 } 1179 1180 return n; 1181 } 1182 1183 static void make_btree_freeing_key(struct btree *b, struct bkey *k) 1184 { 1185 unsigned int i; 1186 1187 mutex_lock(&b->c->bucket_lock); 1188 1189 atomic_inc(&b->c->prio_blocked); 1190 1191 bkey_copy(k, &b->key); 1192 bkey_copy_key(k, &ZERO_KEY); 1193 1194 for (i = 0; i < KEY_PTRS(k); i++) 1195 SET_PTR_GEN(k, i, 1196 bch_inc_gen(b->c->cache, 1197 PTR_BUCKET(b->c, &b->key, i))); 1198 1199 mutex_unlock(&b->c->bucket_lock); 1200 } 1201 1202 static int btree_check_reserve(struct btree *b, struct btree_op *op) 1203 { 1204 struct cache_set *c = b->c; 1205 struct cache *ca = c->cache; 1206 unsigned int reserve = (c->root->level - b->level) * 2 + 1; 1207 1208 mutex_lock(&c->bucket_lock); 1209 1210 if (fifo_used(&ca->free[RESERVE_BTREE]) < reserve) { 1211 if (op) 1212 prepare_to_wait(&c->btree_cache_wait, &op->wait, 1213 TASK_UNINTERRUPTIBLE); 1214 mutex_unlock(&c->bucket_lock); 1215 return -EINTR; 1216 } 1217 1218 mutex_unlock(&c->bucket_lock); 1219 1220 return 
mca_cannibalize_lock(b->c, op);
}

/* Garbage collection */

static uint8_t __bch_btree_mark_key(struct cache_set *c, int level,
				    struct bkey *k)
{
	uint8_t stale = 0;
	unsigned int i;
	struct bucket *g;

	/*
	 * ptr_invalid() can't return true for the keys that mark btree nodes as
	 * freed, but since ptr_bad() returns true we'll never actually use them
	 * for anything and thus we don't want to mark their pointers here
	 */
	if (!bkey_cmp(k, &ZERO_KEY))
		return stale;

	for (i = 0; i < KEY_PTRS(k); i++) {
		if (!ptr_available(c, k, i))
			continue;

		g = PTR_BUCKET(c, k, i);

		if (gen_after(g->last_gc, PTR_GEN(k, i)))
			g->last_gc = PTR_GEN(k, i);

		if (ptr_stale(c, k, i)) {
			stale = max(stale, ptr_stale(c, k, i));
			continue;
		}

		cache_bug_on(GC_MARK(g) &&
			     (GC_MARK(g) == GC_MARK_METADATA) != (level != 0),
			     c, "inconsistent ptrs: mark = %llu, level = %i",
			     GC_MARK(g), level);

		if (level)
			SET_GC_MARK(g, GC_MARK_METADATA);
		else if (KEY_DIRTY(k))
			SET_GC_MARK(g, GC_MARK_DIRTY);
		else if (!GC_MARK(g))
			SET_GC_MARK(g, GC_MARK_RECLAIMABLE);

		/* guard against overflow */
		SET_GC_SECTORS_USED(g, min_t(unsigned int,
					     GC_SECTORS_USED(g) + KEY_SIZE(k),
					     MAX_GC_SECTORS_USED));

		BUG_ON(!GC_SECTORS_USED(g));
	}

	return stale;
}

#define btree_mark_key(b, k)	__bch_btree_mark_key(b->c, b->level, k)

void bch_initial_mark_key(struct cache_set *c, int level, struct bkey *k)
{
	unsigned int i;

	for (i = 0; i < KEY_PTRS(k); i++)
		if (ptr_available(c, k, i) &&
		    !ptr_stale(c, k, i)) {
			struct bucket *b = PTR_BUCKET(c, k, i);

			b->gen = PTR_GEN(k, i);

			if (level && bkey_cmp(k, &ZERO_KEY))
				b->prio = BTREE_PRIO;
			else if (!level && b->prio == BTREE_PRIO)
				b->prio = INITIAL_PRIO;
		}

	__bch_btree_mark_key(c, level, k);
}

void bch_update_bucket_in_use(struct cache_set *c, struct gc_stat *stats)
{
	stats->in_use = (c->nbuckets - c->avail_nbuckets) * 100 / c->nbuckets;
}

static bool btree_gc_mark_node(struct btree *b, struct gc_stat *gc)
{
	uint8_t stale = 0;
	unsigned int keys = 0, good_keys = 0;
	struct bkey *k;
	struct btree_iter iter;
	struct bset_tree *t;

	min_heap_init(&iter.heap, NULL, MAX_BSETS);

	gc->nodes++;

	for_each_key_filter(&b->keys, k, &iter, bch_ptr_invalid) {
		stale = max(stale, btree_mark_key(b, k));
		keys++;

		if (bch_ptr_bad(&b->keys, k))
			continue;

		gc->key_bytes += bkey_u64s(k);
		gc->nkeys++;
		good_keys++;

		gc->data += KEY_SIZE(k);
	}

	for (t = b->keys.set; t <= &b->keys.set[b->keys.nsets]; t++)
		btree_bug_on(t->size &&
			     bset_written(&b->keys, t) &&
			     bkey_cmp(&b->key, &t->end) < 0,
			     b, "found short btree key in gc");

	if (b->c->gc_always_rewrite)
		return true;

	if (stale > 10)
		return true;

	if ((keys - good_keys) * 2 > keys)
		return true;

	return false;
}

#define GC_MERGE_NODES	4U

struct gc_merge_info {
	struct btree	*b;
	unsigned int	keys;
};

static int bch_btree_insert_node(struct btree *b, struct btree_op *op,
				 struct keylist *insert_keys,
				 atomic_t *journal_ref,
				 struct bkey *replace_key);

static int
btree_gc_coalesce(struct btree *b, struct btree_op *op, 1361 struct gc_stat *gc, struct gc_merge_info *r) 1362 { 1363 unsigned int i, nodes = 0, keys = 0, blocks; 1364 struct btree *new_nodes[GC_MERGE_NODES]; 1365 struct keylist keylist; 1366 struct closure cl; 1367 struct bkey *k; 1368 1369 bch_keylist_init(&keylist); 1370 1371 if (btree_check_reserve(b, NULL)) 1372 return 0; 1373 1374 memset(new_nodes, 0, sizeof(new_nodes)); 1375 closure_init_stack(&cl); 1376 1377 while (nodes < GC_MERGE_NODES && !IS_ERR_OR_NULL(r[nodes].b)) 1378 keys += r[nodes++].keys; 1379 1380 blocks = btree_default_blocks(b->c) * 2 / 3; 1381 1382 if (nodes < 2 || 1383 __set_blocks(b->keys.set[0].data, keys, 1384 block_bytes(b->c->cache)) > blocks * (nodes - 1)) 1385 return 0; 1386 1387 for (i = 0; i < nodes; i++) { 1388 new_nodes[i] = btree_node_alloc_replacement(r[i].b, NULL); 1389 if (IS_ERR(new_nodes[i])) 1390 goto out_nocoalesce; 1391 } 1392 1393 /* 1394 * We have to check the reserve here, after we've allocated our new 1395 * nodes, to make sure the insert below will succeed - we also check 1396 * before as an optimization to potentially avoid a bunch of expensive 1397 * allocs/sorts 1398 */ 1399 if (btree_check_reserve(b, NULL)) 1400 goto out_nocoalesce; 1401 1402 for (i = 0; i < nodes; i++) 1403 mutex_lock(&new_nodes[i]->write_lock); 1404 1405 for (i = nodes - 1; i > 0; --i) { 1406 struct bset *n1 = btree_bset_first(new_nodes[i]); 1407 struct bset *n2 = btree_bset_first(new_nodes[i - 1]); 1408 struct bkey *k, *last = NULL; 1409 1410 keys = 0; 1411 1412 if (i > 1) { 1413 for (k = n2->start; 1414 k < bset_bkey_last(n2); 1415 k = bkey_next(k)) { 1416 if (__set_blocks(n1, n1->keys + keys + 1417 bkey_u64s(k), 1418 block_bytes(b->c->cache)) > blocks) 1419 break; 1420 1421 last = k; 1422 keys += bkey_u64s(k); 1423 } 1424 } else { 1425 /* 1426 * Last node we're not getting rid of - we're getting 1427 * rid of the node at r[0]. 
Have to try and fit all of 1428 * the remaining keys into this node; we can't ensure 1429 * they will always fit due to rounding and variable 1430 * length keys (shouldn't be possible in practice, 1431 * though) 1432 */ 1433 if (__set_blocks(n1, n1->keys + n2->keys, 1434 block_bytes(b->c->cache)) > 1435 btree_blocks(new_nodes[i])) 1436 goto out_unlock_nocoalesce; 1437 1438 keys = n2->keys; 1439 /* Take the key of the node we're getting rid of */ 1440 last = &r->b->key; 1441 } 1442 1443 BUG_ON(__set_blocks(n1, n1->keys + keys, block_bytes(b->c->cache)) > 1444 btree_blocks(new_nodes[i])); 1445 1446 if (last) 1447 bkey_copy_key(&new_nodes[i]->key, last); 1448 1449 memcpy(bset_bkey_last(n1), 1450 n2->start, 1451 (void *) bset_bkey_idx(n2, keys) - (void *) n2->start); 1452 1453 n1->keys += keys; 1454 r[i].keys = n1->keys; 1455 1456 memmove(n2->start, 1457 bset_bkey_idx(n2, keys), 1458 (void *) bset_bkey_last(n2) - 1459 (void *) bset_bkey_idx(n2, keys)); 1460 1461 n2->keys -= keys; 1462 1463 if (__bch_keylist_realloc(&keylist, 1464 bkey_u64s(&new_nodes[i]->key))) 1465 goto out_unlock_nocoalesce; 1466 1467 bch_btree_node_write(new_nodes[i], &cl); 1468 bch_keylist_add(&keylist, &new_nodes[i]->key); 1469 } 1470 1471 for (i = 0; i < nodes; i++) 1472 mutex_unlock(&new_nodes[i]->write_lock); 1473 1474 closure_sync(&cl); 1475 1476 /* We emptied out this node */ 1477 BUG_ON(btree_bset_first(new_nodes[0])->keys); 1478 btree_node_free(new_nodes[0]); 1479 rw_unlock(true, new_nodes[0]); 1480 new_nodes[0] = NULL; 1481 1482 for (i = 0; i < nodes; i++) { 1483 if (__bch_keylist_realloc(&keylist, bkey_u64s(&r[i].b->key))) 1484 goto out_nocoalesce; 1485 1486 make_btree_freeing_key(r[i].b, keylist.top); 1487 bch_keylist_push(&keylist); 1488 } 1489 1490 bch_btree_insert_node(b, op, &keylist, NULL, NULL); 1491 BUG_ON(!bch_keylist_empty(&keylist)); 1492 1493 for (i = 0; i < nodes; i++) { 1494 btree_node_free(r[i].b); 1495 rw_unlock(true, r[i].b); 1496 1497 r[i].b = new_nodes[i]; 1498 } 1499 1500 memmove(r, r + 1, sizeof(r[0]) * (nodes - 1)); 1501 r[nodes - 1].b = ERR_PTR(-EINTR); 1502 1503 trace_bcache_btree_gc_coalesce(nodes); 1504 gc->nodes--; 1505 1506 bch_keylist_free(&keylist); 1507 1508 /* Invalidated our iterator */ 1509 return -EINTR; 1510 1511 out_unlock_nocoalesce: 1512 for (i = 0; i < nodes; i++) 1513 mutex_unlock(&new_nodes[i]->write_lock); 1514 1515 out_nocoalesce: 1516 closure_sync(&cl); 1517 1518 while ((k = bch_keylist_pop(&keylist))) 1519 if (!bkey_cmp(k, &ZERO_KEY)) 1520 atomic_dec(&b->c->prio_blocked); 1521 bch_keylist_free(&keylist); 1522 1523 for (i = 0; i < nodes; i++) 1524 if (!IS_ERR_OR_NULL(new_nodes[i])) { 1525 btree_node_free(new_nodes[i]); 1526 rw_unlock(true, new_nodes[i]); 1527 } 1528 return 0; 1529 } 1530 1531 static int btree_gc_rewrite_node(struct btree *b, struct btree_op *op, 1532 struct btree *replace) 1533 { 1534 struct keylist keys; 1535 struct btree *n; 1536 1537 if (btree_check_reserve(b, NULL)) 1538 return 0; 1539 1540 n = btree_node_alloc_replacement(replace, NULL); 1541 if (IS_ERR(n)) 1542 return 0; 1543 1544 /* recheck reserve after allocating replacement node */ 1545 if (btree_check_reserve(b, NULL)) { 1546 btree_node_free(n); 1547 rw_unlock(true, n); 1548 return 0; 1549 } 1550 1551 bch_btree_node_write_sync(n); 1552 1553 bch_keylist_init(&keys); 1554 bch_keylist_add(&keys, &n->key); 1555 1556 make_btree_freeing_key(replace, keys.top); 1557 bch_keylist_push(&keys); 1558 1559 bch_btree_insert_node(b, op, &keys, NULL, NULL); 1560 BUG_ON(!bch_keylist_empty(&keys)); 1561 1562 
btree_node_free(replace); 1563 rw_unlock(true, n); 1564 1565 /* Invalidated our iterator */ 1566 return -EINTR; 1567 } 1568 1569 static unsigned int btree_gc_count_keys(struct btree *b) 1570 { 1571 struct bkey *k; 1572 struct btree_iter iter; 1573 unsigned int ret = 0; 1574 1575 min_heap_init(&iter.heap, NULL, MAX_BSETS); 1576 1577 for_each_key_filter(&b->keys, k, &iter, bch_ptr_bad) 1578 ret += bkey_u64s(k); 1579 1580 return ret; 1581 } 1582 1583 static size_t btree_gc_min_nodes(struct cache_set *c) 1584 { 1585 size_t min_nodes; 1586 1587 /* 1588 * Since incremental GC would stop 100ms when front 1589 * side I/O comes, so when there are many btree nodes, 1590 * if GC only processes constant (100) nodes each time, 1591 * GC would last a long time, and the front side I/Os 1592 * would run out of the buckets (since no new bucket 1593 * can be allocated during GC), and be blocked again. 1594 * So GC should not process constant nodes, but varied 1595 * nodes according to the number of btree nodes, which 1596 * realized by dividing GC into constant(100) times, 1597 * so when there are many btree nodes, GC can process 1598 * more nodes each time, otherwise, GC will process less 1599 * nodes each time (but no less than MIN_GC_NODES) 1600 */ 1601 min_nodes = c->gc_stats.nodes / MAX_GC_TIMES; 1602 if (min_nodes < MIN_GC_NODES) 1603 min_nodes = MIN_GC_NODES; 1604 1605 return min_nodes; 1606 } 1607 1608 1609 static int btree_gc_recurse(struct btree *b, struct btree_op *op, 1610 struct closure *writes, struct gc_stat *gc) 1611 { 1612 int ret = 0; 1613 bool should_rewrite; 1614 struct bkey *k; 1615 struct btree_iter iter; 1616 struct gc_merge_info r[GC_MERGE_NODES]; 1617 struct gc_merge_info *i, *last = r + ARRAY_SIZE(r) - 1; 1618 1619 min_heap_init(&iter.heap, NULL, MAX_BSETS); 1620 bch_btree_iter_init(&b->keys, &iter, &b->c->gc_done); 1621 1622 for (i = r; i < r + ARRAY_SIZE(r); i++) 1623 i->b = ERR_PTR(-EINTR); 1624 1625 while (1) { 1626 k = bch_btree_iter_next_filter(&iter, &b->keys, bch_ptr_bad); 1627 if (k) { 1628 r->b = bch_btree_node_get(b->c, op, k, b->level - 1, 1629 true, b); 1630 if (IS_ERR(r->b)) { 1631 ret = PTR_ERR(r->b); 1632 break; 1633 } 1634 1635 r->keys = btree_gc_count_keys(r->b); 1636 1637 ret = btree_gc_coalesce(b, op, gc, r); 1638 if (ret) 1639 break; 1640 } 1641 1642 if (!last->b) 1643 break; 1644 1645 if (!IS_ERR(last->b)) { 1646 should_rewrite = btree_gc_mark_node(last->b, gc); 1647 if (should_rewrite) { 1648 ret = btree_gc_rewrite_node(b, op, last->b); 1649 if (ret) 1650 break; 1651 } 1652 1653 if (last->b->level) { 1654 ret = btree_gc_recurse(last->b, op, writes, gc); 1655 if (ret) 1656 break; 1657 } 1658 1659 bkey_copy_key(&b->c->gc_done, &last->b->key); 1660 1661 /* 1662 * Must flush leaf nodes before gc ends, since replace 1663 * operations aren't journalled 1664 */ 1665 mutex_lock(&last->b->write_lock); 1666 if (btree_node_dirty(last->b)) 1667 bch_btree_node_write(last->b, writes); 1668 mutex_unlock(&last->b->write_lock); 1669 rw_unlock(true, last->b); 1670 } 1671 1672 memmove(r + 1, r, sizeof(r[0]) * (GC_MERGE_NODES - 1)); 1673 r->b = NULL; 1674 1675 if (atomic_read(&b->c->search_inflight) && 1676 gc->nodes >= gc->nodes_pre + btree_gc_min_nodes(b->c)) { 1677 gc->nodes_pre = gc->nodes; 1678 ret = -EAGAIN; 1679 break; 1680 } 1681 1682 if (need_resched()) { 1683 ret = -EAGAIN; 1684 break; 1685 } 1686 } 1687 1688 for (i = r; i < r + ARRAY_SIZE(r); i++) 1689 if (!IS_ERR_OR_NULL(i->b)) { 1690 mutex_lock(&i->b->write_lock); 1691 if (btree_node_dirty(i->b)) 1692 
bch_btree_node_write(i->b, writes); 1693 mutex_unlock(&i->b->write_lock); 1694 rw_unlock(true, i->b); 1695 } 1696 1697 return ret; 1698 } 1699 1700 static int bch_btree_gc_root(struct btree *b, struct btree_op *op, 1701 struct closure *writes, struct gc_stat *gc) 1702 { 1703 struct btree *n = NULL; 1704 int ret = 0; 1705 bool should_rewrite; 1706 1707 should_rewrite = btree_gc_mark_node(b, gc); 1708 if (should_rewrite) { 1709 n = btree_node_alloc_replacement(b, NULL); 1710 1711 if (!IS_ERR(n)) { 1712 bch_btree_node_write_sync(n); 1713 1714 bch_btree_set_root(n); 1715 btree_node_free(b); 1716 rw_unlock(true, n); 1717 1718 return -EINTR; 1719 } 1720 } 1721 1722 __bch_btree_mark_key(b->c, b->level + 1, &b->key); 1723 1724 if (b->level) { 1725 ret = btree_gc_recurse(b, op, writes, gc); 1726 if (ret) 1727 return ret; 1728 } 1729 1730 bkey_copy_key(&b->c->gc_done, &b->key); 1731 1732 return ret; 1733 } 1734 1735 static void btree_gc_start(struct cache_set *c) 1736 { 1737 struct cache *ca; 1738 struct bucket *b; 1739 1740 if (!c->gc_mark_valid) 1741 return; 1742 1743 mutex_lock(&c->bucket_lock); 1744 1745 c->gc_done = ZERO_KEY; 1746 1747 ca = c->cache; 1748 for_each_bucket(b, ca) { 1749 b->last_gc = b->gen; 1750 if (bch_can_invalidate_bucket(ca, b)) 1751 b->reclaimable_in_gc = 1; 1752 if (!atomic_read(&b->pin)) { 1753 SET_GC_MARK(b, 0); 1754 SET_GC_SECTORS_USED(b, 0); 1755 } 1756 } 1757 1758 c->gc_mark_valid = 0; 1759 mutex_unlock(&c->bucket_lock); 1760 } 1761 1762 static void bch_btree_gc_finish(struct cache_set *c) 1763 { 1764 struct bucket *b; 1765 struct cache *ca; 1766 unsigned int i, j; 1767 uint64_t *k; 1768 1769 mutex_lock(&c->bucket_lock); 1770 1771 set_gc_sectors(c); 1772 c->gc_mark_valid = 1; 1773 c->need_gc = 0; 1774 1775 for (i = 0; i < KEY_PTRS(&c->uuid_bucket); i++) 1776 SET_GC_MARK(PTR_BUCKET(c, &c->uuid_bucket, i), 1777 GC_MARK_METADATA); 1778 1779 /* don't reclaim buckets to which writeback keys point */ 1780 rcu_read_lock(); 1781 for (i = 0; i < c->devices_max_used; i++) { 1782 struct bcache_device *d = c->devices[i]; 1783 struct cached_dev *dc; 1784 struct keybuf_key *w, *n; 1785 1786 if (!d || UUID_FLASH_ONLY(&c->uuids[i])) 1787 continue; 1788 dc = container_of(d, struct cached_dev, disk); 1789 1790 spin_lock(&dc->writeback_keys.lock); 1791 rbtree_postorder_for_each_entry_safe(w, n, 1792 &dc->writeback_keys.keys, node) 1793 for (j = 0; j < KEY_PTRS(&w->key); j++) 1794 SET_GC_MARK(PTR_BUCKET(c, &w->key, j), 1795 GC_MARK_DIRTY); 1796 spin_unlock(&dc->writeback_keys.lock); 1797 } 1798 rcu_read_unlock(); 1799 1800 c->avail_nbuckets = 0; 1801 1802 ca = c->cache; 1803 ca->invalidate_needs_gc = 0; 1804 1805 for (k = ca->sb.d; k < ca->sb.d + ca->sb.keys; k++) 1806 SET_GC_MARK(ca->buckets + *k, GC_MARK_METADATA); 1807 1808 for (k = ca->prio_buckets; 1809 k < ca->prio_buckets + prio_buckets(ca) * 2; k++) 1810 SET_GC_MARK(ca->buckets + *k, GC_MARK_METADATA); 1811 1812 for_each_bucket(b, ca) { 1813 c->need_gc = max(c->need_gc, bucket_gc_gen(b)); 1814 1815 if (b->reclaimable_in_gc) 1816 b->reclaimable_in_gc = 0; 1817 1818 if (atomic_read(&b->pin)) 1819 continue; 1820 1821 BUG_ON(!GC_MARK(b) && GC_SECTORS_USED(b)); 1822 1823 if (!GC_MARK(b) || GC_MARK(b) == GC_MARK_RECLAIMABLE) 1824 c->avail_nbuckets++; 1825 } 1826 1827 mutex_unlock(&c->bucket_lock); 1828 } 1829 1830 static void bch_btree_gc(struct cache_set *c) 1831 { 1832 int ret; 1833 struct gc_stat stats; 1834 struct closure writes; 1835 struct btree_op op; 1836 uint64_t start_time = local_clock(); 1837 1838 trace_bcache_gc_start(c); 
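	/*
	 * Walk the btree from the root and re-mark every bucket it still
	 * references; on -EAGAIN the walk is resumed after a short sleep so
	 * incremental gc does not starve foreground I/O. bch_btree_gc_finish()
	 * then recomputes which buckets are reclaimable.
	 */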
1839 1840 memset(&stats, 0, sizeof(struct gc_stat)); 1841 closure_init_stack(&writes); 1842 bch_btree_op_init(&op, SHRT_MAX); 1843 1844 btree_gc_start(c); 1845 1846 /* if CACHE_SET_IO_DISABLE set, gc thread should stop too */ 1847 do { 1848 ret = bcache_btree_root(gc_root, c, &op, &writes, &stats); 1849 closure_sync(&writes); 1850 cond_resched(); 1851 1852 if (ret == -EAGAIN) 1853 schedule_timeout_interruptible(msecs_to_jiffies 1854 (GC_SLEEP_MS)); 1855 else if (ret) 1856 pr_warn("gc failed!\n"); 1857 } while (ret && !test_bit(CACHE_SET_IO_DISABLE, &c->flags)); 1858 1859 bch_btree_gc_finish(c); 1860 wake_up_allocators(c); 1861 1862 bch_time_stats_update(&c->btree_gc_time, start_time); 1863 1864 stats.key_bytes *= sizeof(uint64_t); 1865 stats.data <<= 9; 1866 bch_update_bucket_in_use(c, &stats); 1867 memcpy(&c->gc_stats, &stats, sizeof(struct gc_stat)); 1868 1869 trace_bcache_gc_end(c); 1870 1871 bch_moving_gc(c); 1872 } 1873 1874 static bool gc_should_run(struct cache_set *c) 1875 { 1876 struct cache *ca = c->cache; 1877 1878 if (ca->invalidate_needs_gc) 1879 return true; 1880 1881 if (atomic_read(&c->sectors_to_gc) < 0) 1882 return true; 1883 1884 return false; 1885 } 1886 1887 static int bch_gc_thread(void *arg) 1888 { 1889 struct cache_set *c = arg; 1890 1891 while (1) { 1892 wait_event_interruptible(c->gc_wait, 1893 kthread_should_stop() || 1894 test_bit(CACHE_SET_IO_DISABLE, &c->flags) || 1895 gc_should_run(c)); 1896 1897 if (kthread_should_stop() || 1898 test_bit(CACHE_SET_IO_DISABLE, &c->flags)) 1899 break; 1900 1901 set_gc_sectors(c); 1902 bch_btree_gc(c); 1903 } 1904 1905 wait_for_kthread_stop(); 1906 return 0; 1907 } 1908 1909 int bch_gc_thread_start(struct cache_set *c) 1910 { 1911 c->gc_thread = kthread_run(bch_gc_thread, c, "bcache_gc"); 1912 return PTR_ERR_OR_ZERO(c->gc_thread); 1913 } 1914 1915 /* Initial partial gc */ 1916 1917 static int bch_btree_check_recurse(struct btree *b, struct btree_op *op) 1918 { 1919 int ret = 0; 1920 struct bkey *k, *p = NULL; 1921 struct btree_iter iter; 1922 1923 min_heap_init(&iter.heap, NULL, MAX_BSETS); 1924 1925 for_each_key_filter(&b->keys, k, &iter, bch_ptr_invalid) 1926 bch_initial_mark_key(b->c, b->level, k); 1927 1928 bch_initial_mark_key(b->c, b->level + 1, &b->key); 1929 1930 if (b->level) { 1931 bch_btree_iter_init(&b->keys, &iter, NULL); 1932 1933 do { 1934 k = bch_btree_iter_next_filter(&iter, &b->keys, 1935 bch_ptr_bad); 1936 if (k) { 1937 btree_node_prefetch(b, k); 1938 /* 1939 * initiallize c->gc_stats.nodes 1940 * for incremental GC 1941 */ 1942 b->c->gc_stats.nodes++; 1943 } 1944 1945 if (p) 1946 ret = bcache_btree(check_recurse, p, b, op); 1947 1948 p = k; 1949 } while (p && !ret); 1950 } 1951 1952 return ret; 1953 } 1954 1955 1956 static int bch_btree_check_thread(void *arg) 1957 { 1958 int ret; 1959 struct btree_check_info *info = arg; 1960 struct btree_check_state *check_state = info->state; 1961 struct cache_set *c = check_state->c; 1962 struct btree_iter iter; 1963 struct bkey *k, *p; 1964 int cur_idx, prev_idx, skip_nr; 1965 1966 k = p = NULL; 1967 cur_idx = prev_idx = 0; 1968 ret = 0; 1969 1970 min_heap_init(&iter.heap, NULL, MAX_BSETS); 1971 1972 /* root node keys are checked before thread created */ 1973 bch_btree_iter_init(&c->root->keys, &iter, NULL); 1974 k = bch_btree_iter_next_filter(&iter, &c->root->keys, bch_ptr_bad); 1975 BUG_ON(!k); 1976 1977 p = k; 1978 while (k) { 1979 /* 1980 * Fetch a root node key index, skip the keys which 1981 * should be fetched by other threads, then check the 1982 * sub-tree 
indexed by the fetched key. 1983 */ 1984 spin_lock(&check_state->idx_lock); 1985 cur_idx = check_state->key_idx; 1986 check_state->key_idx++; 1987 spin_unlock(&check_state->idx_lock); 1988 1989 skip_nr = cur_idx - prev_idx; 1990 1991 while (skip_nr) { 1992 k = bch_btree_iter_next_filter(&iter, 1993 &c->root->keys, 1994 bch_ptr_bad); 1995 if (k) 1996 p = k; 1997 else { 1998 /* 1999 * No more keys to check in root node, 2000 * current checking threads are enough, 2001 * stop creating more. 2002 */ 2003 atomic_set(&check_state->enough, 1); 2004 /* Update check_state->enough earlier */ 2005 smp_mb__after_atomic(); 2006 goto out; 2007 } 2008 skip_nr--; 2009 cond_resched(); 2010 } 2011 2012 if (p) { 2013 struct btree_op op; 2014 2015 btree_node_prefetch(c->root, p); 2016 c->gc_stats.nodes++; 2017 bch_btree_op_init(&op, 0); 2018 ret = bcache_btree(check_recurse, p, c->root, &op); 2019 /* 2020 * The op may be added to cache_set's btree_cache_wait 2021 * in mca_cannibalize(), must ensure it is removed from 2022 * the list and release btree_cache_alloc_lock before 2023 * free op memory. 2024 * Otherwise, the btree_cache_wait will be damaged. 2025 */ 2026 bch_cannibalize_unlock(c); 2027 finish_wait(&c->btree_cache_wait, &(&op)->wait); 2028 if (ret) 2029 goto out; 2030 } 2031 p = NULL; 2032 prev_idx = cur_idx; 2033 cond_resched(); 2034 } 2035 2036 out: 2037 info->result = ret; 2038 /* update check_state->started among all CPUs */ 2039 smp_mb__before_atomic(); 2040 if (atomic_dec_and_test(&check_state->started)) 2041 wake_up(&check_state->wait); 2042 2043 return ret; 2044 } 2045 2046 2047 2048 static int bch_btree_chkthread_nr(void) 2049 { 2050 int n = num_online_cpus()/2; 2051 2052 if (n == 0) 2053 n = 1; 2054 else if (n > BCH_BTR_CHKTHREAD_MAX) 2055 n = BCH_BTR_CHKTHREAD_MAX; 2056 2057 return n; 2058 } 2059 2060 int bch_btree_check(struct cache_set *c) 2061 { 2062 int ret = 0; 2063 int i; 2064 struct bkey *k = NULL; 2065 struct btree_iter iter; 2066 struct btree_check_state check_state; 2067 2068 min_heap_init(&iter.heap, NULL, MAX_BSETS); 2069 2070 /* check and mark root node keys */ 2071 for_each_key_filter(&c->root->keys, k, &iter, bch_ptr_invalid) 2072 bch_initial_mark_key(c, c->root->level, k); 2073 2074 bch_initial_mark_key(c, c->root->level + 1, &c->root->key); 2075 2076 if (c->root->level == 0) 2077 return 0; 2078 2079 memset(&check_state, 0, sizeof(struct btree_check_state)); 2080 check_state.c = c; 2081 check_state.total_threads = bch_btree_chkthread_nr(); 2082 check_state.key_idx = 0; 2083 spin_lock_init(&check_state.idx_lock); 2084 atomic_set(&check_state.started, 0); 2085 atomic_set(&check_state.enough, 0); 2086 init_waitqueue_head(&check_state.wait); 2087 2088 rw_lock(0, c->root, c->root->level); 2089 /* 2090 * Run multiple threads to check btree nodes in parallel, 2091 * if check_state.enough is non-zero, it means current 2092 * running check threads are enough, unncessary to create 2093 * more. 
	 */
	for (i = 0; i < check_state.total_threads; i++) {
		/* fetch latest check_state.enough earlier */
		smp_mb__before_atomic();
		if (atomic_read(&check_state.enough))
			break;

		check_state.infos[i].result = 0;
		check_state.infos[i].state = &check_state;

		check_state.infos[i].thread =
			kthread_run(bch_btree_check_thread,
				    &check_state.infos[i],
				    "bch_btrchk[%d]", i);
		if (IS_ERR(check_state.infos[i].thread)) {
			pr_err("fails to run thread bch_btrchk[%d]\n", i);
			for (--i; i >= 0; i--)
				kthread_stop(check_state.infos[i].thread);
			ret = -ENOMEM;
			goto out;
		}
		atomic_inc(&check_state.started);
	}

	/*
	 * Must wait for all threads to stop.
	 */
	wait_event(check_state.wait, atomic_read(&check_state.started) == 0);

	for (i = 0; i < check_state.total_threads; i++) {
		if (check_state.infos[i].result) {
			ret = check_state.infos[i].result;
			goto out;
		}
	}

out:
	rw_unlock(0, c->root);
	return ret;
}

void bch_initial_gc_finish(struct cache_set *c)
{
	struct cache *ca = c->cache;
	struct bucket *b;

	bch_btree_gc_finish(c);

	mutex_lock(&c->bucket_lock);

	/*
	 * We need to put some unused buckets directly on the prio freelist in
	 * order to get the allocator thread started - it needs freed buckets
	 * in order to rewrite the prios and gens, and it needs to rewrite
	 * prios and gens in order to free buckets.
	 *
	 * This is only safe for buckets that have no live data in them, which
	 * there should always be some of.
	 */
	for_each_bucket(b, ca) {
		if (fifo_full(&ca->free[RESERVE_PRIO]) &&
		    fifo_full(&ca->free[RESERVE_BTREE]))
			break;

		if (bch_can_invalidate_bucket(ca, b) &&
		    !GC_MARK(b)) {
			__bch_invalidate_one_bucket(ca, b);
			if (!fifo_push(&ca->free[RESERVE_PRIO],
				       b - ca->buckets))
				fifo_push(&ca->free[RESERVE_BTREE],
					  b - ca->buckets);
		}
	}

	mutex_unlock(&c->bucket_lock);
}

/* Btree insertion */

static bool btree_insert_key(struct btree *b, struct bkey *k,
			     struct bkey *replace_key)
{
	unsigned int status;

	BUG_ON(bkey_cmp(k, &b->key) > 0);

	status = bch_btree_insert_key(&b->keys, k, replace_key);
	if (status != BTREE_INSERT_STATUS_NO_INSERT) {
		bch_check_keys(&b->keys, "%u for %s", status,
			       replace_key ? "replace" : "insert");

		trace_bcache_btree_insert_key(b, k, replace_key != NULL,
					      status);
		return true;
	} else
		return false;
}

static size_t insert_u64s_remaining(struct btree *b)
{
	long ret = bch_btree_keys_u64s_remaining(&b->keys);

	/*
	 * Might land in the middle of an existing extent and have to split it
	 */
	if (b->keys.ops->is_extents)
		ret -= KEY_MAX_U64S;

	return max(ret, 0L);
}

static bool bch_btree_insert_keys(struct btree *b, struct btree_op *op,
				  struct keylist *insert_keys,
				  struct bkey *replace_key)
{
	bool ret = false;
	int oldsize = bch_count_data(&b->keys);

	while (!bch_keylist_empty(insert_keys)) {
		struct bkey *k = insert_keys->keys;

		if (bkey_u64s(k) > insert_u64s_remaining(b))
			break;

		if (bkey_cmp(k, &b->key) <= 0) {
			if (!b->level)
				bkey_put(b->c, k);

			ret |= btree_insert_key(b, k, replace_key);
			bch_keylist_pop_front(insert_keys);
		} else if (bkey_cmp(&START_KEY(k), &b->key) < 0) {
			BKEY_PADDED(key) temp;
			bkey_copy(&temp.key, insert_keys->keys);

			bch_cut_back(&b->key, &temp.key);
			bch_cut_front(&b->key, insert_keys->keys);

			ret |= btree_insert_key(b, &temp.key, replace_key);
			break;
		} else {
			break;
		}
	}

	if (!ret)
		op->insert_collision = true;

	BUG_ON(!bch_keylist_empty(insert_keys) && b->level);

	BUG_ON(bch_count_data(&b->keys) < oldsize);
	return ret;
}

static int btree_split(struct btree *b, struct btree_op *op,
		       struct keylist *insert_keys,
		       struct bkey *replace_key)
{
	bool split;
	struct btree *n1, *n2 = NULL, *n3 = NULL;
	uint64_t start_time = local_clock();
	struct closure cl;
	struct keylist parent_keys;

	closure_init_stack(&cl);
	bch_keylist_init(&parent_keys);

	if (btree_check_reserve(b, op)) {
		if (!b->level)
			return -EINTR;
		else
			WARN(1, "insufficient reserve for split\n");
	}

	n1 = btree_node_alloc_replacement(b, op);
	if (IS_ERR(n1))
		goto err;

	split = set_blocks(btree_bset_first(n1),
			   block_bytes(n1->c->cache)) > (btree_blocks(b) * 4) / 5;

	if (split) {
		unsigned int keys = 0;

		trace_bcache_btree_node_split(b, btree_bset_first(n1)->keys);

		n2 = bch_btree_node_alloc(b->c, op, b->level, b->parent);
		if (IS_ERR(n2))
			goto err_free1;

		if (!b->parent) {
			n3 = bch_btree_node_alloc(b->c, op, b->level + 1, NULL);
			if (IS_ERR(n3))
				goto err_free2;
		}

		mutex_lock(&n1->write_lock);
		mutex_lock(&n2->write_lock);

		bch_btree_insert_keys(n1, op, insert_keys, replace_key);

		/*
		 * Has to be a linear search because we don't have an
		 * auxiliary search tree yet
		 */

		while (keys < (btree_bset_first(n1)->keys * 3) / 5)
			keys += bkey_u64s(bset_bkey_idx(btree_bset_first(n1),
							keys));

		bkey_copy_key(&n1->key,
			      bset_bkey_idx(btree_bset_first(n1), keys));
		keys += bkey_u64s(bset_bkey_idx(btree_bset_first(n1), keys));

		btree_bset_first(n2)->keys = btree_bset_first(n1)->keys - keys;
		btree_bset_first(n1)->keys = keys;

		memcpy(btree_bset_first(n2)->start,
		       bset_bkey_last(btree_bset_first(n1)),
		       btree_bset_first(n2)->keys * sizeof(uint64_t));

		bkey_copy_key(&n2->key, &b->key);

		bch_keylist_add(&parent_keys, &n2->key);
		bch_btree_node_write(n2, &cl);
		mutex_unlock(&n2->write_lock);
		rw_unlock(true, n2);
	} else {
		trace_bcache_btree_node_compact(b, btree_bset_first(n1)->keys);

		mutex_lock(&n1->write_lock);
		bch_btree_insert_keys(n1, op, insert_keys, replace_key);
	}

	bch_keylist_add(&parent_keys, &n1->key);
	bch_btree_node_write(n1, &cl);
	mutex_unlock(&n1->write_lock);

	if (n3) {
		/* Depth increases, make a new root */
		mutex_lock(&n3->write_lock);
		bkey_copy_key(&n3->key, &MAX_KEY);
		bch_btree_insert_keys(n3, op, &parent_keys, NULL);
		bch_btree_node_write(n3, &cl);
		mutex_unlock(&n3->write_lock);

		closure_sync(&cl);
		bch_btree_set_root(n3);
		rw_unlock(true, n3);
	} else if (!b->parent) {
		/* Root filled up but didn't need to be split */
		closure_sync(&cl);
		bch_btree_set_root(n1);
	} else {
		/* Split a non root node */
		closure_sync(&cl);
		make_btree_freeing_key(b, parent_keys.top);
		bch_keylist_push(&parent_keys);

		bch_btree_insert_node(b->parent, op, &parent_keys, NULL, NULL);
		BUG_ON(!bch_keylist_empty(&parent_keys));
	}

	btree_node_free(b);
	rw_unlock(true, n1);

	bch_time_stats_update(&b->c->btree_split_time, start_time);

	return 0;
err_free2:
	bkey_put(b->c, &n2->key);
	btree_node_free(n2);
	rw_unlock(true, n2);
err_free1:
	bkey_put(b->c, &n1->key);
	btree_node_free(n1);
	rw_unlock(true, n1);
err:
	WARN(1, "bcache: btree split failed (level %u)", b->level);

	if (n3 == ERR_PTR(-EAGAIN) ||
	    n2 == ERR_PTR(-EAGAIN) ||
	    n1 == ERR_PTR(-EAGAIN))
		return -EAGAIN;

	return -ENOMEM;
}

static int bch_btree_insert_node(struct btree *b, struct btree_op *op,
				 struct keylist *insert_keys,
				 atomic_t *journal_ref,
				 struct bkey *replace_key)
{
	struct closure cl;

	BUG_ON(b->level && replace_key);

	closure_init_stack(&cl);

	mutex_lock(&b->write_lock);

	if (write_block(b) != btree_bset_last(b) &&
	    b->keys.last_set_unwritten)
		bch_btree_init_next(b); /* just wrote a set */

	if (bch_keylist_nkeys(insert_keys) > insert_u64s_remaining(b)) {
		mutex_unlock(&b->write_lock);
		goto split;
	}

	BUG_ON(write_block(b) != btree_bset_last(b));

	if (bch_btree_insert_keys(b, op, insert_keys, replace_key)) {
		if (!b->level)
			bch_btree_leaf_dirty(b, journal_ref);
		else
			bch_btree_node_write(b, &cl);
	}

	mutex_unlock(&b->write_lock);

	/* wait for btree node write if necessary, after unlock */
	closure_sync(&cl);

	return 0;
split:
	if (current->bio_list) {
		op->lock = b->c->root->level + 1;
		return -EAGAIN;
	} else if (op->lock <= b->c->root->level) {
		op->lock = b->c->root->level + 1;
		return -EINTR;
	} else {
		/* Invalidated all iterators */
		int ret = btree_split(b, op, insert_keys, replace_key);

		if (bch_keylist_empty(insert_keys))
			return 0;
		else if (!ret)
			return -EINTR;
		return ret;
	}
}

int bch_btree_insert_check_key(struct btree *b, struct btree_op *op,
			       struct bkey *check_key)
{
	int ret = -EINTR;
	uint64_t btree_ptr = b->key.ptr[0];
	unsigned long seq = b->seq;
	struct keylist insert;
	bool upgrade = op->lock == -1;

	bch_keylist_init(&insert);

	if (upgrade) {
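		/*
		 * Upgrading from a read lock to a write lock requires
		 * dropping the lock entirely, so the node may be freed or
		 * rewritten while it is unlocked; the pointer and sequence
		 * number checks below catch that case and bail out so the
		 * caller can retry with the proper lock held.
		 */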
		rw_unlock(false, b);
		rw_lock(true, b, b->level);

		if (b->key.ptr[0] != btree_ptr ||
		    b->seq != seq + 1) {
			op->lock = b->level;
			goto out;
		}
	}

	SET_KEY_PTRS(check_key, 1);
	get_random_bytes(&check_key->ptr[0], sizeof(uint64_t));

	SET_PTR_DEV(check_key, 0, PTR_CHECK_DEV);

	bch_keylist_add(&insert, check_key);

	ret = bch_btree_insert_node(b, op, &insert, NULL, NULL);

	BUG_ON(!ret && !bch_keylist_empty(&insert));
out:
	if (upgrade)
		downgrade_write(&b->lock);
	return ret;
}

struct btree_insert_op {
	struct btree_op	op;
	struct keylist	*keys;
	atomic_t	*journal_ref;
	struct bkey	*replace_key;
};

static int btree_insert_fn(struct btree_op *b_op, struct btree *b)
{
	struct btree_insert_op *op = container_of(b_op,
					struct btree_insert_op, op);

	int ret = bch_btree_insert_node(b, &op->op, op->keys,
					op->journal_ref, op->replace_key);
	if (ret && !bch_keylist_empty(op->keys))
		return ret;
	else
		return MAP_DONE;
}

int bch_btree_insert(struct cache_set *c, struct keylist *keys,
		     atomic_t *journal_ref, struct bkey *replace_key)
{
	struct btree_insert_op op;
	int ret = 0;

	BUG_ON(current->bio_list);
	BUG_ON(bch_keylist_empty(keys));

	bch_btree_op_init(&op.op, 0);
	op.keys = keys;
	op.journal_ref = journal_ref;
	op.replace_key = replace_key;

	while (!ret && !bch_keylist_empty(keys)) {
		op.op.lock = 0;
		ret = bch_btree_map_leaf_nodes(&op.op, c,
					       &START_KEY(keys->keys),
					       btree_insert_fn);
	}

	if (ret) {
		struct bkey *k;

		pr_err("error %i\n", ret);

		while ((k = bch_keylist_pop(keys)))
			bkey_put(c, k);
	} else if (op.op.insert_collision)
		ret = -ESRCH;

	return ret;
}

void bch_btree_set_root(struct btree *b)
{
	unsigned int i;
	struct closure cl;

	closure_init_stack(&cl);

	trace_bcache_btree_set_root(b);

	BUG_ON(!b->written);

	for (i = 0; i < KEY_PTRS(&b->key); i++)
		BUG_ON(PTR_BUCKET(b->c, &b->key, i)->prio != BTREE_PRIO);

	mutex_lock(&b->c->bucket_lock);
	list_del_init(&b->list);
	mutex_unlock(&b->c->bucket_lock);

	b->c->root = b;

	bch_journal_meta(b->c, &cl);
	closure_sync(&cl);
}

/* Map across nodes or keys */

static int bch_btree_map_nodes_recurse(struct btree *b, struct btree_op *op,
				       struct bkey *from,
				       btree_map_nodes_fn *fn, int flags)
{
	int ret = MAP_CONTINUE;

	if (b->level) {
		struct bkey *k;
		struct btree_iter iter;

		min_heap_init(&iter.heap, NULL, MAX_BSETS);
		bch_btree_iter_init(&b->keys, &iter, from);

		while ((k = bch_btree_iter_next_filter(&iter, &b->keys,
						       bch_ptr_bad))) {
			ret = bcache_btree(map_nodes_recurse, k, b,
					   op, from, fn, flags);
			from = NULL;

			if (ret != MAP_CONTINUE)
				return ret;
		}
	}

	if (!b->level || flags == MAP_ALL_NODES)
		ret = fn(op, b);

	return ret;
}

int __bch_btree_map_nodes(struct btree_op *op, struct cache_set *c,
			  struct bkey *from, btree_map_nodes_fn *fn, int flags)
{
	return bcache_btree_root(map_nodes_recurse, c, op, from, fn, flags);
}

int bch_btree_map_keys_recurse(struct btree *b, struct btree_op *op,
			       struct bkey *from, btree_map_keys_fn
			       *fn, int flags)
{
	int ret = MAP_CONTINUE;
	struct bkey *k;
	struct btree_iter iter;

	min_heap_init(&iter.heap, NULL, MAX_BSETS);
	bch_btree_iter_init(&b->keys, &iter, from);

	while ((k = bch_btree_iter_next_filter(&iter, &b->keys, bch_ptr_bad))) {
		ret = !b->level
			? fn(op, b, k)
			: bcache_btree(map_keys_recurse, k,
				       b, op, from, fn, flags);
		from = NULL;

		if (ret != MAP_CONTINUE)
			return ret;
	}

	if (!b->level && (flags & MAP_END_KEY))
		ret = fn(op, b, &KEY(KEY_INODE(&b->key),
				     KEY_OFFSET(&b->key), 0));

	return ret;
}

int bch_btree_map_keys(struct btree_op *op, struct cache_set *c,
		       struct bkey *from, btree_map_keys_fn *fn, int flags)
{
	return bcache_btree_root(map_keys_recurse, c, op, from, fn, flags);
}

/* Keybuf code */

static inline int keybuf_cmp(struct keybuf_key *l, struct keybuf_key *r)
{
	/* Overlapping keys compare equal */
	if (bkey_cmp(&l->key, &START_KEY(&r->key)) <= 0)
		return -1;
	if (bkey_cmp(&START_KEY(&l->key), &r->key) >= 0)
		return 1;
	return 0;
}

static inline int keybuf_nonoverlapping_cmp(struct keybuf_key *l,
					    struct keybuf_key *r)
{
	return clamp_t(int64_t, bkey_cmp(&l->key, &r->key), -1, 1);
}

struct refill {
	struct btree_op	op;
	unsigned int	nr_found;
	struct keybuf	*buf;
	struct bkey	*end;
	keybuf_pred_fn	*pred;
};

static int refill_keybuf_fn(struct btree_op *op, struct btree *b,
			    struct bkey *k)
{
	struct refill *refill = container_of(op, struct refill, op);
	struct keybuf *buf = refill->buf;
	int ret = MAP_CONTINUE;

	if (bkey_cmp(k, refill->end) > 0) {
		ret = MAP_DONE;
		goto out;
	}

	if (!KEY_SIZE(k)) /* end key */
		goto out;

	if (refill->pred(buf, k)) {
		struct keybuf_key *w;

		spin_lock(&buf->lock);

		w = array_alloc(&buf->freelist);
		if (!w) {
			spin_unlock(&buf->lock);
			return MAP_DONE;
		}

		w->private = NULL;
		bkey_copy(&w->key, k);

		if (RB_INSERT(&buf->keys, w, node, keybuf_cmp))
			array_free(&buf->freelist, w);
		else
			refill->nr_found++;

		if (array_freelist_empty(&buf->freelist))
			ret = MAP_DONE;

		spin_unlock(&buf->lock);
	}
out:
	buf->last_scanned = *k;
	return ret;
}

void bch_refill_keybuf(struct cache_set *c, struct keybuf *buf,
		       struct bkey *end, keybuf_pred_fn *pred)
{
	struct bkey start = buf->last_scanned;
	struct refill refill;

	cond_resched();

	bch_btree_op_init(&refill.op, -1);
	refill.nr_found = 0;
	refill.buf = buf;
	refill.end = end;
	refill.pred = pred;

	bch_btree_map_keys(&refill.op, c, &buf->last_scanned,
			   refill_keybuf_fn, MAP_END_KEY);

	trace_bcache_keyscan(refill.nr_found,
			     KEY_INODE(&start), KEY_OFFSET(&start),
			     KEY_INODE(&buf->last_scanned),
			     KEY_OFFSET(&buf->last_scanned));

	spin_lock(&buf->lock);

	if (!RB_EMPTY_ROOT(&buf->keys)) {
		struct keybuf_key *w;

		w = RB_FIRST(&buf->keys, struct keybuf_key, node);
		buf->start = START_KEY(&w->key);

		w = RB_LAST(&buf->keys, struct keybuf_key, node);
		buf->end = w->key;
	} else {
		buf->start = MAX_KEY;
		buf->end = MAX_KEY;
	}

	spin_unlock(&buf->lock);
}

static void
__bch_keybuf_del(struct keybuf *buf, struct keybuf_key *w)
{
	rb_erase(&w->node, &buf->keys);
	array_free(&buf->freelist, w);
}

void bch_keybuf_del(struct keybuf *buf, struct keybuf_key *w)
{
	spin_lock(&buf->lock);
	__bch_keybuf_del(buf, w);
	spin_unlock(&buf->lock);
}

bool bch_keybuf_check_overlapping(struct keybuf *buf, struct bkey *start,
				  struct bkey *end)
{
	bool ret = false;
	struct keybuf_key *p, *w, s;

	s.key = *start;

	if (bkey_cmp(end, &buf->start) <= 0 ||
	    bkey_cmp(start, &buf->end) >= 0)
		return false;

	spin_lock(&buf->lock);
	w = RB_GREATER(&buf->keys, s, node, keybuf_nonoverlapping_cmp);

	while (w && bkey_cmp(&START_KEY(&w->key), end) < 0) {
		p = w;
		w = RB_NEXT(w, node);

		if (p->private)
			ret = true;
		else
			__bch_keybuf_del(buf, p);
	}

	spin_unlock(&buf->lock);
	return ret;
}

struct keybuf_key *bch_keybuf_next(struct keybuf *buf)
{
	struct keybuf_key *w;

	spin_lock(&buf->lock);

	w = RB_FIRST(&buf->keys, struct keybuf_key, node);

	while (w && w->private)
		w = RB_NEXT(w, node);

	if (w)
		w->private = ERR_PTR(-EINTR);

	spin_unlock(&buf->lock);
	return w;
}

struct keybuf_key *bch_keybuf_next_rescan(struct cache_set *c,
					  struct keybuf *buf,
					  struct bkey *end,
					  keybuf_pred_fn *pred)
{
	struct keybuf_key *ret;

	while (1) {
		ret = bch_keybuf_next(buf);
		if (ret)
			break;

		if (bkey_cmp(&buf->last_scanned, end) >= 0) {
			pr_debug("scan finished\n");
			break;
		}

		bch_refill_keybuf(c, buf, end, pred);
	}

	return ret;
}

void bch_keybuf_init(struct keybuf *buf)
{
	buf->last_scanned = MAX_KEY;
	buf->keys = RB_ROOT;

	spin_lock_init(&buf->lock);
	array_allocator_init(&buf->freelist);
}

void bch_btree_exit(void)
{
	if (btree_io_wq)
		destroy_workqueue(btree_io_wq);
}

int __init bch_btree_init(void)
{
	btree_io_wq = alloc_workqueue("bch_btree_io", WQ_MEM_RECLAIM, 0);
	if (!btree_io_wq)
		return -ENOMEM;

	return 0;
}
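/*
 * Note: btree_io_wq allocated in bch_btree_init() is used by the deferred
 * btree write completion work for the whole lifetime of the module.
 * bch_btree_init() and bch_btree_exit() are expected to be called from the
 * module init/exit hooks (assumed here to be bcache_init()/bcache_exit() in
 * super.c), so the workqueue exists before any cache set is registered and
 * is destroyed only after all of them are gone.
 */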