Lines Matching full:keys in drivers/md/bcache/btree.c
18 * as keys are inserted we only sort the pages that have not yet been written.
52 * Check for bad keys in replay
113 if (b->level && b->keys.nsets) in bch_btree_init_next()
114 bch_btree_sort(&b->keys, &b->c->sort); in bch_btree_init_next()
116 bch_btree_sort_lazy(&b->keys, &b->c->sort); in bch_btree_init_next()
119 bch_bset_init_next(&b->keys, write_block(b), in bch_btree_init_next()
162 iter->b = &b->keys; in bch_btree_node_read_done()
169 b->written < btree_blocks(b) && i->seq == b->keys.set[0].data->seq; in bch_btree_node_read_done()
197 if (i != b->keys.set[0].data && !i->keys) in bch_btree_node_read_done()
207 bset_sector_offset(&b->keys, i) < KEY_SIZE(&b->key); in bch_btree_node_read_done()
209 if (i->seq == b->keys.set[0].data->seq) in bch_btree_node_read_done()
212 bch_btree_sort_and_fix_extents(&b->keys, iter, &b->c->sort); in bch_btree_node_read_done()
214 i = b->keys.set[0].data; in bch_btree_node_read_done()
216 if (b->keys.set[0].size && in bch_btree_node_read_done()
217 bkey_cmp(&b->key, &b->keys.set[0].end) < 0) in bch_btree_node_read_done()
221 bch_bset_init_next(&b->keys, write_block(b), in bch_btree_node_read_done()
228 bch_cache_set_error(b->c, "%s at bucket %zu, block %u, %u keys", in bch_btree_node_read_done()
230 bset_block_offset(b, i), i->keys); in bch_btree_node_read_done()
257 bch_bio_map(bio, b->keys.set[0].data); in bch_btree_node_read()
371 bset_sector_offset(&b->keys, i)); in do_btree_node_write()
411 BUG_ON(b->written && !i->keys); in __bch_btree_node_write()
413 bch_check_keys(&b->keys, "writing"); in __bch_btree_node_write()
434 unsigned int nsets = b->keys.nsets; in bch_btree_node_write()
444 if (nsets && !b->keys.nsets) in bch_btree_node_write()
481 BUG_ON(!i->keys); in bch_btree_leaf_dirty()
526 bch_btree_keys_free(&b->keys); in mca_data_free()
548 if (!bch_btree_keys_alloc(&b->keys, in mca_data_alloc()
595 BUG_ON(btree_node_dirty(b) && !b->keys.set[0].data); in mca_reap()
597 if (b->keys.page_order < min_order) in mca_reap()
669 * succeed, so that inserting keys into the btree can always succeed and in bch_mca_scan()
802 c->verify_data->keys.set->data) in bch_btree_cache_alloc()
921 if (!b->keys.set[0].data) in mca_alloc()
932 if (!b->keys.set->data) in mca_alloc()
949 bch_btree_keys_init(&b->keys, &bch_extent_keys_ops, in mca_alloc()
952 bch_btree_keys_init(&b->keys, &bch_btree_keys_ops, in mca_alloc()
1022 for (; i <= b->keys.nsets && b->keys.set[i].size; i++) { in bch_btree_node_get()
1023 prefetch(b->keys.set[i].tree); in bch_btree_node_get()
1024 prefetch(b->keys.set[i].data); in bch_btree_node_get()
1027 for (; i <= b->keys.nsets; i++) in bch_btree_node_get()
1028 prefetch(b->keys.set[i].data); in bch_btree_node_get()
1112 bch_bset_init_next(&b->keys, b->keys.set->data, bset_magic(&b->c->cache->sb)); in __bch_btree_node_alloc()
1141 bch_btree_sort_into(&b->keys, &n->keys, &b->c->sort); in btree_node_alloc_replacement()
1199 * ptr_invalid() can't return true for the keys that mark btree nodes as in __bch_btree_mark_key()
1273 unsigned int keys = 0, good_keys = 0; in btree_gc_mark_node() local
1280 for_each_key_filter(&b->keys, k, &iter, bch_ptr_invalid) { in btree_gc_mark_node()
1282 keys++; in btree_gc_mark_node()
1284 if (bch_ptr_bad(&b->keys, k)) in btree_gc_mark_node()
1294 for (t = b->keys.set; t <= &b->keys.set[b->keys.nsets]; t++) in btree_gc_mark_node()
1296 bset_written(&b->keys, t) && in btree_gc_mark_node()
1306 if ((keys - good_keys) * 2 > keys) in btree_gc_mark_node()
1316 unsigned int keys; member
1327 unsigned int i, nodes = 0, keys = 0, blocks; in btree_gc_coalesce() local
1342 keys += r[nodes++].keys; in btree_gc_coalesce()
1347 __set_blocks(b->keys.set[0].data, keys, in btree_gc_coalesce()
1374 keys = 0; in btree_gc_coalesce()
1380 if (__set_blocks(n1, n1->keys + keys + in btree_gc_coalesce()
1386 keys += bkey_u64s(k); in btree_gc_coalesce()
1392 * the remaining keys into this node; we can't ensure in btree_gc_coalesce()
1394 * length keys (shouldn't be possible in practice, in btree_gc_coalesce()
1397 if (__set_blocks(n1, n1->keys + n2->keys, in btree_gc_coalesce()
1402 keys = n2->keys; in btree_gc_coalesce()
1407 BUG_ON(__set_blocks(n1, n1->keys + keys, block_bytes(b->c->cache)) > in btree_gc_coalesce()
1415 (void *) bset_bkey_idx(n2, keys) - (void *) n2->start); in btree_gc_coalesce()
1417 n1->keys += keys; in btree_gc_coalesce()
1418 r[i].keys = n1->keys; in btree_gc_coalesce()
1421 bset_bkey_idx(n2, keys), in btree_gc_coalesce()
1423 (void *) bset_bkey_idx(n2, keys)); in btree_gc_coalesce()
1425 n2->keys -= keys; in btree_gc_coalesce()
1441 BUG_ON(btree_bset_first(new_nodes[0])->keys); in btree_gc_coalesce()
1498 struct keylist keys; in btree_gc_rewrite_node() local
1515 bch_keylist_init(&keys); in btree_gc_rewrite_node()
1516 bch_keylist_add(&keys, &n->key); in btree_gc_rewrite_node()
1518 make_btree_freeing_key(replace, keys.top); in btree_gc_rewrite_node()
1519 bch_keylist_push(&keys); in btree_gc_rewrite_node()
1521 bch_btree_insert_node(b, op, &keys, NULL, NULL); in btree_gc_rewrite_node()
1522 BUG_ON(!bch_keylist_empty(&keys)); in btree_gc_rewrite_node()
1537 for_each_key_filter(&b->keys, k, &iter, bch_ptr_bad) in btree_gc_count_keys()
1579 bch_btree_iter_init(&b->keys, &iter, &b->c->gc_done); in btree_gc_recurse()
1585 k = bch_btree_iter_next_filter(&iter, &b->keys, bch_ptr_bad); in btree_gc_recurse()
1594 r->keys = btree_gc_count_keys(r->b); in btree_gc_recurse()
1736 /* don't reclaim buckets to which writeback keys point */ in bch_btree_gc_finish()
1749 &dc->writeback_keys.keys, node) in bch_btree_gc_finish()
1762 for (k = ca->sb.d; k < ca->sb.d + ca->sb.keys; k++) in bch_btree_gc_finish()
1877 for_each_key_filter(&b->keys, k, &iter, bch_ptr_invalid) in bch_btree_check_recurse()
1883 bch_btree_iter_init(&b->keys, &iter, NULL); in bch_btree_check_recurse()
1886 k = bch_btree_iter_next_filter(&iter, &b->keys, in bch_btree_check_recurse()
1922 /* root node keys are checked before thread created */ in bch_btree_check_thread()
1923 bch_btree_iter_init(&c->root->keys, &iter, NULL); in bch_btree_check_thread()
1924 k = bch_btree_iter_next_filter(&iter, &c->root->keys, bch_ptr_bad); in bch_btree_check_thread()
1930 * Fetch a root node key index, skip the keys which in bch_btree_check_thread()
1943 &c->root->keys, in bch_btree_check_thread()
1949 * No more keys to check in root node, in bch_btree_check_thread()
2010 /* check and mark root node keys */ in bch_btree_check()
2011 for_each_key_filter(&c->root->keys, k, &iter, bch_ptr_invalid) in bch_btree_check()
2122 status = bch_btree_insert_key(&b->keys, k, replace_key); in btree_insert_key()
2124 bch_check_keys(&b->keys, "%u for %s", status, in btree_insert_key()
2136 long ret = bch_btree_keys_u64s_remaining(&b->keys); in insert_u64s_remaining()
2141 if (b->keys.ops->is_extents) in insert_u64s_remaining()
2152 int oldsize = bch_count_data(&b->keys); in bch_btree_insert_keys()
2155 struct bkey *k = insert_keys->keys; in bch_btree_insert_keys()
2168 bkey_copy(&temp.key, insert_keys->keys); in bch_btree_insert_keys()
2171 bch_cut_front(&b->key, insert_keys->keys); in bch_btree_insert_keys()
2185 BUG_ON(bch_count_data(&b->keys) < oldsize); in bch_btree_insert_keys()
2217 unsigned int keys = 0; in btree_split() local
2219 trace_bcache_btree_node_split(b, btree_bset_first(n1)->keys); in btree_split()
2241 while (keys < (btree_bset_first(n1)->keys * 3) / 5) in btree_split()
2242 keys += bkey_u64s(bset_bkey_idx(btree_bset_first(n1), in btree_split()
2243 keys)); in btree_split()
2246 bset_bkey_idx(btree_bset_first(n1), keys)); in btree_split()
2247 keys += bkey_u64s(bset_bkey_idx(btree_bset_first(n1), keys)); in btree_split()
2249 btree_bset_first(n2)->keys = btree_bset_first(n1)->keys - keys; in btree_split()
2250 btree_bset_first(n1)->keys = keys; in btree_split()
2254 btree_bset_first(n2)->keys * sizeof(uint64_t)); in btree_split()
2263 trace_bcache_btree_node_compact(b, btree_bset_first(n1)->keys); in btree_split()
2337 b->keys.last_set_unwritten) in bch_btree_insert_node()
2419 struct keylist *keys; member
2429 int ret = bch_btree_insert_node(b, &op->op, op->keys, in btree_insert_fn()
2431 if (ret && !bch_keylist_empty(op->keys)) in btree_insert_fn()
2437 int bch_btree_insert(struct cache_set *c, struct keylist *keys, in bch_btree_insert() argument
2444 BUG_ON(bch_keylist_empty(keys)); in bch_btree_insert()
2447 op.keys = keys; in bch_btree_insert()
2451 while (!ret && !bch_keylist_empty(keys)) { in bch_btree_insert()
2454 &START_KEY(keys->keys), in bch_btree_insert()
2463 while ((k = bch_keylist_pop(keys))) in bch_btree_insert()
2495 /* Map across nodes or keys */
2507 bch_btree_iter_init(&b->keys, &iter, from); in bch_btree_map_nodes_recurse()
2509 while ((k = bch_btree_iter_next_filter(&iter, &b->keys, in bch_btree_map_nodes_recurse()
2540 bch_btree_iter_init(&b->keys, &iter, from); in bch_btree_map_keys_recurse()
2542 while ((k = bch_btree_iter_next_filter(&iter, &b->keys, bch_ptr_bad))) { in bch_btree_map_keys_recurse()
2570 /* Overlapping keys compare equal */ in keybuf_cmp()
2621 if (RB_INSERT(&buf->keys, w, node, keybuf_cmp)) in refill_keybuf_fn()
2660 if (!RB_EMPTY_ROOT(&buf->keys)) { in bch_refill_keybuf()
2663 w = RB_FIRST(&buf->keys, struct keybuf_key, node); in bch_refill_keybuf()
2666 w = RB_LAST(&buf->keys, struct keybuf_key, node); in bch_refill_keybuf()
2678 rb_erase(&w->node, &buf->keys); in __bch_keybuf_del()
2702 w = RB_GREATER(&buf->keys, s, node, keybuf_nonoverlapping_cmp); in bch_keybuf_check_overlapping()
2724 w = RB_FIRST(&buf->keys, struct keybuf_key, node); in bch_keybuf_next()
2762 buf->keys = RB_ROOT; in bch_keybuf_init()