Lines Matching +full:- +full:t (bcachefs bset.c)

1 // SPDX-License-Identifier: GPL-2.0
26 unsigned n = ARRAY_SIZE(iter->data); in __btree_node_iter_used()
28 while (n && __btree_node_iter_set_end(iter, n - 1)) in __btree_node_iter_used()
29 --n; in __btree_node_iter_used()
40 * There are never duplicate live keys in the btree - but including keys that
50 * prior to the first key greater than the key we're inserting - our insert
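The two comment fragments above (source lines 40 and 50) describe the insert-position convention: deleted keys are kept in the bset, and a new key goes immediately before the first existing key strictly greater than it. A minimal standalone sketch of that rule, with plain ints standing in for bkeys and their comparator (hypothetical, illustration only):

static unsigned insert_pos(const int *keys, unsigned nr, int insert)
{
	unsigned i;

	/* insert lands before the first key strictly greater than it */
	for (i = 0; i < nr; i++)
		if (keys[i] > insert)
			break;
	return i;
}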
63 if (!i->u64s) in bch2_dump_bset()
66 for (_k = i->start; in bch2_dump_bset()
71 if (!_k->u64s) { in bch2_dump_bset()
72 printk(KERN_ERR "block %u key %5zu - u64s 0? aieee!\n", set, in bch2_dump_bset()
73 _k->_data - i->_data); in bch2_dump_bset()
85 _k->_data - i->_data, buf.buf); in bch2_dump_bset()
92 if (bpos_lt(n.p, k.k->p)) { in bch2_dump_bset()
97 if (!bkey_deleted(k.k) && bpos_eq(n.p, k.k->p)) in bch2_dump_bset()
106 struct bset_tree *t; in bch2_dump_btree_node() local
109 for_each_bset(b, t) in bch2_dump_btree_node()
110 bch2_dump_bset(c, b, bset(b, t), t - b->set); in bch2_dump_btree_node()
121 __btree_node_iter_used(iter), b->nsets); in bch2_dump_btree_node_iter()
124 struct bkey_packed *k = __btree_node_offset_to_key(b, set->k); in bch2_dump_btree_node_iter()
125 struct bset_tree *t = bch2_bkey_to_bset(b, k); in bch2_dump_btree_node_iter() local
131 t - b->set, set->k, buf.buf); in bch2_dump_btree_node_iter()
141 struct bset_tree *t; in __bch2_verify_btree_nr_keys() local
145 for_each_bset(b, t) in __bch2_verify_btree_nr_keys()
146 bset_tree_for_each_key(b, t, k) in __bch2_verify_btree_nr_keys()
148 btree_keys_account_key_add(&nr, t - b->set, k); in __bch2_verify_btree_nr_keys()
150 BUG_ON(memcmp(&nr, &b->nr, sizeof(nr))); in __bch2_verify_btree_nr_keys()
181 struct bkey_packed *k2 = __btree_node_offset_to_key(b, set->k); in bch2_btree_node_iter_next_check()
182 struct bset_tree *t = bch2_bkey_to_bset(b, k2); in bch2_btree_node_iter_next_check() local
183 printk(" [%zi %zi]", t - b->set, in bch2_btree_node_iter_next_check()
184 k2->_data - bset(b, t)->_data); in bch2_btree_node_iter_next_check()
195 struct bset_tree *t; in bch2_btree_node_iter_verify() local
202 BUG_ON(set->k > set->end); in bch2_btree_node_iter_verify()
204 BUG_ON(set != s2 && set->end == s2->end); in bch2_btree_node_iter_verify()
207 /* Verify that set->end is correct: */ in bch2_btree_node_iter_verify()
209 for_each_bset(b, t) in bch2_btree_node_iter_verify()
210 if (set->end == t->end_offset) in bch2_btree_node_iter_verify()
214 BUG_ON(set->k < btree_bkey_first_offset(t) || in bch2_btree_node_iter_verify()
215 set->k >= t->end_offset); in bch2_btree_node_iter_verify()
220 BUG_ON(set != iter->data && in bch2_btree_node_iter_verify()
221 btree_node_iter_cmp(b, set[-1], set[0]) > 0); in bch2_btree_node_iter_verify()
225 for_each_bset(b, t) { in bch2_btree_node_iter_verify()
226 if (iter->data[0].end == t->end_offset) in bch2_btree_node_iter_verify()
229 p = bch2_bkey_prev_all(b, t, in bch2_btree_node_iter_verify()
230 bch2_btree_node_iter_bset_pos(iter, b, t)); in bch2_btree_node_iter_verify()
239 struct bset_tree *t = bch2_bkey_to_bset(b, where); in bch2_verify_insert_pos() local
240 struct bkey_packed *prev = bch2_bkey_prev_all(b, t, where); in bch2_verify_insert_pos()
241 struct bkey_packed *next = (void *) ((u64 *) where->_data + clobber_u64s); in bch2_verify_insert_pos()
264 BUG_ON(next != btree_bkey_last(b, t) && in bch2_verify_insert_pos()
267 if (next != btree_bkey_last(b, t) && in bch2_verify_insert_pos()
318 static unsigned bset_aux_tree_buf_end(const struct bset_tree *t) in bset_aux_tree_buf_end() argument
320 BUG_ON(t->aux_data_offset == U16_MAX); in bset_aux_tree_buf_end()
322 switch (bset_aux_tree_type(t)) { in bset_aux_tree_buf_end()
324 return t->aux_data_offset; in bset_aux_tree_buf_end()
326 return t->aux_data_offset + in bset_aux_tree_buf_end()
327 DIV_ROUND_UP(t->size * sizeof(struct bkey_float) + in bset_aux_tree_buf_end()
328 t->size * sizeof(u8), 8); in bset_aux_tree_buf_end()
330 return t->aux_data_offset + in bset_aux_tree_buf_end()
331 DIV_ROUND_UP(sizeof(struct rw_aux_tree) * t->size, 8); in bset_aux_tree_buf_end()
338 const struct bset_tree *t) in bset_aux_tree_buf_start() argument
340 return t == b->set in bset_aux_tree_buf_start()
341 ? DIV_ROUND_UP(b->unpack_fn_len, 8) in bset_aux_tree_buf_start()
342 : bset_aux_tree_buf_end(t - 1); in bset_aux_tree_buf_start()
346 const struct bset_tree *t) in __aux_tree_base() argument
348 return b->aux_data + t->aux_data_offset * 8; in __aux_tree_base()
352 const struct bset_tree *t) in ro_aux_tree_base() argument
354 EBUG_ON(bset_aux_tree_type(t) != BSET_RO_AUX_TREE); in ro_aux_tree_base()
356 return __aux_tree_base(b, t); in ro_aux_tree_base()
360 const struct bset_tree *t) in ro_aux_tree_prev() argument
362 EBUG_ON(bset_aux_tree_type(t) != BSET_RO_AUX_TREE); in ro_aux_tree_prev()
364 return __aux_tree_base(b, t) + bkey_float_byte_offset(t->size); in ro_aux_tree_prev()
368 const struct bset_tree *t, in bkey_float() argument
371 return ro_aux_tree_base(b, t)->f + idx; in bkey_float()
377 const struct bset_tree *t; in bset_aux_tree_verify() local
379 for_each_bset(b, t) { in bset_aux_tree_verify()
380 if (t->aux_data_offset == U16_MAX) in bset_aux_tree_verify()
383 BUG_ON(t != b->set && in bset_aux_tree_verify()
384 t[-1].aux_data_offset == U16_MAX); in bset_aux_tree_verify()
386 BUG_ON(t->aux_data_offset < bset_aux_tree_buf_start(b, t)); in bset_aux_tree_verify()
387 BUG_ON(t->aux_data_offset > btree_aux_data_u64s(b)); in bset_aux_tree_verify()
388 BUG_ON(bset_aux_tree_buf_end(t) > btree_aux_data_u64s(b)); in bset_aux_tree_verify()
397 b->nsets = 0; in bch2_btree_keys_init()
398 memset(&b->nr, 0, sizeof(b->nr)); in bch2_btree_keys_init()
401 b->set[i].data_offset = U16_MAX; in bch2_btree_keys_init()
403 bch2_bset_set_no_aux_tree(b, b->set); in bch2_btree_keys_init()
409 * Cacheline/offset <-> bkey pointer arithmetic:
411 * t->tree is a binary search tree in an array; each node corresponds to a key
412 * in one cacheline in t->set (BSET_CACHELINE bytes).
414 * This means we don't have to store the full index of the key that a node in
416 * then bkey_float->m gives us the offset within that cacheline, in units of 8
424 * bits we're going to store in bkey_float->mantissa. t->prev[j] stores the size
425 * of the previous key so we can walk backwards to it from t->tree[j]'s key.
429 const struct bset_tree *t, in bset_cacheline() argument
432 return (void *) round_down((unsigned long) btree_bkey_first(b, t), in bset_cacheline()
438 const struct bset_tree *t, in cacheline_to_bkey() argument
442 return bset_cacheline(b, t, cacheline) + offset * 8; in cacheline_to_bkey()
446 const struct bset_tree *t, in bkey_to_cacheline() argument
449 return ((void *) k - bset_cacheline(b, t, 0)) / BSET_CACHELINE; in bkey_to_cacheline()
453 const struct bset_tree *t, in __bkey_to_cacheline_offset() argument
457 return (u64 *) k - (u64 *) bset_cacheline(b, t, cacheline); in __bkey_to_cacheline_offset()
461 const struct bset_tree *t, in bkey_to_cacheline_offset() argument
465 size_t m = __bkey_to_cacheline_offset(b, t, cacheline, k); in bkey_to_cacheline_offset()
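The comment above (source lines 409-425) explains why a tree node can locate its key as a (cacheline, 8-byte-offset) pair instead of a full pointer; the helpers just listed implement the conversions, and the two directions must round-trip. A hypothetical debug check built only from those helpers:

static void check_cacheline_roundtrip(const struct btree *b,
				      const struct bset_tree *t,
				      struct bkey_packed *k)
{
	size_t cacheline = bkey_to_cacheline(b, t, k);
	size_t offset = __bkey_to_cacheline_offset(b, t, cacheline, k);

	/* splitting and recombining must give back the same pointer */
	BUG_ON(cacheline_to_bkey(b, t, cacheline, offset) != k);
}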
472 const struct bset_tree *t, in tree_to_bkey() argument
475 return cacheline_to_bkey(b, t, in tree_to_bkey()
476 __eytzinger1_to_inorder(j, t->size - 1, t->extra), in tree_to_bkey()
477 bkey_float(b, t, j)->key_offset); in tree_to_bkey()
481 const struct bset_tree *t, in tree_to_prev_bkey() argument
484 unsigned prev_u64s = ro_aux_tree_prev(b, t)[j]; in tree_to_prev_bkey()
486 return (void *) ((u64 *) tree_to_bkey(b, t, j)->_data - prev_u64s); in tree_to_prev_bkey()
490 const struct bset_tree *t) in rw_aux_tree() argument
492 EBUG_ON(bset_aux_tree_type(t) != BSET_RW_AUX_TREE); in rw_aux_tree()
494 return __aux_tree_base(b, t); in rw_aux_tree()
498 * For the write set - the one we're currently inserting keys into - we don't
499 * maintain a full search tree, we just keep a simple lookup table in t->prev.
502 struct bset_tree *t, in rw_aux_to_bkey() argument
505 return __btree_node_offset_to_key(b, rw_aux_tree(b, t)[j].offset); in rw_aux_to_bkey()
508 static void rw_aux_tree_set(const struct btree *b, struct bset_tree *t, in rw_aux_tree_set() argument
511 EBUG_ON(k >= btree_bkey_last(b, t)); in rw_aux_tree_set()
513 rw_aux_tree(b, t)[j] = (struct rw_aux_tree) { in rw_aux_tree_set()
520 struct bset_tree *t) in bch2_bset_verify_rw_aux_tree() argument
522 struct bkey_packed *k = btree_bkey_first(b, t); in bch2_bset_verify_rw_aux_tree()
528 BUG_ON(bset_has_ro_aux_tree(t)); in bch2_bset_verify_rw_aux_tree()
530 if (!bset_has_rw_aux_tree(t)) in bch2_bset_verify_rw_aux_tree()
533 BUG_ON(t->size < 1); in bch2_bset_verify_rw_aux_tree()
534 BUG_ON(rw_aux_to_bkey(b, t, j) != k); in bch2_bset_verify_rw_aux_tree()
538 if (rw_aux_to_bkey(b, t, j) == k) { in bch2_bset_verify_rw_aux_tree()
539 BUG_ON(!bpos_eq(rw_aux_tree(b, t)[j].k, in bch2_bset_verify_rw_aux_tree()
542 if (++j == t->size) in bch2_bset_verify_rw_aux_tree()
545 BUG_ON(rw_aux_tree(b, t)[j].offset <= in bch2_bset_verify_rw_aux_tree()
546 rw_aux_tree(b, t)[j - 1].offset); in bch2_bset_verify_rw_aux_tree()
550 BUG_ON(k >= btree_bkey_last(b, t)); in bch2_bset_verify_rw_aux_tree()
556 struct bset_tree *t, in rw_aux_tree_bsearch() argument
559 unsigned bset_offs = offset - btree_bkey_first_offset(t); in rw_aux_tree_bsearch()
560 unsigned bset_u64s = t->end_offset - btree_bkey_first_offset(t); in rw_aux_tree_bsearch()
561 unsigned idx = bset_u64s ? bset_offs * t->size / bset_u64s : 0; in rw_aux_tree_bsearch()
563 EBUG_ON(bset_aux_tree_type(t) != BSET_RW_AUX_TREE); in rw_aux_tree_bsearch()
564 EBUG_ON(!t->size); in rw_aux_tree_bsearch()
565 EBUG_ON(idx > t->size); in rw_aux_tree_bsearch()
567 while (idx < t->size && in rw_aux_tree_bsearch()
568 rw_aux_tree(b, t)[idx].offset < offset) in rw_aux_tree_bsearch()
572 rw_aux_tree(b, t)[idx - 1].offset >= offset) in rw_aux_tree_bsearch()
573 idx--; in rw_aux_tree_bsearch()
575 EBUG_ON(idx < t->size && in rw_aux_tree_bsearch()
576 rw_aux_tree(b, t)[idx].offset < offset); in rw_aux_tree_bsearch()
577 EBUG_ON(idx && rw_aux_tree(b, t)[idx - 1].offset >= offset); in rw_aux_tree_bsearch()
578 EBUG_ON(idx + 1 < t->size && in rw_aux_tree_bsearch()
579 rw_aux_tree(b, t)[idx].offset == in rw_aux_tree_bsearch()
580 rw_aux_tree(b, t)[idx + 1].offset); in rw_aux_tree_bsearch()
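rw_aux_tree_bsearch() guesses a starting index by linear interpolation (bset_offs * t->size / bset_u64s) and then walks to the exact spot; the EBUG_ON()s above pin down the contract: return the first table entry whose offset is >= the requested offset. The same contract modeled over a plain sorted array, as a standalone sketch:

static unsigned first_offset_geq(const u16 *offsets, unsigned nr, u16 offset)
{
	unsigned idx = 0;

	/* first index whose offset is >= the search offset, or nr if none */
	while (idx < nr && offsets[idx] < offset)
		idx++;
	return idx;
}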
593 v = get_unaligned((u64 *) (((u8 *) k->_data) + (f->exponent >> 3))); in bkey_mantissa()
602 v >>= f->exponent & 7; in bkey_mantissa()
604 v >>= 64 - (f->exponent & 7) - BKEY_MANTISSA_BITS; in bkey_mantissa()
609 static __always_inline void make_bfloat(struct btree *b, struct bset_tree *t, in make_bfloat() argument
614 struct bkey_float *f = bkey_float(b, t, j); in make_bfloat()
615 struct bkey_packed *m = tree_to_bkey(b, t, j); in make_bfloat()
618 : tree_to_prev_bkey(b, t, j >> ffs(j)); in make_bfloat()
621 : tree_to_bkey(b, t, j >> (ffz(j) + 1)); in make_bfloat()
631 !b->nr_key_bits) { in make_bfloat()
632 f->exponent = BFLOAT_FAILED_UNPACKED; in make_bfloat()
639 * comparisons - that bit always becomes the high bit of in make_bfloat()
640 * bfloat->mantissa, and thus the exponent we're calculating here is in make_bfloat()
641 * the position of what will become the low bit in bfloat->mantissa: in make_bfloat()
643 * Note that this may be negative - we may be running off the low end in make_bfloat()
647 min_t(unsigned, BKEY_MANTISSA_BITS, b->nr_key_bits) - 1); in make_bfloat()
648 exponent = high_bit - (BKEY_MANTISSA_BITS - 1); in make_bfloat()
652 * (k->_data), to get the key bits starting at exponent: in make_bfloat()
655 shift = (int) (b->format.key_u64s * 64 - b->nr_key_bits) + exponent; in make_bfloat()
657 EBUG_ON(shift + BKEY_MANTISSA_BITS > b->format.key_u64s * 64); in make_bfloat()
660 b->nr_key_bits - in make_bfloat()
661 exponent - in make_bfloat()
668 f->exponent = shift; in make_bfloat()
672 * If we've got garbage bits, set them to all 1s - it's legal for the in make_bfloat()
676 mantissa |= ~(~0U << -exponent); in make_bfloat()
678 f->mantissa = mantissa; in make_bfloat()
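make_bfloat() records, per tree node, an exponent (the bit position of the distinguishing key bits) and a mantissa, so the search loop compares small integers instead of whole keys; bkey_mantissa() above recovers the bits with one unaligned load plus a shift. A simplified standalone model of the little-endian path (source lines 593-602), assuming BKEY_MANTISSA_BITS is 16 as in this file; the real code also handles big-endian layouts:

/* read 16 key bits starting at bit 'exponent' of a flat little-endian
 * bit string; sketch only, memcpy stands in for get_unaligned() */
static unsigned mantissa16(const u8 *key, unsigned exponent)
{
	u64 v;

	memcpy(&v, key + (exponent >> 3), sizeof(v));
	v >>= exponent & 7;

	return v & 0xffff;
}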
681 /* bytes remaining - only valid for last bset: */
682 static unsigned __bset_tree_capacity(const struct btree *b, const struct bset_tree *t) in __bset_tree_capacity() argument
686 return btree_aux_data_bytes(b) - t->aux_data_offset * sizeof(u64); in __bset_tree_capacity()
689 static unsigned bset_ro_tree_capacity(const struct btree *b, const struct bset_tree *t) in bset_ro_tree_capacity() argument
691 return __bset_tree_capacity(b, t) / in bset_ro_tree_capacity()
695 static unsigned bset_rw_tree_capacity(const struct btree *b, const struct bset_tree *t) in bset_rw_tree_capacity() argument
697 return __bset_tree_capacity(b, t) / sizeof(struct rw_aux_tree); in bset_rw_tree_capacity()
700 static noinline void __build_rw_aux_tree(struct btree *b, struct bset_tree *t) in __build_rw_aux_tree() argument
704 t->size = 1; in __build_rw_aux_tree()
705 t->extra = BSET_RW_AUX_TREE_VAL; in __build_rw_aux_tree()
706 rw_aux_tree(b, t)[0].offset = in __build_rw_aux_tree()
707 __btree_node_key_to_offset(b, btree_bkey_first(b, t)); in __build_rw_aux_tree()
709 bset_tree_for_each_key(b, t, k) { in __build_rw_aux_tree()
710 if (t->size == bset_rw_tree_capacity(b, t)) in __build_rw_aux_tree()
713 if ((void *) k - (void *) rw_aux_to_bkey(b, t, t->size - 1) > in __build_rw_aux_tree()
715 rw_aux_tree_set(b, t, t->size++, k); in __build_rw_aux_tree()
719 static noinline void __build_ro_aux_tree(struct btree *b, struct bset_tree *t) in __build_ro_aux_tree() argument
721 struct bkey_packed *prev = NULL, *k = btree_bkey_first(b, t); in __build_ro_aux_tree()
725 t->size = min(bkey_to_cacheline(b, t, btree_bkey_last(b, t)), in __build_ro_aux_tree()
726 bset_ro_tree_capacity(b, t)); in __build_ro_aux_tree()
728 if (t->size < 2) { in __build_ro_aux_tree()
729 t->size = 0; in __build_ro_aux_tree()
730 t->extra = BSET_NO_AUX_TREE_VAL; in __build_ro_aux_tree()
734 t->extra = (t->size - rounddown_pow_of_two(t->size - 1)) << 1; in __build_ro_aux_tree()
737 eytzinger1_for_each(j, t->size - 1) { in __build_ro_aux_tree()
738 while (bkey_to_cacheline(b, t, k) < cacheline) in __build_ro_aux_tree()
741 if (k >= btree_bkey_last(b, t)) { in __build_ro_aux_tree()
743 t->size--; in __build_ro_aux_tree()
747 ro_aux_tree_prev(b, t)[j] = prev->u64s; in __build_ro_aux_tree()
748 bkey_float(b, t, j)->key_offset = in __build_ro_aux_tree()
749 bkey_to_cacheline_offset(b, t, cacheline++, k); in __build_ro_aux_tree()
751 EBUG_ON(tree_to_prev_bkey(b, t, j) != prev); in __build_ro_aux_tree()
752 EBUG_ON(tree_to_bkey(b, t, j) != k); in __build_ro_aux_tree()
755 while (k != btree_bkey_last(b, t)) in __build_ro_aux_tree()
758 if (!bkey_pack_pos(bkey_to_packed(&min_key), b->data->min_key, b)) { in __build_ro_aux_tree()
760 min_key.k.p = b->data->min_key; in __build_ro_aux_tree()
763 if (!bkey_pack_pos(bkey_to_packed(&max_key), b->data->max_key, b)) { in __build_ro_aux_tree()
765 max_key.k.p = b->data->max_key; in __build_ro_aux_tree()
769 eytzinger1_for_each(j, t->size - 1) in __build_ro_aux_tree()
770 make_bfloat(b, t, j, in __build_ro_aux_tree()
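__build_ro_aux_tree() lays the nodes out in eytzinger order: a 1-indexed array in which node j has children 2j and 2j+1, so the first few levels of the tree share cachelines. A generic sketch of the branchless descent that bset_search_tree() performs below, over a plain int array rather than bkey_floats:

static unsigned eytzinger_lower_bound(const int *tree, unsigned nr, int search)
{
	unsigned j = 1;

	/* descend: take the right child while this node is still too small */
	while (j <= nr)
		j = j * 2 + (tree[j] < search);

	/* cancel trailing right-turns to recover the answer; 0 if none */
	return j >> __builtin_ffs(~j);
}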
775 static void bset_alloc_tree(struct btree *b, struct bset_tree *t) in bset_alloc_tree() argument
779 for (i = b->set; i != t; i++) in bset_alloc_tree()
782 bch2_bset_set_no_aux_tree(b, t); in bset_alloc_tree()
785 t->aux_data_offset = round_up(bset_aux_tree_buf_start(b, t), in bset_alloc_tree()
791 void bch2_bset_build_aux_tree(struct btree *b, struct bset_tree *t, in bch2_bset_build_aux_tree() argument
795 ? bset_has_rw_aux_tree(t) in bch2_bset_build_aux_tree()
796 : bset_has_ro_aux_tree(t)) in bch2_bset_build_aux_tree()
799 bset_alloc_tree(b, t); in bch2_bset_build_aux_tree()
801 if (!__bset_tree_capacity(b, t)) in bch2_bset_build_aux_tree()
805 __build_rw_aux_tree(b, t); in bch2_bset_build_aux_tree()
807 __build_ro_aux_tree(b, t); in bch2_bset_build_aux_tree()
814 struct bset_tree *t; in bch2_bset_init_first() local
816 BUG_ON(b->nsets); in bch2_bset_init_first()
819 get_random_bytes(&i->seq, sizeof(i->seq)); in bch2_bset_init_first()
822 t = &b->set[b->nsets++]; in bch2_bset_init_first()
823 set_btree_bset(b, t, i); in bch2_bset_init_first()
828 struct bset *i = &bne->keys; in bch2_bset_init_next()
829 struct bset_tree *t; in bch2_bset_init_next() local
833 BUG_ON(b->nsets >= MAX_BSETS); in bch2_bset_init_next()
836 i->seq = btree_bset_first(b)->seq; in bch2_bset_init_next()
839 t = &b->set[b->nsets++]; in bch2_bset_init_next()
840 set_btree_bset(b, t, i); in bch2_bset_init_next()
844 * find _some_ key in the same bset as @k that precedes @k - not necessarily the
847 static struct bkey_packed *__bkey_prev(struct btree *b, struct bset_tree *t, in __bkey_prev() argument
854 EBUG_ON(k < btree_bkey_first(b, t) || in __bkey_prev()
855 k > btree_bkey_last(b, t)); in __bkey_prev()
857 if (k == btree_bkey_first(b, t)) in __bkey_prev()
860 switch (bset_aux_tree_type(t)) { in __bkey_prev()
862 p = btree_bkey_first(b, t); in __bkey_prev()
865 j = min_t(unsigned, t->size - 1, bkey_to_cacheline(b, t, k)); in __bkey_prev()
868 p = j ? tree_to_bkey(b, t, in __bkey_prev()
869 __inorder_to_eytzinger1(j--, in __bkey_prev()
870 t->size - 1, t->extra)) in __bkey_prev()
871 : btree_bkey_first(b, t); in __bkey_prev()
876 j = rw_aux_tree_bsearch(b, t, offset); in __bkey_prev()
877 p = j ? rw_aux_to_bkey(b, t, j - 1) in __bkey_prev()
878 : btree_bkey_first(b, t); in __bkey_prev()
886 struct bset_tree *t, in bch2_bkey_prev_filter() argument
892 while ((p = __bkey_prev(b, t, k)) && !ret) { in bch2_bkey_prev_filter()
894 if (i->type >= min_key_type) in bch2_bkey_prev_filter()
905 : btree_bkey_first(b, t); in bch2_bkey_prev_filter()
908 BUG_ON(i->type >= min_key_type); in bch2_bkey_prev_filter()
917 struct bset_tree *t, in bch2_bset_fix_lookup_table() argument
922 int shift = new_u64s - clobber_u64s; in bch2_bset_fix_lookup_table()
925 EBUG_ON(bset_has_ro_aux_tree(t)); in bch2_bset_fix_lookup_table()
927 if (!bset_has_rw_aux_tree(t)) in bch2_bset_fix_lookup_table()
931 l = rw_aux_tree_bsearch(b, t, where); in bch2_bset_fix_lookup_table()
935 else if (l < t->size && in bch2_bset_fix_lookup_table()
936 where < t->end_offset && in bch2_bset_fix_lookup_table()
937 rw_aux_tree(b, t)[l].offset == where) in bch2_bset_fix_lookup_table()
938 rw_aux_tree_set(b, t, l++, _where); in bch2_bset_fix_lookup_table()
943 j < t->size && in bch2_bset_fix_lookup_table()
944 rw_aux_tree(b, t)[j].offset < where + clobber_u64s; in bch2_bset_fix_lookup_table()
948 if (j < t->size && in bch2_bset_fix_lookup_table()
949 rw_aux_tree(b, t)[j].offset + shift == in bch2_bset_fix_lookup_table()
950 rw_aux_tree(b, t)[l - 1].offset) in bch2_bset_fix_lookup_table()
953 memmove(&rw_aux_tree(b, t)[l], in bch2_bset_fix_lookup_table()
954 &rw_aux_tree(b, t)[j], in bch2_bset_fix_lookup_table()
955 (void *) &rw_aux_tree(b, t)[t->size] - in bch2_bset_fix_lookup_table()
956 (void *) &rw_aux_tree(b, t)[j]); in bch2_bset_fix_lookup_table()
957 t->size -= j - l; in bch2_bset_fix_lookup_table()
959 for (j = l; j < t->size; j++) in bch2_bset_fix_lookup_table()
960 rw_aux_tree(b, t)[j].offset += shift; in bch2_bset_fix_lookup_table()
962 EBUG_ON(l < t->size && in bch2_bset_fix_lookup_table()
963 rw_aux_tree(b, t)[l].offset == in bch2_bset_fix_lookup_table()
964 rw_aux_tree(b, t)[l - 1].offset); in bch2_bset_fix_lookup_table()
966 if (t->size < bset_rw_tree_capacity(b, t) && in bch2_bset_fix_lookup_table()
967 (l < t->size in bch2_bset_fix_lookup_table()
968 ? rw_aux_tree(b, t)[l].offset in bch2_bset_fix_lookup_table()
969 : t->end_offset) - in bch2_bset_fix_lookup_table()
970 rw_aux_tree(b, t)[l - 1].offset > in bch2_bset_fix_lookup_table()
972 struct bkey_packed *start = rw_aux_to_bkey(b, t, l - 1); in bch2_bset_fix_lookup_table()
973 struct bkey_packed *end = l < t->size in bch2_bset_fix_lookup_table()
974 ? rw_aux_to_bkey(b, t, l) in bch2_bset_fix_lookup_table()
975 : btree_bkey_last(b, t); in bch2_bset_fix_lookup_table()
983 if ((void *) k - (void *) start >= L1_CACHE_BYTES) { in bch2_bset_fix_lookup_table()
984 memmove(&rw_aux_tree(b, t)[l + 1], in bch2_bset_fix_lookup_table()
985 &rw_aux_tree(b, t)[l], in bch2_bset_fix_lookup_table()
986 (void *) &rw_aux_tree(b, t)[t->size] - in bch2_bset_fix_lookup_table()
987 (void *) &rw_aux_tree(b, t)[l]); in bch2_bset_fix_lookup_table()
988 t->size++; in bch2_bset_fix_lookup_table()
989 rw_aux_tree_set(b, t, l, k); in bch2_bset_fix_lookup_table()
995 bch2_bset_verify_rw_aux_tree(b, t); in bch2_bset_fix_lookup_table()
1005 struct bkey_format *f = &b->format; in bch2_bset_insert()
1006 struct bset_tree *t = bset_tree_last(b); in bch2_bset_insert() local
1009 bch2_bset_verify_rw_aux_tree(b, t); in bch2_bset_insert()
1012 if (bch2_bkey_pack_key(&packed, &insert->k, f)) in bch2_bset_insert()
1015 if (!bkey_deleted(&insert->k)) in bch2_bset_insert()
1016 btree_keys_account_key_add(&b->nr, t - b->set, src); in bch2_bset_insert()
1018 if (src->u64s != clobber_u64s) { in bch2_bset_insert()
1019 u64 *src_p = (u64 *) where->_data + clobber_u64s; in bch2_bset_insert()
1020 u64 *dst_p = (u64 *) where->_data + src->u64s; in bch2_bset_insert()
1022 EBUG_ON((int) le16_to_cpu(bset(b, t)->u64s) < in bch2_bset_insert()
1023 (int) clobber_u64s - src->u64s); in bch2_bset_insert()
1025 memmove_u64s(dst_p, src_p, btree_bkey_last(b, t)->_data - src_p); in bch2_bset_insert()
1026 le16_add_cpu(&bset(b, t)->u64s, src->u64s - clobber_u64s); in bch2_bset_insert()
1027 set_btree_bset_end(b, t); in bch2_bset_insert()
1032 memcpy_u64s(bkeyp_val(f, where), &insert->v, in bch2_bset_insert()
1035 if (src->u64s != clobber_u64s) in bch2_bset_insert()
1036 bch2_bset_fix_lookup_table(b, t, where, clobber_u64s, src->u64s); in bch2_bset_insert()
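When the incoming key's size differs from what it clobbers, bch2_bset_insert() shifts the tail of the bset with memmove_u64s() and adjusts the bset's u64s count before repairing the lookup table. The resize step in isolation, as a hypothetical helper over a flat u64 buffer:

static void resize_in_place(u64 *buf, unsigned *nr_u64s, unsigned where,
			    unsigned clobber_u64s, unsigned new_u64s)
{
	/* shift the tail so new_u64s words fit where clobber_u64s were */
	memmove(buf + where + new_u64s,
		buf + where + clobber_u64s,
		(*nr_u64s - (where + clobber_u64s)) * sizeof(u64));
	*nr_u64s += new_u64s - clobber_u64s;
}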
1045 struct bset_tree *t = bset_tree_last(b); in bch2_bset_delete() local
1046 u64 *src_p = (u64 *) where->_data + clobber_u64s; in bch2_bset_delete()
1047 u64 *dst_p = where->_data; in bch2_bset_delete()
1049 bch2_bset_verify_rw_aux_tree(b, t); in bch2_bset_delete()
1051 EBUG_ON(le16_to_cpu(bset(b, t)->u64s) < clobber_u64s); in bch2_bset_delete()
1053 memmove_u64s_down(dst_p, src_p, btree_bkey_last(b, t)->_data - src_p); in bch2_bset_delete()
1054 le16_add_cpu(&bset(b, t)->u64s, -clobber_u64s); in bch2_bset_delete()
1055 set_btree_bset_end(b, t); in bch2_bset_delete()
1057 bch2_bset_fix_lookup_table(b, t, where, clobber_u64s, 0); in bch2_bset_delete()
1064 struct bset_tree *t, in bset_search_write_set() argument
1067 unsigned l = 0, r = t->size; in bset_search_write_set()
1072 if (bpos_lt(rw_aux_tree(b, t)[m].k, *search)) in bset_search_write_set()
1078 return rw_aux_to_bkey(b, t, l); in bset_search_write_set()
1084 asm("prefetcht0 (-127 + 64 * 0)(%0);" in prefetch_four_cachelines()
1085 "prefetcht0 (-127 + 64 * 1)(%0);" in prefetch_four_cachelines()
1086 "prefetcht0 (-127 + 64 * 2)(%0);" in prefetch_four_cachelines()
1087 "prefetcht0 (-127 + 64 * 3)(%0);" in prefetch_four_cachelines()
1103 unsigned key_bits_start = b->format.key_u64s * 64 - b->nr_key_bits; in bkey_mantissa_bits_dropped()
1105 return f->exponent > key_bits_start; in bkey_mantissa_bits_dropped()
1107 unsigned key_bits_end = high_bit_offset + b->nr_key_bits; in bkey_mantissa_bits_dropped()
1109 return f->exponent + BKEY_MANTISSA_BITS < key_bits_end; in bkey_mantissa_bits_dropped()
1115 const struct bset_tree *t, in bset_search_tree() argument
1119 struct ro_aux_tree *base = ro_aux_tree_base(b, t); in bset_search_tree()
1126 if (likely(n << 4 < t->size)) in bset_search_tree()
1127 prefetch(&base->f[n << 4]); in bset_search_tree()
1129 f = &base->f[n]; in bset_search_tree()
1130 if (unlikely(f->exponent >= BFLOAT_FAILED)) in bset_search_tree()
1133 l = f->mantissa; in bset_search_tree()
1142 k = tree_to_bkey(b, t, n); in bset_search_tree()
1148 } while (n < t->size); in bset_search_tree()
1150 inorder = __eytzinger1_to_inorder(n >> 1, t->size - 1, t->extra); in bset_search_tree()
1153 * n would have been the node we recursed to - the low bit tells us if in bset_search_tree()
1157 --inorder; in bset_search_tree()
1159 return btree_bkey_first(b, t); in bset_search_tree()
1161 f = &base->f[eytzinger1_prev(n >> 1, t->size - 1)]; in bset_search_tree()
1164 return cacheline_to_bkey(b, t, inorder, f->key_offset); in bset_search_tree()
1169 struct bset_tree *t, in __bch2_bset_search() argument
1183 * use a much simpler lookup table to do a binary search - in __bch2_bset_search()
1185 * * Or we use the auxiliary search tree we constructed earlier - in __bch2_bset_search()
1189 switch (bset_aux_tree_type(t)) { in __bch2_bset_search()
1191 return btree_bkey_first(b, t); in __bch2_bset_search()
1193 return bset_search_write_set(b, t, search); in __bch2_bset_search()
1195 return bset_search_tree(b, t, search, lossy_packed_search); in __bch2_bset_search()
1203 struct bset_tree *t, in bch2_bset_search_linear() argument
1210 while (m != btree_bkey_last(b, t) && in bch2_bset_search_linear()
1216 while (m != btree_bkey_last(b, t) && in bch2_bset_search_linear()
1221 struct bkey_packed *prev = bch2_bkey_prev_all(b, t, m); in bch2_bset_search_linear()
1244 BUG_ON(pos >= iter->data + ARRAY_SIZE(iter->data)); in __bch2_btree_node_iter_push()
1277 * bch2_btree_node_iter_init - initialize a btree node iterator, starting from a
1288 * When you don't filter out deleted keys, btree nodes _do_ contain duplicate
1289 * keys. This doesn't matter for most code, but it does matter for lookups.
1294 * If you search for k, the lookup code isn't guaranteed to return you any
1301 * - For non extents, we guarantee that the live key comes last - see
1302 * btree_node_iter_cmp(), keys_out_of_order(). So the duplicates you don't
1303 * see will only be deleted keys you don't care about.
1305 * - For extents, deleted keys sort last (see the comment at the top of this
1307 * key strictly greater than your search key - an extent that compares equal
1310 * But this does mean that we can't just search for
1312 * the range we want - if we're unlucky and there's an extent that ends
1325 struct btree_node_iter_set *pos = iter->data; in bch2_btree_node_iter_init()
1329 EBUG_ON(bpos_lt(*search, b->data->min_key)); in bch2_btree_node_iter_init()
1330 EBUG_ON(bpos_gt(*search, b->data->max_key)); in bch2_btree_node_iter_init()
1347 for (i = 0; i < b->nsets; i++) { in bch2_btree_node_iter_init()
1348 k[i] = __bch2_bset_search(b, b->set + i, search, &p); in bch2_btree_node_iter_init()
1352 for (i = 0; i < b->nsets; i++) { in bch2_btree_node_iter_init()
1353 struct bset_tree *t = b->set + i; in bch2_btree_node_iter_init() local
1354 struct bkey_packed *end = btree_bkey_last(b, t); in bch2_btree_node_iter_init()
1356 k[i] = bch2_bset_search_linear(b, t, search, in bch2_btree_node_iter_init()
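Given the guarantees spelled out in the comment above (live keys sort after their deleted duplicates), a caller wanting the first live key >= search initializes the iterator and skips deleted keys. A usage sketch; bch2_btree_node_iter_peek_all() and bch2_btree_node_iter_advance() are assumed from the surrounding code, as they are not part of this listing:

static struct bkey_packed *
first_live_key(struct btree *b, struct btree_node_iter *iter,
	       struct bpos search)
{
	struct bkey_packed *k;

	bch2_btree_node_iter_init(iter, b, &search);

	/* duplicates sort with the live key last; skip the deleted ones */
	while ((k = bch2_btree_node_iter_peek_all(iter, b)) &&
	       bkey_deleted(k))
		bch2_btree_node_iter_advance(iter, b);

	return k; /* NULL once the node is exhausted */
}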
1371 struct bset_tree *t; in bch2_btree_node_iter_init_from_start() local
1375 for_each_bset(b, t) in bch2_btree_node_iter_init_from_start()
1377 btree_bkey_first(b, t), in bch2_btree_node_iter_init_from_start()
1378 btree_bkey_last(b, t)); in bch2_btree_node_iter_init_from_start()
1384 struct bset_tree *t) in bch2_btree_node_iter_bset_pos() argument
1389 if (set->end == t->end_offset) in bch2_btree_node_iter_bset_pos()
1390 return __btree_node_offset_to_key(b, set->k); in bch2_btree_node_iter_bset_pos()
1392 return btree_bkey_last(b, t); in bch2_btree_node_iter_bset_pos()
1402 iter->data[first], in btree_node_iter_sort_two()
1403 iter->data[first + 1]) > 0))) in btree_node_iter_sort_two()
1404 swap(iter->data[first], iter->data[first + 1]); in btree_node_iter_sort_two()
1426 iter->data + ARRAY_SIZE(iter->data) - 1; in bch2_btree_node_iter_set_drop()
1428 memmove(&set[0], &set[1], (void *) last - (void *) set); in bch2_btree_node_iter_set_drop()
1435 iter->data->k += __bch2_btree_node_iter_peek_all(iter, b)->u64s; in __bch2_btree_node_iter_advance()
1437 EBUG_ON(iter->data->k > iter->data->end); in __bch2_btree_node_iter_advance()
1441 iter->data[0] = iter->data[1]; in __bch2_btree_node_iter_advance()
1442 iter->data[1] = iter->data[2]; in __bch2_btree_node_iter_advance()
1443 iter->data[2] = (struct btree_node_iter_set) { 0, 0 }; in __bch2_btree_node_iter_advance()
1478 struct bset_tree *t; in bch2_btree_node_iter_prev_all() local
1484 for_each_bset(b, t) { in bch2_btree_node_iter_prev_all()
1485 k = bch2_bkey_prev_all(b, t, in bch2_btree_node_iter_prev_all()
1486 bch2_btree_node_iter_bset_pos(iter, b, t)); in bch2_btree_node_iter_prev_all()
1490 end = t->end_offset; in bch2_btree_node_iter_prev_all()
1499 * prev we picked ends up in slot 0 - sort won't necessarily put it in bch2_btree_node_iter_prev_all()
1503 if (set->end == end) in bch2_btree_node_iter_prev_all()
1506 BUG_ON(set != &iter->data[__btree_node_iter_used(iter)]); in bch2_btree_node_iter_prev_all()
1508 BUG_ON(set >= iter->data + ARRAY_SIZE(iter->data)); in bch2_btree_node_iter_prev_all()
1510 memmove(&iter->data[1], in bch2_btree_node_iter_prev_all()
1511 &iter->data[0], in bch2_btree_node_iter_prev_all()
1512 (void *) set - (void *) &iter->data[0]); in bch2_btree_node_iter_prev_all()
1514 iter->data[0].k = __btree_node_key_to_offset(b, prev); in bch2_btree_node_iter_prev_all()
1515 iter->data[0].end = end; in bch2_btree_node_iter_prev_all()
1547 const struct bset_tree *t; in bch2_btree_keys_stats() local
1549 for_each_bset(b, t) { in bch2_btree_keys_stats()
1550 enum bset_aux_tree_type type = bset_aux_tree_type(t); in bch2_btree_keys_stats()
1553 stats->sets[type].nr++; in bch2_btree_keys_stats()
1554 stats->sets[type].bytes += le16_to_cpu(bset(b, t)->u64s) * in bch2_btree_keys_stats()
1557 if (bset_has_ro_aux_tree(t)) { in bch2_btree_keys_stats()
1558 stats->floats += t->size - 1; in bch2_btree_keys_stats()
1560 for (j = 1; j < t->size; j++) in bch2_btree_keys_stats()
1561 stats->failed += in bch2_btree_keys_stats()
1562 bkey_float(b, t, j)->exponent == in bch2_btree_keys_stats()
1571 struct bset_tree *t = bch2_bkey_to_bset(b, k); in bch2_bfloat_to_text() local
1575 if (!bset_has_ro_aux_tree(t)) in bch2_bfloat_to_text()
1578 inorder = bkey_to_cacheline(b, t, k); in bch2_bfloat_to_text()
1579 if (!inorder || inorder >= t->size) in bch2_bfloat_to_text()
1582 j = __inorder_to_eytzinger1(inorder, t->size - 1, t->extra); in bch2_bfloat_to_text()
1583 if (k != tree_to_bkey(b, t, j)) in bch2_bfloat_to_text()
1586 switch (bkey_float(b, t, j)->exponent) { in bch2_bfloat_to_text()
1591 "\t", in bch2_bfloat_to_text()