// SPDX-License-Identifier: GPL-2.0
/*
 * Code for working with individual keys, and sorted sets of keys within a
 * btree node
 *
 * Copyright 2012 Google, Inc.
 */

#include "bcachefs.h"
#include "btree_cache.h"
#include "bset.h"
#include "eytzinger.h"
#include "trace.h"
#include "util.h"

#include <asm/unaligned.h>
#include <linux/console.h>
#include <linux/random.h>
#include <linux/prefetch.h>

static inline void __bch2_btree_node_iter_advance(struct btree_node_iter *,
						  struct btree *);

static inline unsigned __btree_node_iter_used(struct btree_node_iter *iter)
{
	unsigned n = ARRAY_SIZE(iter->data);

	while (n && __btree_node_iter_set_end(iter, n - 1))
		--n;

	return n;
}

struct bset_tree *bch2_bkey_to_bset(struct btree *b, struct bkey_packed *k)
{
	return bch2_bkey_to_bset_inlined(b, k);
}

/*
 * There are never duplicate live keys in the btree - but including keys that
 * have been flagged as deleted (and will be cleaned up later) we _will_ see
 * duplicates.
 *
 * Thus the sort order is: usual key comparison first, but for keys that compare
 * equal the deleted key(s) come first, and the (at most one) live version comes
 * last.
 *
 * The main reason for this is insertion: to handle overwrites, we first iterate
 * over keys that compare equal to our insert key, and then insert immediately
 * prior to the first key greater than the key we're inserting - our insert
 * position will be after all keys that compare equal to our insert key, which
 * by the time we actually do the insert will all be deleted.
 */
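
/*
 * Illustrative example of the above: with two stale versions and one live
 * version of a key at position P, a bset contains
 *
 *	..., P (deleted), P (deleted), P (live), ...
 *
 * and a new key at P is inserted after the live version - which, by the time
 * the insert actually happens, the caller will have marked deleted as well.
 */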

void bch2_dump_bset(struct bch_fs *c, struct btree *b,
		    struct bset *i, unsigned set)
{
	struct bkey_packed *_k, *_n;
	struct bkey uk, n;
	struct bkey_s_c k;
	struct printbuf buf = PRINTBUF;

	if (!i->u64s)
		return;

	for (_k = i->start;
	     _k < vstruct_last(i);
	     _k = _n) {
		_n = bkey_p_next(_k);

		if (!_k->u64s) {
			printk(KERN_ERR "block %u key %5zu - u64s 0? aieee!\n", set,
			       _k->_data - i->_data);
			break;
		}

		k = bkey_disassemble(b, _k, &uk);

		printbuf_reset(&buf);
		if (c)
			bch2_bkey_val_to_text(&buf, c, k);
		else
			bch2_bkey_to_text(&buf, k.k);
		printk(KERN_ERR "block %u key %5zu: %s\n", set,
		       _k->_data - i->_data, buf.buf);

		if (_n == vstruct_last(i))
			continue;

		n = bkey_unpack_key(b, _n);

		if (bpos_lt(n.p, k.k->p)) {
			printk(KERN_ERR "Key skipped backwards\n");
			continue;
		}

		if (!bkey_deleted(k.k) && bpos_eq(n.p, k.k->p))
			printk(KERN_ERR "Duplicate keys\n");
	}

	printbuf_exit(&buf);
}

void bch2_dump_btree_node(struct bch_fs *c, struct btree *b)
{
	struct bset_tree *t;

	console_lock();
	for_each_bset(b, t)
		bch2_dump_bset(c, b, bset(b, t), t - b->set);
	console_unlock();
}

void bch2_dump_btree_node_iter(struct btree *b,
			       struct btree_node_iter *iter)
{
	struct btree_node_iter_set *set;
	struct printbuf buf = PRINTBUF;

	printk(KERN_ERR "btree node iter with %u/%u sets:\n",
	       __btree_node_iter_used(iter), b->nsets);

	btree_node_iter_for_each(iter, set) {
		struct bkey_packed *k = __btree_node_offset_to_key(b, set->k);
		struct bset_tree *t = bch2_bkey_to_bset(b, k);
		struct bkey uk = bkey_unpack_key(b, k);

		printbuf_reset(&buf);
		bch2_bkey_to_text(&buf, &uk);
		printk(KERN_ERR "set %zu key %u: %s\n",
		       t - b->set, set->k, buf.buf);
	}

	printbuf_exit(&buf);
}

#ifdef CONFIG_BCACHEFS_DEBUG

void __bch2_verify_btree_nr_keys(struct btree *b)
{
	struct bset_tree *t;
	struct bkey_packed *k;
	struct btree_nr_keys nr = { 0 };

	for_each_bset(b, t)
		bset_tree_for_each_key(b, t, k)
			if (!bkey_deleted(k))
				btree_keys_account_key_add(&nr, t - b->set, k);

	BUG_ON(memcmp(&nr, &b->nr, sizeof(nr)));
}

static void bch2_btree_node_iter_next_check(struct btree_node_iter *_iter,
					    struct btree *b)
{
	struct btree_node_iter iter = *_iter;
	const struct bkey_packed *k, *n;

	k = bch2_btree_node_iter_peek_all(&iter, b);
	__bch2_btree_node_iter_advance(&iter, b);
	n = bch2_btree_node_iter_peek_all(&iter, b);

	bkey_unpack_key(b, k);

	if (n &&
	    bkey_iter_cmp(b, k, n) > 0) {
		struct btree_node_iter_set *set;
		struct bkey ku = bkey_unpack_key(b, k);
		struct bkey nu = bkey_unpack_key(b, n);
		struct printbuf buf1 = PRINTBUF;
		struct printbuf buf2 = PRINTBUF;

		bch2_dump_btree_node(NULL, b);
		bch2_bkey_to_text(&buf1, &ku);
		bch2_bkey_to_text(&buf2, &nu);
		printk(KERN_ERR "out of order/overlapping:\n%s\n%s\n",
		       buf1.buf, buf2.buf);
		printk(KERN_ERR "iter was:");

		btree_node_iter_for_each(_iter, set) {
			struct bkey_packed *k2 = __btree_node_offset_to_key(b, set->k);
			struct bset_tree *t = bch2_bkey_to_bset(b, k2);
			printk(" [%zi %zi]", t - b->set,
			       k2->_data - bset(b, t)->_data);
		}
		panic("\n");
	}
}

void bch2_btree_node_iter_verify(struct btree_node_iter *iter,
				 struct btree *b)
{
	struct btree_node_iter_set *set, *s2;
	struct bkey_packed *k, *p;
	struct bset_tree *t;

	if (bch2_btree_node_iter_end(iter))
		return;

	/* Verify no duplicates: */
	btree_node_iter_for_each(iter, set) {
		BUG_ON(set->k > set->end);
		btree_node_iter_for_each(iter, s2)
			BUG_ON(set != s2 && set->end == s2->end);
	}

	/* Verify that set->end is correct: */
	btree_node_iter_for_each(iter, set) {
		for_each_bset(b, t)
			if (set->end == t->end_offset)
				goto found;
		BUG();
found:
		BUG_ON(set->k < btree_bkey_first_offset(t) ||
		       set->k >= t->end_offset);
	}

	/* Verify iterator is sorted: */
	btree_node_iter_for_each(iter, set)
		BUG_ON(set != iter->data &&
		       btree_node_iter_cmp(b, set[-1], set[0]) > 0);

	k = bch2_btree_node_iter_peek_all(iter, b);

	for_each_bset(b, t) {
		if (iter->data[0].end == t->end_offset)
			continue;

		p = bch2_bkey_prev_all(b, t,
			bch2_btree_node_iter_bset_pos(iter, b, t));

		BUG_ON(p && bkey_iter_cmp(b, k, p) < 0);
	}
}

void bch2_verify_insert_pos(struct btree *b, struct bkey_packed *where,
			    struct bkey_packed *insert, unsigned clobber_u64s)
{
	struct bset_tree *t = bch2_bkey_to_bset(b, where);
	struct bkey_packed *prev = bch2_bkey_prev_all(b, t, where);
	struct bkey_packed *next = (void *) ((u64 *) where->_data + clobber_u64s);
	struct printbuf buf1 = PRINTBUF;
	struct printbuf buf2 = PRINTBUF;
#if 0
	BUG_ON(prev &&
	       bkey_iter_cmp(b, prev, insert) > 0);
#else
	if (prev &&
	    bkey_iter_cmp(b, prev, insert) > 0) {
		struct bkey k1 = bkey_unpack_key(b, prev);
		struct bkey k2 = bkey_unpack_key(b, insert);

		bch2_dump_btree_node(NULL, b);
		bch2_bkey_to_text(&buf1, &k1);
		bch2_bkey_to_text(&buf2, &k2);

		panic("prev > insert:\n"
		      "prev key %s\n"
		      "insert key %s\n",
		      buf1.buf, buf2.buf);
	}
#endif
#if 0
	BUG_ON(next != btree_bkey_last(b, t) &&
	       bkey_iter_cmp(b, insert, next) > 0);
#else
	if (next != btree_bkey_last(b, t) &&
	    bkey_iter_cmp(b, insert, next) > 0) {
		struct bkey k1 = bkey_unpack_key(b, insert);
		struct bkey k2 = bkey_unpack_key(b, next);

		bch2_dump_btree_node(NULL, b);
		bch2_bkey_to_text(&buf1, &k1);
		bch2_bkey_to_text(&buf2, &k2);

		panic("insert > next:\n"
		      "insert key %s\n"
		      "next key %s\n",
		      buf1.buf, buf2.buf);
	}
#endif
}

#else

static inline void bch2_btree_node_iter_next_check(struct btree_node_iter *iter,
						   struct btree *b) {}

#endif

/* Auxiliary search trees */

#define BFLOAT_FAILED_UNPACKED	U8_MAX
#define BFLOAT_FAILED		U8_MAX

struct bkey_float {
	u8		exponent;
	u8		key_offset;
	u16		mantissa;
};
#define BKEY_MANTISSA_BITS	16
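
/*
 * Illustrative note, assuming 64 byte cachelines: a bkey_float packs into 4
 * bytes, so 16 tree nodes fit in one cacheline. That's why bset_search_tree()
 * below prefetches base->f[n << 4] - the node four levels down from n, i.e.
 * the cacheline a deeper descent will land in.
 */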

static unsigned bkey_float_byte_offset(unsigned idx)
{
	return idx * sizeof(struct bkey_float);
}

struct ro_aux_tree {
	u8			nothing[0];
	struct bkey_float	f[];
};

struct rw_aux_tree {
	u16		offset;
	struct bpos	k;
};
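
/*
 * Each rw_aux_tree entry caches the key's position already unpacked, so
 * bset_search_write_set() can binary search with plain bpos_lt() comparisons
 * and never has to unpack a key.
 */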

static unsigned bset_aux_tree_buf_end(const struct bset_tree *t)
{
	BUG_ON(t->aux_data_offset == U16_MAX);

	switch (bset_aux_tree_type(t)) {
	case BSET_NO_AUX_TREE:
		return t->aux_data_offset;
	case BSET_RO_AUX_TREE:
		return t->aux_data_offset +
			DIV_ROUND_UP(t->size * sizeof(struct bkey_float) +
				     t->size * sizeof(u8), 8);
	case BSET_RW_AUX_TREE:
		return t->aux_data_offset +
			DIV_ROUND_UP(sizeof(struct rw_aux_tree) * t->size, 8);
	default:
		BUG();
	}
}

static unsigned bset_aux_tree_buf_start(const struct btree *b,
					const struct bset_tree *t)
{
	return t == b->set
		? DIV_ROUND_UP(b->unpack_fn_len, 8)
		: bset_aux_tree_buf_end(t - 1);
}

static void *__aux_tree_base(const struct btree *b,
			     const struct bset_tree *t)
{
	return b->aux_data + t->aux_data_offset * 8;
}

static struct ro_aux_tree *ro_aux_tree_base(const struct btree *b,
					    const struct bset_tree *t)
{
	EBUG_ON(bset_aux_tree_type(t) != BSET_RO_AUX_TREE);

	return __aux_tree_base(b, t);
}

static u8 *ro_aux_tree_prev(const struct btree *b,
			    const struct bset_tree *t)
{
	EBUG_ON(bset_aux_tree_type(t) != BSET_RO_AUX_TREE);

	return __aux_tree_base(b, t) + bkey_float_byte_offset(t->size);
}

static struct bkey_float *bkey_float(const struct btree *b,
				     const struct bset_tree *t,
				     unsigned idx)
{
	return ro_aux_tree_base(b, t)->f + idx;
}

static void bset_aux_tree_verify(const struct btree *b)
{
#ifdef CONFIG_BCACHEFS_DEBUG
	const struct bset_tree *t;

	for_each_bset(b, t) {
		if (t->aux_data_offset == U16_MAX)
			continue;

		BUG_ON(t != b->set &&
		       t[-1].aux_data_offset == U16_MAX);

		BUG_ON(t->aux_data_offset < bset_aux_tree_buf_start(b, t));
		BUG_ON(t->aux_data_offset > btree_aux_data_u64s(b));
		BUG_ON(bset_aux_tree_buf_end(t) > btree_aux_data_u64s(b));
	}
#endif
}

void bch2_btree_keys_init(struct btree *b)
{
	unsigned i;

	b->nsets = 0;
	memset(&b->nr, 0, sizeof(b->nr));

	for (i = 0; i < MAX_BSETS; i++)
		b->set[i].data_offset = U16_MAX;

	bch2_bset_set_no_aux_tree(b, b->set);
}

/* Binary tree stuff for auxiliary search trees */

/*
 * Cacheline/offset <-> bkey pointer arithmetic:
 *
 * The ro aux tree is a binary search tree stored in an array (eytzinger
 * layout); each node corresponds to a key in one cacheline of the bset
 * (BSET_CACHELINE bytes).
 *
 * This means we don't have to store the full index of the key that a node in
 * the binary tree points to; eytzinger1_to_inorder() gives us the cacheline,
 * and then bkey_float->key_offset gives us the offset within that cacheline,
 * in units of 8 bytes.
 *
 * cacheline_to_bkey() and friends abstract out all the pointer arithmetic to
 * make this work.
 *
 * To construct the bfloat for an arbitrary key we need to know what the key
 * immediately preceding it is: we have to check if the two keys differ in the
 * bits we're going to store in bkey_float->mantissa. ro_aux_tree_prev() stores
 * the size of the previous key so we can walk backwards to it from the tree
 * node's key.
 */
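
/*
 * Illustrative sketch (not compiled; names as defined below): resolving tree
 * node j to a key pointer is two table lookups plus pointer arithmetic -
 *
 *	unsigned cacheline = __eytzinger1_to_inorder(j, t->size - 1, t->extra);
 *	struct bkey_packed *k = bset_cacheline(b, t, cacheline) +
 *		bkey_float(b, t, j)->key_offset * 8;
 *
 * which is exactly what cacheline_to_bkey()/tree_to_bkey() compute.
 */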

static inline void *bset_cacheline(const struct btree *b,
				   const struct bset_tree *t,
				   unsigned cacheline)
{
	return (void *) round_down((unsigned long) btree_bkey_first(b, t),
				   L1_CACHE_BYTES) +
		cacheline * BSET_CACHELINE;
}

static struct bkey_packed *cacheline_to_bkey(const struct btree *b,
					     const struct bset_tree *t,
					     unsigned cacheline,
					     unsigned offset)
{
	return bset_cacheline(b, t, cacheline) + offset * 8;
}

static unsigned bkey_to_cacheline(const struct btree *b,
				  const struct bset_tree *t,
				  const struct bkey_packed *k)
{
	return ((void *) k - bset_cacheline(b, t, 0)) / BSET_CACHELINE;
}

static ssize_t __bkey_to_cacheline_offset(const struct btree *b,
					  const struct bset_tree *t,
					  unsigned cacheline,
					  const struct bkey_packed *k)
{
	return (u64 *) k - (u64 *) bset_cacheline(b, t, cacheline);
}

static unsigned bkey_to_cacheline_offset(const struct btree *b,
					 const struct bset_tree *t,
					 unsigned cacheline,
					 const struct bkey_packed *k)
{
	size_t m = __bkey_to_cacheline_offset(b, t, cacheline, k);

	EBUG_ON(m > U8_MAX);
	return m;
}

static inline struct bkey_packed *tree_to_bkey(const struct btree *b,
					       const struct bset_tree *t,
					       unsigned j)
{
	return cacheline_to_bkey(b, t,
			__eytzinger1_to_inorder(j, t->size - 1, t->extra),
			bkey_float(b, t, j)->key_offset);
}

static struct bkey_packed *tree_to_prev_bkey(const struct btree *b,
					     const struct bset_tree *t,
					     unsigned j)
{
	unsigned prev_u64s = ro_aux_tree_prev(b, t)[j];

	return (void *) ((u64 *) tree_to_bkey(b, t, j)->_data - prev_u64s);
}

static struct rw_aux_tree *rw_aux_tree(const struct btree *b,
				       const struct bset_tree *t)
{
	EBUG_ON(bset_aux_tree_type(t) != BSET_RW_AUX_TREE);

	return __aux_tree_base(b, t);
}

/*
 * For the write set - the one we're currently inserting keys into - we don't
 * maintain a full search tree, we just keep a simple lookup table in
 * rw_aux_tree().
 */
static struct bkey_packed *rw_aux_to_bkey(const struct btree *b,
					  struct bset_tree *t,
					  unsigned j)
{
	return __btree_node_offset_to_key(b, rw_aux_tree(b, t)[j].offset);
}

static void rw_aux_tree_set(const struct btree *b, struct bset_tree *t,
			    unsigned j, struct bkey_packed *k)
{
	EBUG_ON(k >= btree_bkey_last(b, t));

	rw_aux_tree(b, t)[j] = (struct rw_aux_tree) {
		.offset	= __btree_node_key_to_offset(b, k),
		.k	= bkey_unpack_pos(b, k),
	};
}

static void bch2_bset_verify_rw_aux_tree(struct btree *b,
					 struct bset_tree *t)
{
	struct bkey_packed *k = btree_bkey_first(b, t);
	unsigned j = 0;

	if (!bch2_expensive_debug_checks)
		return;

	BUG_ON(bset_has_ro_aux_tree(t));

	if (!bset_has_rw_aux_tree(t))
		return;

	BUG_ON(t->size < 1);
	BUG_ON(rw_aux_to_bkey(b, t, j) != k);

	goto start;
	while (1) {
		if (rw_aux_to_bkey(b, t, j) == k) {
			BUG_ON(!bpos_eq(rw_aux_tree(b, t)[j].k,
					bkey_unpack_pos(b, k)));
start:
			if (++j == t->size)
				break;

			BUG_ON(rw_aux_tree(b, t)[j].offset <=
			       rw_aux_tree(b, t)[j - 1].offset);
		}

		k = bkey_p_next(k);
		BUG_ON(k >= btree_bkey_last(b, t));
	}
}

/* returns idx of first entry >= offset: */
static unsigned rw_aux_tree_bsearch(struct btree *b,
				    struct bset_tree *t,
				    unsigned offset)
{
	unsigned bset_offs = offset - btree_bkey_first_offset(t);
	unsigned bset_u64s = t->end_offset - btree_bkey_first_offset(t);
	unsigned idx = bset_u64s ? bset_offs * t->size / bset_u64s : 0;

	EBUG_ON(bset_aux_tree_type(t) != BSET_RW_AUX_TREE);
	EBUG_ON(!t->size);
	EBUG_ON(idx > t->size);

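	/*
	 * The initial idx above is an interpolation guess: entries are spaced
	 * roughly one per cacheline of keys, so scaling the key's offset by
	 * t->size / bset_u64s lands close to the answer and the fixup loops
	 * below typically only walk a few entries in either direction.
	 */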
	while (idx < t->size &&
	       rw_aux_tree(b, t)[idx].offset < offset)
		idx++;

	while (idx &&
	       rw_aux_tree(b, t)[idx - 1].offset >= offset)
		idx--;

	EBUG_ON(idx < t->size &&
		rw_aux_tree(b, t)[idx].offset < offset);
	EBUG_ON(idx && rw_aux_tree(b, t)[idx - 1].offset >= offset);
	EBUG_ON(idx + 1 < t->size &&
		rw_aux_tree(b, t)[idx].offset ==
		rw_aux_tree(b, t)[idx + 1].offset);

	return idx;
}

static inline unsigned bkey_mantissa(const struct bkey_packed *k,
				     const struct bkey_float *f,
				     unsigned idx)
{
	u64 v;

	EBUG_ON(!bkey_packed(k));

	v = get_unaligned((u64 *) (((u8 *) k->_data) + (f->exponent >> 3)));

	/*
	 * In little endian, we're shifting off low bits (and then the bits we
	 * want are at the low end), in big endian we're shifting off high bits
	 * (and then the bits we want are at the high end, so we shift them
	 * back down):
	 */
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	v >>= f->exponent & 7;
#else
	v >>= 64 - (f->exponent & 7) - BKEY_MANTISSA_BITS;
#endif
	return (u16) v;
}
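
/*
 * Worked example (little endian, illustrative only): with f->exponent == 13,
 * we load the u64 at byte offset 13 >> 3 == 1 into the packed key, shift off
 * the low 13 & 7 == 5 bits, and the truncation to u16 keeps the 16 mantissa
 * bits starting at bit 13 of the key.
 */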

static __always_inline void make_bfloat(struct btree *b, struct bset_tree *t,
					unsigned j,
					struct bkey_packed *min_key,
					struct bkey_packed *max_key)
{
	struct bkey_float *f = bkey_float(b, t, j);
	struct bkey_packed *m = tree_to_bkey(b, t, j);
	struct bkey_packed *l = is_power_of_2(j)
		? min_key
		: tree_to_prev_bkey(b, t, j >> ffs(j));
	struct bkey_packed *r = is_power_of_2(j + 1)
		? max_key
		: tree_to_bkey(b, t, j >> (ffz(j) + 1));
	unsigned mantissa;
	int shift, exponent, high_bit;

	/*
	 * for failed bfloats, the lookup code falls back to comparing against
	 * the original key.
	 */

	if (!bkey_packed(l) || !bkey_packed(r) || !bkey_packed(m) ||
	    !b->nr_key_bits) {
		f->exponent = BFLOAT_FAILED_UNPACKED;
		return;
	}

	/*
	 * The greatest differing bit of l and r is the first bit we must
	 * include in the bfloat mantissa we're creating in order to do
	 * comparisons - that bit always becomes the high bit of
	 * bfloat->mantissa, and thus the exponent we're calculating here is
	 * the position of what will become the low bit in bfloat->mantissa:
	 *
	 * Note that this may be negative - we may be running off the low end
	 * of the key: we handle this later:
	 */
	high_bit = max(bch2_bkey_greatest_differing_bit(b, l, r),
		       min_t(unsigned, BKEY_MANTISSA_BITS, b->nr_key_bits) - 1);
	exponent = high_bit - (BKEY_MANTISSA_BITS - 1);

	/*
	 * Then we calculate the actual shift value, from the start of the key
	 * (k->_data), to get the key bits starting at exponent:
	 */
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	shift = (int) (b->format.key_u64s * 64 - b->nr_key_bits) + exponent;

	EBUG_ON(shift + BKEY_MANTISSA_BITS > b->format.key_u64s * 64);
#else
	shift = high_bit_offset +
		b->nr_key_bits -
		exponent -
		BKEY_MANTISSA_BITS;

	EBUG_ON(shift < KEY_PACKED_BITS_START);
#endif
	EBUG_ON(shift < 0 || shift >= BFLOAT_FAILED);

	f->exponent = shift;
	mantissa = bkey_mantissa(m, f, j);

	/*
	 * If we've got garbage bits, set them to all 1s - it's legal for the
	 * bfloat to compare larger than the original key, but not smaller:
	 */
	if (exponent < 0)
		mantissa |= ~(~0U << -exponent);

	f->mantissa = mantissa;
}

/* bytes remaining - only valid for last bset: */
static unsigned __bset_tree_capacity(const struct btree *b, const struct bset_tree *t)
{
	bset_aux_tree_verify(b);

	return btree_aux_data_bytes(b) - t->aux_data_offset * sizeof(u64);
}

static unsigned bset_ro_tree_capacity(const struct btree *b, const struct bset_tree *t)
{
	return __bset_tree_capacity(b, t) /
		(sizeof(struct bkey_float) + sizeof(u8));
}

static unsigned bset_rw_tree_capacity(const struct btree *b, const struct bset_tree *t)
{
	return __bset_tree_capacity(b, t) / sizeof(struct rw_aux_tree);
}

static noinline void __build_rw_aux_tree(struct btree *b, struct bset_tree *t)
{
	struct bkey_packed *k;

	t->size = 1;
	t->extra = BSET_RW_AUX_TREE_VAL;
	rw_aux_tree(b, t)[0].offset =
		__btree_node_key_to_offset(b, btree_bkey_first(b, t));

	bset_tree_for_each_key(b, t, k) {
		if (t->size == bset_rw_tree_capacity(b, t))
			break;

		if ((void *) k - (void *) rw_aux_to_bkey(b, t, t->size - 1) >
		    L1_CACHE_BYTES)
			rw_aux_tree_set(b, t, t->size++, k);
	}
}

static noinline void __build_ro_aux_tree(struct btree *b, struct bset_tree *t)
{
	struct bkey_packed *prev = NULL, *k = btree_bkey_first(b, t);
	struct bkey_i min_key, max_key;
	unsigned cacheline = 1;

	t->size = min(bkey_to_cacheline(b, t, btree_bkey_last(b, t)),
		      bset_ro_tree_capacity(b, t));
retry:
	if (t->size < 2) {
		t->size = 0;
		t->extra = BSET_NO_AUX_TREE_VAL;
		return;
	}

	t->extra = (t->size - rounddown_pow_of_two(t->size - 1)) << 1;

	/* First we figure out where the first key in each cacheline is */
	eytzinger1_for_each(j, t->size - 1) {
		while (bkey_to_cacheline(b, t, k) < cacheline)
			prev = k, k = bkey_p_next(k);

		if (k >= btree_bkey_last(b, t)) {
			/* XXX: this path sucks */
			t->size--;
			goto retry;
		}

		ro_aux_tree_prev(b, t)[j] = prev->u64s;
		bkey_float(b, t, j)->key_offset =
			bkey_to_cacheline_offset(b, t, cacheline++, k);

		EBUG_ON(tree_to_prev_bkey(b, t, j) != prev);
		EBUG_ON(tree_to_bkey(b, t, j) != k);
	}

	while (k != btree_bkey_last(b, t))
		prev = k, k = bkey_p_next(k);

	if (!bkey_pack_pos(bkey_to_packed(&min_key), b->data->min_key, b)) {
		bkey_init(&min_key.k);
		min_key.k.p = b->data->min_key;
	}

	if (!bkey_pack_pos(bkey_to_packed(&max_key), b->data->max_key, b)) {
		bkey_init(&max_key.k);
		max_key.k.p = b->data->max_key;
	}

	/* Then we build the tree */
	eytzinger1_for_each(j, t->size - 1)
		make_bfloat(b, t, j,
			    bkey_to_packed(&min_key),
			    bkey_to_packed(&max_key));
}

static void bset_alloc_tree(struct btree *b, struct bset_tree *t)
{
	struct bset_tree *i;

	for (i = b->set; i != t; i++)
		BUG_ON(bset_has_rw_aux_tree(i));

	bch2_bset_set_no_aux_tree(b, t);

	/* round up to next cacheline: */
	t->aux_data_offset = round_up(bset_aux_tree_buf_start(b, t),
				      SMP_CACHE_BYTES / sizeof(u64));

	bset_aux_tree_verify(b);
}

void bch2_bset_build_aux_tree(struct btree *b, struct bset_tree *t,
			      bool writeable)
{
	if (writeable
	    ? bset_has_rw_aux_tree(t)
	    : bset_has_ro_aux_tree(t))
		return;

	bset_alloc_tree(b, t);

	if (!__bset_tree_capacity(b, t))
		return;

	if (writeable)
		__build_rw_aux_tree(b, t);
	else
		__build_ro_aux_tree(b, t);

	bset_aux_tree_verify(b);
}

void bch2_bset_init_first(struct btree *b, struct bset *i)
{
	struct bset_tree *t;

	BUG_ON(b->nsets);

	memset(i, 0, sizeof(*i));
	get_random_bytes(&i->seq, sizeof(i->seq));
	SET_BSET_BIG_ENDIAN(i, CPU_BIG_ENDIAN);

	t = &b->set[b->nsets++];
	set_btree_bset(b, t, i);
}

void bch2_bset_init_next(struct btree *b, struct btree_node_entry *bne)
{
	struct bset *i = &bne->keys;
	struct bset_tree *t;

	BUG_ON(bset_byte_offset(b, bne) >= btree_buf_bytes(b));
	BUG_ON((void *) bne < (void *) btree_bkey_last(b, bset_tree_last(b)));
	BUG_ON(b->nsets >= MAX_BSETS);

	memset(i, 0, sizeof(*i));
	i->seq = btree_bset_first(b)->seq;
	SET_BSET_BIG_ENDIAN(i, CPU_BIG_ENDIAN);

	t = &b->set[b->nsets++];
	set_btree_bset(b, t, i);
}

/*
 * find _some_ key in the same bset as @k that precedes @k - not necessarily the
 * immediate predecessor:
 */
static struct bkey_packed *__bkey_prev(struct btree *b, struct bset_tree *t,
				       struct bkey_packed *k)
{
	struct bkey_packed *p;
	unsigned offset;
	int j;

	EBUG_ON(k < btree_bkey_first(b, t) ||
		k > btree_bkey_last(b, t));

	if (k == btree_bkey_first(b, t))
		return NULL;

	switch (bset_aux_tree_type(t)) {
	case BSET_NO_AUX_TREE:
		p = btree_bkey_first(b, t);
		break;
	case BSET_RO_AUX_TREE:
		j = min_t(unsigned, t->size - 1, bkey_to_cacheline(b, t, k));

		do {
			p = j ? tree_to_bkey(b, t,
					__inorder_to_eytzinger1(j--,
							t->size - 1, t->extra))
			      : btree_bkey_first(b, t);
		} while (p >= k);
		break;
	case BSET_RW_AUX_TREE:
		offset = __btree_node_key_to_offset(b, k);
		j = rw_aux_tree_bsearch(b, t, offset);
		p = j ? rw_aux_to_bkey(b, t, j - 1)
		      : btree_bkey_first(b, t);
		break;
	}

	return p;
}

struct bkey_packed *bch2_bkey_prev_filter(struct btree *b,
					  struct bset_tree *t,
					  struct bkey_packed *k,
					  unsigned min_key_type)
{
	struct bkey_packed *p, *i, *ret = NULL, *orig_k = k;

	while ((p = __bkey_prev(b, t, k)) && !ret) {
		for (i = p; i != k; i = bkey_p_next(i))
			if (i->type >= min_key_type)
				ret = i;

		k = p;
	}

	if (bch2_expensive_debug_checks) {
		BUG_ON(ret >= orig_k);

		for (i = ret
			? bkey_p_next(ret)
			: btree_bkey_first(b, t);
		     i != orig_k;
		     i = bkey_p_next(i))
			BUG_ON(i->type >= min_key_type);
	}

	return ret;
}

/* Insert */

static void bch2_bset_fix_lookup_table(struct btree *b,
				       struct bset_tree *t,
				       struct bkey_packed *_where,
				       unsigned clobber_u64s,
				       unsigned new_u64s)
{
	int shift = new_u64s - clobber_u64s;
	unsigned l, j, where = __btree_node_key_to_offset(b, _where);

	EBUG_ON(bset_has_ro_aux_tree(t));

	if (!bset_has_rw_aux_tree(t))
		return;

	/* returns first entry >= where */
	l = rw_aux_tree_bsearch(b, t, where);

	if (!l) /* never delete first entry */
		l++;
	else if (l < t->size &&
		 where < t->end_offset &&
		 rw_aux_tree(b, t)[l].offset == where)
		rw_aux_tree_set(b, t, l++, _where);

	/* l now > where */

	for (j = l;
	     j < t->size &&
	     rw_aux_tree(b, t)[j].offset < where + clobber_u64s;
	     j++)
		;

	if (j < t->size &&
	    rw_aux_tree(b, t)[j].offset + shift ==
	    rw_aux_tree(b, t)[l - 1].offset)
		j++;

	memmove(&rw_aux_tree(b, t)[l],
		&rw_aux_tree(b, t)[j],
		(void *) &rw_aux_tree(b, t)[t->size] -
		(void *) &rw_aux_tree(b, t)[j]);
	t->size -= j - l;

	for (j = l; j < t->size; j++)
		rw_aux_tree(b, t)[j].offset += shift;

	EBUG_ON(l < t->size &&
		rw_aux_tree(b, t)[l].offset ==
		rw_aux_tree(b, t)[l - 1].offset);

	if (t->size < bset_rw_tree_capacity(b, t) &&
	    (l < t->size
	     ? rw_aux_tree(b, t)[l].offset
	     : t->end_offset) -
	    rw_aux_tree(b, t)[l - 1].offset >
	    L1_CACHE_BYTES / sizeof(u64)) {
		struct bkey_packed *start = rw_aux_to_bkey(b, t, l - 1);
		struct bkey_packed *end = l < t->size
			? rw_aux_to_bkey(b, t, l)
			: btree_bkey_last(b, t);
		struct bkey_packed *k = start;

		while (1) {
			k = bkey_p_next(k);
			if (k == end)
				break;

			if ((void *) k - (void *) start >= L1_CACHE_BYTES) {
				memmove(&rw_aux_tree(b, t)[l + 1],
					&rw_aux_tree(b, t)[l],
					(void *) &rw_aux_tree(b, t)[t->size] -
					(void *) &rw_aux_tree(b, t)[l]);
				t->size++;
				rw_aux_tree_set(b, t, l, k);
				break;
			}
		}
	}

	bch2_bset_verify_rw_aux_tree(b, t);
	bset_aux_tree_verify(b);
}

void bch2_bset_insert(struct btree *b,
		      struct btree_node_iter *iter,
		      struct bkey_packed *where,
		      struct bkey_i *insert,
		      unsigned clobber_u64s)
{
	struct bkey_format *f = &b->format;
	struct bset_tree *t = bset_tree_last(b);
	struct bkey_packed packed, *src = bkey_to_packed(insert);

	bch2_bset_verify_rw_aux_tree(b, t);
	bch2_verify_insert_pos(b, where, bkey_to_packed(insert), clobber_u64s);

	if (bch2_bkey_pack_key(&packed, &insert->k, f))
		src = &packed;

	if (!bkey_deleted(&insert->k))
		btree_keys_account_key_add(&b->nr, t - b->set, src);

	if (src->u64s != clobber_u64s) {
		u64 *src_p = (u64 *) where->_data + clobber_u64s;
		u64 *dst_p = (u64 *) where->_data + src->u64s;

		EBUG_ON((int) le16_to_cpu(bset(b, t)->u64s) <
			(int) clobber_u64s - src->u64s);

		memmove_u64s(dst_p, src_p, btree_bkey_last(b, t)->_data - src_p);
		le16_add_cpu(&bset(b, t)->u64s, src->u64s - clobber_u64s);
		set_btree_bset_end(b, t);
	}

	memcpy_u64s_small(where, src,
			  bkeyp_key_u64s(f, src));
	memcpy_u64s(bkeyp_val(f, where), &insert->v,
		    bkeyp_val_u64s(f, src));

	if (src->u64s != clobber_u64s)
		bch2_bset_fix_lookup_table(b, t, where, clobber_u64s, src->u64s);

	bch2_verify_btree_nr_keys(b);
}

void bch2_bset_delete(struct btree *b,
		      struct bkey_packed *where,
		      unsigned clobber_u64s)
{
	struct bset_tree *t = bset_tree_last(b);
	u64 *src_p = (u64 *) where->_data + clobber_u64s;
	u64 *dst_p = where->_data;

	bch2_bset_verify_rw_aux_tree(b, t);

	EBUG_ON(le16_to_cpu(bset(b, t)->u64s) < clobber_u64s);

	memmove_u64s_down(dst_p, src_p, btree_bkey_last(b, t)->_data - src_p);
	le16_add_cpu(&bset(b, t)->u64s, -clobber_u64s);
	set_btree_bset_end(b, t);

	bch2_bset_fix_lookup_table(b, t, where, clobber_u64s, 0);
}

/* Lookup */

__flatten
static struct bkey_packed *bset_search_write_set(const struct btree *b,
						 struct bset_tree *t,
						 struct bpos *search)
{
	unsigned l = 0, r = t->size;

	while (l + 1 != r) {
		unsigned m = (l + r) >> 1;

		if (bpos_lt(rw_aux_tree(b, t)[m].k, *search))
			l = m;
		else
			r = m;
	}

	return rw_aux_to_bkey(b, t, l);
}

static inline void prefetch_four_cachelines(void *p)
{
#ifdef CONFIG_X86_64
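	/*
	 * Note (illustrative): the displacements are written relative to
	 * p + 127 so that each one fits in a signed 8-bit immediate
	 * (-127, -63, 1, 65), keeping the instruction encodings short; the
	 * effect matches the four prefetch() calls in the generic path below.
	 */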
	asm("prefetcht0 (-127 + 64 * 0)(%0);"
	    "prefetcht0 (-127 + 64 * 1)(%0);"
	    "prefetcht0 (-127 + 64 * 2)(%0);"
	    "prefetcht0 (-127 + 64 * 3)(%0);"
	    :
	    : "r" (p + 127));
#else
	prefetch(p + L1_CACHE_BYTES * 0);
	prefetch(p + L1_CACHE_BYTES * 1);
	prefetch(p + L1_CACHE_BYTES * 2);
	prefetch(p + L1_CACHE_BYTES * 3);
#endif
}

static inline bool bkey_mantissa_bits_dropped(const struct btree *b,
					      const struct bkey_float *f,
					      unsigned idx)
{
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	unsigned key_bits_start = b->format.key_u64s * 64 - b->nr_key_bits;

	return f->exponent > key_bits_start;
#else
	unsigned key_bits_end = high_bit_offset + b->nr_key_bits;

	return f->exponent + BKEY_MANTISSA_BITS < key_bits_end;
#endif
}

__flatten
static struct bkey_packed *bset_search_tree(const struct btree *b,
					    const struct bset_tree *t,
					    const struct bpos *search,
					    const struct bkey_packed *packed_search)
{
	struct ro_aux_tree *base = ro_aux_tree_base(b, t);
	struct bkey_float *f;
	struct bkey_packed *k;
	unsigned inorder, n = 1, l, r;
	int cmp;

	do {
		if (likely(n << 4 < t->size))
			prefetch(&base->f[n << 4]);

		f = &base->f[n];
		if (unlikely(f->exponent >= BFLOAT_FAILED))
			goto slowpath;

		l = f->mantissa;
		r = bkey_mantissa(packed_search, f, n);

		if (unlikely(l == r) && bkey_mantissa_bits_dropped(b, f, n))
			goto slowpath;

		n = n * 2 + (l < r);
		continue;
slowpath:
		k = tree_to_bkey(b, t, n);
		cmp = bkey_cmp_p_or_unp(b, k, packed_search, search);
		if (!cmp)
			return k;

		n = n * 2 + (cmp < 0);
	} while (n < t->size);

	inorder = __eytzinger1_to_inorder(n >> 1, t->size - 1, t->extra);

	/*
	 * n would have been the node we recursed to - the low bit tells us if
	 * we recursed left or recursed right.
	 */
	if (likely(!(n & 1))) {
		--inorder;
		if (unlikely(!inorder))
			return btree_bkey_first(b, t);

		f = &base->f[eytzinger1_prev(n >> 1, t->size - 1)];
	}

	return cacheline_to_bkey(b, t, inorder, f->key_offset);
}

static __always_inline __flatten
struct bkey_packed *__bch2_bset_search(struct btree *b,
				       struct bset_tree *t,
				       struct bpos *search,
				       const struct bkey_packed *lossy_packed_search)
{

	/*
	 * First, we search for a cacheline, then we do a linear search within
	 * that cacheline.
	 *
	 * To search for the cacheline, there are three different possibilities:
	 * * The set is too small to have a search tree, so we just do a linear
	 *   search over the whole set.
	 * * The set is the one we're currently inserting into; keeping a full
	 *   auxiliary search tree up to date would be too expensive, so we
	 *   use a much simpler lookup table to do a binary search -
	 *   bset_search_write_set().
	 * * Or we use the auxiliary search tree we constructed earlier -
	 *   bset_search_tree()
	 */

	switch (bset_aux_tree_type(t)) {
	case BSET_NO_AUX_TREE:
		return btree_bkey_first(b, t);
	case BSET_RW_AUX_TREE:
		return bset_search_write_set(b, t, search);
	case BSET_RO_AUX_TREE:
		return bset_search_tree(b, t, search, lossy_packed_search);
	default:
		BUG();
	}
}

static __always_inline __flatten
struct bkey_packed *bch2_bset_search_linear(struct btree *b,
					    struct bset_tree *t,
					    struct bpos *search,
					    struct bkey_packed *packed_search,
					    const struct bkey_packed *lossy_packed_search,
					    struct bkey_packed *m)
{
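	/*
	 * First pass: cheap comparisons against the lossily packed search key
	 * get us close; if the pack wasn't exact (packed_search == NULL), a
	 * second pass with full unpacked comparisons finishes the job.
	 */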
	if (lossy_packed_search)
		while (m != btree_bkey_last(b, t) &&
		       bkey_iter_cmp_p_or_unp(b, m,
					      lossy_packed_search, search) < 0)
			m = bkey_p_next(m);

	if (!packed_search)
		while (m != btree_bkey_last(b, t) &&
		       bkey_iter_pos_cmp(b, m, search) < 0)
			m = bkey_p_next(m);

	if (bch2_expensive_debug_checks) {
		struct bkey_packed *prev = bch2_bkey_prev_all(b, t, m);

		BUG_ON(prev &&
		       bkey_iter_cmp_p_or_unp(b, prev,
					      packed_search, search) >= 0);
	}

	return m;
}

/* Btree node iterator */

static inline void __bch2_btree_node_iter_push(struct btree_node_iter *iter,
					       struct btree *b,
					       const struct bkey_packed *k,
					       const struct bkey_packed *end)
{
	if (k != end) {
		struct btree_node_iter_set *pos;

		btree_node_iter_for_each(iter, pos)
			;

		BUG_ON(pos >= iter->data + ARRAY_SIZE(iter->data));
		*pos = (struct btree_node_iter_set) {
			__btree_node_key_to_offset(b, k),
			__btree_node_key_to_offset(b, end)
		};
	}
}

void bch2_btree_node_iter_push(struct btree_node_iter *iter,
			       struct btree *b,
			       const struct bkey_packed *k,
			       const struct bkey_packed *end)
{
	__bch2_btree_node_iter_push(iter, b, k, end);
	bch2_btree_node_iter_sort(iter, b);
}

noinline __flatten __cold
static void btree_node_iter_init_pack_failed(struct btree_node_iter *iter,
					     struct btree *b, struct bpos *search)
{
	struct bkey_packed *k;

	trace_bkey_pack_pos_fail(search);

	bch2_btree_node_iter_init_from_start(iter, b);

	while ((k = bch2_btree_node_iter_peek(iter, b)) &&
	       bkey_iter_pos_cmp(b, k, search) < 0)
		bch2_btree_node_iter_advance(iter, b);
}

/**
 * bch2_btree_node_iter_init - initialize a btree node iterator, starting from a
 * given position
 *
 * @iter:	iterator to initialize
 * @b:		btree node to search
 * @search:	search key
 *
 * Main entry point to the lookup code for individual btree nodes:
 *
 * NOTE:
 *
 * When you don't filter out deleted keys, btree nodes _do_ contain duplicate
 * keys. This doesn't matter for most code, but it does matter for lookups.
 *
 * Some adjacent keys with a string of equal keys:
 *	i j k k k k l m
 *
 * If you search for k, the lookup code isn't guaranteed to return you any
 * specific k. The lookup code is conceptually doing a binary search and
 * iterating backwards is very expensive so if the pivot happens to land at the
 * last k that's what you'll get.
 *
 * This works out ok, but it's something to be aware of:
 *
 *  - For non extents, we guarantee that the live key comes last - see
 *    btree_node_iter_cmp(), keys_out_of_order(). So the duplicates you don't
 *    see will only be deleted keys you don't care about.
 *
 *  - For extents, deleted keys sort last (see the comment at the top of this
 *    file). But when you're searching for extents, you actually want the first
 *    key strictly greater than your search key - an extent that compares equal
 *    to the search key is going to have 0 sectors after the search key.
 *
 *    But this does mean that we can't just search for
 *    bpos_successor(start_of_range) to get the first extent that overlaps with
 *    the range we want - if we're unlucky and there's an extent that ends
 *    exactly where we searched, then there could be a deleted key at the same
 *    position and we'd get that when we search instead of the preceding extent
 *    we needed.
 *
 *    So we've got to search for start_of_range, then after the lookup iterate
 *    past any extents that compare equal to the position we searched for.
 */
__flatten
void bch2_btree_node_iter_init(struct btree_node_iter *iter,
			       struct btree *b, struct bpos *search)
{
	struct bkey_packed p, *packed_search = NULL;
	struct btree_node_iter_set *pos = iter->data;
	struct bkey_packed *k[MAX_BSETS];
	unsigned i;

	EBUG_ON(bpos_lt(*search, b->data->min_key));
	EBUG_ON(bpos_gt(*search, b->data->max_key));
	bset_aux_tree_verify(b);

	memset(iter, 0, sizeof(*iter));

	switch (bch2_bkey_pack_pos_lossy(&p, *search, b)) {
	case BKEY_PACK_POS_EXACT:
		packed_search = &p;
		break;
	case BKEY_PACK_POS_SMALLER:
		packed_search = NULL;
		break;
	case BKEY_PACK_POS_FAIL:
		btree_node_iter_init_pack_failed(iter, b, search);
		return;
	}

	for (i = 0; i < b->nsets; i++) {
		k[i] = __bch2_bset_search(b, b->set + i, search, &p);
		prefetch_four_cachelines(k[i]);
	}

	for (i = 0; i < b->nsets; i++) {
		struct bset_tree *t = b->set + i;
		struct bkey_packed *end = btree_bkey_last(b, t);

		k[i] = bch2_bset_search_linear(b, t, search,
					       packed_search, &p, k[i]);
		if (k[i] != end)
			*pos++ = (struct btree_node_iter_set) {
				__btree_node_key_to_offset(b, k[i]),
				__btree_node_key_to_offset(b, end)
			};
	}

	bch2_btree_node_iter_sort(iter, b);
}

void bch2_btree_node_iter_init_from_start(struct btree_node_iter *iter,
					  struct btree *b)
{
	struct bset_tree *t;

	memset(iter, 0, sizeof(*iter));

	for_each_bset(b, t)
		__bch2_btree_node_iter_push(iter, b,
					    btree_bkey_first(b, t),
					    btree_bkey_last(b, t));
	bch2_btree_node_iter_sort(iter, b);
}

struct bkey_packed *bch2_btree_node_iter_bset_pos(struct btree_node_iter *iter,
						  struct btree *b,
						  struct bset_tree *t)
{
	struct btree_node_iter_set *set;

	btree_node_iter_for_each(iter, set)
		if (set->end == t->end_offset)
			return __btree_node_offset_to_key(b, set->k);

	return btree_bkey_last(b, t);
}

static inline bool btree_node_iter_sort_two(struct btree_node_iter *iter,
					    struct btree *b,
					    unsigned first)
{
	bool ret;

	if ((ret = (btree_node_iter_cmp(b,
					iter->data[first],
					iter->data[first + 1]) > 0)))
		swap(iter->data[first], iter->data[first + 1]);
	return ret;
}

void bch2_btree_node_iter_sort(struct btree_node_iter *iter,
			       struct btree *b)
{
	/* unrolled bubble sort: */
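	/* (at most MAX_BSETS == 3 sets, so three compares fully sort it) */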

	if (!__btree_node_iter_set_end(iter, 2)) {
		btree_node_iter_sort_two(iter, b, 0);
		btree_node_iter_sort_two(iter, b, 1);
	}

	if (!__btree_node_iter_set_end(iter, 1))
		btree_node_iter_sort_two(iter, b, 0);
}

void bch2_btree_node_iter_set_drop(struct btree_node_iter *iter,
				   struct btree_node_iter_set *set)
{
	struct btree_node_iter_set *last =
		iter->data + ARRAY_SIZE(iter->data) - 1;

	memmove(&set[0], &set[1], (void *) last - (void *) set);
	*last = (struct btree_node_iter_set) { 0, 0 };
}

static inline void __bch2_btree_node_iter_advance(struct btree_node_iter *iter,
						  struct btree *b)
{
	iter->data->k += __bch2_btree_node_iter_peek_all(iter, b)->u64s;

	EBUG_ON(iter->data->k > iter->data->end);

	if (unlikely(__btree_node_iter_set_end(iter, 0))) {
		/* avoid an expensive memmove call: */
		iter->data[0] = iter->data[1];
		iter->data[1] = iter->data[2];
		iter->data[2] = (struct btree_node_iter_set) { 0, 0 };
		return;
	}

	if (__btree_node_iter_set_end(iter, 1))
		return;

	if (!btree_node_iter_sort_two(iter, b, 0))
		return;

	if (__btree_node_iter_set_end(iter, 2))
		return;

	btree_node_iter_sort_two(iter, b, 1);
}

void bch2_btree_node_iter_advance(struct btree_node_iter *iter,
				  struct btree *b)
{
	if (bch2_expensive_debug_checks) {
		bch2_btree_node_iter_verify(iter, b);
		bch2_btree_node_iter_next_check(iter, b);
	}

	__bch2_btree_node_iter_advance(iter, b);
}

/*
 * Expensive:
 */
struct bkey_packed *bch2_btree_node_iter_prev_all(struct btree_node_iter *iter,
						  struct btree *b)
{
	struct bkey_packed *k, *prev = NULL;
	struct btree_node_iter_set *set;
	struct bset_tree *t;
	unsigned end = 0;

	if (bch2_expensive_debug_checks)
		bch2_btree_node_iter_verify(iter, b);

	for_each_bset(b, t) {
		k = bch2_bkey_prev_all(b, t,
			bch2_btree_node_iter_bset_pos(iter, b, t));
		if (k &&
		    (!prev || bkey_iter_cmp(b, k, prev) > 0)) {
			prev = k;
			end = t->end_offset;
		}
	}

	if (!prev)
		return NULL;

	/*
	 * We're manually memmoving instead of just calling sort() to ensure the
	 * prev we picked ends up in slot 0 - sort won't necessarily put it
	 * there because of duplicate deleted keys:
	 */
	btree_node_iter_for_each(iter, set)
		if (set->end == end)
			goto found;

	BUG_ON(set != &iter->data[__btree_node_iter_used(iter)]);
found:
	BUG_ON(set >= iter->data + ARRAY_SIZE(iter->data));

	memmove(&iter->data[1],
		&iter->data[0],
		(void *) set - (void *) &iter->data[0]);

	iter->data[0].k = __btree_node_key_to_offset(b, prev);
	iter->data[0].end = end;

	if (bch2_expensive_debug_checks)
		bch2_btree_node_iter_verify(iter, b);
	return prev;
}

struct bkey_packed *bch2_btree_node_iter_prev(struct btree_node_iter *iter,
					      struct btree *b)
{
	struct bkey_packed *prev;

	do {
		prev = bch2_btree_node_iter_prev_all(iter, b);
	} while (prev && bkey_deleted(prev));

	return prev;
}

struct bkey_s_c bch2_btree_node_iter_peek_unpack(struct btree_node_iter *iter,
						 struct btree *b,
						 struct bkey *u)
{
	struct bkey_packed *k = bch2_btree_node_iter_peek(iter, b);

	return k ? bkey_disassemble(b, k, u) : bkey_s_c_null;
}

/* Mergesort */

void bch2_btree_keys_stats(const struct btree *b, struct bset_stats *stats)
{
	const struct bset_tree *t;

	for_each_bset(b, t) {
		enum bset_aux_tree_type type = bset_aux_tree_type(t);
		size_t j;

		stats->sets[type].nr++;
		stats->sets[type].bytes += le16_to_cpu(bset(b, t)->u64s) *
			sizeof(u64);

		if (bset_has_ro_aux_tree(t)) {
			stats->floats += t->size - 1;

			for (j = 1; j < t->size; j++)
				stats->failed +=
					bkey_float(b, t, j)->exponent ==
					BFLOAT_FAILED;
		}
	}
}

void bch2_bfloat_to_text(struct printbuf *out, struct btree *b,
			 struct bkey_packed *k)
{
	struct bset_tree *t = bch2_bkey_to_bset(b, k);
	struct bkey uk;
	unsigned j, inorder;

	if (!bset_has_ro_aux_tree(t))
		return;

	inorder = bkey_to_cacheline(b, t, k);
	if (!inorder || inorder >= t->size)
		return;

	j = __inorder_to_eytzinger1(inorder, t->size - 1, t->extra);
	if (k != tree_to_bkey(b, t, j))
		return;

	switch (bkey_float(b, t, j)->exponent) {
	case BFLOAT_FAILED:
		uk = bkey_unpack_key(b, k);
		prt_printf(out,
			   " failed unpacked at depth %u\n"
			   "\t",
			   ilog2(j));
		bch2_bpos_to_text(out, uk.p);
		prt_printf(out, "\n");
		break;
	}
}