// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2010 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright (C) 2014 Datera Inc.
 */

#include "bcachefs.h"
#include "alloc_background.h"
#include "alloc_foreground.h"
#include "backpointers.h"
#include "bkey_methods.h"
#include "bkey_buf.h"
#include "btree_journal_iter.h"
#include "btree_key_cache.h"
#include "btree_locking.h"
#include "btree_node_scan.h"
#include "btree_update_interior.h"
#include "btree_io.h"
#include "btree_gc.h"
#include "buckets.h"
#include "clock.h"
#include "debug.h"
#include "disk_accounting.h"
#include "ec.h"
#include "error.h"
#include "extents.h"
#include "journal.h"
#include "keylist.h"
#include "move.h"
#include "progress.h"
#include "recovery_passes.h"
#include "reflink.h"
#include "recovery.h"
#include "replicas.h"
#include "super-io.h"
#include "trace.h"

#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/preempt.h>
#include <linux/rcupdate.h>
#include <linux/sched/task.h>

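/*
 * Sentinel return codes used by the topology repair paths below: they're
 * positive, so they can't be confused with negative errcodes, and they're
 * always handled before returning to callers outside this file:
 */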
#define DROP_THIS_NODE		10
#define DROP_PREV_NODE		11
#define DID_FILL_FROM_SCAN	12

/*
 * Returns false if it's a btree we can easily reconstruct, or otherwise won't
 * cause data loss if it's missing:
 */
static bool btree_id_important(enum btree_id btree)
{
	if (btree_id_is_alloc(btree))
		return false;

	switch (btree) {
	case BTREE_ID_quotas:
	case BTREE_ID_snapshot_trees:
	case BTREE_ID_logged_ops:
	case BTREE_ID_rebalance_work:
	case BTREE_ID_subvolume_children:
		return false;
	default:
		return true;
	}
}

static const char * const bch2_gc_phase_strs[] = {
#define x(n)	#n,
	GC_PHASES()
#undef x
	NULL
};

void bch2_gc_pos_to_text(struct printbuf *out, struct gc_pos *p)
{
	prt_str(out, bch2_gc_phase_strs[p->phase]);
	prt_char(out, ' ');
	bch2_btree_id_level_to_text(out, p->btree, p->level);
	prt_char(out, ' ');
	bch2_bpos_to_text(out, p->pos);
}

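/*
 * Cast away constness for the trigger path, which takes a struct bkey_s; the
 * GC path never actually modifies the key here, hence "unsafe" being safe:
 */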
static struct bkey_s unsafe_bkey_s_c_to_s(struct bkey_s_c k)
{
	return (struct bkey_s) {{{
		(struct bkey *) k.k,
		(struct bch_val *) k.v
	}}};
}

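/*
 * gc_pos is read locklessly under the seqcount; disabling preemption keeps
 * the write side short, so readers retrying on gc_pos_lock can't spin for
 * long:
 */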
static inline void __gc_pos_set(struct bch_fs *c, struct gc_pos new_pos)
{
	preempt_disable();
	write_seqcount_begin(&c->gc_pos_lock);
	c->gc_pos = new_pos;
	write_seqcount_end(&c->gc_pos_lock);
	preempt_enable();
}

static inline void gc_pos_set(struct bch_fs *c, struct gc_pos new_pos)
{
	BUG_ON(gc_pos_cmp(new_pos, c->gc_pos) < 0);
	__gc_pos_set(c, new_pos);
}

static void btree_ptr_to_v2(struct btree *b, struct bkey_i_btree_ptr_v2 *dst)
{
	switch (b->key.k.type) {
	case KEY_TYPE_btree_ptr: {
		struct bkey_i_btree_ptr *src = bkey_i_to_btree_ptr(&b->key);

		dst->k.p = src->k.p;
		dst->v.mem_ptr = 0;
		dst->v.seq = b->data->keys.seq;
		dst->v.sectors_written = 0;
		dst->v.flags = 0;
		dst->v.min_key = b->data->min_key;
		set_bkey_val_bytes(&dst->k, sizeof(dst->v) + bkey_val_bytes(&src->k));
		memcpy(dst->v.start, src->v.start, bkey_val_bytes(&src->k));
		break;
	}
	case KEY_TYPE_btree_ptr_v2:
		bkey_copy(&dst->k_i, &b->key);
		break;
	default:
		BUG();
	}
}

static int set_node_min(struct bch_fs *c, struct btree *b, struct bpos new_min)
{
	struct bkey_i_btree_ptr_v2 *new;
	int ret;

	if (c->opts.verbose) {
		struct printbuf buf = PRINTBUF;

		bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&b->key));
		prt_str(&buf, " -> ");
		bch2_bpos_to_text(&buf, new_min);

		bch_info(c, "%s(): %s", __func__, buf.buf);
		printbuf_exit(&buf);
	}

	new = kmalloc_array(BKEY_BTREE_PTR_U64s_MAX, sizeof(u64), GFP_KERNEL);
	if (!new)
		return -BCH_ERR_ENOMEM_gc_repair_key;

	btree_ptr_to_v2(b, new);
	b->data->min_key = new_min;
	new->v.min_key = new_min;
	SET_BTREE_PTR_RANGE_UPDATED(&new->v, true);

	ret = bch2_journal_key_insert_take(c, b->c.btree_id, b->c.level + 1, &new->k_i);
	if (ret) {
		kfree(new);
		return ret;
	}

	bch2_btree_node_drop_keys_outside_node(b);
	bkey_copy(&b->key, &new->k_i);
	return 0;
}

static int set_node_max(struct bch_fs *c, struct btree *b, struct bpos new_max)
{
	struct bkey_i_btree_ptr_v2 *new;
	int ret;

	if (c->opts.verbose) {
		struct printbuf buf = PRINTBUF;

		bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&b->key));
		prt_str(&buf, " -> ");
		bch2_bpos_to_text(&buf, new_max);

		bch_info(c, "%s(): %s", __func__, buf.buf);
		printbuf_exit(&buf);
	}

	ret = bch2_journal_key_delete(c, b->c.btree_id, b->c.level + 1, b->key.k.p);
	if (ret)
		return ret;

	new = kmalloc_array(BKEY_BTREE_PTR_U64s_MAX, sizeof(u64), GFP_KERNEL);
	if (!new)
		return -BCH_ERR_ENOMEM_gc_repair_key;

	btree_ptr_to_v2(b, new);
	b->data->max_key = new_max;
	new->k.p = new_max;
	SET_BTREE_PTR_RANGE_UPDATED(&new->v, true);

	ret = bch2_journal_key_insert_take(c, b->c.btree_id, b->c.level + 1, &new->k_i);
	if (ret) {
		kfree(new);
		return ret;
	}

	bch2_btree_node_drop_keys_outside_node(b);

	mutex_lock(&c->btree_cache.lock);
	__bch2_btree_node_hash_remove(&c->btree_cache, b);

	bkey_copy(&b->key, &new->k_i);
	ret = __bch2_btree_node_hash_insert(&c->btree_cache, b);
	BUG_ON(ret);
	mutex_unlock(&c->btree_cache.lock);
	return 0;
}

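/*
 * Check that cur's min_key immediately follows prev's max_key (or the
 * parent's min_key, for the first child). On a gap, try refilling from nodes
 * found by scanning; on overlap, repair min/max keys or tell the caller to
 * drop the overwritten node:
 */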
static int btree_check_node_boundaries(struct btree_trans *trans, struct btree *b,
				       struct btree *prev, struct btree *cur,
				       struct bpos *pulled_from_scan)
{
	struct bch_fs *c = trans->c;
	struct bpos expected_start = !prev
		? b->data->min_key
		: bpos_successor(prev->key.k.p);
	struct printbuf buf = PRINTBUF;
	int ret = 0;

	BUG_ON(b->key.k.type == KEY_TYPE_btree_ptr_v2 &&
	       !bpos_eq(bkey_i_to_btree_ptr_v2(&b->key)->v.min_key,
			b->data->min_key));

	if (bpos_eq(expected_start, cur->data->min_key))
		return 0;

	prt_printf(&buf, " at ");
	bch2_btree_id_level_to_text(&buf, b->c.btree_id, b->c.level);
	prt_printf(&buf, ":\nparent: ");
	bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&b->key));

	if (prev) {
		prt_printf(&buf, "\nprev: ");
		bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&prev->key));
	}

	prt_str(&buf, "\nnext: ");
	bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&cur->key));

	if (bpos_lt(expected_start, cur->data->min_key)) {	/* gap */
		if (b->c.level == 1 &&
		    bpos_lt(*pulled_from_scan, cur->data->min_key)) {
			ret = bch2_get_scanned_nodes(c, b->c.btree_id, 0,
						     expected_start,
						     bpos_predecessor(cur->data->min_key));
			if (ret)
				goto err;

			*pulled_from_scan = cur->data->min_key;
			ret = DID_FILL_FROM_SCAN;
		} else {
			if (mustfix_fsck_err(trans, btree_node_topology_bad_min_key,
					     "btree node with incorrect min_key%s", buf.buf))
				ret = set_node_min(c, cur, expected_start);
		}
	} else {						/* overlap */
		if (prev && BTREE_NODE_SEQ(cur->data) > BTREE_NODE_SEQ(prev->data)) { /* cur overwrites prev */
			if (bpos_ge(prev->data->min_key, cur->data->min_key)) { /* fully? */
				if (mustfix_fsck_err(trans, btree_node_topology_overwritten_by_next_node,
						     "btree node overwritten by next node%s", buf.buf))
					ret = DROP_PREV_NODE;
			} else {
				if (mustfix_fsck_err(trans, btree_node_topology_bad_max_key,
						     "btree node with incorrect max_key%s", buf.buf))
					ret = set_node_max(c, prev,
							   bpos_predecessor(cur->data->min_key));
			}
		} else {
			if (bpos_ge(expected_start, cur->data->max_key)) { /* fully? */
				if (mustfix_fsck_err(trans, btree_node_topology_overwritten_by_prev_node,
						     "btree node overwritten by prev node%s", buf.buf))
					ret = DROP_THIS_NODE;
			} else {
				if (mustfix_fsck_err(trans, btree_node_topology_bad_min_key,
						     "btree node with incorrect min_key%s", buf.buf))
					ret = set_node_min(c, cur, expected_start);
			}
		}
	}
err:
fsck_err:
	printbuf_exit(&buf);
	return ret;
}

static int btree_repair_node_end(struct btree_trans *trans, struct btree *b,
				 struct btree *child, struct bpos *pulled_from_scan)
{
	struct bch_fs *c = trans->c;
	struct printbuf buf = PRINTBUF;
	int ret = 0;

	if (bpos_eq(child->key.k.p, b->key.k.p))
		return 0;

	prt_printf(&buf, "\nat: ");
	bch2_btree_id_level_to_text(&buf, b->c.btree_id, b->c.level);
	prt_printf(&buf, "\nparent: ");
	bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&b->key));

	prt_str(&buf, "\nchild: ");
	bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&child->key));

	if (mustfix_fsck_err(trans, btree_node_topology_bad_max_key,
			     "btree node with incorrect max_key%s", buf.buf)) {
		if (b->c.level == 1 &&
		    bpos_lt(*pulled_from_scan, b->key.k.p)) {
			ret = bch2_get_scanned_nodes(c, b->c.btree_id, 0,
						     bpos_successor(child->key.k.p), b->key.k.p);
			if (ret)
				goto err;

			*pulled_from_scan = b->key.k.p;
			ret = DID_FILL_FROM_SCAN;
		} else {
			ret = set_node_max(c, child, b->key.k.p);
		}
	}
err:
fsck_err:
	printbuf_exit(&buf);
	return ret;
}

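/*
 * Walk the children of an interior node, checking/repairing the boundaries
 * between adjacent children and recursing into each child that survives;
 * whenever nodes are pulled in from the scan pass we restart from the top,
 * and if no readable children remain the caller is told to drop this node:
 */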
static int bch2_btree_repair_topology_recurse(struct btree_trans *trans, struct btree *b,
					      struct bpos *pulled_from_scan)
{
	struct bch_fs *c = trans->c;
	struct btree_and_journal_iter iter;
	struct bkey_s_c k;
	struct bkey_buf prev_k, cur_k;
	struct btree *prev = NULL, *cur = NULL;
	bool have_child, new_pass = false;
	struct printbuf buf = PRINTBUF;
	int ret = 0;

	if (!b->c.level)
		return 0;

	bch2_bkey_buf_init(&prev_k);
	bch2_bkey_buf_init(&cur_k);
again:
	cur = prev = NULL;
	have_child = new_pass = false;
	bch2_btree_and_journal_iter_init_node_iter(trans, &iter, b);
	iter.prefetch = true;

	while ((k = bch2_btree_and_journal_iter_peek(&iter)).k) {
		BUG_ON(bpos_lt(k.k->p, b->data->min_key));
		BUG_ON(bpos_gt(k.k->p, b->data->max_key));

		bch2_btree_and_journal_iter_advance(&iter);
		bch2_bkey_buf_reassemble(&cur_k, c, k);

		cur = bch2_btree_node_get_noiter(trans, cur_k.k,
						 b->c.btree_id, b->c.level - 1,
						 false);
		ret = PTR_ERR_OR_ZERO(cur);

		printbuf_reset(&buf);
		bch2_btree_id_level_to_text(&buf, b->c.btree_id, b->c.level - 1);
		prt_char(&buf, ' ');
		bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(cur_k.k));

		if (mustfix_fsck_err_on(bch2_err_matches(ret, EIO),
					trans, btree_node_read_error,
					"Topology repair: unreadable btree node at\n%s",
					buf.buf)) {
			bch2_btree_node_evict(trans, cur_k.k);
			cur = NULL;
			ret = bch2_journal_key_delete(c, b->c.btree_id,
						      b->c.level, cur_k.k->k.p);
			if (ret)
				break;

			ret = bch2_btree_lost_data(c, b->c.btree_id);
			if (ret)
				break;
			continue;
		}

		bch_err_msg(c, ret, "getting btree node");
		if (ret)
			break;

		if (bch2_btree_node_is_stale(c, cur)) {
			bch_info(c, "btree node older than nodes found by scanning\n %s", buf.buf);
			six_unlock_read(&cur->c.lock);
			bch2_btree_node_evict(trans, cur_k.k);
			ret = bch2_journal_key_delete(c, b->c.btree_id,
						      b->c.level, cur_k.k->k.p);
			cur = NULL;
			if (ret)
				break;
			continue;
		}

		ret = btree_check_node_boundaries(trans, b, prev, cur, pulled_from_scan);
		if (ret == DID_FILL_FROM_SCAN) {
			new_pass = true;
			ret = 0;
		}

		if (ret == DROP_THIS_NODE) {
			six_unlock_read(&cur->c.lock);
			bch2_btree_node_evict(trans, cur_k.k);
			ret = bch2_journal_key_delete(c, b->c.btree_id,
						      b->c.level, cur_k.k->k.p);
			cur = NULL;
			if (ret)
				break;
			continue;
		}

		if (prev)
			six_unlock_read(&prev->c.lock);
		prev = NULL;

		if (ret == DROP_PREV_NODE) {
			bch_info(c, "dropped prev node");
			bch2_btree_node_evict(trans, prev_k.k);
			ret = bch2_journal_key_delete(c, b->c.btree_id,
						      b->c.level, prev_k.k->k.p);
			if (ret)
				break;

			bch2_btree_and_journal_iter_exit(&iter);
			goto again;
		} else if (ret)
			break;

		prev = cur;
		cur = NULL;
		bch2_bkey_buf_copy(&prev_k, c, cur_k.k);
	}

	if (!ret && !IS_ERR_OR_NULL(prev)) {
		BUG_ON(cur);
		ret = btree_repair_node_end(trans, b, prev, pulled_from_scan);
		if (ret == DID_FILL_FROM_SCAN) {
			new_pass = true;
			ret = 0;
		}
	}

	if (!IS_ERR_OR_NULL(prev))
		six_unlock_read(&prev->c.lock);
	prev = NULL;
	if (!IS_ERR_OR_NULL(cur))
		six_unlock_read(&cur->c.lock);
	cur = NULL;

	if (ret)
		goto err;

	bch2_btree_and_journal_iter_exit(&iter);

	if (new_pass)
		goto again;

	bch2_btree_and_journal_iter_init_node_iter(trans, &iter, b);
	iter.prefetch = true;

	while ((k = bch2_btree_and_journal_iter_peek(&iter)).k) {
		bch2_bkey_buf_reassemble(&cur_k, c, k);
		bch2_btree_and_journal_iter_advance(&iter);

		cur = bch2_btree_node_get_noiter(trans, cur_k.k,
						 b->c.btree_id, b->c.level - 1,
						 false);
		ret = PTR_ERR_OR_ZERO(cur);

		bch_err_msg(c, ret, "getting btree node");
		if (ret)
			goto err;

		ret = bch2_btree_repair_topology_recurse(trans, cur, pulled_from_scan);
		six_unlock_read(&cur->c.lock);
		cur = NULL;

		if (ret == DROP_THIS_NODE) {
			bch2_btree_node_evict(trans, cur_k.k);
			ret = bch2_journal_key_delete(c, b->c.btree_id,
						      b->c.level, cur_k.k->k.p);
			new_pass = true;
		}

		if (ret)
			goto err;

		have_child = true;
	}

	printbuf_reset(&buf);
	bch2_btree_id_level_to_text(&buf, b->c.btree_id, b->c.level);
	prt_newline(&buf);
	bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&b->key));

	if (mustfix_fsck_err_on(!have_child,
				trans, btree_node_topology_interior_node_empty,
				"empty interior btree node at %s", buf.buf))
		ret = DROP_THIS_NODE;
err:
fsck_err:
	if (!IS_ERR_OR_NULL(prev))
		six_unlock_read(&prev->c.lock);
	if (!IS_ERR_OR_NULL(cur))
		six_unlock_read(&cur->c.lock);

	bch2_btree_and_journal_iter_exit(&iter);

	if (!ret && new_pass)
		goto again;

	BUG_ON(!ret && bch2_btree_node_check_topology(trans, b));

	bch2_bkey_buf_exit(&prev_k, c);
	bch2_bkey_buf_exit(&cur_k, c);
	printbuf_exit(&buf);
	return ret;
}

int bch2_check_topology(struct bch_fs *c)
{
	struct btree_trans *trans = bch2_trans_get(c);
	struct bpos pulled_from_scan = POS_MIN;
	struct printbuf buf = PRINTBUF;
	int ret = 0;

	bch2_trans_srcu_unlock(trans);

	for (unsigned i = 0; i < btree_id_nr_alive(c) && !ret; i++) {
		struct btree_root *r = bch2_btree_id_root(c, i);
		bool reconstructed_root = false;

		printbuf_reset(&buf);
		bch2_btree_id_to_text(&buf, i);

		if (r->error) {
			ret = bch2_btree_lost_data(c, i);
			if (ret)
				break;
reconstruct_root:
			bch_info(c, "btree root %s unreadable, must recover from scan", buf.buf);

			r->alive = false;
			r->error = 0;

			if (!bch2_btree_has_scanned_nodes(c, i)) {
				__fsck_err(trans,
					   FSCK_CAN_FIX|(!btree_id_important(i) ? FSCK_AUTOFIX : 0),
					   btree_root_unreadable_and_scan_found_nothing,
					   "no nodes found for btree %s, continue?", buf.buf);
				bch2_btree_root_alloc_fake_trans(trans, i, 0);
			} else {
				bch2_btree_root_alloc_fake_trans(trans, i, 1);
				bch2_shoot_down_journal_keys(c, i, 1, BTREE_MAX_DEPTH, POS_MIN, SPOS_MAX);
				ret = bch2_get_scanned_nodes(c, i, 0, POS_MIN, SPOS_MAX);
				if (ret)
					break;
			}

			reconstructed_root = true;
		}

		struct btree *b = r->b;

		btree_node_lock_nopath_nofail(trans, &b->c, SIX_LOCK_read);
		ret = bch2_btree_repair_topology_recurse(trans, b, &pulled_from_scan);
		six_unlock_read(&b->c.lock);

		if (ret == DROP_THIS_NODE) {
			mutex_lock(&c->btree_cache.lock);
			bch2_btree_node_hash_remove(&c->btree_cache, b);
			mutex_unlock(&c->btree_cache.lock);

			r->b = NULL;

			if (!reconstructed_root)
				goto reconstruct_root;

			bch_err(c, "empty btree root %s", buf.buf);
			bch2_btree_root_alloc_fake_trans(trans, i, 0);
			r->alive = false;
			ret = 0;
		}
	}
fsck_err:
	printbuf_exit(&buf);
	bch2_trans_put(trans);
	return ret;
}

/* marking of btree keys/nodes: */

static int bch2_gc_mark_key(struct btree_trans *trans, enum btree_id btree_id,
			    unsigned level, struct btree **prev,
			    struct btree_iter *iter, struct bkey_s_c k,
			    bool initial)
{
	struct bch_fs *c = trans->c;

	if (iter) {
		struct btree_path *path = btree_iter_path(trans, iter);
		struct btree *b = path_l(path)->b;

		if (*prev != b) {
			int ret = bch2_btree_node_check_topology(trans, b);
			if (ret)
				return ret;
		}
		*prev = b;
	}

	struct bkey deleted = KEY(0, 0, 0);
	struct bkey_s_c old = (struct bkey_s_c) { &deleted, NULL };
	struct printbuf buf = PRINTBUF;
	int ret = 0;

	deleted.p = k.k->p;

	if (initial) {
		BUG_ON(bch2_journal_seq_verify &&
		       k.k->bversion.lo > atomic64_read(&c->journal.seq));

		if (fsck_err_on(btree_id != BTREE_ID_accounting &&
				k.k->bversion.lo > atomic64_read(&c->key_version),
				trans, bkey_version_in_future,
				"key version number higher than recorded %llu\n%s",
				atomic64_read(&c->key_version),
				(bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
			atomic64_set(&c->key_version, k.k->bversion.lo);
	}

	if (mustfix_fsck_err_on(level && !bch2_dev_btree_bitmap_marked(c, k),
				trans, btree_bitmap_not_marked,
				"btree ptr not marked in member info btree allocated bitmap\n%s",
				(printbuf_reset(&buf),
				 bch2_bkey_val_to_text(&buf, c, k),
				 buf.buf))) {
		mutex_lock(&c->sb_lock);
		bch2_dev_btree_bitmap_mark(c, k);
		bch2_write_super(c);
		mutex_unlock(&c->sb_lock);
	}

	/*
	 * We require a commit before key_trigger() because
	 * key_trigger(BTREE_TRIGGER_GC) is not idempotent; we'll calculate the
	 * wrong result if we run it multiple times.
	 */
	unsigned flags = !iter ? BTREE_TRIGGER_is_root : 0;

	ret = bch2_key_trigger(trans, btree_id, level, old, unsafe_bkey_s_c_to_s(k),
			       BTREE_TRIGGER_check_repair|flags);
	if (ret)
		goto out;

	if (trans->nr_updates) {
		ret = bch2_trans_commit(trans, NULL, NULL, 0) ?:
			-BCH_ERR_transaction_restart_nested;
		goto out;
	}

	ret = bch2_key_trigger(trans, btree_id, level, old, unsafe_bkey_s_c_to_s(k),
			       BTREE_TRIGGER_gc|BTREE_TRIGGER_insert|flags);
out:
fsck_err:
	printbuf_exit(&buf);
	bch_err_fn(c, ret);
	return ret;
}

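/*
 * Mark all keys in one btree: each level is walked in order, leaves first,
 * then the root key itself is marked, retrying on transaction restart:
 */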
static int bch2_gc_btree(struct btree_trans *trans,
			 struct progress_indicator_state *progress,
			 enum btree_id btree, bool initial)
{
	struct bch_fs *c = trans->c;
	unsigned target_depth = btree_node_type_has_triggers(__btree_node_type(0, btree)) ? 0 : 1;
	int ret = 0;

	/* We need to make sure every leaf node is readable before going RW */
	if (initial)
		target_depth = 0;

	for (unsigned level = target_depth; level < BTREE_MAX_DEPTH; level++) {
		struct btree *prev = NULL;
		struct btree_iter iter;
		bch2_trans_node_iter_init(trans, &iter, btree, POS_MIN, 0, level,
					  BTREE_ITER_prefetch);

		ret = for_each_btree_key_continue(trans, iter, 0, k, ({
			bch2_progress_update_iter(trans, progress, &iter, "check_allocations");
			gc_pos_set(c, gc_pos_btree(btree, level, k.k->p));
			bch2_gc_mark_key(trans, btree, level, &prev, &iter, k, initial);
		}));
		if (ret)
			goto err;
	}

	/* root */
	do {
retry_root:
		bch2_trans_begin(trans);

		struct btree_iter iter;
		bch2_trans_node_iter_init(trans, &iter, btree, POS_MIN,
					  0, bch2_btree_id_root(c, btree)->b->c.level, 0);
		struct btree *b = bch2_btree_iter_peek_node(trans, &iter);
		ret = PTR_ERR_OR_ZERO(b);
		if (ret)
			goto err_root;

		if (b != btree_node_root(c, b)) {
			bch2_trans_iter_exit(trans, &iter);
			goto retry_root;
		}

		gc_pos_set(c, gc_pos_btree(btree, b->c.level + 1, SPOS_MAX));
		struct bkey_s_c k = bkey_i_to_s_c(&b->key);
		ret = bch2_gc_mark_key(trans, btree, b->c.level + 1, NULL, NULL, k, initial);
err_root:
		bch2_trans_iter_exit(trans, &iter);
	} while (bch2_err_matches(ret, BCH_ERR_transaction_restart));
err:
	bch_err_fn(c, ret);
	return ret;
}

static inline int btree_id_gc_phase_cmp(enum btree_id l, enum btree_id r)
{
	return cmp_int(gc_btree_order(l), gc_btree_order(r));
}

static int bch2_gc_btrees(struct bch_fs *c)
{
	struct btree_trans *trans = bch2_trans_get(c);
	struct printbuf buf = PRINTBUF;
	int ret = 0;

	struct progress_indicator_state progress;
	bch2_progress_init(&progress, c, ~0ULL);

	enum btree_id ids[BTREE_ID_NR];
	for (unsigned i = 0; i < BTREE_ID_NR; i++)
		ids[i] = i;
	bubble_sort(ids, BTREE_ID_NR, btree_id_gc_phase_cmp);

	for (unsigned i = 0; i < btree_id_nr_alive(c) && !ret; i++) {
		unsigned btree = i < BTREE_ID_NR ? ids[i] : i;

		if (IS_ERR_OR_NULL(bch2_btree_id_root(c, btree)->b))
			continue;

		ret = bch2_gc_btree(trans, &progress, btree, true);
	}

	printbuf_exit(&buf);
	bch2_trans_put(trans);
	bch_err_fn(c, ret);
	return ret;
}

static int bch2_mark_superblocks(struct bch_fs *c)
{
	gc_pos_set(c, gc_phase(GC_PHASE_sb));

	return bch2_trans_mark_dev_sbs_flags(c, BTREE_TRIGGER_gc);
}

static void bch2_gc_free(struct bch_fs *c)
{
	bch2_accounting_gc_free(c);

	genradix_free(&c->reflink_gc_table);
	genradix_free(&c->gc_stripes);

	for_each_member_device(c, ca)
		genradix_free(&ca->buckets_gc);
}

static int bch2_gc_start(struct bch_fs *c)
{
	for_each_member_device(c, ca) {
		int ret = bch2_dev_usage_init(ca, true);
		if (ret) {
			bch2_dev_put(ca);
			return ret;
		}
	}

	return 0;
}

/* returns true if not equal */
static inline bool bch2_alloc_v4_cmp(struct bch_alloc_v4 l,
				     struct bch_alloc_v4 r)
{
	return l.gen != r.gen ||
		l.oldest_gen != r.oldest_gen ||
		l.data_type != r.data_type ||
		l.dirty_sectors != r.dirty_sectors ||
		l.stripe_sectors != r.stripe_sectors ||
		l.cached_sectors != r.cached_sectors ||
		l.stripe_redundancy != r.stripe_redundancy ||
		l.stripe != r.stripe;
}

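/*
 * Compare an alloc key against the bucket state GC just recomputed (in
 * buckets_gc); any mismatch is reported via fsck_err and the corrected alloc
 * key is written back:
 */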
static int bch2_alloc_write_key(struct btree_trans *trans,
				struct btree_iter *iter,
				struct bch_dev *ca,
				struct bkey_s_c k)
{
	struct bch_fs *c = trans->c;
	struct bkey_i_alloc_v4 *a;
	struct bch_alloc_v4 old_gc, gc, old_convert, new;
	const struct bch_alloc_v4 *old;
	int ret;

	if (!bucket_valid(ca, k.k->p.offset))
		return 0;

	old = bch2_alloc_to_v4(k, &old_convert);
	gc = new = *old;

	__bucket_m_to_alloc(&gc, *gc_bucket(ca, iter->pos.offset));

	old_gc = gc;

	if ((old->data_type == BCH_DATA_sb ||
	     old->data_type == BCH_DATA_journal) &&
	    !bch2_dev_is_online(ca)) {
		gc.data_type = old->data_type;
		gc.dirty_sectors = old->dirty_sectors;
	}

	/*
	 * gc.data_type doesn't yet include need_discard & need_gc_gen states -
	 * fix that here:
	 */
	alloc_data_type_set(&gc, gc.data_type);
	if (gc.data_type != old_gc.data_type ||
	    gc.dirty_sectors != old_gc.dirty_sectors) {
		ret = bch2_alloc_key_to_dev_counters(trans, ca, &old_gc, &gc, BTREE_TRIGGER_gc);
		if (ret)
			return ret;

		/*
		 * Ugly: alloc_key_to_dev_counters(..., BTREE_TRIGGER_gc) is not
		 * safe w.r.t. transaction restarts, so fixup the gc_bucket so
		 * we don't run it twice:
		 */
		struct bucket *gc_m = gc_bucket(ca, iter->pos.offset);
		gc_m->data_type = gc.data_type;
		gc_m->dirty_sectors = gc.dirty_sectors;
	}

	if (fsck_err_on(new.data_type != gc.data_type,
			trans, alloc_key_data_type_wrong,
			"bucket %llu:%llu gen %u has wrong data_type"
			": got %s, should be %s",
			iter->pos.inode, iter->pos.offset,
			gc.gen,
			bch2_data_type_str(new.data_type),
			bch2_data_type_str(gc.data_type)))
		new.data_type = gc.data_type;

#define copy_bucket_field(_errtype, _f)					\
	if (fsck_err_on(new._f != gc._f,				\
			trans, _errtype,				\
			"bucket %llu:%llu gen %u data type %s has wrong " #_f \
			": got %llu, should be %llu",			\
			iter->pos.inode, iter->pos.offset,		\
			gc.gen,						\
			bch2_data_type_str(gc.data_type),		\
			(u64) new._f, (u64) gc._f))			\
		new._f = gc._f;						\

	copy_bucket_field(alloc_key_gen_wrong, gen);
	copy_bucket_field(alloc_key_dirty_sectors_wrong, dirty_sectors);
	copy_bucket_field(alloc_key_stripe_sectors_wrong, stripe_sectors);
	copy_bucket_field(alloc_key_cached_sectors_wrong, cached_sectors);
	copy_bucket_field(alloc_key_stripe_wrong, stripe);
	copy_bucket_field(alloc_key_stripe_redundancy_wrong, stripe_redundancy);
#undef copy_bucket_field

	if (!bch2_alloc_v4_cmp(*old, new))
		return 0;

	a = bch2_alloc_to_v4_mut(trans, k);
	ret = PTR_ERR_OR_ZERO(a);
	if (ret)
		return ret;

	a->v = new;

	/*
	 * The trigger normally makes sure these are set, but we're not running
	 * triggers:
	 */
	if (a->v.data_type == BCH_DATA_cached && !a->v.io_time[READ])
		a->v.io_time[READ] = max_t(u64, 1, atomic64_read(&c->io_clock[READ].now));

	ret = bch2_trans_update(trans, iter, &a->k_i, BTREE_TRIGGER_norun);
fsck_err:
	return ret;
}

static int bch2_gc_alloc_done(struct bch_fs *c)
{
	int ret = 0;

	for_each_member_device(c, ca) {
		ret = bch2_trans_run(c,
			for_each_btree_key_max_commit(trans, iter, BTREE_ID_alloc,
					POS(ca->dev_idx, ca->mi.first_bucket),
					POS(ca->dev_idx, ca->mi.nbuckets - 1),
					BTREE_ITER_slots|BTREE_ITER_prefetch, k,
					NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
				bch2_alloc_write_key(trans, &iter, ca, k)));
		if (ret) {
			bch2_dev_put(ca);
			break;
		}
	}

	bch_err_fn(c, ret);
	return ret;
}

static int bch2_gc_alloc_start(struct bch_fs *c)
{
	int ret = 0;

	for_each_member_device(c, ca) {
		ret = genradix_prealloc(&ca->buckets_gc, ca->mi.nbuckets, GFP_KERNEL);
		if (ret) {
			bch2_dev_put(ca);
			ret = -BCH_ERR_ENOMEM_gc_alloc_start;
			break;
		}
	}

	bch_err_fn(c, ret);
	return ret;
}

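/*
 * The stripes counterpart to bch2_alloc_write_key(): compare each stripe's
 * per-block sector counts against what GC accumulated in gc_stripes, and
 * rewrite the stripe key if they disagree:
 */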
static int bch2_gc_write_stripes_key(struct btree_trans *trans,
				     struct btree_iter *iter,
				     struct bkey_s_c k)
{
	struct bch_fs *c = trans->c;
	struct printbuf buf = PRINTBUF;
	const struct bch_stripe *s;
	struct gc_stripe *m;
	bool bad = false;
	unsigned i;
	int ret = 0;

	if (k.k->type != KEY_TYPE_stripe)
		return 0;

	s = bkey_s_c_to_stripe(k).v;
	m = genradix_ptr(&c->gc_stripes, k.k->p.offset);

	for (i = 0; i < s->nr_blocks; i++) {
		u32 old = stripe_blockcount_get(s, i);
		u32 new = (m ? m->block_sectors[i] : 0);

		if (old != new) {
			prt_printf(&buf, "stripe block %u has wrong sector count: got %u, should be %u\n",
				   i, old, new);
			bad = true;
		}
	}

	if (bad)
		bch2_bkey_val_to_text(&buf, c, k);

	if (fsck_err_on(bad,
			trans, stripe_sector_count_wrong,
			"%s", buf.buf)) {
		struct bkey_i_stripe *new;

		new = bch2_trans_kmalloc(trans, bkey_bytes(k.k));
		ret = PTR_ERR_OR_ZERO(new);
		if (ret)
			return ret;

		bkey_reassemble(&new->k_i, k);

		for (i = 0; i < new->v.nr_blocks; i++)
			stripe_blockcount_set(&new->v, i, m ? m->block_sectors[i] : 0);

		ret = bch2_trans_update(trans, iter, &new->k_i, 0);
	}
fsck_err:
	printbuf_exit(&buf);
	return ret;
}

static int bch2_gc_stripes_done(struct bch_fs *c)
{
	return bch2_trans_run(c,
		for_each_btree_key_commit(trans, iter,
				BTREE_ID_stripes, POS_MIN,
				BTREE_ITER_prefetch, k,
				NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
			bch2_gc_write_stripes_key(trans, &iter, k)));
}

/**
 * bch2_check_allocations - walk all references to buckets, and recompute them:
 *
 * @c:			filesystem object
 *
 * Returns: 0 on success, or standard errcode on failure
 *
 * Order matters here:
 *  - Concurrent GC relies on the fact that we have a total ordering for
 *    everything that GC walks - see gc_will_visit_node(),
 *    gc_will_visit_root()
 *
 *  - also, references move around in the course of index updates and
 *    various other crap: everything needs to agree on the ordering
 *    references are allowed to move around in - e.g., we're allowed to
 *    start with a reference owned by an open_bucket (the allocator) and
 *    move it to the btree, but not the reverse.
 *
 *    This is necessary to ensure that gc doesn't miss references that
 *    move around - if references move backwards in the ordering GC
 *    uses, GC could skip past them
 */
int bch2_check_allocations(struct bch_fs *c)
{
	int ret;

	down_read(&c->state_lock);
	down_write(&c->gc_lock);

	bch2_btree_interior_updates_flush(c);

	ret = bch2_gc_accounting_start(c) ?:
		bch2_gc_start(c) ?:
		bch2_gc_alloc_start(c) ?:
		bch2_gc_reflink_start(c);
	if (ret)
		goto out;

	gc_pos_set(c, gc_phase(GC_PHASE_start));

	ret = bch2_mark_superblocks(c);
	bch_err_msg(c, ret, "marking superblocks");
	if (ret)
		goto out;

	ret = bch2_gc_btrees(c);
	if (ret)
		goto out;

	c->gc_count++;

	ret = bch2_gc_alloc_done(c) ?:
		bch2_gc_accounting_done(c) ?:
		bch2_gc_stripes_done(c) ?:
		bch2_gc_reflink_done(c);
out:
	percpu_down_write(&c->mark_lock);
	/* Indicates that gc is no longer in progress: */
	__gc_pos_set(c, gc_phase(GC_PHASE_not_running));

	bch2_gc_free(c);
	percpu_up_write(&c->mark_lock);

	up_write(&c->gc_lock);
	up_read(&c->state_lock);

	/*
	 * At startup, allocations can happen directly instead of via the
	 * allocator thread - issue wakeup in case they blocked on gc_lock:
	 */
	closure_wake_up(&c->freelist_wait);
	bch_err_fn(c, ret);
	return ret;
}

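/*
 * If any pointer in the key is more than 16 gens behind its bucket, rewrite
 * the key - bch2_extent_normalize() drops stale cached pointers - so gen
 * deltas stay bounded; otherwise record, per bucket, the oldest gen of any
 * pointer still referencing it:
 */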
static int gc_btree_gens_key(struct btree_trans *trans,
			     struct btree_iter *iter,
			     struct bkey_s_c k)
{
	struct bch_fs *c = trans->c;
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	struct bkey_i *u;
	int ret;

	if (unlikely(test_bit(BCH_FS_going_ro, &c->flags)))
		return -EROFS;

	rcu_read_lock();
	bkey_for_each_ptr(ptrs, ptr) {
		struct bch_dev *ca = bch2_dev_rcu(c, ptr->dev);
		if (!ca)
			continue;

		if (dev_ptr_stale(ca, ptr) > 16) {
			rcu_read_unlock();
			goto update;
		}
	}

	bkey_for_each_ptr(ptrs, ptr) {
		struct bch_dev *ca = bch2_dev_rcu(c, ptr->dev);
		if (!ca)
			continue;

		u8 *gen = &ca->oldest_gen[PTR_BUCKET_NR(ca, ptr)];
		if (gen_after(*gen, ptr->gen))
			*gen = ptr->gen;
	}
	rcu_read_unlock();
	return 0;
update:
	u = bch2_bkey_make_mut(trans, iter, &k, 0);
	ret = PTR_ERR_OR_ZERO(u);
	if (ret)
		return ret;

	bch2_extent_normalize(c, bkey_i_to_s(u));
	return 0;
}

static int bch2_alloc_write_oldest_gen(struct btree_trans *trans, struct bch_dev *ca,
				       struct btree_iter *iter, struct bkey_s_c k)
{
	struct bch_alloc_v4 a_convert;
	const struct bch_alloc_v4 *a = bch2_alloc_to_v4(k, &a_convert);
	struct bkey_i_alloc_v4 *a_mut;
	int ret;

	if (a->oldest_gen == ca->oldest_gen[iter->pos.offset])
		return 0;

	a_mut = bch2_alloc_to_v4_mut(trans, k);
	ret = PTR_ERR_OR_ZERO(a_mut);
	if (ret)
		return ret;

	a_mut->v.oldest_gen = ca->oldest_gen[iter->pos.offset];

	return bch2_trans_update(trans, iter, &a_mut->k_i, 0);
}

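/*
 * Bucket generation numbers are 8 bit and wrap around; walk every btree with
 * pointers to recompute each bucket's oldest referencing gen, then write the
 * results back to the alloc btree so gens can continue to advance safely:
 */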
int bch2_gc_gens(struct bch_fs *c)
{
	u64 b, start_time = local_clock();
	int ret;

	if (!mutex_trylock(&c->gc_gens_lock))
		return 0;

	trace_and_count(c, gc_gens_start, c);

	/*
	 * We have to use trylock here. Otherwise, we would
	 * introduce a deadlock in the RO path - we take the
	 * state lock at the start of going RO.
	 */
	if (!down_read_trylock(&c->state_lock)) {
		mutex_unlock(&c->gc_gens_lock);
		return 0;
	}

	for_each_member_device(c, ca) {
		struct bucket_gens *gens = bucket_gens(ca);

		BUG_ON(ca->oldest_gen);

		ca->oldest_gen = kvmalloc(gens->nbuckets, GFP_KERNEL);
		if (!ca->oldest_gen) {
			bch2_dev_put(ca);
			ret = -BCH_ERR_ENOMEM_gc_gens;
			goto err;
		}

		for (b = gens->first_bucket;
		     b < gens->nbuckets; b++)
			ca->oldest_gen[b] = gens->b[b];
	}

	for (unsigned i = 0; i < BTREE_ID_NR; i++)
		if (btree_type_has_ptrs(i)) {
			c->gc_gens_btree = i;
			c->gc_gens_pos = POS_MIN;

			ret = bch2_trans_run(c,
				for_each_btree_key_commit(trans, iter, i,
						POS_MIN,
						BTREE_ITER_prefetch|BTREE_ITER_all_snapshots,
						k,
						NULL, NULL,
						BCH_TRANS_COMMIT_no_enospc,
					gc_btree_gens_key(trans, &iter, k)));
			if (ret)
				goto err;
		}

	struct bch_dev *ca = NULL;
	ret = bch2_trans_run(c,
		for_each_btree_key_commit(trans, iter, BTREE_ID_alloc,
				POS_MIN,
				BTREE_ITER_prefetch,
				k,
				NULL, NULL,
				BCH_TRANS_COMMIT_no_enospc, ({
			ca = bch2_dev_iterate(c, ca, k.k->p.inode);
			if (!ca) {
				bch2_btree_iter_set_pos(trans, &iter, POS(k.k->p.inode + 1, 0));
				continue;
			}
			bch2_alloc_write_oldest_gen(trans, ca, &iter, k);
		})));
	bch2_dev_put(ca);

	if (ret)
		goto err;

	c->gc_gens_btree = 0;
	c->gc_gens_pos = POS_MIN;

	c->gc_count++;

	bch2_time_stats_update(&c->times[BCH_TIME_btree_gc], start_time);
	trace_and_count(c, gc_gens_end, c);
err:
	for_each_member_device(c, ca) {
		kvfree(ca->oldest_gen);
		ca->oldest_gen = NULL;
	}

	up_read(&c->state_lock);
	mutex_unlock(&c->gc_gens_lock);
	if (!bch2_err_matches(ret, EROFS))
		bch_err_fn(c, ret);
	return ret;
}

static void bch2_gc_gens_work(struct work_struct *work)
{
	struct bch_fs *c = container_of(work, struct bch_fs, gc_gens_work);
	bch2_gc_gens(c);
	bch2_write_ref_put(c, BCH_WRITE_REF_gc_gens);
}

void bch2_gc_gens_async(struct bch_fs *c)
{
	if (bch2_write_ref_tryget(c, BCH_WRITE_REF_gc_gens) &&
	    !queue_work(c->write_ref_wq, &c->gc_gens_work))
		bch2_write_ref_put(c, BCH_WRITE_REF_gc_gens);
}

void bch2_fs_btree_gc_exit(struct bch_fs *c)
{
}

int bch2_fs_btree_gc_init(struct bch_fs *c)
{
	seqcount_init(&c->gc_pos_lock);
	INIT_WORK(&c->gc_gens_work, bch2_gc_gens_work);

	init_rwsem(&c->gc_lock);
	mutex_init(&c->gc_gens_lock);
	return 0;
}
