// SPDX-License-Identifier: GPL-2.0
/*
 * Moving/copying garbage collector
 *
 * Copyright 2012 Google, Inc.
 */

#include "bcachefs.h"
#include "alloc_background.h"
#include "alloc_foreground.h"
#include "btree_iter.h"
#include "btree_update.h"
#include "btree_write_buffer.h"
#include "buckets.h"
#include "clock.h"
#include "errcode.h"
#include "error.h"
#include "lru.h"
#include "move.h"
#include "movinggc.h"
#include "trace.h"

#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/math64.h>
#include <linux/sched/task.h>
#include <linux/wait.h>

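/*
 * Buckets currently being evacuated: tracked both in a hash table (so we can
 * cheaply check whether a bucket is already in flight) and in a singly linked
 * FIFO ordered by submission, with running totals of buckets and sectors in
 * flight.
 */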
struct buckets_in_flight {
        struct rhashtable               table;
        struct move_bucket_in_flight    *first;
        struct move_bucket_in_flight    *last;
        size_t                          nr;
        size_t                          sectors;
};

static const struct rhashtable_params bch_move_bucket_params = {
        .head_offset            = offsetof(struct move_bucket_in_flight, hash),
        .key_offset             = offsetof(struct move_bucket_in_flight, bucket.k),
        .key_len                = sizeof(struct move_bucket_key),
        .automatic_shrinking    = true,
};

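/*
 * Allocate a tracking entry for @b, insert it into the hash table and append
 * it to the FIFO. Returns ERR_PTR(-EEXIST) if the bucket is already in
 * flight, or ERR_PTR(-ENOMEM) on allocation failure.
 */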
static struct move_bucket_in_flight *
move_bucket_in_flight_add(struct buckets_in_flight *list, struct move_bucket b)
{
        struct move_bucket_in_flight *new = kzalloc(sizeof(*new), GFP_KERNEL);
        int ret;

        if (!new)
                return ERR_PTR(-ENOMEM);

        new->bucket = b;

        ret = rhashtable_lookup_insert_fast(&list->table, &new->hash,
                                            bch_move_bucket_params);
        if (ret) {
                kfree(new);
                return ERR_PTR(ret);
        }

        if (!list->first)
                list->first = new;
        else
                list->last->next = new;

        list->last = new;
        list->nr++;
        list->sectors += b.sectors;
        return new;
}

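/*
 * Returns 1 if the bucket is a usable copygc candidate (not open, on an
 * online rw member, with a nonzero fragmentation LRU index <= @time),
 * 0 if not, or a negative error code. Also fills in @b's gen and dirty
 * sector count.
 */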
static int bch2_bucket_is_movable(struct btree_trans *trans,
                                  struct move_bucket *b, u64 time)
{
        struct bch_fs *c = trans->c;

        if (bch2_bucket_is_open(c, b->k.bucket.inode, b->k.bucket.offset))
                return 0;

        struct btree_iter iter;
        struct bkey_s_c k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_alloc,
                                               b->k.bucket, BTREE_ITER_cached);
        int ret = bkey_err(k);
        if (ret)
                return ret;

        struct bch_dev *ca = bch2_dev_tryget(c, k.k->p.inode);
        if (!ca)
                goto out;

        if (ca->mi.state != BCH_MEMBER_STATE_rw ||
            !bch2_dev_is_online(ca))
                goto out_put;

        struct bch_alloc_v4 _a;
        const struct bch_alloc_v4 *a = bch2_alloc_to_v4(k, &_a);
        b->k.gen        = a->gen;
        b->sectors      = bch2_bucket_sectors_dirty(*a);
        u64 lru_idx     = alloc_lru_idx_fragmentation(*a, ca);

        ret = lru_idx && lru_idx <= time;
out_put:
        bch2_dev_put(ca);
out:
        bch2_trans_iter_exit(trans, &iter);
        return ret;
}

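/*
 * Reap completed buckets from the front of the in flight list; if @flush is
 * set, wait for each bucket's outstanding moves to finish first.
 */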
static void move_buckets_wait(struct moving_context *ctxt,
                              struct buckets_in_flight *list,
                              bool flush)
{
        struct move_bucket_in_flight *i;
        int ret;

        while ((i = list->first)) {
                if (flush)
                        move_ctxt_wait_event(ctxt, !atomic_read(&i->count));

                if (atomic_read(&i->count))
                        break;

                list->first = i->next;
                if (!list->first)
                        list->last = NULL;

                list->nr--;
                list->sectors -= i->bucket.sectors;

                ret = rhashtable_remove_fast(&list->table, &i->hash,
                                             bch_move_bucket_params);
                BUG_ON(ret);
                kfree(i);
        }

        bch2_trans_unlock_long(ctxt->trans);
}

static bool bucket_in_flight(struct buckets_in_flight *list,
                             struct move_bucket_key k)
{
        return rhashtable_lookup_fast(&list->table, &k, bch_move_bucket_params);
}

typedef DARRAY(struct move_bucket) move_buckets;

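/*
 * Scan the fragmentation LRU and collect a batch of movable buckets that
 * aren't already being evacuated.
 */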
static int bch2_copygc_get_buckets(struct moving_context *ctxt,
                        struct buckets_in_flight *buckets_in_flight,
                        move_buckets *buckets)
{
        struct btree_trans *trans = ctxt->trans;
        struct bch_fs *c = trans->c;
        size_t nr_to_get = max_t(size_t, 16U, buckets_in_flight->nr / 4);
        size_t saw = 0, in_flight = 0, not_movable = 0, sectors = 0;
        int ret;

        move_buckets_wait(ctxt, buckets_in_flight, false);

        ret = bch2_btree_write_buffer_tryflush(trans);
        if (bch2_err_matches(ret, EROFS))
                return ret;

        if (bch2_fs_fatal_err_on(ret, c, "%s: from bch2_btree_write_buffer_tryflush()", bch2_err_str(ret)))
                return ret;

        bch2_trans_begin(trans);

        ret = for_each_btree_key_max(trans, iter, BTREE_ID_lru,
                                  lru_pos(BCH_LRU_BUCKET_FRAGMENTATION, 0, 0),
                                  lru_pos(BCH_LRU_BUCKET_FRAGMENTATION, U64_MAX, LRU_TIME_MAX),
                                  0, k, ({
                struct move_bucket b = { .k.bucket = u64_to_bucket(k.k->p.offset) };
                int ret2 = 0;

                saw++;

                ret2 = bch2_bucket_is_movable(trans, &b, lru_pos_time(k.k->p));
                if (ret2 < 0)
                        goto err;

                if (!ret2)
                        not_movable++;
                else if (bucket_in_flight(buckets_in_flight, b.k))
                        in_flight++;
                else {
                        ret2 = darray_push(buckets, b);
                        if (ret2)
                                goto err;
                        sectors += b.sectors;
                }

                ret2 = buckets->nr >= nr_to_get;
err:
                ret2;
        }));

        pr_debug("have: %zu (%zu) saw %zu in flight %zu not movable %zu got %zu (%zu)/%zu buckets ret %i",
                 buckets_in_flight->nr, buckets_in_flight->sectors,
                 saw, in_flight, not_movable, buckets->nr, sectors, nr_to_get, ret);

        return ret < 0 ? ret : 0;
}

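/*
 * One copygc pass: grab a batch of fragmented buckets and evacuate their
 * remaining live data so the buckets can be reused.
 */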
noinline
static int bch2_copygc(struct moving_context *ctxt,
                       struct buckets_in_flight *buckets_in_flight,
                       bool *did_work)
{
        struct btree_trans *trans = ctxt->trans;
        struct bch_fs *c = trans->c;
        struct data_update_opts data_opts = {
                .btree_insert_flags = BCH_WATERMARK_copygc,
        };
        move_buckets buckets = { 0 };
        struct move_bucket_in_flight *f;
        u64 sectors_seen  = atomic64_read(&ctxt->stats->sectors_seen);
        u64 sectors_moved = atomic64_read(&ctxt->stats->sectors_moved);
        int ret = 0;

        ret = bch2_copygc_get_buckets(ctxt, buckets_in_flight, &buckets);
        if (ret)
                goto err;

        darray_for_each(buckets, i) {
                if (kthread_should_stop() || freezing(current))
                        break;

                f = move_bucket_in_flight_add(buckets_in_flight, *i);
                ret = PTR_ERR_OR_ZERO(f);
                if (ret == -EEXIST) { /* rare race: copygc_get_buckets returned same bucket more than once */
                        ret = 0;
                        continue;
                }
                if (ret == -ENOMEM) { /* flush IO, continue later */
                        ret = 0;
                        break;
                }

                ret = bch2_evacuate_bucket(ctxt, f, f->bucket.k.bucket,
                                           f->bucket.k.gen, data_opts);
                if (ret)
                        goto err;

                *did_work = true;
        }
err:

        /* no entries in LRU btree found, or got to end: */
        if (bch2_err_matches(ret, ENOENT))
                ret = 0;

        if (ret < 0 && !bch2_err_matches(ret, EROFS))
                bch_err_msg(c, ret, "from bch2_move_data()");

        sectors_seen  = atomic64_read(&ctxt->stats->sectors_seen) - sectors_seen;
        sectors_moved = atomic64_read(&ctxt->stats->sectors_moved) - sectors_moved;
        trace_and_count(c, copygc, c, buckets.nr, sectors_seen, sectors_moved);

        darray_exit(&buckets);
        return ret;
}

/*
 * Copygc runs when the amount of fragmented data is above some arbitrary
 * threshold:
 *
 * The threshold at the limit - when the device is full - is the amount of space
 * we reserved in bch2_recalc_capacity; we can't have more than that amount of
 * disk space stranded due to fragmentation and store everything we have
 * promised to store.
 *
 * But we don't want to be running copygc unnecessarily when the device still
 * has plenty of free space - rather, we want copygc to smoothly run every so
 * often and continually reduce the amount of fragmented space as the device
 * fills up. So, we increase the threshold by half the current free space.
 */
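/*
 * Illustration (made-up numbers): with 1024 buckets available at the stripe
 * watermark and a bucket size of 1024 sectors, fragmented_allowed comes to
 * (1024 * 1024) / 2 = 524288 sectors. If 100000 sectors are currently
 * fragmented on that device, copygc won't run until roughly another 424288
 * sectors have been written on the IO clock.
 */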
unsigned long bch2_copygc_wait_amount(struct bch_fs *c)
{
        s64 wait = S64_MAX, fragmented_allowed, fragmented;

        for_each_rw_member(c, ca) {
                struct bch_dev_usage_full usage_full = bch2_dev_usage_full_read(ca);
                struct bch_dev_usage usage;

                for (unsigned i = 0; i < BCH_DATA_NR; i++)
                        usage.buckets[i] = usage_full.d[i].buckets;

                fragmented_allowed = ((__dev_buckets_available(ca, usage, BCH_WATERMARK_stripe) *
                                       ca->mi.bucket_size) >> 1);
                fragmented = 0;

                for (unsigned i = 0; i < BCH_DATA_NR; i++)
                        if (data_type_movable(i))
                                fragmented += usage_full.d[i].fragmented;

                wait = min(wait, max(0LL, fragmented_allowed - fragmented));
        }

        return wait;
}

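/*
 * Print the current copygc wait state, and a backtrace of the copygc thread,
 * to @out.
 */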
void bch2_copygc_wait_to_text(struct printbuf *out, struct bch_fs *c)
{
        printbuf_tabstop_push(out, 32);
        prt_printf(out, "running:\t%u\n", c->copygc_running);
        prt_printf(out, "copygc_wait:\t%llu\n", c->copygc_wait);
        prt_printf(out, "copygc_wait_at:\t%llu\n", c->copygc_wait_at);

        prt_printf(out, "Currently waiting for:\t");
        prt_human_readable_u64(out, max(0LL, c->copygc_wait -
                                        atomic64_read(&c->io_clock[WRITE].now)) << 9);
        prt_newline(out);

        prt_printf(out, "Currently waiting since:\t");
        prt_human_readable_u64(out, max(0LL,
                                        atomic64_read(&c->io_clock[WRITE].now) -
                                        c->copygc_wait_at) << 9);
        prt_newline(out);

        prt_printf(out, "Currently calculated wait:\t");
        prt_human_readable_u64(out, bch2_copygc_wait_amount(c));
        prt_newline(out);

        rcu_read_lock();
        struct task_struct *t = rcu_dereference(c->copygc_thread);
        if (t)
                get_task_struct(t);
        rcu_read_unlock();

        if (t) {
                bch2_prt_task_backtrace(out, t, 0, GFP_KERNEL);
                put_task_struct(t);
        }
}

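/*
 * Main copygc loop: sleep on the write IO clock until the fragmentation
 * threshold is exceeded, then run copygc passes; also respects the
 * copygc_enabled option and freezer requests.
 */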
static int bch2_copygc_thread(void *arg)
{
        struct bch_fs *c = arg;
        struct moving_context ctxt;
        struct bch_move_stats move_stats;
        struct io_clock *clock = &c->io_clock[WRITE];
        struct buckets_in_flight *buckets;
        u64 last, wait;
        int ret = 0;

        buckets = kzalloc(sizeof(struct buckets_in_flight), GFP_KERNEL);
        if (!buckets)
                return -ENOMEM;
        ret = rhashtable_init(&buckets->table, &bch_move_bucket_params);
        bch_err_msg(c, ret, "allocating copygc buckets in flight");
        if (ret) {
                kfree(buckets);
                return ret;
        }

        set_freezable();

        /*
         * Data move operations can't run until after check_snapshots has
         * completed, and bch2_snapshot_is_ancestor() is available.
         */
        kthread_wait_freezable(c->recovery_pass_done > BCH_RECOVERY_PASS_check_snapshots ||
                               kthread_should_stop());

        bch2_move_stats_init(&move_stats, "copygc");
        bch2_moving_ctxt_init(&ctxt, c, NULL, &move_stats,
                              writepoint_ptr(&c->copygc_write_point),
                              false);

        while (!ret && !kthread_should_stop()) {
                bool did_work = false;

                bch2_trans_unlock_long(ctxt.trans);
                cond_resched();

                if (!c->opts.copygc_enabled) {
                        move_buckets_wait(&ctxt, buckets, true);
                        kthread_wait_freezable(c->opts.copygc_enabled ||
                                               kthread_should_stop());
                }

                if (unlikely(freezing(current))) {
                        move_buckets_wait(&ctxt, buckets, true);
                        __refrigerator(false);
                        continue;
                }

                last = atomic64_read(&clock->now);
                wait = bch2_copygc_wait_amount(c);

                if (wait > clock->max_slop) {
                        c->copygc_wait_at = last;
                        c->copygc_wait = last + wait;
                        move_buckets_wait(&ctxt, buckets, true);
                        trace_and_count(c, copygc_wait, c, wait, last + wait);
                        bch2_kthread_io_clock_wait(clock, last + wait,
                                                   MAX_SCHEDULE_TIMEOUT);
                        continue;
                }

                c->copygc_wait = 0;

                c->copygc_running = true;
                ret = bch2_copygc(&ctxt, buckets, &did_work);
                c->copygc_running = false;

                wake_up(&c->copygc_running_wq);

                if (!wait && !did_work) {
                        u64 min_member_capacity = bch2_min_rw_member_capacity(c);

                        if (min_member_capacity == U64_MAX)
                                min_member_capacity = 128 * 2048;

                        move_buckets_wait(&ctxt, buckets, true);
                        bch2_kthread_io_clock_wait(clock, last + (min_member_capacity >> 6),
                                                   MAX_SCHEDULE_TIMEOUT);
                }
        }

        move_buckets_wait(&ctxt, buckets, true);

        rhashtable_destroy(&buckets->table);
        kfree(buckets);
        bch2_moving_ctxt_exit(&ctxt);
        bch2_move_stats_exit(&move_stats, c);

        return 0;
}

void bch2_copygc_stop(struct bch_fs *c)
{
        if (c->copygc_thread) {
                kthread_stop(c->copygc_thread);
                put_task_struct(c->copygc_thread);
        }
        c->copygc_thread = NULL;
}

int bch2_copygc_start(struct bch_fs *c)
{
        struct task_struct *t;
        int ret;

        if (c->copygc_thread)
                return 0;

        if (c->opts.nochanges)
                return 0;

        if (bch2_fs_init_fault("copygc_start"))
                return -ENOMEM;

        t = kthread_create(bch2_copygc_thread, c, "bch-copygc/%s", c->name);
        ret = PTR_ERR_OR_ZERO(t);
        bch_err_msg(c, ret, "creating copygc thread");
        if (ret)
                return ret;

        get_task_struct(t);

        c->copygc_thread = t;
        wake_up_process(c->copygc_thread);

        return 0;
}

void bch2_fs_copygc_init(struct bch_fs *c)
{
        init_waitqueue_head(&c->copygc_running_wq);
        c->copygc_running = false;
}