// SPDX-License-Identifier: GPL-2.0
/*
 * Moving/copying garbage collector
 *
 * Copyright 2012 Google, Inc.
 */

#include "bcachefs.h"
#include "alloc_background.h"
#include "alloc_foreground.h"
#include "btree_iter.h"
#include "btree_update.h"
#include "btree_write_buffer.h"
#include "buckets.h"
#include "clock.h"
#include "errcode.h"
#include "error.h"
#include "lru.h"
#include "move.h"
#include "movinggc.h"
#include "trace.h"

#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/math64.h>
#include <linux/sched/task.h>
#include <linux/wait.h>

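/*
 * Buckets currently being evacuated: a singly linked list in submission
 * order (drained oldest-first by move_buckets_wait()) plus an rhashtable
 * for duplicate detection.
 */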
struct buckets_in_flight {
	struct rhashtable		table;
	struct move_bucket_in_flight	*first;
	struct move_bucket_in_flight	*last;
	size_t				nr;
	size_t				sectors;
};

static const struct rhashtable_params bch_move_bucket_params = {
	.head_offset	= offsetof(struct move_bucket_in_flight, hash),
	.key_offset	= offsetof(struct move_bucket_in_flight, bucket.k),
	.key_len	= sizeof(struct move_bucket_key),
};

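/*
 * Add a bucket to the in-flight list; the rhashtable insert provides the
 * duplicate check, so this returns ERR_PTR(-EEXIST) if the bucket is
 * already being evacuated.
 */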
static struct move_bucket_in_flight *
move_bucket_in_flight_add(struct buckets_in_flight *list, struct move_bucket b)
{
	struct move_bucket_in_flight *new = kzalloc(sizeof(*new), GFP_KERNEL);
	int ret;

	if (!new)
		return ERR_PTR(-ENOMEM);

	new->bucket = b;

	ret = rhashtable_lookup_insert_fast(&list->table, &new->hash,
					    bch_move_bucket_params);
	if (ret) {
		kfree(new);
		return ERR_PTR(ret);
	}

	if (!list->first)
		list->first = new;
	else
		list->last->next = new;

	list->last = new;
	list->nr++;
	list->sectors += b.sectors;
	return new;
}

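/*
 * Returns true if the bucket is a copygc candidate: not currently open for
 * writes, holding movable data, and with a fragmentation LRU position at or
 * before @time. Also fills in the bucket's current gen and dirty sector
 * count for the caller.
 */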
static int bch2_bucket_is_movable(struct btree_trans *trans,
				  struct move_bucket *b, u64 time)
{
	struct btree_iter iter;
	struct bkey_s_c k;
	struct bch_alloc_v4 _a;
	const struct bch_alloc_v4 *a;
	int ret;

	if (bch2_bucket_is_open(trans->c,
				b->k.bucket.inode,
				b->k.bucket.offset))
		return 0;

	k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_alloc,
			       b->k.bucket, BTREE_ITER_CACHED);
	ret = bkey_err(k);
	if (ret)
		return ret;

	a = bch2_alloc_to_v4(k, &_a);
	b->k.gen	= a->gen;
	b->sectors	= bch2_bucket_sectors_dirty(*a);

	ret = data_type_movable(a->data_type) &&
		a->fragmentation_lru &&
		a->fragmentation_lru <= time;

	bch2_trans_iter_exit(trans, &iter);
	return ret;
}

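/*
 * Reap completed buckets from the head of the in-flight list; if @flush,
 * wait for each bucket's outstanding IO so the whole list drains.
 */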
static void move_buckets_wait(struct moving_context *ctxt,
			      struct buckets_in_flight *list,
			      bool flush)
{
	struct move_bucket_in_flight *i;
	int ret;

	while ((i = list->first)) {
		if (flush)
			move_ctxt_wait_event(ctxt, !atomic_read(&i->count));

		if (atomic_read(&i->count))
			break;

		list->first = i->next;
		if (!list->first)
			list->last = NULL;

		list->nr--;
		list->sectors -= i->bucket.sectors;

		ret = rhashtable_remove_fast(&list->table, &i->hash,
					     bch_move_bucket_params);
		BUG_ON(ret);
		kfree(i);
	}

	bch2_trans_unlock_long(ctxt->trans);
}

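/* Is this bucket already being evacuated? */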
static bool bucket_in_flight(struct buckets_in_flight *list,
			     struct move_bucket_key k)
{
	return rhashtable_lookup_fast(&list->table, &k, bch_move_bucket_params);
}

typedef DARRAY(struct move_bucket) move_buckets;

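/*
 * Walk the fragmentation LRU and collect up to max(16, nr_in_flight / 4)
 * movable buckets that aren't already being evacuated.
 */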
static int bch2_copygc_get_buckets(struct moving_context *ctxt,
			struct buckets_in_flight *buckets_in_flight,
			move_buckets *buckets)
{
	struct btree_trans *trans = ctxt->trans;
	struct bch_fs *c = trans->c;
	size_t nr_to_get = max_t(size_t, 16U, buckets_in_flight->nr / 4);
	size_t saw = 0, in_flight = 0, not_movable = 0, sectors = 0;
	int ret;

	move_buckets_wait(ctxt, buckets_in_flight, false);

	ret = bch2_btree_write_buffer_tryflush(trans);
	if (bch2_err_matches(ret, EROFS))
		return ret;

	if (bch2_fs_fatal_err_on(ret, c, "%s: error %s from bch2_btree_write_buffer_tryflush()",
				 __func__, bch2_err_str(ret)))
		return ret;

	ret = for_each_btree_key_upto(trans, iter, BTREE_ID_lru,
				  lru_pos(BCH_LRU_FRAGMENTATION_START, 0, 0),
				  lru_pos(BCH_LRU_FRAGMENTATION_START, U64_MAX, LRU_TIME_MAX),
				  0, k, ({
		struct move_bucket b = { .k.bucket = u64_to_bucket(k.k->p.offset) };
		int ret2 = 0;

		saw++;

		ret2 = bch2_bucket_is_movable(trans, &b, lru_pos_time(k.k->p));
		if (ret2 < 0)
			goto err;

		if (!ret2)
			not_movable++;
		else if (bucket_in_flight(buckets_in_flight, b.k))
			in_flight++;
		else {
			ret2 = darray_push(buckets, b);
			if (ret2)
				goto err;
			sectors += b.sectors;
		}

		ret2 = buckets->nr >= nr_to_get;
err:
		ret2;
	}));

	pr_debug("have: %zu (%zu) saw %zu in flight %zu not movable %zu got %zu (%zu)/%zu buckets ret %i",
		 buckets_in_flight->nr, buckets_in_flight->sectors,
		 saw, in_flight, not_movable, buckets->nr, sectors, nr_to_get, ret);

	return ret < 0 ? ret : 0;
}

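/*
 * One copygc pass: pick a batch of fragmented buckets and evacuate their
 * live data, tracking each bucket in @buckets_in_flight until its moves
 * complete.
 */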
noinline
static int bch2_copygc(struct moving_context *ctxt,
		       struct buckets_in_flight *buckets_in_flight,
		       bool *did_work)
{
	struct btree_trans *trans = ctxt->trans;
	struct bch_fs *c = trans->c;
	struct data_update_opts data_opts = {
		.btree_insert_flags = BCH_WATERMARK_copygc,
	};
	move_buckets buckets = { 0 };
	struct move_bucket_in_flight *f;
	u64 moved = atomic64_read(&ctxt->stats->sectors_moved);
	int ret = 0;

	ret = bch2_copygc_get_buckets(ctxt, buckets_in_flight, &buckets);
	if (ret)
		goto err;

	darray_for_each(buckets, i) {
		if (kthread_should_stop() || freezing(current))
			break;

		f = move_bucket_in_flight_add(buckets_in_flight, *i);
		ret = PTR_ERR_OR_ZERO(f);
		if (ret == -EEXIST) { /* rare race: copygc_get_buckets returned same bucket more than once */
			ret = 0;
			continue;
		}
		if (ret == -ENOMEM) { /* flush IO, continue later */
			ret = 0;
			break;
		}

		ret = bch2_evacuate_bucket(ctxt, f, f->bucket.k.bucket,
					   f->bucket.k.gen, data_opts);
		if (ret)
			goto err;

		*did_work = true;
	}
err:
	darray_exit(&buckets);

	/* no entries in LRU btree found, or got to end: */
	if (bch2_err_matches(ret, ENOENT))
		ret = 0;

	if (ret < 0 && !bch2_err_matches(ret, EROFS))
		bch_err_msg(c, ret, "from bch2_move_data()");

	moved = atomic64_read(&ctxt->stats->sectors_moved) - moved;
	trace_and_count(c, copygc, c, moved, 0, 0, 0);
	return ret;
}

/*
 * Copygc runs when the amount of fragmented data is above some arbitrary
 * threshold:
 *
 * The threshold at the limit - when the device is full - is the amount of space
 * we reserved in bch2_recalc_capacity; we can't have more than that amount of
 * disk space stranded due to fragmentation and store everything we have
 * promised to store.
 *
 * But we don't want to be running copygc unnecessarily when the device still
 * has plenty of free space - rather, we want copygc to smoothly run every so
 * often and continually reduce the amount of fragmented space as the device
 * fills up. So, we increase the threshold by half the current free space.
 */
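/*
 * A worked example, with hypothetical numbers: a device with 512 GiB
 * available at the stripe watermark gets fragmented_allowed = 256 GiB; if
 * 200 GiB is currently fragmented, the returned wait is ~56 GiB of further
 * writes (measured in sectors against the write io_clock).
 */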
unsigned long bch2_copygc_wait_amount(struct bch_fs *c)
{
	s64 wait = S64_MAX, fragmented_allowed, fragmented;

	for_each_rw_member(c, ca) {
		struct bch_dev_usage usage = bch2_dev_usage_read(ca);

		fragmented_allowed = ((__dev_buckets_available(ca, usage, BCH_WATERMARK_stripe) *
				       ca->mi.bucket_size) >> 1);
		fragmented = 0;

		for (unsigned i = 0; i < BCH_DATA_NR; i++)
			if (data_type_movable(i))
				fragmented += usage.d[i].fragmented;

		wait = min(wait, max(0LL, fragmented_allowed - fragmented));
	}

	return wait;
}

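/* Report the current copygc wait state in human-readable units. */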
void bch2_copygc_wait_to_text(struct printbuf *out, struct bch_fs *c)
{
	prt_printf(out, "Currently waiting for: ");
	prt_human_readable_u64(out, max(0LL, c->copygc_wait -
					atomic64_read(&c->io_clock[WRITE].now)) << 9);
	prt_newline(out);

	prt_printf(out, "Currently waiting since: ");
	prt_human_readable_u64(out, max(0LL,
					atomic64_read(&c->io_clock[WRITE].now) -
					c->copygc_wait_at) << 9);
	prt_newline(out);

	prt_printf(out, "Currently calculated wait: ");
	prt_human_readable_u64(out, bch2_copygc_wait_amount(c));
	prt_newline(out);
}

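/*
 * Main copygc loop: sleep on the write io_clock until enough has been
 * written that the fragmentation threshold may be exceeded, then run
 * bch2_copygc() until there's no more work to do.
 */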
static int bch2_copygc_thread(void *arg)
{
	struct bch_fs *c = arg;
	struct moving_context ctxt;
	struct bch_move_stats move_stats;
	struct io_clock *clock = &c->io_clock[WRITE];
	struct buckets_in_flight *buckets;
	u64 last, wait;
	int ret = 0;

	buckets = kzalloc(sizeof(struct buckets_in_flight), GFP_KERNEL);
	if (!buckets)
		return -ENOMEM;
	ret = rhashtable_init(&buckets->table, &bch_move_bucket_params);
	bch_err_msg(c, ret, "allocating copygc buckets in flight");
	if (ret) {
		kfree(buckets);
		return ret;
	}

	set_freezable();

	bch2_move_stats_init(&move_stats, "copygc");
	bch2_moving_ctxt_init(&ctxt, c, NULL, &move_stats,
			      writepoint_ptr(&c->copygc_write_point),
			      false);

	while (!ret && !kthread_should_stop()) {
		bool did_work = false;

		bch2_trans_unlock_long(ctxt.trans);
		cond_resched();

		if (!c->copy_gc_enabled) {
			move_buckets_wait(&ctxt, buckets, true);
			kthread_wait_freezable(c->copy_gc_enabled ||
					       kthread_should_stop());
		}

		if (unlikely(freezing(current))) {
			move_buckets_wait(&ctxt, buckets, true);
			__refrigerator(false);
			continue;
		}

		last = atomic64_read(&clock->now);
		wait = bch2_copygc_wait_amount(c);

		if (wait > clock->max_slop) {
			c->copygc_wait_at = last;
			c->copygc_wait = last + wait;
			move_buckets_wait(&ctxt, buckets, true);
			trace_and_count(c, copygc_wait, c, wait, last + wait);
			bch2_kthread_io_clock_wait(clock, last + wait,
						   MAX_SCHEDULE_TIMEOUT);
			continue;
		}

		c->copygc_wait = 0;

		c->copygc_running = true;
		ret = bch2_copygc(&ctxt, buckets, &did_work);
		c->copygc_running = false;

		wake_up(&c->copygc_running_wq);

		if (!wait && !did_work) {
			u64 min_member_capacity = bch2_min_rw_member_capacity(c);

			if (min_member_capacity == U64_MAX)
				min_member_capacity = 128 * 2048;

			bch2_trans_unlock_long(ctxt.trans);
			bch2_kthread_io_clock_wait(clock, last + (min_member_capacity >> 6),
						   MAX_SCHEDULE_TIMEOUT);
		}
	}

	move_buckets_wait(&ctxt, buckets, true);

	rhashtable_destroy(&buckets->table);
	kfree(buckets);
	bch2_moving_ctxt_exit(&ctxt);
	bch2_move_stats_exit(&move_stats, c);

	return 0;
}

void bch2_copygc_stop(struct bch_fs *c)
{
	if (c->copygc_thread) {
		kthread_stop(c->copygc_thread);
		put_task_struct(c->copygc_thread);
	}
	c->copygc_thread = NULL;
}

int bch2_copygc_start(struct bch_fs *c)
{
	struct task_struct *t;
	int ret;

	if (c->copygc_thread)
		return 0;

	if (c->opts.nochanges)
		return 0;

	if (bch2_fs_init_fault("copygc_start"))
		return -ENOMEM;

	t = kthread_create(bch2_copygc_thread, c, "bch-copygc/%s", c->name);
	ret = PTR_ERR_OR_ZERO(t);
	bch_err_msg(c, ret, "creating copygc thread");
	if (ret)
		return ret;

	get_task_struct(t);

	c->copygc_thread = t;
	wake_up_process(c->copygc_thread);

	return 0;
}

void bch2_fs_copygc_init(struct bch_fs *c)
{
	init_waitqueue_head(&c->copygc_running_wq);
	c->copygc_running = false;
}