// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "btree_key_cache.h"
#include "btree_update.h"
#include "btree_write_buffer.h"
#include "buckets.h"
#include "errcode.h"
#include "error.h"
#include "journal.h"
#include "journal_io.h"
#include "journal_reclaim.h"
#include "replicas.h"
#include "sb-members.h"
#include "trace.h"

#include <linux/kthread.h>
#include <linux/sched/mm.h>

static bool __should_discard_bucket(struct journal *, struct journal_device *);

/* Free space calculations: */

static unsigned journal_space_from(struct journal_device *ja,
				   enum journal_space_from from)
{
	switch (from) {
	case journal_space_discarded:
		return ja->discard_idx;
	case journal_space_clean_ondisk:
		return ja->dirty_idx_ondisk;
	case journal_space_clean:
		return ja->dirty_idx;
	default:
		BUG();
	}
}

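/*
 * Number of buckets on @ja that the journal can still hand out, counting
 * forward around the ring from cur_idx to the index selected by @from.
 *
 * Worked example (illustrative values, not from the original source): with
 * ja->nr = 8, ja->cur_idx = 5 and journal_space_from() returning 2,
 *
 *	(2 - 5 - 1 + 8) % 8 = 4
 *
 * buckets remain before the write position catches up with the first
 * bucket that still holds live journal data.
 */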
unsigned bch2_journal_dev_buckets_available(struct journal *j,
					    struct journal_device *ja,
					    enum journal_space_from from)
{
	if (!ja->nr)
		return 0;

	unsigned available = (journal_space_from(ja, from) -
			      ja->cur_idx - 1 + ja->nr) % ja->nr;

	/*
	 * Don't use the last bucket unless writing the new last_seq
	 * will make another bucket available:
	 */
	if (available && ja->dirty_idx_ondisk == ja->dirty_idx)
		--available;

	return available;
}

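/*
 * Pick the journal watermark - a readability summary of the checks below:
 * we drop to BCH_WATERMARK_reclaim (only reclaim may allocate journal
 * space) when clean space has fallen to a quarter of the total, the pin
 * FIFO is over three quarters full, or the btree write buffer is forcing
 * waits; otherwise we stay at BCH_WATERMARK_stripe.
 */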
void bch2_journal_set_watermark(struct journal *j)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	bool low_on_space = j->space[journal_space_clean].total * 4 <=
		j->space[journal_space_total].total;
	bool low_on_pin = fifo_free(&j->pin) < j->pin.size / 4;
	bool low_on_wb = bch2_btree_write_buffer_must_wait(c);
	unsigned watermark = low_on_space || low_on_pin || low_on_wb
		? BCH_WATERMARK_reclaim
		: BCH_WATERMARK_stripe;

	if (track_event_change(&c->times[BCH_TIME_blocked_journal_low_on_space], low_on_space) ||
	    track_event_change(&c->times[BCH_TIME_blocked_journal_low_on_pin], low_on_pin) ||
	    track_event_change(&c->times[BCH_TIME_blocked_write_buffer_full], low_on_wb))
		trace_and_count(c, journal_full, c);

	mod_bit(JOURNAL_space_low, &j->flags, low_on_space || low_on_pin);

	swap(watermark, j->watermark);
	if (watermark > j->watermark)
		journal_wake(j);
}

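/*
 * Per-device space accounting: .next_entry is the sectors usable for the
 * very next journal write on this device, .total the sum across all
 * reclaimable buckets. Entries that are open but not yet written out are
 * walked below and their space subtracted, since that space isn't
 * allocated until the write actually happens.
 */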
static struct journal_space
journal_dev_space_available(struct journal *j, struct bch_dev *ca,
			    enum journal_space_from from)
{
	struct journal_device *ja = &ca->journal;
	unsigned sectors, buckets, unwritten;
	u64 seq;

	if (from == journal_space_total)
		return (struct journal_space) {
			.next_entry	= ca->mi.bucket_size,
			.total		= ca->mi.bucket_size * ja->nr,
		};

	buckets = bch2_journal_dev_buckets_available(j, ja, from);
	sectors = ja->sectors_free;

	/*
	 * Note that we don't allocate the space for a journal entry
	 * until we write it out - thus, account for it here:
	 */
	for (seq = journal_last_unwritten_seq(j);
	     seq <= journal_cur_seq(j);
	     seq++) {
		unwritten = j->buf[seq & JOURNAL_BUF_MASK].sectors;

		if (!unwritten)
			continue;

		/* entry won't fit on this device, skip: */
		if (unwritten > ca->mi.bucket_size)
			continue;

		if (unwritten >= sectors) {
			if (!buckets) {
				sectors = 0;
				break;
			}

			buckets--;
			sectors = ca->mi.bucket_size;
		}

		sectors -= unwritten;
	}

	if (sectors < ca->mi.bucket_size && buckets) {
		buckets--;
		sectors = ca->mi.bucket_size;
	}

	return (struct journal_space) {
		.next_entry	= sectors,
		.total		= sectors + buckets * ca->mi.bucket_size,
	};
}

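/*
 * Aggregate per-device space for a write that wants @nr_devs_want
 * replicas: devices are insertion-sorted largest to smallest by .total,
 * and we return the space on the smallest of the @nr_devs_want largest
 * devices, so that every replica of the next entry is guaranteed to fit.
 */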
static struct journal_space __journal_space_available(struct journal *j, unsigned nr_devs_want,
						      enum journal_space_from from)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	unsigned pos, nr_devs = 0;
	struct journal_space space, dev_space[BCH_SB_MEMBERS_MAX];
	unsigned min_bucket_size = U32_MAX;

	BUG_ON(nr_devs_want > ARRAY_SIZE(dev_space));

	rcu_read_lock();
	for_each_member_device_rcu(c, ca, &c->rw_devs[BCH_DATA_journal]) {
		if (!ca->journal.nr ||
		    !ca->mi.durability)
			continue;

		min_bucket_size = min(min_bucket_size, ca->mi.bucket_size);

		space = journal_dev_space_available(j, ca, from);
		if (!space.next_entry)
			continue;

		for (pos = 0; pos < nr_devs; pos++)
			if (space.total > dev_space[pos].total)
				break;

		array_insert_item(dev_space, nr_devs, pos, space);
	}
	rcu_read_unlock();

	if (nr_devs < nr_devs_want)
		return (struct journal_space) { 0, 0 };

	/*
	 * We sorted largest to smallest, and we want the smallest out of the
	 * @nr_devs_want largest devices:
	 */
	space = dev_space[nr_devs_want - 1];
	space.next_entry = min(space.next_entry, min_bucket_size);
	return space;
}

void bch2_journal_space_available(struct journal *j)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	unsigned clean, clean_ondisk, total;
	unsigned max_entry_size	= min(j->buf[0].buf_size >> 9,
				      j->buf[1].buf_size >> 9);
	unsigned nr_online = 0, nr_devs_want;
	bool can_discard = false;
	int ret = 0;

	lockdep_assert_held(&j->lock);

	rcu_read_lock();
	for_each_member_device_rcu(c, ca, &c->rw_devs[BCH_DATA_journal]) {
		struct journal_device *ja = &ca->journal;

		if (!ja->nr)
			continue;

		while (ja->dirty_idx != ja->cur_idx &&
		       ja->bucket_seq[ja->dirty_idx] < journal_last_seq(j))
			ja->dirty_idx = (ja->dirty_idx + 1) % ja->nr;

		while (ja->dirty_idx_ondisk != ja->dirty_idx &&
		       ja->bucket_seq[ja->dirty_idx_ondisk] < j->last_seq_ondisk)
			ja->dirty_idx_ondisk = (ja->dirty_idx_ondisk + 1) % ja->nr;

		can_discard |= __should_discard_bucket(j, ja);

		max_entry_size = min_t(unsigned, max_entry_size, ca->mi.bucket_size);
		nr_online++;
	}
	rcu_read_unlock();

	j->can_discard = can_discard;

	if (nr_online < metadata_replicas_required(c)) {
		struct printbuf buf = PRINTBUF;
		buf.atomic++;
		prt_printf(&buf, "insufficient writeable journal devices available: have %u, need %u\n"
			   "rw journal devs:", nr_online, metadata_replicas_required(c));

		rcu_read_lock();
		for_each_member_device_rcu(c, ca, &c->rw_devs[BCH_DATA_journal])
			prt_printf(&buf, " %s", ca->name);
		rcu_read_unlock();

		bch_err(c, "%s", buf.buf);
		printbuf_exit(&buf);
		ret = -BCH_ERR_insufficient_journal_devices;
		goto out;
	}

	nr_devs_want = min_t(unsigned, nr_online, c->opts.metadata_replicas);

	for (unsigned i = 0; i < journal_space_nr; i++)
		j->space[i] = __journal_space_available(j, nr_devs_want, i);

	clean_ondisk	= j->space[journal_space_clean_ondisk].total;
	clean		= j->space[journal_space_clean].total;
	total		= j->space[journal_space_total].total;

	if (!j->space[journal_space_discarded].next_entry)
		ret = -BCH_ERR_journal_full;

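	/*
	 * Summary of the test below: journal writes may skip the flush/FUA
	 * only while the on-disk last_seq is nearly caught up with the
	 * in-memory one - clean space on disk must lag in-memory clean space
	 * by at most total/8, and be at least half of it - so skipping the
	 * flush can't leave much reclaimable space unaccounted for.
	 */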
	if ((j->space[journal_space_clean_ondisk].next_entry <
	     j->space[journal_space_clean_ondisk].total) &&
	    (clean - clean_ondisk <= total / 8) &&
	    (clean_ondisk * 2 > clean))
		set_bit(JOURNAL_may_skip_flush, &j->flags);
	else
		clear_bit(JOURNAL_may_skip_flush, &j->flags);

	bch2_journal_set_watermark(j);
out:
	j->cur_entry_sectors	= !ret
		? round_down(j->space[journal_space_discarded].next_entry,
			     block_sectors(c))
		: 0;
	j->cur_entry_error	= ret;

	if (!ret)
		journal_wake(j);
}

/* Discards - last part of journal reclaim: */

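/*
 * Only start discarding once we're down to our last few reclaimable
 * buckets - max(4, ja->nr / 8) - and there is actually something between
 * discard_idx and dirty_idx_ondisk left to discard; discards are thus
 * deferred until the space is about to be needed.
 */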
static bool __should_discard_bucket(struct journal *j, struct journal_device *ja)
{
	unsigned min_free = max(4, ja->nr / 8);

	return bch2_journal_dev_buckets_available(j, ja, journal_space_discarded) <
		min_free &&
		ja->discard_idx != ja->dirty_idx_ondisk;
}

static bool should_discard_bucket(struct journal *j, struct journal_device *ja)
{
	spin_lock(&j->lock);
	bool ret = __should_discard_bucket(j, ja);
	spin_unlock(&j->lock);

	return ret;
}

/*
 * Advance ja->discard_idx as long as it points to buckets that are no longer
 * dirty, issuing discards if necessary:
 */
void bch2_journal_do_discards(struct journal *j)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);

	mutex_lock(&j->discard_lock);

	for_each_rw_member(c, ca) {
		struct journal_device *ja = &ca->journal;

		while (should_discard_bucket(j, ja)) {
			if (!c->opts.nochanges &&
			    ca->mi.discard &&
			    bdev_max_discard_sectors(ca->disk_sb.bdev))
				blkdev_issue_discard(ca->disk_sb.bdev,
					bucket_to_sector(ca,
						ja->buckets[ja->discard_idx]),
					ca->mi.bucket_size, GFP_NOFS);

			spin_lock(&j->lock);
			ja->discard_idx = (ja->discard_idx + 1) % ja->nr;

			bch2_journal_space_available(j);
			spin_unlock(&j->lock);
		}
	}

	mutex_unlock(&j->discard_lock);
}

/*
 * Journal entry pinning - machinery for holding a reference on a given journal
 * entry, holding it open to ensure it gets replayed during recovery:
 */

void bch2_journal_reclaim_fast(struct journal *j)
{
	bool popped = false;

	lockdep_assert_held(&j->lock);

	/*
	 * Unpin journal entries whose reference counts reached zero, meaning
	 * all btree nodes got written out
	 */
	while (!fifo_empty(&j->pin) &&
	       j->pin.front <= j->seq_ondisk &&
	       !atomic_read(&fifo_peek_front(&j->pin).count)) {
		j->pin.front++;
		popped = true;
	}

	if (popped) {
		bch2_journal_space_available(j);
		__closure_wake_up(&j->reclaim_flush_wait);
	}
}

bool __bch2_journal_pin_put(struct journal *j, u64 seq)
{
	struct journal_entry_pin_list *pin_list = journal_seq_pin(j, seq);

	return atomic_dec_and_test(&pin_list->count);
}

void bch2_journal_pin_put(struct journal *j, u64 seq)
{
	if (__bch2_journal_pin_put(j, seq)) {
		spin_lock(&j->lock);
		bch2_journal_reclaim_fast(j);
		spin_unlock(&j->lock);
	}
}

static inline bool __journal_pin_drop(struct journal *j,
				      struct journal_entry_pin *pin)
{
	struct journal_entry_pin_list *pin_list;

	if (!journal_pin_active(pin))
		return false;

	if (j->flush_in_progress == pin)
		j->flush_in_progress_dropped = true;

	pin_list = journal_seq_pin(j, pin->seq);
	pin->seq = 0;
	list_del_init(&pin->list);

	if (j->reclaim_flush_wait.list.first)
		__closure_wake_up(&j->reclaim_flush_wait);

	/*
	 * Unpinning a journal entry may make journal_next_bucket() succeed, if
	 * writing a new last_seq will now make another bucket available:
	 */
	return atomic_dec_and_test(&pin_list->count) &&
		pin_list == &fifo_peek_front(&j->pin);
}

void bch2_journal_pin_drop(struct journal *j,
			   struct journal_entry_pin *pin)
{
	spin_lock(&j->lock);
	if (__journal_pin_drop(j, pin))
		bch2_journal_reclaim_fast(j);
	spin_unlock(&j->lock);
}

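/*
 * Classify a pin by its flush callback: btree node pins are bucketed by
 * level (JOURNAL_PIN_TYPE_btree0 - b->c.level relies on the btree pin
 * types being laid out consecutively in the enum, descending by level),
 * key cache pins get their own bucket, and everything else is "other".
 * Reclaim uses these buckets to flush pins in a sensible order.
 */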
static enum journal_pin_type journal_pin_type(struct journal_entry_pin *pin,
					      journal_pin_flush_fn fn)
{
	if (fn == bch2_btree_node_flush0 ||
	    fn == bch2_btree_node_flush1) {
		unsigned idx = fn == bch2_btree_node_flush1;
		struct btree *b = container_of(pin, struct btree, writes[idx].journal);

		return JOURNAL_PIN_TYPE_btree0 - b->c.level;
	} else if (fn == bch2_btree_key_cache_journal_flush)
		return JOURNAL_PIN_TYPE_key_cache;
	else
		return JOURNAL_PIN_TYPE_other;
}

static inline void bch2_journal_pin_set_locked(struct journal *j, u64 seq,
					       struct journal_entry_pin *pin,
					       journal_pin_flush_fn flush_fn,
					       enum journal_pin_type type)
{
	struct journal_entry_pin_list *pin_list = journal_seq_pin(j, seq);

	/*
	 * flush_fn is how we identify journal pins in debugfs, so must always
	 * exist, even if it doesn't do anything:
	 */
	BUG_ON(!flush_fn);

	atomic_inc(&pin_list->count);
	pin->seq	= seq;
	pin->flush	= flush_fn;

	if (list_empty(&pin_list->unflushed[type]) &&
	    j->reclaim_flush_wait.list.first)
		__closure_wake_up(&j->reclaim_flush_wait);

	list_add(&pin->list, &pin_list->unflushed[type]);
}

void bch2_journal_pin_copy(struct journal *j,
			   struct journal_entry_pin *dst,
			   struct journal_entry_pin *src,
			   journal_pin_flush_fn flush_fn)
{
	spin_lock(&j->lock);

	u64 seq = READ_ONCE(src->seq);

	if (seq < journal_last_seq(j)) {
		/*
		 * bch2_journal_pin_copy() raced with bch2_journal_pin_drop() on
		 * the src pin - with the pin dropped, the entry to pin might no
		 * longer exist, but that means there's no longer anything to
		 * copy and we can bail out here:
		 */
		spin_unlock(&j->lock);
		return;
	}

	bool reclaim = __journal_pin_drop(j, dst);

	bch2_journal_pin_set_locked(j, seq, dst, flush_fn, journal_pin_type(dst, flush_fn));

	if (reclaim)
		bch2_journal_reclaim_fast(j);

	/*
	 * If the journal is currently full, we might want to call flush_fn
	 * immediately:
	 */
	if (seq == journal_last_seq(j))
		journal_wake(j);
	spin_unlock(&j->lock);
}

void bch2_journal_pin_set(struct journal *j, u64 seq,
			  struct journal_entry_pin *pin,
			  journal_pin_flush_fn flush_fn)
{
	spin_lock(&j->lock);

	BUG_ON(seq < journal_last_seq(j));

	bool reclaim = __journal_pin_drop(j, pin);

	bch2_journal_pin_set_locked(j, seq, pin, flush_fn, journal_pin_type(pin, flush_fn));

	if (reclaim)
		bch2_journal_reclaim_fast(j);
	/*
	 * If the journal is currently full, we might want to call flush_fn
	 * immediately:
	 */
	if (seq == journal_last_seq(j))
		journal_wake(j);

	spin_unlock(&j->lock);
}

/**
 * bch2_journal_pin_flush: ensure journal pin callback is no longer running
 * @j: journal object
 * @pin: pin to flush
 */
void bch2_journal_pin_flush(struct journal *j, struct journal_entry_pin *pin)
{
	BUG_ON(journal_pin_active(pin));

	wait_event(j->pin_flush_wait, j->flush_in_progress != pin);
}

/*
 * Journal reclaim: flush references to open journal entries to reclaim space
 * in the journal
 *
 * May be done by the journal code in the background as needed to free up space
 * for more journal entries, or as part of doing a clean shutdown, or to migrate
 * data off of a specific device:
 */

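/*
 * Find the next pin to flush: @allowed_below_seq and @allowed_above_seq
 * are bitmasks of JOURNAL_PIN_TYPE_* values (BIT(type)) selecting which
 * pin types may be returned for entries at or below @seq_to_flush and
 * above it, respectively.
 */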
static struct journal_entry_pin *
journal_get_next_pin(struct journal *j,
		     u64 seq_to_flush,
		     unsigned allowed_below_seq,
		     unsigned allowed_above_seq,
		     u64 *seq)
{
	struct journal_entry_pin_list *pin_list;
	struct journal_entry_pin *ret = NULL;

	fifo_for_each_entry_ptr(pin_list, &j->pin, *seq) {
		if (*seq > seq_to_flush && !allowed_above_seq)
			break;

		for (unsigned i = 0; i < JOURNAL_PIN_TYPE_NR; i++)
			if (((BIT(i) & allowed_below_seq) && *seq <= seq_to_flush) ||
			    (BIT(i) & allowed_above_seq)) {
				ret = list_first_entry_or_null(&pin_list->unflushed[i],
					struct journal_entry_pin, list);
				if (ret)
					return ret;
			}
	}

	return NULL;
}

/* returns the number of pins flushed (nonzero if we did work): */
static size_t journal_flush_pins(struct journal *j,
				 u64 seq_to_flush,
				 unsigned allowed_below_seq,
				 unsigned allowed_above_seq,
				 unsigned min_any,
				 unsigned min_key_cache)
{
	struct journal_entry_pin *pin;
	size_t nr_flushed = 0;
	journal_pin_flush_fn flush_fn;
	u64 seq;
	int err;

	lockdep_assert_held(&j->reclaim_lock);

	while (1) {
		unsigned allowed_above = allowed_above_seq;
		unsigned allowed_below = allowed_below_seq;

		if (min_any) {
			allowed_above |= ~0;
			allowed_below |= ~0;
		}

		if (min_key_cache) {
			allowed_above |= BIT(JOURNAL_PIN_TYPE_key_cache);
			allowed_below |= BIT(JOURNAL_PIN_TYPE_key_cache);
		}

		cond_resched();

		j->last_flushed = jiffies;

		spin_lock(&j->lock);
		pin = journal_get_next_pin(j, seq_to_flush,
					   allowed_below,
					   allowed_above, &seq);
		if (pin) {
			BUG_ON(j->flush_in_progress);
			j->flush_in_progress = pin;
			j->flush_in_progress_dropped = false;
			flush_fn = pin->flush;
		}
		spin_unlock(&j->lock);

		if (!pin)
			break;

		if (min_key_cache && pin->flush == bch2_btree_key_cache_journal_flush)
			min_key_cache--;

		if (min_any)
			min_any--;

		err = flush_fn(j, pin, seq);

		spin_lock(&j->lock);
		/* Pin might have been dropped or rearmed: */
		if (likely(!err && !j->flush_in_progress_dropped))
			list_move(&pin->list, &journal_seq_pin(j, seq)->flushed[journal_pin_type(pin, flush_fn)]);
		j->flush_in_progress = NULL;
		j->flush_in_progress_dropped = false;
		spin_unlock(&j->lock);

		wake_up(&j->pin_flush_wait);

		if (err)
			break;

		nr_flushed++;
	}

	return nr_flushed;
}

static u64 journal_seq_to_flush(struct journal *j)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	u64 seq_to_flush = 0;

	spin_lock(&j->lock);

	for_each_rw_member(c, ca) {
		struct journal_device *ja = &ca->journal;
		unsigned nr_buckets, bucket_to_flush;

		if (!ja->nr)
			continue;

		/* Try to keep the journal at most half full: */
		nr_buckets = ja->nr / 2;

		nr_buckets = min(nr_buckets, ja->nr);

		bucket_to_flush = (ja->cur_idx + nr_buckets) % ja->nr;
		seq_to_flush = max(seq_to_flush,
				   ja->bucket_seq[bucket_to_flush]);
	}

	/* Also flush if the pin fifo is more than half full */
	seq_to_flush = max_t(s64, seq_to_flush,
			     (s64) journal_cur_seq(j) -
			     (j->pin.size >> 1));
	spin_unlock(&j->lock);

	return seq_to_flush;
}

/**
 * __bch2_journal_reclaim - free up journal buckets
 * @j: journal object
 * @direct: direct or background reclaim?
 * @kicked: requested to run since we last ran?
 *
 * Background journal reclaim writes out btree nodes. It should be run
 * early enough so that we never completely run out of journal buckets.
 *
 * High watermarks for triggering background reclaim:
 * - FIFO has fewer than 512 entries left
 * - fewer than 25% journal buckets free
 *
 * Background reclaim runs until low watermarks are reached:
 * - FIFO has more than 1024 entries left
 * - more than 50% journal buckets free
 *
 * As long as a reclaim can complete in the time it takes to fill up
 * 512 journal entries or 25% of all journal buckets, then
 * journal_next_bucket() should not stall.
 */
static int __bch2_journal_reclaim(struct journal *j, bool direct, bool kicked)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct btree_cache *bc = &c->btree_cache;
	bool kthread = (current->flags & PF_KTHREAD) != 0;
	u64 seq_to_flush;
	size_t min_nr, min_key_cache, nr_flushed;
	unsigned flags;
	int ret = 0;

	/*
	 * We can't invoke memory reclaim while holding the reclaim_lock -
	 * journal reclaim is required to make progress for memory reclaim
	 * (cleaning the caches), so we can't get stuck in memory reclaim while
	 * we're holding the reclaim lock:
	 */
	lockdep_assert_held(&j->reclaim_lock);
	flags = memalloc_noreclaim_save();

	do {
		if (kthread && kthread_should_stop())
			break;

		ret = bch2_journal_error(j);
		if (ret)
			break;

		bch2_journal_do_discards(j);

		seq_to_flush = journal_seq_to_flush(j);
		min_nr = 0;

		/*
		 * If it's been longer than c->opts.journal_reclaim_delay since
		 * we last flushed, make sure to flush at least one journal pin:
		 */
		if (time_after(jiffies, j->last_flushed +
			       msecs_to_jiffies(c->opts.journal_reclaim_delay)))
			min_nr = 1;

		if (j->watermark != BCH_WATERMARK_stripe)
			min_nr = 1;

		size_t btree_cache_live = bc->live[0].nr + bc->live[1].nr;
		if (atomic_long_read(&bc->nr_dirty) * 2 > btree_cache_live)
			min_nr = 1;

		min_key_cache = min(bch2_nr_btree_keys_need_flush(c), (size_t) 128);

		trace_and_count(c, journal_reclaim_start, c,
				direct, kicked,
				min_nr, min_key_cache,
				atomic_long_read(&bc->nr_dirty), btree_cache_live,
				atomic_long_read(&c->btree_key_cache.nr_dirty),
				atomic_long_read(&c->btree_key_cache.nr_keys));

		nr_flushed = journal_flush_pins(j, seq_to_flush,
						~0, 0,
						min_nr, min_key_cache);

		if (direct)
			j->nr_direct_reclaim += nr_flushed;
		else
			j->nr_background_reclaim += nr_flushed;
		trace_and_count(c, journal_reclaim_finish, c, nr_flushed);

		if (nr_flushed)
			wake_up(&j->reclaim_wait);
	} while ((min_nr || min_key_cache) && nr_flushed && !direct);

	memalloc_noreclaim_restore(flags);

	return ret;
}

int bch2_journal_reclaim(struct journal *j)
{
	return __bch2_journal_reclaim(j, true, true);
}

static int bch2_journal_reclaim_thread(void *arg)
{
	struct journal *j = arg;
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	unsigned long delay, now;
	bool journal_empty;
	int ret = 0;

	set_freezable();

	j->last_flushed = jiffies;

	while (!ret && !kthread_should_stop()) {
		bool kicked = j->reclaim_kicked;

		j->reclaim_kicked = false;

		mutex_lock(&j->reclaim_lock);
		ret = __bch2_journal_reclaim(j, false, kicked);
		mutex_unlock(&j->reclaim_lock);

		now = jiffies;
		delay = msecs_to_jiffies(c->opts.journal_reclaim_delay);
		j->next_reclaim = j->last_flushed + delay;

		if (!time_in_range(j->next_reclaim, now, now + delay))
			j->next_reclaim = now + delay;

		while (1) {
			set_current_state(TASK_INTERRUPTIBLE|TASK_FREEZABLE);
			if (kthread_should_stop())
				break;
			if (j->reclaim_kicked)
				break;

			spin_lock(&j->lock);
			journal_empty = fifo_empty(&j->pin);
			spin_unlock(&j->lock);

			long timeout = j->next_reclaim - jiffies;

			if (journal_empty)
				schedule();
			else if (timeout > 0)
				schedule_timeout(timeout);
			else
				break;
		}
		__set_current_state(TASK_RUNNING);
	}

	return 0;
}

void bch2_journal_reclaim_stop(struct journal *j)
{
	struct task_struct *p = j->reclaim_thread;

	j->reclaim_thread = NULL;

	if (p) {
		kthread_stop(p);
		put_task_struct(p);
	}
}

int bch2_journal_reclaim_start(struct journal *j)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct task_struct *p;
	int ret;

	if (j->reclaim_thread)
		return 0;

	p = kthread_create(bch2_journal_reclaim_thread, j,
			   "bch-reclaim/%s", c->name);
	ret = PTR_ERR_OR_ZERO(p);
	bch_err_msg(c, ret, "creating journal reclaim thread");
	if (ret)
		return ret;

	get_task_struct(p);
	j->reclaim_thread = p;
	wake_up_process(p);
	return 0;
}

static bool journal_pins_still_flushing(struct journal *j, u64 seq_to_flush,
					unsigned types)
{
	struct journal_entry_pin_list *pin_list;
	u64 seq;

	spin_lock(&j->lock);
	fifo_for_each_entry_ptr(pin_list, &j->pin, seq) {
		if (seq > seq_to_flush)
			break;

		for (unsigned i = 0; i < JOURNAL_PIN_TYPE_NR; i++)
			if ((BIT(i) & types) &&
			    (!list_empty(&pin_list->unflushed[i]) ||
			     !list_empty(&pin_list->flushed[i]))) {
				spin_unlock(&j->lock);
				return true;
			}
	}
	spin_unlock(&j->lock);

	return false;
}

static bool journal_flush_pins_or_still_flushing(struct journal *j, u64 seq_to_flush,
						 unsigned types)
{
	return journal_flush_pins(j, seq_to_flush, types, 0, 0, 0) ||
		journal_pins_still_flushing(j, seq_to_flush, types);
}

static int journal_flush_done(struct journal *j, u64 seq_to_flush,
			      bool *did_work)
{
	int ret = 0;

	ret = bch2_journal_error(j);
	if (ret)
		return ret;

	mutex_lock(&j->reclaim_lock);

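	/*
	 * Flush one pin type at a time, from the last enum value down: given
	 * that journal_pin_type() maps a btree node at level L to
	 * JOURNAL_PIN_TYPE_btree0 - L, this flushes key cache and leaf node
	 * pins before interior node pins, so updates propagate up the tree
	 * before the nodes above them are flushed. (Ordering rationale
	 * inferred from the enum layout, not stated in the original source.)
	 */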
	for (int type = JOURNAL_PIN_TYPE_NR - 1;
	     type >= 0;
	     --type)
		if (journal_flush_pins_or_still_flushing(j, seq_to_flush, BIT(type))) {
			*did_work = true;
			goto unlock;
		}

	if (seq_to_flush > journal_cur_seq(j))
		bch2_journal_entry_close(j);

	spin_lock(&j->lock);
	/*
	 * If journal replay hasn't completed, the unreplayed journal entries
	 * hold refs on their corresponding sequence numbers
	 */
	ret = !test_bit(JOURNAL_replay_done, &j->flags) ||
		journal_last_seq(j) > seq_to_flush ||
		!fifo_used(&j->pin);

	spin_unlock(&j->lock);
unlock:
	mutex_unlock(&j->reclaim_lock);

	return ret;
}

bool bch2_journal_flush_pins(struct journal *j, u64 seq_to_flush)
{
	/* time_stats this */
	bool did_work = false;

	if (!test_bit(JOURNAL_running, &j->flags))
		return false;

	closure_wait_event(&j->reclaim_flush_wait,
			   journal_flush_done(j, seq_to_flush, &did_work));

	return did_work;
}

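/*
 * Flush every journal pin referencing @dev_idx (or, when @dev_idx < 0,
 * every entry with fewer than metadata_replicas copies), then rebuild the
 * journal replicas entries via replicas gc, so the device can be removed
 * or evacuated safely.
 */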
int bch2_journal_flush_device_pins(struct journal *j, int dev_idx)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct journal_entry_pin_list *p;
	u64 iter, seq = 0;
	int ret = 0;

	spin_lock(&j->lock);
	fifo_for_each_entry_ptr(p, &j->pin, iter)
		if (dev_idx >= 0
		    ? bch2_dev_list_has_dev(p->devs, dev_idx)
		    : p->devs.nr < c->opts.metadata_replicas)
			seq = iter;
	spin_unlock(&j->lock);

	bch2_journal_flush_pins(j, seq);

	ret = bch2_journal_error(j);
	if (ret)
		return ret;

	mutex_lock(&c->replicas_gc_lock);
	bch2_replicas_gc_start(c, 1 << BCH_DATA_journal);

	/*
	 * Now that we've populated replicas_gc, write to the journal to mark
	 * active journal devices. This handles the case where the journal might
	 * be empty. Otherwise we could clear all journal replicas and
	 * temporarily put the fs into an unrecoverable state. Journal recovery
	 * expects to find devices marked for journal data on unclean mount.
	 */
	ret = bch2_journal_meta(&c->journal);
	if (ret)
		goto err;

	seq = 0;
	spin_lock(&j->lock);
	while (!ret) {
		struct bch_replicas_padded replicas;

		seq = max(seq, journal_last_seq(j));
		if (seq >= j->pin.back)
			break;
		bch2_devlist_to_replicas(&replicas.e, BCH_DATA_journal,
					 journal_seq_pin(j, seq)->devs);
		seq++;

		if (replicas.e.nr_devs) {
			spin_unlock(&j->lock);
			ret = bch2_mark_replicas(c, &replicas.e);
			spin_lock(&j->lock);
		}
	}
	spin_unlock(&j->lock);
err:
	ret = bch2_replicas_gc_end(c, ret);
	mutex_unlock(&c->replicas_gc_lock);

	return ret;
}

bool bch2_journal_seq_pins_to_text(struct printbuf *out, struct journal *j, u64 *seq)
{
	struct journal_entry_pin_list *pin_list;
	struct journal_entry_pin *pin;

	spin_lock(&j->lock);
	if (!test_bit(JOURNAL_running, &j->flags)) {
		spin_unlock(&j->lock);
		return true;
	}

	*seq = max(*seq, j->pin.front);

	if (*seq >= j->pin.back) {
		spin_unlock(&j->lock);
		return true;
	}

	out->atomic++;

	pin_list = journal_seq_pin(j, *seq);

	prt_printf(out, "%llu: count %u\n", *seq, atomic_read(&pin_list->count));
	printbuf_indent_add(out, 2);

	prt_printf(out, "unflushed:\n");
	for (unsigned i = 0; i < ARRAY_SIZE(pin_list->unflushed); i++)
		list_for_each_entry(pin, &pin_list->unflushed[i], list)
			prt_printf(out, "\t%px %ps\n", pin, pin->flush);

	prt_printf(out, "flushed:\n");
	for (unsigned i = 0; i < ARRAY_SIZE(pin_list->flushed); i++)
		list_for_each_entry(pin, &pin_list->flushed[i], list)
			prt_printf(out, "\t%px %ps\n", pin, pin->flush);

	printbuf_indent_sub(out, 2);

	--out->atomic;
	spin_unlock(&j->lock);

	return false;
}

void bch2_journal_pins_to_text(struct printbuf *out, struct journal *j)
{
	u64 seq = 0;

	while (!bch2_journal_seq_pins_to_text(out, j, &seq))
		seq++;
}