// SPDX-License-Identifier: GPL-2.0
/*
 * bcachefs journalling code, for btree insertions
 *
 * Copyright 2012 Google, Inc.
 */

#include "bcachefs.h"
#include "alloc_foreground.h"
#include "bkey_methods.h"
#include "btree_gc.h"
#include "btree_update.h"
#include "btree_write_buffer.h"
#include "buckets.h"
#include "error.h"
#include "journal.h"
#include "journal_io.h"
#include "journal_reclaim.h"
#include "journal_sb.h"
#include "journal_seq_blacklist.h"
#include "trace.h"

static inline bool journal_seq_unwritten(struct journal *j, u64 seq)
{
	return seq > j->seq_ondisk;
}

static bool __journal_entry_is_open(union journal_res_state state)
{
	return state.cur_entry_offset < JOURNAL_ENTRY_CLOSED_VAL;
}

static inline unsigned nr_unwritten_journal_entries(struct journal *j)
{
	return atomic64_read(&j->seq) - j->seq_ondisk;
}

static bool journal_entry_is_open(struct journal *j)
{
	return __journal_entry_is_open(j->reservations);
}

static void bch2_journal_buf_to_text(struct printbuf *out, struct journal *j, u64 seq)
{
	union journal_res_state s = READ_ONCE(j->reservations);
	unsigned i = seq & JOURNAL_BUF_MASK;
	struct journal_buf *buf = j->buf + i;

	prt_printf(out, "seq:\t%llu\n", seq);
	printbuf_indent_add(out, 2);

	if (!buf->write_started)
		prt_printf(out, "refcount:\t%u\n", journal_state_count(s, i & JOURNAL_STATE_BUF_MASK));

	struct closure *cl = &buf->io;
	int r = atomic_read(&cl->remaining);
	prt_printf(out, "io:\t%pS r %i\n", cl->fn, r & CLOSURE_REMAINING_MASK);

	if (buf->data) {
		prt_printf(out, "size:\t");
		prt_human_readable_u64(out, vstruct_bytes(buf->data));
		prt_newline(out);
	}

	prt_printf(out, "expires:\t%li jiffies\n", buf->expires - jiffies);

	prt_printf(out, "flags:\t");
	if (buf->noflush)
		prt_str(out, "noflush ");
	if (buf->must_flush)
		prt_str(out, "must_flush ");
	if (buf->separate_flush)
		prt_str(out, "separate_flush ");
	if (buf->need_flush_to_write_buffer)
		prt_str(out, "need_flush_to_write_buffer ");
	if (buf->write_started)
		prt_str(out, "write_started ");
	if (buf->write_allocated)
		prt_str(out, "write_allocated ");
	if (buf->write_done)
		prt_str(out, "write_done");
	prt_newline(out);

	printbuf_indent_sub(out, 2);
}

static void bch2_journal_bufs_to_text(struct printbuf *out, struct journal *j)
{
	lockdep_assert_held(&j->lock);
	out->atomic++;

	if (!out->nr_tabstops)
		printbuf_tabstop_push(out, 24);

	for (u64 seq = journal_last_unwritten_seq(j);
	     seq <= journal_cur_seq(j);
	     seq++)
		bch2_journal_buf_to_text(out, j, seq);
	prt_printf(out, "last buf %s\n", journal_entry_is_open(j) ? "open" : "closed");

	--out->atomic;
}

static inline struct journal_buf *
journal_seq_to_buf(struct journal *j, u64 seq)
{
	struct journal_buf *buf = NULL;

	EBUG_ON(seq > journal_cur_seq(j));

	if (journal_seq_unwritten(j, seq))
		buf = j->buf + (seq & JOURNAL_BUF_MASK);
	return buf;
}

static void journal_pin_list_init(struct journal_entry_pin_list *p, int count)
{
	for (unsigned i = 0; i < ARRAY_SIZE(p->unflushed); i++)
		INIT_LIST_HEAD(&p->unflushed[i]);
	for (unsigned i = 0; i < ARRAY_SIZE(p->flushed); i++)
		INIT_LIST_HEAD(&p->flushed[i]);
	atomic_set(&p->count, count);
	p->devs.nr = 0;
}

/*
 * Detect stuck journal conditions and trigger shutdown. Technically the journal
 * can end up stuck for a variety of reasons, such as a blocked I/O, journal
 * reservation lockup, etc. Since this is a fatal error with potentially
 * unpredictable characteristics, we want to be fairly conservative before we
 * decide to shut things down.
 *
 * Consider the journal stuck when it appears full with no ability to commit
 * btree transactions, discard journal buckets, or acquire a priority
 * (reserved watermark) reservation.
 */
static inline bool
journal_error_check_stuck(struct journal *j, int error, unsigned flags)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	bool stuck = false;
	struct printbuf buf = PRINTBUF;

	buf.atomic++;

	if (!(error == -BCH_ERR_journal_full ||
	      error == -BCH_ERR_journal_pin_full) ||
	    nr_unwritten_journal_entries(j) ||
	    (flags & BCH_WATERMARK_MASK) != BCH_WATERMARK_reclaim)
		return stuck;

	spin_lock(&j->lock);

	if (j->can_discard) {
		spin_unlock(&j->lock);
		return stuck;
	}

	stuck = true;

	/*
	 * The journal shutdown path will set ->err_seq, but do it here first to
	 * serialize against concurrent failures and avoid duplicate error
	 * reports.
	 */
	if (j->err_seq) {
		spin_unlock(&j->lock);
		return stuck;
	}
	j->err_seq = journal_cur_seq(j);

	__bch2_journal_debug_to_text(&buf, j);
	spin_unlock(&j->lock);
	prt_printf(&buf, bch2_fmt(c, "Journal stuck! Have a pre-reservation but journal full (error %s)"),
		   bch2_err_str(error));
	bch2_print_string_as_lines(KERN_ERR, buf.buf);

	printbuf_reset(&buf);
	bch2_journal_pins_to_text(&buf, j);
	bch_err(c, "Journal pins:\n%s", buf.buf);
	printbuf_exit(&buf);

	bch2_fatal_error(c);
	dump_stack();

	return stuck;
}

void bch2_journal_do_writes(struct journal *j)
{
	for (u64 seq = journal_last_unwritten_seq(j);
	     seq <= journal_cur_seq(j);
	     seq++) {
		unsigned idx = seq & JOURNAL_BUF_MASK;
		struct journal_buf *w = j->buf + idx;

		if (w->write_started && !w->write_allocated)
			break;
		if (w->write_started)
			continue;

		if (!journal_state_seq_count(j, j->reservations, seq)) {
			j->seq_write_started = seq;
			w->write_started = true;
			closure_call(&w->io, bch2_journal_write, j->wq, NULL);
		}

		break;
	}
}

/*
 * Final processing when the last reference of a journal buffer has been
 * dropped. Drop the pin list reference acquired at journal entry open and write
 * the buffer, if requested.
 */
void bch2_journal_buf_put_final(struct journal *j, u64 seq)
{
	lockdep_assert_held(&j->lock);

	if (__bch2_journal_pin_put(j, seq))
		bch2_journal_reclaim_fast(j);
	bch2_journal_do_writes(j);

	/*
	 * for __bch2_next_write_buffer_flush_journal_buf(), when quiescing an
	 * open journal entry
	 */
	wake_up(&j->wait);
}

/*
 * We don't close a journal_buf until the next journal_buf is finished writing,
 * and can be opened again - this also initializes the next journal_buf:
 */
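/*
 * A sketch of the cur_entry_offset transitions, as inferred from the checks in
 * this file (the sentinel values themselves are defined elsewhere); offsets
 * below JOURNAL_ENTRY_CLOSED_VAL mean the entry is open, and the offset is how
 * many u64s of the entry have been handed out as reservations so far:
 *
 *   open (offset < CLOSED_VAL) --close--> CLOSED_VAL
 *   CLOSED_VAL --journal_entry_open()--> open
 *   any open state --halt--> ERROR_VAL (sticky: never transitions out)
 *   open --__bch2_journal_block()--> BLOCKED_VAL, with the real offset parked
 *       in j->cur_entry_offset_if_blocked until bch2_journal_unblock()
 *
 * Note that BLOCKED_VAL still counts as open for __journal_entry_is_open().
 */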
static void __journal_entry_close(struct journal *j, unsigned closed_val, bool trace)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct journal_buf *buf = journal_cur_buf(j);
	union journal_res_state old, new;
	unsigned sectors;

	BUG_ON(closed_val != JOURNAL_ENTRY_CLOSED_VAL &&
	       closed_val != JOURNAL_ENTRY_ERROR_VAL);

	lockdep_assert_held(&j->lock);

	old.v = atomic64_read(&j->reservations.counter);
	do {
		new.v = old.v;
		new.cur_entry_offset = closed_val;

		if (old.cur_entry_offset == JOURNAL_ENTRY_ERROR_VAL ||
		    old.cur_entry_offset == new.cur_entry_offset)
			return;
	} while (!atomic64_try_cmpxchg(&j->reservations.counter,
				       &old.v, new.v));

	if (!__journal_entry_is_open(old))
		return;

	if (old.cur_entry_offset == JOURNAL_ENTRY_BLOCKED_VAL)
		old.cur_entry_offset = j->cur_entry_offset_if_blocked;

	/* Close out old buffer: */
	buf->data->u64s = cpu_to_le32(old.cur_entry_offset);

	if (trace_journal_entry_close_enabled() && trace) {
		struct printbuf pbuf = PRINTBUF;
		pbuf.atomic++;

		prt_str(&pbuf, "entry size: ");
		prt_human_readable_u64(&pbuf, vstruct_bytes(buf->data));
		prt_newline(&pbuf);
		bch2_prt_task_backtrace(&pbuf, current, 1, GFP_NOWAIT);
		trace_journal_entry_close(c, pbuf.buf);
		printbuf_exit(&pbuf);
	}

	sectors = vstruct_blocks_plus(buf->data, c->block_bits,
				      buf->u64s_reserved) << c->block_bits;
	if (unlikely(sectors > buf->sectors)) {
		struct printbuf err = PRINTBUF;
		err.atomic++;

		prt_printf(&err, "journal entry overran reserved space: %u > %u\n",
			   sectors, buf->sectors);
		prt_printf(&err, "buf u64s %u u64s reserved %u cur_entry_u64s %u block_bits %u\n",
			   le32_to_cpu(buf->data->u64s), buf->u64s_reserved,
			   j->cur_entry_u64s,
			   c->block_bits);
		prt_printf(&err, "fatal error - emergency read only");
		bch2_journal_halt_locked(j);

		bch_err(c, "%s", err.buf);
		printbuf_exit(&err);
		return;
	}

	buf->sectors = sectors;

	/*
	 * We have to set last_seq here, _before_ opening a new journal entry:
	 *
	 * A thread may replace an old pin with a new pin on its current
	 * journal reservation - the expectation being that the journal will
	 * contain either what the old pin protected or what the new pin
	 * protects.
	 *
	 * After the old pin is dropped journal_last_seq() won't include the old
	 * pin, so we can only write the updated last_seq on the entry that
	 * contains whatever the new pin protects.
	 *
	 * Restated, we can _not_ update last_seq for a given entry if there
	 * could be a newer entry open with reservations/pins that have been
	 * taken against it.
	 *
	 * Hence, we want to update/set last_seq on the current journal entry
	 * right before we open a new one:
	 */
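	/*
	 * Worked example (hypothetical sequence numbers): entry 7 is the
	 * current open entry, and a thread holding a reservation against it
	 * drops its pin on seq 3 and takes a new pin on seq 7. Once the old
	 * pin is dropped, journal_last_seq() can advance past 3 - but only
	 * entry 7, the entry the new pin was taken against, may record that
	 * newer last_seq: stamping it into an already-closed entry would
	 * declare seq 3 reclaimable before what the new pin protects is on
	 * disk.
	 */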
	buf->last_seq = journal_last_seq(j);
	buf->data->last_seq = cpu_to_le64(buf->last_seq);
	BUG_ON(buf->last_seq > le64_to_cpu(buf->data->seq));

	cancel_delayed_work(&j->write_work);

	bch2_journal_space_available(j);

	__bch2_journal_buf_put(j, le64_to_cpu(buf->data->seq));
}

void bch2_journal_halt(struct journal *j)
{
	spin_lock(&j->lock);
	__journal_entry_close(j, JOURNAL_ENTRY_ERROR_VAL, true);
	if (!j->err_seq)
		j->err_seq = journal_cur_seq(j);
	journal_wake(j);
	spin_unlock(&j->lock);
}

void bch2_journal_halt_locked(struct journal *j)
{
	lockdep_assert_held(&j->lock);

	__journal_entry_close(j, JOURNAL_ENTRY_ERROR_VAL, true);
	if (!j->err_seq)
		j->err_seq = journal_cur_seq(j);
	journal_wake(j);
}

static bool journal_entry_want_write(struct journal *j)
{
	bool ret = !journal_entry_is_open(j) ||
		journal_cur_seq(j) == journal_last_unwritten_seq(j);

	/* Don't close it yet if we already have a write in flight: */
	if (ret)
		__journal_entry_close(j, JOURNAL_ENTRY_CLOSED_VAL, true);
	else if (nr_unwritten_journal_entries(j)) {
		struct journal_buf *buf = journal_cur_buf(j);

		if (!buf->flush_time) {
			buf->flush_time = local_clock() ?: 1;
			buf->expires = jiffies;
		}
	}

	return ret;
}

bool bch2_journal_entry_close(struct journal *j)
{
	bool ret;

	spin_lock(&j->lock);
	ret = journal_entry_want_write(j);
	spin_unlock(&j->lock);

	return ret;
}

/*
 * should _only_ be called from journal_res_get() - when we actually want a
 * journal reservation - journal entry is open means journal is dirty:
 */
static int journal_entry_open(struct journal *j)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct journal_buf *buf = j->buf +
		((journal_cur_seq(j) + 1) & JOURNAL_BUF_MASK);
	union journal_res_state old, new;
	int u64s;

	lockdep_assert_held(&j->lock);
	BUG_ON(journal_entry_is_open(j));
	BUG_ON(BCH_SB_CLEAN(c->disk_sb.sb));

	if (j->blocked)
		return -BCH_ERR_journal_blocked;

	if (j->cur_entry_error)
		return j->cur_entry_error;

	int ret = bch2_journal_error(j);
	if (unlikely(ret))
		return ret;

	if (!fifo_free(&j->pin))
		return -BCH_ERR_journal_pin_full;

	if (nr_unwritten_journal_entries(j) == ARRAY_SIZE(j->buf))
		return -BCH_ERR_journal_max_in_flight;

	if (atomic64_read(&j->seq) - j->seq_write_started == JOURNAL_STATE_BUF_NR)
		return -BCH_ERR_journal_max_open;

	if (journal_cur_seq(j) >= JOURNAL_SEQ_MAX) {
		bch_err(c, "cannot start: journal seq overflow");
		if (bch2_fs_emergency_read_only_locked(c))
			bch_err(c, "fatal error - emergency read only");
		return -BCH_ERR_journal_shutdown;
	}

	if (!j->free_buf && !buf->data)
		return -BCH_ERR_journal_buf_enomem; /* will retry after write completion frees up a buf */

	BUG_ON(!j->cur_entry_sectors);

	if (!buf->data) {
		swap(buf->data, j->free_buf);
		swap(buf->buf_size, j->free_buf_size);
	}

	buf->expires =
		(journal_cur_seq(j) == j->flushed_seq_ondisk
		 ? jiffies
		 : j->last_flush_write) +
		msecs_to_jiffies(c->opts.journal_flush_delay);

	buf->u64s_reserved = j->entry_u64s_reserved;
	buf->disk_sectors = j->cur_entry_sectors;
	buf->sectors = min(buf->disk_sectors, buf->buf_size >> 9);

	u64s = (int) (buf->sectors << 9) / sizeof(u64) -
		journal_entry_overhead(j);
	u64s = clamp_t(int, u64s, 0, JOURNAL_ENTRY_CLOSED_VAL - 1);

	if (u64s <= (ssize_t) j->early_journal_entries.nr)
		return -BCH_ERR_journal_full;

	if (fifo_empty(&j->pin) && j->reclaim_thread)
		wake_up_process(j->reclaim_thread);

	/*
	 * The fifo_push() needs to happen at the same time as j->seq is
	 * incremented for journal_last_seq() to be calculated correctly
	 */
	atomic64_inc(&j->seq);
	journal_pin_list_init(fifo_push_ref(&j->pin), 1);

	BUG_ON(j->pin.back - 1 != atomic64_read(&j->seq));

	BUG_ON(j->buf + (journal_cur_seq(j) & JOURNAL_BUF_MASK) != buf);

	bkey_extent_init(&buf->key);
	buf->noflush = false;
	buf->must_flush = false;
	buf->separate_flush = false;
	buf->flush_time = 0;
	buf->need_flush_to_write_buffer = true;
	buf->write_started = false;
	buf->write_allocated = false;
	buf->write_done = false;

	memset(buf->data, 0, sizeof(*buf->data));
	buf->data->seq = cpu_to_le64(journal_cur_seq(j));
	buf->data->u64s = 0;

	if (j->early_journal_entries.nr) {
		memcpy(buf->data->_data, j->early_journal_entries.data,
		       j->early_journal_entries.nr * sizeof(u64));
		le32_add_cpu(&buf->data->u64s, j->early_journal_entries.nr);
	}

	/*
	 * Must be set before marking the journal entry as open:
	 */
	j->cur_entry_u64s = u64s;

	old.v = atomic64_read(&j->reservations.counter);
	do {
		new.v = old.v;

		BUG_ON(old.cur_entry_offset == JOURNAL_ENTRY_ERROR_VAL);

		new.idx++;
		BUG_ON(journal_state_count(new, new.idx));
		BUG_ON(new.idx != (journal_cur_seq(j) & JOURNAL_STATE_BUF_MASK));

		journal_state_inc(&new);

		/* Handle any already added entries */
		new.cur_entry_offset = le32_to_cpu(buf->data->u64s);
	} while (!atomic64_try_cmpxchg(&j->reservations.counter,
				       &old.v, new.v));

	if (nr_unwritten_journal_entries(j) == 1)
		mod_delayed_work(j->wq,
				 &j->write_work,
				 msecs_to_jiffies(c->opts.journal_flush_delay));
	journal_wake(j);

	if (j->early_journal_entries.nr)
		darray_exit(&j->early_journal_entries);
	return 0;
}

static bool journal_quiesced(struct journal *j)
{
	bool ret = atomic64_read(&j->seq) == j->seq_ondisk;

	if (!ret)
		bch2_journal_entry_close(j);
	return ret;
}

static void journal_quiesce(struct journal *j)
{
	wait_event(j->wait, journal_quiesced(j));
}

static void journal_write_work(struct work_struct *work)
{
	struct journal *j = container_of(work, struct journal, write_work.work);

	spin_lock(&j->lock);
	if (__journal_entry_is_open(j->reservations)) {
		long delta = journal_cur_buf(j)->expires - jiffies;

		if (delta > 0)
			mod_delayed_work(j->wq, &j->write_work, delta);
		else
			__journal_entry_close(j, JOURNAL_ENTRY_CLOSED_VAL, true);
	}
	spin_unlock(&j->lock);
}

static void journal_buf_prealloc(struct journal *j)
{
	if (j->free_buf &&
	    j->free_buf_size >= j->buf_size_want)
		return;

	unsigned buf_size = j->buf_size_want;

	spin_unlock(&j->lock);
	void *buf = kvmalloc(buf_size, GFP_NOFS);
	spin_lock(&j->lock);

	if (buf &&
	    (!j->free_buf ||
	     buf_size > j->free_buf_size)) {
		swap(buf, j->free_buf);
		swap(buf_size, j->free_buf_size);
	}

	if (unlikely(buf)) {
		spin_unlock(&j->lock);
		/* kvfree can sleep */
		kvfree(buf);
		spin_lock(&j->lock);
	}
}

static int __journal_res_get(struct journal *j, struct journal_res *res,
			     unsigned flags)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct journal_buf *buf;
	bool can_discard;
	int ret;
retry:
	if (journal_res_get_fast(j, res, flags))
		return 0;

	ret = bch2_journal_error(j);
	if (unlikely(ret))
		return ret;

	if (j->blocked)
		return -BCH_ERR_journal_blocked;

	if ((flags & BCH_WATERMARK_MASK) < j->watermark) {
		ret = -BCH_ERR_journal_full;
		can_discard = j->can_discard;
		goto out;
	}

	if (nr_unwritten_journal_entries(j) == ARRAY_SIZE(j->buf) && !journal_entry_is_open(j)) {
		ret = -BCH_ERR_journal_max_in_flight;
		goto out;
	}

	spin_lock(&j->lock);

	journal_buf_prealloc(j);

	/*
	 * Recheck after taking the lock, so we don't race with another thread
	 * that just did journal_entry_open() and call bch2_journal_entry_close()
	 * unnecessarily
	 */
	if (journal_res_get_fast(j, res, flags)) {
		ret = 0;
		goto unlock;
	}

	/*
	 * If we couldn't get a reservation because the current buf filled up,
	 * and we had room for a bigger entry on disk, signal that we want to
	 * realloc the journal bufs:
	 */
	buf = journal_cur_buf(j);
	if (journal_entry_is_open(j) &&
	    buf->buf_size >> 9 < buf->disk_sectors &&
	    buf->buf_size < JOURNAL_ENTRY_SIZE_MAX)
		j->buf_size_want = max(j->buf_size_want, buf->buf_size << 1);

	__journal_entry_close(j, JOURNAL_ENTRY_CLOSED_VAL, false);
	ret = journal_entry_open(j) ?: -BCH_ERR_journal_retry_open;
unlock:
	can_discard = j->can_discard;
	spin_unlock(&j->lock);
out:
	if (likely(!ret))
		return 0;
	if (ret == -BCH_ERR_journal_retry_open)
		goto retry;

	if (journal_error_check_stuck(j, ret, flags))
		ret = -BCH_ERR_journal_stuck;

	if (ret == -BCH_ERR_journal_max_in_flight &&
	    track_event_change(&c->times[BCH_TIME_blocked_journal_max_in_flight], true) &&
	    trace_journal_entry_full_enabled()) {
		struct printbuf buf = PRINTBUF;

		bch2_printbuf_make_room(&buf, 4096);

		spin_lock(&j->lock);
		prt_printf(&buf, "seq %llu\n", journal_cur_seq(j));
		bch2_journal_bufs_to_text(&buf, j);
		spin_unlock(&j->lock);

		trace_journal_entry_full(c, buf.buf);
		printbuf_exit(&buf);
		count_event(c, journal_entry_full);
	}

	if (ret == -BCH_ERR_journal_max_open &&
	    track_event_change(&c->times[BCH_TIME_blocked_journal_max_open], true) &&
	    trace_journal_entry_full_enabled()) {
		struct printbuf buf = PRINTBUF;

		bch2_printbuf_make_room(&buf, 4096);

		spin_lock(&j->lock);
		prt_printf(&buf, "seq %llu\n", journal_cur_seq(j));
		bch2_journal_bufs_to_text(&buf, j);
		spin_unlock(&j->lock);

		trace_journal_entry_full(c, buf.buf);
		printbuf_exit(&buf);
		count_event(c, journal_entry_full);
	}

	/*
	 * Journal is full - can't rely on reclaim from work item due to
	 * freezing:
	 */
	if ((ret == -BCH_ERR_journal_full ||
	     ret == -BCH_ERR_journal_pin_full) &&
	    !(flags & JOURNAL_RES_GET_NONBLOCK)) {
		if (can_discard) {
			bch2_journal_do_discards(j);
			goto retry;
		}

		if (mutex_trylock(&j->reclaim_lock)) {
			bch2_journal_reclaim(j);
			mutex_unlock(&j->reclaim_lock);
		}
	}

	return ret;
}

static unsigned max_dev_latency(struct bch_fs *c)
{
	u64 nsecs = 0;

	for_each_rw_member(c, ca)
		nsecs = max(nsecs, ca->io_latency[WRITE].stats.max_duration);

	return nsecs_to_jiffies(nsecs);
}

/*
 * Essentially the entry function to the journalling code. When bcachefs is
 * doing a btree insert, it calls this function to get the current journal
 * write. The journal write is the structure used to set up journal writes. The
 * calling function will then add its keys to the structure, queuing them for
 * the next write.
 *
 * To ensure forward progress, the current task must not be holding any
 * btree node write locks.
 */
int bch2_journal_res_get_slowpath(struct journal *j, struct journal_res *res,
				  unsigned flags,
				  struct btree_trans *trans)
{
	int ret;

	if (closure_wait_event_timeout(&j->async_wait,
				       !bch2_err_matches(ret = __journal_res_get(j, res, flags), BCH_ERR_operation_blocked) ||
				       (flags & JOURNAL_RES_GET_NONBLOCK),
				       HZ))
		return ret;

	if (trans)
		bch2_trans_unlock_long(trans);

	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	int remaining_wait = max(max_dev_latency(c) * 2, HZ * 10);

	remaining_wait = max(0, remaining_wait - HZ);

	if (closure_wait_event_timeout(&j->async_wait,
				       !bch2_err_matches(ret = __journal_res_get(j, res, flags), BCH_ERR_operation_blocked) ||
				       (flags & JOURNAL_RES_GET_NONBLOCK),
				       remaining_wait))
		return ret;

	struct printbuf buf = PRINTBUF;
	prt_printf(&buf, bch2_fmt(c, "Journal stuck? Waited for 10 seconds, err %s"), bch2_err_str(ret));
	bch2_journal_debug_to_text(&buf, j);
	bch2_print_string_as_lines(KERN_ERR, buf.buf);
	printbuf_exit(&buf);

	closure_wait_event(&j->async_wait,
			   !bch2_err_matches(ret = __journal_res_get(j, res, flags), BCH_ERR_operation_blocked) ||
			   (flags & JOURNAL_RES_GET_NONBLOCK));
	return ret;
}
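
/*
 * An illustrative caller, sketching the reservation lifecycle described above
 * (hypothetical - real callers live in the btree update path; u64s here stands
 * for however many u64s of keys the caller wants to journal):
 *
 *	struct journal_res res = {};
 *	int ret = bch2_journal_res_get(j, &res, jset_u64s(u64s), 0, NULL);
 *	if (ret)
 *		return ret;
 *	...copy keys into the entry's reserved space...
 *	bch2_journal_res_put(j, &res);
 */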

/* journal_entry_res: */

void bch2_journal_entry_res_resize(struct journal *j,
				   struct journal_entry_res *res,
				   unsigned new_u64s)
{
	union journal_res_state state;
	int d = new_u64s - res->u64s;

	spin_lock(&j->lock);

	j->entry_u64s_reserved += d;
	if (d <= 0)
		goto out;

	j->cur_entry_u64s = max_t(int, 0, j->cur_entry_u64s - d);
	state = READ_ONCE(j->reservations);

	if (state.cur_entry_offset < JOURNAL_ENTRY_CLOSED_VAL &&
	    state.cur_entry_offset > j->cur_entry_u64s) {
		j->cur_entry_u64s += d;
		/*
		 * Not enough room in current journal entry, have to flush it:
		 */
		__journal_entry_close(j, JOURNAL_ENTRY_CLOSED_VAL, true);
	} else {
		journal_cur_buf(j)->u64s_reserved += d;
	}
out:
	spin_unlock(&j->lock);
	res->u64s += d;
}

/* journal flushing: */

/**
 * bch2_journal_flush_seq_async - wait for a journal entry to be written
 * @j: journal object
 * @seq: seq to flush
 * @parent: closure object to wait with
 * Returns: 1 if @seq has already been flushed, 0 if @seq is being flushed,
 * -BCH_ERR_journal_flush_err if @seq will never be flushed
 *
 * Like bch2_journal_wait_on_seq, except that it triggers a write immediately if
 * necessary
 */
int bch2_journal_flush_seq_async(struct journal *j, u64 seq,
				 struct closure *parent)
{
	struct journal_buf *buf;
	int ret = 0;

	if (seq <= j->flushed_seq_ondisk)
		return 1;

	spin_lock(&j->lock);

	if (WARN_ONCE(seq > journal_cur_seq(j),
		      "requested to flush journal seq %llu, but currently at %llu",
		      seq, journal_cur_seq(j)))
		goto out;

	/* Recheck under lock: */
	if (j->err_seq && seq >= j->err_seq) {
		ret = -BCH_ERR_journal_flush_err;
		goto out;
	}

	if (seq <= j->flushed_seq_ondisk) {
		ret = 1;
		goto out;
	}

	/* if seq was written, but not flushed - flush a newer one instead */
	seq = max(seq, journal_last_unwritten_seq(j));

recheck_need_open:
	if (seq > journal_cur_seq(j)) {
		struct journal_res res = { 0 };

		if (journal_entry_is_open(j))
			__journal_entry_close(j, JOURNAL_ENTRY_CLOSED_VAL, true);

		spin_unlock(&j->lock);

		/*
		 * We're called from bch2_journal_flush_seq() -> wait_event();
		 * but this might block. We won't usually block, so we won't
		 * livelock:
		 */
		sched_annotate_sleep();
		ret = bch2_journal_res_get(j, &res, jset_u64s(0), 0, NULL);
		if (ret)
			return ret;

		seq = res.seq;
		buf = journal_seq_to_buf(j, seq);
		buf->must_flush = true;

		if (!buf->flush_time) {
			buf->flush_time = local_clock() ?: 1;
			buf->expires = jiffies;
		}

		if (parent && !closure_wait(&buf->wait, parent))
			BUG();

		bch2_journal_res_put(j, &res);

		spin_lock(&j->lock);
		goto want_write;
	}

	/*
	 * if write was kicked off without a flush, or if we promised it
	 * wouldn't be a flush, flush the next sequence number instead
	 */
	buf = journal_seq_to_buf(j, seq);
	if (buf->noflush) {
		seq++;
		goto recheck_need_open;
	}

	buf->must_flush = true;
	j->flushing_seq = max(j->flushing_seq, seq);

	if (parent && !closure_wait(&buf->wait, parent))
		BUG();
want_write:
	if (seq == journal_cur_seq(j))
		journal_entry_want_write(j);
out:
	spin_unlock(&j->lock);
	return ret;
}
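
/*
 * A minimal usage sketch for the closure-based wait (hypothetical caller,
 * assuming the standard closure API used elsewhere in this file):
 *
 *	struct closure cl;
 *	closure_init_stack(&cl);
 *	int ret = bch2_journal_flush_seq_async(j, seq, &cl);
 *	closure_sync(&cl);
 *
 * If ret == 0 the entry is in flight and closure_sync() waits for the write to
 * complete; if ret == 1 it was already flushed and closure_sync() returns
 * immediately; ret < 0 means @seq will never be flushed.
 */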

int bch2_journal_flush_seq(struct journal *j, u64 seq, unsigned task_state)
{
	u64 start_time = local_clock();
	int ret, ret2;

	/*
	 * Don't update time_stats when @seq is already flushed:
	 */
	if (seq <= j->flushed_seq_ondisk)
		return 0;

	ret = wait_event_state(j->wait,
			       (ret2 = bch2_journal_flush_seq_async(j, seq, NULL)),
			       task_state);

	if (!ret)
		bch2_time_stats_update(j->flush_seq_time, start_time);

	return ret ?: ret2 < 0 ? ret2 : 0;
}

/*
 * bch2_journal_flush_async - if there is an open journal entry, or a journal
 * still being written, write it and wait for the write to complete
 */
void bch2_journal_flush_async(struct journal *j, struct closure *parent)
{
	bch2_journal_flush_seq_async(j, atomic64_read(&j->seq), parent);
}

int bch2_journal_flush(struct journal *j)
{
	return bch2_journal_flush_seq(j, atomic64_read(&j->seq), TASK_UNINTERRUPTIBLE);
}

/*
 * bch2_journal_noflush_seq - ask the journal not to issue any flushes in the
 * range [start, end)
 */
bool bch2_journal_noflush_seq(struct journal *j, u64 start, u64 end)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	u64 unwritten_seq;
	bool ret = false;

	if (!(c->sb.features & (1ULL << BCH_FEATURE_journal_no_flush)))
		return false;

	if (c->journal.flushed_seq_ondisk >= start)
		return false;

	spin_lock(&j->lock);
	if (c->journal.flushed_seq_ondisk >= start)
		goto out;

	for (unwritten_seq = journal_last_unwritten_seq(j);
	     unwritten_seq < end;
	     unwritten_seq++) {
		struct journal_buf *buf = journal_seq_to_buf(j, unwritten_seq);

		/* journal flush already in flight, or flush requested */
		if (buf->must_flush)
			goto out;

		buf->noflush = true;
	}

	ret = true;
out:
	spin_unlock(&j->lock);
	return ret;
}

static int __bch2_journal_meta(struct journal *j)
{
	struct journal_res res = {};
	int ret = bch2_journal_res_get(j, &res, jset_u64s(0), 0, NULL);
	if (ret)
		return ret;

	struct journal_buf *buf = j->buf + (res.seq & JOURNAL_BUF_MASK);
	buf->must_flush = true;

	if (!buf->flush_time) {
		buf->flush_time = local_clock() ?: 1;
		buf->expires = jiffies;
	}

	bch2_journal_res_put(j, &res);

	return bch2_journal_flush_seq(j, res.seq, TASK_UNINTERRUPTIBLE);
}

int bch2_journal_meta(struct journal *j)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);

	if (!bch2_write_ref_tryget(c, BCH_WRITE_REF_journal))
		return -BCH_ERR_erofs_no_writes;

	int ret = __bch2_journal_meta(j);
	bch2_write_ref_put(c, BCH_WRITE_REF_journal);
	return ret;
}

/* block/unblock the journal: */

void bch2_journal_unblock(struct journal *j)
{
	spin_lock(&j->lock);
	if (!--j->blocked &&
	    j->cur_entry_offset_if_blocked < JOURNAL_ENTRY_CLOSED_VAL &&
	    j->reservations.cur_entry_offset == JOURNAL_ENTRY_BLOCKED_VAL) {
		union journal_res_state old, new;

		old.v = atomic64_read(&j->reservations.counter);
		do {
			new.v = old.v;
			new.cur_entry_offset = j->cur_entry_offset_if_blocked;
		} while (!atomic64_try_cmpxchg(&j->reservations.counter, &old.v, new.v));
	}
	spin_unlock(&j->lock);

	journal_wake(j);
}

static void __bch2_journal_block(struct journal *j)
{
	if (!j->blocked++) {
		union journal_res_state old, new;

		old.v = atomic64_read(&j->reservations.counter);
		do {
			j->cur_entry_offset_if_blocked = old.cur_entry_offset;

			if (j->cur_entry_offset_if_blocked >= JOURNAL_ENTRY_CLOSED_VAL)
				break;

			new.v = old.v;
			new.cur_entry_offset = JOURNAL_ENTRY_BLOCKED_VAL;
		} while (!atomic64_try_cmpxchg(&j->reservations.counter, &old.v, new.v));

		if (old.cur_entry_offset < JOURNAL_ENTRY_BLOCKED_VAL)
			journal_cur_buf(j)->data->u64s = cpu_to_le32(old.cur_entry_offset);
	}
}

void bch2_journal_block(struct journal *j)
{
	spin_lock(&j->lock);
	__bch2_journal_block(j);
	spin_unlock(&j->lock);

	journal_quiesce(j);
}
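
/*
 * Typical block/unblock pairing, as used by bch2_set_nr_journal_buckets_iter()
 * below: quiesce the journal, mutate its layout, then let new entries open
 * again (a sketch of existing usage, not a new API):
 *
 *	bch2_journal_block(&c->journal);
 *	...resize/move journal buckets while nothing can open a new entry...
 *	bch2_journal_unblock(&c->journal);
 */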

static struct journal_buf *__bch2_next_write_buffer_flush_journal_buf(struct journal *j,
						u64 max_seq, bool *blocked)
{
	struct journal_buf *ret = NULL;

	/* We're inside wait_event(), but using mutex_lock(): */
	sched_annotate_sleep();
	mutex_lock(&j->buf_lock);
	spin_lock(&j->lock);
	max_seq = min(max_seq, journal_cur_seq(j));

	for (u64 seq = journal_last_unwritten_seq(j);
	     seq <= max_seq;
	     seq++) {
		unsigned idx = seq & JOURNAL_BUF_MASK;
		struct journal_buf *buf = j->buf + idx;

		if (buf->need_flush_to_write_buffer) {
			union journal_res_state s;
			s.v = atomic64_read_acquire(&j->reservations.counter);

			unsigned open = seq == journal_cur_seq(j) && __journal_entry_is_open(s);

			if (open && !*blocked) {
				__bch2_journal_block(j);
				*blocked = true;
			}

			ret = journal_state_count(s, idx & JOURNAL_STATE_BUF_MASK) > open
				? ERR_PTR(-EAGAIN)
				: buf;
			break;
		}
	}

	spin_unlock(&j->lock);
	if (IS_ERR_OR_NULL(ret))
		mutex_unlock(&j->buf_lock);
	return ret;
}

struct journal_buf *bch2_next_write_buffer_flush_journal_buf(struct journal *j,
							     u64 max_seq, bool *blocked)
{
	struct journal_buf *ret;
	*blocked = false;

	wait_event(j->wait, (ret = __bch2_next_write_buffer_flush_journal_buf(j,
				max_seq, blocked)) != ERR_PTR(-EAGAIN));
	if (IS_ERR_OR_NULL(ret) && *blocked)
		bch2_journal_unblock(j);

	return ret;
}

/* allocate journal on a device: */

static int bch2_set_nr_journal_buckets_iter(struct bch_dev *ca, unsigned nr,
					    bool new_fs, struct closure *cl)
{
	struct bch_fs *c = ca->fs;
	struct journal_device *ja = &ca->journal;
	u64 *new_bucket_seq = NULL, *new_buckets = NULL;
	struct open_bucket **ob = NULL;
	long *bu = NULL;
	unsigned i, pos, nr_got = 0, nr_want = nr - ja->nr;
	int ret = 0;

	BUG_ON(nr <= ja->nr);

	bu = kcalloc(nr_want, sizeof(*bu), GFP_KERNEL);
	ob = kcalloc(nr_want, sizeof(*ob), GFP_KERNEL);
	new_buckets = kcalloc(nr, sizeof(u64), GFP_KERNEL);
	new_bucket_seq = kcalloc(nr, sizeof(u64), GFP_KERNEL);
	if (!bu || !ob || !new_buckets || !new_bucket_seq) {
		ret = -BCH_ERR_ENOMEM_set_nr_journal_buckets;
		goto err_free;
	}

	for (nr_got = 0; nr_got < nr_want; nr_got++) {
		enum bch_watermark watermark = new_fs
			? BCH_WATERMARK_btree
			: BCH_WATERMARK_normal;

		ob[nr_got] = bch2_bucket_alloc(c, ca, watermark,
					       BCH_DATA_journal, cl);
		ret = PTR_ERR_OR_ZERO(ob[nr_got]);
		if (ret)
			break;

		if (!new_fs) {
			ret = bch2_trans_run(c,
				bch2_trans_mark_metadata_bucket(trans, ca,
						ob[nr_got]->bucket, BCH_DATA_journal,
						ca->mi.bucket_size, BTREE_TRIGGER_transactional));
			if (ret) {
				bch2_open_bucket_put(c, ob[nr_got]);
				bch_err_msg(c, ret, "marking new journal buckets");
				break;
			}
		}

		bu[nr_got] = ob[nr_got]->bucket;
	}

	if (!nr_got)
		goto err_free;

	/* Don't return an error if we successfully allocated some buckets: */
	ret = 0;

	if (c) {
		bch2_journal_flush_all_pins(&c->journal);
		bch2_journal_block(&c->journal);
		mutex_lock(&c->sb_lock);
	}

	memcpy(new_buckets, ja->buckets, ja->nr * sizeof(u64));
	memcpy(new_bucket_seq, ja->bucket_seq, ja->nr * sizeof(u64));

	BUG_ON(ja->discard_idx > ja->nr);

	pos = ja->discard_idx ?: ja->nr;

	memmove(new_buckets + pos + nr_got,
		new_buckets + pos,
		sizeof(new_buckets[0]) * (ja->nr - pos));
	memmove(new_bucket_seq + pos + nr_got,
		new_bucket_seq + pos,
		sizeof(new_bucket_seq[0]) * (ja->nr - pos));

	for (i = 0; i < nr_got; i++) {
		new_buckets[pos + i] = bu[i];
		new_bucket_seq[pos + i] = 0;
	}

	nr = ja->nr + nr_got;

	ret = bch2_journal_buckets_to_sb(c, ca, new_buckets, nr);
	if (ret)
		goto err_unblock;

	bch2_write_super(c);

	/* Commit: */
	if (c)
		spin_lock(&c->journal.lock);

	swap(new_buckets, ja->buckets);
	swap(new_bucket_seq, ja->bucket_seq);
	ja->nr = nr;

	if (pos <= ja->discard_idx)
		ja->discard_idx = (ja->discard_idx + nr_got) % ja->nr;
	if (pos <= ja->dirty_idx_ondisk)
		ja->dirty_idx_ondisk = (ja->dirty_idx_ondisk + nr_got) % ja->nr;
	if (pos <= ja->dirty_idx)
		ja->dirty_idx = (ja->dirty_idx + nr_got) % ja->nr;
	if (pos <= ja->cur_idx)
		ja->cur_idx = (ja->cur_idx + nr_got) % ja->nr;

	if (c)
		spin_unlock(&c->journal.lock);
err_unblock:
	if (c) {
		bch2_journal_unblock(&c->journal);
		mutex_unlock(&c->sb_lock);
	}

	if (ret && !new_fs)
		for (i = 0; i < nr_got; i++)
			bch2_trans_run(c,
				bch2_trans_mark_metadata_bucket(trans, ca,
						bu[i], BCH_DATA_free, 0,
						BTREE_TRIGGER_transactional));
err_free:
	for (i = 0; i < nr_got; i++)
		bch2_open_bucket_put(c, ob[i]);

	kfree(new_bucket_seq);
	kfree(new_buckets);
	kfree(ob);
	kfree(bu);
	return ret;
}

static int bch2_set_nr_journal_buckets_loop(struct bch_fs *c, struct bch_dev *ca,
					    unsigned nr, bool new_fs)
{
	struct journal_device *ja = &ca->journal;
	int ret = 0;

	struct closure cl;
	closure_init_stack(&cl);

	/* don't handle reducing nr of buckets yet: */
	if (nr < ja->nr)
		return 0;

	while (!ret && ja->nr < nr) {
		struct disk_reservation disk_res = { 0, 0, 0 };

		/*
		 * note: journal buckets aren't really counted as _sectors_ used yet, so
		 * we don't need the disk reservation to avoid the BUG_ON() in buckets.c
		 * when space used goes up without a reservation - but we do need the
		 * reservation to ensure we'll actually be able to allocate:
		 *
		 * XXX: that's not right, disk reservations only ensure a
		 * filesystem-wide allocation will succeed, this is a device
		 * specific allocation - we can hang here:
		 */
		if (!new_fs) {
			ret = bch2_disk_reservation_get(c, &disk_res,
							bucket_to_sector(ca, nr - ja->nr), 1, 0);
			if (ret)
				break;
		}

		ret = bch2_set_nr_journal_buckets_iter(ca, nr, new_fs, &cl);

		if (ret == -BCH_ERR_bucket_alloc_blocked ||
		    ret == -BCH_ERR_open_buckets_empty)
			ret = 0; /* wait and retry */

		bch2_disk_reservation_put(c, &disk_res);
		closure_sync(&cl);
	}

	return ret;
}

/*
 * Allocate more journal space at runtime - not currently making use of it, but
 * the code works:
 */
int bch2_set_nr_journal_buckets(struct bch_fs *c, struct bch_dev *ca,
				unsigned nr)
{
	down_write(&c->state_lock);
	int ret = bch2_set_nr_journal_buckets_loop(c, ca, nr, false);
	up_write(&c->state_lock);

	bch_err_fn(c, ret);
	return ret;
}

int bch2_dev_journal_alloc(struct bch_dev *ca, bool new_fs)
{
	unsigned nr;
	int ret;

	if (dynamic_fault("bcachefs:add:journal_alloc")) {
		ret = -BCH_ERR_ENOMEM_set_nr_journal_buckets;
		goto err;
	}

	/* 1/128th of the device by default: */
	nr = ca->mi.nbuckets >> 7;

	/*
	 * clamp journal size to 8192 buckets or 8GB (in sectors), whichever
	 * is smaller:
	 */
	nr = clamp_t(unsigned, nr,
		     BCH_JOURNAL_BUCKETS_MIN,
		     min(1 << 13,
			 (1 << 24) / ca->mi.bucket_size));
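	/*
	 * Worked example for the clamp above (hypothetical device): with 1 MiB
	 * buckets, ca->mi.bucket_size is 2048 (512-byte) sectors, and
	 * (1 << 24) / 2048 = 8192 = 1 << 13 - the two caps meet exactly at
	 * 1 MiB. Larger buckets are limited by the 8GB total, smaller ones by
	 * the 8192-bucket count.
	 */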

	ret = bch2_set_nr_journal_buckets_loop(ca->fs, ca, nr, new_fs);
err:
	bch_err_fn(ca, ret);
	return ret;
}

int bch2_fs_journal_alloc(struct bch_fs *c)
{
	for_each_online_member(c, ca) {
		if (ca->journal.nr)
			continue;

		int ret = bch2_dev_journal_alloc(ca, true);
		if (ret) {
			percpu_ref_put(&ca->io_ref[READ]);
			return ret;
		}
	}

	return 0;
}

/* startup/shutdown: */

static bool bch2_journal_writing_to_device(struct journal *j, unsigned dev_idx)
{
	bool ret = false;
	u64 seq;

	spin_lock(&j->lock);
	for (seq = journal_last_unwritten_seq(j);
	     seq <= journal_cur_seq(j) && !ret;
	     seq++) {
		struct journal_buf *buf = journal_seq_to_buf(j, seq);

		if (bch2_bkey_has_device_c(bkey_i_to_s_c(&buf->key), dev_idx))
			ret = true;
	}
	spin_unlock(&j->lock);

	return ret;
}

void bch2_dev_journal_stop(struct journal *j, struct bch_dev *ca)
{
	wait_event(j->wait, !bch2_journal_writing_to_device(j, ca->dev_idx));
}

void bch2_fs_journal_stop(struct journal *j)
{
	if (!test_bit(JOURNAL_running, &j->flags))
		return;

	bch2_journal_reclaim_stop(j);
	bch2_journal_flush_all_pins(j);

	wait_event(j->wait, bch2_journal_entry_close(j));

	/*
	 * Always write a new journal entry, to make sure the clock hands are up
	 * to date (and match the superblock)
	 */
	__bch2_journal_meta(j);

	journal_quiesce(j);
	cancel_delayed_work_sync(&j->write_work);

	WARN(!bch2_journal_error(j) &&
	     test_bit(JOURNAL_replay_done, &j->flags) &&
	     j->last_empty_seq != journal_cur_seq(j),
	     "journal shutdown error: cur seq %llu but last empty seq %llu",
	     journal_cur_seq(j), j->last_empty_seq);

	if (!bch2_journal_error(j))
		clear_bit(JOURNAL_running, &j->flags);
}

int bch2_fs_journal_start(struct journal *j, u64 cur_seq)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct journal_entry_pin_list *p;
	struct journal_replay *i, **_i;
	struct genradix_iter iter;
	bool had_entries = false;
	u64 last_seq = cur_seq, nr, seq;

	if (cur_seq >= JOURNAL_SEQ_MAX) {
		bch_err(c, "cannot start: journal seq overflow");
		return -EINVAL;
	}

	genradix_for_each_reverse(&c->journal_entries, iter, _i) {
		i = *_i;

		if (journal_replay_ignore(i))
			continue;

		last_seq = le64_to_cpu(i->j.last_seq);
		break;
	}

	nr = cur_seq - last_seq;

	/*
	 * Extra fudge factor, in case we crashed when the journal pin fifo was
	 * nearly or completely full. We'll need to be able to open additional
	 * journal entries (at least a few) in order for journal replay to get
	 * going:
	 */
	nr += nr / 4;

	if (nr + 1 > j->pin.size) {
		free_fifo(&j->pin);
		init_fifo(&j->pin, roundup_pow_of_two(nr + 1), GFP_KERNEL);
		if (!j->pin.data) {
			bch_err(c, "error reallocating journal fifo (%llu open entries)", nr);
			return -BCH_ERR_ENOMEM_journal_pin_fifo;
		}
	}

	j->replay_journal_seq = last_seq;
	j->replay_journal_seq_end = cur_seq;
	j->last_seq_ondisk = last_seq;
	j->flushed_seq_ondisk = cur_seq - 1;
	j->seq_write_started = cur_seq - 1;
	j->seq_ondisk = cur_seq - 1;
	j->pin.front = last_seq;
	j->pin.back = cur_seq;
	atomic64_set(&j->seq, cur_seq - 1);

	fifo_for_each_entry_ptr(p, &j->pin, seq)
		journal_pin_list_init(p, 1);

	genradix_for_each(&c->journal_entries, iter, _i) {
		i = *_i;

		if (journal_replay_ignore(i))
			continue;

		seq = le64_to_cpu(i->j.seq);
		BUG_ON(seq >= cur_seq);

		if (seq < last_seq)
			continue;

		if (journal_entry_empty(&i->j))
			j->last_empty_seq = le64_to_cpu(i->j.seq);

		p = journal_seq_pin(j, seq);

		p->devs.nr = 0;
		darray_for_each(i->ptrs, ptr)
			bch2_dev_list_add_dev(&p->devs, ptr->dev);

		had_entries = true;
	}

	if (!had_entries)
		j->last_empty_seq = cur_seq - 1; /* to match j->seq */

	spin_lock(&j->lock);
	j->last_flush_write = jiffies;

	j->reservations.idx = journal_cur_seq(j);

	c->last_bucket_seq_cleanup = journal_cur_seq(j);
	spin_unlock(&j->lock);

	return 0;
}

void bch2_journal_set_replay_done(struct journal *j)
{
	/*
	 * journal_space_available must happen before setting JOURNAL_running
	 * JOURNAL_running must happen before JOURNAL_replay_done
	 */
	spin_lock(&j->lock);
	bch2_journal_space_available(j);

	set_bit(JOURNAL_need_flush_write, &j->flags);
	set_bit(JOURNAL_running, &j->flags);
	set_bit(JOURNAL_replay_done, &j->flags);
	spin_unlock(&j->lock);
}

/* init/exit: */

void bch2_dev_journal_exit(struct bch_dev *ca)
{
	struct journal_device *ja = &ca->journal;

	for (unsigned i = 0; i < ARRAY_SIZE(ja->bio); i++) {
		kfree(ja->bio[i]);
		ja->bio[i] = NULL;
	}

	kfree(ja->buckets);
	kfree(ja->bucket_seq);
	ja->buckets = NULL;
	ja->bucket_seq = NULL;
}

int bch2_dev_journal_init(struct bch_dev *ca, struct bch_sb *sb)
{
	struct journal_device *ja = &ca->journal;
	struct bch_sb_field_journal *journal_buckets =
		bch2_sb_field_get(sb, journal);
	struct bch_sb_field_journal_v2 *journal_buckets_v2 =
		bch2_sb_field_get(sb, journal_v2);

	ja->nr = 0;

	if (journal_buckets_v2) {
		unsigned nr = bch2_sb_field_journal_v2_nr_entries(journal_buckets_v2);

		for (unsigned i = 0; i < nr; i++)
			ja->nr += le64_to_cpu(journal_buckets_v2->d[i].nr);
	} else if (journal_buckets) {
		ja->nr = bch2_nr_journal_buckets(journal_buckets);
	}

	ja->bucket_seq = kcalloc(ja->nr, sizeof(u64), GFP_KERNEL);
	if (!ja->bucket_seq)
		return -BCH_ERR_ENOMEM_dev_journal_init;

	unsigned nr_bvecs = DIV_ROUND_UP(JOURNAL_ENTRY_SIZE_MAX, PAGE_SIZE);

	for (unsigned i = 0; i < ARRAY_SIZE(ja->bio); i++) {
		ja->bio[i] = kzalloc(struct_size(ja->bio[i], bio.bi_inline_vecs,
						 nr_bvecs), GFP_KERNEL);
		if (!ja->bio[i])
			return -BCH_ERR_ENOMEM_dev_journal_init;

		ja->bio[i]->ca = ca;
		ja->bio[i]->buf_idx = i;
		bio_init(&ja->bio[i]->bio, NULL, ja->bio[i]->bio.bi_inline_vecs, nr_bvecs, 0);
	}

	ja->buckets = kcalloc(ja->nr, sizeof(u64), GFP_KERNEL);
	if (!ja->buckets)
		return -BCH_ERR_ENOMEM_dev_journal_init;

	if (journal_buckets_v2) {
		unsigned nr = bch2_sb_field_journal_v2_nr_entries(journal_buckets_v2);
		unsigned dst = 0;

		for (unsigned i = 0; i < nr; i++)
			for (unsigned j = 0; j < le64_to_cpu(journal_buckets_v2->d[i].nr); j++)
				ja->buckets[dst++] =
					le64_to_cpu(journal_buckets_v2->d[i].start) + j;
	} else if (journal_buckets) {
		for (unsigned i = 0; i < ja->nr; i++)
			ja->buckets[i] = le64_to_cpu(journal_buckets->buckets[i]);
	}

	return 0;
}

void bch2_fs_journal_exit(struct journal *j)
{
	if (j->wq)
		destroy_workqueue(j->wq);

	darray_exit(&j->early_journal_entries);

	for (unsigned i = 0; i < ARRAY_SIZE(j->buf); i++)
		kvfree(j->buf[i].data);
	kvfree(j->free_buf);
	free_fifo(&j->pin);
}

int bch2_fs_journal_init(struct journal *j)
{
	static struct lock_class_key res_key;

	mutex_init(&j->buf_lock);
	spin_lock_init(&j->lock);
	spin_lock_init(&j->err_lock);
	init_waitqueue_head(&j->wait);
	INIT_DELAYED_WORK(&j->write_work, journal_write_work);
	init_waitqueue_head(&j->reclaim_wait);
	init_waitqueue_head(&j->pin_flush_wait);
	mutex_init(&j->reclaim_lock);
	mutex_init(&j->discard_lock);

	lockdep_init_map(&j->res_map, "journal res", &res_key, 0);

	atomic64_set(&j->reservations.counter,
		     ((union journal_res_state)
		      { .cur_entry_offset = JOURNAL_ENTRY_CLOSED_VAL }).v);

	if (!(init_fifo(&j->pin, JOURNAL_PIN, GFP_KERNEL)))
		return -BCH_ERR_ENOMEM_journal_pin_fifo;

	j->free_buf_size = j->buf_size_want = JOURNAL_ENTRY_SIZE_MIN;
	j->free_buf = kvmalloc(j->free_buf_size, GFP_KERNEL);
	if (!j->free_buf)
		return -BCH_ERR_ENOMEM_journal_buf;

	for (unsigned i = 0; i < ARRAY_SIZE(j->buf); i++)
		j->buf[i].idx = i;

	j->pin.front = j->pin.back = 1;

	j->wq = alloc_workqueue("bcachefs_journal",
				WQ_HIGHPRI|WQ_FREEZABLE|WQ_UNBOUND|WQ_MEM_RECLAIM, 512);
	if (!j->wq)
		return -BCH_ERR_ENOMEM_fs_other_alloc;
	return 0;
}

/* debug: */

static const char * const bch2_journal_flags_strs[] = {
#define x(n)	#n,
	JOURNAL_FLAGS()
#undef x
	NULL
};
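
/*
 * JOURNAL_FLAGS() above is an x-macro list: with x(n) defined as #n, it
 * expands to a NULL-terminated array of flag-name strings (e.g. "running",
 * "replay_done", "need_flush_write" - names matching the JOURNAL_* bits used
 * in this file), indexed by bit position for prt_bitflags() below.
 */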

void __bch2_journal_debug_to_text(struct printbuf *out, struct journal *j)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	union journal_res_state s;
	unsigned long now = jiffies;
	u64 nr_writes = j->nr_flush_writes + j->nr_noflush_writes;

	printbuf_tabstops_reset(out);
	printbuf_tabstop_push(out, 28);
	out->atomic++;

	rcu_read_lock();
	s = READ_ONCE(j->reservations);

	prt_printf(out, "flags:\t");
	prt_bitflags(out, bch2_journal_flags_strs, j->flags);
	prt_newline(out);
	prt_printf(out, "dirty journal entries:\t%llu/%llu\n", fifo_used(&j->pin), j->pin.size);
	prt_printf(out, "seq:\t%llu\n", journal_cur_seq(j));
	prt_printf(out, "seq_ondisk:\t%llu\n", j->seq_ondisk);
	prt_printf(out, "last_seq:\t%llu\n", journal_last_seq(j));
	prt_printf(out, "last_seq_ondisk:\t%llu\n", j->last_seq_ondisk);
	prt_printf(out, "flushed_seq_ondisk:\t%llu\n", j->flushed_seq_ondisk);
	prt_printf(out, "watermark:\t%s\n", bch2_watermarks[j->watermark]);
	prt_printf(out, "each entry reserved:\t%u\n", j->entry_u64s_reserved);
	prt_printf(out, "nr flush writes:\t%llu\n", j->nr_flush_writes);
	prt_printf(out, "nr noflush writes:\t%llu\n", j->nr_noflush_writes);
	prt_printf(out, "average write size:\t");
	prt_human_readable_u64(out, nr_writes ? div64_u64(j->entry_bytes_written, nr_writes) : 0);
	prt_newline(out);
	prt_printf(out, "free buf:\t%u\n", j->free_buf ? j->free_buf_size : 0);
	prt_printf(out, "nr direct reclaim:\t%llu\n", j->nr_direct_reclaim);
	prt_printf(out, "nr background reclaim:\t%llu\n", j->nr_background_reclaim);
	prt_printf(out, "reclaim kicked:\t%u\n", j->reclaim_kicked);
	prt_printf(out, "reclaim runs in:\t%u ms\n", time_after(j->next_reclaim, now)
		   ? jiffies_to_msecs(j->next_reclaim - jiffies) : 0);
	prt_printf(out, "blocked:\t%u\n", j->blocked);
	prt_printf(out, "current entry sectors:\t%u\n", j->cur_entry_sectors);
	prt_printf(out, "current entry error:\t%s\n", bch2_err_str(j->cur_entry_error));
	prt_printf(out, "current entry:\t");

	switch (s.cur_entry_offset) {
	case JOURNAL_ENTRY_ERROR_VAL:
		prt_printf(out, "error\n");
		break;
	case JOURNAL_ENTRY_CLOSED_VAL:
		prt_printf(out, "closed\n");
		break;
	case JOURNAL_ENTRY_BLOCKED_VAL:
		prt_printf(out, "blocked\n");
		break;
	default:
		prt_printf(out, "%u/%u\n", s.cur_entry_offset, j->cur_entry_u64s);
		break;
	}

	prt_printf(out, "unwritten entries:\n");
	bch2_journal_bufs_to_text(out, j);

	prt_printf(out, "space:\n");
	printbuf_indent_add(out, 2);
	prt_printf(out, "discarded\t%u:%u\n",
		   j->space[journal_space_discarded].next_entry,
		   j->space[journal_space_discarded].total);
	prt_printf(out, "clean ondisk\t%u:%u\n",
		   j->space[journal_space_clean_ondisk].next_entry,
		   j->space[journal_space_clean_ondisk].total);
	prt_printf(out, "clean\t%u:%u\n",
		   j->space[journal_space_clean].next_entry,
		   j->space[journal_space_clean].total);
	prt_printf(out, "total\t%u:%u\n",
		   j->space[journal_space_total].next_entry,
		   j->space[journal_space_total].total);
	printbuf_indent_sub(out, 2);

	for_each_member_device_rcu(c, ca, &c->rw_devs[BCH_DATA_journal]) {
		if (!ca->mi.durability)
			continue;

		struct journal_device *ja = &ca->journal;

		if (!test_bit(ca->dev_idx, c->rw_devs[BCH_DATA_journal].d))
			continue;

		if (!ja->nr)
			continue;

		prt_printf(out, "dev %u:\n", ca->dev_idx);
		prt_printf(out, "durability %u:\n", ca->mi.durability);
		printbuf_indent_add(out, 2);
		prt_printf(out, "nr\t%u\n", ja->nr);
		prt_printf(out, "bucket size\t%u\n", ca->mi.bucket_size);
		prt_printf(out, "available\t%u:%u\n", bch2_journal_dev_buckets_available(j, ja, journal_space_discarded), ja->sectors_free);
		prt_printf(out, "discard_idx\t%u\n", ja->discard_idx);
		prt_printf(out, "dirty_ondisk\t%u (seq %llu)\n", ja->dirty_idx_ondisk, ja->bucket_seq[ja->dirty_idx_ondisk]);
		prt_printf(out, "dirty_idx\t%u (seq %llu)\n", ja->dirty_idx, ja->bucket_seq[ja->dirty_idx]);
		prt_printf(out, "cur_idx\t%u (seq %llu)\n", ja->cur_idx, ja->bucket_seq[ja->cur_idx]);
		printbuf_indent_sub(out, 2);
	}

	prt_printf(out, "replicas want %u need %u\n", c->opts.metadata_replicas, c->opts.metadata_replicas_required);

	rcu_read_unlock();

	--out->atomic;
}

void bch2_journal_debug_to_text(struct printbuf *out, struct journal *j)
{
	spin_lock(&j->lock);
	__bch2_journal_debug_to_text(out, j);
	spin_unlock(&j->lock);
}