
1 // SPDX-License-Identifier: GPL-2.0
17 #include "sb-clean.h"
26 lockdep_assert_held(&c->sb_lock); in bch2_journal_pos_from_member_info_set()
29 struct bch_member *m = bch2_members_v2_get_mut(c->disk_sb.sb, ca->dev_idx); in bch2_journal_pos_from_member_info_set()
31 m->last_journal_bucket = cpu_to_le32(ca->journal.cur_idx); in bch2_journal_pos_from_member_info_set()
32 m->last_journal_bucket_offset = cpu_to_le32(ca->mi.bucket_size - ca->journal.sectors_free); in bch2_journal_pos_from_member_info_set()
38 mutex_lock(&c->sb_lock); in bch2_journal_pos_from_member_info_resume()
40 struct bch_member m = bch2_sb_member_get(c->disk_sb.sb, ca->dev_idx); in bch2_journal_pos_from_member_info_resume()
43 if (idx < ca->journal.nr) in bch2_journal_pos_from_member_info_resume()
44 ca->journal.cur_idx = idx; in bch2_journal_pos_from_member_info_resume()
46 if (offset <= ca->mi.bucket_size) in bch2_journal_pos_from_member_info_resume()
47 ca->journal.sectors_free = ca->mi.bucket_size - offset; in bch2_journal_pos_from_member_info_resume()
49 mutex_unlock(&c->sb_lock); in bch2_journal_pos_from_member_info_resume()
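
These two helpers round-trip the write position through the superblock member info: set() records the bucket index and how many sectors of it have been consumed, and resume() recovers sectors_free from that offset, rejecting an offset larger than the bucket. A minimal userspace sketch of the arithmetic; struct saved_pos, bucket_size and the field values are illustrative, not the on-disk layout:

#include <assert.h>
#include <stdint.h>

/* illustrative stand-ins for the member-info fields used above */
struct saved_pos {
	uint32_t last_journal_bucket;
	uint32_t last_journal_bucket_offset;
};

int main(void)
{
	const uint32_t bucket_size = 512;	/* sectors, assumed */
	uint32_t cur_idx = 7, sectors_free = 100;

	/* set: store the offset consumed within the current bucket */
	struct saved_pos m = {
		.last_journal_bucket	    = cur_idx,
		.last_journal_bucket_offset = bucket_size - sectors_free,
	};

	/* resume: recover sectors_free, guarding against a bogus offset */
	uint32_t resumed_free = 0;
	if (m.last_journal_bucket_offset <= bucket_size)
		resumed_free = bucket_size - m.last_journal_bucket_offset;

	assert(m.last_journal_bucket == cur_idx);
	assert(resumed_free == sectors_free);
	return 0;
}
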
53 struct journal_replay *j) in bch2_journal_ptrs_to_text() argument
55 darray_for_each(j->ptrs, i) { in bch2_journal_ptrs_to_text()
56 if (i != j->ptrs.data) in bch2_journal_ptrs_to_text()
59 i->dev, i->bucket, i->bucket_offset, i->sector); in bch2_journal_ptrs_to_text()
64 struct journal_replay *j) in bch2_journal_replay_to_text() argument
66 prt_printf(out, "seq %llu ", le64_to_cpu(j->j.seq)); in bch2_journal_replay_to_text()
68 bch2_journal_ptrs_to_text(out, c, j); in bch2_journal_replay_to_text()
70 for_each_jset_entry_type(entry, &j->j, BCH_JSET_ENTRY_datetime) { in bch2_journal_replay_to_text()
73 bch2_prt_datetime(out, le64_to_cpu(datetime->seconds)); in bch2_journal_replay_to_text()
82 [1] = ((__le32 *) &jset->seq)[0], in journal_nonce()
83 [2] = ((__le32 *) &jset->seq)[1], in journal_nonce()
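
journal_nonce() derives the checksum nonce from the entry's sequence number by reinterpreting the little-endian 64-bit seq as two 32-bit words. A hedged sketch of just that split; the rest of the nonce layout is not modeled, and the asserts assume a little-endian host, matching the on-disk byte order:

#include <assert.h>
#include <stdint.h>
#include <string.h>

int main(void)
{
	uint64_t seq = 0x1122334455667788ULL;	/* stands in for jset->seq */
	uint32_t words[2];

	/* same reinterpretation as ((__le32 *) &jset->seq)[0] and [1] */
	memcpy(words, &seq, sizeof(words));

	assert(words[0] == (uint32_t) seq);		/* low 32 bits */
	assert(words[1] == (uint32_t) (seq >> 32));	/* high 32 bits */
	return 0;
}
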
88 static bool jset_csum_good(struct bch_fs *c, struct jset *j, struct bch_csum *csum) in jset_csum_good() argument
90 if (!bch2_checksum_type_valid(c, JSET_CSUM_TYPE(j))) { in jset_csum_good()
95 *csum = csum_vstruct(c, JSET_CSUM_TYPE(j), journal_nonce(j), j); in jset_csum_good()
96 return !bch2_crc_cmp(j->csum, *csum); in jset_csum_good()
101 return (seq - c->journal_entries_base_seq) & (~0U >> 1); in journal_entry_radix_idx()
108 genradix_ptr(&c->journal_entries, in __journal_replay_free()
109 journal_entry_radix_idx(c, le64_to_cpu(i->j.seq))); in __journal_replay_free()
119 i->ignore_blacklisted = true; in journal_replay_free()
121 i->ignore_not_dirty = true; in journal_replay_free()
123 if (!c->opts.read_entire_journal) in journal_replay_free()
138 * Given a journal entry we just read, add it to the list of journal entries to
143 struct journal_list *jlist, struct jset *j) in journal_entry_add() argument
147 size_t bytes = vstruct_bytes(j); in journal_entry_add()
148 u64 last_seq = !JSET_NO_FLUSH(j) ? le64_to_cpu(j->last_seq) : 0; in journal_entry_add()
152 if (!c->journal.oldest_seq_found_ondisk || in journal_entry_add()
153 le64_to_cpu(j->seq) < c->journal.oldest_seq_found_ondisk) in journal_entry_add()
154 c->journal.oldest_seq_found_ondisk = le64_to_cpu(j->seq); in journal_entry_add()
157 if (!c->opts.read_entire_journal && in journal_entry_add()
158 le64_to_cpu(j->seq) < jlist->last_seq) in journal_entry_add()
164 * within the range of +/- 2 billion of the first one we find. in journal_entry_add()
166 if (!c->journal_entries_base_seq) in journal_entry_add()
167 c->journal_entries_base_seq = max_t(s64, 1, le64_to_cpu(j->seq) - S32_MAX); in journal_entry_add()
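
journal_entries_base_seq pins a base so that journal_entry_radix_idx() can fold 64-bit sequence numbers into a 31-bit radix-tree index; anything outside +/- 2 billion of the first seq seen wraps around. A small demonstration with made-up values (radix_idx is an invented helper mirroring journal_entry_radix_idx()):

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

/* same arithmetic as journal_entry_radix_idx() */
static size_t radix_idx(uint64_t base_seq, uint64_t seq)
{
	return (seq - base_seq) & (~0U >> 1);	/* mask to 31 bits */
}

int main(void)
{
	uint64_t first_seq = 1000;

	/* base_seq = max_t(s64, 1, seq - S32_MAX), as above */
	int64_t	 b    = (int64_t) first_seq - INT32_MAX;
	uint64_t base = b > 1 ? (uint64_t) b : 1;

	assert(radix_idx(base, first_seq)     == first_seq - base);
	assert(radix_idx(base, first_seq + 5) == first_seq - base + 5);

	/* indices wrap modulo 2^31 -- hence the +/- 2 billion window */
	assert(radix_idx(base, base + ((uint64_t) 1 << 31)) == 0);
	return 0;
}
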
170 if (last_seq > jlist->last_seq && !c->opts.read_entire_journal) { in journal_entry_add()
171 genradix_for_each_from(&c->journal_entries, iter, _i, in journal_entry_add()
172 journal_entry_radix_idx(c, jlist->last_seq)) { in journal_entry_add()
178 if (le64_to_cpu(i->j.seq) >= last_seq) in journal_entry_add()
185 jlist->last_seq = max(jlist->last_seq, last_seq); in journal_entry_add()
187 _i = genradix_ptr_alloc(&c->journal_entries, in journal_entry_add()
188 journal_entry_radix_idx(c, le64_to_cpu(j->seq)), in journal_entry_add()
191 return -BCH_ERR_ENOMEM_journal_entry_add; in journal_entry_add()
199 bool identical = bytes == vstruct_bytes(&dup->j) && in journal_entry_add()
200 !memcmp(j, &dup->j, bytes); in journal_entry_add()
203 dup->csum_good; in journal_entry_add()
206 darray_for_each(dup->ptrs, ptr) in journal_entry_add()
207 if (ptr->dev == ca->dev_idx) in journal_entry_add()
210 ret = darray_push(&dup->ptrs, entry_ptr); in journal_entry_add()
232 i = kvmalloc(offsetof(struct journal_replay, j) + bytes, GFP_KERNEL); in journal_entry_add()
234 return -BCH_ERR_ENOMEM_journal_entry_add; in journal_entry_add()
236 darray_init(&i->ptrs); in journal_entry_add()
237 i->csum_good = entry_ptr.csum_good; in journal_entry_add()
238 i->ignore_blacklisted = false; in journal_entry_add()
239 i->ignore_not_dirty = false; in journal_entry_add()
240 unsafe_memcpy(&i->j, j, bytes, "embedded variable length struct"); in journal_entry_add()
244 darray_for_each(dup->ptrs, ptr) in journal_entry_add()
245 darray_push(&i->ptrs, *ptr); in journal_entry_add()
248 darray_push(&i->ptrs, entry_ptr); in journal_entry_add()
281 bch2_prt_jset_entry_type(out, entry->type); in journal_entry_err_msg()
288 prt_printf(out, " seq=%llu", le64_to_cpu(jset->seq)); in journal_entry_err_msg()
292 (u64 *) entry - jset->_data, in journal_entry_err_msg()
293 le32_to_cpu(jset->u64s)); in journal_entry_err_msg()
314 ret = -BCH_ERR_fsck_errors_not_fixed; \
332 struct bkey_i *k, in journal_validate_key() argument
341 if (journal_entry_err_on(!k->k.u64s, in journal_validate_key()
344 "k->u64s 0")) { in journal_validate_key()
345 entry->u64s = cpu_to_le16((u64 *) k - entry->_data); in journal_validate_key()
350 if (journal_entry_err_on((void *) bkey_next(k) > in journal_validate_key()
355 entry->u64s = cpu_to_le16((u64 *) k - entry->_data); in journal_validate_key()
360 if (journal_entry_err_on(k->k.format != KEY_FORMAT_CURRENT, in journal_validate_key()
363 "bad format %u", k->k.format)) { in journal_validate_key()
364 le16_add_cpu(&entry->u64s, -((u16) k->k.u64s)); in journal_validate_key()
365 memmove(k, bkey_next(k), next - (void *) bkey_next(k)); in journal_validate_key()
372 write, NULL, bkey_to_packed(k)); in journal_validate_key()
374 ret = bch2_bkey_validate(c, bkey_i_to_s_c(k), from); in journal_validate_key()
375 if (ret == -BCH_ERR_fsck_delete_bkey) { in journal_validate_key()
376 le16_add_cpu(&entry->u64s, -((u16) k->k.u64s)); in journal_validate_key()
377 memmove(k, bkey_next(k), next - (void *) bkey_next(k)); in journal_validate_key()
386 write, NULL, bkey_to_packed(k)); in journal_validate_key()
397 struct bkey_i *k = entry->start; in journal_entry_btree_keys_validate() local
399 from.level = entry->level; in journal_entry_btree_keys_validate()
400 from.btree = entry->btree_id; in journal_entry_btree_keys_validate()
402 while (k != vstruct_last(entry)) { in journal_entry_btree_keys_validate()
403 int ret = journal_validate_key(c, jset, entry, k, from, version, big_endian); in journal_entry_btree_keys_validate()
409 k = bkey_next(k); in journal_entry_btree_keys_validate()
420 jset_entry_for_each_key(entry, k) { in journal_entry_btree_keys_to_text()
423 bch2_prt_jset_entry_type(out, entry->type); in journal_entry_btree_keys_to_text()
426 bch2_btree_id_level_to_text(out, entry->btree_id, entry->level); in journal_entry_btree_keys_to_text()
428 bch2_bkey_val_to_text(out, c, bkey_i_to_s_c(k)); in journal_entry_btree_keys_to_text()
439 struct bkey_i *k = entry->start; in journal_entry_btree_root_validate() local
443 from.level = entry->level + 1; in journal_entry_btree_root_validate()
444 from.btree = entry->btree_id; in journal_entry_btree_root_validate()
446 if (journal_entry_err_on(!entry->u64s || in journal_entry_btree_root_validate()
447 le16_to_cpu(entry->u64s) != k->k.u64s, in journal_entry_btree_root_validate()
453 * we don't want to null out this jset_entry, in journal_entry_btree_root_validate()
455 * we were _supposed_ to have a btree root in journal_entry_btree_root_validate()
457 entry->u64s = 0; in journal_entry_btree_root_validate()
462 ret = journal_validate_key(c, jset, entry, k, from, version, big_endian); in journal_entry_btree_root_validate()
498 if (journal_entry_err_on(le16_to_cpu(entry->u64s) != 1, in journal_entry_blacklist_validate()
514 prt_printf(out, "seq=%llu", le64_to_cpu(bl->seq)); in journal_entry_blacklist_to_text()
526 if (journal_entry_err_on(le16_to_cpu(entry->u64s) != 2, in journal_entry_blacklist_v2_validate()
536 if (journal_entry_err_on(le64_to_cpu(bl_entry->start) > in journal_entry_blacklist_v2_validate()
537 le64_to_cpu(bl_entry->end), in journal_entry_blacklist_v2_validate()
555 le64_to_cpu(bl->start), in journal_entry_blacklist_v2_to_text()
556 le64_to_cpu(bl->end)); in journal_entry_blacklist_v2_to_text()
567 unsigned bytes = jset_u64s(le16_to_cpu(entry->u64s)) * sizeof(u64); in journal_entry_usage_validate()
589 bch2_prt_fs_usage_type(out, u->entry.btree_id); in journal_entry_usage_to_text()
590 prt_printf(out, " v=%llu", le64_to_cpu(u->v)); in journal_entry_usage_to_text()
601 unsigned bytes = jset_u64s(le16_to_cpu(entry->u64s)) * sizeof(u64); in journal_entry_data_usage_validate()
606 bytes < sizeof(*u) + u->r.nr_devs, in journal_entry_data_usage_validate()
614 if (journal_entry_err_on(bch2_replicas_entry_validate(&u->r, c, &err), in journal_entry_data_usage_validate()
633 bch2_replicas_entry_to_text(out, &u->r); in journal_entry_data_usage_to_text()
634 prt_printf(out, "=%llu", le64_to_cpu(u->v)); in journal_entry_data_usage_to_text()
645 unsigned bytes = jset_u64s(le16_to_cpu(entry->u64s)) * sizeof(u64); in journal_entry_clock_validate()
656 if (journal_entry_err_on(clock->rw > 1, in journal_entry_clock_validate()
674 prt_printf(out, "%s=%llu", str_write_read(clock->rw), le64_to_cpu(clock->time)); in journal_entry_clock_to_text()
685 unsigned bytes = jset_u64s(le16_to_cpu(entry->u64s)) * sizeof(u64); in journal_entry_dev_usage_validate()
698 if (journal_entry_err_on(u->pad, in journal_entry_dev_usage_validate()
720 prt_printf(out, "dev=%u", le32_to_cpu(u->dev)); in journal_entry_dev_usage_to_text()
727 le64_to_cpu(u->d[i].buckets), in journal_entry_dev_usage_to_text()
728 le64_to_cpu(u->d[i].sectors), in journal_entry_dev_usage_to_text()
729 le64_to_cpu(u->d[i].fragmented)); in journal_entry_dev_usage_to_text()
748 prt_printf(out, "%.*s", jset_entry_log_msg_bytes(l), l->d); in journal_entry_log_to_text()
829 bch2_prt_datetime(out, le64_to_cpu(datetime->seconds)); in journal_entry_datetime_to_text()
855 return entry->type < BCH_JSET_ENTRY_NR in bch2_journal_entry_validate()
856 ? bch2_jset_entry_ops[entry->type].validate(c, jset, entry, in bch2_journal_entry_validate()
864 bch2_prt_jset_entry_type(out, entry->type); in bch2_journal_entry_to_text()
866 if (entry->type < BCH_JSET_ENTRY_NR) { in bch2_journal_entry_to_text()
868 bch2_jset_entry_ops[entry->type].to_text(out, c, entry); in bch2_journal_entry_to_text()
878 .journal_seq = le64_to_cpu(jset->seq), in jset_validate_entries()
881 unsigned version = le32_to_cpu(jset->version); in jset_validate_entries()
885 from.journal_offset = (u64 *) entry - jset->_data; in jset_validate_entries()
891 jset->u64s = cpu_to_le32((u64 *) entry - jset->_data); in jset_validate_entries()
912 .journal_seq = le64_to_cpu(jset->seq), in jset_validate()
916 if (le64_to_cpu(jset->magic) != jset_magic(c)) in jset_validate()
919 unsigned version = le32_to_cpu(jset->version); in jset_validate()
924 ca ? ca->name : c->name, in jset_validate()
925 sector, le64_to_cpu(jset->seq), in jset_validate()
928 /* don't try to continue: */ in jset_validate()
929 return -EINVAL; in jset_validate()
936 ca ? ca->name : c->name, in jset_validate()
937 sector, le64_to_cpu(jset->seq), in jset_validate()
943 le64_to_cpu(jset->last_seq) > le64_to_cpu(jset->seq), in jset_validate()
947 le64_to_cpu(jset->last_seq), in jset_validate()
948 le64_to_cpu(jset->seq))) { in jset_validate()
949 jset->last_seq = jset->seq; in jset_validate()
966 .journal_seq = le64_to_cpu(jset->seq), in jset_validate_early()
970 if (le64_to_cpu(jset->magic) != jset_magic(c)) in jset_validate_early()
973 unsigned version = le32_to_cpu(jset->version); in jset_validate_early()
978 ca ? ca->name : c->name, in jset_validate_early()
979 sector, le64_to_cpu(jset->seq), in jset_validate_early()
982 /* don't try to continue: */ in jset_validate_early()
983 return -EINVAL; in jset_validate_early()
995 ca ? ca->name : c->name, in jset_validate_early()
996 sector, le64_to_cpu(jset->seq), bytes)) in jset_validate_early()
997 le32_add_cpu(&jset->u64s, in jset_validate_early()
998 -((bytes - (bucket_sectors_left << 9)) / 8)); in jset_validate_early()
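
When an entry claims more bytes than remain in its bucket, jset_validate_early() trims u64s by the overflow, eight bytes per u64, so later parsing stays inside the buffer. The same arithmetic in isolation; the header size here is illustrative, not the real struct jset size:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

int main(void)
{
	uint32_t u64s = 1000;			/* claimed payload, in u64s */
	size_t	 hdr  = 32;			/* jset header size, illustrative */
	size_t	 bytes = hdr + (size_t) u64s * 8;
	unsigned bucket_sectors_left = 8;	/* 8 << 9 = 4096 bytes left */

	if (bytes > (size_t) bucket_sectors_left << 9)
		/* the le32_add_cpu() above, spelled out */
		u64s -= (bytes - (bucket_sectors_left << 9)) / 8;

	/* the trimmed entry now fits in the sectors actually present */
	assert(hdr + (size_t) u64s * 8 <= (size_t) bucket_sectors_left << 9);
	return 0;
}
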
1015 return -BCH_ERR_ENOMEM_journal_read_buf_realloc; in journal_read_buf_realloc()
1020 return -BCH_ERR_ENOMEM_journal_read_buf_realloc; in journal_read_buf_realloc()
1022 kvfree(b->data); in journal_read_buf_realloc()
1023 b->data = n; in journal_read_buf_realloc()
1024 b->size = new_size; in journal_read_buf_realloc()
1033 struct bch_fs *c = ca->fs; in journal_read_bucket()
1034 struct journal_device *ja = &ca->journal; in journal_read_bucket()
1035 struct jset *j = NULL; in journal_read_bucket() local
1037 u64 offset = bucket_to_sector(ca, ja->buckets[bucket]), in journal_read_bucket()
1038 end = offset + ca->mi.bucket_size; in journal_read_bucket()
1051 end - offset, buf->size >> 9); in journal_read_bucket()
1052 nr_bvecs = buf_pages(buf->data, sectors_read << 9); in journal_read_bucket()
1056 return -BCH_ERR_ENOMEM_journal_read_bucket; in journal_read_bucket()
1057 bio_init(bio, ca->disk_sb.bdev, bio->bi_inline_vecs, nr_bvecs, REQ_OP_READ); in journal_read_bucket()
1059 bio->bi_iter.bi_sector = offset; in journal_read_bucket()
1060 bch2_bio_map(bio, buf->data, sectors_read << 9); in journal_read_bucket()
1067 ret = -BCH_ERR_EIO_fault_injected; in journal_read_bucket()
1084 j = buf->data; in journal_read_bucket()
1087 ret = jset_validate_early(c, ca, j, offset, in journal_read_bucket()
1088 end - offset, sectors_read); in journal_read_bucket()
1091 sectors = vstruct_sectors(j, c->block_bits); in journal_read_bucket()
1094 if (vstruct_bytes(j) > buf->size) { in journal_read_bucket()
1096 vstruct_bytes(j)); in journal_read_bucket()
1115 if (le64_to_cpu(j->seq) > ja->highest_seq_found) { in journal_read_bucket()
1116 ja->highest_seq_found = le64_to_cpu(j->seq); in journal_read_bucket()
1117 ja->cur_idx = bucket; in journal_read_bucket()
1118 ja->sectors_free = ca->mi.bucket_size - in journal_read_bucket()
1119 bucket_remainder(ca, offset) - sectors; in journal_read_bucket()
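
On finding a new highest sequence number, journal_read_bucket() records where writing can resume: whatever is left of the bucket after the entry just read. The computation with illustrative numbers, bucket_remainder() modeled as the offset within the bucket:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	const uint32_t bucket_size = 512;		/* sectors, assumed */
	uint64_t bucket_start = 4096;			/* first sector of bucket */
	uint64_t offset	      = bucket_start + 200;	/* entry starts here */
	uint32_t sectors      = 24;			/* entry length */

	uint32_t remainder    = offset - bucket_start;	/* bucket_remainder() */
	uint32_t sectors_free = bucket_size - remainder - sectors;

	assert(sectors_free == 512 - 200 - 24);
	return 0;
}
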
1123 * This happens sometimes if we don't have discards on - in journal_read_bucket()
1128 if (le64_to_cpu(j->seq) < ja->bucket_seq[bucket]) in journal_read_bucket()
1131 ja->bucket_seq[bucket] = le64_to_cpu(j->seq); in journal_read_bucket()
1133 enum bch_csum_type csum_type = JSET_CSUM_TYPE(j); in journal_read_bucket()
1135 csum_good = jset_csum_good(c, j, &csum); in journal_read_bucket()
1143 bch2_csum_err_msg(&err, csum_type, j->csum, csum), in journal_read_bucket()
1148 ret = bch2_encrypt(c, JSET_CSUM_TYPE(j), journal_nonce(j), in journal_read_bucket()
1149 j->encrypted_start, in journal_read_bucket()
1150 vstruct_end(j) - (void *) j->encrypted_start); in journal_read_bucket()
1153 mutex_lock(&jlist->lock); in journal_read_bucket()
1156 .dev = ca->dev_idx, in journal_read_bucket()
1158 .bucket_offset = offset - in journal_read_bucket()
1159 bucket_to_sector(ca, ja->buckets[bucket]), in journal_read_bucket()
1161 }, jlist, j); in journal_read_bucket()
1162 mutex_unlock(&jlist->lock); in journal_read_bucket()
1175 sectors_read -= sectors; in journal_read_bucket()
1176 j = ((void *) j) + (sectors << 9); in journal_read_bucket()
1190 struct bch_fs *c = ca->fs; in CLOSURE_CALLBACK()
1192 container_of(cl->parent, struct journal_list, cl); in CLOSURE_CALLBACK()
1197 if (!ja->nr) in CLOSURE_CALLBACK()
1204 pr_debug("%u journal buckets", ja->nr); in CLOSURE_CALLBACK()
1206 for (i = 0; i < ja->nr; i++) { in CLOSURE_CALLBACK()
1213 * Set dirty_idx to indicate the entire journal is full and needs to be in CLOSURE_CALLBACK()
1214 * reclaimed - journal reclaim will immediately reclaim whatever isn't in CLOSURE_CALLBACK()
1217 ja->discard_idx = ja->dirty_idx_ondisk = in CLOSURE_CALLBACK()
1218 ja->dirty_idx = (ja->cur_idx + 1) % ja->nr; in CLOSURE_CALLBACK()
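
Marking the ring full: discard_idx, dirty_idx_ondisk and dirty_idx all point one past cur_idx, so from reclaim's perspective no bucket is free until it runs. A tiny model of that ring arithmetic, not the driver's actual space accounting:

#include <assert.h>

int main(void)
{
	unsigned nr = 8, cur_idx = 5;

	/* one past the current write bucket, wrapping around the ring */
	unsigned dirty_idx = (cur_idx + 1) % nr;

	/* free gap between cur_idx and dirty_idx: zero when "full" */
	unsigned available = (dirty_idx - cur_idx - 1 + nr) % nr;

	assert(dirty_idx == 6);
	assert(available == 0);
	return 0;
}
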
1220 bch_verbose(c, "journal read done on device %s, ret %i", ca->name, ret); in CLOSURE_CALLBACK()
1222 percpu_ref_put(&ca->io_ref[READ]); in CLOSURE_CALLBACK()
1226 mutex_lock(&jlist->lock); in CLOSURE_CALLBACK()
1227 jlist->ret = ret; in CLOSURE_CALLBACK()
1228 mutex_unlock(&jlist->lock); in CLOSURE_CALLBACK()
1251 if (!c->opts.fsck && in bch2_journal_read()
1255 if ((ca->mi.state == BCH_MEMBER_STATE_rw || in bch2_journal_read()
1256 ca->mi.state == BCH_MEMBER_STATE_ro) && in bch2_journal_read()
1257 percpu_ref_tryget(&ca->io_ref[READ])) in bch2_journal_read()
1258 closure_call(&ca->journal.read, in bch2_journal_read()
1277 * Find most recent flush entry, and ignore newer non-flush entries - in bch2_journal_read()
1280 genradix_for_each_reverse(&c->journal_entries, radix_iter, _i) { in bch2_journal_read()
1287 *blacklist_seq = *start_seq = le64_to_cpu(i->j.seq) + 1; in bch2_journal_read()
1289 if (JSET_NO_FLUSH(&i->j)) { in bch2_journal_read()
1290 i->ignore_blacklisted = true; in bch2_journal_read()
1294 if (!last_write_torn && !i->csum_good) { in bch2_journal_read()
1296 i->ignore_blacklisted = true; in bch2_journal_read()
1302 .journal_seq = le64_to_cpu(i->j.seq), in bch2_journal_read()
1304 if (journal_entry_err_on(le64_to_cpu(i->j.last_seq) > le64_to_cpu(i->j.seq), in bch2_journal_read()
1305 c, le32_to_cpu(i->j.version), &i->j, NULL, in bch2_journal_read()
1308 le64_to_cpu(i->j.last_seq), in bch2_journal_read()
1309 le64_to_cpu(i->j.seq))) in bch2_journal_read()
1310 i->j.last_seq = i->j.seq; in bch2_journal_read()
1312 *last_seq = le64_to_cpu(i->j.last_seq); in bch2_journal_read()
1313 *blacklist_seq = le64_to_cpu(i->j.seq) + 1; in bch2_journal_read()
1324 "journal read done, but no entries found after dropping non-flushes"); in bch2_journal_read()
1328 bch_info(c, "journal read done, replaying entries %llu-%llu", in bch2_journal_read()
1329 *last_seq, *blacklist_seq - 1); in bch2_journal_read()
1332 bch_info(c, "dropped unflushed entries %llu-%llu", in bch2_journal_read()
1333 *blacklist_seq, *start_seq - 1); in bch2_journal_read()
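
A simplified model of that newest-to-oldest scan, with the torn-write and blacklist handling omitted (struct entry and the sequence numbers are invented): start_seq comes from the newest entry of any kind, blacklist_seq from the most recent flush entry, and everything in between is the "dropped unflushed" range:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

struct entry { uint64_t seq; bool no_flush; };

int main(void)
{
	struct entry newest_first[] = {
		{ 103, true  },		/* noflush: ignored */
		{ 102, true  },		/* noflush: ignored */
		{ 101, false },		/* most recent flush: wins */
		{ 100, false },
	};
	uint64_t start_seq = 0, blacklist_seq = 0;

	for (unsigned i = 0; i < 4; i++) {
		if (!start_seq)
			start_seq = newest_first[i].seq + 1;
		if (newest_first[i].no_flush)
			continue;
		blacklist_seq = newest_first[i].seq + 1;
		break;
	}

	assert(start_seq == 104);	/* replay state resumes after 103 */
	assert(blacklist_seq == 102);	/* seqs 102-103 will be blacklisted */
	return 0;
}
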
1336 genradix_for_each(&c->journal_entries, radix_iter, _i) { in bch2_journal_read()
1342 seq = le64_to_cpu(i->j.seq); in bch2_journal_read()
1349 fsck_err_on(!JSET_NO_FLUSH(&i->j), c, in bch2_journal_read()
1352 i->ignore_blacklisted = true; in bch2_journal_read()
1358 genradix_for_each(&c->journal_entries, radix_iter, _i) { in bch2_journal_read()
1364 BUG_ON(seq > le64_to_cpu(i->j.seq)); in bch2_journal_read()
1366 while (seq < le64_to_cpu(i->j.seq)) { in bch2_journal_read()
1370 while (seq < le64_to_cpu(i->j.seq) && in bch2_journal_read()
1374 if (seq == le64_to_cpu(i->j.seq)) in bch2_journal_read()
1379 while (seq < le64_to_cpu(i->j.seq) && in bch2_journal_read()
1385 prt_printf(&buf1, " size %zu", vstruct_sectors(&prev->j, c->block_bits)); in bch2_journal_read()
1390 missing_end = seq - 1; in bch2_journal_read()
1392 "journal entries %llu-%llu missing! (replaying %llu-%llu)\n" in bch2_journal_read()
1396 *last_seq, *blacklist_seq - 1, in bch2_journal_read()
1407 genradix_for_each(&c->journal_entries, radix_iter, _i) { in bch2_journal_read()
1418 darray_for_each(i->ptrs, ptr) { in bch2_journal_read()
1419 struct bch_dev *ca = bch2_dev_have_ref(c, ptr->dev); in bch2_journal_read()
1421 if (!ptr->csum_good) in bch2_journal_read()
1422 bch_err_dev_offset(ca, ptr->sector, in bch2_journal_read()
1424 le64_to_cpu(i->j.seq), in bch2_journal_read()
1425 i->csum_good ? " (had good copy on another device)" : ""); in bch2_journal_read()
1429 bch2_dev_have_ref(c, i->ptrs.data[0].dev), in bch2_journal_read()
1430 &i->j, in bch2_journal_read()
1431 i->ptrs.data[0].sector, in bch2_journal_read()
1436 darray_for_each(i->ptrs, ptr) in bch2_journal_read()
1437 replicas_entry_add_dev(&replicas.e, ptr->dev); in bch2_journal_read()
1446 (le64_to_cpu(i->j.seq) == *last_seq || in bch2_journal_read()
1449 le64_to_cpu(i->j.seq), buf.buf))) { in bch2_journal_read()
1463 static void journal_advance_devs_to_next_bucket(struct journal *j, in journal_advance_devs_to_next_bucket() argument
1467 struct bch_fs *c = container_of(j, struct bch_fs, journal); in journal_advance_devs_to_next_bucket()
1470 struct bch_dev *ca = rcu_dereference(c->devs[*i]); in journal_advance_devs_to_next_bucket()
1474 struct journal_device *ja = &ca->journal; in journal_advance_devs_to_next_bucket()
1476 if (sectors > ja->sectors_free && in journal_advance_devs_to_next_bucket()
1477 sectors <= ca->mi.bucket_size && in journal_advance_devs_to_next_bucket()
1478 bch2_journal_dev_buckets_available(j, ja, in journal_advance_devs_to_next_bucket()
1480 ja->cur_idx = (ja->cur_idx + 1) % ja->nr; in journal_advance_devs_to_next_bucket()
1481 ja->sectors_free = ca->mi.bucket_size; in journal_advance_devs_to_next_bucket()
1484 * ja->bucket_seq[ja->cur_idx] must always have in journal_advance_devs_to_next_bucket()
1487 ja->bucket_seq[ja->cur_idx] = le64_to_cpu(seq); in journal_advance_devs_to_next_bucket()
1492 static void __journal_write_alloc(struct journal *j, in __journal_write_alloc() argument
1499 struct bch_fs *c = container_of(j, struct bch_fs, journal); in __journal_write_alloc()
1502 struct bch_dev *ca = rcu_dereference(c->devs[*i]); in __journal_write_alloc()
1506 struct journal_device *ja = &ca->journal; in __journal_write_alloc()
1512 if (!ca->mi.durability || in __journal_write_alloc()
1513 ca->mi.state != BCH_MEMBER_STATE_rw || in __journal_write_alloc()
1514 !ja->nr || in __journal_write_alloc()
1515 bch2_bkey_has_device_c(bkey_i_to_s_c(&w->key), ca->dev_idx) || in __journal_write_alloc()
1516 sectors > ja->sectors_free) in __journal_write_alloc()
1519 bch2_dev_stripe_increment(ca, &j->wp.stripe); in __journal_write_alloc()
1521 bch2_bkey_append_ptr(&w->key, in __journal_write_alloc()
1524 ja->buckets[ja->cur_idx]) + in __journal_write_alloc()
1525 ca->mi.bucket_size - in __journal_write_alloc()
1526 ja->sectors_free, in __journal_write_alloc()
1527 .dev = ca->dev_idx, in __journal_write_alloc()
1530 ja->sectors_free -= sectors; in __journal_write_alloc()
1531 ja->bucket_seq[ja->cur_idx] = le64_to_cpu(w->data->seq); in __journal_write_alloc()
1533 *replicas += ca->mi.durability; in __journal_write_alloc()
1541 * journal_write_alloc - decide where to write next journal entry
1543 * @j: journal object
1544 * @w: journal buf (entry to be written)
1546 * Returns: 0 on success, or -BCH_ERR_insufficient_devices on failure
1548 static int journal_write_alloc(struct journal *j, struct journal_buf *w) in journal_write_alloc() argument
1550 struct bch_fs *c = container_of(j, struct bch_fs, journal); in journal_write_alloc()
1553 unsigned sectors = vstruct_sectors(w->data, c->block_bits); in journal_write_alloc()
1554 unsigned target = c->opts.metadata_target ?: in journal_write_alloc()
1555 c->opts.foreground_target; in journal_write_alloc()
1557 READ_ONCE(c->opts.metadata_replicas); in journal_write_alloc()
1559 READ_ONCE(c->opts.metadata_replicas_required)); in journal_write_alloc()
1564 /* We might run more than once if we have to stop and do discards: */ in journal_write_alloc()
1565 struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(bkey_i_to_s_c(&w->key)); in journal_write_alloc()
1567 struct bch_dev *ca = bch2_dev_rcu_noerror(c, p->dev); in journal_write_alloc()
1569 replicas += ca->mi.durability; in journal_write_alloc()
1574 devs_sorted = bch2_dev_alloc_list(c, &j->wp.stripe, &devs); in journal_write_alloc()
1576 __journal_write_alloc(j, w, &devs_sorted, sectors, &replicas, replicas_want); in journal_write_alloc()
1582 journal_advance_devs_to_next_bucket(j, &devs_sorted, sectors, w->data->seq); in journal_write_alloc()
1596 BUG_ON(bkey_val_u64s(&w->key.k) > BCH_REPLICAS_MAX); in journal_write_alloc()
1598 return replicas >= replicas_need ? 0 : -BCH_ERR_insufficient_journal_devices; in journal_write_alloc()
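
The success criterion is durability, not device count: the write succeeds once the summed durability of the devices it landed on reaches replicas_need. A sketch of that accounting with invented durability values:

#include <assert.h>

int main(void)
{
	/* durability of each device the entry was allocated to */
	unsigned durability[] = { 1, 1, 2 };
	unsigned replicas = 0, replicas_need = 2;

	for (unsigned i = 0; i < sizeof(durability) / sizeof(durability[0]); i++)
		replicas += durability[i];

	/* mirrors the return statement above: 0 on success */
	int ret = replicas >= replicas_need ? 0 : -1;
	assert(ret == 0);
	return 0;
}
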
1601 static void journal_buf_realloc(struct journal *j, struct journal_buf *buf) in journal_buf_realloc() argument
1603 struct bch_fs *c = container_of(j, struct bch_fs, journal); in journal_buf_realloc()
1605 /* we aren't holding j->lock: */ in journal_buf_realloc()
1606 unsigned new_size = READ_ONCE(j->buf_size_want); in journal_buf_realloc()
1609 if (buf->buf_size >= new_size) in journal_buf_realloc()
1621 memcpy(new_buf, buf->data, buf->buf_size); in journal_buf_realloc()
1623 spin_lock(&j->lock); in journal_buf_realloc()
1624 swap(buf->data, new_buf); in journal_buf_realloc()
1625 swap(buf->buf_size, new_size); in journal_buf_realloc()
1626 spin_unlock(&j->lock); in journal_buf_realloc()
1634 struct journal *j = container_of(w, struct journal, buf[w->idx]); in CLOSURE_CALLBACK() local
1635 struct bch_fs *c = container_of(j, struct bch_fs, journal); in CLOSURE_CALLBACK()
1637 u64 seq = le64_to_cpu(w->data->seq); in CLOSURE_CALLBACK()
1640 bch2_time_stats_update(!JSET_NO_FLUSH(w->data) in CLOSURE_CALLBACK()
1641 ? j->flush_write_time in CLOSURE_CALLBACK()
1642 : j->noflush_write_time, j->write_start_time); in CLOSURE_CALLBACK()
1644 if (!w->devs_written.nr) { in CLOSURE_CALLBACK()
1645 if (!bch2_journal_error(j)) in CLOSURE_CALLBACK()
1646 bch_err(c, "unable to write journal to sufficient devices"); in CLOSURE_CALLBACK()
1647 err = -BCH_ERR_journal_write_err; in CLOSURE_CALLBACK()
1650 w->devs_written); in CLOSURE_CALLBACK()
1659 spin_lock(&j->lock); in CLOSURE_CALLBACK()
1660 if (seq >= j->pin.front) in CLOSURE_CALLBACK()
1661 journal_seq_pin(j, seq)->devs = w->devs_written; in CLOSURE_CALLBACK()
1662 if (err && (!j->err_seq || seq < j->err_seq)) in CLOSURE_CALLBACK()
1663 j->err_seq = seq; in CLOSURE_CALLBACK()
1664 w->write_done = true; in CLOSURE_CALLBACK()
1666 if (!j->free_buf || j->free_buf_size < w->buf_size) { in CLOSURE_CALLBACK()
1667 swap(j->free_buf, w->data); in CLOSURE_CALLBACK()
1668 swap(j->free_buf_size, w->buf_size); in CLOSURE_CALLBACK()
1671 if (w->data) { in CLOSURE_CALLBACK()
1672 void *buf = w->data; in CLOSURE_CALLBACK()
1673 w->data = NULL; in CLOSURE_CALLBACK()
1674 w->buf_size = 0; in CLOSURE_CALLBACK()
1676 spin_unlock(&j->lock); in CLOSURE_CALLBACK()
1678 spin_lock(&j->lock); in CLOSURE_CALLBACK()
1684 for (seq = journal_last_unwritten_seq(j); in CLOSURE_CALLBACK()
1685 seq <= journal_cur_seq(j); in CLOSURE_CALLBACK()
1687 w = j->buf + (seq & JOURNAL_BUF_MASK); in CLOSURE_CALLBACK()
1688 if (!w->write_done) in CLOSURE_CALLBACK()
1691 if (!j->err_seq && !w->noflush) { in CLOSURE_CALLBACK()
1692 j->flushed_seq_ondisk = seq; in CLOSURE_CALLBACK()
1693 j->last_seq_ondisk = w->last_seq; in CLOSURE_CALLBACK()
1695 closure_wake_up(&c->freelist_wait); in CLOSURE_CALLBACK()
1699 j->seq_ondisk = seq; in CLOSURE_CALLBACK()
1708 if (j->watermark != BCH_WATERMARK_stripe) in CLOSURE_CALLBACK()
1709 journal_reclaim_kick(&c->journal); in CLOSURE_CALLBACK()
1711 closure_wake_up(&w->wait); in CLOSURE_CALLBACK()
1716 bch2_journal_reclaim_fast(j); in CLOSURE_CALLBACK()
1717 bch2_journal_space_available(j); in CLOSURE_CALLBACK()
1719 track_event_change(&c->times[BCH_TIME_blocked_journal_max_in_flight], false); in CLOSURE_CALLBACK()
1721 journal_wake(j); in CLOSURE_CALLBACK()
1724 if (journal_last_unwritten_seq(j) == journal_cur_seq(j) && in CLOSURE_CALLBACK()
1725 j->reservations.cur_entry_offset < JOURNAL_ENTRY_CLOSED_VAL) { in CLOSURE_CALLBACK()
1726 struct journal_buf *buf = journal_cur_buf(j); in CLOSURE_CALLBACK()
1727 long delta = buf->expires - jiffies; in CLOSURE_CALLBACK()
1730 * We don't close a journal entry to write it while there's in CLOSURE_CALLBACK()
1731 * previous entries still in flight - the current journal entry in CLOSURE_CALLBACK()
1732 * might want to be written now: in CLOSURE_CALLBACK()
1734 mod_delayed_work(j->wq, &j->write_work, max(0L, delta)); in CLOSURE_CALLBACK()
1738 * We don't typically trigger journal writes from here - the next journal in CLOSURE_CALLBACK()
1740 * allocated, in bch2_journal_write() - but the journal write error path in CLOSURE_CALLBACK()
1743 bch2_journal_do_writes(j); in CLOSURE_CALLBACK()
1744 spin_unlock(&j->lock); in CLOSURE_CALLBACK()
1753 struct bch_dev *ca = jbio->ca; in journal_write_endio()
1754 struct journal *j = &ca->fs->journal; in journal_write_endio() local
1755 struct journal_buf *w = j->buf + jbio->buf_idx; in journal_write_endio()
1758 jbio->submit_time, !bio->bi_status); in journal_write_endio()
1760 if (bio->bi_status) { in journal_write_endio()
1763 le64_to_cpu(w->data->seq), in journal_write_endio()
1764 bch2_blk_status_to_str(bio->bi_status)); in journal_write_endio()
1767 spin_lock_irqsave(&j->err_lock, flags); in journal_write_endio()
1768 bch2_dev_list_drop_dev(&w->devs_written, ca->dev_idx); in journal_write_endio()
1769 spin_unlock_irqrestore(&j->err_lock, flags); in journal_write_endio()
1772 closure_put(&w->io); in journal_write_endio()
1773 percpu_ref_put(&ca->io_ref[WRITE]); in journal_write_endio()
1779 struct journal *j = container_of(w, struct journal, buf[w->idx]); in CLOSURE_CALLBACK() local
1780 struct bch_fs *c = container_of(j, struct bch_fs, journal); in CLOSURE_CALLBACK()
1781 unsigned sectors = vstruct_sectors(w->data, c->block_bits); in CLOSURE_CALLBACK()
1783 extent_for_each_ptr(bkey_i_to_s_extent(&w->key), ptr) { in CLOSURE_CALLBACK()
1784 struct bch_dev *ca = bch2_dev_get_ioref(c, ptr->dev, WRITE); in CLOSURE_CALLBACK()
1787 bch_err(c, "missing device %u for journal write", ptr->dev); in CLOSURE_CALLBACK()
1791 this_cpu_add(ca->io_done->sectors[WRITE][BCH_DATA_journal], in CLOSURE_CALLBACK()
1794 struct journal_device *ja = &ca->journal; in CLOSURE_CALLBACK()
1795 struct journal_bio *jbio = ja->bio[w->idx]; in CLOSURE_CALLBACK()
1796 struct bio *bio = &jbio->bio; in CLOSURE_CALLBACK()
1798 jbio->submit_time = local_clock(); in CLOSURE_CALLBACK()
1800 bio_reset(bio, ca->disk_sb.bdev, REQ_OP_WRITE|REQ_SYNC|REQ_META); in CLOSURE_CALLBACK()
1801 bio->bi_iter.bi_sector = ptr->offset; in CLOSURE_CALLBACK()
1802 bio->bi_end_io = journal_write_endio; in CLOSURE_CALLBACK()
1803 bio->bi_private = ca; in CLOSURE_CALLBACK()
1804 bio->bi_ioprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_RT, 0); in CLOSURE_CALLBACK()
1806 BUG_ON(bio->bi_iter.bi_sector == ca->prev_journal_sector); in CLOSURE_CALLBACK()
1807 ca->prev_journal_sector = bio->bi_iter.bi_sector; in CLOSURE_CALLBACK()
1809 if (!JSET_NO_FLUSH(w->data)) in CLOSURE_CALLBACK()
1810 bio->bi_opf |= REQ_FUA; in CLOSURE_CALLBACK()
1811 if (!JSET_NO_FLUSH(w->data) && !w->separate_flush) in CLOSURE_CALLBACK()
1812 bio->bi_opf |= REQ_PREFLUSH; in CLOSURE_CALLBACK()
1814 bch2_bio_map(bio, w->data, sectors << 9); in CLOSURE_CALLBACK()
1819 ja->bucket_seq[ja->cur_idx] = le64_to_cpu(w->data->seq); in CLOSURE_CALLBACK()
1822 continue_at(cl, journal_write_done, j->wq); in CLOSURE_CALLBACK()
1828 struct journal *j = container_of(w, struct journal, buf[w->idx]); in CLOSURE_CALLBACK() local
1829 struct bch_fs *c = container_of(j, struct bch_fs, journal); in CLOSURE_CALLBACK()
1832 * Wait for previous journal writes to complete; they won't necessarily in CLOSURE_CALLBACK()
1835 if (j->seq_ondisk + 1 != le64_to_cpu(w->data->seq)) { in CLOSURE_CALLBACK()
1836 spin_lock(&j->lock); in CLOSURE_CALLBACK()
1837 if (j->seq_ondisk + 1 != le64_to_cpu(w->data->seq)) { in CLOSURE_CALLBACK()
1838 closure_wait(&j->async_wait, cl); in CLOSURE_CALLBACK()
1839 spin_unlock(&j->lock); in CLOSURE_CALLBACK()
1840 continue_at(cl, journal_write_preflush, j->wq); in CLOSURE_CALLBACK()
1843 spin_unlock(&j->lock); in CLOSURE_CALLBACK()
1846 if (w->separate_flush) { in CLOSURE_CALLBACK()
1848 percpu_ref_get(&ca->io_ref[WRITE]); in CLOSURE_CALLBACK()
1850 struct journal_device *ja = &ca->journal; in CLOSURE_CALLBACK()
1851 struct bio *bio = &ja->bio[w->idx]->bio; in CLOSURE_CALLBACK()
1852 bio_reset(bio, ca->disk_sb.bdev, in CLOSURE_CALLBACK()
1854 bio->bi_end_io = journal_write_endio; in CLOSURE_CALLBACK()
1855 bio->bi_private = ca; in CLOSURE_CALLBACK()
1859 continue_at(cl, journal_write_submit, j->wq); in CLOSURE_CALLBACK()
1862 * no need to punt to another work item if we're not waiting on in CLOSURE_CALLBACK()
1865 journal_write_submit(&cl->work); in CLOSURE_CALLBACK()
1869 static int bch2_journal_write_prep(struct journal *j, struct journal_buf *w) in bch2_journal_write_prep() argument
1871 struct bch_fs *c = container_of(j, struct bch_fs, journal); in bch2_journal_write_prep()
1873 struct jset *jset = w->data; in bch2_journal_write_prep()
1878 u64 seq = le64_to_cpu(jset->seq); in bch2_journal_write_prep()
1886 * If we wanted to be really fancy here, we could sort all the keys in in bch2_journal_write_prep()
1887 * the jset and drop keys that were overwritten - probably not worth it: in bch2_journal_write_prep()
1890 unsigned u64s = le16_to_cpu(i->u64s); in bch2_journal_write_prep()
1898 * entry gets written we have to propagate them to in bch2_journal_write_prep()
1899 * c->btree_roots in bch2_journal_write_prep()
1901 * But, every journal entry we write has to contain all the in bch2_journal_write_prep()
1903 * to c->btree_roots we have to get any missing btree roots and in bch2_journal_write_prep()
1904 * add them to this journal entry: in bch2_journal_write_prep()
1906 switch (i->type) { in bch2_journal_write_prep()
1909 __set_bit(i->btree_id, &btree_roots_have); in bch2_journal_write_prep()
1912 EBUG_ON(!w->need_flush_to_write_buffer); in bch2_journal_write_prep()
1917 jset_entry_for_each_key(i, k) { in bch2_journal_write_prep()
1918 ret = bch2_journal_key_to_wb(c, &wb, i->btree_id, k); in bch2_journal_write_prep()
1920 bch2_fs_fatal_error(c, "flushing journal keys to btree write buffer: %s", in bch2_journal_write_prep()
1926 i->type = BCH_JSET_ENTRY_btree_keys; in bch2_journal_write_prep()
1934 bch2_fs_fatal_error(c, "error flushing journal keys to btree write buffer: %s", in bch2_journal_write_prep()
1940 spin_lock(&c->journal.lock); in bch2_journal_write_prep()
1941 w->need_flush_to_write_buffer = false; in bch2_journal_write_prep()
1942 spin_unlock(&c->journal.lock); in bch2_journal_write_prep()
1950 d->entry.type = BCH_JSET_ENTRY_datetime; in bch2_journal_write_prep()
1951 d->seconds = cpu_to_le64(ktime_get_real_seconds()); in bch2_journal_write_prep()
1954 u64s = (u64 *) end - (u64 *) start; in bch2_journal_write_prep()
1956 WARN_ON(u64s > j->entry_u64s_reserved); in bch2_journal_write_prep()
1958 le32_add_cpu(&jset->u64s, u64s); in bch2_journal_write_prep()
1960 sectors = vstruct_sectors(jset, c->block_bits); in bch2_journal_write_prep()
1963 if (sectors > w->sectors) { in bch2_journal_write_prep()
1965 vstruct_bytes(jset), w->sectors << 9, in bch2_journal_write_prep()
1966 u64s, w->u64s_reserved, j->entry_u64s_reserved); in bch2_journal_write_prep()
1967 return -EINVAL; in bch2_journal_write_prep()
1970 jset->magic = cpu_to_le64(jset_magic(c)); in bch2_journal_write_prep()
1971 jset->version = cpu_to_le32(c->sb.version); in bch2_journal_write_prep()
1977 j->last_empty_seq = seq; in bch2_journal_write_prep()
1982 if (le32_to_cpu(jset->version) < bcachefs_metadata_version_current) in bch2_journal_write_prep()
1990 jset->encrypted_start, in bch2_journal_write_prep()
1991 vstruct_end(jset) - (void *) jset->encrypted_start); in bch2_journal_write_prep()
1995 jset->csum = csum_vstruct(c, JSET_CSUM_TYPE(jset), in bch2_journal_write_prep()
2002 memset((void *) jset + bytes, 0, (sectors << 9) - bytes); in bch2_journal_write_prep()
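
The buffer is zero-padded from the end of the jset to the next block boundary so stale bytes never reach the device. The padding arithmetic, assuming 512-byte sectors and an invented buffer size:

#include <assert.h>
#include <string.h>

int main(void)
{
	unsigned char buf[4096];
	size_t	 bytes	 = 3000;		/* vstruct_bytes(jset), say */
	unsigned sectors = (bytes + 511) >> 9;	/* round up to whole sectors */

	memset(buf, 0xaa, sizeof(buf));		/* pretend: live jset data */
	/* zero the tail, as the memset() above does */
	memset(buf + bytes, 0, ((size_t) sectors << 9) - bytes);

	assert(sectors == 6);
	assert(buf[bytes] == 0 && buf[(sectors << 9) - 1] == 0);
	return 0;
}
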
2006 static int bch2_journal_write_pick_flush(struct journal *j, struct journal_buf *w) in bch2_journal_write_pick_flush() argument
2008 struct bch_fs *c = container_of(j, struct bch_fs, journal); in bch2_journal_write_pick_flush()
2009 int error = bch2_journal_error(j); in bch2_journal_write_pick_flush()
2012 * If the journal is in an error state - we did an emergency shutdown - in bch2_journal_write_pick_flush()
2013 * we prefer to continue doing journal writes. We just mark them as in bch2_journal_write_pick_flush()
2015 * list_journal tool - this helps in debugging. in bch2_journal_write_pick_flush()
2021 * previously - we can't leave the journal without any flush writes in in bch2_journal_write_pick_flush()
2027 if (error && test_bit(JOURNAL_need_flush_write, &j->flags)) in bch2_journal_write_pick_flush()
2031 w->noflush || in bch2_journal_write_pick_flush()
2032 (!w->must_flush && in bch2_journal_write_pick_flush()
2033 time_before(jiffies, j->last_flush_write + in bch2_journal_write_pick_flush()
2034 msecs_to_jiffies(c->opts.journal_flush_delay)) && in bch2_journal_write_pick_flush()
2035 test_bit(JOURNAL_may_skip_flush, &j->flags))) { in bch2_journal_write_pick_flush()
2036 w->noflush = true; in bch2_journal_write_pick_flush()
2037 SET_JSET_NO_FLUSH(w->data, true); in bch2_journal_write_pick_flush()
2038 w->data->last_seq = 0; in bch2_journal_write_pick_flush()
2039 w->last_seq = 0; in bch2_journal_write_pick_flush()
2041 j->nr_noflush_writes++; in bch2_journal_write_pick_flush()
2043 w->must_flush = true; in bch2_journal_write_pick_flush()
2044 j->last_flush_write = jiffies; in bch2_journal_write_pick_flush()
2045 j->nr_flush_writes++; in bch2_journal_write_pick_flush()
2046 clear_bit(JOURNAL_need_flush_write, &j->flags); in bch2_journal_write_pick_flush()
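
The noflush decision above, reduced to a predicate: skip the flush only when nothing demands one, the last flush was recent enough, and skipping is permitted. A simplified sketch (pick_noflush is an invented helper; the error-state handling and jiffies wrap-around are omitted):

#include <assert.h>
#include <stdbool.h>

static bool pick_noflush(bool must_flush, bool may_skip_flush,
			 unsigned long now, unsigned long last_flush,
			 unsigned long flush_delay)
{
	return !must_flush &&
	       now < last_flush + flush_delay &&	/* time_before() */
	       may_skip_flush;
}

int main(void)
{
	/* recent flush, nothing forcing one: downgrade to noflush */
	assert(pick_noflush(false, true, 100, 90, 50));

	/* must_flush set: do a real flush write */
	assert(!pick_noflush(true, true, 100, 90, 50));
	return 0;
}
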
2055 struct journal *j = container_of(w, struct journal, buf[w->idx]); in CLOSURE_CALLBACK() local
2056 struct bch_fs *c = container_of(j, struct bch_fs, journal); in CLOSURE_CALLBACK()
2064 BUG_ON(BCH_SB_CLEAN(c->disk_sb.sb)); in CLOSURE_CALLBACK()
2065 BUG_ON(!w->write_started); in CLOSURE_CALLBACK()
2066 BUG_ON(w->write_allocated); in CLOSURE_CALLBACK()
2067 BUG_ON(w->write_done); in CLOSURE_CALLBACK()
2069 j->write_start_time = local_clock(); in CLOSURE_CALLBACK()
2071 spin_lock(&j->lock); in CLOSURE_CALLBACK()
2073 w->separate_flush = true; in CLOSURE_CALLBACK()
2075 ret = bch2_journal_write_pick_flush(j, w); in CLOSURE_CALLBACK()
2076 spin_unlock(&j->lock); in CLOSURE_CALLBACK()
2080 mutex_lock(&j->buf_lock); in CLOSURE_CALLBACK()
2081 journal_buf_realloc(j, w); in CLOSURE_CALLBACK()
2083 ret = bch2_journal_write_prep(j, w); in CLOSURE_CALLBACK()
2084 mutex_unlock(&j->buf_lock); in CLOSURE_CALLBACK()
2088 j->entry_bytes_written += vstruct_bytes(w->data); in CLOSURE_CALLBACK()
2091 spin_lock(&j->lock); in CLOSURE_CALLBACK()
2092 ret = journal_write_alloc(j, w); in CLOSURE_CALLBACK()
2093 if (!ret || !j->can_discard) in CLOSURE_CALLBACK()
2096 spin_unlock(&j->lock); in CLOSURE_CALLBACK()
2097 bch2_journal_do_discards(j); in CLOSURE_CALLBACK()
2100 if (ret && !bch2_journal_error(j)) { in CLOSURE_CALLBACK()
2104 __bch2_journal_debug_to_text(&buf, j); in CLOSURE_CALLBACK()
2105 spin_unlock(&j->lock); in CLOSURE_CALLBACK()
2106 prt_printf(&buf, bch2_fmt(c, "Unable to allocate journal write at seq %llu for %zu sectors: %s"), in CLOSURE_CALLBACK()
2107 le64_to_cpu(w->data->seq), in CLOSURE_CALLBACK()
2108 vstruct_sectors(w->data, c->block_bits), in CLOSURE_CALLBACK()
2117 * write is allocated, no longer need to account for it in in CLOSURE_CALLBACK()
2120 w->sectors = 0; in CLOSURE_CALLBACK()
2121 w->write_allocated = true; in CLOSURE_CALLBACK()
2127 bch2_journal_space_available(j); in CLOSURE_CALLBACK()
2128 bch2_journal_do_writes(j); in CLOSURE_CALLBACK()
2129 spin_unlock(&j->lock); in CLOSURE_CALLBACK()
2131 w->devs_written = bch2_bkey_devs(bkey_i_to_s_c(&w->key)); in CLOSURE_CALLBACK()
2133 if (c->opts.nochanges) in CLOSURE_CALLBACK()
2137 * Mark journal replicas before we submit the write to guarantee in CLOSURE_CALLBACK()
2141 w->devs_written); in CLOSURE_CALLBACK()
2146 if (!JSET_NO_FLUSH(w->data)) in CLOSURE_CALLBACK()
2147 continue_at(cl, journal_write_preflush, j->wq); in CLOSURE_CALLBACK()
2149 continue_at(cl, journal_write_submit, j->wq); in CLOSURE_CALLBACK()
2152 continue_at(cl, journal_write_done, j->wq); in CLOSURE_CALLBACK()
2156 continue_at(cl, journal_write_done, j->wq); in CLOSURE_CALLBACK()